diff --git a/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb.8 b/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb.8
new file mode 100644
index 0000000000000000000000000000000000000000..e036b964f2d4cb32f80551f3898bce2dfc169f19
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb.8
@@ -0,0 +1,306 @@
+'\" te
+.\" Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>.
+.\" All Rights Reserved.
+.\"
+.\" This file and its contents are supplied under the terms of the
+.\" Common Development and Distribution License ("CDDL"), version 1.0.
+.\" You may only use this file in accordance with the terms of version
+.\" 1.0 of the CDDL.
+.\"
+.\" A full copy of the text of the CDDL should have accompanied this
+.\" source.  A copy of the CDDL is also available via the Internet at
+.\" http://www.illumos.org/license/CDDL.
+.\"
+.\"
+.\" Copyright 2012, Richard Lowe.
+.\" Copyright (c) 2012, Marcelo Araujo <araujo@FreeBSD.org>.
+.\" All Rights Reserved.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd May 10, 2012
+.Dt ZDB 8
+.Os
+.Sh NAME
+.Nm zdb
+.Nd Display zpool debugging and consistency information
+.Sh SYNOPSIS
+.Nm
+.Op Fl CumdibcsDvhLXFPA
+.Op Fl e Op Fl p Ar path...
+.Op Fl t Ar txg
+.Ar poolname
+.Op Ar object ...
+.Nm
+.Op Fl divPA
+.Op Fl e Op Fl p Ar path...
+.Ar dataset
+.Op Ar object ...
+.Nm
+.Fl m Op Fl LXFPA
+.Op Fl t Ar txg
+.Op Fl e Op Fl p Ar path...
+.Ar poolname
+.Nm
+.Fl R Op Fl A
+.Op Fl e Op Fl p Ar path...
+.Ar poolname
+.Ar vdev Ns : Ns Ar offset Ns : Ns Ar size Ns Op Ns : Ns Ar flags
+.Nm
+.Fl S
+.Op Fl AP
+.Op Fl e Op Fl p Ar path...
+.Ar poolname
+.Nm
+.Fl l
+.Op Fl uA
+.Ar device
+.Nm
+.Fl C
+.Op Fl A
+.Op Fl U Ar cachefile
+.Sh DESCRIPTION
+The
+.Nm
+utility displays information about a ZFS pool useful for debugging and
+performs some amount of consistency checking.
+It is not a general purpose tool and options (and facilities) may change.
+This is neither a
+.Xr fsck 8
+nor a
+.Xr fsdb 8
+utility.
+.Pp
+The output of this command in general reflects the on-disk structure of a ZFS
+pool, and is inherently unstable.
+The precise output of most invocations is not documented, a knowledge of ZFS
+internals is assumed.
+.Pp
+When operating on an imported and active pool it is possible, though unlikely,
+that zdb may interpret inconsistent pool data and behave erratically.
+.Sh OPTIONS
+Display options:
+.Bl -tag -width indent
+.It Fl b
+Display statistics regarding the number, size (logical, physical and
+allocated) and deduplication of blocks.
+.It Fl c
+Verify the checksum of all metadata blocks while printing block statistics
+(see
+.Fl b Ns ).
+.Pp
+If specified multiple times, verify the checksums of all blocks.
+.It Fl C
+Display information about the configuration. If specified with no other
+options, instead display information about the cache file
+.Po Pa /etc/zfs/zpool.cache Pc .
+To specify the cache file to display, see
+.Fl U
+.Pp
+If specified multiple times, and a pool name is also specified, display both
+the cached configuration and the on-disk configuration.
+If specified multiple times with
+.Fl e
+also display the configuration that would be used were the pool to be
+imported.
+.It Fl d
+Display information about datasets. Specified once, displays basic dataset
+information: ID, create transaction, size, and object count.
+.Pp
+If specified multiple times provides greater and greater verbosity.
+.Pp
+If object IDs are specified, display information about those specific objects only.
+.It Fl D
+Display deduplication statistics, including the deduplication ratio (dedup),
+compression ratio (compress), inflation due to the zfs copies property
+(copies), and an overall effective ratio (dedup * compress / copies).
+.Pp
+If specified twice, display a histogram of deduplication statistics, showing
+the allocated (physically present on disk) and referenced (logically
+referenced in the pool) block counts and sizes by reference count.
+.It Fl h
+Display pool history similar to
+.Cm zpool history ,
+but include internal changes, transaction, and dataset information.
+.It Fl i
+Display information about intent log (ZIL) entries relating to each
+dataset.
+If specified multiple times, display counts of each intent log transaction
+type.
+.It Fl l Ar device
+Display the vdev labels from the specified device.
+If the
+.Fl u
+option is also specified, also display the uberblocks on this device.
+.It Fl L
+Disable leak tracing and the loading of space maps.
+By default,
+.Nm
+verifies that all non-free blocks are referenced, which can be very expensive.
+.It Fl m
+Display the offset, spacemap, and free space of each metaslab.
+When specified twice, also display information about the maximum contiguous
+free space and the percentage of free space in each space map.
+When specified three times display every spacemap record.
+.It Xo
+.Fl R Ar poolname
+.Ar vdev Ns : Ns Ar offset Ns : Ns Ar size Ns Op Ns : Ns Ar flags
+.Xc
+Read and display a block from the specified device. By default the block is
+displayed as a hex dump, but see the description of the
+.Fl r
+flag, below.
+.Pp
+The block is specified in terms of a colon-separated tuple
+.Ar vdev
+(an integer vdev identifier)
+.Ar offset
+(the offset within the vdev)
+.Ar size
+(the size of the block to read) and, optionally,
+.Ar flags
+(a set of flags, described below).
+.Bl -tag -width indent
+.It Sy b offset
+Print block pointer
+.It Sy d
+Decompress the block
+.It Sy e
+Byte swap the block
+.It Sy g
+Dump gang block header
+.It Sy i
+Dump indirect block
+.It Sy r
+Dump raw uninterpreted block data
+.El
+.It Fl s
+Report statistics on
+.Nm Ns 's
+I/O.
+Display operation counts, bandwidth, and error counts of I/O to the pool from
+.Nm .
+.It Fl S
+Simulate the effects of deduplication, constructing a DDT and then display
+that DDT as with \fB-DD\fR.
+.It Fl u
+Display the current uberblock.
+.El
+.Pp
+Other options:
+.Bl -tag -width indent
+.It Fl A
+Do not abort should any assertion fail.
+.It Fl AA
+Enable panic recovery, certain errors which would otherwise be fatal are
+demoted to warnings.
+.It Fl AAA
+Do not abort if asserts fail and also enable panic recovery.
+.It Fl e Op Fl p Ar path...
+Operate on an exported pool, not present in
+.Pa /etc/zfs/zpool.cache .
+The
+.Fl p
+flag specifies the path under which devices are to be searched.
+.It Fl F
+Attempt to make an unreadable pool readable by trying progressively older
+transactions.
+.It Fl P
+Print numbers in an unscaled form more amenable to parsing, e.g. 1000000 rather
+than 1M.
+.It Fl t Ar transaction
+Specify the highest transaction to use when searching for uberblocks.
+See also the
+.Fl u
+and
+.Fl l
+options for a means to see the available uberblocks and their associated
+transaction numbers.
+.It Fl U Ar cachefile
+Use a cache file other than
+.Pa /etc/zfs/zpool.cache .
+This option is only valid with
+.Fl C
+.It Fl v
+Enable verbosity.
+Specify multiple times for increased verbosity.
+.It Fl X
+Attempt
+.Ql extreme
+transaction rewind, that is attempt the same recovery as
+.Fl F
+but read transactions otherwise deemed too old.
+.El
+.Pp
+Specifying a display option more than once enables verbosity for only that
+option, with more occurrences enabling more verbosity.
+.Pp
+If no options are specified, all information about the named pool will be
+displayed at default verbosity.
+.Sh EXAMPLES
+.Bl -tag -width 0n
+.It Sy Example 1 Display the configuration of imported pool 'rpool'
+.Bd -literal -offset 2n
+.Li # Ic zdb -C rpool
+
+MOS Configuration:
+        version: 28
+        name: 'rpool'
+ ...
+.Ed
+.It Sy Example 2 Display basic dataset information about 'rpool'
+.Bd -literal -offset 2n
+.Li # Ic zdb -d rpool
+Dataset mos [META], ID 0, cr_txg 4, 26.9M, 1051 objects
+Dataset rpool/swap [ZVOL], ID 59, cr_txg 356, 486M, 2 objects
+ ...
+.Ed
+.It Xo Sy Example 3 Display basic information about object 0 in
+.Sy 'rpool/export/home'
+.Xc
+.Bd -literal -offset 2n
+.Li # Ic zdb -d rpool/export/home 0
+Dataset rpool/export/home [ZPL], ID 137, cr_txg 1546, 32K, 8 objects
+
+    Object  lvl   iblk   dblk  dsize  lsize   %full  type
+         0    7    16K    16K  15.0K    16K   25.00  DMU dnode
+.Ed
+.It Xo Sy Example 4 Display the predicted effect of enabling deduplication on
+.Sy 'rpool'
+.Xc
+.Bd -literal -offset 2n
+.Li # Ic zdb -S rpool
+Simulated DDT histogram:
+
+bucket             allocated                      referenced
+______  ______________________________  ______________________________
+refcnt  blocks   LSIZE   PSIZE   DSIZE  blocks   LSIZE   PSIZE   DSIZE
+------  ------   -----   -----   -----  ------   -----   -----   -----
+     1    694K   27.1G   15.0G   15.0G    694K   27.1G   15.0G   15.0G
+     2   35.0K   1.33G    699M    699M   74.7K   2.79G   1.45G   1.45G
+ ...
+dedup = 1.11, compress = 1.80, copies = 1.00, dedup * compress / copies = 2.00
+.Ed
+.El
+.Sh SEE ALSO
+.Xr zfs 8 ,
+.Xr zpool 8
+.Sh AUTHORS
+This manual page is a
+.Xr mdoc 7
+reimplementation of the
+.Tn illumos
+manual page
+.Em zdb(1M) ,
+modified and customized for
+.Fx
+and licensed under the
+Common Development and Distribution License
+.Pq Tn CDDL .
+.Pp
+The
+.Xr mdoc 7
+implementation of this manual page was initially written by
+.An Martin Matuska Aq mm@FreeBSD.org
+and
+.An Marcelo Araujo Aq araujo@FreeBSD.org .
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb.c b/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb.c
new file mode 100644
index 0000000000000000000000000000000000000000..82941af4e93ab1898a4d3135887fd4c144ca396d
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb.c
@@ -0,0 +1,3231 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdio_ext.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <sys/zfs_context.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/dmu.h>
+#include <sys/zap.h>
+#include <sys/fs/zfs.h>
+#include <sys/zfs_znode.h>
+#include <sys/zfs_sa.h>
+#include <sys/sa.h>
+#include <sys/sa_impl.h>
+#include <sys/vdev.h>
+#include <sys/vdev_impl.h>
+#include <sys/metaslab_impl.h>
+#include <sys/dmu_objset.h>
+#include <sys/dsl_dir.h>
+#include <sys/dsl_dataset.h>
+#include <sys/dsl_pool.h>
+#include <sys/dbuf.h>
+#include <sys/zil.h>
+#include <sys/zil_impl.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <sys/dmu_traverse.h>
+#include <sys/zio_checksum.h>
+#include <sys/zio_compress.h>
+#include <sys/zfs_fuid.h>
+#include <sys/arc.h>
+#include <sys/ddt.h>
+#include <sys/zfeature.h>
+#undef ZFS_MAXNAMELEN
+#undef verify
+#include <libzfs.h>
+
+#define	ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ?	\
+	zio_compress_table[(idx)].ci_name : "UNKNOWN")
+#define	ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ?	\
+	zio_checksum_table[(idx)].ci_name : "UNKNOWN")
+#define	ZDB_OT_NAME(idx) ((idx) < DMU_OT_NUMTYPES ?	\
+	dmu_ot[(idx)].ot_name : DMU_OT_IS_VALID(idx) ?	\
+	dmu_ot_byteswap[DMU_OT_BYTESWAP(idx)].ob_name : "UNKNOWN")
+#define	ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) :		\
+	(((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ?	\
+	DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES))
+
+#ifndef lint
+extern int zfs_recover;
+#else
+int zfs_recover;
+#endif
+
+const char cmdname[] = "zdb";
+uint8_t dump_opt[256];
+
+typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
+
+extern void dump_intent_log(zilog_t *);
+uint64_t *zopt_object = NULL;
+int zopt_objects = 0;
+libzfs_handle_t *g_zfs;
+
+/*
+ * These libumem hooks provide a reasonable set of defaults for the allocator's
+ * debugging facilities.
+ */
+const char *
+_umem_debug_init(void)
+{
+	return ("default,verbose"); /* $UMEM_DEBUG setting */
+}
+
+const char *
+_umem_logging_init(void)
+{
+	return ("fail,contents"); /* $UMEM_LOGGING setting */
+}
+
+static void
+usage(void)
+{
+	(void) fprintf(stderr,
+	    "Usage: %s [-CumdibcsDvhLXFPA] [-t txg] [-e [-p path...]] "
+	    "poolname [object...]\n"
+	    "       %s [-divPA] [-e -p path...] dataset [object...]\n"
+	    "       %s -m [-LXFPA] [-t txg] [-e [-p path...]] "
+	    "poolname [vdev [metaslab...]]\n"
+	    "       %s -R [-A] [-e [-p path...]] poolname "
+	    "vdev:offset:size[:flags]\n"
+	    "       %s -S [-PA] [-e [-p path...]] poolname\n"
+	    "       %s -l [-uA] device\n"
+	    "       %s -C [-A] [-U config]\n\n",
+	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname);
+
+	(void) fprintf(stderr, "    Dataset name must include at least one "
+	    "separator character '/' or '@'\n");
+	(void) fprintf(stderr, "    If dataset name is specified, only that "
+	    "dataset is dumped\n");
+	(void) fprintf(stderr, "    If object numbers are specified, only "
+	    "those objects are dumped\n\n");
+	(void) fprintf(stderr, "    Options to control amount of output:\n");
+	(void) fprintf(stderr, "        -u uberblock\n");
+	(void) fprintf(stderr, "        -d dataset(s)\n");
+	(void) fprintf(stderr, "        -i intent logs\n");
+	(void) fprintf(stderr, "        -C config (or cachefile if alone)\n");
+	(void) fprintf(stderr, "        -h pool history\n");
+	(void) fprintf(stderr, "        -b block statistics\n");
+	(void) fprintf(stderr, "        -m metaslabs\n");
+	(void) fprintf(stderr, "        -c checksum all metadata (twice for "
+	    "all data) blocks\n");
+	(void) fprintf(stderr, "        -s report stats on zdb's I/O\n");
+	(void) fprintf(stderr, "        -D dedup statistics\n");
+	(void) fprintf(stderr, "        -S simulate dedup to measure effect\n");
+	(void) fprintf(stderr, "        -v verbose (applies to all others)\n");
+	(void) fprintf(stderr, "        -l dump label contents\n");
+	(void) fprintf(stderr, "        -L disable leak tracking (do not "
+	    "load spacemaps)\n");
+	(void) fprintf(stderr, "        -R read and display block from a "
+	    "device\n\n");
+	(void) fprintf(stderr, "    Below options are intended for use "
+	    "with other options (except -l):\n");
+	(void) fprintf(stderr, "        -A ignore assertions (-A), enable "
+	    "panic recovery (-AA) or both (-AAA)\n");
+	(void) fprintf(stderr, "        -F attempt automatic rewind within "
+	    "safe range of transaction groups\n");
+	(void) fprintf(stderr, "        -U <cachefile_path> -- use alternate "
+	    "cachefile\n");
+	(void) fprintf(stderr, "        -X attempt extreme rewind (does not "
+	    "work with dataset)\n");
+	(void) fprintf(stderr, "        -e pool is exported/destroyed/"
+	    "has altroot/not in a cachefile\n");
+	(void) fprintf(stderr, "        -p <path> -- use one or more with "
+	    "-e to specify path to vdev dir\n");
+	(void) fprintf(stderr, "        -P print numbers in parseable form\n");
+	(void) fprintf(stderr, "        -t <txg> -- highest txg to use when "
+	    "searching for uberblocks\n");
+	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
+	    "to make only that option verbose\n");
+	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
+	exit(1);
+}
+
+/*
+ * Called for usage errors that are discovered after a call to spa_open(),
+ * dmu_bonus_hold(), or pool_match().  abort() is called for other errors.
+ */
+
+static void
+fatal(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	(void) fprintf(stderr, "%s: ", cmdname);
+	(void) vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	(void) fprintf(stderr, "\n");
+
+	exit(1);
+}
+
+/* ARGSUSED */
+static void
+dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	nvlist_t *nv;
+	size_t nvsize = *(uint64_t *)data;
+	char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
+
+	VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
+
+	VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
+
+	umem_free(packed, nvsize);
+
+	dump_nvlist(nv, 8);
+
+	nvlist_free(nv);
+}
+
+static void
+zdb_nicenum(uint64_t num, char *buf)
+{
+	if (dump_opt['P'])
+		(void) sprintf(buf, "%llu", (longlong_t)num);
+	else
+		nicenum(num, buf);
+}
+
+const char dump_zap_stars[] = "****************************************";
+const int dump_zap_width = sizeof (dump_zap_stars) - 1;
+
+static void
+dump_zap_histogram(uint64_t histo[ZAP_HISTOGRAM_SIZE])
+{
+	int i;
+	int minidx = ZAP_HISTOGRAM_SIZE - 1;
+	int maxidx = 0;
+	uint64_t max = 0;
+
+	for (i = 0; i < ZAP_HISTOGRAM_SIZE; i++) {
+		if (histo[i] > max)
+			max = histo[i];
+		if (histo[i] > 0 && i > maxidx)
+			maxidx = i;
+		if (histo[i] > 0 && i < minidx)
+			minidx = i;
+	}
+
+	if (max < dump_zap_width)
+		max = dump_zap_width;
+
+	for (i = minidx; i <= maxidx; i++)
+		(void) printf("\t\t\t%u: %6llu %s\n", i, (u_longlong_t)histo[i],
+		    &dump_zap_stars[(max - histo[i]) * dump_zap_width / max]);
+}
+
+static void
+dump_zap_stats(objset_t *os, uint64_t object)
+{
+	int error;
+	zap_stats_t zs;
+
+	error = zap_get_stats(os, object, &zs);
+	if (error)
+		return;
+
+	if (zs.zs_ptrtbl_len == 0) {
+		ASSERT(zs.zs_num_blocks == 1);
+		(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
+		    (u_longlong_t)zs.zs_blocksize,
+		    (u_longlong_t)zs.zs_num_entries);
+		return;
+	}
+
+	(void) printf("\tFat ZAP stats:\n");
+
+	(void) printf("\t\tPointer table:\n");
+	(void) printf("\t\t\t%llu elements\n",
+	    (u_longlong_t)zs.zs_ptrtbl_len);
+	(void) printf("\t\t\tzt_blk: %llu\n",
+	    (u_longlong_t)zs.zs_ptrtbl_zt_blk);
+	(void) printf("\t\t\tzt_numblks: %llu\n",
+	    (u_longlong_t)zs.zs_ptrtbl_zt_numblks);
+	(void) printf("\t\t\tzt_shift: %llu\n",
+	    (u_longlong_t)zs.zs_ptrtbl_zt_shift);
+	(void) printf("\t\t\tzt_blks_copied: %llu\n",
+	    (u_longlong_t)zs.zs_ptrtbl_blks_copied);
+	(void) printf("\t\t\tzt_nextblk: %llu\n",
+	    (u_longlong_t)zs.zs_ptrtbl_nextblk);
+
+	(void) printf("\t\tZAP entries: %llu\n",
+	    (u_longlong_t)zs.zs_num_entries);
+	(void) printf("\t\tLeaf blocks: %llu\n",
+	    (u_longlong_t)zs.zs_num_leafs);
+	(void) printf("\t\tTotal blocks: %llu\n",
+	    (u_longlong_t)zs.zs_num_blocks);
+	(void) printf("\t\tzap_block_type: 0x%llx\n",
+	    (u_longlong_t)zs.zs_block_type);
+	(void) printf("\t\tzap_magic: 0x%llx\n",
+	    (u_longlong_t)zs.zs_magic);
+	(void) printf("\t\tzap_salt: 0x%llx\n",
+	    (u_longlong_t)zs.zs_salt);
+
+	(void) printf("\t\tLeafs with 2^n pointers:\n");
+	dump_zap_histogram(zs.zs_leafs_with_2n_pointers);
+
+	(void) printf("\t\tBlocks with n*5 entries:\n");
+	dump_zap_histogram(zs.zs_blocks_with_n5_entries);
+
+	(void) printf("\t\tBlocks n/10 full:\n");
+	dump_zap_histogram(zs.zs_blocks_n_tenths_full);
+
+	(void) printf("\t\tEntries with n chunks:\n");
+	dump_zap_histogram(zs.zs_entries_using_n_chunks);
+
+	(void) printf("\t\tBuckets with n entries:\n");
+	dump_zap_histogram(zs.zs_buckets_with_n_entries);
+}
+
+/*ARGSUSED*/
+static void
+dump_none(objset_t *os, uint64_t object, void *data, size_t size)
+{
+}
+
+/*ARGSUSED*/
+static void
+dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	(void) printf("\tUNKNOWN OBJECT TYPE\n");
+}
+
+/*ARGSUSED*/
+void
+dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
+{
+}
+
+/*ARGSUSED*/
+static void
+dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
+{
+}
+
+/*ARGSUSED*/
+static void
+dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	zap_cursor_t zc;
+	zap_attribute_t attr;
+	void *prop;
+	int i;
+
+	dump_zap_stats(os, object);
+	(void) printf("\n");
+
+	for (zap_cursor_init(&zc, os, object);
+	    zap_cursor_retrieve(&zc, &attr) == 0;
+	    zap_cursor_advance(&zc)) {
+		(void) printf("\t\t%s = ", attr.za_name);
+		if (attr.za_num_integers == 0) {
+			(void) printf("\n");
+			continue;
+		}
+		prop = umem_zalloc(attr.za_num_integers *
+		    attr.za_integer_length, UMEM_NOFAIL);
+		(void) zap_lookup(os, object, attr.za_name,
+		    attr.za_integer_length, attr.za_num_integers, prop);
+		if (attr.za_integer_length == 1) {
+			(void) printf("%s", (char *)prop);
+		} else {
+			for (i = 0; i < attr.za_num_integers; i++) {
+				switch (attr.za_integer_length) {
+				case 2:
+					(void) printf("%u ",
+					    ((uint16_t *)prop)[i]);
+					break;
+				case 4:
+					(void) printf("%u ",
+					    ((uint32_t *)prop)[i]);
+					break;
+				case 8:
+					(void) printf("%lld ",
+					    (u_longlong_t)((int64_t *)prop)[i]);
+					break;
+				}
+			}
+		}
+		(void) printf("\n");
+		umem_free(prop, attr.za_num_integers * attr.za_integer_length);
+	}
+	zap_cursor_fini(&zc);
+}
+
+/*ARGSUSED*/
+static void
+dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	dump_zap_stats(os, object);
+	/* contents are printed elsewhere, properly decoded */
+}
+
+/*ARGSUSED*/
+static void
+dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	zap_cursor_t zc;
+	zap_attribute_t attr;
+
+	dump_zap_stats(os, object);
+	(void) printf("\n");
+
+	for (zap_cursor_init(&zc, os, object);
+	    zap_cursor_retrieve(&zc, &attr) == 0;
+	    zap_cursor_advance(&zc)) {
+		(void) printf("\t\t%s = ", attr.za_name);
+		if (attr.za_num_integers == 0) {
+			(void) printf("\n");
+			continue;
+		}
+		(void) printf(" %llx : [%d:%d:%d]\n",
+		    (u_longlong_t)attr.za_first_integer,
+		    (int)ATTR_LENGTH(attr.za_first_integer),
+		    (int)ATTR_BSWAP(attr.za_first_integer),
+		    (int)ATTR_NUM(attr.za_first_integer));
+	}
+	zap_cursor_fini(&zc);
+}
+
+/*ARGSUSED*/
+static void
+dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	zap_cursor_t zc;
+	zap_attribute_t attr;
+	uint16_t *layout_attrs;
+	int i;
+
+	dump_zap_stats(os, object);
+	(void) printf("\n");
+
+	for (zap_cursor_init(&zc, os, object);
+	    zap_cursor_retrieve(&zc, &attr) == 0;
+	    zap_cursor_advance(&zc)) {
+		(void) printf("\t\t%s = [", attr.za_name);
+		if (attr.za_num_integers == 0) {
+			(void) printf("\n");
+			continue;
+		}
+
+		VERIFY(attr.za_integer_length == 2);
+		layout_attrs = umem_zalloc(attr.za_num_integers *
+		    attr.za_integer_length, UMEM_NOFAIL);
+
+		VERIFY(zap_lookup(os, object, attr.za_name,
+		    attr.za_integer_length,
+		    attr.za_num_integers, layout_attrs) == 0);
+
+		for (i = 0; i != attr.za_num_integers; i++)
+			(void) printf(" %d ", (int)layout_attrs[i]);
+		(void) printf("]\n");
+		umem_free(layout_attrs,
+		    attr.za_num_integers * attr.za_integer_length);
+	}
+	zap_cursor_fini(&zc);
+}
+
+/*ARGSUSED*/
+static void
+dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	zap_cursor_t zc;
+	zap_attribute_t attr;
+	const char *typenames[] = {
+		/* 0 */ "not specified",
+		/* 1 */ "FIFO",
+		/* 2 */ "Character Device",
+		/* 3 */ "3 (invalid)",
+		/* 4 */ "Directory",
+		/* 5 */ "5 (invalid)",
+		/* 6 */ "Block Device",
+		/* 7 */ "7 (invalid)",
+		/* 8 */ "Regular File",
+		/* 9 */ "9 (invalid)",
+		/* 10 */ "Symbolic Link",
+		/* 11 */ "11 (invalid)",
+		/* 12 */ "Socket",
+		/* 13 */ "Door",
+		/* 14 */ "Event Port",
+		/* 15 */ "15 (invalid)",
+	};
+
+	dump_zap_stats(os, object);
+	(void) printf("\n");
+
+	for (zap_cursor_init(&zc, os, object);
+	    zap_cursor_retrieve(&zc, &attr) == 0;
+	    zap_cursor_advance(&zc)) {
+		(void) printf("\t\t%s = %lld (type: %s)\n",
+		    attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
+		    typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
+	}
+	zap_cursor_fini(&zc);
+}
+
+static void
+dump_spacemap(objset_t *os, space_map_obj_t *smo, space_map_t *sm)
+{
+	uint64_t alloc, offset, entry;
+	uint8_t mapshift = sm->sm_shift;
+	uint64_t mapstart = sm->sm_start;
+	char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
+			    "INVALID", "INVALID", "INVALID", "INVALID" };
+
+	if (smo->smo_object == 0)
+		return;
+
+	/*
+	 * Print out the freelist entries in both encoded and decoded form.
+	 */
+	alloc = 0;
+	for (offset = 0; offset < smo->smo_objsize; offset += sizeof (entry)) {
+		VERIFY3U(0, ==, dmu_read(os, smo->smo_object, offset,
+		    sizeof (entry), &entry, DMU_READ_PREFETCH));
+		if (SM_DEBUG_DECODE(entry)) {
+			(void) printf("\t    [%6llu] %s: txg %llu, pass %llu\n",
+			    (u_longlong_t)(offset / sizeof (entry)),
+			    ddata[SM_DEBUG_ACTION_DECODE(entry)],
+			    (u_longlong_t)SM_DEBUG_TXG_DECODE(entry),
+			    (u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry));
+		} else {
+			(void) printf("\t    [%6llu]    %c  range:"
+			    " %010llx-%010llx  size: %06llx\n",
+			    (u_longlong_t)(offset / sizeof (entry)),
+			    SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F',
+			    (u_longlong_t)((SM_OFFSET_DECODE(entry) <<
+			    mapshift) + mapstart),
+			    (u_longlong_t)((SM_OFFSET_DECODE(entry) <<
+			    mapshift) + mapstart + (SM_RUN_DECODE(entry) <<
+			    mapshift)),
+			    (u_longlong_t)(SM_RUN_DECODE(entry) << mapshift));
+			if (SM_TYPE_DECODE(entry) == SM_ALLOC)
+				alloc += SM_RUN_DECODE(entry) << mapshift;
+			else
+				alloc -= SM_RUN_DECODE(entry) << mapshift;
+		}
+	}
+	if (alloc != smo->smo_alloc) {
+		(void) printf("space_map_object alloc (%llu) INCONSISTENT "
+		    "with space map summary (%llu)\n",
+		    (u_longlong_t)smo->smo_alloc, (u_longlong_t)alloc);
+	}
+}
+
+static void
+dump_metaslab_stats(metaslab_t *msp)
+{
+	char maxbuf[32];
+	space_map_t *sm = &msp->ms_map;
+	avl_tree_t *t = sm->sm_pp_root;
+	int free_pct = sm->sm_space * 100 / sm->sm_size;
+
+	zdb_nicenum(space_map_maxsize(sm), maxbuf);
+
+	(void) printf("\t %25s %10lu   %7s  %6s   %4s %4d%%\n",
+	    "segments", avl_numnodes(t), "maxsize", maxbuf,
+	    "freepct", free_pct);
+}
+
+static void
+dump_metaslab(metaslab_t *msp)
+{
+	vdev_t *vd = msp->ms_group->mg_vd;
+	spa_t *spa = vd->vdev_spa;
+	space_map_t *sm = &msp->ms_map;
+	space_map_obj_t *smo = &msp->ms_smo;
+	char freebuf[32];
+
+	zdb_nicenum(sm->sm_size - smo->smo_alloc, freebuf);
+
+	(void) printf(
+	    "\tmetaslab %6llu   offset %12llx   spacemap %6llu   free    %5s\n",
+	    (u_longlong_t)(sm->sm_start / sm->sm_size),
+	    (u_longlong_t)sm->sm_start, (u_longlong_t)smo->smo_object, freebuf);
+
+	if (dump_opt['m'] > 1 && !dump_opt['L']) {
+		mutex_enter(&msp->ms_lock);
+		space_map_load_wait(sm);
+		if (!sm->sm_loaded)
+			VERIFY(space_map_load(sm, zfs_metaslab_ops,
+			    SM_FREE, smo, spa->spa_meta_objset) == 0);
+		dump_metaslab_stats(msp);
+		space_map_unload(sm);
+		mutex_exit(&msp->ms_lock);
+	}
+
+	if (dump_opt['d'] > 5 || dump_opt['m'] > 2) {
+		ASSERT(sm->sm_size == (1ULL << vd->vdev_ms_shift));
+
+		mutex_enter(&msp->ms_lock);
+		dump_spacemap(spa->spa_meta_objset, smo, sm);
+		mutex_exit(&msp->ms_lock);
+	}
+}
+
+static void
+print_vdev_metaslab_header(vdev_t *vd)
+{
+	(void) printf("\tvdev %10llu\n\t%-10s%5llu   %-19s   %-15s   %-10s\n",
+	    (u_longlong_t)vd->vdev_id,
+	    "metaslabs", (u_longlong_t)vd->vdev_ms_count,
+	    "offset", "spacemap", "free");
+	(void) printf("\t%15s   %19s   %15s   %10s\n",
+	    "---------------", "-------------------",
+	    "---------------", "-------------");
+}
+
+static void
+dump_metaslabs(spa_t *spa)
+{
+	vdev_t *vd, *rvd = spa->spa_root_vdev;
+	uint64_t m, c = 0, children = rvd->vdev_children;
+
+	(void) printf("\nMetaslabs:\n");
+
+	if (!dump_opt['d'] && zopt_objects > 0) {
+		c = zopt_object[0];
+
+		if (c >= children)
+			(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
+
+		if (zopt_objects > 1) {
+			vd = rvd->vdev_child[c];
+			print_vdev_metaslab_header(vd);
+
+			for (m = 1; m < zopt_objects; m++) {
+				if (zopt_object[m] < vd->vdev_ms_count)
+					dump_metaslab(
+					    vd->vdev_ms[zopt_object[m]]);
+				else
+					(void) fprintf(stderr, "bad metaslab "
+					    "number %llu\n",
+					    (u_longlong_t)zopt_object[m]);
+			}
+			(void) printf("\n");
+			return;
+		}
+		children = c + 1;
+	}
+	for (; c < children; c++) {
+		vd = rvd->vdev_child[c];
+		print_vdev_metaslab_header(vd);
+
+		for (m = 0; m < vd->vdev_ms_count; m++)
+			dump_metaslab(vd->vdev_ms[m]);
+		(void) printf("\n");
+	}
+}
+
+static void
+dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
+{
+	const ddt_phys_t *ddp = dde->dde_phys;
+	const ddt_key_t *ddk = &dde->dde_key;
+	char *types[4] = { "ditto", "single", "double", "triple" };
+	char blkbuf[BP_SPRINTF_LEN];
+	blkptr_t blk;
+
+	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+		if (ddp->ddp_phys_birth == 0)
+			continue;
+		ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
+		sprintf_blkptr(blkbuf, &blk);
+		(void) printf("index %llx refcnt %llu %s %s\n",
+		    (u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
+		    types[p], blkbuf);
+	}
+}
+
+static void
+dump_dedup_ratio(const ddt_stat_t *dds)
+{
+	double rL, rP, rD, D, dedup, compress, copies;
+
+	if (dds->dds_blocks == 0)
+		return;
+
+	rL = (double)dds->dds_ref_lsize;
+	rP = (double)dds->dds_ref_psize;
+	rD = (double)dds->dds_ref_dsize;
+	D = (double)dds->dds_dsize;
+
+	dedup = rD / D;
+	compress = rL / rP;
+	copies = rD / rP;
+
+	(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
+	    "dedup * compress / copies = %.2f\n\n",
+	    dedup, compress, copies, dedup * compress / copies);
+}
+
+static void
+dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
+{
+	char name[DDT_NAMELEN];
+	ddt_entry_t dde;
+	uint64_t walk = 0;
+	dmu_object_info_t doi;
+	uint64_t count, dspace, mspace;
+	int error;
+
+	error = ddt_object_info(ddt, type, class, &doi);
+
+	if (error == ENOENT)
+		return;
+	ASSERT(error == 0);
+
+	error = ddt_object_count(ddt, type, class, &count);
+	ASSERT(error == 0);
+	if (count == 0)
+		return;
+
+	dspace = doi.doi_physical_blocks_512 << 9;
+	mspace = doi.doi_fill_count * doi.doi_data_block_size;
+
+	ddt_object_name(ddt, type, class, name);
+
+	(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
+	    name,
+	    (u_longlong_t)count,
+	    (u_longlong_t)(dspace / count),
+	    (u_longlong_t)(mspace / count));
+
+	if (dump_opt['D'] < 3)
+		return;
+
+	zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
+
+	if (dump_opt['D'] < 4)
+		return;
+
+	if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
+		return;
+
+	(void) printf("%s contents:\n\n", name);
+
+	while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
+		dump_dde(ddt, &dde, walk);
+
+	ASSERT(error == ENOENT);
+
+	(void) printf("\n");
+}
+
+static void
+dump_all_ddts(spa_t *spa)
+{
+	ddt_histogram_t ddh_total = { 0 };
+	ddt_stat_t dds_total = { 0 };
+
+	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
+		ddt_t *ddt = spa->spa_ddt[c];
+		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
+			for (enum ddt_class class = 0; class < DDT_CLASSES;
+			    class++) {
+				dump_ddt(ddt, type, class);
+			}
+		}
+	}
+
+	ddt_get_dedup_stats(spa, &dds_total);
+
+	if (dds_total.dds_blocks == 0) {
+		(void) printf("All DDTs are empty\n");
+		return;
+	}
+
+	(void) printf("\n");
+
+	if (dump_opt['D'] > 1) {
+		(void) printf("DDT histogram (aggregated over all DDTs):\n");
+		ddt_get_dedup_histogram(spa, &ddh_total);
+		zpool_dump_ddt(&dds_total, &ddh_total);
+	}
+
+	dump_dedup_ratio(&dds_total);
+}
+
+/*
+ * space_map_walk() callback used by dump_dtl(): print one DTL segment as a
+ * half-open range.  NOTE: the space_map_t argument is not a real space map;
+ * dump_dtl() smuggles its indentation/name prefix string through it.
+ */
+static void
+dump_dtl_seg(space_map_t *sm, uint64_t start, uint64_t size)
+{
+	char *prefix = (void *)sm;	/* really the caller's prefix string */
+
+	(void) printf("%s [%llu,%llu) length %llu\n",
+	    prefix,
+	    (u_longlong_t)start,
+	    (u_longlong_t)(start + size),
+	    (u_longlong_t)(size));
+}
+
+/*
+ * Recursively print the dirty time logs (DTLs) for a vdev and all of its
+ * children.  "indent" is 0 for the root vdev; each generation of children
+ * is indented by 4 more spaces.
+ */
+static void
+dump_dtl(vdev_t *vd, int indent)
+{
+	spa_t *spa = vd->vdev_spa;
+	boolean_t required;
+	char *name[DTL_TYPES] = { "missing", "partial", "scrub", "outage" };
+	char prefix[256];
+
+	/* vdev_dtl_required() is queried under the vdev state lock. */
+	spa_vdev_state_enter(spa, SCL_NONE);
+	required = vdev_dtl_required(vd);
+	(void) spa_vdev_state_exit(spa, NULL, 0);
+
+	if (indent == 0)
+		(void) printf("\nDirty time logs:\n\n");
+
+	/* Identify the vdev by path, op type, or pool name for the root. */
+	(void) printf("\t%*s%s [%s]\n", indent, "",
+	    vd->vdev_path ? vd->vdev_path :
+	    vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
+	    required ? "DTL-required" : "DTL-expendable");
+
+	for (int t = 0; t < DTL_TYPES; t++) {
+		space_map_t *sm = &vd->vdev_dtl[t];
+		if (sm->sm_space == 0)
+			continue;
+		(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
+		    indent + 2, "", name[t]);
+		mutex_enter(sm->sm_lock);
+		/* dump_dtl_seg() recovers "prefix" from its sm argument. */
+		space_map_walk(sm, dump_dtl_seg, (void *)prefix);
+		mutex_exit(sm->sm_lock);
+		if (dump_opt['d'] > 5 && vd->vdev_children == 0)
+			dump_spacemap(spa->spa_meta_objset,
+			    &vd->vdev_dtl_smo, sm);
+	}
+
+	for (int c = 0; c < vd->vdev_children; c++)
+		dump_dtl(vd->vdev_child[c], indent + 4);
+}
+
+/*
+ * Read and print the pool's command history.  The history is fetched in
+ * SPA_MAXBLOCKSIZE chunks and unpacked into an nvlist array; each event is
+ * then printed as either the recorded command line or, for internal events,
+ * a synthesized "[internal <event> txg:<txg>] <string>" line.
+ */
+static void
+dump_history(spa_t *spa)
+{
+	nvlist_t **events = NULL;
+	char buf[SPA_MAXBLOCKSIZE];
+	uint64_t resid, len, off = 0;
+	uint_t num = 0;
+	int error;
+	time_t tsec;
+	struct tm t;
+	char tbuf[30];
+	char internalstr[MAXPATHLEN];
+
+	do {
+		len = sizeof (buf);
+
+		if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
+			(void) fprintf(stderr, "Unable to read history: "
+			    "error %d\n", error);
+			return;
+		}
+
+		if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
+			break;
+
+		/* resid bytes were not consumed; re-read them next pass. */
+		off -= resid;
+	} while (len != 0);
+
+	(void) printf("\nHistory:\n");
+	for (int i = 0; i < num; i++) {
+		uint64_t time, txg, ievent;
+		char *cmd, *intstr;
+
+		/* Events without a timestamp are not printable. */
+		if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME,
+		    &time) != 0)
+			continue;
+		if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD,
+		    &cmd) != 0) {
+			/* No command string: must be an internal event. */
+			if (nvlist_lookup_uint64(events[i],
+			    ZPOOL_HIST_INT_EVENT, &ievent) != 0)
+				continue;
+			verify(nvlist_lookup_uint64(events[i],
+			    ZPOOL_HIST_TXG, &txg) == 0);
+			verify(nvlist_lookup_string(events[i],
+			    ZPOOL_HIST_INT_STR, &intstr) == 0);
+			/* Skip event codes newer than this zdb knows about. */
+			if (ievent >= LOG_END)
+				continue;
+
+			(void) snprintf(internalstr,
+			    sizeof (internalstr),
+			    "[internal %s txg:%lld] %s",
+			    zfs_history_event_names[ievent], txg,
+			    intstr);
+			cmd = internalstr;
+		}
+		tsec = time;
+		(void) localtime_r(&tsec, &t);
+		(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
+		(void) printf("%s %s\n", tbuf, cmd);
+	}
+}
+
+/*ARGSUSED*/
+/* Placeholder object_viewer for DMU dnode objects: nothing extra to show. */
+static void
+dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
+{
+}
+
+/*
+ * Translate a bookmark's block id into a byte offset within the object.
+ * With no dnode (meta-level bookmarks) the id is either returned raw
+ * (object 0) or scaled by the block's logical size; otherwise the offset
+ * is computed from the dnode's indirection geometry.
+ */
+static uint64_t
+blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp, const zbookmark_t *zb)
+{
+	uint64_t blkid = zb->zb_blkid;
+	int shift;
+
+	if (dnp == NULL) {
+		ASSERT(zb->zb_level < 0);
+		return (zb->zb_object == 0 ? blkid :
+		    blkid * BP_GET_LSIZE(bp));
+	}
+
+	ASSERT(zb->zb_level >= 0);
+
+	/* Each indirect level multiplies the span by blkptrs-per-block. */
+	shift = zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT);
+
+	return (((blkid << shift) * dnp->dn_datablkszsec) <<
+	    SPA_MINBLOCKSHIFT);
+}
+
+/*
+ * Format a block pointer into blkbuf in a compact one-line form:
+ * "<vdev>:<offset>:<asize> ... <lsize>L/<psize>P F=<fill> B=<birth>/<phys>".
+ * At -bbbbb or higher the full sprintf_blkptr() form is used instead.
+ * Caller's blkbuf must be at least BP_SPRINTF_LEN bytes.
+ */
+static void
+sprintf_blkptr_compact(char *blkbuf, const blkptr_t *bp)
+{
+	const dva_t *dva = bp->blk_dva;
+	/* Only show all DVAs at high -d verbosity; otherwise just the first. */
+	int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
+
+	if (dump_opt['b'] >= 5) {
+		sprintf_blkptr(blkbuf, bp);
+		return;
+	}
+
+	blkbuf[0] = '\0';
+
+	for (int i = 0; i < ndvas; i++)
+		(void) sprintf(blkbuf + strlen(blkbuf), "%llu:%llx:%llx ",
+		    (u_longlong_t)DVA_GET_VDEV(&dva[i]),
+		    (u_longlong_t)DVA_GET_OFFSET(&dva[i]),
+		    (u_longlong_t)DVA_GET_ASIZE(&dva[i]));
+
+	(void) sprintf(blkbuf + strlen(blkbuf),
+	    "%llxL/%llxP F=%llu B=%llu/%llu",
+	    (u_longlong_t)BP_GET_LSIZE(bp),
+	    (u_longlong_t)BP_GET_PSIZE(bp),
+	    (u_longlong_t)bp->blk_fill,
+	    (u_longlong_t)bp->blk_birth,
+	    (u_longlong_t)BP_PHYSICAL_BIRTH(bp));
+}
+
+/*
+ * Print one line for an indirect-tree block pointer: its byte offset,
+ * an "L<n>" column aligned by tree level, and the compact blkptr form.
+ */
+static void
+print_indirect(blkptr_t *bp, const zbookmark_t *zb,
+    const dnode_phys_t *dnp)
+{
+	char blkbuf[BP_SPRINTF_LEN];
+	int l;
+
+	ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
+	ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
+
+	(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
+
+	ASSERT(zb->zb_level >= 0);
+
+	/* Emit "L<level>" in its own column; pad all other levels. */
+	for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
+		if (l == zb->zb_level) {
+			(void) printf("L%llx", (u_longlong_t)zb->zb_level);
+		} else {
+			(void) printf(" ");
+		}
+	}
+
+	sprintf_blkptr_compact(blkbuf, bp);
+	(void) printf("%s\n", blkbuf);
+}
+
+/*
+ * Print this block pointer and, if it is an indirect block, read it through
+ * the ARC and recurse into each child blkptr.  Verifies on the way back up
+ * that the children's fill counts sum to this block's fill count.
+ * Returns 0 on success or the first read/recursion error.
+ */
+static int
+visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
+    blkptr_t *bp, const zbookmark_t *zb)
+{
+	int err = 0;
+
+	/* A hole (never-written bp) terminates the recursion. */
+	if (bp->blk_birth == 0)
+		return (0);
+
+	print_indirect(bp, zb, dnp);
+
+	if (BP_GET_LEVEL(bp) > 0) {
+		uint32_t flags = ARC_WAIT;
+		int i;
+		blkptr_t *cbp;
+		/* Number of embedded blkptrs in this indirect block. */
+		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
+		arc_buf_t *buf;
+		uint64_t fill = 0;
+
+		err = arc_read_nolock(NULL, spa, bp, arc_getbuf_func, &buf,
+		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
+		if (err)
+			return (err);
+		ASSERT(buf->b_data);
+
+		/* recursively visit blocks below this */
+		cbp = buf->b_data;
+		for (i = 0; i < epb; i++, cbp++) {
+			zbookmark_t czb;
+
+			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
+			    zb->zb_level - 1,
+			    zb->zb_blkid * epb + i);
+			err = visit_indirect(spa, dnp, cbp, &czb);
+			if (err)
+				break;
+			fill += cbp->blk_fill;
+		}
+		if (!err)
+			ASSERT3U(fill, ==, bp->blk_fill);
+		(void) arc_buf_remove_ref(buf, &buf);
+	}
+
+	return (err);
+}
+
+/*ARGSUSED*/
+/*
+ * Dump the entire indirect block tree of a dnode by walking each of its
+ * top-level block pointers with visit_indirect().  Errors are ignored so
+ * that as much of the tree as possible is printed.
+ */
+static void
+dump_indirect(dnode_t *dn)
+{
+	dnode_phys_t *dnp = dn->dn_phys;
+	int j;
+	zbookmark_t czb;
+
+	(void) printf("Indirect blocks:\n");
+
+	SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
+	    dn->dn_object, dnp->dn_nlevels - 1, 0);
+	for (j = 0; j < dnp->dn_nblkptr; j++) {
+		czb.zb_blkid = j;
+		(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
+		    &dnp->dn_blkptr[j], &czb);
+	}
+
+	(void) printf("\n");
+}
+
+/*ARGSUSED*/
+/*
+ * object_viewer for DSL directory objects: pretty-print every field of the
+ * dsl_dir_phys_t found in the object's bonus buffer.  A NULL data pointer
+ * (no bonus to show) is silently ignored.
+ */
+static void
+dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	dsl_dir_phys_t *dd = data;
+	time_t crtime;
+	char nice[32];	/* scratch buffer for human-readable sizes */
+
+	if (dd == NULL)
+		return;
+
+	ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
+
+	crtime = dd->dd_creation_time;
+	(void) printf("\t\tcreation_time = %s", ctime(&crtime));
+	(void) printf("\t\thead_dataset_obj = %llu\n",
+	    (u_longlong_t)dd->dd_head_dataset_obj);
+	(void) printf("\t\tparent_dir_obj = %llu\n",
+	    (u_longlong_t)dd->dd_parent_obj);
+	(void) printf("\t\torigin_obj = %llu\n",
+	    (u_longlong_t)dd->dd_origin_obj);
+	(void) printf("\t\tchild_dir_zapobj = %llu\n",
+	    (u_longlong_t)dd->dd_child_dir_zapobj);
+	zdb_nicenum(dd->dd_used_bytes, nice);
+	(void) printf("\t\tused_bytes = %s\n", nice);
+	zdb_nicenum(dd->dd_compressed_bytes, nice);
+	(void) printf("\t\tcompressed_bytes = %s\n", nice);
+	zdb_nicenum(dd->dd_uncompressed_bytes, nice);
+	(void) printf("\t\tuncompressed_bytes = %s\n", nice);
+	zdb_nicenum(dd->dd_quota, nice);
+	(void) printf("\t\tquota = %s\n", nice);
+	zdb_nicenum(dd->dd_reserved, nice);
+	(void) printf("\t\treserved = %s\n", nice);
+	(void) printf("\t\tprops_zapobj = %llu\n",
+	    (u_longlong_t)dd->dd_props_zapobj);
+	(void) printf("\t\tdeleg_zapobj = %llu\n",
+	    (u_longlong_t)dd->dd_deleg_zapobj);
+	(void) printf("\t\tflags = %llx\n",
+	    (u_longlong_t)dd->dd_flags);
+
+/* Print one dd_used_breakdown[] slot in human-readable form. */
+#define	DO(which) \
+	zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice); \
+	(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
+	DO(HEAD);
+	DO(SNAP);
+	DO(CHILD);
+	DO(CHILD_RSRV);
+	DO(REFRSRV);
+#undef DO
+}
+
+/*ARGSUSED*/
+/*
+ * object_viewer for DSL dataset objects: pretty-print every field of the
+ * dsl_dataset_phys_t in the object's bonus buffer, including its root bp.
+ * A NULL data pointer is silently ignored.
+ */
+static void
+dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	dsl_dataset_phys_t *ds = data;
+	time_t crtime;
+	char used[32], compressed[32], uncompressed[32], unique[32];
+	char blkbuf[BP_SPRINTF_LEN];
+
+	if (ds == NULL)
+		return;
+
+	ASSERT(size == sizeof (*ds));
+	crtime = ds->ds_creation_time;
+	zdb_nicenum(ds->ds_referenced_bytes, used);
+	zdb_nicenum(ds->ds_compressed_bytes, compressed);
+	zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed);
+	zdb_nicenum(ds->ds_unique_bytes, unique);
+	sprintf_blkptr(blkbuf, &ds->ds_bp);
+
+	(void) printf("\t\tdir_obj = %llu\n",
+	    (u_longlong_t)ds->ds_dir_obj);
+	(void) printf("\t\tprev_snap_obj = %llu\n",
+	    (u_longlong_t)ds->ds_prev_snap_obj);
+	(void) printf("\t\tprev_snap_txg = %llu\n",
+	    (u_longlong_t)ds->ds_prev_snap_txg);
+	(void) printf("\t\tnext_snap_obj = %llu\n",
+	    (u_longlong_t)ds->ds_next_snap_obj);
+	(void) printf("\t\tsnapnames_zapobj = %llu\n",
+	    (u_longlong_t)ds->ds_snapnames_zapobj);
+	(void) printf("\t\tnum_children = %llu\n",
+	    (u_longlong_t)ds->ds_num_children);
+	(void) printf("\t\tuserrefs_obj = %llu\n",
+	    (u_longlong_t)ds->ds_userrefs_obj);
+	(void) printf("\t\tcreation_time = %s", ctime(&crtime));
+	(void) printf("\t\tcreation_txg = %llu\n",
+	    (u_longlong_t)ds->ds_creation_txg);
+	(void) printf("\t\tdeadlist_obj = %llu\n",
+	    (u_longlong_t)ds->ds_deadlist_obj);
+	(void) printf("\t\tused_bytes = %s\n", used);
+	(void) printf("\t\tcompressed_bytes = %s\n", compressed);
+	(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
+	(void) printf("\t\tunique = %s\n", unique);
+	(void) printf("\t\tfsid_guid = %llu\n",
+	    (u_longlong_t)ds->ds_fsid_guid);
+	(void) printf("\t\tguid = %llu\n",
+	    (u_longlong_t)ds->ds_guid);
+	(void) printf("\t\tflags = %llx\n",
+	    (u_longlong_t)ds->ds_flags);
+	(void) printf("\t\tnext_clones_obj = %llu\n",
+	    (u_longlong_t)ds->ds_next_clones_obj);
+	(void) printf("\t\tprops_obj = %llu\n",
+	    (u_longlong_t)ds->ds_props_obj);
+	(void) printf("\t\tbp = %s\n", blkbuf);
+}
+
+/* ARGSUSED */
+/*
+ * bptree_iterate() callback: print each live (born) block pointer on its
+ * own indented line; holes are skipped.  Always returns 0 to continue.
+ */
+static int
+dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+	char buf[BP_SPRINTF_LEN];
+
+	if (bp->blk_birth == 0)
+		return (0);
+
+	sprintf_blkptr(buf, bp);
+	(void) printf("\t%s\n", buf);
+
+	return (0);
+}
+
+/*
+ * Dump a bptree object: at -ddd print its summary line (dataset count and
+ * byte total from the bonus buffer); at -ddddd also enumerate every block
+ * pointer it contains.
+ */
+static void
+dump_bptree(objset_t *os, uint64_t obj, char *name)
+{
+	char bytes[32];
+	bptree_phys_t *bt;
+	dmu_buf_t *db;
+
+	if (dump_opt['d'] < 3)
+		return;
+
+	VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
+	bt = db->db_data;
+	zdb_nicenum(bt->bt_bytes, bytes);
+	(void) printf("\n    %s: %llu datasets, %s\n",
+	    name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
+	dmu_buf_rele(db, FTAG);
+
+	if (dump_opt['d'] < 5)
+		return;
+
+	(void) printf("\n");
+
+	/* B_FALSE: iterate without freeing the entries. */
+	(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
+}
+
+/* ARGSUSED */
+/*
+ * bpobj_iterate_nofree() callback: print one block pointer in compact form.
+ * Every bp in a bpobj must have been born.  Always returns 0 to continue.
+ */
+static int
+dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+	char buf[BP_SPRINTF_LEN];
+
+	ASSERT(bp->blk_birth != 0);
+
+	sprintf_blkptr_compact(buf, bp);
+	(void) printf("\t%s\n", buf);
+
+	return (0);
+}
+
+/*
+ * Dump a bpobj: at -ddd print a summary line (blkptr/subobj counts, byte
+ * and compression totals when the bpobj supports subobjects); at -ddddd
+ * also list every block pointer.
+ */
+static void
+dump_bpobj(bpobj_t *bpo, char *name)
+{
+	char bytes[32];
+	char comp[32];
+	char uncomp[32];
+
+	if (dump_opt['d'] < 3)
+		return;
+
+	zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes);
+	if (bpo->bpo_havesubobj) {
+		/* Newer bpobj format: also has subobjs and comp/uncomp. */
+		zdb_nicenum(bpo->bpo_phys->bpo_comp, comp);
+		zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp);
+		(void) printf("\n    %s: %llu local blkptrs, %llu subobjs, "
+		    "%s (%s/%s comp)\n",
+		    name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
+		    (u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
+		    bytes, comp, uncomp);
+	} else {
+		(void) printf("\n    %s: %llu blkptrs, %s\n",
+		    name, (u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs, bytes);
+	}
+
+	if (dump_opt['d'] < 5)
+		return;
+
+	(void) printf("\n");
+
+	(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
+}
+
+/*
+ * Dump a dataset's deadlist: at -ddd print the space totals; at -dddd list
+ * each mintxg -> bpobj entry; at -ddddd also dump each entry's bpobj.
+ */
+static void
+dump_deadlist(dsl_deadlist_t *dl)
+{
+	dsl_deadlist_entry_t *dle;
+	char bytes[32];
+	char comp[32];
+	char uncomp[32];
+
+	if (dump_opt['d'] < 3)
+		return;
+
+	zdb_nicenum(dl->dl_phys->dl_used, bytes);
+	zdb_nicenum(dl->dl_phys->dl_comp, comp);
+	zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp);
+	(void) printf("\n    Deadlist: %s (%s/%s comp)\n",
+	    bytes, comp, uncomp);
+
+	if (dump_opt['d'] < 4)
+		return;
+
+	(void) printf("\n");
+
+	/* Entries are kept in an AVL tree ordered by mintxg. */
+	for (dle = avl_first(&dl->dl_tree); dle;
+	    dle = AVL_NEXT(&dl->dl_tree, dle)) {
+		(void) printf("      mintxg %llu -> obj %llu\n",
+		    (longlong_t)dle->dle_mintxg,
+		    (longlong_t)dle->dle_bpobj.bpo_object);
+
+		if (dump_opt['d'] >= 5)
+			dump_bpobj(&dle->dle_bpobj, "");
+	}
+}
+
+/* FUID index/domain lookup trees, loaded lazily by dump_uidgid(). */
+static avl_tree_t idx_tree;
+static avl_tree_t domain_tree;
+static boolean_t fuid_table_loaded;
+/* System-attribute registry state, set up lazily by dump_znode(). */
+static boolean_t sa_loaded;
+sa_attr_type_t *sa_attr_table;
+
+/*
+ * Tear down the FUID lookup trees if dump_uidgid() loaded them, so the next
+ * dataset starts with a clean table.  Safe to call when nothing is loaded.
+ *
+ * Declared (void): the original empty parameter list was an old-style
+ * unprototyped declarator; all callers invoke it with no arguments.
+ */
+static void
+fuid_table_destroy(void)
+{
+	if (fuid_table_loaded) {
+		zfs_fuid_table_destroy(&idx_tree, &domain_tree);
+		fuid_table_loaded = B_FALSE;
+	}
+}
+
+/*
+ * Print uid or gid information.
+ * For a normal POSIX id, just the id is printed in decimal format.
+ * For CIFS files with a FUID, the fuid is printed in hex followed by
+ * the domain-rid string.
+ */
+static void
+print_idstr(uint64_t id, const char *id_type)
+{
+	if (FUID_INDEX(id) == 0) {
+		/* Plain POSIX id: decimal. */
+		(void) printf("\t%s     %llu\n", id_type, (u_longlong_t)id);
+	} else {
+		/* CIFS FUID: hex id followed by its domain-rid string. */
+		char *dom = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
+
+		(void) printf("\t%s     %llx [%s-%d]\n", id_type,
+		    (u_longlong_t)id, dom, (int)FUID_RID(id));
+	}
+}
+
+/*
+ * Print a znode's uid and gid.  If either id carries a FUID index, the
+ * FUID domain table is loaded from the objset's master node on first use
+ * so print_idstr() can resolve domain names.
+ */
+static void
+dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
+{
+	uint32_t uid_idx, gid_idx;
+
+	uid_idx = FUID_INDEX(uid);
+	gid_idx = FUID_INDEX(gid);
+
+	/* Load domain table, if not already loaded */
+	if (!fuid_table_loaded && (uid_idx || gid_idx)) {
+		uint64_t fuid_obj;
+
+		/* first find the fuid object.  It lives in the master node */
+		VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
+		    8, 1, &fuid_obj) == 0);
+		zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
+		(void) zfs_fuid_table_load(os, fuid_obj,
+		    &idx_tree, &domain_tree);
+		fuid_table_loaded = B_TRUE;
+	}
+
+	print_idstr(uid, "uid");
+	print_idstr(gid, "gid");
+}
+
+/*ARGSUSED*/
+/*
+ * object_viewer for ZPL znodes (also used for SA objects): resolve the
+ * object's path and, at -ddd or higher, print its system attributes
+ * (ownership, times, mode, size, links, flags, optional xattr/rdev).
+ * The SA attribute registry is set up lazily on the first znode seen.
+ */
+static void
+dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
+{
+	char path[MAXPATHLEN * 2];	/* allow for xattr and failure prefix */
+	sa_handle_t *hdl;
+	uint64_t xattr, rdev, gen;
+	uint64_t uid, gid, mode, fsize, parent, links;
+	uint64_t pflags;
+	uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
+	time_t z_crtime, z_atime, z_mtime, z_ctime;
+	sa_bulk_attr_t bulk[12];	/* must match the SA_ADD_BULK_ATTR count */
+	int idx = 0;
+	int error;
+
+	/* One-time SA layout setup; pre-SA pools use sa_attrs == 0. */
+	if (!sa_loaded) {
+		uint64_t sa_attrs = 0;
+		uint64_t version;
+
+		VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZPL_VERSION_STR,
+		    8, 1, &version) == 0);
+		if (version >= ZPL_VERSION_SA) {
+			VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
+			    8, 1, &sa_attrs) == 0);
+		}
+		if ((error = sa_setup(os, sa_attrs, zfs_attr_table,
+		    ZPL_END, &sa_attr_table)) != 0) {
+			(void) printf("sa_setup failed errno %d, can't "
+			    "display znode contents\n", error);
+			return;
+		}
+		sa_loaded = B_TRUE;
+	}
+
+	if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
+		(void) printf("Failed to get handle for SA znode\n");
+		return;
+	}
+
+	/* Gather all mandatory attributes in a single bulk lookup. */
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
+	    &links, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
+	    &mode, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
+	    NULL, &parent, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
+	    &fsize, 8);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
+	    acctm, 16);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
+	    modtm, 16);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
+	    crtm, 16);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
+	    chgtm, 16);
+	SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
+	    &pflags, 8);
+
+	if (sa_bulk_lookup(hdl, bulk, idx)) {
+		(void) sa_handle_destroy(hdl);
+		return;
+	}
+
+	error = zfs_obj_to_path(os, object, path, sizeof (path));
+	if (error != 0) {
+		(void) snprintf(path, sizeof (path), "\?\?\?<object#%llu>",
+		    (u_longlong_t)object);
+	}
+	/* Below -ddd only the path is shown. */
+	if (dump_opt['d'] < 3) {
+		(void) printf("\t%s\n", path);
+		(void) sa_handle_destroy(hdl);
+		return;
+	}
+
+	/* [0] of each timestamp pair holds the seconds component. */
+	z_crtime = (time_t)crtm[0];
+	z_atime = (time_t)acctm[0];
+	z_mtime = (time_t)modtm[0];
+	z_ctime = (time_t)chgtm[0];
+
+	(void) printf("\tpath	%s\n", path);
+	dump_uidgid(os, uid, gid);
+	(void) printf("\tatime	%s", ctime(&z_atime));
+	(void) printf("\tmtime	%s", ctime(&z_mtime));
+	(void) printf("\tctime	%s", ctime(&z_ctime));
+	(void) printf("\tcrtime	%s", ctime(&z_crtime));
+	(void) printf("\tgen	%llu\n", (u_longlong_t)gen);
+	(void) printf("\tmode	%llo\n", (u_longlong_t)mode);
+	(void) printf("\tsize	%llu\n", (u_longlong_t)fsize);
+	(void) printf("\tparent	%llu\n", (u_longlong_t)parent);
+	(void) printf("\tlinks	%llu\n", (u_longlong_t)links);
+	(void) printf("\tpflags	%llx\n", (u_longlong_t)pflags);
+	/* xattr and rdev are optional attributes; print only if present. */
+	if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
+	    sizeof (uint64_t)) == 0)
+		(void) printf("\txattr	%llu\n", (u_longlong_t)xattr);
+	if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
+	    sizeof (uint64_t)) == 0)
+		(void) printf("\trdev	0x%016llx\n", (u_longlong_t)rdev);
+	sa_handle_destroy(hdl);
+}
+
+/*ARGSUSED*/
+/* Placeholder object_viewer for ACL objects: nothing extra to show. */
+static void
+dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
+{
+}
+
+/*ARGSUSED*/
+/* Placeholder object_viewer for DMU objset objects: nothing extra to show. */
+static void
+dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
+{
+}
+
+/*
+ * Dispatch table mapping each DMU object type to its dump routine, indexed
+ * by ZDB_OT_TYPE().  Order must match the dmu_object_type_t enumeration;
+ * the final dump_unknown entry catches out-of-range types.
+ */
+static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
+	dump_none,		/* unallocated			*/
+	dump_zap,		/* object directory		*/
+	dump_uint64,		/* object array			*/
+	dump_none,		/* packed nvlist		*/
+	dump_packed_nvlist,	/* packed nvlist size		*/
+	dump_none,		/* bplist			*/
+	dump_none,		/* bplist header		*/
+	dump_none,		/* SPA space map header		*/
+	dump_none,		/* SPA space map		*/
+	dump_none,		/* ZIL intent log		*/
+	dump_dnode,		/* DMU dnode			*/
+	dump_dmu_objset,	/* DMU objset			*/
+	dump_dsl_dir,		/* DSL directory		*/
+	dump_zap,		/* DSL directory child map	*/
+	dump_zap,		/* DSL dataset snap map		*/
+	dump_zap,		/* DSL props			*/
+	dump_dsl_dataset,	/* DSL dataset			*/
+	dump_znode,		/* ZFS znode			*/
+	dump_acl,		/* ZFS V0 ACL			*/
+	dump_uint8,		/* ZFS plain file		*/
+	dump_zpldir,		/* ZFS directory		*/
+	dump_zap,		/* ZFS master node		*/
+	dump_zap,		/* ZFS delete queue		*/
+	dump_uint8,		/* zvol object			*/
+	dump_zap,		/* zvol prop			*/
+	dump_uint8,		/* other uint8[]		*/
+	dump_uint64,		/* other uint64[]		*/
+	dump_zap,		/* other ZAP			*/
+	dump_zap,		/* persistent error log		*/
+	dump_uint8,		/* SPA history			*/
+	dump_uint64,		/* SPA history offsets		*/
+	dump_zap,		/* Pool properties		*/
+	dump_zap,		/* DSL permissions		*/
+	dump_acl,		/* ZFS ACL			*/
+	dump_uint8,		/* ZFS SYSACL			*/
+	dump_none,		/* FUID nvlist			*/
+	dump_packed_nvlist,	/* FUID nvlist size		*/
+	dump_zap,		/* DSL dataset next clones	*/
+	dump_zap,		/* DSL scrub queue		*/
+	dump_zap,		/* ZFS user/group used		*/
+	dump_zap,		/* ZFS user/group quota		*/
+	dump_zap,		/* snapshot refcount tags	*/
+	dump_ddt_zap,		/* DDT ZAP object		*/
+	dump_zap,		/* DDT statistics		*/
+	dump_znode,		/* SA object			*/
+	dump_zap,		/* SA Master Node		*/
+	dump_sa_attrs,		/* SA attribute registration	*/
+	dump_sa_layouts,	/* SA attribute layouts		*/
+	dump_zap,		/* DSL scrub translations	*/
+	dump_none,		/* fake dedup BP		*/
+	dump_zap,		/* deadlist			*/
+	dump_none,		/* deadlist hdr			*/
+	dump_zap,		/* dsl clones			*/
+	dump_none,		/* bpobj subobjs		*/
+	dump_unknown,		/* Unknown type, must be last	*/
+};
+
+/*
+ * Print one object's summary row (level, block sizes, dsize/lsize, fill
+ * percentage, type), then progressively more detail as verbosity rises:
+ * bonus info (>3), dnode flags plus type-specific contents (>=4), the
+ * indirect-block tree and the object's allocated segments (>=5).
+ * Object 0 means the objset's meta-dnode.  *print_header is set to 0 after
+ * the column header is printed and reset to 1 after verbose dumps.
+ *
+ * Fix: the two snprintf() calls appending to "aux" previously passed
+ * sizeof (aux) as the size even though they write at aux + strlen(aux),
+ * permitting a write past the end of the 50-byte buffer; the size must be
+ * the remaining space, sizeof (aux) - strlen(aux).
+ */
+static void
+dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header)
+{
+	dmu_buf_t *db = NULL;
+	dmu_object_info_t doi;
+	dnode_t *dn;
+	void *bonus = NULL;
+	size_t bsize = 0;
+	char iblk[32], dblk[32], lsize[32], asize[32], fill[32];
+	char bonus_size[32];
+	char aux[50];
+	int error;
+
+	if (*print_header) {
+		(void) printf("\n%10s  %3s  %5s  %5s  %5s  %5s  %6s  %s\n",
+		    "Object", "lvl", "iblk", "dblk", "dsize", "lsize",
+		    "%full", "type");
+		*print_header = 0;
+	}
+
+	if (object == 0) {
+		dn = DMU_META_DNODE(os);
+	} else {
+		error = dmu_bonus_hold(os, object, FTAG, &db);
+		if (error)
+			fatal("dmu_bonus_hold(%llu) failed, errno %u",
+			    object, error);
+		bonus = db->db_data;
+		bsize = db->db_size;
+		dn = DB_DNODE((dmu_buf_impl_t *)db);
+	}
+	dmu_object_info_from_dnode(dn, &doi);
+
+	zdb_nicenum(doi.doi_metadata_block_size, iblk);
+	zdb_nicenum(doi.doi_data_block_size, dblk);
+	zdb_nicenum(doi.doi_max_offset, lsize);
+	zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize);
+	zdb_nicenum(doi.doi_bonus_size, bonus_size);
+	/*
+	 * Fill percentage; the meta-dnode's fill counts dnodes, so scale by
+	 * DNODES_PER_BLOCK.  NOTE(review): doi_max_offset == 0 would divide
+	 * by zero here — appears unreachable for allocated objects; confirm.
+	 */
+	(void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
+	    doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
+	    doi.doi_max_offset);
+
+	aux[0] = '\0';
+
+	if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
+		(void) snprintf(aux + strlen(aux),
+		    sizeof (aux) - strlen(aux), " (K=%s)",
+		    ZDB_CHECKSUM_NAME(doi.doi_checksum));
+	}
+
+	if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
+		(void) snprintf(aux + strlen(aux),
+		    sizeof (aux) - strlen(aux), " (Z=%s)",
+		    ZDB_COMPRESS_NAME(doi.doi_compress));
+	}
+
+	(void) printf("%10lld  %3u  %5s  %5s  %5s  %5s  %6s  %s%s\n",
+	    (u_longlong_t)object, doi.doi_indirection, iblk, dblk,
+	    asize, lsize, fill, ZDB_OT_NAME(doi.doi_type), aux);
+
+	if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
+		(void) printf("%10s  %3s  %5s  %5s  %5s  %5s  %6s  %s\n",
+		    "", "", "", "", "", bonus_size, "bonus",
+		    ZDB_OT_NAME(doi.doi_bonus_type));
+	}
+
+	if (verbosity >= 4) {
+		(void) printf("\tdnode flags: %s%s%s\n",
+		    (dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
+		    "USED_BYTES " : "",
+		    (dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
+		    "USERUSED_ACCOUNTED " : "",
+		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
+		    "SPILL_BLKPTR" : "");
+		(void) printf("\tdnode maxblkid: %llu\n",
+		    (longlong_t)dn->dn_phys->dn_maxblkid);
+
+		/* Type-specific dumps for the bonus buffer and the object. */
+		object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os, object,
+		    bonus, bsize);
+		object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object, NULL, 0);
+		*print_header = 1;
+	}
+
+	if (verbosity >= 5)
+		dump_indirect(dn);
+
+	if (verbosity >= 5) {
+		/*
+		 * Report the list of segments that comprise the object.
+		 */
+		uint64_t start = 0;
+		uint64_t end;
+		uint64_t blkfill = 1;
+		int minlvl = 1;
+
+		if (dn->dn_type == DMU_OT_DNODE) {
+			minlvl = 0;
+			blkfill = DNODES_PER_BLOCK;
+		}
+
+		/* Alternate data/hole searches to enumerate the segments. */
+		for (;;) {
+			char segsize[32];
+			error = dnode_next_offset(dn,
+			    0, &start, minlvl, blkfill, 0);
+			if (error)
+				break;
+			end = start;
+			error = dnode_next_offset(dn,
+			    DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
+			zdb_nicenum(end - start, segsize);
+			(void) printf("\t\tsegment [%016llx, %016llx)"
+			    " size %5s\n", (u_longlong_t)start,
+			    (u_longlong_t)end, segsize);
+			if (error)
+				break;
+			start = end;
+		}
+	}
+
+	if (db != NULL)
+		dmu_buf_rele(db, FTAG);
+}
+
+/* Printable names for dmu_objset_type_t values, indexed by type. */
+static char *objset_types[DMU_OST_NUMTYPES] = {
+	"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
+
+/*
+ * Dump an objset: print its summary line, then either the explicitly
+ * requested objects (zopt_objects) or — at -dd and above — the intent log,
+ * deadlist, and every object in the objset, verifying the walked object
+ * count against the objset's recorded fill count.
+ */
+static void
+dump_dir(objset_t *os)
+{
+	dmu_objset_stats_t dds;
+	uint64_t object, object_count;
+	uint64_t refdbytes, usedobjs, scratch;
+	char numbuf[32];
+	char blkbuf[BP_SPRINTF_LEN + 20];	/* room for ", rootbp " prefix */
+	char osname[MAXNAMELEN];
+	char *type = "UNKNOWN";
+	int verbosity = dump_opt['d'];
+	int print_header = 1;
+	int i, error;
+
+	dmu_objset_fast_stat(os, &dds);
+
+	if (dds.dds_type < DMU_OST_NUMTYPES)
+		type = objset_types[dds.dds_type];
+
+	if (dds.dds_type == DMU_OST_META) {
+		/* The MOS has no dataset; take its stats from the DSL pool. */
+		dds.dds_creation_txg = TXG_INITIAL;
+		usedobjs = os->os_rootbp->blk_fill;
+		refdbytes = os->os_spa->spa_dsl_pool->
+		    dp_mos_dir->dd_phys->dd_used_bytes;
+	} else {
+		dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
+	}
+
+	ASSERT3U(usedobjs, ==, os->os_rootbp->blk_fill);
+
+	zdb_nicenum(refdbytes, numbuf);
+
+	if (verbosity >= 4) {
+		(void) sprintf(blkbuf, ", rootbp ");
+		(void) sprintf_blkptr(blkbuf + strlen(blkbuf), os->os_rootbp);
+	} else {
+		blkbuf[0] = '\0';
+	}
+
+	dmu_objset_name(os, osname);
+
+	(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
+	    "%s, %llu objects%s\n",
+	    osname, type, (u_longlong_t)dmu_objset_id(os),
+	    (u_longlong_t)dds.dds_creation_txg,
+	    numbuf, (u_longlong_t)usedobjs, blkbuf);
+
+	/* Explicit object list on the command line: dump only those. */
+	if (zopt_objects != 0) {
+		for (i = 0; i < zopt_objects; i++)
+			dump_object(os, zopt_object[i], verbosity,
+			    &print_header);
+		(void) printf("\n");
+		return;
+	}
+
+	if (dump_opt['i'] != 0 || verbosity >= 2)
+		dump_intent_log(dmu_objset_zil(os));
+
+	if (dmu_objset_ds(os) != NULL)
+		dump_deadlist(&dmu_objset_ds(os)->ds_deadlist);
+
+	if (verbosity < 2)
+		return;
+
+	/* An empty objset (rootbp never written) has nothing to walk. */
+	if (os->os_rootbp->blk_birth == 0)
+		return;
+
+	dump_object(os, 0, verbosity, &print_header);
+	object_count = 0;
+	if (DMU_USERUSED_DNODE(os) != NULL &&
+	    DMU_USERUSED_DNODE(os)->dn_type != 0) {
+		dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header);
+		dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header);
+	}
+
+	object = 0;
+	while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
+		dump_object(os, object, verbosity, &print_header);
+		object_count++;
+	}
+
+	ASSERT3U(object_count, ==, usedobjs);
+
+	(void) printf("\n");
+
+	/* ESRCH is the normal end-of-objset indication. */
+	if (error != ESRCH) {
+		(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
+		abort();
+	}
+}
+
+/*
+ * Print an uberblock's fields, bracketed by the optional header and footer
+ * strings (either may be NULL).  At -uuu the root block pointer is shown.
+ *
+ * Fix: header/footer were previously passed to printf() as the format
+ * string itself; any '%' in them would invoke undefined behavior.  They are
+ * now printed through a "%s" format, producing identical output for all
+ * existing callers.
+ */
+static void
+dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
+{
+	time_t timestamp = ub->ub_timestamp;
+
+	(void) printf("%s", header ? header : "");
+	(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
+	(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
+	(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
+	(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
+	(void) printf("\ttimestamp = %llu UTC = %s",
+	    (u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
+	if (dump_opt['u'] >= 3) {
+		char blkbuf[BP_SPRINTF_LEN];
+		sprintf_blkptr(blkbuf, &ub->ub_rootbp);
+		(void) printf("\trootbp = %s\n", blkbuf);
+	}
+	(void) printf("%s", footer ? footer : "");
+}
+
+/*
+ * Dump the pool configuration stored in the MOS: the config object's bonus
+ * buffer holds the packed-nvlist size, which is handed to
+ * dump_packed_nvlist() to print the nvlist itself.
+ */
+static void
+dump_config(spa_t *spa)
+{
+	dmu_buf_t *db;
+	size_t nvsize = 0;
+	int error = 0;
+
+
+	error = dmu_bonus_hold(spa->spa_meta_objset,
+	    spa->spa_config_object, FTAG, &db);
+
+	if (error == 0) {
+		/* Bonus buffer contains the packed nvlist's size in bytes. */
+		nvsize = *(uint64_t *)db->db_data;
+		dmu_buf_rele(db, FTAG);
+
+		(void) printf("\nMOS Configuration:\n");
+		dump_packed_nvlist(spa->spa_meta_objset,
+		    spa->spa_config_object, (void *)&nvsize, 1);
+	} else {
+		(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
+		    (u_longlong_t)spa->spa_config_object, error);
+	}
+}
+
+/*
+ * Read a zpool.cache-style file into memory, unpack it as a packed nvlist,
+ * and print the resulting configuration.  Any failure (open, stat, alloc,
+ * read, unpack) is fatal to zdb and exits with status 1.
+ */
+static void
+dump_cachefile(const char *cachefile)
+{
+	int fd;
+	struct stat64 statbuf;
+	char *buf;
+	nvlist_t *config;
+
+	if ((fd = open64(cachefile, O_RDONLY)) < 0) {
+		(void) printf("cannot open '%s': %s\n", cachefile,
+		    strerror(errno));
+		exit(1);
+	}
+
+	if (fstat64(fd, &statbuf) != 0) {
+		(void) printf("failed to stat '%s': %s\n", cachefile,
+		    strerror(errno));
+		exit(1);
+	}
+
+	if ((buf = malloc(statbuf.st_size)) == NULL) {
+		(void) fprintf(stderr, "failed to allocate %llu bytes\n",
+		    (u_longlong_t)statbuf.st_size);
+		exit(1);
+	}
+
+	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
+		(void) fprintf(stderr, "failed to read %llu bytes\n",
+		    (u_longlong_t)statbuf.st_size);
+		exit(1);
+	}
+
+	(void) close(fd);
+
+	if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
+		(void) fprintf(stderr, "failed to unpack nvlist\n");
+		exit(1);
+	}
+
+	free(buf);
+
+	dump_nvlist(config, 0);
+
+	nvlist_free(config);
+}
+
+#define	ZDB_MAX_UB_HEADER_SIZE 32
+
+/*
+ * Print every valid uberblock found in a vdev label.  A throwaway vdev_t
+ * with the given ashift is set up (as its own top-level vdev) purely so the
+ * VDEV_UBERBLOCK_COUNT/OFFSET macros can locate the uberblock ring.
+ */
+static void
+dump_label_uberblocks(vdev_label_t *lbl, uint64_t ashift)
+{
+	vdev_t vd;
+	vdev_t *vdp = &vd;
+	char header[ZDB_MAX_UB_HEADER_SIZE];
+
+	vd.vdev_ashift = ashift;
+	vdp->vdev_top = vdp;	/* the macros consult the top-level vdev */
+
+	for (int i = 0; i < VDEV_UBERBLOCK_COUNT(vdp); i++) {
+		uint64_t uoff = VDEV_UBERBLOCK_OFFSET(vdp, i);
+		uberblock_t *ub = (void *)((char *)lbl + uoff);
+
+		/* Skip slots that don't hold a valid uberblock. */
+		if (uberblock_verify(ub))
+			continue;
+		(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
+		    "Uberblock[%d]\n", i);
+		dump_uberblock(ub, header, "");
+	}
+}
+
+/*
+ * Dump all four vdev labels of a device: the unpacked nvlist config of each
+ * label and, with -u, its uberblocks.  A /dev/dsk/ path is rewritten to the
+ * corresponding /dev/rdsk/ raw device before opening.
+ */
+static void
+dump_label(const char *dev)
+{
+	int fd;
+	vdev_label_t label;
+	char *path, *buf = label.vl_vdev_phys.vp_nvlist;
+	size_t buflen = sizeof (label.vl_vdev_phys.vp_nvlist);
+	struct stat64 statbuf;
+	uint64_t psize, ashift;
+	int len = strlen(dev) + 1;
+
+	if (strncmp(dev, "/dev/dsk/", 9) == 0) {
+		len++;	/* "/dev/rdsk/" is one byte longer than "/dev/dsk/" */
+		path = malloc(len);
+		(void) snprintf(path, len, "%s%s", "%s%s", "/dev/rdsk/", dev + 9);
+	} else {
+		path = strdup(dev);
+	}
+
+	if ((fd = open64(path, O_RDONLY)) < 0) {
+		(void) printf("cannot open '%s': %s\n", path, strerror(errno));
+		free(path);
+		exit(1);
+	}
+
+	if (fstat64(fd, &statbuf) != 0) {
+		(void) printf("failed to stat '%s': %s\n", path,
+		    strerror(errno));
+		free(path);
+		(void) close(fd);
+		exit(1);
+	}
+
+	/* Labels must be read from the raw (character) device. */
+	if (S_ISBLK(statbuf.st_mode)) {
+		(void) printf("cannot use '%s': character device required\n",
+		    path);
+		free(path);
+		(void) close(fd);
+		exit(1);
+	}
+
+	psize = statbuf.st_size;
+	psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
+
+	for (int l = 0; l < VDEV_LABELS; l++) {
+		nvlist_t *config = NULL;
+
+		(void) printf("--------------------------------------------\n");
+		(void) printf("LABEL %d\n", l);
+		(void) printf("--------------------------------------------\n");
+
+		if (pread64(fd, &label, sizeof (label),
+		    vdev_label_offset(psize, l, 0)) != sizeof (label)) {
+			(void) printf("failed to read label %d\n", l);
+			continue;
+		}
+
+		if (nvlist_unpack(buf, buflen, &config, 0) != 0) {
+			(void) printf("failed to unpack label %d\n", l);
+			ashift = SPA_MINBLOCKSHIFT;
+		} else {
+			nvlist_t *vdev_tree = NULL;
+
+			dump_nvlist(config, 4);
+			/* ashift comes from the label's vdev tree, if any. */
+			if ((nvlist_lookup_nvlist(config,
+			    ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
+			    (nvlist_lookup_uint64(vdev_tree,
+			    ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
+				ashift = SPA_MINBLOCKSHIFT;
+			nvlist_free(config);
+		}
+		if (dump_opt['u'])
+			dump_label_uberblocks(&label, ashift);
+	}
+
+	free(path);
+	(void) close(fd);
+}
+
+/*ARGSUSED*/
+/*
+ * Per-dataset callback: open the named dataset, dump it, then release it
+ * and reset the per-dataset FUID and SA state.  Always returns 0 so the
+ * iteration continues past datasets that fail to open.
+ */
+static int
+dump_one_dir(const char *dsname, void *arg)
+{
+	int error;
+	objset_t *os;
+
+	error = dmu_objset_own(dsname, DMU_OST_ANY, B_TRUE, FTAG, &os);
+	if (error) {
+		(void) printf("Could not open %s, error %d\n", dsname, error);
+		return (0);
+	}
+	dump_dir(os);
+	dmu_objset_disown(os, FTAG);
+	/* FUID and SA tables are per-dataset; reset them for the next one. */
+	fuid_table_destroy();
+	sa_loaded = B_FALSE;
+	return (0);
+}
+
+/*
+ * Block statistics.
+ * One accumulator cell of the traversal's per-level/per-type block counts.
+ */
+typedef struct zdb_blkstats {
+	uint64_t	zb_asize;	/* total allocated size */
+	uint64_t	zb_lsize;	/* total logical size */
+	uint64_t	zb_psize;	/* total physical size */
+	uint64_t	zb_count;	/* number of blocks */
+} zdb_blkstats_t;
+
+/*
+ * Extended object types to report deferred frees and dedup auto-ditto blocks.
+ * These extend dmu_object_type_t past DMU_OT_NUMTYPES for accounting only.
+ */
+#define	ZDB_OT_DEFERRED	(DMU_OT_NUMTYPES + 0)
+#define	ZDB_OT_DITTO	(DMU_OT_NUMTYPES + 1)
+#define	ZDB_OT_OTHER	(DMU_OT_NUMTYPES + 2)
+#define	ZDB_OT_TOTAL	(DMU_OT_NUMTYPES + 3)
+
+/* Names for the extended types above, indexed by (type - DMU_OT_NUMTYPES). */
+static char *zdb_ot_extname[] = {
+	"deferred free",
+	"dedup ditto",
+	"other",
+	"Total",
+};
+
+/* Row index in zcb_type used for the all-levels total. */
+#define	ZB_TOTAL	DN_MAX_LEVELS
+
+/* Accounting state threaded through the block traversal callbacks. */
+typedef struct zdb_cb {
+	zdb_blkstats_t	zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
+	uint64_t	zcb_dedup_asize;	/* space saved by dedup */
+	uint64_t	zcb_dedup_blocks;	/* deduped block count */
+	uint64_t	zcb_errors[256];	/* read errors by errno */
+	int		zcb_readfails;
+	int		zcb_haderrors;
+	spa_t		*zcb_spa;
+} zdb_cb_t;
+
+/*
+ * Account one block pointer into the traversal statistics and, unless -L
+ * (leak-ignore) is set, claim the block so leak detection can later find
+ * anything left unclaimed.  ZIL blocks already seen (zil_bp_tree_add)
+ * are skipped to avoid double-counting.
+ */
+static void
+zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
+    dmu_object_type_t type)
+{
+	uint64_t refcnt = 0;
+
+	ASSERT(type < ZDB_OT_TOTAL);
+
+	if (zilog && zil_bp_tree_add(zilog, bp) != 0)
+		return;
+
+	/*
+	 * Update four cells: (level, type), (level, Total), (Total, type),
+	 * (Total, Total).  i < 2 selects the per-level row; i & 1 selects
+	 * the per-type column.
+	 */
+	for (int i = 0; i < 4; i++) {
+		int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
+		int t = (i & 1) ? type : ZDB_OT_TOTAL;
+		zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
+
+		zb->zb_asize += BP_GET_ASIZE(bp);
+		zb->zb_lsize += BP_GET_LSIZE(bp);
+		zb->zb_psize += BP_GET_PSIZE(bp);
+		zb->zb_count++;
+	}
+
+	if (dump_opt['L'])
+		return;
+
+	if (BP_GET_DEDUP(bp)) {
+		ddt_t *ddt;
+		ddt_entry_t *dde;
+
+		/* Drop one reference on the DDT entry for this bp. */
+		ddt = ddt_select(zcb->zcb_spa, bp);
+		ddt_enter(ddt);
+		dde = ddt_lookup(ddt, bp, B_FALSE);
+
+		if (dde == NULL) {
+			refcnt = 0;
+		} else {
+			ddt_phys_t *ddp = ddt_phys_select(dde, bp);
+			ddt_phys_decref(ddp);
+			refcnt = ddp->ddp_refcnt;
+			if (ddt_phys_total_refcnt(dde) == 0)
+				ddt_remove(ddt, dde);
+		}
+		ddt_exit(ddt);
+	}
+
+	/*
+	 * Claim the block on its last (or only) reference; txg 0 means
+	 * "already claimed" for still-referenced dedup blocks.
+	 */
+	VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
+	    refcnt ? 0 : spa_first_txg(zcb->zcb_spa),
+	    bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
+}
+
+/* ARGSUSED */
+/*
+ * traverse_pool() callback for -b/-c: count every block pointer and,
+ * when checksum verification is requested, read the block back to check
+ * it.  Returns 0 so traversal continues past individual read errors.
+ */
+static int
+zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
+    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
+{
+	zdb_cb_t *zcb = arg;
+	char blkbuf[BP_SPRINTF_LEN];
+	dmu_object_type_t type;
+	boolean_t is_metadata;
+
+	if (bp == NULL)
+		return (0);
+
+	type = BP_GET_TYPE(bp);
+
+	/* Types unknown to this zdb are lumped together as ZDB_OT_OTHER. */
+	zdb_count_block(zcb, zilog, bp,
+	    (type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
+
+	is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
+
+	/* -c verifies metadata checksums; -cc verifies data blocks too. */
+	if (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata)) {
+		int ioerr;
+		size_t size = BP_GET_PSIZE(bp);
+		void *data = malloc(size);	/* NOTE(review): unchecked */
+		int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
+
+		/* If it's an intent log block, failure is expected. */
+		if (zb->zb_level == ZB_ZIL_LEVEL)
+			flags |= ZIO_FLAG_SPECULATIVE;
+
+		ioerr = zio_wait(zio_read(NULL, spa, bp, data, size,
+		    NULL, NULL, ZIO_PRIORITY_ASYNC_READ, flags, zb));
+
+		free(data);
+
+		if (ioerr && !(flags & ZIO_FLAG_SPECULATIVE)) {
+			zcb->zcb_haderrors = 1;
+			zcb->zcb_errors[ioerr]++;
+
+			if (dump_opt['b'] >= 2)
+				sprintf_blkptr(blkbuf, bp);
+			else
+				blkbuf[0] = '\0';
+
+			(void) printf("zdb_blkptr_cb: "
+			    "Got error %d reading "
+			    "<%llu, %llu, %lld, %llx> %s -- skipping\n",
+			    ioerr,
+			    (u_longlong_t)zb->zb_objset,
+			    (u_longlong_t)zb->zb_object,
+			    (u_longlong_t)zb->zb_level,
+			    (u_longlong_t)zb->zb_blkid,
+			    blkbuf);
+		}
+	}
+
+	zcb->zcb_readfails = 0;
+
+	/* -bbbb: trace every visited block pointer. */
+	if (dump_opt['b'] >= 4) {
+		sprintf_blkptr(blkbuf, bp);
+		(void) printf("objset %llu object %llu "
+		    "level %lld offset 0x%llx %s\n",
+		    (u_longlong_t)zb->zb_objset,
+		    (u_longlong_t)zb->zb_object,
+		    (longlong_t)zb->zb_level,
+		    (u_longlong_t)blkid2offset(dnp, bp, zb),
+		    blkbuf);
+	}
+
+	return (0);
+}
+
+/*
+ * space_map_vacate() callback: any segment still present in an SM_ALLOC
+ * map after traversal was allocated but never claimed -- report it.
+ */
+static void
+zdb_leak(space_map_t *sm, uint64_t start, uint64_t size)
+{
+	vdev_t *vdev = sm->sm_ppd;
+
+	(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
+	    (u_longlong_t)vdev->vdev_id, (u_longlong_t)start,
+	    (u_longlong_t)size);
+}
+
+/* ARGSUSED */
+/*
+ * space map "load" callback: deliberately empty -- no extra work is
+ * needed when a map is loaded for leak checking.
+ */
+static void
+zdb_space_map_load(space_map_t *sm)
+{
+}
+
+/*
+ * space map "unload" callback: vacate the map through zdb_leak() so any
+ * remaining (unclaimed) segments are reported as leaks.
+ */
+static void
+zdb_space_map_unload(space_map_t *sm)
+{
+	space_map_vacate(sm, zdb_leak, sm);
+}
+
+/* ARGSUSED */
+/*
+ * space map "claim" callback: a no-op -- zdb never allocates space.
+ */
+static void
+zdb_space_map_claim(space_map_t *sm, uint64_t start, uint64_t size)
+{
+}
+
+/* Ops vector installed on every metaslab's space map by zdb_leak_init(). */
+static space_map_ops_t zdb_space_map_ops = {
+	zdb_space_map_load,
+	zdb_space_map_unload,
+	NULL,	/* alloc */
+	zdb_space_map_claim,
+	NULL,	/* free */
+	NULL	/* maxsize */
+};
+
+/*
+ * Walk the on-disk DDT to (a) count auto-ditto blocks, (b) compute the
+ * space saved by dedup, and (c) pre-populate the in-core DDT so that
+ * zdb_count_block() can decrement one reference per visit.
+ */
+static void
+zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
+{
+	ddt_bookmark_t ddb = { 0 };
+	ddt_entry_t dde;
+	int error;
+
+	while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
+		blkptr_t blk;
+		ddt_phys_t *ddp = dde.dde_phys;
+
+		/*
+		 * Assumes ddt_walk() visits classes in order, so reaching
+		 * the UNIQUE class means no more refcnt > 1 entries remain.
+		 */
+		if (ddb.ddb_class == DDT_CLASS_UNIQUE)
+			return;
+
+		ASSERT(ddt_phys_total_refcnt(&dde) > 1);
+
+		for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
+			if (ddp->ddp_phys_birth == 0)
+				continue;	/* unused phys slot */
+			ddt_bp_create(ddb.ddb_checksum,
+			    &dde.dde_key, ddp, &blk);
+			if (p == DDT_PHYS_DITTO) {
+				zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
+			} else {
+				/* Every reference beyond the first is saved. */
+				zcb->zcb_dedup_asize +=
+				    BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
+				zcb->zcb_dedup_blocks++;
+			}
+		}
+		if (!dump_opt['L']) {
+			/*
+			 * Instantiate this entry in the in-core DDT; blk is
+			 * the last variant built above, and any variant of
+			 * the same key selects the same entry.
+			 */
+			ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
+			ddt_enter(ddt);
+			VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
+			ddt_exit(ddt);
+		}
+	}
+
+	ASSERT(error == ENOENT);
+}
+
+/*
+ * Prepare leak detection: reload every metaslab's space map as an
+ * SM_ALLOC map under zdb_space_map_ops, then prime the in-core DDT.
+ * After traversal claims every block, anything left in a map is a leak.
+ */
+static void
+zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
+{
+	zcb->zcb_spa = spa;
+
+	if (!dump_opt['L']) {
+		vdev_t *rvd = spa->spa_root_vdev;
+		for (int c = 0; c < rvd->vdev_children; c++) {
+			vdev_t *vd = rvd->vdev_child[c];
+			for (int m = 0; m < vd->vdev_ms_count; m++) {
+				metaslab_t *msp = vd->vdev_ms[m];
+				mutex_enter(&msp->ms_lock);
+				/* Replace whatever was loaded at import. */
+				space_map_unload(&msp->ms_map);
+				VERIFY(space_map_load(&msp->ms_map,
+				    &zdb_space_map_ops, SM_ALLOC, &msp->ms_smo,
+				    spa->spa_meta_objset) == 0);
+				/* Lets zdb_leak() name the leaking vdev. */
+				msp->ms_map.sm_ppd = vd;
+				mutex_exit(&msp->ms_lock);
+			}
+		}
+	}
+
+	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+	zdb_ddt_leak_init(spa, zcb);
+
+	spa_config_exit(spa, SCL_CONFIG, FTAG);
+}
+
+/*
+ * Unload every metaslab's space map; via zdb_space_map_ops this reports
+ * any segments that remain (i.e. leaked space) through zdb_leak().
+ */
+static void
+zdb_leak_fini(spa_t *spa)
+{
+	if (dump_opt['L'])
+		return;
+
+	vdev_t *rvd = spa->spa_root_vdev;
+
+	for (int c = 0; c < rvd->vdev_children; c++) {
+		vdev_t *vd = rvd->vdev_child[c];
+
+		for (int m = 0; m < vd->vdev_ms_count; m++) {
+			metaslab_t *msp = vd->vdev_ms[m];
+
+			mutex_enter(&msp->ms_lock);
+			space_map_unload(&msp->ms_map);
+			mutex_exit(&msp->ms_lock);
+		}
+	}
+}
+
+/* ARGSUSED */
+/*
+ * bpobj/bptree iteration callback: tally one deferred-free block.
+ */
+static int
+count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+	zdb_cb_t *cbp = arg;
+
+	if (dump_opt['b'] >= 4) {
+		char buf[BP_SPRINTF_LEN];
+
+		sprintf_blkptr(buf, bp);
+		(void) printf("[%s] %s\n", "deferred free", buf);
+	}
+	zdb_count_block(cbp, NULL, bp, ZDB_OT_DEFERRED);
+	return (0);
+}
+
+/*
+ * Traverse every block in the pool (-b/-c), accumulating statistics and
+ * optionally verifying checksums and leak-freedom.  Returns 0 on
+ * success, 2 on leaks or an empty traversal, 3 on checksum errors.
+ */
+static int
+dump_block_stats(spa_t *spa)
+{
+	zdb_cb_t zcb = { 0 };
+	zdb_blkstats_t *zb, *tzb;
+	uint64_t norm_alloc, norm_space, total_alloc, total_found;
+	int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA | TRAVERSE_HARD;
+	int leaks = 0;
+
+	(void) printf("\nTraversing all blocks %s%s%s%s%s...\n",
+	    (dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
+	    (dump_opt['c'] == 1) ? "metadata " : "",
+	    dump_opt['c'] ? "checksums " : "",
+	    (dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
+	    !dump_opt['L'] ? "nothing leaked " : "");
+
+	/*
+	 * Load all space maps as SM_ALLOC maps, then traverse the pool
+	 * claiming each block we discover.  If the pool is perfectly
+	 * consistent, the space maps will be empty when we're done.
+	 * Anything left over is a leak; any block we can't claim (because
+	 * it's not part of any space map) is a double allocation,
+	 * reference to a freed block, or an unclaimed log block.
+	 */
+	zdb_leak_init(spa, &zcb);
+
+	/*
+	 * If there's a deferred-free bplist, process that first.
+	 */
+	(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
+	    count_block_cb, &zcb, NULL);
+	if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
+		(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
+		    count_block_cb, &zcb, NULL);
+	}
+	/* Blocks queued by an in-progress async destroy also count. */
+	if (spa_feature_is_active(spa,
+	    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
+		VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
+		    spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
+		    &zcb, NULL));
+	}
+
+	if (dump_opt['c'] > 1)
+		flags |= TRAVERSE_PREFETCH_DATA;
+
+	zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
+
+	/* Summarize read errors by errno value. */
+	if (zcb.zcb_haderrors) {
+		(void) printf("\nError counts:\n\n");
+		(void) printf("\t%5s  %s\n", "errno", "count");
+		for (int e = 0; e < 256; e++) {
+			if (zcb.zcb_errors[e] != 0) {
+				(void) printf("\t%5d  %llu\n",
+				    e, (u_longlong_t)zcb.zcb_errors[e]);
+			}
+		}
+	}
+
+	/*
+	 * Report any leaked segments.
+	 */
+	zdb_leak_fini(spa);
+
+	tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
+
+	norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
+	norm_space = metaslab_class_get_space(spa_normal_class(spa));
+
+	/* Dedup blocks were tallied once per reference; back that out. */
+	total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa));
+	total_found = tzb->zb_asize - zcb.zcb_dedup_asize;
+
+	if (total_found == total_alloc) {
+		if (!dump_opt['L'])
+			(void) printf("\n\tNo leaks (block sum matches space"
+			    " maps exactly)\n");
+	} else {
+		(void) printf("block traversal size %llu != alloc %llu "
+		    "(%s %lld)\n",
+		    (u_longlong_t)total_found,
+		    (u_longlong_t)total_alloc,
+		    (dump_opt['L']) ? "unreachable" : "leaked",
+		    (longlong_t)(total_alloc - total_found));
+		leaks = 1;
+	}
+
+	/* Nothing visited: avoid dividing by zero below. */
+	if (tzb->zb_count == 0)
+		return (2);
+
+	(void) printf("\n");
+	(void) printf("\tbp count:      %10llu\n",
+	    (u_longlong_t)tzb->zb_count);
+	(void) printf("\tbp logical:    %10llu      avg: %6llu\n",
+	    (u_longlong_t)tzb->zb_lsize,
+	    (u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
+	(void) printf("\tbp physical:   %10llu      avg:"
+	    " %6llu     compression: %6.2f\n",
+	    (u_longlong_t)tzb->zb_psize,
+	    (u_longlong_t)(tzb->zb_psize / tzb->zb_count),
+	    (double)tzb->zb_lsize / tzb->zb_psize);
+	(void) printf("\tbp allocated:  %10llu      avg:"
+	    " %6llu     compression: %6.2f\n",
+	    (u_longlong_t)tzb->zb_asize,
+	    (u_longlong_t)(tzb->zb_asize / tzb->zb_count),
+	    (double)tzb->zb_lsize / tzb->zb_asize);
+	(void) printf("\tbp deduped:    %10llu    ref>1:"
+	    " %6llu   deduplication: %6.2f\n",
+	    (u_longlong_t)zcb.zcb_dedup_asize,
+	    (u_longlong_t)zcb.zcb_dedup_blocks,
+	    (double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
+	(void) printf("\tSPA allocated: %10llu     used: %5.2f%%\n",
+	    (u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
+
+	/* -bb: per-type table; -bbb adds per-level rows. */
+	if (dump_opt['b'] >= 2) {
+		int l, t, level;
+		(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
+		    "\t  avg\t comp\t%%Total\tType\n");
+
+		for (t = 0; t <= ZDB_OT_TOTAL; t++) {
+			char csize[32], lsize[32], psize[32], asize[32];
+			char avg[32];
+			char *typename;
+
+			if (t < DMU_OT_NUMTYPES)
+				typename = dmu_ot[t].ot_name;
+			else
+				typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
+
+			if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
+				(void) printf("%6s\t%5s\t%5s\t%5s"
+				    "\t%5s\t%5s\t%6s\t%s\n",
+				    "-",
+				    "-",
+				    "-",
+				    "-",
+				    "-",
+				    "-",
+				    "-",
+				    typename);
+				continue;
+			}
+
+			for (l = ZB_TOTAL - 1; l >= -1; l--) {
+				level = (l == -1 ? ZB_TOTAL : l);
+				zb = &zcb.zcb_type[level][t];
+
+				if (zb->zb_asize == 0)
+					continue;
+
+				if (dump_opt['b'] < 3 && level != ZB_TOTAL)
+					continue;
+
+				/* Skip an L0 row identical to the total. */
+				if (level == 0 && zb->zb_asize ==
+				    zcb.zcb_type[ZB_TOTAL][t].zb_asize)
+					continue;
+
+				zdb_nicenum(zb->zb_count, csize);
+				zdb_nicenum(zb->zb_lsize, lsize);
+				zdb_nicenum(zb->zb_psize, psize);
+				zdb_nicenum(zb->zb_asize, asize);
+				zdb_nicenum(zb->zb_asize / zb->zb_count, avg);
+
+				(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
+				    "\t%5.2f\t%6.2f\t",
+				    csize, lsize, psize, asize, avg,
+				    (double)zb->zb_lsize / zb->zb_psize,
+				    100.0 * zb->zb_asize / tzb->zb_asize);
+
+				if (level == ZB_TOTAL)
+					(void) printf("%s\n", typename);
+				else
+					(void) printf("    L%d %s\n",
+					    level, typename);
+			}
+		}
+	}
+
+	(void) printf("\n");
+
+	if (leaks)
+		return (2);
+
+	if (zcb.zcb_haderrors)
+		return (3);
+
+	return (0);
+}
+
+/*
+ * One node per unique dedup key found while simulating a DDT with -S;
+ * accumulates reference counts and sizes for that key.
+ */
+typedef struct zdb_ddt_entry {
+	ddt_key_t	zdde_key;
+	uint64_t	zdde_ref_blocks;	/* references seen */
+	uint64_t	zdde_ref_lsize;		/* summed logical size */
+	uint64_t	zdde_ref_psize;		/* summed physical size */
+	uint64_t	zdde_ref_dsize;		/* summed deflated size */
+	avl_node_t	zdde_node;
+} zdb_ddt_entry_t;
+
+/* ARGSUSED */
+/*
+ * Traversal callback for -S: bucket each data block by dedup key in an
+ * AVL tree so a dedup table can be simulated afterwards.
+ */
+static int
+zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
+    arc_buf_t *pbuf, const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
+{
+	avl_tree_t *tree = arg;
+	zdb_ddt_entry_t search, *entry;
+	avl_index_t where;
+
+	if (bp == NULL)
+		return (0);
+
+	if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
+		(void) printf("traversing objset %llu, %llu objects, "
+		    "%lu blocks so far\n",
+		    (u_longlong_t)zb->zb_objset,
+		    (u_longlong_t)bp->blk_fill,
+		    avl_numnodes(tree));
+	}
+
+	/* Only checksummed level-0 data blocks are dedup candidates. */
+	if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
+	    BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
+		return (0);
+
+	ddt_key_fill(&search.zdde_key, bp);
+
+	entry = avl_find(tree, &search, &where);
+	if (entry == NULL) {
+		entry = umem_zalloc(sizeof (*entry), UMEM_NOFAIL);
+		entry->zdde_key = search.zdde_key;
+		avl_insert(tree, entry, where);
+	}
+
+	entry->zdde_ref_blocks += 1;
+	entry->zdde_ref_lsize += BP_GET_LSIZE(bp);
+	entry->zdde_ref_psize += BP_GET_PSIZE(bp);
+	entry->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
+
+	return (0);
+}
+
+/*
+ * Implement -S: walk every data block in the pool, bucket blocks by
+ * dedup key, then print the DDT histogram and dedup ratio the pool
+ * would have if dedup were enabled for everything.
+ */
+static void
+dump_simulated_ddt(spa_t *spa)
+{
+	avl_tree_t t;
+	void *cookie = NULL;
+	zdb_ddt_entry_t *zdde;
+	ddt_histogram_t ddh_total = { 0 };
+	ddt_stat_t dds_total = { 0 };
+
+	avl_create(&t, ddt_entry_compare,
+	    sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
+
+	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+	(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
+	    zdb_ddt_add_cb, &t);
+
+	spa_config_exit(spa, SCL_CONFIG, FTAG);
+
+	while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
+		ddt_stat_t dds;
+		uint64_t refcnt = zdde->zdde_ref_blocks;
+		ASSERT(refcnt != 0);
+
+		/* dds_* hold the per-reference averages... */
+		dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
+		dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
+		dds.dds_psize = zdde->zdde_ref_psize / refcnt;
+		dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
+
+		/* ...while dds_ref_* hold the raw totals. */
+		dds.dds_ref_blocks = zdde->zdde_ref_blocks;
+		dds.dds_ref_lsize = zdde->zdde_ref_lsize;
+		dds.dds_ref_psize = zdde->zdde_ref_psize;
+		dds.dds_ref_dsize = zdde->zdde_ref_dsize;
+
+		/* Histogram rows are indexed by log2 of the refcount. */
+		ddt_stat_add(&ddh_total.ddh_stat[highbit(refcnt) - 1], &dds, 0);
+
+		umem_free(zdde, sizeof (*zdde));
+	}
+
+	avl_destroy(&t);
+
+	ddt_histogram_stat(&dds_total, &ddh_total);
+
+	(void) printf("Simulated DDT histogram:\n");
+
+	zpool_dump_ddt(&dds_total, &ddh_total);
+
+	dump_dedup_ratio(&dds_total);
+}
+
+/*
+ * Top-level pool dump dispatcher: run each requested report (-C, -u,
+ * -D, -m, -d/-i, -b/-c, -s, -h) against the open pool in a fixed order.
+ * Exits with dump_block_stats()'s code if traversal found problems.
+ */
+static void
+dump_zpool(spa_t *spa)
+{
+	dsl_pool_t *dp = spa_get_dsl(spa);
+	int rc = 0;
+
+	/* -S replaces all other reports. */
+	if (dump_opt['S']) {
+		dump_simulated_ddt(spa);
+		return;
+	}
+
+	if (!dump_opt['e'] && dump_opt['C'] > 1) {
+		(void) printf("\nCached configuration:\n");
+		dump_nvlist(spa->spa_config, 8);
+	}
+
+	if (dump_opt['C'])
+		dump_config(spa);
+
+	if (dump_opt['u'])
+		dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
+
+	if (dump_opt['D'])
+		dump_all_ddts(spa);
+
+	if (dump_opt['d'] > 2 || dump_opt['m'])
+		dump_metaslabs(spa);
+
+	if (dump_opt['d'] || dump_opt['i']) {
+		/* The MOS first, then every dataset and snapshot. */
+		dump_dir(dp->dp_meta_objset);
+		if (dump_opt['d'] >= 3) {
+			dump_bpobj(&spa->spa_deferred_bpobj, "Deferred frees");
+			if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
+				dump_bpobj(&spa->spa_dsl_pool->dp_free_bpobj,
+				    "Pool snapshot frees");
+			}
+
+			if (spa_feature_is_active(spa,
+			    &spa_feature_table[SPA_FEATURE_ASYNC_DESTROY])) {
+				dump_bptree(spa->spa_meta_objset,
+				    spa->spa_dsl_pool->dp_bptree_obj,
+				    "Pool dataset frees");
+			}
+			dump_dtl(spa->spa_root_vdev, 0);
+		}
+		(void) dmu_objset_find(spa_name(spa), dump_one_dir,
+		    NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
+	}
+	if (dump_opt['b'] || dump_opt['c'])
+		rc = dump_block_stats(spa);
+
+	if (dump_opt['s'])
+		show_pool_stats(spa);
+
+	if (dump_opt['h'])
+		dump_history(spa);
+
+	if (rc != 0)
+		exit(rc);
+}
+
+/* Flags for zdb_read_block() (-R), decoded from the block specifier. */
+#define	ZDB_FLAG_CHECKSUM	0x0001
+#define	ZDB_FLAG_DECOMPRESS	0x0002
+#define	ZDB_FLAG_BSWAP		0x0004
+#define	ZDB_FLAG_GBH		0x0008
+#define	ZDB_FLAG_INDIRECT	0x0010
+#define	ZDB_FLAG_PHYS		0x0020
+#define	ZDB_FLAG_RAW		0x0040
+#define	ZDB_FLAG_PRINT_BLKPTR	0x0080
+
+/* Maps a flag character to its ZDB_FLAG_* bit; populated by main(). */
+int flagbits[256];
+
+/*
+ * Pretty-print one block pointer, byte-swapping it first when the
+ * caller requested ZDB_FLAG_BSWAP.
+ */
+static void
+zdb_print_blkptr(blkptr_t *bp, int flags)
+{
+	char buf[BP_SPRINTF_LEN];
+
+	if (flags & ZDB_FLAG_BSWAP)
+		byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
+	sprintf_blkptr(buf, bp);
+	(void) printf("%s\n", buf);
+}
+
+/*
+ * Print each of the nbps block pointers in the array in order.
+ */
+static void
+zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
+{
+	for (int n = 0; n < nbps; n++)
+		zdb_print_blkptr(&bp[n], flags);
+}
+
+/*
+ * Interpret buf as a gang block header: an array of SPA_GBH_NBLKPTRS
+ * block pointers.
+ */
+static void
+zdb_dump_gbh(void *buf, int flags)
+{
+	zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
+}
+
+/*
+ * Write the (optionally byte-swapped) buffer verbatim to stdout.
+ */
+static void
+zdb_dump_block_raw(void *buf, uint64_t size, int flags)
+{
+	if (flags & ZDB_FLAG_BSWAP)
+		byteswap_uint64_array(buf, size);
+	(void) write(1, buf, size);
+}
+
+/*
+ * Hex-dump a buffer: two 64-bit words per row followed by the ASCII
+ * rendering of those 16 bytes.  ZDB_FLAG_BSWAP swaps each word before
+ * display (and the header labels the byte columns accordingly).
+ */
+static void
+zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
+{
+	uint64_t *words = (uint64_t *)buf;
+	int nwords = size / sizeof (uint64_t);
+	int swap = !!(flags & ZDB_FLAG_BSWAP);
+	const char *hdr = swap ?
+	    " 7 6 5 4 3 2 1 0   f e d c b a 9 8" :
+	    " 0 1 2 3 4 5 6 7   8 9 a b c d e f";
+
+	(void) printf("\n%s\n%6s   %s  0123456789abcdef\n", label, "", hdr);
+
+	for (int i = 0; i < nwords; i += 2) {
+		char *ascii = (char *)&words[i];
+
+		(void) printf("%06llx:  %016llx  %016llx  ",
+		    (u_longlong_t)(i * sizeof (uint64_t)),
+		    (u_longlong_t)(swap ? BSWAP_64(words[i]) : words[i]),
+		    (u_longlong_t)(swap ?
+		    BSWAP_64(words[i + 1]) : words[i + 1]));
+
+		for (int j = 0; j < 2 * sizeof (uint64_t); j++)
+			(void) printf("%c",
+			    isprint(ascii[j]) ? ascii[j] : '.');
+		(void) printf("\n");
+	}
+}
+
+/*
+ * There are two acceptable formats:
+ *	leaf_name	  - For example: c1t0d0 or /tmp/ztest.0a
+ *	child[.child]*    - For example: 0.1.1
+ *
+ * The second form can be used to specify arbitrary vdevs anywhere
+ * in the hierarchy.  For example, in a pool with a mirror of
+ * RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
+ */
+static vdev_t *
+zdb_vdev_lookup(vdev_t *vdev, char *path)
+{
+	char *s, *p, *q;
+	int i;
+
+	if (vdev == NULL)
+		return (NULL);
+
+	/* First, assume the x.x.x.x format */
+	i = (int)strtoul(path, &s, 10);
+	if (s == path || (s && *s != '.' && *s != '\0'))
+		goto name;
+	if (i < 0 || i >= vdev->vdev_children)
+		return (NULL);
+
+	vdev = vdev->vdev_child[i];
+	if (*s == '\0')
+		return (vdev);
+	/* Recurse on the remainder of the dotted path. */
+	return (zdb_vdev_lookup(vdev, s+1));
+
+name:
+	/* Otherwise match by device name against each child's vdev_path. */
+	for (i = 0; i < vdev->vdev_children; i++) {
+		vdev_t *vc = vdev->vdev_child[i];
+
+		if (vc->vdev_path == NULL) {
+			/* Interior vdev with no path: search its children. */
+			vc = zdb_vdev_lookup(vc, path);
+			if (vc == NULL)
+				continue;
+			else
+				return (vc);
+		}
+
+		p = strrchr(vc->vdev_path, '/');
+		p = p ? p + 1 : vc->vdev_path;	/* basename of device path */
+		q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
+
+		if (strcmp(vc->vdev_path, path) == 0)
+			return (vc);
+		if (strcmp(p, path) == 0)
+			return (vc);
+		/* Also accept the name with a trailing "s0" slice omitted. */
+		if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
+			return (vc);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Read a block from a pool and print it out.  The syntax of the
+ * block descriptor is:
+ *
+ *	pool:vdev_specifier:offset:size[:flags]
+ *
+ *	pool           - The name of the pool you wish to read from
+ *	vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
+ *	offset         - offset, in hex, in bytes
+ *	size           - Amount of data to read, in hex, in bytes
+ *	flags          - A string of characters specifying options
+ *		 b: Decode a blkptr at given offset within block
+ *		*c: Calculate and display checksums
+ *		 d: Decompress data before dumping
+ *		 e: Byteswap data before dumping
+ *		 g: Display data as a gang block header
+ *		 i: Display as an indirect block
+ *		 p: Do I/O to physical offset
+ *		 r: Dump raw data to stdout
+ *
+ *              * = not yet implemented
+ */
+static void
+zdb_read_block(char *thing, spa_t *spa)
+{
+	blkptr_t blk, *bp = &blk;
+	dva_t *dva = bp->blk_dva;
+	int flags = 0;
+	uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0;
+	zio_t *zio;
+	vdev_t *vd;
+	void *pbuf, *lbuf, *buf;
+	char *s, *p, *dup, *vdev, *flagstr;
+	int i, error;
+
+	/* Parse the colon-separated specifier; strtok mutates the copy. */
+	dup = strdup(thing);
+	s = strtok(dup, ":");
+	vdev = s ? s : "";
+	s = strtok(NULL, ":");
+	offset = strtoull(s ? s : "", NULL, 16);
+	s = strtok(NULL, ":");
+	size = strtoull(s ? s : "", NULL, 16);
+	s = strtok(NULL, ":");
+	flagstr = s ? s : "";
+
+	/* Validate; the last failing check's message wins. */
+	s = NULL;
+	if (size == 0)
+		s = "size must not be zero";
+	if (!IS_P2ALIGNED(size, DEV_BSIZE))
+		s = "size must be a multiple of sector size";
+	if (!IS_P2ALIGNED(offset, DEV_BSIZE))
+		s = "offset must be a multiple of sector size";
+	if (s) {
+		(void) printf("Invalid block specifier: %s  - %s\n", thing, s);
+		free(dup);
+		return;
+	}
+
+	for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) {
+		for (i = 0; flagstr[i]; i++) {
+			int bit = flagbits[(uchar_t)flagstr[i]];
+
+			if (bit == 0) {
+				(void) printf("***Invalid flag: %c\n",
+				    flagstr[i]);
+				continue;
+			}
+			flags |= bit;
+
+			/* If it's not something with an argument, keep going */
+			if ((bit & (ZDB_FLAG_CHECKSUM |
+			    ZDB_FLAG_PRINT_BLKPTR)) == 0)
+				continue;
+
+			p = &flagstr[i + 1];
+			if (bit == ZDB_FLAG_PRINT_BLKPTR)
+				blkptr_offset = strtoull(p, &p, 16);
+			if (*p != ':' && *p != '\0') {
+				(void) printf("***Invalid flag arg: '%s'\n", s);
+				free(dup);
+				return;
+			}
+		}
+	}
+
+	vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
+	if (vd == NULL) {
+		(void) printf("***Invalid vdev: %s\n", vdev);
+		free(dup);
+		return;
+	} else {
+		if (vd->vdev_path)
+			(void) fprintf(stderr, "Found vdev: %s\n",
+			    vd->vdev_path);
+		else
+			(void) fprintf(stderr, "Found vdev type: %s\n",
+			    vd->vdev_ops->vdev_op_type);
+	}
+
+	psize = size;
+	lsize = size;
+
+	pbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
+	lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
+
+	/* Build a minimal blkptr describing the requested read. */
+	BP_ZERO(bp);
+
+	DVA_SET_VDEV(&dva[0], vd->vdev_id);
+	DVA_SET_OFFSET(&dva[0], offset);
+	DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
+	DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
+
+	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
+
+	BP_SET_LSIZE(bp, lsize);
+	BP_SET_PSIZE(bp, psize);
+	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
+	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
+	BP_SET_TYPE(bp, DMU_OT_NONE);
+	BP_SET_LEVEL(bp, 0);
+	BP_SET_DEDUP(bp, 0);
+	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
+
+	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+	zio = zio_root(spa, NULL, NULL, 0);
+
+	if (vd == vd->vdev_top) {
+		/*
+		 * Treat this as a normal block read.
+		 */
+		zio_nowait(zio_read(zio, spa, bp, pbuf, psize, NULL, NULL,
+		    ZIO_PRIORITY_SYNC_READ,
+		    ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
+	} else {
+		/*
+		 * Treat this as a vdev child I/O.
+		 */
+		zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pbuf, psize,
+		    ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
+		    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
+		    ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
+		    ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL));
+	}
+
+	error = zio_wait(zio);
+	spa_config_exit(spa, SCL_STATE, FTAG);
+
+	if (error) {
+		(void) printf("Read of %s failed, error: %d\n", thing, error);
+		goto out;
+	}
+
+	if (flags & ZDB_FLAG_DECOMPRESS) {
+		/*
+		 * We don't know how the data was compressed, so just try
+		 * every decompress function at every inflated blocksize.
+		 */
+		enum zio_compress c;
+		void *pbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
+		void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
+
+		bcopy(pbuf, pbuf2, psize);
+
+		/*
+		 * Randomize the tails of both copies: a decompression is
+		 * only trusted when both runs agree, which rules out
+		 * algorithms that merely echo their input.
+		 */
+		VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf + psize,
+		    SPA_MAXBLOCKSIZE - psize) == 0);
+
+		VERIFY(random_get_pseudo_bytes((uint8_t *)pbuf2 + psize,
+		    SPA_MAXBLOCKSIZE - psize) == 0);
+
+		/*
+		 * The loop clause alone steps lsize down by one
+		 * SPA_MINBLOCKSIZE per iteration; a second decrement in
+		 * the body would skip every other candidate size.
+		 */
+		for (lsize = SPA_MAXBLOCKSIZE; lsize > psize;
+		    lsize -= SPA_MINBLOCKSIZE) {
+			for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
+				if (zio_decompress_data(c, pbuf, lbuf,
+				    psize, lsize) == 0 &&
+				    zio_decompress_data(c, pbuf2, lbuf2,
+				    psize, lsize) == 0 &&
+				    bcmp(lbuf, lbuf2, lsize) == 0)
+					break;
+			}
+			if (c != ZIO_COMPRESS_FUNCTIONS)
+				break;
+		}
+
+		umem_free(pbuf2, SPA_MAXBLOCKSIZE);
+		umem_free(lbuf2, SPA_MAXBLOCKSIZE);
+
+		if (lsize <= psize) {
+			(void) printf("Decompress of %s failed\n", thing);
+			goto out;
+		}
+		buf = lbuf;
+		size = lsize;
+	} else {
+		buf = pbuf;
+		size = psize;
+	}
+
+	if (flags & ZDB_FLAG_PRINT_BLKPTR)
+		zdb_print_blkptr((blkptr_t *)(void *)
+		    ((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
+	else if (flags & ZDB_FLAG_RAW)
+		zdb_dump_block_raw(buf, size, flags);
+	else if (flags & ZDB_FLAG_INDIRECT)
+		zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t),
+		    flags);
+	else if (flags & ZDB_FLAG_GBH)
+		zdb_dump_gbh(buf, flags);
+	else
+		zdb_dump_block(thing, buf, size, flags);
+
+out:
+	umem_free(pbuf, SPA_MAXBLOCKSIZE);
+	umem_free(lbuf, SPA_MAXBLOCKSIZE);
+	free(dup);
+}
+
+/*
+ * Return B_TRUE when the pool config cfg matches tgt, which may be
+ * either a pool GUID (any strtoull-parseable number) or a pool name.
+ */
+static boolean_t
+pool_match(nvlist_t *cfg, char *tgt)
+{
+	uint64_t guid = strtoull(tgt, NULL, 0);
+	uint64_t v;
+	char *s;
+
+	if (guid == 0) {
+		/* Not numeric: compare against the pool name. */
+		if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
+			return (strcmp(s, tgt) == 0);
+	} else {
+		if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
+			return (v == guid);
+	}
+	return (B_FALSE);
+}
+
+/*
+ * Resolve *target (a pool name or GUID, optionally followed by
+ * /dataset or @snap) against the importable pools under dirv[].  On a
+ * unique match, return the pool name, store its config in *configp, and
+ * rewrite *target so a GUID is replaced by the pool name.  Returns NULL
+ * (with *configp = NULL) when nothing matches; aborts on ambiguity.
+ */
+static char *
+find_zpool(char **target, nvlist_t **configp, int dirc, char **dirv)
+{
+	nvlist_t *pools;
+	nvlist_t *match = NULL;
+	char *name = NULL;
+	char *sepp = NULL;
+	char sep;
+	int count = 0;
+	importargs_t args = { 0 };
+
+	args.paths = dirc;
+	args.path = dirv;
+	args.can_be_active = B_TRUE;
+
+	/* Temporarily truncate at the dataset separator, if any. */
+	if ((sepp = strpbrk(*target, "/@")) != NULL) {
+		sep = *sepp;
+		*sepp = '\0';
+	}
+
+	pools = zpool_search_import(g_zfs, &args);
+
+	if (pools != NULL) {
+		nvpair_t *elem = NULL;
+		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
+			verify(nvpair_value_nvlist(elem, configp) == 0);
+			if (pool_match(*configp, *target)) {
+				count++;
+				if (match != NULL) {
+					/* print previously found config */
+					if (name != NULL) {
+						(void) printf("%s\n", name);
+						dump_nvlist(match, 8);
+						name = NULL;
+					}
+					(void) printf("%s\n",
+					    nvpair_name(elem));
+					dump_nvlist(*configp, 8);
+				} else {
+					match = *configp;
+					name = nvpair_name(elem);
+				}
+			}
+		}
+	}
+	if (count > 1)
+		(void) fatal("\tMatched %d pools - use pool GUID "
+		    "instead of pool name or \n"
+		    "\tpool name part of a dataset name to select pool", count);
+
+	/* Restore the separator character clobbered above. */
+	if (sepp)
+		*sepp = sep;
+	/*
+	 * If pool GUID was specified for pool id, replace it with pool name
+	 */
+	if (name && (strstr(*target, name) != *target)) {
+		int sz = 1 + strlen(name) + ((sepp) ? strlen(sepp) : 0);
+
+		*target = umem_alloc(sz, UMEM_NOFAIL);
+		(void) snprintf(*target, sz, "%s%s", name, sepp ? sepp : "");
+	}
+
+	*configp = name ? match : NULL;
+
+	return (name);
+}
+
+/*
+ * Entry point: parse options, open the target pool or dataset
+ * (importing an exported pool when -e is given), and dispatch to the
+ * requested dump routines.
+ */
+int
+main(int argc, char **argv)
+{
+	int i, c;
+	struct rlimit rl = { 1024, 1024 };
+	spa_t *spa = NULL;
+	objset_t *os = NULL;
+	int dump_all = 1;
+	int verbose = 0;
+	int error = 0;
+	char **searchdirs = NULL;
+	int nsearch = 0;
+	char *target;
+	nvlist_t *policy = NULL;
+	uint64_t max_txg = UINT64_MAX;
+	int rewind = ZPOOL_NEVER_REWIND;
+
+	(void) setrlimit(RLIMIT_NOFILE, &rl);
+	(void) enable_extended_FILE_stdio(-1, -1);
+
+	dprintf_setup(&argc, argv);
+
+	while ((c = getopt(argc, argv, "bcdhilmsuCDRSAFLXevp:t:U:P")) != -1) {
+		switch (c) {
+		/* Report selectors: any one disables dump-everything mode. */
+		case 'b':
+		case 'c':
+		case 'd':
+		case 'h':
+		case 'i':
+		case 'l':
+		case 'm':
+		case 's':
+		case 'u':
+		case 'C':
+		case 'D':
+		case 'R':
+		case 'S':
+			dump_opt[c]++;
+			dump_all = 0;
+			break;
+		/* Modifiers: these do not select a report by themselves. */
+		case 'A':
+		case 'F':
+		case 'L':
+		case 'X':
+		case 'e':
+		case 'P':
+			dump_opt[c]++;
+			break;
+		case 'v':
+			verbose++;
+			break;
+		case 'p':
+			/* -p is cumulative; grow the search list by one. */
+			if (searchdirs == NULL) {
+				searchdirs = umem_alloc(sizeof (char *),
+				    UMEM_NOFAIL);
+			} else {
+				char **tmp = umem_alloc((nsearch + 1) *
+				    sizeof (char *), UMEM_NOFAIL);
+				bcopy(searchdirs, tmp, nsearch *
+				    sizeof (char *));
+				umem_free(searchdirs,
+				    nsearch * sizeof (char *));
+				searchdirs = tmp;
+			}
+			searchdirs[nsearch++] = optarg;
+			break;
+		case 't':
+			max_txg = strtoull(optarg, NULL, 0);
+			if (max_txg < TXG_INITIAL) {
+				(void) fprintf(stderr, "incorrect txg "
+				    "specified: %s\n", optarg);
+				usage();
+			}
+			break;
+		case 'U':
+			spa_config_path = optarg;
+			break;
+		default:
+			usage();
+			break;
+		}
+	}
+
+	if (!dump_opt['e'] && searchdirs != NULL) {
+		(void) fprintf(stderr, "-p option requires use of -e\n");
+		usage();
+	}
+
+	kernel_init(FREAD);
+	g_zfs = libzfs_init();
+	ASSERT(g_zfs != NULL);
+
+	if (dump_all)
+		verbose = MAX(verbose, 1);
+
+	/* With no selector given, enable everything except the modifiers. */
+	for (c = 0; c < 256; c++) {
+		if (dump_all && !strchr("elAFLRSXP", c))
+			dump_opt[c] = 1;
+		if (dump_opt[c])
+			dump_opt[c] += verbose;
+	}
+
+	/* -A ignores ASSERTs; -AA enables recovery; -AAA does both. */
+	aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
+	zfs_recover = (dump_opt['A'] > 1);
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 2 && dump_opt['R'])
+		usage();
+	if (argc < 1) {
+		/* -C with no pool argument dumps the cache file itself. */
+		if (!dump_opt['e'] && dump_opt['C']) {
+			dump_cachefile(spa_config_path);
+			return (0);
+		}
+		usage();
+	}
+
+	if (dump_opt['l']) {
+		dump_label(argv[0]);
+		return (0);
+	}
+
+	if (dump_opt['X'] || dump_opt['F'])
+		rewind = ZPOOL_DO_REWIND |
+		    (dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
+
+	if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
+	    nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 ||
+	    nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0)
+		fatal("internal error: %s", strerror(ENOMEM));
+
+	error = 0;
+	target = argv[0];
+
+	if (dump_opt['e']) {
+		nvlist_t *cfg = NULL;
+		char *name = find_zpool(&target, &cfg, nsearch, searchdirs);
+
+		error = ENOENT;
+		if (name) {
+			if (dump_opt['C'] > 1) {
+				(void) printf("\nConfiguration for import:\n");
+				dump_nvlist(cfg, 8);
+			}
+			if (nvlist_add_nvlist(cfg,
+			    ZPOOL_REWIND_POLICY, policy) != 0) {
+				fatal("can't open '%s': %s",
+				    target, strerror(ENOMEM));
+			}
+			/* Fall back to a verbatim import if needed. */
+			if ((error = spa_import(name, cfg, NULL,
+			    ZFS_IMPORT_MISSING_LOG)) != 0) {
+				error = spa_import(name, cfg, NULL,
+				    ZFS_IMPORT_VERBATIM);
+			}
+		}
+	}
+
+	if (error == 0) {
+		if (strpbrk(target, "/@") == NULL || dump_opt['R']) {
+			error = spa_open_rewind(target, &spa, FTAG, policy,
+			    NULL);
+			if (error) {
+				/*
+				 * If we're missing the log device then
+				 * try opening the pool after clearing the
+				 * log state.
+				 */
+				mutex_enter(&spa_namespace_lock);
+				if ((spa = spa_lookup(target)) != NULL &&
+				    spa->spa_log_state == SPA_LOG_MISSING) {
+					spa->spa_log_state = SPA_LOG_CLEAR;
+					error = 0;
+				}
+				mutex_exit(&spa_namespace_lock);
+
+				if (!error) {
+					error = spa_open_rewind(target, &spa,
+					    FTAG, policy, NULL);
+				}
+			}
+		} else {
+			/* Target names a dataset, not a whole pool. */
+			error = dmu_objset_own(target, DMU_OST_ANY,
+			    B_TRUE, FTAG, &os);
+		}
+	}
+	nvlist_free(policy);
+
+	if (error)
+		fatal("can't open '%s': %s", target, strerror(error));
+
+	argv++;
+	argc--;
+	if (!dump_opt['R']) {
+		/* Remaining arguments select specific object numbers. */
+		if (argc > 0) {
+			zopt_objects = argc;
+			zopt_object = calloc(zopt_objects, sizeof (uint64_t));
+			for (i = 0; i < zopt_objects; i++) {
+				errno = 0;
+				zopt_object[i] = strtoull(argv[i], NULL, 0);
+				if (zopt_object[i] == 0 && errno != 0)
+					fatal("bad number %s: %s",
+					    argv[i], strerror(errno));
+			}
+		}
+		if (os != NULL) {
+			dump_dir(os);
+		} else if (zopt_objects > 0 && !dump_opt['m']) {
+			dump_dir(spa->spa_meta_objset);
+		} else {
+			dump_zpool(spa);
+		}
+	} else {
+		/* -R: each remaining argument is a block specifier. */
+		flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
+		flagbits['c'] = ZDB_FLAG_CHECKSUM;
+		flagbits['d'] = ZDB_FLAG_DECOMPRESS;
+		flagbits['e'] = ZDB_FLAG_BSWAP;
+		flagbits['g'] = ZDB_FLAG_GBH;
+		flagbits['i'] = ZDB_FLAG_INDIRECT;
+		flagbits['p'] = ZDB_FLAG_PHYS;
+		flagbits['r'] = ZDB_FLAG_RAW;
+
+		for (i = 0; i < argc; i++)
+			zdb_read_block(argv[i], spa);
+	}
+
+	(os != NULL) ? dmu_objset_disown(os, FTAG) : spa_close(spa, FTAG);
+
+	fuid_table_destroy();
+	sa_loaded = B_FALSE;
+
+	libzfs_fini(g_zfs);
+	kernel_fini();
+
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb_il.c b/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb_il.c
new file mode 100644
index 0000000000000000000000000000000000000000..a0ed985f52b77fb9007594fdc9e14f358b771302
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zdb/zdb_il.c
@@ -0,0 +1,384 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Print intent log header and statistics.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <sys/zfs_context.h>
+#include <sys/spa.h>
+#include <sys/dmu.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <sys/zil.h>
+#include <sys/zil_impl.h>
+
+/* Per-option verbosity counters; defined in zdb.c. */
+extern uint8_t dump_opt[256];
+
+/* Indentation prepended to every record-detail line ("\t\t\t"). */
+static char prefix[4] = "\t\t\t";
+
+/*
+ * Format the given block pointer and print it on a line of its own,
+ * preceded by the caller-supplied indentation prefix.
+ */
+static void
+print_log_bp(const blkptr_t *bp, const char *prefix)
+{
+	char bpbuf[BP_SPRINTF_LEN];
+
+	sprintf_blkptr(bpbuf, bp);
+	(void) printf("%s%s\n", prefix, bpbuf);
+}
+
+/*
+ * Print a create-family record (TX_CREATE*, TX_MKDIR*, TX_SYMLINK,
+ * TX_MKXATTR).  The object name is stored immediately after the
+ * fixed-size record; for the *_ATTR variants it is preceded by a
+ * variable-size extended-attribute block that must be skipped first.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_create(zilog_t *zilog, int txtype, lr_create_t *lr)
+{
+	time_t crtime = lr->lr_crtime[0];	/* creation time, seconds part */
+	char *name, *link;
+	lr_attr_t *lrattr;
+
+	/* The name follows the fixed-size portion of the record. */
+	name = (char *)(lr + 1);
+
+	if (lr->lr_common.lrc_txtype == TX_CREATE_ATTR ||
+	    lr->lr_common.lrc_txtype == TX_MKDIR_ATTR) {
+		/* Skip the extended-attribute data that precedes the name. */
+		lrattr = (lr_attr_t *)(lr + 1);
+		name += ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
+	}
+
+	if (txtype == TX_SYMLINK) {
+		/* The symlink target is packed right after the name. */
+		link = name + strlen(name) + 1;
+		(void) printf("%s%s -> %s\n", prefix, name, link);
+	} else if (txtype != TX_MKXATTR) {
+		(void) printf("%s%s\n", prefix, name);
+	}
+
+	(void) printf("%s%s", prefix, ctime(&crtime));
+	(void) printf("%sdoid %llu, foid %llu, mode %llo\n", prefix,
+	    (u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_foid,
+	    (longlong_t)lr->lr_mode);
+	(void) printf("%suid %llu, gid %llu, gen %llu, rdev 0x%llx\n", prefix,
+	    (u_longlong_t)lr->lr_uid, (u_longlong_t)lr->lr_gid,
+	    (u_longlong_t)lr->lr_gen, (u_longlong_t)lr->lr_rdev);
+}
+
+/*
+ * Print a TX_REMOVE/TX_RMDIR record: the parent directory object id and
+ * the victim name stored immediately after the fixed-size record.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_remove(zilog_t *zilog, int txtype, lr_remove_t *lr)
+{
+	char *name = (char *)(lr + 1);
+
+	(void) printf("%sdoid %llu, name %s\n", prefix,
+	    (u_longlong_t)lr->lr_doid, name);
+}
+
+/*
+ * Print a TX_LINK record: directory object id, the object being linked,
+ * and the new link name stored immediately after the fixed-size record.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_link(zilog_t *zilog, int txtype, lr_link_t *lr)
+{
+	char *name = (char *)(lr + 1);
+
+	(void) printf("%sdoid %llu, link_obj %llu, name %s\n", prefix,
+	    (u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_link_obj,
+	    name);
+}
+
+/*
+ * Print a TX_RENAME record: source and target directory object ids,
+ * then the two NUL-terminated names packed back-to-back after the
+ * fixed-size record.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_rename(zilog_t *zilog, int txtype, lr_rename_t *lr)
+{
+	char *src_name = (char *)(lr + 1);
+	char *tgt_name = src_name + strlen(src_name) + 1;
+
+	(void) printf("%ssdoid %llu, tdoid %llu\n", prefix,
+	    (u_longlong_t)lr->lr_sdoid, (u_longlong_t)lr->lr_tdoid);
+	(void) printf("%ssrc %s tgt %s\n", prefix, src_name, tgt_name);
+}
+
+/*
+ * Print a TX_WRITE/TX_WRITE2 record.  At verbosity >= 5 the payload is
+ * also dumped: either the data embedded after the record, or, when the
+ * record carries only a block pointer (reclen == sizeof (lr_write_t)),
+ * the block it points to, read back from the pool.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_write(zilog_t *zilog, int txtype, lr_write_t *lr)
+{
+	char *data, *dlimit;
+	blkptr_t *bp = &lr->lr_blkptr;
+	zbookmark_t zb;
+	char buf[SPA_MAXBLOCKSIZE];
+	int verbose = MAX(dump_opt['d'], dump_opt['i']);
+	int error;
+
+	(void) printf("%sfoid %llu, offset %llx, length %llx\n", prefix,
+	    (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
+	    (u_longlong_t)lr->lr_length);
+
+	/* TX_WRITE2 records carry no payload of their own. */
+	if (txtype == TX_WRITE2 || verbose < 5)
+		return;
+
+	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
+		/* Indirect write: the data lives in the pointed-to block. */
+		(void) printf("%shas blkptr, %s\n", prefix,
+		    bp->blk_birth >= spa_first_txg(zilog->zl_spa) ?
+		    "will claim" : "won't claim");
+		print_log_bp(bp, prefix);
+
+		if (BP_IS_HOLE(bp)) {
+			(void) printf("\t\t\tLSIZE 0x%llx\n",
+			    (u_longlong_t)BP_GET_LSIZE(bp));
+		}
+		if (bp->blk_birth == 0) {
+			bzero(buf, sizeof (buf));
+			(void) printf("%s<hole>\n", prefix);
+			return;
+		}
+		if (bp->blk_birth < zilog->zl_header->zh_claim_txg) {
+			(void) printf("%s<block already committed>\n", prefix);
+			return;
+		}
+
+		SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os),
+		    lr->lr_foid, ZB_ZIL_LEVEL,
+		    lr->lr_offset / BP_GET_LSIZE(bp));
+
+		error = zio_wait(zio_read(NULL, zilog->zl_spa,
+		    bp, buf, BP_GET_LSIZE(bp), NULL, NULL,
+		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &zb));
+		if (error)
+			return;
+		data = buf;
+	} else {
+		/* Copied write: the data follows the record in the log. */
+		data = (char *)(lr + 1);
+	}
+
+	/* Dump at most 20 bytes unless verbosity is 6 or higher. */
+	dlimit = data + MIN(lr->lr_length,
+	    (verbose < 6 ? 20 : SPA_MAXBLOCKSIZE));
+
+	(void) printf("%s", prefix);
+	while (data < dlimit) {
+		/*
+		 * Go through unsigned char: passing a negative char to
+		 * isprint() is undefined behavior, and %2X would
+		 * sign-extend it (printing e.g. FFFFFF80).
+		 */
+		if (isprint((unsigned char)*data))
+			(void) printf("%c ", *data);
+		else
+			(void) printf("%2X", (unsigned char)*data);
+		data++;
+	}
+	(void) printf("\n");
+}
+
+/*
+ * Print a TX_TRUNCATE record: the object id and the truncated range.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_truncate(zilog_t *zilog, int txtype, lr_truncate_t *lr)
+{
+	/*
+	 * lr_offset is cast to u_longlong_t (not longlong_t) so the cast
+	 * matches the unsigned %llx conversion, like the other fields.
+	 */
+	(void) printf("%sfoid %llu, offset 0x%llx, length 0x%llx\n", prefix,
+	    (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
+	    (u_longlong_t)lr->lr_length);
+}
+
+/*
+ * Print a TX_SETATTR record: the attribute mask, followed by one line
+ * for each attribute the mask marks as changed.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_setattr(zilog_t *zilog, int txtype, lr_setattr_t *lr)
+{
+	time_t at = (time_t)lr->lr_atime[0];
+	time_t mt = (time_t)lr->lr_mtime[0];
+	uint64_t mask = lr->lr_mask;
+
+	(void) printf("%sfoid %llu, mask 0x%llx\n", prefix,
+	    (u_longlong_t)lr->lr_foid, (u_longlong_t)mask);
+
+	if (mask & AT_MODE)
+		(void) printf("%sAT_MODE  %llo\n", prefix,
+		    (longlong_t)lr->lr_mode);
+
+	if (mask & AT_UID)
+		(void) printf("%sAT_UID   %llu\n", prefix,
+		    (u_longlong_t)lr->lr_uid);
+
+	if (mask & AT_GID)
+		(void) printf("%sAT_GID   %llu\n", prefix,
+		    (u_longlong_t)lr->lr_gid);
+
+	if (mask & AT_SIZE)
+		(void) printf("%sAT_SIZE  %llu\n", prefix,
+		    (u_longlong_t)lr->lr_size);
+
+	if (mask & AT_ATIME)
+		(void) printf("%sAT_ATIME %llu.%09llu %s", prefix,
+		    (u_longlong_t)lr->lr_atime[0],
+		    (u_longlong_t)lr->lr_atime[1],
+		    ctime(&at));
+
+	if (mask & AT_MTIME)
+		(void) printf("%sAT_MTIME %llu.%09llu %s", prefix,
+		    (u_longlong_t)lr->lr_mtime[0],
+		    (u_longlong_t)lr->lr_mtime[1],
+		    ctime(&mt));
+}
+
+/*
+ * Print an ACL record (TX_ACL_V0 or TX_ACL_ACL): the object id and the
+ * number of ACL entries carried by the record.
+ */
+/* ARGSUSED */
+static void
+zil_prt_rec_acl(zilog_t *zilog, int txtype, lr_acl_t *lr)
+{
+	(void) printf("%sfoid %llu, aclcnt %llu\n", prefix,
+	    (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_aclcnt);
+}
+
+/*
+ * Dispatch table indexed by ZIL transaction type (TX_*).  Slot 0 has no
+ * print function and its zri_count accumulates the grand total; the
+ * names are padded to a fixed width so the statistics columns line up.
+ */
+typedef void (*zil_prt_rec_func_t)();
+typedef struct zil_rec_info {
+	zil_prt_rec_func_t	zri_print;	/* per-type print routine */
+	char			*zri_name;	/* padded TX_* type name */
+	uint64_t		zri_count;	/* records seen during parse */
+} zil_rec_info_t;
+
+static zil_rec_info_t zil_rec_info[TX_MAX_TYPE] = {
+	{	NULL,			"Total              " },
+	{	zil_prt_rec_create,	"TX_CREATE          " },
+	{	zil_prt_rec_create,	"TX_MKDIR           " },
+	{	zil_prt_rec_create,	"TX_MKXATTR         " },
+	{	zil_prt_rec_create,	"TX_SYMLINK         " },
+	{	zil_prt_rec_remove,	"TX_REMOVE          " },
+	{	zil_prt_rec_remove,	"TX_RMDIR           " },
+	{	zil_prt_rec_link,	"TX_LINK            " },
+	{	zil_prt_rec_rename,	"TX_RENAME          " },
+	{	zil_prt_rec_write,	"TX_WRITE           " },
+	{	zil_prt_rec_truncate,	"TX_TRUNCATE        " },
+	{	zil_prt_rec_setattr,	"TX_SETATTR         " },
+	{	zil_prt_rec_acl,	"TX_ACL_V0          " },
+	{	zil_prt_rec_acl,	"TX_ACL_ACL         " },
+	{	zil_prt_rec_create,	"TX_CREATE_ACL      " },
+	{	zil_prt_rec_create,	"TX_CREATE_ATTR     " },
+	{	zil_prt_rec_create,	"TX_CREATE_ACL_ATTR " },
+	{	zil_prt_rec_create,	"TX_MKDIR_ACL       " },
+	{	zil_prt_rec_create,	"TX_MKDIR_ATTR      " },
+	{	zil_prt_rec_create,	"TX_MKDIR_ACL_ATTR  " },
+	{	zil_prt_rec_write,	"TX_WRITE2          " },
+};
+
+/*
+ * zil_parse() record callback: print one log record (at verbosity >= 3
+ * its type-specific detail too) and tally it in zil_rec_info[].
+ */
+/* ARGSUSED */
+static int
+print_log_record(zilog_t *zilog, lr_t *lr, void *arg, uint64_t claim_txg)
+{
+	int txtype;
+	int verbose = MAX(dump_opt['d'], dump_opt['i']);
+
+	/*
+	 * Strip the TX_CI (case-insensitive) flag explicitly rather than
+	 * relying on implementation-defined truncation of the uint64_t
+	 * lrc_txtype into an int.
+	 */
+	txtype = (int)(lr->lrc_txtype & ~TX_CI);
+
+	ASSERT(txtype != 0 && (uint_t)txtype < TX_MAX_TYPE);
+	ASSERT(lr->lrc_txg);
+
+	(void) printf("\t\t%s%s len %6llu, txg %llu, seq %llu\n",
+	    (lr->lrc_txtype & TX_CI) ? "CI-" : "",
+	    zil_rec_info[txtype].zri_name,
+	    (u_longlong_t)lr->lrc_reclen,
+	    (u_longlong_t)lr->lrc_txg,
+	    (u_longlong_t)lr->lrc_seq);
+
+	if (txtype && verbose >= 3)
+		zil_rec_info[txtype].zri_print(zilog, txtype, lr);
+
+	zil_rec_info[txtype].zri_count++;
+	zil_rec_info[0].zri_count++;	/* slot 0 holds the grand total */
+
+	return (0);
+}
+
+/*
+ * zil_parse() block callback: print a one-line summary for each intent
+ * log block, including its claim status and (at verbosity >= 5) the
+ * full block pointer.
+ */
+/* ARGSUSED */
+static int
+print_log_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
+{
+	char bpbuf[BP_SPRINTF_LEN + 10];
+	int verbose = MAX(dump_opt['d'], dump_opt['i']);
+	char *claim_str;
+
+	if (verbose <= 3)
+		return (0);
+
+	/* Decide how this block relates to log claiming on import. */
+	if (claim_txg != 0)
+		claim_str = "already claimed";
+	else if (bp->blk_birth >= spa_first_txg(zilog->zl_spa))
+		claim_str = "will claim";
+	else
+		claim_str = "won't claim";
+
+	/* Only show the full block pointer at high verbosity. */
+	if (verbose >= 5) {
+		(void) strcpy(bpbuf, ", ");
+		sprintf_blkptr(bpbuf + strlen(bpbuf), bp);
+	} else {
+		bpbuf[0] = '\0';
+	}
+
+	(void) printf("\tBlock seqno %llu, %s%s\n",
+	    (u_longlong_t)bp->blk_cksum.zc_word[ZIL_ZC_SEQ], claim_str, bpbuf);
+
+	return (0);
+}
+
+/*
+ * Print the per-record-type counters accumulated by print_log_record().
+ * Prints nothing if no records were seen.
+ */
+static void
+print_log_stats(int verbose)
+{
+	uint64_t p10;
+	int i, w;
+
+	if (verbose > 3)
+		(void) printf("\n");
+
+	if (zil_rec_info[0].zri_count == 0)
+		return;
+
+	/*
+	 * Compute the column width needed for the largest count (the
+	 * total in slot 0).  p10 is 64-bit so the multiply cannot
+	 * overflow (as an int would) for totals beyond INT_MAX.
+	 */
+	for (w = 1, p10 = 10; zil_rec_info[0].zri_count >= p10; p10 *= 10)
+		w++;
+
+	for (i = 0; i < TX_MAX_TYPE; i++) {
+		if (zil_rec_info[i].zri_count || verbose >= 3) {
+			(void) printf("\t\t%s %*llu\n",
+			    zil_rec_info[i].zri_name, w,
+			    (u_longlong_t)zil_rec_info[i].zri_count);
+		}
+	}
+	(void) printf("\n");
+}
+
+/*
+ * Entry point: dump the ZIL header for the given dataset and, at
+ * verbosity >= 2, walk the log printing every block and record and a
+ * closing per-type summary.
+ */
+/* ARGSUSED */
+void
+dump_intent_log(zilog_t *zilog)
+{
+	const zil_header_t *zh = zilog->zl_header;
+	int verbose = MAX(dump_opt['d'], dump_opt['i']);
+	int t;
+
+	/* Nothing to show for an empty log or at verbosity zero. */
+	if (zh->zh_log.blk_birth == 0 || verbose < 1)
+		return;
+
+	(void) printf("\n    ZIL header: claim_txg %llu, "
+	    "claim_blk_seq %llu, claim_lr_seq %llu",
+	    (u_longlong_t)zh->zh_claim_txg,
+	    (u_longlong_t)zh->zh_claim_blk_seq,
+	    (u_longlong_t)zh->zh_claim_lr_seq);
+	(void) printf(" replay_seq %llu, flags 0x%llx\n",
+	    (u_longlong_t)zh->zh_replay_seq, (u_longlong_t)zh->zh_flags);
+
+	/* Reset the per-type counters before the walk. */
+	for (t = 0; t < TX_MAX_TYPE; t++)
+		zil_rec_info[t].zri_count = 0;
+
+	if (verbose < 2)
+		return;
+
+	(void) printf("\n");
+	(void) zil_parse(zilog, print_log_block, print_log_record, NULL,
+	    zh->zh_claim_txg);
+	print_log_stats(verbose);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs.8 b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs.8
new file mode 100644
index 0000000000000000000000000000000000000000..3a7568177fa5548b351c7c371f0a1278fd162835
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs.8
@@ -0,0 +1,3255 @@
+'\" te
+.\" Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>.
+.\" All Rights Reserved.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" Copyright (c) 2010, Sun Microsystems, Inc. All Rights Reserved.
+.\" Copyright (c) 2012 by Delphix. All rights reserved.
+.\" Copyright (c) 2012 Nexenta Systems, Inc. All Rights Reserved.
+.\" Copyright (c) 2012, Joyent, Inc. All rights reserved.
+.\" Copyright (c) 2011, Pawel Jakub Dawidek <pjd@FreeBSD.org>
+.\" Copyright (c) 2012, Glen Barber <gjb@FreeBSD.org>
+.\" Copyright (c) 2012, Bryan Drewery <bdrewery@FreeBSD.org>
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 26, 2012
+.Dt ZFS 8
+.Os
+.Sh NAME
+.Nm zfs
+.Nd configures ZFS file systems
+.Sh SYNOPSIS
+.Nm
+.Op Fl \&?
+.Nm
+.Cm create
+.Op Fl pu
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ... filesystem
+.Nm
+.Cm create
+.Op Fl ps
+.Op Fl b Ar blocksize
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Fl V
+.Ar size volume
+.Nm
+.Cm destroy
+.Op Fl fnpRrv
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm destroy
+.Op Fl dnpRrv
+.Sm off
+.Ar snapshot
+.Op % Ns Ar snapname
+.Op , Ns Ar ...
+.Sm on
+.Nm
+.Cm snapshot
+.Op Fl r
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ... filesystem@snapname Ns | Ns Ar volume@snapname
+.Nm
+.Cm rollback
+.Op Fl rRf
+.Ar snapshot
+.Nm
+.Cm clone
+.Op Fl p
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ... snapshot filesystem Ns | Ns Ar volume
+.Nm
+.Cm promote
+.Ar clone-filesystem
+.Nm
+.Cm rename
+.Op Fl f
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm rename
+.Op Fl f
+.Fl p
+.Ar filesystem Ns | Ns Ar volume
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm rename
+.Fl r
+.Ar snapshot snapshot
+.Nm
+.Cm rename
+.Fl u
+.Op Fl p
+.Ar filesystem filesystem
+.Nm
+.Cm list
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl H
+.Op Fl o Ar property Ns Op , Ns Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Op Fl s Ar property
+.Ar ...
+.Op Fl S Ar property
+.Ar ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm get
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl Hp
+.Op Fl o Ar all | field Ns Op , Ns Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Op Fl s Ar source Ns Op , Ns Ar ...
+.Ar all | property Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm inherit
+.Op Fl rS
+.Ar property
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm upgrade
+.Op Fl v
+.Nm
+.Cm upgrade
+.Op Fl r
+.Op Fl V Ar version
+.Fl a | Ar filesystem
+.Nm
+.Cm userspace
+.Op Fl Hinp
+.Op Fl o Ar field Ns Op , Ns Ar ...
+.Op Fl s Ar field
+.Ar ...
+.Op Fl S Ar field
+.Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar snapshot
+.Nm
+.Cm groupspace
+.Op Fl Hinp
+.Op Fl o Ar field Ns Op , Ns Ar ...
+.Op Fl s Ar field
+.Ar ...
+.Op Fl S Ar field
+.Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar snapshot
+.Nm
+.Cm mount
+.Nm
+.Cm mount
+.Op Fl vO
+.Op Fl o Ar property Ns Op , Ns Ar ...
+.Fl a | Ar filesystem
+.Nm
+.Cm unmount
+.Op Fl f
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Nm
+.Cm share
+.Fl a | Ar filesystem
+.Nm
+.Cm unshare
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Nm
+.Cm send
+.Op Fl DnPpRv
+.Op Fl i Ar snapshot | Fl I Ar snapshot
+.Ar snapshot
+.Nm
+.Cm receive
+.Op Fl vnFu
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Nm
+.Cm receive
+.Op Fl vnFu
+.Op Fl d | e
+.Ar filesystem
+.Nm
+.Cm allow
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Op Fl ldug
+.Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ...
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Op Fl ld
+.Fl e
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Fl c
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm allow
+.Fl s
+.Ar @setname
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl rldug
+.Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ...
+.Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl rld
+.Fl e
+.Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl c
+.Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl s
+.Ar @setname
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Nm
+.Cm hold
+.Op Fl r
+.Ar tag snapshot ...
+.Nm
+.Cm holds
+.Op Fl r
+.Ar snapshot ...
+.Nm
+.Cm release
+.Op Fl r
+.Ar tag snapshot ...
+.Nm
+.Cm diff
+.Op Fl FHt
+.Ar snapshot
+.Op Ar snapshot Ns | Ns Ar filesystem
+.Nm
+.Cm jail
+.Ar jailid Ns | Ns Ar jailname filesystem
+.Nm
+.Cm unjail
+.Ar jailid Ns | Ns Ar jailname filesystem
+.Sh DESCRIPTION
+The
+.Nm
+command configures
+.Tn ZFS
+datasets within a
+.Tn ZFS
+storage pool, as described in
+.Xr zpool 8 .
+A dataset is identified by a unique path within the
+.Tn ZFS
+namespace. For example:
+.Bd -ragged -offset 4n
+.No pool/ Ns Brq filesystem,volume,snapshot
+.Ed
+.Pp
+where the maximum length of a dataset name is
+.Dv MAXNAMELEN
+(256 bytes).
+.Pp
+A dataset can be one of the following:
+.Bl -hang -width 12n
+.It Sy file system
+A
+.Tn ZFS
+dataset of type
+.Em filesystem
+can be mounted within the standard system namespace and behaves like other file
+systems. While
+.Tn ZFS
+file systems are designed to be
+.Tn POSIX
+compliant, known issues exist that prevent compliance in some cases.
+Applications that depend on standards conformance might fail due to nonstandard
+behavior when checking file system free space.
+.It Sy volume
+A logical volume exported as a raw or block device. This type of dataset should
+only be used under special circumstances. File systems are typically used in
+most environments.
+.It Sy snapshot
+A read-only version of a file system or volume at a given point in time. It is
+specified as
+.Em filesystem@name
+or
+.Em volume@name .
+.El
+.Ss ZFS File System Hierarchy
+A
+.Tn ZFS
+storage pool is a logical collection of devices that provide space for
+datasets. A storage pool is also the root of the
+.Tn ZFS
+file system hierarchy.
+.Pp
+The root of the pool can be accessed as a file system, such as mounting and
+unmounting, taking snapshots, and setting properties. The physical storage
+characteristics, however, are managed by the
+.Xr zpool 8
+command.
+.Pp
+See
+.Xr zpool 8
+for more information on creating and administering pools.
+.Ss Snapshots
+A snapshot is a read-only copy of a file system or volume. Snapshots can be
+created extremely quickly, and initially consume no additional space within the
+pool. As data within the active dataset changes, the snapshot consumes more
+data than would otherwise be shared with the active dataset.
+.Pp
+Snapshots can have arbitrary names. Snapshots of volumes can be cloned or
+rolled back, but cannot be accessed independently.
+.Pp
+File system snapshots can be accessed under the
+.Pa \&.zfs/snapshot
+directory in the root of the file system. Snapshots are automatically mounted
+on demand and may be unmounted at regular intervals. The visibility of the
+.Pa \&.zfs
+directory can be controlled by the
+.Sy snapdir
+property.
+.Ss Clones
+A clone is a writable volume or file system whose initial contents are the same
+as another dataset. As with snapshots, creating a clone is nearly
+instantaneous, and initially consumes no additional space.
+.Pp
+Clones can only be created from a snapshot. When a snapshot is cloned, it
+creates an implicit dependency between the parent and child. Even though the
+clone is created somewhere else in the dataset hierarchy, the original snapshot
+cannot be destroyed as long as a clone exists. The
+.Sy origin
+property exposes this dependency, and the
+.Cm destroy
+command lists any such dependencies, if they exist.
+.Pp
+The clone parent-child dependency relationship can be reversed by using the
+.Cm promote
+subcommand. This causes the "origin" file system to become a clone of the
+specified file system, which makes it possible to destroy the file system that
+the clone was created from.
+.Ss Mount Points
+Creating a
+.Tn ZFS
+file system is a simple operation, so the number of file systems per system is
+likely to be numerous. To cope with this,
+.Tn ZFS
+automatically manages mounting and unmounting file systems without the need to
+edit the
+.Pa /etc/fstab
+file. All automatically managed file systems are mounted by
+.Tn ZFS
+at boot time.
+.Pp
+By default, file systems are mounted under
+.Pa /path ,
+where
+.Ar path
+is the name of the file system in the
+.Tn ZFS
+namespace. Directories are created and destroyed as needed.
+.Pp
+A file system can also have a mount point set in the
+.Sy mountpoint
+property. This directory is created as needed, and
+.Tn ZFS
+automatically mounts the file system when the
+.Qq Nm Cm mount Fl a
+command is invoked (without editing
+.Pa /etc/fstab ) .
+The
+.Sy mountpoint
+property can be inherited, so if
+.Em pool/home
+has a mount point of
+.Pa /home ,
+then
+.Em pool/home/user
+automatically inherits a mount point of
+.Pa /home/user .
+.Pp
+A file system
+.Sy mountpoint
+property of
+.Cm none
+prevents the file system from being mounted.
+.Pp
+If needed,
+.Tn ZFS
+file systems can also be managed with traditional tools
+.Pq Xr mount 8 , Xr umount 8 , Xr fstab 5 .
+If a file system's mount point is set to
+.Cm legacy ,
+.Tn ZFS
+makes no attempt to manage the file system, and the administrator is
+responsible for mounting and unmounting the file system.
+.Ss Jails
+.No A Tn ZFS
+dataset can be attached to a jail by using the
+.Qq Nm Cm jail
+subcommand. You cannot attach a dataset to one jail and the children of the
+same dataset to another jail. To allow management of the dataset from within
+a jail, the
+.Sy jailed
+property has to be set and the jail needs access to the
+.Pa /dev/zfs
+device. The
+.Sy quota
+property cannot be changed from within a jail. See
+.Xr jail 8
+for information on how to allow mounting
+.Tn ZFS
+datasets from within a jail.
+.Pp
+.No A Tn ZFS
+dataset can be detached from a jail using the
+.Qq Nm Cm unjail
+subcommand.
+.Pp
+After a dataset is attached to a jail and the jailed property is set, a jailed
+file system cannot be mounted outside the jail, since the jail administrator
+might have set the mount point to an unacceptable value.
+.Ss Deduplication
+Deduplication is the process for removing redundant data at the block-level,
+reducing the total amount of data stored. If a file system has the
+.Cm dedup
+property enabled, duplicate data blocks are removed synchronously. The result
+is that only unique data is stored and common components are shared among
+files.
+.Ss Native Properties
+Properties are divided into two types, native properties and user-defined (or
+"user") properties. Native properties either export internal statistics or
+control
+.Tn ZFS
+behavior. In addition, native properties are either editable or read-only. User
+properties have no effect on
+.Tn ZFS
+behavior, but you can use them to annotate datasets in a way that is meaningful
+in your environment. For more information about user properties, see the
+.Qq Sx User Properties
+section, below.
+.Pp
+Every dataset has a set of properties that export statistics about the dataset
+as well as control various behaviors. Properties are inherited from the parent
+unless overridden by the child. Some properties apply only to certain types of
+datasets (file systems, volumes, or snapshots).
+.Pp
+The values of numeric properties can be specified using human-readable suffixes
+(for example,
+.Sy k , KB , M , Gb ,
+and so forth, up to
+.Sy Z
+for zettabyte). The following are all valid (and equal) specifications:
+.Bd -ragged -offset 4n
+1536M, 1.5g, 1.50GB
+.Ed
+.Pp
+The values of non-numeric properties are case sensitive and must be lowercase,
+except for
+.Sy mountpoint , sharenfs , No and Sy sharesmb .
+.Pp
+The following native properties consist of read-only statistics about the
+dataset. These properties can be neither set, nor inherited. Native properties
+apply to all dataset types unless otherwise noted.
+.Bl -tag -width 2n
+.It Sy available
+The amount of space available to the dataset and all its children, assuming
+that there is no other activity in the pool. Because space is shared within a
+pool, availability can be limited by any number of factors, including physical
+pool size, quotas, reservations, or other datasets within the pool.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy avail .
+.It Sy compressratio
+For non-snapshots, the compression ratio achieved for the
+.Sy used
+space of this dataset, expressed as a multiplier.  The
+.Sy used
+property includes descendant datasets, and, for clones, does not include
+the space shared with the origin snapshot.  For snapshots, the
+.Sy compressratio
+is the same as the
+.Sy refcompressratio
+property. Compression can be turned on by running:
+.Qq Nm Cm set compression=on Ar dataset
+The default value is
+.Cm off .
+.It Sy creation
+The time this dataset was created.
+.It Sy clones
+For snapshots, this property is a comma-separated list of filesystems or
+volumes which are clones of this snapshot.  The clones'
+.Sy origin
+property is this snapshot.  If the
+.Sy clones
+property is not empty, then this snapshot can not be destroyed (even with the
+.Fl r
+or
+.Fl f
+options).
+.It Sy defer_destroy
+This property is
+.Cm on
+if the snapshot has been marked for deferred destroy by using the
+.Qq Nm Cm destroy -d
+command. Otherwise, the property is
+.Cm off .
+.It Sy mounted
+For file systems, indicates whether the file system is currently mounted. This
+property can be either
+.Cm yes
+or
+.Cm no .
+.It Sy origin
+For cloned file systems or volumes, the snapshot from which the clone was
+created. See also the
+.Sy clones
+property.
+.It Sy referenced
+The amount of data that is accessible by this dataset, which may or may not be
+shared with other datasets in the pool. When a snapshot or clone is created, it
+initially references the same amount of space as the file system or snapshot it
+was created from, since its contents are identical.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy refer .
+.It Sy refcompressratio
+The compression ratio achieved for the
+.Sy referenced
+space of this dataset, expressed as a multiplier.  See also the
+.Sy compressratio
+property.
+.It Sy type
+The type of dataset:
+.Sy filesystem , volume , No or Sy snapshot .
+.It Sy used
+The amount of space consumed by this dataset and all its descendents. This is
+the value that is checked against this dataset's quota and reservation. The
+space used does not include this dataset's reservation, but does take into
+account the reservations of any descendent datasets. The amount of space that a
+dataset consumes from its parent, as well as the amount of space that is freed
+if this dataset is recursively destroyed, is the greater of its space used and
+its reservation.
+.Pp
+When snapshots (see the
+.Qq Sx Snapshots
+section) are created, their space is
+initially shared between the snapshot and the file system, and possibly with
+previous snapshots. As the file system changes, space that was previously
+shared becomes unique to the snapshot, and counted in the snapshot's space
+used. Additionally, deleting snapshots can increase the amount of space unique
+to (and used by) other snapshots.
+.Pp
+The amount of space used, available, or referenced does not take into account
+pending changes. Pending changes are generally accounted for within a few
+seconds. Committing a change to a disk using
+.Xr fsync 2
+or
+.Sy O_SYNC
+does not necessarily guarantee that the space usage information is updated
+immediately.
+.It Sy usedby*
+The
+.Sy usedby*
+properties decompose the
+.Sy used
+properties into the various reasons that space is used. Specifically,
+.Sy used No =
+.Sy usedbysnapshots + usedbydataset + usedbychildren + usedbyrefreservation .
+These properties are only available for datasets created
+with
+.Tn ZFS
+pool version 13 pools and higher.
+.It Sy usedbysnapshots
+The amount of space consumed by snapshots of this dataset. In particular, it is
+the amount of space that would be freed if all of this dataset's snapshots were
+destroyed. Note that this is not simply the sum of the snapshots'
+.Sy used
+properties because space can be shared by multiple snapshots.
+.It Sy usedbydataset
+The amount of space used by this dataset itself, which would be freed if the
+dataset were destroyed (after first removing any
+.Sy refreservation
+and destroying any necessary snapshots or descendents).
+.It Sy usedbychildren
+The amount of space used by children of this dataset, which would be freed if
+all the dataset's children were destroyed.
+.It Sy usedbyrefreservation
+The amount of space used by a
+.Sy refreservation
+set on this dataset, which would be freed if the
+.Sy refreservation
+was removed.
+.It Sy userused@ Ns Ar user
+The amount of space consumed by the specified user in this dataset. Space is
+charged to the owner of each file, as displayed by
+.Qq Nm ls Fl l .
+The amount of space charged is displayed by
+.Qq Nm du
+and
+.Qq Nm ls Fl s .
+See the
+.Qq Nm Cm userspace
+subcommand for more information.
+.Pp
+Unprivileged users can access only their own space usage. The root user, or a
+user who has been granted the
+.Sy userused
+privilege with
+.Qq Nm Cm allow ,
+can access everyone's usage.
+.Pp
+The
+.Sy userused@ Ns ...
+properties are not displayed by
+.Qq Nm Cm get all .
+The user's name must be appended after the
+.Sy @
+symbol, using one of the following forms:
+.Bl -bullet -offset 2n
+.It
+POSIX name (for example,
+.Em joe )
+.It
+POSIX numeric ID (for example,
+.Em 1001 )
+.El
+.It Sy userrefs
+This property is set to the number of user holds on this snapshot. User holds
+are set by using the
+.Qq Nm Cm hold
+command.
+.It Sy groupused@ Ns Ar group
+The amount of space consumed by the specified group in this dataset. Space is
+charged to the group of each file, as displayed by
+.Nm ls Fl l .
+See the
+.Sy userused@ Ns Ar user
+property for more information.
+.Pp
+Unprivileged users can only access their own groups' space usage. The root
+user, or a user who has been granted the
+.Sy groupused
+privilege with
+.Qq Nm Cm allow ,
+can access all groups' usage.
+.It Sy volblocksize Ns = Ns Ar blocksize
+For volumes, specifies the block size of the volume. The
+.Ar blocksize
+cannot be changed once the volume has been written, so it should be set at
+volume creation time. The default
+.Ar blocksize
+for volumes is 8 Kbytes. Any
+power of 2 from 512 bytes to 128 Kbytes is valid.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy volblock .
+.It Sy written
+The amount of
+.Sy referenced
+space written to this dataset since the previous snapshot.
+.It Sy written@ Ns Ar snapshot
+The amount of
+.Sy referenced
+space written to this dataset since the specified snapshot.  This is the space
+that is referenced by this dataset but was not referenced by the specified
+snapshot.
+.Pp
+The
+.Ar snapshot
+may be specified as a short snapshot name (just the part after the
+.Sy @ ) ,
+in which case it will be interpreted as a snapshot in the same filesystem as
+this dataset. The
+.Ar snapshot
+may be a full snapshot name
+.Pq Em filesystem@snapshot ,
+which for clones may be a snapshot in the origin's filesystem (or the origin of
+the origin's filesystem, etc).
+.El
+.Pp
+The following native properties can be used to change the behavior of a
+.Tn ZFS
+dataset.
+.Bl -tag -width 2n
+.It Xo
+.Sy aclinherit Ns = Ns Cm discard |
+.Cm noallow |
+.Cm restricted |
+.Cm passthrough |
+.Cm passthrough-x
+.Xc
+Controls how
+.Tn ACL
+entries are inherited when files and directories are created. A file system
+with an
+.Sy aclinherit
+property of
+.Cm discard
+does not inherit any
+.Tn ACL
+entries. A file system with an
+.Sy aclinherit
+property value of
+.Cm noallow
+only inherits inheritable
+.Tn ACL
+entries that specify "deny" permissions. The property value
+.Cm restricted
+(the default) removes the
+.Em write_acl
+and
+.Em write_owner
+permissions when the
+.Tn ACL
+entry is inherited. A file system with an
+.Sy aclinherit
+property value of
+.Cm passthrough
+inherits all inheritable
+.Tn ACL
+entries without any modifications made to the
+.Tn ACL
+entries when they are inherited. A file system with an
+.Sy aclinherit
+property value of
+.Cm passthrough-x
+has the same meaning as
+.Cm passthrough ,
+except that the
+.Em owner@ , group@ , No and Em everyone@ Tn ACE Ns s
+inherit the execute permission only if the file creation mode also requests the
+execute bit.
+.Pp
+When the property value is set to
+.Cm passthrough ,
+files are created with a mode determined by the inheritable
+.Tn ACE Ns s.
+If no inheritable
+.Tn ACE Ns s
+exist that affect the mode, then the mode is set in accordance to the requested
+mode from the application.
+.It Sy aclmode Ns = Ns Cm discard | groupmask | passthrough | restricted
+Controls how an
+.Tn ACL
+is modified during
+.Xr chmod 2 .
+A file system with an
+.Sy aclmode
+property of
+.Cm discard
+(the default) deletes all
+.Tn ACL
+entries that do not represent the mode of the file. An
+.Sy aclmode
+property of
+.Cm groupmask
+reduces permissions granted in all
+.Em ALLOW
+entries found in the
+.Tn ACL
+such that they are no greater than the group permissions specified by
+.Xr chmod 2 .
+A file system with an
+.Sy aclmode
+property of
+.Cm passthrough
+indicates that no changes are made to the
+.Tn ACL
+other than creating or updating the necessary
+.Tn ACL
+entries to represent the new mode of the file or directory.
+An
+.Sy aclmode
+property of
+.Cm restricted
+will cause the
+.Xr chmod 2
+operation to return an error when used on any file or directory which has
+a non-trivial
+.Tn ACL
+whose entries can not be represented by a mode.
+.Xr chmod 2
+is required to change the set user ID, set group ID, or sticky bits on a file
+or directory, as they do not have equivalent
+.Tn ACL
+entries.
+In order to use
+.Xr chmod 2
+on a file or directory with a non-trivial
+.Tn ACL
+when
+.Sy aclmode
+is set to
+.Cm restricted ,
+you must first remove all
+.Tn ACL
+entries which do not represent the current mode.
+.It Sy atime Ns = Ns Cm on | off
+Controls whether the access time for files is updated when they are read.
+Turning this property off avoids producing write traffic when reading files and
+can result in significant performance gains, though it might confuse mailers
+and other similar utilities. The default value is
+.Cm on .
+.It Sy canmount Ns = Ns Cm on | off | noauto
+If this property is set to
+.Cm off ,
+the file system cannot be mounted, and is ignored by
+.Qq Nm Cm mount Fl a .
+Setting this property to
+.Cm off
+is similar to setting the
+.Sy mountpoint
+property to
+.Cm none ,
+except that the dataset still has a normal
+.Sy mountpoint
+property, which can be inherited. Setting this property to
+.Cm off
+allows datasets to be used solely as a mechanism to inherit properties. One
+example of setting
+.Sy canmount Ns = Ns Cm off
+is to have two datasets with the same
+.Sy mountpoint ,
+so that the children of both datasets appear in the same directory, but might
+have different inherited characteristics.
+.Pp
+When the
+.Cm noauto
+value is set, a dataset can only be mounted and unmounted explicitly. The
+dataset is not mounted automatically when the dataset is created or imported,
+nor is it mounted by the
+.Qq Nm Cm mount Fl a
+command or unmounted by the
+.Qq Nm Cm umount Fl a
+command.
+.Pp
+This property is not inherited.
+.It Sy checksum Ns = Ns Cm on | off | fletcher2 | fletcher4 | sha256
+Controls the checksum used to verify data integrity. The default value is
+.Cm on ,
+which automatically selects an appropriate algorithm (currently,
+.Cm fletcher4 ,
+but this may change in future releases). The value
+.Cm off
+disables integrity checking on user data. Disabling checksums is
+.Em NOT
+a recommended practice.
+.It Sy compression Ns = Ns Cm on | off | lzjb | gzip | gzip- Ns Ar N | Cm zle
+Controls the compression algorithm used for this dataset. The
+.Cm lzjb
+compression algorithm is optimized for performance while providing decent data
+compression. Setting compression to
+.Cm on
+uses the
+.Cm lzjb
+compression algorithm. The
+.Cm gzip
+compression algorithm uses the same compression as the
+.Xr gzip 1
+command. You can specify the
+.Cm gzip
+level by using the value
+.Cm gzip- Ns Ar N
+where
+.Ar N
+is an integer from 1 (fastest) to 9 (best compression ratio). Currently,
+.Cm gzip
+is equivalent to
+.Cm gzip-6
+(which is also the default for
+.Xr gzip 1 ) .
+The
+.Cm zle
+compression algorithm compresses runs of zeros.
+.Pp
+This property can also be referred to by its shortened column name
+.Cm compress .
+Changing this property affects only newly-written data.
+.It Sy copies Ns = Ns Cm 1 | 2 | 3
+Controls the number of copies of data stored for this dataset. These copies are
+in addition to any redundancy provided by the pool, for example, mirroring or
+RAID-Z. The copies are stored on different disks, if possible. The space used
+by multiple copies is charged to the associated file and dataset, changing the
+.Sy used
+property and counting against quotas and reservations.
+.Pp
+Changing this property only affects newly-written data. Therefore, set this
+property at file system creation time by using the
+.Fl o Cm copies= Ns Ar N
+option.
+.It Sy dedup Ns = Ns Cm on | off | verify | sha256 Ns Op Cm ,verify
+Configures deduplication for a dataset. The default value is
+.Cm off .
+The default deduplication checksum is
+.Cm sha256
+(this may change in the future).
+When
+.Sy dedup
+is enabled, the checksum defined here overrides the
+.Sy checksum
+property. Setting the value to
+.Cm verify
+has the same effect as the setting
+.Cm sha256,verify .
+.Pp
+If set to
+.Cm verify ,
+.Tn ZFS
+will do a byte-to-byte comparison in case of two blocks having the same
+signature to make sure the block contents are identical.
+.It Sy devices Ns = Ns Cm on | off
+The
+.Sy devices
+property is currently not supported on
+.Fx .
+.It Sy exec Ns = Ns Cm on | off
+Controls whether processes can be executed from within this file system. The
+default value is
+.Cm on .
+.It Sy mlslabel Ns = Ns Ar label | Cm none
+The
+.Sy mlslabel
+property is currently not supported on
+.Fx .
+.It Sy mountpoint Ns = Ns Ar path | Cm none | legacy
+Controls the mount point used for this file system. See the
+.Qq Sx Mount Points
+section for more information on how this property is used.
+.Pp
+When the
+.Sy mountpoint
+property is changed for a file system, the file system and any children that
+inherit the mount point are unmounted. If the new value is
+.Cm legacy ,
+then they remain unmounted. Otherwise, they are automatically remounted in the
+new location if the property was previously
+.Cm legacy
+or
+.Cm none ,
+or if they were mounted before the property was changed. In addition, any
+shared file systems are unshared and shared in the new location.
+.It Sy nbmand Ns = Ns Cm on | off
+The
+.Sy nbmand
+property is currently not supported on
+.Fx .
+.It Sy primarycache Ns = Ns Cm all | none | metadata
+Controls what is cached in the primary cache (ARC). If this property is set to
+.Cm all ,
+then both user data and metadata is cached. If this property is set to
+.Cm none ,
+then neither user data nor metadata is cached. If this property is set to
+.Cm metadata ,
+then only metadata is cached. The default value is
+.Cm all .
+.It Sy quota Ns = Ns Ar size | Cm none
+Limits the amount of space a dataset and its descendents can consume. This
+property enforces a hard limit on the amount of space used. This includes all
+space consumed by descendents, including file systems and snapshots. Setting a
+quota on a descendent of a dataset that already has a quota does not override
+the ancestor's quota, but rather imposes an additional limit.
+.Pp
+Quotas cannot be set on volumes, as the
+.Sy volsize
+property acts as an implicit quota.
+.It Sy userquota@ Ns Ar user Ns = Ns Ar size | Cm none
+Limits the amount of space consumed by the specified user.
+Similar to the
+.Sy refquota
+property, the
+.Sy userquota
+space calculation does not include space that is used by descendent datasets,
+such as snapshots and clones. User space consumption is identified by the
+.Sy userspace@ Ns Ar user
+property.
+.Pp
+Enforcement of user quotas may be delayed by several seconds. This delay means
+that a user might exceed their quota before the system notices that they are
+over quota and begins to refuse additional writes with the
+.Em EDQUOT
+error message. See the
+.Cm userspace
+subcommand for more information.
+.Pp
+Unprivileged users can only access their own space usage. The root
+user, or a user who has been granted the
+.Sy userquota
+privilege with
+.Qq Nm Cm allow ,
+can get and set everyone's quota.
+.Pp
+This property is not available on volumes, on file systems before version 4, or
+on pools before version 15. The
+.Sy userquota@ Ns ...
+properties are not displayed by
+.Qq Nm Cm get all .
+The user's name must be appended after the
+.Sy @
+symbol, using one of the following forms:
+.Bl -bullet -offset 2n
+.It
+POSIX name (for example,
+.Em joe )
+.It
+POSIX numeric ID (for example,
+.Em 1001 )
+.El
+.It Sy groupquota@ Ns Ar group Ns = Ns Ar size | Cm none
+Limits the amount of space consumed by the specified group. Group space
+consumption is identified by the
+.Sy groupused@ Ns Ar group
+property.
+.Pp
+Unprivileged users can access only their own groups' space usage. The root
+user, or a user who has been granted the
+.Sy groupquota
+privilege with
+.Qq Nm Cm allow ,
+can get and set all groups' quotas.
+.It Sy readonly Ns = Ns Cm on | off
+Controls whether this dataset can be modified. The default value is
+.Cm off .
+.It Sy recordsize Ns = Ns Ar size
+Specifies a suggested block size for files in the file system. This property is
+designed solely for use with database workloads that access files in fixed-size
+records.
+.Tn ZFS
+automatically tunes block sizes according to internal algorithms optimized for
+typical access patterns.
+.Pp
+For databases that create very large files but access them in small random
+chunks, these algorithms may be suboptimal. Specifying a
+.Sy recordsize
+greater than or equal to the record size of the database can result in
+significant performance gains. Use of this property for general purpose file
+systems is strongly discouraged, and may adversely affect performance.
+.Pp
+The size specified must be a power of two greater than or equal to 512 and less
+than or equal to 128 Kbytes.
+.Pp
+Changing the file system's
+.Sy recordsize
+affects only files created afterward; existing files are unaffected.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy recsize .
+.It Sy refquota Ns = Ns Ar size | Cm none
+Limits the amount of space a dataset can consume. This property enforces a hard
+limit on the amount of space used. This hard limit does not include space used
+by descendents, including file systems and snapshots.
+.It Sy refreservation Ns = Ns Ar size | Cm none
+The minimum amount of space guaranteed to a dataset, not including its
+descendents. When the amount of space used is below this value, the dataset is
+treated as if it were taking up the amount of space specified by
+.Sy refreservation .
+The
+.Sy refreservation
+reservation is accounted for in the parent datasets' space used, and counts
+against the parent datasets' quotas and reservations.
+.Pp
+If
+.Sy refreservation
+is set, a snapshot is only allowed if there is enough free pool space outside
+of this reservation to accommodate the current number of "referenced" bytes in
+the dataset.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy refreserv .
+.It Sy reservation Ns = Ns Ar size | Cm none
+The minimum amount of space guaranteed to a dataset and its descendents. When
+the amount of space used is below this value, the dataset is treated as if it
+were taking up the amount of space specified by its reservation. Reservations
+are accounted for in the parent datasets' space used, and count against the
+parent datasets' quotas and reservations.
+.Pp
+This property can also be referred to by its shortened column name,
+.Sy reserv .
+.It Sy secondarycache Ns = Ns Cm all | none | metadata
+Controls what is cached in the secondary cache (L2ARC). If this property is set
+to
+.Cm all ,
+then both user data and metadata is cached. If this property is set to
+.Cm none ,
+then neither user data nor metadata is cached. If this property is set to
+.Cm metadata ,
+then only metadata is cached. The default value is
+.Cm all .
+.It Sy setuid Ns = Ns Cm on | off
+Controls whether the
+.No set- Ns Tn UID
+bit is respected for the file system. The default value is
+.Cm on .
+.It Sy sharesmb Ns = Ns Cm on | off | Ar opts
+The
+.Sy sharesmb
+property currently has no effect on
+.Fx .
+.It Sy sharenfs Ns = Ns Cm on | off | Ar opts
+Controls whether the file system is shared via
+.Tn NFS ,
+and what options are used. A file system with a
+.Sy sharenfs
+property of
+.Cm off
+is managed the traditional way via
+.Xr exports 5 .
+Otherwise, the file system is automatically shared and unshared with the
+.Qq Nm Cm share
+and
+.Qq Nm Cm unshare
+commands. If the property is set to
+.Cm on
+no
+.Tn NFS
+export options are used. Otherwise,
+.Tn NFS
+export options are equivalent to the contents of this property. The export
+options may be comma-separated. See
+.Xr exports 5
+for a list of valid options.
+.Pp
+When the
+.Sy sharenfs
+property is changed for a dataset, the
+.Xr mountd 8
+daemon is reloaded.
+.It Sy logbias Ns = Ns Cm latency | throughput
+Provide a hint to
+.Tn ZFS
+about handling of synchronous requests in this dataset.
+If
+.Sy logbias
+is set to
+.Cm latency
+(the default),
+.Tn ZFS
+will use pool log devices (if configured) to handle the requests at low
+latency. If
+.Sy logbias
+is set to
+.Cm throughput ,
+.Tn ZFS
+will not use configured pool log devices.
+.Tn ZFS
+will instead optimize synchronous operations for global pool throughput and
+efficient use of resources.
+.It Sy snapdir Ns = Ns Cm hidden | visible
+Controls whether the
+.Pa \&.zfs
+directory is hidden or visible in the root of the file system as discussed in
+the
+.Qq Sx Snapshots
+section. The default value is
+.Cm hidden .
+.It Sy sync Ns = Ns Cm standard | always | disabled
+Controls the behavior of synchronous requests (e.g.
+.Xr fsync 2 ,
+O_DSYNC). This property accepts the following values:
+.Bl -tag -offset 4n -width 8n
+.It Sy standard
+This is the POSIX specified behavior of ensuring all synchronous requests are
+written to stable storage and all devices are flushed to ensure data is not
+cached by device controllers (this is the default).
+.It Sy always
+All file system transactions are written and flushed before their system calls
+return. This has a large performance penalty.
+.It Sy disabled
+Disables synchronous requests. File system transactions are only committed to
+stable storage periodically. This option will give the highest performance.
+However, it is very dangerous as
+.Tn ZFS
+would be ignoring the synchronous transaction demands of applications such as
+databases or
+.Tn NFS .
+Administrators should only use this option when the risks are understood.
+.El
+.It Sy volsize Ns = Ns Ar size
+For volumes, specifies the logical size of the volume. By default, creating a
+volume establishes a reservation of equal size. For storage pools with a
+version number of 9 or higher, a
+.Sy refreservation
+is set instead. Any changes to
+.Sy volsize
+are reflected in an equivalent change to the reservation (or
+.Sy refreservation ) .
+The
+.Sy volsize
+can only be set to a multiple of
+.Cm volblocksize ,
+and cannot be zero.
+.Pp
+The reservation is kept equal to the volume's logical size to prevent
+unexpected behavior for consumers. Without the reservation, the volume could
+run out of space, resulting in undefined behavior or data corruption, depending
+on how the volume is used. These effects can also occur when the volume size is
+changed while it is in use (particularly when shrinking the size). Extreme care
+should be used when adjusting the volume size.
+.Pp
+Though not recommended, a "sparse volume" (also known as "thin provisioning")
+can be created by specifying the
+.Fl s
+option to the
+.Qq Nm Cm create Fl V
+command, or by changing the reservation after the volume has been created. A
+"sparse volume" is a volume where the reservation is less then the volume size.
+Consequently, writes to a sparse volume can fail with
+.Sy ENOSPC
+when the pool is low on space. For a sparse volume, changes to
+.Sy volsize
+are not reflected in the reservation.
+.It Sy vscan Ns = Ns Cm off | on
+The
+.Sy vscan
+property is currently not supported on
+.Fx .
+.It Sy xattr Ns = Ns Cm off | on
+The
+.Sy xattr
+property is currently not supported on
+.Fx .
+.It Sy jailed Ns = Ns Cm off | on
+Controls whether the dataset is managed from a jail. See the
+.Qq Sx Jails
+section for more information. The default value is
+.Cm off .
+.El
+.Pp
+The following three properties cannot be changed after the file system is
+created, and therefore, should be set when the file system is created. If the
+properties are not set with the
+.Qq Nm Cm create
+or
+.Nm zpool Cm create
+commands, these properties are inherited from the parent dataset. If the parent
+dataset lacks these properties due to having been created prior to these
+features being supported, the new file system will have the default values for
+these properties.
+.Bl -tag -width 4n
+.It Sy casesensitivity Ns = Ns Cm sensitive | insensitive | mixed
+The
+.Sy casesensitivity
+property is currently not supported on
+.Fx .
+.It Sy normalization Ns = Ns Cm none | formC | formD | formKC | formKD
+Indicates whether the file system should perform a
+.Sy unicode
+normalization of file names whenever two file names are compared, and which
+normalization algorithm should be used. File names are always stored
+unmodified, names are normalized as part of any comparison process. If this
+property is set to a legal value other than
+.Cm none ,
+and the
+.Sy utf8only
+property was left unspecified, the
+.Sy utf8only
+property is automatically set to
+.Cm on .
+The default value of the
+.Sy normalization
+property is
+.Cm none .
+This property cannot be changed after the file system is created.
+.It Sy utf8only Ns = Ns Cm on | off
+Indicates whether the file system should reject file names that include
+characters that are not present in the
+.Sy UTF-8
+character code set. If this property is explicitly set to
+.Cm off ,
+the normalization property must either not be explicitly set or be set to
+.Cm none .
+The default value for the
+.Sy utf8only
+property is
+.Cm off .
+This property cannot be changed after the file system is created.
+.El
+.Pp
+The
+.Sy casesensitivity , normalization , No and Sy utf8only
+properties are also new permissions that can be assigned to non-privileged
+users by using the
+.Tn ZFS
+delegated administration feature.
+.Ss Temporary Mount Point Properties
+When a file system is mounted, either through
+.Xr mount 8
+for legacy mounts or the
+.Qq Nm Cm mount
+command for normal file systems, its mount options are set according to its
+properties. The correlation between properties and mount options is as follows:
+.Bl -column -offset 4n "PROPERTY" "MOUNT OPTION"
+.It "PROPERTY	MOUNT OPTION"
+.It "atime	atime/noatime"
+.It "exec	exec/noexec"
+.It "readonly	ro/rw"
+.It "setuid	suid/nosuid"
+.El
+.Pp
+In addition, these options can be set on a per-mount basis using the
+.Fl o
+option, without affecting the property that is stored on disk. The values
+specified on the command line override the values stored in the dataset. These
+properties are reported as "temporary" by the
+.Qq Nm Cm get
+command. If the properties are changed while the dataset is mounted, the new
+setting overrides any temporary settings.
+.Ss User Properties
+In addition to the standard native properties,
+.Tn ZFS
+supports arbitrary user properties. User properties have no effect on
+.Tn ZFS
+behavior, but applications or administrators can use them to annotate datasets
+(file systems, volumes, and snapshots).
+.Pp
+User property names must contain a colon
+.Pq Sy \&:
+character to distinguish them from native properties. They may contain
+lowercase letters, numbers, and the following punctuation characters: colon
+.Pq Sy \&: ,
+dash
+.Pq Sy \&- ,
+period
+.Pq Sy \&.
+and underscore
+.Pq Sy \&_ .
+The expected convention is that the property name is divided into two portions
+such as
+.Em module Ns Sy \&: Ns Em property ,
+but this namespace is not enforced by
+.Tn ZFS .
+User property names can be at most 256 characters, and cannot begin with a dash
+.Pq Sy \&- .
+.Pp
+When making programmatic use of user properties, it is strongly suggested to
+use a reversed
+.Tn DNS
+domain name for the
+.Ar module
+component of property names to reduce the chance that two
+independently-developed packages use the same property name for different
+purposes. Property names beginning with
+.Em com.sun
+are reserved for use by Sun Microsystems.
+.Pp
+The values of user properties are arbitrary strings, are always inherited, and
+are never validated. All of the commands that operate on properties
+.Po
+.Qq Nm Cm list ,
+.Qq Nm Cm get ,
+.Qq Nm Cm set
+and so forth
+.Pc
+can be used to manipulate both native properties and user properties. Use the
+.Qq Nm Cm inherit
+command to clear a user property. If the property is not defined in any parent
+dataset, it is removed entirely. Property values are limited to 1024
+characters.
+.Sh SUBCOMMANDS
+All subcommands that modify state are logged persistently to the pool in their
+original form.
+.Bl -tag -width 2n
+.It Xo
+.Nm
+.Op Fl \&?
+.Xc
+.Pp
+Displays a help message.
+.It Xo
+.Nm
+.Cm create
+.Op Fl pu
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ... filesystem
+.Xc
+.Pp
+Creates a new
+.Tn ZFS
+file system. The file system is automatically mounted according to the
+.Sy mountpoint
+property inherited from the parent.
+.Bl -tag -width indent
+.It Fl p
+Creates all the non-existing parent datasets. Datasets created in this manner
+are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent. Any property specified on the command
+line using the
+.Fl o
+option is ignored. If the target filesystem already exists, the operation
+completes successfully.
+.It Fl u
+Newly created file system is not mounted.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property as if the command
+.Qq Nm Cm set Ar property Ns = Ns Ar value
+was invoked at the same time the dataset was created. Any editable
+.Tn ZFS
+property can also be set at creation time. Multiple
+.Fl o
+options can be specified. An error results if the same property is specified in
+multiple
+.Fl o
+options.
+.El
+.It Xo
+.Nm
+.Cm create
+.Op Fl ps
+.Op Fl b Ar blocksize
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Fl V
+.Ar size volume
+.Xc
+.Pp
+Creates a volume of the given size. The volume is exported as a block device in
+.Pa /dev/zvol/path ,
+where
+.Ar path
+is the name of the volume in the
+.Tn ZFS
+namespace. The size represents the logical size as exported by the device. By
+default, a reservation of equal size is created.
+.Pp
+.Ar size
+is automatically rounded up to the nearest 128 Kbytes to ensure that
+the volume has an integral number of blocks regardless of
+.Ar blocksize .
+.Bl -tag -width indent
+.It Fl p
+Creates all the non-existing parent datasets. Datasets created in this manner
+are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent. Any property specified on the command
+line using the
+.Fl o
+option is ignored. If the target filesystem already exists, the operation
+completes successfully.
+.It Fl s
+Creates a sparse volume with no reservation. See
+.Sy volsize
+in the
+.Qq Sx Native Properties
+section for more information about sparse volumes.
+.It Fl b Ar blocksize
+Equivalent to
+.Fl o Cm volblocksize Ns = Ns Ar blocksize .
+If this option is specified in conjunction with
+.Fl o Cm volblocksize ,
+the resulting behavior is undefined.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property as if the
+.Qq Nm Cm set Ar property Ns = Ns Ar value
+command was invoked at the same time the dataset was created. Any editable
+.Tn ZFS
+property can also be set at creation time. Multiple
+.Fl o
+options can be specified. An error results if the same property is specified in
+multiple
+.Fl o
+options.
+.El
+.It Xo
+.Nm
+.Cm destroy
+.Op Fl fnpRrv
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Destroys the given dataset. By default, the command unshares any file systems
+that are currently shared, unmounts any file systems that are currently
+mounted, and refuses to destroy a dataset that has active dependents (children
+or clones).
+.Bl -tag -width indent
+.It Fl r
+Recursively destroy all children.
+.It Fl R
+Recursively destroy all dependents, including cloned file systems outside the
+target hierarchy.
+.It Fl f
+Force an unmount of any file systems using the
+.Qq Nm Cm unmount Fl f
+command. This option has no effect on non-file systems or unmounted file
+systems.
+.It Fl n
+Do a dry-run ("No-op") deletion. No data will be deleted. This is useful in
+conjunction with the
+.Fl v
+or
+.Fl p
+flags to determine what data would be deleted.
+.It Fl p
+Print machine-parsable verbose information about the deleted data.
+.It Fl v
+Print verbose information about the deleted data.
+.El
+.Pp
+Extreme care should be taken when applying either the
+.Fl r
+or the
+.Fl R
+options, as they can destroy large portions of a pool and cause unexpected
+behavior for mounted file systems in use.
+.It Xo
+.Nm
+.Cm destroy
+.Op Fl dnpRrv
+.Sm off
+.Ar snapshot
+.Op % Ns Ar snapname
+.Op , Ns Ar ...
+.Sm on
+.Xc
+.Pp
+The given snapshots are destroyed immediately if and only if the
+.Qq Nm Cm destroy
+command without the
+.Fl d
+option would have destroyed it. Such immediate destruction would occur, for
+example, if the snapshot had no clones and the user-initiated reference count
+were zero.
+.Pp
+If a snapshot does not qualify for immediate destruction, it is marked for
+deferred deletion. In this state, it exists as a usable, visible snapshot until
+both of the preconditions listed above are met, at which point it is destroyed.
+.Pp
+An inclusive range of snapshots may be specified by separating the
+first and last snapshots with a percent sign
+.Pq Sy % .
+The first and/or last snapshots may be left blank, in which case the
+filesystem's oldest or newest snapshot will be implied.
+.Pp
+Multiple snapshots
+(or ranges of snapshots) of the same filesystem or volume may be specified
+in a comma-separated list of snapshots.
+Only the snapshot's short name (the
+part after the
+.Sy @ )
+should be specified when using a range or comma-separated list to identify
+multiple snapshots.
+.Bl -tag -width indent
+.It Fl r
+Destroy (or mark for deferred deletion) all snapshots with this name in
+descendent file systems.
+.It Fl R
+Recursively destroy all dependents.
+.It Fl n
+Do a dry-run ("No-op") deletion. No data will be deleted. This is useful in
+conjunction with the
+.Fl v
+or
+.Fl p
+flags to determine what data would be deleted.
+.It Fl p
+Print machine-parsable verbose information about the deleted data.
+.It Fl v
+Print verbose information about the deleted data.
+.It Fl d
+Defer snapshot deletion.
+.El
+.Pp
+Extreme care should be taken when applying either the
+.Fl r
+or the
+.Fl R
+options, as they can destroy large portions of a pool and cause unexpected
+behavior for mounted file systems in use.
+.It Xo
+.Nm
+.Cm snapshot
+.Op Fl r
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Ar filesystem@snapname Ns | Ns volume@snapname
+.Xc
+.Pp
+Creates a snapshot with the given name. All previous modifications by
+successful system calls to the file system are part of the snapshot. See the
+.Qq Sx Snapshots
+section for details.
+.Bl -tag -width indent
+.It Fl r
+Recursively create snapshots of all descendent datasets. Snapshots are taken
+atomically, so that all recursive snapshots correspond to the same moment in
+time.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property; see
+.Qq Nm Cm create
+for details.
+.El
+.It Xo
+.Nm
+.Cm rollback
+.Op Fl rRf
+.Ar snapshot
+.Xc
+.Pp
+Roll back the given dataset to a previous snapshot. When a dataset is rolled
+back, all data that has changed since the snapshot is discarded, and the
+dataset reverts to the state at the time of the snapshot. By default, the
+command refuses to roll back to a snapshot other than the most recent one. In
+order to do so, all intermediate snapshots must be destroyed by specifying the
+.Fl r
+option.
+.Bl -tag -width indent
+.It Fl r
+Recursively destroy any snapshots more recent than the one specified.
+.It Fl R
+Recursively destroy any more recent snapshots, as well as any clones of those
+snapshots.
+.It Fl f
+Used with the
+.Fl R
+option to force an unmount of any clone file systems that are to be destroyed.
+.El
+.It Xo
+.Nm
+.Cm clone
+.Op Fl p
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ... snapshot filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Creates a clone of the given snapshot. See the
+.Qq Sx Clones
+section for details. The target dataset can be located anywhere in the
+.Tn ZFS
+hierarchy, and is created as the same type as the original.
+.Bl -tag -width indent
+.It Fl p
+Creates all the non-existing parent datasets. Datasets created in this manner
+are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent. If the target filesystem or volume
+already exists, the operation completes successfully.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property; see
+.Qq Nm Cm create
+for details.
+.El
+.It Xo
+.Nm
+.Cm promote
+.Ar clone-filesystem
+.Xc
+.Pp
+Promotes a clone file system to no longer be dependent on its "origin"
+snapshot. This makes it possible to destroy the file system that the clone was
+created from. The clone parent-child dependency relationship is reversed, so
+that the origin file system becomes a clone of the specified file system.
+.Pp
+The snapshot that was cloned, and any snapshots previous to this snapshot, are
+now owned by the promoted clone. The space they use moves from the origin file
+system to the promoted clone, so enough space must be available to accommodate
+these snapshots. No new space is consumed by this operation, but the space
+accounting is adjusted. The promoted clone must not have any conflicting
+snapshot names of its own. The
+.Cm rename
+subcommand can be used to rename any conflicting snapshots.
+.It Xo
+.Nm
+.Cm rename
+.Op Fl f
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.It Xo
+.Nm
+.Cm rename
+.Op Fl f
+.Fl p
+.Ar filesystem Ns | Ns Ar volume
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.It Xo
+.Nm
+.Cm rename
+.Fl u
+.Op Fl p
+.Ar filesystem filesystem
+.Xc
+.Pp
+Renames the given dataset. The new target can be located anywhere in the
+.Tn ZFS
+hierarchy, with the exception of snapshots. Snapshots can only be renamed
+within the parent file system or volume. When renaming a snapshot, the parent
+file system of the snapshot does not need to be specified as part of the second
+argument. Renamed file systems can inherit new mount points, in which case they
+are unmounted and remounted at the new mount point.
+.Bl -tag -width indent
+.It Fl p
+Creates all the nonexistent parent datasets. Datasets created in this manner
+are automatically mounted according to the
+.Sy mountpoint
+property inherited from their parent.
+.It Fl u
+Do not remount file systems during rename. If a file system's
+.Sy mountpoint
+property is set to
+.Cm legacy
+or
+.Cm none ,
+the file system is not unmounted even if this option is not given.
+.It Fl f
+Force unmount any filesystems that need to be unmounted in the process.
+This flag has no effect if used together with the
+.Fl u
+flag.
+.El
+.It Xo
+.Nm
+.Cm rename
+.Fl r
+.Ar snapshot snapshot
+.Xc
+.Pp
+Recursively rename the snapshots of all descendent datasets. Snapshots are the
+only dataset that can be renamed recursively.
+.It Xo
+.Nm
+.Cm list
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl H
+.Op Fl o Ar property Ns Op , Ns Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Op Fl s Ar property
+.Ar ...
+.Op Fl S Ar property
+.Ar ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.Pp
+Lists the property information for the given datasets in tabular form. If
+specified, you can list property information by the absolute pathname or the
+relative pathname. By default, all file systems and volumes are displayed.
+Snapshots are displayed if the
+.Sy listsnaps
+property is
+.Cm on
+(the default is
+.Cm off ) .
+The following fields are displayed,
+.Sy name , used , available , referenced , mountpoint .
+.Bl -tag -width indent
+.It Fl r
+Recursively display any children of the dataset on the command line.
+.It Fl d Ar depth
+Recursively display any children of the dataset, limiting the recursion to
+.Ar depth .
+A depth of
+.Sy 1
+will display only the dataset and its direct children.
+.It Fl H
+Used for scripting mode. Do not print headers and separate fields by a single
+tab instead of arbitrary white space.
+.It Fl o Ar property Ns Op , Ns Ar ...
+A comma-separated list of properties to display. The property must be:
+.Bl -bullet -offset 2n
+.It
+One of the properties described in the
+.Qq Sx Native Properties
+section
+.It
+A user property
+.It
+The value
+.Cm name
+to display the dataset name
+.It
+The value
+.Cm space
+to display space usage properties on file systems and volumes. This is a
+shortcut for specifying
+.Fl o
+.Sy name,avail,used,usedsnap,usedds,usedrefreserv,usedchild
+.Fl t
+.Sy filesystem,volume
+syntax.
+.El
+.It Fl t Ar type Ns Op , Ns Ar ...
+A comma-separated list of types to display, where
+.Ar type
+is one of
+.Sy filesystem , snapshot , volume , No or Sy all .
+For example, specifying
+.Fl t Cm snapshot
+displays only snapshots.
+.It Fl s Ar property
+A property for sorting the output by column in ascending order based on the
+value of the property. The property must be one of the properties described in
+the
+.Qq Sx Properties
+section, or the special value
+.Cm name
+to sort by the dataset name. Multiple properties can be specified at one time
+using multiple
+.Fl s
+property options. Multiple
+.Fl s
+options are evaluated from left to right in decreasing order of importance.
+.Pp
+The following is a list of sorting criteria:
+.Bl -bullet -offset 2n
+.It
+Numeric types sort in numeric order.
+.It
+String types sort in alphabetical order.
+.It
+Types inappropriate for a row sort that row to the literal bottom, regardless
+of the specified ordering.
+.It
+If no sorting options are specified the existing behavior of
+.Qq Nm Cm list
+is preserved.
+.El
+.It Fl S Ar property
+Same as the
+.Fl s
+option, but sorts by property in descending order.
+.El
+.It Xo
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.Pp
+Sets the property to the given value for each dataset. Only some properties can
+be edited. See the "Properties" section for more information on what properties
+can be set and acceptable values. Numeric values can be specified as exact
+values, or in a human-readable form with a suffix of
+.Sy B , K , M , G , T , P , E , Z
+(for bytes, kilobytes, megabytes, gigabytes, terabytes, petabytes, exabytes, or
+zettabytes, respectively). User properties can be set on snapshots. For more
+information, see the
+.Qq Sx User Properties
+section.
+.It Xo
+.Nm
+.Cm get
+.Op Fl r Ns | Ns Fl d Ar depth
+.Op Fl Hp
+.Op Fl o Ar all | field Ns Op , Ns Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Op Fl s Ar source Ns Op , Ns Ar ...
+.Ar all | property Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.Pp
+Displays properties for the given datasets. If no datasets are specified, then
+the command displays properties for all datasets on the system. For each
+property, the following columns are displayed:
+.Pp
+.Bl -hang -width "property" -offset indent -compact
+.It name
+Dataset name
+.It property
+Property name
+.It value
+Property value
+.It source
+Property source. Can either be local, default, temporary, inherited, or none
+(\&-).
+.El
+.Pp
+All columns except the
+.Sy RECEIVED
+column are displayed by default. The columns to display can be specified
+by using the
+.Fl o
+option. This command takes a comma-separated list of properties as described in
+the
+.Qq Sx Native Properties
+and
+.Qq Sx User Properties
+sections.
+.Pp
+The special value
+.Cm all
+can be used to display all properties that apply to the given dataset's type
+(filesystem, volume, or snapshot).
+.Bl -tag -width indent
+.It Fl r
+Recursively display properties for any children.
+.It Fl d Ar depth
+Recursively display any children of the dataset, limiting the recursion to
+.Ar depth .
+A depth of
+.Sy 1
+will display only the dataset and its direct children.
+.It Fl H
+Display output in a form more easily parsed by scripts. Any headers are
+omitted, and fields are explicitly separated by a single tab instead of an
+arbitrary amount of space.
+.It Fl p
+Display numbers in parseable (exact) values.
+.It Fl o Cm all | Ar field Ns Op , Ns Ar ...
+A comma-separated list of columns to display. Supported values are
+.Sy name,property,value,received,source .
+Default values are
+.Sy name,property,value,source .
+The keyword
+.Cm all
+specifies all columns.
+.It Fl t Ar type Ns Op , Ns Ar ...
+A comma-separated list of types to display, where
+.Ar type
+is one of
+.Sy filesystem , snapshot , volume , No or Sy all .
+For example, specifying
+.Fl t Cm snapshot
+displays only snapshots.
+.It Fl s Ar source Ns Op , Ns Ar ...
+A comma-separated list of sources to display. Those properties coming from a
+source other than those in this list are ignored. Each source must be one of
+the following:
+.Sy local,default,inherited,temporary,received,none .
+The default value is all sources.
+.El
+.It Xo
+.Nm
+.Cm inherit
+.Op Fl rS
+.Ar property
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.Pp
+Clears the specified property, causing it to be inherited from an ancestor. If
+no ancestor has the property set, then the default value is used. See the
+.Qq Sx Properties
+section for a listing of default values, and details on which properties can be
+inherited.
+.Bl -tag -width indent
+.It Fl r
+Recursively inherit the given property for all children.
+.It Fl S
+For properties with a received value, revert to this value. This flag has no
+effect on properties that do not have a received value.
+.El
+.It Xo
+.Nm
+.Cm upgrade
+.Op Fl v
+.Xc
+.Pp
+Displays a list of file systems that are not the most recent version.
+.Bl -tag -width indent
+.It Fl v
+Displays
+.Tn ZFS
+filesystem versions supported by the current software. The current
+.Tn ZFS
+filesystem version and all previous supported versions are displayed, along
+with an explanation of the features provided with each version.
+.El
+.It Xo
+.Nm
+.Cm upgrade
+.Op Fl r
+.Op Fl V Ar version
+.Fl a | Ar filesystem
+.Xc
+.Pp
+Upgrades file systems to a new on-disk version. Once this is done, the file
+systems will no longer be accessible on systems running older versions of the
+software.
+.Qq Nm Cm send
+streams generated from new snapshots of these file systems cannot be accessed
+on systems running older versions of the software.
+.Pp
+In general, the file system version is independent of the pool version. See
+.Xr zpool 8
+for information on the
+.Nm zpool Cm upgrade
+command.
+.Pp
+In some cases, the file system version and the pool version are interrelated
+and the pool version must be upgraded before the file system version can be
+upgraded.
+.Bl -tag -width indent
+.It Fl r
+Upgrade the specified file system and all descendent file systems.
+.It Fl V Ar version
+Upgrade to the specified
+.Ar version .
+If the
+.Fl V
+flag is not specified, this command upgrades to the most recent version. This
+option can only be used to increase the version number, and only up to the most
+recent version supported by this software.
+.It Fl a
+Upgrade all file systems on all imported pools.
+.It Ar filesystem
+Upgrade the specified file system.
+.El
+.It Xo
+.Nm
+.Cm userspace
+.Op Fl Hinp
+.Op Fl o Ar field Ns Op , Ns Ar ...
+.Op Fl s Ar field
+.Ar ...
+.Op Fl S Ar field
+.Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar snapshot
+.Xc
+.Pp
+Displays space consumed by, and quotas on, each user in the specified
+filesystem or snapshot. This corresponds to the
+.Sy userused@ Ns Ar user
+and
+.Sy userquota@ Ns Ar user
+properties.
+.Bl -tag -width indent
+.It Fl n
+Print numeric ID instead of user/group name.
+.It Fl H
+Do not print headers, use tab-delimited output.
+.It Fl p
+Use exact (parsable) numeric output.
+.It Fl o Ar field Ns Op , Ns Ar ...
+Display only the specified fields from the following set:
+.Sy type,name,used,quota .
+The default is to display all fields.
+.It Fl s Ar field
+Sort output by this field. The
+.Fl s
+and
+.Fl S
+flags may be specified multiple times to sort first by one field, then by
+another. The default is
+.Fl s Cm type Fl s Cm name .
+.It Fl S Ar field
+Sort by this field in reverse order. See
+.Fl s .
+.It Fl t Ar type Ns Op , Ns Ar ...
+Print only the specified types from the following set:
+.Sy all,posixuser,smbuser,posixgroup,smbgroup .
+.Pp
+The default is
+.Fl t Cm posixuser,smbuser .
+.Pp
+The default can be changed to include group types.
+.It Fl i
+Translate SID to POSIX ID. This flag currently has no effect on
+.Fx .
+.El
+.It Xo
+.Nm
+.Cm groupspace
+.Op Fl Hinp
+.Op Fl o Ar field Ns Op , Ns Ar ...
+.Op Fl s Ar field
+.Ar ...
+.Op Fl S Ar field
+.Ar ...
+.Op Fl t Ar type Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar snapshot
+.Xc
+.Pp
+Displays space consumed by, and quotas on, each group in the specified
+filesystem or snapshot. This subcommand is identical to
+.Qq Nm Cm userspace ,
+except that the default types to display are
+.Fl t Sy posixgroup,smbgroup .
+.It Xo
+.Nm
+.Cm mount
+.Xc
+.Pp
+Displays all
+.Tn ZFS
+file systems currently mounted.
+.It Xo
+.Nm
+.Cm mount
+.Op Fl vO
+.Op Fl o Ar property Ns Op , Ns Ar ...
+.Fl a | Ar filesystem
+.Xc
+.Pp
+Mounts
+.Tn ZFS
+file systems.
+.Bl -tag -width indent
+.It Fl v
+Report mount progress.
+.It Fl O
+Perform an overlay mount. Overlay mounts are not supported on
+.Fx .
+.It Fl o Ar property Ns Op , Ns Ar ...
+An optional, comma-separated list of mount options to use temporarily for the
+duration of the mount. See the
+.Qq Sx Temporary Mount Point Properties
+section for details.
+.It Fl a
+Mount all available
+.Tn ZFS
+file systems.
+This command may be executed on
+.Fx
+system startup by
+.Pa /etc/rc.d/zfs .
+For more information, see variable
+.Va zfs_enable
+in
+.Xr rc.conf 5 .
+.It Ar filesystem
+Mount the specified filesystem.
+.El
+.It Xo
+.Nm
+.Cm unmount
+.Op Fl f
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Xc
+.Pp
+Unmounts currently mounted
+.Tn ZFS
+file systems.
+.Bl -tag -width indent
+.It Fl f
+Forcefully unmount the file system, even if it is currently in use.
+.It Fl a
+Unmount all available
+.Tn ZFS
+file systems.
+.It Ar filesystem | mountpoint
+Unmount the specified filesystem. The command can also be given a path to a
+.Tn ZFS
+file system mount point on the system.
+.El
+.It Xo
+.Nm
+.Cm share
+.Fl a | Ar filesystem
+.Xc
+.Pp
+Shares
+.Tn ZFS
+file systems that have the
+.Sy sharenfs
+property set.
+.Bl -tag -width indent
+.It Fl a
+Share all
+.Tn ZFS
+file systems that have the
+.Sy sharenfs
+property set.
+This command may be executed on
+.Fx
+system startup by
+.Pa /etc/rc.d/zfs .
+For more information, see variable
+.Va zfs_enable
+in
+.Xr rc.conf 5 .
+.It Ar filesystem
+Share the specified filesystem according to the
+.Sy sharenfs
+property. File systems are shared when the
+.Sy sharenfs
+property is set.
+.El
+.It Xo
+.Nm
+.Cm unshare
+.Fl a | Ar filesystem Ns | Ns Ar mountpoint
+.Xc
+.Pp
+Unshares
+.Tn ZFS
+file systems that have the
+.Sy sharenfs
+property set.
+.Bl -tag -width indent
+.It Fl a
+Unshares
+.Tn ZFS
+file systems that have the
+.Sy sharenfs
+property set.
+This command may be executed on
+.Fx
+system shutdown by
+.Pa /etc/rc.d/zfs .
+For more information, see variable
+.Va zfs_enable
+in
+.Xr rc.conf 5 .
+.It Ar filesystem | mountpoint
+Unshare the specified filesystem. The command can also be given a path to a
+.Tn ZFS
+file system shared on the system.
+.El
+.It Xo
+.Nm
+.Cm send
+.Op Fl DnPpRv
+.Op Fl i Ar snapshot | Fl I Ar snapshot
+.Ar snapshot
+.Xc
+.Pp
+Creates a stream representation of the last
+.Ar snapshot
+argument (not part of
+.Fl i
+or
+.Fl I )
+which is written to standard output. The output can be redirected to
+a file or to a different system (for example, using
+.Xr ssh 1 ) .
+By default, a full stream is generated.
+.Bl -tag -width indent
+.It Fl i Ar snapshot
+Generate an incremental stream from the
+.Fl i Ar snapshot
+to the last
+.Ar snapshot .
+The incremental source (the
+.Fl i Ar snapshot )
+can be specified as the last component of the snapshot name (for example, the
+part after the
+.Sy @ ) ,
+and it is assumed to be from the same file system as the last
+.Ar snapshot .
+.Pp
+If the destination is a clone, the source may be the origin snapshot, which
+must be fully specified (for example,
+.Cm pool/fs@origin ,
+not just
+.Cm @origin ) .
+.It Fl I Ar snapshot
+Generate a stream package that sends all intermediary snapshots from the
+.Fl I Ar snapshot
+to the last
+.Ar snapshot .
+For example,
+.Ic -I @a fs@d
+is similar to
+.Ic -i @a fs@b; -i @b fs@c; -i @c fs@d .
+The incremental source snapshot may be specified as with the
+.Fl i
+option.
+.It Fl R
+Generate a replication stream package, which will replicate the specified
+filesystem, and all descendent file systems, up to the named snapshot. When
+received, all properties, snapshots, descendent file systems, and clones are
+preserved.
+.Pp
+If the
+.Fl i
+or
+.Fl I
+flags are used in conjunction with the
+.Fl R
+flag, an incremental replication stream is generated. The current values of
+properties, and current snapshot and file system names are set when the stream
+is received. If the
+.Fl F
+flag is specified when this stream is received, snapshots and file systems that
+do not exist on the sending side are destroyed.
+.It Fl D
+Generate a deduplicated stream. Blocks which would have been sent multiple
+times in the send stream will only be sent once.  The receiving system must
+also support this feature to receive a deduplicated stream.  This flag can
+be used regardless of the dataset's
+.Sy dedup
+property, but performance will be much better if the filesystem uses a
+dedup-capable checksum (e.g.
+.Sy sha256 ) .
+.It Fl p
+Include the dataset's properties in the stream. This flag is implicit when
+.Fl R
+is specified. The receiving system must also support this feature.
+.It Fl n
+Do a dry-run ("No-op") send.  Do not generate any actual send data.  This is
+useful in conjunction with the
+.Fl v
+or
+.Fl P
+flags to determine what data will be sent.
+.It Fl P
+Print machine-parsable verbose information about the stream package generated.
+.It Fl v
+Print verbose information about the stream package generated.
+This information includes a per-second report of how much data has been sent.
+.El
+.Pp
+The format of the stream is committed. You will be able to receive your streams
+on future versions of
+.Tn ZFS .
+.It Xo
+.Nm
+.Cm receive
+.Op Fl vnFu
+.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
+.Xc
+.It Xo
+.Nm
+.Cm receive
+.Op Fl vnFu
+.Op Fl d | e
+.Ar filesystem
+.Xc
+.Pp
+Creates a snapshot whose contents are as specified in the stream provided on
+standard input. If a full stream is received, then a new file system is created
+as well. Streams are created using the
+.Qq Nm Cm send
+subcommand, which by default creates a full stream.
+.Qq Nm Cm recv
+can be used as an alias for
+.Qq Nm Cm receive .
+.Pp
+If an incremental stream is received, then the destination file system must
+already exist, and its most recent snapshot must match the incremental stream's
+source. For
+.Sy zvol Ns s,
+the destination device link is destroyed and recreated, which means the
+.Sy zvol
+cannot be accessed during the
+.Sy receive
+operation.
+.Pp
+When a snapshot replication package stream that is generated by using the
+.Qq Nm Cm send Fl R
+command is received, any snapshots that do not exist on the sending location
+are destroyed by using the
+.Qq Nm Cm destroy Fl d
+command.
+.Pp
+The name of the snapshot (and file system, if a full stream is received) that
+this subcommand creates depends on the argument type and the
+.Fl d
+or
+.Fl e
+option.
+.Pp
+If the argument is a snapshot name, the specified
+.Ar snapshot
+is created. If the argument is a file system or volume name, a snapshot with
+the same name as the sent snapshot is created within the specified
+.Ar filesystem
+or
+.Ar volume .
+If the
+.Fl d
+or
+.Fl e
+option is specified, the snapshot name is determined by appending the sent
+snapshot's name to the specified
+.Ar filesystem .
+If the
+.Fl d
+option is specified, all but the pool name of the sent snapshot path is
+appended (for example,
+.Sy b/c@1
+appended from sent snapshot
+.Sy a/b/c@1 ) ,
+and if the
+.Fl e
+option is specified, only the tail of the sent snapshot path is appended (for
+example,
+.Sy c@1
+appended from sent snapshot
+.Sy a/b/c@1 ) .
+In the case of
+.Fl d ,
+any file systems needed to replicate the path of the sent snapshot are created
+within the specified file system.
+.Bl -tag -width indent
+.It Fl d
+Use the full sent snapshot path without the first element (without pool name)
+to determine the name of the new snapshot as described in the paragraph above.
+.It Fl e
+Use only the last element of the sent snapshot path to determine the name of
+the new snapshot as described in the paragraph above.
+.It Fl u
+The file system that is associated with the received stream is not mounted.
+.It Fl v
+Print verbose information about the stream and the time required to perform the
+receive operation.
+.It Fl n
+Do not actually receive the stream. This can be useful in conjunction with the
+.Fl v
+option to verify the name the receive operation would use.
+.It Fl F
+Force a rollback of the file system to the most recent snapshot before
+performing the receive operation. If receiving an incremental replication
+stream (for example, one generated by
+.Qq Nm Cm send Fl R Fl iI ) ,
+destroy snapshots and file systems that do not exist on the sending side.
+.El
+.It Xo
+.Nm
+.Cm allow
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Displays permissions that have been delegated on the specified filesystem or
+volume. See the other forms of
+.Qq Nm Cm allow
+for more information.
+.It Xo
+.Nm
+.Cm allow
+.Op Fl ldug
+.Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ...
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.It Xo
+.Nm
+.Cm allow
+.Op Fl ld
+.Fl e
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Delegates
+.Tn ZFS
+administration permission for the file systems to non-privileged users.
+.Bl -tag -width indent
+.It Xo
+.Op Fl ug
+.Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ...
+.Xc
+Specifies to whom the permissions are delegated. Multiple entities can be
+specified as a comma-separated list. If neither of the
+.Fl ug
+options are specified, then the argument is interpreted preferentially as the
+keyword "everyone", then as a user name, and lastly as a group name. To specify
+a user or group named "everyone", use the
+.Fl u
+or
+.Fl g
+options. To specify a group with the same name as a user, use the
+.Fl g
+option.
+.It Xo
+.Op Fl e
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Xc
+Specifies that the permissions be delegated to "everyone".
+Multiple permissions
+may be specified as a comma-separated list. Permission names are the same as
+.Tn ZFS
+subcommand and property names. See the property list below. Property set names,
+which begin with an at sign
+.Pq Sy @ ,
+may be specified. See the
+.Fl s
+form below for details.
+.It Xo
+.Op Fl ld
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+Specifies where the permissions are delegated. If neither of the
+.Fl ld
+options are specified, or both are, then the permissions are allowed for the
+file system or volume, and all of its descendents. If only the
+.Fl l
+option is used, the permissions are allowed "locally" only for the specified
+file system.
+If only the
+.Fl d
+option is used, the permissions are allowed only for the descendent file
+systems.
+.El
+.Pp
+Permissions are generally the ability to use a
+.Tn ZFS
+subcommand or change a
+.Tn ZFS
+property. The following permissions are available:
+.Bl -column -offset 4n "secondarycache" "subcommand"
+.It NAME Ta TYPE Ta NOTES
+.It allow Ta subcommand Ta Must Xo
+also have the permission that is being allowed
+.Xc
+.It clone Ta subcommand Ta Must Xo
+also have the 'create' ability and 'mount' ability in the origin file system
+.Xc
+.It create Ta subcommand Ta Must also have the 'mount' ability
+.It destroy Ta subcommand Ta Must also have the 'mount' ability
+.It diff Ta subcommand Ta Allows lookup of paths within a dataset given an
+object number, and the ability to create snapshots necessary to 'zfs diff'
+.It hold Ta subcommand Ta Allows adding a user hold to a snapshot
+.It mount Ta subcommand Ta Allows mount/umount of Tn ZFS No datasets
+.It promote Ta subcommand Ta Must Xo
+also have the 'mount' and 'promote' ability in the origin file system
+.Xc
+.It receive Ta subcommand Ta Must also have the 'mount' and 'create' ability
+.It release Ta subcommand Ta Allows Xo
+releasing a user hold which might destroy the snapshot
+.Xc
+.It rename Ta subcommand Ta Must Xo
+also have the 'mount' and 'create' ability in the new parent
+.Xc
+.It rollback Ta subcommand Ta Must also have the 'mount' ability
+.It send Ta subcommand
+.It share Ta subcommand Ta Allows Xo
+sharing file systems over the
+.Tn NFS
+protocol
+.Xc
+.It snapshot Ta subcommand Ta Must also have the 'mount' ability
+.It groupquota Ta other Ta Allows accessing any groupquota@... property
+.It groupused Ta other Ta Allows reading any groupused@... property
+.It userprop Ta other Ta Allows changing any user property
+.It userquota Ta other Ta Allows accessing any userquota@... property
+.It userused Ta other Ta Allows reading any userused@... property
+.It aclinherit Ta property
+.It aclmode Ta property
+.It atime Ta property
+.It canmount Ta property
+.It casesensitivity Ta property
+.It checksum Ta property
+.It compression Ta property
+.It copies Ta property
+.It dedup Ta property
+.It devices Ta property
+.It exec Ta property
+.It logbias Ta property
+.It jailed Ta property
+.It mlslabel Ta property
+.It mountpoint Ta property
+.It nbmand Ta property
+.It normalization Ta property
+.It primarycache Ta property
+.It quota Ta property
+.It readonly Ta property
+.It recordsize Ta property
+.It refquota Ta property
+.It refreservation Ta property
+.It reservation Ta property
+.It secondarycache Ta property
+.It setuid Ta property
+.It sharenfs Ta property
+.It sharesmb Ta property
+.It snapdir Ta property
+.It sync Ta property
+.It utf8only Ta property
+.It version Ta property
+.It volblocksize Ta property
+.It volsize Ta property
+.It vscan Ta property
+.It xattr Ta property
+.El
+.It Xo
+.Nm
+.Cm allow
+.Fl c
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Sets "create time" permissions. These permissions are granted (locally) to the
+creator of any newly-created descendent file system.
+.It Xo
+.Nm
+.Cm allow
+.Fl s
+.Ar @setname
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Defines or adds permissions to a permission set. The set can be used by other
+.Qq Nm Cm allow
+commands for the specified file system and its descendents. Sets are evaluated
+dynamically, so changes to a set are immediately reflected. Permission sets
+follow the same naming restrictions as ZFS file systems, but the name must
+begin with an "at sign"
+.Pq Sy @ ,
+and can be no more than 64 characters long.
+.It Xo
+.Nm
+.Cm unallow
+.Op Fl rldug
+.Cm everyone Ns | Ns Ar user Ns | Ns Ar group Ns Op , Ns Ar ...
+.Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.It Xo
+.Nm
+.Cm unallow
+.Op Fl rld
+.Fl e
+.Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.It Xo
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl c
+.Op Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Removes permissions that were granted with the
+.Qq Nm Cm allow
+command. No permissions are explicitly denied, so other permissions granted
+remain in effect; for example, the same permission may still be granted by an
+ancestor. If
+no permissions are specified, then all permissions for the specified
+.Ar user , group , No or Ar everyone
+are removed. Specifying "everyone" (or using the
+.Fl e
+option) only removes the permissions that were granted to "everyone",
+not all permissions for every user and group. See the
+.Qq Nm Cm allow
+command for a description of the
+.Fl ldugec
+options.
+.Bl -tag -width indent
+.It Fl r
+Recursively remove the permissions from this file system and all descendents.
+.El
+.It Xo
+.Nm
+.Cm unallow
+.Op Fl r
+.Fl s
+.Ar @setname
+.Ar perm Ns | Ns Ar @setname Ns Op , Ns Ar ...
+.Ar filesystem Ns | Ns Ar volume
+.Xc
+.Pp
+Removes permissions from a permission set. If no permissions are specified,
+then all permissions are removed, thus removing the set entirely.
+.It Xo
+.Nm
+.Cm hold
+.Op Fl r
+.Ar tag snapshot ...
+.Xc
+.Pp
+Adds a single reference, named with the
+.Ar tag
+argument, to the specified snapshot or snapshots. Each snapshot has its own tag
+namespace, and tags must be unique within that space.
+.Pp
+If a hold exists on a snapshot, attempts to destroy that snapshot by using the
+.Qq Nm Cm destroy
+command returns
+.Em EBUSY .
+.Bl -tag -width indent
+.It Fl r
+Specifies that a hold with the given tag is applied recursively to the
+snapshots of all descendent file systems.
+.El
+.It Xo
+.Nm
+.Cm holds
+.Op Fl r
+.Ar snapshot ...
+.Xc
+.Pp
+Lists all existing user references for the given snapshot or snapshots.
+.Bl -tag -width indent
+.It Fl r
+Lists the holds that are set on the named descendent snapshots, in addition to
+listing the holds on the named snapshot.
+.El
+.It Xo
+.Nm
+.Cm release
+.Op Fl r
+.Ar tag snapshot ...
+.Xc
+.Pp
+Removes a single reference, named with the
+.Ar tag
+argument, from the specified snapshot or snapshots. The tag must already exist
+for each snapshot.
+.Bl -tag -width indent
+.It Fl r
+Recursively releases a hold with the given tag on the snapshots of all
+descendent file systems.
+.El
+.It Xo
+.Nm
+.Cm diff
+.Op Fl FHt
+.Ar snapshot
+.Op Ar snapshot Ns | Ns Ar filesystem
+.Xc
+.Pp
+Display the difference between a snapshot of a given filesystem and another
+snapshot of that filesystem from a later time or the current contents of the
+filesystem.  The first column is a character indicating the type of change,
+the other columns indicate pathname, new pathname
+.Pq in case of rename ,
+change in link count, and optionally file type and/or change time.
+.Pp
+The types of change are:
+.Bl -column -offset 2n indent
+.It \&- Ta path was removed
+.It \&+ Ta path was added
+.It \&M Ta path was modified
+.It \&R Ta path was renamed
+.El
+.Bl -tag -width indent
+.It Fl F
+Display an indication of the type of file, in a manner similar to the
+.Fl F
+option of
+.Xr ls 1 .
+.Bl -column -offset 2n indent
+.It \&B Ta block device
+.It \&C Ta character device
+.It \&F Ta regular file
+.It \&/ Ta directory
+.It \&@ Ta symbolic link
+.It \&= Ta socket
+.It \&> Ta door (not supported on Fx )
+.It \&| Ta named pipe (not supported on Fx )
+.It \&P Ta event port (not supported on Fx )
+.El
+.It Fl H
+Give more parseable tab-separated output, without header lines and without
+arrows.
+.It Fl t
+Display the path's inode change time as the first column of output.
+.El
+.It Xo
+.Nm
+.Cm jail
+.Ar jailid filesystem
+.Xc
+.Pp
+Attaches the specified
+.Ar filesystem
+to the jail identified by JID
+.Ar jailid .
+From now on this file system tree can be managed from within a jail if the
+.Sy jailed
+property has been set. To use this functionality, the jail needs the
+.Va allow.mount
+and
+.Va allow.mount.zfs
+parameters set to 1 and the
+.Va enforce_statfs
+parameter set to a value lower than 2.
+.Pp
+See
+.Xr jail 8
+for more information on managing jails and configuring the parameters above.
+.It Xo
+.Nm
+.Cm unjail
+.Ar jailid filesystem
+.Xc
+.Pp
+Detaches the specified
+.Ar filesystem
+from the jail identified by JID
+.Ar jailid .
+.El
+.Sh EXIT STATUS
+The following exit values are returned:
+.Bl -tag -offset 2n -width 2n
+.It 0
+Successful completion.
+.It 1
+An error occurred.
+.It 2
+Invalid command line options were specified.
+.El
+.Sh EXAMPLES
+.Bl -tag -width 0n
+.It Sy Example 1 No Creating a Tn ZFS No File System Hierarchy
+.Pp
+The following commands create a file system named
+.Em pool/home
+and a file system named
+.Em pool/home/bob .
+The mount point
+.Pa /home
+is set for the parent file system, and is automatically inherited by the child
+file system.
+.Bd -literal -offset 2n
+.Li # Ic zfs create pool/home
+.Li # Ic zfs set mountpoint=/home pool/home
+.Li # Ic zfs create pool/home/bob
+.Ed
+.It Sy Example 2 No Creating a Tn ZFS No Snapshot
+.Pp
+The following command creates a snapshot named
+.Sy yesterday .
+This snapshot is mounted on demand in the
+.Pa \&.zfs/snapshot
+directory at the root of the
+.Em pool/home/bob
+file system.
+.Bd -literal -offset 2n
+.Li # Ic zfs snapshot pool/home/bob@yesterday
+.Ed
+.It Sy Example 3 No Creating and Destroying Multiple Snapshots
+.Pp
+The following command creates snapshots named
+.Em yesterday
+of
+.Em pool/home
+and all of its descendent file systems. Each snapshot is mounted on demand in
+the
+.Pa \&.zfs/snapshot
+directory at the root of its file system. The second command destroys the newly
+created snapshots.
+.Bd -literal -offset 2n
+.Li # Ic zfs snapshot -r pool/home@yesterday
+.Li # Ic zfs destroy -r pool/home@yesterday
+.Ed
+.It Sy Example 4 No Disabling and Enabling File System Compression
+.Pp
+The following command disables the
+.Sy compression
+property for all file systems under
+.Em pool/home .
+The next command explicitly enables
+.Sy compression
+for
+.Em pool/home/anne .
+.Bd -literal -offset 2n
+.Li # Ic zfs set compression=off pool/home
+.Li # Ic zfs set compression=on pool/home/anne
+.Ed
+.It Sy Example 5 No Listing Tn ZFS No Datasets
+.Pp
+The following command lists all active file systems and volumes in the system.
+Snapshots are displayed if the
+.Sy listsnaps
+property is
+.Cm on .
+The default is
+.Cm off .
+See
+.Xr zpool 8
+for more information on pool properties.
+.Bd -literal -offset 2n
+.Li # Ic zfs list
+   NAME                      USED  AVAIL  REFER  MOUNTPOINT
+   pool                      450K   457G    18K  /pool
+   pool/home                 315K   457G    21K  /home
+   pool/home/anne             18K   457G    18K  /home/anne
+   pool/home/bob             276K   457G   276K  /home/bob
+.Ed
+.It Sy Example 6 No Setting a Quota on a Tn ZFS No File System
+.Pp
+The following command sets a quota of 50 Gbytes for
+.Em pool/home/bob .
+.Bd -literal -offset 2n
+.Li # Ic zfs set quota=50G pool/home/bob
+.Ed
+.It Sy Example 7 No Listing Tn ZFS No Properties
+.Pp
+The following command lists all properties for
+.Em pool/home/bob .
+.Bd -literal -offset 2n
+.Li # Ic zfs get all pool/home/bob
+NAME           PROPERTY              VALUE                  SOURCE
+pool/home/bob  type                  filesystem             -
+pool/home/bob  creation              Tue Jul 21 15:53 2009  -
+pool/home/bob  used                  21K                    -
+pool/home/bob  available             20.0G                  -
+pool/home/bob  referenced            21K                    -
+pool/home/bob  compressratio         1.00x                  -
+pool/home/bob  mounted               yes                    -
+pool/home/bob  quota                 20G                    local
+pool/home/bob  reservation           none                   default
+pool/home/bob  recordsize            128K                   default
+pool/home/bob  mountpoint            /home/bob              default
+pool/home/bob  sharenfs              off                    default
+pool/home/bob  checksum              on                     default
+pool/home/bob  compression           on                     local
+pool/home/bob  atime                 on                     default
+pool/home/bob  devices               on                     default
+pool/home/bob  exec                  on                     default
+pool/home/bob  setuid                on                     default
+pool/home/bob  readonly              off                    default
+pool/home/bob  jailed                off                    default
+pool/home/bob  snapdir               hidden                 default
+pool/home/bob  aclmode               discard                default
+pool/home/bob  aclinherit            restricted             default
+pool/home/bob  canmount              on                     default
+pool/home/bob  xattr                 on                     default
+pool/home/bob  copies                1                      default
+pool/home/bob  version               5                      -
+pool/home/bob  utf8only              off                    -
+pool/home/bob  normalization         none                   -
+pool/home/bob  casesensitivity       sensitive              -
+pool/home/bob  vscan                 off                    default
+pool/home/bob  nbmand                off                    default
+pool/home/bob  sharesmb              off                    default
+pool/home/bob  refquota              none                   default
+pool/home/bob  refreservation        none                   default
+pool/home/bob  primarycache          all                    default
+pool/home/bob  secondarycache        all                    default
+pool/home/bob  usedbysnapshots       0                      -
+pool/home/bob  usedbydataset         21K                    -
+pool/home/bob  usedbychildren        0                      -
+pool/home/bob  usedbyrefreservation  0                      -
+pool/home/bob  logbias               latency                default
+pool/home/bob  dedup                 off                    default
+pool/home/bob  mlslabel                                     -
+pool/home/bob  sync                  standard               default
+pool/home/bob  refcompressratio      1.00x                  -
+.Ed
+.Pp
+The following command gets a single property value.
+.Bd -literal -offset 2n
+.Li # Ic zfs get -H -o value compression pool/home/bob
+on
+.Ed
+.Pp
+The following command lists all properties with local settings for
+.Em pool/home/bob .
+.Bd -literal -offset 2n
+.Li # Ic zfs get -s local -o name,property,value all pool/home/bob
+NAME           PROPERTY              VALUE
+pool/home/bob  quota                 20G
+pool/home/bob  compression           on
+.Ed
+.It Sy Example 8 No Rolling Back a Tn ZFS No File System
+.Pp
+The following command reverts the contents of
+.Em pool/home/anne
+to the snapshot named
+.Em yesterday ,
+deleting all intermediate snapshots.
+.Bd -literal -offset 2n
+.Li # Ic zfs rollback -r pool/home/anne@yesterday
+.Ed
+.It Sy Example 9 No Creating a Tn ZFS No Clone
+.Pp
+The following command creates a writable file system whose initial contents are
+the same as
+.Em pool/home/bob@yesterday .
+.Bd -literal -offset 2n
+.Li # Ic zfs clone pool/home/bob@yesterday pool/clone
+.Ed
+.It Sy Example 10 No Promoting a Tn ZFS No Clone
+.Pp
+The following commands illustrate how to test out changes to a file system, and
+then replace the original file system with the changed one, using clones, clone
+promotion, and renaming:
+.Bd -literal -offset 2n
+.Li # Ic zfs create pool/project/production
+.Ed
+.Pp
+Populate
+.Pa /pool/project/production
+with data and continue with the following commands:
+.Bd -literal -offset 2n
+.Li # Ic zfs snapshot pool/project/production@today
+.Li # Ic zfs clone pool/project/production@today pool/project/beta
+.Ed
+.Pp
+Now make changes to
+.Pa /pool/project/beta
+and continue with the following commands:
+.Bd -literal -offset 2n
+.Li # Ic zfs promote pool/project/beta
+.Li # Ic zfs rename pool/project/production pool/project/legacy
+.Li # Ic zfs rename pool/project/beta pool/project/production
+.Ed
+.Pp
+Once the legacy version is no longer needed, it can be destroyed.
+.Bd -literal -offset 2n
+.Li # Ic zfs destroy pool/project/legacy
+.Ed
+.It Sy Example 11 No Inheriting Tn ZFS No Properties
+.Pp
+The following command causes
+.Em pool/home/bob
+and
+.Em pool/home/anne
+to inherit the
+.Sy checksum
+property from their parent.
+.Bd -literal -offset 2n
+.Li # Ic zfs inherit checksum pool/home/bob pool/home/anne
+.Ed
+.It Sy Example 12 No Remotely Replicating Tn ZFS No Data
+.Pp
+The following commands send a full stream and then an incremental stream to a
+remote machine, restoring them into
+.Sy poolB/received/fs@a
+and
+.Sy poolB/received/fs@b ,
+respectively.
+.Sy poolB
+must contain the file system
+.Sy poolB/received ,
+and must not initially contain
+.Sy poolB/received/fs .
+.Bd -literal -offset 2n
+.Li # Ic zfs send pool/fs@a | ssh host zfs receive poolB/received/fs@a
+.Li # Ic zfs send -i a pool/fs@b | ssh host zfs receive poolB/received/fs
+.Ed
+.It Xo
+.Sy Example 13
+Using the
+.Qq zfs receive -d
+Option
+.Xc
+.Pp
+The following command sends a full stream of
+.Sy poolA/fsA/fsB@snap
+to a remote machine, receiving it into
+.Sy poolB/received/fsA/fsB@snap .
+The
+.Sy fsA/fsB@snap
+portion of the received snapshot's name is determined from the name of the sent
+snapshot.
+.Sy poolB
+must contain the file system
+.Sy poolB/received .
+If
+.Sy poolB/received/fsA
+does not exist, it is created as an empty file system.
+.Bd -literal -offset 2n
+.Li # Ic zfs send poolA/fsA/fsB@snap | ssh host zfs receive -d poolB/received
+.Ed
+.It Sy Example 14 No Setting User Properties
+.Pp
+The following example sets the user-defined
+.Sy com.example:department
+property for a dataset.
+.Bd -literal -offset 2n
+.Li # Ic zfs set com.example:department=12345 tank/accounting
+.Ed
+.It Sy Example 15 No Performing a Rolling Snapshot
+.Pp
+The following example shows how to maintain a history of snapshots with a
+consistent naming scheme. To keep a week's worth of snapshots, the user
+destroys the oldest snapshot, renames the remaining snapshots, and then creates
+a new snapshot, as follows:
+.Bd -literal -offset 2n
+.Li # Ic zfs destroy -r pool/users@7daysago
+.Li # Ic zfs rename -r pool/users@6daysago @7daysago
+.Li # Ic zfs rename -r pool/users@5daysago @6daysago
+.Li # Ic zfs rename -r pool/users@4daysago @5daysago
+.Li # Ic zfs rename -r pool/users@3daysago @4daysago
+.Li # Ic zfs rename -r pool/users@2daysago @3daysago
+.Li # Ic zfs rename -r pool/users@yesterday @2daysago
+.Li # Ic zfs rename -r pool/users@today @yesterday
+.Li # Ic zfs snapshot -r pool/users@today
+.Ed
+.It Xo
+.Sy Example 16
+Setting
+.Qq sharenfs
+Property Options on a ZFS File System
+.Xc
+.Pp
+The following command shows how to set
+.Sy sharenfs
+property options to enable root access for a specific network on the
+.Em tank/home
+file system. The contents of the
+.Sy sharenfs
+property are valid
+.Xr exports 5
+options.
+.Bd -literal -offset 2n
+.Li # Ic zfs set sharenfs="maproot=root,network 192.168.0.0/24" tank/home
+.Ed
+.Pp
+Another way to write this command with the same result is:
+.Bd -literal -offset 2n
+.Li # Ic zfs set sharenfs="-maproot=root -network 192.168.0.0/24" tank/home
+.Ed
+.It Xo
+.Sy Example 17
+Delegating
+.Tn ZFS
+Administration Permissions on a
+.Tn ZFS
+Dataset
+.Xc
+.Pp
+The following example shows how to set permissions so that user
+.Em cindys
+can create, destroy, mount, and take snapshots on
+.Em tank/cindys .
+The permissions on
+.Em tank/cindys
+are also displayed.
+.Bd -literal -offset 2n
+.Li # Ic zfs allow cindys create,destroy,mount,snapshot tank/cindys
+.Li # Ic zfs allow tank/cindys
+-------------------------------------------------------------
+Local+Descendent permissions on (tank/cindys)
+          user cindys create,destroy,mount,snapshot
+-------------------------------------------------------------
+.Ed
+.It Sy Example 18 No Delegating Create Time Permissions on a Tn ZFS No Dataset
+.Pp
+The following example shows how to grant anyone in the group
+.Em staff
+permission to create file systems in
+.Em tank/users .
+This syntax also allows staff members to destroy their own file systems, but
+not destroy anyone else's file system. The permissions on
+.Em tank/users
+are also displayed.
+.Bd -literal -offset 2n
+.Li # Ic zfs allow staff create,mount tank/users
+.Li # Ic zfs allow -c destroy tank/users
+.Li # Ic zfs allow tank/users
+-------------------------------------------------------------
+Create time permissions on (tank/users)
+          create,destroy
+Local+Descendent permissions on (tank/users)
+          group staff create,mount
+-------------------------------------------------------------
+.Ed
+.It Xo
+.Sy Example 19
+Defining and Granting a Permission Set on a
+.Tn ZFS
+Dataset
+.Xc
+.Pp
+The following example shows how to define and grant a permission set on the
+.Em tank/users
+file system. The permissions on
+.Em tank/users
+are also displayed.
+.Bd -literal -offset 2n
+.Li # Ic zfs allow -s @pset create,destroy,snapshot,mount tank/users
+.Li # Ic zfs allow staff @pset tank/users
+.Li # Ic zfs allow tank/users
+-------------------------------------------------------------
+Permission sets on (tank/users)
+        @pset create,destroy,mount,snapshot
+Create time permissions on (tank/users)
+        create,destroy
+Local+Descendent permissions on (tank/users)
+        group staff @pset,create,mount
+-------------------------------------------------------------
+.Ed
+.It Sy Example 20 No Delegating Property Permissions on a Tn ZFS No Dataset
+.Pp
+The following example shows how to grant the ability to set quotas and reservations
+on the
+.Sy users/home
+file system. The permissions on
+.Sy users/home
+are also displayed.
+.Bd -literal -offset 2n
+.Li # Ic zfs allow cindys quota,reservation users/home
+.Li # Ic zfs allow cindys
+-------------------------------------------------------------
+Local+Descendent permissions on (users/home)
+        user cindys quota,reservation
+-------------------------------------------------------------
+.Li # Ic su - cindys
+.Li cindys% Ic zfs set quota=10G users/home/marks
+.Li cindys% Ic zfs get quota users/home/marks
+NAME              PROPERTY  VALUE             SOURCE
+users/home/marks  quota     10G               local
+.Ed
+.It Sy Example 21 No Removing ZFS Delegated Permissions on a Tn ZFS No Dataset
+.Pp
+The following example shows how to remove the snapshot permission from the
+.Em staff
+group on the
+.Em tank/users
+file system. The permissions on
+.Em tank/users
+are also displayed.
+.Bd -literal -offset 2n
+.Li # Ic zfs unallow staff snapshot tank/users
+.Li # Ic zfs allow tank/users
+-------------------------------------------------------------
+Permission sets on (tank/users)
+        @pset create,destroy,mount,snapshot
+Create time permissions on (tank/users)
+        create,destroy
+Local+Descendent permissions on (tank/users)
+        group staff @pset,create,mount
+-------------------------------------------------------------
+.Ed
+.It Sy Example 22 No Showing the differences between a snapshot and a ZFS Dataset
+.Pp
+The following example shows how to see what has changed between a prior
+snapshot of a ZFS Dataset and its current state.  The
+.Fl F
+option is used to indicate type information for the files affected.
+.Bd -literal -offset 2n
+.Li # Ic zfs diff tank/test@before tank/test
+M       /       /tank/test/
+M       F       /tank/test/linked      (+1)
+R       F       /tank/test/oldname -> /tank/test/newname
+-       F       /tank/test/deleted
++       F       /tank/test/created
+M       F       /tank/test/modified
+.Ed
+.El
+.Sh SEE ALSO
+.Xr chmod 2 ,
+.Xr fsync 2 ,
+.Xr exports 5 ,
+.Xr fstab 5 ,
+.Xr rc.conf 5 ,
+.Xr jail 8 ,
+.Xr mount 8 ,
+.Xr umount 8 ,
+.Xr zpool 8
+.Sh AUTHORS
+This manual page is a
+.Xr mdoc 7
+reimplementation of the
+.Tn OpenSolaris
+manual page
+.Em zfs(1M) ,
+modified and customized for
+.Fx
+and licensed under the
+Common Development and Distribution License
+.Pq Tn CDDL .
+.Pp
+The
+.Xr mdoc 7
+implementation of this manual page was initially written by
+.An Martin Matuska Aq mm@FreeBSD.org .
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_iter.c b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_iter.c
new file mode 100644
index 0000000000000000000000000000000000000000..62cd9d01e89cf22608d8742bbb17d10fe82baecc
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_iter.c
@@ -0,0 +1,490 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ */
+
+#include <libintl.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+
+#include <libzfs.h>
+
+#include "zfs_util.h"
+#include "zfs_iter.h"
+
+/*
+ * This is a private interface used to gather up all the datasets specified on
+ * the command line so that we can iterate over them in order.
+ *
+ * First, we iterate over all filesystems, gathering them together into an
+ * AVL tree.  We report errors for any explicitly specified datasets
+ * that we couldn't open.
+ *
+ * When finished, we have an AVL tree of ZFS handles.  We go through and execute
+ * the provided callback for each one, passing whatever data the user supplied.
+ */
+
+/* One AVL tree entry: an open dataset handle. */
+typedef struct zfs_node {
+	zfs_handle_t	*zn_handle;
+	uu_avl_node_t	zn_avlnode;
+} zfs_node_t;
+
+/* State threaded through zfs_callback() while the AVL tree is built. */
+typedef struct callback_data {
+	uu_avl_t		*cb_avl;	/* tree being populated */
+	int			cb_flags;	/* ZFS_ITER_* flags */
+	zfs_type_t		cb_types;	/* dataset types to collect */
+	zfs_sort_column_t	*cb_sortcol;	/* sort spec, passed to uu_avl_find() */
+	zprop_list_t		**cb_proplist;	/* properties to retain/expand */
+	int			cb_depth_limit;	/* max recursion depth when limited */
+	int			cb_depth;	/* current recursion depth */
+	uint8_t			cb_props_table[ZFS_NUM_PROPS];	/* props to keep when pruning */
+} callback_data_t;
+
+/* AVL node pool; created and destroyed in zfs_for_each(). */
+uu_avl_pool_t *avl_pool;
+
+/*
+ * Include snaps if they were requested or if this is a zfs list where types
+ * were not specified and the "listsnapshots" property is set on this pool.
+ */
+static int
+zfs_include_snapshots(zfs_handle_t *zhp, callback_data_t *cb)
+{
+	zpool_handle_t *zph;
+
+	/* Snapshot types were requested explicitly on the command line. */
+	if ((cb->cb_flags & ZFS_ITER_PROP_LISTSNAPS) == 0)
+		return (cb->cb_types & ZFS_TYPE_SNAPSHOT);
+
+	/* Otherwise defer to the pool's "listsnapshots" property. */
+	zph = zfs_get_pool_handle(zhp);
+	return (zpool_get_prop_int(zph, ZPOOL_PROP_LISTSNAPS, NULL));
+}
+
+/*
+ * Called for each dataset.  If the object is of an appropriate type,
+ * add it to the avl tree and recurse over any children as necessary.
+ */
+static int
+zfs_callback(zfs_handle_t *zhp, void *data)
+{
+	callback_data_t *cb = data;
+	int dontclose = 0;
+	int include_snaps = zfs_include_snapshots(zhp, cb);
+
+	/* Insert the handle if it is one of the requested types. */
+	if ((zfs_get_type(zhp) & cb->cb_types) ||
+	    ((zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) && include_snaps)) {
+		uu_avl_index_t idx;
+		zfs_node_t *node = safe_malloc(sizeof (zfs_node_t));
+
+		node->zn_handle = zhp;
+		uu_avl_node_init(node, &node->zn_avlnode, avl_pool);
+		if (uu_avl_find(cb->cb_avl, node, cb->cb_sortcol,
+		    &idx) == NULL) {
+			if (cb->cb_proplist) {
+				/*
+				 * Keep only the properties the caller asked
+				 * for; all others are pruned from the handle.
+				 */
+				if ((*cb->cb_proplist) &&
+				    !(*cb->cb_proplist)->pl_all)
+					zfs_prune_proplist(zhp,
+					    cb->cb_props_table);
+
+				/*
+				 * NOTE(review): on this error path zhp is not
+				 * closed -- confirm whether that is intended.
+				 */
+				if (zfs_expand_proplist(zhp, cb->cb_proplist,
+				    (cb->cb_flags & ZFS_ITER_RECVD_PROPS))
+				    != 0) {
+					free(node);
+					return (-1);
+				}
+			}
+			uu_avl_insert(cb->cb_avl, node, idx);
+			/* The tree now owns zhp; don't close it below. */
+			dontclose = 1;
+		} else {
+			/* Duplicate of an existing entry; discard it. */
+			free(node);
+		}
+	}
+
+	/*
+	 * Recurse if necessary.
+	 */
+	if (cb->cb_flags & ZFS_ITER_RECURSE &&
+	    ((cb->cb_flags & ZFS_ITER_DEPTH_LIMIT) == 0 ||
+	    cb->cb_depth < cb->cb_depth_limit)) {
+		cb->cb_depth++;
+		if (zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM)
+			(void) zfs_iter_filesystems(zhp, zfs_callback, data);
+		if ((zfs_get_type(zhp) != ZFS_TYPE_SNAPSHOT) && include_snaps) {
+			(void) zfs_iter_snapshots(zhp,
+			    (cb->cb_flags & ZFS_ITER_SIMPLE) != 0, zfs_callback,
+			    data);
+		}
+		cb->cb_depth--;
+	}
+
+	/* Handles not inserted into the tree are closed here. */
+	if (!dontclose)
+		zfs_close(zhp);
+
+	return (0);
+}
+
+/*
+ * Append a sort column for property 'name' (descending if 'reverse') to the
+ * tail of the list headed at *sc.  Returns 0 on success, -1 if 'name' is
+ * neither a native nor a user-defined property name.
+ */
+int
+zfs_add_sort_column(zfs_sort_column_t **sc, const char *name,
+    boolean_t reverse)
+{
+	zfs_sort_column_t *col;
+	zfs_prop_t prop;
+
+	if ((prop = zfs_name_to_prop(name)) == ZPROP_INVAL &&
+	    !zfs_prop_user(name))
+		return (-1);
+
+	/* NOTE(review): assumes safe_malloc() zeroes, so sc_next stays NULL. */
+	col = safe_malloc(sizeof (zfs_sort_column_t));
+
+	col->sc_prop = prop;
+	col->sc_reverse = reverse;
+	if (prop == ZPROP_INVAL) {
+		/* User property: keep a private copy of its name. */
+		col->sc_user_prop = safe_malloc(strlen(name) + 1);
+		(void) strcpy(col->sc_user_prop, name);
+	}
+
+	if (*sc == NULL) {
+		/* First column: the head's sc_last caches the list tail. */
+		col->sc_last = col;
+		*sc = col;
+	} else {
+		(*sc)->sc_last->sc_next = col;
+		(*sc)->sc_last = col;
+	}
+
+	return (0);
+}
+
+/*
+ * Release a sort-column list built by zfs_add_sort_column(), including any
+ * duplicated user-property names.
+ */
+void
+zfs_free_sort_columns(zfs_sort_column_t *sc)
+{
+	zfs_sort_column_t *col;
+
+	while (sc != NULL) {
+		col = sc->sc_next;		/* save next before freeing */
+		free(sc->sc_user_prop);		/* may be NULL */
+		free(sc);
+		sc = col;
+	}
+}
+
+/*
+ * Return B_TRUE iff the sort specification consists of exactly one column
+ * and that column is the dataset name property.
+ */
+boolean_t
+zfs_sort_only_by_name(const zfs_sort_column_t *sc)
+{
+
+	return (sc != NULL && sc->sc_next == NULL &&
+	    sc->sc_prop == ZFS_PROP_NAME);
+}
+
+/* ARGSUSED */
+/*
+ * Default comparator: order by dataset name, grouping each dataset's
+ * snapshots (ordered by creation txg) directly after the dataset itself.
+ */
+static int
+zfs_compare(const void *larg, const void *rarg, void *unused)
+{
+	zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
+	zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
+	const char *lname = zfs_get_name(l);
+	const char *rname = zfs_get_name(r);
+	char *lat, *rat;
+	uint64_t lcreate, rcreate;
+	int ret;
+
+	/*
+	 * Temporarily truncate each name at its '@' so the strcmp below sees
+	 * only the dataset portion; the '@' is restored before returning.
+	 */
+	lat = (char *)strchr(lname, '@');
+	rat = (char *)strchr(rname, '@');
+
+	if (lat != NULL)
+		*lat = '\0';
+	if (rat != NULL)
+		*rat = '\0';
+
+	ret = strcmp(lname, rname);
+	if (ret == 0) {
+		/*
+		 * If we're comparing a dataset to one of its snapshots, we
+		 * always make the full dataset first.
+		 */
+		if (lat == NULL) {
+			ret = -1;
+		} else if (rat == NULL) {
+			ret = 1;
+		} else {
+			/*
+			 * If we have two snapshots from the same dataset, then
+			 * we want to sort them according to creation time.  We
+			 * use the hidden CREATETXG property to get an absolute
+			 * ordering of snapshots.
+			 */
+			lcreate = zfs_prop_get_int(l, ZFS_PROP_CREATETXG);
+			rcreate = zfs_prop_get_int(r, ZFS_PROP_CREATETXG);
+
+			/*
+			 * Both lcreate and rcreate being 0 means we don't have
+			 * properties and we should compare full name.
+			 */
+			if (lcreate == 0 && rcreate == 0)
+				ret = strcmp(lat + 1, rat + 1);
+			else if (lcreate < rcreate)
+				ret = -1;
+			else if (lcreate > rcreate)
+				ret = 1;
+		}
+	}
+
+	/* Restore the snapshot delimiters truncated above. */
+	if (lat != NULL)
+		*lat = '@';
+	if (rat != NULL)
+		*rat = '@';
+
+	return (ret);
+}
+
+/*
+ * Sort datasets by specified columns.
+ *
+ * o  Numeric types sort in ascending order.
+ * o  String types sort in alphabetical order.
+ * o  Types inappropriate for a row sort that row to the literal
+ *    bottom, regardless of the specified ordering.
+ *
+ * If no sort columns are specified, or two datasets compare equally
+ * across all specified columns, they are sorted alphabetically by name
+ * with snapshots grouped under their parents.
+ */
+static int
+zfs_sort(const void *larg, const void *rarg, void *data)
+{
+	zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
+	zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
+	zfs_sort_column_t *sc = (zfs_sort_column_t *)data;
+	zfs_sort_column_t *psc;
+
+	for (psc = sc; psc != NULL; psc = psc->sc_next) {
+		char lbuf[ZFS_MAXPROPLEN], rbuf[ZFS_MAXPROPLEN];
+		char *lstr, *rstr;
+		uint64_t lnum, rnum;
+		boolean_t lvalid, rvalid;
+		int ret = 0;
+
+		/*
+		 * We group the checks below the generic code.  If 'lstr' and
+		 * 'rstr' are non-NULL, then we do a string based comparison.
+		 * Otherwise, we compare 'lnum' and 'rnum'.
+		 */
+		lstr = rstr = NULL;
+		if (psc->sc_prop == ZPROP_INVAL) {
+			/* User-defined property: look it up in the nvlists. */
+			nvlist_t *luser, *ruser;
+			nvlist_t *lval, *rval;
+
+			luser = zfs_get_user_props(l);
+			ruser = zfs_get_user_props(r);
+
+			lvalid = (nvlist_lookup_nvlist(luser,
+			    psc->sc_user_prop, &lval) == 0);
+			rvalid = (nvlist_lookup_nvlist(ruser,
+			    psc->sc_user_prop, &rval) == 0);
+
+			if (lvalid)
+				verify(nvlist_lookup_string(lval,
+				    ZPROP_VALUE, &lstr) == 0);
+			if (rvalid)
+				verify(nvlist_lookup_string(rval,
+				    ZPROP_VALUE, &rstr) == 0);
+		} else if (psc->sc_prop == ZFS_PROP_NAME) {
+			/* Dataset name: compare the full names as strings. */
+			lvalid = rvalid = B_TRUE;
+
+			(void) strlcpy(lbuf, zfs_get_name(l), sizeof(lbuf));
+			(void) strlcpy(rbuf, zfs_get_name(r), sizeof(rbuf));
+
+			lstr = lbuf;
+			rstr = rbuf;
+		} else if (zfs_prop_is_string(psc->sc_prop)) {
+			/* Native string-valued property. */
+			lvalid = (zfs_prop_get(l, psc->sc_prop, lbuf,
+			    sizeof (lbuf), NULL, NULL, 0, B_TRUE) == 0);
+			rvalid = (zfs_prop_get(r, psc->sc_prop, rbuf,
+			    sizeof (rbuf), NULL, NULL, 0, B_TRUE) == 0);
+
+			lstr = lbuf;
+			rstr = rbuf;
+		} else {
+			/* Native numeric property, if valid for this type. */
+			lvalid = zfs_prop_valid_for_type(psc->sc_prop,
+			    zfs_get_type(l));
+			rvalid = zfs_prop_valid_for_type(psc->sc_prop,
+			    zfs_get_type(r));
+
+			if (lvalid)
+				(void) zfs_prop_get_numeric(l, psc->sc_prop,
+				    &lnum, NULL, NULL, 0);
+			if (rvalid)
+				(void) zfs_prop_get_numeric(r, psc->sc_prop,
+				    &rnum, NULL, NULL, 0);
+		}
+
+		/* Rows lacking the value sort to the bottom, per above. */
+		if (!lvalid && !rvalid)
+			continue;
+		else if (!lvalid)
+			return (1);
+		else if (!rvalid)
+			return (-1);
+
+		if (lstr)
+			ret = strcmp(lstr, rstr);
+		else if (lnum < rnum)
+			ret = -1;
+		else if (lnum > rnum)
+			ret = 1;
+
+		if (ret != 0) {
+			/* sc_reverse flips the ordering for this column. */
+			if (psc->sc_reverse == B_TRUE)
+				ret = (ret < 0) ? 1 : -1;
+			return (ret);
+		}
+	}
+
+	/* All columns tied: fall back to the default name ordering. */
+	return (zfs_compare(larg, rarg, NULL));
+}
+
+/*
+ * Gather the datasets named in argv (or all datasets if argc == 0) into an
+ * AVL tree sorted by 'sortcol', then invoke 'callback' on each one in order.
+ * Returns 0 on success, nonzero if any open or callback invocation failed.
+ */
+int
+zfs_for_each(int argc, char **argv, int flags, zfs_type_t types,
+    zfs_sort_column_t *sortcol, zprop_list_t **proplist, int limit,
+    zfs_iter_f callback, void *data)
+{
+	callback_data_t cb = {0};
+	int ret = 0;
+	zfs_node_t *node;
+	uu_avl_walk_t *walk;
+
+	/* The pool is global so zfs_callback() can init nodes from it. */
+	avl_pool = uu_avl_pool_create("zfs_pool", sizeof (zfs_node_t),
+	    offsetof(zfs_node_t, zn_avlnode), zfs_sort, UU_DEFAULT);
+
+	if (avl_pool == NULL)
+		nomem();
+
+	cb.cb_sortcol = sortcol;
+	cb.cb_flags = flags;
+	cb.cb_proplist = proplist;
+	cb.cb_types = types;
+	cb.cb_depth_limit = limit;
+	/*
+	 * If cb_proplist is provided then in the zfs_handles created we
+	 * retain only those properties listed in cb_proplist and sortcol.
+	 * The rest are pruned. So, the caller should make sure that no other
+	 * properties other than those listed in cb_proplist/sortcol are
+	 * accessed.
+	 *
+	 * If cb_proplist is NULL then we retain all the properties.  We
+	 * always retain the zoned property, which some other properties
+	 * need (userquota & friends), and the createtxg property, which
+	 * we need to sort snapshots.
+	 */
+	if (cb.cb_proplist && *cb.cb_proplist) {
+		zprop_list_t *p = *cb.cb_proplist;
+
+		while (p) {
+			if (p->pl_prop >= ZFS_PROP_TYPE &&
+			    p->pl_prop < ZFS_NUM_PROPS) {
+				cb.cb_props_table[p->pl_prop] = B_TRUE;
+			}
+			p = p->pl_next;
+		}
+
+		while (sortcol) {
+			if (sortcol->sc_prop >= ZFS_PROP_TYPE &&
+			    sortcol->sc_prop < ZFS_NUM_PROPS) {
+				cb.cb_props_table[sortcol->sc_prop] = B_TRUE;
+			}
+			sortcol = sortcol->sc_next;
+		}
+
+		cb.cb_props_table[ZFS_PROP_ZONED] = B_TRUE;
+		cb.cb_props_table[ZFS_PROP_CREATETXG] = B_TRUE;
+	} else {
+		(void) memset(cb.cb_props_table, B_TRUE,
+		    sizeof (cb.cb_props_table));
+	}
+
+	if ((cb.cb_avl = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
+		nomem();
+
+	if (argc == 0) {
+		/*
+		 * If given no arguments, iterate over all datasets.
+		 */
+		cb.cb_flags |= ZFS_ITER_RECURSE;
+		ret = zfs_iter_root(g_zfs, zfs_callback, &cb);
+	} else {
+		int i;
+		zfs_handle_t *zhp;
+		zfs_type_t argtype;
+
+		/*
+		 * If we're recursive, then we always allow filesystems as
+		 * arguments.  If we also are interested in snapshots, then we
+		 * can take volumes as well.
+		 */
+		argtype = types;
+		if (flags & ZFS_ITER_RECURSE) {
+			argtype |= ZFS_TYPE_FILESYSTEM;
+			if (types & ZFS_TYPE_SNAPSHOT)
+				argtype |= ZFS_TYPE_VOLUME;
+		}
+
+		for (i = 0; i < argc; i++) {
+			if (flags & ZFS_ITER_ARGS_CAN_BE_PATHS) {
+				zhp = zfs_path_to_zhandle(g_zfs, argv[i],
+				    argtype);
+			} else {
+				zhp = zfs_open(g_zfs, argv[i], argtype);
+			}
+			/* Keep going on failure, but remember it in ret. */
+			if (zhp != NULL)
+				ret |= zfs_callback(zhp, &cb);
+			else
+				ret = 1;
+		}
+	}
+
+	/*
+	 * At this point we've got our AVL tree full of zfs handles, so iterate
+	 * over each one and execute the real user callback.
+	 */
+	for (node = uu_avl_first(cb.cb_avl); node != NULL;
+	    node = uu_avl_next(cb.cb_avl, node))
+		ret |= callback(node->zn_handle, data);
+
+	/*
+	 * Finally, clean up the AVL tree.
+	 */
+	if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
+		nomem();
+
+	while ((node = uu_avl_walk_next(walk)) != NULL) {
+		uu_avl_remove(cb.cb_avl, node);
+		zfs_close(node->zn_handle);
+		free(node);
+	}
+
+	uu_avl_walk_end(walk);
+	uu_avl_destroy(cb.cb_avl);
+	uu_avl_pool_destroy(avl_pool);
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_iter.h b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_iter.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2873743cca9dd1fdc1deeeacddfaca283b85722
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_iter.h
@@ -0,0 +1,58 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef	ZFS_ITER_H
+#define	ZFS_ITER_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/* One column of a multi-key sort specification for dataset iteration. */
+typedef struct zfs_sort_column {
+	struct zfs_sort_column	*sc_next;	/* next column in sort order */
+	struct zfs_sort_column	*sc_last;	/* list tail; valid on head only */
+	zfs_prop_t		sc_prop;	/* ZPROP_INVAL for user properties */
+	char			*sc_user_prop;	/* set when sc_prop == ZPROP_INVAL */
+	boolean_t		sc_reverse;	/* sort this column descending */
+} zfs_sort_column_t;
+
+/* Flags accepted by zfs_for_each(). */
+#define	ZFS_ITER_RECURSE	   (1 << 0)	/* descend into child datasets */
+#define	ZFS_ITER_ARGS_CAN_BE_PATHS (1 << 1)	/* args may be filesystem paths */
+#define	ZFS_ITER_PROP_LISTSNAPS    (1 << 2)	/* honor pool "listsnapshots" */
+#define	ZFS_ITER_DEPTH_LIMIT	   (1 << 3)	/* obey the depth argument */
+#define	ZFS_ITER_RECVD_PROPS	   (1 << 4)	/* include received properties */
+#define	ZFS_ITER_SIMPLE		   (1 << 5)	/* simple snapshot iteration */
+
+int zfs_for_each(int, char **, int options, zfs_type_t,
+    zfs_sort_column_t *, zprop_list_t **, int, zfs_iter_f, void *);
+int zfs_add_sort_column(zfs_sort_column_t **, const char *, boolean_t);
+void zfs_free_sort_columns(zfs_sort_column_t *);
+boolean_t zfs_sort_only_by_name(const zfs_sort_column_t *);
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* ZFS_ITER_H */
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c
new file mode 100644
index 0000000000000000000000000000000000000000..1dfc82d44e7857f7efd52a309d74e452c8687bdb
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c
@@ -0,0 +1,6687 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright 2012 Milan Jurik. All rights reserved.
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2011-2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <libnvpair.h>
+#include <locale.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <zone.h>
+#include <grp.h>
+#include <pwd.h>
+#include <signal.h>
+#include <sys/list.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/fs/zfs.h>
+#include <sys/types.h>
+#include <time.h>
+
+#include <libzfs.h>
+#include <zfs_prop.h>
+#include <zfs_deleg.h>
+#include <libuutil.h>
+#ifdef sun
+#include <aclutils.h>
+#include <directory.h>
+#endif
+
+#include "zfs_iter.h"
+#include "zfs_util.h"
+#include "zfs_comutil.h"
+
+libzfs_handle_t *g_zfs;
+
+static FILE *mnttab_file;
+static char history_str[HIS_MAX_RECORD_LEN];
+
+static int zfs_do_clone(int argc, char **argv);
+static int zfs_do_create(int argc, char **argv);
+static int zfs_do_destroy(int argc, char **argv);
+static int zfs_do_get(int argc, char **argv);
+static int zfs_do_inherit(int argc, char **argv);
+static int zfs_do_list(int argc, char **argv);
+static int zfs_do_mount(int argc, char **argv);
+static int zfs_do_rename(int argc, char **argv);
+static int zfs_do_rollback(int argc, char **argv);
+static int zfs_do_set(int argc, char **argv);
+static int zfs_do_upgrade(int argc, char **argv);
+static int zfs_do_snapshot(int argc, char **argv);
+static int zfs_do_unmount(int argc, char **argv);
+static int zfs_do_share(int argc, char **argv);
+static int zfs_do_unshare(int argc, char **argv);
+static int zfs_do_send(int argc, char **argv);
+static int zfs_do_receive(int argc, char **argv);
+static int zfs_do_promote(int argc, char **argv);
+static int zfs_do_userspace(int argc, char **argv);
+static int zfs_do_allow(int argc, char **argv);
+static int zfs_do_unallow(int argc, char **argv);
+static int zfs_do_hold(int argc, char **argv);
+static int zfs_do_holds(int argc, char **argv);
+static int zfs_do_release(int argc, char **argv);
+static int zfs_do_diff(int argc, char **argv);
+static int zfs_do_jail(int argc, char **argv);
+static int zfs_do_unjail(int argc, char **argv);
+
+/*
+ * Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
+ */
+
+#ifdef DEBUG
+/*
+ * Hook consumed by libumem: supplies the default $UMEM_DEBUG setting
+ * when the environment variable is not set (DEBUG builds only).
+ */
+const char *
+_umem_debug_init(void)
+{
+	return ("default,verbose"); /* $UMEM_DEBUG setting */
+}
+
+/* Hook consumed by libumem: default $UMEM_LOGGING setting. */
+const char *
+_umem_logging_init(void)
+{
+	return ("fail,contents"); /* $UMEM_LOGGING setting */
+}
+#endif
+
+/*
+ * Usage-message selector: one value per zfs subcommand synopsis,
+ * mapped to its text by get_usage().
+ */
+typedef enum {
+	HELP_CLONE,
+	HELP_CREATE,
+	HELP_DESTROY,
+	HELP_GET,
+	HELP_INHERIT,
+	HELP_UPGRADE,
+	HELP_JAIL,
+	HELP_UNJAIL,
+	HELP_LIST,
+	HELP_MOUNT,
+	HELP_PROMOTE,
+	HELP_RECEIVE,
+	HELP_RENAME,
+	HELP_ROLLBACK,
+	HELP_SEND,
+	HELP_SET,
+	HELP_SHARE,
+	HELP_SNAPSHOT,
+	HELP_UNMOUNT,
+	HELP_UNSHARE,
+	HELP_ALLOW,
+	HELP_UNALLOW,
+	HELP_USERSPACE,
+	HELP_GROUPSPACE,
+	HELP_HOLD,
+	HELP_HOLDS,
+	HELP_RELEASE,
+	HELP_DIFF,
+} zfs_help_t;
+
+/* One entry of the subcommand dispatch table below. */
+typedef struct zfs_command {
+	const char	*name;	/* subcommand name as typed by the user */
+	int		(*func)(int argc, char **argv);	/* handler */
+	zfs_help_t	usage;	/* selector for get_usage() */
+} zfs_command_t;
+
+/*
+ * Master command table.  Each ZFS command has a name, associated function, and
+ * usage message.  The usage messages need to be internationalized, so we have
+ * to have a function to return the usage message based on a command index.
+ *
+ * These commands are organized according to how they are displayed in the usage
+ * message.  An empty command (one with a NULL name) indicates an empty line in
+ * the generic usage message.
+ */
+static zfs_command_t command_table[] = {
+	{ "create",	zfs_do_create,		HELP_CREATE		},
+	{ "destroy",	zfs_do_destroy,		HELP_DESTROY		},
+	{ NULL },
+	{ "snapshot",	zfs_do_snapshot,	HELP_SNAPSHOT		},
+	{ "rollback",	zfs_do_rollback,	HELP_ROLLBACK		},
+	{ "clone",	zfs_do_clone,		HELP_CLONE		},
+	{ "promote",	zfs_do_promote,		HELP_PROMOTE		},
+	{ "rename",	zfs_do_rename,		HELP_RENAME		},
+	{ NULL },
+	{ "list",	zfs_do_list,		HELP_LIST		},
+	{ NULL },
+	{ "set",	zfs_do_set,		HELP_SET		},
+	{ "get",	zfs_do_get,		HELP_GET		},
+	{ "inherit",	zfs_do_inherit,		HELP_INHERIT		},
+	{ "upgrade",	zfs_do_upgrade,		HELP_UPGRADE		},
+	{ "userspace",	zfs_do_userspace,	HELP_USERSPACE		},
+	{ "groupspace",	zfs_do_userspace,	HELP_GROUPSPACE		},
+	{ NULL },
+	{ "mount",	zfs_do_mount,		HELP_MOUNT		},
+	{ "unmount",	zfs_do_unmount,		HELP_UNMOUNT		},
+	{ "share",	zfs_do_share,		HELP_SHARE		},
+	{ "unshare",	zfs_do_unshare,		HELP_UNSHARE		},
+	{ NULL },
+	{ "send",	zfs_do_send,		HELP_SEND		},
+	{ "receive",	zfs_do_receive,		HELP_RECEIVE		},
+	{ NULL },
+	{ "allow",	zfs_do_allow,		HELP_ALLOW		},
+	{ NULL },
+	{ "unallow",	zfs_do_unallow,		HELP_UNALLOW		},
+	{ NULL },
+	{ "hold",	zfs_do_hold,		HELP_HOLD		},
+	{ "holds",	zfs_do_holds,		HELP_HOLDS		},
+	{ "release",	zfs_do_release,		HELP_RELEASE		},
+	{ "diff",	zfs_do_diff,		HELP_DIFF		},
+	{ NULL },
+	{ "jail",	zfs_do_jail,		HELP_JAIL		},
+	{ "unjail",	zfs_do_unjail,		HELP_UNJAIL		},
+};
+
+#define	NCOMMAND	(sizeof (command_table) / sizeof (command_table[0]))
+
+zfs_command_t *current_command;
+
+/*
+ * Return the localized usage synopsis for the given subcommand.
+ * Aborts if 'idx' is not a known zfs_help_t value.
+ */
+static const char *
+get_usage(zfs_help_t idx)
+{
+	switch (idx) {
+	case HELP_CLONE:
+		return (gettext("\tclone [-p] [-o property=value] ... "
+		    "<snapshot> <filesystem|volume>\n"));
+	case HELP_CREATE:
+		return (gettext("\tcreate [-pu] [-o property=value] ... "
+		    "<filesystem>\n"
+		    "\tcreate [-ps] [-b blocksize] [-o property=value] ... "
+		    "-V <size> <volume>\n"));
+	case HELP_DESTROY:
+		return (gettext("\tdestroy [-fnpRrv] <filesystem|volume>\n"
+		    "\tdestroy [-dnpRrv] "
+		    "<snapshot>[%<snapname>][,...]\n"));
+	case HELP_GET:
+		return (gettext("\tget [-rHp] [-d max] "
+		    "[-o \"all\" | field[,...]] [-t type[,...]] "
+		    "[-s source[,...]]\n"
+		    "\t    <\"all\" | property[,...]> "
+		    "[filesystem|volume|snapshot] ...\n"));
+	case HELP_INHERIT:
+		return (gettext("\tinherit [-rS] <property> "
+		    "<filesystem|volume|snapshot> ...\n"));
+	case HELP_UPGRADE:
+		return (gettext("\tupgrade [-v]\n"
+		    "\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
+	case HELP_JAIL:
+		return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
+	case HELP_UNJAIL:
+		return (gettext("\tunjail <jailid|jailname> <filesystem>\n"));
+	case HELP_LIST:
+		return (gettext("\tlist [-rH][-d max] "
+		    "[-o property[,...]] [-t type[,...]] [-s property] ...\n"
+		    "\t    [-S property] ... "
+		    "[filesystem|volume|snapshot] ...\n"));
+	case HELP_MOUNT:
+		return (gettext("\tmount\n"
+		    "\tmount [-vO] [-o opts] <-a | filesystem>\n"));
+	case HELP_PROMOTE:
+		return (gettext("\tpromote <clone-filesystem>\n"));
+	case HELP_RECEIVE:
+		return (gettext("\treceive [-vnFu] <filesystem|volume|"
+		"snapshot>\n"
+		"\treceive [-vnFu] [-d | -e] <filesystem>\n"));
+	/*
+	 * NOTE(review): unlike every other synopsis, the rename string
+	 * below has no trailing newline -- confirm this is intended.
+	 */
+	case HELP_RENAME:
+		return (gettext("\trename [-f] <filesystem|volume|snapshot> "
+		    "<filesystem|volume|snapshot>\n"
+		    "\trename [-f] -p <filesystem|volume> "
+		    "<filesystem|volume>\n"
+		    "\trename -r <snapshot> <snapshot>\n"
+		    "\trename -u [-p] <filesystem> <filesystem>"));
+	case HELP_ROLLBACK:
+		return (gettext("\trollback [-rRf] <snapshot>\n"));
+	case HELP_SEND:
+		return (gettext("\tsend [-DnPpRv] "
+		    "[-i snapshot | -I snapshot] <snapshot>\n"));
+	case HELP_SET:
+		return (gettext("\tset <property=value> "
+		    "<filesystem|volume|snapshot> ...\n"));
+	case HELP_SHARE:
+		return (gettext("\tshare <-a | filesystem>\n"));
+	case HELP_SNAPSHOT:
+		return (gettext("\tsnapshot [-r] [-o property=value] ... "
+		    "<filesystem@snapname|volume@snapname>\n"));
+	case HELP_UNMOUNT:
+		return (gettext("\tunmount [-f] "
+		    "<-a | filesystem|mountpoint>\n"));
+	case HELP_UNSHARE:
+		return (gettext("\tunshare "
+		    "<-a | filesystem|mountpoint>\n"));
+	case HELP_ALLOW:
+		return (gettext("\tallow <filesystem|volume>\n"
+		    "\tallow [-ldug] "
+		    "<\"everyone\"|user|group>[,...] <perm|@setname>[,...]\n"
+		    "\t    <filesystem|volume>\n"
+		    "\tallow [-ld] -e <perm|@setname>[,...] "
+		    "<filesystem|volume>\n"
+		    "\tallow -c <perm|@setname>[,...] <filesystem|volume>\n"
+		    "\tallow -s @setname <perm|@setname>[,...] "
+		    "<filesystem|volume>\n"));
+	case HELP_UNALLOW:
+		return (gettext("\tunallow [-rldug] "
+		    "<\"everyone\"|user|group>[,...]\n"
+		    "\t    [<perm|@setname>[,...]] <filesystem|volume>\n"
+		    "\tunallow [-rld] -e [<perm|@setname>[,...]] "
+		    "<filesystem|volume>\n"
+		    "\tunallow [-r] -c [<perm|@setname>[,...]] "
+		    "<filesystem|volume>\n"
+		    "\tunallow [-r] -s @setname [<perm|@setname>[,...]] "
+		    "<filesystem|volume>\n"));
+	case HELP_USERSPACE:
+		return (gettext("\tuserspace [-Hinp] [-o field[,...]] "
+		    "[-s field] ...\n\t[-S field] ... "
+		    "[-t type[,...]] <filesystem|snapshot>\n"));
+	case HELP_GROUPSPACE:
+		return (gettext("\tgroupspace [-Hinp] [-o field[,...]] "
+		    "[-s field] ...\n\t[-S field] ... "
+		    "[-t type[,...]] <filesystem|snapshot>\n"));
+	case HELP_HOLD:
+		return (gettext("\thold [-r] <tag> <snapshot> ...\n"));
+	case HELP_HOLDS:
+		return (gettext("\tholds [-r] <snapshot> ...\n"));
+	case HELP_RELEASE:
+		return (gettext("\trelease [-r] <tag> <snapshot> ...\n"));
+	case HELP_DIFF:
+		return (gettext("\tdiff [-FHt] <snapshot> "
+		    "[snapshot|filesystem]\n"));
+	}
+
+	abort();
+	/* NOTREACHED */
+}
+
+/*
+ * Report an out-of-memory condition and terminate the process.
+ * Never returns.
+ */
+void
+nomem(void)
+{
+	(void) fprintf(stderr, gettext("internal error: out of memory\n"));
+	exit(1);
+}
+
+/*
+ * Utility function to guarantee malloc() success.
+ */
+
+void *
+safe_malloc(size_t size)
+{
+	void *data;
+
+	/* calloc() is used, so the returned memory is also zero-filled */
+	if ((data = calloc(1, size)) == NULL)
+		nomem();
+
+	return (data);
+}
+
+/*
+ * strdup() wrapper that exits via nomem() instead of returning NULL.
+ */
+static char *
+safe_strdup(char *str)
+{
+	char *dupstr = strdup(str);
+
+	if (dupstr == NULL)
+		nomem();
+
+	return (dupstr);
+}
+
+/*
+ * Callback routine that will print out information for each of
+ * the properties.
+ */
+static int
+usage_prop_cb(int prop, void *cb)
+{
+	FILE *fp = cb;	/* output stream passed through zprop_iter() */
+
+	(void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop));
+
+	/* EDIT column: writable unless the property is read-only */
+	if (zfs_prop_readonly(prop))
+		(void) fprintf(fp, " NO    ");
+	else
+		(void) fprintf(fp, "YES    ");
+
+	/* INHERIT column */
+	if (zfs_prop_inheritable(prop))
+		(void) fprintf(fp, "  YES   ");
+	else
+		(void) fprintf(fp, "   NO   ");
+
+	/* VALUES column; "-" when no value list is available */
+	if (zfs_prop_values(prop) == NULL)
+		(void) fprintf(fp, "-\n");
+	else
+		(void) fprintf(fp, "%s\n", zfs_prop_values(prop));
+
+	return (ZPROP_CONT);	/* keep iterating */
+}
+
+/*
+ * Display usage message.  If we're inside a command, display only the usage for
+ * that command.  Otherwise, iterate over the entire command table and display
+ * a complete usage message.
+ */
+static void
+usage(boolean_t requested)
+{
+	int i;
+	boolean_t show_properties = B_FALSE;
+	/* requested help goes to stdout; usage errors go to stderr */
+	FILE *fp = requested ? stdout : stderr;
+
+	if (current_command == NULL) {
+
+		(void) fprintf(fp, gettext("usage: zfs command args ...\n"));
+		(void) fprintf(fp,
+		    gettext("where 'command' is one of the following:\n\n"));
+
+		for (i = 0; i < NCOMMAND; i++) {
+			/* NULL-name entries render as blank separator lines */
+			if (command_table[i].name == NULL)
+				(void) fprintf(fp, "\n");
+			else
+				(void) fprintf(fp, "%s",
+				    get_usage(command_table[i].usage));
+		}
+
+		(void) fprintf(fp, gettext("\nEach dataset is of the form: "
+		    "pool/[dataset/]*dataset[@name]\n"));
+	} else {
+		(void) fprintf(fp, gettext("usage:\n"));
+		(void) fprintf(fp, "%s", get_usage(current_command->usage));
+	}
+
+	/* only the property-centric subcommands get the property table */
+	if (current_command != NULL &&
+	    (strcmp(current_command->name, "set") == 0 ||
+	    strcmp(current_command->name, "get") == 0 ||
+	    strcmp(current_command->name, "inherit") == 0 ||
+	    strcmp(current_command->name, "list") == 0))
+		show_properties = B_TRUE;
+
+	if (show_properties) {
+		(void) fprintf(fp,
+		    gettext("\nThe following properties are supported:\n"));
+
+		(void) fprintf(fp, "\n\t%-14s %s  %s   %s\n\n",
+		    "PROPERTY", "EDIT", "INHERIT", "VALUES");
+
+		/* Iterate over all properties */
+		(void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE,
+		    ZFS_TYPE_DATASET);
+
+		/* pseudo-properties not covered by zprop_iter() */
+		(void) fprintf(fp, "\t%-15s ", "userused@...");
+		(void) fprintf(fp, " NO       NO   <size>\n");
+		(void) fprintf(fp, "\t%-15s ", "groupused@...");
+		(void) fprintf(fp, " NO       NO   <size>\n");
+		(void) fprintf(fp, "\t%-15s ", "userquota@...");
+		(void) fprintf(fp, "YES       NO   <size> | none\n");
+		(void) fprintf(fp, "\t%-15s ", "groupquota@...");
+		(void) fprintf(fp, "YES       NO   <size> | none\n");
+		(void) fprintf(fp, "\t%-15s ", "written@<snap>");
+		(void) fprintf(fp, " NO       NO   <size>\n");
+
+		(void) fprintf(fp, gettext("\nSizes are specified in bytes "
+		    "with standard units such as K, M, G, etc.\n"));
+		(void) fprintf(fp, gettext("\nUser-defined properties can "
+		    "be specified by using a name containing a colon (:).\n"));
+		(void) fprintf(fp, gettext("\nThe {user|group}{used|quota}@ "
+		    "properties must be appended with\n"
+		    "a user or group specifier of one of these forms:\n"
+		    "    POSIX name      (eg: \"matt\")\n"
+		    "    POSIX id        (eg: \"126829\")\n"
+		    "    SMB name@domain (eg: \"matt@sun\")\n"
+		    "    SMB SID         (eg: \"S-1-234-567-89\")\n"));
+	} else {
+		(void) fprintf(fp,
+		    gettext("\nFor the property list, run: %s\n"),
+		    "zfs set|get");
+		(void) fprintf(fp,
+		    gettext("\nFor the delegated permission list, run: %s\n"),
+		    "zfs allow|unallow");
+	}
+
+	/*
+	 * See comments at end of main().
+	 */
+	if (getenv("ZFS_ABORT") != NULL) {
+		(void) printf("dumping core by request\n");
+		abort();
+	}
+
+	/* exit 0 when help was explicitly requested, 2 on a usage error */
+	exit(requested ? 0 : 2);
+}
+
+/*
+ * Parse the current optarg of the form "property=value" and add the
+ * pair to 'props'.  The string is split in place (the '=' is
+ * overwritten with '\0').  Returns 0 on success, -1 on a malformed
+ * pair or a property specified more than once.
+ */
+static int
+parseprop(nvlist_t *props)
+{
+	char *propname = optarg;
+	char *propval, *strval;
+
+	if ((propval = strchr(propname, '=')) == NULL) {
+		(void) fprintf(stderr, gettext("missing "
+		    "'=' for -o option\n"));
+		return (-1);
+	}
+	*propval = '\0';
+	propval++;
+	/* reject duplicates: the name must not already be in the list */
+	if (nvlist_lookup_string(props, propname, &strval) == 0) {
+		(void) fprintf(stderr, gettext("property '%s' "
+		    "specified multiple times\n"), propname);
+		return (-1);
+	}
+	if (nvlist_add_string(props, propname, propval) != 0)
+		nomem();
+	return (0);
+}
+
+/*
+ * Parse the argument of a '-d depth' option.  Exits via usage() when
+ * the value is not a non-negative integer.  As a side effect, sets the
+ * DEPTH_LIMIT and RECURSE iteration flags in *flags.
+ */
+static int
+parse_depth(char *opt, int *flags)
+{
+	char *tmp;
+	int depth;
+
+	depth = (int)strtol(opt, &tmp, 0);
+	if (*tmp) {
+		/*
+		 * NOTE(review): message prints the global optarg rather
+		 * than 'opt'; identical only when callers pass optarg.
+		 */
+		(void) fprintf(stderr,
+		    gettext("%s is not an integer\n"), optarg);
+		usage(B_FALSE);
+	}
+	if (depth < 0) {
+		(void) fprintf(stderr,
+		    gettext("Depth can not be negative.\n"));
+		usage(B_FALSE);
+	}
+	*flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE);
+	return (depth);
+}
+
+#define	PROGRESS_DELAY 2		/* seconds */
+
+/*
+ * Single-line progress reporter.  Output is suppressed until
+ * PROGRESS_DELAY seconds after start_progress_timer(); once shown,
+ * each update overwrites the previous text in place by emitting the
+ * same number of backspaces (pt_reverse) as characters printed.
+ */
+static char *pt_reverse = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";
+static time_t pt_begin;		/* wall-clock time when output may begin */
+static char *pt_header = NULL;	/* prefix printed before each update */
+static boolean_t pt_shown;	/* B_TRUE once anything has been printed */
+
+static void
+start_progress_timer(void)
+{
+	pt_begin = time(NULL) + PROGRESS_DELAY;
+	pt_shown = B_FALSE;
+}
+
+static void
+set_progress_header(char *header)
+{
+	assert(pt_header == NULL);
+	pt_header = safe_strdup(header);
+	if (pt_shown) {
+		(void) printf("%s: ", header);
+		(void) fflush(stdout);
+	}
+}
+
+static void
+update_progress(char *update)
+{
+	if (!pt_shown && time(NULL) > pt_begin) {
+		int len = strlen(update);
+
+		/* "%*.*s" prints exactly 'len' backspaces from pt_reverse */
+		(void) printf("%s: %s%*.*s", pt_header, update, len, len,
+		    pt_reverse);
+		(void) fflush(stdout);
+		pt_shown = B_TRUE;
+	} else if (pt_shown) {
+		int len = strlen(update);
+
+		(void) printf("%s%*.*s", update, len, len, pt_reverse);
+		(void) fflush(stdout);
+	}
+}
+
+static void
+finish_progress(char *done)
+{
+	if (pt_shown) {
+		(void) printf("%s\n", done);
+		(void) fflush(stdout);
+	}
+	free(pt_header);
+	pt_header = NULL;
+}
+/*
+ * zfs clone [-p] [-o prop=value] ... <snap> <fs | vol>
+ *
+ * Given an existing dataset, create a writable copy whose initial contents
+ * are the same as the source.  The newly created dataset maintains a
+ * dependency on the original; the original cannot be destroyed so long as
+ * the clone exists.
+ *
+ * The '-p' flag creates all the non-existing ancestors of the target first.
+ */
+static int
+zfs_do_clone(int argc, char **argv)
+{
+	zfs_handle_t *zhp = NULL;
+	boolean_t parents = B_FALSE;	/* -p: create missing ancestors */
+	nvlist_t *props;		/* -o property=value pairs */
+	int ret = 0;
+	int c;
+
+	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+
+	/* check options */
+	while ((c = getopt(argc, argv, "o:p")) != -1) {
+		switch (c) {
+		case 'o':
+			/*
+			 * NOTE(review): this early return leaks 'props';
+			 * harmless in practice since the process exits soon.
+			 */
+			if (parseprop(props))
+				return (1);
+			break;
+		case 'p':
+			parents = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			goto usage;
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing source dataset "
+		    "argument\n"));
+		goto usage;
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing target dataset "
+		    "argument\n"));
+		goto usage;
+	}
+	if (argc > 2) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		goto usage;
+	}
+
+	/* open the source dataset */
+	if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
+		return (1);
+
+	if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM |
+	    ZFS_TYPE_VOLUME)) {
+		/*
+		 * Now create the ancestors of the target dataset.  If the
+		 * target already exists and '-p' option was used we should not
+		 * complain.
+		 */
+		if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM |
+		    ZFS_TYPE_VOLUME))
+			return (0);
+		if (zfs_create_ancestors(g_zfs, argv[1]) != 0)
+			return (1);
+	}
+
+	/* pass to libzfs */
+	ret = zfs_clone(zhp, argv[1], props);
+
+	/* create the mountpoint if necessary */
+	if (ret == 0) {
+		zfs_handle_t *clone;
+
+		clone = zfs_open(g_zfs, argv[1], ZFS_TYPE_DATASET);
+		if (clone != NULL) {
+			/* volumes have no mountpoint; mount/share the rest */
+			if (zfs_get_type(clone) != ZFS_TYPE_VOLUME)
+				if ((ret = zfs_mount(clone, NULL, 0)) == 0)
+					ret = zfs_share(clone);
+			zfs_close(clone);
+		}
+	}
+
+	zfs_close(zhp);
+	nvlist_free(props);
+
+	return (!!ret);
+
+usage:
+	if (zhp)
+		zfs_close(zhp);
+	nvlist_free(props);
+	usage(B_FALSE);
+	return (-1);
+}
+
+/*
+ * zfs create [-pu] [-o prop=value] ... fs
+ * zfs create [-ps] [-b blocksize] [-o prop=value] ... -V vol size
+ *
+ * Create a new dataset.  This command can be used to create filesystems
+ * and volumes.  Snapshot creation is handled by 'zfs snapshot'.
+ * For volumes, the user must specify a size to be used.
+ *
+ * The '-s' flag applies only to volumes, and indicates that we should not try
+ * to set the reservation for this volume.  By default we set a reservation
+ * equal to the size for any volume.  For pools with SPA_VERSION >=
+ * SPA_VERSION_REFRESERVATION, we set a refreservation instead.
+ *
+ * The '-p' flag creates all the non-existing ancestors of the target first.
+ *
+ * The '-u' flag prevents mounting of newly created file system.
+ */
+static int
+zfs_do_create(int argc, char **argv)
+{
+	zfs_type_t type = ZFS_TYPE_FILESYSTEM;
+	zfs_handle_t *zhp = NULL;
+	uint64_t volsize;		/* set whenever -V makes this a volume */
+	int c;
+	boolean_t noreserve = B_FALSE;	/* -s: sparse volume */
+	boolean_t bflag = B_FALSE;	/* -b: explicit volume block size */
+	boolean_t parents = B_FALSE;	/* -p: create missing ancestors */
+	boolean_t nomount = B_FALSE;	/* -u: do not mount after create */
+	int ret = 1;
+	nvlist_t *props;
+	uint64_t intval;
+	int canmount = ZFS_CANMOUNT_OFF;
+
+	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":V:b:so:pu")) != -1) {
+		switch (c) {
+		case 'V':
+			type = ZFS_TYPE_VOLUME;
+			if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
+				(void) fprintf(stderr, gettext("bad volume "
+				    "size '%s': %s\n"), optarg,
+				    libzfs_error_description(g_zfs));
+				goto error;
+			}
+
+			if (nvlist_add_uint64(props,
+			    zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0)
+				nomem();
+			volsize = intval;
+			break;
+		case 'p':
+			parents = B_TRUE;
+			break;
+		case 'b':
+			bflag = B_TRUE;
+			if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
+				(void) fprintf(stderr, gettext("bad volume "
+				    "block size '%s': %s\n"), optarg,
+				    libzfs_error_description(g_zfs));
+				goto error;
+			}
+
+			if (nvlist_add_uint64(props,
+			    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
+			    intval) != 0)
+				nomem();
+			break;
+		case 'o':
+			if (parseprop(props))
+				goto error;
+			break;
+		case 's':
+			noreserve = B_TRUE;
+			break;
+		case 'u':
+			nomount = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing size "
+			    "argument\n"));
+			goto badusage;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			goto badusage;
+		}
+	}
+
+	if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) {
+		(void) fprintf(stderr, gettext("'-s' and '-b' can only be "
+		    "used when creating a volume\n"));
+		goto badusage;
+	}
+	if (nomount && type != ZFS_TYPE_FILESYSTEM) {
+		(void) fprintf(stderr, gettext("'-u' can only be "
+		    "used when creating a file system\n"));
+		goto badusage;
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc == 0) {
+		(void) fprintf(stderr, gettext("missing %s argument\n"),
+		    zfs_type_to_name(type));
+		goto badusage;
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		goto badusage;
+	}
+
+	if (type == ZFS_TYPE_VOLUME && !noreserve) {
+		zpool_handle_t *zpool_handle;
+		uint64_t spa_version;
+		char *p;
+		zfs_prop_t resv_prop;
+		char *strval;
+
+		/*
+		 * Temporarily truncate the dataset name at the first '/'
+		 * (assignment in the condition is intentional) so just the
+		 * pool can be opened; the '/' is restored right after.
+		 */
+		if (p = strchr(argv[0], '/'))
+			*p = '\0';
+		zpool_handle = zpool_open(g_zfs, argv[0]);
+		if (p != NULL)
+			*p = '/';
+		if (zpool_handle == NULL)
+			goto error;
+		spa_version = zpool_get_prop_int(zpool_handle,
+		    ZPOOL_PROP_VERSION, NULL);
+		zpool_close(zpool_handle);
+		/* newer pools use refreservation instead of reservation */
+		if (spa_version >= SPA_VERSION_REFRESERVATION)
+			resv_prop = ZFS_PROP_REFRESERVATION;
+		else
+			resv_prop = ZFS_PROP_RESERVATION;
+		/* volsize was set by the mandatory -V for volumes */
+		volsize = zvol_volsize_to_reservation(volsize, props);
+
+		/* honor an explicit -o (ref)reservation over the default */
+		if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop),
+		    &strval) != 0) {
+			if (nvlist_add_uint64(props,
+			    zfs_prop_to_name(resv_prop), volsize) != 0) {
+				nvlist_free(props);
+				nomem();
+			}
+		}
+	}
+
+	if (parents && zfs_name_valid(argv[0], type)) {
+		/*
+		 * Now create the ancestors of target dataset.  If the target
+		 * already exists and '-p' option was used we should not
+		 * complain.
+		 */
+		if (zfs_dataset_exists(g_zfs, argv[0], type)) {
+			ret = 0;
+			goto error;
+		}
+		if (zfs_create_ancestors(g_zfs, argv[0]) != 0)
+			goto error;
+	}
+
+	/* pass to libzfs */
+	if (zfs_create(g_zfs, argv[0], type, props) != 0)
+		goto error;
+
+	if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL)
+		goto error;
+
+	ret = 0;
+	/*
+	 * if the user doesn't want the dataset automatically mounted,
+	 * then skip the mount/share step
+	 */
+	if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type))
+		canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
+
+	/*
+	 * Mount and/or share the new filesystem as appropriate.  We provide a
+	 * verbose error message to let the user know that their filesystem was
+	 * in fact created, even if we failed to mount or share it.
+	 */
+	if (!nomount && canmount == ZFS_CANMOUNT_ON) {
+		if (zfs_mount(zhp, NULL, 0) != 0) {
+			(void) fprintf(stderr, gettext("filesystem "
+			    "successfully created, but not mounted\n"));
+			ret = 1;
+		} else if (zfs_share(zhp) != 0) {
+			(void) fprintf(stderr, gettext("filesystem "
+			    "successfully created, but not shared\n"));
+			ret = 1;
+		}
+	}
+
+error:
+	if (zhp)
+		zfs_close(zhp);
+	nvlist_free(props);
+	return (ret);
+badusage:
+	nvlist_free(props);
+	usage(B_FALSE);
+	return (2);
+}
+
+/*
+ * zfs destroy [-rRf] <fs, vol>
+ * zfs destroy [-rRd] <snap>
+ *
+ *	-r	Recursively destroy all children
+ *	-R	Recursively destroy all dependents, including clones
+ *	-f	Force unmounting of any dependents
+ *	-d	If we can't destroy now, mark for deferred destruction
+ *
+ * Destroys the given dataset.  By default, it will unmount any filesystems,
+ * and refuse to destroy a dataset that has any dependents.  A dependent can
+ * either be a child, or a clone of a child.
+ */
+typedef struct destroy_cbdata {
+	boolean_t	cb_first;	/* still before the first error line */
+	boolean_t	cb_force;	/* -f: force unmount */
+	boolean_t	cb_recurse;	/* -r/-R: descend into children */
+	boolean_t	cb_error;	/* a dependency error was reported */
+	boolean_t	cb_doclones;	/* -R: destroy dependent clones too */
+	zfs_handle_t	*cb_target;	/* dataset whose dependents we check */
+	boolean_t	cb_defer_destroy;	/* -d: deferred destruction */
+	boolean_t	cb_verbose;	/* -v/-p: report planned actions */
+	boolean_t	cb_parsable;	/* -p: machine-readable output */
+	boolean_t	cb_dryrun;	/* -n: report only, destroy nothing */
+	nvlist_t	*cb_nvl;	/* names of snapshots to destroy */
+
+	/* first snap in contiguous run */
+	zfs_handle_t	*cb_firstsnap;
+	/* previous snap in contiguous run */
+	zfs_handle_t	*cb_prevsnap;
+	int64_t		cb_snapused;	/* accumulated space to reclaim */
+	char		*cb_snapspec;	/* snapshot spec after the '@' */
+} destroy_cbdata_t;
+
+/*
+ * Check for any dependents based on the '-r' or '-R' flags.
+ */
+static int
+destroy_check_dependent(zfs_handle_t *zhp, void *data)
+{
+	destroy_cbdata_t *cbp = data;
+	const char *tname = zfs_get_name(cbp->cb_target);
+	const char *name = zfs_get_name(zhp);
+
+	/* name prefixed by target + '/' or '@' means a direct descendant */
+	if (strncmp(tname, name, strlen(tname)) == 0 &&
+	    (name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) {
+		/*
+		 * This is a direct descendant, not a clone somewhere else in
+		 * the hierarchy.
+		 */
+		if (cbp->cb_recurse)
+			goto out;
+
+		/* print the error banner only once, then list offenders */
+		if (cbp->cb_first) {
+			(void) fprintf(stderr, gettext("cannot destroy '%s': "
+			    "%s has children\n"),
+			    zfs_get_name(cbp->cb_target),
+			    zfs_type_to_name(zfs_get_type(cbp->cb_target)));
+			(void) fprintf(stderr, gettext("use '-r' to destroy "
+			    "the following datasets:\n"));
+			cbp->cb_first = B_FALSE;
+			cbp->cb_error = B_TRUE;
+		}
+
+		(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
+	} else {
+		/*
+		 * This is a clone.  We only want to report this if the '-r'
+		 * wasn't specified, or the target is a snapshot.
+		 */
+		if (!cbp->cb_recurse &&
+		    zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT)
+			goto out;
+
+		if (cbp->cb_first) {
+			(void) fprintf(stderr, gettext("cannot destroy '%s': "
+			    "%s has dependent clones\n"),
+			    zfs_get_name(cbp->cb_target),
+			    zfs_type_to_name(zfs_get_type(cbp->cb_target)));
+			(void) fprintf(stderr, gettext("use '-R' to destroy "
+			    "the following datasets:\n"));
+			cbp->cb_first = B_FALSE;
+			cbp->cb_error = B_TRUE;
+			/* force dry-run so nothing is destroyed after this */
+			cbp->cb_dryrun = B_TRUE;
+		}
+
+		(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
+	}
+
+out:
+	zfs_close(zhp);
+	return (0);
+}
+
+/*
+ * Destroy (or, in verbose/dry-run mode, merely report) a single dataset.
+ * Closes zhp on every path.  Returns 0 on success or skip, -1 when
+ * unmount or destroy fails (stopping the iteration).
+ */
+static int
+destroy_callback(zfs_handle_t *zhp, void *data)
+{
+	destroy_cbdata_t *cb = data;
+	const char *name = zfs_get_name(zhp);
+
+	if (cb->cb_verbose) {
+		if (cb->cb_parsable) {
+			(void) printf("destroy\t%s\n", name);
+		} else if (cb->cb_dryrun) {
+			(void) printf(gettext("would destroy %s\n"),
+			    name);
+		} else {
+			(void) printf(gettext("will destroy %s\n"),
+			    name);
+		}
+	}
+
+	/*
+	 * Ignore pools (which we've already flagged as an error before getting
+	 * here).
+	 */
+	if (strchr(zfs_get_name(zhp), '/') == NULL &&
+	    zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (!cb->cb_dryrun) {
+		if (zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 ||
+		    zfs_destroy(zhp, cb->cb_defer_destroy) != 0) {
+			zfs_close(zhp);
+			return (-1);
+		}
+	}
+
+	zfs_close(zhp);
+	return (0);
+}
+
+/*
+ * Snapshot-iteration callback: prints each snapshot selected in cb_nvl
+ * and tracks contiguous runs of selected snapshots (cb_firstsnap ..
+ * cb_prevsnap) so their combined used space can be accumulated into
+ * cb_snapused when a run ends.
+ */
+static int
+destroy_print_cb(zfs_handle_t *zhp, void *arg)
+{
+	destroy_cbdata_t *cb = arg;
+	const char *name = zfs_get_name(zhp);
+	int err = 0;
+
+	if (nvlist_exists(cb->cb_nvl, name)) {
+		if (cb->cb_firstsnap == NULL)
+			cb->cb_firstsnap = zfs_handle_dup(zhp);
+		if (cb->cb_prevsnap != NULL)
+			zfs_close(cb->cb_prevsnap);
+		/* this snap continues the current range */
+		cb->cb_prevsnap = zfs_handle_dup(zhp);
+		if (cb->cb_verbose) {
+			if (cb->cb_parsable) {
+				(void) printf("destroy\t%s\n", name);
+			} else if (cb->cb_dryrun) {
+				(void) printf(gettext("would destroy %s\n"),
+				    name);
+			} else {
+				(void) printf(gettext("will destroy %s\n"),
+				    name);
+			}
+		}
+	} else if (cb->cb_firstsnap != NULL) {
+		/* end of this range */
+		uint64_t used = 0;
+		err = zfs_get_snapused_int(cb->cb_firstsnap,
+		    cb->cb_prevsnap, &used);
+		cb->cb_snapused += used;
+		zfs_close(cb->cb_firstsnap);
+		cb->cb_firstsnap = NULL;
+		zfs_close(cb->cb_prevsnap);
+		cb->cb_prevsnap = NULL;
+	}
+	zfs_close(zhp);
+	return (err);
+}
+
+/*
+ * Print all to-be-destroyed snapshots of fs_zhp (via destroy_print_cb)
+ * and accumulate the space they would reclaim into cb->cb_snapused,
+ * flushing any range still open when the iteration ends.
+ */
+static int
+destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
+{
+	int err = 0;
+	assert(cb->cb_firstsnap == NULL);
+	assert(cb->cb_prevsnap == NULL);
+	err = zfs_iter_snapshots_sorted(fs_zhp, destroy_print_cb, cb);
+	/* close out a range left open at the end of the iteration */
+	if (cb->cb_firstsnap != NULL) {
+		uint64_t used = 0;
+		if (err == 0) {
+			err = zfs_get_snapused_int(cb->cb_firstsnap,
+			    cb->cb_prevsnap, &used);
+		}
+		cb->cb_snapused += used;
+		zfs_close(cb->cb_firstsnap);
+		cb->cb_firstsnap = NULL;
+		zfs_close(cb->cb_prevsnap);
+		cb->cb_prevsnap = NULL;
+	}
+	return (err);
+}
+
+/*
+ * Record one snapshot slated for destruction in cb->cb_nvl, after
+ * verifying (unless -R or -d was given) that it has no dependents
+ * that would block the destroy.
+ */
+static int
+snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
+{
+	destroy_cbdata_t *cb = arg;
+	int err = 0;
+
+	/* Check for clones. */
+	if (!cb->cb_doclones && !cb->cb_defer_destroy) {
+		cb->cb_target = zhp;
+		cb->cb_first = B_TRUE;
+		err = zfs_iter_dependents(zhp, B_TRUE,
+		    destroy_check_dependent, cb);
+	}
+
+	if (err == 0) {
+		if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp)))
+			nomem();
+	}
+	zfs_close(zhp);
+	return (err);
+}
+
+/*
+ * Collect the snapshots of zhp matching cb->cb_snapspec into cb->cb_nvl
+ * (ENOENT, i.e. no match, is not an error), optionally print them, and
+ * recurse into child filesystems when -r/-R was given.  Closes zhp.
+ */
+static int
+gather_snapshots(zfs_handle_t *zhp, void *arg)
+{
+	destroy_cbdata_t *cb = arg;
+	int err = 0;
+
+	err = zfs_iter_snapspec(zhp, cb->cb_snapspec, snapshot_to_nvl_cb, cb);
+	if (err == ENOENT)
+		err = 0;
+	if (err != 0)
+		goto out;
+
+	if (cb->cb_verbose) {
+		err = destroy_print_snapshots(zhp, cb);
+		if (err != 0)
+			goto out;
+	}
+
+	if (cb->cb_recurse)
+		err = zfs_iter_filesystems(zhp, gather_snapshots, cb);
+
+out:
+	zfs_close(zhp);
+	return (err);
+}
+
+/*
+ * For every snapshot named in cb->cb_nvl, destroy its dependents
+ * (clones) via destroy_callback.  Returns the first non-zero error.
+ */
+static int
+destroy_clones(destroy_cbdata_t *cb)
+{
+	nvpair_t *pair;
+	for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL);
+	    pair != NULL;
+	    pair = nvlist_next_nvpair(cb->cb_nvl, pair)) {
+		zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair),
+		    ZFS_TYPE_SNAPSHOT);
+		if (zhp != NULL) {
+			boolean_t defer = cb->cb_defer_destroy;
+			int err = 0;
+
+			/*
+			 * We can't defer destroy non-snapshots, so set it to
+			 * false while destroying the clones.
+			 */
+			cb->cb_defer_destroy = B_FALSE;
+			err = zfs_iter_dependents(zhp, B_FALSE,
+			    destroy_callback, cb);
+			cb->cb_defer_destroy = defer;
+			zfs_close(zhp);
+			if (err != 0)
+				return (err);
+		}
+	}
+	return (0);
+}
+
+/*
+ * zfs destroy [-rRf] <fs, vol>
+ * zfs destroy [-rRd] <snap-spec>
+ *
+ *	-r	Recursively destroy all children
+ *	-R	Recursively destroy all dependents, including clones
+ *	-f	Force unmounting of any dependents
+ *	-d	Defer snapshot destruction (snapshots only)
+ *	-n	Dry run: report what would be destroyed, destroy nothing
+ *	-v	Verbose; -p additionally makes the output parsable
+ *
+ * An argument containing '@' is treated as a snapshot spec
+ * (dataset@snap, ranges, or comma-separated lists handled by
+ * zfs_iter_snapspec()); otherwise a filesystem/volume is destroyed.
+ */
+static int
+zfs_do_destroy(int argc, char **argv)
+{
+	destroy_cbdata_t cb = { 0 };
+	int c;
+	zfs_handle_t *zhp;
+	char *at;
+	zfs_type_t type = ZFS_TYPE_DATASET;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "vpndfrR")) != -1) {
+		switch (c) {
+		case 'v':
+			cb.cb_verbose = B_TRUE;
+			break;
+		case 'p':
+			cb.cb_verbose = B_TRUE;
+			cb.cb_parsable = B_TRUE;
+			break;
+		case 'n':
+			cb.cb_dryrun = B_TRUE;
+			break;
+		case 'd':
+			cb.cb_defer_destroy = B_TRUE;
+			type = ZFS_TYPE_SNAPSHOT;
+			break;
+		case 'f':
+			cb.cb_force = B_TRUE;
+			break;
+		case 'r':
+			cb.cb_recurse = B_TRUE;
+			break;
+		case 'R':
+			cb.cb_recurse = B_TRUE;
+			cb.cb_doclones = B_TRUE;
+			break;
+		case '?':
+		default:
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc == 0) {
+		(void) fprintf(stderr, gettext("missing dataset argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	at = strchr(argv[0], '@');
+	if (at != NULL) {
+		/* Snapshot spec: destroy one or more snapshots. */
+		int err = 0;
+
+		/* Build the list of snaps to destroy in cb_nvl. */
+		if (nvlist_alloc(&cb.cb_nvl, NV_UNIQUE_NAME, 0) != 0)
+			nomem();
+
+		/* Split "fs@spec" in place; open the containing dataset. */
+		*at = '\0';
+		zhp = zfs_open(g_zfs, argv[0],
+		    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+		if (zhp == NULL)
+			return (1);
+
+		cb.cb_snapspec = at + 1;
+		if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 ||
+		    cb.cb_error) {
+			zfs_close(zhp);
+			nvlist_free(cb.cb_nvl);
+			return (1);
+		}
+
+		if (nvlist_empty(cb.cb_nvl)) {
+			(void) fprintf(stderr, gettext("could not find any "
+			    "snapshots to destroy; check snapshot names.\n"));
+			zfs_close(zhp);
+			nvlist_free(cb.cb_nvl);
+			return (1);
+		}
+
+		if (cb.cb_verbose) {
+			/* Report space accumulated by gather_snapshots(). */
+			char buf[16];
+			zfs_nicenum(cb.cb_snapused, buf, sizeof (buf));
+			if (cb.cb_parsable) {
+				(void) printf("reclaim\t%llu\n",
+				    cb.cb_snapused);
+			} else if (cb.cb_dryrun) {
+				(void) printf(gettext("would reclaim %s\n"),
+				    buf);
+			} else {
+				(void) printf(gettext("will reclaim %s\n"),
+				    buf);
+			}
+		}
+
+		if (!cb.cb_dryrun) {
+			/* Clones first (-R), then the snapshots in one call. */
+			if (cb.cb_doclones)
+				err = destroy_clones(&cb);
+			if (err == 0) {
+				err = zfs_destroy_snaps_nvl(zhp, cb.cb_nvl,
+				    cb.cb_defer_destroy);
+			}
+		}
+
+		zfs_close(zhp);
+		nvlist_free(cb.cb_nvl);
+		if (err != 0)
+			return (1);
+	} else {
+		/* Open the given dataset */
+		if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL)
+			return (1);
+
+		cb.cb_target = zhp;
+
+		/*
+		 * Perform an explicit check for pools before going any further.
+		 */
+		if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL &&
+		    zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
+			(void) fprintf(stderr, gettext("cannot destroy '%s': "
+			    "operation does not apply to pools\n"),
+			    zfs_get_name(zhp));
+			(void) fprintf(stderr, gettext("use 'zfs destroy -r "
+			    "%s' to destroy all datasets in the pool\n"),
+			    zfs_get_name(zhp));
+			(void) fprintf(stderr, gettext("use 'zpool destroy %s' "
+			    "to destroy the pool itself\n"), zfs_get_name(zhp));
+			zfs_close(zhp);
+			return (1);
+		}
+
+		/*
+		 * Check for any dependents and/or clones.
+		 */
+		cb.cb_first = B_TRUE;
+		if (!cb.cb_doclones &&
+		    zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent,
+		    &cb) != 0) {
+			zfs_close(zhp);
+			return (1);
+		}
+
+		if (cb.cb_error) {
+			zfs_close(zhp);
+			return (1);
+		}
+
+		/* Destroy all dependents before the target itself. */
+		if (zfs_iter_dependents(zhp, B_FALSE, destroy_callback,
+		    &cb) != 0) {
+			zfs_close(zhp);
+			return (1);
+		}
+
+		/*
+		 * Do the real thing.  The callback will close the
+		 * handle regardless of whether it succeeds or not.
+		 */
+		if (destroy_callback(zhp, &cb) != 0)
+			return (1);
+	}
+
+	return (0);
+}
+
+/*
+ * Return B_TRUE if the RECEIVED column was requested among the output
+ * columns for 'zfs get' (i.e. received property values must be fetched).
+ */
+static boolean_t
+is_recvd_column(zprop_get_cbdata_t *cbp)
+{
+	int i;
+	zfs_get_column_t col;
+
+	for (i = 0; i < ZFS_GET_NCOLS &&
+	    (col = cbp->cb_columns[i]) != GET_COL_NONE; i++)
+		if (col == GET_COL_RECVD)
+			return (B_TRUE);
+	return (B_FALSE);
+}
+
+/*
+ * zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...]
+ *	< all | property[,property]... > < fs | snap | vol > ...
+ *
+ *	-r	recurse over any child datasets
+ *	-H	scripted mode.  Headers are stripped, and fields are separated
+ *		by tabs instead of spaces.
+ *	-o	Set of fields to display.  One of "name,property,value,
+ *		received,source". Default is "name,property,value,source".
+ *		"all" is an alias for all five.
+ *	-s	Set of sources to allow.  One of
+ *		"local,default,inherited,received,temporary,none".  Default is
+ *		all six.
+ *	-p	Display values in parsable (literal) format.
+ *
+ *  Prints properties for the given datasets.  The user can control which
+ *  columns to display as well as which property types to allow.
+ */
+
+/*
+ * Invoked to display the properties for a single dataset.
+ */
+/*
+ * zfs_for_each() callback for 'zfs get': print every requested
+ * property of one dataset.  Handles four property classes in turn:
+ * native properties, userquota@/groupquota@ properties, written@
+ * properties, and arbitrary user properties (from the dataset's
+ * user-property nvlist).  Missing values print as "-".
+ */
+static int
+get_callback(zfs_handle_t *zhp, void *data)
+{
+	char buf[ZFS_MAXPROPLEN];
+	char rbuf[ZFS_MAXPROPLEN];
+	zprop_source_t sourcetype;
+	char source[ZFS_MAXNAMELEN];
+	zprop_get_cbdata_t *cbp = data;
+	nvlist_t *user_props = zfs_get_user_props(zhp);
+	zprop_list_t *pl = cbp->cb_proplist;
+	nvlist_t *propval;
+	char *strval;
+	char *sourceval;
+	boolean_t received = is_recvd_column(cbp);
+
+	for (; pl != NULL; pl = pl->pl_next) {
+		char *recvdval = NULL;
+		/*
+		 * Skip the special fake placeholder.  This will also skip over
+		 * the name property when 'all' is specified.
+		 */
+		if (pl->pl_prop == ZFS_PROP_NAME &&
+		    pl == cbp->cb_proplist)
+			continue;
+
+		if (pl->pl_prop != ZPROP_INVAL) {
+			/* Native (statically known) property. */
+			if (zfs_prop_get(zhp, pl->pl_prop, buf,
+			    sizeof (buf), &sourcetype, source,
+			    sizeof (source),
+			    cbp->cb_literal) != 0) {
+				if (pl->pl_all)
+					continue;
+				if (!zfs_prop_valid_for_type(pl->pl_prop,
+				    ZFS_TYPE_DATASET)) {
+					(void) fprintf(stderr,
+					    gettext("No such property '%s'\n"),
+					    zfs_prop_to_name(pl->pl_prop));
+					continue;
+				}
+				sourcetype = ZPROP_SRC_NONE;
+				(void) strlcpy(buf, "-", sizeof (buf));
+			}
+
+			/* Received value only fetched if the column is shown. */
+			if (received && (zfs_prop_get_recvd(zhp,
+			    zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf),
+			    cbp->cb_literal) == 0))
+				recvdval = rbuf;
+
+			zprop_print_one_property(zfs_get_name(zhp), cbp,
+			    zfs_prop_to_name(pl->pl_prop),
+			    buf, sourcetype, source, recvdval);
+		} else if (zfs_prop_userquota(pl->pl_user_prop)) {
+			/* userquota@... / groupquota@... pseudo-property. */
+			sourcetype = ZPROP_SRC_LOCAL;
+
+			if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
+			    buf, sizeof (buf), cbp->cb_literal) != 0) {
+				sourcetype = ZPROP_SRC_NONE;
+				(void) strlcpy(buf, "-", sizeof (buf));
+			}
+
+			zprop_print_one_property(zfs_get_name(zhp), cbp,
+			    pl->pl_user_prop, buf, sourcetype, source, NULL);
+		} else if (zfs_prop_written(pl->pl_user_prop)) {
+			/* written@<snap> pseudo-property. */
+			sourcetype = ZPROP_SRC_LOCAL;
+
+			if (zfs_prop_get_written(zhp, pl->pl_user_prop,
+			    buf, sizeof (buf), cbp->cb_literal) != 0) {
+				sourcetype = ZPROP_SRC_NONE;
+				(void) strlcpy(buf, "-", sizeof (buf));
+			}
+
+			zprop_print_one_property(zfs_get_name(zhp), cbp,
+			    pl->pl_user_prop, buf, sourcetype, source, NULL);
+		} else {
+			/* Arbitrary user property (name:value). */
+			if (nvlist_lookup_nvlist(user_props,
+			    pl->pl_user_prop, &propval) != 0) {
+				if (pl->pl_all)
+					continue;
+				sourcetype = ZPROP_SRC_NONE;
+				strval = "-";
+			} else {
+				verify(nvlist_lookup_string(propval,
+				    ZPROP_VALUE, &strval) == 0);
+				verify(nvlist_lookup_string(propval,
+				    ZPROP_SOURCE, &sourceval) == 0);
+
+				/* Classify source: local, received, inherited. */
+				if (strcmp(sourceval,
+				    zfs_get_name(zhp)) == 0) {
+					sourcetype = ZPROP_SRC_LOCAL;
+				} else if (strcmp(sourceval,
+				    ZPROP_SOURCE_VAL_RECVD) == 0) {
+					sourcetype = ZPROP_SRC_RECEIVED;
+				} else {
+					sourcetype = ZPROP_SRC_INHERITED;
+					(void) strlcpy(source,
+					    sourceval, sizeof (source));
+				}
+			}
+
+			if (received && (zfs_prop_get_recvd(zhp,
+			    pl->pl_user_prop, rbuf, sizeof (rbuf),
+			    cbp->cb_literal) == 0))
+				recvdval = rbuf;
+
+			zprop_print_one_property(zfs_get_name(zhp), cbp,
+			    pl->pl_user_prop, strval, sourcetype,
+			    source, recvdval);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Entry point for 'zfs get' (see usage comment above).  Parses the
+ * column (-o), source (-s), type (-t), depth (-d) and formatting
+ * options, builds the property list from argv[0], then iterates the
+ * named datasets with get_callback().
+ */
+static int
+zfs_do_get(int argc, char **argv)
+{
+	zprop_get_cbdata_t cb = { 0 };
+	int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
+	int types = ZFS_TYPE_DATASET;
+	char *value, *fields;
+	int ret = 0;
+	int limit = 0;
+	zfs_prop_t fake_name_prop;	/* see fake_name note below */
+	zprop_list_t fake_name = { 0 };
+
+	/*
+	 * Set up default columns and sources.
+	 */
+	cb.cb_sources = ZPROP_SRC_ALL;
+	cb.cb_columns[0] = GET_COL_NAME;
+	cb.cb_columns[1] = GET_COL_PROPERTY;
+	cb.cb_columns[2] = GET_COL_VALUE;
+	cb.cb_columns[3] = GET_COL_SOURCE;
+	cb.cb_type = ZFS_TYPE_DATASET;
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
+		switch (c) {
+		case 'p':
+			cb.cb_literal = B_TRUE;
+			break;
+		case 'd':
+			limit = parse_depth(optarg, &flags);
+			break;
+		case 'r':
+			flags |= ZFS_ITER_RECURSE;
+			break;
+		case 'H':
+			cb.cb_scripted = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case 'o':
+			/*
+			 * Process the set of columns to display.  We zero out
+			 * the structure to give us a blank slate.
+			 */
+			bzero(&cb.cb_columns, sizeof (cb.cb_columns));
+			i = 0;
+			while (*optarg != '\0') {
+				static char *col_subopts[] =
+				    { "name", "property", "value", "received",
+				    "source", "all", NULL };
+
+				if (i == ZFS_GET_NCOLS) {
+					(void) fprintf(stderr, gettext("too "
+					    "many fields given to -o "
+					    "option\n"));
+					usage(B_FALSE);
+				}
+
+				switch (getsubopt(&optarg, col_subopts,
+				    &value)) {
+				case 0:
+					cb.cb_columns[i++] = GET_COL_NAME;
+					break;
+				case 1:
+					cb.cb_columns[i++] = GET_COL_PROPERTY;
+					break;
+				case 2:
+					cb.cb_columns[i++] = GET_COL_VALUE;
+					break;
+				case 3:
+					cb.cb_columns[i++] = GET_COL_RECVD;
+					flags |= ZFS_ITER_RECVD_PROPS;
+					break;
+				case 4:
+					cb.cb_columns[i++] = GET_COL_SOURCE;
+					break;
+				case 5:
+					/* "all" must stand alone. */
+					if (i > 0) {
+						(void) fprintf(stderr,
+						    gettext("\"all\" conflicts "
+						    "with specific fields "
+						    "given to -o option\n"));
+						usage(B_FALSE);
+					}
+					cb.cb_columns[0] = GET_COL_NAME;
+					cb.cb_columns[1] = GET_COL_PROPERTY;
+					cb.cb_columns[2] = GET_COL_VALUE;
+					cb.cb_columns[3] = GET_COL_RECVD;
+					cb.cb_columns[4] = GET_COL_SOURCE;
+					flags |= ZFS_ITER_RECVD_PROPS;
+					i = ZFS_GET_NCOLS;
+					break;
+				default:
+					(void) fprintf(stderr,
+					    gettext("invalid column name "
+					    "'%s'\n"), value);
+					usage(B_FALSE);
+				}
+			}
+			break;
+
+		case 's':
+			/* -s replaces (not augments) the default source set. */
+			cb.cb_sources = 0;
+			while (*optarg != '\0') {
+				static char *source_subopts[] = {
+					"local", "default", "inherited",
+					"received", "temporary", "none",
+					NULL };
+
+				switch (getsubopt(&optarg, source_subopts,
+				    &value)) {
+				case 0:
+					cb.cb_sources |= ZPROP_SRC_LOCAL;
+					break;
+				case 1:
+					cb.cb_sources |= ZPROP_SRC_DEFAULT;
+					break;
+				case 2:
+					cb.cb_sources |= ZPROP_SRC_INHERITED;
+					break;
+				case 3:
+					cb.cb_sources |= ZPROP_SRC_RECEIVED;
+					break;
+				case 4:
+					cb.cb_sources |= ZPROP_SRC_TEMPORARY;
+					break;
+				case 5:
+					cb.cb_sources |= ZPROP_SRC_NONE;
+					break;
+				default:
+					(void) fprintf(stderr,
+					    gettext("invalid source "
+					    "'%s'\n"), value);
+					usage(B_FALSE);
+				}
+			}
+			break;
+
+		case 't':
+			/* -t replaces the default dataset-type set. */
+			types = 0;
+			flags &= ~ZFS_ITER_PROP_LISTSNAPS;
+			while (*optarg != '\0') {
+				static char *type_subopts[] = { "filesystem",
+				    "volume", "snapshot", "all", NULL };
+
+				switch (getsubopt(&optarg, type_subopts,
+				    &value)) {
+				case 0:
+					types |= ZFS_TYPE_FILESYSTEM;
+					break;
+				case 1:
+					types |= ZFS_TYPE_VOLUME;
+					break;
+				case 2:
+					types |= ZFS_TYPE_SNAPSHOT;
+					break;
+				case 3:
+					types = ZFS_TYPE_DATASET;
+					break;
+
+				default:
+					(void) fprintf(stderr,
+					    gettext("invalid type '%s'\n"),
+					    value);
+					usage(B_FALSE);
+				}
+			}
+			break;
+
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing property "
+		    "argument\n"));
+		usage(B_FALSE);
+	}
+
+	fields = argv[0];
+
+	if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
+	    != 0)
+		usage(B_FALSE);
+
+	argc--;
+	argv++;
+
+	/*
+	 * As part of zfs_expand_proplist(), we keep track of the maximum column
+	 * width for each property.  For the 'NAME' (and 'SOURCE') columns, we
+	 * need to know the maximum name length.  However, the user likely did
+	 * not specify 'name' as one of the properties to fetch, so we need to
+	 * make sure we always include at least this property for
+	 * print_get_headers() to work properly.
+	 */
+	if (cb.cb_proplist != NULL) {
+		fake_name.pl_prop = ZFS_PROP_NAME;
+		fake_name.pl_width = strlen(gettext("NAME"));
+		fake_name.pl_next = cb.cb_proplist;
+		cb.cb_proplist = &fake_name;
+	}
+
+	cb.cb_first = B_TRUE;
+
+	/* run for each object */
+	ret = zfs_for_each(argc, argv, flags, types, NULL,
+	    &cb.cb_proplist, limit, get_callback, &cb);
+
+	if (cb.cb_proplist == &fake_name)
+		zprop_free_list(fake_name.pl_next);
+	else
+		zprop_free_list(cb.cb_proplist);
+
+	return (ret);
+}
+
+/*
+ * inherit [-rS] <property> <fs|vol> ...
+ *
+ *	-r	Recurse over all children
+ *	-S	Revert to received value, if any
+ *
+ * For each dataset specified on the command line, inherit the given property
+ * from its parent.  Inheriting a property at the pool level will cause it to
+ * use the default value.  The '-r' flag will recurse over all children, and is
+ * useful for setting a property on a hierarchy-wide basis, regardless of any
+ * local modifications for each dataset.
+ */
+
+typedef struct inherit_cbdata {
+	const char *cb_propname;	/* property being inherited */
+	boolean_t cb_received;		/* -S: revert to received value */
+} inherit_cbdata_t;
+
+/*
+ * Per-dataset callback for 'zfs inherit -r'.  Skips native properties
+ * that are not valid for the dataset's type rather than failing the
+ * whole recursion.  Returns nonzero on zfs_prop_inherit() failure.
+ */
+static int
+inherit_recurse_cb(zfs_handle_t *zhp, void *data)
+{
+	inherit_cbdata_t *cb = data;
+	zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname);
+
+	/*
+	 * If we're doing it recursively, then ignore properties that
+	 * are not valid for this type of dataset.
+	 */
+	if (prop != ZPROP_INVAL &&
+	    !zfs_prop_valid_for_type(prop, zfs_get_type(zhp)))
+		return (0);
+
+	return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
+}
+
+/*
+ * Per-dataset callback for non-recursive 'zfs inherit': inherit the
+ * property unconditionally; nonzero return indicates failure.
+ */
+static int
+inherit_cb(zfs_handle_t *zhp, void *data)
+{
+	inherit_cbdata_t *cb = data;
+
+	return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
+}
+
+/*
+ * Entry point for 'zfs inherit [-rS] <property> <fs|vol> ...'.
+ * Validates that the named property may be inherited (not read-only;
+ * inheritable unless -S; volsize/version cannot be reverted with -S),
+ * then applies inherit_cb()/inherit_recurse_cb() over the datasets.
+ */
+static int
+zfs_do_inherit(int argc, char **argv)
+{
+	int c;
+	zfs_prop_t prop;
+	inherit_cbdata_t cb = { 0 };
+	char *propname;
+	int ret = 0;
+	int flags = 0;
+	boolean_t received = B_FALSE;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "rS")) != -1) {
+		switch (c) {
+		case 'r':
+			flags |= ZFS_ITER_RECURSE;
+			break;
+		case 'S':
+			received = B_TRUE;
+			break;
+		case '?':
+		default:
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing property argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing dataset argument\n"));
+		usage(B_FALSE);
+	}
+
+	propname = argv[0];
+	argc--;
+	argv++;
+
+	if ((prop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
+		/* Native property: check it may legally be inherited. */
+		if (zfs_prop_readonly(prop)) {
+			(void) fprintf(stderr, gettext(
+			    "%s property is read-only\n"),
+			    propname);
+			return (1);
+		}
+		if (!zfs_prop_inheritable(prop) && !received) {
+			(void) fprintf(stderr, gettext("'%s' property cannot "
+			    "be inherited\n"), propname);
+			if (prop == ZFS_PROP_QUOTA ||
+			    prop == ZFS_PROP_RESERVATION ||
+			    prop == ZFS_PROP_REFQUOTA ||
+			    prop == ZFS_PROP_REFRESERVATION)
+				(void) fprintf(stderr, gettext("use 'zfs set "
+				    "%s=none' to clear\n"), propname);
+			return (1);
+		}
+		if (received && (prop == ZFS_PROP_VOLSIZE ||
+		    prop == ZFS_PROP_VERSION)) {
+			(void) fprintf(stderr, gettext("'%s' property cannot "
+			    "be reverted to a received value\n"), propname);
+			return (1);
+		}
+	} else if (!zfs_prop_user(propname)) {
+		/* Neither a native nor a valid user property name. */
+		(void) fprintf(stderr, gettext("invalid property '%s'\n"),
+		    propname);
+		usage(B_FALSE);
+	}
+
+	cb.cb_propname = propname;
+	cb.cb_received = received;
+
+	if (flags & ZFS_ITER_RECURSE) {
+		ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
+		    NULL, NULL, 0, inherit_recurse_cb, &cb);
+	} else {
+		ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
+		    NULL, NULL, 0, inherit_cb, &cb);
+	}
+
+	return (ret);
+}
+
+/* State shared across the 'zfs upgrade' iteration callbacks. */
+typedef struct upgrade_cbdata {
+	uint64_t cb_numupgraded;	/* filesystems actually upgraded */
+	uint64_t cb_numsamegraded;	/* already at the target version */
+	uint64_t cb_numfailed;		/* upgrade attempts that failed */
+	uint64_t cb_version;		/* target ZPL version (-V) */
+	boolean_t cb_newer;		/* list newer-than-current instead */
+	boolean_t cb_foundone;		/* header already printed */
+	char cb_lastfs[ZFS_MAXNAMELEN];	/* last fs upgraded (pool tracking) */
+} upgrade_cbdata_t;
+
+/*
+ * Return B_TRUE if 'name' and zhp's dataset belong to the same pool,
+ * by comparing the leading component up to the first '/' or '@'.
+ */
+static int
+same_pool(zfs_handle_t *zhp, const char *name)
+{
+	int len1 = strcspn(name, "/@");	/* pool-name length of 'name' */
+	const char *zhname = zfs_get_name(zhp);
+	int len2 = strcspn(zhname, "/@");
+
+	if (len1 != len2)
+		return (B_FALSE);
+	return (strncmp(name, zhname, len1) == 0);
+}
+
+/*
+ * 'zfs upgrade' (no args) listing callback: print the filesystem if
+ * its ZPL version is older than the running ZPL_VERSION, or (when
+ * cb_newer is set) newer than it.  Prints the explanatory header and
+ * column titles once, on the first match.
+ */
+static int
+upgrade_list_callback(zfs_handle_t *zhp, void *data)
+{
+	upgrade_cbdata_t *cb = data;
+	int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
+
+	/* list if it's old/new */
+	if ((!cb->cb_newer && version < ZPL_VERSION) ||
+	    (cb->cb_newer && version > ZPL_VERSION)) {
+		char *str;
+		if (cb->cb_newer) {
+			str = gettext("The following filesystems are "
+			    "formatted using a newer software version and\n"
+			    "cannot be accessed on the current system.\n\n");
+		} else {
+			str = gettext("The following filesystems are "
+			    "out of date, and can be upgraded.  After being\n"
+			    "upgraded, these filesystems (and any 'zfs send' "
+			    "streams generated from\n"
+			    "subsequent snapshots) will no longer be "
+			    "accessible by older software versions.\n\n");
+		}
+
+		if (!cb->cb_foundone) {
+			(void) puts(str);
+			(void) printf(gettext("VER  FILESYSTEM\n"));
+			(void) printf(gettext("---  ------------\n"));
+			cb->cb_foundone = B_TRUE;
+		}
+
+		(void) printf("%2u   %s\n", version, zfs_get_name(zhp));
+	}
+
+	return (0);
+}
+
+/*
+ * 'zfs upgrade <fs>' callback: raise one filesystem's ZPL version to
+ * cb->cb_version, after verifying the pool's SPA version is new enough.
+ * Downgrades are refused.  Counters in cb record the outcome.
+ */
+static int
+upgrade_set_callback(zfs_handle_t *zhp, void *data)
+{
+	upgrade_cbdata_t *cb = data;
+	int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
+	int needed_spa_version;
+	int spa_version;
+
+	if (zfs_spa_version(zhp, &spa_version) < 0)
+		return (-1);
+
+	needed_spa_version = zfs_spa_version_map(cb->cb_version);
+
+	if (needed_spa_version < 0)
+		return (-1);
+
+	if (spa_version < needed_spa_version) {
+		/* can't upgrade */
+		(void) printf(gettext("%s: can not be "
+		    "upgraded; the pool version needs to first "
+		    "be upgraded\nto version %d\n\n"),
+		    zfs_get_name(zhp), needed_spa_version);
+		cb->cb_numfailed++;
+		return (0);
+	}
+
+	/* upgrade */
+	if (version < cb->cb_version) {
+		char verstr[16];
+		/*
+		 * NOTE(review): cb_version is uint64_t but printed with
+		 * "%llu"; assumes uint64_t == unsigned long long on this
+		 * platform — a (u_longlong_t) cast would be stricter.
+		 */
+		(void) snprintf(verstr, sizeof (verstr),
+		    "%llu", cb->cb_version);
+		if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) {
+			/*
+			 * If they did "zfs upgrade -a", then we could
+			 * be doing ioctls to different pools.  We need
+			 * to log this history once to each pool.
+			 */
+			verify(zpool_stage_history(g_zfs, history_str) == 0);
+		}
+		if (zfs_prop_set(zhp, "version", verstr) == 0)
+			cb->cb_numupgraded++;
+		else
+			cb->cb_numfailed++;
+		(void) strcpy(cb->cb_lastfs, zfs_get_name(zhp));
+	} else if (version > cb->cb_version) {
+		/* can't downgrade */
+		(void) printf(gettext("%s: can not be downgraded; "
+		    "it is already at version %u\n"),
+		    zfs_get_name(zhp), version);
+		cb->cb_numfailed++;
+	} else {
+		cb->cb_numsamegraded++;
+	}
+	return (0);
+}
+
+/*
+ * zfs upgrade
+ * zfs upgrade -v
+ * zfs upgrade [-r] [-V <version>] <-a | filesystem>
+ */
+/*
+ * Entry point for 'zfs upgrade' (see forms in comment above).
+ * With no arguments, lists filesystems older (then newer) than the
+ * running ZPL_VERSION; with -v, prints the supported version table;
+ * with -a or explicit filesystems, upgrades them to -V (or current).
+ *
+ * NOTE(review): 'c' is declared char but getopt() returns int; on
+ * platforms where char is unsigned, comparing against -1 could loop
+ * forever.  Declared int in sibling commands — worth confirming.
+ */
+static int
+zfs_do_upgrade(int argc, char **argv)
+{
+	boolean_t all = B_FALSE;
+	boolean_t showversions = B_FALSE;
+	int ret = 0;
+	upgrade_cbdata_t cb = { 0 };
+	char c;
+	int flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "rvV:a")) != -1) {
+		switch (c) {
+		case 'r':
+			flags |= ZFS_ITER_RECURSE;
+			break;
+		case 'v':
+			showversions = B_TRUE;
+			break;
+		case 'V':
+			if (zfs_prop_string_to_index(ZFS_PROP_VERSION,
+			    optarg, &cb.cb_version) != 0) {
+				(void) fprintf(stderr,
+				    gettext("invalid version %s\n"), optarg);
+				usage(B_FALSE);
+			}
+			break;
+		case 'a':
+			all = B_TRUE;
+			break;
+		case '?':
+		default:
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* Reject option combinations that make no sense together. */
+	if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version))
+		usage(B_FALSE);
+	if (showversions && (flags & ZFS_ITER_RECURSE || all ||
+	    cb.cb_version || argc))
+		usage(B_FALSE);
+	if ((all || argc) && (showversions))
+		usage(B_FALSE);
+	if (all && argc)
+		usage(B_FALSE);
+
+	if (showversions) {
+		/* Show info on available versions. */
+		(void) printf(gettext("The following filesystem versions are "
+		    "supported:\n\n"));
+		(void) printf(gettext("VER  DESCRIPTION\n"));
+		(void) printf("---  -----------------------------------------"
+		    "---------------\n");
+		(void) printf(gettext(" 1   Initial ZFS filesystem version\n"));
+		(void) printf(gettext(" 2   Enhanced directory entries\n"));
+		(void) printf(gettext(" 3   Case insensitive and filesystem "
+		    "user identifier (FUID)\n"));
+		(void) printf(gettext(" 4   userquota, groupquota "
+		    "properties\n"));
+		(void) printf(gettext(" 5   System attributes\n"));
+		(void) printf(gettext("\nFor more information on a particular "
+		    "version, including supported releases,\n"));
+		(void) printf("see the ZFS Administration Guide.\n\n");
+		ret = 0;
+	} else if (argc || all) {
+		/* Upgrade filesystems */
+		if (cb.cb_version == 0)
+			cb.cb_version = ZPL_VERSION;
+		ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
+		    NULL, NULL, 0, upgrade_set_callback, &cb);
+		(void) printf(gettext("%llu filesystems upgraded\n"),
+		    cb.cb_numupgraded);
+		if (cb.cb_numsamegraded) {
+			(void) printf(gettext("%llu filesystems already at "
+			    "this version\n"),
+			    cb.cb_numsamegraded);
+		}
+		if (cb.cb_numfailed != 0)
+			ret = 1;
+	} else {
+		/* List old-version filesystems */
+		boolean_t found;
+		(void) printf(gettext("This system is currently running "
+		    "ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
+
+		flags |= ZFS_ITER_RECURSE;
+		ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
+		    NULL, NULL, 0, upgrade_list_callback, &cb);
+
+		found = cb.cb_foundone;
+		cb.cb_foundone = B_FALSE;
+		cb.cb_newer = B_TRUE;	/* second pass: newer-than-current */
+
+		ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
+		    NULL, NULL, 0, upgrade_list_callback, &cb);
+
+		if (!cb.cb_foundone && !found) {
+			(void) printf(gettext("All filesystems are "
+			    "formatted with the current version.\n"));
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * zfs userspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
+ *               [-S field [-S field]...] [-t type[,...]] filesystem | snapshot
+ * zfs groupspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
+ *                [-S field [-S field]...] [-t type[,...]] filesystem | snapshot
+ *
+ *	-H      Scripted mode; elide headers and separate columns by tabs.
+ *	-i	Translate SID to POSIX ID.
+ *	-n	Print numeric ID instead of user/group name.
+ *	-o      Control which fields to display.
+ *	-p	Use exact (parseable) numeric output.
+ *	-s      Specify sort columns, descending order.
+ *	-S      Specify sort columns, ascending order.
+ *	-t      Control which object types to display.
+ *
+ *	Displays space consumed by, and quotas on, each user in the specified
+ *	filesystem or snapshot.
+ */
+
+/* us_field_types, us_field_hdr and us_field_names should be kept in sync */
+enum us_field_types {
+	USFIELD_TYPE,
+	USFIELD_NAME,
+	USFIELD_USED,
+	USFIELD_QUOTA
+};
+static char *us_field_hdr[] = { "TYPE", "NAME", "USED", "QUOTA" };
+static char *us_field_names[] = { "type", "name", "used", "quota" };
+#define	USFIELD_LAST	(sizeof (us_field_names) / sizeof (char *))
+
+#define	USTYPE_PSX_GRP	(1 << 0)
+#define	USTYPE_PSX_USR	(1 << 1)
+#define	USTYPE_SMB_GRP	(1 << 2)
+#define	USTYPE_SMB_USR	(1 << 3)
+#define	USTYPE_ALL	\
+	(USTYPE_PSX_GRP | USTYPE_PSX_USR | USTYPE_SMB_GRP | USTYPE_SMB_USR)
+
+/* Type masks, parallel to us_type_names (parsed by 'zfs userspace -t') */
+static int us_type_bits[] = {
+	USTYPE_PSX_GRP,
+	USTYPE_PSX_USR,
+	USTYPE_SMB_GRP,
+	USTYPE_SMB_USR,
+	USTYPE_ALL
+};
+/* Fixed typo "posxiuser" -> "posixuser" so '-t posixuser' is accepted */
+static char *us_type_names[] = { "posixgroup", "posixuser", "smbgroup",
+	"smbuser", "all" };
+
+/* One type/name entry in the 'zfs userspace' AVL tree / output list. */
+typedef struct us_node {
+	nvlist_t	*usn_nvl;	/* type, name, used, quota values */
+	uu_avl_node_t	usn_avlnode;	/* linkage in the sorting AVL tree */
+	uu_list_node_t	usn_listnode;	/* linkage in the output list */
+} us_node_t;
+
+/* State passed through zfs_userspace() to userspace_cb(). */
+typedef struct us_cbdata {
+	nvlist_t	**cb_nvlp;
+	uu_avl_pool_t	*cb_avl_pool;
+	uu_avl_t	*cb_avl;	/* tree of us_node_t, sorted */
+	boolean_t	cb_numname;	/* -n: numeric IDs, not names */
+	boolean_t	cb_nicenum;	/* human-readable sizes (no -p) */
+	boolean_t	cb_sid2posix;	/* -i: translate SIDs to POSIX IDs */
+	zfs_userquota_prop_t cb_prop;	/* which {user,group}{used,quota} */
+	zfs_sort_column_t *cb_sortcol;	/* -s/-S sort specification */
+	size_t		cb_width[USFIELD_LAST];	/* max width seen per column */
+} us_cbdata_t;
+
+/* Set once used/quota values have been merged; affects us_compare(). */
+static boolean_t us_populated = B_FALSE;
+
+/* Sort parameters handed to us_compare() via the AVL pool. */
+typedef struct {
+	zfs_sort_column_t *si_sortcol;
+	boolean_t	si_numname;
+} us_sort_info_t;
+
+/*
+ * Map a field name ("type", "name", "used", "quota") to its index in
+ * us_field_names[], or -1 if the name is unknown.
+ */
+static int
+us_field_index(char *field)
+{
+	int i;
+
+	for (i = 0; i < USFIELD_LAST; i++) {
+		if (strcmp(field, us_field_names[i]) == 0)
+			return (i);
+	}
+
+	return (-1);
+}
+
+/*
+ * AVL comparator for us_node_t entries: order by each sort column in
+ * turn (type, name, used, quota), honoring per-column reverse flags;
+ * used/quota are ignored until us_populated is set.  Ties are broken
+ * by the "smbentity" flag when SID-to-POSIX translation is in effect.
+ * Returns -1, 0, or 1.
+ */
+static int
+us_compare(const void *larg, const void *rarg, void *unused)
+{
+	const us_node_t *l = larg;
+	const us_node_t *r = rarg;
+	us_sort_info_t *si = (us_sort_info_t *)unused;
+	zfs_sort_column_t *sortcol = si->si_sortcol;
+	boolean_t numname = si->si_numname;
+	nvlist_t *lnvl = l->usn_nvl;
+	nvlist_t *rnvl = r->usn_nvl;
+	int rc = 0;
+	boolean_t lvb, rvb;
+
+	for (; sortcol != NULL; sortcol = sortcol->sc_next) {
+		char *lvstr = "";
+		char *rvstr = "";
+		uint32_t lv32 = 0;
+		uint32_t rv32 = 0;
+		uint64_t lv64 = 0;
+		uint64_t rv64 = 0;
+		zfs_prop_t prop = sortcol->sc_prop;
+		const char *propname = NULL;
+		boolean_t reverse = sortcol->sc_reverse;
+
+		switch (prop) {
+		case ZFS_PROP_TYPE:
+			propname = "type";
+			(void) nvlist_lookup_uint32(lnvl, propname, &lv32);
+			(void) nvlist_lookup_uint32(rnvl, propname, &rv32);
+			if (rv32 != lv32)
+				rc = (rv32 < lv32) ? 1 : -1;
+			break;
+		case ZFS_PROP_NAME:
+			propname = "name";
+			/* "name" is uint64 (raw id) or string, per -n. */
+			if (numname) {
+				(void) nvlist_lookup_uint64(lnvl, propname,
+				    &lv64);
+				(void) nvlist_lookup_uint64(rnvl, propname,
+				    &rv64);
+				if (rv64 != lv64)
+					rc = (rv64 < lv64) ? 1 : -1;
+			} else {
+				(void) nvlist_lookup_string(lnvl, propname,
+				    &lvstr);
+				(void) nvlist_lookup_string(rnvl, propname,
+				    &rvstr);
+				rc = strcmp(lvstr, rvstr);
+			}
+			break;
+		case ZFS_PROP_USED:
+		case ZFS_PROP_QUOTA:
+			if (!us_populated)
+				break;
+			if (prop == ZFS_PROP_USED)
+				propname = "used";
+			else
+				propname = "quota";
+			(void) nvlist_lookup_uint64(lnvl, propname, &lv64);
+			(void) nvlist_lookup_uint64(rnvl, propname, &rv64);
+			if (rv64 != lv64)
+				rc = (rv64 < lv64) ? 1 : -1;
+			break;
+		}
+
+		if (rc != 0) {
+			if (rc < 0)
+				return (reverse ? 1 : -1);
+			else
+				return (reverse ? -1 : 1);
+		}
+	}
+
+	/*
+	 * If entries still seem to be the same, check if they are of the same
+	 * type (smbentity is added only if we are doing SID to POSIX ID
+	 * translation where we can have duplicate type/name combinations).
+	 */
+	if (nvlist_lookup_boolean_value(lnvl, "smbentity", &lvb) == 0 &&
+	    nvlist_lookup_boolean_value(rnvl, "smbentity", &rvb) == 0 &&
+	    lvb != rvb)
+		return (lvb < rvb ? -1 : 1);
+
+	return (0);
+}
+
+/*
+ * Translate a USTYPE_* bit to its display string for the TYPE column.
+ */
+static inline const char *
+us_type2str(unsigned field_type)
+{
+	switch (field_type) {
+	case USTYPE_PSX_USR:
+		return ("POSIX User");
+	case USTYPE_PSX_GRP:
+		return ("POSIX Group");
+	case USTYPE_SMB_USR:
+		return ("SMB User");
+	case USTYPE_SMB_GRP:
+		return ("SMB Group");
+	default:
+		return ("Undefined");
+	}
+}
+
+/*
+ * zfs_userspace() callback: record one (domain, rid, space) tuple as a
+ * us_node_t in the AVL tree, translating SMB SIDs to names or (with -i)
+ * POSIX IDs, resolving POSIX IDs to user/group names unless -n, and
+ * updating per-column maximum widths in cb->cb_width[] for formatting.
+ * An existing node with the same type/name is updated in place.
+ */
+static int
+userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
+{
+	us_cbdata_t *cb = (us_cbdata_t *)arg;
+	zfs_userquota_prop_t prop = cb->cb_prop;
+	char *name = NULL;
+	char *propname;
+	char sizebuf[32];
+	us_node_t *node;
+	uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
+	uu_avl_t *avl = cb->cb_avl;
+	uu_avl_index_t idx;
+	nvlist_t *props;
+	us_node_t *n;
+	zfs_sort_column_t *sortcol = cb->cb_sortcol;
+	unsigned type;
+	const char *typestr;
+	size_t namelen;
+	size_t typelen;
+	size_t sizelen;
+	int typeidx, nameidx, sizeidx;
+	us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
+	boolean_t smbentity = B_FALSE;
+
+	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+	node = safe_malloc(sizeof (us_node_t));
+	uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
+	node->usn_nvl = props;
+
+	if (domain != NULL && domain[0] != '\0') {
+		/* SMB */
+		char sid[ZFS_MAXNAMELEN + 32];
+		uid_t id;
+		uint64_t classes;
+#ifdef sun
+		int err;
+		directory_error_t e;
+#endif
+
+		smbentity = B_TRUE;
+
+		(void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid);
+
+		if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
+			type = USTYPE_SMB_GRP;
+#ifdef sun
+			err = sid_to_id(sid, B_FALSE, &id);
+#endif
+		} else {
+			type = USTYPE_SMB_USR;
+#ifdef sun
+			err = sid_to_id(sid, B_TRUE, &id);
+#endif
+		}
+
+#ifdef sun
+		/* Resolve the SID to a printable name unless -i was given. */
+		if (err == 0) {
+			rid = id;
+			if (!cb->cb_sid2posix) {
+				e = directory_name_from_sid(NULL, sid, &name,
+				    &classes);
+				if (e != NULL)
+					directory_error_free(e);
+				if (name == NULL)
+					name = sid;
+			}
+		}
+#endif
+	}
+
+	if (cb->cb_sid2posix || domain == NULL || domain[0] == '\0') {
+		/* POSIX or -i */
+		if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
+			type = USTYPE_PSX_GRP;
+			if (!cb->cb_numname) {
+				struct group *g;
+
+				if ((g = getgrgid(rid)) != NULL)
+					name = g->gr_name;
+			}
+		} else {
+			type = USTYPE_PSX_USR;
+			if (!cb->cb_numname) {
+				struct passwd *p;
+
+				if ((p = getpwuid(rid)) != NULL)
+					name = p->pw_name;
+			}
+		}
+	}
+
+	/*
+	 * Make sure that the type/name combination is unique when doing
+	 * SID to POSIX ID translation (hence changing the type from SMB to
+	 * POSIX).
+	 */
+	if (cb->cb_sid2posix &&
+	    nvlist_add_boolean_value(props, "smbentity", smbentity) != 0)
+		nomem();
+
+	/* Calculate/update width of TYPE field */
+	typestr = us_type2str(type);
+	typelen = strlen(gettext(typestr));
+	typeidx = us_field_index("type");
+	if (typelen > cb->cb_width[typeidx])
+		cb->cb_width[typeidx] = typelen;
+	if (nvlist_add_uint32(props, "type", type) != 0)
+		nomem();
+
+	/* Calculate/update width of NAME field */
+	if ((cb->cb_numname && cb->cb_sid2posix) || name == NULL) {
+		/* Numeric name: stored as uint64 so sorting is numeric. */
+		if (nvlist_add_uint64(props, "name", rid) != 0)
+			nomem();
+		namelen = snprintf(NULL, 0, "%u", rid);
+	} else {
+		if (nvlist_add_string(props, "name", name) != 0)
+			nomem();
+		namelen = strlen(name);
+	}
+	nameidx = us_field_index("name");
+	if (namelen > cb->cb_width[nameidx])
+		cb->cb_width[nameidx] = namelen;
+
+	/*
+	 * Check if this type/name combination is in the list and update it;
+	 * otherwise add new node to the list.
+	 */
+	if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
+		uu_avl_insert(avl, node, idx);
+	} else {
+		nvlist_free(props);
+		free(node);
+		node = n;
+		props = node->usn_nvl;
+	}
+
+	/* Calculate/update width of USED/QUOTA fields */
+	if (cb->cb_nicenum)
+		zfs_nicenum(space, sizebuf, sizeof (sizebuf));
+	else
+		(void) snprintf(sizebuf, sizeof (sizebuf), "%llu", space);
+	sizelen = strlen(sizebuf);
+	if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED) {
+		propname = "used";
+		if (!nvlist_exists(props, "quota"))
+			(void) nvlist_add_uint64(props, "quota", 0);
+	} else {
+		propname = "quota";
+		if (!nvlist_exists(props, "used"))
+			(void) nvlist_add_uint64(props, "used", 0);
+	}
+	sizeidx = us_field_index(propname);
+	if (sizelen > cb->cb_width[sizeidx])
+		cb->cb_width[sizeidx] = sizelen;
+
+	if (nvlist_add_uint64(props, propname, space) != 0)
+		nomem();
+
+	return (0);
+}
+
+/*
+ * Print one usage entry (a single output row) for 'zfs userspace'.
+ * scripted: tab-separated columns with no padding; parsable: raw numbers
+ * instead of human-readable sizes.  Entries whose type bit is not set in
+ * 'types' are skipped entirely.
+ */
+static void
+print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
+    size_t *width, us_node_t *node)
+{
+	nvlist_t *nvl = node->usn_nvl;
+	char valstr[ZFS_MAXNAMELEN];
+	boolean_t first = B_TRUE;
+	int cfield = 0;
+	int field;
+	uint32_t ustype;
+
+	/* Check type */
+	(void) nvlist_lookup_uint32(nvl, "type", &ustype);
+	if (!(ustype & types))
+		return;
+
+	while ((field = fields[cfield]) != USFIELD_LAST) {
+		nvpair_t *nvp = NULL;
+		data_type_t type;
+		uint32_t val32;
+		uint64_t val64;
+		char *strval = NULL;
+
+		/* Locate the nvpair for the requested output field */
+		while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
+			if (strcmp(nvpair_name(nvp),
+			    us_field_names[field]) == 0)
+				break;
+		}
+
+		/*
+		 * NOTE(review): if the field is absent from the nvlist, nvp
+		 * is NULL here and nvpair_type(NULL) dereferences NULL --
+		 * confirm userspace_cb always populates every field.
+		 */
+		type = nvpair_type(nvp);
+		switch (type) {
+		case DATA_TYPE_UINT32:
+			(void) nvpair_value_uint32(nvp, &val32);
+			break;
+		case DATA_TYPE_UINT64:
+			(void) nvpair_value_uint64(nvp, &val64);
+			break;
+		case DATA_TYPE_STRING:
+			(void) nvpair_value_string(nvp, &strval);
+			break;
+		default:
+			(void) fprintf(stderr, "invalid data type\n");
+		}
+
+		/* Convert the raw value into its display string */
+		switch (field) {
+		case USFIELD_TYPE:
+			strval = (char *)us_type2str(val32);
+			break;
+		case USFIELD_NAME:
+			/*
+			 * NOTE(review): "%llu" with a uint64_t assumes
+			 * unsigned long long == uint64_t; other code in this
+			 * codebase casts to u_longlong_t -- confirm.
+			 */
+			if (type == DATA_TYPE_UINT64) {
+				(void) sprintf(valstr, "%llu", val64);
+				strval = valstr;
+			}
+			break;
+		case USFIELD_USED:
+		case USFIELD_QUOTA:
+			if (type == DATA_TYPE_UINT64) {
+				if (parsable) {
+					(void) sprintf(valstr, "%llu", val64);
+				} else {
+					zfs_nicenum(val64, valstr,
+					    sizeof (valstr));
+				}
+				/* An unset quota displays as "none" */
+				if (field == USFIELD_QUOTA &&
+				    strcmp(valstr, "0") == 0)
+					strval = "none";
+				else
+					strval = valstr;
+			}
+			break;
+		}
+
+		/* Column separator: tab when scripted, two spaces otherwise */
+		if (!first) {
+			if (scripted)
+				(void) printf("\t");
+			else
+				(void) printf("  ");
+		}
+		if (scripted)
+			(void) printf("%s", strval);
+		else if (field == USFIELD_TYPE || field == USFIELD_NAME)
+			(void) printf("%-*s", width[field], strval);
+		else
+			(void) printf("%*s", width[field], strval);
+
+		first = B_FALSE;
+		cfield++;
+	}
+
+	(void) printf("\n");
+}
+
+/*
+ * Print the column headers (unless scripted) followed by one row for each
+ * node in the sorted AVL tree.  When rmnode is set, each node's nvlist is
+ * freed after it has been printed.
+ */
+static void
+print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
+    size_t *width, boolean_t rmnode, uu_avl_t *avl)
+{
+	us_node_t *node;
+	const char *col;
+	int cfield = 0;
+	int field;
+
+	if (!scripted) {
+		boolean_t first = B_TRUE;
+
+		/* TYPE and NAME are left-justified; sizes right-justified */
+		while ((field = fields[cfield]) != USFIELD_LAST) {
+			col = gettext(us_field_hdr[field]);
+			if (field == USFIELD_TYPE || field == USFIELD_NAME) {
+				(void) printf(first ? "%-*s" : "  %-*s",
+				    width[field], col);
+			} else {
+				(void) printf(first ? "%*s" : "  %*s",
+				    width[field], col);
+			}
+			first = B_FALSE;
+			cfield++;
+		}
+		(void) printf("\n");
+	}
+
+	for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
+		print_us_node(scripted, parsable, fields, types, width, node);
+		if (rmnode)
+			nvlist_free(node->usn_nvl);
+	}
+}
+
+/*
+ * zfs userspace [-Hinp] [-o field[,...]] [-s field]... [-S field]...
+ *               [-t type[,...]] <filesystem | snapshot>
+ * zfs groupspace ... (identical, but defaults to group types)
+ *
+ * Displays space consumed by, and quotas on, each user or group of the
+ * specified filesystem or snapshot.  Results are collected into an AVL
+ * tree via userspace_cb, re-sorted per the -s/-S columns, then printed.
+ */
+static int
+zfs_do_userspace(int argc, char **argv)
+{
+	zfs_handle_t *zhp;
+	zfs_userquota_prop_t p;
+
+	uu_avl_pool_t *avl_pool;
+	uu_avl_t *avl_tree;
+	uu_avl_walk_t *walk;
+	char *delim;
+	char deffields[] = "type,name,used,quota";
+	char *ofield = NULL;
+	char *tfield = NULL;
+	int cfield = 0;
+	int fields[256];
+	int i;
+	boolean_t scripted = B_FALSE;
+	boolean_t prtnum = B_FALSE;
+	boolean_t parsable = B_FALSE;
+	boolean_t sid2posix = B_FALSE;
+	int ret = 0;
+	int c;
+	zfs_sort_column_t *sortcol = NULL;
+	int types = USTYPE_PSX_USR | USTYPE_SMB_USR;
+	us_cbdata_t cb;
+	us_node_t *node;
+	us_node_t *rmnode;
+	uu_list_pool_t *listpool;
+	uu_list_t *list;
+	uu_avl_index_t idx = 0;
+	uu_list_index_t idx2 = 0;
+
+	if (argc < 2)
+		usage(B_FALSE);
+
+	if (strcmp(argv[0], "groupspace") == 0)
+		/* Toggle default group types */
+		types = USTYPE_PSX_GRP | USTYPE_SMB_GRP;
+
+	while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) {
+		switch (c) {
+		case 'n':
+			/* print numeric IDs instead of names */
+			prtnum = B_TRUE;
+			break;
+		case 'H':
+			scripted = B_TRUE;
+			break;
+		case 'p':
+			parsable = B_TRUE;
+			break;
+		case 'o':
+			ofield = optarg;
+			break;
+		case 's':
+		case 'S':
+			/* -s ascending, -S descending sort column */
+			if (zfs_add_sort_column(&sortcol, optarg,
+			    c == 's' ? B_FALSE : B_TRUE) != 0) {
+				(void) fprintf(stderr,
+				    gettext("invalid field '%s'\n"), optarg);
+				usage(B_FALSE);
+			}
+			break;
+		case 't':
+			tfield = optarg;
+			break;
+		case 'i':
+			/* translate SID to POSIX ID */
+			sid2posix = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing dataset name\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	/* Use default output fields if not specified using -o */
+	if (ofield == NULL)
+		ofield = deffields;
+	/*
+	 * NOTE(review): cfield is not bounds-checked against fields[] (256
+	 * entries) while parsing the comma-separated -o list -- confirm the
+	 * input cannot overflow the array.
+	 */
+	do {
+		if ((delim = strchr(ofield, ',')) != NULL)
+			*delim = '\0';
+		if ((fields[cfield++] = us_field_index(ofield)) == -1) {
+			(void) fprintf(stderr, gettext("invalid type '%s' "
+			    "for -o option\n"), ofield);
+			return (-1);
+		}
+		if (delim != NULL)
+			ofield = delim + 1;
+	} while (delim != NULL);
+	fields[cfield] = USFIELD_LAST;
+
+	/* Override output types (-t option) */
+	if (tfield != NULL) {
+		types = 0;
+
+		do {
+			boolean_t found = B_FALSE;
+
+			if ((delim = strchr(tfield, ',')) != NULL)
+				*delim = '\0';
+			for (i = 0; i < sizeof (us_type_bits) / sizeof (int);
+			    i++) {
+				if (strcmp(tfield, us_type_names[i]) == 0) {
+					found = B_TRUE;
+					types |= us_type_bits[i];
+					break;
+				}
+			}
+			if (!found) {
+				(void) fprintf(stderr, gettext("invalid type "
+				    "'%s' for -t option\n"), tfield);
+				return (-1);
+			}
+			if (delim != NULL)
+				tfield = delim + 1;
+		} while (delim != NULL);
+	}
+
+	/*
+	 * NOTE(review): zhp is never zfs_close()d on any path out of this
+	 * function -- presumably acceptable because the command exits soon
+	 * after, but verify.
+	 */
+	if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL)
+		return (1);
+
+	if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
+	    offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
+		nomem();
+	if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
+		nomem();
+
+	/* Always add default sorting columns */
+	(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
+	(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
+
+	cb.cb_sortcol = sortcol;
+	cb.cb_numname = prtnum;
+	cb.cb_nicenum = !parsable;
+	cb.cb_avl_pool = avl_pool;
+	cb.cb_avl = avl_tree;
+	cb.cb_sid2posix = sid2posix;
+
+	/* Start each column at least as wide as its header */
+	for (i = 0; i < USFIELD_LAST; i++)
+		cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
+
+	/* Collect entries for each quota property matching the -t types */
+	for (p = 0; p < ZFS_NUM_USERQUOTA_PROPS; p++) {
+		if (((p == ZFS_PROP_USERUSED || p == ZFS_PROP_USERQUOTA) &&
+		    !(types & (USTYPE_PSX_USR | USTYPE_SMB_USR))) ||
+		    ((p == ZFS_PROP_GROUPUSED || p == ZFS_PROP_GROUPQUOTA) &&
+		    !(types & (USTYPE_PSX_GRP | USTYPE_SMB_GRP))))
+			continue;
+		cb.cb_prop = p;
+		if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0)
+			return (ret);
+	}
+
+	/* Sort the list */
+	if ((node = uu_avl_first(avl_tree)) == NULL)
+		return (0);
+
+	us_populated = B_TRUE;
+
+	/*
+	 * Re-sort: drain the AVL tree into a temporary list, then re-insert
+	 * each node so us_compare sees the fully populated entries.
+	 */
+	listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
+	    offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
+	list = uu_list_create(listpool, NULL, UU_DEFAULT);
+	uu_list_node_init(node, &node->usn_listnode, listpool);
+
+	while (node != NULL) {
+		rmnode = node;
+		node = uu_avl_next(avl_tree, node);
+		uu_avl_remove(avl_tree, rmnode);
+		if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
+			uu_list_insert(list, rmnode, idx2);
+	}
+
+	for (node = uu_list_first(list); node != NULL;
+	    node = uu_list_next(list, node)) {
+		us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
+
+		if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
+			uu_avl_insert(avl_tree, node, idx);
+	}
+
+	uu_list_destroy(list);
+	uu_list_pool_destroy(listpool);
+
+	/* Print and free node nvlist memory */
+	print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
+	    cb.cb_avl);
+
+	zfs_free_sort_columns(sortcol);
+
+	/* Clean up the AVL tree */
+	if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
+		nomem();
+
+	while ((node = uu_avl_walk_next(walk)) != NULL) {
+		uu_avl_remove(cb.cb_avl, node);
+		free(node);
+	}
+
+	uu_avl_walk_end(walk);
+	uu_avl_destroy(avl_tree);
+	uu_avl_pool_destroy(avl_pool);
+
+	return (ret);
+}
+
+/*
+ * list [-r][-d max] [-H] [-o property[,property]...] [-t type[,type]...]
+ *      [-s property [-s property]...] [-S property [-S property]...]
+ *      <dataset> ...
+ *
+ *	-r	Recurse over all children
+ *	-d	Limit recursion by depth.
+ *	-H	Scripted mode; elide headers and separate columns by tabs
+ *	-o	Control which fields to display.
+ *	-t	Control which object types to display.
+ *	-s	Specify sort columns, descending order.
+ *	-S	Specify sort columns, ascending order.
+ *
+ * When given no arguments, lists all filesystems in the system.
+ * Otherwise, list the specified datasets, optionally recursing down them if
+ * '-r' is specified.
+ */
+/* Callback state shared across list_callback() invocations. */
+typedef struct list_cbdata {
+	boolean_t	cb_first;	/* header not yet printed */
+	boolean_t	cb_scripted;	/* -H: no headers, tab-separated */
+	zprop_list_t	*cb_proplist;	/* properties/columns to display */
+} list_cbdata_t;
+
+/*
+ * Given a list of columns to display, output appropriate headers for each one.
+ * Built-in properties use their canonical column name and alignment; user
+ * properties are displayed upper-cased and left-justified.
+ */
+static void
+print_header(zprop_list_t *pl)
+{
+	char headerbuf[ZFS_MAXPROPLEN];
+	const char *header;
+	int i;
+	boolean_t first = B_TRUE;
+	boolean_t right_justify;
+
+	for (; pl != NULL; pl = pl->pl_next) {
+		if (!first) {
+			(void) printf("  ");
+		} else {
+			first = B_FALSE;
+		}
+
+		right_justify = B_FALSE;
+		if (pl->pl_prop != ZPROP_INVAL) {
+			header = zfs_prop_column_name(pl->pl_prop);
+			right_justify = zfs_prop_align_right(pl->pl_prop);
+		} else {
+			/* Upper-case the user property name for the header */
+			for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
+				headerbuf[i] = toupper(pl->pl_user_prop[i]);
+			headerbuf[i] = '\0';
+			header = headerbuf;
+		}
+
+		/* Last left-justified column gets no trailing padding */
+		if (pl->pl_next == NULL && !right_justify)
+			(void) printf("%s", header);
+		else if (right_justify)
+			(void) printf("%*s", pl->pl_width, header);
+		else
+			(void) printf("%-*s", pl->pl_width, header);
+	}
+
+	(void) printf("\n");
+}
+
+/*
+ * Given a dataset and a list of fields, print out all the properties according
+ * to the described layout.  Values that cannot be retrieved are shown as "-".
+ * In scripted mode columns are tab-separated with no padding; otherwise each
+ * column is padded to its precomputed width, and the final left-justified
+ * column gets no trailing padding.
+ */
+static void
+print_dataset(zfs_handle_t *zhp, zprop_list_t *pl, boolean_t scripted)
+{
+	boolean_t first = B_TRUE;
+	char property[ZFS_MAXPROPLEN];
+	nvlist_t *userprops = zfs_get_user_props(zhp);
+	nvlist_t *propval;
+	char *propstr;
+	boolean_t right_justify;
+	int width;
+
+	for (; pl != NULL; pl = pl->pl_next) {
+		if (!first) {
+			if (scripted)
+				(void) printf("\t");
+			else
+				(void) printf("  ");
+		} else {
+			first = B_FALSE;
+		}
+
+		if (pl->pl_prop == ZFS_PROP_NAME) {
+			/* Dataset name comes from the handle, not a lookup */
+			(void) strlcpy(property, zfs_get_name(zhp),
+			    sizeof (property));
+			propstr = property;
+			right_justify = zfs_prop_align_right(pl->pl_prop);
+		} else if (pl->pl_prop != ZPROP_INVAL) {
+			/* Built-in (native) property */
+			if (zfs_prop_get(zhp, pl->pl_prop, property,
+			    sizeof (property), NULL, NULL, 0, B_FALSE) != 0)
+				propstr = "-";
+			else
+				propstr = property;
+
+			right_justify = zfs_prop_align_right(pl->pl_prop);
+		} else if (zfs_prop_userquota(pl->pl_user_prop)) {
+			/* userquota@.../groupquota@... pseudo-properties */
+			if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
+			    property, sizeof (property), B_FALSE) != 0)
+				propstr = "-";
+			else
+				propstr = property;
+			right_justify = B_TRUE;
+		} else if (zfs_prop_written(pl->pl_user_prop)) {
+			/* written@<snap> pseudo-property */
+			if (zfs_prop_get_written(zhp, pl->pl_user_prop,
+			    property, sizeof (property), B_FALSE) != 0)
+				propstr = "-";
+			else
+				propstr = property;
+			right_justify = B_TRUE;
+		} else {
+			/* Arbitrary user property from the handle's nvlist */
+			if (nvlist_lookup_nvlist(userprops,
+			    pl->pl_user_prop, &propval) != 0)
+				propstr = "-";
+			else
+				verify(nvlist_lookup_string(propval,
+				    ZPROP_VALUE, &propstr) == 0);
+			right_justify = B_FALSE;
+		}
+
+		width = pl->pl_width;
+
+		/*
+		 * If this is being called in scripted mode, or if this is the
+		 * last column and it is left-justified, don't include a width
+		 * format specifier.
+		 */
+		if (scripted || (pl->pl_next == NULL && !right_justify))
+			(void) printf("%s", propstr);
+		else if (right_justify)
+			(void) printf("%*s", width, propstr);
+		else
+			(void) printf("%-*s", width, propstr);
+	}
+
+	(void) printf("\n");
+}
+
+/*
+ * Generic callback function to list a dataset or snapshot.
+ * Prints the column headers exactly once (before the first row, unless
+ * scripted), then one row per dataset via print_dataset().
+ */
+static int
+list_callback(zfs_handle_t *zhp, void *data)
+{
+	list_cbdata_t *cbp = data;
+
+	if (cbp->cb_first) {
+		if (!cbp->cb_scripted)
+			print_header(cbp->cb_proplist);
+		cbp->cb_first = B_FALSE;
+	}
+
+	print_dataset(zhp, cbp->cb_proplist, cbp->cb_scripted);
+
+	return (0);
+}
+
+/*
+ * Implements 'zfs list' (see usage comment above): parse options, build the
+ * property/column list, then iterate the requested datasets with
+ * list_callback().
+ */
+static int
+zfs_do_list(int argc, char **argv)
+{
+	int c;
+	boolean_t scripted = B_FALSE;
+	static char default_fields[] =
+	    "name,used,available,referenced,mountpoint";
+	int types = ZFS_TYPE_DATASET;
+	boolean_t types_specified = B_FALSE;
+	char *fields = NULL;
+	list_cbdata_t cb = { 0 };
+	char *value;
+	int limit = 0;
+	int ret = 0;
+	zfs_sort_column_t *sortcol = NULL;
+	int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":d:o:rt:Hs:S:")) != -1) {
+		switch (c) {
+		case 'o':
+			fields = optarg;
+			break;
+		case 'd':
+			/* limit recursion depth (implies -r via flags) */
+			limit = parse_depth(optarg, &flags);
+			break;
+		case 'r':
+			flags |= ZFS_ITER_RECURSE;
+			break;
+		case 'H':
+			scripted = B_TRUE;
+			break;
+		case 's':
+			if (zfs_add_sort_column(&sortcol, optarg,
+			    B_FALSE) != 0) {
+				(void) fprintf(stderr,
+				    gettext("invalid property '%s'\n"), optarg);
+				usage(B_FALSE);
+			}
+			break;
+		case 'S':
+			if (zfs_add_sort_column(&sortcol, optarg,
+			    B_TRUE) != 0) {
+				(void) fprintf(stderr,
+				    gettext("invalid property '%s'\n"), optarg);
+				usage(B_FALSE);
+			}
+			break;
+		case 't':
+			types = 0;
+			types_specified = B_TRUE;
+			/* -t overrides the default listsnaps behavior */
+			flags &= ~ZFS_ITER_PROP_LISTSNAPS;
+			while (*optarg != '\0') {
+				static char *type_subopts[] = { "filesystem",
+				    "volume", "snapshot", "all", NULL };
+
+				switch (getsubopt(&optarg, type_subopts,
+				    &value)) {
+				case 0:
+					types |= ZFS_TYPE_FILESYSTEM;
+					break;
+				case 1:
+					types |= ZFS_TYPE_VOLUME;
+					break;
+				case 2:
+					types |= ZFS_TYPE_SNAPSHOT;
+					break;
+				case 3:
+					types = ZFS_TYPE_DATASET;
+					break;
+
+				default:
+					(void) fprintf(stderr,
+					    gettext("invalid type '%s'\n"),
+					    value);
+					usage(B_FALSE);
+				}
+			}
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (fields == NULL)
+		fields = default_fields;
+
+	/*
+	 * If we are only going to list snapshot names and sort by name,
+	 * then we can use faster version.
+	 */
+	if (strcmp(fields, "name") == 0 && zfs_sort_only_by_name(sortcol))
+		flags |= ZFS_ITER_SIMPLE;
+
+	/*
+	 * If "-o space" and no types were specified, don't display snapshots.
+	 */
+	if (strcmp(fields, "space") == 0 && types_specified == B_FALSE)
+		types &= ~ZFS_TYPE_SNAPSHOT;
+
+	/*
+	 * If the user specifies '-o all', the zprop_get_list() doesn't
+	 * normally include the name of the dataset.  For 'zfs list', we always
+	 * want this property to be first.
+	 */
+	if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
+	    != 0)
+		usage(B_FALSE);
+
+	cb.cb_scripted = scripted;
+	cb.cb_first = B_TRUE;
+
+	ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
+	    limit, list_callback, &cb);
+
+	zprop_free_list(cb.cb_proplist);
+	zfs_free_sort_columns(sortcol);
+
+	/* cb_first still set means no dataset matched */
+	if (ret == 0 && cb.cb_first && !cb.cb_scripted)
+		(void) printf(gettext("no datasets available\n"));
+
+	return (ret);
+}
+
+/*
+ * zfs rename [-f] <fs | snap | vol> <fs | snap | vol>
+ * zfs rename [-f] -p <fs | vol> <fs | vol>
+ * zfs rename -r <snap> <snap>
+ * zfs rename -u [-p] <fs> <fs>
+ *
+ * Renames the given dataset to another of the same type.
+ *
+ * The '-p' flag creates all the non-existing ancestors of the target first.
+ */
+/* ARGSUSED */
+static int
+zfs_do_rename(int argc, char **argv)
+{
+	zfs_handle_t *zhp;
+	renameflags_t flags = { 0 };
+	int c;
+	int ret = 0;
+	int types;
+	boolean_t parents = B_FALSE;
+	char *snapshot = NULL;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "fpru")) != -1) {
+		switch (c) {
+		case 'p':
+			/* create non-existing ancestors of the target */
+			parents = B_TRUE;
+			break;
+		case 'r':
+			flags.recurse = B_TRUE;
+			break;
+		case 'u':
+			flags.nounmount = B_TRUE;
+			break;
+		case 'f':
+			flags.forceunmount = B_TRUE;
+			break;
+		case '?':
+		default:
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing source dataset "
+		    "argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing target dataset "
+		    "argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 2) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	if (flags.recurse && parents) {
+		(void) fprintf(stderr, gettext("-p and -r options are mutually "
+		    "exclusive\n"));
+		usage(B_FALSE);
+	}
+
+	if (flags.recurse && strchr(argv[0], '@') == 0) {
+		(void) fprintf(stderr, gettext("source dataset for recursive "
+		    "rename must be a snapshot\n"));
+		usage(B_FALSE);
+	}
+
+	if (flags.nounmount && parents) {
+		(void) fprintf(stderr, gettext("-u and -p options are mutually "
+		    "exclusive\n"));
+		usage(B_FALSE);
+	}
+
+	/* -u only makes sense for filesystems; -p excludes snapshots */
+	if (flags.nounmount)
+		types = ZFS_TYPE_FILESYSTEM;
+	else if (parents)
+		types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
+	else
+		types = ZFS_TYPE_DATASET;
+
+	if (flags.recurse) {
+		/*
+		 * When we do recursive rename we are fine when the given
+		 * snapshot for the given dataset doesn't exist - it can
+		 * still exists below.
+		 */
+
+		/* Split "fs@snap" so we open the filesystem, not the snap */
+		snapshot = strchr(argv[0], '@');
+		assert(snapshot != NULL);
+		*snapshot = '\0';
+		snapshot++;
+	}
+
+	if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL)
+		return (1);
+
+	/* If we were asked and the name looks good, try to create ancestors. */
+	if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) &&
+	    zfs_create_ancestors(g_zfs, argv[1]) != 0) {
+		zfs_close(zhp);
+		return (1);
+	}
+
+	ret = (zfs_rename(zhp, snapshot, argv[1], flags) != 0);
+
+	zfs_close(zhp);
+	return (ret);
+}
+
+/*
+ * zfs promote <fs>
+ *
+ * Promotes the given clone fs to be the parent
+ */
+/* ARGSUSED */
+static int
+zfs_do_promote(int argc, char **argv)
+{
+	zfs_handle_t *zhp;
+	int ret = 0;
+
+	/* check options: promote takes no options at all */
+	if (argc > 1 && argv[1][0] == '-') {
+		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+		    argv[1][1]);
+		usage(B_FALSE);
+	}
+
+	/* check number of arguments */
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing clone filesystem"
+		    " argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 2) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+	if (zhp == NULL)
+		return (1);
+
+	ret = (zfs_promote(zhp) != 0);
+
+
+	zfs_close(zhp);
+	return (ret);
+}
+
+/*
+ * zfs rollback [-rRf] <snapshot>
+ *
+ *	-r	Delete any intervening snapshots before doing rollback
+ *	-R	Delete any snapshots and their clones
+ *	-f	ignored for backwards compatability
+ *
+ * Given a filesystem, rollback to a specific snapshot, discarding any changes
+ * since then and making it the active dataset.  If more recent snapshots exist,
+ * the command will complain unless the '-r' flag is given.
+ */
+/* Callback state shared across rollback_check() invocations. */
+typedef struct rollback_cbdata {
+	uint64_t	cb_create;	/* createtxg of the target snapshot */
+	boolean_t	cb_first;	/* error banner not yet printed */
+	int		cb_doclones;	/* -R: clones will be destroyed too */
+	char		*cb_target;	/* full name of the target snapshot */
+	int		cb_error;	/* nonzero if blockers were found */
+	boolean_t	cb_recurse;	/* -r or -R given */
+	boolean_t	cb_dependent;	/* currently visiting a dependent */
+} rollback_cbdata_t;
+
+/*
+ * Report any snapshots more recent than the one specified.  Used when '-r' is
+ * not specified.  We reuse this same callback for the snapshot dependents - if
+ * 'cb_dependent' is set, then this is a dependent and we should report it
+ * without checking the transaction group.
+ */
+static int
+rollback_check(zfs_handle_t *zhp, void *data)
+{
+	rollback_cbdata_t *cbp = data;
+
+	/* With -R nothing blocks the rollback; nothing to check */
+	if (cbp->cb_doclones) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (!cbp->cb_dependent) {
+		/* A snapshot newer than the target blocks the rollback */
+		if (strcmp(zfs_get_name(zhp), cbp->cb_target) != 0 &&
+		    zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT &&
+		    zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) >
+		    cbp->cb_create) {
+
+			if (cbp->cb_first && !cbp->cb_recurse) {
+				(void) fprintf(stderr, gettext("cannot "
+				    "rollback to '%s': more recent snapshots "
+				    "exist\n"),
+				    cbp->cb_target);
+				(void) fprintf(stderr, gettext("use '-r' to "
+				    "force deletion of the following "
+				    "snapshots:\n"));
+				cbp->cb_first = 0;
+				cbp->cb_error = 1;
+			}
+
+			if (cbp->cb_recurse) {
+				/* Recurse into this snapshot's dependents */
+				cbp->cb_dependent = B_TRUE;
+				if (zfs_iter_dependents(zhp, B_TRUE,
+				    rollback_check, cbp) != 0) {
+					zfs_close(zhp);
+					return (-1);
+				}
+				cbp->cb_dependent = B_FALSE;
+			} else {
+				(void) fprintf(stderr, "%s\n",
+				    zfs_get_name(zhp));
+			}
+		}
+	} else {
+		/* Visiting a dependent: report it unconditionally */
+		if (cbp->cb_first && cbp->cb_recurse) {
+			(void) fprintf(stderr, gettext("cannot rollback to "
+			    "'%s': clones of previous snapshots exist\n"),
+			    cbp->cb_target);
+			(void) fprintf(stderr, gettext("use '-R' to "
+			    "force deletion of the following clones and "
+			    "dependents:\n"));
+			cbp->cb_first = 0;
+			cbp->cb_error = 1;
+		}
+
+		(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
+	}
+
+	zfs_close(zhp);
+	return (0);
+}
+
+/*
+ * Implements 'zfs rollback' (see usage comment above): verify no newer
+ * snapshots/clones block the rollback (unless -r/-R), then roll the parent
+ * dataset back to the given snapshot.
+ */
+static int
+zfs_do_rollback(int argc, char **argv)
+{
+	int ret = 0;
+	int c;
+	boolean_t force = B_FALSE;
+	rollback_cbdata_t cb = { 0 };
+	zfs_handle_t *zhp, *snap;
+	char parentname[ZFS_MAXNAMELEN];
+	char *delim;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "rRf")) != -1) {
+		switch (c) {
+		case 'r':
+			cb.cb_recurse = 1;
+			break;
+		case 'R':
+			cb.cb_recurse = 1;
+			cb.cb_doclones = 1;
+			break;
+		case 'f':
+			/* accepted for backwards compatibility */
+			force = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing dataset argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	/* open the snapshot */
+	if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
+		return (1);
+
+	/* open the parent dataset */
+	(void) strlcpy(parentname, argv[0], sizeof (parentname));
+	verify((delim = strrchr(parentname, '@')) != NULL);
+	*delim = '\0';
+	if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) {
+		zfs_close(snap);
+		return (1);
+	}
+
+	/*
+	 * Check for more recent snapshots and/or clones based on the presence
+	 * of '-r' and '-R'.
+	 */
+	cb.cb_target = argv[0];
+	cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
+	cb.cb_first = B_TRUE;
+	cb.cb_error = 0;
+	if ((ret = zfs_iter_children(zhp, rollback_check, &cb)) != 0)
+		goto out;
+
+	if ((ret = cb.cb_error) != 0)
+		goto out;
+
+	/*
+	 * Rollback parent to the given snapshot.
+	 */
+	ret = zfs_rollback(zhp, snap, force);
+
+out:
+	zfs_close(snap);
+	zfs_close(zhp);
+
+	if (ret == 0)
+		return (0);
+	else
+		return (1);
+}
+
+/*
+ * zfs set property=value { fs | snap | vol } ...
+ *
+ * Sets the given property for all datasets specified on the command line.
+ */
+/* Property name/value pair passed to set_callback() for each dataset. */
+typedef struct set_cbdata {
+	char		*cb_propname;	/* property name (NUL-split from arg) */
+	char		*cb_value;	/* value portion after the '=' */
+} set_cbdata_t;
+
+/*
+ * Per-dataset callback for 'zfs set': apply the property and report the
+ * partial-success cases (set succeeded but remount/reshare failed).
+ */
+static int
+set_callback(zfs_handle_t *zhp, void *data)
+{
+	set_cbdata_t *cbp = data;
+
+	if (zfs_prop_set(zhp, cbp->cb_propname, cbp->cb_value) != 0) {
+		switch (libzfs_errno(g_zfs)) {
+		case EZFS_MOUNTFAILED:
+			(void) fprintf(stderr, gettext("property may be set "
+			    "but unable to remount filesystem\n"));
+			break;
+		case EZFS_SHARENFSFAILED:
+			(void) fprintf(stderr, gettext("property may be set "
+			    "but unable to reshare filesystem\n"));
+			break;
+		}
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * Implements 'zfs set property=value <dataset> ...': split the
+ * property=value argument in place, then apply it to every listed dataset
+ * via set_callback().
+ */
+static int
+zfs_do_set(int argc, char **argv)
+{
+	set_cbdata_t cb;
+	int ret = 0;
+
+	/* check for options: set takes none */
+	if (argc > 1 && argv[1][0] == '-') {
+		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+		    argv[1][1]);
+		usage(B_FALSE);
+	}
+
+	/* check number of arguments */
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing property=value "
+		    "argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 3) {
+		(void) fprintf(stderr, gettext("missing dataset name\n"));
+		usage(B_FALSE);
+	}
+
+	/* validate property=value argument */
+	cb.cb_propname = argv[1];
+	if (((cb.cb_value = strchr(cb.cb_propname, '=')) == NULL) ||
+	    (cb.cb_value[1] == '\0')) {
+		(void) fprintf(stderr, gettext("missing value in "
+		    "property=value argument\n"));
+		usage(B_FALSE);
+	}
+
+	/* split argv[1] in place at the '=' */
+	*cb.cb_value = '\0';
+	cb.cb_value++;
+
+	if (*cb.cb_propname == '\0') {
+		(void) fprintf(stderr,
+		    gettext("missing property in property=value argument\n"));
+		usage(B_FALSE);
+	}
+
+	ret = zfs_for_each(argc - 2, argv + 2, 0,
+	    ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, &cb);
+
+	return (ret);
+}
+
+/*
+ * zfs snapshot [-r] [-o prop=value] ... <fs@snap>
+ *
+ * Creates a snapshot with the given name.  While functionally equivalent to
+ * 'zfs create', it is a separate command to differentiate intent.
+ */
+static int
+zfs_do_snapshot(int argc, char **argv)
+{
+	boolean_t recursive = B_FALSE;
+	int ret = 0;
+	/*
+	 * getopt() returns int; on platforms where plain char is unsigned,
+	 * storing the result in a char makes the "!= -1" test never true.
+	 */
+	int c;
+	nvlist_t *props;
+
+	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+
+	/* check options */
+	while ((c = getopt(argc, argv, "ro:")) != -1) {
+		switch (c) {
+		case 'o':
+			/* parseprop() has already printed the error */
+			if (parseprop(props)) {
+				nvlist_free(props);
+				return (1);
+			}
+			break;
+		case 'r':
+			recursive = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			goto usage;
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing snapshot argument\n"));
+		goto usage;
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		goto usage;
+	}
+
+	ret = zfs_snapshot(g_zfs, argv[0], recursive, props);
+	nvlist_free(props);
+	if (ret && recursive)
+		(void) fprintf(stderr, gettext("no snapshots were created\n"));
+	return (ret != 0);
+
+usage:
+	nvlist_free(props);
+	usage(B_FALSE);
+	return (-1);
+}
+
+/*
+ * zfs send [-vDpRnP] [-i|-I snapshot] <snapshot>
+ *
+ * Send a backup stream to stdout.
+ */
+static int
+zfs_do_send(int argc, char **argv)
+{
+	char *fromname = NULL;
+	char *toname = NULL;
+	char *cp;
+	zfs_handle_t *zhp;
+	sendflags_t flags = { 0 };
+	int c, err;
+	nvlist_t *dbgnv = NULL;
+	boolean_t extraverbose = B_FALSE;
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":i:I:RDpvnP")) != -1) {
+		switch (c) {
+		case 'i':
+			/* -i and -I are mutually exclusive */
+			if (fromname)
+				usage(B_FALSE);
+			fromname = optarg;
+			break;
+		case 'I':
+			if (fromname)
+				usage(B_FALSE);
+			fromname = optarg;
+			flags.doall = B_TRUE;
+			break;
+		case 'R':
+			flags.replicate = B_TRUE;
+			break;
+		case 'p':
+			flags.props = B_TRUE;
+			break;
+		case 'P':
+			flags.parsable = B_TRUE;
+			flags.verbose = B_TRUE;
+			break;
+		case 'v':
+			/* a second -v enables the debug nvlist dump */
+			if (flags.verbose)
+				extraverbose = B_TRUE;
+			flags.verbose = B_TRUE;
+			flags.progress = B_TRUE;
+			break;
+		case 'D':
+			flags.dedup = B_TRUE;
+			break;
+		case 'n':
+			flags.dryrun = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing snapshot argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	/* refuse to write a binary stream to a terminal */
+	if (!flags.dryrun && isatty(STDOUT_FILENO)) {
+		(void) fprintf(stderr,
+		    gettext("Error: Stream can not be written to a terminal.\n"
+		    "You must redirect standard output.\n"));
+		return (1);
+	}
+
+	/* split "fs@snap" in place: argv[0] = fs, toname = snap */
+	cp = strchr(argv[0], '@');
+	if (cp == NULL) {
+		(void) fprintf(stderr,
+		    gettext("argument must be a snapshot\n"));
+		usage(B_FALSE);
+	}
+	*cp = '\0';
+	toname = cp + 1;
+	zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+	if (zhp == NULL)
+		return (1);
+
+	/*
+	 * If they specified the full path to the snapshot, chop off
+	 * everything except the short name of the snapshot, but special
+	 * case if they specify the origin.
+	 */
+	if (fromname && (cp = strchr(fromname, '@')) != NULL) {
+		char origin[ZFS_MAXNAMELEN];
+		zprop_source_t src;
+
+		(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
+		    origin, sizeof (origin), &src, NULL, 0, B_FALSE);
+
+		if (strcmp(origin, fromname) == 0) {
+			fromname = NULL;
+			flags.fromorigin = B_TRUE;
+		} else {
+			*cp = '\0';
+			if (cp != fromname && strcmp(argv[0], fromname)) {
+				(void) fprintf(stderr,
+				    gettext("incremental source must be "
+				    "in same filesystem\n"));
+				usage(B_FALSE);
+			}
+			fromname = cp + 1;
+			if (strchr(fromname, '@') || strchr(fromname, '/')) {
+				(void) fprintf(stderr,
+				    gettext("invalid incremental source\n"));
+				usage(B_FALSE);
+			}
+		}
+	}
+
+	if (flags.replicate && fromname == NULL)
+		flags.doall = B_TRUE;
+
+	err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO, NULL, 0,
+	    extraverbose ? &dbgnv : NULL);
+
+	if (extraverbose && dbgnv != NULL) {
+		/*
+		 * dump_nvlist prints to stdout, but that's been
+		 * redirected to a file.  Make it print to stderr
+		 * instead.
+		 */
+		(void) dup2(STDERR_FILENO, STDOUT_FILENO);
+		dump_nvlist(dbgnv, 0);
+		nvlist_free(dbgnv);
+	}
+	zfs_close(zhp);
+
+	return (err != 0);
+}
+
+/*
+ * zfs receive [-vnFu] [-d | -e] <fs@snap>
+ *
+ * Restore a backup stream from stdin.
+ */
+static int
+zfs_do_receive(int argc, char **argv)
+{
+	int c, err;
+	recvflags_t flags = { 0 };
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":denuvF")) != -1) {
+		switch (c) {
+		case 'd':
+			/* discard the pool name from the sent path */
+			flags.isprefix = B_TRUE;
+			break;
+		case 'e':
+			/* discard all but the tail of the sent path */
+			flags.isprefix = B_TRUE;
+			flags.istail = B_TRUE;
+			break;
+		case 'n':
+			flags.dryrun = B_TRUE;
+			break;
+		case 'u':
+			flags.nomount = B_TRUE;
+			break;
+		case 'v':
+			flags.verbose = B_TRUE;
+			break;
+		case 'F':
+			flags.force = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing snapshot argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	/* refuse to read a binary stream from a terminal */
+	if (isatty(STDIN_FILENO)) {
+		(void) fprintf(stderr,
+		    gettext("Error: Backup stream can not be read "
+		    "from a terminal.\n"
+		    "You must redirect standard input.\n"));
+		return (1);
+	}
+
+	err = zfs_receive(g_zfs, argv[0], &flags, STDIN_FILENO, NULL);
+
+	return (err != 0);
+}
+
+/*
+ * allow/unallow stuff
+ */
+/* copied from zfs/sys/dsl_deleg.h; keep in sync with that header */
+#define	ZFS_DELEG_PERM_CREATE		"create"
+#define	ZFS_DELEG_PERM_DESTROY		"destroy"
+#define	ZFS_DELEG_PERM_SNAPSHOT		"snapshot"
+#define	ZFS_DELEG_PERM_ROLLBACK		"rollback"
+#define	ZFS_DELEG_PERM_CLONE		"clone"
+#define	ZFS_DELEG_PERM_PROMOTE		"promote"
+#define	ZFS_DELEG_PERM_RENAME		"rename"
+#define	ZFS_DELEG_PERM_MOUNT		"mount"
+#define	ZFS_DELEG_PERM_SHARE		"share"
+#define	ZFS_DELEG_PERM_SEND		"send"
+#define	ZFS_DELEG_PERM_RECEIVE		"receive"
+#define	ZFS_DELEG_PERM_ALLOW		"allow"
+#define	ZFS_DELEG_PERM_USERPROP		"userprop"
+#define	ZFS_DELEG_PERM_VSCAN		"vscan" /* NOTE(review): absent from zfs_deleg_perm_tbl below -- confirm */
+#define	ZFS_DELEG_PERM_USERQUOTA	"userquota"
+#define	ZFS_DELEG_PERM_GROUPQUOTA	"groupquota"
+#define	ZFS_DELEG_PERM_USERUSED		"userused"
+#define	ZFS_DELEG_PERM_GROUPUSED	"groupused"
+#define	ZFS_DELEG_PERM_HOLD		"hold"
+#define	ZFS_DELEG_PERM_RELEASE		"release"
+#define	ZFS_DELEG_PERM_DIFF		"diff"
+
+/* assumes ZFS_DELEG_NOTE_NONE is the final enumerator -- TODO confirm */
+#define	ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE
+
+/*
+ * Mapping of delegable permission names to their zfs_deleg_note_t values,
+ * terminated by a NULL-named entry.  Grouped as subcommands first, then
+ * the property-style ("other") permissions.
+ */
+static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
+	{ ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW },
+	{ ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE },
+	{ ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE },
+	{ ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY },
+	{ ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF},
+	{ ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD },
+	{ ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT },
+	{ ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE },
+	{ ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE },
+	{ ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE },
+	{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
+	{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
+	{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
+	{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
+	{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
+
+	{ ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA },
+	{ ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED },
+	{ ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP },
+	{ ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA },
+	{ ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED },
+	{ NULL, ZFS_DELEG_NOTE_NONE }
+};
+
+/* permission structure */
+typedef struct deleg_perm {
+	zfs_deleg_who_type_t	dp_who_type;
+	const char		*dp_name;	/* permission name (referenced) */
+	boolean_t		dp_local;	/* granted on the dataset itself */
+	boolean_t		dp_descend;	/* granted for descendents */
+} deleg_perm_t;
+
+/* AVL node wrapping a single deleg_perm_t */
+typedef struct deleg_perm_node {
+	deleg_perm_t		dpn_perm;
+
+	uu_avl_node_t		dpn_avl_node;
+} deleg_perm_node_t;
+
+typedef struct fs_perm fs_perm_t;
+
+/* permissions set */
+typedef struct who_perm {
+	zfs_deleg_who_type_t	who_type;
+	const char		*who_name;		/* id */
+	char			who_ug_name[256];	/* user/group name */
+	fs_perm_t		*who_fsperm;		/* uplink */
+
+	uu_avl_t		*who_deleg_perm_avl;	/* permissions */
+} who_perm_t;
+
+/* AVL node wrapping a single who_perm_t */
+typedef struct who_perm_node {
+	who_perm_t	who_perm;
+	uu_avl_node_t	who_avl_node;
+} who_perm_node_t;
+
+typedef struct fs_perm_set fs_perm_set_t;
+/* fs permissions */
+struct fs_perm {
+	const char		*fsp_name;
+
+	uu_avl_t		*fsp_sc_avl;	/* sets,create */
+	uu_avl_t		*fsp_uge_avl;	/* user,group,everyone */
+
+	fs_perm_set_t		*fsp_set;	/* uplink */
+};
+
+/* list node wrapping a single fs_perm_t */
+typedef struct fs_perm_node {
+	fs_perm_t	fspn_fsperm;
+	uu_avl_t	*fspn_avl;
+
+	uu_list_node_t	fspn_list_node;
+} fs_perm_node_t;
+
+/* top level structure */
+struct fs_perm_set {
+	uu_list_pool_t	*fsps_list_pool;
+	uu_list_t	*fsps_list; /* list of fs_perms */
+
+	uu_avl_pool_t	*fsps_named_set_avl_pool;
+	uu_avl_pool_t	*fsps_who_perm_avl_pool;
+	uu_avl_pool_t	*fsps_deleg_perm_avl_pool;
+};
+
+/*
+ * Classify a delegation note for display purposes: the property-style
+ * permissions (quota/used/userprop) are reported as "other", everything
+ * else as "subcommand".
+ */
+static inline const char *
+deleg_perm_type(zfs_deleg_note_t note)
+{
+	if (note == ZFS_DELEG_NOTE_GROUPQUOTA ||
+	    note == ZFS_DELEG_NOTE_GROUPUSED ||
+	    note == ZFS_DELEG_NOTE_USERPROP ||
+	    note == ZFS_DELEG_NOTE_USERQUOTA ||
+	    note == ZFS_DELEG_NOTE_USERUSED)
+		return (gettext("other"));
+
+	return (gettext("subcommand"));
+}
+
+/*
+ * Sort weight for a "who" entry: named sets first, then create-time,
+ * user, group, everyone.  Returns -1 for an unrecognized who type.
+ */
+static int inline
+who_type2weight(zfs_deleg_who_type_t who_type)
+{
+	switch (who_type) {
+	case ZFS_DELEG_NAMED_SET_SETS:
+	case ZFS_DELEG_NAMED_SET:
+		return (0);
+	case ZFS_DELEG_CREATE_SETS:
+	case ZFS_DELEG_CREATE:
+		return (1);
+	case ZFS_DELEG_USER_SETS:
+	case ZFS_DELEG_USER:
+		return (2);
+	case ZFS_DELEG_GROUP_SETS:
+	case ZFS_DELEG_GROUP:
+		return (3);
+	case ZFS_DELEG_EVERYONE_SETS:
+	case ZFS_DELEG_EVERYONE:
+		return (4);
+	default:
+		return (-1);
+	}
+}
+
+/* ARGSUSED */
+/*
+ * AVL comparator for who_perm_node_t: order primarily by who-type weight
+ * (see who_type2weight()), then by who name.  Returns -1, 0, or 1.
+ */
+static int
+who_perm_compare(const void *larg, const void *rarg, void *unused)
+{
+	const who_perm_node_t *l = larg;
+	const who_perm_node_t *r = rarg;
+	int cmp;
+
+	cmp = who_type2weight(l->who_perm.who_type) -
+	    who_type2weight(r->who_perm.who_type);
+	if (cmp == 0)
+		cmp = strncmp(l->who_perm.who_name, r->who_perm.who_name,
+		    ZFS_MAX_DELEG_NAME-1);
+
+	if (cmp < 0)
+		return (-1);
+	return (cmp > 0 ? 1 : 0);
+}
+
+/* ARGSUSED */
+/*
+ * AVL comparator for deleg_perm_node_t: order by permission name.
+ * Returns -1, 0, or 1.
+ */
+static int
+deleg_perm_compare(const void *larg, const void *rarg, void *unused)
+{
+	const deleg_perm_node_t *l = larg;
+	const deleg_perm_node_t *r = rarg;
+	int cmp = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
+	    ZFS_MAX_DELEG_NAME-1);
+
+	if (cmp < 0)
+		return (-1);
+	return (cmp > 0 ? 1 : 0);
+}
+
+/*
+ * Initialize a fs_perm_set_t: zero it, then create the list and the three
+ * AVL pools that back the parsed permission structures.  Any allocation
+ * failure aborts via nomem().
+ */
+static inline void
+fs_perm_set_init(fs_perm_set_t *fspset)
+{
+	bzero(fspset, sizeof (fs_perm_set_t));
+
+	if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
+	    sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
+	    NULL, UU_DEFAULT)) == NULL)
+		nomem();
+	if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
+	    UU_DEFAULT)) == NULL)
+		nomem();
+
+	if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
+	    "named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
+	    who_perm_node_t, who_avl_node), who_perm_compare,
+	    UU_DEFAULT)) == NULL)
+		nomem();
+
+	if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
+	    "who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
+	    who_perm_node_t, who_avl_node), who_perm_compare,
+	    UU_DEFAULT)) == NULL)
+		nomem();
+
+	if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
+	    "deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
+	    deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
+	    == NULL)
+		nomem();
+}
+
+static inline void fs_perm_fini(fs_perm_t *);
+static inline void who_perm_fini(who_perm_t *);
+
+/*
+ * Tear down a fs_perm_set_t: finalize and free every queued fs_perm node,
+ * then destroy the three AVL pools.
+ * NOTE(review): fsps_list and fsps_list_pool are not destroyed here --
+ * confirm whether that is intentional.
+ */
+static inline void
+fs_perm_set_fini(fs_perm_set_t *fspset)
+{
+	fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
+
+	while (node != NULL) {
+		/* grab the successor before the node is freed */
+		fs_perm_node_t *next_node =
+		    uu_list_next(fspset->fsps_list, node);
+		fs_perm_t *fsperm = &node->fspn_fsperm;
+		fs_perm_fini(fsperm);
+		uu_list_remove(fspset->fsps_list, node);
+		free(node);
+		node = next_node;
+	}
+
+	uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
+	uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
+	uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
+}
+
+/*
+ * Record the who-type and permission name on a deleg_perm_t.
+ * The name is referenced, not copied; it must outlive the structure.
+ */
+static inline void
+deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type,
+    const char *name)
+{
+	deleg_perm->dp_who_type = type;
+	deleg_perm->dp_name = name;
+}
+
+/*
+ * Prepare a who_perm_t belonging to fsperm: zero it, create its
+ * delegated-permission AVL (aborting via nomem() on failure), and link it
+ * back to the owning fs_perm.  `name` is referenced, not copied.
+ */
+static inline void
+who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
+    zfs_deleg_who_type_t type, const char *name)
+{
+	uu_avl_pool_t	*pool;
+	pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
+
+	bzero(who_perm, sizeof (who_perm_t));
+
+	if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
+	    UU_DEFAULT)) == NULL)
+		nomem();
+
+	who_perm->who_type = type;
+	who_perm->who_name = name;
+	who_perm->who_fsperm = fsperm;
+}
+
+/*
+ * Free every deleg_perm node held by a who_perm_t, then destroy its AVL.
+ */
+static inline void
+who_perm_fini(who_perm_t *who_perm)
+{
+	deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
+
+	while (node != NULL) {
+		/* grab the successor before the node is freed */
+		deleg_perm_node_t *next_node =
+		    uu_avl_next(who_perm->who_deleg_perm_avl, node);
+
+		uu_avl_remove(who_perm->who_deleg_perm_avl, node);
+		free(node);
+		node = next_node;
+	}
+
+	uu_avl_destroy(who_perm->who_deleg_perm_avl);
+}
+
+/*
+ * Prepare a fs_perm_t for dataset `fsname` within `fspset`: zero it and
+ * create its two AVLs (sets/create and user/group/everyone), aborting via
+ * nomem() on failure.  `fsname` is referenced, not copied.
+ */
+static inline void
+fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
+{
+	uu_avl_pool_t	*nset_pool = fspset->fsps_named_set_avl_pool;
+	uu_avl_pool_t	*who_pool = fspset->fsps_who_perm_avl_pool;
+
+	bzero(fsperm, sizeof (fs_perm_t));
+
+	if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
+	    == NULL)
+		nomem();
+
+	if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
+	    == NULL)
+		nomem();
+
+	fsperm->fsp_set = fspset;
+	fsperm->fsp_name = fsname;
+}
+
+/*
+ * Release a fs_perm_t: finalize and free every who_perm node in both AVLs
+ * (sets/create first, then user/group/everyone), then destroy the AVLs.
+ */
+static inline void
+fs_perm_fini(fs_perm_t *fsperm)
+{
+	who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
+	while (node != NULL) {
+		who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
+		    node);
+		who_perm_t *who_perm = &node->who_perm;
+		who_perm_fini(who_perm);
+		uu_avl_remove(fsperm->fsp_sc_avl, node);
+		free(node);
+		node = next_node;
+	}
+
+	node = uu_avl_first(fsperm->fsp_uge_avl);
+	while (node != NULL) {
+		who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
+		    node);
+		who_perm_t *who_perm = &node->who_perm;
+		who_perm_fini(who_perm);
+		uu_avl_remove(fsperm->fsp_uge_avl, node);
+		free(node);
+		node = next_node;
+	}
+
+	uu_avl_destroy(fsperm->fsp_sc_avl);
+	uu_avl_destroy(fsperm->fsp_uge_avl);
+}
+
+/*
+ * Insert `node` (carrying permission `name` for `who_type`) into `avl`,
+ * or, if an equal entry already exists, merge into that entry.  Then flag
+ * the locality (local and/or descendent) on whichever node was kept.
+ * NOTE(review): when an equal node already exists, the caller-allocated
+ * `node` is neither inserted nor freed -- apparent memory leak; confirm
+ * against all callers before changing ownership semantics.
+ */
+static void inline
+set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
+    zfs_deleg_who_type_t who_type, const char *name, char locality)
+{
+	uu_avl_index_t idx = 0;
+
+	deleg_perm_node_t *found_node = NULL;
+	deleg_perm_t	*deleg_perm = &node->dpn_perm;
+
+	deleg_perm_init(deleg_perm, who_type, name);
+
+	if ((found_node = uu_avl_find(avl, node, NULL, &idx))
+	    == NULL)
+		uu_avl_insert(avl, node, idx);
+	else {
+		/* entry already present; update the existing one instead */
+		node = found_node;
+		deleg_perm = &node->dpn_perm;
+	}
+
+
+	switch (locality) {
+	case ZFS_DELEG_LOCAL:
+		deleg_perm->dp_local = B_TRUE;
+		break;
+	case ZFS_DELEG_DESCENDENT:
+		deleg_perm->dp_descend = B_TRUE;
+		break;
+	case ZFS_DELEG_NA:
+		break;
+	default:
+		assert(B_FALSE); /* invalid locality */
+	}
+}
+
+/*
+ * Walk `nvl` (permission-name -> boolean pairs) and record each
+ * permission in who_perm's delegated-permission AVL with the given
+ * locality.  Always returns 0.
+ */
+static inline int
+parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
+{
+	nvpair_t *nvp = NULL;
+	fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
+	uu_avl_t *avl = who_perm->who_deleg_perm_avl;
+	zfs_deleg_who_type_t who_type = who_perm->who_type;
+
+	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
+		const char *name = nvpair_name(nvp);
+		data_type_t type = nvpair_type(nvp);
+		uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
+		deleg_perm_node_t *node =
+		    safe_malloc(sizeof (deleg_perm_node_t));
+
+		/* fsacl permission values are boolean markers */
+		assert(type == DATA_TYPE_BOOLEAN);
+
+		uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
+		set_deleg_perm_node(avl, node, who_type, name, locality);
+	}
+
+	return (0);
+}
+
+/*
+ * Parse one dataset's fsacl nvlist into `fsperm`.  Each nvpair name is
+ * encoded "<type><locality>$<who>" (see store_allow_perm()); its value is
+ * an nvlist of permission names handled by parse_who_perm().
+ * Returns 0 on success, -1 if an nvpair value is not an nvlist.
+ */
+static inline int
+parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
+{
+	nvpair_t *nvp = NULL;
+	fs_perm_set_t *fspset = fsperm->fsp_set;
+
+	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
+		nvlist_t *nvl2 = NULL;
+		const char *name = nvpair_name(nvp);
+		uu_avl_t *avl = NULL;
+		uu_avl_pool_t *avl_pool;
+		zfs_deleg_who_type_t perm_type = name[0];
+		char perm_locality = name[1];
+		const char *perm_name = name + 3;
+		boolean_t is_set = B_TRUE;
+		who_perm_t *who_perm = NULL;
+
+		assert('$' == name[2]);
+
+		if (nvpair_value_nvlist(nvp, &nvl2) != 0)
+			return (-1);
+
+		switch (perm_type) {
+		case ZFS_DELEG_CREATE:
+		case ZFS_DELEG_CREATE_SETS:
+		case ZFS_DELEG_NAMED_SET:
+		case ZFS_DELEG_NAMED_SET_SETS:
+			avl_pool = fspset->fsps_named_set_avl_pool;
+			avl = fsperm->fsp_sc_avl;
+			break;
+		case ZFS_DELEG_USER:
+		case ZFS_DELEG_USER_SETS:
+		case ZFS_DELEG_GROUP:
+		case ZFS_DELEG_GROUP_SETS:
+		case ZFS_DELEG_EVERYONE:
+		case ZFS_DELEG_EVERYONE_SETS:
+			avl_pool = fspset->fsps_who_perm_avl_pool;
+			avl = fsperm->fsp_uge_avl;
+			break;
+		default:
+			/* avl/avl_pool would be uninitialized below */
+			assert(B_FALSE);	/* unexpected permission type */
+			continue;
+		}
+
+		if (is_set) {
+			who_perm_node_t *found_node = NULL;
+			who_perm_node_t *node = safe_malloc(
+			    sizeof (who_perm_node_t));
+			who_perm = &node->who_perm;
+			uu_avl_index_t idx = 0;
+
+			uu_avl_node_init(node, &node->who_avl_node, avl_pool);
+			who_perm_init(who_perm, fsperm, perm_type, perm_name);
+
+			if ((found_node = uu_avl_find(avl, node, NULL, &idx))
+			    == NULL) {
+				if (avl == fsperm->fsp_uge_avl) {
+					uid_t rid = 0;
+					struct passwd *p = NULL;
+					struct group *g = NULL;
+					const char *nice_name = NULL;
+
+					/* resolve numeric ids to names */
+					switch (perm_type) {
+					case ZFS_DELEG_USER_SETS:
+					case ZFS_DELEG_USER:
+						rid = atoi(perm_name);
+						p = getpwuid(rid);
+						if (p)
+							nice_name = p->pw_name;
+						break;
+					case ZFS_DELEG_GROUP_SETS:
+					case ZFS_DELEG_GROUP:
+						rid = atoi(perm_name);
+						g = getgrgid(rid);
+						if (g)
+							nice_name = g->gr_name;
+						break;
+					}
+
+					if (nice_name != NULL)
+						(void) strlcpy(
+						    node->who_perm.who_ug_name,
+						    nice_name, sizeof (
+						    node->who_perm.who_ug_name));
+				}
+
+				uu_avl_insert(avl, node, idx);
+			} else {
+				/*
+				 * Duplicate "who": release our freshly
+				 * built node (and its AVL) and merge into
+				 * the existing entry instead of leaking it.
+				 */
+				who_perm_fini(who_perm);
+				free(node);
+				node = found_node;
+				who_perm = &node->who_perm;
+			}
+		}
+
+		(void) parse_who_perm(who_perm, nvl2, perm_locality);
+	}
+
+	return (0);
+}
+
+/*
+ * Top-level fsacl parser: each nvpair maps a dataset name to that
+ * dataset's permission nvlist.  One fs_perm list node is built per
+ * dataset and appended in iteration order.  Returns 0 on success, -1 on
+ * a malformed nvpair (after releasing the partially built node).
+ */
+static inline int
+parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
+{
+	nvpair_t *nvp = NULL;
+	uu_avl_index_t idx = 0;
+
+	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
+		nvlist_t *nvl2 = NULL;
+		const char *fsname = nvpair_name(nvp);
+		data_type_t type = nvpair_type(nvp);
+		fs_perm_t *fsperm = NULL;
+		fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t));
+		if (node == NULL)
+			nomem();	/* defensive; safe_malloc is expected to abort */
+
+		fsperm = &node->fspn_fsperm;
+
+		assert(DATA_TYPE_NVLIST == type);
+
+		uu_list_node_init(node, &node->fspn_list_node,
+		    fspset->fsps_list_pool);
+
+		/* append at the tail: index == current node count */
+		idx = uu_list_numnodes(fspset->fsps_list);
+		fs_perm_init(fsperm, fspset, fsname);
+
+		if (nvpair_value_nvlist(nvp, &nvl2) != 0) {
+			/* malformed entry: undo this node before bailing */
+			fs_perm_fini(fsperm);
+			free(node);
+			return (-1);
+		}
+
+		(void) parse_fs_perm(fsperm, nvl2);
+
+		uu_list_insert(fspset->fsps_list, node, idx);
+	}
+
+	return (0);
+}
+
+/*
+ * Return the human-readable note printed next to each delegable
+ * permission by allow_usage(), describing prerequisites or effects.
+ * The embedded "\n\t\t\t\t" sequences wrap long notes to line up with
+ * the usage table's NOTES column.
+ */
+static inline const char *
+deleg_perm_comment(zfs_deleg_note_t note)
+{
+	const char *str = "";
+
+	/* subcommands */
+	switch (note) {
+		/* SUBCOMMANDS */
+	case ZFS_DELEG_NOTE_ALLOW:
+		str = gettext("Must also have the permission that is being"
+		    "\n\t\t\t\tallowed");
+		break;
+	case ZFS_DELEG_NOTE_CLONE:
+		str = gettext("Must also have the 'create' ability and 'mount'"
+		    "\n\t\t\t\tability in the origin file system");
+		break;
+	case ZFS_DELEG_NOTE_CREATE:
+		str = gettext("Must also have the 'mount' ability");
+		break;
+	case ZFS_DELEG_NOTE_DESTROY:
+		str = gettext("Must also have the 'mount' ability");
+		break;
+	case ZFS_DELEG_NOTE_DIFF:
+		str = gettext("Allows lookup of paths within a dataset;"
+		    "\n\t\t\t\tgiven an object number. Ordinary users need this"
+		    "\n\t\t\t\tin order to use zfs diff");
+		break;
+	case ZFS_DELEG_NOTE_HOLD:
+		str = gettext("Allows adding a user hold to a snapshot");
+		break;
+	case ZFS_DELEG_NOTE_MOUNT:
+		str = gettext("Allows mount/umount of ZFS datasets");
+		break;
+	case ZFS_DELEG_NOTE_PROMOTE:
+		str = gettext("Must also have the 'mount'\n\t\t\t\tand"
+		    " 'promote' ability in the origin file system");
+		break;
+	case ZFS_DELEG_NOTE_RECEIVE:
+		str = gettext("Must also have the 'mount' and 'create'"
+		    " ability");
+		break;
+	case ZFS_DELEG_NOTE_RELEASE:
+		str = gettext("Allows releasing a user hold which\n\t\t\t\t"
+		    "might destroy the snapshot");
+		break;
+	case ZFS_DELEG_NOTE_RENAME:
+		str = gettext("Must also have the 'mount' and 'create'"
+		    "\n\t\t\t\tability in the new parent");
+		break;
+	case ZFS_DELEG_NOTE_ROLLBACK:
+		str = gettext("");
+		break;
+	case ZFS_DELEG_NOTE_SEND:
+		str = gettext("");
+		break;
+	case ZFS_DELEG_NOTE_SHARE:
+		str = gettext("Allows sharing file systems over NFS or SMB"
+		    "\n\t\t\t\tprotocols");
+		break;
+	case ZFS_DELEG_NOTE_SNAPSHOT:
+		str = gettext("");
+		break;
+/*
+ *	case ZFS_DELEG_NOTE_VSCAN:
+ *		str = gettext("");
+ *		break;
+ */
+		/* OTHER */
+	case ZFS_DELEG_NOTE_GROUPQUOTA:
+		str = gettext("Allows accessing any groupquota@... property");
+		break;
+	case ZFS_DELEG_NOTE_GROUPUSED:
+		str = gettext("Allows reading any groupused@... property");
+		break;
+	case ZFS_DELEG_NOTE_USERPROP:
+		str = gettext("Allows changing any user property");
+		break;
+	case ZFS_DELEG_NOTE_USERQUOTA:
+		str = gettext("Allows accessing any userquota@... property");
+		break;
+	case ZFS_DELEG_NOTE_USERUSED:
+		str = gettext("Allows reading any userused@... property");
+		break;
+		/* other */
+	default:
+		str = "";
+	}
+
+	return (str);
+}
+
+/* parsed command-line state for zfs allow/unallow */
+struct allow_opts {
+	boolean_t local;	/* -l: apply locally */
+	boolean_t descend;	/* -d: apply to descendents */
+	boolean_t user;		/* -u: who is a user */
+	boolean_t group;	/* -g: who is a group */
+	boolean_t everyone;	/* -e (or literal "everyone") */
+	boolean_t create;	/* -c: create-time permissions */
+	boolean_t set;		/* -s: named permission set */
+	boolean_t recursive; /* unallow only */
+	boolean_t prt_usage;	/* -h: print usage and exit */
+
+	boolean_t prt_perms;	/* just print current permissions */
+	char *who;		/* who list / set name (may be NULL) */
+	char *perms;		/* comma-separated permissions (may be NULL) */
+	const char *dataset;	/* target dataset name */
+};
+
+/*
+ * qsort comparator for an array of property-name strings.
+ */
+static inline int
+prop_cmp(const void *a, const void *b)
+{
+	return (strcmp(*(const char **)a, *(const char **)b));
+}
+
+/*
+ * Print usage for zfs allow/unallow: the option table, the delegable
+ * permission table (name, type, notes), and the sorted list of settable
+ * properties.  Writes to stdout and exits 0 when help was requested
+ * (-h), otherwise writes to stderr and exits 2.
+ */
+static void
+allow_usage(boolean_t un, boolean_t requested, const char *msg)
+{
+	const char *opt_desc[] = {
+		"-h", gettext("show this help message and exit"),
+		"-l", gettext("set permission locally"),
+		"-d", gettext("set permission for descents"),
+		"-u", gettext("set permission for user"),
+		"-g", gettext("set permission for group"),
+		"-e", gettext("set permission for everyone"),
+		"-c", gettext("set create time permission"),
+		"-s", gettext("define permission set"),
+		/* unallow only */
+		"-r", gettext("remove permissions recursively"),
+	};
+	size_t unallow_size = sizeof (opt_desc) / sizeof (char *);
+	size_t allow_size = unallow_size - 2;
+	/* +1 so props[count] = NULL below cannot write past the end */
+	const char *props[ZFS_NUM_PROPS + 1];
+	int i;
+	size_t count = 0;
+	FILE *fp = requested ? stdout : stderr;
+	zprop_desc_t *pdtbl = zfs_prop_get_table();
+	const char *fmt = gettext("%-16s %-14s\t%s\n");
+
+	(void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW :
+	    HELP_ALLOW));
+	(void) fprintf(fp, gettext("Options:\n"));
+	/* opt_desc holds flag/description pairs; consume two per iteration */
+	for (i = 0; i < (un ? unallow_size : allow_size); i++) {
+		const char *opt = opt_desc[i++];
+		const char *optdsc = opt_desc[i];
+		(void) fprintf(fp, gettext("  %-10s  %s\n"), opt, optdsc);
+	}
+
+	(void) fprintf(fp, gettext("\nThe following permissions are "
+	    "supported:\n\n"));
+	(void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"),
+	    gettext("NOTES"));
+	for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) {
+		const char *perm_name = zfs_deleg_perm_tbl[i].z_perm;
+		zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note;
+		const char *perm_type = deleg_perm_type(perm_note);
+		const char *perm_comment = deleg_perm_comment(perm_note);
+		(void) fprintf(fp, fmt, perm_name, perm_type, perm_comment);
+	}
+
+	/* collect the visible, writable properties */
+	for (i = 0; i < ZFS_NUM_PROPS; i++) {
+		zprop_desc_t *pd = &pdtbl[i];
+		if (pd->pd_visible != B_TRUE)
+			continue;
+
+		if (pd->pd_attr == PROP_READONLY)
+			continue;
+
+		props[count++] = pd->pd_name;
+	}
+	props[count] = NULL;
+
+	qsort(props, count, sizeof (char *), prop_cmp);
+
+	for (i = 0; i < count; i++)
+		(void) fprintf(fp, fmt, props[i], gettext("property"), "");
+
+	if (msg != NULL)
+		(void) fprintf(fp, gettext("\nzfs: error: %s"), msg);
+
+	exit(requested ? 0 : 2);
+}
+
+/*
+ * Validate argc against the expected count and split the positional
+ * arguments: *permsp gets the permission list (second-to-last argument),
+ * and the dataset name (last argument) is returned.  For unallow the
+ * permission list may be omitted, in which case *permsp is set to NULL.
+ * On any other count mismatch, exits via allow_usage().
+ */
+static inline const char *
+munge_args(int argc, char **argv, boolean_t un, size_t expected_argc,
+    char **permsp)
+{
+	if (un && argc == expected_argc - 1)
+		*permsp = NULL;
+	else if (argc == expected_argc)
+		*permsp = argv[argc - 2];
+	else
+		allow_usage(un, B_FALSE,
+		    gettext("wrong number of parameters\n"));
+
+	return (argv[argc - 1]);
+}
+
+/*
+ * Validate the mutually-exclusive allow/unallow options and derive
+ * who/perms/dataset from the positional arguments.  When neither -l nor
+ * -d was given, defaults to both local and descendent.  Exits via
+ * allow_usage()/usage() on any inconsistency.
+ */
+static void
+parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts)
+{
+	int uge_sum = opts->user + opts->group + opts->everyone;
+	int csuge_sum = opts->create + opts->set + uge_sum;
+	int ldcsuge_sum = csuge_sum + opts->local + opts->descend;
+	int all_sum = un ? ldcsuge_sum + opts->recursive : ldcsuge_sum;
+
+	if (uge_sum > 1)
+		allow_usage(un, B_FALSE,
+		    gettext("-u, -g, and -e are mutually exclusive\n"));
+
+	/* -h alone prints help; -h with anything else is a usage error */
+	/* (the else below binds to the inner if -- intentional) */
+	if (opts->prt_usage)
+		if (argc == 0 && all_sum == 0)
+			allow_usage(un, B_TRUE, NULL);
+		else
+			usage(B_FALSE);
+
+	if (opts->set) {
+		if (csuge_sum > 1)
+			allow_usage(un, B_FALSE,
+			    gettext("invalid options combined with -s\n"));
+
+		opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
+		if (argv[0][0] != '@')
+			allow_usage(un, B_FALSE,
+			    gettext("invalid set name: missing '@' prefix\n"));
+		opts->who = argv[0];
+	} else if (opts->create) {
+		if (ldcsuge_sum > 1)
+			allow_usage(un, B_FALSE,
+			    gettext("invalid options combined with -c\n"));
+		opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
+	} else if (opts->everyone) {
+		if (csuge_sum > 1)
+			allow_usage(un, B_FALSE,
+			    gettext("invalid options combined with -e\n"));
+		opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
+	} else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone")
+	    == 0) {
+		/* a literal "everyone" argument acts like -e */
+		opts->everyone = B_TRUE;
+		argc--;
+		argv++;
+		opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
+	} else if (argc == 1 && !un) {
+		/* bare dataset: just display its current permissions */
+		opts->prt_perms = B_TRUE;
+		opts->dataset = argv[argc-1];
+	} else {
+		opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
+		opts->who = argv[0];
+	}
+
+	if (!opts->local && !opts->descend) {
+		opts->local = B_TRUE;
+		opts->descend = B_TRUE;
+	}
+}
+
+/*
+ * Encode one "who" entry into top_nvl for zfs_set_fsacl().
+ * Keys are "<type><locality>$[who]" where <type> is the base or *_SETS
+ * who-type character and <locality> is local/descendent/NA (this is the
+ * format parse_fs_perm() decodes).  With a perms list, values are nvlists
+ * of permission names ('@'-prefixed names go under the *_SETS type);
+ * with perms == NULL (unallow everything), bare booleans are stored for
+ * both the base and set types.
+ */
+static void
+store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend,
+    const char *who, char *perms, nvlist_t *top_nvl)
+{
+	int i;
+	/* up to two locality codes: [0] local, [1] descendent */
+	char ld[2] = { '\0', '\0' };
+	char who_buf[ZFS_MAXNAMELEN+32];
+	char base_type;
+	char set_type;
+	nvlist_t *base_nvl = NULL;
+	nvlist_t *set_nvl = NULL;
+	nvlist_t *nvl;
+
+	if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+	if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) !=  0)
+		nomem();
+
+	/*
+	 * NOTE(review): no default case -- base_type/set_type would stay
+	 * uninitialized for an unexpected `type`; callers only pass the
+	 * types enumerated below.
+	 */
+	switch (type) {
+	case ZFS_DELEG_NAMED_SET_SETS:
+	case ZFS_DELEG_NAMED_SET:
+		set_type = ZFS_DELEG_NAMED_SET_SETS;
+		base_type = ZFS_DELEG_NAMED_SET;
+		ld[0] = ZFS_DELEG_NA;
+		break;
+	case ZFS_DELEG_CREATE_SETS:
+	case ZFS_DELEG_CREATE:
+		set_type = ZFS_DELEG_CREATE_SETS;
+		base_type = ZFS_DELEG_CREATE;
+		ld[0] = ZFS_DELEG_NA;
+		break;
+	case ZFS_DELEG_USER_SETS:
+	case ZFS_DELEG_USER:
+		set_type = ZFS_DELEG_USER_SETS;
+		base_type = ZFS_DELEG_USER;
+		if (local)
+			ld[0] = ZFS_DELEG_LOCAL;
+		if (descend)
+			ld[1] = ZFS_DELEG_DESCENDENT;
+		break;
+	case ZFS_DELEG_GROUP_SETS:
+	case ZFS_DELEG_GROUP:
+		set_type = ZFS_DELEG_GROUP_SETS;
+		base_type = ZFS_DELEG_GROUP;
+		if (local)
+			ld[0] = ZFS_DELEG_LOCAL;
+		if (descend)
+			ld[1] = ZFS_DELEG_DESCENDENT;
+		break;
+	case ZFS_DELEG_EVERYONE_SETS:
+	case ZFS_DELEG_EVERYONE:
+		set_type = ZFS_DELEG_EVERYONE_SETS;
+		base_type = ZFS_DELEG_EVERYONE;
+		if (local)
+			ld[0] = ZFS_DELEG_LOCAL;
+		if (descend)
+			ld[1] = ZFS_DELEG_DESCENDENT;
+	}
+
+	if (perms != NULL) {
+		char *curr = perms;
+		char *end = curr + strlen(perms);
+
+		/* split the comma-separated list in place */
+		while (curr < end) {
+			char *delim = strchr(curr, ',');
+			if (delim == NULL)
+				delim = end;
+			else
+				*delim = '\0';
+
+			/* '@'-prefixed entries are named permission sets */
+			if (curr[0] == '@')
+				nvl = set_nvl;
+			else
+				nvl = base_nvl;
+
+			(void) nvlist_add_boolean(nvl, curr);
+			if (delim != end)
+				*delim = ',';	/* restore the caller's string */
+			curr = delim + 1;
+		}
+
+		for (i = 0; i < 2; i++) {
+			char locality = ld[i];
+			if (locality == 0)
+				continue;
+
+			if (!nvlist_empty(base_nvl)) {
+				if (who != NULL)
+					(void) snprintf(who_buf,
+					    sizeof (who_buf), "%c%c$%s",
+					    base_type, locality, who);
+				else
+					(void) snprintf(who_buf,
+					    sizeof (who_buf), "%c%c$",
+					    base_type, locality);
+
+				(void) nvlist_add_nvlist(top_nvl, who_buf,
+				    base_nvl);
+			}
+
+
+			if (!nvlist_empty(set_nvl)) {
+				if (who != NULL)
+					(void) snprintf(who_buf,
+					    sizeof (who_buf), "%c%c$%s",
+					    set_type, locality, who);
+				else
+					(void) snprintf(who_buf,
+					    sizeof (who_buf), "%c%c$",
+					    set_type, locality);
+
+				(void) nvlist_add_nvlist(top_nvl, who_buf,
+				    set_nvl);
+			}
+		}
+	} else {
+		/* no perms: mark both base and set types for removal */
+		for (i = 0; i < 2; i++) {
+			char locality = ld[i];
+			if (locality == 0)
+				continue;
+
+			if (who != NULL)
+				(void) snprintf(who_buf, sizeof (who_buf),
+				    "%c%c$%s", base_type, locality, who);
+			else
+				(void) snprintf(who_buf, sizeof (who_buf),
+				    "%c%c$", base_type, locality);
+			(void) nvlist_add_boolean(top_nvl, who_buf);
+
+			if (who != NULL)
+				(void) snprintf(who_buf, sizeof (who_buf),
+				    "%c%c$%s", set_type, locality, who);
+			else
+				(void) snprintf(who_buf, sizeof (who_buf),
+				    "%c%c$", set_type, locality);
+			(void) nvlist_add_boolean(top_nvl, who_buf);
+		}
+	}
+}
+
+/*
+ * Build the fsacl nvlist for an allow/unallow request from the parsed
+ * options.  For -s/-c/-e a single store_allow_perm() call suffices;
+ * otherwise opts->who is a comma-separated list of users/groups, each
+ * resolved to a numeric id before being stored.  Always returns 0
+ * (resolution failures exit via allow_usage()).
+ */
+static int
+construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
+{
+	if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+
+	if (opts->set) {
+		store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local,
+		    opts->descend, opts->who, opts->perms, *nvlp);
+	} else if (opts->create) {
+		store_allow_perm(ZFS_DELEG_CREATE, opts->local,
+		    opts->descend, NULL, opts->perms, *nvlp);
+	} else if (opts->everyone) {
+		store_allow_perm(ZFS_DELEG_EVERYONE, opts->local,
+		    opts->descend, NULL, opts->perms, *nvlp);
+	} else {
+		char *curr = opts->who;
+		char *end = curr + strlen(curr);
+
+		/* split the comma-separated who list in place */
+		while (curr < end) {
+			const char *who;
+			zfs_deleg_who_type_t who_type;
+			char *endch;
+			char *delim = strchr(curr, ',');
+			char errbuf[256];
+			char id[64];
+			struct passwd *p = NULL;
+			struct group *g = NULL;
+
+			uid_t rid;
+			if (delim == NULL)
+				delim = end;
+			else
+				*delim = '\0';
+
+			/* *endch != '\0' means curr was not purely numeric */
+			rid = (uid_t)strtol(curr, &endch, 0);
+			if (opts->user) {
+				who_type = ZFS_DELEG_USER;
+				if (*endch != '\0')
+					p = getpwnam(curr);
+				else
+					p = getpwuid(rid);
+
+				if (p != NULL)
+					rid = p->pw_uid;
+				else {
+					(void) snprintf(errbuf, 256, gettext(
+					    "invalid user %s"), curr);
+					allow_usage(un, B_TRUE, errbuf);
+				}
+			} else if (opts->group) {
+				who_type = ZFS_DELEG_GROUP;
+				if (*endch != '\0')
+					g = getgrnam(curr);
+				else
+					g = getgrgid(rid);
+
+				if (g != NULL)
+					rid = g->gr_gid;
+				else {
+					(void) snprintf(errbuf, 256, gettext(
+					    "invalid group %s"),  curr);
+					allow_usage(un, B_TRUE, errbuf);
+				}
+			} else {
+				/* neither -u nor -g: try user, then group */
+				if (*endch != '\0') {
+					p = getpwnam(curr);
+				} else {
+					p = getpwuid(rid);
+				}
+
+				if (p == NULL)
+					if (*endch != '\0') {
+						g = getgrnam(curr);
+					} else {
+						g = getgrgid(rid);
+					}
+
+				if (p != NULL) {
+					who_type = ZFS_DELEG_USER;
+					rid = p->pw_uid;
+				} else if (g != NULL) {
+					who_type = ZFS_DELEG_GROUP;
+					rid = g->gr_gid;
+				} else {
+					(void) snprintf(errbuf, 256, gettext(
+					    "invalid user/group %s"), curr);
+					allow_usage(un, B_TRUE, errbuf);
+				}
+			}
+
+			/* always store the numeric id, never the name */
+			(void) sprintf(id, "%u", rid);
+			who = id;
+
+			store_allow_perm(who_type, opts->local,
+			    opts->descend, who, opts->perms, *nvlp);
+			curr = delim + 1;
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Print the named-set and create-time permission entries of one dataset.
+ * who_avl is ordered by who_perm_compare(), so all named-set entries
+ * (weight 0) precede create-time entries (weight 1); a new title line is
+ * emitted whenever the weight changes.
+ */
+static void
+print_set_creat_perms(uu_avl_t *who_avl)
+{
+	const char *sc_title[] = {
+		gettext("Permission sets:\n"),
+		gettext("Create time permissions:\n"),
+		NULL
+	};
+	const char **title_ptr = sc_title;
+	who_perm_node_t *who_node = NULL;
+	int prev_weight = -1;
+
+	for (who_node = uu_avl_first(who_avl); who_node != NULL;
+	    who_node = uu_avl_next(who_avl, who_node)) {
+		uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
+		zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
+		const char *who_name = who_node->who_perm.who_name;
+		int weight = who_type2weight(who_type);
+		boolean_t first = B_TRUE;
+		deleg_perm_node_t *deleg_node;
+
+		if (prev_weight != weight) {
+			/* never pass a variable string as the format */
+			(void) printf("%s", *title_ptr++);
+			prev_weight = weight;
+		}
+
+		if (who_name == NULL || strnlen(who_name, 1) == 0)
+			(void) printf("\t");
+		else
+			(void) printf("\t%s ", who_name);
+
+		/* comma-separated permission names */
+		for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
+		    deleg_node = uu_avl_next(avl, deleg_node)) {
+			if (first) {
+				(void) printf("%s",
+				    deleg_node->dpn_perm.dp_name);
+				first = B_FALSE;
+			} else
+				(void) printf(",%s",
+				    deleg_node->dpn_perm.dp_name);
+		}
+
+		(void) printf("\n");
+	}
+}
+
+/*
+ * Print the user/group/everyone permissions of one dataset that match
+ * exactly the given local/descend combination.  `title` is printed once,
+ * lazily, before the first matching entry so empty sections are omitted.
+ */
+static void inline
+print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
+    const char *title)
+{
+	who_perm_node_t *who_node = NULL;
+	boolean_t prt_title = B_TRUE;
+	uu_avl_walk_t *walk;
+
+	if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
+		nomem();
+
+	while ((who_node = uu_avl_walk_next(walk)) != NULL) {
+		const char *who_name = who_node->who_perm.who_name;
+		const char *nice_who_name = who_node->who_perm.who_ug_name;
+		uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
+		zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
+		char delim = ' ';
+		deleg_perm_node_t *deleg_node;
+		boolean_t prt_who = B_TRUE;
+
+		for (deleg_node = uu_avl_first(avl);
+		    deleg_node != NULL;
+		    deleg_node = uu_avl_next(avl, deleg_node)) {
+			/* only the exact locality combination requested */
+			if (local != deleg_node->dpn_perm.dp_local ||
+			    descend != deleg_node->dpn_perm.dp_descend)
+				continue;
+
+			if (prt_who) {
+				const char *who = NULL;
+				if (prt_title) {
+					prt_title = B_FALSE;
+					/* never pass a variable as format */
+					(void) printf("%s", title);
+				}
+
+				/* prefer the resolved user/group name */
+				switch (who_type) {
+				case ZFS_DELEG_USER_SETS:
+				case ZFS_DELEG_USER:
+					who = gettext("user");
+					if (nice_who_name)
+						who_name  = nice_who_name;
+					break;
+				case ZFS_DELEG_GROUP_SETS:
+				case ZFS_DELEG_GROUP:
+					who = gettext("group");
+					if (nice_who_name)
+						who_name  = nice_who_name;
+					break;
+				case ZFS_DELEG_EVERYONE_SETS:
+				case ZFS_DELEG_EVERYONE:
+					who = gettext("everyone");
+					who_name = NULL;
+				}
+
+				prt_who = B_FALSE;
+				if (who_name == NULL)
+					(void) printf("\t%s", who);
+				else
+					(void) printf("\t%s %s", who, who_name);
+			}
+
+			(void) printf("%c%s", delim,
+			    deleg_node->dpn_perm.dp_name);
+			delim = ',';
+		}
+
+		if (!prt_who)
+			(void) printf("\n");
+	}
+
+	uu_avl_walk_end(walk);
+}
+
+/*
+ * Print the permission report for every dataset in the set: a dashed
+ * header line per dataset, then its set/create and local/descendent
+ * permission sections.
+ */
+static void
+print_fs_perms(fs_perm_set_t *fspset)
+{
+	fs_perm_node_t *node = NULL;
+	char buf[ZFS_MAXNAMELEN+32];
+	const char *dsname = buf;
+
+	for (node = uu_list_first(fspset->fsps_list); node != NULL;
+	    node = uu_list_next(fspset->fsps_list, node)) {
+		uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
+		uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
+		int left = 0;
+
+		(void) snprintf(buf, ZFS_MAXNAMELEN+32,
+		    gettext("---- Permissions on %s "),
+		    node->fspn_fsperm.fsp_name);
+		/* dataset names may contain '%'; never use them as format */
+		(void) printf("%s", dsname);
+		/* pad the header with dashes out to column 70 */
+		left = 70 - strlen(buf);
+		while (left-- > 0)
+			(void) printf("-");
+		(void) printf("\n");
+
+		print_set_creat_perms(sc_avl);
+		print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE,
+		    gettext("Local permissions:\n"));
+		print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE,
+		    gettext("Descendent permissions:\n"));
+		print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE,
+		    gettext("Local+Descendent permissions:\n"));
+	}
+}
+
+/* global accumulator for the current allow/unallow invocation */
+/* (static storage: any members beyond the initializers are zeroed) */
+static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
+
+/* callback argument bundle for set_deleg_perms() */
+struct deleg_perms {
+	boolean_t un;	/* B_TRUE when removing (unallow) */
+	nvlist_t *nvl;	/* fsacl nvlist to apply */
+};
+
+static int
+set_deleg_perms(zfs_handle_t *zhp, void *data)
+{
+	struct deleg_perms *perms = (struct deleg_perms *)data;
+	zfs_type_t zfs_type = zfs_get_type(zhp);
+
+	if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME)
+		return (0);
+
+	return (zfs_set_fsacl(zhp, perms->un, perms->nvl));
+}
+
+static int
+zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
+{
+	zfs_handle_t *zhp;
+	nvlist_t *perm_nvl = NULL;
+	nvlist_t *update_perm_nvl = NULL;
+	int error = 1;
+	int c;
+	struct allow_opts opts = { 0 };
+
+	const char *optstr = un ? "ldugecsrh" : "ldugecsh";
+
+	/* check opts */
+	while ((c = getopt(argc, argv, optstr)) != -1) {
+		switch (c) {
+		case 'l':
+			opts.local = B_TRUE;
+			break;
+		case 'd':
+			opts.descend = B_TRUE;
+			break;
+		case 'u':
+			opts.user = B_TRUE;
+			break;
+		case 'g':
+			opts.group = B_TRUE;
+			break;
+		case 'e':
+			opts.everyone = B_TRUE;
+			break;
+		case 's':
+			opts.set = B_TRUE;
+			break;
+		case 'c':
+			opts.create = B_TRUE;
+			break;
+		case 'r':
+			opts.recursive = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case 'h':
+			opts.prt_usage = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check arguments */
+	parse_allow_args(argc, argv, un, &opts);
+
+	/* try to open the dataset */
+	if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM |
+	    ZFS_TYPE_VOLUME)) == NULL) {
+		(void) fprintf(stderr, "Failed to open dataset: %s\n",
+		    opts.dataset);
+		return (-1);
+	}
+
+	if (zfs_get_fsacl(zhp, &perm_nvl) != 0)
+		goto cleanup2;
+
+	fs_perm_set_init(&fs_perm_set);
+	if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) {
+		(void) fprintf(stderr, "Failed to parse fsacl permissions\n");
+		goto cleanup1;
+	}
+
+	if (opts.prt_perms)
+		print_fs_perms(&fs_perm_set);
+	else {
+		(void) construct_fsacl_list(un, &opts, &update_perm_nvl);
+		if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0)
+			goto cleanup0;
+
+		if (un && opts.recursive) {
+			struct deleg_perms data = { un, update_perm_nvl };
+			if (zfs_iter_filesystems(zhp, set_deleg_perms,
+			    &data) != 0)
+				goto cleanup0;
+		}
+	}
+
+	error = 0;
+
+cleanup0:
+	nvlist_free(perm_nvl);
+	if (update_perm_nvl != NULL)
+		nvlist_free(update_perm_nvl);
+cleanup1:
+	fs_perm_set_fini(&fs_perm_set);
+cleanup2:
+	zfs_close(zhp);
+
+	return (error);
+}
+
+/*
+ * zfs allow [-ldug] <"everyone"|user|group>[,...] <perms> <dataset>
+ * zfs allow [-ld] -e <perms> <dataset>
+ * zfs allow -c <perms> <dataset>
+ * zfs allow -s @setname <perms> <dataset>
+ *
+ * Delegate permissions on the given dataset to users, groups, or everyone.
+ */
+static int
+zfs_do_allow(int argc, char **argv)
+{
+	return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE));
+}
+
+/*
+ * zfs unallow [-rldug] <"everyone"|user|group>[,...] [<perms>] <dataset>
+ * zfs unallow [-rld] -e [<perms>] <dataset>
+ * zfs unallow [-r] -c [<perms>] <dataset>
+ * zfs unallow [-r] -s @setname [<perms>] <dataset>
+ *
+ * Remove delegated permissions from the given dataset.
+ */
+static int
+zfs_do_unallow(int argc, char **argv)
+{
+	return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE));
+}
+
+static int
+zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding)
+{
+	int errors = 0;
+	int i;
+	const char *tag;
+	boolean_t recursive = B_FALSE;
+	boolean_t temphold = B_FALSE;
+	const char *opts = holding ? "rt" : "r";
+	int c;
+
+	/* check options */
+	while ((c = getopt(argc, argv, opts)) != -1) {
+		switch (c) {
+		case 'r':
+			recursive = B_TRUE;
+			break;
+		case 't':
+			temphold = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 2)
+		usage(B_FALSE);
+
+	tag = argv[0];
+	--argc;
+	++argv;
+
+	if (holding && tag[0] == '.') {
+		/* tags starting with '.' are reserved for libzfs */
+		(void) fprintf(stderr, gettext("tag may not start with '.'\n"));
+		usage(B_FALSE);
+	}
+
+	for (i = 0; i < argc; ++i) {
+		zfs_handle_t *zhp;
+		char parent[ZFS_MAXNAMELEN];
+		const char *delim;
+		char *path = argv[i];
+
+		delim = strchr(path, '@');
+		if (delim == NULL) {
+			(void) fprintf(stderr,
+			    gettext("'%s' is not a snapshot\n"), path);
+			++errors;
+			continue;
+		}
+		(void) strncpy(parent, path, delim - path);
+		parent[delim - path] = '\0';
+
+		zhp = zfs_open(g_zfs, parent,
+		    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+		if (zhp == NULL) {
+			++errors;
+			continue;
+		}
+		if (holding) {
+			if (zfs_hold(zhp, delim+1, tag, recursive,
+			    temphold, B_FALSE, -1, 0, 0) != 0)
+				++errors;
+		} else {
+			if (zfs_release(zhp, delim+1, tag, recursive) != 0)
+				++errors;
+		}
+		zfs_close(zhp);
+	}
+
+	return (errors != 0);
+}
+
+/*
+ * zfs hold [-r] [-t] <tag> <snap> ...
+ *
+ *	-r	Recursively hold
+ *	-t	Temporary hold (hidden option)
+ *
+ * Apply a user-hold with the given tag to the list of snapshots.
+ */
+static int
+zfs_do_hold(int argc, char **argv)
+{
+	return (zfs_do_hold_rele_impl(argc, argv, B_TRUE));
+}
+
+/*
+ * zfs release [-r] <tag> <snap> ...
+ *
+ *	-r	Recursively release
+ *
+ * Release a user-hold with the given tag from the list of snapshots.
+ */
+static int
+zfs_do_release(int argc, char **argv)
+{
+	return (zfs_do_hold_rele_impl(argc, argv, B_FALSE));
+}
+
+typedef struct holds_cbdata {
+	boolean_t	cb_recursive;
+	const char	*cb_snapname;
+	nvlist_t	**cb_nvlp;
+	size_t		cb_max_namelen;
+	size_t		cb_max_taglen;
+} holds_cbdata_t;
+
+#define	STRFTIME_FMT_STR "%a %b %e %k:%M %Y"
+#define	DATETIME_BUF_LEN (32)
+/*
+ * Print a listing of the holds in nvl: NAME, TAG and TIMESTAMP columns.
+ */
+static void
+print_holds(boolean_t scripted, size_t nwidth, size_t tagwidth, nvlist_t *nvl)
+{
+	int i;
+	nvpair_t *nvp = NULL;
+	char *hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" };
+	const char *col;
+
+	if (!scripted) {
+		for (i = 0; i < 3; i++) {
+			col = gettext(hdr_cols[i]);
+			if (i < 2)
+				(void) printf("%-*s  ", i ? tagwidth : nwidth,
+				    col);
+			else
+				(void) printf("%s\n", col);
+		}
+	}
+
+	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
+		char *zname = nvpair_name(nvp);
+		nvlist_t *nvl2;
+		nvpair_t *nvp2 = NULL;
+		(void) nvpair_value_nvlist(nvp, &nvl2);
+		while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) {
+			char tsbuf[DATETIME_BUF_LEN];
+			char *tagname = nvpair_name(nvp2);
+			uint64_t val = 0;
+			time_t time;
+			struct tm t;
+			char sep = scripted ? '\t' : ' ';
+			size_t sepnum = scripted ? 1 : 2;
+
+			(void) nvpair_value_uint64(nvp2, &val);
+			time = (time_t)val;
+			(void) localtime_r(&time, &t);
+			(void) strftime(tsbuf, DATETIME_BUF_LEN,
+			    gettext(STRFTIME_FMT_STR), &t);
+
+			(void) printf("%-*s%*c%-*s%*c%s\n", nwidth, zname,
+			    sepnum, sep, tagwidth, tagname, sepnum, sep, tsbuf);
+		}
+	}
+}
+
+/*
+ * Generic callback function to list a dataset or snapshot.
+ */
+static int
+holds_callback(zfs_handle_t *zhp, void *data)
+{
+	holds_cbdata_t *cbp = data;
+	nvlist_t *top_nvl = *cbp->cb_nvlp;
+	nvlist_t *nvl = NULL;
+	nvpair_t *nvp = NULL;
+	const char *zname = zfs_get_name(zhp);
+	size_t znamelen = strnlen(zname, ZFS_MAXNAMELEN);
+
+	if (cbp->cb_recursive) {
+		const char *snapname;
+		char *delim  = strchr(zname, '@');
+		if (delim == NULL)
+			return (0);
+
+		snapname = delim + 1;
+		if (strcmp(cbp->cb_snapname, snapname))
+			return (0);
+	}
+
+	if (zfs_get_holds(zhp, &nvl) != 0)
+		return (-1);
+
+	if (znamelen > cbp->cb_max_namelen)
+		cbp->cb_max_namelen  = znamelen;
+
+	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
+		const char *tag = nvpair_name(nvp);
+		size_t taglen = strnlen(tag, MAXNAMELEN);
+		if (taglen > cbp->cb_max_taglen)
+			cbp->cb_max_taglen  = taglen;
+	}
+
+	return (nvlist_add_nvlist(top_nvl, zname, nvl));
+}
+
+/*
+ * zfs holds [-r] <snap> ...
+ *
+ *	-r	Recursively hold
+ */
+static int
+zfs_do_holds(int argc, char **argv)
+{
+	int errors = 0;
+	int c;
+	int i;
+	boolean_t scripted = B_FALSE;
+	boolean_t recursive = B_FALSE;
+	const char *opts = "rH";
+	nvlist_t *nvl;
+
+	int types = ZFS_TYPE_SNAPSHOT;
+	holds_cbdata_t cb = { 0 };
+
+	int limit = 0;
+	int ret = 0;
+	int flags = 0;
+
+	/* check options */
+	while ((c = getopt(argc, argv, opts)) != -1) {
+		switch (c) {
+		case 'r':
+			recursive = B_TRUE;
+			break;
+		case 'H':
+			scripted = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	if (recursive) {
+		types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
+		flags |= ZFS_ITER_RECURSE;
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (argc < 1)
+		usage(B_FALSE);
+
+	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+		nomem();
+
+	for (i = 0; i < argc; ++i) {
+		char *snapshot = argv[i];
+		const char *delim;
+		const char *snapname;
+
+		delim = strchr(snapshot, '@');
+		if (delim == NULL) {
+			(void) fprintf(stderr,
+			    gettext("'%s' is not a snapshot\n"), snapshot);
+			++errors;
+			continue;
+		}
+		snapname = delim + 1;
+		if (recursive)
+			snapshot[delim - snapshot] = '\0';
+
+		cb.cb_recursive = recursive;
+		cb.cb_snapname = snapname;
+		cb.cb_nvlp = &nvl;
+
+		/*
+		 *  1. collect holds data, set format options
+		 */
+		ret = zfs_for_each(argc, argv, flags, types, NULL, NULL, limit,
+		    holds_callback, &cb);
+		if (ret != 0)
+			++errors;
+	}
+
+	/*
+	 *  2. print holds data
+	 */
+	print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl);
+
+	if (nvlist_empty(nvl))
+		(void) printf(gettext("no datasets available\n"));
+
+	nvlist_free(nvl);
+
+	return (0 != errors);
+}
+
+#define	CHECK_SPINNER 30
+#define	SPINNER_TIME 3		/* seconds */
+#define	MOUNT_TIME 5		/* seconds */
+
+static int
+get_one_dataset(zfs_handle_t *zhp, void *data)
+{
+	static char *spin[] = { "-", "\\", "|", "/" };
+	static int spinval = 0;
+	static int spincheck = 0;
+	static time_t last_spin_time = (time_t)0;
+	get_all_cb_t *cbp = data;
+	zfs_type_t type = zfs_get_type(zhp);
+
+	if (cbp->cb_verbose) {
+		if (--spincheck < 0) {
+			time_t now = time(NULL);
+			if (last_spin_time + SPINNER_TIME < now) {
+				update_progress(spin[spinval++ % 4]);
+				last_spin_time = now;
+			}
+			spincheck = CHECK_SPINNER;
+		}
+	}
+
+	/*
+	 * Iterate over any nested datasets.
+	 */
+	if (zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) {
+		zfs_close(zhp);
+		return (1);
+	}
+
+	/*
+	 * Skip any datasets whose type does not match.
+	 */
+	if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
+		zfs_close(zhp);
+		return (0);
+	}
+	libzfs_add_handle(cbp, zhp);
+	assert(cbp->cb_used <= cbp->cb_alloc);
+
+	return (0);
+}
+
+static void
+get_all_datasets(zfs_handle_t ***dslist, size_t *count, boolean_t verbose)
+{
+	get_all_cb_t cb = { 0 };
+	cb.cb_verbose = verbose;
+	cb.cb_getone = get_one_dataset;
+
+	if (verbose)
+		set_progress_header(gettext("Reading ZFS config"));
+	(void) zfs_iter_root(g_zfs, get_one_dataset, &cb);
+
+	*dslist = cb.cb_handles;
+	*count = cb.cb_used;
+
+	if (verbose)
+		finish_progress(gettext("done."));
+}
+
+/*
+ * Generic callback for sharing or mounting filesystems.  Because the code is so
+ * similar, we have a common function with an extra parameter to determine which
+ * mode we are using.
+ */
+#define	OP_SHARE	0x1
+#define	OP_MOUNT	0x2
+
+/*
+ * Share or mount a dataset.
+ */
+static int
+share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
+    boolean_t explicit, const char *options)
+{
+	char mountpoint[ZFS_MAXPROPLEN];
+	char shareopts[ZFS_MAXPROPLEN];
+	char smbshareopts[ZFS_MAXPROPLEN];
+	const char *cmdname = op == OP_SHARE ? "share" : "mount";
+	struct mnttab mnt;
+	uint64_t zoned, canmount;
+	boolean_t shared_nfs, shared_smb;
+
+	assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM);
+
+	/*
+	 * Check to make sure we can mount/share this dataset.  If we
+	 * are in the global zone and the filesystem is exported to a
+	 * local zone, or if we are in a local zone and the
+	 * filesystem is not exported, then it is an error.
+	 */
+	zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+
+	if (zoned && getzoneid() == GLOBAL_ZONEID) {
+		if (!explicit)
+			return (0);
+
+		(void) fprintf(stderr, gettext("cannot %s '%s': "
+		    "dataset is exported to a local zone\n"), cmdname,
+		    zfs_get_name(zhp));
+		return (1);
+
+	} else if (!zoned && getzoneid() != GLOBAL_ZONEID) {
+		if (!explicit)
+			return (0);
+
+		(void) fprintf(stderr, gettext("cannot %s '%s': "
+		    "permission denied\n"), cmdname,
+		    zfs_get_name(zhp));
+		return (1);
+	}
+
+	/*
+	 * Ignore any filesystems which don't apply to us. This
+	 * includes those with a legacy mountpoint, or those with
+	 * legacy share options.
+	 */
+	verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
+	    sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
+	verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
+	    sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0);
+	verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts,
+	    sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0);
+
+	if (op == OP_SHARE && strcmp(shareopts, "off") == 0 &&
+	    strcmp(smbshareopts, "off") == 0) {
+		if (!explicit)
+			return (0);
+
+		(void) fprintf(stderr, gettext("cannot share '%s': "
+		    "legacy share\n"), zfs_get_name(zhp));
+		(void) fprintf(stderr, gettext("use share(1M) to "
+		    "share this filesystem, or set "
+		    "sharenfs property on\n"));
+		return (1);
+	}
+
+	/*
+	 * We cannot share or mount legacy filesystems. If the
+	 * shareopts is non-legacy but the mountpoint is legacy, we
+	 * treat it as a legacy share.
+	 */
+	if (strcmp(mountpoint, "legacy") == 0) {
+		if (!explicit)
+			return (0);
+
+		(void) fprintf(stderr, gettext("cannot %s '%s': "
+		    "legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
+		(void) fprintf(stderr, gettext("use %s(1M) to "
+		    "%s this filesystem\n"), cmdname, cmdname);
+		return (1);
+	}
+
+	if (strcmp(mountpoint, "none") == 0) {
+		if (!explicit)
+			return (0);
+
+		(void) fprintf(stderr, gettext("cannot %s '%s': no "
+		    "mountpoint set\n"), cmdname, zfs_get_name(zhp));
+		return (1);
+	}
+
+	/*
+	 * canmount	explicit	outcome
+	 * on		no		pass through
+	 * on		yes		pass through
+	 * off		no		return 0
+	 * off		yes		display error, return 1
+	 * noauto	no		return 0
+	 * noauto	yes		pass through
+	 */
+	canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
+	if (canmount == ZFS_CANMOUNT_OFF) {
+		if (!explicit)
+			return (0);
+
+		(void) fprintf(stderr, gettext("cannot %s '%s': "
+		    "'canmount' property is set to 'off'\n"), cmdname,
+		    zfs_get_name(zhp));
+		return (1);
+	} else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) {
+		return (0);
+	}
+
+	/*
+	 * At this point, we have verified that the mountpoint and/or
+	 * shareopts are appropriate for auto management. If the
+	 * filesystem is already mounted or shared, return (failing
+	 * for explicit requests); otherwise mount or share the
+	 * filesystem.
+	 */
+	switch (op) {
+	case OP_SHARE:
+
+		shared_nfs = zfs_is_shared_nfs(zhp, NULL);
+		shared_smb = zfs_is_shared_smb(zhp, NULL);
+
+		if (shared_nfs && shared_smb ||
+		    (shared_nfs && strcmp(shareopts, "on") == 0 &&
+		    strcmp(smbshareopts, "off") == 0) ||
+		    (shared_smb && strcmp(smbshareopts, "on") == 0 &&
+		    strcmp(shareopts, "off") == 0)) {
+			if (!explicit)
+				return (0);
+
+			(void) fprintf(stderr, gettext("cannot share "
+			    "'%s': filesystem already shared\n"),
+			    zfs_get_name(zhp));
+			return (1);
+		}
+
+		if (!zfs_is_mounted(zhp, NULL) &&
+		    zfs_mount(zhp, NULL, 0) != 0)
+			return (1);
+
+		if (protocol == NULL) {
+			if (zfs_shareall(zhp) != 0)
+				return (1);
+		} else if (strcmp(protocol, "nfs") == 0) {
+			if (zfs_share_nfs(zhp))
+				return (1);
+		} else if (strcmp(protocol, "smb") == 0) {
+			if (zfs_share_smb(zhp))
+				return (1);
+		} else {
+			(void) fprintf(stderr, gettext("cannot share "
+			    "'%s': invalid share type '%s' "
+			    "specified\n"),
+			    zfs_get_name(zhp), protocol);
+			return (1);
+		}
+
+		break;
+
+	case OP_MOUNT:
+		if (options == NULL)
+			mnt.mnt_mntopts = "";
+		else
+			mnt.mnt_mntopts = (char *)options;
+
+		if (!hasmntopt(&mnt, MNTOPT_REMOUNT) &&
+		    zfs_is_mounted(zhp, NULL)) {
+			if (!explicit)
+				return (0);
+
+			(void) fprintf(stderr, gettext("cannot mount "
+			    "'%s': filesystem already mounted\n"),
+			    zfs_get_name(zhp));
+			return (1);
+		}
+
+		if (zfs_mount(zhp, options, flags) != 0)
+			return (1);
+		break;
+	}
+
+	return (0);
+}
+
+/*
+ * Reports progress in the form "(current/total)".  Not thread-safe.
+ */
+static void
+report_mount_progress(int current, int total)
+{
+	static time_t last_progress_time = 0;
+	time_t now = time(NULL);
+	char info[32];
+
+	/* report 1..n instead of 0..n-1 */
+	++current;
+
+	/* display header if we're here for the first time */
+	if (current == 1) {
+		set_progress_header(gettext("Mounting ZFS filesystems"));
+	} else if (current != total && last_progress_time + MOUNT_TIME >= now) {
+		/* too soon to report again */
+		return;
+	}
+
+	last_progress_time = now;
+
+	(void) sprintf(info, "(%d/%d)", current, total);
+
+	if (current == total)
+		finish_progress(info);
+	else
+		update_progress(info);
+}
+
+static void
+append_options(char *mntopts, char *newopts)
+{
+	int len = strlen(mntopts);
+
+	/* original length plus new string to append plus 1 for the comma */
+	if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) {
+		(void) fprintf(stderr, gettext("the opts argument for "
+		    "'%c' option is too long (more than %d chars)\n"),
+		    'o', MNT_LINE_MAX);
+		usage(B_FALSE);
+	}
+
+	if (*mntopts)
+		mntopts[len++] = ',';
+
+	(void) strcpy(&mntopts[len], newopts);
+}
+
+static int
+share_mount(int op, int argc, char **argv)
+{
+	int do_all = 0;
+	boolean_t verbose = B_FALSE;
+	int c, ret = 0;
+	char *options = NULL;
+	int flags = 0;
+
+	/* check options */
+	while ((c = getopt(argc, argv, op == OP_MOUNT ? ":avo:O" : "a"))
+	    != -1) {
+		switch (c) {
+		case 'a':
+			do_all = 1;
+			break;
+		case 'v':
+			verbose = B_TRUE;
+			break;
+		case 'o':
+			if (*optarg == '\0') {
+				(void) fprintf(stderr, gettext("empty mount "
+				    "options (-o) specified\n"));
+				usage(B_FALSE);
+			}
+
+			if (options == NULL)
+				options = safe_malloc(MNT_LINE_MAX + 1);
+
+			/* option validation is done later */
+			append_options(options, optarg);
+			break;
+
+		case 'O':
+			warnx("no overlay mounts support on FreeBSD, ignoring");
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check number of arguments */
+	if (do_all) {
+		zfs_handle_t **dslist = NULL;
+		size_t i, count = 0;
+		char *protocol = NULL;
+
+		if (op == OP_SHARE && argc > 0) {
+			if (strcmp(argv[0], "nfs") != 0 &&
+			    strcmp(argv[0], "smb") != 0) {
+				(void) fprintf(stderr, gettext("share type "
+				    "must be 'nfs' or 'smb'\n"));
+				usage(B_FALSE);
+			}
+			protocol = argv[0];
+			argc--;
+			argv++;
+		}
+
+		if (argc != 0) {
+			(void) fprintf(stderr, gettext("too many arguments\n"));
+			usage(B_FALSE);
+		}
+
+		start_progress_timer();
+		get_all_datasets(&dslist, &count, verbose);
+
+		if (count == 0)
+			return (0);
+
+		qsort(dslist, count, sizeof (void *), libzfs_dataset_cmp);
+
+		for (i = 0; i < count; i++) {
+			if (verbose)
+				report_mount_progress(i, count);
+
+			if (share_mount_one(dslist[i], op, flags, protocol,
+			    B_FALSE, options) != 0)
+				ret = 1;
+			zfs_close(dslist[i]);
+		}
+
+		free(dslist);
+	} else if (argc == 0) {
+		struct mnttab entry;
+
+		if ((op == OP_SHARE) || (options != NULL)) {
+			(void) fprintf(stderr, gettext("missing filesystem "
+			    "argument (specify -a for all)\n"));
+			usage(B_FALSE);
+		}
+
+		/*
+		 * When mount is given no arguments, go through /etc/mnttab and
+		 * display any active ZFS mounts.  We hide any snapshots, since
+		 * they are controlled automatically.
+		 */
+		rewind(mnttab_file);
+		while (getmntent(mnttab_file, &entry) == 0) {
+			if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
+			    strchr(entry.mnt_special, '@') != NULL)
+				continue;
+
+			(void) printf("%-30s  %s\n", entry.mnt_special,
+			    entry.mnt_mountp);
+		}
+
+	} else {
+		zfs_handle_t *zhp;
+
+		if (argc > 1) {
+			(void) fprintf(stderr,
+			    gettext("too many arguments\n"));
+			usage(B_FALSE);
+		}
+
+		if ((zhp = zfs_open(g_zfs, argv[0],
+		    ZFS_TYPE_FILESYSTEM)) == NULL) {
+			ret = 1;
+		} else {
+			ret = share_mount_one(zhp, op, flags, NULL, B_TRUE,
+			    options);
+			zfs_close(zhp);
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * zfs mount -a [nfs]
+ * zfs mount filesystem
+ *
+ * Mount all filesystems, or mount the given filesystem.
+ */
+static int
+zfs_do_mount(int argc, char **argv)
+{
+	return (share_mount(OP_MOUNT, argc, argv));
+}
+
+/*
+ * zfs share -a [nfs | smb]
+ * zfs share filesystem
+ *
+ * Share all filesystems, or share the given filesystem.
+ */
+static int
+zfs_do_share(int argc, char **argv)
+{
+	return (share_mount(OP_SHARE, argc, argv));
+}
+
+typedef struct unshare_unmount_node {
+	zfs_handle_t	*un_zhp;
+	char		*un_mountp;
+	uu_avl_node_t	un_avlnode;
+} unshare_unmount_node_t;
+
+/* ARGSUSED */
+static int
+unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
+{
+	const unshare_unmount_node_t *l = larg;
+	const unshare_unmount_node_t *r = rarg;
+
+	return (strcmp(l->un_mountp, r->un_mountp));
+}
+
+/*
+ * Convenience routine used by zfs_do_umount() and manual_unmount().  Given an
+ * absolute path, find the entry in /etc/mnttab, verify that it's a ZFS
+ * filesystem, and unmount it appropriately.
+ */
+static int
+unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
+{
+	zfs_handle_t *zhp;
+	int ret = 0;
+	struct stat64 statbuf;
+	struct extmnttab entry;
+	const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
+	ino_t path_inode;
+
+	/*
+	 * Search for the path in /etc/mnttab.  Rather than looking for the
+	 * specific path, which can be fooled by non-standard paths (i.e. ".."
+	 * or "//"), we stat() the path and search for the corresponding
+	 * (major,minor) device pair.
+	 */
+	if (stat64(path, &statbuf) != 0) {
+		(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
+		    cmdname, path, strerror(errno));
+		return (1);
+	}
+	path_inode = statbuf.st_ino;
+
+	/*
+	 * Search for the given (major,minor) pair in the mount table.
+	 */
+#ifdef sun
+	rewind(mnttab_file);
+	while ((ret = getextmntent(mnttab_file, &entry, 0)) == 0) {
+		if (entry.mnt_major == major(statbuf.st_dev) &&
+		    entry.mnt_minor == minor(statbuf.st_dev))
+			break;
+	}
+#else
+	{
+		struct statfs sfs;
+
+		if (statfs(path, &sfs) != 0) {
+			(void) fprintf(stderr, "%s: %s\n", path,
+			    strerror(errno));
+			ret = -1;
+		}
+		statfs2mnttab(&sfs, &entry);
+	}
+#endif
+	if (ret != 0) {
+		if (op == OP_SHARE) {
+			(void) fprintf(stderr, gettext("cannot %s '%s': not "
+			    "currently mounted\n"), cmdname, path);
+			return (1);
+		}
+		(void) fprintf(stderr, gettext("warning: %s not in mnttab\n"),
+		    path);
+		if ((ret = umount2(path, flags)) != 0)
+			(void) fprintf(stderr, gettext("%s: %s\n"), path,
+			    strerror(errno));
+		return (ret != 0);
+	}
+
+	if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
+		(void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS "
+		    "filesystem\n"), cmdname, path);
+		return (1);
+	}
+
+	if ((zhp = zfs_open(g_zfs, entry.mnt_special,
+	    ZFS_TYPE_FILESYSTEM)) == NULL)
+		return (1);
+
+	ret = 1;
+	if (stat64(entry.mnt_mountp, &statbuf) != 0) {
+		(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
+		    cmdname, path, strerror(errno));
+		goto out;
+	} else if (statbuf.st_ino != path_inode) {
+		(void) fprintf(stderr, gettext("cannot "
+		    "%s '%s': not a mountpoint\n"), cmdname, path);
+		goto out;
+	}
+
+	if (op == OP_SHARE) {
+		char nfs_mnt_prop[ZFS_MAXPROPLEN];
+		char smbshare_prop[ZFS_MAXPROPLEN];
+
+		verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop,
+		    sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0);
+		verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop,
+		    sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0);
+
+		if (strcmp(nfs_mnt_prop, "off") == 0 &&
+		    strcmp(smbshare_prop, "off") == 0) {
+			(void) fprintf(stderr, gettext("cannot unshare "
+			    "'%s': legacy share\n"), path);
+			(void) fprintf(stderr, gettext("use "
+			    "unshare(1M) to unshare this filesystem\n"));
+		} else if (!zfs_is_shared(zhp)) {
+			(void) fprintf(stderr, gettext("cannot unshare '%s': "
+			    "not currently shared\n"), path);
+		} else {
+			ret = zfs_unshareall_bypath(zhp, path);
+		}
+	} else {
+		char mtpt_prop[ZFS_MAXPROPLEN];
+
+		verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop,
+		    sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0);
+
+		if (is_manual) {
+			ret = zfs_unmount(zhp, NULL, flags);
+		} else if (strcmp(mtpt_prop, "legacy") == 0) {
+			(void) fprintf(stderr, gettext("cannot unmount "
+			    "'%s': legacy mountpoint\n"),
+			    zfs_get_name(zhp));
+			(void) fprintf(stderr, gettext("use umount(1M) "
+			    "to unmount this filesystem\n"));
+		} else {
+			ret = zfs_unmountall(zhp, flags);
+		}
+	}
+
+out:
+	zfs_close(zhp);
+
+	return (ret != 0);
+}
+
+/*
+ * Generic callback for unsharing or unmounting a filesystem.
+ */
+static int
+unshare_unmount(int op, int argc, char **argv)
+{
+	int do_all = 0;
+	int flags = 0;
+	int ret = 0;
+	int c;
+	zfs_handle_t *zhp;
+	char nfs_mnt_prop[ZFS_MAXPROPLEN];
+	char sharesmb[ZFS_MAXPROPLEN];
+
+	/* check options */
+	while ((c = getopt(argc, argv, op == OP_SHARE ? "a" : "af")) != -1) {
+		switch (c) {
+		case 'a':
+			do_all = 1;
+			break;
+		case 'f':
+			flags = MS_FORCE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (do_all) {
+		/*
+		 * We could make use of zfs_for_each() to walk all datasets in
+		 * the system, but this would be very inefficient, especially
+		 * since we would have to linearly search /etc/mnttab for each
+		 * one.  Instead, do one pass through /etc/mnttab looking for
+		 * zfs entries and call zfs_unmount() for each one.
+		 *
+		 * Things get a little tricky if the administrator has created
+		 * mountpoints beneath other ZFS filesystems.  In this case, we
+		 * have to unmount the deepest filesystems first.  To accomplish
+		 * this, we place all the mountpoints in an AVL tree sorted by
+		 * the special type (dataset name), and walk the result in
+		 * reverse to make sure to get any snapshots first.
+		 */
+		struct mnttab entry;
+		uu_avl_pool_t *pool;
+		uu_avl_t *tree;
+		unshare_unmount_node_t *node;
+		uu_avl_index_t idx;
+		uu_avl_walk_t *walk;
+
+		if (argc != 0) {
+			(void) fprintf(stderr, gettext("too many arguments\n"));
+			usage(B_FALSE);
+		}
+
+		if (((pool = uu_avl_pool_create("unmount_pool",
+		    sizeof (unshare_unmount_node_t),
+		    offsetof(unshare_unmount_node_t, un_avlnode),
+		    unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
+		    ((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
+			nomem();
+
+		rewind(mnttab_file);
+		while (getmntent(mnttab_file, &entry) == 0) {
+
+			/* ignore non-ZFS entries */
+			if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
+				continue;
+
+			/* ignore snapshots */
+			if (strchr(entry.mnt_special, '@') != NULL)
+				continue;
+
+			if ((zhp = zfs_open(g_zfs, entry.mnt_special,
+			    ZFS_TYPE_FILESYSTEM)) == NULL) {
+				ret = 1;
+				continue;
+			}
+
+			switch (op) {
+			case OP_SHARE:
+				verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
+				    nfs_mnt_prop,
+				    sizeof (nfs_mnt_prop),
+				    NULL, NULL, 0, B_FALSE) == 0);
+				if (strcmp(nfs_mnt_prop, "off") != 0)
+					break;
+				verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
+				    nfs_mnt_prop,
+				    sizeof (nfs_mnt_prop),
+				    NULL, NULL, 0, B_FALSE) == 0);
+				if (strcmp(nfs_mnt_prop, "off") == 0)
+					continue;
+				break;
+			case OP_MOUNT:
+				/* Ignore legacy mounts */
+				verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
+				    nfs_mnt_prop,
+				    sizeof (nfs_mnt_prop),
+				    NULL, NULL, 0, B_FALSE) == 0);
+				if (strcmp(nfs_mnt_prop, "legacy") == 0)
+					continue;
+				/* Ignore canmount=noauto mounts */
+				if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) ==
+				    ZFS_CANMOUNT_NOAUTO)
+					continue;
+			default:
+				break;
+			}
+
+			node = safe_malloc(sizeof (unshare_unmount_node_t));
+			node->un_zhp = zhp;
+			node->un_mountp = safe_strdup(entry.mnt_mountp);
+
+			uu_avl_node_init(node, &node->un_avlnode, pool);
+
+			if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
+				uu_avl_insert(tree, node, idx);
+			} else {
+				zfs_close(node->un_zhp);
+				free(node->un_mountp);
+				free(node);
+			}
+		}
+
+		/*
+		 * Walk the AVL tree in reverse, unmounting each filesystem and
+		 * removing it from the AVL tree in the process.
+		 */
+		if ((walk = uu_avl_walk_start(tree,
+		    UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
+			nomem();
+
+		while ((node = uu_avl_walk_next(walk)) != NULL) {
+			uu_avl_remove(tree, node);
+
+			switch (op) {
+			case OP_SHARE:
+				if (zfs_unshareall_bypath(node->un_zhp,
+				    node->un_mountp) != 0)
+					ret = 1;
+				break;
+
+			case OP_MOUNT:
+				if (zfs_unmount(node->un_zhp,
+				    node->un_mountp, flags) != 0)
+					ret = 1;
+				break;
+			}
+
+			zfs_close(node->un_zhp);
+			free(node->un_mountp);
+			free(node);
+		}
+
+		uu_avl_walk_end(walk);
+		uu_avl_destroy(tree);
+		uu_avl_pool_destroy(pool);
+
+	} else {
+		if (argc != 1) {
+			if (argc == 0)
+				(void) fprintf(stderr,
+				    gettext("missing filesystem argument\n"));
+			else
+				(void) fprintf(stderr,
+				    gettext("too many arguments\n"));
+			usage(B_FALSE);
+		}
+
+		/*
+		 * We have an argument, but it may be a full path or a ZFS
+		 * filesystem.  Pass full paths off to unmount_path() (shared by
+		 * manual_unmount), otherwise open the filesystem and pass to
+		 * zfs_unmount().
+		 */
+		if (argv[0][0] == '/')
+			return (unshare_unmount_path(op, argv[0],
+			    flags, B_FALSE));
+
+		if ((zhp = zfs_open(g_zfs, argv[0],
+		    ZFS_TYPE_FILESYSTEM)) == NULL)
+			return (1);
+
+		verify(zfs_prop_get(zhp, op == OP_SHARE ?
+		    ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT,
+		    nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL,
+		    NULL, 0, B_FALSE) == 0);
+
+		switch (op) {
+		case OP_SHARE:
+			verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
+			    nfs_mnt_prop,
+			    sizeof (nfs_mnt_prop),
+			    NULL, NULL, 0, B_FALSE) == 0);
+			verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
+			    sharesmb, sizeof (sharesmb), NULL, NULL,
+			    0, B_FALSE) == 0);
+
+			if (strcmp(nfs_mnt_prop, "off") == 0 &&
+			    strcmp(sharesmb, "off") == 0) {
+				(void) fprintf(stderr, gettext("cannot "
+				    "unshare '%s': legacy share\n"),
+				    zfs_get_name(zhp));
+				(void) fprintf(stderr, gettext("use "
+				    "unshare(1M) to unshare this "
+				    "filesystem\n"));
+				ret = 1;
+			} else if (!zfs_is_shared(zhp)) {
+				(void) fprintf(stderr, gettext("cannot "
+				    "unshare '%s': not currently "
+				    "shared\n"), zfs_get_name(zhp));
+				ret = 1;
+			} else if (zfs_unshareall(zhp) != 0) {
+				ret = 1;
+			}
+			break;
+
+		case OP_MOUNT:
+			if (strcmp(nfs_mnt_prop, "legacy") == 0) {
+				(void) fprintf(stderr, gettext("cannot "
+				    "unmount '%s': legacy "
+				    "mountpoint\n"), zfs_get_name(zhp));
+				(void) fprintf(stderr, gettext("use "
+				    "umount(1M) to unmount this "
+				    "filesystem\n"));
+				ret = 1;
+			} else if (!zfs_is_mounted(zhp, NULL)) {
+				(void) fprintf(stderr, gettext("cannot "
+				    "unmount '%s': not currently "
+				    "mounted\n"),
+				    zfs_get_name(zhp));
+				ret = 1;
+			} else if (zfs_unmountall(zhp, flags) != 0) {
+				ret = 1;
+			}
+			break;
+		}
+
+		zfs_close(zhp);
+	}
+
+	return (ret);
+}
+
+/*
+ * zfs unmount -a
+ * zfs unmount filesystem
+ *
+ * Unmount all filesystems, or a specific ZFS filesystem.
+ */
+static int
+zfs_do_unmount(int argc, char **argv)
+{
+	/* All option parsing and the actual work happen in the shared helper. */
+	return (unshare_unmount(OP_MOUNT, argc, argv));
+}
+
+/*
+ * zfs unshare -a
+ * zfs unshare filesystem
+ *
+ * Unshare all filesystems, or a specific ZFS filesystem.
+ */
+static int
+zfs_do_unshare(int argc, char **argv)
+{
+	/* Same helper as 'zfs unmount'; OP_SHARE selects the unshare path. */
+	return (unshare_unmount(OP_SHARE, argc, argv));
+}
+
+/*
+ * Attach/detach the given dataset to/from the given jail
+ */
+/* ARGSUSED */
+static int
+do_jail(int argc, char **argv, int attach)
+{
+	zfs_handle_t *fs;
+	int jid;
+	int error;
+
+	/* Exactly three arguments are expected: verb, jail, dataset. */
+	if (argc < 3) {
+		(void) fprintf(stderr, gettext("missing argument(s)\n"));
+		usage(B_FALSE);
+	} else if (argc > 3) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	/* Resolve the jail name or numeric id to a jail id. */
+	jid = jail_getid(argv[1]);
+	if (jid < 0) {
+		(void) fprintf(stderr, gettext("invalid jail id or name\n"));
+		usage(B_FALSE);
+	}
+
+	if ((fs = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM)) == NULL)
+		return (1);
+
+	/* A nonzero zfs_jail() result maps to exit status 1. */
+	error = (zfs_jail(fs, jid, attach) != 0);
+
+	zfs_close(fs);
+	return (error);
+}
+
+/*
+ * zfs jail jailid filesystem
+ *
+ * Attach the given dataset to the given jail
+ */
+/* ARGSUSED */
+static int
+zfs_do_jail(int argc, char **argv)
+{
+
+	/* attach flag = 1: attach the dataset to the jail */
+	return (do_jail(argc, argv, 1));
+}
+
+/*
+ * zfs unjail jailid filesystem
+ *
+ * Detach the given dataset from the given jail
+ */
+/* ARGSUSED */
+static int
+zfs_do_unjail(int argc, char **argv)
+{
+
+	/* attach flag = 0: detach the dataset from the jail */
+	return (do_jail(argc, argv, 0));
+}
+
+/*
+ * Called when invoked as /etc/fs/zfs/mount.  Do the mount if the mountpoint is
+ * 'legacy'.  Otherwise, complain that the user should be using 'zfs mount'.
+ *
+ * Returns 0 on success, 1 on mount/open failure, 2 on usage error.
+ */
+static int
+manual_mount(int argc, char **argv)
+{
+	zfs_handle_t *zhp;
+	char mountpoint[ZFS_MAXPROPLEN];
+	char mntopts[MNT_LINE_MAX] = { '\0' };
+	int ret = 0;
+	int c;
+	int flags = 0;
+	char *dataset, *path;
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":mo:O")) != -1) {
+		switch (c) {
+		case 'o':	/* comma-separated mount options */
+			(void) strlcpy(mntopts, optarg, sizeof (mntopts));
+			break;
+		case 'O':	/* allow mounting over a non-empty directory */
+			flags |= MS_OVERLAY;
+			break;
+		case 'm':	/* do not record the mount in mnttab */
+			flags |= MS_NOMNTTAB;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			(void) fprintf(stderr, gettext("usage: mount [-o opts] "
+			    "<path>\n"));
+			return (2);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check that we only have two arguments */
+	if (argc != 2) {
+		if (argc == 0)
+			(void) fprintf(stderr, gettext("missing dataset "
+			    "argument\n"));
+		else if (argc == 1)
+			(void) fprintf(stderr,
+			    gettext("missing mountpoint argument\n"));
+		else
+			(void) fprintf(stderr, gettext("too many arguments\n"));
+		(void) fprintf(stderr, "usage: mount <dataset> <mountpoint>\n");
+		return (2);
+	}
+
+	dataset = argv[0];
+	path = argv[1];
+
+	/* try to open the dataset */
+	if ((zhp = zfs_open(g_zfs, dataset, ZFS_TYPE_FILESYSTEM)) == NULL)
+		return (1);
+
+	(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
+	    sizeof (mountpoint), NULL, NULL, 0, B_FALSE);
+
+	/* check for legacy mountpoint and complain appropriately */
+	if (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) == 0) {
+		if (zmount(dataset, path, flags, MNTTYPE_ZFS,
+		    NULL, 0, mntopts, sizeof (mntopts)) != 0) {
+			(void) fprintf(stderr, gettext("mount failed: %s\n"),
+			    strerror(errno));
+			ret = 1;
+		}
+	} else {
+		(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
+		    "mounted using 'mount -F zfs'\n"), dataset);
+		(void) fprintf(stderr, gettext("Use 'zfs set mountpoint=%s' "
+		    "instead.\n"), path);
+		(void) fprintf(stderr, gettext("If you must use 'mount -F zfs' "
+		    "or /etc/vfstab, use 'zfs set mountpoint=legacy'.\n"));
+		(void) fprintf(stderr, gettext("See zfs(1M) for more "
+		    "information.\n"));
+		ret = 1;
+	}
+
+	/* Release the handle; the original leaked it on every return path. */
+	zfs_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * Called when invoked as /etc/fs/zfs/umount.  Unlike a manual mount, we allow
+ * unmounts of non-legacy filesystems, as this is the dominant administrative
+ * interface.
+ */
+static int
+manual_unmount(int argc, char **argv)
+{
+	int unmount_flags = 0;
+	int opt;
+
+	/* Only -f (force) is recognized; anything else prints usage. */
+	while ((opt = getopt(argc, argv, "f")) != -1) {
+		if (opt == 'f') {
+			unmount_flags = MS_FORCE;
+		} else {
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			(void) fprintf(stderr, gettext("usage: unmount [-f] "
+			    "<path>\n"));
+			return (2);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* Exactly one mountpoint path is required. */
+	if (argc == 0) {
+		(void) fprintf(stderr, gettext("missing path "
+		    "argument\n"));
+		(void) fprintf(stderr, gettext("usage: unmount [-f] <path>\n"));
+		return (2);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		(void) fprintf(stderr, gettext("usage: unmount [-f] <path>\n"));
+		return (2);
+	}
+
+	return (unshare_unmount_path(OP_MOUNT, argv[0], unmount_flags,
+	    B_TRUE));
+}
+
+/*
+ * Look up 'command' in the global command table.  On a match, store the
+ * table index through 'idx' and return 0; return 1 when nothing matches.
+ * Entries with a NULL name are spacers and are skipped.
+ */
+static int
+find_command_idx(char *command, int *idx)
+{
+	int slot = 0;
+
+	while (slot < NCOMMAND) {
+		const char *name = command_table[slot].name;
+
+		if (name != NULL && strcmp(command, name) == 0) {
+			*idx = slot;
+			return (0);
+		}
+		slot++;
+	}
+	return (1);
+}
+
+/*
+ * zfs diff [-FHt] <snapshot> [snapshot|filesystem]
+ *
+ * Display the differences between the given snapshot and the optional
+ * later snapshot (or the live filesystem).  Returns 0 on success,
+ * nonzero on failure.
+ */
+static int
+zfs_do_diff(int argc, char **argv)
+{
+	zfs_handle_t *zhp;
+	int flags = 0;
+	char *tosnap = NULL;
+	char *fromsnap = NULL;
+	char *atp, *copy;
+	int err = 0;
+	int c;
+
+	while ((c = getopt(argc, argv, "FHt")) != -1) {
+		switch (c) {
+		case 'F':	/* classify file types in the output */
+			flags |= ZFS_DIFF_CLASSIFY;
+			break;
+		case 'H':	/* parseable (machine-readable) output */
+			flags |= ZFS_DIFF_PARSEABLE;
+			break;
+		case 't':	/* include change timestamps */
+			flags |= ZFS_DIFF_TIMESTAMP;
+			break;
+		default:
+			(void) fprintf(stderr,
+			    gettext("invalid option '%c'\n"), optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 1) {
+		(void) fprintf(stderr,
+		    gettext("must provide at least one snapshot name\n"));
+		usage(B_FALSE);
+	}
+
+	if (argc > 2) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	fromsnap = argv[0];
+	tosnap = (argc == 2) ? argv[1] : NULL;
+
+	/*
+	 * Derive the filesystem name: the portion before '@' of whichever
+	 * argument carries a full snapshot name.
+	 */
+	copy = NULL;
+	if (*fromsnap != '@')
+		copy = strdup(fromsnap);
+	else if (tosnap)
+		copy = strdup(tosnap);
+	if (copy == NULL)
+		usage(B_FALSE);
+
+	/* Parenthesized assignment: truncate at the '@' if present. */
+	if ((atp = strchr(copy, '@')) != NULL)
+		*atp = '\0';
+
+	if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) {
+		free(copy);	/* don't leak the name on the error path */
+		return (1);
+	}
+
+	free(copy);
+
+	/*
+	 * Ignore SIGPIPE so that the library can give us
+	 * information on any failure
+	 */
+	(void) sigignore(SIGPIPE);
+
+	err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags);
+
+	zfs_close(zhp);
+
+	return (err != 0);
+}
+
+/*
+ * zfs(8) entry point.  Also doubles as the /etc/fs mount and umount
+ * program: when argv[0] is "mount" or "umount" the manual_* handlers
+ * run instead of the normal subcommand dispatch.
+ */
+int
+main(int argc, char **argv)
+{
+	int ret = 0;
+	int i;
+	char *progname;
+	char *cmdname;
+
+	(void) setlocale(LC_ALL, "");
+	(void) textdomain(TEXT_DOMAIN);
+
+	/* Subcommands report their own option errors; silence getopt's. */
+	opterr = 0;
+
+	if ((g_zfs = libzfs_init()) == NULL) {
+		(void) fprintf(stderr, gettext("internal error: failed to "
+		    "initialize ZFS library\n"));
+		return (1);
+	}
+
+	/* Stage the full command line for the pool history log. */
+	zpool_set_history_str("zfs", argc, argv, history_str);
+	verify(zpool_stage_history(g_zfs, history_str) == 0);
+
+	libzfs_print_on_error(g_zfs, B_TRUE);
+
+	if ((mnttab_file = fopen(MNTTAB, "r")) == NULL) {
+		(void) fprintf(stderr, gettext("internal error: unable to "
+		    "open %s\n"), MNTTAB);
+		return (1);
+	}
+
+	/*
+	 * This command also doubles as the /etc/fs mount and unmount program.
+	 * Determine if we should take this behavior based on argv[0].
+	 */
+	progname = basename(argv[0]);
+	if (strcmp(progname, "mount") == 0) {
+		ret = manual_mount(argc, argv);
+	} else if (strcmp(progname, "umount") == 0) {
+		ret = manual_unmount(argc, argv);
+	} else {
+		/*
+		 * Make sure the user has specified some command.
+		 */
+		if (argc < 2) {
+			(void) fprintf(stderr, gettext("missing command\n"));
+			usage(B_FALSE);
+		}
+
+		cmdname = argv[1];
+
+		/*
+		 * The 'umount' command is an alias for 'unmount'
+		 */
+		if (strcmp(cmdname, "umount") == 0)
+			cmdname = "unmount";
+
+		/*
+		 * The 'recv' command is an alias for 'receive'
+		 */
+		if (strcmp(cmdname, "recv") == 0)
+			cmdname = "receive";
+
+		/*
+		 * Special case '-?'
+		 */
+		if (strcmp(cmdname, "-?") == 0)
+			usage(B_TRUE);
+
+		/*
+		 * Run the appropriate command.
+		 */
+		libzfs_mnttab_cache(g_zfs, B_TRUE);
+		if (find_command_idx(cmdname, &i) == 0) {
+			current_command = &command_table[i];
+			ret = command_table[i].func(argc - 1, argv + 1);
+		} else if (strchr(cmdname, '=') != NULL) {
+			/* "zfs prop=value ..." shorthand dispatches to 'set' */
+			verify(find_command_idx("set", &i) == 0);
+			current_command = &command_table[i];
+			ret = command_table[i].func(argc, argv);
+		} else {
+			(void) fprintf(stderr, gettext("unrecognized "
+			    "command '%s'\n"), cmdname);
+			usage(B_FALSE);
+		}
+		libzfs_mnttab_cache(g_zfs, B_FALSE);
+	}
+
+	(void) fclose(mnttab_file);
+
+	libzfs_fini(g_zfs);
+
+	/*
+	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
+	 * for the purposes of running ::findleaks.
+	 */
+	if (getenv("ZFS_ABORT") != NULL) {
+		(void) printf("dumping core by request\n");
+		abort();
+	}
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_util.h b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ddff9e22d7db7014b99f3150d3408c63d1409b2
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zfs/zfs_util.h
@@ -0,0 +1,42 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef	_ZFS_UTIL_H
+#define	_ZFS_UTIL_H
+
+#include <libzfs.h>
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/* malloc() wrapper -- presumably fails via nomem(); verify in zfs_main.c */
+void * safe_malloc(size_t size);
+/* report an out-of-memory condition (implementation elsewhere) */
+void nomem(void);
+/*
+ * NOTE(review): this is a tentative definition, not an 'extern'
+ * declaration, so every .c file including this header emits its own
+ * common symbol for g_zfs.  That links only under -fcommon semantics;
+ * confirm with toolchains that default to -fno-common.
+ */
+libzfs_handle_t *g_zfs;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _ZFS_UTIL_H */
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zhack/zhack.c b/bsd/cddl/contrib/opensolaris/cmd/zhack/zhack.c
new file mode 100644
index 0000000000000000000000000000000000000000..2618cea32b414f2df1c19b0eb6d665db5d3ae9ae
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zhack/zhack.c
@@ -0,0 +1,533 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+/*
+ * zhack is a debugging tool that can write changes to ZFS pool using libzpool
+ * for testing purposes. Altering pools with zhack is unsupported and may
+ * result in corrupted pools.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <sys/zfs_context.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/dmu.h>
+#include <sys/zap.h>
+#include <sys/zfs_znode.h>
+#include <sys/dsl_synctask.h>
+#include <sys/vdev.h>
+#include <sys/fs/zfs.h>
+#include <sys/dmu_objset.h>
+#include <sys/dsl_pool.h>
+#include <sys/zio_checksum.h>
+#include <sys/zio_compress.h>
+#include <sys/zfeature.h>
+#undef ZFS_MAXNAMELEN
+#undef verify
+#include <libzfs.h>
+
+extern boolean_t zfeature_checks_disable;
+
+/* Program name used in error and usage messages. */
+const char cmdname[] = "zhack";
+libzfs_handle_t *g_zfs;
+/* Import search parameters accumulated from the -c/-d options. */
+static importargs_t g_importargs;
+/* Pool name derived from the target dataset; set by import_pool(). */
+static char *g_pool;
+/* Set by import_pool(); suppresses the export-on-exit in main(). */
+static boolean_t g_readonly;
+
+/*
+ * Print the command synopsis to stderr and exit(1).  Documents every
+ * option the subcommand parsers accept, including 'feature enable -r'
+ * (previously undocumented), which marks the injected feature as
+ * readonly-compatible.
+ */
+static void
+usage(void)
+{
+	(void) fprintf(stderr,
+	    "Usage: %s [-c cachefile] [-d dir] <subcommand> <args> ...\n"
+	    "where <subcommand> <args> is one of the following:\n"
+	    "\n", cmdname);
+
+	(void) fprintf(stderr,
+	    "    feature stat <pool>\n"
+	    "        print information about enabled features\n"
+	    "    feature enable [-r] [-d desc] <pool> <feature>\n"
+	    "        add a new enabled feature to the pool\n"
+	    "        -d <desc> sets the feature's description\n"
+	    "        -r marks the feature as readonly-compatible\n"
+	    "    feature ref [-md] <pool> <feature>\n"
+	    "        change the refcount on the given feature\n"
+	    "        -d decrease instead of increase the refcount\n"
+	    "        -m add the feature to the label if increasing refcount\n"
+	    "\n"
+	    "    <feature> : should be a feature guid\n");
+	exit(1);
+}
+
+
+static void
+fatal(const char *fmt, ...)
+{
+	/* Print "zhack: <formatted message>\n" to stderr, then exit(1). */
+	va_list ap;
+
+	va_start(ap, fmt);
+	(void) fprintf(stderr, "%s: ", cmdname);
+	(void) vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	(void) fprintf(stderr, "\n");
+
+	exit(1);
+}
+
+/* ARGSUSED */
+static int
+space_delta_cb(dmu_object_type_t bonustype, void *data,
+    uint64_t *userp, uint64_t *groupp)
+{
+	/*
+	 * Only znode and SA bonus types carry user accounting; anything
+	 * else is not tracked.
+	 */
+	boolean_t tracked =
+	    (bonustype == DMU_OT_ZNODE || bonustype == DMU_OT_SA);
+
+	if (!tracked)
+		return (ENOENT);
+	/* zhack must never dirty an object that needs accounting updates. */
+	(void) fprintf(stderr, "modifying object that needs user accounting");
+	abort();
+	/* NOTREACHED */
+}
+
+/*
+ * Target is the dataset whose pool we want to open.
+ *
+ * Imports that pool into this process's userland SPA namespace; any
+ * failure is fatal (exits).  Side effects: boots the libzpool "kernel",
+ * initializes g_zfs, and records g_pool/g_readonly for the
+ * export-on-exit logic in main().
+ */
+static void
+import_pool(const char *target, boolean_t readonly)
+{
+	nvlist_t *config;
+	nvlist_t *pools;
+	int error;
+	char *sepp;
+	spa_t *spa;
+	nvpair_t *elem;
+	nvlist_t *props;
+	const char *name;
+
+	/* Boot libzpool before any SPA/DMU calls; writable unless readonly. */
+	kernel_init(readonly ? FREAD : (FREAD | FWRITE));
+	g_zfs = libzfs_init();
+	ASSERT(g_zfs != NULL);
+
+	dmu_objset_register_type(DMU_OST_ZFS, space_delta_cb);
+
+	g_readonly = readonly;
+
+	/*
+	 * If we only want readonly access, it's OK if we find
+	 * a potentially-active (ie, imported into the kernel) pool from the
+	 * default cachefile.
+	 */
+	if (readonly && spa_open(target, &spa, FTAG) == 0) {
+		spa_close(spa, FTAG);
+		return;
+	}
+
+	g_importargs.unique = B_TRUE;
+	g_importargs.can_be_active = readonly;
+	/* Pool name is the dataset name up to the first '/' or '@'. */
+	g_pool = strdup(target);
+	if ((sepp = strpbrk(g_pool, "/@")) != NULL)
+		*sepp = '\0';
+	g_importargs.poolname = g_pool;
+	pools = zpool_search_import(g_zfs, &g_importargs);
+
+	if (pools == NULL || nvlist_next_nvpair(pools, NULL) == NULL) {
+		/*
+		 * Retry allowing active pools solely to produce a better
+		 * error: distinguish "active elsewhere" from "no such pool".
+		 */
+		if (!g_importargs.can_be_active) {
+			g_importargs.can_be_active = B_TRUE;
+			if (zpool_search_import(g_zfs, &g_importargs) != NULL ||
+			    spa_open(target, &spa, FTAG) == 0) {
+				fatal("cannot import '%s': pool is active; run "
+				    "\"zpool export %s\" first\n",
+				    g_pool, g_pool);
+			}
+		}
+
+		fatal("cannot import '%s': no such pool available\n", g_pool);
+	}
+
+	elem = nvlist_next_nvpair(pools, NULL);
+	name = nvpair_name(elem);
+	verify(nvpair_value_nvlist(elem, &config) == 0);
+
+	props = NULL;
+	if (readonly) {
+		verify(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
+		verify(nvlist_add_uint64(props,
+		    zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
+	}
+
+	/* Unknown features must not block a debugging import. */
+	zfeature_checks_disable = B_TRUE;
+	error = spa_import(name, config, props, ZFS_IMPORT_NORMAL);
+	zfeature_checks_disable = B_FALSE;
+	/* EEXIST means the pool was already imported -- acceptable here. */
+	if (error == EEXIST)
+		error = 0;
+
+	if (error)
+		fatal("can't import '%s': %s", name, strerror(error));
+}
+
+/*
+ * Import (if needed) and open the pool containing 'target'.  Exits via
+ * fatal() on open failure or if the pool predates feature flags.
+ */
+static void
+zhack_spa_open(const char *target, boolean_t readonly, void *tag, spa_t **spa)
+{
+	int error;
+
+	import_pool(target, readonly);
+
+	/* Bypass feature checks so pools with unknown features still open. */
+	zfeature_checks_disable = B_TRUE;
+	error = spa_open(target, spa, tag);
+	zfeature_checks_disable = B_FALSE;
+
+	if (error != 0)
+		fatal("cannot open '%s': %s", target, strerror(error));
+	if (spa_version(*spa) >= SPA_VERSION_FEATURES)
+		return;
+	fatal("'%s' has version %d, features not enabled", target,
+	    (int)spa_version(*spa));
+}
+
+/*
+ * Print every entry of the given ZAP object.  Entries with 8-byte
+ * integers print as numbers; entries with 1-byte width print as strings.
+ */
+static void
+dump_obj(objset_t *os, uint64_t obj, const char *name)
+{
+	zap_cursor_t cursor;
+	zap_attribute_t attr;
+
+	(void) printf("%s_obj:\n", name);
+
+	zap_cursor_init(&cursor, os, obj);
+	while (zap_cursor_retrieve(&cursor, &attr) == 0) {
+		if (attr.za_integer_length != 8) {
+			char val[1024];
+
+			ASSERT(attr.za_integer_length == 1);
+			VERIFY(zap_lookup(os, obj, attr.za_name,
+			    1, sizeof (val), val) == 0);
+			(void) printf("\t%s = %s\n", attr.za_name, val);
+		} else {
+			ASSERT(attr.za_num_integers == 1);
+			(void) printf("\t%s = %llu\n",
+			    attr.za_name, (u_longlong_t)attr.za_first_integer);
+		}
+		zap_cursor_advance(&cursor);
+	}
+	zap_cursor_fini(&cursor);
+}
+
+/*
+ * Print the feature names recorded in the pool's label config nvlist.
+ */
+static void
+dump_mos(spa_t *spa)
+{
+	nvlist_t *features = spa->spa_label_features;
+	nvpair_t *pair = NULL;
+
+	(void) printf("label config:\n");
+	while ((pair = nvlist_next_nvpair(features, pair)) != NULL)
+		(void) printf("\t%s\n", nvpair_name(pair));
+}
+
+/*
+ * zhack feature stat <pool>
+ *
+ * Dump the for_read/for_write/descriptions feature objects and the
+ * feature names stored in the pool labels.
+ */
+static void
+zhack_do_feature_stat(int argc, char **argv)
+{
+	spa_t *spa;
+	objset_t *mos;
+	char *target;
+
+	/* Skip the "stat" subcommand word itself. */
+	argc--;
+	argv++;
+
+	if (argc < 1) {
+		(void) fprintf(stderr, "error: missing pool name\n");
+		usage();
+	}
+	target = argv[0];
+
+	/* A read-only open is sufficient for dumping state. */
+	zhack_spa_open(target, B_TRUE, FTAG, &spa);
+	mos = spa->spa_meta_objset;
+
+	dump_obj(mos, spa->spa_feat_for_read_obj, "for_read");
+	dump_obj(mos, spa->spa_feat_for_write_obj, "for_write");
+	dump_obj(mos, spa->spa_feat_desc_obj, "descriptions");
+	dump_mos(spa);
+
+	spa_close(spa, FTAG);
+}
+
+/* dsl_sync_task callback: enable the feature in syncing context. */
+static void
+feature_enable_sync(void *arg1, void *arg2, dmu_tx_t *tx)
+{
+	spa_feature_enable((spa_t *)arg1, (zfeature_info_t *)arg2, tx);
+}
+
+/*
+ * zhack feature enable [-r] [-d desc] <pool> <feature-guid>
+ *
+ * Inject a fake enabled feature (refcount 0) into the pool's MOS.
+ */
+static void
+zhack_do_feature_enable(int argc, char **argv)
+{
+	/*
+	 * 'c' must be an int, not a char: getopt() returns -1 at the end
+	 * of the options, which a plain char cannot represent portably
+	 * (char may be unsigned).
+	 */
+	int c;
+	char *desc, *target;
+	spa_t *spa;
+	objset_t *mos;
+	zfeature_info_t feature;
+	zfeature_info_t *nodeps[] = { NULL };
+
+	/*
+	 * Features are not added to the pool's label until their refcounts
+	 * are incremented, so fi_mos can just be left as false for now.
+	 */
+	desc = NULL;
+	feature.fi_uname = "zhack";
+	feature.fi_mos = B_FALSE;
+	feature.fi_can_readonly = B_FALSE;
+	feature.fi_depends = nodeps;
+
+	optind = 1;
+	/*
+	 * NOTE: 'm' is in the option string but has no case below, so -m
+	 * falls through to usage().
+	 */
+	while ((c = getopt(argc, argv, "rmd:")) != -1) {
+		switch (c) {
+		case 'r':	/* mark the feature readonly-compatible */
+			feature.fi_can_readonly = B_TRUE;
+			break;
+		case 'd':	/* custom description (strdup'd, freed below) */
+			desc = strdup(optarg);
+			break;
+		default:
+			usage();
+			break;
+		}
+	}
+
+	if (desc == NULL)
+		desc = strdup("zhack injected");
+	feature.fi_desc = desc;
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 2) {
+		(void) fprintf(stderr, "error: missing feature or pool name\n");
+		usage();
+	}
+	target = argv[0];
+	feature.fi_guid = argv[1];
+
+	if (!zfeature_is_valid_guid(feature.fi_guid))
+		fatal("invalid feature guid: %s", feature.fi_guid);
+
+	zhack_spa_open(target, B_FALSE, FTAG, &spa);
+	mos = spa->spa_meta_objset;
+
+	/* Pass the guid: the format string consumes one %s argument. */
+	if (0 == zfeature_lookup_guid(feature.fi_guid, NULL))
+		fatal("'%s' is a real feature, will not enable",
+		    feature.fi_guid);
+	if (0 == zap_contains(mos, spa->spa_feat_desc_obj, feature.fi_guid))
+		fatal("feature already enabled: %s", feature.fi_guid);
+
+	VERIFY3U(0, ==, dsl_sync_task_do(spa->spa_dsl_pool, NULL,
+	    feature_enable_sync, spa, &feature, 5));
+
+	spa_close(spa, FTAG);
+
+	free(desc);
+}
+
+/* dsl_sync_task callback: bump the feature refcount in syncing context. */
+static void
+feature_incr_sync(void *arg1, void *arg2, dmu_tx_t *tx)
+{
+	spa_feature_incr((spa_t *)arg1, (zfeature_info_t *)arg2, tx);
+}
+
+/* dsl_sync_task callback: drop the feature refcount in syncing context. */
+static void
+feature_decr_sync(void *arg1, void *arg2, dmu_tx_t *tx)
+{
+	spa_feature_decr((spa_t *)arg1, (zfeature_info_t *)arg2, tx);
+}
+
+/*
+ * zhack feature ref [-md] <pool> <feature-guid>
+ *
+ * Increment (default) or decrement (-d) the refcount of an injected
+ * feature; -m also records the feature in the pool labels.
+ */
+static void
+zhack_do_feature_ref(int argc, char **argv)
+{
+	/*
+	 * 'c' must be an int, not a char: getopt() returns -1 at the end
+	 * of the options, which a plain char cannot represent portably.
+	 */
+	int c;
+	char *target;
+	boolean_t decr = B_FALSE;
+	spa_t *spa;
+	objset_t *mos;
+	zfeature_info_t feature;
+	zfeature_info_t *nodeps[] = { NULL };
+
+	/*
+	 * fi_desc does not matter here because it was written to disk
+	 * when the feature was enabled, but we need to properly set the
+	 * feature for read or write based on the information we read off
+	 * disk later.
+	 */
+	feature.fi_uname = "zhack";
+	feature.fi_mos = B_FALSE;
+	feature.fi_desc = NULL;
+	feature.fi_depends = nodeps;
+
+	optind = 1;
+	while ((c = getopt(argc, argv, "md")) != -1) {
+		switch (c) {
+		case 'm':	/* also add the feature to the label */
+			feature.fi_mos = B_TRUE;
+			break;
+		case 'd':	/* decrement instead of increment */
+			decr = B_TRUE;
+			break;
+		default:
+			usage();
+			break;
+		}
+	}
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 2) {
+		(void) fprintf(stderr, "error: missing feature or pool name\n");
+		usage();
+	}
+	target = argv[0];
+	feature.fi_guid = argv[1];
+
+	if (!zfeature_is_valid_guid(feature.fi_guid))
+		fatal("invalid feature guid: %s", feature.fi_guid);
+
+	zhack_spa_open(target, B_FALSE, FTAG, &spa);
+	mos = spa->spa_meta_objset;
+
+	/* Pass the guid: the format string consumes one %s argument. */
+	if (0 == zfeature_lookup_guid(feature.fi_guid, NULL))
+		fatal("'%s' is a real feature, will not change refcount",
+		    feature.fi_guid);
+
+	/* Determine read vs. write feature from where its refcount lives. */
+	if (0 == zap_contains(mos, spa->spa_feat_for_read_obj,
+	    feature.fi_guid)) {
+		feature.fi_can_readonly = B_FALSE;
+	} else if (0 == zap_contains(mos, spa->spa_feat_for_write_obj,
+	    feature.fi_guid)) {
+		feature.fi_can_readonly = B_TRUE;
+	} else {
+		fatal("feature is not enabled: %s", feature.fi_guid);
+	}
+
+	if (decr && !spa_feature_is_active(spa, &feature))
+		fatal("feature refcount already 0: %s", feature.fi_guid);
+
+	VERIFY3U(0, ==, dsl_sync_task_do(spa->spa_dsl_pool, NULL,
+	    decr ? feature_decr_sync : feature_incr_sync, spa, &feature, 5));
+
+	spa_close(spa, FTAG);
+}
+
+/*
+ * Dispatch "zhack feature <subcommand>" to the matching handler.
+ */
+static int
+zhack_do_feature(int argc, char **argv)
+{
+	char *verb;
+
+	/* Drop the "feature" word itself. */
+	argc--;
+	argv++;
+	if (argc == 0) {
+		(void) fprintf(stderr,
+		    "error: no feature operation specified\n");
+		usage();
+	}
+
+	verb = argv[0];
+	if (strcmp(verb, "stat") == 0)
+		zhack_do_feature_stat(argc, argv);
+	else if (strcmp(verb, "enable") == 0)
+		zhack_do_feature_enable(argc, argv);
+	else if (strcmp(verb, "ref") == 0)
+		zhack_do_feature_ref(argc, argv);
+	else {
+		(void) fprintf(stderr, "error: unknown subcommand: %s\n",
+		    verb);
+		usage();
+	}
+
+	return (0);
+}
+
+#define	MAX_NUM_PATHS 1024
+
+/*
+ * zhack entry point: parse global -c/-d options, dispatch the
+ * subcommand, then export the pool so changes reach disk (unless the
+ * pool was opened read-only).
+ */
+int
+main(int argc, char **argv)
+{
+	extern void zfs_prop_init(void);
+
+	char *path[MAX_NUM_PATHS];
+	const char *subcommand;
+	int rv = 0;
+	/*
+	 * Must be an int, not a char: getopt() returns -1 at the end of
+	 * the options, which a plain (possibly unsigned) char can never
+	 * compare equal to.
+	 */
+	int c;
+
+	g_importargs.path = path;
+
+	dprintf_setup(&argc, argv);
+	zfs_prop_init();
+
+	while ((c = getopt(argc, argv, "c:d:")) != -1) {
+		switch (c) {
+		case 'c':	/* alternate import cachefile */
+			g_importargs.cachefile = optarg;
+			break;
+		case 'd':	/* additional device search directory */
+			assert(g_importargs.paths < MAX_NUM_PATHS);
+			g_importargs.path[g_importargs.paths++] = optarg;
+			break;
+		default:
+			usage();
+			break;
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+	optind = 1;
+
+	if (argc == 0) {
+		(void) fprintf(stderr, "error: no command specified\n");
+		usage();
+	}
+
+	subcommand = argv[0];
+
+	if (strcmp(subcommand, "feature") == 0) {
+		rv = zhack_do_feature(argc, argv);
+	} else {
+		(void) fprintf(stderr, "error: unknown subcommand: %s\n",
+		    subcommand);
+		usage();
+	}
+
+	/* Commit changes to disk by exporting, unless opened read-only. */
+	if (!g_readonly && spa_export(g_pool, NULL, B_TRUE, B_TRUE) != 0) {
+		fatal("pool export failed; "
+		    "changes may not be committed to disk\n");
+	}
+
+	libzfs_fini(g_zfs);
+	kernel_fini();
+
+	return (rv);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zinject/translate.c b/bsd/cddl/contrib/opensolaris/cmd/zinject/translate.c
new file mode 100644
index 0000000000000000000000000000000000000000..442f220c442ae565b7dfc848a3219ca3e319d631
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zinject/translate.c
@@ -0,0 +1,477 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <libzfs.h>
+
+#include <sys/zfs_context.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <sys/file.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/param.h>
+#include <sys/stat.h>
+
+#include <sys/dmu.h>
+#include <sys/dmu_objset.h>
+#include <sys/dnode.h>
+#include <sys/vdev_impl.h>
+
+#include "zinject.h"
+
+/* libzpool userland "kernel" bring-up/teardown, implemented in libzpool. */
+extern void kernel_init(int);
+extern void kernel_fini(void);
+
+/* Nonzero when ZINJECT_DEBUG is set (see translate_record()); gates ziprintf(). */
+static int debug;
+
+/*
+ * Debug-only printf: emits nothing unless the ZINJECT_DEBUG environment
+ * variable was set (recorded in 'debug' by translate_record()).
+ */
+static void
+ziprintf(const char *fmt, ...)
+{
+	va_list args;
+
+	if (debug) {
+		va_start(args, fmt);
+		(void) vprintf(fmt, args);
+		va_end(args);
+	}
+}
+
+/*
+ * Copy 'src' into 'dest', collapsing each run of consecutive '/'
+ * characters down to a single '/'.  'dest' must be at least as large as
+ * 'src' (the output never exceeds the input length).
+ */
+static void
+compress_slashes(const char *src, char *dest)
+{
+	char prev = '\0';
+
+	for (; *src != '\0'; src++) {
+		if (*src == '/' && prev == '/')
+			continue;	/* skip repeated slash */
+		prev = *src;
+		*dest++ = *src;
+	}
+	*dest = '\0';
+}
+
+/*
+ * Given a full path to a file, translate into a dataset name and a relative
+ * path within the dataset.  'dataset' must be at least MAXNAMELEN characters,
+ * and 'relpath' must be at least MAXPATHLEN characters.  We also pass a stat64
+ * buffer, which we need later to get the object ID.
+ */
+static int
+parse_pathname(const char *inpath, char *dataset, char *relpath,
+    struct stat64 *statbuf)
+{
+	struct statfs sfs;
+	const char *rel;
+	char fullpath[MAXPATHLEN];
+
+	/*
+	 * Bounds-check before copying: compress_slashes() does not limit
+	 * its output length, but it never grows the string, so rejecting
+	 * over-long input here prevents overflowing fullpath[].  (The
+	 * original checked the length only after the copy.)
+	 */
+	if (strlen(inpath) >= MAXPATHLEN) {
+		(void) fprintf(stderr, "invalid object; pathname too long\n");
+		return (-1);
+	}
+
+	compress_slashes(inpath, fullpath);
+
+	if (fullpath[0] != '/') {
+		(void) fprintf(stderr, "invalid object '%s': must be full "
+		    "path\n", fullpath);
+		usage();
+		return (-1);
+	}
+
+	if (stat64(fullpath, statbuf) != 0) {
+		(void) fprintf(stderr, "cannot open '%s': %s\n",
+		    fullpath, strerror(errno));
+		return (-1);
+	}
+
+	/* Find the filesystem the path lives on ... */
+	if (statfs(fullpath, &sfs) == -1) {
+		(void) fprintf(stderr, "cannot find mountpoint for '%s': %s\n",
+		    fullpath, strerror(errno));
+		return (-1);
+	}
+
+	/* ... and make sure it is ZFS. */
+	if (strcmp(sfs.f_fstypename, MNTTYPE_ZFS) != 0) {
+		(void) fprintf(stderr, "invalid path '%s': not a ZFS "
+		    "filesystem\n", fullpath);
+		return (-1);
+	}
+
+	if (strncmp(fullpath, sfs.f_mntonname, strlen(sfs.f_mntonname)) != 0) {
+		(void) fprintf(stderr, "invalid path '%s': mountpoint "
+		    "doesn't match path\n", fullpath);
+		return (-1);
+	}
+
+	/* The dataset name is what the filesystem was mounted from. */
+	(void) strcpy(dataset, sfs.f_mntfromname);
+
+	/* Path relative to the dataset root, without a leading '/'. */
+	rel = fullpath + strlen(sfs.f_mntonname);
+	if (rel[0] == '/')
+		rel++;
+	(void) strcpy(relpath, rel);
+
+	return (0);
+}
+
+/*
+ * Convert from a (dataset, path) pair into a (objset, object) pair.  Note that
+ * we grab the object number from the inode number, since looking this up via
+ * libzpool is a real pain.
+ */
+/* ARGSUSED */
+static int
+object_from_path(const char *dataset, const char *path, struct stat64 *statbuf,
+    zinject_record_t *record)
+{
+	objset_t *os;
+	int error;
+
+	/*
+	 * Before doing any libzpool operations, call sync() to ensure that the
+	 * on-disk state is consistent with the in-core state.
+	 */
+	sync();
+
+	error = dmu_objset_own(dataset, DMU_OST_ZFS, B_TRUE, FTAG, &os);
+	if (error != 0) {
+		(void) fprintf(stderr, "cannot open dataset '%s': %s\n",
+		    dataset, strerror(error));
+		return (-1);
+	}
+
+	/* The object number is taken from the file's inode number. */
+	record->zi_objset = dmu_objset_id(os);
+	record->zi_object = statbuf->st_ino;
+
+	dmu_objset_disown(os, FTAG);
+
+	return (0);
+}
+
+/*
+ * Calculate the real range based on the type, level, and range given.
+ *
+ * Parses the "start[,end]" byte range (or defaults to the whole object),
+ * adjusts it for the error type, then converts bytes to block ids at the
+ * requested level of indirection.  Returns 0 on success, -1 on error.
+ */
+static int
+calculate_range(const char *dataset, err_type_t type, int level, char *range,
+    zinject_record_t *record)
+{
+	objset_t *os = NULL;
+	dnode_t *dn = NULL;
+	int err;
+	int ret = -1;
+
+	/*
+	 * Determine the numeric range from the string.
+	 */
+	if (range == NULL) {
+		/*
+		 * If range is unspecified, set the range to [0,-1], which
+		 * indicates that the whole object should be treated as an
+		 * error.
+		 */
+		record->zi_start = 0;
+		record->zi_end = -1ULL;
+	} else {
+		char *end;
+
+		/* XXX add support for suffixes */
+		record->zi_start = strtoull(range, &end, 10);
+
+
+		if (*end == '\0')
+			record->zi_end = record->zi_start + 1;
+		else if (*end == ',')
+			record->zi_end = strtoull(end + 1, &end, 10);
+
+		if (*end != '\0') {
+			(void) fprintf(stderr, "invalid range '%s': must be "
+			    "a numeric range of the form 'start[,end]'\n",
+			    range);
+			goto out;
+		}
+	}
+
+	/* Type-specific adjustment of the raw byte range. */
+	switch (type) {
+	case TYPE_DATA:
+		break;
+
+	case TYPE_DNODE:
+		/*
+		 * If this is a request to inject faults into the dnode, then we
+		 * must translate the current (objset,object) pair into an
+		 * offset within the metadnode for the objset.  Specifying any
+		 * kind of range with type 'dnode' is illegal.
+		 */
+		if (range != NULL) {
+			(void) fprintf(stderr, "range cannot be specified when "
+			    "type is 'dnode'\n");
+			goto out;
+		}
+
+		record->zi_start = record->zi_object * sizeof (dnode_phys_t);
+		record->zi_end = record->zi_start + sizeof (dnode_phys_t);
+		record->zi_object = 0;
+		break;
+	}
+
+	/*
+	 * Get the dnode associated with object, so we can calculate the block
+	 * size.
+	 */
+	if ((err = dmu_objset_own(dataset, DMU_OST_ANY,
+	    B_TRUE, FTAG, &os)) != 0) {
+		(void) fprintf(stderr, "cannot open dataset '%s': %s\n",
+		    dataset, strerror(err));
+		goto out;
+	}
+
+	/* Object 0 denotes the objset's metadnode itself. */
+	if (record->zi_object == 0) {
+		dn = DMU_META_DNODE(os);
+	} else {
+		err = dnode_hold(os, record->zi_object, FTAG, &dn);
+		if (err != 0) {
+			(void) fprintf(stderr, "failed to hold dnode "
+			    "for object %llu\n",
+			    (u_longlong_t)record->zi_object);
+			goto out;
+		}
+	}
+
+
+	ziprintf("data shift: %d\n", (int)dn->dn_datablkshift);
+	ziprintf(" ind shift: %d\n", (int)dn->dn_indblkshift);
+
+	/*
+	 * Translate range into block IDs.
+	 */
+	if (record->zi_start != 0 || record->zi_end != -1ULL) {
+		record->zi_start >>= dn->dn_datablkshift;
+		record->zi_end >>= dn->dn_datablkshift;
+	}
+
+	/*
+	 * Check level, and then translate level 0 blkids into ranges
+	 * appropriate for level of indirection.
+	 */
+	record->zi_level = level;
+	if (level > 0) {
+		ziprintf("level 0 blkid range: [%llu, %llu]\n",
+		    record->zi_start, record->zi_end);
+
+		if (level >= dn->dn_nlevels) {
+			(void) fprintf(stderr, "level %d exceeds max level "
+			    "of object (%d)\n", level, dn->dn_nlevels - 1);
+			goto out;
+		}
+
+		/* Each indirect level covers 2^shift level-0 block ids. */
+		if (record->zi_start != 0 || record->zi_end != 0) {
+			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
+
+			for (; level > 0; level--) {
+				record->zi_start >>= shift;
+				record->zi_end >>= shift;
+			}
+		}
+	}
+
+	ret = 0;
+out:
+	/* Release the dnode unless it's the embedded metadnode. */
+	if (dn) {
+		if (dn != DMU_META_DNODE(os))
+			dnode_rele(dn, FTAG);
+	}
+	if (os)
+		dmu_objset_disown(os, FTAG);
+
+	return (ret);
+}
+
+/*
+ * Translate a user-visible error specification (type, object path, byte
+ * range, indirection level) into the kernel's zinject_record_t.  On
+ * success the pool name and dataset name are copied into the
+ * caller-supplied buffers.  Returns 0 on success, -1 on failure.
+ */
+int
+translate_record(err_type_t type, const char *object, const char *range,
+    int level, zinject_record_t *record, char *poolname, char *dataset)
+{
+	char path[MAXPATHLEN];
+	char *slash;
+	struct stat64 statbuf;
+	int ret = -1;
+
+	kernel_init(FREAD);
+
+	/* ziprintf() output is enabled via the ZINJECT_DEBUG environment */
+	debug = (getenv("ZINJECT_DEBUG") != NULL);
+
+	ziprintf("translating: %s\n", object);
+
+	if (MOS_TYPE(type)) {
+		/*
+		 * MOS objects are treated specially.  Rather than a specific
+		 * (objset, object) pair, only the DMU object type is recorded
+		 * and 'object' is interpreted as the pool name itself.
+		 */
+		switch (type) {
+		case TYPE_MOS:
+			record->zi_type = 0;
+			break;
+		case TYPE_MOSDIR:
+			record->zi_type = DMU_OT_OBJECT_DIRECTORY;
+			break;
+		case TYPE_METASLAB:
+			record->zi_type = DMU_OT_OBJECT_ARRAY;
+			break;
+		case TYPE_CONFIG:
+			record->zi_type = DMU_OT_PACKED_NVLIST;
+			break;
+		case TYPE_BPOBJ:
+			record->zi_type = DMU_OT_BPOBJ;
+			break;
+		case TYPE_SPACEMAP:
+			record->zi_type = DMU_OT_SPACE_MAP;
+			break;
+		case TYPE_ERRLOG:
+			record->zi_type = DMU_OT_ERROR_LOG;
+			break;
+		}
+
+		dataset[0] = '\0';
+		(void) strcpy(poolname, object);
+		return (0);
+	}
+
+	/*
+	 * Convert a full path into a (dataset, file) pair.
+	 */
+	if (parse_pathname(object, dataset, path, &statbuf) != 0)
+		goto err;
+
+	ziprintf("   dataset: %s\n", dataset);
+	ziprintf("      path: %s\n", path);
+
+	/*
+	 * Convert (dataset, file) into (objset, object)
+	 */
+	if (object_from_path(dataset, path, &statbuf, record) != 0)
+		goto err;
+
+	ziprintf("raw objset: %llu\n", record->zi_objset);
+	ziprintf("raw object: %llu\n", record->zi_object);
+
+	/*
+	 * For the given object, calculate the real (type, level, range)
+	 */
+	if (calculate_range(dataset, type, level, (char *)range, record) != 0)
+		goto err;
+
+	ziprintf("    objset: %llu\n", record->zi_objset);
+	ziprintf("    object: %llu\n", record->zi_object);
+	if (record->zi_start == 0 &&
+	    record->zi_end == -1ULL)
+		ziprintf("     range: all\n");
+	else
+		ziprintf("     range: [%llu, %llu]\n", record->zi_start,
+		    record->zi_end);
+
+	/*
+	 * Copy the pool name: everything up to the first '/' of the dataset
+	 * name.  NOTE(review): assumes poolname can hold the dataset name
+	 * (unbounded strcpy) -- callers pass MAXNAMELEN buffers.
+	 */
+	(void) strcpy(poolname, dataset);
+	if ((slash = strchr(poolname, '/')) != NULL)
+		*slash = '\0';
+
+	ret = 0;
+
+err:
+	kernel_fini();
+	return (ret);
+}
+
+/*
+ * Parse a raw bookmark specification into the injection record.
+ * Returns 0 on success, -1 on a malformed specification.
+ */
+int
+translate_raw(const char *str, zinject_record_t *record)
+{
+	/*
+	 * A raw bookmark of the form objset:object:level:blkid, where each
+	 * number is a hexadecimal value.
+	 */
+	if (sscanf(str, "%llx:%llx:%x:%llx", (u_longlong_t *)&record->zi_objset,
+	    (u_longlong_t *)&record->zi_object, &record->zi_level,
+	    (u_longlong_t *)&record->zi_start) != 4) {
+		(void) fprintf(stderr, "bad raw spec '%s': must be of the form "
+		    "'objset:object:level:blkid'\n", str);
+		return (-1);
+	}
+
+	/* only a single block can be specified in raw form */
+	record->zi_end = record->zi_start;
+
+	return (0);
+}
+
+/*
+ * Translate a device name (or vdev GUID) in the given pool into an
+ * injection record with zi_guid set.  For label fault types, also
+ * restrict the injection to the requested region of the vdev label.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+translate_device(const char *pool, const char *device, err_type_t label_type,
+    zinject_record_t *record)
+{
+	char *end;
+	zpool_handle_t *zhp;
+	nvlist_t *tgt;
+	boolean_t isspare, iscache;
+
+	/*
+	 * Given a device name or GUID, create an appropriate injection record
+	 * with zi_guid set.
+	 */
+	if ((zhp = zpool_open(g_zfs, pool)) == NULL)
+		return (-1);
+
+	record->zi_guid = strtoull(device, &end, 16);
+	if (record->zi_guid == 0 || *end != '\0') {
+		tgt = zpool_find_vdev(zhp, device, &isspare, &iscache, NULL);
+
+		if (tgt == NULL) {
+			(void) fprintf(stderr, "cannot find device '%s' in "
+			    "pool '%s'\n", device, pool);
+			zpool_close(zhp);	/* was leaked on this path */
+			return (-1);
+		}
+
+		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
+		    &record->zi_guid) == 0);
+	}
+
+	/*
+	 * [zi_start, zi_end] is an inclusive byte range within the label.
+	 */
+	switch (label_type) {
+	case TYPE_LABEL_UBERBLOCK:
+		record->zi_start = offsetof(vdev_label_t, vl_uberblock[0]);
+		record->zi_end = record->zi_start + VDEV_UBERBLOCK_RING - 1;
+		break;
+	case TYPE_LABEL_NVLIST:
+		record->zi_start = offsetof(vdev_label_t, vl_vdev_phys);
+		record->zi_end = record->zi_start + VDEV_PHYS_SIZE - 1;
+		break;
+	case TYPE_LABEL_PAD1:
+		record->zi_start = offsetof(vdev_label_t, vl_pad1);
+		record->zi_end = record->zi_start + VDEV_PAD_SIZE - 1;
+		break;
+	case TYPE_LABEL_PAD2:
+		record->zi_start = offsetof(vdev_label_t, vl_pad2);
+		record->zi_end = record->zi_start + VDEV_PAD_SIZE - 1;
+		break;
+	default:
+		break;	/* not a label fault; leave the range untouched */
+	}
+
+	zpool_close(zhp);	/* was leaked on success as well */
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zinject/zinject.c b/bsd/cddl/contrib/opensolaris/cmd/zinject/zinject.c
new file mode 100644
index 0000000000000000000000000000000000000000..d17ed534e329ace6c78ec27636ec65a8e6b2e10c
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zinject/zinject.c
@@ -0,0 +1,973 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * ZFS Fault Injector
+ *
+ * This userland component takes a set of options and uses libzpool to translate
+ * from a user-visible object type and name to an internal representation.
+ * There are two basic types of faults: device faults and data faults.
+ *
+ *
+ * DEVICE FAULTS
+ *
+ * Errors can be injected into a particular vdev using the '-d' option.  This
+ * option takes a path or vdev GUID to uniquely identify the device within a
+ * pool.  There are two types of errors that can be injected, EIO and ENXIO,
+ * that can be controlled through the '-e' option.  The default is ENXIO.  For
+ * EIO failures, any attempt to read data from the device will return EIO, but
+ * subsequent attempt to reopen the device will succeed.  For ENXIO failures,
+ * any attempt to read from the device will return EIO, but any attempt to
+ * reopen the device will also return ENXIO.
+ * For label faults, the -L option must be specified. This allows faults
+ * to be injected into either the nvlist, uberblock, pad1, or pad2 region
+ * of all the labels for the specified device.
+ *
+ * This form of the command looks like:
+ *
+ * 	zinject -d device [-e errno] [-L <uber | nvlist | pad1 | pad2>] pool
+ *
+ *
+ * DATA FAULTS
+ *
+ * We begin with a tuple of the form:
+ *
+ * 	<type,level,range,object>
+ *
+ * 	type	A string describing the type of data to target.  Each type
+ * 		implicitly describes how to interpret 'object'. Currently,
+ * 		the following values are supported:
+ *
+ * 		data		User data for a file
+ * 		dnode		Dnode for a file or directory
+ *
+ *		The following MOS objects are special.  Instead of injecting
+ *		errors on a particular object or blkid, we inject errors across
+ *		all objects of the given type.
+ *
+ * 		mos		Any data in the MOS
+ * 		mosdir		object directory
+ * 		config		pool configuration
+ * 		bpobj		blkptr list
+ * 		spacemap	spacemap
+ * 		metaslab	metaslab
+ * 		errlog		persistent error log
+ *
+ * 	level	Object level.  Defaults to '0', not applicable to all types.  If
+ * 		a range is given, this corresponds to the indirect block
+ * 		corresponding to the specific range.
+ *
+ *	range	A numerical range [start,end) within the object.  Defaults to
+ *		the full size of the file.
+ *
+ * 	object	A string describing the logical location of the object.  For
+ * 		files and directories (currently the only supported types),
+ * 		this is the path of the object on disk.
+ *
+ * This is translated, via libzpool, into the following internal representation:
+ *
+ * 	<type,objset,object,level,range>
+ *
+ * These types should be self-explanatory.  This tuple is then passed to the
+ * kernel via a special ioctl() to initiate fault injection for the given
+ * object.  Note that 'type' is not strictly necessary for fault injection, but
+ * is used when translating existing faults into a human-readable string.
+ *
+ *
+ * The command itself takes one of the forms:
+ *
+ * 	zinject
+ * 	zinject <-a | -u pool>
+ * 	zinject -c <id|all>
+ * 	zinject [-q] <-t type> [-f freq] [-u] [-a] [-m] [-e errno] [-l level]
+ *	    [-r range] <object>
+ * 	zinject [-f freq] [-a] [-m] [-u] -b objset:object:level:start:end pool
+ *
+ * With no arguments, the command prints all currently registered injection
+ * handlers, with their numeric identifiers.
+ *
+ * The '-c' option will clear the given handler, or all handlers if 'all' is
+ * specified.
+ *
+ * The '-e' option takes a string describing the errno to simulate.  This must
+ * be either 'io' or 'checksum'.  In most cases this will result in the same
+ * behavior, but RAID-Z will produce a different set of ereports for this
+ * situation.
+ *
+ * The '-a', '-u', and '-m' flags toggle internal flush behavior.  If '-a' is
+ * specified, then the ARC cache is flushed appropriately.  If '-u' is
+ * specified, then the underlying SPA is unloaded.  Either of these flags can be
+ * specified independently of any other handlers.  The '-m' flag automatically
+ * does an unmount and remount of the underlying dataset to aid in flushing the
+ * cache.
+ *
+ * The '-f' flag controls the frequency of errors injected, expressed as an
+ * integer percentage between 1 and 100.  The default is 100.
+ *
+ * This form is responsible for actually injecting the handler into the
+ * framework.  It takes the arguments described above, translates them to the
+ * internal tuple using libzpool, and then issues an ioctl() to register the
+ * handler.
+ *
+ * The final form can target a specific bookmark, regardless of whether a
+ * human-readable interface has been designed.  It allows developers to specify
+ * a particular block by number.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <sys/fs/zfs.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+
+#include <libzfs.h>
+
+#undef verify	/* both libzfs.h and zfs_context.h want to define this */
+
+#include "zinject.h"
+
+libzfs_handle_t *g_zfs;		/* shared libzfs handle */
+int zfs_fd;			/* fd on ZFS_DEV used for injection ioctls */
+
+#ifndef ECKSUM
+#define	ECKSUM	EBADE
+#endif
+
+/*
+ * User-visible names for the error types, indexed by err_type_t.  The
+ * order here must match the err_type_t enum in zinject.h.
+ */
+static const char *errtable[TYPE_INVAL] = {
+	"data",
+	"dnode",
+	"mos",
+	"mosdir",
+	"metaslab",
+	"config",
+	"bpobj",
+	"spacemap",
+	"errlog",
+	"uber",
+	"nvlist",
+	"pad1",
+	"pad2"
+};
+
+/*
+ * Map a user-supplied type name onto an err_type_t by scanning errtable.
+ * Returns TYPE_INVAL when the name is not recognized.
+ */
+static err_type_t
+name_to_type(const char *arg)
+{
+	err_type_t t;
+
+	for (t = 0; t < TYPE_INVAL; t++) {
+		if (strcmp(errtable[t], arg) == 0)
+			break;
+	}
+
+	return (t);
+}
+
+/*
+ * Map an on-disk DMU object type back to the user-visible MOS type name
+ * (the inverse of the MOS cases in translate_record()); returns "-" for
+ * types with no special name.
+ */
+static const char *
+type_to_name(uint64_t type)
+{
+	switch (type) {
+	case DMU_OT_OBJECT_DIRECTORY:
+		return ("mosdir");
+	case DMU_OT_OBJECT_ARRAY:
+		return ("metaslab");
+	case DMU_OT_PACKED_NVLIST:
+		return ("config");
+	case DMU_OT_BPOBJ:
+		return ("bpobj");
+	case DMU_OT_SPACE_MAP:
+		return ("spacemap");
+	case DMU_OT_ERROR_LOG:
+		return ("errlog");
+	default:
+		return ("-");
+	}
+}
+
+
+/*
+ * Print usage message.
+ */
+void
+usage(void)
+{
+	(void) printf(
+	    "usage:\n"
+	    "\n"
+	    "\tzinject\n"
+	    "\n"
+	    "\t\tList all active injection records.\n"
+	    "\n"
+	    "\tzinject -c <id|all>\n"
+	    "\n"
+	    "\t\tClear the particular record (if given a numeric ID), or\n"
+	    "\t\tall records if 'all' is specified.\n"
+	    "\n"
+	    "\tzinject -p <function name> pool\n"
+	    "\t\tInject a panic fault at the specified function. Only \n"
+	    "\t\tfunctions which call spa_vdev_config_exit(), or \n"
+	    "\t\tspa_vdev_exit() will trigger a panic.\n"
+	    "\n"
+	    "\tzinject -d device [-e errno] [-L <nvlist|uber|pad1|pad2>] [-F]\n"
+	    "\t    [-T <read|write|free|claim|all>] pool\n"
+	    "\t\tInject a fault into a particular device or the device's\n"
+	    "\t\tlabel.  Label injection can either be 'nvlist', 'uber',\n"
+	    "\t\t'pad1', or 'pad2'.\n"
+	    "\t\t'errno' can be 'nxio' (the default), 'io', or 'dtl'.\n"
+	    "\n"
+	    "\tzinject -d device -A <degrade|fault> pool\n"
+	    "\t\tPerform a specific action on a particular device\n"
+	    "\n"
+	    "\tzinject -I [-s <seconds> | -g <txgs>] pool\n"
+	    "\t\tCause the pool to stop writing blocks yet not\n"
+	    "\t\treport errors for a duration.  Simulates buggy hardware\n"
+	    "\t\tthat fails to honor cache flush requests.\n"
+	    "\t\tDefault duration is 30 seconds.  The machine is panicked\n"
+	    "\t\tat the end of the duration.\n"
+	    "\n"
+	    "\tzinject -b objset:object:level:blkid pool\n"
+	    "\n"
+	    "\t\tInject an error into pool 'pool' with the numeric bookmark\n"
+	    "\t\tspecified by the remaining tuple.  Each number is in\n"
+	    "\t\thexadecimal, and only one block can be specified.\n"
+	    "\n"
+	    "\tzinject [-q] <-t type> [-e errno] [-l level] [-r range]\n"
+	    "\t    [-a] [-m] [-u] [-f freq] <object>\n"
+	    "\n"
+	    "\t\tInject an error into the object specified by the '-t' option\n"
+	    "\t\tand the object descriptor.  The 'object' parameter is\n"
+	    "\t\tinterpreted depending on the '-t' option.\n"
+	    "\n"
+	    "\t\t-q\tQuiet mode.  Only print out the handler number added.\n"
+	    "\t\t-e\tInject a specific error.  Must be either 'io' or\n"
+	    "\t\t\t'checksum'.  Default is 'io'.\n"
+	    "\t\t-l\tInject error at a particular block level. Default is "
+	    "0.\n"
+	    "\t\t-m\tAutomatically remount underlying filesystem.\n"
+	    "\t\t-r\tInject error over a particular logical range of an\n"
+	    "\t\t\tobject.  Will be translated to the appropriate blkid\n"
+	    "\t\t\trange according to the object's properties.\n"
+	    "\t\t-a\tFlush the ARC cache.  Can be specified without any\n"
+	    "\t\t\tassociated object.\n"
+	    "\t\t-u\tUnload the associated pool.  Can be specified with only\n"
+	    "\t\t\ta pool object.\n"
+	    "\t\t-f\tOnly inject errors a fraction of the time.  Expressed as\n"
+	    "\t\t\ta percentage between 1 and 100.\n"
+	    "\n"
+	    "\t-t data\t\tInject an error into the plain file contents of a\n"
+	    "\t\t\tfile.  The object must be specified as a complete path\n"
+	    "\t\t\tto a file on a ZFS filesystem.\n"
+	    "\n"
+	    "\t-t dnode\tInject an error into the metadnode in the block\n"
+	    "\t\t\tcorresponding to the dnode for a file or directory.  The\n"
+	    "\t\t\t'-r' option is incompatible with this mode.  The object\n"
+	    "\t\t\tis specified as a complete path to a file or directory\n"
+	    "\t\t\ton a ZFS filesystem.\n"
+	    "\n"
+	    "\t-t <mos>\tInject errors into the MOS for objects of the given\n"
+	    "\t\t\ttype.  Valid types are: mos, mosdir, config, bpobj,\n"
+	    "\t\t\tspacemap, metaslab, errlog.  The only valid <object> is\n"
+	    "\t\t\tthe poolname.\n");
+}
+
+/*
+ * Iterate over all registered injection handlers, invoking 'func' for each
+ * with (id, pool name, record, data).  Iteration stops early if 'func'
+ * returns nonzero, and that value is returned.  The kernel signals the end
+ * of the list by failing the ioctl with ENOENT; any other errno is an
+ * error.  Returns 0 on a complete pass, -1 on iteration failure.
+ */
+static int
+iter_handlers(int (*func)(int, const char *, zinject_record_t *, void *),
+    void *data)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret;
+
+	while (ioctl(zfs_fd, ZFS_IOC_INJECT_LIST_NEXT, &zc) == 0)
+		if ((ret = func((int)zc.zc_guid, zc.zc_name,
+		    &zc.zc_inject_record, data)) != 0)
+			return (ret);
+
+	if (errno != ENOENT) {
+		(void) fprintf(stderr, "Unable to list handlers: %s\n",
+		    strerror(errno));
+		return (-1);
+	}
+
+	return (0);
+}
+
+/*
+ * iter_handlers() callback: print one data-fault handler.  Device handlers
+ * (zi_guid set) and panic handlers (zi_func set) are skipped.  '*data'
+ * counts rows printed so the header is emitted exactly once.
+ */
+static int
+print_data_handler(int id, const char *pool, zinject_record_t *record,
+    void *data)
+{
+	int *count = data;
+
+	if (record->zi_guid != 0 || record->zi_func[0] != '\0')
+		return (0);
+
+	if (*count == 0) {
+		(void) printf("%3s  %-15s  %-6s  %-6s  %-8s  %3s  %-15s\n",
+		    "ID", "POOL", "OBJSET", "OBJECT", "TYPE", "LVL",  "RANGE");
+		(void) printf("---  ---------------  ------  "
+		    "------  --------  ---  ---------------\n");
+	}
+
+	*count += 1;
+
+	(void) printf("%3d  %-15s  %-6llu  %-6llu  %-8s  %3d  ", id, pool,
+	    (u_longlong_t)record->zi_objset, (u_longlong_t)record->zi_object,
+	    type_to_name(record->zi_type), record->zi_level);
+
+	/* [0, -1] is the sentinel for "whole object" */
+	if (record->zi_start == 0 &&
+	    record->zi_end == -1ULL)
+		(void) printf("all\n");
+	else
+		(void) printf("[%llu, %llu]\n", (u_longlong_t)record->zi_start,
+		    (u_longlong_t)record->zi_end);
+
+	return (0);
+}
+
+/*
+ * iter_handlers() callback: print one device-fault handler (zi_guid set,
+ * no panic function).  '*count' counts rows printed so the header is
+ * emitted exactly once.
+ */
+static int
+print_device_handler(int id, const char *pool, zinject_record_t *record,
+    void *data)
+{
+	int *count = data;
+
+	if (record->zi_guid == 0 || record->zi_func[0] != '\0')
+		return (0);
+
+	if (*count == 0) {
+		(void) printf("%3s  %-15s  %s\n", "ID", "POOL", "GUID");
+		(void) printf("---  ---------------  ----------------\n");
+	}
+
+	*count += 1;
+
+	(void) printf("%3d  %-15s  %llx\n", id, pool,
+	    (u_longlong_t)record->zi_guid);
+
+	return (0);
+}
+
+/*
+ * iter_handlers() callback: print one panic handler (zi_func set).
+ * '*count' counts rows printed so the header is emitted exactly once.
+ */
+static int
+print_panic_handler(int id, const char *pool, zinject_record_t *record,
+    void *data)
+{
+	int *count = data;
+
+	if (record->zi_func[0] == '\0')
+		return (0);
+
+	if (*count == 0) {
+		(void) printf("%3s  %-15s  %s\n", "ID", "POOL", "FUNCTION");
+		(void) printf("---  ---------------  ----------------\n");
+	}
+
+	*count += 1;
+
+	(void) printf("%3d  %-15s  %s\n", id, pool, record->zi_func);
+
+	return (0);
+}
+
+/*
+ * Print all registered error handlers, one section per handler class
+ * (device, data, panic), with a blank line between non-empty sections.
+ * Returns the total number of handlers registered.
+ */
+static int
+print_all_handlers(void)
+{
+	int (*printers[])(int, const char *, zinject_record_t *, void *) = {
+		print_device_handler,
+		print_data_handler,
+		print_panic_handler
+	};
+	int i, section, total;
+
+	total = 0;
+	for (i = 0; i < 3; i++) {
+		section = 0;
+		(void) iter_handlers(printers[i], &section);
+		total += section;
+		/* no trailing blank line after the final section */
+		if (section > 0 && i < 2)
+			(void) printf("\n");
+	}
+
+	return (total);
+}
+
+/* ARGSUSED */
+/*
+ * iter_handlers() callback: remove a single handler by id.  Unlike
+ * cancel_handler(), prints nothing on success (the caller summarizes).
+ * Returns nonzero to stop iteration on failure.
+ */
+static int
+cancel_one_handler(int id, const char *pool, zinject_record_t *record,
+    void *data)
+{
+	zfs_cmd_t zc = { 0 };
+
+	zc.zc_guid = (uint64_t)id;
+
+	if (ioctl(zfs_fd, ZFS_IOC_CLEAR_FAULT, &zc) != 0) {
+		(void) fprintf(stderr, "failed to remove handler %d: %s\n",
+		    id, strerror(errno));
+		return (1);
+	}
+
+	return (0);
+}
+
+/*
+ * Remove all fault injection handlers.  Returns 0 if every handler was
+ * removed, nonzero if any removal (or the iteration itself) failed.
+ */
+static int
+cancel_all_handlers(void)
+{
+	int ret = iter_handlers(cancel_one_handler, NULL);
+
+	if (ret == 0)
+		(void) printf("removed all registered handlers\n");
+
+	return (ret);
+}
+
+/*
+ * Remove the fault injection handler with the given numeric id.
+ * Returns 0 on success, 1 on failure.
+ */
+static int
+cancel_handler(int id)
+{
+	zfs_cmd_t zc = { 0 };
+
+	zc.zc_guid = (uint64_t)id;
+
+	if (ioctl(zfs_fd, ZFS_IOC_CLEAR_FAULT, &zc) == 0) {
+		(void) printf("removed handler %d\n", id);
+		return (0);
+	}
+
+	(void) fprintf(stderr, "failed to remove handler %d: %s\n",
+	    id, strerror(errno));
+	return (1);
+}
+
+/*
+ * Register a new fault injection handler with the kernel.  'flags' may
+ * include ZINJECT_NULL (no record; just flush/unload side effects),
+ * ZINJECT_FLUSH_ARC and ZINJECT_UNLOAD_SPA.  On success the kernel
+ * returns the new handler id in zc_guid, which is printed unless the
+ * ZINJECT_NULL flag was given.  Returns 0 on success, 1 on failure.
+ */
+static int
+register_handler(const char *pool, int flags, zinject_record_t *record,
+    int quiet)
+{
+	zfs_cmd_t zc = { 0 };
+
+	(void) strcpy(zc.zc_name, pool);
+	zc.zc_inject_record = *record;
+	zc.zc_guid = flags;
+
+	if (ioctl(zfs_fd, ZFS_IOC_INJECT_FAULT, &zc) != 0) {
+		(void) fprintf(stderr, "failed to add handler: %s\n",
+		    strerror(errno));
+		return (1);
+	}
+
+	if (flags & ZINJECT_NULL)
+		return (0);
+
+	if (quiet) {
+		/* quiet mode: emit only the handler id, for scripting */
+		(void) printf("%llu\n", (u_longlong_t)zc.zc_guid);
+	} else {
+		(void) printf("Added handler %llu with the following "
+		    "properties:\n", (u_longlong_t)zc.zc_guid);
+		(void) printf("  pool: %s\n", pool);
+		/* exactly one of these record variants applies */
+		if (record->zi_guid) {
+			(void) printf("  vdev: %llx\n",
+			    (u_longlong_t)record->zi_guid);
+		} else if (record->zi_func[0] != '\0') {
+			(void) printf("  panic function: %s\n",
+			    record->zi_func);
+		} else if (record->zi_duration > 0) {
+			(void) printf(" time: %lld seconds\n",
+			    (u_longlong_t)record->zi_duration);
+		} else if (record->zi_duration < 0) {
+			/* negative duration encodes a txg count (see -g) */
+			(void) printf(" txgs: %lld \n",
+			    (u_longlong_t)-record->zi_duration);
+		} else {
+			(void) printf("objset: %llu\n",
+			    (u_longlong_t)record->zi_objset);
+			(void) printf("object: %llu\n",
+			    (u_longlong_t)record->zi_object);
+			(void) printf("  type: %llu\n",
+			    (u_longlong_t)record->zi_type);
+			(void) printf(" level: %d\n", record->zi_level);
+			if (record->zi_start == 0 &&
+			    record->zi_end == -1ULL)
+				(void) printf(" range: all\n");
+			else
+				(void) printf(" range: [%llu, %llu)\n",
+				    (u_longlong_t)record->zi_start,
+				    (u_longlong_t)record->zi_end);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Apply a vdev state change (degrade or fault, per -A) to the device
+ * identified by record->zi_guid in the given pool.  Returns 0 on
+ * success, 1 on failure.
+ */
+int
+perform_action(const char *pool, zinject_record_t *record, int cmd)
+{
+	zfs_cmd_t zc = { 0 };
+
+	ASSERT(cmd == VDEV_STATE_DEGRADED || cmd == VDEV_STATE_FAULTED);
+	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
+	zc.zc_guid = record->zi_guid;
+	zc.zc_cookie = cmd;
+
+	return (ioctl(zfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0 ? 0 : 1);
+}
+
+/*
+ * Command-line entry point: parse and validate the option combination,
+ * translate it into a zinject_record_t via the helpers above, and either
+ * cancel existing handlers (-c) or register a new one.
+ */
+int
+main(int argc, char **argv)
+{
+	int c;
+	char *range = NULL;
+	char *cancel = NULL;
+	char *end;
+	char *raw = NULL;
+	char *device = NULL;
+	int level = 0;
+	int quiet = 0;
+	int error = 0;
+	int domount = 0;
+	int io_type = ZIO_TYPES;
+	int action = VDEV_STATE_UNKNOWN;
+	err_type_t type = TYPE_INVAL;
+	err_type_t label = TYPE_INVAL;
+	zinject_record_t record = { 0 };
+	char pool[MAXNAMELEN];
+	char dataset[MAXNAMELEN];
+	zfs_handle_t *zhp;
+	int nowrites = 0;
+	int dur_txg = 0;
+	int dur_secs = 0;
+	int ret;
+	int flags = 0;
+
+	if ((g_zfs = libzfs_init()) == NULL) {
+		(void) fprintf(stderr, "internal error: failed to "
+		    "initialize ZFS library\n");
+		return (1);
+	}
+
+	libzfs_print_on_error(g_zfs, B_TRUE);
+
+	if ((zfs_fd = open(ZFS_DEV, O_RDWR)) < 0) {
+		(void) fprintf(stderr, "failed to open ZFS device\n");
+		return (1);
+	}
+
+	if (argc == 1) {
+		/*
+		 * No arguments.  Print the available handlers.  If there are no
+		 * available handlers, direct the user to '-h' for help
+		 * information.
+		 */
+		if (print_all_handlers() == 0) {
+			(void) printf("No handlers registered.\n");
+			(void) printf("Run 'zinject -h' for usage "
+			    "information.\n");
+		}
+
+		return (0);
+	}
+
+	/* leading ':' makes getopt report missing operands as ':' */
+	while ((c = getopt(argc, argv,
+	    ":aA:b:d:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:")) != -1) {
+		switch (c) {
+		case 'a':
+			flags |= ZINJECT_FLUSH_ARC;
+			break;
+		case 'A':
+			if (strcasecmp(optarg, "degrade") == 0) {
+				action = VDEV_STATE_DEGRADED;
+			} else if (strcasecmp(optarg, "fault") == 0) {
+				action = VDEV_STATE_FAULTED;
+			} else {
+				(void) fprintf(stderr, "invalid action '%s': "
+				    "must be 'degrade' or 'fault'\n", optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case 'b':
+			raw = optarg;
+			break;
+		case 'c':
+			cancel = optarg;
+			break;
+		case 'd':
+			device = optarg;
+			break;
+		case 'e':
+			if (strcasecmp(optarg, "io") == 0) {
+				error = EIO;
+			} else if (strcasecmp(optarg, "checksum") == 0) {
+				error = ECKSUM;
+			} else if (strcasecmp(optarg, "nxio") == 0) {
+				error = ENXIO;
+			} else if (strcasecmp(optarg, "dtl") == 0) {
+				/* ECHILD is repurposed as the DTL marker */
+				error = ECHILD;
+			} else {
+				(void) fprintf(stderr, "invalid error type "
+				    "'%s': must be 'io', 'checksum' or "
+				    "'nxio'\n", optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case 'f':
+			record.zi_freq = atoi(optarg);
+			if (record.zi_freq < 1 || record.zi_freq > 100) {
+				(void) fprintf(stderr, "frequency range must "
+				    "be in the range (0, 100]\n");
+				return (1);
+			}
+			break;
+		case 'F':
+			record.zi_failfast = B_TRUE;
+			break;
+		case 'g':
+			dur_txg = 1;
+			record.zi_duration = (int)strtol(optarg, &end, 10);
+			if (record.zi_duration <= 0 || *end != '\0') {
+				(void) fprintf(stderr, "invalid duration '%s': "
+				    "must be a positive integer\n", optarg);
+				usage();
+				return (1);
+			}
+			/* store duration of txgs as its negative */
+			record.zi_duration *= -1;
+			break;
+		case 'h':
+			usage();
+			return (0);
+		case 'I':
+			/* default duration, if one hasn't yet been defined */
+			nowrites = 1;
+			if (dur_secs == 0 && dur_txg == 0)
+				record.zi_duration = 30;
+			break;
+		case 'l':
+			level = (int)strtol(optarg, &end, 10);
+			if (*end != '\0') {
+				(void) fprintf(stderr, "invalid level '%s': "
+				    "must be an integer\n", optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case 'm':
+			domount = 1;
+			break;
+		case 'p':
+			(void) strlcpy(record.zi_func, optarg,
+			    sizeof (record.zi_func));
+			break;
+		case 'q':
+			quiet = 1;
+			break;
+		case 'r':
+			range = optarg;
+			break;
+		case 's':
+			dur_secs = 1;
+			record.zi_duration = (int)strtol(optarg, &end, 10);
+			if (record.zi_duration <= 0 || *end != '\0') {
+				(void) fprintf(stderr, "invalid duration '%s': "
+				    "must be a positive integer\n", optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case 'T':
+			if (strcasecmp(optarg, "read") == 0) {
+				io_type = ZIO_TYPE_READ;
+			} else if (strcasecmp(optarg, "write") == 0) {
+				io_type = ZIO_TYPE_WRITE;
+			} else if (strcasecmp(optarg, "free") == 0) {
+				io_type = ZIO_TYPE_FREE;
+			} else if (strcasecmp(optarg, "claim") == 0) {
+				io_type = ZIO_TYPE_CLAIM;
+			} else if (strcasecmp(optarg, "all") == 0) {
+				io_type = ZIO_TYPES;
+			} else {
+				(void) fprintf(stderr, "invalid I/O type "
+				    "'%s': must be 'read', 'write', 'free', "
+				    "'claim' or 'all'\n", optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case 't':
+			/* the !MOS_TYPE(type) term is redundant here: it is
+			 * always true when type == TYPE_INVAL */
+			if ((type = name_to_type(optarg)) == TYPE_INVAL &&
+			    !MOS_TYPE(type)) {
+				(void) fprintf(stderr, "invalid type '%s'\n",
+				    optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case 'u':
+			flags |= ZINJECT_UNLOAD_SPA;
+			break;
+		case 'L':
+			/*
+			 * NOTE(review): this guard tests LABEL_TYPE(type),
+			 * not LABEL_TYPE(label), so any name in errtable
+			 * (e.g. 'data') is accepted as a -L argument --
+			 * confirm whether 'label' was intended.
+			 */
+			if ((label = name_to_type(optarg)) == TYPE_INVAL &&
+			    !LABEL_TYPE(type)) {
+				(void) fprintf(stderr, "invalid label type "
+				    "'%s'\n", optarg);
+				usage();
+				return (1);
+			}
+			break;
+		case ':':
+			(void) fprintf(stderr, "option -%c requires an "
+			    "operand\n", optopt);
+			usage();
+			return (1);
+		case '?':
+			(void) fprintf(stderr, "invalid option '%c'\n",
+			    optopt);
+			usage();
+			return (2);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (cancel != NULL) {
+		/*
+		 * '-c' is invalid with any other options.
+		 */
+		if (raw != NULL || range != NULL || type != TYPE_INVAL ||
+		    level != 0 || record.zi_func[0] != '\0' ||
+		    record.zi_duration != 0) {
+			(void) fprintf(stderr, "cancel (-c) incompatible with "
+			    "any other options\n");
+			usage();
+			return (2);
+		}
+		if (argc != 0) {
+			(void) fprintf(stderr, "extraneous argument to '-c'\n");
+			usage();
+			return (2);
+		}
+
+		if (strcmp(cancel, "all") == 0) {
+			return (cancel_all_handlers());
+		} else {
+			int id = (int)strtol(cancel, &end, 10);
+			if (*end != '\0') {
+				(void) fprintf(stderr, "invalid handle id '%s':"
+				    " must be an integer or 'all'\n", cancel);
+				usage();
+				return (1);
+			}
+			return (cancel_handler(id));
+		}
+	}
+
+	if (device != NULL) {
+		/*
+		 * Device (-d) injection uses a completely different mechanism
+		 * for doing injection, so handle it separately here.
+		 */
+		if (raw != NULL || range != NULL || type != TYPE_INVAL ||
+		    level != 0 || record.zi_func[0] != '\0' ||
+		    record.zi_duration != 0) {
+			(void) fprintf(stderr, "device (-d) incompatible with "
+			    "data error injection\n");
+			usage();
+			return (2);
+		}
+
+		if (argc != 1) {
+			(void) fprintf(stderr, "device (-d) injection requires "
+			    "a single pool name\n");
+			usage();
+			return (2);
+		}
+
+		(void) strcpy(pool, argv[0]);
+		dataset[0] = '\0';
+
+		if (error == ECKSUM) {
+			(void) fprintf(stderr, "device error type must be "
+			    "'io' or 'nxio'\n");
+			return (1);
+		}
+
+		record.zi_iotype = io_type;
+		if (translate_device(pool, device, label, &record) != 0)
+			return (1);
+		if (!error)
+			error = ENXIO;
+
+		/* -A short-circuits injection: set vdev state and exit */
+		if (action != VDEV_STATE_UNKNOWN)
+			return (perform_action(pool, &record, action));
+
+	} else if (raw != NULL) {
+		if (range != NULL || type != TYPE_INVAL || level != 0 ||
+		    record.zi_func[0] != '\0' || record.zi_duration != 0) {
+			(void) fprintf(stderr, "raw (-b) format with "
+			    "any other options\n");
+			usage();
+			return (2);
+		}
+
+		if (argc != 1) {
+			(void) fprintf(stderr, "raw (-b) format expects a "
+			    "single pool name\n");
+			usage();
+			return (2);
+		}
+
+		(void) strcpy(pool, argv[0]);
+		dataset[0] = '\0';
+
+		if (error == ENXIO) {
+			(void) fprintf(stderr, "data error type must be "
+			    "'checksum' or 'io'\n");
+			return (1);
+		}
+
+		if (translate_raw(raw, &record) != 0)
+			return (1);
+		if (!error)
+			error = EIO;
+	} else if (record.zi_func[0] != '\0') {
+		/* panic injection (-p function [id]) */
+		if (raw != NULL || range != NULL || type != TYPE_INVAL ||
+		    level != 0 || device != NULL || record.zi_duration != 0) {
+			(void) fprintf(stderr, "panic (-p) incompatible with "
+			    "other options\n");
+			usage();
+			return (2);
+		}
+
+		if (argc < 1 || argc > 2) {
+			(void) fprintf(stderr, "panic (-p) injection requires "
+			    "a single pool name and an optional id\n");
+			usage();
+			return (2);
+		}
+
+		(void) strcpy(pool, argv[0]);
+		if (argv[1] != NULL)
+			record.zi_type = atoi(argv[1]);
+		dataset[0] = '\0';
+	} else if (record.zi_duration != 0) {
+		/* ignore-writes injection (-I with -s/-g duration) */
+		if (nowrites == 0) {
+			(void) fprintf(stderr, "-s or -g meaningless "
+			    "without -I (ignore writes)\n");
+			usage();
+			return (2);
+		} else if (dur_secs && dur_txg) {
+			(void) fprintf(stderr, "choose a duration either "
+			    "in seconds (-s) or a number of txgs (-g) "
+			    "but not both\n");
+			usage();
+			return (2);
+		} else if (argc != 1) {
+			(void) fprintf(stderr, "ignore writes (-I) "
+			    "injection requires a single pool name\n");
+			usage();
+			return (2);
+		}
+
+		(void) strcpy(pool, argv[0]);
+		dataset[0] = '\0';
+	} else if (type == TYPE_INVAL) {
+		/* no fault type: only flush (-a) / unload (-u) side effects */
+		if (flags == 0) {
+			(void) fprintf(stderr, "at least one of '-b', '-d', "
+			    "'-t', '-a', '-p', '-I' or '-u' "
+			    "must be specified\n");
+			usage();
+			return (2);
+		}
+
+		if (argc == 1 && (flags & ZINJECT_UNLOAD_SPA)) {
+			(void) strcpy(pool, argv[0]);
+			dataset[0] = '\0';
+		} else if (argc != 0) {
+			(void) fprintf(stderr, "extraneous argument for "
+			    "'-f'\n");
+			usage();
+			return (2);
+		}
+
+		flags |= ZINJECT_NULL;
+	} else {
+		/* data fault: translate (-t type, object, range, level) */
+		if (argc != 1) {
+			(void) fprintf(stderr, "missing object\n");
+			usage();
+			return (2);
+		}
+
+		if (error == ENXIO) {
+			(void) fprintf(stderr, "data error type must be "
+			    "'checksum' or 'io'\n");
+			return (1);
+		}
+
+		if (translate_record(type, argv[0], range, level, &record, pool,
+		    dataset) != 0)
+			return (1);
+		if (!error)
+			error = EIO;
+	}
+
+	/*
+	 * If this is pool-wide metadata, unmount everything.  The ioctl() will
+	 * unload the pool, so that we trigger spa-wide reopen of metadata next
+	 * time we access the pool.
+	 */
+	if (dataset[0] != '\0' && domount) {
+		if ((zhp = zfs_open(g_zfs, dataset, ZFS_TYPE_DATASET)) == NULL)
+			return (1);
+
+		if (zfs_unmount(zhp, NULL, 0) != 0)
+			return (1);
+	}
+
+	record.zi_error = error;
+
+	ret = register_handler(pool, flags, &record, quiet);
+
+	/*
+	 * NOTE(review): the remount result overwrites any failure returned
+	 * by register_handler() above -- confirm intended.
+	 */
+	if (dataset[0] != '\0' && domount)
+		ret = (zfs_mount(zhp, NULL, 0) != 0);
+
+	libzfs_fini(g_zfs);
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zinject/zinject.h b/bsd/cddl/contrib/opensolaris/cmd/zinject/zinject.h
new file mode 100644
index 0000000000000000000000000000000000000000..46fdcad8b31f905cb02f7da14720e3bc0fdb47fe
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zinject/zinject.h
@@ -0,0 +1,70 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef	_ZINJECT_H
+#define	_ZINJECT_H
+
+#include <sys/zfs_ioctl.h>
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+	TYPE_DATA,		/* plain file contents		*/
+	TYPE_DNODE,		/* metadnode contents		*/
+	TYPE_MOS,		/* all MOS data			*/
+	TYPE_MOSDIR,		/* MOS object directory		*/
+	TYPE_METASLAB,		/* metaslab objects		*/
+	TYPE_CONFIG,		/* MOS config			*/
+	TYPE_BPOBJ,		/* block pointer list		*/
+	TYPE_SPACEMAP,		/* space map objects		*/
+	TYPE_ERRLOG,		/* persistent error log		*/
+	TYPE_LABEL_UBERBLOCK,	/* label specific uberblock	*/
+	TYPE_LABEL_NVLIST,	/* label specific nvlist	*/
+	TYPE_LABEL_PAD1,	/* label specific 8K pad1 area	*/
+	TYPE_LABEL_PAD2,	/* label specific 8K pad2 area	*/
+	TYPE_INVAL
+} err_type_t;
+
+#define	MOS_TYPE(t)	\
+	((t) >= TYPE_MOS && (t) < TYPE_LABEL_UBERBLOCK)
+
+#define	LABEL_TYPE(t)	\
+	((t) >= TYPE_LABEL_UBERBLOCK && (t) < TYPE_INVAL)
+
+int translate_record(err_type_t type, const char *object, const char *range,
+    int level, zinject_record_t *record, char *poolname, char *dataset);
+int translate_raw(const char *raw, zinject_record_t *record);
+int translate_device(const char *pool, const char *device,
+    err_type_t label_type, zinject_record_t *record);
+void usage(void);
+
+extern libzfs_handle_t *g_zfs;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _ZINJECT_H */
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zlook/zlook.c b/bsd/cddl/contrib/opensolaris/cmd/zlook/zlook.c
new file mode 100644
index 0000000000000000000000000000000000000000..29a6559f90230d2f6be46cbc2993adcffbf710bd
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zlook/zlook.c
@@ -0,0 +1,411 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * This is a test program that uses ioctls to the ZFS Unit Test driver
+ * to perform readdirs or lookups using flags not normally available
+ * to user-land programs.  This allows testing of the flags'
+ * behavior outside of a complicated consumer, such as the SMB driver.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stropts.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/dirent.h>
+#include <sys/attr.h>
+#include <stddef.h>
+#include <fcntl.h>
+#include <string.h>
+#include <time.h>
+
+#define	_KERNEL
+
+#include <sys/fs/zut.h>
+#include <sys/extdirent.h>
+
+#undef	_KERNEL
+
+#define	MAXBUF (64 * 1024)
+#define	BIGBUF 4096
+#define	LILBUF (sizeof (dirent_t))
+
+#define	DIRENT_NAMELEN(reclen)	\
+	((reclen) - (offsetof(dirent_t, d_name[0])))
+
+static void
+usage(char *pnam)
+{
+	(void) fprintf(stderr, "Usage:\n    %s -l [-is] dir-to-look-in "
+	    "file-in-dir [xfile-on-file]\n", pnam);
+	(void) fprintf(stderr, "    %s -i [-ls] dir-to-look-in "
+	    "file-in-dir [xfile-on-file]\n", pnam);
+	(void) fprintf(stderr, "    %s -s [-il] dir-to-look-in "
+	    "file-in-dir [xfile-on-file]\n", pnam);
+	(void) fprintf(stderr, "\t    Perform a lookup\n");
+	(void) fprintf(stderr, "\t    -l == lookup\n");
+	(void) fprintf(stderr, "\t    -i == request FIGNORECASE\n");
+	(void) fprintf(stderr, "\t    -s == request stat(2) and xvattr info\n");
+	(void) fprintf(stderr, "    %s -r [-ea] [-b buffer-size-in-bytes] "
+	    "dir-to-look-in [file-in-dir]\n", pnam);
+	(void) fprintf(stderr, "    %s -e [-ra] [-b buffer-size-in-bytes] "
+	    "dir-to-look-in [file-in-dir]\n", pnam);
+	(void) fprintf(stderr, "    %s -a [-re] [-b buffer-size-in-bytes] "
+	    "dir-to-look-in [file-in-dir]\n", pnam);
+	(void) fprintf(stderr, "\t    Perform a readdir\n");
+	(void) fprintf(stderr, "\t    -r == readdir\n");
+	(void) fprintf(stderr, "\t    -e == request extended entries\n");
+	(void) fprintf(stderr, "\t    -a == request access filtering\n");
+	(void) fprintf(stderr, "\t    -b == buffer size (default 4K)\n");
+	(void) fprintf(stderr, "    %s -A path\n", pnam);
+	(void) fprintf(stderr, "\t    Look up _PC_ACCESS_FILTERING "
+	    "for path with pathconf(2)\n");
+	(void) fprintf(stderr, "    %s -E path\n", pnam);
+	(void) fprintf(stderr, "\t    Look up _PC_SATTR_EXISTS "
+	    "for path with pathconf(2)\n");
+	(void) fprintf(stderr, "    %s -S path\n", pnam);
+	(void) fprintf(stderr, "\t    Look up _PC_SATTR_ENABLED "
+	    "for path with pathconf(2)\n");
+	exit(EINVAL);
+}
+
+static void
+print_extd_entries(zut_readdir_t *r)
+{
+	struct edirent *eodp;
+	char *bufstart;
+
+	eodp = (edirent_t *)(uintptr_t)r->zr_buf;
+	bufstart = (char *)eodp;
+	while ((char *)eodp < bufstart + r->zr_bytes) {
+		char *blanks = "                ";
+		int i = 0;
+		while (i < EDIRENT_NAMELEN(eodp->ed_reclen)) {
+			if (!eodp->ed_name[i])
+				break;
+			(void) printf("%c", eodp->ed_name[i++]);
+		}
+		if (i < 16)
+			(void) printf("%.*s", 16 - i, blanks);
+		(void) printf("\t%x\n", eodp->ed_eflags);
+		eodp = (edirent_t *)((intptr_t)eodp + eodp->ed_reclen);
+	}
+}
+
+static void
+print_entries(zut_readdir_t *r)
+{
+	dirent64_t *dp;
+	char *bufstart;
+
+	dp = (dirent64_t *)(intptr_t)r->zr_buf;
+	bufstart = (char *)dp;
+	while ((char *)dp < bufstart + r->zr_bytes) {
+		int i = 0;
+		while (i < DIRENT_NAMELEN(dp->d_reclen)) {
+			if (!dp->d_name[i])
+				break;
+			(void) printf("%c", dp->d_name[i++]);
+		}
+		(void) printf("\n");
+		dp = (dirent64_t *)((intptr_t)dp + dp->d_reclen);
+	}
+}
+
+static void
+print_stats(struct stat64 *sb)
+{
+	char timebuf[512];
+
+	(void) printf("st_mode\t\t\t%04lo\n", (unsigned long)sb->st_mode);
+	(void) printf("st_ino\t\t\t%llu\n", (unsigned long long)sb->st_ino);
+	(void) printf("st_nlink\t\t%lu\n", (unsigned long)sb->st_nlink);
+	(void) printf("st_uid\t\t\t%d\n", sb->st_uid);
+	(void) printf("st_gid\t\t\t%d\n", sb->st_gid);
+	(void) printf("st_size\t\t\t%lld\n", (long long)sb->st_size);
+	(void) printf("st_blksize\t\t%ld\n", (long)sb->st_blksize);
+	(void) printf("st_blocks\t\t%lld\n", (long long)sb->st_blocks);
+
+	timebuf[0] = 0;
+	if (ctime_r(&sb->st_atime, timebuf, 512)) {
+		(void) printf("st_atime\t\t");
+		(void) printf("%s", timebuf);
+	}
+	timebuf[0] = 0;
+	if (ctime_r(&sb->st_mtime, timebuf, 512)) {
+		(void) printf("st_mtime\t\t");
+		(void) printf("%s", timebuf);
+	}
+	timebuf[0] = 0;
+	if (ctime_r(&sb->st_ctime, timebuf, 512)) {
+		(void) printf("st_ctime\t\t");
+		(void) printf("%s", timebuf);
+	}
+}
+
+static void
+print_xvs(uint64_t xvs)
+{
+	uint_t bits;
+	int idx = 0;
+
+	if (xvs == 0)
+		return;
+
+	(void) printf("-------------------\n");
+	(void) printf("Attribute bit(s) set:\n");
+	(void) printf("-------------------\n");
+
+	bits = xvs & ((1 << F_ATTR_ALL) - 1);
+	while (bits) {
+		uint_t rest = bits >> 1;
+		if (bits & 1) {
+			(void) printf("%s", attr_to_name((f_attr_t)idx));
+			if (rest)
+				(void) printf(", ");
+		}
+		idx++;
+		bits = rest;
+	}
+	(void) printf("\n");
+}
+
+int
+main(int argc, char **argv)
+{
+	zut_lookup_t lk = {0};
+	zut_readdir_t rd = {0};
+	boolean_t checking = B_FALSE;
+	boolean_t looking = B_FALSE;
+	boolean_t reading = B_FALSE;
+	boolean_t bflag = B_FALSE;
+	long rddir_bufsize = BIGBUF;
+	int error = 0;
+	int check;
+	int fd;
+	int c;
+
+	while ((c = getopt(argc, argv, "lisaerb:ASE")) != -1) {
+		switch (c) {
+		case 'l':
+			looking = B_TRUE;
+			break;
+		case 'i':
+			lk.zl_reqflags |= ZUT_IGNORECASE;
+			looking = B_TRUE;
+			break;
+		case 's':
+			lk.zl_reqflags |= ZUT_GETSTAT;
+			looking = B_TRUE;
+			break;
+		case 'a':
+			rd.zr_reqflags |= ZUT_ACCFILTER;
+			reading = B_TRUE;
+			break;
+		case 'e':
+			rd.zr_reqflags |= ZUT_EXTRDDIR;
+			reading = B_TRUE;
+			break;
+		case 'r':
+			reading = B_TRUE;
+			break;
+		case 'b':
+			reading = B_TRUE;
+			bflag = B_TRUE;
+			rddir_bufsize = strtol(optarg, NULL, 0);
+			break;
+		case 'A':
+			checking = B_TRUE;
+			check = _PC_ACCESS_FILTERING;
+			break;
+		case 'S':
+			checking = B_TRUE;
+			check = _PC_SATTR_ENABLED;
+			break;
+		case 'E':
+			checking = B_TRUE;
+			check = _PC_SATTR_EXISTS;
+			break;
+		case '?':
+		default:
+			usage(argv[0]);		/* no return */
+		}
+	}
+
+	if ((checking && looking) || (checking && reading) ||
+	    (looking && reading) || (!reading && bflag) ||
+	    (!checking && !reading && !looking))
+		usage(argv[0]);		/* no return */
+
+	if (rddir_bufsize < LILBUF || rddir_bufsize > MAXBUF) {
+		(void) fprintf(stderr, "Sorry, buffer size "
+		    "must be >= %d and less than or equal to %d bytes.\n",
+		    (int)LILBUF, MAXBUF);
+		exit(EINVAL);
+	}
+
+	if (checking) {
+		char pathbuf[MAXPATHLEN];
+		long result;
+
+		if (argc - optind < 1)
+			usage(argv[0]);		/* no return */
+		(void) strlcpy(pathbuf, argv[optind], MAXPATHLEN);
+		result = pathconf(pathbuf, check);
+		(void) printf("pathconf(2) check for %s\n", pathbuf);
+		switch (check) {
+		case _PC_SATTR_ENABLED:
+			(void) printf("System attributes ");
+			if (result != 0)
+				(void) printf("Enabled\n");
+			else
+				(void) printf("Not enabled\n");
+			break;
+		case _PC_SATTR_EXISTS:
+			(void) printf("System attributes ");
+			if (result != 0)
+				(void) printf("Exist\n");
+			else
+				(void) printf("Do not exist\n");
+			break;
+		case _PC_ACCESS_FILTERING:
+			(void) printf("Access filtering ");
+			if (result != 0)
+				(void) printf("Available\n");
+			else
+				(void) printf("Not available\n");
+			break;
+		}
+		return (result);
+	}
+
+	if ((fd = open(ZUT_DEV, O_RDONLY)) < 0) {
+		perror(ZUT_DEV);
+		return (ENXIO);
+	}
+
+	if (reading) {
+		char *buf;
+
+		if (argc - optind < 1)
+			usage(argv[0]);		/* no return */
+
+		(void) strlcpy(rd.zr_dir, argv[optind], MAXPATHLEN);
+		if (argc - optind > 1) {
+			(void) strlcpy(rd.zr_file, argv[optind + 1],
+			    MAXNAMELEN);
+			rd.zr_reqflags |= ZUT_XATTR;
+		}
+
+		if ((buf = malloc(rddir_bufsize)) == NULL) {
+			error = errno;
+			perror("malloc");
+			(void) close(fd);
+			return (error);
+		}
+
+		rd.zr_buf = (uint64_t)(uintptr_t)buf;
+		rd.zr_buflen = rddir_bufsize;
+
+		while (!rd.zr_eof) {
+			int ierr;
+
+			if ((ierr = ioctl(fd, ZUT_IOC_READDIR, &rd)) != 0) {
+				(void) fprintf(stderr,
+				    "IOCTL error: %s (%d)\n",
+				    strerror(ierr), ierr);
+				free(buf);
+				(void) close(fd);
+				return (ierr);
+			}
+			if (rd.zr_retcode) {
+				(void) fprintf(stderr,
+				    "readdir result: %s (%d)\n",
+				    strerror(rd.zr_retcode), rd.zr_retcode);
+				free(buf);
+				(void) close(fd);
+				return (rd.zr_retcode);
+			}
+			if (rd.zr_reqflags & ZUT_EXTRDDIR)
+				print_extd_entries(&rd);
+			else
+				print_entries(&rd);
+		}
+		free(buf);
+	} else {
+		int ierr;
+
+		if (argc - optind < 2)
+			usage(argv[0]);		/* no return */
+
+		(void) strlcpy(lk.zl_dir, argv[optind], MAXPATHLEN);
+		(void) strlcpy(lk.zl_file, argv[optind + 1], MAXNAMELEN);
+		if (argc - optind > 2) {
+			(void) strlcpy(lk.zl_xfile,
+			    argv[optind + 2], MAXNAMELEN);
+			lk.zl_reqflags |= ZUT_XATTR;
+		}
+
+		if ((ierr = ioctl(fd, ZUT_IOC_LOOKUP, &lk)) != 0) {
+			(void) fprintf(stderr,
+			    "IOCTL error: %s (%d)\n",
+			    strerror(ierr), ierr);
+			(void) close(fd);
+			return (ierr);
+		}
+
+		(void) printf("\nLookup of ");
+		if (lk.zl_reqflags & ZUT_XATTR) {
+			(void) printf("extended attribute \"%s\" of ",
+			    lk.zl_xfile);
+		}
+		(void) printf("file \"%s\" ", lk.zl_file);
+		(void) printf("in directory \"%s\" ", lk.zl_dir);
+		if (lk.zl_retcode) {
+			(void) printf("failed: %s (%d)\n",
+			    strerror(lk.zl_retcode), lk.zl_retcode);
+			(void) close(fd);
+			return (lk.zl_retcode);
+		}
+
+		(void) printf("succeeded.\n");
+		if (lk.zl_reqflags & ZUT_IGNORECASE) {
+			(void) printf("----------------------------\n");
+			(void) printf("dirent flags: 0x%0x\n", lk.zl_deflags);
+			(void) printf("real name: %s\n", lk.zl_real);
+		}
+		if (lk.zl_reqflags & ZUT_GETSTAT) {
+			(void) printf("----------------------------\n");
+			print_stats(&lk.zl_statbuf);
+			print_xvs(lk.zl_xvattrs);
+		}
+	}
+
+	(void) close(fd);
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7 b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7
new file mode 100644
index 0000000000000000000000000000000000000000..999212c16a0a1106b249be2110c2cdd26f639136
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool-features.7
@@ -0,0 +1,206 @@
+'\" te
+.\" Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>.
+.\" All Rights Reserved.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" Copyright (c) 2012 by Delphix. All rights reserved.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd Aug 28, 2012
+.Dt ZPOOL-FEATURES 7
+.Os
+.Sh NAME
+.Nm zpool-features
+.Nd ZFS pool feature descriptions
+.Sh DESCRIPTION
+ZFS pool on\-disk format versions are specified via "features" which replace
+the old on\-disk format numbers (the last supported on\-disk format number is
+28).
+To enable a feature on a pool use the
+.Cm upgrade
+subcommand of the
+.Xr zpool 8
+command, or set the
+.Sy feature@feature_name
+property to
+.Ar enabled .
+.Pp
+The pool format does not affect file system version compatibility or the ability
+to send file systems between pools.
+.Pp
+Since most features can be enabled independently of each other the on\-disk
+format of the pool is specified by the set of all features marked as
+.Sy active
+on the pool. If the pool was created by another software version this set may
+include unsupported features.
+.Ss Identifying features
+Every feature has a guid of the form
+.Sy com.example:feature_name .
+The reverse DNS name ensures that the feature's guid is unique across all ZFS
+implementations. When unsupported features are encountered on a pool they will
+be identified by their guids.
+Refer to the documentation for the ZFS implementation that created the pool
+for information about those features.
+.Pp
+Each supported feature also has a short name.
+By convention a feature's short name is the portion of its guid which follows
+the ':' (e.g.
+.Sy com.example:feature_name
+would have the short name
+.Sy feature_name ),
+however a feature's short name may differ across ZFS implementations if
+following the convention would result in name conflicts.
+.Ss Feature states
+Features can be in one of three states:
+.Bl -tag -width "XXXXXXXX"
+.It Sy active
+This feature's on\-disk format changes are in effect on the pool.
+Support for this feature is required to import the pool in read\-write mode.
+If this feature is not read-only compatible, support is also required to
+import the pool in read\-only mode (see "Read\-only compatibility").
+.It Sy enabled
+An administrator has marked this feature as enabled on the pool, but the
+feature's on\-disk format changes have not been made yet.
+The pool can still be imported by software that does not support this feature,
+but changes may be made to the on\-disk format at any time which will move
+the feature to the
+.Sy active
+state.
+Some features may support returning to the
+.Sy enabled
+state after becoming
+.Sy active .
+See feature\-specific documentation for details.
+.It Sy disabled
+This feature's on\-disk format changes have not been made and will not be made
+unless an administrator moves the feature to the
+.Sy enabled
+state.
+Features cannot be disabled once they have been enabled.
+.El
+.Pp
+The state of supported features is exposed through pool properties of the form
+.Sy feature@short_name .
+.Ss Read\-only compatibility
+Some features may make on\-disk format changes that do not interfere with other
+software's ability to read from the pool.
+These features are referred to as "read\-only compatible".
+If all unsupported features on a pool are read\-only compatible, the pool can
+be imported in read\-only mode by setting the
+.Sy readonly
+property during import (see
+.Xr zpool 8
+for details on importing pools).
+.Ss Unsupported features
+For each unsupported feature enabled on an imported pool a pool property
+named
+.Sy unsupported@feature_guid
+will indicate why the import was allowed despite the unsupported feature.
+Possible values for this property are:
+.Bl -tag -width "XXXXXXXX"
+.It Sy inactive
+The feature is in the
+.Sy enabled
+state and therefore the pool's on\-disk format is still compatible with
+software that does not support this feature.
+.It Sy readonly
+The feature is read\-only compatible and the pool has been imported in
+read\-only mode.
+.El
+.Ss Feature dependencies
+Some features depend on other features being enabled in order to function
+properly.
+Enabling a feature will automatically enable any features it depends on.
+.Sh FEATURES
+The following features are supported on this system:
+.Bl -tag -width "XXXXXXXX"
+.It Sy async_destroy
+.Bl -column "READ\-ONLY COMPATIBLE" "com.delphix:async_destroy"
+.It GUID Ta com.delphix:async_destroy
+.It READ\-ONLY COMPATIBLE Ta yes
+.It DEPENDENCIES Ta none
+.El
+.Pp
+Destroying a file system requires traversing all of its data in order to
+return its used space to the pool.
+Without
+.Sy async_destroy
+the file system is not fully removed until all space has been reclaimed.
+If the destroy operation is interrupted by a reboot or power outage the next
+attempt to open the pool will need to complete the destroy operation
+synchronously.
+.Pp
+When
+.Sy async_destroy
+is enabled the file system's data will be reclaimed by a background process,
+allowing the destroy operation to complete without traversing the entire file
+system.
+The background process is able to resume interrupted destroys after the pool
+has been opened, eliminating the need to finish interrupted destroys as part
+of the open operation.
+The amount of space remaining to be reclaimed by the background process is
+available through the
+.Sy freeing
+property.
+.Pp
+This feature is only
+.Sy active
+while
+.Sy freeing
+is non\-zero.
+.It Sy empty_bpobj
+.Bl -column "READ\-ONLY COMPATIBLE" "com.delphix:empty_bpobj"
+.It GUID Ta com.delphix:empty_bpobj
+.It READ\-ONLY COMPATIBLE Ta yes
+.It DEPENDENCIES Ta none
+.El
+.Pp
+This feature increases the performance of creating and using a large number
+of snapshots of a single filesystem or volume, and also reduces the disk
+space required.
+.Pp
+When there are many snapshots, each snapshot uses many Block Pointer Objects
+.Pq bpobj's
+to track blocks associated with that snapshot.
+However, in common use cases, most of these bpobj's are empty.
+This feature allows us to create each bpobj on-demand, thus eliminating the
+empty bpobjs.
+.Pp
+This feature is
+.Sy active
+while there are any filesystems, volumes, or snapshots which were created
+after enabling this feature.
+.El
+.Sh SEE ALSO
+.Xr zpool 8
+.Sh AUTHORS
+This manual page is a
+.Xr mdoc 7
+reimplementation of the
+.Tn illumos
+manual page
+.Em zpool-features(5) ,
+modified and customized for
+.Fx
+and licensed under the Common Development and Distribution License
+.Pq Tn CDDL .
+.Pp
+The
+.Xr mdoc 7
+implementation of this manual page was initially written by
+.An Martin Matuska Aq mm@FreeBSD.org .
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool.8 b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool.8
new file mode 100644
index 0000000000000000000000000000000000000000..4c82741ab0e1d79b32b96b1ed446d089db234ad5
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool.8
@@ -0,0 +1,1947 @@
+'\" te
+.\" Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>.
+.\" All Rights Reserved.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" Copyright (c) 2010, Sun Microsystems, Inc. All Rights Reserved.
+.\" Copyright 2011, Nexenta Systems, Inc. All Rights Reserved.
+.\" Copyright (c) 2011, Justin T. Gibbs <gibbs@FreeBSD.org>
+.\" Copyright (c) 2012 by Delphix. All Rights Reserved.
+.\" Copyright (c) 2012, Glen Barber <gjb@FreeBSD.org>
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 15, 2012
+.Dt ZPOOL 8
+.Os
+.Sh NAME
+.Nm zpool
+.Nd configures ZFS storage pools
+.Sh SYNOPSIS
+.Nm
+.Op Fl \&?
+.Nm
+.Cm add
+.Op Fl fn
+.Ar pool vdev ...
+.Nm
+.Cm attach
+.Op Fl f
+.Ar pool device new_device
+.Nm
+.Cm clear
+.Op Fl F Op Fl n
+.Ar pool
+.Op Ar device
+.Nm
+.Cm create
+.Op Fl fnd
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Op Fl O Ar file-system-property Ns = Ns Ar value
+.Ar ...
+.Op Fl m Ar mountpoint
+.Op Fl R Ar root
+.Ar pool vdev ...
+.Nm
+.Cm destroy
+.Op Fl f
+.Ar pool
+.Nm
+.Cm detach
+.Ar pool device
+.Nm
+.Cm export
+.Op Fl f
+.Ar pool ...
+.Nm
+.Cm get
+.Ar all | property Ns Op , Ns Ar ...
+.Ar pool ...
+.Nm
+.Cm history
+.Op Fl il
+.Op Ar pool
+.Ar ...
+.Nm
+.Cm import
+.Op Fl d Ar dir | Fl c Ar cachefile
+.Op Fl D
+.Nm
+.Cm import
+.Op Fl o Ar mntopts
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Op Fl d Ar dir | Fl c Ar cachefile
+.Op Fl D
+.Op Fl f
+.Op Fl m
+.Op Fl N
+.Op Fl R Ar root
+.Op Fl F Op Fl n
+.Fl a
+.Nm
+.Cm import
+.Op Fl o Ar mntopts
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Op Fl d Ar dir | Fl c Ar cachefile
+.Op Fl D
+.Op Fl f
+.Op Fl m
+.Op Fl N
+.Op Fl R Ar root
+.Op Fl F Op Fl n
+.Ar pool | id
+.Op Ar newpool
+.Nm
+.Cm iostat
+.Op Fl T Cm d Ns | Ns Cm u
+.Op Fl v
+.Op Ar pool
+.Ar ...
+.Nm
+.Cm labelclear
+.Op Fl f
+.Ar device
+.Nm
+.Cm list
+.Op Fl H
+.Op Fl o Ar property Ns Op , Ns Ar ...
+.Op Fl T Cm d Ns | Ns Cm u
+.Op Ar pool
+.Ar ...
+.Op Ar interval Op Ar count
+.Nm
+.Cm offline
+.Op Fl t
+.Ar pool device ...
+.Nm
+.Cm online
+.Op Fl e
+.Ar pool device ...
+.Nm
+.Cm reguid
+.Ar pool
+.Nm
+.Cm remove
+.Ar pool device ...
+.Nm
+.Cm replace
+.Op Fl f
+.Ar pool device
+.Op Ar new_device
+.Nm
+.Cm scrub
+.Op Fl s
+.Ar pool ...
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value pool
+.Nm
+.Cm split
+.Op Fl n
+.Op Fl R Ar altroot
+.Op Fl o Ar mntopts
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar pool newpool
+.Op Ar device ...
+.Nm
+.Cm status
+.Op Fl vx
+.Op Fl T Cm d Ns | Ns Cm u
+.Op Ar pool
+.Ar ...
+.Op Ar interval Op Ar count
+.Nm
+.Cm upgrade
+.Op Fl v
+.Nm
+.Cm upgrade
+.Op Fl V Ar version
+.Fl a | Ar pool ...
+.Sh DESCRIPTION
+The
+.Nm
+command configures
+.Tn ZFS
+storage pools. A storage pool is a collection of devices that provides physical
+storage and data replication for
+.Tn ZFS
+datasets.
+.Pp
+All datasets within a storage pool share the same space. See
+.Xr zfs 8
+for information on managing datasets.
+.Ss Virtual Devices (vdevs)
+A
+.Qq virtual device
+.Pq No vdev
+describes a single device or a collection of devices organized according to
+certain performance and fault characteristics. The following virtual devices
+are supported:
+.Bl -tag -width "XXXXXX"
+.It Sy disk
+A block device, typically located under
+.Pa /dev .
+.Tn ZFS
+can use individual slices or partitions, though the recommended mode of
+operation is to use whole disks. A disk can be specified by a full path to the
+device or the
+.Xr geom 4
+provider name. When given a whole disk,
+.Tn ZFS
+automatically labels the disk, if necessary.
+.It Sy file
+A regular file. The use of files as a backing store is strongly discouraged. It
+is designed primarily for experimental purposes, as the fault tolerance of a
+file is only as good the file system of which it is a part. A file must be
+specified by a full path.
+.It Sy mirror
+A mirror of two or more devices. Data is replicated in an identical fashion
+across all components of a mirror. A mirror with
+.Em N
+disks of size
+.Em X
+can hold
+.Em X
+bytes and can withstand
+.Pq Em N-1
+devices failing before data integrity is compromised.
+.It Sy raidz
+(or
+.Sy raidz1 raidz2 raidz3 ) .
+A variation on
+.Sy RAID-5
+that allows for better distribution of parity and eliminates the
+.Qq Sy RAID-5
+write hole (in which data and parity become inconsistent after a power loss).
+Data and parity is striped across all disks within a
+.No raidz
+group.
+.Pp
+A
+.No raidz
+group can have single-, double-, or triple-parity, meaning that the
+.No raidz
+group can sustain one, two, or three failures, respectively, without
+losing any data. The
+.Sy raidz1 No vdev
+type specifies a single-parity
+.No raidz
+group; the
+.Sy raidz2 No vdev
+type specifies a double-parity
+.No raidz
+group; and the
+.Sy raidz3 No vdev
+type specifies a triple-parity
+.No raidz
+group. The
+.Sy raidz No vdev
+type is an alias for
+.Sy raidz1 .
+.Pp
+A
+.No raidz
+group with
+.Em N
+disks of size
+.Em X
+with
+.Em P
+parity disks can hold approximately
+.Sm off
+.Pq Em N-P
+*X
+.Sm on
+bytes and can withstand
+.Em P
+device(s) failing before data integrity is compromised. The minimum number of
+devices in a
+.No raidz
+group is one more than the number of parity disks. The
+recommended number is between 3 and 9 to help increase performance.
+.It Sy spare
+A special
+.No pseudo- Ns No vdev
+which keeps track of available hot spares for a pool.
+For more information, see the
+.Qq Sx Hot Spares
+section.
+.It Sy log
+A separate-intent log device. If more than one log device is specified, then
+writes are load-balanced between devices. Log devices can be mirrored. However,
+.No raidz
+.No vdev
+types are not supported for the intent log. For more information,
+see the
+.Qq Sx Intent Log
+section.
+.It Sy cache
+A device used to cache storage pool data. A cache device cannot be configured
+as a mirror or
+.No raidz
+group. For more information, see the
+.Qq Sx Cache Devices
+section.
+.El
+.Pp
+Virtual devices cannot be nested, so a mirror or
+.No raidz
+virtual device can only
+contain files or disks. Mirrors of mirrors (or other combinations) are not
+allowed.
+.Pp
+A pool can have any number of virtual devices at the top of the configuration
+(known as
+.Qq root
+.No vdev Ns s).
+Data is dynamically distributed across all top-level devices to balance data
+among devices. As new virtual devices are added,
+.Tn ZFS
+automatically places data on the newly available devices.
+.Pp
+Virtual devices are specified one at a time on the command line, separated by
+whitespace. The keywords
+.Qq mirror
+and
+.Qq raidz
+are used to distinguish where a group ends and another begins. For example, the
+following creates two root
+.No vdev Ns s,
+each a mirror of two disks:
+.Bd -literal -offset 2n
+.Li # Ic zpool create mypool mirror da0 da1 mirror da2 da3
+.Ed
+.Ss Device Failure and Recovery
+.Tn ZFS
+supports a rich set of mechanisms for handling device failure and data
+corruption. All metadata and data is checksummed, and
+.Tn ZFS
+automatically repairs bad data from a good copy when corruption is detected.
+.Pp
+In order to take advantage of these features, a pool must make use of some form
+of redundancy, using either mirrored or
+.No raidz
+groups. While
+.Tn ZFS
+supports running in a non-redundant configuration, where each root
+.No vdev
+is simply a disk or file, this is strongly discouraged. A single case of bit
+corruption can render some or all of your data unavailable.
+.Pp
+A pool's health status is described by one of three states: online, degraded,
+or faulted. An online pool has all devices operating normally. A degraded pool
+is one in which one or more devices have failed, but the data is still
+available due to a redundant configuration. A faulted pool has corrupted
+metadata, or one or more faulted devices, and insufficient replicas to continue
+functioning.
+.Pp
+The health of the top-level
+.No vdev ,
+such as mirror or
+.No raidz
+device, is
+potentially impacted by the state of its associated
+.No vdev Ns s,
+or component devices. A top-level
+.No vdev
+or component device is in one of the following states:
+.Bl -tag -width "DEGRADED"
+.It Sy DEGRADED
+One or more top-level
+.No vdev Ns s
+is in the degraded state because one or more
+component devices are offline. Sufficient replicas exist to continue
+functioning.
+.Pp
+One or more component devices is in the degraded or faulted state, but
+sufficient replicas exist to continue functioning. The underlying conditions
+are as follows:
+.Bl -bullet -offset 2n
+.It
+The number of checksum errors exceeds acceptable levels and the device is
+degraded as an indication that something may be wrong.
+.Tn ZFS
+continues to use the device as necessary.
+.It
+The number of
+.Tn I/O
+errors exceeds acceptable levels. The device could not be
+marked as faulted because there are insufficient replicas to continue
+functioning.
+.El
+.It Sy FAULTED
+One or more top-level
+.No vdev Ns s
+is in the faulted state because one or more
+component devices are offline. Insufficient replicas exist to continue
+functioning.
+.Pp
+One or more component devices is in the faulted state, and insufficient
+replicas exist to continue functioning. The underlying conditions are as
+follows:
+.Bl -bullet -offset 2n
+.It
+The device could be opened, but the contents did not match expected values.
+.It
+The number of
+.Tn I/O
+errors exceeds acceptable levels and the device is faulted to
+prevent further use of the device.
+.El
+.It Sy OFFLINE
+The device was explicitly taken offline by the
+.Qq Nm Cm offline
+command.
+.It Sy ONLINE
+The device is online and functioning.
+.It Sy REMOVED
+The device was physically removed while the system was running. Device removal
+detection is hardware-dependent and may not be supported on all platforms.
+.It Sy UNAVAIL
+The device could not be opened. If a pool is imported when a device was
+unavailable, then the device will be identified by a unique identifier instead
+of its path since the path was never correct in the first place.
+.El
+.Pp
+If a device is removed and later reattached to the system,
+.Tn ZFS
+attempts to put the device online automatically. Device attach detection is
+hardware-dependent and might not be supported on all platforms.
+.Ss Hot Spares
+.Tn ZFS
+allows devices to be associated with pools as
+.Qq hot spares .
+These devices are not actively used in the pool, but when an active device
+fails, it is automatically replaced by a hot spare. To create a pool with hot
+spares, specify a
+.Qq spare
+.No vdev
+with any number of devices. For example,
+.Bd -literal -offset 2n
+.Li # Ic zpool create pool mirror da0 da1 spare da2 da3
+.Ed
+.Pp
+Spares can be shared across multiple pools, and can be added with the
+.Qq Nm Cm add
+command and removed with the
+.Qq Nm Cm remove
+command. Once a spare replacement is initiated, a new "spare"
+.No vdev
+is created
+within the configuration that will remain there until the original device is
+replaced. At this point, the hot spare becomes available again if another
+device fails.
+.Pp
+If a pool has a shared spare that is currently being used, the pool can not be
+exported since other pools may use this shared spare, which may lead to
+potential data corruption.
+.Pp
+An in-progress spare replacement can be cancelled by detaching the hot spare.
+If the original faulted device is detached, then the hot spare assumes its
+place in the configuration, and is removed from the spare list of all active
+pools.
+.Pp
+Spares cannot replace log devices.
+.Ss Intent Log
+The
+.Tn ZFS
+Intent Log
+.Pq Tn ZIL
+satisfies
+.Tn POSIX
+requirements for synchronous transactions. For instance, databases often
+require their transactions to be on stable storage devices when returning from
+a system call.
+.Tn NFS
+and other applications can also use
+.Xr fsync 2
+to ensure data stability. By default, the intent log is allocated from blocks
+within the main pool. However, it might be possible to get better performance
+using separate intent log devices such as
+.Tn NVRAM
+or a dedicated disk. For example:
+.Bd -literal -offset 2n
+.Li # Ic zpool create pool da0 da1 log da2
+.Ed
+.Pp
+Multiple log devices can also be specified, and they can be mirrored. See the
+.Sx EXAMPLES
+section for an example of mirroring multiple log devices.
+.Pp
+Log devices can be added, replaced, attached, detached, imported and exported
+as part of the larger pool. Mirrored log devices can be removed by specifying
+the top-level mirror for the log.
+.Ss Cache devices
+Devices can be added to a storage pool as "cache devices." These devices
+provide an additional layer of caching between main memory and disk. For
+read-heavy workloads, where the working set size is much larger than what can
+be cached in main memory, using cache devices allows much more of this working
+set to be served from low latency media. Using cache devices provides the
+greatest performance improvement for random read-workloads of mostly static
+content.
+.Pp
+To create a pool with cache devices, specify a "cache"
+.No vdev
+with any number of devices. For example:
+.Bd -literal -offset 2n
+.Li # Ic zpool create pool da0 da1 cache da2 da3
+.Ed
+.Pp
+Cache devices cannot be mirrored or part of a
+.No raidz
+configuration. If a read
+error is encountered on a cache device, that read
+.Tn I/O
+is reissued to the original storage pool device, which might be part of a
+mirrored or
+.No raidz
+configuration.
+.Pp
+The content of the cache devices is considered volatile, as is the case with
+other system caches.
+.Ss Properties
+Each pool has several properties associated with it. Some properties are
+read-only statistics while others are configurable and change the behavior of
+the pool. The following are read-only properties:
+.Bl -tag -width "dedupratio"
+.It Sy alloc
+Amount of storage space within the pool that has been physically allocated.
+.It Sy capacity
+Percentage of pool space used. This property can also be referred to by its
+shortened column name, "cap".
+.It Sy comment
+A text string consisting of printable ASCII characters that will be stored
+such that it is available even if the pool becomes faulted.  An administrator
+can provide additional information about a pool using this property.
+.It Sy dedupratio
+The deduplication ratio specified for a pool, expressed as a multiplier.
+For example, a
+.Sy dedupratio
+value of 1.76 indicates that 1.76 units of data were stored but only 1 unit of disk space was actually consumed. See
+.Xr zfs 8
+for a description of the deduplication feature.
+.It Sy free
+Number of blocks within the pool that are not allocated.
+.It Sy freeing
+After a file system or snapshot is destroyed, the space it was using is
+returned to the pool asynchronously.
+.Sy freeing
+is the amount of space remaining to be reclaimed.
+Over time
+.Sy freeing
+will decrease while
+.Sy free
+increases.
+.It Sy expandsize
+This property currently has no value on FreeBSD.
+.It Sy guid
+A unique identifier for the pool.
+.It Sy health
+The current health of the pool. Health can be
+.Qq Sy ONLINE ,
+.Qq Sy DEGRADED ,
+.Qq Sy FAULTED ,
+.Qq Sy OFFLINE ,
+.Qq Sy REMOVED ,
+or
+.Qq Sy UNAVAIL .
+.It Sy size
+Total size of the storage pool.
+.It Sy unsupported@ Ns Ar feature_guid
+Information about unsupported features that are enabled on the pool.
+See
+.Xr zpool-features 7
+for details.
+.It Sy used
+Amount of storage space used within the pool.
+.El
+.Pp
+The space usage properties report actual physical space available to the
+storage pool. The physical space can be different from the total amount of
+space that any contained datasets can actually use. The amount of space used in
+a
+.No raidz
+configuration depends on the characteristics of the data being written.
+In addition,
+.Tn ZFS
+reserves some space for internal accounting that the
+.Xr zfs 8
+command takes into account, but the
+.Xr zpool 8
+command does not. For non-full pools of a reasonable size, these effects should
+be invisible. For small pools, or pools that are close to being completely
+full, these discrepancies may become more noticeable.
+.Pp
+The following property can be set at creation time and import time:
+.Bl -tag -width 2n
+.It Sy altroot
+Alternate root directory. If set, this directory is prepended to any mount
+points within the pool. This can be used when examining an unknown pool where
+the mount points cannot be trusted, or in an alternate boot environment, where
+the typical paths are not valid.
+.Sy altroot
+is not a persistent property. It is valid only while the system is up.
+Setting
+.Sy altroot
+defaults to using
+.Cm cachefile=none ,
+though this may be overridden using an explicit setting.
+.El
+.Pp
+The following property can only be set at import time:
+.Bl -tag -width 2n
+.It Sy readonly Ns = Ns Cm on No | Cm off
+If set to
+.Cm on ,
+pool will be imported in read-only mode with the following restrictions:
+.Bl -bullet -offset 2n
+.It
+Synchronous data in the intent log will not be accessible
+.It
+Properties of the pool can not be changed
+.It
+Datasets of this pool can only be mounted read-only
+.It
+To write to a read-only pool, an export and import of the pool is required.
+.El
+.El
+.Pp
+The following properties can be set at creation time and import time, and later
+changed with the
+.Ic zpool set
+command:
+.Bl -tag -width 2n
+.It Sy autoexpand Ns = Ns Cm on No | Cm off
+Controls automatic pool expansion when the underlying LUN is grown. If set to
+.Qq Cm on ,
+the pool will be resized according to the size of the expanded
+device. If the device is part of a mirror or
+.No raidz
+then all devices within that
+.No mirror/ Ns No raidz
+group must be expanded before the new space is made available to
+the pool. The default behavior is
+.Qq off .
+This property can also be referred to by its shortened column name,
+.Sy expand .
+.It Sy autoreplace Ns = Ns Cm on No | Cm off
+Controls automatic device replacement. If set to
+.Qq Cm off ,
+device replacement must be initiated by the administrator by using the
+.Qq Nm Cm replace
+command. If set to
+.Qq Cm on ,
+any new device, found in the same
+physical location as a device that previously belonged to the pool, is
+automatically formatted and replaced. The default behavior is
+.Qq Cm off .
+This property can also be referred to by its shortened column name, "replace".
+.It Sy bootfs Ns = Ns Ar pool Ns / Ns Ar dataset
+Identifies the default bootable dataset for the root pool. This property is
+expected to be set mainly by the installation and upgrade programs.
+.It Sy cachefile Ns = Ns Ar path No | Cm none
+Controls the location of where the pool configuration is cached. Discovering
+all pools on system startup requires a cached copy of the configuration data
+that is stored on the root file system. All pools in this cache are
+automatically imported when the system boots. Some environments, such as
+install and clustering, need to cache this information in a different location
+so that pools are not automatically imported. Setting this property caches the
+pool configuration in a different location that can later be imported with
+.Qq Nm Cm import Fl c .
+Setting it to the special value
+.Qq Cm none
+creates a temporary pool that is never cached, and the special value
+.Cm ''
+(empty string) uses the default location.
+.It Sy comment Ns = Ns Ar text
+A text string consisting of printable ASCII characters that will be stored
+such that it is available even if the pool becomes faulted.
+An administrator can provide additional information about a pool using this
+property.
+.It Sy dedupditto Ns = Ns Ar number
+Threshold for the number of block ditto copies. If the reference count for a
+deduplicated block increases above this number, a new ditto copy of this block
+is automatically stored. Default setting is
+.Cm 0 .
+.It Sy delegation Ns = Ns Cm on No | Cm off
+Controls whether a non-privileged user is granted access based on the dataset
+permissions defined on the dataset. See
+.Xr zfs 8
+for more information on
+.Tn ZFS
+delegated administration.
+.It Sy failmode Ns = Ns Cm wait No | Cm continue No | Cm panic
+Controls the system behavior in the event of catastrophic pool failure. This
+condition is typically a result of a loss of connectivity to the underlying
+storage device(s) or a failure of all devices within the pool. The behavior of
+such an event is determined as follows:
+.Bl -tag -width indent
+.It Sy wait
+Blocks all
+.Tn I/O
+access until the device connectivity is recovered and the errors are cleared.
+This is the default behavior.
+.It Sy continue
+Returns
+.Em EIO
+to any new write
+.Tn I/O
+requests but allows reads to any of the remaining healthy devices. Any write
+requests that have yet to be committed to disk would be blocked.
+.It Sy panic
+Prints out a message to the console and generates a system crash dump.
+.El
+.It Sy feature@ Ns Ar feature_name Ns = Ns Sy enabled
+The value of this property is the current state of
+.Ar feature_name .
+The only valid value when setting this property is
+.Sy enabled
+which moves
+.Ar feature_name
+to the enabled state.
+See
+.Xr zpool-features 7
+for details on feature states.
+.It Sy listsnaps Ns = Ns Cm on No | Cm off
+Controls whether information about snapshots associated with this pool is
+output when
+.Qq Nm zfs Cm list
+is run without the
+.Fl t
+option. The default value is
+.Cm off .
+.It Sy version Ns = Ns Ar version
+The current on-disk version of the pool. This can be increased, but never
+decreased. The preferred method of updating pools is with the
+.Qq Nm Cm upgrade
+command, though this property can be used when a specific version is needed
+for backwards compatibility.
+Once feature flags are enabled on a pool, this property will no longer have a
+value.
+.El
+.Sh SUBCOMMANDS
+All subcommands that modify state are logged persistently to the pool in their
+original form.
+.Pp
+The
+.Nm
+command provides subcommands to create and destroy storage pools, add capacity
+to storage pools, and provide information about the storage pools. The following
+subcommands are supported:
+.Bl -tag -width 2n
+.It Xo
+.Nm
+.Op Fl \&?
+.Xc
+.Pp
+Displays a help message.
+.It Xo
+.Nm
+.Cm add
+.Op Fl fn
+.Ar pool vdev ...
+.Xc
+.Pp
+Adds the specified virtual devices to the given pool. The
+.No vdev
+specification is described in the
+.Qq Sx Virtual Devices
+section. The behavior of the
+.Fl f
+option, and the device checks performed are described in the
+.Qq Nm Cm create
+subcommand.
+.Bl -tag -width indent
+.It Fl f
+Forces use of
+.Ar vdev ,
+even if they appear in use or specify a conflicting replication level.
+Not all devices can be overridden in this manner.
+.It Fl n
+Displays the configuration that would be used without actually adding the
+.Ar vdev Ns s.
+The actual pool creation can still fail due to insufficient privileges or device
+sharing.
+.Pp
+Do not add a disk that is currently configured as a quorum device to a zpool.
+After a disk is in the pool, that disk can then be configured as a quorum
+device.
+.El
+.It Xo
+.Nm
+.Cm attach
+.Op Fl f
+.Ar pool device new_device
+.Xc
+.Pp
+Attaches
+.Ar new_device
+to an existing
+.Sy zpool
+device. The existing device cannot be part of a
+.No raidz
+configuration. If
+.Ar device
+is not currently part of a mirrored configuration,
+.Ar device
+automatically transforms into a two-way mirror of
+.Ar device No and Ar new_device .
+If
+.Ar device
+is part of a two-way mirror, attaching
+.Ar new_device
+creates a three-way mirror, and so on. In either case,
+.Ar new_device
+begins to resilver immediately.
+.Bl -tag -width indent
+.It Fl f
+Forces use of
+.Ar new_device ,
+even if it appears to be in use. Not all devices can be overridden in this
+manner.
+.El
+.It Xo
+.Nm
+.Cm clear
+.Op Fl F Op Fl n
+.Ar pool
+.Op Ar device
+.Xc
+.Pp
+Clears device errors in a pool. If no arguments are specified, all device
+errors within the pool are cleared. If one or more devices is specified, only
+those errors associated with the specified device or devices are cleared.
+.Bl -tag -width indent
+.It Fl F
+Initiates recovery mode for an unopenable pool. Attempts to discard the last
+few transactions in the pool to return it to an openable state. Not all damaged
+pools can be recovered by using this option. If successful, the data from the
+discarded transactions is irretrievably lost.
+.It Fl n
+Used in combination with the
+.Fl F
+flag. Check whether discarding transactions would make the pool openable, but
+do not actually discard any transactions.
+.El
+.It Xo
+.Nm
+.Cm create
+.Op Fl fnd
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Op Fl O Ar file-system-property Ns = Ns Ar value
+.Ar ...
+.Op Fl m Ar mountpoint
+.Op Fl R Ar root
+.Ar pool vdev ...
+.Xc
+.Pp
+Creates a new storage pool containing the virtual devices specified on the
+command line. The pool name must begin with a letter, and can only contain
+alphanumeric characters as well as underscore ("_"), dash ("-"), and period
+("."). The pool names "mirror", "raidz", "spare" and "log" are reserved, as are
+names beginning with the pattern "c[0-9]". The
+.No vdev
+specification is described in the
+.Qq Sx Virtual Devices
+section.
+.Pp
+The command verifies that each device specified is accessible and not currently
+in use by another subsystem. There are some uses, such as being currently
+mounted, or specified as the dedicated dump device, that prevents a device from
+ever being used by
+.Tn ZFS .
+Other uses, such as having a preexisting
+.Sy UFS
+file system, can be overridden with the
+.Fl f
+option.
+.Pp
+The command also checks that the replication strategy for the pool is
+consistent. An attempt to combine redundant and non-redundant storage in a
+single pool, or to mix disks and files, results in an error unless
+.Fl f
+is specified. The use of differently sized devices within a single
+.No raidz
+or mirror group is also flagged as an error unless
+.Fl f
+is specified.
+.Pp
+Unless the
+.Fl R
+option is specified, the default mount point is
+.Qq Pa /pool .
+The mount point must not exist or must be empty, or else the
+root dataset cannot be mounted. This can be overridden with the
+.Fl m
+option.
+.Pp
+By default all supported features are enabled on the new pool unless the
+.Fl d
+option is specified.
+.Bl -tag -width indent
+.It Fl f
+Forces use of
+.Ar vdev Ns s,
+even if they appear in use or specify a conflicting replication level.
+Not all devices can be overridden in this manner.
+.It Fl n
+Displays the configuration that would be used without actually creating the
+pool. The actual pool creation can still fail due to insufficient privileges or
+device sharing.
+.It Fl d
+Do not enable any features on the new pool.
+Individual features can be enabled by setting their corresponding properties
+to
+.Sy enabled
+with the
+.Fl o
+option.
+See
+.Xr zpool-features 7
+for details about feature properties.
+.It Xo
+.Fl o Ar property Ns = Ns Ar value
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Xc
+Sets the given pool properties. See the
+.Qq Sx Properties
+section for a list of valid properties that can be set.
+.It Xo
+.Fl O
+.Ar file-system-property Ns = Ns Ar value
+.Op Fl O Ar file-system-property Ns = Ns Ar value
+.Ar ...
+.Xc
+Sets the given file system properties in the root file system of the pool. See
+.Xr zfs 8 Properties
+for a list of valid properties that
+can be set.
+.It Fl R Ar root
+Equivalent to
+.Qq Fl o Cm cachefile=none,altroot= Ns Pa root
+.It Fl m Ar mountpoint
+Sets the mount point for the root dataset. The default mount point is
+.Qq Pa /pool
+or
+.Qq Cm altroot Ns Pa /pool
+if
+.Sy altroot
+is specified. The mount point must be an absolute path,
+.Qq Cm legacy ,
+or
+.Qq Cm none .
+For more information on dataset mount points, see
+.Xr zfs 8 .
+.El
+.It Xo
+.Nm
+.Cm destroy
+.Op Fl f
+.Ar pool
+.Xc
+.Pp
+Destroys the given pool, freeing up any devices for other use. This command
+tries to unmount any active datasets before destroying the pool.
+.Bl -tag -width indent
+.It Fl f
+Forces any active datasets contained within the pool to be unmounted.
+.El
+.It Xo
+.Nm
+.Cm detach
+.Ar pool device
+.Xc
+.Pp
+Detaches
+.Ar device
+from a mirror. The operation is refused if there are no other valid replicas
+of the data.
+.It Xo
+.Nm
+.Cm export
+.Op Fl f
+.Ar pool ...
+.Xc
+.Pp
+Exports the given pools from the system. All devices are marked as exported,
+but are still considered in use by other subsystems. The devices can be moved
+between systems (even those of different endianness) and imported as long as a
+sufficient number of devices are present.
+.Pp
+Before exporting the pool, all datasets within the pool are unmounted. A pool
+can not be exported if it has a shared spare that is currently being used.
+.Pp
+For pools to be portable, you must give the
+.Nm
+command whole disks, not just slices, so that
+.Tn ZFS
+can label the disks with portable
+.Sy EFI
+labels. Otherwise, disk drivers on platforms of different endianness will not
+recognize the disks.
+.Bl -tag -width indent
+.It Fl f
+Forcefully unmount all datasets, using the
+.Qq Nm unmount Fl f
+command.
+.Pp
+This command will forcefully export the pool even if it has a shared spare that
+is currently being used. This may lead to potential data corruption.
+.El
+.It Xo
+.Nm
+.Cm get
+.Ar all | property Ns Op , Ns Ar ...
+.Ar pool ...
+.Xc
+.Pp
+Retrieves the given list of properties (or all properties if
+.Qq Cm all
+is used) for the specified storage pool(s). These properties are displayed with
+the following fields:
+.Bl -column -offset indent "property"
+.It name Ta Name of storage pool
+.It property Ta Property name
+.It value Ta Property value
+.It source Ta Property source, either 'default' or 'local'.
+.El
+.Pp
+See the
+.Qq Sx Properties
+section for more information on the available pool properties.
+.It Xo
+.Nm
+.Cm history
+.Op Fl il
+.Op Ar pool
+.Ar ...
+.Xc
+.Pp
+Displays the command history of the specified pools or all pools if no pool is
+specified.
+.Bl -tag -width indent
+.It Fl i
+Displays internally logged
+.Tn ZFS
+events in addition to user initiated events.
+.It Fl l
+Displays log records in long format, which in addition to standard format
+includes, the user name, the hostname, and the zone in which the operation was
+performed.
+.El
+.It Xo
+.Nm
+.Cm import
+.Op Fl d Ar dir | Fl c Ar cachefile
+.Op Fl D
+.Xc
+.Pp
+Lists pools available to import. If the
+.Fl d
+option is not specified, this command searches for devices in
+.Qq Pa /dev .
+The
+.Fl d
+option can be specified multiple times, and all directories are searched. If
+the device appears to be part of an exported pool, this command displays a
+summary of the pool with the name of the pool, a numeric identifier, as well as
+the
+.No vdev
+layout and current health of the device for each device or file.
+Destroyed pools, pools that were previously destroyed with the
+.Qq Nm Cm destroy
+command, are not listed unless the
+.Fl D
+option is specified.
+.Pp
+The numeric identifier is unique, and can be used instead of the pool name when
+multiple exported pools of the same name are available.
+.Bl -tag -width indent
+.It Fl c Ar cachefile
+Reads configuration from the given
+.Ar cachefile
+that was created with the
+.Qq Sy cachefile
+pool property. This
+.Ar cachefile
+is used instead of searching for devices.
+.It Fl d Ar dir
+Searches for devices or files in
+.Ar dir .
+The
+.Fl d
+option can be specified multiple times.
+.It Fl D
+Lists destroyed pools only.
+.El
+.It Xo
+.Nm
+.Cm import
+.Op Fl o Ar mntopts
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Op Fl d Ar dir | Fl c Ar cachefile
+.Op Fl D
+.Op Fl f
+.Op Fl m
+.Op Fl N
+.Op Fl R Ar root
+.Op Fl F Op Fl n
+.Fl a
+.Xc
+.Pp
+Imports all pools found in the search directories. Identical to the previous
+command, except that all pools with a sufficient number of devices available
+are imported. Destroyed pools, pools that were previously destroyed with the
+.Qq Nm Cm destroy
+command, will not be imported unless the
+.Fl D
+option is specified.
+.Bl -tag -width indent
+.It Fl o Ar mntopts
+Comma-separated list of mount options to use when mounting datasets within the
+pool. See
+.Xr zfs 8
+for a description of dataset properties and mount options.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property on the imported pool. See the
+.Qq Sx Properties
+section for more information on the available pool properties.
+.It Fl c Ar cachefile
+Reads configuration from the given
+.Ar cachefile
+that was created with the
+.Qq Sy cachefile
+pool property. This
+.Ar cachefile
+is used instead of searching for devices.
+.It Fl d Ar dir
+Searches for devices or files in
+.Ar dir .
+The
+.Fl d
+option can be specified multiple times. This option is incompatible with the
+.Fl c
+option.
+.It Fl D
+Imports destroyed pools only. The
+.Fl f
+option is also required.
+.It Fl f
+Forces import, even if the pool appears to be potentially active.
+.It Fl m
+Enables import with missing log devices.
+.It Fl N
+Do not mount any filesystems from the imported pool.
+.It Fl R Ar root
+Sets the
+.Qq Sy cachefile
+property to
+.Qq Cm none
+and the
+.Qq Sy altroot
+property to
+.Qq Ar root
+.It Fl F
+Recovery mode for a non-importable pool. Attempt to return the pool to an
+importable state by discarding the last few transactions. Not all damaged pools
+can be recovered by using this option. If successful, the data from the
+discarded transactions is irretrievably lost. This option is ignored if the
+pool is importable or already imported.
+.It Fl n
+Used with the
+.Fl F
+recovery option. Determines whether a non-importable pool can be made
+importable again, but does not actually perform the pool recovery. For more
+details about pool recovery mode, see the
+.Fl F
+option, above.
+.It Fl a
+Searches for and imports all pools found.
+.El
+.It Xo
+.Nm
+.Cm import
+.Op Fl o Ar mntopts
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar ...
+.Op Fl d Ar dir | Fl c Ar cachefile
+.Op Fl D
+.Op Fl f
+.Op Fl m
+.Op Fl N
+.Op Fl R Ar root
+.Op Fl F Op Fl n
+.Ar pool | id
+.Op Ar newpool
+.Xc
+.Pp
+Imports a specific pool. A pool can be identified by its name or the numeric
+identifier. If
+.Ar newpool
+is specified, the pool is imported using the name
+.Ar newpool .
+Otherwise, it is imported with the same name as its exported name.
+.Pp
+If a device is removed from a system without running
+.Qq Nm Cm export
+first, the device appears as potentially active. It cannot be determined if
+this was a failed export, or whether the device is really in use from another
+host. To import a pool in this state, the
+.Fl f
+option is required.
+.Bl -tag -width indent
+.It Fl o Ar mntopts
+Comma-separated list of mount options to use when mounting datasets within the
+pool. See
+.Xr zfs 8
+for a description of dataset properties and mount options.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property on the imported pool. See the
+.Qq Sx Properties
+section for more information on the available pool properties.
+.It Fl c Ar cachefile
+Reads configuration from the given
+.Ar cachefile
+that was created with the
+.Qq Sy cachefile
+pool property. This
+.Ar cachefile
+is used instead of searching for devices.
+.It Fl d Ar dir
+Searches for devices or files in
+.Ar dir .
+The
+.Fl d
+option can be specified multiple times. This option is incompatible with the
+.Fl c
+option.
+.It Fl D
+Imports destroyed pools only. The
+.Fl f
+option is also required.
+.It Fl f
+Forces import, even if the pool appears to be potentially active.
+.It Fl m
+Enables import with missing log devices.
+.It Fl N
+Do not mount any filesystems from the imported pool.
+.It Fl R Ar root
+Equivalent to
+.Qq Fl o Cm cachefile=none,altroot= Ns Pa root
+.It Fl F
+Recovery mode for a non-importable pool. Attempt to return the pool to an
+importable state by discarding the last few transactions. Not all damaged pools
+can be recovered by using this option. If successful, the data from the
+discarded transactions is irretrievably lost. This option is ignored if the
+pool is importable or already imported.
+.It Fl n
+Used with the
+.Fl F
+recovery option. Determines whether a non-importable pool can be made
+importable again, but does not actually perform the pool recovery. For more
+details about pool recovery mode, see the
+.Fl F
+option, above.
+.El
+.It Xo
+.Nm
+.Cm iostat
+.Op Fl T Cm d Ns | Ns Cm u
+.Op Fl v
+.Op Ar pool
+.Ar ...
+.Op Ar interval Op Ar count
+.Xc
+.Pp
+Displays
+.Tn I/O
+statistics for the given pools. When given an interval, the statistics are
+printed every
+.Ar interval
+seconds until
+.Sy Ctrl-C
+is pressed. If no
+.Ar pools
+are specified, statistics for every pool in the system is shown. If
+.Ar count
+is specified, the command exits after
+.Ar count
+reports are printed.
+.Bl -tag -width indent
+.It Fl T Cm d Ns | Ns Cm u
+Print a timestamp.
+.Pp
+Use modifier
+.Cm d
+for standard date format. See
+.Xr date 1 .
+Use modifier
+.Cm u
+for unixtime
+.Pq equals Qq Ic date +%s .
+.It Fl v
+Verbose statistics. Reports usage statistics for individual
+.No vdev Ns s
+within the pool, in addition to the pool-wide statistics.
+.El
+.It Xo
+.Nm
+.Cm labelclear
+.Op Fl f
+.Ar device
+.Xc
+.Pp
+Removes
+.Tn ZFS
+label information from the specified
+.Ar device .
+The
+.Ar device
+must not be part of an active pool configuration.
+.Bl -tag -width indent
+.It Fl f
+Treat exported or foreign devices as inactive.
+.El
+.It Xo
+.Nm
+.Cm list
+.Op Fl Hv
+.Op Fl o Ar property Ns Op , Ns Ar ...
+.Op Fl T Cm d Ns | Ns Cm u
+.Op Ar pool
+.Ar ...
+.Op Ar interval Op Ar count
+.Xc
+.Pp
+Lists the given pools along with a health status and space usage. When given no
+arguments, all pools in the system are listed.
+.Pp
+When given an interval, the output is printed every
+.Ar interval
+seconds until
+.Sy Ctrl-C
+is pressed. If
+.Ar count
+is specified, the command exits after
+.Ar count
+reports are printed.
+.Bl -tag -width indent
+.It Fl H
+Scripted mode. Do not display headers, and separate fields by a single tab
+instead of arbitrary space.
+.It Fl v
+Show more detailed information.
+.It Fl o Ar property Ns Op , Ns Ar ...
+Comma-separated list of properties to display. See the
+.Qq Sx Properties
+section for a list of valid properties. The default list is
+.Sy name ,
+.Sy size ,
+.Sy used ,
+.Sy available ,
+.Sy capacity ,
+.Sy health ,
+.Sy altroot .
+.It Fl T Cm d Ns | Ns Cm u
+Print a timestamp.
+.Pp
+Use modifier
+.Cm d
+for standard date format. See
+.Xr date 1 .
+Use modifier
+.Cm u
+for unixtime
+.Pq equals Qq Ic date +%s .
+.El
+.It Xo
+.Nm
+.Cm offline
+.Op Fl t
+.Ar pool device ...
+.Xc
+.Pp
+Takes the specified physical device offline. While the
+.Ar device
+is offline, no attempt is made to read or write to the device.
+.Bl -tag -width indent
+.It Fl t
+Temporary. Upon reboot, the specified physical device reverts to its previous
+state.
+.El
+.It Xo
+.Nm
+.Cm online
+.Op Fl e
+.Ar pool device ...
+.Xc
+.Pp
+Brings the specified physical device online.
+.Pp
+This command is not applicable to spares or cache devices.
+.Bl -tag -width indent
+.It Fl e
+Expand the device to use all available space. If the device is part of a mirror
+or
+.No raidz
+then all devices must be expanded before the new space will become
+available to the pool.
+.El
+.It Xo
+.Nm
+.Cm reguid
+.Ar pool
+.Xc
+.Pp
+Generates a new unique identifier for the pool.  You must ensure that all
+devices in this pool are online and healthy before performing this action.
+.It Xo
+.Nm
+.Cm remove
+.Ar pool device ...
+.Xc
+.Pp
+Removes the specified device from the pool. This command currently only
+supports removing hot spares, cache, and log devices. A mirrored log device can
+be removed by specifying the top-level mirror for the log. Non-log devices that
+are part of a mirrored configuration can be removed using the
+.Qq Nm Cm detach
+command. Non-redundant and
+.No raidz
+devices cannot be removed from a pool.
+.It Xo
+.Nm
+.Cm replace
+.Op Fl f
+.Ar pool device
+.Op Ar new_device
+.Xc
+.Pp
+Replaces
+.Ar old_device
+with
+.Ar new_device .
+This is equivalent to attaching
+.Ar new_device ,
+waiting for it to resilver, and then detaching
+.Ar old_device .
+.Pp
+The size of
+.Ar new_device
+must be greater than or equal to the minimum size
+of all the devices in a mirror or
+.No raidz
+configuration.
+.Pp
+.Ar new_device
+is required if the pool is not redundant. If
+.Ar new_device
+is not specified, it defaults to
+.Ar old_device .
+This form of replacement is useful after an existing disk has failed and has
+been physically replaced. In this case, the new disk may have the same
+.Pa /dev
+path as the old device, even though it is actually a different disk.
+.Tn ZFS
+recognizes this.
+.Bl -tag -width indent
+.It Fl f
+Forces use of
+.Ar new_device ,
+even if it appears to be in use. Not all devices can be overridden in this
+manner.
+.El
+.It Xo
+.Nm
+.Cm scrub
+.Op Fl s
+.Ar pool ...
+.Xc
+.Pp
+Begins a scrub. The scrub examines all data in the specified pools to verify
+that it checksums correctly. For replicated (mirror or
+.No raidz )
+devices,
+.Tn ZFS
+automatically repairs any damage discovered during the scrub. The
+.Qq Nm Cm status
+command reports the progress of the scrub and summarizes the results of the
+scrub upon completion.
+.Pp
+Scrubbing and resilvering are very similar operations. The difference is that
+resilvering only examines data that
+.Tn ZFS
+knows to be out of date (for example, when attaching a new device to a mirror
+or replacing an existing device), whereas scrubbing examines all data to
+discover silent errors due to hardware faults or disk failure.
+.Pp
+Because scrubbing and resilvering are
+.Tn I/O Ns -intensive
+operations,
+.Tn ZFS
+only allows one at a time. If a scrub is already in progress, the
+.Qq Nm Cm scrub
+command returns an error. To start a new scrub, you have to stop the old scrub
+with the
+.Qq Nm Cm scrub Fl s
+command first. If a resilver is in progress,
+.Tn ZFS
+does not allow a scrub to be started until the resilver completes.
+.Bl -tag -width indent
+.It Fl s
+Stop scrubbing.
+.El
+.It Xo
+.Nm
+.Cm set
+.Ar property Ns = Ns Ar value pool
+.Xc
+.Pp
+Sets the given property on the specified pool. See the
+.Qq Sx Properties
+section for more information on what properties can be set and acceptable
+values.
+.It Xo
+.Nm
+.Cm split
+.Op Fl n
+.Op Fl R Ar altroot
+.Op Fl o Ar mntopts
+.Op Fl o Ar property Ns = Ns Ar value
+.Ar pool newpool
+.Op Ar device ...
+.Xc
+.Pp
+Splits off one disk from each mirrored top-level
+.No vdev
+in a pool and creates a new pool from the split-off disks. The original pool
+must be made up of one or more mirrors and must not be in the process of
+resilvering. The
+.Cm split
+subcommand chooses the last device in each mirror
+.No vdev
+unless overridden by a device specification on the command line.
+.Pp
+When using a
+.Ar device
+argument,
+.Cm split
+includes the specified device(s) in a new pool and, should any devices remain
+unspecified, assigns the last device in each mirror
+.No vdev
+to that pool, as it does normally. If you are uncertain about the outcome of a
+.Cm split
+command, use the
+.Fl n
+("dry-run") option to ensure your command will have the effect you intend.
+.Bl -tag -width indent
+.It Fl R Ar altroot
+Automatically import the newly created pool after splitting, using the
+specified
+.Ar altroot
+parameter for the new pool's alternate root. See the
+.Sy altroot
+description in the
+.Qq Sx Properties
+section, above.
+.It Fl n
+Displays the configuration that would be created without actually splitting the
+pool. The actual pool split could still fail due to insufficient privileges or
+device status.
+.It Fl o Ar mntopts
+Comma-separated list of mount options to use when mounting datasets within the
+pool. See
+.Xr zfs 8
+for a description of dataset properties and mount options. Valid only in
+conjunction with the
+.Fl R
+option.
+.It Fl o Ar property Ns = Ns Ar value
+Sets the specified property on the new pool. See the
+.Qq Sx Properties
+section, above, for more information on the available pool properties.
+.El
+.It Xo
+.Nm
+.Cm status
+.Op Fl vx
+.Op Fl T Cm d Ns | Ns Cm u
+.Op Ar pool
+.Ar ...
+.Op Ar interval Op Ar count
+.Xc
+.Pp
+Displays the detailed health status for the given pools. If no
+.Ar pool
+is specified, then the status of each pool in the system is displayed. For more
+information on pool and device health, see the
+.Qq Sx Device Failure and Recovery
+section.
+.Pp
+When given an interval, the output is printed every
+.Ar interval
+seconds until
+.Sy Ctrl-C
+is pressed. If
+.Ar count
+is specified, the command exits after
+.Ar count
+reports are printed.
+.Pp
+If a scrub or resilver is in progress, this command reports the percentage done
+and the estimated time to completion. Both of these are only approximate,
+because the amount of data in the pool and the other workloads on the system
+can change.
+.Bl -tag -width indent
+.It Fl x
+Only display status for pools that are exhibiting errors or are otherwise
+unavailable.
+.It Fl v
+Displays verbose data error information, printing out a complete list of all
+data errors since the last complete pool scrub.
+.It Fl T Cm d Ns | Ns Cm u
+Print a timestamp.
+.Pp
+Use modifier
+.Cm d
+for standard date format. See
+.Xr date 1 .
+Use modifier
+.Cm u
+for unixtime
+.Pq equals Qq Ic date +%s .
+.El
+.It Xo
+.Nm
+.Cm upgrade
+.Op Fl v
+.Xc
+.Pp
+Displays pools which do not have all supported features enabled and pools
+formatted using a legacy
+.Tn ZFS
+version number.
+These pools can continue to be used, but some features may not be available.
+Use
+.Nm Cm upgrade Fl a
+to enable all features on all pools.
+.Bl -tag -width indent
+.It Fl v
+Displays legacy
+.Tn ZFS
+versions supported by the current software.
+See
+.Xr zpool-features 7
+for a description of feature flags features supported by the current software.
+.El
+.It Xo
+.Nm
+.Cm upgrade
+.Op Fl V Ar version
+.Fl a | Ar pool ...
+.Xc
+.Pp
+Enables all supported features on the given pool.
+Once this is done, the pool will no longer be accessible on systems that do
+not support feature flags.
+See
+.Xr zpool-features 7
+for details on compatibility with systems that support feature flags, but do
+not support all features enabled on the pool.
+.Bl -tag -width indent
+.It Fl a
+Enables all supported features on all pools.
+.It Fl V Ar version
+Upgrade to the specified legacy version. If the
+.Fl V
+flag is specified, no features will be enabled on the pool.
+This option can only be used to increase version number up to the last
+supported legacy version number.
+.El
+.El
+.Sh EXIT STATUS
+The following exit values are returned:
+.Bl -tag -offset 2n -width 2n
+.It 0
+Successful completion.
+.It 1
+An error occurred.
+.It 2
+Invalid command line options were specified.
+.El
+.Sh EXAMPLES
+.Bl -tag -width 0n
+.It Sy Example 1 No Creating a RAID-Z Storage Pool
+.Pp
+The following command creates a pool with a single
+.No raidz
+root
+.No vdev
+that consists of six disks.
+.Bd -literal -offset 2n
+.Li # Ic zpool create tank raidz da0 da1 da2 da3 da4 da5
+.Ed
+.It Sy Example 2 No Creating a Mirrored Storage Pool
+.Pp
+The following command creates a pool with two mirrors, where each mirror
+contains two disks.
+.Bd -literal -offset 2n
+.Li # Ic zpool create tank mirror da0 da1 mirror da2 da3
+.Ed
+.It Sy Example 3 No Creating a Tn ZFS No Storage Pool by Using Partitions
+.Pp
+The following command creates an unmirrored pool using two GPT partitions.
+.Bd -literal -offset 2n
+.Li # Ic zpool create tank da0p3 da1p3
+.Ed
+.It Sy Example 4 No Creating a Tn ZFS No Storage Pool by Using Files
+.Pp
+The following command creates an unmirrored pool using files. While not
+recommended, a pool based on files can be useful for experimental purposes.
+.Bd -literal -offset 2n
+.Li # Ic zpool create tank /path/to/file/a /path/to/file/b
+.Ed
+.It Sy Example 5 No Adding a Mirror to a Tn ZFS No Storage Pool
+.Pp
+The following command adds two mirrored disks to the pool
+.Em tank ,
+assuming the pool is already made up of two-way mirrors. The additional space
+is immediately available to any datasets within the pool.
+.Bd -literal -offset 2n
+.Li # Ic zpool add tank mirror da2 da3
+.Ed
+.It Sy Example 6 No Listing Available Tn ZFS No Storage Pools
+.Pp
+The following command lists all available pools on the system.
+.Bd -literal -offset 2n
+.Li # Ic zpool list
+NAME   SIZE  ALLOC   FREE    CAP  DEDUP  HEALTH  ALTROOT
+pool  2.70T   473G  2.24T    17%  1.00x  ONLINE  -
+test  1.98G  89.5K  1.98G     0%  1.00x  ONLINE  -
+.Ed
+.It Sy Example 7 No Listing All Properties for a Pool
+.Pp
+The following command lists all the properties for a pool.
+.Bd -literal -offset 2n
+.Li # Ic zpool get all pool
+pool  size           2.70T       -
+pool  capacity       17%         -
+pool  altroot        -           default
+pool  health         ONLINE      -
+pool  guid           2501120270416322443  default
+pool  version        28          default
+pool  bootfs         pool/root   local
+pool  delegation     on          default
+pool  autoreplace    off         default
+pool  cachefile      -           default
+pool  failmode       wait        default
+pool  listsnapshots  off         default
+pool  autoexpand     off         default
+pool  dedupditto     0           default
+pool  dedupratio     1.00x       -
+pool  free           2.24T       -
+pool  allocated      473G        -
+pool  readonly       off         -
+.Ed
+.It Sy Example 8 No Destroying a Tn ZFS No Storage Pool
+.Pp
+The following command destroys the pool
+.Qq Em tank
+and any datasets contained within.
+.Bd -literal -offset 2n
+.Li # Ic zpool destroy -f tank
+.Ed
+.It Sy Example 9 No Exporting a Tn ZFS No Storage Pool
+.Pp
+The following command exports the devices in pool
+.Em tank
+so that they can be relocated or later imported.
+.Bd -literal -offset 2n
+.Li # Ic zpool export tank
+.Ed
+.It Sy Example 10 No Importing a Tn ZFS No Storage Pool
+.Pp
+The following command displays available pools, and then imports the pool
+.Qq Em tank
+for use on the system.
+.Pp
+The results from this command are similar to the following:
+.Bd -literal -offset 2n
+.Li # Ic zpool import
+
+  pool: tank
+    id: 15451357997522795478
+ state: ONLINE
+action: The pool can be imported using its name or numeric identifier.
+config:
+
+        tank        ONLINE
+          mirror    ONLINE
+               da0  ONLINE
+               da1  ONLINE
+.Ed
+.It Xo
+.Sy Example 11
+Upgrading All
+.Tn ZFS
+Storage Pools to the Current Version
+.Xc
+.Pp
+The following command upgrades all
+.Tn ZFS
+storage pools to the current version of
+the software.
+.Bd -literal -offset 2n
+.Li # Ic zpool upgrade -a
+This system is currently running ZFS pool version 28.
+.Ed
+.It Sy Example 12 No Managing Hot Spares
+.Pp
+The following command creates a new pool with an available hot spare:
+.Bd -literal -offset 2n
+.Li # Ic zpool create tank mirror da0 da1 spare da2
+.Ed
+.Pp
+If one of the disks were to fail, the pool would be reduced to the degraded
+state. The failed device can be replaced using the following command:
+.Bd -literal -offset 2n
+.Li # Ic zpool replace tank da0 da2
+.Ed
+.Pp
+Once the data has been resilvered, the spare is automatically removed and is
+made available should another device fail. The hot spare can be permanently
+removed from the pool using the following command:
+.Bd -literal -offset 2n
+.Li # Ic zpool remove tank da2
+.Ed
+.It Xo
+.Sy Example 13
+Creating a
+.Tn ZFS
+Pool with Mirrored Separate Intent Logs
+.Xc
+.Pp
+The following command creates a
+.Tn ZFS
+storage pool consisting of two, two-way
+mirrors and mirrored log devices:
+.Bd -literal -offset 2n
+.Li # Ic zpool create pool mirror da0 da1 mirror da2 da3 log mirror da4 da5
+.Ed
+.It Sy Example 14 No Adding Cache Devices to a Tn ZFS No Pool
+.Pp
+The following command adds two disks for use as cache devices to a
+.Tn ZFS
+storage pool:
+.Bd -literal -offset 2n
+.Li # Ic zpool add pool cache da2 da3
+.Ed
+.Pp
+Once added, the cache devices gradually fill with content from main memory.
+Depending on the size of your cache devices, it could take over an hour for
+them to fill. Capacity and reads can be monitored using the
+.Cm iostat
+subcommand as follows:
+.Bd -literal -offset 2n
+.Li # Ic zpool iostat -v pool 5
+.Ed
+.It Sy Example 15 No Removing a Mirrored Log Device
+.Pp
+The following command removes the mirrored log device
+.Em mirror-2 .
+.Pp
+Given this configuration:
+.Bd -literal -offset 2n
+   pool: tank
+  state: ONLINE
+  scrub: none requested
+ config:
+
+         NAME        STATE     READ WRITE CKSUM
+         tank        ONLINE       0     0     0
+           mirror-0  ONLINE       0     0     0
+                da0  ONLINE       0     0     0
+                da1  ONLINE       0     0     0
+           mirror-1  ONLINE       0     0     0
+                da2  ONLINE       0     0     0
+                da3  ONLINE       0     0     0
+         logs
+           mirror-2  ONLINE       0     0     0
+                da4  ONLINE       0     0     0
+                da5  ONLINE       0     0     0
+.Ed
+.Pp
+The command to remove the mirrored log
+.Em mirror-2
+is:
+.Bd -literal -offset 2n
+.Li # Ic zpool remove tank mirror-2
+.Ed
+.It Sy Example 16 No Recovering a Faulted Tn ZFS No Pool
+.Pp
+If a pool is faulted but recoverable, a message indicating this state is
+provided by
+.Qq Nm Cm status
+if the pool was cached (see the
+.Fl c Ar cachefile
+argument above), or as part of the error output from a failed
+.Qq Nm Cm import
+of the pool.
+.Pp
+Recover a cached pool with the
+.Qq Nm Cm clear
+command:
+.Bd -literal -offset 2n
+.Li # Ic zpool clear -F data
+Pool data returned to its state as of Tue Sep 08 13:23:35 2009.
+Discarded approximately 29 seconds of transactions.
+.Ed
+.Pp
+If the pool configuration was not cached, use
+.Qq Nm Cm import
+with the recovery mode flag:
+.Bd -literal -offset 2n
+.Li # Ic zpool import -F data
+Pool data returned to its state as of Tue Sep 08 13:23:35 2009.
+Discarded approximately 29 seconds of transactions.
+.Ed
+.El
+.Sh SEE ALSO
+.Xr zpool-features 7 ,
+.Xr zfs 8
+.Sh AUTHORS
+This manual page is a
+.Xr mdoc 7
+reimplementation of the
+.Tn OpenSolaris
+manual page
+.Em zpool(1M) ,
+modified and customized for
+.Fx
+and licensed under the Common Development and Distribution License
+.Pq Tn CDDL .
+.Pp
+The
+.Xr mdoc 7
+implementation of this manual page was initially written by
+.An Martin Matuska Aq mm@FreeBSD.org .
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_iter.c b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_iter.c
new file mode 100644
index 0000000000000000000000000000000000000000..6ba91b105fe92ded49de182a4d1344eb9232266a
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_iter.c
@@ -0,0 +1,253 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident	"%Z%%M%	%I%	%E% SMI"
+
+#include <solaris.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+
+#include <libzfs.h>
+
+#include "zpool_util.h"
+
+/*
+ * Private interface for iterating over pools specified on the command line.
+ * Most consumers will call for_each_pool, but in order to support iostat, we
+ * allow fine-grained control through the zpool_list_t interface.
+ */
+
+typedef struct zpool_node {
+	zpool_handle_t	*zn_handle;
+	uu_avl_node_t	zn_avlnode;
+	int		zn_mark;
+} zpool_node_t;
+
+struct zpool_list {
+	boolean_t	zl_findall;
+	uu_avl_t	*zl_avl;
+	uu_avl_pool_t	*zl_pool;
+	zprop_list_t	**zl_proplist;
+};
+
+/* ARGSUSED */
+static int
+zpool_compare(const void *larg, const void *rarg, void *unused)
+{
+	zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
+	zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
+	const char *lname = zpool_get_name(l);
+	const char *rname = zpool_get_name(r);
+
+	return (strcmp(lname, rname));
+}
+
+/*
+ * Callback function for pool_list_get().  Adds the given pool to the AVL tree
+ * of known pools.
+ */
+static int
+add_pool(zpool_handle_t *zhp, void *data)
+{
+	zpool_list_t *zlp = data;
+	zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
+	uu_avl_index_t idx;
+
+	node->zn_handle = zhp;
+	uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
+	if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
+		if (zlp->zl_proplist &&
+		    zpool_expand_proplist(zhp, zlp->zl_proplist) != 0) {
+			zpool_close(zhp);
+			free(node);
+			return (-1);
+		}
+		uu_avl_insert(zlp->zl_avl, node, idx);
+	} else {
+		zpool_close(zhp);
+		free(node);
+		return (-1);
+	}
+
+	return (0);
+}
+
+/*
+ * Create a list of pools based on the given arguments.  If we're given no
+ * arguments, then iterate over all pools in the system and add them to the AVL
+ * tree.  Otherwise, add only those pools explicitly specified on the command
+ * line.
+ */
+zpool_list_t *
+pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
+{
+	zpool_list_t *zlp;
+
+	zlp = safe_malloc(sizeof (zpool_list_t));
+
+	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
+	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);
+
+	if (zlp->zl_pool == NULL)
+		zpool_no_memory();
+
+	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
+	    UU_DEFAULT)) == NULL)
+		zpool_no_memory();
+
+	zlp->zl_proplist = proplist;
+
+	if (argc == 0) {
+		(void) zpool_iter(g_zfs, add_pool, zlp);
+		zlp->zl_findall = B_TRUE;
+	} else {
+		int i;
+
+		for (i = 0; i < argc; i++) {
+			zpool_handle_t *zhp;
+
+			if (zhp = zpool_open_canfail(g_zfs, argv[i])) {
+				if (add_pool(zhp, zlp) != 0)
+					*err = B_TRUE;
+			} else {
+				*err = B_TRUE;
+			}
+		}
+	}
+
+	return (zlp);
+}
+
+/*
+ * Search for any new pools, adding them to the list.  We only add pools when no
+ * options were given on the command line.  Otherwise, we keep the list fixed as
+ * those that were explicitly specified.
+ */
+void
+pool_list_update(zpool_list_t *zlp)
+{
+	if (zlp->zl_findall)
+		(void) zpool_iter(g_zfs, add_pool, zlp);
+}
+
+/*
+ * Iterate over all pools in the list, executing the callback for each
+ */
+int
+pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
+    void *data)
+{
+	zpool_node_t *node, *next_node;
+	int ret = 0;
+
+	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
+		next_node = uu_avl_next(zlp->zl_avl, node);
+		if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
+		    unavail)
+			ret |= func(node->zn_handle, data);
+	}
+
+	return (ret);
+}
+
+/*
+ * Remove the given pool from the list.  When running iostat, we want to remove
+ * those pools that no longer exist.
+ */
+void
+pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
+{
+	zpool_node_t search, *node;
+
+	search.zn_handle = zhp;
+	if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
+		uu_avl_remove(zlp->zl_avl, node);
+		zpool_close(node->zn_handle);
+		free(node);
+	}
+}
+
+/*
+ * Free all the handles associated with this list.
+ */
+void
+pool_list_free(zpool_list_t *zlp)
+{
+	uu_avl_walk_t *walk;
+	zpool_node_t *node;
+
+	if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
+		(void) fprintf(stderr,
+		    gettext("internal error: out of memory"));
+		exit(1);
+	}
+
+	while ((node = uu_avl_walk_next(walk)) != NULL) {
+		uu_avl_remove(zlp->zl_avl, node);
+		zpool_close(node->zn_handle);
+		free(node);
+	}
+
+	uu_avl_walk_end(walk);
+	uu_avl_destroy(zlp->zl_avl);
+	uu_avl_pool_destroy(zlp->zl_pool);
+
+	free(zlp);
+}
+
+/*
+ * Returns the number of elements in the pool list.
+ */
+int
+pool_list_count(zpool_list_t *zlp)
+{
+	return (uu_avl_numnodes(zlp->zl_avl));
+}
+
+/*
+ * High level function which iterates over all pools given on the command line,
+ * using the pool_list_* interfaces.
+ */
+int
+for_each_pool(int argc, char **argv, boolean_t unavail,
+    zprop_list_t **proplist, zpool_iter_f func, void *data)
+{
+	zpool_list_t *list;
+	int ret = 0;
+
+	if ((list = pool_list_get(argc, argv, proplist, &ret)) == NULL)
+		return (1);
+
+	if (pool_list_iter(list, unavail, func, data) != 0)
+		ret = 1;
+
+	pool_list_free(list);
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c
new file mode 100644
index 0000000000000000000000000000000000000000..dd6c90ba0578673e68cf9325ce34f28003ceb556
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c
@@ -0,0 +1,5317 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012 by Frederik Wessels. All rights reserved.
+ * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
+ */
+
+#include <solaris.h>
+#include <assert.h>
+#include <ctype.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <libuutil.h>
+#include <locale.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <priv.h>
+#include <pwd.h>
+#include <zone.h>
+#include <sys/time.h>
+#include <zfs_prop.h>
+#include <sys/fs/zfs.h>
+#include <sys/stat.h>
+
+#include <libzfs.h>
+
+#include "zpool_util.h"
+#include "zfs_comutil.h"
+#include "zfeature_common.h"
+
+#include "statcommon.h"
+
+static int zpool_do_create(int, char **);
+static int zpool_do_destroy(int, char **);
+
+static int zpool_do_add(int, char **);
+static int zpool_do_remove(int, char **);
+static int zpool_do_labelclear(int, char **);
+
+static int zpool_do_list(int, char **);
+static int zpool_do_iostat(int, char **);
+static int zpool_do_status(int, char **);
+
+static int zpool_do_online(int, char **);
+static int zpool_do_offline(int, char **);
+static int zpool_do_clear(int, char **);
+static int zpool_do_reopen(int, char **);
+
+static int zpool_do_reguid(int, char **);
+
+static int zpool_do_attach(int, char **);
+static int zpool_do_detach(int, char **);
+static int zpool_do_replace(int, char **);
+static int zpool_do_split(int, char **);
+
+static int zpool_do_scrub(int, char **);
+
+static int zpool_do_import(int, char **);
+static int zpool_do_export(int, char **);
+
+static int zpool_do_upgrade(int, char **);
+
+static int zpool_do_history(int, char **);
+
+static int zpool_do_get(int, char **);
+static int zpool_do_set(int, char **);
+
+/*
+ * These libumem hooks provide a reasonable set of defaults for the allocator's
+ * debugging facilities.
+ */
+
+#ifdef DEBUG
+const char *
+_umem_debug_init(void)
+{
+	return ("default,verbose"); /* $UMEM_DEBUG setting */
+}
+
+const char *
+_umem_logging_init(void)
+{
+	return ("fail,contents"); /* $UMEM_LOGGING setting */
+}
+#endif
+
+typedef enum {
+	HELP_ADD,
+	HELP_ATTACH,
+	HELP_CLEAR,
+	HELP_CREATE,
+	HELP_DESTROY,
+	HELP_DETACH,
+	HELP_EXPORT,
+	HELP_HISTORY,
+	HELP_IMPORT,
+	HELP_IOSTAT,
+	HELP_LABELCLEAR,
+	HELP_LIST,
+	HELP_OFFLINE,
+	HELP_ONLINE,
+	HELP_REPLACE,
+	HELP_REMOVE,
+	HELP_SCRUB,
+	HELP_STATUS,
+	HELP_UPGRADE,
+	HELP_GET,
+	HELP_SET,
+	HELP_SPLIT,
+	HELP_REGUID,
+	HELP_REOPEN
+} zpool_help_t;
+
+
+typedef struct zpool_command {
+	const char	*name;
+	int		(*func)(int, char **);
+	zpool_help_t	usage;
+} zpool_command_t;
+
+/*
+ * Master command table.  Each ZFS command has a name, associated function, and
+ * usage message.  The usage messages need to be internationalized, so we have
+ * to have a function to return the usage message based on a command index.
+ *
+ * These commands are organized according to how they are displayed in the usage
+ * message.  An empty command (one with a NULL name) indicates an empty line in
+ * the generic usage message.
+ */
+static zpool_command_t command_table[] = {
+	{ "create",	zpool_do_create,	HELP_CREATE		},
+	{ "destroy",	zpool_do_destroy,	HELP_DESTROY		},
+	{ NULL },
+	{ "add",	zpool_do_add,		HELP_ADD		},
+	{ "remove",	zpool_do_remove,	HELP_REMOVE		},
+	{ NULL },
+	{ "labelclear",	zpool_do_labelclear,	HELP_LABELCLEAR		},
+	{ NULL },
+	{ "list",	zpool_do_list,		HELP_LIST		},
+	{ "iostat",	zpool_do_iostat,	HELP_IOSTAT		},
+	{ "status",	zpool_do_status,	HELP_STATUS		},
+	{ NULL },
+	{ "online",	zpool_do_online,	HELP_ONLINE		},
+	{ "offline",	zpool_do_offline,	HELP_OFFLINE		},
+	{ "clear",	zpool_do_clear,		HELP_CLEAR		},
+	{ "reopen",	zpool_do_reopen,	HELP_REOPEN		},
+	{ NULL },
+	{ "attach",	zpool_do_attach,	HELP_ATTACH		},
+	{ "detach",	zpool_do_detach,	HELP_DETACH		},
+	{ "replace",	zpool_do_replace,	HELP_REPLACE		},
+	{ "split",	zpool_do_split,		HELP_SPLIT		},
+	{ NULL },
+	{ "scrub",	zpool_do_scrub,		HELP_SCRUB		},
+	{ NULL },
+	{ "import",	zpool_do_import,	HELP_IMPORT		},
+	{ "export",	zpool_do_export,	HELP_EXPORT		},
+	{ "upgrade",	zpool_do_upgrade,	HELP_UPGRADE		},
+	{ "reguid",	zpool_do_reguid,	HELP_REGUID		},
+	{ NULL },
+	{ "history",	zpool_do_history,	HELP_HISTORY		},
+	{ "get",	zpool_do_get,		HELP_GET		},
+	{ "set",	zpool_do_set,		HELP_SET		},
+};
+
+#define	NCOMMAND	(sizeof (command_table) / sizeof (command_table[0]))
+
+zpool_command_t *current_command;
+static char history_str[HIS_MAX_RECORD_LEN];
+
+static uint_t timestamp_fmt = NODATE;
+
+static const char *
+get_usage(zpool_help_t idx) {
+	switch (idx) {
+	case HELP_ADD:
+		return (gettext("\tadd [-fn] <pool> <vdev> ...\n"));
+	case HELP_ATTACH:
+		return (gettext("\tattach [-f] <pool> <device> "
+		    "<new-device>\n"));
+	case HELP_CLEAR:
+		return (gettext("\tclear [-nF] <pool> [device]\n"));
+	case HELP_CREATE:
+		return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
+		    "\t    [-O file-system-property=value] ... \n"
+		    "\t    [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
+	case HELP_DESTROY:
+		return (gettext("\tdestroy [-f] <pool>\n"));
+	case HELP_DETACH:
+		return (gettext("\tdetach <pool> <device>\n"));
+	case HELP_EXPORT:
+		return (gettext("\texport [-f] <pool> ...\n"));
+	case HELP_HISTORY:
+		return (gettext("\thistory [-il] [<pool>] ...\n"));
+	case HELP_IMPORT:
+		return (gettext("\timport [-d dir] [-D]\n"
+		    "\timport [-d dir | -c cachefile] [-F [-n]] <pool | id>\n"
+		    "\timport [-o mntopts] [-o property=value] ... \n"
+		    "\t    [-d dir | -c cachefile] [-D] [-f] [-m] [-N] "
+		    "[-R root] [-F [-n]] -a\n"
+		    "\timport [-o mntopts] [-o property=value] ... \n"
+		    "\t    [-d dir | -c cachefile] [-D] [-f] [-m] [-N] "
+		    "[-R root] [-F [-n]]\n"
+		    "\t    <pool | id> [newpool]\n"));
+	case HELP_IOSTAT:
+		return (gettext("\tiostat [-v] [-T d|u] [pool] ... [interval "
+		    "[count]]\n"));
+	case HELP_LABELCLEAR:
+		return (gettext("\tlabelclear [-f] <vdev>\n"));
+	case HELP_LIST:
+		return (gettext("\tlist [-Hv] [-o property[,...]] "
+		    "[-T d|u] [pool] ... [interval [count]]\n"));
+	case HELP_OFFLINE:
+		return (gettext("\toffline [-t] <pool> <device> ...\n"));
+	case HELP_ONLINE:
+		return (gettext("\tonline [-e] <pool> <device> ...\n"));
+	case HELP_REPLACE:
+		return (gettext("\treplace [-f] <pool> <device> "
+		    "[new-device]\n"));
+	case HELP_REMOVE:
+		return (gettext("\tremove <pool> <device> ...\n"));
+	case HELP_REOPEN:
+		return (""); /* Undocumented command */
+	case HELP_SCRUB:
+		return (gettext("\tscrub [-s] <pool> ...\n"));
+	case HELP_STATUS:
+		return (gettext("\tstatus [-vx] [-T d|u] [pool] ... [interval "
+		    "[count]]\n"));
+	case HELP_UPGRADE:
+		return (gettext("\tupgrade [-v]\n"
+		    "\tupgrade [-V version] <-a | pool ...>\n"));
+	case HELP_GET:
+		return (gettext("\tget <\"all\" | property[,...]> "
+		    "<pool> ...\n"));
+	case HELP_SET:
+		return (gettext("\tset <property=value> <pool> \n"));
+	case HELP_SPLIT:
+		return (gettext("\tsplit [-n] [-R altroot] [-o mntopts]\n"
+		    "\t    [-o property=value] <pool> <newpool> "
+		    "[<device> ...]\n"));
+	case HELP_REGUID:
+		return (gettext("\treguid <pool>\n"));
+	}
+
+	abort();
+	/* NOTREACHED */
+}
+
+
+/*
+ * Callback routine that will print out a pool property value.
+ */
+static int
+print_prop_cb(int prop, void *cb)
+{
+	FILE *fp = cb;
+
+	(void) fprintf(fp, "\t%-15s  ", zpool_prop_to_name(prop));
+
+	if (zpool_prop_readonly(prop))
+		(void) fprintf(fp, "  NO   ");
+	else
+		(void) fprintf(fp, " YES   ");
+
+	if (zpool_prop_values(prop) == NULL)
+		(void) fprintf(fp, "-\n");
+	else
+		(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
+
+	return (ZPROP_CONT);
+}
+
+/*
+ * Display usage message.  If we're inside a command, display only the usage for
+ * that command.  Otherwise, iterate over the entire command table and display
+ * a complete usage message.
+ */
+void
+usage(boolean_t requested)
+{
+	FILE *fp = requested ? stdout : stderr;
+
+	if (current_command == NULL) {
+		int i;
+
+		(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
+		(void) fprintf(fp,
+		    gettext("where 'command' is one of the following:\n\n"));
+
+		for (i = 0; i < NCOMMAND; i++) {
+			if (command_table[i].name == NULL)
+				(void) fprintf(fp, "\n");
+			else
+				(void) fprintf(fp, "%s",
+				    get_usage(command_table[i].usage));
+		}
+	} else {
+		(void) fprintf(fp, gettext("usage:\n"));
+		(void) fprintf(fp, "%s", get_usage(current_command->usage));
+	}
+
+	if (current_command != NULL &&
+	    ((strcmp(current_command->name, "set") == 0) ||
+	    (strcmp(current_command->name, "get") == 0) ||
+	    (strcmp(current_command->name, "list") == 0))) {
+
+		(void) fprintf(fp,
+		    gettext("\nthe following properties are supported:\n"));
+
+		(void) fprintf(fp, "\n\t%-15s  %s   %s\n\n",
+		    "PROPERTY", "EDIT", "VALUES");
+
+		/* Iterate over all properties */
+		(void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE,
+		    ZFS_TYPE_POOL);
+
+		(void) fprintf(fp, "\t%-15s   ", "feature@...");
+		(void) fprintf(fp, "YES   disabled | enabled | active\n");
+
+		(void) fprintf(fp, gettext("\nThe feature@ properties must be "
+		    "appended with a feature name.\nSee zpool-features(7).\n"));
+	}
+
+	/*
+	 * See comments at end of main().
+	 */
+	if (getenv("ZFS_ABORT") != NULL) {
+		(void) printf("dumping core by request\n");
+		abort();
+	}
+
+	exit(requested ? 0 : 2);
+}
+
+void
+print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
+    boolean_t print_logs)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	char *vname;
+
+	if (name != NULL)
+		(void) printf("\t%*s%s\n", indent, "", name);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return;
+
+	for (c = 0; c < children; c++) {
+		uint64_t is_log = B_FALSE;
+
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    &is_log);
+		if ((is_log && !print_logs) || (!is_log && print_logs))
+			continue;
+
+		vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE);
+		print_vdev_tree(zhp, vname, child[c], indent + 2,
+		    B_FALSE);
+		free(vname);
+	}
+}
+
+static boolean_t
+prop_list_contains_feature(nvlist_t *proplist)
+{
+	nvpair_t *nvp;
+	for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
+	    nvp = nvlist_next_nvpair(proplist, nvp)) {
+		if (zpool_prop_feature(nvpair_name(nvp)))
+			return (B_TRUE);
+	}
+	return (B_FALSE);
+}
+
+/*
+ * Add a property pair (name, string-value) into a property nvlist.
+ */
+static int
+add_prop_list(const char *propname, char *propval, nvlist_t **props,
+    boolean_t poolprop)
+{
+	zpool_prop_t prop = ZPROP_INVAL;
+	zfs_prop_t fprop;
+	nvlist_t *proplist;
+	const char *normnm;
+	char *strval;
+
+	if (*props == NULL &&
+	    nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
+		(void) fprintf(stderr,
+		    gettext("internal error: out of memory\n"));
+		return (1);
+	}
+
+	proplist = *props;
+
+	if (poolprop) {
+		const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
+
+		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL &&
+		    !zpool_prop_feature(propname)) {
+			(void) fprintf(stderr, gettext("property '%s' is "
+			    "not a valid pool property\n"), propname);
+			return (2);
+		}
+
+		/*
+		 * feature@ properties and version should not be specified
+		 * at the same time.
+		 */
+		if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) &&
+		    nvlist_exists(proplist, vname)) ||
+		    (prop == ZPOOL_PROP_VERSION &&
+		    prop_list_contains_feature(proplist))) {
+			(void) fprintf(stderr, gettext("'feature@' and "
+			    "'version' properties cannot be specified "
+			    "together\n"));
+			return (2);
+		}
+
+
+		if (zpool_prop_feature(propname))
+			normnm = propname;
+		else
+			normnm = zpool_prop_to_name(prop);
+	} else {
+		if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
+			normnm = zfs_prop_to_name(fprop);
+		} else {
+			normnm = propname;
+		}
+	}
+
+	if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
+	    prop != ZPOOL_PROP_CACHEFILE) {
+		(void) fprintf(stderr, gettext("property '%s' "
+		    "specified multiple times\n"), propname);
+		return (2);
+	}
+
+	if (nvlist_add_string(proplist, normnm, propval) != 0) {
+		(void) fprintf(stderr, gettext("internal "
+		    "error: out of memory\n"));
+		return (1);
+	}
+
+	return (0);
+}
+
+/*
+ * zpool add [-fn] <pool> <vdev> ...
+ *
+ *	-f	Force addition of devices, even if they appear in use
+ *	-n	Do not add the devices, but display the resulting layout if
+ *		they were to be added.
+ *
+ * Adds the given vdevs to 'pool'.  As with create, the bulk of this work is
+ * handled by get_vdev_spec(), which constructs the nvlist needed to pass to
+ * libzfs.
+ */
+int
+zpool_do_add(int argc, char **argv)
+{
+	boolean_t force = B_FALSE;
+	boolean_t dryrun = B_FALSE;
+	int c;
+	nvlist_t *nvroot;
+	char *poolname;
+	int ret;
+	zpool_handle_t *zhp;
+	nvlist_t *config;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "fn")) != -1) {
+		switch (c) {
+		case 'f':
+			force = B_TRUE;
+			break;
+		case 'n':
+			dryrun = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* get pool name and check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing vdev specification\n"));
+		usage(B_FALSE);
+	}
+
+	poolname = argv[0];
+
+	argc--;
+	argv++;
+
+	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
+		return (1);
+
+	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
+		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
+		    poolname);
+		zpool_close(zhp);
+		return (1);
+	}
+
+	/* pass off to get_vdev_spec for processing */
+	nvroot = make_root_vdev(zhp, force, !force, B_FALSE, dryrun,
+	    argc, argv);
+	if (nvroot == NULL) {
+		zpool_close(zhp);
+		return (1);
+	}
+
+	if (dryrun) {
+		nvlist_t *poolnvroot;
+
+		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    &poolnvroot) == 0);
+
+		(void) printf(gettext("would update '%s' to the following "
+		    "configuration:\n"), zpool_get_name(zhp));
+
+		/* print original main pool and new tree */
+		print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE);
+		print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE);
+
+		/* Do the same for the logs */
+		if (num_logs(poolnvroot) > 0) {
+			print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE);
+			print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE);
+		} else if (num_logs(nvroot) > 0) {
+			print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE);
+		}
+
+		ret = 0;
+	} else {
+		ret = (zpool_add(zhp, nvroot) != 0);
+	}
+
+	nvlist_free(nvroot);
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * zpool remove  <pool> <vdev> ...
+ *
+ * Removes the given vdev from the pool.  Currently, this supports removing
+ * spares, cache, and log devices from the pool.
+ */
+int
+zpool_do_remove(int argc, char **argv)
+{
+	char *poolname;
+	int i, ret = 0;
+	zpool_handle_t *zhp;
+
+	argc--;
+	argv++;
+
+	/* get pool name and check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing device\n"));
+		usage(B_FALSE);
+	}
+
+	poolname = argv[0];
+
+	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
+		return (1);
+
+	for (i = 1; i < argc; i++) {
+		if (zpool_vdev_remove(zhp, argv[i]) != 0)
+			ret = 1;
+	}
+
+	return (ret);
+}
+
+/*
+ * zpool labelclear <vdev>
+ *
+ * Verifies that the vdev is not active and zeros out the label information
+ * on the device.
+ */
+int
+zpool_do_labelclear(int argc, char **argv)
+{
+	char *vdev, *name;
+	int c, fd = -1, ret = 0;
+	pool_state_t state;
+	boolean_t inuse = B_FALSE;
+	boolean_t force = B_FALSE;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "f")) != -1) {
+		switch (c) {
+		case 'f':
+			force = B_TRUE;
+			break;
+		default:
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* get vdev name */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing vdev device name\n"));
+		usage(B_FALSE);
+	}
+
+	vdev = argv[0];
+	if ((fd = open(vdev, O_RDWR)) < 0) {
+		(void) fprintf(stderr, gettext("Unable to open %s\n"), vdev);
+		return (B_FALSE);
+	}
+
+	name = NULL;
+	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0) {
+		if (force)
+			goto wipe_label;
+		
+		(void) fprintf(stderr,
+		    gettext("Unable to determine pool state for %s\n"
+		    "Use -f to force the clearing any label data\n"), vdev);
+
+		return (1);
+	}
+
+	if (inuse) {
+		switch (state) {
+		default:
+		case POOL_STATE_ACTIVE:
+		case POOL_STATE_SPARE:
+		case POOL_STATE_L2CACHE:
+			(void) fprintf(stderr,
+gettext("labelclear operation failed.\n"
+	"\tVdev %s is a member (%s), of pool \"%s\".\n"
+	"\tTo remove label information from this device, export or destroy\n"
+	"\tthe pool, or remove %s from the configuration of this pool\n"
+	"\tand retry the labelclear operation\n"),
+			    vdev, zpool_pool_state_to_name(state), name, vdev);
+			ret = 1;
+			goto errout;
+
+		case POOL_STATE_EXPORTED:
+			if (force)
+				break;
+
+			(void) fprintf(stderr,
+gettext("labelclear operation failed.\n"
+	"\tVdev %s is a member of the exported pool \"%s\".\n"
+	"\tUse \"zpool labelclear -f %s\" to force the removal of label\n"
+	"\tinformation.\n"),
+			    vdev, name, vdev);
+			ret = 1;
+			goto errout;
+
+		case POOL_STATE_POTENTIALLY_ACTIVE:
+			if (force)
+				break;
+
+			(void) fprintf(stderr,
+gettext("labelclear operation failed.\n"
+	"\tVdev %s is a member of the pool \"%s\".\n"
+	"\tThis pool is unknown to this system, but may be active on\n"
+	"\tanother system. Use \'zpool labelclear -f %s\' to force the\n"
+	"\tremoval of label information.\n"),
+			    vdev, name, vdev);
+			ret = 1;
+			goto errout;
+
+		case POOL_STATE_DESTROYED:
+			/* inuse should never be set for a destoryed pool... */
+			break;
+		}
+	}
+
+wipe_label:
+	if (zpool_clear_label(fd) != 0) {
+		(void) fprintf(stderr,
+		    gettext("Label clear failed on vdev %s\n"), vdev);
+		ret = 1;
+	}
+
+errout:
+	close(fd);
+	if (name != NULL)
+		free(name);
+
+	return (ret);
+}
+
+/*
+ * zpool create [-fnd] [-o property=value] ...
+ *		[-O file-system-property=value] ...
+ *		[-R root] [-m mountpoint] <pool> <dev> ...
+ *
+ *	-f	Force creation, even if devices appear in use
+ *	-n	Do not create the pool, but display the resulting layout if it
+ *		were to be created.
+ *      -R	Create a pool under an alternate root
+ *      -m	Set default mountpoint for the root dataset.  By default it's
+ *		'/<pool>'
+ *	-o	Set property=value.
+ *	-d	Don't automatically enable all supported pool features
+ *		(individual features can be enabled with -o).
+ *	-O	Set fsproperty=value in the pool's root file system
+ *
+ * Creates the named pool according to the given vdev specification.  The
+ * bulk of the vdev processing is done in get_vdev_spec() in zpool_vdev.c.  Once
+ * we get the nvlist back from get_vdev_spec(), we either print out the contents
+ * (if '-n' was specified), or pass it to libzfs to do the creation.
+ */
+int
+zpool_do_create(int argc, char **argv)
+{
+	boolean_t force = B_FALSE;
+	boolean_t dryrun = B_FALSE;
+	boolean_t enable_all_pool_feat = B_TRUE;
+	int c;
+	nvlist_t *nvroot = NULL;
+	char *poolname;
+	int ret = 1;
+	char *altroot = NULL;
+	char *mountpoint = NULL;
+	nvlist_t *fsprops = NULL;
+	nvlist_t *props = NULL;
+	char *propval;
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":fndR:m:o:O:")) != -1) {
+		switch (c) {
+		case 'f':
+			force = B_TRUE;
+			break;
+		case 'n':
+			dryrun = B_TRUE;
+			break;
+		case 'd':
+			enable_all_pool_feat = B_FALSE;
+			break;
+		case 'R':
+			altroot = optarg;
+			if (add_prop_list(zpool_prop_to_name(
+			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
+				goto errout;
+			if (nvlist_lookup_string(props,
+			    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
+			    &propval) == 0)
+				break;
+			if (add_prop_list(zpool_prop_to_name(
+			    ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
+				goto errout;
+			break;
+		case 'm':
+			mountpoint = optarg;
+			break;
+		case 'o':
+			if ((propval = strchr(optarg, '=')) == NULL) {
+				(void) fprintf(stderr, gettext("missing "
+				    "'=' for -o option\n"));
+				goto errout;
+			}
+			*propval = '\0';
+			propval++;
+
+			if (add_prop_list(optarg, propval, &props, B_TRUE))
+				goto errout;
+
+			/*
+			 * If the user is creating a pool that doesn't support
+			 * feature flags, don't enable any features.
+			 */
+			if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
+				char *end;
+				u_longlong_t ver;
+
+				ver = strtoull(propval, &end, 10);
+				if (*end == '\0' &&
+				    ver < SPA_VERSION_FEATURES) {
+					enable_all_pool_feat = B_FALSE;
+				}
+			}
+			break;
+		case 'O':
+			if ((propval = strchr(optarg, '=')) == NULL) {
+				(void) fprintf(stderr, gettext("missing "
+				    "'=' for -O option\n"));
+				goto errout;
+			}
+			*propval = '\0';
+			propval++;
+
+			if (add_prop_list(optarg, propval, &fsprops, B_FALSE))
+				goto errout;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			goto badusage;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			goto badusage;
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* get pool name and check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name argument\n"));
+		goto badusage;
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing vdev specification\n"));
+		goto badusage;
+	}
+
+	poolname = argv[0];
+
+	/*
+	 * As a special case, check for use of '/' in the name, and direct the
+	 * user to use 'zfs create' instead.
+	 */
+	if (strchr(poolname, '/') != NULL) {
+		(void) fprintf(stderr, gettext("cannot create '%s': invalid "
+		    "character '/' in pool name\n"), poolname);
+		(void) fprintf(stderr, gettext("use 'zfs create' to "
+		    "create a dataset\n"));
+		goto errout;
+	}
+
+	/* pass off to get_vdev_spec for bulk processing */
+	nvroot = make_root_vdev(NULL, force, !force, B_FALSE, dryrun,
+	    argc - 1, argv + 1);
+	if (nvroot == NULL)
+		goto errout;
+
+	/* make_root_vdev() allows 0 toplevel children if there are spares */
+	if (!zfs_allocatable_devs(nvroot)) {
+		(void) fprintf(stderr, gettext("invalid vdev "
+		    "specification: at least one toplevel vdev must be "
+		    "specified\n"));
+		goto errout;
+	}
+
+	if (altroot != NULL && altroot[0] != '/') {
+		(void) fprintf(stderr, gettext("invalid alternate root '%s': "
+		    "must be an absolute path\n"), altroot);
+		goto errout;
+	}
+
+	/*
+	 * Check the validity of the mountpoint and direct the user to use the
+	 * '-m' mountpoint option if it looks like its in use.
+	 * Ignore the checks if the '-f' option is given.
+	 */
+	if (!force && (mountpoint == NULL ||
+	    (strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
+	    strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0))) {
+		char buf[MAXPATHLEN];
+		DIR *dirp;
+
+		if (mountpoint && mountpoint[0] != '/') {
+			(void) fprintf(stderr, gettext("invalid mountpoint "
+			    "'%s': must be an absolute path, 'legacy', or "
+			    "'none'\n"), mountpoint);
+			goto errout;
+		}
+
+		if (mountpoint == NULL) {
+			if (altroot != NULL)
+				(void) snprintf(buf, sizeof (buf), "%s/%s",
+				    altroot, poolname);
+			else
+				(void) snprintf(buf, sizeof (buf), "/%s",
+				    poolname);
+		} else {
+			if (altroot != NULL)
+				(void) snprintf(buf, sizeof (buf), "%s%s",
+				    altroot, mountpoint);
+			else
+				(void) snprintf(buf, sizeof (buf), "%s",
+				    mountpoint);
+		}
+
+		if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
+			(void) fprintf(stderr, gettext("mountpoint '%s' : "
+			    "%s\n"), buf, strerror(errno));
+			(void) fprintf(stderr, gettext("use '-m' "
+			    "option to provide a different default\n"));
+			goto errout;
+		} else if (dirp) {
+			int count = 0;
+
+			while (count < 3 && readdir(dirp) != NULL)
+				count++;
+			(void) closedir(dirp);
+
+			if (count > 2) {
+				(void) fprintf(stderr, gettext("mountpoint "
+				    "'%s' exists and is not empty\n"), buf);
+				(void) fprintf(stderr, gettext("use '-m' "
+				    "option to provide a "
+				    "different default\n"));
+				goto errout;
+			}
+		}
+	}
+
+	if (dryrun) {
+		/*
+		 * For a dry run invocation, print out a basic message and run
+		 * through all the vdevs in the list and print out in an
+		 * appropriate hierarchy.
+		 */
+		(void) printf(gettext("would create '%s' with the "
+		    "following layout:\n\n"), poolname);
+
+		print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE);
+		if (num_logs(nvroot) > 0)
+			print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE);
+
+		ret = 0;
+	} else {
+		/*
+		 * Hand off to libzfs.
+		 */
+		if (enable_all_pool_feat) {
+			int i;
+			for (i = 0; i < SPA_FEATURES; i++) {
+				char propname[MAXPATHLEN];
+				zfeature_info_t *feat = &spa_feature_table[i];
+
+				(void) snprintf(propname, sizeof (propname),
+				    "feature@%s", feat->fi_uname);
+
+				/*
+				 * Skip feature if user specified it manually
+				 * on the command line.
+				 */
+				if (nvlist_exists(props, propname))
+					continue;
+
+				if (add_prop_list(propname, ZFS_FEATURE_ENABLED,
+				    &props, B_TRUE) != 0)
+					goto errout;
+			}
+		}
+		if (zpool_create(g_zfs, poolname,
+		    nvroot, props, fsprops) == 0) {
+			zfs_handle_t *pool = zfs_open(g_zfs, poolname,
+			    ZFS_TYPE_FILESYSTEM);
+			if (pool != NULL) {
+				if (mountpoint != NULL)
+					verify(zfs_prop_set(pool,
+					    zfs_prop_to_name(
+					    ZFS_PROP_MOUNTPOINT),
+					    mountpoint) == 0);
+				if (zfs_mount(pool, NULL, 0) == 0)
+					ret = zfs_shareall(pool);
+				zfs_close(pool);
+			}
+		} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
+			(void) fprintf(stderr, gettext("pool name may have "
+			    "been omitted\n"));
+		}
+	}
+
+errout:
+	nvlist_free(nvroot);
+	nvlist_free(fsprops);
+	nvlist_free(props);
+	return (ret);
+badusage:
+	nvlist_free(fsprops);
+	nvlist_free(props);
+	usage(B_FALSE);
+	return (2);
+}
+
+/*
+ * zpool destroy <pool>
+ *
+ * 	-f	Forcefully unmount any datasets
+ *
+ * Destroy the given pool.  Automatically unmounts any datasets in the pool.
+ */
+int
+zpool_do_destroy(int argc, char **argv)
+{
+	boolean_t force = B_FALSE;
+	int c;
+	char *pool;
+	zpool_handle_t *zhp;
+	int ret;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "f")) != -1) {
+		switch (c) {
+		case 'f':
+			force = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool argument\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	pool = argv[0];
+
+	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
+		/*
+		 * As a special case, check for use of '/' in the name, and
+		 * direct the user to use 'zfs destroy' instead.
+		 */
+		if (strchr(pool, '/') != NULL)
+			(void) fprintf(stderr, gettext("use 'zfs destroy' to "
+			    "destroy a dataset\n"));
+		return (1);
+	}
+
+	if (zpool_disable_datasets(zhp, force) != 0) {
+		(void) fprintf(stderr, gettext("could not destroy '%s': "
+		    "could not unmount datasets\n"), zpool_get_name(zhp));
+		return (1);
+	}
+
+	ret = (zpool_destroy(zhp) != 0);
+
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * zpool export [-f] <pool> ...
+ *
+ *	-f	Forcefully unmount datasets
+ *
+ * Export the given pools.  By default, the command will attempt to cleanly
+ * unmount any active datasets within the pool.  If the '-f' flag is specified,
+ * then the datasets will be forcefully unmounted.
+ */
+int
+zpool_do_export(int argc, char **argv)
+{
+	boolean_t force = B_FALSE;
+	boolean_t hardforce = B_FALSE;
+	int c;
+	zpool_handle_t *zhp;
+	int ret;
+	int i;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "fF")) != -1) {
+		switch (c) {
+		case 'f':
+			force = B_TRUE;
+			break;
+		case 'F':
+			hardforce = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* check arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool argument\n"));
+		usage(B_FALSE);
+	}
+
+	ret = 0;
+	for (i = 0; i < argc; i++) {
+		if ((zhp = zpool_open_canfail(g_zfs, argv[i])) == NULL) {
+			ret = 1;
+			continue;
+		}
+
+		if (zpool_disable_datasets(zhp, force) != 0) {
+			ret = 1;
+			zpool_close(zhp);
+			continue;
+		}
+
+		if (hardforce) {
+			if (zpool_export_force(zhp) != 0)
+				ret = 1;
+		} else if (zpool_export(zhp, force) != 0) {
+			ret = 1;
+		}
+
+		zpool_close(zhp);
+	}
+
+	return (ret);
+}
+
+/*
+ * Given a vdev configuration, determine the maximum width needed for the device
+ * name column.
+ */
+static int
+max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max)
+{
+	char *name = zpool_vdev_name(g_zfs, zhp, nv, B_TRUE);
+	nvlist_t **child;
+	uint_t c, children;
+	int ret;
+
+	if (strlen(name) + depth > max)
+		max = strlen(name) + depth;
+
+	free(name);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++)
+			if ((ret = max_width(zhp, child[c], depth + 2,
+			    max)) > max)
+				max = ret;
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++)
+			if ((ret = max_width(zhp, child[c], depth + 2,
+			    max)) > max)
+				max = ret;
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++)
+			if ((ret = max_width(zhp, child[c], depth + 2,
+			    max)) > max)
+				max = ret;
+	}
+
+
+	return (max);
+}
+
/*
 * Callback state for find_spare(): cb_guid identifies the vdev being
 * searched for; on a match find_spare() stores the (still open) pool
 * handle in cb_zhp.
 */
typedef struct spare_cbdata {
	uint64_t	cb_guid;	/* guid of the vdev to locate */
	zpool_handle_t	*cb_zhp;	/* pool found to contain it */
} spare_cbdata_t;
+
+static boolean_t
+find_vdev(nvlist_t *nv, uint64_t search)
+{
+	uint64_t guid;
+	nvlist_t **child;
+	uint_t c, children;
+
+	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
+	    search == guid)
+		return (B_TRUE);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++)
+			if (find_vdev(child[c], search))
+				return (B_TRUE);
+	}
+
+	return (B_FALSE);
+}
+
+static int
+find_spare(zpool_handle_t *zhp, void *data)
+{
+	spare_cbdata_t *cbp = data;
+	nvlist_t *config, *nvroot;
+
+	config = zpool_get_config(zhp, NULL);
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+
+	if (find_vdev(nvroot, cbp->cb_guid)) {
+		cbp->cb_zhp = zhp;
+		return (1);
+	}
+
+	zpool_close(zhp);
+	return (0);
+}
+
+/*
+ * Print out configuration state as requested by status_callback.
+ */
+void
+print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
+    int namewidth, int depth, boolean_t isspare)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	pool_scan_stat_t *ps = NULL;
+	vdev_stat_t *vs;
+	char rbuf[6], wbuf[6], cbuf[6];
+	char *vname;
+	uint64_t notpresent;
+	spare_cbdata_t cb;
+	const char *state;
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		children = 0;
+
+	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &c) == 0);
+
+	state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
+	if (isspare) {
+		/*
+		 * For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
+		 * online drives.
+		 */
+		if (vs->vs_aux == VDEV_AUX_SPARED)
+			state = "INUSE";
+		else if (vs->vs_state == VDEV_STATE_HEALTHY)
+			state = "AVAIL";
+	}
+
+	(void) printf("\t%*s%-*s  %-8s", depth, "", namewidth - depth,
+	    name, state);
+
+	if (!isspare) {
+		zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
+		zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
+		zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
+		(void) printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
+	}
+
+	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
+	    &notpresent) == 0 ||
+	    vs->vs_state <= VDEV_STATE_CANT_OPEN) {
+		char *path;
+		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
+			(void) printf("  was %s", path);
+	} else if (vs->vs_aux != 0) {
+		(void) printf("  ");
+
+		switch (vs->vs_aux) {
+		case VDEV_AUX_OPEN_FAILED:
+			(void) printf(gettext("cannot open"));
+			break;
+
+		case VDEV_AUX_BAD_GUID_SUM:
+			(void) printf(gettext("missing device"));
+			break;
+
+		case VDEV_AUX_NO_REPLICAS:
+			(void) printf(gettext("insufficient replicas"));
+			break;
+
+		case VDEV_AUX_VERSION_NEWER:
+			(void) printf(gettext("newer version"));
+			break;
+
+		case VDEV_AUX_UNSUP_FEAT:
+			(void) printf(gettext("unsupported feature(s)"));
+			break;
+
+		case VDEV_AUX_SPARED:
+			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+			    &cb.cb_guid) == 0);
+			if (zpool_iter(g_zfs, find_spare, &cb) == 1) {
+				if (strcmp(zpool_get_name(cb.cb_zhp),
+				    zpool_get_name(zhp)) == 0)
+					(void) printf(gettext("currently in "
+					    "use"));
+				else
+					(void) printf(gettext("in use by "
+					    "pool '%s'"),
+					    zpool_get_name(cb.cb_zhp));
+				zpool_close(cb.cb_zhp);
+			} else {
+				(void) printf(gettext("currently in use"));
+			}
+			break;
+
+		case VDEV_AUX_ERR_EXCEEDED:
+			(void) printf(gettext("too many errors"));
+			break;
+
+		case VDEV_AUX_IO_FAILURE:
+			(void) printf(gettext("experienced I/O failures"));
+			break;
+
+		case VDEV_AUX_BAD_LOG:
+			(void) printf(gettext("bad intent log"));
+			break;
+
+		case VDEV_AUX_EXTERNAL:
+			(void) printf(gettext("external device fault"));
+			break;
+
+		case VDEV_AUX_SPLIT_POOL:
+			(void) printf(gettext("split into new pool"));
+			break;
+
+		default:
+			(void) printf(gettext("corrupted data"));
+			break;
+		}
+	}
+
+	(void) nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS,
+	    (uint64_t **)&ps, &c);
+
+	if (ps && ps->pss_state == DSS_SCANNING &&
+	    vs->vs_scan_processed != 0 && children == 0) {
+		(void) printf(gettext("  (%s)"),
+		    (ps->pss_func == POOL_SCAN_RESILVER) ?
+		    "resilvering" : "repairing");
+	}
+
+	(void) printf("\n");
+
+	for (c = 0; c < children; c++) {
+		uint64_t islog = B_FALSE, ishole = B_FALSE;
+
+		/* Don't print logs or holes here */
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    &islog);
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
+		    &ishole);
+		if (islog || ishole)
+			continue;
+		vname = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE);
+		print_status_config(zhp, vname, child[c],
+		    namewidth, depth + 2, isspare);
+		free(vname);
+	}
+}
+
+
+/*
+ * Print the configuration of an exported pool.  Iterate over all vdevs in the
+ * pool, printing out the name and status for each one.
+ */
+void
+print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	vdev_stat_t *vs;
+	char *type, *vname;
+
+	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+	if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
+	    strcmp(type, VDEV_TYPE_HOLE) == 0)
+		return;
+
+	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &c) == 0);
+
+	(void) printf("\t%*s%-*s", depth, "", namewidth - depth, name);
+	(void) printf("  %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
+
+	if (vs->vs_aux != 0) {
+		(void) printf("  ");
+
+		switch (vs->vs_aux) {
+		case VDEV_AUX_OPEN_FAILED:
+			(void) printf(gettext("cannot open"));
+			break;
+
+		case VDEV_AUX_BAD_GUID_SUM:
+			(void) printf(gettext("missing device"));
+			break;
+
+		case VDEV_AUX_NO_REPLICAS:
+			(void) printf(gettext("insufficient replicas"));
+			break;
+
+		case VDEV_AUX_VERSION_NEWER:
+			(void) printf(gettext("newer version"));
+			break;
+
+		case VDEV_AUX_UNSUP_FEAT:
+			(void) printf(gettext("unsupported feature(s)"));
+			break;
+
+		case VDEV_AUX_ERR_EXCEEDED:
+			(void) printf(gettext("too many errors"));
+			break;
+
+		default:
+			(void) printf(gettext("corrupted data"));
+			break;
+		}
+	}
+	(void) printf("\n");
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return;
+
+	for (c = 0; c < children; c++) {
+		uint64_t is_log = B_FALSE;
+
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    &is_log);
+		if (is_log)
+			continue;
+
+		vname = zpool_vdev_name(g_zfs, NULL, child[c], B_TRUE);
+		print_import_config(vname, child[c], namewidth, depth + 2);
+		free(vname);
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+	    &child, &children) == 0) {
+		(void) printf(gettext("\tcache\n"));
+		for (c = 0; c < children; c++) {
+			vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE);
+			(void) printf("\t  %s\n", vname);
+			free(vname);
+		}
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+	    &child, &children) == 0) {
+		(void) printf(gettext("\tspares\n"));
+		for (c = 0; c < children; c++) {
+			vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE);
+			(void) printf("\t  %s\n", vname);
+			free(vname);
+		}
+	}
+}
+
+/*
+ * Print log vdevs.
+ * Logs are recorded as top level vdevs in the main pool child array
+ * but with "is_log" set to 1. We use either print_status_config() or
+ * print_import_config() to print the top level logs then any log
+ * children (eg mirrored slogs) are printed recursively - which
+ * works because only the top level vdev is marked "is_log"
+ */
+static void
+print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose)
+{
+	uint_t c, children;
+	nvlist_t **child;
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
+	    &children) != 0)
+		return;
+
+	(void) printf(gettext("\tlogs\n"));
+
+	for (c = 0; c < children; c++) {
+		uint64_t is_log = B_FALSE;
+		char *name;
+
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    &is_log);
+		if (!is_log)
+			continue;
+		name = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE);
+		if (verbose)
+			print_status_config(zhp, name, child[c], namewidth,
+			    2, B_FALSE);
+		else
+			print_import_config(name, child[c], namewidth, 2);
+		free(name);
+	}
+}
+
+/*
+ * Display the status for the given pool.
+ */
+static void
+show_import(nvlist_t *config)
+{
+	uint64_t pool_state;
+	vdev_stat_t *vs;
+	char *name;
+	uint64_t guid;
+	char *msgid;
+	nvlist_t *nvroot;
+	int reason;
+	const char *health;
+	uint_t vsc;
+	int namewidth;
+	char *comment;
+
+	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+	    &name) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+	    &guid) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+	    &pool_state) == 0);
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+
+	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &vsc) == 0);
+	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
+
+	reason = zpool_import_status(config, &msgid);
+
+	(void) printf(gettext("   pool: %s\n"), name);
+	(void) printf(gettext("     id: %llu\n"), (u_longlong_t)guid);
+	(void) printf(gettext("  state: %s"), health);
+	if (pool_state == POOL_STATE_DESTROYED)
+		(void) printf(gettext(" (DESTROYED)"));
+	(void) printf("\n");
+
+	switch (reason) {
+	case ZPOOL_STATUS_MISSING_DEV_R:
+	case ZPOOL_STATUS_MISSING_DEV_NR:
+	case ZPOOL_STATUS_BAD_GUID_SUM:
+		(void) printf(gettext(" status: One or more devices are "
+		    "missing from the system.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_LABEL_R:
+	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
+		(void) printf(gettext(" status: One or more devices contains "
+		    "corrupted data.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_DATA:
+		(void) printf(
+		    gettext(" status: The pool data is corrupted.\n"));
+		break;
+
+	case ZPOOL_STATUS_OFFLINE_DEV:
+		(void) printf(gettext(" status: One or more devices "
+		    "are offlined.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_POOL:
+		(void) printf(gettext(" status: The pool metadata is "
+		    "corrupted.\n"));
+		break;
+
+	case ZPOOL_STATUS_VERSION_OLDER:
+		(void) printf(gettext(" status: The pool is formatted using a "
+		    "legacy on-disk version.\n"));
+		break;
+
+	case ZPOOL_STATUS_VERSION_NEWER:
+		(void) printf(gettext(" status: The pool is formatted using an "
+		    "incompatible version.\n"));
+		break;
+
+	case ZPOOL_STATUS_FEAT_DISABLED:
+		(void) printf(gettext(" status: Some supported features are "
+		    "not enabled on the pool.\n"));
+		break;
+
+	case ZPOOL_STATUS_UNSUP_FEAT_READ:
+		(void) printf(gettext("status: The pool uses the following "
+		    "feature(s) not supported on this sytem:\n"));
+		zpool_print_unsup_feat(config);
+		break;
+
+	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
+		(void) printf(gettext("status: The pool can only be accessed "
+		    "in read-only mode on this system. It\n\tcannot be "
+		    "accessed in read-write mode because it uses the "
+		    "following\n\tfeature(s) not supported on this system:\n"));
+		zpool_print_unsup_feat(config);
+		break;
+
+	case ZPOOL_STATUS_HOSTID_MISMATCH:
+		(void) printf(gettext(" status: The pool was last accessed by "
+		    "another system.\n"));
+		break;
+
+	case ZPOOL_STATUS_FAULTED_DEV_R:
+	case ZPOOL_STATUS_FAULTED_DEV_NR:
+		(void) printf(gettext(" status: One or more devices are "
+		    "faulted.\n"));
+		break;
+
+	case ZPOOL_STATUS_BAD_LOG:
+		(void) printf(gettext(" status: An intent log record cannot be "
+		    "read.\n"));
+		break;
+
+	case ZPOOL_STATUS_RESILVERING:
+		(void) printf(gettext(" status: One or more devices were being "
+		    "resilvered.\n"));
+		break;
+
+	default:
+		/*
+		 * No other status can be seen when importing pools.
+		 */
+		assert(reason == ZPOOL_STATUS_OK);
+	}
+
+	/*
+	 * Print out an action according to the overall state of the pool.
+	 */
+	if (vs->vs_state == VDEV_STATE_HEALTHY) {
+		if (reason == ZPOOL_STATUS_VERSION_OLDER ||
+		    reason == ZPOOL_STATUS_FEAT_DISABLED) {
+			(void) printf(gettext(" action: The pool can be "
+			    "imported using its name or numeric identifier, "
+			    "though\n\tsome features will not be available "
+			    "without an explicit 'zpool upgrade'.\n"));
+		} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
+			(void) printf(gettext(" action: The pool can be "
+			    "imported using its name or numeric "
+			    "identifier and\n\tthe '-f' flag.\n"));
+		} else {
+			(void) printf(gettext(" action: The pool can be "
+			    "imported using its name or numeric "
+			    "identifier.\n"));
+		}
+	} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
+		(void) printf(gettext(" action: The pool can be imported "
+		    "despite missing or damaged devices.  The\n\tfault "
+		    "tolerance of the pool may be compromised if imported.\n"));
+	} else {
+		switch (reason) {
+		case ZPOOL_STATUS_VERSION_NEWER:
+			(void) printf(gettext(" action: The pool cannot be "
+			    "imported.  Access the pool on a system running "
+			    "newer\n\tsoftware, or recreate the pool from "
+			    "backup.\n"));
+			break;
+		case ZPOOL_STATUS_UNSUP_FEAT_READ:
+			(void) printf(gettext("action: The pool cannot be "
+			    "imported. Access the pool on a system that "
+			    "supports\n\tthe required feature(s), or recreate "
+			    "the pool from backup.\n"));
+			break;
+		case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
+			(void) printf(gettext("action: The pool cannot be "
+			    "imported in read-write mode. Import the pool "
+			    "with\n"
+			    "\t\"-o readonly=on\", access the pool on a system "
+			    "that supports the\n\trequired feature(s), or "
+			    "recreate the pool from backup.\n"));
+			break;
+		case ZPOOL_STATUS_MISSING_DEV_R:
+		case ZPOOL_STATUS_MISSING_DEV_NR:
+		case ZPOOL_STATUS_BAD_GUID_SUM:
+			(void) printf(gettext(" action: The pool cannot be "
+			    "imported. Attach the missing\n\tdevices and try "
+			    "again.\n"));
+			break;
+		default:
+			(void) printf(gettext(" action: The pool cannot be "
+			    "imported due to damaged devices or data.\n"));
+		}
+	}
+
+	/* Print the comment attached to the pool. */
+	if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
+		(void) printf(gettext("comment: %s\n"), comment);
+
+	/*
+	 * If the state is "closed" or "can't open", and the aux state
+	 * is "corrupt data":
+	 */
+	if (((vs->vs_state == VDEV_STATE_CLOSED) ||
+	    (vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
+	    (vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
+		if (pool_state == POOL_STATE_DESTROYED)
+			(void) printf(gettext("\tThe pool was destroyed, "
+			    "but can be imported using the '-Df' flags.\n"));
+		else if (pool_state != POOL_STATE_EXPORTED)
+			(void) printf(gettext("\tThe pool may be active on "
+			    "another system, but can be imported using\n\t"
+			    "the '-f' flag.\n"));
+	}
+
+	if (msgid != NULL)
+		(void) printf(gettext("   see: http://illumos.org/msg/%s\n"),
+		    msgid);
+
+	(void) printf(gettext(" config:\n\n"));
+
+	namewidth = max_width(NULL, nvroot, 0, 0);
+	if (namewidth < 10)
+		namewidth = 10;
+
+	print_import_config(name, nvroot, namewidth, 0);
+	if (num_logs(nvroot) > 0)
+		print_logs(NULL, nvroot, namewidth, B_FALSE);
+
+	if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
+		(void) printf(gettext("\n\tAdditional devices are known to "
+		    "be part of this pool, though their\n\texact "
+		    "configuration cannot be determined.\n"));
+	}
+}
+
+/*
+ * Perform the import for the given configuration.  This passes the heavy
+ * lifting off to zpool_import_props(), and then mounts the datasets contained
+ * within the pool.
+ */
+static int
+do_import(nvlist_t *config, const char *newname, const char *mntopts,
+    nvlist_t *props, int flags)
+{
+	zpool_handle_t *zhp;
+	char *name;
+	uint64_t state;
+	uint64_t version;
+
+	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+	    &name) == 0);
+
+	verify(nvlist_lookup_uint64(config,
+	    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
+	verify(nvlist_lookup_uint64(config,
+	    ZPOOL_CONFIG_VERSION, &version) == 0);
+	if (!SPA_VERSION_IS_SUPPORTED(version)) {
+		(void) fprintf(stderr, gettext("cannot import '%s': pool "
+		    "is formatted using an unsupported ZFS version\n"), name);
+		return (1);
+	} else if (state != POOL_STATE_EXPORTED &&
+	    !(flags & ZFS_IMPORT_ANY_HOST)) {
+		uint64_t hostid;
+
+		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID,
+		    &hostid) == 0) {
+			if ((unsigned long)hostid != gethostid()) {
+				char *hostname;
+				uint64_t timestamp;
+				time_t t;
+
+				verify(nvlist_lookup_string(config,
+				    ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
+				verify(nvlist_lookup_uint64(config,
+				    ZPOOL_CONFIG_TIMESTAMP, &timestamp) == 0);
+				t = timestamp;
+				(void) fprintf(stderr, gettext("cannot import "
+				    "'%s': pool may be in use from other "
+				    "system, it was last accessed by %s "
+				    "(hostid: 0x%lx) on %s"), name, hostname,
+				    (unsigned long)hostid,
+				    asctime(localtime(&t)));
+				(void) fprintf(stderr, gettext("use '-f' to "
+				    "import anyway\n"));
+				return (1);
+			}
+		} else {
+			(void) fprintf(stderr, gettext("cannot import '%s': "
+			    "pool may be in use from other system\n"), name);
+			(void) fprintf(stderr, gettext("use '-f' to import "
+			    "anyway\n"));
+			return (1);
+		}
+	}
+
+	if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
+		return (1);
+
+	if (newname != NULL)
+		name = (char *)newname;
+
+	if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
+		return (1);
+
+	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
+	    !(flags & ZFS_IMPORT_ONLY) &&
+	    zpool_enable_datasets(zhp, mntopts, 0) != 0) {
+		zpool_close(zhp);
+		return (1);
+	}
+
+	zpool_close(zhp);
+	return (0);
+}
+
+/*
+ * zpool import [-d dir] [-D]
+ *       import [-o mntopts] [-o prop=value] ... [-R root] [-D]
+ *              [-d dir | -c cachefile] [-f] -a
+ *       import [-o mntopts] [-o prop=value] ... [-R root] [-D]
+ *              [-d dir | -c cachefile] [-f] [-n] [-F] <pool | id> [newpool]
+ *
+ *	 -c	Read pool information from a cachefile instead of searching
+ *		devices.
+ *
+ *       -d	Scan in a specific directory, other than /dev/dsk.  More than
+ *		one directory can be specified using multiple '-d' options.
+ *
+ *       -D     Scan for previously destroyed pools or import all or only
+ *              specified destroyed pools.
+ *
+ *       -R	Temporarily import the pool, with all mountpoints relative to
+ *		the given root.  The pool will remain exported when the machine
+ *		is rebooted.
+ *
+ *       -V	Import even in the presence of faulted vdevs.  This is an
+ *       	intentionally undocumented option for testing purposes, and
+ *       	treats the pool configuration as complete, leaving any bad
+ *		vdevs in the FAULTED state. In other words, it does verbatim
+ *		import.
+ *
+ *       -f	Force import, even if it appears that the pool is active.
+ *
+ *       -F     Attempt rewind if necessary.
+ *
+ *       -n     See if rewind would work, but don't actually rewind.
+ *
+ *       -N     Import the pool but don't mount datasets.
+ *
 *       -T     Specify a starting txg to use for import. This is an
 *       	intentionally undocumented option for testing purposes.
+ *
+ *       -a	Import all pools found.
+ *
+ *       -o	Set property=value and/or temporary mount options (without '=').
+ *
 * The import command scans for pools to import, and imports pools based on pool
+ * name and GUID.  The pool can also be renamed as part of the import process.
+ */
int
zpool_do_import(int argc, char **argv)
{
	char **searchdirs = NULL;	/* directories from -d options */
	int nsearch = 0;		/* number of entries in searchdirs */
	int c;
	int err = 0;
	nvlist_t *pools = NULL;		/* candidate configs from libzfs */
	boolean_t do_all = B_FALSE;	/* -a: import every pool found */
	boolean_t do_destroyed = B_FALSE; /* -D: operate on destroyed pools */
	char *mntopts = NULL;		/* temporary mount options from -o */
	nvpair_t *elem;
	nvlist_t *config;
	uint64_t searchguid = 0;	/* pool GUID, if argv[0] is numeric */
	char *searchname = NULL;	/* pool name, if argv[0] is not */
	char *propval;
	nvlist_t *found_config;
	nvlist_t *policy = NULL;	/* rewind policy attached to configs */
	nvlist_t *props = NULL;		/* properties from -o/-R */
	boolean_t first;
	int flags = ZFS_IMPORT_NORMAL;
	uint32_t rewind_policy = ZPOOL_NO_REWIND;
	boolean_t dryrun = B_FALSE;	/* -n: only check if rewind would work */
	boolean_t do_rewind = B_FALSE;	/* -F: rewind if necessary */
	boolean_t xtreme_rewind = B_FALSE; /* -X: extreme rewind */
	uint64_t pool_state, txg = -1ULL;
	char *cachefile = NULL;		/* -c: read configs from cachefile */
	importargs_t idata = { 0 };
	char *endptr;

	/* check options */
	while ((c = getopt(argc, argv, ":aCc:d:DEfFmnNo:rR:T:VX")) != -1) {
		switch (c) {
		case 'a':
			do_all = B_TRUE;
			break;
		case 'c':
			cachefile = optarg;
			break;
		case 'd':
			/* Grow the searchdirs array by one slot per -d. */
			if (searchdirs == NULL) {
				searchdirs = safe_malloc(sizeof (char *));
			} else {
				char **tmp = safe_malloc((nsearch + 1) *
				    sizeof (char *));
				bcopy(searchdirs, tmp, nsearch *
				    sizeof (char *));
				free(searchdirs);
				searchdirs = tmp;
			}
			searchdirs[nsearch++] = optarg;
			break;
		case 'D':
			do_destroyed = B_TRUE;
			break;
		case 'f':
			flags |= ZFS_IMPORT_ANY_HOST;
			break;
		case 'F':
			do_rewind = B_TRUE;
			break;
		case 'm':
			flags |= ZFS_IMPORT_MISSING_LOG;
			break;
		case 'n':
			dryrun = B_TRUE;
			break;
		case 'N':
			flags |= ZFS_IMPORT_ONLY;
			break;
		case 'o':
			/*
			 * "prop=value" sets a pool property; a bare word
			 * is taken as temporary mount options instead.
			 */
			if ((propval = strchr(optarg, '=')) != NULL) {
				*propval = '\0';
				propval++;
				if (add_prop_list(optarg, propval,
				    &props, B_TRUE))
					goto error;
			} else {
				mntopts = optarg;
			}
			break;
		case 'R':
			/*
			 * -R sets altroot, and (unless the user already set
			 * cachefile explicitly) disables the cachefile so a
			 * temporary import is not persisted.
			 */
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
				goto error;
			if (nvlist_lookup_string(props,
			    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
			    &propval) == 0)
				break;
			if (add_prop_list(zpool_prop_to_name(
			    ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
				goto error;
			break;
		case 'T':
			/* Undocumented: import as of a specific txg. */
			errno = 0;
			txg = strtoull(optarg, &endptr, 10);
			if (errno != 0 || *endptr != '\0') {
				(void) fprintf(stderr,
				    gettext("invalid txg value\n"));
				usage(B_FALSE);
			}
			rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
			break;
		case 'V':
			flags |= ZFS_IMPORT_VERBATIM;
			break;
		case 'X':
			xtreme_rewind = B_TRUE;
			break;
		case ':':
			(void) fprintf(stderr, gettext("missing argument for "
			    "'%c' option\n"), optopt);
			usage(B_FALSE);
			break;
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	/* -c and -d are mutually exclusive sources of configs. */
	if (cachefile && nsearch != 0) {
		(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
		usage(B_FALSE);
	}

	if ((dryrun || xtreme_rewind) && !do_rewind) {
		(void) fprintf(stderr,
		    gettext("-n or -X only meaningful with -F\n"));
		usage(B_FALSE);
	}
	/* NOTE(review): -n/-F here override the policy set by -T above. */
	if (dryrun)
		rewind_policy = ZPOOL_TRY_REWIND;
	else if (do_rewind)
		rewind_policy = ZPOOL_DO_REWIND;
	if (xtreme_rewind)
		rewind_policy |= ZPOOL_EXTREME_REWIND;

	/* In the future, we can capture further policy and include it here */
	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, txg) != 0 ||
	    nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
		goto error;

	/* Default to scanning /dev when no -d directories were given. */
	if (searchdirs == NULL) {
		searchdirs = safe_malloc(sizeof (char *));
		searchdirs[0] = "/dev";
		nsearch = 1;
	}

	/* check argument count */
	if (do_all) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}
	} else {
		if (argc > 2) {
			(void) fprintf(stderr, gettext("too many arguments\n"));
			usage(B_FALSE);
		}

		/*
		 * Check for the SYS_CONFIG privilege.  We do this explicitly
		 * here because otherwise any attempt to discover pools will
		 * silently fail.
		 */
		if (argc == 0 && !priv_ineffect(PRIV_SYS_CONFIG)) {
			(void) fprintf(stderr, gettext("cannot "
			    "discover pools: permission denied\n"));
			free(searchdirs);
			nvlist_free(policy);
			return (1);
		}
	}

	/*
	 * Depending on the arguments given, we do one of the following:
	 *
	 *	<none>	Iterate through all pools and display information about
	 *		each one.
	 *
	 *	-a	Iterate through all pools and try to import each one.
	 *
	 *	<id>	Find the pool that corresponds to the given GUID/pool
	 *		name and import that one.
	 *
	 *	-D	Above options applies only to destroyed pools.
	 */
	if (argc != 0) {
		char *endptr;

		/*
		 * If argv[0] parses fully as a number it is treated as a
		 * GUID; otherwise it is treated as a pool name.
		 */
		errno = 0;
		searchguid = strtoull(argv[0], &endptr, 10);
		if (errno != 0 || *endptr != '\0')
			searchname = argv[0];
		found_config = NULL;

		/*
		 * User specified a name or guid.  Ensure it's unique.
		 */
		idata.unique = B_TRUE;
	}


	/* Hand the search criteria to libzfs. */
	idata.path = searchdirs;
	idata.paths = nsearch;
	idata.poolname = searchname;
	idata.guid = searchguid;
	idata.cachefile = cachefile;

	pools = zpool_search_import(g_zfs, &idata);

	/*
	 * idata.exists is set by libzfs when a pool with the requested name
	 * is already active; importing it under the same name is an error.
	 */
	if (pools != NULL && idata.exists &&
	    (argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
		(void) fprintf(stderr, gettext("cannot import '%s': "
		    "a pool with that name already exists\n"),
		    argv[0]);
		(void) fprintf(stderr, gettext("use the form '%s "
		    "<pool | id> <newpool>' to give it a new name\n"),
		    "zpool import");
		err = 1;
	} else if (pools == NULL && idata.exists) {
		(void) fprintf(stderr, gettext("cannot import '%s': "
		    "a pool with that name is already created/imported,\n"),
		    argv[0]);
		(void) fprintf(stderr, gettext("and no additional pools "
		    "with that name were found\n"));
		err = 1;
	} else if (pools == NULL) {
		if (argc != 0) {
			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "no such pool available\n"), argv[0]);
		}
		err = 1;
	}

	if (err == 1) {
		free(searchdirs);
		nvlist_free(policy);
		return (1);
	}

	/*
	 * At this point we have a list of import candidate configs. Even if
	 * we were searching by pool name or guid, we still need to
	 * post-process the list to deal with pool state and possible
	 * duplicate names.
	 */
	err = 0;
	elem = NULL;
	first = B_TRUE;
	while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {

		verify(nvpair_value_nvlist(elem, &config) == 0);

		/* -D restricts to destroyed pools; default excludes them. */
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &pool_state) == 0);
		if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
			continue;
		if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
			continue;

		verify(nvlist_add_nvlist(config, ZPOOL_REWIND_POLICY,
		    policy) == 0);

		if (argc == 0) {
			if (first)
				first = B_FALSE;
			else if (!do_all)
				(void) printf("\n");

			if (do_all) {
				err |= do_import(config, NULL, mntopts,
				    props, flags);
			} else {
				show_import(config);
			}
		} else if (searchname != NULL) {
			char *name;

			/*
			 * We are searching for a pool based on name.
			 */
			verify(nvlist_lookup_string(config,
			    ZPOOL_CONFIG_POOL_NAME, &name) == 0);

			if (strcmp(name, searchname) == 0) {
				if (found_config != NULL) {
					(void) fprintf(stderr, gettext(
					    "cannot import '%s': more than "
					    "one matching pool\n"), searchname);
					(void) fprintf(stderr, gettext(
					    "import by numeric ID instead\n"));
					err = B_TRUE;
				}
				found_config = config;
			}
		} else {
			uint64_t guid;

			/*
			 * Search for a pool by guid.
			 */
			verify(nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);

			if (guid == searchguid)
				found_config = config;
		}
	}

	/*
	 * If we were searching for a specific pool, verify that we found a
	 * pool, and then do the import.
	 */
	if (argc != 0 && err == 0) {
		if (found_config == NULL) {
			(void) fprintf(stderr, gettext("cannot import '%s': "
			    "no such pool available\n"), argv[0]);
			err = B_TRUE;
		} else {
			err |= do_import(found_config, argc == 1 ? NULL :
			    argv[1], mntopts, props, flags);
		}
	}

	/*
	 * If we were just looking for pools, report an error if none were
	 * found.
	 */
	if (argc == 0 && first)
		(void) fprintf(stderr,
		    gettext("no pools available to import\n"));

error:
	nvlist_free(props);
	nvlist_free(pools);
	nvlist_free(policy);
	free(searchdirs);

	return (err ? 1 : 0);
}
+
/* Callback state shared by the 'zpool iostat' iteration routines. */
typedef struct iostat_cbdata {
	boolean_t cb_verbose;	/* -v: show per-vdev statistics */
	int cb_namewidth;	/* width of the pool/vdev name column */
	int cb_iteration;	/* 1-based pass number of the iostat loop */
	zpool_list_t *cb_list;	/* list of pools being displayed */
} iostat_cbdata_t;
+
+static void
+print_iostat_separator(iostat_cbdata_t *cb)
+{
+	int i = 0;
+
+	for (i = 0; i < cb->cb_namewidth; i++)
+		(void) printf("-");
+	(void) printf("  -----  -----  -----  -----  -----  -----\n");
+}
+
+static void
+print_iostat_header(iostat_cbdata_t *cb)
+{
+	(void) printf("%*s     capacity     operations    bandwidth\n",
+	    cb->cb_namewidth, "");
+	(void) printf("%-*s  alloc   free   read  write   read  write\n",
+	    cb->cb_namewidth, "pool");
+	print_iostat_separator(cb);
+}
+
+/*
+ * Display a single statistic.
+ */
+static void
+print_one_stat(uint64_t value)
+{
+	char buf[64];
+
+	zfs_nicenum(value, buf, sizeof (buf));
+	(void) printf("  %5s", buf);
+}
+
+/*
+ * Print out all the statistics for the given vdev.  This can either be the
+ * toplevel configuration, or called recursively.  If 'name' is NULL, then this
+ * is a verbose output, and we don't want to display the toplevel pool stats.
+ */
+void
+print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
+    nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
+{
+	nvlist_t **oldchild, **newchild;
+	uint_t c, children;
+	vdev_stat_t *oldvs, *newvs;
+	vdev_stat_t zerovs = { 0 };
+	uint64_t tdelta;
+	double scale;
+	char *vname;
+
+	if (oldnv != NULL) {
+		verify(nvlist_lookup_uint64_array(oldnv,
+		    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
+	} else {
+		oldvs = &zerovs;
+	}
+
+	verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&newvs, &c) == 0);
+
+	if (strlen(name) + depth > cb->cb_namewidth)
+		(void) printf("%*s%s", depth, "", name);
+	else
+		(void) printf("%*s%s%*s", depth, "", name,
+		    (int)(cb->cb_namewidth - strlen(name) - depth), "");
+
+	tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
+
+	if (tdelta == 0)
+		scale = 1.0;
+	else
+		scale = (double)NANOSEC / tdelta;
+
+	/* only toplevel vdevs have capacity stats */
+	if (newvs->vs_space == 0) {
+		(void) printf("      -      -");
+	} else {
+		print_one_stat(newvs->vs_alloc);
+		print_one_stat(newvs->vs_space - newvs->vs_alloc);
+	}
+
+	print_one_stat((uint64_t)(scale * (newvs->vs_ops[ZIO_TYPE_READ] -
+	    oldvs->vs_ops[ZIO_TYPE_READ])));
+
+	print_one_stat((uint64_t)(scale * (newvs->vs_ops[ZIO_TYPE_WRITE] -
+	    oldvs->vs_ops[ZIO_TYPE_WRITE])));
+
+	print_one_stat((uint64_t)(scale * (newvs->vs_bytes[ZIO_TYPE_READ] -
+	    oldvs->vs_bytes[ZIO_TYPE_READ])));
+
+	print_one_stat((uint64_t)(scale * (newvs->vs_bytes[ZIO_TYPE_WRITE] -
+	    oldvs->vs_bytes[ZIO_TYPE_WRITE])));
+
+	(void) printf("\n");
+
+	if (!cb->cb_verbose)
+		return;
+
+	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
+	    &newchild, &children) != 0)
+		return;
+
+	if (oldnv && nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
+	    &oldchild, &c) != 0)
+		return;
+
+	for (c = 0; c < children; c++) {
+		uint64_t ishole = B_FALSE, islog = B_FALSE;
+
+		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
+		    &ishole);
+
+		(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
+		    &islog);
+
+		if (ishole || islog)
+			continue;
+
+		vname = zpool_vdev_name(g_zfs, zhp, newchild[c], B_FALSE);
+		print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
+		    newchild[c], cb, depth + 2);
+		free(vname);
+	}
+
+	/*
+	 * Log device section
+	 */
+
+	if (num_logs(newnv) > 0) {
+		(void) printf("%-*s      -      -      -      -      -      "
+		    "-\n", cb->cb_namewidth, "logs");
+
+		for (c = 0; c < children; c++) {
+			uint64_t islog = B_FALSE;
+			(void) nvlist_lookup_uint64(newchild[c],
+			    ZPOOL_CONFIG_IS_LOG, &islog);
+
+			if (islog) {
+				vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
+				    B_FALSE);
+				print_vdev_stats(zhp, vname, oldnv ?
+				    oldchild[c] : NULL, newchild[c],
+				    cb, depth + 2);
+				free(vname);
+			}
+		}
+
+	}
+
+	/*
+	 * Include level 2 ARC devices in iostat output
+	 */
+	if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
+	    &newchild, &children) != 0)
+		return;
+
+	if (oldnv && nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
+	    &oldchild, &c) != 0)
+		return;
+
+	if (children > 0) {
+		(void) printf("%-*s      -      -      -      -      -      "
+		    "-\n", cb->cb_namewidth, "cache");
+		for (c = 0; c < children; c++) {
+			vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
+			    B_FALSE);
+			print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
+			    newchild[c], cb, depth + 2);
+			free(vname);
+		}
+	}
+}
+
+static int
+refresh_iostat(zpool_handle_t *zhp, void *data)
+{
+	iostat_cbdata_t *cb = data;
+	boolean_t missing;
+
+	/*
+	 * If the pool has disappeared, remove it from the list and continue.
+	 */
+	if (zpool_refresh_stats(zhp, &missing) != 0)
+		return (-1);
+
+	if (missing)
+		pool_list_remove(cb->cb_list, zhp);
+
+	return (0);
+}
+
+/*
+ * Callback to print out the iostats for the given pool.
+ */
+int
+print_iostat(zpool_handle_t *zhp, void *data)
+{
+	iostat_cbdata_t *cb = data;
+	nvlist_t *oldconfig, *newconfig;
+	nvlist_t *oldnvroot, *newnvroot;
+
+	newconfig = zpool_get_config(zhp, &oldconfig);
+
+	if (cb->cb_iteration == 1)
+		oldconfig = NULL;
+
+	verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
+	    &newnvroot) == 0);
+
+	if (oldconfig == NULL)
+		oldnvroot = NULL;
+	else
+		verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
+		    &oldnvroot) == 0);
+
+	/*
+	 * Print out the statistics for the pool.
+	 */
+	print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, cb, 0);
+
+	if (cb->cb_verbose)
+		print_iostat_separator(cb);
+
+	return (0);
+}
+
+int
+get_namewidth(zpool_handle_t *zhp, void *data)
+{
+	iostat_cbdata_t *cb = data;
+	nvlist_t *config, *nvroot;
+
+	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
+		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    &nvroot) == 0);
+		if (!cb->cb_verbose)
+			cb->cb_namewidth = strlen(zpool_get_name(zhp));
+		else
+			cb->cb_namewidth = max_width(zhp, nvroot, 0,
+			    cb->cb_namewidth);
+	}
+
+	/*
+	 * The width must fall into the range [10,38].  The upper limit is the
+	 * maximum we can have and still fit in 80 columns.
+	 */
+	if (cb->cb_namewidth < 10)
+		cb->cb_namewidth = 10;
+	if (cb->cb_namewidth > 38)
+		cb->cb_namewidth = 38;
+
+	return (0);
+}
+
+/*
+ * Parse the input string, get the 'interval' and 'count' value if there is one.
+ */
+static void
+get_interval_count(int *argcp, char **argv, unsigned long *iv,
+    unsigned long *cnt)
+{
+	unsigned long interval = 0, count = 0;
+	int argc = *argcp, errno;
+
+	/*
+	 * Determine if the last argument is an integer or a pool name
+	 */
+	if (argc > 0 && isdigit(argv[argc - 1][0])) {
+		char *end;
+
+		errno = 0;
+		interval = strtoul(argv[argc - 1], &end, 10);
+
+		if (*end == '\0' && errno == 0) {
+			if (interval == 0) {
+				(void) fprintf(stderr, gettext("interval "
+				    "cannot be zero\n"));
+				usage(B_FALSE);
+			}
+			/*
+			 * Ignore the last parameter
+			 */
+			argc--;
+		} else {
+			/*
+			 * If this is not a valid number, just plow on.  The
+			 * user will get a more informative error message later
+			 * on.
+			 */
+			interval = 0;
+		}
+	}
+
+	/*
+	 * If the last argument is also an integer, then we have both a count
+	 * and an interval.
+	 */
+	if (argc > 0 && isdigit(argv[argc - 1][0])) {
+		char *end;
+
+		errno = 0;
+		count = interval;
+		interval = strtoul(argv[argc - 1], &end, 10);
+
+		if (*end == '\0' && errno == 0) {
+			if (interval == 0) {
+				(void) fprintf(stderr, gettext("interval "
+				    "cannot be zero\n"));
+				usage(B_FALSE);
+			}
+
+			/*
+			 * Ignore the last parameter
+			 */
+			argc--;
+		} else {
+			interval = 0;
+		}
+	}
+
+	*iv = interval;
+	*cnt = count;
+	*argcp = argc;
+}
+
+static void
+get_timestamp_arg(char c)
+{
+	if (c == 'u')
+		timestamp_fmt = UDATE;
+	else if (c == 'd')
+		timestamp_fmt = DDATE;
+	else
+		usage(B_FALSE);
+}
+
+/*
+ * zpool iostat [-v] [-T d|u] [pool] ... [interval [count]]
+ *
+ *	-v	Display statistics for individual vdevs
+ *	-T	Display a timestamp in date(1) or Unix format
+ *
+ * This command can be tricky because we want to be able to deal with pool
+ * creation/destruction as well as vdev configuration changes.  The bulk of this
+ * processing is handled by the pool_list_* routines in zpool_iter.c.  We rely
+ * on pool_list_update() to detect the addition of new pools.  Configuration
+ * changes are all handled within libzfs.
+ */
+int
+zpool_do_iostat(int argc, char **argv)
+{
+	int c;
+	int ret;
+	int npools;
+	unsigned long interval = 0, count = 0;
+	zpool_list_t *list;
+	boolean_t verbose = B_FALSE;
+	iostat_cbdata_t cb;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "T:v")) != -1) {
+		switch (c) {
+		case 'T':
+			get_timestamp_arg(*optarg);
+			break;
+		case 'v':
+			verbose = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	get_interval_count(&argc, argv, &interval, &count);
+
+	/*
+	 * Construct the list of all interesting pools.
+	 */
+	ret = 0;
+	if ((list = pool_list_get(argc, argv, NULL, &ret)) == NULL)
+		return (1);
+
+	if (pool_list_count(list) == 0 && argc != 0) {
+		pool_list_free(list);
+		return (1);
+	}
+
+	if (pool_list_count(list) == 0 && interval == 0) {
+		pool_list_free(list);
+		(void) fprintf(stderr, gettext("no pools available\n"));
+		return (1);
+	}
+
+	/*
+	 * Enter the main iostat loop.
+	 */
+	cb.cb_list = list;
+	cb.cb_verbose = verbose;
+	cb.cb_iteration = 0;
+	cb.cb_namewidth = 0;
+
+	for (;;) {
+		pool_list_update(list);
+
+		if ((npools = pool_list_count(list)) == 0)
+			break;
+
+		/*
+		 * Refresh all statistics.  This is done as an explicit step
+		 * before calculating the maximum name width, so that any
+		 * configuration changes are properly accounted for.
+		 */
+		(void) pool_list_iter(list, B_FALSE, refresh_iostat, &cb);
+
+		/*
+		 * Iterate over all pools to determine the maximum width
+		 * for the pool / device name column across all pools.
+		 */
+		cb.cb_namewidth = 0;
+		(void) pool_list_iter(list, B_FALSE, get_namewidth, &cb);
+
+		if (timestamp_fmt != NODATE)
+			print_timestamp(timestamp_fmt);
+
+		/*
+		 * If it's the first time, or verbose mode, print the header.
+		 */
+		if (++cb.cb_iteration == 1 || verbose)
+			print_iostat_header(&cb);
+
+		(void) pool_list_iter(list, B_FALSE, print_iostat, &cb);
+
+		/*
+		 * If there's more than one pool, and we're not in verbose mode
+		 * (which prints a separator for us), then print a separator.
+		 */
+		if (npools > 1 && !verbose)
+			print_iostat_separator(&cb);
+
+		if (verbose)
+			(void) printf("\n");
+
+		/*
+		 * Flush the output so that redirection to a file isn't buffered
+		 * indefinitely.
+		 */
+		(void) fflush(stdout);
+
+		if (interval == 0)
+			break;
+
+		if (count != 0 && --count == 0)
+			break;
+
+		(void) sleep(interval);
+	}
+
+	pool_list_free(list);
+
+	return (ret);
+}
+
/* Callback state shared by the 'zpool list' routines. */
typedef struct list_cbdata {
	boolean_t	cb_verbose;	/* -v: also list per-vdev stats */
	int		cb_namewidth;	/* width of the name column */
	boolean_t	cb_scripted;	/* -H: no headers, tab separators */
	zprop_list_t	*cb_proplist;	/* properties (columns) to display */
} list_cbdata_t;
+
+/*
+ * Given a list of columns to display, output appropriate headers for each one.
+ */
+static void
+print_header(list_cbdata_t *cb)
+{
+	zprop_list_t *pl = cb->cb_proplist;
+	char headerbuf[ZPOOL_MAXPROPLEN];
+	const char *header;
+	boolean_t first = B_TRUE;
+	boolean_t right_justify;
+	size_t width = 0;
+
+	for (; pl != NULL; pl = pl->pl_next) {
+		width = pl->pl_width;
+		if (first && cb->cb_verbose) {
+			/*
+			 * Reset the width to accommodate the verbose listing
+			 * of devices.
+			 */
+			width = cb->cb_namewidth;
+		}
+
+		if (!first)
+			(void) printf("  ");
+		else
+			first = B_FALSE;
+
+		right_justify = B_FALSE;
+		if (pl->pl_prop != ZPROP_INVAL) {
+			header = zpool_prop_column_name(pl->pl_prop);
+			right_justify = zpool_prop_align_right(pl->pl_prop);
+		} else {
+			int i;
+
+			for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
+				headerbuf[i] = toupper(pl->pl_user_prop[i]);
+			headerbuf[i] = '\0';
+			header = headerbuf;
+		}
+
+		if (pl->pl_next == NULL && !right_justify)
+			(void) printf("%s", header);
+		else if (right_justify)
+			(void) printf("%*s", width, header);
+		else
+			(void) printf("%-*s", width, header);
+
+	}
+
+	(void) printf("\n");
+}
+
+/*
+ * Given a pool and a list of properties, print out all the properties according
+ * to the described layout.
+ */
+static void
+print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
+{
+	zprop_list_t *pl = cb->cb_proplist;
+	boolean_t first = B_TRUE;
+	char property[ZPOOL_MAXPROPLEN];
+	char *propstr;
+	boolean_t right_justify;
+	size_t width;
+
+	for (; pl != NULL; pl = pl->pl_next) {
+
+		width = pl->pl_width;
+		if (first && cb->cb_verbose) {
+			/*
+			 * Reset the width to accommodate the verbose listing
+			 * of devices.
+			 */
+			width = cb->cb_namewidth;
+		}
+
+		if (!first) {
+			if (cb->cb_scripted)
+				(void) printf("\t");
+			else
+				(void) printf("  ");
+		} else {
+			first = B_FALSE;
+		}
+
+		right_justify = B_FALSE;
+		if (pl->pl_prop != ZPROP_INVAL) {
+			if (pl->pl_prop == ZPOOL_PROP_EXPANDSZ &&
+			    zpool_get_prop_int(zhp, pl->pl_prop, NULL) == 0)
+				propstr = "-";
+			else if (zpool_get_prop(zhp, pl->pl_prop, property,
+			    sizeof (property), NULL) != 0)
+				propstr = "-";
+			else
+				propstr = property;
+
+			right_justify = zpool_prop_align_right(pl->pl_prop);
+		} else if ((zpool_prop_feature(pl->pl_user_prop) ||
+		    zpool_prop_unsupported(pl->pl_user_prop)) &&
+		    zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
+		    sizeof (property)) == 0) {
+			propstr = property;
+		} else {
+			propstr = "-";
+		}
+
+
+		/*
+		 * If this is being called in scripted mode, or if this is the
+		 * last column and it is left-justified, don't include a width
+		 * format specifier.
+		 */
+		if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
+			(void) printf("%s", propstr);
+		else if (right_justify)
+			(void) printf("%*s", width, propstr);
+		else
+			(void) printf("%-*s", width, propstr);
+	}
+
+	(void) printf("\n");
+}
+
+static void
+print_one_column(zpool_prop_t prop, uint64_t value, boolean_t scripted)
+{
+	char propval[64];
+	boolean_t fixed;
+	size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
+
+	zfs_nicenum(value, propval, sizeof (propval));
+
+	if (prop == ZPOOL_PROP_EXPANDSZ && value == 0)
+		(void) strlcpy(propval, "-", sizeof (propval));
+
+	if (scripted)
+		(void) printf("\t%s", propval);
+	else
+		(void) printf("  %*s", width, propval);
+}
+
/*
 * Print the space-usage columns for one vdev and recurse into its
 * children (and l2cache devices).  A NULL 'name' suppresses the row for
 * the node itself, printing only its children.
 */
void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
    list_cbdata_t *cb, int depth)
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *vname;
	boolean_t scripted = cb->cb_scripted;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	if (name != NULL) {
		/* Indent by depth and pad the name to cb_namewidth. */
		if (scripted)
			(void) printf("\t%s", name);
		else if (strlen(name) + depth > cb->cb_namewidth)
			(void) printf("%*s%s", depth, "", name);
		else
			(void) printf("%*s%s%*s", depth, "", name,
			    (int)(cb->cb_namewidth - strlen(name) - depth), "");

		/* only toplevel vdevs have capacity stats */
		if (vs->vs_space == 0) {
			if (scripted)
				(void) printf("\t-\t-\t-");
			else
				(void) printf("      -      -      -");
		} else {
			print_one_column(ZPOOL_PROP_SIZE, vs->vs_space,
			    scripted);
			print_one_column(ZPOOL_PROP_CAPACITY, vs->vs_alloc,
			    scripted);
			print_one_column(ZPOOL_PROP_FREE,
			    vs->vs_space - vs->vs_alloc, scripted);
		}
		print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize,
		    scripted);
		(void) printf("\n");
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		uint64_t ishole = B_FALSE;

		/* Holes are placeholders for removed vdevs; skip them. */
		if (nvlist_lookup_uint64(child[c],
		    ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
			continue;

		vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE);
		print_list_stats(zhp, vname, child[c], cb, depth + 2);
		free(vname);
	}

	/*
	 * Include level 2 ARC devices in iostat output
	 */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) != 0)
		return;

	if (children > 0) {
		(void) printf("%-*s      -      -      -      -      -      "
		    "-\n", cb->cb_namewidth, "cache");
		for (c = 0; c < children; c++) {
			vname = zpool_vdev_name(g_zfs, zhp, child[c],
			    B_FALSE);
			print_list_stats(zhp, vname, child[c], cb, depth + 2);
			free(vname);
		}
	}
}
+
+
+/*
+ * Pool iteration callback for "zpool list": print the one-line summary
+ * for this pool and, in verbose mode, a per-vdev space breakdown.
+ */
+int
+list_callback(zpool_handle_t *zhp, void *data)
+{
+	list_cbdata_t *cbp = data;
+	nvlist_t *config = zpool_get_config(zhp, NULL);
+	nvlist_t *nvroot;
+
+	print_pool(zhp, cbp);
+
+	if (cbp->cb_verbose) {
+		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    &nvroot) == 0);
+		print_list_stats(zhp, NULL, nvroot, cbp, 0);
+	}
+
+	return (0);
+}
+
+/*
+ * zpool list [-Hv] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
+ *
+ *	-H	Scripted mode.  Don't display headers, and separate properties
+ *		by a single tab.
+ *	-o	List of properties to display.  Defaults to
+ *		"name,size,allocated,free,capacity,dedupratio,health,altroot"
+ *	-T	Display a timestamp in date(1) or Unix format
+ *	-v	Verbose mode: also print per-vdev space statistics.
+ *
+ * List all pools in the system, whether or not they're healthy.  Output space
+ * statistics for each one, as well as health status summary.
+ */
+int
+zpool_do_list(int argc, char **argv)
+{
+	int c;
+	int ret;
+	list_cbdata_t cb = { 0 };
+	/* Property list used when no -o option is supplied. */
+	static char default_props[] =
+	    "name,size,allocated,free,capacity,dedupratio,"
+	    "health,altroot";
+	char *props = default_props;
+	unsigned long interval = 0, count = 0;
+	zpool_list_t *list;
+	boolean_t first = B_TRUE;	/* header printed once (unless -v) */
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":Ho:T:v")) != -1) {
+		switch (c) {
+		case 'H':
+			cb.cb_scripted = B_TRUE;
+			break;
+		case 'o':
+			props = optarg;
+			break;
+		case 'T':
+			get_timestamp_arg(*optarg);
+			break;
+		case 'v':
+			cb.cb_verbose = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* Peel off trailing [interval [count]] arguments, if present. */
+	get_interval_count(&argc, argv, &interval, &count);
+
+	if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
+		usage(B_FALSE);
+
+	if ((list = pool_list_get(argc, argv, &cb.cb_proplist, &ret)) == NULL)
+		return (1);
+
+	/* With no pool arguments and nothing imported, say so and stop. */
+	if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
+		(void) printf(gettext("no pools available\n"));
+		zprop_free_list(cb.cb_proplist);
+		return (0);
+	}
+
+	/*
+	 * Print the listing once, or repeatedly every "interval" seconds
+	 * (optionally limited to "count" iterations).
+	 */
+	for (;;) {
+		pool_list_update(list);
+
+		if (pool_list_count(list) == 0)
+			break;
+
+		/* Recompute the widest pool/vdev name for column layout. */
+		cb.cb_namewidth = 0;
+		(void) pool_list_iter(list, B_FALSE, get_namewidth, &cb);
+
+		if (timestamp_fmt != NODATE)
+			print_timestamp(timestamp_fmt);
+
+		/* In verbose mode the header is re-printed each pass. */
+		if (!cb.cb_scripted && (first || cb.cb_verbose)) {
+			print_header(&cb);
+			first = B_FALSE;
+		}
+		ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
+
+		if (interval == 0)
+			break;
+
+		if (count != 0 && --count == 0)
+			break;
+
+		(void) sleep(interval);
+	}
+
+	zprop_free_list(cb.cb_proplist);
+	return (ret);
+}
+
+/*
+ * Recursively search a vdev tree for a leaf whose ZPOOL_CONFIG_PATH
+ * matches "name".  A leading _PATH_DEV ("/dev/") prefix is stripped
+ * from both sides before comparison.  Returns the matching vdev
+ * nvlist, or NULL if no leaf matches.
+ */
+static nvlist_t *
+zpool_get_vdev_by_name(nvlist_t *nv, char *name)
+{
+	nvlist_t **kids;
+	uint_t i, nkids;
+	char *path;
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &kids, &nkids) == 0) {
+		/* Interior vdev: descend into each child in turn. */
+		for (i = 0; i < nkids; i++) {
+			nvlist_t *found;
+
+			found = zpool_get_vdev_by_name(kids[i], name);
+			if (found != NULL)
+				return (found);
+		}
+		return (NULL);
+	}
+
+	/* Leaf vdev: compare device paths, ignoring any /dev/ prefix. */
+	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
+	if (strncmp(name, _PATH_DEV, sizeof (_PATH_DEV) - 1) == 0)
+		name += sizeof (_PATH_DEV) - 1;
+	if (strncmp(path, _PATH_DEV, sizeof (_PATH_DEV) - 1) == 0)
+		path += sizeof (_PATH_DEV) - 1;
+
+	return (strcmp(name, path) == 0 ? nv : NULL);
+}
+
+/*
+ * Shared implementation of "zpool attach" and "zpool replace".
+ *
+ * After option processing the arguments are <pool> <device>
+ * [<new_device> ...].  For replace (replacing != 0) <new_device> may be
+ * omitted, in which case <device> itself is reused as the replacement
+ * target.  The remaining arguments are assembled into a vdev
+ * specification by make_root_vdev() and passed to zpool_vdev_attach().
+ *
+ *	-f	Force attach, even if the new device appears to be in use.
+ *
+ * Returns 0 on success, nonzero on failure.
+ */
+static int
+zpool_do_attach_or_replace(int argc, char **argv, int replacing)
+{
+	boolean_t force = B_FALSE;
+	int c;
+	nvlist_t *nvroot;
+	char *poolname, *old_disk, *new_disk;
+	zpool_handle_t *zhp;
+	int ret;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "f")) != -1) {
+		switch (c) {
+		case 'f':
+			force = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* get pool name and check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name argument\n"));
+		usage(B_FALSE);
+	}
+
+	poolname = argv[0];
+
+	if (argc < 2) {
+		(void) fprintf(stderr,
+		    gettext("missing <device> specification\n"));
+		usage(B_FALSE);
+	}
+
+	old_disk = argv[1];
+
+	if (argc < 3) {
+		/*
+		 * Only "replace" may omit <new_device>; in that case the
+		 * existing device name doubles as the replacement spec.
+		 */
+		if (!replacing) {
+			(void) fprintf(stderr,
+			    gettext("missing <new_device> specification\n"));
+			usage(B_FALSE);
+		}
+		new_disk = old_disk;
+		argc -= 1;
+		argv += 1;
+	} else {
+		new_disk = argv[2];
+		argc -= 2;
+		argv += 2;
+	}
+
+	/* After the shifts above, argv[0] is the new-device spec. */
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
+		return (1);
+
+	if (zpool_get_config(zhp, NULL) == NULL) {
+		(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
+		    poolname);
+		zpool_close(zhp);
+		return (1);
+	}
+
+	/* Build the replacement vdev tree from the remaining arguments. */
+	nvroot = make_root_vdev(zhp, force, B_FALSE, replacing, B_FALSE,
+	    argc, argv);
+	if (nvroot == NULL) {
+		zpool_close(zhp);
+		return (1);
+	}
+
+	ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing);
+
+	nvlist_free(nvroot);
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * zpool replace [-f] <pool> <device> <new_device>
+ *
+ *	-f	Force attach, even if <new_device> appears to be in use.
+ *
+ * Replace <device> with <new_device>.
+ */
+/* ARGSUSED */
+int
+zpool_do_replace(int argc, char **argv)
+{
+	/* "replace" is the shared attach/replace helper with replacing set. */
+	return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
+}
+
+/*
+ * zpool attach [-f] <pool> <device> <new_device>
+ *
+ *	-f	Force attach, even if <new_device> appears to be in use.
+ *
+ * Attach <new_device> to the mirror containing <device>.  If <device>
+ * is not part of a mirror, it is converted into a mirror of <device>
+ * and <new_device>.  Either way, <new_device> starts with a DTL of
+ * [0, now] and immediately begins resilvering.
+ */
+int
+zpool_do_attach(int argc, char **argv)
+{
+	/* "attach" is the shared helper with the replacing flag clear. */
+	return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
+}
+
+/*
+ * zpool detach [-f] <pool> <device>
+ *
+ *	-f	Force detach of <device>, even if DTLs argue against it
+ *		(not supported yet)
+ *
+ * Detach a device from a mirror.  The operation is refused if <device>
+ * is the last device in the mirror, or if the DTLs indicate that this
+ * device has the only valid copy of some data.
+ */
+/* ARGSUSED */
+int
+zpool_do_detach(int argc, char **argv)
+{
+	zpool_handle_t *zhp;
+	char *pool, *dev;
+	int ret, c;
+
+	/*
+	 * Process options.  '-f' is recognized by getopt but not yet
+	 * implemented, so it deliberately falls through and is rejected
+	 * exactly like an unknown option.
+	 */
+	while ((c = getopt(argc, argv, "f")) != -1) {
+		switch (c) {
+		case 'f':
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* Both a pool name and a device name are required. */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name argument\n"));
+		usage(B_FALSE);
+	}
+
+	if (argc < 2) {
+		(void) fprintf(stderr,
+		    gettext("missing <device> specification\n"));
+		usage(B_FALSE);
+	}
+
+	pool = argv[0];
+	dev = argv[1];
+
+	if ((zhp = zpool_open(g_zfs, pool)) == NULL)
+		return (1);
+
+	ret = zpool_vdev_detach(zhp, dev);
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * zpool split [-n] [-o prop=val] ...
+ *		[-o mntopt] ...
+ *		[-R altroot] <pool> <newpool> [<device> ...]
+ *
+ *	-n	Do not split the pool, but display the resulting layout if
+ *		it were to be split.
+ *	-o	Set property=value, or set mount options.
+ *	-R	Mount the split-off pool under an alternate root.
+ *
+ * Splits the named pool and gives it the new pool name.  Devices to be split
+ * off may be listed, provided that no more than one device is specified
+ * per top-level vdev mirror.  The newly split pool is left in an exported
+ * state unless -R is specified.
+ *
+ * Restrictions: the top-level of the pool must only be made up of
+ * mirrors; all devices in the pool must be healthy; no device may be
+ * undergoing a resilvering operation.
+ */
+int
+zpool_do_split(int argc, char **argv)
+{
+	char *srcpool, *newpool, *propval;
+	char *mntopts = NULL;
+	splitflags_t flags;
+	int c, ret = 0;
+	zpool_handle_t *zhp;
+	nvlist_t *config, *props = NULL;
+
+	flags.dryrun = B_FALSE;
+	flags.import = B_FALSE;
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":R:no:")) != -1) {
+		switch (c) {
+		case 'R':
+			/* -R implies importing the new pool after the split */
+			flags.import = B_TRUE;
+			if (add_prop_list(
+			    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
+			    &props, B_TRUE) != 0) {
+				if (props)
+					nvlist_free(props);
+				usage(B_FALSE);
+			}
+			break;
+		case 'n':
+			flags.dryrun = B_TRUE;
+			break;
+		case 'o':
+			/*
+			 * "prop=value" arguments become pool properties;
+			 * anything without '=' is taken as mount options.
+			 */
+			if ((propval = strchr(optarg, '=')) != NULL) {
+				*propval = '\0';
+				propval++;
+				if (add_prop_list(optarg, propval,
+				    &props, B_TRUE) != 0) {
+					if (props)
+						nvlist_free(props);
+					usage(B_FALSE);
+				}
+			} else {
+				mntopts = optarg;
+			}
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+			break;
+		}
+	}
+
+	/* Mount options only make sense if the new pool will be imported. */
+	if (!flags.import && mntopts != NULL) {
+		(void) fprintf(stderr, gettext("setting mntopts is only "
+		    "valid when importing the pool\n"));
+		usage(B_FALSE);
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("Missing pool name\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("Missing new pool name\n"));
+		usage(B_FALSE);
+	}
+
+	srcpool = argv[0];
+	newpool = argv[1];
+
+	/* Any remaining arguments name the devices to split off. */
+	argc -= 2;
+	argv += 2;
+
+	if ((zhp = zpool_open(g_zfs, srcpool)) == NULL)
+		return (1);
+
+	/* Perform (or, with -n, just compute) the split. */
+	config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
+	if (config == NULL) {
+		ret = 1;
+	} else {
+		if (flags.dryrun) {
+			(void) printf(gettext("would create '%s' with the "
+			    "following layout:\n\n"), newpool);
+			print_vdev_tree(NULL, newpool, config, 0, B_FALSE);
+		}
+		nvlist_free(config);
+	}
+
+	zpool_close(zhp);
+
+	if (ret != 0 || flags.dryrun || !flags.import)
+		return (ret);
+
+	/*
+	 * The split was successful. Now we need to open the new
+	 * pool and import it.
+	 */
+	if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL)
+		return (1);
+	if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
+	    zpool_enable_datasets(zhp, mntopts, 0) != 0) {
+		ret = 1;
+		(void) fprintf(stderr, gettext("Split was successful, but "
+		    "the datasets could not all be mounted\n"));
+		(void) fprintf(stderr, gettext("Try doing '%s' with a "
+		    "different altroot\n"), "zpool import");
+	}
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+
+
+/*
+ * zpool online [-e] <pool> <device> ...
+ *
+ *	-e	Expand the device (ZFS_ONLINE_EXPAND).
+ *
+ * Bring one or more devices online.  Note that '-t' appears in the
+ * getopt string below but deliberately falls through to the invalid-
+ * option case: it is not yet supported.
+ */
+int
+zpool_do_online(int argc, char **argv)
+{
+	int c, i;
+	char *poolname;
+	zpool_handle_t *zhp;
+	int ret = 0;
+	vdev_state_t newstate;
+	int flags = 0;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "et")) != -1) {
+		switch (c) {
+		case 'e':
+			flags |= ZFS_ONLINE_EXPAND;
+			break;
+		case 't':
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* get pool name and check number of arguments */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing device name\n"));
+		usage(B_FALSE);
+	}
+
+	poolname = argv[0];
+
+	if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
+		return (1);
+
+	/* Online each named device; warn if a device stays degraded. */
+	for (i = 1; i < argc; i++) {
+		if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
+			if (newstate != VDEV_STATE_HEALTHY) {
+				(void) printf(gettext("warning: device '%s' "
+				    "onlined, but remains in faulted state\n"),
+				    argv[i]);
+				if (newstate == VDEV_STATE_FAULTED)
+					(void) printf(gettext("use 'zpool "
+					    "clear' to restore a faulted "
+					    "device\n"));
+				else
+					(void) printf(gettext("use 'zpool "
+					    "replace' to replace devices "
+					    "that are no longer present\n"));
+			}
+		} else {
+			ret = 1;
+		}
+	}
+
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * zpool offline [-ft] <pool> <device> ...
+ *
+ *	-f	Force the device into the offline state, even if doing
+ *		so would appear to compromise pool availability.
+ *		(not supported yet)
+ *
+ *	-t	Only take the device off-line temporarily.  The offline
+ *		state will not be persistent across reboots.
+ */
+/* ARGSUSED */
+int
+zpool_do_offline(int argc, char **argv)
+{
+	zpool_handle_t *zhp;
+	char *pool;
+	boolean_t temporary = B_FALSE;
+	int ret = 0;
+	int c, i;
+
+	/* '-f' is recognized but unsupported; it falls through as invalid. */
+	while ((c = getopt(argc, argv, "ft")) != -1) {
+		switch (c) {
+		case 't':
+			temporary = B_TRUE;
+			break;
+		case 'f':
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* A pool name plus at least one device name is required. */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name\n"));
+		usage(B_FALSE);
+	}
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing device name\n"));
+		usage(B_FALSE);
+	}
+
+	pool = argv[0];
+
+	if ((zhp = zpool_open(g_zfs, pool)) == NULL)
+		return (1);
+
+	/* Offline each named device, remembering whether any failed. */
+	for (i = 1; i < argc; i++)
+		if (zpool_vdev_offline(zhp, argv[i], temporary) != 0)
+			ret = 1;
+
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * zpool clear [-nFX] <pool> [device]
+ *
+ *	-F	Attempt rewind/recovery if the pool cannot be opened
+ *		normally (ZPOOL_DO_REWIND).
+ *	-n	Dry run: see whether rewind would succeed, but don't
+ *		actually perform it (ZPOOL_TRY_REWIND).  Requires -F.
+ *	-X	Extreme rewind (ZPOOL_EXTREME_REWIND).  Requires -F.
+ *
+ * Clear all errors associated with a pool or a particular device.
+ */
+int
+zpool_do_clear(int argc, char **argv)
+{
+	int c;
+	int ret = 0;
+	boolean_t dryrun = B_FALSE;
+	boolean_t do_rewind = B_FALSE;
+	boolean_t xtreme_rewind = B_FALSE;
+	uint32_t rewind_policy = ZPOOL_NO_REWIND;
+	nvlist_t *policy = NULL;
+	zpool_handle_t *zhp;
+	char *pool, *device;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "FnX")) != -1) {
+		switch (c) {
+		case 'F':
+			do_rewind = B_TRUE;
+			break;
+		case 'n':
+			dryrun = B_TRUE;
+			break;
+		case 'X':
+			xtreme_rewind = B_TRUE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name\n"));
+		usage(B_FALSE);
+	}
+
+	if (argc > 2) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	/* -n and -X are modifiers of -F and are meaningless without it. */
+	if ((dryrun || xtreme_rewind) && !do_rewind) {
+		(void) fprintf(stderr,
+		    gettext("-n or -X only meaningful with -F\n"));
+		usage(B_FALSE);
+	}
+	/* Fold the individual flags into a single rewind policy word. */
+	if (dryrun)
+		rewind_policy = ZPOOL_TRY_REWIND;
+	else if (do_rewind)
+		rewind_policy = ZPOOL_DO_REWIND;
+	if (xtreme_rewind)
+		rewind_policy |= ZPOOL_EXTREME_REWIND;
+
+	/* In future, further rewind policy choices can be passed along here */
+	if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
+	    nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
+		return (1);
+
+	pool = argv[0];
+	device = argc == 2 ? argv[1] : NULL;	/* NULL => whole pool */
+
+	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
+		nvlist_free(policy);
+		return (1);
+	}
+
+	if (zpool_clear(zhp, device, policy) != 0)
+		ret = 1;
+
+	zpool_close(zhp);
+
+	nvlist_free(policy);
+
+	return (ret);
+}
+
+/*
+ * zpool reguid <pool>
+ *
+ * Generate a new unique identifier for the named pool.
+ */
+int
+zpool_do_reguid(int argc, char **argv)
+{
+	zpool_handle_t *zhp;
+	char *pool;
+	int ret, c;
+
+	/* No options are supported; anything getopt reports is invalid. */
+	while ((c = getopt(argc, argv, "")) != -1) {
+		switch (c) {
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	/* Exactly one argument is expected: the pool name. */
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
+
+	pool = argv[0];
+	if ((zhp = zpool_open(g_zfs, pool)) == NULL)
+		return (1);
+
+	ret = zpool_reguid(zhp);
+	zpool_close(zhp);
+
+	return (ret);
+}
+
+
+/*
+ * zpool reopen <pool>
+ *
+ * Reopen the pool so that the kernel can update the sizes of all vdevs.
+ *
+ * NOTE: currently undocumented; if this command is ever exposed, the
+ * appropriate usage() messages will need to be added.
+ */
+int
+zpool_do_reopen(int argc, char **argv)
+{
+	zpool_handle_t *zhp;
+	int ret;
+
+	/* Skip the subcommand name; exactly one pool argument remains. */
+	argc--;
+	argv++;
+
+	if (argc != 1)
+		return (2);
+
+	if ((zhp = zpool_open_canfail(g_zfs, argv[0])) == NULL)
+		return (1);
+
+	ret = zpool_reopen(zhp);
+	zpool_close(zhp);
+	return (ret);
+}
+
+/* State passed to scrub_callback() via for_each_pool(). */
+typedef struct scrub_cbdata {
+	int	cb_type;	/* POOL_SCAN_SCRUB, or POOL_SCAN_NONE to stop */
+	int	cb_argc;	/* original argument count */
+	char	**cb_argv;	/* original argument vector */
+} scrub_cbdata_t;
+
+/*
+ * Pool iteration callback for "zpool scrub": start (or stop) the scan
+ * described by the scrub_cbdata_t.  Returns nonzero on failure.
+ */
+int
+scrub_callback(zpool_handle_t *zhp, void *data)
+{
+	scrub_cbdata_t *cbd = data;
+
+	/*
+	 * Ignore faulted pools.
+	 */
+	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
+		(void) fprintf(stderr, gettext("cannot scrub '%s': pool is "
+		    "currently unavailable\n"), zpool_get_name(zhp));
+		return (1);
+	}
+
+	return (zpool_scan(zhp, cbd->cb_type) != 0);
+}
+
+/*
+ * zpool scrub [-s] <pool> ...
+ *
+ *	-s	Stop.  Stops any in-progress scrub.
+ */
+int
+zpool_do_scrub(int argc, char **argv)
+{
+	int c;
+	scrub_cbdata_t cb;
+
+	cb.cb_type = POOL_SCAN_SCRUB;
+
+	/* check options */
+	while ((c = getopt(argc, argv, "s")) != -1) {
+		switch (c) {
+		case 's':
+			cb.cb_type = POOL_SCAN_NONE;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	/*
+	 * NOTE(review): cb_argc/cb_argv capture argc/argv before the
+	 * optind adjustment below — confirm whether any consumer relies
+	 * on the pre-getopt values.
+	 */
+	cb.cb_argc = argc;
+	cb.cb_argv = argv;
+	argc -= optind;
+	argv += optind;
+
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name argument\n"));
+		usage(B_FALSE);
+	}
+
+	/* Apply scrub_callback() to every named pool. */
+	return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
+}
+
+/* State shared across status_callback() invocations for "zpool status". */
+typedef struct status_cbdata {
+	int		cb_count;	/* number of pools processed so far */
+	boolean_t	cb_allpools;	/* reporting on all pools, not a list */
+	boolean_t	cb_verbose;	/* verbose output (presumably -v) */
+	boolean_t	cb_explain;	/* -x: report only unhealthy pools */
+	boolean_t	cb_first;	/* first pool: no separating newline */
+	boolean_t	cb_dedup_stats;	/* include DDT stats (presumably -D) */
+} status_cbdata_t;
+
+/*
+ * Print out detailed scrub status.
+ *
+ * ps may be NULL (no scan stats in the config), in which case "none
+ * requested" is printed.  Handles three states: finished, canceled,
+ * and in progress (with rate and time-remaining estimates).
+ */
+void
+print_scan_status(pool_scan_stat_t *ps)
+{
+	time_t start, end;
+	uint64_t elapsed, mins_left, hours_left;
+	uint64_t pass_exam, examined, total;
+	uint_t rate;
+	double fraction_done;
+	/* Buffers sized for zfs_nicenum() human-readable output. */
+	char processed_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
+
+	(void) printf(gettext("  scan: "));
+
+	/* If there's never been a scan, there's not much to say. */
+	if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
+	    ps->pss_func >= POOL_SCAN_FUNCS) {
+		(void) printf(gettext("none requested\n"));
+		return;
+	}
+
+	start = ps->pss_start_time;
+	end = ps->pss_end_time;
+	zfs_nicenum(ps->pss_processed, processed_buf, sizeof (processed_buf));
+
+	assert(ps->pss_func == POOL_SCAN_SCRUB ||
+	    ps->pss_func == POOL_SCAN_RESILVER);
+	/*
+	 * Scan is finished or canceled.
+	 */
+	if (ps->pss_state == DSS_FINISHED) {
+		uint64_t minutes_taken = (end - start) / 60;
+		char *fmt;
+
+		/*
+		 * fmt is always assigned: the assert above restricts
+		 * pss_func to one of these two values.
+		 */
+		if (ps->pss_func == POOL_SCAN_SCRUB) {
+			fmt = gettext("scrub repaired %s in %lluh%um with "
+			    "%llu errors on %s");
+		} else if (ps->pss_func == POOL_SCAN_RESILVER) {
+			fmt = gettext("resilvered %s in %lluh%um with "
+			    "%llu errors on %s");
+		}
+		/* LINTED */
+		(void) printf(fmt, processed_buf,
+		    (u_longlong_t)(minutes_taken / 60),
+		    (uint_t)(minutes_taken % 60),
+		    (u_longlong_t)ps->pss_errors,
+		    ctime((time_t *)&end));
+		return;
+	} else if (ps->pss_state == DSS_CANCELED) {
+		if (ps->pss_func == POOL_SCAN_SCRUB) {
+			(void) printf(gettext("scrub canceled on %s"),
+			    ctime(&end));
+		} else if (ps->pss_func == POOL_SCAN_RESILVER) {
+			(void) printf(gettext("resilver canceled on %s"),
+			    ctime(&end));
+		}
+		return;
+	}
+
+	assert(ps->pss_state == DSS_SCANNING);
+
+	/*
+	 * Scan is in progress.
+	 */
+	if (ps->pss_func == POOL_SCAN_SCRUB) {
+		(void) printf(gettext("scrub in progress since %s"),
+		    ctime(&start));
+	} else if (ps->pss_func == POOL_SCAN_RESILVER) {
+		(void) printf(gettext("resilver in progress since %s"),
+		    ctime(&start));
+	}
+
+	/*
+	 * Clamp examined to at least 1 to avoid dividing by zero in the
+	 * rate math below.  NOTE(review): pss_to_examine is not clamped,
+	 * so fraction_done may be inf very early in a scan — confirm.
+	 */
+	examined = ps->pss_examined ? ps->pss_examined : 1;
+	total = ps->pss_to_examine;
+	fraction_done = (double)examined / total;
+
+	/* elapsed time for this pass */
+	elapsed = time(NULL) - ps->pss_pass_start;
+	elapsed = elapsed ? elapsed : 1;
+	pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
+	rate = pass_exam / elapsed;
+	rate = rate ? rate : 1;		/* avoid division by zero below */
+	mins_left = ((total - examined) / rate) / 60;
+	hours_left = mins_left / 60;
+
+	zfs_nicenum(examined, examined_buf, sizeof (examined_buf));
+	zfs_nicenum(total, total_buf, sizeof (total_buf));
+	zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
+
+	/*
+	 * do not print estimated time if hours_left is more than 30 days
+	 */
+	(void) printf(gettext("        %s scanned out of %s at %s/s"),
+	    examined_buf, total_buf, rate_buf);
+	if (hours_left < (30 * 24)) {
+		(void) printf(gettext(", %lluh%um to go\n"),
+		    (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
+	} else {
+		(void) printf(gettext(
+		    ", (scan is slow, no estimated time)\n"));
+	}
+
+	/* Amount repaired/resilvered so far, plus percent complete. */
+	if (ps->pss_func == POOL_SCAN_RESILVER) {
+		(void) printf(gettext("        %s resilvered, %.2f%% done\n"),
+		    processed_buf, 100 * fraction_done);
+	} else if (ps->pss_func == POOL_SCAN_SCRUB) {
+		(void) printf(gettext("        %s repaired, %.2f%% done\n"),
+		    processed_buf, 100 * fraction_done);
+	}
+}
+
+/*
+ * Print the pool's persistent error log: one line per file with a
+ * permanent error, resolving each <dataset obj, object> pair to a path.
+ */
+static void
+print_error_log(zpool_handle_t *zhp)
+{
+	nvlist_t *nverrlist = NULL;
+	nvpair_t *pair;
+	char *pathbuf;
+	size_t buflen = MAXPATHLEN * 2;
+
+	if (zpool_get_errlog(zhp, &nverrlist) != 0) {
+		(void) printf("errors: List of errors unavailable "
+		    "(insufficient privileges)\n");
+		return;
+	}
+
+	(void) printf("errors: Permanent errors have been "
+	    "detected in the following files:\n\n");
+
+	pathbuf = safe_malloc(buflen);
+	for (pair = nvlist_next_nvpair(nverrlist, NULL); pair != NULL;
+	    pair = nvlist_next_nvpair(nverrlist, pair)) {
+		nvlist_t *errnv;
+		uint64_t ds, obj;
+
+		verify(nvpair_value_nvlist(pair, &errnv) == 0);
+		verify(nvlist_lookup_uint64(errnv, ZPOOL_ERR_DATASET,
+		    &ds) == 0);
+		verify(nvlist_lookup_uint64(errnv, ZPOOL_ERR_OBJECT,
+		    &obj) == 0);
+		zpool_obj_to_path(zhp, ds, obj, pathbuf, buflen);
+		(void) printf("%7s %s\n", "", pathbuf);
+	}
+	free(pathbuf);
+	nvlist_free(nverrlist);
+}
+
+/*
+ * Print the "spares" section of "zpool status": one status line per
+ * configured hot spare, indented beneath a "spares" heading.  No-op if
+ * the pool has no spares.
+ */
+static void
+print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares,
+    int namewidth)
+{
+	char *devname;
+	uint_t n;
+
+	if (nspares == 0)
+		return;
+
+	(void) printf(gettext("\tspares\n"));
+
+	for (n = 0; n < nspares; n++) {
+		devname = zpool_vdev_name(g_zfs, zhp, spares[n], B_FALSE);
+		print_status_config(zhp, devname, spares[n],
+		    namewidth, 2, B_TRUE);
+		free(devname);
+	}
+}
+
+/*
+ * Print the "cache" section of "zpool status": one status line per
+ * L2ARC device, indented beneath a "cache" heading.  No-op if the pool
+ * has no cache devices.
+ */
+static void
+print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache,
+    int namewidth)
+{
+	char *devname;
+	uint_t n;
+
+	if (nl2cache == 0)
+		return;
+
+	(void) printf(gettext("\tcache\n"));
+
+	for (n = 0; n < nl2cache; n++) {
+		devname = zpool_vdev_name(g_zfs, zhp, l2cache[n], B_FALSE);
+		print_status_config(zhp, devname, l2cache[n],
+		    namewidth, 2, B_FALSE);
+		free(devname);
+	}
+}
+
+/*
+ * Print a summary of the pool's deduplication table: entry count,
+ * on-disk and in-core sizes, and the full DDT histogram.
+ */
+static void
+print_dedup_stats(nvlist_t *config)
+{
+	ddt_histogram_t *histo;
+	ddt_stat_t *stats;
+	ddt_object_t *obj;
+	uint_t len;
+
+	/*
+	 * If the pool was faulted we may not have been able to obtain
+	 * the config, in which case the DDT object stats are absent and
+	 * there is nothing to report.
+	 */
+	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
+	    (uint64_t **)&obj, &len) != 0)
+		return;
+
+	(void) printf("\n");
+	(void) printf(gettext(" dedup: "));
+	if (obj->ddo_count == 0) {
+		(void) printf(gettext("no DDT entries\n"));
+		return;
+	}
+
+	(void) printf("DDT entries %llu, size %llu on disk, %llu in core\n",
+	    (u_longlong_t)obj->ddo_count,
+	    (u_longlong_t)obj->ddo_dspace,
+	    (u_longlong_t)obj->ddo_mspace);
+
+	/* The stat and histogram arrays accompany the object stats. */
+	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
+	    (uint64_t **)&stats, &len) == 0);
+	verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
+	    (uint64_t **)&histo, &len) == 0);
+	zpool_dump_ddt(stats, histo);
+}
+
+/*
+ * Display a summary of pool status.  Displays a summary such as:
+ *
+ *        pool: tank
+ *	status: DEGRADED
+ *	reason: One or more devices ...
+ *         see: http://illumos.org/msg/ZFS-xxxx-01
+ *	config:
+ *		mirror		DEGRADED
+ *                c1t0d0	OK
+ *                c2t0d0	UNAVAIL
+ *
+ * When given the '-v' option, we print out the complete config.  If the '-e'
+ * option is specified, then we print out error rate information as well.
+ */
+int
+status_callback(zpool_handle_t *zhp, void *data)
+{
+	status_cbdata_t *cbp = data;
+	nvlist_t *config, *nvroot;
+	char *msgid;
+	int reason;
+	const char *health;
+	uint_t c;
+	vdev_stat_t *vs;
+
+	config = zpool_get_config(zhp, NULL);
+	reason = zpool_get_status(zhp, &msgid);
+
+	cbp->cb_count++;
+
+	/*
+	 * If we were given 'zpool status -x', only report those pools with
+	 * problems.
+	 */
+	if (reason == ZPOOL_STATUS_OK && cbp->cb_explain) {
+		if (!cbp->cb_allpools) {
+			(void) printf(gettext("pool '%s' is healthy\n"),
+			    zpool_get_name(zhp));
+			if (cbp->cb_first)
+				cbp->cb_first = B_FALSE;
+		}
+		return (0);
+	}
+
+	if (cbp->cb_first)
+		cbp->cb_first = B_FALSE;
+	else
+		(void) printf("\n");
+
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &c) == 0);
+	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
+
+	(void) printf(gettext("  pool: %s\n"), zpool_get_name(zhp));
+	(void) printf(gettext(" state: %s\n"), health);
+
+	switch (reason) {
+	case ZPOOL_STATUS_MISSING_DEV_R:
+		(void) printf(gettext("status: One or more devices could not "
+		    "be opened.  Sufficient replicas exist for\n\tthe pool to "
+		    "continue functioning in a degraded state.\n"));
+		(void) printf(gettext("action: Attach the missing device and "
+		    "online it using 'zpool online'.\n"));
+		break;
+
+	case ZPOOL_STATUS_MISSING_DEV_NR:
+		(void) printf(gettext("status: One or more devices could not "
+		    "be opened.  There are insufficient\n\treplicas for the "
+		    "pool to continue functioning.\n"));
+		(void) printf(gettext("action: Attach the missing device and "
+		    "online it using 'zpool online'.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_LABEL_R:
+		(void) printf(gettext("status: One or more devices could not "
+		    "be used because the label is missing or\n\tinvalid.  "
+		    "Sufficient replicas exist for the pool to continue\n\t"
+		    "functioning in a degraded state.\n"));
+		(void) printf(gettext("action: Replace the device using "
+		    "'zpool replace'.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_LABEL_NR:
+		(void) printf(gettext("status: One or more devices could not "
+		    "be used because the label is missing \n\tor invalid.  "
+		    "There are insufficient replicas for the pool to "
+		    "continue\n\tfunctioning.\n"));
+		zpool_explain_recover(zpool_get_handle(zhp),
+		    zpool_get_name(zhp), reason, config);
+		break;
+
+	case ZPOOL_STATUS_FAILING_DEV:
+		(void) printf(gettext("status: One or more devices has "
+		    "experienced an unrecoverable error.  An\n\tattempt was "
+		    "made to correct the error.  Applications are "
+		    "unaffected.\n"));
+		(void) printf(gettext("action: Determine if the device needs "
+		    "to be replaced, and clear the errors\n\tusing "
+		    "'zpool clear' or replace the device with 'zpool "
+		    "replace'.\n"));
+		break;
+
+	case ZPOOL_STATUS_OFFLINE_DEV:
+		(void) printf(gettext("status: One or more devices has "
+		    "been taken offline by the administrator.\n\tSufficient "
+		    "replicas exist for the pool to continue functioning in "
+		    "a\n\tdegraded state.\n"));
+		(void) printf(gettext("action: Online the device using "
+		    "'zpool online' or replace the device with\n\t'zpool "
+		    "replace'.\n"));
+		break;
+
+	case ZPOOL_STATUS_REMOVED_DEV:
+		(void) printf(gettext("status: One or more devices has "
+		    "been removed by the administrator.\n\tSufficient "
+		    "replicas exist for the pool to continue functioning in "
+		    "a\n\tdegraded state.\n"));
+		(void) printf(gettext("action: Online the device using "
+		    "'zpool online' or replace the device with\n\t'zpool "
+		    "replace'.\n"));
+		break;
+
+	case ZPOOL_STATUS_RESILVERING:
+		(void) printf(gettext("status: One or more devices is "
+		    "currently being resilvered.  The pool will\n\tcontinue "
+		    "to function, possibly in a degraded state.\n"));
+		(void) printf(gettext("action: Wait for the resilver to "
+		    "complete.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_DATA:
+		(void) printf(gettext("status: One or more devices has "
+		    "experienced an error resulting in data\n\tcorruption.  "
+		    "Applications may be affected.\n"));
+		(void) printf(gettext("action: Restore the file in question "
+		    "if possible.  Otherwise restore the\n\tentire pool from "
+		    "backup.\n"));
+		break;
+
+	case ZPOOL_STATUS_CORRUPT_POOL:
+		(void) printf(gettext("status: The pool metadata is corrupted "
+		    "and the pool cannot be opened.\n"));
+		zpool_explain_recover(zpool_get_handle(zhp),
+		    zpool_get_name(zhp), reason, config);
+		break;
+
+	case ZPOOL_STATUS_VERSION_OLDER:
+		(void) printf(gettext("status: The pool is formatted using a "
+		    "legacy on-disk format.  The pool can\n\tstill be used, "
+		    "but some features are unavailable.\n"));
+		(void) printf(gettext("action: Upgrade the pool using 'zpool "
+		    "upgrade'.  Once this is done, the\n\tpool will no longer "
+		    "be accessible on software that does not support feature\n"
+		    "\tflags.\n"));
+		break;
+
+	case ZPOOL_STATUS_VERSION_NEWER:
+		(void) printf(gettext("status: The pool has been upgraded to a "
+		    "newer, incompatible on-disk version.\n\tThe pool cannot "
+		    "be accessed on this system.\n"));
+		(void) printf(gettext("action: Access the pool from a system "
+		    "running more recent software, or\n\trestore the pool from "
+		    "backup.\n"));
+		break;
+
+	case ZPOOL_STATUS_FEAT_DISABLED:
+		(void) printf(gettext("status: Some supported features are not "
+		    "enabled on the pool. The pool can\n\tstill be used, but "
+		    "some features are unavailable.\n"));
+		(void) printf(gettext("action: Enable all features using "
+		    "'zpool upgrade'. Once this is done,\n\tthe pool may no "
+		    "longer be accessible by software that does not support\n\t"
+		    "the features. See zpool-features(7) for details.\n"));
+		break;
+
+	case ZPOOL_STATUS_UNSUP_FEAT_READ:
+		(void) printf(gettext("status: The pool cannot be accessed on "
+		    "this system because it uses the\n\tfollowing feature(s) "
+		    "not supported on this system:\n"));
+		zpool_print_unsup_feat(config);
+		(void) printf("\n");
+		(void) printf(gettext("action: Access the pool from a system "
+		    "that supports the required feature(s),\n\tor restore the "
+		    "pool from backup.\n"));
+		break;
+
+	case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
+		(void) printf(gettext("status: The pool can only be accessed "
+		    "in read-only mode on this system. It\n\tcannot be "
+		    "accessed in read-write mode because it uses the "
+		    "following\n\tfeature(s) not supported on this system:\n"));
+		zpool_print_unsup_feat(config);
+		(void) printf("\n");
+		(void) printf(gettext("action: The pool cannot be accessed in "
+		    "read-write mode. Import the pool with\n"
+		    "\t\"-o readonly=on\", access the pool from a system that "
+		    "supports the\n\trequired feature(s), or restore the "
+		    "pool from backup.\n"));
+		break;
+
+	case ZPOOL_STATUS_FAULTED_DEV_R:
+		(void) printf(gettext("status: One or more devices are "
+		    "faulted in response to persistent errors.\n\tSufficient "
+		    "replicas exist for the pool to continue functioning "
+		    "in a\n\tdegraded state.\n"));
+		(void) printf(gettext("action: Replace the faulted device, "
+		    "or use 'zpool clear' to mark the device\n\trepaired.\n"));
+		break;
+
+	case ZPOOL_STATUS_FAULTED_DEV_NR:
+		(void) printf(gettext("status: One or more devices are "
+		    "faulted in response to persistent errors.  There are "
+		    "insufficient replicas for the pool to\n\tcontinue "
+		    "functioning.\n"));
+		(void) printf(gettext("action: Destroy and re-create the pool "
+		    "from a backup source.  Manually marking the device\n"
+		    "\trepaired using 'zpool clear' may allow some data "
+		    "to be recovered.\n"));
+		break;
+
+	case ZPOOL_STATUS_IO_FAILURE_WAIT:
+	case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
+		(void) printf(gettext("status: One or more devices are "
+		    "faulted in response to IO failures.\n"));
+		(void) printf(gettext("action: Make sure the affected devices "
+		    "are connected, then run 'zpool clear'.\n"));
+		break;
+
+	case ZPOOL_STATUS_BAD_LOG:
+		(void) printf(gettext("status: An intent log record "
+		    "could not be read.\n"
+		    "\tWaiting for adminstrator intervention to fix the "
+		    "faulted pool.\n"));
+		(void) printf(gettext("action: Either restore the affected "
+		    "device(s) and run 'zpool online',\n"
+		    "\tor ignore the intent log records by running "
+		    "'zpool clear'.\n"));
+		break;
+
+	default:
+		/*
+		 * The remaining errors can't actually be generated, yet.
+		 */
+		assert(reason == ZPOOL_STATUS_OK);
+	}
+
+	if (msgid != NULL)
+		(void) printf(gettext("   see: http://illumos.org/msg/%s\n"),
+		    msgid);
+
+	if (config != NULL) {
+		int namewidth;
+		uint64_t nerr;
+		nvlist_t **spares, **l2cache;
+		uint_t nspares, nl2cache;
+		pool_scan_stat_t *ps = NULL;
+
+		(void) nvlist_lookup_uint64_array(nvroot,
+		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &c);
+		print_scan_status(ps);
+
+		namewidth = max_width(zhp, nvroot, 0, 0);
+		if (namewidth < 10)
+			namewidth = 10;
+
+		(void) printf(gettext("config:\n\n"));
+		(void) printf(gettext("\t%-*s  %-8s %5s %5s %5s\n"), namewidth,
+		    "NAME", "STATE", "READ", "WRITE", "CKSUM");
+		print_status_config(zhp, zpool_get_name(zhp), nvroot,
+		    namewidth, 0, B_FALSE);
+
+		if (num_logs(nvroot) > 0)
+			print_logs(zhp, nvroot, namewidth, B_TRUE);
+		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+		    &l2cache, &nl2cache) == 0)
+			print_l2cache(zhp, l2cache, nl2cache, namewidth);
+
+		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+		    &spares, &nspares) == 0)
+			print_spares(zhp, spares, nspares, namewidth);
+
+		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
+		    &nerr) == 0) {
+			nvlist_t *nverrlist = NULL;
+
+			/*
+			 * If the approximate error count is small, get a
+			 * precise count by fetching the entire log and
+			 * uniquifying the results.
+			 */
+			if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
+			    zpool_get_errlog(zhp, &nverrlist) == 0) {
+				nvpair_t *elem;
+
+				elem = NULL;
+				nerr = 0;
+				while ((elem = nvlist_next_nvpair(nverrlist,
+				    elem)) != NULL) {
+					nerr++;
+				}
+			}
+			nvlist_free(nverrlist);
+
+			(void) printf("\n");
+
+			if (nerr == 0)
+				(void) printf(gettext("errors: No known data "
+				    "errors\n"));
+			else if (!cbp->cb_verbose)
+				(void) printf(gettext("errors: %llu data "
+				    "errors, use '-v' for a list\n"),
+				    (u_longlong_t)nerr);
+			else
+				print_error_log(zhp);
+		}
+
+		if (cbp->cb_dedup_stats)
+			print_dedup_stats(config);
+	} else {
+		(void) printf(gettext("config: The configuration cannot be "
+		    "determined.\n"));
+	}
+
+	return (0);
+}
+
+/*
+ * zpool status [-vx] [-T d|u] [pool] ... [interval [count]]
+ *
+ *	-v	Display complete error logs
+ *	-x	Display only pools with potential problems
+ *	-D	Display dedup status (undocumented)
+ *	-T	Display a timestamp in date(1) or Unix format
+ *
+ * Describes the health status of all pools or some subset.
+ */
+int
+zpool_do_status(int argc, char **argv)
+{
+	int c;
+	int ret;
+	unsigned long interval = 0, count = 0;
+	status_cbdata_t cb = { 0 };
+
+	/* check options */
+	while ((c = getopt(argc, argv, "vxDT:")) != -1) {
+		switch (c) {
+		case 'v':
+			cb.cb_verbose = B_TRUE;
+			break;
+		case 'x':
+			cb.cb_explain = B_TRUE;
+			break;
+		case 'D':
+			cb.cb_dedup_stats = B_TRUE;
+			break;
+		case 'T':
+			get_timestamp_arg(*optarg);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	argc -= optind;
+	argv += optind;
+
+	get_interval_count(&argc, argv, &interval, &count);
+
+	if (argc == 0)
+		cb.cb_allpools = B_TRUE;
+
+	cb.cb_first = B_TRUE;
+
+	for (;;) {
+		if (timestamp_fmt != NODATE)
+			print_timestamp(timestamp_fmt);
+
+		ret = for_each_pool(argc, argv, B_TRUE, NULL,
+		    status_callback, &cb);
+
+		if (argc == 0 && cb.cb_count == 0)
+			(void) printf(gettext("no pools available\n"));
+		else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
+			(void) printf(gettext("all pools are healthy\n"));
+
+		if (ret != 0)
+			return (ret);
+
+		if (interval == 0)
+			break;
+
+		if (count != 0 && --count == 0)
+			break;
+
+		(void) sleep(interval);
+	}
+
+	return (0);
+}
+
+typedef struct upgrade_cbdata {
+	int	cb_first;
+	char	cb_poolname[ZPOOL_MAXNAMELEN];
+	int	cb_argc;
+	uint64_t cb_version;
+	char	**cb_argv;
+} upgrade_cbdata_t;
+
+#ifdef __FreeBSD__
+static int
+is_root_pool(zpool_handle_t *zhp)
+{
+	static struct statfs sfs;
+	static char *poolname = NULL;
+	static boolean_t stated = B_FALSE;
+	char *slash;
+
+	if (!stated) {
+		stated = B_TRUE;
+		if (statfs("/", &sfs) == -1) {
+			(void) fprintf(stderr,
+			    "Unable to stat root file system: %s.\n",
+			    strerror(errno));
+			return (0);
+		}
+		if (strcmp(sfs.f_fstypename, "zfs") != 0)
+			return (0);
+		poolname = sfs.f_mntfromname;
+		if ((slash = strchr(poolname, '/')) != NULL)
+			*slash = '\0';
+	}
+	return (poolname != NULL && strcmp(poolname, zpool_get_name(zhp)) == 0);
+}
+
+static void
+root_pool_upgrade_check(zpool_handle_t *zhp, char *poolname, int size) {
+
+	if (poolname[0] == '\0' && is_root_pool(zhp))
+		(void) strlcpy(poolname, zpool_get_name(zhp), size);
+}
+#endif	/* __FreeBSD__ */
+
+static int
+upgrade_version(zpool_handle_t *zhp, uint64_t version)
+{
+	int ret;
+	nvlist_t *config;
+	uint64_t oldversion;
+
+	config = zpool_get_config(zhp, NULL);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+	    &oldversion) == 0);
+
+	assert(SPA_VERSION_IS_SUPPORTED(oldversion));
+	assert(oldversion < version);
+
+	ret = zpool_upgrade(zhp, version);
+	if (ret != 0)
+		return (ret);
+
+	if (version >= SPA_VERSION_FEATURES) {
+		(void) printf(gettext("Successfully upgraded "
+		    "'%s' from version %llu to feature flags.\n"),
+		    zpool_get_name(zhp), oldversion);
+	} else {
+		(void) printf(gettext("Successfully upgraded "
+		    "'%s' from version %llu to version %llu.\n"),
+		    zpool_get_name(zhp), oldversion, version);
+	}
+
+	return (0);
+}
+
+static int
+upgrade_enable_all(zpool_handle_t *zhp, int *countp)
+{
+	int i, ret, count;
+	boolean_t firstff = B_TRUE;
+	nvlist_t *enabled = zpool_get_features(zhp);
+
+	count = 0;
+	for (i = 0; i < SPA_FEATURES; i++) {
+		const char *fname = spa_feature_table[i].fi_uname;
+		const char *fguid = spa_feature_table[i].fi_guid;
+		if (!nvlist_exists(enabled, fguid)) {
+			char *propname;
+			verify(-1 != asprintf(&propname, "feature@%s", fname));
+			ret = zpool_set_prop(zhp, propname,
+			    ZFS_FEATURE_ENABLED);
+			if (ret != 0) {
+				free(propname);
+				return (ret);
+			}
+			count++;
+
+			if (firstff) {
+				(void) printf(gettext("Enabled the "
+				    "following features on '%s':\n"),
+				    zpool_get_name(zhp));
+				firstff = B_FALSE;
+			}
+			(void) printf(gettext("  %s\n"), fname);
+			free(propname);
+		}
+	}
+
+	if (countp != NULL)
+		*countp = count;
+	return (0);
+}
+
+static int
+upgrade_cb(zpool_handle_t *zhp, void *arg)
+{
+	upgrade_cbdata_t *cbp = arg;
+	nvlist_t *config;
+	uint64_t version;
+	boolean_t printnl = B_FALSE;
+	int ret;
+
+	config = zpool_get_config(zhp, NULL);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+	    &version) == 0);
+
+	assert(SPA_VERSION_IS_SUPPORTED(version));
+
+	if (version < cbp->cb_version) {
+		cbp->cb_first = B_FALSE;
+		ret = upgrade_version(zhp, cbp->cb_version);
+		if (ret != 0)
+			return (ret);
+#ifdef __FreeBSD__
+		root_pool_upgrade_check(zhp, cbp->cb_poolname,
+		    sizeof(cbp->cb_poolname));
+#endif	/* __FreeBSD__ */
+		printnl = B_TRUE;
+
+#ifdef illumos
+		/*
+		 * If they did "zpool upgrade -a", then we could
+		 * be doing ioctls to different pools.  We need
+		 * to log this history once to each pool, and bypass
+		 * the normal history logging that happens in main().
+		 */
+		(void) zpool_log_history(g_zfs, history_str);
+		log_history = B_FALSE;
+#endif
+	}
+
+	if (cbp->cb_version >= SPA_VERSION_FEATURES) {
+		int count;
+		ret = upgrade_enable_all(zhp, &count);
+		if (ret != 0)
+			return (ret);
+
+		if (count > 0) {
+			cbp->cb_first = B_FALSE;
+			printnl = B_TRUE;
+		}
+	}
+
+	if (printnl) {
+		(void) printf(gettext("\n"));
+	}
+
+	return (0);
+}
+
+static int
+upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
+{
+	upgrade_cbdata_t *cbp = arg;
+	nvlist_t *config;
+	uint64_t version;
+
+	config = zpool_get_config(zhp, NULL);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+	    &version) == 0);
+
+	assert(SPA_VERSION_IS_SUPPORTED(version));
+
+	if (version < SPA_VERSION_FEATURES) {
+		if (cbp->cb_first) {
+			(void) printf(gettext("The following pools are "
+			    "formatted with legacy version numbers and can\n"
+			    "be upgraded to use feature flags.  After "
+			    "being upgraded, these pools\nwill no "
+			    "longer be accessible by software that does not "
+			    "support feature\nflags.\n\n"));
+			(void) printf(gettext("VER  POOL\n"));
+			(void) printf(gettext("---  ------------\n"));
+			cbp->cb_first = B_FALSE;
+		}
+
+		(void) printf("%2llu   %s\n", (u_longlong_t)version,
+		    zpool_get_name(zhp));
+	}
+
+	return (0);
+}
+
+static int
+upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
+{
+	upgrade_cbdata_t *cbp = arg;
+	nvlist_t *config;
+	uint64_t version;
+
+	config = zpool_get_config(zhp, NULL);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+	    &version) == 0);
+
+	if (version >= SPA_VERSION_FEATURES) {
+		int i;
+		boolean_t poolfirst = B_TRUE;
+		nvlist_t *enabled = zpool_get_features(zhp);
+
+		for (i = 0; i < SPA_FEATURES; i++) {
+			const char *fguid = spa_feature_table[i].fi_guid;
+			const char *fname = spa_feature_table[i].fi_uname;
+			if (!nvlist_exists(enabled, fguid)) {
+				if (cbp->cb_first) {
+					(void) printf(gettext("\nSome "
+					    "supported features are not "
+					    "enabled on the following pools. "
+					    "Once a\nfeature is enabled the "
+					    "pool may become incompatible with "
+					    "software\nthat does not support "
+					    "the feature. See "
+					    "zpool-features(7) for "
+					    "details.\n\n"));
+					(void) printf(gettext("POOL  "
+					    "FEATURE\n"));
+					(void) printf(gettext("------"
+					    "---------\n"));
+					cbp->cb_first = B_FALSE;
+				}
+
+				if (poolfirst) {
+					(void) printf(gettext("%s\n"),
+					    zpool_get_name(zhp));
+					poolfirst = B_FALSE;
+				}
+
+				(void) printf(gettext("      %s\n"), fname);
+			}
+		}
+	}
+
+	return (0);
+}
+
+/* ARGSUSED */
+static int
+upgrade_one(zpool_handle_t *zhp, void *data)
+{
+	boolean_t printnl = B_FALSE;
+	upgrade_cbdata_t *cbp = data;
+	uint64_t cur_version;
+	int ret;
+
+	if (strcmp("log", zpool_get_name(zhp)) == 0) {
+		(void) printf(gettext("'log' is now a reserved word\n"
+		    "Pool 'log' must be renamed using export and import"
+		    " to upgrade.\n"));
+		return (1);
+	}
+
+	cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
+	if (cur_version > cbp->cb_version) {
+		(void) printf(gettext("Pool '%s' is already formatted "
+		    "using more current version '%llu'.\n\n"),
+		    zpool_get_name(zhp), cur_version);
+		return (0);
+	}
+
+	if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
+		(void) printf(gettext("Pool '%s' is already formatted "
+		    "using version %llu.\n\n"), zpool_get_name(zhp),
+		    cbp->cb_version);
+		return (0);
+	}
+
+	if (cur_version != cbp->cb_version) {
+		printnl = B_TRUE;
+		ret = upgrade_version(zhp, cbp->cb_version);
+		if (ret != 0)
+			return (ret);
+#ifdef __FreeBSD__
+		root_pool_upgrade_check(zhp, cbp->cb_poolname,
+		    sizeof(cbp->cb_poolname));
+#endif	/* __FreeBSD__ */
+	}
+
+	if (cbp->cb_version >= SPA_VERSION_FEATURES) {
+		int count = 0;
+		ret = upgrade_enable_all(zhp, &count);
+		if (ret != 0)
+			return (ret);
+
+		if (count != 0) {
+			printnl = B_TRUE;
+#ifdef __FreeBSD__
+			root_pool_upgrade_check(zhp, cbp->cb_poolname,
+			    sizeof(cbp->cb_poolname));
+#endif	/* __FreeBSD__ */
+		} else if (cur_version == SPA_VERSION) {
+			(void) printf(gettext("Pool '%s' already has all "
+			    "supported features enabled.\n"),
+			    zpool_get_name(zhp));
+		}
+	}
+
+	if (printnl) {
+		(void) printf(gettext("\n"));
+	}
+
+	return (0);
+}
+
+/*
+ * zpool upgrade
+ * zpool upgrade -v
+ * zpool upgrade [-V version] <-a | pool ...>
+ *
+ * With no arguments, display downrev'd ZFS pool available for upgrade.
+ * Individual pools can be upgraded by specifying the pool, and '-a' will
+ * upgrade all pools.
+ */
+int
+zpool_do_upgrade(int argc, char **argv)
+{
+	int c;
+	upgrade_cbdata_t cb = { 0 };
+	int ret = 0;
+	boolean_t showversions = B_FALSE;
+	boolean_t upgradeall = B_FALSE;
+	char *end;
+
+
+	/* check options */
+	while ((c = getopt(argc, argv, ":avV:")) != -1) {
+		switch (c) {
+		case 'a':
+			upgradeall = B_TRUE;
+			break;
+		case 'v':
+			showversions = B_TRUE;
+			break;
+		case 'V':
+			cb.cb_version = strtoll(optarg, &end, 10);
+			if (*end != '\0' ||
+			    !SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
+				(void) fprintf(stderr,
+				    gettext("invalid version '%s'\n"), optarg);
+				usage(B_FALSE);
+			}
+			break;
+		case ':':
+			(void) fprintf(stderr, gettext("missing argument for "
+			    "'%c' option\n"), optopt);
+			usage(B_FALSE);
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
+	cb.cb_argc = argc;
+	cb.cb_argv = argv;
+	argc -= optind;
+	argv += optind;
+
+	if (cb.cb_version == 0) {
+		cb.cb_version = SPA_VERSION;
+	} else if (!upgradeall && argc == 0) {
+		(void) fprintf(stderr, gettext("-V option is "
+		    "incompatible with other arguments\n"));
+		usage(B_FALSE);
+	}
+
+	if (showversions) {
+		if (upgradeall || argc != 0) {
+			(void) fprintf(stderr, gettext("-v option is "
+			    "incompatible with other arguments\n"));
+			usage(B_FALSE);
+		}
+	} else if (upgradeall) {
+		if (argc != 0) {
+			(void) fprintf(stderr, gettext("-a option should not "
+			    "be used along with a pool name\n"));
+			usage(B_FALSE);
+		}
+	}
+
+	(void) printf(gettext("This system supports ZFS pool feature "
+	    "flags.\n\n"));
+	if (showversions) {
+		int i;
+
+		(void) printf(gettext("The following features are "
+		    "supported:\n\n"));
+		(void) printf(gettext("FEAT DESCRIPTION\n"));
+		(void) printf("----------------------------------------------"
+		    "---------------\n");
+		for (i = 0; i < SPA_FEATURES; i++) {
+			zfeature_info_t *fi = &spa_feature_table[i];
+			const char *ro = fi->fi_can_readonly ?
+			    " (read-only compatible)" : "";
+
+			(void) printf("%-37s%s\n", fi->fi_uname, ro);
+			(void) printf("     %s\n", fi->fi_desc);
+		}
+		(void) printf("\n");
+
+		(void) printf(gettext("The following legacy versions are also "
+		    "supported:\n\n"));
+		(void) printf(gettext("VER  DESCRIPTION\n"));
+		(void) printf("---  -----------------------------------------"
+		    "---------------\n");
+		(void) printf(gettext(" 1   Initial ZFS version\n"));
+		(void) printf(gettext(" 2   Ditto blocks "
+		    "(replicated metadata)\n"));
+		(void) printf(gettext(" 3   Hot spares and double parity "
+		    "RAID-Z\n"));
+		(void) printf(gettext(" 4   zpool history\n"));
+		(void) printf(gettext(" 5   Compression using the gzip "
+		    "algorithm\n"));
+		(void) printf(gettext(" 6   bootfs pool property\n"));
+		(void) printf(gettext(" 7   Separate intent log devices\n"));
+		(void) printf(gettext(" 8   Delegated administration\n"));
+		(void) printf(gettext(" 9   refquota and refreservation "
+		    "properties\n"));
+		(void) printf(gettext(" 10  Cache devices\n"));
+		(void) printf(gettext(" 11  Improved scrub performance\n"));
+		(void) printf(gettext(" 12  Snapshot properties\n"));
+		(void) printf(gettext(" 13  snapused property\n"));
+		(void) printf(gettext(" 14  passthrough-x aclinherit\n"));
+		(void) printf(gettext(" 15  user/group space accounting\n"));
+		(void) printf(gettext(" 16  stmf property support\n"));
+		(void) printf(gettext(" 17  Triple-parity RAID-Z\n"));
+		(void) printf(gettext(" 18  Snapshot user holds\n"));
+		(void) printf(gettext(" 19  Log device removal\n"));
+		(void) printf(gettext(" 20  Compression using zle "
+		    "(zero-length encoding)\n"));
+		(void) printf(gettext(" 21  Deduplication\n"));
+		(void) printf(gettext(" 22  Received properties\n"));
+		(void) printf(gettext(" 23  Slim ZIL\n"));
+		(void) printf(gettext(" 24  System attributes\n"));
+		(void) printf(gettext(" 25  Improved scrub stats\n"));
+		(void) printf(gettext(" 26  Improved snapshot deletion "
+		    "performance\n"));
+		(void) printf(gettext(" 27  Improved snapshot creation "
+		    "performance\n"));
+		(void) printf(gettext(" 28  Multiple vdev replacements\n"));
+		(void) printf(gettext("\nFor more information on a particular "
+		    "version, including supported releases,\n"));
+		(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
+	} else if (argc == 0 && upgradeall) {
+		cb.cb_first = B_TRUE;
+		ret = zpool_iter(g_zfs, upgrade_cb, &cb);
+		if (ret == 0 && cb.cb_first) {
+			if (cb.cb_version == SPA_VERSION) {
+				(void) printf(gettext("All pools are already "
+				    "formatted using feature flags.\n\n"));
+				(void) printf(gettext("Every feature flags "
+				    "pool already has all supported features "
+				    "enabled.\n"));
+			} else {
+				(void) printf(gettext("All pools are already "
+				    "formatted with version %llu or higher.\n"),
+				    cb.cb_version);
+			}
+		}
+	} else if (argc == 0) {
+		cb.cb_first = B_TRUE;
+		ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
+		assert(ret == 0);
+
+		if (cb.cb_first) {
+			(void) printf(gettext("All pools are formatted "
+			    "using feature flags.\n\n"));
+		} else {
+			(void) printf(gettext("\nUse 'zpool upgrade -v' "
+			    "for a list of available legacy versions.\n"));
+		}
+
+		cb.cb_first = B_TRUE;
+		ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
+		assert(ret == 0);
+
+		if (cb.cb_first) {
+			(void) printf(gettext("Every feature flags pool has "
+			    "all supported features enabled.\n"));
+		} else {
+			(void) printf(gettext("\n"));
+		}
+	} else {
+		ret = for_each_pool(argc, argv, B_FALSE, NULL,
+		    upgrade_one, &cb);
+	}
+
+	if (cb.cb_poolname[0] != '\0') {
+		(void) printf(
+		    "If you boot from pool '%s', don't forget to update boot code.\n"
+		    "Assuming you use GPT partitioning and da0 is your boot disk\n"
+		    "the following command will do it:\n"
+		    "\n"
+		    "\tgpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 da0\n\n",
+		    cb.cb_poolname);
+	}
+
+	return (ret);
+}
+
+typedef struct hist_cbdata {
+	boolean_t first;
+	int longfmt;
+	int internal;
+} hist_cbdata_t;
+
+/*
+ * Print out the command history for a specific pool.
+ */
+static int
+get_history_one(zpool_handle_t *zhp, void *data)
+{
+	nvlist_t *nvhis;
+	nvlist_t **records;
+	uint_t numrecords;
+	char *cmdstr;
+	char *pathstr;
+	uint64_t dst_time;
+	time_t tsec;
+	struct tm t;
+	char tbuf[30];
+	int ret, i;
+	uint64_t who;
+	struct passwd *pwd;
+	char *hostname;
+	char *zonename;
+	char internalstr[MAXPATHLEN];
+	hist_cbdata_t *cb = (hist_cbdata_t *)data;
+	uint64_t txg;
+	uint64_t ievent;
+
+	cb->first = B_FALSE;
+
+	(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
+
+	if ((ret = zpool_get_history(zhp, &nvhis)) != 0)
+		return (ret);
+
+	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
+	    &records, &numrecords) == 0);
+	for (i = 0; i < numrecords; i++) {
+		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
+		    &dst_time) != 0)
+			continue;
+
+		/* is it an internal event or a standard event? */
+		if (nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
+		    &cmdstr) != 0) {
+			if (cb->internal == 0)
+				continue;
+
+			if (nvlist_lookup_uint64(records[i],
+			    ZPOOL_HIST_INT_EVENT, &ievent) != 0)
+				continue;
+			verify(nvlist_lookup_uint64(records[i],
+			    ZPOOL_HIST_TXG, &txg) == 0);
+			verify(nvlist_lookup_string(records[i],
+			    ZPOOL_HIST_INT_STR, &pathstr) == 0);
+			if (ievent >= LOG_END)
+				continue;
+			(void) snprintf(internalstr,
+			    sizeof (internalstr),
+			    "[internal %s txg:%lld] %s",
+			    zfs_history_event_names[ievent], txg,
+			    pathstr);
+			cmdstr = internalstr;
+		}
+		tsec = dst_time;
+		(void) localtime_r(&tsec, &t);
+		(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
+		(void) printf("%s %s", tbuf, cmdstr);
+
+		if (!cb->longfmt) {
+			(void) printf("\n");
+			continue;
+		}
+		(void) printf(" [");
+		if (nvlist_lookup_uint64(records[i],
+		    ZPOOL_HIST_WHO, &who) == 0) {
+			pwd = getpwuid((uid_t)who);
+			if (pwd)
+				(void) printf("user %s on",
+				    pwd->pw_name);
+			else
+				(void) printf("user %d on",
+				    (int)who);
+		} else {
+			(void) printf(gettext("no info]\n"));
+			continue;
+		}
+		if (nvlist_lookup_string(records[i],
+		    ZPOOL_HIST_HOST, &hostname) == 0) {
+			(void) printf(" %s", hostname);
+		}
+		if (nvlist_lookup_string(records[i],
+		    ZPOOL_HIST_ZONE, &zonename) == 0) {
+			(void) printf(":%s", zonename);
+		}
+
+		(void) printf("]");
+		(void) printf("\n");
+	}
+	(void) printf("\n");
+	nvlist_free(nvhis);
+
+	return (ret);
+}
+
+/*
+ * zpool history <pool>
+ *
+ * Displays the history of commands that modified pools.
+ */
+
+
+int
+zpool_do_history(int argc, char **argv)
+{
+	hist_cbdata_t cbdata = { 0 };
+	int ret;
+	int c;
+
+	cbdata.first = B_TRUE;
+	/* check options */
+	while ((c = getopt(argc, argv, "li")) != -1) {
+		switch (c) {
+		case 'l':
+			cbdata.longfmt = 1;
+			break;
+		case 'i':
+			cbdata.internal = 1;
+			break;
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+	argc -= optind;
+	argv += optind;
+
+	ret = for_each_pool(argc, argv, B_FALSE,  NULL, get_history_one,
+	    &cbdata);
+
+	if (argc == 0 && cbdata.first == B_TRUE) {
+		(void) printf(gettext("no pools available\n"));
+		return (0);
+	}
+
+	return (ret);
+}
+
+static int
+get_callback(zpool_handle_t *zhp, void *data)
+{
+	zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
+	char value[MAXNAMELEN];
+	zprop_source_t srctype;
+	zprop_list_t *pl;
+
+	for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
+
+		/*
+		 * Skip the special fake placeholder. This will also skip
+		 * over the name property when 'all' is specified.
+		 */
+		if (pl->pl_prop == ZPOOL_PROP_NAME &&
+		    pl == cbp->cb_proplist)
+			continue;
+
+		if (pl->pl_prop == ZPROP_INVAL &&
+		    (zpool_prop_feature(pl->pl_user_prop) ||
+		    zpool_prop_unsupported(pl->pl_user_prop))) {
+			srctype = ZPROP_SRC_LOCAL;
+
+			if (zpool_prop_get_feature(zhp, pl->pl_user_prop,
+			    value, sizeof (value)) == 0) {
+				zprop_print_one_property(zpool_get_name(zhp),
+				    cbp, pl->pl_user_prop, value, srctype,
+				    NULL, NULL);
+			}
+		} else {
+			if (zpool_get_prop(zhp, pl->pl_prop, value,
+			    sizeof (value), &srctype) != 0)
+				continue;
+
+			zprop_print_one_property(zpool_get_name(zhp), cbp,
+			    zpool_prop_to_name(pl->pl_prop), value, srctype,
+			    NULL, NULL);
+		}
+	}
+	return (0);
+}
+
+int
+zpool_do_get(int argc, char **argv)
+{
+	zprop_get_cbdata_t cb = { 0 };
+	zprop_list_t fake_name = { 0 };
+	int ret;
+
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing property "
+		    "argument\n"));
+		usage(B_FALSE);
+	}
+
+	cb.cb_first = B_TRUE;
+	cb.cb_sources = ZPROP_SRC_ALL;
+	cb.cb_columns[0] = GET_COL_NAME;
+	cb.cb_columns[1] = GET_COL_PROPERTY;
+	cb.cb_columns[2] = GET_COL_VALUE;
+	cb.cb_columns[3] = GET_COL_SOURCE;
+	cb.cb_type = ZFS_TYPE_POOL;
+
+	if (zprop_get_list(g_zfs, argv[1], &cb.cb_proplist,
+	    ZFS_TYPE_POOL) != 0)
+		usage(B_FALSE);
+
+	if (cb.cb_proplist != NULL) {
+		fake_name.pl_prop = ZPOOL_PROP_NAME;
+		fake_name.pl_width = strlen(gettext("NAME"));
+		fake_name.pl_next = cb.cb_proplist;
+		cb.cb_proplist = &fake_name;
+	}
+
+	ret = for_each_pool(argc - 2, argv + 2, B_TRUE, &cb.cb_proplist,
+	    get_callback, &cb);
+
+	if (cb.cb_proplist == &fake_name)
+		zprop_free_list(fake_name.pl_next);
+	else
+		zprop_free_list(cb.cb_proplist);
+
+	return (ret);
+}
+
+typedef struct set_cbdata {
+	char *cb_propname;
+	char *cb_value;
+	boolean_t cb_any_successful;
+} set_cbdata_t;
+
+int
+set_callback(zpool_handle_t *zhp, void *data)
+{
+	int error;
+	set_cbdata_t *cb = (set_cbdata_t *)data;
+
+	error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
+
+	if (!error)
+		cb->cb_any_successful = B_TRUE;
+
+	return (error);
+}
+
+int
+zpool_do_set(int argc, char **argv)
+{
+	set_cbdata_t cb = { 0 };
+	int error;
+
+	if (argc > 1 && argv[1][0] == '-') {
+		(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+		    argv[1][1]);
+		usage(B_FALSE);
+	}
+
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing property=value "
+		    "argument\n"));
+		usage(B_FALSE);
+	}
+
+	if (argc < 3) {
+		(void) fprintf(stderr, gettext("missing pool name\n"));
+		usage(B_FALSE);
+	}
+
+	if (argc > 3) {
+		(void) fprintf(stderr, gettext("too many pool names\n"));
+		usage(B_FALSE);
+	}
+
+	cb.cb_propname = argv[1];
+	cb.cb_value = strchr(cb.cb_propname, '=');
+	if (cb.cb_value == NULL) {
+		(void) fprintf(stderr, gettext("missing value in "
+		    "property=value argument\n"));
+		usage(B_FALSE);
+	}
+
+	*(cb.cb_value) = '\0';
+	cb.cb_value++;
+
+	error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL,
+	    set_callback, &cb);
+
+	return (error);
+}
+
+static int
+find_command_idx(char *command, int *idx)
+{
+	int i;
+
+	for (i = 0; i < NCOMMAND; i++) {
+		if (command_table[i].name == NULL)
+			continue;
+
+		if (strcmp(command, command_table[i].name) == 0) {
+			*idx = i;
+			return (0);
+		}
+	}
+	return (1);
+}
+
+int
+main(int argc, char **argv)
+{
+	int ret;
+	int i;
+	char *cmdname;
+
+	(void) setlocale(LC_ALL, "");
+	(void) textdomain(TEXT_DOMAIN);
+
+	if ((g_zfs = libzfs_init()) == NULL) {
+		(void) fprintf(stderr, gettext("internal error: failed to "
+		    "initialize ZFS library\n"));
+		return (1);
+	}
+
+	libzfs_print_on_error(g_zfs, B_TRUE);
+
+	opterr = 0;
+
+	/*
+	 * Make sure the user has specified some command.
+	 */
+	if (argc < 2) {
+		(void) fprintf(stderr, gettext("missing command\n"));
+		usage(B_FALSE);
+	}
+
+	cmdname = argv[1];
+
+	/*
+	 * Special case '-?'
+	 */
+	if (strcmp(cmdname, "-?") == 0)
+		usage(B_TRUE);
+
+	zpool_set_history_str("zpool", argc, argv, history_str);
+	verify(zpool_stage_history(g_zfs, history_str) == 0);
+
+	/*
+	 * Run the appropriate command.
+	 */
+	if (find_command_idx(cmdname, &i) == 0) {
+		current_command = &command_table[i];
+		ret = command_table[i].func(argc - 1, argv + 1);
+	} else if (strchr(cmdname, '=')) {
+		verify(find_command_idx("set", &i) == 0);
+		current_command = &command_table[i];
+		ret = command_table[i].func(argc, argv);
+	} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
+		/*
+		 * 'freeze' is a vile debugging abomination, so we treat
+		 * it as such.
+		 */
+		char buf[16384];
+		int fd = open(ZFS_DEV, O_RDWR);
+		(void) strcpy((void *)buf, argv[2]);
+		return (!!ioctl(fd, ZFS_IOC_POOL_FREEZE, buf));
+	} else {
+		(void) fprintf(stderr, gettext("unrecognized "
+		    "command '%s'\n"), cmdname);
+		usage(B_FALSE);
+	}
+
+	libzfs_fini(g_zfs);
+
+	/*
+	 * The 'ZFS_ABORT' environment variable causes us to dump core on exit
+	 * for the purposes of running ::findleaks.
+	 */
+	if (getenv("ZFS_ABORT") != NULL) {
+		(void) printf("dumping core by request\n");
+		abort();
+	}
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_util.c b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_util.c
new file mode 100644
index 0000000000000000000000000000000000000000..c7a002efb17cff9fe4039a6df68cf2fa01d58297
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_util.c
@@ -0,0 +1,86 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <errno.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+
+#include "zpool_util.h"
+
+/*
+ * Utility function to guarantee malloc() success.
+ */
+void *
+safe_malloc(size_t size)
+{
+	void *data;
+
+	if ((data = calloc(1, size)) == NULL) {
+		(void) fprintf(stderr, "internal error: out of memory\n");
+		exit(1);
+	}
+
+	return (data);
+}
+
+/*
+ * Display an out of memory error message and abort the current program.
+ */
+void
+zpool_no_memory(void)
+{
+	assert(errno == ENOMEM);
+	(void) fprintf(stderr,
+	    gettext("internal error: out of memory\n"));
+	exit(1);
+}
+
+/*
+ * Return the number of logs in supplied nvlist
+ */
+uint_t
+num_logs(nvlist_t *nv)
+{
+	uint_t nlogs = 0;
+	uint_t c, children;
+	nvlist_t **child;
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return (0);
+
+	for (c = 0; c < children; c++) {
+		uint64_t is_log = B_FALSE;
+
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    &is_log);
+		if (is_log)
+			nlogs++;
+	}
+	return (nlogs);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_util.h b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..134c730fcf8e3058e2184e8dafb1110c638e3661
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_util.h
@@ -0,0 +1,72 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#ifndef	ZPOOL_UTIL_H
+#define	ZPOOL_UTIL_H
+
+#include <libnvpair.h>
+#include <libzfs.h>
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/*
+ * Basic utility functions
+ */
+void *safe_malloc(size_t);
+void zpool_no_memory(void);
+uint_t num_logs(nvlist_t *nv);
+
+/*
+ * Virtual device functions
+ */
+
+nvlist_t *make_root_vdev(zpool_handle_t *zhp, int force, int check_rep,
+    boolean_t replacing, boolean_t dryrun, int argc, char **argv);
+nvlist_t *split_mirror_vdev(zpool_handle_t *zhp, char *newname,
+    nvlist_t *props, splitflags_t flags, int argc, char **argv);
+
+/*
+ * Pool list functions
+ */
+int for_each_pool(int, char **, boolean_t unavail, zprop_list_t **,
+    zpool_iter_f, void *);
+
+typedef struct zpool_list zpool_list_t;
+
+zpool_list_t *pool_list_get(int, char **, zprop_list_t **, int *);
+void pool_list_update(zpool_list_t *);
+int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
+void pool_list_free(zpool_list_t *);
+int pool_list_count(zpool_list_t *);
+void pool_list_remove(zpool_list_t *, zpool_handle_t *);
+
+/*
+ * Shared libzfs handle used throughout zpool(8).
+ * NOTE(review): this is a tentative definition, not an 'extern'
+ * declaration, so every translation unit including this header emits a
+ * common symbol for g_zfs.  Upstream ships it this way — confirm the
+ * build relies on common-symbol merging before changing.
+ */
+libzfs_handle_t *g_zfs;
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* ZPOOL_UTIL_H */
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_vdev.c b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_vdev.c
new file mode 100644
index 0000000000000000000000000000000000000000..5ffd39ac8fe6091a8b81c58a36dca111a9421c32
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zpool/zpool_vdev.c
@@ -0,0 +1,1514 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Functions to convert between a list of vdevs and an nvlist representing the
+ * configuration.  Each entry in the list can be one of:
+ *
+ * 	Device vdevs
+ * 		disk=(path=..., devid=...)
+ * 		file=(path=...)
+ *
+ * 	Group vdevs
+ * 		raidz[1|2]=(...)
+ * 		mirror=(...)
+ *
+ * 	Hot spares
+ *
+ * While the underlying implementation supports it, group vdevs cannot contain
+ * other group vdevs.  All userland verification of devices is contained within
+ * this file.  If successful, the nvlist returned can be passed directly to the
+ * kernel; we've done as much verification as possible in userland.
+ *
+ * Hot spares are a special case, and passed down as an array of disk vdevs, at
+ * the same level as the root of the vdev tree.
+ *
+ * The only function exported by this file is 'make_root_vdev'.  The
+ * function performs several passes:
+ *
+ * 	1. Construct the vdev specification.  Performs syntax validation and
+ *         makes sure each device is valid.
+ * 	2. Check for devices in use.  Using libdiskmgt, makes sure that no
+ *         devices are also in use.  Some can be overridden using the 'force'
+ *         flag, others cannot.
+ * 	3. Check for replication errors if the 'force' flag is not specified.
+ *         validates that the replication level is consistent across the
+ *         entire pool.
+ * 	4. Call libzfs to label any whole disks with an EFI label.
+ */
+
+#include <assert.h>
+#include <devid.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <libnvpair.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <paths.h>
+#include <sys/stat.h>
+#include <sys/disk.h>
+#include <sys/mntent.h>
+#include <libgeom.h>
+
+#include "zpool_util.h"
+
+#define	DISK_ROOT	"/dev/dsk"
+#define	RDISK_ROOT	"/dev/rdsk"
+#define	BACKUP_SLICE	"s2"
+
+/*
+ * For any given vdev specification, we can have multiple errors.  The
+ * vdev_error() function keeps track of whether we have seen an error yet, and
+ * prints out a header if it's the first error we've seen.
+ */
+boolean_t error_seen;		/* B_TRUE once the one-time header was printed */
+boolean_t is_force;		/* set when the user supplied the force flag */
+
+/*PRINTFLIKE1*/
+/*
+ * Print a printf(3)-style vdev validation error to stderr.  The first
+ * call emits a header that either suggests '-f' or, if force is already
+ * in effect, states that manual repair is required.
+ * NOTE(review): uses va_list but <stdarg.h> is not visibly included —
+ * presumably provided transitively; verify.
+ */
+static void
+vdev_error(const char *fmt, ...)
+{
+	va_list ap;
+
+	if (!error_seen) {
+		(void) fprintf(stderr, gettext("invalid vdev specification\n"));
+		if (!is_force)
+			(void) fprintf(stderr, gettext("use '-f' to override "
+			    "the following errors:\n"));
+		else
+			(void) fprintf(stderr, gettext("the following errors "
+			    "must be manually repaired:\n"));
+		error_seen = B_TRUE;
+	}
+
+	va_start(ap, fmt);
+	(void) vfprintf(stderr, fmt, ap);
+	va_end(ap);
+}
+
+#ifdef sun
+/*
+ * Report a libdiskmgt in-use-checking failure as a warning.  ENXIO and
+ * ENODEV are silently ignored since they merely indicate the device does
+ * not live in /dev/dsk.
+ */
+static void
+libdiskmgt_error(int error)
+{
+	/*
+	 * ENXIO/ENODEV is a valid error message if the device doesn't live in
+	 * /dev/dsk.  Don't bother printing an error message in this case.
+	 */
+	if (error == ENXIO || error == ENODEV)
+		return;
+
+	(void) fprintf(stderr, gettext("warning: device in use checking "
+	    "failed: %s\n"), strerror(error));
+}
+
+/*
+ * Validate a device, passing the bulk of the work off to libdiskmgt.
+ *
+ * Returns 0 if the slice may be used (or if libdiskmgt itself failed),
+ * -1 if the slice is in use or overlaps another slice.  'force', 'wholedisk'
+ * and 'isspare' relax the checks as described inline.
+ */
+static int
+check_slice(const char *path, int force, boolean_t wholedisk, boolean_t isspare)
+{
+	char *msg;
+	int error = 0;
+	dm_who_type_t who;
+
+	/* Select the libdiskmgt consumer identity matching our mode. */
+	if (force)
+		who = DM_WHO_ZPOOL_FORCE;
+	else if (isspare)
+		who = DM_WHO_ZPOOL_SPARE;
+	else
+		who = DM_WHO_ZPOOL;
+
+	if (dm_inuse((char *)path, &msg, who, &error) || error) {
+		if (error != 0) {
+			/* Library failure: warn but treat the slice as usable. */
+			libdiskmgt_error(error);
+			return (0);
+		} else {
+			vdev_error("%s", msg);
+			free(msg);
+			return (-1);
+		}
+	}
+
+	/*
+	 * If we're given a whole disk, ignore overlapping slices since we're
+	 * about to label it anyway.
+	 */
+	error = 0;
+	if (!wholedisk && !force &&
+	    (dm_isoverlapping((char *)path, &msg, &error) || error)) {
+		if (error == 0) {
+			/* dm_isoverlapping returned -1 */
+			vdev_error(gettext("%s overlaps with %s\n"), path, msg);
+			free(msg);
+			return (-1);
+		} else if (error != ENODEV) {
+			/* libdiskmgt's devcache only handles physical drives */
+			libdiskmgt_error(error);
+			return (0);
+		}
+	}
+
+	return (0);
+}
+
+
+/*
+ * Validate a whole disk.  Iterate over all slices on the disk and make sure
+ * that none is in use by calling check_slice().
+ *
+ * Returns 0 on success (or whenever libdiskmgt cannot answer), -1 if the
+ * drive has no media or any slice is in use.
+ */
+static int
+check_disk(const char *name, dm_descriptor_t disk, int force, int isspare)
+{
+	dm_descriptor_t *drive, *media, *slice;
+	int err = 0;
+	int i;
+	int ret;
+
+	/*
+	 * Get the drive associated with this disk.  This should never fail,
+	 * because we already have an alias handle open for the device.
+	 */
+	if ((drive = dm_get_associated_descriptors(disk, DM_DRIVE,
+	    &err)) == NULL || *drive == NULL) {
+		if (err)
+			libdiskmgt_error(err);
+		return (0);
+	}
+
+	if ((media = dm_get_associated_descriptors(*drive, DM_MEDIA,
+	    &err)) == NULL) {
+		dm_free_descriptors(drive);
+		if (err)
+			libdiskmgt_error(err);
+		return (0);
+	}
+
+	dm_free_descriptors(drive);
+
+	/*
+	 * It is possible that the user has specified a removable media drive,
+	 * and the media is not present.
+	 */
+	if (*media == NULL) {
+		dm_free_descriptors(media);
+		vdev_error(gettext("'%s' has no media in drive\n"), name);
+		return (-1);
+	}
+
+	if ((slice = dm_get_associated_descriptors(*media, DM_SLICE,
+	    &err)) == NULL) {
+		dm_free_descriptors(media);
+		if (err)
+			libdiskmgt_error(err);
+		return (0);
+	}
+
+	dm_free_descriptors(media);
+
+	ret = 0;
+
+	/*
+	 * Iterate over all slices and report any errors.  We don't care about
+	 * overlapping slices because we are using the whole disk.
+	 */
+	for (i = 0; slice[i] != NULL; i++) {
+		/* NOTE(review): this 'name' shadows the function parameter. */
+		char *name = dm_get_name(slice[i], &err);
+
+		if (check_slice(name, force, B_TRUE, isspare) != 0)
+			ret = -1;
+
+		dm_free_name(name);
+	}
+
+	dm_free_descriptors(slice);
+	return (ret);
+}
+
+/*
+ * Validate a device.
+ *
+ * If libdiskmgt recognizes the trailing path component as a disk alias,
+ * validate the whole disk via check_disk(); otherwise fall back to a
+ * single-slice check.
+ */
+static int
+check_device(const char *path, boolean_t force, boolean_t isspare)
+{
+	dm_descriptor_t desc;
+	int err;
+	char *dev;
+
+	/*
+	 * For whole disks, libdiskmgt does not include the leading dev path.
+	 */
+	dev = strrchr(path, '/');
+	assert(dev != NULL);
+	dev++;
+	if ((desc = dm_get_descriptor_by_name(DM_ALIAS, dev, &err)) != NULL) {
+		err = check_disk(path, desc, force, isspare);
+		dm_free_descriptor(desc);
+		return (err);
+	}
+
+	return (check_slice(path, force, B_FALSE, isspare));
+}
+#endif	/* sun */
+
+/*
+ * Check that a file is valid.  All we can do in this case is check that it's
+ * not in use by another pool, and not in use by swap.
+ *
+ * Returns 0 if the file may be used, -1 otherwise.  A file that cannot be
+ * opened is treated as usable (the caller has already validated existence).
+ */
+static int
+check_file(const char *file, boolean_t force, boolean_t isspare)
+{
+	char  *name;
+	int fd;
+	int ret = 0;
+	int err;	/* only used under #ifdef sun below */
+	pool_state_t state;
+	boolean_t inuse;
+
+#ifdef sun
+	if (dm_inuse_swap(file, &err)) {
+		if (err)
+			libdiskmgt_error(err);
+		else
+			vdev_error(gettext("%s is currently used by swap. "
+			    "Please see swap(1M).\n"), file);
+		return (-1);
+	}
+#endif
+
+	if ((fd = open(file, O_RDONLY)) < 0)
+		return (0);
+
+	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) == 0 && inuse) {
+		const char *desc;
+
+		switch (state) {
+		case POOL_STATE_ACTIVE:
+			desc = gettext("active");
+			break;
+
+		case POOL_STATE_EXPORTED:
+			desc = gettext("exported");
+			break;
+
+		case POOL_STATE_POTENTIALLY_ACTIVE:
+			desc = gettext("potentially active");
+			break;
+
+		default:
+			desc = gettext("unknown");
+			break;
+		}
+
+		/*
+		 * Allow hot spares to be shared between pools.
+		 * NOTE(review): this early return skips free(name) and
+		 * close(fd) below — looks like a small leak; confirm
+		 * against upstream before changing.
+		 */
+		if (state == POOL_STATE_SPARE && isspare)
+			return (0);
+
+		if (state == POOL_STATE_ACTIVE ||
+		    state == POOL_STATE_SPARE || !force) {
+			switch (state) {
+			case POOL_STATE_SPARE:
+				vdev_error(gettext("%s is reserved as a hot "
+				    "spare for pool %s\n"), file, name);
+				break;
+			default:
+				vdev_error(gettext("%s is part of %s pool "
+				    "'%s'\n"), file, desc, name);
+				break;
+			}
+			ret = -1;
+		}
+
+		free(name);
+	}
+
+	(void) close(fd);
+	return (ret);
+}
+
+/*
+ * FreeBSD variant of check_device(): prepend _PATH_DEV to bare device
+ * names, then defer to check_file() for the in-use check (no libdiskmgt
+ * on FreeBSD).
+ */
+static int
+check_device(const char *name, boolean_t force, boolean_t isspare)
+{
+	char path[MAXPATHLEN];
+
+	if (strncmp(name, _PATH_DEV, sizeof(_PATH_DEV) - 1) != 0)
+		snprintf(path, sizeof(path), "%s%s", _PATH_DEV, name);
+	else
+		strlcpy(path, name, sizeof(path));
+
+	return (check_file(path, force, isspare));
+}
+
+/*
+ * By "whole disk" we mean an entire physical disk (something we can
+ * label, toggle the write cache on, etc.) as opposed to the full
+ * capacity of a pseudo-device such as lofi or did.  We act as if we
+ * are labeling the disk, which should be a pretty good test of whether
+ * it's a viable device or not.  Returns B_TRUE if it is and B_FALSE if
+ * it isn't.
+ */
+static boolean_t
+is_whole_disk(const char *arg)
+{
+#ifdef sun
+	struct dk_gpt *label;
+	int	fd;
+	char	path[MAXPATHLEN];
+
+	/* Probe by opening the backup slice and trying an EFI label init. */
+	(void) snprintf(path, sizeof (path), "%s%s%s",
+	    RDISK_ROOT, strrchr(arg, '/'), BACKUP_SLICE);
+	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0)
+		return (B_FALSE);
+	if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
+		(void) close(fd);
+		return (B_FALSE);
+	}
+	efi_free(label);
+	(void) close(fd);
+	return (B_TRUE);
+#else
+	int fd;
+
+	/* On FreeBSD, any openable GEOM provider counts as a whole disk. */
+	fd = g_open(arg, 0);
+	if (fd >= 0) {
+		g_close(fd);
+		return (B_TRUE);
+	}
+	return (B_FALSE);
+#endif
+}
+
+/*
+ * Create a leaf vdev.  Determine if this is a file or a device.  If it's a
+ * device, fill in the device id to make a complete nvlist.  Valid forms for a
+ * leaf vdev are:
+ *
+ * 	/dev/dsk/xxx	Complete disk path
+ * 	/xxx		Full path to file
+ * 	xxx		Shorthand for /dev/dsk/xxx
+ *
+ * Returns the constructed nvlist, or NULL (with an error printed) when the
+ * argument cannot be resolved to a usable device or regular file.
+ */
+static nvlist_t *
+make_leaf_vdev(const char *arg, uint64_t is_log)
+{
+	char path[MAXPATHLEN];
+	struct stat64 statbuf;
+	nvlist_t *vdev = NULL;
+	char *type = NULL;
+	boolean_t wholedisk = B_FALSE;
+
+	/*
+	 * Determine what type of vdev this is, and put the full path into
+	 * 'path'.  We detect whether this is a device or file afterwards by
+	 * checking the st_mode of the file.
+	 */
+	if (arg[0] == '/') {
+		/*
+		 * Complete device or file path.  Exact type is determined by
+		 * examining the file descriptor afterwards.
+		 */
+		wholedisk = is_whole_disk(arg);
+		if (!wholedisk && (stat64(arg, &statbuf) != 0)) {
+			(void) fprintf(stderr,
+			    gettext("cannot open '%s': %s\n"),
+			    arg, strerror(errno));
+			return (NULL);
+		}
+
+		(void) strlcpy(path, arg, sizeof (path));
+	} else {
+		/*
+		 * This may be a short path for a device, or it could be total
+		 * gibberish.  Check to see if it's a known device in
+		 * /dev/dsk/.  As part of this check, see if we've been given
+		 * an entire disk (minus the slice number).
+		 */
+		if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
+			strlcpy(path, arg, sizeof (path));
+		else
+			snprintf(path, sizeof (path), "%s%s", _PATH_DEV, arg);
+		wholedisk = is_whole_disk(path);
+		if (!wholedisk && (stat64(path, &statbuf) != 0)) {
+			/*
+			 * If we got ENOENT, then the user gave us
+			 * gibberish, so try to direct them with a
+			 * reasonable error message.  Otherwise,
+			 * regurgitate strerror() since it's the best we
+			 * can do.
+			 */
+			if (errno == ENOENT) {
+				(void) fprintf(stderr,
+				    gettext("cannot open '%s': no such "
+				    "GEOM provider\n"), arg);
+				(void) fprintf(stderr,
+				    gettext("must be a full path or "
+				    "shorthand device name\n"));
+				return (NULL);
+			} else {
+				(void) fprintf(stderr,
+				    gettext("cannot open '%s': %s\n"),
+				    path, strerror(errno));
+				return (NULL);
+			}
+		}
+	}
+
+#ifdef __FreeBSD__
+	/*
+	 * GEOM providers appear as character devices; recast them as block
+	 * devices so the type check below classifies them as disks.
+	 * NOTE(review): when 'wholedisk' is true, stat64() was skipped above
+	 * and statbuf is read uninitialized here — confirm against upstream.
+	 */
+	if (S_ISCHR(statbuf.st_mode)) {
+		statbuf.st_mode &= ~S_IFCHR;
+		statbuf.st_mode |= S_IFBLK;
+		wholedisk = B_FALSE;
+	}
+#endif
+
+	/*
+	 * Determine whether this is a device or a file.
+	 */
+	if (wholedisk || S_ISBLK(statbuf.st_mode)) {
+		type = VDEV_TYPE_DISK;
+	} else if (S_ISREG(statbuf.st_mode)) {
+		type = VDEV_TYPE_FILE;
+	} else {
+		(void) fprintf(stderr, gettext("cannot use '%s': must be a "
+		    "GEOM provider or regular file\n"), path);
+		return (NULL);
+	}
+
+	/*
+	 * Finally, we have the complete device or file, and we know that it is
+	 * acceptable to use.  Construct the nvlist to describe this vdev.  All
+	 * vdevs have a 'path' element, and devices also have a 'devid' element.
+	 */
+	verify(nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) == 0);
+	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_PATH, path) == 0);
+	verify(nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, type) == 0);
+	verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_LOG, is_log) == 0);
+	if (strcmp(type, VDEV_TYPE_DISK) == 0)
+		verify(nvlist_add_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK,
+		    (uint64_t)wholedisk) == 0);
+
+	/*
+	 * For a whole disk, defer getting its devid until after labeling it.
+	 */
+	if (S_ISBLK(statbuf.st_mode) && !wholedisk) {
+		/*
+		 * Get the devid for the device.
+		 */
+		int fd;
+		ddi_devid_t devid;
+		char *minor = NULL, *devid_str = NULL;
+
+		if ((fd = open(path, O_RDONLY)) < 0) {
+			(void) fprintf(stderr, gettext("cannot open '%s': "
+			    "%s\n"), path, strerror(errno));
+			nvlist_free(vdev);
+			return (NULL);
+		}
+
+		if (devid_get(fd, &devid) == 0) {
+			if (devid_get_minor_name(fd, &minor) == 0 &&
+			    (devid_str = devid_str_encode(devid, minor)) !=
+			    NULL) {
+				verify(nvlist_add_string(vdev,
+				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
+			}
+			if (devid_str != NULL)
+				devid_str_free(devid_str);
+			if (minor != NULL)
+				devid_str_free(minor);
+			devid_free(devid);
+		}
+
+		(void) close(fd);
+	}
+
+	return (vdev);
+}
+
+/*
+ * Go through and verify the replication level of the pool is consistent.
+ * Performs the following checks:
+ *
+ * 	For the new spec, verifies that devices in mirrors and raidz are the
+ * 	same size.
+ *
+ * 	If the current configuration already has inconsistent replication
+ * 	levels, ignore any other potential problems in the new spec.
+ *
+ * 	Otherwise, make sure that the current spec (if there is one) and the new
+ * 	spec have consistent replication levels.
+ */
+typedef struct replication_level {
+	char *zprl_type;	/* vdev type (disk/file/mirror/raidz) */
+	uint64_t zprl_children;	/* number of children per toplevel vdev */
+	uint64_t zprl_parity;	/* raidz parity level; 0 for non-raidz */
+} replication_level_t;
+
+/* Permitted size difference (in bytes) between devices of one vdev. */
+#define	ZPOOL_FUZZ	(16 * 1024 * 1024)
+
+/*
+ * Given a list of toplevel vdevs, return the current replication level.  If
+ * the config is inconsistent, then NULL is returned.  If 'fatal' is set, then
+ * an error message will be displayed for each self-inconsistent vdev.
+ *
+ * On success the caller owns the returned replication_level_t and must
+ * free() it.  Separate log vdevs are excluded from the check.
+ */
+static replication_level_t *
+get_replication(nvlist_t *nvroot, boolean_t fatal)
+{
+	nvlist_t **top;
+	uint_t t, toplevels;
+	nvlist_t **child;
+	uint_t c, children;
+	nvlist_t *nv;
+	char *type;
+	replication_level_t lastrep, rep, *ret;
+	boolean_t dontreport;
+
+	ret = safe_malloc(sizeof (replication_level_t));
+
+	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+	    &top, &toplevels) == 0);
+
+	lastrep.zprl_type = NULL;
+	for (t = 0; t < toplevels; t++) {
+		uint64_t is_log = B_FALSE;
+
+		nv = top[t];
+
+		/*
+		 * For separate logs we ignore the top level vdev replication
+		 * constraints.
+		 */
+		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
+		if (is_log)
+			continue;
+
+		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE,
+		    &type) == 0);
+		if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+		    &child, &children) != 0) {
+			/*
+			 * This is a 'file' or 'disk' vdev.
+			 */
+			rep.zprl_type = type;
+			rep.zprl_children = 1;
+			rep.zprl_parity = 0;
+		} else {
+			uint64_t vdev_size;
+
+			/*
+			 * This is a mirror or RAID-Z vdev.  Go through and make
+			 * sure the contents are all the same (files vs. disks),
+			 * keeping track of the number of elements in the
+			 * process.
+			 *
+			 * We also check that the size of each vdev (if it can
+			 * be determined) is the same.
+			 */
+			rep.zprl_type = type;
+			rep.zprl_children = 0;
+
+			if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
+				verify(nvlist_lookup_uint64(nv,
+				    ZPOOL_CONFIG_NPARITY,
+				    &rep.zprl_parity) == 0);
+				assert(rep.zprl_parity != 0);
+			} else {
+				rep.zprl_parity = 0;
+			}
+
+			/*
+			 * The 'dontreport' variable indicates that we've
+			 * already reported an error for this spec, so don't
+			 * bother doing it again.
+			 */
+			type = NULL;
+			dontreport = 0;
+			vdev_size = -1ULL;
+			for (c = 0; c < children; c++) {
+				nvlist_t *cnv = child[c];
+				char *path;
+				struct stat64 statbuf;
+				uint64_t size = -1ULL;
+				char *childtype;
+				int fd, err;
+
+				rep.zprl_children++;
+
+				verify(nvlist_lookup_string(cnv,
+				    ZPOOL_CONFIG_TYPE, &childtype) == 0);
+
+				/*
+				 * If this is a replacing or spare vdev, then
+				 * get the real first child of the vdev.
+				 */
+				if (strcmp(childtype,
+				    VDEV_TYPE_REPLACING) == 0 ||
+				    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
+					nvlist_t **rchild;
+					uint_t rchildren;
+
+					verify(nvlist_lookup_nvlist_array(cnv,
+					    ZPOOL_CONFIG_CHILDREN, &rchild,
+					    &rchildren) == 0);
+					assert(rchildren == 2);
+					cnv = rchild[0];
+
+					verify(nvlist_lookup_string(cnv,
+					    ZPOOL_CONFIG_TYPE,
+					    &childtype) == 0);
+				}
+
+				verify(nvlist_lookup_string(cnv,
+				    ZPOOL_CONFIG_PATH, &path) == 0);
+
+				/*
+				 * If we have a raidz/mirror that combines disks
+				 * with files, report it as an error.
+				 */
+				if (!dontreport && type != NULL &&
+				    strcmp(type, childtype) != 0) {
+					if (ret != NULL)
+						free(ret);
+					ret = NULL;
+					if (fatal)
+						vdev_error(gettext(
+						    "mismatched replication "
+						    "level: %s contains both "
+						    "files and devices\n"),
+						    rep.zprl_type);
+					else
+						return (NULL);
+					dontreport = B_TRUE;
+				}
+
+				/*
+				 * According to stat(2), the value of 'st_size'
+				 * is undefined for block devices and character
+				 * devices.  But there is no effective way to
+				 * determine the real size in userland.
+				 *
+				 * Instead, we'll take advantage of an
+				 * implementation detail of spec_size().  If the
+				 * device is currently open, then we (should)
+				 * return a valid size.
+				 *
+				 * If we still don't get a valid size (indicated
+				 * by a size of 0 or MAXOFFSET_T), then ignore
+				 * this device altogether.
+				 */
+				if ((fd = open(path, O_RDONLY)) >= 0) {
+					err = fstat64(fd, &statbuf);
+					(void) close(fd);
+				} else {
+					err = stat64(path, &statbuf);
+				}
+
+				if (err != 0 ||
+				    statbuf.st_size == 0 ||
+				    statbuf.st_size == MAXOFFSET_T)
+					continue;
+
+				size = statbuf.st_size;
+
+				/*
+				 * Also make sure that devices and
+				 * slices have a consistent size.  If
+				 * they differ by a significant amount
+				 * (~16MB) then report an error.
+				 *
+				 * NOTE(review): labs() takes a long; the
+				 * uint64_t difference is narrowed here and
+				 * could truncate on ILP32 — confirm against
+				 * upstream before changing.
+				 */
+				if (!dontreport &&
+				    (vdev_size != -1ULL &&
+				    (labs(size - vdev_size) >
+				    ZPOOL_FUZZ))) {
+					if (ret != NULL)
+						free(ret);
+					ret = NULL;
+					if (fatal)
+						vdev_error(gettext(
+						    "%s contains devices of "
+						    "different sizes\n"),
+						    rep.zprl_type);
+					else
+						return (NULL);
+					dontreport = B_TRUE;
+				}
+
+				type = childtype;
+				vdev_size = size;
+			}
+		}
+
+		/*
+		 * At this point, we have the replication of the last toplevel
+		 * vdev in 'rep'.  Compare it to 'lastrep' to see if it's
+		 * different.
+		 */
+		if (lastrep.zprl_type != NULL) {
+			if (strcmp(lastrep.zprl_type, rep.zprl_type) != 0) {
+				if (ret != NULL)
+					free(ret);
+				ret = NULL;
+				if (fatal)
+					vdev_error(gettext(
+					    "mismatched replication level: "
+					    "both %s and %s vdevs are "
+					    "present\n"),
+					    lastrep.zprl_type, rep.zprl_type);
+				else
+					return (NULL);
+			} else if (lastrep.zprl_parity != rep.zprl_parity) {
+				if (ret)
+					free(ret);
+				ret = NULL;
+				if (fatal)
+					vdev_error(gettext(
+					    "mismatched replication level: "
+					    "both %llu and %llu device parity "
+					    "%s vdevs are present\n"),
+					    lastrep.zprl_parity,
+					    rep.zprl_parity,
+					    rep.zprl_type);
+				else
+					return (NULL);
+			} else if (lastrep.zprl_children != rep.zprl_children) {
+				if (ret)
+					free(ret);
+				ret = NULL;
+				if (fatal)
+					vdev_error(gettext(
+					    "mismatched replication level: "
+					    "both %llu-way and %llu-way %s "
+					    "vdevs are present\n"),
+					    lastrep.zprl_children,
+					    rep.zprl_children,
+					    rep.zprl_type);
+				else
+					return (NULL);
+			}
+		}
+		lastrep = rep;
+	}
+
+	/* NULL here means an inconsistency was found (and reported if fatal). */
+	if (ret != NULL)
+		*ret = rep;
+
+	return (ret);
+}
+
+/*
+ * Check the replication level of the vdev spec against the current pool.  Calls
+ * get_replication() to make sure the new spec is self-consistent.  If the pool
+ * has a consistent replication level, then we ignore any errors.  Otherwise,
+ * report any difference between the two.
+ *
+ * Returns 0 if the new spec is acceptable, -1 if it is self-inconsistent or
+ * mismatches a self-consistent existing pool.
+ */
+static int
+check_replication(nvlist_t *config, nvlist_t *newroot)
+{
+	nvlist_t **child;
+	uint_t	children;
+	replication_level_t *current = NULL, *new;
+	int ret;
+
+	/*
+	 * If we have a current pool configuration, check to see if it's
+	 * self-consistent.  If not, simply return success.
+	 */
+	if (config != NULL) {
+		nvlist_t *nvroot;
+
+		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    &nvroot) == 0);
+		if ((current = get_replication(nvroot, B_FALSE)) == NULL)
+			return (0);
+	}
+	/*
+	 * for spares there may be no children, and therefore no
+	 * replication level to check
+	 */
+	if ((nvlist_lookup_nvlist_array(newroot, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0) || (children == 0)) {
+		free(current);
+		return (0);
+	}
+
+	/*
+	 * If all we have is logs then there's no replication level to check.
+	 */
+	if (num_logs(newroot) == children) {
+		free(current);
+		return (0);
+	}
+
+	/*
+	 * Get the replication level of the new vdev spec, reporting any
+	 * inconsistencies found.
+	 */
+	if ((new = get_replication(newroot, B_TRUE)) == NULL) {
+		free(current);
+		return (-1);
+	}
+
+	/*
+	 * Check to see if the new vdev spec matches the replication level of
+	 * the current pool.
+	 */
+	ret = 0;
+	if (current != NULL) {
+		if (strcmp(current->zprl_type, new->zprl_type) != 0) {
+			vdev_error(gettext(
+			    "mismatched replication level: pool uses %s "
+			    "and new vdev is %s\n"),
+			    current->zprl_type, new->zprl_type);
+			ret = -1;
+		} else if (current->zprl_parity != new->zprl_parity) {
+			vdev_error(gettext(
+			    "mismatched replication level: pool uses %llu "
+			    "device parity and new vdev uses %llu\n"),
+			    current->zprl_parity, new->zprl_parity);
+			ret = -1;
+		} else if (current->zprl_children != new->zprl_children) {
+			vdev_error(gettext(
+			    "mismatched replication level: pool uses %llu-way "
+			    "%s and new vdev uses %llu-way %s\n"),
+			    current->zprl_children, current->zprl_type,
+			    new->zprl_children, new->zprl_type);
+			ret = -1;
+		}
+	}
+
+	free(new);
+	if (current != NULL)
+		free(current);
+
+	return (ret);
+}
+
+#ifdef sun
+/*
+ * Go through and find any whole disks in the vdev specification, labelling them
+ * as appropriate.  When constructing the vdev spec, we were unable to open this
+ * device in order to provide a devid.  Now that we have labelled the disk and
+ * know that slice 0 is valid, we can construct the devid now.
+ *
+ * If the disk was already labeled with an EFI label, we will have gotten the
+ * devid already (because we were able to open the whole disk).  Otherwise, we
+ * need to get the devid after we label the disk.
+ *
+ * Recurses over children, spares and l2cache devices; returns 0 on
+ * success, -1 on the first labeling/open failure.
+ */
+static int
+make_disks(zpool_handle_t *zhp, nvlist_t *nv)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	char *type, *path, *diskname;
+	char buf[MAXPATHLEN];
+	uint64_t wholedisk;
+	int fd;
+	int ret;
+	ddi_devid_t devid;
+	char *minor = NULL, *devid_str = NULL;
+
+	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0) {
+
+		/* Leaf vdev: only whole disks need labeling. */
+		if (strcmp(type, VDEV_TYPE_DISK) != 0)
+			return (0);
+
+		/*
+		 * We have a disk device.  Get the path to the device
+		 * and see if it's a whole disk by appending the backup
+		 * slice and stat()ing the device.
+		 */
+		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
+		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+		    &wholedisk) != 0 || !wholedisk)
+			return (0);
+
+		diskname = strrchr(path, '/');
+		assert(diskname != NULL);
+		diskname++;
+		if (zpool_label_disk(g_zfs, zhp, diskname) == -1)
+			return (-1);
+
+		/*
+		 * Fill in the devid, now that we've labeled the disk.
+		 */
+		(void) snprintf(buf, sizeof (buf), "%ss0", path);
+		if ((fd = open(buf, O_RDONLY)) < 0) {
+			(void) fprintf(stderr,
+			    gettext("cannot open '%s': %s\n"),
+			    buf, strerror(errno));
+			return (-1);
+		}
+
+		if (devid_get(fd, &devid) == 0) {
+			if (devid_get_minor_name(fd, &minor) == 0 &&
+			    (devid_str = devid_str_encode(devid, minor)) !=
+			    NULL) {
+				verify(nvlist_add_string(nv,
+				    ZPOOL_CONFIG_DEVID, devid_str) == 0);
+			}
+			if (devid_str != NULL)
+				devid_str_free(devid_str);
+			if (minor != NULL)
+				devid_str_free(minor);
+			devid_free(devid);
+		}
+
+		/*
+		 * Update the path to refer to the 's0' slice.  The presence of
+		 * the 'whole_disk' field indicates to the CLI that we should
+		 * chop off the slice number when displaying the device in
+		 * future output.
+		 */
+		verify(nvlist_add_string(nv, ZPOOL_CONFIG_PATH, buf) == 0);
+
+		(void) close(fd);
+
+		return (0);
+	}
+
+	for (c = 0; c < children; c++)
+		if ((ret = make_disks(zhp, child[c])) != 0)
+			return (ret);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+	    &child, &children) == 0)
+		for (c = 0; c < children; c++)
+			if ((ret = make_disks(zhp, child[c])) != 0)
+				return (ret);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+	    &child, &children) == 0)
+		for (c = 0; c < children; c++)
+			if ((ret = make_disks(zhp, child[c])) != 0)
+				return (ret);
+
+	return (0);
+}
+#endif	/* sun */
+
+/*
+ * Determine if the given path is a hot spare within the given configuration.
+ *
+ * Reads the on-disk label of 'path' to obtain its guid, then scans the
+ * config's spares array for a matching guid.  Any failure (unopenable
+ * path, not in spare state, unreadable label) yields B_FALSE.
+ */
+static boolean_t
+is_spare(nvlist_t *config, const char *path)
+{
+	int fd;
+	pool_state_t state;
+	char *name = NULL;
+	nvlist_t *label;
+	uint64_t guid, spareguid;
+	nvlist_t *nvroot;
+	nvlist_t **spares;
+	uint_t i, nspares;
+	boolean_t inuse;
+
+	if ((fd = open(path, O_RDONLY)) < 0)
+		return (B_FALSE);
+
+	if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0 ||
+	    !inuse ||
+	    state != POOL_STATE_SPARE ||
+	    zpool_read_label(fd, &label) != 0) {
+		free(name);
+		(void) close(fd);
+		return (B_FALSE);
+	}
+	free(name);
+	(void) close(fd);
+
+	verify(nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) == 0);
+	nvlist_free(label);
+
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+	    &spares, &nspares) == 0) {
+		for (i = 0; i < nspares; i++) {
+			verify(nvlist_lookup_uint64(spares[i],
+			    ZPOOL_CONFIG_GUID, &spareguid) == 0);
+			if (spareguid == guid)
+				return (B_TRUE);
+		}
+	}
+
+	return (B_FALSE);
+}
+
+/*
+ * Go through and find any devices that are in use.  We rely on libdiskmgt for
+ * the majority of this task.
+ *
+ * Recurses over children, spares and l2cache devices; returns 0 if nothing
+ * is in use, non-zero otherwise.
+ */
+static int
+check_in_use(nvlist_t *config, nvlist_t *nv, boolean_t force,
+    boolean_t replacing, boolean_t isspare)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	char *type, *path;
+	int ret;
+	char buf[MAXPATHLEN];
+	uint64_t wholedisk;
+
+	verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0) {
+
+		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
+
+		/*
+		 * As a generic check, we look to see if this is a replace of a
+		 * hot spare within the same pool.  If so, we allow it
+		 * regardless of what libdiskmgt or zpool_in_use() says.
+		 */
+		if (replacing) {
+#ifdef sun
+			if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+			    &wholedisk) == 0 && wholedisk)
+				(void) snprintf(buf, sizeof (buf), "%ss0",
+				    path);
+			else
+#endif
+				(void) strlcpy(buf, path, sizeof (buf));
+
+			if (is_spare(config, buf))
+				return (0);
+		}
+
+		/*
+		 * NOTE(review): if 'type' is neither disk nor file, 'ret' is
+		 * returned uninitialized below — confirm against upstream
+		 * before changing.
+		 */
+		if (strcmp(type, VDEV_TYPE_DISK) == 0)
+			ret = check_device(path, force, isspare);
+
+		if (strcmp(type, VDEV_TYPE_FILE) == 0)
+			ret = check_file(path, force, isspare);
+
+		return (ret);
+	}
+
+	for (c = 0; c < children; c++)
+		if ((ret = check_in_use(config, child[c], force,
+		    replacing, B_FALSE)) != 0)
+			return (ret);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+	    &child, &children) == 0)
+		for (c = 0; c < children; c++)
+			if ((ret = check_in_use(config, child[c], force,
+			    replacing, B_TRUE)) != 0)
+				return (ret);
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+	    &child, &children) == 0)
+		for (c = 0; c < children; c++)
+			if ((ret = check_in_use(config, child[c], force,
+			    replacing, B_FALSE)) != 0)
+				return (ret);
+
+	return (0);
+}
+
+/*
+ * Map a grouping keyword from the command line ("raidz[N]", "mirror",
+ * "spare", "log", "cache") to its canonical VDEV_TYPE_* string.  On match,
+ * *mindev/*maxdev (if non-NULL) receive the minimum/maximum number of leaf
+ * devices the grouping accepts.  Returns NULL if 'type' is not a grouping
+ * keyword (i.e. it names a plain device).
+ */
+static const char *
+is_grouping(const char *type, int *mindev, int *maxdev)
+{
+	if (strncmp(type, "raidz", 5) == 0) {
+		const char *p = type + 5;
+		char *end;
+		long nparity;
+
+		if (*p == '\0') {
+			/* bare "raidz" means single parity */
+			nparity = 1;
+		} else if (*p == '0') {
+			return (NULL); /* no zero prefixes allowed */
+		} else {
+			errno = 0;
+			nparity = strtol(p, &end, 10);
+			/* parity must be 1..254 and consume the whole suffix */
+			if (errno != 0 || nparity < 1 || nparity >= 255 ||
+			    *end != '\0')
+				return (NULL);
+		}
+
+		/* need at least one data device beyond the parity devices */
+		if (mindev != NULL)
+			*mindev = nparity + 1;
+		if (maxdev != NULL)
+			*maxdev = 255;
+		return (VDEV_TYPE_RAIDZ);
+	}
+
+	/* all non-raidz groupings are unbounded above */
+	if (maxdev != NULL)
+		*maxdev = INT_MAX;
+
+	if (strcmp(type, "mirror") == 0) {
+		if (mindev != NULL)
+			*mindev = 2;
+		return (VDEV_TYPE_MIRROR);
+	}
+
+	if (strcmp(type, "spare") == 0) {
+		if (mindev != NULL)
+			*mindev = 1;
+		return (VDEV_TYPE_SPARE);
+	}
+
+	if (strcmp(type, "log") == 0) {
+		if (mindev != NULL)
+			*mindev = 1;
+		return (VDEV_TYPE_LOG);
+	}
+
+	if (strcmp(type, "cache") == 0) {
+		if (mindev != NULL)
+			*mindev = 1;
+		return (VDEV_TYPE_L2CACHE);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Construct a syntactically valid vdev specification,
+ * and ensure that all devices and files exist and can be opened.
+ * Note: we don't bother freeing anything in the error paths
+ * because the program is just going to exit anyway.
+ *
+ * argv is the remaining command line: a sequence of grouping keywords
+ * ("mirror", "raidz[N]", "spare", "log", "cache") each followed by its
+ * leaf devices, or bare devices for single-disk top-level vdevs.
+ * Returns a newly allocated root nvlist, or NULL on any specification
+ * error (after printing a diagnostic to stderr).
+ */
+nvlist_t *
+construct_spec(int argc, char **argv)
+{
+	nvlist_t *nvroot, *nv, **top, **spares, **l2cache;
+	int t, toplevels, mindev, maxdev, nspares, nlogs, nl2cache;
+	const char *type;
+	uint64_t is_log;
+	boolean_t seen_logs;
+
+	top = NULL;
+	toplevels = 0;
+	spares = NULL;
+	l2cache = NULL;
+	nspares = 0;
+	nlogs = 0;
+	nl2cache = 0;
+	is_log = B_FALSE;	/* devices after "log" get this flag set */
+	seen_logs = B_FALSE;
+
+	while (argc > 0) {
+		nv = NULL;
+
+		/*
+		 * If it's a mirror or raidz, the subsequent arguments are
+		 * its leaves -- until we encounter the next mirror or raidz.
+		 */
+		if ((type = is_grouping(argv[0], &mindev, &maxdev)) != NULL) {
+			nvlist_t **child = NULL;
+			int c, children = 0;
+
+			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
+				if (spares != NULL) {
+					(void) fprintf(stderr,
+					    gettext("invalid vdev "
+					    "specification: 'spare' can be "
+					    "specified only once\n"));
+					return (NULL);
+				}
+				/* spares terminate any preceding log section */
+				is_log = B_FALSE;
+			}
+
+			if (strcmp(type, VDEV_TYPE_LOG) == 0) {
+				if (seen_logs) {
+					(void) fprintf(stderr,
+					    gettext("invalid vdev "
+					    "specification: 'log' can be "
+					    "specified only once\n"));
+					return (NULL);
+				}
+				seen_logs = B_TRUE;
+				is_log = B_TRUE;
+				argc--;
+				argv++;
+				/*
+				 * A log is not a real grouping device.
+				 * We just set is_log and continue.
+				 */
+				continue;
+			}
+
+			if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
+				if (l2cache != NULL) {
+					(void) fprintf(stderr,
+					    gettext("invalid vdev "
+					    "specification: 'cache' can be "
+					    "specified only once\n"));
+					return (NULL);
+				}
+				is_log = B_FALSE;
+			}
+
+			if (is_log) {
+				/* only mirrored (or plain) log devices allowed */
+				if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
+					(void) fprintf(stderr,
+					    gettext("invalid vdev "
+					    "specification: unsupported 'log' "
+					    "device: %s\n"), type);
+					return (NULL);
+				}
+				nlogs++;
+			}
+
+			/*
+			 * Collect leaves for this grouping until the next
+			 * grouping keyword or end of arguments.
+			 */
+			for (c = 1; c < argc; c++) {
+				if (is_grouping(argv[c], NULL, NULL) != NULL)
+					break;
+				children++;
+				child = realloc(child,
+				    children * sizeof (nvlist_t *));
+				if (child == NULL)
+					zpool_no_memory();
+				if ((nv = make_leaf_vdev(argv[c], B_FALSE))
+				    == NULL)
+					return (NULL);
+				child[children - 1] = nv;
+			}
+
+			if (children < mindev) {
+				(void) fprintf(stderr, gettext("invalid vdev "
+				    "specification: %s requires at least %d "
+				    "devices\n"), argv[0], mindev);
+				return (NULL);
+			}
+
+			if (children > maxdev) {
+				(void) fprintf(stderr, gettext("invalid vdev "
+				    "specification: %s supports no more than "
+				    "%d devices\n"), argv[0], maxdev);
+				return (NULL);
+			}
+
+			/* consume the keyword and all of its leaves */
+			argc -= c;
+			argv += c;
+
+			if (strcmp(type, VDEV_TYPE_SPARE) == 0) {
+				/* spare/l2cache arrays are kept until nvroot */
+				spares = child;
+				nspares = children;
+				continue;
+			} else if (strcmp(type, VDEV_TYPE_L2CACHE) == 0) {
+				l2cache = child;
+				nl2cache = children;
+				continue;
+			} else {
+				/* mirror or raidz: build the interior vdev */
+				verify(nvlist_alloc(&nv, NV_UNIQUE_NAME,
+				    0) == 0);
+				verify(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
+				    type) == 0);
+				verify(nvlist_add_uint64(nv,
+				    ZPOOL_CONFIG_IS_LOG, is_log) == 0);
+				if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
+					/* mindev was nparity + 1 */
+					verify(nvlist_add_uint64(nv,
+					    ZPOOL_CONFIG_NPARITY,
+					    mindev - 1) == 0);
+				}
+				/* nvlist_add_nvlist_array copies the children */
+				verify(nvlist_add_nvlist_array(nv,
+				    ZPOOL_CONFIG_CHILDREN, child,
+				    children) == 0);
+
+				for (c = 0; c < children; c++)
+					nvlist_free(child[c]);
+				free(child);
+			}
+		} else {
+			/*
+			 * We have a device.  Pass off to make_leaf_vdev() to
+			 * construct the appropriate nvlist describing the vdev.
+			 */
+			if ((nv = make_leaf_vdev(argv[0], is_log)) == NULL)
+				return (NULL);
+			if (is_log)
+				nlogs++;
+			argc--;
+			argv++;
+		}
+
+		toplevels++;
+		top = realloc(top, toplevels * sizeof (nvlist_t *));
+		if (top == NULL)
+			zpool_no_memory();
+		top[toplevels - 1] = nv;
+	}
+
+	if (toplevels == 0 && nspares == 0 && nl2cache == 0) {
+		(void) fprintf(stderr, gettext("invalid vdev "
+		    "specification: at least one toplevel vdev must be "
+		    "specified\n"));
+		return (NULL);
+	}
+
+	if (seen_logs && nlogs == 0) {
+		(void) fprintf(stderr, gettext("invalid vdev specification: "
+		    "log requires at least 1 device\n"));
+		return (NULL);
+	}
+
+	/*
+	 * Finally, create nvroot and add all top-level vdevs to it.
+	 */
+	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
+	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
+	    VDEV_TYPE_ROOT) == 0);
+	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+	    top, toplevels) == 0);
+	if (nspares != 0)
+		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+		    spares, nspares) == 0);
+	if (nl2cache != 0)
+		verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+		    l2cache, nl2cache) == 0);
+
+	/* the arrays were copied into nvroot; release the originals */
+	for (t = 0; t < toplevels; t++)
+		nvlist_free(top[t]);
+	for (t = 0; t < nspares; t++)
+		nvlist_free(spares[t]);
+	for (t = 0; t < nl2cache; t++)
+		nvlist_free(l2cache[t]);
+	if (spares)
+		free(spares);
+	if (l2cache)
+		free(l2cache);
+	free(top);
+
+	return (nvroot);
+}
+
+/*
+ * Build the new-pool vdev tree for "zpool split".  If devices are named on
+ * the command line (argc > 0), construct and sanity-check a root nvlist from
+ * them; each named device must be a plain leaf (grouping keywords such as
+ * "mirror" are rejected).  Then hand the (possibly NULL) tree to
+ * zpool_vdev_split() to perform the split.  Returns the resulting root
+ * nvlist, or NULL on failure.
+ */
+nvlist_t *
+split_mirror_vdev(zpool_handle_t *zhp, char *newname, nvlist_t *props,
+    splitflags_t flags, int argc, char **argv)
+{
+	nvlist_t *newroot = NULL, **child;
+	uint_t c, children;
+
+	if (argc > 0) {
+		if ((newroot = construct_spec(argc, argv)) == NULL) {
+			(void) fprintf(stderr, gettext("Unable to build a "
+			    "pool from the specified devices\n"));
+			return (NULL);
+		}
+
+#ifdef sun
+		if (!flags.dryrun && make_disks(zhp, newroot) != 0) {
+			nvlist_free(newroot);
+			return (NULL);
+		}
+#endif
+
+		/* avoid any tricks in the spec */
+		verify(nvlist_lookup_nvlist_array(newroot,
+		    ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
+		for (c = 0; c < children; c++) {
+			char *path;
+			const char *type;
+			int min, max;
+
+			verify(nvlist_lookup_string(child[c],
+			    ZPOOL_CONFIG_PATH, &path) == 0);
+			/* a grouping keyword smuggled in as a "path" is an error */
+			if ((type = is_grouping(path, &min, &max)) != NULL) {
+				(void) fprintf(stderr, gettext("Cannot use "
+				    "'%s' as a device for splitting\n"), type);
+				nvlist_free(newroot);
+				return (NULL);
+			}
+		}
+	}
+
+	if (zpool_vdev_split(zhp, newname, &newroot, props, flags) != 0) {
+		if (newroot != NULL)
+			nvlist_free(newroot);
+		return (NULL);
+	}
+
+	return (newroot);
+}
+
+/*
+ * Get and validate the contents of the given vdev specification.  This ensures
+ * that the nvlist returned is well-formed, that all the devices exist, and that
+ * they are not currently in use by any other known consumer.  The 'poolconfig'
+ * parameter is the current configuration of the pool when adding devices
+ * existing pool, and is used to perform additional checks, such as changing the
+ * replication level of the pool.  It can be 'NULL' to indicate that this is a
+ * new pool.  The 'force' flag controls whether devices should be forcefully
+ * added, even if they appear in use.
+ */
+nvlist_t *
+make_root_vdev(zpool_handle_t *zhp, int force, int check_rep,
+    boolean_t replacing, boolean_t dryrun, int argc, char **argv)
+{
+	nvlist_t *newroot;
+	nvlist_t *poolconfig = NULL;
+	/*
+	 * NOTE(review): is_force is not declared in this function, so it is
+	 * presumably a file-scope global consumed by the in-use checks --
+	 * confirm against the rest of this file.
+	 */
+	is_force = force;
+
+	/*
+	 * Construct the vdev specification.  If this is successful, we know
+	 * that we have a valid specification, and that all devices can be
+	 * opened.
+	 */
+	if ((newroot = construct_spec(argc, argv)) == NULL)
+		return (NULL);
+
+	if (zhp && ((poolconfig = zpool_get_config(zhp, NULL)) == NULL))
+		return (NULL);
+
+	/*
+	 * Validate each device to make sure that its not shared with another
+	 * subsystem.  We do this even if 'force' is set, because there are some
+	 * uses (such as a dedicated dump device) that even '-f' cannot
+	 * override.
+	 */
+	if (check_in_use(poolconfig, newroot, force, replacing, B_FALSE) != 0) {
+		nvlist_free(newroot);
+		return (NULL);
+	}
+
+	/*
+	 * Check the replication level of the given vdevs and report any errors
+	 * found.  We include the existing pool spec, if any, as we need to
+	 * catch changes against the existing replication level.
+	 */
+	if (check_rep && check_replication(poolconfig, newroot) != 0) {
+		nvlist_free(newroot);
+		return (NULL);
+	}
+
+#ifdef sun
+	/*
+	 * Run through the vdev specification and label any whole disks found.
+	 */
+	if (!dryrun && make_disks(zhp, newroot) != 0) {
+		nvlist_free(newroot);
+		return (NULL);
+	}
+#endif
+
+	return (newroot);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.1 b/bsd/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.1
new file mode 100644
index 0000000000000000000000000000000000000000..6f8ca6e6eb879b2af55c38199c4cb8d9329f6c90
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.1
@@ -0,0 +1,67 @@
+'\" te
+.\" Copyright (c) 2011, Martin Matuska <mm@FreeBSD.org>.
+.\" All Rights Reserved.
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" Copyright (c) 2009, Sun Microsystems, Inc. All Rights Reserved.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 26, 2011
+.Dt ZSTREAMDUMP 8
+.Os
+.Sh NAME
+.Nm zstreamdump
+.Nd filter data in zfs send stream
+.Sh SYNOPSIS
+.Nm
+.Op Fl C
+.Op Fl v
+.Sh DESCRIPTION
+The
+.Nm
+utility reads from the output of the
+.Qq Nm zfs Cm send
+command, then displays headers and some statistics from that output. See
+.Xr zfs 8 .
+.Pp
+The following options are supported:
+.Bl -tag -width indent
+.It Fl C
+Suppress the validation of checksums.
+.It Fl v
+Verbose. Dump all headers, not only begin and end headers.
+.El
+.Sh SEE ALSO
+.Xr zfs 8
+.Sh AUTHORS
+This manual page is a
+.Xr mdoc 7
+reimplementation of the
+.Tn OpenSolaris
+manual page
+.Em zstreamdump(1M) ,
+modified and customized for
+.Fx
+and licensed under the
+.Tn Common Development and Distribution License
+.Pq Tn CDDL .
+.Pp
+The
+.Xr mdoc 7
+implementation of this manual page was initially written by
+.An Martin Matuska Aq mm@FreeBSD.org .
diff --git a/bsd/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.c b/bsd/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.c
new file mode 100644
index 0000000000000000000000000000000000000000..df23cc1e5a38a09526c2e4cd4e67d39568b4fe18
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/zstreamdump/zstreamdump.c
@@ -0,0 +1,429 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <libnvpair.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <sys/dmu.h>
+#include <sys/zfs_ioctl.h>
+#include <zfs_fletcher.h>
+
+/* Per-record-type counters for the final summary, indexed by DRR_* type. */
+uint64_t drr_record_count[DRR_NUMTYPES];
+/* Sum of DRR_WRITE payload lengths seen in the stream. */
+uint64_t total_write_size = 0;
+/* Total bytes consumed from the stream, headers included. */
+uint64_t total_stream_len = 0;
+FILE *send_stream = 0;			/* stdin once main() starts reading */
+boolean_t do_byteswap = B_FALSE;	/* stream is opposite-endian to host */
+boolean_t do_cksum = B_TRUE;		/* verify checksums unless -C given */
+#define	INITIAL_BUFLEN (1<<20)
+
+/*
+ * Print command-line usage to stderr and exit with a non-zero status.
+ */
+static void
+usage(void)
+{
+	(void) fprintf(stderr, "usage: zstreamdump [-v] [-C] < file\n");
+	(void) fprintf(stderr, "\t -v -- verbose\n");
+	(void) fprintf(stderr, "\t -C -- suppress checksum verification\n");
+	exit(1);
+}
+
+/*
+ * ssread - send stream read.
+ *
+ * Read while computing incremental checksum
+ */
+
+static size_t
+ssread(void *buf, size_t len, zio_cksum_t *cksum)
+{
+	size_t outlen;
+
+	/*
+	 * fread() is invoked with a single item of 'len' bytes, so outlen
+	 * is an item count (0 or 1), not a byte count.  NOTE(review): on a
+	 * short read/EOF this returns 0 with no partial-byte indication,
+	 * and total_stream_len below is advanced by the full 'len'
+	 * regardless -- presumably acceptable for a diagnostic tool, but
+	 * worth confirming.
+	 */
+	if ((outlen = fread(buf, len, 1, send_stream)) == 0)
+		return (0);
+
+	/* Fold the bytes just read into the caller's running checksum. */
+	if (do_cksum && cksum) {
+		if (do_byteswap)
+			fletcher_4_incremental_byteswap(buf, len, cksum);
+		else
+			fletcher_4_incremental_native(buf, len, cksum);
+	}
+	total_stream_len += len;
+	return (outlen);
+}
+
+/*
+ * Read a "zfs send" stream from stdin, dump the headers of each replay
+ * record (all records with -v, begin/end only otherwise), verify the
+ * embedded fletcher-4 checksums unless -C is given, and print a summary
+ * of record counts and sizes at end of stream.
+ */
+int
+main(int argc, char *argv[])
+{
+	char *buf = malloc(INITIAL_BUFLEN);
+	dmu_replay_record_t thedrr;
+	dmu_replay_record_t *drr = &thedrr;
+	struct drr_begin *drrb = &thedrr.drr_u.drr_begin;
+	struct drr_end *drre = &thedrr.drr_u.drr_end;
+	struct drr_object *drro = &thedrr.drr_u.drr_object;
+	struct drr_freeobjects *drrfo = &thedrr.drr_u.drr_freeobjects;
+	struct drr_write *drrw = &thedrr.drr_u.drr_write;
+	struct drr_write_byref *drrwbr = &thedrr.drr_u.drr_write_byref;
+	struct drr_free *drrf = &thedrr.drr_u.drr_free;
+	struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
+	/*
+	 * FIX: getopt() returns an int; with a plain 'char' the -1
+	 * sentinel can never compare equal on platforms where char is
+	 * unsigned, causing an infinite option loop.
+	 */
+	int c;
+	boolean_t verbose = B_FALSE;
+	boolean_t first = B_TRUE;
+	int err;
+	zio_cksum_t zc = { 0 };
+	zio_cksum_t pcksum = { 0 };
+
+	while ((c = getopt(argc, argv, ":vC")) != -1) {
+		switch (c) {
+		case 'C':
+			do_cksum = B_FALSE;
+			break;
+		case 'v':
+			verbose = B_TRUE;
+			break;
+		case ':':
+			(void) fprintf(stderr,
+			    "missing argument for '%c' option\n", optopt);
+			usage();
+			break;
+		case '?':
+			(void) fprintf(stderr, "invalid option '%c'\n",
+			    optopt);
+			usage();
+		}
+	}
+
+	if (isatty(STDIN_FILENO)) {
+		(void) fprintf(stderr,
+		    "Error: Backup stream can not be read "
+		    "from a terminal.\n"
+		    "You must redirect standard input.\n");
+		exit(1);
+	}
+
+	send_stream = stdin;
+	pcksum = zc;
+	while (ssread(drr, sizeof (dmu_replay_record_t), &zc)) {
+
+		/*
+		 * The first record tells us the stream's endianness via
+		 * its magic number.
+		 */
+		if (first) {
+			if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
+				do_byteswap = B_TRUE;
+				if (do_cksum) {
+					ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
+					/*
+					 * recalculate header checksum now
+					 * that we know it needs to be
+					 * byteswapped.
+					 */
+					fletcher_4_incremental_byteswap(drr,
+					    sizeof (dmu_replay_record_t), &zc);
+				}
+			} else if (drrb->drr_magic != DMU_BACKUP_MAGIC) {
+				(void) fprintf(stderr, "Invalid stream "
+				    "(bad magic number)\n");
+				exit(1);
+			}
+			first = B_FALSE;
+		}
+		if (do_byteswap) {
+			drr->drr_type = BSWAP_32(drr->drr_type);
+			drr->drr_payloadlen =
+			    BSWAP_32(drr->drr_payloadlen);
+		}
+
+		/*
+		 * At this point, the leading fields of the replay record
+		 * (drr_type and drr_payloadlen) have been byte-swapped if
+		 * necessary, but the rest of the data structure (the
+		 * union of type-specific structures) is still in its
+		 * original state.
+		 */
+		if (drr->drr_type >= DRR_NUMTYPES) {
+			(void) printf("INVALID record found: type 0x%x\n",
+			    drr->drr_type);
+			(void) printf("Aborting.\n");
+			exit(1);
+		}
+
+		drr_record_count[drr->drr_type]++;
+
+		switch (drr->drr_type) {
+		case DRR_BEGIN:
+			if (do_byteswap) {
+				drrb->drr_magic = BSWAP_64(drrb->drr_magic);
+				drrb->drr_versioninfo =
+				    BSWAP_64(drrb->drr_versioninfo);
+				drrb->drr_creation_time =
+				    BSWAP_64(drrb->drr_creation_time);
+				drrb->drr_type = BSWAP_32(drrb->drr_type);
+				drrb->drr_flags = BSWAP_32(drrb->drr_flags);
+				drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
+				drrb->drr_fromguid =
+				    BSWAP_64(drrb->drr_fromguid);
+			}
+
+			(void) printf("BEGIN record\n");
+			/*
+			 * FIX: cast the uint64_t macro results to
+			 * u_longlong_t to match the %llx/%lld conversions
+			 * portably.
+			 */
+			(void) printf("\thdrtype = %lld\n",
+			    (u_longlong_t)DMU_GET_STREAM_HDRTYPE(
+			    drrb->drr_versioninfo));
+			(void) printf("\tfeatures = %llx\n",
+			    (u_longlong_t)DMU_GET_FEATUREFLAGS(
+			    drrb->drr_versioninfo));
+			(void) printf("\tmagic = %llx\n",
+			    (u_longlong_t)drrb->drr_magic);
+			(void) printf("\tcreation_time = %llx\n",
+			    (u_longlong_t)drrb->drr_creation_time);
+			(void) printf("\ttype = %u\n", drrb->drr_type);
+			(void) printf("\tflags = 0x%x\n", drrb->drr_flags);
+			(void) printf("\ttoguid = %llx\n",
+			    (u_longlong_t)drrb->drr_toguid);
+			(void) printf("\tfromguid = %llx\n",
+			    (u_longlong_t)drrb->drr_fromguid);
+			(void) printf("\ttoname = %s\n", drrb->drr_toname);
+			if (verbose)
+				(void) printf("\n");
+
+			/*
+			 * Compound streams carry an nvlist payload right
+			 * after the BEGIN record; unpack and pretty-print it.
+			 */
+			if ((DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
+			    DMU_COMPOUNDSTREAM) && drr->drr_payloadlen != 0) {
+				nvlist_t *nv;
+				int sz = drr->drr_payloadlen;
+
+				if (sz > INITIAL_BUFLEN) {
+					free(buf);
+					buf = malloc(sz);
+				}
+				(void) ssread(buf, sz, &zc);
+				if (ferror(send_stream))
+					perror("fread");
+				err = nvlist_unpack(buf, sz, &nv, 0);
+				if (err)
+					perror(strerror(err));
+				nvlist_print(stdout, nv);
+				nvlist_free(nv);
+			}
+			break;
+
+		case DRR_END:
+			if (do_byteswap) {
+				drre->drr_checksum.zc_word[0] =
+				    BSWAP_64(drre->drr_checksum.zc_word[0]);
+				drre->drr_checksum.zc_word[1] =
+				    BSWAP_64(drre->drr_checksum.zc_word[1]);
+				drre->drr_checksum.zc_word[2] =
+				    BSWAP_64(drre->drr_checksum.zc_word[2]);
+				drre->drr_checksum.zc_word[3] =
+				    BSWAP_64(drre->drr_checksum.zc_word[3]);
+			}
+			/*
+			 * We compare against the *previous* checksum
+			 * value, because the stored checksum is of
+			 * everything before the DRR_END record.
+			 */
+			if (do_cksum && !ZIO_CHECKSUM_EQUAL(drre->drr_checksum,
+			    pcksum)) {
+				(void) printf("Expected checksum differs from "
+				    "checksum in stream.\n");
+				(void) printf("Expected checksum = "
+				    "%llx/%llx/%llx/%llx\n",
+				    (u_longlong_t)pcksum.zc_word[0],
+				    (u_longlong_t)pcksum.zc_word[1],
+				    (u_longlong_t)pcksum.zc_word[2],
+				    (u_longlong_t)pcksum.zc_word[3]);
+			}
+			(void) printf("END checksum = %llx/%llx/%llx/%llx\n",
+			    (u_longlong_t)drre->drr_checksum.zc_word[0],
+			    (u_longlong_t)drre->drr_checksum.zc_word[1],
+			    (u_longlong_t)drre->drr_checksum.zc_word[2],
+			    (u_longlong_t)drre->drr_checksum.zc_word[3]);
+
+			/* a new substream may follow; restart the checksum */
+			ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
+			break;
+
+		case DRR_OBJECT:
+			if (do_byteswap) {
+				drro->drr_object = BSWAP_64(drro->drr_object);
+				drro->drr_type = BSWAP_32(drro->drr_type);
+				drro->drr_bonustype =
+				    BSWAP_32(drro->drr_bonustype);
+				drro->drr_blksz = BSWAP_32(drro->drr_blksz);
+				drro->drr_bonuslen =
+				    BSWAP_32(drro->drr_bonuslen);
+				drro->drr_toguid = BSWAP_64(drro->drr_toguid);
+			}
+			if (verbose) {
+				(void) printf("OBJECT object = %llu type = %u "
+				    "bonustype = %u blksz = %u bonuslen = %u\n",
+				    (u_longlong_t)drro->drr_object,
+				    drro->drr_type,
+				    drro->drr_bonustype,
+				    drro->drr_blksz,
+				    drro->drr_bonuslen);
+			}
+			if (drro->drr_bonuslen > 0) {
+				/* bonus data is padded to 8 bytes in stream */
+				(void) ssread(buf, P2ROUNDUP(drro->drr_bonuslen,
+				    8), &zc);
+			}
+			break;
+
+		case DRR_FREEOBJECTS:
+			if (do_byteswap) {
+				drrfo->drr_firstobj =
+				    BSWAP_64(drrfo->drr_firstobj);
+				drrfo->drr_numobjs =
+				    BSWAP_64(drrfo->drr_numobjs);
+				drrfo->drr_toguid = BSWAP_64(drrfo->drr_toguid);
+			}
+			if (verbose) {
+				(void) printf("FREEOBJECTS firstobj = %llu "
+				    "numobjs = %llu\n",
+				    (u_longlong_t)drrfo->drr_firstobj,
+				    (u_longlong_t)drrfo->drr_numobjs);
+			}
+			break;
+
+		case DRR_WRITE:
+			if (do_byteswap) {
+				drrw->drr_object = BSWAP_64(drrw->drr_object);
+				drrw->drr_type = BSWAP_32(drrw->drr_type);
+				drrw->drr_offset = BSWAP_64(drrw->drr_offset);
+				drrw->drr_length = BSWAP_64(drrw->drr_length);
+				drrw->drr_toguid = BSWAP_64(drrw->drr_toguid);
+				drrw->drr_key.ddk_prop =
+				    BSWAP_64(drrw->drr_key.ddk_prop);
+			}
+			if (verbose) {
+				(void) printf("WRITE object = %llu type = %u "
+				    "checksum type = %u\n"
+				    "offset = %llu length = %llu "
+				    "props = %llx\n",
+				    (u_longlong_t)drrw->drr_object,
+				    drrw->drr_type,
+				    drrw->drr_checksumtype,
+				    (u_longlong_t)drrw->drr_offset,
+				    (u_longlong_t)drrw->drr_length,
+				    (u_longlong_t)drrw->drr_key.ddk_prop);
+			}
+			/*
+			 * NOTE(review): the payload is read into 'buf',
+			 * which presumably is always large enough for a
+			 * single block (INITIAL_BUFLEN = 1 MB) -- confirm
+			 * against the maximum record size.
+			 */
+			(void) ssread(buf, drrw->drr_length, &zc);
+			total_write_size += drrw->drr_length;
+			break;
+
+		case DRR_WRITE_BYREF:
+			if (do_byteswap) {
+				drrwbr->drr_object =
+				    BSWAP_64(drrwbr->drr_object);
+				drrwbr->drr_offset =
+				    BSWAP_64(drrwbr->drr_offset);
+				drrwbr->drr_length =
+				    BSWAP_64(drrwbr->drr_length);
+				drrwbr->drr_toguid =
+				    BSWAP_64(drrwbr->drr_toguid);
+				drrwbr->drr_refguid =
+				    BSWAP_64(drrwbr->drr_refguid);
+				drrwbr->drr_refobject =
+				    BSWAP_64(drrwbr->drr_refobject);
+				drrwbr->drr_refoffset =
+				    BSWAP_64(drrwbr->drr_refoffset);
+				drrwbr->drr_key.ddk_prop =
+				    BSWAP_64(drrwbr->drr_key.ddk_prop);
+			}
+			if (verbose) {
+				(void) printf("WRITE_BYREF object = %llu "
+				    "checksum type = %u props = %llx\n"
+				    "offset = %llu length = %llu\n"
+				    "toguid = %llx refguid = %llx\n"
+				    "refobject = %llu refoffset = %llu\n",
+				    (u_longlong_t)drrwbr->drr_object,
+				    drrwbr->drr_checksumtype,
+				    (u_longlong_t)drrwbr->drr_key.ddk_prop,
+				    (u_longlong_t)drrwbr->drr_offset,
+				    (u_longlong_t)drrwbr->drr_length,
+				    (u_longlong_t)drrwbr->drr_toguid,
+				    (u_longlong_t)drrwbr->drr_refguid,
+				    (u_longlong_t)drrwbr->drr_refobject,
+				    (u_longlong_t)drrwbr->drr_refoffset);
+			}
+			break;
+
+		case DRR_FREE:
+			if (do_byteswap) {
+				drrf->drr_object = BSWAP_64(drrf->drr_object);
+				drrf->drr_offset = BSWAP_64(drrf->drr_offset);
+				drrf->drr_length = BSWAP_64(drrf->drr_length);
+			}
+			if (verbose) {
+				(void) printf("FREE object = %llu "
+				    "offset = %llu length = %lld\n",
+				    (u_longlong_t)drrf->drr_object,
+				    (u_longlong_t)drrf->drr_offset,
+				    (longlong_t)drrf->drr_length);
+			}
+			break;
+		case DRR_SPILL:
+			if (do_byteswap) {
+				drrs->drr_object = BSWAP_64(drrs->drr_object);
+				drrs->drr_length = BSWAP_64(drrs->drr_length);
+			}
+			if (verbose) {
+				/* FIX: cast uint64_t fields for %llu */
+				(void) printf("SPILL block for object = %llu "
+				    "length = %llu\n",
+				    (u_longlong_t)drrs->drr_object,
+				    (u_longlong_t)drrs->drr_length);
+			}
+			(void) ssread(buf, drrs->drr_length, &zc);
+			break;
+		}
+		pcksum = zc;
+	}
+	free(buf);
+
+	/* Print final summary */
+
+	(void) printf("SUMMARY:\n");
+	(void) printf("\tTotal DRR_BEGIN records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_BEGIN]);
+	(void) printf("\tTotal DRR_END records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_END]);
+	(void) printf("\tTotal DRR_OBJECT records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_OBJECT]);
+	(void) printf("\tTotal DRR_FREEOBJECTS records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_FREEOBJECTS]);
+	(void) printf("\tTotal DRR_WRITE records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_WRITE]);
+	/* FIX: DRR_WRITE_BYREF was counted above but omitted from summary */
+	(void) printf("\tTotal DRR_WRITE_BYREF records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_WRITE_BYREF]);
+	(void) printf("\tTotal DRR_FREE records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_FREE]);
+	(void) printf("\tTotal DRR_SPILL records = %lld\n",
+	    (u_longlong_t)drr_record_count[DRR_SPILL]);
+	(void) printf("\tTotal records = %lld\n",
+	    (u_longlong_t)(drr_record_count[DRR_BEGIN] +
+	    drr_record_count[DRR_OBJECT] +
+	    drr_record_count[DRR_FREEOBJECTS] +
+	    drr_record_count[DRR_WRITE] +
+	    drr_record_count[DRR_WRITE_BYREF] +
+	    drr_record_count[DRR_FREE] +
+	    drr_record_count[DRR_SPILL] +
+	    drr_record_count[DRR_END]));
+	(void) printf("\tTotal write size = %lld (0x%llx)\n",
+	    (u_longlong_t)total_write_size, (u_longlong_t)total_write_size);
+	(void) printf("\tTotal stream length = %lld (0x%llx)\n",
+	    (u_longlong_t)total_stream_len, (u_longlong_t)total_stream_len);
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/cmd/ztest/ztest.c b/bsd/cddl/contrib/opensolaris/cmd/ztest/ztest.c
new file mode 100644
index 0000000000000000000000000000000000000000..9c38b3f3434816d09e4da25d7696521d41349170
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/cmd/ztest/ztest.c
@@ -0,0 +1,6209 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
+ * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>.  All rights reserved.
+ */
+
+/*
+ * The objective of this program is to provide a DMU/ZAP/SPA stress test
+ * that runs entirely in userland, is easy to use, and easy to extend.
+ *
+ * The overall design of the ztest program is as follows:
+ *
+ * (1) For each major functional area (e.g. adding vdevs to a pool,
+ *     creating and destroying datasets, reading and writing objects, etc)
+ *     we have a simple routine to test that functionality.  These
+ *     individual routines do not have to do anything "stressful".
+ *
+ * (2) We turn these simple functionality tests into a stress test by
+ *     running them all in parallel, with as many threads as desired,
+ *     and spread across as many datasets, objects, and vdevs as desired.
+ *
+ * (3) While all this is happening, we inject faults into the pool to
+ *     verify that self-healing data really works.
+ *
+ * (4) Every time we open a dataset, we change its checksum and compression
+ *     functions.  Thus even individual objects vary from block to block
+ *     in which checksum they use and whether they're compressed.
+ *
+ * (5) To verify that we never lose on-disk consistency after a crash,
+ *     we run the entire test in a child of the main process.
+ *     At random times, the child self-immolates with a SIGKILL.
+ *     This is the software equivalent of pulling the power cord.
+ *     The parent then runs the test again, using the existing
+ *     storage pool, as many times as desired. If backwards compatibility
+ *     testing is enabled ztest will sometimes run the "older" version
+ *     of ztest after a SIGKILL.
+ *
+ * (6) To verify that we don't have future leaks or temporal incursions,
+ *     many of the functional tests record the transaction group number
+ *     as part of their data.  When reading old data, they verify that
+ *     the transaction group number is less than the current, open txg.
+ *     If you add a new test, please do this if applicable.
+ *
+ * When run with no arguments, ztest runs for about five minutes and
+ * produces no output if successful.  To get a little bit of information,
+ * specify -V.  To get more information, specify -VV, and so on.
+ *
+ * To turn this into an overnight stress test, use -T to specify run time.
+ *
+ * You can ask for more vdevs [-v], datasets [-d], or threads [-t]
+ * to increase the pool capacity, fanout, and overall stress level.
+ *
+ * Use the -k option to set the desired frequency of kills.
+ *
+ * When ztest invokes itself it passes all relevant information through a
+ * temporary file which is mmap-ed in the child process. This allows shared
+ * memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
+ * stored at offset 0 of this file and contains information on the size and
+ * number of shared structures in the file. The information stored in this file
+ * must remain backwards compatible with older versions of ztest so that
+ * ztest can invoke them during backwards compatibility testing (-B).
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/spa.h>
+#include <sys/dmu.h>
+#include <sys/txg.h>
+#include <sys/dbuf.h>
+#include <sys/zap.h>
+#include <sys/dmu_objset.h>
+#include <sys/poll.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/zio.h>
+#include <sys/zil.h>
+#include <sys/zil_impl.h>
+#include <sys/vdev_impl.h>
+#include <sys/vdev_file.h>
+#include <sys/spa_impl.h>
+#include <sys/metaslab_impl.h>
+#include <sys/dsl_prop.h>
+#include <sys/dsl_dataset.h>
+#include <sys/dsl_scan.h>
+#include <sys/zio_checksum.h>
+#include <sys/refcount.h>
+#include <sys/zfeature.h>
+#include <stdio.h>
+#include <stdio_ext.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <umem.h>
+#include <dlfcn.h>
+#include <ctype.h>
+#include <math.h>
+#include <errno.h>
+#include <sys/fs/zfs.h>
+#include <libnvpair.h>
+
+/*
+ * File descriptors kept open across the self-exec: the mmap-ed data-sharing
+ * temp file and the random-number source (/dev/urandom per the error text
+ * in ztest_random()).  -1 means "not yet opened".
+ */
+static int ztest_fd_data = -1;
+static int ztest_fd_rand = -1;
+
+/*
+ * Header stored at offset 0 of the mmap-ed sharing file (see the block
+ * comment at the top of this file).  It records the size and count of each
+ * shared structure that follows, so differently-versioned ztest binaries
+ * can locate them during backwards-compatibility testing.
+ */
+typedef struct ztest_shared_hdr {
+	uint64_t	zh_hdr_size;	/* size of this header itself */
+	uint64_t	zh_opts_size;	/* sizeof (ztest_shared_opts_t) */
+	uint64_t	zh_size;	/* sizeof (ztest_shared_t) */
+	uint64_t	zh_stats_size;	/* size of one callstate record */
+	uint64_t	zh_stats_count;	/* number of callstate records */
+	uint64_t	zh_ds_size;	/* size of one per-dataset record */
+	uint64_t	zh_ds_count;	/* number of per-dataset records */
+} ztest_shared_hdr_t;
+
+/* Points into the mmap-ed sharing file. */
+static ztest_shared_hdr_t *ztest_shared_hdr;
+
+/*
+ * Run-time options, shared between parent and child via the mmap-ed file.
+ * Populated from the defaults below plus command-line flags in
+ * process_options(); the flag letter for each field is noted in usage().
+ */
+typedef struct ztest_shared_opts {
+	char zo_pool[MAXNAMELEN];	/* -p: pool name */
+	char zo_dir[MAXNAMELEN];	/* -f: directory for vdev files */
+	char zo_alt_ztest[MAXNAMELEN];	/* -B: alternate ztest binary */
+	char zo_alt_libpath[MAXNAMELEN]; /* -B: alternate library path */
+	uint64_t zo_vdevs;		/* -v: number of top-level vdevs */
+	uint64_t zo_vdevtime;		/* derived: ns between vdev changes */
+	size_t zo_vdev_size;		/* -s: size of each vdev file */
+	int zo_ashift;			/* -a: 0 means randomize */
+	int zo_mirrors;			/* -m */
+	int zo_raidz;			/* -r */
+	int zo_raidz_parity;		/* -R: clamped to [1, 3] */
+	int zo_datasets;		/* -d */
+	int zo_threads;			/* -t */
+	uint64_t zo_passtime;		/* -P: seconds per pass */
+	uint64_t zo_killrate;		/* -k: SIGKILL percentage */
+	int zo_verbose;			/* -V (cumulative) */
+	int zo_init;			/* -i; 0 (-E) reuses existing pool */
+	uint64_t zo_time;		/* -T: total run time in seconds */
+	uint64_t zo_maxloops;		/* -F: max loops in spa_freeze() */
+	uint64_t zo_metaslab_gang_bang;	/* -g: gang-block threshold */
+} ztest_shared_opts_t;
+
+/*
+ * Baseline option values; process_options() copies these into ztest_opts
+ * before applying command-line overrides.  The string fields are spelled
+ * as char initializers to keep the structure trivially copyable.
+ */
+static const ztest_shared_opts_t ztest_opts_defaults = {
+	.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
+	.zo_dir = { '/', 't', 'm', 'p', '\0' },
+	.zo_alt_ztest = { '\0' },
+	.zo_alt_libpath = { '\0' },
+	.zo_vdevs = 5,
+	.zo_ashift = SPA_MINBLOCKSHIFT,
+	.zo_mirrors = 2,
+	.zo_raidz = 4,
+	.zo_raidz_parity = 1,
+	.zo_vdev_size = SPA_MINDEVSIZE,
+	.zo_datasets = 7,
+	.zo_threads = 23,
+	.zo_passtime = 60,		/* 60 seconds */
+	.zo_killrate = 70,		/* 70% kill rate */
+	.zo_verbose = 0,
+	.zo_init = 1,
+	.zo_time = 300,			/* 5 minutes */
+	.zo_maxloops = 50,		/* max loops during spa_freeze() */
+	.zo_metaslab_gang_bang = 32 << 10
+};
+
+/* Tunables defined in the metaslab allocator; ztest adjusts them. */
+extern uint64_t metaslab_gang_bang;
+extern uint64_t metaslab_df_alloc_threshold;
+
+/* Options view inside the mmap-ed file, and the process-local working copy. */
+static ztest_shared_opts_t *ztest_shared_opts;
+static ztest_shared_opts_t ztest_opts;
+
+/* Per-dataset state shared between parent and child processes. */
+typedef struct ztest_shared_ds {
+	uint64_t	zd_seq;		/* ZIL commit sequence number */
+} ztest_shared_ds_t;
+
+static ztest_shared_ds_t *ztest_shared_ds;
+#define	ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
+
+/* Magic value identifying a ztest_block_tag_t. */
+#define	BT_MAGIC	0x123456789abcdefULL
+/* Max simultaneous device failures the pool layout can survive. */
+#define	MAXFAULTS() \
+	(MAX(zs->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
+
+/* Kinds of I/O exercised against a dataset; ZTEST_IO_TYPES is the count. */
+enum ztest_io_type {
+	ZTEST_IO_WRITE_TAG,
+	ZTEST_IO_WRITE_PATTERN,
+	ZTEST_IO_WRITE_ZEROES,
+	ZTEST_IO_TRUNCATE,
+	ZTEST_IO_SETATTR,
+	ZTEST_IO_REWRITE,
+	ZTEST_IO_TYPES
+};
+
+/*
+ * Self-describing tag written into test blocks; ztest_bt_verify() checks
+ * these fields to detect lost writes and temporal incursions.
+ */
+typedef struct ztest_block_tag {
+	uint64_t	bt_magic;	/* BT_MAGIC */
+	uint64_t	bt_objset;	/* objset id the block belongs to */
+	uint64_t	bt_object;	/* object number */
+	uint64_t	bt_offset;	/* byte offset within the object */
+	uint64_t	bt_gen;		/* generation when written */
+	uint64_t	bt_txg;		/* txg the write was assigned to */
+	uint64_t	bt_crtxg;	/* txg the object was created in */
+} ztest_block_tag_t;
+
+/* Small tagged payload used by the read/write tests. */
+typedef struct bufwad {
+	uint64_t	bw_index;
+	uint64_t	bw_txg;
+	uint64_t	bw_data;
+} bufwad_t;
+
+/*
+ * XXX -- fix zfs range locks to be generic so we can use them here.
+ */
+typedef enum {
+	RL_READER,
+	RL_WRITER,
+	RL_APPEND
+} rl_type_t;
+
+/* A simple reader/writer lock built on a mutex and condition variable. */
+typedef struct rll {
+	void		*rll_writer;	/* owning thread, or NULL */
+	int		rll_readers;	/* active reader count */
+	mutex_t		rll_lock;
+	cond_t		rll_cv;
+} rll_t;
+
+/* A held range lock: the locked extent plus the rll_t that backs it. */
+typedef struct rl {
+	uint64_t	rl_object;
+	uint64_t	rl_offset;
+	uint64_t	rl_size;
+	rll_t		*rl_lock;
+} rl_t;
+
+/* Sizes of the per-dataset hashed lock arrays (both powers of two). */
+#define	ZTEST_RANGE_LOCKS	64
+#define	ZTEST_OBJECT_LOCKS	64
+
+/*
+ * Object descriptor.  Used as a template for object lookup/create/remove.
+ */
+typedef struct ztest_od {
+	uint64_t	od_dir;		/* directory object */
+	uint64_t	od_object;	/* object number (0 = not yet known) */
+	dmu_object_type_t od_type;	/* actual type found */
+	dmu_object_type_t od_crtype;	/* type to use on creation */
+	uint64_t	od_blocksize;	/* actual blocksize found */
+	uint64_t	od_crblocksize;	/* blocksize to use on creation */
+	uint64_t	od_gen;		/* actual generation found */
+	uint64_t	od_crgen;	/* generation to use on creation */
+	char		od_name[MAXNAMELEN];	/* ZAP name of the object */
+} ztest_od_t;
+
+/*
+ * Per-dataset state.
+ */
+typedef struct ztest_ds {
+	ztest_shared_ds_t *zd_shared;	/* shared-file slot (may be NULL) */
+	objset_t	*zd_os;		/* open objset for this dataset */
+	rwlock_t	zd_zilog_lock;	/* serializes zilog remount vs use */
+	zilog_t		*zd_zilog;	/* the dataset's ZIL */
+	ztest_od_t	*zd_od;		/* debugging aid */
+	char		zd_name[MAXNAMELEN];	/* dataset name */
+	mutex_t		zd_dirobj_lock;	/* protects the directory object */
+	rll_t		zd_object_lock[ZTEST_OBJECT_LOCKS];
+	rll_t		zd_range_lock[ZTEST_RANGE_LOCKS];
+} ztest_ds_t;
+
+/*
+ * Per-iteration state.
+ */
+typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
+
+/* One entry in the ztest_info[] table below. */
+typedef struct ztest_info {
+	ztest_func_t	*zi_func;	/* test function */
+	uint64_t	zi_iters;	/* iterations per execution */
+	uint64_t	*zi_interval;	/* execute every <interval> seconds */
+} ztest_info_t;
+
+/* Per-test-function statistics, shared across the exec boundary. */
+typedef struct ztest_shared_callstate {
+	uint64_t	zc_count;	/* per-pass count */
+	uint64_t	zc_time;	/* per-pass time */
+	uint64_t	zc_next;	/* next time to call this function */
+} ztest_shared_callstate_t;
+
+static ztest_shared_callstate_t *ztest_shared_callstate;
+#define	ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
+
+/*
+ * Note: these aren't static because we want dladdr() to work.
+ */
+ztest_func_t ztest_dmu_read_write;
+ztest_func_t ztest_dmu_write_parallel;
+ztest_func_t ztest_dmu_object_alloc_free;
+ztest_func_t ztest_dmu_commit_callbacks;
+ztest_func_t ztest_zap;
+ztest_func_t ztest_zap_parallel;
+ztest_func_t ztest_zil_commit;
+ztest_func_t ztest_zil_remount;
+ztest_func_t ztest_dmu_read_write_zcopy;
+ztest_func_t ztest_dmu_objset_create_destroy;
+ztest_func_t ztest_dmu_prealloc;
+ztest_func_t ztest_fzap;
+ztest_func_t ztest_dmu_snapshot_create_destroy;
+ztest_func_t ztest_dsl_prop_get_set;
+ztest_func_t ztest_spa_prop_get_set;
+ztest_func_t ztest_spa_create_destroy;
+ztest_func_t ztest_fault_inject;
+ztest_func_t ztest_ddt_repair;
+ztest_func_t ztest_dmu_snapshot_hold;
+ztest_func_t ztest_spa_rename;
+ztest_func_t ztest_scrub;
+ztest_func_t ztest_dsl_dataset_promote_busy;
+ztest_func_t ztest_vdev_attach_detach;
+ztest_func_t ztest_vdev_LUN_growth;
+ztest_func_t ztest_vdev_add_remove;
+ztest_func_t ztest_vdev_aux_add_remove;
+ztest_func_t ztest_split_pool;
+ztest_func_t ztest_reguid;
+ztest_func_t ztest_spa_upgrade;
+
+/* Scheduling intervals (in ns) referenced by the ztest_info[] table. */
+uint64_t zopt_always = 0ULL * NANOSEC;		/* all the time */
+uint64_t zopt_incessant = 1ULL * NANOSEC / 10;	/* every 1/10 second */
+uint64_t zopt_often = 1ULL * NANOSEC;		/* every second */
+uint64_t zopt_sometimes = 10ULL * NANOSEC;	/* every 10 seconds */
+uint64_t zopt_rarely = 60ULL * NANOSEC;		/* every 60 seconds */
+
+/*
+ * Master table of test functions: each entry pairs a function with its
+ * per-execution iteration count and a pointer to its scheduling interval
+ * (intervals are pointers so the vdev entries can track zo_vdevtime).
+ */
+ztest_info_t ztest_info[] = {
+	{ ztest_dmu_read_write,			1,	&zopt_always	},
+	{ ztest_dmu_write_parallel,		10,	&zopt_always	},
+	{ ztest_dmu_object_alloc_free,		1,	&zopt_always	},
+	{ ztest_dmu_commit_callbacks,		1,	&zopt_always	},
+	{ ztest_zap,				30,	&zopt_always	},
+	{ ztest_zap_parallel,			100,	&zopt_always	},
+	{ ztest_split_pool,			1,	&zopt_always	},
+	{ ztest_zil_commit,			1,	&zopt_incessant	},
+	{ ztest_zil_remount,			1,	&zopt_sometimes	},
+	{ ztest_dmu_read_write_zcopy,		1,	&zopt_often	},
+	{ ztest_dmu_objset_create_destroy,	1,	&zopt_often	},
+	{ ztest_dsl_prop_get_set,		1,	&zopt_often	},
+	{ ztest_spa_prop_get_set,		1,	&zopt_sometimes	},
+#if 0
+	{ ztest_dmu_prealloc,			1,	&zopt_sometimes	},
+#endif
+	{ ztest_fzap,				1,	&zopt_sometimes	},
+	{ ztest_dmu_snapshot_create_destroy,	1,	&zopt_sometimes	},
+	{ ztest_spa_create_destroy,		1,	&zopt_sometimes	},
+	{ ztest_fault_inject,			1,	&zopt_sometimes	},
+	{ ztest_ddt_repair,			1,	&zopt_sometimes	},
+	{ ztest_dmu_snapshot_hold,		1,	&zopt_sometimes	},
+	{ ztest_reguid,				1,	&zopt_sometimes },
+	{ ztest_spa_rename,			1,	&zopt_rarely	},
+	{ ztest_scrub,				1,	&zopt_rarely	},
+	{ ztest_spa_upgrade,			1,	&zopt_rarely	},
+	{ ztest_dsl_dataset_promote_busy,	1,	&zopt_rarely	},
+	{ ztest_vdev_attach_detach,		1,	&zopt_rarely	},
+	{ ztest_vdev_LUN_growth,		1,	&zopt_rarely	},
+	{ ztest_vdev_add_remove,		1,
+	    &ztest_opts.zo_vdevtime				},
+	{ ztest_vdev_aux_add_remove,		1,
+	    &ztest_opts.zo_vdevtime				},
+};
+
+#define	ZTEST_FUNCS	(sizeof (ztest_info) / sizeof (ztest_info_t))
+
+/*
+ * The following struct is used to hold a list of uncalled commit callbacks.
+ * The callbacks are ordered by txg number.
+ */
+typedef struct ztest_cb_list {
+	mutex_t	zcl_callbacks_lock;	/* protects zcl_callbacks */
+	list_t	zcl_callbacks;
+} ztest_cb_list_t;
+
+/*
+ * Stuff we need to share writably between parent and child.
+ */
+typedef struct ztest_shared {
+	boolean_t	zs_do_init;	/* pool (re)initialization pending */
+	hrtime_t	zs_proc_start;
+	hrtime_t	zs_proc_stop;
+	hrtime_t	zs_thread_start;
+	hrtime_t	zs_thread_stop;
+	hrtime_t	zs_thread_kill;	/* when to self-SIGKILL */
+	uint64_t	zs_enospc_count;
+	uint64_t	zs_vdev_next_leaf;
+	uint64_t	zs_vdev_aux;
+	uint64_t	zs_alloc;	/* allocated space at kill time */
+	uint64_t	zs_space;	/* total space at kill time */
+	uint64_t	zs_splits;
+	uint64_t	zs_mirrors;
+	uint64_t	zs_metaslab_sz;
+	uint64_t	zs_metaslab_df_alloc_threshold;
+	uint64_t	zs_guid;
+} ztest_shared_t;
+
+/* Object-id marker meaning "use the parallel (shared) object". */
+#define	ID_PARALLEL	-1ULL
+
+/* printf templates for vdev file names: <dir>/<pool>.<n>a and aux files. */
+static char ztest_dev_template[] = "%s/%s.%llua";
+static char ztest_aux_template[] = "%s/%s.%s.%llu";
+ztest_shared_t *ztest_shared;
+
+static spa_t *ztest_spa = NULL;
+static ztest_ds_t *ztest_ds;
+
+static mutex_t ztest_vdev_lock;
+
+/*
+ * The ztest_name_lock protects the pool and dataset namespace used by
+ * the individual tests. To modify the namespace, consumers must grab
+ * this lock as writer. Grabbing the lock as reader will ensure that the
+ * namespace does not change while the lock is held.
+ */
+static rwlock_t ztest_name_lock;
+
+/* Cleared before fatal() on user errors so we exit instead of abort(). */
+static boolean_t ztest_dump_core = B_TRUE;
+static boolean_t ztest_exiting;
+
+/* Global commit callback list */
+static ztest_cb_list_t zcl;
+
+/* Fixed object numbers present in every test dataset. */
+enum ztest_object {
+	ZTEST_META_DNODE = 0,
+	ZTEST_DIROBJ,
+	ZTEST_OBJECTS
+};
+
+static void usage(boolean_t) __NORETURN;
+
+/*
+ * These libumem hooks provide a reasonable set of defaults for the allocator's
+ * debugging facilities.
+ */
+/*
+ * libumem debugging hook: supplies the default $UMEM_DEBUG setting.
+ * Declared (void) to provide a full prototype, matching
+ * _umem_logging_init() below (the original empty () declared an
+ * unspecified-argument function).
+ */
+const char *
+_umem_debug_init(void)
+{
+	return ("default,verbose"); /* $UMEM_DEBUG setting */
+}
+
+/*
+ * libumem logging hook: supplies the default $UMEM_LOGGING setting.
+ */
+const char *
+_umem_logging_init(void)
+{
+	static const char logging_defaults[] = "fail,contents";
+
+	return (logging_defaults); /* $UMEM_LOGGING setting */
+}
+
+/* Size of the message buffer assembled by fatal(). */
+#define	FATAL_MSG_SZ	1024
+
+/* Last fatal message; set by fatal() "to ease debugging" from a core. */
+char *fatal_msg;
+
+/*
+ * Print a printf-style error message (optionally suffixed with
+ * strerror(errno) when do_perror is set), then abort() if ztest_dump_core
+ * is still set, otherwise exit(3).  Never returns.
+ *
+ * Fix: the original used sprintf()/vsprintf() into the fixed-size stack
+ * buffer, so an oversized message overflowed it; use the bounded
+ * snprintf()/vsnprintf() forms instead.
+ */
+static void
+fatal(int do_perror, char *message, ...)
+{
+	va_list args;
+	int save_errno = errno;		/* vsnprintf() etc. may clobber errno */
+	char buf[FATAL_MSG_SZ];
+
+	(void) fflush(stdout);
+
+	va_start(args, message);
+	(void) snprintf(buf, sizeof (buf), "ztest: ");
+	/* LINTED */
+	(void) vsnprintf(buf + strlen(buf), sizeof (buf) - strlen(buf),
+	    message, args);
+	va_end(args);
+	if (do_perror) {
+		(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
+		    ": %s", strerror(save_errno));
+	}
+	(void) fprintf(stderr, "%s\n", buf);
+	fatal_msg = buf;			/* to ease debugging */
+	if (ztest_dump_core)
+		abort();
+	exit(3);
+}
+
+/*
+ * Map a size-suffix string to its power-of-two bit shift:
+ * "" -> 0, B -> 0, K -> 10, M -> 20, ... Z -> 70.  A lone suffix letter
+ * or "<letter>B" (case-insensitive) is accepted; anything else prints an
+ * error and calls usage() (which does not return).
+ *
+ * Fix: hoist the loop-invariant strlen(ends) out of the loop condition
+ * instead of recomputing it every iteration.
+ */
+static int
+str2shift(const char *buf)
+{
+	const char *ends = "BKMGTPEZ";
+	size_t nsuffixes = strlen(ends);
+	int i;
+
+	if (buf[0] == '\0')
+		return (0);
+	for (i = 0; i < nsuffixes; i++) {
+		if (toupper(buf[0]) == ends[i])
+			break;
+	}
+	if (i == nsuffixes) {
+		(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
+		    buf);
+		usage(B_FALSE);
+	}
+	if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
+		return (10*i);
+	}
+	(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
+	usage(B_FALSE);
+	/* NOTREACHED */
+}
+
+/*
+ * Parse a human-friendly number such as "128k" or "1.5G" into a uint64_t.
+ * Integral values are scaled by the suffix shift from str2shift(); values
+ * containing a '.' are parsed as doubles so fractions like "1.5G" work.
+ * Bad input or overflow prints an error and calls usage() (no return).
+ */
+static uint64_t
+nicenumtoull(const char *buf)
+{
+	char *end;
+	uint64_t val;
+
+	val = strtoull(buf, &end, 0);
+	if (end == buf) {
+		/* No digits at all. */
+		(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
+		usage(B_FALSE);
+	} else if (end[0] == '.') {
+		/* Fractional value: reparse as double, then apply suffix. */
+		double fval = strtod(buf, &end);
+		fval *= pow(2, str2shift(end));
+		if (fval > UINT64_MAX) {
+			(void) fprintf(stderr, "ztest: value too large: %s\n",
+			    buf);
+			usage(B_FALSE);
+		}
+		val = (uint64_t)fval;
+	} else {
+		int shift = str2shift(end);
+		/* The shifted-back check detects overflow of val << shift. */
+		if (shift >= 64 || (val << shift) >> shift != val) {
+			(void) fprintf(stderr, "ztest: value too large: %s\n",
+			    buf);
+			usage(B_FALSE);
+		}
+		val <<= shift;
+	}
+	return (val);
+}
+
+/*
+ * Print the option summary (with current default values) to stdout when
+ * help was requested, else to stderr, then exit(0) or exit(1) accordingly.
+ * The trailing argument list must stay in step with the format string;
+ * each argument is annotated with its flag below.
+ */
+static void
+usage(boolean_t requested)
+{
+	const ztest_shared_opts_t *zo = &ztest_opts_defaults;
+
+	char nice_vdev_size[10];
+	char nice_gang_bang[10];
+	FILE *fp = requested ? stdout : stderr;
+
+	nicenum(zo->zo_vdev_size, nice_vdev_size);
+	nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang);
+
+	(void) fprintf(fp, "Usage: %s\n"
+	    "\t[-v vdevs (default: %llu)]\n"
+	    "\t[-s size_of_each_vdev (default: %s)]\n"
+	    "\t[-a alignment_shift (default: %d)] use 0 for random\n"
+	    "\t[-m mirror_copies (default: %d)]\n"
+	    "\t[-r raidz_disks (default: %d)]\n"
+	    "\t[-R raidz_parity (default: %d)]\n"
+	    "\t[-d datasets (default: %d)]\n"
+	    "\t[-t threads (default: %d)]\n"
+	    "\t[-g gang_block_threshold (default: %s)]\n"
+	    "\t[-i init_count (default: %d)] initialize pool i times\n"
+	    "\t[-k kill_percentage (default: %llu%%)]\n"
+	    "\t[-p pool_name (default: %s)]\n"
+	    "\t[-f dir (default: %s)] file directory for vdev files\n"
+	    "\t[-V] verbose (use multiple times for ever more blather)\n"
+	    "\t[-E] use existing pool instead of creating new one\n"
+	    "\t[-T time (default: %llu sec)] total run time\n"
+	    "\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
+	    "\t[-P passtime (default: %llu sec)] time per pass\n"
+	    "\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
+	    "\t[-h] (print help)\n"
+	    "",
+	    zo->zo_pool,
+	    (u_longlong_t)zo->zo_vdevs,			/* -v */
+	    nice_vdev_size,				/* -s */
+	    zo->zo_ashift,				/* -a */
+	    zo->zo_mirrors,				/* -m */
+	    zo->zo_raidz,				/* -r */
+	    zo->zo_raidz_parity,			/* -R */
+	    zo->zo_datasets,				/* -d */
+	    zo->zo_threads,				/* -t */
+	    nice_gang_bang,				/* -g */
+	    zo->zo_init,				/* -i */
+	    (u_longlong_t)zo->zo_killrate,		/* -k */
+	    zo->zo_pool,				/* -p */
+	    zo->zo_dir,					/* -f */
+	    (u_longlong_t)zo->zo_time,			/* -T */
+	    (u_longlong_t)zo->zo_maxloops,		/* -F */
+	    (u_longlong_t)zo->zo_passtime);
+	exit(requested ? 0 : 1);
+}
+
+/*
+ * Parse the command line into ztest_opts, starting from
+ * ztest_opts_defaults.  Numeric flags accept nicenum suffixes (see
+ * nicenumtoull()).  -B sets up an alternate ztest binary and library
+ * path for backwards-compatibility testing.
+ *
+ * Fix: the results of strstr() on the executable path were used
+ * unchecked; if the running binary's path did not contain "/usr/bin/"
+ * (or no "/ztest" followed it), the NULL return was dereferenced.
+ * Both cases now fail with a clear fatal() message instead.
+ */
+static void
+process_options(int argc, char **argv)
+{
+	char *path;
+	ztest_shared_opts_t *zo = &ztest_opts;
+
+	int opt;
+	uint64_t value;
+	char altdir[MAXNAMELEN] = { 0 };
+
+	bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
+
+	while ((opt = getopt(argc, argv,
+	    "v:s:a:m:r:R:d:t:g:i:k:p:f:VET:P:hF:B:")) != EOF) {
+		value = 0;
+		/* First pass: flags that take a nicenum argument. */
+		switch (opt) {
+		case 'v':
+		case 's':
+		case 'a':
+		case 'm':
+		case 'r':
+		case 'R':
+		case 'd':
+		case 't':
+		case 'g':
+		case 'i':
+		case 'k':
+		case 'T':
+		case 'P':
+		case 'F':
+			value = nicenumtoull(optarg);
+		}
+		switch (opt) {
+		case 'v':
+			zo->zo_vdevs = value;
+			break;
+		case 's':
+			zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
+			break;
+		case 'a':
+			zo->zo_ashift = value;
+			break;
+		case 'm':
+			zo->zo_mirrors = value;
+			break;
+		case 'r':
+			zo->zo_raidz = MAX(1, value);
+			break;
+		case 'R':
+			zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
+			break;
+		case 'd':
+			zo->zo_datasets = MAX(1, value);
+			break;
+		case 't':
+			zo->zo_threads = MAX(1, value);
+			break;
+		case 'g':
+			zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
+			    value);
+			break;
+		case 'i':
+			zo->zo_init = value;
+			break;
+		case 'k':
+			zo->zo_killrate = value;
+			break;
+		case 'p':
+			(void) strlcpy(zo->zo_pool, optarg,
+			    sizeof (zo->zo_pool));
+			break;
+		case 'f':
+			path = realpath(optarg, NULL);
+			if (path == NULL) {
+				(void) fprintf(stderr, "error: %s: %s\n",
+				    optarg, strerror(errno));
+				usage(B_FALSE);
+			} else {
+				(void) strlcpy(zo->zo_dir, path,
+				    sizeof (zo->zo_dir));
+			}
+			break;
+		case 'V':
+			zo->zo_verbose++;
+			break;
+		case 'E':
+			zo->zo_init = 0;
+			break;
+		case 'T':
+			zo->zo_time = value;
+			break;
+		case 'P':
+			zo->zo_passtime = MAX(1, value);
+			break;
+		case 'F':
+			zo->zo_maxloops = MAX(1, value);
+			break;
+		case 'B':
+			(void) strlcpy(altdir, optarg, sizeof (altdir));
+			break;
+		case 'h':
+			usage(B_TRUE);
+			break;
+		case '?':
+		default:
+			usage(B_FALSE);
+			break;
+		}
+	}
+
+	/* Parity can never meet or exceed the raidz width. */
+	zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
+
+	/* Spread vdev add/remove passes evenly over the total run time. */
+	zo->zo_vdevtime =
+	    (zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
+	    UINT64_MAX >> 2);
+
+	if (strlen(altdir) > 0) {
+		char *cmd;
+		char *realaltdir;
+		char *bin;
+		char *ztest;
+		char *isa;
+		int isalen;
+
+		cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+		realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+
+		VERIFY(NULL != realpath(getexecname(), cmd));
+		if (0 != access(altdir, F_OK)) {
+			ztest_dump_core = B_FALSE;
+			fatal(B_TRUE, "invalid alternate ztest path: %s",
+			    altdir);
+		}
+		VERIFY(NULL != realpath(altdir, realaltdir));
+
+		/*
+		 * 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
+		 * We want to extract <isa> to determine if we should use
+		 * 32 or 64 bit binaries.
+		 */
+		bin = strstr(cmd, "/usr/bin/");
+		if (bin == NULL) {
+			ztest_dump_core = B_FALSE;
+			fatal(B_FALSE, "can't find \"/usr/bin/\" in "
+			    "executable path: %s", cmd);
+		}
+		ztest = strstr(bin, "/ztest");
+		if (ztest == NULL) {
+			ztest_dump_core = B_FALSE;
+			fatal(B_FALSE, "can't find \"/ztest\" in "
+			    "executable path: %s", cmd);
+		}
+		isa = bin + 9;
+		isalen = ztest - isa;
+		(void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
+		    "%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
+		(void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
+		    "%s/usr/lib/%.*s", realaltdir, isalen, isa);
+
+		if (0 != access(zo->zo_alt_ztest, X_OK)) {
+			ztest_dump_core = B_FALSE;
+			fatal(B_TRUE, "invalid alternate ztest: %s",
+			    zo->zo_alt_ztest);
+		} else if (0 != access(zo->zo_alt_libpath, X_OK)) {
+			ztest_dump_core = B_FALSE;
+			fatal(B_TRUE, "invalid alternate lib directory %s",
+			    zo->zo_alt_libpath);
+		}
+
+		umem_free(cmd, MAXPATHLEN);
+		umem_free(realaltdir, MAXPATHLEN);
+	}
+}
+
+/*
+ * Simulate pulling the power cord: record the pool's current alloc/space
+ * totals in the shared state (so the parent can sanity-check after
+ * restart), then SIGKILL ourselves.
+ */
+static void
+ztest_kill(ztest_shared_t *zs)
+{
+	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
+	zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
+	(void) kill(getpid(), SIGKILL);
+}
+
+/*
+ * Return a random number in [0, range) read from ztest_fd_rand
+ * (/dev/urandom, per the error message).  range == 0 returns 0.
+ * Note the modulo introduces a slight bias for ranges that don't
+ * divide 2^64, which is fine for test purposes.
+ */
+static uint64_t
+ztest_random(uint64_t range)
+{
+	uint64_t r;
+
+	ASSERT3S(ztest_fd_rand, >=, 0);
+
+	if (range == 0)
+		return (0);
+
+	if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
+		fatal(1, "short read from /dev/urandom");
+
+	return (r % range);
+}
+
+/* ARGSUSED */
+/*
+ * Count an expected ENOSPC hit in the shared state; the tag argument
+ * exists only for the caller's context and is unused.
+ */
+static void
+ztest_record_enospc(const char *s)
+{
+	ztest_shared->zs_enospc_count++;
+}
+
+/*
+ * Return the configured ashift, or -- when -a 0 was given -- a random
+ * shift in [SPA_MINBLOCKSHIFT, SPA_MINBLOCKSHIFT + 2].
+ */
+static uint64_t
+ztest_get_ashift(void)
+{
+	if (ztest_opts.zo_ashift == 0)
+		return (SPA_MINBLOCKSHIFT + ztest_random(3));
+	return (ztest_opts.zo_ashift);
+}
+
+/*
+ * Build an nvlist describing a single file vdev.  When path is NULL the
+ * name is generated from ztest_dev_template (leaf vdevs, consuming
+ * zs_vdev_next_leaf) or ztest_aux_template (aux vdevs such as spares).
+ * When size != 0 the backing file is created/truncated to that size.
+ * ashift == 0 means "pick one" via ztest_get_ashift().
+ */
+static nvlist_t *
+make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
+{
+	char pathbuf[MAXPATHLEN];
+	uint64_t vdev;
+	nvlist_t *file;
+
+	if (ashift == 0)
+		ashift = ztest_get_ashift();
+
+	if (path == NULL) {
+		path = pathbuf;
+
+		if (aux != NULL) {
+			vdev = ztest_shared->zs_vdev_aux;
+			(void) snprintf(path, sizeof (pathbuf),
+			    ztest_aux_template, ztest_opts.zo_dir,
+			    pool == NULL ? ztest_opts.zo_pool : pool,
+			    aux, vdev);
+		} else {
+			vdev = ztest_shared->zs_vdev_next_leaf++;
+			(void) snprintf(path, sizeof (pathbuf),
+			    ztest_dev_template, ztest_opts.zo_dir,
+			    pool == NULL ? ztest_opts.zo_pool : pool, vdev);
+		}
+	}
+
+	if (size != 0) {
+		int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
+		if (fd == -1)
+			fatal(1, "can't open %s", path);
+		if (ftruncate(fd, size) != 0)
+			fatal(1, "can't ftruncate %s", path);
+		(void) close(fd);
+	}
+
+	VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
+	VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
+	VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
+
+	return (file);
+}
+
+/*
+ * Build an nvlist for a raidz vdev of r file children (parity taken from
+ * ztest_opts).  With r < 2 a raidz makes no sense, so degrade to a plain
+ * file vdev.
+ */
+static nvlist_t *
+make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
+    uint64_t ashift, int r)
+{
+	nvlist_t *raidz, **child;
+	int c;
+
+	if (r < 2)
+		return (make_vdev_file(path, aux, pool, size, ashift));
+	child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
+
+	for (c = 0; c < r; c++)
+		child[c] = make_vdev_file(path, aux, pool, size, ashift);
+
+	VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
+	    VDEV_TYPE_RAIDZ) == 0);
+	VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
+	    ztest_opts.zo_raidz_parity) == 0);
+	VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
+	    child, r) == 0);
+
+	/* The array copies the children; free our temporaries. */
+	for (c = 0; c < r; c++)
+		nvlist_free(child[c]);
+
+	umem_free(child, r * sizeof (nvlist_t *));
+
+	return (raidz);
+}
+
+/*
+ * Build an nvlist for an m-way mirror of raidz vdevs (each r disks wide).
+ * m < 1 degrades to a single raidz child with no mirror wrapper.
+ */
+static nvlist_t *
+make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
+    uint64_t ashift, int r, int m)
+{
+	nvlist_t *mirror, **child;
+	int c;
+
+	if (m < 1)
+		return (make_vdev_raidz(path, aux, pool, size, ashift, r));
+
+	child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
+
+	for (c = 0; c < m; c++)
+		child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
+
+	VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
+	    VDEV_TYPE_MIRROR) == 0);
+	VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
+	    child, m) == 0);
+
+	/* The array copies the children; free our temporaries. */
+	for (c = 0; c < m; c++)
+		nvlist_free(child[c]);
+
+	umem_free(child, m * sizeof (nvlist_t *));
+
+	return (mirror);
+}
+
+/*
+ * Build the root vdev nvlist: t top-level mirror-of-raidz vdevs, each
+ * flagged as a log device when log is nonzero.  When aux is non-NULL the
+ * children are attached under that name (e.g. spares/l2arc) instead of
+ * ZPOOL_CONFIG_CHILDREN.
+ */
+static nvlist_t *
+make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
+    int log, int r, int m, int t)
+{
+	nvlist_t *root, **child;
+	int c;
+
+	ASSERT(t > 0);
+
+	child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
+
+	for (c = 0; c < t; c++) {
+		child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
+		    r, m);
+		VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    log) == 0);
+	}
+
+	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
+	VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
+	    child, t) == 0);
+
+	/* The array copies the children; free our temporaries. */
+	for (c = 0; c < t; c++)
+		nvlist_free(child[c]);
+
+	umem_free(child, t * sizeof (nvlist_t *));
+
+	return (root);
+}
+
+/*
+ * Find a random spa version. Returns back a random spa version in the
+ * range [initial_version, SPA_VERSION_FEATURES].
+ *
+ * Note the result is never strictly between SPA_VERSION_BEFORE_FEATURES
+ * and SPA_VERSION_FEATURES: pre-features versions are drawn uniformly,
+ * and anything newer collapses to SPA_VERSION_FEATURES.
+ */
+static uint64_t
+ztest_random_spa_version(uint64_t initial_version)
+{
+	uint64_t version = initial_version;
+
+	if (version <= SPA_VERSION_BEFORE_FEATURES) {
+		/* Uniform draw in [version, SPA_VERSION_BEFORE_FEATURES]. */
+		version = version +
+		    ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
+	}
+
+	/* Only reached when initial_version was already past features. */
+	if (version > SPA_VERSION_BEFORE_FEATURES)
+		version = SPA_VERSION_FEATURES;
+
+	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
+	return (version);
+}
+
+/* Random power-of-two blocksize in [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE]. */
+static int
+ztest_random_blocksize(void)
+{
+	return (1 << (SPA_MINBLOCKSHIFT +
+	    ztest_random(SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)));
+}
+
+/* Random indirect-block shift in [DN_MIN_INDBLKSHIFT, DN_MAX_INDBLKSHIFT]. */
+static int
+ztest_random_ibshift(void)
+{
+	return (DN_MIN_INDBLKSHIFT +
+	    ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
+}
+
+/*
+ * Pick a random top-level vdev index, retrying past holes, log devices
+ * (unless log_ok), and vdevs whose metaslab group/class isn't set up yet.
+ * Caller must hold the spa config lock as reader (asserted below).
+ */
+static uint64_t
+ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
+{
+	uint64_t top;
+	vdev_t *rvd = spa->spa_root_vdev;
+	vdev_t *tvd;
+
+	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
+
+	do {
+		top = ztest_random(rvd->vdev_children);
+		tvd = rvd->vdev_child[top];
+	} while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
+	    tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
+
+	return (top);
+}
+
+/*
+ * Pick a random valid value for the given dsl property, never selecting
+ * checksum=off (the tests rely on checksums staying enabled).
+ */
+static uint64_t
+ztest_random_dsl_prop(zfs_prop_t prop)
+{
+	uint64_t value;
+
+	do {
+		value = zfs_prop_random_value(prop, ztest_random(-1ULL));
+	} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
+
+	return (value);
+}
+
+/*
+ * Set (or, with inherit, clear the local value of) a numeric dsl property
+ * on the named dataset.  ENOSPC is tolerated and recorded; any other
+ * failure trips the ASSERT.  On success the value is read back (readback
+ * is verified to succeed; the value itself isn't compared) and optionally
+ * logged at verbosity >= 6.
+ */
+static int
+ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
+    boolean_t inherit)
+{
+	const char *propname = zfs_prop_to_name(prop);
+	const char *valname;
+	char setpoint[MAXPATHLEN];
+	uint64_t curval;
+	int error;
+
+	error = dsl_prop_set(osname, propname,
+	    (inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL),
+	    sizeof (value), 1, &value);
+
+	if (error == ENOSPC) {
+		ztest_record_enospc(FTAG);
+		return (error);
+	}
+	ASSERT0(error);
+
+	VERIFY3U(dsl_prop_get(osname, propname, sizeof (curval),
+	    1, &curval, setpoint), ==, 0);
+
+	if (ztest_opts.zo_verbose >= 6) {
+		VERIFY(zfs_prop_index_to_string(prop, curval, &valname) == 0);
+		(void) printf("%s %s = %s at '%s'\n",
+		    osname, propname, valname, setpoint);
+	}
+
+	return (error);
+}
+
+/*
+ * Set a numeric pool property on ztest_spa via a one-entry nvlist.
+ * ENOSPC is tolerated and recorded; any other failure trips the ASSERT.
+ * Returns 0 on success or the ENOSPC error.
+ */
+static int
+ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
+{
+	spa_t *spa = ztest_spa;
+	nvlist_t *props = NULL;
+	int error;
+
+	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
+
+	error = spa_prop_set(spa, props);
+
+	nvlist_free(props);
+
+	if (error == ENOSPC) {
+		ztest_record_enospc(FTAG);
+		return (error);
+	}
+	ASSERT0(error);
+
+	return (error);
+}
+
+/* Initialize a reader/writer range-lock latch to the unlocked state. */
+static void
+ztest_rll_init(rll_t *rll)
+{
+	rll->rll_writer = NULL;
+	rll->rll_readers = 0;
+	VERIFY(_mutex_init(&rll->rll_lock, USYNC_THREAD, NULL) == 0);
+	VERIFY(cond_init(&rll->rll_cv, USYNC_THREAD, NULL) == 0);
+}
+
+/* Destroy a latch; it must be unheld (asserted). */
+static void
+ztest_rll_destroy(rll_t *rll)
+{
+	ASSERT(rll->rll_writer == NULL);
+	ASSERT(rll->rll_readers == 0);
+	VERIFY(_mutex_destroy(&rll->rll_lock) == 0);
+	VERIFY(cond_destroy(&rll->rll_cv) == 0);
+}
+
+/*
+ * Acquire the latch: readers wait only for an active writer; a writer
+ * waits for everyone.  RL_APPEND is treated the same as RL_WRITER here
+ * (anything not RL_READER takes the writer path).
+ */
+static void
+ztest_rll_lock(rll_t *rll, rl_type_t type)
+{
+	VERIFY(mutex_lock(&rll->rll_lock) == 0);
+
+	if (type == RL_READER) {
+		while (rll->rll_writer != NULL)
+			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
+		rll->rll_readers++;
+	} else {
+		while (rll->rll_writer != NULL || rll->rll_readers)
+			(void) cond_wait(&rll->rll_cv, &rll->rll_lock);
+		rll->rll_writer = curthread;
+	}
+
+	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
+}
+
+/*
+ * Release the latch (writer if one is recorded, else one reader) and wake
+ * all waiters once the latch becomes fully idle.
+ */
+static void
+ztest_rll_unlock(rll_t *rll)
+{
+	VERIFY(mutex_lock(&rll->rll_lock) == 0);
+
+	if (rll->rll_writer) {
+		ASSERT(rll->rll_readers == 0);
+		rll->rll_writer = NULL;
+	} else {
+		ASSERT(rll->rll_readers != 0);
+		ASSERT(rll->rll_writer == NULL);
+		rll->rll_readers--;
+	}
+
+	if (rll->rll_writer == NULL && rll->rll_readers == 0)
+		VERIFY(cond_broadcast(&rll->rll_cv) == 0);
+
+	VERIFY(mutex_unlock(&rll->rll_lock) == 0);
+}
+
+/* Lock the object's latch, hashed by object number into zd_object_lock[]. */
+static void
+ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
+{
+	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
+
+	ztest_rll_lock(rll, type);
+}
+
+/* Unlock the object's latch; must mirror ztest_object_lock()'s hashing. */
+static void
+ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
+{
+	rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
+
+	ztest_rll_unlock(rll);
+}
+
+/*
+ * Lock a (coarse) range: the whole latch chosen by hashing (object, offset)
+ * is taken, so distinct ranges mapping to the same slot serialize.  Returns
+ * a heap-allocated rl_t to pass to ztest_range_unlock().
+ *
+ * NOTE(review): the "% (ZTEST_RANGE_LOCKS + 1)" before the power-of-two
+ * mask looks odd (a plain mask would distribute offsets more evenly) but
+ * is harmless and kept as-is.
+ */
+static rl_t *
+ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
+    uint64_t size, rl_type_t type)
+{
+	uint64_t hash = object ^ (offset % (ZTEST_RANGE_LOCKS + 1));
+	rll_t *rll = &zd->zd_range_lock[hash & (ZTEST_RANGE_LOCKS - 1)];
+	rl_t *rl;
+
+	rl = umem_alloc(sizeof (*rl), UMEM_NOFAIL);
+	rl->rl_object = object;
+	rl->rl_offset = offset;
+	rl->rl_size = size;
+	rl->rl_lock = rll;
+
+	ztest_rll_lock(rll, type);
+
+	return (rl);
+}
+
+/* Release a range lock obtained from ztest_range_lock() and free it. */
+static void
+ztest_range_unlock(rl_t *rl)
+{
+	rll_t *rll = rl->rl_lock;
+
+	ztest_rll_unlock(rll);
+
+	umem_free(rl, sizeof (*rl));
+}
+
+/*
+ * Initialize per-dataset state around an already-open objset: record the
+ * objset/zilog/name, reset the shared ZIL sequence (if a shared slot was
+ * given), and set up all locks and latch arrays.
+ */
+static void
+ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
+{
+	zd->zd_os = os;
+	zd->zd_zilog = dmu_objset_zil(os);
+	zd->zd_shared = szd;
+	dmu_objset_name(os, zd->zd_name);
+
+	if (zd->zd_shared != NULL)
+		zd->zd_shared->zd_seq = 0;
+
+	VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
+	VERIFY(_mutex_init(&zd->zd_dirobj_lock, USYNC_THREAD, NULL) == 0);
+
+	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
+		ztest_rll_init(&zd->zd_object_lock[l]);
+
+	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
+		ztest_rll_init(&zd->zd_range_lock[l]);
+}
+
+/*
+ * Tear down per-dataset lock state set up by ztest_zd_init().
+ * NOTE(review): zd_zilog_lock (rwlock) is initialized in ztest_zd_init()
+ * but not destroyed here -- confirm whether that is intentional.
+ */
+static void
+ztest_zd_fini(ztest_ds_t *zd)
+{
+	VERIFY(_mutex_destroy(&zd->zd_dirobj_lock) == 0);
+
+	for (int l = 0; l < ZTEST_OBJECT_LOCKS; l++)
+		ztest_rll_destroy(&zd->zd_object_lock[l]);
+
+	for (int l = 0; l < ZTEST_RANGE_LOCKS; l++)
+		ztest_rll_destroy(&zd->zd_range_lock[l]);
+}
+
+#define	TXG_MIGHTWAIT	(ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
+
+/*
+ * Assign tx to a transaction group and return its txg, or 0 on failure.
+ * On failure the tx has already been aborted: ERESTART (only possible
+ * with TXG_NOWAIT) waits out the pushback before aborting, and ENOSPC
+ * is recorded against 'tag' for end-of-run reporting.
+ */
+static uint64_t
+ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
+{
+	uint64_t txg;
+	int error;
+
+	/*
+	 * Attempt to assign tx to some transaction group.
+	 */
+	error = dmu_tx_assign(tx, txg_how);
+	if (error) {
+		if (error == ERESTART) {
+			ASSERT(txg_how == TXG_NOWAIT);
+			dmu_tx_wait(tx);
+		} else {
+			ASSERT3U(error, ==, ENOSPC);
+			ztest_record_enospc(tag);
+		}
+		dmu_tx_abort(tx);
+		return (0);
+	}
+	txg = dmu_tx_get_txg(tx);
+	ASSERT(txg != 0);
+	return (txg);
+}
+
+/*
+ * Fill 'buf' with 64-bit copies of 'value'; 'size' is in bytes.
+ */
+static void
+ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
+{
+	uint64_t *w = buf;
+	uint64_t *limit = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
+
+	for (; w < limit; w++)
+		*w = value;
+}
+
+/*
+ * Return B_TRUE iff every 64-bit word of 'buf' equals 'value'.
+ * Accumulates differences branch-free rather than bailing early.
+ */
+static boolean_t
+ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
+{
+	uint64_t *w = buf;
+	uint64_t *limit = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
+	uint64_t acc = 0;
+
+	for (; w < limit; w++)
+		acc |= (value - *w);
+
+	return (acc == 0);
+}
+
+/*
+ * Stamp a block tag with its identity (objset/object/offset) and the
+ * generation/txg/creation-txg values, so ztest_bt_verify() can later
+ * check that the data read back belongs where it was written.
+ */
+static void
+ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
+    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
+{
+	bt->bt_magic = BT_MAGIC;
+	bt->bt_objset = dmu_objset_id(os);
+	bt->bt_object = object;
+	bt->bt_offset = offset;
+	bt->bt_gen = gen;
+	bt->bt_txg = txg;
+	bt->bt_crtxg = crtxg;
+}
+
+/*
+ * Verify a block tag against expected values.  Identity fields must
+ * match exactly; gen and txg only need to be <= the expected values,
+ * since the tag may have been written earlier than 'gen'/'txg'.
+ */
+static void
+ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
+    uint64_t offset, uint64_t gen, uint64_t txg, uint64_t crtxg)
+{
+	ASSERT(bt->bt_magic == BT_MAGIC);
+	ASSERT(bt->bt_objset == dmu_objset_id(os));
+	ASSERT(bt->bt_object == object);
+	ASSERT(bt->bt_offset == offset);
+	ASSERT(bt->bt_gen <= gen);
+	ASSERT(bt->bt_txg <= txg);
+	ASSERT(bt->bt_crtxg == crtxg);
+}
+
+/*
+ * Return a pointer to the block tag stored at the *end* of the object's
+ * bonus buffer.  The bonus buffer must be large enough to hold one tag.
+ */
+static ztest_block_tag_t *
+ztest_bt_bonus(dmu_buf_t *db)
+{
+	dmu_object_info_t doi;
+	ztest_block_tag_t *bt;
+
+	dmu_object_info_from_db(db, &doi);
+	ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
+	ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
+	/* The tag occupies the last sizeof (*bt) bytes of the bonus area. */
+	bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
+
+	return (bt);
+}
+
+/*
+ * ZIL logging ops
+ */
+
+/*
+ * ztest repurposes otherwise-unused lr_create_t fields to carry
+ * object-creation parameters: DMU type, blocksize, indirect-block
+ * shift, bonus type, and bonus length.
+ */
+#define	lrz_type	lr_mode
+#define	lrz_blocksize	lr_uid
+#define	lrz_ibshift	lr_gid
+#define	lrz_bonustype	lr_rdev
+#define	lrz_bonuslen	lr_crtime[1]
+
+/*
+ * Log a TX_CREATE record (plus the trailing name) to the ZIL,
+ * unless we are currently replaying, in which case logging would
+ * duplicate the record.
+ */
+static void
+ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
+{
+	char *name = (void *)(lr + 1);		/* name follows lr */
+	size_t namesize = strlen(name) + 1;
+	itx_t *itx;
+
+	if (zil_replaying(zd->zd_zilog, tx))
+		return;
+
+	itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
+	/* Copy everything past the common lr_t header, including the name. */
+	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
+	    sizeof (*lr) + namesize - sizeof (lr_t));
+
+	zil_itx_assign(zd->zd_zilog, itx, tx);
+}
+
+/*
+ * Log a TX_REMOVE record (plus the trailing name) to the ZIL.
+ * 'object' records which object is being removed so the itx can be
+ * cancelled if the object is later re-created.  No-op during replay.
+ */
+static void
+ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
+{
+	char *name = (void *)(lr + 1);		/* name follows lr */
+	size_t namesize = strlen(name) + 1;
+	itx_t *itx;
+
+	if (zil_replaying(zd->zd_zilog, tx))
+		return;
+
+	itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
+	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
+	    sizeof (*lr) + namesize - sizeof (lr_t));
+
+	itx->itx_oid = object;
+	zil_itx_assign(zd->zd_zilog, itx, tx);
+}
+
+/*
+ * Log a TX_WRITE record to the ZIL, picking a random write state
+ * (WR_INDIRECT / WR_COPIED / WR_NEED_COPY) to exercise all three
+ * ZIL write paths.  No-op during replay.
+ */
+static void
+ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
+{
+	itx_t *itx;
+	itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
+
+	if (zil_replaying(zd->zd_zilog, tx))
+		return;
+
+	/* Writes too large to embed in a log record must be indirect. */
+	if (lr->lr_length > ZIL_MAX_LOG_DATA)
+		write_state = WR_INDIRECT;
+
+	itx = zil_itx_create(TX_WRITE,
+	    sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
+
+	/*
+	 * For WR_COPIED, read the data into the itx now; if the read
+	 * fails, fall back to WR_NEED_COPY with a data-less record.
+	 */
+	if (write_state == WR_COPIED &&
+	    dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
+	    ((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
+		zil_itx_destroy(itx);
+		itx = zil_itx_create(TX_WRITE, sizeof (*lr));
+		write_state = WR_NEED_COPY;
+	}
+	itx->itx_private = zd;
+	itx->itx_wr_state = write_state;
+	itx->itx_sync = (ztest_random(8) == 0);
+	itx->itx_sod += (write_state == WR_NEED_COPY ? lr->lr_length : 0);
+
+	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
+	    sizeof (*lr) - sizeof (lr_t));
+
+	zil_itx_assign(zd->zd_zilog, itx, tx);
+}
+
+/*
+ * Log an asynchronous TX_TRUNCATE record to the ZIL.  No-op during replay.
+ */
+static void
+ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
+{
+	itx_t *itx;
+
+	if (zil_replaying(zd->zd_zilog, tx))
+		return;
+
+	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
+	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
+	    sizeof (*lr) - sizeof (lr_t));
+
+	itx->itx_sync = B_FALSE;
+	zil_itx_assign(zd->zd_zilog, itx, tx);
+}
+
+/*
+ * Log an asynchronous TX_SETATTR record to the ZIL.  No-op during replay.
+ */
+static void
+ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
+{
+	itx_t *itx;
+
+	if (zil_replaying(zd->zd_zilog, tx))
+		return;
+
+	itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
+	bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
+	    sizeof (*lr) - sizeof (lr_t));
+
+	itx->itx_sync = B_FALSE;
+	zil_itx_assign(zd->zd_zilog, itx, tx);
+}
+
+/*
+ * ZIL replay ops
+ */
+/*
+ * Create (or, during ZIL replay, claim) the object described by 'lr',
+ * stamp its bonus buffer with a block tag, and enter its name in the
+ * directory ZAP.  lr_foid == 0 means allocate a new object id; a
+ * non-zero lr_foid (replay only) claims that specific id.
+ * Returns 0, ENOSPC, or EEXIST (replay of an already-applied create).
+ */
+static int
+ztest_replay_create(ztest_ds_t *zd, lr_create_t *lr, boolean_t byteswap)
+{
+	char *name = (void *)(lr + 1);		/* name follows lr */
+	objset_t *os = zd->zd_os;
+	ztest_block_tag_t *bbt;
+	dmu_buf_t *db;
+	dmu_tx_t *tx;
+	uint64_t txg;
+	int error = 0;
+
+	if (byteswap)
+		byteswap_uint64_array(lr, sizeof (*lr));
+
+	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
+	ASSERT(name[0] != '\0');
+
+	tx = dmu_tx_create(os);
+
+	dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
+
+	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
+		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
+	} else {
+		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
+	}
+
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	if (txg == 0)
+		return (ENOSPC);
+
+	/* A pre-assigned object id implies we are replaying, and vice versa. */
+	ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
+
+	if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
+		if (lr->lr_foid == 0) {
+			lr->lr_foid = zap_create(os,
+			    lr->lrz_type, lr->lrz_bonustype,
+			    lr->lrz_bonuslen, tx);
+		} else {
+			error = zap_create_claim(os, lr->lr_foid,
+			    lr->lrz_type, lr->lrz_bonustype,
+			    lr->lrz_bonuslen, tx);
+		}
+	} else {
+		if (lr->lr_foid == 0) {
+			lr->lr_foid = dmu_object_alloc(os,
+			    lr->lrz_type, 0, lr->lrz_bonustype,
+			    lr->lrz_bonuslen, tx);
+		} else {
+			error = dmu_object_claim(os, lr->lr_foid,
+			    lr->lrz_type, 0, lr->lrz_bonustype,
+			    lr->lrz_bonuslen, tx);
+		}
+	}
+
+	if (error) {
+		/* Only a replayed create can find the object already there. */
+		ASSERT3U(error, ==, EEXIST);
+		ASSERT(zd->zd_zilog->zl_replay);
+		dmu_tx_commit(tx);
+		return (error);
+	}
+
+	ASSERT(lr->lr_foid != 0);
+
+	if (lr->lrz_type != DMU_OT_ZAP_OTHER)
+		VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
+		    lr->lrz_blocksize, lr->lrz_ibshift, tx));
+
+	/* Stamp the bonus buffer so future reads can be verified. */
+	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
+	bbt = ztest_bt_bonus(db);
+	dmu_buf_will_dirty(db, tx);
+	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_gen, txg, txg);
+	dmu_buf_rele(db, FTAG);
+
+	VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
+	    &lr->lr_foid, tx));
+
+	(void) ztest_log_create(zd, tx, lr);
+
+	dmu_tx_commit(tx);
+
+	return (0);
+}
+
+/*
+ * Remove the named object: look up its id in the directory ZAP,
+ * destroy/free the object, and delete the ZAP entry, all under the
+ * object's write lock.  Returns 0 or ENOSPC.
+ */
+static int
+ztest_replay_remove(ztest_ds_t *zd, lr_remove_t *lr, boolean_t byteswap)
+{
+	char *name = (void *)(lr + 1);		/* name follows lr */
+	objset_t *os = zd->zd_os;
+	dmu_object_info_t doi;
+	dmu_tx_t *tx;
+	uint64_t object, txg;
+
+	if (byteswap)
+		byteswap_uint64_array(lr, sizeof (*lr));
+
+	ASSERT(lr->lr_doid == ZTEST_DIROBJ);
+	ASSERT(name[0] != '\0');
+
+	VERIFY3U(0, ==,
+	    zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
+	ASSERT(object != 0);
+
+	ztest_object_lock(zd, object, RL_WRITER);
+
+	VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
+
+	tx = dmu_tx_create(os);
+
+	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
+	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
+
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	if (txg == 0) {
+		ztest_object_unlock(zd, object);
+		return (ENOSPC);
+	}
+
+	/* ZAP objects need zap_destroy(); everything else dmu_object_free(). */
+	if (doi.doi_type == DMU_OT_ZAP_OTHER) {
+		VERIFY3U(0, ==, zap_destroy(os, object, tx));
+	} else {
+		VERIFY3U(0, ==, dmu_object_free(os, object, tx));
+	}
+
+	VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
+
+	(void) ztest_log_remove(zd, tx, lr, object);
+
+	dmu_tx_commit(tx);
+
+	ztest_object_unlock(zd, object);
+
+	return (0);
+}
+
+/*
+ * Apply a TX_WRITE record: write 'length' bytes at 'offset' of object
+ * lr_foid.  If the data begins with a valid block tag, verify the old
+ * on-disk tag (sometimes) and restamp the new one.  Occasionally uses
+ * dmu_assign_arcbuf() instead of dmu_write() to exercise that path.
+ * Returns 0 or ENOSPC.
+ */
+static int
+ztest_replay_write(ztest_ds_t *zd, lr_write_t *lr, boolean_t byteswap)
+{
+	objset_t *os = zd->zd_os;
+	void *data = lr + 1;			/* data follows lr */
+	uint64_t offset, length;
+	ztest_block_tag_t *bt = data;
+	ztest_block_tag_t *bbt;
+	uint64_t gen, txg, lrtxg, crtxg;
+	dmu_object_info_t doi;
+	dmu_tx_t *tx;
+	dmu_buf_t *db;
+	arc_buf_t *abuf = NULL;
+	rl_t *rl;
+
+	if (byteswap)
+		byteswap_uint64_array(lr, sizeof (*lr));
+
+	offset = lr->lr_offset;
+	length = lr->lr_length;
+
+	/* If it's a dmu_sync() block, write the whole block */
+	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
+		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
+		if (length < blocksize) {
+			offset -= offset % blocksize;
+			length = blocksize;
+		}
+	}
+
+	if (bt->bt_magic == BSWAP_64(BT_MAGIC))
+		byteswap_uint64_array(bt, sizeof (*bt));
+
+	/* bt == NULL means the payload carries no verifiable tag. */
+	if (bt->bt_magic != BT_MAGIC)
+		bt = NULL;
+
+	ztest_object_lock(zd, lr->lr_foid, RL_READER);
+	rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
+
+	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
+
+	dmu_object_info_from_db(db, &doi);
+
+	bbt = ztest_bt_bonus(db);
+	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
+	gen = bbt->bt_gen;
+	crtxg = bbt->bt_crtxg;
+	lrtxg = lr->lr_common.lrc_txg;
+
+	tx = dmu_tx_create(os);
+
+	dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
+
+	/* Occasionally write via a borrowed arc buf (full aligned blocks only). */
+	if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
+	    P2PHASE(offset, length) == 0)
+		abuf = dmu_request_arcbuf(db, length);
+
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	if (txg == 0) {
+		if (abuf != NULL)
+			dmu_return_arcbuf(abuf);
+		dmu_buf_rele(db, FTAG);
+		ztest_range_unlock(rl);
+		ztest_object_unlock(zd, lr->lr_foid);
+		return (ENOSPC);
+	}
+
+	if (bt != NULL) {
+		/*
+		 * Usually, verify the old data before writing new data --
+		 * but not always, because we also want to verify correct
+		 * behavior when the data was not recently read into cache.
+		 */
+		ASSERT(offset % doi.doi_data_block_size == 0);
+		if (ztest_random(4) != 0) {
+			int prefetch = ztest_random(2) ?
+			    DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
+			ztest_block_tag_t rbt;
+
+			VERIFY(dmu_read(os, lr->lr_foid, offset,
+			    sizeof (rbt), &rbt, prefetch) == 0);
+			if (rbt.bt_magic == BT_MAGIC) {
+				ztest_bt_verify(&rbt, os, lr->lr_foid,
+				    offset, gen, txg, crtxg);
+			}
+		}
+
+		/*
+		 * Writes can appear to be newer than the bonus buffer because
+		 * the ztest_get_data() callback does a dmu_read() of the
+		 * open-context data, which may be different than the data
+		 * as it was when the write was generated.
+		 */
+		if (zd->zd_zilog->zl_replay) {
+			ztest_bt_verify(bt, os, lr->lr_foid, offset,
+			    MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
+			    bt->bt_crtxg);
+		}
+
+		/*
+		 * Set the bt's gen/txg to the bonus buffer's gen/txg
+		 * so that all of the usual ASSERTs will work.
+		 */
+		ztest_bt_generate(bt, os, lr->lr_foid, offset, gen, txg, crtxg);
+	}
+
+	if (abuf == NULL) {
+		dmu_write(os, lr->lr_foid, offset, length, data, tx);
+	} else {
+		bcopy(data, abuf->b_data, length);
+		dmu_assign_arcbuf(db, offset, abuf, tx);
+	}
+
+	(void) ztest_log_write(zd, tx, lr);
+
+	dmu_buf_rele(db, FTAG);
+
+	dmu_tx_commit(tx);
+
+	ztest_range_unlock(rl);
+	ztest_object_unlock(zd, lr->lr_foid);
+
+	return (0);
+}
+
+/*
+ * Apply a TX_TRUNCATE record: free the byte range [lr_offset,
+ * lr_offset + lr_length) of object lr_foid under a range write lock.
+ * Returns 0 or ENOSPC.
+ */
+static int
+ztest_replay_truncate(ztest_ds_t *zd, lr_truncate_t *lr, boolean_t byteswap)
+{
+	objset_t *os = zd->zd_os;
+	dmu_tx_t *tx;
+	uint64_t txg;
+	rl_t *rl;
+
+	if (byteswap)
+		byteswap_uint64_array(lr, sizeof (*lr));
+
+	ztest_object_lock(zd, lr->lr_foid, RL_READER);
+	rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
+	    RL_WRITER);
+
+	tx = dmu_tx_create(os);
+
+	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
+
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	if (txg == 0) {
+		ztest_range_unlock(rl);
+		ztest_object_unlock(zd, lr->lr_foid);
+		return (ENOSPC);
+	}
+
+	VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
+	    lr->lr_length, tx) == 0);
+
+	(void) ztest_log_truncate(zd, tx, lr);
+
+	dmu_tx_commit(tx);
+
+	ztest_range_unlock(rl);
+	ztest_object_unlock(zd, lr->lr_foid);
+
+	return (0);
+}
+
+/*
+ * Apply a TX_SETATTR record: resize the object's bonus buffer to
+ * lr_size and restamp its block tag with lr_mode as the generation.
+ * When called in open context (not replay), lr_size/lr_mode are chosen
+ * here at random and recorded into 'lr' for later replay.
+ * Returns 0 or ENOSPC.
+ */
+static int
+ztest_replay_setattr(ztest_ds_t *zd, lr_setattr_t *lr, boolean_t byteswap)
+{
+	objset_t *os = zd->zd_os;
+	dmu_tx_t *tx;
+	dmu_buf_t *db;
+	ztest_block_tag_t *bbt;
+	uint64_t txg, lrtxg, crtxg;
+
+	if (byteswap)
+		byteswap_uint64_array(lr, sizeof (*lr));
+
+	ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
+
+	VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
+
+	tx = dmu_tx_create(os);
+	dmu_tx_hold_bonus(tx, lr->lr_foid);
+
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	if (txg == 0) {
+		dmu_buf_rele(db, FTAG);
+		ztest_object_unlock(zd, lr->lr_foid);
+		return (ENOSPC);
+	}
+
+	bbt = ztest_bt_bonus(db);
+	ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
+	crtxg = bbt->bt_crtxg;
+	lrtxg = lr->lr_common.lrc_txg;
+
+	if (zd->zd_zilog->zl_replay) {
+		ASSERT(lr->lr_size != 0);
+		ASSERT(lr->lr_mode != 0);
+		ASSERT(lrtxg != 0);
+	} else {
+		/*
+		 * Randomly change the size and increment the generation.
+		 */
+		lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
+		    sizeof (*bbt);
+		lr->lr_mode = bbt->bt_gen + 1;
+		ASSERT(lrtxg == 0);
+	}
+
+	/*
+	 * Verify that the current bonus buffer is not newer than our txg.
+	 */
+	ztest_bt_verify(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode,
+	    MAX(txg, lrtxg), crtxg);
+
+	dmu_buf_will_dirty(db, tx);
+
+	ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
+	ASSERT3U(lr->lr_size, <=, db->db_size);
+	VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
+	/* Re-locate the tag: dmu_set_bonus() changed the bonus size. */
+	bbt = ztest_bt_bonus(db);
+
+	ztest_bt_generate(bbt, os, lr->lr_foid, -1ULL, lr->lr_mode, txg, crtxg);
+
+	dmu_buf_rele(db, FTAG);
+
+	(void) ztest_log_setattr(zd, tx, lr);
+
+	dmu_tx_commit(tx);
+
+	ztest_object_unlock(zd, lr->lr_foid);
+
+	return (0);
+}
+
+/*
+ * ZIL replay dispatch table, indexed by TX_* record type.  Only the
+ * five record types ztest generates have handlers; the rest are NULL.
+ */
+zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
+	NULL,			/* 0 no such transaction type */
+	ztest_replay_create,	/* TX_CREATE */
+	NULL,			/* TX_MKDIR */
+	NULL,			/* TX_MKXATTR */
+	NULL,			/* TX_SYMLINK */
+	ztest_replay_remove,	/* TX_REMOVE */
+	NULL,			/* TX_RMDIR */
+	NULL,			/* TX_LINK */
+	NULL,			/* TX_RENAME */
+	ztest_replay_write,	/* TX_WRITE */
+	ztest_replay_truncate,	/* TX_TRUNCATE */
+	ztest_replay_setattr,	/* TX_SETATTR */
+	NULL,			/* TX_ACL */
+	NULL,			/* TX_CREATE_ACL */
+	NULL,			/* TX_CREATE_ATTR */
+	NULL,			/* TX_CREATE_ACL_ATTR */
+	NULL,			/* TX_MKDIR_ACL */
+	NULL,			/* TX_MKDIR_ATTR */
+	NULL,			/* TX_MKDIR_ACL_ATTR */
+	NULL,			/* TX_WRITE2 */
+};
+
+/*
+ * ZIL get_data callbacks
+ */
+
+/*
+ * dmu_sync() completion callback: release the dbuf hold and the
+ * range/object locks taken by ztest_get_data(), report the written
+ * block to the ZIL on success, and free the zgd.
+ */
+static void
+ztest_get_done(zgd_t *zgd, int error)
+{
+	ztest_ds_t *zd = zgd->zgd_private;
+	uint64_t object = zgd->zgd_rl->rl_object;
+
+	if (zgd->zgd_db)
+		dmu_buf_rele(zgd->zgd_db, zgd);
+
+	ztest_range_unlock(zgd->zgd_rl);
+	ztest_object_unlock(zd, object);
+
+	if (error == 0 && zgd->zgd_bp)
+		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
+
+	umem_free(zgd, sizeof (*zgd));
+}
+
+/*
+ * ZIL get-data callback: supply the data for a WR_NEED_COPY or
+ * WR_INDIRECT log record.  'buf' != NULL means copy the data directly
+ * (immediate write); otherwise start a dmu_sync() of the whole block,
+ * with ztest_get_done() releasing locks/holds when it completes.
+ * Returns ENOENT if the object no longer exists or postdates the record.
+ */
+static int
+ztest_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
+{
+	ztest_ds_t *zd = arg;
+	objset_t *os = zd->zd_os;
+	uint64_t object = lr->lr_foid;
+	uint64_t offset = lr->lr_offset;
+	uint64_t size = lr->lr_length;
+	blkptr_t *bp = &lr->lr_blkptr;
+	uint64_t txg = lr->lr_common.lrc_txg;
+	uint64_t crtxg;
+	dmu_object_info_t doi;
+	dmu_buf_t *db;
+	zgd_t *zgd;
+	int error;
+
+	ztest_object_lock(zd, object, RL_READER);
+	error = dmu_bonus_hold(os, object, FTAG, &db);
+	if (error) {
+		ztest_object_unlock(zd, object);
+		return (error);
+	}
+
+	crtxg = ztest_bt_bonus(db)->bt_crtxg;
+
+	/* Object was never stamped, or was created after this record. */
+	if (crtxg == 0 || crtxg > txg) {
+		dmu_buf_rele(db, FTAG);
+		ztest_object_unlock(zd, object);
+		return (ENOENT);
+	}
+
+	dmu_object_info_from_db(db, &doi);
+	dmu_buf_rele(db, FTAG);
+	db = NULL;
+
+	zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
+	zgd->zgd_zilog = zd->zd_zilog;
+	zgd->zgd_private = zd;
+
+	if (buf != NULL) {	/* immediate write */
+		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
+		    RL_READER);
+
+		error = dmu_read(os, object, offset, size, buf,
+		    DMU_READ_NO_PREFETCH);
+		ASSERT(error == 0);
+	} else {
+		/* Indirect write: sync the entire (aligned) block. */
+		size = doi.doi_data_block_size;
+		if (ISP2(size)) {
+			offset = P2ALIGN(offset, size);
+		} else {
+			ASSERT(offset < size);
+			offset = 0;
+		}
+
+		zgd->zgd_rl = ztest_range_lock(zd, object, offset, size,
+		    RL_READER);
+
+		error = dmu_buf_hold(os, object, offset, zgd, &db,
+		    DMU_READ_NO_PREFETCH);
+
+		if (error == 0) {
+			blkptr_t *obp = dmu_buf_get_blkptr(db);
+			if (obp) {
+				ASSERT(BP_IS_HOLE(bp));
+				*bp = *obp;
+			}
+
+			zgd->zgd_db = db;
+			zgd->zgd_bp = bp;
+
+			ASSERT(db->db_offset == offset);
+			ASSERT(db->db_size == size);
+
+			error = dmu_sync(zio, lr->lr_common.lrc_txg,
+			    ztest_get_done, zgd);
+
+			/* On success the callback owns zgd and its locks. */
+			if (error == 0)
+				return (0);
+		}
+	}
+
+	ztest_get_done(zgd, error);
+
+	return (error);
+}
+
+/*
+ * Allocate a zeroed log record of 'lrsize' bytes, optionally followed
+ * by a copy of the NUL-terminated 'name'.  Free with ztest_lr_free().
+ */
+static void *
+ztest_lr_alloc(size_t lrsize, char *name)
+{
+	size_t tailsize = (name != NULL) ? strlen(name) + 1 : 0;
+	char *record;
+
+	record = umem_zalloc(lrsize + tailsize, UMEM_NOFAIL);
+
+	if (name != NULL)
+		bcopy(name, record + lrsize, tailsize);
+
+	return (record);
+}
+
+/*
+ * Free a record from ztest_lr_alloc(); 'lrsize' and 'name' must match
+ * the allocation so the total size is recomputed identically.
+ */
+void
+ztest_lr_free(void *lr, size_t lrsize, char *name)
+{
+	size_t tailsize = (name != NULL) ? strlen(name) + 1 : 0;
+
+	umem_free(lr, lrsize + tailsize);
+}
+
+/*
+ * Lookup a bunch of objects.  Returns the number of objects not found.
+ */
+static int
+ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
+{
+	int missing = 0;
+	int error;
+
+	ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+
+	for (int i = 0; i < count; i++, od++) {
+		od->od_object = 0;
+		error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
+		    sizeof (uint64_t), 1, &od->od_object);
+		if (error) {
+			ASSERT(error == ENOENT);
+			ASSERT(od->od_object == 0);
+			missing++;
+		} else {
+			dmu_buf_t *db;
+			ztest_block_tag_t *bbt;
+			dmu_object_info_t doi;
+
+			ASSERT(od->od_object != 0);
+			ASSERT(missing == 0);	/* there should be no gaps */
+
+			/* Refresh the template from the on-disk object. */
+			ztest_object_lock(zd, od->od_object, RL_READER);
+			VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
+			    od->od_object, FTAG, &db));
+			dmu_object_info_from_db(db, &doi);
+			bbt = ztest_bt_bonus(db);
+			ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
+			od->od_type = doi.doi_type;
+			od->od_blocksize = doi.doi_data_block_size;
+			od->od_gen = bbt->bt_gen;
+			dmu_buf_rele(db, FTAG);
+			ztest_object_unlock(zd, od->od_object);
+		}
+	}
+
+	return (missing);
+}
+
+/*
+ * Create the objects described by the od templates, in order, via the
+ * ZIL create path.  Once one creation fails, the rest are skipped so
+ * there are no gaps.  Returns the number of objects not created.
+ */
+static int
+ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
+{
+	int missing = 0;
+
+	ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+
+	for (int i = 0; i < count; i++, od++) {
+		if (missing) {
+			od->od_object = 0;
+			missing++;
+			continue;
+		}
+
+		lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
+
+		lr->lr_doid = od->od_dir;
+		lr->lr_foid = 0;	/* 0 to allocate, > 0 to claim */
+		lr->lrz_type = od->od_crtype;
+		lr->lrz_blocksize = od->od_crblocksize;
+		lr->lrz_ibshift = ztest_random_ibshift();
+		lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
+		lr->lrz_bonuslen = dmu_bonus_max();
+		lr->lr_gen = od->od_crgen;
+		lr->lr_crtime[0] = time(NULL);
+
+		if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
+			ASSERT(missing == 0);
+			od->od_object = 0;
+			missing++;
+		} else {
+			/* Record the allocated id and creation parameters. */
+			od->od_object = lr->lr_foid;
+			od->od_type = od->od_crtype;
+			od->od_blocksize = od->od_crblocksize;
+			od->od_gen = od->od_crgen;
+			ASSERT(od->od_object != 0);
+		}
+
+		ztest_lr_free(lr, sizeof (*lr), od->od_name);
+	}
+
+	return (missing);
+}
+
+/*
+ * Remove the objects described by the od templates, in reverse order,
+ * via the ZIL remove path.  Once one removal fails the rest are
+ * skipped.  Returns the number of objects not removed.
+ */
+static int
+ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
+{
+	int missing = 0;
+	int error;
+
+	ASSERT(_mutex_held(&zd->zd_dirobj_lock));
+
+	od += count - 1;
+
+	for (int i = count - 1; i >= 0; i--, od--) {
+		if (missing) {
+			missing++;
+			continue;
+		}
+
+		/*
+		 * No object was found.
+		 */
+		if (od->od_object == 0)
+			continue;
+
+		lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
+
+		lr->lr_doid = od->od_dir;
+
+		if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
+			ASSERT3U(error, ==, ENOSPC);
+			missing++;
+		} else {
+			od->od_object = 0;
+		}
+		ztest_lr_free(lr, sizeof (*lr), od->od_name);
+	}
+
+	return (missing);
+}
+
+/*
+ * Write 'size' bytes of 'data' at 'offset' of 'object' by building a
+ * TX_WRITE record and running it through the replay path.
+ * Returns 0 or the error from ztest_replay_write() (e.g. ENOSPC).
+ */
+static int
+ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
+    void *data)
+{
+	lr_write_t *lr;
+	int error;
+
+	lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
+
+	lr->lr_foid = object;
+	lr->lr_offset = offset;
+	lr->lr_length = size;
+	lr->lr_blkoff = 0;
+	BP_ZERO(&lr->lr_blkptr);
+
+	/* The payload immediately follows the record header. */
+	bcopy(data, lr + 1, size);
+
+	error = ztest_replay_write(zd, lr, B_FALSE);
+
+	ztest_lr_free(lr, sizeof (*lr) + size, NULL);
+
+	return (error);
+}
+
+/*
+ * Free 'size' bytes at 'offset' of 'object' by building a TX_TRUNCATE
+ * record and running it through the replay path.
+ */
+static int
+ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
+{
+	lr_truncate_t *ltr = ztest_lr_alloc(sizeof (*ltr), NULL);
+	int rc;
+
+	ltr->lr_foid = object;
+	ltr->lr_offset = offset;
+	ltr->lr_length = size;
+
+	rc = ztest_replay_truncate(zd, ltr, B_FALSE);
+
+	ztest_lr_free(ltr, sizeof (*ltr), NULL);
+
+	return (rc);
+}
+
+/*
+ * Run a TX_SETATTR through the replay path; size/mode of 0 let
+ * ztest_replay_setattr() pick random values in open context.
+ */
+static int
+ztest_setattr(ztest_ds_t *zd, uint64_t object)
+{
+	lr_setattr_t *lsa = ztest_lr_alloc(sizeof (*lsa), NULL);
+	int rc;
+
+	lsa->lr_foid = object;
+	lsa->lr_size = 0;
+	lsa->lr_mode = 0;
+
+	rc = ztest_replay_setattr(zd, lsa, B_FALSE);
+
+	ztest_lr_free(lsa, sizeof (*lsa), NULL);
+
+	return (rc);
+}
+
+/*
+ * Preallocate (thick-provision) a range of an object.  If the tx
+ * can't be assigned, fall back to freeing the range (best effort).
+ */
+static void
+ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
+{
+	objset_t *os = zd->zd_os;
+	dmu_tx_t *tx;
+	uint64_t txg;
+	rl_t *rl;
+
+	txg_wait_synced(dmu_objset_pool(os), 0);
+
+	ztest_object_lock(zd, object, RL_READER);
+	rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
+
+	tx = dmu_tx_create(os);
+
+	dmu_tx_hold_write(tx, object, offset, size);
+
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+
+	if (txg != 0) {
+		dmu_prealloc(os, object, offset, size, tx);
+		dmu_tx_commit(tx);
+		/* Wait until the preallocation actually hits disk. */
+		txg_wait_synced(dmu_objset_pool(os), txg);
+	} else {
+		(void) dmu_free_long_range(os, object, offset, size);
+	}
+
+	ztest_range_unlock(rl);
+	ztest_object_unlock(zd, object);
+}
+
+/*
+ * Perform one randomly chosen I/O operation (tagged write, pattern
+ * write, zero write, truncate, setattr, or property-churn rewrite) on
+ * the given object/offset, under the dataset's zilog read lock.
+ */
+static void
+ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
+{
+	int err;
+	ztest_block_tag_t wbt;
+	dmu_object_info_t doi;
+	enum ztest_io_type io_type;
+	uint64_t blocksize;
+	void *data;
+
+	VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
+	blocksize = doi.doi_data_block_size;
+	data = umem_alloc(blocksize, UMEM_NOFAIL);
+
+	/*
+	 * Pick an i/o type at random, biased toward writing block tags.
+	 */
+	io_type = ztest_random(ZTEST_IO_TYPES);
+	if (ztest_random(2) == 0)
+		io_type = ZTEST_IO_WRITE_TAG;
+
+	(void) rw_rdlock(&zd->zd_zilog_lock);
+
+	switch (io_type) {
+
+	case ZTEST_IO_WRITE_TAG:
+		ztest_bt_generate(&wbt, zd->zd_os, object, offset, 0, 0, 0);
+		(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
+		break;
+
+	case ZTEST_IO_WRITE_PATTERN:
+		(void) memset(data, 'a' + (object + offset) % 5, blocksize);
+		if (ztest_random(2) == 0) {
+			/*
+			 * Induce fletcher2 collisions to ensure that
+			 * zio_ddt_collision() detects and resolves them
+			 * when using fletcher2-verify for deduplication.
+			 */
+			((uint64_t *)data)[0] ^= 1ULL << 63;
+			((uint64_t *)data)[4] ^= 1ULL << 63;
+		}
+		(void) ztest_write(zd, object, offset, blocksize, data);
+		break;
+
+	case ZTEST_IO_WRITE_ZEROES:
+		bzero(data, blocksize);
+		(void) ztest_write(zd, object, offset, blocksize, data);
+		break;
+
+	case ZTEST_IO_TRUNCATE:
+		(void) ztest_truncate(zd, object, offset, blocksize);
+		break;
+
+	case ZTEST_IO_SETATTR:
+		(void) ztest_setattr(zd, object);
+		break;
+
+	case ZTEST_IO_REWRITE:
+		/* Churn checksum/compression, then rewrite existing data. */
+		(void) rw_rdlock(&ztest_name_lock);
+		err = ztest_dsl_prop_set_uint64(zd->zd_name,
+		    ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
+		    B_FALSE);
+		VERIFY(err == 0 || err == ENOSPC);
+		err = ztest_dsl_prop_set_uint64(zd->zd_name,
+		    ZFS_PROP_COMPRESSION,
+		    ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
+		    B_FALSE);
+		VERIFY(err == 0 || err == ENOSPC);
+		(void) rw_unlock(&ztest_name_lock);
+
+		VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
+		    DMU_READ_NO_PREFETCH));
+
+		(void) ztest_write(zd, object, offset, blocksize, data);
+		break;
+	}
+
+	(void) rw_unlock(&zd->zd_zilog_lock);
+
+	umem_free(data, blocksize);
+}
+
+/*
+ * Initialize an object description template: creation parameters are
+ * filled from the arguments (blocksize 0 means "pick one at random"),
+ * the on-disk fields are cleared, and a unique name is synthesized
+ * from tag/id/index.
+ */
+static void
+ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
+    dmu_object_type_t type, uint64_t blocksize, uint64_t gen)
+{
+	od->od_dir = ZTEST_DIROBJ;
+	od->od_object = 0;
+
+	od->od_crtype = type;
+	od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
+	od->od_crgen = gen;
+
+	od->od_type = DMU_OT_NONE;
+	od->od_blocksize = 0;
+	od->od_gen = 0;
+
+	/*
+	 * Cast to the exact types the %lld/%llu conversions require;
+	 * int64_t/uint64_t are 'long' on LP64, which is undefined
+	 * behavior with the 'll' length modifier.
+	 */
+	(void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
+	    tag, (long long)id, (unsigned long long)index);
+}
+
+/*
+ * Lookup or create the objects for a test using the od template.
+ * If the objects do not all exist, or if 'remove' is specified,
+ * remove any existing objects and create new ones.  Otherwise,
+ * use the existing objects.
+ */
+static int
+ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
+{
+	int count = size / sizeof (*od);
+	int rv = 0;
+
+	VERIFY(mutex_lock(&zd->zd_dirobj_lock) == 0);
+	if ((ztest_lookup(zd, od, count) != 0 || remove) &&
+	    (ztest_remove(zd, od, count) != 0 ||
+	    ztest_create(zd, od, count) != 0))
+		rv = -1;
+	zd->zd_od = od;
+	VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
+
+	return (rv);
+}
+
+/*
+ * Commit the ZIL for a random object and record the resulting commit
+ * sequence number in shared memory so a restarted run can verify the
+ * log contains everything previously committed.
+ */
+/* ARGSUSED */
+void
+ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
+{
+	zilog_t *zilog = zd->zd_zilog;
+
+	(void) rw_rdlock(&zd->zd_zilog_lock);
+
+	zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
+
+	/*
+	 * Remember the committed values in zd, which is in parent/child
+	 * shared memory.  If we die, the next iteration of ztest_run()
+	 * will verify that the log really does contain this record.
+	 */
+	mutex_enter(&zilog->zl_lock);
+	ASSERT(zd->zd_shared != NULL);
+	ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
+	zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
+	mutex_exit(&zilog->zl_lock);
+
+	(void) rw_unlock(&zd->zd_zilog_lock);
+}
+
+/*
+ * This function is designed to simulate the operations that occur during a
+ * mount/unmount operation.  We hold the dataset across these operations in an
+ * attempt to expose any implicit assumptions about ZIL management.
+ */
+/* ARGSUSED */
+void
+ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
+{
+	objset_t *os = zd->zd_os;
+
+	/*
+	 * We grab the zd_dirobj_lock to ensure that no other thread is
+	 * updating the zil (i.e. adding in-memory log records) and the
+	 * zd_zilog_lock to block any I/O.
+	 */
+	VERIFY0(mutex_lock(&zd->zd_dirobj_lock));
+	(void) rw_wrlock(&zd->zd_zilog_lock);
+
+	/* zfsvfs_teardown() */
+	zil_close(zd->zd_zilog);
+
+	/* zfsvfs_setup() */
+	VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
+	zil_replay(os, zd, ztest_replay_vector);
+
+	(void) rw_unlock(&zd->zd_zilog_lock);
+	VERIFY(mutex_unlock(&zd->zd_dirobj_lock) == 0);
+}
+
+/*
+ * Verify that we can't destroy an active pool, create an existing pool,
+ * or create a pool with a bad vdev spec.
+ */
+/* ARGSUSED */
+void
+ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_opts_t *zo = &ztest_opts;
+	spa_t *spa;
+	nvlist_t *nvroot;
+
+	/*
+	 * Attempt to create using a bad file.
+	 */
+	nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
+	VERIFY3U(ENOENT, ==,
+	    spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
+	nvlist_free(nvroot);
+
+	/*
+	 * Attempt to create using a bad mirror.
+	 */
+	nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
+	VERIFY3U(ENOENT, ==,
+	    spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
+	nvlist_free(nvroot);
+
+	/*
+	 * Attempt to create an existing pool.  It shouldn't matter
+	 * what's in the nvroot; we should fail with EEXIST.
+	 */
+	(void) rw_rdlock(&ztest_name_lock);
+	nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
+	VERIFY3U(EEXIST, ==, spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
+	nvlist_free(nvroot);
+	/* Holding the pool open must make spa_destroy() fail with EBUSY. */
+	VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
+	VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
+	spa_close(spa, FTAG);
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Create a scratch pool at a random pre-features SPA version, upgrade
+ * it to a random newer version, and verify the version sticks in both
+ * the in-core spa and its config nvlist.
+ */
+/* ARGSUSED */
+void
+ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
+{
+	spa_t *spa;
+	uint64_t initial_version = SPA_VERSION_INITIAL;
+	uint64_t version, newversion;
+	nvlist_t *nvroot, *props;
+	char *name;
+
+	VERIFY0(mutex_lock(&ztest_vdev_lock));
+	name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
+
+	/*
+	 * Clean up from previous runs.
+	 */
+	(void) spa_destroy(name);
+
+	nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
+	    0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
+
+	/*
+	 * If we're configuring a RAIDZ device then make sure that the
+	 * the initial version is capable of supporting that feature.
+	 */
+	switch (ztest_opts.zo_raidz_parity) {
+	case 0:
+	case 1:
+		initial_version = SPA_VERSION_INITIAL;
+		break;
+	case 2:
+		initial_version = SPA_VERSION_RAIDZ2;
+		break;
+	case 3:
+		initial_version = SPA_VERSION_RAIDZ3;
+		break;
+	}
+
+	/*
+	 * Create a pool with a spa version that can be upgraded. Pick
+	 * a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
+	 */
+	do {
+		version = ztest_random_spa_version(initial_version);
+	} while (version > SPA_VERSION_BEFORE_FEATURES);
+
+	props = fnvlist_alloc();
+	fnvlist_add_uint64(props,
+	    zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
+	VERIFY0(spa_create(name, nvroot, props, NULL, NULL));
+	fnvlist_free(nvroot);
+	fnvlist_free(props);
+
+	VERIFY0(spa_open(name, &spa, FTAG));
+	VERIFY3U(spa_version(spa), ==, version);
+	newversion = ztest_random_spa_version(version + 1);
+
+	if (ztest_opts.zo_verbose >= 4) {
+		(void) printf("upgrading spa version from %llu to %llu\n",
+		    (u_longlong_t)version, (u_longlong_t)newversion);
+	}
+
+	spa_upgrade(spa, newversion);
+	VERIFY3U(spa_version(spa), >, version);
+	VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
+	    zpool_prop_to_name(ZPOOL_PROP_VERSION)));
+	spa_close(spa, FTAG);
+
+	strfree(name);
+	VERIFY0(mutex_unlock(&ztest_vdev_lock));
+}
+
+/*
+ * Depth-first search of the vdev tree rooted at 'vd' for a vdev whose
+ * vdev_path matches 'path'; returns NULL if none is found.
+ */
+static vdev_t *
+vdev_lookup_by_path(vdev_t *vd, const char *path)
+{
+	if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
+		return (vd);
+
+	for (int c = 0; c < vd->vdev_children; c++) {
+		vdev_t *found = vdev_lookup_by_path(vd->vdev_child[c], path);
+
+		if (found != NULL)
+			return (found);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Find the first available hole which can be used as a top-level.
+ * Returns rvd->vdev_children if no hole exists (append position).
+ */
+int
+find_vdev_hole(spa_t *spa)
+{
+	vdev_t *rvd = spa->spa_root_vdev;
+	int c;
+
+	ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
+
+	for (c = 0; c < rvd->vdev_children; c++) {
+		vdev_t *cvd = rvd->vdev_child[c];
+
+		if (cvd->vdev_ishole)
+			break;
+	}
+	return (c);
+}
+
+/*
+ * Verify that vdev_add() works as expected: either remove an existing
+ * slog (1/4 of the time when one exists) or add a new top-level vdev,
+ * which itself is a log device 1/4 of the time.
+ */
+/* ARGSUSED */
+void
+ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_t *zs = ztest_shared;
+	spa_t *spa = ztest_spa;
+	uint64_t leaves;
+	uint64_t guid;
+	nvlist_t *nvroot;
+	int error;
+
+	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+	leaves =
+	    MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
+
+	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+
+	ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
+
+	/*
+	 * If we have slogs then remove them 1/4 of the time.
+	 */
+	if (spa_has_slogs(spa) && ztest_random(4) == 0) {
+		/*
+		 * Grab the guid from the head of the log class rotor.
+		 */
+		guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
+
+		spa_config_exit(spa, SCL_VDEV, FTAG);
+
+		/*
+		 * We have to grab the zs_name_lock as writer to
+		 * prevent a race between removing a slog (dmu_objset_find)
+		 * and destroying a dataset. Removing the slog will
+		 * grab a reference on the dataset which may cause
+		 * dmu_objset_destroy() to fail with EBUSY thus
+		 * leaving the dataset in an inconsistent state.
+		 */
+		VERIFY(rw_wrlock(&ztest_name_lock) == 0);
+		error = spa_vdev_remove(spa, guid, B_FALSE);
+		VERIFY(rw_unlock(&ztest_name_lock) == 0);
+
+		if (error && error != EEXIST)
+			fatal(0, "spa_vdev_remove() = %d", error);
+	} else {
+		spa_config_exit(spa, SCL_VDEV, FTAG);
+
+		/*
+		 * Make 1/4 of the devices be log devices.
+		 */
+		nvroot = make_vdev_root(NULL, NULL, NULL,
+		    ztest_opts.zo_vdev_size, 0,
+		    ztest_random(4) == 0, ztest_opts.zo_raidz,
+		    zs->zs_mirrors, 1);
+
+		error = spa_vdev_add(spa, nvroot);
+		nvlist_free(nvroot);
+
+		if (error == ENOSPC)
+			ztest_record_enospc("spa_vdev_add");
+		else if (error != 0)
+			fatal(0, "spa_vdev_add() = %d", error);
+	}
+
+	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+}
+
+/*
+ * Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
+ */
+/* ARGSUSED */
+void
+ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_t *zs = ztest_shared;
+	spa_t *spa = ztest_spa;
+	vdev_t *rvd = spa->spa_root_vdev;
+	spa_aux_vdev_t *sav;
+	char *aux;
+	uint64_t guid = 0;	/* 0 means "add a new device" below */
+	int error;
+
+	/* Flip a coin: exercise either the spares or the l2arc aux list. */
+	if (ztest_random(2) == 0) {
+		sav = &spa->spa_spares;
+		aux = ZPOOL_CONFIG_SPARES;
+	} else {
+		sav = &spa->spa_l2cache;
+		aux = ZPOOL_CONFIG_L2CACHE;
+	}
+
+	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+
+	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+
+	if (sav->sav_count != 0 && ztest_random(4) == 0) {
+		/*
+		 * Pick a random device to remove.
+		 */
+		guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
+	} else {
+		/*
+		 * Find an unused device we can add.
+		 */
+		zs->zs_vdev_aux = 0;
+		for (;;) {
+			char path[MAXPATHLEN];
+			int c;
+			(void) snprintf(path, sizeof (path), ztest_aux_template,
+			    ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
+			    zs->zs_vdev_aux);
+			/*
+			 * The candidate path is free only if it is neither
+			 * in the aux list nor anywhere in the main vdev tree.
+			 */
+			for (c = 0; c < sav->sav_count; c++)
+				if (strcmp(sav->sav_vdevs[c]->vdev_path,
+				    path) == 0)
+					break;
+			if (c == sav->sav_count &&
+			    vdev_lookup_by_path(rvd, path) == NULL)
+				break;
+			zs->zs_vdev_aux++;
+		}
+	}
+
+	spa_config_exit(spa, SCL_VDEV, FTAG);
+
+	if (guid == 0) {
+		/*
+		 * Add a new device.
+		 */
+		nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
+		    (ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
+		error = spa_vdev_add(spa, nvroot);
+		if (error != 0)
+			fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
+		nvlist_free(nvroot);
+	} else {
+		/*
+		 * Remove an existing device.  Sometimes, dirty its
+		 * vdev state first to make sure we handle removal
+		 * of devices that have pending state changes.
+		 */
+		if (ztest_random(2) == 0)
+			(void) vdev_online(spa, guid, 0, NULL);
+
+		/* EBUSY is tolerated: the device may be in active use. */
+		error = spa_vdev_remove(spa, guid, B_FALSE);
+		if (error != 0 && error != EBUSY)
+			fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
+	}
+
+	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+}
+
+/*
+ * split a pool if it has mirror tlvdevs
+ */
+/* ARGSUSED */
+void
+ztest_split_pool(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_t *zs = ztest_shared;
+	spa_t *spa = ztest_spa;
+	vdev_t *rvd = spa->spa_root_vdev;
+	nvlist_t *tree, **child, *config, *split, **schild;
+	uint_t c, children, schildren = 0, lastlogid = 0;
+	int error = 0;
+
+	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+
+	/* ensure we have a useable config; mirrors of raidz aren't supported */
+	if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
+		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+		return;
+	}
+
+	/* clean up the old pool, if any */
+	(void) spa_destroy("splitp");
+
+	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+
+	/* generate a config from the existing config */
+	mutex_enter(&spa->spa_props_lock);
+	VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
+	    &tree) == 0);
+	mutex_exit(&spa->spa_props_lock);
+
+	VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
+	    &children) == 0);
+
+	/* one split-config entry per current top-level vdev */
+	schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
+	for (c = 0; c < children; c++) {
+		vdev_t *tvd = rvd->vdev_child[c];
+		nvlist_t **mchild;
+		uint_t mchildren;
+
+		/*
+		 * Log and hole top-levels cannot be split off; represent
+		 * them as holes in the split config.  lastlogid remembers
+		 * where the first trailing hole/log run begins so it can
+		 * be trimmed from the child count below.
+		 */
+		if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
+			VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
+			    0) == 0);
+			VERIFY(nvlist_add_string(schild[schildren],
+			    ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
+			VERIFY(nvlist_add_uint64(schild[schildren],
+			    ZPOOL_CONFIG_IS_HOLE, 1) == 0);
+			if (lastlogid == 0)
+				lastlogid = schildren;
+			++schildren;
+			continue;
+		}
+		lastlogid = 0;
+		/* take the first side (mchild[0]) of each mirror */
+		VERIFY(nvlist_lookup_nvlist_array(child[c],
+		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
+		VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
+	}
+
+	/* OK, create a config that can be used to split */
+	VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
+	    VDEV_TYPE_ROOT) == 0);
+	VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
+	    lastlogid != 0 ? lastlogid : schildren) == 0);
+
+	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
+	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
+
+	for (c = 0; c < schildren; c++)
+		nvlist_free(schild[c]);
+	free(schild);
+	nvlist_free(split);
+
+	spa_config_exit(spa, SCL_VDEV, FTAG);
+
+	/* name lock serializes the split against dataset create/destroy */
+	(void) rw_wrlock(&ztest_name_lock);
+	error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
+	(void) rw_unlock(&ztest_name_lock);
+
+	nvlist_free(config);
+
+	if (error == 0) {
+		(void) printf("successful split - results:\n");
+		mutex_enter(&spa_namespace_lock);
+		show_pool_stats(spa);
+		show_pool_stats(spa_lookup("splitp"));
+		mutex_exit(&spa_namespace_lock);
+		++zs->zs_splits;
+		--zs->zs_mirrors;
+	}
+	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+
+}
+
+/*
+ * Verify that we can attach and detach devices.
+ */
+/* ARGSUSED */
+void
+ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_t *zs = ztest_shared;
+	spa_t *spa = ztest_spa;
+	spa_aux_vdev_t *sav = &spa->spa_spares;
+	vdev_t *rvd = spa->spa_root_vdev;
+	vdev_t *oldvd, *newvd, *pvd;
+	nvlist_t *root;
+	uint64_t leaves;
+	uint64_t leaf, top;
+	uint64_t ashift = ztest_get_ashift();
+	uint64_t oldguid, pguid;
+	size_t oldsize, newsize;
+	char oldpath[MAXPATHLEN], newpath[MAXPATHLEN];
+	int replacing;
+	int oldvd_has_siblings = B_FALSE;
+	int newvd_is_spare = B_FALSE;
+	int oldvd_is_log;
+	int error, expected_error;
+
+	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
+
+	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+
+	/*
+	 * Decide whether to do an attach or a replace.
+	 */
+	replacing = ztest_random(2);
+
+	/*
+	 * Pick a random top-level vdev.
+	 */
+	top = ztest_random_vdev_top(spa, B_TRUE);
+
+	/*
+	 * Pick a random leaf within it.
+	 */
+	leaf = ztest_random(leaves);
+
+	/*
+	 * Locate this vdev.
+	 */
+	oldvd = rvd->vdev_child[top];
+	if (zs->zs_mirrors >= 1) {
+		ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
+		ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
+		oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
+	}
+	if (ztest_opts.zo_raidz > 1) {
+		ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
+		ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
+		oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
+	}
+
+	/*
+	 * If we're already doing an attach or replace, oldvd may be a
+	 * mirror vdev -- in which case, pick a random child.
+	 */
+	while (oldvd->vdev_children != 0) {
+		oldvd_has_siblings = B_TRUE;
+		ASSERT(oldvd->vdev_children >= 2);
+		oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
+	}
+
+	/* Capture everything we need before dropping SCL_VDEV. */
+	oldguid = oldvd->vdev_guid;
+	oldsize = vdev_get_min_asize(oldvd);
+	oldvd_is_log = oldvd->vdev_top->vdev_islog;
+	/* NOTE(review): assumes vdev_path fits in MAXPATHLEN — confirm */
+	(void) strcpy(oldpath, oldvd->vdev_path);
+	pvd = oldvd->vdev_parent;
+	pguid = pvd->vdev_guid;
+
+	/*
+	 * If oldvd has siblings, then half of the time, detach it.
+	 */
+	if (oldvd_has_siblings && ztest_random(2) == 0) {
+		spa_config_exit(spa, SCL_VDEV, FTAG);
+		error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
+		if (error != 0 && error != ENODEV && error != EBUSY &&
+		    error != ENOTSUP)
+			fatal(0, "detach (%s) returned %d", oldpath, error);
+		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+		return;
+	}
+
+	/*
+	 * For the new vdev, choose with equal probability between the two
+	 * standard paths (ending in either 'a' or 'b') or a random hot spare.
+	 */
+	if (sav->sav_count != 0 && ztest_random(3) == 0) {
+		newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
+		newvd_is_spare = B_TRUE;
+		(void) strcpy(newpath, newvd->vdev_path);
+	} else {
+		(void) snprintf(newpath, sizeof (newpath), ztest_dev_template,
+		    ztest_opts.zo_dir, ztest_opts.zo_pool,
+		    top * leaves + leaf);
+		if (ztest_random(2) == 0)
+			newpath[strlen(newpath) - 1] = 'b';
+		newvd = vdev_lookup_by_path(rvd, newpath);
+	}
+
+	if (newvd) {
+		newsize = vdev_get_min_asize(newvd);
+	} else {
+		/*
+		 * Make newsize a little bigger or smaller than oldsize.
+		 * If it's smaller, the attach should fail.
+		 * If it's larger, and we're doing a replace,
+		 * we should get dynamic LUN growth when we're done.
+		 */
+		newsize = 10 * oldsize / (9 + ztest_random(3));
+	}
+
+	/*
+	 * If pvd is not a mirror or root, the attach should fail with ENOTSUP,
+	 * unless it's a replace; in that case any non-replacing parent is OK.
+	 *
+	 * If newvd is already part of the pool, it should fail with EBUSY.
+	 *
+	 * If newvd is too small, it should fail with EOVERFLOW.
+	 */
+	if (pvd->vdev_ops != &vdev_mirror_ops &&
+	    pvd->vdev_ops != &vdev_root_ops && (!replacing ||
+	    pvd->vdev_ops == &vdev_replacing_ops ||
+	    pvd->vdev_ops == &vdev_spare_ops))
+		expected_error = ENOTSUP;
+	else if (newvd_is_spare && (!replacing || oldvd_is_log))
+		expected_error = ENOTSUP;
+	else if (newvd == oldvd)
+		expected_error = replacing ? 0 : EBUSY;
+	else if (vdev_lookup_by_path(rvd, newpath) != NULL)
+		expected_error = EBUSY;
+	else if (newsize < oldsize)
+		expected_error = EOVERFLOW;
+	else if (ashift > oldvd->vdev_top->vdev_ashift)
+		expected_error = EDOM;
+	else
+		expected_error = 0;
+
+	spa_config_exit(spa, SCL_VDEV, FTAG);
+
+	/*
+	 * Build the nvlist describing newpath.
+	 */
+	root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
+	    ashift, 0, 0, 0, 1);
+
+	error = spa_vdev_attach(spa, oldguid, root, replacing);
+
+	nvlist_free(root);
+
+	/*
+	 * If our parent was the replacing vdev, but the replace completed,
+	 * then instead of failing with ENOTSUP we may either succeed,
+	 * fail with ENODEV, or fail with EOVERFLOW.
+	 */
+	if (expected_error == ENOTSUP &&
+	    (error == 0 || error == ENODEV || error == EOVERFLOW))
+		expected_error = error;
+
+	/*
+	 * If someone grew the LUN, the replacement may be too small.
+	 */
+	if (error == EOVERFLOW || error == EBUSY)
+		expected_error = error;
+
+	/* XXX workaround 6690467 */
+	if (error != expected_error && expected_error != EBUSY) {
+		fatal(0, "attach (%s %llu, %s %llu, %d) "
+		    "returned %d, expected %d",
+		    oldpath, (longlong_t)oldsize, newpath,
+		    (longlong_t)newsize, replacing, error, expected_error);
+	}
+
+	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+}
+
+/*
+ * Callback function which expands the physical size of the vdev.
+ * 'arg' points to the desired new size in bytes.  Returns NULL on
+ * success (so the tree walk continues) or 'vd' to abort the walk
+ * when the backing file cannot be opened.
+ */
+vdev_t *
+grow_vdev(vdev_t *vd, void *arg)
+{
+	spa_t *spa = vd->vdev_spa;
+	size_t *newsize = arg;
+	size_t fsize;
+	int fd;
+
+	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
+	ASSERT(vd->vdev_ops->vdev_op_leaf);
+
+	if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
+		return (vd);
+
+	/*
+	 * Best-effort grow: lseek/ftruncate errors are deliberately
+	 * ignored here; a failed grow is detected later by the caller.
+	 */
+	fsize = lseek(fd, 0, SEEK_END);
+	(void) ftruncate(fd, *newsize);
+
+	if (ztest_opts.zo_verbose >= 6) {
+		(void) printf("%s grew from %lu to %lu bytes\n",
+		    vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
+	}
+	(void) close(fd);
+	return (NULL);
+}
+
+/*
+ * Callback function which expands a given vdev by calling vdev_online().
+ * Returns NULL on success, or 'vd' to abort the walk if the online
+ * failed or the config changed underneath us.
+ */
+/* ARGSUSED */
+vdev_t *
+online_vdev(vdev_t *vd, void *arg)
+{
+	spa_t *spa = vd->vdev_spa;
+	vdev_t *tvd = vd->vdev_top;
+	uint64_t guid = vd->vdev_guid;
+	/* snapshot the expected generation before we drop the lock */
+	uint64_t generation = spa->spa_config_generation + 1;
+	vdev_state_t newstate = VDEV_STATE_UNKNOWN;
+	int error;
+
+	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
+	ASSERT(vd->vdev_ops->vdev_op_leaf);
+
+	/* Calling vdev_online will initialize the new metaslabs */
+	spa_config_exit(spa, SCL_STATE, spa);
+	error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
+	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
+
+	/*
+	 * If vdev_online returned an error or the underlying vdev_open
+	 * failed then we abort the expand. The only way to know that
+	 * vdev_open fails is by checking the returned newstate.
+	 */
+	if (error || newstate != VDEV_STATE_HEALTHY) {
+		if (ztest_opts.zo_verbose >= 5) {
+			(void) printf("Unable to expand vdev, state %llu, "
+			    "error %d\n", (u_longlong_t)newstate, error);
+		}
+		return (vd);
+	}
+	ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
+
+	/*
+	 * Since we dropped the lock we need to ensure that we're
+	 * still talking to the original vdev. It's possible this
+	 * vdev may have been detached/replaced while we were
+	 * trying to online it.
+	 */
+	if (generation != spa->spa_config_generation) {
+		if (ztest_opts.zo_verbose >= 5) {
+			(void) printf("vdev configuration has changed, "
+			    "guid %llu, state %llu, expected gen %llu, "
+			    "got gen %llu\n",
+			    (u_longlong_t)guid,
+			    (u_longlong_t)tvd->vdev_state,
+			    (u_longlong_t)generation,
+			    (u_longlong_t)spa->spa_config_generation);
+		}
+		return (vd);
+	}
+	return (NULL);
+}
+
+/*
+ * Traverse the vdev tree calling the supplied function.
+ * We continue to walk the tree until we either have walked all
+ * children or we receive a non-NULL return from the callback.
+ * If a NULL callback is passed, then we just return back the first
+ * leaf vdev we encounter.
+ */
+vdev_t *
+vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
+{
+	/* The callback is only ever invoked on leaf vdevs. */
+	if (vd->vdev_ops->vdev_op_leaf) {
+		if (func == NULL)
+			return (vd);
+		else
+			return (func(vd, arg));
+	}
+
+	for (uint_t c = 0; c < vd->vdev_children; c++) {
+		vdev_t *cvd = vd->vdev_child[c];
+		if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
+			return (cvd);
+	}
+	return (NULL);
+}
+
+/*
+ * Verify that dynamic LUN growth works as expected.
+ */
+/* ARGSUSED */
+void
+ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
+{
+	spa_t *spa = ztest_spa;
+	vdev_t *vd, *tvd;
+	metaslab_class_t *mc;
+	metaslab_group_t *mg;
+	size_t psize, newsize;
+	uint64_t top;
+	uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
+
+	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
+
+	top = ztest_random_vdev_top(spa, B_TRUE);
+
+	/* Record pre-grow metaslab count and class space for comparison. */
+	tvd = spa->spa_root_vdev->vdev_child[top];
+	mg = tvd->vdev_mg;
+	mc = mg->mg_class;
+	old_ms_count = tvd->vdev_ms_count;
+	old_class_space = metaslab_class_get_space(mc);
+
+	/*
+	 * Determine the size of the first leaf vdev associated with
+	 * our top-level device.
+	 */
+	vd = vdev_walk_tree(tvd, NULL, NULL);
+	ASSERT3P(vd, !=, NULL);
+	ASSERT(vd->vdev_ops->vdev_op_leaf);
+
+	psize = vd->vdev_psize;
+
+	/*
+	 * We only try to expand the vdev if it's healthy, less than 4x its
+	 * original size, and it has a valid psize.
+	 */
+	if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
+	    psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
+		spa_config_exit(spa, SCL_STATE, spa);
+		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+		return;
+	}
+	ASSERT(psize > 0);
+	/* Grow by 1/8th of the current physical size. */
+	newsize = psize + psize / 8;
+	ASSERT3U(newsize, >, psize);
+
+	if (ztest_opts.zo_verbose >= 6) {
+		(void) printf("Expanding LUN %s from %lu to %lu\n",
+		    vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
+	}
+
+	/*
+	 * Growing the vdev is a two step process:
+	 *	1). expand the physical size (i.e. relabel)
+	 *	2). online the vdev to create the new metaslabs
+	 */
+	if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
+	    vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
+	    tvd->vdev_state != VDEV_STATE_HEALTHY) {
+		if (ztest_opts.zo_verbose >= 5) {
+			(void) printf("Could not expand LUN because "
+			    "the vdev configuration changed.\n");
+		}
+		spa_config_exit(spa, SCL_STATE, spa);
+		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+		return;
+	}
+
+	spa_config_exit(spa, SCL_STATE, spa);
+
+	/*
+	 * Expanding the LUN will update the config asynchronously,
+	 * thus we must wait for the async thread to complete any
+	 * pending tasks before proceeding.
+	 */
+	for (;;) {
+		boolean_t done;
+		mutex_enter(&spa->spa_async_lock);
+		done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
+		mutex_exit(&spa->spa_async_lock);
+		if (done)
+			break;
+		txg_wait_synced(spa_get_dsl(spa), 0);
+		(void) poll(NULL, 0, 100);
+	}
+
+	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
+
+	/* Re-fetch the top-level vdev; the config may have been rewritten. */
+	tvd = spa->spa_root_vdev->vdev_child[top];
+	new_ms_count = tvd->vdev_ms_count;
+	new_class_space = metaslab_class_get_space(mc);
+
+	if (tvd->vdev_mg != mg || mg->mg_class != mc) {
+		if (ztest_opts.zo_verbose >= 5) {
+			(void) printf("Could not verify LUN expansion due to "
+			    "intervening vdev offline or remove.\n");
+		}
+		spa_config_exit(spa, SCL_STATE, spa);
+		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+		return;
+	}
+
+	/*
+	 * Make sure we were able to grow the vdev.
+	 */
+	if (new_ms_count <= old_ms_count)
+		fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
+		    old_ms_count, new_ms_count);
+
+	/*
+	 * Make sure we were able to grow the pool.
+	 */
+	if (new_class_space <= old_class_space)
+		fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
+		    old_class_space, new_class_space);
+
+	if (ztest_opts.zo_verbose >= 5) {
+		char oldnumbuf[6], newnumbuf[6];
+
+		nicenum(old_class_space, oldnumbuf);
+		nicenum(new_class_space, newnumbuf);
+		(void) printf("%s grew from %s to %s\n",
+		    spa->spa_name, oldnumbuf, newnumbuf);
+	}
+
+	spa_config_exit(spa, SCL_STATE, spa);
+	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+}
+
+/*
+ * Verify that dmu_objset_{create,destroy,open,close} work as expected.
+ */
+/* ARGSUSED */
+static void
+ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
+{
+	/*
+	 * Create the objects common to all ztest datasets.
+	 * ZTEST_DIROBJ is a ZAP claimed at a fixed object number so
+	 * every ztest dataset has a well-known directory object.
+	 */
+	VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
+	    DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
+}
+
+/*
+ * Create a ztest dataset named 'dsname'; 20% of the time (zilset >= 80)
+ * also set sync=always on it to exercise the synchronous ZIL path.
+ * Returns 0 or the error from dmu_objset_create()/property set.
+ */
+static int
+ztest_dataset_create(char *dsname)
+{
+	uint64_t zilset = ztest_random(100);
+	int err = dmu_objset_create(dsname, DMU_OST_OTHER, 0,
+	    ztest_objset_create_cb, NULL);
+
+	if (err || zilset < 80)
+		return (err);
+
+	if (ztest_opts.zo_verbose >= 6)
+		(void) printf("Setting dataset %s to sync always\n", dsname);
+	return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
+	    ZFS_SYNC_ALWAYS, B_FALSE));
+}
+
+/*
+ * dmu_objset_find() callback: sanity-check a dataset's directory object,
+ * then destroy the dataset.  Always returns 0 so the find continues.
+ */
+/* ARGSUSED */
+static int
+ztest_objset_destroy_cb(const char *name, void *arg)
+{
+	objset_t *os;
+	dmu_object_info_t doi;
+	int error;
+
+	/*
+	 * Verify that the dataset contains a directory object.
+	 */
+	VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os));
+	error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
+	if (error != ENOENT) {
+		/* We could have crashed in the middle of destroying it */
+		ASSERT0(error);
+		ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
+		ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
+	}
+	dmu_objset_rele(os, FTAG);
+
+	/*
+	 * Destroy the dataset.
+	 */
+	VERIFY3U(0, ==, dmu_objset_destroy(name, B_FALSE));
+	return (0);
+}
+
+/*
+ * Create the snapshot "<osname>@<id>".  Returns B_TRUE on success or
+ * if the snapshot already exists; B_FALSE (after recording ENOSPC) if
+ * the pool is out of space.  Any other error is fatal.
+ */
+static boolean_t
+ztest_snapshot_create(char *osname, uint64_t id)
+{
+	char snapname[MAXNAMELEN];
+	int error;
+
+	(void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
+	    (u_longlong_t)id);
+
+	error = dmu_objset_snapshot(osname, strchr(snapname, '@') + 1,
+	    NULL, NULL, B_FALSE, B_FALSE, -1);
+	if (error == ENOSPC) {
+		ztest_record_enospc(FTAG);
+		return (B_FALSE);
+	}
+	if (error != 0 && error != EEXIST)
+		fatal(0, "ztest_snapshot_create(%s) = %d", snapname, error);
+	return (B_TRUE);
+}
+
+/*
+ * Destroy the snapshot "<osname>@<id>".  A missing snapshot (ENOENT)
+ * is fine; any other error is fatal.  Always returns B_TRUE.
+ */
+static boolean_t
+ztest_snapshot_destroy(char *osname, uint64_t id)
+{
+	char snapname[MAXNAMELEN];
+	int error;
+
+	(void) snprintf(snapname, MAXNAMELEN, "%s@%llu", osname,
+	    (u_longlong_t)id);
+
+	error = dmu_objset_destroy(snapname, B_FALSE);
+	if (error != 0 && error != ENOENT)
+		fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
+	return (B_TRUE);
+}
+
+/*
+ * Exercise dataset create/destroy/own/hold: replay any leftover log,
+ * destroy prior instances, create a fresh dataset, do some I/O and
+ * snapshots on it, then verify the EEXIST/EBUSY ownership invariants.
+ */
+/* ARGSUSED */
+void
+ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_ds_t zdtmp;
+	int iters;
+	int error;
+	objset_t *os, *os2;
+	char name[MAXNAMELEN];
+	zilog_t *zilog;
+
+	(void) rw_rdlock(&ztest_name_lock);
+
+	(void) snprintf(name, MAXNAMELEN, "%s/temp_%llu",
+	    ztest_opts.zo_pool, (u_longlong_t)id);
+
+	/*
+	 * If this dataset exists from a previous run, process its replay log
+	 * half of the time.  If we don't replay it, then dmu_objset_destroy()
+	 * (invoked from ztest_objset_destroy_cb()) should just throw it away.
+	 */
+	if (ztest_random(2) == 0 &&
+	    dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os) == 0) {
+		ztest_zd_init(&zdtmp, NULL, os);
+		zil_replay(os, &zdtmp, ztest_replay_vector);
+		ztest_zd_fini(&zdtmp);
+		dmu_objset_disown(os, FTAG);
+	}
+
+	/*
+	 * There may be an old instance of the dataset we're about to
+	 * create lying around from a previous run.  If so, destroy it
+	 * and all of its snapshots.
+	 */
+	(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
+	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
+
+	/*
+	 * Verify that the destroyed dataset is no longer in the namespace.
+	 */
+	VERIFY3U(ENOENT, ==, dmu_objset_hold(name, FTAG, &os));
+
+	/*
+	 * Verify that we can create a new dataset.
+	 */
+	error = ztest_dataset_create(name);
+	if (error) {
+		if (error == ENOSPC) {
+			ztest_record_enospc(FTAG);
+			(void) rw_unlock(&ztest_name_lock);
+			return;
+		}
+		fatal(0, "dmu_objset_create(%s) = %d", name, error);
+	}
+
+	VERIFY3U(0, ==,
+	    dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os));
+
+	ztest_zd_init(&zdtmp, NULL, os);
+
+	/*
+	 * Open the intent log for it.
+	 */
+	zilog = zil_open(os, ztest_get_data);
+
+	/*
+	 * Put some objects in there, do a little I/O to them,
+	 * and randomly take a couple of snapshots along the way.
+	 */
+	iters = ztest_random(5);
+	for (int i = 0; i < iters; i++) {
+		ztest_dmu_object_alloc_free(&zdtmp, id);
+		if (ztest_random(iters) == 0)
+			(void) ztest_snapshot_create(name, i);
+	}
+
+	/*
+	 * Verify that we cannot create an existing dataset.
+	 */
+	VERIFY3U(EEXIST, ==,
+	    dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL));
+
+	/*
+	 * Verify that we can hold an objset that is also owned.
+	 */
+	VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
+	dmu_objset_rele(os2, FTAG);
+
+	/*
+	 * Verify that we cannot own an objset that is already owned.
+	 */
+	VERIFY3U(EBUSY, ==,
+	    dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, FTAG, &os2));
+
+	zil_close(zilog);
+	dmu_objset_disown(os, FTAG);
+	ztest_zd_fini(&zdtmp);
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
+ * Destroy-then-create under the name lock so each invocation leaves a
+ * fresh snapshot "<dataset>@<id>" behind.
+ */
+void
+ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
+{
+	(void) rw_rdlock(&ztest_name_lock);
+	(void) ztest_snapshot_destroy(zd->zd_name, id);
+	(void) ztest_snapshot_create(zd->zd_name, id);
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Cleanup non-standard snapshots and clones left behind by
+ * ztest_dsl_dataset_promote_busy():
+ *
+ *	<osname>@s1_<id>	snapshot of the origin
+ *	<osname>/c1_<id>	clone of s1
+ *	<osname>/c1_<id>@s2_<id> and @s3_<id>	snapshots of the clone
+ *	<osname>/c2_<id>	clone of s3
+ *
+ * Destruction proceeds in reverse dependency order (clones before the
+ * snapshots they came from).  ENOENT is expected when a previous run
+ * already cleaned up; any other error is fatal.
+ */
+void
+ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
+{
+	char snap1name[MAXNAMELEN];
+	char clone1name[MAXNAMELEN];
+	char snap2name[MAXNAMELEN];
+	char clone2name[MAXNAMELEN];
+	char snap3name[MAXNAMELEN];
+	int error;
+
+	/*
+	 * Cast to u_longlong_t to match %llu -- uint64_t is not
+	 * unsigned long long on every platform (matches the casts in
+	 * ztest_snapshot_create()).
+	 */
+	(void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
+	    osname, (u_longlong_t)id);
+	(void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
+	    osname, (u_longlong_t)id);
+	(void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
+	    clone1name, (u_longlong_t)id);
+	(void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
+	    osname, (u_longlong_t)id);
+	(void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
+	    clone1name, (u_longlong_t)id);
+
+	error = dmu_objset_destroy(clone2name, B_FALSE);
+	if (error && error != ENOENT)
+		fatal(0, "dmu_objset_destroy(%s) = %d", clone2name, error);
+	error = dmu_objset_destroy(snap3name, B_FALSE);
+	if (error && error != ENOENT)
+		fatal(0, "dmu_objset_destroy(%s) = %d", snap3name, error);
+	error = dmu_objset_destroy(snap2name, B_FALSE);
+	if (error && error != ENOENT)
+		fatal(0, "dmu_objset_destroy(%s) = %d", snap2name, error);
+	error = dmu_objset_destroy(clone1name, B_FALSE);
+	if (error && error != ENOENT)
+		fatal(0, "dmu_objset_destroy(%s) = %d", clone1name, error);
+	error = dmu_objset_destroy(snap1name, B_FALSE);
+	if (error && error != ENOENT)
+		fatal(0, "dmu_objset_destroy(%s) = %d", snap1name, error);
+}
+
+/*
+ * Verify dsl_dataset_promote handles EBUSY.
+ *
+ * Builds the chain origin@s1 -> c1 -> c1@s2, c1@s3 -> c2, then takes
+ * ownership of s2 and verifies that promoting c2 fails with EBUSY
+ * (s2 would have to migrate to the promoted clone while held).
+ */
+void
+ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
+{
+	objset_t *clone;
+	dsl_dataset_t *ds;
+	char snap1name[MAXNAMELEN];
+	char clone1name[MAXNAMELEN];
+	char snap2name[MAXNAMELEN];
+	char clone2name[MAXNAMELEN];
+	char snap3name[MAXNAMELEN];
+	char *osname = zd->zd_name;
+	int error;
+
+	(void) rw_rdlock(&ztest_name_lock);
+
+	ztest_dsl_dataset_cleanup(osname, id);
+
+	/* Cast to u_longlong_t to match %llu, as in ztest_snapshot_create(). */
+	(void) snprintf(snap1name, MAXNAMELEN, "%s@s1_%llu",
+	    osname, (u_longlong_t)id);
+	(void) snprintf(clone1name, MAXNAMELEN, "%s/c1_%llu",
+	    osname, (u_longlong_t)id);
+	(void) snprintf(snap2name, MAXNAMELEN, "%s@s2_%llu",
+	    clone1name, (u_longlong_t)id);
+	(void) snprintf(clone2name, MAXNAMELEN, "%s/c2_%llu",
+	    osname, (u_longlong_t)id);
+	(void) snprintf(snap3name, MAXNAMELEN, "%s@s3_%llu",
+	    clone1name, (u_longlong_t)id);
+
+	error = dmu_objset_snapshot(osname, strchr(snap1name, '@')+1,
+	    NULL, NULL, B_FALSE, B_FALSE, -1);
+	if (error && error != EEXIST) {
+		if (error == ENOSPC) {
+			ztest_record_enospc(FTAG);
+			goto out;
+		}
+		fatal(0, "dmu_objset_snapshot(%s) = %d", snap1name, error);
+	}
+
+	error = dmu_objset_hold(snap1name, FTAG, &clone);
+	if (error)
+		fatal(0, "dmu_objset_hold(%s) = %d", snap1name, error);
+
+	error = dmu_objset_clone(clone1name, dmu_objset_ds(clone), 0);
+	dmu_objset_rele(clone, FTAG);
+	if (error) {
+		if (error == ENOSPC) {
+			ztest_record_enospc(FTAG);
+			goto out;
+		}
+		fatal(0, "dmu_objset_clone(%s) = %d", clone1name, error);
+	}
+
+	error = dmu_objset_snapshot(clone1name, strchr(snap2name, '@')+1,
+	    NULL, NULL, B_FALSE, B_FALSE, -1);
+	if (error && error != EEXIST) {
+		if (error == ENOSPC) {
+			ztest_record_enospc(FTAG);
+			goto out;
+		}
+		fatal(0, "dmu_objset_snapshot(%s) = %d", snap2name, error);
+	}
+
+	error = dmu_objset_snapshot(clone1name, strchr(snap3name, '@')+1,
+	    NULL, NULL, B_FALSE, B_FALSE, -1);
+	if (error && error != EEXIST) {
+		if (error == ENOSPC) {
+			ztest_record_enospc(FTAG);
+			goto out;
+		}
+		fatal(0, "dmu_objset_snapshot(%s) = %d", snap3name, error);
+	}
+
+	error = dmu_objset_hold(snap3name, FTAG, &clone);
+	if (error)
+		fatal(0, "dmu_objset_hold(%s) = %d", snap3name, error);
+
+	error = dmu_objset_clone(clone2name, dmu_objset_ds(clone), 0);
+	dmu_objset_rele(clone, FTAG);
+	if (error) {
+		if (error == ENOSPC) {
+			ztest_record_enospc(FTAG);
+			goto out;
+		}
+		fatal(0, "dmu_objset_clone(%s) = %d", clone2name, error);
+	}
+
+	/* Own s2 so the promote below is guaranteed to hit EBUSY. */
+	error = dsl_dataset_own(snap2name, B_FALSE, FTAG, &ds);
+	if (error)
+		fatal(0, "dsl_dataset_own(%s) = %d", snap2name, error);
+	error = dsl_dataset_promote(clone2name, NULL);
+	if (error != EBUSY)
+		fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
+		    error);
+	dsl_dataset_disown(ds, FTAG);
+
+out:
+	ztest_dsl_dataset_cleanup(osname, id);
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Verify that dmu_object_{alloc,free} work as expected.
+ */
+void
+ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_od_t od[4];
+	int batchsize = sizeof (od) / sizeof (od[0]);
+
+	for (int b = 0; b < batchsize; b++)
+		ztest_od_init(&od[b], id, FTAG, b, DMU_OT_UINT64_OTHER, 0, 0);
+
+	/*
+	 * Destroy the previous batch of objects, create a new batch,
+	 * and do some I/O on the new objects.
+	 */
+	if (ztest_object_init(zd, od, sizeof (od), B_TRUE) != 0)
+		return;
+
+	/* Geometric number of I/Os (avg 4*batchsize) at random offsets. */
+	while (ztest_random(4 * batchsize) != 0)
+		ztest_io(zd, od[ztest_random(batchsize)].od_object,
+		    ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
+}
+
+/*
+ * Verify that dmu_{read,write} work as expected.
+ */
+void
+ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
+{
+	objset_t *os = zd->zd_os;
+	ztest_od_t od[2];
+	dmu_tx_t *tx;
+	int i, freeit, error;
+	uint64_t n, s, txg;
+	bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
+	uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
+	uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
+	uint64_t regions = 997;
+	uint64_t stride = 123456789ULL;
+	uint64_t width = 40;
+	int free_percent = 5;
+
+	/*
+	 * This test uses two objects, packobj and bigobj, that are always
+	 * updated together (i.e. in the same tx) so that their contents are
+	 * in sync and can be compared.  Their contents relate to each other
+	 * in a simple way: packobj is a dense array of 'bufwad' structures,
+	 * while bigobj is a sparse array of the same bufwads.  Specifically,
+	 * for any index n, there are three bufwads that should be identical:
+	 *
+	 *	packobj, at offset n * sizeof (bufwad_t)
+	 *	bigobj, at the head of the nth chunk
+	 *	bigobj, at the tail of the nth chunk
+	 *
+	 * The chunk size is arbitrary. It doesn't have to be a power of two,
+	 * and it doesn't have any relation to the object blocksize.
+	 * The only requirement is that it can hold at least two bufwads.
+	 *
+	 * Normally, we write the bufwad to each of these locations.
+	 * However, free_percent of the time we instead write zeroes to
+	 * packobj and perform a dmu_free_range() on bigobj.  By comparing
+	 * bigobj to packobj, we can verify that the DMU is correctly
+	 * tracking which parts of an object are allocated and free,
+	 * and that the contents of the allocated blocks are correct.
+	 */
+
+	/*
+	 * Read the directory info.  If it's the first time, set things up.
+	 */
+	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, chunksize);
+	ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);
+
+	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+		return;
+
+	bigobj = od[0].od_object;
+	packobj = od[1].od_object;
+	chunksize = od[0].od_gen;
+	ASSERT(chunksize == od[1].od_gen);
+
+	/*
+	 * Prefetch a random chunk of the big object.
+	 * Our aim here is to get some async reads in flight
+	 * for blocks that we may free below; the DMU should
+	 * handle this race correctly.
+	 */
+	n = ztest_random(regions) * stride + ztest_random(width);
+	s = 1 + ztest_random(2 * width - 1);
+	dmu_prefetch(os, bigobj, n * chunksize, s * chunksize);
+
+	/*
+	 * Pick a random index and compute the offsets into packobj and bigobj.
+	 */
+	n = ztest_random(regions) * stride + ztest_random(width);
+	s = 1 + ztest_random(width - 1);
+
+	packoff = n * sizeof (bufwad_t);
+	packsize = s * sizeof (bufwad_t);
+
+	bigoff = n * chunksize;
+	bigsize = s * chunksize;
+
+	packbuf = umem_alloc(packsize, UMEM_NOFAIL);
+	bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
+
+	/*
+	 * free_percent of the time, free a range of bigobj rather than
+	 * overwriting it.
+	 */
+	freeit = (ztest_random(100) < free_percent);
+
+	/*
+	 * Read the current contents of our objects.
+	 */
+	error = dmu_read(os, packobj, packoff, packsize, packbuf,
+	    DMU_READ_PREFETCH);
+	ASSERT0(error);
+	error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
+	    DMU_READ_PREFETCH);
+	ASSERT0(error);
+
+	/*
+	 * Get a tx for the mods to both packobj and bigobj.
+	 */
+	tx = dmu_tx_create(os);
+
+	dmu_tx_hold_write(tx, packobj, packoff, packsize);
+
+	if (freeit)
+		dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
+	else
+		dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
+
+	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	if (txg == 0) {
+		umem_free(packbuf, packsize);
+		umem_free(bigbuf, bigsize);
+		return;
+	}
+
+	dmu_object_set_checksum(os, bigobj,
+	    (enum zio_checksum)ztest_random_dsl_prop(ZFS_PROP_CHECKSUM), tx);
+
+	dmu_object_set_compress(os, bigobj,
+	    (enum zio_compress)ztest_random_dsl_prop(ZFS_PROP_COMPRESSION), tx);
+
+	/*
+	 * For each index from n to n + s, verify that the existing bufwad
+	 * in packobj matches the bufwads at the head and tail of the
+	 * corresponding chunk in bigobj.  Then update all three bufwads
+	 * with the new values we want to write out.
+	 */
+	for (i = 0; i < s; i++) {
+		/* LINTED */
+		pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
+		/* LINTED */
+		bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
+		/* LINTED */
+		bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
+
+		ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
+		ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
+
+		if (pack->bw_txg > txg)
+			fatal(0, "future leak: got %llx, open txg is %llx",
+			    pack->bw_txg, txg);
+
+		if (pack->bw_data != 0 && pack->bw_index != n + i)
+			fatal(0, "wrong index: got %llx, wanted %llx+%llx",
+			    pack->bw_index, n, i);
+
+		if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
+			fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
+
+		if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
+			fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
+
+		if (freeit) {
+			bzero(pack, sizeof (bufwad_t));
+		} else {
+			pack->bw_index = n + i;
+			pack->bw_txg = txg;
+			pack->bw_data = 1 + ztest_random(-2ULL);
+		}
+		*bigH = *pack;
+		*bigT = *pack;
+	}
+
+	/*
+	 * We've verified all the old bufwads, and made new ones.
+	 * Now write them out.
+	 */
+	dmu_write(os, packobj, packoff, packsize, packbuf, tx);
+
+	if (freeit) {
+		if (ztest_opts.zo_verbose >= 7) {
+			(void) printf("freeing offset %llx size %llx"
+			    " txg %llx\n",
+			    (u_longlong_t)bigoff,
+			    (u_longlong_t)bigsize,
+			    (u_longlong_t)txg);
+		}
+		VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
+	} else {
+		if (ztest_opts.zo_verbose >= 7) {
+			(void) printf("writing offset %llx size %llx"
+			    " txg %llx\n",
+			    (u_longlong_t)bigoff,
+			    (u_longlong_t)bigsize,
+			    (u_longlong_t)txg);
+		}
+		dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
+	}
+
+	dmu_tx_commit(tx);
+
+	/*
+	 * Sanity check the stuff we just wrote.
+	 */
+	{
+		void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
+		void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
+
+		VERIFY(0 == dmu_read(os, packobj, packoff,
+		    packsize, packcheck, DMU_READ_PREFETCH));
+		VERIFY(0 == dmu_read(os, bigobj, bigoff,
+		    bigsize, bigcheck, DMU_READ_PREFETCH));
+
+		ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
+		ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
+
+		umem_free(packcheck, packsize);
+		umem_free(bigcheck, bigsize);
+	}
+
+	umem_free(packbuf, packsize);
+	umem_free(bigbuf, bigsize);
+}
+
+void
+compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
+    uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
+{
+	uint64_t i;
+	bufwad_t *pack;
+	bufwad_t *bigH;
+	bufwad_t *bigT;
+
+	/*
+	 * For each index from n to n + s, verify that the existing bufwad
+	 * in packobj matches the bufwads at the head and tail of the
+	 * corresponding chunk in bigobj.  Then update all three bufwads
+	 * with the new values we want to write out.
+	 */
+	for (i = 0; i < s; i++) {
+		/* LINTED */
+		pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
+		/* LINTED */
+		bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
+		/* LINTED */
+		bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
+
+		ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
+		ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
+
+		if (pack->bw_txg > txg)
+			fatal(0, "future leak: got %llx, open txg is %llx",
+			    pack->bw_txg, txg);
+
+		if (pack->bw_data != 0 && pack->bw_index != n + i)
+			fatal(0, "wrong index: got %llx, wanted %llx+%llx",
+			    pack->bw_index, n, i);
+
+		if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
+			fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
+
+		if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
+			fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
+
+		pack->bw_index = n + i;
+		pack->bw_txg = txg;
+		pack->bw_data = 1 + ztest_random(-2ULL);
+
+		*bigH = *pack;
+		*bigT = *pack;
+	}
+}
+
/*
 * Verify dmu_assign_arcbuf()-based ("zero-copy") writes.  Uses the same
 * packobj/bigobj consistency scheme as the plain read/write test, but
 * pushes the bigobj data through loaned arc bufs instead of dmu_write(),
 * cycling through the dbuf states enumerated below.
 */
void
ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
{
	objset_t *os = zd->zd_os;
	ztest_od_t od[2];
	dmu_tx_t *tx;
	uint64_t i;
	int error;
	uint64_t n, s, txg;
	bufwad_t *packbuf, *bigbuf;
	uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
	uint64_t blocksize = ztest_random_blocksize();
	uint64_t chunksize = blocksize;
	uint64_t regions = 997;
	uint64_t stride = 123456789ULL;
	uint64_t width = 9;
	dmu_buf_t *bonus_db;
	arc_buf_t **bigbuf_arcbufs;
	dmu_object_info_t doi;

	/*
	 * This test uses two objects, packobj and bigobj, that are always
	 * updated together (i.e. in the same tx) so that their contents are
	 * in sync and can be compared.  Their contents relate to each other
	 * in a simple way: packobj is a dense array of 'bufwad' structures,
	 * while bigobj is a sparse array of the same bufwads.  Specifically,
	 * for any index n, there are three bufwads that should be identical:
	 *
	 *	packobj, at offset n * sizeof (bufwad_t)
	 *	bigobj, at the head of the nth chunk
	 *	bigobj, at the tail of the nth chunk
	 *
	 * The chunk size is set equal to bigobj block size so that
	 * dmu_assign_arcbuf() can be tested for object updates.
	 */

	/*
	 * Read the directory info.  If it's the first time, set things up.
	 */
	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
	ztest_od_init(&od[1], id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, chunksize);

	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
		return;

	bigobj = od[0].od_object;
	packobj = od[1].od_object;
	blocksize = od[0].od_blocksize;
	chunksize = blocksize;
	ASSERT(chunksize == od[1].od_gen);

	/* chunksize must exactly match bigobj's (power-of-two) block size. */
	VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
	VERIFY(ISP2(doi.doi_data_block_size));
	VERIFY(chunksize == doi.doi_data_block_size);
	VERIFY(chunksize >= 2 * sizeof (bufwad_t));

	/*
	 * Pick a random index and compute the offsets into packobj and bigobj.
	 */
	n = ztest_random(regions) * stride + ztest_random(width);
	s = 1 + ztest_random(width - 1);

	packoff = n * sizeof (bufwad_t);
	packsize = s * sizeof (bufwad_t);

	bigoff = n * chunksize;
	bigsize = s * chunksize;

	packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
	bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);

	/*
	 * Hold the bonus buffer: it is the handle that
	 * dmu_request_arcbuf() and dmu_assign_arcbuf() operate on below.
	 */
	VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));

	/* 2 * s slots: iteration 5 uses two half-size bufs per chunk. */
	bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);

	/*
	 * Iteration 0 test zcopy for DB_UNCACHED dbufs.
	 * Iteration 1 test zcopy to already referenced dbufs.
	 * Iteration 2 test zcopy to dirty dbuf in the same txg.
	 * Iteration 3 test zcopy to dbuf dirty in previous txg.
	 * Iteration 4 test zcopy when dbuf is no longer dirty.
	 * Iteration 5 test zcopy when it can't be done.
	 * Iteration 6 one more zcopy write.
	 */
	for (i = 0; i < 7; i++) {
		uint64_t j;
		uint64_t off;

		/*
		 * In iteration 5 (i == 5) use arcbufs
		 * that don't match bigobj blksz to test
		 * dmu_assign_arcbuf() when it can't directly
		 * assign an arcbuf to a dbuf.
		 */
		for (j = 0; j < s; j++) {
			if (i != 5) {
				bigbuf_arcbufs[j] =
				    dmu_request_arcbuf(bonus_db, chunksize);
			} else {
				bigbuf_arcbufs[2 * j] =
				    dmu_request_arcbuf(bonus_db, chunksize / 2);
				bigbuf_arcbufs[2 * j + 1] =
				    dmu_request_arcbuf(bonus_db, chunksize / 2);
			}
		}

		/*
		 * Get a tx for the mods to both packobj and bigobj.
		 */
		tx = dmu_tx_create(os);

		dmu_tx_hold_write(tx, packobj, packoff, packsize);
		dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);

		txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
		if (txg == 0) {
			/*
			 * No txg available: return every loaned arc buf
			 * and release all our holds before bailing out.
			 */
			umem_free(packbuf, packsize);
			umem_free(bigbuf, bigsize);
			for (j = 0; j < s; j++) {
				if (i != 5) {
					dmu_return_arcbuf(bigbuf_arcbufs[j]);
				} else {
					dmu_return_arcbuf(
					    bigbuf_arcbufs[2 * j]);
					dmu_return_arcbuf(
					    bigbuf_arcbufs[2 * j + 1]);
				}
			}
			umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
			dmu_buf_rele(bonus_db, FTAG);
			return;
		}

		/*
		 * 50% of the time don't read objects in the 1st iteration to
		 * test dmu_assign_arcbuf() for the case when there're no
		 * existing dbufs for the specified offsets.
		 */
		if (i != 0 || ztest_random(2) != 0) {
			error = dmu_read(os, packobj, packoff,
			    packsize, packbuf, DMU_READ_PREFETCH);
			ASSERT0(error);
			error = dmu_read(os, bigobj, bigoff, bigsize,
			    bigbuf, DMU_READ_PREFETCH);
			ASSERT0(error);
		}
		compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
		    n, chunksize, txg);

		/*
		 * We've verified all the old bufwads, and made new ones.
		 * Now write them out.
		 */
		dmu_write(os, packobj, packoff, packsize, packbuf, tx);
		if (ztest_opts.zo_verbose >= 7) {
			(void) printf("writing offset %llx size %llx"
			    " txg %llx\n",
			    (u_longlong_t)bigoff,
			    (u_longlong_t)bigsize,
			    (u_longlong_t)txg);
		}
		for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
			dmu_buf_t *dbt;
			if (i != 5) {
				bcopy((caddr_t)bigbuf + (off - bigoff),
				    bigbuf_arcbufs[j]->b_data, chunksize);
			} else {
				bcopy((caddr_t)bigbuf + (off - bigoff),
				    bigbuf_arcbufs[2 * j]->b_data,
				    chunksize / 2);
				bcopy((caddr_t)bigbuf + (off - bigoff) +
				    chunksize / 2,
				    bigbuf_arcbufs[2 * j + 1]->b_data,
				    chunksize / 2);
			}

			/*
			 * Iteration 1: take an extra dbuf hold so the
			 * assignment targets an already-referenced dbuf.
			 */
			if (i == 1) {
				VERIFY(dmu_buf_hold(os, bigobj, off,
				    FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
			}
			if (i != 5) {
				dmu_assign_arcbuf(bonus_db, off,
				    bigbuf_arcbufs[j], tx);
			} else {
				dmu_assign_arcbuf(bonus_db, off,
				    bigbuf_arcbufs[2 * j], tx);
				dmu_assign_arcbuf(bonus_db,
				    off + chunksize / 2,
				    bigbuf_arcbufs[2 * j + 1], tx);
			}
			if (i == 1) {
				dmu_buf_rele(dbt, FTAG);
			}
		}
		dmu_tx_commit(tx);

		/*
		 * Sanity check the stuff we just wrote.
		 */
		{
			void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
			void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);

			VERIFY(0 == dmu_read(os, packobj, packoff,
			    packsize, packcheck, DMU_READ_PREFETCH));
			VERIFY(0 == dmu_read(os, bigobj, bigoff,
			    bigsize, bigcheck, DMU_READ_PREFETCH));

			ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
			ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);

			umem_free(packcheck, packsize);
			umem_free(bigcheck, bigsize);
		}
		/*
		 * Advance txg state so the next pass sees the dbuf in the
		 * state its iteration is meant to test (dirty in a previous
		 * txg after i == 2, fully synced after i == 3).
		 */
		if (i == 2) {
			txg_wait_open(dmu_objset_pool(os), 0);
		} else if (i == 3) {
			txg_wait_synced(dmu_objset_pool(os), 0);
		}
	}

	dmu_buf_rele(bonus_db, FTAG);
	umem_free(packbuf, packsize);
	umem_free(bigbuf, bigsize);
	umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
}
+
+/* ARGSUSED */
+void
+ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_od_t od[1];
+	uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
+	    (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
+
+	/*
+	 * Have multiple threads write to large offsets in an object
+	 * to verify that parallel writes to an object -- even to the
+	 * same blocks within the object -- doesn't cause any trouble.
+	 */
+	ztest_od_init(&od[0], ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+
+	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+		return;
+
+	while (ztest_random(10) != 0)
+		ztest_io(zd, od[0].od_object, offset);
+}
+
+void
+ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_od_t od[1];
+	uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
+	    (ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
+	uint64_t count = ztest_random(20) + 1;
+	uint64_t blocksize = ztest_random_blocksize();
+	void *data;
+
+	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+
+	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
+		return;
+
+	if (ztest_truncate(zd, od[0].od_object, offset, count * blocksize) != 0)
+		return;
+
+	ztest_prealloc(zd, od[0].od_object, offset, count * blocksize);
+
+	data = umem_zalloc(blocksize, UMEM_NOFAIL);
+
+	while (ztest_random(count) != 0) {
+		uint64_t randoff = offset + (ztest_random(count) * blocksize);
+		if (ztest_write(zd, od[0].od_object, randoff, blocksize,
+		    data) != 0)
+			break;
+		while (ztest_random(4) != 0)
+			ztest_io(zd, od[0].od_object, randoff);
+	}
+
+	umem_free(data, blocksize);
+}
+
+/*
+ * Verify that zap_{create,destroy,add,remove,update} work as expected.
+ */
+#define	ZTEST_ZAP_MIN_INTS	1
+#define	ZTEST_ZAP_MAX_INTS	4
+#define	ZTEST_ZAP_MAX_PROPS	1000
+
/*
 * Exercise zap add/remove/update/length/lookup: first with a known
 * hash-collision pair, then with a randomly named txg_%llu/prop_%llu
 * entry pair that is validated and atomically updated, and finally by
 * removing a random pair.
 */
void
ztest_zap(ztest_ds_t *zd, uint64_t id)
{
	objset_t *os = zd->zd_os;
	ztest_od_t od[1];
	uint64_t object;
	uint64_t txg, last_txg;
	uint64_t value[ZTEST_ZAP_MAX_INTS];
	uint64_t zl_ints, zl_intsize, prop;
	int i, ints;
	dmu_tx_t *tx;
	char propname[100], txgname[100];
	int error;
	/* Two names known to collide in the zap hash. */
	char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };

	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);

	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
		return;

	object = od[0].od_object;

	/*
	 * Generate a known hash collision, and verify that
	 * we can lookup and remove both entries.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
	if (txg == 0)
		return;
	for (i = 0; i < 2; i++) {
		value[i] = i;
		VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
		    1, &value[i], tx));
	}
	for (i = 0; i < 2; i++) {
		/* Re-adding must fail, and both entries must be findable. */
		VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
		    sizeof (uint64_t), 1, &value[i], tx));
		VERIFY3U(0, ==,
		    zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
		ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
		ASSERT3U(zl_ints, ==, 1);
	}
	for (i = 0; i < 2; i++) {
		VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
	}
	dmu_tx_commit(tx);

	/*
	 * Generate a bunch of random entries.
	 */
	ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);

	/* Both entry names are keyed by the same random prop index. */
	prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
	(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
	(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
	bzero(value, sizeof (value));
	last_txg = 0;

	/*
	 * If these zap entries already exist, validate their contents.
	 */
	error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
	if (error == 0) {
		ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
		ASSERT3U(zl_ints, ==, 1);

		VERIFY(zap_lookup(os, object, txgname, zl_intsize,
		    zl_ints, &last_txg) == 0);

		VERIFY(zap_length(os, object, propname, &zl_intsize,
		    &zl_ints) == 0);

		ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
		ASSERT3U(zl_ints, ==, ints);

		VERIFY(zap_lookup(os, object, propname, zl_intsize,
		    zl_ints, value) == 0);

		for (i = 0; i < ints; i++) {
			ASSERT3U(value[i], ==, last_txg + object + i);
		}
	} else {
		ASSERT3U(error, ==, ENOENT);
	}

	/*
	 * Atomically update two entries in our zap object.
	 * The first is named txg_%llu, and contains the txg
	 * in which the property was last updated.  The second
	 * is named prop_%llu, and the nth element of its value
	 * should be txg + object + n.
	 */
	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
	if (txg == 0)
		return;

	if (last_txg > txg)
		fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);

	for (i = 0; i < ints; i++)
		value[i] = txg + object + i;

	VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
	    1, &txg, tx));
	VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
	    ints, value, tx));

	dmu_tx_commit(tx);

	/*
	 * Remove a random pair of entries.
	 */
	prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
	(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
	(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);

	error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);

	/* Nothing to remove if this pair was never created. */
	if (error == ENOENT)
		return;

	ASSERT0(error);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
	if (txg == 0)
		return;
	VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
	VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
	dmu_tx_commit(tx);
}
+
+/*
+ * Testcase to test the upgrading of a microzap to fatzap.
+ */
+void
+ztest_fzap(ztest_ds_t *zd, uint64_t id)
+{
+	objset_t *os = zd->zd_os;
+	ztest_od_t od[1];
+	uint64_t object, txg;
+
+	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0);
+
+	if (ztest_object_init(zd, od, sizeof (od), !ztest_random(2)) != 0)
+		return;
+
+	object = od[0].od_object;
+
+	/*
+	 * Add entries to this ZAP and make sure it spills over
+	 * and gets upgraded to a fatzap. Also, since we are adding
+	 * 2050 entries we should see ptrtbl growth and leaf-block split.
+	 */
+	for (int i = 0; i < 2050; i++) {
+		char name[MAXNAMELEN];
+		uint64_t value = i;
+		dmu_tx_t *tx;
+		int error;
+
+		(void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
+		    id, value);
+
+		tx = dmu_tx_create(os);
+		dmu_tx_hold_zap(tx, object, B_TRUE, name);
+		txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+		if (txg == 0)
+			return;
+		error = zap_add(os, object, name, sizeof (uint64_t), 1,
+		    &value, tx);
+		ASSERT(error == 0 || error == EEXIST);
+		dmu_tx_commit(tx);
+	}
+}
+
/*
 * Hammer a shared (ID_PARALLEL) zap object from many threads with random
 * length/lookup/add/update/remove operations on randomly generated names.
 */
/* ARGSUSED */
void
ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
{
	objset_t *os = zd->zd_os;
	ztest_od_t od[1];
	uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
	dmu_tx_t *tx;
	int i, namelen, error;
	int micro = ztest_random(2);
	char name[20], string_value[20];
	void *data;

	/* micro picks one of two shared objects to operate on. */
	ztest_od_init(&od[0], ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0);

	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
		return;

	object = od[0].od_object;

	/*
	 * Generate a random name of the form 'xxx.....' where each
	 * x is a random printable character and the dots are dots.
	 * There are 94 such characters, and the name length goes from
	 * 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
	 */
	namelen = ztest_random(sizeof (name) - 5) + 5 + 1;

	for (i = 0; i < 3; i++)
		name[i] = '!' + ztest_random('~' - '!' + 1);
	for (; i < namelen - 1; i++)
		name[i] = '.';
	name[i] = '\0';

	/*
	 * Entry value format: either a single uint64 (the txg) or a byte
	 * string echoing the name, chosen deterministically from the name
	 * length and object so concurrent threads agree on it.
	 */
	if ((namelen & 1) || micro) {
		wsize = sizeof (txg);
		wc = 1;
		data = &txg;
	} else {
		wsize = 1;
		wc = namelen;
		data = string_value;
	}

	count = -1ULL;
	VERIFY(zap_count(os, object, &count) == 0);
	ASSERT(count != -1ULL);

	/*
	 * Select an operation: length, lookup, add, update, remove.
	 */
	i = ztest_random(5);

	/* Only add/update/remove (i >= 2) modify the zap and need a tx. */
	if (i >= 2) {
		tx = dmu_tx_create(os);
		dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
		txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
		if (txg == 0)
			return;
		bcopy(name, string_value, namelen);
	} else {
		tx = NULL;
		txg = 0;
		bzero(string_value, namelen);
	}

	switch (i) {

	case 0:
		error = zap_length(os, object, name, &zl_wsize, &zl_wc);
		if (error == 0) {
			ASSERT3U(wsize, ==, zl_wsize);
			ASSERT3U(wc, ==, zl_wc);
		} else {
			ASSERT3U(error, ==, ENOENT);
		}
		break;

	case 1:
		error = zap_lookup(os, object, name, wsize, wc, data);
		if (error == 0) {
			/* A string-valued entry must echo its own name. */
			if (data == string_value &&
			    bcmp(name, data, namelen) != 0)
				fatal(0, "name '%s' != val '%s' len %d",
				    name, data, namelen);
		} else {
			ASSERT3U(error, ==, ENOENT);
		}
		break;

	case 2:
		error = zap_add(os, object, name, wsize, wc, data, tx);
		ASSERT(error == 0 || error == EEXIST);
		break;

	case 3:
		VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
		break;

	case 4:
		error = zap_remove(os, object, name, tx);
		ASSERT(error == 0 || error == ENOENT);
		break;
	}

	if (tx != NULL)
		dmu_tx_commit(tx);
}
+
+/*
+ * Commit callback data.
+ */
+typedef struct ztest_cb_data {
+	list_node_t		zcd_node;
+	uint64_t		zcd_txg;
+	int			zcd_expected_err;
+	boolean_t		zcd_added;
+	boolean_t		zcd_called;
+	spa_t			*zcd_spa;
+} ztest_cb_data_t;
+
+/* This is the actual commit callback function */
+static void
+ztest_commit_callback(void *arg, int error)
+{
+	ztest_cb_data_t *data = arg;
+	uint64_t synced_txg;
+
+	VERIFY(data != NULL);
+	VERIFY3S(data->zcd_expected_err, ==, error);
+	VERIFY(!data->zcd_called);
+
+	synced_txg = spa_last_synced_txg(data->zcd_spa);
+	if (data->zcd_txg > synced_txg)
+		fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
+		    ", last synced txg = %" PRIu64 "\n", data->zcd_txg,
+		    synced_txg);
+
+	data->zcd_called = B_TRUE;
+
+	if (error == ECANCELED) {
+		ASSERT0(data->zcd_txg);
+		ASSERT(!data->zcd_added);
+
+		/*
+		 * The private callback data should be destroyed here, but
+		 * since we are going to check the zcd_called field after
+		 * dmu_tx_abort(), we will destroy it there.
+		 */
+		return;
+	}
+
+	/* Was this callback added to the global callback list? */
+	if (!data->zcd_added)
+		goto out;
+
+	ASSERT3U(data->zcd_txg, !=, 0);
+
+	/* Remove our callback from the list */
+	(void) mutex_lock(&zcl.zcl_callbacks_lock);
+	list_remove(&zcl.zcl_callbacks, data);
+	(void) mutex_unlock(&zcl.zcl_callbacks_lock);
+
+out:
+	umem_free(data, sizeof (ztest_cb_data_t));
+}
+
+/* Allocate and initialize callback data structure */
+static ztest_cb_data_t *
+ztest_create_cb_data(objset_t *os, uint64_t txg)
+{
+	ztest_cb_data_t *cb_data;
+
+	cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
+
+	cb_data->zcd_txg = txg;
+	cb_data->zcd_spa = dmu_objset_spa(os);
+
+	return (cb_data);
+}
+
+/*
+ * If a number of txgs equal to this threshold have been created after a commit
+ * callback has been registered but not called, then we assume there is an
+ * implementation bug.
+ */
+#define	ZTEST_COMMIT_CALLBACK_THRESH	(TXG_CONCURRENT_STATES + 2)
+
+/*
+ * Commit callback test.
+ */
+void
+ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
+{
+	objset_t *os = zd->zd_os;
+	ztest_od_t od[1];
+	dmu_tx_t *tx;
+	ztest_cb_data_t *cb_data[3], *tmp_cb;
+	uint64_t old_txg, txg;
+	int i, error;
+
+	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0);
+
+	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+		return;
+
+	tx = dmu_tx_create(os);
+
+	cb_data[0] = ztest_create_cb_data(os, 0);
+	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
+
+	dmu_tx_hold_write(tx, od[0].od_object, 0, sizeof (uint64_t));
+
+	/* Every once in a while, abort the transaction on purpose */
+	if (ztest_random(100) == 0)
+		error = -1;
+
+	if (!error)
+		error = dmu_tx_assign(tx, TXG_NOWAIT);
+
+	txg = error ? 0 : dmu_tx_get_txg(tx);
+
+	cb_data[0]->zcd_txg = txg;
+	cb_data[1] = ztest_create_cb_data(os, txg);
+	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
+
+	if (error) {
+		/*
+		 * It's not a strict requirement to call the registered
+		 * callbacks from inside dmu_tx_abort(), but that's what
+		 * it's supposed to happen in the current implementation
+		 * so we will check for that.
+		 */
+		for (i = 0; i < 2; i++) {
+			cb_data[i]->zcd_expected_err = ECANCELED;
+			VERIFY(!cb_data[i]->zcd_called);
+		}
+
+		dmu_tx_abort(tx);
+
+		for (i = 0; i < 2; i++) {
+			VERIFY(cb_data[i]->zcd_called);
+			umem_free(cb_data[i], sizeof (ztest_cb_data_t));
+		}
+
+		return;
+	}
+
+	cb_data[2] = ztest_create_cb_data(os, txg);
+	dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
+
+	/*
+	 * Read existing data to make sure there isn't a future leak.
+	 */
+	VERIFY(0 == dmu_read(os, od[0].od_object, 0, sizeof (uint64_t),
+	    &old_txg, DMU_READ_PREFETCH));
+
+	if (old_txg > txg)
+		fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
+		    old_txg, txg);
+
+	dmu_write(os, od[0].od_object, 0, sizeof (uint64_t), &txg, tx);
+
+	(void) mutex_lock(&zcl.zcl_callbacks_lock);
+
+	/*
+	 * Since commit callbacks don't have any ordering requirement and since
+	 * it is theoretically possible for a commit callback to be called
+	 * after an arbitrary amount of time has elapsed since its txg has been
+	 * synced, it is difficult to reliably determine whether a commit
+	 * callback hasn't been called due to high load or due to a flawed
+	 * implementation.
+	 *
+	 * In practice, we will assume that if after a certain number of txgs a
+	 * commit callback hasn't been called, then most likely there's an
+	 * implementation bug..
+	 */
+	tmp_cb = list_head(&zcl.zcl_callbacks);
+	if (tmp_cb != NULL &&
+	    tmp_cb->zcd_txg > txg - ZTEST_COMMIT_CALLBACK_THRESH) {
+		fatal(0, "Commit callback threshold exceeded, oldest txg: %"
+		    PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
+	}
+
+	/*
+	 * Let's find the place to insert our callbacks.
+	 *
+	 * Even though the list is ordered by txg, it is possible for the
+	 * insertion point to not be the end because our txg may already be
+	 * quiescing at this point and other callbacks in the open txg
+	 * (from other objsets) may have sneaked in.
+	 */
+	tmp_cb = list_tail(&zcl.zcl_callbacks);
+	while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
+		tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
+
+	/* Add the 3 callbacks to the list */
+	for (i = 0; i < 3; i++) {
+		if (tmp_cb == NULL)
+			list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
+		else
+			list_insert_after(&zcl.zcl_callbacks, tmp_cb,
+			    cb_data[i]);
+
+		cb_data[i]->zcd_added = B_TRUE;
+		VERIFY(!cb_data[i]->zcd_called);
+
+		tmp_cb = cb_data[i];
+	}
+
+	(void) mutex_unlock(&zcl.zcl_callbacks_lock);
+
+	dmu_tx_commit(tx);
+}
+
+/* ARGSUSED */
+void
+ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
+{
+	zfs_prop_t proplist[] = {
+		ZFS_PROP_CHECKSUM,
+		ZFS_PROP_COMPRESSION,
+		ZFS_PROP_COPIES,
+		ZFS_PROP_DEDUP
+	};
+
+	(void) rw_rdlock(&ztest_name_lock);
+
+	for (int p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
+		(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
+		    ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/* ARGSUSED */
+void
+ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
+{
+	nvlist_t *props = NULL;
+
+	(void) rw_rdlock(&ztest_name_lock);
+
+	(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
+	    ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
+
+	VERIFY0(spa_prop_get(ztest_spa, &props));
+
+	if (ztest_opts.zo_verbose >= 6)
+		dump_nvlist(props, 4);
+
+	nvlist_free(props);
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Test snapshot hold/release and deferred destroy.
+ */
+void
+ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
+{
+	int error;
+	objset_t *os = zd->zd_os;
+	objset_t *origin;
+	char snapname[100];
+	char fullname[100];
+	char clonename[100];
+	char tag[100];
+	char osname[MAXNAMELEN];
+
+	(void) rw_rdlock(&ztest_name_lock);
+
+	dmu_objset_name(os, osname);
+
+	(void) snprintf(snapname, 100, "sh1_%llu", id);
+	(void) snprintf(fullname, 100, "%s@%s", osname, snapname);
+	(void) snprintf(clonename, 100, "%s/ch1_%llu", osname, id);
+	(void) snprintf(tag, 100, "%tag_%llu", id);
+
+	/*
+	 * Clean up from any previous run.
+	 */
+	(void) dmu_objset_destroy(clonename, B_FALSE);
+	(void) dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
+	(void) dmu_objset_destroy(fullname, B_FALSE);
+
+	/*
+	 * Create snapshot, clone it, mark snap for deferred destroy,
+	 * destroy clone, verify snap was also destroyed.
+	 */
+	error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
+	    FALSE, -1);
+	if (error) {
+		if (error == ENOSPC) {
+			ztest_record_enospc("dmu_objset_snapshot");
+			goto out;
+		}
+		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
+	}
+
+	error = dmu_objset_hold(fullname, FTAG, &origin);
+	if (error)
+		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
+
+	error = dmu_objset_clone(clonename, dmu_objset_ds(origin), 0);
+	dmu_objset_rele(origin, FTAG);
+	if (error) {
+		if (error == ENOSPC) {
+			ztest_record_enospc("dmu_objset_clone");
+			goto out;
+		}
+		fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
+	}
+
+	error = dmu_objset_destroy(fullname, B_TRUE);
+	if (error) {
+		fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
+		    fullname, error);
+	}
+
+	error = dmu_objset_destroy(clonename, B_FALSE);
+	if (error)
+		fatal(0, "dmu_objset_destroy(%s) = %d", clonename, error);
+
+	error = dmu_objset_hold(fullname, FTAG, &origin);
+	if (error != ENOENT)
+		fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
+
+	/*
+	 * Create snapshot, add temporary hold, verify that we can't
+	 * destroy a held snapshot, mark for deferred destroy,
+	 * release hold, verify snapshot was destroyed.
+	 */
+	error = dmu_objset_snapshot(osname, snapname, NULL, NULL, FALSE,
+	    FALSE, -1);
+	if (error) {
+		if (error == ENOSPC) {
+			ztest_record_enospc("dmu_objset_snapshot");
+			goto out;
+		}
+		fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
+	}
+
+	error = dsl_dataset_user_hold(osname, snapname, tag, B_FALSE,
+	    B_TRUE, -1);
+	if (error)
+		fatal(0, "dsl_dataset_user_hold(%s)", fullname, tag);
+
+	error = dmu_objset_destroy(fullname, B_FALSE);
+	if (error != EBUSY) {
+		fatal(0, "dmu_objset_destroy(%s, B_FALSE) = %d",
+		    fullname, error);
+	}
+
+	error = dmu_objset_destroy(fullname, B_TRUE);
+	if (error) {
+		fatal(0, "dmu_objset_destroy(%s, B_TRUE) = %d",
+		    fullname, error);
+	}
+
+	error = dsl_dataset_user_release(osname, snapname, tag, B_FALSE);
+	if (error)
+		fatal(0, "dsl_dataset_user_release(%s)", fullname, tag);
+
+	VERIFY(dmu_objset_hold(fullname, FTAG, &origin) == ENOENT);
+
+out:
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Inject random faults into the on-disk data.
+ *
+ * On a coin flip this targets either a regular data/slog leaf vdev or an
+ * l2cache device.  For the data/slog case it may make the device claim to
+ * be unreadable or unwriteable, close its backing fd, or randomly
+ * online/offline it; it then writes recognizable garbage words into a
+ * randomly chosen leaf, steering clear of the vdev labels.  The amount of
+ * damage is limited by MAXFAULTS() so the pool remains importable.
+ */
+/* ARGSUSED */
+void
+ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_t *zs = ztest_shared;
+	spa_t *spa = ztest_spa;
+	int fd;
+	uint64_t offset;
+	uint64_t leaves;
+	uint64_t bad = 0x1990c0ffeedecadeULL;	/* recognizable garbage */
+	uint64_t top, leaf;
+	char path0[MAXPATHLEN];
+	char pathrand[MAXPATHLEN];
+	size_t fsize;
+	int bshift = SPA_MAXBLOCKSHIFT + 2;	/* don't scrog all labels */
+	int iters = 1000;
+	int maxfaults;
+	int mirror_save;
+	vdev_t *vd0 = NULL;
+	uint64_t guid0 = 0;
+	boolean_t islog = B_FALSE;
+
+	/* Snapshot the vdev topology parameters under the vdev lock. */
+	VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+	maxfaults = MAXFAULTS();
+	leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
+	mirror_save = zs->zs_mirrors;
+	VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+
+	ASSERT(leaves >= 1);
+
+	/*
+	 * We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
+	 */
+	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+
+	if (ztest_random(2) == 0) {
+		/*
+		 * Inject errors on a normal data device or slog device.
+		 */
+		top = ztest_random_vdev_top(spa, B_TRUE);
+		leaf = ztest_random(leaves) + zs->zs_splits;
+
+		/*
+		 * Generate paths to the first leaf in this top-level vdev,
+		 * and to the random leaf we selected.  We'll induce transient
+		 * write failures and random online/offline activity on leaf 0,
+		 * and we'll write random garbage to the randomly chosen leaf.
+		 */
+		(void) snprintf(path0, sizeof (path0), ztest_dev_template,
+		    ztest_opts.zo_dir, ztest_opts.zo_pool,
+		    top * leaves + zs->zs_splits);
+		(void) snprintf(pathrand, sizeof (pathrand), ztest_dev_template,
+		    ztest_opts.zo_dir, ztest_opts.zo_pool,
+		    top * leaves + leaf);
+
+		vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
+		if (vd0 != NULL && vd0->vdev_top->vdev_islog)
+			islog = B_TRUE;
+
+		if (vd0 != NULL && maxfaults != 1) {
+			/*
+			 * Make vd0 explicitly claim to be unreadable,
+			 * or unwriteable, or reach behind its back
+			 * and close the underlying fd.  We can do this if
+			 * maxfaults == 0 because we'll fail and reexecute,
+			 * and we can do it if maxfaults >= 2 because we'll
+			 * have enough redundancy.  If maxfaults == 1, the
+			 * combination of this with injection of random data
+			 * corruption below exceeds the pool's fault tolerance.
+			 */
+			vdev_file_t *vf = vd0->vdev_tsd;
+
+			if (vf != NULL && ztest_random(3) == 0) {
+				(void) close(vf->vf_vnode->v_fd);
+				vf->vf_vnode->v_fd = -1;
+			} else if (ztest_random(2) == 0) {
+				vd0->vdev_cant_read = B_TRUE;
+			} else {
+				vd0->vdev_cant_write = B_TRUE;
+			}
+			guid0 = vd0->vdev_guid;
+		}
+	} else {
+		/*
+		 * Inject errors on an l2cache device.
+		 */
+		spa_aux_vdev_t *sav = &spa->spa_l2cache;
+
+		if (sav->sav_count == 0) {
+			spa_config_exit(spa, SCL_STATE, FTAG);
+			return;
+		}
+		vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
+		guid0 = vd0->vdev_guid;
+		/*
+		 * Use bounded copies: vdev_path comes from the pool config
+		 * and is not guaranteed to fit in MAXPATHLEN.
+		 */
+		(void) strlcpy(path0, vd0->vdev_path, sizeof (path0));
+		(void) strlcpy(pathrand, vd0->vdev_path, sizeof (pathrand));
+
+		leaf = 0;
+		leaves = 1;
+		maxfaults = INT_MAX;	/* no limit on cache devices */
+	}
+
+	spa_config_exit(spa, SCL_STATE, FTAG);
+
+	/*
+	 * If we can tolerate two or more faults, or we're dealing
+	 * with a slog, randomly online/offline vd0.
+	 */
+	if ((maxfaults >= 2 || islog) && guid0 != 0) {
+		if (ztest_random(10) < 6) {
+			int flags = (ztest_random(2) == 0 ?
+			    ZFS_OFFLINE_TEMPORARY : 0);
+
+			/*
+			 * We have to grab the zs_name_lock as writer to
+			 * prevent a race between offlining a slog and
+			 * destroying a dataset. Offlining the slog will
+			 * grab a reference on the dataset which may cause
+			 * dmu_objset_destroy() to fail with EBUSY thus
+			 * leaving the dataset in an inconsistent state.
+			 */
+			if (islog)
+				(void) rw_wrlock(&ztest_name_lock);
+
+			VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
+
+			if (islog)
+				(void) rw_unlock(&ztest_name_lock);
+		} else {
+			/*
+			 * Ideally we would like to be able to randomly
+			 * call vdev_[on|off]line without holding locks
+			 * to force unpredictable failures but the side
+			 * effects of vdev_[on|off]line prevent us from
+			 * doing so. We grab the ztest_vdev_lock here to
+			 * prevent a race between injection testing and
+			 * aux_vdev removal.
+			 */
+			VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+			(void) vdev_online(spa, guid0, 0, NULL);
+			VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+		}
+	}
+
+	/* With no redundancy at all, data corruption would be fatal. */
+	if (maxfaults == 0)
+		return;
+
+	/*
+	 * We have at least single-fault tolerance, so inject data corruption.
+	 */
+	fd = open(pathrand, O_RDWR);
+
+	if (fd == -1)	/* we hit a gap in the device namespace */
+		return;
+
+	fsize = lseek(fd, 0, SEEK_END);
+
+	while (--iters != 0) {
+		/*
+		 * Pick an 8-byte-aligned offset within the low half of a
+		 * (leaves << bshift) stripe belonging to our chosen leaf,
+		 * which keeps us away from the vdev labels.
+		 */
+		offset = ztest_random(fsize / (leaves << bshift)) *
+		    (leaves << bshift) + (leaf << bshift) +
+		    (ztest_random(1ULL << (bshift - 1)) & -8ULL);
+
+		if (offset >= fsize)
+			continue;
+
+		/*
+		 * If a vdev split has changed the mirror count since we
+		 * sampled it, our path/offset math is stale -- bail out.
+		 */
+		VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
+		if (mirror_save != zs->zs_mirrors) {
+			VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+			(void) close(fd);
+			return;
+		}
+
+		if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
+			fatal(1, "can't inject bad word at 0x%llx in %s",
+			    offset, pathrand);
+
+		VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
+
+		if (ztest_opts.zo_verbose >= 7)
+			(void) printf("injected bad word into %s,"
+			    " offset 0x%llx\n", pathrand, (u_longlong_t)offset);
+	}
+
+	(void) close(fd);
+}
+
+/*
+ * Verify that DDT repair works as expected.
+ *
+ * With dedup enabled and copies=1, write 2 * ZIO_DEDUPDITTO_MIN identical
+ * blocks so the dedup-ditto threshold is crossed, then overwrite the one
+ * physical copy with garbage; per the design, the extra ditto copy should
+ * heal the damage on a later read.
+ */
+void
+ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
+{
+	ztest_shared_t *zs = ztest_shared;
+	spa_t *spa = ztest_spa;
+	objset_t *os = zd->zd_os;
+	ztest_od_t od[1];
+	uint64_t object, blocksize, txg, pattern, psize;
+	enum zio_checksum checksum = spa_dedup_checksum(spa);
+	dmu_buf_t *db;
+	dmu_tx_t *tx;
+	void *buf;
+	blkptr_t blk;
+	int copies = 2 * ZIO_DEDUPDITTO_MIN;	/* enough to trigger ditto */
+
+	blocksize = ztest_random_blocksize();
+	blocksize = MIN(blocksize, 2048);	/* because we write so many */
+
+	ztest_od_init(&od[0], id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0);
+
+	if (ztest_object_init(zd, od, sizeof (od), B_FALSE) != 0)
+		return;
+
+	/*
+	 * Take the name lock as writer to prevent anyone else from changing
+	 * the pool and dataset properties we need to maintain during this test.
+	 */
+	(void) rw_wrlock(&ztest_name_lock);
+
+	/* Force dedup on and copies=1; bail out if either cannot be set. */
+	if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
+	    B_FALSE) != 0 ||
+	    ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
+	    B_FALSE) != 0) {
+		(void) rw_unlock(&ztest_name_lock);
+		return;
+	}
+
+	object = od[0].od_object;
+	blocksize = od[0].od_blocksize;
+	/* Per-dataset fill pattern, distinct from other datasets' patterns. */
+	pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);
+
+	ASSERT(object != 0);
+
+	tx = dmu_tx_create(os);
+	dmu_tx_hold_write(tx, object, 0, copies * blocksize);
+	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	if (txg == 0) {
+		(void) rw_unlock(&ztest_name_lock);
+		return;
+	}
+
+	/*
+	 * Write all the copies of our block.
+	 */
+	for (int i = 0; i < copies; i++) {
+		uint64_t offset = i * blocksize;
+		VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &db,
+		    DMU_READ_NO_PREFETCH));
+		ASSERT(db->db_offset == offset);
+		ASSERT(db->db_size == blocksize);
+		/* Block must hold either our pattern or still be zeroed. */
+		ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
+		    ztest_pattern_match(db->db_data, db->db_size, 0ULL));
+		dmu_buf_will_fill(db, tx);
+		ztest_pattern_set(db->db_data, db->db_size, pattern);
+		dmu_buf_rele(db, FTAG);
+	}
+
+	dmu_tx_commit(tx);
+	txg_wait_synced(spa_get_dsl(spa), txg);
+
+	/*
+	 * Find out what block we got.
+	 */
+	VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
+	    DMU_READ_NO_PREFETCH));
+	blk = *((dmu_buf_impl_t *)db)->db_blkptr;
+	dmu_buf_rele(db, FTAG);
+
+	/*
+	 * Damage the block.  Dedup-ditto will save us when we read it later.
+	 */
+	psize = BP_GET_PSIZE(&blk);
+	buf = zio_buf_alloc(psize);
+	ztest_pattern_set(buf, psize, ~pattern);	/* inverted = garbage */
+
+	(void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
+	    buf, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
+	    ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
+
+	zio_buf_free(buf, psize);
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Scrub the pool.
+ *
+ * Kick off a scrub, give it a moment to get going, then request a second
+ * scrub to exercise the restart-while-in-progress path.
+ */
+/* ARGSUSED */
+void
+ztest_scrub(ztest_ds_t *zd, uint64_t id)
+{
+	spa_t *spa = ztest_spa;
+
+	(void) spa_scan(spa, POOL_SCAN_SCRUB);
+	(void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
+	(void) spa_scan(spa, POOL_SCAN_SCRUB);
+}
+
+/*
+ * Change the guid for the pool.
+ *
+ * Records the pool guid and load guid before the change, then verifies
+ * that the pool guid did change and the load guid did not.
+ */
+/* ARGSUSED */
+void
+ztest_reguid(ztest_ds_t *zd, uint64_t id)
+{
+	spa_t *spa = ztest_spa;
+	uint64_t old_guid, old_load_guid;
+	int err;
+
+	old_guid = spa_guid(spa);
+	old_load_guid = spa_load_guid(spa);
+
+	/* Hold the name lock so dataset operations can't race the reguid. */
+	(void) rw_wrlock(&ztest_name_lock);
+	err = spa_change_guid(spa);
+	(void) rw_unlock(&ztest_name_lock);
+
+	if (err != 0)
+		return;
+
+	if (ztest_opts.zo_verbose >= 4) {
+		(void) printf("Changed guid old %llu -> %llu\n",
+		    (u_longlong_t)old_guid, (u_longlong_t)spa_guid(spa));
+	}
+
+	/* The pool guid must change; the load guid must stay the same. */
+	VERIFY3U(old_guid, !=, spa_guid(spa));
+	VERIFY3U(old_load_guid, ==, spa_load_guid(spa));
+}
+
+/*
+ * Rename the pool to a different name and then rename it back.
+ *
+ * After each rename, verify that the old name is gone, the new name
+ * opens, and the spa_t we get back is the same one we started with.
+ */
+/* ARGSUSED */
+void
+ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
+{
+	char *oldname, *newname;
+	spa_t *spa;
+	size_t newlen;
+
+	(void) rw_wrlock(&ztest_name_lock);
+
+	oldname = ztest_opts.zo_pool;
+	newlen = strlen(oldname) + 5;		/* "_tmp" plus NUL */
+	newname = umem_alloc(newlen, UMEM_NOFAIL);
+	(void) snprintf(newname, newlen, "%s_tmp", oldname);
+
+	/* Rename the pool; the old name must then be unknown. */
+	VERIFY3U(0, ==, spa_rename(oldname, newname));
+	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
+
+	/* The new name must open and refer to the very same spa_t. */
+	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
+	ASSERT(spa == ztest_spa);
+	spa_close(spa, FTAG);
+
+	/* Rename it back to the original and re-verify. */
+	VERIFY3U(0, ==, spa_rename(newname, oldname));
+	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
+	ASSERT(spa == ztest_spa);
+	spa_close(spa, FTAG);
+
+	umem_free(newname, newlen);
+
+	(void) rw_unlock(&ztest_name_lock);
+}
+
+/*
+ * Verify pool integrity by running zdb.
+ *
+ * Builds a "zdb -bcc ... -U <cachefile> <pool>" command line from a fixed
+ * ztest path template, runs it via popen(), optionally echoes its output,
+ * and treats any non-zero exit (or death by signal) as fatal.
+ */
+static void
+ztest_run_zdb(char *pool)
+{
+	int status;
+	char zdb[MAXPATHLEN + MAXNAMELEN + 20];
+	char zbuf[1024];
+	char *bin;
+	char *ztest;
+	char *isa;
+	int isalen;
+	FILE *fp;
+
+	/* Fixed template; the "/usr/bin..." part is rewritten below. */
+	strlcpy(zdb, "/usr/bin/ztest", sizeof(zdb));
+
+	/* zdb lives in /usr/sbin, while ztest lives in /usr/bin */
+	bin = strstr(zdb, "/usr/bin/");
+	ztest = strstr(bin, "/ztest");
+	isa = bin + 8;			/* ISA subdir between bin/ and ztest */
+	isalen = ztest - isa;
+	isa = strdup(isa);
+	/*
+	 * Build the command line in place starting at "/usr/bin", bounded
+	 * by the space remaining in zdb so a long spa_config_path or pool
+	 * name cannot overflow the buffer.
+	 */
+	(void) snprintf(bin, sizeof (zdb) - (size_t)(bin - zdb),
+	    "/usr/sbin%.*s/zdb -bcc%s%s -U %s %s",
+	    isalen,
+	    isa,
+	    ztest_opts.zo_verbose >= 3 ? "s" : "",
+	    ztest_opts.zo_verbose >= 4 ? "v" : "",
+	    spa_config_path,
+	    pool);
+	free(isa);
+
+	if (ztest_opts.zo_verbose >= 5)
+		(void) printf("Executing %s\n", strstr(zdb, "zdb "));
+
+	fp = popen(zdb, "r");
+	if (fp == NULL)		/* explicit check; assert() vanishes w/NDEBUG */
+		fatal(1, "cannot popen '%s'", zdb);
+
+	while (fgets(zbuf, sizeof (zbuf), fp) != NULL)
+		if (ztest_opts.zo_verbose >= 3)
+			(void) printf("%s", zbuf);
+
+	status = pclose(fp);
+
+	if (status == 0)
+		return;
+
+	/* zdb found a problem: report how it failed and abort the run. */
+	ztest_dump_core = 0;
+	if (WIFEXITED(status))
+		fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
+	else
+		fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
+}
+
+/*
+ * Print the given header and the name of every pool currently in the
+ * spa namespace (verbose level >= 6 only).
+ */
+static void
+ztest_walk_pool_directory(char *header)
+{
+	spa_t *spa;
+	boolean_t verbose = (ztest_opts.zo_verbose >= 6);
+
+	if (verbose)
+		(void) printf("%s\n", header);
+
+	/* Walk every pool under the namespace lock. */
+	mutex_enter(&spa_namespace_lock);
+	for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) {
+		if (verbose)
+			(void) printf("\t%s\n", spa_name(spa));
+	}
+	mutex_exit(&spa_namespace_lock);
+}
+
+/*
+ * Export the pool named oldname and re-import it as newname, verifying
+ * the expected success/failure of every step along the way: duplicate
+ * imports must fail with EEXIST, the old name must become unknown, and
+ * the re-imported pool must keep its guid.
+ */
+static void
+ztest_spa_import_export(char *oldname, char *newname)
+{
+	nvlist_t *config, *newconfig;
+	uint64_t pool_guid;
+	spa_t *spa;
+
+	if (ztest_opts.zo_verbose >= 4) {
+		(void) printf("import/export: old = %s, new = %s\n",
+		    oldname, newname);
+	}
+
+	/*
+	 * Clean up from previous runs.
+	 */
+	(void) spa_destroy(newname);
+
+	/*
+	 * Get the pool's configuration and guid.
+	 */
+	VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
+
+	/*
+	 * Kick off a scrub to tickle scrub/export races.
+	 */
+	if (ztest_random(2) == 0)
+		(void) spa_scan(spa, POOL_SCAN_SCRUB);
+
+	pool_guid = spa_guid(spa);
+	spa_close(spa, FTAG);
+
+	ztest_walk_pool_directory("pools before export");
+
+	/*
+	 * Export it.
+	 */
+	VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
+
+	ztest_walk_pool_directory("pools after export");
+
+	/*
+	 * Try to import it.
+	 */
+	newconfig = spa_tryimport(config);
+	ASSERT(newconfig != NULL);
+	nvlist_free(newconfig);
+
+	/*
+	 * Import it under the new name.
+	 */
+	VERIFY3U(0, ==, spa_import(newname, config, NULL, 0));
+
+	ztest_walk_pool_directory("pools after import");
+
+	/*
+	 * Try to import it again -- should fail with EEXIST.
+	 */
+	VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
+
+	/*
+	 * Try to import it under a different name -- should fail with EEXIST.
+	 */
+	VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
+
+	/*
+	 * Verify that the pool is no longer visible under the old name.
+	 */
+	VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
+
+	/*
+	 * Verify that we can open and close the pool using the new name.
+	 */
+	VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
+	ASSERT(pool_guid == spa_guid(spa));
+	spa_close(spa, FTAG);
+
+	nvlist_free(config);
+}
+
+/*
+ * Clear vdev error state and resume a suspended pool so I/O can proceed.
+ */
+static void
+ztest_resume(spa_t *spa)
+{
+	if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
+		(void) printf("resuming from suspended state\n");
+	/* Clear error state on all vdevs under the vdev state lock. */
+	spa_vdev_state_enter(spa, SCL_NONE);
+	vdev_clear(spa, NULL);
+	(void) spa_vdev_state_exit(spa, NULL, 0);
+	(void) zio_resume(spa);
+}
+
+/*
+ * Background thread: poll roughly every 100ms and resume the pool
+ * whenever it is found suspended, until ztest_exiting is set.
+ */
+static void *
+ztest_resume_thread(void *arg)
+{
+	spa_t *spa = arg;
+
+	for (;;) {
+		if (ztest_exiting)
+			break;
+		if (spa_suspended(spa))
+			ztest_resume(spa);
+		(void) poll(NULL, 0, 100);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Deadman thread: sleep for the expected test duration plus a grace
+ * period; if the sleep ever finishes, the run has hung, so abort.
+ */
+static void *
+ztest_deadman_thread(void *arg)
+{
+	ztest_shared_t *zs = arg;
+	int grace_seconds = 300;
+	hrtime_t timeout_seconds;
+
+	timeout_seconds = (zs->zs_thread_stop - zs->zs_thread_start) /
+	    NANOSEC + grace_seconds;
+
+	(void) poll(NULL, 0, (int)(1000 * timeout_seconds));
+
+	fatal(0, "failed to complete within %d seconds of deadline",
+	    grace_seconds);
+
+	return (NULL);
+}
+
+/*
+ * Run one test function zi->zi_iters times against the dataset derived
+ * from id, and fold the call count and elapsed time into the shared
+ * per-function call state.
+ */
+static void
+ztest_execute(int test, ztest_info_t *zi, uint64_t id)
+{
+	ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
+	ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
+	hrtime_t start, elapsed;
+
+	start = gethrtime();
+	for (int iter = 0; iter < zi->zi_iters; iter++)
+		zi->zi_func(zd, id);
+	elapsed = gethrtime() - start;
+
+	/* Shared state is updated atomically; many threads run this. */
+	atomic_add_64(&zc->zc_count, 1);
+	atomic_add_64(&zc->zc_time, elapsed);
+
+	if (ztest_opts.zo_verbose >= 4) {
+		Dl_info dli;
+		(void) dladdr((void *)zi->zi_func, &dli);
+		(void) printf("%6.2f sec in %s\n",
+		    (double)elapsed / NANOSEC, dli.dli_sname);
+	}
+}
+
+/*
+ * Main worker-thread loop: until the stop time arrives, repeatedly pick
+ * a random test function and run it if its next-call time has come,
+ * using an atomic compare-and-swap on the shared schedule so that only
+ * one thread claims each slot.
+ */
+static void *
+ztest_thread(void *arg)
+{
+	int rand;
+	uint64_t id = (uintptr_t)arg;
+	ztest_shared_t *zs = ztest_shared;
+	uint64_t call_next;
+	hrtime_t now;
+	ztest_info_t *zi;
+	ztest_shared_callstate_t *zc;
+
+	while ((now = gethrtime()) < zs->zs_thread_stop) {
+		/*
+		 * See if it's time to force a crash.
+		 */
+		if (now > zs->zs_thread_kill)
+			ztest_kill(zs);
+
+		/*
+		 * If we're getting ENOSPC with some regularity, stop.
+		 */
+		if (zs->zs_enospc_count > 10)
+			break;
+
+		/*
+		 * Pick a random function to execute.
+		 */
+		rand = ztest_random(ZTEST_FUNCS);
+		zi = &ztest_info[rand];
+		zc = ZTEST_GET_SHARED_CALLSTATE(rand);
+		call_next = zc->zc_next;
+
+		/*
+		 * Run the function only if its scheduled time has arrived
+		 * AND we win the CAS that advances zc_next (by a random
+		 * amount averaging zi_interval[0]); a lost CAS means some
+		 * other thread claimed this call.
+		 */
+		if (now >= call_next &&
+		    atomic_cas_64(&zc->zc_next, call_next, call_next +
+		    ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
+			ztest_execute(rand, zi, id);
+		}
+	}
+
+	return (NULL);
+}
+
+/*
+ * Format the name of ztest dataset number d in the given pool into
+ * dsname ("<pool>/ds_<d>").  dsname must hold at least MAXNAMELEN bytes.
+ */
+static void
+ztest_dataset_name(char *dsname, char *pool, int d)
+{
+	(void) snprintf(dsname, MAXNAMELEN, "%s/ds_%d", pool, d);
+}
+
+/*
+ * Destroy ztest dataset number d (typically to reclaim space after
+ * ENOSPC), first cleaning up any clones and snapshots created by the
+ * threads that operated on it.
+ */
+static void
+ztest_dataset_destroy(int d)
+{
+	char dsname[MAXNAMELEN];
+	int t;
+
+	ztest_dataset_name(dsname, ztest_opts.zo_pool, d);
+
+	if (ztest_opts.zo_verbose >= 3)
+		(void) printf("Destroying %s to free up space\n", dsname);
+
+	/*
+	 * Cleanup any non-standard clones and snapshots.  In general,
+	 * ztest thread t operates on dataset (t % zopt_datasets),
+	 * so there may be more than one thing to clean up.
+	 */
+	t = d;
+	while (t < ztest_opts.zo_threads) {
+		ztest_dsl_dataset_cleanup(dsname, t);
+		t += ztest_opts.zo_datasets;
+	}
+
+	/* Destroy the dataset itself, including snapshots and children. */
+	(void) dmu_objset_find(dsname, ztest_objset_destroy_cb, NULL,
+	    DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
+}
+
+/*
+ * Check the dataset for object leaks: objects in use must equal the
+ * number of ZTEST_DIROBJ entries plus one (for ZTEST_DIROBJ itself).
+ */
+static void
+ztest_dataset_dirobj_verify(ztest_ds_t *zd)
+{
+	uint64_t usedobjs, dirobjs, scratch;	/* scratch absorbs unused outs */
+
+	/*
+	 * ZTEST_DIROBJ is the object directory for the entire dataset.
+	 * Therefore, the number of objects in use should equal the
+	 * number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
+	 * If not, we have an object leak.
+	 *
+	 * Note that we can only check this in ztest_dataset_open(),
+	 * when the open-context and syncing-context values agree.
+	 * That's because zap_count() returns the open-context value,
+	 * while dmu_objset_space() returns the rootbp fill count.
+	 */
+	VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
+	dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
+	ASSERT3U(dirobjs + 1, ==, usedobjs);
+}
+
+/*
+ * Open (creating if necessary) ztest dataset number d, replay its ZIL,
+ * and verify that no committed log records have been lost.  Returns 0
+ * on success or ENOSPC if the dataset could not be created.
+ */
+static int
+ztest_dataset_open(int d)
+{
+	ztest_ds_t *zd = &ztest_ds[d];
+	/* Highest ZIL sequence number committed before the last close/kill. */
+	uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
+	objset_t *os;
+	zilog_t *zilog;
+	char name[MAXNAMELEN];
+	int error;
+
+	ztest_dataset_name(name, ztest_opts.zo_pool, d);
+
+	/* Reader lock: we only need the pool/dataset names to stay put. */
+	(void) rw_rdlock(&ztest_name_lock);
+
+	error = ztest_dataset_create(name);
+	if (error == ENOSPC) {
+		(void) rw_unlock(&ztest_name_lock);
+		ztest_record_enospc(FTAG);
+		return (error);
+	}
+	ASSERT(error == 0 || error == EEXIST);
+
+	VERIFY0(dmu_objset_hold(name, zd, &os));
+	(void) rw_unlock(&ztest_name_lock);
+
+	ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
+
+	zilog = zd->zd_zilog;
+
+	/* A claimed sequence below what we committed means lost records. */
+	if (zilog->zl_header->zh_claim_lr_seq != 0 &&
+	    zilog->zl_header->zh_claim_lr_seq < committed_seq)
+		fatal(0, "missing log records: claimed %llu < committed %llu",
+		    zilog->zl_header->zh_claim_lr_seq, committed_seq);
+
+	/* Verify object accounting before and after replay. */
+	ztest_dataset_dirobj_verify(zd);
+
+	zil_replay(os, zd, ztest_replay_vector);
+
+	ztest_dataset_dirobj_verify(zd);
+
+	if (ztest_opts.zo_verbose >= 6)
+		(void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
+		    zd->zd_name,
+		    (u_longlong_t)zilog->zl_parse_blk_count,
+		    (u_longlong_t)zilog->zl_parse_lr_count,
+		    (u_longlong_t)zilog->zl_replaying_seq);
+
+	zilog = zil_open(os, ztest_get_data);
+
+	/* Same check against what replay actually recovered. */
+	if (zilog->zl_replaying_seq != 0 &&
+	    zilog->zl_replaying_seq < committed_seq)
+		fatal(0, "missing log records: replayed %llu < committed %llu",
+		    zilog->zl_replaying_seq, committed_seq);
+
+	return (0);
+}
+
+/*
+ * Tear down the per-dataset state set up by ztest_dataset_open().
+ */
+static void
+ztest_dataset_close(int d)
+{
+	ztest_ds_t *zd = &ztest_ds[d];
+
+	/* Close the ZIL before releasing the objset it belongs to. */
+	zil_close(zd->zd_zilog);
+	dmu_objset_rele(zd->zd_os, zd);
+
+	ztest_zd_fini(zd);
+}
+
+/*
+ * Kick off threads to run tests on all datasets in parallel.
+ *
+ * This is one full test pass: open the pool, start the resume and
+ * deadman helper threads, run zo_threads worker threads until the stop
+ * (or kill) time, then tear everything down and optionally exercise the
+ * export/import path.
+ */
+static void
+ztest_run(ztest_shared_t *zs)
+{
+	thread_t *tid;
+	spa_t *spa;
+	objset_t *os;
+	thread_t resume_tid;
+	int error;
+
+	ztest_exiting = B_FALSE;
+
+	/*
+	 * Initialize parent/child shared state.
+	 */
+	VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
+	VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
+
+	/* Compute stop time, and with probability zo_killrate a kill time. */
+	zs->zs_thread_start = gethrtime();
+	zs->zs_thread_stop =
+	    zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
+	zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
+	zs->zs_thread_kill = zs->zs_thread_stop;
+	if (ztest_random(100) < ztest_opts.zo_killrate) {
+		zs->zs_thread_kill -=
+		    ztest_random(ztest_opts.zo_passtime * NANOSEC);
+	}
+
+	(void) _mutex_init(&zcl.zcl_callbacks_lock, USYNC_THREAD, NULL);
+
+	list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
+	    offsetof(ztest_cb_data_t, zcd_node));
+
+	/*
+	 * Open our pool.
+	 */
+	kernel_init(FREAD | FWRITE);
+	VERIFY(spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0);
+	spa->spa_debug = B_TRUE;
+	ztest_spa = spa;
+
+	VERIFY3U(0, ==, dmu_objset_hold(ztest_opts.zo_pool, FTAG, &os));
+	zs->zs_guid = dmu_objset_fsid_guid(os);
+	dmu_objset_rele(os, FTAG);
+
+	/* Low ditto threshold so ztest_ddt_repair() can trigger it. */
+	spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
+
+	/*
+	 * We don't expect the pool to suspend unless maxfaults == 0,
+	 * in which case ztest_fault_inject() temporarily takes away
+	 * the only valid replica.
+	 */
+	if (MAXFAULTS() == 0)
+		spa->spa_failmode = ZIO_FAILURE_MODE_WAIT;
+	else
+		spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
+
+	/*
+	 * Create a thread to periodically resume suspended I/O.
+	 */
+	VERIFY(thr_create(0, 0, ztest_resume_thread, spa, THR_BOUND,
+	    &resume_tid) == 0);
+
+	/*
+	 * Create a deadman thread to abort() if we hang.
+	 */
+	VERIFY(thr_create(0, 0, ztest_deadman_thread, zs, THR_BOUND,
+	    NULL) == 0);
+
+	/*
+	 * Verify that we can safely inquire about about any object,
+	 * whether it's allocated or not.  To make it interesting,
+	 * we probe a 5-wide window around each power of two.
+	 * This hits all edge cases, including zero and the max.
+	 */
+	for (int t = 0; t < 64; t++) {
+		for (int d = -5; d <= 5; d++) {
+			error = dmu_object_info(spa->spa_meta_objset,
+			    (1ULL << t) + d, NULL);
+			ASSERT(error == 0 || error == ENOENT ||
+			    error == EINVAL);
+		}
+	}
+
+	/*
+	 * If we got any ENOSPC errors on the previous run, destroy something.
+	 */
+	if (zs->zs_enospc_count != 0) {
+		int d = ztest_random(ztest_opts.zo_datasets);
+		ztest_dataset_destroy(d);
+	}
+	zs->zs_enospc_count = 0;
+
+	tid = umem_zalloc(ztest_opts.zo_threads * sizeof (thread_t),
+	    UMEM_NOFAIL);
+
+	if (ztest_opts.zo_verbose >= 4)
+		(void) printf("starting main threads...\n");
+
+	/*
+	 * Kick off all the tests that run in parallel.  The first
+	 * zo_datasets threads each open their own dataset first.
+	 *
+	 * NOTE(review): an open failure here returns without joining the
+	 * threads already started or freeing tid[] -- presumably acceptable
+	 * because the ENOSPC was recorded and the process is winding down;
+	 * confirm.
+	 */
+	for (int t = 0; t < ztest_opts.zo_threads; t++) {
+		if (t < ztest_opts.zo_datasets &&
+		    ztest_dataset_open(t) != 0)
+			return;
+		VERIFY(thr_create(0, 0, ztest_thread, (void *)(uintptr_t)t,
+		    THR_BOUND, &tid[t]) == 0);
+	}
+
+	/*
+	 * Wait for all of the tests to complete.  We go in reverse order
+	 * so we don't close datasets while threads are still using them.
+	 */
+	for (int t = ztest_opts.zo_threads - 1; t >= 0; t--) {
+		VERIFY(thr_join(tid[t], NULL, NULL) == 0);
+		if (t < ztest_opts.zo_datasets)
+			ztest_dataset_close(t);
+	}
+
+	txg_wait_synced(spa_get_dsl(spa), 0);
+
+	/* Record allocation stats for the next pass's sanity checks. */
+	zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
+	zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
+
+	umem_free(tid, ztest_opts.zo_threads * sizeof (thread_t));
+
+	/* Kill the resume thread */
+	ztest_exiting = B_TRUE;
+	VERIFY(thr_join(resume_tid, NULL, NULL) == 0);
+	ztest_resume(spa);
+
+	/*
+	 * Right before closing the pool, kick off a bunch of async I/O;
+	 * spa_close() should wait for it to complete.
+	 */
+	for (uint64_t object = 1; object < 50; object++)
+		dmu_prefetch(spa->spa_meta_objset, object, 0, 1ULL << 20);
+
+	spa_close(spa, FTAG);
+
+	/*
+	 * Verify that we can loop over all pools.
+	 */
+	mutex_enter(&spa_namespace_lock);
+	for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
+		if (ztest_opts.zo_verbose > 3)
+			(void) printf("spa_next: found %s\n", spa_name(spa));
+	mutex_exit(&spa_namespace_lock);
+
+	/*
+	 * Verify that we can export the pool and reimport it under a
+	 * different name.
+	 */
+	if (ztest_random(2) == 0) {
+		char name[MAXNAMELEN];
+		(void) snprintf(name, MAXNAMELEN, "%s_import",
+		    ztest_opts.zo_pool);
+		ztest_spa_import_export(ztest_opts.zo_pool, name);
+		ztest_spa_import_export(name, ztest_opts.zo_pool);
+	}
+
+	kernel_fini();
+
+	list_destroy(&zcl.zcl_callbacks);
+
+	(void) _mutex_destroy(&zcl.zcl_callbacks_lock);
+
+	(void) rwlock_destroy(&ztest_name_lock);
+	(void) _mutex_destroy(&ztest_vdev_lock);
+}
+
+/*
+ * Test spa_freeze(): freeze the pool so spa_sync() records nothing,
+ * generate a stream of ZIL activity, then reopen the pool to force a
+ * full log replay.
+ */
+static void
+ztest_freeze(void)
+{
+	ztest_ds_t *zd = &ztest_ds[0];
+	spa_t *spa;
+	int numloops = 0;
+
+	if (ztest_opts.zo_verbose >= 3)
+		(void) printf("testing spa_freeze()...\n");
+
+	kernel_init(FREAD | FWRITE);
+	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
+	VERIFY3U(0, ==, ztest_dataset_open(0));
+	spa->spa_debug = B_TRUE;
+	ztest_spa = spa;
+
+	/*
+	 * Force the first log block to be transactionally allocated.
+	 * We have to do this before we freeze the pool -- otherwise
+	 * the log chain won't be anchored.
+	 */
+	while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
+		ztest_dmu_object_alloc_free(zd, 0);
+		zil_commit(zd->zd_zilog, 0);
+	}
+
+	txg_wait_synced(spa_get_dsl(spa), 0);
+
+	/*
+	 * Freeze the pool.  This stops spa_sync() from doing anything,
+	 * so that the only way to record changes from now on is the ZIL.
+	 */
+	spa_freeze(spa);
+
+	/*
+	 * Run tests that generate log records but don't alter the pool config
+	 * or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
+	 * We do a txg_wait_synced() after each iteration to force the txg
+	 * to increase well beyond the last synced value in the uberblock.
+	 * The ZIL should be OK with that.
+	 */
+	while (ztest_random(10) != 0 &&
+	    numloops++ < ztest_opts.zo_maxloops) {
+		ztest_dmu_write_parallel(zd, 0);
+		ztest_dmu_object_alloc_free(zd, 0);
+		txg_wait_synced(spa_get_dsl(spa), 0);
+	}
+
+	/*
+	 * Commit all of the changes we just generated.
+	 */
+	zil_commit(zd->zd_zilog, 0);
+	txg_wait_synced(spa_get_dsl(spa), 0);
+
+	/*
+	 * Close our dataset and close the pool.
+	 */
+	ztest_dataset_close(0);
+	spa_close(spa, FTAG);
+	kernel_fini();
+
+	/*
+	 * Open and close the pool and dataset to induce log replay.
+	 */
+	kernel_init(FREAD | FWRITE);
+	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
+	ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
+	VERIFY3U(0, ==, ztest_dataset_open(0));
+	ztest_dataset_close(0);
+
+	/* Also exercise a pool reguid against the freshly replayed pool. */
+	spa->spa_debug = B_TRUE;
+	ztest_spa = spa;
+	txg_wait_synced(spa_get_dsl(spa), 0);
+	ztest_reguid(NULL, 0);
+
+	spa_close(spa, FTAG);
+	kernel_fini();
+}
+
+/*
+ * Format a duration t (in nanoseconds) into timebuf as days/hours/
+ * minutes/seconds, printing only the most significant nonzero unit
+ * and everything below it (e.g. "1d01h01m01s", "1m01s", "0s").
+ * timebuf must be large enough for the longest form.
+ */
+void
+print_time(hrtime_t t, char *timebuf)
+{
+	hrtime_t secs = t / NANOSEC;
+	hrtime_t mins = secs / 60;
+	hrtime_t hours = mins / 60;
+	hrtime_t days = hours / 24;
+
+	/* Reduce each field to its remainder modulo the next unit up. */
+	secs %= 60;
+	mins %= 60;
+	hours %= 24;
+
+	timebuf[0] = '\0';
+
+	if (days)
+		(void) sprintf(timebuf,
+		    "%llud%02lluh%02llum%02llus", days, hours, mins, secs);
+	else if (hours)
+		(void) sprintf(timebuf, "%lluh%02llum%02llus",
+		    hours, mins, secs);
+	else if (mins)
+		(void) sprintf(timebuf, "%llum%02llus", mins, secs);
+	else
+		(void) sprintf(timebuf, "%llus", secs);
+}
+
+/*
+ * Build a property nvlist for pool creation: half the time it is empty,
+ * otherwise "autoreplace" is enabled.  Caller owns the returned list.
+ */
+static nvlist_t *
+make_random_props()
+{
+	nvlist_t *props;
+
+	VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
+
+	if (ztest_random(2) != 0)
+		VERIFY(nvlist_add_uint64(props, "autoreplace", 1) == 0);
+
+	return (props);
+}
+
+/*
+ * Create a storage pool with the given name and initial vdev size.
+ * Then test spa_freeze() functionality.
+ *
+ * Each pass destroys any leftover pool, creates a fresh one with random
+ * properties (and all features explicitly at version 0), records the
+ * metaslab size, and runs zdb before and after the freeze test.
+ */
+static void
+ztest_init(ztest_shared_t *zs)
+{
+	spa_t *spa;
+	nvlist_t *nvroot, *props;
+
+	VERIFY(_mutex_init(&ztest_vdev_lock, USYNC_THREAD, NULL) == 0);
+	VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
+
+	kernel_init(FREAD | FWRITE);
+
+	/*
+	 * Create the storage pool.
+	 */
+	(void) spa_destroy(ztest_opts.zo_pool);
+	ztest_shared->zs_vdev_next_leaf = 0;
+	zs->zs_splits = 0;
+	zs->zs_mirrors = ztest_opts.zo_mirrors;
+	nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
+	    0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
+	props = make_random_props();
+	/* Add every feature as a "feature@..." property set to 0. */
+	for (int i = 0; i < SPA_FEATURES; i++) {
+		char buf[1024];
+		(void) snprintf(buf, sizeof (buf), "feature@%s",
+		    spa_feature_table[i].fi_uname);
+		VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
+	}
+	VERIFY3U(0, ==, spa_create(ztest_opts.zo_pool, nvroot, props,
+	    NULL, NULL));
+	nvlist_free(nvroot);
+
+	/* Remember the metaslab size for later consistency checks. */
+	VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
+	zs->zs_metaslab_sz =
+	    1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
+
+	spa_close(spa, FTAG);
+
+	kernel_fini();
+
+	/* Sanity-check the new pool with zdb before and after freezing. */
+	ztest_run_zdb(ztest_opts.zo_pool);
+
+	ztest_freeze();
+
+	ztest_run_zdb(ztest_opts.zo_pool);
+
+	(void) rwlock_destroy(&ztest_name_lock);
+	(void) _mutex_destroy(&ztest_vdev_lock);
+}
+
+/*
+ * Create the temp file backing the shared mmap'd state and store its
+ * descriptor in ztest_fd_data.  The name is unlinked immediately, so
+ * the file vanishes once the last descriptor referencing it is closed.
+ */
+static void
+setup_data_fd(void)
+{
+	static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
+
+	ztest_fd_data = mkstemp(ztest_name_data);
+	ASSERT3S(ztest_fd_data, >=, 0);
+	(void) unlink(ztest_name_data);
+}
+
+
+/*
+ * Total size, in bytes, of the shared mmap'd region described by hdr:
+ * the header itself, the options, the shared state, the per-function
+ * call-state array, and the per-dataset array.
+ */
+static int
+shared_data_size(ztest_shared_hdr_t *hdr)
+{
+	return (hdr->zh_hdr_size +
+	    hdr->zh_opts_size +
+	    hdr->zh_size +
+	    hdr->zh_stats_size * hdr->zh_stats_count +
+	    hdr->zh_ds_size * hdr->zh_ds_count);
+}
+
+/*
+ * First-time initialization of the shared data file: map just the
+ * header page, record the size and count of each region, then grow the
+ * file to the full size those regions require.
+ */
+static void
+setup_hdr(void)
+{
+	int size;
+	ztest_shared_hdr_t *hdr;
+
+	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
+	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
+	ASSERT(hdr != MAP_FAILED);
+
+	/* Make the file large enough for the header before writing it. */
+	VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
+
+	hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
+	hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
+	hdr->zh_size = sizeof (ztest_shared_t);
+	hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
+	hdr->zh_stats_count = ZTEST_FUNCS;
+	hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
+	hdr->zh_ds_count = ztest_opts.zo_datasets;
+
+	/* Now grow the file to hold every region described above. */
+	size = shared_data_size(hdr);
+	VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
+
+	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
+}
+
+/*
+ * Map the shared data file into this process and point the global
+ * region pointers (opts, shared state, call-state array, dataset array)
+ * at their offsets within the mapping.  The header is mapped read-only
+ * first just to learn the total size.
+ */
+static void
+setup_data(void)
+{
+	int size, offset;
+	ztest_shared_hdr_t *hdr;
+	uint8_t *buf;
+
+	hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
+	    PROT_READ, MAP_SHARED, ztest_fd_data, 0);
+	ASSERT(hdr != MAP_FAILED);
+
+	size = shared_data_size(hdr);
+
+	/* Remap the whole region read-write now that the size is known. */
+	(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
+	hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
+	    PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
+	ASSERT(hdr != MAP_FAILED);
+	buf = (uint8_t *)hdr;
+
+	/* Regions are laid out back-to-back in the order written by setup_hdr(). */
+	offset = hdr->zh_hdr_size;
+	ztest_shared_opts = (void *)&buf[offset];
+	offset += hdr->zh_opts_size;
+	ztest_shared = (void *)&buf[offset];
+	offset += hdr->zh_size;
+	ztest_shared_callstate = (void *)&buf[offset];
+	offset += hdr->zh_stats_size * hdr->zh_stats_count;
+	ztest_shared_ds = (void *)&buf[offset];
+}
+
+/*
+ * Fork and exec a (possibly alternate) ztest binary as a child process,
+ * then wait for it.
+ *
+ * cmd:        path of the binary to exec, or NULL to re-exec ourselves.
+ * libpath:    if non-NULL, exported as LD_LIBRARY_PATH in the child.
+ * ignorekill: if B_TRUE, death by SIGKILL counts as a normal (expected)
+ *             outcome rather than an error.
+ * statusp:    if non-NULL, receives the raw wait(2) status.
+ *
+ * Returns B_TRUE if the child was killed (only with ignorekill), B_FALSE
+ * if it exited with status 0; any other outcome exits the parent.
+ */
+static boolean_t
+exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
+{
+	pid_t pid;
+	int status;
+	char *cmdbuf = NULL;
+
+	pid = fork();
+
+	/*
+	 * This runs in BOTH parent and child (it follows the fork), so the
+	 * child sees the resolved cmd too.
+	 */
+	if (cmd == NULL) {
+		cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+		(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
+		cmd = cmdbuf;
+	}
+
+	if (pid == -1)
+		fatal(1, "fork failed");
+
+	if (pid == 0) {	/* child */
+		char *emptyargv[2] = { cmd, NULL };
+		char fd_data_str[12];
+
+		struct rlimit rl = { 1024, 1024 };
+		(void) setrlimit(RLIMIT_NOFILE, &rl);
+
+		/* Pass the shared-data fd to the child via the environment. */
+		(void) close(ztest_fd_rand);
+		VERIFY3U(11, >=,
+		    snprintf(fd_data_str, 12, "%d", ztest_fd_data));
+		VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
+
+		(void) enable_extended_FILE_stdio(-1, -1);
+		if (libpath != NULL)
+			VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
+#ifdef illumos
+		(void) execv(cmd, emptyargv);
+#else
+		(void) execvp(cmd, emptyargv);
+#endif
+		/* Only reached if exec failed. */
+		ztest_dump_core = B_FALSE;
+		fatal(B_TRUE, "exec failed: %s", cmd);
+	}
+
+	if (cmdbuf != NULL) {
+		umem_free(cmdbuf, MAXPATHLEN);
+		cmd = NULL;
+	}
+
+	/* Parent: wait for the child, retrying on interrupted waits. */
+	while (waitpid(pid, &status, 0) != pid)
+		continue;
+	if (statusp != NULL)
+		*statusp = status;
+
+	if (WIFEXITED(status)) {
+		if (WEXITSTATUS(status) != 0) {
+			(void) fprintf(stderr, "child exited with code %d\n",
+			    WEXITSTATUS(status));
+			exit(2);
+		}
+		return (B_FALSE);
+	} else if (WIFSIGNALED(status)) {
+		if (!ignorekill || WTERMSIG(status) != SIGKILL) {
+			(void) fprintf(stderr, "child died with signal %d\n",
+			    WTERMSIG(status));
+			exit(3);
+		}
+		return (B_TRUE);
+	} else {
+		(void) fprintf(stderr, "something strange happened to child\n");
+		exit(4);
+		/* NOTREACHED */
+	}
+}
+
+/*
+ * Initialization pass: remove any stale zpool.cache, then create and
+ * initialize the storage pool zo_init times (each pass resets the
+ * shared state first).
+ */
+static void
+ztest_run_init(void)
+{
+	ztest_shared_t *zs = ztest_shared;
+	int pass;
+
+	ASSERT(ztest_opts.zo_init != 0);
+
+	/*
+	 * Blow away any existing copy of zpool.cache
+	 */
+	(void) remove(spa_config_path);
+
+	/*
+	 * Create and initialize our storage pool.
+	 */
+	for (pass = 1; pass <= ztest_opts.zo_init; pass++) {
+		bzero(zs, sizeof (ztest_shared_t));
+		if (ztest_opts.zo_verbose >= 3 && ztest_opts.zo_init != 1)
+			(void) printf("ztest_init(), pass %d\n", pass);
+		ztest_init(zs);
+	}
+}
+
+int
+main(int argc, char **argv)
+{
+	int kills = 0;
+	int iters = 0;
+	int older = 0;
+	int newer = 0;
+	ztest_shared_t *zs;
+	ztest_info_t *zi;
+	ztest_shared_callstate_t *zc;
+	char timebuf[100];
+	char numbuf[6];
+	spa_t *spa;
+	char *cmd;
+	boolean_t hasalt;
+	char *fd_data_str = getenv("ZTEST_FD_DATA");
+
+	(void) setvbuf(stdout, NULL, _IOLBF, 0);
+
+	dprintf_setup(&argc, argv);
+
+	ztest_fd_rand = open("/dev/urandom", O_RDONLY);
+	ASSERT3S(ztest_fd_rand, >=, 0);
+
+	if (!fd_data_str) {
+		process_options(argc, argv);
+
+		setup_data_fd();
+		setup_hdr();
+		setup_data();
+		bcopy(&ztest_opts, ztest_shared_opts,
+		    sizeof (*ztest_shared_opts));
+	} else {
+		ztest_fd_data = atoi(fd_data_str);
+		setup_data();
+		bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
+	}
+	ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
+
+	/* Override location of zpool.cache */
+	VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
+	    ztest_opts.zo_dir), !=, -1);
+
+	ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
+	    UMEM_NOFAIL);
+	zs = ztest_shared;
+
+	if (fd_data_str) {
+		metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
+		metaslab_df_alloc_threshold =
+		    zs->zs_metaslab_df_alloc_threshold;
+
+		if (zs->zs_do_init)
+			ztest_run_init();
+		else
+			ztest_run(zs);
+		exit(0);
+	}
+
+	hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
+
+	if (ztest_opts.zo_verbose >= 1) {
+		(void) printf("%llu vdevs, %d datasets, %d threads,"
+		    " %llu seconds...\n",
+		    (u_longlong_t)ztest_opts.zo_vdevs,
+		    ztest_opts.zo_datasets,
+		    ztest_opts.zo_threads,
+		    (u_longlong_t)ztest_opts.zo_time);
+	}
+
+	cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+	(void) strlcpy(cmd, getexecname(), MAXNAMELEN);
+
+	zs->zs_do_init = B_TRUE;
+	if (strlen(ztest_opts.zo_alt_ztest) != 0) {
+		if (ztest_opts.zo_verbose >= 1) {
+			(void) printf("Executing older ztest for "
+			    "initialization: %s\n", ztest_opts.zo_alt_ztest);
+		}
+		VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
+		    ztest_opts.zo_alt_libpath, B_FALSE, NULL));
+	} else {
+		VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
+	}
+	zs->zs_do_init = B_FALSE;
+
+	zs->zs_proc_start = gethrtime();
+	zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
+
+	for (int f = 0; f < ZTEST_FUNCS; f++) {
+		zi = &ztest_info[f];
+		zc = ZTEST_GET_SHARED_CALLSTATE(f);
+		if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
+			zc->zc_next = UINT64_MAX;
+		else
+			zc->zc_next = zs->zs_proc_start +
+			    ztest_random(2 * zi->zi_interval[0] + 1);
+	}
+
+	/*
+	 * Run the tests in a loop.  These tests include fault injection
+	 * to verify that self-healing data works, and forced crashes
+	 * to verify that we never lose on-disk consistency.
+	 */
+	while (gethrtime() < zs->zs_proc_stop) {
+		int status;
+		boolean_t killed;
+
+		/*
+		 * Initialize the workload counters for each function.
+		 */
+		for (int f = 0; f < ZTEST_FUNCS; f++) {
+			zc = ZTEST_GET_SHARED_CALLSTATE(f);
+			zc->zc_count = 0;
+			zc->zc_time = 0;
+		}
+
+		/* Set the allocation switch size */
+		zs->zs_metaslab_df_alloc_threshold =
+		    ztest_random(zs->zs_metaslab_sz / 4) + 1;
+
+		if (!hasalt || ztest_random(2) == 0) {
+			if (hasalt && ztest_opts.zo_verbose >= 1) {
+				(void) printf("Executing newer ztest: %s\n",
+				    cmd);
+			}
+			newer++;
+			killed = exec_child(cmd, NULL, B_TRUE, &status);
+		} else {
+			if (hasalt && ztest_opts.zo_verbose >= 1) {
+				(void) printf("Executing older ztest: %s\n",
+				    ztest_opts.zo_alt_ztest);
+			}
+			older++;
+			killed = exec_child(ztest_opts.zo_alt_ztest,
+			    ztest_opts.zo_alt_libpath, B_TRUE, &status);
+		}
+
+		if (killed)
+			kills++;
+		iters++;
+
+		if (ztest_opts.zo_verbose >= 1) {
+			hrtime_t now = gethrtime();
+
+			now = MIN(now, zs->zs_proc_stop);
+			print_time(zs->zs_proc_stop - now, timebuf);
+			nicenum(zs->zs_space, numbuf);
+
+			(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
+			    "%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
+			    iters,
+			    WIFEXITED(status) ? "Complete" : "SIGKILL",
+			    (u_longlong_t)zs->zs_enospc_count,
+			    100.0 * zs->zs_alloc / zs->zs_space,
+			    numbuf,
+			    100.0 * (now - zs->zs_proc_start) /
+			    (ztest_opts.zo_time * NANOSEC), timebuf);
+		}
+
+		if (ztest_opts.zo_verbose >= 2) {
+			(void) printf("\nWorkload summary:\n\n");
+			(void) printf("%7s %9s   %s\n",
+			    "Calls", "Time", "Function");
+			(void) printf("%7s %9s   %s\n",
+			    "-----", "----", "--------");
+			for (int f = 0; f < ZTEST_FUNCS; f++) {
+				Dl_info dli;
+
+				zi = &ztest_info[f];
+				zc = ZTEST_GET_SHARED_CALLSTATE(f);
+				print_time(zc->zc_time, timebuf);
+				(void) dladdr((void *)zi->zi_func, &dli);
+				(void) printf("%7llu %9s   %s\n",
+				    (u_longlong_t)zc->zc_count, timebuf,
+				    dli.dli_sname);
+			}
+			(void) printf("\n");
+		}
+
+		/*
+		 * It's possible that we killed a child during a rename test,
+		 * in which case we'll have a 'ztest_tmp' pool lying around
+		 * instead of 'ztest'.  Do a blind rename in case this happened.
+		 */
+		kernel_init(FREAD);
+		if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
+			spa_close(spa, FTAG);
+		} else {
+			char tmpname[MAXNAMELEN];
+			kernel_fini();
+			kernel_init(FREAD | FWRITE);
+			(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
+			    ztest_opts.zo_pool);
+			(void) spa_rename(tmpname, ztest_opts.zo_pool);
+		}
+		kernel_fini();
+
+		ztest_run_zdb(ztest_opts.zo_pool);
+	}
+
+	if (ztest_opts.zo_verbose >= 1) {
+		if (hasalt) {
+			(void) printf("%d runs of older ztest: %s\n", older,
+			    ztest_opts.zo_alt_ztest);
+			(void) printf("%d runs of newer ztest: %s\n", newer,
+			    cmd);
+		}
+		(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
+		    kills, iters - kills, (100.0 * kills) / MAX(1, iters));
+	}
+
+	umem_free(cmd, MAXNAMELEN);
+
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..2660059f7a817ba3e8c6f4072d28020495ebdf7d
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs.h
@@ -0,0 +1,785 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
+ */
+
+#ifndef	_LIBZFS_H
+#define	_LIBZFS_H
+
+#include <assert.h>
+#include <libnvpair.h>
+#include <sys/mnttab.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/varargs.h>
+#include <sys/fs/zfs.h>
+#include <sys/avl.h>
+#include <sys/zfs_ioctl.h>
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+/*
+ * Miscellaneous ZFS constants
+ */
+#define	ZFS_MAXNAMELEN		MAXNAMELEN
+#define	ZPOOL_MAXNAMELEN	MAXNAMELEN
+#define	ZFS_MAXPROPLEN		MAXPATHLEN
+#define	ZPOOL_MAXPROPLEN	MAXPATHLEN
+
+/*
+ * libzfs errors
+ */
+enum {
+	EZFS_NOMEM = 2000,	/* out of memory */
+	EZFS_BADPROP,		/* invalid property value */
+	EZFS_PROPREADONLY,	/* cannot set readonly property */
+	EZFS_PROPTYPE,		/* property does not apply to dataset type */
+	EZFS_PROPNONINHERIT,	/* property is not inheritable */
+	EZFS_PROPSPACE,		/* bad quota or reservation */
+	EZFS_BADTYPE,		/* dataset is not of appropriate type */
+	EZFS_BUSY,		/* pool or dataset is busy */
+	EZFS_EXISTS,		/* pool or dataset already exists */
+	EZFS_NOENT,		/* no such pool or dataset */
+	EZFS_BADSTREAM,		/* bad backup stream */
+	EZFS_DSREADONLY,	/* dataset is readonly */
+	EZFS_VOLTOOBIG,		/* volume is too large for 32-bit system */
+	EZFS_INVALIDNAME,	/* invalid dataset name */
+	EZFS_BADRESTORE,	/* unable to restore to destination */
+	EZFS_BADBACKUP,		/* backup failed */
+	EZFS_BADTARGET,		/* bad attach/detach/replace target */
+	EZFS_NODEVICE,		/* no such device in pool */
+	EZFS_BADDEV,		/* invalid device to add */
+	EZFS_NOREPLICAS,	/* no valid replicas */
+	EZFS_RESILVERING,	/* currently resilvering */
+	EZFS_BADVERSION,	/* unsupported version */
+	EZFS_POOLUNAVAIL,	/* pool is currently unavailable */
+	EZFS_DEVOVERFLOW,	/* too many devices in one vdev */
+	EZFS_BADPATH,		/* must be an absolute path */
+	EZFS_CROSSTARGET,	/* rename or clone across pool or dataset */
+	EZFS_ZONED,		/* used improperly in local zone */
+	EZFS_MOUNTFAILED,	/* failed to mount dataset */
+	EZFS_UMOUNTFAILED,	/* failed to unmount dataset */
+	EZFS_UNSHARENFSFAILED,	/* unshare(1M) failed */
+	EZFS_SHARENFSFAILED,	/* share(1M) failed */
+	EZFS_PERM,		/* permission denied */
+	EZFS_NOSPC,		/* out of space */
+	EZFS_FAULT,		/* bad address */
+	EZFS_IO,		/* I/O error */
+	EZFS_INTR,		/* signal received */
+	EZFS_ISSPARE,		/* device is a hot spare */
+	EZFS_INVALCONFIG,	/* invalid vdev configuration */
+	EZFS_RECURSIVE,		/* recursive dependency */
+	EZFS_NOHISTORY,		/* no history object */
+	EZFS_POOLPROPS,		/* couldn't retrieve pool props */
+	EZFS_POOL_NOTSUP,	/* ops not supported for this type of pool */
+	EZFS_POOL_INVALARG,	/* invalid argument for this pool operation */
+	EZFS_NAMETOOLONG,	/* dataset name is too long */
+	EZFS_OPENFAILED,	/* open of device failed */
+	EZFS_NOCAP,		/* couldn't get capacity */
+	EZFS_LABELFAILED,	/* write of label failed */
+	EZFS_BADWHO,		/* invalid permission who */
+	EZFS_BADPERM,		/* invalid permission */
+	EZFS_BADPERMSET,	/* invalid permission set name */
+	EZFS_NODELEGATION,	/* delegated administration is disabled */
+	EZFS_UNSHARESMBFAILED,	/* failed to unshare over smb */
+	EZFS_SHARESMBFAILED,	/* failed to share over smb */
+	EZFS_BADCACHE,		/* bad cache file */
+	EZFS_ISL2CACHE,		/* device is for the level 2 ARC */
+	EZFS_VDEVNOTSUP,	/* unsupported vdev type */
+	EZFS_NOTSUP,		/* ops not supported on this dataset */
+	EZFS_ACTIVE_SPARE,	/* pool has active shared spare devices */
+	EZFS_UNPLAYED_LOGS,	/* log device has unplayed logs */
+	EZFS_REFTAG_RELE,	/* snapshot release: tag not found */
+	EZFS_REFTAG_HOLD,	/* snapshot hold: tag already exists */
+	EZFS_TAGTOOLONG,	/* snapshot hold/rele: tag too long */
+	EZFS_PIPEFAILED,	/* pipe create failed */
+	EZFS_THREADCREATEFAILED, /* thread create failed */
+	EZFS_POSTSPLIT_ONLINE,	/* onlining a disk after splitting it */
+	EZFS_SCRUBBING,		/* currently scrubbing */
+	EZFS_NO_SCRUB,		/* no active scrub */
+	EZFS_DIFF,		/* general failure of zfs diff */
+	EZFS_DIFFDATA,		/* bad zfs diff data */
+	EZFS_POOLREADONLY,	/* pool is in read-only mode */
+	EZFS_UNKNOWN
+};
+
+/*
+ * The following data structures are all part
+ * of the zfs_allow_t data structure which is
+ * used for printing 'allow' permissions.
+ * It is a linked list of zfs_allow_t's which
+ * then contain avl tree's for user/group/sets/...
+ * and each one of the entries in those trees have
+ * avl tree's for the permissions they belong to and
+ * whether they are local, descendent or local+descendent
+ * permissions.  The AVL trees are used primarily for
+ * sorting purposes, but also so that we can quickly find
+ * a given user and or permission.
+ */
+typedef struct zfs_perm_node {
+	avl_node_t z_node;
+	char z_pname[MAXPATHLEN];
+} zfs_perm_node_t;
+
+typedef struct zfs_allow_node {
+	avl_node_t z_node;
+	char z_key[MAXPATHLEN];		/* name, such as joe */
+	avl_tree_t z_localdescend;	/* local+descendent perms */
+	avl_tree_t z_local;		/* local permissions */
+	avl_tree_t z_descend;		/* descendent permissions */
+} zfs_allow_node_t;
+
+typedef struct zfs_allow {
+	struct zfs_allow *z_next;
+	char z_setpoint[MAXPATHLEN];
+	avl_tree_t z_sets;
+	avl_tree_t z_crperms;
+	avl_tree_t z_user;
+	avl_tree_t z_group;
+	avl_tree_t z_everyone;
+} zfs_allow_t;
+
+/*
+ * Basic handle types
+ */
+typedef struct zfs_handle zfs_handle_t;
+typedef struct zpool_handle zpool_handle_t;
+typedef struct libzfs_handle libzfs_handle_t;
+
+/*
+ * Library initialization
+ */
+extern libzfs_handle_t *libzfs_init(void);
+extern void libzfs_fini(libzfs_handle_t *);
+
+extern libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
+extern libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
+
+extern void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
+
+extern int libzfs_errno(libzfs_handle_t *);
+extern const char *libzfs_error_action(libzfs_handle_t *);
+extern const char *libzfs_error_description(libzfs_handle_t *);
+extern void libzfs_mnttab_init(libzfs_handle_t *);
+extern void libzfs_mnttab_fini(libzfs_handle_t *);
+extern void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t);
+extern int libzfs_mnttab_find(libzfs_handle_t *, const char *,
+    struct mnttab *);
+extern void libzfs_mnttab_add(libzfs_handle_t *, const char *,
+    const char *, const char *);
+extern void libzfs_mnttab_remove(libzfs_handle_t *, const char *);
+
+/*
+ * Basic handle functions
+ */
+extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
+extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
+extern void zpool_close(zpool_handle_t *);
+extern const char *zpool_get_name(zpool_handle_t *);
+extern int zpool_get_state(zpool_handle_t *);
+extern const char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
+extern const char *zpool_pool_state_to_name(pool_state_t);
+extern void zpool_free_handles(libzfs_handle_t *);
+
+/*
+ * Iterate over all active pools in the system.
+ */
+typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
+extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
+
+/*
+ * Functions to create and destroy pools
+ */
+extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
+    nvlist_t *, nvlist_t *);
+extern int zpool_destroy(zpool_handle_t *);
+extern int zpool_add(zpool_handle_t *, nvlist_t *);
+
+typedef struct splitflags {
+	/* do not split, but return the config that would be split off */
+	int dryrun : 1;
+
+	/* after splitting, import the pool */
+	int import : 1;
+} splitflags_t;
+
+/*
+ * Functions to manipulate pool and vdev state
+ */
+extern int zpool_scan(zpool_handle_t *, pool_scan_func_t);
+extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
+extern int zpool_reguid(zpool_handle_t *);
+extern int zpool_reopen(zpool_handle_t *);
+
+extern int zpool_vdev_online(zpool_handle_t *, const char *, int,
+    vdev_state_t *);
+extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
+extern int zpool_vdev_attach(zpool_handle_t *, const char *,
+    const char *, nvlist_t *, int);
+extern int zpool_vdev_detach(zpool_handle_t *, const char *);
+extern int zpool_vdev_remove(zpool_handle_t *, const char *);
+extern int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **, nvlist_t *,
+    splitflags_t);
+
+extern int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t);
+extern int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t);
+extern int zpool_vdev_clear(zpool_handle_t *, uint64_t);
+
+extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
+    boolean_t *, boolean_t *);
+extern nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
+    boolean_t *, boolean_t *, boolean_t *);
+extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, const char *);
+
+/*
+ * Functions to manage pool properties
+ */
+extern int zpool_set_prop(zpool_handle_t *, const char *, const char *);
+extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
+    size_t proplen, zprop_source_t *);
+extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
+    zprop_source_t *);
+
+extern const char *zpool_prop_to_name(zpool_prop_t);
+extern const char *zpool_prop_values(zpool_prop_t);
+
+/*
+ * Pool health statistics.
+ */
+typedef enum {
+	/*
+	 * The following correspond to faults as defined in the (fault.fs.zfs.*)
+	 * event namespace.  Each is associated with a corresponding message ID.
+	 */
+	ZPOOL_STATUS_CORRUPT_CACHE,	/* corrupt /kernel/drv/zpool.cache */
+	ZPOOL_STATUS_MISSING_DEV_R,	/* missing device with replicas */
+	ZPOOL_STATUS_MISSING_DEV_NR,	/* missing device with no replicas */
+	ZPOOL_STATUS_CORRUPT_LABEL_R,	/* bad device label with replicas */
+	ZPOOL_STATUS_CORRUPT_LABEL_NR,	/* bad device label with no replicas */
+	ZPOOL_STATUS_BAD_GUID_SUM,	/* sum of device guids didn't match */
+	ZPOOL_STATUS_CORRUPT_POOL,	/* pool metadata is corrupted */
+	ZPOOL_STATUS_CORRUPT_DATA,	/* data errors in user (meta)data */
+	ZPOOL_STATUS_FAILING_DEV,	/* device experiencing errors */
+	ZPOOL_STATUS_VERSION_NEWER,	/* newer on-disk version */
+	ZPOOL_STATUS_HOSTID_MISMATCH,	/* last accessed by another system */
+	ZPOOL_STATUS_IO_FAILURE_WAIT,	/* failed I/O, failmode 'wait' */
+	ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
+	ZPOOL_STATUS_BAD_LOG,		/* cannot read log chain(s) */
+
+	/*
+	 * If the pool has unsupported features but can still be opened in
+	 * read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
+	 * pool has unsupported features but cannot be opened at all, its
+	 * status is ZPOOL_STATUS_UNSUP_FEAT_READ.
+	 */
+	ZPOOL_STATUS_UNSUP_FEAT_READ,	/* unsupported features for read */
+	ZPOOL_STATUS_UNSUP_FEAT_WRITE,	/* unsupported features for write */
+
+	/*
+	 * These faults have no corresponding message ID.  At the time we are
+	 * checking the status, the original reason for the FMA fault (I/O or
+	 * checksum errors) has been lost.
+	 */
+	ZPOOL_STATUS_FAULTED_DEV_R,	/* faulted device with replicas */
+	ZPOOL_STATUS_FAULTED_DEV_NR,	/* faulted device with no replicas */
+
+	/*
+	 * The following are not faults per se, but still an error possibly
+	 * requiring administrative attention.  There is no corresponding
+	 * message ID.
+	 */
+	ZPOOL_STATUS_VERSION_OLDER,	/* older legacy on-disk version */
+	ZPOOL_STATUS_FEAT_DISABLED,	/* supported features are disabled */
+	ZPOOL_STATUS_RESILVERING,	/* device being resilvered */
+	ZPOOL_STATUS_OFFLINE_DEV,	/* device offline */
+	ZPOOL_STATUS_REMOVED_DEV,	/* removed device */
+
+	/*
+	 * Finally, the following indicates a healthy pool.
+	 */
+	ZPOOL_STATUS_OK
+} zpool_status_t;
+
+extern zpool_status_t zpool_get_status(zpool_handle_t *, char **);
+extern zpool_status_t zpool_import_status(nvlist_t *, char **);
+extern void zpool_dump_ddt(const ddt_stat_t *dds, const ddt_histogram_t *ddh);
+
+/*
+ * Statistics and configuration functions.
+ */
+extern nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
+extern nvlist_t *zpool_get_features(zpool_handle_t *);
+extern int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
+extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
+
+/*
+ * Import and export functions
+ */
+extern int zpool_export(zpool_handle_t *, boolean_t);
+extern int zpool_export_force(zpool_handle_t *);
+extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
+    char *altroot);
+extern int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
+    nvlist_t *, int);
+extern void zpool_print_unsup_feat(nvlist_t *config);
+
+/*
+ * Search for pools to import
+ */
+
+typedef struct importargs {
+	char **path;		/* a list of paths to search		*/
+	int paths;		/* number of paths to search		*/
+	char *poolname;		/* name of a pool to find		*/
+	uint64_t guid;		/* guid of a pool to find		*/
+	char *cachefile;	/* cachefile to use for import		*/
+	int can_be_active : 1;	/* can the pool be active?		*/
+	int unique : 1;		/* does 'poolname' already exist?	*/
+	int exists : 1;		/* set on return if pool already exists	*/
+} importargs_t;
+
+extern nvlist_t *zpool_search_import(libzfs_handle_t *, importargs_t *);
+
+/* legacy pool search routines */
+extern nvlist_t *zpool_find_import(libzfs_handle_t *, int, char **);
+extern nvlist_t *zpool_find_import_cached(libzfs_handle_t *, const char *,
+    char *, uint64_t);
+
+/*
+ * Miscellaneous pool functions
+ */
+struct zfs_cmd;
+
+extern const char *zfs_history_event_names[LOG_END];
+
+extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
+    boolean_t verbose);
+extern int zpool_upgrade(zpool_handle_t *, uint64_t);
+extern int zpool_get_history(zpool_handle_t *, nvlist_t **);
+extern int zpool_history_unpack(char *, uint64_t, uint64_t *,
+    nvlist_t ***, uint_t *);
+extern void zpool_set_history_str(const char *subcommand, int argc,
+    char **argv, char *history_str);
+extern int zpool_stage_history(libzfs_handle_t *, const char *);
+extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
+    size_t len);
+extern int zfs_ioctl(libzfs_handle_t *, unsigned long, struct zfs_cmd *);
+extern int zpool_get_physpath(zpool_handle_t *, char *, size_t);
+extern void zpool_explain_recover(libzfs_handle_t *, const char *, int,
+    nvlist_t *);
+
+/*
+ * Basic handle manipulations.  These functions do not create or destroy the
+ * underlying datasets, only the references to them.
+ */
+extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
+extern zfs_handle_t *zfs_handle_dup(zfs_handle_t *);
+extern void zfs_close(zfs_handle_t *);
+extern zfs_type_t zfs_get_type(const zfs_handle_t *);
+extern const char *zfs_get_name(const zfs_handle_t *);
+extern zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
+
+/*
+ * Property management functions.  Some functions are shared with the kernel,
+ * and are found in sys/fs/zfs.h.
+ */
+
+/*
+ * zfs dataset property management
+ */
+extern const char *zfs_prop_default_string(zfs_prop_t);
+extern uint64_t zfs_prop_default_numeric(zfs_prop_t);
+extern const char *zfs_prop_column_name(zfs_prop_t);
+extern boolean_t zfs_prop_align_right(zfs_prop_t);
+
+extern nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t,
+    nvlist_t *, uint64_t, zfs_handle_t *, const char *);
+
+extern const char *zfs_prop_to_name(zfs_prop_t);
+extern int zfs_prop_set(zfs_handle_t *, const char *, const char *);
+extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
+    zprop_source_t *, char *, size_t, boolean_t);
+extern int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t,
+    boolean_t);
+extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
+    zprop_source_t *, char *, size_t);
+extern int zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
+    uint64_t *propvalue);
+extern int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
+    char *propbuf, int proplen, boolean_t literal);
+extern int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
+    uint64_t *propvalue);
+extern int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
+    char *propbuf, int proplen, boolean_t literal);
+extern int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname,
+    char *buf, size_t len);
+extern int zfs_get_snapused_int(zfs_handle_t *firstsnap, zfs_handle_t *lastsnap,
+    uint64_t *usedp);
+extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
+extern int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t);
+extern const char *zfs_prop_values(zfs_prop_t);
+extern int zfs_prop_is_string(zfs_prop_t prop);
+extern nvlist_t *zfs_get_user_props(zfs_handle_t *);
+extern nvlist_t *zfs_get_recvd_props(zfs_handle_t *);
+extern nvlist_t *zfs_get_clones_nvl(zfs_handle_t *);
+
+
+typedef struct zprop_list {
+	int		pl_prop;
+	char		*pl_user_prop;
+	struct zprop_list *pl_next;
+	boolean_t	pl_all;
+	size_t		pl_width;
+	size_t		pl_recvd_width;
+	boolean_t	pl_fixed;
+} zprop_list_t;
+
+extern int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t);
+extern void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
+
+#define	ZFS_MOUNTPOINT_NONE	"none"
+#define	ZFS_MOUNTPOINT_LEGACY	"legacy"
+
+#define	ZFS_FEATURE_DISABLED	"disabled"
+#define	ZFS_FEATURE_ENABLED	"enabled"
+#define	ZFS_FEATURE_ACTIVE	"active"
+
+#define	ZFS_UNSUPPORTED_INACTIVE	"inactive"
+#define	ZFS_UNSUPPORTED_READONLY	"readonly"
+
+/*
+ * zpool property management
+ */
+extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **);
+extern int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
+    size_t);
+extern const char *zpool_prop_default_string(zpool_prop_t);
+extern uint64_t zpool_prop_default_numeric(zpool_prop_t);
+extern const char *zpool_prop_column_name(zpool_prop_t);
+extern boolean_t zpool_prop_align_right(zpool_prop_t);
+
+/*
+ * Functions shared by zfs and zpool property management.
+ */
+extern int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
+    boolean_t ordered, zfs_type_t type);
+extern int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
+    zfs_type_t);
+extern void zprop_free_list(zprop_list_t *);
+
+#define	ZFS_GET_NCOLS	5
+
+typedef enum {
+	GET_COL_NONE,
+	GET_COL_NAME,
+	GET_COL_PROPERTY,
+	GET_COL_VALUE,
+	GET_COL_RECVD,
+	GET_COL_SOURCE
+} zfs_get_column_t;
+
+/*
+ * Functions for printing zfs or zpool properties
+ */
+typedef struct zprop_get_cbdata {
+	int cb_sources;
+	zfs_get_column_t cb_columns[ZFS_GET_NCOLS];
+	int cb_colwidths[ZFS_GET_NCOLS + 1];
+	boolean_t cb_scripted;
+	boolean_t cb_literal;
+	boolean_t cb_first;
+	zprop_list_t *cb_proplist;
+	zfs_type_t cb_type;
+} zprop_get_cbdata_t;
+
+void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
+    const char *, const char *, zprop_source_t, const char *,
+    const char *);
+
+/*
+ * Iterator functions.
+ */
+typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
+extern int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f, void *);
+extern int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *);
+extern int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *);
+extern int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f, void *);
+
+typedef struct get_all_cb {
+	zfs_handle_t	**cb_handles;
+	size_t		cb_alloc;
+	size_t		cb_used;
+	boolean_t	cb_verbose;
+	int		(*cb_getone)(zfs_handle_t *, void *);
+} get_all_cb_t;
+
+void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *);
+int libzfs_dataset_cmp(const void *, const void *);
+
+/*
+ * Functions to create and destroy datasets.
+ */
+extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
+    nvlist_t *);
+extern int zfs_create_ancestors(libzfs_handle_t *, const char *);
+extern int zfs_destroy(zfs_handle_t *, boolean_t);
+extern int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t);
+extern int zfs_destroy_snaps_nvl(zfs_handle_t *, nvlist_t *, boolean_t);
+extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
+extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t, nvlist_t *);
+extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
+
+typedef struct renameflags {
+	/* recursive rename */
+	int recurse : 1;
+
+	/* don't unmount file systems */
+	int nounmount : 1;
+
+	/* force unmount file systems */
+	int forceunmount : 1;
+} renameflags_t;
+
+extern int zfs_rename(zfs_handle_t *, const char *, const char *,
+    renameflags_t flags);
+
+typedef struct sendflags {
+	/* print informational messages (ie, -v was specified) */
+	boolean_t verbose;
+
+	/* recursive send  (ie, -R) */
+	boolean_t replicate;
+
+	/* for incrementals, do all intermediate snapshots */
+	boolean_t doall;
+
+	/* if dataset is a clone, do incremental from its origin */
+	boolean_t fromorigin;
+
+	/* do deduplication */
+	boolean_t dedup;
+
+	/* send properties (ie, -p) */
+	boolean_t props;
+
+	/* do not send (no-op, ie. -n) */
+	boolean_t dryrun;
+
+	/* parsable verbose output (ie. -P) */
+	boolean_t parsable;
+
+	/* show progress (ie. -v) */
+	boolean_t progress;
+} sendflags_t;
+
+typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
+
+extern int zfs_send(zfs_handle_t *, const char *, const char *,
+    sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **);
+
+extern int zfs_promote(zfs_handle_t *);
+extern int zfs_hold(zfs_handle_t *, const char *, const char *, boolean_t,
+    boolean_t, boolean_t, int, uint64_t, uint64_t);
+extern int zfs_release(zfs_handle_t *, const char *, const char *, boolean_t);
+extern int zfs_get_holds(zfs_handle_t *, nvlist_t **);
+extern uint64_t zvol_volsize_to_reservation(uint64_t, nvlist_t *);
+
+typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain,
+    uid_t rid, uint64_t space);
+
+extern int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t,
+    zfs_userspace_cb_t, void *);
+
+extern int zfs_get_fsacl(zfs_handle_t *, nvlist_t **);
+extern int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *);
+
+typedef struct recvflags {
+	/* print informational messages (ie, -v was specified) */
+	boolean_t verbose;
+
+	/* the destination is a prefix, not the exact fs (ie, -d) */
+	boolean_t isprefix;
+
+	/*
+	 * Only the tail of the sent snapshot path is appended to the
+	 * destination to determine the received snapshot name (ie, -e).
+	 */
+	boolean_t istail;
+
+	/* do not actually do the recv, just check if it would work (ie, -n) */
+	boolean_t dryrun;
+
+	/* rollback/destroy filesystems as necessary (eg, -F) */
+	boolean_t force;
+
+	/* set "canmount=off" on all modified filesystems */
+	boolean_t canmountoff;
+
+	/* byteswap flag is used internally; callers need not specify */
+	boolean_t byteswap;
+
+	/* do not mount file systems as they are extracted (private) */
+	boolean_t nomount;
+} recvflags_t;
+
+extern int zfs_receive(libzfs_handle_t *, const char *, recvflags_t *,
+    int, avl_tree_t *);
+
+typedef enum diff_flags {
+	ZFS_DIFF_PARSEABLE = 0x1,
+	ZFS_DIFF_TIMESTAMP = 0x2,
+	ZFS_DIFF_CLASSIFY = 0x4
+} diff_flags_t;
+
+extern int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *,
+    int);
+
+/*
+ * Miscellaneous functions.
+ */
+extern const char *zfs_type_to_name(zfs_type_t);
+extern void zfs_refresh_properties(zfs_handle_t *);
+extern int zfs_name_valid(const char *, zfs_type_t);
+extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t);
+extern boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
+    zfs_type_t);
+extern int zfs_spa_version(zfs_handle_t *, int *);
+
+/*
+ * Mount support functions.
+ */
+extern boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
+extern boolean_t zfs_is_mounted(zfs_handle_t *, char **);
+extern int zfs_mount(zfs_handle_t *, const char *, int);
+extern int zfs_unmount(zfs_handle_t *, const char *, int);
+extern int zfs_unmountall(zfs_handle_t *, int);
+
+/*
+ * Share support functions.
+ */
+extern boolean_t zfs_is_shared(zfs_handle_t *);
+extern int zfs_share(zfs_handle_t *);
+extern int zfs_unshare(zfs_handle_t *);
+
+/*
+ * Protocol-specific share support functions.
+ */
+extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **);
+extern boolean_t zfs_is_shared_smb(zfs_handle_t *, char **);
+extern int zfs_share_nfs(zfs_handle_t *);
+extern int zfs_share_smb(zfs_handle_t *);
+extern int zfs_shareall(zfs_handle_t *);
+extern int zfs_unshare_nfs(zfs_handle_t *, const char *);
+extern int zfs_unshare_smb(zfs_handle_t *, const char *);
+extern int zfs_unshareall_nfs(zfs_handle_t *);
+extern int zfs_unshareall_smb(zfs_handle_t *);
+extern int zfs_unshareall_bypath(zfs_handle_t *, const char *);
+extern int zfs_unshareall(zfs_handle_t *);
+extern int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *, char *,
+    void *, void *, int, zfs_share_op_t);
+
+/*
+ * FreeBSD-specific jail support function.
+ */
+extern int zfs_jail(zfs_handle_t *, int, int);
+
+/*
+ * When dealing with nvlists, verify() is extremely useful
+ */
+#ifndef verify
+#ifdef NDEBUG
+#define	verify(EX)	((void)(EX))
+#else
+#define	verify(EX)	assert(EX)
+#endif
+#endif
+
+/*
+ * Utility function to convert a number to a human-readable form.
+ */
+extern void zfs_nicenum(uint64_t, char *, size_t);
+extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
+
+/*
+ * Given a device or file, determine if it is part of a pool.
+ */
+extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
+    boolean_t *);
+
+/*
+ * Label manipulation.
+ */
+extern int zpool_read_label(int, nvlist_t **);
+extern int zpool_clear_label(int);
+
+/* is this zvol valid for use as a dump device? */
+extern int zvol_check_dump_config(char *);
+
+/*
+ * Management interfaces for SMB ACL files
+ */
+
+int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *);
+int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *);
+int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *);
+int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *, char *);
+
+/*
+ * Enable and disable datasets within a pool by mounting/unmounting and
+ * sharing/unsharing them.
+ */
+extern int zpool_enable_datasets(zpool_handle_t *, const char *, int);
+extern int zpool_disable_datasets(zpool_handle_t *, boolean_t);
+
+/*
+ * Mappings between vdev and FRU.
+ */
+extern void libzfs_fru_refresh(libzfs_handle_t *);
+extern const char *libzfs_fru_lookup(libzfs_handle_t *, const char *);
+extern const char *libzfs_fru_devpath(libzfs_handle_t *, const char *);
+extern boolean_t libzfs_fru_compare(libzfs_handle_t *, const char *,
+    const char *);
+extern boolean_t libzfs_fru_notself(libzfs_handle_t *, const char *);
+extern int zpool_fru_set(zpool_handle_t *, uint64_t, const char *);
+
+#ifndef sun
+extern int zmount(const char *, const char *, int, char *, char *, int, char *,
+    int);
+#endif	/* !sun */
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _LIBZFS_H */
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
new file mode 100644
index 0000000000000000000000000000000000000000..a899965ca0f787934dc6c1a4317c0ed93cd1935d
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_changelist.c
@@ -0,0 +1,700 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Portions Copyright 2007 Ramprakash Jelari
+ *
+ * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ */
+
+#include <libintl.h>
+#include <libuutil.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <zone.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+/*
+ * Structure to keep track of dataset state.  Before changing the 'sharenfs' or
+ * 'mountpoint' property, we record whether the filesystem was previously
+ * mounted/shared.  This prior state dictates whether we remount/reshare the
+ * dataset after the property has been changed.
+ *
+ * The interface consists of the following sequence of functions:
+ *
+ * 	changelist_gather()
+ * 	changelist_prefix()
+ * 	< change property >
+ * 	changelist_postfix()
+ * 	changelist_free()
+ *
+ * Other interfaces:
+ *
+ * changelist_remove() - remove a node from a gathered list
+ * changelist_rename() - renames all datasets appropriately when doing a rename
+ * changelist_unshare() - unshares all the nodes in a given changelist
+ * changelist_haszonedchild() - check if there is any child exported to
+ *				a local zone
+ */
+typedef struct prop_changenode {
+	zfs_handle_t		*cn_handle;
+	int			cn_shared;
+	int			cn_mounted;
+	int			cn_zoned;
+	boolean_t		cn_needpost;	/* is postfix() needed? */
+	uu_list_node_t		cn_listnode;
+} prop_changenode_t;
+
+struct prop_changelist {
+	zfs_prop_t		cl_prop;
+	zfs_prop_t		cl_realprop;
+	zfs_prop_t		cl_shareprop;  /* used with sharenfs/sharesmb */
+	uu_list_pool_t		*cl_pool;
+	uu_list_t		*cl_list;
+	boolean_t		cl_waslegacy;
+	boolean_t		cl_allchildren;
+	boolean_t		cl_alldependents;
+	int			cl_mflags;	/* Mount flags */
+	int			cl_gflags;	/* Gather request flags */
+	boolean_t		cl_haszonedchild;
+	boolean_t		cl_sorted;
+};
+
+/*
+ * If the property is 'mountpoint', go through and unmount filesystems as
+ * necessary.  We don't do the same for 'sharenfs', because we can just re-share
+ * with different options without interrupting service. We do handle 'sharesmb'
+ * since there may be old resource names that need to be removed.
+ */
+int
+changelist_prefix(prop_changelist_t *clp)
+{
+	prop_changenode_t *cn;
+	int ret = 0;
+
+	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
+	    clp->cl_prop != ZFS_PROP_SHARESMB)
+		return (0);
+
+	for (cn = uu_list_first(clp->cl_list); cn != NULL;
+	    cn = uu_list_next(clp->cl_list, cn)) {
+
+		/* if a previous loop failed, set the remaining to false */
+		if (ret == -1) {
+			cn->cn_needpost = B_FALSE;
+			continue;
+		}
+
+		/*
+		 * If we are in the global zone, but this dataset is exported
+		 * to a local zone, do nothing.
+		 */
+		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
+			continue;
+
+		if (!ZFS_IS_VOLUME(cn->cn_handle)) {
+			/*
+			 * Do the property specific processing.
+			 */
+			switch (clp->cl_prop) {
+			case ZFS_PROP_MOUNTPOINT:
+				if (clp->cl_gflags & CL_GATHER_DONT_UNMOUNT)
+					break;
+				if (zfs_unmount(cn->cn_handle, NULL,
+				    clp->cl_mflags) != 0) {
+					ret = -1;
+					cn->cn_needpost = B_FALSE;
+				}
+				break;
+			case ZFS_PROP_SHARESMB:
+				(void) zfs_unshare_smb(cn->cn_handle, NULL);
+				break;
+			}
+		}
+	}
+
+	if (ret == -1)
+		(void) changelist_postfix(clp);
+
+	return (ret);
+}
+
+/*
+ * If the property is 'mountpoint' or 'sharenfs', go through and remount and/or
+ * reshare the filesystems as necessary.  In changelist_gather() we recorded
+ * whether the filesystem was previously shared or mounted.  The action we take
+ * depends on the previous state, and whether the value was previously 'legacy'.
+ * For non-legacy properties, we only remount/reshare the filesystem if it was
+ * previously mounted/shared.  Otherwise, we always remount/reshare the
+ * filesystem.
+ */
+int
+changelist_postfix(prop_changelist_t *clp)
+{
+	prop_changenode_t *cn;
+	char shareopts[ZFS_MAXPROPLEN];
+	int errors = 0;
+	libzfs_handle_t *hdl;
+
+	/*
+	 * If we're changing the mountpoint, attempt to destroy the underlying
+	 * mountpoint.  All other datasets will have inherited from this dataset
+	 * (in which case their mountpoints exist in the filesystem in the new
+	 * location), or have explicit mountpoints set (in which case they won't
+	 * be in the changelist).
+	 */
+	if ((cn = uu_list_last(clp->cl_list)) == NULL)
+		return (0);
+
+	if (clp->cl_prop == ZFS_PROP_MOUNTPOINT &&
+	    !(clp->cl_gflags & CL_GATHER_DONT_UNMOUNT)) {
+		remove_mountpoint(cn->cn_handle);
+	}
+
+	/*
+	 * It is possible that the changelist_prefix() used libshare
+	 * to unshare some entries. Since libshare caches data, an
+	 * attempt to reshare during postfix can fail unless libshare
+	 * is uninitialized here so that it will reinitialize later.
+	 */
+	if (cn->cn_handle != NULL) {
+		hdl = cn->cn_handle->zfs_hdl;
+		assert(hdl != NULL);
+		zfs_uninit_libshare(hdl);
+	}
+
+	/*
+	 * We walk the datasets in reverse, because we want to mount any parent
+	 * datasets before mounting the children.  We walk all datasets even if
+	 * there are errors.
+	 */
+	for (cn = uu_list_last(clp->cl_list); cn != NULL;
+	    cn = uu_list_prev(clp->cl_list, cn)) {
+
+		boolean_t sharenfs;
+		boolean_t sharesmb;
+		boolean_t mounted;
+
+		/*
+		 * If we are in the global zone, but this dataset is exported
+		 * to a local zone, do nothing.
+		 */
+		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
+			continue;
+
+		/* Only do post-processing if it's required */
+		if (!cn->cn_needpost)
+			continue;
+		cn->cn_needpost = B_FALSE;
+
+		zfs_refresh_properties(cn->cn_handle);
+
+		if (ZFS_IS_VOLUME(cn->cn_handle))
+			continue;
+
+		/*
+		 * Remount if previously mounted or mountpoint was legacy,
+		 * or sharenfs or sharesmb property is set.
+		 */
+		sharenfs = ((zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARENFS,
+		    shareopts, sizeof (shareopts), NULL, NULL, 0,
+		    B_FALSE) == 0) && (strcmp(shareopts, "off") != 0));
+
+		sharesmb = ((zfs_prop_get(cn->cn_handle, ZFS_PROP_SHARESMB,
+		    shareopts, sizeof (shareopts), NULL, NULL, 0,
+		    B_FALSE) == 0) && (strcmp(shareopts, "off") != 0));
+
+		mounted = (clp->cl_gflags & CL_GATHER_DONT_UNMOUNT) ||
+		    zfs_is_mounted(cn->cn_handle, NULL);
+
+		if (!mounted && (cn->cn_mounted ||
+		    ((sharenfs || sharesmb || clp->cl_waslegacy) &&
+		    (zfs_prop_get_int(cn->cn_handle,
+		    ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_ON)))) {
+
+			if (zfs_mount(cn->cn_handle, NULL, 0) != 0)
+				errors++;
+			else
+				mounted = TRUE;
+		}
+
+		/*
+		 * If the file system is mounted we always re-share even
+		 * if the filesystem is currently shared, so that we can
+		 * adopt any new options.
+		 */
+		if (sharenfs && mounted)
+			errors += zfs_share_nfs(cn->cn_handle);
+		else if (cn->cn_shared || clp->cl_waslegacy)
+			errors += zfs_unshare_nfs(cn->cn_handle, NULL);
+		if (sharesmb && mounted)
+			errors += zfs_share_smb(cn->cn_handle);
+		else if (cn->cn_shared || clp->cl_waslegacy)
+			errors += zfs_unshare_smb(cn->cn_handle, NULL);
+	}
+
+	return (errors ? -1 : 0);
+}
+
+/*
+ * Is this "dataset" a child of "parent"?
+ */
+boolean_t
+isa_child_of(const char *dataset, const char *parent)
+{
+	int len;
+
+	len = strlen(parent);
+
+	if (strncmp(dataset, parent, len) == 0 &&
+	    (dataset[len] == '@' || dataset[len] == '/' ||
+	    dataset[len] == '\0'))
+		return (B_TRUE);
+	else
+		return (B_FALSE);
+
+}
+
+/*
+ * If we rename a filesystem, child filesystem handles are no longer valid
+ * since we identify each dataset by its name in the ZFS namespace.  As a
+ * result, we have to go through and fix up all the names appropriately.  We
+ * could do this automatically if libzfs kept track of all open handles, but
+ * this is a lot less work.
+ */
+void
+changelist_rename(prop_changelist_t *clp, const char *src, const char *dst)
+{
+	prop_changenode_t *cn;
+	char newname[ZFS_MAXNAMELEN];
+
+	for (cn = uu_list_first(clp->cl_list); cn != NULL;
+	    cn = uu_list_next(clp->cl_list, cn)) {
+		/*
+		 * Do not rename a clone that's not in the source hierarchy.
+		 */
+		if (!isa_child_of(cn->cn_handle->zfs_name, src))
+			continue;
+
+		/*
+		 * Destroy the previous mountpoint if needed.
+		 */
+		remove_mountpoint(cn->cn_handle);
+
+		(void) strlcpy(newname, dst, sizeof (newname));
+		(void) strcat(newname, cn->cn_handle->zfs_name + strlen(src));
+
+		(void) strlcpy(cn->cn_handle->zfs_name, newname,
+		    sizeof (cn->cn_handle->zfs_name));
+	}
+}
+
+/*
+ * Given a gathered changelist for the 'sharenfs' or 'sharesmb' property,
+ * unshare all the datasets in the list.
+ */
+int
+changelist_unshare(prop_changelist_t *clp, zfs_share_proto_t *proto)
+{
+	prop_changenode_t *cn;
+	int ret = 0;
+
+	if (clp->cl_prop != ZFS_PROP_SHARENFS &&
+	    clp->cl_prop != ZFS_PROP_SHARESMB)
+		return (0);
+
+	for (cn = uu_list_first(clp->cl_list); cn != NULL;
+	    cn = uu_list_next(clp->cl_list, cn)) {
+		if (zfs_unshare_proto(cn->cn_handle, NULL, proto) != 0)
+			ret = -1;
+	}
+
+	return (ret);
+}
+
+/*
+ * Check if there is any child exported to a local zone in a given changelist.
+ * This information has already been recorded while gathering the changelist
+ * via changelist_gather().
+ */
+int
+changelist_haszonedchild(prop_changelist_t *clp)
+{
+	return (clp->cl_haszonedchild);
+}
+
+/*
+ * Remove a node from a gathered list.
+ */
+void
+changelist_remove(prop_changelist_t *clp, const char *name)
+{
+	prop_changenode_t *cn;
+
+	for (cn = uu_list_first(clp->cl_list); cn != NULL;
+	    cn = uu_list_next(clp->cl_list, cn)) {
+
+		if (strcmp(cn->cn_handle->zfs_name, name) == 0) {
+			uu_list_remove(clp->cl_list, cn);
+			zfs_close(cn->cn_handle);
+			free(cn);
+			return;
+		}
+	}
+}
+
+/*
+ * Release any memory associated with a changelist.
+ */
+void
+changelist_free(prop_changelist_t *clp)
+{
+	prop_changenode_t *cn;
+	void *cookie;
+
+	if (clp->cl_list) {
+		cookie = NULL;
+		while ((cn = uu_list_teardown(clp->cl_list, &cookie)) != NULL) {
+			zfs_close(cn->cn_handle);
+			free(cn);
+		}
+
+		uu_list_destroy(clp->cl_list);
+	}
+	if (clp->cl_pool)
+		uu_list_pool_destroy(clp->cl_pool);
+
+	free(clp);
+}
+
+static int
+change_one(zfs_handle_t *zhp, void *data)
+{
+	prop_changelist_t *clp = data;
+	char property[ZFS_MAXPROPLEN];
+	char where[64];
+	prop_changenode_t *cn;
+	zprop_source_t sourcetype;
+	zprop_source_t share_sourcetype;
+
+	/*
+	 * We only want to unmount/unshare those filesystems that may inherit
+	 * from the target filesystem.  If we find any filesystem with a
+	 * locally set mountpoint, we ignore any children since changing the
+	 * property will not affect them.  If this is a rename, we iterate
+	 * over all children regardless, since we need them unmounted in
+	 * order to do the rename.  Also, if this is a volume and we're doing
+	 * a rename, then always add it to the changelist.
+	 */
+
+	if (!(ZFS_IS_VOLUME(zhp) && clp->cl_realprop == ZFS_PROP_NAME) &&
+	    zfs_prop_get(zhp, clp->cl_prop, property,
+	    sizeof (property), &sourcetype, where, sizeof (where),
+	    B_FALSE) != 0) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	/*
+	 * If we are "watching" sharenfs or sharesmb
+	 * then check out the companion property which is tracked
+	 * in cl_shareprop
+	 */
+	if (clp->cl_shareprop != ZPROP_INVAL &&
+	    zfs_prop_get(zhp, clp->cl_shareprop, property,
+	    sizeof (property), &share_sourcetype, where, sizeof (where),
+	    B_FALSE) != 0) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (clp->cl_alldependents || clp->cl_allchildren ||
+	    sourcetype == ZPROP_SRC_DEFAULT ||
+	    sourcetype == ZPROP_SRC_INHERITED ||
+	    (clp->cl_shareprop != ZPROP_INVAL &&
+	    (share_sourcetype == ZPROP_SRC_DEFAULT ||
+	    share_sourcetype == ZPROP_SRC_INHERITED))) {
+		if ((cn = zfs_alloc(zfs_get_handle(zhp),
+		    sizeof (prop_changenode_t))) == NULL) {
+			zfs_close(zhp);
+			return (-1);
+		}
+
+		cn->cn_handle = zhp;
+		cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
+		    zfs_is_mounted(zhp, NULL);
+		cn->cn_shared = zfs_is_shared(zhp);
+		cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+		cn->cn_needpost = B_TRUE;
+
+		/* Indicate if any child is exported to a local zone. */
+		if (getzoneid() == GLOBAL_ZONEID && cn->cn_zoned)
+			clp->cl_haszonedchild = B_TRUE;
+
+		uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
+
+		if (clp->cl_sorted) {
+			uu_list_index_t idx;
+
+			(void) uu_list_find(clp->cl_list, cn, NULL,
+			    &idx);
+			uu_list_insert(clp->cl_list, cn, idx);
+		} else {
+			/*
+			 * Add this child to the beginning of the list. Children
+			 * below this one in the hierarchy will get added above
+			 * this one in the list. This produces a list in
+			 * reverse dataset name order.
+			 * This is necessary when the original mountpoint
+			 * is legacy or none.
+			 */
+			verify(uu_list_insert_before(clp->cl_list,
+			    uu_list_first(clp->cl_list), cn) == 0);
+		}
+
+		if (!clp->cl_alldependents)
+			return (zfs_iter_children(zhp, change_one, data));
+	} else {
+		zfs_close(zhp);
+	}
+
+	return (0);
+}
+
+/*ARGSUSED*/
+static int
+compare_mountpoints(const void *a, const void *b, void *unused)
+{
+	const prop_changenode_t *ca = a;
+	const prop_changenode_t *cb = b;
+
+	char mounta[MAXPATHLEN];
+	char mountb[MAXPATHLEN];
+
+	boolean_t hasmounta, hasmountb;
+
+	/*
+	 * When unsharing or unmounting filesystems, we need to do it in
+	 * mountpoint order.  This allows the user to have a mountpoint
+	 * hierarchy that is different from the dataset hierarchy, and still
+	 * allow it to be changed.  However, if either dataset doesn't have a
+	 * mountpoint (because it is a volume or a snapshot), we place it at the
+	 * end of the list, because it doesn't affect our change at all.
+	 */
+	hasmounta = (zfs_prop_get(ca->cn_handle, ZFS_PROP_MOUNTPOINT, mounta,
+	    sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
+	hasmountb = (zfs_prop_get(cb->cn_handle, ZFS_PROP_MOUNTPOINT, mountb,
+	    sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
+
+	if (!hasmounta && hasmountb)
+		return (-1);
+	else if (hasmounta && !hasmountb)
+		return (1);
+	else if (!hasmounta && !hasmountb)
+		return (0);
+	else
+		return (strcmp(mountb, mounta));
+}
+
+/*
+ * Given a ZFS handle and a property, construct a complete list of datasets
+ * that need to be modified as part of this process.  For anything but the
+ * 'mountpoint' and 'sharenfs' properties, this just returns an empty list.
+ * Otherwise, we iterate over all children and look for any datasets that
+ * inherit the property.  For each such dataset, we add it to the list and
+ * mark whether it was shared beforehand.
+ */
+prop_changelist_t *
+changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
+    int mnt_flags)
+{
+	prop_changelist_t *clp;
+	prop_changenode_t *cn;
+	zfs_handle_t *temp;
+	char property[ZFS_MAXPROPLEN];
+	uu_compare_fn_t *compare = NULL;
+	boolean_t legacy = B_FALSE;
+
+	if ((clp = zfs_alloc(zhp->zfs_hdl, sizeof (prop_changelist_t))) == NULL)
+		return (NULL);
+
+	/*
+	 * For mountpoint-related tasks, we want to sort everything by
+	 * mountpoint, so that we mount and unmount them in the appropriate
+	 * order, regardless of their position in the hierarchy.
+	 */
+	if (prop == ZFS_PROP_NAME || prop == ZFS_PROP_ZONED ||
+	    prop == ZFS_PROP_MOUNTPOINT || prop == ZFS_PROP_SHARENFS ||
+	    prop == ZFS_PROP_SHARESMB) {
+
+		if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
+		    property, sizeof (property),
+		    NULL, NULL, 0, B_FALSE) == 0 &&
+		    (strcmp(property, "legacy") == 0 ||
+		    strcmp(property, "none") == 0)) {
+
+			legacy = B_TRUE;
+		}
+		if (!legacy) {
+			compare = compare_mountpoints;
+			clp->cl_sorted = B_TRUE;
+		}
+	}
+
+	clp->cl_pool = uu_list_pool_create("changelist_pool",
+	    sizeof (prop_changenode_t),
+	    offsetof(prop_changenode_t, cn_listnode),
+	    compare, 0);
+	if (clp->cl_pool == NULL) {
+		assert(uu_error() == UU_ERROR_NO_MEMORY);
+		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
+		changelist_free(clp);
+		return (NULL);
+	}
+
+	clp->cl_list = uu_list_create(clp->cl_pool, NULL,
+	    clp->cl_sorted ? UU_LIST_SORTED : 0);
+	clp->cl_gflags = gather_flags;
+	clp->cl_mflags = mnt_flags;
+
+	if (clp->cl_list == NULL) {
+		assert(uu_error() == UU_ERROR_NO_MEMORY);
+		(void) zfs_error(zhp->zfs_hdl, EZFS_NOMEM, "internal error");
+		changelist_free(clp);
+		return (NULL);
+	}
+
+	/*
+	 * If this is a rename or the 'zoned' property, we pretend we're
+	 * changing the mountpoint and flag it so we can catch all children in
+	 * change_one().
+	 *
+	 * Flag cl_alldependents to catch all children plus the dependents
+	 * (clones) that are not in the hierarchy.
+	 */
+	if (prop == ZFS_PROP_NAME) {
+		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+		clp->cl_alldependents = B_TRUE;
+	} else if (prop == ZFS_PROP_ZONED) {
+		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+		clp->cl_allchildren = B_TRUE;
+	} else if (prop == ZFS_PROP_CANMOUNT) {
+		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+	} else if (prop == ZFS_PROP_VOLSIZE) {
+		clp->cl_prop = ZFS_PROP_MOUNTPOINT;
+	} else {
+		clp->cl_prop = prop;
+	}
+	clp->cl_realprop = prop;
+
+	if (clp->cl_prop != ZFS_PROP_MOUNTPOINT &&
+	    clp->cl_prop != ZFS_PROP_SHARENFS &&
+	    clp->cl_prop != ZFS_PROP_SHARESMB)
+		return (clp);
+
+	/*
+	 * If watching SHARENFS or SHARESMB then
+	 * also watch its companion property.
+	 */
+	if (clp->cl_prop == ZFS_PROP_SHARENFS)
+		clp->cl_shareprop = ZFS_PROP_SHARESMB;
+	else if (clp->cl_prop == ZFS_PROP_SHARESMB)
+		clp->cl_shareprop = ZFS_PROP_SHARENFS;
+
+	if (clp->cl_alldependents) {
+		if (zfs_iter_dependents(zhp, B_TRUE, change_one, clp) != 0) {
+			changelist_free(clp);
+			return (NULL);
+		}
+	} else if (zfs_iter_children(zhp, change_one, clp) != 0) {
+		changelist_free(clp);
+		return (NULL);
+	}
+
+	/*
+	 * We have to re-open ourselves because we auto-close all the handles
+	 * and can't tell the difference.
+	 */
+	if ((temp = zfs_open(zhp->zfs_hdl, zfs_get_name(zhp),
+	    ZFS_TYPE_DATASET)) == NULL) {
+		changelist_free(clp);
+		return (NULL);
+	}
+
+	/*
+	 * Always add ourselves to the list.  We add ourselves to the end so that
+	 * we're the last to be unmounted.
+	 */
+	if ((cn = zfs_alloc(zhp->zfs_hdl,
+	    sizeof (prop_changenode_t))) == NULL) {
+		zfs_close(temp);
+		changelist_free(clp);
+		return (NULL);
+	}
+
+	cn->cn_handle = temp;
+	cn->cn_mounted = (clp->cl_gflags & CL_GATHER_MOUNT_ALWAYS) ||
+	    zfs_is_mounted(temp, NULL);
+	cn->cn_shared = zfs_is_shared(temp);
+	cn->cn_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+	cn->cn_needpost = B_TRUE;
+
+	uu_list_node_init(cn, &cn->cn_listnode, clp->cl_pool);
+	if (clp->cl_sorted) {
+		uu_list_index_t idx;
+		(void) uu_list_find(clp->cl_list, cn, NULL, &idx);
+		uu_list_insert(clp->cl_list, cn, idx);
+	} else {
+		/*
+		 * Add the target dataset to the end of the list.
+		 * The list is not really unsorted. The list will be
+		 * in reverse dataset name order. This is necessary
+		 * when the original mountpoint is legacy or none.
+		 */
+		verify(uu_list_insert_after(clp->cl_list,
+		    uu_list_last(clp->cl_list), cn) == 0);
+	}
+
+	/*
+	 * If the mountpoint property was previously 'legacy', or 'none',
+	 * record it as the behavior of changelist_postfix() will be different.
+	 */
+	if ((clp->cl_prop == ZFS_PROP_MOUNTPOINT) && legacy) {
+		/*
+		 * do not automatically mount ex-legacy datasets if
+		 * we specifically set canmount to noauto
+		 */
+		if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) !=
+		    ZFS_CANMOUNT_NOAUTO)
+			clp->cl_waslegacy = B_TRUE;
+	}
+
+	return (clp);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c
new file mode 100644
index 0000000000000000000000000000000000000000..d5ba20fde0cf1d6b5702b5f4d9e9af5cfa277ed7
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c
@@ -0,0 +1,453 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+/*
+ * The pool configuration repository is stored in /etc/zfs/zpool.cache as a
+ * single packed nvlist.  While it would be nice to just read in this
+ * file from userland, this wouldn't work from a local zone.  So we have to have
+ * a zpool ioctl to return the complete configuration for all pools.  In the
+ * global zone, this will be identical to reading the file and unpacking it in
+ * userland.
+ */
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <libintl.h>
+#include <libuutil.h>
+
+#include "libzfs_impl.h"
+
+typedef struct config_node {
+	char		*cn_name;
+	nvlist_t	*cn_config;
+	uu_avl_node_t	cn_avl;
+} config_node_t;
+
+/* ARGSUSED */
+static int
+config_node_compare(const void *a, const void *b, void *unused)
+{
+	int ret;
+
+	const config_node_t *ca = (config_node_t *)a;
+	const config_node_t *cb = (config_node_t *)b;
+
+	ret = strcmp(ca->cn_name, cb->cn_name);
+
+	if (ret < 0)
+		return (-1);
+	else if (ret > 0)
+		return (1);
+	else
+		return (0);
+}
+
+void
+namespace_clear(libzfs_handle_t *hdl)
+{
+	if (hdl->libzfs_ns_avl) {
+		config_node_t *cn;
+		void *cookie = NULL;
+
+		while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl,
+		    &cookie)) != NULL) {
+			nvlist_free(cn->cn_config);
+			free(cn->cn_name);
+			free(cn);
+		}
+
+		uu_avl_destroy(hdl->libzfs_ns_avl);
+		hdl->libzfs_ns_avl = NULL;
+	}
+
+	if (hdl->libzfs_ns_avlpool) {
+		uu_avl_pool_destroy(hdl->libzfs_ns_avlpool);
+		hdl->libzfs_ns_avlpool = NULL;
+	}
+}
+
+/*
+ * Loads the pool namespace, or re-loads it if the cache has changed.
+ */
+static int
+namespace_reload(libzfs_handle_t *hdl)
+{
+	nvlist_t *config;
+	config_node_t *cn;
+	nvpair_t *elem;
+	zfs_cmd_t zc = { 0 };
+	void *cookie;
+
+	if (hdl->libzfs_ns_gen == 0) {
+		/*
+		 * This is the first time we've accessed the configuration
+		 * cache.  Initialize the AVL tree and then fall through to the
+		 * common code.
+		 */
+		if ((hdl->libzfs_ns_avlpool = uu_avl_pool_create("config_pool",
+		    sizeof (config_node_t),
+		    offsetof(config_node_t, cn_avl),
+		    config_node_compare, UU_DEFAULT)) == NULL)
+			return (no_memory(hdl));
+
+		if ((hdl->libzfs_ns_avl = uu_avl_create(hdl->libzfs_ns_avlpool,
+		    NULL, UU_DEFAULT)) == NULL)
+			return (no_memory(hdl));
+	}
+
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+		return (-1);
+
+	for (;;) {
+		zc.zc_cookie = hdl->libzfs_ns_gen;
+		if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CONFIGS, &zc) != 0) {
+			switch (errno) {
+			case EEXIST:
+				/*
+				 * The namespace hasn't changed.
+				 */
+				zcmd_free_nvlists(&zc);
+				return (0);
+
+			case ENOMEM:
+				if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+					zcmd_free_nvlists(&zc);
+					return (-1);
+				}
+				break;
+
+			default:
+				zcmd_free_nvlists(&zc);
+				return (zfs_standard_error(hdl, errno,
+				    dgettext(TEXT_DOMAIN, "failed to read "
+				    "pool configuration")));
+			}
+		} else {
+			hdl->libzfs_ns_gen = zc.zc_cookie;
+			break;
+		}
+	}
+
+	if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
+		zcmd_free_nvlists(&zc);
+		return (-1);
+	}
+
+	zcmd_free_nvlists(&zc);
+
+	/*
+	 * Clear out any existing configuration information.
+	 */
+	cookie = NULL;
+	while ((cn = uu_avl_teardown(hdl->libzfs_ns_avl, &cookie)) != NULL) {
+		nvlist_free(cn->cn_config);
+		free(cn->cn_name);
+		free(cn);
+	}
+
+	elem = NULL;
+	while ((elem = nvlist_next_nvpair(config, elem)) != NULL) {
+		nvlist_t *child;
+		uu_avl_index_t where;
+
+		if ((cn = zfs_alloc(hdl, sizeof (config_node_t))) == NULL) {
+			nvlist_free(config);
+			return (-1);
+		}
+
+		if ((cn->cn_name = zfs_strdup(hdl,
+		    nvpair_name(elem))) == NULL) {
+			free(cn);
+			nvlist_free(config);
+			return (-1);
+		}
+
+		verify(nvpair_value_nvlist(elem, &child) == 0);
+		if (nvlist_dup(child, &cn->cn_config, 0) != 0) {
+			free(cn->cn_name);
+			free(cn);
+			nvlist_free(config);
+			return (no_memory(hdl));
+		}
+		verify(uu_avl_find(hdl->libzfs_ns_avl, cn, NULL, &where)
+		    == NULL);
+
+		uu_avl_insert(hdl->libzfs_ns_avl, cn, where);
+	}
+
+	nvlist_free(config);
+	return (0);
+}
+
+/*
+ * Retrieve the configuration for the given pool.  The configuration is a nvlist
+ * describing the vdevs, as well as the statistics associated with each one.
+ */
+nvlist_t *
+zpool_get_config(zpool_handle_t *zhp, nvlist_t **oldconfig)
+{
+	if (oldconfig)
+		*oldconfig = zhp->zpool_old_config;
+	return (zhp->zpool_config);
+}
+
+/*
+ * Retrieves a list of enabled features and their refcounts and caches it in
+ * the pool handle.
+ */
+nvlist_t *
+zpool_get_features(zpool_handle_t *zhp)
+{
+	nvlist_t *config, *features;
+
+	config = zpool_get_config(zhp, NULL);
+
+	if (config == NULL || !nvlist_exists(config,
+	    ZPOOL_CONFIG_FEATURE_STATS)) {
+		int error;
+		boolean_t missing = B_FALSE;
+
+		error = zpool_refresh_stats(zhp, &missing);
+
+		if (error != 0 || missing)
+			return (NULL);
+
+		config = zpool_get_config(zhp, NULL);
+	}
+
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
+	    &features) == 0);
+
+	return (features);
+}
+
+/*
+ * Refresh the vdev statistics associated with the given pool.  This is used in
+ * iostat to show configuration changes and determine the delta from the last
+ * time the function was called.  This function can fail, in case the pool has
+ * been destroyed.
+ */
+int
+zpool_refresh_stats(zpool_handle_t *zhp, boolean_t *missing)
+{
+	zfs_cmd_t zc = { 0 };
+	int error;
+	nvlist_t *config;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	*missing = B_FALSE;
+	(void) strcpy(zc.zc_name, zhp->zpool_name);
+
+	if (zhp->zpool_config_size == 0)
+		zhp->zpool_config_size = 1 << 16;
+
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size) != 0)
+		return (-1);
+
+	for (;;) {
+		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_STATS,
+		    &zc) == 0) {
+			/*
+			 * The real error is returned in the zc_cookie field.
+			 */
+			error = zc.zc_cookie;
+			break;
+		}
+
+		if (errno == ENOMEM) {
+			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+				zcmd_free_nvlists(&zc);
+				return (-1);
+			}
+		} else {
+			zcmd_free_nvlists(&zc);
+			if (errno == ENOENT || errno == EINVAL)
+				*missing = B_TRUE;
+			zhp->zpool_state = POOL_STATE_UNAVAIL;
+			return (0);
+		}
+	}
+
+	if (zcmd_read_dst_nvlist(hdl, &zc, &config) != 0) {
+		zcmd_free_nvlists(&zc);
+		return (-1);
+	}
+
+	zcmd_free_nvlists(&zc);
+
+	zhp->zpool_config_size = zc.zc_nvlist_dst_size;
+
+	if (zhp->zpool_config != NULL) {
+		uint64_t oldtxg, newtxg;
+
+		verify(nvlist_lookup_uint64(zhp->zpool_config,
+		    ZPOOL_CONFIG_POOL_TXG, &oldtxg) == 0);
+		verify(nvlist_lookup_uint64(config,
+		    ZPOOL_CONFIG_POOL_TXG, &newtxg) == 0);
+
+		if (zhp->zpool_old_config != NULL)
+			nvlist_free(zhp->zpool_old_config);
+
+		if (oldtxg != newtxg) {
+			nvlist_free(zhp->zpool_config);
+			zhp->zpool_old_config = NULL;
+		} else {
+			zhp->zpool_old_config = zhp->zpool_config;
+		}
+	}
+
+	zhp->zpool_config = config;
+	if (error)
+		zhp->zpool_state = POOL_STATE_UNAVAIL;
+	else
+		zhp->zpool_state = POOL_STATE_ACTIVE;
+
+	return (0);
+}
+
+/*
+ * If the __ZFS_POOL_RESTRICT environment variable is set we only iterate over
+ * pools it lists.
+ *
+ * This is an undocumented feature for use during testing only.
+ *
+ * This function returns B_TRUE if the pool should be skipped
+ * during iteration.
+ */
+static boolean_t
+check_restricted(const char *poolname)
+{
+	static boolean_t initialized = B_FALSE;
+	static char *restricted = NULL;
+
+	const char *cur, *end;
+	int len, namelen;
+
+	if (!initialized) {
+		initialized = B_TRUE;
+		restricted = getenv("__ZFS_POOL_RESTRICT");
+	}
+
+	if (NULL == restricted)
+		return (B_FALSE);
+
+	cur = restricted;
+	namelen = strlen(poolname);
+	do {
+		end = strchr(cur, ' ');
+		len = (NULL == end) ? strlen(cur) : (end - cur);
+
+		if (len == namelen && 0 == strncmp(cur, poolname, len)) {
+			return (B_FALSE);
+		}
+
+		cur += (len + 1);
+	} while (NULL != end);
+
+	return (B_TRUE);
+}
+
+/*
+ * Iterate over all pools in the system.
+ */
+int
+zpool_iter(libzfs_handle_t *hdl, zpool_iter_f func, void *data)
+{
+	config_node_t *cn;
+	zpool_handle_t *zhp;
+	int ret;
+
+	/*
+	 * If someone makes a recursive call to zpool_iter(), we want to avoid
+	 * refreshing the namespace because that will invalidate the parent
+	 * context.  We allow recursive calls, but simply re-use the same
+	 * namespace AVL tree.
+	 */
+	if (!hdl->libzfs_pool_iter && namespace_reload(hdl) != 0)
+		return (-1);
+
+	hdl->libzfs_pool_iter++;
+	for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
+	    cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
+
+		if (check_restricted(cn->cn_name))
+			continue;
+
+		if (zpool_open_silent(hdl, cn->cn_name, &zhp) != 0) {
+			hdl->libzfs_pool_iter--;
+			return (-1);
+		}
+
+		if (zhp == NULL)
+			continue;
+
+		if ((ret = func(zhp, data)) != 0) {
+			hdl->libzfs_pool_iter--;
+			return (ret);
+		}
+	}
+	hdl->libzfs_pool_iter--;
+
+	return (0);
+}
+
+/*
+ * Iterate over root datasets, calling the given function for each.  The zfs
+ * handle passed each time must be explicitly closed by the callback.
+ */
+int
+zfs_iter_root(libzfs_handle_t *hdl, zfs_iter_f func, void *data)
+{
+	config_node_t *cn;
+	zfs_handle_t *zhp;
+	int ret;
+
+	if (namespace_reload(hdl) != 0)
+		return (-1);
+
+	for (cn = uu_avl_first(hdl->libzfs_ns_avl); cn != NULL;
+	    cn = uu_avl_next(hdl->libzfs_ns_avl, cn)) {
+
+		if (check_restricted(cn->cn_name))
+			continue;
+
+		if ((zhp = make_dataset_handle(hdl, cn->cn_name)) == NULL)
+			continue;
+
+		if ((ret = func(zhp, data)) != 0)
+			return (ret);
+	}
+
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
new file mode 100644
index 0000000000000000000000000000000000000000..1696cb184dc6b40f9297128750f9aa182c90cb23
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_dataset.c
@@ -0,0 +1,4511 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2012 DEY Storage Systems, Inc.  All rights reserved.
+ * Copyright (c) 2011-2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ * Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <stddef.h>
+#include <zone.h>
+#include <fcntl.h>
+#include <sys/mntent.h>
+#include <sys/mount.h>
+#include <priv.h>
+#include <pwd.h>
+#include <grp.h>
+#include <stddef.h>
+#include <idmap.h>
+
+#include <sys/dnode.h>
+#include <sys/spa.h>
+#include <sys/zap.h>
+#include <sys/misc.h>
+#include <libzfs.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "libzfs_impl.h"
+#include "zfs_deleg.h"
+
+static int userquota_propname_decode(const char *propname, boolean_t zoned,
+    zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);
+
+/*
+ * Given a single type (not a mask of types), return the type in a human
+ * readable form.  Returns NULL for any value that is not one of the three
+ * primitive dataset types handled below.  Strings are localized via
+ * dgettext().
+ */
+const char *
+zfs_type_to_name(zfs_type_t type)
+{
+	switch (type) {
+	case ZFS_TYPE_FILESYSTEM:
+		return (dgettext(TEXT_DOMAIN, "filesystem"));
+	case ZFS_TYPE_SNAPSHOT:
+		return (dgettext(TEXT_DOMAIN, "snapshot"));
+	case ZFS_TYPE_VOLUME:
+		return (dgettext(TEXT_DOMAIN, "volume"));
+	}
+
+	return (NULL);
+}
+
+/*
+ * Given a path and mask of ZFS types, return a string describing this dataset.
+ * This is used when we fail to open a dataset and we cannot get an exact type.
+ * We guess what the type would have been based on the path and the mask of
+ * acceptable types.
+ *
+ * Recursion terminates because each recursive call strips at least the
+ * snapshot bit from 'types', and the single-type cases return directly.
+ */
+static const char *
+path_to_str(const char *path, int types)
+{
+	/*
+	 * When given a single type, always report the exact type.
+	 */
+	if (types == ZFS_TYPE_SNAPSHOT)
+		return (dgettext(TEXT_DOMAIN, "snapshot"));
+	if (types == ZFS_TYPE_FILESYSTEM)
+		return (dgettext(TEXT_DOMAIN, "filesystem"));
+	if (types == ZFS_TYPE_VOLUME)
+		return (dgettext(TEXT_DOMAIN, "volume"));
+
+	/*
+	 * The user is requesting more than one type of dataset.  If this is the
+	 * case, consult the path itself.  If we're looking for a snapshot, and
+	 * a '@' is found, then report it as "snapshot".  Otherwise, remove the
+	 * snapshot attribute and try again.
+	 */
+	if (types & ZFS_TYPE_SNAPSHOT) {
+		if (strchr(path, '@') != NULL)
+			return (dgettext(TEXT_DOMAIN, "snapshot"));
+		return (path_to_str(path, types & ~ZFS_TYPE_SNAPSHOT));
+	}
+
+	/*
+	 * The user has requested either filesystems or volumes.
+	 * We have no way of knowing a priori what type this would be, so always
+	 * report it as "filesystem" or "volume", our two primitive types.
+	 */
+	if (types & ZFS_TYPE_FILESYSTEM)
+		return (dgettext(TEXT_DOMAIN, "filesystem"));
+
+	assert(types & ZFS_TYPE_VOLUME);
+	return (dgettext(TEXT_DOMAIN, "volume"));
+}
+
+/*
+ * Validate a ZFS path.  This is used even before trying to open the dataset, to
+ * provide a more meaningful error message.  We call zfs_error_aux() to
+ * explain exactly why the name was not valid.
+ *
+ * Return convention (note the inversion relative to most libzfs calls):
+ * non-zero (-1) means the name IS valid for the requested 'type'; 0 means
+ * it is not.  Callers test the result for truth, e.g. zfs_open().
+ * 'hdl' may be NULL, in which case no error text is recorded.  When
+ * 'modifying' is set, the '%' character is additionally rejected.
+ */
+int
+zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
+    boolean_t modifying)
+{
+	namecheck_err_t why;
+	char what;
+
+	/* Result ignored; presumably forces property-table setup — TODO confirm. */
+	(void) zfs_prop_get_table();
+	if (dataset_namecheck(path, &why, &what) != 0) {
+		if (hdl != NULL) {
+			/* Translate the namecheck code into a human message. */
+			switch (why) {
+			case NAME_ERR_TOOLONG:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "name is too long"));
+				break;
+
+			case NAME_ERR_LEADING_SLASH:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "leading slash in name"));
+				break;
+
+			case NAME_ERR_EMPTY_COMPONENT:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "empty component in name"));
+				break;
+
+			case NAME_ERR_TRAILING_SLASH:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "trailing slash in name"));
+				break;
+
+			case NAME_ERR_INVALCHAR:
+				zfs_error_aux(hdl,
+				    dgettext(TEXT_DOMAIN, "invalid character "
+				    "'%c' in name"), what);
+				break;
+
+			case NAME_ERR_MULTIPLE_AT:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "multiple '@' delimiters in name"));
+				break;
+
+			case NAME_ERR_NOLETTER:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "pool doesn't begin with a letter"));
+				break;
+
+			case NAME_ERR_RESERVED:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "name is reserved"));
+				break;
+
+			case NAME_ERR_DISKLIKE:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "reserved disk name"));
+				break;
+			}
+		}
+
+		return (0);
+	}
+
+	/* '@' is only legal when the caller accepts snapshots. */
+	if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
+		if (hdl != NULL)
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "snapshot delimiter '@' in filesystem name"));
+		return (0);
+	}
+
+	/* Conversely, a pure snapshot request must contain '@'. */
+	if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
+		if (hdl != NULL)
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "missing '@' delimiter in snapshot name"));
+		return (0);
+	}
+
+	/* '%' is rejected only for operations that create/modify names. */
+	if (modifying && strchr(path, '%') != NULL) {
+		if (hdl != NULL)
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid character %c in name"), '%');
+		return (0);
+	}
+
+	return (-1);
+}
+
+/*
+ * Convenience wrapper: check a name against a type with no libzfs handle
+ * (so no error text is recorded).  Pool names go through the pool-specific
+ * validator; everything else uses zfs_validate_name() in non-modifying
+ * mode.  Non-zero means valid.
+ */
+int
+zfs_name_valid(const char *name, zfs_type_t type)
+{
+	if (type == ZFS_TYPE_POOL)
+		return (zpool_name_valid(NULL, B_FALSE, name));
+	return (zfs_validate_name(NULL, name, type, B_FALSE));
+}
+
+/*
+ * This function takes the raw DSL properties, and filters out the user-defined
+ * properties into a separate nvlist.
+ *
+ * Returns a newly allocated nvlist containing only the user properties
+ * (may be empty), or NULL on allocation failure after recording a
+ * no-memory error on the handle.  'props' is not modified; the values
+ * are added by reference into the new list via nvlist_add_nvlist().
+ */
+static nvlist_t *
+process_user_props(zfs_handle_t *zhp, nvlist_t *props)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	nvpair_t *elem;
+	nvlist_t *propval;
+	nvlist_t *nvl;
+
+	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
+		(void) no_memory(hdl);
+		return (NULL);
+	}
+
+	elem = NULL;
+	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
+		/* Keep only user-defined (name-contains-colon style) props. */
+		if (!zfs_prop_user(nvpair_name(elem)))
+			continue;
+
+		verify(nvpair_value_nvlist(elem, &propval) == 0);
+		if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
+			nvlist_free(nvl);
+			(void) no_memory(hdl);
+			return (NULL);
+		}
+	}
+
+	return (nvl);
+}
+
+/*
+ * Open the named pool and push the resulting handle onto the libzfs
+ * handle's singly linked pool-handle cache (libzfs_pool_handles).
+ * Returns the new handle, or NULL if the pool could not be opened.
+ */
+static zpool_handle_t *
+zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zpool_handle_t *zph;
+
+	if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
+		/* Link at the head of the cached-handle list. */
+		if (hdl->libzfs_pool_handles != NULL)
+			zph->zpool_next = hdl->libzfs_pool_handles;
+		hdl->libzfs_pool_handles = zph;
+	}
+	return (zph);
+}
+
+/*
+ * Search the cached pool-handle list for 'pool_name'.  'len' is expected
+ * to count the name plus its terminating NUL (see zpool_handle()), so the
+ * strncmp() below effectively performs an exact-length match: a cached
+ * name that is merely a prefix differs at the NUL position.
+ * Returns the cached handle, or NULL if not present.
+ */
+static zpool_handle_t *
+zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zpool_handle_t *zph = hdl->libzfs_pool_handles;
+
+	while ((zph != NULL) &&
+	    (strncmp(pool_name, zpool_get_name(zph), len) != 0))
+		zph = zph->zpool_next;
+	return (zph);
+}
+
+/*
+ * Returns a handle to the pool that contains the provided dataset.
+ * If a handle to that pool already exists then that handle is returned.
+ * Otherwise, a new handle is created and added to the list of handles.
+ *
+ * May return NULL if the pool cannot be opened.
+ */
+static zpool_handle_t *
+zpool_handle(zfs_handle_t *zhp)
+{
+	char *pool_name;
+	int len;
+	zpool_handle_t *zph;
+
+	/* Pool name is everything before the first '/' or '@' (+1 for NUL). */
+	len = strcspn(zhp->zfs_name, "/@") + 1;
+	pool_name = zfs_alloc(zhp->zfs_hdl, len);
+	(void) strlcpy(pool_name, zhp->zfs_name, len);
+
+	zph = zpool_find_handle(zhp, pool_name, len);
+	if (zph == NULL)
+		zph = zpool_add_handle(zhp, pool_name);
+
+	free(pool_name);
+	return (zph);
+}
+
+/*
+ * Close every cached pool handle on the libzfs handle and reset the list.
+ * Called during library teardown; safe to call when the list is empty.
+ */
+void
+zpool_free_handles(libzfs_handle_t *hdl)
+{
+	zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
+
+	while (zph != NULL) {
+		next = zph->zpool_next;		/* save before close frees it */
+		zpool_close(zph);
+		zph = next;
+	}
+	hdl->libzfs_pool_handles = NULL;
+}
+
+/*
+ * Utility function to gather stats (objset and zpl) for the given object.
+ *
+ * Retries ZFS_IOC_OBJSET_STATS with a larger destination nvlist buffer on
+ * ENOMEM; any other errno aborts.  Returns 0 on success, -1 on failure.
+ * The caller owns 'zc' and is responsible for freeing its nvlists in
+ * either case (see get_stats()).
+ */
+static int
+get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+	(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
+
+	while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, zc) != 0) {
+		if (errno == ENOMEM) {
+			/* Grow the buffer and retry the ioctl. */
+			if (zcmd_expand_dst_nvlist(hdl, zc) != 0) {
+				return (-1);
+			}
+		} else {
+			return (-1);
+		}
+	}
+	return (0);
+}
+
+/*
+ * Utility function to get the received properties of the given object.
+ *
+ * Issues ZFS_IOC_OBJSET_RECVD_PROPS, growing the destination nvlist buffer
+ * on ENOMEM, then decodes the result and caches it in zhp->zfs_recvd_props
+ * (replacing and freeing any previous copy).  Returns 0 on success, -1 on
+ * failure; the handle's cached list is only replaced on success.
+ */
+static int
+get_recvd_props_ioctl(zfs_handle_t *zhp)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	nvlist_t *recvdprops;
+	zfs_cmd_t zc = { 0 };
+	int err;
+
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+		return (-1);
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
+		if (errno == ENOMEM) {
+			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+				/*
+				 * Free the destination buffer here too; this
+				 * path previously leaked it, unlike the
+				 * non-ENOMEM error path below.
+				 */
+				zcmd_free_nvlists(&zc);
+				return (-1);
+			}
+		} else {
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+	}
+
+	err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
+	zcmd_free_nvlists(&zc);
+	if (err != 0)
+		return (-1);
+
+	nvlist_free(zhp->zfs_recvd_props);
+	zhp->zfs_recvd_props = recvdprops;
+
+	return (0);
+}
+
+/*
+ * Install the results of a stats ioctl onto the handle: copy the objset
+ * stats structure and replace the cached property nvlists.  Returns 0 on
+ * success, -1 on failure; the handle's nvlists are only replaced (and the
+ * old ones freed) once both new lists have been built successfully.
+ */
+static int
+put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
+{
+	nvlist_t *allprops, *userprops;
+
+	zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */
+
+	if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
+		return (-1);
+	}
+
+	/*
+	 * XXX Why do we store the user props separately, in addition to
+	 * storing them in zfs_props?
+	 */
+	if ((userprops = process_user_props(zhp, allprops)) == NULL) {
+		nvlist_free(allprops);
+		return (-1);
+	}
+
+	nvlist_free(zhp->zfs_props);
+	nvlist_free(zhp->zfs_user_props);
+
+	zhp->zfs_props = allprops;
+	zhp->zfs_user_props = userprops;
+
+	return (0);
+}
+
+/*
+ * Fetch fresh objset stats and properties for the handle in one shot:
+ * allocate the destination nvlist, run the stats ioctl, and install the
+ * results via put_stats_zhdl().  Returns 0 on success, -1 on failure.
+ * The zfs_cmd nvlists are freed here on every path.
+ */
+static int
+get_stats(zfs_handle_t *zhp)
+{
+	int rc = 0;
+	zfs_cmd_t zc = { 0 };
+
+	if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
+		return (-1);
+	if (get_stats_ioctl(zhp, &zc) != 0)
+		rc = -1;
+	else if (put_stats_zhdl(zhp, &zc) != 0)
+		rc = -1;
+	zcmd_free_nvlists(&zc);
+	return (rc);
+}
+
+/*
+ * Refresh the properties currently stored in the handle.
+ * Failures are deliberately ignored; the handle keeps its previous
+ * cached properties if the refresh does not succeed.
+ */
+void
+zfs_refresh_properties(zfs_handle_t *zhp)
+{
+	(void) get_stats(zhp);
+}
+
+/*
+ * Makes a handle from the given dataset name.  Used by zfs_open() and
+ * zfs_iter_* to create child handles on the fly.
+ *
+ * This common tail installs the stats from 'zc' onto 'zhp', derives the
+ * head and current dataset types from the DMU objset type, and attaches
+ * the owning pool handle.  Returns 0 on success, -1 on failure.
+ */
+static int
+make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
+{
+	if (put_stats_zhdl(zhp, zc) != 0)
+		return (-1);
+
+	/*
+	 * We've managed to open the dataset and gather statistics.  Determine
+	 * the high-level type.
+	 */
+	if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
+		zhp->zfs_head_type = ZFS_TYPE_VOLUME;
+	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
+		zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
+	else
+		abort();	/* any other objset type is a fatal bug */
+
+	if (zhp->zfs_dmustats.dds_is_snapshot)
+		zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
+	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
+		zhp->zfs_type = ZFS_TYPE_VOLUME;
+	else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
+		zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
+	else
+		abort();	/* we should never see any other types */
+
+	if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
+		return (-1);
+
+	return (0);
+}
+
+/*
+ * Allocate and fully initialize a zfs_handle_t for the dataset named by
+ * 'path'.  Returns NULL on any failure (allocation, stats ioctl, or type
+ * resolution); on the ioctl path, errno is left set for the caller (see
+ * zfs_open()).
+ */
+zfs_handle_t *
+make_dataset_handle(libzfs_handle_t *hdl, const char *path)
+{
+	zfs_cmd_t zc = { 0 };
+
+	zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
+
+	if (zhp == NULL)
+		return (NULL);
+
+	zhp->zfs_hdl = hdl;
+	(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) {
+		free(zhp);
+		return (NULL);
+	}
+	if (get_stats_ioctl(zhp, &zc) == -1) {
+		zcmd_free_nvlists(&zc);
+		free(zhp);
+		return (NULL);
+	}
+	if (make_dataset_handle_common(zhp, &zc) == -1) {
+		free(zhp);
+		zhp = NULL;
+	}
+	/* Freed on success and on the common-init failure path alike. */
+	zcmd_free_nvlists(&zc);
+	return (zhp);
+}
+
+/*
+ * Build a zfs_handle_t from an already-populated zfs_cmd_t (name and
+ * stats supplied by the caller's ioctl), avoiding a second stats ioctl.
+ * Returns NULL on allocation or initialization failure.  The caller
+ * retains ownership of 'zc' and its nvlists.
+ */
+zfs_handle_t *
+make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
+{
+	zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
+
+	if (zhp == NULL)
+		return (NULL);
+
+	zhp->zfs_hdl = hdl;
+	(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
+	if (make_dataset_handle_common(zhp, zc) == -1) {
+		free(zhp);
+		return (NULL);
+	}
+	return (zhp);
+}
+
+/*
+ * Build a lightweight snapshot handle from a parent handle and a
+ * zfs_cmd_t carrying the snapshot name.  No stats ioctl is performed and
+ * no properties are loaded; the type is forced to ZFS_TYPE_SNAPSHOT and
+ * the head type is inherited from the parent.
+ *
+ * NOTE(review): unlike make_dataset_handle_common(), the zpool_handle()
+ * result is not checked for NULL here — confirm callers tolerate a
+ * handle with a NULL zpool_hdl.
+ */
+zfs_handle_t *
+make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
+{
+	zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
+
+	if (zhp == NULL)
+		return (NULL);
+
+	zhp->zfs_hdl = pzhp->zfs_hdl;
+	(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
+	zhp->zfs_head_type = pzhp->zfs_type;
+	zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
+	zhp->zpool_hdl = zpool_handle(zhp);
+	return (zhp);
+}
+
+/*
+ * Deep-duplicate a zfs handle: scalar fields are copied, each cached
+ * nvlist is duplicated with nvlist_dup(), and the mount options string is
+ * strdup'ed.  Returns NULL (after recording a no-memory error and closing
+ * the partial copy) if any duplication fails.
+ */
+zfs_handle_t *
+zfs_handle_dup(zfs_handle_t *zhp_orig)
+{
+	zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
+
+	if (zhp == NULL)
+		return (NULL);
+
+	zhp->zfs_hdl = zhp_orig->zfs_hdl;
+	zhp->zpool_hdl = zhp_orig->zpool_hdl;
+	(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
+	    sizeof (zhp->zfs_name));
+	zhp->zfs_type = zhp_orig->zfs_type;
+	zhp->zfs_head_type = zhp_orig->zfs_head_type;
+	zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
+	if (zhp_orig->zfs_props != NULL) {
+		if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
+			(void) no_memory(zhp->zfs_hdl);
+			zfs_close(zhp);
+			return (NULL);
+		}
+	}
+	if (zhp_orig->zfs_user_props != NULL) {
+		if (nvlist_dup(zhp_orig->zfs_user_props,
+		    &zhp->zfs_user_props, 0) != 0) {
+			(void) no_memory(zhp->zfs_hdl);
+			zfs_close(zhp);
+			return (NULL);
+		}
+	}
+	if (zhp_orig->zfs_recvd_props != NULL) {
+		if (nvlist_dup(zhp_orig->zfs_recvd_props,
+		    &zhp->zfs_recvd_props, 0)) {
+			(void) no_memory(zhp->zfs_hdl);
+			zfs_close(zhp);
+			return (NULL);
+		}
+	}
+	/*
+	 * NOTE(review): zfs_mntcheck is copied shallowly while zfs_mntopts
+	 * is duplicated — confirm zfs_mntcheck is a value, not owned memory.
+	 */
+	zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
+	if (zhp_orig->zfs_mntopts != NULL) {
+		zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
+		    zhp_orig->zfs_mntopts);
+	}
+	zhp->zfs_props_table = zhp_orig->zfs_props_table;
+	return (zhp);
+}
+
+/*
+ * Opens the given snapshot, filesystem, or volume.   The 'types'
+ * argument is a mask of acceptable types.  The function will print an
+ * appropriate error message and return NULL if it can't be opened.
+ *
+ * Validation happens before any ioctl so the user gets a precise
+ * complaint about bad names; type filtering happens after the handle is
+ * built, since the real type is only known then.
+ */
+zfs_handle_t *
+zfs_open(libzfs_handle_t *hdl, const char *path, int types)
+{
+	zfs_handle_t *zhp;
+	char errbuf[1024];
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
+
+	/*
+	 * Validate the name before we even try to open it.
+	 */
+	if (!zfs_validate_name(hdl, path, ZFS_TYPE_DATASET, B_FALSE)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "invalid dataset name"));
+		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+		return (NULL);
+	}
+
+	/*
+	 * Try to get stats for the dataset, which will tell us if it exists.
+	 */
+	errno = 0;
+	if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
+		(void) zfs_standard_error(hdl, errno, errbuf);
+		return (NULL);
+	}
+
+	/*
+	 * A second "zhp == NULL" fallback block used to live here; it was
+	 * unreachable (the branch above already returns on NULL) and has
+	 * been removed.
+	 */
+
+	if (!(types & zhp->zfs_type)) {
+		(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
+		zfs_close(zhp);
+		return (NULL);
+	}
+
+	return (zhp);
+}
+
+/*
+ * Release a ZFS handle.  Nothing to do but free the associated memory.
+ * The cached pool handle (zpool_hdl) is shared via the libzfs handle's
+ * pool list and is not closed here.
+ */
+void
+zfs_close(zfs_handle_t *zhp)
+{
+	if (zhp->zfs_mntopts)
+		free(zhp->zfs_mntopts);
+	nvlist_free(zhp->zfs_props);
+	nvlist_free(zhp->zfs_user_props);
+	nvlist_free(zhp->zfs_recvd_props);
+	free(zhp);
+}
+
+/*
+ * One cached mnttab entry, linked into the per-libzfs-handle AVL tree
+ * (libzfs_mnttab_cache).  All four mnttab string fields are owned copies.
+ */
+typedef struct mnttab_node {
+	struct mnttab mtn_mt;	/* the cached entry (strings strdup'ed) */
+	avl_node_t mtn_node;	/* AVL linkage */
+} mnttab_node_t;
+
+/*
+ * AVL comparator: orders cache nodes by mnt_special (the dataset name),
+ * normalized to the -1/0/1 values the AVL code requires.
+ */
+static int
+libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
+{
+	const mnttab_node_t *mtn1 = arg1;
+	const mnttab_node_t *mtn2 = arg2;
+	int rv;
+
+	rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);
+
+	if (rv == 0)
+		return (0);
+	return (rv > 0 ? 1 : -1);
+}
+
+/*
+ * Initialize the (empty) mnttab cache AVL tree on the libzfs handle.
+ * Asserts the cache has not already been populated.
+ */
+void
+libzfs_mnttab_init(libzfs_handle_t *hdl)
+{
+	assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
+	avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
+	    sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
+}
+
+/*
+ * (Re)populate the mnttab cache by scanning the system mnttab from the
+ * top, keeping only ZFS entries.  Each field is copied into freshly
+ * allocated memory owned by the cache node.
+ *
+ * NOTE(review): avl_add() is used without a duplicate check; a mnttab
+ * containing two entries with the same mnt_special would be a problem —
+ * confirm that cannot occur here.
+ */
+void
+libzfs_mnttab_update(libzfs_handle_t *hdl)
+{
+	struct mnttab entry;
+
+	rewind(hdl->libzfs_mnttab);
+	while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
+		mnttab_node_t *mtn;
+
+		if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
+			continue;
+		mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
+		mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
+		mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
+		mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
+		mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);
+		avl_add(&hdl->libzfs_mnttab_cache, mtn);
+	}
+}
+
+/*
+ * Tear down the mnttab cache: free every node (including its four owned
+ * strings) via avl_destroy_nodes(), then destroy the tree itself.
+ */
+void
+libzfs_mnttab_fini(libzfs_handle_t *hdl)
+{
+	void *cookie = NULL;
+	mnttab_node_t *mtn;
+
+	while (mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie)) {
+		free(mtn->mtn_mt.mnt_special);
+		free(mtn->mtn_mt.mnt_mountp);
+		free(mtn->mtn_mt.mnt_fstype);
+		free(mtn->mtn_mt.mnt_mntopts);
+		free(mtn);
+	}
+	avl_destroy(&hdl->libzfs_mnttab_cache);
+}
+
+/*
+ * Enable or disable use of the mnttab cache for lookups; see
+ * libzfs_mnttab_find() for how each mode behaves.
+ */
+void
+libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
+{
+	hdl->libzfs_mnttab_enable = enable;
+}
+
+/*
+ * Look up the mnttab entry for dataset 'fsname', filling in *entry.
+ * Returns 0 on success, ENOENT if not mounted.
+ *
+ * With caching disabled, any stale cache is torn down and the system
+ * mnttab is scanned directly.  With caching enabled, the AVL cache is
+ * lazily (re)built on first use and the lookup is a tree search; the
+ * returned strings then point into cache-owned memory.
+ */
+int
+libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
+    struct mnttab *entry)
+{
+	mnttab_node_t find;
+	mnttab_node_t *mtn;
+
+	if (!hdl->libzfs_mnttab_enable) {
+		struct mnttab srch = { 0 };
+
+		if (avl_numnodes(&hdl->libzfs_mnttab_cache))
+			libzfs_mnttab_fini(hdl);
+		rewind(hdl->libzfs_mnttab);
+		srch.mnt_special = (char *)fsname;
+		srch.mnt_fstype = MNTTYPE_ZFS;
+		if (getmntany(hdl->libzfs_mnttab, entry, &srch) == 0)
+			return (0);
+		else
+			return (ENOENT);
+	}
+
+	if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0)
+		libzfs_mnttab_update(hdl);
+
+	/* Comparator only reads mnt_special, so a partial key suffices. */
+	find.mtn_mt.mnt_special = (char *)fsname;
+	mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
+	if (mtn) {
+		*entry = mtn->mtn_mt;
+		return (0);
+	}
+	return (ENOENT);
+}
+
+/*
+ * Record a new mount in the mnttab cache (called after mounting a
+ * dataset).  If the cache is currently empty it is left alone — the next
+ * lookup will rebuild it from the system mnttab anyway.
+ */
+void
+libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
+    const char *mountp, const char *mntopts)
+{
+	mnttab_node_t *mtn;
+
+	if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0)
+		return;
+	mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
+	mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
+	mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
+	mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
+	mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
+	avl_add(&hdl->libzfs_mnttab_cache, mtn);
+}
+
+/*
+ * Drop the cache entry for 'fsname' (called after unmounting a dataset),
+ * freeing the node and its owned strings.  A miss is silently ignored.
+ */
+void
+libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
+{
+	mnttab_node_t find;
+	mnttab_node_t *ret;
+
+	find.mtn_mt.mnt_special = (char *)fsname;
+	if (ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL)) {
+		avl_remove(&hdl->libzfs_mnttab_cache, ret);
+		free(ret->mtn_mt.mnt_special);
+		free(ret->mtn_mt.mnt_mountp);
+		free(ret->mtn_mt.mnt_fstype);
+		free(ret->mtn_mt.mnt_mntopts);
+		free(ret);
+	}
+}
+
+/*
+ * Fetch the SPA (pool) version for the pool containing this dataset into
+ * *spa_version.  Returns 0 on success, or -1 if the handle has no
+ * associated pool handle.
+ */
+int
+zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
+{
+	zpool_handle_t *zpool_handle = zhp->zpool_hdl;
+
+	if (zpool_handle == NULL)
+		return (-1);
+
+	*spa_version = zpool_get_prop_int(zpool_handle,
+	    ZPOOL_PROP_VERSION, NULL);
+	return (0);
+}
+
+/*
+ * The choice of reservation property depends on the SPA version.
+ * Pools at or above SPA_VERSION_REFRESERVATION use 'refreservation';
+ * older pools use 'reservation'.  Returns 0 and sets *resv_prop on
+ * success, -1 if the SPA version cannot be determined.
+ */
+static int
+zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
+{
+	int spa_version;
+
+	if (zfs_spa_version(zhp, &spa_version) < 0)
+		return (-1);
+
+	if (spa_version >= SPA_VERSION_REFRESERVATION)
+		*resv_prop = ZFS_PROP_REFRESERVATION;
+	else
+		*resv_prop = ZFS_PROP_RESERVATION;
+
+	return (0);
+}
+
+/*
+ * Given an nvlist of properties to set, validates that they are correct, and
+ * parses any numeric properties (index, boolean, etc) if they are specified as
+ * strings.
+ */
+nvlist_t *
+zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
+    uint64_t zoned, zfs_handle_t *zhp, const char *errbuf)
+{
+	nvpair_t *elem;
+	uint64_t intval;
+	char *strval;
+	zfs_prop_t prop;
+	nvlist_t *ret;
+	int chosen_normal = -1;
+	int chosen_utf = -1;
+
+	if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
+		(void) no_memory(hdl);
+		return (NULL);
+	}
+
+	/*
+	 * Make sure this property is valid and applies to this type.
+	 */
+
+	elem = NULL;
+	while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
+		const char *propname = nvpair_name(elem);
+
+		prop = zfs_name_to_prop(propname);
+		if (prop == ZPROP_INVAL && zfs_prop_user(propname)) {
+			/*
+			 * This is a user property: make sure it's a
+			 * string, and that it's less than ZAP_MAXNAMELEN.
+			 */
+			if (nvpair_type(elem) != DATA_TYPE_STRING) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' must be a string"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property name '%s' is too long"),
+				    propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			(void) nvpair_value_string(elem, &strval);
+			if (nvlist_add_string(ret, propname, strval) != 0) {
+				(void) no_memory(hdl);
+				goto error;
+			}
+			continue;
+		}
+
+		/*
+		 * Currently, only user properties can be modified on
+		 * snapshots.
+		 */
+		if (type == ZFS_TYPE_SNAPSHOT) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "this property can not be modified for snapshots"));
+			(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
+			goto error;
+		}
+
+		if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) {
+			zfs_userquota_prop_t uqtype;
+			char newpropname[128];
+			char domain[128];
+			uint64_t rid;
+			uint64_t valary[3];
+
+			if (userquota_propname_decode(propname, zoned,
+			    &uqtype, domain, sizeof (domain), &rid) != 0) {
+				zfs_error_aux(hdl,
+				    dgettext(TEXT_DOMAIN,
+				    "'%s' has an invalid user/group name"),
+				    propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			if (uqtype != ZFS_PROP_USERQUOTA &&
+			    uqtype != ZFS_PROP_GROUPQUOTA) {
+				zfs_error_aux(hdl,
+				    dgettext(TEXT_DOMAIN, "'%s' is readonly"),
+				    propname);
+				(void) zfs_error(hdl, EZFS_PROPREADONLY,
+				    errbuf);
+				goto error;
+			}
+
+			if (nvpair_type(elem) == DATA_TYPE_STRING) {
+				(void) nvpair_value_string(elem, &strval);
+				if (strcmp(strval, "none") == 0) {
+					intval = 0;
+				} else if (zfs_nicestrtonum(hdl,
+				    strval, &intval) != 0) {
+					(void) zfs_error(hdl,
+					    EZFS_BADPROP, errbuf);
+					goto error;
+				}
+			} else if (nvpair_type(elem) ==
+			    DATA_TYPE_UINT64) {
+				(void) nvpair_value_uint64(elem, &intval);
+				if (intval == 0) {
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "use 'none' to disable "
+					    "userquota/groupquota"));
+					goto error;
+				}
+			} else {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' must be a number"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			/*
+			 * Encode the prop name as
+			 * userquota@<hex-rid>-domain, to make it easy
+			 * for the kernel to decode.
+			 */
+			(void) snprintf(newpropname, sizeof (newpropname),
+			    "%s%llx-%s", zfs_userquota_prop_prefixes[uqtype],
+			    (longlong_t)rid, domain);
+			valary[0] = uqtype;
+			valary[1] = rid;
+			valary[2] = intval;
+			if (nvlist_add_uint64_array(ret, newpropname,
+			    valary, 3) != 0) {
+				(void) no_memory(hdl);
+				goto error;
+			}
+			continue;
+		} else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "'%s' is readonly"),
+			    propname);
+			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
+			goto error;
+		}
+
+		if (prop == ZPROP_INVAL) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid property '%s'"), propname);
+			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+			goto error;
+		}
+
+		if (!zfs_prop_valid_for_type(prop, type)) {
+			zfs_error_aux(hdl,
+			    dgettext(TEXT_DOMAIN, "'%s' does not "
+			    "apply to datasets of this type"), propname);
+			(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
+			goto error;
+		}
+
+		if (zfs_prop_readonly(prop) &&
+		    (!zfs_prop_setonce(prop) || zhp != NULL)) {
+			zfs_error_aux(hdl,
+			    dgettext(TEXT_DOMAIN, "'%s' is readonly"),
+			    propname);
+			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
+			goto error;
+		}
+
+		if (zprop_parse_value(hdl, elem, prop, type, ret,
+		    &strval, &intval, errbuf) != 0)
+			goto error;
+
+		/*
+		 * Perform some additional checks for specific properties.
+		 */
+		switch (prop) {
+		case ZFS_PROP_VERSION:
+		{
+			int version;
+
+			if (zhp == NULL)
+				break;
+			version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
+			if (intval < version) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "Can not downgrade; already at version %u"),
+				    version);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+			break;
+		}
+
+		case ZFS_PROP_RECORDSIZE:
+		case ZFS_PROP_VOLBLOCKSIZE:
+			/* must be power of two within SPA_{MIN,MAX}BLOCKSIZE */
+			if (intval < SPA_MINBLOCKSIZE ||
+			    intval > SPA_MAXBLOCKSIZE || !ISP2(intval)) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' must be power of 2 from %u "
+				    "to %uk"), propname,
+				    (uint_t)SPA_MINBLOCKSIZE,
+				    (uint_t)SPA_MAXBLOCKSIZE >> 10);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+			break;
+
+		case ZFS_PROP_MLSLABEL:
+		{
+#ifdef sun
+			/*
+			 * Verify the mlslabel string and convert to
+			 * internal hex label string.
+			 */
+
+			m_label_t *new_sl;
+			char *hex = NULL;	/* internal label string */
+
+			/* Default value is already OK. */
+			if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
+				break;
+
+			/* Verify the label can be converted to binary form */
+			if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
+			    (str_to_label(strval, &new_sl, MAC_LABEL,
+			    L_NO_CORRECTION, NULL) == -1)) {
+				goto badlabel;
+			}
+
+			/* Now translate to hex internal label string */
+			if (label_to_str(new_sl, &hex, M_INTERNAL,
+			    DEF_NAMES) != 0) {
+				if (hex)
+					free(hex);
+				goto badlabel;
+			}
+			m_label_free(new_sl);
+
+			/* If string is already in internal form, we're done. */
+			if (strcmp(strval, hex) == 0) {
+				free(hex);
+				break;
+			}
+
+			/* Replace the label string with the internal form. */
+			(void) nvlist_remove(ret, zfs_prop_to_name(prop),
+			    DATA_TYPE_STRING);
+			verify(nvlist_add_string(ret, zfs_prop_to_name(prop),
+			    hex) == 0);
+			free(hex);
+
+			break;
+
+badlabel:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid mlslabel '%s'"), strval);
+			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+			m_label_free(new_sl);	/* OK if null */
+#else	/* !sun */
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "mlslabel is not supported on FreeBSD"));
+			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+#endif	/* !sun */
+			goto error;
+
+		}
+
+		case ZFS_PROP_MOUNTPOINT:
+		{
+			namecheck_err_t why;
+
+			if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
+			    strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
+				break;
+
+			if (mountpoint_namecheck(strval, &why)) {
+				switch (why) {
+				case NAME_ERR_LEADING_SLASH:
+					zfs_error_aux(hdl,
+					    dgettext(TEXT_DOMAIN,
+					    "'%s' must be an absolute path, "
+					    "'none', or 'legacy'"), propname);
+					break;
+				case NAME_ERR_TOOLONG:
+					zfs_error_aux(hdl,
+					    dgettext(TEXT_DOMAIN,
+					    "component of '%s' is too long"),
+					    propname);
+					break;
+				}
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+		}
+
+			/*FALLTHRU*/
+
+		case ZFS_PROP_SHARESMB:
+		case ZFS_PROP_SHARENFS:
+			/*
+			 * For the mountpoint and sharenfs or sharesmb
+			 * properties, check if it can be set in a
+			 * global/non-global zone based on
+			 * the zoned property value:
+			 *
+			 *		global zone	    non-global zone
+			 * --------------------------------------------------
+			 * zoned=on	mountpoint (no)	    mountpoint (yes)
+			 *		sharenfs (no)	    sharenfs (no)
+			 *		sharesmb (no)	    sharesmb (no)
+			 *
+			 * zoned=off	mountpoint (yes)	N/A
+			 *		sharenfs (yes)
+			 *		sharesmb (yes)
+			 */
+			if (zoned) {
+				if (getzoneid() == GLOBAL_ZONEID) {
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' cannot be set on "
+					    "dataset in a non-global zone"),
+					    propname);
+					(void) zfs_error(hdl, EZFS_ZONED,
+					    errbuf);
+					goto error;
+				} else if (prop == ZFS_PROP_SHARENFS ||
+				    prop == ZFS_PROP_SHARESMB) {
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' cannot be set in "
+					    "a non-global zone"), propname);
+					(void) zfs_error(hdl, EZFS_ZONED,
+					    errbuf);
+					goto error;
+				}
+			} else if (getzoneid() != GLOBAL_ZONEID) {
+				/*
+				 * If zoned property is 'off', this must be in
+				 * a global zone. If not, something is wrong.
+				 */
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' cannot be set while dataset "
+				    "'zoned' property is set"), propname);
+				(void) zfs_error(hdl, EZFS_ZONED, errbuf);
+				goto error;
+			}
+
+			/*
+			 * At this point, it is legitimate to set the
+			 * property. Now we want to make sure that the
+			 * property value is valid if it is sharenfs.
+			 */
+			if ((prop == ZFS_PROP_SHARENFS ||
+			    prop == ZFS_PROP_SHARESMB) &&
+			    strcmp(strval, "on") != 0 &&
+			    strcmp(strval, "off") != 0) {
+				zfs_share_proto_t proto;
+
+				if (prop == ZFS_PROP_SHARESMB)
+					proto = PROTO_SMB;
+				else
+					proto = PROTO_NFS;
+
+				/*
+				 * Must be an valid sharing protocol
+				 * option string so init the libshare
+				 * in order to enable the parser and
+				 * then parse the options. We use the
+				 * control API since we don't care about
+				 * the current configuration and don't
+				 * want the overhead of loading it
+				 * until we actually do something.
+				 */
+
+				if (zfs_init_libshare(hdl,
+				    SA_INIT_CONTROL_API) != SA_OK) {
+					/*
+					 * An error occurred so we can't do
+					 * anything
+					 */
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' cannot be set: problem "
+					    "in share initialization"),
+					    propname);
+					(void) zfs_error(hdl, EZFS_BADPROP,
+					    errbuf);
+					goto error;
+				}
+
+				if (zfs_parse_options(strval, proto) != SA_OK) {
+					/*
+					 * There was an error in parsing so
+					 * deal with it by issuing an error
+					 * message and leaving after
+					 * uninitializing the the libshare
+					 * interface.
+					 */
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' cannot be set to invalid "
+					    "options"), propname);
+					(void) zfs_error(hdl, EZFS_BADPROP,
+					    errbuf);
+					zfs_uninit_libshare(hdl);
+					goto error;
+				}
+				zfs_uninit_libshare(hdl);
+			}
+
+			break;
+		case ZFS_PROP_UTF8ONLY:
+			chosen_utf = (int)intval;
+			break;
+		case ZFS_PROP_NORMALIZE:
+			chosen_normal = (int)intval;
+			break;
+		}
+
+		/*
+		 * For changes to existing volumes, we have some additional
+		 * checks to enforce.
+		 */
+		if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
+			uint64_t volsize = zfs_prop_get_int(zhp,
+			    ZFS_PROP_VOLSIZE);
+			uint64_t blocksize = zfs_prop_get_int(zhp,
+			    ZFS_PROP_VOLBLOCKSIZE);
+			char buf[64];
+
+			switch (prop) {
+			case ZFS_PROP_RESERVATION:
+			case ZFS_PROP_REFRESERVATION:
+				if (intval > volsize) {
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' is greater than current "
+					    "volume size"), propname);
+					(void) zfs_error(hdl, EZFS_BADPROP,
+					    errbuf);
+					goto error;
+				}
+				break;
+
+			case ZFS_PROP_VOLSIZE:
+				if (intval % blocksize != 0) {
+					zfs_nicenum(blocksize, buf,
+					    sizeof (buf));
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' must be a multiple of "
+					    "volume block size (%s)"),
+					    propname, buf);
+					(void) zfs_error(hdl, EZFS_BADPROP,
+					    errbuf);
+					goto error;
+				}
+
+				if (intval == 0) {
+					zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+					    "'%s' cannot be zero"),
+					    propname);
+					(void) zfs_error(hdl, EZFS_BADPROP,
+					    errbuf);
+					goto error;
+				}
+				break;
+			}
+		}
+	}
+
+	/*
+	 * If normalization was chosen, but no UTF8 choice was made,
+	 * enforce rejection of non-UTF8 names.
+	 *
+	 * If normalization was chosen, but rejecting non-UTF8 names
+	 * was explicitly not chosen, it is an error.
+	 */
+	if (chosen_normal > 0 && chosen_utf < 0) {
+		if (nvlist_add_uint64(ret,
+		    zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
+			(void) no_memory(hdl);
+			goto error;
+		}
+	} else if (chosen_normal > 0 && chosen_utf == 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "'%s' must be set 'on' if normalization chosen"),
+		    zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
+		(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+		goto error;
+	}
+	return (ret);
+
+error:
+	nvlist_free(ret);
+	return (NULL);
+}
+
+/*
+ * When 'volsize' is being changed on an existing volume whose reservation
+ * (or refreservation) currently tracks the volume size, synthesize a
+ * matching reservation update into 'nvl'.
+ *
+ * Returns 1 if a reservation entry was added, 0 if no update was needed,
+ * and -1 on failure.
+ */
+int
+zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
+{
+	zfs_prop_t resv_prop;
+	uint64_t cur_volsize, cur_resv, resv, volsize;
+
+	cur_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
+	/* Determine whether this volume uses reservation or refreservation. */
+	if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
+		return (-1);
+	cur_resv = zfs_prop_get_int(zhp, resv_prop);
+
+	/*
+	 * Nothing to do when the reservation is not tracking the volume
+	 * size, or when the caller is explicitly setting the reservation.
+	 */
+	if (zvol_volsize_to_reservation(cur_volsize, zhp->zfs_props) !=
+	    cur_resv)
+		return (0);
+	if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(resv_prop),
+	    &resv) != ENOENT)
+		return (0);
+
+	if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
+	    &volsize) != 0)
+		return (-1);
+
+	resv = zvol_volsize_to_reservation(volsize, zhp->zfs_props);
+	if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop), resv) != 0) {
+		(void) no_memory(zhp->zfs_hdl);
+		return (-1);
+	}
+	return (1);
+}
+
+/*
+ * Translate an errno from a failed ZFS_IOC_SET_PROP ioctl into a
+ * property-specific libzfs error report.  'errbuf' carries the caller's
+ * "cannot set property ..." prefix.
+ */
+void
+zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
+    char *errbuf)
+{
+	if (err == ENOSPC) {
+		/*
+		 * For quotas and reservations, ENOSPC indicates
+		 * something different; setting a quota or reservation
+		 * doesn't use any disk space.
+		 */
+		if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "size is less than current used or "
+			    "reserved space"));
+			(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
+		} else if (prop == ZFS_PROP_RESERVATION ||
+		    prop == ZFS_PROP_REFRESERVATION) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "size is greater than available space"));
+			(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
+		} else {
+			(void) zfs_standard_error(hdl, err, errbuf);
+		}
+	} else if (err == EBUSY) {
+		(void) zfs_standard_error(hdl, EBUSY, errbuf);
+	} else if (err == EROFS) {
+		(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
+	} else if (err == ENOTSUP) {
+		/* The pool or dataset version lacks this property/value. */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "pool and or dataset must be upgraded to set this "
+		    "property or value"));
+		(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+	} else if (err == ERANGE && prop == ZFS_PROP_COMPRESSION) {
+		(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "property setting is not allowed on "
+		    "bootable datasets"));
+		(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
+	} else if (err == EINVAL && prop == ZPROP_INVAL) {
+		(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+	} else {
+		boolean_t handled = B_FALSE;
+
+#ifdef _ILP32
+		/*
+		 * This platform can't address a volume this big.
+		 */
+		if (err == EOVERFLOW && prop == ZFS_PROP_VOLSIZE) {
+			(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
+			handled = B_TRUE;
+		}
+#endif
+		if (!handled)
+			(void) zfs_standard_error(hdl, err, errbuf);
+	}
+}
+
+/*
+ * Given a property name and value, set the property for the given dataset.
+ *
+ * Returns 0 on success, or -1 (after reporting the error through the
+ * libzfs handle) on failure.
+ */
+int
+zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret = -1;
+	prop_changelist_t *cl = NULL;
+	char errbuf[1024];
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	nvlist_t *nvl = NULL, *realprops;
+	zfs_prop_t prop;
+	boolean_t do_prefix = B_TRUE;
+	int added_resv = 0;	/* must start 0: only set for volsize changes */
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
+	    zhp->zfs_name);
+
+	/* Wrap the single name/value pair in an nvlist and validate it. */
+	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
+	    nvlist_add_string(nvl, propname, propval) != 0) {
+		(void) no_memory(hdl);
+		goto error;
+	}
+
+	if ((realprops = zfs_valid_proplist(hdl, zhp->zfs_type, nvl,
+	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, errbuf)) == NULL)
+		goto error;
+
+	nvlist_free(nvl);
+	nvl = realprops;
+
+	prop = zfs_name_to_prop(propname);
+
+	/* We don't support those properties on FreeBSD. */
+	switch (prop) {
+	case ZFS_PROP_DEVICES:
+	case ZFS_PROP_ISCSIOPTIONS:
+	case ZFS_PROP_XATTR:
+	case ZFS_PROP_VSCAN:
+	case ZFS_PROP_NBMAND:
+	case ZFS_PROP_MLSLABEL:
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    "property '%s' not supported on FreeBSD", propname);
+		ret = zfs_error(hdl, EZFS_PERM, errbuf);
+		goto error;
+	}
+
+	/*
+	 * Changing volsize may require a matching (ref)reservation update;
+	 * zfs_add_synthetic_resv() adds it to 'nvl' when needed.
+	 */
+	if (prop == ZFS_PROP_VOLSIZE) {
+		if ((added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1)
+			goto error;
+	}
+
+	if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
+		goto error;
+
+	if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "child dataset with inherited mountpoint is used "
+		    "in a non-global zone"));
+		ret = zfs_error(hdl, EZFS_ZONED, errbuf);
+		goto error;
+	}
+
+	/*
+	 * We don't want to unmount & remount the dataset when changing
+	 * its canmount property to 'on' or 'noauto'.  We only use
+	 * the changelist logic to unmount when setting canmount=off.
+	 */
+	if (prop == ZFS_PROP_CANMOUNT) {
+		uint64_t idx;
+		int err = zprop_string_to_index(prop, propval, &idx,
+		    ZFS_TYPE_DATASET);
+		if (err == 0 && idx != ZFS_CANMOUNT_OFF)
+			do_prefix = B_FALSE;
+	}
+
+	if (do_prefix && (ret = changelist_prefix(cl)) != 0)
+		goto error;
+
+	/*
+	 * Execute the corresponding ioctl() to set this property.
+	 */
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
+		goto error;
+
+	ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
+
+	if (ret != 0) {
+		/*
+		 * Save errno before the error-reporting calls below can
+		 * clobber it.
+		 */
+		int set_errno = errno;
+
+		zfs_setprop_error(hdl, prop, set_errno, errbuf);
+		if (added_resv && set_errno == ENOSPC) {
+			/* clean up the volsize property we tried to set */
+			uint64_t old_volsize = zfs_prop_get_int(zhp,
+			    ZFS_PROP_VOLSIZE);
+			nvlist_free(nvl);
+			zcmd_free_nvlists(&zc);
+			if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+				goto error;
+			if (nvlist_add_uint64(nvl,
+			    zfs_prop_to_name(ZFS_PROP_VOLSIZE),
+			    old_volsize) != 0)
+				goto error;
+			if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
+				goto error;
+			(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
+		}
+	} else {
+		if (do_prefix)
+			ret = changelist_postfix(cl);
+
+		/*
+		 * Refresh the statistics so the new property value
+		 * is reflected.
+		 */
+		if (ret == 0)
+			(void) get_stats(zhp);
+	}
+
+error:
+	nvlist_free(nvl);
+	zcmd_free_nvlists(&zc);
+	if (cl)
+		changelist_free(cl);
+	return (ret);
+}
+
+/*
+ * Given a property, inherit the value from the parent dataset, or if received
+ * is TRUE, revert to the received value, if any.
+ *
+ * Returns 0 on success, or -1 (after reporting the error) on failure.
+ */
+int
+zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret;
+	prop_changelist_t *cl;
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	char errbuf[1024];
+	zfs_prop_t prop;
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot inherit %s for '%s'"), propname, zhp->zfs_name);
+
+	zc.zc_cookie = received;
+	if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
+		/*
+		 * For user properties, the amount of work we have to do is very
+		 * small, so just do it here.
+		 */
+		if (!zfs_prop_user(propname)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid property"));
+			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+		}
+
+		(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+		(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
+
+		if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
+			return (zfs_standard_error(hdl, errno, errbuf));
+
+		return (0);
+	}
+
+	/*
+	 * Verify that this property is inheritable.
+	 */
+	if (zfs_prop_readonly(prop))
+		return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
+
+	if (!zfs_prop_inheritable(prop) && !received)
+		return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
+
+	/*
+	 * Check to see if the value applies to this type
+	 */
+	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+		return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
+
+	/*
+	 * Normalize the name, to get rid of shorthand abbreviations.
+	 */
+	propname = zfs_prop_to_name(prop);
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
+
+	if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
+	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset is used in a non-global zone"));
+		return (zfs_error(hdl, EZFS_ZONED, errbuf));
+	}
+
+	/*
+	 * Determine datasets which will be affected by this change, if any.
+	 */
+	if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
+		return (-1);
+
+	if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "child dataset with inherited mountpoint is used "
+		    "in a non-global zone"));
+		ret = zfs_error(hdl, EZFS_ZONED, errbuf);
+		goto error;
+	}
+
+	if ((ret = changelist_prefix(cl)) != 0)
+		goto error;
+
+	if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) {
+		/*
+		 * Report the error, then fall through to the error label so
+		 * the changelist is freed (the old code leaked 'cl' here by
+		 * returning directly).
+		 */
+		ret = zfs_standard_error(hdl, errno, errbuf);
+		goto error;
+	}
+
+	if ((ret = changelist_postfix(cl)) != 0)
+		goto error;
+
+	/*
+	 * Refresh the statistics so the new property is reflected.
+	 */
+	(void) get_stats(zhp);
+
+error:
+	changelist_free(cl);
+	return (ret);
+}
+
+/*
+ * True DSL properties are stored in an nvlist.  The following two functions
+ * extract them appropriately.
+ */
+static uint64_t
+getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
+{
+	nvlist_t *propval;
+	uint64_t result;
+
+	*source = NULL;
+	if (nvlist_lookup_nvlist(zhp->zfs_props,
+	    zfs_prop_to_name(prop), &propval) != 0) {
+		/* Not present: fall back to the property's default value. */
+		verify(!zhp->zfs_props_table ||
+		    zhp->zfs_props_table[prop] == B_TRUE);
+		*source = "";
+		return (zfs_prop_default_numeric(prop));
+	}
+
+	verify(nvlist_lookup_uint64(propval, ZPROP_VALUE, &result) == 0);
+	(void) nvlist_lookup_string(propval, ZPROP_SOURCE, source);
+	return (result);
+}
+
+static char *
+getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
+{
+	nvlist_t *propval;
+	char *result;
+
+	*source = NULL;
+	if (nvlist_lookup_nvlist(zhp->zfs_props,
+	    zfs_prop_to_name(prop), &propval) != 0) {
+		/* Not present: fall back to the default (or empty) string. */
+		verify(!zhp->zfs_props_table ||
+		    zhp->zfs_props_table[prop] == B_TRUE);
+		result = (char *)zfs_prop_default_string(prop);
+		if (result == NULL)
+			result = "";
+		*source = "";
+		return (result);
+	}
+
+	verify(nvlist_lookup_string(propval, ZPROP_VALUE, &result) == 0);
+	(void) nvlist_lookup_string(propval, ZPROP_SOURCE, source);
+	return (result);
+}
+
+/*
+ * Returns B_TRUE while the handle is temporarily viewing its received
+ * properties (see zfs_set_recvd_props_mode() below).
+ */
+static boolean_t
+zfs_is_recvd_props_mode(zfs_handle_t *zhp)
+{
+	return (zhp->zfs_props == zhp->zfs_recvd_props);
+}
+
+/*
+ * Swap the handle's active property nvlist for the received-property
+ * nvlist, stashing the previous pointer in *cookie so that
+ * zfs_unset_recvd_props_mode() can restore it.
+ */
+static void
+zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
+{
+	*cookie = (uint64_t)(uintptr_t)zhp->zfs_props;
+	zhp->zfs_props = zhp->zfs_recvd_props;
+}
+
+/*
+ * Restore the property nvlist stashed by zfs_set_recvd_props_mode() and
+ * clear the cookie.
+ */
+static void
+zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
+{
+	zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie;
+	*cookie = 0;
+}
+
+/*
+ * Internal function for getting a numeric property.  Both zfs_prop_get() and
+ * zfs_prop_get_int() are built using this interface.
+ *
+ * Certain properties can be overridden using 'mount -o'.  In this case, scan
+ * the contents of the /etc/mnttab entry, searching for the appropriate options.
+ * If they differ from the on-disk values, report the current values and mark
+ * the source "temporary".
+ *
+ * Returns 0 on success with *val (and *source) filled in, -1 on failure.
+ */
+static int
+get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
+    char **source, uint64_t *val)
+{
+	zfs_cmd_t zc = { 0 };
+	nvlist_t *zplprops = NULL;
+	struct mnttab mnt;
+	char *mntopt_on = NULL;
+	char *mntopt_off = NULL;
+	boolean_t received = zfs_is_recvd_props_mode(zhp);
+
+	*source = NULL;
+
+	/* Map mount-overridable boolean properties to their mnttab options. */
+	switch (prop) {
+	case ZFS_PROP_ATIME:
+		mntopt_on = MNTOPT_ATIME;
+		mntopt_off = MNTOPT_NOATIME;
+		break;
+
+	case ZFS_PROP_DEVICES:
+		mntopt_on = MNTOPT_DEVICES;
+		mntopt_off = MNTOPT_NODEVICES;
+		break;
+
+	case ZFS_PROP_EXEC:
+		mntopt_on = MNTOPT_EXEC;
+		mntopt_off = MNTOPT_NOEXEC;
+		break;
+
+	case ZFS_PROP_READONLY:
+		mntopt_on = MNTOPT_RO;
+		mntopt_off = MNTOPT_RW;
+		break;
+
+	case ZFS_PROP_SETUID:
+		mntopt_on = MNTOPT_SETUID;
+		mntopt_off = MNTOPT_NOSETUID;
+		break;
+
+	case ZFS_PROP_XATTR:
+		mntopt_on = MNTOPT_XATTR;
+		mntopt_off = MNTOPT_NOXATTR;
+		break;
+
+	case ZFS_PROP_NBMAND:
+		mntopt_on = MNTOPT_NBMAND;
+		mntopt_off = MNTOPT_NONBMAND;
+		break;
+	}
+
+	/*
+	 * Because looking up the mount options is potentially expensive
+	 * (iterating over all of /etc/mnttab), we defer its calculation until
+	 * we're looking up a property which requires its presence.
+	 */
+	if (!zhp->zfs_mntcheck &&
+	    (mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
+		libzfs_handle_t *hdl = zhp->zfs_hdl;
+		struct mnttab entry;
+
+		if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) {
+			zhp->zfs_mntopts = zfs_strdup(hdl,
+			    entry.mnt_mntopts);
+			if (zhp->zfs_mntopts == NULL)
+				return (-1);
+		}
+
+		zhp->zfs_mntcheck = B_TRUE;
+	}
+
+	if (zhp->zfs_mntopts == NULL)
+		mnt.mnt_mntopts = "";
+	else
+		mnt.mnt_mntopts = zhp->zfs_mntopts;
+
+	switch (prop) {
+	case ZFS_PROP_ATIME:
+	case ZFS_PROP_DEVICES:
+	case ZFS_PROP_EXEC:
+	case ZFS_PROP_READONLY:
+	case ZFS_PROP_SETUID:
+	case ZFS_PROP_XATTR:
+	case ZFS_PROP_NBMAND:
+		*val = getprop_uint64(zhp, prop, source);
+
+		if (received)
+			break;
+
+		/* A 'mount -o' option overrides the on-disk value. */
+		if (hasmntopt(&mnt, mntopt_on) && !*val) {
+			*val = B_TRUE;
+			if (src)
+				*src = ZPROP_SRC_TEMPORARY;
+		} else if (hasmntopt(&mnt, mntopt_off) && *val) {
+			*val = B_FALSE;
+			if (src)
+				*src = ZPROP_SRC_TEMPORARY;
+		}
+		break;
+
+	case ZFS_PROP_CANMOUNT:
+	case ZFS_PROP_VOLSIZE:
+	case ZFS_PROP_QUOTA:
+	case ZFS_PROP_REFQUOTA:
+	case ZFS_PROP_RESERVATION:
+	case ZFS_PROP_REFRESERVATION:
+		*val = getprop_uint64(zhp, prop, source);
+
+		if (*source == NULL) {
+			/* not default, must be local */
+			*source = zhp->zfs_name;
+		}
+		break;
+
+	case ZFS_PROP_MOUNTED:
+		*val = (zhp->zfs_mntopts != NULL);
+		break;
+
+	case ZFS_PROP_NUMCLONES:
+		*val = zhp->zfs_dmustats.dds_num_clones;
+		break;
+
+	case ZFS_PROP_VERSION:
+	case ZFS_PROP_NORMALIZE:
+	case ZFS_PROP_UTF8ONLY:
+	case ZFS_PROP_CASE:
+		/* ZPL properties must be fetched from the kernel. */
+		if (!zfs_prop_valid_for_type(prop, zhp->zfs_head_type) ||
+		    zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
+			return (-1);
+		(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+		if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+		if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
+		    nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
+		    val) != 0) {
+			/*
+			 * Don't leak zplprops when the read succeeded but
+			 * the lookup failed.
+			 */
+			if (zplprops)
+				nvlist_free(zplprops);
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+		if (zplprops)
+			nvlist_free(zplprops);
+		zcmd_free_nvlists(&zc);
+		break;
+
+	default:
+		switch (zfs_prop_get_type(prop)) {
+		case PROP_TYPE_NUMBER:
+		case PROP_TYPE_INDEX:
+			*val = getprop_uint64(zhp, prop, source);
+			/*
+			 * If we tried to use a default value for a
+			 * readonly property, it means that it was not
+			 * present.
+			 */
+			if (zfs_prop_readonly(prop) &&
+			    *source != NULL && (*source)[0] == '\0') {
+				*source = NULL;
+			}
+			break;
+
+		case PROP_TYPE_STRING:
+		default:
+			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+			    "cannot get non-numeric property"));
+			return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
+			    dgettext(TEXT_DOMAIN, "internal error")));
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Calculate the source type, given the raw source string.  When the value
+ * is inherited from another dataset, copy that dataset's name into
+ * 'statbuf'.
+ */
+static void
+get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source,
+    char *statbuf, size_t statlen)
+{
+	if (statbuf == NULL || *srctype == ZPROP_SRC_TEMPORARY)
+		return;
+
+	if (source == NULL) {
+		*srctype = ZPROP_SRC_NONE;
+		return;
+	}
+	if (source[0] == '\0') {
+		*srctype = ZPROP_SRC_DEFAULT;
+		return;
+	}
+	if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
+		*srctype = ZPROP_SRC_RECEIVED;
+		return;
+	}
+	if (strcmp(source, zhp->zfs_name) == 0) {
+		*srctype = ZPROP_SRC_LOCAL;
+	} else {
+		(void) strlcpy(statbuf, source, statlen);
+		*srctype = ZPROP_SRC_INHERITED;
+	}
+}
+
+/*
+ * Retrieve the received value of a property into 'propbuf'.  Returns 0 on
+ * success, -1 if the property has no received value (or on error).
+ */
+int
+zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
+    size_t proplen, boolean_t literal)
+{
+	zfs_prop_t prop;
+	uint64_t cookie;
+	int err;
+
+	/* Lazily fetch the received-property nvlist from the kernel. */
+	if (zhp->zfs_recvd_props == NULL &&
+	    get_recvd_props_ioctl(zhp) != 0)
+		return (-1);
+
+	prop = zfs_name_to_prop(propname);
+
+	if (prop == ZPROP_INVAL) {
+		/* User property: read the received value directly. */
+		nvlist_t *propval;
+		char *recvdval;
+
+		if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
+		    propname, &propval) != 0)
+			return (-1);
+		verify(nvlist_lookup_string(propval, ZPROP_VALUE,
+		    &recvdval) == 0);
+		(void) strlcpy(propbuf, recvdval, proplen);
+		return (0);
+	}
+
+	/*
+	 * Native property: temporarily point the handle at its received
+	 * props and reuse the normal zfs_prop_get() machinery.
+	 */
+	if (!nvlist_exists(zhp->zfs_recvd_props, propname))
+		return (-1);
+	zfs_set_recvd_props_mode(zhp, &cookie);
+	err = zfs_prop_get(zhp, prop, propbuf, proplen,
+	    NULL, NULL, 0, literal);
+	zfs_unset_recvd_props_mode(zhp, &cookie);
+
+	return (err == 0 ? 0 : -1);
+}
+
+/*
+ * Render the dataset's clone list as a comma-separated string in 'propbuf'.
+ * Returns 0 on success, -1 if the clone list is unavailable.
+ */
+static int
+get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
+{
+	nvlist_t *clones;
+	nvpair_t *elem = NULL;
+
+	clones = zfs_get_clones_nvl(zhp);
+	if (clones == NULL)
+		return (-1);
+
+	propbuf[0] = '\0';
+	while ((elem = nvlist_next_nvpair(clones, elem)) != NULL) {
+		if (propbuf[0] != '\0')
+			(void) strlcat(propbuf, ",", proplen);
+		(void) strlcat(propbuf, nvpair_name(elem), proplen);
+	}
+
+	return (0);
+}
+
+/* State shared across the get_clones_cb() recursion while finding clones. */
+struct get_clones_arg {
+	uint64_t numclones;	/* clones still unaccounted for */
+	nvlist_t *value;	/* accumulates matching clone names */
+	const char *origin;	/* snapshot name to match against 'origin' */
+	char buf[ZFS_MAXNAMELEN];	/* scratch for each dataset's origin */
+};
+
+/*
+ * zfs_iter_children() callback: record datasets whose 'origin' matches
+ * gca->origin, recursing until all expected clones have been found.
+ * Always closes 'zhp'.  Returns 0, or no_memory()'s result on allocation
+ * failure.
+ */
+int
+get_clones_cb(zfs_handle_t *zhp, void *arg)
+{
+	struct get_clones_arg *gca = arg;
+
+	if (gca->numclones == 0) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
+	    NULL, NULL, 0, B_TRUE) != 0)
+		goto out;
+	if (strcmp(gca->buf, gca->origin) == 0) {
+		if (nvlist_add_boolean(gca->value, zfs_get_name(zhp)) != 0) {
+			/*
+			 * Grab the library handle before zfs_close() frees
+			 * 'zhp' (the old code read zhp->zfs_hdl after the
+			 * close, a use-after-free).
+			 */
+			libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+			zfs_close(zhp);
+			return (no_memory(hdl));
+		}
+		gca->numclones--;
+	}
+
+out:
+	(void) zfs_iter_children(zhp, get_clones_cb, gca);
+	zfs_close(zhp);
+	return (0);
+}
+
+/*
+ * Return the nvlist of clone names for this dataset, computing and caching
+ * it in zhp->zfs_props if the kernel did not supply it.  Returns NULL on
+ * failure.
+ */
+nvlist_t *
+zfs_get_clones_nvl(zfs_handle_t *zhp)
+{
+	nvlist_t *nv, *value;
+
+	if (nvlist_lookup_nvlist(zhp->zfs_props,
+	    zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
+		struct get_clones_arg gca;
+
+		/*
+		 * if this is a snapshot, then the kernel wasn't able
+		 * to get the clones.  Do it by slowly iterating.
+		 */
+		if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
+			return (NULL);
+		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
+			return (NULL);
+		if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
+			nvlist_free(nv);
+			return (NULL);
+		}
+
+		gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
+		gca.value = value;
+		gca.origin = zhp->zfs_name;
+
+		if (gca.numclones != 0) {
+			zfs_handle_t *root;
+			char pool[ZFS_MAXNAMELEN];
+			char *cp = pool;
+
+			/* get the pool name */
+			(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
+			(void) strsep(&cp, "/@");
+			root = zfs_open(zhp->zfs_hdl, pool,
+			    ZFS_TYPE_FILESYSTEM);
+
+			/*
+			 * If the pool root can't be opened, skip the walk;
+			 * gca.numclones stays nonzero and we fail below
+			 * (the old code passed NULL to get_clones_cb()).
+			 */
+			if (root != NULL)
+				(void) get_clones_cb(root, &gca);
+		}
+
+		if (gca.numclones != 0 ||
+		    nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
+		    nvlist_add_nvlist(zhp->zfs_props,
+		    zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
+			nvlist_free(nv);
+			nvlist_free(value);
+			return (NULL);
+		}
+		nvlist_free(nv);
+		nvlist_free(value);
+		verify(0 == nvlist_lookup_nvlist(zhp->zfs_props,
+		    zfs_prop_to_name(ZFS_PROP_CLONES), &nv));
+	}
+
+	verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0);
+
+	return (value);
+}
+
+/*
+ * Retrieve a property from the given object.  If 'literal' is specified, then
+ * numbers are left as exact values.  Otherwise, numbers are converted to a
+ * human-readable form.
+ *
+ * Returns 0 on success, or -1 on error.
+ */
+int
+zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
+    zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
+{
+	char *source = NULL;
+	uint64_t val;
+	char *str;
+	const char *strval;
+	boolean_t received = zfs_is_recvd_props_mode(zhp);
+
+	/*
+	 * Check to see if this property applies to our object
+	 */
+	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+		return (-1);
+
+	/* Read-only properties have no received value by definition. */
+	if (received && zfs_prop_readonly(prop))
+		return (-1);
+
+	if (src)
+		*src = ZPROP_SRC_NONE;
+
+	switch (prop) {
+	case ZFS_PROP_CREATION:
+		/*
+		 * 'creation' is a time_t stored in the statistics.  We convert
+		 * this into a string unless 'literal' is specified.
+		 */
+		{
+			val = getprop_uint64(zhp, prop, &source);
+			time_t time = (time_t)val;
+			struct tm t;
+
+			/*
+			 * NOTE(review): 'val' is uint64_t printed with
+			 * "%llu" and no (u_longlong_t) cast, unlike the
+			 * other call sites below — confirm on platforms
+			 * where uint64_t is not unsigned long long.
+			 */
+			if (literal ||
+			    localtime_r(&time, &t) == NULL ||
+			    strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
+			    &t) == 0)
+				(void) snprintf(propbuf, proplen, "%llu", val);
+		}
+		break;
+
+	case ZFS_PROP_MOUNTPOINT:
+		/*
+		 * Getting the precise mountpoint can be tricky.
+		 *
+		 *  - for 'none' or 'legacy', return those values.
+		 *  - for inherited mountpoints, we want to take everything
+		 *    after our ancestor and append it to the inherited value.
+		 *
+		 * If the pool has an alternate root, we want to prepend that
+		 * root to any values we return.
+		 */
+
+		str = getprop_string(zhp, prop, &source);
+
+		if (str[0] == '/') {
+			char buf[MAXPATHLEN];
+			char *root = buf;
+			const char *relpath;
+
+			/*
+			 * If we inherit the mountpoint, even from a dataset
+			 * with a received value, the source will be the path of
+			 * the dataset we inherit from. If source is
+			 * ZPROP_SOURCE_VAL_RECVD, the received value is not
+			 * inherited.
+			 */
+			if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
+				relpath = "";
+			} else {
+				relpath = zhp->zfs_name + strlen(source);
+				if (relpath[0] == '/')
+					relpath++;
+			}
+
+			if ((zpool_get_prop(zhp->zpool_hdl,
+			    ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL)) ||
+			    (strcmp(root, "-") == 0))
+				root[0] = '\0';
+			/*
+			 * Special case an alternate root of '/'. This will
+			 * avoid having multiple leading slashes in the
+			 * mountpoint path.
+			 */
+			if (strcmp(root, "/") == 0)
+				root++;
+
+			/*
+			 * If the mountpoint is '/' then skip over this
+			 * if we are obtaining either an alternate root or
+			 * an inherited mountpoint.
+			 */
+			if (str[1] == '\0' && (root[0] != '\0' ||
+			    relpath[0] != '\0'))
+				str++;
+
+			if (relpath[0] == '\0')
+				(void) snprintf(propbuf, proplen, "%s%s",
+				    root, str);
+			else
+				(void) snprintf(propbuf, proplen, "%s%s%s%s",
+				    root, str, relpath[0] == '@' ? "" : "/",
+				    relpath);
+		} else {
+			/* 'legacy' or 'none' */
+			(void) strlcpy(propbuf, str, proplen);
+		}
+
+		break;
+
+	case ZFS_PROP_ORIGIN:
+		(void) strlcpy(propbuf, getprop_string(zhp, prop, &source),
+		    proplen);
+		/*
+		 * If there is no parent at all, return failure to indicate that
+		 * it doesn't apply to this dataset.
+		 */
+		if (propbuf[0] == '\0')
+			return (-1);
+		break;
+
+	case ZFS_PROP_CLONES:
+		if (get_clones_string(zhp, propbuf, proplen) != 0)
+			return (-1);
+		break;
+
+	case ZFS_PROP_QUOTA:
+	case ZFS_PROP_REFQUOTA:
+	case ZFS_PROP_RESERVATION:
+	case ZFS_PROP_REFRESERVATION:
+
+		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+			return (-1);
+
+		/*
+		 * If quota or reservation is 0, we translate this into 'none'
+		 * (unless literal is set), and indicate that it's the default
+		 * value.  Otherwise, we print the number nicely and indicate
+		 * that its set locally.
+		 */
+		if (val == 0) {
+			if (literal)
+				(void) strlcpy(propbuf, "0", proplen);
+			else
+				(void) strlcpy(propbuf, "none", proplen);
+		} else {
+			if (literal)
+				(void) snprintf(propbuf, proplen, "%llu",
+				    (u_longlong_t)val);
+			else
+				zfs_nicenum(val, propbuf, proplen);
+		}
+		break;
+
+	case ZFS_PROP_REFRATIO:
+	case ZFS_PROP_COMPRESSRATIO:
+		/* Stored as a fixed-point value scaled by 100 ("1.23x"). */
+		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+			return (-1);
+		(void) snprintf(propbuf, proplen, "%llu.%02llux",
+		    (u_longlong_t)(val / 100),
+		    (u_longlong_t)(val % 100));
+		break;
+
+	case ZFS_PROP_TYPE:
+		switch (zhp->zfs_type) {
+		case ZFS_TYPE_FILESYSTEM:
+			str = "filesystem";
+			break;
+		case ZFS_TYPE_VOLUME:
+			str = "volume";
+			break;
+		case ZFS_TYPE_SNAPSHOT:
+			str = "snapshot";
+			break;
+		default:
+			abort();
+		}
+		(void) snprintf(propbuf, proplen, "%s", str);
+		break;
+
+	case ZFS_PROP_MOUNTED:
+		/*
+		 * The 'mounted' property is a pseudo-property that described
+		 * whether the filesystem is currently mounted.  Even though
+		 * it's a boolean value, the typical values of "on" and "off"
+		 * don't make sense, so we translate to "yes" and "no".
+		 */
+		if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
+		    src, &source, &val) != 0)
+			return (-1);
+		if (val)
+			(void) strlcpy(propbuf, "yes", proplen);
+		else
+			(void) strlcpy(propbuf, "no", proplen);
+		break;
+
+	case ZFS_PROP_NAME:
+		/*
+		 * The 'name' property is a pseudo-property derived from the
+		 * dataset name.  It is presented as a real property to simplify
+		 * consumers.
+		 */
+		(void) strlcpy(propbuf, zhp->zfs_name, proplen);
+		break;
+
+	case ZFS_PROP_MLSLABEL:
+		{
+#ifdef sun
+			m_label_t *new_sl = NULL;
+			char *ascii = NULL;	/* human readable label */
+
+			(void) strlcpy(propbuf,
+			    getprop_string(zhp, prop, &source), proplen);
+
+			if (literal || (strcasecmp(propbuf,
+			    ZFS_MLSLABEL_DEFAULT) == 0))
+				break;
+
+			/*
+			 * Try to translate the internal hex string to
+			 * human-readable output.  If there are any
+			 * problems just use the hex string.
+			 */
+
+			if (str_to_label(propbuf, &new_sl, MAC_LABEL,
+			    L_NO_CORRECTION, NULL) == -1) {
+				m_label_free(new_sl);
+				break;
+			}
+
+			if (label_to_str(new_sl, &ascii, M_LABEL,
+			    DEF_NAMES) != 0) {
+				if (ascii)
+					free(ascii);
+				m_label_free(new_sl);
+				break;
+			}
+			m_label_free(new_sl);
+
+			(void) strlcpy(propbuf, ascii, proplen);
+			free(ascii);
+#else	/* !sun */
+			/* MLS labels are not supported off Solaris. */
+			propbuf[0] = '\0';
+#endif	/* !sun */
+		}
+		break;
+
+	case ZFS_PROP_GUID:
+		/*
+		 * GUIDs are stored as numbers, but they are identifiers.
+		 * We don't want them to be pretty printed, because pretty
+		 * printing mangles the ID into a truncated and useless value.
+		 */
+		if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
+			return (-1);
+		(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
+		break;
+
+	default:
+		/* Generic handling keyed off the property's declared type. */
+		switch (zfs_prop_get_type(prop)) {
+		case PROP_TYPE_NUMBER:
+			if (get_numeric_property(zhp, prop, src,
+			    &source, &val) != 0)
+				return (-1);
+			if (literal)
+				(void) snprintf(propbuf, proplen, "%llu",
+				    (u_longlong_t)val);
+			else
+				zfs_nicenum(val, propbuf, proplen);
+			break;
+
+		case PROP_TYPE_STRING:
+			(void) strlcpy(propbuf,
+			    getprop_string(zhp, prop, &source), proplen);
+			break;
+
+		case PROP_TYPE_INDEX:
+			if (get_numeric_property(zhp, prop, src,
+			    &source, &val) != 0)
+				return (-1);
+			if (zfs_prop_index_to_string(prop, val, &strval) != 0)
+				return (-1);
+			(void) strlcpy(propbuf, strval, proplen);
+			break;
+
+		default:
+			abort();
+		}
+	}
+
+	get_source(zhp, src, source, statbuf, statlen);
+
+	return (0);
+}
+
+/*
+ * Utility function to get the given numeric property.  Does no validation that
+ * the given property is the appropriate type; should only be used with
+ * hard-coded property types.
+ */
+uint64_t
+zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
+{
+	uint64_t result;
+	char *src;
+
+	/* Errors are ignored; 'result' is whatever the lookup produced. */
+	(void) get_numeric_property(zhp, prop, NULL, &src, &result);
+
+	return (result);
+}
+
+/*
+ * Set a numeric property by formatting it as a decimal string and passing
+ * it through zfs_prop_set().  Returns zfs_prop_set()'s result.
+ */
+int
+zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
+{
+	char buf[64];
+
+	/* Use an unsigned cast to match "%llu" (was (longlong_t)). */
+	(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)val);
+	return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
+}
+
+/*
+ * Similar to zfs_prop_get(), but returns the value as an integer.
+ */
+int
+zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
+    zprop_source_t *src, char *statbuf, size_t statlen)
+{
+	char *source;
+
+	/* Fail if the property does not apply to this dataset type. */
+	if (!zfs_prop_valid_for_type(prop, zhp->zfs_type))
+		return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
+		    dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
+		    zfs_prop_to_name(prop)));
+
+	if (src != NULL)
+		*src = ZPROP_SRC_NONE;
+
+	if (get_numeric_property(zhp, prop, src, &source, value) != 0)
+		return (-1);
+
+	get_source(zhp, src, source, statbuf, statlen);
+
+	return (0);
+}
+
+/*
+ * Map a numeric uid/gid to its SID, split into a domain string (*domainp)
+ * and RID (*ridp).  Only implemented on Solaris (idmap); this path must
+ * never be reached elsewhere.
+ *
+ * Returns 0 on success or an errno-style error code.
+ */
+static int
+idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
+    char **domainp, idmap_rid_t *ridp)
+{
+#ifdef sun
+	idmap_get_handle_t *get_hdl = NULL;
+	idmap_stat status;
+	int err = EINVAL;
+
+	if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
+		goto out;
+
+	if (isuser) {
+		err = idmap_get_sidbyuid(get_hdl, id,
+		    IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
+	} else {
+		err = idmap_get_sidbygid(get_hdl, id,
+		    IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
+	}
+	if (err == IDMAP_SUCCESS &&
+	    idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
+	    status == IDMAP_SUCCESS)
+		err = 0;
+	else
+		err = EINVAL;
+out:
+	if (get_hdl)
+		idmap_get_destroy(get_hdl);
+	return (err);
+#else	/* !sun */
+	assert(!"invalid code path");
+	/*
+	 * Unreachable in debug builds; return an error (rather than falling
+	 * off the end of a non-void function) when NDEBUG disables assert().
+	 */
+	return (EINVAL);
+#endif	/* !sun */
+}
+
+/*
+ * convert the propname into parameters needed by kernel
+ * Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
+ * Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
+ */
+static int
+userquota_propname_decode(const char *propname, boolean_t zoned,
+    zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
+{
+	zfs_userquota_prop_t type;
+	char *cp, *end;
+	char *numericsid = NULL;
+	boolean_t isuser;
+
+	domain[0] = '\0';
+
+	/* Figure out the property type ({user|group}{quota|space}) */
+	for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
+		if (strncmp(propname, zfs_userquota_prop_prefixes[type],
+		    strlen(zfs_userquota_prop_prefixes[type])) == 0)
+			break;
+	}
+	if (type == ZFS_NUM_USERQUOTA_PROPS)
+		return (EINVAL);
+	*typep = type;
+
+	isuser = (type == ZFS_PROP_USERQUOTA ||
+	    type == ZFS_PROP_USERUSED);
+
+	cp = strchr(propname, '@') + 1;
+
+	if (strchr(cp, '@')) {
+#ifdef sun
+		/*
+		 * It's a SID name (eg "user@domain") that needs to be
+		 * turned into S-1-domainID-RID.
+		 */
+		directory_error_t e;
+		if (zoned && getzoneid() == GLOBAL_ZONEID)
+			return (ENOENT);
+		if (isuser) {
+			e = directory_sid_from_user_name(NULL,
+			    cp, &numericsid);
+		} else {
+			e = directory_sid_from_group_name(NULL,
+			    cp, &numericsid);
+		}
+		if (e != NULL) {
+			directory_error_free(e);
+			return (ENOENT);
+		}
+		if (numericsid == NULL)
+			return (ENOENT);
+		cp = numericsid;
+		/* will be further decoded below */
+#else	/* !sun */
+		return (ENOENT);
+#endif	/* !sun */
+	}
+
+	if (strncmp(cp, "S-1-", 4) == 0) {
+		/* It's a numeric SID (eg "S-1-234-567-89") */
+		(void) strlcpy(domain, cp, domainlen);
+		cp = strrchr(domain, '-');
+		*cp = '\0';
+		cp++;
+
+		errno = 0;
+		*ridp = strtoull(cp, &end, 10);
+		if (numericsid) {
+			free(numericsid);
+			numericsid = NULL;
+		}
+		if (errno != 0 || *end != '\0')
+			return (EINVAL);
+	} else if (!isdigit(*cp)) {
+		/*
+		 * It's a user/group name (eg "user") that needs to be
+		 * turned into a uid/gid
+		 */
+		if (zoned && getzoneid() == GLOBAL_ZONEID)
+			return (ENOENT);
+		if (isuser) {
+			struct passwd *pw;
+			pw = getpwnam(cp);
+			if (pw == NULL)
+				return (ENOENT);
+			*ridp = pw->pw_uid;
+		} else {
+			struct group *gr;
+			gr = getgrnam(cp);
+			if (gr == NULL)
+				return (ENOENT);
+			*ridp = gr->gr_gid;
+		}
+	} else {
+		/* It's a user/group ID (eg "12345"). */
+		uid_t id = strtoul(cp, &end, 10);
+		idmap_rid_t rid;
+		char *mapdomain;
+
+		if (*end != '\0')
+			return (EINVAL);
+		if (id > MAXUID) {
+			/* It's an ephemeral ID. */
+			if (idmap_id_to_numeric_domain_rid(id, isuser,
+			    &mapdomain, &rid) != 0)
+				return (ENOENT);
+			(void) strlcpy(domain, mapdomain, domainlen);
+			*ridp = rid;
+		} else {
+			*ridp = id;
+		}
+	}
+
+	ASSERT3P(numericsid, ==, NULL);
+	return (0);
+}
+
+static int
+zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
+    uint64_t *propvalue, zfs_userquota_prop_t *typep)
+{
+	int err;
+	zfs_cmd_t zc = { 0 };
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	err = userquota_propname_decode(propname,
+	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
+	    typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
+	zc.zc_objset_type = *typep;
+	if (err)
+		return (err);
+
+	err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_USERSPACE_ONE, &zc);
+	if (err)
+		return (err);
+
+	*propvalue = zc.zc_cookie;
+	return (0);
+}
+
+int
+zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
+    uint64_t *propvalue)
+{
+	zfs_userquota_prop_t type;
+
+	return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
+	    &type));
+}
+
+int
+zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
+    char *propbuf, int proplen, boolean_t literal)
+{
+	int err;
+	uint64_t propvalue;
+	zfs_userquota_prop_t type;
+
+	err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
+	    &type);
+
+	if (err)
+		return (err);
+
+	if (literal) {
+		(void) snprintf(propbuf, proplen, "%llu", propvalue);
+	} else if (propvalue == 0 &&
+	    (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA)) {
+		(void) strlcpy(propbuf, "none", proplen);
+	} else {
+		zfs_nicenum(propvalue, propbuf, proplen);
+	}
+	return (0);
+}
+
+int
+zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
+    uint64_t *propvalue)
+{
+	int err;
+	zfs_cmd_t zc = { 0 };
+	const char *snapname;
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	snapname = strchr(propname, '@') + 1;
+	if (strchr(snapname, '@')) {
+		(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
+	} else {
+		/* snapname is the short name, append it to zhp's fsname */
+		char *cp;
+
+		(void) strlcpy(zc.zc_value, zhp->zfs_name,
+		    sizeof (zc.zc_value));
+		cp = strchr(zc.zc_value, '@');
+		if (cp != NULL)
+			*cp = '\0';
+		(void) strlcat(zc.zc_value, "@", sizeof (zc.zc_value));
+		(void) strlcat(zc.zc_value, snapname, sizeof (zc.zc_value));
+	}
+
+	err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_WRITTEN, &zc);
+	if (err)
+		return (err);
+
+	*propvalue = zc.zc_cookie;
+	return (0);
+}
+
+int
+zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
+    char *propbuf, int proplen, boolean_t literal)
+{
+	int err;
+	uint64_t propvalue;
+
+	err = zfs_prop_get_written_int(zhp, propname, &propvalue);
+
+	if (err)
+		return (err);
+
+	if (literal) {
+		(void) snprintf(propbuf, proplen, "%llu", propvalue);
+	} else {
+		zfs_nicenum(propvalue, propbuf, proplen);
+	}
+	return (0);
+}
+
+int
+zfs_get_snapused_int(zfs_handle_t *firstsnap, zfs_handle_t *lastsnap,
+    uint64_t *usedp)
+{
+	int err;
+	zfs_cmd_t zc = { 0 };
+
+	(void) strlcpy(zc.zc_name, lastsnap->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, firstsnap->zfs_name, sizeof (zc.zc_value));
+
+	err = ioctl(lastsnap->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_SNAPS, &zc);
+	if (err)
+		return (err);
+
+	*usedp = zc.zc_cookie;
+
+	return (0);
+}
+
+/*
+ * Returns the name of the given zfs handle.
+ */
+const char *
+zfs_get_name(const zfs_handle_t *zhp)
+{
+	return (zhp->zfs_name);
+}
+
+/*
+ * Returns the type of the given zfs handle.
+ */
+zfs_type_t
+zfs_get_type(const zfs_handle_t *zhp)
+{
+	return (zhp->zfs_type);
+}
+
+/*
+ * Is one dataset name a child dataset of another?
+ *
+ * Needs to handle these cases:
+ * Dataset 1	"a/foo"		"a/foo"		"a/foo"		"a/foo"
+ * Dataset 2	"a/fo"		"a/foobar"	"a/bar/baz"	"a/foo/bar"
+ * Descendant?	No.		No.		No.		Yes.
+ */
+static boolean_t
+is_descendant(const char *ds1, const char *ds2)
+{
+	size_t d1len = strlen(ds1);
+
+	/* ds2 can't be a descendant if it's smaller */
+	if (strlen(ds2) < d1len)
+		return (B_FALSE);
+
+	/* otherwise, compare strings and verify that there's a '/' char */
+	return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
+}
+
+/*
+ * Given a complete name, return just the portion that refers to the parent.
+ * Will return -1 if there is no parent (path is just the name of the
+ * pool).
+ */
+static int
+parent_name(const char *path, char *buf, size_t buflen)
+{
+	char *slashp;
+
+	(void) strlcpy(buf, path, buflen);
+
+	if ((slashp = strrchr(buf, '/')) == NULL)
+		return (-1);
+	*slashp = '\0';
+
+	return (0);
+}
+
+/*
+ * If accept_ancestor is false, then check to make sure that the given path has
+ * a parent, and that it exists.  If accept_ancestor is true, then find the
+ * closest existing ancestor for the given path.  In prefixlen return the
+ * length of already existing prefix of the given path.  We also fetch the
+ * 'zoned' property, which is used to validate property settings when creating
+ * new datasets.
+ */
+static int
+check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
+    boolean_t accept_ancestor, int *prefixlen)
+{
+	zfs_cmd_t zc = { 0 };
+	char parent[ZFS_MAXNAMELEN];
+	char *slash;
+	zfs_handle_t *zhp;
+	char errbuf[1024];
+	uint64_t is_zoned;
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);
+
+	/* get parent, and check to see if this is just a pool */
+	if (parent_name(path, parent, sizeof (parent)) != 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "missing dataset name"));
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+	}
+
+	/* check to see if the pool exists */
+	if ((slash = strchr(parent, '/')) == NULL)
+		slash = parent + strlen(parent);
+	(void) strncpy(zc.zc_name, parent, slash - parent);
+	zc.zc_name[slash - parent] = '\0';
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
+	    errno == ENOENT) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "no such pool '%s'"), zc.zc_name);
+		return (zfs_error(hdl, EZFS_NOENT, errbuf));
+	}
+
+	/* check to see if the parent dataset exists */
+	while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
+		if (errno == ENOENT && accept_ancestor) {
+			/*
+			 * Go deeper to find an ancestor, give up on top level.
+			 */
+			if (parent_name(parent, parent, sizeof (parent)) != 0) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "no such pool '%s'"), zc.zc_name);
+				return (zfs_error(hdl, EZFS_NOENT, errbuf));
+			}
+		} else if (errno == ENOENT) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "parent does not exist"));
+			return (zfs_error(hdl, EZFS_NOENT, errbuf));
+		} else
+			return (zfs_standard_error(hdl, errno, errbuf));
+	}
+
+	is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
+	if (zoned != NULL)
+		*zoned = is_zoned;
+
+	/* we are in a non-global zone, but parent is in the global zone */
+	if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
+		(void) zfs_standard_error(hdl, EPERM, errbuf);
+		zfs_close(zhp);
+		return (-1);
+	}
+
+	/* make sure parent is a filesystem */
+	if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "parent is not a filesystem"));
+		(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
+		zfs_close(zhp);
+		return (-1);
+	}
+
+	zfs_close(zhp);
+	if (prefixlen != NULL)
+		*prefixlen = strlen(parent);
+	return (0);
+}
+
+/*
+ * Finds whether the dataset of the given type(s) exists.
+ */
+boolean_t
+zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
+{
+	zfs_handle_t *zhp;
+
+	if (!zfs_validate_name(hdl, path, types, B_FALSE))
+		return (B_FALSE);
+
+	/*
+	 * Try to get stats for the dataset, which will tell us if it exists.
+	 */
+	if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
+		int ds_type = zhp->zfs_type;
+
+		zfs_close(zhp);
+		if (types & ds_type)
+			return (B_TRUE);
+	}
+	return (B_FALSE);
+}
+
+/*
+ * Given a path to 'target', create all the ancestors between
+ * the prefixlen portion of the path, and the target itself.
+ * Fail if the initial prefixlen-ancestor does not already exist.
+ */
+int
+create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
+{
+	zfs_handle_t *h;
+	char *cp;
+	const char *opname;
+
+	/* make sure prefix exists */
+	cp = target + prefixlen;
+	if (*cp != '/') {
+		assert(strchr(cp, '/') == NULL);
+		h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+	} else {
+		*cp = '\0';
+		h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+		*cp = '/';
+	}
+	if (h == NULL)
+		return (-1);
+	zfs_close(h);
+
+	/*
+	 * Attempt to create, mount, and share any ancestor filesystems,
+	 * up to the prefixlen-long one.
+	 */
+	for (cp = target + prefixlen + 1;
+	    cp = strchr(cp, '/'); *cp = '/', cp++) {
+		char *logstr;
+
+		*cp = '\0';
+
+		h = make_dataset_handle(hdl, target);
+		if (h) {
+			/* it already exists, nothing to do here */
+			zfs_close(h);
+			continue;
+		}
+
+		logstr = hdl->libzfs_log_str;
+		hdl->libzfs_log_str = NULL;
+		if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
+		    NULL) != 0) {
+			hdl->libzfs_log_str = logstr;
+			opname = dgettext(TEXT_DOMAIN, "create");
+			goto ancestorerr;
+		}
+
+		hdl->libzfs_log_str = logstr;
+		h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
+		if (h == NULL) {
+			opname = dgettext(TEXT_DOMAIN, "open");
+			goto ancestorerr;
+		}
+
+		if (zfs_mount(h, NULL, 0) != 0) {
+			opname = dgettext(TEXT_DOMAIN, "mount");
+			goto ancestorerr;
+		}
+
+		if (zfs_share(h) != 0) {
+			opname = dgettext(TEXT_DOMAIN, "share");
+			goto ancestorerr;
+		}
+
+		zfs_close(h);
+	}
+
+	return (0);
+
+ancestorerr:
+	zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+	    "failed to %s ancestor '%s'"), opname, target);
+	return (-1);
+}
+
+/*
+ * Creates non-existing ancestors of the given path.
+ */
+int
+zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
+{
+	int prefix;
+	char *path_copy;
+	int rc;
+
+	if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
+		return (-1);
+
+	if ((path_copy = strdup(path)) != NULL) {
+		rc = create_parents(hdl, path_copy, prefix);
+		free(path_copy);
+	}
+	if (path_copy == NULL || rc != 0)
+		return (-1);
+
+	return (0);
+}
+
+/*
+ * Create a new filesystem or volume.
+ */
+int
+zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
+    nvlist_t *props)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret;
+	uint64_t size = 0;
+	uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
+	char errbuf[1024];
+	uint64_t zoned;
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot create '%s'"), path);
+
+	/* validate the path, taking care to note the extended error message */
+	if (!zfs_validate_name(hdl, path, type, B_TRUE))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+	/* validate parents exist */
+	if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
+		return (-1);
+
+	/*
+	 * The failure modes when creating a dataset of a different type over
+	 * one that already exists is a little strange.  In particular, if you
+	 * try to create a dataset on top of an existing dataset, the ioctl()
+	 * will return ENOENT, not EEXIST.  To prevent this from happening, we
+	 * first try to see if the dataset exists.
+	 */
+	(void) strlcpy(zc.zc_name, path, sizeof (zc.zc_name));
+	if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset already exists"));
+		return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+	}
+
+	if (type == ZFS_TYPE_VOLUME)
+		zc.zc_objset_type = DMU_OST_ZVOL;
+	else
+		zc.zc_objset_type = DMU_OST_ZFS;
+
+	if (props && (props = zfs_valid_proplist(hdl, type, props,
+	    zoned, NULL, errbuf)) == 0)
+		return (-1);
+
+	if (type == ZFS_TYPE_VOLUME) {
+		/*
+		 * If we are creating a volume, the size and block size must
+		 * satisfy a few restraints.  First, the blocksize must be a
+		 * valid block size between SPA_{MIN,MAX}BLOCKSIZE.  Second, the
+		 * volsize must be a multiple of the block size, and cannot be
+		 * zero.
+		 */
+		if (props == NULL || nvlist_lookup_uint64(props,
+		    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
+			nvlist_free(props);
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "missing volume size"));
+			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+		}
+
+		if ((ret = nvlist_lookup_uint64(props,
+		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
+		    &blocksize)) != 0) {
+			if (ret == ENOENT) {
+				blocksize = zfs_prop_default_numeric(
+				    ZFS_PROP_VOLBLOCKSIZE);
+			} else {
+				nvlist_free(props);
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "missing volume block size"));
+				return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+			}
+		}
+
+		if (size == 0) {
+			nvlist_free(props);
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "volume size cannot be zero"));
+			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+		}
+
+		if (size % blocksize != 0) {
+			nvlist_free(props);
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "volume size must be a multiple of volume block "
+			    "size"));
+			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+		}
+	}
+
+	if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0)
+		return (-1);
+	nvlist_free(props);
+
+	/* create the dataset */
+	ret = zfs_ioctl(hdl, ZFS_IOC_CREATE, &zc);
+
+	zcmd_free_nvlists(&zc);
+
+	/* check for failure */
+	if (ret != 0) {
+		char parent[ZFS_MAXNAMELEN];
+		(void) parent_name(path, parent, sizeof (parent));
+
+		switch (errno) {
+		case ENOENT:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "no such parent '%s'"), parent);
+			return (zfs_error(hdl, EZFS_NOENT, errbuf));
+
+		case EINVAL:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "parent '%s' is not a filesystem"), parent);
+			return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+
+		case EDOM:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "volume block size must be power of 2 from "
+			    "%u to %uk"),
+			    (uint_t)SPA_MINBLOCKSIZE,
+			    (uint_t)SPA_MAXBLOCKSIZE >> 10);
+
+			return (zfs_error(hdl, EZFS_BADPROP, errbuf));
+
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded to set this "
+			    "property or value"));
+			return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
+#ifdef _ILP32
+		case EOVERFLOW:
+			/*
+			 * This platform can't address a volume this big.
+			 */
+			if (type == ZFS_TYPE_VOLUME)
+				return (zfs_error(hdl, EZFS_VOLTOOBIG,
+				    errbuf));
+#endif
+			/* FALLTHROUGH */
+		default:
+			return (zfs_standard_error(hdl, errno, errbuf));
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Destroys the given dataset.  The caller must make sure that the filesystem
+ * isn't mounted, and that there are no active dependents. If the file system
+ * does not exist this function does nothing.
+ */
+int
+zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
+{
+	zfs_cmd_t zc = { 0 };
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	if (ZFS_IS_VOLUME(zhp)) {
+		zc.zc_objset_type = DMU_OST_ZVOL;
+	} else {
+		zc.zc_objset_type = DMU_OST_ZFS;
+	}
+
+	zc.zc_defer_destroy = defer;
+	if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY, &zc) != 0 &&
+	    errno != ENOENT) {
+		return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
+		    dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
+		    zhp->zfs_name));
+	}
+
+	remove_mountpoint(zhp);
+
+	return (0);
+}
+
/* Context threaded through zfs_check_snap_cb() while collecting snapshots. */
struct destroydata {
	nvlist_t *nvl;		/* accumulates full names of matching snapshots */
	const char *snapname;	/* short snapshot name being searched for */
};
+
+static int
+zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
+{
+	struct destroydata *dd = arg;
+	zfs_handle_t *szhp;
+	char name[ZFS_MAXNAMELEN];
+	int rv = 0;
+
+	(void) snprintf(name, sizeof (name),
+	    "%s@%s", zhp->zfs_name, dd->snapname);
+
+	szhp = make_dataset_handle(zhp->zfs_hdl, name);
+	if (szhp) {
+		verify(nvlist_add_boolean(dd->nvl, name) == 0);
+		zfs_close(szhp);
+	}
+
+	rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd);
+	zfs_close(zhp);
+	return (rv);
+}
+
+/*
+ * Destroys all snapshots with the given name in zhp & descendants.
+ */
+int
+zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
+{
+	int ret;
+	struct destroydata dd = { 0 };
+
+	dd.snapname = snapname;
+	verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0);
+	(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);
+
+	if (nvlist_next_nvpair(dd.nvl, NULL) == NULL) {
+		ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
+		    dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
+		    zhp->zfs_name, snapname);
+	} else {
+		ret = zfs_destroy_snaps_nvl(zhp, dd.nvl, defer);
+	}
+	nvlist_free(dd.nvl);
+	return (ret);
+}
+
+/*
+ * Destroys all the snapshots named in the nvlist.  They must be underneath
+ * the zhp (either snapshots of it, or snapshots of its descendants).
+ */
+int
+zfs_destroy_snaps_nvl(zfs_handle_t *zhp, nvlist_t *snaps, boolean_t defer)
+{
+	int ret;
+	zfs_cmd_t zc = { 0 };
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	if (zcmd_write_src_nvlist(zhp->zfs_hdl, &zc, snaps) != 0)
+		return (-1);
+	zc.zc_defer_destroy = defer;
+
+	ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY_SNAPS_NVL, &zc);
+	if (ret != 0) {
+		char errbuf[1024];
+
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot destroy snapshots in %s"), zc.zc_name);
+
+		switch (errno) {
+		case EEXIST:
+			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+			    "snapshot is cloned"));
+			return (zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf));
+
+		default:
+			return (zfs_standard_error(zhp->zfs_hdl, errno,
+			    errbuf));
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Clones the given dataset.  The target must be of the same type as the source.
+ */
+int
+zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
+{
+	zfs_cmd_t zc = { 0 };
+	char parent[ZFS_MAXNAMELEN];
+	int ret;
+	char errbuf[1024];
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zfs_type_t type;
+	uint64_t zoned;
+
+	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot create '%s'"), target);
+
+	/* validate the target/clone name */
+	if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+	/* validate parents exist */
+	if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
+		return (-1);
+
+	(void) parent_name(target, parent, sizeof (parent));
+
+	/* do the clone */
+	if (ZFS_IS_VOLUME(zhp)) {
+		zc.zc_objset_type = DMU_OST_ZVOL;
+		type = ZFS_TYPE_VOLUME;
+	} else {
+		zc.zc_objset_type = DMU_OST_ZFS;
+		type = ZFS_TYPE_FILESYSTEM;
+	}
+
+	if (props) {
+		if ((props = zfs_valid_proplist(hdl, type, props, zoned,
+		    zhp, errbuf)) == NULL)
+			return (-1);
+
+		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
+			nvlist_free(props);
+			return (-1);
+		}
+
+		nvlist_free(props);
+	}
+
+	(void) strlcpy(zc.zc_name, target, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, zhp->zfs_name, sizeof (zc.zc_value));
+	ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_CREATE, &zc);
+
+	zcmd_free_nvlists(&zc);
+
+	if (ret != 0) {
+		switch (errno) {
+
+		case ENOENT:
+			/*
+			 * The parent doesn't exist.  We should have caught this
+			 * above, but there may a race condition that has since
+			 * destroyed the parent.
+			 *
+			 * At this point, we don't know whether it's the source
+			 * that doesn't exist anymore, or whether the target
+			 * dataset doesn't exist.
+			 */
+			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+			    "no such parent '%s'"), parent);
+			return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
+
+		case EXDEV:
+			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+			    "source and target pools differ"));
+			return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
+			    errbuf));
+
+		default:
+			return (zfs_standard_error(zhp->zfs_hdl, errno,
+			    errbuf));
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * Promotes the given clone fs to be the clone parent.
+ */
+int
+zfs_promote(zfs_handle_t *zhp)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zfs_cmd_t zc = { 0 };
+	char parent[MAXPATHLEN];
+	int ret;
+	char errbuf[1024];
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot promote '%s'"), zhp->zfs_name);
+
+	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "snapshots can not be promoted"));
+		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+	}
+
+	(void) strlcpy(parent, zhp->zfs_dmustats.dds_origin, sizeof (parent));
+	if (parent[0] == '\0') {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "not a cloned filesystem"));
+		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+	}
+
+	(void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_origin,
+	    sizeof (zc.zc_value));
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	ret = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
+
+	if (ret != 0) {
+		int save_errno = errno;
+
+		switch (save_errno) {
+		case EEXIST:
+			/* There is a conflicting snapshot name. */
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "conflicting snapshot '%s' from parent '%s'"),
+			    zc.zc_string, parent);
+			return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+
+		default:
+			return (zfs_standard_error(hdl, save_errno, errbuf));
+		}
+	}
+	return (ret);
+}
+
+/*
+ * Takes a snapshot of the given dataset.
+ */
+int
+zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
+    nvlist_t *props)
+{
+	const char *delim;
+	char parent[ZFS_MAXNAMELEN];
+	zfs_handle_t *zhp;
+	zfs_cmd_t zc = { 0 };
+	int ret;
+	char errbuf[1024];
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot snapshot '%s'"), path);
+
+	/* validate the target name */
+	if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+	if (props) {
+		if ((props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
+		    props, B_FALSE, NULL, errbuf)) == NULL)
+			return (-1);
+
+		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
+			nvlist_free(props);
+			return (-1);
+		}
+
+		nvlist_free(props);
+	}
+
+	/* make sure the parent exists and is of the appropriate type */
+	delim = strchr(path, '@');
+	(void) strncpy(parent, path, delim - path);
+	parent[delim - path] = '\0';
+
+	if ((zhp = zfs_open(hdl, parent, ZFS_TYPE_FILESYSTEM |
+	    ZFS_TYPE_VOLUME)) == NULL) {
+		zcmd_free_nvlists(&zc);
+		return (-1);
+	}
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, delim+1, sizeof (zc.zc_value));
+	if (ZFS_IS_VOLUME(zhp))
+		zc.zc_objset_type = DMU_OST_ZVOL;
+	else
+		zc.zc_objset_type = DMU_OST_ZFS;
+	zc.zc_cookie = recursive;
+	ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SNAPSHOT, &zc);
+
+	zcmd_free_nvlists(&zc);
+
+	/*
+	 * if it was recursive, the one that actually failed will be in
+	 * zc.zc_name.
+	 */
+	if (ret != 0) {
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value);
+		(void) zfs_standard_error(hdl, errno, errbuf);
+	}
+
+	zfs_close(zhp);
+
+	return (ret);
+}
+
+/*
+ * Destroy any more recent snapshots.  We invoke this callback on any dependents
+ * of the snapshot first.  If the 'cb_dependent' member is non-zero, then this
+ * is a dependent and we should just destroy it without checking the transaction
+ * group.
+ */
+typedef struct rollback_data {
+	const char	*cb_target;		/* the snapshot */
+	uint64_t	cb_create;		/* creation time reference */
+	boolean_t	cb_error;
+	boolean_t	cb_dependent;
+	boolean_t	cb_force;
+} rollback_data_t;
+
/*
 * Callback for zfs_rollback(): destroy each snapshot newer than the
 * rollback target, along with its dependents.  See the rollback_data
 * comment above for the cb_dependent protocol.  Always returns 0 so the
 * iteration continues; failures are accumulated in cbp->cb_error.
 */
static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
	rollback_data_t *cbp = data;

	if (!cbp->cb_dependent) {
		/*
		 * Only snapshots created after the target (by txg) are
		 * destroyed; the target itself is skipped.
		 */
		if (strcmp(zhp->zfs_name, cbp->cb_target) != 0 &&
		    zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT &&
		    zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) >
		    cbp->cb_create) {
			char *logstr;

			/* first destroy the snapshot's dependents (clones) */
			cbp->cb_dependent = B_TRUE;
			cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE,
			    rollback_destroy, cbp);
			cbp->cb_dependent = B_FALSE;

			/* suppress history logging for the implicit destroy */
			logstr = zhp->zfs_hdl->libzfs_log_str;
			zhp->zfs_hdl->libzfs_log_str = NULL;
			cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
			zhp->zfs_hdl->libzfs_log_str = logstr;
		}
	} else {
		/* We must destroy this clone; first unmount it */
		prop_changelist_t *clp;

		clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
		    cbp->cb_force ? MS_FORCE: 0);
		if (clp == NULL || changelist_prefix(clp) != 0) {
			cbp->cb_error = B_TRUE;
			zfs_close(zhp);
			return (0);
		}
		if (zfs_destroy(zhp, B_FALSE) != 0)
			cbp->cb_error = B_TRUE;
		else
			changelist_remove(clp, zhp->zfs_name);
		(void) changelist_postfix(clp);
		changelist_free(clp);
	}

	zfs_close(zhp);
	return (0);
}
+
+/*
+ * Given a dataset, rollback to a specific snapshot, discarding any
+ * data changes since then and making it the active dataset.
+ *
+ * Any snapshots more recent than the target are destroyed, along with
+ * their dependents.
+ */
+int
+zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
+{
+	rollback_data_t cb = { 0 };
+	int err;
+	zfs_cmd_t zc = { 0 };
+	boolean_t restore_resv = 0;
+	uint64_t old_volsize, new_volsize;
+	zfs_prop_t resv_prop;
+
+	assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
+	    zhp->zfs_type == ZFS_TYPE_VOLUME);
+
+	/*
+	 * Destroy all recent snapshots and their dependents.
+	 */
+	cb.cb_force = force;
+	cb.cb_target = snap->zfs_name;
+	cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
+	(void) zfs_iter_children(zhp, rollback_destroy, &cb);
+
+	if (cb.cb_error)
+		return (-1);
+
+	/*
+	 * Now that we have verified that the snapshot is the latest,
+	 * rollback to the given snapshot.
+	 */
+
+	if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
+		if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
+			return (-1);
+		old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
+		restore_resv =
+		    (old_volsize == zfs_prop_get_int(zhp, resv_prop));
+	}
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	if (ZFS_IS_VOLUME(zhp))
+		zc.zc_objset_type = DMU_OST_ZVOL;
+	else
+		zc.zc_objset_type = DMU_OST_ZFS;
+
+	/*
+	 * We rely on zfs_iter_children() to verify that there are no
+	 * newer snapshots for the given dataset.  Therefore, we can
+	 * simply pass the name on to the ioctl() call.  There is still
+	 * an unlikely race condition where the user has taken a
+	 * snapshot since we verified that this was the most recent.
+	 *
+	 */
+	if ((err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_ROLLBACK, &zc)) != 0) {
+		(void) zfs_standard_error_fmt(zhp->zfs_hdl, errno,
+		    dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
+		    zhp->zfs_name);
+		return (err);
+	}
+
+	/*
+	 * For volumes, if the pre-rollback volsize matched the pre-
+	 * rollback reservation and the volsize has changed then set
+	 * the reservation property to the post-rollback volsize.
+	 * Make a new handle since the rollback closed the dataset.
+	 */
+	if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
+	    (zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
+		if (restore_resv) {
+			new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
+			if (old_volsize != new_volsize)
+				err = zfs_prop_set_int(zhp, resv_prop,
+				    new_volsize);
+		}
+		zfs_close(zhp);
+	}
+	return (err);
+}
+
+/*
+ * Renames the given dataset.
+ */
+int
+zfs_rename(zfs_handle_t *zhp, const char *source, const char *target,
+    renameflags_t flags)
+{
+	int ret;
+	zfs_cmd_t zc = { 0 };
+	char *delim;
+	prop_changelist_t *cl = NULL;
+	zfs_handle_t *zhrp = NULL;
+	char *parentname = NULL;
+	char parent[ZFS_MAXNAMELEN];
+	char property[ZFS_MAXPROPLEN];
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	char errbuf[1024];
+
+	/* if we have the same exact name, just return success */
+	if (strcmp(zhp->zfs_name, target) == 0)
+		return (0);
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot rename to '%s'"), target);
+
+	if (source != NULL) {
+		/*
+		 * This is recursive snapshots rename, put snapshot name
+		 * (that might not exist) into zfs_name.
+		 */
+		assert(flags.recurse);
+
+		/*
+		 * NOTE(review): this mutates zhp->zfs_name in place, so from
+		 * here on the handle refers to the snapshot, not the dataset.
+		 */
+		(void) strlcat(zhp->zfs_name, "@", sizeof(zhp->zfs_name));
+		(void) strlcat(zhp->zfs_name, source, sizeof(zhp->zfs_name));
+		zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
+	}
+
+	/*
+	 * Make sure the target name is valid
+	 */
+	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
+		if ((strchr(target, '@') == NULL) ||
+		    *target == '@') {
+			/*
+			 * Snapshot target name is abbreviated,
+			 * reconstruct full dataset name
+			 */
+			(void) strlcpy(parent, zhp->zfs_name,
+			    sizeof (parent));
+			delim = strchr(parent, '@');
+			/*
+			 * Keep the '@' from the source name when the target
+			 * omitted it, drop it when the target supplies one.
+			 */
+			if (strchr(target, '@') == NULL)
+				*(++delim) = '\0';
+			else
+				*delim = '\0';
+			(void) strlcat(parent, target, sizeof (parent));
+			target = parent;
+		} else {
+			/*
+			 * Make sure we're renaming within the same dataset.
+			 */
+			delim = strchr(target, '@');
+			if (strncmp(zhp->zfs_name, target, delim - target)
+			    != 0 || zhp->zfs_name[delim - target] != '@') {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "snapshots must be part of same "
+				    "dataset"));
+				return (zfs_error(hdl, EZFS_CROSSTARGET,
+				    errbuf));
+			}
+		}
+		if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
+			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+	} else {
+		/* Non-snapshot rename: recursion is only valid on snapshots. */
+		if (flags.recurse) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "recursive rename must be a snapshot"));
+			return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+		}
+
+		if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
+			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+
+		/* validate parents */
+		if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
+			return (-1);
+
+		/* make sure we're in the same pool */
+		verify((delim = strchr(target, '/')) != NULL);
+		if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
+		    zhp->zfs_name[delim - target] != '/') {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "datasets must be within same pool"));
+			return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
+		}
+
+		/* new name cannot be a child of the current dataset name */
+		if (is_descendant(zhp->zfs_name, target)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "New dataset name cannot be a descendant of "
+			    "current dataset name"));
+			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+		}
+	}
+
+	/* From here on, errors refer to the source name, not the target. */
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
+
+	if (getzoneid() == GLOBAL_ZONEID &&
+	    zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset is used in a non-global zone"));
+		return (zfs_error(hdl, EZFS_ZONED, errbuf));
+	}
+
+	/*
+	 * Avoid unmounting file systems with mountpoint property set to
+	 * 'legacy' or 'none' even if -u option is not given.
+	 */
+	if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
+	    !flags.recurse && !flags.nounmount &&
+	    zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
+	    sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
+	    (strcmp(property, "legacy") == 0 ||
+	     strcmp(property, "none") == 0)) {
+		flags.nounmount = B_TRUE;
+	}
+
+	if (flags.recurse) {
+
+		/*
+		 * Recursive snapshot rename: open the parent filesystem to
+		 * keep it (and its pool) busy while the ioctl runs.
+		 */
+		parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
+		if (parentname == NULL) {
+			ret = -1;
+			goto error;
+		}
+		delim = strchr(parentname, '@');
+		*delim = '\0';
+		zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET);
+		if (zhrp == NULL) {
+			ret = -1;
+			goto error;
+		}
+
+	} else {
+		/* Gather (and possibly unmount) everything affected. */
+		if ((cl = changelist_gather(zhp, ZFS_PROP_NAME,
+		    flags.nounmount ? CL_GATHER_DONT_UNMOUNT : 0,
+		    flags.forceunmount ? MS_FORCE : 0)) == NULL) {
+			return (-1);
+		}
+
+		if (changelist_haszonedchild(cl)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "child dataset with inherited mountpoint is used "
+			    "in a non-global zone"));
+			(void) zfs_error(hdl, EZFS_ZONED, errbuf);
+			goto error;
+		}
+
+		if ((ret = changelist_prefix(cl)) != 0)
+			goto error;
+	}
+
+	if (ZFS_IS_VOLUME(zhp))
+		zc.zc_objset_type = DMU_OST_ZVOL;
+	else
+		zc.zc_objset_type = DMU_OST_ZFS;
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
+
+	/* zc_cookie bit 0 = recursive, bit 1 = don't unmount */
+	zc.zc_cookie = flags.recurse ? 1 : 0;
+	if (flags.nounmount)
+		zc.zc_cookie |= 2;
+
+	if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
+		/*
+		 * if it was recursive, the one that actually failed will
+		 * be in zc.zc_name
+		 */
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot rename '%s'"), zc.zc_name);
+
+		if (flags.recurse && errno == EEXIST) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "a child dataset already has a snapshot "
+			    "with the new name"));
+			(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
+		} else {
+			(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
+		}
+
+		/*
+		 * On failure, we still want to remount any filesystems that
+		 * were previously mounted, so we don't alter the system state.
+		 */
+		if (!flags.recurse)
+			(void) changelist_postfix(cl);
+	} else {
+		if (!flags.recurse) {
+			changelist_rename(cl, zfs_get_name(zhp), target);
+			ret = changelist_postfix(cl);
+		}
+	}
+
+error:
+	/* Common cleanup for both success and failure paths. */
+	if (parentname) {
+		free(parentname);
+	}
+	if (zhrp) {
+		zfs_close(zhrp);
+	}
+	if (cl) {
+		changelist_free(cl);
+	}
+	return (ret);
+}
+
+/*
+ * Return the cached nvlist of user (non-native) properties for this handle.
+ * The list is owned by the handle; callers must not free it.
+ */
+nvlist_t *
+zfs_get_user_props(zfs_handle_t *zhp)
+{
+	return (zhp->zfs_user_props);
+}
+
+/*
+ * Return the received-property nvlist for this handle, fetching it from
+ * the kernel on first use.  Returns NULL if the fetch fails.  The list
+ * is owned by the handle; callers must not free it.
+ */
+nvlist_t *
+zfs_get_recvd_props(zfs_handle_t *zhp)
+{
+	if (zhp->zfs_recvd_props == NULL &&
+	    get_recvd_props_ioctl(zhp) != 0)
+		return (NULL);
+	return (zhp->zfs_recvd_props);
+}
+
+/*
+ * This function is used by 'zfs list' to determine the exact set of columns to
+ * display, and their maximum widths.  This does two main things:
+ *
+ *      - If this is a list of all properties, then expand the list to include
+ *        all native properties, and set a flag so that for each dataset we look
+ *        for new unique user properties and add them to the list.
+ *
+ *      - For non fixed-width properties, keep track of the maximum width seen
+ *        so that we can size the column appropriately. If the user has
+ *        requested received property values, we also need to compute the width
+ *        of the RECEIVED column.
+ */
+int
+zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zprop_list_t *entry;
+	zprop_list_t **last, **start;
+	nvlist_t *userprops, *propval;
+	nvpair_t *elem;
+	char *strval;
+	char buf[ZFS_MAXPROPLEN];
+
+	/* Expand "all" into the full set of native properties. */
+	if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
+		return (-1);
+
+	userprops = zfs_get_user_props(zhp);
+
+	entry = *plp;
+	if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
+		/*
+		 * Go through and add any user properties as necessary.  We
+		 * start by incrementing our list pointer to the first
+		 * non-native property.
+		 */
+		start = plp;
+		while (*start != NULL) {
+			if ((*start)->pl_prop == ZPROP_INVAL)
+				break;
+			start = &(*start)->pl_next;
+		}
+
+		elem = NULL;
+		while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
+			/*
+			 * See if we've already found this property in our list.
+			 */
+			for (last = start; *last != NULL;
+			    last = &(*last)->pl_next) {
+				if (strcmp((*last)->pl_user_prop,
+				    nvpair_name(elem)) == 0)
+					break;
+			}
+
+			/* Not seen before: append a new entry at the tail. */
+			if (*last == NULL) {
+				if ((entry = zfs_alloc(hdl,
+				    sizeof (zprop_list_t))) == NULL ||
+				    ((entry->pl_user_prop = zfs_strdup(hdl,
+				    nvpair_name(elem)))) == NULL {
+					free(entry);
+					return (-1);
+				}
+
+				entry->pl_prop = ZPROP_INVAL;
+				entry->pl_width = strlen(nvpair_name(elem));
+				entry->pl_all = B_TRUE;
+				*last = entry;
+			}
+		}
+	}
+
+	/*
+	 * Now go through and check the width of any non-fixed columns
+	 */
+	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+		if (entry->pl_fixed)
+			continue;
+
+		if (entry->pl_prop != ZPROP_INVAL) {
+			/* Native property: width of the current value. */
+			if (zfs_prop_get(zhp, entry->pl_prop,
+			    buf, sizeof (buf), NULL, NULL, 0, B_FALSE) == 0) {
+				if (strlen(buf) > entry->pl_width)
+					entry->pl_width = strlen(buf);
+			}
+			if (received && zfs_prop_get_recvd(zhp,
+			    zfs_prop_to_name(entry->pl_prop),
+			    buf, sizeof (buf), B_FALSE) == 0)
+				if (strlen(buf) > entry->pl_recvd_width)
+					entry->pl_recvd_width = strlen(buf);
+		} else {
+			/* User property: look the value up in the nvlist. */
+			if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop,
+			    &propval) == 0) {
+				verify(nvlist_lookup_string(propval,
+				    ZPROP_VALUE, &strval) == 0);
+				if (strlen(strval) > entry->pl_width)
+					entry->pl_width = strlen(strval);
+			}
+			if (received && zfs_prop_get_recvd(zhp,
+			    entry->pl_user_prop,
+			    buf, sizeof (buf), B_FALSE) == 0)
+				if (strlen(buf) > entry->pl_recvd_width)
+					entry->pl_recvd_width = strlen(buf);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Marshal an NFS share/unshare request into a zfs_cmd_t and hand it to
+ * the kernel via ZFS_IOC_SHARE.  Returns the raw ioctl() status.
+ */
+int
+zfs_deleg_share_nfs(libzfs_handle_t *hdl, char *dataset, char *path,
+    char *resource, void *export, void *sharetab,
+    int sharemax, zfs_share_op_t operation)
+{
+	zfs_cmd_t zc = { 0 };
+
+	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
+	if (resource != NULL)
+		(void) strlcpy(zc.zc_string, resource, sizeof (zc.zc_string));
+
+	zc.zc_share.z_exportdata = (uint64_t)(uintptr_t)export;
+	zc.zc_share.z_sharedata = (uint64_t)(uintptr_t)sharetab;
+	zc.zc_share.z_sharetype = operation;
+	zc.zc_share.z_sharemax = sharemax;
+
+	return (ioctl(hdl->libzfs_fd, ZFS_IOC_SHARE, &zc));
+}
+
+/*
+ * Remove from zhp->zfs_props every standard ZFS property whose entry in
+ * the given props table is B_FALSE.  User properties and unknown DSL
+ * properties are always kept.
+ */
+void
+zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
+{
+	nvpair_t *curr;
+
+	/*
+	 * Keep a reference to the props-table against which we prune the
+	 * properties.
+	 */
+	zhp->zfs_props_table = props;
+
+	curr = nvlist_next_nvpair(zhp->zfs_props, NULL);
+
+	while (curr) {
+		zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
+		/* Fetch the successor before curr is potentially removed. */
+		nvpair_t *next = nvlist_next_nvpair(zhp->zfs_props, curr);
+
+		/*
+		 * User properties will result in ZPROP_INVAL, and since we
+		 * only know how to prune standard ZFS properties, we always
+		 * leave these in the list.  This can also happen if we
+		 * encounter an unknown DSL property (when running older
+		 * software, for example).
+		 */
+		if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE)
+			(void) nvlist_remove(zhp->zfs_props,
+			    nvpair_name(curr), nvpair_type(curr));
+		curr = next;
+	}
+}
+
+#ifdef sun
+/*
+ * Common implementation for the zfs_smb_acl_*() wrappers: marshal the
+ * requested SMB ACL operation into a zfs_cmd_t (for renames, the old and
+ * new resource names travel in an nvlist) and issue ZFS_IOC_SMB_ACL.
+ * Returns the raw ioctl() status, or -1 on local failure.
+ *
+ * Fixes over the previous revision: the nvlist_alloc() failure path
+ * returned NULL from an int function, and the nvlist was leaked on the
+ * nvlist_add_string() error returns.
+ */
+static int
+zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
+    zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
+{
+	zfs_cmd_t zc = { 0 };
+	nvlist_t *nvlist = NULL;
+	int error;
+
+	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
+	zc.zc_cookie = (uint64_t)cmd;
+
+	if (cmd == ZFS_SMB_ACL_RENAME) {
+		if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
+			(void) no_memory(hdl);
+			return (-1);
+		}
+	}
+
+	switch (cmd) {
+	case ZFS_SMB_ACL_ADD:
+	case ZFS_SMB_ACL_REMOVE:
+		(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
+		break;
+	case ZFS_SMB_ACL_RENAME:
+		if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
+		    resource1) != 0) {
+			(void) no_memory(hdl);
+			nvlist_free(nvlist);
+			return (-1);
+		}
+		if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
+		    resource2) != 0) {
+			(void) no_memory(hdl);
+			nvlist_free(nvlist);
+			return (-1);
+		}
+		if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) {
+			nvlist_free(nvlist);
+			return (-1);
+		}
+		break;
+	case ZFS_SMB_ACL_PURGE:
+		break;
+	default:
+		return (-1);
+	}
+	error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
+	if (nvlist)
+		nvlist_free(nvlist);
+	return (error);
+}
+
+/* Add one SMB ACL entry (resource) on the given dataset/path. */
+int
+zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
+    char *path, char *resource)
+{
+	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
+	    resource, NULL));
+}
+
+/* Remove one SMB ACL entry (resource) from the given dataset/path. */
+int
+zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
+    char *path, char *resource)
+{
+	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
+	    resource, NULL));
+}
+
+/* Remove all SMB ACL entries on the given dataset/path. */
+int
+zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
+{
+	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
+	    NULL, NULL));
+}
+
+/* Rename an SMB ACL entry from oldname to newname on the dataset/path. */
+int
+zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
+    char *oldname, char *newname)
+{
+	return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
+	    oldname, newname));
+}
+#endif	/* sun */
+
+/*
+ * Iterate over the user/group space accounting records of the given type
+ * for this dataset, invoking func(arg, domain, rid, space) for each.
+ * Returns 0 on completion, the callback's nonzero return to abort early,
+ * or a standard libzfs error on ioctl failure.
+ */
+int
+zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
+    zfs_userspace_cb_t func, void *arg)
+{
+	zfs_cmd_t zc = { 0 };
+	zfs_useracct_t buf[100];	/* fetched in chunks of 100 records */
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	int ret;
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	zc.zc_objset_type = type;
+	zc.zc_nvlist_dst = (uintptr_t)buf;
+
+	/* The kernel cursor lives in zc across iterations. */
+	for (;;) {
+		zfs_useracct_t *zua = buf;
+
+		zc.zc_nvlist_dst_size = sizeof (buf);
+		if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
+			char errbuf[ZFS_MAXNAMELEN + 32];
+
+			(void) snprintf(errbuf, sizeof (errbuf),
+			    dgettext(TEXT_DOMAIN,
+			    "cannot get used/quota for %s"), zc.zc_name);
+			return (zfs_standard_error_fmt(hdl, errno, errbuf));
+		}
+		/* zero returned bytes means no records remain */
+		if (zc.zc_nvlist_dst_size == 0)
+			break;
+
+		while (zc.zc_nvlist_dst_size > 0) {
+			if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
+			    zua->zu_space)) != 0)
+				return (ret);
+			zua++;
+			zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Place a user hold (named "tag") on the snapshot zhp@snapname, optionally
+ * recursively on descendants.  A temporary hold is released automatically
+ * when cleanup_fd is closed.  dsobj/createtxg pin the specific snapshot
+ * version (not usable together with recursion).  Returns 0 on success,
+ * ENOENT if enoent_ok and the snapshot is missing, or a libzfs error.
+ */
+int
+zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
+    boolean_t recursive, boolean_t temphold, boolean_t enoent_ok,
+    int cleanup_fd, uint64_t dsobj, uint64_t createtxg)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+	ASSERT(!recursive || dsobj == 0);
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
+	/* Reject tags that would be silently truncated. */
+	if (strlcpy(zc.zc_string, tag, sizeof (zc.zc_string))
+	    >= sizeof (zc.zc_string))
+		return (zfs_error(hdl, EZFS_TAGTOOLONG, tag));
+	zc.zc_cookie = recursive;
+	zc.zc_temphold = temphold;
+	zc.zc_cleanup_fd = cleanup_fd;
+	zc.zc_sendobj = dsobj;
+	zc.zc_createtxg = createtxg;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_HOLD, &zc) != 0) {
+		char errbuf[ZFS_MAXNAMELEN+32];
+
+		/*
+		 * if it was recursive, the one that actually failed will be in
+		 * zc.zc_name.
+		 */
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot hold '%s@%s'"), zc.zc_name, snapname);
+		switch (errno) {
+		case E2BIG:
+			/*
+			 * Temporary tags wind up having the ds object id
+			 * prepended. So even if we passed the length check
+			 * above, it's still possible for the tag to wind
+			 * up being slightly too long.
+			 */
+			return (zfs_error(hdl, EZFS_TAGTOOLONG, errbuf));
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded"));
+			return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
+		case EINVAL:
+			return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+		case EEXIST:
+			return (zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf));
+		case ENOENT:
+			/* Missing snapshot is non-fatal if the caller asked. */
+			if (enoent_ok)
+				return (ENOENT);
+			/* FALLTHROUGH */
+		default:
+			return (zfs_standard_error_fmt(hdl, errno, errbuf));
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Release the user hold "tag" from the snapshot zhp@snapname, optionally
+ * recursively on descendants.  Returns 0 on success or a libzfs error.
+ */
+int
+zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
+    boolean_t recursive)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
+	/* Reject tags that would be silently truncated. */
+	if (strlcpy(zc.zc_string, tag, sizeof (zc.zc_string))
+	    >= sizeof (zc.zc_string))
+		return (zfs_error(hdl, EZFS_TAGTOOLONG, tag));
+	zc.zc_cookie = recursive;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_RELEASE, &zc) != 0) {
+		char errbuf[ZFS_MAXNAMELEN+32];
+
+		/*
+		 * if it was recursive, the one that actually failed will be in
+		 * zc.zc_name.
+		 */
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot release '%s' from '%s@%s'"), tag, zc.zc_name,
+		    snapname);
+		switch (errno) {
+		case ESRCH:
+			/* No such hold exists on the snapshot. */
+			return (zfs_error(hdl, EZFS_REFTAG_RELE, errbuf));
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded"));
+			return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
+		case EINVAL:
+			return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+		default:
+			return (zfs_standard_error_fmt(hdl, errno, errbuf));
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Fetch the delegated-permission (fsacl) nvlist for a filesystem or
+ * volume into *nvl.  The packed list is fetched into a heap buffer that
+ * grows on ENOMEM until the kernel's reported size fits.  The caller
+ * owns *nvl on success.  Returns 0 or a libzfs error.
+ */
+int
+zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	int nvsz = 2048;	/* initial guess at packed-list size */
+	void *nvbuf;
+	int err = 0;
+	char errbuf[ZFS_MAXNAMELEN+32];
+
+	assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
+	    zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
+
+tryagain:
+
+	nvbuf = malloc(nvsz);
+	if (nvbuf == NULL) {
+		err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
+		goto out;
+	}
+
+	zc.zc_nvlist_dst_size = nvsz;
+	zc.zc_nvlist_dst = (uintptr_t)nvbuf;
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, ZFS_MAXNAMELEN);
+
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
+		    zc.zc_name);
+		switch (errno) {
+		case ENOMEM:
+			/* Kernel told us the required size; retry with it. */
+			free(nvbuf);
+			nvsz = zc.zc_nvlist_dst_size;
+			goto tryagain;
+
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded"));
+			err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
+			break;
+		case EINVAL:
+			err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
+			break;
+		case ENOENT:
+			err = zfs_error(hdl, EZFS_NOENT, errbuf);
+			break;
+		default:
+			err = zfs_standard_error_fmt(hdl, errno, errbuf);
+			break;
+		}
+	} else {
+		/* success */
+		int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
+		if (rc) {
+			(void) snprintf(errbuf, sizeof (errbuf), dgettext(
+			    TEXT_DOMAIN, "cannot get permissions on '%s'"),
+			    zc.zc_name);
+			err = zfs_standard_error_fmt(hdl, rc, errbuf);
+		}
+	}
+
+	free(nvbuf);
+out:
+	return (err);
+}
+
+/*
+ * Set (un == B_FALSE) or unset (un == B_TRUE) the delegated-permission
+ * nvlist on a filesystem or volume via ZFS_IOC_SET_FSACL.  The nvlist is
+ * packed into a temporary buffer that the kernel copies in.  Returns 0
+ * or a libzfs error.
+ *
+ * Fix over the previous revision: the malloc() result was passed to
+ * nvlist_pack() without a NULL check.
+ */
+int
+zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	char *nvbuf;
+	char errbuf[ZFS_MAXNAMELEN+32];
+	size_t nvsz;
+	int err;
+
+	assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
+	    zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
+
+	err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
+	assert(err == 0);
+
+	nvbuf = malloc(nvsz);
+	if (nvbuf == NULL)
+		return (no_memory(hdl));
+
+	err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
+	assert(err == 0);
+
+	zc.zc_nvlist_src_size = nvsz;
+	zc.zc_nvlist_src = (uintptr_t)nvbuf;
+	zc.zc_perm_action = un;		/* nonzero means "remove" */
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
+		    zc.zc_name);
+		switch (errno) {
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded"));
+			err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
+			break;
+		case EINVAL:
+			err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
+			break;
+		case ENOENT:
+			err = zfs_error(hdl, EZFS_NOENT, errbuf);
+			break;
+		default:
+			err = zfs_standard_error_fmt(hdl, errno, errbuf);
+			break;
+		}
+	}
+
+	free(nvbuf);
+
+	return (err);
+}
+
+/*
+ * Fetch the nvlist of user holds on a snapshot into *nvl, growing the
+ * receive buffer on ENOMEM until the kernel's reported size fits.  The
+ * caller owns *nvl on success.  Returns 0 or a libzfs error.
+ */
+int
+zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	int nvsz = 2048;	/* initial guess at packed-list size */
+	void *nvbuf;
+	int err = 0;
+	char errbuf[ZFS_MAXNAMELEN+32];
+
+	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+
+tryagain:
+
+	nvbuf = malloc(nvsz);
+	if (nvbuf == NULL) {
+		err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
+		goto out;
+	}
+
+	zc.zc_nvlist_dst_size = nvsz;
+	zc.zc_nvlist_dst = (uintptr_t)nvbuf;
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, ZFS_MAXNAMELEN);
+
+	if (zfs_ioctl(hdl, ZFS_IOC_GET_HOLDS, &zc) != 0) {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
+		    zc.zc_name);
+		switch (errno) {
+		case ENOMEM:
+			/* Kernel told us the required size; retry with it. */
+			free(nvbuf);
+			nvsz = zc.zc_nvlist_dst_size;
+			goto tryagain;
+
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded"));
+			err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
+			break;
+		case EINVAL:
+			err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
+			break;
+		case ENOENT:
+			err = zfs_error(hdl, EZFS_NOENT, errbuf);
+			break;
+		default:
+			err = zfs_standard_error_fmt(hdl, errno, errbuf);
+			break;
+		}
+	} else {
+		/* success */
+		int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
+		if (rc) {
+			(void) snprintf(errbuf, sizeof (errbuf),
+			    dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
+			    zc.zc_name);
+			err = zfs_standard_error_fmt(hdl, rc, errbuf);
+		}
+	}
+
+	free(nvbuf);
+out:
+	return (err);
+}
+
+/*
+ * Compute the refreservation needed for a volume of the given size so
+ * that writes never fail with ENOSPC: the volume data (times the copies
+ * property) plus an estimate of the metadata (indirect-block) overhead.
+ */
+uint64_t
+zvol_volsize_to_reservation(uint64_t volsize, nvlist_t *props)
+{
+	uint64_t numdb;
+	uint64_t nblocks, volblocksize;
+	int ncopies;
+	char *strval;
+
+	if (nvlist_lookup_string(props,
+	    zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
+		ncopies = atoi(strval);
+	else
+		ncopies = 1;
+	if (nvlist_lookup_uint64(props,
+	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
+	    &volblocksize) != 0)
+		volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
+	nblocks = volsize/volblocksize;
+	/* start with metadnode L0-L6 */
+	numdb = 7;
+	/* calculate number of indirects */
+	while (nblocks > 1) {
+		nblocks += DNODES_PER_LEVEL - 1;
+		nblocks /= DNODES_PER_LEVEL;
+		numdb += nblocks;
+	}
+	/* metadata gets one extra copy beyond the data's copies setting */
+	numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
+	volsize *= ncopies;
+	/*
+	 * this is exactly DN_MAX_INDBLKSHIFT when metadata isn't
+	 * compressed, but in practice they compress down to about
+	 * 1100 bytes
+	 */
+	numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
+	volsize += numdb;
+	return (volsize);
+}
+
+/*
+ * Attach/detach the given filesystem to/from the given jail.
+ *
+ * Only filesystems may be jailed; volumes and snapshots are rejected
+ * with EZFS_BADTYPE.  Returns the ioctl status (0 on success).
+ *
+ * Fix over the previous revision: both branches of the error-prefix
+ * setup printed "cannot jail"; the detach case now says "cannot unjail".
+ */
+int
+zfs_jail(zfs_handle_t *zhp, int jailid, int attach)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zfs_cmd_t zc = { 0 };
+	char errbuf[1024];
+	unsigned long cmd;
+	int ret;
+
+	if (attach) {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "cannot jail '%s'"), zhp->zfs_name);
+	} else {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "cannot unjail '%s'"), zhp->zfs_name);
+	}
+
+	switch (zhp->zfs_type) {
+	case ZFS_TYPE_VOLUME:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "volumes can not be jailed"));
+		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+	case ZFS_TYPE_SNAPSHOT:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "snapshots can not be jailed"));
+		return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
+	}
+	assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	zc.zc_objset_type = DMU_OST_ZFS;
+	zc.zc_jailid = jailid;
+
+	cmd = attach ? ZFS_IOC_JAIL : ZFS_IOC_UNJAIL;
+	if ((ret = ioctl(hdl->libzfs_fd, cmd, &zc)) != 0)
+		zfs_standard_error(hdl, errno, errbuf);
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_diff.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_diff.c
new file mode 100644
index 0000000000000000000000000000000000000000..ab2007d98a3381cf2bcb2175f51348aa3ab0d920
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_diff.c
@@ -0,0 +1,834 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * zfs diff support
+ */
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <sys/zfs_ioctl.h>
+#include <libzfs.h>
+#include "libzfs_impl.h"
+
+#define	ZDIFF_SNAPDIR		"/.zfs/snapshot/"
+#define	ZDIFF_SHARESDIR 	"/.zfs/shares/"
+#define	ZDIFF_PREFIX		"zfs-diff-%d"
+
+#define	ZDIFF_ADDED	'+'
+#define	ZDIFF_MODIFIED	'M'
+#define	ZDIFF_REMOVED	'-'
+#define	ZDIFF_RENAMED	'R'
+
+/*
+ * Compare the final path components (basenames) of two object paths;
+ * returns B_TRUE when they match.  If a path contains no '/', the whole
+ * string is compared instead of dereferencing strrchr()'s NULL return
+ * plus one, which the previous revision did.
+ */
+static boolean_t
+do_name_cmp(const char *fpath, const char *tpath)
+{
+	const char *fname, *tname;
+
+	fname = strrchr(fpath, '/');
+	fname = (fname == NULL) ? fpath : fname + 1;
+	tname = strrchr(tpath, '/');
+	tname = (tname == NULL) ? tpath : tname + 1;
+	return (strcmp(fname, tname) == 0);
+}
+
+/* Shared state for one "zfs diff" run, passed to the worker thread. */
+typedef struct differ_info {
+	zfs_handle_t *zhp;	/* dataset being diffed */
+	char *fromsnap;		/* full name of the "from" snapshot */
+	char *frommnt;		/* mountpoint path of the "from" snapshot */
+	char *tosnap;		/* full name of the "to" snapshot */
+	char *tomnt;		/* mountpoint path of the "to" snapshot */
+	char *ds;		/* dataset name */
+	char *dsmnt;		/* dataset mountpoint, prefixed onto paths */
+	char *tmpsnap;		/* temporary snapshot name, if one was taken */
+	char errbuf[1024];	/* detailed error text for the caller */
+	boolean_t isclone;	/* dataset is a clone */
+	boolean_t scripted;	/* -H: tab-separated, machine-readable output */
+	boolean_t classify;	/* -F: include file-type classification */
+	boolean_t timestamped;	/* -t: include change-time column */
+	uint64_t shares;	/* object number of .zfs/shares (skipped) */
+	int zerr;		/* errno-style error from the last operation */
+	int cleanupfd;		/* fd used to auto-release temporary holds */
+	int outputfd;		/* fd the formatted diff is written to */
+	int datafd;		/* pipe fd delivering dmu_diff_record_t's */
+} differ_info_t;
+
+/*
+ * Given a {dsname, object id}, get the object path
+ *
+ * On success, copies the path into pn and returns 0.  On failure,
+ * returns -1 with di->zerr and di->errbuf set; *sb is filled in either
+ * way, since the kernel can return stats even without a path.
+ *
+ * Fix over the previous revision: obj is cast to (longlong_t) to match
+ * the %lld conversion specifier.
+ */
+static int
+get_stats_for_obj(differ_info_t *di, const char *dsname, uint64_t obj,
+    char *pn, int maxlen, zfs_stat_t *sb)
+{
+	zfs_cmd_t zc = { 0 };
+	int error;
+
+	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
+	zc.zc_obj = obj;
+
+	errno = 0;
+	error = ioctl(di->zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_STATS, &zc);
+	di->zerr = errno;
+
+	/* we can get stats even if we failed to get a path */
+	(void) memcpy(sb, &zc.zc_stat, sizeof (zfs_stat_t));
+	if (error == 0) {
+		ASSERT(di->zerr == 0);
+		(void) strlcpy(pn, zc.zc_value, maxlen);
+		return (0);
+	}
+
+	if (di->zerr == EPERM) {
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN,
+		    "The sys_config privilege or diff delegated permission "
+		    "is needed\nto discover path names"));
+		return (-1);
+	} else {
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN,
+		    "Unable to determine path or stats for "
+		    "object %lld in %s"), (longlong_t)obj, dsname);
+		return (-1);
+	}
+}
+
+/*
+ * stream_bytes
+ *
+ * Emit a file name one character at a time.  Characters outside what we
+ * treat as printable ASCII — anything at or below space, backslash
+ * itself, and DEL (0177) or above — are written as a backslash-escaped
+ * three-digit octal value, so 8-bit and control characters never reach
+ * the terminal raw.
+ */
+static void
+stream_bytes(FILE *fp, const char *string)
+{
+	const char *p;
+
+	for (p = string; *p != '\0'; p++) {
+		if (*p > ' ' && *p != '\\' && *p < '\177')
+			(void) fputc(*p, fp);
+		else
+			(void) fprintf(fp, "\\%03hho", (unsigned char)*p);
+	}
+}
+
+/*
+ * Write the single-character file-type classifier ('/' for directories,
+ * 'F' for regular files, '@' for symlinks, ...) derived from the S_IFMT
+ * bits of the given mode.  Unknown types print '?'.
+ */
+static void
+print_what(FILE *fp, mode_t what)
+{
+	char c;
+
+	switch (what & S_IFMT) {
+	case S_IFBLK:
+		c = 'B';
+		break;
+	case S_IFCHR:
+		c = 'C';
+		break;
+	case S_IFDIR:
+		c = '/';
+		break;
+#ifdef S_IFDOOR
+	case S_IFDOOR:
+		c = '>';
+		break;
+#endif
+	case S_IFIFO:
+		c = '|';
+		break;
+	case S_IFLNK:
+		c = '@';
+		break;
+#ifdef S_IFPORT
+	case S_IFPORT:
+		c = 'P';
+		break;
+#endif
+	case S_IFSOCK:
+		c = '=';
+		break;
+	case S_IFREG:
+		c = 'F';
+		break;
+	default:
+		c = '?';
+		break;
+	}
+	(void) fputc(c, fp);
+}
+
+/* Print the dataset mountpoint followed by the file path, both escaped. */
+static void
+print_cmn(FILE *fp, differ_info_t *di, const char *file)
+{
+	stream_bytes(fp, di->dsmnt);
+	stream_bytes(fp, file);
+}
+
+/*
+ * Emit one rename record: optional ctime column, the 'R' tag, optional
+ * type classifier, then "old -> new" (or old<TAB>new in scripted mode).
+ */
+static void
+print_rename(FILE *fp, differ_info_t *di, const char *old, const char *new,
+    zfs_stat_t *isb)
+{
+	if (di->timestamped)
+		(void) fprintf(fp, "%10lld.%09lld\t",
+		    (longlong_t)isb->zs_ctime[0],
+		    (longlong_t)isb->zs_ctime[1]);
+	(void) fprintf(fp, "%c\t", ZDIFF_RENAMED);
+	if (di->classify) {
+		print_what(fp, isb->zs_mode);
+		(void) fputc('\t', fp);
+	}
+	print_cmn(fp, di, old);
+	(void) fprintf(fp, di->scripted ? "\t" : " -> ");
+	print_cmn(fp, di, new);
+	(void) fputc('\n', fp);
+}
+
+/*
+ * Emit one link-count-change record: optional ctime column, the 'M'
+ * tag, optional type classifier, the path, and the signed link delta.
+ */
+static void
+print_link_change(FILE *fp, differ_info_t *di, int delta, const char *file,
+    zfs_stat_t *isb)
+{
+	if (di->timestamped)
+		(void) fprintf(fp, "%10lld.%09lld\t",
+		    (longlong_t)isb->zs_ctime[0],
+		    (longlong_t)isb->zs_ctime[1]);
+	(void) fprintf(fp, "%c\t", ZDIFF_MODIFIED);
+	if (di->classify) {
+		print_what(fp, isb->zs_mode);
+		(void) fputc('\t', fp);
+	}
+	print_cmn(fp, di, file);
+	(void) fprintf(fp, "\t(%+d)\n", delta);
+}
+
+/*
+ * Emit one added/removed/modified record: optional ctime column, the
+ * given type tag, optional classifier, then the escaped path.
+ */
+static void
+print_file(FILE *fp, differ_info_t *di, char type, const char *file,
+    zfs_stat_t *isb)
+{
+	if (di->timestamped)
+		(void) fprintf(fp, "%10lld.%09lld\t",
+		    (longlong_t)isb->zs_ctime[0],
+		    (longlong_t)isb->zs_ctime[1]);
+	(void) fprintf(fp, "%c\t", type);
+	if (di->classify) {
+		print_what(fp, isb->zs_mode);
+		(void) fputc('\t', fp);
+	}
+	print_cmn(fp, di, file);
+	(void) fputc('\n', fp);
+}
+
+/*
+ * Classify and print what happened to a single object between the from
+ * and to snapshots: added, removed, modified, renamed, or a link-count
+ * change.  Returns 0 on success (including "nothing to report"), -1 on
+ * lookup failure with di->zerr/errbuf set.
+ */
+static int
+write_inuse_diffs_one(FILE *fp, differ_info_t *di, uint64_t dobj)
+{
+	struct zfs_stat fsb, tsb;
+	boolean_t same_name;
+	mode_t fmode, tmode;
+	char fobjname[MAXPATHLEN], tobjname[MAXPATHLEN];
+	int fobjerr, tobjerr;
+	int change;
+
+	/* The .zfs/shares object is internal; never report it. */
+	if (dobj == di->shares)
+		return (0);
+
+	/*
+	 * Check the from and to snapshots for info on the object. If
+	 * we get ENOENT, then the object just didn't exist in that
+	 * snapshot.  If we get ENOTSUP, then we tried to get
+	 * info on a non-ZPL object, which we don't care about anyway.
+	 */
+	fobjerr = get_stats_for_obj(di, di->fromsnap, dobj, fobjname,
+	    MAXPATHLEN, &fsb);
+	if (fobjerr && di->zerr != ENOENT && di->zerr != ENOTSUP)
+		return (-1);
+
+	tobjerr = get_stats_for_obj(di, di->tosnap, dobj, tobjname,
+	    MAXPATHLEN, &tsb);
+	if (tobjerr && di->zerr != ENOENT && di->zerr != ENOTSUP)
+		return (-1);
+
+	/*
+	 * Unallocated object sharing the same meta dnode block
+	 */
+	if (fobjerr && tobjerr) {
+		ASSERT(di->zerr == ENOENT || di->zerr == ENOTSUP);
+		di->zerr = 0;
+		return (0);
+	}
+
+	di->zerr = 0; /* negate get_stats_for_obj() from side that failed */
+	fmode = fsb.zs_mode & S_IFMT;
+	tmode = tsb.zs_mode & S_IFMT;
+	/* Directories and unlinked objects never report a link delta. */
+	if (fmode == S_IFDIR || tmode == S_IFDIR || fsb.zs_links == 0 ||
+	    tsb.zs_links == 0)
+		change = 0;
+	else
+		change = tsb.zs_links - fsb.zs_links;
+
+	/* Present only in "to": added (or a hard-link count change). */
+	if (fobjerr) {
+		if (change) {
+			print_link_change(fp, di, change, tobjname, &tsb);
+			return (0);
+		}
+		print_file(fp, di, ZDIFF_ADDED, tobjname, &tsb);
+		return (0);
+	} else if (tobjerr) {
+		/* Present only in "from": removed (or a link change). */
+		if (change) {
+			print_link_change(fp, di, change, fobjname, &fsb);
+			return (0);
+		}
+		print_file(fp, di, ZDIFF_REMOVED, fobjname, &fsb);
+		return (0);
+	}
+
+	/* A type change on a reused object must look like remove+add. */
+	if (fmode != tmode && fsb.zs_gen == tsb.zs_gen)
+		tsb.zs_gen++;	/* Force a generational difference */
+	same_name = do_name_cmp(fobjname, tobjname);
+
+	/* Simple modification or no change */
+	if (fsb.zs_gen == tsb.zs_gen) {
+		/* No apparent changes.  Could we assert !this?  */
+		if (fsb.zs_ctime[0] == tsb.zs_ctime[0] &&
+		    fsb.zs_ctime[1] == tsb.zs_ctime[1])
+			return (0);
+		if (change) {
+			print_link_change(fp, di, change,
+			    change > 0 ? fobjname : tobjname, &tsb);
+		} else if (same_name) {
+			print_file(fp, di, ZDIFF_MODIFIED, fobjname, &tsb);
+		} else {
+			print_rename(fp, di, fobjname, tobjname, &tsb);
+		}
+		return (0);
+	} else {
+		/* file re-created or object re-used */
+		print_file(fp, di, ZDIFF_REMOVED, fobjname, &fsb);
+		print_file(fp, di, ZDIFF_ADDED, tobjname, &tsb);
+		return (0);
+	}
+}
+
+/*
+ * Describe every object in the record's inclusive [ddr_first, ddr_last]
+ * range, stopping at (and returning) the first failure.
+ *
+ * The assignment inside the if-condition is now parenthesized and
+ * compared explicitly, per C idiom, instead of `if (err = ...)`.
+ */
+static int
+write_inuse_diffs(FILE *fp, differ_info_t *di, dmu_diff_record_t *dr)
+{
+	uint64_t o;
+	int err;
+
+	for (o = dr->ddr_first; o <= dr->ddr_last; o++) {
+		if ((err = write_inuse_diffs_one(fp, di, o)) != 0)
+			return (err);
+	}
+	return (0);
+}
+
+/*
+ * Report an object freed between the snapshots as a removal, resolving
+ * its path in the "from" snapshot.  Objects already in the delete queue
+ * there (ENOENT with zero links) are silently skipped.  Returns 0 on
+ * success, -1 on lookup failure.
+ */
+static int
+describe_free(FILE *fp, differ_info_t *di, uint64_t object, char *namebuf,
+    int maxlen)
+{
+	struct zfs_stat sb;
+
+	if (get_stats_for_obj(di, di->fromsnap, object, namebuf,
+	    maxlen, &sb) != 0) {
+		/* Let it slide, if in the delete queue on from side */
+		if (di->zerr == ENOENT && sb.zs_links == 0) {
+			di->zerr = 0;
+			return (0);
+		}
+		return (-1);
+	}
+
+	print_file(fp, di, ZDIFF_REMOVED, namebuf, &sb);
+	return (0);
+}
+
+/*
+ * Walk the objects freed in the record's [ddr_first, ddr_last] range by
+ * repeatedly asking the kernel (ZFS_IOC_NEXT_OBJ) for the next object
+ * allocated in the "from" snapshot, and describe each as removed.
+ * Returns 0 on success, -1 with di->zerr/errbuf set on failure.
+ */
+static int
+write_free_diffs(FILE *fp, differ_info_t *di, dmu_diff_record_t *dr)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *lhdl = di->zhp->zfs_hdl;
+	char fobjname[MAXPATHLEN];
+
+	(void) strlcpy(zc.zc_name, di->fromsnap, sizeof (zc.zc_name));
+	/* Cursor: the ioctl advances zc_obj to the next allocated object. */
+	zc.zc_obj = dr->ddr_first - 1;
+
+	ASSERT(di->zerr == 0);
+
+	while (zc.zc_obj < dr->ddr_last) {
+		int err;
+
+		err = ioctl(lhdl->libzfs_fd, ZFS_IOC_NEXT_OBJ, &zc);
+		if (err == 0) {
+			/* Skip the internal .zfs/shares object. */
+			if (zc.zc_obj == di->shares) {
+				zc.zc_obj++;
+				continue;
+			}
+			if (zc.zc_obj > dr->ddr_last) {
+				break;
+			}
+			err = describe_free(fp, di, zc.zc_obj, fobjname,
+			    MAXPATHLEN);
+			if (err)
+				break;
+		} else if (errno == ESRCH) {
+			/* No more allocated objects past the cursor. */
+			break;
+		} else {
+			(void) snprintf(di->errbuf, sizeof (di->errbuf),
+			    dgettext(TEXT_DOMAIN,
+			    "next allocated object (> %lld) find failure"),
+			    zc.zc_obj);
+			di->zerr = errno;
+			break;
+		}
+	}
+	if (di->zerr)
+		return (-1);
+	return (0);
+}
+
+/*
+ * Thread body: read fixed-size dmu_diff_record_t records from the ioctl
+ * pipe (di->datafd) and render each as diff output on di->outputfd.
+ * Returns (void *)-1 on failure with di->zerr/di->errbuf set, else
+ * (void *)0.  Always closes both descriptors before returning.
+ */
+static void *
+differ(void *arg)
+{
+	differ_info_t *di = arg;
+	dmu_diff_record_t dr;
+	FILE *ofp;
+	int err = 0;
+
+	if ((ofp = fdopen(di->outputfd, "w")) == NULL) {
+		di->zerr = errno;
+		(void) strerror_r(errno, di->errbuf, sizeof (di->errbuf));
+		(void) close(di->datafd);
+		return ((void *)-1);
+	}
+
+	for (;;) {
+		char *cp = (char *)&dr;
+		int len = sizeof (dr);
+		int rv;
+
+		/* Accumulate a whole record; pipe reads may be short. */
+		do {
+			rv = read(di->datafd, cp, len);
+			cp += rv;
+			len -= rv;
+		} while (len > 0 && rv > 0);
+
+		if (rv < 0 || (rv == 0 && len != sizeof (dr))) {
+			/* Read error, or EOF in the middle of a record. */
+			di->zerr = EPIPE;
+			break;
+		} else if (rv == 0) {
+			/* end of file at a natural breaking point */
+			break;
+		}
+
+		switch (dr.ddr_type) {
+		case DDR_FREE:
+			err = write_free_diffs(ofp, di, &dr);
+			break;
+		case DDR_INUSE:
+			err = write_inuse_diffs(ofp, di, &dr);
+			break;
+		default:
+			/* Unknown record type: treat as a protocol error. */
+			di->zerr = EPIPE;
+			break;
+		}
+
+		if (err || di->zerr)
+			break;
+	}
+
+	(void) fclose(ofp);
+	(void) close(di->datafd);
+	if (err)
+		return ((void *)-1);
+	if (di->zerr) {
+		/*
+		 * NOTE(review): this asserts EINVAL, but the only value
+		 * this function stores in di->zerr is EPIPE -- confirm
+		 * which error code is actually intended here.
+		 */
+		ASSERT(di->zerr == EINVAL);
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN,
+		    "Internal error: bad data from diff IOCTL"));
+		return ((void *)-1);
+	}
+	return ((void *)0);
+}
+
+/*
+ * Record the object number of the dataset's .zfs shares directory so
+ * the diff code can skip it.  On FreeBSD a missing shares dir is not
+ * an error.
+ */
+static int
+find_shares_object(differ_info_t *di)
+{
+	char path[MAXPATHLEN];
+	struct stat64 st = { 0 };
+
+	(void) snprintf(path, sizeof (path), "%s%s", di->dsmnt,
+	    ZDIFF_SHARESDIR);
+
+	if (stat64(path, &st) != 0) {
+#ifdef sun
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN, "Cannot stat %s"), path);
+		return (zfs_error(di->zhp->zfs_hdl, EZFS_DIFF, di->errbuf));
+#else
+		return (0);
+#endif
+	}
+
+	di->shares = (uint64_t)st.st_ino;
+	return (0);
+}
+
+/*
+ * Create a just-in-time snapshot of di->ds to diff against.  The
+ * snapshot is tied to di->cleanupfd so the kernel destroys it when the
+ * fd is closed.  Fills in di->tmpsnap and di->tosnap on success.
+ */
+static int
+make_temp_snapshot(differ_info_t *di)
+{
+	libzfs_handle_t *hdl = di->zhp->zfs_hdl;
+	zfs_cmd_t zc = { 0 };
+	int err;
+
+	(void) snprintf(zc.zc_value, sizeof (zc.zc_value),
+	    ZDIFF_PREFIX, getpid());
+	(void) strlcpy(zc.zc_name, di->ds, sizeof (zc.zc_name));
+	zc.zc_cleanup_fd = di->cleanupfd;
+
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_TMP_SNAPSHOT, &zc) == 0) {
+		/* The kernel returns the actual snapshot name. */
+		di->tmpsnap = zfs_strdup(hdl, zc.zc_value);
+		di->tosnap = zfs_asprintf(hdl, "%s@%s", di->ds, di->tmpsnap);
+		return (0);
+	}
+
+	err = errno;
+	if (err == EPERM) {
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN, "The diff delegated "
+		    "permission is needed in order\nto create a "
+		    "just-in-time snapshot for diffing\n"));
+		return (zfs_error(hdl, EZFS_DIFF, di->errbuf));
+	}
+	(void) snprintf(di->errbuf, sizeof (di->errbuf),
+	    dgettext(TEXT_DOMAIN, "Cannot create just-in-time "
+	    "snapshot of '%s'"), zc.zc_name);
+	return (zfs_standard_error(hdl, err, di->errbuf));
+}
+
+/*
+ * Release everything setup_differ_info() allocated: the name and
+ * mountpoint strings plus the cleanup fd (closing it destroys any
+ * just-in-time snapshot).  Safe on a partially initialized structure
+ * since free(NULL) is a no-op.
+ */
+static void
+teardown_differ_info(differ_info_t *di)
+{
+	free(di->ds);
+	free(di->fromsnap);
+	free(di->tosnap);
+	free(di->tmpsnap);
+	free(di->dsmnt);
+	free(di->frommnt);
+	free(di->tomnt);
+	(void) close(di->cleanupfd);
+}
+
+/*
+ * Parse the user-supplied snapshot argument(s) into di->ds (head
+ * dataset name), di->fromsnap and di->tosnap (full snapshot names).
+ * When no "to" snapshot name is given, a just-in-time snapshot of the
+ * head dataset is created and used instead.  Returns 0 on success, or
+ * a libzfs error with di->errbuf filled in.
+ */
+static int
+get_snapshot_names(differ_info_t *di, const char *fromsnap,
+    const char *tosnap)
+{
+	libzfs_handle_t *hdl = di->zhp->zfs_hdl;
+	char *atptrf = NULL;
+	char *atptrt = NULL;
+	int fdslen, fsnlen;
+	int tdslen, tsnlen;
+
+	/*
+	 * Can accept
+	 *    dataset@snap1
+	 *    dataset@snap1 dataset@snap2
+	 *    dataset@snap1 @snap2
+	 *    dataset@snap1 dataset
+	 *    @snap1 dataset@snap2
+	 */
+	if (tosnap == NULL) {
+		/* only a from snapshot given, must be valid */
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN,
+		    "Badly formed snapshot name %s"), fromsnap);
+
+		if (!zfs_validate_name(hdl, fromsnap, ZFS_TYPE_SNAPSHOT,
+		    B_FALSE)) {
+			return (zfs_error(hdl, EZFS_INVALIDNAME,
+			    di->errbuf));
+		}
+
+		/* Split "dataset@snap" at the '@' to get the dataset. */
+		atptrf = strchr(fromsnap, '@');
+		ASSERT(atptrf != NULL);
+		fdslen = atptrf - fromsnap;
+
+		di->fromsnap = zfs_strdup(hdl, fromsnap);
+		di->ds = zfs_strdup(hdl, fromsnap);
+		di->ds[fdslen] = '\0';
+
+		/* the to snap will be a just-in-time snap of the head */
+		return (make_temp_snapshot(di));
+	}
+
+	(void) snprintf(di->errbuf, sizeof (di->errbuf),
+	    dgettext(TEXT_DOMAIN,
+	    "Unable to determine which snapshots to compare"));
+
+	atptrf = strchr(fromsnap, '@');
+	atptrt = strchr(tosnap, '@');
+	fdslen = atptrf ? atptrf - fromsnap : strlen(fromsnap);
+	tdslen = atptrt ? atptrt - tosnap : strlen(tosnap);
+	fsnlen = strlen(fromsnap) - fdslen;	/* includes @ sign */
+	tsnlen = strlen(tosnap) - tdslen;	/* includes @ sign */
+
+	/*
+	 * Because the snap lengths include the '@', a value of 1 means
+	 * an empty snapshot name and 0 means no '@' at all.  Reject an
+	 * empty "from" snap name, an empty "to" snap name, and the cases
+	 * where neither side names a dataset or neither names a snap.
+	 */
+	if (fsnlen <= 1 || tsnlen == 1 || (fdslen == 0 && tdslen == 0) ||
+	    (fsnlen == 0 && tsnlen == 0)) {
+		return (zfs_error(hdl, EZFS_INVALIDNAME, di->errbuf));
+	} else if ((fdslen > 0 && tdslen > 0) &&
+	    ((tdslen != fdslen || strncmp(fromsnap, tosnap, fdslen) != 0))) {
+		/*
+		 * not the same dataset name, might be okay if
+		 * tosnap is a clone of a fromsnap descendant.
+		 */
+		char origin[ZFS_MAXNAMELEN];
+		zprop_source_t src;
+		zfs_handle_t *zhp;
+
+		di->ds = zfs_alloc(di->zhp->zfs_hdl, tdslen + 1);
+		(void) strncpy(di->ds, tosnap, tdslen);
+		di->ds[tdslen] = '\0';
+
+		/*
+		 * Walk tosnap's dataset back through its clone origins
+		 * looking for fromsnap.  NOTE(review): the strncmp only
+		 * compares the first fsnlen bytes (the length of
+		 * "@snap"), not the full origin/fromsnap strings --
+		 * looks suspicious; confirm against upstream.
+		 */
+		zhp = zfs_open(hdl, di->ds, ZFS_TYPE_FILESYSTEM);
+		while (zhp != NULL) {
+			(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
+			    origin, sizeof (origin), &src, NULL, 0, B_FALSE);
+
+			if (strncmp(origin, fromsnap, fsnlen) == 0)
+				break;
+
+			(void) zfs_close(zhp);
+			zhp = zfs_open(hdl, origin, ZFS_TYPE_FILESYSTEM);
+		}
+
+		if (zhp == NULL) {
+			(void) snprintf(di->errbuf, sizeof (di->errbuf),
+			    dgettext(TEXT_DOMAIN,
+			    "Not an earlier snapshot from the same fs"));
+			return (zfs_error(hdl, EZFS_INVALIDNAME, di->errbuf));
+		} else {
+			(void) zfs_close(zhp);
+		}
+
+		di->isclone = B_TRUE;
+		di->fromsnap = zfs_strdup(hdl, fromsnap);
+		if (tsnlen) {
+			di->tosnap = zfs_strdup(hdl, tosnap);
+		} else {
+			return (make_temp_snapshot(di));
+		}
+	} else {
+		/* Same dataset on both sides (or one side omitted it). */
+		int dslen = fdslen ? fdslen : tdslen;
+
+		di->ds = zfs_alloc(hdl, dslen + 1);
+		(void) strncpy(di->ds, fdslen ? fromsnap : tosnap, dslen);
+		di->ds[dslen] = '\0';
+
+		di->fromsnap = zfs_asprintf(hdl, "%s%s", di->ds, atptrf);
+		if (tsnlen) {
+			di->tosnap = zfs_asprintf(hdl, "%s%s", di->ds, atptrt);
+		} else {
+			return (make_temp_snapshot(di));
+		}
+	}
+	return (0);
+}
+
+/*
+ * Look up the mountpoint of dsnm; diffing requires the dataset to be
+ * mounted.  On success *mntpt is an allocated string owned by the
+ * caller.
+ */
+static int
+get_mountpoint(differ_info_t *di, char *dsnm, char **mntpt)
+{
+	libzfs_handle_t *hdl = di->zhp->zfs_hdl;
+
+	if (!is_mounted(hdl, dsnm, mntpt)) {
+		(void) snprintf(di->errbuf, sizeof (di->errbuf),
+		    dgettext(TEXT_DOMAIN,
+		    "Cannot diff an unmounted snapshot"));
+		return (zfs_error(hdl, EZFS_BADTYPE, di->errbuf));
+	}
+
+	/* Avoid a double slash at the beginning of root-mounted datasets */
+	if ((*mntpt)[0] == '/' && (*mntpt)[1] == '\0')
+		(*mntpt)[0] = '\0';
+	return (0);
+}
+
+/*
+ * Build di->tomnt and di->frommnt, the .zfs/snapshot paths of the two
+ * snapshots being compared.  For a clone, the "from" side lives under
+ * its own filesystem's mountpoint rather than di->dsmnt.
+ */
+static int
+get_mountpoints(differ_info_t *di)
+{
+	char *atp;
+	char *frommntpt;
+
+	/* The parent dataset's mountpoint anchors both snapdir paths. */
+	if (get_mountpoint(di, di->ds, &di->dsmnt) != 0)
+		return (-1);
+
+	atp = strchr(di->tosnap, '@');
+	ASSERT3P(atp, !=, NULL);
+	di->tomnt = zfs_asprintf(di->zhp->zfs_hdl, "%s%s%s", di->dsmnt,
+	    ZDIFF_SNAPDIR, atp + 1);
+
+	atp = strchr(di->fromsnap, '@');
+	ASSERT3P(atp, !=, NULL);
+
+	frommntpt = di->dsmnt;
+	if (di->isclone) {
+		char *mntpt;
+		int err;
+
+		/* Temporarily truncate to the dataset name for lookup. */
+		*atp = '\0';
+		err = get_mountpoint(di, di->fromsnap, &mntpt);
+		*atp = '@';
+		if (err != 0)
+			return (-1);
+		frommntpt = mntpt;
+	}
+
+	di->frommnt = zfs_asprintf(di->zhp->zfs_hdl, "%s%s%s", frommntpt,
+	    ZDIFF_SNAPDIR, atp + 1);
+
+	/* The clone lookup allocated its own mountpoint string. */
+	if (di->isclone)
+		free(frommntpt);
+
+	return (0);
+}
+
+/*
+ * Populate a differ_info_t for the requested comparison: resolve the
+ * snapshot names, their mountpoints, and the shares object.  The
+ * cleanup fd is opened first so any just-in-time snapshot is destroyed
+ * when it is closed by teardown_differ_info().
+ */
+static int
+setup_differ_info(zfs_handle_t *zhp, const char *fromsnap,
+    const char *tosnap, differ_info_t *di)
+{
+	di->zhp = zhp;
+
+	di->cleanupfd = open(ZFS_DEV, O_RDWR|O_EXCL);
+	VERIFY(di->cleanupfd >= 0);
+
+	if (get_snapshot_names(di, fromsnap, tosnap) != 0 ||
+	    get_mountpoints(di) != 0 ||
+	    find_shares_object(di) != 0)
+		return (-1);
+
+	return (0);
+}
+
+/*
+ * Entry point for "zfs diff": compare two snapshots (or a snapshot and
+ * the live filesystem) and write the differences to outfd.  A worker
+ * thread renders the records that the ZFS_IOC_DIFF ioctl streams into
+ * a pipe.  Returns 0 on success, -1 (via zfs_error) on failure.
+ */
+int
+zfs_show_diffs(zfs_handle_t *zhp, int outfd, const char *fromsnap,
+    const char *tosnap, int flags)
+{
+	zfs_cmd_t zc = { 0 };
+	char errbuf[1024];
+	differ_info_t di = { 0 };
+	pthread_t tid;
+	int pipefd[2];
+	int iocerr;
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "zfs diff failed"));
+
+	if (setup_differ_info(zhp, fromsnap, tosnap, &di)) {
+		teardown_differ_info(&di);
+		return (-1);
+	}
+
+	if (pipe(pipefd)) {
+		zfs_error_aux(zhp->zfs_hdl, strerror(errno));
+		teardown_differ_info(&di);
+		return (zfs_error(zhp->zfs_hdl, EZFS_PIPEFAILED, errbuf));
+	}
+
+	di.scripted = (flags & ZFS_DIFF_PARSEABLE);
+	di.classify = (flags & ZFS_DIFF_CLASSIFY);
+	di.timestamped = (flags & ZFS_DIFF_TIMESTAMP);
+
+	di.outputfd = outfd;
+	di.datafd = pipefd[0];
+
+	if (pthread_create(&tid, NULL, differ, &di)) {
+		zfs_error_aux(zhp->zfs_hdl, strerror(errno));
+		(void) close(pipefd[0]);
+		(void) close(pipefd[1]);
+		teardown_differ_info(&di);
+		return (zfs_error(zhp->zfs_hdl,
+		    EZFS_THREADCREATEFAILED, errbuf));
+	}
+
+	/*
+	 * Do the ioctl().  Bound the copies by the destination buffer
+	 * size: strlcpy(dst, src, strlen(src) + 1) defeats strlcpy's
+	 * truncation protection and could overrun zc_value/zc_name if a
+	 * snapshot name were ever longer than the ioctl buffers.
+	 */
+	(void) strlcpy(zc.zc_value, di.fromsnap, sizeof (zc.zc_value));
+	(void) strlcpy(zc.zc_name, di.tosnap, sizeof (zc.zc_name));
+	zc.zc_cookie = pipefd[1];
+
+	iocerr = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_DIFF, &zc);
+	if (iocerr != 0) {
+		(void) snprintf(errbuf, sizeof (errbuf),
+		    dgettext(TEXT_DOMAIN, "Unable to obtain diffs"));
+		if (errno == EPERM) {
+			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+			    "\n   The sys_mount privilege or diff delegated "
+			    "permission is needed\n   to execute the "
+			    "diff ioctl"));
+		} else if (errno == EXDEV) {
+			zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+			    "\n   Not an earlier snapshot from the same fs"));
+		} else if (errno != EPIPE || di.zerr == 0) {
+			zfs_error_aux(zhp->zfs_hdl, strerror(errno));
+		}
+		/* Closing the write side unblocks the worker thread. */
+		(void) close(pipefd[1]);
+		(void) pthread_cancel(tid);
+		(void) pthread_join(tid, NULL);
+		teardown_differ_info(&di);
+		if (di.zerr != 0 && di.zerr != EPIPE) {
+			zfs_error_aux(zhp->zfs_hdl, strerror(di.zerr));
+			return (zfs_error(zhp->zfs_hdl, EZFS_DIFF, di.errbuf));
+		} else {
+			return (zfs_error(zhp->zfs_hdl, EZFS_DIFFDATA, errbuf));
+		}
+	}
+
+	(void) close(pipefd[1]);
+	(void) pthread_join(tid, NULL);
+
+	if (di.zerr != 0) {
+		zfs_error_aux(zhp->zfs_hdl, strerror(di.zerr));
+		/*
+		 * Release di's resources before returning (previously
+		 * leaked on this path).  di.errbuf is an embedded array,
+		 * so it remains valid after teardown.
+		 */
+		teardown_differ_info(&di);
+		return (zfs_error(zhp->zfs_hdl, EZFS_DIFF, di.errbuf));
+	}
+	teardown_differ_info(&di);
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_fru.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_fru.c
new file mode 100644
index 0000000000000000000000000000000000000000..788fa2cfb763de47952089676b3422b5477737df
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_fru.c
@@ -0,0 +1,452 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <libintl.h>
+#include <link.h>
+#include <pthread.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include <libzfs.h>
+
+#include <fm/libtopo.h>
+#include <sys/fm/protocol.h>
+#include <sys/systeminfo.h>
+
+#include "libzfs_impl.h"
+
+/*
+ * This file is responsible for determining the relationship between I/O
+ * devices paths and physical locations.  In the world of MPxIO and external
+ * enclosures, the device path is not synonymous with the physical location.
+ * If you remove a drive and insert it into a different slot, it will end up
+ * with the same path under MPxIO.  If you recable storage enclosures, the
+ * device paths may change.  All of this makes it difficult to implement the
+ * 'autoreplace' property, which is supposed to automatically manage disk
+ * replacement based on physical slot.
+ *
+ * In order to work around these limitations, we have a per-vdev FRU property
+ * that is the libtopo path (minus disk-specific authority information) to the
+ * physical location of the device on the system.  This is an optional
+ * property, and is only needed when using the 'autoreplace' property or when
+ * generating FMA faults against vdevs.
+ */
+
+/*
+ * Because the FMA packages depend on ZFS, we have to dlopen() libtopo in case
+ * it is not present.  We only need this once per library instance, so it is
+ * not part of the libzfs handle.
+ */
+static void *_topo_dlhandle;
+static topo_hdl_t *(*_topo_open)(int, const char *, int *);
+static void (*_topo_close)(topo_hdl_t *);
+static char *(*_topo_snap_hold)(topo_hdl_t *, const char *, int *);
+static void (*_topo_snap_release)(topo_hdl_t *);
+static topo_walk_t *(*_topo_walk_init)(topo_hdl_t *, const char *,
+    topo_walk_cb_t, void *, int *);
+static int (*_topo_walk_step)(topo_walk_t *, int);
+static void (*_topo_walk_fini)(topo_walk_t *);
+static void (*_topo_hdl_strfree)(topo_hdl_t *, char *);
+static char *(*_topo_node_name)(tnode_t *);
+static int (*_topo_prop_get_string)(tnode_t *, const char *, const char *,
+    char **, int *);
+static int (*_topo_node_fru)(tnode_t *, nvlist_t **, nvlist_t *, int *);
+static int (*_topo_fmri_nvl2str)(topo_hdl_t *, nvlist_t *, char **, int *);
+static int (*_topo_fmri_strcmp_noauth)(topo_hdl_t *, const char *,
+    const char *);
+
+#define	ZFS_FRU_HASH_SIZE	257
+
+/*
+ * Classic ELF string hash of 'key', folded into the bucket count.
+ */
+static size_t
+fru_strhash(const char *key)
+{
+	ulong_t hash = 0;
+	ulong_t hibits;
+	const char *cp;
+
+	for (cp = key; *cp != '\0'; cp++) {
+		hash = (hash << 4) + *cp;
+		hibits = hash & 0xf0000000;
+		if (hibits != 0) {
+			hash ^= (hibits >> 24);
+			hash ^= hibits;
+		}
+	}
+
+	return (hash % ZFS_FRU_HASH_SIZE);
+}
+
+/*
+ * topo_walk callback: for each "disk" node, record the devfs-path ->
+ * FRU string mapping in the libzfs handle's hash table.  Per-node
+ * failures are not fatal -- the walk always continues (TOPO_WALK_NEXT).
+ */
+static int
+libzfs_fru_gather(topo_hdl_t *thp, tnode_t *tn, void *arg)
+{
+	libzfs_handle_t *hdl = arg;
+	nvlist_t *fru;
+	char *devpath, *frustr;
+	int err;
+	libzfs_fru_t *frup;
+	size_t idx;
+
+	/*
+	 * If this is the chassis node, and we don't yet have the system
+	 * chassis ID, then fill in this value now.
+	 */
+	if (hdl->libzfs_chassis_id[0] == '\0' &&
+	    strcmp(_topo_node_name(tn), "chassis") == 0) {
+		if (_topo_prop_get_string(tn, FM_FMRI_AUTHORITY,
+		    FM_FMRI_AUTH_CHASSIS, &devpath, &err) == 0)
+			(void) strlcpy(hdl->libzfs_chassis_id, devpath,
+			    sizeof (hdl->libzfs_chassis_id));
+	}
+
+	/*
+	 * Skip non-disk nodes.
+	 */
+	if (strcmp(_topo_node_name(tn), "disk") != 0)
+		return (TOPO_WALK_NEXT);
+
+	/*
+	 * Get the devfs path and FRU.
+	 */
+	if (_topo_prop_get_string(tn, "io", "devfs-path", &devpath, &err) != 0)
+		return (TOPO_WALK_NEXT);
+
+	/* Already hashed from an earlier node; nothing to add. */
+	if (libzfs_fru_lookup(hdl, devpath) != NULL) {
+		_topo_hdl_strfree(thp, devpath);
+		return (TOPO_WALK_NEXT);
+	}
+
+	if (_topo_node_fru(tn, &fru, NULL, &err) != 0) {
+		_topo_hdl_strfree(thp, devpath);
+		return (TOPO_WALK_NEXT);
+	}
+
+	/*
+	 * Convert the FRU into a string.
+	 */
+	if (_topo_fmri_nvl2str(thp, fru, &frustr, &err) != 0) {
+		nvlist_free(fru);
+		_topo_hdl_strfree(thp, devpath);
+		return (TOPO_WALK_NEXT);
+	}
+
+	nvlist_free(fru);
+
+	/*
+	 * Finally, we have a FRU string and device path.  Add it to the hash.
+	 */
+	if ((frup = calloc(sizeof (libzfs_fru_t), 1)) == NULL) {
+		_topo_hdl_strfree(thp, devpath);
+		_topo_hdl_strfree(thp, frustr);
+		return (TOPO_WALK_NEXT);
+	}
+
+	/* Keep private copies; the topo strings are freed below. */
+	if ((frup->zf_device = strdup(devpath)) == NULL ||
+	    (frup->zf_fru = strdup(frustr)) == NULL) {
+		free(frup->zf_device);
+		free(frup);
+		_topo_hdl_strfree(thp, devpath);
+		_topo_hdl_strfree(thp, frustr);
+		return (TOPO_WALK_NEXT);
+	}
+
+	_topo_hdl_strfree(thp, devpath);
+	_topo_hdl_strfree(thp, frustr);
+
+	/* Link into the hash bucket chain and the global list. */
+	idx = fru_strhash(frup->zf_device);
+	frup->zf_chain = hdl->libzfs_fru_hash[idx];
+	hdl->libzfs_fru_hash[idx] = frup;
+	frup->zf_next = hdl->libzfs_fru_list;
+	hdl->libzfs_fru_list = frup;
+
+	return (TOPO_WALK_NEXT);
+}
+
+/*
+ * Called during initialization to setup the dynamic libtopo connection.
+ */
+#pragma init(libzfs_init_fru)
+static void
+libzfs_init_fru(void)
+{
+	char path[MAXPATHLEN];
+	char isa[257];
+
+	/* Choose the 64-bit ISA subdirectory when built LP64. */
+#if defined(_LP64)
+	if (sysinfo(SI_ARCHITECTURE_64, isa, sizeof (isa)) < 0)
+		isa[0] = '\0';
+#else
+	isa[0] = '\0';
+#endif
+	(void) snprintf(path, sizeof (path),
+	    "/usr/lib/fm/%s/libtopo.so", isa);
+
+	/* libtopo is optional: if absent, FRU support is disabled. */
+	if ((_topo_dlhandle = dlopen(path, RTLD_LAZY)) == NULL)
+		return;
+
+	/* Resolve each entry point we need into a file-scope pointer. */
+	_topo_open = (topo_hdl_t *(*)())
+	    dlsym(_topo_dlhandle, "topo_open");
+	_topo_close = (void (*)())
+	    dlsym(_topo_dlhandle, "topo_close");
+	_topo_snap_hold = (char *(*)())
+	    dlsym(_topo_dlhandle, "topo_snap_hold");
+	_topo_snap_release = (void (*)())
+	    dlsym(_topo_dlhandle, "topo_snap_release");
+	_topo_walk_init = (topo_walk_t *(*)())
+	    dlsym(_topo_dlhandle, "topo_walk_init");
+	_topo_walk_step = (int (*)())
+	    dlsym(_topo_dlhandle, "topo_walk_step");
+	_topo_walk_fini = (void (*)())
+	    dlsym(_topo_dlhandle, "topo_walk_fini");
+	_topo_hdl_strfree = (void (*)())
+	    dlsym(_topo_dlhandle, "topo_hdl_strfree");
+	_topo_node_name = (char *(*)())
+	    dlsym(_topo_dlhandle, "topo_node_name");
+	_topo_prop_get_string = (int (*)())
+	    dlsym(_topo_dlhandle, "topo_prop_get_string");
+	_topo_node_fru = (int (*)())
+	    dlsym(_topo_dlhandle, "topo_node_fru");
+	_topo_fmri_nvl2str = (int (*)())
+	    dlsym(_topo_dlhandle, "topo_fmri_nvl2str");
+	_topo_fmri_strcmp_noauth = (int (*)())
+	    dlsym(_topo_dlhandle, "topo_fmri_strcmp_noauth");
+
+	/* All symbols must resolve, or FRU support is disabled entirely. */
+	if (_topo_open == NULL || _topo_close == NULL ||
+	    _topo_snap_hold == NULL || _topo_snap_release == NULL ||
+	    _topo_walk_init == NULL || _topo_walk_step == NULL ||
+	    _topo_walk_fini == NULL || _topo_hdl_strfree == NULL ||
+	    _topo_node_name == NULL || _topo_prop_get_string == NULL ||
+	    _topo_node_fru == NULL || _topo_fmri_nvl2str == NULL ||
+	    _topo_fmri_strcmp_noauth == NULL) {
+		(void) dlclose(_topo_dlhandle);
+		_topo_dlhandle = NULL;
+	}
+}
+
+/*
+ * Refresh the mappings from device path -> FMRI.  We do this by walking the
+ * hc topology looking for disk nodes, and recording the io/devfs-path and FRU.
+ * Note that we strip out the disk-specific authority information (serial,
+ * part, revision, etc) so that we are left with only the identifying
+ * characteristics of the slot (hc path and chassis-id).
+ */
+void
+libzfs_fru_refresh(libzfs_handle_t *hdl)
+{
+	int err;
+	char *uuid;
+	topo_hdl_t *thp;
+	topo_walk_t *twp;
+
+	/* Without a dlopen()ed libtopo there is nothing to refresh. */
+	if (_topo_dlhandle == NULL)
+		return;
+
+	/*
+	 * Clear the FRU hash and initialize our basic structures.
+	 */
+	libzfs_fru_clear(hdl, B_FALSE);
+
+	if ((hdl->libzfs_topo_hdl = _topo_open(TOPO_VERSION,
+	    NULL, &err)) == NULL)
+		return;
+
+	thp = hdl->libzfs_topo_hdl;
+
+	/*
+	 * NOTE(review): on the failure paths below the topo handle (and
+	 * snapshot) stay open; they are released by the next
+	 * libzfs_fru_clear() call -- confirm that is intended.
+	 */
+	if ((uuid = _topo_snap_hold(thp, NULL, &err)) == NULL)
+		return;
+
+	_topo_hdl_strfree(thp, uuid);
+
+	/* Allocate the (zero-filled) bucket array on first use. */
+	if (hdl->libzfs_fru_hash == NULL &&
+	    (hdl->libzfs_fru_hash =
+	    calloc(ZFS_FRU_HASH_SIZE * sizeof (void *), 1)) == NULL)
+		return;
+
+	/*
+	 * We now have a topo snapshot, so iterate over the hc topology looking
+	 * for disks to add to the hash.
+	 */
+	twp = _topo_walk_init(thp, FM_FMRI_SCHEME_HC,
+	    libzfs_fru_gather, hdl, &err);
+	if (twp != NULL) {
+		(void) _topo_walk_step(twp, TOPO_WALK_CHILD);
+		_topo_walk_fini(twp);
+	}
+}
+
+/*
+ * Given a devfs path, return the FRU for the device, if known.  This will
+ * automatically call libzfs_fru_refresh() if it hasn't already been called by
+ * the consumer.  The string returned is valid until the next call to
+ * libzfs_fru_refresh().
+ */
+const char *
+libzfs_fru_lookup(libzfs_handle_t *hdl, const char *devpath)
+{
+	libzfs_fru_t *entry;
+
+	/* Populate the hash lazily on first use. */
+	if (hdl->libzfs_fru_hash == NULL)
+		libzfs_fru_refresh(hdl);
+
+	/* Still NULL means libtopo is unavailable or allocation failed. */
+	if (hdl->libzfs_fru_hash == NULL)
+		return (NULL);
+
+	/* Scan the chain of the bucket this device path hashes to. */
+	for (entry = hdl->libzfs_fru_hash[fru_strhash(devpath)];
+	    entry != NULL; entry = entry->zf_chain) {
+		if (strcmp(devpath, entry->zf_device) == 0)
+			return (entry->zf_fru);
+	}
+
+	return (NULL);
+}
+
+/*
+ * Given a fru path, return the device path.  This will automatically call
+ * libzfs_fru_refresh() if it hasn't already been called by the consumer.  The
+ * string returned is valid until the next call to libzfs_fru_refresh().
+ */
+const char *
+libzfs_fru_devpath(libzfs_handle_t *hdl, const char *fru)
+{
+	size_t bucket;
+	libzfs_fru_t *entry;
+
+	/* Populate the hash lazily on first use. */
+	if (hdl->libzfs_fru_hash == NULL)
+		libzfs_fru_refresh(hdl);
+
+	if (hdl->libzfs_fru_hash == NULL)
+		return (NULL);
+
+	/*
+	 * The hash is keyed by device path, not FRU, so every bucket
+	 * must be scanned; entries are matched ignoring authority info.
+	 */
+	for (bucket = 0; bucket < ZFS_FRU_HASH_SIZE; bucket++) {
+		for (entry = hdl->libzfs_fru_hash[bucket]; entry != NULL;
+		    entry = entry->zf_next) {
+			if (_topo_fmri_strcmp_noauth(hdl->libzfs_topo_hdl,
+			    fru, entry->zf_fru))
+				return (entry->zf_device);
+		}
+	}
+
+	return (NULL);
+}
+
+/*
+ * Change the stored FRU for the given vdev.
+ */
+int
+zpool_fru_set(zpool_handle_t *zhp, uint64_t vdev_guid, const char *fru)
+{
+	zfs_cmd_t zc = { 0 };
+
+	/*
+	 * Use strlcpy (as the rest of libzfs does) so zc_name/zc_value
+	 * are always NUL-terminated: strncpy leaves the destination
+	 * unterminated when the source fills the buffer.
+	 */
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, fru, sizeof (zc.zc_value));
+	zc.zc_guid = vdev_guid;
+
+	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SETFRU, &zc) != 0)
+		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
+		    dgettext(TEXT_DOMAIN, "cannot set FRU")));
+
+	return (0);
+}
+
+/*
+ * Compare to two FRUs, ignoring any authority information.
+ */
+boolean_t
+libzfs_fru_compare(libzfs_handle_t *hdl, const char *a, const char *b)
+{
+	/* Make sure libtopo has been probed / the hash initialized. */
+	if (hdl->libzfs_fru_hash == NULL)
+		libzfs_fru_refresh(hdl);
+
+	if (hdl->libzfs_fru_hash != NULL)
+		return (_topo_fmri_strcmp_noauth(hdl->libzfs_topo_hdl, a, b));
+
+	/* libtopo unavailable: fall back to literal string equality. */
+	return (strcmp(a, b) == 0);
+}
+
+/*
+ * This special function checks to see whether the FRU indicates it's supposed
+ * to be in the system chassis, but the chassis-id doesn't match.  This can
+ * happen in a clustered case, where both head nodes have the same logical
+ * disk, but opening the device on the other head node is meaningless.
+ */
+boolean_t
+libzfs_fru_notself(libzfs_handle_t *hdl, const char *fru)
+{
+	const char *chassisid;
+	size_t len;
+
+	/* Make sure the chassis id has been gathered, if possible. */
+	if (hdl->libzfs_fru_hash == NULL)
+		libzfs_fru_refresh(hdl);
+
+	/* Unknown local chassis id: cannot conclude anything. */
+	if (hdl->libzfs_chassis_id[0] == '\0')
+		return (B_FALSE);
+
+	/* Only FRUs claiming to be in the system chassis are relevant. */
+	if (strstr(fru, "/chassis=0/") == NULL)
+		return (B_FALSE);
+
+	if ((chassisid = strstr(fru, ":chassis-id=")) == NULL)
+		return (B_FALSE);
+
+	/* Skip the ":chassis-id=" prefix (was a magic "12"). */
+	chassisid += sizeof (":chassis-id=") - 1;
+	len = strlen(hdl->libzfs_chassis_id);
+	/* Matching id (followed by a delimiter) means it IS ourself. */
+	if (strncmp(chassisid, hdl->libzfs_chassis_id, len) == 0 &&
+	    (chassisid[len] == '/' || chassisid[len] == ':'))
+		return (B_FALSE);
+
+	return (B_TRUE);
+}
+
+/*
+ * Clear memory associated with the FRU hash.
+ */
+void
+libzfs_fru_clear(libzfs_handle_t *hdl, boolean_t final)
+{
+	libzfs_fru_t *frup;
+
+	/* Free every cached device/FRU pair via the global list. */
+	while ((frup = hdl->libzfs_fru_list) != NULL) {
+		hdl->libzfs_fru_list = frup->zf_next;
+		free(frup->zf_device);
+		free(frup->zf_fru);
+		free(frup);
+	}
+
+	hdl->libzfs_fru_list = NULL;
+
+	/* Release the topo snapshot and handle, if one is open. */
+	if (hdl->libzfs_topo_hdl != NULL) {
+		_topo_snap_release(hdl->libzfs_topo_hdl);
+		_topo_close(hdl->libzfs_topo_hdl);
+		hdl->libzfs_topo_hdl = NULL;
+	}
+
+	if (final) {
+		free(hdl->libzfs_fru_hash);
+		/*
+		 * NULL the pointer so a later use of the handle cannot
+		 * touch (or double-free) the freed bucket array.
+		 */
+		hdl->libzfs_fru_hash = NULL;
+	} else if (hdl->libzfs_fru_hash != NULL) {
+		/* Keep the bucket array, but empty it for the refresh. */
+		bzero(hdl->libzfs_fru_hash,
+		    ZFS_FRU_HASH_SIZE * sizeof (void *));
+	}
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..1c46d32f4bab85682c68e9ef17ec859578723245
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_impl.h
@@ -0,0 +1,259 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
+ */
+
+#ifndef	_LIBFS_IMPL_H
+#define	_LIBFS_IMPL_H
+
+#include <sys/dmu.h>
+#include <sys/fs/zfs.h>
+#include <sys/zfs_ioctl.h>
+#include <sys/spa.h>
+#include <sys/nvpair.h>
+
+#include <libshare.h>
+#include <libuutil.h>
+#include <libzfs.h>
+
+#include "zfs_ioctl_compat.h"
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#ifdef	VERIFY
+#undef	VERIFY
+#endif
+#define	VERIFY	verify
+
+/* One cached device-path -> FRU mapping (see libzfs_fru.c). */
+typedef struct libzfs_fru {
+	char *zf_device;		/* devfs path (hash key) */
+	char *zf_fru;			/* FRU FMRI string */
+	struct libzfs_fru *zf_chain;	/* next entry in same hash bucket */
+	struct libzfs_fru *zf_next;	/* next entry in global list */
+} libzfs_fru_t;
+
+/* Library-wide state behind the opaque libzfs_handle_t. */
+struct libzfs_handle {
+	int libzfs_error;
+	int libzfs_fd;			/* fd on ZFS_DEV used for ioctls */
+	FILE *libzfs_mnttab;
+	FILE *libzfs_sharetab;
+	zpool_handle_t *libzfs_pool_handles;
+	uu_avl_pool_t *libzfs_ns_avlpool;
+	uu_avl_t *libzfs_ns_avl;
+	uint64_t libzfs_ns_gen;
+	int libzfs_desc_active;
+	char libzfs_action[1024];
+	char libzfs_desc[1024];
+	char *libzfs_log_str;
+	int libzfs_printerr;
+	int libzfs_storeerr; /* stuff error messages into buffer */
+	void *libzfs_sharehdl; /* libshare handle */
+	uint_t libzfs_shareflags;
+	boolean_t libzfs_mnttab_enable;
+	avl_tree_t libzfs_mnttab_cache;
+	int libzfs_pool_iter;
+	libzfs_fru_t **libzfs_fru_hash;	/* devpath -> FRU hash buckets */
+	libzfs_fru_t *libzfs_fru_list;	/* all FRU entries (iterate/free) */
+	char libzfs_chassis_id[256];	/* chassis id gathered via libtopo */
+};
+
+#define	ZFSSHARE_MISS	0x01	/* Didn't find entry in cache */
+
+/* Per-dataset handle behind the opaque zfs_handle_t. */
+struct zfs_handle {
+	libzfs_handle_t *zfs_hdl;	/* owning library handle */
+	zpool_handle_t *zpool_hdl;
+	char zfs_name[ZFS_MAXNAMELEN];
+	zfs_type_t zfs_type; /* type including snapshot */
+	zfs_type_t zfs_head_type; /* type excluding snapshot */
+	dmu_objset_stats_t zfs_dmustats;
+	nvlist_t *zfs_props;
+	nvlist_t *zfs_user_props;
+	nvlist_t *zfs_recvd_props;
+	boolean_t zfs_mntcheck;
+	char *zfs_mntopts;
+	uint8_t *zfs_props_table;
+};
+
+/*
+ * This is different from checking zfs_type, because it will also catch
+ * snapshots of volumes.
+ */
+#define	ZFS_IS_VOLUME(zhp) ((zhp)->zfs_head_type == ZFS_TYPE_VOLUME)
+
+/* Per-pool handle behind the opaque zpool_handle_t. */
+struct zpool_handle {
+	libzfs_handle_t *zpool_hdl;	/* owning library handle */
+	zpool_handle_t *zpool_next;	/* next open pool handle */
+	char zpool_name[ZPOOL_MAXNAMELEN];
+	int zpool_state;
+	size_t zpool_config_size;
+	nvlist_t *zpool_config;
+	nvlist_t *zpool_old_config;	/* config prior to last refresh */
+	nvlist_t *zpool_props;
+	diskaddr_t zpool_start_block;
+};
+
+/* Share protocol indices; PROTO_END doubles as the count/terminator. */
+typedef enum {
+	PROTO_NFS = 0,
+	PROTO_SMB = 1,
+	PROTO_END = 2
+} zfs_share_proto_t;
+
+/*
+ * The following can be used as a bitmask and any new values
+ * added must preserve that capability.
+ */
+typedef enum {
+	SHARED_NOT_SHARED = 0x0,
+	SHARED_NFS = 0x2,
+	SHARED_SMB = 0x4
+} zfs_share_type_t;
+
+int zfs_error(libzfs_handle_t *, int, const char *);
+int zfs_error_fmt(libzfs_handle_t *, int, const char *, ...);
+void zfs_error_aux(libzfs_handle_t *, const char *, ...);
+void *zfs_alloc(libzfs_handle_t *, size_t);
+void *zfs_realloc(libzfs_handle_t *, void *, size_t, size_t);
+char *zfs_asprintf(libzfs_handle_t *, const char *, ...);
+char *zfs_strdup(libzfs_handle_t *, const char *);
+int no_memory(libzfs_handle_t *);
+
+int zfs_standard_error(libzfs_handle_t *, int, const char *);
+int zfs_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
+int zpool_standard_error(libzfs_handle_t *, int, const char *);
+int zpool_standard_error_fmt(libzfs_handle_t *, int, const char *, ...);
+
+int get_dependents(libzfs_handle_t *, boolean_t, const char *, char ***,
+    size_t *);
+zfs_handle_t *make_dataset_handle_zc(libzfs_handle_t *, zfs_cmd_t *);
+zfs_handle_t *make_dataset_simple_handle_zc(zfs_handle_t *, zfs_cmd_t *);
+
+int zprop_parse_value(libzfs_handle_t *, nvpair_t *, int, zfs_type_t,
+    nvlist_t *, char **, uint64_t *, const char *);
+int zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp,
+    zfs_type_t type);
+
+/*
+ * Use this changelist_gather() flag to force attempting mounts
+ * on each change node regardless of whether or not it is currently
+ * mounted.
+ */
+#define	CL_GATHER_MOUNT_ALWAYS	0x01
+/*
+ * Use this changelist_gather() flag to prevent unmounting of file systems.
+ */
+#define	CL_GATHER_DONT_UNMOUNT	0x02
+
+typedef struct prop_changelist prop_changelist_t;
+
+int zcmd_alloc_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, size_t);
+int zcmd_write_src_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *);
+int zcmd_write_conf_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t *);
+int zcmd_expand_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *);
+int zcmd_read_dst_nvlist(libzfs_handle_t *, zfs_cmd_t *, nvlist_t **);
+void zcmd_free_nvlists(zfs_cmd_t *);
+
+int changelist_prefix(prop_changelist_t *);
+int changelist_postfix(prop_changelist_t *);
+void changelist_rename(prop_changelist_t *, const char *, const char *);
+void changelist_remove(prop_changelist_t *, const char *);
+void changelist_free(prop_changelist_t *);
+prop_changelist_t *changelist_gather(zfs_handle_t *, zfs_prop_t, int, int);
+int changelist_unshare(prop_changelist_t *, zfs_share_proto_t *);
+int changelist_haszonedchild(prop_changelist_t *);
+
+void remove_mountpoint(zfs_handle_t *);
+int create_parents(libzfs_handle_t *, char *, int);
+boolean_t isa_child_of(const char *dataset, const char *parent);
+
+zfs_handle_t *make_dataset_handle(libzfs_handle_t *, const char *);
+
+int zpool_open_silent(libzfs_handle_t *, const char *, zpool_handle_t **);
+
+boolean_t zpool_name_valid(libzfs_handle_t *, boolean_t, const char *);
+
+int zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
+    boolean_t modifying);
+
+void namespace_clear(libzfs_handle_t *);
+
+/*
+ * libshare (sharemgr) interfaces used internally.
+ */
+
+extern int zfs_init_libshare(libzfs_handle_t *, int);
+extern void zfs_uninit_libshare(libzfs_handle_t *);
+extern int zfs_parse_options(char *, zfs_share_proto_t);
+
+extern int zfs_unshare_proto(zfs_handle_t *,
+    const char *, zfs_share_proto_t *);
+
+extern void libzfs_fru_clear(libzfs_handle_t *, boolean_t);
+
+#ifndef sun
+static int zfs_kernel_version = 0;
+
+/*
+ * This is FreeBSD version of ioctl, because Solaris' ioctl() updates
+ * zc_nvlist_dst_size even if an error is returned, on FreeBSD if an
+ * error is returned zc_nvlist_dst_size won't be updated.
+ */
+static __inline int
+zcmd_ioctl(int fd, unsigned long cmd, zfs_cmd_t *zc)
+{
+	size_t oldsize, zfs_kernel_version_size;
+	int version, ret, cflag = ZFS_CMD_COMPAT_NONE;	/* NOTE(review): 'version' is unused */
+
+	/*
+	 * Probe the kernel SPA version once and cache it (the static
+	 * lives in this header, so each including .c file keeps its own
+	 * copy).  A failed sysctl leaves the value 0 and re-probes on
+	 * the next call.
+	 */
+	zfs_kernel_version_size = sizeof(zfs_kernel_version);
+	if (zfs_kernel_version == 0) {
+		sysctlbyname("vfs.zfs.version.spa", &zfs_kernel_version,
+		    &zfs_kernel_version_size, NULL, 0);
+	}
+
+	/* Kernels at SPA v13-v15 need the legacy zfs_cmd_t layout. */
+	if (zfs_kernel_version == SPA_VERSION_15 ||
+	    zfs_kernel_version == SPA_VERSION_14 ||
+	    zfs_kernel_version == SPA_VERSION_13)
+		cflag = ZFS_CMD_COMPAT_V15;
+
+	oldsize = zc->zc_nvlist_dst_size;
+	ret = zcmd_ioctl_compat(fd, cmd, zc, cflag);
+
+	/*
+	 * Mimic the Solaris behavior described above: if the kernel
+	 * asked for a larger nvlist destination buffer, report ENOMEM
+	 * so callers grow the buffer and retry.
+	 */
+	if (ret == 0 && oldsize < zc->zc_nvlist_dst_size) {
+		ret = -1;
+		errno = ENOMEM;
+	}
+
+	return (ret);
+}
+#define	ioctl(fd, cmd, zc)	zcmd_ioctl((fd), (cmd), (zc))
+#endif	/* !sun */
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _LIBFS_IMPL_H */
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
new file mode 100644
index 0000000000000000000000000000000000000000..7e39b0b78f2b8ec1a6f3ee89a5d7ee1fcee8b64e
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_import.c
@@ -0,0 +1,1735 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+/*
+ * Pool import support functions.
+ *
+ * To import a pool, we rely on reading the configuration information from the
+ * ZFS label of each device.  If we successfully read the label, then we
+ * organize the configuration information in the following hierarchy:
+ *
+ * 	pool guid -> toplevel vdev guid -> label txg
+ *
+ * Duplicate entries matching this same tuple will be discarded.  Once we have
+ * examined every device, we pick the best label txg config for each toplevel
+ * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
+ * update any paths that have changed.  Finally, we attempt to import the pool
+ * using our derived config, and record the results.
+ */
+
+#include <ctype.h>
+#include <devid.h>
+#include <dirent.h>
+#include <errno.h>
+#include <libintl.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <thread_pool.h>
+#include <libgeom.h>
+
+#include <sys/vdev_impl.h>
+
+#include "libzfs.h"
+#include "libzfs_impl.h"
+
+/*
+ * Intermediate structures used to gather configuration information.
+ * The hierarchy mirrors the label tuple described above:
+ * pool guid -> toplevel vdev guid -> label txg.
+ */
+typedef struct config_entry {
+	uint64_t		ce_txg;		/* label txg for this config */
+	nvlist_t		*ce_config;	/* unpacked label nvlist */
+	struct config_entry	*ce_next;
+} config_entry_t;
+
+typedef struct vdev_entry {
+	uint64_t		ve_guid;	/* toplevel vdev guid */
+	config_entry_t		*ve_configs;	/* one entry per distinct txg */
+	struct vdev_entry	*ve_next;
+} vdev_entry_t;
+
+typedef struct pool_entry {
+	uint64_t		pe_guid;	/* pool guid */
+	vdev_entry_t		*pe_vdevs;	/* toplevel vdevs seen so far */
+	struct pool_entry	*pe_next;
+} pool_entry_t;
+
+typedef struct name_entry {
+	char			*ne_name;	/* device path */
+	uint64_t		ne_guid;	/* leaf vdev guid found there */
+	struct name_entry	*ne_next;
+} name_entry_t;
+
+typedef struct pool_list {
+	pool_entry_t		*pools;		/* pools discovered on disk */
+	name_entry_t		*names;		/* guid -> path translations */
+} pool_list_t;
+
+/*
+ * Return the encoded devid (including minor name, if available) for the
+ * device at 'path', or NULL if the device cannot be opened or exports no
+ * devid.  The returned string is allocated by devid_str_encode() and must
+ * be released by the caller with devid_str_free().
+ */
+static char *
+get_devid(const char *path)
+{
+	int fd;
+	ddi_devid_t devid;
+	char *minor, *ret;
+
+	if ((fd = open(path, O_RDONLY)) < 0)
+		return (NULL);
+
+	minor = NULL;
+	ret = NULL;
+	if (devid_get(fd, &devid) == 0) {
+		if (devid_get_minor_name(fd, &minor) == 0)
+			ret = devid_str_encode(devid, minor);
+		if (minor != NULL)
+			devid_str_free(minor);
+		devid_free(devid);
+	}
+	(void) close(fd);
+
+	return (ret);
+}
+
+
+/*
+ * Go through and fix up any path and/or devid information for the given vdev
+ * configuration.
+ *
+ * Recurses through interior vdevs; for each leaf, rewrites the path to the
+ * best-matching name gathered during the device scan and refreshes the
+ * devid.  Returns 0 on success, -1 on allocation failure.
+ */
+static int
+fix_paths(nvlist_t *nv, name_entry_t *names)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	uint64_t guid;
+	name_entry_t *ne, *best;
+	char *path, *devid;
+	int matched;
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++)
+			if (fix_paths(child[c], names) != 0)
+				return (-1);
+		return (0);
+	}
+
+	/*
+	 * This is a leaf (file or disk) vdev.  In either case, go through
+	 * the name list and see if we find a matching guid.  If so, replace
+	 * the path and see if we can calculate a new devid.
+	 *
+	 * There may be multiple names associated with a particular guid, in
+	 * which case we have overlapping slices or multiple paths to the same
+	 * disk.  If this is the case, then we want to pick the path that is
+	 * the most similar to the original, where "most similar" is the number
+	 * of matching characters starting from the end of the path.  This will
+	 * preserve slice numbers even if the disks have been reorganized, and
+	 * will also catch preferred disk names if multiple paths exist.
+	 */
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
+	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
+		path = NULL;
+
+	matched = 0;
+	best = NULL;
+	for (ne = names; ne != NULL; ne = ne->ne_next) {
+		if (ne->ne_guid == guid) {
+			const char *src, *dst;
+			int count;
+
+			/* no original path: any name for this guid will do */
+			if (path == NULL) {
+				best = ne;
+				break;
+			}
+
+			/* count matching characters from the end */
+			src = ne->ne_name + strlen(ne->ne_name) - 1;
+			dst = path + strlen(path) - 1;
+			for (count = 0; src >= ne->ne_name && dst >= path;
+			    src--, dst--, count++)
+				if (*src != *dst)
+					break;
+
+			/*
+			 * At this point, 'count' is the number of characters
+			 * matched from the end.
+			 */
+			if (count > matched || best == NULL) {
+				best = ne;
+				matched = count;
+			}
+		}
+	}
+
+	if (best == NULL)
+		return (0);
+
+	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
+		return (-1);
+
+	if ((devid = get_devid(best->ne_name)) == NULL) {
+		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
+	} else {
+		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
+			/* free the devid string before bailing out */
+			devid_str_free(devid);
+			return (-1);
+		}
+		devid_str_free(devid);
+	}
+
+	return (0);
+}
+
+/*
+ * Add the given configuration to the list of known devices.
+ *
+ * Ownership: 'config' is consumed -- it is either linked into the matching
+ * vdev_entry's config list or freed before returning.  Returns 0 on
+ * success, -1 on allocation failure.
+ */
+static int
+add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
+    nvlist_t *config)
+{
+	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
+	pool_entry_t *pe;
+	vdev_entry_t *ve;
+	config_entry_t *ce;
+	name_entry_t *ne;
+
+	/*
+	 * If this is a hot spare not currently in use or level 2 cache
+	 * device, add it to the list of names to translate, but don't do
+	 * anything else.
+	 */
+	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+	    &state) == 0 &&
+	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
+		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) {
+			nvlist_free(config);
+			return (-1);
+		}
+
+		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
+			free(ne);
+			nvlist_free(config);
+			return (-1);
+		}
+		ne->ne_guid = vdev_guid;
+		ne->ne_next = pl->names;
+		pl->names = ne;
+		/* the config itself is not kept for spares/l2cache */
+		nvlist_free(config);
+		return (0);
+	}
+
+	/*
+	 * If we have a valid config but cannot read any of these fields, then
+	 * it means we have a half-initialized label.  In vdev_label_init()
+	 * we write a label with txg == 0 so that we can identify the device
+	 * in case the user refers to the same disk later on.  If we fail to
+	 * create the pool, we'll be left with a label in this state
+	 * which should not be considered part of a valid pool.
+	 */
+	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+	    &pool_guid) != 0 ||
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
+	    &vdev_guid) != 0 ||
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
+	    &top_guid) != 0 ||
+	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
+	    &txg) != 0 || txg == 0) {
+		nvlist_free(config);
+		return (0);
+	}
+
+	/*
+	 * First, see if we know about this pool.  If not, then add it to the
+	 * list of known pools.
+	 */
+	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
+		if (pe->pe_guid == pool_guid)
+			break;
+	}
+
+	if (pe == NULL) {
+		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
+			nvlist_free(config);
+			return (-1);
+		}
+		pe->pe_guid = pool_guid;
+		pe->pe_next = pl->pools;
+		pl->pools = pe;
+	}
+
+	/*
+	 * Second, see if we know about this toplevel vdev.  Add it if its
+	 * missing.
+	 */
+	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
+		if (ve->ve_guid == top_guid)
+			break;
+	}
+
+	if (ve == NULL) {
+		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
+			nvlist_free(config);
+			return (-1);
+		}
+		ve->ve_guid = top_guid;
+		ve->ve_next = pe->pe_vdevs;
+		pe->pe_vdevs = ve;
+	}
+
+	/*
+	 * Third, see if we have a config with a matching transaction group.  If
+	 * so, then we do nothing.  Otherwise, add it to the list of known
+	 * configs.
+	 */
+	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
+		if (ce->ce_txg == txg)
+			break;
+	}
+
+	if (ce == NULL) {
+		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
+			nvlist_free(config);
+			return (-1);
+		}
+		ce->ce_txg = txg;
+		ce->ce_config = config;	/* list takes ownership of config */
+		ce->ce_next = ve->ve_configs;
+		ve->ve_configs = ce;
+	} else {
+		nvlist_free(config);
+	}
+
+	/*
+	 * At this point we've successfully added our config to the list of
+	 * known configs.  The last thing to do is add the vdev guid -> path
+	 * mappings so that we can fix up the configuration as necessary before
+	 * doing the import.
+	 */
+	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
+		return (-1);
+
+	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
+		free(ne);
+		return (-1);
+	}
+
+	ne->ne_guid = vdev_guid;
+	ne->ne_next = pl->names;
+	pl->names = ne;
+
+	return (0);
+}
+
+/*
+ * Determine whether the named pool is currently imported and, if so,
+ * whether its guid matches the one derived from the on-disk labels.
+ * Returns 0 with *isactive set accordingly, or -1 if the pool handle
+ * cannot be opened.
+ */
+static int
+pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
+    boolean_t *isactive)
+{
+	zpool_handle_t *zhp;
+	uint64_t theguid;
+
+	if (zpool_open_silent(hdl, name, &zhp) != 0)
+		return (-1);
+
+	/* NULL handle means no pool by that name is imported */
+	if (zhp == NULL) {
+		*isactive = B_FALSE;
+		return (0);
+	}
+
+	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
+	    &theguid) == 0);
+
+	zpool_close(zhp);
+
+	*isactive = (theguid == guid);
+	return (0);
+}
+
+/*
+ * Hand the assembled config to the kernel via ZFS_IOC_POOL_TRYIMPORT and
+ * return the refreshed config the kernel sends back, growing the
+ * destination buffer on ENOMEM as needed.  Returns NULL on failure; the
+ * caller owns the returned nvlist.
+ */
+static nvlist_t *
+refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
+{
+	nvlist_t *nvl;
+	zfs_cmd_t zc = { 0 };
+	int err;
+
+	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
+		return (NULL);
+
+	/* start with twice the packed config size as a guess */
+	if (zcmd_alloc_dst_nvlist(hdl, &zc,
+	    zc.zc_nvlist_conf_size * 2) != 0) {
+		zcmd_free_nvlists(&zc);
+		return (NULL);
+	}
+
+	/* retry with a larger buffer for as long as the kernel asks */
+	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
+	    &zc)) != 0 && errno == ENOMEM) {
+		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+			zcmd_free_nvlists(&zc);
+			return (NULL);
+		}
+	}
+
+	if (err) {
+		zcmd_free_nvlists(&zc);
+		return (NULL);
+	}
+
+	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
+		zcmd_free_nvlists(&zc);
+		return (NULL);
+	}
+
+	zcmd_free_nvlists(&zc);
+	return (nvl);
+}
+
+/*
+ * Determine if the vdev id is a hole in the namespace.
+ *
+ * 'hole_array' holds the ids of removed ("hole") top-level vdevs and
+ * 'holes' is the number of entries in that array.
+ */
+boolean_t
+vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
+{
+	/* unsigned index to match the unsigned 'holes' count */
+	for (uint_t c = 0; c < holes; c++) {
+
+		/* Top-level is a hole */
+		if (hole_array[c] == id)
+			return (B_TRUE);
+	}
+	return (B_FALSE);
+}
+
+/*
+ * Convert our list of pools into the definitive set of configurations.  We
+ * start by picking the best config for each toplevel vdev.  Once that's done,
+ * we assemble the toplevel vdevs into a full config for the pool.  We make a
+ * pass to fix up any incorrect paths, and then add it to the main list to
+ * return to the user.
+ *
+ * Returns an nvlist mapping pool name -> config (owned by the caller), or
+ * NULL if no importable pools were found or on error.
+ */
+static nvlist_t *
+get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
+{
+	pool_entry_t *pe;
+	vdev_entry_t *ve;
+	config_entry_t *ce;
+	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
+	nvlist_t **spares, **l2cache;
+	uint_t i, nspares, nl2cache;
+	boolean_t config_seen;
+	uint64_t best_txg;
+	char *name, *hostname;
+	uint64_t guid;
+	uint_t children = 0;
+	nvlist_t **child = NULL;
+	uint_t holes;
+	uint64_t *hole_array, max_id;
+	uint_t c;
+	boolean_t isactive;
+	uint64_t hostid;
+	nvlist_t *nvl;
+	boolean_t found_one = B_FALSE;
+	boolean_t valid_top_config = B_FALSE;
+
+	if (nvlist_alloc(&ret, 0, 0) != 0)
+		goto nomem;
+
+	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
+		uint64_t id, max_txg = 0;
+
+		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
+			goto nomem;
+		config_seen = B_FALSE;
+
+		/*
+		 * Iterate over all toplevel vdevs.  Grab the pool configuration
+		 * from the first one we find, and then go through the rest and
+		 * add them as necessary to the 'vdevs' member of the config.
+		 */
+		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
+
+			/*
+			 * Determine the best configuration for this vdev by
+			 * selecting the config with the latest transaction
+			 * group.
+			 */
+			best_txg = 0;
+			for (ce = ve->ve_configs; ce != NULL;
+			    ce = ce->ce_next) {
+
+				if (ce->ce_txg > best_txg) {
+					tmp = ce->ce_config;
+					best_txg = ce->ce_txg;
+				}
+			}
+
+			/*
+			 * NOTE(review): add_config() rejects labels with
+			 * txg == 0, so every vdev_entry has at least one
+			 * config with ce_txg > 0 and 'tmp' is always set
+			 * by the loop above.
+			 */
+
+			/*
+			 * We rely on the fact that the max txg for the
+			 * pool will contain the most up-to-date information
+			 * about the valid top-levels in the vdev namespace.
+			 */
+			if (best_txg > max_txg) {
+				(void) nvlist_remove(config,
+				    ZPOOL_CONFIG_VDEV_CHILDREN,
+				    DATA_TYPE_UINT64);
+				(void) nvlist_remove(config,
+				    ZPOOL_CONFIG_HOLE_ARRAY,
+				    DATA_TYPE_UINT64_ARRAY);
+
+				max_txg = best_txg;
+				hole_array = NULL;
+				holes = 0;
+				max_id = 0;
+				valid_top_config = B_FALSE;
+
+				if (nvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
+					verify(nvlist_add_uint64(config,
+					    ZPOOL_CONFIG_VDEV_CHILDREN,
+					    max_id) == 0);
+					valid_top_config = B_TRUE;
+				}
+
+				if (nvlist_lookup_uint64_array(tmp,
+				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
+				    &holes) == 0) {
+					verify(nvlist_add_uint64_array(config,
+					    ZPOOL_CONFIG_HOLE_ARRAY,
+					    hole_array, holes) == 0);
+				}
+			}
+
+			if (!config_seen) {
+				/*
+				 * Copy the relevant pieces of data to the pool
+				 * configuration:
+				 *
+				 *	version
+				 *	pool guid
+				 *	name
+				 *	pool txg (if available)
+				 *	comment (if available)
+				 *	pool state
+				 *	hostid (if available)
+				 *	hostname (if available)
+				 */
+				uint64_t state, version, pool_txg;
+				char *comment = NULL;
+
+				version = fnvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_VERSION);
+				fnvlist_add_uint64(config,
+				    ZPOOL_CONFIG_VERSION, version);
+				guid = fnvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_POOL_GUID);
+				fnvlist_add_uint64(config,
+				    ZPOOL_CONFIG_POOL_GUID, guid);
+				name = fnvlist_lookup_string(tmp,
+				    ZPOOL_CONFIG_POOL_NAME);
+				fnvlist_add_string(config,
+				    ZPOOL_CONFIG_POOL_NAME, name);
+
+				if (nvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_POOL_TXG, &pool_txg) == 0)
+					fnvlist_add_uint64(config,
+					    ZPOOL_CONFIG_POOL_TXG, pool_txg);
+
+				if (nvlist_lookup_string(tmp,
+				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
+					fnvlist_add_string(config,
+					    ZPOOL_CONFIG_COMMENT, comment);
+
+				state = fnvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_POOL_STATE);
+				fnvlist_add_uint64(config,
+				    ZPOOL_CONFIG_POOL_STATE, state);
+
+				/*
+				 * hostid/hostname are also remembered for the
+				 * "restore" step after refresh_config() below.
+				 */
+				hostid = 0;
+				if (nvlist_lookup_uint64(tmp,
+				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
+					fnvlist_add_uint64(config,
+					    ZPOOL_CONFIG_HOSTID, hostid);
+					hostname = fnvlist_lookup_string(tmp,
+					    ZPOOL_CONFIG_HOSTNAME);
+					fnvlist_add_string(config,
+					    ZPOOL_CONFIG_HOSTNAME, hostname);
+				}
+
+				config_seen = B_TRUE;
+			}
+
+			/*
+			 * Add this top-level vdev to the child array.
+			 */
+			verify(nvlist_lookup_nvlist(tmp,
+			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
+			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
+			    &id) == 0);
+
+			/* grow the child array so index 'id' fits */
+			if (id >= children) {
+				nvlist_t **newchild;
+
+				newchild = zfs_alloc(hdl, (id + 1) *
+				    sizeof (nvlist_t *));
+				if (newchild == NULL)
+					goto nomem;
+
+				for (c = 0; c < children; c++)
+					newchild[c] = child[c];
+
+				free(child);
+				child = newchild;
+				children = id + 1;
+			}
+			if (nvlist_dup(nvtop, &child[id], 0) != 0)
+				goto nomem;
+
+		}
+
+		/*
+		 * If we have information about all the top-levels then
+		 * clean up the nvlist which we've constructed. This
+		 * means removing any extraneous devices that are
+		 * beyond the valid range or adding devices to the end
+		 * of our array which appear to be missing.
+		 */
+		if (valid_top_config) {
+			if (max_id < children) {
+				for (c = max_id; c < children; c++)
+					nvlist_free(child[c]);
+				children = max_id;
+			} else if (max_id > children) {
+				nvlist_t **newchild;
+
+				newchild = zfs_alloc(hdl, (max_id) *
+				    sizeof (nvlist_t *));
+				if (newchild == NULL)
+					goto nomem;
+
+				for (c = 0; c < children; c++)
+					newchild[c] = child[c];
+
+				free(child);
+				child = newchild;
+				children = max_id;
+			}
+		}
+
+		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+		    &guid) == 0);
+
+		/*
+		 * The vdev namespace may contain holes as a result of
+		 * device removal. We must add them back into the vdev
+		 * tree before we process any missing devices.
+		 */
+		if (holes > 0) {
+			ASSERT(valid_top_config);
+
+			for (c = 0; c < children; c++) {
+				nvlist_t *holey;
+
+				if (child[c] != NULL ||
+				    !vdev_is_hole(hole_array, holes, c))
+					continue;
+
+				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
+				    0) != 0)
+					goto nomem;
+
+				/*
+				 * Holes in the namespace are treated as
+				 * "hole" top-level vdevs and have a
+				 * special flag set on them.
+				 */
+				if (nvlist_add_string(holey,
+				    ZPOOL_CONFIG_TYPE,
+				    VDEV_TYPE_HOLE) != 0 ||
+				    nvlist_add_uint64(holey,
+				    ZPOOL_CONFIG_ID, c) != 0 ||
+				    nvlist_add_uint64(holey,
+				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
+					goto nomem;
+				child[c] = holey;
+			}
+		}
+
+		/*
+		 * Look for any missing top-level vdevs.  If this is the case,
+		 * create a faked up 'missing' vdev as a placeholder.  We cannot
+		 * simply compress the child array, because the kernel performs
+		 * certain checks to make sure the vdev IDs match their location
+		 * in the configuration.
+		 */
+		for (c = 0; c < children; c++) {
+			if (child[c] == NULL) {
+				nvlist_t *missing;
+				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
+				    0) != 0)
+					goto nomem;
+				if (nvlist_add_string(missing,
+				    ZPOOL_CONFIG_TYPE,
+				    VDEV_TYPE_MISSING) != 0 ||
+				    nvlist_add_uint64(missing,
+				    ZPOOL_CONFIG_ID, c) != 0 ||
+				    nvlist_add_uint64(missing,
+				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
+					nvlist_free(missing);
+					goto nomem;
+				}
+				child[c] = missing;
+			}
+		}
+
+		/*
+		 * Put all of this pool's top-level vdevs into a root vdev.
+		 */
+		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
+			goto nomem;
+		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
+		    VDEV_TYPE_ROOT) != 0 ||
+		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
+		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
+		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+		    child, children) != 0) {
+			nvlist_free(nvroot);
+			goto nomem;
+		}
+
+		/* nvlist_add_nvlist_array() copied the children; free ours */
+		for (c = 0; c < children; c++)
+			nvlist_free(child[c]);
+		free(child);
+		children = 0;
+		child = NULL;
+
+		/*
+		 * Go through and fix up any paths and/or devids based on our
+		 * known list of vdev GUID -> path mappings.
+		 */
+		if (fix_paths(nvroot, pl->names) != 0) {
+			nvlist_free(nvroot);
+			goto nomem;
+		}
+
+		/*
+		 * Add the root vdev to this pool's configuration.
+		 */
+		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    nvroot) != 0) {
+			nvlist_free(nvroot);
+			goto nomem;
+		}
+		nvlist_free(nvroot);
+
+		/*
+		 * zdb uses this path to report on active pools that were
+		 * imported or created using -R.
+		 */
+		if (active_ok)
+			goto add_pool;
+
+		/*
+		 * Determine if this pool is currently active, in which case we
+		 * can't actually import it.
+		 */
+		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+		    &name) == 0);
+		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+		    &guid) == 0);
+
+		if (pool_active(hdl, name, guid, &isactive) != 0)
+			goto error;
+
+		if (isactive) {
+			nvlist_free(config);
+			config = NULL;
+			continue;
+		}
+
+		if ((nvl = refresh_config(hdl, config)) == NULL) {
+			nvlist_free(config);
+			config = NULL;
+			continue;
+		}
+
+		nvlist_free(config);
+		config = nvl;
+
+		/*
+		 * Go through and update the paths for spares, now that we have
+		 * them.
+		 */
+		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+		    &nvroot) == 0);
+		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+		    &spares, &nspares) == 0) {
+			for (i = 0; i < nspares; i++) {
+				if (fix_paths(spares[i], pl->names) != 0)
+					goto nomem;
+			}
+		}
+
+		/*
+		 * Update the paths for l2cache devices.
+		 */
+		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+		    &l2cache, &nl2cache) == 0) {
+			for (i = 0; i < nl2cache; i++) {
+				if (fix_paths(l2cache[i], pl->names) != 0)
+					goto nomem;
+			}
+		}
+
+		/*
+		 * Restore the original information read from the actual label.
+		 * 'hostid'/'hostname' were captured from the best label when
+		 * the pool data was copied above.
+		 */
+		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
+		    DATA_TYPE_UINT64);
+		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
+		    DATA_TYPE_STRING);
+		if (hostid != 0) {
+			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
+			    hostid) == 0);
+			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
+			    hostname) == 0);
+		}
+
+add_pool:
+		/*
+		 * Add this pool to the list of configs.
+		 */
+		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+		    &name) == 0);
+		if (nvlist_add_nvlist(ret, name, config) != 0)
+			goto nomem;
+
+		found_one = B_TRUE;
+		nvlist_free(config);
+		config = NULL;
+	}
+
+	if (!found_one) {
+		nvlist_free(ret);
+		ret = NULL;
+	}
+
+	return (ret);
+
+nomem:
+	(void) no_memory(hdl);
+error:
+	nvlist_free(config);
+	nvlist_free(ret);
+	for (c = 0; c < children; c++)
+		nvlist_free(child[c]);
+	free(child);
+
+	return (NULL);
+}
+
+/*
+ * Return the offset of the given label.  Labels 0 and 1 live at the front
+ * of the device; labels 2 and 3 at the end ('size' is the device size,
+ * already rounded down to a multiple of sizeof (vdev_label_t)).
+ */
+static uint64_t
+label_offset(uint64_t size, int l)
+{
+	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
+	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
+	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
+}
+
+/*
+ * Given a file descriptor, read the label information and return an nvlist
+ * describing the configuration, if there is one.
+ *
+ * Returns 0 with *config set to the unpacked label (or NULL if no valid
+ * label was found), or -1 if the label buffer cannot be allocated.  The
+ * caller owns *config.
+ */
+int
+zpool_read_label(int fd, nvlist_t **config)
+{
+	struct stat64 statbuf;
+	int l;
+	vdev_label_t *label;
+	uint64_t state, txg, size;
+
+	*config = NULL;
+
+	if (fstat64(fd, &statbuf) == -1)
+		return (0);
+	/* usable size, rounded down to a multiple of the label size */
+	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
+
+	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
+		return (-1);
+
+	/* try all four labels; the first that unpacks cleanly wins */
+	for (l = 0; l < VDEV_LABELS; l++) {
+		if (pread64(fd, label, sizeof (vdev_label_t),
+		    label_offset(size, l)) != sizeof (vdev_label_t))
+			continue;
+
+		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
+		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
+			continue;
+
+		/* reject labels with a missing or out-of-range pool state */
+		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
+		    &state) != 0 || state > POOL_STATE_L2CACHE) {
+			nvlist_free(*config);
+			continue;
+		}
+
+		/* txg == 0 marks a half-initialized (invalid) label */
+		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
+		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
+		    &txg) != 0 || txg == 0)) {
+			nvlist_free(*config);
+			continue;
+		}
+
+		free(label);
+		return (0);
+	}
+
+	free(label);
+	*config = NULL;
+	return (0);
+}
+
+/* One candidate device/slice to be probed for a ZFS label. */
+typedef struct rdsk_node {
+	char *rn_name;		/* device name, relative to rn_dfd */
+	int rn_dfd;		/* directory fd the name is opened under */
+	libzfs_handle_t *rn_hdl;
+	nvlist_t *rn_config;	/* label config, if one was found */
+	avl_tree_t *rn_avl;	/* the slice cache this node belongs to */
+	avl_node_t rn_node;
+	boolean_t rn_nozpool;	/* known not to contain a zpool */
+} rdsk_node_t;
+
+/*
+ * AVL comparator for rdsk_node_t entries: sort names containing "s0" first,
+ * then "s2" (the slices most likely to hold a label), then everything else
+ * in plain strcmp() order (normalized to -1/0/1).
+ */
+static int
+slice_cache_compare(const void *arg1, const void *arg2)
+{
+	const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
+	const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
+	char *nm1slice, *nm2slice;
+	int rv;
+
+	/*
+	 * slices zero and two are the most likely to provide results,
+	 * so put those first
+	 */
+	nm1slice = strstr(nm1, "s0");
+	nm2slice = strstr(nm2, "s0");
+	if (nm1slice && !nm2slice) {
+		return (-1);
+	}
+	if (!nm1slice && nm2slice) {
+		return (1);
+	}
+	nm1slice = strstr(nm1, "s2");
+	nm2slice = strstr(nm2, "s2");
+	if (nm1slice && !nm2slice) {
+		return (-1);
+	}
+	if (!nm1slice && nm2slice) {
+		return (1);
+	}
+
+	rv = strcmp(nm1, nm2);
+	if (rv == 0)
+		return (0);
+	return (rv > 0 ? 1 : -1);
+}
+
+#ifdef sun
+/*
+ * If slice 'partno' of 'diskname' is too small to hold a zpool ('size' is
+ * in units of 'blksz'-byte sectors), flag the matching cache node so the
+ * scanner can skip opening it.  Solaris-only.
+ */
+static void
+check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
+    diskaddr_t size, uint_t blksz)
+{
+	rdsk_node_t tmpnode;
+	rdsk_node_t *node;
+	char sname[MAXNAMELEN];
+
+	tmpnode.rn_name = &sname[0];
+	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
+	    diskname, partno);
+	/*
+	 * protect against division by zero for disk labels that
+	 * contain a bogus sector size
+	 */
+	if (blksz == 0)
+		blksz = DEV_BSIZE;
+	/* too small to contain a zpool? */
+	if ((size < (SPA_MINDEVSIZE / blksz)) &&
+	    (node = avl_find(r, &tmpnode, NULL)))
+		node->rn_nozpool = B_TRUE;
+}
+#endif	/* sun */
+
+/*
+ * The device named 'sname' has disappeared; mark every slice ("sN") and
+ * partition ("pN") node derived from the same disk as unable to contain a
+ * zpool.  No-op on FreeBSD (body is Solaris-only).
+ */
+static void
+nozpool_all_slices(avl_tree_t *r, const char *sname)
+{
+#ifdef sun
+	char diskname[MAXNAMELEN];
+	char *ptr;
+	int i;
+
+	(void) strncpy(diskname, sname, MAXNAMELEN);
+	/* strip the trailing slice/partition designator */
+	if (((ptr = strrchr(diskname, 's')) == NULL) &&
+	    ((ptr = strrchr(diskname, 'p')) == NULL))
+		return;
+	ptr[0] = 's';
+	ptr[1] = '\0';
+	for (i = 0; i < NDKMAP; i++)
+		check_one_slice(r, diskname, i, 0, 1);
+	ptr[0] = 'p';
+	for (i = 0; i <= FD_NUMPART; i++)
+		check_one_slice(r, diskname, i, 0, 1);
+#endif	/* sun */
+}
+
+/*
+ * Read the disk's VTOC or EFI label through 'fd' and flag any slices too
+ * small to hold a zpool (see check_one_slice()).  No-op on FreeBSD, where
+ * GEOM supplies the provider list instead.
+ */
+static void
+check_slices(avl_tree_t *r, int fd, const char *sname)
+{
+#ifdef sun
+	struct extvtoc vtoc;
+	struct dk_gpt *gpt;
+	char diskname[MAXNAMELEN];
+	char *ptr;
+	int i;
+
+	(void) strncpy(diskname, sname, MAXNAMELEN);
+	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
+		return;
+	ptr[1] = '\0';
+
+	if (read_extvtoc(fd, &vtoc) >= 0) {
+		for (i = 0; i < NDKMAP; i++)
+			check_one_slice(r, diskname, i,
+			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
+	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
+		/*
+		 * on x86 we'll still have leftover links that point
+		 * to slices s[9-15], so use NDKMAP instead
+		 */
+		for (i = 0; i < NDKMAP; i++)
+			check_one_slice(r, diskname, i,
+			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
+		/* nodes p[1-4] are never used with EFI labels */
+		ptr[0] = 'p';
+		for (i = 1; i <= FD_NUMPART; i++)
+			check_one_slice(r, diskname, i, 0, 1);
+		efi_free(gpt);
+	}
+#endif	/* sun */
+}
+
+/*
+ * Thread-pool worker: open a single candidate device (by name, relative to
+ * the directory fd in rn_dfd), read its ZFS label, and record the resulting
+ * config in rn_config.  Stat failures and non-device files are silently
+ * skipped; overly small slices are flagged via check_slices().
+ */
+static void
+zpool_open_func(void *arg)
+{
+	rdsk_node_t *rn = arg;
+	struct stat64 statbuf;
+	nvlist_t *config;
+	int fd;
+
+	if (rn->rn_nozpool)
+		return;
+	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
+		/* symlink to a device that's no longer there */
+		if (errno == ENOENT)
+			nozpool_all_slices(rn->rn_avl, rn->rn_name);
+		return;
+	}
+	/*
+	 * Ignore failed stats.  We only want regular
+	 * files, character devs and block devs.
+	 */
+	if (fstat64(fd, &statbuf) != 0 ||
+	    (!S_ISREG(statbuf.st_mode) &&
+	    !S_ISCHR(statbuf.st_mode) &&
+	    !S_ISBLK(statbuf.st_mode))) {
+		(void) close(fd);
+		return;
+	}
+	/* this file is too small to hold a zpool */
+	if (S_ISREG(statbuf.st_mode) &&
+	    statbuf.st_size < SPA_MINDEVSIZE) {
+		(void) close(fd);
+		return;
+	} else if (!S_ISREG(statbuf.st_mode)) {
+		/*
+		 * Try to read the disk label first so we don't have to
+		 * open a bunch of minor nodes that can't have a zpool.
+		 */
+		check_slices(rn->rn_avl, fd, rn->rn_name);
+	}
+
+	/* zpool_read_label() fails only on allocation failure */
+	if ((zpool_read_label(fd, &config)) != 0) {
+		(void) close(fd);
+		(void) no_memory(rn->rn_hdl);
+		return;
+	}
+	(void) close(fd);
+
+
+	rn->rn_config = config;
+	if (config != NULL) {
+		assert(rn->rn_nozpool == B_FALSE);
+	}
+}
+
+/*
+ * Given a file descriptor, clear (zero) the label information.  This function
+ * is used in the appliance stack as part of the ZFS sysevent module and
+ * to implement the "zpool labelclear" command.
+ *
+ * Returns 0 on success (or if the device cannot be stat'ed), -1 on
+ * allocation or write failure.
+ */
+int
+zpool_clear_label(int fd)
+{
+	struct stat64 statbuf;
+	int l;
+	vdev_label_t *label;
+	uint64_t size;
+
+	if (fstat64(fd, &statbuf) == -1)
+		return (0);
+	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
+
+	/* calloc gives a zero-filled buffer to overwrite each label with */
+	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
+		return (-1);
+
+	for (l = 0; l < VDEV_LABELS; l++) {
+		if (pwrite64(fd, label, sizeof (vdev_label_t),
+		    label_offset(size, l)) != sizeof (vdev_label_t)) {
+			/* don't leak the label buffer on a short write */
+			free(label);
+			return (-1);
+		}
+	}
+
+	free(label);
+	return (0);
+}
+
+/*
+ * Given a list of directories to search, find all pools stored on disk.  This
+ * includes partial pools which are not available to import.  If no args are
+ * given (argc is 0), then the default directory (/dev/dsk) is searched.
+ * poolname or guid (but not both) are provided by the caller when trying
+ * to import a specific pool.
+ */
+static nvlist_t *
+zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
+{
+	int i, dirs = iarg->paths;
+	DIR *dirp = NULL;
+	struct dirent64 *dp;
+	char path[MAXPATHLEN];
+	char *end, **dir = iarg->path;
+	size_t pathleft;
+	nvlist_t *ret = NULL;
+	static char *default_dir = "/dev";
+	pool_list_t pools = { 0 };
+	pool_entry_t *pe, *penext;
+	vdev_entry_t *ve, *venext;
+	config_entry_t *ce, *cenext;
+	name_entry_t *ne, *nenext;
+	avl_tree_t slice_cache;
+	rdsk_node_t *slice;
+	void *cookie;
+
+	if (dirs == 0) {
+		dirs = 1;
+		dir = &default_dir;
+	}
+
+	/*
+	 * Go through and read the label configuration information from every
+	 * possible device, organizing the information according to pool GUID
+	 * and toplevel GUID.
+	 */
+	for (i = 0; i < dirs; i++) {
+		tpool_t *t;
+		char *rdsk;
+		int dfd;
+
+		/* use realpath to normalize the path */
+		if (realpath(dir[i], path) == 0) {
+			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
+			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
+			goto error;
+		}
+		end = &path[strlen(path)];
+		*end++ = '/';
+		*end = 0;
+		pathleft = &path[sizeof (path)] - end;
+
+		/*
+		 * Using raw devices instead of block devices when we're
+		 * reading the labels skips a bunch of slow operations during
+		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
+		 */
+		if (strcmp(path, "/dev/dsk/") == 0)
+			rdsk = "/dev/";
+		else
+			rdsk = path;
+
+		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
+		    (dirp = fdopendir(dfd)) == NULL) {
+			zfs_error_aux(hdl, strerror(errno));
+			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
+			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
+			    rdsk);
+			goto error;
+		}
+
+		avl_create(&slice_cache, slice_cache_compare,
+		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
+
+		if (strcmp(rdsk, "/dev/") == 0) {
+			struct gmesh mesh;
+			struct gclass *mp;
+			struct ggeom *gp;
+			struct gprovider *pp;
+
+			errno = geom_gettree(&mesh);
+			if (errno != 0) {
+				zfs_error_aux(hdl, strerror(errno));
+				(void) zfs_error_fmt(hdl, EZFS_BADPATH,
+				    dgettext(TEXT_DOMAIN, "cannot get GEOM tree"));
+				goto error;
+			}
+
+			LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
+		        	LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
+					LIST_FOREACH(pp, &gp->lg_provider, lg_provider) {
+						slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
+						slice->rn_name = zfs_strdup(hdl, pp->lg_name);
+						slice->rn_avl = &slice_cache;
+						slice->rn_dfd = dfd;
+						slice->rn_hdl = hdl;
+						slice->rn_nozpool = B_FALSE;
+						avl_add(&slice_cache, slice);
+					}
+				}
+			}
+
+			geom_deletetree(&mesh);
+			goto skipdir;
+		}
+
+		/*
+		 * This is not MT-safe, but we have no MT consumers of libzfs
+		 */
+		while ((dp = readdir64(dirp)) != NULL) {
+			const char *name = dp->d_name;
+			if (name[0] == '.' &&
+			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
+				continue;
+
+			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
+			slice->rn_name = zfs_strdup(hdl, name);
+			slice->rn_avl = &slice_cache;
+			slice->rn_dfd = dfd;
+			slice->rn_hdl = hdl;
+			slice->rn_nozpool = B_FALSE;
+			avl_add(&slice_cache, slice);
+		}
+skipdir:
+		/*
+		 * create a thread pool to do all of this in parallel;
+		 * rn_nozpool is not protected, so this is racy in that
+		 * multiple tasks could decide that the same slice can
+		 * not hold a zpool, which is benign.  Also choose
+		 * double the number of processors; we hold a lot of
+		 * locks in the kernel, so going beyond this doesn't
+		 * buy us much.
+		 */
+		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
+		    0, NULL);
+		for (slice = avl_first(&slice_cache); slice;
+		    (slice = avl_walk(&slice_cache, slice,
+		    AVL_AFTER)))
+			(void) tpool_dispatch(t, zpool_open_func, slice);
+		tpool_wait(t);
+		tpool_destroy(t);
+
+		cookie = NULL;
+		while ((slice = avl_destroy_nodes(&slice_cache,
+		    &cookie)) != NULL) {
+			if (slice->rn_config != NULL) {
+				nvlist_t *config = slice->rn_config;
+				boolean_t matched = B_TRUE;
+
+				if (iarg->poolname != NULL) {
+					char *pname;
+
+					matched = nvlist_lookup_string(config,
+					    ZPOOL_CONFIG_POOL_NAME,
+					    &pname) == 0 &&
+					    strcmp(iarg->poolname, pname) == 0;
+				} else if (iarg->guid != 0) {
+					uint64_t this_guid;
+
+					matched = nvlist_lookup_uint64(config,
+					    ZPOOL_CONFIG_POOL_GUID,
+					    &this_guid) == 0 &&
+					    iarg->guid == this_guid;
+				}
+				if (!matched) {
+					nvlist_free(config);
+					config = NULL;
+					continue;
+				}
+				/* use the non-raw path for the config */
+				(void) strlcpy(end, slice->rn_name, pathleft);
+				if (add_config(hdl, &pools, path, config) != 0)
+					goto error;
+			}
+			free(slice->rn_name);
+			free(slice);
+		}
+		avl_destroy(&slice_cache);
+
+		(void) closedir(dirp);
+		dirp = NULL;
+	}
+
+	ret = get_configs(hdl, &pools, iarg->can_be_active);
+
+error:
+	for (pe = pools.pools; pe != NULL; pe = penext) {
+		penext = pe->pe_next;
+		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
+			venext = ve->ve_next;
+			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
+				cenext = ce->ce_next;
+				if (ce->ce_config)
+					nvlist_free(ce->ce_config);
+				free(ce);
+			}
+			free(ve);
+		}
+		free(pe);
+	}
+
+	for (ne = pools.names; ne != NULL; ne = nenext) {
+		nenext = ne->ne_next;
+		if (ne->ne_name)
+			free(ne->ne_name);
+		free(ne);
+	}
+
+	if (dirp)
+		(void) closedir(dirp);
+
+	return (ret);
+}
+
+/*
+ * Scan the given directories (argc entries in argv) for devices that
+ * belong to exported pools and return their configurations.  Thin
+ * wrapper around zpool_find_import_impl() with no name/guid filtering.
+ */
+nvlist_t *
+zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
+{
+	importargs_t args = { 0 };
+
+	args.path = argv;
+	args.paths = argc;
+
+	return (zpool_find_import_impl(hdl, &args));
+}
+
+/*
+ * Given a cache file, return the contents as a list of importable pools.
+ * poolname or guid (but not both) are provided by the caller when trying
+ * to import a specific pool.
+ */
+nvlist_t *
+zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
+    char *poolname, uint64_t guid)
+{
+	char *buf;
+	int fd;
+	struct stat64 statbuf;
+	nvlist_t *raw, *src, *dst;
+	nvlist_t *pools;
+	nvpair_t *elem;
+	char *name;
+	uint64_t this_guid;
+	boolean_t active;
+
+	/* Callers may filter by pool name or by guid, but never both. */
+	verify(poolname == NULL || guid == 0);
+
+	if ((fd = open(cachefile, O_RDONLY)) < 0) {
+		zfs_error_aux(hdl, "%s", strerror(errno));
+		(void) zfs_error(hdl, EZFS_BADCACHE,
+		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
+		return (NULL);
+	}
+
+	if (fstat64(fd, &statbuf) != 0) {
+		zfs_error_aux(hdl, "%s", strerror(errno));
+		(void) close(fd);
+		(void) zfs_error(hdl, EZFS_BADCACHE,
+		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
+		return (NULL);
+	}
+
+	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
+		(void) close(fd);
+		return (NULL);
+	}
+
+	/* Slurp the whole packed nvlist into memory in one read. */
+	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
+		(void) close(fd);
+		free(buf);
+		(void) zfs_error(hdl, EZFS_BADCACHE,
+		    dgettext(TEXT_DOMAIN,
+		    "failed to read cache file contents"));
+		return (NULL);
+	}
+
+	(void) close(fd);
+
+	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
+		free(buf);
+		(void) zfs_error(hdl, EZFS_BADCACHE,
+		    dgettext(TEXT_DOMAIN,
+		    "invalid or corrupt cache file contents"));
+		return (NULL);
+	}
+
+	free(buf);
+
+	/*
+	 * Go through and get the current state of the pools and refresh their
+	 * state.
+	 */
+	if (nvlist_alloc(&pools, 0, 0) != 0) {
+		(void) no_memory(hdl);
+		nvlist_free(raw);
+		return (NULL);
+	}
+
+	elem = NULL;
+	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
+		verify(nvpair_value_nvlist(elem, &src) == 0);
+
+		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
+		    &name) == 0);
+		if (poolname != NULL && strcmp(poolname, name) != 0)
+			continue;
+
+		/*
+		 * Look the guid up once; it is needed both for filtering
+		 * and for the pool_active() check below.  (The original
+		 * code performed this same lookup a second time inside
+		 * the guid-filter branch.)
+		 */
+		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
+		    &this_guid) == 0);
+		if (guid != 0 && guid != this_guid)
+			continue;
+
+		if (pool_active(hdl, name, this_guid, &active) != 0) {
+			nvlist_free(raw);
+			nvlist_free(pools);
+			return (NULL);
+		}
+
+		/* Already-imported pools are not importable; skip them. */
+		if (active)
+			continue;
+
+		if ((dst = refresh_config(hdl, src)) == NULL) {
+			nvlist_free(raw);
+			nvlist_free(pools);
+			return (NULL);
+		}
+
+		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
+			(void) no_memory(hdl);
+			nvlist_free(dst);
+			nvlist_free(raw);
+			nvlist_free(pools);
+			return (NULL);
+		}
+		nvlist_free(dst);
+	}
+
+	nvlist_free(raw);
+	return (pools);
+}
+
+/*
+ * zpool_iter() callback: report (1/0) whether the imported pool 'zhp'
+ * matches the pool name — or, if no name was given, the guid — recorded
+ * in the importargs_t passed as 'data'.  Always closes 'zhp'.
+ */
+static int
+name_or_guid_exists(zpool_handle_t *zhp, void *data)
+{
+	importargs_t *import = data;
+	int found;
+
+	if (import->poolname != NULL) {
+		char *pool_name;
+
+		verify(nvlist_lookup_string(zhp->zpool_config,
+		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
+		found = (strcmp(pool_name, import->poolname) == 0);
+	} else {
+		uint64_t pool_guid;
+
+		verify(nvlist_lookup_uint64(zhp->zpool_config,
+		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
+		found = (pool_guid == import->guid);
+	}
+
+	zpool_close(zhp);
+	return (found);
+}
+
+/*
+ * Top-level entry for locating importable pools.  Honors the criteria
+ * in 'import': when a cache file was supplied it is consulted instead
+ * of scanning devices.  If 'unique' is set, first record in
+ * import->exists whether a pool with the requested name/guid is
+ * already imported.
+ */
+nvlist_t *
+zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
+{
+	verify(import->poolname == NULL || import->guid == 0);
+
+	if (import->unique)
+		import->exists = zpool_iter(hdl, name_or_guid_exists, import);
+
+	if (import->cachefile == NULL)
+		return (zpool_find_import_impl(hdl, import));
+
+	return (zpool_find_import_cached(hdl, import->cachefile,
+	    import->poolname, import->guid));
+}
+
+/*
+ * Recursively search the vdev tree rooted at 'nv' for a vdev whose
+ * ZPOOL_CONFIG_GUID matches 'guid'.  Returns B_TRUE on a match
+ * anywhere in the subtree.
+ */
+boolean_t
+find_guid(nvlist_t *nv, uint64_t guid)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	uint64_t this_guid;
+
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &this_guid) == 0);
+	if (this_guid == guid)
+		return (B_TRUE);
+
+	/* Leaf vdevs have no children array; nothing more to search. */
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return (B_FALSE);
+
+	for (c = 0; c < children; c++) {
+		if (find_guid(child[c], guid))
+			return (B_TRUE);
+	}
+
+	return (B_FALSE);
+}
+
+/*
+ * Callback state for find_aux(): locate the pool that holds an
+ * auxiliary (spare or l2cache) vdev with a given guid.
+ */
+typedef struct aux_cbdata {
+	const char	*cb_type;	/* ZPOOL_CONFIG_SPARES or _L2CACHE */
+	uint64_t	cb_guid;	/* vdev guid being searched for */
+	zpool_handle_t	*cb_zhp;	/* matching pool, left open for caller */
+} aux_cbdata_t;
+
+/*
+ * zpool_iter() callback: scan one pool's aux vdev array (cb_type) for
+ * a guid equal to cb_guid.  On a hit the pool handle is stashed in
+ * cb_zhp (left open) and 1 is returned to stop iteration; otherwise
+ * the handle is closed and iteration continues.
+ */
+static int
+find_aux(zpool_handle_t *zhp, void *data)
+{
+	aux_cbdata_t *cbp = data;
+	nvlist_t *nvroot;
+	nvlist_t **list;
+	uint_t i, count;
+	uint64_t guid;
+
+	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+
+	/* Pools without any aux vdevs of this type simply have no array. */
+	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
+	    &list, &count) != 0)
+		count = 0;
+
+	for (i = 0; i < count; i++) {
+		verify(nvlist_lookup_uint64(list[i],
+		    ZPOOL_CONFIG_GUID, &guid) == 0);
+		if (guid == cbp->cb_guid) {
+			cbp->cb_zhp = zhp;
+			return (1);
+		}
+	}
+
+	zpool_close(zhp);
+	return (0);
+}
+
+/*
+ * Determines if the pool is in use.  If so, it returns true and the state of
+ * the pool as well as the name of the pool.  Both strings are allocated and
+ * must be freed by the caller.
+ */
+int
+zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
+    boolean_t *inuse)
+{
+	nvlist_t *config;
+	char *name;
+	boolean_t ret;
+	uint64_t guid, vdev_guid;
+	zpool_handle_t *zhp;
+	nvlist_t *pool_config;
+	uint64_t stateval, isspare;
+	aux_cbdata_t cb = { 0 };
+	boolean_t isactive;
+
+	*inuse = B_FALSE;
+
+	if (zpool_read_label(fd, &config) != 0) {
+		(void) no_memory(hdl);
+		return (-1);
+	}
+
+	/* No label at all: the device is not part of any pool. */
+	if (config == NULL)
+		return (0);
+
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+	    &stateval) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
+	    &vdev_guid) == 0);
+
+	/* Spare and l2cache labels carry no pool name or pool guid. */
+	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
+		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+		    &name) == 0);
+		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+		    &guid) == 0);
+	}
+
+	switch (stateval) {
+	case POOL_STATE_EXPORTED:
+		/*
+		 * A pool with an exported state may in fact be imported
+		 * read-only, so check the in-core state to see if it's
+		 * active and imported read-only.  If it is, set
+		 * its state to active.
+		 */
+		zhp = NULL;
+		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
+		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
+		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
+			stateval = POOL_STATE_ACTIVE;
+		/*
+		 * Don't leak the handle opened by the check above
+		 * (the original code never closed it).
+		 */
+		if (zhp != NULL)
+			zpool_close(zhp);
+
+		ret = B_TRUE;
+		break;
+
+	case POOL_STATE_ACTIVE:
+		/*
+		 * For an active pool, we have to determine if it's really part
+		 * of a currently active pool (in which case the pool will exist
+		 * and the guid will be the same), or whether it's part of an
+		 * active pool that was disconnected without being explicitly
+		 * exported.
+		 */
+		if (pool_active(hdl, name, guid, &isactive) != 0) {
+			nvlist_free(config);
+			return (-1);
+		}
+
+		if (isactive) {
+			/*
+			 * Because the device may have been removed while
+			 * offlined, we only report it as active if the vdev is
+			 * still present in the config.  Otherwise, pretend like
+			 * it's not in use.
+			 */
+			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
+			    (pool_config = zpool_get_config(zhp, NULL))
+			    != NULL) {
+				nvlist_t *nvroot;
+
+				verify(nvlist_lookup_nvlist(pool_config,
+				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+				ret = find_guid(nvroot, vdev_guid);
+			} else {
+				ret = B_FALSE;
+			}
+
+			/*
+			 * If this is an active spare within another pool, we
+			 * treat it like an unused hot spare.  This allows the
+			 * user to create a pool with a hot spare that currently
+			 * in use within another pool.  Since we return B_TRUE,
+			 * libdiskmgt will continue to prevent generic consumers
+			 * from using the device.
+			 */
+			if (ret && nvlist_lookup_uint64(config,
+			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
+				stateval = POOL_STATE_SPARE;
+
+			if (zhp != NULL)
+				zpool_close(zhp);
+		} else {
+			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
+			ret = B_TRUE;
+		}
+		break;
+
+	case POOL_STATE_SPARE:
+		/*
+		 * For a hot spare, it can be either definitively in use, or
+		 * potentially active.  To determine if it's in use, we iterate
+		 * over all pools in the system and search for one with a spare
+		 * with a matching guid.
+		 *
+		 * Due to the shared nature of spares, we don't actually report
+		 * the potentially active case as in use.  This means the user
+		 * can freely create pools on the hot spares of exported pools,
+		 * but to do otherwise makes the resulting code complicated, and
+		 * we end up having to deal with this case anyway.
+		 */
+		cb.cb_zhp = NULL;
+		cb.cb_guid = vdev_guid;
+		cb.cb_type = ZPOOL_CONFIG_SPARES;
+		/* Use boolean_t constants; the original used bare TRUE/FALSE. */
+		if (zpool_iter(hdl, find_aux, &cb) == 1) {
+			name = (char *)zpool_get_name(cb.cb_zhp);
+			ret = B_TRUE;
+		} else {
+			ret = B_FALSE;
+		}
+		break;
+
+	case POOL_STATE_L2CACHE:
+
+		/*
+		 * Check if any pool is currently using this l2cache device.
+		 */
+		cb.cb_zhp = NULL;
+		cb.cb_guid = vdev_guid;
+		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
+		if (zpool_iter(hdl, find_aux, &cb) == 1) {
+			name = (char *)zpool_get_name(cb.cb_zhp);
+			ret = B_TRUE;
+		} else {
+			ret = B_FALSE;
+		}
+		break;
+
+	default:
+		ret = B_FALSE;
+	}
+
+
+	if (ret) {
+		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
+			if (cb.cb_zhp)
+				zpool_close(cb.cb_zhp);
+			nvlist_free(config);
+			return (-1);
+		}
+		*state = (pool_state_t)stateval;
+	}
+
+	if (cb.cb_zhp)
+		zpool_close(cb.cb_zhp);
+
+	nvlist_free(config);
+	*inuse = ret;
+	return (0);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c
new file mode 100644
index 0000000000000000000000000000000000000000..a4db909e180e27ea540644241a52185ac875422b
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_iter.c
@@ -0,0 +1,471 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <stddef.h>
+#include <libintl.h>
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+/*
+ * Invoke 'func' on each clone recorded in this snapshot's clones
+ * nvlist.  Clones that can no longer be opened are skipped.  Stops at
+ * the first callback error and returns it; 0 otherwise.
+ */
+int
+zfs_iter_clones(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+	nvlist_t *clones = zfs_get_clones_nvl(zhp);
+	nvpair_t *pair = NULL;
+
+	if (clones == NULL)
+		return (0);
+
+	while ((pair = nvlist_next_nvpair(clones, pair)) != NULL) {
+		zfs_handle_t *czhp;
+		int err;
+
+		czhp = zfs_open(zhp->zfs_hdl, nvpair_name(pair),
+		    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+		if (czhp == NULL)
+			continue;
+
+		err = func(czhp, data);
+		if (err != 0)
+			return (err);
+	}
+	return (0);
+}
+
+/*
+ * Perform one step of a *_LIST_NEXT-style iteration ioctl for 'zhp',
+ * transparently growing the destination nvlist buffer and retrying on
+ * ENOMEM.  Returns 0 on success (zc describes the next entry), 1 when
+ * iteration is complete (ESRCH) or the dataset disappeared (ENOENT),
+ * and a negative standard error otherwise.
+ */
+static int
+zfs_do_list_ioctl(zfs_handle_t *zhp, unsigned long arg, zfs_cmd_t *zc)
+{
+	int rc;
+	uint64_t	orig_cookie;
+
+	/* Save the cookie so an ENOMEM retry resumes at the same entry. */
+	orig_cookie = zc->zc_cookie;
+top:
+	(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
+	rc = ioctl(zhp->zfs_hdl->libzfs_fd, arg, zc);
+
+	if (rc == -1) {
+		switch (errno) {
+		case ENOMEM:
+			/* expand nvlist memory and try again */
+			if (zcmd_expand_dst_nvlist(zhp->zfs_hdl, zc) != 0) {
+				zcmd_free_nvlists(zc);
+				return (-1);
+			}
+			zc->zc_cookie = orig_cookie;
+			goto top;
+		/*
+		 * An errno value of ESRCH indicates normal completion.
+		 * If ENOENT is returned, then the underlying dataset
+		 * has been removed since we obtained the handle.
+		 */
+		case ESRCH:
+		case ENOENT:
+			rc = 1;
+			break;
+		default:
+			rc = zfs_standard_error(zhp->zfs_hdl, errno,
+			    dgettext(TEXT_DOMAIN,
+			    "cannot iterate filesystems"));
+			break;
+		}
+	}
+	return (rc);
+}
+
+/*
+ * Iterate over all child filesystems
+ */
+/*
+ * Iterate over all child filesystems
+ */
+int
+zfs_iter_filesystems(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+	zfs_cmd_t zc = { 0 };
+	int rc;
+
+	/* Only filesystems have child filesystems. */
+	if (zhp->zfs_type != ZFS_TYPE_FILESYSTEM)
+		return (0);
+
+	if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
+		return (-1);
+
+	for (;;) {
+		zfs_handle_t *child;
+
+		rc = zfs_do_list_ioctl(zhp, ZFS_IOC_DATASET_LIST_NEXT, &zc);
+		if (rc != 0)
+			break;
+
+		/*
+		 * Silently ignore errors, as the only plausible explanation is
+		 * that the pool has since been removed.
+		 */
+		child = make_dataset_handle_zc(zhp->zfs_hdl, &zc);
+		if (child == NULL)
+			continue;
+
+		rc = func(child, data);
+		if (rc != 0) {
+			zcmd_free_nvlists(&zc);
+			return (rc);
+		}
+	}
+	zcmd_free_nvlists(&zc);
+	/* rc == 1 means normal end of iteration; report success. */
+	return ((rc < 0) ? rc : 0);
+}
+
+/*
+ * Iterate over all snapshots
+ */
+/*
+ * Iterate over all snapshots
+ */
+int
+zfs_iter_snapshots(zfs_handle_t *zhp, boolean_t simple, zfs_iter_f func,
+    void *data)
+{
+	zfs_cmd_t zc = { 0 };
+	int rc;
+
+	/* Snapshots have no snapshots of their own. */
+	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
+		return (0);
+
+	zc.zc_simple = simple;
+
+	if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
+		return (-1);
+
+	for (;;) {
+		zfs_handle_t *snap;
+
+		rc = zfs_do_list_ioctl(zhp, ZFS_IOC_SNAPSHOT_LIST_NEXT, &zc);
+		if (rc != 0)
+			break;
+
+		/* 'simple' handles carry less state but are cheaper to make. */
+		snap = simple ? make_dataset_simple_handle_zc(zhp, &zc) :
+		    make_dataset_handle_zc(zhp->zfs_hdl, &zc);
+		if (snap == NULL)
+			continue;
+
+		rc = func(snap, data);
+		if (rc != 0) {
+			zcmd_free_nvlists(&zc);
+			return (rc);
+		}
+	}
+	zcmd_free_nvlists(&zc);
+	/* rc == 1 means normal end of iteration; report success. */
+	return ((rc < 0) ? rc : 0);
+}
+
+/*
+ * Routines for dealing with the sorted snapshot functionality
+ */
+/* AVL-tree node wrapping one open snapshot handle. */
+typedef struct zfs_node {
+	zfs_handle_t	*zn_handle;	/* open dataset handle (owned) */
+	avl_node_t	zn_avlnode;	/* linkage in the snapshot AVL tree */
+} zfs_node_t;
+
+/*
+ * zfs_iter_snapshots() callback that collects snapshot handles into an
+ * AVL tree (ordered by zfs_snapshot_compare).  Takes ownership of 'zhp'.
+ */
+static int
+zfs_sort_snaps(zfs_handle_t *zhp, void *data)
+{
+	avl_tree_t *tree = data;
+	zfs_node_t key;
+	zfs_node_t *entry;
+
+	key.zn_handle = zhp;
+	entry = avl_find(tree, &key, NULL);
+	if (entry != NULL) {
+		/*
+		 * If this snapshot was renamed while we were creating the
+		 * AVL tree, it's possible that we already inserted it under
+		 * its old name. Remove the old handle before adding the new
+		 * one.
+		 */
+		zfs_close(entry->zn_handle);
+		avl_remove(tree, entry);
+		free(entry);
+	}
+
+	entry = zfs_alloc(zhp->zfs_hdl, sizeof (zfs_node_t));
+	entry->zn_handle = zhp;
+	avl_add(tree, entry);
+
+	return (0);
+}
+
+/*
+ * AVL comparator: order snapshots by creation time, using the hidden
+ * CREATETXG property to get an absolute ordering of snapshots.
+ */
+static int
+zfs_snapshot_compare(const void *larg, const void *rarg)
+{
+	uint64_t ltxg, rtxg;
+
+	ltxg = zfs_prop_get_int(((zfs_node_t *)larg)->zn_handle,
+	    ZFS_PROP_CREATETXG);
+	rtxg = zfs_prop_get_int(((zfs_node_t *)rarg)->zn_handle,
+	    ZFS_PROP_CREATETXG);
+
+	if (ltxg < rtxg)
+		return (-1);
+	if (ltxg > rtxg)
+		return (+1);
+	return (0);
+}
+
+/*
+ * Invoke 'callback' on every snapshot of 'zhp' in creation-txg order.
+ * Callback results are OR-ed together into the return value, matching
+ * the original semantics.
+ */
+int
+zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback, void *data)
+{
+	avl_tree_t tree;
+	zfs_node_t *entry;
+	void *cookie = NULL;
+	int rc;
+
+	avl_create(&tree, zfs_snapshot_compare,
+	    sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));
+
+	/* First gather every snapshot into the tree, then walk in order. */
+	rc = zfs_iter_snapshots(zhp, B_FALSE, zfs_sort_snaps, &tree);
+
+	for (entry = avl_first(&tree); entry != NULL;
+	    entry = AVL_NEXT(&tree, entry))
+		rc |= callback(entry->zn_handle, data);
+
+	while ((entry = avl_destroy_nodes(&tree, &cookie)) != NULL)
+		free(entry);
+
+	avl_destroy(&tree);
+
+	return (rc);
+}
+
+/* State carried through snapspec_cb() while expanding a <first>%<last>
+ * snapshot range. */
+typedef struct {
+	char *ssa_first;	/* short name opening the range (NULL = from start) */
+	char *ssa_last;		/* short name closing the range ("" = to end) */
+	boolean_t ssa_seenfirst;	/* iteration has reached ssa_first */
+	boolean_t ssa_seenlast;		/* iteration has passed ssa_last */
+	zfs_iter_f ssa_func;	/* user callback to apply inside the range */
+	void *ssa_arg;		/* user callback argument */
+} snapspec_arg_t;
+
+/*
+ * Sorted-snapshot callback that applies ssa_func to each snapshot
+ * whose short name falls inside the [ssa_first, ssa_last] range.
+ * Snapshots outside the range are closed and skipped.
+ */
+static int
+snapspec_cb(zfs_handle_t *zhp, void *arg)
+{
+	snapspec_arg_t *ssa = arg;
+	char *shortname;
+	int err = 0;
+
+	/* Already walked past the end of the range; nothing to do. */
+	if (ssa->ssa_seenlast)
+		return (0);
+
+	/* Short name is everything after the '@' in "fs@snap". */
+	shortname = zfs_strdup(zhp->zfs_hdl,
+	    strchr(zfs_get_name(zhp), '@') + 1);
+
+	if (!ssa->ssa_seenfirst && strcmp(shortname, ssa->ssa_first) == 0)
+		ssa->ssa_seenfirst = B_TRUE;
+
+	if (ssa->ssa_seenfirst)
+		err = ssa->ssa_func(zhp, ssa->ssa_arg);
+	else
+		zfs_close(zhp);
+
+	if (strcmp(shortname, ssa->ssa_last) == 0)
+		ssa->ssa_seenlast = B_TRUE;
+	free(shortname);
+
+	return (err);
+}
+
+/*
+ * spec is a string like "A,B%C,D"
+ *
+ * <snaps>, where <snaps> can be:
+ *      <snap>          (single snapshot)
+ *      <snap>%<snap>   (range of snapshots, inclusive)
+ *      %<snap>         (range of snapshots, starting with earliest)
+ *      <snap>%         (range of snapshots, ending with last)
+ *      %               (all snapshots)
+ *      <snaps>[,...]   (comma separated list of the above)
+ *
+ * If a snapshot can not be opened, continue trying to open the others, but
+ * return ENOENT at the end.
+ */
+int
+zfs_iter_snapspec(zfs_handle_t *fs_zhp, const char *spec_orig,
+    zfs_iter_f func, void *arg)
+{
+	char buf[ZFS_MAXNAMELEN];
+	char *comma_separated, *cp;
+	int err = 0;
+	int ret = 0;
+
+	/* Work on a local copy: strsep() and the '%' split modify it. */
+	(void) strlcpy(buf, spec_orig, sizeof (buf));
+	cp = buf;
+
+	while ((comma_separated = strsep(&cp, ",")) != NULL) {
+		char *pct = strchr(comma_separated, '%');
+		if (pct != NULL) {
+			/* Range form: <first>%<last>, % alone means "all". */
+			snapspec_arg_t ssa = { 0 };
+			ssa.ssa_func = func;
+			ssa.ssa_arg = arg;
+
+			/* Leading '%' means start from the earliest snapshot. */
+			if (pct == comma_separated)
+				ssa.ssa_seenfirst = B_TRUE;
+			else
+				ssa.ssa_first = comma_separated;
+			/* Split the token in place at the '%'. */
+			*pct = '\0';
+			ssa.ssa_last = pct + 1;
+
+			/*
+			 * If there is a lastname specified, make sure it
+			 * exists.
+			 */
+			if (ssa.ssa_last[0] != '\0') {
+				char snapname[ZFS_MAXNAMELEN];
+				(void) snprintf(snapname, sizeof (snapname),
+				    "%s@%s", zfs_get_name(fs_zhp),
+				    ssa.ssa_last);
+				if (!zfs_dataset_exists(fs_zhp->zfs_hdl,
+				    snapname, ZFS_TYPE_SNAPSHOT)) {
+					ret = ENOENT;
+					continue;
+				}
+			}
+
+			err = zfs_iter_snapshots_sorted(fs_zhp,
+			    snapspec_cb, &ssa);
+			/* Remember the first error but keep processing. */
+			if (ret == 0)
+				ret = err;
+			/* Range endpoints that were never seen mean ENOENT. */
+			if (ret == 0 && (!ssa.ssa_seenfirst ||
+			    (ssa.ssa_last[0] != '\0' && !ssa.ssa_seenlast))) {
+				ret = ENOENT;
+			}
+		} else {
+			/* Single-snapshot form: open it directly by name. */
+			char snapname[ZFS_MAXNAMELEN];
+			zfs_handle_t *snap_zhp;
+			(void) snprintf(snapname, sizeof (snapname), "%s@%s",
+			    zfs_get_name(fs_zhp), comma_separated);
+			snap_zhp = make_dataset_handle(fs_zhp->zfs_hdl,
+			    snapname);
+			if (snap_zhp == NULL) {
+				ret = ENOENT;
+				continue;
+			}
+			err = func(snap_zhp, arg);
+			if (ret == 0)
+				ret = err;
+		}
+	}
+
+	return (ret);
+}
+
+/*
+ * Iterate over all children, snapshots and filesystems
+ */
+/*
+ * Iterate over all children, snapshots and filesystems
+ */
+int
+zfs_iter_children(zfs_handle_t *zhp, zfs_iter_f func, void *data)
+{
+	int err;
+
+	/* Visit child filesystems first; stop early on callback error. */
+	err = zfs_iter_filesystems(zhp, func, data);
+	if (err != 0)
+		return (err);
+
+	return (zfs_iter_snapshots(zhp, B_FALSE, func, data));
+}
+
+
+/* One frame of the explicit DFS stack used by iter_dependents_cb(). */
+typedef struct iter_stack_frame {
+	struct iter_stack_frame *next;	/* parent dataset's frame */
+	zfs_handle_t *zhp;		/* dataset currently being visited */
+} iter_stack_frame_t;
+
+typedef struct iter_dependents_arg {
+	boolean_t first;	/* true only for the root dataset itself */
+	boolean_t allowrecursion;	/* skip cycles instead of erroring */
+	iter_stack_frame_t *stack;	/* datasets on the current DFS path */
+	zfs_iter_f func;	/* user callback */
+	void *data;		/* user callback argument */
+} iter_dependents_arg_t;
+
+/*
+ * Depth-first traversal of a dataset's dependents (child filesystems,
+ * snapshots, and clones of snapshots).  The user callback is invoked
+ * on every visited dataset except the root ('first' suppresses it).
+ * An explicit stack of in-progress datasets detects dependency cycles
+ * by dds_guid.
+ */
+static int
+iter_dependents_cb(zfs_handle_t *zhp, void *arg)
+{
+	iter_dependents_arg_t *ida = arg;
+	int err;
+	boolean_t first = ida->first;
+	ida->first = B_FALSE;
+
+	if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
+		/* Snapshots contribute their clones as dependents. */
+		err = zfs_iter_clones(zhp, iter_dependents_cb, ida);
+	} else {
+		iter_stack_frame_t isf;
+		iter_stack_frame_t *f;
+
+		/*
+		 * check if there is a cycle by seeing if this fs is already
+		 * on the stack.
+		 */
+		for (f = ida->stack; f != NULL; f = f->next) {
+			if (f->zhp->zfs_dmustats.dds_guid ==
+			    zhp->zfs_dmustats.dds_guid) {
+				if (ida->allowrecursion) {
+					/* Tolerated: skip the repeat visit. */
+					zfs_close(zhp);
+					return (0);
+				} else {
+					zfs_error_aux(zhp->zfs_hdl,
+					    dgettext(TEXT_DOMAIN,
+					    "recursive dependency at '%s'"),
+					    zfs_get_name(zhp));
+					err = zfs_error(zhp->zfs_hdl,
+					    EZFS_RECURSIVE,
+					    dgettext(TEXT_DOMAIN,
+					    "cannot determine dependent "
+					    "datasets"));
+					zfs_close(zhp);
+					return (err);
+				}
+			}
+		}
+
+		/* Push this dataset, recurse into children, then pop. */
+		isf.zhp = zhp;
+		isf.next = ida->stack;
+		ida->stack = &isf;
+		err = zfs_iter_filesystems(zhp, iter_dependents_cb, ida);
+		if (err == 0) {
+			err = zfs_iter_snapshots(zhp, B_FALSE,
+			    iter_dependents_cb, ida);
+		}
+		ida->stack = isf.next;
+	}
+	/* Post-order: visit the dataset itself, except the root. */
+	if (!first && err == 0)
+		err = ida->func(zhp, ida->data);
+	return (err);
+}
+
+/*
+ * Invoke 'func' on every dataset that depends on 'zhp' (descendent
+ * filesystems, snapshots, and clones of those snapshots); 'zhp' itself
+ * is not visited.  With 'allowrecursion' set, dependency cycles are
+ * skipped silently instead of raising EZFS_RECURSIVE.
+ */
+int
+zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion,
+    zfs_iter_f func, void *data)
+{
+	iter_dependents_arg_t ida = { 0 };
+
+	ida.first = B_TRUE;
+	ida.allowrecursion = allowrecursion;
+	ida.func = func;
+	ida.data = data;
+
+	return (iter_dependents_cb(zfs_handle_dup(zhp), &ida));
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
new file mode 100644
index 0000000000000000000000000000000000000000..b2959dd1b8419292cdc1441aaca3071f995bbf2b
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_mount.c
@@ -0,0 +1,1323 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+/*
+ * Routines to manage ZFS mounts.  We separate all the nasty routines that have
+ * to deal with the OS.  The following functions are the main entry points --
+ * they are used by mount and unmount and when changing a filesystem's
+ * mountpoint.
+ *
+ * 	zfs_is_mounted()
+ * 	zfs_mount()
+ * 	zfs_unmount()
+ * 	zfs_unmountall()
+ *
+ * This file also contains the functions used to manage sharing filesystems via
+ * NFS and iSCSI:
+ *
+ * 	zfs_is_shared()
+ * 	zfs_share()
+ * 	zfs_unshare()
+ *
+ * 	zfs_is_shared_nfs()
+ * 	zfs_is_shared_smb()
+ * 	zfs_share_proto()
+ * 	zfs_shareall();
+ * 	zfs_unshare_nfs()
+ * 	zfs_unshare_smb()
+ * 	zfs_unshareall_nfs()
+ *	zfs_unshareall_smb()
+ *	zfs_unshareall()
+ *	zfs_unshareall_bypath()
+ *
+ * The following functions are available for pool consumers, and will
+ * mount/unmount and share/unshare all datasets within pool:
+ *
+ * 	zpool_enable_datasets()
+ * 	zpool_disable_datasets()
+ */
+
+#include <dirent.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <libgen.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <zone.h>
+#include <sys/mntent.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+
+#include <libshare.h>
+#define	MAXISALEN	257	/* based on sysinfo(2) man page */
+
+static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
+zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
+    zfs_share_proto_t);
+
+/*
+ * The share protocols table must be in the same order as the zfs_share_prot_t
+ * enum in libzfs_impl.h
+ */
+typedef struct {
+	zfs_prop_t p_prop;	/* dataset property controlling this share */
+	char *p_name;		/* protocol name as recorded in sharetab */
+	int p_share_err;	/* libzfs error code on share failure */
+	int p_unshare_err;	/* libzfs error code on unshare failure */
+} proto_table_t;
+
+/* Indexed by zfs_share_proto_t (PROTO_NFS, PROTO_SMB). */
+proto_table_t proto_table[PROTO_END] = {
+	{ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
+	{ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
+};
+
+/* PROTO_END-terminated protocol lists selecting which shares to act on. */
+zfs_share_proto_t nfs_only[] = {
+	PROTO_NFS,
+	PROTO_END
+};
+
+zfs_share_proto_t smb_only[] = {
+	PROTO_SMB,
+	PROTO_END
+};
+zfs_share_proto_t share_all_proto[] = {
+	PROTO_NFS,
+	PROTO_SMB,
+	PROTO_END
+};
+
+/*
+ * Search the sharetab for the given mountpoint and protocol, returning
+ * a zfs_share_type_t value.
+ */
+static zfs_share_type_t
+is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
+{
+	char buf[MAXPATHLEN], *tab;
+	char *ptr;
+
+	/* No sharetab open on this handle: nothing can be shared. */
+	if (hdl->libzfs_sharetab == NULL)
+		return (SHARED_NOT_SHARED);
+
+	/* Rescan from the beginning on every query. */
+	(void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
+
+	while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
+
+		/* the mountpoint is the first entry on each line */
+		if ((tab = strchr(buf, '\t')) == NULL)
+			continue;
+
+		*tab = '\0';
+		if (strcmp(buf, mountpoint) == 0) {
+#ifdef sun
+			/*
+			 * the protocol field is the third field
+			 * skip over second field
+			 */
+			ptr = ++tab;
+			if ((tab = strchr(ptr, '\t')) == NULL)
+				continue;
+			ptr = ++tab;
+			if ((tab = strchr(ptr, '\t')) == NULL)
+				continue;
+			*tab = '\0';
+			if (strcmp(ptr,
+			    proto_table[proto].p_name) == 0) {
+				switch (proto) {
+				case PROTO_NFS:
+					return (SHARED_NFS);
+				case PROTO_SMB:
+					return (SHARED_SMB);
+				default:
+					return (0);
+				}
+			}
+#else
+			/* FreeBSD's sharetab only records NFS shares. */
+			if (proto == PROTO_NFS)
+				return (SHARED_NFS);
+#endif
+		}
+	}
+
+	return (SHARED_NOT_SHARED);
+}
+
+#ifdef sun
+/*
+ * Returns true if the specified directory is empty.  If we can't open the
+ * directory at all, return true so that the mount can fail with a more
+ * informative error message.
+ */
+/*
+ * Returns true if the specified directory is empty.  If we can't open the
+ * directory at all, return true so that the mount can fail with a more
+ * informative error message.
+ */
+static boolean_t
+dir_is_empty(const char *dirname)
+{
+	boolean_t empty = B_TRUE;
+	struct dirent64 *entry;
+	DIR *dir;
+
+	if ((dir = opendir(dirname)) == NULL)
+		return (B_TRUE);
+
+	while ((entry = readdir64(dir)) != NULL) {
+		/* "." and ".." never count as contents. */
+		if (strcmp(entry->d_name, ".") == 0 ||
+		    strcmp(entry->d_name, "..") == 0)
+			continue;
+
+		empty = B_FALSE;
+		break;
+	}
+
+	(void) closedir(dir);
+	return (empty);
+}
+#endif
+
+/*
+ * Checks to see if the mount is active.  If the filesystem is mounted, we fill
+ * in 'where' with the current mountpoint, and return 1.  Otherwise, we return
+ * 0.
+ */
+boolean_t
+is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
+{
+	struct mnttab mnt;
+	boolean_t found;
+
+	/* Consult the cached mnttab for the given special name. */
+	found = (libzfs_mnttab_find(zfs_hdl, special, &mnt) == 0);
+
+	/* Hand back a copy of the mountpoint if the caller asked for it. */
+	if (found && where != NULL)
+		*where = zfs_strdup(zfs_hdl, mnt.mnt_mountp);
+
+	return (found);
+}
+
+/* Convenience wrapper: is this dataset currently mounted? */
+boolean_t
+zfs_is_mounted(zfs_handle_t *zhp, char **where)
+{
+	const char *name = zfs_get_name(zhp);
+
+	return (is_mounted(zhp->zfs_hdl, name, where));
+}
+
+/*
+ * Returns true if the given dataset is mountable, false otherwise.  Returns the
+ * mountpoint in 'buf'.
+ */
+/*
+ * Returns true if the given dataset is mountable, false otherwise.  Returns the
+ * mountpoint in 'buf'.
+ */
+static boolean_t
+zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
+    zprop_source_t *source)
+{
+	zprop_source_t srctype;
+	char srcloc[ZFS_MAXNAMELEN];
+
+	/* The mountpoint property only applies to some dataset types. */
+	if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
+		return (B_FALSE);
+
+	verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
+	    &srctype, srcloc, sizeof (srcloc), B_FALSE) == 0);
+
+	/* "none" and "legacy" mountpoints are never auto-mounted. */
+	if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
+	    strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
+		return (B_FALSE);
+
+	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
+		return (B_FALSE);
+
+	/* Zoned datasets are not mountable from the global zone. */
+	if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
+	    getzoneid() == GLOBAL_ZONEID)
+		return (B_FALSE);
+
+	if (source != NULL)
+		*source = srctype;
+
+	return (B_TRUE);
+}
+
+/*
+ * Mount the given filesystem.
+ */
+int
+zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
+{
+	struct stat buf;
+	char mountpoint[ZFS_MAXPROPLEN];
+	char mntopts[MNT_LINE_MAX];
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+	if (options == NULL)
+		mntopts[0] = '\0';
+	else
+		(void) strlcpy(mntopts, options, sizeof (mntopts));
+
+	/*
+	 * If the pool is imported read-only then all mounts must be read-only
+	 */
+	if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
+		flags |= MS_RDONLY;
+
+	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
+		return (0);
+
+	/* Create the directory if it doesn't already exist */
+	if (lstat(mountpoint, &buf) != 0) {
+		if (mkdirp(mountpoint, 0755) != 0) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "failed to create mountpoint"));
+			return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
+			    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
+			    mountpoint));
+		}
+	}
+
+#ifdef sun	/* FreeBSD: overlay mounts are not checked. */
+	/*
+	 * Determine if the mountpoint is empty.  If so, refuse to perform the
+	 * mount.  We don't perform this check if MS_OVERLAY is specified, which
+	 * would defeat the point.  We also avoid this check if 'remount' is
+	 * specified.
+	 */
+	if ((flags & MS_OVERLAY) == 0 &&
+	    strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
+	    !dir_is_empty(mountpoint)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "directory is not empty"));
+		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
+		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
+	}
+#endif
+
+	/* perform the mount */
+	if (zmount(zfs_get_name(zhp), mountpoint, flags,
+	    MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
+		/*
+		 * Generic errors are nasty, but there are just way too many
+		 * from mount(), and they're well-understood.  We pick a few
+		 * common ones to improve upon.
+		 */
+		if (errno == EBUSY) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "mountpoint or dataset is busy"));
+		} else if (errno == EPERM) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "Insufficient privileges"));
+		} else if (errno == ENOTSUP) {
+			char buf[256];
+			int spa_version;
+
+			VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
+			(void) snprintf(buf, sizeof (buf),
+			    dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
+			    "file system on a version %d pool. Pool must be"
+			    " upgraded to mount this file system."),
+			    (u_longlong_t)zfs_prop_get_int(zhp,
+			    ZFS_PROP_VERSION), spa_version);
+			/*
+			 * 'buf' is already-formatted (possibly containing
+			 * '%') text; never pass it as a format string.
+			 */
+			zfs_error_aux(hdl, "%s", buf);
+		} else {
+			/* Same here: strerror() text is not a format string. */
+			zfs_error_aux(hdl, "%s", strerror(errno));
+		}
+		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
+		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
+		    zhp->zfs_name));
+	}
+
+	/* add the mounted entry into our cache */
+	libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint,
+	    mntopts);
+	return (0);
+}
+
+/*
+ * Unmount a single filesystem by mountpoint path.
+ *
+ * 'flags' are passed through to umount2() (e.g. MS_FORCE).  Returns 0
+ * on success or an EZFS_UMOUNTFAILED error on failure.
+ */
+static int
+unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
+{
+	if (umount2(mountpoint, flags) != 0) {
+		zfs_error_aux(hdl, strerror(errno));
+		return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
+		    dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
+		    mountpoint));
+	}
+
+	return (0);
+}
+
+/*
+ * Unmount the given filesystem.
+ *
+ * If 'mountpoint' is NULL the path is looked up in the mnttab cache.
+ * The filesystem is unshared before it is unmounted; if the unmount
+ * then fails, it is re-shared to restore the original state.  Returns
+ * 0 on success (including when nothing is mounted), -1 on failure.
+ */
+int
+zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	struct mnttab entry;
+	char *mntpt = NULL;
+
+	/* check to see if we need to unmount the filesystem */
+	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
+	    libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
+		/*
+		 * mountpoint may have come from a call to
+		 * getmnt/getmntany if it isn't NULL. If it is NULL,
+		 * we know it comes from libzfs_mnttab_find which can
+		 * then get freed later. We strdup it to play it safe.
+		 */
+		if (mountpoint == NULL)
+			mntpt = zfs_strdup(hdl, entry.mnt_mountp);
+		else
+			mntpt = zfs_strdup(hdl, mountpoint);
+
+		/*
+		 * Unshare and unmount the filesystem
+		 */
+		if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0) {
+			/* Fix: don't leak the strdup'd mountpoint copy */
+			free(mntpt);
+			return (-1);
+		}
+
+		if (unmount_one(hdl, mntpt, flags) != 0) {
+			free(mntpt);
+			(void) zfs_shareall(zhp);
+			return (-1);
+		}
+		libzfs_mnttab_remove(hdl, zhp->zfs_name);
+		free(mntpt);
+	}
+
+	return (0);
+}
+
+/*
+ * Unmount this filesystem and any children inheriting the mountpoint property.
+ * To do this, just act like we're changing the mountpoint property, but don't
+ * remount the filesystems afterwards.
+ *
+ * 'flags' are unmount flags (e.g. MS_FORCE) forwarded through the
+ * changelist.  Returns 0 on success, -1 on failure.
+ */
+int
+zfs_unmountall(zfs_handle_t *zhp, int flags)
+{
+	prop_changelist_t *clp;
+	int ret;
+
+	/* Gather this dataset plus descendants that inherit the mountpoint */
+	clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags);
+	if (clp == NULL)
+		return (-1);
+
+	/* The "prefix" phase unshares/unmounts everything in the list */
+	ret = changelist_prefix(clp);
+	changelist_free(clp);
+
+	return (ret);
+}
+
+/*
+ * Returns B_TRUE if the dataset is shared over any supported protocol.
+ * Volumes are never shared via this path.
+ */
+boolean_t
+zfs_is_shared(zfs_handle_t *zhp)
+{
+	zfs_share_type_t rc = 0;
+	zfs_share_proto_t *curr_proto;
+
+	if (ZFS_IS_VOLUME(zhp))
+		return (B_FALSE);
+
+	/* OR together the share state across all protocols */
+	for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
+	    curr_proto++)
+		rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
+
+	return (rc ? B_TRUE : B_FALSE);
+}
+
+/*
+ * Share the dataset over all supported protocols per its share
+ * properties.  Filesystems only; volumes are a programming error.
+ */
+int
+zfs_share(zfs_handle_t *zhp)
+{
+	assert(!ZFS_IS_VOLUME(zhp));
+	return (zfs_share_proto(zhp, share_all_proto));
+}
+
+/*
+ * Unshare the dataset over all supported protocols.  Filesystems only;
+ * volumes are a programming error.
+ */
+int
+zfs_unshare(zfs_handle_t *zhp)
+{
+	assert(!ZFS_IS_VOLUME(zhp));
+	return (zfs_unshareall(zhp));
+}
+
+/*
+ * Check to see if the filesystem is currently shared over the given
+ * protocol.
+ *
+ * Returns SHARED_NOT_SHARED if the dataset is not mounted or not
+ * shared; otherwise returns the share type.  On a shared result, if
+ * 'where' is non-NULL it receives the malloc'd mountpoint (caller
+ * frees); otherwise the mountpoint is freed here.
+ */
+zfs_share_type_t
+zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
+{
+	char *mountpoint;
+	zfs_share_type_t rc;
+
+	if (!zfs_is_mounted(zhp, &mountpoint))
+		return (SHARED_NOT_SHARED);
+
+	/* Parenthesized assignment: intentional, not a mistyped '==' */
+	if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto)) != 0) {
+		if (where != NULL)
+			*where = mountpoint;
+		else
+			free(mountpoint);
+		return (rc);
+	} else {
+		free(mountpoint);
+		return (SHARED_NOT_SHARED);
+	}
+}
+
+/* Convenience wrapper: is the dataset shared over NFS? */
+boolean_t
+zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
+{
+	return (zfs_is_shared_proto(zhp, where,
+	    PROTO_NFS) != SHARED_NOT_SHARED);
+}
+
+/* Convenience wrapper: is the dataset shared over SMB? */
+boolean_t
+zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
+{
+	return (zfs_is_shared_proto(zhp, where,
+	    PROTO_SMB) != SHARED_NOT_SHARED);
+}
+
+/*
+ * Make sure things will work if libshare isn't installed by using
+ * wrapper functions that check to see that the pointers to functions
+ * initialized in _zfs_init_libshare() are actually present.
+ */
+
+#ifdef sun
+static sa_handle_t (*_sa_init)(int);
+static void (*_sa_fini)(sa_handle_t);
+static sa_share_t (*_sa_find_share)(sa_handle_t, char *);
+static int (*_sa_enable_share)(sa_share_t, char *);
+static int (*_sa_disable_share)(sa_share_t, char *);
+static char *(*_sa_errorstr)(int);
+static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *);
+static boolean_t (*_sa_needs_refresh)(sa_handle_t *);
+static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t);
+static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t,
+    char *, char *, zprop_source_t, char *, char *, char *);
+static void (*_sa_update_sharetab_ts)(sa_handle_t);
+#endif
+
+/*
+ * _zfs_init_libshare()
+ *
+ * Find the libshare.so.1 entry points that we use here and save the
+ * values to be used later. This is triggered by the runtime loader.
+ * Make sure the correct ISA version is loaded.
+ *
+ * On FreeBSD this is a no-op (the whole body is under #ifdef sun); the
+ * _sa_* function pointers simply stay NULL and the wrappers below fall
+ * back to their error returns.
+ */
+
+#pragma init(_zfs_init_libshare)
+static void
+_zfs_init_libshare(void)
+{
+#ifdef sun
+	void *libshare;
+	char path[MAXPATHLEN];
+	char isa[MAXISALEN];
+
+#if defined(_LP64)
+	if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
+		isa[0] = '\0';
+#else
+	isa[0] = '\0';
+#endif
+	(void) snprintf(path, MAXPATHLEN,
+	    "/usr/lib/%s/libshare.so.1", isa);
+
+	if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
+		_sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
+		_sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
+		_sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
+		    dlsym(libshare, "sa_find_share");
+		_sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
+		    "sa_enable_share");
+		_sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
+		    "sa_disable_share");
+		_sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
+		_sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
+		    dlsym(libshare, "sa_parse_legacy_options");
+		_sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
+		    dlsym(libshare, "sa_needs_refresh");
+		_sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
+		    dlsym(libshare, "sa_get_zfs_handle");
+		_sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t,
+		    sa_share_t, char *, char *, zprop_source_t, char *,
+		    char *, char *))dlsym(libshare, "sa_zfs_process_share");
+		_sa_update_sharetab_ts = (void (*)(sa_handle_t))
+		    dlsym(libshare, "sa_update_sharetab_ts");
+		/* All-or-nothing: any missing symbol disables libshare */
+		if (_sa_init == NULL || _sa_fini == NULL ||
+		    _sa_find_share == NULL || _sa_enable_share == NULL ||
+		    _sa_disable_share == NULL || _sa_errorstr == NULL ||
+		    _sa_parse_legacy_options == NULL ||
+		    _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
+		    _sa_zfs_process_share == NULL ||
+		    _sa_update_sharetab_ts == NULL) {
+			_sa_init = NULL;
+			_sa_fini = NULL;
+			_sa_disable_share = NULL;
+			_sa_enable_share = NULL;
+			_sa_errorstr = NULL;
+			_sa_parse_legacy_options = NULL;
+			(void) dlclose(libshare);
+			_sa_needs_refresh = NULL;
+			_sa_get_zfs_handle = NULL;
+			_sa_zfs_process_share = NULL;
+			_sa_update_sharetab_ts = NULL;
+		}
+	}
+#endif
+}
+
+/*
+ * zfs_init_libshare(zhandle, service)
+ *
+ * Initialize the libshare API if it hasn't already been initialized.
+ * In all cases it returns 0 if it succeeded and an error if not. The
+ * service value is which part(s) of the API to initialize and is a
+ * direct map to the libshare sa_init(service) interface.
+ *
+ * On FreeBSD this always returns SA_OK (no libshare).
+ */
+int
+zfs_init_libshare(libzfs_handle_t *zhandle, int service)
+{
+	int ret = SA_OK;
+
+#ifdef sun
+	if (_sa_init == NULL)
+		ret = SA_CONFIG_ERR;
+
+	/*
+	 * NOTE(review): zhandle is dereferenced here (and at the
+	 * SA_NO_MEMORY check below) even though the second 'if' below
+	 * guards against zhandle == NULL — the NULL check appears
+	 * inconsistent; confirm callers never pass NULL.
+	 */
+	if (ret == SA_OK && zhandle->libzfs_shareflags & ZFSSHARE_MISS) {
+		/*
+		 * We had a cache miss. Most likely it is a new ZFS
+		 * dataset that was just created. We want to make sure
+		 * so check timestamps to see if a different process
+		 * has updated any of the configuration. If there was
+		 * some non-ZFS change, we need to re-initialize the
+		 * internal cache.
+		 */
+		zhandle->libzfs_shareflags &= ~ZFSSHARE_MISS;
+		if (_sa_needs_refresh != NULL &&
+		    _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
+			zfs_uninit_libshare(zhandle);
+			zhandle->libzfs_sharehdl = _sa_init(service);
+		}
+	}
+
+	if (ret == SA_OK && zhandle && zhandle->libzfs_sharehdl == NULL)
+		zhandle->libzfs_sharehdl = _sa_init(service);
+
+	if (ret == SA_OK && zhandle->libzfs_sharehdl == NULL)
+		ret = SA_NO_MEMORY;
+#endif
+
+	return (ret);
+}
+
+/*
+ * zfs_uninit_libshare(zhandle)
+ *
+ * Uninitialize the libshare API if it hasn't already been
+ * uninitialized. It is OK to call multiple times.
+ */
+void
+zfs_uninit_libshare(libzfs_handle_t *zhandle)
+{
+	if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
+#ifdef sun
+		if (_sa_fini != NULL)
+			_sa_fini(zhandle->libzfs_sharehdl);
+#endif
+		/* Clear the handle so repeated calls are harmless */
+		zhandle->libzfs_sharehdl = NULL;
+	}
+}
+
+/*
+ * zfs_parse_options(options, proto)
+ *
+ * Call the legacy parse interface to get the protocol specific
+ * options using the NULL arg to indicate that this is a "parse" only.
+ *
+ * On FreeBSD there is no libshare, so options are accepted as-is
+ * (SA_OK).
+ */
+int
+zfs_parse_options(char *options, zfs_share_proto_t proto)
+{
+#ifdef sun
+	if (_sa_parse_legacy_options != NULL) {
+		return (_sa_parse_legacy_options(NULL, options,
+		    proto_table[proto].p_name));
+	}
+	return (SA_CONFIG_ERR);
+#else
+	return (SA_OK);
+#endif
+}
+
+#ifdef sun
+/*
+ * zfs_sa_find_share(handle, path)
+ *
+ * wrapper around sa_find_share to find a share path in the
+ * configuration.  Returns NULL when libshare was not loaded.
+ */
+static sa_share_t
+zfs_sa_find_share(sa_handle_t handle, char *path)
+{
+	if (_sa_find_share != NULL)
+		return (_sa_find_share(handle, path));
+	return (NULL);
+}
+
+/*
+ * zfs_sa_enable_share(share, proto)
+ *
+ * Wrapper for sa_enable_share which enables a share for a specified
+ * protocol.  Returns SA_CONFIG_ERR when libshare was not loaded.
+ */
+static int
+zfs_sa_enable_share(sa_share_t share, char *proto)
+{
+	if (_sa_enable_share != NULL)
+		return (_sa_enable_share(share, proto));
+	return (SA_CONFIG_ERR);
+}
+
+/*
+ * zfs_sa_disable_share(share, proto)
+ *
+ * Wrapper for sa_disable_share which disables a share for a specified
+ * protocol.  Returns SA_CONFIG_ERR when libshare was not loaded.
+ */
+static int
+zfs_sa_disable_share(sa_share_t share, char *proto)
+{
+	if (_sa_disable_share != NULL)
+		return (_sa_disable_share(share, proto));
+	return (SA_CONFIG_ERR);
+}
+#endif	/* sun */
+
+/*
+ * Share the given filesystem according to the options in the specified
+ * protocol specific properties (sharenfs, sharesmb).  We rely
+ * on "libshare" to the dirty work for us.
+ *
+ * 'proto' is a PROTO_END-terminated list of protocols to share over.
+ * Returns 0 on success (including when nothing is shareable), -1 on
+ * failure.  On FreeBSD only NFS is supported, via fsshare() and
+ * ZFS_EXPORTS_PATH.
+ */
+static int
+zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
+{
+	char mountpoint[ZFS_MAXPROPLEN];
+	char shareopts[ZFS_MAXPROPLEN];
+	char sourcestr[ZFS_MAXPROPLEN];
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	zfs_share_proto_t *curr_proto;
+	zprop_source_t sourcetype;
+	int error, ret;
+
+	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
+		return (0);
+
+#ifdef sun
+	if ((ret = zfs_init_libshare(hdl, SA_INIT_SHARE_API)) != SA_OK) {
+		(void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
+		    dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
+		    zfs_get_name(zhp), _sa_errorstr != NULL ?
+		    _sa_errorstr(ret) : "");
+		return (-1);
+	}
+#endif
+
+	for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
+		/*
+		 * Return success if there are no share options.
+		 */
+		if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
+		    shareopts, sizeof (shareopts), &sourcetype, sourcestr,
+		    ZFS_MAXPROPLEN, B_FALSE) != 0 ||
+		    strcmp(shareopts, "off") == 0)
+			continue;
+
+		/*
+		 * If the 'zoned' property is set, then zfs_is_mountable()
+		 * will have already bailed out if we are in the global zone.
+		 * But local zones cannot be NFS servers, so we ignore it for
+		 * local zones as well.
+		 */
+		if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
+			continue;
+
+#ifdef sun
+		/*
+		 * NOTE(review): 'share' is not declared anywhere in this
+		 * function; the Solaris branch presumably relied on a local
+		 * 'sa_share_t share;' lost in porting — this #ifdef sun
+		 * path would not compile as-is.  Verify before enabling.
+		 */
+		share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint);
+		if (share == NULL) {
+			/*
+			 * This may be a new file system that was just
+			 * created so isn't in the internal cache
+			 * (second time through). Rather than
+			 * reloading the entire configuration, we can
+			 * assume ZFS has done the checking and it is
+			 * safe to add this to the internal
+			 * configuration.
+			 */
+			if (_sa_zfs_process_share(hdl->libzfs_sharehdl,
+			    NULL, NULL, mountpoint,
+			    proto_table[*curr_proto].p_name, sourcetype,
+			    shareopts, sourcestr, zhp->zfs_name) != SA_OK) {
+				(void) zfs_error_fmt(hdl,
+				    proto_table[*curr_proto].p_share_err,
+				    dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+				    zfs_get_name(zhp));
+				return (-1);
+			}
+			hdl->libzfs_shareflags |= ZFSSHARE_MISS;
+			share = zfs_sa_find_share(hdl->libzfs_sharehdl,
+			    mountpoint);
+		}
+		if (share != NULL) {
+			int err;
+			err = zfs_sa_enable_share(share,
+			    proto_table[*curr_proto].p_name);
+			if (err != SA_OK) {
+				(void) zfs_error_fmt(hdl,
+				    proto_table[*curr_proto].p_share_err,
+				    dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+				    zfs_get_name(zhp));
+				return (-1);
+			}
+		} else
+#else
+		if (*curr_proto != PROTO_NFS) {
+			fprintf(stderr, "Unsupported share protocol: %d.\n",
+			    *curr_proto);
+			continue;
+		}
+
+		/* "on" means default export options; otherwise pass through */
+		if (strcmp(shareopts, "on") == 0)
+			error = fsshare(ZFS_EXPORTS_PATH, mountpoint, "");
+		else
+			error = fsshare(ZFS_EXPORTS_PATH, mountpoint, shareopts);
+		if (error != 0)
+#endif
+		{
+			/* Shared failure path for both the sun and BSD arms */
+			(void) zfs_error_fmt(hdl,
+			    proto_table[*curr_proto].p_share_err,
+			    dgettext(TEXT_DOMAIN, "cannot share '%s'"),
+			    zfs_get_name(zhp));
+			return (-1);
+		}
+
+	}
+	return (0);
+}
+
+
+/* Share over NFS only. */
+int
+zfs_share_nfs(zfs_handle_t *zhp)
+{
+	return (zfs_share_proto(zhp, nfs_only));
+}
+
+/* Share over SMB only. */
+int
+zfs_share_smb(zfs_handle_t *zhp)
+{
+	return (zfs_share_proto(zhp, smb_only));
+}
+
+/* Share over every supported protocol. */
+int
+zfs_shareall(zfs_handle_t *zhp)
+{
+	return (zfs_share_proto(zhp, share_all_proto));
+}
+
+/*
+ * Unshare a filesystem by mountpoint.
+ *
+ * 'name' is used only for error messages.  On FreeBSD only NFS is
+ * supported (via fsunshare()); SMB requests return EOPNOTSUPP.
+ * Returns 0 on success, an EZFS_* error (or EOPNOTSUPP) on failure.
+ */
+static int
+unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
+    zfs_share_proto_t proto)
+{
+#ifdef sun
+	sa_share_t share;
+	int err;
+	char *mntpt;
+	/*
+	 * Mountpoint could get trashed if libshare calls getmntany
+	 * which it does during API initialization, so strdup the
+	 * value.
+	 */
+	mntpt = zfs_strdup(hdl, mountpoint);
+
+	/* make sure libshare initialized */
+	if ((err = zfs_init_libshare(hdl, SA_INIT_SHARE_API)) != SA_OK) {
+		free(mntpt);	/* don't need the copy anymore */
+		return (zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
+		    dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
+		    name, _sa_errorstr(err)));
+	}
+
+	share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt);
+	free(mntpt);	/* don't need the copy anymore */
+
+	if (share != NULL) {
+		err = zfs_sa_disable_share(share, proto_table[proto].p_name);
+		if (err != SA_OK) {
+			return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
+			    dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
+			    name, _sa_errorstr(err)));
+		}
+	} else {
+		return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
+		    dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"),
+		    name));
+	}
+#else
+	char buf[MAXPATHLEN];
+	FILE *fp;
+	int err;
+
+	if (proto != PROTO_NFS) {
+		fprintf(stderr, "No SMB support in FreeBSD yet.\n");
+		return (EOPNOTSUPP);
+	}
+
+	/* Remove the mountpoint's line from the ZFS exports file */
+	err = fsunshare(ZFS_EXPORTS_PATH, mountpoint);
+	if (err != 0) {
+		zfs_error_aux(hdl, "%s", strerror(err));
+		return (zfs_error_fmt(hdl, EZFS_UNSHARENFSFAILED,
+		    dgettext(TEXT_DOMAIN,
+		    "cannot unshare '%s'"), name));
+	}
+#endif
+	return (0);
+}
+
+/*
+ * Unshare the given filesystem.
+ *
+ * If 'mountpoint' is NULL the mountpoint is looked up in mnttab.
+ * 'proto' is a PROTO_END-terminated list of protocols to unshare.
+ * Returns 0 on success (including when not mounted), -1 on failure.
+ */
+int
+zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
+    zfs_share_proto_t *proto)
+{
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	struct mnttab entry;
+	char *mntpt = NULL;
+
+	/* check to see if need to unmount the filesystem */
+	rewind(zhp->zfs_hdl->libzfs_mnttab);
+	/* Copy the caller's path — the mnttab cache may be freed under us */
+	if (mountpoint != NULL)
+		mountpoint = mntpt = zfs_strdup(hdl, mountpoint);
+
+	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
+	    libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
+		zfs_share_proto_t *curr_proto;
+
+		if (mountpoint == NULL)
+			mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
+
+		for (curr_proto = proto; *curr_proto != PROTO_END;
+		    curr_proto++) {
+
+			/* Only unshare protocols that are actually shared */
+			if (is_shared(hdl, mntpt, *curr_proto) &&
+			    unshare_one(hdl, zhp->zfs_name,
+			    mntpt, *curr_proto) != 0) {
+				if (mntpt != NULL)
+					free(mntpt);
+				return (-1);
+			}
+		}
+	}
+	if (mntpt != NULL)
+		free(mntpt);
+
+	return (0);
+}
+
+/* Unshare over NFS only. */
+int
+zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
+{
+	return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
+}
+
+/* Unshare over SMB only. */
+int
+zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
+{
+	return (zfs_unshare_proto(zhp, mountpoint, smb_only));
+}
+
+/*
+ * Same as zfs_unmountall(), but for NFS and SMB unshares.
+ *
+ * NOTE(review): the changelist is always gathered via ZFS_PROP_SHARENFS
+ * regardless of 'proto'; presumably the resulting dataset list is the
+ * same for all share properties — verify for SMB-only use.
+ */
+int
+zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
+{
+	prop_changelist_t *clp;
+	int ret;
+
+	clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
+	if (clp == NULL)
+		return (-1);
+
+	ret = changelist_unshare(clp, proto);
+	changelist_free(clp);
+
+	return (ret);
+}
+
+/* Recursively unshare over NFS only. */
+int
+zfs_unshareall_nfs(zfs_handle_t *zhp)
+{
+	return (zfs_unshareall_proto(zhp, nfs_only));
+}
+
+/* Recursively unshare over SMB only. */
+int
+zfs_unshareall_smb(zfs_handle_t *zhp)
+{
+	return (zfs_unshareall_proto(zhp, smb_only));
+}
+
+/* Recursively unshare over every supported protocol. */
+int
+zfs_unshareall(zfs_handle_t *zhp)
+{
+	return (zfs_unshareall_proto(zhp, share_all_proto));
+}
+
+/* Unshare a single dataset by explicit mountpoint, all protocols. */
+int
+zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
+{
+	return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
+}
+
+/*
+ * Remove the mountpoint associated with the current dataset, if necessary.
+ * We only remove the underlying directory if:
+ *
+ *	- The mountpoint is not 'none' or 'legacy'
+ *	- The mountpoint is non-empty
+ *	- The mountpoint is the default or inherited
+ *	- The 'zoned' property is set, or we're in a local zone
+ *
+ * Any other directories we leave alone.
+ */
+void
+remove_mountpoint(zfs_handle_t *zhp)
+{
+	char mountpoint[ZFS_MAXPROPLEN];
+	zprop_source_t source;
+
+	/* zfs_is_mountable() filters 'none'/'legacy'/zoned for us */
+	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
+	    &source))
+		return;
+
+	/* Only remove directories ZFS itself created (not user-set paths) */
+	if (source == ZPROP_SRC_DEFAULT ||
+	    source == ZPROP_SRC_INHERITED) {
+		/*
+		 * Try to remove the directory, silently ignoring any errors.
+		 * The filesystem may have since been removed or moved around,
+		 * and this error isn't really useful to the administrator in
+		 * any way.
+		 */
+		(void) rmdir(mountpoint);
+	}
+}
+
+/*
+ * Append a dataset handle to the callback's growable array, doubling
+ * the allocation (starting at 64 slots) when full.
+ */
+void
+libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
+{
+	if (cbp->cb_alloc == cbp->cb_used) {
+		size_t newsz;
+		void *ptr;
+
+		newsz = cbp->cb_alloc ? cbp->cb_alloc * 2 : 64;
+		ptr = zfs_realloc(zhp->zfs_hdl,
+		    cbp->cb_handles, cbp->cb_alloc * sizeof (void *),
+		    newsz * sizeof (void *));
+		cbp->cb_handles = ptr;
+		cbp->cb_alloc = newsz;
+	}
+	cbp->cb_handles[cbp->cb_used++] = zhp;
+}
+
+/*
+ * zfs_iter_filesystems() callback: collect mountable filesystems into
+ * the get_all_cb_t list, recursing into children.  Non-filesystems and
+ * canmount=noauto datasets are skipped (and their handles closed).
+ * Collected handles stay open; they are closed by the caller.
+ */
+static int
+mount_cb(zfs_handle_t *zhp, void *data)
+{
+	get_all_cb_t *cbp = data;
+
+	if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	libzfs_add_handle(cbp, zhp);
+	if (zfs_iter_filesystems(zhp, mount_cb, cbp) != 0) {
+		zfs_close(zhp);
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * qsort() comparator for zfs_handle_t pointers: filesystems sort by
+ * mountpoint (so parents precede children); filesystems sort before
+ * non-filesystems, which compare by dataset name.
+ *
+ * 'a' and 'b' each point to a zfs_handle_t pointer.
+ */
+int
+libzfs_dataset_cmp(const void *a, const void *b)
+{
+	zfs_handle_t **za = (zfs_handle_t **)a;
+	zfs_handle_t **zb = (zfs_handle_t **)b;
+	char mounta[MAXPATHLEN];
+	char mountb[MAXPATHLEN];
+	boolean_t gota, gotb;
+
+	if ((gota = (zfs_get_type(*za) == ZFS_TYPE_FILESYSTEM)) != 0)
+		verify(zfs_prop_get(*za, ZFS_PROP_MOUNTPOINT, mounta,
+		    sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
+	if ((gotb = (zfs_get_type(*zb) == ZFS_TYPE_FILESYSTEM)) != 0)
+		verify(zfs_prop_get(*zb, ZFS_PROP_MOUNTPOINT, mountb,
+		    sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
+
+	if (gota && gotb)
+		return (strcmp(mounta, mountb));
+
+	if (gota)
+		return (-1);
+	if (gotb)
+		return (1);
+
+	/*
+	 * Fix: dereference the handle pointers — 'a'/'b' are
+	 * zfs_handle_t **, not zfs_handle_t *, so passing them to
+	 * zfs_get_name() directly compared garbage.
+	 */
+	return (strcmp(zfs_get_name(*za), zfs_get_name(*zb)));
+}
+
+/*
+ * Mount and share all datasets within the given pool.  This assumes that no
+ * datasets within the pool are currently mounted.  Because users can create
+ * complicated nested hierarchies of mountpoints, we first gather all the
+ * datasets and mountpoints within the pool, and sort them by mountpoint.  Once
+ * we have the list of all filesystems, we iterate over them in order and mount
+ * and/or share each one.
+ *
+ * Returns 0 if every dataset mounted and shared, -1 if any step failed.
+ */
+#pragma weak zpool_mount_datasets = zpool_enable_datasets
+int
+zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
+{
+	get_all_cb_t cb = { 0 };
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	zfs_handle_t *zfsp;
+	int i, ret = -1;
+	int *good;
+
+	/*
+	 * Gather all non-snap datasets within the pool.
+	 */
+	if ((zfsp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_DATASET)) == NULL)
+		goto out;
+
+	libzfs_add_handle(&cb, zfsp);
+	if (zfs_iter_filesystems(zfsp, mount_cb, &cb) != 0)
+		goto out;
+	/*
+	 * Sort the datasets by mountpoint.
+	 */
+	qsort(cb.cb_handles, cb.cb_used, sizeof (void *),
+	    libzfs_dataset_cmp);
+
+	/*
+	 * And mount all the datasets, keeping track of which ones
+	 * succeeded or failed.
+	 */
+	if ((good = zfs_alloc(zhp->zpool_hdl,
+	    cb.cb_used * sizeof (int))) == NULL)
+		goto out;
+
+	ret = 0;
+	for (i = 0; i < cb.cb_used; i++) {
+		if (zfs_mount(cb.cb_handles[i], mntopts, flags) != 0)
+			ret = -1;
+		else
+			good[i] = 1;
+	}
+
+	/*
+	 * Then share all the ones that need to be shared. This needs
+	 * to be a separate pass in order to avoid excessive reloading
+	 * of the configuration. Good should never be NULL since
+	 * zfs_alloc is supposed to exit if memory isn't available.
+	 */
+	for (i = 0; i < cb.cb_used; i++) {
+		if (good[i] && zfs_share(cb.cb_handles[i]) != 0)
+			ret = -1;
+	}
+
+	free(good);
+
+out:
+	/* Close every handle collected by mount_cb, then the array itself */
+	for (i = 0; i < cb.cb_used; i++)
+		zfs_close(cb.cb_handles[i]);
+	free(cb.cb_handles);
+
+	return (ret);
+}
+
+/*
+ * qsort() comparator for mountpoint strings.  The operands are
+ * deliberately reversed (mountb vs. mounta) so the list sorts in
+ * descending order: children sort before parents, which is the order
+ * required to unmount a nested hierarchy.
+ */
+static int
+mountpoint_compare(const void *a, const void *b)
+{
+	const char *mounta = *((char **)a);
+	const char *mountb = *((char **)b);
+
+	return (strcmp(mountb, mounta));
+}
+
+/* alias for 2002/240 */
+#pragma weak zpool_unmount_datasets = zpool_disable_datasets
+/*
+ * Unshare and unmount all datasets within the given pool.  We don't want to
+ * rely on traversing the DSL to discover the filesystems within the pool,
+ * because this may be expensive (if not all of them are mounted), and can fail
+ * arbitrarily (on I/O error, for example).  Instead, we walk /etc/mnttab and
+ * gather all the filesystems that are currently mounted.
+ *
+ * Returns 0 on success, -1 on any failure.  Processing order: unshare
+ * everything first, then unmount deepest-first, then remove mountpoint
+ * directories that ZFS created.
+ */
+int
+zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
+{
+	int used, alloc;
+	struct mnttab entry;
+	size_t namelen;
+	char **mountpoints = NULL;
+	zfs_handle_t **datasets = NULL;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	int i;
+	int ret = -1;
+	int flags = (force ? MS_FORCE : 0);
+
+	namelen = strlen(zhp->zpool_name);
+
+	rewind(hdl->libzfs_mnttab);
+	used = alloc = 0;
+	while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
+		/*
+		 * Ignore non-ZFS entries.
+		 */
+		if (entry.mnt_fstype == NULL ||
+		    strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
+			continue;
+
+		/*
+		 * Ignore filesystems not within this pool.
+		 */
+		if (entry.mnt_mountp == NULL ||
+		    strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
+		    (entry.mnt_special[namelen] != '/' &&
+		    entry.mnt_special[namelen] != '\0'))
+			continue;
+
+		/*
+		 * At this point we've found a filesystem within our pool.  Add
+		 * it to our growing list.
+		 */
+		if (used == alloc) {
+			if (alloc == 0) {
+				if ((mountpoints = zfs_alloc(hdl,
+				    8 * sizeof (void *))) == NULL)
+					goto out;
+
+				if ((datasets = zfs_alloc(hdl,
+				    8 * sizeof (void *))) == NULL)
+					goto out;
+
+				alloc = 8;
+			} else {
+				void *ptr;
+
+				if ((ptr = zfs_realloc(hdl, mountpoints,
+				    alloc * sizeof (void *),
+				    alloc * 2 * sizeof (void *))) == NULL)
+					goto out;
+				mountpoints = ptr;
+
+				if ((ptr = zfs_realloc(hdl, datasets,
+				    alloc * sizeof (void *),
+				    alloc * 2 * sizeof (void *))) == NULL)
+					goto out;
+				datasets = ptr;
+
+				alloc *= 2;
+			}
+		}
+
+		if ((mountpoints[used] = zfs_strdup(hdl,
+		    entry.mnt_mountp)) == NULL)
+			goto out;
+
+		/*
+		 * This is allowed to fail, in case there is some I/O error.  It
+		 * is only used to determine if we need to remove the underlying
+		 * mountpoint, so failure is not fatal.
+		 */
+		datasets[used] = make_dataset_handle(hdl, entry.mnt_special);
+
+		used++;
+	}
+
+	/*
+	 * At this point, we have the entire list of filesystems, so sort it by
+	 * mountpoint.
+	 */
+	qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
+
+	/*
+	 * Walk through and first unshare everything.
+	 */
+	for (i = 0; i < used; i++) {
+		zfs_share_proto_t *curr_proto;
+		for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
+		    curr_proto++) {
+			if (is_shared(hdl, mountpoints[i], *curr_proto) &&
+			    unshare_one(hdl, mountpoints[i],
+			    mountpoints[i], *curr_proto) != 0)
+				goto out;
+		}
+	}
+
+	/*
+	 * Now unmount everything, removing the underlying directories as
+	 * appropriate.
+	 */
+	for (i = 0; i < used; i++) {
+		if (unmount_one(hdl, mountpoints[i], flags) != 0)
+			goto out;
+	}
+
+	for (i = 0; i < used; i++) {
+		if (datasets[i])
+			remove_mountpoint(datasets[i]);
+	}
+
+	ret = 0;
+out:
+	/* Cleanup runs on both paths; 'used' bounds the valid entries */
+	for (i = 0; i < used; i++) {
+		if (datasets[i])
+			zfs_close(datasets[i]);
+		free(mountpoints[i]);
+	}
+	free(datasets);
+	free(mountpoints);
+
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
new file mode 100644
index 0000000000000000000000000000000000000000..03bc3e6586ddbca1ef4be483c865d201a54c9e80
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
@@ -0,0 +1,4146 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <errno.h>
+#include <devid.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <sys/zfs_ioctl.h>
+#include <dlfcn.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "libzfs_impl.h"
+#include "zfs_comutil.h"
+#include "zfeature_common.h"
+
+static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
+
+#define	DISK_ROOT	"/dev/dsk"
+#define	RDISK_ROOT	"/dev/rdsk"
+#define	BACKUP_SLICE	"s2"
+
+typedef struct prop_flags {
+	int create:1;	/* Validate property on creation */
+	int import:1;	/* Validate property on import */
+} prop_flags_t;
+
+/*
+ * ====================================================================
+ *   zpool property functions
+ * ====================================================================
+ */
+
+static int
+zpool_get_all_props(zpool_handle_t *zhp)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
+		return (-1);
+
+	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
+		if (errno == ENOMEM) {
+			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+				zcmd_free_nvlists(&zc);
+				return (-1);
+			}
+		} else {
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+	}
+
+	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
+		zcmd_free_nvlists(&zc);
+		return (-1);
+	}
+
+	zcmd_free_nvlists(&zc);
+
+	return (0);
+}
+
+static int
+zpool_props_refresh(zpool_handle_t *zhp)
+{
+	nvlist_t *old_props;
+
+	old_props = zhp->zpool_props;
+
+	if (zpool_get_all_props(zhp) != 0)
+		return (-1);
+
+	nvlist_free(old_props);
+	return (0);
+}
+
+static char *
+zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
+    zprop_source_t *src)
+{
+	nvlist_t *nv, *nvl;
+	uint64_t ival;
+	char *value;
+	zprop_source_t source;
+
+	nvl = zhp->zpool_props;
+	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
+		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
+		source = ival;
+		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
+	} else {
+		source = ZPROP_SRC_DEFAULT;
+		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
+			value = "-";
+	}
+
+	if (src)
+		*src = source;
+
+	return (value);
+}
+
+uint64_t
+zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
+{
+	nvlist_t *nv, *nvl;
+	uint64_t value;
+	zprop_source_t source;
+
+	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
+		/*
+		 * zpool_get_all_props() has most likely failed because
+		 * the pool is faulted, but if all we need is the top level
+		 * vdev's guid then get it from the zhp config nvlist.
+		 */
+		if ((prop == ZPOOL_PROP_GUID) &&
+		    (nvlist_lookup_nvlist(zhp->zpool_config,
+		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
+		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
+		    == 0)) {
+			return (value);
+		}
+		return (zpool_prop_default_numeric(prop));
+	}
+
+	nvl = zhp->zpool_props;
+	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
+		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
+		source = value;
+		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
+	} else {
+		source = ZPROP_SRC_DEFAULT;
+		value = zpool_prop_default_numeric(prop);
+	}
+
+	if (src)
+		*src = source;
+
+	return (value);
+}
+
+/*
+ * Map VDEV STATE to printed strings.
+ */
+const char *
+zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
+{
+	switch (state) {
+	case VDEV_STATE_CLOSED:
+	case VDEV_STATE_OFFLINE:
+		return (gettext("OFFLINE"));
+	case VDEV_STATE_REMOVED:
+		return (gettext("REMOVED"));
+	case VDEV_STATE_CANT_OPEN:
+		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
+			return (gettext("FAULTED"));
+		else if (aux == VDEV_AUX_SPLIT_POOL)
+			return (gettext("SPLIT"));
+		else
+			return (gettext("UNAVAIL"));
+	case VDEV_STATE_FAULTED:
+		return (gettext("FAULTED"));
+	case VDEV_STATE_DEGRADED:
+		return (gettext("DEGRADED"));
+	case VDEV_STATE_HEALTHY:
+		return (gettext("ONLINE"));
+	}
+
+	return (gettext("UNKNOWN"));
+}
+
+/*
+ * Map POOL STATE to printed strings.
+ */
+const char *
+zpool_pool_state_to_name(pool_state_t state)
+{
+	switch (state) {
+	case POOL_STATE_ACTIVE:
+		return (gettext("ACTIVE"));
+	case POOL_STATE_EXPORTED:
+		return (gettext("EXPORTED"));
+	case POOL_STATE_DESTROYED:
+		return (gettext("DESTROYED"));
+	case POOL_STATE_SPARE:
+		return (gettext("SPARE"));
+	case POOL_STATE_L2CACHE:
+		return (gettext("L2CACHE"));
+	case POOL_STATE_UNINITIALIZED:
+		return (gettext("UNINITIALIZED"));
+	case POOL_STATE_UNAVAIL:
+		return (gettext("UNAVAIL"));
+	case POOL_STATE_POTENTIALLY_ACTIVE:
+		return (gettext("POTENTIALLY_ACTIVE"));
+	}
+
+	return (gettext("UNKNOWN"));
+}
+
+/*
+ * Get a zpool property value for 'prop' and return the value in
+ * a pre-allocated buffer.
+ */
+int
+zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
+    zprop_source_t *srctype)
+{
+	uint64_t intval;
+	const char *strval;
+	zprop_source_t src = ZPROP_SRC_NONE;
+	nvlist_t *nvroot;
+	vdev_stat_t *vs;
+	uint_t vsc;
+
+	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
+		switch (prop) {
+		case ZPOOL_PROP_NAME:
+			(void) strlcpy(buf, zpool_get_name(zhp), len);
+			break;
+
+		case ZPOOL_PROP_HEALTH:
+			(void) strlcpy(buf, "FAULTED", len);
+			break;
+
+		case ZPOOL_PROP_GUID:
+			intval = zpool_get_prop_int(zhp, prop, &src);
+			(void) snprintf(buf, len, "%llu", intval);
+			break;
+
+		case ZPOOL_PROP_ALTROOT:
+		case ZPOOL_PROP_CACHEFILE:
+		case ZPOOL_PROP_COMMENT:
+			if (zhp->zpool_props != NULL ||
+			    zpool_get_all_props(zhp) == 0) {
+				(void) strlcpy(buf,
+				    zpool_get_prop_string(zhp, prop, &src),
+				    len);
+				if (srctype != NULL)
+					*srctype = src;
+				return (0);
+			}
+			/* FALLTHROUGH */
+		default:
+			(void) strlcpy(buf, "-", len);
+			break;
+		}
+
+		if (srctype != NULL)
+			*srctype = src;
+		return (0);
+	}
+
+	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
+	    prop != ZPOOL_PROP_NAME)
+		return (-1);
+
+	switch (zpool_prop_get_type(prop)) {
+	case PROP_TYPE_STRING:
+		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
+		    len);
+		break;
+
+	case PROP_TYPE_NUMBER:
+		intval = zpool_get_prop_int(zhp, prop, &src);
+
+		switch (prop) {
+		case ZPOOL_PROP_SIZE:
+		case ZPOOL_PROP_ALLOCATED:
+		case ZPOOL_PROP_FREE:
+		case ZPOOL_PROP_FREEING:
+		case ZPOOL_PROP_EXPANDSZ:
+			(void) zfs_nicenum(intval, buf, len);
+			break;
+
+		case ZPOOL_PROP_CAPACITY:
+			(void) snprintf(buf, len, "%llu%%",
+			    (u_longlong_t)intval);
+			break;
+
+		case ZPOOL_PROP_DEDUPRATIO:
+			(void) snprintf(buf, len, "%llu.%02llux",
+			    (u_longlong_t)(intval / 100),
+			    (u_longlong_t)(intval % 100));
+			break;
+
+		case ZPOOL_PROP_HEALTH:
+			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+			verify(nvlist_lookup_uint64_array(nvroot,
+			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
+			    == 0);
+
+			(void) strlcpy(buf, zpool_state_to_name(intval,
+			    vs->vs_aux), len);
+			break;
+		case ZPOOL_PROP_VERSION:
+			if (intval >= SPA_VERSION_FEATURES) {
+				(void) snprintf(buf, len, "-");
+				break;
+			}
+			/* FALLTHROUGH */
+		default:
+			(void) snprintf(buf, len, "%llu", intval);
+		}
+		break;
+
+	case PROP_TYPE_INDEX:
+		intval = zpool_get_prop_int(zhp, prop, &src);
+		if (zpool_prop_index_to_string(prop, intval, &strval)
+		    != 0)
+			return (-1);
+		(void) strlcpy(buf, strval, len);
+		break;
+
+	default:
+		abort();
+	}
+
+	if (srctype)
+		*srctype = src;
+
+	return (0);
+}
+
+/*
+ * Check that the bootfs name lies within the pool it is being set on.
+ * Assumes bootfs is a valid dataset name.
+ */
+static boolean_t
+bootfs_name_valid(const char *pool, char *bootfs)
+{
+	int len = strlen(pool);
+
+	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
+		return (B_FALSE);
+
+	if (strncmp(pool, bootfs, len) == 0 &&
+	    (bootfs[len] == '/' || bootfs[len] == '\0'))
+		return (B_TRUE);
+
+	return (B_FALSE);
+}
+
+/*
+ * Inspect the configuration to determine if any of the devices contain
+ * an EFI label.
+ */
+static boolean_t
+pool_uses_efi(nvlist_t *config)
+{
+#ifdef sun
+	nvlist_t **child;
+	uint_t c, children;
+
+	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return (read_efi_label(config, NULL) >= 0);
+
+	for (c = 0; c < children; c++) {
+		if (pool_uses_efi(child[c]))
+			return (B_TRUE);
+	}
+#endif	/* sun */
+	return (B_FALSE);
+}
+
+boolean_t
+zpool_is_bootable(zpool_handle_t *zhp)
+{
+	char bootfs[ZPOOL_MAXNAMELEN];
+
+	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
+	    sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
+	    sizeof (bootfs)) != 0);
+}
+
+
+/*
+ * Given an nvlist of zpool properties to be set, validate that they are
+ * correct, and parse any numeric properties (index, boolean, etc) if they are
+ * specified as strings.
+ */
+static nvlist_t *
+zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
+    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
+{
+	nvpair_t *elem;
+	nvlist_t *retprops;
+	zpool_prop_t prop;
+	char *strval;
+	uint64_t intval;
+	char *slash, *check;
+	struct stat64 statbuf;
+	zpool_handle_t *zhp;
+	nvlist_t *nvroot;
+
+	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
+		(void) no_memory(hdl);
+		return (NULL);
+	}
+
+	elem = NULL;
+	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
+		const char *propname = nvpair_name(elem);
+
+		prop = zpool_name_to_prop(propname);
+		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
+			int err;
+			zfeature_info_t *feature;
+			char *fname = strchr(propname, '@') + 1;
+
+			err = zfeature_lookup_name(fname, &feature);
+			if (err != 0) {
+				ASSERT3U(err, ==, ENOENT);
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "invalid feature '%s'"), fname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			if (nvpair_type(elem) != DATA_TYPE_STRING) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' must be a string"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			(void) nvpair_value_string(elem, &strval);
+			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' can only be set to "
+				    "'enabled'"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
+				(void) no_memory(hdl);
+				goto error;
+			}
+			continue;
+		}
+
+		/*
+		 * Make sure this property is valid and applies to this type.
+		 */
+		if (prop == ZPROP_INVAL) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid property '%s'"), propname);
+			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+			goto error;
+		}
+
+		if (zpool_prop_readonly(prop)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
+			    "is readonly"), propname);
+			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
+			goto error;
+		}
+
+		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
+		    &strval, &intval, errbuf) != 0)
+			goto error;
+
+		/*
+		 * Perform additional checking for specific properties.
+		 */
+		switch (prop) {
+		case ZPOOL_PROP_VERSION:
+			if (intval < version ||
+			    !SPA_VERSION_IS_SUPPORTED(intval)) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' number %llu is invalid."),
+				    propname, (u_longlong_t)intval);
+				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+				goto error;
+			}
+			break;
+
+		case ZPOOL_PROP_BOOTFS:
+			if (flags.create || flags.import) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' cannot be set at creation "
+				    "or import time"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			if (version < SPA_VERSION_BOOTFS) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "pool must be upgraded to support "
+				    "'%s' property"), propname);
+				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+				goto error;
+			}
+
+			/*
+			 * bootfs property value has to be a dataset name and
+			 * the dataset must be in the same pool it is set on.
+			 */
+			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
+			    strval)) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
+				    "is an invalid name"), strval);
+				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+				goto error;
+			}
+
+			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "could not open pool '%s'"), poolname);
+				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
+				goto error;
+			}
+			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+
+#ifdef sun
+			/*
+			 * bootfs property cannot be set on a disk which has
+			 * been EFI labeled.
+			 */
+			if (pool_uses_efi(nvroot)) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' not supported on "
+				    "EFI labeled devices"), propname);
+				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
+				zpool_close(zhp);
+				goto error;
+			}
+#endif	/* sun */
+			zpool_close(zhp);
+			break;
+
+		case ZPOOL_PROP_ALTROOT:
+			if (!flags.create && !flags.import) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' can only be set during pool "
+				    "creation or import"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+
+			if (strval[0] != '/') {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "bad alternate root '%s'"), strval);
+				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+				goto error;
+			}
+			break;
+
+		case ZPOOL_PROP_CACHEFILE:
+			if (strval[0] == '\0')
+				break;
+
+			if (strcmp(strval, "none") == 0)
+				break;
+
+			if (strval[0] != '/') {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' must be empty, an "
+				    "absolute path, or 'none'"), propname);
+				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+				goto error;
+			}
+
+			slash = strrchr(strval, '/');
+
+			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
+			    strcmp(slash, "/..") == 0) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' is not a valid file"), strval);
+				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+				goto error;
+			}
+
+			*slash = '\0';
+
+			if (strval[0] != '\0' &&
+			    (stat64(strval, &statbuf) != 0 ||
+			    !S_ISDIR(statbuf.st_mode))) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "'%s' is not a valid directory"),
+				    strval);
+				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
+				goto error;
+			}
+
+			*slash = '/';
+			break;
+
+		case ZPOOL_PROP_COMMENT:
+			for (check = strval; *check != '\0'; check++) {
+				if (!isprint(*check)) {
+					zfs_error_aux(hdl,
+					    dgettext(TEXT_DOMAIN,
+					    "comment may only have printable "
+					    "characters"));
+					(void) zfs_error(hdl, EZFS_BADPROP,
+					    errbuf);
+					goto error;
+				}
+			}
+			if (strlen(strval) > ZPROP_MAX_COMMENT) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "comment must not exceed %d characters"),
+				    ZPROP_MAX_COMMENT);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+			break;
+		case ZPOOL_PROP_READONLY:
+			if (!flags.import) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "property '%s' can only be set at "
+				    "import time"), propname);
+				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+				goto error;
+			}
+			break;
+		}
+	}
+
+	return (retprops);
+error:
+	nvlist_free(retprops);
+	return (NULL);
+}
+
+/*
+ * Set zpool property : propname=propval.
+ */
+int
+zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret = -1;
+	char errbuf[1024];
+	nvlist_t *nvl = NULL;
+	nvlist_t *realprops;
+	uint64_t version;
+	prop_flags_t flags = { 0 };
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
+	    zhp->zpool_name);
+
+	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
+		return (no_memory(zhp->zpool_hdl));
+
+	if (nvlist_add_string(nvl, propname, propval) != 0) {
+		nvlist_free(nvl);
+		return (no_memory(zhp->zpool_hdl));
+	}
+
+	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
+	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
+	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
+		nvlist_free(nvl);
+		return (-1);
+	}
+
+	nvlist_free(nvl);
+	nvl = realprops;
+
+	/*
+	 * Execute the corresponding ioctl() to set this property.
+	 */
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
+		nvlist_free(nvl);
+		return (-1);
+	}
+
+	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
+
+	zcmd_free_nvlists(&zc);
+	nvlist_free(nvl);
+
+	if (ret)
+		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
+	else
+		(void) zpool_props_refresh(zhp);
+
+	return (ret);
+}
+
+int
+zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
+{
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	zprop_list_t *entry;
+	char buf[ZFS_MAXPROPLEN];
+	nvlist_t *features = NULL;
+	zprop_list_t **last;
+	boolean_t firstexpand = (NULL == *plp);
+
+	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
+		return (-1);
+
+	last = plp;
+	while (*last != NULL)
+		last = &(*last)->pl_next;
+
+	if ((*plp)->pl_all)
+		features = zpool_get_features(zhp);
+
+	if ((*plp)->pl_all && firstexpand) {
+		for (int i = 0; i < SPA_FEATURES; i++) {
+			zprop_list_t *entry = zfs_alloc(hdl,
+			    sizeof (zprop_list_t));
+			entry->pl_prop = ZPROP_INVAL;
+			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
+			    spa_feature_table[i].fi_uname);
+			entry->pl_width = strlen(entry->pl_user_prop);
+			entry->pl_all = B_TRUE;
+
+			*last = entry;
+			last = &entry->pl_next;
+		}
+	}
+
+	/* add any unsupported features */
+	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
+	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
+		char *propname;
+		boolean_t found;
+		zprop_list_t *entry;
+
+		if (zfeature_is_supported(nvpair_name(nvp)))
+			continue;
+
+		propname = zfs_asprintf(hdl, "unsupported@%s",
+		    nvpair_name(nvp));
+
+		/*
+		 * Before adding the property to the list make sure that no
+		 * other pool already added the same property.
+		 */
+		found = B_FALSE;
+		entry = *plp;
+		while (entry != NULL) {
+			if (entry->pl_user_prop != NULL &&
+			    strcmp(propname, entry->pl_user_prop) == 0) {
+				found = B_TRUE;
+				break;
+			}
+			entry = entry->pl_next;
+		}
+		if (found) {
+			free(propname);
+			continue;
+		}
+
+		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
+		entry->pl_prop = ZPROP_INVAL;
+		entry->pl_user_prop = propname;
+		entry->pl_width = strlen(entry->pl_user_prop);
+		entry->pl_all = B_TRUE;
+
+		*last = entry;
+		last = &entry->pl_next;
+	}
+
+	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
+
+		if (entry->pl_fixed)
+			continue;
+
+		if (entry->pl_prop != ZPROP_INVAL &&
+		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
+		    NULL) == 0) {
+			if (strlen(buf) > entry->pl_width)
+				entry->pl_width = strlen(buf);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Get the state for the given feature on the given ZFS pool.
+ */
+int
+zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
+    size_t len)
+{
+	uint64_t refcount;
+	boolean_t found = B_FALSE;
+	nvlist_t *features = zpool_get_features(zhp);
+	boolean_t supported;
+	const char *feature = strchr(propname, '@') + 1;
+
+	supported = zpool_prop_feature(propname);
+	ASSERT(supported || zpool_prop_unsupported(propname));
+
+	/*
+	 * Convert from feature name to feature guid. This conversion is
+	 * unnecessary for unsupported@... properties because they already
+	 * use guids.
+	 */
+	if (supported) {
+		int ret;
+		zfeature_info_t *fi;
+
+		ret = zfeature_lookup_name(feature, &fi);
+		if (ret != 0) {
+			(void) strlcpy(buf, "-", len);
+			return (ENOTSUP);
+		}
+		feature = fi->fi_guid;
+	}
+
+	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
+		found = B_TRUE;
+
+	if (supported) {
+		if (!found) {
+			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
+		} else  {
+			if (refcount == 0)
+				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
+			else
+				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
+		}
+	} else {
+		if (found) {
+			if (refcount == 0) {
+				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
+			} else {
+				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
+			}
+		} else {
+			(void) strlcpy(buf, "-", len);
+			return (ENOTSUP);
+		}
+	}
+
+	return (0);
+}
+
+/*
+ * Don't start the slice at the default block of 34; many storage
+ * devices will use a stripe width of 128k, so start there instead.
+ */
+#define	NEW_START_BLOCK	256
+
+/*
+ * Validate the given pool name, optionally reporting an extended error
+ * message via the libzfs handle (zfs_error_aux) when 'hdl' is non-NULL.
+ */
+boolean_t
+zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
+{
+	namecheck_err_t why;
+	char what;
+	int ret;
+
+	ret = pool_namecheck(pool, &why, &what);
+
+	/*
+	 * The rules for reserved pool names were extended at a later point.
+	 * But we need to support users with existing pools that may now be
+	 * invalid.  So we only check for this expanded set of names during a
+	 * create (or import), and only in userland.
+	 */
+	if (ret == 0 && !isopen &&
+	    (strncmp(pool, "mirror", 6) == 0 ||
+	    strncmp(pool, "raidz", 5) == 0 ||
+	    strncmp(pool, "spare", 5) == 0 ||
+	    strcmp(pool, "log") == 0)) {
+		if (hdl != NULL)
+			zfs_error_aux(hdl,
+			    dgettext(TEXT_DOMAIN, "name is reserved"));
+		return (B_FALSE);
+	}
+
+
+	if (ret != 0) {
+		if (hdl != NULL) {
+			switch (why) {
+			case NAME_ERR_TOOLONG:
+				zfs_error_aux(hdl,
+				    dgettext(TEXT_DOMAIN, "name is too long"));
+				break;
+
+			case NAME_ERR_INVALCHAR:
+				zfs_error_aux(hdl,
+				    dgettext(TEXT_DOMAIN, "invalid character "
+				    "'%c' in pool name"), what);
+				break;
+
+			case NAME_ERR_NOLETTER:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "name must begin with a letter"));
+				break;
+
+			case NAME_ERR_RESERVED:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "name is reserved"));
+				break;
+
+			case NAME_ERR_DISKLIKE:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "pool name is reserved"));
+				break;
+
+			case NAME_ERR_LEADING_SLASH:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "leading slash in name"));
+				break;
+
+			case NAME_ERR_EMPTY_COMPONENT:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "empty component in name"));
+				break;
+
+			case NAME_ERR_TRAILING_SLASH:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "trailing slash in name"));
+				break;
+
+			case NAME_ERR_MULTIPLE_AT:
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "multiple '@' delimiters in name"));
+				break;
+
+			}
+		}
+		return (B_FALSE);
+	}
+
+	return (B_TRUE);
+}
+
+/*
+ * Open a handle to the given pool, even if the pool is currently in the FAULTED
+ * state.
+ */
+zpool_handle_t *
+zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
+{
+	zpool_handle_t *zhp;
+	boolean_t missing;
+
+	/*
+	 * Make sure the pool name is valid.
+	 */
+	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
+		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
+		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
+		    pool);
+		return (NULL);
+	}
+
+	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
+		return (NULL);
+
+	zhp->zpool_hdl = hdl;
+	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
+
+	if (zpool_refresh_stats(zhp, &missing) != 0) {
+		zpool_close(zhp);
+		return (NULL);
+	}
+
+	if (missing) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
+		(void) zfs_error_fmt(hdl, EZFS_NOENT,
+		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
+		zpool_close(zhp);
+		return (NULL);
+	}
+
+	return (zhp);
+}
+
+/*
+ * Like the above, but silent on error.  Used when iterating over pools (because
+ * the configuration cache may be out of date).
+ */
+int
+zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
+{
+	zpool_handle_t *zhp;
+	boolean_t missing;
+
+	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
+		return (-1);
+
+	zhp->zpool_hdl = hdl;
+	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
+
+	if (zpool_refresh_stats(zhp, &missing) != 0) {
+		zpool_close(zhp);
+		return (-1);
+	}
+
+	if (missing) {
+		zpool_close(zhp);
+		*ret = NULL;
+		return (0);
+	}
+
+	*ret = zhp;
+	return (0);
+}
+
+/*
+ * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
+ * state.
+ */
+zpool_handle_t *
+zpool_open(libzfs_handle_t *hdl, const char *pool)
+{
+	zpool_handle_t *zhp;
+
+	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
+		return (NULL);
+
+	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
+		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
+		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
+		zpool_close(zhp);
+		return (NULL);
+	}
+
+	return (zhp);
+}
+
+/*
+ * Close the handle.  Simply frees the memory associated with the handle.
+ */
+void
+zpool_close(zpool_handle_t *zhp)
+{
+	if (zhp->zpool_config)
+		nvlist_free(zhp->zpool_config);
+	if (zhp->zpool_old_config)
+		nvlist_free(zhp->zpool_old_config);
+	if (zhp->zpool_props)
+		nvlist_free(zhp->zpool_props);
+	free(zhp);
+}
+
+/*
+ * Return the name of the pool.
+ */
+const char *
+zpool_get_name(zpool_handle_t *zhp)
+{
+	return (zhp->zpool_name);
+}
+
+
+/*
+ * Return the state of the pool (ACTIVE or UNAVAILABLE)
+ */
+int
+zpool_get_state(zpool_handle_t *zhp)
+{
+	return (zhp->zpool_state);
+}
+
+/*
+ * Create the named pool, using the provided vdev list.  It is assumed
+ * that the consumer has already validated the contents of the nvlist, so we
+ * don't have to worry about error semantics.
+ */
+int
+zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
+    nvlist_t *props, nvlist_t *fsprops)
+{
+	zfs_cmd_t zc = { 0 };
+	nvlist_t *zc_fsprops = NULL;
+	nvlist_t *zc_props = NULL;
+	char msg[1024];
+	char *altroot;
+	int ret = -1;
+
+	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+	    "cannot create '%s'"), pool);
+
+	if (!zpool_name_valid(hdl, B_FALSE, pool))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
+
+	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
+		return (-1);
+
+	if (props) {
+		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
+
+		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
+		    SPA_VERSION_1, flags, msg)) == NULL) {
+			goto create_failed;
+		}
+	}
+
+	if (fsprops) {
+		uint64_t zoned;
+		char *zonestr;
+
+		zoned = ((nvlist_lookup_string(fsprops,
+		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
+		    strcmp(zonestr, "on") == 0);
+
+		if ((zc_fsprops = zfs_valid_proplist(hdl,
+		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
+			goto create_failed;
+		}
+		if (!zc_props &&
+		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
+			goto create_failed;
+		}
+		if (nvlist_add_nvlist(zc_props,
+		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
+			goto create_failed;
+		}
+	}
+
+	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
+		goto create_failed;
+
+	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
+
+	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
+
+		zcmd_free_nvlists(&zc);
+		nvlist_free(zc_props);
+		nvlist_free(zc_fsprops);
+
+		switch (errno) {
+		case EBUSY:
+			/*
+			 * This can happen if the user has specified the same
+			 * device multiple times.  We can't reliably detect this
+			 * until we try to add it and see we already have a
+			 * label.
+			 */
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "one or more vdevs refer to the same device"));
+			return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+		case EOVERFLOW:
+			/*
+			 * This occurs when one of the devices is below
+			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
+			 * device was the problem device since there's no
+			 * reliable way to determine device size from userland.
+			 */
+			{
+				char buf[64];
+
+				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
+
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "one or more devices is less than the "
+				    "minimum size (%s)"), buf);
+			}
+			return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+		case ENOSPC:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "one or more devices is out of space"));
+			return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+		case ENOTBLK:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "cache device must be a disk or disk slice"));
+			return (zfs_error(hdl, EZFS_BADDEV, msg));
+
+		default:
+			return (zpool_standard_error(hdl, errno, msg));
+		}
+	}
+
+	/*
+	 * If this is an alternate root pool, then we automatically set the
+	 * mountpoint of the root dataset to be '/'.
+	 */
+	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
+	    &altroot) == 0) {
+		zfs_handle_t *zhp;
+
+		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
+		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
+		    "/") == 0);
+
+		zfs_close(zhp);
+	}
+
+create_failed:
+	zcmd_free_nvlists(&zc);
+	nvlist_free(zc_props);
+	nvlist_free(zc_fsprops);
+	return (ret);
+}
+
+/*
+ * Destroy the given pool.  It is up to the caller to ensure that there are no
+ * datasets left in the pool.
+ */
+int
+zpool_destroy(zpool_handle_t *zhp)
+{
+	zfs_cmd_t zc = { 0 };
+	zfs_handle_t *zfp = NULL;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	char msg[1024];
+
+	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
+	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
+		return (-1);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
+		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+		    "cannot destroy '%s'"), zhp->zpool_name);
+
+		if (errno == EROFS) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "one or more devices is read only"));
+			(void) zfs_error(hdl, EZFS_BADDEV, msg);
+		} else {
+			(void) zpool_standard_error(hdl, errno, msg);
+		}
+
+		if (zfp)
+			zfs_close(zfp);
+		return (-1);
+	}
+
+	if (zfp) {
+		remove_mountpoint(zfp);
+		zfs_close(zfp);
+	}
+
+	return (0);
+}
+
+/*
+ * Add the given vdevs to the pool.  The caller must have already performed the
+ * necessary verification to ensure that the vdev specification is well-formed.
+ */
+int
+zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
+{
+	zfs_cmd_t zc = { 0 };
+	int ret;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	char msg[1024];
+	nvlist_t **spares, **l2cache;
+	uint_t nspares, nl2cache;
+
+	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+	    "cannot add to '%s'"), zhp->zpool_name);
+
+	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
+	    SPA_VERSION_SPARES &&
+	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
+	    &spares, &nspares) == 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
+		    "upgraded to add hot spares"));
+		return (zfs_error(hdl, EZFS_BADVERSION, msg));
+	}
+
+	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
+	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
+		uint64_t s;
+
+		for (s = 0; s < nspares; s++) {
+			char *path;
+
+			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
+			    &path) == 0 && pool_uses_efi(spares[s])) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "device '%s' contains an EFI label and "
+				    "cannot be used on root pools."),
+				    zpool_vdev_name(hdl, NULL, spares[s],
+				    B_FALSE));
+				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
+			}
+		}
+	}
+
+	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
+	    SPA_VERSION_L2CACHE &&
+	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
+	    &l2cache, &nl2cache) == 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
+		    "upgraded to add cache devices"));
+		return (zfs_error(hdl, EZFS_BADVERSION, msg));
+	}
+
+	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
+		return (-1);
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
+		switch (errno) {
+		case EBUSY:
+			/*
+			 * This can happen if the user has specified the same
+			 * device multiple times.  We can't reliably detect this
+			 * until we try to add it and see we already have a
+			 * label.
+			 */
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "one or more vdevs refer to the same device"));
+			(void) zfs_error(hdl, EZFS_BADDEV, msg);
+			break;
+
+		case EOVERFLOW:
+			/*
+			 * This occurrs when one of the devices is below
+			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
+			 * device was the problem device since there's no
+			 * reliable way to determine device size from userland.
+			 */
+			{
+				char buf[64];
+
+				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
+
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "device is less than the minimum "
+				    "size (%s)"), buf);
+			}
+			(void) zfs_error(hdl, EZFS_BADDEV, msg);
+			break;
+
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded to add these vdevs"));
+			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
+			break;
+
+		case EDOM:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "root pool can not have multiple vdevs"
+			    " or separate logs"));
+			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
+			break;
+
+		case ENOTBLK:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "cache device must be a disk or disk slice"));
+			(void) zfs_error(hdl, EZFS_BADDEV, msg);
+			break;
+
+		default:
+			(void) zpool_standard_error(hdl, errno, msg);
+		}
+
+		ret = -1;
+	} else {
+		ret = 0;
+	}
+
+	zcmd_free_nvlists(&zc);
+
+	return (ret);
+}
+
+/*
+ * Exports the pool from the system.  The caller must ensure that there are no
+ * mounted datasets in the pool.
+ */
+int
+zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+
+	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+	    "cannot export '%s'"), zhp->zpool_name);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_cookie = force;
+	zc.zc_guid = hardforce;
+
+	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
+		switch (errno) {
+		case EXDEV:
+			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
+			    "use '-f' to override the following errors:\n"
+			    "'%s' has an active shared spare which could be"
+			    " used by other pools once '%s' is exported."),
+			    zhp->zpool_name, zhp->zpool_name);
+			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
+			    msg));
+		default:
+			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
+			    msg));
+		}
+	}
+
+	return (0);
+}
+
+int
+zpool_export(zpool_handle_t *zhp, boolean_t force)
+{
+	return (zpool_export_common(zhp, force, B_FALSE));
+}
+
+int
+zpool_export_force(zpool_handle_t *zhp)
+{
+	return (zpool_export_common(zhp, B_TRUE, B_TRUE));
+}
+
+static void
+zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
+    nvlist_t *config)
+{
+	nvlist_t *nv = NULL;
+	uint64_t rewindto;
+	int64_t loss = -1;
+	struct tm t;
+	char timestr[128];
+
+	if (!hdl->libzfs_printerr || config == NULL)
+		return;
+
+	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
+	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
+		return;
+	}
+
+	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
+		return;
+	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
+
+	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
+	    strftime(timestr, 128, 0, &t) != 0) {
+		if (dryrun) {
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "Would be able to return %s "
+			    "to its state as of %s.\n"),
+			    name, timestr);
+		} else {
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "Pool %s returned to its state as of %s.\n"),
+			    name, timestr);
+		}
+		if (loss > 120) {
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "%s approximately %lld "),
+			    dryrun ? "Would discard" : "Discarded",
+			    (loss + 30) / 60);
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "minutes of transactions.\n"));
+		} else if (loss > 0) {
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "%s approximately %lld "),
+			    dryrun ? "Would discard" : "Discarded", loss);
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "seconds of transactions.\n"));
+		}
+	}
+}
+
+void
+zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
+    nvlist_t *config)
+{
+	nvlist_t *nv = NULL;
+	int64_t loss = -1;
+	uint64_t edata = UINT64_MAX;
+	uint64_t rewindto;
+	struct tm t;
+	char timestr[128];
+
+	if (!hdl->libzfs_printerr)
+		return;
+
+	if (reason >= 0)
+		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
+	else
+		(void) printf(dgettext(TEXT_DOMAIN, "\t"));
+
+	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
+	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
+	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
+	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
+		goto no_info;
+
+	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
+	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
+	    &edata);
+
+	(void) printf(dgettext(TEXT_DOMAIN,
+	    "Recovery is possible, but will result in some data loss.\n"));
+
+	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
+	    strftime(timestr, 128, 0, &t) != 0) {
+		(void) printf(dgettext(TEXT_DOMAIN,
+		    "\tReturning the pool to its state as of %s\n"
+		    "\tshould correct the problem.  "),
+		    timestr);
+	} else {
+		(void) printf(dgettext(TEXT_DOMAIN,
+		    "\tReverting the pool to an earlier state "
+		    "should correct the problem.\n\t"));
+	}
+
+	if (loss > 120) {
+		(void) printf(dgettext(TEXT_DOMAIN,
+		    "Approximately %lld minutes of data\n"
+		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
+	} else if (loss > 0) {
+		(void) printf(dgettext(TEXT_DOMAIN,
+		    "Approximately %lld seconds of data\n"
+		    "\tmust be discarded, irreversibly.  "), loss);
+	}
+	if (edata != 0 && edata != UINT64_MAX) {
+		if (edata == 1) {
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "After rewind, at least\n"
+			    "\tone persistent user-data error will remain.  "));
+		} else {
+			(void) printf(dgettext(TEXT_DOMAIN,
+			    "After rewind, several\n"
+			    "\tpersistent user-data errors will remain.  "));
+		}
+	}
+	(void) printf(dgettext(TEXT_DOMAIN,
+	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
+	    reason >= 0 ? "clear" : "import", name);
+
+	(void) printf(dgettext(TEXT_DOMAIN,
+	    "A scrub of the pool\n"
+	    "\tis strongly recommended after recovery.\n"));
+	return;
+
+no_info:
+	(void) printf(dgettext(TEXT_DOMAIN,
+	    "Destroy and re-create the pool from\n\ta backup source.\n"));
+}
+
+/*
+ * zpool_import() is a contracted interface. Should be kept the same
+ * if possible.
+ *
+ * Applications should use zpool_import_props() to import a pool with
+ * new properties value to be set.
+ */
+int
+zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
+    char *altroot)
+{
+	nvlist_t *props = NULL;
+	int ret;
+
+	if (altroot != NULL) {
+		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
+			return (zfs_error_fmt(hdl, EZFS_NOMEM,
+			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+			    newname));
+		}
+
+		if (nvlist_add_string(props,
+		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
+		    nvlist_add_string(props,
+		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
+			nvlist_free(props);
+			return (zfs_error_fmt(hdl, EZFS_NOMEM,
+			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+			    newname));
+		}
+	}
+
+	ret = zpool_import_props(hdl, config, newname, props,
+	    ZFS_IMPORT_NORMAL);
+	if (props)
+		nvlist_free(props);
+	return (ret);
+}
+
+static void
+print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
+    int indent)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	char *vname;
+	uint64_t is_log = 0;
+
+	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
+	    &is_log);
+
+	if (name != NULL)
+		(void) printf("\t%*s%s%s\n", indent, "", name,
+		    is_log ? " [log]" : "");
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return;
+
+	for (c = 0; c < children; c++) {
+		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
+		print_vdev_tree(hdl, vname, child[c], indent + 2);
+		free(vname);
+	}
+}
+
+void
+zpool_print_unsup_feat(nvlist_t *config)
+{
+	nvlist_t *nvinfo, *unsup_feat;
+
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
+	    0);
+	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
+	    &unsup_feat) == 0);
+
+	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
+	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
+		char *desc;
+
+		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
+		verify(nvpair_value_string(nvp, &desc) == 0);
+
+		if (strlen(desc) > 0)
+			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
+		else
+			(void) printf("\t%s\n", nvpair_name(nvp));
+	}
+}
+
+/*
+ * Import the given pool using the known configuration and a list of
+ * properties to be set. The configuration should have come from
+ * zpool_find_import(). The 'newname' parameters control whether the pool
+ * is imported with a different name.
+ */
+int
+zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
+    nvlist_t *props, int flags)
+{
+	zfs_cmd_t zc = { 0 };
+	zpool_rewind_policy_t policy;
+	nvlist_t *nv = NULL;
+	nvlist_t *nvinfo = NULL;
+	nvlist_t *missing = NULL;
+	char *thename;
+	char *origname;
+	int ret;
+	int error = 0;
+	char errbuf[1024];
+
+	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+	    &origname) == 0);
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot import pool '%s'"), origname);
+
+	if (newname != NULL) {
+		if (!zpool_name_valid(hdl, B_FALSE, newname))
+			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
+			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+			    newname));
+		thename = (char *)newname;
+	} else {
+		thename = origname;
+	}
+
+	if (props) {
+		uint64_t version;
+		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
+
+		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+		    &version) == 0);
+
+		if ((props = zpool_valid_proplist(hdl, origname,
+		    props, version, flags, errbuf)) == NULL) {
+			return (-1);
+		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
+			nvlist_free(props);
+			return (-1);
+		}
+	}
+
+	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
+
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+	    &zc.zc_guid) == 0);
+
+	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
+		nvlist_free(props);
+		return (-1);
+	}
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
+		nvlist_free(props);
+		return (-1);
+	}
+
+	zc.zc_cookie = flags;
+	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
+	    errno == ENOMEM) {
+		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+	}
+	if (ret != 0)
+		error = errno;
+
+	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
+	zpool_get_rewind_policy(config, &policy);
+
+	if (error) {
+		char desc[1024];
+
+		/*
+		 * Dry-run failed, but we print out what success
+		 * looks like if we found a best txg
+		 */
+		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
+			zpool_rewind_exclaim(hdl, newname ? origname : thename,
+			    B_TRUE, nv);
+			nvlist_free(nv);
+			return (-1);
+		}
+
+		if (newname == NULL)
+			(void) snprintf(desc, sizeof (desc),
+			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
+			    thename);
+		else
+			(void) snprintf(desc, sizeof (desc),
+			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
+			    origname, thename);
+
+		switch (error) {
+		case ENOTSUP:
+			if (nv != NULL && nvlist_lookup_nvlist(nv,
+			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
+			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
+				(void) printf(dgettext(TEXT_DOMAIN, "This "
+				    "pool uses the following feature(s) not "
+				    "supported by this system:\n"));
+				zpool_print_unsup_feat(nv);
+				if (nvlist_exists(nvinfo,
+				    ZPOOL_CONFIG_CAN_RDONLY)) {
+					(void) printf(dgettext(TEXT_DOMAIN,
+					    "All unsupported features are only "
+					    "required for writing to the pool."
+					    "\nThe pool can be imported using "
+					    "'-o readonly=on'.\n"));
+				}
+			}
+			/*
+			 * Unsupported version.
+			 */
+			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
+			break;
+
+		case EINVAL:
+			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
+			break;
+
+		case EROFS:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "one or more devices is read only"));
+			(void) zfs_error(hdl, EZFS_BADDEV, desc);
+			break;
+
+		case ENXIO:
+			if (nv && nvlist_lookup_nvlist(nv,
+			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
+			    nvlist_lookup_nvlist(nvinfo,
+			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
+				(void) printf(dgettext(TEXT_DOMAIN,
+				    "The devices below are missing, use "
+				    "'-m' to import the pool anyway:\n"));
+				print_vdev_tree(hdl, NULL, missing, 2);
+				(void) printf("\n");
+			}
+			(void) zpool_standard_error(hdl, error, desc);
+			break;
+
+		case EEXIST:
+			(void) zpool_standard_error(hdl, error, desc);
+			break;
+
+		default:
+			(void) zpool_standard_error(hdl, error, desc);
+			zpool_explain_recover(hdl,
+			    newname ? origname : thename, -error, nv);
+			break;
+		}
+
+		nvlist_free(nv);
+		ret = -1;
+	} else {
+		zpool_handle_t *zhp;
+
+		/*
+		 * This should never fail, but play it safe anyway.
+		 */
+		if (zpool_open_silent(hdl, thename, &zhp) != 0)
+			ret = -1;
+		else if (zhp != NULL)
+			zpool_close(zhp);
+		if (policy.zrp_request &
+		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
+			zpool_rewind_exclaim(hdl, newname ? origname : thename,
+			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
+		}
+		nvlist_free(nv);
+		return (0);
+	}
+
+	zcmd_free_nvlists(&zc);
+	nvlist_free(props);
+
+	return (ret);
+}
+
+/*
+ * Scan the pool.
+ */
+int
+zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_cookie = func;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
+	    (errno == ENOENT && func != POOL_SCAN_NONE))
+		return (0);
+
+	if (func == POOL_SCAN_SCRUB) {
+		(void) snprintf(msg, sizeof (msg),
+		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
+	} else if (func == POOL_SCAN_NONE) {
+		(void) snprintf(msg, sizeof (msg),
+		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
+		    zc.zc_name);
+	} else {
+		assert(!"unexpected result");
+	}
+
+	if (errno == EBUSY) {
+		nvlist_t *nvroot;
+		pool_scan_stat_t *ps = NULL;
+		uint_t psc;
+
+		verify(nvlist_lookup_nvlist(zhp->zpool_config,
+		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+		(void) nvlist_lookup_uint64_array(nvroot,
+		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
+		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
+			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
+		else
+			return (zfs_error(hdl, EZFS_RESILVERING, msg));
+	} else if (errno == ENOENT) {
+		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
+	} else {
+		return (zpool_standard_error(hdl, errno, msg));
+	}
+}
+
+/*
+ * This provides a very minimal check whether a given string is likely a
+ * c#t#d# style string.  Users of this are expected to do their own
+ * verification of the s# part.
+ */
+#define	CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))
+
+/*
+ * More elaborate version for ones which may start with "/dev/dsk/"
+ * and the like.
+ */
+static int
+ctd_check_path(char *str) {
+	/*
+	 * If it starts with a slash, check the last component.
+	 */
+	if (str && str[0] == '/') {
+		char *tmp = strrchr(str, '/');
+
+		/*
+		 * If it ends in "/old", check the second-to-last
+		 * component of the string instead.
+		 */
+		if (tmp != str && strcmp(tmp, "/old") == 0) {
+			for (tmp--; *tmp != '/'; tmp--)
+				;
+		}
+		str = tmp + 1;
+	}
+	return (CTD_CHECK(str));
+}
+
+/*
+ * Find a vdev that matches the search criteria specified. We use the
+ * the nvpair name to determine how we should look for the device.
+ * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
+ * spare; but FALSE if its an INUSE spare.
+ */
+static nvlist_t *
+vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
+    boolean_t *l2cache, boolean_t *log)
+{
+	uint_t c, children;
+	nvlist_t **child;
+	nvlist_t *ret;
+	uint64_t is_log;
+	char *srchkey;
+	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
+
+	/* Nothing to look for */
+	if (search == NULL || pair == NULL)
+		return (NULL);
+
+	/* Obtain the key we will use to search */
+	srchkey = nvpair_name(pair);
+
+	switch (nvpair_type(pair)) {
+	case DATA_TYPE_UINT64:
+		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
+			uint64_t srchval, theguid;
+
+			verify(nvpair_value_uint64(pair, &srchval) == 0);
+			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+			    &theguid) == 0);
+			if (theguid == srchval)
+				return (nv);
+		}
+		break;
+
+	case DATA_TYPE_STRING: {
+		char *srchval, *val;
+
+		verify(nvpair_value_string(pair, &srchval) == 0);
+		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
+			break;
+
+		/*
+		 * Search for the requested value. Special cases:
+		 *
+		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
+		 *   "s0" or "s0/old".  The "s0" part is hidden from the user,
+		 *   but included in the string, so this matches around it.
+		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
+		 *
+		 * Otherwise, all other searches are simple string compares.
+		 */
+		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
+		    ctd_check_path(val)) {
+			uint64_t wholedisk = 0;
+
+			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+			    &wholedisk);
+			if (wholedisk) {
+				int slen = strlen(srchval);
+				int vlen = strlen(val);
+
+				if (slen != vlen - 2)
+					break;
+
+				/*
+				 * make_leaf_vdev() should only set
+				 * wholedisk for ZPOOL_CONFIG_PATHs which
+				 * will include "/dev/dsk/", giving plenty of
+				 * room for the indices used next.
+				 */
+				ASSERT(vlen >= 6);
+
+				/*
+				 * strings identical except trailing "s0"
+				 */
+				if (strcmp(&val[vlen - 2], "s0") == 0 &&
+				    strncmp(srchval, val, slen) == 0)
+					return (nv);
+
+				/*
+				 * strings identical except trailing "s0/old"
+				 */
+				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
+				    strcmp(&srchval[slen - 4], "/old") == 0 &&
+				    strncmp(srchval, val, slen - 4) == 0)
+					return (nv);
+
+				break;
+			}
+		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
+			char *type, *idx, *end, *p;
+			uint64_t id, vdev_id;
+
+			/*
+			 * Determine our vdev type, keeping in mind
+			 * that the srchval is composed of a type and
+			 * vdev id pair (i.e. mirror-4).
+			 */
+			if ((type = strdup(srchval)) == NULL)
+				return (NULL);
+
+			if ((p = strrchr(type, '-')) == NULL) {
+				free(type);
+				break;
+			}
+			idx = p + 1;
+			*p = '\0';
+
+			/*
+			 * If the types don't match then keep looking.
+			 */
+			if (strncmp(val, type, strlen(val)) != 0) {
+				free(type);
+				break;
+			}
+
+			verify(strncmp(type, VDEV_TYPE_RAIDZ,
+			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+			    strncmp(type, VDEV_TYPE_MIRROR,
+			    strlen(VDEV_TYPE_MIRROR)) == 0);
+			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
+			    &id) == 0);
+
+			errno = 0;
+			vdev_id = strtoull(idx, &end, 10);
+
+			free(type);
+			if (errno != 0)
+				return (NULL);
+
+			/*
+			 * Now verify that we have the correct vdev id.
+			 */
+			if (vdev_id == id)
+				return (nv);
+		}
+
+		/*
+		 * Common case
+		 */
+		if (strcmp(srchval, val) == 0)
+			return (nv);
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0)
+		return (NULL);
+
+	for (c = 0; c < children; c++) {
+		if ((ret = vdev_to_nvlist_iter(child[c], search,
+		    avail_spare, l2cache, NULL)) != NULL) {
+			/*
+			 * The 'is_log' value is only set for the toplevel
+			 * vdev, not the leaf vdevs.  So we always lookup the
+			 * log device from the root of the vdev tree (where
+			 * 'log' is non-NULL).
+			 */
+			if (log != NULL &&
+			    nvlist_lookup_uint64(child[c],
+			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
+			    is_log) {
+				*log = B_TRUE;
+			}
+			return (ret);
+		}
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++) {
+			if ((ret = vdev_to_nvlist_iter(child[c], search,
+			    avail_spare, l2cache, NULL)) != NULL) {
+				*avail_spare = B_TRUE;
+				return (ret);
+			}
+		}
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++) {
+			if ((ret = vdev_to_nvlist_iter(child[c], search,
+			    avail_spare, l2cache, NULL)) != NULL) {
+				*l2cache = B_TRUE;
+				return (ret);
+			}
+		}
+	}
+
+	return (NULL);
+}
+
+/*
+ * Given a physical path (minus the "/devices" prefix), find the
+ * associated vdev.
+ */
+nvlist_t *
+zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
+    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
+{
+	nvlist_t *search, *nvroot, *ret;
+
+	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
+
+	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+
+	*avail_spare = B_FALSE;
+	*l2cache = B_FALSE;
+	if (log != NULL)
+		*log = B_FALSE;
+	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
+	nvlist_free(search);
+
+	return (ret);
+}
+
+/*
+ * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
+ */
+boolean_t
+zpool_vdev_is_interior(const char *name)
+{
+	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
+	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
+		return (B_TRUE);
+	return (B_FALSE);
+}
+
+nvlist_t *
+zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
+    boolean_t *l2cache, boolean_t *log)
+{
+	char buf[MAXPATHLEN];
+	char *end;
+	nvlist_t *nvroot, *search, *ret;
+	uint64_t guid;
+
+	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
+
+	guid = strtoull(path, &end, 10);
+	if (guid != 0 && *end == '\0') {
+		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
+	} else if (zpool_vdev_is_interior(path)) {
+		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
+	} else if (path[0] != '/') {
+		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
+		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
+	} else {
+		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
+	}
+
+	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+
+	*avail_spare = B_FALSE;
+	*l2cache = B_FALSE;
+	if (log != NULL)
+		*log = B_FALSE;
+	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
+	nvlist_free(search);
+
+	return (ret);
+}
+
+static int
+vdev_online(nvlist_t *nv)
+{
+	uint64_t ival;
+
+	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
+	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
+	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
+		return (0);
+
+	return (1);
+}
+
+/*
+ * Helper function for zpool_get_physpaths().
+ */
+static int
+vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
+    size_t *bytes_written)
+{
+	size_t bytes_left, pos, rsz;
+	char *tmppath;
+	const char *format;
+
+	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
+	    &tmppath) != 0)
+		return (EZFS_NODEVICE);
+
+	pos = *bytes_written;
+	bytes_left = physpath_size - pos;
+	format = (pos == 0) ? "%s" : " %s";
+
+	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
+	*bytes_written += rsz;
+
+	if (rsz >= bytes_left) {
+		/* if physpath was not copied properly, clear it */
+		if (bytes_left != 0) {
+			physpath[pos] = 0;
+		}
+		return (EZFS_NOSPC);
+	}
+	return (0);
+}
+
/*
 * Recursively collect the physical paths of all online disk vdevs under
 * 'nv' into 'physpath', accumulating the length in *rsz.
 *
 * NOTE(review): the function intentionally falls through to return
 * EZFS_POOL_INVALARG even after successful appends; callers judge success
 * by *rsz, not by the return value (except EZFS_NOSPC, which propagates).
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		/* only online disks contribute a physical path */
		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		/*
		 * Note: the assignment to is_spare above is deliberate; it
		 * flags descendants of a spare vdev for the check above.
		 */
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			/* only buffer exhaustion aborts the walk */
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
+
+/*
+ * Get phys_path for a root pool config.
+ * Return 0 on success; non-zero on failure.
+ */
+static int
+zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
+{
+	size_t rsz;
+	nvlist_t *vdev_root;
+	nvlist_t **child;
+	uint_t count;
+	char *type;
+
+	rsz = 0;
+
+	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &vdev_root) != 0)
+		return (EZFS_INVALCONFIG);
+
+	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
+	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
+	    &child, &count) != 0)
+		return (EZFS_INVALCONFIG);
+
+	/*
+	 * root pool can not have EFI labeled disks and can only have
+	 * a single top-level vdev.
+	 */
+	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
+	    pool_uses_efi(vdev_root))
+		return (EZFS_POOL_INVALARG);
+
+	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
+	    B_FALSE);
+
+	/* No online devices */
+	if (rsz == 0)
+		return (EZFS_NODEVICE);
+
+	return (0);
+}
+
+/*
+ * Get phys_path for a root pool
+ * Return 0 on success; non-zero on failure.
+ */
+int
+zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
+{
+	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
+	    phypath_size));
+}
+
+/*
+ * If the device has being dynamically expanded then we need to relabel
+ * the disk to use the new unallocated space.
+ */
+static int
+zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
+{
+#ifdef sun
+	char path[MAXPATHLEN];
+	char errbuf[1024];
+	int fd, error;
+	int (*_efi_use_whole_disk)(int);
+
+	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
+	    "efi_use_whole_disk")) == NULL)
+		return (-1);
+
+	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
+
+	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
+		    "relabel '%s': unable to open device"), name);
+		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
+	}
+
+	/*
+	 * It's possible that we might encounter an error if the device
+	 * does not have any unallocated space left. If so, we simply
+	 * ignore that error and continue on.
+	 */
+	error = _efi_use_whole_disk(fd);
+	(void) close(fd);
+	if (error && error != VT_ENOSPC) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
+		    "relabel '%s': unable to read disk capacity"), name);
+		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
+	}
+#endif	/* sun */
+	return (0);
+}
+
+/*
+ * Bring the specified vdev online.   The 'flags' parameter is a set of the
+ * ZFS_ONLINE_* flags.
+ */
+int
+zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
+    vdev_state_t *newstate)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	nvlist_t *tgt;
+	boolean_t avail_spare, l2cache, islog;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	if (flags & ZFS_ONLINE_EXPAND) {
+		(void) snprintf(msg, sizeof (msg),
+		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
+	} else {
+		(void) snprintf(msg, sizeof (msg),
+		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
+	}
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+	    &islog)) == NULL)
+		return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+	if (avail_spare)
+		return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+	if (flags & ZFS_ONLINE_EXPAND ||
+	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
+		char *pathname = NULL;
+		uint64_t wholedisk = 0;
+
+		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
+		    &wholedisk);
+		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
+		    &pathname) == 0);
+
+		/*
+		 * XXX - L2ARC 1.0 devices can't support expansion.
+		 */
+		if (l2cache) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "cannot expand cache devices"));
+			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
+		}
+
+		if (wholedisk) {
+			pathname += strlen(DISK_ROOT) + 1;
+			(void) zpool_relabel_disk(hdl, pathname);
+		}
+	}
+
+	zc.zc_cookie = VDEV_STATE_ONLINE;
+	zc.zc_obj = flags;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
+		if (errno == EINVAL) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
+			    "from this pool into a new one.  Use '%s' "
+			    "instead"), "zpool detach");
+			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
+		}
+		return (zpool_standard_error(hdl, errno, msg));
+	}
+
+	*newstate = zc.zc_cookie;
+	return (0);
+}
+
+/*
+ * Take the specified vdev offline
+ */
+int
+zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
+{
+	zfs_cmd_t zc = { 0 };
+	char errbuf[1024];
+	nvlist_t *nv;
+	boolean_t avail_spare, l2cache;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	nv = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL);
+	if (nv == NULL)
+		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
+
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+	/* Hot spares may not be taken offline. */
+	if (avail_spare)
+		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
+
+	zc.zc_cookie = VDEV_STATE_OFFLINE;
+	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+		return (0);
+
+	if (errno == EBUSY) {
+		/* There are no other replicas of this device. */
+		return (zfs_error(hdl, EZFS_NOREPLICAS, errbuf));
+	} else if (errno == EEXIST) {
+		/* The log device has unplayed logs. */
+		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, errbuf));
+	} else {
+		return (zpool_standard_error(hdl, errno, errbuf));
+	}
+}
+
+/*
+ * Mark the vdev identified by 'guid' faulted, with 'aux' as the reason.
+ * Returns 0 on success, or the libzfs error result on failure.
+ */
+int
+zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	/* Cast to u_longlong_t so the argument matches the %llu conversion. */
+	(void) snprintf(msg, sizeof (msg),
+	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_guid = guid;
+	zc.zc_cookie = VDEV_STATE_FAULTED;
+	zc.zc_obj = aux;
+
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+		return (0);
+
+	switch (errno) {
+	case EBUSY:
+		/*
+		 * There are no other replicas of this device.
+		 */
+		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
+
+	default:
+		return (zpool_standard_error(hdl, errno, msg));
+	}
+}
+
+/*
+ * Mark the vdev identified by 'guid' degraded, with 'aux' as the reason.
+ * Returns 0 on success, or the libzfs error result on failure.
+ */
+int
+zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	/* Cast to u_longlong_t so the argument matches the %llu conversion. */
+	(void) snprintf(msg, sizeof (msg),
+	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_guid = guid;
+	zc.zc_cookie = VDEV_STATE_DEGRADED;
+	zc.zc_obj = aux;
+
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
+		return (0);
+
+	return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
+ * a hot spare.  Recursively walks the vdev tree rooted at 'search' looking
+ * for a two-child spare vdev whose child at index 'which' is 'tgt'.
+ */
+static boolean_t
+is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
+{
+	nvlist_t **kids;
+	uint_t i, nkids;
+	char *vtype;
+
+	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &kids,
+	    &nkids) != 0)
+		return (B_FALSE);
+
+	verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, &vtype) == 0);
+
+	if (strcmp(vtype, VDEV_TYPE_SPARE) == 0 && nkids == 2 &&
+	    kids[which] == tgt)
+		return (B_TRUE);
+
+	for (i = 0; i < nkids; i++) {
+		if (is_replacing_spare(kids[i], tgt, which))
+			return (B_TRUE);
+	}
+
+	return (B_FALSE);
+}
+
+/*
+ * Attach new_disk (fully described by nvroot) to old_disk.
+ * If 'replacing' is specified, the new disk will replace the old one.
+ *
+ * Returns 0 on success and -1 on failure, after recording a descriptive
+ * error via zfs_error()/zpool_standard_error().  For bootable pools a
+ * reminder about resilvering and boot code is printed to stderr.
+ */
+int
+zpool_vdev_attach(zpool_handle_t *zhp,
+    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	int ret;
+	nvlist_t *tgt;
+	boolean_t avail_spare, l2cache, islog;
+	uint64_t val;
+	char *newname;
+	nvlist_t **child;
+	uint_t children;
+	nvlist_t *config_root;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	boolean_t rootpool = zpool_is_bootable(zhp);
+
+	if (replacing)
+		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+		    "cannot replace %s with %s"), old_disk, new_disk);
+	else
+		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
+		    "cannot attach %s to %s"), new_disk, old_disk);
+
+	/*
+	 * If this is a root pool, make sure that we're not attaching an
+	 * EFI labeled device.
+	 */
+	if (rootpool && pool_uses_efi(nvroot)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "EFI labeled devices are not supported on root pools."));
+		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
+	}
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
+	    &islog)) == 0)
+		return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+	/* Hot spares and cache devices cannot be attach targets. */
+	if (avail_spare)
+		return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+	if (l2cache)
+		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
+
+	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+	/* zc_cookie distinguishes replace (nonzero) from attach. */
+	zc.zc_cookie = replacing;
+
+	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) != 0 || children != 1) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "new device must be a single disk"));
+		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
+	}
+
+	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
+	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
+
+	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
+		return (-1);
+
+	/*
+	 * If the target is a hot spare that has been swapped in, we can only
+	 * replace it with another hot spare.
+	 */
+	if (replacing &&
+	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
+	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
+	    NULL) == NULL || !avail_spare) &&
+	    is_replacing_spare(config_root, tgt, 1)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "can only be replaced by another hot spare"));
+		free(newname);
+		return (zfs_error(hdl, EZFS_BADTARGET, msg));
+	}
+
+	free(newname);
+
+	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
+		return (-1);
+
+	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
+
+	zcmd_free_nvlists(&zc);
+
+	if (ret == 0) {
+		if (rootpool) {
+			/*
+			 * XXX need a better way to prevent user from
+			 * booting up a half-baked vdev.
+			 */
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
+			    "sure to wait until resilver is done "
+			    "before rebooting.\n"));
+			(void) fprintf(stderr, "\n");
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
+			    "you boot from pool '%s', you may need to update\n"
+			    "boot code on newly attached disk '%s'.\n\n"
+			    "Assuming you use GPT partitioning and 'da0' is "
+			    "your new boot disk\n"
+			    "you may use the following command:\n\n"
+			    "\tgpart bootcode -b /boot/pmbr -p "
+			    "/boot/gptzfsboot -i 1 da0\n\n"),
+			    zhp->zpool_name, new_disk);
+		}
+		return (0);
+	}
+
+	/* The ioctl failed; map errno to a user-facing explanation. */
+	switch (errno) {
+	case ENOTSUP:
+		/*
+		 * Can't attach to or replace this type of vdev.
+		 */
+		if (replacing) {
+			uint64_t version = zpool_get_prop_int(zhp,
+			    ZPOOL_PROP_VERSION, NULL);
+
+			if (islog)
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "cannot replace a log with a spare"));
+			else if (version >= SPA_VERSION_MULTI_REPLACE)
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "already in replacing/spare config; wait "
+				    "for completion or use 'zpool detach'"));
+			else
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "cannot replace a replacing device"));
+		} else {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "can only attach to mirrors and top-level "
+			    "disks"));
+		}
+		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
+		break;
+
+	case EINVAL:
+		/*
+		 * The new device must be a single disk.
+		 */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "new device must be a single disk"));
+		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
+		break;
+
+	case EBUSY:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
+		    new_disk);
+		(void) zfs_error(hdl, EZFS_BADDEV, msg);
+		break;
+
+	case EOVERFLOW:
+		/*
+		 * The new device is too small.
+		 */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "device is too small"));
+		(void) zfs_error(hdl, EZFS_BADDEV, msg);
+		break;
+
+	case EDOM:
+		/*
+		 * The new device has a different alignment requirement.
+		 */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "devices have different sector alignment"));
+		(void) zfs_error(hdl, EZFS_BADDEV, msg);
+		break;
+
+	case ENAMETOOLONG:
+		/*
+		 * The resulting top-level vdev spec won't fit in the label.
+		 */
+		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
+		break;
+
+	default:
+		(void) zpool_standard_error(hdl, errno, msg);
+	}
+
+	return (-1);
+}
+
+/*
+ * Detach the specified device.
+ */
+int
+zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
+{
+	zfs_cmd_t zc = { 0 };
+	char errbuf[1024];
+	nvlist_t *nv;
+	boolean_t avail_spare, l2cache;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	nv = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL);
+	if (nv == NULL)
+		return (zfs_error(hdl, EZFS_NODEVICE, errbuf));
+
+	/* Spares and cache devices are removed, never detached. */
+	if (avail_spare)
+		return (zfs_error(hdl, EZFS_ISSPARE, errbuf));
+	if (l2cache)
+		return (zfs_error(hdl, EZFS_ISL2CACHE, errbuf));
+
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
+		return (0);
+
+	switch (errno) {
+	case ENOTSUP:
+		/* Detach only applies to mirror and replacing vdevs. */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
+		    "applicable to mirror and replacing vdevs"));
+		(void) zfs_error(hdl, EZFS_BADTARGET, errbuf);
+		break;
+
+	case EBUSY:
+		/* There are no other replicas of this device. */
+		(void) zfs_error(hdl, EZFS_NOREPLICAS, errbuf);
+		break;
+
+	default:
+		(void) zpool_standard_error(hdl, errno, errbuf);
+	}
+
+	return (-1);
+}
+
+/*
+ * Find a mirror vdev in the source nvlist.
+ *
+ * The mchild array contains a list of disks in one of the top-level mirrors
+ * of the source pool.  The schild array contains a list of disks that the
+ * user specified on the command line.  We loop over the mchild array to
+ * see if any entry in the schild array matches.
+ *
+ * If a disk in the mchild array is found in the schild array, we return
+ * the index of that entry.  Otherwise we return -1.
+ */
+static int
+find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
+    nvlist_t **schild, uint_t schildren)
+{
+	uint_t mi, si;
+
+	for (mi = 0; mi < mchildren; mi++) {
+		char *mname = zpool_vdev_name(zhp->zpool_hdl, zhp,
+		    mchild[mi], B_FALSE);
+		int match = -1;
+
+		for (si = 0; si < schildren && match < 0; si++) {
+			char *sname = zpool_vdev_name(zhp->zpool_hdl, zhp,
+			    schild[si], B_FALSE);
+
+			if (strcmp(mname, sname) == 0)
+				match = mi;
+			free(sname);
+		}
+		free(mname);
+
+		if (match >= 0)
+			return (match);
+	}
+
+	return (-1);
+}
+
+/*
+ * Split a mirror pool.  If newroot points to null, then a new nvlist
+ * is generated and it is the responsibility of the caller to free it.
+ *
+ * One disk from each top-level mirror of 'zhp' becomes a top-level vdev
+ * of the new pool 'newname'.  'props' optionally supplies properties for
+ * the new pool; 'flags' selects dry-run and export-after-split behavior.
+ * All intermediate allocations are released through the 'out' label.
+ */
+int
+zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
+    nvlist_t *props, splitflags_t flags)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
+	nvlist_t **varray = NULL, *zc_props = NULL;
+	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	uint64_t vers;
+	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
+	int retval = 0;
+
+	(void) snprintf(msg, sizeof (msg),
+	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
+
+	if (!zpool_name_valid(hdl, B_FALSE, newname))
+		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
+
+	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
+		(void) fprintf(stderr, gettext("Internal error: unable to "
+		    "retrieve pool configuration\n"));
+		return (-1);
+	}
+
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
+	    == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
+
+	if (props) {
+		/*
+		 * Validate the property list for the new pool.
+		 * NOTE(review): this local 'flags' shadows the function
+		 * parameter of the same name.
+		 */
+		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
+		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
+		    props, vers, flags, msg)) == NULL)
+			return (-1);
+	}
+
+	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
+	    &children) != 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "Source pool is missing vdev tree"));
+		if (zc_props)
+			nvlist_free(zc_props);
+		return (-1);
+	}
+
+	/* varray collects one vdev (or hole placeholder) per top-level child */
+	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
+	vcount = 0;
+
+	if (*newroot == NULL ||
+	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
+	    &newchild, &newchildren) != 0)
+		newchildren = 0;
+
+	for (c = 0; c < children; c++) {
+		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
+		char *type;
+		nvlist_t **mchild, *vdev;
+		uint_t mchildren;
+		int entry;
+
+		/*
+		 * Unlike cache & spares, slogs are stored in the
+		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
+		 */
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
+		    &is_log);
+		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
+		    &is_hole);
+		if (is_log || is_hole) {
+			/*
+			 * Create a hole vdev and put it in the config.
+			 */
+			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
+				goto out;
+			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
+			    VDEV_TYPE_HOLE) != 0)
+				goto out;
+			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
+			    1) != 0)
+				goto out;
+			/* Remember where a trailing run of holes begins. */
+			if (lastlog == 0)
+				lastlog = vcount;
+			varray[vcount++] = vdev;
+			continue;
+		}
+		lastlog = 0;
+		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
+		    == 0);
+		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "Source pool must be composed only of mirrors\n"));
+			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
+			goto out;
+		}
+
+		verify(nvlist_lookup_nvlist_array(child[c],
+		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
+
+		/* find or add an entry for this top-level vdev */
+		if (newchildren > 0 &&
+		    (entry = find_vdev_entry(zhp, mchild, mchildren,
+		    newchild, newchildren)) >= 0) {
+			/* We found a disk that the user specified. */
+			vdev = mchild[entry];
+			++found;
+		} else {
+			/* User didn't specify a disk for this vdev. */
+			vdev = mchild[mchildren - 1];
+		}
+
+		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
+			goto out;
+	}
+
+	/* did we find every disk the user specified? */
+	if (found != newchildren) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
+		    "include at most one disk from each mirror"));
+		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
+		goto out;
+	}
+
+	/* Prepare the nvlist for populating. */
+	if (*newroot == NULL) {
+		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
+			goto out;
+		freelist = B_TRUE;
+		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
+		    VDEV_TYPE_ROOT) != 0)
+			goto out;
+	} else {
+		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
+	}
+
+	/* Add all the children we found */
+	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
+	    lastlog == 0 ? vcount : lastlog) != 0)
+		goto out;
+
+	/*
+	 * If we're just doing a dry run, exit now with success.
+	 */
+	if (flags.dryrun) {
+		memory_err = B_FALSE;
+		freelist = B_FALSE;
+		goto out;
+	}
+
+	/* now build up the config list & call the ioctl */
+	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
+		goto out;
+
+	if (nvlist_add_nvlist(newconfig,
+	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
+	    nvlist_add_string(newconfig,
+	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
+	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
+		goto out;
+
+	/*
+	 * The new pool is automatically part of the namespace unless we
+	 * explicitly export it.
+	 */
+	if (!flags.import)
+		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
+	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
+		goto out;
+	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
+		goto out;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
+		retval = zpool_standard_error(hdl, errno, msg);
+		goto out;
+	}
+
+	freelist = B_FALSE;
+	memory_err = B_FALSE;
+
+	/* Common cleanup for success, explicit errors, and memory errors. */
+out:
+	if (varray != NULL) {
+		int v;
+
+		for (v = 0; v < vcount; v++)
+			nvlist_free(varray[v]);
+		free(varray);
+	}
+	zcmd_free_nvlists(&zc);
+	if (zc_props)
+		nvlist_free(zc_props);
+	if (newconfig)
+		nvlist_free(newconfig);
+	if (freelist) {
+		nvlist_free(*newroot);
+		*newroot = NULL;
+	}
+
+	if (retval != 0)
+		return (retval);
+
+	if (memory_err)
+		return (no_memory(hdl));
+
+	return (0);
+}
+
+/*
+ * Remove the given device.  Currently, this is supported only for hot spares
+ * and level 2 cache devices.
+ *
+ * Returns 0 on success, or the libzfs error result on failure.
+ */
+int
+zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	nvlist_t *tgt;
+	boolean_t avail_spare, l2cache, islog;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	uint64_t version;
+
+	(void) snprintf(msg, sizeof (msg),
+	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+	    &islog)) == NULL)
+		return (zfs_error(hdl, EZFS_NODEVICE, msg));
+	/*
+	 * XXX - this should just go away.
+	 */
+	if (!avail_spare && !l2cache && !islog) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "only inactive hot spares, cache, top-level, "
+		    "or log devices can be removed"));
+		return (zfs_error(hdl, EZFS_NODEVICE, msg));
+	}
+
+	/* Log device removal requires on-disk support for device holes. */
+	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
+	if (islog && version < SPA_VERSION_HOLES) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "pool must be upgraded to support log removal"));
+		return (zfs_error(hdl, EZFS_BADVERSION, msg));
+	}
+
+	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+
+	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
+		return (0);
+
+	return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Clear the errors for the pool, or the particular device if specified.
+ * 'rewindnvl' carries the rewind policy; if a rewind was requested, the
+ * kernel's rewind result is read back and reported to the user.
+ */
+int
+zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	nvlist_t *tgt;
+	zpool_rewind_policy_t policy;
+	boolean_t avail_spare, l2cache;
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	nvlist_t *nvi = NULL;
+	int error;
+
+	if (path)
+		(void) snprintf(msg, sizeof (msg),
+		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
+		    path);
+	else
+		(void) snprintf(msg, sizeof (msg),
+		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
+		    zhp->zpool_name);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	if (path) {
+		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
+		    &l2cache, NULL)) == NULL)
+			return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+		/*
+		 * Don't allow error clearing for hot spares.  Do allow
+		 * error clearing for l2cache devices.
+		 */
+		if (avail_spare)
+			return (zfs_error(hdl, EZFS_ISSPARE, msg));
+
+		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
+		    &zc.zc_guid) == 0);
+	}
+
+	zpool_get_rewind_policy(rewindnvl, &policy);
+	zc.zc_cookie = policy.zrp_request;
+
+	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
+		return (-1);
+
+	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) {
+		/* Don't leak the destination buffer allocated above. */
+		zcmd_free_nvlists(&zc);
+		return (-1);
+	}
+
+	/* Grow the destination buffer until the rewind result fits. */
+	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
+	    errno == ENOMEM) {
+		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+	}
+
+	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
+	    errno != EPERM && errno != EACCES)) {
+		if (policy.zrp_request &
+		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
+			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
+			zpool_rewind_exclaim(hdl, zc.zc_name,
+			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
+			    nvi);
+			nvlist_free(nvi);
+		}
+		zcmd_free_nvlists(&zc);
+		return (0);
+	}
+
+	zcmd_free_nvlists(&zc);
+	return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Similar to zpool_clear(), but takes a GUID (used by fmd).
+ */
+int
+zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
+{
+	zfs_cmd_t zc = { 0 };
+	char msg[1024];
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	/* Cast to u_longlong_t so the argument matches the %llx conversion. */
+	(void) snprintf(msg, sizeof (msg),
+	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
+	    (u_longlong_t)guid);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_guid = guid;
+	zc.zc_cookie = ZPOOL_NO_REWIND;
+
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
+		return (0);
+
+	return (zpool_standard_error(hdl, errno, msg));
+}
+
+/*
+ * Change the GUID for a pool.
+ */
+int
+zpool_reguid(zpool_handle_t *zhp)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	char errbuf[1024];
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) != 0)
+		return (zpool_standard_error(hdl, errno, errbuf));
+
+	return (0);
+}
+
+/*
+ * Reopen the pool.
+ */
+int
+zpool_reopen(zpool_handle_t *zhp)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+	char errbuf[1024];
+
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
+	    zhp->zpool_name);
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) != 0)
+		return (zpool_standard_error(hdl, errno, errbuf));
+
+	return (0);
+}
+
+/*
+ * Convert from a devid string to a path.
+ *
+ * Returns a malloc'd path string on success, or NULL if the devid cannot
+ * be decoded, no device matches, or allocation fails.
+ */
+static char *
+devid_to_path(char *devid_str)
+{
+	ddi_devid_t devid;
+	char *minor;
+	char *path;
+	devid_nmlist_t *list = NULL;
+	int ret;
+
+	if (devid_str_decode(devid_str, &devid, &minor) != 0)
+		return (NULL);
+
+	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
+
+	devid_str_free(minor);
+	devid_free(devid);
+
+	if (ret != 0)
+		return (NULL);
+
+	/*
+	 * Duplicate the first matching name, then free the list before
+	 * returning so a strdup() failure does not leak it.
+	 */
+	path = strdup(list[0].devname);
+
+	devid_free_nmlist(list);
+
+	return (path);
+}
+
+/*
+ * Convert from a path to a devid string.
+ */
+static char *
+path_to_devid(const char *path)
+{
+	int fd;
+	ddi_devid_t devid;
+	char *minor = NULL;
+	char *ret = NULL;
+
+	if ((fd = open(path, O_RDONLY)) < 0)
+		return (NULL);
+
+	if (devid_get(fd, &devid) == 0) {
+		if (devid_get_minor_name(fd, &minor) == 0)
+			ret = devid_str_encode(devid, minor);
+		if (minor != NULL)
+			devid_str_free(minor);
+		devid_free(devid);
+	}
+	(void) close(fd);
+
+	return (ret);
+}
+
+/*
+ * Issue the necessary ioctl() to update the stored path value for the vdev.  We
+ * ignore any failure here, since a common case is for an unprivileged user to
+ * type 'zpool status', and we'll display the correct information anyway.
+ */
+static void
+set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
+{
+	zfs_cmd_t zc = { 0 };
+
+	/* strlcpy() guarantees NUL termination, unlike strncpy(). */
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
+	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+	    &zc.zc_guid) == 0);
+
+	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
+}
+
+/*
+ * Given a vdev, return the name to display in iostat.  If the vdev has a path,
+ * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
+ * We also check if this is a whole disk, in which case we strip off the
+ * trailing 's0' slice name.
+ *
+ * This routine is also responsible for identifying when disks have been
+ * reconfigured in a new location.  The kernel will have opened the device by
+ * devid, but the path will still refer to the old location.  To catch this, we
+ * first do a path -> devid translation (which is fast for the common case).  If
+ * the devid matches, we're done.  If not, we do a reverse devid -> path
+ * translation and issue the appropriate ioctl() to update the path of the vdev.
+ * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
+ * of these checks.
+ *
+ * The returned string is allocated with zfs_strdup() and should be freed
+ * by the caller.
+ */
+char *
+zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
+    boolean_t verbose)
+{
+	char *path, *devid;
+	uint64_t value;
+	char buf[64];
+	vdev_stat_t *vs;
+	uint_t vsc;
+	int have_stats;
+	int have_path;
+
+	/* Cache the lookups used by several branches below. */
+	have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &vsc) == 0;
+	have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
+
+	/*
+	 * If the device is not currently present, assume it will not
+	 * come back at the same device path.  Display the device by GUID.
+	 *
+	 * NOTE: in the condition below '&&' binds tighter than '||';
+	 * explicit parentheses would make that clearer.
+	 */
+	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
+	    have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) {
+		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
+		    &value) == 0);
+		/* 'path' may point into the local scratch buffer 'buf'. */
+		(void) snprintf(buf, sizeof (buf), "%llu",
+		    (u_longlong_t)value);
+		path = buf;
+	} else if (have_path) {
+
+		/*
+		 * If the device is dead (faulted, offline, etc) then don't
+		 * bother opening it.  Otherwise we may be forcing the user to
+		 * open a misbehaving device, which can have undesirable
+		 * effects.
+		 */
+		if ((have_stats == 0 ||
+		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
+		    zhp != NULL &&
+		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
+			/*
+			 * Determine if the current path is correct.
+			 */
+			char *newdevid = path_to_devid(path);
+
+			if (newdevid == NULL ||
+			    strcmp(devid, newdevid) != 0) {
+				char *newpath;
+
+				if ((newpath = devid_to_path(devid)) != NULL) {
+					/*
+					 * Update the path appropriately.
+					 */
+					set_path(zhp, nv, newpath);
+					if (nvlist_add_string(nv,
+					    ZPOOL_CONFIG_PATH, newpath) == 0)
+						verify(nvlist_lookup_string(nv,
+						    ZPOOL_CONFIG_PATH,
+						    &path) == 0);
+					free(newpath);
+				}
+			}
+
+			if (newdevid)
+				devid_str_free(newdevid);
+		}
+
+#ifdef sun
+		/* Solaris: strip "/dev/dsk/" and the trailing "s0" slice. */
+		if (strncmp(path, "/dev/dsk/", 9) == 0)
+			path += 9;
+
+		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
+		    &value) == 0 && value) {
+			int pathlen = strlen(path);
+			char *tmp = zfs_strdup(hdl, path);
+
+			/*
+			 * If it starts with c#, and ends with "s0", chop
+			 * the "s0" off, or if it ends with "s0/old", remove
+			 * the "s0" from the middle.
+			 */
+			if (CTD_CHECK(tmp)) {
+				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
+					tmp[pathlen - 2] = '\0';
+				} else if (pathlen > 6 &&
+				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
+					(void) strcpy(&tmp[pathlen - 6],
+					    "/old");
+				}
+			}
+			return (tmp);
+		}
+#else	/* !sun */
+		/* FreeBSD: strip the leading _PATH_DEV ("/dev/") prefix. */
+		if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0)
+			path += sizeof(_PATH_DEV) - 1;
+#endif	/* !sun */
+	} else {
+		/* No path at all; fall back to displaying the vdev type. */
+		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
+
+		/*
+		 * If it's a raidz device, we need to stick in the parity level.
+		 */
+		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
+			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
+			    &value) == 0);
+			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
+			    (u_longlong_t)value);
+			path = buf;
+		}
+
+		/*
+		 * We identify each top-level vdev by using a <type-id>
+		 * naming convention.
+		 */
+		if (verbose) {
+			uint64_t id;
+
+			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
+			    &id) == 0);
+			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
+			    (u_longlong_t)id);
+			path = buf;
+		}
+	}
+
+	return (zfs_strdup(hdl, path));
+}
+
+/* qsort() comparator: order bookmarks by their raw memory contents. */
+static int
+zbookmark_compare(const void *a, const void *b)
+{
+	const zbookmark_t *za = a;
+	const zbookmark_t *zb = b;
+
+	return (memcmp(za, zb, sizeof (zbookmark_t)));
+}
+
+/*
+ * Retrieve the persistent error log, uniquify the members, and return to the
+ * caller.
+ *
+ * Returns 0 on success (with *nverrlistp populated with one nvlist of
+ * dataset/object numbers per unique entry), or -1 on failure.
+ */
+int
+zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
+{
+	zfs_cmd_t zc = { 0 };
+	uint64_t count;
+	zbookmark_t *zb = NULL;
+	int i;
+
+	/*
+	 * Retrieve the raw error list from the kernel.  If the number of errors
+	 * has increased, allocate more space and continue until we get the
+	 * entire list.
+	 */
+	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
+	    &count) == 0);
+	if (count == 0)
+		return (0);
+	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
+	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
+		return (-1);
+	zc.zc_nvlist_dst_size = count;
+	(void) strcpy(zc.zc_name, zhp->zpool_name);
+	for (;;) {
+		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
+		    &zc) != 0) {
+			free((void *)(uintptr_t)zc.zc_nvlist_dst);
+			if (errno == ENOMEM) {
+				/* The kernel reported the required count. */
+				count = zc.zc_nvlist_dst_size;
+				if ((zc.zc_nvlist_dst = (uintptr_t)
+				    zfs_alloc(zhp->zpool_hdl, count *
+				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
+					return (-1);
+			} else {
+				return (-1);
+			}
+		} else {
+			break;
+		}
+	}
+
+	/*
+	 * Sort the resulting bookmarks.  This is a little confusing due to the
+	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
+	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
+	 * _not_ copied as part of the process.  So we point the start of our
+	 * array appropriate and decrement the total number of elements.
+	 */
+	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
+	    zc.zc_nvlist_dst_size;
+	count -= zc.zc_nvlist_dst_size;
+
+	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
+
+	/*
+	 * NOTE(review): KM_SLEEP as the nvlist_alloc() flag argument is
+	 * unusual in userland code — confirm this is intended.
+	 */
+	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
+
+	/*
+	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
+	 */
+	for (i = 0; i < count; i++) {
+		nvlist_t *nv;
+
+		/* ignoring zb_blkid and zb_level for now */
+		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
+		    zb[i-1].zb_object == zb[i].zb_object)
+			continue;
+
+		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
+			goto nomem;
+		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
+		    zb[i].zb_objset) != 0) {
+			nvlist_free(nv);
+			goto nomem;
+		}
+		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
+		    zb[i].zb_object) != 0) {
+			nvlist_free(nv);
+			goto nomem;
+		}
+		/*
+		 * NOTE(review): the "ejk" pair name looks arbitrary;
+		 * consumers appear to iterate rather than look up by name.
+		 */
+		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
+			nvlist_free(nv);
+			goto nomem;
+		}
+		nvlist_free(nv);
+	}
+
+	free((void *)(uintptr_t)zc.zc_nvlist_dst);
+	return (0);
+
+nomem:
+	free((void *)(uintptr_t)zc.zc_nvlist_dst);
+	return (no_memory(zhp->zpool_hdl));
+}
+
+/*
+ * Upgrade a ZFS pool to the latest on-disk version.
+ *
+ * Returns 0 on success, or the libzfs error result on failure.
+ */
+int
+zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	/* Use a bounded copy, consistent with the rest of this file. */
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_cookie = new_version;
+
+	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
+		return (zpool_standard_error_fmt(hdl, errno,
+		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
+		    zhp->zpool_name));
+	return (0);
+}
+
+/*
+ * Build a history record string from a subcommand and its arguments,
+ * appending as many argv entries as fit in HIS_MAX_RECORD_LEN.
+ */
+void
+zpool_set_history_str(const char *subcommand, int argc, char **argv,
+    char *history_str)
+{
+	int i;
+
+	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
+	for (i = 1; i < argc; i++) {
+		size_t needed = strlen(history_str) + 1 + strlen(argv[i]);
+
+		/* Stop once the next argument would no longer fit. */
+		if (needed > HIS_MAX_RECORD_LEN)
+			break;
+		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
+		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
+	}
+}
+
+/*
+ * Stage command history for logging.  Stores a copy of 'history_str' in
+ * the handle; returns 0 on success, EINVAL for bad input, or the
+ * no_memory() result if the copy cannot be allocated.
+ */
+int
+zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
+{
+	if (history_str == NULL ||
+	    strlen(history_str) > HIS_MAX_RECORD_LEN)
+		return (EINVAL);
+
+	/* Drop any previously staged string. */
+	if (hdl->libzfs_log_str != NULL)
+		free(hdl->libzfs_log_str);
+
+	hdl->libzfs_log_str = strdup(history_str);
+	if (hdl->libzfs_log_str == NULL)
+		return (no_memory(hdl));
+
+	return (0);
+}
+
+/*
+ * Perform ioctl to get some command history of a pool.
+ *
+ * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
+ * logical offset of the history buffer to start reading from.
+ *
+ * Upon return, 'off' is the next logical offset to read from and
+ * 'len' is the actual amount of bytes read into 'buf'.
+ *
+ * Returns 0 on success, or a libzfs error translated from the ioctl
+ * errno (permission denied, no history, pool too old, ...).
+ */
+static int
+get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+
+	/* hand the kernel our buffer and the requested window */
+	zc.zc_history = (uint64_t)(uintptr_t)buf;
+	zc.zc_history_len = *len;
+	zc.zc_history_offset = *off;
+
+	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
+		/* map the interesting errnos to specific libzfs errors */
+		switch (errno) {
+		case EPERM:
+			return (zfs_error_fmt(hdl, EZFS_PERM,
+			    dgettext(TEXT_DOMAIN,
+			    "cannot show history for pool '%s'"),
+			    zhp->zpool_name));
+		case ENOENT:
+			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
+			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
+			    "'%s'"), zhp->zpool_name));
+		case ENOTSUP:
+			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
+			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
+			    "'%s', pool must be upgraded"), zhp->zpool_name));
+		default:
+			return (zpool_standard_error_fmt(hdl, errno,
+			    dgettext(TEXT_DOMAIN,
+			    "cannot get history for '%s'"), zhp->zpool_name));
+		}
+	}
+
+	/* report how much was actually read and where to resume */
+	*len = zc.zc_history_len;
+	*off = zc.zc_history_offset;
+
+	return (0);
+}
+
+/*
+ * Process the buffer of nvlists, unpacking and storing each nvlist record
+ * into 'records'.  'leftover' is set to the number of bytes that weren't
+ * processed as there wasn't a complete record.
+ *
+ * Each record in 'buf' is a little-endian 64-bit length followed by a
+ * packed nvlist of that many bytes.  The 'records' array is grown in
+ * roughly power-of-two steps as records are appended.
+ *
+ * Returns 0 on success, or ENOMEM on unpack/allocation failure.
+ */
+int
+zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
+    nvlist_t ***records, uint_t *numrecords)
+{
+	uint64_t reclen;
+	nvlist_t *nv;
+	int i;
+
+	while (bytes_read > sizeof (reclen)) {
+
+		/* get length of packed record (stored as little endian) */
+		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
+			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
+
+		if (bytes_read < sizeof (reclen) + reclen)
+			break;
+
+		/* unpack record */
+		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
+			return (ENOMEM);
+		bytes_read -= sizeof (reclen) + reclen;
+		buf += sizeof (reclen) + reclen;
+
+		/* add record to nvlist array */
+		(*numrecords)++;
+		if (ISP2(*numrecords + 1)) {
+			nvlist_t **tmp;
+
+			/*
+			 * Check the realloc() result before using it; the
+			 * previous code would dereference a NULL array on
+			 * allocation failure (and lose the old array).
+			 */
+			tmp = realloc(*records,
+			    *numrecords * 2 * sizeof (nvlist_t *));
+			if (tmp == NULL) {
+				nvlist_free(nv);
+				(*numrecords)--;
+				return (ENOMEM);
+			}
+			*records = tmp;
+		}
+		(*records)[*numrecords - 1] = nv;
+	}
+
+	*leftover = bytes_read;
+	return (0);
+}
+
+#define	HIS_BUF_LEN	(128*1024)	/* bytes requested per history ioctl */
+
+/*
+ * Retrieve the command history of a pool.
+ *
+ * Reads HIS_BUF_LEN-sized chunks from the kernel until EOF, unpacking
+ * the complete records in each chunk; a trailing partial record is
+ * re-read on the next iteration (the offset is backed up by the
+ * 'leftover' byte count).  On success, *nvhisp is a newly allocated
+ * nvlist whose ZPOOL_HIST_RECORD array holds all records.
+ */
+int
+zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
+{
+	char buf[HIS_BUF_LEN];
+	uint64_t off = 0;
+	nvlist_t **records = NULL;
+	uint_t numrecords = 0;
+	int err, i;
+
+	do {
+		uint64_t bytes_read = sizeof (buf);
+		uint64_t leftover;
+
+		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
+			break;
+
+		/* if nothing else was read in, we're at EOF, just return */
+		if (!bytes_read)
+			break;
+
+		if ((err = zpool_history_unpack(buf, bytes_read,
+		    &leftover, &records, &numrecords)) != 0)
+			break;
+		off -= leftover;
+
+		/* CONSTCOND */
+	} while (1);
+
+	if (!err) {
+		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
+		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
+		    records, numrecords) == 0);
+	}
+	/* records were copied into *nvhisp (if any); free our copies */
+	for (i = 0; i < numrecords; i++)
+		nvlist_free(records[i]);
+	free(records);
+
+	return (err);
+}
+
+/*
+ * Translate a <dataset obj, object> pair from an errlog entry into a
+ * human-readable path, written into 'pathname' (of size 'len').
+ *
+ * Falls back to printing raw object numbers when the dataset name or
+ * the object's path cannot be obtained from the kernel.  The %llx
+ * arguments are cast to u_longlong_t so the format matches on
+ * platforms where uint64_t is not unsigned long long.
+ */
+void
+zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
+    char *pathname, size_t len)
+{
+	zfs_cmd_t zc = { 0 };
+	boolean_t mounted = B_FALSE;
+	char *mntpnt = NULL;
+	char dsname[MAXNAMELEN];
+
+	if (dsobj == 0) {
+		/* special case for the MOS */
+		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
+		    (u_longlong_t)obj);
+		return;
+	}
+
+	/* get the dataset's name */
+	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+	zc.zc_obj = dsobj;
+	if (ioctl(zhp->zpool_hdl->libzfs_fd,
+	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
+		/* just write out a path of two object numbers */
+		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
+		    (u_longlong_t)dsobj, (u_longlong_t)obj);
+		return;
+	}
+	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
+
+	/* find out if the dataset is mounted */
+	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
+
+	/* get the corrupted object's path */
+	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
+	zc.zc_obj = obj;
+	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
+	    &zc) == 0) {
+		if (mounted) {
+			(void) snprintf(pathname, len, "%s%s", mntpnt,
+			    zc.zc_value);
+		} else {
+			(void) snprintf(pathname, len, "%s:%s",
+			    dsname, zc.zc_value);
+		}
+	} else {
+		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
+		    (u_longlong_t)obj);
+	}
+	free(mntpnt);
+}
+
+#ifdef sun
+/*
+ * Read the EFI label from the config, if a label does not exist then
+ * pass back the error to the caller. If the caller has passed a non-NULL
+ * diskaddr argument then we set it to the starting address of the EFI
+ * partition.
+ *
+ * Returns the efi_alloc_and_read() result (>= 0 on success), or -1
+ * when the vdev has no path or the device cannot be opened.
+ */
+static int
+read_efi_label(nvlist_t *config, diskaddr_t *sb)
+{
+	char *path;
+	int fd;
+	char diskname[MAXPATHLEN];
+	int err = -1;
+
+	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
+		return (err);
+
+	/* open the raw device corresponding to the vdev's path */
+	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
+	    strrchr(path, '/'));
+	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
+		struct dk_gpt *vtoc;
+
+		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
+			if (sb != NULL)
+				*sb = vtoc->efi_parts[0].p_start;
+			efi_free(vtoc);
+		}
+		(void) close(fd);
+	}
+	return (err);
+}
+
+/*
+ * determine where a partition starts on a disk in the current
+ * configuration
+ *
+ * Walks the vdev tree depth-first and returns the start block of the
+ * first whole-disk leaf whose EFI label can be read; MAXOFFSET_T
+ * means "unknown".
+ */
+static diskaddr_t
+find_start_block(nvlist_t *config)
+{
+	nvlist_t **child;
+	uint_t c, children;
+	diskaddr_t sb = MAXOFFSET_T;
+	uint64_t wholedisk;
+
+	if (nvlist_lookup_nvlist_array(config,
+	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
+		/* leaf vdev: only whole disks carry an EFI label */
+		if (nvlist_lookup_uint64(config,
+		    ZPOOL_CONFIG_WHOLE_DISK,
+		    &wholedisk) != 0 || !wholedisk) {
+			return (MAXOFFSET_T);
+		}
+		if (read_efi_label(config, &sb) < 0)
+			sb = MAXOFFSET_T;
+		return (sb);
+	}
+
+	/* interior vdev: recurse until some leaf yields an answer */
+	for (c = 0; c < children; c++) {
+		sb = find_start_block(child[c]);
+		if (sb != MAXOFFSET_T) {
+			return (sb);
+		}
+	}
+	return (MAXOFFSET_T);
+}
+#endif /* sun */
+
+/*
+ * Label an individual disk.  The name provided is the short name,
+ * stripped of any leading /dev path.
+ *
+ * Writes a whole-disk EFI label: slice 0 holds the data area and
+ * slice 8 the reserved area.  On non-Solaris builds the body is
+ * compiled out and the function is a no-op returning 0.
+ * Returns 0 on success or a libzfs error.
+ */
+int
+zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
+{
+#ifdef sun
+	char path[MAXPATHLEN];
+	struct dk_gpt *vtoc;
+	int fd;
+	size_t resv = EFI_MIN_RESV_SIZE;
+	uint64_t slice_size;
+	diskaddr_t start_block;
+	char errbuf[1024];
+
+	/* prepare an error message just in case */
+	(void) snprintf(errbuf, sizeof (errbuf),
+	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
+
+	if (zhp) {
+		nvlist_t *nvroot;
+
+		/* EFI labels cannot be used on root pools */
+		if (zpool_is_bootable(zhp)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "EFI labeled devices are not supported on root "
+			    "pools."));
+			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
+		}
+
+		verify(nvlist_lookup_nvlist(zhp->zpool_config,
+		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
+
+		/* reuse (and cache) the start block of the existing vdevs */
+		if (zhp->zpool_start_block == 0)
+			start_block = find_start_block(nvroot);
+		else
+			start_block = zhp->zpool_start_block;
+		zhp->zpool_start_block = start_block;
+	} else {
+		/* new pool */
+		start_block = NEW_START_BLOCK;
+	}
+
+	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
+	    BACKUP_SLICE);
+
+	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
+		/*
+		 * This shouldn't happen.  We've long since verified that this
+		 * is a valid device.
+		 */
+		zfs_error_aux(hdl,
+		    dgettext(TEXT_DOMAIN, "unable to open device"));
+		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
+	}
+
+	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
+		/*
+		 * The only way this can fail is if we run out of memory, or we
+		 * were unable to read the disk's capacity
+		 */
+		if (errno == ENOMEM)
+			(void) no_memory(hdl);
+
+		(void) close(fd);
+		/*
+		 * NOTE(review): the format string has no conversion for
+		 * 'name'; the extra argument is ignored — confirm whether
+		 * "... of '%s'" was intended.
+		 */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "unable to read disk capacity"), name);
+
+		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
+	}
+
+	/* slice 0 spans everything between start_block and the reserve */
+	slice_size = vtoc->efi_last_u_lba + 1;
+	slice_size -= EFI_MIN_RESV_SIZE;
+	if (start_block == MAXOFFSET_T)
+		start_block = NEW_START_BLOCK;
+	slice_size -= start_block;
+
+	vtoc->efi_parts[0].p_start = start_block;
+	vtoc->efi_parts[0].p_size = slice_size;
+
+	/*
+	 * Why we use V_USR: V_BACKUP confuses users, and is considered
+	 * disposable by some EFI utilities (since EFI doesn't have a backup
+	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
+	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
+	 * etc. were all pretty specific.  V_USR is as close to reality as we
+	 * can get, in the absence of V_OTHER.
+	 */
+	vtoc->efi_parts[0].p_tag = V_USR;
+	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
+
+	/* slice 8 is the reserved area at the end of the disk */
+	vtoc->efi_parts[8].p_start = slice_size + start_block;
+	vtoc->efi_parts[8].p_size = resv;
+	vtoc->efi_parts[8].p_tag = V_RESERVED;
+
+	if (efi_write(fd, vtoc) != 0) {
+		/*
+		 * Some block drivers (like pcata) may not support EFI
+		 * GPT labels.  Print out a helpful error message dir-
+		 * ecting the user to manually label the disk and give
+		 * a specific slice.
+		 */
+		(void) close(fd);
+		efi_free(vtoc);
+
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "try using fdisk(1M) and then provide a specific slice"));
+		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
+	}
+
+	(void) close(fd);
+	efi_free(vtoc);
+#endif /* sun */
+	return (0);
+}
+
+/*
+ * Recursively verify that every vdev in 'config' is of a type usable
+ * for a dump device.  Emits a libzfs error and returns B_FALSE at the
+ * first unsupported vdev type; returns B_TRUE otherwise.
+ */
+static boolean_t
+supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
+{
+	static const char *unsupported[] = {
+		VDEV_TYPE_RAIDZ,
+		VDEV_TYPE_FILE,
+		VDEV_TYPE_LOG,
+		VDEV_TYPE_HOLE,
+		VDEV_TYPE_MISSING
+	};
+	char *type;
+	nvlist_t **child;
+	uint_t children, c, i;
+
+	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
+	for (i = 0; i < sizeof (unsupported) / sizeof (unsupported[0]); i++) {
+		if (strcmp(type, unsupported[i]) == 0) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "vdev type '%s' is not supported"), type);
+			(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
+			return (B_FALSE);
+		}
+	}
+	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
+	    &child, &children) == 0) {
+		for (c = 0; c < children; c++) {
+			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
+				return (B_FALSE);
+		}
+	}
+	return (B_TRUE);
+}
+
+/*
+ * check if this zvol is allowable for use as a dump device; zero if
+ * it is, > 0 if it isn't, < 0 if it isn't a zvol
+ *
+ * A usable dump zvol must live in a pool with exactly one top-level
+ * vdev whose types all pass supported_dump_vdev_type().
+ */
+int
+zvol_check_dump_config(char *arg)
+{
+	zpool_handle_t *zhp = NULL;
+	nvlist_t *config, *nvroot;
+	char *p, *volname;
+	nvlist_t **top;
+	uint_t toplevels;
+	libzfs_handle_t *hdl;
+	char errbuf[1024];
+	char poolname[ZPOOL_MAXNAMELEN];
+	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
+	int ret = 1;
+
+	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
+		return (-1);
+	}
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "dump is not supported on device '%s'"), arg);
+
+	if ((hdl = libzfs_init()) == NULL)
+		return (1);
+	libzfs_print_on_error(hdl, B_TRUE);
+
+	volname = arg + pathlen;
+
+	/* check the configuration of the pool */
+	if ((p = strchr(volname, '/')) == NULL) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "malformed dataset name"));
+		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
+		/* fix: release the handle instead of leaking it */
+		goto out;
+	} else if (p - volname >= ZFS_MAXNAMELEN) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset name is too long"));
+		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
+		/* fix: release the handle instead of leaking it */
+		goto out;
+	} else {
+		(void) strncpy(poolname, volname, p - volname);
+		poolname[p - volname] = '\0';
+	}
+
+	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "could not open pool '%s'"), poolname);
+		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
+		goto out;
+	}
+	config = zpool_get_config(zhp, NULL);
+	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) != 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "could not obtain vdev configuration for  '%s'"), poolname);
+		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
+		goto out;
+	}
+
+	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+	    &top, &toplevels) == 0);
+	if (toplevels != 1) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "'%s' has multiple top level vdevs"), poolname);
+		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
+		goto out;
+	}
+
+	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
+		goto out;
+	}
+	ret = 0;
+
+out:
+	if (zhp)
+		zpool_close(zhp);
+	libzfs_fini(hdl);
+	return (ret);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c
new file mode 100644
index 0000000000000000000000000000000000000000..662801eec1c7ef979a557735eb4458f6b7df4f37
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_sendrecv.c
@@ -0,0 +1,3232 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
+ * All rights reserved.
+ */
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <libintl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <stddef.h>
+#include <fcntl.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <pthread.h>
+#include <umem.h>
+#include <time.h>
+
+#include <libzfs.h>
+
+#include "zfs_namecheck.h"
+#include "zfs_prop.h"
+#include "zfs_fletcher.h"
+#include "libzfs_impl.h"
+#include <sha2.h>
+#include <sys/zio_checksum.h>
+#include <sys/ddt.h>
+
+/* in libzfs_dataset.c */
+extern void zfs_setprop_error(libzfs_handle_t *, zfs_prop_t, int, char *);
+/*
+ * We need to use something for ENODATA — presumably the target
+ * platform does not define it; EIDRM is borrowed as a stand-in.
+ */
+#define	ENODATA	EIDRM
+
+/* forward declaration; defined later in this file */
+static int zfs_receive_impl(libzfs_handle_t *, const char *, recvflags_t *,
+    int, const char *, nvlist_t *, avl_tree_t *, char **, int, uint64_t *);
+
+/* all-zero checksum, used to detect a block with no checksum recorded */
+static const zio_cksum_t zero_cksum = { 0 };
+
+/* arguments handed to the cksummer() dedup thread */
+typedef struct dedup_arg {
+	int	inputfd;	/* read side: raw stream from the ioctl */
+	int	outputfd;	/* write side: dedup'ed stream */
+	libzfs_handle_t  *dedup_hdl;
+} dedup_arg_t;
+
+/*
+ * NOTE(review): not used in the visible portion of this file;
+ * presumably arguments for a progress-reporting thread — confirm.
+ */
+typedef struct progress_arg {
+	zfs_handle_t *pa_zhp;
+	int pa_fd;
+	boolean_t pa_parsable;
+} progress_arg_t;
+
+/* identifies a data block already emitted earlier in the stream */
+typedef struct dataref {
+	uint64_t ref_guid;	/* guid of the dataset holding the block */
+	uint64_t ref_object;	/* object number */
+	uint64_t ref_offset;	/* byte offset within the object */
+} dataref_t;
+
+/* one entry in the dedup hash table (singly chained per bucket) */
+typedef struct dedup_entry {
+	struct dedup_entry	*dde_next;	/* hash-chain link */
+	zio_cksum_t dde_chksum;	/* block checksum (lookup key) */
+	uint64_t dde_prop;	/* ddk_prop value (part of lookup key) */
+	dataref_t dde_ref;	/* where the block first appeared */
+} dedup_entry_t;
+
+#define	MAX_DDT_PHYSMEM_PERCENT		20	/* DDT cap: % of physmem */
+#define	SMALLEST_POSSIBLE_MAX_DDT_MB		128	/* DDT cap floor, MB */
+
+typedef struct dedup_table {
+	dedup_entry_t	**dedup_hash_array;	/* bucket array */
+	umem_cache_t	*ddecache;	/* allocator for dedup_entry_t */
+	uint64_t	max_ddt_size;  /* max dedup table size in bytes */
+	uint64_t	cur_ddt_size;  /* current dedup table size in bytes */
+	uint64_t	ddt_count;	/* number of entries in the table */
+	int		numhashbits;	/* log2 of the bucket count */
+	boolean_t	ddt_full;	/* table reached max_ddt_size */
+} dedup_table_t;
+
+/*
+ * Return the 1-based position of the highest set bit in 'n'
+ * (i.e. floor(log2(n)) + 1), or 0 when n == 0.
+ */
+static int
+high_order_bit(uint64_t n)
+{
+	int bits = 0;
+
+	while (n != 0) {
+		n >>= 1;
+		bits++;
+	}
+	return (bits);
+}
+
+/*
+ * Read one 'len'-byte item from 'stream'.  Returns the fread() item
+ * count: 1 on success, 0 on EOF or error.
+ */
+static size_t
+ssread(void *buf, size_t len, FILE *stream)
+{
+	return (fread(buf, len, 1, stream));
+}
+
+/*
+ * Append a new dedup-table entry at *ddepp (the tail of a bucket
+ * chain), recording checksum '*cs', checksum properties 'prop' and
+ * block reference '*dr'.  Once the table reaches its size cap no
+ * further entries are added; the user is warned the first time.
+ */
+static void
+ddt_hash_append(libzfs_handle_t *hdl, dedup_table_t *ddt, dedup_entry_t **ddepp,
+    zio_cksum_t *cs, uint64_t prop, dataref_t *dr)
+{
+	dedup_entry_t	*dde;
+
+	if (ddt->cur_ddt_size >= ddt->max_ddt_size) {
+		if (ddt->ddt_full == B_FALSE) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "Dedup table full.  Deduplication will continue "
+			    "with existing table entries"));
+			ddt->ddt_full = B_TRUE;
+		}
+		return;
+	}
+
+	/* allocation failure is tolerated: the block simply isn't deduped */
+	if ((dde = umem_cache_alloc(ddt->ddecache, UMEM_DEFAULT))
+	    != NULL) {
+		assert(*ddepp == NULL);
+		dde->dde_next = NULL;
+		dde->dde_chksum = *cs;
+		dde->dde_prop = prop;
+		dde->dde_ref = *dr;
+		*ddepp = dde;
+		ddt->cur_ddt_size += sizeof (dedup_entry_t);
+		ddt->ddt_count++;
+	}
+}
+
+/*
+ * Using the specified dedup table, do a lookup for an entry with
+ * the checksum cs.  If found, return the block's reference info
+ * in *dr. Otherwise, insert a new entry in the dedup table, using
+ * the reference information specified by *dr.
+ *
+ * return value:  true - entry was found
+ *		  false - entry was not found
+ */
+static boolean_t
+ddt_update(libzfs_handle_t *hdl, dedup_table_t *ddt, zio_cksum_t *cs,
+    uint64_t prop, dataref_t *dr)
+{
+	uint32_t hashcode;
+	dedup_entry_t **ddepp;
+
+	/* bucket index: low numhashbits bits of the first checksum word */
+	hashcode = BF64_GET(cs->zc_word[0], 0, ddt->numhashbits);
+
+	/* walk the bucket chain looking for a checksum+prop match */
+	for (ddepp = &(ddt->dedup_hash_array[hashcode]); *ddepp != NULL;
+	    ddepp = &((*ddepp)->dde_next)) {
+		if (ZIO_CHECKSUM_EQUAL(((*ddepp)->dde_chksum), *cs) &&
+		    (*ddepp)->dde_prop == prop) {
+			*dr = (*ddepp)->dde_ref;
+			return (B_TRUE);
+		}
+	}
+	/* not found: ddepp now points at the chain tail; append there */
+	ddt_hash_append(hdl, ddt, ddepp, cs, prop, dr);
+	return (B_FALSE);
+}
+
+/*
+ * Fold 'buf' into the running fletcher-4 stream checksum '*zc', then
+ * write it to 'outfd'.  Returns the write() result (-1 on error).
+ */
+static int
+cksum_and_write(const void *buf, uint64_t len, zio_cksum_t *zc, int outfd)
+{
+	fletcher_4_incremental_native(buf, len, zc);
+	return (write(outfd, buf, len));
+}
+
+/*
+ * This function is started in a separate thread when the dedup option
+ * has been requested.  The main send thread determines the list of
+ * snapshots to be included in the send stream and makes the ioctl calls
+ * for each one.  But instead of having the ioctl send the output to the
+ * output fd specified by the caller of zfs_send(), the ioctl is told to
+ * direct the output to a pipe, which is read by the alternate thread
+ * running THIS function.  This function does the dedup'ing by:
+ *  1. building a dedup table (the DDT)
+ *  2. doing checksums on each data block and inserting a record in the DDT
+ *  3. looking for matching checksums, and
+ *  4.  sending a DRR_WRITE_BYREF record instead of a write record whenever
+ *      a duplicate block is found.
+ * The output of this function then goes to the output fd requested
+ * by the caller of zfs_send().
+ */
+static void *
+cksummer(void *arg)
+{
+	dedup_arg_t *dda = arg;
+	char *buf = malloc(1<<20);
+	dmu_replay_record_t thedrr;
+	dmu_replay_record_t *drr = &thedrr;
+	struct drr_begin *drrb = &thedrr.drr_u.drr_begin;
+	struct drr_end *drre = &thedrr.drr_u.drr_end;
+	struct drr_object *drro = &thedrr.drr_u.drr_object;
+	struct drr_write *drrw = &thedrr.drr_u.drr_write;
+	struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
+	FILE *ofp;
+	int outfd;
+	dmu_replay_record_t wbr_drr = {0};
+	struct drr_write_byref *wbr_drrr = &wbr_drr.drr_u.drr_write_byref;
+	dedup_table_t ddt;
+	zio_cksum_t stream_cksum;
+	uint64_t physmem = sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE);
+	uint64_t numbuckets;
+
+	/* cap the DDT at a fraction of physmem, but never below the floor */
+	ddt.max_ddt_size =
+	    MAX((physmem * MAX_DDT_PHYSMEM_PERCENT)/100,
+	    SMALLEST_POSSIBLE_MAX_DDT_MB<<20);
+
+	numbuckets = ddt.max_ddt_size/(sizeof (dedup_entry_t));
+
+	/*
+	 * numbuckets must be a power of 2.  Increase number to
+	 * a power of 2 if necessary.
+	 */
+	if (!ISP2(numbuckets))
+		numbuckets = 1 << high_order_bit(numbuckets);
+
+	ddt.dedup_hash_array = calloc(numbuckets, sizeof (dedup_entry_t *));
+	ddt.ddecache = umem_cache_create("dde", sizeof (dedup_entry_t), 0,
+	    NULL, NULL, NULL, NULL, NULL, 0);
+	ddt.cur_ddt_size = numbuckets * sizeof (dedup_entry_t *);
+	ddt.numhashbits = high_order_bit(numbuckets) - 1;
+	ddt.ddt_full = B_FALSE;
+
+	/* Initialize the write-by-reference block. */
+	wbr_drr.drr_type = DRR_WRITE_BYREF;
+	wbr_drr.drr_payloadlen = 0;
+
+	outfd = dda->outputfd;
+	ofp = fdopen(dda->inputfd, "r");
+	while (ssread(drr, sizeof (dmu_replay_record_t), ofp) != 0) {
+
+		switch (drr->drr_type) {
+		case DRR_BEGIN:
+		{
+			int	fflags;
+			ZIO_SET_CHECKSUM(&stream_cksum, 0, 0, 0, 0);
+
+			/* set the DEDUP feature flag for this stream */
+			fflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
+			fflags |= (DMU_BACKUP_FEATURE_DEDUP |
+			    DMU_BACKUP_FEATURE_DEDUPPROPS);
+			DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, fflags);
+
+			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
+			    &stream_cksum, outfd) == -1)
+				goto out;
+			if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
+			    DMU_COMPOUNDSTREAM && drr->drr_payloadlen != 0) {
+				int sz = drr->drr_payloadlen;
+
+				if (sz > 1<<20) {
+					free(buf);
+					buf = malloc(sz);
+				}
+				(void) ssread(buf, sz, ofp);
+				/*
+				 * Fix: test the stream we actually read
+				 * from (ofp); the old code tested stdin.
+				 */
+				if (ferror(ofp))
+					perror("fread");
+				if (cksum_and_write(buf, sz, &stream_cksum,
+				    outfd) == -1)
+					goto out;
+			}
+			break;
+		}
+
+		case DRR_END:
+		{
+			/* use the recalculated checksum */
+			ZIO_SET_CHECKSUM(&drre->drr_checksum,
+			    stream_cksum.zc_word[0], stream_cksum.zc_word[1],
+			    stream_cksum.zc_word[2], stream_cksum.zc_word[3]);
+			if ((write(outfd, drr,
+			    sizeof (dmu_replay_record_t))) == -1)
+				goto out;
+			break;
+		}
+
+		case DRR_OBJECT:
+		{
+			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
+			    &stream_cksum, outfd) == -1)
+				goto out;
+			if (drro->drr_bonuslen > 0) {
+				(void) ssread(buf,
+				    P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
+				    ofp);
+				if (cksum_and_write(buf,
+				    P2ROUNDUP((uint64_t)drro->drr_bonuslen, 8),
+				    &stream_cksum, outfd) == -1)
+					goto out;
+			}
+			break;
+		}
+
+		case DRR_SPILL:
+		{
+			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
+			    &stream_cksum, outfd) == -1)
+				goto out;
+			(void) ssread(buf, drrs->drr_length, ofp);
+			if (cksum_and_write(buf, drrs->drr_length,
+			    &stream_cksum, outfd) == -1)
+				goto out;
+			break;
+		}
+
+		case DRR_FREEOBJECTS:
+		{
+			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
+			    &stream_cksum, outfd) == -1)
+				goto out;
+			break;
+		}
+
+		case DRR_WRITE:
+		{
+			dataref_t	dataref;
+
+			(void) ssread(buf, drrw->drr_length, ofp);
+
+			/*
+			 * Use the existing checksum if it's dedup-capable,
+			 * else calculate a SHA256 checksum for it.
+			 */
+
+			if (ZIO_CHECKSUM_EQUAL(drrw->drr_key.ddk_cksum,
+			    zero_cksum) ||
+			    !DRR_IS_DEDUP_CAPABLE(drrw->drr_checksumflags)) {
+				SHA256_CTX	ctx;
+				zio_cksum_t	tmpsha256;
+
+				SHA256Init(&ctx);
+				SHA256Update(&ctx, buf, drrw->drr_length);
+				SHA256Final(&tmpsha256, &ctx);
+				drrw->drr_key.ddk_cksum.zc_word[0] =
+				    BE_64(tmpsha256.zc_word[0]);
+				drrw->drr_key.ddk_cksum.zc_word[1] =
+				    BE_64(tmpsha256.zc_word[1]);
+				drrw->drr_key.ddk_cksum.zc_word[2] =
+				    BE_64(tmpsha256.zc_word[2]);
+				drrw->drr_key.ddk_cksum.zc_word[3] =
+				    BE_64(tmpsha256.zc_word[3]);
+				drrw->drr_checksumtype = ZIO_CHECKSUM_SHA256;
+				drrw->drr_checksumflags = DRR_CHECKSUM_DEDUP;
+			}
+
+			dataref.ref_guid = drrw->drr_toguid;
+			dataref.ref_object = drrw->drr_object;
+			dataref.ref_offset = drrw->drr_offset;
+
+			if (ddt_update(dda->dedup_hdl, &ddt,
+			    &drrw->drr_key.ddk_cksum, drrw->drr_key.ddk_prop,
+			    &dataref)) {
+				/* block already present in stream */
+				wbr_drrr->drr_object = drrw->drr_object;
+				wbr_drrr->drr_offset = drrw->drr_offset;
+				wbr_drrr->drr_length = drrw->drr_length;
+				wbr_drrr->drr_toguid = drrw->drr_toguid;
+				wbr_drrr->drr_refguid = dataref.ref_guid;
+				wbr_drrr->drr_refobject =
+				    dataref.ref_object;
+				wbr_drrr->drr_refoffset =
+				    dataref.ref_offset;
+
+				wbr_drrr->drr_checksumtype =
+				    drrw->drr_checksumtype;
+				/*
+				 * Fix: copy the checksum *flags*; the old
+				 * code copied drr_checksumtype here.
+				 */
+				wbr_drrr->drr_checksumflags =
+				    drrw->drr_checksumflags;
+				wbr_drrr->drr_key.ddk_cksum =
+				    drrw->drr_key.ddk_cksum;
+				wbr_drrr->drr_key.ddk_prop =
+				    drrw->drr_key.ddk_prop;
+
+				if (cksum_and_write(&wbr_drr,
+				    sizeof (dmu_replay_record_t), &stream_cksum,
+				    outfd) == -1)
+					goto out;
+			} else {
+				/* block not previously seen */
+				if (cksum_and_write(drr,
+				    sizeof (dmu_replay_record_t), &stream_cksum,
+				    outfd) == -1)
+					goto out;
+				if (cksum_and_write(buf,
+				    drrw->drr_length,
+				    &stream_cksum, outfd) == -1)
+					goto out;
+			}
+			break;
+		}
+
+		case DRR_FREE:
+		{
+			if (cksum_and_write(drr, sizeof (dmu_replay_record_t),
+			    &stream_cksum, outfd) == -1)
+				goto out;
+			break;
+		}
+
+		default:
+			(void) printf("INVALID record type 0x%x\n",
+			    drr->drr_type);
+			/* should never happen, so assert */
+			assert(B_FALSE);
+		}
+	}
+out:
+	umem_cache_destroy(ddt.ddecache);
+	free(ddt.dedup_hash_array);
+	free(buf);
+	(void) fclose(ofp);
+
+	return (NULL);
+}
+
+/*
+ * Routines for dealing with the AVL tree of fs-nvlists
+ */
+typedef struct fsavl_node {
+	avl_node_t fn_node;	/* AVL tree linkage */
+	nvlist_t *fn_nvfs;	/* fs nvlist containing this snapshot */
+	char *fn_snapname;	/* last component of the snapshot name */
+	uint64_t fn_guid;	/* snapshot guid (the sort key) */
+} fsavl_node_t;
+
+/*
+ * AVL comparator: order fsavl nodes by snapshot GUID.
+ */
+static int
+fsavl_compare(const void *arg1, const void *arg2)
+{
+	uint64_t g1 = ((const fsavl_node_t *)arg1)->fn_guid;
+	uint64_t g2 = ((const fsavl_node_t *)arg2)->fn_guid;
+
+	if (g1 == g2)
+		return (0);
+	return (g1 > g2 ? +1 : -1);
+}
+
+/*
+ * Given the GUID of a snapshot, find its containing filesystem and
+ * (optionally) name.  Returns NULL when the GUID is not in the tree.
+ */
+static nvlist_t *
+fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname)
+{
+	fsavl_node_t key;
+	fsavl_node_t *found;
+
+	key.fn_guid = snapguid;
+	found = avl_find(avl, &key, NULL);
+	if (found == NULL)
+		return (NULL);
+
+	if (snapname != NULL)
+		*snapname = found->fn_snapname;
+	return (found->fn_nvfs);
+}
+
+/*
+ * Tear down an fs-nvlist AVL tree built by fsavl_create(), freeing
+ * every node and then the tree itself.  A NULL tree is ignored.
+ */
+static void
+fsavl_destroy(avl_tree_t *avl)
+{
+	void *cookie = NULL;
+	fsavl_node_t *node;
+
+	if (avl == NULL)
+		return;
+
+	while ((node = avl_destroy_nodes(avl, &cookie)) != NULL)
+		free(node);
+	avl_destroy(avl);
+	free(avl);
+}
+
+/*
+ * Given an nvlist, produce an avl tree of snapshots, ordered by guid
+ *
+ * Returns NULL on allocation failure.  The nodes reference (do not
+ * copy) the nvlists and names inside 'fss', so the tree must not
+ * outlive it.
+ */
+static avl_tree_t *
+fsavl_create(nvlist_t *fss)
+{
+	avl_tree_t *fsavl;
+	nvpair_t *fselem = NULL;
+
+	if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
+		return (NULL);
+
+	avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
+	    offsetof(fsavl_node_t, fn_node));
+
+	/* for each filesystem entry in 'fss' ... */
+	while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
+		nvlist_t *nvfs, *snaps;
+		nvpair_t *snapelem = NULL;
+
+		VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs));
+		VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps));
+
+		/* ... add one node per snapshot, keyed by guid */
+		while ((snapelem =
+		    nvlist_next_nvpair(snaps, snapelem)) != NULL) {
+			fsavl_node_t *fn;
+			uint64_t guid;
+
+			VERIFY(0 == nvpair_value_uint64(snapelem, &guid));
+			if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
+				fsavl_destroy(fsavl);
+				return (NULL);
+			}
+			fn->fn_nvfs = nvfs;
+			fn->fn_snapname = nvpair_name(snapelem);
+			fn->fn_guid = guid;
+
+			/*
+			 * Note: if there are multiple snaps with the
+			 * same GUID, we ignore all but one.
+			 */
+			if (avl_find(fsavl, fn, NULL) == NULL)
+				avl_add(fsavl, fn);
+			else
+				free(fn);
+		}
+	}
+
+	return (fsavl);
+}
+
+/*
+ * Routines for dealing with the giant nvlist of fs-nvlists, etc.
+ */
+typedef struct send_data {
+	uint64_t parent_fromsnap_guid;	/* guid of fromsnap in current fs */
+	nvlist_t *parent_snaps;	/* name -> guid for current fs's snaps */
+	nvlist_t *fss;		/* accumulates one entry per filesystem */
+	nvlist_t *snapprops;	/* name -> { prop nvlist } for snaps */
+	const char *fromsnap;	/* incremental source (may be NULL) */
+	const char *tosnap;	/* target snapshot of the send */
+	boolean_t recursive;	/* descend into child filesystems */
+
+	/*
+	 * The header nvlist is of the following format:
+	 * {
+	 *   "tosnap" -> string
+	 *   "fromsnap" -> string (if incremental)
+	 *   "fss" -> {
+	 *	id -> {
+	 *
+	 *	 "name" -> string (full name; for debugging)
+	 *	 "parentfromsnap" -> number (guid of fromsnap in parent)
+	 *
+	 *	 "props" -> { name -> value (only if set here) }
+	 *	 "snaps" -> { name (lastname) -> number (guid) }
+	 *	 "snapprops" -> { name (lastname) -> { name -> value } }
+	 *
+	 *	 "origin" -> number (guid) (if clone)
+	 *	 "sent" -> boolean (not on-disk)
+	 *	}
+	 *   }
+	 * }
+	 *
+	 */
+} send_data_t;
+
+static void send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv);
+
+/*
+ * Snapshot iterator callback (driven from send_iterate_fs()).
+ *
+ * Records this snapshot's guid (keyed by its short name) in
+ * sd->parent_snaps and its properties in sd->snapprops, and latches
+ * sd->parent_fromsnap_guid when the snapshot matches fromsnap (or
+ * tosnap; see the NB comment below).  Closes 'zhp' before returning.
+ */
+static int
+send_iterate_snap(zfs_handle_t *zhp, void *arg)
+{
+	send_data_t *sd = arg;
+	uint64_t guid = zhp->zfs_dmustats.dds_guid;
+	char *snapname;
+	nvlist_t *nv;
+
+	/* short name: everything after the '@' */
+	snapname = strrchr(zhp->zfs_name, '@')+1;
+
+	VERIFY(0 == nvlist_add_uint64(sd->parent_snaps, snapname, guid));
+	/*
+	 * NB: if there is no fromsnap here (it's a newly created fs in
+	 * an incremental replication), we will substitute the tosnap.
+	 */
+	if ((sd->fromsnap && strcmp(snapname, sd->fromsnap) == 0) ||
+	    (sd->parent_fromsnap_guid == 0 && sd->tosnap &&
+	    strcmp(snapname, sd->tosnap) == 0)) {
+		sd->parent_fromsnap_guid = guid;
+	}
+
+	/* collect this snapshot's locally-set properties */
+	VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
+	send_iterate_prop(zhp, nv);
+	VERIFY(0 == nvlist_add_nvlist(sd->snapprops, snapname, nv));
+	nvlist_free(nv);
+
+	zfs_close(zhp);
+	return (0);
+}
+
+/*
+ * Copy the sendable properties of 'zhp' into 'nv': user properties
+ * plus native properties that are locally set (or received), skipping
+ * read-only and unknown natives.  Quota/reservation properties of
+ * snapshots are also skipped.
+ */
+static void
+send_iterate_prop(zfs_handle_t *zhp, nvlist_t *nv)
+{
+	nvpair_t *elem = NULL;
+
+	while ((elem = nvlist_next_nvpair(zhp->zfs_props, elem)) != NULL) {
+		char *propname = nvpair_name(elem);
+		zfs_prop_t prop = zfs_name_to_prop(propname);
+		nvlist_t *propnv;
+
+		if (!zfs_prop_user(propname)) {
+			/*
+			 * Realistically, this should never happen.  However,
+			 * we want the ability to add DSL properties without
+			 * needing to make incompatible version changes.  We
+			 * need to ignore unknown properties to allow older
+			 * software to still send datasets containing these
+			 * properties, with the unknown properties elided.
+			 */
+			if (prop == ZPROP_INVAL)
+				continue;
+
+			if (zfs_prop_readonly(prop))
+				continue;
+		}
+
+		verify(nvpair_value_nvlist(elem, &propnv) == 0);
+		if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION ||
+		    prop == ZFS_PROP_REFQUOTA ||
+		    prop == ZFS_PROP_REFRESERVATION) {
+			char *source;
+			uint64_t value;
+			verify(nvlist_lookup_uint64(propnv,
+			    ZPROP_VALUE, &value) == 0);
+			if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
+				continue;
+			/*
+			 * May have no source before SPA_VERSION_RECVD_PROPS,
+			 * but is still modifiable.
+			 */
+			if (nvlist_lookup_string(propnv,
+			    ZPROP_SOURCE, &source) == 0) {
+				if ((strcmp(source, zhp->zfs_name) != 0) &&
+				    (strcmp(source,
+				    ZPROP_SOURCE_VAL_RECVD) != 0))
+					continue;
+			}
+		} else {
+			/* other props must be local or received to be sent */
+			char *source;
+			if (nvlist_lookup_string(propnv,
+			    ZPROP_SOURCE, &source) != 0)
+				continue;
+			if ((strcmp(source, zhp->zfs_name) != 0) &&
+			    (strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0))
+				continue;
+		}
+
+		/* copy the value with the type matching the property */
+		if (zfs_prop_user(propname) ||
+		    zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
+			char *value;
+			verify(nvlist_lookup_string(propnv,
+			    ZPROP_VALUE, &value) == 0);
+			VERIFY(0 == nvlist_add_string(nv, propname, value));
+		} else {
+			uint64_t value;
+			verify(nvlist_lookup_uint64(propnv,
+			    ZPROP_VALUE, &value) == 0);
+			VERIFY(0 == nvlist_add_uint64(nv, propname, value));
+		}
+	}
+}
+
+/*
+ * recursively generate nvlists describing datasets.  See comment
+ * for the data structure send_data_t above for description of contents
+ * of the nvlist.
+ */
+static int
+send_iterate_fs(zfs_handle_t *zhp, void *arg)
+{
+	send_data_t *sd = arg;
+	nvlist_t *nvfs, *nv;
+	int rv = 0;
+	uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
+	uint64_t guid = zhp->zfs_dmustats.dds_guid;
+	char guidstring[64];
+
+	VERIFY(0 == nvlist_alloc(&nvfs, NV_UNIQUE_NAME, 0));
+	VERIFY(0 == nvlist_add_string(nvfs, "name", zhp->zfs_name));
+	VERIFY(0 == nvlist_add_uint64(nvfs, "parentfromsnap",
+	    sd->parent_fromsnap_guid));
+
+	if (zhp->zfs_dmustats.dds_origin[0]) {
+		zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
+		    zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
+		if (origin == NULL) {
+			/*
+			 * Don't leak on failure: this callback owns zhp,
+			 * and nvfs has not been hooked into sd->fss yet.
+			 */
+			nvlist_free(nvfs);
+			zfs_close(zhp);
+			return (-1);
+		}
+		VERIFY(0 == nvlist_add_uint64(nvfs, "origin",
+		    origin->zfs_dmustats.dds_guid));
+		/* The origin handle was only needed for its guid. */
+		zfs_close(origin);
+	}
+
+	/* iterate over props */
+	VERIFY(0 == nvlist_alloc(&nv, NV_UNIQUE_NAME, 0));
+	send_iterate_prop(zhp, nv);
+	VERIFY(0 == nvlist_add_nvlist(nvfs, "props", nv));
+	nvlist_free(nv);
+
+	/* iterate over snaps, and set sd->parent_fromsnap_guid */
+	sd->parent_fromsnap_guid = 0;
+	VERIFY(0 == nvlist_alloc(&sd->parent_snaps, NV_UNIQUE_NAME, 0));
+	VERIFY(0 == nvlist_alloc(&sd->snapprops, NV_UNIQUE_NAME, 0));
+	(void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd);
+	VERIFY(0 == nvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps));
+	VERIFY(0 == nvlist_add_nvlist(nvfs, "snapprops", sd->snapprops));
+	nvlist_free(sd->parent_snaps);
+	nvlist_free(sd->snapprops);
+
+	/* add this fs to nvlist */
+	(void) snprintf(guidstring, sizeof (guidstring),
+	    "0x%llx", (longlong_t)guid);
+	VERIFY(0 == nvlist_add_nvlist(sd->fss, guidstring, nvfs));
+	nvlist_free(nvfs);
+
+	/* iterate over children */
+	if (sd->recursive)
+		rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd);
+
+	sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
+
+	zfs_close(zhp);
+	return (rv);
+}
+
+/*
+ * Build the "fss" nvlist describing the hierarchy rooted at 'fsname'
+ * (see send_data_t).  On success, *nvlp receives the nvlist and, when
+ * avlp is non-NULL, *avlp receives a guid-indexed AVL view of it.
+ * Returns 0 or a libzfs error code.
+ */
+static int
+gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
+    const char *tosnap, boolean_t recursive, nvlist_t **nvlp, avl_tree_t **avlp)
+{
+	send_data_t sd = { 0 };
+	zfs_handle_t *zhp;
+	int error;
+
+	zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+	if (zhp == NULL)
+		return (EZFS_BADTYPE);
+
+	sd.fromsnap = fromsnap;
+	sd.tosnap = tosnap;
+	sd.recursive = recursive;
+	VERIFY(nvlist_alloc(&sd.fss, NV_UNIQUE_NAME, 0) == 0);
+
+	/* send_iterate_fs() consumes zhp. */
+	error = send_iterate_fs(zhp, &sd);
+	if (error != 0) {
+		nvlist_free(sd.fss);
+		if (avlp != NULL)
+			*avlp = NULL;
+		*nvlp = NULL;
+		return (error);
+	}
+
+	if (avlp != NULL) {
+		*avlp = fsavl_create(sd.fss);
+		if (*avlp == NULL) {
+			nvlist_free(sd.fss);
+			*nvlp = NULL;
+			return (EZFS_NOMEM);
+		}
+	}
+
+	*nvlp = sd.fss;
+	return (0);
+}
+
+/*
+ * Routines specific to "zfs send"
+ */
+/*
+ * Callback state shared by the dump_* routines while generating a
+ * send stream.
+ */
+typedef struct send_dump_data {
+	/* these are all just the short snapname (the part after the @) */
+	const char *fromsnap;
+	const char *tosnap;
+	char prevsnap[ZFS_MAXNAMELEN];	/* last snapshot sent (or fromsnap) */
+	uint64_t prevsnap_obj;		/* objset id of prevsnap */
+	boolean_t seenfrom, seento, replicate, doall, fromorigin;
+	boolean_t verbose, dryrun, parsable, progress;
+	int outfd;			/* stream output file descriptor */
+	boolean_t err;			/* set when a warning was printed */
+	nvlist_t *fss;			/* "fss" hierarchy (see send_data_t) */
+	avl_tree_t *fsavl;		/* guid-indexed view of fss */
+	snapfilter_cb_t *filter_cb;	/* optional snapshot filter */
+	void *filter_cb_arg;
+	nvlist_t *debugnv;		/* per-snapshot debug nvlists, or NULL */
+	char holdtag[ZFS_MAXNAMELEN];	/* user-hold tag applied while sending */
+	int cleanup_fd;			/* fd for auto-releasing holds, or -1 */
+	uint64_t size;			/* accumulated stream size estimate */
+} send_dump_data_t;
+
+/*
+ * Ask the kernel for a size estimate of the send stream for snapshot
+ * 'zhp', incremental from the snapshot with object id 'fromsnap_obj'
+ * (or from the clone origin when 'fromorigin' is set; the two are
+ * mutually exclusive).  On success the estimate is stored in *sizep.
+ * Returns 0 or a libzfs error.
+ */
+static int
+estimate_ioctl(zfs_handle_t *zhp, uint64_t fromsnap_obj,
+    boolean_t fromorigin, uint64_t *sizep)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+
+	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+	assert(fromsnap_obj == 0 || !fromorigin);
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	zc.zc_obj = fromorigin;
+	zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
+	zc.zc_fromobj = fromsnap_obj;
+	zc.zc_guid = 1;  /* estimate flag */
+
+	if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
+		char errbuf[1024];
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "warning: cannot estimate space for '%s'"), zhp->zfs_name);
+
+		switch (errno) {
+		case EXDEV:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "not an earlier snapshot from the same fs"));
+			return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
+
+		case ENOENT:
+			/*
+			 * NOTE(review): zc_value is never filled in on this
+			 * path, so the "@%s" below may print empty — confirm
+			 * against the kernel-side ioctl contract.
+			 */
+			if (zfs_dataset_exists(hdl, zc.zc_name,
+			    ZFS_TYPE_SNAPSHOT)) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "incremental source (@%s) does not exist"),
+				    zc.zc_value);
+			}
+			return (zfs_error(hdl, EZFS_NOENT, errbuf));
+
+		case EDQUOT:
+		case EFBIG:
+		case EIO:
+		case ENOLINK:
+		case ENOSPC:
+		case ENXIO:
+		case EPIPE:
+		case ERANGE:
+		case EFAULT:
+		case EROFS:
+			/* generic I/O-ish failures: report as bad backup */
+			zfs_error_aux(hdl, strerror(errno));
+			return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
+
+		default:
+			return (zfs_standard_error(hdl, errno, errbuf));
+		}
+	}
+
+	/* The kernel returns the estimate in the reused zc_objset_type. */
+	*sizep = zc.zc_objset_type;
+
+	return (0);
+}
+
+/*
+ * Dumps a backup of the given snapshot (incremental from fromsnap if it's not
+ * NULL) to the file descriptor specified by outfd.
+ */
+static int
+dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
+    boolean_t fromorigin, int outfd, nvlist_t *debugnv)
+{
+	zfs_cmd_t zc = { 0 };
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	nvlist_t *thisdbg;
+
+	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+	assert(fromsnap_obj == 0 || !fromorigin);
+
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+	zc.zc_cookie = outfd;
+	zc.zc_obj = fromorigin;
+	zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
+	zc.zc_fromobj = fromsnap_obj;
+
+	/* Per-snapshot debug record; attached to debugnv on both paths. */
+	VERIFY(0 == nvlist_alloc(&thisdbg, NV_UNIQUE_NAME, 0));
+	if (fromsnap && fromsnap[0] != '\0') {
+		VERIFY(0 == nvlist_add_string(thisdbg,
+		    "fromsnap", fromsnap));
+	}
+
+	if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
+		char errbuf[1024];
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "warning: cannot send '%s'"), zhp->zfs_name);
+
+		/* Record the errno in the debug nvlist before mapping it. */
+		VERIFY(0 == nvlist_add_uint64(thisdbg, "error", errno));
+		if (debugnv) {
+			VERIFY(0 == nvlist_add_nvlist(debugnv,
+			    zhp->zfs_name, thisdbg));
+		}
+		nvlist_free(thisdbg);
+
+		switch (errno) {
+		case EXDEV:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "not an earlier snapshot from the same fs"));
+			return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
+
+		case ENOENT:
+			/*
+			 * NOTE(review): zc_value is never filled in on this
+			 * path, so the "@%s" below may print empty — confirm
+			 * against the kernel-side ioctl contract.
+			 */
+			if (zfs_dataset_exists(hdl, zc.zc_name,
+			    ZFS_TYPE_SNAPSHOT)) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "incremental source (@%s) does not exist"),
+				    zc.zc_value);
+			}
+			return (zfs_error(hdl, EZFS_NOENT, errbuf));
+
+		case EDQUOT:
+		case EFBIG:
+		case EIO:
+		case ENOLINK:
+		case ENOSPC:
+#ifdef sun
+		case ENOSTR:
+#endif
+		case ENXIO:
+		case EPIPE:
+		case ERANGE:
+		case EFAULT:
+		case EROFS:
+			/* generic I/O-ish failures: report as bad backup */
+			zfs_error_aux(hdl, strerror(errno));
+			return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
+
+		default:
+			return (zfs_standard_error(hdl, errno, errbuf));
+		}
+	}
+
+	if (debugnv)
+		VERIFY(0 == nvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg));
+	nvlist_free(thisdbg);
+
+	return (0);
+}
+
+/*
+ * Place a temporary user hold (tag sdd->holdtag) on the snapshot 'zhp'
+ * so it cannot be destroyed while being sent.  The hold is registered
+ * against sdd->cleanup_fd so the kernel releases it automatically.
+ * No-op for dry runs or when no cleanup fd was opened.
+ */
+static int
+hold_for_send(zfs_handle_t *zhp, send_dump_data_t *sdd)
+{
+	zfs_handle_t *pzhp;
+	int error = 0;
+	char *thissnap;
+
+	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+
+	if (sdd->dryrun)
+		return (0);
+
+	/*
+	 * zfs_send() only opens a cleanup_fd for sends that need it,
+	 * e.g. replication and doall.
+	 */
+	if (sdd->cleanup_fd == -1)
+		return (0);
+
+	/*
+	 * Temporarily overwrite the '@' with a NUL so zfs_name reads as
+	 * the parent filesystem name; restore it right after the open.
+	 */
+	thissnap = strchr(zhp->zfs_name, '@') + 1;
+	*(thissnap - 1) = '\0';
+	pzhp = zfs_open(zhp->zfs_hdl, zhp->zfs_name, ZFS_TYPE_DATASET);
+	*(thissnap - 1) = '@';
+
+	/*
+	 * It's OK if the parent no longer exists.  The send code will
+	 * handle that error.
+	 */
+	if (pzhp) {
+		error = zfs_hold(pzhp, thissnap, sdd->holdtag,
+		    B_FALSE, B_TRUE, B_TRUE, sdd->cleanup_fd,
+		    zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID),
+		    zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG));
+		zfs_close(pzhp);
+	}
+
+	return (error);
+}
+
+/*
+ * Worker thread: once a second, query ZFS_IOC_SEND_PROGRESS for the
+ * snapshot in pa->pa_zhp and print a timestamped byte count to stderr
+ * (machine-readable when pa->pa_parsable).  Runs until cancelled by
+ * the caller or until the progress ioctl fails.
+ */
+static void *
+send_progress_thread(void *arg)
+{
+	progress_arg_t *pa = arg;
+
+	zfs_cmd_t zc = { 0 };
+	zfs_handle_t *zhp = pa->pa_zhp;
+	libzfs_handle_t *hdl = zhp->zfs_hdl;
+	unsigned long long bytes;
+	char buf[16];
+
+	time_t t;
+	struct tm tm;
+
+	assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
+	(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
+
+	if (!pa->pa_parsable)
+		(void) fprintf(stderr, "TIME        SENT   SNAPSHOT\n");
+
+	/*
+	 * Print the progress from ZFS_IOC_SEND_PROGRESS every second.
+	 */
+	for (;;) {
+		(void) sleep(1);
+
+		zc.zc_cookie = pa->pa_fd;
+		if (zfs_ioctl(hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
+			return ((void *)-1);
+
+		/*
+		 * Use localtime_r(): this runs concurrently with the main
+		 * thread, and localtime()'s shared static buffer is not
+		 * thread-safe.
+		 */
+		(void) time(&t);
+		(void) localtime_r(&t, &tm);
+		bytes = zc.zc_cookie;
+
+		if (pa->pa_parsable) {
+			(void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
+			    tm.tm_hour, tm.tm_min, tm.tm_sec,
+			    bytes, zhp->zfs_name);
+		} else {
+			zfs_nicenum(bytes, buf, sizeof (buf));
+			(void) fprintf(stderr, "%02d:%02d:%02d   %5s   %s\n",
+			    tm.tm_hour, tm.tm_min, tm.tm_sec,
+			    buf, zhp->zfs_name);
+		}
+	}
+}
+
+/*
+ * zfs_iter_snapshots_sorted() callback: send one snapshot of the
+ * current filesystem, incrementally from sdd->prevsnap once a starting
+ * point has been seen.  Consumes (closes) 'zhp'.  Returns 0 or errno.
+ */
+static int
+dump_snapshot(zfs_handle_t *zhp, void *arg)
+{
+	send_dump_data_t *sdd = arg;
+	progress_arg_t pa = { 0 };
+	pthread_t tid;
+
+	char *thissnap;
+	int err;
+	boolean_t isfromsnap, istosnap, fromorigin;
+	boolean_t exclude = B_FALSE;
+
+	thissnap = strchr(zhp->zfs_name, '@') + 1;
+	isfromsnap = (sdd->fromsnap != NULL &&
+	    strcmp(sdd->fromsnap, thissnap) == 0);
+
+	/* The "from" snapshot itself is not sent; just remember it. */
+	if (!sdd->seenfrom && isfromsnap) {
+		err = hold_for_send(zhp, sdd);
+		if (err == 0) {
+			sdd->seenfrom = B_TRUE;
+			(void) strcpy(sdd->prevsnap, thissnap);
+			sdd->prevsnap_obj = zfs_prop_get_int(zhp,
+			    ZFS_PROP_OBJSETID);
+		} else if (err == ENOENT) {
+			/* snapshot vanished; let the send code report it */
+			err = 0;
+		}
+		zfs_close(zhp);
+		return (err);
+	}
+
+	/* Skip snapshots outside the fromsnap..tosnap window. */
+	if (sdd->seento || !sdd->seenfrom) {
+		zfs_close(zhp);
+		return (0);
+	}
+
+	istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
+	if (istosnap)
+		sdd->seento = B_TRUE;
+
+	if (!sdd->doall && !isfromsnap && !istosnap) {
+		if (sdd->replicate) {
+			char *snapname;
+			nvlist_t *snapprops;
+			/*
+			 * Filter out all intermediate snapshots except origin
+			 * snapshots needed to replicate clones.
+			 */
+			nvlist_t *nvfs = fsavl_find(sdd->fsavl,
+			    zhp->zfs_dmustats.dds_guid, &snapname);
+
+			VERIFY(0 == nvlist_lookup_nvlist(nvfs,
+			    "snapprops", &snapprops));
+			VERIFY(0 == nvlist_lookup_nvlist(snapprops,
+			    thissnap, &snapprops));
+			exclude = !nvlist_exists(snapprops, "is_clone_origin");
+		} else {
+			exclude = B_TRUE;
+		}
+	}
+
+	/*
+	 * If a filter function exists, call it to determine whether
+	 * this snapshot will be sent.
+	 */
+	if (exclude || (sdd->filter_cb != NULL &&
+	    sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
+		/*
+		 * This snapshot is filtered out.  Don't send it, and don't
+		 * set prevsnap_obj, so it will be as if this snapshot didn't
+		 * exist, and the next accepted snapshot will be sent as
+		 * an incremental from the last accepted one, or as the
+		 * first (and full) snapshot in the case of a replication,
+		 * non-incremental send.
+		 */
+		zfs_close(zhp);
+		return (0);
+	}
+
+	err = hold_for_send(zhp, sdd);
+	if (err) {
+		if (err == ENOENT)
+			err = 0;
+		zfs_close(zhp);
+		return (err);
+	}
+
+	/* First snapshot of a clone may be incremental from its origin. */
+	fromorigin = sdd->prevsnap[0] == '\0' &&
+	    (sdd->fromorigin || sdd->replicate);
+
+	if (sdd->verbose) {
+		uint64_t size;
+		err = estimate_ioctl(zhp, sdd->prevsnap_obj,
+		    fromorigin, &size);
+
+		if (sdd->parsable) {
+			if (sdd->prevsnap[0] != '\0') {
+				(void) fprintf(stderr, "incremental\t%s\t%s",
+				    sdd->prevsnap, zhp->zfs_name);
+			} else {
+				(void) fprintf(stderr, "full\t%s",
+				    zhp->zfs_name);
+			}
+		} else {
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+			    "send from @%s to %s"),
+			    sdd->prevsnap, zhp->zfs_name);
+		}
+		if (err == 0) {
+			if (sdd->parsable) {
+				(void) fprintf(stderr, "\t%llu\n",
+				    (longlong_t)size);
+			} else {
+				char buf[16];
+				zfs_nicenum(size, buf, sizeof (buf));
+				(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+				    " estimated size is %s\n"), buf);
+			}
+			sdd->size += size;
+		} else {
+			(void) fprintf(stderr, "\n");
+		}
+	}
+
+	if (!sdd->dryrun) {
+		/*
+		 * If progress reporting is requested, spawn a new thread to
+		 * poll ZFS_IOC_SEND_PROGRESS at a regular interval.
+		 */
+		if (sdd->progress) {
+			pa.pa_zhp = zhp;
+			pa.pa_fd = sdd->outfd;
+			pa.pa_parsable = sdd->parsable;
+
+			if (err = pthread_create(&tid, NULL,
+			    send_progress_thread, &pa)) {
+				zfs_close(zhp);
+				return (err);
+			}
+		}
+
+		err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
+		    fromorigin, sdd->outfd, sdd->debugnv);
+
+		/* Stop the progress thread before pa goes out of scope. */
+		if (sdd->progress) {
+			(void) pthread_cancel(tid);
+			(void) pthread_join(tid, NULL);
+		}
+	}
+
+	(void) strcpy(sdd->prevsnap, thissnap);
+	sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
+	zfs_close(zhp);
+	return (err);
+}
+
+/*
+ * Send all eligible snapshots of filesystem 'zhp' (state in 'arg').
+ * Missing tosnap/fromsnap produce warnings on stderr and set sdd->err
+ * rather than a hard failure, so a recursive send can continue.
+ */
+static int
+dump_filesystem(zfs_handle_t *zhp, void *arg)
+{
+	int rv = 0;
+	send_dump_data_t *sdd = arg;
+	boolean_t missingfrom = B_FALSE;
+	zfs_cmd_t zc = { 0 };
+
+	/* Verify that the target snapshot exists on this filesystem. */
+	(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
+	    zhp->zfs_name, sdd->tosnap);
+	if (ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
+		(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+		    "WARNING: could not send %s@%s: does not exist\n"),
+		    zhp->zfs_name, sdd->tosnap);
+		sdd->err = B_TRUE;
+		return (0);
+	}
+
+	if (sdd->replicate && sdd->fromsnap) {
+		/*
+		 * If this fs does not have fromsnap, and we're doing
+		 * recursive, we need to send a full stream from the
+		 * beginning (or an incremental from the origin if this
+		 * is a clone).  If we're doing non-recursive, then let
+		 * them get the error.
+		 */
+		(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
+		    zhp->zfs_name, sdd->fromsnap);
+		if (ioctl(zhp->zfs_hdl->libzfs_fd,
+		    ZFS_IOC_OBJSET_STATS, &zc) != 0) {
+			missingfrom = B_TRUE;
+		}
+	}
+
+	/* Reset per-filesystem iteration state before walking snapshots. */
+	sdd->seenfrom = sdd->seento = sdd->prevsnap[0] = 0;
+	sdd->prevsnap_obj = 0;
+	if (sdd->fromsnap == NULL || missingfrom)
+		sdd->seenfrom = B_TRUE;
+
+	rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg);
+	if (!sdd->seenfrom) {
+		(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+		    "WARNING: could not send %s@%s:\n"
+		    "incremental source (%s@%s) does not exist\n"),
+		    zhp->zfs_name, sdd->tosnap,
+		    zhp->zfs_name, sdd->fromsnap);
+		sdd->err = B_TRUE;
+	} else if (!sdd->seento) {
+		if (sdd->fromsnap) {
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+			    "WARNING: could not send %s@%s:\n"
+			    "incremental source (%s@%s) "
+			    "is not earlier than it\n"),
+			    zhp->zfs_name, sdd->tosnap,
+			    zhp->zfs_name, sdd->fromsnap);
+		} else {
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+			    "WARNING: "
+			    "could not send %s@%s: does not exist\n"),
+			    zhp->zfs_name, sdd->tosnap);
+		}
+		sdd->err = B_TRUE;
+	}
+
+	return (rv);
+}
+
+/*
+ * Send every filesystem recorded in sdd->fss, making repeated passes so
+ * that each dataset's parent and clone origin are sent before it.  For
+ * non-replicating sends, just dump the single filesystem 'rzhp'.
+ */
+static int
+dump_filesystems(zfs_handle_t *rzhp, void *arg)
+{
+	send_dump_data_t *sdd = arg;
+	nvpair_t *fspair;
+	boolean_t needagain, progress;
+
+	if (!sdd->replicate)
+		return (dump_filesystem(rzhp, sdd));
+
+	/* Mark the clone origin snapshots. */
+	for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
+	    fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
+		nvlist_t *nvfs;
+		uint64_t origin_guid = 0;
+
+		VERIFY(0 == nvpair_value_nvlist(fspair, &nvfs));
+		(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
+		if (origin_guid != 0) {
+			char *snapname;
+			nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
+			    origin_guid, &snapname);
+			if (origin_nv != NULL) {
+				nvlist_t *snapprops;
+				VERIFY(0 == nvlist_lookup_nvlist(origin_nv,
+				    "snapprops", &snapprops));
+				VERIFY(0 == nvlist_lookup_nvlist(snapprops,
+				    snapname, &snapprops));
+				VERIFY(0 == nvlist_add_boolean(
+				    snapprops, "is_clone_origin"));
+			}
+		}
+	}
+again:
+	/*
+	 * One pass over all filesystems; datasets whose parent or origin
+	 * has not been sent yet are deferred to a later pass.
+	 */
+	needagain = progress = B_FALSE;
+	for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
+	    fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
+		nvlist_t *fslist, *parent_nv;
+		char *fsname;
+		zfs_handle_t *zhp;
+		int err;
+		uint64_t origin_guid = 0;
+		uint64_t parent_guid = 0;
+
+		VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0);
+		if (nvlist_lookup_boolean(fslist, "sent") == 0)
+			continue;
+
+		VERIFY(nvlist_lookup_string(fslist, "name", &fsname) == 0);
+		(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
+		(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
+		    &parent_guid);
+
+		if (parent_guid != 0) {
+			parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
+			if (!nvlist_exists(parent_nv, "sent")) {
+				/* parent has not been sent; skip this one */
+				needagain = B_TRUE;
+				continue;
+			}
+		}
+
+		if (origin_guid != 0) {
+			nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
+			    origin_guid, NULL);
+			if (origin_nv != NULL &&
+			    !nvlist_exists(origin_nv, "sent")) {
+				/*
+				 * origin has not been sent yet;
+				 * skip this clone.
+				 */
+				needagain = B_TRUE;
+				continue;
+			}
+		}
+
+		zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
+		if (zhp == NULL)
+			return (-1);
+		err = dump_filesystem(zhp, sdd);
+		VERIFY(nvlist_add_boolean(fslist, "sent") == 0);
+		progress = B_TRUE;
+		zfs_close(zhp);
+		if (err)
+			return (err);
+	}
+	if (needagain) {
+		/* a pass that defers work must also have sent something */
+		assert(progress);
+		goto again;
+	}
+
+	/* clean out the sent flags in case we reuse this fss */
+	for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
+	    fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
+		nvlist_t *fslist;
+
+		VERIFY(nvpair_value_nvlist(fspair, &fslist) == 0);
+		(void) nvlist_remove_all(fslist, "sent");
+	}
+
+	return (0);
+}
+
+/*
+ * Generate a send stream for the dataset identified by the argument zhp.
+ *
+ * The content of the send stream is the snapshot identified by
+ * 'tosnap'.  Incremental streams are requested in two ways:
+ *     - from the snapshot identified by "fromsnap" (if non-null) or
+ *     - from the origin of the dataset identified by zhp, which must
+ *	 be a clone.  In this case, "fromsnap" is null and "fromorigin"
+ *	 is TRUE.
+ *
+ * The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
+ * uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
+ * if "replicate" is set.  If "doall" is set, dump all the intermediate
+ * snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
+ * case too. If "props" is set, send properties.
+ */
+int
+zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
+    sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
+    void *cb_arg, nvlist_t **debugnvp)
+{
+	char errbuf[1024];
+	send_dump_data_t sdd = { 0 };
+	int err = 0;
+	nvlist_t *fss = NULL;
+	avl_tree_t *fsavl = NULL;
+	static uint64_t holdseq;	/* makes each hold tag unique */
+	int spa_version;
+	pthread_t tid;
+	int pipefd[2];
+	dedup_arg_t dda = { 0 };
+	int featureflags = 0;
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot send '%s'"), zhp->zfs_name);
+
+	if (fromsnap && fromsnap[0] == '\0') {
+		zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
+		    "zero-length incremental source"));
+		return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
+	}
+
+	/* SA-enabled filesystems require the spill-block stream feature. */
+	if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM) {
+		uint64_t version;
+		version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
+		if (version >= ZPL_VERSION_SA) {
+			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
+		}
+	}
+
+	/*
+	 * Dedup inserts a pipe between the dump and outfd; the cksummer
+	 * thread reads the raw stream and writes the deduped one.
+	 */
+	if (flags->dedup && !flags->dryrun) {
+		featureflags |= (DMU_BACKUP_FEATURE_DEDUP |
+		    DMU_BACKUP_FEATURE_DEDUPPROPS);
+		if (err = pipe(pipefd)) {
+			zfs_error_aux(zhp->zfs_hdl, strerror(errno));
+			return (zfs_error(zhp->zfs_hdl, EZFS_PIPEFAILED,
+			    errbuf));
+		}
+		dda.outputfd = outfd;
+		dda.inputfd = pipefd[1];
+		dda.dedup_hdl = zhp->zfs_hdl;
+		if (err = pthread_create(&tid, NULL, cksummer, &dda)) {
+			(void) close(pipefd[0]);
+			(void) close(pipefd[1]);
+			zfs_error_aux(zhp->zfs_hdl, strerror(errno));
+			return (zfs_error(zhp->zfs_hdl,
+			    EZFS_THREADCREATEFAILED, errbuf));
+		}
+	}
+
+	/* Compound streams begin with a DRR_BEGIN record + nvlist header. */
+	if (flags->replicate || flags->doall || flags->props) {
+		dmu_replay_record_t drr = { 0 };
+		char *packbuf = NULL;
+		size_t buflen = 0;
+		zio_cksum_t zc = { 0 };
+
+		if (flags->replicate || flags->props) {
+			nvlist_t *hdrnv;
+
+			VERIFY(0 == nvlist_alloc(&hdrnv, NV_UNIQUE_NAME, 0));
+			if (fromsnap) {
+				VERIFY(0 == nvlist_add_string(hdrnv,
+				    "fromsnap", fromsnap));
+			}
+			VERIFY(0 == nvlist_add_string(hdrnv, "tosnap", tosnap));
+			if (!flags->replicate) {
+				VERIFY(0 == nvlist_add_boolean(hdrnv,
+				    "not_recursive"));
+			}
+
+			err = gather_nvlist(zhp->zfs_hdl, zhp->zfs_name,
+			    fromsnap, tosnap, flags->replicate, &fss, &fsavl);
+			if (err)
+				goto err_out;
+			VERIFY(0 == nvlist_add_nvlist(hdrnv, "fss", fss));
+			err = nvlist_pack(hdrnv, &packbuf, &buflen,
+			    NV_ENCODE_XDR, 0);
+			/* caller takes ownership of hdrnv via debugnvp */
+			if (debugnvp)
+				*debugnvp = hdrnv;
+			else
+				nvlist_free(hdrnv);
+			if (err) {
+				fsavl_destroy(fsavl);
+				nvlist_free(fss);
+				goto stderr_out;
+			}
+		}
+
+		if (!flags->dryrun) {
+			/* write first begin record */
+			drr.drr_type = DRR_BEGIN;
+			drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
+			DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
+			    drr_versioninfo, DMU_COMPOUNDSTREAM);
+			DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
+			    drr_versioninfo, featureflags);
+			(void) snprintf(drr.drr_u.drr_begin.drr_toname,
+			    sizeof (drr.drr_u.drr_begin.drr_toname),
+			    "%s@%s", zhp->zfs_name, tosnap);
+			drr.drr_payloadlen = buflen;
+			err = cksum_and_write(&drr, sizeof (drr), &zc, outfd);
+
+			/* write header nvlist */
+			if (err != -1 && packbuf != NULL) {
+				err = cksum_and_write(packbuf, buflen, &zc,
+				    outfd);
+			}
+			free(packbuf);
+			if (err == -1) {
+				fsavl_destroy(fsavl);
+				nvlist_free(fss);
+				err = errno;
+				goto stderr_out;
+			}
+
+			/* write end record */
+			bzero(&drr, sizeof (drr));
+			drr.drr_type = DRR_END;
+			drr.drr_u.drr_end.drr_checksum = zc;
+			err = write(outfd, &drr, sizeof (drr));
+			if (err == -1) {
+				fsavl_destroy(fsavl);
+				nvlist_free(fss);
+				err = errno;
+				goto stderr_out;
+			}
+
+			err = 0;
+		}
+	}
+
+	/* dump each stream */
+	sdd.fromsnap = fromsnap;
+	sdd.tosnap = tosnap;
+	if (flags->dedup)
+		sdd.outfd = pipefd[0];
+	else
+		sdd.outfd = outfd;
+	sdd.replicate = flags->replicate;
+	sdd.doall = flags->doall;
+	sdd.fromorigin = flags->fromorigin;
+	sdd.fss = fss;
+	sdd.fsavl = fsavl;
+	sdd.verbose = flags->verbose;
+	sdd.parsable = flags->parsable;
+	sdd.progress = flags->progress;
+	sdd.dryrun = flags->dryrun;
+	sdd.filter_cb = filter_func;
+	sdd.filter_cb_arg = cb_arg;
+	if (debugnvp)
+		sdd.debugnv = *debugnvp;
+
+	/*
+	 * Some flags require that we place user holds on the datasets that are
+	 * being sent so they don't get destroyed during the send. We can skip
+	 * this step if the pool is imported read-only since the datasets cannot
+	 * be destroyed.
+	 */
+	if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
+	    ZPOOL_PROP_READONLY, NULL) &&
+	    zfs_spa_version(zhp, &spa_version) == 0 &&
+	    spa_version >= SPA_VERSION_USERREFS &&
+	    (flags->doall || flags->replicate)) {
+		++holdseq;
+		(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
+		    ".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
+		sdd.cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL);
+		if (sdd.cleanup_fd < 0) {
+			err = errno;
+			goto stderr_out;
+		}
+	} else {
+		sdd.cleanup_fd = -1;
+	}
+	if (flags->verbose) {
+		/*
+		 * Do a verbose no-op dry run to get all the verbose output
+		 * before generating any data.  Then do a non-verbose real
+		 * run to generate the streams.
+		 */
+		sdd.dryrun = B_TRUE;
+		err = dump_filesystems(zhp, &sdd);
+		sdd.dryrun = flags->dryrun;
+		sdd.verbose = B_FALSE;
+		if (flags->parsable) {
+			(void) fprintf(stderr, "size\t%llu\n",
+			    (longlong_t)sdd.size);
+		} else {
+			char buf[16];
+			zfs_nicenum(sdd.size, buf, sizeof (buf));
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+			    "total estimated size is %s\n"), buf);
+		}
+	}
+	err = dump_filesystems(zhp, &sdd);
+	fsavl_destroy(fsavl);
+	nvlist_free(fss);
+
+	/* Closing the pipe write side lets the cksummer thread finish. */
+	if (flags->dedup) {
+		(void) close(pipefd[0]);
+		(void) pthread_join(tid, NULL);
+	}
+
+	if (sdd.cleanup_fd != -1) {
+		VERIFY(0 == close(sdd.cleanup_fd));
+		sdd.cleanup_fd = -1;
+	}
+
+	if (!flags->dryrun && (flags->replicate || flags->doall ||
+	    flags->props)) {
+		/*
+		 * write final end record.  NB: want to do this even if
+		 * there was some error, because it might not be totally
+		 * failed.
+		 */
+		dmu_replay_record_t drr = { 0 };
+		drr.drr_type = DRR_END;
+		if (write(outfd, &drr, sizeof (drr)) == -1) {
+			return (zfs_standard_error(zhp->zfs_hdl,
+			    errno, errbuf));
+		}
+	}
+
+	return (err || sdd.err);
+
+stderr_out:
+	/* translate errno-style 'err' into a libzfs error before cleanup */
+	err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
+err_out:
+	if (sdd.cleanup_fd != -1)
+		VERIFY(0 == close(sdd.cleanup_fd));
+	if (flags->dedup) {
+		(void) pthread_cancel(tid);
+		(void) pthread_join(tid, NULL);
+		(void) close(pipefd[0]);
+	}
+	return (err);
+}
+
+/*
+ * Routines specific to "zfs recv"
+ */
+
+/*
+ * Read exactly 'ilen' bytes from 'fd' into 'buf', tolerating short
+ * reads.  On success, optionally fold the data into the running
+ * fletcher-4 checksum *zc (byteswapped variant when 'byteswap').
+ * Returns 0, or a libzfs error if the stream ends early or read fails.
+ */
+static int
+recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
+    boolean_t byteswap, zio_cksum_t *zc)
+{
+	char *cp = buf;
+	int rv = 0;
+	int len = ilen;
+
+	/*
+	 * Stop on error (rv < 0) or EOF (rv == 0) without applying a
+	 * negative count to cp/len, and without issuing the extra
+	 * zero-length read the old do/while made after a full read.
+	 */
+	while (len > 0) {
+		rv = read(fd, cp, len);
+		if (rv <= 0)
+			break;
+		cp += rv;
+		len -= rv;
+	}
+
+	if (len != 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "failed to read from stream"));
+		return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
+		    "cannot receive")));
+	}
+
+	if (zc) {
+		if (byteswap)
+			fletcher_4_incremental_byteswap(buf, ilen, zc);
+		else
+			fletcher_4_incremental_native(buf, ilen, zc);
+	}
+	return (0);
+}
+
+/*
+ * Read a 'len'-byte packed nvlist from 'fd' (updating checksum *zc as
+ * recv_read() does) and unpack it into *nvp.  Returns 0, ENOMEM on
+ * allocation failure, or EINVAL for a malformed nvlist.
+ */
+static int
+recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
+    boolean_t byteswap, zio_cksum_t *zc)
+{
+	int err;
+	char *packed = zfs_alloc(hdl, len);
+
+	if (packed == NULL)
+		return (ENOMEM);
+
+	err = recv_read(hdl, fd, packed, len, byteswap, zc);
+	if (err == 0 && nvlist_unpack(packed, len, nvp, 0) != 0) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+		    "stream (malformed nvlist)"));
+		err = EINVAL;
+	}
+
+	free(packed);
+	return (err);
+}
+
+/*
+ * Rename dataset 'name', first to 'tryname' (if non-NULL), then to a
+ * generated "recv-<pid>-<seq>" name under the first 'baselen' bytes of
+ * 'name'.  The final name is stored in 'newname'.  Mounts are unshared
+ * around the rename via a changelist.  Returns 0, EAGAIN when only the
+ * fallback rename was attempted, or another error.
+ */
+static int
+recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
+    int baselen, char *newname, recvflags_t *flags)
+{
+	static int seq;
+	zfs_cmd_t zc = { 0 };
+	int err;
+	prop_changelist_t *clp;
+	zfs_handle_t *zhp;
+
+	zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
+	if (zhp == NULL)
+		return (-1);
+	clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
+	    flags->force ? MS_FORCE : 0);
+	zfs_close(zhp);
+	if (clp == NULL)
+		return (-1);
+	err = changelist_prefix(clp);
+	if (err) {
+		/* Don't leak the changelist on the error path. */
+		changelist_free(clp);
+		return (err);
+	}
+
+	zc.zc_objset_type = DMU_OST_ZFS;
+	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
+
+	if (tryname) {
+		(void) strcpy(newname, tryname);
+
+		(void) strlcpy(zc.zc_value, tryname, sizeof (zc.zc_value));
+
+		if (flags->verbose) {
+			(void) printf("attempting rename %s to %s\n",
+			    zc.zc_name, zc.zc_value);
+		}
+		err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc);
+		if (err == 0)
+			changelist_rename(clp, name, tryname);
+	} else {
+		err = ENOENT;
+	}
+
+	if (err != 0 && strncmp(name+baselen, "recv-", 5) != 0) {
+		seq++;
+
+		(void) strncpy(newname, name, baselen);
+		(void) snprintf(newname+baselen, ZFS_MAXNAMELEN-baselen,
+		    "recv-%u-%u", getpid(), seq);
+		(void) strlcpy(zc.zc_value, newname, sizeof (zc.zc_value));
+
+		if (flags->verbose) {
+			(void) printf("failed - trying rename %s to %s\n",
+			    zc.zc_name, zc.zc_value);
+		}
+		err = ioctl(hdl->libzfs_fd, ZFS_IOC_RENAME, &zc);
+		if (err == 0)
+			changelist_rename(clp, name, newname);
+		if (err && flags->verbose) {
+			(void) printf("failed (%u) - "
+			    "will try again on next pass\n", errno);
+		}
+		/* always report EAGAIN so the caller retries the pass */
+		err = EAGAIN;
+	} else if (flags->verbose) {
+		if (err == 0)
+			(void) printf("success\n");
+		else
+			(void) printf("failed (%u)\n", errno);
+	}
+
+	(void) changelist_postfix(clp);
+	changelist_free(clp);
+
+	return (err);
+}
+
+/*
+ * Destroy dataset 'name' (deferred for snapshots on pools that support
+ * user refs).  If the destroy fails, or a deferred destroy leaves the
+ * snapshot in place, fall back to renaming it out of the way with
+ * recv_rename().  Returns 0 or an error.
+ */
+static int
+recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
+    char *newname, recvflags_t *flags)
+{
+	zfs_cmd_t zc = { 0 };
+	int err = 0;
+	prop_changelist_t *clp;
+	zfs_handle_t *zhp;
+	boolean_t defer = B_FALSE;
+	int spa_version;
+
+	zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
+	if (zhp == NULL)
+		return (-1);
+	clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
+	    flags->force ? MS_FORCE : 0);
+	if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT &&
+	    zfs_spa_version(zhp, &spa_version) == 0 &&
+	    spa_version >= SPA_VERSION_USERREFS)
+		defer = B_TRUE;
+	zfs_close(zhp);
+	if (clp == NULL)
+		return (-1);
+	err = changelist_prefix(clp);
+	if (err) {
+		/* Don't leak the changelist on the error path. */
+		changelist_free(clp);
+		return (err);
+	}
+
+	zc.zc_objset_type = DMU_OST_ZFS;
+	zc.zc_defer_destroy = defer;
+	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
+
+	if (flags->verbose)
+		(void) printf("attempting destroy %s\n", zc.zc_name);
+	err = ioctl(hdl->libzfs_fd, ZFS_IOC_DESTROY, &zc);
+	if (err == 0) {
+		if (flags->verbose)
+			(void) printf("success\n");
+		changelist_remove(clp, zc.zc_name);
+	}
+
+	(void) changelist_postfix(clp);
+	changelist_free(clp);
+
+	/*
+	 * Deferred destroy might destroy the snapshot or only mark it to be
+	 * destroyed later, and it returns success in either case.
+	 */
+	if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
+	    ZFS_TYPE_SNAPSHOT))) {
+		err = recv_rename(hdl, name, NULL, baselen, newname, flags);
+	}
+
+	return (err);
+}
+
+/* State for the guid_to_name() depth-first dataset search. */
+typedef struct guid_to_name_data {
+	uint64_t guid;	/* guid being searched for */
+	char *name;	/* output: name of the matching dataset */
+	char *skip;	/* subtree already searched, or NULL */
+} guid_to_name_data_t;
+
+/*
+ * zfs_iter_children() callback for guid_to_name(): depth-first search
+ * for the dataset whose guid matches gtnd->guid.  Returns EEXIST (with
+ * gtnd->name filled in) on a match to abort the iteration, 0 otherwise.
+ * Consumes (closes) 'zhp' on every path.
+ */
+static int
+guid_to_name_cb(zfs_handle_t *zhp, void *arg)
+{
+	guid_to_name_data_t *gtnd = arg;
+	int err;
+
+	if (gtnd->skip != NULL &&
+	    strcmp(zhp->zfs_name, gtnd->skip) == 0) {
+		/* Close the handle here too; the callback owns it. */
+		zfs_close(zhp);
+		return (0);
+	}
+
+	if (zhp->zfs_dmustats.dds_guid == gtnd->guid) {
+		(void) strcpy(gtnd->name, zhp->zfs_name);
+		zfs_close(zhp);
+		return (EEXIST);
+	}
+
+	err = zfs_iter_children(zhp, guid_to_name_cb, gtnd);
+	zfs_close(zhp);
+	return (err);
+}
+
+/*
+ * Attempt to find the local dataset associated with this guid.  In the case of
+ * multiple matches, we attempt to find the "best" match by searching
+ * progressively larger portions of the hierarchy.  This allows one to send a
+ * tree of datasets individually and guarantee that we will find the source
+ * guid within that hierarchy, even if there are multiple matches elsewhere.
+ */
+static int
+guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
+    char *name)
+{
+	/* exhaustive search all local snapshots */
+	char pname[ZFS_MAXNAMELEN];
+	guid_to_name_data_t gtnd;
+	int err = 0;
+	zfs_handle_t *zhp;
+	char *cp;
+
+	gtnd.guid = guid;
+	gtnd.name = name;
+	gtnd.skip = NULL;
+
+	(void) strlcpy(pname, parent, sizeof (pname));
+
+	/*
+	 * Search progressively larger portions of the hierarchy.  This will
+	 * select the "most local" version of the origin snapshot in the case
+	 * that there are multiple matching snapshots in the system.
+	 */
+	while ((cp = strrchr(pname, '/')) != NULL) {
+
+		/* Chop off the last component and open the parent */
+		*cp = '\0';
+		zhp = make_dataset_handle(hdl, pname);
+
+		if (zhp == NULL)
+			continue;
+
+		err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
+		zfs_close(zhp);
+		if (err == EEXIST)
+			return (0);
+
+		/*
+		 * Remember the dataset that we already searched, so we
+		 * skip it next time through.
+		 *
+		 * NOTE(review): gtnd.skip aliases pname, which is truncated
+		 * again on the next iteration, so the skip string stops
+		 * naming the subtree just searched.  Since that subtree held
+		 * no match, the effect is redundant re-searching rather than
+		 * a wrong result — verify against upstream before relying
+		 * on the skip optimization.
+		 */
+		gtnd.skip = pname;
+	}
+
+	return (ENOENT);
+}
+
+/*
+ * Compare the creation txgs of the snapshots with guids 'guid1' and
+ * 'guid2' (looked up in 'avl').  Returns -1 if guid1's snapshot was
+ * created before guid2's, +1 if after, and 0 if at the same txg.  By
+ * convention, guid2 == 0 yields 0 and guid1 == 0 yields +1.
+ * (The previous comment had the +1/-1 senses inverted.)
+ *
+ * NOTE(review): -1 is also returned when either snapshot cannot be
+ * opened, which callers cannot distinguish from "created before".
+ */
+static int
+created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
+    uint64_t guid1, uint64_t guid2)
+{
+	nvlist_t *nvfs;
+	char *fsname, *snapname;
+	char buf[ZFS_MAXNAMELEN];
+	int rv;
+	zfs_handle_t *guid1hdl, *guid2hdl;
+	uint64_t create1, create2;
+
+	if (guid2 == 0)
+		return (0);
+	if (guid1 == 0)
+		return (1);
+
+	/* Reconstruct "fs@snap" for guid1 and open it. */
+	nvfs = fsavl_find(avl, guid1, &snapname);
+	VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+	(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
+	guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
+	if (guid1hdl == NULL)
+		return (-1);
+
+	/* Likewise for guid2. */
+	nvfs = fsavl_find(avl, guid2, &snapname);
+	VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+	(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
+	guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
+	if (guid2hdl == NULL) {
+		zfs_close(guid1hdl);
+		return (-1);
+	}
+
+	create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
+	create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
+
+	if (create1 < create2)
+		rv = -1;
+	else if (create1 > create2)
+		rv = +1;
+	else
+		rv = 0;
+
+	zfs_close(guid1hdl);
+	zfs_close(guid2hdl);
+
+	return (rv);
+}
+
+/*
+ * Bring the local datasets under 'tofs' in sync with an incremental
+ * replication stream: promote clones whose origin changed on the sending
+ * side, destroy snapshots/filesystems absent from the stream (with -F),
+ * and rename datasets to match the stream.  Multiple passes are made
+ * (via the "again" label) while progress is still possible.
+ *
+ * Returns 0 on success, nonzero if another pass would still be needed
+ * (soft error) or on a hard error.
+ */
+static int
+recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
+    recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
+    nvlist_t *renamed)
+{
+	nvlist_t *local_nv, *deleted = NULL;
+	avl_tree_t *local_avl;
+	nvpair_t *fselem, *nextfselem;
+	char *fromsnap;
+	char newname[ZFS_MAXNAMELEN];
+	char guidname[32];	/* holds a uint64_t in decimal */
+	int error;
+	boolean_t needagain, progress, recursive;
+	char *s1, *s2;
+
+	VERIFY(0 == nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap));
+
+	recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
+	    ENOENT);
+
+	if (flags->dryrun)
+		return (0);
+
+again:
+	needagain = progress = B_FALSE;
+
+	VERIFY(0 == nvlist_alloc(&deleted, NV_UNIQUE_NAME, 0));
+
+	if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
+	    recursive, &local_nv, &local_avl)) != 0) {
+		nvlist_free(deleted);	/* don't leak on early return */
+		return (error);
+	}
+
+	/*
+	 * Process deletes and renames
+	 */
+	for (fselem = nvlist_next_nvpair(local_nv, NULL);
+	    fselem; fselem = nextfselem) {
+		nvlist_t *nvfs, *snaps;
+		nvlist_t *stream_nvfs = NULL;
+		nvpair_t *snapelem, *nextsnapelem;
+		uint64_t fromguid = 0;
+		uint64_t originguid = 0;
+		uint64_t stream_originguid = 0;
+		uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
+		char *fsname, *stream_fsname;
+
+		nextfselem = nvlist_next_nvpair(local_nv, fselem);
+
+		VERIFY(0 == nvpair_value_nvlist(fselem, &nvfs));
+		VERIFY(0 == nvlist_lookup_nvlist(nvfs, "snaps", &snaps));
+		VERIFY(0 == nvlist_lookup_string(nvfs, "name", &fsname));
+		VERIFY(0 == nvlist_lookup_uint64(nvfs, "parentfromsnap",
+		    &parent_fromsnap_guid));
+		(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
+
+		/*
+		 * First find the stream's fs, so we can check for
+		 * a different origin (due to "zfs promote")
+		 */
+		for (snapelem = nvlist_next_nvpair(snaps, NULL);
+		    snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
+			uint64_t thisguid;
+
+			VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid));
+			stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
+
+			if (stream_nvfs != NULL)
+				break;
+		}
+
+		/* check for promote */
+		(void) nvlist_lookup_uint64(stream_nvfs, "origin",
+		    &stream_originguid);
+		if (stream_nvfs && originguid != stream_originguid) {
+			switch (created_before(hdl, local_avl,
+			    stream_originguid, originguid)) {
+			case 1: {
+				/* promote it! */
+				zfs_cmd_t zc = { 0 };
+				nvlist_t *origin_nvfs;
+				char *origin_fsname;
+
+				if (flags->verbose)
+					(void) printf("promoting %s\n", fsname);
+
+				origin_nvfs = fsavl_find(local_avl, originguid,
+				    NULL);
+				VERIFY(0 == nvlist_lookup_string(origin_nvfs,
+				    "name", &origin_fsname));
+				(void) strlcpy(zc.zc_value, origin_fsname,
+				    sizeof (zc.zc_value));
+				(void) strlcpy(zc.zc_name, fsname,
+				    sizeof (zc.zc_name));
+				error = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
+				if (error == 0)
+					progress = B_TRUE;
+				break;
+			}
+			default:
+				break;
+			case -1:
+				fsavl_destroy(local_avl);
+				nvlist_free(local_nv);
+				nvlist_free(deleted);	/* don't leak */
+				return (-1);
+			}
+			/*
+			 * We had/have the wrong origin, therefore our
+			 * list of snapshots is wrong.  Need to handle
+			 * them on the next pass.
+			 */
+			needagain = B_TRUE;
+			continue;
+		}
+
+		for (snapelem = nvlist_next_nvpair(snaps, NULL);
+		    snapelem; snapelem = nextsnapelem) {
+			uint64_t thisguid;
+			char *stream_snapname;
+			nvlist_t *found, *props;
+
+			nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
+
+			VERIFY(0 == nvpair_value_uint64(snapelem, &thisguid));
+			found = fsavl_find(stream_avl, thisguid,
+			    &stream_snapname);
+
+			/* check for delete */
+			if (found == NULL) {
+				char name[ZFS_MAXNAMELEN];
+
+				if (!flags->force)
+					continue;
+
+				(void) snprintf(name, sizeof (name), "%s@%s",
+				    fsname, nvpair_name(snapelem));
+
+				error = recv_destroy(hdl, name,
+				    strlen(fsname)+1, newname, flags);
+				if (error)
+					needagain = B_TRUE;
+				else
+					progress = B_TRUE;
+				/*
+				 * %llu with an explicit cast is correct for
+				 * uint64_t on both ILP32 and LP64; %lu is
+				 * only 64 bits wide on LP64.
+				 */
+				(void) snprintf(guidname, sizeof (guidname),
+				    "%llu", (u_longlong_t)thisguid);
+				(void) nvlist_add_boolean(deleted, guidname);
+				continue;
+			}
+
+			stream_nvfs = found;
+
+			if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
+			    &props) && 0 == nvlist_lookup_nvlist(props,
+			    stream_snapname, &props)) {
+				zfs_cmd_t zc = { 0 };
+
+				zc.zc_cookie = B_TRUE; /* received */
+				(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
+				    "%s@%s", fsname, nvpair_name(snapelem));
+				if (zcmd_write_src_nvlist(hdl, &zc,
+				    props) == 0) {
+					(void) zfs_ioctl(hdl,
+					    ZFS_IOC_SET_PROP, &zc);
+					zcmd_free_nvlists(&zc);
+				}
+			}
+
+			/* check for different snapname */
+			if (strcmp(nvpair_name(snapelem),
+			    stream_snapname) != 0) {
+				char name[ZFS_MAXNAMELEN];
+				char tryname[ZFS_MAXNAMELEN];
+
+				(void) snprintf(name, sizeof (name), "%s@%s",
+				    fsname, nvpair_name(snapelem));
+				(void) snprintf(tryname, sizeof (tryname),
+				    "%s@%s", fsname, stream_snapname);
+
+				error = recv_rename(hdl, name, tryname,
+				    strlen(fsname)+1, newname, flags);
+				if (error)
+					needagain = B_TRUE;
+				else
+					progress = B_TRUE;
+			}
+
+			if (strcmp(stream_snapname, fromsnap) == 0)
+				fromguid = thisguid;
+		}
+
+		/* check for delete */
+		if (stream_nvfs == NULL) {
+			if (!flags->force)
+				continue;
+
+			error = recv_destroy(hdl, fsname, strlen(tofs)+1,
+			    newname, flags);
+			if (error)
+				needagain = B_TRUE;
+			else
+				progress = B_TRUE;
+			(void) snprintf(guidname, sizeof (guidname), "%llu",
+			    (u_longlong_t)parent_fromsnap_guid);
+			(void) nvlist_add_boolean(deleted, guidname);
+			continue;
+		}
+
+		if (fromguid == 0) {
+			if (flags->verbose) {
+				(void) printf("local fs %s does not have "
+				    "fromsnap (%s in stream); must have "
+				    "been deleted locally; ignoring\n",
+				    fsname, fromsnap);
+			}
+			continue;
+		}
+
+		VERIFY(0 == nvlist_lookup_string(stream_nvfs,
+		    "name", &stream_fsname));
+		VERIFY(0 == nvlist_lookup_uint64(stream_nvfs,
+		    "parentfromsnap", &stream_parent_fromsnap_guid));
+
+		s1 = strrchr(fsname, '/');
+		s2 = strrchr(stream_fsname, '/');
+
+		/*
+		 * Check if we're going to rename based on parent guid change
+		 * and the current parent guid was also deleted. If it was then
+		 * rename will fail and is likely unneeded, so avoid this and
+		 * force an early retry to determine the new
+		 * parent_fromsnap_guid.
+		 */
+		if (stream_parent_fromsnap_guid != 0 &&
+		    parent_fromsnap_guid != 0 &&
+		    stream_parent_fromsnap_guid != parent_fromsnap_guid) {
+			(void) snprintf(guidname, sizeof (guidname), "%llu",
+			    (u_longlong_t)parent_fromsnap_guid);
+			if (nvlist_exists(deleted, guidname)) {
+				progress = B_TRUE;
+				needagain = B_TRUE;
+				goto doagain;
+			}
+		}
+
+		/*
+		 * Check for rename. If the exact receive path is specified, it
+		 * does not count as a rename, but we still need to check the
+		 * datasets beneath it.
+		 */
+		if ((stream_parent_fromsnap_guid != 0 &&
+		    parent_fromsnap_guid != 0 &&
+		    stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
+		    ((flags->isprefix || strcmp(tofs, fsname) != 0) &&
+		    (s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
+			nvlist_t *parent;
+			char tryname[ZFS_MAXNAMELEN];
+
+			parent = fsavl_find(local_avl,
+			    stream_parent_fromsnap_guid, NULL);
+			/*
+			 * NB: parent might not be found if we used the
+			 * tosnap for stream_parent_fromsnap_guid,
+			 * because the parent is a newly-created fs;
+			 * we'll be able to rename it after we recv the
+			 * new fs.
+			 */
+			if (parent != NULL) {
+				char *pname;
+
+				VERIFY(0 == nvlist_lookup_string(parent, "name",
+				    &pname));
+				(void) snprintf(tryname, sizeof (tryname),
+				    "%s%s", pname, strrchr(stream_fsname, '/'));
+			} else {
+				tryname[0] = '\0';
+				if (flags->verbose) {
+					(void) printf("local fs %s new parent "
+					    "not found\n", fsname);
+				}
+			}
+
+			newname[0] = '\0';
+
+			error = recv_rename(hdl, fsname, tryname,
+			    strlen(tofs)+1, newname, flags);
+
+			if (renamed != NULL && newname[0] != '\0') {
+				VERIFY(0 == nvlist_add_boolean(renamed,
+				    newname));
+			}
+
+			if (error)
+				needagain = B_TRUE;
+			else
+				progress = B_TRUE;
+		}
+	}
+
+doagain:
+	fsavl_destroy(local_avl);
+	nvlist_free(local_nv);
+	nvlist_free(deleted);
+
+	if (needagain && progress) {
+		/* do another pass to fix up temporary names */
+		if (flags->verbose)
+			(void) printf("another pass:\n");
+		goto again;
+	}
+
+	return (needagain);
+}
+
+/*
+ * Receive a DMU_COMPOUNDSTREAM stream package (as produced by
+ * "zfs send -R"/"zfs send -I"): read the stream-wide nvlist payload and
+ * the END record (verifying its checksum), reconcile local state against
+ * the stream via recv_incremental_replication(), then receive each
+ * contained substream through zfs_receive_impl().
+ *
+ * Returns 0 on success, -1 if any substream failed, -2 on a soft error
+ * from the rename/destroy reconciliation passes.
+ */
+static int
+zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
+    recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
+    char **top_zfs, int cleanup_fd, uint64_t *action_handlep)
+{
+	nvlist_t *stream_nv = NULL;
+	avl_tree_t *stream_avl = NULL;
+	char *fromsnap = NULL;
+	char *cp;
+	char tofs[ZFS_MAXNAMELEN];
+	char sendfs[ZFS_MAXNAMELEN];
+	char errbuf[1024];
+	dmu_replay_record_t drre;
+	int error;
+	boolean_t anyerr = B_FALSE;
+	boolean_t softerr = B_FALSE;
+	boolean_t recursive;
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot receive"));
+
+	/* Caller guarantees this is the BEGIN record of a compound stream. */
+	assert(drr->drr_type == DRR_BEGIN);
+	assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
+	assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
+	    DMU_COMPOUNDSTREAM);
+
+	/*
+	 * Read in the nvlist from the stream.
+	 */
+	if (drr->drr_payloadlen != 0) {
+		error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
+		    &stream_nv, flags->byteswap, zc);
+		if (error) {
+			error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+			goto out;
+		}
+	}
+
+	/* Absence of "not_recursive" means the stream is recursive (-R). */
+	recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
+	    ENOENT);
+
+	if (recursive && strchr(destname, '@')) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "cannot specify snapshot name for multi-snapshot stream"));
+		error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+		goto out;
+	}
+
+	/*
+	 * Read in the end record and verify checksum.
+	 */
+	if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
+	    flags->byteswap, NULL)))
+		goto out;
+	if (flags->byteswap) {
+		drre.drr_type = BSWAP_32(drre.drr_type);
+		drre.drr_u.drr_end.drr_checksum.zc_word[0] =
+		    BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
+		drre.drr_u.drr_end.drr_checksum.zc_word[1] =
+		    BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
+		drre.drr_u.drr_end.drr_checksum.zc_word[2] =
+		    BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
+		drre.drr_u.drr_end.drr_checksum.zc_word[3] =
+		    BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
+	}
+	if (drre.drr_type != DRR_END) {
+		error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+		goto out;
+	}
+	if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "incorrect header checksum"));
+		error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+		goto out;
+	}
+
+	(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
+
+	if (drr->drr_payloadlen != 0) {
+		nvlist_t *stream_fss;
+
+		/* Build the guid->fs AVL index of the stream's datasets. */
+		VERIFY(0 == nvlist_lookup_nvlist(stream_nv, "fss",
+		    &stream_fss));
+		if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "couldn't allocate avl tree"));
+			error = zfs_error(hdl, EZFS_NOMEM, errbuf);
+			goto out;
+		}
+
+		if (fromsnap != NULL) {
+			nvlist_t *renamed = NULL;
+			nvpair_t *pair = NULL;
+
+			/*
+			 * Compute the local fs ("tofs") the stream applies to,
+			 * accounting for -d (isprefix) and -e (istail).
+			 */
+			(void) strlcpy(tofs, destname, ZFS_MAXNAMELEN);
+			if (flags->isprefix) {
+				struct drr_begin *drrb = &drr->drr_u.drr_begin;
+				int i;
+
+				if (flags->istail) {
+					cp = strrchr(drrb->drr_toname, '/');
+					if (cp == NULL) {
+						(void) strlcat(tofs, "/",
+						    ZFS_MAXNAMELEN);
+						i = 0;
+					} else {
+						i = (cp - drrb->drr_toname);
+					}
+				} else {
+					i = strcspn(drrb->drr_toname, "/@");
+				}
+				/* zfs_receive_one() will create_parents() */
+				(void) strlcat(tofs, &drrb->drr_toname[i],
+				    ZFS_MAXNAMELEN);
+				*strchr(tofs, '@') = '\0';
+			}
+
+			if (recursive && !flags->dryrun && !flags->nomount) {
+				VERIFY(0 == nvlist_alloc(&renamed,
+				    NV_UNIQUE_NAME, 0));
+			}
+
+			softerr = recv_incremental_replication(hdl, tofs, flags,
+			    stream_nv, stream_avl, renamed);
+
+			/* Unmount renamed filesystems before receiving. */
+			while ((pair = nvlist_next_nvpair(renamed,
+			    pair)) != NULL) {
+				zfs_handle_t *zhp;
+				prop_changelist_t *clp = NULL;
+
+				zhp = zfs_open(hdl, nvpair_name(pair),
+				    ZFS_TYPE_FILESYSTEM);
+				if (zhp != NULL) {
+					clp = changelist_gather(zhp,
+					    ZFS_PROP_MOUNTPOINT, 0, 0);
+					zfs_close(zhp);
+					if (clp != NULL) {
+						softerr |=
+						    changelist_prefix(clp);
+						changelist_free(clp);
+					}
+				}
+			}
+
+			nvlist_free(renamed);
+		}
+	}
+
+	/*
+	 * Get the fs specified by the first path in the stream (the top level
+	 * specified by 'zfs send') and pass it to each invocation of
+	 * zfs_receive_one().
+	 */
+	(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
+	    ZFS_MAXNAMELEN);
+	if ((cp = strchr(sendfs, '@')) != NULL)
+		*cp = '\0';
+
+	/* Finally, receive each contained stream */
+	do {
+		/*
+		 * we should figure out if it has a recoverable
+		 * error, in which case do a recv_skip() and drive on.
+		 * Note, if we fail due to already having this guid,
+		 * zfs_receive_one() will take care of it (ie,
+		 * recv_skip() and return 0).
+		 */
+		error = zfs_receive_impl(hdl, destname, flags, fd,
+		    sendfs, stream_nv, stream_avl, top_zfs, cleanup_fd,
+		    action_handlep);
+		if (error == ENODATA) {
+			/* ENODATA marks the clean end of the package. */
+			error = 0;
+			break;
+		}
+		anyerr |= error;
+	} while (error == 0);
+
+	if (drr->drr_payloadlen != 0 && fromsnap != NULL) {
+		/*
+		 * Now that we have the fs's they sent us, try the
+		 * renames again.
+		 */
+		softerr = recv_incremental_replication(hdl, tofs, flags,
+		    stream_nv, stream_avl, NULL);
+	}
+
+out:
+	fsavl_destroy(stream_avl);
+	if (stream_nv)
+		nvlist_free(stream_nv);
+	if (softerr)
+		error = -2;
+	if (anyerr)
+		error = -1;
+	return (error);
+}
+
+/*
+ * Report to stderr how many property errors were dropped when the
+ * kernel truncated the error nvlist (ZPROP_N_MORE_ERRORS).
+ */
+static void
+trunc_prop_errs(int truncated)
+{
+	ASSERT(truncated != 0);
+
+	if (truncated != 1) {
+		(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+		    "%d more properties could not be set\n"), truncated);
+		return;
+	}
+	(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
+	    "1 more property could not be set\n"));
+}
+
+/*
+ * Read and discard one (sub)stream from fd, up to and including its
+ * DRR_END record, byteswapping record headers as needed.  Used for dry
+ * runs and to consume streams whose destination already exists.
+ *
+ * Returns 0 once DRR_END is seen, -1 if the stream ends prematurely or
+ * memory is exhausted, or a zfs_error() result for a malformed stream.
+ */
+static int
+recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
+{
+	dmu_replay_record_t *drr;
+	void *buf = malloc(1<<20);
+	char errbuf[1024];
+
+	/* Out of memory: fail rather than dereferencing NULL below. */
+	if (buf == NULL)
+		return (-1);
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot receive:"));
+
+	/* XXX would be great to use lseek if possible... */
+	drr = buf;
+
+	while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
+	    byteswap, NULL) == 0) {
+		if (byteswap)
+			drr->drr_type = BSWAP_32(drr->drr_type);
+
+		switch (drr->drr_type) {
+		case DRR_BEGIN:
+			/* NB: not to be used on v2 stream packages */
+			if (drr->drr_payloadlen != 0) {
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "invalid substream header"));
+				free(buf);	/* don't leak on error */
+				return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+			}
+			break;
+
+		case DRR_END:
+			free(buf);
+			return (0);
+
+		case DRR_OBJECT:
+			if (byteswap) {
+				drr->drr_u.drr_object.drr_bonuslen =
+				    BSWAP_32(drr->drr_u.drr_object.
+				    drr_bonuslen);
+			}
+			/* Skip the bonus payload (padded to 8 bytes). */
+			(void) recv_read(hdl, fd, buf,
+			    P2ROUNDUP(drr->drr_u.drr_object.drr_bonuslen, 8),
+			    B_FALSE, NULL);
+			break;
+
+		case DRR_WRITE:
+			if (byteswap) {
+				drr->drr_u.drr_write.drr_length =
+				    BSWAP_64(drr->drr_u.drr_write.drr_length);
+			}
+			(void) recv_read(hdl, fd, buf,
+			    drr->drr_u.drr_write.drr_length, B_FALSE, NULL);
+			break;
+		case DRR_SPILL:
+			if (byteswap) {
+				/*
+				 * Swap drr_spill's own length in place; the
+				 * previous code byteswapped into the
+				 * drr_write union member and then read the
+				 * still-unswapped drr_spill length below.
+				 */
+				drr->drr_u.drr_spill.drr_length =
+				    BSWAP_64(drr->drr_u.drr_spill.drr_length);
+			}
+			(void) recv_read(hdl, fd, buf,
+			    drr->drr_u.drr_spill.drr_length, B_FALSE, NULL);
+			break;
+		case DRR_WRITE_BYREF:
+		case DRR_FREEOBJECTS:
+		case DRR_FREE:
+			/* No payload to skip for these record types. */
+			break;
+
+		default:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid record type"));
+			free(buf);	/* don't leak on error */
+			return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+		}
+	}
+
+	free(buf);
+	return (-1);
+}
+
+/*
+ * Restores a backup of tosnap from the file descriptor specified by infd.
+ *
+ * Handles a single substream: computes the destination snapshot name from
+ * the name embedded in the stream plus the -d/-e flags, locates a clone
+ * origin if the stream requires one, performs the ZFS_IOC_RECV ioctl,
+ * applies received snapshot properties, and reports errors.  Returns 0
+ * on success, -1 on failure, or the result of recv_skip() for dry runs.
+ */
+static int
+zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
+    recvflags_t *flags, dmu_replay_record_t *drr,
+    dmu_replay_record_t *drr_noswap, const char *sendfs,
+    nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs, int cleanup_fd,
+    uint64_t *action_handlep)
+{
+	zfs_cmd_t zc = { 0 };
+	time_t begin_time;
+	int ioctl_err, ioctl_errno, err;
+	char *cp;
+	struct drr_begin *drrb = &drr->drr_u.drr_begin;
+	char errbuf[1024];
+	char prop_errbuf[1024];
+	const char *chopprefix;
+	boolean_t newfs = B_FALSE;
+	boolean_t stream_wantsnewfs;
+	uint64_t parent_snapguid = 0;
+	prop_changelist_t *clp = NULL;
+	nvlist_t *snapprops_nvlist = NULL;
+	zprop_errflags_t prop_errflags;
+	boolean_t recursive;
+
+	begin_time = time(NULL);
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot receive"));
+
+	recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
+	    ENOENT);
+
+	if (stream_avl != NULL) {
+		char *snapname;
+		nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
+		    &snapname);
+		nvlist_t *props;
+		int ret;
+
+		/* Pull the received properties for this fs from the stream. */
+		(void) nvlist_lookup_uint64(fs, "parentfromsnap",
+		    &parent_snapguid);
+		err = nvlist_lookup_nvlist(fs, "props", &props);
+		if (err)
+			VERIFY(0 == nvlist_alloc(&props, NV_UNIQUE_NAME, 0));
+
+		if (flags->canmountoff) {
+			VERIFY(0 == nvlist_add_uint64(props,
+			    zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0));
+		}
+		ret = zcmd_write_src_nvlist(hdl, &zc, props);
+		if (err)
+			nvlist_free(props);
+
+		if (0 == nvlist_lookup_nvlist(fs, "snapprops", &props)) {
+			VERIFY(0 == nvlist_lookup_nvlist(props,
+			    snapname, &snapprops_nvlist));
+		}
+
+		if (ret != 0)
+			return (-1);
+	}
+
+	cp = NULL;
+
+	/*
+	 * Determine how much of the snapshot name stored in the stream
+	 * we are going to tack on to the name they specified on the
+	 * command line, and how much we are going to chop off.
+	 *
+	 * If they specified a snapshot, chop the entire name stored in
+	 * the stream.
+	 */
+	if (flags->istail) {
+		/*
+		 * A filesystem was specified with -e. We want to tack on only
+		 * the tail of the sent snapshot path.
+		 */
+		if (strchr(tosnap, '@')) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+			    "argument - snapshot not allowed with -e"));
+			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+		}
+
+		chopprefix = strrchr(sendfs, '/');
+
+		if (chopprefix == NULL) {
+			/*
+			 * The tail is the poolname, so we need to
+			 * prepend a path separator.
+			 */
+			int len = strlen(drrb->drr_toname);
+			cp = malloc(len + 2);
+			cp[0] = '/';
+			(void) strcpy(&cp[1], drrb->drr_toname);
+			chopprefix = cp;
+		} else {
+			chopprefix = drrb->drr_toname + (chopprefix - sendfs);
+		}
+	} else if (flags->isprefix) {
+		/*
+		 * A filesystem was specified with -d. We want to tack on
+		 * everything but the first element of the sent snapshot path
+		 * (all but the pool name).
+		 */
+		if (strchr(tosnap, '@')) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+			    "argument - snapshot not allowed with -d"));
+			return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+		}
+
+		chopprefix = strchr(drrb->drr_toname, '/');
+		if (chopprefix == NULL)
+			chopprefix = strchr(drrb->drr_toname, '@');
+	} else if (strchr(tosnap, '@') == NULL) {
+		/*
+		 * If a filesystem was specified without -d or -e, we want to
+		 * tack on everything after the fs specified by 'zfs send'.
+		 */
+		chopprefix = drrb->drr_toname + strlen(sendfs);
+	} else {
+		/* A snapshot was specified as an exact path (no -d or -e). */
+		if (recursive) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "cannot specify snapshot name for multi-snapshot "
+			    "stream"));
+			return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+		}
+		chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
+	}
+
+	ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
+	ASSERT(chopprefix > drrb->drr_toname);
+	ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname));
+	ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
+	    chopprefix[0] == '\0');
+
+	/*
+	 * Determine name of destination snapshot, store in zc_value.
+	 */
+	(void) strcpy(zc.zc_top_ds, tosnap);
+	(void) strcpy(zc.zc_value, tosnap);
+	/*
+	 * NOTE(review): strncat()'s bound should be the remaining space,
+	 * not the total buffer size; zc_value is large enough in practice
+	 * but strlcat() would be safer — confirm before changing.
+	 */
+	(void) strncat(zc.zc_value, chopprefix, sizeof (zc.zc_value));
+	free(cp);
+	if (!zfs_name_valid(zc.zc_value, ZFS_TYPE_SNAPSHOT)) {
+		zcmd_free_nvlists(&zc);
+		return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
+	}
+
+	/*
+	 * Determine the name of the origin snapshot, store in zc_string.
+	 */
+	if (drrb->drr_flags & DRR_FLAG_CLONE) {
+		if (guid_to_name(hdl, zc.zc_value,
+		    drrb->drr_fromguid, zc.zc_string) != 0) {
+			zcmd_free_nvlists(&zc);
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "local origin for clone %s does not exist"),
+			    zc.zc_value);
+			return (zfs_error(hdl, EZFS_NOENT, errbuf));
+		}
+		if (flags->verbose)
+			(void) printf("found clone origin %s\n", zc.zc_string);
+	}
+
+	/* Full stream or clone stream: the receive creates a new fs. */
+	stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
+	    (drrb->drr_flags & DRR_FLAG_CLONE));
+
+	if (stream_wantsnewfs) {
+		/*
+		 * if the parent fs does not exist, look for it based on
+		 * the parent snap GUID
+		 */
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot receive new filesystem stream"));
+
+		(void) strcpy(zc.zc_name, zc.zc_value);
+		cp = strrchr(zc.zc_name, '/');
+		if (cp)
+			*cp = '\0';
+		if (cp &&
+		    !zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+			char suffix[ZFS_MAXNAMELEN];
+			(void) strcpy(suffix, strrchr(zc.zc_value, '/'));
+			if (guid_to_name(hdl, zc.zc_name, parent_snapguid,
+			    zc.zc_value) == 0) {
+				*strchr(zc.zc_value, '@') = '\0';
+				(void) strcat(zc.zc_value, suffix);
+			}
+		}
+	} else {
+		/*
+		 * if the fs does not exist, look for it based on the
+		 * fromsnap GUID
+		 */
+		(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+		    "cannot receive incremental stream"));
+
+		(void) strcpy(zc.zc_name, zc.zc_value);
+		*strchr(zc.zc_name, '@') = '\0';
+
+		/*
+		 * If the exact receive path was specified and this is the
+		 * topmost path in the stream, then if the fs does not exist we
+		 * should look no further.
+		 */
+		if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
+		    strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
+		    !zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+			char snap[ZFS_MAXNAMELEN];
+			(void) strcpy(snap, strchr(zc.zc_value, '@'));
+			if (guid_to_name(hdl, zc.zc_name, drrb->drr_fromguid,
+			    zc.zc_value) == 0) {
+				*strchr(zc.zc_value, '@') = '\0';
+				(void) strcat(zc.zc_value, snap);
+			}
+		}
+	}
+
+	(void) strcpy(zc.zc_name, zc.zc_value);
+	*strchr(zc.zc_name, '@') = '\0';
+
+	if (zfs_dataset_exists(hdl, zc.zc_name, ZFS_TYPE_DATASET)) {
+		zfs_handle_t *zhp;
+
+		/*
+		 * Destination fs exists.  Therefore this should either
+		 * be an incremental, or the stream specifies a new fs
+		 * (full stream or clone) and they want us to blow it
+		 * away (and have therefore specified -F and removed any
+		 * snapshots).
+		 */
+		if (stream_wantsnewfs) {
+			if (!flags->force) {
+				zcmd_free_nvlists(&zc);
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "destination '%s' exists\n"
+				    "must specify -F to overwrite it"),
+				    zc.zc_name);
+				return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+			}
+			if (ioctl(hdl->libzfs_fd, ZFS_IOC_SNAPSHOT_LIST_NEXT,
+			    &zc) == 0) {
+				zcmd_free_nvlists(&zc);
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "destination has snapshots (eg. %s)\n"
+				    "must destroy them to overwrite it"),
+				    zc.zc_name);
+				return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+			}
+		}
+
+		if ((zhp = zfs_open(hdl, zc.zc_name,
+		    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
+			zcmd_free_nvlists(&zc);
+			return (-1);
+		}
+
+		if (stream_wantsnewfs &&
+		    zhp->zfs_dmustats.dds_origin[0]) {
+			zcmd_free_nvlists(&zc);
+			zfs_close(zhp);
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "destination '%s' is a clone\n"
+			    "must destroy it to overwrite it"),
+			    zc.zc_name);
+			return (zfs_error(hdl, EZFS_EXISTS, errbuf));
+		}
+
+		if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
+		    stream_wantsnewfs) {
+			/* We can't do online recv in this case */
+			clp = changelist_gather(zhp, ZFS_PROP_NAME, 0, 0);
+			if (clp == NULL) {
+				zfs_close(zhp);
+				zcmd_free_nvlists(&zc);
+				return (-1);
+			}
+			if (changelist_prefix(clp) != 0) {
+				changelist_free(clp);
+				zfs_close(zhp);
+				zcmd_free_nvlists(&zc);
+				return (-1);
+			}
+		}
+		zfs_close(zhp);
+	} else {
+		/*
+		 * Destination filesystem does not exist.  Therefore we better
+		 * be creating a new filesystem (either from a full backup, or
+		 * a clone).  It would therefore be invalid if the user
+		 * specified only the pool name (i.e. if the destination name
+		 * contained no slash character).
+		 */
+		if (!stream_wantsnewfs ||
+		    (cp = strrchr(zc.zc_name, '/')) == NULL) {
+			zcmd_free_nvlists(&zc);
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "destination '%s' does not exist"), zc.zc_name);
+			return (zfs_error(hdl, EZFS_NOENT, errbuf));
+		}
+
+		/*
+		 * Trim off the final dataset component so we perform the
+		 * recvbackup ioctl to the filesystems's parent.
+		 */
+		*cp = '\0';
+
+		if (flags->isprefix && !flags->istail && !flags->dryrun &&
+		    create_parents(hdl, zc.zc_value, strlen(tosnap)) != 0) {
+			zcmd_free_nvlists(&zc);
+			return (zfs_error(hdl, EZFS_BADRESTORE, errbuf));
+		}
+
+		newfs = B_TRUE;
+	}
+
+	zc.zc_begin_record = drr_noswap->drr_u.drr_begin;
+	zc.zc_cookie = infd;
+	zc.zc_guid = flags->force;
+	if (flags->verbose) {
+		(void) printf("%s %s stream of %s into %s\n",
+		    flags->dryrun ? "would receive" : "receiving",
+		    drrb->drr_fromguid ? "incremental" : "full",
+		    drrb->drr_toname, zc.zc_value);
+		(void) fflush(stdout);
+	}
+
+	if (flags->dryrun) {
+		zcmd_free_nvlists(&zc);
+		return (recv_skip(hdl, infd, flags->byteswap));
+	}
+
+	zc.zc_nvlist_dst = (uint64_t)(uintptr_t)prop_errbuf;
+	zc.zc_nvlist_dst_size = sizeof (prop_errbuf);
+	zc.zc_cleanup_fd = cleanup_fd;
+	zc.zc_action_handle = *action_handlep;
+
+	err = ioctl_err = zfs_ioctl(hdl, ZFS_IOC_RECV, &zc);
+	ioctl_errno = errno;
+	prop_errflags = (zprop_errflags_t)zc.zc_obj;
+
+	if (err == 0) {
+		/* Report any per-property errors the kernel packed for us. */
+		nvlist_t *prop_errors;
+		VERIFY(0 == nvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
+		    zc.zc_nvlist_dst_size, &prop_errors, 0));
+
+		nvpair_t *prop_err = NULL;
+
+		while ((prop_err = nvlist_next_nvpair(prop_errors,
+		    prop_err)) != NULL) {
+			char tbuf[1024];
+			zfs_prop_t prop;
+			int intval;
+
+			prop = zfs_name_to_prop(nvpair_name(prop_err));
+			(void) nvpair_value_int32(prop_err, &intval);
+			if (strcmp(nvpair_name(prop_err),
+			    ZPROP_N_MORE_ERRORS) == 0) {
+				trunc_prop_errs(intval);
+				break;
+			} else {
+				(void) snprintf(tbuf, sizeof (tbuf),
+				    dgettext(TEXT_DOMAIN,
+				    "cannot receive %s property on %s"),
+				    nvpair_name(prop_err), zc.zc_name);
+				zfs_setprop_error(hdl, prop, intval, tbuf);
+			}
+		}
+		nvlist_free(prop_errors);
+	}
+
+	zc.zc_nvlist_dst = 0;
+	zc.zc_nvlist_dst_size = 0;
+	zcmd_free_nvlists(&zc);
+
+	if (err == 0 && snapprops_nvlist) {
+		/* Apply the received snapshot properties. */
+		zfs_cmd_t zc2 = { 0 };
+
+		(void) strcpy(zc2.zc_name, zc.zc_value);
+		zc2.zc_cookie = B_TRUE; /* received */
+		if (zcmd_write_src_nvlist(hdl, &zc2, snapprops_nvlist) == 0) {
+			(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc2);
+			zcmd_free_nvlists(&zc2);
+		}
+	}
+
+	if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
+		/*
+		 * It may be that this snapshot already exists,
+		 * in which case we want to consume & ignore it
+		 * rather than failing.
+		 */
+		avl_tree_t *local_avl;
+		nvlist_t *local_nv, *fs;
+		cp = strchr(zc.zc_value, '@');
+
+		/*
+		 * XXX Do this faster by just iterating over snaps in
+		 * this fs.  Also if zc_value does not exist, we will
+		 * get a strange "does not exist" error message.
+		 */
+		*cp = '\0';
+		if (gather_nvlist(hdl, zc.zc_value, NULL, NULL, B_FALSE,
+		    &local_nv, &local_avl) == 0) {
+			*cp = '@';
+			fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
+			fsavl_destroy(local_avl);
+			nvlist_free(local_nv);
+
+			if (fs != NULL) {
+				if (flags->verbose) {
+					(void) printf("snap %s already exists; "
+					    "ignoring\n", zc.zc_value);
+				}
+				err = ioctl_err = recv_skip(hdl, infd,
+				    flags->byteswap);
+			}
+		}
+		*cp = '@';
+	}
+
+	if (ioctl_err != 0) {
+		switch (ioctl_errno) {
+		case ENODEV:
+			cp = strchr(zc.zc_value, '@');
+			*cp = '\0';
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "most recent snapshot of %s does not\n"
+			    "match incremental source"), zc.zc_value);
+			(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
+			*cp = '@';
+			break;
+		case ETXTBSY:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "destination %s has been modified\n"
+			    "since most recent snapshot"), zc.zc_name);
+			(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
+			break;
+		case EEXIST:
+			cp = strchr(zc.zc_value, '@');
+			if (newfs) {
+				/* it's the containing fs that exists */
+				*cp = '\0';
+			}
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "destination already exists"));
+			(void) zfs_error_fmt(hdl, EZFS_EXISTS,
+			    dgettext(TEXT_DOMAIN, "cannot restore to %s"),
+			    zc.zc_value);
+			*cp = '@';
+			break;
+		case EINVAL:
+			(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+			break;
+		case ECKSUM:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "invalid stream (checksum mismatch)"));
+			(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
+			break;
+		case ENOTSUP:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "pool must be upgraded to receive this stream."));
+			(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
+			break;
+		case EDQUOT:
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "destination %s space quota exceeded"), zc.zc_name);
+			(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
+			break;
+		default:
+			(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
+		}
+	}
+
+	/*
+	 * Mount the target filesystem (if created).  Also mount any
+	 * children of the target filesystem if we did a replication
+	 * receive (indicated by stream_avl being non-NULL).
+	 */
+	cp = strchr(zc.zc_value, '@');
+	if (cp && (ioctl_err == 0 || !newfs)) {
+		zfs_handle_t *h;
+
+		*cp = '\0';
+		h = zfs_open(hdl, zc.zc_value,
+		    ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
+		if (h != NULL) {
+			if (h->zfs_type == ZFS_TYPE_VOLUME) {
+				*cp = '@';
+			} else if (newfs || stream_avl) {
+				/*
+				 * Track the first/top of hierarchy fs,
+				 * for mounting and sharing later.
+				 */
+				if (top_zfs && *top_zfs == NULL)
+					*top_zfs = zfs_strdup(hdl, zc.zc_value);
+			}
+			zfs_close(h);
+		}
+		*cp = '@';
+	}
+
+	if (clp) {
+		err |= changelist_postfix(clp);
+		changelist_free(clp);
+	}
+
+	if (prop_errflags & ZPROP_ERR_NOCLEAR) {
+		(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
+		    "failed to clear unreceived properties on %s"),
+		    zc.zc_name);
+		(void) fprintf(stderr, "\n");
+	}
+	if (prop_errflags & ZPROP_ERR_NORESTORE) {
+		(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
+		    "failed to restore original properties on %s"),
+		    zc.zc_name);
+		(void) fprintf(stderr, "\n");
+	}
+
+	if (err || ioctl_err)
+		return (-1);
+
+	*action_handlep = zc.zc_action_handle;
+
+	if (flags->verbose) {
+		char buf1[64];
+		char buf2[64];
+		uint64_t bytes = zc.zc_cookie;
+		time_t delta = time(NULL) - begin_time;
+		if (delta == 0)
+			delta = 1;
+		zfs_nicenum(bytes, buf1, sizeof (buf1));
+		/* bound by buf2's own size (was sizeof (buf1)) */
+		zfs_nicenum(bytes/delta, buf2, sizeof (buf2));
+
+		(void) printf("received %sB stream in %lu seconds (%sB/sec)\n",
+		    buf1, delta, buf2);
+	}
+
+	return (0);
+}
+
+/*
+ * Read the DRR_BEGIN record of a send stream from 'infd', validate it
+ * (magic number, feature flags, header type, snapshot name) and dispatch
+ * to zfs_receive_one() for a plain substream or zfs_receive_package()
+ * for a compound stream.
+ *
+ * Returns 0 on success, ENODATA when the record read is the trailing END
+ * record of a package, or a libzfs error on failure.
+ */
+static int
+zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
+    int infd, const char *sendfs, nvlist_t *stream_nv, avl_tree_t *stream_avl,
+    char **top_zfs, int cleanup_fd, uint64_t *action_handlep)
+{
+	int err;
+	dmu_replay_record_t drr, drr_noswap;
+	struct drr_begin *drrb = &drr.drr_u.drr_begin;
+	char errbuf[1024];
+	zio_cksum_t zcksum = { 0 };
+	uint64_t featureflags;
+	int hdrtype;
+
+	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+	    "cannot receive"));
+
+	/* with -d/-e the target fs must already exist */
+	if (flags->isprefix &&
+	    !zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
+		    "(%s) does not exist"), tosnap);
+		return (zfs_error(hdl, EZFS_NOENT, errbuf));
+	}
+
+	/* read in the BEGIN record */
+	if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
+	    &zcksum)))
+		return (err);
+
+	if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
+		/* It's the double end record at the end of a package */
+		return (ENODATA);
+	}
+
+	/* the kernel needs the non-byteswapped begin record */
+	drr_noswap = drr;
+
+	flags->byteswap = B_FALSE;
+	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
+		/*
+		 * We computed the checksum in the wrong byteorder in
+		 * recv_read() above; do it again correctly.
+		 */
+		bzero(&zcksum, sizeof (zio_cksum_t));
+		fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
+		flags->byteswap = B_TRUE;
+
+		/* byteswap the header fields we inspect below */
+		drr.drr_type = BSWAP_32(drr.drr_type);
+		drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
+		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
+		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
+		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
+		drrb->drr_type = BSWAP_32(drrb->drr_type);
+		drrb->drr_flags = BSWAP_32(drrb->drr_flags);
+		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
+		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
+	}
+
+	if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+		    "stream (bad magic number)"));
+		return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+	}
+
+	featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
+	hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
+
+	if (!DMU_STREAM_SUPPORTED(featureflags) ||
+	    (hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
+		/*
+		 * NOTE(review): %lx assumes sizeof (long) == 8; on an ILP32
+		 * build this would mismatch the uint64_t argument — confirm.
+		 */
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "stream has unsupported feature, feature flags = %lx"),
+		    featureflags);
+		return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+	}
+
+	if (strchr(drrb->drr_toname, '@') == NULL) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
+		    "stream (bad snapshot name)"));
+		return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
+	}
+
+	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
+		char nonpackage_sendfs[ZFS_MAXNAMELEN];
+		if (sendfs == NULL) {
+			/*
+			 * We were not called from zfs_receive_package(). Get
+			 * the fs specified by 'zfs send'.
+			 */
+			char *cp;
+			(void) strlcpy(nonpackage_sendfs,
+			    drr.drr_u.drr_begin.drr_toname, ZFS_MAXNAMELEN);
+			if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
+				*cp = '\0';
+			sendfs = nonpackage_sendfs;
+		}
+		return (zfs_receive_one(hdl, infd, tosnap, flags,
+		    &drr, &drr_noswap, sendfs, stream_nv, stream_avl,
+		    top_zfs, cleanup_fd, action_handlep));
+	} else {
+		assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
+		    DMU_COMPOUNDSTREAM);
+		return (zfs_receive_package(hdl, infd, tosnap, flags,
+		    &drr, &zcksum, top_zfs, cleanup_fd, action_handlep));
+	}
+}
+
+/*
+ * Restores a backup of tosnap from the file descriptor specified by infd.
+ * Return 0 on total success, -2 if some things couldn't be
+ * destroyed/renamed/promoted, -1 if some things couldn't be received.
+ * (-1 will override -2).
+ */
+int
+zfs_receive(libzfs_handle_t *hdl, const char *tosnap, recvflags_t *flags,
+    int infd, avl_tree_t *stream_avl)
+{
+	char *top_zfs = NULL;
+	int err;
+	int cleanup_fd;
+	uint64_t action_handle = 0;
+
+	/*
+	 * fd on ZFS_DEV held for the duration of the receive; presumably
+	 * lets the kernel clean up a partial receive on close — confirm.
+	 */
+	cleanup_fd = open(ZFS_DEV, O_RDWR|O_EXCL);
+	VERIFY(cleanup_fd >= 0);
+
+	err = zfs_receive_impl(hdl, tosnap, flags, infd, NULL, NULL,
+	    stream_avl, &top_zfs, cleanup_fd, &action_handle);
+
+	VERIFY(0 == close(cleanup_fd));
+
+	/*
+	 * On success, mount and share the top-most filesystem created by
+	 * the receive (the changelist also picks up its children).
+	 */
+	if (err == 0 && !flags->nomount && top_zfs) {
+		zfs_handle_t *zhp;
+		prop_changelist_t *clp;
+
+		zhp = zfs_open(hdl, top_zfs, ZFS_TYPE_FILESYSTEM);
+		if (zhp != NULL) {
+			clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
+			    CL_GATHER_MOUNT_ALWAYS, 0);
+			zfs_close(zhp);
+			if (clp != NULL) {
+				/* mount and share received datasets */
+				err = changelist_postfix(clp);
+				changelist_free(clp);
+			}
+		}
+		/* clp is only inspected when zhp != NULL (short-circuit) */
+		if (zhp == NULL || clp == NULL || err)
+			err = -1;
+	}
+	if (top_zfs)
+		free(top_zfs);
+
+	return (err);
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
new file mode 100644
index 0000000000000000000000000000000000000000..6af5f77d24df4b1a6d702c3d427c3e68ea690afb
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_status.c
@@ -0,0 +1,449 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+/*
+ * This file contains the functions which analyze the status of a pool.  This
+ * includes both the status of an active pool, as well as the status of
+ * exported pools.  Returns one of the ZPOOL_STATUS_* defines describing the
+ * status of the pool.  This status is independent (to a certain degree) from
+ * the state of the pool.  A pool's state describes only whether or not it is
+ * capable of providing the necessary fault tolerance for data.  The status
+ * describes the overall status of devices.  A pool that is online can still
+ * have a device that is experiencing errors.
+ *
+ * Only a subset of the possible faults can be detected using 'zpool status',
+ * and not all possible errors correspond to a FMA message ID.  The explanation
+ * is left up to the caller, depending on whether it is a live pool or an
+ * import.
+ */
+
+#include <libzfs.h>
+#include <string.h>
+#include <unistd.h>
+#include "libzfs_impl.h"
+#include "zfeature_common.h"
+
+/*
+ * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
+ * in libzfs.h.  Note that there are some status results which go past the end
+ * of this table, and hence have no associated message ID.
+ */
+static char *zfs_msgid_table[] = {
+	"ZFS-8000-14",
+	"ZFS-8000-2Q",
+	"ZFS-8000-3C",
+	"ZFS-8000-4J",
+	"ZFS-8000-5E",
+	"ZFS-8000-6X",
+	"ZFS-8000-72",
+	"ZFS-8000-8A",
+	"ZFS-8000-9P",
+	"ZFS-8000-A5",
+	"ZFS-8000-EY",
+	"ZFS-8000-HC",
+	"ZFS-8000-JQ",
+	"ZFS-8000-K4",
+};
+
+/* Number of table entries; status values >= NMSGID have no message ID. */
+#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
+
+/* ARGSUSED */
+/* Leaf predicate: device could not be opened at all (missing). */
+static int
+vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
+{
+	return (state == VDEV_STATE_CANT_OPEN &&
+	    aux == VDEV_AUX_OPEN_FAILED);
+}
+
+/* ARGSUSED */
+/* Leaf predicate: device has been faulted. */
+static int
+vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
+{
+	return (state == VDEV_STATE_FAULTED);
+}
+
+/* ARGSUSED */
+/* Leaf predicate: device is degraded or has a nonzero error count. */
+static int
+vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
+{
+	return (state == VDEV_STATE_DEGRADED || errs != 0);
+}
+
+/* ARGSUSED */
+/* Leaf predicate: device cannot be opened, for any reason. */
+static int
+vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
+{
+	return (state == VDEV_STATE_CANT_OPEN);
+}
+
+/* ARGSUSED */
+/* Leaf predicate: device was taken offline by the administrator. */
+static int
+vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
+{
+	return (state == VDEV_STATE_OFFLINE);
+}
+
+/* ARGSUSED */
+/* Leaf predicate: device was physically removed. */
+static int
+vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
+{
+	return (state == VDEV_STATE_REMOVED);
+}
+
+/*
+ * Detect whether any leaf device in the tree rooted at 'vdev' matches the
+ * predicate 'func', which is passed each leaf's (state, aux, total error
+ * count).  Recurses through children and L2 cache devices; returns B_TRUE
+ * on the first match.
+ */
+static boolean_t
+find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
+{
+	nvlist_t **child;
+	vdev_stat_t *vs;
+	uint_t c, children;
+	char *type;
+
+	/*
+	 * Ignore problems within a 'replacing' vdev, since we're presumably in
+	 * the process of repairing any such errors, and don't want to call them
+	 * out again.  We'll pick up the fact that a resilver is happening
+	 * later.
+	 */
+	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
+	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
+		return (B_FALSE);
+
+	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
+	    &children) == 0) {
+		for (c = 0; c < children; c++)
+			if (find_vdev_problem(child[c], func))
+				return (B_TRUE);
+	} else {
+		/* leaf vdev: apply the predicate to its stats */
+		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
+		    (uint64_t **)&vs, &c) == 0);
+
+		if (func(vs->vs_state, vs->vs_aux,
+		    vs->vs_read_errors +
+		    vs->vs_write_errors +
+		    vs->vs_checksum_errors))
+			return (B_TRUE);
+	}
+
+	/*
+	 * Check any L2 cache devs
+	 */
+	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
+	    &children) == 0) {
+		for (c = 0; c < children; c++)
+			if (find_vdev_problem(child[c], func))
+				return (B_TRUE);
+	}
+
+	return (B_FALSE);
+}
+
+/*
+ * Active pool health status.
+ *
+ * To determine the status for a pool, we make several passes over the config,
+ * picking the most egregious error we find.  In order of importance, we do the
+ * following:
+ *
+ *	- Check for a complete and valid configuration
+ *	- Look for any faulted or missing devices in a non-replicated config
+ *	- Check for any data errors
+ *	- Check for any faulted or missing devices in a replicated config
+ *	- Look for any devices showing errors
+ *	- Check for any resilvering devices
+ *
+ * There can obviously be multiple errors within a single pool, so this routine
+ * only picks the most damaging of all the current errors to report.
+ *
+ * 'isimport' suppresses the checks that only make sense for a live pool
+ * (persistent data errors, per-device error counts) and selects where the
+ * enabled-feature list is found.  Returns a ZPOOL_STATUS_* value, or
+ * ZPOOL_STATUS_OK if nothing is wrong.
+ */
+static zpool_status_t
+check_status(nvlist_t *config, boolean_t isimport)
+{
+	nvlist_t *nvroot;
+	vdev_stat_t *vs;
+	pool_scan_stat_t *ps = NULL;
+	uint_t vsc, psc;
+	uint64_t nerr;
+	uint64_t version;
+	uint64_t stateval;
+	uint64_t suspended;
+	uint64_t hostid = 0;
+
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
+	    &version) == 0);
+	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &vsc) == 0);
+	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
+	    &stateval) == 0);
+
+	/*
+	 * Currently resilvering a vdev
+	 */
+	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
+	    (uint64_t **)&ps, &psc);
+	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
+	    ps->pss_state == DSS_SCANNING)
+		return (ZPOOL_STATUS_RESILVERING);
+
+	/*
+	 * Pool last accessed by another system.
+	 */
+	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
+	if (hostid != 0 && (unsigned long)hostid != gethostid() &&
+	    stateval == POOL_STATE_ACTIVE)
+		return (ZPOOL_STATUS_HOSTID_MISMATCH);
+
+	/*
+	 * Newer on-disk version.
+	 */
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
+		return (ZPOOL_STATUS_VERSION_NEWER);
+
+	/*
+	 * Unsupported feature(s).
+	 */
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
+		nvlist_t *nvinfo;
+
+		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
+		    &nvinfo) == 0);
+		/* distinguish read-only-importable from fully unusable */
+		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
+			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
+		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
+	}
+
+	/*
+	 * Check that the config is complete.
+	 */
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
+		return (ZPOOL_STATUS_BAD_GUID_SUM);
+
+	/*
+	 * Check whether the pool has suspended due to failed I/O.
+	 */
+	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
+	    &suspended) == 0) {
+		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
+			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
+		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
+	}
+
+	/*
+	 * Could not read a log.
+	 */
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
+		return (ZPOOL_STATUS_BAD_LOG);
+	}
+
+	/*
+	 * Bad devices in non-replicated config.
+	 */
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    find_vdev_problem(nvroot, vdev_faulted))
+		return (ZPOOL_STATUS_FAULTED_DEV_NR);
+
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    find_vdev_problem(nvroot, vdev_missing))
+		return (ZPOOL_STATUS_MISSING_DEV_NR);
+
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    find_vdev_problem(nvroot, vdev_broken))
+		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
+
+	/*
+	 * Corrupted pool metadata
+	 */
+	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
+	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
+		return (ZPOOL_STATUS_CORRUPT_POOL);
+
+	/*
+	 * Persistent data errors.
+	 */
+	if (!isimport) {
+		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
+		    &nerr) == 0 && nerr != 0)
+			return (ZPOOL_STATUS_CORRUPT_DATA);
+	}
+
+	/*
+	 * Missing devices in a replicated config.
+	 */
+	if (find_vdev_problem(nvroot, vdev_faulted))
+		return (ZPOOL_STATUS_FAULTED_DEV_R);
+	if (find_vdev_problem(nvroot, vdev_missing))
+		return (ZPOOL_STATUS_MISSING_DEV_R);
+	if (find_vdev_problem(nvroot, vdev_broken))
+		return (ZPOOL_STATUS_CORRUPT_LABEL_R);
+
+	/*
+	 * Devices with errors
+	 */
+	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
+		return (ZPOOL_STATUS_FAILING_DEV);
+
+	/*
+	 * Offlined devices
+	 */
+	if (find_vdev_problem(nvroot, vdev_offlined))
+		return (ZPOOL_STATUS_OFFLINE_DEV);
+
+	/*
+	 * Removed device
+	 */
+	if (find_vdev_problem(nvroot, vdev_removed))
+		return (ZPOOL_STATUS_REMOVED_DEV);
+
+	/*
+	 * Outdated, but usable, version
+	 */
+	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
+		return (ZPOOL_STATUS_VERSION_OLDER);
+
+	/*
+	 * Usable pool with disabled features
+	 */
+	if (version >= SPA_VERSION_FEATURES) {
+		int i;
+		nvlist_t *feat;
+
+		if (isimport) {
+			feat = fnvlist_lookup_nvlist(config,
+			    ZPOOL_CONFIG_LOAD_INFO);
+			feat = fnvlist_lookup_nvlist(feat,
+			    ZPOOL_CONFIG_ENABLED_FEAT);
+		} else {
+			feat = fnvlist_lookup_nvlist(config,
+			    ZPOOL_CONFIG_FEATURE_STATS);
+		}
+
+		/* any known feature absent from the list is still disabled */
+		for (i = 0; i < SPA_FEATURES; i++) {
+			zfeature_info_t *fi = &spa_feature_table[i];
+			if (!nvlist_exists(feat, fi->fi_guid))
+				return (ZPOOL_STATUS_FEAT_DISABLED);
+		}
+	}
+
+	return (ZPOOL_STATUS_OK);
+}
+
+/*
+ * Return the status of the given active pool and, through 'msgid', the FMA
+ * message ID associated with it (NULL when the status has none).
+ */
+zpool_status_t
+zpool_get_status(zpool_handle_t *zhp, char **msgid)
+{
+	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
+
+	if (ret >= NMSGID)
+		*msgid = NULL;
+	else
+		*msgid = zfs_msgid_table[ret];
+
+	return (ret);
+}
+
+/*
+ * Like zpool_get_status(), but for a pool being imported: the caller passes
+ * the candidate config directly and import-only checks are applied.
+ */
+zpool_status_t
+zpool_import_status(nvlist_t *config, char **msgid)
+{
+	zpool_status_t ret = check_status(config, B_TRUE);
+
+	if (ret >= NMSGID)
+		*msgid = NULL;
+	else
+		*msgid = zfs_msgid_table[ret];
+
+	return (ret);
+}
+
+/*
+ * Print one row of the DDT histogram.  The bucket label is 2^h (rendered
+ * via zfs_nicenum()); h == -1 prints the "Total" row.  Rows with no blocks
+ * are suppressed.
+ */
+static void
+dump_ddt_stat(const ddt_stat_t *dds, int h)
+{
+	char refcnt[6];		/* "Total" plus NUL fits exactly */
+	char blocks[6], lsize[6], psize[6], dsize[6];
+	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];
+
+	if (dds == NULL || dds->dds_blocks == 0)
+		return;
+
+	if (h == -1)
+		(void) strcpy(refcnt, "Total");
+	else
+		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));
+
+	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
+	zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
+	zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
+	zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
+	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
+	zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
+	zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
+	zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));
+
+	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
+	    refcnt,
+	    blocks, lsize, psize, dsize,
+	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
+}
+
+/*
+ * Print the DDT histogram and the column totals: a two-row header, 64
+ * refcount buckets, then the "Total" row from 'dds_total'.
+ */
+void
+zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
+{
+	int h;
+
+	(void) printf("\n");
+
+	(void) printf("bucket   "
+	    "           allocated             "
+	    "          referenced          \n");
+	(void) printf("______   "
+	    "______________________________   "
+	    "______________________________\n");
+
+	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
+	    "refcnt",
+	    "blocks", "LSIZE", "PSIZE", "DSIZE",
+	    "blocks", "LSIZE", "PSIZE", "DSIZE");
+
+	(void) printf("%6s   %6s   %5s   %5s   %5s   %6s   %5s   %5s   %5s\n",
+	    "------",
+	    "------", "-----", "-----", "-----",
+	    "------", "-----", "-----", "-----");
+
+	for (h = 0; h < 64; h++)
+		dump_ddt_stat(&ddh->ddh_stat[h], h);
+
+	dump_ddt_stat(dds_total, -1);
+
+	(void) printf("\n");
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
new file mode 100644
index 0000000000000000000000000000000000000000..2b802a529a70b162b3766cd7062e90a8c1c0e27a
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_util.c
@@ -0,0 +1,1537 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
+/*
+ * Internal utility routines for the ZFS library.
+ */
+
+#include <sys/param.h>
+#include <sys/linker.h>
+#include <sys/module.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <libintl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <math.h>
+#include <sys/mnttab.h>
+#include <sys/mntent.h>
+#include <sys/types.h>
+
+#include <libzfs.h>
+
+#include "libzfs_impl.h"
+#include "zfs_prop.h"
+#include "zfeature_common.h"
+
+/*
+ * NOTE(review): not referenced in this chunk; presumably consulted by
+ * debug/assertion machinery elsewhere — confirm before removing.
+ */
+int aok;
+
+/* Return the EZFS_* error code most recently recorded on this handle. */
+int
+libzfs_errno(libzfs_handle_t *hdl)
+{
+	return (hdl->libzfs_error);
+}
+
+/* Return the "action" string (what was being attempted) for the last error. */
+const char *
+libzfs_error_action(libzfs_handle_t *hdl)
+{
+	return (hdl->libzfs_action);
+}
+
+/*
+ * Return a human-readable description of the last error on this handle:
+ * the detail text recorded via zfs_error_aux() when present, otherwise a
+ * canned, localized string for the recorded EZFS_* code.
+ */
+const char *
+libzfs_error_description(libzfs_handle_t *hdl)
+{
+	/* a specific aux description, when recorded, wins over the generic one */
+	if (hdl->libzfs_desc[0] != '\0')
+		return (hdl->libzfs_desc);
+
+	switch (hdl->libzfs_error) {
+	case EZFS_NOMEM:
+		return (dgettext(TEXT_DOMAIN, "out of memory"));
+	case EZFS_BADPROP:
+		return (dgettext(TEXT_DOMAIN, "invalid property value"));
+	case EZFS_PROPREADONLY:
+		return (dgettext(TEXT_DOMAIN, "read-only property"));
+	case EZFS_PROPTYPE:
+		return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
+		    "datasets of this type"));
+	case EZFS_PROPNONINHERIT:
+		return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
+	case EZFS_PROPSPACE:
+		return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
+	case EZFS_BADTYPE:
+		return (dgettext(TEXT_DOMAIN, "operation not applicable to "
+		    "datasets of this type"));
+	case EZFS_BUSY:
+		return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
+	case EZFS_EXISTS:
+		return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
+	case EZFS_NOENT:
+		return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
+	case EZFS_BADSTREAM:
+		return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
+	case EZFS_DSREADONLY:
+		return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
+	case EZFS_VOLTOOBIG:
+		return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
+		    "this system"));
+	case EZFS_INVALIDNAME:
+		return (dgettext(TEXT_DOMAIN, "invalid name"));
+	case EZFS_BADRESTORE:
+		return (dgettext(TEXT_DOMAIN, "unable to restore to "
+		    "destination"));
+	case EZFS_BADBACKUP:
+		return (dgettext(TEXT_DOMAIN, "backup failed"));
+	case EZFS_BADTARGET:
+		return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
+	case EZFS_NODEVICE:
+		return (dgettext(TEXT_DOMAIN, "no such device in pool"));
+	case EZFS_BADDEV:
+		return (dgettext(TEXT_DOMAIN, "invalid device"));
+	case EZFS_NOREPLICAS:
+		return (dgettext(TEXT_DOMAIN, "no valid replicas"));
+	case EZFS_RESILVERING:
+		return (dgettext(TEXT_DOMAIN, "currently resilvering"));
+	case EZFS_BADVERSION:
+		return (dgettext(TEXT_DOMAIN, "unsupported version or "
+		    "feature"));
+	case EZFS_POOLUNAVAIL:
+		return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
+	case EZFS_DEVOVERFLOW:
+		return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
+	case EZFS_BADPATH:
+		return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
+	case EZFS_CROSSTARGET:
+		return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
+		    "pools"));
+	case EZFS_ZONED:
+		return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
+	case EZFS_MOUNTFAILED:
+		return (dgettext(TEXT_DOMAIN, "mount failed"));
+	case EZFS_UMOUNTFAILED:
+		return (dgettext(TEXT_DOMAIN, "umount failed"));
+	case EZFS_UNSHARENFSFAILED:
+		return (dgettext(TEXT_DOMAIN, "unshare(1M) failed"));
+	case EZFS_SHARENFSFAILED:
+		return (dgettext(TEXT_DOMAIN, "share(1M) failed"));
+	case EZFS_UNSHARESMBFAILED:
+		return (dgettext(TEXT_DOMAIN, "smb remove share failed"));
+	case EZFS_SHARESMBFAILED:
+		return (dgettext(TEXT_DOMAIN, "smb add share failed"));
+	case EZFS_PERM:
+		return (dgettext(TEXT_DOMAIN, "permission denied"));
+	case EZFS_NOSPC:
+		return (dgettext(TEXT_DOMAIN, "out of space"));
+	case EZFS_FAULT:
+		return (dgettext(TEXT_DOMAIN, "bad address"));
+	case EZFS_IO:
+		return (dgettext(TEXT_DOMAIN, "I/O error"));
+	case EZFS_INTR:
+		return (dgettext(TEXT_DOMAIN, "signal received"));
+	case EZFS_ISSPARE:
+		return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
+		    "spare"));
+	case EZFS_INVALCONFIG:
+		return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
+	case EZFS_RECURSIVE:
+		return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
+	case EZFS_NOHISTORY:
+		return (dgettext(TEXT_DOMAIN, "no history available"));
+	case EZFS_POOLPROPS:
+		return (dgettext(TEXT_DOMAIN, "failed to retrieve "
+		    "pool properties"));
+	case EZFS_POOL_NOTSUP:
+		return (dgettext(TEXT_DOMAIN, "operation not supported "
+		    "on this type of pool"));
+	case EZFS_POOL_INVALARG:
+		return (dgettext(TEXT_DOMAIN, "invalid argument for "
+		    "this pool operation"));
+	case EZFS_NAMETOOLONG:
+		return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
+	case EZFS_OPENFAILED:
+		return (dgettext(TEXT_DOMAIN, "open failed"));
+	case EZFS_NOCAP:
+		return (dgettext(TEXT_DOMAIN,
+		    "disk capacity information could not be retrieved"));
+	case EZFS_LABELFAILED:
+		return (dgettext(TEXT_DOMAIN, "write of label failed"));
+	case EZFS_BADWHO:
+		return (dgettext(TEXT_DOMAIN, "invalid user/group"));
+	case EZFS_BADPERM:
+		return (dgettext(TEXT_DOMAIN, "invalid permission"));
+	case EZFS_BADPERMSET:
+		return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
+	case EZFS_NODELEGATION:
+		return (dgettext(TEXT_DOMAIN, "delegated administration is "
+		    "disabled on pool"));
+	case EZFS_BADCACHE:
+		return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
+	case EZFS_ISL2CACHE:
+		return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
+	case EZFS_VDEVNOTSUP:
+		return (dgettext(TEXT_DOMAIN, "vdev specification is not "
+		    "supported"));
+	case EZFS_NOTSUP:
+		return (dgettext(TEXT_DOMAIN, "operation not supported "
+		    "on this dataset"));
+	case EZFS_ACTIVE_SPARE:
+		return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
+		    "device"));
+	case EZFS_UNPLAYED_LOGS:
+		return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
+		    "logs"));
+	case EZFS_REFTAG_RELE:
+		return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
+	case EZFS_REFTAG_HOLD:
+		return (dgettext(TEXT_DOMAIN, "tag already exists on this "
+		    "dataset"));
+	case EZFS_TAGTOOLONG:
+		return (dgettext(TEXT_DOMAIN, "tag too long"));
+	case EZFS_PIPEFAILED:
+		return (dgettext(TEXT_DOMAIN, "pipe create failed"));
+	case EZFS_THREADCREATEFAILED:
+		return (dgettext(TEXT_DOMAIN, "thread create failed"));
+	case EZFS_POSTSPLIT_ONLINE:
+		return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
+		    "into a new one"));
+	case EZFS_SCRUBBING:
+		return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
+		    "use 'zpool scrub -s' to cancel current scrub"));
+	case EZFS_NO_SCRUB:
+		return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
+	case EZFS_DIFF:
+		return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
+	case EZFS_DIFFDATA:
+		return (dgettext(TEXT_DOMAIN, "invalid diff data"));
+	case EZFS_POOLREADONLY:
+		return (dgettext(TEXT_DOMAIN, "pool is read-only"));
+	case EZFS_UNKNOWN:
+		return (dgettext(TEXT_DOMAIN, "unknown error"));
+	default:
+		assert(hdl->libzfs_error == 0);
+		return (dgettext(TEXT_DOMAIN, "no error"));
+	}
+}
+
+/*PRINTFLIKE2*/
+/*
+ * Record printf-style detail text for the next error reported on this
+ * handle; consumed (and then cleared) by zfs_verror().
+ */
+void
+zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+
+	(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
+	    fmt, ap);
+	hdl->libzfs_desc_active = 1;
+
+	va_end(ap);
+}
+
+/*
+ * Core error path: format the "action" string, record the EZFS_* code, and
+ * preserve any detail text set via zfs_error_aux() (clearing stale text
+ * otherwise).  When printerr is enabled the error is printed to stderr;
+ * EZFS_UNKNOWN aborts and EZFS_NOMEM exits, since neither is recoverable.
+ */
+static void
+zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
+{
+	(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
+	    fmt, ap);
+	hdl->libzfs_error = error;
+
+	if (hdl->libzfs_desc_active)
+		hdl->libzfs_desc_active = 0;	/* aux text is for this error */
+	else
+		hdl->libzfs_desc[0] = '\0';	/* drop stale aux text */
+
+	if (hdl->libzfs_printerr) {
+		if (error == EZFS_UNKNOWN) {
+			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
+			    "error: %s\n"), libzfs_error_description(hdl));
+			abort();
+		}
+
+		(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
+		    libzfs_error_description(hdl));
+		if (error == EZFS_NOMEM)
+			exit(1);
+	}
+}
+
+/* Report 'msg' verbatim as the action string; always returns -1. */
+int
+zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
+{
+	return (zfs_error_fmt(hdl, error, "%s", msg));
+}
+
+/*PRINTFLIKE3*/
+/*
+ * Report an error with a printf-style action string.  Always returns -1 so
+ * callers can write "return (zfs_error_fmt(...))".
+ */
+int
+zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+
+	zfs_verror(hdl, error, fmt, ap);
+
+	va_end(ap);
+
+	return (-1);
+}
+
+/*
+ * Map errno values common to all operations (permissions, delegation, I/O,
+ * fault, interrupt) to libzfs errors.  Returns -1 if the error was consumed
+ * here, 0 if the caller should apply its operation-specific handling.
+ */
+static int
+zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
+    va_list ap)
+{
+	switch (error) {
+	case EPERM:
+	case EACCES:
+		zfs_verror(hdl, EZFS_PERM, fmt, ap);
+		return (-1);
+
+	case ECANCELED:
+		zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
+		return (-1);
+
+	case EIO:
+		zfs_verror(hdl, EZFS_IO, fmt, ap);
+		return (-1);
+
+	case EFAULT:
+		zfs_verror(hdl, EZFS_FAULT, fmt, ap);
+		return (-1);
+
+	case EINTR:
+		zfs_verror(hdl, EZFS_INTR, fmt, ap);
+		return (-1);
+	}
+
+	return (0);
+}
+
+/* Dataset-level errno translation with a fixed message; always returns -1. */
+int
+zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
+{
+	return (zfs_standard_error_fmt(hdl, error, "%s", msg));
+}
+
+/*PRINTFLIKE3*/
+/*
+ * Translate a dataset-level errno into a libzfs error with a suitable
+ * localized description.  Common errnos are handled by zfs_common_error()
+ * first.  Always returns -1.
+ */
+int
+zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+
+	if (zfs_common_error(hdl, error, fmt, ap) != 0) {
+		va_end(ap);
+		return (-1);
+	}
+
+	switch (error) {
+	case ENXIO:
+	case ENODEV:
+	case EPIPE:
+		zfs_verror(hdl, EZFS_IO, fmt, ap);
+		break;
+
+	case ENOENT:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset does not exist"));
+		zfs_verror(hdl, EZFS_NOENT, fmt, ap);
+		break;
+
+	case ENOSPC:
+	case EDQUOT:
+		/* returns directly: no aux text wanted for space errors */
+		zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
+		va_end(ap);
+		return (-1);
+
+	case EEXIST:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset already exists"));
+		zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
+		break;
+
+	case EBUSY:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "dataset is busy"));
+		zfs_verror(hdl, EZFS_BUSY, fmt, ap);
+		break;
+	case EROFS:
+		zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
+		break;
+	case ENAMETOOLONG:
+		zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
+		break;
+	case ENOTSUP:
+		zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
+		break;
+	case EAGAIN:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "pool I/O is currently suspended"));
+		zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
+		break;
+	default:
+		zfs_error_aux(hdl, strerror(error));
+		zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
+		break;
+	}
+
+	va_end(ap);
+	return (-1);
+}
+
+/* Pool-level errno translation with a fixed message; always returns -1. */
+int
+zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
+{
+	return (zpool_standard_error_fmt(hdl, error, "%s", msg));
+}
+
+/*PRINTFLIKE3*/
+/*
+ * Translate a pool-level errno into a libzfs error with a suitable
+ * localized description.  Common errnos are handled by zfs_common_error()
+ * first.  Always returns -1.
+ */
+int
+zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+
+	if (zfs_common_error(hdl, error, fmt, ap) != 0) {
+		va_end(ap);
+		return (-1);
+	}
+
+	switch (error) {
+	case ENODEV:
+		zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
+		break;
+
+	case ENOENT:
+		zfs_error_aux(hdl,
+		    dgettext(TEXT_DOMAIN, "no such pool or dataset"));
+		zfs_verror(hdl, EZFS_NOENT, fmt, ap);
+		break;
+
+	case EEXIST:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "pool already exists"));
+		zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
+		break;
+
+	case EBUSY:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
+		zfs_verror(hdl, EZFS_BUSY, fmt, ap);
+		break;
+
+	case ENXIO:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "one or more devices is currently unavailable"));
+		zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
+		break;
+
+	case ENAMETOOLONG:
+		zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
+		break;
+
+	case ENOTSUP:
+		zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
+		break;
+
+	case EINVAL:
+		zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
+		break;
+
+	case ENOSPC:
+	case EDQUOT:
+		/* returns directly: no aux text wanted for space errors */
+		zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
+		va_end(ap);
+		return (-1);
+
+	case EAGAIN:
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "pool I/O is currently suspended"));
+		zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
+		break;
+
+	case EROFS:
+		zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
+		break;
+
+	default:
+		zfs_error_aux(hdl, strerror(error));
+		zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
+	}
+
+	va_end(ap);
+	return (-1);
+}
+
+/*
+ * Report an out-of-memory condition against the handle.  Returns -1 (via
+ * zfs_error()) so callers can propagate the failure directly; it does not
+ * abort the process.
+ */
+int
+no_memory(libzfs_handle_t *hdl)
+{
+	return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
+}
+
+/*
+ * A safe form of malloc(): zero-filled allocation that reports failures
+ * through the library handle.  Returns NULL when the allocation fails.
+ */
+void *
+zfs_alloc(libzfs_handle_t *hdl, size_t size)
+{
+	void *data = calloc(1, size);
+
+	if (data == NULL)
+		(void) no_memory(hdl);
+
+	return (data);
+}
+
+/*
+ * A safe form of asprintf() which reports allocation failures through the
+ * library handle and returns NULL in that case.
+ */
+/*PRINTFLIKE2*/
+char *
+zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
+{
+	va_list ap;
+	char *ret;
+	int err;
+
+	va_start(ap, fmt);
+
+	err = vasprintf(&ret, fmt, ap);
+
+	va_end(ap);
+
+	if (err < 0) {
+		(void) no_memory(hdl);
+		/*
+		 * vasprintf() leaves the output pointer undefined on
+		 * failure; return a well-defined NULL instead of an
+		 * indeterminate pointer.
+		 */
+		ret = NULL;
+	}
+
+	return (ret);
+}
+
+/*
+ * A safe form of realloc(), which also zeroes newly allocated space.
+ * On failure an error is reported through the handle and NULL is
+ * returned; as with realloc(), the original buffer is left allocated
+ * and the caller still owns 'ptr' in that case.
+ */
+void *
+zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
+{
+	void *ret;
+
+	if ((ret = realloc(ptr, newsize)) == NULL) {
+		(void) no_memory(hdl);
+		return (NULL);
+	}
+
+	/* bzero() was removed from POSIX.1-2008; memset() is equivalent. */
+	(void) memset((char *)ret + oldsize, 0, newsize - oldsize);
+	return (ret);
+}
+
+/*
+ * A safe form of strdup() which reports allocation failures through the
+ * library handle; returns NULL in that case.
+ */
+char *
+zfs_strdup(libzfs_handle_t *hdl, const char *str)
+{
+	char *copy = strdup(str);
+
+	if (copy == NULL)
+		(void) no_memory(hdl);
+
+	return (copy);
+}
+
+/*
+ * Convert a number to an appropriately human-readable output, e.g.
+ * 1536 -> "1.50K".  The result is written into 'buf' (at most 'buflen'
+ * bytes, NUL-terminated) and the precision is chosen so the string fits
+ * in five characters whenever possible.
+ */
+void
+zfs_nicenum(uint64_t num, char *buf, size_t buflen)
+{
+	uint64_t n = num;
+	int unit = 0;	/* power of 1024; also indexes the suffix table */
+	char u;
+
+	while (n >= 1024) {
+		n /= 1024;
+		unit++;
+	}
+
+	u = " KMGTPE"[unit];
+
+	/*
+	 * Cast to unsigned long long for %llu: uint64_t is not guaranteed
+	 * to be the same type as unsigned long long on every platform.
+	 */
+	if (unit == 0) {
+		(void) snprintf(buf, buflen, "%llu", (unsigned long long)n);
+	} else if ((num & ((1ULL << 10 * unit) - 1)) == 0) {
+		/*
+		 * If this is an even multiple of the base, always display
+		 * without any decimal precision.
+		 */
+		(void) snprintf(buf, buflen, "%llu%c",
+		    (unsigned long long)n, u);
+	} else {
+		/*
+		 * We want to choose a precision that reflects the best choice
+		 * for fitting in 5 characters.  This can get rather tricky when
+		 * we have numbers that are very close to an order of magnitude.
+		 * For example, when displaying 10239 (which is really 9.999K),
+		 * we want only a single place of precision for 10.0K.  We could
+		 * develop some complex heuristics for this, but it's much
+		 * easier just to try each combination in turn.
+		 */
+		int i;
+		for (i = 2; i >= 0; i--) {
+			if (snprintf(buf, buflen, "%.*f%c", i,
+			    (double)num / (1ULL << 10 * unit), u) <= 5)
+				break;
+		}
+	}
+}
+
+/*
+ * Control whether the library prints error messages as errors occur.
+ */
+void
+libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
+{
+	hdl->libzfs_printerr = printerr;
+}
+
+/*
+ * Make sure the "zfs" kernel module is present, loading it on demand.
+ * Returns 0 on success, -1 if the module could not be found or loaded.
+ */
+static int
+libzfs_load(void)
+{
+	if (modfind("zfs") < 0) {
+		/* Not present in kernel, try loading it. */
+		if (kldload("zfs") < 0 || modfind("zfs") < 0) {
+			/* A concurrent load may have raced us; that is fine. */
+			if (errno != EEXIST)
+				return (-1);
+		}
+	}
+	return (0);
+}
+
+/*
+ * Allocate and initialize a library handle: make sure the kernel module
+ * is loaded, open the ZFS control device and the mount table, and set up
+ * the property subsystems.  Returns NULL on any failure (no error can be
+ * recorded since there is no handle to record it on).
+ */
+libzfs_handle_t *
+libzfs_init(void)
+{
+	libzfs_handle_t *hdl;
+
+	if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
+		return (NULL);
+	}
+
+	if (libzfs_load() < 0) {
+		free(hdl);
+		return (NULL);
+	}
+
+	if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR)) < 0) {
+		free(hdl);
+		return (NULL);
+	}
+
+	if ((hdl->libzfs_mnttab = fopen(MNTTAB, "r")) == NULL) {
+		(void) close(hdl->libzfs_fd);
+		free(hdl);
+		return (NULL);
+	}
+
+	/* The sharetab is optional: failure leaves it NULL (checked in fini). */
+	hdl->libzfs_sharetab = fopen(ZFS_EXPORTS_PATH, "r");
+
+	zfs_prop_init();
+	zpool_prop_init();
+	zpool_feature_init();
+	libzfs_mnttab_init(hdl);
+
+	return (hdl);
+}
+
+/*
+ * Release all resources associated with a handle obtained from
+ * libzfs_init(): file descriptors, streams, cached pool handles and
+ * namespace/mnttab state, and finally the handle itself.
+ */
+void
+libzfs_fini(libzfs_handle_t *hdl)
+{
+	(void) close(hdl->libzfs_fd);
+	if (hdl->libzfs_mnttab)
+		(void) fclose(hdl->libzfs_mnttab);
+	if (hdl->libzfs_sharetab)
+		(void) fclose(hdl->libzfs_sharetab);
+	zfs_uninit_libshare(hdl);
+	if (hdl->libzfs_log_str)
+		(void) free(hdl->libzfs_log_str);
+	zpool_free_handles(hdl);
+#ifdef sun
+	libzfs_fru_clear(hdl, B_TRUE);
+#endif
+	namespace_clear(hdl);
+	libzfs_mnttab_fini(hdl);
+	free(hdl);
+}
+
+/*
+ * Return the library handle a pool handle was opened against.
+ */
+libzfs_handle_t *
+zpool_get_handle(zpool_handle_t *zhp)
+{
+	return (zhp->zpool_hdl);
+}
+
+/*
+ * Return the library handle a dataset handle was opened against.
+ */
+libzfs_handle_t *
+zfs_get_handle(zfs_handle_t *zhp)
+{
+	return (zhp->zfs_hdl);
+}
+
+/*
+ * Return the pool handle associated with a dataset handle.
+ */
+zpool_handle_t *
+zfs_get_pool_handle(const zfs_handle_t *zhp)
+{
+	return (zhp->zpool_hdl);
+}
+
+/*
+ * Given a name, determine whether or not it's a valid path
+ * (starts with '/' or "./").  If so, walk the mnttab trying
+ * to match the device number.  If not, treat the path as an
+ * fs/vol/snap name.  Returns NULL on error, printing a message
+ * to stderr for stat/statfs failures and non-ZFS mounts.
+ */
+zfs_handle_t *
+zfs_path_to_zhandle(libzfs_handle_t *hdl, char *path, zfs_type_t argtype)
+{
+	struct stat64 statbuf;
+	struct extmnttab entry;
+	int ret;
+
+	if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
+		/*
+		 * It's not a valid path, assume it's a name of type 'argtype'.
+		 */
+		return (zfs_open(hdl, path, argtype));
+	}
+
+	if (stat64(path, &statbuf) != 0) {
+		(void) fprintf(stderr, "%s: %s\n", path, strerror(errno));
+		return (NULL);
+	}
+
+#ifdef sun
+	/* Solaris: scan the mount table for a matching device number. */
+	rewind(hdl->libzfs_mnttab);
+	while ((ret = getextmntent(hdl->libzfs_mnttab, &entry, 0)) == 0) {
+		if (makedevice(entry.mnt_major, entry.mnt_minor) ==
+		    statbuf.st_dev) {
+			break;
+		}
+	}
+#else
+	/* FreeBSD: ask statfs() directly and convert to an mnttab entry. */
+	{
+		struct statfs sfs;
+
+		ret = statfs(path, &sfs);
+		if (ret == 0)
+			statfs2mnttab(&sfs, &entry);
+		else {
+			(void) fprintf(stderr, "%s: %s\n", path,
+			    strerror(errno));
+		}
+	}
+#endif	/* sun */
+	if (ret != 0) {
+		return (NULL);
+	}
+
+	if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
+		(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
+		    path);
+		return (NULL);
+	}
+
+	return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
+}
+
+/*
+ * Initialize the zc_nvlist_dst member to prepare for receiving an nvlist
+ * from an ioctl().  A zero 'len' selects a 16KB default buffer.  Returns
+ * 0 on success, -1 if the buffer could not be allocated.
+ */
+int
+zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
+{
+	void *buf;
+
+	if (len == 0)
+		len = 16 * 1024;
+
+	zc->zc_nvlist_dst_size = len;
+	buf = zfs_alloc(hdl, zc->zc_nvlist_dst_size);
+	zc->zc_nvlist_dst = (uint64_t)(uintptr_t)buf;
+
+	return (buf == NULL ? -1 : 0);
+}
+
+/*
+ * Called when an ioctl() which returns an nvlist fails with ENOMEM.  This
+ * replaces the destination buffer with one of the size specified in
+ * 'zc_nvlist_dst_size', which was filled in by the kernel to indicate the
+ * actual required size.  Returns 0 on success, -1 on allocation failure.
+ */
+int
+zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
+{
+	void *buf;
+
+	free((void *)(uintptr_t)zc->zc_nvlist_dst);
+	buf = zfs_alloc(hdl, zc->zc_nvlist_dst_size);
+	zc->zc_nvlist_dst = (uint64_t)(uintptr_t)buf;
+
+	return (buf == NULL ? -1 : 0);
+}
+
+/*
+ * Called to free the conf, src and dst nvlist buffers stored in the
+ * command structure.  The fields are zeroed afterwards so that a reused
+ * command structure can never lead to a stale pointer being freed twice.
+ */
+void
+zcmd_free_nvlists(zfs_cmd_t *zc)
+{
+	free((void *)(uintptr_t)zc->zc_nvlist_conf);
+	free((void *)(uintptr_t)zc->zc_nvlist_src);
+	free((void *)(uintptr_t)zc->zc_nvlist_dst);
+	zc->zc_nvlist_conf = 0;
+	zc->zc_nvlist_src = 0;
+	zc->zc_nvlist_dst = 0;
+}
+
+/*
+ * Pack 'nvl' into a freshly allocated native-encoding buffer and store
+ * the buffer pointer and length in the caller-supplied zc fields.
+ * Returns 0 on success, -1 on allocation failure.  The buffer is later
+ * released by zcmd_free_nvlists().
+ */
+static int
+zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
+    nvlist_t *nvl)
+{
+	char *packed;
+	size_t len;
+
+	/* Sizing/packing an in-memory nvlist cannot legitimately fail. */
+	verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
+
+	if ((packed = zfs_alloc(hdl, len)) == NULL)
+		return (-1);
+
+	verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
+
+	*outnv = (uint64_t)(uintptr_t)packed;
+	*outlen = len;
+
+	return (0);
+}
+
+/*
+ * Pack 'nvl' into the zc_nvlist_conf member of the command structure.
+ */
+int
+zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
+{
+	return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
+	    &zc->zc_nvlist_conf_size, nvl));
+}
+
+/*
+ * Pack 'nvl' into the zc_nvlist_src member of the command structure.
+ */
+int
+zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
+{
+	return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
+	    &zc->zc_nvlist_src_size, nvl));
+}
+
+/*
+ * Unpacks an nvlist from the ZFS ioctl command structure.  Any unpack
+ * failure is reported as an out-of-memory error on the handle.
+ */
+int
+zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
+{
+	if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
+	    zc->zc_nvlist_dst_size, nvlp, 0) != 0)
+		return (no_memory(hdl));
+
+	return (0);
+}
+
+/*
+ * Thin wrapper around ioctl() on the ZFS control device.  The pending
+ * history log string (libzfs_log_str) is attached to the command for the
+ * duration of the call, then freed and detached regardless of the
+ * ioctl's outcome.  Returns the raw ioctl() result.
+ */
+int
+zfs_ioctl(libzfs_handle_t *hdl, unsigned long request, zfs_cmd_t *zc)
+{
+	int error;
+
+	zc->zc_history = (uint64_t)(uintptr_t)hdl->libzfs_log_str;
+	error = ioctl(hdl->libzfs_fd, request, zc);
+	if (hdl->libzfs_log_str) {
+		free(hdl->libzfs_log_str);
+		hdl->libzfs_log_str = NULL;
+	}
+	zc->zc_history = 0;
+
+	return (error);
+}
+
+/*
+ * ================================================================
+ * API shared by zfs and zpool property management
+ * ================================================================
+ */
+
+/*
+ * Compute column widths from the property list and print the header row
+ * for 'get'-style output.  Clears cb_first so this runs only once per
+ * invocation; does nothing further in scripted mode.
+ */
+static void
+zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
+{
+	zprop_list_t *pl = cbp->cb_proplist;
+	int i;
+	char *title;
+	size_t len;
+
+	cbp->cb_first = B_FALSE;
+	if (cbp->cb_scripted)
+		return;
+
+	/*
+	 * Start with the length of the column headers.
+	 */
+	cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
+	cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
+	    "PROPERTY"));
+	cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
+	    "VALUE"));
+	cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
+	    "RECEIVED"));
+	cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
+	    "SOURCE"));
+
+	/* first property is always NAME */
+	assert(cbp->cb_proplist->pl_prop ==
+	    ((type == ZFS_TYPE_POOL) ?  ZPOOL_PROP_NAME : ZFS_PROP_NAME));
+
+	/*
+	 * Go through and calculate the widths for each column.  For the
+	 * 'source' column, we kludge it up by taking the worst-case scenario of
+	 * inheriting from the longest name.  This is acceptable because in the
+	 * majority of cases 'SOURCE' is the last column displayed, and we don't
+	 * use the width anyway.  Note that the 'VALUE' column can be oversized,
+	 * if the name of the property is much longer than any values we find.
+	 */
+	for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
+		/*
+		 * 'PROPERTY' column
+		 */
+		if (pl->pl_prop != ZPROP_INVAL) {
+			const char *propname = (type == ZFS_TYPE_POOL) ?
+			    zpool_prop_to_name(pl->pl_prop) :
+			    zfs_prop_to_name(pl->pl_prop);
+
+			len = strlen(propname);
+			if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
+				cbp->cb_colwidths[GET_COL_PROPERTY] = len;
+		} else {
+			/* User-defined property: the name lives in pl_user_prop. */
+			len = strlen(pl->pl_user_prop);
+			if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
+				cbp->cb_colwidths[GET_COL_PROPERTY] = len;
+		}
+
+		/*
+		 * 'VALUE' column.  The first property is always the 'name'
+		 * property that was tacked on either by /sbin/zfs's
+		 * zfs_do_get() or when calling zprop_expand_list(), so we
+		 * ignore its width.  If the user specified the name property
+		 * to display, then it will be later in the list in any case.
+		 */
+		if (pl != cbp->cb_proplist &&
+		    pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
+			cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
+
+		/* 'RECEIVED' column. */
+		if (pl != cbp->cb_proplist &&
+		    pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
+			cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
+
+		/*
+		 * 'NAME' and 'SOURCE' columns
+		 */
+		if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME :
+		    ZFS_PROP_NAME) &&
+		    pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
+			cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
+			cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
+			    strlen(dgettext(TEXT_DOMAIN, "inherited from"));
+		}
+	}
+
+	/*
+	 * Now go through and print the headers.
+	 */
+	for (i = 0; i < ZFS_GET_NCOLS; i++) {
+		switch (cbp->cb_columns[i]) {
+		case GET_COL_NAME:
+			title = dgettext(TEXT_DOMAIN, "NAME");
+			break;
+		case GET_COL_PROPERTY:
+			title = dgettext(TEXT_DOMAIN, "PROPERTY");
+			break;
+		case GET_COL_VALUE:
+			title = dgettext(TEXT_DOMAIN, "VALUE");
+			break;
+		case GET_COL_RECVD:
+			title = dgettext(TEXT_DOMAIN, "RECEIVED");
+			break;
+		case GET_COL_SOURCE:
+			title = dgettext(TEXT_DOMAIN, "SOURCE");
+			break;
+		default:
+			title = NULL;
+		}
+
+		if (title != NULL) {
+			/* Last (or trailing GET_COL_NONE) column gets no padding. */
+			if (i == (ZFS_GET_NCOLS - 1) ||
+			    cbp->cb_columns[i + 1] == GET_COL_NONE)
+				(void) printf("%s", title);
+			else
+				(void) printf("%-*s  ",
+				    cbp->cb_colwidths[cbp->cb_columns[i]],
+				    title);
+		}
+	}
+	(void) printf("\n");
+}
+
+/*
+ * Display a single line of output, according to the settings in the callback
+ * structure.  Sources not selected in cb_sources are skipped; the header
+ * row is emitted first if this is the first line of output.
+ */
+void
+zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
+    const char *propname, const char *value, zprop_source_t sourcetype,
+    const char *source, const char *recvd_value)
+{
+	int i;
+	const char *str;
+	char buf[128];	/* scratch for the "inherited from ..." source text */
+
+	/*
+	 * Ignore those source types that the user has chosen to ignore.
+	 */
+	if ((sourcetype & cbp->cb_sources) == 0)
+		return;
+
+	if (cbp->cb_first)
+		zprop_print_headers(cbp, cbp->cb_type);
+
+	for (i = 0; i < ZFS_GET_NCOLS; i++) {
+		switch (cbp->cb_columns[i]) {
+		case GET_COL_NAME:
+			str = name;
+			break;
+
+		case GET_COL_PROPERTY:
+			str = propname;
+			break;
+
+		case GET_COL_VALUE:
+			str = value;
+			break;
+
+		case GET_COL_SOURCE:
+			switch (sourcetype) {
+			case ZPROP_SRC_NONE:
+				str = "-";
+				break;
+
+			case ZPROP_SRC_DEFAULT:
+				str = "default";
+				break;
+
+			case ZPROP_SRC_LOCAL:
+				str = "local";
+				break;
+
+			case ZPROP_SRC_TEMPORARY:
+				str = "temporary";
+				break;
+
+			case ZPROP_SRC_INHERITED:
+				(void) snprintf(buf, sizeof (buf),
+				    "inherited from %s", source);
+				str = buf;
+				break;
+			case ZPROP_SRC_RECEIVED:
+				str = "received";
+				break;
+			}
+			break;
+
+		case GET_COL_RECVD:
+			str = (recvd_value == NULL ? "-" : recvd_value);
+			break;
+
+		default:
+			continue;
+		}
+
+		/*
+		 * NOTE(review): unlike zprop_print_headers(), this reads
+		 * cb_columns[i + 1] without first checking i against
+		 * ZFS_GET_NCOLS - 1; presumably cb_columns has a trailing
+		 * GET_COL_NONE sentinel slot -- confirm the array size.
+		 */
+		if (cbp->cb_columns[i + 1] == GET_COL_NONE)
+			(void) printf("%s", str);
+		else if (cbp->cb_scripted)
+			(void) printf("%s\t", str);
+		else
+			(void) printf("%-*s  ",
+			    cbp->cb_colwidths[cbp->cb_columns[i]],
+			    str);
+	}
+
+	(void) printf("\n");
+}
+
+/*
+ * Given a numeric suffix, convert the value into a number of bits that the
+ * resulting value must be shifted.  An empty suffix means no shift;
+ * returns -1 (with an aux error recorded on 'hdl') for anything
+ * unrecognized.
+ */
+static int
+str2shift(libzfs_handle_t *hdl, const char *buf)
+{
+	const char *ends = "BKMGTPEZ";
+	size_t nsuffix = strlen(ends);	/* hoisted: don't recompute per loop */
+	size_t i;
+
+	if (buf[0] == '\0')
+		return (0);
+	/* toupper() requires an unsigned-char value; plain char may be negative. */
+	for (i = 0; i < nsuffix; i++) {
+		if (toupper((unsigned char)buf[0]) == ends[i])
+			break;
+	}
+	if (i == nsuffix) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "invalid numeric suffix '%s'"), buf);
+		return (-1);
+	}
+
+	/*
+	 * We want to allow trailing 'b' characters for 'GB' or 'Mb'.  But don't
+	 * allow 'BB' - that's just weird.
+	 */
+	if (buf[1] == '\0' || (toupper((unsigned char)buf[1]) == 'B' &&
+	    buf[2] == '\0' && toupper((unsigned char)buf[0]) != 'B'))
+		return (10 * (int)i);
+
+	zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+	    "invalid numeric suffix '%s'"), buf);
+	return (-1);
+}
+
+/*
+ * Convert a string of the form '100G' into a real number.  Used when setting
+ * properties or creating a volume.  'buf' is used to place an extended error
+ * message for the caller to use.  'hdl' may be NULL, in which case no aux
+ * error text is recorded here.  Returns 0 on success, -1 on failure.
+ */
+int
+zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
+{
+	char *end;
+	int shift;
+
+	*num = 0;
+
+	/* Check to see if this looks like a number.  */
+	if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
+		if (hdl)
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "bad numeric value '%s'"), value);
+		return (-1);
+	}
+
+	/* Rely on strtoull() to process the numeric portion.  */
+	errno = 0;
+	*num = strtoull(value, &end, 10);
+
+	/*
+	 * Check for ERANGE, which indicates that the value is too large to fit
+	 * in a 64-bit value.
+	 */
+	if (errno == ERANGE) {
+		if (hdl)
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "numeric value is too large"));
+		return (-1);
+	}
+
+	/*
+	 * If we have a decimal value, then do the computation with floating
+	 * point arithmetic.  Otherwise, use standard arithmetic.
+	 */
+	if (*end == '.') {
+		double fval = strtod(value, &end);
+
+		/*
+		 * NOTE(review): str2shift() records an aux error without the
+		 * NULL-hdl check used elsewhere in this function -- confirm
+		 * zfs_error_aux() tolerates hdl == NULL.
+		 */
+		if ((shift = str2shift(hdl, end)) == -1)
+			return (-1);
+
+		fval *= pow(2, shift);
+
+		if (fval > UINT64_MAX) {
+			if (hdl)
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "numeric value is too large"));
+			return (-1);
+		}
+
+		*num = (uint64_t)fval;
+	} else {
+		if ((shift = str2shift(hdl, end)) == -1)
+			return (-1);
+
+		/* Check for overflow */
+		if (shift >= 64 || (*num << shift) >> shift != *num) {
+			if (hdl)
+				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+				    "numeric value is too large"));
+			return (-1);
+		}
+
+		*num <<= shift;
+	}
+
+	return (0);
+}
+
+/*
+ * Given a propname=value nvpair to set, parse any numeric properties
+ * (index, boolean, etc) if they are specified as strings and add the
+ * resulting nvpair to the returned nvlist.
+ *
+ * At the DSL layer, all properties are either 64-bit numbers or strings.
+ * We want the user to be able to ignore this fact and specify properties
+ * as native values (numbers, for example) or as strings (to simplify
+ * command line utilities).  This also handles converting index types
+ * (compression, checksum, etc) from strings to their on-disk index.
+ *
+ * On success, exactly one of *svalp (string) or *ivalp (number) is
+ * meaningful and the same value has been added to 'ret'.  On failure,
+ * EZFS_BADPROP is reported with 'errbuf' as the message and -1 returned.
+ */
+int
+zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
+    zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
+    const char *errbuf)
+{
+	data_type_t datatype = nvpair_type(elem);
+	zprop_type_t proptype;
+	const char *propname;
+	char *value;
+	boolean_t isnone = B_FALSE;
+
+	if (type == ZFS_TYPE_POOL) {
+		proptype = zpool_prop_get_type(prop);
+		propname = zpool_prop_to_name(prop);
+	} else {
+		proptype = zfs_prop_get_type(prop);
+		propname = zfs_prop_to_name(prop);
+	}
+
+	/*
+	 * Convert any properties to the internal DSL value types.
+	 */
+	*svalp = NULL;
+	*ivalp = 0;
+
+	switch (proptype) {
+	case PROP_TYPE_STRING:
+		if (datatype != DATA_TYPE_STRING) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "'%s' must be a string"), nvpair_name(elem));
+			goto error;
+		}
+		(void) nvpair_value_string(elem, svalp);
+		if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "'%s' is too long"), nvpair_name(elem));
+			goto error;
+		}
+		break;
+
+	case PROP_TYPE_NUMBER:
+		if (datatype == DATA_TYPE_STRING) {
+			(void) nvpair_value_string(elem, &value);
+			if (strcmp(value, "none") == 0) {
+				/* "none" leaves *ivalp at its initial 0. */
+				isnone = B_TRUE;
+			} else if (zfs_nicestrtonum(hdl, value, ivalp)
+			    != 0) {
+				goto error;
+			}
+		} else if (datatype == DATA_TYPE_UINT64) {
+			(void) nvpair_value_uint64(elem, ivalp);
+		} else {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "'%s' must be a number"), nvpair_name(elem));
+			goto error;
+		}
+
+		/*
+		 * Quota special: force 'none' and don't allow 0.
+		 */
+		if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
+		    (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "use 'none' to disable quota/refquota"));
+			goto error;
+		}
+		break;
+
+	case PROP_TYPE_INDEX:
+		if (datatype != DATA_TYPE_STRING) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "'%s' must be a string"), nvpair_name(elem));
+			goto error;
+		}
+
+		(void) nvpair_value_string(elem, &value);
+
+		if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "'%s' must be one of '%s'"), propname,
+			    zprop_values(prop, type));
+			goto error;
+		}
+		break;
+
+	default:
+		/* Unknown property type: programming error, not user error. */
+		abort();
+	}
+
+	/*
+	 * Add the result to our return set of properties.
+	 */
+	if (*svalp != NULL) {
+		if (nvlist_add_string(ret, propname, *svalp) != 0) {
+			(void) no_memory(hdl);
+			return (-1);
+		}
+	} else {
+		if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
+			(void) no_memory(hdl);
+			return (-1);
+		}
+	}
+
+	return (0);
+error:
+	(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
+	return (-1);
+}
+
+/*
+ * Append a single property, given by name, as a zprop_list_t entry at
+ * *listp.  Pool feature/unsupported names and user-defined dataset
+ * properties are accepted with pl_prop = ZPROP_INVAL and the name kept
+ * in pl_user_prop.  Returns 0 on success, -1 on error.
+ */
+static int
+addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp,
+    zfs_type_t type)
+{
+	int prop;
+	zprop_list_t *entry;
+
+	prop = zprop_name_to_prop(propname, type);
+
+	if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type))
+		prop = ZPROP_INVAL;
+
+	/*
+	 * When no property table entry can be found, return failure if
+	 * this is a pool property or if this isn't a user-defined
+	 * dataset property.
+	 */
+	if (prop == ZPROP_INVAL && ((type == ZFS_TYPE_POOL &&
+	    !zpool_prop_feature(propname) &&
+	    !zpool_prop_unsupported(propname)) ||
+	    (type == ZFS_TYPE_DATASET && !zfs_prop_user(propname) &&
+	    !zfs_prop_userquota(propname) && !zfs_prop_written(propname)))) {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "invalid property '%s'"), propname);
+		return (zfs_error(hdl, EZFS_BADPROP,
+		    dgettext(TEXT_DOMAIN, "bad property list")));
+	}
+
+	if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
+		return (-1);
+
+	entry->pl_prop = prop;
+	if (prop == ZPROP_INVAL) {
+		if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) ==
+		    NULL) {
+			free(entry);
+			return (-1);
+		}
+		entry->pl_width = strlen(propname);
+	} else {
+		entry->pl_width = zprop_width(prop, &entry->pl_fixed,
+		    type);
+	}
+
+	*listp = entry;
+
+	return (0);
+}
+
+/*
+ * Given a comma-separated list of properties, construct a property list
+ * containing both user-defined and native properties.  This function will
+ * return a NULL list if 'all' is specified, which can later be expanded
+ * by zprop_expand_list().
+ *
+ * Note: 'props' is parsed destructively -- each separator character is
+ * overwritten with a NUL as the list is walked and never restored.
+ */
+int
+zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
+    zfs_type_t type)
+{
+	*listp = NULL;
+
+	/*
+	 * If 'all' is specified, return a NULL list.
+	 */
+	if (strcmp(props, "all") == 0)
+		return (0);
+
+	/*
+	 * If no props were specified, return an error.
+	 */
+	if (props[0] == '\0') {
+		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+		    "no properties specified"));
+		return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
+		    "bad property list")));
+	}
+
+	/*
+	 * It would be nice to use getsubopt() here, but the inclusion of column
+	 * aliases makes this more effort than it's worth.
+	 */
+	while (*props != '\0') {
+		size_t len;
+		char *p;
+		char c;
+
+		if ((p = strchr(props, ',')) == NULL) {
+			len = strlen(props);
+			p = props + len;
+		} else {
+			len = p - props;
+		}
+
+		/*
+		 * Check for empty options.
+		 */
+		if (len == 0) {
+			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+			    "empty property name"));
+			return (zfs_error(hdl, EZFS_BADPROP,
+			    dgettext(TEXT_DOMAIN, "bad property list")));
+		}
+
+		/*
+		 * Check all regular property names.
+		 */
+		c = props[len];
+		props[len] = '\0';
+
+		if (strcmp(props, "space") == 0) {
+			/* "space" is an alias for this fixed set of properties. */
+			static char *spaceprops[] = {
+				"name", "avail", "used", "usedbysnapshots",
+				"usedbydataset", "usedbyrefreservation",
+				"usedbychildren", NULL
+			};
+			int i;
+
+			for (i = 0; spaceprops[i]; i++) {
+				if (addlist(hdl, spaceprops[i], listp, type))
+					return (-1);
+				listp = &(*listp)->pl_next;
+			}
+		} else {
+			if (addlist(hdl, props, listp, type))
+				return (-1);
+			listp = &(*listp)->pl_next;
+		}
+
+		props = p;
+		if (c == ',')
+			props++;
+	}
+
+	return (0);
+}
+
+/*
+ * Free an entire property list, including any user-defined property
+ * names duplicated into pl_user_prop.
+ */
+void
+zprop_free_list(zprop_list_t *pl)
+{
+	zprop_list_t *next;
+
+	for (; pl != NULL; pl = next) {
+		next = pl->pl_next;
+		free(pl->pl_user_prop);
+		free(pl);
+	}
+}
+
+/* State threaded through zprop_expand_list_cb() while building the list. */
+typedef struct expand_data {
+	zprop_list_t	**last;		/* tail pointer of the list under construction */
+	libzfs_handle_t	*hdl;		/* handle used for allocation errors */
+	zfs_type_t type;		/* property namespace being iterated */
+} expand_data_t;
+
+/*
+ * zprop_iter_common() callback: append one native property entry to the
+ * list being built in the expand_data_t.  Returns ZPROP_CONT to continue
+ * iteration, or ZPROP_INVAL if the allocation fails.
+ */
+int
+zprop_expand_list_cb(int prop, void *cb)
+{
+	zprop_list_t *entry;
+	expand_data_t *edp = cb;
+
+	if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL)
+		return (ZPROP_INVAL);
+
+	entry->pl_prop = prop;
+	entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
+	entry->pl_all = B_TRUE;
+
+	*(edp->last) = entry;
+	edp->last = &entry->pl_next;
+
+	return (ZPROP_CONT);
+}
+
+/*
+ * Expand a NULL ('all') property list into the full set of native
+ * properties for 'type', with the special 'name' property prepended.
+ * A list that is already non-NULL is left untouched.  Returns 0 on
+ * success, -1 on allocation failure.
+ */
+int
+zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
+{
+	zprop_list_t *entry;
+	zprop_list_t **last;
+	expand_data_t exp;
+
+	if (*plp == NULL) {
+		/*
+		 * If this is the very first time we've been called for an 'all'
+		 * specification, expand the list to include all native
+		 * properties.
+		 */
+		last = plp;
+
+		exp.last = last;
+		exp.hdl = hdl;
+		exp.type = type;
+
+		if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
+		    B_FALSE, type) == ZPROP_INVAL)
+			return (-1);
+
+		/*
+		 * Add 'name' to the beginning of the list, which is handled
+		 * specially.
+		 */
+		if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
+			return (-1);
+
+		entry->pl_prop = (type == ZFS_TYPE_POOL) ?  ZPOOL_PROP_NAME :
+		    ZFS_PROP_NAME;
+		entry->pl_width = zprop_width(entry->pl_prop,
+		    &entry->pl_fixed, type);
+		entry->pl_all = B_TRUE;
+		entry->pl_next = *plp;
+		*plp = entry;
+	}
+	return (0);
+}
+
+/*
+ * Public wrapper around zprop_iter_common(): iterate over all properties
+ * of the given type, invoking 'func' for each.
+ */
+int
+zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
+    zfs_type_t type)
+{
+	return (zprop_iter_common(func, cb, show_all, ordered, type));
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c
new file mode 100644
index 0000000000000000000000000000000000000000..56bf7181d8bdf20bb709fa1ac6790bd9c47da6c9
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/kernel.c
@@ -0,0 +1,1046 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <assert.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <zlib.h>
+#include <sys/spa.h>
+#include <sys/stat.h>
+#include <sys/processor.h>
+#include <sys/zfs_context.h>
+#include <sys/zmod.h>
+#include <sys/utsname.h>
+#include <sys/systeminfo.h>
+
+/*
+ * Emulation of kernel services in userland.
+ */
+
+/* Global state consumed by the userland emulation of kernel services. */
+int aok;			/* NOTE(review): appears to be an assert-override knob; confirm in zfs_context */
+uint64_t physmem;		/* set by the test harness; units not established here */
+vnode_t *rootdir = (vnode_t *)0xabcd1234;	/* non-NULL sentinel, never dereferenced here */
+char hw_serial[HW_HOSTID_LEN];	/* hostid string buffer */
+
+struct utsname utsname = {
+	"userland", "libzpool", "1", "1", "na"
+};
+
+/* this only exists to have its address taken */
+struct proc p0;
+
+/*
+ * =========================================================================
+ * threads
+ * =========================================================================
+ */
+/*ARGSUSED*/
+/*
+ * Create a detached thread running func(arg).  The thr_create() thread id
+ * is returned cast to a kthread_t pointer: it is an opaque token, not a
+ * dereferenceable address.
+ */
+kthread_t *
+zk_thread_create(void (*func)(), void *arg)
+{
+	thread_t tid;
+
+	VERIFY(thr_create(0, 0, (void *(*)(void *))func, arg, THR_DETACHED,
+	    &tid) == 0);
+
+	return ((void *)(uintptr_t)tid);
+}
+
+/*
+ * =========================================================================
+ * kstats
+ * =========================================================================
+ */
+/*ARGSUSED*/
+/*
+ * Userland stub: kstats are not emulated, so creation always returns NULL.
+ */
+kstat_t *
+kstat_create(char *module, int instance, char *name, char *class,
+    uchar_t type, ulong_t ndata, uchar_t ks_flag)
+{
+	return (NULL);
+}
+
+/*ARGSUSED*/
+/* Userland stub: nothing to install since kstats are not emulated. */
+void
+kstat_install(kstat_t *ksp)
+{}
+
+/*ARGSUSED*/
+/* Userland stub: nothing to delete since kstats are not emulated. */
+void
+kstat_delete(kstat_t *ksp)
+{}
+
+/*
+ * =========================================================================
+ * mutexes
+ * =========================================================================
+ */
+/*
+ * Initialize a mutex.  The m_owner field exists purely to back the
+ * ownership ASSERTs in the enter/exit paths.
+ */
+void
+zmutex_init(kmutex_t *mp)
+{
+	mp->m_owner = NULL;
+	mp->initialized = B_TRUE;
+	(void) _mutex_init(&mp->m_lock, USYNC_THREAD, NULL);
+}
+
+/*
+ * Tear down a mutex.  m_owner is poisoned with -1 so that later
+ * mutex_enter()/mutex_tryenter() ASSERTs catch use-after-destroy.
+ */
+void
+zmutex_destroy(kmutex_t *mp)
+{
+	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_owner == NULL);
+	(void) _mutex_destroy(&(mp)->m_lock);
+	mp->m_owner = (void *)-1UL;
+	mp->initialized = B_FALSE;
+}
+
+/*
+ * Return nonzero if the calling thread currently owns the mutex.
+ */
+int
+zmutex_owned(kmutex_t *mp)
+{
+	ASSERT(mp->initialized == B_TRUE);
+
+	return (mp->m_owner == curthread);
+}
+
+/*
+ * Acquire a mutex.  Not recursive: re-acquiring a lock already held by
+ * the caller trips the m_owner ASSERT instead of deadlocking silently.
+ * The -1 check catches use of a destroyed mutex.
+ */
+void
+mutex_enter(kmutex_t *mp)
+{
+	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_owner != (void *)-1UL);
+	ASSERT(mp->m_owner != curthread);
+	VERIFY(mutex_lock(&mp->m_lock) == 0);
+	ASSERT(mp->m_owner == NULL);
+	mp->m_owner = curthread;
+}
+
+/*
+ * Nonblocking mutex acquire.  Returns 1 and records ownership on
+ * success, 0 if the lock was already held elsewhere.
+ */
+int
+mutex_tryenter(kmutex_t *mp)
+{
+	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mp->m_owner != (void *)-1UL);
+
+	if (mutex_trylock(&mp->m_lock) != 0)
+		return (0);
+
+	ASSERT(mp->m_owner == NULL);
+	mp->m_owner = curthread;
+	return (1);
+}
+
+/*
+ * Release a mutex.  The owner field is cleared before unlocking so that
+ * a racing mutex_enter() never observes a stale owner.
+ */
+void
+mutex_exit(kmutex_t *mp)
+{
+	ASSERT(mp->initialized == B_TRUE);
+	ASSERT(mutex_owner(mp) == curthread);
+	mp->m_owner = NULL;
+	VERIFY(mutex_unlock(&mp->m_lock) == 0);
+}
+
+/*
+ * Return the thread currently holding the mutex, or NULL if unheld.
+ */
+void *
+mutex_owner(kmutex_t *mp)
+{
+	ASSERT(mp->initialized == B_TRUE);
+	return (mp->m_owner);
+}
+
+/*
+ * =========================================================================
+ * rwlocks
+ * =========================================================================
+ */
+/*ARGSUSED*/
+/*
+ * Initialize a reader/writer lock.  rw_count is the bookkeeping field:
+ * the number of active readers, or -1 while write-locked.
+ */
+void
+rw_init(krwlock_t *rwlp, char *name, int type, void *arg)
+{
+	rwlock_init(&rwlp->rw_lock, USYNC_THREAD, NULL);
+	rwlp->rw_owner = NULL;
+	rwlp->initialized = B_TRUE;
+	rwlp->rw_count = 0;
+}
+
+/*
+ * Tear down a reader/writer lock.  rw_owner is poisoned with -1 so the
+ * ASSERTs in the enter paths catch use-after-destroy.
+ */
+void
+rw_destroy(krwlock_t *rwlp)
+{
+	ASSERT(rwlp->rw_count == 0);
+	rwlock_destroy(&rwlp->rw_lock);
+	rwlp->rw_owner = (void *)-1UL;
+	rwlp->initialized = B_FALSE;
+}
+
+/*
+ * Acquire rwlp as reader (rw_count tracks active readers) or writer
+ * (rw_count set to -1, rw_owner records the writer).  Readers never set
+ * rw_owner, so the ownership checks only catch recursive write attempts.
+ */
+void
+rw_enter(krwlock_t *rwlp, krw_t rw)
+{
+	//ASSERT(!RW_LOCK_HELD(rwlp));
+	ASSERT(rwlp->initialized == B_TRUE);
+	ASSERT(rwlp->rw_owner != (void *)-1UL);
+	ASSERT(rwlp->rw_owner != curthread);
+
+	if (rw == RW_READER) {
+		VERIFY(rw_rdlock(&rwlp->rw_lock) == 0);
+		ASSERT(rwlp->rw_count >= 0);
+		atomic_add_int(&rwlp->rw_count, 1);
+	} else {
+		VERIFY(rw_wrlock(&rwlp->rw_lock) == 0);
+		ASSERT(rwlp->rw_count == 0);
+		rwlp->rw_count = -1;
+		rwlp->rw_owner = curthread;
+	}
+}
+
+/*
+ * Release rwlp.  Writer versus reader release is distinguished by
+ * whether the caller is the recorded writer (rw_owner).
+ */
+void
+rw_exit(krwlock_t *rwlp)
+{
+	ASSERT(rwlp->initialized == B_TRUE);
+	ASSERT(rwlp->rw_owner != (void *)-1UL);
+
+	if (rwlp->rw_owner == curthread) {
+		/* Write locked. */
+		ASSERT(rwlp->rw_count == -1);
+		rwlp->rw_count = 0;
+		rwlp->rw_owner = NULL;
+	} else {
+		/* Read locked. */
+		ASSERT(rwlp->rw_count > 0);
+		atomic_add_int(&rwlp->rw_count, -1);
+	}
+	VERIFY(rw_unlock(&rwlp->rw_lock) == 0);
+}
+
+/*
+ * Nonblocking acquire of rwlp; returns 1 on success, 0 if the lock was
+ * busy.  Note rw_tryrdlock()/rw_trywrlock() return 0 on success.
+ */
+int
+rw_tryenter(krwlock_t *rwlp, krw_t rw)
+{
+	int rv;
+
+	ASSERT(rwlp->initialized == B_TRUE);
+	ASSERT(rwlp->rw_owner != (void *)-1UL);
+	ASSERT(rwlp->rw_owner != curthread);
+
+	if (rw == RW_READER)
+		rv = rw_tryrdlock(&rwlp->rw_lock);
+	else
+		rv = rw_trywrlock(&rwlp->rw_lock);
+
+	if (rv == 0) {
+		/* Acquired: update the same bookkeeping rw_enter() maintains. */
+		ASSERT(rwlp->rw_owner == NULL);
+		if (rw == RW_READER) {
+			ASSERT(rwlp->rw_count >= 0);
+			atomic_add_int(&rwlp->rw_count, 1);
+		} else {
+			ASSERT(rwlp->rw_count == 0);
+			rwlp->rw_count = -1;
+			rwlp->rw_owner = curthread;
+		}
+		return (1);
+	}
+
+	return (0);
+}
+
+/*ARGSUSED*/
+/*
+ * Reader-to-writer upgrade is not emulated in userland; always fail
+ * (return 0) so callers fall back to drop-and-reacquire.
+ */
+int
+rw_tryupgrade(krwlock_t *rwlp)
+{
+	ASSERT(rwlp->initialized == B_TRUE);
+	ASSERT(rwlp->rw_owner != (void *)-1UL);	/* not destroyed */
+
+	return (0);
+}
+
+/*
+ * Nonzero iff the lock is held by anyone, for read (>0) or write (-1).
+ */
+int
+rw_lock_held(krwlock_t *rwlp)
+{
+
+	return (rwlp->rw_count != 0);
+}
+
+/*
+ * =========================================================================
+ * condition variables
+ * =========================================================================
+ */
+/*ARGSUSED*/
+/*
+ * kcondvar_t is a bare cond_t, so init/destroy delegate directly.
+ * NOTE(review): 'name' is passed as cond_init()'s second argument,
+ * which is a type (e.g. USYNC_THREAD), not a name — confirm this is
+ * intentional and not a copy/paste slip.
+ */
+void
+cv_init(kcondvar_t *cv, char *name, int type, void *arg)
+{
+	VERIFY(cond_init(cv, name, NULL) == 0);
+}
+
+void
+cv_destroy(kcondvar_t *cv)
+{
+	VERIFY(cond_destroy(cv) == 0);
+}
+
+/*
+ * Block on cv, releasing mp while waiting.  m_owner is cleared before
+ * the wait and reclaimed after, so MUTEX_HELD() stays accurate for
+ * other threads while this one is blocked inside cond_wait().
+ */
+void
+cv_wait(kcondvar_t *cv, kmutex_t *mp)
+{
+	ASSERT(mutex_owner(mp) == curthread);
+	mp->m_owner = NULL;
+	int ret = cond_wait(cv, &mp->m_lock);
+	VERIFY(ret == 0 || ret == EINTR);	/* tolerate signal wakeups */
+	mp->m_owner = curthread;
+}
+
+/*
+ * Wait on cv with a timeout.  Despite the name, 'abstime' is a
+ * *relative* timeout in lbolt ticks on entry; it is converted to an
+ * absolute tick deadline here, and to an absolute timespec for each
+ * wait attempt.  Returns -1 on timeout (or if the deadline has already
+ * passed), 1 if signalled.
+ */
+clock_t
+cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
+{
+	int error;
+	struct timespec ts;
+	struct timeval tv;
+	clock_t delta;
+
+	abstime += ddi_get_lbolt();	/* convert to absolute deadline */
+top:
+	delta = abstime - ddi_get_lbolt();
+	if (delta <= 0)
+		return (-1);		/* deadline already passed */
+
+	if (gettimeofday(&tv, NULL) != 0)
+		assert(!"gettimeofday() failed");
+
+	/* Translate the remaining ticks into an absolute timespec. */
+	ts.tv_sec = tv.tv_sec + delta / hz;
+	ts.tv_nsec = tv.tv_usec * 1000 + (delta % hz) * (NANOSEC / hz);
+	ASSERT(ts.tv_nsec >= 0);
+
+	if (ts.tv_nsec >= NANOSEC) {
+		ts.tv_sec++;
+		ts.tv_nsec -= NANOSEC;
+	}
+
+	ASSERT(mutex_owner(mp) == curthread);
+	mp->m_owner = NULL;		/* see cv_wait() */
+	/*
+	 * NOTE(review): pthread_cond_timedwait() is called on a cond_t;
+	 * this assumes cond_t is layout-compatible with pthread_cond_t
+	 * on this platform — confirm.
+	 */
+	error = pthread_cond_timedwait(cv, &mp->m_lock, &ts);
+	mp->m_owner = curthread;
+
+	if (error == EINTR)
+		goto top;		/* recompute remaining time, retry */
+
+	if (error == ETIMEDOUT)
+		return (-1);
+
+	ASSERT(error == 0);
+
+	return (1);
+}
+
+/* Wake one waiter. */
+void
+cv_signal(kcondvar_t *cv)
+{
+	VERIFY(cond_signal(cv) == 0);
+}
+
+/* Wake all waiters. */
+void
+cv_broadcast(kcondvar_t *cv)
+{
+	VERIFY(cond_broadcast(cv) == 0);
+}
+
+/*
+ * =========================================================================
+ * vnode operations
+ * =========================================================================
+ */
+/*
+ * Note: for the xxxat() versions of these functions, we assume that the
+ * starting vp is always rootdir (which is true for spa_directory.c, the only
+ * ZFS consumer of these interfaces).  We assert this is true, and then emulate
+ * them by adding '/' in front of the path.
+ */
+
+/*ARGSUSED*/
+/*
+ * Emulate kernel vn_open(): open 'path' and wrap the file descriptor
+ * in a heap-allocated vnode_t returned through *vpp.  x1/x2/x3 are
+ * ignored.  Returns 0 on success or errno on failure.
+ */
+int
+vn_open(char *path, int x1, int flags, int mode, vnode_t **vpp, int x2, int x3)
+{
+	int fd;
+	vnode_t *vp;
+	int old_umask;
+	char realpath[MAXPATHLEN];
+	struct stat64 st;
+
+	/*
+	 * If we're accessing a real disk from userland, we need to use
+	 * the character interface to avoid caching.  This is particularly
+	 * important if we're trying to look at a real in-kernel storage
+	 * pool from userland, e.g. via zdb, because otherwise we won't
+	 * see the changes occurring under the segmap cache.
+	 * On the other hand, the stupid character device returns zero
+	 * for its size.  So -- gag -- we open the block device to get
+	 * its size, and remember it for subsequent VOP_GETATTR().
+	 */
+	if (strncmp(path, "/dev/", 5) == 0) {
+		char *dsk;
+		fd = open64(path, O_RDONLY);
+		if (fd == -1)
+			return (errno);
+		if (fstat64(fd, &st) == -1) {
+			close(fd);
+			return (errno);
+		}
+		close(fd);
+		/* Rewrite ".../dsk/..." to ".../rdsk/..." (raw device). */
+		(void) sprintf(realpath, "%s", path);
+		dsk = strstr(path, "/dsk/");
+		if (dsk != NULL)
+			(void) sprintf(realpath + (dsk - path) + 1, "r%s",
+			    dsk + 1);
+	} else {
+		(void) sprintf(realpath, "%s", path);
+		if (!(flags & FCREAT) && stat64(realpath, &st) == -1)
+			return (errno);
+	}
+
+	/* Create with exactly 'mode' bits: temporarily disable umask. */
+	if (flags & FCREAT)
+		old_umask = umask(0);
+
+	/*
+	 * The construct 'flags - FREAD' conveniently maps combinations of
+	 * FREAD and FWRITE to the corresponding O_RDONLY, O_WRONLY, and O_RDWR.
+	 */
+	fd = open64(realpath, flags - FREAD, mode);
+
+	if (flags & FCREAT)
+		(void) umask(old_umask);
+
+	if (fd == -1)
+		return (errno);
+
+	if (fstat64(fd, &st) == -1) {
+		close(fd);
+		return (errno);
+	}
+
+	(void) fcntl(fd, F_SETFD, FD_CLOEXEC);	/* don't leak into children */
+
+	*vpp = vp = umem_zalloc(sizeof (vnode_t), UMEM_NOFAIL);
+
+	vp->v_fd = fd;
+	vp->v_size = st.st_size;	/* cached for fop_getattr() */
+	vp->v_path = spa_strdup(path);
+
+	return (0);
+}
+
+/*ARGSUSED*/
+/*
+ * Emulate vn_openat() by requiring startvp == rootdir and prepending
+ * '/' to make the path absolute (see block comment above vn_open()).
+ */
+int
+vn_openat(char *path, int x1, int flags, int mode, vnode_t **vpp, int x2,
+    int x3, vnode_t *startvp, int fd)
+{
+	char *realpath = umem_alloc(strlen(path) + 2, UMEM_NOFAIL);
+	int ret;
+
+	ASSERT(startvp == rootdir);
+	(void) sprintf(realpath, "/%s", path);
+
+	/* fd ignored for now, need if want to simulate nbmand support */
+	ret = vn_open(realpath, x1, flags, mode, vpp, x2, x3);
+
+	umem_free(realpath, strlen(path) + 2);
+
+	return (ret);
+}
+
+/*ARGSUSED*/
+/*
+ * Emulate vn_rdwr() with pread64()/pwrite64().  On success *residp
+ * (if non-NULL) receives the number of bytes NOT transferred; with a
+ * NULL residp a short transfer is reported as EIO.
+ */
+int
+vn_rdwr(int uio, vnode_t *vp, void *addr, ssize_t len, offset_t offset,
+	int x1, int x2, rlim64_t x3, void *x4, ssize_t *residp)
+{
+	ssize_t iolen, split;
+
+	if (uio == UIO_READ) {
+		iolen = pread64(vp->v_fd, addr, len, offset);
+	} else {
+		/*
+		 * To simulate partial disk writes, we split writes into two
+		 * system calls so that the process can be killed in between.
+		 */
+		int sectors = len >> SPA_MINBLOCKSHIFT;
+		split = (sectors > 0 ? rand() % sectors : 0) <<
+		    SPA_MINBLOCKSHIFT;
+		iolen = pwrite64(vp->v_fd, addr, split, offset);
+		/*
+		 * NOTE(review): if only this second pwrite64() fails, its
+		 * -1 is summed into iolen instead of propagated, so the
+		 * 'iolen == -1' check below misses the error and it is
+		 * reported (at best) as a short write — confirm intent.
+		 */
+		iolen += pwrite64(vp->v_fd, (char *)addr + split,
+		    len - split, offset + split);
+	}
+
+	if (iolen == -1)
+		return (errno);
+	if (residp)
+		*residp = len - iolen;
+	else if (iolen != len)
+		return (EIO);
+	return (0);
+}
+
+/*
+ * Release a vnode_t created by vn_open(): close the fd and free the
+ * path string and the vnode itself.  openflag/cr/td are unused.
+ */
+void
+vn_close(vnode_t *vp, int openflag, cred_t *cr, kthread_t *td)
+{
+	close(vp->v_fd);
+	spa_strfree(vp->v_path);
+	umem_free(vp, sizeof (vnode_t));
+}
+
+/*
+ * At a minimum we need to update the size since vdev_reopen()
+ * will no longer call vn_openat().
+ */
+int
+fop_getattr(vnode_t *vp, vattr_t *vap)
+{
+	struct stat64 st;
+
+	if (fstat64(vp->v_fd, &st) == -1) {
+		/*
+		 * NOTE(review): closing v_fd here leaves vp->v_fd dangling
+		 * while the vnode stays allocated; a later vn_close() will
+		 * close the stale descriptor number — confirm callers.
+		 */
+		close(vp->v_fd);
+		return (errno);
+	}
+
+	vap->va_size = st.st_size;
+	return (0);
+}
+
+#ifdef ZFS_DEBUG
+
+/*
+ * =========================================================================
+ * Figure out which debugging statements to print
+ * =========================================================================
+ */
+
+static char *dprintf_string;
+static int dprintf_print_all;
+
+/*
+ * Return 1 iff 'string' appears as a complete comma-separated token in
+ * dprintf_string (e.g. "file1.c,func1,file2.c"), 0 otherwise.  The
+ * delimiter check after strncmp() prevents prefix-only matches.
+ */
+int
+dprintf_find_string(const char *string)
+{
+	char *tmp_str = dprintf_string;
+	int len = strlen(string);
+
+	/*
+	 * Find out if this is a string we want to print.
+	 * String format: file1.c,function_name1,file2.c,file3.c
+	 */
+
+	while (tmp_str != NULL) {
+		if (strncmp(tmp_str, string, len) == 0 &&
+		    (tmp_str[len] == ',' || tmp_str[len] == '\0'))
+			return (1);
+		tmp_str = strchr(tmp_str, ',');
+		if (tmp_str != NULL)
+			tmp_str++; /* Get rid of , */
+	}
+	return (0);
+}
+
+/*
+ * Initialize dprintf_string from a "debug=..." command-line argument
+ * (removed from argv/argc in place) or, failing that, from the
+ * ZFS_DEBUG environment variable.
+ */
+void
+dprintf_setup(int *argc, char **argv)
+{
+	int i, j;
+
+	/*
+	 * Debugging can be specified two ways: by setting the
+	 * environment variable ZFS_DEBUG, or by including a
+	 * "debug=..."  argument on the command line.  The command
+	 * line setting overrides the environment variable.
+	 */
+
+	for (i = 1; i < *argc; i++) {
+		int len = strlen("debug=");
+		/* First look for a command line argument */
+		if (strncmp("debug=", argv[i], len) == 0) {
+			dprintf_string = argv[i] + len;
+			/* Remove from args */
+			for (j = i; j < *argc; j++)
+				argv[j] = argv[j+1];
+			argv[j] = NULL;
+			(*argc)--;
+		}
+	}
+
+	if (dprintf_string == NULL) {
+		/* Look for ZFS_DEBUG environment variable */
+		dprintf_string = getenv("ZFS_DEBUG");
+	}
+
+	/*
+	 * Are we just turning on all debugging?
+	 */
+	if (dprintf_find_string("on"))
+		dprintf_print_all = 1;
+}
+
+/*
+ * =========================================================================
+ * debug printfs
+ * =========================================================================
+ */
+/*
+ * Back end of the dprintf() macro: print the message iff debugging is
+ * on for everything ("on"), for this file, or for this function.  The
+ * magic tokens "pid", "tid", "time" and "long" in the debug string add
+ * extra fields to each line.  flockfile() keeps a line atomic across
+ * threads.
+ */
+void
+__dprintf(const char *file, const char *func, int line, const char *fmt, ...)
+{
+	const char *newfile;
+	va_list adx;
+
+	/*
+	 * Get rid of annoying "../common/" prefix to filename.
+	 */
+	newfile = strrchr(file, '/');
+	if (newfile != NULL) {
+		newfile = newfile + 1; /* Get rid of leading / */
+	} else {
+		newfile = file;
+	}
+
+	if (dprintf_print_all ||
+	    dprintf_find_string(newfile) ||
+	    dprintf_find_string(func)) {
+		/* Print out just the function name if requested */
+		flockfile(stdout);
+		if (dprintf_find_string("pid"))
+			(void) printf("%d ", getpid());
+		if (dprintf_find_string("tid"))
+			(void) printf("%u ", thr_self());
+#if 0
+		if (dprintf_find_string("cpu"))
+			(void) printf("%u ", getcpuid());
+#endif
+		if (dprintf_find_string("time"))
+			(void) printf("%llu ", gethrtime());
+		if (dprintf_find_string("long"))
+			(void) printf("%s, line %d: ", newfile, line);
+		(void) printf("%s: ", func);
+		va_start(adx, fmt);
+		(void) vprintf(fmt, adx);
+		va_end(adx);
+		funlockfile(stdout);
+	}
+}
+
+#endif /* ZFS_DEBUG */
+
+/*
+ * =========================================================================
+ * cmn_err() and panic()
+ * =========================================================================
+ */
+static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
+static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
+
+/*
+ * Userland panic: print the message to stderr and abort() so a core
+ * dump stands in for a kernel crash dump.  Never returns.
+ */
+void
+vpanic(const char *fmt, va_list adx)
+{
+	(void) fprintf(stderr, "error: ");
+	(void) vfprintf(stderr, fmt, adx);
+	(void) fprintf(stderr, "\n");
+
+	abort();	/* think of it as a "user-level crash dump" */
+}
+
+/* Varargs wrapper around vpanic(); never returns. */
+void
+panic(const char *fmt, ...)
+{
+	va_list adx;
+
+	va_start(adx, fmt);
+	vpanic(fmt, adx);
+	va_end(adx);
+}
+
+/*
+ * Userland cmn_err(): CE_PANIC aborts via vpanic(); CE_NOTE is
+ * suppressed to cut noise in stress testing; other levels go to stderr
+ * with the prefix/suffix tables above.
+ * NOTE(review): ce == CE_IGNORE (4) would index past ce_prefix[]/
+ * ce_suffix[] (sized CE_IGNORE); confirm no caller passes it.
+ */
+void
+vcmn_err(int ce, const char *fmt, va_list adx)
+{
+	if (ce == CE_PANIC)
+		vpanic(fmt, adx);	/* does not return */
+	if (ce != CE_NOTE) {	/* suppress noise in userland stress testing */
+		(void) fprintf(stderr, "%s", ce_prefix[ce]);
+		(void) vfprintf(stderr, fmt, adx);
+		(void) fprintf(stderr, "%s", ce_suffix[ce]);
+	}
+}
+
+/*PRINTFLIKE2*/
+/* Varargs wrapper around vcmn_err(). */
+void
+cmn_err(int ce, const char *fmt, ...)
+{
+	va_list adx;
+
+	va_start(adx, fmt);
+	vcmn_err(ce, fmt, adx);
+	va_end(adx);
+}
+
+/*
+ * =========================================================================
+ * kobj interfaces
+ * =========================================================================
+ */
+/*
+ * Open 'name' (relative to rootdir) and wrap the resulting vnode in a
+ * struct _buf.  Returns (struct _buf *)-1 on failure, matching the
+ * kernel kobj convention.
+ */
+struct _buf *
+kobj_open_file(char *name)
+{
+	struct _buf *file;
+	vnode_t *vp;
+
+	/* set vp as the _fd field of the file */
+	if (vn_openat(name, UIO_SYSSPACE, FREAD, 0, &vp, 0, 0, rootdir,
+	    -1) != 0)
+		return ((void *)-1UL);
+
+	file = umem_zalloc(sizeof (struct _buf), UMEM_NOFAIL);
+	file->_fd = (intptr_t)vp;
+	return (file);
+}
+
+/*
+ * Read up to 'size' bytes at offset 'off'; returns bytes read.
+ * NOTE(review): the vn_rdwr() return value is ignored — on error,
+ * 'resid' is never written and 'size - resid' is garbage.  Confirm
+ * whether an error return should be propagated instead.
+ */
+int
+kobj_read_file(struct _buf *file, char *buf, unsigned size, unsigned off)
+{
+	ssize_t resid;
+
+	vn_rdwr(UIO_READ, (vnode_t *)file->_fd, buf, size, (offset_t)off,
+	    UIO_SYSSPACE, 0, 0, 0, &resid);
+
+	return (size - resid);
+}
+
+/* Close the underlying vnode and free the _buf wrapper. */
+void
+kobj_close_file(struct _buf *file)
+{
+	vn_close((vnode_t *)file->_fd, 0, NULL, NULL);
+	umem_free(file, sizeof (struct _buf));
+}
+
+/*
+ * Store the file's size in *size.  Returns 0 or errno; on fstat64()
+ * failure the vnode is closed before returning.
+ */
+int
+kobj_get_filesize(struct _buf *file, uint64_t *size)
+{
+	struct stat64 st;
+	vnode_t *vp = (vnode_t *)file->_fd;
+
+	if (fstat64(vp->v_fd, &st) == -1) {
+		vn_close(vp, 0, NULL, NULL);
+		return (errno);
+	}
+	*size = st.st_size;
+	return (0);
+}
+
+/*
+ * =========================================================================
+ * misc routines
+ * =========================================================================
+ */
+
+/* Sleep for 'ticks' lbolt ticks, using poll() as a portable sleep. */
+void
+delay(clock_t ticks)
+{
+	poll(0, 0, ticks * (1000 / hz));
+}
+
+#if 0
+/*
+ * Find highest one bit set.
+ *	Returns bit number + 1 of highest bit that is set, otherwise returns 0.
+ * High order bit is 31 (or 63 in _LP64 kernel).
+ */
+int
+highbit(ulong_t i)
+{
+	register int h = 1;
+
+	if (i == 0)
+		return (0);
+#ifdef _LP64
+	if (i & 0xffffffff00000000ul) {
+		h += 32; i >>= 32;
+	}
+#endif
+	if (i & 0xffff0000) {
+		h += 16; i >>= 16;
+	}
+	if (i & 0xff00) {
+		h += 8; i >>= 8;
+	}
+	if (i & 0xf0) {
+		h += 4; i >>= 4;
+	}
+	if (i & 0xc) {
+		h += 2; i >>= 2;
+	}
+	if (i & 0x2) {
+		h += 1;
+	}
+	return (h);
+}
+#endif
+
+/* Opened in kernel_init(), closed in kernel_fini(). */
+static int random_fd = -1, urandom_fd = -1;
+
+/*
+ * Fill ptr[0..len) from the given descriptor, looping until all bytes
+ * are read.  Always returns 0.
+ * NOTE(review): a read() of 0 (EOF) would loop forever, and the
+ * ASSERT3S only catches -1 in debug builds — confirm acceptable for
+ * /dev/random and /dev/urandom.
+ */
+static int
+random_get_bytes_common(uint8_t *ptr, size_t len, int fd)
+{
+	size_t resid = len;
+	ssize_t bytes;
+
+	ASSERT(fd != -1);
+
+	while (resid != 0) {
+		bytes = read(fd, ptr, resid);
+		ASSERT3S(bytes, >=, 0);
+		ptr += bytes;
+		resid -= bytes;
+	}
+
+	return (0);
+}
+
+/* High-quality random bytes, from /dev/random. */
+int
+random_get_bytes(uint8_t *ptr, size_t len)
+{
+	return (random_get_bytes_common(ptr, len, random_fd));
+}
+
+/* Pseudo-random bytes, from /dev/urandom. */
+int
+random_get_pseudo_bytes(uint8_t *ptr, size_t len)
+{
+	return (random_get_bytes_common(ptr, len, urandom_fd));
+}
+
+/*
+ * Parse an unsigned long from hw_serial.  Returns 0 on success.
+ * NOTE(review): errno is not cleared before strtoul(), so when the
+ * parsed value is 0 this may return a *stale* errno (or 0, masking a
+ * genuine failure); a legitimate input of "0" is also treated as a
+ * failure path.  Confirm against the kernel ddi_strtoul() contract.
+ */
+int
+ddi_strtoul(const char *hw_serial, char **nptr, int base, unsigned long *result)
+{
+	char *end;
+
+	*result = strtoul(hw_serial, &end, base);
+	if (*result == 0)
+		return (errno);
+	return (0);
+}
+
+/*
+ * As ddi_strtoul(), but for u_longlong_t.  Same NOTE(review) about
+ * stale errno and zero-valued input applies.
+ */
+int
+ddi_strtoull(const char *str, char **nptr, int base, u_longlong_t *result)
+{
+	char *end;
+
+	*result = strtoull(str, &end, base);
+	if (*result == 0)
+		return (errno);
+	return (0);
+}
+
+/*
+ * =========================================================================
+ * kernel emulation setup & teardown
+ * =========================================================================
+ */
+/*
+ * umem nofail callback: announce the failure on stderr and abort() so
+ * a core dump is generated.  The return is never reached.
+ */
+static int
+umem_out_of_memory(void)
+{
+	char errmsg[] = "out of memory -- generating core dump\n";
+
+	write(fileno(stderr), errmsg, sizeof (errmsg));
+	abort();
+	return (0);
+}
+
+/*
+ * Set up the userland kernel emulation: umem OOM handler, physmem,
+ * the emulated hostid (hw_serial, only when opened FWRITE), the random
+ * device descriptors, the system taskq, and finally spa_init(mode).
+ */
+void
+kernel_init(int mode)
+{
+	umem_nofail_callback(umem_out_of_memory);
+
+	physmem = sysconf(_SC_PHYS_PAGES);
+
+	dprintf("physmem = %llu pages (%.2f GB)\n", physmem,
+	    (double)physmem * sysconf(_SC_PAGE_SIZE) / (1ULL << 30));
+
+	/* Read-only consumers (e.g. zdb) report hostid 0. */
+	(void) snprintf(hw_serial, sizeof (hw_serial), "%lu",
+	    (mode & FWRITE) ? (unsigned long)gethostid() : 0);
+
+	VERIFY((random_fd = open("/dev/random", O_RDONLY)) != -1);
+	VERIFY((urandom_fd = open("/dev/urandom", O_RDONLY)) != -1);
+
+	system_taskq_init();
+
+	spa_init(mode);
+}
+
+/* Tear down everything kernel_init() set up, in reverse order. */
+void
+kernel_fini(void)
+{
+	spa_fini();
+
+	system_taskq_fini();
+
+	close(random_fd);
+	close(urandom_fd);
+
+	random_fd = -1;
+	urandom_fd = -1;
+}
+
+/*
+ * zlib uncompress() shim: bridge size_t *dstlen to zlib's uLongf.
+ * On Z_OK, *dstlen is updated to the actual output length.
+ */
+int
+z_uncompress(void *dst, size_t *dstlen, const void *src, size_t srclen)
+{
+	int ret;
+	uLongf len = *dstlen;
+
+	if ((ret = uncompress(dst, &len, src, srclen)) == Z_OK)
+		*dstlen = (size_t)len;
+
+	return (ret);
+}
+
+/*
+ * zlib compress2() shim at the given compression level; same
+ * uLongf/size_t bridging as z_uncompress().
+ */
+int
+z_compress_level(void *dst, size_t *dstlen, const void *src, size_t srclen,
+    int level)
+{
+	int ret;
+	uLongf len = *dstlen;
+
+	if ((ret = compress2(dst, &len, src, srclen, level)) == Z_OK)
+		*dstlen = (size_t)len;
+
+	return (ret);
+}
+
+/*
+ * Credential stubs: userland has no credential emulation, so every
+ * caller is treated as root (uid/gid 0) with no supplemental groups.
+ */
+uid_t
+crgetuid(cred_t *cr)
+{
+	return (0);
+}
+
+gid_t
+crgetgid(cred_t *cr)
+{
+	return (0);
+}
+
+int
+crgetngroups(cred_t *cr)
+{
+	return (0);
+}
+
+gid_t *
+crgetgroups(cred_t *cr)
+{
+	return (NULL);
+}
+
+/*
+ * Security-policy stubs: permission checks always succeed in the
+ * userland emulation.
+ */
+int
+zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
+{
+	return (0);
+}
+
+int
+zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
+{
+	return (0);
+}
+
+int
+zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
+{
+	return (0);
+}
+
+/*
+ * Allocate a ksiddomain_t holding a copy of 'dom'.  No caching or
+ * refcounting is emulated; ksiddomain_rele() frees it outright.
+ */
+ksiddomain_t *
+ksid_lookupdomain(const char *dom)
+{
+	ksiddomain_t *kd;
+
+	kd = umem_zalloc(sizeof (ksiddomain_t), UMEM_NOFAIL);
+	kd->kd_name = spa_strdup(dom);
+	return (kd);
+}
+
+/* Free a domain returned by ksid_lookupdomain(). */
+void
+ksiddomain_rele(ksiddomain_t *ksid)
+{
+	spa_strfree(ksid->kd_name);
+	umem_free(ksid, sizeof (ksiddomain_t));
+}
+
+/*
+ * Do not change the length of the returned string; it must be freed
+ * with strfree().
+ */
+/*
+ * asprintf() built on kmem_alloc(): a first vsnprintf(NULL, 0, ...)
+ * pass sizes the buffer (including the NUL), a second pass formats
+ * into it.
+ */
+char *
+kmem_asprintf(const char *fmt, ...)
+{
+	int size;
+	va_list adx;
+	char *buf;
+
+	va_start(adx, fmt);
+	size = vsnprintf(NULL, 0, fmt, adx) + 1;	/* +1 for NUL */
+	va_end(adx);
+
+	buf = kmem_alloc(size, KM_SLEEP);
+
+	va_start(adx, fmt);
+	size = vsnprintf(buf, size, fmt, adx);
+	va_end(adx);
+
+	return (buf);
+}
+
+/* ARGSUSED */
+/*
+ * zfs_onexit stubs: the on-exit callback machinery is not emulated in
+ * userland, so these all succeed without doing anything.
+ */
+int
+zfs_onexit_fd_hold(int fd, minor_t *minorp)
+{
+	*minorp = 0;
+	return (0);
+}
+
+/* ARGSUSED */
+void
+zfs_onexit_fd_rele(int fd)
+{
+}
+
+/* ARGSUSED */
+int
+zfs_onexit_add_cb(minor_t minor, void (*func)(void *), void *data,
+    uint64_t *action_handle)
+{
+	return (0);
+}
+
+/* ARGSUSED */
+int
+zfs_onexit_del_cb(minor_t minor, uint64_t action_handle, boolean_t fire)
+{
+	return (0);
+}
+
+/* ARGSUSED */
+int
+zfs_onexit_cb_data(minor_t minor, uint64_t action_handle, void **data)
+{
+	return (0);
+}
+
+#ifdef __FreeBSD__
+/* ARGSUSED */
+/* Stub: zvol device nodes are not created in userland. */
+int
+zvol_create_minors(const char *name)
+{
+	return (0);
+}
+#endif
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzpool/common/sys/zfs_context.h b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/sys/zfs_context.h
new file mode 100644
index 0000000000000000000000000000000000000000..11fb68156edc852ecdc3b6c84ada8a0d5fa5c69a
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/sys/zfs_context.h
@@ -0,0 +1,631 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Joyent, Inc. All rights reserved.
+ */
+
+#ifndef _SYS_ZFS_CONTEXT_H
+#define	_SYS_ZFS_CONTEXT_H
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+#define	_SYS_MUTEX_H
+#define	_SYS_RWLOCK_H
+#define	_SYS_CONDVAR_H
+#define	_SYS_SYSTM_H
+#define	_SYS_T_LOCK_H
+#define	_SYS_VNODE_H
+#define	_SYS_VFS_H
+#define	_SYS_SUNDDI_H
+#define	_SYS_CALLB_H
+#define	_SYS_SCHED_H_
+
+#include <solaris.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <strings.h>
+#include <thread.h>
+#include <assert.h>
+#include <limits.h>
+#include <dirent.h>
+#include <time.h>
+#include <math.h>
+#include <umem.h>
+#include <inttypes.h>
+#include <fsshare.h>
+#include <sys/note.h>
+#include <sys/types.h>
+#include <sys/cred.h>
+#include <sys/atomic.h>
+#include <sys/sysmacros.h>
+#include <sys/bitmap.h>
+#include <sys/resource.h>
+#include <sys/byteorder.h>
+#include <sys/list.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/mntent.h>
+#include <sys/mnttab.h>
+#include <sys/zfs_debug.h>
+#include <sys/sdt.h>
+#include <sys/kstat.h>
+#include <sys/u8_textprep.h>
+#include <sys/kernel.h>
+#include <sys/disk.h>
+#include <sys/sysevent.h>
+#include <sys/sysevent/eventdefs.h>
+#include <sys/sysevent/dev.h>
+#include <machine/atomic.h>
+#include <sys/debug.h>
+
+#define	ZFS_EXPORTS_PATH	"/etc/zfs/exports"
+
+/*
+ * Debugging
+ */
+
+/*
+ * Note that we are not using the debugging levels.
+ */
+
+#define	CE_CONT		0	/* continuation		*/
+#define	CE_NOTE		1	/* notice		*/
+#define	CE_WARN		2	/* warning		*/
+#define	CE_PANIC	3	/* panic		*/
+#define	CE_IGNORE	4	/* print nothing	*/
+
+/*
+ * ZFS debugging
+ */
+
+#define	ZFS_LOG(...)	do {  } while (0)
+
+typedef u_longlong_t      rlim64_t;
+#define	RLIM64_INFINITY	((rlim64_t)-3)
+
+#ifdef ZFS_DEBUG
+extern void dprintf_setup(int *argc, char **argv);
+#endif /* ZFS_DEBUG */
+
+extern void cmn_err(int, const char *, ...);
+extern void vcmn_err(int, const char *, __va_list);
+extern void panic(const char *, ...);
+extern void vpanic(const char *, __va_list);
+
+#define	fm_panic	panic
+
+extern int aok;
+
+/*
+ * DTrace SDT probes have different signatures in userland than they do in
+ * kernel.  If they're being used in kernel code, re-define them out of
+ * existence for their counterparts in libzpool.
+ */
+
+#ifdef DTRACE_PROBE
+#undef	DTRACE_PROBE
+#define	DTRACE_PROBE(a)	((void)0)
+#endif	/* DTRACE_PROBE */
+
+#ifdef DTRACE_PROBE1
+#undef	DTRACE_PROBE1
+#define	DTRACE_PROBE1(a, b, c)	((void)0)
+#endif	/* DTRACE_PROBE1 */
+
+#ifdef DTRACE_PROBE2
+#undef	DTRACE_PROBE2
+#define	DTRACE_PROBE2(a, b, c, d, e)	((void)0)
+#endif	/* DTRACE_PROBE2 */
+
+#ifdef DTRACE_PROBE3
+#undef	DTRACE_PROBE3
+#define	DTRACE_PROBE3(a, b, c, d, e, f, g)	((void)0)
+#endif	/* DTRACE_PROBE3 */
+
+#ifdef DTRACE_PROBE4
+#undef	DTRACE_PROBE4
+#define	DTRACE_PROBE4(a, b, c, d, e, f, g, h, i)	((void)0)
+#endif	/* DTRACE_PROBE4 */
+
+/*
+ * Threads
+ */
+#define	curthread	((void *)(uintptr_t)thr_self())
+
+typedef struct kthread kthread_t;
+
+#define	thread_create(stk, stksize, func, arg, len, pp, state, pri)	\
+	zk_thread_create(func, arg)
+#define	thread_exit() thr_exit(NULL)
+#define	thread_join(t)	panic("libzpool cannot join threads")
+
+#define	newproc(f, a, cid, pri, ctp, pid)	(ENOSYS)
+
+/* in libzpool, p0 exists only to have its address taken */
+struct proc {
+	uintptr_t	this_is_never_used_dont_dereference_it;
+};
+
+extern struct proc p0;
+#define	curproc		(&p0)
+
+#define	PS_NONE		-1
+
+extern kthread_t *zk_thread_create(void (*func)(), void *arg);
+
+#define	issig(why)	(FALSE)
+#define	ISSIG(thr, why)	(FALSE)
+
+/*
+ * Mutexes
+ */
+typedef struct kmutex {
+	void		*m_owner;	/* curthread of holder; NULL if free */
+	boolean_t	initialized;	/* set by zmutex_init() */
+	mutex_t		m_lock;		/* underlying userland mutex */
+} kmutex_t;
+
+#define	MUTEX_DEFAULT	USYNC_THREAD
+#undef	MUTEX_HELD
+#undef	MUTEX_NOT_HELD
+#define	MUTEX_HELD(m)	((m)->m_owner == curthread)
+#define	MUTEX_NOT_HELD(m) (!MUTEX_HELD(m))
+#define	_mutex_held(m)	pthread_mutex_isowned_np(m)
+
+/*
+ * Argh -- we have to get cheesy here because the kernel and userland
+ * have different signatures for the same routine.
+ */
+//extern int _mutex_init(mutex_t *mp, int type, void *arg);
+//extern int _mutex_destroy(mutex_t *mp);
+//extern int _mutex_owned(mutex_t *mp);
+
+#define	mutex_init(mp, b, c, d)		zmutex_init((kmutex_t *)(mp))
+#define	mutex_destroy(mp)		zmutex_destroy((kmutex_t *)(mp))
+#define	mutex_owned(mp)			zmutex_owned((kmutex_t *)(mp))
+
+extern void zmutex_init(kmutex_t *mp);
+extern void zmutex_destroy(kmutex_t *mp);
+extern int zmutex_owned(kmutex_t *mp);
+extern void mutex_enter(kmutex_t *mp);
+extern void mutex_exit(kmutex_t *mp);
+extern int mutex_tryenter(kmutex_t *mp);
+extern void *mutex_owner(kmutex_t *mp);
+
+/*
+ * RW locks
+ */
+typedef struct krwlock {
+	int		rw_count;	/* >0 readers, -1 writer, 0 free */
+	void		*rw_owner;	/* curthread of writer; NULL else */
+	boolean_t	initialized;	/* set by rw_init() */
+	rwlock_t	rw_lock;	/* underlying userland rwlock */
+} krwlock_t;
+
+typedef int krw_t;
+
+#define	RW_READER	0
+#define	RW_WRITER	1
+#define	RW_DEFAULT	USYNC_THREAD
+
+#undef RW_READ_HELD
+#define RW_READ_HELD(x)		((x)->rw_owner == NULL && (x)->rw_count > 0)
+
+#undef RW_WRITE_HELD
+#define	RW_WRITE_HELD(x)	((x)->rw_owner == curthread)
+#define	RW_LOCK_HELD(x)		rw_lock_held(x)
+
+extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
+extern void rw_destroy(krwlock_t *rwlp);
+extern void rw_enter(krwlock_t *rwlp, krw_t rw);
+extern int rw_tryenter(krwlock_t *rwlp, krw_t rw);
+extern int rw_tryupgrade(krwlock_t *rwlp);
+extern void rw_exit(krwlock_t *rwlp);
+extern int rw_lock_held(krwlock_t *rwlp);
+#define	rw_downgrade(rwlp) do { } while (0)
+
+extern uid_t crgetuid(cred_t *cr);
+extern gid_t crgetgid(cred_t *cr);
+extern int crgetngroups(cred_t *cr);
+extern gid_t *crgetgroups(cred_t *cr);
+
+/*
+ * Condition variables
+ */
+typedef cond_t kcondvar_t;
+
+#define	CV_DEFAULT	USYNC_THREAD
+
+extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
+extern void cv_destroy(kcondvar_t *cv);
+extern void cv_wait(kcondvar_t *cv, kmutex_t *mp);
+extern clock_t cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime);
+extern void cv_signal(kcondvar_t *cv);
+extern void cv_broadcast(kcondvar_t *cv);
+
+/*
+ * Kernel memory
+ */
+#define	KM_SLEEP		UMEM_NOFAIL
+#define	KM_PUSHPAGE		KM_SLEEP
+#define	KM_NOSLEEP		UMEM_DEFAULT
+#define	KMC_NODEBUG		UMC_NODEBUG
+#define	KMC_NOTOUCH		0	/* not needed for userland caches */
+#define	KM_NODEBUG		0
+#define	kmem_alloc(_s, _f)	umem_alloc(_s, _f)
+#define	kmem_zalloc(_s, _f)	umem_zalloc(_s, _f)
+#define	kmem_free(_b, _s)	umem_free(_b, _s)
+#define	kmem_size()		(physmem * PAGESIZE)
+#define	kmem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) \
+	umem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i)
+#define	kmem_cache_destroy(_c)	umem_cache_destroy(_c)
+#define	kmem_cache_alloc(_c, _f) umem_cache_alloc(_c, _f)
+#define	kmem_cache_free(_c, _b)	umem_cache_free(_c, _b)
+#define	kmem_debugging()	0
+#define	kmem_cache_reap_now(_c)		/* nothing */
+#define	kmem_cache_set_move(_c, _cb)	/* nothing */
+#define	POINTER_INVALIDATE(_pp)		/* nothing */
+#define	POINTER_IS_VALID(_p)	0
+
+typedef umem_cache_t kmem_cache_t;
+
+typedef enum kmem_cbrc {
+	KMEM_CBRC_YES,
+	KMEM_CBRC_NO,
+	KMEM_CBRC_LATER,
+	KMEM_CBRC_DONT_NEED,
+	KMEM_CBRC_DONT_KNOW
+} kmem_cbrc_t;
+
+/*
+ * Task queues
+ */
+typedef struct taskq taskq_t;
+typedef uintptr_t taskqid_t;
+typedef void (task_func_t)(void *);
+
+#define	TASKQ_PREPOPULATE	0x0001
+#define	TASKQ_CPR_SAFE		0x0002	/* Use CPR safe protocol */
+#define	TASKQ_DYNAMIC		0x0004	/* Use dynamic thread scheduling */
+#define	TASKQ_THREADS_CPU_PCT	0x0008	/* Scale # threads by # cpus */
+#define	TASKQ_DC_BATCH		0x0010	/* Mark threads as batch */
+
+#define	TQ_SLEEP	KM_SLEEP	/* Can block for memory */
+#define	TQ_NOSLEEP	KM_NOSLEEP	/* cannot block for memory; may fail */
+#define	TQ_NOQUEUE	0x02		/* Do not enqueue if can't dispatch */
+#define	TQ_FRONT	0x08		/* Queue in front */
+
+extern taskq_t *system_taskq;
+
+extern taskq_t	*taskq_create(const char *, int, pri_t, int, int, uint_t);
+#define	taskq_create_proc(a, b, c, d, e, p, f) \
+	    (taskq_create(a, b, c, d, e, f))
+#define	taskq_create_sysdc(a, b, d, e, p, dc, f) \
+	    (taskq_create(a, b, maxclsyspri, d, e, f))
+extern taskqid_t taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
+extern void	taskq_destroy(taskq_t *);
+extern void	taskq_wait(taskq_t *);
+extern int	taskq_member(taskq_t *, void *);
+extern void	system_taskq_init(void);
+extern void	system_taskq_fini(void);
+
+#define	taskq_dispatch_safe(tq, func, arg, flags, task)			\
+	taskq_dispatch((tq), (func), (arg), (flags))
+
+#define	XVA_MAPSIZE	3
+#define	XVA_MAGIC	0x78766174
+
+/*
+ * vnodes
+ */
+typedef struct vnode {
+	uint64_t	v_size;		/* file size cached at vn_open() */
+	int		v_fd;		/* backing file descriptor */
+	char		*v_path;	/* copy of path; freed in vn_close() */
+} vnode_t;
+
+#define	AV_SCANSTAMP_SZ	32		/* length of anti-virus scanstamp */
+
+typedef struct xoptattr {
+	timestruc_t	xoa_createtime;	/* Create time of file */
+	uint8_t		xoa_archive;
+	uint8_t		xoa_system;
+	uint8_t		xoa_readonly;
+	uint8_t		xoa_hidden;
+	uint8_t		xoa_nounlink;
+	uint8_t		xoa_immutable;
+	uint8_t		xoa_appendonly;
+	uint8_t		xoa_nodump;
+	uint8_t		xoa_settable;
+	uint8_t		xoa_opaque;
+	uint8_t		xoa_av_quarantined;
+	uint8_t		xoa_av_modified;
+	uint8_t		xoa_av_scanstamp[AV_SCANSTAMP_SZ];
+	uint8_t		xoa_reparse;
+	uint8_t		xoa_offline;
+	uint8_t		xoa_sparse;
+} xoptattr_t;
+
+typedef struct vattr {
+	uint_t		va_mask;	/* bit-mask of attributes */
+	u_offset_t	va_size;	/* file size in bytes */
+} vattr_t;
+
+
+typedef struct xvattr {
+	vattr_t		xva_vattr;	/* Embedded vattr structure */
+	uint32_t	xva_magic;	/* Magic Number */
+	uint32_t	xva_mapsize;	/* Size of attr bitmap (32-bit words) */
+	uint32_t	*xva_rtnattrmapp;	/* Ptr to xva_rtnattrmap[] */
+	uint32_t	xva_reqattrmap[XVA_MAPSIZE];	/* Requested attrs */
+	uint32_t	xva_rtnattrmap[XVA_MAPSIZE];	/* Returned attrs */
+	xoptattr_t	xva_xoptattrs;	/* Optional attributes */
+} xvattr_t;
+
+typedef struct vsecattr {
+	uint_t		vsa_mask;	/* See below */
+	int		vsa_aclcnt;	/* ACL entry count */
+	void		*vsa_aclentp;	/* pointer to ACL entries */
+	int		vsa_dfaclcnt;	/* default ACL entry count */
+	void		*vsa_dfaclentp;	/* pointer to default ACL entries */
+	size_t		vsa_aclentsz;	/* ACE size in bytes of vsa_aclentp */
+} vsecattr_t;
+
+#define	AT_TYPE		0x00001
+#define	AT_MODE		0x00002
+#define	AT_UID		0x00004
+#define	AT_GID		0x00008
+#define	AT_FSID		0x00010
+#define	AT_NODEID	0x00020
+#define	AT_NLINK	0x00040
+#define	AT_SIZE		0x00080
+#define	AT_ATIME	0x00100
+#define	AT_MTIME	0x00200
+#define	AT_CTIME	0x00400
+#define	AT_RDEV		0x00800
+#define	AT_BLKSIZE	0x01000
+#define	AT_NBLOCKS	0x02000
+#define	AT_SEQ		0x08000
+#define	AT_XVATTR	0x10000
+
+#define	CRCREAT		0
+
+extern int fop_getattr(vnode_t *vp, vattr_t *vap);
+
+#define	VOP_CLOSE(vp, f, c, o, cr, ct)	0
+#define	VOP_PUTPAGE(vp, of, sz, fl, cr, ct)	0
+#define	VOP_GETATTR(vp, vap, cr)  fop_getattr((vp), (vap));
+
+#define	VOP_FSYNC(vp, f, cr, ct)	fsync((vp)->v_fd)
+
+#define	VN_RELE(vp)			vn_close(vp, 0, NULL, NULL)
+#define	VN_RELE_ASYNC(vp, taskq)	vn_close(vp, 0, NULL, NULL)
+
+#define	vn_lock(vp, type)
+#define	VOP_UNLOCK(vp, type)
+
+extern int vn_open(char *path, int x1, int oflags, int mode, vnode_t **vpp,
+    int x2, int x3);
+extern int vn_openat(char *path, int x1, int oflags, int mode, vnode_t **vpp,
+    int x2, int x3, vnode_t *vp, int fd);
+extern int vn_rdwr(int uio, vnode_t *vp, void *addr, ssize_t len,
+    offset_t offset, int x1, int x2, rlim64_t x3, void *x4, ssize_t *residp);
+extern void vn_close(vnode_t *vp, int openflag, cred_t *cr, kthread_t *td);
+
+#define	vn_remove(path, x1, x2)		remove(path)
+#define	vn_rename(from, to, seg)	rename((from), (to))
+#define	vn_is_readonly(vp)		B_FALSE
+
+extern vnode_t *rootdir;
+
+#include <sys/file.h>		/* for FREAD, FWRITE, etc */
+#define	FTRUNC	O_TRUNC
+
+/*
+ * Random stuff
+ */
+#define	ddi_get_lbolt()		(gethrtime() >> 23)
+#define	ddi_get_lbolt64()	(gethrtime() >> 23)
+#define	hz	119	/* frequency when using gethrtime() >> 23 for lbolt */
+
+extern void delay(clock_t ticks);
+
+#define	gethrestime_sec() time(NULL)
+#define	gethrestime(t) \
+	do {\
+		(t)->tv_sec = gethrestime_sec();\
+		(t)->tv_nsec = 0;\
+	} while (0);
+
+#define	max_ncpus	64
+
+#define	minclsyspri	60
+#define	maxclsyspri	99
+
+#define	CPU_SEQID	(thr_self() & (max_ncpus - 1))
+
+#define	kcred		NULL
+#define	CRED()		NULL
+
+#ifndef ptob
+#define	ptob(x)		((x) * PAGESIZE)
+#endif
+
+extern uint64_t physmem;
+
+extern int highbit(ulong_t i);
+extern int random_get_bytes(uint8_t *ptr, size_t len);
+extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len);
+
+extern void kernel_init(int);
+extern void kernel_fini(void);
+
+struct spa;
+extern void nicenum(uint64_t num, char *buf);
+extern void show_pool_stats(struct spa *);
+
+typedef struct callb_cpr {
+	kmutex_t	*cc_lockp;
+} callb_cpr_t;
+
+#define	CALLB_CPR_INIT(cp, lockp, func, name)	{		\
+	(cp)->cc_lockp = lockp;					\
+}
+
+#define	CALLB_CPR_SAFE_BEGIN(cp) {				\
+	ASSERT(MUTEX_HELD((cp)->cc_lockp));			\
+}
+
+#define	CALLB_CPR_SAFE_END(cp, lockp) {				\
+	ASSERT(MUTEX_HELD((cp)->cc_lockp));			\
+}
+
+#define	CALLB_CPR_EXIT(cp) {					\
+	ASSERT(MUTEX_HELD((cp)->cc_lockp));			\
+	mutex_exit((cp)->cc_lockp);				\
+}
+
+#define	zone_dataset_visible(x, y)	(1)
+#define	INGLOBALZONE(z)			(1)
+
+extern char *kmem_asprintf(const char *fmt, ...);
+#define	strfree(str) kmem_free((str), strlen(str)+1)
+
+/*
+ * Hostname information
+ */
+extern struct utsname utsname;
+extern char hw_serial[];	/* for userland-emulated hostid access */
+extern int ddi_strtoul(const char *str, char **nptr, int base,
+    unsigned long *result);
+
+extern int ddi_strtoull(const char *str, char **nptr, int base,
+    u_longlong_t *result);
+
+/* ZFS Boot Related stuff. */
+
+struct _buf {
+	intptr_t	_fd;
+};
+
+struct bootstat {
+	uint64_t st_size;
+};
+
+typedef struct ace_object {
+	uid_t		a_who;
+	uint32_t	a_access_mask;
+	uint16_t	a_flags;
+	uint16_t	a_type;
+	uint8_t		a_obj_type[16];
+	uint8_t		a_inherit_obj_type[16];
+} ace_object_t;
+
+
+#define	ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE	0x05
+#define	ACE_ACCESS_DENIED_OBJECT_ACE_TYPE	0x06
+#define	ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE	0x07
+#define	ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE	0x08
+
+extern struct _buf *kobj_open_file(char *name);
+extern int kobj_read_file(struct _buf *file, char *buf, unsigned size,
+    unsigned off);
+extern void kobj_close_file(struct _buf *file);
+extern int kobj_get_filesize(struct _buf *file, uint64_t *size);
+extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr);
+extern int zfs_secpolicy_rename_perms(const char *from, const char *to,
+    cred_t *cr);
+extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr);
+extern zoneid_t getzoneid(void);
+/* Random compatibility stuff. */
+#define	lbolt	(gethrtime() >> 23)
+#define	lbolt64	(gethrtime() >> 23)
+
+extern uint64_t physmem;
+
+#define	gethrestime_sec()	time(NULL)
+
+#define	pwrite64(d, p, n, o)	pwrite(d, p, n, o)
+#define	readdir64(d)		readdir(d)
+#define	SIGPENDING(td)		(0)
+#define	root_mount_wait()	do { } while (0)
+#define	root_mounted()		(1)
+
+struct file {
+	void *dummy;
+};
+
+#define	FCREAT	O_CREAT
+#define	FOFFMAX	0x0
+
+/* SID stuff */
+typedef struct ksiddomain {
+	uint_t	kd_ref;
+	uint_t	kd_len;
+	char	*kd_name;
+} ksiddomain_t;
+
+ksiddomain_t *ksid_lookupdomain(const char *);
+void ksiddomain_rele(ksiddomain_t *);
+
+typedef	uint32_t	idmap_rid_t;
+
+#define	DDI_SLEEP	KM_SLEEP
+#define	ddi_log_sysevent(_a, _b, _c, _d, _e, _f, _g)	(0)
+
+#define	SX_SYSINIT(name, lock, desc)
+
+#define	SYSCTL_DECL(...)
+#define	SYSCTL_NODE(...)
+#define	SYSCTL_INT(...)
+#define	SYSCTL_UINT(...)
+#define	SYSCTL_ULONG(...)
+#define	SYSCTL_QUAD(...)
+#define	SYSCTL_UQUAD(...)
+#ifdef TUNABLE_INT
+#undef TUNABLE_INT
+#undef TUNABLE_ULONG
+#undef TUNABLE_QUAD
+#endif
+#define	TUNABLE_INT(...)
+#define	TUNABLE_ULONG(...)
+#define	TUNABLE_QUAD(...)
+
+/* Errors */
+
+#ifndef	ERESTART
+#define	ERESTART	(-1)
+#endif
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* _SYS_ZFS_CONTEXT_H */
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzpool/common/taskq.c b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/taskq.c
new file mode 100644
index 0000000000000000000000000000000000000000..c407bba09ccdf34f74d7c3d92dd88b99b5bb0d2d
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/taskq.c
@@ -0,0 +1,303 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/zfs_context.h>
+
+int taskq_now;		/* nonzero: taskq_dispatch() runs tasks synchronously */
+taskq_t *system_taskq;	/* shared default queue; see system_taskq_init() */
+
+/* One queued invocation of task_func(task_arg), linked into tq_task. */
+typedef struct task {
+	struct task	*task_next;
+	struct task	*task_prev;
+	task_func_t	*task_func;
+	void		*task_arg;
+} task_t;
+
+#define	TASKQ_ACTIVE	0x00010000
+
+/*
+ * Userland taskq backed by bound threads.  tq_task is the head of a
+ * circular doubly-linked list of pending tasks; tq_freelist caches
+ * task_t entries so steady-state dispatch avoids kmem_alloc().
+ */
+struct taskq {
+	kmutex_t	tq_lock;	/* protects all fields below */
+	krwlock_t	tq_threadlock;	/* held as reader while a task runs */
+	kcondvar_t	tq_dispatch_cv;	/* signaled when work is queued */
+	kcondvar_t	tq_wait_cv;	/* broadcast on drain / thread exit */
+	thread_t	*tq_threadlist;	/* worker thread ids */
+	int		tq_flags;	/* TASKQ_* flags | TASKQ_ACTIVE */
+	int		tq_active;	/* workers not idle in cv_wait */
+	int		tq_nthreads;	/* live worker threads */
+	int		tq_nalloc;	/* task_t entries currently allocated */
+	int		tq_minalloc;	/* freelist cache floor */
+	int		tq_maxalloc;	/* allocation throttle ceiling */
+	kcondvar_t	tq_maxalloc_cv;	/* signaled by task_free() */
+	int		tq_maxalloc_wait; /* allocators throttled at ceiling */
+	task_t		*tq_freelist;	/* cached free task_t entries */
+	task_t		tq_task;	/* pending-task list head */
+};
+
+/*
+ * Allocate a task entry.  Entries come from the free list once
+ * tq_nalloc has reached tq_minalloc; otherwise a fresh one is
+ * kmem_alloc()ed.  At the tq_maxalloc ceiling, non-sleeping callers
+ * get NULL immediately, while KM_SLEEP callers are throttled (see the
+ * comment below) and retry.
+ * Entered and exited with tq_lock held; the lock is dropped around the
+ * kmem_alloc() call itself.  Returns NULL if no entry was obtained.
+ */
+static task_t *
+task_alloc(taskq_t *tq, int tqflags)
+{
+	task_t *t;
+	int rv;
+
+again:	if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
+		tq->tq_freelist = t->task_next;
+	} else {
+		if (tq->tq_nalloc >= tq->tq_maxalloc) {
+			if (!(tqflags & KM_SLEEP))
+				return (NULL);
+
+			/*
+			 * We don't want to exceed tq_maxalloc, but we can't
+			 * wait for other tasks to complete (and thus free up
+			 * task structures) without risking deadlock with
+			 * the caller.  So, we just delay for one second
+			 * to throttle the allocation rate. If we have tasks
+			 * complete before the one second timeout expires then
+			 * task_free() will signal us and we will
+			 * immediately retry the allocation.
+			 */
+			tq->tq_maxalloc_wait++;
+			rv = cv_timedwait(&tq->tq_maxalloc_cv,
+			    &tq->tq_lock, ddi_get_lbolt() + hz);
+			tq->tq_maxalloc_wait--;
+			if (rv > 0)
+				goto again;		/* signaled */
+		}
+		mutex_exit(&tq->tq_lock);
+
+		t = kmem_alloc(sizeof (task_t), tqflags & KM_SLEEP);
+
+		mutex_enter(&tq->tq_lock);
+		if (t != NULL)
+			tq->tq_nalloc++;
+	}
+	return (t);
+}
+
+/*
+ * Return a task entry to the free list, or kmem_free() it outright once
+ * tq_nalloc exceeds tq_minalloc.  Wakes any allocator throttled in
+ * task_alloc().  Called with tq_lock held; the lock is dropped around
+ * kmem_free().
+ */
+static void
+task_free(taskq_t *tq, task_t *t)
+{
+	if (tq->tq_nalloc <= tq->tq_minalloc) {
+		t->task_next = tq->tq_freelist;
+		tq->tq_freelist = t;
+	} else {
+		tq->tq_nalloc--;
+		mutex_exit(&tq->tq_lock);
+		kmem_free(t, sizeof (task_t));
+		mutex_enter(&tq->tq_lock);
+	}
+
+	if (tq->tq_maxalloc_wait)
+		cv_signal(&tq->tq_maxalloc_cv);
+}
+
+/*
+ * Queue func(arg) for execution by the taskq's workers.  TQ_FRONT
+ * inserts at the head of the list, otherwise at the tail.  With the
+ * global taskq_now set, the function is run synchronously instead.
+ * Returns a nonzero id on success, 0 if task allocation failed.
+ */
+taskqid_t
+taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t tqflags)
+{
+	task_t *t;
+
+	if (taskq_now) {
+		func(arg);
+		return (1);
+	}
+
+	mutex_enter(&tq->tq_lock);
+	ASSERT(tq->tq_flags & TASKQ_ACTIVE);
+	if ((t = task_alloc(tq, tqflags)) == NULL) {
+		mutex_exit(&tq->tq_lock);
+		return (0);
+	}
+	if (tqflags & TQ_FRONT) {
+		t->task_next = tq->tq_task.task_next;
+		t->task_prev = &tq->tq_task;
+	} else {
+		t->task_next = &tq->tq_task;
+		t->task_prev = tq->tq_task.task_prev;
+	}
+	/* Splice t into the circular list between task_prev and task_next. */
+	t->task_next->task_prev = t;
+	t->task_prev->task_next = t;
+	t->task_func = func;
+	t->task_arg = arg;
+	cv_signal(&tq->tq_dispatch_cv);
+	mutex_exit(&tq->tq_lock);
+	return (1);
+}
+
+/*
+ * Block until the pending-task list is empty and no worker is actively
+ * running a task (tq_active drops to 0 only when all workers are idle).
+ */
+void
+taskq_wait(taskq_t *tq)
+{
+	mutex_enter(&tq->tq_lock);
+	while (tq->tq_task.task_next != &tq->tq_task || tq->tq_active != 0)
+		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
+	mutex_exit(&tq->tq_lock);
+}
+
+/*
+ * Worker thread body: remove the task at the head of the list and run
+ * it with tq_lock dropped (tq_threadlock held as reader around the
+ * callback).  tq_active is decremented while idle so taskq_wait() can
+ * detect quiescence; the loop exits once TASKQ_ACTIVE is cleared by
+ * taskq_destroy().
+ */
+static void *
+taskq_thread(void *arg)
+{
+	taskq_t *tq = arg;
+	task_t *t;
+
+	mutex_enter(&tq->tq_lock);
+	while (tq->tq_flags & TASKQ_ACTIVE) {
+		if ((t = tq->tq_task.task_next) == &tq->tq_task) {
+			/* Queue empty: go idle until new work is dispatched. */
+			if (--tq->tq_active == 0)
+				cv_broadcast(&tq->tq_wait_cv);
+			cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
+			tq->tq_active++;
+			continue;
+		}
+		/* Unlink t from the pending list before dropping the lock. */
+		t->task_prev->task_next = t->task_next;
+		t->task_next->task_prev = t->task_prev;
+		mutex_exit(&tq->tq_lock);
+
+		rw_enter(&tq->tq_threadlock, RW_READER);
+		t->task_func(t->task_arg);
+		rw_exit(&tq->tq_threadlock);
+
+		mutex_enter(&tq->tq_lock);
+		task_free(tq, t);
+	}
+	tq->tq_nthreads--;
+	cv_broadcast(&tq->tq_wait_cv);
+	mutex_exit(&tq->tq_lock);
+	return (NULL);
+}
+
+/*
+ * Create a taskq with nthreads bound worker threads (pri is unused in
+ * this userland port).  With TASKQ_THREADS_CPU_PCT, nthreads is instead
+ * a percentage (0-100) of the online CPUs, clamped to at least one
+ * thread.  TASKQ_PREPOPULATE preloads minalloc task entries onto the
+ * free list so early dispatches avoid allocation.
+ */
+/*ARGSUSED*/
+taskq_t *
+taskq_create(const char *name, int nthreads, pri_t pri,
+	int minalloc, int maxalloc, uint_t flags)
+{
+	taskq_t *tq = kmem_zalloc(sizeof (taskq_t), KM_SLEEP);
+	int t;
+
+	if (flags & TASKQ_THREADS_CPU_PCT) {
+		int pct;
+		ASSERT3S(nthreads, >=, 0);
+		ASSERT3S(nthreads, <=, 100);
+		pct = MIN(nthreads, 100);
+		pct = MAX(pct, 0);
+
+		nthreads = (sysconf(_SC_NPROCESSORS_ONLN) * pct) / 100;
+		nthreads = MAX(nthreads, 1);	/* need at least 1 thread */
+	} else {
+		ASSERT3S(nthreads, >=, 1);
+	}
+
+	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
+	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
+	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
+	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
+	tq->tq_flags = flags | TASKQ_ACTIVE;
+	tq->tq_active = nthreads;
+	tq->tq_nthreads = nthreads;
+	tq->tq_minalloc = minalloc;
+	tq->tq_maxalloc = maxalloc;
+	/* Empty circular list: the head points at itself. */
+	tq->tq_task.task_next = &tq->tq_task;
+	tq->tq_task.task_prev = &tq->tq_task;
+	tq->tq_threadlist = kmem_alloc(nthreads * sizeof (thread_t), KM_SLEEP);
+
+	if (flags & TASKQ_PREPOPULATE) {
+		mutex_enter(&tq->tq_lock);
+		while (minalloc-- > 0)
+			task_free(tq, task_alloc(tq, KM_SLEEP));
+		mutex_exit(&tq->tq_lock);
+	}
+
+	for (t = 0; t < nthreads; t++)
+		(void) thr_create(0, 0, taskq_thread,
+		    tq, THR_BOUND, &tq->tq_threadlist[t]);
+
+	return (tq);
+}
+
+/*
+ * Drain the queue, stop and join every worker thread, release all
+ * cached task entries, and free the taskq itself.  The queue must not
+ * be dispatched to once destruction begins.
+ */
+void
+taskq_destroy(taskq_t *tq)
+{
+	int t;
+	int nthreads = tq->tq_nthreads;
+
+	taskq_wait(tq);
+
+	mutex_enter(&tq->tq_lock);
+
+	/* Clearing TASKQ_ACTIVE makes each worker's loop terminate. */
+	tq->tq_flags &= ~TASKQ_ACTIVE;
+	cv_broadcast(&tq->tq_dispatch_cv);
+
+	while (tq->tq_nthreads != 0)
+		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
+
+	/* With tq_minalloc at 0, task_free() releases rather than caches. */
+	tq->tq_minalloc = 0;
+	while (tq->tq_nalloc != 0) {
+		ASSERT(tq->tq_freelist != NULL);
+		task_free(tq, task_alloc(tq, KM_SLEEP));
+	}
+
+	mutex_exit(&tq->tq_lock);
+
+	for (t = 0; t < nthreads; t++)
+		(void) thr_join(tq->tq_threadlist[t], NULL, NULL);
+
+	kmem_free(tq->tq_threadlist, nthreads * sizeof (thread_t));
+
+	rw_destroy(&tq->tq_threadlock);
+	mutex_destroy(&tq->tq_lock);
+	cv_destroy(&tq->tq_dispatch_cv);
+	cv_destroy(&tq->tq_wait_cv);
+	cv_destroy(&tq->tq_maxalloc_cv);
+
+	kmem_free(tq, sizeof (taskq_t));
+}
+
+/*
+ * Return nonzero if t — interpreted as a thread id cast to void * —
+ * is one of this taskq's worker threads.  Always true in taskq_now
+ * (synchronous) mode, since the caller is effectively the worker.
+ */
+int
+taskq_member(taskq_t *tq, void *t)
+{
+	int i;
+
+	if (taskq_now)
+		return (1);
+
+	for (i = 0; i < tq->tq_nthreads; i++)
+		if (tq->tq_threadlist[i] == (thread_t)(uintptr_t)t)
+			return (1);
+
+	return (0);
+}
+
+/*
+ * Create the global system taskq: 64 threads, prepopulated with 4 task
+ * entries and capped at 512.
+ */
+void
+system_taskq_init(void)
+{
+	system_taskq = taskq_create("system_taskq", 64, minclsyspri, 4, 512,
+	    TASKQ_DYNAMIC | TASKQ_PREPOPULATE);
+}
+
+/* Tear down the global system taskq and clear the pointer. */
+void
+system_taskq_fini(void)
+{
+	taskq_destroy(system_taskq);
+	system_taskq = NULL; /* defensive */
+}
diff --git a/bsd/cddl/contrib/opensolaris/lib/libzpool/common/util.c b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/util.c
new file mode 100644
index 0000000000000000000000000000000000000000..9b99531fd1c5290ade0620bb0994a78de2a8733d
--- /dev/null
+++ b/bsd/cddl/contrib/opensolaris/lib/libzpool/common/util.c
@@ -0,0 +1,155 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#include <assert.h>
+#include <sys/zfs_context.h>
+#include <sys/avl.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/spa.h>
+#include <sys/fs/zfs.h>
+#include <sys/refcount.h>
+
+/*
+ * Routines needed by more than one client of libzpool.
+ */
+
+/*
+ * Format num into buf as a human-readable, 1024-based quantity
+ * ("1023", "1.50K", "12G").  One or two decimals are printed when the
+ * scaled value is small and num is not an exact power of two.  The
+ * widest output is 5 characters plus NUL; callers in this file pass
+ * 6-byte buffers accordingly.
+ */
+void
+nicenum(uint64_t num, char *buf)
+{
+	uint64_t n = num;
+	int index = 0;
+	char u;
+
+	while (n >= 1024) {
+		n = (n + (1024 / 2)) / 1024; /* Round up or down */
+		index++;
+	}
+
+	/* index is at most 6 ('E') since num is 64 bits. */
+	u = " KMGTPE"[index];
+
+	if (index == 0) {
+		(void) sprintf(buf, "%llu", (u_longlong_t)n);
+	} else if (n < 10 && (num & (num - 1)) != 0) {
+		(void) sprintf(buf, "%.2f%c",
+		    (double)num / (1ULL << 10 * index), u);
+	} else if (n < 100 && (num & (num - 1)) != 0) {
+		(void) sprintf(buf, "%.1f%c",
+		    (double)num / (1ULL << 10 * index), u);
+	} else {
+		(void) sprintf(buf, "%llu%c", (u_longlong_t)n, u);
+	}
+}
+
+/*
+ * Recursively print one row of capacity/operations/bandwidth/error
+ * statistics for the vdev described by nv, then recurse into its
+ * children of type ctype (ZPOOL_CONFIG_CHILDREN, _L2CACHE or _SPARES).
+ * desc is the row label (NULL suppresses the row, printing children
+ * only); indent is the nesting depth in spaces.
+ */
+static void
+show_vdev_stats(const char *desc, const char *ctype, nvlist_t *nv, int indent)
+{
+	vdev_stat_t *vs;
+	vdev_stat_t v0 = { 0 };
+	uint64_t sec;
+	uint64_t is_log = 0;
+	nvlist_t **child;
+	uint_t c, children;
+	char used[6], avail[6];
+	char rops[6], wops[6], rbytes[6], wbytes[6], rerr[6], werr[6], cerr[6];
+	char *prefix = "";
+
+	if (indent == 0 && desc != NULL) {
+		/* Top-level call: emit the two-line column header first. */
+		(void) printf("                           "
+		    " capacity   operations   bandwidth  ---- errors ----\n");
+		(void) printf("description                "
+		    "used avail  read write  read write  read write cksum\n");
+	}
+
+	if (desc != NULL) {
+		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
+
+		if (is_log)
+			prefix = "log ";
+
+		/* Fall back to zeroed stats if the nvlist has none. */
+		if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+		    (uint64_t **)&vs, &c) != 0)
+			vs = &v0;
+
+		/* Seconds of uptime; min 1 to avoid dividing by zero. */
+		sec = MAX(1, vs->vs_timestamp / NANOSEC);
+
+		nicenum(vs->vs_alloc, used);
+		nicenum(vs->vs_space - vs->vs_alloc, avail);
+		nicenum(vs->vs_ops[ZIO_TYPE_READ] / sec, rops);
+		nicenum(vs->vs_ops[ZIO_TYPE_WRITE] / sec, wops);
+		nicenum(vs->vs_bytes[ZIO_TYPE_READ] / sec, rbytes);
+		nicenum(vs->vs_bytes[ZIO_TYPE_WRITE] / sec, wbytes);
+		nicenum(vs->vs_read_errors, rerr);
+		nicenum(vs->vs_write_errors, werr);
+		nicenum(vs->vs_checksum_errors, cerr);
+
+		/* Blank capacity columns for vdevs with no space of their own. */
+		(void) printf("%*s%s%*s%*s%*s %5s %5s %5s %5s %5s %5s %5s\n",
+		    indent, "",
+		    prefix,
+		    indent + strlen(prefix) - 25 - (vs->vs_space ? 0 : 12),
+		    desc,
+		    vs->vs_space ? 6 : 0, vs->vs_space ? used : "",
+		    vs->vs_space ? 6 : 0, vs->vs_space ? avail : "",
+		    rops, wops, rbytes, wbytes, rerr, werr, cerr);
+	}
+
+	if (nvlist_lookup_nvlist_array(nv, ctype, &child, &children) != 0)
+		return;
+
+	for (c = 0; c < children; c++) {
+		nvlist_t *cnv = child[c];
+		char *cname, *tname;
+		uint64_t np;
+		/* Label by path if present, else vdev type, else unknown. */
+		if (nvlist_lookup_string(cnv, ZPOOL_CONFIG_PATH, &cname) &&
+		    nvlist_lookup_string(cnv, ZPOOL_CONFIG_TYPE, &cname))
+			cname = "<unknown>";
+		/* +2: room for an appended parity digit and the NUL. */
+		/* NOTE(review): calloc() return is not checked here. */
+		tname = calloc(1, strlen(cname) + 2);
+		(void) strcpy(tname, cname);
+		if (nvlist_lookup_uint64(cnv, ZPOOL_CONFIG_NPARITY, &np) == 0)
+			tname[strlen(tname)] = '0' + np;
+		show_vdev_stats(tname, ctype, cnv, indent + 2);
+		free(tname);
+	}
+}
+
+/*
+ * Print the full statistics table for spa: the main vdev tree under the
+ * pool name, followed by any L2ARC cache and spare devices.
+ */
+void
+show_pool_stats(spa_t *spa)
+{
+	nvlist_t *config, *nvroot;
+	char *name;
+
+	VERIFY(spa_get_stats(spa_name(spa), &config, NULL, 0) == 0);
+
+	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+	    &nvroot) == 0);
+	VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
+	    &name) == 0);
+
+	show_vdev_stats(name, ZPOOL_CONFIG_CHILDREN, nvroot, 0);
+	show_vdev_stats(NULL, ZPOOL_CONFIG_L2CACHE, nvroot, 0);
+	show_vdev_stats(NULL, ZPOOL_CONFIG_SPARES, nvroot, 0);
+
+	nvlist_free(config);
+}
diff --git a/bsd/cddl/lib/libzfs/Makefile b/bsd/cddl/lib/libzfs/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9e68da4c3733986a84e0a7aad9e757e5c5cd5f53
--- /dev/null
+++ b/bsd/cddl/lib/libzfs/Makefile
@@ -0,0 +1,58 @@
+# $FreeBSD$
+
+# Build libzfs, the userland ZFS management library, from the
+# OpenSolaris contrib sources plus FreeBSD compatibility shims.
+.PATH: ${.CURDIR}/../../../cddl/compat/opensolaris/misc
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/zfs
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/fs/zfs
+.PATH: ${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libzfs/common
+
+LIB=	zfs
+DPADD=	${LIBMD} ${LIBPTHREAD} ${LIBUMEM} ${LIBUTIL} ${LIBM} ${LIBNVPAIR}
+LDADD=	-lmd -lpthread -lumem -lutil -lm -lnvpair
+
+# FreeBSD compatibility shims.
+SRCS=	deviceid.c \
+	fsshare.c \
+	mkdirp.c \
+	mnttab.c \
+	zmount.c \
+	zone.c
+
+# libzfs proper plus shared zfs common-code sources.
+SRCS+=	libzfs_changelist.c \
+	libzfs_config.c \
+	libzfs_dataset.c \
+	libzfs_diff.c \
+	libzfs_import.c \
+	libzfs_iter.c \
+	libzfs_mount.c \
+	libzfs_pool.c \
+	libzfs_sendrecv.c \
+	libzfs_status.c \
+	libzfs_util.c \
+	zfeature_common.c \
+	zfs_comutil.c \
+	zfs_deleg.c \
+	zfs_fletcher.c \
+	zfs_ioctl_compat.c \
+	zfs_namecheck.c \
+	zfs_prop.c \
+	zpool_prop.c \
+	zprop_common.c \
+
+WARNS?=	0
+CSTD=	c99
+CFLAGS+= -DZFS_NO_ACL
+# Include paths for the compat layer and the OpenSolaris source trees.
+CFLAGS+= -I${.CURDIR}/../../../sbin/mount
+CFLAGS+= -I${.CURDIR}/../../../cddl/lib/libumem
+CFLAGS+= -I${.CURDIR}/../../../sys/cddl/compat/opensolaris
+CFLAGS+= -I${.CURDIR}/../../../cddl/compat/opensolaris/include
+CFLAGS+= -I${.CURDIR}/../../../cddl/compat/opensolaris/lib/libumem
+CFLAGS+= -I${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libzpool/common
+CFLAGS+= -I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/zfs
+CFLAGS+= -I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/fs/zfs
+CFLAGS+= -I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/sys
+CFLAGS+= -I${.CURDIR}/../../../cddl/contrib/opensolaris/head
+CFLAGS+= -I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common
+CFLAGS+= -I${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libnvpair
+CFLAGS+= -I${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libuutil/common
+CFLAGS+= -I${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libzfs/common
+
+.include <bsd.lib.mk>
diff --git a/bsd/cddl/lib/libzpool/Makefile b/bsd/cddl/lib/libzpool/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b159d3af73117f374bd37e3658693a96ea2db089
--- /dev/null
+++ b/bsd/cddl/lib/libzpool/Makefile
@@ -0,0 +1,70 @@
+# $FreeBSD$
+
+# Build libzpool: a userland compilation of the kernel ZFS pool code,
+# used by debugging tools such as zdb.  Source lists come from the
+# kernel Makefile.files so the two stay in sync.
+.include "${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/Makefile.files"
+
+# ZFS_COMMON_SRCS
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/fs/zfs
+# ZFS_SHARED_SRCS
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/zfs
+# KERNEL_SRCS
+.PATH: ${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libzpool/common
+# LIST_SRCS
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/os
+# ATOMIC_SRCS: use the hand-written assembly atomics when the
+# architecture provides them, else the C fallback.
+.if exists(${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/atomic/${MACHINE_ARCH}/opensolaris_atomic.S)
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/atomic/${MACHINE_ARCH}
+ATOMIC_SRCS=	opensolaris_atomic.S
+.if ${MACHINE_ARCH} != "ia64" && ${MACHINE_ARCH} != "sparc64"
+ACFLAGS+=	-Wa,--noexecstack
+.endif
+.else
+.PATH: ${.CURDIR}/../../../sys/cddl/compat/opensolaris/kern
+ATOMIC_SRCS=	opensolaris_atomic.c
+.endif
+# UNICODE_SRCS
+.PATH: ${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/unicode
+
+LIB=		zpool
+
+ZFS_COMMON_SRCS= ${ZFS_COMMON_OBJS:C/.o$/.c/} vdev_file.c trim_map.c
+ZFS_SHARED_SRCS= ${ZFS_SHARED_OBJS:C/.o$/.c/}
+KERNEL_SRCS=	kernel.c taskq.c util.c
+LIST_SRCS=	list.c
+UNICODE_SRCS=	u8_textprep.c
+
+SRCS=		${ZFS_COMMON_SRCS} ${ZFS_SHARED_SRCS} \
+		${KERNEL_SRCS} ${LIST_SRCS} ${ATOMIC_SRCS} \
+		${UNICODE_SRCS}
+
+WARNS?=		0
+CFLAGS+=	-I${.CURDIR}/../../../sys/cddl/compat/opensolaris
+CFLAGS+=	-I${.CURDIR}/../../compat/opensolaris/include
+CFLAGS+=	-I${.CURDIR}/../../compat/opensolaris/lib/libumem
+CFLAGS+=	-I${.CURDIR}/../../contrib/opensolaris/lib/libzpool/common
+CFLAGS+=	-I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/sys
+CFLAGS+=	-I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common/fs/zfs
+CFLAGS+=	-I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/common/zfs
+CFLAGS+=	-I${.CURDIR}/../../../sys/cddl/contrib/opensolaris/uts/common
+CFLAGS+=	-I${.CURDIR}/../../contrib/opensolaris/head
+CFLAGS+=	-I${.CURDIR}/../../lib/libumem
+CFLAGS+=	-I${.CURDIR}/../../contrib/opensolaris/lib/libnvpair
+# XXX: pthread doesn't have mutex_owned() equivalent, so we need to look
+#      into libthr private structures. That's sooo evil, but it's only for
+#      ZFS debugging tools needs.
+CFLAGS+=	-DWANTS_MUTEX_OWNED
+CFLAGS+=	-I${.CURDIR}/../../../lib/libpthread/thread
+CFLAGS+=	-I${.CURDIR}/../../../lib/libpthread/sys
+CFLAGS+=	-I${.CURDIR}/../../../lib/libthr/arch/${MACHINE_CPUARCH}/include
+
+DPADD=		${LIBMD} ${LIBPTHREAD} ${LIBZ}
+LDADD=		-lmd -lpthread -lz
+
+# atomic.S doesn't like profiling.
+NO_PROFILE=
+
+CSTD=	c99
+
+CFLAGS+=	-DDEBUG=1
+#DEBUG_FLAGS+=	-g
+
+.include <bsd.lib.mk>