diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index e6bade11c859..9830e65abb54 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -2127,7 +2127,7 @@ int
 dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
 {
 	dnode_t *dn;
-	int err;
+	int restarted = 0, err;
 
 restart:
 	err = dnode_hold(os, object, FTAG, &dn);
@@ -2139,19 +2139,23 @@ dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
 
 	if (dnode_is_dirty(dn)) {
 		/*
 		 * If the zfs_dmu_offset_next_sync module option is enabled
-		 * then strict hole reporting has been requested.  Dirty
-		 * dnodes must be synced to disk to accurately report all
-		 * holes.  When disabled dirty dnodes are reported to not
-		 * have any holes which is always safe.
-		 *
-		 * When called by zfs_holey_common() the zp->z_rangelock
-		 * is held to prevent zfs_write() and mmap writeback from
-		 * re-dirtying the dnode after txg_wait_synced().
+		 * then hole reporting has been requested.  Dirty dnodes must
+		 * be synced to disk to accurately report holes.  When called
+		 * by zfs_holey_common() the zp->z_rangelock is held to prevent
+		 * zfs_write() and mmap writeback from re-dirtying the dnode
+		 * after txg_wait_synced().  Regardless, if a dnode has been
+		 * dirtied in consecutive txgs by another caller not holding
+		 * the range lock disable hole reporting to avoid looping.
 		 */
 		if (zfs_dmu_offset_next_sync) {
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);
+
+			if (restarted)
+				return (SET_ERROR(EBUSY));
+
			txg_wait_synced(dmu_objset_pool(os), 0);
+			restarted = 1;
			goto restart;
		}
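
For reference, below is a small, self-contained C sketch of the bounded-restart pattern this change introduces: retry the lookup at most once after waiting for a sync, and fail with EBUSY if the object is still dirty on the second pass. The names used here (object_t, object_is_dirty(), wait_for_sync(), offset_next_sketch()) are hypothetical stand-ins for illustration only, not OpenZFS interfaces.

#include <errno.h>
#include <stdio.h>

typedef struct {
	int dirty_passes;	/* how many lookups will still see the object dirty */
} object_t;

/* Stand-in for dnode_is_dirty(): dirty for the first dirty_passes checks. */
static int
object_is_dirty(object_t *obj)
{
	if (obj->dirty_passes > 0) {
		obj->dirty_passes--;
		return (1);
	}
	return (0);
}

/* Stand-in for txg_wait_synced(): a real implementation would block here. */
static void
wait_for_sync(object_t *obj)
{
	(void) obj;
}

static int
offset_next_sketch(object_t *obj)
{
	int restarted = 0;

restart:
	if (object_is_dirty(obj)) {
		/*
		 * A second restart means another writer re-dirtied the
		 * object after the first sync; give up instead of looping.
		 */
		if (restarted)
			return (EBUSY);

		wait_for_sync(obj);
		restarted = 1;
		goto restart;
	}

	/* Object is clean; holes could now be reported accurately. */
	return (0);
}

int
main(void)
{
	object_t busy = { .dirty_passes = 3 };	/* keeps getting re-dirtied */
	object_t quiet = { .dirty_passes = 1 };	/* clean after one sync */

	printf("busy:  %d (EBUSY)\n", offset_next_sketch(&busy));
	printf("quiet: %d\n", offset_next_sketch(&quiet));
	return (0);
}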