diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index c5ccd4cd1e0c..3892f2595921 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -2945,7 +2945,8 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
 		cv_wait(&db->db_changed, &db->db_mtx);
 
-	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
+	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
+	    db->db_state == DB_NOFILL);
 
 	if (db->db_state == DB_CACHED &&
 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
@@ -2982,7 +2983,17 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 			arc_buf_destroy(db->db_buf, db);
 		}
 		db->db_buf = NULL;
+	} else if (db->db_state == DB_NOFILL) {
+		/*
+		 * We will be completely replacing the cloned block. In case
+		 * it was cloned in this transaction group, let's undirty the
+		 * pending clone and mark the block as uncached. This will be
+		 * as if the clone was never done.
+		 */
+		VERIFY(!dbuf_undirty(db, tx));
+		db->db_state = DB_UNCACHED;
 	}
+	ASSERT(db->db_buf == NULL);
 	dbuf_set_data(db, buf);
 	db->db_state = DB_FILL;
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index 17ba23352422..8909305d338c 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -44,7 +44,8 @@ tests = ['block_cloning_copyfilerange', 'block_cloning_copyfilerange_partial',
     'block_cloning_copyfilerange_cross_dataset',
     'block_cloning_cross_enc_dataset',
     'block_cloning_copyfilerange_fallback_same_txg',
-    'block_cloning_replay', 'block_cloning_replay_encrypted']
+    'block_cloning_replay', 'block_cloning_replay_encrypted',
+    'block_cloning_ficlone_and_write']
 tags = ['functional', 'block_cloning']
 
 [tests/functional/chattr:Linux]
diff --git a/tests/test-runner/bin/zts-report.py.in b/tests/test-runner/bin/zts-report.py.in
index 3b5eeacb6bad..273a5fb20490 100755
--- a/tests/test-runner/bin/zts-report.py.in
+++ b/tests/test-runner/bin/zts-report.py.in
@@ -311,6 +311,8 @@ elif sys.platform.startswith('linux'):
            ['SKIP', cfr_cross_reason],
        'block_cloning/block_cloning_cross_enc_dataset':
            ['SKIP', cfr_cross_reason],
+       'block_cloning/block_cloning_ficlone_and_write':
+           ['SKIP', cfr_reason],
    })
 
 # Not all Github actions runners have scsi_debug module, so we may skip
diff --git a/tests/zfs-tests/cmd/clonefile.c b/tests/zfs-tests/cmd/clonefile.c
index 696dc471d8c3..ab947ce46af4 100644
--- a/tests/zfs-tests/cmd/clonefile.c
+++ b/tests/zfs-tests/cmd/clonefile.c
@@ -135,14 +135,23 @@ usage(void)
 {
 	printf(
 	    "usage:\n"
+	    "\n"
 	    "  FICLONE:\n"
 	    "    clonefile -c <src> <dst>\n"
+	    "    clonefile [opts] -c <src> <dst>\n"
 	    "  FICLONERANGE:\n"
 	    "    clonefile -r <src> <dst> <soff> <doff> <len>\n"
+	    "    clonefile [opts] -r <src> <dst> <soff> <doff> <len>\n"
 	    "  copy_file_range:\n"
 	    "    clonefile -f <src> <dst> <soff> <doff> <len>\n"
+	    "    clonefile [opts] -f <src> <dst> <soff> <doff> <len>\n"
 	    "  FIDEDUPERANGE:\n"
-	    "    clonefile -d <src> <dst> <soff> <doff> <len>\n");
+	    "    clonefile -d <src> <dst> <soff> <doff> <len>\n"
+	    "    clonefile [opts] -d <src> <dst> <soff> <doff> <len>\n"
+	    "\n"
+	    "  options:\n"
+	    "    -t  truncate dstfile (open with O_TRUNC)\n"
+	    "\n");
 	return (1);
 }
 
@@ -157,9 +166,10 @@ int
 main(int argc, char **argv)
 {
 	cf_mode_t mode = CF_MODE_NONE;
+	int dstflags = 0;
 	char c;
 
-	while ((c = getopt(argc, argv, "crfdq")) != -1) {
+	while ((c = getopt(argc, argv, "crfdqt")) != -1) {
 		switch (c) {
 		case 'c':
 			mode = CF_MODE_CLONE;
@@ -176,6 +186,9 @@ main(int argc, char **argv)
 		case 'q':
 			quiet = 1;
 			break;
+		case 't':
+			dstflags = O_TRUNC;
+			break;
 		}
 	}
 
@@ -210,7 +223,7 @@ main(int argc, char **argv)
 		return (1);
 	}
 
-	int dfd = open(argv[optind+1], O_WRONLY|O_CREAT,
+	int dfd = open(argv[optind+1], O_WRONLY|O_CREAT|dstflags,
 	    S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH);
 	if (dfd < 0) {
 		fprintf(stderr, "open: %s: %s\n",
diff --git a/tests/zfs-tests/tests/Makefile.am b/tests/zfs-tests/tests/Makefile.am
index 3c9f09382424..09a8c13b4c80 100644
--- a/tests/zfs-tests/tests/Makefile.am
+++ b/tests/zfs-tests/tests/Makefile.am
@@ -454,6 +454,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/block_cloning/block_cloning_cross_enc_dataset.ksh \
 	functional/block_cloning/block_cloning_replay.ksh \
 	functional/block_cloning/block_cloning_replay_encrypted.ksh \
+	functional/block_cloning/block_cloning_ficlone_and_write.ksh \
 	functional/bootfs/bootfs_001_pos.ksh \
 	functional/bootfs/bootfs_002_neg.ksh \
 	functional/bootfs/bootfs_003_pos.ksh \
diff --git a/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone_and_write.ksh b/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone_and_write.ksh
new file mode 100755
index 000000000000..2db98308c1c5
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone_and_write.ksh
@@ -0,0 +1,80 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2023, Kay Pedersen
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib
+
+verify_runnable "global"
+
+if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
+	log_unsupported "copy_file_range not available before Linux 4.5"
+fi
+
+claim="O_TRUNC, writing and FICLONE to a large (>4G) file shouldn't fail"
+
+log_assert $claim
+
+NO_LOOP_BREAK=true
+
+function cleanup
+{
+	datasetexists $TESTPOOL && destroy_pool $TESTPOOL
+}
+
+function loop
+{
+	while $NO_LOOP_BREAK; do clonefile -c -t -q /$TESTPOOL/file /$TESTPOOL/clone; done
+}
+
+log_onexit cleanup
+
+log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=1M count=4000
+log_must sync_pool $TESTPOOL
+
+
+log_note "Copying entire file with FICLONE"
+
+log_must clonefile -c /$TESTPOOL/file /$TESTPOOL/clone
+log_must sync_pool $TESTPOOL
+
+log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone
+
+log_note "looping a clone"
+loop &
+
+log_must dd if=/dev/urandom of=/$TESTPOOL/clone bs=1M count=4000
+log_must dd if=/dev/urandom of=/$TESTPOOL/clone bs=1M count=4000
+
+NO_LOOP_BREAK=false
+
+# just to be sure all background jobs are killed.
+log_must kill $(jobs -p)
+
+log_must sync_pool $TESTPOOL
+
+log_pass $claim
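
The scenario the new test exercises, cloning a file with FICLONE and then overwriting the clone while the cloned blocks are still dirty in the open transaction group, is what drives dbuf_assign_arcbuf() into the DB_NOFILL path added above. A rough manual reproduction with the updated clonefile helper could look like the sketch below. It is illustrative only: the pool name, device and paths are placeholders, clonefile is the test-suite helper built from tests/zfs-tests/cmd/clonefile.c, and because hitting the same txg is timing dependent the test itself uses ~4 GB files and loops the clone in the background rather than relying on a single attempt.

# Illustrative sketch only: placeholder pool name, device and paths.
zpool create -o feature@block_cloning=enabled testpool /dev/sdX
dd if=/dev/urandom of=/testpool/file bs=1M count=128
zpool sync testpool

# FICLONE the file, truncating the destination first (-t, added by this change),
# then overwrite the clone before the cloned blocks have synced out.
clonefile -c -t -q /testpool/file /testpool/clone
dd if=/dev/urandom of=/testpool/clone bs=1M count=128
zpool sync testpool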