dbuf: Handle arcbuf assignment after block cloning

In some cases dbuf_assign_arcbuf() may be called on a block that was
recently cloned. If the clone happened in the current TXG, we must undo
the block cloning first, since the single dirty record allowed per TXG
cannot and should not describe both a clone and an overwrite at the
same time. This can happen, for example, when writing to a file while
cloning it concurrently.
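
To make that constraint concrete, here is a minimal, self-contained C
sketch. The types and helpers below are invented for illustration only
and are not the actual ZFS dbuf or dirty-record structures:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified model -- not the real ZFS structures. */
    typedef enum { REC_NONE, REC_CLONE, REC_DATA } rec_kind_t;

    typedef struct {
            uint64_t txg;     /* transaction group the record belongs to */
            rec_kind_t kind;  /* a record is a clone OR data, never both */
    } dirty_rec_t;

    typedef struct {
            dirty_rec_t rec;  /* at most one dirty record per TXG */
    } dbuf_model_t;

    /* Drop a pending clone, as dbuf_undirty() does in the real code. */
    static void undo_clone(dbuf_model_t *db)
    {
            db->rec.kind = REC_NONE;
    }

    /* Overwrite in TXG txg: a same-TXG clone must be undone first. */
    static void assign_buf(dbuf_model_t *db, uint64_t txg)
    {
            if (db->rec.kind == REC_CLONE && db->rec.txg == txg)
                    undo_clone(db);            /* forget the clone... */
            assert(db->rec.kind != REC_CLONE); /* ...before dirtying data */
            db->rec = (dirty_rec_t){ .txg = txg, .kind = REC_DATA };
    }

    int main(void)
    {
            dbuf_model_t db = { .rec = { .txg = 42, .kind = REC_CLONE } };
            assign_buf(&db, 42); /* overwrite in the clone's own TXG */
            printf("kind after overwrite: %d (2 == REC_DATA)\n", db.rec.kind);
            return (0);
    }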

This is also covered by a test. The file must be large, around 4G, to
trigger the bug. For that reason a 4G file is created from
/dev/urandom, and then a clone loop using FICLONE is started. After
that the whole file is overwritten twice. The test triggers the bug
most of the time, but not every time.
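
For reference, FICLONE is the Linux ioctl behind the clone loop the
commit message describes. A stand-alone reproduction of a single clone
step might look like the sketch below; the paths are placeholders, not
part of the test:

    #include <fcntl.h>
    #include <linux/fs.h>   /* FICLONE */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            /* Placeholder paths; the test uses files under the pool. */
            int src = open("/testpool/file", O_RDONLY);
            int dst = open("/testpool/clone", O_WRONLY | O_CREAT, 0644);

            if (src == -1 || dst == -1) {
                    perror("open");
                    return (1);
            }
            /* Share src's blocks with dst instead of copying them. */
            if (ioctl(dst, FICLONE, src) == -1) {
                    perror("FICLONE");
                    return (1);
            }
            close(src);
            close(dst);
            return (0);
    }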

Signed-off-by: oromenahar <mail@mkwg.de>
oromenahar committed Dec 12, 2023
1 parent 917c8d3 commit a230b1d
Showing 5 changed files with 97 additions and 2 deletions.
13 changes: 12 additions & 1 deletion module/zfs/dbuf.c
@@ -2945,7 +2945,8 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
 		cv_wait(&db->db_changed, &db->db_mtx);
 
-	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
+	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED ||
+	    db->db_state == DB_NOFILL);
 
 	if (db->db_state == DB_CACHED &&
 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
@@ -2982,7 +2983,17 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 			arc_buf_destroy(db->db_buf, db);
 		}
 		db->db_buf = NULL;
+	} else if (db->db_state == DB_NOFILL) {
+		/*
+		 * We will be completely replacing the cloned block. In case
+		 * it was cloned in this transaction group, let's undirty the
+		 * pending clone and mark the block as uncached. This will be
+		 * as if the clone was never done.
+		 */
+		VERIFY(!dbuf_undirty(db, tx));
+		db->db_state = DB_UNCACHED;
 	}
 
 	ASSERT(db->db_buf == NULL);
 	dbuf_set_data(db, buf);
 	db->db_state = DB_FILL;
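
A note on the VERIFY above: dbuf_undirty() returns whether undirtying
evicted the dbuf, so VERIFY(!dbuf_undirty(db, tx)) both drops any
same-TXG clone record and asserts that the dbuf itself survives to be
marked DB_UNCACHED and refilled with the new buffer.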
3 changes: 2 additions & 1 deletion tests/runfiles/linux.run
@@ -44,7 +44,8 @@ tests = ['block_cloning_copyfilerange', 'block_cloning_copyfilerange_partial',
     'block_cloning_copyfilerange_cross_dataset',
     'block_cloning_cross_enc_dataset',
     'block_cloning_copyfilerange_fallback_same_txg',
-    'block_cloning_replay', 'block_cloning_replay_encrypted']
+    'block_cloning_replay', 'block_cloning_replay_encrypted',
+    'block_cloning_ficlone_and_write']
 tags = ['functional', 'block_cloning']
2 changes: 2 additions & 0 deletions tests/test-runner/bin/zts-report.py.in
@@ -311,6 +311,8 @@ elif sys.platform.startswith('linux'):
         ['SKIP', cfr_cross_reason],
     'block_cloning/block_cloning_cross_enc_dataset':
         ['SKIP', cfr_cross_reason],
+    'block_cloning/block_cloning_ficlone_and_write':
+        ['SKIP', cfr_reason],
 })
 
 # Not all Github actions runners have scsi_debug module, so we may skip
1 change: 1 addition & 0 deletions tests/zfs-tests/tests/Makefile.am
@@ -454,6 +454,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/block_cloning/block_cloning_cross_enc_dataset.ksh \
 	functional/block_cloning/block_cloning_replay.ksh \
 	functional/block_cloning/block_cloning_replay_encrypted.ksh \
+	functional/block_cloning/block_cloning_ficlone_and_write.ksh \
 	functional/bootfs/bootfs_001_pos.ksh \
 	functional/bootfs/bootfs_002_neg.ksh \
 	functional/bootfs/bootfs_003_pos.ksh \
80 changes: 80 additions & 0 deletions tests/zfs-tests/tests/functional/block_cloning/block_cloning_ficlone_and_write.ksh
@@ -0,0 +1,80 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or https://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright (c) 2023, Kay Pedersen <mail@mkwg.de>
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib

verify_runnable "global"

if [[ $(linux_version) -lt $(linux_version "4.5") ]]; then
log_unsupported "copy_file_range not available before Linux 4.5"
fi

claim="O_TRUNC, writing and FICLONE to a large (>4G) file shouldn't fail"

log_assert $claim

NO_LOOP_BREAK=true

function cleanup
{
	datasetexists $TESTPOOL && destroy_pool $TESTPOOL
}

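# Keep cloning the file in the background until the foreground writer
# clears NO_LOOP_BREAK.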
function loop
{
	while $NO_LOOP_BREAK; do clonefile -c -t -q /$TESTPOOL/file /$TESTPOOL/clone; done
}

log_onexit cleanup

log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS

log_must dd if=/dev/urandom of=/$TESTPOOL/file bs=1M count=4000
log_must sync_pool $TESTPOOL


log_note "Copying entire file with FICLONE"

log_must clonefile -c /$TESTPOOL/file /$TESTPOOL/clone
log_must sync_pool $TESTPOOL

log_must have_same_content /$TESTPOOL/file /$TESTPOOL/clone

log_note "looping a clone"
loop &

log_must dd if=/dev/urandom of=/$TESTPOOL/clone bs=1M count=4000
log_must dd if=/dev/urandom of=/$TESTPOOL/clone bs=1M count=4000

NO_LOOP_BREAK=false

# Just to be sure all background jobs are killed.
log_must kill $(jobs -p)

log_must sync_pool $TESTPOOL

log_pass $claim
