[VDO-5820][RHEL-57824] dm vdo: handle unaligned discards correctly #69

Merged 1 commit on Sep 20, 2024
drivers/md/dm-vdo/data-vio.c (15 changes: 9 additions, 6 deletions)
@@ -501,6 +501,7 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
 
 	memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
 	memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+	vdo_reset_completion(&data_vio->decrement_completion);
 	vdo_reset_completion(completion);
 	completion->error_handler = handle_data_vio_error;
 	set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
@@ -1274,12 +1275,14 @@ static void clean_hash_lock(struct vdo_completion *completion)
 static void finish_cleanup(struct data_vio *data_vio)
 {
 	struct vdo_completion *completion = &data_vio->vio.completion;
+	u32 discard_size = min_t(u32, data_vio->remaining_discard,
+				 VDO_BLOCK_SIZE - data_vio->offset);
 
 	VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
 			    "complete data_vio has no allocation lock");
 	VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
 			    "complete data_vio has no hash lock");
-	if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+	if ((data_vio->remaining_discard <= discard_size) ||
 	    (completion->result != VDO_SUCCESS)) {
 		struct data_vio_pool *pool = completion->vdo->data_vio_pool;
 
@@ -1288,12 +1291,12 @@ static void finish_cleanup(struct data_vio *data_vio)
 		return;
 	}
 
-	data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
-					     VDO_BLOCK_SIZE - data_vio->offset);
+	data_vio->remaining_discard -= discard_size;
 	data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
 	data_vio->read = data_vio->is_partial;
 	data_vio->offset = 0;
 	completion->requeue = true;
+	data_vio->first_reference_operation_complete = false;
 	launch_data_vio(data_vio, data_vio->logical.lbn + 1);
 }
 
@@ -1966,7 +1969,8 @@ static void allocate_block(struct vdo_completion *completion)
 		.state = VDO_MAPPING_STATE_UNCOMPRESSED,
 	};
 
-	if (data_vio->fua) {
+	if (data_vio->fua ||
+	    data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
 		prepare_for_dedupe(data_vio);
 		return;
 	}
@@ -2043,7 +2047,6 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
 		return;
 	}
 
-
 	/*
	 * We don't need to write any data, so skip allocation and just update the block map and
	 * reference counts (via the journal).
@@ -2052,7 +2055,7 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
 	if (data_vio->is_zero)
 		data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
 
-	if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+	if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
 		/* This is not the final block of a discard so we can't acknowledge it yet. */
 		update_metadata_for_data_vio_write(data_vio, NULL);
 		return;
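For readers skimming the change: the core of the fix is that each pass over a discard now consumes min(remaining_discard, VDO_BLOCK_SIZE - offset) bytes, so a discard that begins partway into a block only covers the tail of that block and then continues from offset 0 in the blocks that follow. Below is a minimal, self-contained C sketch of that arithmetic using made-up values (a 4096-byte block size is assumed, as in VDO); it is an illustration of the splitting logic, not the driver code itself.

    #include <stdint.h>
    #include <stdio.h>

    #define VDO_BLOCK_SIZE 4096u  /* assumed VDO logical block size */

    static uint32_t min_u32(uint32_t a, uint32_t b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* Hypothetical unaligned discard: starts 512 bytes into its
             * first block and covers 10000 bytes in total. */
            uint32_t remaining_discard = 10000;
            uint32_t offset = 512;
            unsigned int step = 0;

            while (remaining_discard > 0) {
                    /* Same idea as the patched finish_cleanup(): never
                     * consume past the end of the current block. */
                    uint32_t discard_size = min_u32(remaining_discard,
                                                    VDO_BLOCK_SIZE - offset);

                    printf("step %u: offset=%u size=%u remaining=%u\n",
                           step++, (unsigned) offset, (unsigned) discard_size,
                           (unsigned) remaining_discard);

                    remaining_discard -= discard_size;
                    offset = 0;  /* later blocks start at the block boundary */
            }

            return 0;
    }

With those numbers the sketch prints three steps: a 3584-byte tail of the first block, one full 4096-byte block, and a final 2320-byte partial block. That per-block view is why the "final block" checks in the diff compare remaining_discard against VDO_BLOCK_SIZE - offset rather than against VDO_BLOCK_SIZE.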