
Commit

Version 8.1.0.316
- Rebased to upstream candidate.
- Added support for vdo volumes which were created with the vdo script, but
  have been converted to be managed by LVM.
- Fixed GCC implicit-fallthrough errors when building for latest kernel.
corwin committed Aug 9, 2021
1 parent 000a4c2 commit 3f9bde5
Showing 26 changed files with 101 additions and 326 deletions.
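
Note on the implicit-fallthrough item in the commit message: that fix is not among the hunks rendered below. As a rough, illustrative sketch only, the usual shape of such a fix is to mark every intentional fall-through between switch cases explicitly so that GCC's -Wimplicit-fallthrough check is satisfied. The function and values here are hypothetical and are not taken from the kvdo sources.

/*
 * Standalone illustration (not kvdo code).
 * Build with: gcc -Wimplicit-fallthrough -o fallthrough_demo fallthrough_demo.c
 * Without the attribute below, GCC warns (an error under -Werror) that
 * case 0 falls through into case 1 without an explicit marker.
 */
#include <stdio.h>

static int classify(int code)
{
	int score = 0;

	switch (code) {
	case 0:
		score += 1;
		__attribute__((fallthrough));	/* deliberate fall-through into case 1 */
	case 1:
		score += 1;
		break;
	default:
		score = -1;
		break;
	}
	return score;
}

int main(void)
{
	/* case 0 falls through into case 1, so classify(0) is 2 */
	printf("classify(0) = %d, classify(1) = %d\n", classify(0), classify(1));
	return 0;
}

In-kernel code typically spells the marker as the fallthrough; pseudo-keyword (a macro from linux/compiler_attributes.h) rather than writing the GCC attribute directly.
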
12 changes: 6 additions & 6 deletions kvdo.spec
@@ -96,10 +96,10 @@ rm -rf $RPM_BUILD_ROOT
%{_usr}/src/%{kmod_name}-%{version}

%changelog
* Fri Aug 06 2021 - Red Hat VDO Team <[email protected]> - 8.1.0.316-1
* Mon Aug 09 2021 - Red Hat VDO Team <[email protected]> - 8.1.0.316-1
- Rebased to upstream candidate.
- Removed support for UDS super block version 6 which can only exist in
versions of the index which the current code does not support.
- Fixed a bug which could result in the UDS index issuing I/O while
suspended.
- Added support for vdo volumes which were created with the vdo script, but
have been converted to be managed by LVM.
- Fixed GCC implicit-fallthrough errors when building for latest kernel.


7 changes: 2 additions & 5 deletions vdo/allocatingVIO.c
@@ -199,8 +199,7 @@ static int allocate_block_in_zone(struct allocating_vio *allocating_vio)
}
allocating_vio->zone = vdo->physical_zones[zone_number];
vio_launch_physical_zone_callback(allocating_vio,
allocate_block_for_write,
THIS_LOCATION("$F;cb=allocBlockInZone"));
allocate_block_for_write);
return VDO_SUCCESS;
}

@@ -215,7 +214,6 @@ static void allocate_block_for_write(struct vdo_completion *completion)
int result;
struct allocating_vio *allocating_vio = as_allocating_vio(completion);
assert_vio_in_physical_zone(allocating_vio);
allocating_vio_add_trace_record(allocating_vio, THIS_LOCATION(NULL));
result = allocate_block_in_zone(allocating_vio);
if (result != VDO_SUCCESS) {
set_vdo_completion_result(completion, result);
@@ -240,8 +238,7 @@ void vio_allocate_data_block(struct allocating_vio *allocating_vio,
vio->vdo->physical_zones[get_next_vdo_allocation_zone(selector)];

vio_launch_physical_zone_callback(allocating_vio,
allocate_block_for_write,
THIS_LOCATION("$F;cb=allocDataBlock"));
allocate_block_for_write);
}

/**********************************************************************/
22 changes: 3 additions & 19 deletions vdo/allocatingVIO.h
@@ -156,19 +156,6 @@ waiter_as_allocating_vio(struct waiter *waiter)
return container_of(waiter, struct allocating_vio, waiter);
}

/**
* Add a trace record for the current source location.
*
* @param allocating_vio The allocating_vio structure to be updated
* @param location The source-location descriptor to be recorded
**/
static inline void
allocating_vio_add_trace_record(struct allocating_vio *allocating_vio,
const struct trace_location *location)
{
vio_add_trace_record(allocating_vio_as_vio(allocating_vio), location);
}

/**
* Get the vdo from an allocating_vio.
*
@@ -210,13 +197,11 @@ assert_vio_in_physical_zone(struct allocating_vio *allocating_vio)
**/
static inline void
vio_set_physical_zone_callback(struct allocating_vio *allocating_vio,
vdo_action *callback,
const struct trace_location *location)
vdo_action *callback)
{
set_vdo_completion_callback(allocating_vio_as_completion(allocating_vio),
callback,
get_vdo_physical_zone_thread_id(allocating_vio->zone));
allocating_vio_add_trace_record(allocating_vio, location);
}

/**
@@ -228,10 +213,9 @@ vio_set_physical_zone_callback(struct allocating_vio *allocating_vio,
**/
static inline void
vio_launch_physical_zone_callback(struct allocating_vio *allocating_vio,
vdo_action *callback,
const struct trace_location *location)
vdo_action *callback)
{
vio_set_physical_zone_callback(allocating_vio, callback, location);
vio_set_physical_zone_callback(allocating_vio, callback);
invoke_vdo_completion_callback(allocating_vio_as_completion(allocating_vio));
}

1 change: 0 additions & 1 deletion vdo/bio.c
@@ -149,7 +149,6 @@ void vdo_count_completed_bios(struct bio *bio)
void vdo_complete_async_bio(struct bio *bio)
{
struct vio *vio = (struct vio *) bio->bi_private;
vio_add_trace_record(vio, THIS_LOCATION("$F($io);cb=io($io)"));
vdo_count_completed_bios(bio);
continue_vio(vio, vdo_get_bio_result(bio));
}
24 changes: 8 additions & 16 deletions vdo/blockMapTree.c
@@ -950,8 +950,7 @@ static int attempt_page_lock(struct block_map_tree_zone *zone,
}

// Someone else is loading or allocating the page we need
return enqueue_data_vio(&lock_holder->waiters, data_vio,
THIS_LOCATION("$F;cb=blockMapTreePage"));
return enqueue_data_vio(&lock_holder->waiters, data_vio);
}

/**
@@ -1131,15 +1130,13 @@ static void release_block_map_write_lock(struct vdo_completion *completion)
data_vio_as_allocating_vio(data_vio);
assert_data_vio_in_allocated_zone(data_vio);
if (completion->result != VDO_SUCCESS) {
launch_data_vio_logical_callback(data_vio, allocation_failure,
THIS_LOCATION(NULL));
launch_data_vio_logical_callback(data_vio, allocation_failure);
return;
}

vio_release_allocation_lock(allocating_vio);
vio_reset_allocation(allocating_vio);
launch_data_vio_logical_callback(data_vio, finish_block_map_allocation,
THIS_LOCATION("$F;cb=finish_block_map_allocation"));
launch_data_vio_logical_callback(data_vio, finish_block_map_allocation);
}

/**
@@ -1159,8 +1156,7 @@ set_block_map_page_reference_count(struct vdo_completion *completion)
struct tree_lock *lock = &data_vio->tree_lock;
assert_data_vio_in_allocated_zone(data_vio);
if (completion->result != VDO_SUCCESS) {
launch_data_vio_logical_callback(data_vio, allocation_failure,
THIS_LOCATION(NULL));
launch_data_vio_logical_callback(data_vio, allocation_failure);
return;
}

@@ -1182,13 +1178,11 @@ static void journal_block_map_allocation(struct vdo_completion *completion)
struct data_vio *data_vio = as_data_vio(completion);
assert_data_vio_in_journal_zone(data_vio);
if (completion->result != VDO_SUCCESS) {
launch_data_vio_logical_callback(data_vio, allocation_failure,
THIS_LOCATION(NULL));
launch_data_vio_logical_callback(data_vio, allocation_failure);
return;
}

set_data_vio_allocated_zone_callback(data_vio, set_block_map_page_reference_count,
THIS_LOCATION(NULL));
set_data_vio_allocated_zone_callback(data_vio, set_block_map_page_reference_count);
add_vdo_recovery_journal_entry(get_vdo_from_data_vio(data_vio)->recovery_journal,
data_vio);
}
@@ -1208,8 +1202,7 @@ continue_block_map_page_allocation(struct allocating_vio *allocating_vio)
physical_block_number_t pbn = allocating_vio->allocation;

if (!data_vio_has_allocation(data_vio)) {
set_data_vio_logical_callback(data_vio, allocation_failure,
THIS_LOCATION(NULL));
set_data_vio_logical_callback(data_vio, allocation_failure);
continue_data_vio(data_vio, VDO_NO_SPACE);
return;
}
@@ -1220,8 +1213,7 @@ continue_block_map_page_allocation(struct allocating_vio *allocating_vio)
VDO_MAPPING_STATE_UNCOMPRESSED,
allocating_vio->allocation_lock,
&data_vio->operation);
launch_data_vio_journal_callback(data_vio, journal_block_map_allocation,
THIS_LOCATION("$F;cb=journal_block_map_allocation"));
launch_data_vio_journal_callback(data_vio, journal_block_map_allocation);
}

/**
30 changes: 0 additions & 30 deletions vdo/dataKVIO.c
@@ -98,17 +98,14 @@ static void vdo_acknowledge_data_vio(struct data_vio *data_vio)
}


vio_add_trace_record(vio, THIS_LOCATION(NULL));
vdo_complete_bio(bio, error);
}

/**********************************************************************/
static noinline void clean_data_vio(struct data_vio *data_vio,
struct free_buffer_pointers *fbp)
{
vio_add_trace_record(data_vio_as_vio(data_vio), THIS_LOCATION(NULL));
vdo_acknowledge_data_vio(data_vio);
log_data_vio_trace(data_vio);
add_free_buffer_pointer(fbp, data_vio);
}

@@ -156,8 +153,6 @@ static void vdo_complete_data_vio(struct vdo_completion *completion)
struct data_vio *data_vio = as_data_vio(completion);
struct vdo *vdo = get_vdo_from_data_vio(data_vio);

data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));

if (use_bio_ack_queue(vdo) && VDO_USE_BIO_ACK_QUEUE_FOR_READ &&
(data_vio->user_bio != NULL)) {
launch_data_vio_on_bio_ack_queue(data_vio,
@@ -311,7 +306,6 @@ static void read_bio_callback(struct bio *bio)
{
struct data_vio *data_vio = (struct data_vio *) bio->bi_private;
data_vio->read_block.data = data_vio->read_block.buffer;
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));
vdo_count_completed_bios(bio);
complete_read(data_vio);
}
@@ -327,10 +321,6 @@ void vdo_read_block(struct data_vio *data_vio,
struct read_block *read_block = &data_vio->read_block;
int result;

// This can be run on either a read of compressed data, or a write
// trying to read-verify, so we can't assert about the operation.
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));

read_block->callback = callback;
read_block->status = VDO_SUCCESS;
read_block->mapping_state = mapping_state;
@@ -353,7 +343,6 @@ static void acknowledge_user_bio(struct bio *bio)
int error = vdo_get_bio_result(bio);
struct vio *vio = (struct vio *) bio->bi_private;

vio_add_trace_record(vio, THIS_LOCATION("$F($io);cb=io($io)"));
vdo_count_completed_bios(bio);
if (error == 0) {
acknowledge_data_vio(vio_as_data_vio(vio));
@@ -374,7 +363,6 @@ void read_data_vio(struct data_vio *data_vio)

ASSERT_LOG_ONLY(!is_write_vio(vio),
"operation set correctly for data read");
data_vio_add_trace_record(data_vio, THIS_LOCATION("$F;io=readData"));

if (vdo_is_state_compressed(data_vio->mapped.state)) {
vdo_read_block(data_vio,
@@ -418,7 +406,6 @@ vdo_acknowledge_and_enqueue(struct vdo_work_item *item)
{
struct data_vio *data_vio = work_item_as_data_vio(item);

data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));
vdo_acknowledge_data_vio(data_vio);
// Even if we're not using bio-ack threads, we may be in the wrong
// base-code thread.
@@ -443,7 +430,6 @@ void acknowledge_data_vio(struct data_vio *data_vio)
// We've finished with the vio; acknowledge completion of the bio to
// the kernel.
if (use_bio_ack_queue(vdo)) {
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));
launch_data_vio_on_bio_ack_queue(data_vio,
vdo_acknowledge_and_enqueue,
NULL,
@@ -462,8 +448,6 @@ void write_data_vio(struct data_vio *data_vio)

ASSERT_LOG_ONLY(is_write_vio(vio),
"write_data_vio must be passed a write data_vio");
data_vio_add_trace_record(data_vio,
THIS_LOCATION("$F;io=writeData;j=normal"));


// Write the data from the data block buffer.
@@ -545,7 +529,6 @@ static inline bool is_zero_block(struct data_vio *data_vio)
void vdo_apply_partial_write(struct data_vio *data_vio)
{
struct bio *bio = data_vio->user_bio;
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));

if (bio_op(bio) != REQ_OP_DISCARD) {
vdo_bio_copy_data_in(bio, data_vio->data_block + data_vio->offset);
@@ -564,8 +547,6 @@ void zero_data_vio(struct data_vio *data_vio)
{
ASSERT_LOG_ONLY(!is_write_vio(data_vio_as_vio(data_vio)),
"only attempt to zero non-writes");
data_vio_add_trace_record(data_vio,
THIS_LOCATION("zeroDataVIO;io=readData"));
if (data_vio->is_partial) {
memset(data_vio->data_block, 0, VDO_BLOCK_SIZE);
} else {
@@ -581,7 +562,6 @@ void vdo_copy_data(struct data_vio *source, struct data_vio *destination)
ASSERT_LOG_ONLY(is_write_vio(data_vio_as_vio(source)),
"only copy from a write");

data_vio_add_trace_record(destination, THIS_LOCATION(NULL));
if (destination->is_partial) {
memcpy(destination->data_block, source->data_block,
VDO_BLOCK_SIZE);
@@ -598,8 +578,6 @@ static void vdo_compress_work(struct vdo_work_item *item)
char *context = get_work_queue_private_data();
int size;

data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));

size = LZ4_compress_default(data_vio->data_block,
data_vio->scratch_block,
VDO_BLOCK_SIZE,
@@ -622,9 +600,6 @@ static void vdo_compress_work(struct vdo_work_item *item)
/**********************************************************************/
void compress_data_vio(struct data_vio *data_vio)
{
data_vio_add_trace_record(data_vio,
THIS_LOCATION("compressDataVIO;io=compress;cb=compress"));

/*
* If the original bio was a discard, but we got this far because the
* discard was a partial one (r/m/w), and it is part of a larger
@@ -794,7 +769,6 @@ static void vdo_continue_discard_vio(struct vdo_completion *completion)
static void vdo_complete_partial_read(struct vdo_completion *completion)
{
struct data_vio *data_vio = as_data_vio(completion);
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));

vdo_bio_copy_data_out(data_vio->user_bio,
data_vio->read_block.data + data_vio->offset);
@@ -878,7 +852,6 @@ int vdo_launch_data_vio_from_bio(struct vdo *vdo,
static void vdo_hash_data_work(struct vdo_work_item *item)
{
struct data_vio *data_vio = work_item_as_data_vio(item);
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));

MurmurHash3_x64_128(data_vio->data_block, VDO_BLOCK_SIZE, 0x62ea60be,
&data_vio->chunk_name);
@@ -889,7 +862,6 @@ static void vdo_hash_data_work(struct vdo_work_item *item)
/**********************************************************************/
void hash_data_vio(struct data_vio *data_vio)
{
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));
launch_data_vio_on_cpu_queue(data_vio,
vdo_hash_data_work,
NULL,
@@ -899,8 +871,6 @@ void hash_data_vio(struct data_vio *data_vio)
/**********************************************************************/
void check_data_vio_for_duplication(struct data_vio *data_vio)
{
data_vio_add_trace_record(data_vio,
THIS_LOCATION("checkForDuplication;dup=post"));
ASSERT_LOG_ONLY(!data_vio->is_zero_block,
"zero block not checked for duplication");
ASSERT_LOG_ONLY(data_vio->new_mapped.state != VDO_MAPPING_STATE_UNMAPPED,
11 changes: 3 additions & 8 deletions vdo/dataVIO.c
@@ -120,8 +120,7 @@ void prepare_data_vio(struct data_vio *data_vio,
VDO_MAPPING_STATE_UNCOMPRESSED);
reset_vdo_completion(vio_as_completion(vio));
set_data_vio_logical_callback(data_vio,
vdo_attempt_logical_block_lock,
THIS_LOCATION("$F;cb=acquire_logical_block_lock"));
vdo_attempt_logical_block_lock);
}

/**********************************************************************/
@@ -139,7 +138,6 @@ void complete_data_vio(struct vdo_completion *completion)
get_data_vio_operation_name(data_vio));
}

data_vio_add_trace_record(data_vio, THIS_LOCATION("$F($io)"));
if (is_read_data_vio(data_vio)) {
cleanup_read_data_vio(data_vio);
} else {
@@ -228,7 +226,6 @@ int set_data_vio_mapped_location(struct data_vio *data_vio,
**/
static void launch_locked_request(struct data_vio *data_vio)
{
data_vio_add_trace_record(data_vio, THIS_LOCATION(NULL));
data_vio->logical.locked = true;

if (is_write_data_vio(data_vio)) {
@@ -293,8 +290,7 @@ void vdo_attempt_logical_block_lock(struct vdo_completion *completion)

data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK;
result = enqueue_data_vio(&lock_holder->logical.waiters,
data_vio,
THIS_LOCATION("$F;cb=logicalBlockLock"));
data_vio);
if (result != VDO_SUCCESS) {
finish_data_vio(data_vio, result);
return;
@@ -306,8 +302,7 @@ void vdo_attempt_logical_block_lock(struct vdo_completion *completion)
cancel_vio_compression(lock_holder)) {
data_vio->compression.lock_holder = lock_holder;
launch_data_vio_packer_callback(data_vio,
remove_lock_holder_from_vdo_packer,
THIS_LOCATION("$F;cb=remove_lock_holder_from_vdo_packer"));
remove_lock_holder_from_vdo_packer);
}
}

(Diffs for the remaining 19 changed files are not rendered on this page.)
