Merge pull request dm-vdo#73 from C2Redhat/fixSmallNits

Fix small nits

C2Redhat authored Jan 26, 2024
2 parents 020e608 + b992e3a commit 34507ed
Showing 6 changed files with 29 additions and 28 deletions.
10 changes: 5 additions & 5 deletions src/c++/uds/src/uds/index-layout.c
@@ -390,9 +390,10 @@ static void define_sub_index_nonce(struct index_layout *layout)
 	encode_u64_le(buffer, &offset, sil->sub_index.start_block);
 	encode_u16_le(buffer, &offset, 0);
 	sil->nonce = generate_secondary_nonce(primary_nonce, buffer, sizeof(buffer));
-	if (sil->nonce == 0)
+	if (sil->nonce == 0) {
 		sil->nonce = generate_secondary_nonce(~primary_nonce + 1, buffer,
 						      sizeof(buffer));
+	}
 }
 
 static void setup_sub_index(struct index_layout *layout, u64 start_block,
@@ -655,10 +656,11 @@ STATIC int discard_index_state_data(struct index_layout *layout)
 		saved_result = result;
 	}
 
-	if (saved_result != UDS_SUCCESS)
+	if (saved_result != UDS_SUCCESS) {
 		return uds_log_error_strerror(result,
 					      "%s: cannot destroy all index saves",
 					      __func__);
+	}
 
 	return UDS_SUCCESS;
 }
@@ -1255,9 +1257,7 @@ static int __must_check read_super_block_data(struct buffered_reader *reader,
 					      "unknown superblock magic label");
 
 	if ((super->version < SUPER_VERSION_MINIMUM) ||
-	    (super->version == 4) ||
-	    (super->version == 5) ||
-	    (super->version == 6) ||
+	    (super->version == 4) || (super->version == 5) || (super->version == 6) ||
 	    (super->version > SUPER_VERSION_MAXIMUM)) {
 		return uds_log_error_strerror(UDS_UNSUPPORTED_VERSION,
 					      "unknown superblock version number %u",
3 changes: 1 addition & 2 deletions src/c++/uds/src/uds/index-page-map.c
@@ -100,8 +100,7 @@ void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_nu
 	u32 slot = chapter_number * map->entries_per_chapter;
 
 	*lowest_list = ((index_page_number == 0) ?
-			0 :
-			map->entries[slot + index_page_number - 1] + 1);
+			0 : map->entries[slot + index_page_number - 1] + 1);
 	*highest_list = ((index_page_number < map->entries_per_chapter) ?
 			 map->entries[slot + index_page_number] :
 			 map->geometry->delta_lists_per_chapter - 1);
12 changes: 8 additions & 4 deletions src/c++/uds/src/uds/index-session.c
@@ -392,9 +392,10 @@ int uds_open_index(enum uds_open_index_type open_type,
 static void wait_for_no_requests_in_progress(struct uds_index_session *index_session)
 {
 	uds_lock_mutex(&index_session->request_mutex);
-	while (index_session->request_count > 0)
+	while (index_session->request_count > 0) {
 		uds_wait_cond(&index_session->request_cond,
 			      &index_session->request_mutex);
+	}
 	uds_unlock_mutex(&index_session->request_mutex);
 }
 
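For context: this is the standard wait-loop pattern over a mutex and condition variable. A minimal sketch of the release side that pairs with it, assuming the uds_broadcast_cond() wrapper from the same codebase (the helper name release_request() is invented here for illustration and is not part of this PR):

    static void release_request(struct uds_index_session *index_session)
    {
        uds_lock_mutex(&index_session->request_mutex);
        index_session->request_count--;
        /* Wake any thread blocked in wait_for_no_requests_in_progress(). */
        uds_broadcast_cond(&index_session->request_cond);
        uds_unlock_mutex(&index_session->request_mutex);
    }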
@@ -624,9 +625,10 @@ int uds_close_index(struct uds_index_session *index_session)
 	/* Wait for any current index state change to complete. */
 	uds_lock_mutex(&index_session->request_mutex);
 	while ((index_session->state & IS_FLAG_WAITING) ||
-	       (index_session->state & IS_FLAG_CLOSING))
+	       (index_session->state & IS_FLAG_CLOSING)) {
 		uds_wait_cond(&index_session->request_cond,
 			      &index_session->request_mutex);
+	}
 
 	if (index_session->state & IS_FLAG_SUSPENDED) {
 		uds_log_info("Index session is suspended");
@@ -665,9 +667,10 @@ int uds_destroy_index_session(struct uds_index_session *index_session)
 	/* Wait for any current index state change to complete. */
 	uds_lock_mutex(&index_session->request_mutex);
 	while ((index_session->state & IS_FLAG_WAITING) ||
-	       (index_session->state & IS_FLAG_CLOSING))
+	       (index_session->state & IS_FLAG_CLOSING)) {
 		uds_wait_cond(&index_session->request_cond,
 			      &index_session->request_mutex);
+	}
 
 	if (index_session->state & IS_FLAG_DESTROYING) {
 		uds_unlock_mutex(&index_session->request_mutex);
@@ -691,9 +694,10 @@ int uds_destroy_index_session(struct uds_index_session *index_session)
 
 		/* Wait until the load exits before proceeding. */
 		uds_lock_mutex(&index_session->request_mutex);
-		while (index_session->state & IS_FLAG_LOADING)
+		while (index_session->state & IS_FLAG_LOADING) {
 			uds_wait_cond(&index_session->request_cond,
 				      &index_session->request_mutex);
+		}
 		uds_unlock_mutex(&index_session->request_mutex);
 	}
 
3 changes: 1 addition & 2 deletions src/c++/uds/src/uds/index.c
@@ -1352,8 +1352,7 @@ int uds_save_index(struct uds_index *index)
 	uds_wait_for_idle_index(index);
 	index->prev_save = index->last_save;
 	index->last_save = ((index->newest_virtual_chapter == 0) ?
-			    NO_LAST_SAVE :
-			    index->newest_virtual_chapter - 1);
+			    NO_LAST_SAVE : index->newest_virtual_chapter - 1);
 	uds_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save);
 
 	result = uds_save_index_state(index->layout, index);
10 changes: 5 additions & 5 deletions src/c++/vdo/base/dedupe.c
@@ -1069,7 +1069,7 @@ static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
  * increment_stat() - Increment a statistic counter in a non-atomic yet thread-safe manner.
  * @stat: The statistic field to increment.
  */
-static void increment_stat(u64 *stat)
+static inline void increment_stat(u64 *stat)
 {
 	/*
 	 * Must only be mutated on the hash zone thread. Prevents any compiler shenanigans from
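The hunk cuts off inside the comment. A hedged sketch of the probable function body, assuming the single-writer WRITE_ONCE pattern the comment describes (not shown in this diff):

    static inline void increment_stat(u64 *stat)
    {
        /* Single-writer counter: WRITE_ONCE keeps the compiler from
         * tearing or caching the store while other threads read it. */
        WRITE_ONCE(*stat, *stat + 1);
    }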
@@ -1144,8 +1144,8 @@ static bool blocks_equal(char *block1, char *block2)
 			"Data blocks are expected to be aligned");
 	ASSERT_LOG_ONLY((uintptr_t) block2 % sizeof(u64) == 0,
 			"Data blocks are expected to be aligned");
-#endif /* INTERNAL */
 
+#endif /* INTERNAL */
 	for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
 		if (*((u64 *) &block1[i]) != *((u64 *) &block2[i]))
 			return false;
@@ -1471,13 +1471,13 @@ static void finish_writing(struct hash_lock *lock, struct data_vio *agent)
 	lock->duplicate = agent->new_mapped;
 	lock->verified = true;
 
-	if (vdo_is_state_compressed(lock->duplicate.state) &&
-	    lock->registered)
+	if (vdo_is_state_compressed(lock->duplicate.state) && lock->registered) {
 		/*
 		 * Compression means the location we gave in the UDS query is not the location
 		 * we're using to deduplicate.
 		 */
 		lock->update_advice = true;
+	}
 
 	/* If there are any waiters, we need to start deduping them. */
 	if (vdo_waitq_has_waiters(&lock->waiters)) {
@@ -2393,7 +2393,7 @@ static void timeout_index_operations_callback(struct vdo_completion *completion)
 					    DEDUPE_CONTEXT_TIMED_OUT)) {
 			/*
 			 * This context completed between the time the timeout fired, and now. We
-			 * can treat it as a a successful query, its requestor is already enqueued
+			 * can treat it as a successful query, its requestor is already enqueued
 			 * to process it.
 			 */
 			continue;
19 changes: 9 additions & 10 deletions src/c++/vdo/base/slab-depot.c
@@ -1360,7 +1360,7 @@ static unsigned int calculate_slab_priority(struct vdo_slab *slab)
 
 /*
  * Slabs are essentially prioritized by an approximation of the number of free blocks in the slab
- * so slabs with lots of free blocks with be opened for allocation before slabs that have few free
+ * so slabs with lots of free blocks will be opened for allocation before slabs that have few free
  * blocks.
  */
 static void prioritize_slab(struct vdo_slab *slab)
@@ -1374,14 +1374,14 @@ static void prioritize_slab(struct vdo_slab *slab)
 
 /**
  * adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab.
- * @increment: should be true if the free block count went up.
+ * @incremented: true if the free block count went up.
  */
-static void adjust_free_block_count(struct vdo_slab *slab, bool increment)
+static void adjust_free_block_count(struct vdo_slab *slab, bool incremented)
 {
 	struct block_allocator *allocator = slab->allocator;
 
 	WRITE_ONCE(allocator->allocated_blocks,
-		   allocator->allocated_blocks + (increment ? -1 : 1));
+		   allocator->allocated_blocks + (incremented ? -1 : 1));
 
 	/* The open slab doesn't need to be reprioritized until it is closed. */
 	if (slab == allocator->open_slab)
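Note the sign inversion: allocated_blocks tracks allocated rather than free blocks, so a free-count increment (incremented == true) must decrement it. A standalone sketch of just that arithmetic, with illustrative names that are not repository code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Free and allocated counts move in opposite directions. */
    static void adjust_allocated_count(uint64_t *allocated_blocks, bool free_count_went_up)
    {
        *allocated_blocks += (free_count_went_up ? -1 : 1);
    }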
@@ -1747,9 +1747,8 @@ static void add_entry_from_waiter(struct vdo_waiter *waiter, void *context)
 static inline bool is_next_entry_a_block_map_increment(struct slab_journal *journal)
 {
 	struct vdo_waiter *waiter = vdo_waitq_get_first_waiter(&journal->entry_waiters);
-	struct reference_updater *updater = container_of(waiter,
-							 struct reference_updater,
-							 waiter);
+	struct reference_updater *updater =
+		container_of(waiter, struct reference_updater, waiter);
 
 	return (updater->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING);
 }
@@ -2642,7 +2641,7 @@ static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber)
  *
  * Return: true if the scrubber has slabs to scrub.
  */
-static bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber)
+static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber)
 {
 	return (get_next_slab(scrubber) != NULL);
 }
@@ -2817,8 +2816,8 @@ static int apply_block_entries(struct packed_slab_journal_block *block,
 static void apply_journal_entries(struct vdo_completion *completion)
 {
 	int result;
-	struct slab_scrubber *scrubber
-		= container_of(as_vio(completion), struct slab_scrubber, vio);
+	struct slab_scrubber *scrubber =
+		container_of(as_vio(completion), struct slab_scrubber, vio);
 	struct vdo_slab *slab = scrubber->slab;
 	struct slab_journal *journal = &slab->journal;
 
