
Commit

added some const qualifiers
Jan Buethe committed Nov 24, 2023
1 parent 36f2a49 commit 72e3330
Showing 3 changed files with 25 additions and 23 deletions.
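Background note (not part of the commit): a const qualifier on a pointer parameter promises that the function will not write through that pointer, and the compiler enforces the promise at every assignment; callers holding read-only buffers can then pass them without a cast. A minimal standalone sketch of that effect, with hypothetical names, not code from this commit:

#include <stdio.h>

/* The const qualifier makes the pointee read-only inside the function. */
static float sum_vector(const float *vec, int length)
{
    float acc = 0.f;
    for (int i = 0; i < length; i++) {
        acc += vec[i];
        /* vec[i] = 0.f;  would now fail to compile:
           assignment of read-only location */
    }
    return acc;
}

int main(void)
{
    static const float taps[4] = {0.1f, 0.2f, 0.3f, 0.4f};
    /* Fine without a cast: const data into a const parameter. */
    printf("%f\n", sum_vector(taps, 4));
    return 0;
}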
2 changes: 1 addition & 1 deletion dnn/adaconvtest.c
@@ -207,7 +207,7 @@ void adacomb_compare(
}

adacomb_process_frame(hAdaComb, x_out, x_in, features, kernel_layer, gain_layer, global_gain_layer,
-pitch_lag, frame_size, overlap_size, kernel_size, left_padding, filter_gain_a, filter_gain_b, log_gain_limit, NULL);
+pitch_lag, feature_dim, frame_size, overlap_size, kernel_size, left_padding, filter_gain_a, filter_gain_b, log_gain_limit, NULL);


mse = 0;
23 changes: 12 additions & 11 deletions dnn/nndsp.c
@@ -19,7 +19,7 @@ void init_adacomb_state(AdaCombState *hAdaComb)
}

#ifdef DEBUG_NNDSP
-void print_float_vector(const char* name, float *vec, int length)
+void print_float_vector(const char* name, const float *vec, int length)
{
for (int i = 0; i < length; i ++)
{
@@ -83,11 +83,11 @@ static void transform_gains(
void adaconv_process_frame(
AdaConvState* hAdaConv,
float *x_out,
-float *x_in,
-float *features,
-LinearLayer *kernel_layer,
-LinearLayer *gain_layer,
-int feature_dim, // not strictly necessary
+const float *x_in,
+const float *features,
+const LinearLayer *kernel_layer,
+const LinearLayer *gain_layer,
+int feature_dim,
int frame_size,
int overlap_size,
int in_channels,
@@ -198,12 +198,13 @@ void adaconv_process_frame(
void adacomb_process_frame(
AdaCombState* hAdaComb,
float *x_out,
-float *x_in,
-float *features,
-LinearLayer *kernel_layer,
-LinearLayer *gain_layer,
-LinearLayer *global_gain_layer,
+const float *x_in,
+const float *features,
+const LinearLayer *kernel_layer,
+const LinearLayer *gain_layer,
+const LinearLayer *global_gain_layer,
int pitch_lag,
+int feature_dim,
int frame_size,
int overlap_size,
int kernel_size,
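One practical effect of the const-qualified print_float_vector: debug code can now hand it read-only data directly. A hypothetical usage sketch (dump_taps and taps are invented names; assumes DEBUG_NNDSP is defined so the function is compiled in):

#ifdef DEBUG_NNDSP
/* Hypothetical helper, not in the codebase: passing a read-only
   table to print_float_vector no longer discards a const qualifier. */
static void dump_taps(void)
{
    static const float taps[4] = {0.1f, 0.2f, 0.3f, 0.4f};
    print_float_vector("taps", taps, 4);
}
#endif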
23 changes: 12 additions & 11 deletions dnn/nndsp.h
@@ -17,12 +17,12 @@
#define ADACOMB_MAX_FRAME_SIZE 80
#define ADACOMB_MAX_OVERLAP_SIZE 40

-// #define DEBUG_NNDSP
+//#define DEBUG_NNDSP
#ifdef DEBUG_NNDSP
#include <stdio.h>
#endif

-void print_float_vector(const char* name, float *vec, int length);
+void print_float_vector(const char* name, const float *vec, int length);

typedef struct {
float history[ADACONV_MAX_KERNEL_SIZE * ADACONV_MAX_INPUT_CHANNELS];
@@ -46,10 +46,10 @@ void init_adacomb_state(AdaCombState *hAdaComb);
void adaconv_process_frame(
AdaConvState* hAdaConv,
float *x_out,
-float *x_in,
-float *features,
-LinearLayer *kernel_layer,
-LinearLayer *gain_layer,
+const float *x_in,
+const float *features,
+const LinearLayer *kernel_layer,
+const LinearLayer *gain_layer,
int feature_dim, // not strictly necessary
int frame_size,
int overlap_size,
@@ -66,12 +66,13 @@ void adaconv_process_frame(
void adacomb_process_frame(
AdaCombState* hAdaComb,
float *x_out,
-float *x_in,
-float *features,
-LinearLayer *kernel_layer,
-LinearLayer *gain_layer,
-LinearLayer *global_gain_layer,
+const float *x_in,
+const float *features,
+const LinearLayer *kernel_layer,
+const LinearLayer *gain_layer,
+const LinearLayer *global_gain_layer,
int pitch_lag,
+int feature_dim,
int frame_size,
int overlap_size,
int kernel_size,
