Skip to content

Commit

Permalink
[DAPHNE-daphne-eu#724] CUDNN unit test refactoring and cleanup
Browse files Browse the repository at this point in the history
This commit moves the catch2 tests of cuDNN-based operations into the CUDA subdirectory to distinguish them better from the newly added tests for CPU DNN ops. Furthermore, before this change, some mixed-up tests such as DNNPoolingTest executed either the CPU or the GPU variant (selected via `#ifdef USE_CUDA`) instead of testing the two implementations separately.

Fixes daphne-eu#724
  • Loading branch information
corepointer committed Sep 13, 2024
1 parent 940cc90 commit 6da3edf
Show file tree
Hide file tree
Showing 9 changed files with 119 additions and 303 deletions.
10 changes: 5 additions & 5 deletions test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -170,11 +170,11 @@ if(USE_CUDA AND CMAKE_CUDA_COMPILER)
runtime/local/kernels/CUDA/FillTest.cpp
runtime/local/kernels/CUDA/MatMulTest.cpp
runtime/local/kernels/CUDA_ContextTest.cpp
runtime/local/kernels/DNNActivationTest.cpp
runtime/local/kernels/DNNAffineTest.cpp
runtime/local/kernels/DNNBatchNormTest.cpp
runtime/local/kernels/DNNConvolutionTest.cpp
runtime/local/kernels/DNNSoftmaxTest.cpp)
runtime/local/kernels/CUDA/DNNActivationTest.cpp
runtime/local/kernels/CUDA/DNNAffineTest.cpp
runtime/local/kernels/CUDA/DNNBatchNormTest.cpp
runtime/local/kernels/CUDA/DNNConvolutionTest.cpp
runtime/local/kernels/CUDA/DNNSoftmaxTest.cpp)
endif()

add_executable(run_tests ${TEST_SOURCES})
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,26 +14,20 @@
* limitations under the License.
*/

#ifdef USE_CUDA

#include <runtime/local/datagen/GenGivenVals.h>
#include <runtime/local/datastructures/DenseMatrix.h>
#include <runtime/local/kernels/CheckEq.h>
#include "runtime/local/kernels/CUDA/Activation.h"

#include <catch.hpp>
#include <tags.h>

#include "run_tests.h"

#include "runtime/local/datagen/GenGivenVals.h"
#include "runtime/local/kernels/CUDA/Activation.h"

template<class OP, class DT>
void check(const DT* in, const DT* exp, DaphneContext* dctx) {
DT* res = nullptr;
CUDA::NN::Activation::Forward<OP, DT, DT>::apply(res, in, dctx);
CHECK(*res == *exp);
}

TEMPLATE_PRODUCT_TEST_CASE("CUDA::Activation::ReLU::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::Activation::ReLU::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
using DT = TestType;

auto dctx = setupContextAndLogger();
Expand All @@ -48,5 +42,3 @@ TEMPLATE_PRODUCT_TEST_CASE("CUDA::Activation::ReLU::Forward", TAG_DNN, (DenseMat
DataObjectFactory::destroy(input);
DataObjectFactory::destroy(result);
}

#endif
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,8 @@

#include "run_tests.h"

#ifdef USE_CUDA

#include "runtime/local/kernels/CUDA/Affine.h"
#include <runtime/local/datagen/GenGivenVals.h>
#include <runtime/local/datastructures/DenseMatrix.h>
#include <runtime/local/kernels/CheckEq.h>

#include <catch.hpp>
#include <tags.h>
#include "runtime/local/datagen/GenGivenVals.h"

template<class DT>
void check(const DT* in, const DT* W, const DT* b, const DT* exp, DaphneContext* dctx) {
Expand All @@ -33,7 +26,7 @@ template<class DT>
CHECK(*res == *exp);
}

TEMPLATE_PRODUCT_TEST_CASE("affine_fwd", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::Affine::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
auto dctx = setupContextAndLogger();
using DT = TestType;

Expand All @@ -49,5 +42,3 @@ TEMPLATE_PRODUCT_TEST_CASE("affine_fwd", TAG_DNN, (DenseMatrix), (float, double)
DataObjectFactory::destroy(input);
DataObjectFactory::destroy(result);
}

#endif
49 changes: 49 additions & 0 deletions test/runtime/local/kernels/CUDA/DNNBatchNormTest.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
/*
* Copyright 2021 The DAPHNE Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


#include "run_tests.h"

#include "runtime/local/datagen/GenGivenVals.h"
#include "runtime/local/kernels/CUDA/BatchNorm.h"

template<class DT>
void check(const DT* in, const DT* gamma, const DT* beta, const DT* ema_mean, const DT* ema_var, const DT* exp,
DaphneContext* dctx)
{
DT* res = nullptr;
typename DT::VT epsilon = 1e-5;
CUDA::BatchNorm::Forward<DT, DT>::apply(res, in, gamma, beta, ema_mean, ema_var, epsilon, dctx);
CHECK(Approx(*(res->getValues())).epsilon(epsilon) == *(exp->getValues()));
}

TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::BatchNorm::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
    auto dctx = setupContextAndLogger();
    using DT = TestType;

    // With gamma=1, beta=0, mean=0 and var=1, batch normalization is the
    // identity transformation (up to epsilon), so the expected output equals
    // the input.
    auto input = genGivenVals<DT>(1, { -3, -2, -1, 0, 1, 2, 3, 4, 5});
    auto gamma = genGivenVals<DT>(1, { 1 });
    auto beta = genGivenVals<DT>(1, { 0 });
    auto ema_mean = genGivenVals<DT>(1, { 0 });
    auto ema_var = genGivenVals<DT>(1, { 1 });

    auto result = genGivenVals<DT>(1, { -3, -2, -1, 0, 1, 2, 3, 4, 5});

    check(input, gamma, beta, ema_mean, ema_var, result, dctx.get());

    // Destroy ALL generated matrices — previously gamma/beta/ema_mean/ema_var
    // were leaked.
    DataObjectFactory::destroy(input);
    DataObjectFactory::destroy(gamma);
    DataObjectFactory::destroy(beta);
    DataObjectFactory::destroy(ema_mean);
    DataObjectFactory::destroy(ema_var);
    DataObjectFactory::destroy(result);
}
48 changes: 48 additions & 0 deletions test/runtime/local/kernels/CUDA/DNNConvolutionTest.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
/*
* Copyright 2021 The DAPHNE Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


#include "run_tests.h"

#include "runtime/local/datagen/GenGivenVals.h"
#include "runtime/local/kernels/CUDA/Convolution.h"

template<class DT>
void check(const DT* in, const DT* filter, const DT* exp, DaphneContext* dctx) {
DT* res = nullptr;
size_t out_h;
size_t out_w;
CUDA::Convolution::Forward<DT, DT>::apply(res, out_h, out_w, in, filter, nullptr, in->getNumRows(), 1, 3, 3, 2, 2,
1, 1, 0, 0, dctx);
CHECK(*res == *exp);
}

TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::Convolution::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
    using DT = TestType;
    auto ctx = setupContextAndLogger();

    // One 3x3 "image" and a 2x2 filter with ones on its diagonal.
    auto img = genGivenVals<DT>(1, { 1, 2, 3, 4, 5, 6, 7, 8, 9});
    auto krnl = genGivenVals<DT>(1, { 1, 0, 0, 1});

    // Expected result for filter 2x2, stride 1x1, padding 0x0.
    auto expected = genGivenVals<DT>(1, { 6, 8, 12, 14 });

    check(img, krnl, expected, ctx.get());

    DataObjectFactory::destroy(img);
    DataObjectFactory::destroy(krnl);
    DataObjectFactory::destroy(expected);
}
Original file line number Diff line number Diff line change
Expand Up @@ -14,31 +14,11 @@
* limitations under the License.
*/


#include "run_tests.h"

#include <runtime/local/datagen/GenGivenVals.h>
#include <runtime/local/datastructures/DenseMatrix.h>
#include <runtime/local/kernels/CheckEq.h>

#ifdef USE_CUDA
#include <runtime/local/kernels/CUDA/Pooling.h>
#include "runtime/local/kernels/CUDA/CreateCUDAContext.h"
#else
#include <runtime/local/kernels/Pooling.h>
#include <runtime/local/kernels/MaxPoolForward.h>
#include <runtime/local/kernels/AvgPoolForward.h>

// #include <runtime/local/kernels/AvgPoolBackward.h>
// #include <runtime/local/kernels/Conv2DBackwardFilter.h>
// #include <runtime/local/kernels/BatchNorm2DBackward.h>

#endif

#include <tags.h>

#include <catch.hpp>

#include <vector>
#include <runtime/local/kernels/CUDA/Pooling.h>

template<typename DT>
DT* genInput() {
Expand All @@ -58,16 +38,11 @@ void checkPoolingForward(const DT* in, const DT* exp, DaphneContext* dctx) {
DT* res = nullptr;
size_t out_h;
size_t out_w;
#ifdef USE_CUDA
CUDA::NN::Pooling::Forward<OP, DT, DT>::apply(res, out_h, out_w, in, in->getNumRows(), 3, 5, 5, 2, 2, 1, 1, 0, 0, dctx);
#else
//NN::Pooling::Forward<OP, DT, DT>::apply(res, out_h, out_w, in, in->getNumRows(), 3, 5, 5, 2, 2, 1, 1, 0, 0, dctx);
NN::Pooling::Forward<OP, DT, DT>::apply(res, out_h, out_w, in, in->getNumRows(), 3, 5, 5, 2, 2, 2, 2, 1, 1, dctx);
#endif
CHECK(*res == *exp);
}

TEMPLATE_PRODUCT_TEST_CASE("pool_fwd_avg", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::Pooling::AVG::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
using DT = TestType;

auto dctx = setupContextAndLogger();
Expand Down Expand Up @@ -96,15 +71,11 @@ TEMPLATE_PRODUCT_TEST_CASE("pool_fwd_avg", TAG_DNN, (DenseMatrix), (float, doubl
DataObjectFactory::destroy(out_f2x2_s2x2_p1x1);
}

TEMPLATE_PRODUCT_TEST_CASE("pool_fwd_max", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::Pooling::MAX::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
using DT = TestType;

auto dctx = setupContextAndLogger();

#ifdef USE_CUDA
CUDA::createCUDAContext(dctx.get());
#endif

// two rgb "images" of 5x5 pixels
auto inputs = genInput<DT>();

Expand Down Expand Up @@ -152,14 +123,15 @@ TEMPLATE_PRODUCT_TEST_CASE("pool_fwd_max", TAG_DNN, (DenseMatrix), (float, doubl
52, 53, 54, 54,
52, 53, 54, 54
});
auto out_f2x2_s2x2_p1x1 = genGivenVals<DT>(2, {1., 3., 5., 11., 13., 15.,

auto out_f2x2_s2x2_p1x1 = genGivenVals<DT>(2, {1., 3., 5., 11., 13., 15.,
21., 23., 25., 26., 28., 30., 36., 38., 40., 46., 48., 50.,
51., 53., 55., 61., 63., 65., 71., 73., 75.,
76., 78., 80., 86., 88., 90.,96., 98., 100., 101., 103., 105., 111., 113., 115.,
121., 123., 125., 126., 128., 130., 136., 138., 140., 146., 148., 150.
});

checkPoolingForward<NN::Pooling::MAX>(inputs, out_f2x2_s2x2_p1x1, dctx.get());
//check<NN::Pooling::MAX>(inputs_p1x1, out_f2x2_s1x1_p1x1, dctx.get());

DataObjectFactory::destroy(inputs);
DataObjectFactory::destroy(out_f2x2_s1x1_p0x0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,11 @@
* limitations under the License.
*/

#include "run_tests.h"

#include <runtime/local/datagen/GenGivenVals.h>
#include <runtime/local/datastructures/DenseMatrix.h>

#include <catch.hpp>
#include <tags.h>
#include "run_tests.h"

#ifdef USE_CUDA
#include "runtime/local/kernels/CUDA/Softmax.h"
#include "runtime/local/datagen/GenGivenVals.h"
#include "runtime/local/kernels/CUDA/Softmax.h"

template<class DT>
void check(const DT* in, const DT* exp, DaphneContext* dctx) {
Expand All @@ -32,7 +27,7 @@ void check(const DT* in, const DT* exp, DaphneContext* dctx) {
CHECK(Approx(*(res->getValues())).epsilon(1e-6) == *(exp->getValues()));
}

TEMPLATE_PRODUCT_TEST_CASE("softmax_fwd", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
TEMPLATE_PRODUCT_TEST_CASE("CUDA::NN::Softmax::Forward", TAG_DNN, (DenseMatrix), (float, double)) { // NOLINT(cert-err58-cpp)
auto dctx = setupContextAndLogger();
using DT = TestType;

Expand All @@ -47,5 +42,3 @@ TEMPLATE_PRODUCT_TEST_CASE("softmax_fwd", TAG_DNN, (DenseMatrix), (float, double
DataObjectFactory::destroy(input);
DataObjectFactory::destroy(result);
}

#endif // USE_CUDA
Loading

0 comments on commit 6da3edf

Please sign in to comment.