Pull request #13: [ML-470] Add tutorial and demo plugin for machine learning inference

Merge in SUITE/public-demos from ML-470-ml-tutorial to master

Squashed commit of the following:

commit 5fcfe75f32e592dc9e067bf52fa37eb27d247c41
Author: Raphael Prevost <[email protected]>
Date:   Wed Sep 15 10:15:55 2021 +0200

    [ML-470] Remove pre-processing from the C++ algorithm to avoid linking against ImageMathPlugin

commit 3795c96646d8f1dc922af8e666d207fd5510ea59
Author: Raphael Prevost <[email protected]>
Date:   Fri Sep 10 12:27:13 2021 +0200

    [ML-470] Improve the demo following reviews

commit c7db9e0c48938f38dacc9cb0a6362fe50ed4a257
Author: Raphael Prevost <[email protected]>
Date:   Mon Sep 6 11:32:49 2021 +0200

    [ML-470] Add tutorial and demo plugin for machine learning inference
raphael-prevost authored and schultezb committed Sep 15, 2021
1 parent dd8e3cd commit 85ade0b
Showing 15 changed files with 489 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitattributes
@@ -0,0 +1 @@
*.pt filter=lfs diff=lfs merge=lfs -text
44 changes: 44 additions & 0 deletions ExampleMachineLearningInference/CMakeLists.txt
@@ -0,0 +1,44 @@
cmake_minimum_required(VERSION 3.2.0)

# Define a new CMake project for the MachineLearningInference plugin
project(MachineLearningInferencePlugin)

# Locate the ImFusion SDK.
# List required modules/plugins in the COMPONENTS section (e.g. COMPONENTS ImFusionSeg ImFusionReg).
find_package(ImFusionLib COMPONENTS ImFusionML TorchPlugin REQUIRED)

# Enable automatic MOC, RCC and UIC preprocessing for Qt
set(CMAKE_AUTOMOC ON)
set(CMAKE_AUTORCC ON)
set(CMAKE_AUTOUIC ON)

# Define and configure the CMake target
set(Sources
	RunPretrainedModelAlgorithm.cpp
	MachineLearningInferenceFactory.cpp
	MachineLearningInferencePlugin.cpp
)
set(Headers
	RunPretrainedModelAlgorithm.h
	MachineLearningInferenceFactory.h
	MachineLearningInferencePlugin.h
)
set(UiSources
)

# Define target library
add_library(MachineLearningInferencePlugin SHARED ${Sources} ${UiSources} ${Headers})
target_include_directories(MachineLearningInferencePlugin PRIVATE
	${CMAKE_CURRENT_SOURCE_DIR}
)
# Link against the ImFusionLib and selected modules/plugins
target_link_libraries(MachineLearningInferencePlugin PRIVATE
	ImFusionLib
	ImFusionML # note that we do need to link against the ML plugin (but not against TorchPlugin or OnnxRuntimePlugin)
)

# Define output target directories and provide instructions on how to launch
# the ImFusion Suite with the built custom plugin.
# These functions are provided by the ImFusionLib target config.
imfusion_set_common_target_properties()
imfusion_provide_ide_instructions()

22 changes: 22 additions & 0 deletions ExampleMachineLearningInference/MachineLearningInferenceFactory.cpp
@@ -0,0 +1,22 @@
#include "MachineLearningInferenceFactory.h"
#include "RunPretrainedModelAlgorithm.h"

#include <ImFusion/GUI/DefaultAlgorithmController.h>

namespace ImFusion
{
	MachineLearningInferenceAlgorithmFactory::MachineLearningInferenceAlgorithmFactory()
	{
		// register the RunPretrainedModelAlgorithm in the sub-category "Machine Learning"
		registerAlgorithm<RunPretrainedModelAlgorithm>("Machine Learning;My Demo Machine Learning Algorithm");
	}


	AlgorithmController* MachineLearningInferenceControllerFactory::create(Algorithm* a) const
	{
		// create a DefaultAlgorithmController for the RunPretrainedModelAlgorithm
		if (RunPretrainedModelAlgorithm* alg = dynamic_cast<RunPretrainedModelAlgorithm*>(a))
			return new DefaultAlgorithmController(alg);
		return nullptr;
	}
}
24 changes: 24 additions & 0 deletions ExampleMachineLearningInference/MachineLearningInferenceFactory.h
@@ -0,0 +1,24 @@
/* Copyright (c) 2012-2019 ImFusion GmbH, Munich, Germany. All rights reserved. */
#pragma once

#include <ImFusion/Base/AlgorithmControllerFactory.h>
#include <ImFusion/Base/AlgorithmFactory.h>

namespace ImFusion
{
	class Algorithm;

	/// AlgorithmFactory for MachineLearningInference plugin
	class MachineLearningInferenceAlgorithmFactory : public AlgorithmFactory
	{
	public:
		MachineLearningInferenceAlgorithmFactory();
	};

	/// AlgorithmControllerFactory for MachineLearningInference plugin
	class MachineLearningInferenceControllerFactory : public AlgorithmControllerFactory
	{
	public:
		virtual AlgorithmController* create(Algorithm* a) const;
	};
}
35 changes: 35 additions & 0 deletions ExampleMachineLearningInference/MachineLearningInferencePlugin.cpp
@@ -0,0 +1,35 @@
#include "MachineLearningInferencePlugin.h"

#include "MachineLearningInferenceFactory.h"

// Export free factory function to instantiate plugin
#ifdef WIN32
extern "C" __declspec(dllexport) ImFusion::ImFusionPlugin* createPlugin()
{
return new ImFusion::MachineLearningInferencePlugin;
}
#else
extern "C" ImFusion::ImFusionPlugin* createPlugin()
{
return new ImFusion::MachineLearningInferencePlugin;
}
#endif


namespace ImFusion
{
	MachineLearningInferencePlugin::MachineLearningInferencePlugin()
	{
		m_algFactory = new MachineLearningInferenceAlgorithmFactory;
		m_algCtrlFactory = new MachineLearningInferenceControllerFactory;
	}


	MachineLearningInferencePlugin::~MachineLearningInferencePlugin() {}


	const ImFusion::AlgorithmFactory* MachineLearningInferencePlugin::getAlgorithmFactory() { return m_algFactory; }


	const ImFusion::AlgorithmControllerFactory* MachineLearningInferencePlugin::getAlgorithmControllerFactory() { return m_algCtrlFactory; }
}
23 changes: 23 additions & 0 deletions ExampleMachineLearningInference/MachineLearningInferencePlugin.h
@@ -0,0 +1,23 @@
#pragma once

#include <ImFusion/Base/ImFusionPlugin.h>

namespace ImFusion
{
	class AlgorithmFactory;
	class AlgorithmControllerFactory;

	/// Minimal example for defining a custom plugin for the ImFusion SDK
	class MachineLearningInferencePlugin : public ImFusionPlugin
	{
	public:
		MachineLearningInferencePlugin();
		virtual ~MachineLearningInferencePlugin();
		virtual const AlgorithmFactory* getAlgorithmFactory();
		virtual const AlgorithmControllerFactory* getAlgorithmControllerFactory();

	private:
		AlgorithmFactory* m_algFactory;
		AlgorithmControllerFactory* m_algCtrlFactory;
	};
}
147 changes: 147 additions & 0 deletions ExampleMachineLearningInference/README.md
@@ -0,0 +1,147 @@
# Machine Learning Model Inference

## Summary

This tutorial describes how to use, within the ImFusion framework, a deep learning model that has been trained independently in Python.
The C++ plugin included in this repository defines a demo algorithm that runs such a model in the ImFusion Suite.

![Screenshot of the demo algorithm running a segmentation model](demo_algorithm.png "Screenshot of the demo algorithm running a segmentation model")

## Requirements and Build Instructions
- Installed ImFusion SDK with TorchPlugin (or OnnxRuntimePlugin)
- Qt5 (at least the version that the ImFusion SDK comes with)
- CMake version 3.2 or newer

Use CMake to generate build/project files for your build system of choice.
If you are using Visual Studio, the CMake scripts will automatically configure the generated solution with the correct environment parameters so that you can launch the ImFusion Suite, including your plugin, directly from Visual Studio.
If the ImFusion Suite does not pick up the built plugin on start, set the `IMFUSION_PLUGIN_PATH` environment variable to the directory containing the built .dll/.so.

The two main deep learning engines supported in the ImFusion framework are [Torch](https://pytorch.org/) and [ONNXRuntime](https://onnxruntime.ai/) (experimental).

To make sure that you have the appropriate plugins (`TorchPlugin`, `OnnxRuntimePlugin`) installed with the Suite/SDK, you can start the ImFusion Suite or Console and look at the log window:
```
ImFusionLib Version x.x.x built on yyyy-mm-dd.
[OnnxRuntimePlugin] Providers: CUDAExecutionProvider
[OnnxRuntimePlugin] Providers: CPUExecutionProvider
[Framework] Available Plugins: [...], ImFusionML, OnnxRuntimePlugin, TorchPlugin [...]
```

## Step 1: Exporting trained models from PyTorch

### As Torch traced model

The currently supported version of PyTorch is 1.8 (or 1.5 for older versions of our SDK and for the Windows VS2017 installers).
Exporting the model with a different version of PyTorch may lead to incompatibility errors when the model is loaded.
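
As a quick safeguard, you can check your local PyTorch version before exporting. A minimal sketch (the expected version string is an assumption; adjust it to your SDK):

```python
import torch

# Fail early if the local PyTorch version does not match the Torch runtime
# shipped with the SDK (assumed to be 1.8 here).
assert torch.__version__.startswith("1.8"), \
    f"Expected PyTorch 1.8 for export, found {torch.__version__}"
```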

```python
import torch

# `model` is your trained torch.nn.Module, switched to evaluation mode
model.eval()

# For 3D volumes
dummy_input = torch.rand(1, 1, 64, 64, 64)  # batch x channels x slices x height x width
# For 2D images
dummy_input = torch.rand(1, 3, 256, 256)  # batch x channels x height x width

traced_script_module = torch.jit.trace(model, dummy_input)
traced_script_module.save("traced_model.pt")
```
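
For reference, here is a self-contained sketch of the whole export. The toy network below is a hypothetical stand-in for your own trained model:

```python
import torch
import torch.nn as nn

# Hypothetical toy 2D segmentation network; replace it with your own
# trained model loaded from a checkpoint.
model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(8, 2, kernel_size=3, padding=1),  # one output channel per label
)
model.eval()  # disable dropout/batch-norm updates before tracing

with torch.no_grad():
    dummy_input = torch.rand(1, 3, 256, 256)  # batch x channels x height x width
    traced = torch.jit.trace(model, dummy_input)
traced.save("traced_model.pt")
```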

### As ONNX model

```python
import torch

# `model` is your trained torch.nn.Module, switched to evaluation mode
model.eval()

# For 3D volumes
dummy_input = torch.rand(1, 1, 64, 64, 64)  # batch x channels x slices x height x width
# For 2D images
dummy_input = torch.rand(1, 3, 256, 256)  # batch x channels x height x width

# Define dynamic axes (that can be resized at inference time).
# The axis names are informational labels; what matters is which dimensions are marked dynamic.
input_dynamic_axes = {0: 'batch', 2: 'width', 3: 'height', 4: 'depth'}
# Keep only the axes that exist for the chosen input rank
input_dynamic_axes = {dim: name for dim, name in input_dynamic_axes.items() if dim < len(dummy_input.shape)}

torch.onnx.export(model, dummy_input, "traced_model.onnx", input_names=['input'], dynamic_axes={'input': input_dynamic_axes})
```

Other models exported to ONNX (for instance converted from TensorFlow) can be used as long as they follow the conventions described above.
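
Before moving on, you may want to sanity-check the exported file outside the SDK. A minimal sketch using the `onnxruntime` Python package (assuming the 2D input shape and the input name `input` from above):

```python
import numpy as np
import onnxruntime as ort

# Load the exported model and run a dummy inference to verify the export
session = ort.InferenceSession("traced_model.onnx")
dummy = np.random.rand(1, 3, 256, 256).astype(np.float32)
outputs = session.run(None, {"input": dummy})
print([o.shape for o in outputs])
```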

## Step 2: Preparing the YAML model file

Once you have exported your model in a suitable format, you need to create a model configuration file in YAML format.
This file contains all the information the ImFusion framework needs to run the model on an input image.

The section below shows an example of such a file.

```yaml
Version: 4.0
Type: NeuralNetwork
Name: Name of the model (no practical purpose)
Description: Description of the model (no practical purpose)
Engine: torch # Could be onnx
ModelFile: traced_model.pt # Path to the actual model file (could be a onnx file)
ForceCPU: false # Set to true if you want to run the inference on the CPU instead of the GPU
Verbose: false # Print verbose info messages
MaxBatchSize: 1 # Maximum number of images to run through the network simultaneously
LabelNames: [FirstObject, SecondObject] # Names of the different labels encoded as channels of the output tensor

#############################################################################################
# Sequence of preprocessing operations run before the network
# (all available operations are listed in the Python documentation of the SDK)
#############################################################################################
PreProcessing:
- MakeFloat: {} # First convert to float
- BakeTransformation: {} # If the image has a matrix, apply the transformation to the image
- Resample: # Resample to a fixed resolution of 1.5mm
resolution: 1.5
- NormalizePercentile: # Normalize image intensities based on the image percentile
min: 0.001
max: 0.999
clip: false

#############################################################################################
# For pixelwise (fully convolutional) models, it might be necessary to split the input into sub-images
# because of GPU memory constraints, especially for 3D volumes.
# Each of these sub-images will be fed into the network and the predictions will be recombined.
# This section can be removed for imagewise models.
#############################################################################################
Sampling:
# Maximum size of the sub-image (set to -1 if you never want to split the image)
- MaxSizeSubdivision: 96
# Some network architectures require each sub-image dimension to be a multiple of this number
- DimensionDivisor: 16
# Skip the unpadding of sub-images; setting this to true is recommended for real-time applications where speed is paramount
- SkipUnpadding: false
# Sub-images are extracted with overlap in order to avoid border effects - this is the size in pixels of the overlap
- PixelsOverlap: 32
# In overlap regions, weight each sub-image's contribution to a pixel based on the pixel's position
- RecombineWeighted: true
# Repeat border values or mirror-pad when extracting sub-images at the border
- MirrorPadding: false

#############################################################################################
# Sequence of post-processing operations run
# after the network and the recombination of the sub-images
# (all available operations are listed in the Python documentation of the SDK)
#############################################################################################
PostProcessing:
- ResampleToInput: {} # Resample the prediction back to the geometry of the original image
- ArgMax: {} # Convert the multi-channel probability map to a label map
```

## Step 3a: Executing a model in the ImFusion Suite

The two main algorithms are `Apply Imagewise Model` and `Apply Pixelwise Model`.
Both of them take a single dataset as input and are available in the *Machine Learning* sub-menu of the algorithm list.

Select the path to the YAML model configuration file, and click on _Compute_.


## Step 3b: Executing a model from the ImFusion SDK

The two aforementioned algorithms are also available via our C++ SDK.
For more details, have a look at the source code of this sample plugin (in particular `RunPretrainedModelAlgorithm`), which runs a segmentation model on the input image; the pre- and post-processing steps are defined in the YAML model file.

In order to reproduce what is shown in the screenshot, build the `MachineLearningInferencePlugin` and start the ImFusion Suite.
Open the image `horse.png`, and select the new algorithm called _My Demo Machine Learning Algorithm_ in the _Machine Learning_ sub-menu.
Select the `demo_model.yaml` file and click on _Compute_.

The segmentation model `demo_model_traced.pt` has been generated via the Python script `generate_demo_model.py`.

The expected input of this algorithm is a 2D RGB image with intensities in [0; 255]. The applied pre- and post-processing operations are defined in the YAML file `demo_model.yaml`.
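
If you do not have a suitable image at hand, any 2D RGB image matching these constraints should work. A minimal sketch creating a synthetic test input (a hypothetical helper, not part of this repository):

```python
import numpy as np
from PIL import Image

# Create a synthetic 2D RGB image with intensities in [0, 255],
# matching the input constraints of the demo algorithm
img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
Image.fromarray(img).save("test_input.png")
```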
76 changes: 76 additions & 0 deletions ExampleMachineLearningInference/RunPretrainedModelAlgorithm.cpp
@@ -0,0 +1,76 @@
#include "RunPretrainedModelAlgorithm.h"

#include <ImFusion/Base/DataList.h>
#include <ImFusion/Base/Log.h>
#include <ImFusion/Base/SharedImageSet.h>
#include <ImFusion/ImageMath/SharedImageSetArithmetic.h>
#include <ImFusion/ML/PixelwiseLearningModel.h>
#include <ImFusion/ML/Operations.h>

namespace IM = ImFusion::ImageMath;

namespace ImFusion
{
	RunPretrainedModelAlgorithm::RunPretrainedModelAlgorithm(SharedImageSet* img)
		: m_imgIn(img)
	{
		// we define the parameter as a Path so that the controller includes a button opening a file dialog
		modelPath.setType(Properties::ParamType::Path);
		modelPath.setLabel("Model Path");
		modelPath.setAttribute("filter", "YAML files (*.yaml)");
	}


	bool RunPretrainedModelAlgorithm::createCompatible(const DataList& data, Algorithm** a)
	{
		// check requirements to create the algorithm
		if (data.size() != 1)
			return false;

		// accept images from all modalities
		SharedImageSet* inputImg = data.getImage(Data::UNKNOWN);
		if (inputImg == nullptr)
			return false;

		// only accept 2D RGB images
		if (inputImg->img()->dimension() != 2 || inputImg->img()->channels() != 3)
			return false;

		// requirements are met, create the algorithm if asked
		if (a)
		{
			*a = new RunPretrainedModelAlgorithm(inputImg);
		}
		return true;
	}


	void RunPretrainedModelAlgorithm::compute()
	{
		// set generic error status until we have finished
		m_status = Status::Error;

		// create the machine learning model
		ML::PixelwiseLearningModel model(modelPath);

		if (!model.isInitialized())
		{
			LOG_ERROR("RunPretrainedModelAlgorithm", "Model could not be initialized");
			return;
		}

		// run the model and retrieve its output
		m_imgOut = model.predict(m_imgIn);

		// set algorithm status to success
		if (m_imgOut)
			m_status = Status::Success;
	}


	OwningDataList RunPretrainedModelAlgorithm::takeOutput()
	{
		// if we have produced some output, add it to the list
		return OwningDataList(std::move(m_imgOut));
	}
}