diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..4fb662854f3175a3c793711a89b0c4942f3cdb6a
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,8 @@
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jp2 filter=lfs diff=lfs merge=lfs -text
+*.tif filter=lfs diff=lfs merge=lfs -text
+*.data-* filter=lfs diff=lfs merge=lfs -text
+*.gpkg filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 271b1daf7274419b0577a9dc37a862c8b3eab5ab..fe0ab91251dabee7b08824aaf3e71be8f77e8daa 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -3,6 +3,9 @@ image: gitlab-registry.irstea.fr/remi.cresson/otbtf:2.4-cpu-basic-testing
 variables:
     OTB_BUILD: /src/otb/build/OTB/build  # Local OTB build directory
     OTBTF_SRC: /src/otbtf  # Local OTBTF source directory
+    OTB_TEST_DIR: $OTB_BUILD/Testing/Temporary  # OTB testing directory
+    ARTIFACT_TEST_DIR: $CI_PROJECT_DIR/testing
+    CRC_BOOK_TMP: /tmp/crc_book_tests_tmp
 
 workflow:
   rules:
@@ -13,13 +16,17 @@ stages:
   - Build
   - Static Analysis
   - Test
+  - Applications Test
 
 .update_otbtf_src: &update_otbtf_src
   - sudo rm -rf $OTBTF_SRC && sudo ln -s $PWD $OTBTF_SRC  # Replace local OTBTF source directory
 
 .compile_otbtf: &compile_otbtf
   - cd $OTB_BUILD && sudo make install -j$(nproc --all)  # Rebuild OTB with new OTBTF sources
-
+
+.install_pytest: &install_pytest
+  - pip3 install pytest pytest-cov pytest-order  # Install pytest stuff
+
 before_script:
   - *update_otbtf_src
 
@@ -60,21 +67,46 @@ ctest:
   stage: Test
   script:
     - *compile_otbtf
-    - sudo rm -rf $OTB_BUILD/Testing/Temporary/*  # Empty testing temporary folder (old files here)
+    - sudo rm -rf $OTB_TEST_DIR/*  # Empty testing temporary folder (old files here)
     - cd $OTB_BUILD/ && sudo ctest -L OTBTensorflow  # Run ctest
   after_script:
-    - cp -r $OTB_BUILD/Testing/Temporary $CI_PROJECT_DIR/testing  # Copy artifacts (they must be in $CI_PROJECT_DIR)
+    - cp -r $OTB_TEST_DIR $ARTIFACT_TEST_DIR
   artifacts:
     paths:
-      - testing/*.*
+      - $ARTIFACT_TEST_DIR/*.*
     expire_in: 1 week
     when: on_failure
 
+.applications_test_base:
+  stage: Applications Test
+  rules:
+    # Only for MR targeting 'develop' and 'master' branches because applications tests are slow
+    - if: $CI_MERGE_REQUEST_ID && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == 'develop'
+    - if: $CI_MERGE_REQUEST_ID && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == 'master'
+  artifacts:
+    when: on_failure
+    paths:
+      - $CI_PROJECT_DIR/report_*.xml
+      - $ARTIFACT_TEST_DIR/*.*
+    expire_in: 1 week
+
+crc_book:
+  extends: .applications_test_base
+  script:
+    - *compile_otbtf
+    - *install_pytest
+    - cd $CI_PROJECT_DIR
+    - mkdir -p $CRC_BOOK_TMP
+    - TMPDIR=$CRC_BOOK_TMP DATADIR=$CI_PROJECT_DIR/test/data python -m pytest --junitxml=$CI_PROJECT_DIR/report_tutorial.xml $OTBTF_SRC/test/tutorial_unittest.py
+  after_script:
+    - mkdir -p $ARTIFACT_TEST_DIR
+    - cp $CRC_BOOK_TMP/*.* $ARTIFACT_TEST_DIR/
+
 sr4rs:
-  stage: Test
+  extends: .applications_test_base
   script:
     - *compile_otbtf
-    - pip3 install pytest pytest-cov
+    - *install_pytest
     - cd $CI_PROJECT_DIR
     - wget -O sr4rs_sentinel2_bands4328_france2020_savedmodel.zip
       https://nextcloud.inrae.fr/s/boabW9yCjdpLPGX/download/sr4rs_sentinel2_bands4328_france2020_savedmodel.zip
@@ -85,8 +117,4 @@ sr4rs:
     - git clone https://github.com/remicres/sr4rs.git
     - export PYTHONPATH=$PYTHONPATH:$PWD/sr4rs
     - python -m pytest --junitxml=$CI_PROJECT_DIR/report_sr4rs.xml $OTBTF_SRC/test/sr4rs_unittest.py
-  artifacts:
-    when: on_failure
-    paths:
-      - $CI_PROJECT_DIR/report_sr4rs.xml
-    expire_in: 1 week
+
diff --git a/Dockerfile b/Dockerfile
index c9aa6e9fcb40cba228c9a4d2c0634eb055e38fdf..c2c5efa46dc17b7e81fa548e40a6e64cd29abae3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -74,6 +74,7 @@ RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.gi
  && ln -s $(find /opt/otbtf -type d -wholename "*/site-packages/tensorflow/include") /opt/otbtf/include/tf \
  # The only missing header in the wheel
  && cp tensorflow/cc/saved_model/tag_constants.h /opt/otbtf/include/tf/tensorflow/cc/saved_model/ \
+ && cp tensorflow/cc/saved_model/signature_constants.h /opt/otbtf/include/tf/tensorflow/cc/saved_model/ \
  # Symlink external libs (required for MKL - libiomp5)
  && for f in $(find -L /opt/otbtf/include/tf -wholename "*/external/*/*.so"); do ln -s $f /opt/otbtf/lib/; done \
  # Compress and save TF binaries
@@ -90,7 +91,10 @@ WORKDIR /src/otb
 
 # SuperBuild OTB
 COPY tools/docker/build-flags-otb.txt ./
-RUN git clone --single-branch -b $OTB https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git \
+RUN apt-get update -y \
+ && apt-get install --reinstall ca-certificates -y \
+ && update-ca-certificates \
+ && git clone --single-branch -b $OTB https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git \
  && mkdir -p build \
  && cd build \
  # Set GL/Qt build flags
diff --git a/README.md b/README.md
index a18961b3038b680b4a52f86666bc2ef8005a18d9..9434ec96fd9a8bc8ffeeabd98bcc4e4200376761 100644
--- a/README.md
+++ b/README.md
@@ -18,21 +18,23 @@ Applications can be used to build OTB pipelines from Python or C++ APIs.
 
 ### Python
 
-This is a work in progress. For now, `tricks.py` provides a set of helpers to build deep nets, and `otbtf.py` provides datasets which can be used in Tensorflow pipelines to train networks from python.
+`otbtf.py` targets python developers who want to train their own model from python with TensorFlow or Keras.
+It provides various classes for datasets and iterators to handle the _patches images_ generated from the `PatchesExtraction` OTB application.
+For instance, the `otbtf.Dataset` class provides a method `get_tf_dataset()` which returns a `tf.dataset` that can be used in your favorite TensorFlow pipelines, or convert your patches into TFRecords.
 
-## Portfolio
+`tricks.py` is here for backward compatibility with code based on OTBTF 1.x and 2.x.
 
-Below are some screen captures of deep learning applications performed at large scale with OTBTF.
- - Image to image translation (Spot-7 image --> Wikimedia Map using CGAN)
-<img src ="doc/images/pix2pix.png" />
+## Examples
 
+Below are some screen captures of deep learning applications performed at large scale with OTBTF.
  - Landcover mapping (Spot-7 images --> Building map using semantic segmentation)
 <img src ="doc/images/landcover.png" />
 
- - Image enhancement (Enhancement of Sentinel-2 images at 1.5m  using SRGAN)
+ - Super resolution (Sentinel-2 images upsampled with the [SR4RS software](https://github.com/remicres/sr4rs), which is based on OTBTF)
 <img src ="doc/images/supresol.png" />
 
-You can read more details about these applications on [this blog](https://mdl4eo.irstea.fr/2019/)
+ - Image to image translation (Spot-7 image --> Wikimedia Map using CGAN. So unnecessary but fun!)
+<img src ="doc/images/pix2pix.png" />
 
 ## How to install
 
@@ -42,8 +44,8 @@ For now you have two options: either use the existing **docker image**, or build
 
 Use the latest image from dockerhub:
 ```
-docker pull mdl4eo/otbtf2.5:cpu
-docker run -u otbuser -v $(pwd):/home/otbuser mdl4eo/otbtf2.5:cpu otbcli_PatchesExtraction -help
+docker pull mdl4eo/otbtf3.0:cpu
+docker run -u otbuser -v $(pwd):/home/otbuser mdl4eo/otbtf3.0:cpu otbcli_PatchesExtraction -help
 ```
 
 Read more in the [docker use documentation](doc/DOCKERUSE.md).
@@ -57,12 +59,13 @@ Read more in the [build from sources documentation](doc/HOWTOBUILD.md).
 - Reading [the applications documentation](doc/APPLICATIONS.md) will help, of course 😉
 - A small [tutorial](https://mdl4eo.irstea.fr/2019/01/04/an-introduction-to-deep-learning-on-remote-sensing-images-tutorial/) on MDL4EO's blog
 - in the `python` folder are provided some [ready-to-use deep networks, with documentation and scientific references](doc/EXAMPLES.md).
-- A book: *Cresson, R. (2020). Deep Learning for Remote Sensing Images with Open Source Software. CRC Press.* Use QGIS, OTB and Tensorflow to perform various kind of deep learning sorcery on remote sensing images (patch-based classification for landcover mapping, semantic segmentation of buildings, optical image restoration from joint SAR/Optical time series).
+- A [book](https://doi.org/10.1201/9781003020851): *Cresson, R. (2020). Deep Learning for Remote Sensing Images with Open Source Software. CRC Press.* Use QGIS, OTB and Tensorflow to perform various kind of deep learning sorcery on remote sensing images (patch-based classification for landcover mapping, semantic segmentation of buildings, optical image restoration from joint SAR/Optical time series).
 - Check [our repository](https://github.com/remicres/otbtf_tutorials_resources) containing stuff (data and models) to begin with with!
+- Finally, take a look in the `test` folder. You will find plenty of command lines for applications tests!
 
 ## Contribute
 
-Every one can **contribute** to OTBTF! Don't be shy.
+Everyone can **contribute** to OTBTF. Just open a PR :)
 
 ## Cite
 
diff --git a/doc/DOCKERUSE.md b/doc/DOCKERUSE.md
index 0f1086015a59c46efd5a1ca3f93a61dd962fa75f..34e58cc3eab4a170e8e4abd445da8a826890b8bb 100644
--- a/doc/DOCKERUSE.md
+++ b/doc/DOCKERUSE.md
@@ -20,6 +20,10 @@ Here is the list of OTBTF docker images hosted on [dockerhub](https://hub.docker
 | **mdl4eo/otbtf2.5:cpu**           | Ubuntu Focal  | r2.5   | 7.4.0 | CPU, few optimization  | no        | 5.2,6.1,7.0,7.5,8.6|
 | **mdl4eo/otbtf2.5:gpu**           | Ubuntu Focal  | r2.5   | 7.4.0 | GPU                    | no        | 5.2,6.1,7.0,7.5,8.6|
 | **mdl4eo/otbtf2.5:gpu-dev**       | Ubuntu Focal  | r2.5   | 7.4.0 | GPU (dev)              | yes       | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf3.0:cpu-basic**     | Ubuntu Focal  | r2.5   | 7.4.0 | CPU, no optimization   | no        | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf3.0:cpu-basic-dev** | Ubuntu Focal  | r2.5   | 7.4.0 | CPU, no optimization (dev) |  yes  | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf3.0:gpu**           | Ubuntu Focal  | r2.5   | 7.4.0 | GPU                    | yes       | 5.2,6.1,7.0,7.5,8.6|
+| **mdl4eo/otbtf3.0:gpu-dev**       | Ubuntu Focal  | r2.5   | 7.4.0 | GPU (dev)              | yes       | 5.2,6.1,7.0,7.5,8.6|
 
 - `cpu` tagged docker images are compiled without optimization.
 - `gpu` tagged docker images are suited for **NVIDIA GPUs**. They use CUDA/CUDNN support. 
diff --git a/doc/images/classif_map.png b/doc/images/classif_map.png
index bc2a10799b03752c6a720c1d6b0ecca2aa6ae15d..017118c778d6d553fd1499a2bef5848782bcfc9c 100644
Binary files a/doc/images/classif_map.png and b/doc/images/classif_map.png differ
diff --git a/doc/images/docker_desktop_1.jpeg b/doc/images/docker_desktop_1.jpeg
index 9a03bd58cae40dfb428de083531c687020572883..21902e77e6a2e77463ddc452b6359e0dc1b0ca3d 100644
Binary files a/doc/images/docker_desktop_1.jpeg and b/doc/images/docker_desktop_1.jpeg differ
diff --git a/doc/images/docker_desktop_2.jpeg b/doc/images/docker_desktop_2.jpeg
index e393cdb40d45c1fbcfd69c8983a47aef8731b899..ec9f5632ba49c2b3f642c777531802023882e4ea 100644
Binary files a/doc/images/docker_desktop_2.jpeg and b/doc/images/docker_desktop_2.jpeg differ
diff --git a/doc/images/landcover.png b/doc/images/landcover.png
index 913f3600614420a70adc1bfa8110cdc6e3c3f45a..0fecc763fd72bccb1aba68bf7a1cdc236980101e 100644
Binary files a/doc/images/landcover.png and b/doc/images/landcover.png differ
diff --git a/doc/images/logo.png b/doc/images/logo.png
index 13aeaffd1c9ba0c5836b4a06857084ee126cede6..13f91d5a4a91fc6f372996e6a72fd5de27167567 100644
Binary files a/doc/images/logo.png and b/doc/images/logo.png differ
diff --git a/doc/images/model_training.png b/doc/images/model_training.png
index d385dbb6c4c6d5fc8d5e95e210800165d388a0b4..3e7d168937add573723cc686b8b263dbaebe42f4 100644
Binary files a/doc/images/model_training.png and b/doc/images/model_training.png differ
diff --git a/doc/images/patches_extraction.png b/doc/images/patches_extraction.png
index 024beb692493896d450481ba54e6b7b2069d60e9..bd04ff592b0c11091763bc094f3f22dc8bb63303 100644
Binary files a/doc/images/patches_extraction.png and b/doc/images/patches_extraction.png differ
diff --git a/doc/images/pix2pix.png b/doc/images/pix2pix.png
index 1c79652ff0bae389cbee1c7cde40b2d46f20f089..5df6b349acb27a04419fb226c2085a757cb6fe40 100644
Binary files a/doc/images/pix2pix.png and b/doc/images/pix2pix.png differ
diff --git a/doc/images/savedmodel_simple_cnn.png b/doc/images/savedmodel_simple_cnn.png
index d494e99e824ec0c2852dd6d21686a5df33aaa4f5..7ebe28ee37d744c542ed66a4eb63bf3b95d2e8af 100644
Binary files a/doc/images/savedmodel_simple_cnn.png and b/doc/images/savedmodel_simple_cnn.png differ
diff --git a/doc/images/savedmodel_simple_fcnn.png b/doc/images/savedmodel_simple_fcnn.png
index bc9e7aab8246c02dec572df1daab3d3799e5f6d8..d2e13fc31d6adfd0c84feae0f836d1840411f8e9 100644
Binary files a/doc/images/savedmodel_simple_fcnn.png and b/doc/images/savedmodel_simple_fcnn.png differ
diff --git a/doc/images/savedmodel_simple_pxs_fcn.png b/doc/images/savedmodel_simple_pxs_fcn.png
index 169e9703faa25b3e934d888b637a0f7ecd182ed8..bfb72eab3b9d0de7488e386b2403adbd8eadb9e2 100644
Binary files a/doc/images/savedmodel_simple_pxs_fcn.png and b/doc/images/savedmodel_simple_pxs_fcn.png differ
diff --git a/doc/images/schema.png b/doc/images/schema.png
index df80fcc1cdbcc58e081e13a4918d3652e649c5ab..f5788e33078f20299e0275d1b8a950fcfda35882 100644
Binary files a/doc/images/schema.png and b/doc/images/schema.png differ
diff --git a/doc/images/supresol.png b/doc/images/supresol.png
index efff2fd23176e0fab501ced42aa24c56a2a77a9b..310d36abce642b6c3813c0c21f31c28e2904124a 100644
Binary files a/doc/images/supresol.png and b/doc/images/supresol.png differ
diff --git a/include/otbTensorflowCommon.cxx b/include/otbTensorflowCommon.cxx
index 662c9d3e979c5e67ccf9effc4564c9d9fd5c6d0e..b7a27c60c5ef49fbee42556ace70b54751f682f9 100644
--- a/include/otbTensorflowCommon.cxx
+++ b/include/otbTensorflowCommon.cxx
@@ -11,8 +11,10 @@
 =========================================================================*/
 #include "otbTensorflowCommon.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 //
 // Environment variable for the number of sources in "Multisource" applications
@@ -22,21 +24,21 @@ const std::string ENV_VAR_NAME_NSOURCES = "OTB_TF_NSOURCES";
 //
 // Get the environment variable as int
 //
-int GetEnvironmentVariableAsInt(const std::string & variableName)
+int
+GetEnvironmentVariableAsInt(const std::string & variableName)
 {
-  int ret = -1;
-  char const* tmp = getenv( variableName.c_str() );
-  if ( tmp != NULL )
+  int          ret = -1;
+  char const * tmp = getenv(variableName.c_str());
+  if (tmp != NULL)
   {
-    std::string s( tmp );
+    std::string s(tmp);
     try
     {
       ret = std::stoi(s);
     }
-    catch(...)
+    catch (...)
     {
-      itkGenericExceptionMacro("Error parsing variable "
-          << variableName << " as integer. Value is " << s);
+      itkGenericExceptionMacro("Error parsing variable " << variableName << " as integer. Value is " << s);
     }
   }
 
@@ -47,7 +49,8 @@ int GetEnvironmentVariableAsInt(const std::string & variableName)
 // This function returns the numeric content of the ENV_VAR_NAME_NSOURCES
 // environment variable
 //
-int GetNumberOfSources()
+int
+GetNumberOfSources()
 {
   int ret = GetEnvironmentVariableAsInt(ENV_VAR_NAME_NSOURCES);
   if (ret != -1)
@@ -60,15 +63,18 @@ int GetNumberOfSources()
 //
 // This function copy a patch from an input image to an output image
 //
-template<class TImage>
-void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & inputPatchIndex,
-    typename TImage::Pointer outputImg, typename TImage::IndexType & outputPatchIndex,
-    typename TImage::SizeType patchSize)
+template <class TImage>
+void
+CopyPatch(typename TImage::Pointer     inputImg,
+          typename TImage::IndexType & inputPatchIndex,
+          typename TImage::Pointer     outputImg,
+          typename TImage::IndexType & outputPatchIndex,
+          typename TImage::SizeType    patchSize)
 {
-  typename TImage::RegionType inputPatchRegion(inputPatchIndex, patchSize);
-  typename TImage::RegionType outputPatchRegion(outputPatchIndex, patchSize);
-  typename itk::ImageRegionConstIterator<TImage> inIt (inputImg, inputPatchRegion);
-  typename itk::ImageRegionIterator<TImage> outIt (outputImg, outputPatchRegion);
+  typename TImage::RegionType                    inputPatchRegion(inputPatchIndex, patchSize);
+  typename TImage::RegionType                    outputPatchRegion(outputPatchIndex, patchSize);
+  typename itk::ImageRegionConstIterator<TImage> inIt(inputImg, inputPatchRegion);
+  typename itk::ImageRegionIterator<TImage>      outIt(outputImg, outputPatchRegion);
   for (inIt.GoToBegin(), outIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt, ++outIt)
   {
     outIt.Set(inIt.Get());
@@ -78,9 +84,9 @@ void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & i
 //
 // Get image infos
 //
-template<class TImage>
-void GetImageInfo(typename TImage::Pointer image,
-    unsigned int & sizex, unsigned int & sizey, unsigned int & nBands)
+template <class TImage>
+void
+GetImageInfo(typename TImage::Pointer image, unsigned int & sizex, unsigned int & sizey, unsigned int & nBands)
 {
   nBands = image->GetNumberOfComponentsPerPixel();
   sizex = image->GetLargestPossibleRegion().GetSize(0);
@@ -90,8 +96,9 @@ void GetImageInfo(typename TImage::Pointer image,
 //
 // Propagate the requested region in the image
 //
-template<class TImage>
-void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region)
+template <class TImage>
+void
+PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region)
 {
   image->SetRequestedRegion(region);
   image->PropagateRequestedRegion();
@@ -101,13 +108,16 @@ void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::R
 //
 // Sample an input image at the specified location
 //
-template<class TImage>
-bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer outPtr,
-    typename TImage::PointType point, unsigned int elemIdx,
-    typename TImage::SizeType patchSize)
+template <class TImage>
+bool
+SampleImage(const typename TImage::Pointer inPtr,
+            typename TImage::Pointer       outPtr,
+            typename TImage::PointType     point,
+            unsigned int                   elemIdx,
+            typename TImage::SizeType      patchSize)
 {
   typename TImage::IndexType index, outIndex;
-  bool canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
+  bool                       canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
   if (canTransform)
   {
     outIndex[0] = 0;
@@ -128,7 +138,6 @@ bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer
     }
   }
   return false;
-
 }
 
 } // end namespace tf
diff --git a/include/otbTensorflowCommon.h b/include/otbTensorflowCommon.h
index fbd7281035185c2acc3a56dac3850a23d76280df..a012173c66ec9c4a10ae0f5f7df9908f01dd4833 100644
--- a/include/otbTensorflowCommon.h
+++ b/include/otbTensorflowCommon.h
@@ -22,38 +22,49 @@
 #include "itkImageRegionConstIterator.h"
 #include "itkImageRegionIterator.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Environment variable for the number of sources in "Multisource" applications
 extern const std::string ENV_VAR_NAME_NSOURCES;
 
 // Get the environment variable as int
-int GetEnvironmentVariableAsInt(const std::string & variableName);
+int
+GetEnvironmentVariableAsInt(const std::string & variableName);
 
 // Get the value (as int) of the environment variable ENV_VAR_NAME_NSOURCES
-int GetNumberOfSources();
+int
+GetNumberOfSources();
 
 // This function copy a patch from an input image to an output image
-template<class TImage>
-void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & inputPatchIndex,
-    typename TImage::Pointer outputImg, typename TImage::IndexType & outputPatchIndex,
-    typename TImage::SizeType patchSize);
+template <class TImage>
+void
+CopyPatch(typename TImage::Pointer     inputImg,
+          typename TImage::IndexType & inputPatchIndex,
+          typename TImage::Pointer     outputImg,
+          typename TImage::IndexType & outputPatchIndex,
+          typename TImage::SizeType    patchSize);
 
 // Get image infos
-template<class TImage>
-void GetImageInfo(typename TImage::Pointer image,
-    unsigned int & sizex, unsigned int & sizey, unsigned int & nBands);
+template <class TImage>
+void
+GetImageInfo(typename TImage::Pointer image, unsigned int & sizex, unsigned int & sizey, unsigned int & nBands);
 
 // Propagate the requested region in the image
-template<class TImage>
-void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region);
+template <class TImage>
+void
+PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region);
 
 // Sample an input image at the specified location
-template<class TImage>
-bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer outPtr,
-    typename TImage::PointType point, unsigned int elemIdx,
-    typename TImage::SizeType patchSize);
+template <class TImage>
+bool
+SampleImage(const typename TImage::Pointer inPtr,
+            typename TImage::Pointer       outPtr,
+            typename TImage::PointType     point,
+            unsigned int                   elemIdx,
+            typename TImage::SizeType      patchSize);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowCopyUtils.cxx b/include/otbTensorflowCopyUtils.cxx
index e969051102c6babaaa2aa3433dd25bafb64af18e..3aa117691c96993574afc27a085cf64a0140524e 100644
--- a/include/otbTensorflowCopyUtils.cxx
+++ b/include/otbTensorflowCopyUtils.cxx
@@ -187,7 +187,13 @@ GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto)
     return 1;
   // any other dimension: we assume that the last dimension represent the
   // number of channels in the output image.
-  return proto.dim(nDims - 1).size();
+  tensorflow::int64 nbChannels = proto.dim(nDims - 1).size();
+  if (nbChannels < 1)
+    itkGenericExceptionMacro("Cannot determine the size of the last dimension of one output tensor. Dimension index is "
+                             << (nDims - 1)
+                             << ". Please rewrite your model with output tensors having a shape where the last "
+                                "dimension is a constant value.");
+  return nbChannels;
 }
 
 //
@@ -218,9 +224,10 @@ CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
   const tensorflow::int64 nElmI = bufferRegion.GetNumberOfPixels() * outputDimSize_C;
   if (nElmI != nElmT)
   {
-    itkGenericExceptionMacro("Number of elements in the tensor is " << nElmT 
-                             << " but image outputRegion has " << nElmI << " values to fill.\n"
-                             << "Buffer region is: \n" << bufferRegion << "\n"
+    itkGenericExceptionMacro("Number of elements in the tensor is "
+                             << nElmT << " but image outputRegion has " << nElmI << " values to fill.\n"
+                             << "Buffer region is: \n"
+                             << bufferRegion << "\n"
                              << "Number of components in the output image: " << outputDimSize_C << "\n"
                              << "Tensor shape: " << PrintTensorShape(tensor.shape()) << "\n"
                              << "Please check the input(s) field of view (FOV), "
@@ -347,7 +354,7 @@ ValueToTensor(std::string value)
   }
 
   // Create tensor
-  tensorflow::TensorShape shape({values.size()});
+  tensorflow::TensorShape shape({ values.size() });
   tensorflow::Tensor      out(tensorflow::DT_BOOL, shape);
   if (is_digit)
   {
@@ -409,7 +416,7 @@ ValueToTensor(std::string value)
     }
     idx++;
   }
-  otbLogMacro(Debug,  << "Returning tensor: "<< out.DebugString());
+  otbLogMacro(Debug, << "Returning tensor: " << out.DebugString());
 
   return out;
 }
diff --git a/include/otbTensorflowCopyUtils.h b/include/otbTensorflowCopyUtils.h
index 174587913beb5a73f78cbffe614aef23c78c8147..59e1a7443ff78511b42d6d67c74023cd49864235 100644
--- a/include/otbTensorflowCopyUtils.h
+++ b/include/otbTensorflowCopyUtils.h
@@ -34,57 +34,94 @@
 #include <string>
 #include <regex>
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Generate a string with TensorShape infos
-std::string PrintTensorShape(const tensorflow::TensorShape & shp);
+std::string
+PrintTensorShape(const tensorflow::TensorShape & shp);
 
 // Generate a string with tensor infos
-std::string PrintTensorInfos(const tensorflow::Tensor & tensor);
+std::string
+PrintTensorInfos(const tensorflow::Tensor & tensor);
 
 // Create a tensor with the good datatype
-template<class TImage>
-tensorflow::Tensor CreateTensor(tensorflow::TensorShape & shape);
+template <class TImage>
+tensorflow::Tensor
+CreateTensor(tensorflow::TensorShape & shape);
 
 // Populate a tensor with the buffered region of a vector image
-template<class TImage>
-void PopulateTensorFromBufferedVectorImage(const typename TImage::Pointer bufferedimagePtr, tensorflow::Tensor & out_tensor);
+template <class TImage>
+void
+PopulateTensorFromBufferedVectorImage(const typename TImage::Pointer bufferedimagePtr, tensorflow::Tensor & out_tensor);
 
 // Populate the buffered region of a vector image with a given tensor's values
-template<class TImage>
-void TensorToImageBuffer(const tensorflow::Tensor & tensor, typename TImage::Pointer & image);
+template <class TImage>
+void
+TensorToImageBuffer(const tensorflow::Tensor & tensor, typename TImage::Pointer & image);
 
 // Recopy an VectorImage region into a 4D-shaped tensorflow::Tensor ({-1, sz_y, sz_x, sz_bands})
-template<class TImage, class TValueType=typename TImage::InternalPixelType>
-void RecopyImageRegionToTensor(const typename TImage::Pointer inputPtr,  const typename TImage::RegionType & region, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage, class TValueType = typename TImage::InternalPixelType>
+void
+RecopyImageRegionToTensor(const typename TImage::Pointer      inputPtr,
+                          const typename TImage::RegionType & region,
+                          tensorflow::Tensor &                tensor,
+                          unsigned int                        elemIdx);
 
 // Recopy an VectorImage region into a 4D-shaped tensorflow::Tensor (TValueType-agnostic function)
-template<class TImage>
-void RecopyImageRegionToTensorWithCast(const typename TImage::Pointer inputPtr,  const typename TImage::RegionType & region, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage>
+void
+RecopyImageRegionToTensorWithCast(const typename TImage::Pointer      inputPtr,
+                                  const typename TImage::RegionType & region,
+                                  tensorflow::Tensor &                tensor,
+                                  unsigned int                        elemIdx);
 
 // Sample a centered patch
-template<class TImage>
-void SampleCenteredPatch(const typename TImage::Pointer inputPtr, const typename TImage::IndexType & centerIndex, const typename TImage::SizeType & patchSize, tensorflow::Tensor & tensor, unsigned int elemIdx);
-template<class TImage>
-void SampleCenteredPatch(const typename TImage::Pointer inputPtr, const typename TImage::PointType & centerCoord, const typename TImage::SizeType & patchSize, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage>
+void
+SampleCenteredPatch(const typename TImage::Pointer     inputPtr,
+                    const typename TImage::IndexType & centerIndex,
+                    const typename TImage::SizeType &  patchSize,
+                    tensorflow::Tensor &               tensor,
+                    unsigned int                       elemIdx);
+template <class TImage>
+void
+SampleCenteredPatch(const typename TImage::Pointer     inputPtr,
+                    const typename TImage::PointType & centerCoord,
+                    const typename TImage::SizeType &  patchSize,
+                    tensorflow::Tensor &               tensor,
+                    unsigned int                       elemIdx);
 
 // Return the number of channels from the TensorflowShapeProto
-tensorflow::int64 GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto);
+tensorflow::int64
+GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto);
 
 // Copy a tensor into the image region
-template<class TImage, class TValueType>
-void CopyTensorToImageRegion(const tensorflow::Tensor & tensor, typename TImage::Pointer outputPtr, const typename TImage::RegionType & region, int & channelOffset);
+template <class TImage, class TValueType>
+void
+CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
+                        typename TImage::Pointer            outputPtr,
+                        const typename TImage::RegionType & region,
+                        int &                               channelOffset);
 
 // Copy a tensor into the image region (TValueType-agnostic version)
-template<class TImage>
-void CopyTensorToImageRegion(const tensorflow::Tensor & tensor, const typename TImage::RegionType & bufferRegion, typename TImage::Pointer outputPtr, const typename TImage::RegionType & outputRegion, int & channelOffset);
+template <class TImage>
+void
+CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
+                        const typename TImage::RegionType & bufferRegion,
+                        typename TImage::Pointer            outputPtr,
+                        const typename TImage::RegionType & outputRegion,
+                        int &                               channelOffset);
 
 // Convert a value into a tensor
-tensorflow::Tensor ValueToTensor(std::string value);
+tensorflow::Tensor
+ValueToTensor(std::string value);
 
 // Convert an expression into a dict
-std::pair<std::string, tensorflow::Tensor> ExpressionToTensor(std::string expression);
+std::pair<std::string, tensorflow::Tensor>
+ExpressionToTensor(std::string expression);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowDataTypeBridge.cxx b/include/otbTensorflowDataTypeBridge.cxx
index a510cb4ea5ecab0c1505e690d79922b2299ddc0d..71fcd8c6beca73f611aa919b237c9d719f6ec4a7 100644
--- a/include/otbTensorflowDataTypeBridge.cxx
+++ b/include/otbTensorflowDataTypeBridge.cxx
@@ -11,14 +11,17 @@
 =========================================================================*/
 #include "otbTensorflowDataTypeBridge.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 //
 // returns the datatype used by tensorflow
 //
-template<class Type>
-tensorflow::DataType GetTensorflowDataType()
+template <class Type>
+tensorflow::DataType
+GetTensorflowDataType()
 {
   if (typeid(Type) == typeid(bool))
   {
@@ -74,8 +77,9 @@ tensorflow::DataType GetTensorflowDataType()
 //
 // Return true if the tensor data type is correct
 //
-template<class Type>
-bool HasSameDataType(const tensorflow::Tensor & tensor)
+template <class Type>
+bool
+HasSameDataType(const tensorflow::Tensor & tensor)
 {
   return GetTensorflowDataType<Type>() == tensor.dtype();
 }
@@ -83,7 +87,8 @@ bool HasSameDataType(const tensorflow::Tensor & tensor)
 //
 // Return the datatype as string
 //
-tensorflow::string GetDataTypeAsString(tensorflow::DataType dt)
+tensorflow::string
+GetDataTypeAsString(tensorflow::DataType dt)
 {
   return tensorflow::DataTypeString(dt);
 }
diff --git a/include/otbTensorflowDataTypeBridge.h b/include/otbTensorflowDataTypeBridge.h
index af6be18d335761b7261e6b8c7288cf9b07122bc8..e815dafcba8bbb408a843e3ed63aa9a5a8b8dfe3 100644
--- a/include/otbTensorflowDataTypeBridge.h
+++ b/include/otbTensorflowDataTypeBridge.h
@@ -16,19 +16,24 @@
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/framework/tensor.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // returns the datatype used by tensorflow
-template<class Type>
-tensorflow::DataType GetTensorflowDataType();
+template <class Type>
+tensorflow::DataType
+GetTensorflowDataType();
 
 // Return true if the tensor data type is correct
-template<class Type>
-bool HasSameDataType(const tensorflow::Tensor & tensor);
+template <class Type>
+bool
+HasSameDataType(const tensorflow::Tensor & tensor);
 
 // Return datatype as string
-tensorflow::string GetDataTypeAsString(tensorflow::DataType dt);
+tensorflow::string
+GetDataTypeAsString(tensorflow::DataType dt);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowGraphOperations.cxx b/include/otbTensorflowGraphOperations.cxx
index d40c4da6a2f49a86cb28069094b4ab9f0cc5b231..b8e0920e0b04d6cb1dc30f5bf3e8fcbe30d29b1b 100644
--- a/include/otbTensorflowGraphOperations.cxx
+++ b/include/otbTensorflowGraphOperations.cxx
@@ -87,19 +87,39 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
                     std::vector<std::string> &                                           tensorsNames,
                     std::vector<std::string> &                                           layerNames,
                     std::vector<tensorflow::TensorShapeProto> &                          shapes,
-                    std::vector<tensorflow::DataType> &                                  dataTypes)
+                    std::vector<tensorflow::DataType> &                                  dataTypes,
+                    std::vector<std::string>                                             blackList = {})
 {
-  // Allocation
+  // Clear shapes, datatypes, and layers names
   shapes.clear();
   dataTypes.clear();
   layerNames.clear();
 
   // Debug infos
-  otbLogMacro(Debug, << "Nodes contained in the model: ");
+  otbLogMacro(Debug, << "Nodes contained in the model:");
   for (auto const & layer : layers)
     otbLogMacro(Debug, << "\t" << layer.first);
 
-  // When the user doesn't specify output.names, m_OutputTensors defaults to an empty list that we can not iterate over.
+  // Sort nodes names alphabetically
+  std::size_t                              k = 0;             // Keep track of the indices in the protobuf map
+  std::vector<std::pair<std::string, int>> sortedLayersNames; // vector of (name, index) pairs
+  for (auto const & layer : layers)
+  {
+    // We exclude names that appear in the blacklist, if any. Useful to avoid confusion
+    // between user placeholders (aka constants) and input tensors.
+    // Not used for output tensors.
+    if (std::count(blackList.begin(), blackList.end(), layer.first) == 0)
+    {
+      sortedLayersNames.emplace_back(layer.first, k);
+    }
+    k++;
+  }
+  std::sort(sortedLayersNames.begin(), sortedLayersNames.end());
+  otbLogMacro(Debug, << "Sorted (alphabetically) nodes names:");
+  for (auto const & name : sortedLayersNames)
+    otbLogMacro(Debug, << "\t" << name.first << " (index: " << name.second << ")");
+
+  // When the user doesn't specify output.names, tensorsNames defaults to an empty list that we can not iterate over.
   // We change it to a list containing an empty string [""]
   if (tensorsNames.size() == 0)
   {
@@ -108,8 +128,8 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
   }
 
   // Next, we fill layerNames
-  int k = 0; // counter used for tensorsNames
-  for (auto const & name: tensorsNames)
+  k = 0; // counter used for tensorsNames
+  for (auto const & name : tensorsNames)
   {
     bool                   found = false;
     tensorflow::TensorInfo tensor_info;
@@ -118,18 +138,24 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
     if (name.size() == 0)
     {
       found = true;
-      // select the k-th element of `layers`
-      auto it = layers.begin();
-      std::advance(it, k);
+      // select the k-th element of `layers` names, alphabetically sorted
+      const std::string kthName = sortedLayersNames[k].first;
+      auto              it = layers.begin();
+      const int         kthIndex = sortedLayersNames[k].second;
+      std::advance(it, kthIndex);
       layerNames.push_back(it->second.name());
       tensor_info = it->second;
-      otbLogMacro(Debug, << "Input " << k << " corresponds to " << it->first << " in the model");
+      if (sortedLayersNames.size() > 1)
+        otbLogMacro(Warning,
+                    << "The provided tensor name is empty, and there are multiple available candidates in the graph. "
+                       "Available tensors names from the graph have been sorted alphabetically, and the tensor #"
+                    << kthIndex << " (aka \"" << it->first << "\") will be used. ");
     }
 
     // Else, if the user specified the placeholdername, find the corresponding layer inside the model
     else
     {
-      otbLogMacro(Debug, << "Searching for corresponding node of: " << name << "... ");
+      otbLogMacro(Debug, << "Searching for corresponding node of \"" << name << "\"... ");
       for (auto const & layer : layers)
       {
         // layer is a pair (name, tensor_info)
@@ -143,7 +169,7 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
           otbLogMacro(Debug, << "Found: " << layer.second.name() << " in the model");
         }
       } // next layer
-    } // end else
+    }   // end else
 
     k += 1;
 
@@ -178,7 +204,7 @@ PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::s
     tensorflow::NodeDef node = graph.node(i);
     std::cout << i << "\t" << node.name() << std::endl;
 
-    for (auto const & name: nodesNames)
+    for (auto const & name : nodesNames)
     {
       if (node.name().compare(name) == 0)
       {
diff --git a/include/otbTensorflowGraphOperations.h b/include/otbTensorflowGraphOperations.h
index 6ad4a4e29880e10a5f10cbc4f1e945db9ca3c6a6..dbfcaa8e57efe508850f0ef77e108ba7478c0538 100644
--- a/include/otbTensorflowGraphOperations.h
+++ b/include/otbTensorflowGraphOperations.h
@@ -27,27 +27,37 @@
 // OTB log
 #include "otbMacro.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Load SavedModel variables
-void RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
+void
+RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
 
 // Save SavedModel variables
-void SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
+void
+SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
 
 // Load SavedModel
-void LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle, std::vector<std::string> tagList);
+void
+LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle, std::vector<std::string> tagList);
 
 // Get the following attributes of the specified tensors (by name) of a graph:
 // - shape
 // - datatype
 // Here we assume that the node's output is a tensor
-void GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::TensorInfo> layers, std::vector<std::string> & tensorsNames,
-    std::vector<tensorflow::TensorShapeProto> & shapes, std::vector<tensorflow::DataType> & dataTypes);
+void
+GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::TensorInfo> layers,
+                    std::vector<std::string> &                                           tensorsNames,
+                    std::vector<tensorflow::TensorShapeProto> &                          shapes,
+                    std::vector<tensorflow::DataType> &                                  dataTypes,
+                    std::vector<std::string>                                             blackList);
 
 // Print a lot of stuff about the specified nodes of the graph
-void PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::string> & nodesNames);
+void
+PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::string> & nodesNames);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelBase.h b/include/otbTensorflowMultisourceModelBase.h
index d10648ea00afc6fc624fc9ffac8e19bbdffdf4f2..6c943d1f1e777f8f7d26fc6be7f529b34535b5c7 100644
--- a/include/otbTensorflowMultisourceModelBase.h
+++ b/include/otbTensorflowMultisourceModelBase.h
@@ -65,34 +65,32 @@ namespace otb
  *
  * \ingroup OTBTensorflow
  */
-template <class TInputImage, class TOutputImage=TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelBase :
-public itk::ImageToImageFilter<TInputImage, TOutputImage>
+template <class TInputImage, class TOutputImage = TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelBase : public itk::ImageToImageFilter<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowMultisourceModelBase             Self;
+  typedef TensorflowMultisourceModelBase                     Self;
   typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
+  typedef itk::SmartPointer<Self>                            Pointer;
+  typedef itk::SmartPointer<const Self>                      ConstPointer;
 
   /** Run-time type information (and related methods). */
   itkTypeMacro(TensorflowMultisourceModelBase, itk::ImageToImageFilter);
 
   /** Images typedefs */
-  typedef TInputImage                                ImageType;
-  typedef typename TInputImage::Pointer              ImagePointerType;
-  typedef typename TInputImage::PixelType            PixelType;
-  typedef typename TInputImage::InternalPixelType    InternalPixelType;
-  typedef typename TInputImage::IndexType            IndexType;
-  typedef typename TInputImage::IndexValueType       IndexValueType;
-  typedef typename TInputImage::PointType            PointType;
-  typedef typename TInputImage::SizeType             SizeType;
-  typedef typename TInputImage::SizeValueType        SizeValueType;
-  typedef typename TInputImage::SpacingType          SpacingType;
-  typedef typename TInputImage::RegionType           RegionType;
+  typedef TInputImage                             ImageType;
+  typedef typename TInputImage::Pointer           ImagePointerType;
+  typedef typename TInputImage::PixelType         PixelType;
+  typedef typename TInputImage::InternalPixelType InternalPixelType;
+  typedef typename TInputImage::IndexType         IndexType;
+  typedef typename TInputImage::IndexValueType    IndexValueType;
+  typedef typename TInputImage::PointType         PointType;
+  typedef typename TInputImage::SizeType          SizeType;
+  typedef typename TInputImage::SizeValueType     SizeValueType;
+  typedef typename TInputImage::SpacingType       SpacingType;
+  typedef typename TInputImage::RegionType        RegionType;
 
   /** Typedefs for parameters */
   typedef std::pair<std::string, tensorflow::Tensor> DictElementType;
@@ -104,15 +102,26 @@ public:
   typedef std::vector<tensorflow::Tensor>            TensorListType;
 
   /** Set and Get the Tensorflow session and graph */
-  void SetSavedModel(tensorflow::SavedModelBundle * saved_model) {m_SavedModel = saved_model;}
-  tensorflow::SavedModelBundle * GetSavedModel() {return m_SavedModel;}
+  void
+  SetSavedModel(tensorflow::SavedModelBundle * saved_model)
+  {
+    m_SavedModel = saved_model;
+  }
+  tensorflow::SavedModelBundle *
+  GetSavedModel()
+  {
+    return m_SavedModel;
+  }
 
   /** Get the SignatureDef */
-  tensorflow::SignatureDef GetSignatureDef();
+  tensorflow::SignatureDef
+  GetSignatureDef();
 
   /** Model parameters */
-  void PushBackInputTensorBundle(std::string name, SizeType receptiveField, ImagePointerType image);
-  void PushBackOuputTensorBundle(std::string name, SizeType expressionField);
+  void
+  PushBackInputTensorBundle(std::string name, SizeType receptiveField, ImagePointerType image);
+  void
+  PushBackOuputTensorBundle(std::string name, SizeType expressionField);
 
   /** Input placeholders names */
   itkSetMacro(InputPlaceholders, StringList);
@@ -131,8 +140,16 @@ public:
   itkGetMacro(OutputExpressionFields, SizeListType);
 
   /** User placeholders */
-  void SetUserPlaceholders(const DictType & dict) {m_UserPlaceholders = dict;}
-  DictType GetUserPlaceholders() {return m_UserPlaceholders;}
+  void
+  SetUserPlaceholders(const DictType & dict)
+  {
+    m_UserPlaceholders = dict;
+  }
+  DictType
+  GetUserPlaceholders()
+  {
+    return m_UserPlaceholders;
+  }
 
   /** Target nodes names */
   itkSetMacro(TargetNodesNames, StringList);
@@ -144,40 +161,47 @@ public:
   itkGetMacro(InputTensorsShapes, TensorShapeProtoList);
   itkGetMacro(OutputTensorsShapes, TensorShapeProtoList);
 
-  virtual void GenerateOutputInformation();
+  virtual void
+  GenerateOutputInformation();
 
 protected:
   TensorflowMultisourceModelBase();
-  virtual ~TensorflowMultisourceModelBase() {};
+  virtual ~TensorflowMultisourceModelBase(){};
 
-  virtual std::stringstream GenerateDebugReport(DictType & inputs);
+  virtual std::stringstream
+  GenerateDebugReport(DictType & inputs);
 
-  virtual void RunSession(DictType & inputs, TensorListType & outputs);
+  virtual void
+  RunSession(DictType & inputs, TensorListType & outputs);
 
 private:
-  TensorflowMultisourceModelBase(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelBase(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
   // Tensorflow graph and session
-  tensorflow::SavedModelBundle * m_SavedModel;          // The TensorFlow model
+  tensorflow::SavedModelBundle * m_SavedModel; // The TensorFlow model
 
   // Model parameters
-  StringList                 m_InputPlaceholders;       // Input placeholders names
-  SizeListType               m_InputReceptiveFields;    // Input receptive fields
-  StringList                 m_OutputTensors;           // Output tensors names
-  SizeListType               m_OutputExpressionFields;  // Output expression fields
-  DictType                   m_UserPlaceholders;        // User placeholders
-  StringList                 m_TargetNodesNames;        // User nodes target
+  StringList   m_InputPlaceholders;      // Input placeholders names
+  SizeListType m_InputReceptiveFields;   // Input receptive fields
+  StringList   m_OutputTensors;          // Output tensors names
+  SizeListType m_OutputExpressionFields; // Output expression fields
+  DictType     m_UserPlaceholders;       // User placeholders
+  StringList   m_TargetNodesNames;       // User nodes target
 
   // Internal, read-only
-  DataTypeListType           m_InputTensorsDataTypes;   // Input tensors datatype
-  DataTypeListType           m_OutputTensorsDataTypes;  // Output tensors datatype
-  TensorShapeProtoList       m_InputTensorsShapes;      // Input tensors shapes
-  TensorShapeProtoList       m_OutputTensorsShapes;     // Output tensors shapes
+  DataTypeListType     m_InputConstantsDataTypes; // Input constants datatype
+  DataTypeListType     m_InputTensorsDataTypes;   // Input tensors datatype
+  DataTypeListType     m_OutputTensorsDataTypes;  // Output tensors datatype
+  TensorShapeProtoList m_InputConstantsShapes;    // Input constants shapes
+  TensorShapeProtoList m_InputTensorsShapes;      // Input tensors shapes
+  TensorShapeProtoList m_OutputTensorsShapes;     // Output tensors shapes
 
   // Layer names inside the model corresponding to inputs and outputs
-  StringList m_InputLayers;                             // List of input names, as contained in the model
-  StringList m_OutputLayers;                            // List of output names, as contained in the model
+  StringList m_InputConstants; // List of constants names, as contained in the model
+  StringList m_InputLayers;    // List of input names, as contained in the model
+  StringList m_OutputLayers;   // List of output names, as contained in the model
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelBase.hxx b/include/otbTensorflowMultisourceModelBase.hxx
index 752b7c9d61a861d260dc4dfac89efb66e772a42b..ba4612627381d01991dbd5c5d050e8c197c1f851 100644
--- a/include/otbTensorflowMultisourceModelBase.hxx
+++ b/include/otbTensorflowMultisourceModelBase.hxx
@@ -18,28 +18,26 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::TensorflowMultisourceModelBase()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::TensorflowMultisourceModelBase()
 {
-  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max() );
-  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max() );
-  
+  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max());
+  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max());
+
   m_SavedModel = NULL;
 }
 
 template <class TInputImage, class TOutputImage>
 tensorflow::SignatureDef
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GetSignatureDef()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GetSignatureDef()
 {
-  auto signatures = this->GetSavedModel()->GetSignatures();
+  auto                     signatures = this->GetSavedModel()->GetSignatures();
   tensorflow::SignatureDef signature_def;
 
   if (signatures.size() == 0)
   {
-    itkExceptionMacro("There are no available signatures for this tag-set. \n" <<
-                      "Please check which tag-set to use by running "<<
-                      "`saved_model_cli show --dir your_model_dir --all`");
+    itkExceptionMacro("There are no available signatures for this tag-set. \n"
+                      << "Please check which tag-set to use by running "
+                      << "`saved_model_cli show --dir your_model_dir --all`");
   }
 
   // If serving_default key exists (which is the default for TF saved model), choose it as signature
@@ -57,8 +55,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::PushBackInputTensorBundle(std::string placeholder, SizeType receptiveField, ImagePointerType image)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::PushBackInputTensorBundle(std::string      placeholder,
+                                                                                     SizeType         receptiveField,
+                                                                                     ImagePointerType image)
 {
   Superclass::PushBackInput(image);
   m_InputReceptiveFields.push_back(receptiveField);
@@ -67,8 +66,7 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 std::stringstream
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GenerateDebugReport(DictType & inputs)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GenerateDebugReport(DictType & inputs)
 {
   // Create a debug report
   std::stringstream debugReport;
@@ -79,18 +77,18 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   debugReport << "Output image buffered region: " << outputReqRegion << "\n";
 
   // Describe inputs
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
   {
-    const ImagePointerType inputPtr = const_cast<TInputImage*>(this->GetInput(i));
-    const RegionType reqRegion = inputPtr->GetRequestedRegion();
+    const ImagePointerType inputPtr = const_cast<TInputImage *>(this->GetInput(i));
+    const RegionType       reqRegion = inputPtr->GetRequestedRegion();
     debugReport << "Input #" << i << ":\n";
     debugReport << "Requested region: " << reqRegion << "\n";
     debugReport << "Tensor \"" << inputs[i].first << "\": " << tf::PrintTensorInfos(inputs[i].second) << "\n";
   }
 
   // Show user placeholders
-  debugReport << "User placeholders:\n" ;
-  for (auto& dict: this->GetUserPlaceholders())
+  debugReport << "User placeholders:\n";
+  for (auto & dict : this->GetUserPlaceholders())
   {
     debugReport << "Tensor \"" << dict.first << "\": " << tf::PrintTensorInfos(dict.second) << "\n" << std::endl;
   }
@@ -101,25 +99,31 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::RunSession(DictType & inputs, TensorListType & outputs)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::RunSession(DictType & inputs, TensorListType & outputs)
 {
 
-  // Add the user's placeholders
-  std::copy(this->GetUserPlaceholders().begin(), this->GetUserPlaceholders().end(), std::back_inserter(inputs));
-
   // Run the TF session here
   // The session will initialize the outputs
 
-  // `inputs` corresponds to a mapping {name, tensor}, with the name being specified by the user when calling TensorFlowModelServe
-  // we must adapt it to `inputs_new`, that corresponds to a mapping {layerName, tensor}, with the layerName being from the model
+  // `inputs` corresponds to a mapping {name, tensor}, with the name being specified by the user when calling
+  // TensorFlowModelServe. We must adapt it to `inputs_new`, which corresponds to a mapping {layerName, tensor}, with the
+  // layerName being from the model
   DictType inputs_new;
-  int k = 0;
-  for (auto& dict: inputs)
+
+  // Add the user's placeholders
+  std::size_t k = 0;
+  for (auto & dict : this->GetUserPlaceholders())
   {
-    DictElementType element = {m_InputLayers[k], dict.second};
-    inputs_new.push_back(element);
-    k+=1;
+    inputs_new.emplace_back(m_InputConstants[k], dict.second);
+    k++;
+  }
+
+  // Add input tensors
+  k = 0;
+  for (auto & dict : inputs)
+  {
+    inputs_new.emplace_back(m_InputLayers[k], dict.second);
+    k += 1;
   }
 
   // Run the session, evaluating our output tensors from the graph
@@ -132,16 +136,18 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
     std::stringstream debugReport = GenerateDebugReport(inputs);
 
     // Throw an exception with the report
-    itkExceptionMacro("Can't run the tensorflow session !\n" <<
-                      "Tensorflow error message:\n" << status.ToString() << "\n"
-                      "OTB Filter debug message:\n" << debugReport.str() );
+    itkExceptionMacro("Can't run the tensorflow session !\n"
+                      << "Tensorflow error message:\n"
+                      << status.ToString()
+                      << "\n"
+                         "OTB Filter debug message:\n"
+                      << debugReport.str());
   }
 }
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GenerateOutputInformation()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GenerateOutputInformation()
 {
 
   // Check that the number of the following is the same
@@ -151,9 +157,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   const unsigned int nbInputs = this->GetNumberOfInputs();
   if (nbInputs != m_InputReceptiveFields.size() || nbInputs != m_InputPlaceholders.size())
   {
-    itkExceptionMacro("Number of input images is " << nbInputs <<
-                      " but the number of input patches size is " << m_InputReceptiveFields.size() <<
-                      " and the number of input tensors names is " << m_InputPlaceholders.size());
+    itkExceptionMacro("Number of input images is "
+                      << nbInputs << " but the number of input patches size is " << m_InputReceptiveFields.size()
+                      << " and the number of input tensors names is " << m_InputPlaceholders.size());
   }
 
   //////////////////////////////////////////////////////////////////////////////////////////
@@ -166,10 +172,30 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   // and other infos (shapes, dtypes)
   // For example, for output names specified by the user m_OutputTensors = ['s2t', 's2t_pad'],
   // this will return m_OutputLayers = ['PartitionedCall:0', 'PartitionedCall:1']
-  // In case the user hasn't named the output, e.g.  m_OutputTensors = [''],
+  // In case the user hasn't named the output, i.e. m_OutputTensors = [''],
   // this will return the first output m_OutputLayers = ['PartitionedCall:0']
-  tf::GetTensorAttributes(signaturedef.inputs(), m_InputPlaceholders, m_InputLayers, m_InputTensorsShapes, m_InputTensorsDataTypes);
-  tf::GetTensorAttributes(signaturedef.outputs(), m_OutputTensors, m_OutputLayers, m_OutputTensorsShapes, m_OutputTensorsDataTypes);
+  StringList constantsNames;
+  std::transform(m_UserPlaceholders.begin(),
+                 m_UserPlaceholders.end(),
+                 std::back_inserter(constantsNames),
+                 [](const DictElementType & p) { return p.first; });
+  if (m_UserPlaceholders.size() > 0)
+  {
+    // Avoid the unnecessary warning when no placeholder is fed
+    tf::GetTensorAttributes(signaturedef.inputs(),
+                            constantsNames,
+                            m_InputConstants,
+                            m_InputConstantsShapes,
+                            m_InputConstantsDataTypes);
+  }
+  tf::GetTensorAttributes(signaturedef.inputs(),
+                          m_InputPlaceholders,
+                          m_InputLayers,
+                          m_InputTensorsShapes,
+                          m_InputTensorsDataTypes,
+                          constantsNames);
+  tf::GetTensorAttributes(
+    signaturedef.outputs(), m_OutputTensors, m_OutputLayers, m_OutputTensorsShapes, m_OutputTensorsDataTypes);
 }
 
 
diff --git a/include/otbTensorflowMultisourceModelFilter.h b/include/otbTensorflowMultisourceModelFilter.h
index 36d781dd1519b964adce741a8adbcb1385b0c729..bdf9a02d0b00e9dbc228f3a0401ac8dab4c49e32 100644
--- a/include/otbTensorflowMultisourceModelFilter.h
+++ b/include/otbTensorflowMultisourceModelFilter.h
@@ -80,12 +80,10 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TOutputImage>
-class ITK_EXPORT TensorflowMultisourceModelFilter :
-public TensorflowMultisourceModelBase<TInputImage, TOutputImage>
+class ITK_EXPORT TensorflowMultisourceModelFilter : public TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelFilter                          Self;
   typedef TensorflowMultisourceModelBase<TInputImage, TOutputImage> Superclass;
@@ -99,16 +97,16 @@ public:
   itkTypeMacro(TensorflowMultisourceModelFilter, TensorflowMultisourceModelBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType           ImageType;
-  typedef typename Superclass::ImagePointerType    ImagePointerType;
-  typedef typename Superclass::PixelType           PixelType;
-  typedef typename Superclass::IndexType           IndexType;
-  typedef typename IndexType::IndexValueType       IndexValueType;
-  typedef typename Superclass::PointType           PointType;
-  typedef typename Superclass::SizeType            SizeType;
-  typedef typename SizeType::SizeValueType         SizeValueType;
-  typedef typename Superclass::SpacingType         SpacingType;
-  typedef typename Superclass::RegionType          RegionType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::PixelType        PixelType;
+  typedef typename Superclass::IndexType        IndexType;
+  typedef typename IndexType::IndexValueType    IndexValueType;
+  typedef typename Superclass::PointType        PointType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename SizeType::SizeValueType      SizeValueType;
+  typedef typename Superclass::SpacingType      SpacingType;
+  typedef typename Superclass::RegionType       RegionType;
 
   typedef TOutputImage                             OutputImageType;
   typedef typename TOutputImage::PixelType         OutputPixelType;
@@ -119,12 +117,12 @@ public:
   typedef typename itk::ImageRegionConstIterator<TInputImage>              InputConstIteratorType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictElementType     DictElementType;
-  typedef typename Superclass::DictType            DictType;
-  typedef typename Superclass::StringList          StringList;
-  typedef typename Superclass::SizeListType        SizeListType;
-  typedef typename Superclass::TensorListType      TensorListType;
-  typedef std::vector<float>                       ScaleListType;
+  typedef typename Superclass::DictElementType DictElementType;
+  typedef typename Superclass::DictType        DictType;
+  typedef typename Superclass::StringList      StringList;
+  typedef typename Superclass::SizeListType    SizeListType;
+  typedef typename Superclass::TensorListType  TensorListType;
+  typedef std::vector<float>                   ScaleListType;
 
   itkSetMacro(OutputGridSize, SizeType);
   itkGetMacro(OutputGridSize, SizeType);
@@ -137,34 +135,43 @@ public:
 
 protected:
   TensorflowMultisourceModelFilter();
-  virtual ~TensorflowMultisourceModelFilter() {};
+  virtual ~TensorflowMultisourceModelFilter(){};
 
-  virtual void SmartPad(RegionType& region, const SizeType &patchSize);
-  virtual void SmartShrink(RegionType& region, const SizeType &patchSize);
-  virtual void ImageToExtent(ImageType* image, PointType &extentInf, PointType &extentSup, SizeType &patchSize);
-  virtual bool OutputRegionToInputRegion(const RegionType &outputRegion, RegionType &inputRegion, ImageType* &inputImage);
-  virtual void EnlargeToAlignedRegion(RegionType& region);
+  virtual void
+  SmartPad(RegionType & region, const SizeType & patchSize);
+  virtual void
+  SmartShrink(RegionType & region, const SizeType & patchSize);
+  virtual void
+  ImageToExtent(ImageType * image, PointType & extentInf, PointType & extentSup, SizeType & patchSize);
+  virtual bool
+  OutputRegionToInputRegion(const RegionType & outputRegion, RegionType & inputRegion, ImageType *& inputImage);
+  virtual void
+  EnlargeToAlignedRegion(RegionType & region);
 
-  virtual void GenerateOutputInformation(void);
+  virtual void
+  GenerateOutputInformation(void);
 
-  virtual void GenerateInputRequestedRegion(void);
+  virtual void
+  GenerateInputRequestedRegion(void);
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
 private:
-  TensorflowMultisourceModelFilter(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelFilter(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  SizeType                   m_OutputGridSize;       // Output grid size
-  bool                       m_ForceOutputGridSize;  // Force output grid size
-  bool                       m_FullyConvolutional;   // Convolution mode
-  float                      m_OutputSpacingScale;   // scaling of the output spacings
+  SizeType m_OutputGridSize;      // Output grid size
+  bool     m_ForceOutputGridSize; // Force output grid size
+  bool     m_FullyConvolutional;  // Convolution mode
+  float    m_OutputSpacingScale;  // scaling of the output spacings
 
   // Internal
-  SpacingType                m_OutputSpacing;     // Output image spacing
-  PointType                  m_OutputOrigin;      // Output image origin
-  SizeType                   m_OutputSize;        // Output image size
-  PixelType                  m_NullPixel;         // Pixel filled with zeros
+  SpacingType m_OutputSpacing; // Output image spacing
+  PointType   m_OutputOrigin;  // Output image origin
+  SizeType    m_OutputSize;    // Output image size
+  PixelType   m_NullPixel;     // Pixel filled with zeros
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelFilter.hxx b/include/otbTensorflowMultisourceModelFilter.hxx
index d208f01a8deb1834b98f64c1230423d44b2cfe7a..3cbb53d92857466d617e5547940c8e42a0ce971e 100644
--- a/include/otbTensorflowMultisourceModelFilter.hxx
+++ b/include/otbTensorflowMultisourceModelFilter.hxx
@@ -18,9 +18,8 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::TensorflowMultisourceModelFilter()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::TensorflowMultisourceModelFilter()
+{
   m_OutputGridSize.Fill(0);
   m_ForceOutputGridSize = false;
   m_FullyConvolutional = false;
@@ -31,38 +30,37 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   m_OutputSpacingScale = 1.0f;
 
-  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max() );
-  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max() );
- }
+  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max());
+  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max());
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::SmartPad(RegionType& region, const SizeType &patchSize)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::SmartPad(RegionType & region, const SizeType & patchSize)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     const SizeValueType psz = patchSize[dim];
     const SizeValueType rval = 0.5 * psz;
     const SizeValueType lval = psz - rval;
     region.GetModifiableIndex()[dim] -= lval;
     region.GetModifiableSize()[dim] += psz;
-    }
- }
+  }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::SmartShrink(RegionType& region, const SizeType &patchSize)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::SmartShrink(RegionType &     region,
+                                                                         const SizeType & patchSize)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     const SizeValueType psz = patchSize[dim];
     const SizeValueType lval = 0.5 * psz;
     region.GetModifiableIndex()[dim] += lval;
     region.GetModifiableSize()[dim] -= psz - 1;
-    }
- }
+  }
+}
 
 /**
   Compute the input image extent: corners inf and sup.
@@ -70,9 +68,11 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::ImageToExtent(ImageType* image, PointType &extentInf, PointType &extentSup, SizeType &patchSize)
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::ImageToExtent(ImageType * image,
+                                                                           PointType & extentInf,
+                                                                           PointType & extentSup,
+                                                                           SizeType &  patchSize)
+{
 
   // Get largest possible region
   RegionType largestPossibleRegion = image->GetLargestPossibleRegion();
@@ -89,13 +89,12 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   PointType imageEnd;
   image->TransformIndexToPhysicalPoint(imageLastIndex, imageEnd);
   image->TransformIndexToPhysicalPoint(imageFirstIndex, imageOrigin);
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     extentInf[dim] = vnl_math_min(imageOrigin[dim], imageEnd[dim]) - 0.5 * image->GetSpacing()[dim];
     extentSup[dim] = vnl_math_max(imageOrigin[dim], imageEnd[dim]) + 0.5 * image->GetSpacing()[dim];
-    }
-
- }
+  }
+}
 
 /**
   Compute the region of the input image which correspond to the given output requested region
@@ -104,9 +103,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 bool
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::OutputRegionToInputRegion(const RegionType &outputRegion, RegionType &inputRegion, ImageType* &inputImage)
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::OutputRegionToInputRegion(const RegionType & outputRegion,
+                                                                                       RegionType &       inputRegion,
+                                                                                       ImageType *&       inputImage)
+{
 
   // Mosaic Region Start & End (mosaic image index)
   const IndexType outIndexStart = outputRegion.GetIndex();
@@ -115,45 +115,43 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   // Mosaic Region Start & End (geo)
   PointType outPointStart, outPointEnd;
   this->GetOutput()->TransformIndexToPhysicalPoint(outIndexStart, outPointStart);
-  this->GetOutput()->TransformIndexToPhysicalPoint(outIndexEnd  , outPointEnd  );
+  this->GetOutput()->TransformIndexToPhysicalPoint(outIndexEnd, outPointEnd);
 
   // Add the half-width pixel size of the input image
   // and remove the half-width pixel size of the output image
   // (coordinates = pixel center)
   const SpacingType outputSpc = this->GetOutput()->GetSpacing();
   const SpacingType inputSpc = inputImage->GetSpacing();
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
-    const typename SpacingType::ValueType border =
-        0.5 * (inputSpc[dim] - outputSpc[dim]);
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
+    const typename SpacingType::ValueType border = 0.5 * (inputSpc[dim] - outputSpc[dim]);
     if (outPointStart[dim] < outPointEnd[dim])
-      {
+    {
       outPointStart[dim] += border;
-      outPointEnd  [dim] -= border;
-      }
+      outPointEnd[dim] -= border;
+    }
     else
-      {
+    {
       outPointStart[dim] -= border;
-      outPointEnd  [dim] += border;
-      }
+      outPointEnd[dim] += border;
     }
+  }
 
   // Mosaic Region Start & End (input image index)
   IndexType defIndexStart, defIndexEnd;
   inputImage->TransformPhysicalPointToIndex(outPointStart, defIndexStart);
-  inputImage->TransformPhysicalPointToIndex(outPointEnd  , defIndexEnd);
+  inputImage->TransformPhysicalPointToIndex(outPointEnd, defIndexEnd);
 
   // Compute input image region
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     inputRegion.SetIndex(dim, vnl_math_min(defIndexStart[dim], defIndexEnd[dim]));
     inputRegion.SetSize(dim, vnl_math_max(defIndexStart[dim], defIndexEnd[dim]) - inputRegion.GetIndex(dim) + 1);
-    }
+  }
 
   // crop the input requested region at the input's largest possible region
-  return inputRegion.Crop( inputImage->GetLargestPossibleRegion() );
-
- }
+  return inputRegion.Crop(inputImage->GetLargestPossibleRegion());
+}
 
 /*
  * Enlarge the given region to the nearest aligned region.
@@ -161,11 +159,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::EnlargeToAlignedRegion(RegionType& region)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::EnlargeToAlignedRegion(RegionType & region)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     // Get corners
     IndexValueType lower = region.GetIndex(dim);
     IndexValueType upper = lower + region.GetSize(dim);
@@ -177,22 +174,20 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     // Move corners to aligned positions
     lower -= deltaLo;
     if (deltaUp > 0)
-      {
+    {
       upper += m_OutputGridSize[dim] - deltaUp;
-      }
+    }
 
     // Update region
     region.SetIndex(dim, lower);
     region.SetSize(dim, upper - lower);
-
-    }
- }
+  }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateOutputInformation()
+{
 
   Superclass::GenerateOutputInformation();
 
@@ -204,8 +199,8 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   // OTBTF assumes that the output image has the following geometric properties:
   // (1) Image origin is the top-left pixel
   // (2) Image pixel spacing has positive x-spacing and negative y-spacing
-  m_OutputSpacing = this->GetInput(0)->GetSpacing();  // GetSpacing() returns abs. spacing
-  m_OutputSpacing[1] *= -1.0;  // Force negative y-spacing
+  m_OutputSpacing = this->GetInput(0)->GetSpacing(); // GetSpacing() returns abs. spacing
+  m_OutputSpacing[1] *= -1.0;                        // Force negative y-spacing
   m_OutputSpacing[0] *= m_OutputSpacingScale;
   m_OutputSpacing[1] *= m_OutputSpacingScale;
 
@@ -214,30 +209,32 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   PointType extentInf, extentSup;
   extentSup.Fill(itk::NumericTraits<double>::max());
   extentInf.Fill(itk::NumericTraits<double>::NonpositiveMin());
-  for (unsigned int imageIndex = 0 ; imageIndex < this->GetNumberOfInputs() ; imageIndex++)
-    {
-    ImageType * currentImage = static_cast<ImageType *>(
-        Superclass::ProcessObject::GetInput(imageIndex) );
+  for (unsigned int imageIndex = 0; imageIndex < this->GetNumberOfInputs(); imageIndex++)
+  {
+    ImageType * currentImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(imageIndex));
 
     // Update output image extent
     PointType currentInputImageExtentInf, currentInputImageExtentSup;
-    ImageToExtent(currentImage, currentInputImageExtentInf, currentInputImageExtentSup, this->GetInputReceptiveFields()[imageIndex]);
-    for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-      {
+    ImageToExtent(currentImage,
+                  currentInputImageExtentInf,
+                  currentInputImageExtentSup,
+                  this->GetInputReceptiveFields()[imageIndex]);
+    for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+    {
       extentInf[dim] = vnl_math_max(currentInputImageExtentInf[dim], extentInf[dim]);
       extentSup[dim] = vnl_math_min(currentInputImageExtentSup[dim], extentSup[dim]);
-      }
     }
+  }
 
 
   // Set final origin, aligned to the reference image grid.
   // Here we simply get back to the center of the pixel (extents are pixels corners coordinates)
-  m_OutputOrigin[0] =  extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0];
-  m_OutputOrigin[1] =  extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1];
+  m_OutputOrigin[0] = extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0];
+  m_OutputOrigin[1] = extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1];
 
   // Set final size
-  m_OutputSize[0] = std::floor( (extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]) );
-  m_OutputSize[1] = std::floor( (extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]) );
+  m_OutputSize[0] = std::floor((extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]));
+  m_OutputSize[1] = std::floor((extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]));
 
   // We should take in account one more thing: the expression field. It enlarge slightly the output image extent.
   m_OutputOrigin[0] -= m_OutputSpacing[0] * std::floor(0.5 * this->GetOutputExpressionFields().at(0)[0]);
@@ -247,18 +244,18 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   // Set output grid size
   if (!m_ForceOutputGridSize)
-    {
+  {
     // Default is the output field of expression
     m_OutputGridSize = this->GetOutputExpressionFields().at(0);
-    }
+  }
 
   // Resize the largestPossibleRegion to be a multiple of the grid size
-  for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+  {
     if (m_OutputGridSize[dim] > m_OutputSize[dim])
       itkGenericExceptionMacro("Output grid size is larger than output image size !");
     m_OutputSize[dim] -= m_OutputSize[dim] % m_OutputGridSize[dim];
-    }
+  }
 
   // Set the largest possible region
   RegionType largestPossibleRegion;
@@ -269,38 +266,39 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   //////////////////////////////////////////////////////////////////////////////////////////
 
   unsigned int outputPixelSize = 0;
-  for (auto& protoShape: this->GetOutputTensorsShapes())
-    {
+  for (auto & protoShape : this->GetOutputTensorsShapes())
+  {
     // Find the number of components
     if (protoShape.dim_size() > 4)
-      {
-      itkExceptionMacro("dim_size=" << protoShape.dim_size() << " currently not supported. "
-          "Keep in mind that output tensors must have 1, 2, 3 or 4 dimensions. "
-          "In the case of 1-dimensional tensor, the first dimension is for the batch, "
-          "and we assume that the output tensor has 1 channel. "
-          "In the case of 2-dimensional tensor, the first dimension is for the batch, "
-          "and the second is the number of components. "
-          "In the case of 3-dimensional tensor, the first dimension is for the batch, "
-          "and other dims are for (x, y). "
-          "In the case of 4-dimensional tensor, the first dimension is for the batch, "
-          "and the second and the third are for (x, y). The last is for the number of "
-          "channels. ");
-      }
+    {
+      itkExceptionMacro("dim_size=" << protoShape.dim_size()
+                                    << " currently not supported. "
+                                       "Keep in mind that output tensors must have 1, 2, 3 or 4 dimensions. "
+                                       "In the case of 1-dimensional tensor, the first dimension is for the batch, "
+                                       "and we assume that the output tensor has 1 channel. "
+                                       "In the case of 2-dimensional tensor, the first dimension is for the batch, "
+                                       "and the second is the number of components. "
+                                       "In the case of 3-dimensional tensor, the first dimension is for the batch, "
+                                       "and other dims are for (x, y). "
+                                       "In the case of 4-dimensional tensor, the first dimension is for the batch, "
+                                       "and the second and the third are for (x, y). The last is for the number of "
+                                       "channels. ");
+    }
     unsigned int nComponents = tf::GetNumberOfChannelsFromShapeProto(protoShape);
     outputPixelSize += nComponents;
-    }
+  }
 
   // Copy input image projection
-  ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(0) );
+  ImageType *       inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(0));
   const std::string projectionRef = inputImage->GetProjectionRef();
 
   // Set output image origin/spacing/size/projection
   ImageType * outputPtr = this->GetOutput();
   outputPtr->SetNumberOfComponentsPerPixel(outputPixelSize);
-  outputPtr->SetProjectionRef        ( projectionRef );
-  outputPtr->SetOrigin               ( m_OutputOrigin );
-  outputPtr->SetSignedSpacing        ( m_OutputSpacing );
-  outputPtr->SetLargestPossibleRegion( largestPossibleRegion );
+  outputPtr->SetProjectionRef(projectionRef);
+  outputPtr->SetOrigin(m_OutputOrigin);
+  outputPtr->SetSignedSpacing(m_OutputSpacing);
+  outputPtr->SetLargestPossibleRegion(largestPossibleRegion);
 
   // Set null pixel
   m_NullPixel.SetSize(outputPtr->GetNumberOfComponentsPerPixel());
@@ -312,14 +310,12 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   itk::EncapsulateMetaData(outputPtr->GetMetaDataDictionary(), MetaDataKey::TileHintX, m_OutputGridSize[0]);
   itk::EncapsulateMetaData(outputPtr->GetMetaDataDictionary(), MetaDataKey::TileHintY, m_OutputGridSize[1]);
-
- }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateInputRequestedRegion()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateInputRequestedRegion()
+{
   Superclass::GenerateInputRequestedRegion();
 
   // Output requested region
@@ -329,35 +325,37 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   EnlargeToAlignedRegion(requestedRegion);
 
   // For each image, get the requested region
-  for(unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
-    {
-    ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(i) );
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
+  {
+    ImageType * inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(i));
 
     // Compute the requested region
     RegionType inRegion;
-    if (!OutputRegionToInputRegion(requestedRegion, inRegion, inputImage) )
-      {
+    if (!OutputRegionToInputRegion(requestedRegion, inRegion, inputImage))
+    {
       // Image does not overlap requested region: set requested region to null
-      otbLogMacro(Debug,  << "Image #" << i << " :\n" << inRegion << " is outside the requested region");
+      otbLogMacro(Debug, << "Image #" << i << " :\n" << inRegion << " is outside the requested region");
       inRegion.GetModifiableIndex().Fill(0);
       inRegion.GetModifiableSize().Fill(0);
-      }
+    }
 
     // Compute the FOV-scale*FOE radius to pad
     SizeType toPad(this->GetInputReceptiveFields().at(i));
-    for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-      {
-      int valToPad = 1 + (this->GetOutputExpressionFields().at(0)[dim] - 1) * m_OutputSpacingScale * this->GetInput(0)->GetSpacing()[dim] / this->GetInput(i)->GetSpacing()[dim] ;
+    for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+    {
+      int valToPad = 1 + (this->GetOutputExpressionFields().at(0)[dim] - 1) * m_OutputSpacingScale *
+                           this->GetInput(0)->GetSpacing()[dim] / this->GetInput(i)->GetSpacing()[dim];
       if (valToPad > toPad[dim])
-        itkExceptionMacro("The input requested region of source #" << i << " is not consistent (dim "<< dim<< ")." <<
-                          "Please check RF, EF, SF vs physical spacing of your image!" <<
-                          "\nReceptive field: " << this->GetInputReceptiveFields().at(i)[dim] <<
-                          "\nExpression field: " << this->GetOutputExpressionFields().at(0)[dim] <<
-                          "\nScale factor: " << m_OutputSpacingScale <<
-                          "\nReference image spacing: " << this->GetInput(0)->GetSpacing()[dim] <<
-                          "\nImage " << i << " spacing: " << this->GetInput(i)->GetSpacing()[dim]);
+        itkExceptionMacro("The input requested region of source #"
+                          << i << " is not consistent (dim " << dim << ")."
+                          << "Please check RF, EF, SF vs physical spacing of your image!"
+                          << "\nReceptive field: " << this->GetInputReceptiveFields().at(i)[dim]
+                          << "\nExpression field: " << this->GetOutputExpressionFields().at(0)[dim]
+                          << "\nScale factor: " << m_OutputSpacingScale
+                          << "\nReference image spacing: " << this->GetInput(0)->GetSpacing()[dim] << "\nImage " << i
+                          << " spacing: " << this->GetInput(i)->GetSpacing()[dim]);
       toPad[dim] -= valToPad;
-      }
+    }
 
     // Pad with radius
     SmartPad(inRegion, toPad);
@@ -368,30 +366,28 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     // can be one pixel larger when the input image regions are not physically
     // aligned.
     if (!m_FullyConvolutional)
-      {
+    {
       inRegion.PadByRadius(1);
-      }
+    }
 
     inRegion.Crop(inputImage->GetLargestPossibleRegion());
 
     // Update the requested region
     inputImage->SetRequestedRegion(inRegion);
 
-    } // next image
-
- }
+  } // next image
+}
 
 /**
  * Compute the output image
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateData()
+{
   // Output pointer and requested region
   typename TOutputImage::Pointer outputPtr = this->GetOutput();
-  const RegionType outputReqRegion = outputPtr->GetRequestedRegion();
+  const RegionType               outputReqRegion = outputPtr->GetRequestedRegion();
 
   // Get the aligned output requested region
   RegionType outputAlignedReqRegion(outputReqRegion);
@@ -404,10 +400,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
 
   // Populate input tensors
-  for (unsigned int i = 0 ; i < nInputs ; i++)
-    {
+  for (unsigned int i = 0; i < nInputs; i++)
+  {
     // Input image pointer
-    const ImagePointerType inputPtr = const_cast<TInputImage*>(this->GetInput(i));
+    const ImagePointerType inputPtr = const_cast<TInputImage *>(this->GetInput(i));
 
     // Patch size of tensor #i
     const SizeType inputPatchSize = this->GetInputReceptiveFields().at(i);
@@ -416,13 +412,13 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     const RegionType reqRegion = inputPtr->GetRequestedRegion();
 
     if (m_FullyConvolutional)
-      {
+    {
       // Shape of input tensor #i
-      tensorflow::int64 sz_n = 1;
-      tensorflow::int64 sz_y = reqRegion.GetSize(1);
-      tensorflow::int64 sz_x = reqRegion.GetSize(0);
-      tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-      tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
+      tensorflow::int64       sz_n = 1;
+      tensorflow::int64       sz_y = reqRegion.GetSize(1);
+      tensorflow::int64       sz_x = reqRegion.GetSize(0);
+      tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+      tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
 
       // Create the input tensor
       tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
@@ -433,16 +429,16 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
       // Input is the tensor representing the subset of image
       DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
       inputs.push_back(input);
-      }
+    }
     else
-      {
+    {
       // Preparing patches
       // Shape of input tensor #i
-      tensorflow::int64 sz_n = outputReqRegion.GetNumberOfPixels();
-      tensorflow::int64 sz_y = inputPatchSize[1];
-      tensorflow::int64 sz_x = inputPatchSize[0];
-      tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-      tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
+      tensorflow::int64       sz_n = outputReqRegion.GetNumberOfPixels();
+      tensorflow::int64       sz_y = inputPatchSize[1];
+      tensorflow::int64       sz_x = inputPatchSize[0];
+      tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+      tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
 
       // Create the input tensor
       tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
@@ -450,10 +446,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
       // Fill the input tensor.
       // We iterate over points which are located from the index iterator
       // moving through the output image requested region
-      unsigned int elemIndex = 0;
+      unsigned int      elemIndex = 0;
       IndexIteratorType idxIt(outputPtr, outputReqRegion);
       for (idxIt.GoToBegin(); !idxIt.IsAtEnd(); ++idxIt)
-        {
+      {
         // Get the coordinates of the current output pixel
         PointType point;
         outputPtr->TransformIndexToPhysicalPoint(idxIt.GetIndex(), point);
@@ -461,15 +457,15 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
         // Sample the i-th input patch centered on the point
         tf::SampleCenteredPatch<TInputImage>(inputPtr, point, inputPatchSize, inputTensor, elemIndex);
         elemIndex++;
-        }
+      }
 
       // Input is the tensor of patches (aka the batch)
       DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
       inputs.push_back(input);
 
-      } // mode is not full convolutional
+    } // mode is not full convolutional
 
-    } // next input tensor
+  } // next input tensor
 
   // Run session
   // TODO: see if we print some info about inputs/outputs of the model e.g. m_OutputTensors
@@ -483,26 +479,25 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   // Get output tensors
   int bandOffset = 0;
-  for (unsigned int i = 0 ; i < outputs.size() ; i++)
-    {
+  for (unsigned int i = 0; i < outputs.size(); i++)
+  {
     // The offset (i.e. the starting index of the channel for the output tensor) is updated
     // during this call
-    // TODO: implement a generic strategy enabling expression field copy in patch-based mode (see tf::CopyTensorToImageRegion)
+    // TODO: implement a generic strategy enabling expression field copy in patch-based mode (see
+    // tf::CopyTensorToImageRegion)
     try
-      {
-      tf::CopyTensorToImageRegion<TOutputImage> (outputs[i],
-          outputAlignedReqRegion, outputPtr, outputReqRegion, bandOffset);
-      }
-    catch( itk::ExceptionObject & err )
-      {
+    {
+      tf::CopyTensorToImageRegion<TOutputImage>(
+        outputs[i], outputAlignedReqRegion, outputPtr, outputReqRegion, bandOffset);
+    }
+    catch (itk::ExceptionObject & err)
+    {
       std::stringstream debugMsg = this->GenerateDebugReport(inputs);
       itkExceptionMacro("Error occurred during tensor to image conversion.\n"
-          << "Context: " << debugMsg.str()
-          << "Error:" << err);
-      }
+                        << "Context: " << debugMsg.str() << "Error:" << err);
     }
-
- }
+  }
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelLearningBase.h b/include/otbTensorflowMultisourceModelLearningBase.h
index 0663f17a3f6367d5f5fe0ebbc76b1ca71d64957d..6e01317db89d235e7ddae6740136d28f6470cc59 100644
--- a/include/otbTensorflowMultisourceModelLearningBase.h
+++ b/include/otbTensorflowMultisourceModelLearningBase.h
@@ -53,37 +53,35 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelLearningBase :
-public TensorflowMultisourceModelBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelLearningBase : public TensorflowMultisourceModelBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowMultisourceModelLearningBase       Self;
-  typedef TensorflowMultisourceModelBase<TInputImage>  Superclass;
-  typedef itk::SmartPointer<Self>                      Pointer;
-  typedef itk::SmartPointer<const Self>                ConstPointer;
+  typedef TensorflowMultisourceModelLearningBase      Self;
+  typedef TensorflowMultisourceModelBase<TInputImage> Superclass;
+  typedef itk::SmartPointer<Self>                     Pointer;
+  typedef itk::SmartPointer<const Self>               ConstPointer;
 
   /** Run-time type information (and related methods). */
   itkTypeMacro(TensorflowMultisourceModelLearningBase, TensorflowMultisourceModelBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType         ImageType;
-  typedef typename Superclass::ImagePointerType  ImagePointerType;
-  typedef typename Superclass::RegionType        RegionType;
-  typedef typename Superclass::SizeType          SizeType;
-  typedef typename Superclass::IndexType         IndexType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::RegionType       RegionType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename Superclass::IndexType        IndexType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::DictElementType   DictElementType;
-  typedef typename Superclass::StringList        StringList;
-  typedef typename Superclass::SizeListType      SizeListType;
-  typedef typename Superclass::TensorListType    TensorListType;
+  typedef typename Superclass::DictType        DictType;
+  typedef typename Superclass::DictElementType DictElementType;
+  typedef typename Superclass::StringList      StringList;
+  typedef typename Superclass::SizeListType    SizeListType;
+  typedef typename Superclass::TensorListType  TensorListType;
 
   /* Typedefs for index */
-  typedef typename ImageType::IndexValueType     IndexValueType;
-  typedef std::vector<IndexValueType>            IndexListType;
+  typedef typename ImageType::IndexValueType IndexValueType;
+  typedef std::vector<IndexValueType>        IndexListType;
 
   // Batch size
   itkSetMacro(BatchSize, IndexValueType);
@@ -98,29 +96,36 @@ public:
 
 protected:
   TensorflowMultisourceModelLearningBase();
-  virtual ~TensorflowMultisourceModelLearningBase() {};
+  virtual ~TensorflowMultisourceModelLearningBase(){};
 
-  virtual void GenerateOutputInformation(void) override;
+  virtual void
+  GenerateOutputInformation(void) override;
 
-  virtual void GenerateInputRequestedRegion();
+  virtual void
+  GenerateInputRequestedRegion();
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
-  virtual void PopulateInputTensors(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize, const IndexListType & order);
+  virtual void
+  PopulateInputTensors(DictType &             inputs,
+                       const IndexValueType & sampleStart,
+                       const IndexValueType & batchSize,
+                       const IndexListType &  order);
 
-  virtual void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize) = 0;
+  virtual void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize) = 0;
 
 private:
-  TensorflowMultisourceModelLearningBase(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelLearningBase(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  unsigned int          m_BatchSize;       // Batch size
-  bool                  m_UseStreaming;    // Use streaming on/off
+  unsigned int m_BatchSize;    // Batch size
+  bool         m_UseStreaming; // Use streaming on/off
 
   // Read only
-  IndexValueType        m_NumberOfSamples; // Number of samples
+  IndexValueType m_NumberOfSamples; // Number of samples
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelLearningBase.hxx b/include/otbTensorflowMultisourceModelLearningBase.hxx
index 28b2328b8b82c49896ed40a1edd18fba5cebd7a7..bfa26d4dc3789a18879891fc9df9581a03fb4d1a 100644
--- a/include/otbTensorflowMultisourceModelLearningBase.hxx
+++ b/include/otbTensorflowMultisourceModelLearningBase.hxx
@@ -18,39 +18,38 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelLearningBase<TInputImage>
-::TensorflowMultisourceModelLearningBase(): m_BatchSize(100),
-m_UseStreaming(false), m_NumberOfSamples(0)
- {
- }
+TensorflowMultisourceModelLearningBase<TInputImage>::TensorflowMultisourceModelLearningBase()
+  : m_BatchSize(100)
+  , m_UseStreaming(false)
+  , m_NumberOfSamples(0)
+{}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateOutputInformation()
+{
   Superclass::GenerateOutputInformation();
 
   // Set an empty output buffered region
   ImageType * outputPtr = this->GetOutput();
-  RegionType nullRegion;
+  RegionType  nullRegion;
   nullRegion.GetModifiableSize().Fill(1);
   outputPtr->SetNumberOfComponentsPerPixel(1);
-  outputPtr->SetLargestPossibleRegion( nullRegion );
+  outputPtr->SetLargestPossibleRegion(nullRegion);
 
   // Count the number of samples
   m_NumberOfSamples = 0;
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
-    {
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
+  {
     // Input image pointer
-    ImagePointerType inputPtr = const_cast<ImageType*>(this->GetInput(i));
+    ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
 
     // Make sure input is available
-    if ( inputPtr.IsNull() )
-      {
+    if (inputPtr.IsNull())
+    {
       itkExceptionMacro(<< "Input " << i << " is null!");
-      }
+    }
 
     // Update input information
     inputPtr->UpdateOutputInformation();
@@ -63,67 +62,62 @@ TensorflowMultisourceModelLearningBase<TInputImage>
 
     // Check size X
     if (inputPatchSize[0] != reqRegion.GetSize(0))
-      itkExceptionMacro("Patch size for input " << i
-          << " is " << inputPatchSize
-          << " but input patches image size is " << reqRegion.GetSize());
+      itkExceptionMacro("Patch size for input " << i << " is " << inputPatchSize << " but input patches image size is "
+                                                << reqRegion.GetSize());
 
     // Check size Y
     if (reqRegion.GetSize(1) % inputPatchSize[1] != 0)
       itkExceptionMacro("Input patches image must have a number of rows which is "
-          << "a multiple of the patch size Y! Patches image has " << reqRegion.GetSize(1)
-          << " rows but patch size Y is " <<  inputPatchSize[1] << " for input " << i);
+                        << "a multiple of the patch size Y! Patches image has " << reqRegion.GetSize(1)
+                        << " rows but patch size Y is " << inputPatchSize[1] << " for input " << i);
 
     // Get the batch size
     const IndexValueType currNumberOfSamples = reqRegion.GetSize(1) / inputPatchSize[1];
 
     // Check the consistency with other inputs
     if (m_NumberOfSamples == 0)
-      {
+    {
       m_NumberOfSamples = currNumberOfSamples;
-      }
+    }
     else if (m_NumberOfSamples != currNumberOfSamples)
-      {
-      itkGenericExceptionMacro("Batch size of input " << (i-1)
-          << " was " << m_NumberOfSamples
-          << " but input " << i
-          << " has a batch size of " << currNumberOfSamples );
-      }
-    } // next input
- }
+    {
+      itkGenericExceptionMacro("Batch size of input " << (i - 1) << " was " << m_NumberOfSamples << " but input " << i
+                                                      << " has a batch size of " << currNumberOfSamples);
+    }
+  } // next input
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateInputRequestedRegion()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateInputRequestedRegion()
+{
   Superclass::GenerateInputRequestedRegion();
 
   // For each image, set the requested region
   RegionType nullRegion;
-  for(unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
-    {
-    ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(i) );
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
+  {
+    ImageType * inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(i));
 
     // If the streaming is enabled, we don't read the full image
     if (m_UseStreaming)
-      {
+    {
       inputImage->SetRequestedRegion(nullRegion);
-      }
+    }
     else
-      {
+    {
       inputImage->SetRequestedRegion(inputImage->GetLargestPossibleRegion());
-      }
-    } // next image
- }
+    }
+  } // next image
+}
 
 /**
  *
  */
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateData()
+{
 
   // Batches loop
   const IndexValueType nBatches = std::ceil(m_NumberOfSamples / m_BatchSize);
@@ -131,15 +125,15 @@ TensorflowMultisourceModelLearningBase<TInputImage>
 
   itk::ProgressReporter progress(this, 0, nBatches);
 
-  for (IndexValueType batch = 0 ; batch < nBatches ; batch++)
-    {
+  for (IndexValueType batch = 0; batch < nBatches; batch++)
+  {
 
     // Feed dict
     DictType inputs;
 
     // Batch start and size
     const IndexValueType sampleStart = batch * m_BatchSize;
-    IndexValueType batchSize = m_BatchSize;
+    IndexValueType       batchSize = m_BatchSize;
     if (rest != 0 && batch == nBatches - 1)
     {
       batchSize = rest;
@@ -149,40 +143,40 @@ TensorflowMultisourceModelLearningBase<TInputImage>
     this->ProcessBatch(inputs, sampleStart, batchSize);
 
     progress.CompletedPixel();
-    } // Next batch
-
- }
+  } // Next batch
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::PopulateInputTensors(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize, const IndexListType & order)
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::PopulateInputTensors(DictType &             inputs,
+                                                                          const IndexValueType & sampleStart,
+                                                                          const IndexValueType & batchSize,
+                                                                          const IndexListType &  order)
+{
   const bool reorder = order.size();
 
   // Populate input tensors
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
-    {
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
+  {
     // Input image pointer
-    ImagePointerType inputPtr = const_cast<ImageType*>(this->GetInput(i));
+    ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
 
     // Patch size of tensor #i
     const SizeType inputPatchSize = this->GetInputReceptiveFields().at(i);
 
     // Create the tensor for the batch
-    const tensorflow::int64 sz_n = batchSize;
-    const tensorflow::int64 sz_y = inputPatchSize[1];
-    const tensorflow::int64 sz_x = inputPatchSize[0];
-    const tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-    const tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
-    tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
+    const tensorflow::int64       sz_n = batchSize;
+    const tensorflow::int64       sz_y = inputPatchSize[1];
+    const tensorflow::int64       sz_x = inputPatchSize[0];
+    const tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+    const tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
+    tensorflow::Tensor            inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
 
     // Populate the tensor
-    for (IndexValueType elem = 0 ; elem < batchSize ; elem++)
-      {
+    for (IndexValueType elem = 0; elem < batchSize; elem++)
+    {
       const tensorflow::uint64 samplePos = sampleStart + elem;
-      IndexType start;
+      IndexType                start;
       start[0] = 0;
       if (reorder)
       {
@@ -190,7 +184,8 @@ TensorflowMultisourceModelLearningBase<TInputImage>
       }
       else
       {
-        start[1] = samplePos * sz_y;;
+        start[1] = samplePos * sz_y;
+        ;
       }
       RegionType patchRegion(start, inputPatchSize);
       if (m_UseStreaming)
@@ -198,14 +193,14 @@ TensorflowMultisourceModelLearningBase<TInputImage>
         // If streaming is enabled, we need to explicitly propagate requested region
         tf::PropagateRequestedRegion<TInputImage>(inputPtr, patchRegion);
       }
-      tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, patchRegion, inputTensor, elem );
-      }
+      tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, patchRegion, inputTensor, elem);
+    }
 
     // Input #i : the tensor of patches (aka the batch)
     DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
     inputs.push_back(input);
-    } // next input tensor
- }
+  } // next input tensor
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelTrain.h b/include/otbTensorflowMultisourceModelTrain.h
index 8ec4c38c369d532a706746c9674197ad766f657b..694f09e0b0ebfdd65305432a602e9f3908c8eadf 100644
--- a/include/otbTensorflowMultisourceModelTrain.h
+++ b/include/otbTensorflowMultisourceModelTrain.h
@@ -34,11 +34,9 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelTrain :
-public TensorflowMultisourceModelLearningBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelTrain : public TensorflowMultisourceModelLearningBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelTrain                     Self;
   typedef TensorflowMultisourceModelLearningBase<TInputImage> Superclass;
@@ -52,25 +50,27 @@ public:
   itkTypeMacro(TensorflowMultisourceModelTrain, TensorflowMultisourceModelLearningBase);
 
   /** Superclass typedefs */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::TensorListType    TensorListType;
-  typedef typename Superclass::IndexValueType    IndexValueType;
-  typedef typename Superclass::IndexListType     IndexListType;
+  typedef typename Superclass::DictType       DictType;
+  typedef typename Superclass::TensorListType TensorListType;
+  typedef typename Superclass::IndexValueType IndexValueType;
+  typedef typename Superclass::IndexListType  IndexListType;
 
 
 protected:
   TensorflowMultisourceModelTrain();
-  virtual ~TensorflowMultisourceModelTrain() {};
+  virtual ~TensorflowMultisourceModelTrain(){};
 
-  virtual void GenerateData();
-  virtual void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize);
+  virtual void
+  GenerateData();
+  virtual void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize);
 
 private:
-  TensorflowMultisourceModelTrain(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelTrain(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  IndexListType     m_RandomIndices;           // Reordered indices
+  IndexListType m_RandomIndices; // Reordered indices
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelTrain.hxx b/include/otbTensorflowMultisourceModelTrain.hxx
index 272dd6390668bd233c5ec41b99ff2b088ef313c3..46bc2d7bd22cab4a90a40131436bd428dc77aff9 100644
--- a/include/otbTensorflowMultisourceModelTrain.hxx
+++ b/include/otbTensorflowMultisourceModelTrain.hxx
@@ -18,37 +18,33 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelTrain<TInputImage>
-::TensorflowMultisourceModelTrain()
- {
- }
+TensorflowMultisourceModelTrain<TInputImage>::TensorflowMultisourceModelTrain()
+{}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelTrain<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelTrain<TInputImage>::GenerateData()
+{
 
   // Initial sequence 1...N
   m_RandomIndices.resize(this->GetNumberOfSamples());
-  std::iota (std::begin(m_RandomIndices), std::end(m_RandomIndices), 0);
+  std::iota(std::begin(m_RandomIndices), std::end(m_RandomIndices), 0);
 
   // Shuffle the sequence
   std::random_device rd;
-  std::mt19937 g(rd());
+  std::mt19937       g(rd());
   std::shuffle(m_RandomIndices.begin(), m_RandomIndices.end(), g);
 
   // Call the generic method
   Superclass::GenerateData();
-
- }
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelTrain<TInputImage>
-::ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize)
- {
+TensorflowMultisourceModelTrain<TInputImage>::ProcessBatch(DictType &             inputs,
+                                                           const IndexValueType & sampleStart,
+                                                           const IndexValueType & batchSize)
+{
   // Populate input tensors
   this->PopulateInputTensors(inputs, sampleStart, batchSize, m_RandomIndices);
 
@@ -57,12 +53,11 @@ TensorflowMultisourceModelTrain<TInputImage>
   this->RunSession(inputs, outputs);
 
   // Display outputs tensors
-  for (auto& o: outputs)
+  for (auto & o : outputs)
   {
     tf::PrintTensorInfos(o);
   }
-
- }
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelValidate.h b/include/otbTensorflowMultisourceModelValidate.h
index 322f6a24e288db9d9acf202e72ffe04ff8d8a8d4..54691747a7128d625c4a637972da67d02f11e1e1 100644
--- a/include/otbTensorflowMultisourceModelValidate.h
+++ b/include/otbTensorflowMultisourceModelValidate.h
@@ -42,11 +42,9 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelValidate :
-public TensorflowMultisourceModelLearningBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelValidate : public TensorflowMultisourceModelLearningBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelValidate                  Self;
   typedef TensorflowMultisourceModelLearningBase<TInputImage> Superclass;
@@ -60,20 +58,20 @@ public:
   itkTypeMacro(TensorflowMultisourceModelValidate, TensorflowMultisourceModelLearningBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType         ImageType;
-  typedef typename Superclass::ImagePointerType  ImagePointerType;
-  typedef typename Superclass::RegionType        RegionType;
-  typedef typename Superclass::SizeType          SizeType;
-  typedef typename Superclass::IndexType         IndexType;
-  typedef std::vector<ImagePointerType>          ImageListType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::RegionType       RegionType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename Superclass::IndexType        IndexType;
+  typedef std::vector<ImagePointerType>         ImageListType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::StringList        StringList;
-  typedef typename Superclass::SizeListType      SizeListType;
-  typedef typename Superclass::TensorListType    TensorListType;
-  typedef typename Superclass::IndexValueType    IndexValueType;
-  typedef typename Superclass::IndexListType     IndexListType;
+  typedef typename Superclass::DictType       DictType;
+  typedef typename Superclass::StringList     StringList;
+  typedef typename Superclass::SizeListType   SizeListType;
+  typedef typename Superclass::TensorListType TensorListType;
+  typedef typename Superclass::IndexValueType IndexValueType;
+  typedef typename Superclass::IndexListType  IndexListType;
 
   /* Typedefs for validation */
   typedef unsigned long                            CountValueType;
@@ -87,36 +85,43 @@ public:
   typedef itk::ImageRegionConstIterator<ImageType> IteratorType;
 
   /** Set and Get the input references */
-  virtual void SetInputReferences(ImageListType input);
-  ImagePointerType GetInputReference(unsigned int index);
+  virtual void
+  SetInputReferences(ImageListType input);
+  ImagePointerType
+  GetInputReference(unsigned int index);
 
   /** Get the confusion matrix */
-  const ConfMatType GetConfusionMatrix(unsigned int target);
+  const ConfMatType
+  GetConfusionMatrix(unsigned int target);
 
   /** Get the map of classes matrix */
-  const MapOfClassesType GetMapOfClasses(unsigned int target);
+  const MapOfClassesType
+  GetMapOfClasses(unsigned int target);
 
 protected:
   TensorflowMultisourceModelValidate();
-  virtual ~TensorflowMultisourceModelValidate() {};
+  virtual ~TensorflowMultisourceModelValidate(){};
 
-  void GenerateOutputInformation(void);
-  void GenerateData();
-  void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize);
+  void
+  GenerateOutputInformation(void);
+  void
+  GenerateData();
+  void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize);
 
 private:
-  TensorflowMultisourceModelValidate(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelValidate(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  ImageListType              m_References;              // The references images
+  ImageListType m_References; // The references images
 
   // Read only
-  ConfMatListType            m_ConfusionMatrices;       // Confusion matrix
-  MapOfClassesListType       m_MapsOfClasses;           // Maps of classes
+  ConfMatListType      m_ConfusionMatrices; // Confusion matrix
+  MapOfClassesListType m_MapsOfClasses;     // Maps of classes
 
   // Internal
-  std::vector<MatMapType>    m_ConfMatMaps;             // Accumulators
+  std::vector<MatMapType> m_ConfMatMaps; // Accumulators
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelValidate.hxx b/include/otbTensorflowMultisourceModelValidate.hxx
index 8ec685ba81c1ae51111a077e8170dd227be7241e..a929aa884ea97ff3f80af968b275dc1029fb0de4 100644
--- a/include/otbTensorflowMultisourceModelValidate.hxx
+++ b/include/otbTensorflowMultisourceModelValidate.hxx
@@ -18,82 +18,77 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelValidate<TInputImage>
-::TensorflowMultisourceModelValidate()
- {
- }
+TensorflowMultisourceModelValidate<TInputImage>::TensorflowMultisourceModelValidate()
+{}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelValidate<TInputImage>::GenerateOutputInformation()
+{
   Superclass::GenerateOutputInformation();
 
   // Check that there is some reference
   const unsigned int nbOfRefs = m_References.size();
   if (nbOfRefs == 0)
-    {
+  {
     itkExceptionMacro("No reference is set");
-    }
+  }
 
   // Check the number of references
   SizeListType outputPatchSizes = this->GetOutputExpressionFields();
   if (nbOfRefs != outputPatchSizes.size())
-    {
-    itkExceptionMacro("There is " << nbOfRefs << " references but only " <<
-                      outputPatchSizes.size() << " output patch sizes");
-    }
+  {
+    itkExceptionMacro("There is " << nbOfRefs << " references but only " << outputPatchSizes.size()
+                                  << " output patch sizes");
+  }
 
   // Check reference image infos
-  for (unsigned int i = 0 ; i < nbOfRefs ; i++)
-    {
-    const SizeType outputPatchSize = outputPatchSizes[i];
+  for (unsigned int i = 0; i < nbOfRefs; i++)
+  {
+    const SizeType   outputPatchSize = outputPatchSizes[i];
     const RegionType refRegion = m_References[i]->GetLargestPossibleRegion();
     if (refRegion.GetSize(0) != outputPatchSize[0])
-      {
-      itkExceptionMacro("Reference image " << i << " width is " << refRegion.GetSize(0) <<
-                        " but patch size (x) is " << outputPatchSize[0]);
-      }
+    {
+      itkExceptionMacro("Reference image " << i << " width is " << refRegion.GetSize(0) << " but patch size (x) is "
+                                           << outputPatchSize[0]);
+    }
     if (refRegion.GetSize(1) != this->GetNumberOfSamples() * outputPatchSize[1])
-      {
-      itkExceptionMacro("Reference image " << i << " height is " << refRegion.GetSize(1) <<
-                        " but patch size (y) is " << outputPatchSize[1] <<
-                        " which is not consistent with the number of samples (" << this->GetNumberOfSamples() << ")");
-      }
+    {
+      itkExceptionMacro("Reference image "
+                        << i << " height is " << refRegion.GetSize(1) << " but patch size (y) is " << outputPatchSize[1]
+                        << " which is not consistent with the number of samples (" << this->GetNumberOfSamples()
+                        << ")");
     }
-
- }
+  }
+}
 
 
 /*
  * Set the references images
  */
-template<class TInputImage>
+template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::SetInputReferences(ImageListType input)
- {
+TensorflowMultisourceModelValidate<TInputImage>::SetInputReferences(ImageListType input)
+{
   m_References = input;
- }
+}
 
 /*
  * Retrieve the i-th reference image
  * An exception is thrown if it doesn't exist.
  */
-template<class TInputImage>
+template <class TInputImage>
 typename TensorflowMultisourceModelValidate<TInputImage>::ImagePointerType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetInputReference(unsigned int index)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetInputReference(unsigned int index)
+{
   if (m_References.size <= index || !m_References[index])
-    {
+  {
     itkExceptionMacro("There is no input reference #" << index);
-    }
+  }
 
   return m_References[index];
- }
+}
 
 /**
  * Perform the validation
@@ -103,73 +98,70 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelValidate<TInputImage>::GenerateData()
+{
 
   // Temporary images for outputs
   m_ConfusionMatrices.clear();
   m_MapsOfClasses.clear();
   m_ConfMatMaps.clear();
-  for (auto const& ref: m_References)
-    {
-    (void) ref;
+  for (auto const & ref : m_References)
+  {
+    (void)ref;
 
     // New confusion matrix
     MatMapType mat;
     m_ConfMatMaps.push_back(mat);
-    }
+  }
 
   // Run all the batches
   Superclass::GenerateData();
 
   // Compute confusion matrices
-  for (unsigned int i = 0 ; i < m_ConfMatMaps.size() ; i++)
-    {
+  for (unsigned int i = 0; i < m_ConfMatMaps.size(); i++)
+  {
     // Confusion matrix (map) for current target
     MatMapType mat = m_ConfMatMaps[i];
 
     // List all values
     MapOfClassesType values;
-    LabelValueType curVal = 0;
-    for (auto const& ref: mat)
-      {
+    LabelValueType   curVal = 0;
+    for (auto const & ref : mat)
+    {
       if (values.count(ref.first) == 0)
-        {
+      {
         values[ref.first] = curVal;
         curVal++;
-        }
-      for (auto const& in: ref.second)
+      }
+      for (auto const & in : ref.second)
         if (values.count(in.first) == 0)
-          {
+        {
           values[in.first] = curVal;
           curVal++;
-          }
-      }
+        }
+    }
 
     // Build the confusion matrix
     const LabelValueType nValues = values.size();
-    ConfMatType matrix(nValues, nValues);
+    ConfMatType          matrix(nValues, nValues);
     matrix.Fill(0);
-    for (auto const& ref: mat)
-      for (auto const& in: ref.second)
+    for (auto const & ref : mat)
+      for (auto const & in : ref.second)
         matrix[values[ref.first]][values[in.first]] = in.second;
 
     // Add the confusion matrix
     m_ConfusionMatrices.push_back(matrix);
     m_MapsOfClasses.push_back(values);
-
-    }
-
- }
+  }
+}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize)
- {
+TensorflowMultisourceModelValidate<TInputImage>::ProcessBatch(DictType &             inputs,
+                                                              const IndexValueType & sampleStart,
+                                                              const IndexValueType & batchSize)
+{
   // Populate input tensors
   IndexListType empty;
   this->PopulateInputTensors(inputs, sampleStart, batchSize, empty);
@@ -180,16 +172,16 @@ TensorflowMultisourceModelValidate<TInputImage>
 
   // Perform the validation
   if (outputs.size() != m_References.size())
-    {
-    itkWarningMacro("There is " << outputs.size() << " outputs returned after session run, " <<
-                    "but only " << m_References.size() << " reference(s) set");
-    }
+  {
+    itkWarningMacro("There is " << outputs.size() << " outputs returned after session run, "
+                                << "but only " << m_References.size() << " reference(s) set");
+  }
   SizeListType outputEFSizes = this->GetOutputExpressionFields();
-  for (unsigned int refIdx = 0 ; refIdx < outputs.size() ; refIdx++)
-    {
+  for (unsigned int refIdx = 0; refIdx < outputs.size(); refIdx++)
+  {
     // Recopy the chunk
     const SizeType outputFOESize = outputEFSizes[refIdx];
-    IndexType cpyStart;
+    IndexType      cpyStart;
     cpyStart.Fill(0);
     IndexType refRegStart;
     refRegStart.Fill(0);
@@ -216,31 +208,30 @@ TensorflowMultisourceModelValidate<TInputImage>
     IteratorType inIt(img, cpyRegion);
     IteratorType refIt(m_References[refIdx], refRegion);
     for (inIt.GoToBegin(), refIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt, ++refIt)
-      {
+    {
       const int classIn = static_cast<LabelValueType>(inIt.Get()[0]);
       const int classRef = static_cast<LabelValueType>(refIt.Get()[0]);
 
       if (m_ConfMatMaps[refIdx].count(classRef) == 0)
-        {
+      {
         MapType newMap;
         newMap[classIn] = 1;
         m_ConfMatMaps[refIdx][classRef] = newMap;
-        }
+      }
       else
-        {
+      {
         if (m_ConfMatMaps[refIdx][classRef].count(classIn) == 0)
-          {
+        {
           m_ConfMatMaps[refIdx][classRef][classIn] = 1;
-          }
+        }
         else
-          {
+        {
           m_ConfMatMaps[refIdx][classRef][classIn]++;
-          }
         }
       }
     }
-
- }
+  }
+}
 
 /*
  * Get the confusion matrix
@@ -248,17 +239,17 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 const typename TensorflowMultisourceModelValidate<TInputImage>::ConfMatType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetConfusionMatrix(unsigned int target)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetConfusionMatrix(unsigned int target)
+{
   if (target >= m_ConfusionMatrices.size())
-    {
-    itkExceptionMacro("Unable to get confusion matrix #" << target << ". " <<
-        "There is only " << m_ConfusionMatrices.size() << " available.");
-    }
+  {
+    itkExceptionMacro("Unable to get confusion matrix #" << target << ". "
+                                                         << "There is only " << m_ConfusionMatrices.size()
+                                                         << " available.");
+  }
 
   return m_ConfusionMatrices[target];
- }
+}
 
 /*
  * Get the map of classes
@@ -266,17 +257,17 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 const typename TensorflowMultisourceModelValidate<TInputImage>::MapOfClassesType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetMapOfClasses(unsigned int target)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetMapOfClasses(unsigned int target)
+{
   if (target >= m_MapsOfClasses.size())
-    {
-    itkExceptionMacro("Unable to get confusion matrix #" << target << ". " <<
-        "There is only " << m_MapsOfClasses.size() << " available.");
-    }
+  {
+    itkExceptionMacro("Unable to get confusion matrix #" << target << ". "
+                                                         << "There is only " << m_MapsOfClasses.size()
+                                                         << " available.");
+  }
 
   return m_MapsOfClasses[target];
- }
+}
 
 } // end namespace otb
 
diff --git a/include/otbTensorflowSampler.h b/include/otbTensorflowSampler.h
index bd363bc8ee191ce7506ca012b68171f6d8bdc828..4fae38e75245ca417c638105379ec7ff7dddf6dd 100644
--- a/include/otbTensorflowSampler.h
+++ b/include/otbTensorflowSampler.h
@@ -52,16 +52,14 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TVectorData>
-class ITK_EXPORT TensorflowSampler :
-public itk::ProcessObject
+class ITK_EXPORT TensorflowSampler : public itk::ProcessObject
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowSampler                       Self;
-  typedef itk::ProcessObject                      Superclass;
-  typedef itk::SmartPointer<Self>                 Pointer;
-  typedef itk::SmartPointer<const Self>           ConstPointer;
+  typedef TensorflowSampler             Self;
+  typedef itk::ProcessObject            Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Method for creation through the object factory. */
   itkNewMacro(Self);
@@ -70,33 +68,28 @@ public:
   itkTypeMacro(TensorflowSampler, itk::ProcessObject);
 
   /** Images typedefs */
-  typedef TInputImage                             ImageType;
-  typedef typename TInputImage::Pointer           ImagePointerType;
-  typedef typename TInputImage::InternalPixelType InternalPixelType;
-  typedef typename TInputImage::PixelType         PixelType;
-  typedef typename TInputImage::RegionType        RegionType;
-  typedef typename TInputImage::PointType         PointType;
-  typedef typename TInputImage::SizeType          SizeType;
-  typedef typename TInputImage::IndexType         IndexType;
-  typedef typename otb::MultiChannelExtractROI<InternalPixelType,
-      InternalPixelType>                          ExtractROIMultiFilterType;
-  typedef typename ExtractROIMultiFilterType::Pointer
-                                                  ExtractROIMultiFilterPointerType;
-  typedef typename std::vector<ImagePointerType>  ImagePointerListType;
-  typedef typename std::vector<SizeType>          SizeListType;
-  typedef typename itk::ImageRegionConstIterator<ImageType>
-                                                  IteratorType;
+  typedef TInputImage                                                                ImageType;
+  typedef typename TInputImage::Pointer                                              ImagePointerType;
+  typedef typename TInputImage::InternalPixelType                                    InternalPixelType;
+  typedef typename TInputImage::PixelType                                            PixelType;
+  typedef typename TInputImage::RegionType                                           RegionType;
+  typedef typename TInputImage::PointType                                            PointType;
+  typedef typename TInputImage::SizeType                                             SizeType;
+  typedef typename TInputImage::IndexType                                            IndexType;
+  typedef typename otb::MultiChannelExtractROI<InternalPixelType, InternalPixelType> ExtractROIMultiFilterType;
+  typedef typename ExtractROIMultiFilterType::Pointer                                ExtractROIMultiFilterPointerType;
+  typedef typename std::vector<ImagePointerType>                                     ImagePointerListType;
+  typedef typename std::vector<SizeType>                                             SizeListType;
+  typedef typename itk::ImageRegionConstIterator<ImageType>                          IteratorType;
 
   /** Vector data typedefs */
-  typedef TVectorData                             VectorDataType;
-  typedef typename VectorDataType::Pointer        VectorDataPointer;
-  typedef typename VectorDataType::DataTreeType   DataTreeType;
-  typedef typename itk::PreOrderTreeIterator<DataTreeType>
-                                                  TreeIteratorType;
-  typedef typename VectorDataType::DataNodeType   DataNodeType;
-  typedef typename DataNodeType::Pointer          DataNodePointer;
-  typedef typename DataNodeType::PolygonListPointerType
-                                                  PolygonListPointerType;
+  typedef TVectorData                                      VectorDataType;
+  typedef typename VectorDataType::Pointer                 VectorDataPointer;
+  typedef typename VectorDataType::DataTreeType            DataTreeType;
+  typedef typename itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
+  typedef typename VectorDataType::DataNodeType            DataNodeType;
+  typedef typename DataNodeType::Pointer                   DataNodePointer;
+  typedef typename DataNodeType::PolygonListPointerType    PolygonListPointerType;
 
   /** Set / get parameters */
   itkSetMacro(Field, std::string);
@@ -107,15 +100,18 @@ public:
   itkGetConstMacro(InputVectorData, VectorDataPointer);
 
   /** Set / get image */
-  virtual void PushBackInputWithPatchSize(const ImageType *input, SizeType & patchSize, InternalPixelType nodataval);
-  const ImageType* GetInput(unsigned int index);
+  virtual void
+  PushBackInputWithPatchSize(const ImageType * input, SizeType & patchSize, InternalPixelType nodataval);
+  const ImageType *
+  GetInput(unsigned int index);
 
   /** Set / get no-data related parameters */
   itkSetMacro(RejectPatchesWithNodata, bool);
   itkGetMacro(RejectPatchesWithNodata, bool);
 
   /** Do the real work */
-  virtual void Update();
+  virtual void
+  Update();
 
   /** Get outputs */
   itkGetMacro(OutputPatchImages, ImagePointerListType);
@@ -125,18 +121,21 @@ public:
 
 protected:
   TensorflowSampler();
-  virtual ~TensorflowSampler() {};
+  virtual ~TensorflowSampler(){};
 
-  virtual void ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples);
-  virtual void AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents);
+  virtual void
+  ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples);
+  virtual void
+  AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents);
 
 private:
-  TensorflowSampler(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowSampler(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  std::string          m_Field;
-  SizeListType         m_PatchSizes;
-  VectorDataPointer    m_InputVectorData;
+  std::string       m_Field;
+  SizeListType      m_PatchSizes;
+  VectorDataPointer m_InputVectorData;
 
   // Read only
   ImagePointerListType m_OutputPatchImages;
@@ -146,7 +145,7 @@ private:
 
   // No data stuff
   std::vector<InternalPixelType> m_NoDataValues;
-  bool                 m_RejectPatchesWithNodata;
+  bool                           m_RejectPatchesWithNodata;
 
 }; // end class
 
diff --git a/include/otbTensorflowSampler.hxx b/include/otbTensorflowSampler.hxx
index 8c0ea7459ad5e1ac0060438e3c6a73b760fc535a..77558c7ba08c6dc75ce8ced1d389a537150a68f7 100644
--- a/include/otbTensorflowSampler.hxx
+++ b/include/otbTensorflowSampler.hxx
@@ -18,36 +18,35 @@ namespace otb
 {
 
 template <class TInputImage, class TVectorData>
-TensorflowSampler<TInputImage, TVectorData>
-::TensorflowSampler()
- {
+TensorflowSampler<TInputImage, TVectorData>::TensorflowSampler()
+{
   m_NumberOfAcceptedSamples = 0;
   m_NumberOfRejectedSamples = 0;
   m_RejectPatchesWithNodata = false;
- }
+}
 
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::PushBackInputWithPatchSize(const ImageType *input, SizeType & patchSize, InternalPixelType nodataval)
- {
-  this->ProcessObject::PushBackInput(const_cast<ImageType*>(input));
+TensorflowSampler<TInputImage, TVectorData>::PushBackInputWithPatchSize(const ImageType * input,
+                                                                        SizeType &        patchSize,
+                                                                        InternalPixelType nodataval)
+{
+  this->ProcessObject::PushBackInput(const_cast<ImageType *>(input));
   m_PatchSizes.push_back(patchSize);
   m_NoDataValues.push_back(nodataval);
- }
+}
 
 template <class TInputImage, class TVectorData>
-const TInputImage*
-TensorflowSampler<TInputImage, TVectorData>
-::GetInput(unsigned int index)
- {
+const TInputImage *
+TensorflowSampler<TInputImage, TVectorData>::GetInput(unsigned int index)
+{
   if (this->GetNumberOfInputs() < 1)
   {
     itkExceptionMacro("Input not set");
   }
 
-  return static_cast<const ImageType*>(this->ProcessObject::GetInput(index));
- }
+  return static_cast<const ImageType *>(this->ProcessObject::GetInput(index));
+}
 
 
 /**
@@ -55,9 +54,10 @@ TensorflowSampler<TInputImage, TVectorData>
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples)
- {
+TensorflowSampler<TInputImage, TVectorData>::ResizeImage(ImagePointerType & image,
+                                                         SizeType &         patchSize,
+                                                         unsigned int       nbSamples)
+{
   // New image region
   RegionType region;
   region.SetSize(0, patchSize[0]);
@@ -71,16 +71,18 @@ TensorflowSampler<TInputImage, TVectorData>
 
   // Assign
   image = resizer->GetOutput();
- }
+}
 
 /**
  * Allocate an image given a patch size and a number of samples
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents)
- {
+TensorflowSampler<TInputImage, TVectorData>::AllocateImage(ImagePointerType & image,
+                                                           SizeType &         patchSize,
+                                                           unsigned int       nbSamples,
+                                                           unsigned int       nbComponents)
+{
   // Image region
   RegionType region;
   region.SetSize(0, patchSize[0]);
@@ -91,16 +93,15 @@ TensorflowSampler<TInputImage, TVectorData>
   image->SetNumberOfComponentsPerPixel(nbComponents);
   image->SetRegions(region);
   image->Allocate();
- }
+}
 
 /**
  * Do the work
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::Update()
- {
+TensorflowSampler<TInputImage, TVectorData>::Update()
+{
 
   // Check number of inputs
   if (this->GetNumberOfInputs() != m_PatchSizes.size())
@@ -109,8 +110,8 @@ TensorflowSampler<TInputImage, TVectorData>
   }
 
   // Count points
-  unsigned int nTotal = 0;
-  unsigned int geomId = 0;
+  unsigned int     nTotal = 0;
+  unsigned int     geomId = 0;
   TreeIteratorType itVector(m_InputVectorData->GetDataTree());
   itVector.GoToBegin();
   while (!itVector.IsAtEnd())
@@ -146,7 +147,7 @@ TensorflowSampler<TInputImage, TVectorData>
   const unsigned int nbInputs = this->GetNumberOfInputs();
   m_OutputPatchImages.clear();
   m_OutputPatchImages.reserve(nbInputs);
-  for (unsigned int i = 0 ; i < nbInputs ; i++)
+  for (unsigned int i = 0; i < nbInputs; i++)
   {
     ImagePointerType newImage;
     AllocateImage(newImage, m_PatchSizes[i], nTotal, GetInput(i)->GetNumberOfComponentsPerPixel());
@@ -160,7 +161,7 @@ TensorflowSampler<TInputImage, TVectorData>
   itVector.GoToBegin();
   unsigned long count = 0;
   unsigned long rejected = 0;
-  IndexType labelIndex;
+  IndexType     labelIndex;
   labelIndex[0] = 0;
   PixelType labelPix;
   labelPix.SetSize(1);
@@ -169,13 +170,13 @@ TensorflowSampler<TInputImage, TVectorData>
     if (!itVector.Get()->IsRoot() && !itVector.Get()->IsDocument() && !itVector.Get()->IsFolder())
     {
       DataNodePointer currentGeometry = itVector.Get();
-      PointType point = currentGeometry->GetPoint();
+      PointType       point = currentGeometry->GetPoint();
 
       // Get the label value
       labelPix[0] = static_cast<InternalPixelType>(currentGeometry->GetFieldAsInt(m_Field));
 
       bool hasBeenSampled = true;
-      for (unsigned int i = 0 ; i < nbInputs ; i++)
+      for (unsigned int i = 0; i < nbInputs; i++)
       {
         // Get input
         ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
@@ -188,7 +189,7 @@ TensorflowSampler<TInputImage, TVectorData>
         }
         // Check if the sampled patch contains a no-data value
         if (m_RejectPatchesWithNodata && hasBeenSampled)
-          {
+        {
           IndexType outIndex;
           outIndex[0] = 0;
           outIndex[1] = count * m_PatchSizes[i][1];
@@ -196,13 +197,13 @@ TensorflowSampler<TInputImage, TVectorData>
 
           IteratorType it(m_OutputPatchImages[i], region);
           for (it.GoToBegin(); !it.IsAtEnd(); ++it)
-            {
+          {
             PixelType pix = it.Get();
-            for (unsigned int band = 0 ; band < pix.Size() ; band++)
+            for (unsigned int band = 0; band < pix.Size(); band++)
               if (pix[band] == m_NoDataValues[i])
                 hasBeenSampled = false;
-            }
           }
+        }
       } // Next input
       if (hasBeenSampled)
       {
@@ -220,7 +221,6 @@ TensorflowSampler<TInputImage, TVectorData>
 
       // Update progress
       progress.CompletedPixel();
-
     }
 
     ++itVector;
@@ -228,7 +228,7 @@ TensorflowSampler<TInputImage, TVectorData>
 
   // Resize output images
   ResizeImage(m_OutputLabelImage, labelPatchSize, count);
-  for (unsigned int i = 0 ; i < nbInputs ; i++)
+  for (unsigned int i = 0; i < nbInputs; i++)
   {
     ResizeImage(m_OutputPatchImages[i], m_PatchSizes[i], count);
   }
@@ -236,8 +236,7 @@ TensorflowSampler<TInputImage, TVectorData>
   // Update number of samples produced
   m_NumberOfAcceptedSamples = count;
   m_NumberOfRejectedSamples = rejected;
-
- }
+}
 
 } // end namespace otb
 
diff --git a/include/otbTensorflowSamplingUtils.cxx b/include/otbTensorflowSamplingUtils.cxx
index 5cf88f6b171b61c9576b4ca68d855a0d6059d42f..db4d9ea01d718c5957a9080486dcb18b83097995 100644
--- a/include/otbTensorflowSamplingUtils.cxx
+++ b/include/otbTensorflowSamplingUtils.cxx
@@ -19,13 +19,15 @@ namespace tf
 //
 // Update the distribution of the patch located at the specified location
 //
-template<class TImage, class TDistribution>
-bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
-    typename TImage::PointType point, typename TImage::SizeType patchSize,
-    TDistribution & dist)
+template <class TImage, class TDistribution>
+bool
+UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
+                            typename TImage::PointType     point,
+                            typename TImage::SizeType      patchSize,
+                            TDistribution &                dist)
 {
   typename TImage::IndexType index;
-  bool canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
+  bool                       canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
   if (canTransform)
   {
     index[0] -= patchSize[0] / 2;
@@ -38,7 +40,7 @@ bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
       // Fill patch
       PropagateRequestedRegion<TImage>(inPtr, inPatchRegion);
 
-      typename itk::ImageRegionConstIterator<TImage> inIt (inPtr, inPatchRegion);
+      typename itk::ImageRegionConstIterator<TImage> inIt(inPtr, inPatchRegion);
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
         dist.Update(inIt.Get());
@@ -47,7 +49,6 @@ bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
     }
   }
   return false;
-
 }
 
 
diff --git a/include/otbTensorflowSamplingUtils.h b/include/otbTensorflowSamplingUtils.h
index 585f90132ea71509fe2a08ff8f56aa1eac2abb3f..846b71318e57e2f64b7b5d582c4a85223bd809b3 100644
--- a/include/otbTensorflowSamplingUtils.h
+++ b/include/otbTensorflowSamplingUtils.h
@@ -20,77 +20,89 @@ namespace otb
 namespace tf
 {
 
-template<class TImage>
+template <class TImage>
 class Distribution
 {
 public:
   typedef typename TImage::PixelType ValueType;
-  typedef vnl_vector<float> CountsType;
-
-  explicit Distribution(unsigned int nClasses): m_NbOfClasses(nClasses), m_Dist(CountsType(nClasses, 0))
-  {
-  }
-  Distribution(unsigned int nClasses, float fillValue): m_NbOfClasses(nClasses), m_Dist(CountsType(nClasses, fillValue))
-  {
-  }
-  Distribution(): m_NbOfClasses(2), m_Dist(CountsType(m_NbOfClasses, 0))
-  {
-  }
-  Distribution(const Distribution & other): m_Dist(other.Get()), m_NbOfClasses(m_Dist.size())
-  {
-  }
-  ~Distribution(){}
-
-  void Update(const typename TImage::PixelType & pixel)
+  typedef vnl_vector<float>          CountsType;
+
+  explicit Distribution(unsigned int nClasses)
+    : m_NbOfClasses(nClasses)
+    , m_Dist(CountsType(nClasses, 0))
+  {}
+  Distribution(unsigned int nClasses, float fillValue)
+    : m_NbOfClasses(nClasses)
+    , m_Dist(CountsType(nClasses, fillValue))
+  {}
+  Distribution()
+    : m_NbOfClasses(2)
+    , m_Dist(CountsType(m_NbOfClasses, 0))
+  {}
+  Distribution(const Distribution & other)
+    : m_Dist(other.Get())
+    , m_NbOfClasses(m_Dist.size())
+  {}
+  ~Distribution() {}
+
+  void
+  Update(const typename TImage::PixelType & pixel)
   {
     m_Dist[pixel]++;
   }
 
-  void Update(const Distribution & other)
+  void
+  Update(const Distribution & other)
   {
     const CountsType otherDist = other.Get();
-    for (unsigned int c = 0 ; c < m_NbOfClasses ; c++)
+    for (unsigned int c = 0; c < m_NbOfClasses; c++)
       m_Dist[c] += otherDist[c];
   }
 
-  CountsType Get() const
+  CountsType
+  Get() const
   {
     return m_Dist;
   }
 
-  CountsType GetNormalized() const
+  CountsType
+  GetNormalized() const
   {
-    const float invNorm = 1.0 / std::sqrt(dot_product(m_Dist, m_Dist));
+    const float      invNorm = 1.0 / std::sqrt(dot_product(m_Dist, m_Dist));
     const CountsType normalizedDist = invNorm * m_Dist;
     return normalizedDist;
   }
 
-  float Cosinus(const Distribution & other) const
+  float
+  Cosinus(const Distribution & other) const
   {
     return dot_product(other.GetNormalized(), GetNormalized());
   }
 
-  std::string ToString()
+  std::string
+  ToString()
   {
     std::stringstream ss;
     ss << "\n";
-    for (unsigned int c = 0 ; c < m_NbOfClasses ; c++)
+    for (unsigned int c = 0; c < m_NbOfClasses; c++)
       ss << "\tClass #" << c << " : " << m_Dist[c] << "\n";
     return ss.str();
   }
 
 private:
   unsigned int m_NbOfClasses;
-  CountsType m_Dist;
+  CountsType   m_Dist;
 };
 
 // Update the distribution of the patch located at the specified location
-template<class TImage, class TDistribution>
-bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
-    typename TImage::PointType point, typename TImage::SizeType patchSize,
-    TDistribution & dist);
-
-} // namesapce tf
+template <class TImage, class TDistribution>
+bool
+UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
+                            typename TImage::PointType     point,
+                            typename TImage::SizeType      patchSize,
+                            TDistribution &                dist);
+
+} // namespace tf
 } // namespace otb
 
 #include "otbTensorflowSamplingUtils.cxx"
diff --git a/include/otbTensorflowSource.h b/include/otbTensorflowSource.h
index 1556997f9a20c02c1f5f9fd80f92c0fc38270657..9bbeed12fbe07820c1a59d125d0af5343dfd3492 100644
--- a/include/otbTensorflowSource.h
+++ b/include/otbTensorflowSource.h
@@ -29,45 +29,43 @@ namespace otb
  * Images must have the same size.
  * This is the common input type used in every OTB-TF applications.
  */
-template<class TImage>
+template <class TImage>
 class TensorflowSource
 {
 public:
   /** Typedefs for images */
-  typedef TImage                                            FloatVectorImageType;
-  typedef typename FloatVectorImageType::Pointer            FloatVectorImagePointerType;
-  typedef typename FloatVectorImageType::InternalPixelType  InternalPixelType;
-  typedef otb::Image<InternalPixelType>                     FloatImageType;
-  typedef typename FloatImageType::SizeType                 SizeType;
+  typedef TImage                                           FloatVectorImageType;
+  typedef typename FloatVectorImageType::Pointer           FloatVectorImagePointerType;
+  typedef typename FloatVectorImageType::InternalPixelType InternalPixelType;
+  typedef otb::Image<InternalPixelType>                    FloatImageType;
+  typedef typename FloatImageType::SizeType                SizeType;
 
   /** Typedefs for image concatenation */
-  typedef otb::ImageList<FloatImageType>                    ImageListType;
-  typedef typename ImageListType::Pointer                   ImageListPointer;
-  typedef ImageListToVectorImageFilter<ImageListType,
-      FloatVectorImageType>                                 ListConcatenerFilterType;
-  typedef typename ListConcatenerFilterType::Pointer        ListConcatenerFilterPointer;
-  typedef MultiToMonoChannelExtractROI<InternalPixelType,
-      InternalPixelType>                                    MultiToMonoChannelFilterType;
-  typedef ObjectList<MultiToMonoChannelFilterType>          ExtractROIFilterListType;
-  typedef typename ExtractROIFilterListType::Pointer        ExtractROIFilterListPointer;
-  typedef otb::MultiChannelExtractROI<InternalPixelType,
-      InternalPixelType>                                    ExtractFilterType;
-  typedef otb::ObjectList<FloatVectorImageType>             FloatVectorImageListType;
+  typedef otb::ImageList<FloatImageType>                                     ImageListType;
+  typedef typename ImageListType::Pointer                                    ImageListPointer;
+  typedef ImageListToVectorImageFilter<ImageListType, FloatVectorImageType>  ListConcatenerFilterType;
+  typedef typename ListConcatenerFilterType::Pointer                         ListConcatenerFilterPointer;
+  typedef MultiToMonoChannelExtractROI<InternalPixelType, InternalPixelType> MultiToMonoChannelFilterType;
+  typedef ObjectList<MultiToMonoChannelFilterType>                           ExtractROIFilterListType;
+  typedef typename ExtractROIFilterListType::Pointer                         ExtractROIFilterListPointer;
+  typedef otb::MultiChannelExtractROI<InternalPixelType, InternalPixelType>  ExtractFilterType;
+  typedef otb::ObjectList<FloatVectorImageType>                              FloatVectorImageListType;
 
   // Initialize the source
-  void Set(FloatVectorImageListType * inputList);
+  void
+  Set(FloatVectorImageListType * inputList);
 
   // Get the source output
-  FloatVectorImagePointerType Get();
+  FloatVectorImagePointerType
+  Get();
 
   TensorflowSource();
-  virtual ~TensorflowSource (){};
+  virtual ~TensorflowSource(){};
 
 private:
   ListConcatenerFilterPointer m_Concatener;    // Mono-images stacker
   ImageListPointer            m_List;          // List of mono-images
   ExtractROIFilterListPointer m_ExtractorList; // Mono-images extractors
-
 };
 
 } // end namespace otb
diff --git a/include/otbTensorflowSource.hxx b/include/otbTensorflowSource.hxx
index 2ad575866fda4ba7baa9dc494c44bcc38b79f0cd..2e41253c69c70e328e9d6827ee1a5f5971c716cb 100644
--- a/include/otbTensorflowSource.hxx
+++ b/include/otbTensorflowSource.hxx
@@ -21,8 +21,7 @@ namespace otb
 // Constructor
 //
 template <class TImage>
-TensorflowSource<TImage>
-::TensorflowSource()
+TensorflowSource<TImage>::TensorflowSource()
 {}
 
 //
@@ -30,40 +29,38 @@ TensorflowSource<TImage>
 //
 template <class TImage>
 void
-TensorflowSource<TImage>
-::Set(FloatVectorImageListType * inputList)
+TensorflowSource<TImage>::Set(FloatVectorImageListType * inputList)
 {
   // Create one stack for input images list
-  m_Concatener    = ListConcatenerFilterType::New();
-  m_List          = ImageListType::New();
+  m_Concatener = ListConcatenerFilterType::New();
+  m_List = ImageListType::New();
   m_ExtractorList = ExtractROIFilterListType::New();
 
   // Split each input vector image into image
   // and generate an mono channel image list
   inputList->GetNthElement(0)->UpdateOutputInformation();
   SizeType size = inputList->GetNthElement(0)->GetLargestPossibleRegion().GetSize();
-  for( unsigned int i = 0; i < inputList->Size(); i++ )
+  for (unsigned int i = 0; i < inputList->Size(); i++)
   {
     FloatVectorImagePointerType vectIm = inputList->GetNthElement(i);
     vectIm->UpdateOutputInformation();
-    if( size != vectIm->GetLargestPossibleRegion().GetSize() )
+    if (size != vectIm->GetLargestPossibleRegion().GetSize())
     {
       itkGenericExceptionMacro("Input image size number " << i << " mismatch");
     }
 
-    for( unsigned int j = 0; j < vectIm->GetNumberOfComponentsPerPixel(); j++)
+    for (unsigned int j = 0; j < vectIm->GetNumberOfComponentsPerPixel(); j++)
     {
       typename MultiToMonoChannelFilterType::Pointer extractor = MultiToMonoChannelFilterType::New();
-      extractor->SetInput( vectIm );
-      extractor->SetChannel( j+1 );
+      extractor->SetInput(vectIm);
+      extractor->SetChannel(j + 1);
       extractor->UpdateOutputInformation();
-      m_ExtractorList->PushBack( extractor );
-      m_List->PushBack( extractor->GetOutput() );
+      m_ExtractorList->PushBack(extractor);
+      m_List->PushBack(extractor->GetOutput());
     }
   }
-  m_Concatener->SetInput( m_List );
+  m_Concatener->SetInput(m_List);
   m_Concatener->UpdateOutputInformation();
-
 }
 
 //
diff --git a/include/otbTensorflowStreamerFilter.h b/include/otbTensorflowStreamerFilter.h
index 4730d3691cd3bd64954091c3bfdbb5bb7422d870..fa985d007a1040bc5325d3831724c4108316da5e 100644
--- a/include/otbTensorflowStreamerFilter.h
+++ b/include/otbTensorflowStreamerFilter.h
@@ -26,12 +26,10 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TOutputImage>
-class ITK_EXPORT TensorflowStreamerFilter :
-public itk::ImageToImageFilter<TInputImage, TOutputImage>
+class ITK_EXPORT TensorflowStreamerFilter : public itk::ImageToImageFilter<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowStreamerFilter                           Self;
   typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
@@ -51,24 +49,31 @@ public:
   typedef typename ImageType::SizeType              SizeType;
   typedef typename Superclass::InputImageRegionType RegionType;
 
-  typedef TOutputImage                             OutputImageType;
+  typedef TOutputImage OutputImageType;
 
   itkSetMacro(OutputGridSize, SizeType);
   itkGetMacro(OutputGridSize, SizeType);
 
 protected:
   TensorflowStreamerFilter();
-  virtual ~TensorflowStreamerFilter() {};
+  virtual ~TensorflowStreamerFilter(){};
 
-  virtual void UpdateOutputData(itk::DataObject *output){(void) output; this->GenerateData();}
+  virtual void
+  UpdateOutputData(itk::DataObject * output)
+  {
+    (void)output;
+    this->GenerateData();
+  }
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
 private:
-  TensorflowStreamerFilter(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowStreamerFilter(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  SizeType                   m_OutputGridSize;       // Output grid size
+  SizeType m_OutputGridSize; // Output grid size
 
 }; // end class
 
diff --git a/include/otbTensorflowStreamerFilter.hxx b/include/otbTensorflowStreamerFilter.hxx
index 59904a54f3df99048dfa383e22dad0ee7bef9784..3aa1afca538841126700484f34f9261731b75d1e 100644
--- a/include/otbTensorflowStreamerFilter.hxx
+++ b/include/otbTensorflowStreamerFilter.hxx
@@ -19,30 +19,28 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowStreamerFilter<TInputImage, TOutputImage>
-::TensorflowStreamerFilter()
- {
+TensorflowStreamerFilter<TInputImage, TOutputImage>::TensorflowStreamerFilter()
+{
   m_OutputGridSize.Fill(1);
- }
+}
 
 /**
  * Compute the output image
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowStreamerFilter<TInputImage, TOutputImage>
-::GenerateData()
- {
+TensorflowStreamerFilter<TInputImage, TOutputImage>::GenerateData()
+{
   // Output pointer and requested region
   OutputImageType * outputPtr = this->GetOutput();
-  const RegionType outputReqRegion = outputPtr->GetRequestedRegion();
+  const RegionType  outputReqRegion = outputPtr->GetRequestedRegion();
   outputPtr->SetBufferedRegion(outputReqRegion);
   outputPtr->Allocate();
 
   // Compute the aligned region
   RegionType region;
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     // Get corners
     IndexValueType lower = outputReqRegion.GetIndex(dim);
     IndexValueType upper = lower + outputReqRegion.GetSize(dim);
@@ -54,35 +52,34 @@ TensorflowStreamerFilter<TInputImage, TOutputImage>
     // Move corners to aligned positions
     lower -= deltaLo;
     if (deltaUp > 0)
-      {
+    {
       upper += m_OutputGridSize[dim] - deltaUp;
-      }
+    }
 
     // Update region
     region.SetIndex(dim, lower);
     region.SetSize(dim, upper - lower);
-
-    }
+  }
 
   // Compute the number of subregions to process
   const unsigned int nbTilesX = region.GetSize(0) / m_OutputGridSize[0];
   const unsigned int nbTilesY = region.GetSize(1) / m_OutputGridSize[1];
 
   // Progress
-  itk::ProgressReporter progress(this, 0, nbTilesX*nbTilesY);
+  itk::ProgressReporter progress(this, 0, nbTilesX * nbTilesY);
 
   // For each tile, propagate the input region and recopy the output
-  ImageType * inputImage = static_cast<ImageType * >(  Superclass::ProcessObject::GetInput(0) );
+  ImageType *  inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(0));
   unsigned int tx, ty;
-  RegionType subRegion;
+  RegionType   subRegion;
   subRegion.SetSize(m_OutputGridSize);
   for (ty = 0; ty < nbTilesY; ty++)
   {
-    subRegion.SetIndex(1, ty*m_OutputGridSize[1] + region.GetIndex(1));
+    subRegion.SetIndex(1, ty * m_OutputGridSize[1] + region.GetIndex(1));
     for (tx = 0; tx < nbTilesX; tx++)
     {
       // Update the input subregion
-      subRegion.SetIndex(0, tx*m_OutputGridSize[0] + region.GetIndex(0));
+      subRegion.SetIndex(0, tx * m_OutputGridSize[0] + region.GetIndex(0));
 
       // The actual region to copy
       RegionType cpyRegion(subRegion);
@@ -94,12 +91,12 @@ TensorflowStreamerFilter<TInputImage, TOutputImage>
       inputImage->UpdateOutputData();
 
       // Copy the subregion to output
-      itk::ImageAlgorithm::Copy( inputImage, outputPtr, cpyRegion, cpyRegion );
+      itk::ImageAlgorithm::Copy(inputImage, outputPtr, cpyRegion, cpyRegion);
 
       progress.CompletedPixel();
     }
   }
- }
+}
 
 
 } // end namespace otb
diff --git a/test/data/RF_model_from_deep_features_map.tif b/test/data/RF_model_from_deep_features_map.tif
new file mode 100644
index 0000000000000000000000000000000000000000..8d01ec195605327cb613eed99b3bebc5bb2c5583
--- /dev/null
+++ b/test/data/RF_model_from_deep_features_map.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c603c85a80e6d59d18fe686a827e77066dbedb76c53fb6c35c7ed291d20cf34
+size 1316
diff --git a/test/data/Sentinel-2_B4328_10m_labels_A.tif b/test/data/Sentinel-2_B4328_10m_labels_A.tif
index fe3f31127413c33b44dc6522d628111ebc7ddc24..63fa4e2e8209aabd881087754c97fc615553e283 100644
Binary files a/test/data/Sentinel-2_B4328_10m_labels_A.tif and b/test/data/Sentinel-2_B4328_10m_labels_A.tif differ
diff --git a/test/data/Sentinel-2_B4328_10m_labels_B.tif b/test/data/Sentinel-2_B4328_10m_labels_B.tif
index 501f44affb72fa1d4eed1c9f8607ac5284aab87a..0b0edfa08641a1ecb142105db6b80411dbfd838b 100644
Binary files a/test/data/Sentinel-2_B4328_10m_labels_B.tif and b/test/data/Sentinel-2_B4328_10m_labels_B.tif differ
diff --git a/test/data/Sentinel-2_B4328_10m_patches_A.jp2 b/test/data/Sentinel-2_B4328_10m_patches_A.jp2
index b3e3d058a2d0c948c1d0e88732e6378945a13a79..fb686f59c9f168e902a7201d03b3859e155f49ec 100644
Binary files a/test/data/Sentinel-2_B4328_10m_patches_A.jp2 and b/test/data/Sentinel-2_B4328_10m_patches_A.jp2 differ
diff --git a/test/data/Sentinel-2_B4328_10m_patches_B.jp2 b/test/data/Sentinel-2_B4328_10m_patches_B.jp2
index 795af2ecd5908737b62574810b1701ca78eb3bd6..e34e865f8c0321d77ab95dc3831e6b66da56f65b 100644
Binary files a/test/data/Sentinel-2_B4328_10m_patches_B.jp2 and b/test/data/Sentinel-2_B4328_10m_patches_B.jp2 differ
diff --git a/test/data/amsterdam_labels_A.tif b/test/data/amsterdam_labels_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..a7b0dbcf01de410b0e5f8303ede2e5cdefda9db2
--- /dev/null
+++ b/test/data/amsterdam_labels_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdd9ecd2e54992d712ee44fc05a91bc38f25bfa6c6cbcdc11874fe9070a4fc0f
+size 11411
diff --git a/test/data/amsterdam_labels_B.tif b/test/data/amsterdam_labels_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..fff31bcfd6b0c4f0d498cf79ce626de9937a6100
--- /dev/null
+++ b/test/data/amsterdam_labels_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ce48d4d57f46ca39d940ead160d7968f9b6c2a62b4f6b49132372d1a8916622
+size 11362
diff --git a/test/data/amsterdam_patches_A.tif b/test/data/amsterdam_patches_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..41323cf2dd8645e28952fdcd9db7bb8cd2542d8d
--- /dev/null
+++ b/test/data/amsterdam_patches_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8d606712ba9b1493ffd02c08135c5d1072d2bc1e64228d00425f17cc62280c9
+size 3840365
diff --git a/test/data/amsterdam_patches_B.tif b/test/data/amsterdam_patches_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..ace9beebf09fd5066ded88bdbf3fec05f752456e
--- /dev/null
+++ b/test/data/amsterdam_patches_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df49a0a8135854191006b0828d99fa494b059cfa15ef801b263d8520bbf8566e
+size 3817177
diff --git a/test/data/apTvClTensorflowModelServeCNN16x16PB.tif b/test/data/apTvClTensorflowModelServeCNN16x16PB.tif
index 25b1e9a73435a0d9d29683af5a2e6e8bec29eb11..586d3609e4bc51d21c6883671c40522c08723972 100644
Binary files a/test/data/apTvClTensorflowModelServeCNN16x16PB.tif and b/test/data/apTvClTensorflowModelServeCNN16x16PB.tif differ
diff --git a/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif b/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif
index f6155aa6a9e1e878d754e4db67a56f8e211809b3..4cad559ff4683b2aa8b0433cefe941fd64ab0394 100644
Binary files a/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif and b/test/data/apTvClTensorflowModelServeCNN8x8_32x32FC.tif differ
diff --git a/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif b/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif
index ec9c9d9f984ff728a2116637ba56ecc398aadaeb..459f5ad94bee8cb485ca585050987e73bfa1e3c1 100644
Binary files a/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif and b/test/data/apTvClTensorflowModelServeCNN8x8_32x32PB.tif differ
diff --git a/test/data/apTvClTensorflowModelServeFCNN16x16FC.tif b/test/data/apTvClTensorflowModelServeFCNN16x16FC.tif
index 14a5d11aa17a21ecf650b04e748e4b1b1bc74cd4..5da8fc25fc48e2e15ac19c81a3eb423de1858871 100644
Binary files a/test/data/apTvClTensorflowModelServeFCNN16x16FC.tif and b/test/data/apTvClTensorflowModelServeFCNN16x16FC.tif differ
diff --git a/test/data/apTvClTensorflowModelServeFCNN16x16PB.tif b/test/data/apTvClTensorflowModelServeFCNN16x16PB.tif
index 14a5d11aa17a21ecf650b04e748e4b1b1bc74cd4..5da8fc25fc48e2e15ac19c81a3eb423de1858871 100644
Binary files a/test/data/apTvClTensorflowModelServeFCNN16x16PB.tif and b/test/data/apTvClTensorflowModelServeFCNN16x16PB.tif differ
diff --git a/test/data/apTvClTensorflowModelServeFCNN64x64to32x32.tif b/test/data/apTvClTensorflowModelServeFCNN64x64to32x32.tif
index 1d22a3b97f6f2ded86651054be8259751ee0df11..08b8b6d72e514d1a85639c84414501cb1950e73a 100644
Binary files a/test/data/apTvClTensorflowModelServeFCNN64x64to32x32.tif and b/test/data/apTvClTensorflowModelServeFCNN64x64to32x32.tif differ
diff --git a/test/data/classif_model1.tif b/test/data/classif_model1.tif
new file mode 100644
index 0000000000000000000000000000000000000000..467f0133d286c40fbd44a8513b0353320bf0a6b4
--- /dev/null
+++ b/test/data/classif_model1.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac1e1b76123ee614062e445b173fd27f2e590782f7f653a32440d2e626ae09d9
+size 1082
diff --git a/test/data/classif_model2.tif b/test/data/classif_model2.tif
new file mode 100644
index 0000000000000000000000000000000000000000..d78613fd6b2c05437c73dc14bb32b256ec801808
--- /dev/null
+++ b/test/data/classif_model2.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f498681e24a61d796a307f5f2696468888995249ced64a875e06a406c3540128
+size 1510
diff --git a/test/data/classif_model3_fcn.tif b/test/data/classif_model3_fcn.tif
new file mode 100644
index 0000000000000000000000000000000000000000..9b6f406c0e575e67c2f60839fabb06c30981194d
--- /dev/null
+++ b/test/data/classif_model3_fcn.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97b84882b61de5d8a965503a30872a171f6548dabb302701d76a12459cdd2148
+size 852
diff --git a/test/data/classif_model3_pb.tif b/test/data/classif_model3_pb.tif
new file mode 100644
index 0000000000000000000000000000000000000000..8abc3c9d67b451cb4f9bd33bf50ae982d08cc5ed
--- /dev/null
+++ b/test/data/classif_model3_pb.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c067f03b3b0b7edfde73ccf53fd07f7e104082dea6754733917051c3a54937c6
+size 857
diff --git a/test/data/classif_model4.tif b/test/data/classif_model4.tif
new file mode 100644
index 0000000000000000000000000000000000000000..76269c5b4f9d640584082dfa45a0cb9ff675ff7f
--- /dev/null
+++ b/test/data/classif_model4.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2496ffad2c40ac61d8ca5903aa8fe15af65994ae1479513a199ddbb6104978af
+size 7951
diff --git a/test/data/fake_spot6.jp2 b/test/data/fake_spot6.jp2
new file mode 100644
index 0000000000000000000000000000000000000000..0fc2b9d461c1cb97d3857bed81602958777d0c6d
--- /dev/null
+++ b/test/data/fake_spot6.jp2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aece496d23eada3e3dac20c7285230c7ab9e49d96bb58b23b1f2dad5054d5002
+size 2099513
diff --git a/test/data/outvec_A.gpkg b/test/data/outvec_A.gpkg
new file mode 100644
index 0000000000000000000000000000000000000000..9f042b76f38e4c9357c3e5460903bb73534cd556
--- /dev/null
+++ b/test/data/outvec_A.gpkg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6c4ef5c6cabfffc74fbdd716fbba17627c0916ae1dd9dab556e9d7589ef66ee
+size 663552
diff --git a/test/data/outvec_B.gpkg b/test/data/outvec_B.gpkg
new file mode 100644
index 0000000000000000000000000000000000000000..10e824d6a998bfd215c1653e4629907c60917647
--- /dev/null
+++ b/test/data/outvec_B.gpkg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b8b9f30ed4acdf05e90c35f1dc12d44815dfa871527d4ebb9b5cb6614e3cbdd
+size 663552
diff --git a/test/data/pan_subset.tif b/test/data/pan_subset.tif
index ba380b4d410adc4397a2b58a43839b7369c600f9..2e4a2f1745472e8f4aa27901d6c786ef10f2dfd8 100644
Binary files a/test/data/pan_subset.tif and b/test/data/pan_subset.tif differ
diff --git a/test/data/patchimg_01.tif b/test/data/patchimg_01.tif
index f4e5262ee17b80ef1bb72cfbcf8d8d899f73a2c2..209aa93a5eab27ccc495064fdcf7a8ea5c9aa9dc 100644
Binary files a/test/data/patchimg_01.tif and b/test/data/patchimg_01.tif differ
diff --git a/test/data/patchimg_11.tif b/test/data/patchimg_11.tif
index 65b55565f55641cdb6cf496a73e18c919d4191fc..95413dc6ca1b6af65fd8768a55144efef2079b2b 100644
Binary files a/test/data/patchimg_11.tif and b/test/data/patchimg_11.tif differ
diff --git a/test/data/pxs_subset.tif b/test/data/pxs_subset.tif
index 89ee7990d13a3b6dc7a0180b907055b2cb268ee6..8909cb410848441e6ebdf80dc9be9ae459161596 100644
Binary files a/test/data/pxs_subset.tif and b/test/data/pxs_subset.tif differ
diff --git a/test/data/pxs_subset2.tif b/test/data/pxs_subset2.tif
index 64991c0567ca34217558a502a7071eb34e241c21..c74443e6a82fba91aadc91e1a2998897108cdd68 100644
Binary files a/test/data/pxs_subset2.tif and b/test/data/pxs_subset2.tif differ
diff --git a/test/data/s2_10m_labels_A.tif b/test/data/s2_10m_labels_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..4ed438fa7b6899a7f9b60342ac23fe4fb7f84b74
--- /dev/null
+++ b/test/data/s2_10m_labels_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef0d65837c47f9d7904299c0e2e24077a3325483665e20bfeab0d90fc2cd6fec
+size 2574
diff --git a/test/data/s2_10m_labels_B.tif b/test/data/s2_10m_labels_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..1b1e8a03152120a4ea9118ffaf593bd6d41b0b0e
--- /dev/null
+++ b/test/data/s2_10m_labels_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2e2785ae30e7a7788ad57007c48e791d54b1a7548bf08ad43d33acc25725c50
+size 2585
diff --git a/test/data/s2_10m_patches_A.tif b/test/data/s2_10m_patches_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..d8158f8af59ec0a39d830f969460e24a63b7464b
--- /dev/null
+++ b/test/data/s2_10m_patches_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ab610d8a0e08216e16f7a30e5cb55c3d5ddc60415f2a2475adcf33336c22aa9
+size 9679811
diff --git a/test/data/s2_10m_patches_B.tif b/test/data/s2_10m_patches_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..3e913acca0a3075683a9e25ce6eb0f86dc5e76c1
--- /dev/null
+++ b/test/data/s2_10m_patches_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a3ba3be2e55f9b21a6d98f545e6f9b3d7bcc29bb1b93d5a421baf67adf99638
+size 9729377
diff --git a/test/data/s2_20m_patches_A.tif b/test/data/s2_20m_patches_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..a1fbc0f85ca06865b1874b632131eae1e7fc5e25
--- /dev/null
+++ b/test/data/s2_20m_patches_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c12c791b081c86c14656d939be06e21a4b5498b7a15032e08bcfb9d579c3f877
+size 3505547
diff --git a/test/data/s2_20m_patches_B.tif b/test/data/s2_20m_patches_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..b2cf78abdb1c27b8f1724aa1ebe6dc7753587714
--- /dev/null
+++ b/test/data/s2_20m_patches_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4923867f94fbb3d3cd2708161bb1f0c19348b75b4a632d2d67a3f152238a3fca
+size 3522046
diff --git a/test/data/s2_20m_stack.jp2 b/test/data/s2_20m_stack.jp2
new file mode 100644
index 0000000000000000000000000000000000000000..15c2267b9f985b10ac7c6bbe77736328e90e2c46
--- /dev/null
+++ b/test/data/s2_20m_stack.jp2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c62408e325e5208be7c3ff5c5ff541f349e599121aae1f93c46b46a041111fe
+size 12584832
diff --git a/test/data/s2_labels_A.tif b/test/data/s2_labels_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..ebe395d82e7fe4272574b62107e1c26337c19de7
--- /dev/null
+++ b/test/data/s2_labels_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b01c44ca2bfe1f2710cc70bbaaca67321aa935a8cc112f28b501dcd4261a0fe7
+size 2474
diff --git a/test/data/s2_labels_B.tif b/test/data/s2_labels_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..aa2a2872058e2bd25fd6fe70e3b67070db84f9b5
--- /dev/null
+++ b/test/data/s2_labels_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e8bc2b9acc30f2c44e4fbd3702cfb8dccef1845d3d6c5d84b39328f6170d0e3
+size 2485
diff --git a/test/data/s2_patches_A.tif b/test/data/s2_patches_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..6ae3bb063854dda12dd8d2023a6e807aff1e7982
--- /dev/null
+++ b/test/data/s2_patches_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:473e6a13ff57123af49bc4b8437db96907dc33c7cc2a76d99bed0d2392c8098d
+size 9679709
diff --git a/test/data/s2_patches_B.tif b/test/data/s2_patches_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..05820924931db42dfdaf58e0d4807b3de18a62c3
--- /dev/null
+++ b/test/data/s2_patches_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d240e18c87a9f015dfda0600c8359dd20fc20eae66336905230c6848c482400e
+size 9729275
diff --git a/test/data/s2_stack.jp2 b/test/data/s2_stack.jp2
new file mode 100644
index 0000000000000000000000000000000000000000..630659fd44994392bc8606aadd3081f2d7f02837
--- /dev/null
+++ b/test/data/s2_stack.jp2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:091a491118dbee603f8d9cac66c130cc31758a4d5745e7877ad1ee760ad5224e
+size 33554488
diff --git a/test/data/terrain_truth_epsg32654_A.tif b/test/data/terrain_truth_epsg32654_A.tif
new file mode 100644
index 0000000000000000000000000000000000000000..e738b06e551a33e735178905112d8aeb2ce33426
--- /dev/null
+++ b/test/data/terrain_truth_epsg32654_A.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c26a60d41c0d4fff3edc79e3c5fefef88354bd7b3e845073ab7d087da67d3ff
+size 517796
diff --git a/test/data/terrain_truth_epsg32654_B.tif b/test/data/terrain_truth_epsg32654_B.tif
new file mode 100644
index 0000000000000000000000000000000000000000..31e013f1f0829126a9cb3a1d72a5067a83b3895d
--- /dev/null
+++ b/test/data/terrain_truth_epsg32654_B.tif
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60f1a69609fbc09c8b1007c40adebd9e01aee96a9d3bf4c32aade7e16c29e784
+size 507062
diff --git a/test/data/xs_subset.tif b/test/data/xs_subset.tif
index 36305231f01bd613e8097a5f85a7a4135672d45a..153a2bbdce7883e799767e709b5b71d1ef06f452 100644
Binary files a/test/data/xs_subset.tif and b/test/data/xs_subset.tif differ
diff --git a/test/models/model1/saved_model.pb b/test/models/model1/saved_model.pb
index b22330c86c0b108daf4ea562bb5fda88b97879e8..48fd1b8c0126ba2a0babdb974d10098e7500a192 100644
Binary files a/test/models/model1/saved_model.pb and b/test/models/model1/saved_model.pb differ
diff --git a/test/models/model1/variables/variables.data-00000-of-00001 b/test/models/model1/variables/variables.data-00000-of-00001
index 2aba6b57990bc65e95e0df46c9d80bb2f9b59d1c..16e7084333fe8905a0e827f9c93721258a7aa6a2 100644
Binary files a/test/models/model1/variables/variables.data-00000-of-00001 and b/test/models/model1/variables/variables.data-00000-of-00001 differ
diff --git a/test/models/model2/saved_model.pb b/test/models/model2/saved_model.pb
index c809fdcef72a36d7a2f2d19a70c7f39fbce0a609..345bf20759695c9ef9a3b7cdcea68de73007cdca 100644
Binary files a/test/models/model2/saved_model.pb and b/test/models/model2/saved_model.pb differ
diff --git a/test/models/model2/variables/variables.data-00000-of-00001 b/test/models/model2/variables/variables.data-00000-of-00001
index 60cb472a8c493715335c853eafbbe4467b53fe0c..18add85af3c57ee1f868720b21fcfec4a857c8bd 100644
Binary files a/test/models/model2/variables/variables.data-00000-of-00001 and b/test/models/model2/variables/variables.data-00000-of-00001 differ
diff --git a/test/models/model3/saved_model.pb b/test/models/model3/saved_model.pb
index 84e1bba53549a0c811fb9d7ccf46bb4e3e960b45..a48e85adbf1fd5ad2e1a7cd4c36a8721794bb922 100644
Binary files a/test/models/model3/saved_model.pb and b/test/models/model3/saved_model.pb differ
diff --git a/test/models/model3/variables/variables.data-00000-of-00001 b/test/models/model3/variables/variables.data-00000-of-00001
index 6d507b96f7b3affe8a7b4c3bd224d6a1f58df7c8..aa215b47f3f6326c99c2b5e4690e2ac4d23eba47 100644
Binary files a/test/models/model3/variables/variables.data-00000-of-00001 and b/test/models/model3/variables/variables.data-00000-of-00001 differ
diff --git a/test/models/model4/saved_model.pb b/test/models/model4/saved_model.pb
index d6920be621f6ab486ed49abce1b97332306f5ac9..b9e1fa1fbff64b0bb7e80c6442af724c9c3cbb6e 100644
Binary files a/test/models/model4/saved_model.pb and b/test/models/model4/saved_model.pb differ
diff --git a/test/models/model4/variables/variables.data-00000-of-00001 b/test/models/model4/variables/variables.data-00000-of-00001
index 2837c8c97d562b7d107dd81636816db722d7ae9c..bc1f29ba9cc4d498ac53d650db76aaefe822d459 100644
Binary files a/test/models/model4/variables/variables.data-00000-of-00001 and b/test/models/model4/variables/variables.data-00000-of-00001 differ
diff --git a/test/models/model5/saved_model.pb b/test/models/model5/saved_model.pb
index 5c4373bc5152c7d08d7afc1fb719916c365e9a81..b173da41a974dc343c1948c7eb7d081b7d709105 100644
Binary files a/test/models/model5/saved_model.pb and b/test/models/model5/saved_model.pb differ
diff --git a/test/models/model5/variables/variables.data-00000-of-00001 b/test/models/model5/variables/variables.data-00000-of-00001
index 8025a877597414e2ccfa5ad6df4d353692f8b3fc..a29d56e6dcf4e1a7d4cd76f92d63d3cb56eee8f5 100644
Binary files a/test/models/model5/variables/variables.data-00000-of-00001 and b/test/models/model5/variables/variables.data-00000-of-00001 differ
diff --git a/test/sr4rs_unittest.py b/test/sr4rs_unittest.py
index fbb921f8451cc83b3fd7b9e9e90bf61755511eca..89c3945f153304d04d35c23ef34513cf668120bc 100644
--- a/test/sr4rs_unittest.py
+++ b/test/sr4rs_unittest.py
@@ -4,8 +4,7 @@
 import unittest
 import os
 from pathlib import Path
-import gdal
-import otbApplication as otb
+import test_utils
 
 
 def command_train_succeed(extra_opts=""):
@@ -54,21 +53,7 @@ class SR4RSv1Test(unittest.TestCase):
         command += "--output '{}?&box=256:256:512:512'".format(out_img)
         os.system(command)
 
-        nbchannels_reconstruct = gdal.Open(out_img).RasterCount
-        nbchannels_baseline = gdal.Open(baseline).RasterCount
-
-        self.assertTrue(nbchannels_reconstruct == nbchannels_baseline)
-
-        for i in range(1, 1 + nbchannels_baseline):
-            comp = otb.Registry.CreateApplication('CompareImages')
-            comp.SetParameterString('ref.in', baseline)
-            comp.SetParameterInt('ref.channel', i)
-            comp.SetParameterString('meas.in', out_img)
-            comp.SetParameterInt('meas.channel', i)
-            comp.Execute()
-            mae = comp.GetParameterFloat('mae')
-
-            self.assertTrue(mae < 0.01)
+        self.assertTrue(test_utils.compare(baseline, out_img))
 
 
 if __name__ == '__main__':
diff --git a/test/test_utils.py b/test/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c07301e91bb2eab4a808d53beaa46396c3ea82fc
--- /dev/null
+++ b/test/test_utils.py
@@ -0,0 +1,56 @@
+import otbApplication
+import os
+
+
+def get_nb_of_channels(raster):
+    """
+    Return the number of channels in the input raster
+    :param raster: raster filename (str)
+    :return the number of channels in the image (int)
+    """
+    info = otbApplication.Registry.CreateApplication("ReadImageInfo")
+    info.SetParameterString("in", raster)
+    info.ExecuteAndWriteOutput()
+    return info.GetParameterInt('numberbands')
+
+
+def compare(raster1, raster2, tol=0.01):
+    """
+    Return True if the two rasters have the same contents in each bands
+    :param raster1: raster 1 filename (str)
+    :param raster2: raster 2 filename (str)
+    :param tol: tolerance (float)
+    """
+    n_bands1 = get_nb_of_channels(raster1)
+    n_bands2 = get_nb_of_channels(raster2)
+    if n_bands1 != n_bands2:
+        print("The images have not the same number of channels")
+        return False
+
+    for i in range(1, 1 + n_bands1):
+        comp = otbApplication.Registry.CreateApplication('CompareImages')
+        comp.SetParameterString('ref.in', raster1)
+        comp.SetParameterInt('ref.channel', i)
+        comp.SetParameterString('meas.in', raster2)
+        comp.SetParameterInt('meas.channel', i)
+        comp.Execute()
+        mae = comp.GetParameterFloat('mae')
+        if mae > tol:
+            print("The images have not the same content in channel {} "
+                  "(Mean average error: {})".format(i, mae))
+            return False
+    return True
+
+
+def resolve_paths(filename, var_list):
+    """
+    Retrieve environment variables in paths
+    :param filename: file name
+    :params var_list: variable list
+    :return filename with retrieved environment variables
+    """
+    new_filename = filename
+    for var in var_list:
+        new_filename = new_filename.replace("${}".format(var), os.environ[var])
+    print("Resolve filename...\n\tfilename: {}, \n\tnew filename: {}".format(filename, new_filename))
+    return new_filename
diff --git a/test/tutorial_unittest.py b/test/tutorial_unittest.py
new file mode 100644
index 0000000000000000000000000000000000000000..7934862f348326de13a11eef4c81a4fe6512cec6
--- /dev/null
+++ b/test/tutorial_unittest.py
@@ -0,0 +1,513 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import pytest
+import unittest
+import os
+from pathlib import Path
+import test_utils
+
+INFERENCE_MAE_TOL = 10.0  # Dummy value: we don't really care of the mae value but rather the image size etc
+
+
+def resolve_paths(path):
+    """
+    Resolve a path with the environment variables
+    """
+    return test_utils.resolve_paths(path, var_list=["TMPDIR", "DATADIR"])
+
+
+def run_command(command):
+    """
+    Run a command
+    :param command: the command to run
+    """
+    full_command = resolve_paths(command)
+    print("Running command: \n\t {}".format(full_command))
+    os.system(full_command)
+
+
+def run_command_and_test_exist(command, file_list):
+    """
+    :param command: the command to run (str)
+    :param file_list: list of files to check
+    :return True or False
+    """
+    run_command(command)
+    print("Checking if files exist...")
+    for file in file_list:
+        print("\t{}".format(file))
+        path = Path(resolve_paths(file))
+        if not path.is_file():
+            print("File {} does not exist!".format(file))
+            return False
+        print("\tOk")
+    return True
+
+
+def run_command_and_compare(command, to_compare_dict, tol=0.01):
+    """
+    :param command: the command to run (str)
+    :param to_compare_dict: a dict of {baseline1: output1, ..., baselineN: outputN}
+    :param tol: tolerance (float)
+    :return True or False
+    """
+
+    run_command(command)
+    for baseline, output in to_compare_dict.items():
+        if not test_utils.compare(resolve_paths(baseline), resolve_paths(output), tol):
+            print("Baseline {} and output {} differ.".format(baseline, output))
+            return False
+    return True
+
+
+class TutorialTest(unittest.TestCase):
+
+    @pytest.mark.order(1)
+    def test_sample_selection(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_LabelImageSampleSelection "
+                        "-inref $DATADIR/terrain_truth_epsg32654_A.tif "
+                        "-nodata 255 "
+                        "-outvec $TMPDIR/outvec_A.gpkg",
+                file_list=["$TMPDIR/outvec_A.gpkg"]))
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_LabelImageSampleSelection "
+                        "-inref $DATADIR/terrain_truth_epsg32654_B.tif "
+                        "-nodata 255 "
+                        "-outvec $TMPDIR/outvec_B.gpkg",
+                file_list=["$TMPDIR/outvec_B.gpkg"]))
+
+    @pytest.mark.order(2)
+    def test_patches_extraction(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command="otbcli_PatchesExtraction "
+                        "-source1.il $DATADIR/s2_stack.jp2 "
+                        "-source1.out $TMPDIR/s2_patches_A.tif "
+                        "-source1.patchsizex 16 "
+                        "-source1.patchsizey 16 "
+                        "-vec $TMPDIR/outvec_A.gpkg "
+                        "-field class "
+                        "-outlabels $TMPDIR/s2_labels_A.tif",
+                to_compare_dict={"$DATADIR/s2_patches_A.tif": "$TMPDIR/s2_patches_A.tif",
+                                 "$DATADIR/s2_labels_A.tif": "$TMPDIR/s2_labels_A.tif"}))
+        self.assertTrue(
+            run_command_and_compare(
+                command="otbcli_PatchesExtraction "
+                        "-source1.il $DATADIR/s2_stack.jp2 "
+                        "-source1.out $TMPDIR/s2_patches_B.tif "
+                        "-source1.patchsizex 16 "
+                        "-source1.patchsizey 16 "
+                        "-vec $TMPDIR/outvec_B.gpkg "
+                        "-field class "
+                        "-outlabels $TMPDIR/s2_labels_B.tif",
+                to_compare_dict={"$DATADIR/s2_patches_B.tif": "$TMPDIR/s2_patches_B.tif",
+                                 "$DATADIR/s2_labels_B.tif": "$TMPDIR/s2_labels_B.tif"}))
+
+    @pytest.mark.order(3)
+    def test_generate_model1(self):
+        run_command("git clone https://github.com/remicres/otbtf_tutorials_resources.git "
+                    "$TMPDIR/otbtf_tuto_repo")
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="python $TMPDIR/otbtf_tuto_repo/01_patch_based_classification/models/create_model1.py "
+                        "$TMPDIR/model1",
+                file_list=["$TMPDIR/model1/saved_model.pb"]))
+
+    @pytest.mark.order(4)
+    def test_model1_train(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_TensorflowModelTrain "
+                        "-training.source1.il $DATADIR/s2_patches_A.tif "
+                        "-training.source1.patchsizex 16 "
+                        "-training.source1.patchsizey 16 "
+                        "-training.source1.placeholder x "
+                        "-training.source2.il $DATADIR/s2_labels_A.tif "
+                        "-training.source2.patchsizex 1 "
+                        "-training.source2.patchsizey 1 "
+                        "-training.source2.placeholder y "
+                        "-model.dir $TMPDIR/model1 "
+                        "-training.targetnodes optimizer "
+                        "-training.epochs 10 "
+                        "-validation.mode class "
+                        "-validation.source1.il $DATADIR/s2_patches_B.tif "
+                        "-validation.source1.name x "
+                        "-validation.source2.il $DATADIR/s2_labels_B.tif "
+                        "-validation.source2.name prediction "
+                        "-model.saveto $TMPDIR/model1/variables/variables",
+                file_list=["$TMPDIR/model1/variables/variables.index"]
+            )
+        )
+
+    @pytest.mark.order(5)
+    def test_model1_inference_pb(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command="otbcli_TensorflowModelServe "
+                        "-source1.il $DATADIR/s2_stack.jp2 "
+                        "-source1.rfieldx 16 "
+                        "-source1.rfieldy 16 "
+                        "-source1.placeholder x "
+                        "-model.dir $TMPDIR/model1 "
+                        "-output.names prediction "
+                        "-out \"$TMPDIR/classif_model1.tif?&box=4000:4000:1000:1000\" uint8",
+                to_compare_dict={"$DATADIR/classif_model1.tif": "$TMPDIR/classif_model1.tif"},
+                tol=INFERENCE_MAE_TOL))
+
+    @pytest.mark.order(6)
+    def test_model1_inference_fcn(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command="otbcli_TensorflowModelServe "
+                        "-source1.il $DATADIR/s2_stack.jp2 "
+                        "-source1.rfieldx 16 "
+                        "-source1.rfieldy 16 "
+                        "-source1.placeholder x "
+                        "-model.dir $TMPDIR/model1 "
+                        "-output.names prediction "
+                        "-model.fullyconv on "
+                        "-output.spcscale 4 "
+                        "-out \"$TMPDIR/classif_model1.tif?&box=1000:1000:256:256\" uint8",
+                to_compare_dict={"$DATADIR/classif_model1.tif": "$TMPDIR/classif_model1.tif"},
+                tol=INFERENCE_MAE_TOL))
+
+    @pytest.mark.order(7)
+    def test_rf_sampling(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_SampleExtraction "
+                        "-in $DATADIR/s2_stack.jp2 "
+                        "-vec $TMPDIR/outvec_A.gpkg "
+                        "-field class "
+                        "-out $TMPDIR/pixelvalues_A.gpkg",
+                file_list=["$TMPDIR/pixelvalues_A.gpkg"]))
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_SampleExtraction "
+                        "-in $DATADIR/s2_stack.jp2 "
+                        "-vec $TMPDIR/outvec_B.gpkg "
+                        "-field class "
+                        "-out $TMPDIR/pixelvalues_B.gpkg",
+                file_list=["$TMPDIR/pixelvalues_B.gpkg"]))
+
+    @pytest.mark.order(8)
+    def test_rf_training(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_TrainVectorClassifier "
+                        "-io.vd $TMPDIR/pixelvalues_A.gpkg "
+                        "-valid.vd $TMPDIR/pixelvalues_B.gpkg "
+                        "-feat value_0 value_1 value_2 value_3 "
+                        "-cfield class "
+                        "-classifier rf "
+                        "-io.out $TMPDIR/randomforest_model.yaml ",
+                file_list=["$TMPDIR/randomforest_model.yaml"]))
+
+    @pytest.mark.order(9)
+    def test_generate_model2(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="python $TMPDIR/otbtf_tuto_repo/01_patch_based_classification/models/create_model2.py "
+                        "$TMPDIR/model2",
+                file_list=["$TMPDIR/model2/saved_model.pb"]))
+
+    @pytest.mark.order(10)
+    def test_model2_train(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_TensorflowModelTrain "
+                        "-training.source1.il $DATADIR/s2_patches_A.tif "
+                        "-training.source1.patchsizex 16 "
+                        "-training.source1.patchsizey 16 "
+                        "-training.source1.placeholder x "
+                        "-training.source2.il $DATADIR/s2_labels_A.tif "
+                        "-training.source2.patchsizex 1 "
+                        "-training.source2.patchsizey 1 "
+                        "-training.source2.placeholder y "
+                        "-model.dir $TMPDIR/model2 "
+                        "-training.targetnodes optimizer "
+                        "-training.epochs 10 "
+                        "-validation.mode class "
+                        "-validation.source1.il $DATADIR/s2_patches_B.tif "
+                        "-validation.source1.name x "
+                        "-validation.source2.il $DATADIR/s2_labels_B.tif "
+                        "-validation.source2.name prediction "
+                        "-model.saveto $TMPDIR/model2/variables/variables",
+                file_list=["$TMPDIR/model2/variables/variables.index"]))
+
+    @pytest.mark.order(11)
+    def test_model2_inference_fcn(self):
+        self.assertTrue(
+            run_command_and_compare(command="otbcli_TensorflowModelServe "
+                                            "-source1.il $DATADIR/s2_stack.jp2 "
+                                            "-source1.rfieldx 16 "
+                                            "-source1.rfieldy 16 "
+                                            "-source1.placeholder x "
+                                            "-model.dir $TMPDIR/model2 "
+                                            "-model.fullyconv on "
+                                            "-output.names prediction "
+                                            "-out \"$TMPDIR/classif_model2.tif?&box=4000:4000:1000:1000\" uint8",
+                                    to_compare_dict={"$DATADIR/classif_model2.tif": "$TMPDIR/classif_model2.tif"},
+                                    tol=INFERENCE_MAE_TOL))
+
+    @pytest.mark.order(12)
+    def test_model2rf_train(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_TrainClassifierFromDeepFeatures "
+                        "-source1.il $DATADIR/s2_stack.jp2 "
+                        "-source1.rfieldx 16 "
+                        "-source1.rfieldy 16 "
+                        "-source1.placeholder x "
+                        "-model.dir $TMPDIR/model2 "
+                        "-model.fullyconv on "
+                        "-optim.tilesizex 999999 "
+                        "-optim.tilesizey 128 "
+                        "-output.names features "
+                        "-vd $TMPDIR/outvec_A.gpkg "
+                        "-valid $TMPDIR/outvec_B.gpkg "
+                        "-sample.vfn class "
+                        "-sample.bm 0 "
+                        "-classifier rf "
+                        "-out $TMPDIR/RF_model_from_deep_features.yaml",
+                file_list=["$TMPDIR/RF_model_from_deep_features.yaml"]))
+
+    @pytest.mark.order(13)
+    def test_model2rf_inference(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command="otbcli_ImageClassifierFromDeepFeatures "
+                        "-source1.il $DATADIR/s2_stack.jp2 "
+                        "-source1.rfieldx 16 "
+                        "-source1.rfieldy 16 "
+                        "-source1.placeholder x "
+                        "-deepmodel.dir $TMPDIR/model2 "
+                        "-deepmodel.fullyconv on "
+                        "-output.names features "
+                        "-model $TMPDIR/RF_model_from_deep_features.yaml "
+                        "-out \"$TMPDIR/RF_model_from_deep_features_map.tif?&box=4000:4000:1000:1000\" uint8",
+                to_compare_dict={
+                    "$DATADIR/RF_model_from_deep_features_map.tif": "$TMPDIR/RF_model_from_deep_features_map.tif"},
+                tol=INFERENCE_MAE_TOL))
+
+    @pytest.mark.order(14)
+    def test_patch_extraction_20m(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command="OTB_TF_NSOURCES=2 otbcli_PatchesExtraction "
+                        "-source1.il $DATADIR/s2_20m_stack.jp2 "
+                        "-source1.patchsizex 8 "
+                        "-source1.patchsizey 8 "
+                        "-source1.out $TMPDIR/s2_20m_patches_A.tif "
+                        "-source2.il $DATADIR/s2_stack.jp2 "
+                        "-source2.patchsizex 16 "
+                        "-source2.patchsizey 16 "
+                        "-source2.out $TMPDIR/s2_10m_patches_A.tif "
+                        "-vec $TMPDIR/outvec_A.gpkg "
+                        "-field class "
+                        "-outlabels $TMPDIR/s2_10m_labels_A.tif uint8",
+                to_compare_dict={"$DATADIR/s2_10m_labels_A.tif": "$TMPDIR/s2_10m_labels_A.tif",
+                                 "$DATADIR/s2_10m_patches_A.tif": "$TMPDIR/s2_10m_patches_A.tif",
+                                 "$DATADIR/s2_20m_patches_A.tif": "$TMPDIR/s2_20m_patches_A.tif"}))
+        self.assertTrue(
+            run_command_and_compare(
+                command="OTB_TF_NSOURCES=2 otbcli_PatchesExtraction "
+                        "-source1.il $DATADIR/s2_20m_stack.jp2 "
+                        "-source1.patchsizex 8 "
+                        "-source1.patchsizey 8 "
+                        "-source1.out $TMPDIR/s2_20m_patches_B.tif "
+                        "-source2.il $DATADIR/s2_stack.jp2 "
+                        "-source2.patchsizex 16 "
+                        "-source2.patchsizey 16 "
+                        "-source2.out $TMPDIR/s2_10m_patches_B.tif "
+                        "-vec $TMPDIR/outvec_B.gpkg "
+                        "-field class "
+                        "-outlabels $TMPDIR/s2_10m_labels_B.tif uint8",
+                to_compare_dict={"$DATADIR/s2_10m_labels_B.tif": "$TMPDIR/s2_10m_labels_B.tif",
+                                 "$DATADIR/s2_10m_patches_B.tif": "$TMPDIR/s2_10m_patches_B.tif",
+                                 "$DATADIR/s2_20m_patches_B.tif": "$TMPDIR/s2_20m_patches_B.tif"}))
+
+    @pytest.mark.order(15)
+    def test_generate_model3(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="python $TMPDIR/otbtf_tuto_repo/01_patch_based_classification/models/create_model3.py "
+                        "$TMPDIR/model3",
+                file_list=["$TMPDIR/model3/saved_model.pb"]))
+
+    @pytest.mark.order(16)
+    def test_model3_train(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="OTB_TF_NSOURCES=2 otbcli_TensorflowModelTrain "
+                        "-training.source1.il $DATADIR/s2_20m_patches_A.tif "
+                        "-training.source1.patchsizex 8 "
+                        "-training.source1.patchsizey 8 "
+                        "-training.source1.placeholder x1 "
+                        "-training.source2.il $DATADIR/s2_10m_patches_A.tif "
+                        "-training.source2.patchsizex 16 "
+                        "-training.source2.patchsizey 16 "
+                        "-training.source2.placeholder x2 "
+                        "-training.source3.il $DATADIR/s2_10m_labels_A.tif "
+                        "-training.source3.patchsizex 1 "
+                        "-training.source3.patchsizey 1 "
+                        "-training.source3.placeholder y "
+                        "-model.dir $TMPDIR/model3 "
+                        "-training.targetnodes optimizer "
+                        "-training.epochs 10 "
+                        "-validation.mode class "
+                        "-validation.source1.il $DATADIR/s2_20m_patches_B.tif "
+                        "-validation.source1.name x1 "
+                        "-validation.source2.il $DATADIR/s2_10m_patches_B.tif "
+                        "-validation.source2.name x2 "
+                        "-validation.source3.il $DATADIR/s2_10m_labels_B.tif "
+                        "-validation.source3.name prediction "
+                        "-model.saveto $TMPDIR/model3/variables/variables",
+                file_list=["$TMPDIR/model3/variables/variables.index"]))
+
+    @pytest.mark.order(17)
+    def test_model3_inference_pb(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command=
+                "OTB_TF_NSOURCES=2 otbcli_TensorflowModelServe "
+                "-source1.il $DATADIR/s2_20m_stack.jp2 "
+                "-source1.rfieldx 8 "
+                "-source1.rfieldy 8 "
+                "-source1.placeholder x1 "
+                "-source2.il $DATADIR/s2_stack.jp2 "
+                "-source2.rfieldx 16 "
+                "-source2.rfieldy 16 "
+                "-source2.placeholder x2 "
+                "-model.dir $TMPDIR/model3 "
+                "-output.names prediction "
+                "-out \"$TMPDIR/classif_model3_pb.tif?&box=2000:2000:500:500&gdal:co:compress=deflate\"",
+                to_compare_dict={"$DATADIR/classif_model3_pb.tif": "$TMPDIR/classif_model3_pb.tif"},
+                tol=INFERENCE_MAE_TOL))
+
+    @pytest.mark.order(18)
+    def test_model3_inference_fcn(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command=
+                "OTB_TF_NSOURCES=2 otbcli_TensorflowModelServe "
+                "-source1.il $DATADIR/s2_20m_stack.jp2 "
+                "-source1.rfieldx 8 "
+                "-source1.rfieldy 8 "
+                "-source1.placeholder x1 "
+                "-source2.il $DATADIR/s2_stack.jp2 "
+                "-source2.rfieldx 16 "
+                "-source2.rfieldy 16 "
+                "-source2.placeholder x2 "
+                "-model.dir $TMPDIR/model3 "
+                "-model.fullyconv on "
+                "-output.names prediction "
+                "-out \"$TMPDIR/classif_model3_fcn.tif?&box=2000:2000:500:500&gdal:co:compress=deflate\"",
+                to_compare_dict={"$DATADIR/classif_model3_fcn.tif": "$TMPDIR/classif_model3_fcn.tif"},
+                tol=INFERENCE_MAE_TOL))
+
+    @pytest.mark.order(19)
+    def test_generate_model4(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="python $TMPDIR/otbtf_tuto_repo/02_semantic_segmentation/models/create_model4.py "
+                        "$TMPDIR/model4",
+                file_list=["$TMPDIR/model4/saved_model.pb"]))
+
+    @pytest.mark.order(20)
+    def test_patches_selection_semseg(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="otbcli_PatchesSelection "
+                        "-in $DATADIR/fake_spot6.jp2 "
+                        "-grid.step 64 "
+                        "-grid.psize 64 "
+                        "-outtrain $TMPDIR/outvec_A_semseg.gpkg "
+                        "-outvalid $TMPDIR/outvec_B_semseg.gpkg",
+                file_list=["$TMPDIR/outvec_A_semseg.gpkg",
+                           "$TMPDIR/outvec_B_semseg.gpkg"]))
+
+    @pytest.mark.order(21)
+    def test_patch_extraction_semseg(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command="OTB_TF_NSOURCES=2 otbcli_PatchesExtraction "
+                        "-source1.il $DATADIR/fake_spot6.jp2 "
+                        "-source1.patchsizex 64 "
+                        "-source1.patchsizey 64 "
+                        "-source1.out \"$TMPDIR/amsterdam_patches_A.tif?&gdal:co:compress=deflate\" "
+                        "-source2.il $TMPDIR/otbtf_tuto_repo/02_semantic_segmentation/"
+                        "amsterdam_dataset/terrain_truth/amsterdam_labelimage.tif "
+                        "-source2.patchsizex 64 "
+                        "-source2.patchsizey 64 "
+                        "-source2.out \"$TMPDIR/amsterdam_labels_A.tif?&gdal:co:compress=deflate\" "
+                        "-vec $TMPDIR/outvec_A_semseg.gpkg "
+                        "-field id ",
+                to_compare_dict={"$DATADIR/amsterdam_labels_A.tif": "$TMPDIR/amsterdam_labels_A.tif",
+                                 "$DATADIR/amsterdam_patches_A.tif": "$TMPDIR/amsterdam_patches_A.tif"}))
+        self.assertTrue(
+            run_command_and_compare(
+                command="OTB_TF_NSOURCES=2 otbcli_PatchesExtraction "
+                        "-source1.il $DATADIR/fake_spot6.jp2 "
+                        "-source1.patchsizex 64 "
+                        "-source1.patchsizey 64 "
+                        "-source1.out \"$TMPDIR/amsterdam_patches_B.tif?&gdal:co:compress=deflate\" "
+                        "-source2.il $TMPDIR/otbtf_tuto_repo/02_semantic_segmentation/"
+                        "amsterdam_dataset/terrain_truth/amsterdam_labelimage.tif "
+                        "-source2.patchsizex 64 "
+                        "-source2.patchsizey 64 "
+                        "-source2.out \"$TMPDIR/amsterdam_labels_B.tif?&gdal:co:compress=deflate\" "
+                        "-vec $TMPDIR/outvec_B_semseg.gpkg "
+                        "-field id ",
+                to_compare_dict={"$DATADIR/amsterdam_labels_B.tif": "$TMPDIR/amsterdam_labels_B.tif",
+                                 "$DATADIR/amsterdam_patches_B.tif": "$TMPDIR/amsterdam_patches_B.tif"}))
+
+    @pytest.mark.order(22)
+    def test_model4_train(self):
+        self.assertTrue(
+            run_command_and_test_exist(
+                command="OTB_TF_NSOURCES=1 otbcli_TensorflowModelTrain "
+                        "-training.source1.il $DATADIR/amsterdam_patches_A.tif "
+                        "-training.source1.patchsizex 64 "
+                        "-training.source1.patchsizey 64 "
+                        "-training.source1.placeholder x "
+                        "-training.source2.il $DATADIR/amsterdam_labels_A.tif "
+                        "-training.source2.patchsizex 64 "
+                        "-training.source2.patchsizey 64 "
+                        "-training.source2.placeholder y "
+                        "-model.dir $TMPDIR/model4 "
+                        "-training.targetnodes optimizer "
+                        "-training.epochs 10 "
+                        "-validation.mode class "
+                        "-validation.source1.il $DATADIR/amsterdam_patches_B.tif "
+                        "-validation.source1.name x "
+                        "-validation.source2.il $DATADIR/amsterdam_labels_B.tif "
+                        "-validation.source2.name prediction "
+                        "-model.saveto $TMPDIR/model4/variables/variables",
+                file_list=["$TMPDIR/model4/variables/variables.index"]))
+
+    @pytest.mark.order(23)
+    def test_model4_inference(self):
+        self.assertTrue(
+            run_command_and_compare(
+                command=
+                "otbcli_TensorflowModelServe "
+                "-source1.il $DATADIR/fake_spot6.jp2 "
+                "-source1.rfieldx 64 "
+                "-source1.rfieldy 64 "
+                "-source1.placeholder x "
+                "-model.dir $TMPDIR/model4 "
+                "-model.fullyconv on "
+                "-output.names prediction_fcn "
+                "-output.efieldx 32 "
+                "-output.efieldy 32 "
+                "-out \"$TMPDIR/classif_model4.tif?&gdal:co:compress=deflate\" uint8",
+                to_compare_dict={"$DATADIR/classif_model4.tif": "$TMPDIR/classif_model4.tif"},
+                tol=INFERENCE_MAE_TOL))
+
+
# Entry point: lets this test module be run directly with the unittest
# runner, in addition to pytest discovery.
if __name__ == '__main__':
    unittest.main()