From a9efadb3e84b1a09364c9933d3ddb7d0b398e0a9 Mon Sep 17 00:00:00 2001 From: Tomas Hodan Date: Fri, 19 Jul 2019 22:37:44 +0200 Subject: [PATCH] The first public version. --- .gitignore | 90 + LICENSE | 21 + README.md | 48 + bop_toolkit_lib/__init__.py | 0 bop_toolkit_lib/colors.json | 32 + bop_toolkit_lib/config.py | 23 + bop_toolkit_lib/dataset_params.py | 321 +++ bop_toolkit_lib/droid_sans_mono.ttf | Bin 0 -> 119380 bytes bop_toolkit_lib/droid_sans_mono_license.txt | 201 ++ bop_toolkit_lib/inout.py | 655 ++++++ bop_toolkit_lib/misc.py | 378 ++++ bop_toolkit_lib/pose_error.py | 330 +++ bop_toolkit_lib/pose_matching.py | 160 ++ bop_toolkit_lib/renderer.py | 97 + bop_toolkit_lib/renderer_cpp.py | 50 + bop_toolkit_lib/renderer_py.py | 555 +++++ bop_toolkit_lib/score.py | 169 ++ bop_toolkit_lib/transform.py | 1917 +++++++++++++++++ bop_toolkit_lib/view_sampler.py | 291 +++ bop_toolkit_lib/visibility.py | 75 + bop_toolkit_lib/visualization.py | 222 ++ docs/bop_datasets_format.md | 167 ++ requirements.txt | 11 + scripts/calc_gt_distribution.py | 121 ++ scripts/calc_gt_info.py | 172 ++ scripts/calc_gt_masks.py | 126 ++ scripts/calc_model_info.py | 54 + scripts/check_results_bop19.py | 46 + scripts/eval_bop19.py | 192 ++ scripts/eval_calc_errors.py | 407 ++++ scripts/eval_calc_scores.py | 256 +++ .../remesh_for_eval_cell=0.25.mlx | 35 + .../remesh_for_eval_cell=0.5.mlx | 35 + scripts/remesh_models_for_eval.py | 67 + scripts/render_train_imgs.py | 214 ++ scripts/vis_est_poses.py | 235 ++ scripts/vis_gt_poses.py | 209 ++ scripts/vis_object_symmetries.py | 99 + 38 files changed, 8081 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 bop_toolkit_lib/__init__.py create mode 100644 bop_toolkit_lib/colors.json create mode 100644 bop_toolkit_lib/config.py create mode 100644 bop_toolkit_lib/dataset_params.py create mode 100644 bop_toolkit_lib/droid_sans_mono.ttf create mode 100644 bop_toolkit_lib/droid_sans_mono_license.txt create mode 100644 bop_toolkit_lib/inout.py create mode 100644 bop_toolkit_lib/misc.py create mode 100644 bop_toolkit_lib/pose_error.py create mode 100644 bop_toolkit_lib/pose_matching.py create mode 100644 bop_toolkit_lib/renderer.py create mode 100644 bop_toolkit_lib/renderer_cpp.py create mode 100644 bop_toolkit_lib/renderer_py.py create mode 100644 bop_toolkit_lib/score.py create mode 100644 bop_toolkit_lib/transform.py create mode 100644 bop_toolkit_lib/view_sampler.py create mode 100644 bop_toolkit_lib/visibility.py create mode 100644 bop_toolkit_lib/visualization.py create mode 100644 docs/bop_datasets_format.md create mode 100644 requirements.txt create mode 100644 scripts/calc_gt_distribution.py create mode 100644 scripts/calc_gt_info.py create mode 100644 scripts/calc_gt_masks.py create mode 100644 scripts/calc_model_info.py create mode 100644 scripts/check_results_bop19.py create mode 100644 scripts/eval_bop19.py create mode 100644 scripts/eval_calc_errors.py create mode 100644 scripts/eval_calc_scores.py create mode 100644 scripts/meshlab_scripts/remesh_for_eval_cell=0.25.mlx create mode 100644 scripts/meshlab_scripts/remesh_for_eval_cell=0.5.mlx create mode 100644 scripts/remesh_models_for_eval.py create mode 100644 scripts/render_train_imgs.py create mode 100644 scripts/vis_est_poses.py create mode 100644 scripts/vis_gt_poses.py create mode 100644 scripts/vis_object_symmetries.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..61ab8ba --- /dev/null +++ b/.gitignore @@ -0,0 +1,90 
@@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..0d65e17 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Tomas Hodan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..629b778 --- /dev/null +++ b/README.md @@ -0,0 +1,48 @@ +# BOP Toolkit + +A Python toolkit of the BOP benchmark for 6D object pose estimation +(http://bop.felk.cvut.cz). + +- **bop_toolkit_lib** - The core Python library for i/o operations, calculation + of pose errors, Python based rendering etc. +- **docs** - Documentation and conventions. +- **scripts** - Scripts for evaluation, rendering of training images, + visualization of 6D object poses etc. + +## Installation + +### Python Dependencies + +To install the required python libraries, run: +``` +pip install -r requirements.txt +``` + +In the case of problems, try to first run: ```pip install --upgrade pip setuptools``` + +### Python Renderer + +The Python based renderer is implemented using +[Glumpy](https://glumpy.github.io/) which depends on +[freetype](https://www.freetype.org/) and [GLFW](https://www.glfw.org/). +Glumpy is installed using the pip command above. 
On Linux, freetype and GLFW can +be installed by: + +``` +apt-get install freetype +apt-get install libglfw3 +``` + +To install freetype and GLFW on Windows, follow [these instructions](https://glumpy.readthedocs.io/en/latest/installation.html#step-by-step-install-for-x64-bit-windows-7-8-and-10). + +GLFW serves as a backend of Glumpy. [Another backends](https://glumpy.readthedocs.io/en/latest/api/app-backends.html) +can be used but were not tested with our code. + +### C++ Renderer + +To speed up rendering, we recommend installing [bop_renderer](https://github.com/thodan/bop_renderer), +an off-screen C++ renderer with Python bindings. + +See *scripts/eval_calc_errors.py* for an example on how to use the Python and +C++ renderers - you can switch between them by setting *renderer_type* to +*'python'* or *'cpp'*. diff --git a/bop_toolkit_lib/__init__.py b/bop_toolkit_lib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bop_toolkit_lib/colors.json b/bop_toolkit_lib/colors.json new file mode 100644 index 0000000..1d0254d --- /dev/null +++ b/bop_toolkit_lib/colors.json @@ -0,0 +1,32 @@ +[ + [0.89, 0.28, 0.13], + [0.45, 0.38, 0.92], + [0.35, 0.73, 0.63], + [0.62, 0.28, 0.91], + [0.65, 0.71, 0.22], + [0.8, 0.29, 0.89], + [0.27, 0.55, 0.22], + [0.37, 0.46, 0.84], + [0.84, 0.63, 0.22], + [0.68, 0.29, 0.71], + [0.48, 0.75, 0.48], + [0.88, 0.27, 0.75], + [0.82, 0.45, 0.2], + [0.86, 0.27, 0.27], + [0.52, 0.49, 0.18], + [0.33, 0.67, 0.25], + [0.67, 0.42, 0.29], + [0.67, 0.46, 0.86], + [0.36, 0.72, 0.84], + [0.85, 0.29, 0.4], + [0.24, 0.53, 0.55], + [0.85, 0.55, 0.8], + [0.4, 0.51, 0.33], + [0.56, 0.38, 0.63], + [0.78, 0.66, 0.46], + [0.33, 0.5, 0.72], + [0.83, 0.31, 0.56], + [0.56, 0.61, 0.85], + [0.89, 0.58, 0.57], + [0.67, 0.4, 0.49] +] \ No newline at end of file diff --git a/bop_toolkit_lib/config.py b/bop_toolkit_lib/config.py new file mode 100644 index 0000000..e8d0741 --- /dev/null +++ b/bop_toolkit_lib/config.py @@ -0,0 +1,23 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Configuration of the BOP Toolkit.""" + + +# Folder with the BOP datasets. +datasets_path = r'/path/to/bop/datasets' + +# Folder for outputs (e.g. visualizations). +output_path = r'/path/to/output/folder' + +# Folder with results to be evaluated. +results_path = r'/path/to/folder/with/results' + +# Folder for the calculated pose errors and performance scores. +eval_path = r'/path/to/eval/folder' + +# Path to the build folder of bop_renderer (github.com/thodan/bop_renderer). +bop_renderer_path = r'/path/to/bop_renderer/build' + +# Executable of the MeshLab server. +meshlab_server_path = r'/path/to/meshlabserver.exe' diff --git a/bop_toolkit_lib/dataset_params.py b/bop_toolkit_lib/dataset_params.py new file mode 100644 index 0000000..1307b5c --- /dev/null +++ b/bop_toolkit_lib/dataset_params.py @@ -0,0 +1,321 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Parameters of the BOP datasets.""" + +import math +from os.path import join + +from bop_toolkit_lib import inout + + +def get_camera_params(datasets_path, dataset_name, cam_type=None): + """Return camera parameters for the specified dataset. + + Note that parameters returned by this functions are meant only for simulation + of the used sensor when rendering training images. 
To get per-image camera + parameters (which may vary), use path template 'scene_camera_tpath' contained + in the dictionary returned by function get_split_params. + + :param datasets_path: Path to a folder with datasets. + :param dataset_name: Name of the dataset for which to return the parameters. + :param cam_type: Type of camera. + :return: Dictionary with camera parameters for the specified dataset. + """ + # T-LESS includes images captured by three sensors. + if dataset_name == 'tless': + + # Use images from the Primesense sensor as default. + if cam_type is None: + cam_type = 'primesense' + cam_filename = 'camera_{}.json'.format(cam_type) + + else: + cam_filename = 'camera.json' + + # Path to the camera file. + cam_params_path = join(datasets_path, dataset_name, cam_filename) + + p = { + # Path to a file with camera parameters. + 'cam_params_path': cam_params_path, + } + + # Add a dictionary containing the intrinsic camera matrix ('K'), image size + # ('im_size'), and scale of the depth images ('depth_scale', optional). + p.update(inout.load_cam_params(cam_params_path)) + + return p + + +def get_model_params(datasets_path, dataset_name, model_type=None): + """Return parameters of object models for the specified dataset. + + :param datasets_path: Path to a folder with datasets. + :param dataset_name: Name of the dataset for which to return the parameters. + :param model_type: Type of object models. + :return: Dictionary with object model parameters for the specified dataset. + """ + # Object ID's. + obj_ids = { + 'lm': range(1, 16), + 'lmo': [1, 5, 6, 8, 9, 10, 11, 12], + 'tless': range(1, 31), + 'tudl': range(1, 4), + 'tyol': range(1, 22), + 'ruapc': range(1, 15), + 'icmi': range(1, 7), + 'icbin': range(1, 3), + 'itodd': range(1, 29), + 'hb': [1, 3, 4, 8, 9, 10, 12, 15, 17, 18, 19, 22, 23, 29, 32, 33], + # 'hb': range(1, 34), # Original HB dataset. + }[dataset_name] + + # ID's of objects with symmetries. + symmetric_obj_ids = { + 'lm': [3, 7, 10, 11], + 'lmo': [10, 11], + 'tless': range(1, 31), + 'tudl': [], + 'tyol': [3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21], + 'ruapc': [8, 9, 12, 13], + 'icmi': [1, 2, 6], + 'icbin': [1], + 'itodd': [2, 3, 4, 5, 7, 8, 9, 11, 12, 14, 17, 18, 19, 23, 24, 25, 27, 28], + 'hb': [6, 10, 11, 12, 13, 14, 18, 24, 29], + }[dataset_name] + + # T-LESS includes two types of object models, CAD and reconstructed. + # Use the CAD models as default. + if dataset_name == 'tless' and model_type is None: + model_type = 'cad' + + # Name of the folder with object models. + models_folder_name = 'models' + if model_type is not None: + models_folder_name += '_' + model_type + + # Path to the folder with object models. + models_path = join(datasets_path, dataset_name, models_folder_name) + + p = { + # ID's of all objects included in the dataset. + 'obj_ids': obj_ids, + + # ID's of objects with symmetries. + 'symmetric_obj_ids': symmetric_obj_ids, + + # Path template to an object model file. + 'model_tpath': join(models_path, 'obj_{obj_id:06d}.ply'), + + # Path to a file with meta information about the object models. + 'models_info_path': join(models_path, 'models_info.json') + } + + return p + + +def get_split_params(datasets_path, dataset_name, split, split_type=None): + """Return parameters (camera params, paths etc.) for the specified dataset. + + :param datasets_path: Path to a folder with datasets. + :param dataset_name: Name of the dataset for which to return the parameters. + :param split: Name of the dataset split ('train', 'val', 'test'). 
+ :param split_type: Name of the split type (e.g. for T-LESS, possible types of + the 'train' split are: 'primesense', 'render_reconst'). + :return: Dictionary with parameters for the specified dataset split. + """ + p = { + 'name': dataset_name, + 'split': split, + 'split_type': split_type, + 'base_path': join(datasets_path, dataset_name), + + 'depth_range': None, + 'azimuth_range': None, + 'elev_range': None, + } + + gray_ext = '.png' + rgb_ext = '.png' + depth_ext = '.png' + + p['im_modalities'] = ['rgb', 'depth'] + + # Linemod (LM). + if dataset_name == 'lm': + p['scene_ids'] = range(1, 16) + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (600.90, 1102.35) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (0, 0.5 * math.pi) + + # Linemod-Occluded (LM). + elif dataset_name == 'lmo': + p['scene_ids'] = {'train': [1, 5, 6, 8, 9, 10, 11, 12], 'test': [2]}[split] + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (346.31, 1499.84) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (0, 0.5 * math.pi) + + # T-LESS. + elif dataset_name == 'tless': + p['scene_ids'] = {'train': range(1, 31), 'test': range(1, 21)}[split] + + # Use images from the Primesense sensor by default. + if split_type is None: + split_type = 'primesense' + + p['im_size'] = { + 'train': { + 'primesense': (400, 400), + 'kinect': (400, 400), + 'canon': (1900, 1900), + 'render_reconst': (1280, 1024) + }, + 'test': { + 'primesense': (720, 540), + 'kinect': (720, 540), + 'canon': (2560, 1920) + } + }[split][split_type] + + # The following holds for Primesense, but is similar for the other sensors. + if split == 'test': + p['depth_range'] = (649.89, 940.04) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (-0.5 * math.pi, 0.5 * math.pi) + + # TU Dresden Light (TUD-L). + elif dataset_name == 'tudl': + if split == 'train' and split_type is None: + split_type = 'render' + + p['scene_ids'] = range(1, 4) + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (851.29, 2016.14) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (-0.4363, 0.5 * math.pi) # (-25, 90) [deg]. + + # Toyota Light (TYO-L). + elif dataset_name == 'tyol': + p['scene_ids'] = range(1, 22) + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (499.57, 1246.07) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (-0.5 * math.pi, 0.5 * math.pi) + + # Rutgers APC (RU-APC). + elif dataset_name == 'ruapc': + p['scene_ids'] = range(1, 15) + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (594.41, 739.12) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (-0.5 * math.pi, 0.5 * math.pi) + + # Tejani et al. (IC-MI). + elif dataset_name == 'icmi': + p['scene_ids'] = range(1, 7) + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (509.12, 1120.41) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (0, 0.5 * math.pi) + + # Doumanoglou et al. (IC-BIN). + elif dataset_name == 'icbin': + p['scene_ids'] = {'train': range(1, 3), 'test': range(1, 4)}[split] + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (454.56, 1076.29) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (-1.0297, 0.5 * math.pi) # (-59, 90) [deg]. + + # MVTec ITODD. 
+ elif dataset_name == 'itodd': + p['scene_ids'] = { + 'train': [], + 'val': [1], + 'test': [1] + }[split] + p['im_size'] = (1280, 960) + + gray_ext = '.tif' + depth_ext = '.tif' + p['im_modalities'] = ['gray', 'depth'] + + if split == 'test': + p['depth_range'] = None + p['azimuth_range'] = None + p['elev_range'] = None + + # HomebrewedDB (HB). + elif dataset_name == 'hb': + p['scene_ids'] = { + 'train': [], + 'val': [3, 5, 13], + 'test': [3, 5, 13], + }[split] + p['im_size'] = (640, 480) + + if split == 'test': + p['depth_range'] = (420.0, 1430.0) + p['azimuth_range'] = (0, 2 * math.pi) + p['elev_range'] = (0.1920, 1.5184) # (11, 87) [deg]. + + else: + raise ValueError('Unknown BOP dataset.') + + base_path = join(datasets_path, dataset_name) + split_path = join(base_path, split) + if split_type is not None: + split_path += '_' + split_type + + p.update({ + # Path template to a file with per-image camera parameters. + 'scene_camera_tpath': join( + split_path, '{scene_id:06d}', 'scene_camera.json'), + + # Path template to a file with GT annotations. + 'scene_gt_tpath': join( + split_path, '{scene_id:06d}', 'scene_gt.json'), + + # Path template to a file with meta information about the GT annotations. + 'scene_gt_info_tpath': join( + split_path, '{scene_id:06d}', 'scene_gt_info.json'), + + # Path template to a gray image. + 'gray_tpath': join( + split_path, '{scene_id:06d}', 'gray', '{im_id:06d}' + gray_ext), + + # Path template to an RGB image. + 'rgb_tpath': join( + split_path, '{scene_id:06d}', 'rgb', '{im_id:06d}' + rgb_ext), + + # Path template to a depth image. + 'depth_tpath': join( + split_path, '{scene_id:06d}', 'depth', '{im_id:06d}' + depth_ext), + + # Path template to a mask of the full object silhouette. + 'mask_tpath': join( + split_path, '{scene_id:06d}', 'mask', '{im_id:06d}_{gt_id:06d}.png'), + + # Path template to a mask of the visible part of an object silhouette. 
+ 'mask_visib_tpath': join( + split_path, '{scene_id:06d}', 'mask_visib', + '{im_id:06d}_{gt_id:06d}.png'), + }) + + return p diff --git a/bop_toolkit_lib/droid_sans_mono.ttf b/bop_toolkit_lib/droid_sans_mono.ttf new file mode 100644 index 0000000000000000000000000000000000000000..a007071944f7c90020e798eea53b10065e2b45a5 GIT binary patch literal 119380 [binary font data omitted]
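
As a quick orientation for the path templates defined in *bop_toolkit_lib/dataset_params.py*, below is a minimal usage sketch. It assumes only what is visible in this patch: *config.datasets_path* points to a downloaded copy of the LM dataset, and the returned `*_tpath` templates are filled with `str.format`; the chosen scene/image/object IDs are arbitrary examples.

```
# Minimal sketch (assumes config.datasets_path points to the BOP datasets
# folder and that the 'lm' dataset with its 'test' split is present).
from bop_toolkit_lib import config, dataset_params

model_p = dataset_params.get_model_params(config.datasets_path, 'lm')
split_p = dataset_params.get_split_params(config.datasets_path, 'lm', 'test')

# The returned dictionaries hold path templates that are filled via str.format.
model_path = model_p['model_tpath'].format(obj_id=1)
scene_gt_path = split_p['scene_gt_tpath'].format(scene_id=1)
rgb_path = split_p['rgb_tpath'].format(scene_id=1, im_id=0)
mask_path = split_p['mask_visib_tpath'].format(scene_id=1, im_id=0, gt_id=0)

print(model_path)
print(scene_gt_path)
print(rgb_path)
print(mask_path)
```

The same pattern applies to the other templates ('depth_tpath', 'mask_tpath', 'scene_camera_tpath', 'scene_gt_info_tpath').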
zL@*W@5^HimkY-FSavBcA8p|hdZq-$n_o>njyKhQG*`)hN>ibms^V6K-u+38|uf6}0 zp(0vbX?$f$hq1Ca8Z*Ac=PdpCgA*L(y?T{9Cp`G`JC?lo;CM%QuFjR2>KOmvi%X;v z_1S^+?0VMt^sC|gKzeRH?J?pRSf%X?lOczj4hEGuWtQ^V^5*jP^3L)tx8Q_luHVm;*^l% z4rJ5yIF0F7rPd1&gHYt~K1D%EBPVnh5Z4qE1A0s2q&#k*5J^?1D49SAN9gPSyHF1F z6XAlnW#i(B*#)un%Vw0%UbwpPkIPq9S0s@Zt24(%|962Ne_>v(OY_Fs%>2y4GHZCw zG`#qKLkXZ!=v4q3*>!*aV`cvzj0NeOhz^FFx*v=M*@8u>n9(tq(Ipx!R|SOZWNueH z&k}hlay=qM;s#qH63J@_35M9zx`r{s+uBBr>f1Mg!ie z(K@Q5BQan|b>F_01WP1F74_=nbPp?yIIPy}Q0g$0(I8cvW)P3`WQ#)M#z_=ChnErS zIifPf9DxMnjG~EOI7>V@5UldM1hP^Ve2+b)gwd3gH4J?k1q5Vx-mU07$A3HBx6F2+ zW_&O>rg}){_|n`V^9B~RhodDm4FjjQ`-*ZiEY@&tBz5AnfsK7iqv45#b@K-2M91Ad zOWU$`en(M3Y{RmdHFM`KZ~Wt(EAB|nvojnFhO5`V|K#ouH`O@3Thq?I_wFm^lr*a~ zEyeuGyYHP%+v;`JZ2EBb_Vb%+)z93MS&&)O+vb_OsAqQb;IOv({Mp&i{3wGPyG|n_ zrrXHkS5khK&{! zoab3+2$EKjrrPgnSwllbov*q&rKY{YkHDu%rPBz8vI!!S@2<$M zsHOBJF5P6`8CYV zYu}%nzmRhadWf7hfLIDeQ~+p+l2UTpG$Bj%e-N1!-l9n<%_zmhwDkEOM`>02duHOh z3IE%OEf|NOEAK*dG0gRj`>o?_wvGea7K=3oePGhJiVURt>0Hc6eE!FgMHfqILh3-9tt{^Y?vo&onpHKB~$Y zw&OoGJ^11FCQ)b7bx&r$1@g%KlLO58+WZ?a!spON;@nOrbWxx_Elp)MnL^G4C5Z+6 zX4^26-kRziMgc*YO-Cm6$3*lrvIk2_CK#GTCzT8o0MAI+#=pAg%+?VjAN%#nanJRy z>9el+7spB_ZX8`UDx=F;yXpLP5;z&&J?`+E@638)O0mNX0QzsqdYpLuJJi+HLpweQ zb|9VJB$`!bfkQcMcvbjXIkWV6hLlLV&1Eb$#0@RTITIm2M3qXbF`^ps+?~?kNAc) zir=E^vO|qd103ebJXOq=N|PnzII?0Y`P(DlXTCp6t(QJJy7s8_aUDy4P5MJU3v_jM zvGhjiV|Mt!s{PV08(G!{ND2Ra9J&uWG5x>lKYwq-MEw zg(aoR=YK3*PUPmi0O`g#)TXY)fzn)fe#V4ApMup>Y!R+c4Y+w^9zFj1$nrp z^~)FbnRnOvK1KP{R%{(*RZYjSFl{b~K#K5UEwDY8#rOGl)n!0~hU0dN99TCXgH$j- zVChs+X{k;Yi<~=mt^mV#1MdU4G2yG(<$+a^()6tK%IcMOs=6xS&hz`Mc89mfH6(q> zZJ(cg#Hf)8)y;^hfvQyKM8=l*0I)1HV(^=lIOq zf5&(9>#rW9%JQWKg#u0MrO3R4S`hD%;|USy?%r6VPUUN{xGp@Xid;&$*3K2ZQWqyRpY+qnha2y*EaP$ z^OeoYuken1ag+4NrhYc3L9mAV4{hp4SN9)^yve-lpJF^G}Lol~Yll)a38fC(KV9A^$ZWcr6naK#%qG1smF;(zRrc|x#cL4n8d9{h!dKg`+QmD{7xb!i#VUH+>!i-e zqG&vCUM#UZP-gFI%gPLz>iL%RQb(0NJ2PZzd?m8HeDvZlf3$dMd1`G^ z;Idu~UdYs4)7njmud)fTPbGMhn)0u|y>aYwKGaGNmi3vZ56H~|x zWwaoK<}Em}B=`!9hBt9yYY*PQiLKkQ<0Na6jvOJB*ygrvx1q$29Jv)E))#FY2rL_6 z#2Vr`R9c7!JWLxFId{F?YIe1Vg0%MH|GfGiw*bX% zR%mrXmzx`}@@jn=O{&VTR;Q2u5(Jezm*w~QTqYF1e%QPhT#hc|T(I9?OZru|no! zSgcEl4(~H__Uc}fD_=Uv9^LTr-PPTh&jhP{6Xz|fuKMxwXGMMACDTU*w>2F$=!vhe za^7BPhM?LwzveeJ3~f^-$DL?Aqqnda9eJ^qsV4 zQBC{EIdkp^4jkA#Z$OYw9k^hmJ;O44?t%&TI(-Fwf*I+)m{lrJmp>ZrTj)=0dGCRL z{c6zTO$T%a;lrYd547h^+dU^HsCEq5JM*izWjy;ftXR|mhbB;h3ScTNXAv~xjC!qF z-!4G!t4;T+s9L) z5QkJSxw@x(7lVOQTXd#+uwNVbW}P5V0VfNIpdY-8433oVL-%6gUAuS`bCj3-}o>s7pZAdM! 
z{vHu`tV-m*(Q3qfNb5|4nWFfK!#q%Yj7U&MQxk9{$MLWhG4m ztAn+j3ATK|szdxbm8yFq4S_)=OnCOI$DjFjU$ffiFz)lDurF7b-F@>jq8=fmY?ht%w=)BfWFEDfhz@M1C$3ckB#vBHQEj&?6~kdK<8+om$`l)s4G=3*5|Nas zSRe`tgybM4A2CdeOtu1qEMSOWU9pA5K#A0~OFG=eI;7{0u*5Fb?`J5%VO@Th|G3*P ze}BcF>8|ei?mpTL#s>66sr2RICdbQEZmWrJKw^}jbvAH*B+gqNcIp_|dHuN|mamRJl}|Mn%973EM377bB`S_-yvCm=XX` zk*#Y!&KOtPOGiaB%Q)iIJBzEds4$bw-chdlFBEb4SB5~@e;$qGY3<5|G^g(-E8CkP z8;5Q*v!!NMZf*rw&7c{_ftGwBh>Jz7akPoWO)SgA2&8Vxg#kT^W91ynsA7%i&n}$&)Af z@?UqqA(vgJc|>_{n#KOO+xwV3s%~RO-e52AesOwJ-5P*wbZWky6Rbi|SR=em%>b(f zuTi0l7tR5bNoJ&xA>%!5lI1lTa*jlHa-wc5Et_zNs(JEiRQ6X>@B$HbHXutwWJWO+a5<5?Ig&tzgE{aD$9K{)v~qE-13YwA zj2j~PXDY2yr)?N0Xld(?4nP^BWXdJO?k_bQ9mkZSaQG09_ z5d4xfh}z(`b78lLhu)PAtzUlf7j1M;HZ4Da4`;FMiD3NY}cVWMotuk*mKM9Y8xdY3@YL*ARL*T(VV=x&+D+&Yx z_El?V;D51e?OLAP8nSx~%?5ZJRx2ux_c$M9<={q7$fJoq$r~Q&t+zBE8{ZY?t?Q2- zUB8-%O0iYx;%ataqf%yd6RUWEnW*q8%Q=UBR3+!d$t{mJn$0TC2~@sO9H-N8st#n3 zu2G4qgnbMsx*d)moDip#7c81~qwtjWd8Bt~?O52#WB`rSR*5z$o4#iffL@g~Hh9UY z7qY;j2~ZSE;#($uK$?Y`2XH)|Jb!)#dlmD*pIqF10;vX2BBfOa`DpjLWa~G}`4B7P z4a>~i&HOYoYc;b$$Oo~h)h1M3t4F1Az-Kj1bmGuwYfwGDe-%uN63{qNx93RELbR_F}*@P%H9Wx;Jr)t+x zD&=JrH?qyYm+ZX$ywtk(=q}a7uC7heQt9FjWK(2yZ)M*Om&S?bX^(i4b7AJ%k7>*v ztQrEX7nx9nmN(&iR;f6R(WEt*Kz#}FixWKi399x&py&lbWz+x`njHDd%2|PBFE^aL zStNZCtiY%jERxqfwULrOl+k`e+PCQ>k|;i&$(Ft%P32GQ?7pnB@9)+=!#8zrMBAvE z;u~1+Mp&5T|H85Dc#l~b%M~lIbInG>3?9{wSUrh1)KmhVQ_w-gfN4;pYvJ@rD3zBs zv5(7bqUJ!dp@|e%ftW)zI8xu`@k=&MxcI8n!G8IcG~^g^!~$@>ChG-LfyF=5h;2+t zWeb=nB1Y{v4pd}-X28nT>P)I}O5IfPYA1UO;RJO%YH^}$hzh(JJ}2+5OdvFY@M-il_w`8X{o_Ka<))HKj<>T99Q? zrse7lmO5meeQ9KSji;cu85h7It`Sfgk1Wf#p$Ukr1=XfNmq&-B*(?uEQsm08B9gcy z{})}pAj_$nErF!Mp;#wbySmtwuD!`JT2#AMu?J~XFjrl&o;o-l(Xny0Tr>9)B4=@5 zAi-?`7*{KfBieo>iC<9(T9wd_v%v%`RnNvHTqTA?9*aoZ@2H?7?b92LfYK-IdQqD6 z;*0Dlc3%1 zh9ymF^p^XDgd@tr?7){dvD8nXx3i(k5(jc7cv4U1)+nO?0m)sOICUopH+a;k)Q;zLAmv%wD>J)>iqg7XPG@9DrKixjPm=cW zb*^Gh)4I0a#iLh9dbewz+Y_0*bIzPy(@R9PR7eF@k&^=}v~vB0r=NU(eKl;xk=LIS z_W=pi4?fX3@xD>Js=1jPGTvGdB!q=8oRQHVnE?jFsDkn7zKDM~`Cf=G~p zM3EAUs%2Zva_>cP6kBl|C2<@_c4)b!xRDopg330p%5yGL& zGPE!>!;va-JMjAxvP+X2TVgZHvgXZaE1EB9ELd4Na^_%X>t&DaYFy?nPiZJwKCvjT zaLL%>n3(9Cn!E(hs?p}=;T08Wo~Hc5+SJB$*L?Pmn)OSXQp-PD}@dB)bDPHa}?JiBO6L#fgnk?aU!u==*4;ji)U?Wx&^|DlLPC5*bVl{#08Nh5r z_5lmd*D8xSi!{ons2=?d#_nP47NqXH9*GzF7!uMj+P-4eX;T@*pJ6H@gQD197fL(x z{D)7|CP+C9BAOp_5YC5u=851&Js9jgeHxSVy=QO5A`Ni&V)mi4!Y$Zm@5a_I4QGRO zatEa6M(w2b26A|sc(-mxPK&VdQowo*y$mogw38+vLB#PGIwm|gayJetzw^#d|E$(i z*%;IBqsUq8RVJ9~^l;elk+rnm00RZoc*AvY>aeajRt(kZby9|7v`)}}QqsSlu_^Ks zVvubLiae5pBhFI@8RV~b1O40ZVI%rKlmn!m$0r6h%CIvP(#QByj|98E`&VmLdNKkB zTp3x`zkMgz6?C%qpd{Sn#tyKIj5Qm2+KRM1mWP}kDFE@|B&OG_Vadr5gbd=42Obtv0w8KIN#5a z*buhMC@ne)M`1_x#9H2nX7@z1TcXkA(P(m<(C)zT6&&ZWV&!M)HCU}UBHYd>cQz$i zL=VLBn0#%a)_)Cqx4%yjkB%2f3#ck}yh5KIuZGh?@df=U#loH?QnO)75}6ozKRfUe zHmwu{fA&Q1ZB`ged71x!|Mt_vbFt->t-H4xX@;g+E=9Yt$#N{_j+n2+=(`ZIPE57RqeAUJFx|k~pPp;QtWl7c@h7{};(mSI}Zy4A;26l@9w4uTK44@6CvT*}9ElY7g z>vJJNqH0nR#Xg-X5QVlTs;X$~-z)S984nRLt&#PYgC9MDv+!ar(9~B2zrgyiBm6Us z&9vIEPI$5@vD3cR{uR4!?~2|My$8Mz6Sg2D+xf>$HmkjhvYZ!KbnLz}(^pOOl{v{= zVAjo1u~B%6j-^xJ^jPdIH;QLtim@1bNzoV1GCQ5xVTN}|>qURlh3StJO0ssyF%xkO zXx9xma-IR2Jto5DKhcOH4x_Cwq6U~{H8jy(7~Ubg<+`WI|ebkM!w=Z3#ve=FTT z%YVp!$Ki{if=+y~0A>nDJR-$oQ)77tdN2Ke^B=m5I`S07nl!8|a4?`P3qo(jzAX6G zEgey2oNA9l0ilpETTF~C(ZCS*3=0NHEKL@~1R|cy+rn3lJNXMFnDY8Tic5Pb89d#Q z&QJd!$GOCdU~u>AEQjU39-Lt1_nhtyen0rq>%5w~f&=W%bAJjB(vZX^gMtgLaBDX< zWob4K7ELyLj|FE18O&!*h^^pO-Jtc-Df1X+g`Om%%Ar_SzWU)ymDCJ0xDt4DC5(f) z?Az?t$4{UB^y9zqo8CKRbVxT8jkv^jXPEEv!Rt=2?WFxQmXu9VNVAV9gP2aEZZ&k9 za6(}>^%+|C(2R;Po>kJ2E6&BFp&g@|f`}_HFq}Aj&h$&Z_xJ3sGgwiu19Ai=%?oft 
zYn>ErZc~oEM}r~9g%f1Sr1UyGY%XUnMXcDnEe3m9NI;(o2_06)P?$tI z5`+_CZdPlYden5_}(Ov)4$occ2-7KI{!JaIj*>n2LDpN zlXbe%I!-1t3W-OC6w8y?a_o%j#ssJ(TJ3N$h9@4#fifk(3r2)iHx!}`O#)01$)|vf zXb-)kaOsMk>JVcCx{2a*Vb(&QllbF&Vj=)zE=4U9>SQE>q!C{Qvc%e4G`<|@sX`tV9U`yc4 znOcASfl6XF^n7p@XAA!}ql5kEEOuAtS381#kW44cX@yCAPSCL1B&G*jz~g%~*ns0~ zz1(O_Kv%=u79o4&pQ8;B!c`z#J0jx{xOwb};cxdo^6V4+6KlS??Fp=??<+6=_L*~! z^2+h?qM~!yu*0;k0$MHj)ILDO-E7zmaHXcdlDAt1(y#9wcQ| zy5lNWjuTl7iA$fE+;G9 zys0{;s7Q;m_QtysDtckMq$Q_lkymhm5&s=nzp>WAv@Un%p^>VRlH!pVOnJ~nq6j!H z3MkENeKn;;tA;zwMINwhEMoXO2TpFtJGaT0w`fmK<+9G^?Cj>n7tgu$Tf62&-FQQs z<%G#LkZ75+{hr~bi|1t<*{>X)wZ>Qd-wA$rUk~jevO3cp_t{h9V>Ub(JgJZFD`|1r z=jHnD{KwbV-`jlYV|(Q8B7XiypnDxwBV}4G;_4f`U76U^QE$D?&Iate>{noNT@dBT z>PpBmRwA*4C8{Yott-ly)$Osw$LExHCp!vrmKSN{45Tp-olG*I^a30Z-oFTXA+xHX z60t2w#1()mI+nG90Y4df=3qc;{1RViGdq?@36HkJ5jB z;GvpIF<^<`Pusga+P zW!_YmUs_7d?n=d>n`nD)sO_a{Kwa|R2mm-kbHZnYfCRQe|9=pJ%-O9=y{CH)t*J2m zujRnzinsFME$o16LbE0$A%=~S^5D)f25(~%ajT9zG%`bM z?od*KHRQXn;9)x%Rz?sSJmZhyRdWD7rPCO^%)AbtD?}bjClX9)uJ|~aE*37RP-zoR zu7`0QD@!UZ%OYVE$~ufFo7!g=E+> zrlpY^H_GCJdS-|=>FHU$IBbv(t%Mdv<|CTtgi0XP5*nKE11V-$StzfraPIsYr|t)h zD{^56t$qF6o2JX&x?|;vn_t{bPTj2emo8wt+Q*Lb19scFlgbaZZuuAAKQYa#d+_ny z4Ink_)y;v;)RW^#$!6I(InFL)o;wM$)qxo#y(0tpnvkc^tu0MUNy*60gw-Cws4*F0 z+PeTT7)%8<8ye7)f8=Z3+CO=0*}{XT2Nnk^%K|O8eWAv8)#{at zCMVhNe!@5NxrbiA?e-sC*_4&>#pL{F-r9P6b45mQF=ho5`vlk4lT>Z|w2(l8EaHJNG zL*9aXiVT8=ff%)jU26!Zs;i5Sug^9_J#*vRryhUL=FZM>IBS;;FU?(;o;2I-X(~yL zv1AvPCh=ymS}?}%{KtDgTt0Zu$ZVVC_89y8%@cKvFCuC2towj_7W(i5m``y~W>a>w z-MqkQce#vq9mabjY}Cx$?Xt%&!+Dh0qDC5qFiuQIcsjn>T;v-jMnjdX1E%`NXZGW7 z@L6Y5@1Zs6UVCDC{YCB7@%(yd@TAxV&joM&uYk#P*fjAW8zGL8bdyez2CHa=)7s<8 zNU--fj7EpU?ci>Q2TpTa@9_j?PpC}bCWK5U#_5iDQ*|fxH0{cx(%xoUsv8aw=Sb$c>cW)oO=X9z2J$3M;;DpDgxny|KV97EB`0& zkPwsHo`m(b9NbI8sXZl5t5QdRY^MVZa}c_3I1FZoty|z|cSPE51&%;)BRn2h6Od*i zFr2d6&cN`dMS;rFtu3?H&&@jbt21YK_L=!3cLZaERF?a053q++_JJ49{x^v$VMr?l zl1)4G6fe9Yq$F{7w1+GZ5KK5m(8#9V<5sNcDyLE^4vebK#=kt|jP%d*Hn@BzIE|f~l1Ocp_H@bDP}hXVcP>w4RhKD|w8f z!HXnljv2iOoJ@^(ffZsgE0Sx-ky2>W*nf{6ig=EQ1ptLQ3XK{*nQI#T8!z^%6+ro# zi#9bp@xddLrC)uAeJy3MROR8Cor1S?5b2ucKwo^${hW7KOIY^!tV`HD%*!wp23mbdP z_L7ZXZM)NUpG_aKU1HgpjLvSJ)&ziK2kko8EuV^tZp?`Pk`GLoli6QNfRm+QJ3% zuW9`Bb{yBa&hS-fE6{h>TrszE!C7P~(#F(in};1Z*FKu}V%|^l^v~seEstNF#{zk* zCy#aIvEsbOJf51zqVw2~lG*dg?90gyC-d8r+3I9}^3i0yBbn7Fvw~!nn2hc7EXvNJ zY!qW+1{Z9xnVF{K#03T%o12G?fH<8o&$NIU_w$$e&-q_qnlmGJL9#v5X16g8YmFf> z5r=Tmz^<4kav+LjqwHU=67K}RJYF)#Ug!@~9KuwkQ(B=y5-&2*BHvJgA>*d~U){go zfG7^*3x}UIIO5|?H@)&)Od=MsMq6A$3|z^wYw8CR+ZVRQpWDli+2$=+l=|sqFzW5d zS-xnVBW~`(r8)do(voKCSY*$=pL8sE{e+H%?58UzD<^Vz!nSpO(y8)ytE`;Iu!iBJ zWod0F4=u|ev@A}0-17@n?rv39)?FH^Tic2^Xb-*46O0AAs68=dW_EC@-}rW@er>B& zWd|>a!z<-QWz(~R3mx%sp)+D5IOr?K`}e@VV4}4yo?+HUPE(?UCqA-RREG=F`vTlXL~MbApEOymT6W3)ovhz@Si*vi|DJcl7 z(GcMAfnATX=XS#*oYftZl;T{TN{bD#2$h7uy-kxaMokP5YHgun7;9^@X=4Xg+Mq50 z*0F4CXyWC&pTA*Q*WpJ;hQ6@}f#Sxmf3?AP(WlPigbW3Z~c+nJsa{b~Nxnt^gu?>9rnI3$~yA!8ivEAq)#kdSGfnq(Vk{-J#G zl4g=H{u^pxZ7af5MENq3Fji4z7b}HudASM!1i1emo5}E!E!H{ba zdPih}NRpW#=nc6Z3BPd-=%^$*M3f!qIRC!rE1VTcC?8FhRLFR{@H;QzohSiC#N26H z&q*lx63Qo&g{3m^PQHN=^e(*70)57fIkFgj&<>5IWBaR7TaP5nNyZ!`LV_9Foza`A z`BJfE#g?!sHirDkP_rSj(6fZpI)3>SH9QQ`n8&fyiC7(COD7-;da@Q~F$ewxO>7jsZ#ylv946D=#_r)?>#G-of5HbrYIK zyAQkRW@ARr(>j_8+za#aFhfuZhs@aI4r4(!p_-*F&CE>5?#{8>61rn!#G#VXHX>0) z=K`j}`AsH)DMZDVVddP%4lMH8@vbdrjxAk$&GY;BJvEPKui88^d&%V&SI-{0WBqP+ z=G^W2T=pY#)9AfJ!;f9DC@<%fBP-eNx#+T`YxZ~NA7Jm_d6dQ*krUbU6xtC9+?bgy z5%Nf9Ct%D&I>>k}Dzpwo>5~|p%8er^{T9h0TLM|6vNo()nH7;r5oKkzWs(q4^OU4> zmPjI^xzc3aVfm1?lN5^DLz2#!g9L=dC&~K5<->hRt0LT|Am}_<^rh5HZl2bcD4+T| 
zFPnBE|JNjBD0%`X+>v#5c_YlJ5f|H3%X?~yndbo6JO zlwhFGv{YuT*_4UG}w8lbJcpG!5@Z z@lL+zYk!bXU}T`&E@`r_y)c=P9V>!d!+53PjnhIBzC%zT2~mJTtxG3vL?x%#B6DdA zFEB|0WxtD%1k5{(yOK4*MK!OAng#x%W{edyXA?FG|MS3K)*i-2)=%@Qs9)O}B}P3( zKvYJygb*$-VIcnvli07+RsaUZNo0#8Cap-$y$}SE-CI(ya-)KfK`h$n%9#N{SiS`n zPsvsYjoeC}nJdjSy`_bfU$g;@^^mr+3PK`5;I1@#gmglmErq^KQ3TP5&Ck^=#dZdv~K>DF+12VkN_=jm#`lKHqAPH~!rC7X%Xszr6lK{dYKxM!4TU>z9Y16S4K=mp34> zY+Q6)wB6!}jWJ_aiAm>N_~#J`W7QB9&n<{YUxw@(8pmKn#d*LE{osGUGJoIcV_yfM5y-r^Gp!X_di-U`v z{T=!`bO8M$NP!Lc=}MZCOesClX^A9Anu9_ok~hV~o5^2DN{<+Bqe6n!vj=NZI+8kfU zl}ChbgBh*TgvE)5F%H;x@q8HAWW%pB7xvHOvxy#$-F?>1<|8Vrs;CDU$+Df!Bt0#< zbQ3IrLXVXGM8q_ZjRj~{ze^|H$WuvVARN4O3f<889>x&LwE56*IJ0&8vWvIpfOQQRoD9K*REZ*;mw~6era?L-*Wb$ z(Lq=B$~o+hr!rf&ExG(q?wn-^$5xae`n^n>i`~=NwxW*wsw$V>L9XHXvpZZ_4c+O; zmQVo3mv>v76!QlGfJ!Vv%_2yFSqthf4S}?Ut3V!B!sEora5(UwfSgbCYbK|SlsM6- zLZl-CkJh!!ejumK-#UCS$BicH2MbD4?=i7RVISU+Tc?}WqSJrs4Rr0Gl4esqD@O#Pep~d(7rw=$9q<* z?5^aMuH>jF*g9R#O&NtzzUb%-Lq#w1c*M|-wxp#UCA%fN6*Vf^tZ0V=+EfHRghJ@gJ<%|D+lKFn{ImIOy|aeDw&>!5&Eto=wx8P3NNHx*UcIsE z^l5got*_Y~&LZ1dcOa{-I9W91RJPbpWISd!^s9?upQwZHwHP75F4t{o}I?BsS!|E1onvrBN8l_2Ai7bkNCjBdtC~2TTa@Nlf6{nlh z<_#=*@8x+&z<2H8$`#$+iOJd^F z;%+I-&+FRSQM-J0vCR@&*0O%yO=9~N!x~49nI7aQuSkkb`-Ekj^_XCNbzsOu2aL@< zYsaauRk7~g(u6#mSk{|{T@7)YVl8s`#E66Whm5ka>(TxuIwjeG6hDy7kiSbRUl?Je zyGsTGMF?drFTXe1nV1kAljd1Eue#8d8kdrlXfv4;T&Z^UhuJM{jSrM8-QLzZ++Ajj zAOGi{-&#KS#o<{=cSc3e*|4*2_OrmY@xili9^AeAAaytzg~~R4ZV0j{D8#H?42x2c za{){0D3;VAkEJEO!w&y`sU;)B&bpoU6zuO>mS>5g0TrE##urc*NgF7G5VBko7v2J; zRFY%tLNq-PlTqNA)n1(HwMWH8<lI;%?GYis3Oh@f&|nsP0d8OmRv) z-!*^MJ-Ho$&eoyDMZsU*19vVq&YYWj_~4;Kk*vIioNB;z0=SM~gernmloSHUhVsdX>QE#Qzi@BIiGS@U`oFS*zEcgj_V&9CkJz?<#*OhS90Lg51qSe3K zt$+b5o1bK_*Nl8)@6~Tz*VTQ;Pp=$%cv07^&uv_DZGV0FMMu`I*xr&B{4@XP`mcQW z`2~SznCpT2S<*9svXXmpswm;pk+<($T|aW)80lC$K?99OMTpeP1r025KK8>o3p-f} zwgU+=mc|uz6Y`Fnr8vus+yY^<85w#>S%iit&fy8|@|MBR({KpS@eP^IHdx zy?CU#^V&DA9=U(X+&Q_vzl zeAP;h8_BN<(RghGg<8^Z1D1bABq(A!F`MsPaq-0~&QxtUvb_6fZ>@~2pDwL;7laDObSEYmG%TY@`4GYyfex@q zAkgwCt?B)$SVwP8^mfk<^gdGAd&8#nHxJZP2-{-Hm-jSbXPhDlAK|I5x!~&6Us;XxtK04fP_- zE|AcwN_+$KLI(nib9_|OVqYd@^`f37PcV}EKI&^L^dHO_``Y%-{ehms4f#t}uCHnA zZ7V2SdwA{B-+laz`|jQT@~V<`eIremKXbUHYU5Sw7VTM8ZrHT!sIM-$xGw8JT6tD{ z=B$-u5@lEJLVo&!3Dfd*O@L0)voV8AS_Ed&mlwRT_ue(yiVCznci=ng0ScO zt-=DQa~A=4^!%-pc}X*1tcHZ~2pD@lCvBxA(pLJ!WPS04phS_aKyXOktF4U%59QG&8ni;T2(7mQ?MG6S)0Cv=K#HhQw5)tU;##6=b|F%wlv&^y$&|1wqUE&$ zwkV3)pCpUkuZ3h0arUYPK;|VyN))Ev7i|@l5TH86i zCU{a!P35kum`Ni^Lj;6N{+}US^1p(xE=&W&GeiSI;Xs2+u`O)B6@FT?DNMtG5Cj|& z0~$(ZoU)8HJkc;E31oejzFGl*Ji%qNF2tt@S-5mMjQLAu@=3@-lcWN&u=M=7uq@;^ zIdmKt&ErWH(y~bw9zB09?0r_vN#l`_2jgEQWsrxZ|BRFe93su(|I+OhH6#y7A(!-& zJiHAoa)r%~Ji?G9#Ac9H-tH`98uAWATIG+TY?s)rCcVL`EKda#mn&o1d6m-BOye2V zOXFD~I7l^vCT7pRke`{xGgaE%*`LArCNYi)oH0@!M9sp=kE%HfICEv)VY#B(kBU)D;H~eT zP<;c{>wG+`Le?KHe-Y#=Hv+<*^IstR;j4gLQ6BxHli3L~U@QiVt_T==Zj>Ws7J+#5 z{Ed@knKPgehq}c`De2g&ZJI_$mosAq4C&{mky6sJcU2@EtFnqN03lcO4+&wP=9o^$ z$CCv$7lI-BhlH`upkN3>bnpC`Fi0}RSP2+ntjv}D0}_g{j%Ptasadr29D-ztS0Xhf z+d^K&SP6(URyslwPGe;WNiBHjDl7+N?#NJLHue6y6oG3d8Ab%Ikl`#)8Ioc53>k)R z!cc7@8UAn4lLnA1jmpBk$WpRm%;AVz$nd+t<07_K)-1<=DoeeNvVBBK@N?ipwaJEh z7t0>%q?2EH640k{C?UlmJo-wHScTGvEk|kcTFRQ~R9OrrB7P-R`YcuHF_cEsHA)MQ zrI?3A??C0unJT@Np_(Cw`vo_IKNLNhVn?Vn^gy0e?3`XRO0N^#kTnlv$$png{jS*g znVJuaULk5mzZ;YCs&g^i?o2PefT#7a=o6xLeK%GlVoq{Wrx;5wo#AI)C~zkYMMBtf z{(2#=I){&tdeL>0#c4BO&^zlQVC=bG_Pcc1?{2^wGoT1>t>||W%HEKdP|>kG9U0we zYzmJD!doje6iLTkT&Cz)nVx?^2sAHA2>U`_LPdyHG3P=MM6Z(&_J!g*1R1m-=2<8maFULcXY}SY8R> zbC#+EV(NY3d8Mw~Vhl4B}O(D){TqwT`7)Ak4q#buSM z9O+Io>uf#$P1K3rflQttW9MW;?SHJb75RhX9^@!`^<-sPsA8IhWz85VB(|z=pyoNE 
z=C>yoUZ`e_6%t=gy#uA8QK9lTC!0f+gCF7AG2_YFfiwBe!+5_mSvUW)c+;FGYe(4- z-VaWe*Gpr$7gP+aw&TfbCNw# zJ6m!7dDKpG6``J2`yW2+0$5vlYm1NZHY{i>s`Kk{ys|NQTiRq>k!@WIQG? zD1l%+dVbAhVfl1kP-!7`65qk^h|;AHjYrRSO%_!sW=MQ#eNOoV3Miid)r~eGY^@}9 z*pt8`BuV8OKv{~>F!Mo%DO=`gW1dM@h6*zgEtrkj{XQXeR4+-LQ}F@oHI|qe{n?s@ z)KSe?pJUD@ETGS_#4^{k_!U_*>9e9{)M`~VN26wIOu}bt7W%BHnXSNzlWK-0!>q{s z9*&H`K9{S4-!{AqKc0>(OLMecO`X}<_Nvm{RD-*-usC1WZ!A8Ejb`&^b=f&=7r&;+ zMtf;uv3nU(e>(HDl=^PX>2SK8r=3qajfi|L>yBYdc?>Lhza#_Xe3(g9ngOzzikL^r z@`0Sb6d^5gS&>Ee}Mw<}Tel zk8*C#+j*C7DjjF=DBFML#EDyi-wx;Cy!ObE;J2@3i*n{xq;_2W$`!|6yQVXxY?eEl z(sAB(6_R6}Ej?Uz(b1kl-)#dmuRZ(xGqA3{YIs|}--OsX*xo!%$@W7wo0AMdQAc}e z8*I{1XQKIXJM9Ar9eXHGfRz5dTtp|=A~CVc7{1f*@6TDWq{;DuqqM@47`)w&r}>WO z=_$$ZcKXV7%hM8bmM>qNkrr`JKG$y>-ZnT)mm!oiU2AK~O}f(MN;@<^eOdZQx}KgM zi-VO72-VTdQAwAlH5sgzn-x;2zf64D@iv&!RHm6JC6NPGK{*-xjIRn%lB z&KWHIERL=#lbYfWdE%PlHpS_2ak`HD6OQPpD4Yj%d3=*qzdYKGERA4X>MuTvW9mC( zKUJtDBHcM$5a!%^x)Cg8_W-vifm?s|*1Fnl)%{ET_1nrHc4TB^I}QJk$fZeCpD7)j zlZcI*WvRgN9kl2NrjNnLV(fmdaUDud&PzXJkITaO)N%H_tUR7a`55x@B3YV}lAL{Q zVPP^c7URA|8fRU;Pr(h-xm!wM$|9fu=!eh_u{eP;(%_5_d4LY(1FeM(=wwKSx#)_Dc-U#Y++xXcrrr>zRi1kVX6Dpa5J&;x|8eT8 z6o3_$_k8Ir3#Aa}`5|#{=x4)*vcxh>Eh^VALu{~+8m3GBXra3f;! zgei6BBu0i4@}1WnOl_>p z?DuEPPqme&yd<-rbQ(^TcV3f2V?Pw+wXTshC7Yii)cB&I{H0J{)ELi(2mt{ywr+ z53O|u5ao_2RcJ#`?b+ZK-xZ-AAGQ zURL)p+MxC;bswu$FzhK1DA=?THm>gDMg6+Qa5jLZZDDsoL!*19Wg?nP-g6}4%v1Ne zR>P~*y&+zC+zxvo>5?pe{>A*xxE!0 zkNAmRr8i}{z?j?T9v}7f`?vc>x4B0KLNI;9{qF6)z3$C^_o#m`FgETV1)hOncb|WB z+=uJdiP6AVf1r=B8mkB+5P@q1CIZZsiS52&)Z`u`?8p40fdTjU-W~n{U!ULI?;i^c z4gqMx5GEl!BC!kz42cWlz`-YQ*)H(ghVMqS z0enuUov0!xRBA7-n}L%X?-FfP+ql3{@;HF9#9yM1crBl|0$1Yo80rb&*AV~4K+R{l za$#Oa(h#E0E$Vawj$7LT7=+=l_>Nk1OgvG0j0t=Kg0^wcumjHn_?7S^x$vU|$;&YA zh|&S@f!b$6y-9F}6`i5?sNN7~{6b2IE|Lbqpbx*2lx@e~3H42++>p$T1CLpl!z;Dj z;%^1&n=T<#14&(ls;d%I{{OufqIx>5qkvZ->f8%e0IfJ3AD*F;7|d)Jm_~-=y^4WysyK!(SQijS%?mq zLlMH-e60!GY60eLz`6systa^1)E0rBC7@*)GDa@P7_m}Yh4Yx!Xlr5RyGZLn%>TvO z25lqaoHl{e_iJC$j%Y`#KJA~{KeUgud$ez8FKI7m51`lfVarKBW@NwiqV|35b?p`HRqZwHPvG7U zkhStn?VIQUA80peKh}Pz{Rr**7wvt-l5a(8Y=aaJK^9J;jdr5_Np2<}iMt@Tf7bS3 zgx#y{$Gm@u_Newn?K16B?Q-p)_MY}v?Qv#eX5?v$W--j7ozsHM%3_&~*|qZ`p>-UK zX9>*75?K;+v1A7Qilwo1M2}^%EbU|MU(C(2Sq_5X^Ps>KutHXZtdS+Gl$Eh^R>3Nn zNBapw8Xv^X*RWbv$Ld)FYh<(7Y&M6@W%Jm4?Kj$QSrcn!Ev%Kbv3AzMI+1*G0b7WK ze~Z}?wp2T#Y$NMso0yMn zMrzG|=4S(J5D6^@YjRjHM^LciLJ;EMkk0HbJ6G*`K9rj)JJ@zDfiapJqVb8MX z*z@cK_9A_u+27a)>_hf<_TTIu?4Rr-_CM@n_AmAc zJIg+0=U9-PM`vYlPT*V_Zp2Vro{3e0n`iSJp3C!iJ}k3^yoeWT4{6`xCA?HSqdlTM%geM$?P2XX?U42~vZ{Su zd!Co`3hlev_jslDZSK+jhkJPyk{{OaTI2@0R=bI^ z9OHFA{|0}6f0Li)5AuijxA-J~n4jT~AR+!^{BiyS|2F>)|1SR?f094NpXSf-XZdsd zdHw=_k-x;hkE95%@K^b3NSgQtf0Mt(e}J5rKjJ^;KjA;+Z}XoaWx_A`JN%dYSNzxf zUH%*XTV%}qJ^wHM2mVL?C;n$7RCtfS&;QE*hUBjw^1t){=KtXT;UDvV@lW_! 
zr0G1zgZ#X%A)hAKVQ8M*>#5=r^o9Fx)ZrJlXRD!tf%Oy zdYYcDXXu%FmhRTG^&CA{&(rhu0=-Z#(u?&Hy;LvL%aL%Q5*zltdX-+S*Wj?TI=xQtx`fPm;_9)NO=j%;+v)-b&>TPdW+QeL0r!EA>_S zYJH8qR$r%Ir1$9S^^5fl`bNE1-=zEW&3d2Sulw}@eNf+`2lTD_HhoCnt`F-Y`VM`k zKB|xDU(f8{6^!@rJ`T_k?{W6>`a!@~{AJ&iPNA)Z8WBQf)Rr=NXHTt#2 zis6Z&Av4r&kEgjUdSn;Y6@4S4e%lV{@gx0E!Nm%}+`Qe_2QA&~lUGyo=28DHzsV;q z=H`*XkzxNfvrk?vt$l&fzKQJvL;gLMzNvddYyZf&p${LXwmu)KGxf_$8|wCr?eSL&jj6Zy9ZdVxi<<-}g%iz?#p_A|##D}$$ z79T!YuXQlu!O}HV-m+!t-q5w#H)_~|57UD2z)-*6gf$;7<^>A-fWm%(#6BQ7wLsw+ zkk^<6VG{U)g*>p;vTzE7W$V^x^d@`+&IE+dhRz!fcjz&C~R!AI1 za-PN*nCuHlcR#ailD`a!E zq-sK3jH^cj!-Gbwpoa%zS4YwmI}!P0Uah2HLS8Lvr!chaj<~-l;(qVcy{Sjix=&nU zdcrakvoCyW92yxO9E)itTx2m{_}1LqF1|AR{Nid^Ha6rN+am8rrtYmPBN$}GxDf6l zCP}_=92SSM*!}>l2xBk-&{ItF(2gxWaU1O$9vR01Kj5>r?-&aJBk^@q`?&gj0lKpI z%DQZOfcPmNR!0<$S+?CjC=0~}P<$FJqkv^-@sInAojzbE2D0kL8nc3D=)q4q;2BqK z!JUBs8W;I??C_zdY~S4P<4Y#^(h0sU0C^P{@a}-VV#|neWngf-PhaJmFe_~I?kxem z75}@(0uoGfqm}9lVPsbYL(eh3Fe9w~2#TVBh-44ZBkr6N^7qiI;!C4=->@0Qje|s- zvEM&5?lY^p4EykdPCvpl0@A25<2FIskRVMW+0x2~C-|O#c|_8uk8T+;!6via=QWCp zJ`Nfce0>M#>%)JbV;mvITdCR*TJ{-eT2-C0)zpvTAx69`Zliqzfq>WJsj3d$)q2Bs zRpGm-Uu(kO)rRlt!guxIyN2*xW9Y8V6TS<>s0+dH)`aWxhHLSLYw?C_sS4Lp6|SW! zREwu7j9*o_zN#>ORpFYe!ZlZgYpxF0Tpg~tI$U#gm9{mRTqAvF8oGa_>H>o8}(tV>chCy zhjFP7<5C~4xjtNTeYocOaLx7Mnj6A3H-zbJ2*YRy!)OS@Xb8h-2*YRy!)OS@Xb8h- z48v#)!)Oe{Xbi(>48v#)!)Oe{Xbi)spQj}Z|dO|R~o)8^gPZ)+b3}XuC+R%5N#s<^c!BHR7y4~`!R?G;d-Qp6xHZ%-G?+)FW zE|LZJii^M>9o^FqD{Qtxk?h|*6uWZ*(*Ws1o^kGNCqQ|GO&t%~-KjYH!a_xDFb6ZMn1 zeT#K`3p68nKV}&S>^zt+MuqD>e{TXEp+W@@8}TMj*eD!ZKJEl z+ach3s#?VNp6WIMA6Ee%R{_V{UfoXRaTWD=+aP=wo#L9_79Eh7T?yWQZ*h4 zuclVwUsJ1|>(q0dtfi(-(os_2|nT~_=u~Z4_CoQTm>I-6@0{1@DW$RM_eVJ zYU)Dp1YhvI;ESiGUcs+d@aq-)dIi5;!LL{F>lOTZ1;1XwuUGKv75sVyzh1$wSMci< z{CWkyLBVfO@Ea8T1_i%C!EaFT8x;Ho1-C)LZBTF<6x;>{w?V;eP;eU*+(rebQNe-g zM)FnDsNggzIE{+VMg^}?!E03T8Wp@o1+P)TYgF(W6})CeXS1TSS;22s@S7Frr@_J_-zV)n}Xk_;I}FG zZ3=#yg5R#-w=4SF75(iBe!GI-uHd&T`r8%!b_Ks(!Eaaa+ZFtF1;1UvZ&&c!6?`>r z)^sTN9SVMjg5ROwcPRKB3Vw%z-=W}lDEJ);eusiD$7xSZhl1as;LCB`Q>(_oS~XhM z%J~A{OZc@O3BT4O;n#X3{92EMU+a{)uM=U-nNtEBLa1;#t9${S(g$zU-fPR`6y2 z#Iu4g`=_^E_D@_D{jz`JSo)!OP|Ma%Y{)wyNx9pF2R{WOz z5zmTV*&nMsY96TebR 0.001: + check_passed = False + check_msg = \ + 'The running time for scene {} and image {} is not the same for' \ + ' all estimates'.format(result['scene_id'], result['im_id']) + misc.log(check_msg) + break + else: + times[result_key] = result['time'] + + except Exception as e: + check_passed = False + check_msg = 'ERROR when loading file {}:\n{}'.format(path, e) + misc.log(check_msg) + + return check_passed, check_msg + + +def load_ply(path): + """Loads a 3D mesh model from a PLY file. + + :param path: Path to a PLY file. + :return: The loaded model given by a dictionary with items: + - 'pts' (nx3 ndarray) + - 'normals' (nx3 ndarray), optional + - 'colors' (nx3 ndarray), optional + - 'faces' (mx3 ndarray), optional + - 'texture_uv' (nx2 ndarray), optional + - 'texture_uv_face' (mx6 ndarray), optional + - 'texture_file' (string), optional + """ + f = open(path, 'rb') + + # Only triangular faces are supported. + face_n_corners = 3 + + n_pts = 0 + n_faces = 0 + pt_props = [] + face_props = [] + is_binary = False + header_vertex_section = False + header_face_section = False + texture_file = None + + # Read the header. + while True: + + # Strip the newline character(s). 
+ line = f.readline().rstrip('\n').rstrip('\r') + + if line.startswith('comment TextureFile'): + texture_file = line.split()[-1] + elif line.startswith('element vertex'): + n_pts = int(line.split()[-1]) + header_vertex_section = True + header_face_section = False + elif line.startswith('element face'): + n_faces = int(line.split()[-1]) + header_vertex_section = False + header_face_section = True + elif line.startswith('element'): # Some other element. + header_vertex_section = False + header_face_section = False + elif line.startswith('property') and header_vertex_section: + # (name of the property, data type) + pt_props.append((line.split()[-1], line.split()[-2])) + elif line.startswith('property list') and header_face_section: + elems = line.split() + if elems[-1] == 'vertex_indices' or elems[-1] == 'vertex_index': + # (name of the property, data type) + face_props.append(('n_corners', elems[2])) + for i in range(face_n_corners): + face_props.append(('ind_' + str(i), elems[3])) + elif elems[-1] == 'texcoord': + # (name of the property, data type) + face_props.append(('texcoord', elems[2])) + for i in range(face_n_corners * 2): + face_props.append(('texcoord_ind_' + str(i), elems[3])) + else: + misc.log('Warning: Not supported face property: ' + elems[-1]) + elif line.startswith('format'): + if 'binary' in line: + is_binary = True + elif line.startswith('end_header'): + break + + # Prepare data structures. + model = {} + if texture_file is not None: + model['texture_file'] = texture_file + model['pts'] = np.zeros((n_pts, 3), np.float) + if n_faces > 0: + model['faces'] = np.zeros((n_faces, face_n_corners), np.float) + + pt_props_names = [p[0] for p in pt_props] + face_props_names = [p[0] for p in face_props] + + is_normal = False + if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)): + is_normal = True + model['normals'] = np.zeros((n_pts, 3), np.float) + + is_color = False + if {'red', 'green', 'blue'}.issubset(set(pt_props_names)): + is_color = True + model['colors'] = np.zeros((n_pts, 3), np.float) + + is_texture_pt = False + if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)): + is_texture_pt = True + model['texture_uv'] = np.zeros((n_pts, 2), np.float) + + is_texture_face = False + if {'texcoord'}.issubset(set(face_props_names)): + is_texture_face = True + model['texture_uv_face'] = np.zeros((n_faces, 6), np.float) + + # Formats for the binary case. + formats = { + 'float': ('f', 4), + 'double': ('d', 8), + 'int': ('i', 4), + 'uchar': ('B', 1) + } + + # Load vertices. 
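# A minimal sketch of how the 'formats' table above is used in the binary
# branch of the loading loops: each property value is read with struct.unpack
# (the packed bytes below stand in for bytes read from a binary PLY file; a
# plain 'f' code uses the machine's native byte order):
import struct
fmt, size = 'f', 4                          # formats['float']
raw = struct.pack(fmt, 1.5)                 # 4 bytes as they would be stored in a binary PLY
val = struct.unpack(fmt, raw)[0]            # -> 1.5, exactly what f.read(size) + unpack yields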
+ for pt_id in range(n_pts): + prop_vals = {} + load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz', + 'red', 'green', 'blue', 'texture_u', 'texture_v'] + if is_binary: + for prop in pt_props: + format = formats[prop[1]] + read_data = f.read(format[1]) + val = struct.unpack(format[0], read_data)[0] + if prop[0] in load_props: + prop_vals[prop[0]] = val + else: + elems = f.readline().rstrip('\n').rstrip('\r').split() + for prop_id, prop in enumerate(pt_props): + if prop[0] in load_props: + prop_vals[prop[0]] = elems[prop_id] + + model['pts'][pt_id, 0] = float(prop_vals['x']) + model['pts'][pt_id, 1] = float(prop_vals['y']) + model['pts'][pt_id, 2] = float(prop_vals['z']) + + if is_normal: + model['normals'][pt_id, 0] = float(prop_vals['nx']) + model['normals'][pt_id, 1] = float(prop_vals['ny']) + model['normals'][pt_id, 2] = float(prop_vals['nz']) + + if is_color: + model['colors'][pt_id, 0] = float(prop_vals['red']) + model['colors'][pt_id, 1] = float(prop_vals['green']) + model['colors'][pt_id, 2] = float(prop_vals['blue']) + + if is_texture_pt: + model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u']) + model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v']) + + # Load faces. + for face_id in range(n_faces): + prop_vals = {} + if is_binary: + for prop in face_props: + format = formats[prop[1]] + val = struct.unpack(format[0], f.read(format[1]))[0] + if prop[0] == 'n_corners': + if val != face_n_corners: + raise ValueError('Only triangular faces are supported.') + elif prop[0] == 'texcoord': + if val != face_n_corners * 2: + raise ValueError('Wrong number of UV face coordinates.') + else: + prop_vals[prop[0]] = val + else: + elems = f.readline().rstrip('\n').rstrip('\r').split() + for prop_id, prop in enumerate(face_props): + if prop[0] == 'n_corners': + if int(elems[prop_id]) != face_n_corners: + raise ValueError('Only triangular faces are supported.') + elif prop[0] == 'texcoord': + if int(elems[prop_id]) != face_n_corners * 2: + raise ValueError('Wrong number of UV face coordinates.') + else: + prop_vals[prop[0]] = elems[prop_id] + + model['faces'][face_id, 0] = int(prop_vals['ind_0']) + model['faces'][face_id, 1] = int(prop_vals['ind_1']) + model['faces'][face_id, 2] = int(prop_vals['ind_2']) + + if is_texture_face: + for i in range(6): + model['texture_uv_face'][face_id, i] = float( + prop_vals['texcoord_ind_{}'.format(i)]) + + f.close() + + return model + + +def save_ply(path, model, extra_header_comments=None): + """Saves a 3D mesh model to a PLY file. + + :param path: Path to a PLY file. + :param model: 3D model given by a dictionary with items: + - 'pts' (nx3 ndarray) + - 'normals' (nx3 ndarray, optional) + - 'colors' (nx3 ndarray, optional) + - 'faces' (mx3 ndarray, optional) + - 'texture_uv' (nx2 ndarray, optional) + - 'texture_uv_face' (mx6 ndarray, optional) + - 'texture_file' (string, optional) + :param extra_header_comments: Extra header comment (optional). 
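# A hedged usage sketch for load_ply defined above (the import path and the
# model path are assumptions; optional keys such as 'normals', 'colors' or
# 'faces' are present only if stored in the file):
from bop_toolkit_lib import inout
model = inout.load_ply('models/obj_000001.ply')   # hypothetical path
print(model['pts'].shape)                         # (n_pts, 3)
print(sorted(model.keys()))                       # e.g. ['faces', 'pts', ...]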
+ """ + pts = model['pts'] + pts_colors = model['colors'] if 'colors' in model.keys() else np.array([]) + pts_normals = model['normals'] if 'normals' in model.keys() else np.array([]) + faces = model['faces'] if 'faces' in model.keys() else np.array([]) + texture_uv = model[ + 'texture_uv'] if 'texture_uv' in model.keys() else np.array([]) + texture_uv_face = model[ + 'texture_uv_face'] if 'texture_uv_face' in model.keys() else np.array([]) + texture_file = model[ + 'texture_file'] if 'texture_file' in model.keys() else np.array([]) + + save_ply2(path, pts, pts_colors, pts_normals, faces, texture_uv, + texture_uv_face, + texture_file, extra_header_comments) + + +def save_ply2(path, pts, pts_colors=None, pts_normals=None, faces=None, + texture_uv=None, texture_uv_face=None, texture_file=None, + extra_header_comments=None): + """Saves a 3D mesh model to a PLY file. + + :param path: Path to the resulting PLY file. + :param pts: nx3 ndarray with vertices. + :param pts_colors: nx3 ndarray with vertex colors (optional). + :param pts_normals: nx3 ndarray with vertex normals (optional). + :param faces: mx3 ndarray with mesh faces (optional). + :param texture_uv: nx2 ndarray with per-vertex UV texture coordinates + (optional). + :param texture_uv_face: mx6 ndarray with per-face UV texture coordinates + (optional). + :param texture_file: Path to a texture image -- relative to the resulting + PLY file (optional). + :param extra_header_comments: Extra header comment (optional). + """ + pts_colors = np.array(pts_colors) + if pts_colors is not None: + assert (len(pts) == len(pts_colors)) + + valid_pts_count = 0 + for pt_id, pt in enumerate(pts): + if not np.isnan(np.sum(pt)): + valid_pts_count += 1 + + f = open(path, 'w') + f.write( + 'ply\n' + 'format ascii 1.0\n' + # 'format binary_little_endian 1.0\n' + ) + + if texture_file is not None: + f.write('comment TextureFile {}\n'.format(texture_file)) + + if extra_header_comments is not None: + for comment in extra_header_comments: + f.write('comment {}\n'.format(comment)) + + f.write( + 'element vertex ' + str(valid_pts_count) + '\n' + 'property float x\n' + 'property float y\n' + 'property float z\n' + ) + if pts_normals is not None: + f.write( + 'property float nx\n' + 'property float ny\n' + 'property float nz\n' + ) + if pts_colors is not None: + f.write( + 'property uchar red\n' + 'property uchar green\n' + 'property uchar blue\n' + ) + if texture_uv is not None: + f.write( + 'property float texture_u\n' + 'property float texture_v\n' + ) + if faces is not None: + f.write( + 'element face ' + str(len(faces)) + '\n' + 'property list uchar int vertex_indices\n' + ) + if texture_uv_face is not None: + f.write( + 'property list uchar float texcoord\n' + ) + f.write('end_header\n') + + format_float = "{:.4f}" + format_2float = " ".join((format_float for _ in range(2))) + format_3float = " ".join((format_float for _ in range(3))) + format_int = "{:d}" + format_3int = " ".join((format_int for _ in range(3))) + + # Save vertices. + for pt_id, pt in enumerate(pts): + if not np.isnan(np.sum(pt)): + f.write(format_3float.format(*pts[pt_id].astype(float))) + if pts_normals is not None: + f.write(' ') + f.write(format_3float.format(*pts_normals[pt_id].astype(float))) + if pts_colors is not None: + f.write(' ') + f.write(format_3int.format(*pts_colors[pt_id].astype(int))) + if texture_uv is not None: + f.write(' ') + f.write(format_2float.format(*texture_uv[pt_id].astype(float))) + f.write('\n') + + # Save faces. 
+ if faces is not None: + for face_id, face in enumerate(faces): + line = ' '.join(map(str, map(int, [len(face)] + list(face.squeeze())))) + if texture_uv_face is not None: + uv = texture_uv_face[face_id] + line += ' ' + ' '.join( + map(str, [len(uv)] + map(float, list(uv.squeeze())))) + f.write(line) + f.write('\n') + + f.close() diff --git a/bop_toolkit_lib/misc.py b/bop_toolkit_lib/misc.py new file mode 100644 index 0000000..a6922f0 --- /dev/null +++ b/bop_toolkit_lib/misc.py @@ -0,0 +1,378 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Miscellaneous functions.""" + +import os +import sys +import time +import math +import subprocess +import numpy as np +from scipy.spatial import distance + +from bop_toolkit_lib import transform + + +def log(s): + """A logging function. + + :param s: String to print (with the current date and time). + """ + sys.stdout.write('{}: {}\n'.format(time.strftime('%m/%d|%H:%M:%S'), s)) + sys.stdout.flush() + + +def ensure_dir(path): + """Ensures that the specified directory exists. + + :param path: Path to the directory. + """ + if not os.path.exists(path): + os.makedirs(path) + + +def get_symmetry_transformations(model_info, max_sym_disc_step): + """Returns a set of symmetry transformations for an object model. + + :param model_info: See files models_info.json provided with the datasets. + :param max_sym_disc_step: The maximum fraction of the object diameter which + the vertex that is the furthest from the axis of continuous rotational + symmetry travels between consecutive discretized rotations. + :return: The set of symmetry transformations. + """ + # Identity. + trans = [{'R': np.eye(3), 't': np.array([[0, 0, 0]]).T}] + + # Discrete symmetries. + if 'symmetries_discrete' in model_info: + for sym in model_info['symmetries_discrete']: + sym_4x4 = np.reshape(sym, (4, 4)) + R = sym_4x4[:3, :3] + t = sym_4x4[:3, 3].reshape((3, 1)) + trans.append({'R': R, 't': t}) + + # Discretized continuous symmetries. + if 'symmetries_continuous' in model_info: + for sym in model_info['symmetries_continuous']: + axis = np.array(sym['axis']) + offset = np.array(sym['offset']).reshape((3, 1)) + + # (PI * diameter) / (max_sym_disc_step * diameter) = discrete_steps_count + discrete_steps_count = int(np.ceil(np.pi / max_sym_disc_step)) + + # Discrete step in radians. + discrete_step = 2.0 * np.pi / discrete_steps_count + + for i in range(1, discrete_steps_count): + R = transform.rotation_matrix(i * discrete_step, axis)[:3, :3] + t = -R.dot(offset) + offset + trans.append({'R': R, 't': t}) + + return trans + + +def project_pts(pts, K, R, t): + """Projects 3D points. + + :param pts: nx3 ndarray with the 3D points. + :param K: 3x3 ndarray with an intrinsic camera matrix. + :param R: 3x3 ndarray with a rotation matrix. + :param t: 3x1 ndarray with a translation vector. + :return: nx2 ndarray with 2D image coordinates of the projections. 
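# A worked sketch of the projection computed by project_pts (the intrinsics,
# pose and points below are assumed values, not taken from any dataset):
import numpy as np
from bop_toolkit_lib import misc
K = np.array([[572.4, 0.0, 325.3], [0.0, 573.6, 242.0], [0.0, 0.0, 1.0]])
R, t = np.eye(3), np.array([[0.0], [0.0], [1000.0]])   # object 1 m in front of the camera
pts = np.array([[0.0, 0.0, 0.0], [100.0, 0.0, 0.0]])   # two model points [mm]
uv = misc.project_pts(pts, K, R, t)
# uv[0] lands at the principal point (cx, cy); uv[1] is shifted right by
# fx * 100 / 1000 ~= 57.2 px.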
+ """ + assert (pts.shape[1] == 3) + P = K.dot(np.hstack((R, t))) + pts_h = np.hstack((pts, np.ones((pts.shape[0], 1)))) + pts_im = P.dot(pts_h.T) + pts_im /= pts_im[2, :] + return pts_im[:2, :].T + + +class Precomputer(object): + """ Caches pre_Xs, pre_Ys for a 30% speedup of depth_im_to_dist_im() + """ + xs, ys = None, None + pre_Xs,pre_Ys = None,None + depth_im_shape = None + K = None + + @staticmethod + def precompute_lazy(depth_im, K): + """ Lazy precomputation for depth_im_to_dist_im() if depth_im.shape or K changes + + :param depth_im: hxw ndarray with the input depth image, where depth_im[y, x] + is the Z coordinate of the 3D point [X, Y, Z] that projects to pixel [x, y], + or 0 if there is no such 3D point (this is a typical output of the + Kinect-like sensors). + :param K: 3x3 ndarray with an intrinsic camera matrix. + :return: hxw ndarray (Xs/depth_im, Ys/depth_im) + """ + if depth_im.shape != Precomputer.depth_im_shape: + Precomputer.depth_im_shape = depth_im.shape + Precomputer.xs, Precomputer.ys = np.meshgrid( + np.arange(depth_im.shape[1]), np.arange(depth_im.shape[0])) + + if depth_im.shape != Precomputer.depth_im_shape \ + or not np.all(K == Precomputer.K): + Precomputer.K = K + Precomputer.pre_Xs = (Precomputer.xs - K[0, 2]) / np.float64(K[0, 0]) + Precomputer.pre_Ys = (Precomputer.ys - K[1, 2]) / np.float64(K[1, 1]) + + return Precomputer.pre_Xs, Precomputer.pre_Ys + + +def depth_im_to_dist_im_fast(depth_im, K): + """Converts a depth image to a distance image. + + :param depth_im: hxw ndarray with the input depth image, where depth_im[y, x] + is the Z coordinate of the 3D point [X, Y, Z] that projects to pixel [x, y], + or 0 if there is no such 3D point (this is a typical output of the + Kinect-like sensors). + :param K: 3x3 ndarray with an intrinsic camera matrix. + :return: hxw ndarray with the distance image, where dist_im[y, x] is the + distance from the camera center to the 3D point [X, Y, Z] that projects to + pixel [x, y], or 0 if there is no such 3D point. + """ + # Only recomputed if depth_im.shape or K changes. + pre_Xs, pre_Ys = Precomputer.precompute_lazy(depth_im, K) + + dist_im = np.sqrt( + np.multiply(pre_Xs, depth_im)**2 + + np.multiply(pre_Ys, depth_im)**2 + + depth_im.astype(np.float64)**2) + + return dist_im + + +def depth_im_to_dist_im(depth_im, K): + """Converts a depth image to a distance image. + :param depth_im: hxw ndarray with the input depth image, where depth_im[y, x] + is the Z coordinate of the 3D point [X, Y, Z] that projects to pixel [x, y], + or 0 if there is no such 3D point (this is a typical output of the + Kinect-like sensors). + :param K: 3x3 ndarray with an intrinsic camera matrix. + :return: hxw ndarray with the distance image, where dist_im[y, x] is the + distance from the camera center to the 3D point [X, Y, Z] that projects to + pixel [x, y], or 0 if there is no such 3D point. + """ + xs, ys = np.meshgrid( + np.arange(depth_im.shape[1]), np.arange(depth_im.shape[0])) + + Xs = np.multiply(xs - K[0, 2], depth_im) * (1.0 / K[0, 0]) + Ys = np.multiply(ys - K[1, 2], depth_im) * (1.0 / K[1, 1]) + + dist_im = np.sqrt( + Xs.astype(np.float64)**2 + + Ys.astype(np.float64)**2 + + depth_im.astype(np.float64)**2) + # dist_im = np.linalg.norm(np.dstack((Xs, Ys, depth_im)), axis=2) # Slower. + + return dist_im + +def clip_pt_to_im(pt, im_size): + """Clips a 2D point to the image frame. + + :param pt: 2D point (x, y). + :param im_size: Image size (width, height). + :return: Clipped 2D point (x, y). 
+ """ + return [min(max(pt[0], 0), im_size[0] - 1), + min(max(pt[1], 0), im_size[1] - 1)] + + +def calc_2d_bbox(xs, ys, im_size=None, clip=False): + """Calculates 2D bounding box of the given set of 2D points. + + :param xs: 1D ndarray with x-coordinates of 2D points. + :param ys: 1D ndarray with y-coordinates of 2D points. + :param im_size: Image size (width, height) (used for optional clipping). + :param clip: Whether to clip the bounding box (default == False). + :return: 2D bounding box (x, y, w, h), where (x, y) is the top-left corner + and (w, h) is width and height of the bounding box. + """ + bb_min = [xs.min(), ys.min()] + bb_max = [xs.max(), ys.max()] + if clip: + assert (im_size is not None) + bb_min = clip_pt_to_im(bb_min, im_size) + bb_max = clip_pt_to_im(bb_max, im_size) + return [bb_min[0], bb_min[1], bb_max[0] - bb_min[0], bb_max[1] - bb_min[1]] + + +def calc_3d_bbox(xs, ys, zs): + """Calculates 3D bounding box of the given set of 3D points. + + :param xs: 1D ndarray with x-coordinates of 3D points. + :param ys: 1D ndarray with y-coordinates of 3D points. + :param zs: 1D ndarray with z-coordinates of 3D points. + :return: 3D bounding box (x, y, z, w, h, d), where (x, y, z) is the top-left + corner and (w, h, d) is width, height and depth of the bounding box. + """ + bb_min = [xs.min(), ys.min(), zs.min()] + bb_max = [xs.max(), ys.max(), zs.max()] + return [bb_min[0], bb_min[1], bb_min[2], + bb_max[0] - bb_min[0], bb_max[1] - bb_min[1], bb_max[2] - bb_min[2]] + + +def iou(bb_a, bb_b): + """Calculates the Intersection over Union (IoU) of two 2D bounding boxes. + + :param bb_a: 2D bounding box (x1, y1, w1, h1) -- see calc_2d_bbox. + :param bb_b: 2D bounding box (x2, y2, w2, h2) -- see calc_2d_bbox. + :return: The IoU value. + """ + # [x1, y1, width, height] --> [x1, y1, x2, y2] + tl_a, br_a = (bb_a[0], bb_a[1]), (bb_a[0] + bb_a[2], bb_a[1] + bb_a[3]) + tl_b, br_b = (bb_b[0], bb_b[1]), (bb_b[0] + bb_b[2], bb_b[1] + bb_b[3]) + + # Intersection rectangle. + tl_inter = max(tl_a[0], tl_b[0]), max(tl_a[1], tl_b[1]) + br_inter = min(br_a[0], br_b[0]), min(br_a[1], br_b[1]) + + # Width and height of the intersection rectangle. + w_inter = br_inter[0] - tl_inter[0] + h_inter = br_inter[1] - tl_inter[1] + + if w_inter > 0 and h_inter > 0: + area_inter = w_inter * h_inter + area_a = bb_a[2] * bb_a[3] + area_b = bb_b[2] * bb_b[3] + iou = area_inter / float(area_a + area_b - area_inter) + else: + iou = 0.0 + + return iou + + +def transform_pts_Rt(pts, R, t): + """Applies a rigid transformation to 3D points. + + :param pts: nx3 ndarray with 3D points. + :param R: 3x3 ndarray with a rotation matrix. + :param t: 3x1 ndarray with a translation vector. + :return: nx3 ndarray with transformed 3D points. + """ + assert (pts.shape[1] == 3) + pts_t = R.dot(pts.T) + t.reshape((3, 1)) + return pts_t.T + + +def calc_pts_diameter(pts): + """Calculates the diameter of a set of 3D points (i.e. the maximum distance + between any two points in the set). + + :param pts: nx3 ndarray with 3D points. + :return: The calculated diameter. + """ + diameter = -1.0 + for pt_id in range(pts.shape[0]): + pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1]) + pts_diff = pt_dup - pts[pt_id:, :] + max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max()) + if max_dist > diameter: + diameter = max_dist + return diameter + + +def calc_pts_diameter2(pts): + """Calculates the diameter of a set of 3D points (i.e. the maximum distance + between any two points in the set). 
Faster but requires more memory than + calc_pts_diameter. + + :param pts: nx3 ndarray with 3D points. + :return: The calculated diameter. + """ + dists = distance.cdist(pts, pts, 'euclidean') + diameter = np.max(dists) + return diameter + + +def overlapping_sphere_projections(radius, p1, p2): + """Checks if projections of two spheres overlap (approximated). + + :param radius: Radius of the two spheres. + :param p1: [X1, Y1, Z1] center of the first sphere. + :param p2: [X2, Y2, Z2] center of the second sphere. + :return: True if the projections of the two spheres overlap. + """ + # 2D projections of centers of the spheres. + proj1 = (p1 / p1[2])[:2] + proj2 = (p2 / p2[2])[:2] + + # Distance between the center projections. + proj_dist = np.linalg.norm(proj1 - proj2) + + # The max. distance of the center projections at which the sphere projections, + # i.e. sphere silhouettes, still overlap (approximated). + proj_dist_thresh = radius * (1.0 / p1[2] + 1.0 / p2[2]) + + return proj_dist < proj_dist_thresh + + +def get_error_signature(error_type, n_top, **kwargs): + """Generates a signature for the specified settings of pose error calculation. + + :param error_type: Type of error. + :param n_top: Top N pose estimates (with the highest score) to be evaluated + for each object class in each image. + :return: Generated signature. + """ + error_sign = 'error=' + error_type + '_ntop=' + str(n_top) + if error_type == 'vsd': + if kwargs['vsd_tau'] == float('inf'): + vsd_tau_str = 'inf' + else: + vsd_tau_str = '{:.3f}'.format(kwargs['vsd_tau']) + error_sign += '_delta={:.3f}_tau={}'.format( + kwargs['vsd_delta'], vsd_tau_str) + return error_sign + + +def get_score_signature(correct_th, visib_gt_min): + """Generates a signature for a performance score. + + :param visib_gt_min: Minimum visible surface fraction of a valid GT pose. + :return: Generated signature. + """ + eval_sign = 'th=' + '-'.join(['{:.3f}'.format(t) for t in correct_th]) + eval_sign += '_min-visib={:.3f}'.format(visib_gt_min) + return eval_sign + + +def run_meshlab_script(meshlab_server_path, meshlab_script_path, model_in_path, + model_out_path, attrs_to_save): + """Runs a MeshLab script on a 3D model. + + meshlabserver depends on X server. To remove this dependence (on linux), run: + 1) Xvfb :100 & + 2) export DISPLAY=:100.0 + 3) meshlabserver + + :param meshlab_server_path: Path to meshlabserver.exe. + :param meshlab_script_path: Path to an MLX MeshLab script. + :param model_in_path: Path to the input 3D model saved in the PLY format. + :param model_out_path: Path to the output 3D model saved in the PLY format. 
+ :param attrs_to_save: Attributes to save: + - vc -> vertex colors + - vf -> vertex flags + - vq -> vertex quality + - vn -> vertex normals + - vt -> vertex texture coords + - fc -> face colors + - ff -> face flags + - fq -> face quality + - fn -> face normals + - wc -> wedge colors + - wn -> wedge normals + - wt -> wedge texture coords + """ + meshlabserver_cmd = [meshlab_server_path, '-s', meshlab_script_path, '-i', + model_in_path, '-o', model_out_path] + + if len(attrs_to_save): + meshlabserver_cmd += ['-m'] + attrs_to_save + + log(' '.join(meshlabserver_cmd)) + if subprocess.call(meshlabserver_cmd) != 0: + exit(-1) diff --git a/bop_toolkit_lib/pose_error.py b/bop_toolkit_lib/pose_error.py new file mode 100644 index 0000000..a4463eb --- /dev/null +++ b/bop_toolkit_lib/pose_error.py @@ -0,0 +1,330 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Implementation of the pose error functions described in: +Hodan, Michel et al., "BOP: Benchmark for 6D Object Pose Estimation", ECCV'18 +Hodan et al., "On Evaluation of 6D Object Pose Estimation", ECCVW'16 +""" + +import math +import numpy as np +from scipy import spatial + +from bop_toolkit_lib import misc +from bop_toolkit_lib import visibility + + +def vsd(R_est, t_est, R_gt, t_gt, depth_test, K, delta, taus, + normalized_by_diameter, diameter, renderer, obj_id, cost_type='step'): + """Visible Surface Discrepancy -- by Hodan, Michel et al. (ECCV 2018). + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param depth_test: hxw ndarray with the test depth image. + :param K: 3x3 ndarray with an intrinsic camera matrix. + :param delta: Tolerance used for estimation of the visibility masks. + :param taus: A list of misalignment tolerance values. + :param normalized_by_diameter: Whether to normalize the pixel-wise distances + by the object diameter. + :param diameter: Object diameter. + :param renderer: Instance of the Renderer class (see renderer.py). + :param obj_id: Object identifier. + :param cost_type: Type of the pixel-wise matching cost: + 'tlinear' - Used in the original definition of VSD in: + Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16 + 'step' - Used for SIXD Challenge 2017 onwards. + :return: List of calculated errors (one for each misalignment tolerance). + """ + # Render depth images of the model in the estimated and the ground-truth pose. + fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + depth_est = renderer.render_object( + obj_id, R_est, t_est, fx, fy, cx, cy)['depth'] + depth_gt = renderer.render_object( + obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth'] + + # Convert depth images to distance images. + dist_test = misc.depth_im_to_dist_im_fast(depth_test, K) + dist_gt = misc.depth_im_to_dist_im_fast(depth_gt, K) + dist_est = misc.depth_im_to_dist_im_fast(depth_est, K) + + # Visibility mask of the model in the ground-truth pose. + visib_gt = visibility.estimate_visib_mask_gt( + dist_test, dist_gt, delta, visib_mode='bop19') + + # Visibility mask of the model in the estimated pose. + visib_est = visibility.estimate_visib_mask_est( + dist_test, dist_est, visib_gt, delta, visib_mode='bop19') + + # Intersection and union of the visibility masks. 
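# The error assembled below is, for a misalignment tolerance tau:
#   e_VSD = (sum of per-pixel costs over (visib_gt & visib_est)
#            + |visib_gt | visib_est| - |visib_gt & visib_est|) / |visib_gt | visib_est|
# Pixels visible in only one of the two poses contribute a cost of 1; pixels
# visible in both contribute the 'step' or 'tlinear' cost of their distance
# difference.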
+ visib_inter = np.logical_and(visib_gt, visib_est) + visib_union = np.logical_or(visib_gt, visib_est) + + visib_union_count = visib_union.sum() + visib_comp_count = visib_union_count - visib_inter.sum() + + # Pixel-wise distances. + dists = np.abs(dist_gt[visib_inter] - dist_est[visib_inter]) + + # Normalization of pixel-wise distances by object diameter. + if normalized_by_diameter: + dists /= diameter + + # Calculate VSD for each provided value of the misalignment tolerance. + if visib_union_count == 0: + errors = [1.0] * len(taus) + else: + errors = [] + for tau in taus: + + # Pixel-wise matching cost. + if cost_type == 'step': + costs = dists >= tau + elif cost_type == 'tlinear': # Truncated linear function. + costs = dists / tau + costs[costs > 1.0] = 1.0 + else: + raise ValueError('Unknown pixel matching cost.') + + e = (np.sum(costs) + visib_comp_count) / float(visib_union_count) + errors.append(e) + + return errors + + +def mssd(R_est, t_est, R_gt, t_gt, pts, syms): + """Maximum Symmetry-Aware Surface Distance (MSSD). + + See: http://bop.felk.cvut.cz/challenges/bop-challenge-2019/ + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param pts: nx3 ndarray with 3D model points. + :param syms: Set of symmetry transformations, each given by a dictionary with: + - 'R': 3x3 ndarray with the rotation matrix. + - 't': 3x1 ndarray with the translation vector. + :return: The calculated error. + """ + pts_est = misc.transform_pts_Rt(pts, R_est, t_est) + es = [] + for sym in syms: + R_gt_sym = R_gt.dot(sym['R']) + t_gt_sym = R_gt.dot(sym['t']) + t_gt + pts_gt_sym = misc.transform_pts_Rt(pts, R_gt_sym, t_gt_sym) + es.append(np.linalg.norm(pts_est - pts_gt_sym, axis=1).max()) + return min(es) + + +def mspd(R_est, t_est, R_gt, t_gt, K, pts, syms): + """Maximum Symmetry-Aware Projection Distance (MSPD). + + See: http://bop.felk.cvut.cz/challenges/bop-challenge-2019/ + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param K: 3x3 ndarray with the intrinsic camera matrix. + :param pts: nx3 ndarray with 3D model points. + :param syms: Set of symmetry transformations, each given by a dictionary with: + - 'R': 3x3 ndarray with the rotation matrix. + - 't': 3x1 ndarray with the translation vector. + :return: The calculated error. + """ + proj_est = misc.project_pts(pts, K, R_est, t_est) + es = [] + for sym in syms: + R_gt_sym = R_gt.dot(sym['R']) + t_gt_sym = R_gt.dot(sym['t']) + t_gt + proj_gt_sym = misc.project_pts(pts, K, R_gt_sym, t_gt_sym) + es.append(np.linalg.norm(proj_est - proj_gt_sym, axis=1).max()) + return min(es) + + +def add(R_est, t_est, R_gt, t_gt, pts): + """Average Distance of Model Points for objects with no indistinguishable + views - by Hinterstoisser et al. (ACCV'12). + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param pts: nx3 ndarray with 3D model points. + :return: The calculated error. 
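# A minimal numeric sketch for the add() error defined here: with identical
# rotations and a pure translation offset, the error is just the length of that
# offset (all values below are made up):
import numpy as np
from bop_toolkit_lib import pose_error
pts = np.random.rand(100, 3) * 100.0              # assumed model points [mm]
R = np.eye(3)
t_gt = np.array([[0.0], [0.0], [1000.0]])
t_est = t_gt + np.array([[3.0], [4.0], [0.0]])    # 5 mm off
err = pose_error.add(R, t_est, R, t_gt, pts)      # -> 5.0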
+ """ + pts_est = misc.transform_pts_Rt(pts, R_est, t_est) + pts_gt = misc.transform_pts_Rt(pts, R_gt, t_gt) + e = np.linalg.norm(pts_est - pts_gt, axis=1).mean() + return e + + +def adi(R_est, t_est, R_gt, t_gt, pts): + """Average Distance of Model Points for objects with indistinguishable views + - by Hinterstoisser et al. (ACCV'12). + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param pts: nx3 ndarray with 3D model points. + :return: The calculated error. + """ + pts_est = misc.transform_pts_Rt(pts, R_est, t_est) + pts_gt = misc.transform_pts_Rt(pts, R_gt, t_gt) + + # Calculate distances to the nearest neighbors from vertices in the + # ground-truth pose to vertices in the estimated pose. + nn_index = spatial.cKDTree(pts_est) + nn_dists, _ = nn_index.query(pts_gt, k=1) + + e = nn_dists.mean() + return e + + +def re(R_est, R_gt): + """Rotational Error. + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :return: The calculated error. + """ + assert (R_est.shape == R_gt.shape == (3, 3)) + error_cos = float(0.5 * (np.trace(R_est.dot(np.linalg.inv(R_gt))) - 1.0)) + + # Avoid invalid values due to numerical errors. + error_cos = min(1.0, max(-1.0, error_cos)) + + error = math.acos(error_cos) + error = 180.0 * error / np.pi # Convert [rad] to [deg]. + return error + + +def te(t_est, t_gt): + """Translational Error. + + :param t_est: 3x1 ndarray with the estimated translation vector. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :return: The calculated error. + """ + assert (t_est.size == t_gt.size == 3) + error = np.linalg.norm(t_gt - t_est) + return error + + +def proj(R_est, t_est, R_gt, t_gt, K, pts): + """Average distance of projections of object model vertices [px] + - by Brachmann et al. (CVPR'16). + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param K: 3x3 ndarray with an intrinsic camera matrix. + :param pts: nx3 ndarray with 3D model points. + :return: The calculated error. + """ + proj_est = misc.project_pts(pts, K, R_est, t_est) + proj_gt = misc.project_pts(pts, K, R_gt, t_gt) + e = np.linalg.norm(proj_est - proj_gt, axis=1).mean() + return e + + +def cou_mask(mask_est, mask_gt): + """Complement over Union of 2D binary masks. + + :param mask_est: hxw ndarray with the estimated mask. + :param mask_gt: hxw ndarray with the ground-truth mask. + :return: The calculated error. + """ + mask_est_bool = mask_est.astype(np.bool) + mask_gt_bool = mask_gt.astype(np.bool) + + inter = np.logical_and(mask_gt_bool, mask_est_bool) + union = np.logical_or(mask_gt_bool, mask_est_bool) + + union_count = float(union.sum()) + if union_count > 0: + e = 1.0 - inter.sum() / union_count + else: + e = 1.0 + return e + + +def cus(R_est, t_est, R_gt, t_gt, K, renderer, obj_id): + """Complement over Union of projected 2D masks. + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. 
+ :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param K: 3x3 ndarray with an intrinsic camera matrix. + :param renderer: Instance of the Renderer class (see renderer.py). + :param obj_id: Object identifier. + :return: The calculated error. + """ + # Render depth images of the model at the estimated and the ground-truth pose. + fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + depth_est = renderer.render_object( + obj_id, R_est, t_est, fx, fy, cx, cy)['depth'] + depth_gt = renderer.render_object( + obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth'] + + # Masks of the rendered model and their intersection and union. + mask_est = depth_est > 0 + mask_gt = depth_gt > 0 + inter = np.logical_and(mask_gt, mask_est) + union = np.logical_or(mask_gt, mask_est) + + union_count = float(union.sum()) + if union_count > 0: + e = 1.0 - inter.sum() / union_count + else: + e = 1.0 + return e + + +def cou_bb(bb_est, bb_gt): + """Complement over Union of 2D bounding boxes. + + :param bb_est: The estimated bounding box (x1, y1, w1, h1). + :param bb_gt: The ground-truth bounding box (x2, y2, w2, h2). + :return: The calculated error. + """ + e = 1.0 - misc.iou(bb_est, bb_gt) + return e + + +def cou_bb_proj(R_est, t_est, R_gt, t_gt, K, renderer, obj_id): + """Complement over Union of projected 2D bounding boxes. + + :param R_est: 3x3 ndarray with the estimated rotation matrix. + :param t_est: 3x1 ndarray with the estimated translation vector. + :param R_gt: 3x3 ndarray with the ground-truth rotation matrix. + :param t_gt: 3x1 ndarray with the ground-truth translation vector. + :param K: 3x3 ndarray with an intrinsic camera matrix. + :param renderer: Instance of the Renderer class (see renderer.py). + :param obj_id: Object identifier. + :return: The calculated error. + """ + # Render depth images of the model at the estimated and the ground-truth pose. + fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + depth_est = renderer.render_object( + obj_id, R_est, t_est, fx, fy, cx, cy)['depth'] + depth_gt = renderer.render_object( + obj_id, R_gt, t_gt, fx, fy, cx, cy)['depth'] + + # Masks of the rendered model and their intersection and union + mask_est = depth_est > 0 + mask_gt = depth_gt > 0 + + ys_est, xs_est = mask_est.nonzero() + bb_est = misc.calc_2d_bbox(xs_est, ys_est, im_size=None, clip=False) + + ys_gt, xs_gt = mask_gt.nonzero() + bb_gt = misc.calc_2d_bbox(xs_gt, ys_gt, im_size=None, clip=False) + + e = 1.0 - misc.iou(bb_est, bb_gt) + return e diff --git a/bop_toolkit_lib/pose_matching.py b/bop_toolkit_lib/pose_matching.py new file mode 100644 index 0000000..a7ea5a0 --- /dev/null +++ b/bop_toolkit_lib/pose_matching.py @@ -0,0 +1,160 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Matching of estimated poses to the ground-truth poses.""" + +import numpy as np + + +def match_poses(errs, error_ths, max_ests_count=0, gt_valid_mask=None): + """Matches the estimated poses to the ground-truth poses. + + The estimated poses are greedily matched to the ground truth poses in the + order of decreasing score of the estimates. An estimated pose is matched to a + ground-truth pose if the error w.r.t. the ground-truth pose is below the + specified threshold. Each estimated pose is matched to up to one ground-truth + pose and each ground-truth pose is matched to up to one estimated pose. 
+ + :param errs: List of dictionaries, where each dictionary holds the following + info about one pose estimate: + - 'est_id': ID of the pose estimate. + - 'score': Confidence score of the pose estimate. + - 'errors': Dictionary mapping ground-truth ID's to errors of the pose + estimate w.r.t. the ground-truth poses. + :param error_ths: Thresholds of correctness. The pose error can be given + by more than one element (e.g. translational + rotational error), in which + case there is one threshold for each element. + :param max_ests_count: Top k pose estimates to consider (0 = all). + :param gt_valid_mask: Mask of ground-truth poses which can be considered. + :return: List of dictionaries, where each dictionary holds info for one pose + estimate (the estimates are ordered as in errs) about the matching + ground-truth pose: + - 'est_id': ID of the pose estimate. + - 'gt_id': ID of the matched ground-truth pose (-1 means there is no + matching ground-truth pose). + - 'score': Confidence score of the pose estimate. + - 'error': Error of the pose estimate w.r.t. the matched ground-truth pose. + - 'error_norm': Error normalized by the threshold value. + """ + # Sort the estimated poses by decreasing confidence score. + errs_sorted = sorted(errs, key=lambda e: e['score'], reverse=True) + + # Keep only the required number of poses with the highest confidence score. + # 0 = all pose estimates are considered. + if max_ests_count > 0: + errs_sorted = errs_sorted[:max_ests_count] + + # Number of values defining the error (e.g. 1 for "ADD", 2 for "5deg 5cm"). + error_num_elems = len(error_ths) + + # Greedily match the estimated poses to the ground truth poses in the order of + # decreasing score of the estimates. + matches = [] + gt_matched = [] + for e in errs_sorted: + + best_gt_id = -1 + best_error = list(error_ths) + for gt_id, error in e['errors'].items(): + + # If the mask of valid GT poses is not provided, consider all valid. + is_valid = not gt_valid_mask or gt_valid_mask[gt_id] + + # Only valid GT poses that have not been matched yet are considered. + if is_valid and gt_id not in gt_matched: + + # The current pose estimate is considered the best so far if all error + # elements are the lowest so far. + if np.all([error[i] < best_error[i] for i in range(error_num_elems)]): + best_gt_id = gt_id + best_error = error + + if best_gt_id >= 0: + + # Mark the GT pose as matched. + gt_matched.append(best_gt_id) + + # Error normalized by the threshold. + best_errors_normed = [best_error[i] / float(error_ths[i]) + for i in range(error_num_elems)] + + # Save info about the match. + matches.append({ + 'est_id': e['est_id'], + 'gt_id': best_gt_id, + 'score': e['score'], + 'error': best_error, + 'error_norm': best_errors_normed + }) + + return matches + + +def match_poses_scene(scene_id, scene_gt, scene_gt_valid, scene_errs, + correct_th, n_top): + """Matches the estimated poses to the ground-truth poses in one scene. + + :param scene_id: Scene ID. + :param scene_gt: Dictionary mapping image ID's to lists of dictionaries with: + - 'obj_id': Object ID of the ground-truth pose. + :param scene_gt_valid: Dictionary mapping image ID's to lists of boolean + values indicating which ground-truth poses should be considered. + :param scene_errs: List of dictionaries with: + - 'im_id': Image ID. + - 'obj_id': Object ID. + - 'est_id': ID of the pose estimate. + - 'score': Confidence score of the pose estimate. + - 'errors': Dictionary mapping ground-truth ID's to errors of the pose + estimate w.r.t. 
the ground-truth poses. + :param error_obj_threshs: Dictionary mapping object ID's to values of the + threshold of correctness. + :param n_top: Top N pose estimates (with the highest score) to be evaluated + for each object class in each image. + :return: + """ + # Organize the errors by image ID and object ID (for faster query). + scene_errs_org = {} + for e in scene_errs: + scene_errs_org.setdefault( + e['im_id'], {}).setdefault(e['obj_id'], []).append(e) + + # Matching of poses in individual images. + scene_matches = [] + for im_id, im_gts in scene_gt.items(): + im_matches = [] + + for gt_id, gt in enumerate(im_gts): + im_matches.append({ + 'scene_id': scene_id, + 'im_id': im_id, + 'obj_id': gt['obj_id'], + 'gt_id': gt_id, + 'est_id': -1, + 'score': -1, + 'error': -1, + 'error_norm': -1, + 'valid': scene_gt_valid[im_id][gt_id], + }) + + # Treat estimates of each object separately. + im_obj_ids = set([gt['obj_id'] for gt in im_gts]) + for obj_id in im_obj_ids: + if im_id in scene_errs_org.keys()\ + and obj_id in scene_errs_org[im_id].keys(): + + # Greedily match the estimated poses to the ground truth poses. + errs_im_obj = scene_errs_org[im_id][obj_id] + ms = match_poses( + errs_im_obj, correct_th, n_top, scene_gt_valid[im_id]) + + # Update info about the matched GT poses. + for m in ms: + g = im_matches[m['gt_id']] + g['est_id'] = m['est_id'] + g['score'] = m['score'] + g['error'] = m['error'] + g['error_norm'] = m['error_norm'] + + scene_matches += im_matches + + return scene_matches diff --git a/bop_toolkit_lib/renderer.py b/bop_toolkit_lib/renderer.py new file mode 100644 index 0000000..a96649a --- /dev/null +++ b/bop_toolkit_lib/renderer.py @@ -0,0 +1,97 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Abstract class of a renderer and a factory function to create a renderer. + +The renderer produces an RGB/depth image of a 3D mesh model in a specified pose +for given camera parameters and illumination settings. +""" + + +class Renderer(object): + """Abstract class of a renderer.""" + + def __init__(self, width, height): + """Constructor. + + :param width: Width of the rendered image. + :param height: Height of the rendered image. + """ + self.width = width + self.height = height + + # 3D location of a point light (in the camera coordinates). + self.light_cam_pos = (0, 0, 0) + + # Weight of the ambient light. + self.light_ambient_weight = 0.5 + + def set_light_cam_pos(self, light_cam_pos): + """Sets the 3D location of a point light. + + :param light_cam_pos: [X, Y, Z]. + """ + self.light_cam_pos = light_cam_pos + + def set_light_ambient_weight(self, light_ambient_weight): + """Sets weight of the ambient light. + + :param light_ambient_weight: Scalar from 0 to 1. + """ + self.light_ambient_weight = light_ambient_weight + + def add_object(self, obj_id, model_path, **kwargs): + """Loads an object model. + + :param obj_id: Object identifier. + :param model_path: Path to the object model file. + """ + raise NotImplementedError + + def remove_object(self, obj_id): + """Removes an object model. + + :param obj_id: Identifier of the object to remove. + """ + raise NotImplementedError + + def render_object(self, obj_id, R, t, fx, fy, cx, cy): + """Renders an object model in the specified pose. + + :param obj_id: Object identifier. + :param R: 3x3 ndarray with a rotation matrix. + :param t: 3x1 ndarray with a translation vector. + :param fx: Focal length (X axis). + :param fy: Focal length (Y axis). 
+ :param cx: The X coordinate of the principal point. + :param cy: The Y coordinate of the principal point. + :return: Returns a dictionary with rendered images. + """ + raise NotImplementedError + + +def create_renderer(width, height, renderer_type='cpp', mode='rgb+depth', + shading='phong', bg_color=(0.0, 0.0, 0.0, 0.0)): + """A factory to create a renderer. + + Note: Parameters mode, shading and bg_color are currently supported only by + the Python renderer (renderer_type='python'). + + :param width: Width of the rendered image. + :param height: Height of the rendered image. + :param renderer_type: Type of renderer (options: 'cpp', 'python'). + :param mode: Rendering mode ('rgb+depth', 'rgb', 'depth'). + :param shading: Type of shading ('flat', 'phong'). + :param bg_color: Color of the background (R, G, B, A). + :return: Instance of a renderer of the specified type. + """ + if renderer_type == 'python': + from . import renderer_py + return renderer_py.RendererPython(width, height, mode, shading, bg_color) + + elif renderer_type == 'cpp': + from . import renderer_cpp + return renderer_cpp.RendererCpp(width, height) + + else: + raise ValueError('Unknown renderer type.') diff --git a/bop_toolkit_lib/renderer_cpp.py b/bop_toolkit_lib/renderer_cpp.py new file mode 100644 index 0000000..09a557e --- /dev/null +++ b/bop_toolkit_lib/renderer_cpp.py @@ -0,0 +1,50 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""An interface to the C++ based renderer (bop_renderer).""" + +import sys + +from bop_toolkit_lib import config +from bop_toolkit_lib import renderer + +# C++ renderer (https://github.com/thodan/bop_renderer) +sys.path.append(config.bop_renderer_path) +import bop_renderer + + +class RendererCpp(renderer.Renderer): + """An interface to the C++ based renderer.""" + + def __init__(self, width, height): + """See base class.""" + super(RendererCpp, self).__init__(width, height) + self.renderer = bop_renderer.Renderer() + self.renderer.init(width, height) + + def set_light_cam_pos(self, light_cam_pos): + """See base class.""" + super(RendererCpp, self).set_light_cam_pos(light_cam_pos) + self.renderer.set_light_cam_pos(light_cam_pos) + + def set_light_ambient_weight(self, light_ambient_weight): + """See base class.""" + super(RendererCpp, self).set_light_ambient_weight(light_ambient_weight) + self.renderer.set_light_ambient_weight(light_ambient_weight) + + def add_object(self, obj_id, model_path, **kwargs): + """See base class.""" + self.renderer.add_object(obj_id, model_path) + + def remove_object(self, obj_id): + """See base class.""" + self.renderer.remove_object(obj_id) + + def render_object(self, obj_id, R, t, fx, fy, cx, cy): + """See base class.""" + R_l = map(float, R.flatten().tolist()) + t_l = map(float, t.flatten().tolist()) + self.renderer.render_object(obj_id, R_l, t_l, fx, fy, cx, cy) + rgb = self.renderer.get_color_image(obj_id) + depth = self.renderer.get_depth_image(obj_id) + return {'rgb': rgb, 'depth': depth} diff --git a/bop_toolkit_lib/renderer_py.py b/bop_toolkit_lib/renderer_py.py new file mode 100644 index 0000000..59e0d28 --- /dev/null +++ b/bop_toolkit_lib/renderer_py.py @@ -0,0 +1,555 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""A Python based renderer.""" + +import os +import numpy as np +from glumpy import app, gloo, gl + +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from 
bop_toolkit_lib import renderer + +# Set glumpy logging level. +from glumpy.log import log +import logging +log.setLevel(logging.WARNING) # Options: ERROR, WARNING, DEBUG, INFO. + +# Set backend (http://glumpy.readthedocs.io/en/latest/api/app-backends.html). +# app.use('glfw') # Options: 'glfw', 'qt5', 'pyside', 'pyglet'. + + +# RGB vertex shader. +_rgb_vertex_code = """ +uniform mat4 u_mv; +uniform mat4 u_nm; +uniform mat4 u_mvp; +uniform vec3 u_light_eye_pos; + +attribute vec3 a_position; +attribute vec3 a_normal; +attribute vec3 a_color; +attribute vec2 a_texcoord; + +varying vec3 v_color; +varying vec2 v_texcoord; +varying vec3 v_eye_pos; +varying vec3 v_L; +varying vec3 v_normal; + +void main() { + gl_Position = u_mvp * vec4(a_position, 1.0); + v_color = a_color; + v_texcoord = a_texcoord; + + // The following points/vectors are expressed in the eye coordinates. + v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // Vertex. + v_L = normalize(u_light_eye_pos - v_eye_pos); // Vector to the light. + v_normal = normalize(u_nm * vec4(a_normal, 1.0)).xyz; // Normal vector. +} +""" + +# RGB fragment shader - flat shading. +_rgb_fragment_flat_code = """ +uniform float u_light_ambient_w; +uniform sampler2D u_texture; +uniform int u_use_texture; + +varying vec3 v_color; +varying vec2 v_texcoord; +varying vec3 v_eye_pos; +varying vec3 v_L; + +void main() { + // Face normal in eye coords. + vec3 f_normal = normalize(cross(dFdx(v_eye_pos), dFdy(v_eye_pos))); + + float light_diffuse_w = max(dot(normalize(v_L), normalize(f_normal)), 0.0); + float light_w = u_light_ambient_w + light_diffuse_w; + if(light_w > 1.0) light_w = 1.0; + + if(bool(u_use_texture)) { + gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord)); + } + else { + gl_FragColor = vec4(light_w * v_color, 1.0); + } +} +""" + +# RGB fragment shader - Phong shading. +_rgb_fragment_phong_code = """ +uniform float u_light_ambient_w; +uniform sampler2D u_texture; +uniform int u_use_texture; + +varying vec3 v_color; +varying vec2 v_texcoord; +varying vec3 v_eye_pos; +varying vec3 v_L; +varying vec3 v_normal; + +void main() { + float light_diffuse_w = max(dot(normalize(v_L), normalize(v_normal)), 0.0); + float light_w = u_light_ambient_w + light_diffuse_w; + if(light_w > 1.0) light_w = 1.0; + + if(bool(u_use_texture)) { + gl_FragColor = vec4(light_w * texture2D(u_texture, v_texcoord)); + } + else { + gl_FragColor = vec4(light_w * v_color, 1.0); + } +} +""" + +# Depth vertex shader. +# Ref: https://github.com/julienr/vertex_visibility/blob/master/depth.py +# +# Getting the depth from the depth buffer in OpenGL is doable, see here: +# http://web.archive.org/web/20130416194336/http://olivers.posterous.com/linear-depth-in-glsl-for-real +# http://web.archive.org/web/20130426093607/http://www.songho.ca/opengl/gl_projectionmatrix.html +# http://stackoverflow.com/a/6657284/116067 +# but it is difficult to achieve high precision, as explained in this article: +# http://dev.theomader.com/depth-precision/ +# +# Once the vertex is in the view coordinates (view * model * v), its depth is +# simply the Z axis. Hence, instead of reading from the depth buffer and undoing +# the projection matrix, we store the Z coord of each vertex in the color +# buffer. OpenGL allows for float32 color buffer components. 
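+# The depth fragment shader below writes this eye-space depth into the red
+# channel of the float color buffer; _draw_depth() (near the end of this file)
+# then reads the buffer back with glReadPixels(..., GL_RGBA, GL_FLOAT, ...)
+# and keeps only the first channel.
+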
+_depth_vertex_code = """ +uniform mat4 u_mv; +uniform mat4 u_mvp; +attribute vec3 a_position; +attribute vec3 a_color; +varying float v_eye_depth; + +void main() { + gl_Position = u_mvp * vec4(a_position, 1.0); + vec3 v_eye_pos = (u_mv * vec4(a_position, 1.0)).xyz; // In eye coords. + + // OpenGL Z axis goes out of the screen, so depths are negative + v_eye_depth = -v_eye_pos.z; +} +""" + +# Depth fragment shader. +_depth_fragment_code = """ +varying float v_eye_depth; + +void main() { + gl_FragColor = vec4(v_eye_depth, 0.0, 0.0, 1.0); +} +""" + + +# Functions to calculate transformation matrices. +# Note that OpenGL expects the matrices to be saved column-wise. +# (Ref: http://www.songho.ca/opengl/gl_transform.html) + + +def _calc_model_view(model, view): + """Calculates the model-view matrix. + + :param model: 4x4 ndarray with the model matrix. + :param view: 4x4 ndarray with the view matrix. + :return: 4x4 ndarray with the model-view matrix. + """ + return np.dot(model, view) + + +def _calc_model_view_proj(model, view, proj): + """Calculates the model-view-projection matrix. + + :param model: 4x4 ndarray with the model matrix. + :param view: 4x4 ndarray with the view matrix. + :param proj: 4x4 ndarray with the projection matrix. + :return: 4x4 ndarray with the model-view-projection matrix. + """ + return np.dot(np.dot(model, view), proj) + + +def _calc_normal_matrix(model, view): + """Calculates the normal matrix. + + Ref: http://www.songho.ca/opengl/gl_normaltransform.html + + :param model: 4x4 ndarray with the model matrix. + :param view: 4x4 ndarray with the view matrix. + :return: 4x4 ndarray with the normal matrix. + """ + return np.linalg.inv(np.dot(model, view)).T + + +def _calc_calib_proj(K, x0, y0, w, h, nc, fc, window_coords='y_down'): + """Conversion of Hartley-Zisserman intrinsic matrix to OpenGL proj. matrix. + + Ref: + 1) https://strawlab.org/2011/11/05/augmented-reality-with-OpenGL + 2) https://github.com/strawlab/opengl-hz/blob/master/src/calib_test_utils.py + + :param K: 3x3 ndarray with the intrinsic camera matrix. + :param x0 The X coordinate of the camera image origin (typically 0). + :param y0: The Y coordinate of the camera image origin (typically 0). + :param w: Image width. + :param h: Image height. + :param nc: Near clipping plane. + :param fc: Far clipping plane. + :param window_coords: 'y_up' or 'y_down'. + :return: 4x4 ndarray with the OpenGL projection matrix. + """ + depth = float(fc - nc) + q = -(fc + nc) / depth + qn = -2 * (fc * nc) / depth + + # Draw our images upside down, so that all the pixel-based coordinate + # systems are the same. + if window_coords == 'y_up': + proj = np.array([ + [2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0], + [0, -2 * K[1, 1] / h, (-2 * K[1, 2] + h + 2 * y0) / h, 0], + [0, 0, q, qn], # Sets near and far planes (glPerspective). + [0, 0, -1, 0] + ]) + + # Draw the images upright and modify the projection matrix so that OpenGL + # will generate window coords that compensate for the flipped image coords. + else: + assert window_coords == 'y_down' + proj = np.array([ + [2 * K[0, 0] / w, -2 * K[0, 1] / w, (-2 * K[0, 2] + w + 2 * x0) / w, 0], + [0, 2 * K[1, 1] / h, (2 * K[1, 2] - h + 2 * y0) / h, 0], + [0, 0, q, qn], # Sets near and far planes (glPerspective). + [0, 0, -1, 0] + ]) + return proj.T + + +class RendererPython(renderer.Renderer): + """A Python based renderer.""" + + def __init__(self, width, height, mode='rgb+depth', shading='phong', + bg_color=(0.0, 0.0, 0.0, 0.0)): + """Constructor. 
+ + :param width: Width of the rendered image. + :param height: Height of the rendered image. + :param mode: Rendering mode ('rgb+depth', 'rgb', 'depth'). + :param shading: Type of shading ('flat', 'phong'). + :param bg_color: Color of the background (R, G, B, A). + """ + super(RendererPython, self).__init__(width, height) + + self.mode = mode + self.shading = shading + self.bg_color = bg_color + + # Indicators whether to render RGB and/or depth image. + self.render_rgb = self.mode in ['rgb', 'rgb+depth'] + self.render_depth = self.mode in ['depth', 'rgb+depth'] + + # Structures to store object models and related info. + self.models = {} + self.model_bbox_corners = {} + self.model_textures = {} + + # Rendered images. + self.rgb = None + self.depth = None + + # Window for rendering. + self.window = app.Window(visible=False) + + # Per-object vertex and index buffer. + self.vertex_buffers = {} + self.index_buffers = {} + + # Per-object OpenGL programs for rendering of RGB and depth images. + self.rgb_programs = {} + self.depth_programs = {} + + # The frame buffer object. + rgb_buf = np.zeros( + (self.height, self.width, 4), np.float32).view(gloo.TextureFloat2D) + depth_buf = np.zeros( + (self.height, self.width), np.float32).view(gloo.DepthTexture) + self.fbo = gloo.FrameBuffer(color=rgb_buf, depth=depth_buf) + + # Activate the created frame buffer object. + self.fbo.activate() + + def add_object(self, obj_id, model_path, **kwargs): + """See base class.""" + # Color of the object model (the original color saved with the object model + # will be used if None). + surf_color = None + if 'surf_color' in kwargs: + surf_color = kwargs['surf_color'] + + # Load the object model. + model = inout.load_ply(model_path) + self.models[obj_id] = model + + # Calculate the 3D bounding box of the model (will be used to set the near + # and far clipping plane). + bb = misc.calc_3d_bbox( + model['pts'][:, 0], model['pts'][:, 1], model['pts'][:, 2]) + self.model_bbox_corners[obj_id] = np.array([ + [bb[0], bb[1], bb[2]], + [bb[0], bb[1], bb[2] + bb[5]], + [bb[0], bb[1] + bb[4], bb[2]], + [bb[0], bb[1] + bb[4], bb[2] + bb[5]], + [bb[0] + bb[3], bb[1], bb[2]], + [bb[0] + bb[3], bb[1], bb[2] + bb[5]], + [bb[0] + bb[3], bb[1] + bb[4], bb[2]], + [bb[0] + bb[3], bb[1] + bb[4], bb[2] + bb[5]], + ]) + + # Set texture/color of vertices. + self.model_textures[obj_id] = None + + # Use the specified uniform surface color. + if surf_color is not None: + colors = np.tile(list(surf_color) + [1.0], [model['pts'].shape[0], 1]) + + # Set UV texture coordinates to dummy values. + texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32) + + # Use the model texture. + elif 'texture_file' in self.models[obj_id].keys(): + model_texture_path = os.path.join( + os.path.dirname(model_path), self.models[obj_id]['texture_file']) + model_texture = inout.load_im(model_texture_path) + + # Normalize the texture image. + if model_texture.max() > 1.0: + model_texture = model_texture.astype(np.float32) / 255.0 + model_texture = np.flipud(model_texture) + self.model_textures[obj_id] = model_texture + + # UV texture coordinates. + texture_uv = model['texture_uv'] + + # Set the per-vertex color to dummy values. + colors = np.zeros((model['pts'].shape[0], 3), np.float32) + + # Use the original model color. + elif 'colors' in model.keys(): + assert (model['pts'].shape[0] == model['colors'].shape[0]) + colors = model['colors'] + if colors.max() > 1.0: + colors /= 255.0 # Color values are expected in range [0, 1]. 
+ + # Set UV texture coordinates to dummy values. + texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32) + + # Set the model color to gray. + else: + colors = np.ones((model['pts'].shape[0], 3), np.float32) * 0.5 + + # Set UV texture coordinates to dummy values. + texture_uv = np.zeros((model['pts'].shape[0], 2), np.float32) + + # Set the vertex data. + if self.mode == 'depth': + vertices_type = [ + ('a_position', np.float32, 3), + ('a_color', np.float32, colors.shape[1]) + ] + vertices = np.array(list(zip(model['pts'], colors)), vertices_type) + else: + if self.shading == 'flat': + vertices_type = [ + ('a_position', np.float32, 3), + ('a_color', np.float32, colors.shape[1]), + ('a_texcoord', np.float32, 2) + ] + vertices = np.array(list(zip(model['pts'], colors, texture_uv)), + vertices_type) + elif self.shading == 'phong': + vertices_type = [ + ('a_position', np.float32, 3), + ('a_normal', np.float32, 3), + ('a_color', np.float32, colors.shape[1]), + ('a_texcoord', np.float32, 2) + ] + vertices = np.array(list(zip(model['pts'], model['normals'], + colors, texture_uv)), vertices_type) + else: + raise ValueError('Unknown shading type.') + + # Create vertex and index buffer for the loaded object model. + self.vertex_buffers[obj_id] = vertices.view(gloo.VertexBuffer) + self.index_buffers[obj_id] = \ + model['faces'].flatten().astype(np.uint32).view(gloo.IndexBuffer) + + # Set shader for the selected shading. + if self.shading == 'flat': + rgb_fragment_code = _rgb_fragment_flat_code + elif self.shading == 'phong': + rgb_fragment_code = _rgb_fragment_phong_code + else: + raise ValueError('Unknown shading type.') + + # Prepare the RGB OpenGL program. + rgb_program = gloo.Program(_rgb_vertex_code, rgb_fragment_code) + rgb_program.bind(self.vertex_buffers[obj_id]) + if self.model_textures[obj_id] is not None: + rgb_program['u_use_texture'] = int(True) + rgb_program['u_texture'] = self.model_textures[obj_id] + else: + rgb_program['u_use_texture'] = int(False) + rgb_program['u_texture'] = np.zeros((1, 1, 4), np.float32) + self.rgb_programs[obj_id] = rgb_program + + # Prepare the depth OpenGL program. + depth_program = gloo.Program(_depth_vertex_code,_depth_fragment_code) + depth_program.bind(self.vertex_buffers[obj_id]) + self.depth_programs[obj_id] = depth_program + + def remove_object(self, obj_id): + """See base class.""" + del self.models[obj_id] + del self.model_bbox_corners[obj_id] + if obj_id in self.model_textures: + del self.model_textures[obj_id] + del self.vertex_buffers[obj_id] + del self.index_buffers[obj_id] + del self.rgb_programs[obj_id] + del self.depth_programs[obj_id] + + def render_object(self, obj_id, R, t, fx, fy, cx, cy): + """See base class.""" + + # Define the following variables as global so their latest values are always + # seen in function on_draw below. + global curr_obj_id, mat_model, mat_view, mat_proj + curr_obj_id = obj_id + + # Model matrix (from object space to world space). + mat_model = np.eye(4, dtype=np.float32) + + # View matrix (from world space to eye space; transforms also the coordinate + # system from OpenCV to OpenGL camera space). + mat_view_cv = np.eye(4, dtype=np.float32) + mat_view_cv[:3, :3], mat_view_cv[:3, 3] = R, t.squeeze() + yz_flip = np.eye(4, dtype=np.float32) + yz_flip[1, 1], yz_flip[2, 2] = -1, -1 + mat_view = yz_flip.dot(mat_view_cv) # OpenCV to OpenGL camera system. + mat_view = mat_view.T # OpenGL expects column-wise matrix format. + + # Calculate the near and far clipping plane from the 3D bounding box. 
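+    # The eight bounding-box corners are expressed in homogeneous coordinates
+    # and mapped to eye space using the third row of the OpenCV view matrix
+    # (i.e. only their Z coordinates are computed); the smallest and largest
+    # of these Z values are used as the near and far clipping planes.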
+ bbox_corners = self.model_bbox_corners[obj_id] + bbox_corners_ht = np.concatenate( + (bbox_corners, np.ones((bbox_corners.shape[0], 1))), axis=1).transpose() + bbox_corners_eye_z = mat_view_cv[2, :].reshape((1, 4)).dot(bbox_corners_ht) + clip_near = bbox_corners_eye_z.min() + clip_far = bbox_corners_eye_z.max() + + # Projection matrix. + K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]]) + mat_proj = _calc_calib_proj( + K, 0, 0, self.width, self.height, clip_near, clip_far) + + @self.window.event + def on_draw(dt): + self.window.clear() + global curr_obj_id, mat_model, mat_view, mat_proj + + # Render the RGB image. + if self.render_rgb: + self.rgb = self._draw_rgb( + curr_obj_id, mat_model, mat_view, mat_proj) + + # Render the depth image. + if self.render_depth: + self.depth = self._draw_depth( + curr_obj_id, mat_model, mat_view, mat_proj) + + # The on_draw function is called framecount+1 times. + app.run(framecount=0) + + if self.mode == 'rgb': + return {'rgb': self.rgb} + elif self.mode == 'depth': + return {'depth': self.depth} + elif self.mode == 'rgb+depth': + return {'rgb': self.rgb, 'depth': self.depth} + + def _draw_rgb(self, obj_id, mat_model, mat_view, mat_proj): + """Renders an RGB image. + + :param obj_id: ID of the object model to render. + :param mat_model: 4x4 ndarray with the model matrix. + :param mat_view: 4x4 ndarray with the view matrix. + :param mat_proj: 4x4 ndarray with the projection matrix. + :return: HxWx3 ndarray with the rendered RGB image. + """ + # Update the OpenGL program. + program = self.rgb_programs[obj_id] + program['u_light_eye_pos'] = [0, 0, 0] # Camera origin. + program['u_light_ambient_w'] = self.light_ambient_weight + program['u_mv'] = _calc_model_view(mat_model, mat_view) + program['u_nm'] = _calc_normal_matrix(mat_model, mat_view) + program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj) + + # OpenGL setup. + gl.glEnable(gl.GL_DEPTH_TEST) + gl.glClearColor( + self.bg_color[0], self.bg_color[1], self.bg_color[2], self.bg_color[3]) + gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) + gl.glViewport(0, 0, self.width, self.height) + + # Keep the back-face culling disabled because of objects which do not have + # well-defined surface (e.g. the lamp from the lm dataset). + gl.glDisable(gl.GL_CULL_FACE) + + # Rendering. + program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id]) + + # Get the content of the FBO texture. + rgb = np.zeros((self.height, self.width, 4), dtype=np.float32) + gl.glReadPixels(0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, rgb) + rgb.shape = (self.height, self.width, 4) + rgb = rgb[::-1, :] + rgb = np.round(rgb[:, :, :3] * 255).astype(np.uint8) # Convert to [0, 255]. + + return rgb + + def _draw_depth(self, obj_id, mat_model, mat_view, mat_proj): + """Renders a depth image. + + :param obj_id: ID of the object model to render. + :param mat_model: 4x4 ndarray with the model matrix. + :param mat_view: 4x4 ndarray with the view matrix. + :param mat_proj: 4x4 ndarray with the projection matrix. + :return: HxW ndarray with the rendered depth image. + """ + # Update the OpenGL program. + program = self.depth_programs[obj_id] + program['u_mv'] = _calc_model_view(mat_model, mat_view) + program['u_mvp'] = _calc_model_view_proj(mat_model, mat_view, mat_proj) + + # OpenGL setup. 
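+    # Note: the clear color is zero, so pixels not covered by the model read
+    # back as depth 0; callers build object masks from the rendered depth with
+    # "depth > 0" (see e.g. cus() in pose_error.py).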
+ gl.glEnable(gl.GL_DEPTH_TEST) + gl.glClearColor(0.0, 0.0, 0.0, 0.0) + gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) + gl.glViewport(0, 0, self.width, self.height) + + # Keep the back-face culling disabled because of objects which do not have + # well-defined surface (e.g. the lamp from the lm dataset). + gl.glDisable(gl.GL_CULL_FACE) + + # Rendering. + program.draw(gl.GL_TRIANGLES, self.index_buffers[obj_id]) + + # Get the content of the FBO texture. + depth = np.zeros((self.height, self.width, 4), dtype=np.float32) + gl.glReadPixels( + 0, 0, self.width, self.height, gl.GL_RGBA, gl.GL_FLOAT, depth) + depth.shape = (self.height, self.width, 4) + depth = depth[::-1, :] + depth = depth[:, :, 0] # Depth is saved in the first channel + + return depth diff --git a/bop_toolkit_lib/score.py b/bop_toolkit_lib/score.py new file mode 100644 index 0000000..b9e6f91 --- /dev/null +++ b/bop_toolkit_lib/score.py @@ -0,0 +1,169 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculation of performance scores.""" + +import numpy as np +from collections import defaultdict + +from bop_toolkit_lib import misc + + +def ap(rec, pre): + """Calculates Average Precision (AP). + + Calculated in the PASCAL VOC challenge from 2010 onwards [1]: + 1) Compute a version of the measured precision/recall curve with precision + monotonically decreasing, by setting the precision for recall r to the + maximum precision obtained for any recall r' >= r. + 2) Compute the AP as the area under this curve by numerical integration. + No approximation is involved since the curve is piecewise constant. + + NOTE: The used AP formula is different from the one in [2] where the + formula from VLFeat [3] was presented - although it was mistakenly + introduced as a formula used in PASCAL. + + References: + [1] http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#SECTION00044000000000000000 + [2] Hodan et al., "On Evaluation of 6D Object Pose Estimation", ECCVW 2016 + [3] http://www.vlfeat.org/matlab/vl_pr.html + + :param rec: A list (or 1D ndarray) of recall rates. + :param pre: A list (or 1D ndarray) of precision rates. + :return: Average Precision - the area under the monotonically decreasing + version of the precision/recall curve given by rec and pre. + """ + # Sorts the precision/recall points by increasing recall. + i = np.argsort(rec) + + mrec = np.concatenate(([0], np.array(rec)[i], [1])) + mpre = np.concatenate(([0], np.array(pre)[i], [0])) + assert (mrec.shape == mpre.shape) + for i in range(mpre.size - 3, -1, -1): + mpre[i] = max(mpre[i], mpre[i + 1]) + i = np.nonzero(mrec[1:] != mrec[:-1])[0] + 1 + ap = np.sum((mrec[i] - mrec[i - 1]) * mpre[i]) + return ap + + +def recall(tp_count, targets_count): + """Calculates recall. + + :param tp_count: Number of true positives. + :param targets_count: Number of targets. + :return: The recall rate. + """ + if targets_count == 0: + return 0.0 + else: + return tp_count / float(targets_count) + + +def calc_localization_scores(scene_ids, obj_ids, matches, n_top, do_print=True): + """Calculates performance scores for the 6D object localization task. + + References: + Hodan et al., BOP: Benchmark for 6D Object Pose Estimation, ECCV'18. + Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16. + + :param scene_ids: ID's of considered scenes. + :param obj_ids: ID's of considered objects. 
+ :param matches: Info about matching pose estimates to ground-truth poses + (see pose_matching.py for details). + :param n_top: Number of top pose estimates to consider per test target. + :param do_print: Whether to print the scores to the standard output. + :return: Dictionary with the evaluation scores. + """ + # Count the number of visible object instances in each image. + insts = {i: {j: defaultdict(lambda: 0) for j in scene_ids} for i in obj_ids} + for m in matches: + if m['valid']: + insts[m['obj_id']][m['scene_id']][m['im_id']] += 1 + + # Count the number of targets = object instances to be found. + # For SiSo, there is either zero or one target in each image - there is just + # one even if there are more instances of the object of interest. + tars = 0 # Total number of targets. + obj_tars = {i: 0 for i in obj_ids} # Targets per object. + scene_tars = {i: 0 for i in scene_ids} # Targets per scene. + for obj_id, obj_insts in insts.items(): + for scene_id, scene_insts in obj_insts.items(): + + # Count the number of targets for the current object in the current scene. + if n_top > 0: + count = sum(np.minimum(n_top, scene_insts.values())) + else: + count = sum(scene_insts.values()) + + tars += count + obj_tars[obj_id] += count + scene_tars[scene_id] += count + + # Count the number of true positives. + tps = 0 # Total number of true positives. + obj_tps = {i: 0 for i in obj_ids} # True positives per object. + scene_tps = {i: 0 for i in scene_ids} # True positives per scene. + for m in matches: + if m['valid'] and m['est_id'] != -1: + tps += 1 + obj_tps[m['obj_id']] += 1 + scene_tps[m['scene_id']] += 1 + + # Total recall. + total_recall = recall(tps, tars) + + # Recall per object. + obj_recalls = {} + for i in obj_ids: + obj_recalls[i] = recall(obj_tps[i], obj_tars[i]) + mean_obj_recall = float(np.mean(obj_recalls.values()).squeeze()) + + # Recall per scene. + scene_recalls = {} + for i in scene_ids: + scene_recalls[i] = float(recall(scene_tps[i], scene_tars[i])) + mean_scene_recall = float(np.mean(scene_recalls.values()).squeeze()) + + # Final scores. + scores = { + 'total_recall': float(total_recall), + 'obj_recalls': obj_recalls, + 'mean_obj_recall': float(mean_obj_recall), + 'scene_recalls': scene_recalls, + 'mean_scene_recall': float(mean_scene_recall), + 'gt_count': len(matches), + 'targets_count': int(tars), + 'tp_count': int(tps), + } + + if do_print: + obj_recalls_str = ', '.join( + ['{}: {:.3f}'.format(i, s) for i, s in scores['obj_recalls'].items()]) + + scene_recalls_str = ', '.join( + ['{}: {:.3f}'.format(i, s) for i, s in scores['scene_recalls'].items()]) + + misc.log('') + misc.log('GT count: {:d}'.format(scores['gt_count'])) + misc.log('Target count: {:d}'.format(scores['targets_count'])) + misc.log('TP count: {:d}'.format(scores['tp_count'])) + misc.log('Total recall: {:.4f}'.format(scores['total_recall'])) + misc.log('Mean object recall: {:.4f}'.format(scores['mean_obj_recall'])) + misc.log('Mean scene recall: {:.4f}'.format(scores['mean_scene_recall'])) + misc.log('Object recalls:\n{}'.format(obj_recalls_str)) + misc.log('Scene recalls:\n{}'.format(scene_recalls_str)) + misc.log('') + + return scores + + +if __name__ == '__main__': + + # AP test. 
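+  # A small illustrative self-check: six detections (assumed ordered by
+  # decreasing confidence), three of which are true positives. Cumulative
+  # TP/FP counts give the recall and precision after each detection, and ap()
+  # integrates the monotonically decreasing precision/recall curve (the
+  # printed value should come out to roughly 0.32 for this sequence).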
+ tp = np.array([False, True, True, False, True, False]) + fp = np.logical_not(tp) + tp_c = np.cumsum(tp).astype(np.float) + fp_c = np.cumsum(fp).astype(np.float) + rec = tp_c / tp.size + pre = tp_c / (fp_c + tp_c) + misc.log('Average Precision: ' + str(ap(rec, pre))) diff --git a/bop_toolkit_lib/transform.py b/bop_toolkit_lib/transform.py new file mode 100644 index 0000000..baf4d1d --- /dev/null +++ b/bop_toolkit_lib/transform.py @@ -0,0 +1,1917 @@ +# -*- coding: utf-8 -*- +# transform.py + +# Copyright (c) 2006-2015, Christoph Gohlke +# Copyright (c) 2006-2015, The Regents of the University of California +# Produced at the Laboratory for Fluorescence Dynamics +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Homogeneous Transformation Matrices and Quaternions. + +A library for calculating 4x4 matrices for translating, rotating, reflecting, +scaling, shearing, projecting, orthogonalizing, and superimposing arrays of +3D homogeneous coordinates as well as for converting between rotation matrices, +Euler angles, and quaternions. Also includes an Arcball control object and +functions to decompose transformation matrices. + +:Author: + `Christoph Gohlke `_ + +:Organization: + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 2015.03.19 + +Requirements +------------ +* `CPython 2.7 or 3.4 `_ +* `Numpy 1.9 `_ +* `Transformations.c 2015.03.19 `_ + (recommended for speedup of some functions) + +Notes +----- +The API is not stable yet and is expected to change between revisions. + +This Python code is not optimized for speed. Refer to the transformations.c +module for a faster implementation of some functions. + +Documentation in HTML format can be generated with epydoc. + +Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using +numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using +numpy.dot(M, v) for shape (4, \*) column vectors, respectively +numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points"). 
+ +This module follows the "column vectors on the right" and "row major storage" +(C contiguous) conventions. The translation components are in the right column +of the transformation matrix, i.e. M[:3, 3]. +The transpose of the transformation matrices may have to be used to interface +with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16]. + +Calculations are carried out with numpy.float64 precision. + +Vector, point, quaternion, and matrix function arguments are expected to be +"array like", i.e. tuple, list, or numpy arrays. + +Return types are numpy arrays unless specified otherwise. + +Angles are in radians unless specified otherwise. + +Quaternions w+ix+jy+kz are represented as [w, x, y, z]. + +A triple of Euler angles can be applied/interpreted in 24 ways, which can +be specified using a 4 character string or encoded 4-tuple: + + *Axes 4-string*: e.g. 'sxyz' or 'ryxy' + + - first character : rotations are applied to 's'tatic or 'r'otating frame + - remaining characters : successive rotation axis 'x', 'y', or 'z' + + *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) + + - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. + - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed + by 'z', or 'z' is followed by 'x'. Otherwise odd (1). + - repetition : first and last axis are same (1) or different (0). + - frame : rotations are applied to static (0) or rotating (1) frame. + +Other Python packages and modules for 3D transformations and quaternions: + +* `Transforms3d `_ + includes most code of this module. +* `Blender.mathutils `_ +* `numpy-dtypes `_ + +References +---------- +(1) Matrices and transformations. Ronald Goldman. + In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. +(2) More matrices and transformations: shear and pseudo-perspective. + Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(3) Decomposing a matrix into simple transformations. Spencer Thomas. + In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. +(4) Recovering the data from the transformation matrix. Ronald Goldman. + In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. +(5) Euler angle conversion. Ken Shoemake. + In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. +(6) Arcball rotation control. Ken Shoemake. + In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. +(7) Representing attitude: Euler angles, unit quaternions, and rotation + vectors. James Diebel. 2006. +(8) A discussion of the solution for the best rotation to relate two sets + of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. +(9) Closed-form solution of absolute orientation using unit quaternions. + BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642. +(10) Quaternions. Ken Shoemake. + http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf +(11) From quaternion to matrix and back. JMP van Waveren. 2005. + http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm +(12) Uniform random rotations. Ken Shoemake. + In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. +(13) Quaternion in molecular modeling. CFF Karney. + J Mol Graph Mod, 25(5):595-604 +(14) New method for extracting the quaternion from a rotation matrix. + Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087. +(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann. + Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130. +(16) Column Vectors vs. Row Vectors. 
+ http://steve.hollasch.net/cgindex/math/matrix/column-vec.html + +Examples +-------- +>>> alpha, beta, gamma = 0.123, -1.234, 2.345 +>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1] +>>> I = identity_matrix() +>>> Rx = rotation_matrix(alpha, xaxis) +>>> Ry = rotation_matrix(beta, yaxis) +>>> Rz = rotation_matrix(gamma, zaxis) +>>> R = concatenate_matrices(Rx, Ry, Rz) +>>> euler = euler_from_matrix(R, 'rxyz') +>>> numpy.allclose([alpha, beta, gamma], euler) +True +>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz') +>>> is_same_transform(R, Re) +True +>>> al, be, ga = euler_from_matrix(Re, 'rxyz') +>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz')) +True +>>> qx = quaternion_about_axis(alpha, xaxis) +>>> qy = quaternion_about_axis(beta, yaxis) +>>> qz = quaternion_about_axis(gamma, zaxis) +>>> q = quaternion_multiply(qx, qy) +>>> q = quaternion_multiply(q, qz) +>>> Rq = quaternion_matrix(q) +>>> is_same_transform(R, Rq) +True +>>> S = scale_matrix(1.23, origin) +>>> T = translation_matrix([1, 2, 3]) +>>> Z = shear_matrix(beta, xaxis, origin, zaxis) +>>> R = random_rotation_matrix(numpy.random.rand(3)) +>>> M = concatenate_matrices(T, R, Z, S) +>>> scale, shear, angles, trans, persp = decompose_matrix(M) +>>> numpy.allclose(scale, 1.23) +True +>>> numpy.allclose(trans, [1, 2, 3]) +True +>>> numpy.allclose(shear, [0, math.tan(beta), 0]) +True +>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles)) +True +>>> M1 = compose_matrix(scale, shear, angles, trans, persp) +>>> is_same_transform(M, M1) +True +>>> v0, v1 = random_vector(3), random_vector(3) +>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1)) +>>> v2 = numpy.dot(v0, M[:3,:3].T) +>>> numpy.allclose(unit_vector(v1), unit_vector(v2)) +True + +""" + +from __future__ import division, print_function + +import math + +import numpy + +__version__ = '2015.03.19' +__docformat__ = 'restructuredtext en' +__all__ = () + + +def identity_matrix(): + """Return 4x4 identity/unit matrix. + + >>> I = identity_matrix() + >>> numpy.allclose(I, numpy.dot(I, I)) + True + >>> numpy.sum(I), numpy.trace(I) + (4.0, 4.0) + >>> numpy.allclose(I, numpy.identity(4)) + True + + """ + return numpy.identity(4) + + +def translation_matrix(direction): + """Return matrix to translate by direction vector. + + >>> v = numpy.random.random(3) - 0.5 + >>> numpy.allclose(v, translation_matrix(v)[:3, 3]) + True + + """ + M = numpy.identity(4) + M[:3, 3] = direction[:3] + return M + + +def translation_from_matrix(matrix): + """Return translation vector from translation matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = translation_from_matrix(translation_matrix(v0)) + >>> numpy.allclose(v0, v1) + True + + """ + return numpy.array(matrix, copy=False)[:3, 3].copy() + + +def reflection_matrix(point, normal): + """Return matrix to mirror at plane defined by point and normal vector. + + >>> v0 = numpy.random.random(4) - 0.5 + >>> v0[3] = 1. 
+ >>> v1 = numpy.random.random(3) - 0.5 + >>> R = reflection_matrix(v0, v1) + >>> numpy.allclose(2, numpy.trace(R)) + True + >>> numpy.allclose(v0, numpy.dot(R, v0)) + True + >>> v2 = v0.copy() + >>> v2[:3] += v1 + >>> v3 = v0.copy() + >>> v2[:3] -= v1 + >>> numpy.allclose(v2, numpy.dot(R, v3)) + True + + """ + normal = unit_vector(normal[:3]) + M = numpy.identity(4) + M[:3, :3] -= 2.0 * numpy.outer(normal, normal) + M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal + return M + + +def reflection_from_matrix(matrix): + """Return mirror plane point and normal vector from reflection matrix. + + >>> v0 = numpy.random.random(3) - 0.5 + >>> v1 = numpy.random.random(3) - 0.5 + >>> M0 = reflection_matrix(v0, v1) + >>> point, normal = reflection_from_matrix(M0) + >>> M1 = reflection_matrix(point, normal) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + # normal: unit eigenvector corresponding to eigenvalue -1 + w, V = numpy.linalg.eig(M[:3, :3]) + i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue -1") + normal = numpy.real(V[:, i[0]]).squeeze() + # point: any unit eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return point, normal + + +def rotation_matrix(angle, direction, point=None): + """Return matrix to rotate about axis defined by point and direction. + + >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0]) + >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1]) + True + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) + >>> is_same_transform(R0, R1) + True + >>> R0 = rotation_matrix(angle, direc, point) + >>> R1 = rotation_matrix(-angle, -direc, point) + >>> is_same_transform(R0, R1) + True + >>> I = numpy.identity(4, numpy.float64) + >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc)) + True + >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2, + ... direc, point))) + True + + """ + sina = math.sin(angle) + cosa = math.cos(angle) + direction = unit_vector(direction[:3]) + # rotation matrix around unit vector + R = numpy.diag([cosa, cosa, cosa]) + R += numpy.outer(direction, direction) * (1.0 - cosa) + direction *= sina + R += numpy.array([[0.0, -direction[2], direction[1]], + [direction[2], 0.0, -direction[0]], + [-direction[1], direction[0], 0.0]]) + M = numpy.identity(4) + M[:3, :3] = R + if point is not None: + # rotation not around origin + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + M[:3, 3] = point - numpy.dot(R, point) + return M + + +def rotation_from_matrix(matrix): + """Return rotation angle and axis from rotation matrix. 
+ + >>> angle = (random.random() - 0.5) * (2*math.pi) + >>> direc = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> R0 = rotation_matrix(angle, direc, point) + >>> angle, direc, point = rotation_from_matrix(R0) + >>> R1 = rotation_matrix(angle, direc, point) + >>> is_same_transform(R0, R1) + True + + """ + R = numpy.array(matrix, dtype=numpy.float64, copy=False) + R33 = R[:3, :3] + # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, W = numpy.linalg.eig(R33.T) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + direction = numpy.real(W[:, i[-1]]).squeeze() + # point: unit eigenvector of R33 corresponding to eigenvalue of 1 + w, Q = numpy.linalg.eig(R) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no unit eigenvector corresponding to eigenvalue 1") + point = numpy.real(Q[:, i[-1]]).squeeze() + point /= point[3] + # rotation angle depending on direction + cosa = (numpy.trace(R33) - 1.0) / 2.0 + if abs(direction[2]) > 1e-8: + sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2] + elif abs(direction[1]) > 1e-8: + sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1] + else: + sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0] + angle = math.atan2(sina, cosa) + return angle, direction, point + + +def scale_matrix(factor, origin=None, direction=None): + """Return matrix to scale by factor around origin in direction. + + Use factor -1 for point symmetry. + + >>> v = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v[3] = 1 + >>> S = scale_matrix(-1.234) + >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3]) + True + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S = scale_matrix(factor, origin) + >>> S = scale_matrix(factor, origin, direct) + + """ + if direction is None: + # uniform scaling + M = numpy.diag([factor, factor, factor, 1.0]) + if origin is not None: + M[:3, 3] = origin[:3] + M[:3, 3] *= 1.0 - factor + else: + # nonuniform scaling + direction = unit_vector(direction[:3]) + factor = 1.0 - factor + M = numpy.identity(4) + M[:3, :3] -= factor * numpy.outer(direction, direction) + if origin is not None: + M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction + return M + + +def scale_from_matrix(matrix): + """Return scaling factor, origin and direction from scaling matrix. 
+ + >>> factor = random.random() * 10 - 5 + >>> origin = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> S0 = scale_matrix(factor, origin) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + >>> S0 = scale_matrix(factor, origin, direct) + >>> factor, origin, direction = scale_from_matrix(S0) + >>> S1 = scale_matrix(factor, origin, direction) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + factor = numpy.trace(M33) - 2.0 + try: + # direction: unit eigenvector corresponding to eigenvalue factor + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0] + direction = numpy.real(V[:, i]).squeeze() + direction /= vector_norm(direction) + except IndexError: + # uniform scaling + factor = (factor + 2.0) / 3.0 + direction = None + # origin: any eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + origin = numpy.real(V[:, i[-1]]).squeeze() + origin /= origin[3] + return factor, origin, direction + + +def projection_matrix(point, normal, direction=None, + perspective=None, pseudo=False): + """Return matrix to project onto plane defined by point and normal. + + Using either perspective point, projection direction, or none of both. + + If pseudo is True, perspective projections will preserve relative depth + such that Perspective = dot(Orthogonal, PseudoPerspective). + + >>> P = projection_matrix([0, 0, 0], [1, 0, 0]) + >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:]) + True + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> P1 = projection_matrix(point, normal, direction=direct) + >>> P2 = projection_matrix(point, normal, perspective=persp) + >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> is_same_transform(P2, numpy.dot(P0, P3)) + True + >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0]) + >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(P, v0) + >>> numpy.allclose(v1[1], v0[1]) + True + >>> numpy.allclose(v1[0], 3-v1[1]) + True + + """ + M = numpy.identity(4) + point = numpy.array(point[:3], dtype=numpy.float64, copy=False) + normal = unit_vector(normal[:3]) + if perspective is not None: + # perspective projection + perspective = numpy.array(perspective[:3], dtype=numpy.float64, + copy=False) + M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective - point, normal) + M[:3, :3] -= numpy.outer(perspective, normal) + if pseudo: + # preserve relative depth + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, normal) * (perspective + normal) + else: + M[:3, 3] = numpy.dot(point, normal) * perspective + M[3, :3] = -normal + M[3, 3] = numpy.dot(perspective, normal) + elif direction is not None: + # parallel projection + direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False) + scale = numpy.dot(direction, normal) + M[:3, :3] -= numpy.outer(direction, normal) / scale + M[:3, 3] = direction * (numpy.dot(point, normal) / scale) + else: + # orthogonal projection + M[:3, :3] -= numpy.outer(normal, normal) + M[:3, 3] = numpy.dot(point, 
normal) * normal + return M + + +def projection_from_matrix(matrix, pseudo=False): + """Return projection plane and perspective point from projection matrix. + + Return values are same as arguments for projection_matrix function: + point, normal, direction, perspective, and pseudo. + + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.random.random(3) - 0.5 + >>> direct = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(3) - 0.5 + >>> P0 = projection_matrix(point, normal) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, direct) + >>> result = projection_from_matrix(P0) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) + >>> result = projection_from_matrix(P0, pseudo=False) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) + >>> result = projection_from_matrix(P0, pseudo=True) + >>> P1 = projection_matrix(*result) + >>> is_same_transform(P0, P1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not pseudo and len(i): + # point: any eigenvector corresponding to eigenvalue 1 + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + # direction: unit eigenvector corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 0") + direction = numpy.real(V[:, i[0]]).squeeze() + direction /= vector_norm(direction) + # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 + w, V = numpy.linalg.eig(M33.T) + i = numpy.where(abs(numpy.real(w)) < 1e-8)[0] + if len(i): + # parallel projection + normal = numpy.real(V[:, i[0]]).squeeze() + normal /= vector_norm(normal) + return point, normal, direction, None, False + else: + # orthogonal projection, where normal equals direction vector + return point, direction, None, None, False + else: + # perspective projection + i = numpy.where(abs(numpy.real(w)) > 1e-8)[0] + if not len(i): + raise ValueError( + "no eigenvector not corresponding to eigenvalue 0") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + normal = - M[3, :3] + perspective = M[:3, 3] / numpy.dot(point[:3], normal) + if pseudo: + perspective -= normal + return point, normal, None, perspective, pseudo + + +def clip_matrix(left, right, bottom, top, near, far, perspective=False): + """Return matrix to obtain normalized device coordinates from frustum. + + The frustum bounds are axis-aligned along x (left, right), + y (bottom, top) and z (near, far). + + Normalized device coordinates are in range [-1, 1] if coordinates are + inside the frustum. + + If perspective is True the frustum is a truncated pyramid with the + perspective point at origin and direction along z axis, otherwise an + orthographic canonical view volume (a box). + + Homogeneous coordinates transformed by the perspective clip matrix + need to be dehomogenized (divided by w coordinate). 
+ + >>> frustum = numpy.random.rand(6) + >>> frustum[1] += frustum[0] + >>> frustum[3] += frustum[2] + >>> frustum[5] += frustum[4] + >>> M = clip_matrix(perspective=False, *frustum) + >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + array([-1., -1., -1., 1.]) + >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1]) + array([ 1., 1., 1., 1.]) + >>> M = clip_matrix(perspective=True, *frustum) + >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1]) + >>> v / v[3] + array([-1., -1., -1., 1.]) + >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1]) + >>> v / v[3] + array([ 1., 1., -1., 1.]) + + """ + if left >= right or bottom >= top or near >= far: + raise ValueError("invalid frustum") + if perspective: + if near <= _EPS: + raise ValueError("invalid frustum: near <= 0") + t = 2.0 * near + M = [[t / (left - right), 0.0, (right + left) / (right - left), 0.0], + [0.0, t / (bottom - top), (top + bottom) / (top - bottom), 0.0], + [0.0, 0.0, (far + near) / (near - far), t * far / (far - near)], + [0.0, 0.0, -1.0, 0.0]] + else: + M = [[2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)], + [0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)], + [0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)], + [0.0, 0.0, 0.0, 1.0]] + return numpy.array(M) + + +def shear_matrix(angle, direction, point, normal): + """Return matrix to shear by angle along direction vector on shear plane. + + The shear plane is defined by a point and normal vector. The direction + vector must be orthogonal to the plane's normal vector. + + A point P is transformed by the shear matrix into P" such that + the vector P-P" is parallel to the direction vector and its extent is + given by the angle of P-P'-P", where P' is the orthogonal projection + of P onto the shear plane. + + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S = shear_matrix(angle, direct, point, normal) + >>> numpy.allclose(1, numpy.linalg.det(S)) + True + + """ + normal = unit_vector(normal[:3]) + direction = unit_vector(direction[:3]) + if abs(numpy.dot(normal, direction)) > 1e-6: + raise ValueError("direction and normal vectors are not orthogonal") + angle = math.tan(angle) + M = numpy.identity(4) + M[:3, :3] += angle * numpy.outer(direction, normal) + M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction + return M + + +def shear_from_matrix(matrix): + """Return shear angle, direction and plane from shear matrix. 
+ + >>> angle = (random.random() - 0.5) * 4*math.pi + >>> direct = numpy.random.random(3) - 0.5 + >>> point = numpy.random.random(3) - 0.5 + >>> normal = numpy.cross(direct, numpy.random.random(3)) + >>> S0 = shear_matrix(angle, direct, point, normal) + >>> angle, direct, point, normal = shear_from_matrix(S0) + >>> S1 = shear_matrix(angle, direct, point, normal) + >>> is_same_transform(S0, S1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False) + M33 = M[:3, :3] + # normal: cross independent eigenvectors corresponding to the eigenvalue 1 + w, V = numpy.linalg.eig(M33) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0] + if len(i) < 2: + raise ValueError("no two linear independent eigenvectors found %s" % w) + V = numpy.real(V[:, i]).squeeze().T + lenorm = -1.0 + for i0, i1 in ((0, 1), (0, 2), (1, 2)): + n = numpy.cross(V[i0], V[i1]) + w = vector_norm(n) + if w > lenorm: + lenorm = w + normal = n + normal /= lenorm + # direction and angle + direction = numpy.dot(M33 - numpy.identity(3), normal) + angle = vector_norm(direction) + direction /= angle + angle = math.atan(angle) + # point: eigenvector corresponding to eigenvalue 1 + w, V = numpy.linalg.eig(M) + i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0] + if not len(i): + raise ValueError("no eigenvector corresponding to eigenvalue 1") + point = numpy.real(V[:, i[-1]]).squeeze() + point /= point[3] + return angle, direction, point, normal + + +def decompose_matrix(matrix): + """Return sequence of transformations from transformation matrix. + + matrix : array_like + Non-degenerative homogeneous transformation matrix + + Return tuple of: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + Raise ValueError if matrix is of wrong type or degenerative. 
+ + >>> T0 = translation_matrix([1, 2, 3]) + >>> scale, shear, angles, trans, persp = decompose_matrix(T0) + >>> T1 = translation_matrix(trans) + >>> numpy.allclose(T0, T1) + True + >>> S = scale_matrix(0.123) + >>> scale, shear, angles, trans, persp = decompose_matrix(S) + >>> scale[0] + 0.123 + >>> R0 = euler_matrix(1, 2, 3) + >>> scale, shear, angles, trans, persp = decompose_matrix(R0) + >>> R1 = euler_matrix(*angles) + >>> numpy.allclose(R0, R1) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=True).T + if abs(M[3, 3]) < _EPS: + raise ValueError("M[3, 3] is zero") + M /= M[3, 3] + P = M.copy() + P[:, 3] = 0.0, 0.0, 0.0, 1.0 + if not numpy.linalg.det(P): + raise ValueError("matrix is singular") + + scale = numpy.zeros((3,)) + shear = [0.0, 0.0, 0.0] + angles = [0.0, 0.0, 0.0] + + if any(abs(M[:3, 3]) > _EPS): + perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T)) + M[:, 3] = 0.0, 0.0, 0.0, 1.0 + else: + perspective = numpy.array([0.0, 0.0, 0.0, 1.0]) + + translate = M[3, :3].copy() + M[3, :3] = 0.0 + + row = M[:3, :3].copy() + scale[0] = vector_norm(row[0]) + row[0] /= scale[0] + shear[0] = numpy.dot(row[0], row[1]) + row[1] -= row[0] * shear[0] + scale[1] = vector_norm(row[1]) + row[1] /= scale[1] + shear[0] /= scale[1] + shear[1] = numpy.dot(row[0], row[2]) + row[2] -= row[0] * shear[1] + shear[2] = numpy.dot(row[1], row[2]) + row[2] -= row[1] * shear[2] + scale[2] = vector_norm(row[2]) + row[2] /= scale[2] + shear[1:] /= scale[2] + + if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0: + numpy.negative(scale, scale) + numpy.negative(row, row) + + angles[1] = math.asin(-row[0, 2]) + if math.cos(angles[1]): + angles[0] = math.atan2(row[1, 2], row[2, 2]) + angles[2] = math.atan2(row[0, 1], row[0, 0]) + else: + # angles[0] = math.atan2(row[1, 0], row[1, 1]) + angles[0] = math.atan2(-row[2, 1], row[1, 1]) + angles[2] = 0.0 + + return scale, shear, angles, translate, perspective + + +def compose_matrix(scale=None, shear=None, angles=None, translate=None, + perspective=None): + """Return transformation matrix from sequence of transformations. + + This is the inverse of the decompose_matrix function. 
+ + Sequence of transformations: + scale : vector of 3 scaling factors + shear : list of shear factors for x-y, x-z, y-z axes + angles : list of Euler angles about static x, y, z axes + translate : translation vector along x, y, z axes + perspective : perspective partition of matrix + + >>> scale = numpy.random.random(3) - 0.5 + >>> shear = numpy.random.random(3) - 0.5 + >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi) + >>> trans = numpy.random.random(3) - 0.5 + >>> persp = numpy.random.random(4) - 0.5 + >>> M0 = compose_matrix(scale, shear, angles, trans, persp) + >>> result = decompose_matrix(M0) + >>> M1 = compose_matrix(*result) + >>> is_same_transform(M0, M1) + True + + """ + M = numpy.identity(4) + if perspective is not None: + P = numpy.identity(4) + P[3, :] = perspective[:4] + M = numpy.dot(M, P) + if translate is not None: + T = numpy.identity(4) + T[:3, 3] = translate[:3] + M = numpy.dot(M, T) + if angles is not None: + R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') + M = numpy.dot(M, R) + if shear is not None: + Z = numpy.identity(4) + Z[1, 2] = shear[2] + Z[0, 2] = shear[1] + Z[0, 1] = shear[0] + M = numpy.dot(M, Z) + if scale is not None: + S = numpy.identity(4) + S[0, 0] = scale[0] + S[1, 1] = scale[1] + S[2, 2] = scale[2] + M = numpy.dot(M, S) + M /= M[3, 3] + return M + + +def orthogonalization_matrix(lengths, angles): + """Return orthogonalization matrix for crystallographic cell coordinates. + + Angles are expected in degrees. + + The de-orthogonalization matrix is the inverse. + + >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90]) + >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10) + True + >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) + >>> numpy.allclose(numpy.sum(O), 43.063229) + True + + """ + a, b, c = lengths + angles = numpy.radians(angles) + sina, sinb, _ = numpy.sin(angles) + cosa, cosb, cosg = numpy.cos(angles) + co = (cosa * cosb - cosg) / (sina * sinb) + return numpy.array([ + [a * sinb * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0], + [-a * sinb * co, b * sina, 0.0, 0.0], + [a * cosb, b * cosa, c, 0.0], + [0.0, 0.0, 0.0, 1.0]]) + + +def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True): + """Return affine transform matrix to register two point sets. + + v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous + coordinates, where ndims is the dimensionality of the coordinate space. + + If shear is False, a similarity transformation matrix is returned. + If also scale is False, a rigid/Euclidean transformation matrix + is returned. + + By default the algorithm by Hartley and Zissermann [15] is used. + If usesvd is True, similarity and Euclidean transformation matrices + are calculated by minimizing the weighted sum of squared deviations + (RMSD) according to the algorithm by Kabsch [8]. + Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9] + is used, which is slower when using this Python implementation. + + The returned matrix performs rotation, translation and uniform scaling + (if specified). + + >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]] + >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]] + >>> affine_matrix_from_points(v0, v1) + array([[ 0.14549, 0.00062, 675.50008], + [ 0.00048, 0.14094, 53.24971], + [ 0. , 0. , 1. 
]]) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> S = scale_matrix(random.random()) + >>> M = concatenate_matrices(T, R, S) + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1) + >>> M = affine_matrix_from_points(v0[:3], v1[:3]) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + + More examples in superimposition_matrix() + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=True) + v1 = numpy.array(v1, dtype=numpy.float64, copy=True) + + ndims = v0.shape[0] + if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape: + raise ValueError("input arrays are of wrong shape or type") + + # move centroids to origin + t0 = -numpy.mean(v0, axis=1) + M0 = numpy.identity(ndims + 1) + M0[:ndims, ndims] = t0 + v0 += t0.reshape(ndims, 1) + t1 = -numpy.mean(v1, axis=1) + M1 = numpy.identity(ndims + 1) + M1[:ndims, ndims] = t1 + v1 += t1.reshape(ndims, 1) + + if shear: + # Affine transformation + A = numpy.concatenate((v0, v1), axis=0) + u, s, vh = numpy.linalg.svd(A.T) + vh = vh[:ndims].T + B = vh[:ndims] + C = vh[ndims:2 * ndims] + t = numpy.dot(C, numpy.linalg.pinv(B)) + t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1) + M = numpy.vstack((t, ((0.0,) * ndims) + (1.0,))) + elif usesvd or ndims != 3: + # Rigid transformation via SVD of covariance matrix + u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T)) + # rotation matrix from SVD orthonormal bases + R = numpy.dot(u, vh) + if numpy.linalg.det(R) < 0.0: + # R does not constitute right handed system + R -= numpy.outer(u[:, ndims - 1], vh[ndims - 1, :] * 2.0) + s[-1] *= -1.0 + # homogeneous transformation matrix + M = numpy.identity(ndims + 1) + M[:ndims, :ndims] = R + else: + # Rigid transformation matrix via quaternion + # compute symmetric matrix N + xx, yy, zz = numpy.sum(v0 * v1, axis=1) + xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1) + xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1) + N = [[xx + yy + zz, 0.0, 0.0, 0.0], + [yz - zy, xx - yy - zz, 0.0, 0.0], + [zx - xz, xy + yx, yy - xx - zz, 0.0], + [xy - yx, zx + xz, yz + zy, zz - xx - yy]] + # quaternion: eigenvector corresponding to most positive eigenvalue + w, V = numpy.linalg.eigh(N) + q = V[:, numpy.argmax(w)] + q /= vector_norm(q) # unit quaternion + # homogeneous transformation matrix + M = quaternion_matrix(q) + + if scale and not shear: + # Affine transformation; scale is ratio of RMS deviations from centroid + v0 *= v0 + v1 *= v1 + M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0)) + + # move centroids back + M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0)) + M /= M[ndims, ndims] + return M + + +def superimposition_matrix(v0, v1, scale=False, usesvd=True): + """Return matrix to transform given 3D point set into second point set. + + v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points. + + The parameters scale and usesvd are explained in the more general + affine_matrix_from_points function. + + The returned matrix is a similarity or Euclidean transformation matrix. + This function has a fast C implementation in transformations.c. 
+ + >>> v0 = numpy.random.rand(3, 10) + >>> M = superimposition_matrix(v0, v0) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> R = random_rotation_matrix(numpy.random.random(3)) + >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]] + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20 + >>> v0[3] = 1 + >>> v1 = numpy.dot(R, v0) + >>> M = superimposition_matrix(v0, v1) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> S = scale_matrix(random.random()) + >>> T = translation_matrix(numpy.random.random(3)-0.5) + >>> M = concatenate_matrices(T, R, S) + >>> v1 = numpy.dot(M, v0) + >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1) + >>> M = superimposition_matrix(v0, v1, scale=True) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v0)) + True + >>> v = numpy.empty((4, 100, 3)) + >>> v[:, :, 0] = v0 + >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False) + >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0])) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3] + v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3] + return affine_matrix_from_points(v0, v1, shear=False, + scale=scale, usesvd=usesvd) + + +def euler_matrix(ai, aj, ak, axes='sxyz'): + """Return homogeneous rotation matrix from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> R = euler_matrix(1, 2, 3, 'syxz') + >>> numpy.allclose(numpy.sum(R[0]), -1.34786452) + True + >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) + >>> numpy.allclose(numpy.sum(R[0]), -0.383436184) + True + >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + >>> for axes in _TUPLE2AXES.keys(): + ... R = euler_matrix(ai, aj, ak, axes) + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i + parity] + k = _NEXT_AXIS[i - parity + 1] + + if frame: + ai, ak = ak, ai + if parity: + ai, aj, ak = -ai, -aj, -ak + + si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) + ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) + cc, cs = ci * ck, ci * sk + sc, ss = si * ck, si * sk + + M = numpy.identity(4) + if repetition: + M[i, i] = cj + M[i, j] = sj * si + M[i, k] = sj * ci + M[j, i] = sj * sk + M[j, j] = -cj * ss + cc + M[j, k] = -cj * cs - sc + M[k, i] = -sj * ck + M[k, j] = cj * sc + cs + M[k, k] = cj * cc - ss + else: + M[i, i] = cj * ck + M[i, j] = sj * sc - cs + M[i, k] = sj * cc + ss + M[j, i] = cj * sk + M[j, j] = sj * ss + cc + M[j, k] = sj * cs - sc + M[k, i] = -sj + M[k, j] = cj * si + M[k, k] = cj * ci + return M + + +def euler_from_matrix(matrix, axes='sxyz'): + """Return Euler angles from rotation matrix for specified axis sequence. + + axes : One of 24 axis sequences as string or encoded tuple + + Note that many Euler angle triplets can describe one matrix. + + >>> R0 = euler_matrix(1, 2, 3, 'syxz') + >>> al, be, ga = euler_from_matrix(R0, 'syxz') + >>> R1 = euler_matrix(al, be, ga, 'syxz') + >>> numpy.allclose(R0, R1) + True + >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5) + >>> for axes in _AXES2TUPLE.keys(): + ... 
R0 = euler_matrix(axes=axes, *angles) + ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) + ... if not numpy.allclose(R0, R1): print(axes, "failed") + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + j = _NEXT_AXIS[i + parity] + k = _NEXT_AXIS[i - parity + 1] + + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3] + if repetition: + sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) + if sy > _EPS: + ax = math.atan2(M[i, j], M[i, k]) + ay = math.atan2(sy, M[i, i]) + az = math.atan2(M[j, i], -M[k, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2(sy, M[i, i]) + az = 0.0 + else: + cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) + if cy > _EPS: + ax = math.atan2(M[k, j], M[k, k]) + ay = math.atan2(-M[k, i], cy) + az = math.atan2(M[j, i], M[i, i]) + else: + ax = math.atan2(-M[j, k], M[j, j]) + ay = math.atan2(-M[k, i], cy) + az = 0.0 + + if parity: + ax, ay, az = -ax, -ay, -az + if frame: + ax, az = az, ax + return ax, ay, az + + +def euler_from_quaternion(quaternion, axes='sxyz'): + """Return Euler angles from quaternion for specified axis sequence. + + >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(angles, [0.123, 0, 0]) + True + + """ + return euler_from_matrix(quaternion_matrix(quaternion), axes) + + +def quaternion_from_euler(ai, aj, ak, axes='sxyz'): + """Return quaternion from Euler angles and axis sequence. + + ai, aj, ak : Euler's roll, pitch and yaw angles + axes : One of 24 axis sequences as string or encoded tuple + + >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') + >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435]) + True + + """ + try: + firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] + except (AttributeError, KeyError): + _TUPLE2AXES[axes] # validation + firstaxis, parity, repetition, frame = axes + + i = firstaxis + 1 + j = _NEXT_AXIS[i + parity - 1] + 1 + k = _NEXT_AXIS[i - parity] + 1 + + if frame: + ai, ak = ak, ai + if parity: + aj = -aj + + ai /= 2.0 + aj /= 2.0 + ak /= 2.0 + ci = math.cos(ai) + si = math.sin(ai) + cj = math.cos(aj) + sj = math.sin(aj) + ck = math.cos(ak) + sk = math.sin(ak) + cc = ci * ck + cs = ci * sk + sc = si * ck + ss = si * sk + + q = numpy.empty((4,)) + if repetition: + q[0] = cj * (cc - ss) + q[i] = cj * (cs + sc) + q[j] = sj * (cc + ss) + q[k] = sj * (cs - sc) + else: + q[0] = cj * cc + sj * ss + q[i] = cj * sc - sj * cs + q[j] = cj * ss + sj * cc + q[k] = cj * cs - sj * sc + if parity: + q[j] *= -1.0 + + return q + + +def quaternion_about_axis(angle, axis): + """Return quaternion for rotation about axis. + + >>> q = quaternion_about_axis(0.123, [1, 0, 0]) + >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0]) + True + + """ + q = numpy.array([0.0, axis[0], axis[1], axis[2]]) + qlen = vector_norm(q) + if qlen > _EPS: + q *= math.sin(angle / 2.0) / qlen + q[0] = math.cos(angle / 2.0) + return q + + +def quaternion_matrix(quaternion): + """Return homogeneous rotation matrix from quaternion. 
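+
+    For a unit quaternion [w, x, y, z], the upper-left 3x3 block of the
+    returned matrix is
+
+        [[1-2(yy+zz),   2(xy-zw),   2(xz+yw)],
+         [  2(xy+zw), 1-2(xx+zz),   2(yz-xw)],
+         [  2(xz-yw),   2(yz+xw), 1-2(xx+yy)]]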
+ + >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) + >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0])) + True + >>> M = quaternion_matrix([1, 0, 0, 0]) + >>> numpy.allclose(M, numpy.identity(4)) + True + >>> M = quaternion_matrix([0, 1, 0, 0]) + >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1])) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + n = numpy.dot(q, q) + if n < _EPS: + return numpy.identity(4) + q *= math.sqrt(2.0 / n) + q = numpy.outer(q, q) + return numpy.array([ + [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0], + [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0], + [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0], + [0.0, 0.0, 0.0, 1.0]]) + + +def quaternion_from_matrix(matrix, isprecise=False): + """Return quaternion from rotation matrix. + + If isprecise is True, the input matrix is assumed to be a precise rotation + matrix and a faster algorithm is used. + + >>> q = quaternion_from_matrix(numpy.identity(4), True) + >>> numpy.allclose(q, [1, 0, 0, 0]) + True + >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1])) + >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0]) + True + >>> R = rotation_matrix(0.123, (1, 2, 3)) + >>> q = quaternion_from_matrix(R, True) + >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) + True + >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], + ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) + True + >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], + ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] + >>> q = quaternion_from_matrix(R) + >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) + True + >>> R = random_rotation_matrix() + >>> q = quaternion_from_matrix(R) + >>> is_same_transform(R, quaternion_matrix(q)) + True + + """ + M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4] + if isprecise: + q = numpy.empty((4,)) + t = numpy.trace(M) + if t > M[3, 3]: + q[0] = t + q[3] = M[1, 0] - M[0, 1] + q[2] = M[0, 2] - M[2, 0] + q[1] = M[2, 1] - M[1, 2] + else: + i, j, k = 1, 2, 3 + if M[1, 1] > M[0, 0]: + i, j, k = 2, 3, 1 + if M[2, 2] > M[i, i]: + i, j, k = 3, 1, 2 + t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] + q[i] = t + q[j] = M[i, j] + M[j, i] + q[k] = M[k, i] + M[i, k] + q[3] = M[k, j] - M[j, k] + q *= 0.5 / math.sqrt(t * M[3, 3]) + else: + m00 = M[0, 0] + m01 = M[0, 1] + m02 = M[0, 2] + m10 = M[1, 0] + m11 = M[1, 1] + m12 = M[1, 2] + m20 = M[2, 0] + m21 = M[2, 1] + m22 = M[2, 2] + # symmetric matrix K + K = numpy.array([[m00 - m11 - m22, 0.0, 0.0, 0.0], + [m01 + m10, m11 - m00 - m22, 0.0, 0.0], + [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0], + [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22]]) + K /= 3.0 + # quaternion is eigenvector of K that corresponds to largest eigenvalue + w, V = numpy.linalg.eigh(K) + q = V[[3, 0, 1, 2], numpy.argmax(w)] + if q[0] < 0.0: + numpy.negative(q, q) + return q + + +def quaternion_multiply(quaternion1, quaternion0): + """Return multiplication of two quaternions. 
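+
+    Quaternions are given in [w, x, y, z] order and the Hamilton product
+    quaternion1 * quaternion0 is returned, i.e. the result corresponds to
+    applying the rotation quaternion0 first and quaternion1 second.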
+ + >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7]) + >>> numpy.allclose(q, [28, -44, -14, 48]) + True + + """ + w0, x0, y0, z0 = quaternion0 + w1, x1, y1, z1 = quaternion1 + return numpy.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, + x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, + -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, + x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=numpy.float64) + + +def quaternion_conjugate(quaternion): + """Return conjugate of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_conjugate(q0) + >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q + + +def quaternion_inverse(quaternion): + """Return inverse of quaternion. + + >>> q0 = random_quaternion() + >>> q1 = quaternion_inverse(q0) + >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) + True + + """ + q = numpy.array(quaternion, dtype=numpy.float64, copy=True) + numpy.negative(q[1:], q[1:]) + return q / numpy.dot(q, q) + + +def quaternion_real(quaternion): + """Return real part of quaternion. + + >>> quaternion_real([3, 0, 1, 2]) + 3.0 + + """ + return float(quaternion[0]) + + +def quaternion_imag(quaternion): + """Return imaginary part of quaternion. + + >>> quaternion_imag([3, 0, 1, 2]) + array([ 0., 1., 2.]) + + """ + return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True) + + +def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): + """Return spherical linear interpolation between two quaternions. + + >>> q0 = random_quaternion() + >>> q1 = random_quaternion() + >>> q = quaternion_slerp(q0, q1, 0) + >>> numpy.allclose(q, q0) + True + >>> q = quaternion_slerp(q0, q1, 1, 1) + >>> numpy.allclose(q, q1) + True + >>> q = quaternion_slerp(q0, q1, 0.5) + >>> angle = math.acos(numpy.dot(q0, q)) + >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \ + numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle) + True + + """ + q0 = unit_vector(quat0[:4]) + q1 = unit_vector(quat1[:4]) + if fraction == 0.0: + return q0 + elif fraction == 1.0: + return q1 + d = numpy.dot(q0, q1) + if abs(abs(d) - 1.0) < _EPS: + return q0 + if shortestpath and d < 0.0: + # invert rotation + d = -d + numpy.negative(q1, q1) + angle = math.acos(d) + spin * math.pi + if abs(angle) < _EPS: + return q0 + isin = 1.0 / math.sin(angle) + q0 *= math.sin((1.0 - fraction) * angle) * isin + q1 *= math.sin(fraction * angle) * isin + q0 += q1 + return q0 + + +def random_quaternion(rand=None): + """Return uniform random unit quaternion. + + rand: array like or None + Three independent random variables that are uniformly distributed + between 0 and 1. + + >>> q = random_quaternion() + >>> numpy.allclose(1, vector_norm(q)) + True + >>> q = random_quaternion(numpy.random.random(3)) + >>> len(q.shape), q.shape[0]==4 + (1, True) + + """ + if rand is None: + rand = numpy.random.rand(3) + else: + assert len(rand) == 3 + r1 = numpy.sqrt(1.0 - rand[0]) + r2 = numpy.sqrt(rand[0]) + pi2 = math.pi * 2.0 + t1 = pi2 * rand[1] + t2 = pi2 * rand[2] + return numpy.array([numpy.cos(t2) * r2, numpy.sin(t1) * r1, + numpy.cos(t1) * r1, numpy.sin(t2) * r2]) + + +def random_rotation_matrix(rand=None): + """Return uniform random rotation matrix. + + rand: array like + Three independent random variables that are uniformly distributed + between 0 and 1 for each returned quaternion. 
+ + >>> R = random_rotation_matrix() + >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4)) + True + + """ + return quaternion_matrix(random_quaternion(rand)) + + +class Arcball(object): + """Virtual Trackball Control. + + >>> ball = Arcball() + >>> ball = Arcball(initial=numpy.identity(4)) + >>> ball.place([320, 320], 320) + >>> ball.down([500, 250]) + >>> ball.drag([475, 275]) + >>> R = ball.matrix() + >>> numpy.allclose(numpy.sum(R), 3.90583455) + True + >>> ball = Arcball(initial=[1, 0, 0, 0]) + >>> ball.place([320, 320], 320) + >>> ball.setaxes([1, 1, 0], [-1, 1, 0]) + >>> ball.constrain = True + >>> ball.down([400, 200]) + >>> ball.drag([200, 400]) + >>> R = ball.matrix() + >>> numpy.allclose(numpy.sum(R), 0.2055924) + True + >>> ball.next() + + """ + + def __init__(self, initial=None): + """Initialize virtual trackball control. + + initial : quaternion or rotation matrix + + """ + self._axis = None + self._axes = None + self._radius = 1.0 + self._center = [0.0, 0.0] + self._vdown = numpy.array([0.0, 0.0, 1.0]) + self._constrain = False + if initial is None: + self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0]) + else: + initial = numpy.array(initial, dtype=numpy.float64) + if initial.shape == (4, 4): + self._qdown = quaternion_from_matrix(initial) + elif initial.shape == (4,): + initial /= vector_norm(initial) + self._qdown = initial + else: + raise ValueError("initial not a quaternion or matrix") + self._qnow = self._qpre = self._qdown + + def place(self, center, radius): + """Place Arcball, e.g. when window size changes. + + center : sequence[2] + Window coordinates of trackball center. + radius : float + Radius of trackball in window coordinates. + + """ + self._radius = float(radius) + self._center[0] = center[0] + self._center[1] = center[1] + + def setaxes(self, *axes): + """Set axes to constrain rotations.""" + if axes is None: + self._axes = None + else: + self._axes = [unit_vector(axis) for axis in axes] + + @property + def constrain(self): + """Return state of constrain to axis mode.""" + return self._constrain + + @constrain.setter + def constrain(self, value): + """Set state of constrain to axis mode.""" + self._constrain = bool(value) + + def down(self, point): + """Set initial cursor window coordinates and pick constrain-axis.""" + self._vdown = arcball_map_to_sphere(point, self._center, self._radius) + self._qdown = self._qpre = self._qnow + if self._constrain and self._axes is not None: + self._axis = arcball_nearest_axis(self._vdown, self._axes) + self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) + else: + self._axis = None + + def drag(self, point): + """Update current cursor window coordinates.""" + vnow = arcball_map_to_sphere(point, self._center, self._radius) + if self._axis is not None: + vnow = arcball_constrain_to_axis(vnow, self._axis) + self._qpre = self._qnow + t = numpy.cross(self._vdown, vnow) + if numpy.dot(t, t) < _EPS: + self._qnow = self._qdown + else: + q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]] + self._qnow = quaternion_multiply(q, self._qdown) + + def next(self, acceleration=0.0): + """Continue rotation in direction of last drag.""" + q = quaternion_slerp(self._qpre, self._qnow, 2.0 + acceleration, False) + self._qpre, self._qnow = self._qnow, q + + def matrix(self): + """Return homogeneous rotation matrix.""" + return quaternion_matrix(self._qnow) + + +def arcball_map_to_sphere(point, center, radius): + """Return unit sphere coordinates from window coordinates.""" + v0 = (point[0] - center[0]) / radius + v1 = (center[1] 
- point[1]) / radius + n = v0 * v0 + v1 * v1 + if n > 1.0: + # position outside of sphere + n = math.sqrt(n) + return numpy.array([v0 / n, v1 / n, 0.0]) + else: + return numpy.array([v0, v1, math.sqrt(1.0 - n)]) + + +def arcball_constrain_to_axis(point, axis): + """Return sphere point perpendicular to axis.""" + v = numpy.array(point, dtype=numpy.float64, copy=True) + a = numpy.array(axis, dtype=numpy.float64, copy=True) + v -= a * numpy.dot(a, v) # on plane + n = vector_norm(v) + if n > _EPS: + if v[2] < 0.0: + numpy.negative(v, v) + v /= n + return v + if a[2] == 1.0: + return numpy.array([1.0, 0.0, 0.0]) + return unit_vector([-a[1], a[0], 0.0]) + + +def arcball_nearest_axis(point, axes): + """Return axis, which arc is nearest to point.""" + point = numpy.array(point, dtype=numpy.float64, copy=False) + nearest = None + mx = -1.0 + for axis in axes: + t = numpy.dot(arcball_constrain_to_axis(point, axis), point) + if t > mx: + nearest = axis + mx = t + return nearest + + +# epsilon for testing whether a number is close to zero +_EPS = numpy.finfo(float).eps * 4.0 + +# axis sequences for Euler angles +_NEXT_AXIS = [1, 2, 0, 1] + +# map axes strings to/from tuples of inner axis, parity, repetition, frame +_AXES2TUPLE = { + 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), + 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), + 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), + 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), + 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), + 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), + 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), + 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} + +_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) + + +def vector_norm(data, axis=None, out=None): + """Return length, i.e. Euclidean norm, of ndarray along axis. + + >>> v = numpy.random.random(3) + >>> n = vector_norm(v) + >>> numpy.allclose(n, numpy.linalg.norm(v)) + True + >>> v = numpy.random.rand(6, 5, 3) + >>> n = vector_norm(v, axis=-1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2))) + True + >>> n = vector_norm(v, axis=1) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> v = numpy.random.rand(5, 4, 3) + >>> n = numpy.empty((5, 3)) + >>> vector_norm(v, axis=1, out=n) + >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1))) + True + >>> vector_norm([]) + 0.0 + >>> vector_norm([1]) + 1.0 + + """ + data = numpy.array(data, dtype=numpy.float64, copy=True) + if out is None: + if data.ndim == 1: + return math.sqrt(numpy.dot(data, data)) + data *= data + out = numpy.atleast_1d(numpy.sum(data, axis=axis)) + numpy.sqrt(out, out) + return out + else: + data *= data + numpy.sum(data, axis=axis, out=out) + numpy.sqrt(out, out) + + +def unit_vector(data, axis=None, out=None): + """Return ndarray normalized by length, i.e. Euclidean norm, along axis. 
+ + >>> v0 = numpy.random.random(3) + >>> v1 = unit_vector(v0) + >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) + True + >>> v0 = numpy.random.rand(5, 4, 3) + >>> v1 = unit_vector(v0, axis=-1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) + >>> numpy.allclose(v1, v2) + True + >>> v1 = unit_vector(v0, axis=1) + >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) + >>> numpy.allclose(v1, v2) + True + >>> v1 = numpy.empty((5, 4, 3)) + >>> unit_vector(v0, axis=1, out=v1) + >>> numpy.allclose(v1, v2) + True + >>> list(unit_vector([])) + [] + >>> list(unit_vector([1])) + [1.0] + + """ + if out is None: + data = numpy.array(data, dtype=numpy.float64, copy=True) + if data.ndim == 1: + data /= math.sqrt(numpy.dot(data, data)) + return data + else: + if out is not data: + out[:] = numpy.array(data, copy=False) + data = out + length = numpy.atleast_1d(numpy.sum(data * data, axis)) + numpy.sqrt(length, length) + if axis is not None: + length = numpy.expand_dims(length, axis) + data /= length + if out is None: + return data + + +def random_vector(size): + """Return array of random doubles in the half-open interval [0.0, 1.0). + + >>> v = random_vector(10000) + >>> numpy.all(v >= 0) and numpy.all(v < 1) + True + >>> v0 = random_vector(10) + >>> v1 = random_vector(10) + >>> numpy.any(v0 == v1) + False + + """ + return numpy.random.random(size) + + +def vector_product(v0, v1, axis=0): + """Return vector perpendicular to vectors. + + >>> v = vector_product([2, 0, 0], [0, 3, 0]) + >>> numpy.allclose(v, [0, 0, 6]) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> v = vector_product(v0, v1) + >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> v = vector_product(v0, v1, axis=1) + >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]]) + True + + """ + return numpy.cross(v0, v1, axis=axis) + + +def angle_between_vectors(v0, v1, directed=True, axis=0): + """Return angle between vectors. + + If directed is False, the input vectors are interpreted as undirected axes, + i.e. the maximum angle is pi/2. + + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3]) + >>> numpy.allclose(a, math.pi) + True + >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False) + >>> numpy.allclose(a, 0) + True + >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]] + >>> v1 = [[3], [0], [0]] + >>> a = angle_between_vectors(v0, v1) + >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532]) + True + >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]] + >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]] + >>> a = angle_between_vectors(v0, v1, axis=1) + >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532]) + True + + """ + v0 = numpy.array(v0, dtype=numpy.float64, copy=False) + v1 = numpy.array(v1, dtype=numpy.float64, copy=False) + dot = numpy.sum(v0 * v1, axis=axis) + dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis) + return numpy.arccos(dot if directed else numpy.fabs(dot)) + + +def inverse_matrix(matrix): + """Return inverse of square transformation matrix. + + >>> M0 = random_rotation_matrix() + >>> M1 = inverse_matrix(M0.T) + >>> numpy.allclose(M1, numpy.linalg.inv(M0.T)) + True + >>> for size in range(1, 7): + ... M0 = numpy.random.rand(size, size) + ... M1 = inverse_matrix(M0) + ... 
if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size) + + """ + return numpy.linalg.inv(matrix) + + +def concatenate_matrices(*matrices): + """Return concatenation of series of transformation matrices. + + >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5 + >>> numpy.allclose(M, concatenate_matrices(M)) + True + >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T)) + True + + """ + M = numpy.identity(4) + for i in matrices: + M = numpy.dot(M, i) + return M + + +def is_same_transform(matrix0, matrix1): + """Return True if two matrices perform same transformation. + + >>> is_same_transform(numpy.identity(4), numpy.identity(4)) + True + >>> is_same_transform(numpy.identity(4), random_rotation_matrix()) + False + + """ + matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True) + matrix0 /= matrix0[3, 3] + matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True) + matrix1 /= matrix1[3, 3] + return numpy.allclose(matrix0, matrix1) + + +def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'): + """Try import all public attributes from module into global namespace. + + Existing attributes with name clashes are renamed with prefix. + Attributes starting with underscore are ignored by default. + + Return True on successful import. + + """ + import warnings + from importlib import import_module + try: + if not package: + module = import_module(name) + else: + module = import_module('.' + name, package=package) + except ImportError: + if warn: + warnings.warn("failed to import module %s" % name) + else: + for attr in dir(module): + if ignore and attr.startswith(ignore): + continue + if prefix: + if attr in globals(): + globals()[prefix + attr] = globals()[attr] + elif warn: + warnings.warn("no Python implementation of " + attr) + globals()[attr] = getattr(module, attr) + return True + + +# _import_module('_transformations') + +if __name__ == "__main__": + import doctest + import random # used in doctests + + numpy.set_printoptions(suppress=True, precision=5) + doctest.testmod() diff --git a/bop_toolkit_lib/view_sampler.py b/bop_toolkit_lib/view_sampler.py new file mode 100644 index 0000000..441d888 --- /dev/null +++ b/bop_toolkit_lib/view_sampler.py @@ -0,0 +1,291 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Sampling of views from a sphere.""" + +import math +import numpy as np + +from bop_toolkit_lib import transform +from bop_toolkit_lib import inout + + +def fibonacci_sampling(n_pts, radius=1.0): + """Samples an odd number of almost equidistant 3D points from the Fibonacci + lattice on a unit sphere. + + Latitude (elevation) represents the rotation angle around the X axis. + Longitude (azimuth) represents the rotation angle around the Z axis. + + Ref: + [1] https://arxiv.org/pdf/0912.4540.pdf + [2] http://stackoverflow.com/questions/34302938/map-point-to-closest-point-on-fibonacci-lattice + [3] http://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere + [4] https://www.openprocessing.org/sketch/41142 + + :param n_pts: Number of 3D points to sample (an odd number). + :param radius: Radius of the sphere. + :return: List of 3D points on the sphere surface. + """ + # Needs to be an odd number [1]. + assert (n_pts % 2 == 1) + n_pts_half = int(n_pts / 2) + + phi = (math.sqrt(5.0) + 1.0) / 2.0 # Golden ratio. + phi_inv = phi - 1.0 + ga = 2.0 * math.pi * phi_inv # Complement to the golden angle. 
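+
+  # Each index i in [-n_pts_half, n_pts_half] is mapped below to latitude
+  # asin(2 * i / n_pts) and longitude (ga * i) mod 2*pi, i.e. consecutive
+  # points advance by the golden angle in azimuth while sweeping the latitude
+  # range, which spreads them almost uniformly over the sphere.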
+ + pts = [] + for i in range(-n_pts_half, n_pts_half + 1): + lat = math.asin((2 * i) / float(2 * n_pts_half + 1)) + lon = (ga * i) % (2 * math.pi) + + # Convert the latitude and longitude angles to 3D coordinates. + s = math.cos(lat) * radius + x, y, z = math.cos(lon) * s, math.sin(lon) * s, math.tan(lat) * s + pts.append([x, y, z]) + + # Calculate rotation matrix and translation vector. + # Note: lat,lon=0,0 is a camera looking to the sphere center from + # (-radius, 0, 0) in the world (i.e. sphere) coordinate system. + # pi_half = 0.5 * math.pi + # alpha_x = -lat - pi_half + # alpha_z = lon + pi_half + # R_x = transform.rotation_matrix(alpha_x, [1, 0, 0])[:3, :3] + # R_z = transform.rotation_matrix(alpha_z, [0, 0, 1])[:3, :3] + # R = np.linalg.inv(R_z.dot(R_x)) + # t = -R.dot(np.array([x, y, z]).reshape((3, 1))) + + return pts + + +def hinter_sampling(min_n_pts, radius=1.0): + """Samples 3D points on a sphere surface by refining an icosahedron, as in: + Hinterstoisser et al., Simultaneous Recognition and Homography Extraction of + Local Patches with a Simple Linear Classifier, BMVC 2008 + + :param min_n_pts: The minimum number of points to sample on the whole sphere. + :param radius: Radius of the sphere. + :return: 3D points on the sphere surface and a list with indices of refinement + levels on which the points were created. + """ + # Vertices and faces of an icosahedron. + a, b, c = 0.0, 1.0, (1.0 + math.sqrt(5.0)) / 2.0 + pts = [(-b, c, a), (b, c, a), (-b, -c, a), (b, -c, a), (a, -b, c), (a, b, c), + (a, -b, -c), (a, b, -c), (c, a, -b), (c, a, b), (-c, a, -b), + (-c, a, b)] + faces = [(0, 11, 5), (0, 5, 1), (0, 1, 7), (0, 7, 10), (0, 10, 11), (1, 5, 9), + (5, 11, 4), (11, 10, 2), (10, 7, 6), (7, 1, 8), (3, 9, 4), (3, 4, 2), + (3, 2, 6), (3, 6, 8), (3, 8, 9), (4, 9, 5), (2, 4, 11), (6, 2, 10), + (8, 6, 7), (9, 8, 1)] + + # Refinement levels on which the points were created. + pts_level = [0 for _ in range(len(pts))] + + ref_level = 0 + while len(pts) < min_n_pts: + ref_level += 1 + edge_pt_map = {} # Mapping from an edge to a newly added point on the edge. + faces_new = [] # New set of faces. + + # Each face is replaced by four new smaller faces. + for face in faces: + pt_inds = list(face) # List of point ID's involved in the new faces. + for i in range(3): + + # Add a new point if this edge has not been processed yet, or get ID of + # the already added point. + edge = (face[i], face[(i + 1) % 3]) + edge = (min(edge), max(edge)) + if edge not in edge_pt_map.keys(): + pt_new_id = len(pts) + edge_pt_map[edge] = pt_new_id + pt_inds.append(pt_new_id) + + pt_new = 0.5 * (np.array(pts[edge[0]]) + np.array(pts[edge[1]])) + pts.append(pt_new.tolist()) + pts_level.append(ref_level) + else: + pt_inds.append(edge_pt_map[edge]) + + # Replace the current face with four new faces. + faces_new += [(pt_inds[0], pt_inds[3], pt_inds[5]), + (pt_inds[3], pt_inds[1], pt_inds[4]), + (pt_inds[3], pt_inds[4], pt_inds[5]), + (pt_inds[5], pt_inds[4], pt_inds[2])] + faces = faces_new + + # Project the points to a sphere. + pts = np.array(pts) + pts *= np.reshape(radius / np.linalg.norm(pts, axis=1), (pts.shape[0], 1)) + + # Collect point connections. + pt_conns = {} + for face in faces: + for i in range(len(face)): + pt_conns.setdefault(face[i], set()).add(face[(i + 1) % len(face)]) + pt_conns[face[i]].add(face[(i + 2) % len(face)]) + + # Order the points - starting from the top one and adding the connected points + # sorted by azimuth. 
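+  # This amounts to a breadth-first traversal of the point-connection graph:
+  # the points connected to the already processed ones form the next "ring",
+  # and each ring is visited in the order of increasing azimuth.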
+ top_pt_id = np.argmax(pts[:, 2]) + pts_ordered = [] + pts_todo = [top_pt_id] + pts_done = [False for _ in range(pts.shape[0])] + + def calc_azimuth(x, y): + two_pi = 2.0 * math.pi + return (math.atan2(y, x) + two_pi) % two_pi + + while len(pts_ordered) != pts.shape[0]: + # Sort by azimuth. + pts_todo = sorted( + pts_todo, key=lambda i: calc_azimuth(pts[i][0], pts[i][1])) + pts_todo_new = [] + for pt_id in pts_todo: + pts_ordered.append(pt_id) + pts_done[pt_id] = True + pts_todo_new += [i for i in pt_conns[pt_id]] # Find the connected points. + + # Points to be processed in the next iteration. + pts_todo = [i for i in set(pts_todo_new) if not pts_done[i]] + + # Re-order the points and faces. + pts = pts[np.array(pts_ordered), :] + pts_level = [pts_level[i] for i in pts_ordered] + pts_order = np.zeros((pts.shape[0],)) + pts_order[np.array(pts_ordered)] = np.arange(pts.shape[0]) + for face_id in range(len(faces)): + faces[face_id] = [pts_order[i] for i in faces[face_id]] + + # import inout + # inout.save_ply('output/hinter_sampling.ply', pts=pts, faces=np.array(faces)) + + return pts, pts_level + + +def sample_views( + min_n_views, radius=1.0, azimuth_range=(0, 2 * math.pi), + elev_range=(-0.5 * math.pi, 0.5 * math.pi), mode='hinterstoisser'): + """Viewpoint sampling from a view sphere. + + :param min_n_views: The min. number of points to sample on the whole sphere. + :param radius: Radius of the sphere. + :param azimuth_range: Azimuth range from which the viewpoints are sampled. + :param elev_range: Elevation range from which the viewpoints are sampled. + :param mode: Type of sampling (options: 'hinterstoisser' or 'fibonacci'). + :return: List of views, each represented by a 3x3 ndarray with a rotation + matrix and a 3x1 ndarray with a translation vector. + """ + # Get points on a sphere. + if mode == 'hinterstoisser': + pts, pts_level = hinter_sampling(min_n_views, radius=radius) + elif mode == 'fibonacci': + n_views = min_n_views + if n_views % 2 != 1: + n_views += 1 + + pts = fibonacci_sampling(n_views, radius=radius) + pts_level = [0 for _ in range(len(pts))] + else: + raise ValueError('Unknown view sampling mode.') + + views = [] + for pt in pts: + # Azimuth from (0, 2 * pi). + azimuth = math.atan2(pt[1], pt[0]) + if azimuth < 0: + azimuth += 2.0 * math.pi + + # Elevation from (-0.5 * pi, 0.5 * pi). + a = np.linalg.norm(pt) + b = np.linalg.norm([pt[0], pt[1], 0]) + elev = math.acos(b / a) + if pt[2] < 0: + elev = -elev + + if not (azimuth_range[0] <= azimuth <= azimuth_range[1] and + elev_range[0] <= elev <= elev_range[1]): + continue + + # Rotation matrix. + # Adopted from gluLookAt function (uses OpenGL coordinate system): + # [1] http://stackoverflow.com/questions/5717654/glulookat-explanation + # [2] https://www.opengl.org/wiki/GluLookAt_code + f = -np.array(pt) # Forward direction. + f /= np.linalg.norm(f) + u = np.array([0.0, 0.0, 1.0]) # Up direction. + s = np.cross(f, u) # Side direction. + if np.count_nonzero(s) == 0: + # f and u are parallel, i.e. we are looking along or against Z axis. + s = np.array([1.0, 0.0, 0.0]) + s /= np.linalg.norm(s) + u = np.cross(s, f) # Recompute up. + R = np.array([[s[0], s[1], s[2]], + [u[0], u[1], u[2]], + [-f[0], -f[1], -f[2]]]) + + # Convert from OpenGL to OpenCV coordinate system. + R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3] + R = R_yz_flip.dot(R) + + # Translation vector. 
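+    # The camera center lies at pt in the world (sphere) coordinate system,
+    # so with X_cam = R * X_world + t the translation is t = -R * pt.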
+ t = -R.dot(np.array(pt).reshape((3, 1))) + + views.append({'R': R, 't': t}) + + return views, pts_level + + +def save_vis(path, views, views_level=None): + """ + Creates a PLY file visualizing the views. + + :param path: Path to output PLY file. + :param views: Views as returned by sample_views(). + :param views_level: View levels as returned by sample_views(). + """ + pts = [] + normals = [] + colors = [] + for view_id, view in enumerate(views): + R_inv = np.linalg.inv(view['R']) + pts += [R_inv.dot(-view['t']).squeeze(), + R_inv.dot(np.array([[0.01, 0, 0]]).T - view['t']).squeeze(), + R_inv.dot(np.array([[0, 0.01, 0]]).T - view['t']).squeeze(), + R_inv.dot(np.array([[0, 0, 0.01]]).T - view['t']).squeeze()] + + normal = R_inv.dot(np.array([0, 0, 1]).reshape((3, 1))) + normals += [normal.squeeze(), + np.array([0, 0, 0]), + np.array([0, 0, 0]), + np.array([0, 0, 0])] + + if views_level: + max_level = max(1, max(views_level)) + intens = (255 * views_level[view_id]) / float(max_level) + else: + intens = 255 * view_id / float(len(views)) + colors += [[intens, intens, intens], + [255, 0, 0], + [0, 255, 0], + [0, 0, 255]] + + inout.save_ply2(path, + pts=np.array(pts), + pts_normals=np.array(normals), + pts_colors=np.array(colors)) + + +if __name__ == '__main__': + + # Example of sampling views from a view sphere. + views, views_level = sample_views( + min_n_views=25, + radius=1, + azimuth_range=(0, 2 * math.pi), + elev_range=(-0.5 * math.pi, 0.5 * math.pi), + mode='fibonacci') + misc.log('Sampled views: ' + str(len(views))) + out_views_vis_path = '../output/view_sphere.ply' + save_vis(out_views_vis_path, views) diff --git a/bop_toolkit_lib/visibility.py b/bop_toolkit_lib/visibility.py new file mode 100644 index 0000000..b90a8ae --- /dev/null +++ b/bop_toolkit_lib/visibility.py @@ -0,0 +1,75 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Estimation of the visible object surface from depth images.""" + +import numpy as np + + +def _estimate_visib_mask(d_test, d_model, delta, visib_mode='bop19'): + """Estimates a mask of the visible object surface. + + :param d_test: Distance image of a scene in which the visibility is estimated. + :param d_model: Rendered distance image of the object model. + :param delta: Tolerance used in the visibility test. + :param visib_mode: Visibility mode: + 1) 'bop18' - Object is considered NOT VISIBLE at pixels with missing depth. + 2) 'bop19' - Object is considered VISIBLE at pixels with missing depth. This + allows to use the VSD pose error function also on shiny objects, which + are typically not captured well by the depth sensors. A possible problem + with this mode is that some invisible parts can be considered visible. + However, the shadows of missing depth measurements, where this problem is + expected to appear and which are often present at depth discontinuities, + are typically relatively narrow and therefore this problem is less + significant. + :return: Visibility mask. 
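+
+  In terms of the inputs, 'bop18' marks a pixel as visible when both depth
+  maps are valid and d_model - d_test <= delta, while 'bop19' marks it as
+  visible when d_model > 0 and (d_model - d_test <= delta or d_test == 0).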
+ """ + assert (d_test.shape == d_model.shape) + + if visib_mode == 'bop18': + mask_valid = np.logical_and(d_test > 0, d_model > 0) + d_diff = d_model.astype(np.float32) - d_test.astype(np.float32) + visib_mask = np.logical_and(d_diff <= delta, mask_valid) + + elif visib_mode == 'bop19': + d_diff = d_model.astype(np.float32) - d_test.astype(np.float32) + visib_mask = np.logical_and( + np.logical_or(d_diff <= delta, d_test == 0), d_model > 0) + + else: + raise ValueError('Unknown visibility mode.') + + return visib_mask + + +def estimate_visib_mask_gt(d_test, d_gt, delta, visib_mode='bop19'): + """Estimates a mask of the visible object surface in the ground-truth pose. + + :param d_test: Distance image of a scene in which the visibility is estimated. + :param d_gt: Rendered distance image of the object model in the GT pose. + :param delta: Tolerance used in the visibility test. + :param visib_mode: See _estimate_visib_mask. + :return: Visibility mask. + """ + visib_gt = _estimate_visib_mask(d_test, d_gt, delta, visib_mode) + return visib_gt + + +def estimate_visib_mask_est(d_test, d_est, visib_gt, delta, visib_mode='bop19'): + """Estimates a mask of the visible object surface in the estimated pose. + + For an explanation of why the visibility mask is calculated differently for + the estimated and the ground-truth pose, see equation (14) and related text in + Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16. + + :param d_test: Distance image of a scene in which the visibility is estimated. + :param d_est: Rendered distance image of the object model in the est. pose. + :param visib_gt: Visibility mask of the object model in the GT pose (from + function estimate_visib_mask_gt). + :param delta: Tolerance used in the visibility test. + :param visib_mode: See _estimate_visib_mask. + :return: Visibility mask. + """ + visib_est = _estimate_visib_mask(d_test, d_est, delta, visib_mode) + visib_est = np.logical_or(visib_est, np.logical_and(visib_gt, d_est > 0)) + return visib_est diff --git a/bop_toolkit_lib/visualization.py b/bop_toolkit_lib/visualization.py new file mode 100644 index 0000000..beeb4d5 --- /dev/null +++ b/bop_toolkit_lib/visualization.py @@ -0,0 +1,222 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Visualization utilities.""" + +import os +import numpy as np +import matplotlib.pyplot as plt +from PIL import Image, ImageDraw, ImageFont + +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc + + +def draw_rect(im, rect, color=(1.0, 1.0, 1.0)): + """Draws a rectangle on an image. + + :param im: ndarray (uint8) on which the rectangle will be drawn. + :param rect: Rectangle defined as [x, y, width, height], where [x, y] is the + top-left corner. + :param color: Color of the rectangle. + :return: Image with drawn rectangle. + """ + if im.dtype != np.uint8: + raise ValueError('The image must be of type uint8.') + + im_pil = Image.fromarray(im) + draw = ImageDraw.Draw(im_pil) + draw.rectangle((rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3]), + outline=tuple([int(c * 255) for c in color]), fill=None) + del draw + return np.asarray(im_pil) + + +def write_text_on_image(im, txt_list, loc=(3, 0), color=(1.0, 1.0, 1.0), + size=20): + """Writes text info on an image. + + :param im: ndarray on which the text info will be written. + :param txt_list: List of dictionaries, each describing one info line: + - 'name': Entry name. + - 'val': Entry value. 
+ - 'fmt': String format for the value. + :param loc: Location of the top left corner of the text box. + :param color: Font color. + :param size: Font size. + :return: Image with written text info. + """ + im_pil = Image.fromarray(im) + + # Load font. + try: + font_path = os.path.join(os.path.dirname(__file__), 'droid_sans_mono.ttf') + font = ImageFont.truetype(font_path, size) + except IOError: + misc.log('Warning: Loading a fallback font.') + font = ImageFont.load_default() + + draw = ImageDraw.Draw(im_pil) + for info in txt_list: + if info['name'] != '': + txt_tpl = '{}:{' + info['fmt'] + '}' + else: + txt_tpl = '{}{' + info['fmt'] + '}' + txt = txt_tpl.format(info['name'], info['val']) + draw.text(loc, txt, fill=tuple([int(c * 255) for c in color]), font=font) + text_width, text_height = font.getsize(txt) + loc = (loc[0], loc[1] + text_height) + del draw + + return np.array(im_pil) + + +def depth_for_vis(depth, valid_start=0.2, valid_end=1.0): + """Transforms depth values from the specified range to [0, 255]. + + :param depth: ndarray with a depth image (1 channel). + :param valid_start: The beginning of the depth range. + :param valid_end: The end of the depth range. + :return: Transformed depth image. + """ + mask = depth > 0 + depth_n = depth.astype(np.float) + depth_n[mask] -= depth_n[mask].min() + depth_n[mask] /= depth_n[mask].max() / (valid_end - valid_start) + depth_n[mask] += valid_start + return depth_n + + +def vis_object_poses( + poses, K, renderer, rgb=None, depth=None, vis_rgb_path=None, + vis_depth_diff_path=None, vis_rgb_resolve_visib=False): + """Visualizes 3D object models in specified poses in a single image. + + Two visualizations are created: + 1. An RGB visualization (if vis_rgb_path is not None). + 2. A Depth-difference visualization (if vis_depth_diff_path is not None). + + :param poses: List of dictionaries, each with info about one pose: + - 'obj_id': Object ID. + - 'R': 3x3 ndarray with a rotation matrix. + - 't': 3x1 ndarray with a translation vector. + - 'text_info': Info to write at the object (see write_text_on_image). + :param K: 3x3 ndarray with an intrinsic camera matrix. + :param renderer: Instance of the Renderer class (see renderer.py). + :param rgb: ndarray with the RGB input image. + :param depth: ndarray with the depth input image. + :param vis_rgb_path: Path to the output RGB visualization. + :param vis_depth_diff_path: Path to the output depth-difference visualization. + :param vis_rgb_resolve_visib: Whether to resolve visibility of the objects + (i.e. only the closest object is visualized at each pixel). + """ + fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + + # Indicators of visualization types. + vis_rgb = vis_rgb_path is not None + vis_depth_diff = vis_depth_diff_path is not None + + if vis_rgb and rgb is None: + raise ValueError('RGB visualization triggered but RGB image not provided.') + + if (vis_depth_diff or (vis_rgb and vis_rgb_resolve_visib)) and depth is None: + raise ValueError('Depth visualization triggered but D image not provided.') + + # Prepare images for rendering. 
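+  # ren_rgb accumulates the color renderings of the objects, ren_rgb_info
+  # collects the 2D bounding boxes and text labels, and ren_depth keeps the
+  # closest rendered depth at each pixel (used for the depth-difference
+  # visualization and for resolving visibility between the objects).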
+ im_size = None + ren_rgb = None + ren_rgb_info = None + ren_depth = None + + if vis_rgb: + im_size = (rgb.shape[1], rgb.shape[0]) + ren_rgb = np.zeros(rgb.shape, np.uint8) + ren_rgb_info = np.zeros(rgb.shape, np.uint8) + + if vis_depth_diff: + if im_size and im_size != (depth.shape[1], depth.shape[0]): + raise ValueError('The RGB and D images must have the same size.') + else: + im_size = (depth.shape[1], depth.shape[0]) + + if vis_depth_diff or (vis_rgb and vis_rgb_resolve_visib): + ren_depth = np.zeros((im_size[1], im_size[0]), np.float32) + + # Render the pose estimates one by one. + for pose in poses: + + # Rendering. + ren_out = renderer.render_object( + pose['obj_id'], pose['R'], pose['t'], fx, fy, cx, cy) + + m_rgb = None + if vis_rgb: + m_rgb = ren_out['rgb'] + + m_mask = None + if vis_depth_diff or (vis_rgb and vis_rgb_resolve_visib): + m_depth = ren_out['depth'] + + # Get mask of the surface parts that are closer than the + # surfaces rendered before. + visible_mask = np.logical_or(ren_depth == 0, m_depth < ren_depth) + m_mask = np.logical_and(m_depth != 0, visible_mask) + + ren_depth[m_mask] = m_depth[m_mask].astype(ren_depth.dtype) + + # Combine the RGB renderings. + if vis_rgb: + if vis_rgb_resolve_visib: + ren_rgb[m_mask] = m_rgb[m_mask].astype(ren_rgb.dtype) + else: + ren_rgb_f = ren_rgb.astype(np.float32) + m_rgb.astype(np.float32) + ren_rgb_f[ren_rgb_f > 255] = 255 + ren_rgb = ren_rgb_f.astype(np.uint8) + + # Draw 2D bounding box and write text info. + obj_mask = np.sum(m_rgb > 0, axis=2) + ys, xs = obj_mask.nonzero() + if len(ys): + # bbox_color = model_color + # text_color = model_color + bbox_color = (0.3, 0.3, 0.3) + text_color = (1.0, 1.0, 1.0) + text_size = 11 + + bbox = misc.calc_2d_bbox(xs, ys, im_size) + im_size = (obj_mask.shape[1], obj_mask.shape[0]) + ren_rgb_info = draw_rect(ren_rgb_info, bbox, bbox_color) + + if 'text_info' in pose: + text_loc = (bbox[0] + 2, bbox[1]) + ren_rgb_info = write_text_on_image( + ren_rgb_info, pose['text_info'], text_loc, color=text_color, + size=text_size) + + # Blend and save the RGB visualization. + if vis_rgb: + vis_im_rgb = 0.5 * rgb.astype(np.float32) + \ + 0.5 * ren_rgb.astype(np.float32) + \ + 1.0 * ren_rgb_info.astype(np.float32) + vis_im_rgb[vis_im_rgb > 255] = 255 + misc.ensure_dir(os.path.dirname(vis_rgb_path)) + inout.save_im(vis_rgb_path, vis_im_rgb.astype(np.uint8), jpg_quality=95) + + # Save the image of depth differences. + if vis_depth_diff: + # Calculate the depth difference at pixels where both depth maps + # are valid. + valid_mask = (depth > 0) * (ren_depth > 0) + depth_diff = valid_mask * (depth - ren_depth.astype(np.float32)) + + f, ax = plt.subplots(1, 1) + cax = ax.matshow(depth_diff) + ax.axis('off') + ax.set_title('captured - GT depth [mm]') + f.colorbar(cax, fraction=0.03, pad=0.01) + f.tight_layout(pad=0) + + if not vis_rgb: + misc.ensure_dir(os.path.dirname(vis_depth_diff_path)) + plt.savefig(vis_depth_diff_path, pad=0, bbox_inches='tight', quality=95) + plt.close() diff --git a/docs/bop_datasets_format.md b/docs/bop_datasets_format.md new file mode 100644 index 0000000..6c6fb78 --- /dev/null +++ b/docs/bop_datasets_format.md @@ -0,0 +1,167 @@ +# Format of the BOP datasets + +This file describes the common format of the BOP datasets [1]. + + +## Directory structure + +The datasets have the following structure: + + +* *models[\_MODELTYPE]* - 3D object models. 
+* *models[\_MODELTYPE]\_eval* - "Uniformly" resampled and decimated 3D object
+  models used for calculation of errors of object pose estimates.
+
+
+* *train[\_TRAINTYPE]/X* (optional) - Training images of object X.
+* *val[\_VALTYPE]/Y* (optional) - Validation images of scene Y.
+* *test[\_TESTTYPE]/Y* - Test images of scene Y.
+
+
+* *camera.json* - Camera parameters (for sensor simulation only; per-image
+  camera parameters are in files *scene_camera.json* - see below).
+* *dataset_info.md* - Dataset-specific information.
+* *test_targets_bop19.json* - A list of test targets used for the evaluation in
+  the BOP paper [1] and in the BOP Challenge 2019.
+
+
+*MODELTYPE*, *TRAINTYPE*, *VALTYPE* and *TESTTYPE* are optional and used if more
+data types are available (e.g. images from different sensors).
+
+The images in the *train*, *val* and *test* folders are organized into
+subfolders:
+
+* *rgb/gray* - Color/gray images.
+* *depth* - Depth images (saved as 16-bit unsigned short).
+* *mask* (optional) - Masks of object silhouettes.
+* *mask_visib* (optional) - Masks of the visible parts of object silhouettes.
+
+The corresponding images across the subfolders have the same ID, e.g.
+*rgb/000000.png* and *depth/000000.png* are the color and the depth image
+of the same RGB-D frame.
+
+
+## Training, validation and test images
+
+If both validation and test images are available for a dataset, the ground-truth
+annotations are public only for the validation images. Performance scores for
+test images with private ground-truth annotations can be calculated in the
+[BOP evaluation system](http://bop.felk.cvut.cz).
+
+### Camera parameters
+
+Each set of images is accompanied by the file *scene\_camera.json*, which
+contains the following information for each image:
+
+* *cam\_K* - 3x3 intrinsic camera matrix K (saved row-wise).
+* *depth_scale* - Multiply the depth image by this factor to get depth in mm.
+* *cam\_R\_w2c* (optional) - 3x3 rotation matrix R\_w2c (saved row-wise).
+* *cam\_t\_w2c* (optional) - 3x1 translation vector t\_w2c.
+* *view\_level* (optional) - Viewpoint subdivision level, see below.
+
+The matrix K may be different for each image. For example, the principal point
+is not constant for images in T-LESS as the images were obtained by cropping a
+region around the projection of the origin of the world coordinate system.
+
+Note that the intrinsic camera parameters can also be found in the file
+*camera.json* in the root folder of a dataset. These parameters are meant only
+for simulation of the used sensor when rendering training images.
+
+P\_w2i = K * [R\_w2c, t\_w2c] is the camera matrix which transforms a 3D point
+p\_w = [x, y, z, 1]' in the world coordinate system to a 2D point p\_i =
+[u, v, 1]' in the image coordinate system: s * p\_i = P\_w2i * p\_w.
+
+### Ground-truth annotations
+
+The ground-truth object poses are provided in files *scene_gt.json*, which
+contain the following information for each annotated object instance:
+
+* *obj\_id* - Object ID.
+* *cam\_R\_m2c* - 3x3 rotation matrix R\_m2c (saved row-wise).
+* *cam\_t\_m2c* - 3x1 translation vector t\_m2c.
+
+P\_m2i = K * [R\_m2c, t\_m2c] is the camera matrix which transforms a 3D point
+p\_m = [x, y, z, 1]' in the model coordinate system to a 2D point p\_i =
+[u, v, 1]' in the image coordinate system: s * p\_i = P\_m2i * p\_m.
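+
+For illustration, the following Python snippet projects a 3D point from the
+model coordinate system to the image using these annotations (the file paths,
+the image ID and the instance index are placeholders):
+
+```python
+import json
+import numpy as np
+
+# Load the per-image camera parameters and the ground-truth poses.
+with open('scene_camera.json', 'r') as f:
+  scene_camera = json.load(f)
+with open('scene_gt.json', 'r') as f:
+  scene_gt = json.load(f)
+
+im_id = '0'  # Image ID (the keys in the JSON files are strings).
+K = np.array(scene_camera[im_id]['cam_K']).reshape((3, 3))
+
+gt = scene_gt[im_id][0]  # The first annotated object instance in the image.
+R_m2c = np.array(gt['cam_R_m2c']).reshape((3, 3))
+t_m2c = np.array(gt['cam_t_m2c']).reshape((3, 1))
+
+# Project the origin of the model coordinate system (coordinates are in mm).
+p_m = np.zeros((3, 1))
+p_c = R_m2c.dot(p_m) + t_m2c  # Point in the camera coordinate system.
+p_i = K.dot(p_c)              # s * [u, v, 1]'.
+u, v = p_i[0, 0] / p_i[2, 0], p_i[1, 0] / p_i[2, 0]
+```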
+
+### Meta information about the ground-truth poses
+
+The following meta information about the ground-truth poses is provided in files
+*scene_gt_info.json* (calculated using *scripts/calc_gt_info.py*, with delta =
+5 mm for ITODD and 15 mm for the other datasets):
+
+* *bbox\_obj* - 2D bounding box of the object silhouette given by (x, y, width,
+  height), where (x, y) is the top-left corner of the bounding box.
+* *bbox\_visib* - 2D bounding box of the visible part of the object silhouette.
+* *px\_count\_all* - Number of pixels in the object silhouette.
+* *px\_count\_valid* - Number of pixels in the object silhouette with a valid
+  depth measurement (i.e. with a non-zero value in the depth image).
+* *px\_count\_visib* - Number of pixels in the visible part of the object
+  silhouette.
+* *visib\_fract* - The visible fraction of the object silhouette
+  (= *px\_count\_visib*/*px\_count\_all*).
+
+
+## Acquisition of training images
+
+Most of the datasets include training images which were obtained either by
+capturing real objects from various viewpoints or by rendering 3D object models
+(using *scripts/render_train_imgs.py*).
+
+The viewpoints from which the objects were rendered were sampled from a view
+sphere, as in [2], by recursively subdividing an icosahedron. The level of
+subdivision at which a viewpoint was added is saved in *scene_camera.json* as
+*view_level* (viewpoints corresponding to vertices of the icosahedron have
+*view_level* = 0, viewpoints obtained in the first subdivision step have
+*view_level* = 1, etc.). To reduce the number of viewpoints while preserving
+their "uniform" distribution over the sphere surface, one can consider only
+viewpoints with *view_level* <= n, where n is the highest considered level of
+subdivision.
+
+For rendering, the radius of the view sphere was set to the distance of the
+closest occurrence of any annotated object instance over all test images. The
+distance was calculated from the camera center to the origin of the model
+coordinate system.
+
+
+## 3D object models
+
+The 3D object models are provided in PLY (ASCII) format. All models include
+vertex normals. Most of the models also include vertex color or vertex texture
+coordinates, with the texture saved as a separate image.
+The vertex normals were calculated using
+[MeshLab](http://meshlab.sourceforge.net/) as the angle-weighted sum of face
+normals incident to a vertex [3].
+
+Each folder with object models contains the file *models_info.json*, which
+includes the 3D bounding box and the diameter for each object model. The
+diameter is calculated as the largest distance between any pair of model
+vertices.
+
+
+## Coordinate systems
+
+All coordinate systems (model, camera, world) are right-handed.
+In the model coordinate system, the Z axis points up (when the object is
+standing "naturally upright") and the origin coincides with the center of the
+3D bounding box of the object model.
+The camera coordinate system is as in
+[OpenCV](http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html)
+with the camera looking along the Z axis.
+
+
+## Units
+
+* Depth images: See files *camera.json/scene_camera.json* in individual
+  datasets.
+* 3D object models: 1 mm
+* Translation vectors: 1 mm
+
+
+## References
+
+[1] Hodan, Michel et al. "BOP: Benchmark for 6D Object Pose Estimation" ECCV'18.
+
+[2] Hinterstoisser et al. "Model based training, detection and pose estimation
+    of texture-less 3D objects in heavily cluttered scenes" ACCV'12.
+ +[3] Thurrner and Wuthrich "Computing vertex normals from polygonal + facets" Journal of Graphics Tools 3.1 (1998). diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..1d26e40 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,11 @@ +numpy==1.16.4 +matplotlib==2.2.4 +imageio==2.5.0 +scipy==1.2.2 +pypng==0.0.19 +Cython==0.29.10 +PyOpenGL==3.1.0 +triangle==20190115.2 +glumpy==1.1.0 +opencv-python==4.1.0.25 +Pillow==6.0.0 diff --git a/scripts/calc_gt_distribution.py b/scripts/calc_gt_distribution.py new file mode 100644 index 0000000..7bb5aa3 --- /dev/null +++ b/scripts/calc_gt_distribution.py @@ -0,0 +1,121 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculates distribution of GT poses.""" + +import math +import numpy as np +import matplotlib.pyplot as plt + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. + 'dataset': 'lmo', + + # Dataset split. Options: 'train', 'val', 'test'. + 'dataset_split': 'test', + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, +} +################################################################################ + + +# Load dataset parameters. +dp_split = dataset_params.get_split_params( + p['datasets_path'], p['dataset'], p['dataset_split']) + +scene_ids = dp_split['scene_ids'] +dists = [] +azimuths = [] +elevs = [] +visib_fracts = [] +ims_count = 0 +for scene_id in scene_ids: + misc.log('Processing - dataset: {} {}, scene: {}'.format( + p['dataset'], p['dataset_split'], scene_id)) + + # Load GT poses. + scene_gt = inout.load_scene_gt( + dp_split['scene_gt_tpath'].format(scene_id=scene_id)) + + # Load info about the GT poses. + scene_gt_info = inout.load_json( + dp_split['scene_gt_info_tpath'].format(scene_id=scene_id), keys_to_int=True) + + ims_count += len(scene_gt) + + for im_id in scene_gt.keys(): + for gt_id, im_gt in enumerate(scene_gt[im_id]): + + # Object distance. + dist = np.linalg.norm(im_gt['cam_t_m2c']) + dists.append(dist) + + # Camera origin in the model coordinate system. + cam_orig_m = -np.linalg.inv(im_gt['cam_R_m2c']).dot( + im_gt['cam_t_m2c']) + + # Azimuth from [0, 360]. + azimuth = math.atan2(cam_orig_m[1, 0], cam_orig_m[0, 0]) + if azimuth < 0: + azimuth += 2.0 * math.pi + azimuths.append((180.0 / math.pi) * azimuth) + + # Elevation from [-90, 90]. + a = np.linalg.norm(cam_orig_m) + b = np.linalg.norm([cam_orig_m[0, 0], cam_orig_m[1, 0], 0]) + elev = math.acos(b / a) + if cam_orig_m[2, 0] < 0: + elev = -elev + elevs.append((180.0 / math.pi) * elev) + + # Visibility fraction. + visib_fracts.append(scene_gt_info[im_id][gt_id]['visib_fract']) + +# Print stats. 
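+# Note: the distances below are in millimeters (the unit of cam_t_m2c), the
+# azimuths and elevations were converted to degrees above, and visib_fract
+# lies in [0, 1].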
+misc.log('Stats of the GT poses in dataset {} {}:'.format( + p['dataset'], p['dataset_split'])) +misc.log('Number of images: ' + str(ims_count)) + +misc.log('Min dist: {}'.format(np.min(dists))) +misc.log('Max dist: {}'.format(np.max(dists))) +misc.log('Mean dist: {}'.format(np.mean(dists))) + +misc.log('Min azimuth: {}'.format(np.min(azimuths))) +misc.log('Max azimuth: {}'.format(np.max(azimuths))) +misc.log('Mean azimuth: {}'.format(np.mean(azimuths))) + +misc.log('Min elev: {}'.format(np.min(elevs))) +misc.log('Max elev: {}'.format(np.max(elevs))) +misc.log('Mean elev: {}'.format(np.mean(elevs))) + +misc.log('Min visib fract: {}'.format(np.min(visib_fracts))) +misc.log('Max visib fract: {}'.format(np.max(visib_fracts))) +misc.log('Mean visib fract: {}'.format(np.mean(visib_fracts))) + +# Visualize distributions. +plt.figure() +plt.hist(dists, bins=100) +plt.title('Object distance') + +plt.figure() +plt.hist(azimuths, bins=100) +plt.title('Azimuth') + +plt.figure() +plt.hist(elevs, bins=100) +plt.title('Elevation') + +plt.figure() +plt.hist(visib_fracts, bins=100) +plt.title('Visibility fraction') + +plt.show() diff --git a/scripts/calc_gt_info.py b/scripts/calc_gt_info.py new file mode 100644 index 0000000..28aeba7 --- /dev/null +++ b/scripts/calc_gt_info.py @@ -0,0 +1,172 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculates visibility, 2D bounding boxes etc. for the ground-truth poses. + +See docs/bop_datasets_format.md for documentation of the calculated info. + +The info is saved in folder "{train,val,test}_gt_info" in the main folder of the +selected dataset. +""" + +import os +import numpy as np + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import renderer +from bop_toolkit_lib import visibility + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. + 'dataset': 'lm', + + # Dataset split. Options: 'train', 'val', 'test'. + 'dataset_split': 'test', + + # Dataset split type. None = default. See dataset_params.py for options. + 'dataset_split_type': None, + + # Whether to save visualizations of visibility masks. + 'vis_visibility_masks': False, + + # Tolerance used in the visibility test [mm]. + 'delta': 15, + + # Type of the renderer. + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, + + # Path template for output images with object masks. + 'vis_mask_visib_tpath': os.path.join( + config.output_path, 'vis_gt_visib_delta={delta}', + 'vis_gt_visib_delta={delta}', '{dataset}', '{split}', '{scene_id:06d}', + '{im_id:06d}_{gt_id:06d}.jpg'), +} +################################################################################ + + +if p['vis_visibility_masks']: + from bop_toolkit_lib import visualization + +# Load dataset parameters. +dp_split = dataset_params.get_split_params( + p['datasets_path'], p['dataset'], p['dataset_split'], p['dataset_split_type']) + +model_type = None +if p['dataset'] == 'tless': + model_type = 'cad' +dp_model = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], model_type) + +# Initialize a renderer. 
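+# A depth-only renderer is sufficient for the visibility test. All object
+# models are added up front so the renderer can be reused for every scene
+# and image.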
+misc.log('Initializing renderer...') +width, height = dp_split['im_size'] +ren = renderer.create_renderer( + width, height, p['renderer_type'], mode='depth') +for obj_id in dp_model['obj_ids']: + ren.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id)) + +for scene_id in dp_split['scene_ids']: + + # Load scene info and ground-truth poses. + scene_camera = inout.load_scene_camera( + dp_split['scene_camera_tpath'].format(scene_id=scene_id)) + scene_gt = inout.load_scene_gt( + dp_split['scene_gt_tpath'].format(scene_id=scene_id)) + + scene_gt_info = {} + im_ids = sorted(scene_gt.keys()) + for im_counter, im_id in enumerate(im_ids): + if im_counter % 100 == 0: + misc.log( + 'Calculating GT info - dataset: {} ({}, {}), scene: {}, im: {}'.format( + p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id, + im_id)) + + # Load depth image. + depth = inout.load_depth(dp_split['depth_tpath'].format( + scene_id=scene_id, im_id=im_id)) + depth *= scene_camera[im_id]['depth_scale'] # Convert to [mm]. + + K = scene_camera[im_id]['cam_K'] + fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + im_size = (depth.shape[1], depth.shape[0]) + + scene_gt_info[im_id] = [] + for gt_id, gt in enumerate(scene_gt[im_id]): + + # Render depth image of the object model in the ground-truth pose. + depth_gt = ren.render_object( + gt['obj_id'], gt['cam_R_m2c'], gt['cam_t_m2c'], fx, fy, cx, cy)['depth'] + + # Convert depth images to distance images. + dist_gt = misc.depth_im_to_dist_im(depth_gt, K) + dist_im = misc.depth_im_to_dist_im(depth, K) + + # Estimation of the visibility mask. + visib_gt = visibility.estimate_visib_mask_gt( + dist_im, dist_gt, p['delta'], visib_mode='bop19') + + # Visible surface fraction. + obj_mask_gt = dist_gt > 0 + px_count_valid = np.sum(dist_im[obj_mask_gt] > 0) + px_count_visib = visib_gt.sum() + px_count_all = obj_mask_gt.sum() + if px_count_all > 0: + visib_fract = px_count_visib / float(px_count_all) + else: + visib_fract = 0.0 + + # Bounding box of the object projection. + bbox = [-1, -1, -1, -1] + if px_count_visib > 0: + ys, xs = obj_mask_gt.nonzero() + bbox = misc.calc_2d_bbox(xs, ys, im_size) + + # Bounding box of the visible surface part. + bbox_visib = [-1, -1, -1, -1] + if px_count_visib > 0: + ys, xs = visib_gt.nonzero() + bbox_visib = misc.calc_2d_bbox(xs, ys, im_size) + + # Store the calculated info. + scene_gt_info[im_id].append({ + 'px_count_all': int(px_count_all), + 'px_count_visib': int(px_count_visib), + 'px_count_valid': int(px_count_valid), + 'visib_fract': float(visib_fract), + 'bbox_obj': [int(e) for e in bbox], + 'bbox_visib': [int(e) for e in bbox_visib] + }) + + # Visualization of the visibility mask. + if p['vis_visibility_masks']: + + depth_im_vis = visualization.depth_for_vis(depth, 0.2, 1.0) + depth_im_vis = np.dstack([depth_im_vis] * 3) + + visib_gt_vis = visib_gt.astype(np.float) + zero_ch = np.zeros(visib_gt_vis.shape) + visib_gt_vis = np.dstack([zero_ch, visib_gt_vis, zero_ch]) + + vis = 0.5 * depth_im_vis + 0.5 * visib_gt_vis + vis[vis > 1] = 1 + + vis_path = p['vis_mask_visib_tpath'].format( + delta=p['delta'], dataset=p['dataset'], split=p['dataset_split'], + scene_id=scene_id, im_id=im_id, gt_id=gt_id) + misc.ensure_dir(os.path.dirname(vis_path)) + inout.save_im(vis_path, vis) + + # Save the info for the current scene. 
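+  # The resulting scene_gt_info.json maps each im_id to a list with one record
+  # per GT pose, e.g. (illustrative values):
+  #   {"0": [{"bbox_obj": [219, 83, 98, 147], "bbox_visib": [219, 83, 98, 141],
+  #           "px_count_all": 10532, "px_count_valid": 10430,
+  #           "px_count_visib": 10120, "visib_fract": 0.961}, ...], ...}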
+ scene_gt_info_path = dp_split['scene_gt_info_tpath'].format(scene_id=scene_id) + misc.ensure_dir(os.path.dirname(scene_gt_info_path)) + inout.save_json(scene_gt_info_path, scene_gt_info) diff --git a/scripts/calc_gt_masks.py b/scripts/calc_gt_masks.py new file mode 100644 index 0000000..e88d2dc --- /dev/null +++ b/scripts/calc_gt_masks.py @@ -0,0 +1,126 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculates masks of object models in the ground-truth poses.""" + +import os +import numpy as np + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import renderer +from bop_toolkit_lib import visibility + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. + 'dataset': 'lm', + + # Dataset split. Options: 'train', 'val', 'test'. + 'dataset_split': 'test', + + # Dataset split type. None = default. See dataset_params.py for options. + 'dataset_split_type': None, + + # Tolerance used in the visibility test [mm]. + 'delta': 15, # 5 for ITODD, 15 for the other datasets. + + # Type of the renderer. + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, +} +################################################################################ + + +# Load dataset parameters. +dp_split = dataset_params.get_split_params( + p['datasets_path'], p['dataset'], p['dataset_split'], p['dataset_split_type']) + +model_type = None +if p['dataset'] == 'tless': + model_type = 'cad' +dp_model = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], model_type) + +for scene_id in dp_split['scene_ids']: + + # Load scene GT. + scene_gt_path = dp_split['scene_gt_tpath'].format( + scene_id=scene_id) + scene_gt = inout.load_scene_gt(scene_gt_path) + + # Load scene camera. + scene_camera_path = dp_split['scene_camera_tpath'].format( + scene_id=scene_id) + scene_camera = inout.load_scene_camera(scene_camera_path) + + # Create folders for the output masks (if they do not exist yet). + mask_dir_path = os.path.dirname( + dp_split['mask_tpath'].format( + scene_id=scene_id, im_id=0, gt_id=0)) + misc.ensure_dir(mask_dir_path) + + mask_visib_dir_path = os.path.dirname( + dp_split['mask_visib_tpath'].format( + scene_id=scene_id, im_id=0, gt_id=0)) + misc.ensure_dir(mask_visib_dir_path) + + # Initialize a renderer. + misc.log('Initializing renderer...') + width, height = dp_split['im_size'] + ren = renderer.create_renderer( + width, height, renderer_type=p['renderer_type'], mode='depth') + + # Add object models. + for obj_id in dp_model['obj_ids']: + ren.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id)) + + im_ids = sorted(scene_gt.keys()) + for im_id in im_ids: + + if im_id % 100 == 0: + misc.log( + 'Calculating masks - dataset: {} ({}, {}), scene: {}, im: {}'.format( + p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id, + im_id)) + + K = scene_camera[im_id]['cam_K'] + fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + + # Load depth image. 
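+    # The depth image (Z values) is converted below to a distance image
+    # (per-pixel distance from the camera center), which is what the
+    # visibility test in visibility.py expects.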
+ depth_path = dp_split['depth_tpath'].format( + scene_id=scene_id, im_id=im_id) + depth_im = inout.load_depth(depth_path) + depth_im *= scene_camera[im_id]['depth_scale'] # to [mm] + dist_im = misc.depth_im_to_dist_im(depth_im, K) + + for gt_id, gt in enumerate(scene_gt[im_id]): + + # Render the depth image. + depth_gt = ren.render_object( + gt['obj_id'], gt['cam_R_m2c'], gt['cam_t_m2c'], fx, fy, cx, cy)['depth'] + + # Convert depth image to distance image. + dist_gt = misc.depth_im_to_dist_im(depth_gt, K) + + # Mask of the full object silhouette. + mask = dist_gt > 0 + + # Mask of the visible part of the object silhouette. + mask_visib = visibility.estimate_visib_mask_gt( + dist_im, dist_gt, p['delta'], visib_mode='bop19') + + # Save the calculated masks. + mask_path = dp_split['mask_tpath'].format( + scene_id=scene_id, im_id=im_id, gt_id=gt_id) + inout.save_im(mask_path, 255 * mask.astype(np.uint8)) + + mask_visib_path = dp_split['mask_visib_tpath'].format( + scene_id=scene_id, im_id=im_id, gt_id=gt_id) + inout.save_im(mask_visib_path, 255 * mask_visib.astype(np.uint8)) diff --git a/scripts/calc_model_info.py b/scripts/calc_model_info.py new file mode 100644 index 0000000..71e7b31 --- /dev/null +++ b/scripts/calc_model_info.py @@ -0,0 +1,54 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculates the 3D bounding box and the diameter of 3D object models.""" + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. + 'dataset': 'lm', + + # Type of input object models. + 'model_type': 'eval', + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, +} +################################################################################ + + +# Load dataset parameters. +dp_split = dataset_params.get_split_params( + p['datasets_path'], p['dataset'], 'train') + +dp_model = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], p['model_type']) + +models_info = {} +for obj_id in dp_model['obj_ids']: + misc.log('Processing model of object {}...'.format(obj_id)) + + model = inout.load_ply(dp_model['model_tpath'].format(obj_id=obj_id)) + + # Calculate 3D bounding box. + ref_pt = map(float, model['pts'].min(axis=0).flatten()) + size = map(float, (model['pts'].max(axis=0) - ref_pt).flatten()) + + # Calculated diameter. + diameter = misc.calc_pts_diameter(model['pts']) + + models_info[obj_id] = { + 'min_x': ref_pt[0], 'min_y': ref_pt[1], 'min_z': ref_pt[2], + 'size_x': size[0], 'size_y': size[1], 'size_z': size[2], + 'diameter': diameter + } + +# Save the calculated info about the object models. +inout.save_json(dp_split['models_info_path'], models_info) diff --git a/scripts/check_results_bop19.py b/scripts/check_results_bop19.py new file mode 100644 index 0000000..aca7d2f --- /dev/null +++ b/scripts/check_results_bop19.py @@ -0,0 +1,46 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Evaluation script for the BOP Challenge 2019.""" + +import os +import argparse + +from bop_toolkit_lib import config +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc + + +# PARAMETERS (some can be overwritten by the command line arguments below). 
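+# Example usage (the result file name is only illustrative):
+#   python scripts/check_results_bop19.py \
+#     --result_filenames=hodan-iros15_lm-test.csv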
+################################################################################ +p = { + # Names of files with results for which to calculate the errors (assumed to be + # stored in folder config.eval_path). See docs/bop_challenge_2019.md for a + # description of the format. Example results can be found at: + # http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/ + 'result_filenames': [ + '/path/to/csv/with/results', + ], +} +################################################################################ + + +# Command line arguments. +# ------------------------------------------------------------------------------ +parser = argparse.ArgumentParser() +parser.add_argument('--result_filenames', + default=','.join(p['result_filenames']), + help='Comma-separated names of files with results.') +args = parser.parse_args() + +p['result_filenames'] = args.result_filenames.split(',') + + +if __name__ == '__main__': + + for result_filename in p['result_filenames']: + result_path = os.path.join(config.results_path, result_filename) + check_passed, check_msg = inout.check_bop_results( + result_path, version='bop19') + + misc.log('Check msg: {}'.format(check_msg)) diff --git a/scripts/eval_bop19.py b/scripts/eval_bop19.py new file mode 100644 index 0000000..b0b4934 --- /dev/null +++ b/scripts/eval_bop19.py @@ -0,0 +1,192 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Evaluation script for the BOP Challenge 2019.""" + +import os +import time +import argparse +import subprocess +import numpy as np + +from bop_toolkit_lib import config +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc + + +# PARAMETERS (some can be overwritten by the command line arguments below). +################################################################################ +p = { + # Errors to calculate. + 'errors': [ + { + 'n_top': -1, + 'type': 'vsd', + 'vsd_deltas': { + 'hb': 15, + 'icbin': 15, + 'icmi': 15, + 'itodd': 5, + 'lm': 15, + 'lmo': 15, + 'ruapc': 15, + 'tless': 15, + 'tudl': 15, + 'tyol': 15, + }, + 'vsd_taus': list(np.arange(0.05, 0.51, 0.05)), + 'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)] + }, + { + 'n_top': -1, + 'type': 'mssd', + 'correct_th': [[th] for th in np.arange(0.05, 0.51, 0.05)] + }, + { + 'n_top': -1, + 'type': 'mspd', + 'correct_th': [[th] for th in np.arange(5, 51, 5)] + }, + ], + + # Minimum visible surface fraction of a valid GT pose. + 'visib_gt_min': 0.1, + + # See misc.get_symmetry_transformations(). + 'max_sym_disc_step': 0.01, + + # Type of the renderer (used for the VSD pose error function). + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # Names of files with results for which to calculate the errors (assumed to be + # stored in folder config.eval_path). See docs/bop_challenge_2019.md for a + # description of the format. Example results can be found at: + # http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/ + 'result_filenames': [ + '/path/to/csv/with/results', + ], + + # File with a list of estimation targets to consider. The file is assumed to + # be stored in the dataset folder. + 'targets_filename': 'test_targets_bop19.json', +} +################################################################################ + + +# Command line arguments. 
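+# Example usage (the result file name is only illustrative):
+#   python scripts/eval_bop19.py --renderer_type=python \
+#     --result_filenames=hodan-iros15_lm-test.csv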
+# ------------------------------------------------------------------------------ +parser = argparse.ArgumentParser() +parser.add_argument('--visib_gt_min', default=p['visib_gt_min']) +parser.add_argument('--max_sym_disc_step', default=p['max_sym_disc_step']) +parser.add_argument('--renderer_type', default=p['renderer_type']) +parser.add_argument('--result_filenames', + default=','.join(p['result_filenames']), + help='Comma-separated names of files with results.') +parser.add_argument('--targets_filename', default=p['targets_filename']) +args = parser.parse_args() + +p['visib_gt_min'] = float(args.visib_gt_min) +p['max_sym_disc_step'] = float(args.max_sym_disc_step) +p['renderer_type'] = str(args.renderer_type) +p['result_filenames'] = args.result_filenames.split(',') +p['targets_filename'] = str(args.targets_filename) + +# Evaluation. +# ------------------------------------------------------------------------------ +for result_filename in p['result_filenames']: + + misc.log('===========') + misc.log('EVALUATING: {}'.format(result_filename)) + misc.log('===========') + + time_start = time.time() + + for error in p['errors']: + + # Calculate error of the pose estimates. + calc_errors_cmd = [ + 'python', + os.path.join('scripts', 'eval_calc_errors.py'), + '--n_top={}'.format(error['n_top']), + '--error_type={}'.format(error['type']), + '--result_filenames={}'.format(result_filename), + '--renderer_type={}'.format(p['renderer_type']), + '--targets_filename={}'.format(p['targets_filename']), + '--max_sym_disc_step={}'.format(p['max_sym_disc_step']), + '--skip_missing=1', + ] + if error['type'] == 'vsd': + vsd_deltas_str = \ + ','.join(['{}:{}'.format(k, v) for k, v in error['vsd_deltas'].items()]) + calc_errors_cmd += [ + '--vsd_deltas={}'.format(vsd_deltas_str), + '--vsd_taus={}'.format(','.join(map(str, error['vsd_taus']))) + ] + + misc.log('Running: ' + ' '.join(calc_errors_cmd)) + if subprocess.call(calc_errors_cmd) != 0: + raise RuntimeError('Calculation of VSD failed.') + + # Name of the result and the dataset. + result_name = os.path.splitext(os.path.basename(result_filename))[0] + dataset = str(result_name.split('_')[1].split('-')[0]) + + # Paths (rel. to config.eval_path) to folders with calculated pose errors. + # For VSD, there is one path for each setting of tau. For the other pose + # error functions, there is only one path. + error_dir_paths = {} + if error['type'] == 'vsd': + for vsd_tau in error['vsd_taus']: + error_sign = misc.get_error_signature( + error['type'], error['n_top'], vsd_delta=error['vsd_deltas'][dataset], + vsd_tau=vsd_tau) + error_dir_paths[error_sign] = os.path.join(result_name, error_sign) + else: + error_sign = misc.get_error_signature(error['type'], error['n_top']) + error_dir_paths[error_sign] = os.path.join(result_name, error_sign) + + # Recall scores for all settings of the threshold of correctness (and also + # of the misalignment tolerance tau in the case of VSD). + recalls = [] + + # Calculate performance scores. 
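+    # eval_calc_scores.py is run for every setting of the correctness
+    # threshold and, in the case of VSD, of the misalignment tolerance tau.
+    # The recall from each run is collected and the final score for this error
+    # type is their average, i.e. the area under the recall surface logged
+    # below.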
+ for error_sign, error_dir_path in error_dir_paths.items(): + for correct_th in error['correct_th']: + + calc_scores_cmd = [ + 'python', + os.path.join('scripts', 'eval_calc_scores.py'), + '--error_dir_paths={}'.format(error_dir_path), + '--targets_filename={}'.format(p['targets_filename']), + '--visib_gt_min={}'.format(p['visib_gt_min']) + ] + + calc_scores_cmd += ['--correct_th_{}={}'.format( + error['type'], ','.join(map(str, correct_th)))] + + misc.log('Running: ' + ' '.join(calc_scores_cmd)) + if subprocess.call(calc_scores_cmd) != 0: + raise RuntimeError('Calculation of scores failed.') + + # Path to file with calculated scores. + score_sign = misc.get_score_signature(correct_th, p['visib_gt_min']) + + scores_filename = 'scores_{}.json'.format(score_sign) + scores_path = os.path.join( + config.eval_path, result_name, error_sign, scores_filename) + + # Load the scores. + misc.log('Loading calculated scores from: {}'.format(scores_path)) + scores = inout.load_json(scores_path) + recalls.append(scores['total_recall']) + + # Area under the recall surface. + aurs = np.mean(recalls) + + misc.log('Recall scores: {}'.format(' '.join(map(str, recalls)))) + misc.log('Area under the recall surface: {}'.format(aurs)) + + time_total = time.time() - time_start + misc.log('Evaluation of {} took {}s.'.format(result_filename, time_total)) + +misc.log('Done.') diff --git a/scripts/eval_calc_errors.py b/scripts/eval_calc_errors.py new file mode 100644 index 0000000..530ff8e --- /dev/null +++ b/scripts/eval_calc_errors.py @@ -0,0 +1,407 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculates error of 6D object pose estimates.""" + +import os +import time +import argparse +import copy +import numpy as np + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import pose_error +from bop_toolkit_lib import renderer + + +# PARAMETERS (can be overwritten by the command line arguments below). +################################################################################ +p = { + # Top N pose estimates (with the highest score) to be evaluated for each + # object class in each image. + # Options: 0 = all, -1 = given by the number of GT poses. + 'n_top': 1, + + # Pose error function. + # Options: 'vsd', 'mssd', 'mspd', 'ad', 'adi', 'add', 'cus', 're', 'te, etc. + 'error_type': 'mspd', + + # VSD parameters. + 'vsd_deltas': { + 'hb': 15, + 'icbin': 15, + 'icmi': 15, + 'itodd': 5, + 'lm': 15, + 'lmo': 15, + 'ruapc': 15, + 'tless': 15, + 'tudl': 15, + 'tyol': 15, + }, + 'vsd_taus': list(np.arange(0.05, 0.51, 0.05)), + 'vsd_normalized_by_diameter': True, + + # MSSD/MSPD parameters (see misc.get_symmetry_transformations). + 'max_sym_disc_step': 0.01, + + # Whether to ignore/break if some errors are missing. + 'skip_missing': True, + + # Type of the renderer (used for the VSD pose error function). + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # Names of files with results for which to calculate the errors (assumed to be + # stored in folder config.eval_path). See docs/bop_challenge_2019.md for a + # description of the format. Example results can be found at: + # http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/ + 'result_filenames': [ + '/path/to/csv/with/results', + ], + + # Folder containing the BOP datasets. 
+ 'datasets_path': config.datasets_path, + + # File with a list of estimation targets to consider. The file is assumed to + # be stored in the dataset folder. + 'targets_filename': 'test_targets_bop19.json', + + # Template of path to the output file with calculated errors. + 'out_errors_tpath': os.path.join( + config.eval_path, '{result_name}', '{error_sign}', + 'errors_{scene_id:06d}.json') +} +################################################################################ + + +# Command line arguments. +# ------------------------------------------------------------------------------ +vsd_deltas_str =\ + ','.join(['{}:{}'.format(k, v) for k, v in p['vsd_deltas'].items()]) + +parser = argparse.ArgumentParser() +parser.add_argument('--n_top', default=p['n_top']) +parser.add_argument('--error_type', default=p['error_type']) +parser.add_argument('--vsd_deltas', default=vsd_deltas_str) +parser.add_argument('--vsd_taus', default=','.join(map(str, p['vsd_taus']))) +parser.add_argument('--vsd_normalized_by_diameter', + default=p['vsd_normalized_by_diameter']) +parser.add_argument('--max_sym_disc_step', default=p['max_sym_disc_step']) +parser.add_argument('--skip_missing', default=p['skip_missing']) +parser.add_argument('--renderer_type', default=p['renderer_type']) +parser.add_argument('--result_filenames', + default=','.join(p['result_filenames']), + help='Comma-separated names of files with results.') +parser.add_argument('--datasets_path', default=p['datasets_path']) +parser.add_argument('--targets_filename', default=p['targets_filename']) +parser.add_argument('--out_errors_tpath', default=p['out_errors_tpath']) +args = parser.parse_args() + +p['n_top'] = int(args.n_top) +p['error_type'] = str(args.error_type) +p['vsd_deltas'] = {str(e.split(':')[0]): float(e.split(':')[1]) + for e in args.vsd_deltas.split(',')} +p['vsd_taus'] = map(float, args.vsd_taus.split(',')) +p['vsd_normalized_by_diameter'] = bool(args.vsd_normalized_by_diameter) +p['max_sym_disc_step'] = bool(args.max_sym_disc_step) +p['skip_missing'] = bool(args.skip_missing) +p['renderer_type'] = str(args.renderer_type) +p['result_filenames'] = args.result_filenames.split(',') +p['datasets_path'] = str(args.datasets_path) +p['targets_filename'] = str(args.targets_filename) +p['out_errors_tpath'] = str(args.out_errors_tpath) + +misc.log('-----------') +misc.log('Parameters:') +for k, v in p.items(): + misc.log('- {}: {}'.format(k, v)) +misc.log('-----------') + +# Error calculation. +# ------------------------------------------------------------------------------ +for result_filename in p['result_filenames']: + misc.log('Processing: {}'.format(result_filename)) + + ests_counter = 0 + time_start = time.time() + + # Parse info about the method and the dataset from the filename. + result_name = os.path.splitext(os.path.basename(result_filename))[0] + result_info = result_name.split('_') + method = str(result_info[0]) + dataset_info = result_info[1].split('-') + dataset = str(dataset_info[0]) + split = str(dataset_info[1]) + split_type = str(dataset_info[2]) if len(dataset_info) > 2 else None + split_type_str = ' - ' + split_type if split_type is not None else '' + + # Load dataset parameters. + dp_split = dataset_params.get_split_params( + p['datasets_path'], dataset, split, split_type) + + model_type = 'eval' + dp_model = dataset_params.get_model_params( + p['datasets_path'], dataset, model_type) + + # Load object models. 
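+  # The model vertices are needed only for the errors computed directly on
+  # the model points (AD/ADD/ADI, MSSD, MSPD). VSD and CUS compare renderings
+  # instead, so for them the models only need to be added to the renderer
+  # below.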
+ models = {} + if p['error_type'] in ['ad', 'add', 'adi', 'mssd', 'mspd']: + misc.log('Loading object models...') + for obj_id in dp_model['obj_ids']: + models[obj_id] = inout.load_ply( + dp_model['model_tpath'].format(obj_id=obj_id)) + + # Load models info. + models_info = None + if p['error_type'] in ['ad', 'add', 'adi', 'vsd', 'mssd', 'mspd', 'cus']: + models_info = inout.load_json( + dp_model['models_info_path'], keys_to_int=True) + + # Get sets of symmetry transformations for the object models. + models_sym = None + if p['error_type'] in ['mssd', 'mspd']: + models_sym = {} + for obj_id in dp_model['obj_ids']: + models_sym[obj_id] = misc.get_symmetry_transformations( + models_info[obj_id], p['max_sym_disc_step']) + + # Initialize a renderer. + ren = None + if p['error_type'] in ['vsd', 'cus']: + misc.log('Initializing renderer...') + width, height = dp_split['im_size'] + ren = renderer.create_renderer( + width, height, p['renderer_type'], mode='depth') + for obj_id in dp_model['obj_ids']: + ren.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id)) + + # Load the estimation targets. + targets = inout.load_json( + os.path.join(dp_split['base_path'], p['targets_filename'])) + + # Organize the targets by scene, image and object. + misc.log('Organizing estimation targets...') + targets_org = {} + for target in targets: + targets_org.setdefault(target['scene_id'], {}).setdefault( + target['im_id'], {})[target['obj_id']] = target + + # Load pose estimates. + misc.log('Loading pose estimates...') + ests = inout.load_bop_results( + os.path.join(config.results_path, result_filename)) + + # Organize the pose estimates by scene, image and object. + misc.log('Organizing pose estimates...') + ests_org = {} + for est in ests: + ests_org.setdefault(est['scene_id'], {}).setdefault( + est['im_id'], {}).setdefault(est['obj_id'], []).append(est) + + for scene_id, scene_targets in targets_org.items(): + + # Load camera and GT poses for the current scene. + scene_camera = inout.load_scene_camera( + dp_split['scene_camera_tpath'].format(scene_id=scene_id)) + scene_gt = inout.load_scene_gt(dp_split['scene_gt_tpath'].format( + scene_id=scene_id)) + + scene_errs = [] + + for im_ind, (im_id, im_targets) in enumerate(scene_targets.items()): + + if im_ind % 10 == 0: + misc.log( + 'Calculating error {} - method: {}, dataset: {}{}, scene: {}, ' + 'im: {}'.format( + p['error_type'], method, dataset, split_type_str, scene_id, im_ind)) + + # Intrinsic camera matrix. + K = scene_camera[im_id]['cam_K'] + + # Load the depth image if VSD is selected as the pose error function. + depth_im = None + if p['error_type'] == 'vsd': + depth_path = dp_split['depth_tpath'].format( + scene_id=scene_id, im_id=im_id) + depth_im = inout.load_depth(depth_path) + depth_im *= scene_camera[im_id]['depth_scale'] # Convert to [mm]. + + for obj_id, target in im_targets.items(): + + # The required number of top estimated poses. + if p['n_top'] == 0: # All estimates are considered. + n_top_curr = None + elif p['n_top'] == -1: # Given by the number of GT poses. + # n_top_curr = sum([gt['obj_id'] == obj_id for gt in scene_gt[im_id]]) + n_top_curr = target['inst_count'] + else: + n_top_curr = p['n_top'] + + # Get the estimates. + try: + obj_ests = ests_org[scene_id][im_id][obj_id] + obj_count = len(obj_ests) + except KeyError: + obj_ests = [] + obj_count = 0 + + # Check the number of estimates. 
+ if not p['skip_missing'] and obj_count < n_top_curr: + raise ValueError( + 'Not enough estimates for scene: {}, im: {}, obj: {} ' + '(provided: {}, expected: {})'.format( + scene_id, im_id, obj_id, obj_count, n_top_curr)) + + # Sort the estimates by score (in descending order). + obj_ests_sorted = sorted( + enumerate(obj_ests), key=lambda x: x[1]['score'], reverse=True) + + # Select the required number of top estimated poses. + obj_ests_sorted = obj_ests_sorted[slice(0, n_top_curr)] + ests_counter += len(obj_ests_sorted) + + # Calculate error of each pose estimate w.r.t. all GT poses of the same + # object class. + for est_id, est in obj_ests_sorted: + + # Estimated pose. + R_e = est['R'] + t_e = est['t'] + + errs = {} # Errors w.r.t. GT poses of the same object class. + for gt_id, gt in enumerate(scene_gt[im_id]): + if gt['obj_id'] != obj_id: + continue + + # Ground-truth pose. + R_g = gt['cam_R_m2c'] + t_g = gt['cam_t_m2c'] + + # Check if the projections of the bounding spheres of the object in + # the two poses overlap (to speed up calculation of some errors). + sphere_projections_overlap = None + if p['error_type'] in ['vsd', 'cus']: + radius = 0.5 * models_info[obj_id]['diameter'] + sphere_projections_overlap = misc.overlapping_sphere_projections( + radius, t_e.squeeze(), t_g.squeeze()) + + # Check if the bounding spheres of the object in the two poses + # overlap (to speed up calculation of some errors). + spheres_overlap = None + if p['error_type'] in ['ad', 'add', 'adi', 'mssd']: + center_dist = np.linalg.norm(t_e - t_g) + spheres_overlap = center_dist < models_info[obj_id]['diameter'] + + if p['error_type'] == 'vsd': + if not sphere_projections_overlap: + e = [1.0] * len(p['vsd_taus']) + else: + e = pose_error.vsd( + R_e, t_e, R_g, t_g, depth_im, K, p['vsd_deltas'][dataset], + p['vsd_taus'], p['vsd_normalized_by_diameter'], + models_info[obj_id]['diameter'], ren, obj_id, 'step') + + elif p['error_type'] == 'mssd': + if not spheres_overlap: + e = [float('inf')] + else: + e = [pose_error.mssd( + R_e, t_e, R_g, t_g, models[obj_id]['pts'], + models_sym[obj_id])] + + elif p['error_type'] == 'mspd': + e = [pose_error.mspd( + R_e, t_e, R_g, t_g, K, models[obj_id]['pts'], + models_sym[obj_id])] + + elif p['error_type'] in ['ad', 'add', 'adi']: + if not spheres_overlap: + # Infinite error if the bounding spheres do not overlap. With + # typically used values of the correctness threshold for the AD + # error (e.g. k*diameter, where k = 0.1), such pose estimates + # would be considered incorrect anyway. + e = [float('inf')] + else: + if p['error_type'] == 'ad': + if obj_id in dp_model['symmetric_obj_ids']: + e = [pose_error.adi( + R_e, t_e, R_g, t_g, models[obj_id]['pts'])] + else: + e = [pose_error.add( + R_e, t_e, R_g, t_g, models[obj_id]['pts'])] + + elif p['error_type'] == 'add': + e = [pose_error.add( + R_e, t_e, R_g, t_g, models[obj_id]['pts'])] + + else: # 'adi' + e = [pose_error.adi( + R_e, t_e, R_g, t_g, models[obj_id]['pts'])] + + elif p['error_type'] == 'cus': + if sphere_projections_overlap: + e = [pose_error.cus( + R_e, t_e, R_g, t_g, K, ren, obj_id)] + else: + e = [1.0] + + elif p['error_type'] == 'rete': + e = [pose_error.re(R_e, R_g), pose_error.te(t_e, t_g)] + + elif p['error_type'] == 're': + e = [pose_error.re(R_e, R_g)] + + elif p['error_type'] == 'te': + e = [pose_error.te(t_e, t_g)] + + else: + raise ValueError('Unknown pose error function.') + + errs[gt_id] = e + + # Save the calculated errors. 
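+          # Each record stores the errors of one estimate w.r.t. all GT poses
+          # of the same object in the image, e.g. (illustrative values):
+          #   {'im_id': 3, 'obj_id': 5, 'est_id': 0, 'score': 0.83,
+          #    'errors': {2: [0.17]}}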
+ scene_errs.append({ + 'im_id': im_id, + 'obj_id': obj_id, + 'est_id': est_id, + 'score': est['score'], + 'errors': errs + }) + + def save_errors(_error_sign, _scene_errs): + # Save the calculated errors to a JSON file. + errors_path = p['out_errors_tpath'].format( + result_name=result_name, error_sign=_error_sign, scene_id=scene_id) + misc.ensure_dir(os.path.dirname(errors_path)) + misc.log('Saving errors to: {}'.format(errors_path)) + inout.save_json(errors_path, _scene_errs) + + # Save the calculated errors. + if p['error_type'] == 'vsd': + + # For VSD, save errors for each tau value to a different file. + for vsd_tau_id, vsd_tau in enumerate(p['vsd_taus']): + error_sign = misc.get_error_signature( + p['error_type'], p['n_top'], vsd_delta=p['vsd_deltas'][dataset], + vsd_tau=vsd_tau) + + # Keep only errors for the current tau. + scene_errs_curr = copy.deepcopy(scene_errs) + for err in scene_errs_curr: + for gt_id in err['errors'].keys(): + err['errors'][gt_id] = [err['errors'][gt_id][vsd_tau_id]] + + save_errors(error_sign, scene_errs_curr) + else: + error_sign = misc.get_error_signature(p['error_type'], p['n_top']) + save_errors(error_sign, scene_errs) + + time_total = time.time() - time_start + misc.log('Calculation of errors for {} estimates took {}s.'.format( + ests_counter, time_total)) + +misc.log('Done.') diff --git a/scripts/eval_calc_scores.py b/scripts/eval_calc_scores.py new file mode 100644 index 0000000..c6d98e3 --- /dev/null +++ b/scripts/eval_calc_scores.py @@ -0,0 +1,256 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Calculates performance scores for 6D object pose estimation tasks. + +Errors of the pose estimates need to be pre-calculated with eval_calc_errors.py. + +Currently supported tasks (see [1]): +- SiSo (a single instance of a single object) + +For evaluation in the BOP paper [1], the following parameters were used: + - n_top = 1 + - visib_gt_min = 0.1 + - error_type = 'vsd' + - vsd_cost = 'step' + - vsd_delta = 15 + - vsd_tau = 20 + - correct_th['vsd'] = 0.3 + + [1] Hodan, Michel et al. BOP: Benchmark for 6D Object Pose Estimation, ECCV'18. +""" + +import os +import time +import argparse + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import pose_matching +from bop_toolkit_lib import score + + +# PARAMETERS (can be overwritten by the command line arguments below). +################################################################################ +p = { + # Threshold of correctness for different pose error functions. + 'correct_th': { + 'vsd': [0.3], + 'mssd': [0.2], + 'mspd': [10], + 'cus': [0.5], + 'rete': [5.0, 5.0], # [deg, cm]. + 're': [5.0], # [deg]. + 'te': [5.0], # [cm]. + 'ad': [0.1], + 'add': [0.1], + 'adi': [0.1], + }, + + # Pose errors that will be normalized by object diameter before thresholding. + 'normalized_by_diameter': ['ad', 'add', 'adi', 'mssd'], + + # Pose errors that will be normalized the image width before thresholding. + 'normalized_by_im_width': ['mspd'], + + # Minimum visible surface fraction of a valid GT pose. + 'visib_gt_min': 0.1, + + # Paths (relative to config.eval_path) to folders with pose errors calculated + # using eval_calc_errors.py. + # Example: 'hodan-iros15_lm-test/error=vsd_ntop=1_delta=15_tau=20_cost=step' + 'error_dir_paths': [ + r'/path/to/calculated/errors', + ], + + # Folder containing the BOP datasets. 
+ 'datasets_path': config.datasets_path, + + # File with a list of estimation targets to consider. The file is assumed to + # be stored in the dataset folder. + 'targets_filename': 'test_targets_bop19.json', + + # Template of path to the input file with calculated errors. + 'error_tpath': os.path.join( + config.eval_path, '{error_dir_path}', 'errors_{scene_id:06d}.json'), + + # Template of path to the output file with established matches and calculated + # scores. + 'out_matches_tpath': os.path.join( + config.eval_path, '{error_dir_path}', 'matches_{score_sign}.json'), + 'out_scores_tpath': os.path.join( + config.eval_path, '{error_dir_path}', 'scores_{score_sign}.json'), +} +################################################################################ + + +# Command line arguments. +# ------------------------------------------------------------------------------ +parser = argparse.ArgumentParser() + +# Define the command line arguments. +for err_type in p['correct_th']: + parser.add_argument( + '--correct_th_' + err_type, + default=','.join(map(str, p['correct_th'][err_type]))) + +parser.add_argument('--normalized_by_diameter', + default=','.join(p['normalized_by_diameter'])) +parser.add_argument('--normalized_by_im_width', + default=','.join(p['normalized_by_im_width'])) +parser.add_argument('--visib_gt_min', default=p['visib_gt_min']) +parser.add_argument('--error_dir_paths', default=','.join(p['error_dir_paths']), + help='Comma-sep. paths to errors from eval_calc_errors.py.') +parser.add_argument('--datasets_path', default=p['datasets_path']) +parser.add_argument('--targets_filename', default=p['targets_filename']) +parser.add_argument('--error_tpath', default=p['error_tpath']) +parser.add_argument('--out_matches_tpath', default=p['out_matches_tpath']) +parser.add_argument('--out_scores_tpath', default=p['out_scores_tpath']) + +# Process the command line arguments. +args = parser.parse_args() + +for err_type in p['correct_th']: + p['correct_th'][err_type] =\ + map(float, args.__dict__['correct_th_' + err_type].split(',')) + +p['normalized_by_diameter'] = args.normalized_by_diameter.split(',') +p['normalized_by_im_width'] = args.normalized_by_im_width.split(',') +p['visib_gt_min'] = float(args.visib_gt_min) +p['error_dir_paths'] = args.error_dir_paths.split(',') +p['datasets_path'] = str(args.datasets_path) +p['targets_filename'] = str(args.targets_filename) +p['error_tpath'] = str(args.error_tpath) +p['out_matches_tpath'] = str(args.out_matches_tpath) +p['out_scores_tpath'] = str(args.out_scores_tpath) + +misc.log('-----------') +misc.log('Parameters:') +for k, v in p.items(): + misc.log('- {}: {}'.format(k, v)) +misc.log('-----------') + +# Calculation of the performance scores. +# ------------------------------------------------------------------------------ +for error_dir_path in p['error_dir_paths']: + misc.log('Processing: {}'.format(error_dir_path)) + + time_start = time.time() + + # Parse info about the errors from the folder name. + error_sign = os.path.basename(error_dir_path) + err_type = str(error_sign.split('_')[0].split('=')[1]) + n_top = int(error_sign.split('_')[1].split('=')[1]) + result_info = os.path.basename(os.path.dirname(error_dir_path)).split('_') + method = result_info[0] + dataset_info = result_info[1].split('-') + dataset = dataset_info[0] + split = dataset_info[1] + split_type = dataset_info[2] if len(dataset_info) > 2 else None + + # Evaluation signature. 
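+  # The score signature encodes the correctness threshold(s) and the minimum
+  # visible fraction, so matches and scores computed with different settings
+  # are written to different files.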
+ score_sign = misc.get_score_signature( + p['correct_th'][err_type], p['visib_gt_min']) + + misc.log('Calculating score - error: {}, method: {}, dataset: {}.'.format( + err_type, method, dataset)) + + # Load dataset parameters. + dp_split = dataset_params.get_split_params( + p['datasets_path'], dataset, split, split_type) + + model_type = 'eval' + dp_model = dataset_params.get_model_params( + p['datasets_path'], dataset, model_type) + + # Load info about the object models. + models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True) + + # Load the estimation targets to consider. + targets = inout.load_json( + os.path.join(dp_split['base_path'], p['targets_filename'])) + scene_im_ids = {} + + # Organize the targets by scene, image and object. + misc.log('Organizing estimation targets...') + targets_org = {} + for target in targets: + targets_org.setdefault(target['scene_id'], {}).setdefault( + target['im_id'], {})[target['obj_id']] = target + + # Go through the test scenes and match estimated poses to GT poses. + # ---------------------------------------------------------------------------- + matches = [] # Stores info about the matching pose estimate for each GT pose. + for scene_id, scene_targets in targets_org.items(): + misc.log('Processing scene {} of {}...'.format(scene_id, dataset)) + + # Load GT poses for the current scene. + scene_gt = inout.load_scene_gt( + dp_split['scene_gt_tpath'].format(scene_id=scene_id)) + + # Load info about the GT poses (e.g. visibility) for the current scene. + scene_gt_info = inout.load_json( + dp_split['scene_gt_info_tpath'].format(scene_id=scene_id), + keys_to_int=True) + + # Keep GT poses only for the selected targets. + scene_gt_curr = {} + scene_gt_info_curr = {} + scene_gt_valid = {} + for im_id, im_targets in scene_targets.items(): + scene_gt_curr[im_id] = scene_gt[im_id] + + # Determine which GT poses are valid. + scene_gt_valid[im_id] = [] + im_gt_info = scene_gt_info[im_id] + for gt_id, gt in enumerate(scene_gt[im_id]): + is_target = gt['obj_id'] in im_targets.keys() + is_visib = im_gt_info[gt_id]['visib_fract'] >= p['visib_gt_min'] + scene_gt_valid[im_id].append(is_target and is_visib) + + # Load pre-calculated errors of the pose estimates w.r.t. the GT poses. + scene_errs_path = p['error_tpath'].format( + error_dir_path=error_dir_path, scene_id=scene_id) + scene_errs = inout.load_json(scene_errs_path, keys_to_int=True) + + # Normalize the errors by the object diameter. + if err_type in p['normalized_by_diameter']: + for err in scene_errs: + diameter = float(models_info[err['obj_id']]['diameter']) + for gt_id in err['errors'].keys(): + err['errors'][gt_id] = [e / diameter for e in err['errors'][gt_id]] + + # Normalize the errors by the image width. + if err_type in p['normalized_by_im_width']: + for err in scene_errs: + factor = 640.0 / float(dp_split['im_size'][0]) + for gt_id in err['errors'].keys(): + err['errors'][gt_id] = [factor * e for e in err['errors'][gt_id]] + + # Match the estimated poses to the ground-truth poses. + matches += pose_matching.match_poses_scene( + scene_id, scene_gt_curr, scene_gt_valid, scene_errs, + p['correct_th'][err_type], n_top) + + # Calculate the performance scores. + # ---------------------------------------------------------------------------- + # 6D object localization scores (SiSo if n_top = 1). + scores = score.calc_localization_scores( + dp_split['scene_ids'], dp_model['obj_ids'], matches, n_top) + + # Save scores. 
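+  # Just before saving, report how many match records entered the score
+  # computation (a small optional log; it uses only the length of 'matches').
+  misc.log('Scores computed from {} match records.'.format(len(matches)))
+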
+ scores_path = p['out_scores_tpath'].format( + error_dir_path=error_dir_path, score_sign=score_sign) + inout.save_json(scores_path, scores) + + # Save matches. + matches_path = p['out_matches_tpath'].format( + error_dir_path=error_dir_path, score_sign=score_sign) + inout.save_json(matches_path, matches) + + time_total = time.time() - time_start + misc.log('Matching and score calculation took {}s.'.format(time_total)) + +misc.log('Done.') diff --git a/scripts/meshlab_scripts/remesh_for_eval_cell=0.25.mlx b/scripts/meshlab_scripts/remesh_for_eval_cell=0.25.mlx new file mode 100644 index 0000000..365ba90 --- /dev/null +++ b/scripts/meshlab_scripts/remesh_for_eval_cell=0.25.mlx @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/scripts/meshlab_scripts/remesh_for_eval_cell=0.5.mlx b/scripts/meshlab_scripts/remesh_for_eval_cell=0.5.mlx new file mode 100644 index 0000000..110538b --- /dev/null +++ b/scripts/meshlab_scripts/remesh_for_eval_cell=0.5.mlx @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/scripts/remesh_models_for_eval.py b/scripts/remesh_models_for_eval.py new file mode 100644 index 0000000..31e9029 --- /dev/null +++ b/scripts/remesh_models_for_eval.py @@ -0,0 +1,67 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""'Uniformly' resamples and decimates 3D object models for evaluation. + +Note: Models of some T-LESS objects were processed by Blender (using the Remesh +modifier). +""" + +import os + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import misc + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. + 'dataset': 'lm', + + # Type of input object models. + # None = default model type. + 'model_in_type': None, + + # Type of output object models. + 'model_out_type': 'eval', + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, + + # Path to meshlabserver.exe (tested version: 1.3.3). + # On Windows: C:\Program Files\VCG\MeshLab133\meshlabserver.exe + 'meshlab_server_path': config.meshlab_server_path, + + # Path to scripts/meshlab_scripts/remesh_for_eval.mlx. + 'meshlab_script_path': os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'meshlab_scripts', + r'remesh_for_eval_cell=0.25.mlx'), +} +################################################################################ + + +# Load dataset parameters. +dp_model_in = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], p['model_in_type']) + +dp_model_out = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], p['model_out_type']) + +# Attributes to save for the output models. +attrs_to_save = [] + +# Process models of all objects in the selected dataset. 
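+# A minimal optional sketch: to remesh only a few objects (e.g. while testing
+# the MeshLab setup), restrict the object ID list here. 'obj_ids_subset' is a
+# helper variable introduced only for this illustration; with the default empty
+# list, all objects are processed as before.
+obj_ids_subset = []  # E.g. [1, 2] to process only objects 1 and 2.
+if obj_ids_subset:
+  dp_model_in['obj_ids'] = [
+    obj_id for obj_id in dp_model_in['obj_ids'] if obj_id in obj_ids_subset]
+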
+for obj_id in dp_model_in['obj_ids']: + misc.log('\n\n\nProcessing model of object {}...\n'.format(obj_id)) + + model_in_path = dp_model_in['model_tpath'].format(obj_id=obj_id) + model_out_path = dp_model_out['model_tpath'].format(obj_id=obj_id) + + misc.ensure_dir(os.path.dirname(model_out_path)) + + misc.run_meshlab_script(p['meshlab_server_path'], p['meshlab_script_path'], + model_in_path, model_out_path, attrs_to_save) + +misc.log('Done.') diff --git a/scripts/render_train_imgs.py b/scripts/render_train_imgs.py new file mode 100644 index 0000000..7ff0e92 --- /dev/null +++ b/scripts/render_train_imgs.py @@ -0,0 +1,214 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Renders RGB-D images of an object model.""" + +import os +import cv2 + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import renderer +from bop_toolkit_lib import view_sampler + + +# PARAMETERS. +################################################################################ +# See dataset_params.py for options. +dataset = 'tyol' + +# Radii of view spheres from which to render the objects. +if dataset == 'lm': + radii = [400] # There are only 3 occurrences under 400 mm. +elif dataset == 'tless': + radii = [650] +elif dataset == 'tudl': + radii = [850] +elif dataset == 'tyol': + radii = [500] +elif dataset == 'ruapc': + radii = [590] +elif dataset == 'icmi': + radii = [500] +elif dataset == 'icbin': + radii = [450] +else: + raise ValueError('Unknown dataset.') + +# Type of object models and camera. +model_type = None +cam_type = None +if dataset == 'tless': + model_type = 'reconst' + cam_type = 'primesense' + +# Objects to render ([] = all objects from the specified dataset). +obj_ids = [] + +# Minimum required number of views on the whole view sphere. The final number of +# views depends on the sampling method. +min_n_views = 1000 + +# Rendering parameters. +ambient_weight = 0.5 # Weight of ambient light [0, 1] +shading = 'phong' # 'flat', 'phong' + +# Type of the renderer. Options: 'cpp', 'python'. +renderer_type = 'python' + +# Super-sampling anti-aliasing (SSAA) - the RGB image is rendered at ssaa_fact +# times higher resolution and then down-sampled to the required resolution. +# Ref: https://github.com/vispy/vispy/wiki/Tech.-Antialiasing +ssaa_fact = 4 + +# Folder containing the BOP datasets. +datasets_path = config.datasets_path + +# Folder for the rendered images. +out_tpath = os.path.join(config.output_path, 'render_{dataset}') + +# Output path templates. +out_rgb_tpath =\ + os.path.join('{out_path}', '{obj_id:06d}', 'rgb', '{im_id:06d}.png') +out_depth_tpath =\ + os.path.join('{out_path}', '{obj_id:06d}', 'depth', '{im_id:06d}.png') +out_scene_camera_tpath =\ + os.path.join('{out_path}', '{obj_id:06d}', 'scene_camera.json') +out_scene_gt_tpath =\ + os.path.join('{out_path}', '{obj_id:06d}', 'scene_gt.json') +out_views_vis_tpath =\ + os.path.join('{out_path}', '{obj_id:06d}', 'views_radius={radius}.ply') +################################################################################ + + +out_path = out_tpath.format(dataset=dataset) +misc.ensure_dir(out_path) + +# Load dataset parameters. 
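+# (Before that, for orientation: the templates above place the outputs under
+# per-object folders. The object and image IDs below are arbitrary examples.)
+misc.log('Example output path: ' + out_rgb_tpath.format(
+  out_path=out_path, obj_id=1, im_id=0))
+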
+dp_split_test = dataset_params.get_split_params(datasets_path, dataset, 'test') +dp_model = dataset_params.get_model_params(datasets_path, dataset, model_type) +dp_camera = dataset_params.get_camera_params(datasets_path, dataset, cam_type) + +if not obj_ids: + obj_ids = dp_model['obj_ids'] + +# Image size and K for the RGB image (potentially with SSAA). +im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp_camera['im_size']] +K_rgb = dp_camera['K'] * ssaa_fact + +# Intrinsic parameters for RGB rendering. +fx_rgb, fy_rgb, cx_rgb, cy_rgb =\ + K_rgb[0, 0], K_rgb[1, 1], K_rgb[0, 2], K_rgb[1, 2] + +# Intrinsic parameters for depth rendering. +K = dp_camera['K'] +fx_d, fy_d, cx_d, cy_d = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + +# Create RGB and depth renderers (two are created because the RGB has a higher +# resolution if SSAA is used). +width_rgb, height_rgb = im_size_rgb[0], im_size_rgb[1] +ren_rgb = renderer.create_renderer( + width_rgb, height_rgb, renderer_type, mode='rgb', shading=shading) +ren_rgb.set_light_ambient_weight(ambient_weight) + +width_depth, height_depth, = dp_camera['im_size'][0], dp_camera['im_size'][1] +ren_depth = renderer.create_renderer( + width_depth, height_depth, renderer_type, mode='depth') + +# Render training images for all object models. +for obj_id in obj_ids: + + # Add the current object model to the renderer. + ren_rgb.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id)) + ren_depth.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id)) + + # Prepare output folders. + misc.ensure_dir(os.path.dirname(out_rgb_tpath.format( + out_path=out_path, obj_id=obj_id, im_id=0))) + misc.ensure_dir(os.path.dirname(out_depth_tpath.format( + out_path=out_path, obj_id=obj_id, im_id=0))) + + # Load model. + model_path = dp_model['model_tpath'].format(obj_id=obj_id) + model = inout.load_ply(model_path) + + # Load model texture. + if 'texture_file' in model: + model_texture_path =\ + os.path.join(os.path.dirname(model_path), model['texture_file']) + model_texture = inout.load_im(model_texture_path) + else: + model_texture = None + + scene_camera = {} + scene_gt = {} + im_id = 0 + for radius in radii: + # Sample viewpoints. + view_sampler_mode = 'hinterstoisser' # 'hinterstoisser' or 'fibonacci'. + views, views_level = view_sampler.sample_views( + min_n_views, radius, dp_split_test['azimuth_range'], + dp_split_test['elev_range'], view_sampler_mode) + + misc.log('Sampled views: ' + str(len(views))) + # out_views_vis_path = out_views_vis_tpath.format( + # out_path=out_path, obj_id=obj_id, radius=radius) + # view_sampler.save_vis(out_views_vis_path, views, views_level) + + # Render the object model from all views. + for view_id, view in enumerate(views): + if view_id % 10 == 0: + misc.log('Rendering - obj: {}, radius: {}, view: {}/{}'.format( + obj_id, radius, view_id, len(views))) + + # Rendering. + rgb = ren_rgb.render_object( + obj_id, view['R'], view['t'], fx_rgb, fy_rgb, cx_rgb, cy_rgb)['rgb'] + depth = ren_depth.render_object( + obj_id, view['R'], view['t'], fx_d, fy_d, cx_d, cy_d)['depth'] + + # Convert depth so it is in the same units as other images in the dataset. + depth /= dp_camera['depth_scale'] + + # The OpenCV function was used for rendering of the training images + # provided for the SIXD Challenge 2017. + rgb = cv2.resize(rgb, dp_camera['im_size'], interpolation=cv2.INTER_AREA) + # rgb = scipy.misc.imresize(rgb, par['cam']['im_size'][::-1], 'bicubic') + + # Save the rendered images. 
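+      # Optional sanity check before saving (a sketch; it assumes the depth
+      # images are written as 16-bit PNGs, as in the BOP datasets format, so
+      # values above 65535 would not be representable).
+      if depth.max() > 65535:
+        misc.log('WARNING: Depth values exceed the 16-bit range.')
+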
+ out_rgb_path = out_rgb_tpath.format( + out_path=out_path, obj_id=obj_id, im_id=im_id) + inout.save_im(out_rgb_path, rgb) + out_depth_path = out_depth_tpath.format( + out_path=out_path, obj_id=obj_id, im_id=im_id) + inout.save_depth(out_depth_path, depth) + + # Get 2D bounding box of the object model at the ground truth pose. + # ys, xs = np.nonzero(depth > 0) + # obj_bb = misc.calc_2d_bbox(xs, ys, dp_camera['im_size']) + + scene_camera[im_id] = { + 'cam_K': dp_camera['K'].flatten().tolist(), + 'depth_scale': dp_camera['depth_scale'], + 'view_level': int(views_level[view_id]) + } + + scene_gt[im_id] = [{ + 'cam_R_m2c': view['R'].flatten().tolist(), + 'cam_t_m2c': view['t'].flatten().tolist(), + 'obj_id': int(obj_id) + }] + + im_id += 1 + + # Remove the current object model from the renderer. + ren_rgb.remove_object(obj_id) + ren_depth.remove_object(obj_id) + + # Save metadata. + inout.save_scene_camera(out_scene_camera_tpath.format( + out_path=out_path, obj_id=obj_id), scene_camera) + inout.save_scene_gt(out_scene_gt_tpath.format( + out_path=out_path, obj_id=obj_id), scene_gt) diff --git a/scripts/vis_est_poses.py b/scripts/vis_est_poses.py new file mode 100644 index 0000000..ff9a4dd --- /dev/null +++ b/scripts/vis_est_poses.py @@ -0,0 +1,235 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Visualizes object models in pose estimates saved in the BOP format.""" + +import os +import numpy as np +import itertools + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import renderer +from bop_toolkit_lib import visualization + + +# PARAMETERS. +################################################################################ +p = { + # Top N pose estimates (with the highest score) to be visualized for each + # object in each image. + 'n_top': 1, # 0 = all estimates, -1 = given by the number of GT poses. + + # True = one visualization for each (im_id, obj_id), False = one per im_id. + 'vis_per_obj_id': True, + + # Indicates whether to render RGB image. + 'vis_rgb': True, + + # Indicates whether to resolve visibility in the rendered RGB images (using + # depth renderings). If True, only the part of object surface, which is not + # occluded by any other modeled object, is visible. If False, RGB renderings + # of individual objects are blended together. + 'vis_rgb_resolve_visib': True, + + # Indicates whether to render depth image. + 'vis_depth_diff': False, + + # If to use the original model color. + 'vis_orig_color': False, + + # Type of the renderer (used for the VSD pose error function). + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # Names of files with pose estimates to visualize (assumed to be stored in + # folder config.eval_path). See docs/bop_challenge_2019.md for a description + # of the format. Example results can be found at: + # http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/ + 'result_filenames': [ + '/path/to/csv/with/results', + ], + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, + + # Folder for output visualisations. + 'vis_path': os.path.join(config.output_path, 'vis_est_poses'), + + # Path templates for output images. 
+ 'vis_rgb_tpath': os.path.join( + '{vis_path}', '{result_name}', '{scene_id:06d}', '{vis_name}.jpg'), + 'vis_depth_diff_tpath': os.path.join( + '{vis_path}', '{result_name}', '{scene_id:06d}', + '{vis_name}_depth_diff.jpg'), +} +################################################################################ + + +# Load colors. +colors_path = os.path.join( + os.path.dirname(visualization.__file__), 'colors.json') +colors = inout.load_json(colors_path) + +for result_fname in p['result_filenames']: + misc.log('Processing: ' + result_fname) + + # Parse info about the method and the dataset from the filename. + result_name = os.path.splitext(os.path.basename(result_fname))[0] + result_info = result_name.split('_') + method = result_info[0] + dataset_info = result_info[1].split('-') + dataset = dataset_info[0] + split = dataset_info[1] + split_type = dataset_info[2] if len(dataset_info) > 2 else None + + # Load dataset parameters. + dp_split = dataset_params.get_split_params( + p['datasets_path'], dataset, split, split_type) + + model_type = 'eval' + dp_model = dataset_params.get_model_params( + p['datasets_path'], dataset, model_type) + + # Rendering mode. + renderer_modalities = [] + if p['vis_rgb']: + renderer_modalities.append('rgb') + if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']): + renderer_modalities.append('depth') + renderer_mode = '+'.join(renderer_modalities) + + # Create a renderer. + width, height = dp_split['im_size'] + ren = renderer.create_renderer( + width, height, p['renderer_type'], mode=renderer_mode) + + # Load object models. + models = {} + for obj_id in dp_model['obj_ids']: + misc.log('Loading 3D model of object {}...'.format(obj_id)) + model_path = dp_model['model_tpath'].format(obj_id=obj_id) + model_color = None + if not p['vis_orig_color']: + model_color = tuple(colors[(obj_id - 1) % len(colors)]) + ren.add_object(obj_id, model_path, surf_color=model_color) + + # Load pose estimates. + misc.log('Loading pose estimates...') + ests = inout.load_bop_results( + os.path.join(config.results_path, result_fname)) + + # Organize the pose estimates by scene, image and object. + misc.log('Organizing pose estimates...') + ests_org = {} + for est in ests: + ests_org.setdefault(est['scene_id'], {}).setdefault( + est['im_id'], {}).setdefault(est['obj_id'], []).append(est) + + for scene_id, scene_ests in ests_org.items(): + + # Load info and ground-truth poses for the current scene. + scene_camera = inout.load_scene_camera( + dp_split['scene_camera_tpath'].format(scene_id=scene_id)) + scene_gt = inout.load_scene_gt( + dp_split['scene_gt_tpath'].format(scene_id=scene_id)) + + for im_ind, (im_id, im_ests) in enumerate(scene_ests.items()): + + if im_ind % 10 == 0: + split_type_str = ' - ' + split_type if split_type is not None else '' + misc.log( + 'Visualizing pose estimates - method: {}, dataset: {}{}, scene: {}, ' + 'im: {}'.format(method, dataset, split_type_str, scene_id, im_id)) + + # Intrinsic camera matrix. + K = scene_camera[im_id]['cam_K'] + + im_ests_vis = [] + im_ests_vis_obj_ids = [] + for obj_id, obj_ests in im_ests.items(): + + # Sort the estimates by score (in descending order). + obj_ests_sorted = sorted( + obj_ests, key=lambda est: est['score'], reverse=True) + + # Select the number of top estimated poses to visualize. + if p['n_top'] == 0: # All estimates are considered. + n_top_curr = None + elif p['n_top'] == -1: # Given by the number of GT poses. 
+ n_gt = sum([gt['obj_id'] == obj_id for gt in scene_gt[im_id]]) + n_top_curr = n_gt + else: # Specified by the parameter n_top. + n_top_curr = p['n_top'] + obj_ests_sorted = obj_ests_sorted[slice(0, n_top_curr)] + + # Get list of poses to visualize. + for est in obj_ests_sorted: + est['obj_id'] = obj_id + + # Text info to write on the image at the pose estimate. + if p['vis_per_obj_id']: + est['text_info'] = [ + {'name': '', 'val': est['score'], 'fmt': ':.2f'} + ] + else: + val = '{}:{:.2f}'.format(obj_id, est['score']) + est['text_info'] = [{'name': '', 'val': val, 'fmt': ''}] + + im_ests_vis.append(obj_ests_sorted) + im_ests_vis_obj_ids.append(obj_id) + + # Join the per-object estimates if only one visualization is to be made. + if not p['vis_per_obj_id']: + im_ests_vis = [list(itertools.chain.from_iterable(im_ests_vis))] + + for ests_vis_id, ests_vis in enumerate(im_ests_vis): + + # Load the color and depth images and prepare images for rendering. + rgb = None + if p['vis_rgb']: + if 'rgb' in dp_split['im_modalities']: + rgb = inout.load_im(dp_split['rgb_tpath'].format( + scene_id=scene_id, im_id=im_id)) + elif 'gray' in dp_split['im_modalities']: + gray = inout.load_im(dp_split['gray_tpath'].format( + scene_id=scene_id, im_id=im_id)) + rgb = np.dstack([gray, gray, gray]) + else: + raise ValueError('RGB nor gray images are available.') + + depth = None + if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']): + depth = inout.load_depth(dp_split['depth_tpath'].format( + scene_id=scene_id, im_id=im_id)) + depth *= scene_camera[im_id]['depth_scale'] # Convert to [mm]. + + # Visualization name. + if p['vis_per_obj_id']: + vis_name = '{im_id:06d}_{obj_id:06d}'.format( + im_id=im_id, obj_id=im_ests_vis_obj_ids[ests_vis_id]) + else: + vis_name = '{im_id:06d}'.format(im_id=im_id) + + # Path to the output RGB visualization. + vis_rgb_path = None + if p['vis_rgb']: + vis_rgb_path = p['vis_rgb_tpath'].format( + vis_path=p['vis_path'], result_name=result_name, scene_id=scene_id, + vis_name=vis_name) + + # Path to the output depth difference visualization. + vis_depth_diff_path = None + if p['vis_depth_diff']: + vis_depth_diff_path = p['vis_depth_diff_tpath'].format( + vis_path=p['vis_path'], result_name=result_name, scene_id=scene_id, + vis_name=vis_name) + + # Visualization. + visualization.vis_object_poses( + poses=ests_vis, K=K, renderer=ren, rgb=rgb, depth=depth, + vis_rgb_path=vis_rgb_path, vis_depth_diff_path=vis_depth_diff_path, + vis_rgb_resolve_visib=p['vis_rgb_resolve_visib']) + +misc.log('Done.') diff --git a/scripts/vis_gt_poses.py b/scripts/vis_gt_poses.py new file mode 100644 index 0000000..6d6e069 --- /dev/null +++ b/scripts/vis_gt_poses.py @@ -0,0 +1,209 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Visualizes object models in the ground-truth poses.""" + +import os +import numpy as np + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import renderer +from bop_toolkit_lib import visualization + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. + 'dataset': 'lm', + + # Dataset split. Options: 'train', 'val', 'test'. + 'dataset_split': 'test', + + # Dataset split type. None = default. See dataset_params.py for options. 
+ 'dataset_split_type': None, + + # File with a list of estimation targets used to determine the set of images + # for which the GT poses will be visualized. The file is assumed to be stored + # in the dataset folder. None = all images. + # 'targets_filename': 'test_targets_bop19.json', + 'targets_filename': None, + + # Select ID's of scenes, images and GT poses to be processed. + # Empty list [] means that all ID's will be used. + 'scene_ids': [], + 'im_ids': [], + 'gt_ids': [], + + # Indicates whether to render RGB images. + 'vis_rgb': True, + + # Indicates whether to resolve visibility in the rendered RGB images (using + # depth renderings). If True, only the part of object surface, which is not + # occluded by any other modeled object, is visible. If False, RGB renderings + # of individual objects are blended together. + 'vis_rgb_resolve_visib': True, + + # Indicates whether to save images of depth differences. + 'vis_depth_diff': False, + + # Whether to use the original model color. + 'vis_orig_color': False, + + # Type of the renderer (used for the VSD pose error function). + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, + + # Folder for output visualisations. + 'vis_path': os.path.join(config.output_path, 'vis_gt_poses'), + + # Path templates for output images. + 'vis_rgb_tpath': os.path.join( + '{vis_path}', '{dataset}', '{split}', '{scene_id:06d}', '{im_id:06d}.jpg'), + 'vis_depth_diff_tpath': os.path.join( + '{vis_path}', '{dataset}', '{split}', '{scene_id:06d}', + '{im_id:06d}_depth_diff.jpg'), +} +################################################################################ + + +# Load dataset parameters. +dp_split = dataset_params.get_split_params( + p['datasets_path'], p['dataset'], p['dataset_split'], p['dataset_split_type']) + +model_type = 'eval' # None = default. +dp_model = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], model_type) + +# Load colors. +colors_path = os.path.join( + os.path.dirname(visualization.__file__), 'colors.json') +colors = inout.load_json(colors_path) + +# Subset of images for which the ground-truth poses will be rendered. +if p['targets_filename'] is not None: + targets = inout.load_json( + os.path.join(dp_split['base_path'], p['targets_filename'])) + scene_im_ids = {} + for target in targets: + scene_im_ids.setdefault( + target['scene_id'], set()).add(target['im_id']) +else: + scene_im_ids = None + +# List of considered scenes. +scene_ids_curr = dp_split['scene_ids'] +if p['scene_ids']: + scene_ids_curr = set(scene_ids_curr).intersection(p['scene_ids']) + +# Rendering mode. +renderer_modalities = [] +if p['vis_rgb']: + renderer_modalities.append('rgb') +if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']): + renderer_modalities.append('depth') +renderer_mode = '+'.join(renderer_modalities) + +# Create a renderer. +width, height = dp_split['im_size'] +ren = renderer.create_renderer( + width, height, p['renderer_type'], mode=renderer_mode, shading='flat') + +# Load object models. +models = {} +for obj_id in dp_model['obj_ids']: + misc.log('Loading 3D model of object {}...'.format(obj_id)) + model_path = dp_model['model_tpath'].format(obj_id=obj_id) + model_color = None + if not p['vis_orig_color']: + model_color = tuple(colors[(obj_id - 1) % len(colors)]) + ren.add_object(obj_id, model_path, surf_color=model_color) + +for scene_id in scene_ids_curr: + + # Load scene info and ground-truth poses. 
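+  # The per-scene annotation files follow the standard BOP layout; logging the
+  # GT file path can help diagnose a partially downloaded dataset (optional).
+  misc.log('Scene GT file: ' + dp_split['scene_gt_tpath'].format(
+    scene_id=scene_id))
+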
+ scene_camera = inout.load_scene_camera( + dp_split['scene_camera_tpath'].format(scene_id=scene_id)) + scene_gt = inout.load_scene_gt( + dp_split['scene_gt_tpath'].format(scene_id=scene_id)) + + # List of considered images. + if scene_im_ids is not None: + im_ids = scene_im_ids[scene_id] + else: + im_ids = sorted(scene_gt.keys()) + if p['im_ids']: + im_ids = set(im_ids).intersection(p['im_ids']) + + # Render the object models in the ground-truth poses in the selected images. + for im_counter, im_id in enumerate(im_ids): + if im_counter % 10 == 0: + misc.log( + 'Visualizing GT poses - dataset: {}, scene: {}, im: {}/{}'.format( + p['dataset'], scene_id, im_counter, len(im_ids))) + + K = scene_camera[im_id]['cam_K'] + + # List of considered ground-truth poses. + gt_ids_curr = range(len(scene_gt[im_id])) + if p['gt_ids']: + gt_ids_curr = set(gt_ids_curr).intersection(p['gt_ids']) + + # Collect the ground-truth poses. + gt_poses = [] + for gt_id in gt_ids_curr: + gt = scene_gt[im_id][gt_id] + gt_poses.append({ + 'obj_id': gt['obj_id'], + 'R': gt['cam_R_m2c'], + 't': gt['cam_t_m2c'], + 'text_info': [ + {'name': '', 'val': '{}:{}'.format(gt['obj_id'], gt_id), 'fmt': ''} + ] + }) + + # Load the color and depth images and prepare images for rendering. + rgb = None + if p['vis_rgb']: + if 'rgb' in dp_split['im_modalities']: + rgb = inout.load_im(dp_split['rgb_tpath'].format( + scene_id=scene_id, im_id=im_id)) + elif 'gray' in dp_split['im_modalities']: + gray = inout.load_im(dp_split['gray_tpath'].format( + scene_id=scene_id, im_id=im_id)) + rgb = np.dstack([gray, gray, gray]) + else: + raise ValueError('RGB nor gray images are available.') + + depth = None + if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']): + depth = inout.load_depth(dp_split['depth_tpath'].format( + scene_id=scene_id, im_id=im_id)) + depth *= scene_camera[im_id]['depth_scale'] # Convert to [mm]. + + # Path to the output RGB visualization. + vis_rgb_path = None + if p['vis_rgb']: + vis_rgb_path = p['vis_rgb_tpath'].format( + vis_path=p['vis_path'], dataset=p['dataset'], split=p['dataset_split'], + scene_id=scene_id, im_id=im_id) + + # Path to the output depth difference visualization. + vis_depth_diff_path = None + if p['vis_depth_diff']: + vis_depth_diff_path = p['vis_depth_diff_tpath'].format( + vis_path=p['vis_path'], dataset=p['dataset'], split=p['dataset_split'], + scene_id=scene_id, im_id=im_id) + + # Visualization. + visualization.vis_object_poses( + poses=gt_poses, K=K, renderer=ren, rgb=rgb, depth=depth, + vis_rgb_path=vis_rgb_path, vis_depth_diff_path=vis_depth_diff_path, + vis_rgb_resolve_visib=p['vis_rgb_resolve_visib']) + +misc.log('Done.') diff --git a/scripts/vis_object_symmetries.py b/scripts/vis_object_symmetries.py new file mode 100644 index 0000000..7131221 --- /dev/null +++ b/scripts/vis_object_symmetries.py @@ -0,0 +1,99 @@ +# Author: Tomas Hodan (hodantom@cmp.felk.cvut.cz) +# Center for Machine Perception, Czech Technical University in Prague + +"""Visualizes object models under all identified symmetry transformations.""" + +import os +import numpy as np + +from bop_toolkit_lib import config +from bop_toolkit_lib import dataset_params +from bop_toolkit_lib import inout +from bop_toolkit_lib import misc +from bop_toolkit_lib import renderer +from bop_toolkit_lib import transform as tr + + +# PARAMETERS. +################################################################################ +p = { + # See dataset_params.py for options. 
+ 'dataset': 'lm', + + # Type of the renderer (used for the VSD pose error function). + 'renderer_type': 'python', # Options: 'cpp', 'python'. + + # See misc.get_symmetry_transformations(). + 'max_sym_disc_step': 0.01, + + 'views': [ + { + 'R': tr.rotation_matrix(0.5 * np.pi, [1, 0, 0]).dot( + tr.rotation_matrix(-0.5 * np.pi, [0, 0, 1])).dot( + tr.rotation_matrix(0.1 * np.pi, [0, 1, 0]))[:3, :3], + 't': np.array([[0, 0, 500]]).T + } + ], + + # Folder containing the BOP datasets. + 'datasets_path': config.datasets_path, + + # Folder for output visualisations. + 'vis_path': os.path.join(config.output_path, 'vis_object_symmetries'), + + # Path templates for output images. + 'vis_rgb_tpath': os.path.join( + '{vis_path}', '{dataset}', '{obj_id:06d}', + '{view_id:06d}_{pose_id:06d}.jpg'), +} +################################################################################ + + +# Load dataset parameters. +model_type = None # None = default. +if p['dataset'] == 'tless': + model_type = 'cad' +dp_model = dataset_params.get_model_params( + p['datasets_path'], p['dataset'], model_type) +dp_camera = dataset_params.get_camera_params( + p['datasets_path'], p['dataset']) + +K = dp_camera['K'] +fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2] + +# Create a renderer. +width, height = dp_camera['im_size'] +ren = renderer.create_renderer( + width, height, p['renderer_type'], mode='rgb', shading='flat') + +# Load meta info about the models (including symmetries). +models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True) + + +for obj_id in dp_model['obj_ids']: + + # Load object model. + misc.log('Loading 3D model of object {}...'.format(obj_id)) + model_path = dp_model['model_tpath'].format(obj_id=obj_id) + ren.add_object(obj_id, model_path) + + poses = misc.get_symmetry_transformations( + models_info[obj_id], p['max_sym_disc_step']) + + for pose_id, pose in enumerate(poses): + + for view_id, view in enumerate(p['views']): + + R = view['R'].dot(pose['R']) + t = view['R'].dot(pose['t']) + view['t'] + + vis_rgb = ren.render_object(obj_id, R, t, fx, fy, cx, cy)['rgb'] + + # Path to the output RGB visualization. + vis_rgb_path = p['vis_rgb_tpath'].format( + vis_path=p['vis_path'], dataset=p['dataset'], obj_id=obj_id, + view_id=view_id, pose_id=pose_id) + misc.ensure_dir(os.path.dirname(vis_rgb_path)) + inout.save_im(vis_rgb_path, vis_rgb) + +misc.log('Done.')
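+
+# As a final convenience (optional), point at the folder with the rendered
+# visualizations; the per-dataset sub-folder follows 'vis_rgb_tpath' above.
+misc.log('Visualizations saved under: {}'.format(
+  os.path.join(p['vis_path'], p['dataset'])))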