From 5d4ca394e07f8e021a036a290408fb053ed12157 Mon Sep 17 00:00:00 2001 From: ruben Date: Wed, 26 Jul 2017 12:39:01 +0200 Subject: [PATCH] First commit --- .gitignore | 8 + 3rdparty/line_descriptor/CMakeLists.txt | 30 + 3rdparty/line_descriptor/README.md | 2 + .../line_descriptor/descriptor_custom.hpp | 1382 +++++++++ .../include/line_descriptor_custom.hpp | 119 + 3rdparty/line_descriptor/lib/liblinedesc.so | Bin 0 -> 209880 bytes .../src/LSDDetector_custom.cpp | 328 ++ .../src/binary_descriptor_custom.cpp | 2754 +++++++++++++++++ .../src/binary_descriptor_matcher.cpp | 971 ++++++ .../line_descriptor/src/bitarray_custom.hpp | 115 + .../line_descriptor/src/bitops_custom.hpp | 167 + 3rdparty/line_descriptor/src/draw_custom.cpp | 190 ++ .../line_descriptor/src/precomp_custom.hpp | 76 + 3rdparty/line_descriptor/src/types_custom.hpp | 66 + CMakeLists.txt | 128 + LICENCE | 675 ++++ README.md | 57 + app/run_pipeline.cpp | 830 +++++ app/scene_config.ini | 20 + build.sh | 13 + config/dataset_params.yaml | 19 + include/plsvo/bundle_adjustment.h | 120 + include/plsvo/config.h | 230 ++ include/plsvo/depth_filter.h | 234 ++ include/plsvo/feature.h | 197 ++ include/plsvo/feature3D.h | 233 ++ include/plsvo/feature_alignment.h | 74 + include/plsvo/feature_detection.h | 184 ++ include/plsvo/frame.h | 176 ++ include/plsvo/frame_handler_base.h | 138 + include/plsvo/frame_handler_mono.h | 137 + include/plsvo/global.h | 123 + include/plsvo/initialization.h | 93 + include/plsvo/map.h | 195 ++ include/plsvo/matcher.h | 170 + include/plsvo/point.h | 197 ++ include/plsvo/pose_optimizer.h | 69 + include/plsvo/reprojector.h | 163 + include/plsvo/sceneRepresentation.h | 137 + include/plsvo/sparse_img_align.h | 121 + src/bundle_adjustment.cpp | 442 +++ src/config.cpp | 136 + src/depth_filter.cpp | 586 ++++ src/feature.cpp | 303 ++ src/feature3D.cpp | 129 + src/feature3D_impl.cpp | 175 ++ src/feature_alignment.cpp | 614 ++++ src/feature_detection.cpp | 272 ++ src/frame.cpp | 220 ++ src/frame_handler_base.cpp | 240 ++ src/frame_handler_mono.cpp | 510 +++ src/initialization.cpp | 245 ++ src/map.cpp | 532 ++++ src/matcher.cpp | 589 ++++ src/point.cpp | 221 ++ src/pose_optimizer.cpp | 586 ++++ src/reprojector.cpp | 425 +++ src/sceneRepresentation.cpp | 654 ++++ src/sparse_img_align.cpp | 725 +++++ 59 files changed, 18545 insertions(+) create mode 100644 .gitignore create mode 100644 3rdparty/line_descriptor/CMakeLists.txt create mode 100644 3rdparty/line_descriptor/README.md create mode 100644 3rdparty/line_descriptor/include/line_descriptor/descriptor_custom.hpp create mode 100644 3rdparty/line_descriptor/include/line_descriptor_custom.hpp create mode 100755 3rdparty/line_descriptor/lib/liblinedesc.so create mode 100644 3rdparty/line_descriptor/src/LSDDetector_custom.cpp create mode 100644 3rdparty/line_descriptor/src/binary_descriptor_custom.cpp create mode 100644 3rdparty/line_descriptor/src/binary_descriptor_matcher.cpp create mode 100644 3rdparty/line_descriptor/src/bitarray_custom.hpp create mode 100644 3rdparty/line_descriptor/src/bitops_custom.hpp create mode 100644 3rdparty/line_descriptor/src/draw_custom.cpp create mode 100644 3rdparty/line_descriptor/src/precomp_custom.hpp create mode 100644 3rdparty/line_descriptor/src/types_custom.hpp create mode 100644 CMakeLists.txt create mode 100644 LICENCE create mode 100644 README.md create mode 100644 app/run_pipeline.cpp create mode 100644 app/scene_config.ini create mode 100644 build.sh create mode 100755 config/dataset_params.yaml create mode 100644 
include/plsvo/bundle_adjustment.h create mode 100644 include/plsvo/config.h create mode 100644 include/plsvo/depth_filter.h create mode 100644 include/plsvo/feature.h create mode 100644 include/plsvo/feature3D.h create mode 100644 include/plsvo/feature_alignment.h create mode 100644 include/plsvo/feature_detection.h create mode 100644 include/plsvo/frame.h create mode 100644 include/plsvo/frame_handler_base.h create mode 100644 include/plsvo/frame_handler_mono.h create mode 100644 include/plsvo/global.h create mode 100644 include/plsvo/initialization.h create mode 100644 include/plsvo/map.h create mode 100644 include/plsvo/matcher.h create mode 100644 include/plsvo/point.h create mode 100644 include/plsvo/pose_optimizer.h create mode 100644 include/plsvo/reprojector.h create mode 100644 include/plsvo/sceneRepresentation.h create mode 100644 include/plsvo/sparse_img_align.h create mode 100644 src/bundle_adjustment.cpp create mode 100644 src/config.cpp create mode 100644 src/depth_filter.cpp create mode 100644 src/feature.cpp create mode 100644 src/feature3D.cpp create mode 100644 src/feature3D_impl.cpp create mode 100644 src/feature_alignment.cpp create mode 100644 src/feature_detection.cpp create mode 100644 src/frame.cpp create mode 100644 src/frame_handler_base.cpp create mode 100644 src/frame_handler_mono.cpp create mode 100644 src/initialization.cpp create mode 100644 src/map.cpp create mode 100644 src/matcher.cpp create mode 100644 src/point.cpp create mode 100644 src/pose_optimizer.cpp create mode 100644 src/reprojector.cpp create mode 100644 src/sceneRepresentation.cpp create mode 100644 src/sparse_img_align.cpp diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..168171a --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@
+*/bin/*
+*/lib/*
+/build/*
+/3rdparty/line_descriptor/build/*
+svo/datasets/*
+*~*
+*CMakeLists.txt.user
+*.bag*
diff --git a/3rdparty/line_descriptor/CMakeLists.txt b/3rdparty/line_descriptor/CMakeLists.txt new file mode 100644 index 0000000..688c725 --- /dev/null +++ b/3rdparty/line_descriptor/CMakeLists.txt @@ -0,0 +1,30 @@
+project( line-descriptor )
+
+cmake_minimum_required(VERSION 2.8)
+find_package( OpenCV 3 REQUIRED)
+
+if(COMMAND cmake_policy)
+  cmake_policy(SET CMP0003 NEW)
+endif(COMMAND cmake_policy)
+link_directories(${OpenCV_LIBS_DIR})
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+SET(BUILD_SHARED_LIBS ON)
+SET(CMAKE_MODULE_PATH $ENV{CMAKE_MODULE_PATH})
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -O3 -mtune=native -march=native")
+
+set(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/build)
+set(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib)
+
+include_directories( include ${OpenCV_INCLUDE_DIRS} )
+list(APPEND LINK_LIBS ${OpenCV_LIBS} )
+file(GLOB_RECURSE all_include_files RELATIVE "${CMAKE_SOURCE_DIR}" *.h *.hpp)
+
+link_directories(${CMAKE_SOURCE_DIR}/src/)
+file(GLOB_RECURSE all_source_files RELATIVE "${CMAKE_SOURCE_DIR}/src/" *.cpp )
+
+add_custom_target( linedesc_includes DEPENDS ${all_include_files} SOURCES ${all_source_files} )
+
+add_library( linedesc ${all_source_files} )
+target_link_libraries( linedesc ${LINK_LIBS} )
+
diff --git a/3rdparty/line_descriptor/README.md b/3rdparty/line_descriptor/README.md new file mode 100644 index 0000000..6028de0 --- /dev/null +++ b/3rdparty/line_descriptor/README.md @@ -0,0 +1,2 @@
+Binary descriptors for lines extracted from an image
+====================================================
\ No newline at end of file
diff --git 
a/3rdparty/line_descriptor/include/line_descriptor/descriptor_custom.hpp b/3rdparty/line_descriptor/include/line_descriptor/descriptor_custom.hpp new file mode 100644 index 0000000..89a4c91 --- /dev/null +++ b/3rdparty/line_descriptor/include/line_descriptor/descriptor_custom.hpp @@ -0,0 +1,1382 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ //  By downloading, copying, installing or using the software you agree to this license.
+ //  If you do not agree to this license, do not download, install,
+ //  copy or use the software.
+ //
+ //
+ //                           License Agreement
+ //                For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2014, Biagio Montesano, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ //   * Redistribution's of source code must retain the above copyright notice,
+ //     this list of conditions and the following disclaimer.
+ //
+ //   * Redistribution's in binary form must reproduce the above copyright notice,
+ //     this list of conditions and the following disclaimer in the documentation
+ //     and/or other materials provided with the distribution.
+ //
+ //   * The name of the copyright holders may not be used to endorse or promote products
+ //     derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#ifndef __OPENCV_DESCRIPTOR_HPP__
+#define __OPENCV_DESCRIPTOR_HPP__
+
+#include <map>
+#include <vector>
+#include <list>
+
+#if defined _MSC_VER && _MSC_VER <= 1700
+#include <stdint.h>
+#else
+#include <inttypes.h>
+#endif
+
+#include <stdio.h>
+#include <iostream>
+
+#include "opencv2/core/utility.hpp"
+//#include "opencv2/core/private.hpp"
+#include <opencv2/imgproc.hpp>
+#include <opencv2/features2d.hpp>
+#include <opencv2/highgui.hpp>
+#include "opencv2/core.hpp"
+
+/* define data types */
+typedef uint64_t UINT64;
+typedef uint32_t UINT32;
+typedef uint16_t UINT16;
+typedef uint8_t UINT8;
+
+/* define constants */
+#define UINT64_1 ((UINT64)0x01)
+#define UINT32_1 ((UINT32)0x01)
+
+namespace cv
+{
+namespace line_descriptor
+{
+
+//! @addtogroup line_descriptor
+//! @{
+
+/** @brief A class to represent a line
+
+As mentioned above, it was necessary to design a class that fully stores the information needed to
+completely characterize a line and, when required, plot it on the image it was extracted from.
+
+The *KeyLine* class has been created for this purpose; it is mainly inspired by Feature2d's KeyPoint class,
+since KeyLine shares some of *KeyPoint*'s fields, even though some of them assume a different
+meaning when applied to lines.
In particular:
+
+- the *class_id* field is used to gather lines extracted from different octaves that refer to the
+  same line inside the original image (such lines, and the one they represent in the original image, share
+  the same *class_id* value)
+- the *angle* field represents the line's slope with respect to the (positive) X axis
+- the *pt* field represents the line's midpoint
+- the *response* field is computed as the ratio between the line's length and the maximum between the
+  image's width and height
+- the *size* field is the area of the smallest rectangle containing the line
+
+Apart from the fields inspired by the KeyPoint class, KeyLine also stores the line's extremes in the
+original image and in the octave it was extracted from, as well as the line's length and the number of pixels it
+covers.
+ */
+struct CV_EXPORTS KeyLine
+{
+ public:
+  /** orientation of the line */
+  float angle;
+
+  /** object ID, that can be used to cluster keylines by the line they represent */
+  int class_id;
+
+  /** octave (pyramid layer), from which the keyline has been extracted */
+  int octave;
+
+  /** coordinates of the midpoint */
+  Point2f pt;
+
+  /** the response, by which the strongest keylines have been selected.
+   It's represented by the ratio between the line's length and the maximum between the
+   image's width and height */
+  float response;
+
+  /** minimum area containing line */
+  float size;
+
+  /** line's extremes in the original image */
+  float startPointX;
+  float startPointY;
+  float endPointX;
+  float endPointY;
+
+  /** line's extremes in the image it was extracted from */
+  float sPointInOctaveX;
+  float sPointInOctaveY;
+  float ePointInOctaveX;
+  float ePointInOctaveY;
+
+  /** the length of line */
+  float lineLength;
+
+  /** number of pixels covered by the line */
+  int numOfPixels;
+
+  /** Returns the start point of the line in the original image */
+  Point2f getStartPoint() const
+  {
+    return Point2f(startPointX, startPointY);
+  }
+
+  /** Returns the end point of the line in the original image */
+  Point2f getEndPoint() const
+  {
+    return Point2f(endPointX, endPointY);
+  }
+
+  /** Returns the start point of the line in the octave it was extracted from */
+  Point2f getStartPointInOctave() const
+  {
+    return Point2f(sPointInOctaveX, sPointInOctaveY);
+  }
+
+  /** Returns the end point of the line in the octave it was extracted from */
+  Point2f getEndPointInOctave() const
+  {
+    return Point2f(ePointInOctaveX, ePointInOctaveY);
+  }
+
+  /** constructor */
+  KeyLine()
+  {
+  }
+};
+
+/** @brief Class implements both functionalities for detection of lines and computation of their
+binary descriptor.
+
+The class interface is mainly based on those of classical detectors and extractors, such as
+Feature2d's @ref features2d_main and @ref features2d_match. Retrieved information about lines is
+stored in line_descriptor::KeyLine objects.
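+
+A minimal usage sketch (illustrative only; `image` is assumed to be a grayscale cv::Mat):
+@code
+    Ptr<BinaryDescriptor> bd = BinaryDescriptor::createBinaryDescriptor();
+    std::vector<KeyLine> keylines;
+    cv::Mat descriptors;
+    bd->detect( image, keylines );                 // extract lines
+    bd->compute( image, keylines, descriptors );   // compute their binary descriptors
+@endcode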
+ */
+class CV_EXPORTS BinaryDescriptor : public Algorithm
+{
+
+ public:
+  /** @brief List of BinaryDescriptor parameters:
+   */
+  struct CV_EXPORTS Params
+  {
+    /*CV_WRAP*/
+    Params();
+
+    /** the number of image octaves (default = 1) */
+
+    int numOfOctave_;
+
+    /** the width of band; (default: 7) */
+
+    int widthOfBand_;
+
+    /** image's reduction ratio in construction of Gaussian pyramids */
+    int reductionRatio;
+
+    int ksize_;
+
+    /** read parameters from a FileNode object and store them (struct function) */
+    void read( const FileNode& fn );
+
+    /** store parameters to a FileStorage object (struct function) */
+    void write( FileStorage& fs ) const;
+
+  };
+
+  /** @brief Constructor
+
+  @param parameters configuration parameters BinaryDescriptor::Params
+
+  If no argument is provided, constructor sets default values (see comments in the code snippet in
+  previous section). Default values are strongly recommended.
+   */
+  BinaryDescriptor( const BinaryDescriptor::Params &parameters = BinaryDescriptor::Params() );
+
+  /** @brief Create a BinaryDescriptor object with default parameters (or with the ones provided)
+  and return a smart pointer to it
+   */
+  static Ptr<BinaryDescriptor> createBinaryDescriptor();
+  static Ptr<BinaryDescriptor> createBinaryDescriptor( Params parameters );
+
+  /** destructor */
+  ~BinaryDescriptor();
+
+  /** @brief Get current number of octaves
+  */
+  int getNumOfOctaves();/*CV_WRAP*/
+  /** @brief Set number of octaves
+  @param octaves number of octaves
+   */
+  void setNumOfOctaves( int octaves );/*CV_WRAP*/
+  /** @brief Get current width of bands
+   */
+  int getWidthOfBand();/*CV_WRAP*/
+  /** @brief Set width of bands
+  @param width width of bands
+   */
+  void setWidthOfBand( int width );/*CV_WRAP*/
+  /** @brief Get current reduction ratio (used in Gaussian pyramids)
+   */
+  int getReductionRatio();/*CV_WRAP*/
+  /** @brief Set reduction ratio (used in Gaussian pyramids)
+  @param rRatio reduction ratio
+   */
+  void setReductionRatio( int rRatio );
+
+  /** @brief Read parameters from a FileNode object and store them
+
+  @param fn source FileNode file
+   */
+  virtual void read( const cv::FileNode& fn );
+
+  /** @brief Store parameters to a FileStorage object
+
+  @param fs output FileStorage file
+   */
+  virtual void write( cv::FileStorage& fs ) const;
+
+  /** @brief Requires line detection
+
+  @param image input image
+  @param keypoints vector that will store extracted lines
+  @param mask mask matrix to detect only KeyLines of interest
+   */
+  void detect( const Mat& image, CV_OUT std::vector<KeyLine>& keypoints, const Mat& mask = Mat() );
+
+  /** @overload
+
+  @param images input images
+  @param keylines set of vectors that will store extracted lines for one or more images
+  @param masks vector of mask matrices to detect only KeyLines of interest from each input image
+  */
+  void detect( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, const std::vector<Mat>& masks =
+      std::vector<Mat>() ) const;
+
+  /** @brief Requires descriptors computation
+
+  @param image input image
+  @param keylines vector containing lines for which descriptors must be computed
+  @param descriptors matrix that will store final descriptors
+  @param returnFloatDescr flag (when set to true, original non-binary descriptors are returned)
+   */
+  void compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyLine>& keylines, CV_OUT Mat& descriptors, bool returnFloatDescr = false ) const;
+
+  /** @overload
+
+  @param images input images
+  @param keylines set of vectors containing lines for which descriptors must be computed
+  @param descriptors set of matrices that will store final descriptors
+  @param returnFloatDescr flag (when set
to true, original non-binary descriptors are returned)
+   */
+  void compute( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, std::vector<Mat>& descriptors, bool returnFloatDescr =
+      false ) const;
+
+  /** @brief Return descriptor size
+   */
+  int descriptorSize() const;
+
+  /** @brief Return data type
+   */
+  int descriptorType() const;
+
+  /** returns norm mode */
+  /*CV_WRAP*/
+  int defaultNorm() const;
+
+  /** @brief Define operator '()' to perform detection of KeyLines and computation of descriptors in a row.
+
+  @param image input image
+  @param mask mask matrix to select which lines in KeyLines must be accepted among the ones
+  extracted (used when *keylines* is not empty)
+  @param keylines vector that contains input lines (when filled, the detection part will be skipped
+  and input lines will be passed as input to the algorithm computing descriptors)
+  @param descriptors matrix that will store final descriptors
+  @param useProvidedKeyLines flag (when set to true, detection phase will be skipped and only
+  computation of descriptors will be executed, using lines provided in *keylines*)
+  @param returnFloatDescr flag (when set to true, original non-binary descriptors are returned)
+   */
+  virtual void operator()( InputArray image, InputArray mask, CV_OUT std::vector<KeyLine>& keylines, OutputArray descriptors,
+      bool useProvidedKeyLines = false, bool returnFloatDescr = false ) const;
+
+  //protected:
+  /** implementation of line detection */
+  virtual void detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, const Mat& mask = Mat() ) const;
+
+  /** implementation of descriptors' computation */
+  virtual void computeImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, Mat& descriptors, bool returnFloatDescr,
+      bool useDetectionData ) const;
+
+  //private:
+  /** struct to represent lines extracted from an octave */
+  struct OctaveLine
+  {
+    unsigned int octaveCount;  //the octave in which this line was detected
+    unsigned int lineIDInOctave;  //the line ID in that octave image
+    unsigned int lineIDInScaleLineVec;  //the line ID in the scale line vector
+    float lineLength;  //the length of line in original image scale
+  };
+
+  // A 2D line (normal equation parameters).
+  struct SingleLine
+  {
+    //note: rho and theta are based on coordinate origin, i.e. the top-left corner of image
+    double rho;  //unit: pixel length
+    double theta;  //unit: rad
+    double linePointX;  // = rho * cos(theta);
+    double linePointY;  // = rho * sin(theta);
+    //for EndPoints, the coordinate origin is the top-left corner of image.
+    double startPointX;
+    double startPointY;
+    double endPointX;
+    double endPointY;
+    //direction of a line, the angle between positive line direction (dark side is on the left) and positive X axis.
+    double direction;
+    //mean gradient magnitude
+    double gradientMagnitude;
+    //mean gray value of pixels in dark side of line
+    double darkSideGrayValue;
+    //mean gray value of pixels in light side of line
+    double lightSideGrayValue;
+    //the length of line
+    double lineLength;
+    //the width of line;
+    double width;
+    //number of pixels
+    int numOfPixels;
+    //the descriptor of line
+    std::vector<double> descriptor;
+  };
+
+  // Specifies a vector of lines.
+  typedef std::vector<SingleLine> Lines_list;
+
+  struct OctaveSingleLine
+  {
+    /*endPoints, the coordinate origin is the top-left corner of the original image.
+     *startPointX = sPointInOctaveX * (factor)^octaveCount; */
+    float startPointX;
+    float startPointY;
+    float endPointX;
+    float endPointY;
+    //endPoints, the coordinate origin is the top-left corner of the octave image.
+    float sPointInOctaveX;
+    float sPointInOctaveY;
+    float ePointInOctaveX;
+    float ePointInOctaveY;
+    //direction of a line, the angle between positive line direction (dark side is on the left) and positive X axis.
+    float direction;
+    //the summation of gradient magnitudes of pixels on lines
+    float salience;
+    //the length of line
+    float lineLength;
+    //number of pixels
+    unsigned int numOfPixels;
+    //the octave in which this line was detected
+    unsigned int octaveCount;
+    //the descriptor of line
+    std::vector<float> descriptor;
+  };
+
+  struct Pixel
+  {
+    unsigned int x;  //X coordinate
+    unsigned int y;  //Y coordinate
+  };
+  struct EdgeChains
+  {
+    std::vector<unsigned int> xCors;  //all the x coordinates of edge points
+    std::vector<unsigned int> yCors;  //all the y coordinates of edge points
+    std::vector<unsigned int> sId;  //the start index of each edge in the coordinate arrays
+    unsigned int numOfEdges;  //the number of edges whose lengths are larger than minLineLen; numOfEdges < sId.size;
+  };
+
+  struct LineChains
+  {
+    std::vector<unsigned int> xCors;  //all the x coordinates of line points
+    std::vector<unsigned int> yCors;  //all the y coordinates of line points
+    std::vector<unsigned int> sId;  //the start index of each line in the coordinate arrays
+    unsigned int numOfLines;  //the number of lines whose lengths are larger than minLineLen; numOfLines < sId.size;
+  };
+
+  typedef std::list<Pixel> PixelChain;  //each edge is a pixel chain
+
+  struct EDLineParam
+  {
+    int ksize;
+    float sigma;
+    float gradientThreshold;
+    float anchorThreshold;
+    int scanIntervals;
+    int minLineLen;
+    double lineFitErrThreshold;
+  };
+
+  #define RELATIVE_ERROR_FACTOR 100.0
+  #define MLN10 2.30258509299404568402
+  #define log_gamma(x) ((x)>15.0?log_gamma_windschitl(x):log_gamma_lanczos(x))
+
+  /** This class is used to detect lines from input image.
+   * First, edges are extracted from input image following the method presented in Cihan Topal and
+   * Cuneyt Akinlar's paper: "Edge Drawing: A Heuristic Approach to Robust Real-Time Edge Detection", 2010.
+   * Then, lines are extracted from the edge image following the method presented in Cuneyt Akinlar and
+   * Cihan Topal's paper: "EDLines: A real-time line segment detector with a false detection control", 2011.
+   * Note: the linking step of edge detection differs slightly from the Edge Drawing algorithm
+   * described in the paper: the edge chain doesn't stop when the pixel direction changes.
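+   *
+   * A minimal usage sketch (illustrative; `gray` is assumed to be an 8-bit grayscale cv::Mat):
+   * @code
+   * EDLineDetector edlines;
+   * LineChains chains;
+   * int res = edlines.EDline( gray, chains );  // res == -1 signals an error
+   * @endcode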
+   */
+  class EDLineDetector
+  {
+   public:
+    EDLineDetector();
+    EDLineDetector( EDLineParam param );
+    ~EDLineDetector();
+
+    /*extract edges from image
+     *image:    In, gray image;
+     *edges:    Out, store the edges, each edge is a pixel chain
+     *return -1: an error happened
+     */
+    int EdgeDrawing( cv::Mat &image, EdgeChains &edgeChains );
+
+    /*extract lines from image
+     *image:    In, gray image;
+     *lines:    Out, store the extracted lines,
+     *return -1: an error happened
+     */
+    int EDline( cv::Mat &image, LineChains &lines );
+
+    /** extract lines from image, and store them */
+    int EDline( cv::Mat &image );
+
+    cv::Mat dxImg_;  //store the dxImg;
+
+    cv::Mat dyImg_;  //store the dyImg;
+
+    cv::Mat gImgWO_;  //store the gradient image without threshold;
+
+    LineChains lines_;  //store the detected line chains;
+
+    //store the line equation coefficients, vec3=[w1,w2,w3] for line w1*x + w2*y + w3=0;
+    std::vector<std::vector<double> > lineEquations_;
+
+    //store the line endpoints, [x1,y1,x2,y2]
+    std::vector<std::vector<float> > lineEndpoints_;
+
+    //store the line direction
+    std::vector<float> lineDirection_;
+
+    //store the line salience, which is the summation of gradients of pixels on line
+    std::vector<float> lineSalience_;
+
+    // image sizes
+    unsigned int imageWidth;
+    unsigned int imageHeight;
+
+    /*The threshold of line fit error;
+     *If lineFitErr is larger than this threshold, then
+     *the pixel chain is not accepted as a single line segment.*/
+    double lineFitErrThreshold_;
+
+    /*the threshold of pixel gradient magnitude.
+     *Only those pixels whose gradient magnitudes are larger than this threshold will be
+     *taken as possible edge points. Default value is 36*/
+    short gradienThreshold_;
+
+    /*If the pixel's gradient value is bigger than both of its neighbors by a
+     *certain threshold (ANCHOR_THRESHOLD), the pixel is marked to be an anchor.
+     *Default value is 8*/
+    unsigned char anchorThreshold_;
+
+    /*anchor testing can be performed at different scan intervals, i.e.,
+     *every row/column, every second row/column etc.
+     *Default value is 2*/
+    unsigned int scanIntervals_;
+
+    int minLineLen_;  //minimal acceptable line length
+
+   private:
+    void InitEDLine_();
+
+    /*For an input edge chain, find the best fit line, the default chain length is minLineLen_
+     *xCors:  In, pointer to the X coordinates of pixel chain;
+     *yCors:  In, pointer to the Y coordinates of pixel chain;
+     *offsetS:In, start index of this chain in vector;
+     *lineEquation: Out, [a,b] which are the coefficients of lines y=ax+b(horizontal) or x=ay+b(vertical);
+     *return:  line fit error; -1:error happens;
+     */
+    double LeastSquaresLineFit_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, std::vector<double> &lineEquation );
+
+    /*For an input pixel chain, find the best fit line. Only do the update based on new points.
+     *For A*x=v, least squares estimation of x = Inv(A^T * A) * (A^T * v);
+     *If some new observations are added, i.e., [A; A'] * x = [v; v'],
+     *then x' = Inv(A^T * A + (A')^T * A') * (A^T * v + (A')^T * v');
+     *xCors:  In, pointer to the X coordinates of pixel chain;
+     *yCors:  In, pointer to the Y coordinates of pixel chain;
+     *offsetS:In, start index of this chain in vector;
+     *newOffsetS: In, start index of extended part;
+     *offsetE:In, end index of this chain in vector;
+     *lineEquation: Out, [a,b] which are the coefficients of lines y=ax+b(horizontal) or x=ay+b(vertical);
+     *return:  line fit error; -1:error happens;
+     */
+    double LeastSquaresLineFit_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, unsigned int newOffsetS, unsigned int offsetE,
+        std::vector<double> &lineEquation );
+
+    /** Validate line based on the Helmholtz principle, which basically states that
+     * for a structure to be perceptually meaningful, the expectation of this structure
+     * by chance must be very low.
+     */
+    bool LineValidation_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, unsigned int offsetE, std::vector<double> &lineEquation,
+        float &direction );
+
+    bool bValidate_;  //flag to decide whether line will be validated
+
+    int ksize_;  //the size of Gaussian kernel: ksize X ksize, default value is 5.
+
+    float sigma_;  //the sigma of Gaussian kernel, default value is 1.0.
+
+    /*For example, there are two edges in the image:
+     *edge1 = [(7,4), (8,5), (9,6),| (10,7)|, (11, 8), (12,9)] and
+     *edge2 = [(14,9), (15,10), (16,11), (17,12),| (18, 13)|, (19,14)] ; then we store them as follows:
+     *pFirstPartEdgeX_ = [10, 11, 12, 18, 19];//store the first part of each edge[from middle to end]
+     *pFirstPartEdgeY_ = [7, 8, 9, 13, 14];
+     *pFirstPartEdgeS_ = [0,3,5];// the index of start point of first part of each edge
+     *pSecondPartEdgeX_ = [10, 9, 8, 7, 18, 17, 16, 15, 14];//store the second part of each edge[from middle to front]
+     *pSecondPartEdgeY_ = [7, 6, 5, 4, 13, 12, 11, 10, 9];//anchor points(10, 7) and (18, 13) are stored again
+     *pSecondPartEdgeS_ = [0, 4, 9];// the index of start point of second part of each edge
+     *This storage order results from the order of the edge detection process.
+     *For each edge, start from one anchor point, first go right, then go left, or first go down, then go up*/
+
+    //store the X coordinates of the first part of the pixels for chains
+    unsigned int *pFirstPartEdgeX_;
+
+    //store the Y coordinates of the first part of the pixels for chains
+    unsigned int *pFirstPartEdgeY_;
+
+    //store the start index of every edge chain in the first part arrays
+    unsigned int *pFirstPartEdgeS_;
+
+    //store the X coordinates of the second part of the pixels for chains
+    unsigned int *pSecondPartEdgeX_;
+
+    //store the Y coordinates of the second part of the pixels for chains
+    unsigned int *pSecondPartEdgeY_;
+
+    //store the start index of every edge chain in the second part arrays
+    unsigned int *pSecondPartEdgeS_;
+
+    //store the X coordinates of anchors
+    unsigned int *pAnchorX_;
+
+    //store the Y coordinates of anchors
+    unsigned int *pAnchorY_;
+
+    //edges
+    cv::Mat edgeImage_;
+
+    cv::Mat gImg_;  //store the gradient image;
+
+    cv::Mat dirImg_;  //store the direction image
+
+    double logNT_;
+
+    cv::Mat_<int> ATA;  //the previous matrix of A^T * A;
+
+    cv::Mat_<int> ATV;  //the previous vector of A^T * V;
+
+    cv::Mat_<int> fitMatT;  //the matrix used in line fit function;
+
+    cv::Mat_<int> fitVec;  //the vector used in line fit function;
+
+    cv::Mat_<int> tempMatLineFit;  //the matrix used in line fit function;
+
+    cv::Mat_<int> tempVecLineFit;  //the vector used in line fit function;
+
+    /** Compare doubles by relative error.
+     The resulting rounding error after floating point computations
+     depends on the specific operations done. The same number computed by
+     different algorithms could present different rounding errors. For a
+     useful comparison, an estimation of the relative rounding error
+     should be considered and compared to a factor times EPS. The factor
+     should be related to the accumulated rounding error in the chain of
+     computation. Here, as a simplification, a fixed factor is used.
+     */
+    static int double_equal( double a, double b )
+    {
+      double abs_diff, aa, bb, abs_max;
+      /* trivial case */
+      if( a == b )
+        return true;
+      abs_diff = fabs( a - b );
+      aa = fabs( a );
+      bb = fabs( b );
+      abs_max = aa > bb ? aa : bb;
+
+      /* DBL_MIN is the smallest normalized number, thus, the smallest
+       number whose relative error is bounded by DBL_EPSILON. For
+       smaller numbers, the same quantization steps as for DBL_MIN
+       are used. Then, for smaller numbers, a meaningful "relative"
+       error should be computed by dividing the difference by DBL_MIN. */
+      if( abs_max < DBL_MIN )
+        abs_max = DBL_MIN;
+
+      /* equal if relative error <= factor x eps */
+      return ( abs_diff / abs_max ) <= ( RELATIVE_ERROR_FACTOR * DBL_EPSILON );
+    }
+
+    /** Computes the natural logarithm of the absolute value of
+     the gamma function of x using the Lanczos approximation.
+     See http://www.rskey.org/gamma.htm
+     The formula used is
+     @f[
+     \Gamma(x) = \frac{ \sum_{n=0}^{N} q_n x^n }{ \Pi_{n=0}^{N} (x+n) } (x+5.5)^{x+0.5} e^{-(x+5.5)}
+     @f]
+     so
+     @f[
+     \log\Gamma(x) = \log\left( \sum_{n=0}^{N} q_n x^n \right) + (x+0.5) \log(x+5.5) - (x+5.5) - \sum_{n=0}^{N} \log(x+n)
+     @f]
+     and
+     q0 = 75122.6331530,
+     q1 = 80916.6278952,
+     q2 = 36308.2951477,
+     q3 = 8687.24529705,
+     q4 = 1168.92649479,
+     q5 = 83.8676043424,
+     q6 = 2.50662827511.
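+     As a quick numeric check of the approximation: since \f$\Gamma(6) = 5! = 120\f$,
+     log_gamma(6.0) should return approximately \f$\ln 120 \approx 4.7875\f$.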
+ */ + static double log_gamma_lanczos( double x ) + { + static double q[7] = + { 75122.6331530, 80916.6278952, 36308.2951477, 8687.24529705, 1168.92649479, 83.8676043424, 2.50662827511 }; + double a = ( x + 0.5 ) * log( x + 5.5 ) - ( x + 5.5 ); + double b = 0.0; + int n; + for ( n = 0; n < 7; n++ ) + { + a -= log( x + (double) n ); + b += q[n] * pow( x, (double) n ); + } + return a + log( b ); + } + + /** Computes the natural logarithm of the absolute value of + the gamma function of x using Windschitl method. + See http://www.rskey.org/gamma.htm + The formula used is + @f[ + \Gamma(x) = \sqrt{\frac{2\pi}{x}} \left( \frac{x}{e} + \sqrt{ x\sinh(1/x) + \frac{1}{810x^6} } \right)^x + @f] + so + @f[ + \log\Gamma(x) = 0.5\log(2\pi) + (x-0.5)\log(x) - x + + 0.5x\log\left( x\sinh(1/x) + \frac{1}{810x^6} \right). + @f] + This formula is a good approximation when x > 15. + */ + static double log_gamma_windschitl( double x ) + { + return 0.918938533204673 + ( x - 0.5 ) * log( x ) - x + 0.5 * x * log( x * sinh( 1 / x ) + 1 / ( 810.0 * pow( x, 6.0 ) ) ); + } + + /** Computes -log10(NFA). + NFA stands for Number of False Alarms: + @f[ + \mathrm{NFA} = NT \cdot B(n,k,p) + @f] + - NT - number of tests + - B(n,k,p) - tail of binomial distribution with parameters n,k and p: + @f[ + B(n,k,p) = \sum_{j=k}^n + \left(\begin{array}{c}n\\j\end{array}\right) + p^{j} (1-p)^{n-j} + @f] + The value -log10(NFA) is equivalent but more intuitive than NFA: + - -1 corresponds to 10 mean false alarms + - 0 corresponds to 1 mean false alarm + - 1 corresponds to 0.1 mean false alarms + - 2 corresponds to 0.01 mean false alarms + - ... + Used this way, the bigger the value, better the detection, + and a logarithmic scale is used. + @param n,k,p binomial parameters. + @param logNT logarithm of Number of Tests + The computation is based in the gamma function by the following + relation: + @f[ + \left(\begin{array}{c}n\\k\end{array}\right) + = \frac{ \Gamma(n+1) }{ \Gamma(k+1) \cdot \Gamma(n-k+1) }. + @f] + We use efficient algorithms to compute the logarithm of + the gamma function. + To make the computation faster, not all the sum is computed, part + of the terms are neglected based on a bound to the error obtained + (an error of 10% in the result is accepted). + */ + static double nfa( int n, int k, double p, double logNT ) + { + double tolerance = 0.1; /* an error of 10% in the result is accepted */ + double log1term, term, bin_term, mult_term, bin_tail, err, p_term; + int i; + + /* check parameters */ + if( n < 0 || k < 0 || k > n || p <= 0.0 || p >= 1.0 ) + { + std::cout << "nfa: wrong n, k or p values." << std::endl; + exit( 0 ); + } + /* trivial cases */ + if( n == 0 || k == 0 ) + return -logNT; + if( n == k ) + return -logNT - (double) n * log10( p ); + + /* probability term */ + p_term = p / ( 1.0 - p ); + + /* compute the first term of the series */ + /* + binomial_tail(n,k,p) = sum_{i=k}^n bincoef(n,i) * p^i * (1-p)^{n-i} + where bincoef(n,i) are the binomial coefficients. + But + bincoef(n,k) = gamma(n+1) / ( gamma(k+1) * gamma(n-k+1) ). + We use this to compute the first term. Actually the log of it. + */ + log1term = log_gamma( (double) n + 1.0 )- log_gamma( (double ) k + 1.0 )- log_gamma( (double ) ( n - k ) + 1.0 ) ++ (double) k * log( p ) ++ (double) ( n - k ) * log( 1.0 - p ); +term = exp( log1term ); + +/* in some cases no more computations are needed */ +if( double_equal( term, 0.0 ) ) +{ /* the first term is almost zero */ + if( (double) k > (double) n * p ) /* at begin or end of the tail? 
*/
+    return -log1term / MLN10 - logNT; /* end: use just the first term */
+  else
+    return -logNT; /* begin: the tail is roughly 1 */
+}
+
+/* compute more terms if needed */
+bin_tail = term;
+for ( i = k + 1; i <= n; i++ )
+{
+  /* As
+   term_i = bincoef(n,i) * p^i * (1-p)^(n-i)
+   and
+   bincoef(n,i)/bincoef(n,i-1) = (n-i+1) / i,
+   then,
+   term_i / term_i-1 = (n-i+1)/i * p/(1-p)
+   and
+   term_i = term_i-1 * (n-i+1)/i * p/(1-p).
+   p/(1-p) is computed only once and stored in 'p_term'.
+   */
+  bin_term = (double) ( n - i + 1 ) / (double) i;
+  mult_term = bin_term * p_term;
+  term *= mult_term;
+  bin_tail += term;
+  if( bin_term < 1.0 )
+  {
+    /* When bin_term<1 then mult_term_j < mult_term_i for j>i.
+     Then, the error on the binomial tail when truncated at
+     the i term can be bounded by a geometric series of form
+     term_i * sum mult_term_i^j. */
+    err = term * ( ( 1.0 - pow( mult_term, (double) ( n - i + 1 ) ) ) / ( 1.0 - mult_term ) - 1.0 );
+    /* One wants an error at most of tolerance*final_result, or:
+     tolerance * abs(-log10(bin_tail)-logNT).
+     Now, the error that can be accepted on bin_tail is
+     given by tolerance*final_result divided by the derivative
+     of -log10(x) when x=bin_tail. that is:
+     tolerance * abs(-log10(bin_tail)-logNT) / (1/bin_tail)
+     Finally, we truncate the tail if the error is less than:
+     tolerance * abs(-log10(bin_tail)-logNT) * bin_tail */
+    if( err < tolerance * fabs( -log10( bin_tail ) - logNT ) * bin_tail )
+      break;
+  }
+}
+return -log10( bin_tail ) - logNT;
+}
+};
+
+  // Specifies a vector of lines.
+typedef std::vector<OctaveSingleLine> LinesVec;
+
+// each element in ScaleLines is a vector of lines
+// which correspond to the same line detected in different octave images.
+typedef std::vector<LinesVec> ScaleLines;
+
+/* compute Gaussian pyramids */
+void computeGaussianPyramid( const Mat& image, const int numOctaves );
+
+/* compute Sobel's derivatives */
+void computeSobel( const Mat& image, const int numOctaves );
+
+/* conversion of an LBD descriptor to its binary representation */
+unsigned char binaryConversion( float* f1, float* f2 );
+
+/* compute LBD descriptors using EDLine extractor */
+int computeLBD( ScaleLines &keyLines, bool useDetectionData = false );
+
+/* gathers lines in groups using EDLine extractor.
+ Each group contains the same line, detected in different octaves */
+int OctaveKeyLines( cv::Mat& image, ScaleLines &keyLines );
+
+/* the local Gaussian coefficient applied to the orthogonal line direction within each band */
+std::vector<double> gaussCoefL_;
+
+/* the global Gaussian coefficient applied to each row within line support region */
+std::vector<double> gaussCoefG_;
+
+/* descriptor parameters */
+Params params;
+
+/* vector of sizes of downsampled and blurred images */
+std::vector<cv::Size> images_sizes;
+
+/*For each octave of image, we define an EDLineDetector, because we can get gradient images (dxImg, dyImg, gImg)
+ *from the EDLineDetector class without extra computation cost. Another reason is that, if we use
+ *a single EDLineDetector to detect lines in different octaves of images, then we need to allocate and release
+ *memory for gradient images (dxImg, dyImg, gImg) repeatedly for their varying sizes*/
+std::vector<Ptr<EDLineDetector> > edLineVec_;
+
+/* Sobel's derivatives */
+std::vector<cv::Mat> dxImg_vector, dyImg_vector;
+
+/* Gaussian pyramid */
+std::vector<cv::Mat> octaveImages;
+
+};
+
+/**
+Lines extraction methodology
+----------------------------
+
+The lines extraction methodology described in the following is mainly based on @cite EDL .
The
+extraction starts with a Gaussian pyramid generated from an original image, downsampled N-1 times
+and blurred N times, to obtain N layers (one for each octave), with layer 0 corresponding to the input
+image. Then, from each layer (octave) in the pyramid, lines are extracted using the LSD algorithm.
+
+Unlike the EDLine extractor used in the original article, LSD furnishes information only
+about a line's extremes; thus, additional information regarding the line's slope and equation is computed
+analytically. The number of pixels is obtained using *LineIterator*. Extracted lines are
+returned in the form of KeyLine objects, but since extraction is based on a method different from
+the one used in the *BinaryDescriptor* class, the data associated with a line's extremes in the original image and
+in the octave it was extracted from coincide. KeyLine's field *class_id* is used as an index to
+indicate the order of extraction of a line inside a single octave.
+*/
+class CV_EXPORTS LSDDetectorC : public Algorithm
+{
+public:
+
+/* constructor */
+/* CV_WRAP */
+LSDDetectorC()
+{
+}
+;
+
+struct LSDOptions{
+    int    refine;
+    double scale;
+    double sigma_scale;
+    double quant;
+    double ang_th;
+    double log_eps;
+    double density_th;
+    int    n_bins;
+    double min_length;
+} options;
+
+/** @brief Creates an LSDDetectorC object, using smart pointers.
+ */
+static Ptr<LSDDetectorC> createLSDDetectorC();
+
+/** @brief Detect lines inside an image.
+
+@param image input image
+@param keylines vector that will store extracted lines
+@param scale scale factor used in pyramids generation
+@param numOctaves number of octaves inside pyramid
+@param mask mask matrix to detect only KeyLines of interest
+ */
+void detect( const Mat& image, CV_OUT std::vector<KeyLine>& keylines, int scale, int numOctaves, const Mat& mask = Mat() );
+void detect( const Mat& image, CV_OUT std::vector<KeyLine>& keylines, int scale, int numOctaves, LSDOptions opts, const Mat& mask = Mat() );
+
+
+/** @overload
+@param images input images
+@param keylines set of vectors that will store extracted lines for one or more images
+@param scale scale factor used in pyramids generation
+@param numOctaves number of octaves inside pyramid
+@param masks vector of mask matrices to detect only KeyLines of interest from each input image
+*/
+void detect( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, int scale, int numOctaves,
+const std::vector<Mat>& masks = std::vector<Mat>() ) const;
+
+
+private:
+/* compute Gaussian pyramid of input image */
+void computeGaussianPyramid( const Mat& image, int numOctaves, int scale );
+
+/* implementation of line detection */
+void detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, int numOctaves, int scale, const Mat& mask ) const;
+void detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, int numOctaves, int scale, LSDOptions opts, const Mat& mask ) const;
+
+/* matrices for Gaussian pyramids */
+std::vector<cv::Mat> gaussianPyrs;
+};
+
+/** @brief Furnishes all functionalities for querying a dataset provided by the user or internal to the
+class (which the user must, in any case, populate), on the model of @ref features2d_match
+
+
+Once descriptors have been extracted from an image (whether they represent lines or points), it
+becomes interesting to be able to match a descriptor with another one extracted from a different
+image and representing the same line or point, seen from a different perspective or on a different
+scale.
In reaching such a goal, the main challenge is designing an efficient search algorithm to
+associate a query descriptor to one extracted from a dataset. In the following, a matching modality
+based on *Multi-Index Hashing (MiHashing)* will be described.
+
+Multi-Index Hashing
+-------------------
+
+The theory described in this section is based on @cite MIH . Given a dataset populated with binary
+codes, each code is indexed *m* times into *m* different hash tables, according to the *m* substrings it
+has been divided into. Thus, given a query code, all the entries close to it in at least one
+substring are returned by the search as *neighbor candidates*. Returned entries are then checked for
+validity by verifying that their full codes are not distant (in Hamming space) more than *r* bits
+from the query code. In detail, each binary code **h** composed of *b* bits is divided into *m*
+disjoint substrings \f$\mathbf{h}^{(1)}, ..., \mathbf{h}^{(m)}\f$, each with length
+\f$\lfloor b/m \rfloor\f$ or \f$\lceil b/m \rceil\f$ bits. Formally, when two codes **h** and **g** differ
+by at most *r* bits, in at least one of their *m* substrings they differ by at most
+\f$\lfloor r/m \rfloor\f$ bits. In particular, when \f$||\mathbf{h}-\mathbf{g}||_H \le r\f$ (where \f$||.||_H\f$
+is the Hamming norm), there must exist a substring *k* (with \f$1 \le k \le m\f$) such that
+
+\f[||\mathbf{h}^{(k)} - \mathbf{g}^{(k)}||_H \le \left\lfloor \frac{r}{m} \right\rfloor .\f]
+
+That means that if the Hamming distance between each of the *m* substrings is strictly greater than
+\f$\lfloor r/m \rfloor\f$, then \f$||\mathbf{h}-\mathbf{g}||_H\f$ must be larger than *r*, which is a
+contradiction. If the codes in the dataset are divided into *m* substrings, then *m* tables will be
+built. Given a query **q** with substrings \f$\{\mathbf{q}^{(i)}\}^m_{i=1}\f$, the *i*-th hash table is
+searched for entries distant at most \f$\lfloor r/m \rfloor\f$ from \f$\mathbf{q}^{(i)}\f$, and a set of
+candidates \f$\mathcal{N}_i(\mathbf{q})\f$ is obtained. The union of sets
+\f$\mathcal{N}(\mathbf{q}) = \bigcup_i \mathcal{N}_i(\mathbf{q})\f$ is a superset of the *r*-neighbors
+of **q**. Then, the last step of the algorithm is computing the Hamming distance between **q** and each
+element in \f$\mathcal{N}(\mathbf{q})\f$, deleting the codes that are distant more than *r* from **q**.
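+
+As a purely illustrative example: with 256-bit codes (\f$b = 256\f$) split into \f$m = 8\f$
+substrings of 32 bits each, a search radius of \f$r = 16\f$ bits implies that every *r*-neighbor of
+a query matches the query, in at least one substring, within \f$\lfloor 16/8 \rfloor = 2\f$ bits;
+each of the 8 hash tables therefore only needs to be probed within Hamming radius 2.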
+*/
+class CV_EXPORTS BinaryDescriptorMatcher : public Algorithm
+{
+
+public:
+/** @brief For every input query descriptor, retrieve the best matching one from a dataset provided by the user
+or from the one internal to the class
+
+@param queryDescriptors query descriptors
+@param trainDescriptors dataset of descriptors furnished by user
+@param matches vector to host retrieved matches
+@param mask mask to select which input descriptors must be matched to one in dataset
+ */
+void match( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<DMatch>& matches, const Mat& mask = Mat() ) const;
+
+/** @overload
+@param queryDescriptors query descriptors
+@param matches vector to host retrieved matches
+@param masks vector of masks to select which input descriptors must be matched to one in dataset
+(the *i*-th mask in vector indicates whether each input query can be matched with descriptors in
+dataset relative to *i*-th image)
+*/
+void match( const Mat& queryDescriptors, std::vector<DMatch>& matches, const std::vector<Mat>& masks = std::vector<Mat>() );
+
+/** @brief For every input query descriptor, retrieve the best *k* matching ones from a dataset provided by
+the user or from the one internal to the class
+
+@param queryDescriptors query descriptors
+@param trainDescriptors dataset of descriptors furnished by user
+@param matches vector to host retrieved matches
+@param k number of the closest descriptors to be returned for every input query
+@param mask mask to select which input descriptors must be matched to ones in dataset
+@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any
+matches for a given query is not inserted in final result)
+ */
+void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<std::vector<DMatch> >& matches, int k, const Mat& mask = Mat(),
+bool compactResult = false ) const;
+
+/** @overload
+@param queryDescriptors query descriptors
+@param matches vector to host retrieved matches
+@param k number of the closest descriptors to be returned for every input query
+@param masks vector of masks to select which input descriptors must be matched to ones in dataset
+(the *i*-th mask in vector indicates whether each input query can be matched with descriptors in
+dataset relative to *i*-th image)
+@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any
+matches for a given query is not inserted in final result)
+*/
+void knnMatch( const Mat& queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k, const std::vector<Mat>& masks = std::vector<Mat>(),
+bool compactResult = false );
+
+/** @brief For every input query descriptor, retrieve, from a dataset provided by the user or from the one
+internal to the class, all the descriptors that are not further than *maxDistance* from the input query
+
+@param queryDescriptors query descriptors
+@param trainDescriptors dataset of descriptors furnished by user
+@param matches vector to host retrieved matches
+@param maxDistance search radius
+@param mask mask to select which input descriptors must be matched to ones in dataset
+@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any
+matches for a given query is not inserted in final result)
+ */
+void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
+const Mat& mask = Mat(), bool compactResult = false ) const;
+
+/** @overload
+@param queryDescriptors query descriptors
+@param matches vector to host retrieved matches
+@param maxDistance
search radius
+@param masks vector of masks to select which input descriptors must be matched to ones in dataset
+(the *i*-th mask in vector indicates whether each input query can be matched with descriptors in
+dataset relative to *i*-th image)
+@param compactResult flag to obtain a compact result (if true, a vector that doesn't contain any
+matches for a given query is not inserted in final result)
+*/
+void radiusMatch( const Mat& queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance, const std::vector<Mat>& masks =
+std::vector<Mat>(),
+bool compactResult = false );
+
+/** @brief Store locally new descriptors to be inserted in dataset, without updating dataset.
+
+@param descriptors matrices containing descriptors to be inserted into dataset
+
+@note Each matrix *i* in **descriptors** should contain descriptors relative to lines extracted from
+*i*-th image.
+ */
+void add( const std::vector<Mat>& descriptors );
+
+/** @brief Update dataset by inserting into it all descriptors that were stored locally by *add* function.
+
+@note Every time this function is invoked, current dataset is deleted and locally stored descriptors
+are inserted into dataset. The locally stored copy of just inserted descriptors is then removed.
+ */
+void train();
+
+/** @brief Create a BinaryDescriptorMatcher object and return a smart pointer to it.
+ */
+static Ptr<BinaryDescriptorMatcher> createBinaryDescriptorMatcher();
+
+/** @brief Clear dataset and internal data
+ */
+void clear();
+
+/** @brief Constructor.
+
+The BinaryDescriptorMatcher constructed is able to store and manage 256-bit long entries.
+ */
+BinaryDescriptorMatcher();
+
+/** destructor */
+~BinaryDescriptorMatcher()
+{
+}
+
+private:
+class BucketGroup
+{
+
+public:
+/** constructor */
+BucketGroup();
+
+/** destructor */
+~BucketGroup();
+
+/** insert data into the bucket */
+void insert( int subindex, UINT32 data );
+
+/** perform a query to the bucket */
+UINT32* query( int subindex, int *size );
+
+/** utility functions */
+void insert_value( std::vector<UINT32>& vec, int index, UINT32 data );
+void push_value( std::vector<UINT32>& vec, UINT32 Data );
+
+/** data fields */
+UINT32 empty;
+std::vector<UINT32> group;
+
+
+};
+
+class SparseHashtable
+{
+
+private:
+
+/** Maximum bits per key before folding the table */
+static const int MAX_B;
+
+/** Bins (each bin is an Array object for duplicates of the same key) */
+BucketGroup *table;
+
+public:
+
+/** constructor */
+SparseHashtable();
+
+/** destructor */
+~SparseHashtable();
+
+/** initializer */
+int init( int _b );
+
+/** insert data */
+void insert( UINT64 index, UINT32 data );
+
+/** query data */
+UINT32* query( UINT64 index, int* size );
+
+/** Bits per index */
+int b;
+
+/** Number of bins */
+UINT64 size;
+
+};
+
+/** class defining a sequence of bits */
+class bitarray
+{
+
+public:
+/** pointer to bits sequence and sequence's length */
+UINT32 *arr;
+UINT32 length;
+
+/** constructor setting default values */
+bitarray()
+{
+arr = NULL;
+length = 0;
+}
+
+/** constructor setting sequence's length */
+bitarray( UINT64 _bits )
+{
+init( _bits );
+}
+
+/** initializer of private fields */
+void init( UINT64 _bits )
+{
+length = (UINT32) ceil( _bits / 32.00 );
+arr = new UINT32[length];
+erase();
+}
+
+/** destructor */
+~bitarray()
+{
+if( arr )
+delete[] arr;
+}
+
+inline void flip( UINT64 index )
+{
+arr[index >> 5] ^= ( (UINT32) 0x01 ) << ( index % 32 );
+}
+
+inline void set( UINT64 index )
+{
+arr[index >> 5] |= ( (UINT32) 0x01 ) << ( index % 32 );
+}
+
+inline UINT8 get( UINT64 index )
+{
+return ( arr[index >> 5] & ( (
(UINT32) 0x01 ) << ( index % 32 ) ) ) != 0;
+}
+
+/** set all bits in the array to 0 */
+inline void erase()
+{
+memset( arr, 0, sizeof(UINT32) * length );
+}
+
+};
+
+class Mihasher
+{
+
+public:
+/** Bits per code */
+int B;
+
+/** B/8 */
+int B_over_8;
+
+/** Bits per chunk (must be less than 64) */
+int b;
+
+/** Number of chunks */
+int m;
+
+/** Number of chunks with b bits (have 1 bit more than others) */
+int mplus;
+
+/** Maximum Hamming search radius (we use B/2 by default) */
+int D;
+
+/** Maximum Hamming search radius per substring */
+int d;
+
+/** Maximum results to return */
+int K;
+
+/** Number of codes */
+UINT64 N;
+
+/** Table of original full-length codes */
+cv::Mat codes;
+
+/** Counter for eliminating duplicate results (it is not thread safe) */
+bitarray *counter;
+
+/** Array of m hashtables */
+SparseHashtable *H;
+
+/** Volume of a b-bit Hamming ball with radius s (for s = 0 to d) */
+UINT32 *xornum;
+
+/** Used within generation of binary codes at a certain Hamming distance */
+int power[100];
+
+/** constructor */
+Mihasher();
+
+/** destructor */
+~Mihasher();
+
+/** constructor 2 */
+Mihasher( int B, int m );
+
+/** K setter */
+void setK( int K );
+
+/** populate tables */
+void populate( cv::Mat & codes, UINT32 N, int dim1codes );
+
+/** execute a batch query */
+void batchquery( UINT32 * results, UINT32 *numres/*, qstat *stats*/, const cv::Mat & q, UINT32 numq, int dim1queries );
+
+private:
+
+/** execute a single query */
+void query( UINT32 * results, UINT32* numres/*, qstat *stats*/, UINT8 *q, UINT64 * chunks, UINT32 * res );
+};
+
+/** retrieve Hamming distances */
+void checkKDistances( UINT32 * numres, int k, std::vector<int>& k_distances, int row, int string_length ) const;
+
+/** matrix to store new descriptors */
+Mat descriptorsMat;
+
+/** map storing where each bunch of descriptors begins in DS */
+std::map<int, int> indexesMap;
+
+/** internal Mihasher representing dataset */
+Mihasher* dataset;
+
+/** index from which next added descriptors' bunch must begin */
+int nextAddedIndex;
+
+/** number of images whose descriptors are stored in DS */
+int numImages;
+
+/** number of descriptors in dataset */
+int descrInDS;
+
+};
+
+/* --------------------------------------------------------------------------------------------
+ UTILITY FUNCTIONS
+ -------------------------------------------------------------------------------------------- */
+
+/** struct for drawing options */
+struct CV_EXPORTS DrawLinesMatchesFlags
+{
+enum
+{
+DEFAULT = 0,  //!< Output image matrix will be created (Mat::create),
+              //!< i.e. existing memory of output image may be reused.
+              //!< Two source images, matches, and single keylines
+              //!< will be drawn.
+DRAW_OVER_OUTIMG = 1,//!< Output image matrix will not be
+//!< created (using Mat::create). Matches will be drawn
+//!< on existing content of output image.
+NOT_DRAW_SINGLE_LINES = 2//!< Single keylines will not be drawn.
+};
+};
+
+/** @brief Draws the found matches of keylines from two images.
+
+@param img1 first image
+@param keylines1 keylines extracted from first image
+@param img2 second image
+@param keylines2 keylines extracted from second image
+@param matches1to2 vector of matches
+@param outImg output matrix to draw on
+@param matchColor drawing color for matches (chosen randomly in case of default value)
+@param singleLineColor drawing color for keylines (chosen randomly in case of default value)
+@param matchesMask mask to indicate which matches must be drawn
+@param flags drawing flags, see DrawLinesMatchesFlags
+
+@note If both *matchColor* and *singleLineColor* are set to their default values, the function draws
+matched lines and the lines connecting them with the same color
+ */
+CV_EXPORTS void drawLineMatches( const Mat& img1, const std::vector<KeyLine>& keylines1, const Mat& img2, const std::vector<KeyLine>& keylines2,
+                                 const std::vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor = Scalar::all( -1 ),
+                                 const Scalar& singleLineColor = Scalar::all( -1 ), const std::vector<char>& matchesMask = std::vector<char>(),
+                                 int flags = DrawLinesMatchesFlags::DEFAULT );
+
+/** @brief Draws keylines.
+
+@param image input image
+@param keylines keylines to be drawn
+@param outImage output image to draw on
+@param color color of lines to be drawn (if set to default value, color is chosen randomly)
+@param flags drawing flags
+ */
+CV_EXPORTS void drawKeylines( const Mat& image, const std::vector<KeyLine>& keylines, Mat& outImage, const Scalar& color = Scalar::all( -1 ),
+                              int flags = DrawLinesMatchesFlags::DEFAULT );
+
+//! @}
+
+}
+}
+
+#endif
diff --git a/3rdparty/line_descriptor/include/line_descriptor_custom.hpp b/3rdparty/line_descriptor/include/line_descriptor_custom.hpp new file mode 100644 index 0000000..0f5a77a --- /dev/null +++ b/3rdparty/line_descriptor/include/line_descriptor_custom.hpp @@ -0,0 +1,119 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ //  By downloading, copying, installing or using the software you agree to this license.
+ //  If you do not agree to this license, do not download, install,
+ //  copy or use the software.
+ //
+ //
+ //                           License Agreement
+ //                For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ //   * Redistribution's of source code must retain the above copyright notice,
+ //     this list of conditions and the following disclaimer.
+ //
+ //   * Redistribution's in binary form must reproduce the above copyright notice,
+ //     this list of conditions and the following disclaimer in the documentation
+ //     and/or other materials provided with the distribution.
+ //
+ //   * The name of the copyright holders may not be used to endorse or promote products
+ //     derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#ifndef __OPENCV_LINE_DESCRIPTOR_HPP__
+#define __OPENCV_LINE_DESCRIPTOR_HPP__
+
+#include "line_descriptor/descriptor_custom.hpp"
+
+/** @defgroup line_descriptor Binary descriptors for lines extracted from an image
+
+Introduction
+------------
+
+One of the most challenging activities in computer vision is the extraction of useful information
+from a given image. Such information usually comes in the form of points that preserve some kind of
+property (for instance, scale invariance) and are actually representative of the input image.
+
+The goal of this module is to seek a new kind of representative information inside an image and to
+provide the functionalities for its extraction and representation. In particular, differently from
+previous methods for the detection of relevant elements inside an image, lines are extracted in place of
+points; a new class is defined ad hoc to summarize a line's properties, for reuse and plotting
+purposes.
+
+Computation of binary descriptors
+---------------------------------
+
+To obtain a binary descriptor representing a certain line detected from a certain octave of an
+image, we first compute a non-binary descriptor as described in @cite LBD . This algorithm works on
+lines extracted using the EDLine detector, as explained in @cite EDL . Given a line, we consider a
+rectangular region centered on it, called the *line support region (LSR)*. Such region is divided
+into a set of bands \f$\{B_1, B_2, ..., B_m\}\f$, whose length equals that of the line.
+
+If we denote by \f$\bf{d}_L\f$ the direction of the line, the direction \f$\bf{d}_{\perp}\f$ orthogonal
+and clockwise to it can be determined; these two directions are used to construct a reference frame
+centered at the midpoint of the line. The gradient \f$\bf{g}\f$ of each pixel inside the LSR can be projected
+onto the newly determined frame, obtaining its local equivalent
+\f$\bf{g'} = (\bf{g}^T \cdot \bf{d}_{\perp}, \bf{g}^T \cdot \bf{d}_L)^T \triangleq (\bf{g'}_{d_{\perp}}, \bf{g'}_{d_L})^T\f$.
+
+Later on, a Gaussian function is applied to all the LSR's pixels along the \f$\bf{d}_\perp\f$ direction; first,
+we assign a global weighting coefficient \f$f_g(i) = (1/\sqrt{2\pi}\sigma_g)e^{-d^2_i/2\sigma^2_g}\f$ to the
+*i*-th row in the LSR, where \f$d_i\f$ is the distance of the *i*-th row from the center row of the LSR,
+\f$\sigma_g = 0.5(m \cdot w - 1)\f$ and \f$w\f$ is the width of the bands (the same for every band). Secondly,
+considering a band \f$B_j\f$ and its neighbor bands \f$B_{j-1}, B_{j+1}\f$, we assign a local weighting
+\f$f_l(k) = (1/\sqrt{2\pi}\sigma_l)e^{-d'^2_k/2\sigma_l^2}\f$, where \f$d'_k\f$ is the distance of the *k*-th
+row from the center row of \f$B_j\f$ and \f$\sigma_l = w\f$. Using the global and local weights, we
+simultaneously reduce the influence of gradients far from the line and the boundary effect between
+neighboring bands.
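+
+As a concrete illustration of the two scales (the numbers below assume the implementation defaults
+of \f$m = 9\f$ bands of width \f$w = 7\f$, i.e. a 63-row LSR; other configurations follow the same
+formulas): the global scale is \f$\sigma_g = 0.5(9 \cdot 7 - 1) = 31\f$, so \f$f_g\f$ decays gently
+across the whole LSR, while the local scale is \f$\sigma_l = w = 7\f$, so \f$f_l\f$ falls off quickly
+outside the band being described.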
+
+Each band \f$B_j\f$ in the LSR has an associated *band descriptor (BD)*, computed considering the
+previous and next bands (the top and bottom neighbors are ignored when computing the descriptor for the
+first and last band). Once each band has been assigned its BD, the LBD descriptor of the line is simply given by
+
+\f[LBD = (BD_1^T, BD_2^T, ... , BD^T_m)^T.\f]
+
+To compute the descriptor of a band \f$B_j\f$, each *k*-th row in it is considered and the gradients in such
+row are accumulated:
+
+\f[\begin{matrix} \bf{V1}^k_j = \lambda \sum\limits_{\bf{g}'_{d_\perp}>0}\bf{g}'_{d_\perp}, & \bf{V2}^k_j = \lambda \sum\limits_{\bf{g}'_{d_\perp}<0} -\bf{g}'_{d_\perp}, \\ \bf{V3}^k_j = \lambda \sum\limits_{\bf{g}'_{d_L}>0}\bf{g}'_{d_L}, & \bf{V4}^k_j = \lambda \sum\limits_{\bf{g}'_{d_L}<0} -\bf{g}'_{d_L}\end{matrix}\f]
+
+with \f$\lambda = f_g(k)f_l(k)\f$.
+
+By stacking the previous results, we obtain the *band description matrix (BDM)*
+
+\f[BDM_j = \left(\begin{matrix} \bf{V1}_j^1 & \bf{V1}_j^2 & \ldots & \bf{V1}_j^n \\ \bf{V2}_j^1 & \bf{V2}_j^2 & \ldots & \bf{V2}_j^n \\ \bf{V3}_j^1 & \bf{V3}_j^2 & \ldots & \bf{V3}_j^n \\ \bf{V4}_j^1 & \bf{V4}_j^2 & \ldots & \bf{V4}_j^n \end{matrix} \right) \in \mathbb{R}^{4\times n},\f]
+
+with \f$n\f$ the number of rows in band \f$B_j\f$:
+
+\f[n = \begin{cases} 2w, & j = 1 \mbox{ or } j = m; \\ 3w, & \mbox{else}. \end{cases}\f]
+
+Each \f$BD_j\f$ can be obtained from the standard deviation vector \f$S_j\f$ and the mean vector \f$M_j\f$ of
+\f$BDM_j\f$. Thus, finally:
+
+\f[LBD = (M_1^T, S_1^T, M_2^T, S_2^T, \ldots, M_m^T, S_m^T)^T \in \mathbb{R}^{8m}\f]
+
+Once the LBD has been obtained, it must be converted into a binary form. For such purpose, we
+consider 32 possible pairs of BDs inside it; each pair is compared component by component, and each
+comparison generates an 8-bit string. Concatenating the 32 comparison strings (32 pairs \f$\times\f$ 8 bits),
+we get the 256-bit (32-byte) final binary representation of a single LBD.
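+
+A minimal usage sketch (illustrative only; it assumes the BinaryDescriptor, BinaryDescriptorMatcher,
+KeyLine and drawing API declared in descriptor_custom.hpp, which mirrors the OpenCV line_descriptor
+contrib module):
+
+@code{.cpp}
+// Detect keylines in two images, compute their 32-byte LBD descriptors,
+// match them via the multi-index-hashing matcher and draw the result.
+cv::Mat img1 = cv::imread( "img1.png", cv::IMREAD_GRAYSCALE );
+cv::Mat img2 = cv::imread( "img2.png", cv::IMREAD_GRAYSCALE );
+
+cv::Ptr<BinaryDescriptor> bd = BinaryDescriptor::createBinaryDescriptor();
+std::vector<KeyLine> keylines1, keylines2;
+cv::Mat desc1, desc2;
+bd->detect( img1, keylines1 );
+bd->compute( img1, keylines1, desc1 );
+bd->detect( img2, keylines2 );
+bd->compute( img2, keylines2, desc2 );
+
+BinaryDescriptorMatcher matcher;
+std::vector<cv::DMatch> matches;
+matcher.match( desc1, desc2, matches );   // Hamming-distance matching of binary LBDs
+
+cv::Mat outImg;
+drawLineMatches( img1, keylines1, img2, keylines2, matches, outImg );
+@endcode
+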
+*/ + +#endif diff --git a/3rdparty/line_descriptor/lib/liblinedesc.so b/3rdparty/line_descriptor/lib/liblinedesc.so new file mode 100755 index 0000000000000000000000000000000000000000..5c3ed0c5844e6b79a13f2b37fef14a9e01445c50 GIT binary patch literal 209880 zcmeFa3wRS{_CG%91p-!55ws#o(XiVhLIV^UuL(_H0zoSm!E0MWp~cd6+Y|~SRMH}2 z40gROUUsdn?y~9%>v};@Nr6((RV&_5SE+bQK&0ZW;HAIMdEa;1w`rmU*YETEpXWb3 z!_4{2ne*Pxd(L^!Or|TG?ul^*L%brkc;zC+fb-USIbaib%7_jY#inE_z48ALWiaDS z6g<%mf4D!6!TM2AQW;HU^un$_!mq23oW*ec*n9#bl|AC9-$&eZU&~|szLv-Meflwj z!-{@1axqE*j`seJyJ;q9gFJEr|`<(ZrBTbea!{~IT=1~lUOV(h8d zscYj%z>L+3Qf*U=;IJtf$x33c)HJ0)>0{_?JJC4SXi)k$#3k1!B&;k*O{+Fm*DHOr z>aC7lK_%tnB*knvD^IC28WPgfI9qDmZe?zA#>c5~6MVtPAuYMTQlkV7itRQ-LfKiT z4Xds`^Rl?Lz9c1Oov~MmEg{EtTA!jnCK*tnK66oib#l4!jyyy4{uh%prF!CaB{{)h zP^weiO8lJo8wZ>Ok-ZaYZjSG3h)Y$BsY;EiC_@d&0}QDOS`w!u8%^SG z`o6Ujb{bY**JpM8Wp^YP71nn)L)=4k>VP<-;y}}>FRHN1+foqA1h4|fmDp>r*J59V z{U+?_F-5~p+s)Yh*w>I>|e~IH0;wT@U62#LoD9)e3@mcJfu|FrU=W%=i`xfj$ z>tJrCKjUSZVIKGbk4eW1We;fNd*x$weU+f=X zr>%wkuP7hlxKEsaB#s~B_zCvU1RcV0zc~LK$1kuS!rqGgF!ryo)AlVtC`WMoNt}~* zK#`cf$Kyy_B96VV_r?w-XWI$l2#r)u#BLHa^r~a)kMonU55PW9!cW2Zse&Ga}O#=8~ZTq`u01V563=2VCUjEQk-YtI2!v{?BlSH$DV~<-mHQ?PaMz3 z@d9yvF^)Fuc0o@NM+c5B?31xi!S2SsH!1n%s!z_ae)i#(r<&ioEdJl`820UcBXH)L zP5yqL{i}8A(lN`2*?#}xnm=y7Z0XAM|E_rZQ}a!C+TMI9e_-;>1qc5Ag1w~g!bkmg ze^T_{H=ez>U&SYbu7BpLw2O{z*fsLix8Lwg{@(NMoSeLe_d2igOdGW^@MXU(HKv=* z|GoC>bJ{Puz~uP%{m$Rt8+%^9>wE2nqZ%tnkc=^xZZmAiu2Dq^1zAy zPu6H#s;)1-s%Xll6`x=A?Cb|-y*KUE3s#T0;^4~n|GhMLbp4Y*D;X*HX57-6LN9+x_D* zTQ@e2TbcRJbLPH_(r%sp#8VFqylqoa(fnRh-%Oja@RdQe3!bT*@Vi&thgW}lbF=%a zGp~5|yT0eId9e8Of9{<;aedj29Vb3~!56(I&id(z%f3lH`qJQwk{2&&?{nGNwF|PH zZ+?Ex#>x$;CneuJbbR8vlg*d3yfVW4()-RuD=zPI?c#qOd0|HLdDoAf?|b;@XV#+| z8qT=qNJ{RsQR`n`R2|3cUk^UU4`yYk0r<7cH@{A1&#*M7Zk`18Sk zKJ&!=N8V`-{p+iF`4zS9)9>^RnetIO^NIdLWAAun+o@A>*Iz&AmXW?! 
z@BP4)6Rx=8FFU&ijn^b-#c>Ft{sV+51anG{NAKkWW#P;;+tg6(Ob`Y zd-lW|UbQwpxoze(qcdBFJ~UZ-<|FeLcQ0>Dy0h*deLgv_RQuq(%+@*Em!A1luk+We zzpMB8fyX9XHT2*|+0Q+4_a*1p`rhFl>vXm)nee-d9+>*$a~TuvG!3a;u*G`sJqKPt z)Nj1${57F}S8jM_cku)Ae2qMhSNbR$$$?1QkY0+?U!wKXfdHf7|4NCD?@Lyclk}UV zV?$bW{?!O1qRAf=qdt$vMd!c88XX@WL;eI58m*qT@zLcdG30N&I6D9RS<&(A82vCR zMn6wEBf6ZYPmYei6=PfqW3)G8OmsP;`$fm&&W?^xiNQZ2Mti40H_npuLqB~gM!(ID zQEnJQ(6nybOEJo|jf~F!4`X!v>lpku|2{hZIT)8{dXlK56vO`PiD6F)ozdI-cXUFuet0=XxfjNe^Kgvz4vW#QO)>Q9nizJz9fKIHp7+GC z8;8b3=YKfHIJU*GOM7FC@20-d<%eVF^RR^I{MW^3Z$k__@aGuyamSdyhhxl_!I%-~ z(r(+-7~?f1#&}&21K$~Aei=|=YPV9c28@9pjbR7wjL|>wNCrjIlT9cz8a@vF7HvKj zL7$`Hvt#J_^D*>lX%fXDNlL0%tn2xLR5R5lNeR+o8ymWdZI`EVysRI`hr=GxcESiQ zC+B;PU(OH82`oNIQkrQYj}61ewmT7ToTQwlG(QCfY#`Y-b2OI|OhgA_)7Rw_m>)+y zDH($8G;zZ@V>y5F37o%%ACym+|4mX_p5^?R0)Oxv&abrb`eeiI(>6Ji<7Fn!m?QXa zK8v?EkCG7Bwy^&dOX ziDoPJL;g@@ppyLPS&UEb9~C4U7tx=~*j3~wgCGVa^Y+Z0iTVLwL2oAK{mk@?l(l znMk1Baw6xy;8f~|eo8Q(*FQtZDS-k|KU6=!0kE;{2iOzpw`%b`LAMX5K_2lZKh61$ z2!6`%QGM##IiTxj8x)@SbN<0`bQjwyPviV~5&GGDAul&aj8}@_UyUQpmyHqg<kpLVi25f_9 zSL-^?f0>YfJCol}X({FXe1*W@hy5Tqd4J=8Zrd*sdZnDs`4{tp@|uOWD>aS-e-<73 zPtngCL_dpaD*M=Y^;7D__=05HC3Y@Hxq{0{6?i%rs6N$)IdDkm?Hy<@(>Gzq^!fYs zc+tk9f_Y8tZB69-w+jBr!oJnL!-`Wz3O$)B z>~{TA953Pr<$2*pf_R2RTZf)ZIo^P-qU{CjRBl;>K0j^ec&gA_xC6G07XHg7 z`Ull!+slwga;n7xh&tX3<$clKdj8l>*)DMVMWnZ|hhn_05pw2F;BqoVxi1O4B8DG) z75voCIb(RcZWa8#a|Q0?1&$MTV~o(Z>Tfu1=C>)kF^@=2i;y!wZ!r8FYgdHdabdpD zyjv&ivu?-Uhhb;_E@D33X5(^NXY&F@Hz?~>iv3PgQl36b)SMsEgdN!M1LxG|#m5+b zs!wG6b0y|A)n6II`A-+^9gcDdFB{3reI0h0wq58?#&6~L--SM83IA@}%;jVW|9&nU z5b>uz$M4tu$Y|j&vLgJ&OG4j*v$%YEMnu~MSzJ!ty&QND^NqI6B3`N%_DApjR^cyl zM1Si2{AY|8^+RNwGxYadek(nMqD=^D(D8FP-V64Kw*A7MqzXH8rMPVc;u(@}6aF1) z%(gdSSeQOU=+87Ue_KRcO8%I($ta)tt>pnu3$vlDJ6rVUK+brN;2#@9Z*RbjWM_6p z*qM_t-$~y>5%JYU6F6Qd`YneSuiV>9)aM}%d?N4?Fe)td+`bm6dm`sGdQ02CFj4N zvnv$Fu=y+G3o*(N=s6qTzj4L_(Ow7a8R2FB;P}I0{vL_p$M3~;;*X5?>Ms!ZN4!4! 
zpdYkdFZ@@e|J^)+*R$nHUXd2YttgM8y(GVW9+#7YaiZ-(=GO))g%S4fAu*5gBIZ%9 zh+AyJ4=&_l6ndUb{Sz5yUWB-gwJVpGTPgUT#F6a7h6wvmh6zIYUl-x0F2%S!0R9%i z53|g+r&aPtgSz>nx5Riw#&uVSevXXmJ`sL4GOjCwep0)Xi1?P;LG9At*Sc8LXRD}B zYJ`6b<2uQy7W%_`1Mecl(9hS>IDcf^JHjO7M8qwBcJlgcn8e3jw}*EN`N3;>iGLRL z%n4)HmwI70W(fSU@mzlO&0J1A;!D~d67#WcATL*#59LEPj!@D$9G}bC zl{>K@rG9G>`mjal;oo7e2(ONaKhJ``Q@`aXyj(pV`{M+TD?-m-7xK3c;BsuQalnrG zNn6fPj@OI$AJdaH9E-X98=+^ky@~pe9#)HaQOIHC?ihaa6Eu^_5gq&>7pMFP zd4xyCpRLIpuinB1ebuBIMYf#52o0r|y6OTjpKQSLM$A-g!g zqN1QeS(sl`q7;?PE6w$kE-Wi5E-2?zS#iF1UTOJ4P8H{u%&*L!UywVuD8IPC<3&~G zN9%G1oTe%5=RFzHQ(zg zEGTyp_gDvSrlX*^0KM*-J|@@cbUH@!I-m`|aY2^tm$ll%&2&Vr(8osBL+Ym1!Dq9SEs!NP^5i>RtJhk9N@*3_cH z{0b=c*op%06eldoF$%C`PA|(ZuP9I<#Y=W4yK@@!yc*r59FJ0TlQNb}B^u>n3i|8Y z##DbEbmo5`>UbL1FlkP1nFI^0sotv9qUY@@sx1?ZkZgD}$ zd@tGIx%o5~XO=83g33-SsHj|c?9J$`fQ2e>tjf}HyJSOpt|XmzA5Y&|EE6g{*A;jt zm6ukQ9iNhRk?^bQa9n-Rr4^1}T?b1BvyF@Li!0%hI}OVM7?uT2=K{q(ZJPax+-c6~ zt}C6nc84Q(qCI=YrPI3N0)q{L5jke1=+dOjKU5Z!FL4&-9J_LLoBcx8!sC$Dp=v@# z&I0)OoWh)iIAE^98hftG^%P#0I}h>1F}Juwx+P-{g%!NbtPB?IFh{n4^#FxBM2_>6 zmMxi4%A+C6=-f*yy=9eNJI!(@l|S9v;m=$})4gNM@{7t{Q;Le5aA_I2(-Ek6yp{^D zYx?M1Kx2ywDk|VP$XNn#X5~)JEhvYpbCLra)8XUD1u~9fZlW~7IzvAJ&ba*8d=zUS z`k_ta438^hj5o ziyUQ_&S2r(@rZH#oS5mku4om~quSUNV@r^ZieVOy&&`#>0SeG55~qdV9IW+9auKi7 z@{A$n1&mct1&9yCRWdAkgne99qsDfvnj}$p85o?YSk&@xn)fq&k8{x_R;(OKDpA8K zYE222o%zMNSUi^JQ^XH@Nu$BX0=~ynwuDA+1NLCz&xyoF~CVi*Nl>#6vCuO5B4vGd%&deomsC!M+6t2`mRc9;8 zak;sr67`_h_z1fe(*32v_LPo62*8sY(@Qg1=jp~RtEjYstzA1@?sTt}7m6!U z>xDMVn1RWOnMM=NV#(tCXi-tgeBQ2TH=;MtDiMVILK%ELMlnWahj4V)oXUBSjLA%O zxcpbuVaDarg?AqZltre95*Jvs^4Oqj;hl9v2k<&}6Vs*jLYVHH5R>V{pgTDvEIRU2 z%Nf2vbX~-qKb0aSI(Huq8^@~&C=7e%pUM~ zDB6|FFTv7=j8*=eqD2-85j>Q=&ZYb>rqsMrc0ZHc^C9mn^BC{xTAR_RSw&IQ3b6)t zu4Ota`P;_!oelf#%ZW(&kFCgGM82Np%XH>Ws4@{uma+2-ydtrFN&dplTykeu$pJj( zanIXf85g;J`(NkrkFA8xmFTZ&gJrDK;l^_d2Ucv91CQROv9w~*{m-xLwPPvq|Gtf# ztIuE4tI>OrKjG;tf#+mcY5>V9BwAv*xbBJ}#v(=*q&q*GE79Cmch;Zt_EESyoAVoW zSk!!iD67NDgZ4}-=*%}z*XB=mUdo@;M2#D|wPspYUOMkmj~9l$BKFd&dx@4zq=>uRd<-*(xrkXsbG?O^ z&YOS-Z^u;dc+`4Y!Q4uZx2UvaT0Z_iw%bwfh}(~$=3;by_K9}KGwz})XU-J;nRV1# zrX7pDi7G+&zdaalQTJbZZZAB0R>!kodSXhm7Yo_$5Bs|X1LT=_Y!LNDda$(B2;B8QN!f!${MrKW9U7LMZ6Pn3DQ?w<9b-SWo+mB2s04U9{0^G z)52DfJ@X=#adX(q580(9i}27M)5V!H4|$KC+c+}u2uXOz$MQd2R!teX5syqc{G+C)!N}7~cmUxR67T`$*1{bRfF-f{z5?*kItJ@V(gh+N$ z_6E7m;%HfopH0&UG4vp~Yb|>?vYp-2Wi81%idNASY)nIkQ?dyw?TGH8H8yeC={P?2 zaLJMCWmFITOSL$i2hs`E$jAwX7bLZ+tg}BF;7gQAHW6l>*aM|;T7a3d} zV@E_ZM+zrJ(AI-jr=pBZgxKg2He07e4H(CxL!4D*ujQt^lKm=DB%xubrz zehXsM5EblFqfWzJCRel*?#T>^7(q)Wb=Kv0nQJZuQYCtyMXA!Xc?!b$lF>>*RT;(K z7WV3wWE(_>c07C2NBQSi#He?&h-XthY05YrwN6J)x459g2a(Aoqne3h_Zs|f#doWH zPuDoHtB$aLA$!t>|9A$nvv2+f(k;KG^zPmL>uaoAuFhWgpAMnu?#x_t_c-Q(f2)Fc z&qpn29`tpd(}|@rYGLVK3t_cLtJ!l>@m^k+nnv<;Q%!W6DCcM8MDqM9vwUo8*hrVB zr=5Ek>qb~Mlzj=h%VOb9oGq%4ApsMmGSS6#jshwB7-YOzpw|-v7Exnl_jJRB6ga&N z&R)UJ%`YoMaO)KBts))GpDOX!0eK890_Yw_tsdb%ES6kXNyp1aFgKE?>=vx@2dD6f z5$`O`=?p4mabvGT2Bb4^>QWRf;Nf|5*<;aTedp$vo$b#=B}@Ujj#wrws&eV&Tk3pz zhbmU*(?dq_jb5cV>p4IB-Vj@f{k#l%kLG7(L{Z4m@*7O9$xKUQ zzPt2`_j4_m?(dnlqFec*b5ed~MMY75NzM}FDT~DKI#|m)^mnw*GoiS$oCnoiEP#Nz zzWoT>(pmSpb9G_k%a)WoN*9;(R18}ZcM4)+er}{=nA)C(E_$y_FP&2$hvj(^GXk)r5I!3%NOpr?i5eqbPXizhv$_wj7_Q z=UKYVjVOa*5p|#+ig8QAES6NA=n)TeoJTllYnEq`H@mbL`DkWidNP~4YO4MoV=R-? 
z**Ai79I_eocAcJq?@3D|qDln7bLSU0%JcC^YCg-~^MU#EB{A`QjK{@PmqQ7|ObhYh(b%SU{_==oZX|eDeUGI-}bEhwuyGW`U z%d=rvSTYoOpJ<~JqdIzBs4M6>R!1(cYg_0i0ue9HMy#`9&*Yds9_Fe?LVh+-=)Rge zzM4cV_o55=*_!A!B9`~T!Zxb!`PutWV%Ph6XlWvJf1J}%OwT6hsg zx(OFkrAMN8>PkF2;g8iK?Ge^vSV_h-k7O3fJdVdvl&o8e9$Kep@m3F4kd)n}=R(J5 zO?gI(zarLyZlZS0U@uwqlv#fH?6=Nvch>IzD5cH{{M9Y|g*6uM?_blRUnn4YAd@*4 z4vMBF3^INxD|;XWC>KyEufU1?70j1d+HxPbrm(a=f7+rWj39wtl7`0+W->oQ7$@(g%WLEVnpx z4RI68ugE{~0z59Kw5WvNLpc}Xl&ePN^5^Z{wSF{p6RZZ6aZG&_wG;Gmj$yP%yQe$! zx74zKW~B90FzohkA-yvz`5Q=&`snI6DCg&XWM%2n(V{nFy=c~jS9b9w6_zirAQzw#4LT0XH-SG=e$v+W!_PUvPR)CQGvI}1Ex93r0ne6F(WgSNp9DK?A*~KM~}?x zI3L?_W*Mtw&z^0c;L5e2xmg^aJsWq99ci(2A*~W;mkXo2@bQl`#j&rc#^Y<SY2p3(6!x>%Cx#E@H4C^I6OGzKe;~&%_+58@vpX73x3clUl zTcMBD#j}4ToBok}+W6O`;@ICp6rTVS7+sAAAECV%mV|rwU386aRBMuF`2YX?f2arW zw*c|)Pb&ZW0jU_uoZl_Mmx1w}Ru>;vx*m|OR}Lz|r`i&fw2tfX%ERKi4Yv#^D#91p z`Y2;@ojy%=#~<&4;uw{S1kLY{m+tSSEEo4viB+$yNBJixF2SGQ_9v)jU*&Q^Yb(-0 z_f`r7{rdXrpy0if3PJyY@h2+F1?^40%}ClE08N|zcU1whEhxSZmm|J!sN)&;@$0iA z@H~O%Mc{P;FO0y00xyfet3|o>5#L+X>B#Rvw}|h?MSh?0h`=MitDcz4XLRJ>Cm1Af z{k!n=S-f=Y`e)=;i+U3NH17iTzyAG4!Y{?H^XuO)j>Prv`9|XUci|&({d<>@xc;5@ zNL>G(ZzR4}s{eyh{p%%M|Ar!6R>k)ONlq7BeD^jI-yq4+|JDJ?*(l);5g@iaNxqC% z#lZ8Va%KLVlKkIOrNq`O;rcfV>CzqvU#v6Z7+ilAqsW@ec#FjUu*4sdaQ&N(bh%Ed zr;N8s{Pedv^=+-hFXL?z|KD_G95+h+Bjfdw92r+6e%Ut#W8mu~IWoUd;@7`XP1V{N z1K%LYk@-_3{uUA-wp0m!N(XUFlkgO&-!ddTS;B1+zC)5TTf+5kX4B;|3Ev~}ua@uw z5?&|a|B>*u5>9`ARp06*`~@AvakbPxGQLjYw@Gp~NVqVOOj@&q*GTgBNcb6&oE8a} z|2}N1g#Sa5(O8D0jo+9BxrTU~w_&SL{O~S>eR@v1I38%ktt8X?5 z7k?6nU79W7|I(RpER*nmN_droza!z*5`K|XZcxGxN&Gt{T$T8nCHxJEe~*MeBjL%C zU80Cp-%=zTU;XXaQYBpf#y?$5lW_Xxs=j4N_%a>DF-yWflyIAb50-FM!XJ_F90?yL z;j<-tvxMhKxc)bY>2jfji$7_^?kbb;)smbl2|p^~)e?S@gs+xxn}pX%`1caNR>FUf z@OlYPlg53WgyYlf9oq&8|5FDFJ1^l=C48rZ)8D1mw`K`1)Il8gNcchtZ;|j42@gql zg@m_CxL3m4Bz%#C?-B7ERdKF_r)=PUmOeAiBQbt4Rl>!mf!T#L3HM8KG9-MBgl9>3 zFA29v_~R0;O85#1&yjGdtG>;aa6KlbYk3mhM`y;dP{L`Isc&Txj=;WStCH~H()dEcca7oX~8_cTkm{>^W?ut&mAW}N)L76~69;UNjv|8_fFZk6zm#NQ_2r%Jf;uTH(y z|5gTFHcB}D6hOz8EaCTbpo)?r;rP=W9b2k|f7^k=ZcF%53D1!5ItkB`aC{oPW3x&4 z+71+PzJ%iw;2m3zgrC`gD#~mL$0xu$wmb<>?LeXb5>9__Uf;?jT>Pm8cBx9j#h;L1 zc(sHN)rH`=TEd4&^6Ml#QNq_sI3H0itX{&;mgKLK@Vg~^gM@F9@Qo5aP%1Yl;crR& zJ0;vK;ms2MI|<(-;lG#g770(2@Q{T6UBX)>e7Jc&db-E8%GpK2pLnBz%;FXG!>75^j_53<*~ye7l6_NcgQ1K3l>q5}qgF zH%WM*gyT;NbZli3KBfajo=3uWOL(<}Z<6rU5m)o=!q-apI0>(p@bMD9PQq78 z_y!5r|HcJf-YDT8Nc=$ww@Ua<2|rK5n)A3BOOmt0Y{N@M;O)DdDRnT%
@HZtng%W zPr{QWyq}~eDH1+k;!l$kZ_xXZ;)_R!WT$*j)YsK`p=f| z>m>d>2``rLLJ2RG@G=Q6lkh4D|AT~AOL)12ua@vi30Hle8&!Yez#(TTYHiS)7&;xR zY}L2jxJ_chX?x~bU}+y_#{Z@vHk=V%NKE0Dc5K525luZ1Zf10UqT`4TGP*C(@kDQ6 zbR5wMMAtL==$W7siLPVxVWN8xUCrqIM3Y*D%NV_nXd}^ijDDBsK1Am*dN?qE9Az1EW_GO-t}_J)@TrJ&@=+Mwb(P3enY!UO+T0wZmnMo8rM5hs*$LM#7PA58t(YuKrPPC2DFA{wY(HV^1O7sY#QyIO9 zXj)Q*lNtR4(X^BbD~x`GXj(#rTYq5VPjm*+EsVa0XbaKJjJ}QN(L@IseKXNxh~B{H zl|<80BwWwvr9@{EUB~EhqG{<7u4eQCqG`zyE@Si@qG?GH&SUhIMAMQXoWtm8MAK3s zY-99fqABDLXE1sK(H9V%%INcmzL4l-Mvo@?BBB*W4=0*J`f%&_Z2XC~5#7S*K}6e$ zZf10Uq9+g?WOQGmvx(lo=s2PXp`siTL6e@@77=4)Ni9}a3dOy*Vh%RIFKB85k z^BDau(JrEM7`>b5$wb>2{UXs*h|Xa2R-!4y45u=B6VX$NPGNLXvRn5jOrr=Mmk)=s`s16Wz?{{zOxV5e_oCFVP;NH!wPm z=($AKGy3T1ped9H*D?Aq(G)6#s~NqYXbJ_wWsKfOG&!Jf9;4qSx`^l;M(-wi0ns)_ zzex0TL}xI1E79bF!>NqkMD#+UlNtR4(IrGHjDCb@a?Rn^Ha7l5lS>Y_F!~;%$rXp2 z8GRekHKP|0y_o1SM$aL-is(E> zUr98DT;Uu>Pa}FM(Kbd;Ci;4!GZ;OA=w(EwGWtBCZy-9E(W8l8PPD@4;Y6<>y7fCY z{zO+3-NNWWMBhksGo$+xy^`o4qx%x=BYFd)K(SD*c7`>I~TZm3&^d_PML?<))38L2! ztuXo#qHiU-^;;R}y^((e;d8O7xvX z*D<=B=(~unX7mE0|3q{dqvsI4mgqc2UrF@cMCUMi8qxO4{E4n7x`ok$h<~qyW zAq80F=R{qL|5hwh{gsL{aJAXs@--Q#dJg}LGmTE|FH9(}mugz?FsC>T`<>cdRHVz= zWLh~FJp!%Cy6PJH)%I)b*V=Qpss0(}RDC>M=igNgUhg1Pdt23Jn2n)#VKs@x=vS#a z%WF_)Q_b}x$BG-hGc7^K`3q7__XRbFdBm19+|6&ijNRxqr?R^pT4AcH?LfDTaALXcG+jl zba{d_Txn`UOI$;Eh(mLmlT5XPAwzo|6<*UHXYRlSy_}YBNuu4~8$-6{6BOa{&oo*N zPW2Zg9~fST#(8@=w9&Y(`nD$c8V$Y!2A4L|XbqXxJOl39_e?cAaCBSuRqmeZpQ^ai zUsSbbC_)^X$Mj$$E~|kH6xG^hTJX`2wpX6uw0!Ke z><*s^+8r=ALc9|G5@pxXIB1IpZ<|*q>3>siVo;kVCDXBKl93MEz*|EnO5l?3wZ%r? zk<`V*rmNcbTS!Mz}FG^~s@o52IKu-V(fuS%!_ouuYQhzc?I1=jrkqznrzJ zF+6K`<5$C9vCnEeFnqWDay2k11I9pm6KbV>)A-f3Td6207dn4ZVePlLUZu@S0#O#2 z*xTgV(KHEK-#(#f(g+l$G_=JxP0FA%ZBjzhBw89$d$PpQCXT8&=7{5Lam?e#o2u!E z4YyX&Q5)a*{YjXH_&>=U+Wtj*d*k=XN#@4uN3=A4bw%PfI4OtbG1`5f8=R)~pM)Od z9PEw}!Nw!Af{yf08owVN)UpROet+%O(2Zcwu1i)ok#xKfI;8RY5xAJ*3&yEyRco{9 zrupbRC^7|eN@(}N_I8JVI>vpQs`=IeY9d^5*6yyFZ})9AY>6Yv5_DR2v0i8TV>dL| zFQm26)0(+ zN>1&^cJC=}?NF%g5Gnpa%!kGlLM!+CuS<4oe25%(n3zu!tWfRHaYXa4xM&piDQm9is|6A z5nUTx4$C&P&=PDSrBa|lWR0!P5i7n>{Wp}UzQ!umddPbc^?RwTLTF^RQS}880Awd)hbM0Lo3~g#+ho~K+T-~kA))zD)nwc!)E*F&KnVY5_i zzvbJh{;_6vU__d#HL2RRP$tv%kV_kEb|DNgPSuLd$x~~;^(MKs??Ur&m&-rc?01+G zn%LX2P1y-#df3o*=_|GNp!a$;;5ZlFyV10Y)(vXYHuB!8wl}n#1Zg{_`V-7L`0k(=mY^y05p0qxFyPFo+E}xy9dMz7uutgC@CTT< zP$KT%Hj~aBxV?jRYdg>aH?@hxh5n1M$&Gdx4?M#Cb6^Cmbrj2cmSFftNOHsP-{eAl zE$_jOv3O)E)jlxzAn}DWK}hq%;qPrX{k65?V(m+2MqY1G{g)bjA%kOR@GGZjV{ob` z_!SPmU_6{cz7~V~o?b3PaB{$LV-g&#!sn?a7^%-#2KYW5qWW4|)cD>Q ztnhnifKBxVi9z!!<-=sG=tuLEu=JrAy2uJ9&?p*O_2PXSqyZ`bOXYDRi z&1+B>t+^pI#8kTjr>?-NdQ=163-R-8oS_RH=0abK32U)9^}9X3mU#3jWlPim?;m2w za%-(l?F)Dh=9%X}l07i7-JyLoRXYGlZjz+!0B4m7-OE#f(fBawIesqCZrx!wP26Gk z{REAgxFda=)`T0b4rKqJ-Djo&K@DKXQG5*%Epa%j#+lZZzQYx$sja7TuX!0&sLbWr zL4&^*7z%XTt=Y+@H76#bUhu?Jha4=>i8I5ANO=vt*$MD!T7`W@wQur~INE$|22)K6 z-Pz=O2s90ouMHM-H|Hb$A|K;xi#OGL$}!3&kOlL#C75bNT)jYznCqY9p zuwJH`TR27vjPX*Qr8w4czSn%UspdxFS!P}qx5!2DdRgO38jt*x zI23ADhaPHdOOJ!MOD60LVAi(sF#5O}LJSS(*Ra0yBH(DSYqzjw2J8lO{_-cF_Gu&b(nv<#-99JF$4NpJ_2#Sh?d+oT^5Y9_mU!DRawTdRL@t9?w_*ZxbtwvSQU z$PoHxwb{qy{D9MM4XyZpmtiOV-)(3{r0ce}SH55mOg1!pn1Ge?c}R|oc+#N2R9JSW zzpQ#tY^6apyrjm%dhxiy^mM|>qZ1HMY&Dpk4m#qKZc?p>E5lCflmVsywTqNJd8%jE z_s$5Sn1O{%E@84Kn?H>PrlUib<%L*6hITc0bd47<9Xp4Y;?r<=gPd z;3mO^mT#GDvx|V;ruY$LHfb5!TY)JEAGg7Dxh(HHtcOjt^s5x7KO3nSuQ}PF1?{Hw zJK`}n)wTYFRR)+F4E}8EGV|a?XZipeAl|LwEDbR>=Bv?e4VrEp15awlB4R2<13L`3 zWiW1;UD3yGeap0FIUdkV)xNO%mk+3IuUZHzH@J59@&eWcFtWQGeylcv%k9?JZpgtx zLwof=U*geTwZ6FgV$=G!$duY(C>@5awCI3o_Yb&T_20)_hBjcc!>X)!g`}fs>rX3^ 
z=w^g=3Dy@^JVeujY{6W040k@JTCx@DjoWEPOOZMsH!|zEQ}w)P_ut~L2a7LkuwI&A zs(k`Y3_pbqX8Oe3jLXx2c={%PoD~krv}!yqVu?0ou-&&E$(vZ@=P@=fn(sV1OfH6>AA(B0_1XUUd@Yv_!uP?YLm45Dd;-YjSA`X6DF;?}NR`9uTN9=u5=l5YzhW2UCg{8L*oQ zP>R~{SzO3~`&G~NSjXc|8f0v6l*D7_Jn^CTKcLXbiyW4A2u{yN%Y1S>$m)Vp(N`c+ zh#;TnAQ9xZs@ZWTG$cnw!_#n01*G7}d?_)Dg^%v1f#eYjT`JU1%Jh>eb^^()!w}k# zC1)7U>%=+k%%D>oS<(T=RQQaz&@&-wDeQ>lpxZwez7v&3bPTnk=fXUW^d}*t{m`u) zc57cSD@pP0B^WQ&lZ{EzXmH2R#Z7_YS|rQkrv|*M65Q4wE1OjR1?JEx#0Nz-hLYIX zy{wv82_T7lK3`;h^)Xl=@;wX>P3S6GBc8<9h<)kalu#;F6P0F-1&gCgH+6JM9_q}x zX*Qj}9-OKA?=V;6+!Czox?Y5g6$hy_ETf*|eMduLZNjSew)d$b5tF|1((V%;zQ@;M zAjMQ)+pqT7N^7XCSUd&8vsr>2Rfn6w>Mm4DBGJaTF=q~Z8VaJxp#x~I9Pd(?t9>8( znDtZQ%@;y?ssKDo&^B?*6Gzs2P1$9Ftl~#s9UZY@a#r&Ljb*6j!}j(~sW^LqSw&iP zHHKbgWBTY?z?}gX53>LPTSo_MY*mgs&`GMlq6!I$!qDLsbXMScW61hn-fI^08m;l} zM%R#$&<;_bJ_-;V~9NYgV_mw#6BB>{6Sl{`7%9cEOmuX__! z>j&P;ab~JHh+8mSlhnWr^8jSHV5kO!)X{zKBoe=iIOE>eHb^Q_Hr_t z)t?EYl!O+~PFYGK*LdJ*5nndyNfC#ZU?!jAe;ht%-aF`FPlB2G9nG2G5D5^|n)BN! zMERWBf#r2)8svbL_gA;{N3PNDL>A<~ECHYXxvSv^tb*bk=`9Xxt7%Ou+&UUwxswL7 zf-U|r!c><~f4Cm11Ju*yH@R4uM#_!yhYc>n8(yQmDZxx#N$#_D7fxKh?FlL#7eG&5 zWm1Y^l9-ghj58FMHj4B{n~%)HtmLoP7-(BA~%Mqh0BoJ8v>b7GWN#Mw-_;(zjqVa3I#4Y3H)@bg&}1nekAX&x zraiRnU4%`i&^0!0s`d&DfCJ-iWNp%r^KN1DI?3$7NE*X~P{~k$nnxKAJ1DHh4^mIm z{0Cwt2L^vabqhb%r99*tw71zKKg#e^{35b!v#_k3l&q#>J%E*RV3N^P6GD~k{*ioX z@6f{b*FUxgt~R$1J%EV+J%{P92Ti_Pz~cy9hsT{e03JlH#^rh0Kj3EMP6JfDLpu_f zG0+ft5*ih#Fj&7a-J;?am;Vx@wqNyppZ+eE8}XL}5{A09V3&0|67Z9@`i>@;CT@kv zvD$r#Ay5&A<*awu1fOGQg2AC7PY~xll_(>wC145b?L$-&s6^af^FC@|*Iq-?!me$z zr#GZ;bq4G|*wI3Yk0%BOu!-n98fRKXZ$va6DNWq`s6lZX_Nsw|XR!b^Vlj$G3Mg~| zC>1%)bQRfe6niDnsYIuOj`vO`I)ms8&x6xA{!GPXd*3aiKBp9r3E=$CTF*4IRuP!i! z9>ZZ6c$DVZ#H~p3XG3-3OlwZ3s8{=dO0N8XTxfPOB~!C-f~P(d z?;=vC6BQ?Ebb>5Qb_Siy#z_{PHYHhn=w9B*GfzaKse_oyGY09-AMc2YobUUm^mVF+W z)b2thwh==T)WL>a(p*gOL)ni-o_GiLuf{`1l=nu{^CIK z;C~-Q!b8Bc(X)Dowx2nG_~&@~($@&H)|3*u;Vm?a9?7nFYbveYr`8^iVTX|PAJ&T4$e$H@9`k|~eBf!f7IJI*sciMNkZ+$s?bA$=ZjeQ~<>-D=m=RcU zs{ZLJooBFQKwml?OXFCkYPdEHZHJZLf@vdkj4f4thYS(p6@C#F4BdzN$j^USbWEXL z=MySZ&1#IQe|oYSxXy@tKtKN!ydCjH$c|eH^=I3(+t<;+M@*mQ_rQRJMN_s7SD{-X zgl@_fky%HqoJ!XbnVn8;qsUC;v-Js+O>voz9{FrzVS1<%sa^ZR8 z>3+l_U)`*~v!Y^>r>cItQ9Z{%Z>As-wg&-;speHQ&*A?&1$=hvf5n3u|G7{I==5YjJct;9j~cmdU|zCKGa)m)7;(+d^Z#J>7fDzYi0KpL&csI^jPw}#qy z<96ztPDRF(Hau9O4!x2(*1wqo+!x3osGIR0w&0M^t#8vMXsYV5A=2Dqx@io_(w}H6 zC>M{28He9y)V4P4GvXaDJo^S`ADQ#5S4hy+l;!-1IvEAysSg(F4*wJ+#Ga+XRqY)I zW=H7VW_nIe54n+;PDsE7nw;*yL*@cOuG&sAf{I{&h)2vWUa| z3HNL7(-cn&oeH^(VTPCa$^s6H|z$WiV~3_89B2jwFlU z8B(=d5~vr6ZzJQvdHfBb&^OdJ|HM>E1=W~?>~`k4Z1iM00fkOXQ#Cw*f)@+z*a0b1Vmsnpjz`0O|#yR4&d7^}6HS)=`*0@njN!YBlTC!ucNQ zyhS<>>F2bthv=Q4>E2cy$9bDL51q%97z5!#9x4S_Xf>NcclZ-8LZRsMRHpa1ltL$T zNfPV|Fw=}-!^O~l*c`)@pvEU}qUauz|Hpd`$n7pP(=5Pal+Zl*8^(YbGxQE7*rrp6 zk+N)!V1Eznp$Xu@`KLH%VrZ2*5*YmR)pY(K-Igfdwv%oXBu^-*tt(7dXvjh_meX}w=6yt>vmf1psMf)+%G{%_AC>1@yhnKHX^NU^jOAps~tI8TH$<&R#I7o zq0ev@?vIS?ygFZ1lEGAaJ6OI`6dP<}?NL@%j?H%-9$e8gTfB~;!nnYg_f@qQ)%YCf zOmlUX;=PBeUuJ)nR#|o}#4`C*?LyL}I^0Pcm57I$T{DLD>=G(H^+k${VOkzDR|9Ko zgZIZE(9I+~4=G{jXAMZZ-)Y7vxnlA#jYo&~ZajLDc$Rp+J=r^h-0!Jw|1IWf@VJqZ zBVMl<<-x;Du+{zzkRX3w;@jEkiv&u@-EmnwvO7 zd$Z|<*6>mybjfhn5I&F`_Gqdkr1XWxgua1J0X*4?iVEd(K=< z#R!#T8j*p{B?&u8bpOj#)x>qIz(~!W zz|@{uFgIy-hx!VW$(5Svdvv8{zo$cWVC1Q=C#c7%{ir?FbQOD3MSnj9aoUZaGrNUQ z#D--RVwS;OVivN`6tmFNqSbEmEP89y2dC372BsAY%Ag3hu*)o7xtnpYcx4SnM2}bccXBLd{n`<)B$6Fw@ycG* zm&YsRIMd^mJv*q#rce%zFX9zysT8j~Op(Kl+o^M;c!l0WSq~F|C}Ob7|H4{a)jkLv zWHSMeMlhx5sbxp(BBGG{NR}S2Ev3S@4_IlmjMNx&QkB5`^n-6b3(zw7UG*M8EfK<4SU3kON8Z!t|n(=l`X zkHvHSJZP$MAiS);f#T`w(Oae$DiKfb4P|YI1nB1myeLUVrJ7nnPkS!KqAYafE;t`2 
z&RBo?mXiNTAyfN8$1);)rN#)&cVv{hfsWJz zq>awk@l#494koXZM&Jek(g3E0YLOs-2;G0x<0AC0%pXq<%whIY^2CgjD!iu&*OhP+ z&0Eq#_%H|FIl*fcb)ZZ&AK`y$^hxw?a~Zv2U8TD)VV3DG{zxksQwI;m;OG;q#n8{% zT=Z&3hx;Gn&NBT+JZ3)Ov0i30-TF@i3N!)K&|fi(be0;rn+DX2XoBA3U=av)E&DYxMu!DD8R+~B;RlH)Q`mZpm=U;(5%@-J7W*sT_ zsq$C-T-qKiuZ<7^gg?BZUNozlAcZzOSH%m(1#}0D_BhsNNOjX(BVlUQFpUs%{`N1By)Zri|4}pJgc>v+NzP(Jb?-`V4z3 zh(5y%y99j~KEk4jLm@l6-Kog&#y}Kfs+mutrXAw(20g;3KIk8X{nHPID&D{7tOxF( zTJ2|+yh{|b1#G68KgdzcToA}~QQUMAx;ylB1C_!flnx7kr=C|g623Py78WIRDMW`R zqaGnAs|;5HvPB4j(kTcy7wiX~CvC~2DC`TSbX;EeLb4|R#f=ml+}-iKNXlpIr2d6* z%|U#C*kG{8XWYj;CRW0T4Qy-zCOK9+%()aHtR*MLl;TJHw4*aZh{$Nv{sV@SdXd`W ze~Qf}|MTQjL-(+z;6)IBEy1Ss(11v{>g!DdFJ5dv@&a{O6gKusAt`TWG7UX94hh(WZ|j-(+2g;@ZZUr z2y`_EyE;!K80>+BQ}IHrY0cHbZ>la&lT-Tvrq)MY4R6fe1BNssx3uWKE|wOvoGrkq zB-*G=c<$V3YC}U<>u+HdCR5vZG#QWm@G{nEu&KQo+eYp*t(+1^chUkXa6_3tAq54` zW*G}7^VTOH7{dHcY=2E!kw(EpDl}(y=qgxtm;VO*dg%rutnq9zMyXV_G*itAYO2SE zSAKV(nIeDDMrO^1*Q}8(n{BOv7iU4W|BQFu>-6GD0Y2q#fOLn3q`}_MVN8Zir%}5R zN-!sW0hn0%uo3Mrb{$T95_uah>Vn%g(`{Y6_=jK(nZbhFX8I*Bo=g}8FcDt7y~B$S z!I^Gj%AX^(Yzpn)LU)O&iFuhAT1#_Vx0noB#gJ~1=mQYfJrd=9pRGGTg)7k4orBoG zRoQ$O!-t;;!ncCFxtYT%xxx&$Y5l4AIglD(1vtHOYnb$VB* zMTCdFhXj&`r9lMeGFsrlIaSeH_7KZeykk8Gn-351YUr;tq1Y_2(_2&Uxf22j=Q*q` zrd!h_x9j#l#HNTV{Rh0xqBa~!sLX>y%T>Hr*fFlGw`P;|#w5;w)50R1BG?Qzso`#| z+3>Qe2|v+!E<7n5eH{Tj`n_kTYo|Hx7a*TO$u3;*~GYap_n6X~SG zKT<3l=^=M{hpQXOJz|50Y%tyQG47$?i49feIx@tq?bUhT#x*(hJn1O) z=(+Aan2aLzJbN`?%C&C6CBBp^z*O0EHciGr=4l#TW=pwO7zbO*ZNv1?mvWPs0LJkc zadf7hJC}0B2tD{xE)22yQtrfOsmP{KEu@7vv0?8BjJ`zdq9UD95B{*1HSvr)-|npT zWvtY_AuB6VpVpmW10`};*l?OYrSHdBQ)#DCX>KYF9;~mZu^cvh@D}2LmvNPa3+!RMV9^>~gEZ@+E9_*zo>koRuSBt(*0^)noG0*MGu34D>9oclKTS<*3SG^VqQ*=mOKbiUa1$&o zRZv>L@*RaCUIa?@ET^}do9KysO@ISN4&d$eatyLrHp4xcijqQy`Dq2$;zkDOu_zv!F{7|sG^YweUx~=1?!@9Nn9UmrB47dwT-Wr%? zu)c=ZuRObbP51!|2dzhz<28A_<~ZO5;(=>2)n1R7k3Z?hInov-7k?7U+g41A-&Svk!hkiRJ-3`egx=FuCWG4WvJ zZoy?d`^V~AByL0dHq-oAX3ky3+O1>f)Z-ye9f@Pg*q`Hc5`jLOrOu;iI zgunVDwa)wa4@*y!Z&*S%FnJn#sFp@12q7MdmJ?MgpCUFSzAW!K@Pi)w?9{a8aSCwI zNjX%Y8h#6n(EXKXrcw3awLE=D2M{Y_#|$3)I<*#_FGts)OXwPfouCRiMfODRzR>Sj z711UKe)o@;SSU-5wKJ~ICT@SiT9i+}n!;g*%Rfm^kK-p;&6R zlvZ$QlZ>IQ&xrO>>pGqvY&EUfg(%nUpPFpf{)=CNn`*|jW5i}AkPTQf5a)Qmz(%j7 z$UpKCFAu?poQ~KOZ;W~R;UykZ&4r+Z5qXu~`KT%802vVgFd|bpKph7FMr1e#$cO-d z5$VGLG9s^XBeK<0b22j`SHg(AY1ekx(;MySNF;mBMrXh&S(34})L(Yr_pl^S;EV>; zNK4NlN#X{$B1X`3io=weHR zt}~FxcXXohO0cMDQA)R7P!ow4Fq({E`gN49VsR_Gv{lPit?YU!qDTUS1g{ZqD6K}k zeaDDIyb!?3@BKOF`<rGugsU9$szzm1N9)xT4R?geH;A#aps41J3pkK<(lgD zx$U%1dsW4|;`dKE(+jVcJ9Q%beh&P8o#`Qqtaw4!q6YB$mej|rgR1-uxL0W@q${h| zd=`gQYo3f`?lu%wCVOt##vy>yP-}K^1%`a~u1sz!_tlysVqAD{s$d`Q2Bfc?VSGac zdCZtEMmJ6}QnQ9T-^UDVksj}9!{FH9_c{$^`MKd5ImhH9-gcx&w3jU!J~>QZaAx8e zG)XKbtbPg;OfnpIfz#e(<{%w;QF_c2)RG$e2gPhISG}!a8j&Q}g{PUVxFyl@!AA&g z$NvFY_HCfx+~yW<+Fqe?$EMd5#HL3we7YskziZd{Gg-A? 
z3D7(T?R3kCb1@2hJFxKPW{%yvHS6e= zFvGm`4pOl7U&-jqOTV3UGZypGFJ;|o-}IvyH|^x;%pLmKSpIwIc36c?7rM(!A1|88 z$w$|xa`X8{)@}0HKkGL649&PTy*b*2{wn=tCNI{);T9LQVft$R1pP0e`R5;I>-`$A zt@De5Ol>;_nR>1eWOBY&kje93fxHj@jX!<=9%N-Tiib%2-?-EwBVP0_AV$Ey-cQ(@ z=qJhluX{h&fH0cN-WOC*<~FS~y#4&(mlNCw2R{DiN|0oJ3?BO>Q*6}EvxY@?hoXEWT((iB+d~f$SqCS&Y*Kj$j zh=uF2tB6xCx2uQ=RCIT%i20dSM1-k7SOEU>?^V`L??BTDoP8nRoK-}fT}7zOIje|R z^JPVO)yXG<^+U|AAHGDoG50c>XUAu(A09y3*ZN^kvwnEo$o{jnepqYP53gB-W)=|4 zEL|3%u?T{hv`^q}EjDhmgy;d( zPqTn$K?C>Y!2LdKvXbJIuuFX$;C)$eu>tdk#JQ5c1X#7I@#2hmmJ^SIh$nUWl^I4w z_inrg4{a4Qq-)vvejj&sJrRdJan|?P0&RlZHNu?*s(`7K}azwMQX=%ygvsDD*3bngh=T2VXM~PM)vjm~%ZYgadq#UE?SG!e3Rml(3#6s#Pc%B6f98A(O4e|A;TWyB( z4c~xdVk}xXjF*`V6L-K-wP>lyELxP<;+({m_%JI9nid|f06~YTiV^RpchK^bUB`^W zsoxSI**9h*iq*7UR@jD`#H(Lolw3kqCkpT%e{4UtXEOJ~dl#H)s{nW<9WA+>` z0SI)w!A@iR@Y_n8$SkX0)szJN^_Xqfmdg~GWv=<5X6*~NeiP;;HS67C$TVOEM zhw9i;6LB*NR$Fo@SgWyKq>adUa=p$QXfIOt)!oa9H^xa`mf4GpRdhG>VE3V@y1YBq z+|!4Rj67e#%IXyb2^*0P>XET*OLBv3odi4@Jm0e^er&l=f_txE6 zS?Z*E`5IZOh5&&qbt35WP6RW5y&cVn&qHW@&RJOT`9lcUiqF3T9nHwkm_PeW`ME8s zV*!Z}q7b_F?@&RbQP)_;DuwJW<E${e*+D41KIHzGmHJt(ik2|b7wV%v&B7vwWKtL(=5WS zB{k!ld(PA6icrugGUI>ChRA9R_pyk9YsosusWNpEi~YxRo2vX7CjF|^@vQUHTr6tO zpO`u*r51^rn?>SAn%yp%tr@@g5j*35C95%9^4`+M3jpo+*ag75g2ZjpuM2`$SnLR*o_XmFK(XYDSSI?l+6i_) zTGH ztfdwG%?)B{vSg=q^njI%V+o3ti#0N? zL1jr&QMWPFybGOWFN=uXZhY(rPky0!7b-E{ea}no2sS?U(5h(q30k?Vtk~c8_;2Q7-5EXDx-X~)Gi7zX(+Jh-!#jikfxz%J(CKSn2pU-jDyBPNN3!=pi8AwT0J|fJX~tc46}1Osx4TtTqZbz) zBZ2o&Tq8 zjFwU=y62eIGlv#9Ji%2X?+#%yn=EX+7&W6g`eYYVUtw;^ET#(nNxi^^$>;3QyqkOK z>4jU=UF^sBYpEKu(ipUsYV>-3rC=xOWU4Gc4- zWpH-+aYz)Clv%NkvdxSpUykOltxWb+R=m$@NmY={=uBU2u5@ygD#hthLYAa|OENk{ zjI6DjuEFBJ6=p{(6q;pGR^Lh$O`Y*7am~4?GA))`N^c?=bAC$KDP+h!Uh%MzG{a`) zf}NK7FNx>c*f$vkZgAtixw9yqv}#!^A95a*vTG?7t#~(leS)$`y*Ttf)08R_hqhP@ z`Ukf9E`Bnm7B_2-$;`#NCv5dgU&mEVVIy(upX`2vI!x-xM`P3TjDqQE+NyfG6T%+0 z<GW|p+&%_FR+Lri-1p8TST!CQOl^&Fx`$&66Hd5lCA0Ij1}an)M@bP zw~;niagF^TncHVAih7|kHFcL*x!GaTh>$t5ksAG5VL;WXgXUZD)8ApzhV_5P4%2#n z!}_n`+FHU#^PjG)csty3FRU*%{pr~B;`yShXbKLF5Gtcj>|P^d4+*>~E7patZvl^u zu;Zr}&kM+~j=Iu zC7s&R@~<7qP<*0MGfe3}OX8J=xBaZu1)*nA*{n11SOq|-* z#%_$QOY&VEPOD;$P{pc>%$GIF=-k6;tHP~+QEVD@wm%<`@NA|U6gc*{`7?Btiz(#_?eDE`EEE&Uiyx#+bk4r&br}gdFg9$XHKD0!CBsGbb=;)Y2Bb!Z&dow1`AgjN%8yKPz7ENH zz5I{X>sWaWSo3(ReXqmu@(yH_cq24aGJzITJq^Vty>im$z}DC4bZ^m(gz<;6q$>iL z)zC17>GD3|qt(zOw$*=0`K`&hlpxd6&4NrDUj{Tk$27a?pTTY{%4Qcw@R4NSK=%Mw zB|Ue=&AH2|`B=K1JPw0f8s~*}oDcK<_$$Wuq!9YTanur$_irq8PAc==CH{A$zKr9q zU>G367#?I_Y*I+}nyzq|Z&BI2Qoq*^)8l_(_}vdz+wq>Wp3_?rP(_(+`?uU;h7wmp z`dEQMz#N01h~bvAg!eJJ%@l4a&%x*>cev#P$`phtF&G(Mj=PN`jZHUMa;bJN#Id>l ze4aG`c)k7vUr$#J@qzfL{#&+uTz@9_E#5f`wC+C(X=0lAmRt4%aE9 z5`^w_)DMED*amqZHwfk1kS_Ft!18U78*_t@?uPUWFhXSnf3QJjP5+=SGA5$Uscv}euq*Fo8IxFRdT7{e9I|z0 zY@SIclN{>pG$UwIgnp~v#}FjTRf7L@hAKpl7^?UbJsSIASJXH%4QDPx-*r9JfjK<8 z(0-?x?5kkWvJ8NxNJcHp<1Q@}0=kk#wqFuB{WDq|c|6JS(B3C9^GWoycSwzRh@kMZ2ho+ z&#mA0$^5^meh*(axLz*}V2Cn(^^vu<9oHsUa%bEPBjNK338!YUPKH#z1UfiA<=X|j z0|)Py^g#_`16QhUETiJ?KJnB{D7r&Wn3(vyP=ZOF`hBZi{+-Ur^S$HAlM6KGAI*gC zB-3VJ=d@s!`7@H8UKHqD z_fkw6S=p=F{TiT=y!aZ)3xj*r<*&pMb9HI3Wrng@PFeM{*XY;| zovq{|BVBFgX^mOzb4EjhdHp{m7FCM6vL4U*Wo)$u+>U) zInSxs(eV4~)U1(l_wac6*0}6^x4%6U%goP>uI5}Amxd z(i*qf8JE-X(G`$;9z}`q2t@ikje*@-^-d{nxM;Z#y(kb_wI|@5C*{irjm|ll}RX*I%qjEW1x$1x*b! 
z8@GYVy48rcnMYRq(OFeSDq7){_+KmDu9MxX(-3bd-@x^0m631fU~0BXTas|%Y1gVv zI_mTUO>^A+t;!|&lD>P(+oeI_{&t}GGDTyr+Hf@~;0Y%6nI0 zV0l%jOnFbVh>GjOK%*sEy^xU((KI(w8QIcS|Lx{E5gx600@fOJi?Qy|V3vU?diJBLNJtOa=-{G%nVohD^3P;` zGMPu0opUy0NB2g8B24_z1v4tSXiqu33xfi1-+XWZ%4`AtBZwnapj&DBdC&goIrTG7oH5a_{srfp?Y*ZtLDhf7dqATrxB7nWZ<^|hZ3p$IX9m`P z-_OeZWO9G-rx#|bzx?8J&uN%m|I-U;{tHh25gk!9Bk2@+|Bx-lcRr&SZ~4W*vWo47 zJq8y;)Rk-Q_-N!RD9kr^oTCJJ#2k>$p_fd5{f5k=51xC03jF3uBo+jn6D}+{_q=n@ zKesb7vg9W}y)?S4HY9J#alkW-3aF~St9zx?cS{G8B_%Xau&}Ny&RNdKKYCK#;F_&{-bgXp&Ywu3dqRjn5}$HuhbMR zw>?Vtq=wHnWvIb^&jH1hEGS2TUyh~MP>!1@hj+SZAq@@5tP8UmncTvg- z{EOr+PB}%sf3e&nQ%;HR9x3-hDJSB)0h<63{~~#Y!J~GWTa;ISiKCO<7*2DKjbI2R z;*N|~jBLQL6MJ`&P{W=xw&m{%*B`Y~(wq0pT$&u2E42$=s;@18id7&vE_w&7;WXF(gM)=} z4_JFk&<3Sc&f62#;;(m28#c6IjGU$6@u}i-VzM5PD(;TCd+e%lIETn`oY^~DXjwzQ z=1e2{tL-wYMxXxNgMUG0LHkv+Yo+!(N`z{ZfF1^#g3HA9GtB+(uLsBf|K)N*sYmF+ zOv35COl#A=CXImyJfB<0MesG6wPmZWcX_r~-dD}(m7g(`y=#dYly6UV-Kuy?i0-f1 z)uaB}#Y1HMS>=YaB2alq*kEb-Jxpm8XzSd6&=?H|FBrDfBal3c5tcCGc)MyatZCBCO|xS4#eby!xogtX%>IB`KJhBE z2rHhtyN=e6o{GK&i70wxI_z$SwQoh_ zmCl4-1u!K^-QNQkPu}nG(NndZi4T0g&V;;%<>g=FHO^)Cmqu!%kwlC$HJGWq6jm?> zTkmW;cbX!ZmuzSYyjB0sM2Nr2v@=nVfeL*nw;BJoh9U(i-b}4JO_SNY)NFKPZnNET z#crGJWR8sm&Gs_#ubS;rIKkiCY*9v#nytskJg<0TH2(evHQOL+`!URAH`I1G6c#kw zg&gWpvmFE1`kR|=Z>U<$78z!#ZO2vrvzl!XwH<3 zKhbn&HbPHK38yh=qgd4wwaG4dJKq@8Hd!Kz6=kDm;Ua%6~8V;zK)Rg*m~{Z7ZnWy4;jp*h_5PZDwRyL6AC zLEJ-eRD42@Ki2t2)Hu`;f8{X4Z?-M z9W@9y4BVlEaC?9oJ^&Zx6^#(@cj#NdJ!$oi(MH28j6`Ln5)qu0N(7;&l~6yn9)cj= zIYVS4yUr#ulSF>?MG{d8TQ5-x51V^3Bw~+uT@@Cpi+o6`hx{J2d46_zl+^D?3iV7^ zMtvs1Y)yM3=OO!vzh?EEFSi&Q^Z6Dj)AAN6n&MQ`|KoZCO z1(n^d^cFtz-IU}UV^2v+D`Nx1o6RWEvv+JB70wd4#k=i7*2=%*w2r@y*loOHvhkiJ zyyBgwc*Q$DKi;0%c(?lT#wyqHd@(*cb^XzH1_K$d6bj1E&w=&wPbHkML7Xc+L`MBOL2Tm_>x? zif|NvjDB#Nca(}fhvxO&-;}%SgN*wCxwm|naSxOGK$_JL|Ka(#@1@$l`wh8gZbyS9 zy=D438q%)6?8TQWvu+49FMVFtZ4N={-Az9{tSc`a&${7ldFcsRxAYt7QCTwQ;o zDlb--M6~f_Ub}NwYUF7%;0$k^8J;WjOtRe___jsp9SDm!&>}Q0En=8OXn0!02c(b9 zOg-*k5wBTn{hph9FeUf$HGb8m07k!SglON^ezkH4BAx$zY!6aNl*x+`iF z-|WvUu!wOPM2kg~WDu7Kky_CPd3Z1UgWx%J-je_*AGV-;NUs8xy#y%+KG9>Y&)F?! 
z^PFq17TGjRrCa!ioMkoQDoCEL4*g#HszvTEyOp7PLjesPzNfn~|IzRR#Uq+_U|K@< zQd&5~C{=aTeZa`M?n((7iI3)E7k*`?Wd$B59EGPpJEmyZbP^gyWMy@qwM)U~1khqpQ5DLKuX%rpI4Gy zRc>Jm8hYYCrlId>91WnM^G#>|2Q;*h$wCKq3%u(CD4?M~|8#JQ zUmcj@?rG@QARTW4(0?Bd9XRoCr=gP>%{~nkX8TWc1kHS!kV1ETE*+(HBLaRYnoR!= zxMO?SYVRzX&$zr6F>wFzvRb;-p9AA+PQ7WNbIIPmV43;e?R~cQ?cVO*H+vrs&gbf| zO(m?avb>{`bIn)3)5XT0j^*cZP=nJd#ZEF*I)D={BfuE3rt&U?% z9g9t3RNl`Sh4|rnpb#JGutKW;M%?|uNzI4mE#HsUL6yDoXl@h2Wc|0MEFds`zHoS`sy$F%f^l?cHBpN)~wNJH_)(jVR6*@_dsaxG9yN22>c1#3- zngFN`fVu#fDZt!SYXabUgOX!Z<|XecJr&T)pGi}uPAx4lWwR<3IPTxW|WSkWyG;p7*VvupF;1Er#uCCWA!p&*OdjYc;z?hlcJ8KM)!+LCSgev6w=z_P%+> z{N#&bSDm5OJS~5DLC8Hd1&7^n*ebT^uhSHGZ7>C{GE-m^Et+4^aTOM@?<(y^I@I)q zpx%70dh6Cw88LN|ZyW3@-U`FP9yZwO)Ezw(&-A7dYd3LrglSKr;~S>0Z!MkAYFep1 zC%5H%r2dLG3?;YbPWe}-{va7Gn-14mluj)52hFJpezTukAF6Lom+^X^usuJvi3sb* zMq&MOwiIWZT+b^dsecwDGzx8F{r7f1s%X@Y`jS%hE3m~YC2GJIHLq%Z)kHXGZryQ4 z9`mbH$OKvE&#gPBKJCw~yeF}7YA+MyTE1^;YVDo2SySt-35G2`XJ+g5XB@`-`2=(7 zpY41)Z8h#k&AhsE1n)JD;MIZ04EL&}H6XLy^!t&4{PXGlFTT$2ehA_7`~OyRJIU|q z=IQGHyGm;`zv`r%p^6ozMIScUf&I_wH`D)Hyf@%O>LT$Y)9)gsrKSsP|NHGP)EDW0 zB|{2D&`D1Jqx=^U3Hz!3hkDfIDnzXxTMyhA5-`;B8$Y(NpNdU1W0jiobVs~82Yx%1 z5muQj4&F*4{ceX;n78K+Z1%cWWWan4~ zVe`Awn?MjqmH^*XlYsfkkKhLA?RLPp(`goFo~OZL<|c8AN7V(?>fg;*H+U>Jk-L|a z42JZ6Ov=5lvsxpt=B|@*bE>?T1C_zH`!S%LWN+^yy}e3Nh2RM7}7Y1ymoiUqxzh>9N zI>kF|P5Tui_Q1|dK?Rqi!Y%zuj&q_V$(I5-8DaF7;*Q~a5_Sc`s2Upjg<7G;Sr}ef zd~A4SXmZES!&c=#*Rg%dumqjaA%Qi2jTbvp9W)|T8pk41QA5fI(azx%#Z0MYu1<1C zjafC^a;t_mjuOu zIx3(*7}Cz7Iyg+x$c`6J-PG~wsV^lq&pibTmCasWQVuEJ| z2gUg~@0e*ujuN743U;pSEZy?w-hw*6cE$z@H?ntsfBlg0*g@m& zEc)gImhD|dCJasiIvNqN=J>k}$L!E>e{&j_Te_9vVS7$e}yc$*_%=S}PnQ+%` zHB{VE+GN8Ywc~q17eL#9LL~HI^^XfB)c^%J8!?=X+J1;#6>P9|! zNQ1sN1^+yiXOMXKQN#;@&-Bh=|0pmg?2nd#D939m?HygdI#yuWH!9 z=qkFq76t_Aj4B;}m&ql3!@&qNy59Oc<1Ik8T&G}KSc_goh)4ZBJig4fVXu# zR?B(pR@zU2MGo`B0Eah*kJr(#9%$UcO%*E*i5edp50NJ`v~M@jh0C0Bn{=Yn-RaEla0{ZqwBaRd1r0y(Nr* zNK6B$i__f-Q4)lRm=tIn<5!EHsZMVhLC(oN_szMX(qqwBC(WcRrE;S*`-brs_%fMNQOTR-L zE+e&G${Y-9Z1`bDlwe0EeTA?r-UG%NgK>5s56wJ7_*cBh*?c+wb_wzzyNQzIPCi_c z9OvI40DaG#qOBrB53%HvvGzAZ=@Fz#D!nVr%M=_xpIo3r@aDu@a&iQBjkZL0arAhy zg9|C5b+JjkRclpTORTGlPv>M9ls@9xd9{6Eg%)G{{zkPnf+)Ij>tfa3sZYDb@BYs9){TGwLh(lQvo zCDzrY4@AkwB+=PAG7FYu!LlqkIt$ihLB|I>ThGZrGc(Y6*?_aMU}F|+%7XK<;QTDO zFbgirf=hj{v$YOrm^e%8nhdTh13m6TRJA(`ZpeZ?S^DK^YJ@C>FNjbB2QbV(hROF1C7c?8Jz{kWx+@m zbh2Pg7Ss(2{m8#D3(oSv&ejqjn}16NH!lNS=R?Gvp9L3Y!Npl{Nfun11(#*P4Oy_; z2RmDvfNcJ+2j0onl3}JzN9&VpEwe7ix|*zOzIDyxv#f^tb+&q$G~WqQ>TK=Hg8f;L z9LF|zzfYST;;K(dEE(@wW`gOMUkd`fVWd^76S>JdzN=+85&4P7VaAp>ql?5BK z;KD38-v{B5KsNtAcraT6l<5P*jr*cA;IV*fTQ~8yptJRLK4LDk5Rk$m0u8$aJ01v^ ztsB63&!398p@ZOO@>uXgO5AE|yt_}wBlVD~>;2lo>M(DCg-Zk{Ei6Lz=2%z*)w{^T zVsYLZag&bNt@omZ#|eHyu=|jv7rT)hKgl38Az8$DAyN-%3iJNuL<4KS^Io&5ln9=U z6i1jgs^(o`@K@A%7Yjgy`Umsf;;lbT`^*0;Fa58)B>K^hA>WtYD**pjUTXbcdFg-UCFb@vW)P$&$pg5$tV=UFgL5Pwe-IV< z;Qz`?ntlh$OQ?a_H>UMUG;gO`C8c};sR^w_2`XkIHtbG#O1AdFt~z#%;vPAj@&9u{ z$`_hUbw?o|VKp|8j^-Ibisl-fG_n-X*rbuAynj9(8Dn>HQ$w|pNP6fj@23`4$9ZR3 zxJ2+t7B+HIm4%JmRAFHwH{Ff;8_CAVO~1CVk((9>&Xt>r9fQ!sWaXxzLilpiPmVRP z<~{FDn@Wk`W2y|kjX~y3GI->sqXi&0eb;=qc>jd_l)E3NgB<3Jo|D{`b4*W%RXC=i z(s4{=xQtYyOrC{Pn{J$46S?)5%lad`Y?9dXK#dx3gr$tox+lkp5(TPNAg=N`$xx}> zb;gaa{ltbVyFhKf_@6&A3NkAE{K}S@^?U{P#g9l06p!a9!zQ zms)-KGlQjP{7K^eDY#m&rs*qo9n^C8MmPGb55lI zw<(DAXeo`D093N2v{=a5#!B z)XhqSjqy70%cbA+jZ`#<7`Sf9aR~3Ss3njQe02mnJvkHodZHh(ekBG{ ziC^Kzf11lhkrp;bL^E7}<(@-j_yK5w%}i^ChrJ%IgN`lb_NL=b<0uasJ|&{fOOLZf zxSk}miBsgY7-&MNcZC6H1`^FV_%e+V$9|Qv%Gs@&F+WCx>zums{}6* zOz$n@tDnLy^l?iA9Gh>$`kEuf`OR}fL&7&mx#YN~JK7sG@qIg2C$o3OvkGza)YPxR 
zR8KvUsh%9RdTR_+KxSPAQuQ8JP``TJ7B$9+_o)(=!^$G-w zX!!=!>sMHCMFDP5^`16q+3K121FL7uIcda9!lXiW4VJ2^I}6UsN)$8=uK&3q;ed` zjB#O3?JxCM^tA?eZZI_pCoX5ynsY}*Y0er3_ZqP5$d#OgFuGTj>ttK98#vU$ z`j~O(55FR9wfe)4d+B@lmTaefObYBnIQbvS-p|i;-edpl{Gi6vLG%+p-OjXeZx8(~ zn$u4Mc@Z;{f9<2iAo16b(@*|$ zbx!$OFFC;mPuFUrCUZDts~AJl984+jP9uzE9LF07#xYRggD?PERm<+8{&qbksG^;B)C-PAc?X4Em@M zK^B>FEt?bTuR0?A7%l13B|SP#hjIeC?9kgq^!|CJ5aw?QX9=_1G?XFCcd51^%y{xc zZv^-lj~qdZO2$2)ul7)EdKeN7$WsU41IpwugQd)rPnnH|_&ADJls+HO?4$06E^V<4 zU8-c3FoW|a9EB{MAT#g0bV zx8kL%uuj0mfYjJFRiaF<^_B0)l%Ea9yi4}Mc)>QT7*K<nz$?3 z-kv@rzw7D#j>it`W1C+a@?)t5rvtNPUO#uzZp&Np8Rh*89iCg>P+P@@tIQ5&)|ON_ z(WB9su_qNhD|$BV@FC{#Lb3cO;uV|g$Hn0a!*y;X;JaV@G(X7YyRqr!HJPo|oZOw6 z-k%*_T?2G9oWC+oMLM-~oVjr2q^>f9{5G&br1Wba+wdh$`SaeR3`D&eFd@B0tb$~) zrt!RYuR%jRhGf!jl7(Ra{wP0Oqx_ZemIeHdu->X4$PoY1qXn!q9l-Mh@QcBW=UdRg z3v=MbIq;Gkcxev2%!kFJ6FR%1lXn|I=J2TA!~OZ0A^ipUQ_}R3;ZG+3+KIDXNcVs< zPu0ZvVP@hq+{(_JuqCmr|M&c9p2qjz#-F~&dtLvf{AoC3ojcyY2!HxFdhql3(=Uqt z`}otDbaQTbch8@u(87cH6MVq1hdUY8mOU`fXW7F|2AyFKKk->doi~0gq0Gc9h9rJn zWBE?`qlRN+FumgWOZO9tne}(JaN`UE3pWoJk4Y6XFB2)f3nrDradGxHR zhs~p<-Y7am4vjnS5aZy)rYCtL=E57R`BNtrTN?9c*Wp%@-NVX!k~HydcvYe6WciiF zz!vXA<-wfeSmnT*zO%$SU+|s2vBfT&x~DnXaKKl*g=9`a(^F_L?@kzRmMtzKC_+`5 z5PWsoH$4V0D-QJ-uVF9xzeFp&)!ufKa=^%Yzo(LJ@m51gSmx60 zQd0nHdnl>%*Idf;N(U6@4v4_=nL=BfMJIlCaWD*rO(6=Q-L%XEa*3bs@h0EQ`T9UU zq#PfbV?0;CqYB2Yh1HxUvkyP&dWNyWN+Am!cd}Ehs|%BBsw>_Ix6Gybm`)mCCsYT6 zYt5iJHWceTb{s+A<;NEJU$vOrCH{A$zPvB+)tGxa23QB#FOOCn8@g(9%$@7RQpfrR zST~M_vYOY{Rabn_aE%x{r!()=ETGFmU)w{+nYP4Jw;6)gp?~I-#ma7T*=XtYbF-o78&%2-n0%KR~+AQdOn$dA`41tu zN`VgdCQqqhaa0&?`9673IJKT3759>e?L1iONV;_v-8m$O5^f2)eA_3PP9H#Z^i6(# zvYj+dKg3HbjfI_Cwtb>iH;-0)LZu_#AC46BvyBr_!AqvFP{G$>v?MU!wAlY=so?rE zm`eR)hA0!gJH-cWmC|*1WzM(rYDw~Gr+mAUe9~$E1Y0e74v}T;_oR8bc9I;lWl6tg z8k)VtyPBDXXD4cSdK4iVl0k$lv1!D(A(7v%{v1=8{AmbV?)Bd@*RZ!Ei`MU3>F!mT z4vJY%Q8wuM=Np@#py*ChT@0DpqaO&uOOhgukRaQG-ps} z=1f^=@W-@ykB%Y*tXaq1JDVs|Ecp)dNw`I03B82W8G|OQx4hqxS&RygBTQZu(>qg_ zzhV+lCbWQ(02nEtDzR=ZQW310i7U_-46q*_t*9&wUG1CMOjFnzrm#1{ z3GVH3|HT*!cao;Wz6EhJ#p+=<6mwbmW_NbEJ(|4It@^@-xvQ9Pzicg@4|>n1>Qna4 z&nu@jn~lBmaN;U67J@=9rEI)v5ql&sr^rNXIo2kuZZ|rI-)Lx}{8cCUywkpY7;oBX zr0L5uQGIl&Q}JZD_3t&y05q>Ay;%OUMw_a%tCMd?KUg)WyS8}!-<8}uhfcBv-uV_0 zo-V5LyQ-?hcbkqvlY)F9L*$>5FQ_o`#T?{|z`*+zW$GMNcyq%wj(el2W#*BLpi-`> zsRln4!(L=NvvoZA+a|K5`{wJDOlJ*E#%Tl_@Cm14d-$4znW@6p-bXMhTrGR{zcx|c zV15}`Ux|(d)YonJ!rhB`i7#%?{N2L(kK&7H5Qt@`xpay;$!F}*zgu- z=tF8VGXtB~1@;=xi*_x)BNYRmztQeF<7)X2z;5F6qAodV#vV0cSB z`H=*wlZRjl*o^J`ArF#Z-f|j0FKUtlZzx8FW4KYlpoF86nn~DHoe-mx9$#oF-*}h& zL&$5lgC#vF@xqiC&{*lnd~wg84=?k1IzS!P77!vIY%a zqx~Rb{clN-dl`1X88X~QHeYcl%WZy{7LUiB2u3=2T){3dX`!9UePEG zb#kq@`CAHU2d&S4f}v`x^1^#C*4mk#q6w--G>E9pZ2Wpp&OGYr z$s2p4v+(emg($TMZ5%{-2_O|}izYi~BolWi#E)`9Y!IR*2hlCWF*%4XA&$&Jve7s?h|JOytSsChE*5r8JzOh*TpS zH;OQ%j$+nd>nwZXar>hM*UpeKZ)q|10lj=(UnKJ;A}Vv6bJH+#CZ3_f@JdtEj!oPh z+c9F1wkDmS3lyPgg}R4sSTIk>@XCqYuubkyZCul-0KUrMuW}NH!K#x=WsA zAoIxFtg7Uj(dKvH1S^WDPLyj|)I}W>JpY5vK*e%8BSrq#OhcWK6655~;9nEC>#qsi z_SXa!L44sdi&zE+c2}rHKf9jZ=lFK2E!0M~tz1KJpOw_$!1?a9aieH;b>WQUqLK6i zr;@lwx}!FN_3^1k?DYw}77V7zYa<7N8NQ8O)DCT;U`j~mknZLX0+B|f(3S7<8Z?`@ zD`t_Ny$WoSHaCGyoDuh3TR=+r?mjh6^9u2jyzuo>9#G;&TnwFlA^EwD6CBqm z5wRy%ES6s~wa|PLOZcQh$0QRs0l*2?GNwt{Uq6wsTAhUNU|3Lz70PbR#=uCc!QHN6 zWvxk5s1<(UZdc(ub8Ie`DKUP%^>#k#c)Rv);Y{oHAIW{=5yl<9_F$7=?fvwCs_B1= zRwzX_#5z{h-tPC6$%LZaZZdtTHfZxja?hwRWD{=L&z3)-`0QhP%i)k5%W8w+G#1W? 
zR;-DJPgqkKUfD-V#)7uDkPu_m2VwRjOb1~)!YjMO_bezNnacdFrq}zXtfR@rC3v(r z)6v~iHgh?@MqNs?hNhT)-FjG8$JVKS38y9u;cTUE->!WIll;%oukGR1w`fk1xmzY2 zGYzvIYJiM|3%9;zL#)VCh*i@J^AntY$@GLkwQ{ykcGm}HAI&v1(&0^6-?3rJ>x>PN zg)%p!wtiYl1bhpo;5G=~dq#05*5XW%5h)Yo?Wrl#fjf#8i8MMk)!was*~V&TQ8$RV zn-XhN$I3tME)esIyNPAOUOaVO+`WIPz_+I^G8KM%spH$m=%c5$cdVMaon8uJ z8G<>wb_j)j4>TQa(l^Aj1N4pz%2Psb*X{>uKND)GnRehAM|V703ojmVH2+6C&4&0j zulEyUMuic`!Z+->la8{pPTZ~IXhnpTf?*u-@*bp?A=tQ@9KLo9_I!Yx`u(ZpJQ9ed zTFj9-6qga72Wu)Cvyp>5tP_Uc^Pwb*`_)!)zg>177J)OHA;A{=H-GSv;ofQ=$4!|E z@2vHpH&v2cOvTve{Yp}F_Z+Ds3;-to*C-1;wjqW6$MKY{D<8PVpy6f}h7!Jc@C zRS$REFP>WbH5pXGTEyE^*E4)VOr+D`S|?#pOZMw3xXGgcC`L@^L*(G;pdk2H1_nP8 z*~bPiCb8l#+t39;=tBmEz95tMNQE{fVaGwCm02O!+_D^wJt>>`K~ZBBVP{D=aVXry z@0|{KEWI-he#P9x>4+V*u+XAN$ajbtN3bere2jxPPO^?7-|>cmAwn3R*X5v1vJS}= zO4|68_zsg=X|AIz*D)&B0l{Sx7@O;$zL&`OoPrcX!rp@_%-ad&402p9Z;mvTl?A>x10hAon|i+@;4&|rg?v}b8M+P4 zJz?Ksd`BOrr}_?Dx8}7uBeB`qU$oDf*BVA1UzFeR!BjFGfpgB6X z4>x`h<5!dM!;K$q{OXL~tc)LS{BZXdxlOdCyj=~9AcT*d^JO1PxWz#@5pX7)#S+e9 z3D*{c>&^{lv4pc&!u15<`f|frEa5DcaOzuAJ`sfSZKF||Eq2bAn+s9pgERF0Ita{{n^$oR zuU$q{hM$RNNBrW1(3{wBp=Q#ft&be`XAb0(UY^o(Qa6^+M^Lv`0YQD4 zQ7LH146nCqyJd{1H!CE(jkQI_)9?iOnPG8A$JVpVgSb4VK)3Ro!B0>e6MG*7t$l1a zzENN>$CK!Y<3r%+wss#^AY1`B2x5_sW3C=?d?7gAQ@G5>F=dZ9z6ji~aLZ{vj+uMJ z@x|b<)icJ&ja(iz9lj@jax_ZZu`U^jW9}0eG{ghAPY5pCd z`eQzY9|nF{{kOq4x9yRe1^7Mc!$t)sGMcJZg+h#S4$Z&$I^(TU&R+Kj6n`sb8rhWv zMDaLM9M_>EKkpcEwoyJDUUk|x4a2xzBL|5OEOOjG&R6}Di?pJSyH^Uu$%9mARi#@} z>CO!KnmMWD(lbek8y?FYqKx^-^zwrcXI^xfFzK(51K!p(X1(!`E|*o>M~r_+yPDWJ zCzwHSGL&NikC2Kn z(P%b7%)t|PK^{m3deu@6t{i;sYgDYWs)=|~tInc=22A!7WKo?Y+h#0(bym%@=&HrO zEm=@%6k4Tf2@xBEh@Djud|2q$fKhQe6Q=@|0D0p^sBXUc)$fG`$`XruPU^T4#|>$1 zGmdBEFB$IeuAklkzh(*wq*$^&+WhJvvG(+k*tQn|8P8{eXUKENa-s(C5FQkP7;m=N zNC>pX@Wc>!#`Uc}o_(T;8pjj4ufR@0OM{Ou1h26?kykyoGro1XkklhQW;;>id14WG z#`s=7z8E}0`G|?d;2Gz7l11T1fZ_V@>;&WkZ;chG;^aSj2N1SC&mI0Wq<}U`AU^M3{)d)Z|B-cMh%J&xEWK zzRuuhVcqM|%KUYe|}KROC$GPVqv^QkO}^H>CL!+Ui4!<4l)wMR=`O zCx)6op|Sonz+(D&ZAuvW0%TNc)`-rHzlEor@{EUY!Hx7xy5*m{4ouvWI-oxo;Gjnn>mFvV`c zgcJ9u9#^mFf!2Q*ynhls`^;GC$Nd}=EL%n0ZWrI+dpBd4%}1k6LW?gg727qV%_k)t zRqzm%UE6!(iWRNMk6vV>yEeLOqq{b`YoojN9V^$U2GXazr-5!uVDx&FKqa9BDhMSg zF`xTwL5yOJ64-(mjU#1FIfU{4u)i&dO{T~uHF8TrjGB{F z>HFlaGHxg_FMXTbIn%>Ua_7trEx0w~&1a$+V}_sDWN2?B$&hpP_9oJ3NT)BxH378ZR7Mnb+qrT?B;=8>-gnF%I6QqF_qRG9Q$a(+clQ;8ira2_J3nW1{+ zJXlUsUdtKf9!s2L_Y=T$~=dm)GxKH!BZoVeE1!*!(TG=Gog{!7AnQFtMa$aH|c9fZPQ<}7y zQET|nB$}1Q-+Ztx3rdRdv9q#ZV*n=YP7?2A&Ih$^kNz-HTFYs42SwjNUdz=pW7fq} zx6242jA)+O(q7TThXYl-a|xgfIuoyFAO&UlG^8^zKZDcD#x|Uy;+yZCFr4SSO%v=H zPApNL%iim=znHr(XKu~eGab+QWQaCd&B=}!C3I5J5GGht@e!fraEx8<=!*Bl@s~Nl zSg|!6=dIedN=yPbcugzRYI(^8g=ATo`iJ}mO#9xiCa#pt^Cg#QG|DF`iJr4 ztCh*Sm*Q|z3sg!SzwS&d15(LjoL=rswD~T;t^iCXmXW6S_j{YvMuR3PY+cS2y_|!~%$+0d7ge(<+-1SPP zX34*O2d6bwEKy$Jd!7gD3G7U4;G+rv_61<_{>Ad-;$};X@#G9Op(@?F5TYt$y$CU< z$SLl)yr||N{x~K4iIjqCEaT6tQT&-Xnm=`8`BRHRuqZOYbt19pYsjS_ATJ5YZbA(L zwkW{zMyU^?4`0xYFW=O&3Z5QsO&K&Js$<_~YxVptjY;5>yx*EQA)gG1Oe^z=W&n-_H z$zyfgCJCG(r%K0MG1RbJUMjaD#tGWBw0K4k4ZyAS3%|2~?I;rU*@;C7_XQ!5D5-IO9r#Qm* ztTG3XzDY=4pT5mah*7Yt<38`!iixA=cEnQ`*TgH{3%5wgmviJJ`DDEOBlwbX>(h;{ z>))q9PHNf%obz~%=+Bt*Woz0mFWMvO9yjJqJ3JoZn78EtUO z46e|?4;#1${~ldrFt|2@D-JF%7~C3z8yQO;>k%$ua9sv>kbxgJa0&i>#@%h)m_VmY z&lVov8qOR1U#e6h@>FcD-8_-Jul~rbvv8OF~{+JmC#v$oj{(x;+uqz98 zXTc2tm`r$p9;O-onRCTD?fX5x!93Mct7qkkD2?|D z`@9&beI1=PTpsKF-VwsM2Rwg}uHC)Go9|-}=DqVT6=cpOgs)qm!83klDNoS=rHho+ z_(fvxyqszBeZ|{@7IbS$(r+UkF#t3iU?8k*6s+OTS)P#ITVDRaB?PClYbEgwT&8hXDBIqz`1;i=hmChiui_EfRpi5MfVTR5$2XSD zWve$bQuEnXN@b!-f~{D^tqdiX?5uPbD4ojjCzo7V=pgf71#Hv}`Z` 
zq%54^g;(-65_9!*Hj96T;_cw!&j}fI;^BKr;zNH_WcaE(6dKY=zJ;;Wsfj#y(#O7| zKEZfX>o>11h|UX@itooO-fws^XmvXrg2r1JG+wi5lzca3CICy`HM?ukPAL1j4*K2i z*I;%`sy8!Jq-cJm-?s^oykZ?^VwtA(_TMSWu*30>7B<82XO@t%!|^yfR=Y{V`@;@J zs*+wIWR7-*6Gl06^%|`;KBZSMb`BDTCfzF{&f!;}Hgix)MjXlZBp7$gq^IL#K)B^? z74AU^)BKJ>w-#<)#X$28Gx=YF4BF1El895j&EaN4`_3T^!=2<)-korWv|;7R9rUxO z@vH||+7lb+zCtI`(8{Sge9t?_rHadrOI4PkuWM(=cdM*cK`147cbaPxH=w z!`B)^Cp;-Q;FaSI;mND3#%$El?pvW;#Lavsb#Vw`Y>3%w(JLq*qam=8;!shkR-HZ? zqc0a*eV$R(|Helbg0|{BqphFgqY(-(w%RNw%OIttixhyIk%Lq-MhZcml!KIZE>Z+?0?26dYNMI^g^RD0GWZQp=?!fd}J1)mV@&q2q;@U_YkKj9ZoYY9fTLVU3nwvG%HW(+?~{C^d==zzR*DsX9`n!YM<87;^~xV(A&8Z~AX zOwXZ6OeA&CxKT(jjeK(+d)8)7890~BA`B_Qy-r(<57!7!Tf!_dnk1P9ZdVd8)y5!H zMmz57zyrm%ujwXA)zV}_u{#rERY;Qds?C7TOdCgTV=%2jzF@Xj!&USnVEDvLMQg zr71Mu4>M`M;%!!e=9-Q%1?ogA#4X{hJ|wuWtM~c*`?_WHgL^_@j;f=jbR;;8x^P*d z&dA`&8+35lor~z(RT;jglly%S>HIR!6>ZzZpZ2$h=w`<9LlV6wZQIoT_Ap(u81Kpe zdAQ}D&>wNTtDo>lEa4XIIe33(X4;30ktOp4>(?zp)|9MVGr}Tda>*j{EW+4QGEue| zKKxKgCdA7YA=_Ct$`ckbI)iu^gzK8h+-nYCr!J{WKB^}CF*aS?8|A~DW7l|kCuDHk z9%!T3KK^^T_=P0yrW?9smhIQ(GPWFy9wZwIo9~aQ&z@C-`H^y)0d(*BK68T@l&Yy8;C&VnxY&TYDt%k2W`W?Y%>wucSEu#R z*%{tQ+E3Q>l%JW<&C7ca8)}f?MzN8r?1gaBw8hf<7%j%A9x^@Hpmb6`%4a0K?YMj5 zv%!?Xlnjlg`RNLJPY&$mz)qU86BURz-!yl&35v?T&Iu10xV~wv8^sUyJYRgl_a<`yyhA5du%#GJ-7AJ`hG}?Zv&wHrYnvZS2VgIF zOxXJdmoleF2s63??_dL~4fNt5%iR`F6&5q`PS^q=L)bM4IsSFPeN0wwdHZV+L(B3q zgdIrgecQ$$K z?jbYlx_2(2gmL$w0yR-;Y(i1R!D?v_?`7JLkeV9CaJO-*_XG&{UX6m(3A?n>f2~Y# z+Uas7kko;dT&c<2hh8pZl(}s7lyog{cx7evCzbhK(TY`9j+0|ZG{3#7;?XM)cU)H` z(xX@`DU%bf2BD0T#aG+(ZN5s~ZGN4k@>fMGx-UH;*^L!XS8ornfsx;tMk~5kCM;o{64a+%Cp(dN#>+A`@R|TsaAd`S9Dt^F_TG`O@B!U`2qA$3Dp38m!>f%$IONV?;0F+?kXtetA=9gWq9Ki zZ(p@1i@1+Af3oW#J6y4m<9+`fHS?7iU^v2u{ttWa0$x{9_5UYr3K0wEM9OQ_3K5T* zaH**lZ7gU`b3)I76A92ldI6#ZEH_(ALTk$5bD#<+@FMnAd_{%Vs-P8UN!q3< zC|Fu9>|XOxQHLS z2yrGZ;%XwmzwuU3siHGS^kW?NLB1TE?>-Tq!YjvdCR>E@FBwTE6X(z-&Ftd1 zPMS%A?0NL{C4xK=i}8?|iY8ArhcbzZDvD=?o{FecpzlbNmy&=}De+M_&po@N5DNLE zMDfw&7|!8FIxc5JER8F}Xvl8*N5>kxr_+P~UFew}{8OQA-2ZT#{*L_m;o_~ysmmKU zf)H=}N>S^Nu~UjwqNZJ{O2>Jl{-}u3G=AsG!P9+VuTLtK?bpTi)*JAPtJFfijdb2i zsGe<4C!_s}0v6W%B{ENIRlIE%b|CW!$sy$u+L&-`ay%2?jhn`f$;{`@`y(AE^GsAd zkQrO-J>LFuF%EC*iz2_j_!54`PbjJ#y*)B-3-FI#6PXwAOk~b3j@#vjiOhHK&zV3( zq*00DO|+^I%3WlZhu*li1<89~tZEG8=T(OHI&&hvYEW_7zgFl{@9l^qjq8HY(mHR{ zl|+l1t@DOnNgwP(9~`MBW$NC&?VU)+$G~CNyr0uzLYT;WRI@Q`p!ZPRdow$gn40!l ziivdIf-P3fwYt42()oSDXK!m4n(vN8I@@_ow%;QJ4imF8lp}W@VoZAx9Te&OHzLPe zTVmn;ToEGODq`0gR)h#76UYez?t|<`lH7SUjlvhelN^xYC_GBL9QEQ%A1a>?IU^mL zIeNd$I5sYKh6BNn*wEw#+Q<%1|fgKss@V z11ZH*m=S6boIw*N(t{)W7B+80yZ85miABn2J6hCf#ml)j}nKzrcU#q}$0uDG0cea)~ z)g3Kh*{+gE=RLeZoG{i?1c)9 zsrR|d7D|wSU{oM_|0RudSB@2DadPUKhRo1^i?8C!d;eOaaG6mbVNOjDVHkc`z4v6~ z>Sq|2cv}q^xlSVl6{~Pi8QI9HpRB*-MM5N+mO%$N9fUg)RCmwNaM#0`6Is5{5C4f6 z3Gpjo)iYF-QIs zjc^ddD|{q!^#$bBI^*p~)3k)Q%E5jL(Ft#JJ!|=+&rftG`5Aq3!H-gEQ+oqBI&R(?}}%Ub;s9Wk)g zHt$R)!1by%JKem?j4I8X!nE5GHXPixuE=$O)($wn>f~*h@NjVNwYS6T6IqF^Sfo*Gxm)nqJ5~WPK z96)cASpdDsGm)8|n{+b_m=5X^-s82`=G}5P?^JAV-T`>R^48|M=Q|+9sz}H0`BLEh z7f%OX`)kED?_y$2ga8?&_AjH_qN<0AS6P1nnp-_;M?V&s_dH3ed!GDjI@lOc%%sA3 zqyEUvJHe2B!2ogaZX_q|S|CZ+!K<27O7^<`c{jb7VGifrrNp{h>TQMILbO%3Br!!q*b#PS{^ZG5bv9-m*lXyqTs z0%W(NhI91^%%1|B_47>*N@j+&ReKQe(Ot7OIZu}iF1sfc=^5jgkbzOnfJm^oT@UsY+?_7;`Mm@s^UFZiL|`LKu!jM*gvBo zd-4-4pLV3=6Hty^npGm&t=d$PU|J{3|K$4ApOhh_&LQ{L_`Qm z?SOT>f-*Hd7rb{J&oKDv{Z5_(;Ajz6ID(Q}$B+4Y>-a9j293=dJQJC7awO#}11BLo zXI)%FM+hZX^HixhdIH=>=im;BafWpqx*^gj_-^AlU>*OViV#Kn0PDD}L$q~#GXV!2 zqhFL=Z8V{SpyBDDmU#>$>-m>~CHq%?4CtRY4rRej#-ZdF^K*7*7f=A@6)fbf#MlxJ zd;RxTapfu3DqgUFC!jtR16J`P`d9H$ACFvJ#YUA|1`i?RmhihZXY0K;+$t{qF5K_U 
zuj4xl*YPeI-oK(D=0d885y}2fo_~KG|2jFeY8=Z`WIS#%cp@_^T*n{UV;w(&b-ai1a3vHV~0ch~Wg_E^U`YQj3MLn;RstmE(QxsE@eS!T+9xEE{n62%m* zpFbz!AFbm;LUq6*^|^I`TgPV&SjQjEt>g0hC-!-PR7OC_I)1gj)H*(zH1&G*o^TwZ z2m@6MnAY(l-+vuHoB9RN>4w*{cE=4JvRcP&UFo7ak$CBXR9xJ&WsF#{U8*5E5q1lV zH;|gQFoAU=9}HN(uOQ+Btlu+dQ18t1gcI?*8RMd?jQv!P+TZehewkn6HNXCr19BMo z1msYj4oJFyX*z3n_UAUgX4!&mpl7dFjFxJc+N^fZ^yMhNwD#)icC{|7TShmmqmc!d zT87@pf>z7Ggmp_*?+!}yz9bb1<`-_wXAXL!MiVo>=Hgf^vf$(NEvtEBTm-a^@A17? z5m_J%Cc0v5=7?&^pUvp%<)b$C1~T$}cPSHW9Oo|N98v>qR5#SPvo-muC#zvS2T{;*~>IW#F&&-$j8Q{*xO=E>W!@TGeNJxXBim|m$3Jc;_NT6 z6OViEhL|E__cS|zIWc_9J&R}*b}i3=sb6~`oKBiX!Z$5S4$Z9kVm`K&;!35nyv?(8 zVVR|W17PAX1PN_7hAX6n*XeqjwPz*<+Zw3%xbjM|q(oPyUUToJw|p96{Fzwo9LpdanicmzVE= z^mj*;A=j4QBW?zh!aUQIr-3}#uK;y!@qFKb`va3cO!Rgj7_!i2pW&1TiXv|d)5Z(3 zjp$kGy{-K*>$6`aX3vklO+h#cU7OXKO&^Dw@O1{3w5K~B%V|$HhoRBI3knI|jV^68 zA8L#LL5trU4AIx5AE|W44HZABo1cDnci%5v*1n&ZMmGHJdcM=9z=O(hbC~10-~e7V zTl4$%&T*W(qd`97nwXSW$aOIm1snG6Y=S#;=b7guEFJpoAwIa#1E>EMF<@vl>$T|V zDWvDVT(HaQyp>u}y`wZ$nt#mWZ-4?iD*_4}1pgmg3T|>A#C^n*yu==aYB4vvdGr=# z#MfKLAk>g;b4M4o%!(|#u#Sw^kTF>Mh`O#fI1Yo*UaJa{L1~vPa$EUZpHSJCT;blH zx3{Hgql;MQlJiHnd@Cc;c_XKa8uckNx)9mcblGv3caOU#}9G2<7(JmBbP5Q5>bP0dY5E9Z1t; zP#pa6agDTvF)}Pq)~s)Rns>_zW%AA3IO#31YBEc%aJ4P8UU6@J+d@TB2OYobeX2R> zgH)3@jppJ{jP{4_4MTuqRSLegMF!D=+w|}kNN-HjZX+ia!TfsL$J8&0w9hy| z$#-cq{o(Vy?X1b1a?xAKeCK`DYz z^IUjw&BFsx4McA}H|BJ*0r$f-)@XCZ6(2mTV+pTCPQ&**R_e1Ca=3vH zi#y=0UsuPt(M&k-#d2T$EmlAOE>}yh_j)KNc~5$)tf9Q3{9&vBzd}_QN;pE}= zZeE8i@K#rCsj;5AY#rxv1f+AlMkBFc!Og_3rXj~d^m^XF`Ns}9|5%J+4yngf6PpMf zC#^Vr2l#GynSesHznlkhBy%TvoBAtXas^RMr4CuW<#Df@yUA=k*zRcGJ@~S3#B0`H zGzAX|%)y@QscQwhKdK{@Np}``TWnVP#VeofJ8X@&Chm=z?|%plX;0t8Khwem+?mwP z|B9X+B)!J}>D*rk+(wVQj8BM_@Zn9kPpUEE>{F4o(B@ijHM3Y$x3_WK%%`_M!GI zMQjIr?dSQ1ai`|yL}_p+^LNrj5UF!9x{NU0FY+8z9E1_H;@R&)8y1|o4#q{U%0qnG zd$&b8D#6lR*In}oz2p&AyKU5Cb8?}kn=a*hl4k+BIz0W;d-3#%e*#bUzvUO|q!YWr zQ#^0|+lLHKkGz7|9G!LP<8Ek_ z5gO4hA@T0JMALjN$?E1H!>~C#JN*o|JY+w^ApVo3jH8tJFYW4IDVJcxJ@ zc92D9d#PwZj+5;Zff*v%2b)!?huy3S7fMFr4k5L=-~&%8x3q?nD>t#$P3)goyUM_q z=#X3B0+~xaZgQ=g=q494?So9Hht*3H)eFHs08^Mz^`T+0C;YuzR2uHzeQN>i$5(Zy?hZt8h)JoqL z><>DmyeJqM96@!yxG{ zJzCvJGUKgWJoFt097Lr2h+9;R81i!{|C6FjvNdsk*ws8=>3Irwh)+#?j_^~CoBPvY zTS>eobAl?;C}}BKZu$ErzT2~I;#)l@v*dB1;L7g0DUgaOs|3c%XTuMBR!n)@mX;Nh zbjEXZPZu{9_TgW4#iZ@72Hi=>SiovDec#<&m(IoyuH_ zzrldOG_HAB;P@doZeteQU`R6r-K=4Zc2dIm5cXuqJ%%VA4137>FdVH^1`HlB#4hsAnbi|eZ_dj|B=b{5^LBVlj~k9 zeOs^?VDkL}L~cg>E0e39PyhE!u3dlnf0JwPlS__~KiK5@!vp^Vlj|g#q5lgeSLaV{ z@iBL($;^F{rh@ua%$uJRADjpKfM>PU*|F$~2y_P%iOD{V?hK2t?B`9{byv7r+#o8D zrM1~xEPm-iB9fWAG&h5XH5b2Ybl4`V^gtX_IHLzm&{6kW4~aZGbw$iYMEfHSw1}Lt zV~BeTl^xrFi0L$=Kl6)KtB;YIol9ZaPXj<7ZVIIO%6^p>f2kAaOc~+-NQ{@a-5Wli z5~Km~E=>Yh*hV%zUZbwV)w`cn=u-d_3|B*4f$yqLOya9n1@n8!w3|$PZmJX)Jzddv zC>j?NWIKa*mRhHiL51UWE~bp^0qBKu`t$gy$;=P65$aTEsMFrDoAQEdsU0=^Q1n@? 
za{a!9JpNtNg!5i!mmZyOwSE%q^iZynC@-*#L{}<_Wq03m$S(RSm`!hUY*t}G!$U5q zS&?u_t*UWFJz9^d?KtIaR4yrNypmYUqPV}0y!Jj1^W33jUj~dn(o_Dv%vQIA(Zk=6_~|eEIv1tRpUeMcVuVMIxsCM2zh6(HUXVFj@k1$APkd{F~qe+2k*< zBc^7tUVY}(Thw1{Q7BSPOK0wM_rep2y2}~$vzR%vh$#2&!K7jZX&3Mjs0igJ&UGEN z7;~4N*EiqK(W|T6*#V!NvNzQt>3XHg6;It5qmrUd*Pp`bdHwY~FlDt&p*QlLq=*V7 zt}e)^UbKumPwZCO`dk_xNe*)kio4Lr`StE%GyEBLk+C_KQKkXmu@Fg0`ZFUE8J#x~ zAzg2j57Z^-S_V>G(lmzg=$P845x{HqHZVoI z2XH_UZq^=F+=^7UEjX1nk?B@44HzyeTO7i+=MZ-l#4icMTvLTh_3I2T?24@f%>&&L z%DXVrU0zb->bMN`*1d)Z$uVp>whRe=zwF)JnL5sGMoVJo5Q}T-M_+m@Z@>z~qhi@( zSPlf~QZg3@1{1mFmxy|r)JVAO7RkGJ(mw)YjcK9%-cn;}otih-!eQji;8Pbu~1X>jS(ix#H+ zyRRUORlJrbTyx0zMY^~zl_}}%g^!~spVX=B;Z5aOuH0Qmaka8@3d)tOQ<5CXMGSDR zc5yO=M!|e}zliQp710@WINDoToT&MUs+@(r@0m>DyH#*eQP&~Ksr9%4WUuVQYHsG} zPDKa3tRnt|(K_k1Gdko(n0X2+k`v0SZ=qx%>YZ#&XO80%pV|_XUDuT6=%pqmVobMq z<(^%HIotX+hoM_vO!(AXKXPpn)98MiM=qdGdyj)R3so_HRCv^S%I=vPJ*tbRx-jj% zgMCmkGY)5iB?h4eqxJ-U{63{||0e58wlQv`yGA~h_EtA|J$ydoQ+yuxo=EAi-HIH1 zDgXO3tiK;LF(_Vj>RN8KxquVIbE4zF#^L9oJr0aX%ahz@#qIzuf#*h#i>{219RE8UNy>`&Y&foAhwc;S&$K^9B_2@xv#r?U_B|pt;l-ng0u9+n&QtJLnwVs7}M1 z^h=PoKH%Wh4dwfnk;-R`|&QBzOt&$EHfuF^xQE&|LyEg8< zk@9dfRU#`=_OJU6MjzW#T=NXB2|@C>Z>RaA3DiIxGCkx_QOf`AB89r2EzzCmZ*nv7 z0i|{-d7q^J8zq5iB|qrdK7N%(c4hZ>M|53uAP8{G;aL74WxA+YH>m%HE&zc~k{UGW ze^0&K^YWyJtpia5M){UPc1>JvXz|-AT`F{C_v{gDlAOW}?e8jV|5Vx(M#CTWg`Q0l z595DM2|wQ~qKURbH#_B`>|8juk1c)%A+gehay*- ztNC-AR`k4l;u>bj+Q^lUqPtkxJ!wjB&o`$$p^0P}SH|ZwJp}+K!c|E-=h5RTW;6F~ z+~0HV@{L1!)=qr2XU=I)+Pu4A0$ng|{3AWvPg%<~4p}B)kNdD0wWW%Gp=Z;S;W3Im z?J4d(c)n-n8Ea_<;il)vywYX99Ld>Ic1F*Sm0D(7Dyg|C5qq1S#LKl!tsy6BS>8dn z&EMBPik=yxb7zluE;9cu#MZPwquDTnwL`NC48SuJryIZn^X+8x3v4`QH3czUyuShh z%OkTS!QF_Jk@>w816|hF+~cQPmx}R+v8lu#e;|vS$k3Uw(u9{R!ALKT-ZSCpRDbi( z`kE^>86yjRU>rUxJySR3J>fTwmPL_mxW+kXe^oW^+afBISuHvbHljCLISH$UIU?42 zuZA0NnDR+)Hm+bhVnTBLDZH>Cd_x$vb!vkM0$(MuqmP+yFX-96xJ{jfuA(HC{WLS5 zeqzcVRe7Bu&K7R4Akq{)+ef?<`C%8>G~~l1&OBJzJ``rVD5|THz>PP|^dmA3#qWqA zeb?slWeE|pg?!R3Z6EP=VZJ|+8kv`24p8Q2UiiM{-58laS1~UChRA%+<_Jy@?qmKv z!PMOfB3%^|7V)x!X^3`gcGa`QUKcxGv2)69HMUPl8+mhF4__}QpHx~m8^=W8c4LAF z+!`CNFL#o63IRMtLFeUH%{p8x{u)E8F{ec%9p3>W+ZBI=o=^L?TF>+C;pxL~wGkma zwS=F^ZgnMeJgUl#JWn1bL4v?sO@|Bd`c`&4Uf;?l$M`w9(mVbL??Tw;y3uY>>uSSi zQqM@o7G`tOyzxN|aUcTD&0%n5mhj@;VnDi&lYzc>JfeVZRXm;++-V@?z)V%W2_$k< z-*N!HMbh4;;3^m((fQ&(r@g7QY9{pX7tSE z3YBIM0RIc*86FGPLx0ln%T_FWXcu|YepsxZD>izS9SjAau-NZb=8CPfVxv}U)QXJ` zk7XaEp+MXth>PegK`b6r5O3irh&S>yh*h^i+@*jZR-=Fz`AvbjOpdTsx4TwpCbcPP zXW5ywO6_T;gelz3xo_AYtCa>>tu)ALr9m6@*&G_=YNbJ4icy1HtppY+W^OcBYcyAD zG*>HY47Ji+YK>BBcCkiicGY^dw>)fbl&9KT%2VwXMAlw`WbGA9*4~W@sJ#Z1+S{cV z5f%eVbZ3!b=9Ia!(*$xr?b#I<+W_O$A$a>9H?{^4U10#x6$TJp;Q*p596)r10i>?j zsLHkzZ~)O2U5eRB9tRL#EmF*!XbzzF?Ii;U&Y>}TqD=1SAps(c5FqLy0iqrXJH`!` z_0UGuA&f9M)I(j0Q4cvd{-ut24w5CL7aX*5jR|0ah0ZZp=$x=mazHleezM1hy8ku5kp32Si(M{rS1SiTEZd=q`iksu@Gs;dAg?7)`XxdTCa&kD!>v$vBa!WG_32-j+%lAHp^}mJXFrD>U=l4&qdEuY z40C>K{6f@dbLW|)%Hzj%zlV;4U!u8=-G!rAbTa-P(xuk|mahRil zXH^FQu8Ots^h8i*L#>*PW;Ghks-y|&n7lbjnWCGbf`k9)>ZnlFH>dzc%?e@^RglOo z_JuQ;+g2FA6b7ZDu&5Qr1Qz8U7Ry#*ED9!c2dkH$HvEFA4Kb(DA`HXB6aQ&!HV!hw zOc+8$MNGN8RYBA5$lu2p>Ts_ume%gs=bB#=lk0TEbF|0u(-drDi268q9YT{G zW`NA<`}NuA^USev&XF-beJ+2+)3B2gc_X;E@i;pa=n3nuu3>?b@B}5m9nhaB#0!Z!)GXdw#8F%n*#hvMLI2bLgUzf z#dR9B!)$T*GsGHI1-2`VYeZ>MWv{P-M(bmqqjH5b_UnI>%W@9xge=@45ag^4&CF2V zOPyEczCjYXvpRVvzCtRqZyGH}qo*~c0&$6{)riIhI=6^OoXd)ZWCIx;iM*vH#Wv26 zr`*{U{7j4S)2vxpdjvnpYJM6=@grx!WlcvXyxFy&Kmz}wr4600wxF8Z985vuo`mU{ zfhbh`9^hB+1^AktNc(329@ndPpa?M6tH8(=ww=-A#fwc=BhoRzjn>H@ z9C9VWlMx=bD>cUfqip8&cr|Ezj21=VhLxFiMdzw<=@_kxo>xiGDeW$f9g*M$h 
zF*NH_t0Nt4;Nu=1^%j7C!FrpP7owl)F(Bnp4;OK8y%OQn3qKGFfH1Y_)e_LaHFP(f zGAa=|4x#E6Ym>xY|CA)P-f5nSNH{+>3@Rxi$IF>&2mHWdy{?_YU97Mo)kNKHUWE`N zmiIh~0$~m$Pc3wgSbT2EctAJO!8iPwFKY%dY8|nA2}T+SLm4ah9Zw%@)umC`j_Lka zu{j#hx|%xwedH=BUOEgislT-J#Ye(+qH8~a0mT71Ajr`d~l)4Cx zSpCLD40RD)nX$%2L|p_|W!&W=hACp1=viC`_wNF>`b;AR>nn6ffXhRw9;nY8|8^X> zq~jQy84j~RX71aG%r|`4F@4A-m&l3ouwRe7ANAJskM}%$;hG9Er`nfCx%25W&-FZe z;ZvnO*$W@3(Cbq@PrSLhC;Mhs&%@?AdYQmWrO&AL;6Og0Fj?4YNG) zCbl4tU>q2xCTi}Vc@tUImlprE=XFY?Y_dP`=9-?@De>7i|4eo)%9K6(2o}LRF72j? z-S`@ebUY5^$*B(|rw+YFahxboqp$9=== zDyN&PQ^gP0wH>Vk`(?nOOwTKkiWS~SI9F^o6@_k7Xp2I(EA*y9 zixt|g(4Btc+g{@mY39t*Db?PWXz)&F5Z8n3?ACNA3i>*Q<$L=>Mp;gYW`C%S& zJ9K=cP0i!*Z0DEJIS7&2F*aiWJAa>DK7+lMMdMW#CchqJIcVy^f{GS!(1WvVp9_`)s|Ms;BOF*7Zm_iW zm8FwU+r2Tj-?Mv7-==(ZXD{5+KE1TK+KH%Ie1;^Q3za$X__5J}s4|C`vk z58DoCq@#*xzpkV}kr!n&U?l4*!)HbKEZ37=&f!V_SXbMANeRdJ z>XVnwGToqGU#nQ=7;{P6(#$olzP7g>cy^ENt!Iac*-#H_uZNHc@aw~7)En5mSA_9l zv+AqEcVm~icR>W1j%>4N&fs|Xc(rplbxnQOh4I1RQ1~onF`>VO2`6*yNc$_JvDQCm z;l!FOGW==dqAm?B6g4)H8GdgOQXd~PC}kv;3+^ma9M2miUq zI>|Cd_rI-08Kyi;-~0(9&+#;;uj!zZ_zVE)3-l3vQ`QmJwPK<9nDsA1)ZWcL`Uoln z?eILVw+ee3qnFaj*^lsc_QJ^ptrPyfXMQs5n}f%G;^yLCp;SqK&$*bNU1w9jzo1m} zigP>6sRNx=0dA!4TLT&e?=Y!-{}F=8-gf2FAHj_lu;BK5a7#WIgu%}HFa1=~zg8ly zCfpwLtz9Kw*2k%(a>0;kuh4^YL_>n9w+Qt1+sGimq7JYRB{Oqp1m6;=>9sUfT(Ut;vopU(8daGQ-}@xUqX?SADDq+GkqZQ~nV#tr0^fgM<*H?CDH7qe}hog=S_ar)fjl|6D`O z3oVUoDfXwQ&FT>b@YTz0Vu2u5tERXaC_2qcTRUSm^XuYK7I)0+{EO%vf{6yTjjIl=rwc-x`Y9z^NO}f2}QzVCDgw5#T6F zFb~?qC)`qlzVYN*4Q+~xL?fM9v8R3seuA1r(F<)08I-7cWaLwQ&jgqMJ8amHU=9zM zE3r#IRiJs4~00G2~~A{}zzo4h+WOKjux2!RG5 zcZiVMG*C5RDQ-xLSERgsQ{EYx9VyP4q-$Pn{Z@83WvRc?au_n17h&ADtdzZMzqXFn zWZ28L>p?cuxouZ7OEA&+)kueIuVk9=vhW*&MolC)-BgzFJ)CBTpEX)IgPrv1++{S&~kN7Gf9!n~HQ%BFM~U-yndshWpo&2T0!+a)`r z<#MqXeI+Sz@qT&^KWFnuDx4vTFMc8I zeVVGHS!p61f8eU3*Mmc;iu|o>`W~ZSf&*R6*%J+Um_}IPEeEP#r|~v(Sw;?T!1yL* zmav|BAbS9LgOC59u#5Mge~0GjpYf?fg7a<^YUmjGGp#S@mg%w_`D-B6=w(0slYH0D z&$PU^yifey^4>Xun{~o9LL@jY+~6G^j+H6pf;dh zhe-PSrfUNHOB#l}tfja->65r$(x1U&W+}+pGGt48yc8{v8nQf^s_AKcOhq*KpJQ!K zd$0HNTkqfgEbYgh+PWY!>I)4h3qRcVMfj{J!IsaZz!lB(*Gz)v-amg|rR{E1a_h4> z{BH0+-;cD7Oy`vM`38DZblxsO!D|mFn>|}?%^cG>4VmK?*MV@k@9hewaiaVF39L98 zMNS)XkdaaRqK)EexBkLe>@R&DEOo6Dpxe2zNu&6DDpSHC#c`z!THc`Nkobf&N8Gu| z;bC{^!F$8V1@xQ2@?6j`tzl~Ws2FdeCTF`OC&rl_-S7bpxvdZnul9ACNtwoQ&rvE z_&xaeOyWTX6uZ|a@+3~d0e%dC?b51jYdko-F>5d2Jv z*7MTEf6IaJq@VzV^8o?gN7`@BFM@du-l&+t)PR*^G%H26zE8nm5)=*3Dwk18i7MWz zRB?|%mu0ya-1mcU{)QxfTW}N8cP|yc@jcesE3tKRo&29*|4C=jvA-fdKimuR=8^B$ z$<{xc(^b%lopAO;`q{7+V+V&Brq2H%FR9*DUDk$e!DncLiCWeW2#co@(#4x29UeV~ z6v+wZ6JT|w!#|!-OSsWL83V=>k|@3Qwae)^9!fU$-^<1|GP@u`b?~g?SfddTym6I0 zt9e%Qgu&;$3^TNJOMC6GVoK7c%{713%<@l8f+m>Z)0qM$>F<~Hz#Qz`Fl|lO;GlY) zroY$MhowI|W~Y)LNqj&yIG|=0o#s>x4fwelAciiE1uM7dd4uErxDu}pi|TQuUL7Xt z5LsLRduP(|HDqL626Zq>YcmjW2B~Re5tL6#(2q<=C643V6HBkd|i+1(aHCmXq zL2_wZy#2Bg_R=QM&%JdvKUrhN`CcpHR{&<`M6y_`1-M#nZN)Cr@YQOi)ci4z-N6is zP*Y&SO}mg|#(A1{foFxF;aRRH+J;ZlUuZh>2sJ?85w96na@ngX|FUE%!yuYK-*=UR zQ_yr?WH1(p+8MC3!8;<>Q2e+2B-vy3#+Nlyxw)k49p^|CHwaz}mFXYkYzzi8l{u{$ zHTq|ge+;D<9Q1JK>M7c$U^zUDDXiVjZ}HxDSja~%&1ky5+QMxKn}Z^N2|f%a;~RNi zs{6$u*-zp6^#v2~eG-10^#$8n))sDFgA;zhtuMx! 
[GIT binary patch for 3rdparty/line_descriptor/lib/liblinedesc.so (Bin 0 -> 209880 bytes): base85-encoded payload omitted as unreadable]
literal 0
HcmV?d00001

diff --git a/3rdparty/line_descriptor/src/LSDDetector_custom.cpp b/3rdparty/line_descriptor/src/LSDDetector_custom.cpp
new file mode 100644
index 0000000..9f90e9f
--- /dev/null
+++ b/3rdparty/line_descriptor/src/LSDDetector_custom.cpp
@@ -0,0 +1,328 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ //                          License Agreement
+ //               For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2014, Biagio Montesano, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ //   * Redistribution's of source code must retain the above copyright notice,
+ //     this list of conditions and the following disclaimer.
+ //
+ //   * Redistribution's in binary form must reproduce the above copyright notice,
+ //     this list of conditions and the following disclaimer in the documentation
+ //     and/or other materials provided with the distribution.
+ //
+ //   * The name of the copyright holders may not be used to endorse or promote products
+ //     derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#include "precomp_custom.hpp"
+
+//using namespace cv;
+namespace cv
+{
+namespace line_descriptor
+{
+
+Ptr<LSDDetectorC> LSDDetectorC::createLSDDetectorC()
+{
+  return Ptr<LSDDetectorC>( new LSDDetectorC() );
+}
+
+/* compute Gaussian pyramid of input image */
+void LSDDetectorC::computeGaussianPyramid( const Mat& image, int numOctaves, int scale )
+{
+  /* clear class fields */
+  gaussianPyrs.clear();
+
+  /* insert input image into pyramid */
+  cv::Mat currentMat = image.clone();
+  //cv::GaussianBlur( currentMat, currentMat, cv::Size( 5, 5 ), 1 );
+  gaussianPyrs.push_back( currentMat );
+
+  /* fill Gaussian pyramid */
+  for ( int pyrCounter = 1; pyrCounter < numOctaves; pyrCounter++ )
+  {
+    /* compute and store next image in pyramid and its size */
+    pyrDown( currentMat, currentMat, Size( currentMat.cols / scale, currentMat.rows / scale ) );
+    gaussianPyrs.push_back( currentMat );
+  }
+}
+
+/* clamp lines' extremes to the image borders */
+inline void checkLineExtremes( cv::Vec4f& extremes, cv::Size imageSize )
+{
+  if( extremes[0] < 0 )
+    extremes[0] = 0;
+
+  if( extremes[0] >= imageSize.width )
+    extremes[0] = (float)imageSize.width - 1.0f;
+
+  if( extremes[2] < 0 )
+    extremes[2] = 0;
+
+  if( extremes[2] >= imageSize.width )
+    extremes[2] = (float)imageSize.width - 1.0f;
+
+  if( extremes[1] < 0 )
+    extremes[1] = 0;
+
+  if( extremes[1] >= imageSize.height )
+    extremes[1] = (float)imageSize.height - 1.0f;
+
+  if( extremes[3] < 0 )
+    extremes[3] = 0;
+
+  if( extremes[3] >= imageSize.height )
+    extremes[3] = (float)imageSize.height - 1.0f;
+}
+
+/* requires line detection (only one image) */
+void LSDDetectorC::detect( const Mat& image, CV_OUT std::vector<KeyLine>& keylines, int scale, int numOctaves, const Mat& mask )
+{
+  if( mask.data != NULL && ( mask.size() != image.size() || mask.type() != CV_8UC1 ) )
+    throw std::runtime_error( "Mask error while detecting lines: please check its dimensions and that data type is CV_8UC1" );
+
+  else
+    detectImpl( image, keylines, numOctaves, scale, mask );
+}
+
+/* requires line detection (more than one image) */
+void LSDDetectorC::detect( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, int scale, int numOctaves,
+                           const std::vector<Mat>& masks ) const
+{
+  /* detect lines from each image */
+  for ( size_t counter = 0; counter < images.size(); counter++ )
+  {
+    if( masks[counter].data != NULL && ( masks[counter].size() != images[counter].size() || masks[counter].type() != CV_8UC1 ) )
+      throw std::runtime_error( "Masks error while detecting lines: please check their dimensions and that data types are CV_8UC1" );
+
+    else
+      detectImpl( images[counter], keylines[counter], numOctaves, scale, masks[counter] );
+  }
+}
+
+/* implementation of line detection */
+void LSDDetectorC::detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, int numOctaves, int scale, const Mat& mask ) const
+{
+  cv::Mat image;
+  if( imageSrc.channels() != 1 )
+    cvtColor( imageSrc, image, COLOR_BGR2GRAY );
+  else
+    image = imageSrc.clone();
+
+  /* check that image depth is 0 (8-bit image) */
+  if( image.depth() != 0 )
+    throw std::runtime_error( "Error: image depth != 0" );
+
+  /* create a pointer to self */
+  LSDDetectorC *lsd = const_cast<LSDDetectorC*>( this );
+
+  /* compute Gaussian pyramids */
+  lsd->computeGaussianPyramid( image, numOctaves, scale );
+
+  /* create an LSD extractor */
+  cv::Ptr<LineSegmentDetector> ls = cv::createLineSegmentDetector();
+
+  /* prepare a vector to host extracted segments */
+  std::vector<std::vector<cv::Vec4f> > lines_lsd;
+
+  /* extract lines */
+  for ( int i = 0; i < numOctaves; i++ )
+  {
+    std::vector<cv::Vec4f> octave_lines;
+    ls->detect( gaussianPyrs[i], octave_lines );
+    lines_lsd.push_back( octave_lines );
+  }
+
+  /* create keylines */
+  int class_counter = -1;
+  for ( int octaveIdx = 0; octaveIdx < (int) lines_lsd.size(); octaveIdx++ )
+  {
+    float octaveScale = pow( (float)scale, octaveIdx );
+    for ( int k = 0; k < (int) lines_lsd[octaveIdx].size(); k++ )
+    {
+      KeyLine kl;
+      cv::Vec4f extremes = lines_lsd[octaveIdx][k];
+
+      /* check data validity */
+      checkLineExtremes( extremes, gaussianPyrs[octaveIdx].size() );
+
+      /* fill KeyLine's fields */
+      kl.startPointX = extremes[0] * octaveScale;
+      kl.startPointY = extremes[1] * octaveScale;
+      kl.endPointX = extremes[2] * octaveScale;
+      kl.endPointY = extremes[3] * octaveScale;
+      kl.sPointInOctaveX = extremes[0];
+      kl.sPointInOctaveY = extremes[1];
+      kl.ePointInOctaveX = extremes[2];
+      kl.ePointInOctaveY = extremes[3];
+      kl.lineLength = (float) sqrt( pow( extremes[0] - extremes[2], 2 ) + pow( extremes[1] - extremes[3], 2 ) );
+
+      /* compute number of pixels covered by line */
+      LineIterator li( gaussianPyrs[octaveIdx], Point2f( extremes[0], extremes[1] ), Point2f( extremes[2], extremes[3] ) );
+      kl.numOfPixels = li.count;
+
+      kl.angle = atan2( ( kl.endPointY - kl.startPointY ), ( kl.endPointX - kl.startPointX ) );
+      kl.class_id = ++class_counter;
+      kl.octave = octaveIdx;
+      kl.size = ( kl.endPointX - kl.startPointX ) * ( kl.endPointY - kl.startPointY );
+      kl.response = kl.lineLength / max( gaussianPyrs[octaveIdx].cols, gaussianPyrs[octaveIdx].rows );
+      kl.pt = Point2f( ( kl.endPointX + kl.startPointX ) / 2, ( kl.endPointY + kl.startPointY ) / 2 );
+
+      keylines.push_back( kl );
+    }
+  }
+
+  /* delete undesired KeyLines, according to input mask */
+  if( !mask.empty() )
+  {
+    for ( size_t keyCounter = 0; keyCounter < keylines.size(); keyCounter++ )
+    {
+      KeyLine kl = keylines[keyCounter];
+      if( mask.at<uchar>( (int) kl.startPointY, (int) kl.startPointX ) == 0 && mask.at<uchar>( (int) kl.endPointY, (int) kl.endPointX ) == 0 )
+      {
+        keylines.erase( keylines.begin() + keyCounter );
+        keyCounter--;
+      }
+    }
+  }
+
+}
+
+// Overload detect and detectImpl with LSDDetectorC Options
+void LSDDetectorC::detect( const Mat& image, CV_OUT std::vector<KeyLine>& keylines, int scale, int numOctaves, LSDOptions opts, const Mat& mask )
+{
+  if( mask.data != NULL && ( mask.size() != image.size() || mask.type() != CV_8UC1 ) )
+    throw std::runtime_error( "Mask error while detecting lines: please check its dimensions and that data type is CV_8UC1" );
+
+  else
+    detectImpl( image, keylines, numOctaves, scale, opts, mask );
+}
+
+void LSDDetectorC::detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, int numOctaves, int scale, LSDOptions opts, const Mat& mask ) const
+{
+  cv::Mat image;
+  if( imageSrc.channels() != 1 )
+    cvtColor( imageSrc, image, COLOR_BGR2GRAY );
+  else
+    image = imageSrc.clone();
+
+  /* check that image depth is 0 (8-bit image) */
+  if( image.depth() != 0 )
+    throw std::runtime_error( "Error: image depth != 0" );
+
+  /* create a pointer to self */
+  LSDDetectorC *lsd = const_cast<LSDDetectorC*>( this );
+
+  /* compute Gaussian pyramids */
+  lsd->computeGaussianPyramid( image, numOctaves, scale );
+
+  /* create an LSD extractor with the given options */
+  cv::Ptr<LineSegmentDetector> ls = cv::createLineSegmentDetector( opts.refine,
+                                                                   opts.scale,
+                                                                   opts.sigma_scale,
+                                                                   opts.quant,
+                                                                   opts.ang_th,
+                                                                   opts.log_eps,
+                                                                   opts.density_th,
+                                                                   opts.n_bins );
+
+  /* prepare a vector to host extracted segments */
+  std::vector<std::vector<cv::Vec4f> > lines_lsd;
+
+  /* extract lines */
+  for ( int i = 0; i < numOctaves; i++ )
+  {
+    std::vector<cv::Vec4f> octave_lines;
+    ls->detect( gaussianPyrs[i], octave_lines );
+    lines_lsd.push_back( octave_lines );
+  }
+
+  /* create keylines */
+  int class_counter = -1;
+  for ( int octaveIdx = 0; octaveIdx < (int) lines_lsd.size(); octaveIdx++ )
+  {
+    float octaveScale = pow( (float)scale, octaveIdx );
+    for ( int k = 0; k < (int) lines_lsd[octaveIdx].size(); k++ )
+    {
+      KeyLine kl;
+      cv::Vec4f extremes = lines_lsd[octaveIdx][k];
+
+      /* check data validity */
+      checkLineExtremes( extremes, gaussianPyrs[octaveIdx].size() );
+
+      /* check line segment min length */
+      double length = sqrt( pow( extremes[0] - extremes[2], 2 ) + pow( extremes[1] - extremes[3], 2 ) );
+      if( length > opts.min_length )
+      {
+        /* fill KeyLine's fields */
+        kl.startPointX = extremes[0] * octaveScale;
+        kl.startPointY = extremes[1] * octaveScale;
+        kl.endPointX = extremes[2] * octaveScale;
+        kl.endPointY = extremes[3] * octaveScale;
+        kl.sPointInOctaveX = extremes[0];
+        kl.sPointInOctaveY = extremes[1];
+        kl.ePointInOctaveX = extremes[2];
+        kl.ePointInOctaveY = extremes[3];
+        kl.lineLength = (float) length;
+
+        /* compute number of pixels covered by line */
+        LineIterator li( gaussianPyrs[octaveIdx], Point2f( extremes[0], extremes[1] ), Point2f( extremes[2], extremes[3] ) );
+        kl.numOfPixels = li.count;
+
+        kl.angle = atan2( ( kl.endPointY - kl.startPointY ), ( kl.endPointX - kl.startPointX ) );
+        kl.class_id = ++class_counter;
+        kl.octave = octaveIdx;
+        kl.size = ( kl.endPointX - kl.startPointX ) * ( kl.endPointY - kl.startPointY );
+        kl.response = kl.lineLength / max( gaussianPyrs[octaveIdx].cols, gaussianPyrs[octaveIdx].rows );
+        kl.pt = Point2f( ( kl.endPointX + kl.startPointX ) / 2, ( kl.endPointY + kl.startPointY ) / 2 );
+
+        keylines.push_back( kl );
+      }
+    }
+  }
+
+  /* delete undesired KeyLines, according to input mask */
+  if( !mask.empty() )
+  {
+    for ( size_t keyCounter = 0; keyCounter < keylines.size(); keyCounter++ )
+    {
+      KeyLine kl = keylines[keyCounter];
+      if( mask.at<uchar>( (int) kl.startPointY, (int) kl.startPointX ) == 0 && mask.at<uchar>( (int) kl.endPointY, (int) kl.endPointX ) == 0 )
+      {
+        keylines.erase( keylines.begin() + keyCounter );
+        keyCounter--;
+      }
+    }
+  }
+
+}
+}
+
+}
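A minimal usage sketch for the detector above (not part of the patch): it drives the LSDOptions overload of detect(). The option fields are exactly those forwarded to cv::createLineSegmentDetector() in detectImpl(); the values chosen here are illustrative assumptions, as is the file name.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include "line_descriptor_custom.hpp"

int main()
{
  // 8-bit grayscale input, as required by detectImpl's depth check
  cv::Mat img = cv::imread( "frame.png", cv::IMREAD_GRAYSCALE );

  cv::line_descriptor::LSDDetectorC::LSDOptions opts;  // declared in line_descriptor_custom.hpp
  opts.refine     = cv::LSD_REFINE_STD;  // LSD refinement mode (assumed default choice)
  opts.scale      = 0.8;    // internal LSD down-scaling
  opts.sigma_scale = 0.6;
  opts.quant      = 2.0;
  opts.ang_th     = 22.5;
  opts.log_eps    = 1.0;
  opts.density_th = 0.6;
  opts.n_bins     = 1024;
  opts.min_length = 30.0;   // segments shorter than this are discarded

  std::vector<cv::line_descriptor::KeyLine> lines;
  cv::Ptr<cv::line_descriptor::LSDDetectorC> lsd =
      cv::line_descriptor::LSDDetectorC::createLSDDetectorC();

  // scale = 2, numOctaves = 2; an empty mask keeps every detected line
  lsd->detect( img, lines, 2, 2, opts, cv::Mat() );
  return 0;
}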
diff --git a/3rdparty/line_descriptor/src/binary_descriptor_custom.cpp b/3rdparty/line_descriptor/src/binary_descriptor_custom.cpp
new file mode 100644
index 0000000..de00798
--- /dev/null
+++ b/3rdparty/line_descriptor/src/binary_descriptor_custom.cpp
@@ -0,0 +1,2754 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ //                          License Agreement
+ //               For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2013, Biagio Montesano, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ //   * Redistribution's of source code must retain the above copyright notice,
+ //     this list of conditions and the following disclaimer.
+ //
+ //   * Redistribution's in binary form must reproduce the above copyright notice,
+ //     this list of conditions and the following disclaimer in the documentation
+ //     and/or other materials provided with the distribution.
+ //
+ //   * The name of the copyright holders may not be used to endorse or promote products
+ //     derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#include "precomp_custom.hpp"
+
+#ifdef _MSC_VER
+  #if (_MSC_VER <= 1700)
+    /* This function rounds x to the nearest integer, but rounds halfway cases away from zero. */
+    static inline double round(double x)
+    {
+      if (x < 0.0)
+        return ceil(x - 0.5);
+      else
+        return floor(x + 0.5);
+    }
+  #endif
+#endif
+
+#define NUM_OF_BANDS 9
+#define Horizontal 255  //if |dx|<|dy|;
+#define Vertical 0      //if |dy|<=|dx|;
+#define UpDir 1
+#define RightDir 2
+#define DownDir 3
+#define LeftDir 4
+#define TryTime 6
+#define SkipEdgePoint 2
+
+//using namespace cv;
+namespace cv
+{
+namespace line_descriptor
+{
+
+/* combinations of internal band indices for the binary descriptor extractor */
+static const int combinations[32][2] =
+{
+{ 0, 1 }, { 0, 2 }, { 0, 3 }, { 0, 4 }, { 0, 5 }, { 0, 6 },
+{ 1, 2 }, { 1, 3 }, { 1, 4 }, { 1, 5 }, { 1, 6 },
+{ 2, 3 }, { 2, 4 }, { 2, 5 }, { 2, 6 }, { 2, 7 }, { 2, 8 },
+{ 3, 4 }, { 3, 5 }, { 3, 6 }, { 3, 7 }, { 3, 8 },
+{ 4, 5 }, { 4, 6 }, { 4, 7 }, { 4, 8 },
+{ 5, 6 }, { 5, 7 }, { 5, 8 },
+{ 6, 7 }, { 6, 8 },
+{ 7, 8 } };
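A hedged aside on the table above: each pair (i, j) selects two of the NUM_OF_BANDS = 9 per-band descriptors; comparing their 8 floats component-wise yields one byte, so the 32 pairs produce the 32-byte (256-bit) binary descriptor that descriptorSize() reports further down. A self-contained sketch of that packing, mirroring binaryConversion() below (the function name here is illustrative):

#include <cstdint>

// Pack one byte from two 8-float band descriptors: bit i is set when f1[i] > f2[i].
static std::uint8_t packBandPair( const float* f1, const float* f2 )
{
  std::uint8_t result = 0;
  for ( int i = 0; i < 8; i++ )
    if( f1[i] > f2[i] )
      result |= static_cast<std::uint8_t>( 1u << i );  // same weight as get2Pow(i)
  return result;
}
// 32 pairs x 1 byte = 32 bytes = 256 bits, matching descriptorSize() = 32 * 8.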
+/* return default parameters */
+BinaryDescriptor::Params::Params()
+{
+  numOfOctave_ = 1;
+  widthOfBand_ = 7;
+  reductionRatio = 2;
+  ksize_ = 5;
+}
+
+/* setters and getters */
+int BinaryDescriptor::getNumOfOctaves()
+{
+  return params.numOfOctave_;
+}
+
+void BinaryDescriptor::setNumOfOctaves( int octaves )
+{
+  params.numOfOctave_ = octaves;
+}
+
+int BinaryDescriptor::getWidthOfBand()
+{
+  return params.widthOfBand_;
+}
+
+void BinaryDescriptor::setWidthOfBand( int width )
+{
+  params.widthOfBand_ = width;
+
+  /* reserve enough space for EDLine objects and images in Gaussian pyramid */
+  edLineVec_.resize( params.numOfOctave_ );
+  images_sizes.resize( params.numOfOctave_ );
+
+  for ( int i = 0; i < params.numOfOctave_; i++ )
+    edLineVec_[i] = Ptr<EDLineDetector>( new EDLineDetector() );
+
+  /* prepare a vector to host local weights F_l */
+  gaussCoefL_.resize( params.widthOfBand_ * 3 );
+
+  /* compute center of central band (every computation involves 2-3 bands) */
+  double u = ( params.widthOfBand_ * 3 - 1 ) / 2;
+
+  /* compute exponential part of F_l */
+  double sigma = ( params.widthOfBand_ * 2 + 1 ) / 2;  // (widthOfBand_*2+1)/2;
+  double invsigma2 = -1 / ( 2 * sigma * sigma );
+
+  /* compute all local weights */
+  double dis;
+  for ( int i = 0; i < params.widthOfBand_ * 3; i++ )
+  {
+    dis = i - u;
+    gaussCoefL_[i] = exp( dis * dis * invsigma2 );
+  }
+
+  /* prepare a vector for global weights F_g */
+  gaussCoefG_.resize( NUM_OF_BANDS * params.widthOfBand_ );
+
+  /* compute center of LSR */
+  u = ( NUM_OF_BANDS * params.widthOfBand_ - 1 ) / 2;
+
+  /* compute exponential part of F_g */
+  sigma = u;
+  invsigma2 = -1 / ( 2 * sigma * sigma );
+  for ( int i = 0; i < NUM_OF_BANDS * params.widthOfBand_; i++ )
+  {
+    dis = i - u;
+    gaussCoefG_[i] = exp( dis * dis * invsigma2 );
+  }
+}
+
+int BinaryDescriptor::getReductionRatio()
+{
+  return params.reductionRatio;
+}
+
+void BinaryDescriptor::setReductionRatio( int rRatio )
+{
+  params.reductionRatio = rRatio;
+}
+
+/* read parameters from a FileNode object and store them (struct function) */
+void BinaryDescriptor::Params::read( const cv::FileNode& fn )
+{
+  numOfOctave_ = fn["numOfOctave_"];
+  widthOfBand_ = fn["widthOfBand_"];
+  reductionRatio = fn["reductionRatio"];
+}
+
+/* store parameters to a FileStorage object (struct function) */
+void BinaryDescriptor::Params::write( cv::FileStorage& fs ) const
+{
+  fs << "numOfOctave_" << numOfOctave_;
+  fs << "numOfBand_" << NUM_OF_BANDS;
+  fs << "widthOfBand_" << widthOfBand_;
+  fs << "reductionRatio" << reductionRatio;
+}
+
+Ptr<BinaryDescriptor> BinaryDescriptor::createBinaryDescriptor()
+{
+  return Ptr<BinaryDescriptor>( new BinaryDescriptor() );
+}
+
+Ptr<BinaryDescriptor> BinaryDescriptor::createBinaryDescriptor( Params parameters )
+{
+  return Ptr<BinaryDescriptor>( new BinaryDescriptor( parameters ) );
+}
+
+/* construct a BinaryDescriptor object and compute external private parameters */
+BinaryDescriptor::BinaryDescriptor( const BinaryDescriptor::Params &parameters ) :
+    params( parameters )
+{
+  /* reserve enough space for EDLine objects and images in Gaussian pyramid */
+  edLineVec_.resize( params.numOfOctave_ );
+  images_sizes.resize( params.numOfOctave_ );
+
+  for ( int i = 0; i < params.numOfOctave_; i++ )
+    edLineVec_[i] = Ptr<EDLineDetector>( new EDLineDetector() );
+
+  /* prepare a vector to host local weights F_l */
+  gaussCoefL_.resize( params.widthOfBand_ * 3 );
+
+  /* compute center of central band (every computation involves 2-3 bands) */
+  double u = ( params.widthOfBand_ * 3 - 1 ) / 2;
+
+  /* compute exponential part of F_l */
+  double sigma = ( params.widthOfBand_ * 2 + 1 ) / 2;  // (widthOfBand_*2+1)/2;
+  double invsigma2 = -1 / ( 2 * sigma * sigma );
+
+  /* compute all local weights */
+  double dis;
+  for ( int i = 0; i < params.widthOfBand_ * 3; i++ )
+  {
+    dis = i - u;
+    gaussCoefL_[i] = exp( dis * dis * invsigma2 );
+  }
+
+  /* prepare a vector for global weights F_g */
+  gaussCoefG_.resize( NUM_OF_BANDS * params.widthOfBand_ );
+
+  /* compute center of LSR */
+  u = ( NUM_OF_BANDS * params.widthOfBand_ - 1 ) / 2;
+
+  /* compute exponential part of F_g */
+  sigma = u;
+  invsigma2 = -1 / ( 2 * sigma * sigma );
+  for ( int i = 0; i < NUM_OF_BANDS * params.widthOfBand_; i++ )
+  {
+    dis = i - u;
+    gaussCoefG_[i] = exp( dis * dis * invsigma2 );
+  }
+}
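To make the weight layout above concrete, a worked example for the default Params (numbers derived from the code, not stated in it): widthOfBand_ = 7 and NUM_OF_BANDS = 9 give a local window of 7*3 = 21 rows centered at u = (21-1)/2 = 10, and a global LSR of 9*7 = 63 rows centered at u = 31 with sigma = 31. Note the integer division in the local sigma: the inline comment suggests (2*7+1)/2 = 7.5, but the code computes 7. A freestanding sketch that reproduces gaussCoefL_:

#include <cmath>
#include <vector>

// Recompute the local weights F_l for widthOfBand_ = 7, exactly as the constructor above does.
std::vector<double> localWeights()
{
  const int w = 7;                          // widthOfBand_
  const double u = ( w * 3 - 1 ) / 2;       // center row = 10
  const double sigma = ( w * 2 + 1 ) / 2;   // integer division: 7, not 7.5
  const double invsigma2 = -1.0 / ( 2 * sigma * sigma );
  std::vector<double> g( w * 3 );
  for ( int i = 0; i < w * 3; i++ )
  {
    const double d = i - u;
    g[i] = std::exp( d * d * invsigma2 );   // Gaussian falloff from the band center
  }
  return g;
}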
+/* definition of operator () */
+void BinaryDescriptor::operator()( InputArray image, InputArray mask, CV_OUT std::vector<KeyLine>& keylines, OutputArray descriptors,
+                                   bool useProvidedKeyLines, bool returnFloatDescr ) const
+{
+
+  /* create some matrix objects */
+  cv::Mat imageMat, maskMat, descrMat;
+
+  /* store reference to input matrices */
+  imageMat = image.getMat();
+  maskMat = mask.getMat();
+
+  /* run KeyLine detection if the caller did not provide keylines */
+  if( !useProvidedKeyLines )
+  {
+    keylines.clear();
+    BinaryDescriptor *bn = const_cast<BinaryDescriptor*>( this );
+    bn->edLineVec_.clear();
+    bn->edLineVec_.resize( params.numOfOctave_ );
+
+    for ( int i = 0; i < params.numOfOctave_; i++ )
+      bn->edLineVec_[i] = Ptr<EDLineDetector>( new EDLineDetector() );
+
+    detectImpl( imageMat, keylines, maskMat );
+
+  }
+
+  /* initialize output matrix */
+  //descriptors.create( Size( 32, (int) keylines.size() ), CV_8UC1 );
+
+  /* store reference to output matrix */
+  //descrMat = descriptors.getMat();
+
+  /* compute descriptors */
+  if( !useProvidedKeyLines )
+    computeImpl( imageMat, keylines, descrMat, returnFloatDescr, true );
+
+  else
+    computeImpl( imageMat, keylines, descrMat, returnFloatDescr, false );
+
+  descrMat.copyTo( descriptors );
+}
+
+BinaryDescriptor::~BinaryDescriptor()
+{
+
+}
+
+/* read parameters from a FileNode object and store them (class function) */
+void BinaryDescriptor::read( const cv::FileNode& fn )
+{
+  params.read( fn );
+}
+
+/* store parameters to a FileStorage object (class function) */
+void BinaryDescriptor::write( cv::FileStorage& fs ) const
+{
+  params.write( fs );
+}
+
+/* return norm mode */
+int BinaryDescriptor::defaultNorm() const
+{
+  return NORM_HAMMING;
+}
+
+/* return data type */
+int BinaryDescriptor::descriptorType() const
+{
+  return CV_8U;
+}
+
+/* return descriptor size */
+int BinaryDescriptor::descriptorSize() const
+{
+  return 32 * 8;
+}
+
+/* power function with error management (equivalent to 1 << i for 0 <= i <= 7) */
+static inline int get2Pow( int i )
+{
+  if( i >= 0 && i <= 7 )
+    return (int) pow( 2, (double) i );
+
+  else
+  {
+    throw std::runtime_error( "Invalid power argument" );
+  }
+}
+/* compute Gaussian pyramids */
+void BinaryDescriptor::computeGaussianPyramid( const Mat& image, const int numOctaves )
+{
+  /* clear class fields */
+  images_sizes.clear();
+  octaveImages.clear();
+
+  /* insert input image into pyramid */
+  cv::Mat currentMat = image.clone();
+  cv::GaussianBlur( currentMat, currentMat, cv::Size( 5, 5 ), 1 );
+  octaveImages.push_back( currentMat );
+  images_sizes.push_back( currentMat.size() );
+
+  /* fill Gaussian pyramid */
+  for ( int pyrCounter = 1; pyrCounter < numOctaves; pyrCounter++ )
+  {
+    /* compute and store next image in pyramid and its size */
+    pyrDown( currentMat, currentMat, Size( currentMat.cols / params.reductionRatio, currentMat.rows / params.reductionRatio ) );
+    octaveImages.push_back( currentMat );
+    images_sizes.push_back( currentMat.size() );
+  }
+}
+
+/* compute Sobel's derivatives */
+void BinaryDescriptor::computeSobel( const cv::Mat& image, const int numOctaves )
+{
+
+  /* compute Gaussian pyramids */
+  computeGaussianPyramid( image, numOctaves );
+
+  /* reinitialize class structures */
+  dxImg_vector.clear();
+  dyImg_vector.clear();
+
+//  dxImg_vector.resize( params.numOfOctave_ );
+//  dyImg_vector.resize( params.numOfOctave_ );
+
+  dxImg_vector.resize( octaveImages.size() );
+  dyImg_vector.resize( octaveImages.size() );
+
+  /* compute derivatives */
+  for ( size_t sobelCnt = 0; sobelCnt < octaveImages.size(); sobelCnt++ )
+  {
+    dxImg_vector[sobelCnt].create( images_sizes[sobelCnt].height, images_sizes[sobelCnt].width, CV_16SC1 );
+    dyImg_vector[sobelCnt].create( images_sizes[sobelCnt].height, images_sizes[sobelCnt].width, CV_16SC1 );
+
+    cv::Sobel( octaveImages[sobelCnt], dxImg_vector[sobelCnt], CV_16SC1, 1, 0, 3 );
+    cv::Sobel( octaveImages[sobelCnt], dyImg_vector[sobelCnt], CV_16SC1, 0, 1, 3 );
+  }
+}
+
+/* utility function for conversion of an LBD descriptor to its binary representation */
+unsigned char BinaryDescriptor::binaryConversion( float* f1, float* f2 )
+{
+  uchar result = 0;
+  for ( int i = 0; i < 8; i++ )
+  {
+    if( f1[i] > f2[i] )
+      result += (uchar) get2Pow( i );
+  }
+
+  return result;
+
+}
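Since computeSobel() above produces the per-octave gradient maps the LBD band statistics are later projected onto, here is a freestanding sketch of exactly those two calls in isolation (the helper name is mine, not the patch's):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Per-octave gradient maps as computeSobel builds them; 'gray' is any CV_8UC1 image.
void gradientMaps( const cv::Mat& gray, cv::Mat& dx, cv::Mat& dy )
{
  // 3x3 Sobel kernels; signed 16-bit output so negative responses are preserved
  cv::Sobel( gray, dx, CV_16SC1, 1, 0, 3 );
  cv::Sobel( gray, dy, CV_16SC1, 0, 1, 3 );
}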
+/* requires line detection (only one image) */
+void BinaryDescriptor::detect( const Mat& image, CV_OUT std::vector<KeyLine>& keylines, const Mat& mask )
+{
+
+  if( image.data == NULL )
+  {
+    std::cout << "Error: input image for detection is empty" << std::endl;
+    return;
+  }
+
+  if( mask.data != NULL && ( mask.size() != image.size() || mask.type() != CV_8UC1 ) )
+    throw std::runtime_error( "Mask error while detecting lines: please check its dimensions and that data type is CV_8UC1" );
+
+  else
+    detectImpl( image, keylines, mask );
+}
+
+/* requires line detection (more than one image) */
+void BinaryDescriptor::detect( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, const std::vector<Mat>& masks ) const
+{
+
+  if( images.size() == 0 )
+  {
+    std::cout << "Error: input image list for detection is empty" << std::endl;
+    return;
+  }
+
+  /* detect lines from each image */
+  for ( size_t counter = 0; counter < images.size(); counter++ )
+  {
+    if( masks[counter].data != NULL && ( masks[counter].size() != images[counter].size() || masks[counter].type() != CV_8UC1 ) )
+      throw std::runtime_error( "Masks error while detecting lines: please check their dimensions and that data types are CV_8UC1" );
+
+    else
+      detectImpl( images[counter], keylines[counter], masks[counter] );
+  }
+}
+
+void BinaryDescriptor::detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, const Mat& mask ) const
+{
+
+  cv::Mat image;
+  if( imageSrc.channels() != 1 )
+  {
+    cvtColor( imageSrc, image, COLOR_BGR2GRAY );
+  }
+  else
+    image = imageSrc.clone();
+
+  /* check that image depth is 0 (8-bit image) */
+  if( image.depth() != 0 )
+    throw std::runtime_error( "Error: image depth != 0" );
+
+  /* create a pointer to self */
+  BinaryDescriptor *bn = const_cast<BinaryDescriptor*>( this );
+
+  /* detect and arrange lines across octaves */
+  ScaleLines sl;
+  bn->OctaveKeyLines( image, sl );
+
+  /* fill KeyLines vector */
+  for ( int i = 0; i < (int) sl.size(); i++ )
+  {
+    for ( size_t j = 0; j < sl[i].size(); j++ )
+    {
+      /* get current line */
+      OctaveSingleLine osl = sl[i][j];
+
+      /* create a KeyLine object */
+      KeyLine kl;
+
+      /* fill KeyLine's fields */
+      kl.startPointX = osl.startPointX;  //extremes[0];
+      kl.startPointY = osl.startPointY;  //extremes[1];
+      kl.endPointX = osl.endPointX;  //extremes[2];
+      kl.endPointY = osl.endPointY;  //extremes[3];
+      kl.sPointInOctaveX = osl.sPointInOctaveX;
+      kl.sPointInOctaveY = osl.sPointInOctaveY;
+      kl.ePointInOctaveX = osl.ePointInOctaveX;
+      kl.ePointInOctaveY = osl.ePointInOctaveY;
+      kl.lineLength = osl.lineLength;
+      kl.numOfPixels = osl.numOfPixels;
+
+      kl.angle = osl.direction;
+      kl.class_id = i;
+      kl.octave = osl.octaveCount;
+      kl.size = ( osl.endPointX - osl.startPointX ) * ( osl.endPointY - osl.startPointY );
+      kl.response = osl.lineLength / max( images_sizes[osl.octaveCount].width, images_sizes[osl.octaveCount].height );
+      kl.pt = Point2f( ( osl.endPointX + osl.startPointX ) / 2, ( osl.endPointY + osl.startPointY ) / 2 );
+
+      /* store KeyLine */
+      keylines.push_back( kl );
+    }
+
+  }
+
+  /* delete undesired KeyLines, according to input mask */
+  if( !mask.empty() )
+  {
+    for ( size_t keyCounter = 0; keyCounter < keylines.size(); keyCounter++ )
+    {
+      KeyLine kl = keylines[keyCounter];
+      if( mask.at<uchar>( (int) kl.startPointY, (int) kl.startPointX ) == 0 && mask.at<uchar>( (int) kl.endPointY, (int) kl.endPointX ) == 0 )
+      {
+        keylines.erase( keylines.begin() + keyCounter );
+        keyCounter--;  /* stay on this index after the erase, so the next element is not skipped */
+      }
+    }
+  }
+
+}
+
+/* requires descriptors computation (only one image) */
+void BinaryDescriptor::compute( const Mat& image, CV_OUT CV_IN_OUT std::vector<KeyLine>& keylines, CV_OUT Mat& descriptors,
+                                bool returnFloatDescr ) const
+{
+  computeImpl( image, keylines, descriptors, returnFloatDescr, false );
+}
+
+/* requires descriptors computation (more than one image) */
+void BinaryDescriptor::compute( const std::vector<Mat>& images, std::vector<std::vector<KeyLine> >& keylines, std::vector<Mat>& descriptors,
+                                bool returnFloatDescr ) const
+{
+  for ( size_t i = 0; i < images.size(); i++ )
+    computeImpl( images[i], keylines[i], descriptors[i], returnFloatDescr, false );
+}
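A hedged end-to-end sketch of how detect() and compute() above are meant to chain. The patch also ships a BinaryDescriptorMatcher (binary_descriptor_matcher.cpp), but since its API is not shown in this section, a stock cv::BFMatcher with Hamming distance stands in here, consistent with defaultNorm() returning NORM_HAMMING:

#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>
#include "line_descriptor_custom.hpp"

// Detect lines in two frames, compute 32-byte LBD descriptors, match them.
void matchLines( const cv::Mat& img0, const cv::Mat& img1 )
{
  using namespace cv::line_descriptor;
  cv::Ptr<BinaryDescriptor> bd = BinaryDescriptor::createBinaryDescriptor();

  std::vector<KeyLine> kl0, kl1;
  cv::Mat d0, d1;
  bd->detect( img0, kl0, cv::Mat() );      // empty mask keeps all lines
  bd->detect( img1, kl1, cv::Mat() );
  bd->compute( img0, kl0, d0, false );     // one 32-byte CV_8UC1 row per KeyLine
  bd->compute( img1, kl1, d1, false );

  std::vector<cv::DMatch> matches;
  cv::BFMatcher matcher( cv::NORM_HAMMING );
  matcher.match( d0, d1, matches );
}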
+/* implementation of descriptors computation */
+void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& keylines, Mat& descriptors, bool returnFloatDescr,
+                                    bool useDetectionData ) const
+{
+
+  /* convert input image to gray scale */
+  cv::Mat image;
+
+  if( imageSrc.channels() != 1 )
+    cvtColor( imageSrc, image, COLOR_BGR2GRAY );
+  else
+    image = imageSrc.clone();
+
+  /* check that image depth is 0 (8-bit image) */
+  if( image.depth() != 0 )
+    throw std::runtime_error( "Error: image depth != 0" );
+
+  /* keypoints list can't be empty */
+  if( keylines.size() == 0 )
+  {
+    std::cout << "Error: keypoint list is empty" << std::endl;
+    return;
+  }
+
+  BinaryDescriptor* bd = const_cast<BinaryDescriptor*>( this );
+
+  /* get maximum class_id and octave */
+  int numLines = 0;
+  int octaveIndex = -1;
+  for ( size_t l = 0; l < keylines.size(); l++ )
+  {
+    if( keylines[l].class_id > numLines )
+      numLines = keylines[l].class_id;
+
+    if( keylines[l].octave > octaveIndex )
+      octaveIndex = keylines[l].octave;
+  }
+
+  if( !useDetectionData )
+    bd->computeSobel( image, octaveIndex + 1 );
+
+  /* create a ScaleLines object */
+  OctaveSingleLine fictiousOSL;
+//  fictiousOSL.octaveCount = params.numOfOctave_ + 1;
+//  LinesVec lv( params.numOfOctave_, fictiousOSL );
+  fictiousOSL.octaveCount = octaveIndex + 1;
+  LinesVec lv( octaveIndex + 1, fictiousOSL );
+  ScaleLines sl( numLines + 1, lv );
+
+  /* create a map to record association between KeyLines and their position
+     in ScaleLines vector */
+  std::map<std::pair<int, int>, size_t> correspondences;
+
+  /* fill ScaleLines object */
+  for ( size_t slCounter = 0; slCounter < keylines.size(); slCounter++ )
+  {
+    /* get a KeyLine object and create a new line */
+    KeyLine kl = keylines[slCounter];
+    OctaveSingleLine osl;
+
+    /* insert data in newly created line */
+    osl.startPointX = kl.startPointX;
+    osl.startPointY = kl.startPointY;
+    osl.endPointX = kl.endPointX;
+    osl.endPointY = kl.endPointY;
+    osl.sPointInOctaveX = kl.sPointInOctaveX;
+    osl.sPointInOctaveY = kl.sPointInOctaveY;
+    osl.ePointInOctaveX = kl.ePointInOctaveX;
+    osl.ePointInOctaveY = kl.ePointInOctaveY;
+    osl.lineLength = kl.lineLength;
+    osl.numOfPixels = kl.numOfPixels;
+    osl.salience = kl.response;
+
+    osl.direction = kl.angle;
+    osl.octaveCount = kl.octave;
+
+    /* store new line */
+    sl[kl.class_id][kl.octave] = osl;
+
+    /* update map */
+    int id = kl.class_id;
+    int oct = kl.octave;
+    correspondences.insert( std::pair<std::pair<int, int>, size_t>( std::pair<int, int>( id, oct ), slCounter ) );
+  }
+
+  /* delete useless OctaveSingleLines */
+  for ( size_t i = 0; i < sl.size(); i++ )
+  {
+    for ( size_t j = 0; j < sl[i].size(); j++ )
+    {
+      //if( (int) ( sl[i][j] ).octaveCount > params.numOfOctave_ )
+      if( (int) ( sl[i][j] ).octaveCount > octaveIndex )
+      {
+        ( sl[i] ).erase( ( sl[i] ).begin() + j );
+        j--;  /* stay on this index after the erase, so consecutive fictitious entries are not skipped */
+      }
+    }
+  }
+
+  /* compute LBD descriptors */
+  bd->computeLBD( sl, useDetectionData );
+
+  /* resize output matrix */
+  if( !returnFloatDescr )
+    descriptors = cv::Mat( (int) keylines.size(), 32, CV_8UC1 );
+
+  else
+    descriptors = cv::Mat( (int) keylines.size(), NUM_OF_BANDS * 8, CV_32FC1 );
+
+  /* fill output matrix with descriptors */
+  for ( int k = 0; k < (int) sl.size(); k++ )
+  {
+    for ( int lineC = 0; lineC < (int) sl[k].size(); lineC++ )
+    {
+      /* get original index of keypoint */
+      int lineOctave = ( sl[k][lineC] ).octaveCount;
+      int originalIndex = (int) correspondences.find( std::pair<int, int>( k, lineOctave ) )->second;
+
+      if( !returnFloatDescr )
+      {
+        /* get a pointer to correspondent row in output matrix */
+        uchar* pointerToRow = descriptors.ptr<uchar>( originalIndex );
+
+        /* get LBD data */
+        float* desVec = &sl[k][lineC].descriptor.front();
+
+        /* fill current row with binary descriptor */
+        for ( int comb = 0; comb < 32; comb++ )
+        {
+          *pointerToRow = bd->binaryConversion( &desVec[8 * combinations[comb][0]], &desVec[8 * combinations[comb][1]] );
+          pointerToRow++;
+        }
+      }
+
+      else
+      {
+        /* get a pointer to correspondent row in output matrix */
+        float* pointerToRow = descriptors.ptr<float>( originalIndex );
+
+        /* get LBD data */
+        std::vector<float> desVec = sl[k][lineC].descriptor;
+
+        for ( int count = 0; count < (int) desVec.size(); count++ )
+        {
+          *pointerToRow = desVec[count];
+          pointerToRow++;
+        }
+      }
+
+    }
+  }
+
+}
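One detail implied by computeImpl() above, restated for clarity: with returnFloatDescr = false the output is keylines.size() x 32, CV_8UC1 (the packed binary LBD); with true it is keylines.size() x (NUM_OF_BANDS * 8) = keylines.size() x 72, CV_32FC1 (the raw float LBD). A small sanity check one could place after any compute() call:

// Shape check after compute(); a restatement of computeImpl's allocation above.
CV_Assert( descriptors.rows == (int) keylines.size() );
CV_Assert( (  descriptors.type() == CV_8UC1  && descriptors.cols == 32 )      // binary LBD
        || ( descriptors.type() == CV_32FC1 && descriptors.cols == 9 * 8 ) ); // float LBD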
/* set octave from which it was extracted */ + octaveLines[numOfFinalLine].octaveCount = 0; + /* set ID within its octave */ + octaveLines[numOfFinalLine].lineIDInOctave = lineCurId; + /* set a unique ID among all lines extracted in all octaves */ + octaveLines[numOfFinalLine].lineIDInScaleLineVec = lineIDInScaleLineVec; + + /* compute absolute value of difference between X coordinates of line's extreme points */ + dx = fabs( edLineVec_[0]->lineEndpoints_[lineCurId][0] - edLineVec_[0]->lineEndpoints_[lineCurId][2] ); + /* compute absolute value of difference between Y coordinates of line's extreme points */ + dy = fabs( edLineVec_[0]->lineEndpoints_[lineCurId][1] - edLineVec_[0]->lineEndpoints_[lineCurId][3] ); + /* compute line's length */ + octaveLines[numOfFinalLine].lineLength = sqrt( dx * dx + dy * dy ); + + /* update counters */ + numOfFinalLine++; + lineIDInScaleLineVec++; + } + + /* create and fill an array to store scale factors */ + float *scale = new float[params.numOfOctave_]; + scale[0] = 1; + for ( int octaveCount = 1; octaveCount < params.numOfOctave_; octaveCount++ ) + { + scale[octaveCount] = (float) ( factor * scale[octaveCount - 1] ); + } + + /* some variables' declarations */ + float rho1, rho2, tempValue; + float direction, diffNear, length; + unsigned int octaveID, lineIDInOctave; + + /*more than one octave image, organize lines in scale space. + *lines corresponding to the same line in octave images should have the same index in the ScaleLineVec */ + if( params.numOfOctave_ > 1 ) + { + /* some other variables' declarations */ + double twoPI = 2 * M_PI; + unsigned int closeLineID = 0; + float endPointDis, minEndPointDis, minLocalDis, maxLocalDis; + float lp0, lp1, lp2, lp3, np0, np1, np2, np3; + + /* loop over list of octaves */ + for ( int octaveCount = 1; octaveCount < params.numOfOctave_; octaveCount++ ) + { + /*for each line in current octave image, find their corresponding lines in the octaveLines, + *give them the same value of lineIDInScaleLineVec*/ + + /* loop over list of lines extracted from current octave */ + for ( unsigned int lineCurId = 0; lineCurId < edLineVec_[octaveCount]->lines_.numOfLines; lineCurId++ ) + { + /* get (scaled) known term from equation of current line */ + rho1 = (float) ( scale[octaveCount] * fabs( edLineVec_[octaveCount]->lineEquations_[lineCurId][2] ) ); + + /*nearThreshold depends on the distance of the image coordinate origin to current line. + *so nearThreshold = rho1 * nearThresholdRatio, where nearThresholdRatio = 1-cos(10*pi/180) = 0.0152*/ + tempValue = (float) ( rho1 * 0.0152 ); + float diffNearThreshold = ( tempValue > 6 ) ? ( tempValue ) : 6; + diffNearThreshold = ( diffNearThreshold < 12 ) ? diffNearThreshold : 12; + + /* compute scaled lenght of current line */ + dx = fabs( edLineVec_[octaveCount]->lineEndpoints_[lineCurId][0] - edLineVec_[octaveCount]->lineEndpoints_[lineCurId][2] ); //x1-x2 + dy = fabs( edLineVec_[octaveCount]->lineEndpoints_[lineCurId][1] - edLineVec_[octaveCount]->lineEndpoints_[lineCurId][3] ); //y1-y2 + length = scale[octaveCount] * sqrt( dx * dx + dy * dy ); + + minEndPointDis = 12; + /* loop over the octave representations of all lines */ + for ( unsigned int lineNextId = 0; lineNextId < numOfFinalLine; lineNextId++ ) + { + /* if a line from same octave is encountered, + a comparison with it shouldn't be considered */ + octaveID = octaveLines[lineNextId].octaveCount; + if( (int) octaveID == octaveCount ) + { //lines in the same layer of octave image should not be compared. 
+ break; + } + + /* take ID in octave of line to be compared */ + lineIDInOctave = octaveLines[lineNextId].lineIDInOctave; + + /*first check whether current line and next line are parallel. + *If line1:a1*x+b1*y+c1=0 and line2:a2*x+b2*y+c2=0 are parallel, then + *-a1/b1=-a2/b2, i.e., a1b2=b1a2. + *we define parallel=fabs(a1b2-b1a2) + *note that, in EDLine class, we have normalized the line equations + *to make a1^2+ b1^2 = a2^2+ b2^2 = 1*/ + direction = fabs( edLineVec_[octaveCount]->lineDirection_[lineCurId] - edLineVec_[octaveID]->lineDirection_[lineIDInOctave] ); + + /* the angle between two lines are larger than 10degrees + (i.e. 10*pi/180=0.1745), they are not close to parallel */ + if( direction > 0.1745 && ( twoPI - direction > 0.1745 ) ) + { + continue; + } + /*now check whether current line and next line are near to each other. + *If line1:a1*x+b1*y+c1=0 and line2:a2*x+b2*y+c2=0 are near in image, then + *rho1 = |a1*0+b1*0+c1|/sqrt(a1^2+b1^2) and rho2 = |a2*0+b2*0+c2|/sqrt(a2^2+b2^2) should close. + *In our case, rho1 = |c1| and rho2 = |c2|, because sqrt(a1^2+b1^2) = sqrt(a2^2+b2^2) = 1; + *note that, lines are in different octave images, so we define near = fabs(scale*rho1 - rho2) or + *where scale is the scale factor between to octave images*/ + + /* get known term from equation to be compared */ + rho2 = (float) ( scale[octaveID] * fabs( edLineVec_[octaveID]->lineEquations_[lineIDInOctave][2] ) ); + /* compute difference between known ters */ + diffNear = fabs( rho1 - rho2 ); + + /* two lines are not close in the image */ + if( diffNear > diffNearThreshold ) + { + continue; + } + + /*now check the end points distance between two lines, the scale of distance is in the original image size. + * find the minimal and maximal end points distance*/ + + /* get the extreme points of the two lines */ + lp0 = scale[octaveCount] * edLineVec_[octaveCount]->lineEndpoints_[lineCurId][0]; + lp1 = scale[octaveCount] * edLineVec_[octaveCount]->lineEndpoints_[lineCurId][1]; + lp2 = scale[octaveCount] * edLineVec_[octaveCount]->lineEndpoints_[lineCurId][2]; + lp3 = scale[octaveCount] * edLineVec_[octaveCount]->lineEndpoints_[lineCurId][3]; + np0 = scale[octaveID] * edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][0]; + np1 = scale[octaveID] * edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][1]; + np2 = scale[octaveID] * edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][2]; + np3 = scale[octaveID] * edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][3]; + + /* get the distance between the two leftmost extremes of lines + L1(0,1)<->L2(0,1) */ + dx = lp0 - np0; + dy = lp1 - np1; + endPointDis = sqrt( dx * dx + dy * dy ); + + /* set momentaneously min and max distance between lines to + the one between left extremes */ + minLocalDis = endPointDis; + maxLocalDis = endPointDis; + + /* compute distance between right extremes + L1(2,3)<->L2(2,3) */ + dx = lp2 - np2; + dy = lp3 - np3; + endPointDis = sqrt( dx * dx + dy * dy ); + + /* update (if necessary) min and max distance between lines */ + minLocalDis = ( endPointDis < minLocalDis ) ? endPointDis : minLocalDis; + maxLocalDis = ( endPointDis > maxLocalDis ) ? endPointDis : maxLocalDis; + + /* compute distance between left extreme of current line and + right extreme of line to be compared + L1(0,1)<->L2(2,3) */ + dx = lp0 - np2; + dy = lp1 - np3; + endPointDis = sqrt( dx * dx + dy * dy ); + + /* update (if necessary) min and max distance between lines */ + minLocalDis = ( endPointDis < minLocalDis ) ? 
+    ////////////////////////////////////
+    //Reorganize the detected lines into keyLines
+    keyLines.clear();
+    keyLines.resize( lineIDInScaleLineVec );
+    unsigned int tempID;
+    float s1, e1, s2, e2;
+    bool shouldChange;
+    OctaveSingleLine singleLine;
+    for ( unsigned int lineID = 0; lineID < numOfFinalLine; lineID++ )
+    {
+      lineIDInOctave = octaveLines[lineID].lineIDInOctave;
+      octaveID = octaveLines[lineID].octaveCount;
+      direction = edLineVec_[octaveID]->lineDirection_[lineIDInOctave];
+      singleLine.octaveCount = octaveID;
+      singleLine.direction = direction;
+      singleLine.lineLength = octaveLines[lineID].lineLength;
+      singleLine.salience = edLineVec_[octaveID]->lineSalience_[lineIDInOctave];
+      singleLine.numOfPixels = edLineVec_[octaveID]->lines_.sId[lineIDInOctave + 1] - edLineVec_[octaveID]->lines_.sId[lineIDInOctave];
+      //decide the start point and end point
+      shouldChange = false;
+      s1 = edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][0]; //sx
+      s2 = edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][1]; //sy
+      e1 = edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][2]; //ex
+      e2 = edLineVec_[octaveID]->lineEndpoints_[lineIDInOctave][3]; //ey
+      dx = e1 - s1; //ex-sx
+      dy = e2 - s2; //ey-sy
+      if( direction >= -0.75 * M_PI && direction < -0.25 * M_PI )
+      {
+        if( dy > 0 )
+        {
+          shouldChange = true;
+        }
+      }
+      if( direction >= -0.25 * M_PI && direction < 0.25 * M_PI )
+      {
+        if( dx < 0 )
+        {
+          shouldChange = true;
+        }
+      }
+      if( direction >= 0.25 * M_PI && direction < 0.75 * M_PI )
+      {
+        if( dy < 0 )
+        {
+          shouldChange = true;
+        }
+      }
+      if( ( direction >= 0.75 * M_PI && direction < M_PI ) || ( direction >= -M_PI && direction < -0.75 * M_PI ) )
+      {
+        if( dx > 0 )
+        {
+          shouldChange = true;
+        }
+      }
+      tempValue = scale[octaveID];
+      if( shouldChange )
+      {
+        singleLine.sPointInOctaveX = e1;
+        singleLine.sPointInOctaveY = e2;
+        singleLine.ePointInOctaveX = s1;
+        singleLine.ePointInOctaveY = s2;
+        singleLine.startPointX = tempValue * e1;
+        singleLine.startPointY = tempValue * e2;
+        singleLine.endPointX = tempValue * s1;
+        singleLine.endPointY = tempValue * s2;
+      }
+      else
+      {
+        singleLine.sPointInOctaveX = s1;
+ singleLine.sPointInOctaveY = s2; + singleLine.ePointInOctaveX = e1; + singleLine.ePointInOctaveY = e2; + singleLine.startPointX = tempValue * s1; + singleLine.startPointY = tempValue * s2; + singleLine.endPointX = tempValue * e1; + singleLine.endPointY = tempValue * e2; + } + tempID = octaveLines[lineID].lineIDInScaleLineVec; + keyLines[tempID].push_back( singleLine ); + } + + delete[] scale; + return 1; +} + +int BinaryDescriptor::computeLBD( ScaleLines &keyLines, bool useDetectionData ) +{ + //the default length of the band is the line length. + short numOfFinalLine = (short) keyLines.size(); + float *dL = new float[2]; //line direction cos(dir), sin(dir) + float *dO = new float[2]; //the clockwise orthogonal vector of line direction. + short heightOfLSP = (short) ( params.widthOfBand_ * NUM_OF_BANDS ); //the height of line support region; + short descriptor_size = NUM_OF_BANDS * 8; //each band, we compute the m( pgdL, ngdL, pgdO, ngdO) and std( pgdL, ngdL, pgdO, ngdO); + float pgdLRowSum; //the summation of {g_dL |g_dL>0 } for each row of the region; + float ngdLRowSum; //the summation of {g_dL |g_dL<0 } for each row of the region; + float pgdL2RowSum; //the summation of {g_dL^2 |g_dL>0 } for each row of the region; + float ngdL2RowSum; //the summation of {g_dL^2 |g_dL<0 } for each row of the region; + float pgdORowSum; //the summation of {g_dO |g_dO>0 } for each row of the region; + float ngdORowSum; //the summation of {g_dO |g_dO<0 } for each row of the region; + float pgdO2RowSum; //the summation of {g_dO^2 |g_dO>0 } for each row of the region; + float ngdO2RowSum; //the summation of {g_dO^2 |g_dO<0 } for each row of the region; + + float *pgdLBandSum = new float[NUM_OF_BANDS]; //the summation of {g_dL |g_dL>0 } for each band of the region; + float *ngdLBandSum = new float[NUM_OF_BANDS]; //the summation of {g_dL |g_dL<0 } for each band of the region; + float *pgdL2BandSum = new float[NUM_OF_BANDS]; //the summation of {g_dL^2 |g_dL>0 } for each band of the region; + float *ngdL2BandSum = new float[NUM_OF_BANDS]; //the summation of {g_dL^2 |g_dL<0 } for each band of the region; + float *pgdOBandSum = new float[NUM_OF_BANDS]; //the summation of {g_dO |g_dO>0 } for each band of the region; + float *ngdOBandSum = new float[NUM_OF_BANDS]; //the summation of {g_dO |g_dO<0 } for each band of the region; + float *pgdO2BandSum = new float[NUM_OF_BANDS]; //the summation of {g_dO^2 |g_dO>0 } for each band of the region; + float *ngdO2BandSum = new float[NUM_OF_BANDS]; //the summation of {g_dO^2 |g_dO<0 } for each band of the region; + + short numOfBitsBand = NUM_OF_BANDS * sizeof(float); + short lengthOfLSP; //the length of line support region, varies with lines + short halfHeight = ( heightOfLSP - 1 ) / 2; + short halfWidth; + short bandID; + float coefInGaussion; + float lineMiddlePointX, lineMiddlePointY; + float sCorX, sCorY, sCorX0, sCorY0; + short tempCor, xCor, yCor; //pixel coordinates in image plane + short dx, dy; + float gDL; //store the gradient projection of pixels in support region along dL vector + float gDO; //store the gradient projection of pixels in support region along dO vector + short imageWidth, imageHeight, realWidth; + short *pdxImg, *pdyImg; + float *desVec; + + short sameLineSize; + short octaveCount; + OctaveSingleLine *pSingleLine; + /* loop over list of LineVec */ + for ( short lineIDInScaleVec = 0; lineIDInScaleVec < numOfFinalLine; lineIDInScaleVec++ ) + { + sameLineSize = (short) ( keyLines[lineIDInScaleVec].size() ); + /* loop over current LineVec's lines */ + 
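For reference while reading the loop below: each line's LBD descriptor is laid out band by band, 8 floats per band, namely the four means of the positive/negative gradient projections along dL and dO followed by their four standard deviations, giving NUM_OF_BANDS * 8 entries in total. A small indexing sketch (the enum and accessor are illustrative assumptions, not code from this file):

    #include <vector>

    enum BandStat { MeanPosDL = 0, MeanNegDL, MeanPosDO, MeanNegDO,
                    StdPosDL,      StdNegDL,  StdPosDO,  StdNegDO };

    // read one statistic of one band out of a flat LBD vector of numBands * 8 floats
    inline float bandStat( const std::vector<float> &desc, int band, BandStat s )
    {
      return desc[band * 8 + s];  // mirrors the desVec[desID + k] indexing used later
    }

The loop below walks each such line and fills those slots band by band.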
+    for ( short lineIDInSameLine = 0; lineIDInSameLine < sameLineSize; lineIDInSameLine++ )
+    {
+      /* get a line in current LineVec and its original ID in its octave */
+      pSingleLine = & ( keyLines[lineIDInScaleVec][lineIDInSameLine] );
+      octaveCount = (short) pSingleLine->octaveCount;
+
+      if( useDetectionData )
+      {
+        /* retrieve associated dxImg and dyImg */
+        pdxImg = edLineVec_[octaveCount]->dxImg_.ptr<short>();
+        pdyImg = edLineVec_[octaveCount]->dyImg_.ptr<short>();
+
+        /* get image size to work on from real one */
+        realWidth = (short) edLineVec_[octaveCount]->imageWidth;
+        imageWidth = realWidth - 1;
+        imageHeight = (short) ( edLineVec_[octaveCount]->imageHeight - 1 );
+      }
+      else
+      {
+        /* retrieve associated dxImg and dyImg */
+        pdxImg = dxImg_vector[octaveCount].ptr<short>();
+        pdyImg = dyImg_vector[octaveCount].ptr<short>();
+
+        /* get image size to work on from real one */
+        realWidth = (short) images_sizes[octaveCount].width;
+        imageWidth = realWidth - 1;
+        imageHeight = (short) ( images_sizes[octaveCount].height - 1 );
+      }
+
+      /* initialize memory areas */
+      memset( pgdLBandSum, 0, numOfBitsBand );
+      memset( ngdLBandSum, 0, numOfBitsBand );
+      memset( pgdL2BandSum, 0, numOfBitsBand );
+      memset( ngdL2BandSum, 0, numOfBitsBand );
+      memset( pgdOBandSum, 0, numOfBitsBand );
+      memset( ngdOBandSum, 0, numOfBitsBand );
+      memset( pgdO2BandSum, 0, numOfBitsBand );
+      memset( ngdO2BandSum, 0, numOfBitsBand );
+
+      /* get length of line and its half */
+      lengthOfLSP = (short) keyLines[lineIDInScaleVec][lineIDInSameLine].numOfPixels;
+      halfWidth = ( lengthOfLSP - 1 ) / 2;
+
+      /* get the midpoint of the line */
+      lineMiddlePointX = (float) ( 0.5 * ( pSingleLine->sPointInOctaveX + pSingleLine->ePointInOctaveX ) );
+      lineMiddlePointY = (float) ( 0.5 * ( pSingleLine->sPointInOctaveY + pSingleLine->ePointInOctaveY ) );
+
+      /* 1. rotate the local coordinate system to the line direction (direction is the angle
+       *    between the positive line direction and the positive X axis)
+       * 2. compute the gradient projection of pixels in the line support region */
+
+      /* get the vector representing the original image reference system after rotation to align with
+       * the line's direction */
+      dL[0] = cos( pSingleLine->direction );
+      dL[1] = sin( pSingleLine->direction );
+
+      /* set the clockwise orthogonal vector of line direction */
+      dO[0] = -dL[1];
+      dO[1] = dL[0];
+
+      /* get rotated reference frame */
+      sCorX0 = -dL[0] * halfWidth + dL[1] * halfHeight + lineMiddlePointX; //hID = 0; wID = 0;
+      sCorY0 = -dL[1] * halfWidth - dL[0] * halfHeight + lineMiddlePointY;
+
+      /* BIAS::Matrix gDLMat(heightOfLSP,lengthOfLSP) */
+      for ( short hID = 0; hID < heightOfLSP; hID++ )
+      {
+        /* initialization */
+        sCorX = sCorX0;
+        sCorY = sCorY0;
+
+        pgdLRowSum = 0;
+        ngdLRowSum = 0;
+        pgdORowSum = 0;
+        ngdORowSum = 0;
+
+        for ( short wID = 0; wID < lengthOfLSP; wID++ )
+        {
+          tempCor = (short) round( sCorX );
+          xCor = ( tempCor < 0 ) ? 0 : ( tempCor > imageWidth ) ? imageWidth : tempCor;
+          tempCor = (short) round( sCorY );
+          yCor = ( tempCor < 0 ) ? 0 : ( tempCor > imageHeight ) ?
imageHeight : tempCor; + + /* To achieve rotation invariance, each simple gradient is rotated aligned with + * the line direction and clockwise orthogonal direction.*/ + dx = pdxImg[yCor * realWidth + xCor]; + dy = pdyImg[yCor * realWidth + xCor]; + gDL = dx * dL[0] + dy * dL[1]; + gDO = dx * dO[0] + dy * dO[1]; + if( gDL > 0 ) + { + pgdLRowSum += gDL; + } + else + { + ngdLRowSum -= gDL; + } + if( gDO > 0 ) + { + pgdORowSum += gDO; + } + else + { + ngdORowSum -= gDO; + } + sCorX += dL[0]; + sCorY += dL[1]; + /* gDLMat[hID][wID] = gDL; */ + } + sCorX0 -= dL[1]; + sCorY0 += dL[0]; + coefInGaussion = (float) gaussCoefG_[hID]; + pgdLRowSum = coefInGaussion * pgdLRowSum; + ngdLRowSum = coefInGaussion * ngdLRowSum; + pgdL2RowSum = pgdLRowSum * pgdLRowSum; + ngdL2RowSum = ngdLRowSum * ngdLRowSum; + pgdORowSum = coefInGaussion * pgdORowSum; + ngdORowSum = coefInGaussion * ngdORowSum; + pgdO2RowSum = pgdORowSum * pgdORowSum; + ngdO2RowSum = ngdORowSum * ngdORowSum; + + /* compute {g_dL |g_dL>0 }, {g_dL |g_dL<0 }, + {g_dO |g_dO>0 }, {g_dO |g_dO<0 } of each band in the line support region + first, current row belong to current band */ + bandID = (short) ( hID / params.widthOfBand_ ); + coefInGaussion = (float) ( gaussCoefL_[hID % params.widthOfBand_ + params.widthOfBand_] ); + pgdLBandSum[bandID] += coefInGaussion * pgdLRowSum; + ngdLBandSum[bandID] += coefInGaussion * ngdLRowSum; + pgdL2BandSum[bandID] += coefInGaussion * coefInGaussion * pgdL2RowSum; + ngdL2BandSum[bandID] += coefInGaussion * coefInGaussion * ngdL2RowSum; + pgdOBandSum[bandID] += coefInGaussion * pgdORowSum; + ngdOBandSum[bandID] += coefInGaussion * ngdORowSum; + pgdO2BandSum[bandID] += coefInGaussion * coefInGaussion * pgdO2RowSum; + ngdO2BandSum[bandID] += coefInGaussion * coefInGaussion * ngdO2RowSum; + + /* In order to reduce boundary effect along the line gradient direction, + * a row's gradient will contribute not only to its current band, but also + * to its nearest upper and down band with gaussCoefL_.*/ + bandID--; + if( bandID >= 0 ) + {/* the band above the current band */ + coefInGaussion = (float) ( gaussCoefL_[hID % params.widthOfBand_ + 2 * params.widthOfBand_] ); + pgdLBandSum[bandID] += coefInGaussion * pgdLRowSum; + ngdLBandSum[bandID] += coefInGaussion * ngdLRowSum; + pgdL2BandSum[bandID] += coefInGaussion * coefInGaussion * pgdL2RowSum; + ngdL2BandSum[bandID] += coefInGaussion * coefInGaussion * ngdL2RowSum; + pgdOBandSum[bandID] += coefInGaussion * pgdORowSum; + ngdOBandSum[bandID] += coefInGaussion * ngdORowSum; + pgdO2BandSum[bandID] += coefInGaussion * coefInGaussion * pgdO2RowSum; + ngdO2BandSum[bandID] += coefInGaussion * coefInGaussion * ngdO2RowSum; + } + bandID = bandID + 2; + if( bandID < NUM_OF_BANDS ) + {/*the band below the current band */ + coefInGaussion = (float) ( gaussCoefL_[hID % params.widthOfBand_] ); + pgdLBandSum[bandID] += coefInGaussion * pgdLRowSum; + ngdLBandSum[bandID] += coefInGaussion * ngdLRowSum; + pgdL2BandSum[bandID] += coefInGaussion * coefInGaussion * pgdL2RowSum; + ngdL2BandSum[bandID] += coefInGaussion * coefInGaussion * ngdL2RowSum; + pgdOBandSum[bandID] += coefInGaussion * pgdORowSum; + ngdOBandSum[bandID] += coefInGaussion * ngdORowSum; + pgdO2BandSum[bandID] += coefInGaussion * coefInGaussion * pgdO2RowSum; + ngdO2BandSum[bandID] += coefInGaussion * coefInGaussion * ngdO2RowSum; + } + } + /* gDLMat.Save("gDLMat.txt"); + return 0; */ + + /* construct line descriptor */ + pSingleLine->descriptor.resize( descriptor_size ); + desVec = &pSingleLine->descriptor.front(); + + 
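The block that follows reduces each band's accumulated sums to descriptor entries with the usual one-pass moment identities: for a sum S1 and a sum of squares S2 over an effective sample size N, mean = S1/N and std = sqrt(S2/N - mean^2); border bands use N = 2*widthOfBand_ (invN2) and interior bands N = 3*widthOfBand_ (invN3), because border bands receive contributions from one fewer neighbouring band. The identity in isolation:

    #include <cmath>

    // one-pass mean / standard deviation from a sum and a sum of squares
    inline void meanStd( float sum, float sumSq, float invN, float &mean, float &stdDev )
    {
      mean = sum * invN;
      stdDev = std::sqrt( sumSq * invN - mean * mean );  // E[x^2] - E[x]^2
    }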
short desID; + + /*Note that the first and last bands only have (lengthOfLSP * widthOfBand_ * 2.0) pixels + * which are counted. */ + float invN2 = (float) ( 1.0 / ( params.widthOfBand_ * 2.0 ) ); + float invN3 = (float) ( 1.0 / ( params.widthOfBand_ * 3.0 ) ); + float invN, temp; + for ( bandID = 0; bandID < NUM_OF_BANDS; bandID++ ) + { + if( bandID == 0 || bandID == NUM_OF_BANDS - 1 ) + { + invN = invN2; + } + else + { + invN = invN3; + } + desID = bandID * 8; + temp = pgdLBandSum[bandID] * invN; + desVec[desID] = temp;/* mean value of pgdL; */ + desVec[desID + 4] = sqrt( pgdL2BandSum[bandID] * invN - temp * temp ); //std value of pgdL; + temp = ngdLBandSum[bandID] * invN; + desVec[desID + 1] = temp; //mean value of ngdL; + desVec[desID + 5] = sqrt( ngdL2BandSum[bandID] * invN - temp * temp ); //std value of ngdL; + + temp = pgdOBandSum[bandID] * invN; + desVec[desID + 2] = temp; //mean value of pgdO; + desVec[desID + 6] = sqrt( pgdO2BandSum[bandID] * invN - temp * temp ); //std value of pgdO; + temp = ngdOBandSum[bandID] * invN; + desVec[desID + 3] = temp; //mean value of ngdO; + desVec[desID + 7] = sqrt( ngdO2BandSum[bandID] * invN - temp * temp ); //std value of ngdO; + } + + // normalize; + float tempM, tempS; + tempM = 0; + tempS = 0; + desVec = &pSingleLine->descriptor.front(); + + int base = 0; + for ( short i = 0; i < (short) ( NUM_OF_BANDS * 8 ); ++base, i = (short) ( base * 8 ) ) + { + tempM += * ( desVec + i ) * * ( desVec + i ); //desVec[8*i+0] * desVec[8*i+0]; + tempM += * ( desVec + i + 1 ) * * ( desVec + i + 1 ); //desVec[8*i+1] * desVec[8*i+1]; + tempM += * ( desVec + i + 2 ) * * ( desVec + i + 2 ); //desVec[8*i+2] * desVec[8*i+2]; + tempM += * ( desVec + i + 3 ) * * ( desVec + i + 3 ); //desVec[8*i+3] * desVec[8*i+3]; + tempS += * ( desVec + i + 4 ) * * ( desVec + i + 4 ); //desVec[8*i+4] * desVec[8*i+4]; + tempS += * ( desVec + i + 5 ) * * ( desVec + i + 5 ); //desVec[8*i+5] * desVec[8*i+5]; + tempS += * ( desVec + i + 6 ) * * ( desVec + i + 6 ); //desVec[8*i+6] * desVec[8*i+6]; + tempS += * ( desVec + i + 7 ) * * ( desVec + i + 7 ); //desVec[8*i+7] * desVec[8*i+7]; + } + + tempM = 1 / sqrt( tempM ); + tempS = 1 / sqrt( tempS ); + desVec = &pSingleLine->descriptor.front(); + base = 0; + for ( short i = 0; i < (short) ( NUM_OF_BANDS * 8 ); ++base, i = (short) ( base * 8 ) ) + { + * ( desVec + i ) = * ( desVec + i ) * tempM; //desVec[8*i] = desVec[8*i] * tempM; + * ( desVec + 1 + i ) = * ( desVec + 1 + i ) * tempM; //desVec[8*i+1] = desVec[8*i+1] * tempM; + * ( desVec + 2 + i ) = * ( desVec + 2 + i ) * tempM; //desVec[8*i+2] = desVec[8*i+2] * tempM; + * ( desVec + 3 + i ) = * ( desVec + 3 + i ) * tempM; //desVec[8*i+3] = desVec[8*i+3] * tempM; + * ( desVec + 4 + i ) = * ( desVec + 4 + i ) * tempS; //desVec[8*i+4] = desVec[8*i+4] * tempS; + * ( desVec + 5 + i ) = * ( desVec + 5 + i ) * tempS; //desVec[8*i+5] = desVec[8*i+5] * tempS; + * ( desVec + 6 + i ) = * ( desVec + 6 + i ) * tempS; //desVec[8*i+6] = desVec[8*i+6] * tempS; + * ( desVec + 7 + i ) = * ( desVec + 7 + i ) * tempS; //desVec[8*i+7] = desVec[8*i+7] * tempS; + } + + /* In order to reduce the influence of non-linear illumination, + * a threshold is used to limit the value of element in the unit feature + * vector no larger than this threshold. 
In Z.Wang's work, a value of 0.4 was found
+       * empirically to be a proper threshold. */
+      desVec = &pSingleLine->descriptor.front();
+      for ( short i = 0; i < descriptor_size; i++ )
+      {
+        if( desVec[i] > 0.4 )
+        {
+          desVec[i] = (float) 0.4;
+        }
+      }
+
+      //re-normalize desVec;
+      temp = 0;
+      for ( short i = 0; i < descriptor_size; i++ )
+      {
+        temp += desVec[i] * desVec[i];
+      }
+
+      temp = 1 / sqrt( temp );
+      for ( short i = 0; i < descriptor_size; i++ )
+      {
+        desVec[i] = desVec[i] * temp;
+      }
+    }/* end for(short lineIDInSameLine = 0; lineIDInSameLine<sameLineSize; lineIDInSameLine++) */
+
+    cv::Mat appoggio = cv::Mat( 1, 32, CV_32FC1 );
+    float* pointerToRow = appoggio.ptr<float>( 0 );
+    for ( int g = 0; g < 32; g++ )
+    {
+      /* get LBD data */
+      float* des_Vec = &keyLines[lineIDInScaleVec][0].descriptor.front();
+      *pointerToRow = des_Vec[g];
+      pointerToRow++;
+    }
+  }/* end for(short lineIDInScaleVec = 0; lineIDInScaleVec<numOfFinalLine; lineIDInScaleVec++) */
+
+  delete[] dL;
+  delete[] dO;
+  delete[] pgdLBandSum;
+  delete[] ngdLBandSum;
+  delete[] pgdL2BandSum;
+  delete[] ngdL2BandSum;
+  delete[] pgdOBandSum;
+  delete[] ngdOBandSum;
+  delete[] pgdO2BandSum;
+  delete[] ngdO2BandSum;
+
+  return 1;
+}
+
+void BinaryDescriptor::EDLineDetector::InitEDLine_()
+{
+  bValidate_ = true;
+  ATA = cv::Mat_<float>( 2, 2 );
+  ATV = cv::Mat_<float>( 1, 2 );
+  tempMatLineFit = cv::Mat_<float>( 2, 2 );
+  tempVecLineFit = cv::Mat_<float>( 1, 2 );
+  fitMatT = cv::Mat_<float>( 2, minLineLen_ );
+  fitVec = cv::Mat_<float>( 1, minLineLen_ );
+  for ( int i = 0; i < minLineLen_; i++ )
+  {
+    fitMatT[1][i] = 1;
+  }
+  dxImg_.create( 1, 1, CV_16SC1 );
+  dyImg_.create( 1, 1, CV_16SC1 );
+  gImgWO_.create( 1, 1, CV_8SC1 );
+  pFirstPartEdgeX_ = NULL;
+  pFirstPartEdgeY_ = NULL;
+  pFirstPartEdgeS_ = NULL;
+  pSecondPartEdgeX_ = NULL;
+  pSecondPartEdgeY_ = NULL;
+  pSecondPartEdgeS_ = NULL;
+  pAnchorX_ = NULL;
+  pAnchorY_ = NULL;
+}
+
+BinaryDescriptor::EDLineDetector::~EDLineDetector()
+{
+  if( pFirstPartEdgeX_ != NULL )
+  {
+    delete[] pFirstPartEdgeX_;
+    delete[] pFirstPartEdgeY_;
+    delete[] pSecondPartEdgeX_;
+    delete[] pSecondPartEdgeY_;
+    delete[] pAnchorX_;
+    delete[] pAnchorY_;
+  }
+  if( pFirstPartEdgeS_ != NULL )
+  {
+    delete[] pFirstPartEdgeS_;
+    delete[] pSecondPartEdgeS_;
+  }
+}
+
+int BinaryDescriptor::EDLineDetector::EdgeDrawing( cv::Mat &image, EdgeChains &edgeChains )
+{
+  imageWidth = image.cols;
+  imageHeight = image.rows;
+  unsigned int pixelNum = imageWidth * imageHeight;
+
+  unsigned int edgePixelArraySize = pixelNum / 5;
+  unsigned int maxNumOfEdge = edgePixelArraySize / 20;
+  //compute dx, dy images
+  if( gImg_.cols != (int) imageWidth || gImg_.rows != (int) imageHeight )
+  {
+    if( pFirstPartEdgeX_ != NULL )
+    {
+      delete[] pFirstPartEdgeX_;
+      delete[] pFirstPartEdgeY_;
+      delete[] pSecondPartEdgeX_;
+      delete[] pSecondPartEdgeY_;
+      delete[] pFirstPartEdgeS_;
+      delete[] pSecondPartEdgeS_;
+      delete[] pAnchorX_;
+      delete[] pAnchorY_;
+    }
+
+    dxImg_.create( imageHeight, imageWidth, CV_16SC1 );
+    dyImg_.create( imageHeight, imageWidth, CV_16SC1 );
+    gImgWO_.create( imageHeight, imageWidth, CV_16SC1 );
+    gImg_.create( imageHeight, imageWidth, CV_16SC1 );
+    dirImg_.create( imageHeight, imageWidth, CV_8UC1 );
+    edgeImage_.create( imageHeight, imageWidth, CV_8UC1 );
+    pFirstPartEdgeX_ = new unsigned int[edgePixelArraySize];
+    pFirstPartEdgeY_ = new unsigned int[edgePixelArraySize];
+    pSecondPartEdgeX_ = new unsigned int[edgePixelArraySize];
+    pSecondPartEdgeY_ = new unsigned int[edgePixelArraySize];
+    pFirstPartEdgeS_ = new unsigned int[maxNumOfEdge];
+    pSecondPartEdgeS_ = new unsigned int[maxNumOfEdge];
+    pAnchorX_ = new unsigned int[edgePixelArraySize];
+    pAnchorY_ = new unsigned int[edgePixelArraySize];
+  }
+  cv::Sobel( image, dxImg_, CV_16SC1, 1, 0, 3 );
+  cv::Sobel( image, dyImg_, CV_16SC1, 0, 1, 3 );
+
+  //compute gradient and direction images
+  cv::Mat dxABS_m = cv::abs( dxImg_ );
+  cv::Mat dyABS_m = cv::abs( dyImg_ );
+  cv::Mat sumDxDy;
+  cv::add( dyABS_m, dxABS_m, sumDxDy );
+
+  cv::threshold( sumDxDy, gImg_, gradienThreshold_ + 1, 255, cv::THRESH_TOZERO );
+  gImg_ = gImg_ / 4;
+  gImgWO_ = sumDxDy / 4;
+  cv::compare( dxABS_m, dyABS_m, dirImg_, cv::CMP_LT );
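To summarize the maps just built: the gradient magnitude is the cheap L1 approximation (|dx| + |dy|), thresholded to zero below the gradient threshold and scaled down by 4 (the walk later reads these values through unsigned char casts), while dirImg_ labels a pixel "Horizontal" where |dx| < |dy|, i.e. the gradient points mostly vertically, so the edge through the pixel runs horizontally. Per pixel, that is roughly (255 as the Horizontal label is an assumption matching the cv::compare output):

    #include <cstdlib>

    // gradient magnitude as used for gImg_: L1 norm, threshold-to-zero, then /4
    inline short gradMagnitude( short dx, short dy, short gradThreshold )
    {
      int sum = std::abs( dx ) + std::abs( dy );
      return ( sum > gradThreshold ) ? (short) ( sum / 4 ) : 0;
    }

    // direction label as used for dirImg_
    inline unsigned char edgeDirection( short dx, short dy )
    {
      return ( std::abs( dx ) < std::abs( dy ) ) ? 255 : 0;
    }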
+  short *pgImg = gImg_.ptr<short>();
+  unsigned char *pdirImg = dirImg_.ptr();
+
+  //extract the anchors in the gradient image, store them into a vector
+  memset( pAnchorX_, 0, edgePixelArraySize * sizeof(unsigned int) ); //initialization
+  memset( pAnchorY_, 0, edgePixelArraySize * sizeof(unsigned int) );
+  unsigned int anchorsSize = 0;
+  int indexInArray;
+  unsigned char gValue1, gValue2, gValue3;
+  for ( unsigned int w = 1; w < imageWidth - 1; w = w + scanIntervals_ )
+  {
+    for ( unsigned int h = 1; h < imageHeight - 1; h = h + scanIntervals_ )
+    {
+      indexInArray = h * imageWidth + w;
+      //gValue1 = pdirImg[indexInArray];
+      if( pdirImg[indexInArray] == Horizontal )
+      { //if the direction of the pixel is horizontal, compare with up and down
+        //gValue2 = pgImg[indexInArray];
+        if( pgImg[indexInArray] >= pgImg[indexInArray - imageWidth] + anchorThreshold_
+            && pgImg[indexInArray] >= pgImg[indexInArray + imageWidth] + anchorThreshold_ )
+        { // (w,h) is accepted as an anchor
+          pAnchorX_[anchorsSize] = w;
+          pAnchorY_[anchorsSize++] = h;
+        }
+      }
+      else
+      { // if(pdirImg[indexInArray]==Vertical){ //it is a vertical edge, compare with left and right
+        //gValue2 = pgImg[indexInArray];
+        if( pgImg[indexInArray] >= pgImg[indexInArray - 1] + anchorThreshold_ && pgImg[indexInArray] >= pgImg[indexInArray + 1] + anchorThreshold_ )
+        { // (w,h) is accepted as an anchor
+          pAnchorX_[anchorsSize] = w;
+          pAnchorY_[anchorsSize++] = h;
+        }
+      }
+    }
+  }
+  if( anchorsSize > edgePixelArraySize )
+  {
+    std::cout << "anchor size is larger than its maximal size. anchorsSize=" << anchorsSize << ", maximal size = " << edgePixelArraySize << std::endl;
+    return -1;
+  }
+
+  //link the anchors by smart routing
+  edgeImage_.setTo( 0 );
+  unsigned char *pEdgeImg = edgeImage_.data;
+  memset( pFirstPartEdgeX_, 0, edgePixelArraySize * sizeof(unsigned int) ); //initialization
+  memset( pFirstPartEdgeY_, 0, edgePixelArraySize * sizeof(unsigned int) );
+  memset( pSecondPartEdgeX_, 0, edgePixelArraySize * sizeof(unsigned int) );
+  memset( pSecondPartEdgeY_, 0, edgePixelArraySize * sizeof(unsigned int) );
+  memset( pFirstPartEdgeS_, 0, maxNumOfEdge * sizeof(unsigned int) );
+  memset( pSecondPartEdgeS_, 0, maxNumOfEdge * sizeof(unsigned int) );
+  unsigned int offsetPFirst = 0, offsetPSecond = 0;
+  unsigned int offsetPS = 0;
+
+  unsigned int x, y;
+  unsigned int lastX = 0;
+  unsigned int lastY = 0;
+  unsigned char lastDirection; //up = 1, right = 2, down = 3, left = 4;
+  unsigned char shouldGoDirection; //up = 1, right = 2, down = 3, left = 4;
+  int edgeLenFirst, edgeLenSecond;
+  for ( unsigned int i = 0; i < anchorsSize; i++ )
+  {
+    x = pAnchorX_[i];
+    y = pAnchorY_[i];
+    indexInArray = y * imageWidth + x;
+    if( pEdgeImg[indexInArray] )
+    { //if anchor i has already been marked as an edge pixel, skip it
+      continue;
+    }
+    /*The walk stops under 3 conditions:
+     * 1. We move out of the edge areas, i.e., the thresholded gradient value
+     *    of the current pixel is 0.
+     * 2. The current direction of the edge changes, i.e., from horizontal
+     *    to vertical or vice versa.?? (This turned out not to be correct. From the online edge draw demo
+     *    we can figure out that the authors don't implement this rule either, because their extracted edge
+     *    chain can be a circle, which means pixel directions will definitely differ
+     *    somewhere on the chain.)
+     * 3. We encounter a previously detected edge pixel.
*/ + pFirstPartEdgeS_[offsetPS] = offsetPFirst; + if( pdirImg[indexInArray] == Horizontal ) + { //if the direction of this pixel is horizontal, then go left and right. + //fist go right, pixel direction may be different during linking. + lastDirection = RightDir; + while ( pgImg[indexInArray] > 0 && !pEdgeImg[indexInArray] ) + { + pEdgeImg[indexInArray] = 1; // Mark this pixel as an edge pixel + pFirstPartEdgeX_[offsetPFirst] = x; + pFirstPartEdgeY_[offsetPFirst++] = y; + shouldGoDirection = 0; //unknown + if( pdirImg[indexInArray] == Horizontal ) + { //should go left or right + if( lastDirection == UpDir || lastDirection == DownDir ) + { //change the pixel direction now + if( x > lastX ) + { //should go right + shouldGoDirection = RightDir; + } + else + { //should go left + shouldGoDirection = LeftDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == RightDir || shouldGoDirection == RightDir ) + { //go right + if( x == imageWidth - 1 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the right and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-right + x = x + 1; + y = y + 1; + } + else + { //straight-right + x = x + 1; + } + lastDirection = RightDir; + } + else if( lastDirection == LeftDir || shouldGoDirection == LeftDir ) + { //go left + if( x == 0 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the left and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + gValue2 = (unsigned char) pgImg[indexInArray - 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-left + x = x - 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-left + x = x - 1; + } + lastDirection = LeftDir; + } + } + else + { //should go up or down. + if( lastDirection == RightDir || lastDirection == LeftDir ) + { //change the pixel direction now + if( y > lastY ) + { //should go down + shouldGoDirection = DownDir; + } + else + { //should go up + shouldGoDirection = UpDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == DownDir || shouldGoDirection == DownDir ) + { //go down + if( x == 0 || x == imageWidth - 1 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the down and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //down-right + x = x + 1; + y = y + 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-down + y = y + 1; + } + lastDirection = DownDir; + } + else if( lastDirection == UpDir || shouldGoDirection == UpDir ) + { //go up + if( x == 0 || x == imageWidth - 1 || y == 0 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the up and pick the one with the max. 
gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray - imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //up-left + x = x - 1; + y = y - 1; + } + else + { //straight-up + y = y - 1; + } + lastDirection = UpDir; + } + } + indexInArray = y * imageWidth + x; + } //end while go right + //then go left, pixel direction may be different during linking. + x = pAnchorX_[i]; + y = pAnchorY_[i]; + indexInArray = y * imageWidth + x; + pEdgeImg[indexInArray] = 0; //mark the anchor point be a non-edge pixel and + lastDirection = LeftDir; + pSecondPartEdgeS_[offsetPS] = offsetPSecond; + while ( pgImg[indexInArray] > 0 && !pEdgeImg[indexInArray] ) + { + pEdgeImg[indexInArray] = 1; // Mark this pixel as an edge pixel + pSecondPartEdgeX_[offsetPSecond] = x; + pSecondPartEdgeY_[offsetPSecond++] = y; + shouldGoDirection = 0; //unknown + if( pdirImg[indexInArray] == Horizontal ) + { //should go left or right + if( lastDirection == UpDir || lastDirection == DownDir ) + { //change the pixel direction now + if( x > lastX ) + { //should go right + shouldGoDirection = RightDir; + } + else + { //should go left + shouldGoDirection = LeftDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == RightDir || shouldGoDirection == RightDir ) + { //go right + if( x == imageWidth - 1 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the right and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-right + x = x + 1; + y = y + 1; + } + else + { //straight-right + x = x + 1; + } + lastDirection = RightDir; + } + else if( lastDirection == LeftDir || shouldGoDirection == LeftDir ) + { //go left + if( x == 0 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the left and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + gValue2 = (unsigned char) pgImg[indexInArray - 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-left + x = x - 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-left + x = x - 1; + } + lastDirection = LeftDir; + } + } + else + { //should go up or down. + if( lastDirection == RightDir || lastDirection == LeftDir ) + { //change the pixel direction now + if( y > lastY ) + { //should go down + shouldGoDirection = DownDir; + } + else + { //should go up + shouldGoDirection = UpDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == DownDir || shouldGoDirection == DownDir ) + { //go down + if( x == 0 || x == imageWidth - 1 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the down and pick the one with the max. 
gradient value + gValue1 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //down-right + x = x + 1; + y = y + 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-down + y = y + 1; + } + lastDirection = DownDir; + } + else if( lastDirection == UpDir || shouldGoDirection == UpDir ) + { //go up + if( x == 0 || x == imageWidth - 1 || y == 0 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the up and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray - imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //up-left + x = x - 1; + y = y - 1; + } + else + { //straight-up + y = y - 1; + } + lastDirection = UpDir; + } + } + indexInArray = y * imageWidth + x; + } //end while go left + //end anchor is Horizontal + } + else + { //the direction of this pixel is vertical, go up and down + //fist go down, pixel direction may be different during linking. + lastDirection = DownDir; + while ( pgImg[indexInArray] > 0 && !pEdgeImg[indexInArray] ) + { + pEdgeImg[indexInArray] = 1; // Mark this pixel as an edge pixel + pFirstPartEdgeX_[offsetPFirst] = x; + pFirstPartEdgeY_[offsetPFirst++] = y; + shouldGoDirection = 0; //unknown + if( pdirImg[indexInArray] == Horizontal ) + { //should go left or right + if( lastDirection == UpDir || lastDirection == DownDir ) + { //change the pixel direction now + if( x > lastX ) + { //should go right + shouldGoDirection = RightDir; + } + else + { //should go left + shouldGoDirection = LeftDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == RightDir || shouldGoDirection == RightDir ) + { //go right + if( x == imageWidth - 1 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the right and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-right + x = x + 1; + y = y + 1; + } + else + { //straight-right + x = x + 1; + } + lastDirection = RightDir; + } + else if( lastDirection == LeftDir || shouldGoDirection == LeftDir ) + { //go left + if( x == 0 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the left and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + gValue2 = (unsigned char) pgImg[indexInArray - 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-left + x = x - 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-left + x = x - 1; + } + lastDirection = LeftDir; + } + } + else + { //should go up or down. 
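All four direction branches of this walk repeat one "smart routing" step: look at the three forward neighbours in the current walking direction and move to the one with the largest gradient, which keeps the chain glued to the gradient ridge. The step for walking right, factored out as a sketch (the helper name is illustrative; the image-border checks done in the surrounding code are omitted):

    // step one pixel to the right, choosing among up-right / right / down-right by
    // largest gradient, with the same tie-breaking order as the gValue1..3 tests here
    inline void stepRight( const short *gImg, int width, unsigned int &x, unsigned int &y )
    {
      int idx = (int) ( y * width + x );
      short upRight = gImg[idx - width + 1];
      short right = gImg[idx + 1];
      short downRight = gImg[idx + width + 1];
      x = x + 1;
      if( upRight >= right && upRight >= downRight )
        y = y - 1;
      else if( downRight >= right && downRight >= upRight )
        y = y + 1;  // otherwise keep y: go straight right
    }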
+ if( lastDirection == RightDir || lastDirection == LeftDir ) + { //change the pixel direction now + if( y > lastY ) + { //should go down + shouldGoDirection = DownDir; + } + else + { //should go up + shouldGoDirection = UpDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == DownDir || shouldGoDirection == DownDir ) + { //go down + if( x == 0 || x == imageWidth - 1 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the down and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //down-right + x = x + 1; + y = y + 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-down + y = y + 1; + } + lastDirection = DownDir; + } + else if( lastDirection == UpDir || shouldGoDirection == UpDir ) + { //go up + if( x == 0 || x == imageWidth - 1 || y == 0 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the up and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray - imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //up-left + x = x - 1; + y = y - 1; + } + else + { //straight-up + y = y - 1; + } + lastDirection = UpDir; + } + } + indexInArray = y * imageWidth + x; + } //end while go down + //then go up, pixel direction may be different during linking. + lastDirection = UpDir; + x = pAnchorX_[i]; + y = pAnchorY_[i]; + indexInArray = y * imageWidth + x; + pEdgeImg[indexInArray] = 0; //mark the anchor point be a non-edge pixel and + pSecondPartEdgeS_[offsetPS] = offsetPSecond; + while ( pgImg[indexInArray] > 0 && !pEdgeImg[indexInArray] ) + { + pEdgeImg[indexInArray] = 1; // Mark this pixel as an edge pixel + pSecondPartEdgeX_[offsetPSecond] = x; + pSecondPartEdgeY_[offsetPSecond++] = y; + shouldGoDirection = 0; //unknown + if( pdirImg[indexInArray] == Horizontal ) + { //should go left or right + if( lastDirection == UpDir || lastDirection == DownDir ) + { //change the pixel direction now + if( x > lastX ) + { //should go right + shouldGoDirection = RightDir; + } + else + { //should go left + shouldGoDirection = LeftDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == RightDir || shouldGoDirection == RightDir ) + { //go right + if( x == imageWidth - 1 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the right and pick the one with the max. 
gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-right + x = x + 1; + y = y + 1; + } + else + { //straight-right + x = x + 1; + } + lastDirection = RightDir; + } + else if( lastDirection == LeftDir || shouldGoDirection == LeftDir ) + { //go left + if( x == 0 || y == 0 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the left and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + gValue2 = (unsigned char) pgImg[indexInArray - 1]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-left + x = x - 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-left + x = x - 1; + } + lastDirection = LeftDir; + } + } + else + { //should go up or down. + if( lastDirection == RightDir || lastDirection == LeftDir ) + { //change the pixel direction now + if( y > lastY ) + { //should go down + shouldGoDirection = DownDir; + } + else + { //should go up + shouldGoDirection = UpDir; + } + } + lastX = x; + lastY = y; + if( lastDirection == DownDir || shouldGoDirection == DownDir ) + { //go down + if( x == 0 || x == imageWidth - 1 || y == imageHeight - 1 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the down and pick the one with the max. gradient value + gValue1 = (unsigned char) pgImg[indexInArray + imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray + imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray + imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //down-right + x = x + 1; + y = y + 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //down-left + x = x - 1; + y = y + 1; + } + else + { //straight-down + y = y + 1; + } + lastDirection = DownDir; + } + else if( lastDirection == UpDir || shouldGoDirection == UpDir ) + { //go up + if( x == 0 || x == imageWidth - 1 || y == 0 ) + { //reach the image border + break; + } + // Look at 3 neighbors to the up and pick the one with the max. 
gradient value + gValue1 = (unsigned char) pgImg[indexInArray - imageWidth + 1]; + gValue2 = (unsigned char) pgImg[indexInArray - imageWidth]; + gValue3 = (unsigned char) pgImg[indexInArray - imageWidth - 1]; + if( gValue1 >= gValue2 && gValue1 >= gValue3 ) + { //up-right + x = x + 1; + y = y - 1; + } + else if( gValue3 >= gValue2 && gValue3 >= gValue1 ) + { //up-left + x = x - 1; + y = y - 1; + } + else + { //straight-up + y = y - 1; + } + lastDirection = UpDir; + } + } + indexInArray = y * imageWidth + x; + } //end while go up + } //end anchor is Vertical + //only keep the edge chains whose length is larger than the minLineLen_; + edgeLenFirst = offsetPFirst - pFirstPartEdgeS_[offsetPS]; + edgeLenSecond = offsetPSecond - pSecondPartEdgeS_[offsetPS]; + if( edgeLenFirst + edgeLenSecond < minLineLen_ + 1 ) + { //short edge, drop it + offsetPFirst = pFirstPartEdgeS_[offsetPS]; + offsetPSecond = pSecondPartEdgeS_[offsetPS]; + } + else + { + offsetPS++; + } + } + //store the last index + pFirstPartEdgeS_[offsetPS] = offsetPFirst; + pSecondPartEdgeS_[offsetPS] = offsetPSecond; + if( offsetPS > maxNumOfEdge ) + { + std::cout << "Edge drawing Error: The total number of edges is larger than MaxNumOfEdge, " + "numofedge = " << offsetPS << ", MaxNumOfEdge=" << maxNumOfEdge << std::endl; + return -1; + } + if( offsetPFirst > edgePixelArraySize || offsetPSecond > edgePixelArraySize ) + { + std::cout << "Edge drawing Error: The total number of edge pixels is larger than MaxNumOfEdgePixels, " + "numofedgePixel1 = " << offsetPFirst << ", numofedgePixel2 = " << offsetPSecond << ", MaxNumOfEdgePixel=" << edgePixelArraySize << std::endl; + return -1; + } + + /*now all the edge information are stored in pFirstPartEdgeX_, pFirstPartEdgeY_, + *pFirstPartEdgeS_, pSecondPartEdgeX_, pSecondPartEdgeY_, pSecondPartEdgeS_; + *we should reorganize them into edgeChains for easily using. 
+   */
+  int tempID;
+  edgeChains.xCors.resize( offsetPFirst + offsetPSecond );
+  edgeChains.yCors.resize( offsetPFirst + offsetPSecond );
+  edgeChains.sId.resize( offsetPS + 1 );
+  unsigned int *pxCors = &edgeChains.xCors.front();
+  unsigned int *pyCors = &edgeChains.yCors.front();
+  unsigned int *psId = &edgeChains.sId.front();
+  offsetPFirst = 0;
+  offsetPSecond = 0;
+  unsigned int indexInCors = 0;
+  unsigned int numOfEdges = 0;
+  for ( unsigned int edgeId = 0; edgeId < offsetPS; edgeId++ )
+  {
+    //step1, put the first and second part edge coordinates together, from edge start to edge end
+    psId[numOfEdges++] = indexInCors;
+    indexInArray = pFirstPartEdgeS_[edgeId];
+    offsetPFirst = pFirstPartEdgeS_[edgeId + 1];
+    for ( tempID = offsetPFirst - 1; tempID >= indexInArray; tempID-- )
+    { //add first part edge
+      pxCors[indexInCors] = pFirstPartEdgeX_[tempID];
+      pyCors[indexInCors++] = pFirstPartEdgeY_[tempID];
+    }
+    indexInArray = pSecondPartEdgeS_[edgeId];
+    offsetPSecond = pSecondPartEdgeS_[edgeId + 1];
+    for ( tempID = indexInArray + 1; tempID < (int) offsetPSecond; tempID++ )
+    { //add second part edge
+      pxCors[indexInCors] = pSecondPartEdgeX_[tempID];
+      pyCors[indexInCors++] = pSecondPartEdgeY_[tempID];
+    }
+  }
+  psId[numOfEdges] = indexInCors; //the end index of the last edge
+  edgeChains.numOfEdges = numOfEdges;
+
+  return 1;
+}
+
+int BinaryDescriptor::EDLineDetector::EDline( cv::Mat &image, LineChains &lines )
+{
+  //first, call the EdgeDrawing function to extract edges
+  EdgeChains edges;
+  if( ( EdgeDrawing( image, edges ) ) != 1 )
+  {
+    std::cout << "Line Detection not finished" << std::endl;
+    return -1;
+  }
+
+  //detect lines
+  unsigned int linePixelID = edges.sId[edges.numOfEdges];
+  lines.xCors.resize( linePixelID );
+  lines.yCors.resize( linePixelID );
+  lines.sId.resize( 5 * edges.numOfEdges );
+  unsigned int *pEdgeXCors = &edges.xCors.front();
+  unsigned int *pEdgeYCors = &edges.yCors.front();
+  unsigned int *pEdgeSID = &edges.sId.front();
+  unsigned int *pLineXCors = &lines.xCors.front();
+  unsigned int *pLineYCors = &lines.yCors.front();
+  unsigned int *pLineSID = &lines.sId.front();
+  logNT_ = 2.0 * ( log10( (double) imageWidth ) + log10( (double) imageHeight ) );
+  double lineFitErr = 0; //the line fit error;
+  std::vector<double> lineEquation( 2, 0 );
+  lineEquations_.clear();
+  lineEndpoints_.clear();
+  lineDirection_.clear();
+  unsigned char *pdirImg = dirImg_.data;
+  unsigned int numOfLines = 0;
+  unsigned int newOffsetS = 0;
+  unsigned int offsetInEdgeArrayS, offsetInEdgeArrayE; //start index and end index
+  unsigned int offsetInLineArray = 0;
+  float direction; //line direction
+
+  for ( unsigned int edgeID = 0; edgeID < edges.numOfEdges; edgeID++ )
+  {
+    offsetInEdgeArrayS = pEdgeSID[edgeID];
+    offsetInEdgeArrayE = pEdgeSID[edgeID + 1];
+    while ( offsetInEdgeArrayE > offsetInEdgeArrayS + minLineLen_ )
+    { //extract line segments from an edge; more than one segment may be found
+      //find an initial line segment
+      while ( offsetInEdgeArrayE > offsetInEdgeArrayS + minLineLen_ )
+      {
+        lineFitErr = LeastSquaresLineFit_( pEdgeXCors, pEdgeYCors, offsetInEdgeArrayS, lineEquation );
+        if( lineFitErr <= lineFitErrThreshold_ )
+          break; //ok, an initial line segment is detected
+        offsetInEdgeArrayS += SkipEdgePoint; //skip the first two pixels in the chain and try with the remaining pixels
+      }
+      if( lineFitErr > lineFitErrThreshold_ )
+        break; //no line is detected
+      //An initial line segment is detected. Try to extend this line segment.
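The extension phase that begins here has one shape, repeated for the y=ax+b and x=ay+b cases below: sweep forward along the edge chain adding pixels whose point-to-line distance stays within lineFitErrThreshold_, give back any trailing run of consecutive outliers, then refit and sweep again until nothing new is accepted (the original additionally caps the number of sweeps at TryTime). A compact sketch of that loop on an abstract point chain (the names and the fitLine callback are illustrative):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    struct Pt { double x, y; };

    // grow a segment over chain, starting from seedLen fitted pixels;
    // fitLine(pts, n, a, b) stands in for the least-squares refit of y = a*x + b
    size_t growSegment( const std::vector<Pt> &chain, size_t seedLen, double errThr,
                        void ( *fitLine )( const Pt *, size_t, double &, double & ) )
    {
      size_t n = seedLen;
      bool extended = true;
      while ( extended )
      {
        double a, b;
        fitLine( chain.data(), n, a, b );
        double inv = 1.0 / std::sqrt( a * a + 1.0 );  // normalizer for ax - y + b = 0
        size_t added = 0;
        int outliers = 0;
        for ( size_t i = n; i < chain.size(); i++ )
        {
          added++;  // tentatively accept the pixel
          double d = std::fabs( a * chain[i].x - chain[i].y + b ) * inv;
          if( d > errThr ) { if( ++outliers > 3 ) break; }
          else outliers = 0;
        }
        added -= (size_t) outliers;  // return trailing outliers to the chain
        n += added;
        extended = ( added > 0 );
      }
      return n;
    }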
+      pLineSID[numOfLines] = offsetInLineArray;
+      double coef1 = 0; //for a line ax+by+c=0, coef1 = 1/sqrt(a^2+b^2);
+      double pointToLineDis; //for a line ax+by+c=0 and a point (xi, yi), pointToLineDis = coef1*|a*xi+b*yi+c|
+      bool bExtended = true;
+      bool bFirstTry = true;
+      int numOfOutlier; //to be robust against noise, we accept a few outliers on a line.
+      int tryTimes = 0;
+      if( pdirImg[pEdgeYCors[offsetInEdgeArrayS] * imageWidth + pEdgeXCors[offsetInEdgeArrayS]] == Horizontal )
+      { //y=ax+b, i.e. ax-y+b=0
+        while ( bExtended )
+        {
+          tryTimes++;
+          if( bFirstTry )
+          {
+            bFirstTry = false;
+            for ( int i = 0; i < minLineLen_; i++ )
+            { //first add the initial line segment to the line array
+              pLineXCors[offsetInLineArray] = pEdgeXCors[offsetInEdgeArrayS];
+              pLineYCors[offsetInLineArray++] = pEdgeYCors[offsetInEdgeArrayS++];
+            }
+          }
+          else
+          { //after each try the line has been extended, so the line equation must be re-estimated
+            //adjust the line equation
+            lineFitErr = LeastSquaresLineFit_( pLineXCors, pLineYCors, pLineSID[numOfLines], newOffsetS, offsetInLineArray, lineEquation );
+          }
+          coef1 = 1 / sqrt( lineEquation[0] * lineEquation[0] + 1 );
+          numOfOutlier = 0;
+          newOffsetS = offsetInLineArray;
+          while ( offsetInEdgeArrayE > offsetInEdgeArrayS )
+          {
+            pointToLineDis = fabs( lineEquation[0] * pEdgeXCors[offsetInEdgeArrayS] - pEdgeYCors[offsetInEdgeArrayS] + lineEquation[1] ) * coef1;
+            pLineXCors[offsetInLineArray] = pEdgeXCors[offsetInEdgeArrayS];
+            pLineYCors[offsetInLineArray++] = pEdgeYCors[offsetInEdgeArrayS++];
+            if( pointToLineDis > lineFitErrThreshold_ )
+            {
+              numOfOutlier++;
+              if( numOfOutlier > 3 )
+                break;
+            }
+            else
+            { //an inlier resets the count of consecutive outliers
+              numOfOutlier = 0;
+            }
+          }
+          //pop back the last few outliers from the line and return them to the edge chain
+          offsetInLineArray -= numOfOutlier;
+          offsetInEdgeArrayS -= numOfOutlier;
+          if( offsetInLineArray - newOffsetS > 0 && tryTimes < TryTime )
+          { //some new pixels were added to the line, try again
+          }
+          else
+          {
+            bExtended = false; //no new pixels were added
+          }
+        }
+        //the line equation coefficients: for a line w1x+w2y+w3=0, we normalize it so that w1^2+w2^2 = 1.
+        std::vector<double> lineEqu( 3, 0 );
+        lineEqu[0] = lineEquation[0] * coef1;
+        lineEqu[1] = -1 * coef1;
+        lineEqu[2] = lineEquation[1] * coef1;
+        if( LineValidation_( pLineXCors, pLineYCors, pLineSID[numOfLines], offsetInLineArray, lineEqu, direction ) )
+        { //check the line
+          //store the line equation coefficients
+          lineEquations_.push_back( lineEqu );
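The a1..a5 terms used just below implement the foot-of-perpendicular projection of a pixel (x0, y0) onto the fitted line w1*x + w2*y + w3 = 0: since (w1, w2) is a unit normal here, the projection is simply the point minus (w1*x0 + w2*y0 + w3) times (w1, w2), whose expansion under w1^2 + w2^2 = 1 gives exactly the xp/yp formulas quoted in the comment. In isolation:

    // project (x0, y0) onto w1*x + w2*y + w3 = 0, assuming w1*w1 + w2*w2 == 1
    inline void footOfPerpendicular( double w1, double w2, double w3,
                                     double x0, double y0, double &xp, double &yp )
    {
      double signedDist = w1 * x0 + w2 * y0 + w3;  // offset along the unit normal
      xp = x0 - w1 * signedDist;                   // = w2*w2*x0 - w1*w2*y0 - w3*w1
      yp = y0 - w2 * signedDist;                   // = w1*w1*y0 - w1*w2*x0 - w3*w2
    }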
+          /* At last, compute the line endpoints and store them.
+           * We project the first and last pixels in the pixelChain onto the best-fit line
+           * to get the line endpoints:
+           * xp = (w2^2*x0 - w1*w2*y0 - w3*w1)/(w1^2+w2^2)
+           * yp = (w1^2*y0 - w1*w2*x0 - w3*w2)/(w1^2+w2^2) */
+          std::vector<float> lineEndP( 4, 0 ); //line endpoints
+          double a1 = lineEqu[1] * lineEqu[1];
+          double a2 = lineEqu[0] * lineEqu[0];
+          double a3 = lineEqu[0] * lineEqu[1];
+          double a4 = lineEqu[2] * lineEqu[0];
+          double a5 = lineEqu[2] * lineEqu[1];
+          unsigned int Px = pLineXCors[pLineSID[numOfLines]]; //first pixel
+          unsigned int Py = pLineYCors[pLineSID[numOfLines]];
+          lineEndP[0] = (float) ( a1 * Px - a3 * Py - a4 ); //x
+          lineEndP[1] = (float) ( a2 * Py - a3 * Px - a5 ); //y
+          Px = pLineXCors[offsetInLineArray - 1]; //last pixel
+          Py = pLineYCors[offsetInLineArray - 1];
+          lineEndP[2] = (float) ( a1 * Px - a3 * Py - a4 ); //x
+          lineEndP[3] = (float) ( a2 * Py - a3 * Px - a5 ); //y
+          lineEndpoints_.push_back( lineEndP );
+          lineDirection_.push_back( direction );
+          numOfLines++;
+        }
+        else
+        {
+          offsetInLineArray = pLineSID[numOfLines]; //line was not accepted, set the offset back
+        }
+      }
+      else
+      { //x=ay+b, i.e. x-ay-b=0
+        while ( bExtended )
+        {
+          tryTimes++;
+          if( bFirstTry )
+          {
+            bFirstTry = false;
+            for ( int i = 0; i < minLineLen_; i++ )
+            { //first add the initial line segment to the line array
+              pLineXCors[offsetInLineArray] = pEdgeXCors[offsetInEdgeArrayS];
+              pLineYCors[offsetInLineArray++] = pEdgeYCors[offsetInEdgeArrayS++];
+            }
+          }
+          else
+          { //after each try the line has been extended, so the line equation must be re-estimated
+            //adjust the line equation
+            lineFitErr = LeastSquaresLineFit_( pLineXCors, pLineYCors, pLineSID[numOfLines], newOffsetS, offsetInLineArray, lineEquation );
+          }
+          coef1 = 1 / sqrt( 1 + lineEquation[0] * lineEquation[0] );
+          numOfOutlier = 0;
+          newOffsetS = offsetInLineArray;
+          while ( offsetInEdgeArrayE > offsetInEdgeArrayS )
+          {
+            pointToLineDis = fabs( pEdgeXCors[offsetInEdgeArrayS] - lineEquation[0] * pEdgeYCors[offsetInEdgeArrayS] - lineEquation[1] ) * coef1;
+            pLineXCors[offsetInLineArray] = pEdgeXCors[offsetInEdgeArrayS];
+            pLineYCors[offsetInLineArray++] = pEdgeYCors[offsetInEdgeArrayS++];
+            if( pointToLineDis > lineFitErrThreshold_ )
+            {
+              numOfOutlier++;
+              if( numOfOutlier > 3 )
+                break;
+            }
+            else
+            { //an inlier resets the count of consecutive outliers
+              numOfOutlier = 0;
+            }
+          }
+          //pop back the last few outliers from the line and return them to the edge chain
+          offsetInLineArray -= numOfOutlier;
+          offsetInEdgeArrayS -= numOfOutlier;
+          if( offsetInLineArray - newOffsetS > 0 && tryTimes < TryTime )
+          { //some new pixels were added to the line, try again
+          }
+          else
+          {
+            bExtended = false; //no new pixels were added
+          }
+        }
+        //the line equation coefficients: for a line w1x+w2y+w3=0, we normalize it so that w1^2+w2^2 = 1.
+        std::vector<double> lineEqu( 3, 0 );
+        lineEqu[0] = 1 * coef1;
+        lineEqu[1] = -lineEquation[0] * coef1;
+        lineEqu[2] = -lineEquation[1] * coef1;
+
+        if( LineValidation_( pLineXCors, pLineYCors, pLineSID[numOfLines], offsetInLineArray, lineEqu, direction ) )
+        { //check the line
+          //store the line equation coefficients
+          lineEquations_.push_back( lineEqu );
+          /* At last, compute the line endpoints and store them.
+           * We project the first and last pixels in the pixelChain onto the best-fit line
+           * to get the line endpoints:
+           * xp = (w2^2*x0 - w1*w2*y0 - w3*w1)/(w1^2+w2^2)
+           * yp = (w1^2*y0 - w1*w2*x0 - w3*w2)/(w1^2+w2^2) */
+          std::vector<float> lineEndP( 4, 0 ); //line endpoints
+          double a1 = lineEqu[1] * lineEqu[1];
+          double a2 = lineEqu[0] * lineEqu[0];
+          double a3 = lineEqu[0] * lineEqu[1];
+          double a4 = lineEqu[2] * lineEqu[0];
+          double a5 = lineEqu[2] * lineEqu[1];
+          unsigned int Px = pLineXCors[pLineSID[numOfLines]]; //first pixel
+          unsigned int Py = pLineYCors[pLineSID[numOfLines]];
+          lineEndP[0] = (float) ( a1 * Px - a3 * Py - a4 ); //x
+          lineEndP[1] = (float) ( a2 * Py - a3 * Px - a5 ); //y
+          Px = pLineXCors[offsetInLineArray - 1]; //last pixel
+          Py = pLineYCors[offsetInLineArray - 1];
+          lineEndP[2] = (float) ( a1 * Px - a3 * Py - a4 ); //x
+          lineEndP[3] = (float) ( a2 * Py - a3 * Px - a5 ); //y
+          lineEndpoints_.push_back( lineEndP );
+          lineDirection_.push_back( direction );
+          numOfLines++;
+        }
+        else
+        {
+          offsetInLineArray = pLineSID[numOfLines]; //line was not accepted, set the offset back
+        }
+      }
+      //Extract line segments from the remaining pixels; the current chain has already been shortened.
+    }
+  } //end for(unsigned int edgeID=0; edgeID<edges.numOfEdges; edgeID++)
+
+  pLineSID[numOfLines] = offsetInLineArray;
+  lines.numOfLines = numOfLines;
+
+  return 1;
+}
+
+double BinaryDescriptor::EDLineDetector::LeastSquaresLineFit_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS,
+                                                               std::vector<double> &lineEquation )
+{
+  float * pMatT;
+  float * pATA;
+  double fitError = 0;
+  double coef;
+  unsigned char *pdirImg = dirImg_.data;
+  unsigned int offset = offsetS;
+  /* If the first pixel in this chain is horizontal,
+   * then we try to find a horizontal line, y=ax+b; */
+  if( pdirImg[yCors[offsetS] * imageWidth + xCors[offsetS]] == Horizontal )
+  {
+    /* Build the system and solve it using least squares regression: mat * [a,b]^T = vec
+     * [x0,1]          [y0]
+     * [x1,1]  [a]     [y1]
+     *  .      [b]  =   .
+     * [xn,1]          [yn] */
+    pMatT = fitMatT.ptr<float>(); //fitMatT = [x0, x1, ..., xn; 1, 1, ..., 1];
+    for ( int i = 0; i < minLineLen_; i++ )
+    {
+      //*(pMatT+minLineLen_) = 1; //these values are not changed;
+      * ( pMatT++ ) = (float) xCors[offsetS];
+      fitVec[0][i] = (float) yCors[offsetS++];
+    }
+    ATA = fitMatT * fitMatT.t();
+    ATV = fitMatT * fitVec.t();
+    /* [a,b]^T = Inv(mat^T * mat) * mat^T * vec */
+    pATA = ATA.ptr<float>();
+    coef = 1.0 / ( double( pATA[0] ) * double( pATA[3] ) - double( pATA[1] ) * double( pATA[2] ) );
+    // lineEquation = svd.Invert(ATA) * matT * vec;
+    lineEquation[0] = coef * ( double( pATA[3] ) * double( ATV[0][0] ) - double( pATA[1] ) * double( ATV[0][1] ) );
+    lineEquation[1] = coef * ( double( pATA[0] ) * double( ATV[0][1] ) - double( pATA[2] ) * double( ATV[0][0] ) );
+    /* compute line fit error */
+    for ( int i = 0; i < minLineLen_; i++ )
+    {
+      //coef = double(yCors[offset]) - double(xCors[offset++]) * lineEquation[0] - lineEquation[1];
+      coef = double( yCors[offset] ) - double( xCors[offset] ) * lineEquation[0] - lineEquation[1];
+      offset++;
+      fitError += coef * coef;
+    }
+    return sqrt( fitError );
+  }
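What the branch above solves (and the vertical branch below mirrors) is the 2x2 normal-equation system [a,b]^T = (M^T M)^(-1) (M^T v); with only two unknowns the inverse is written out through the determinant, which is exactly the role of coef. The same solve in isolation:

    // solve [p q; r s] [a; b] = [u; v] via the adjugate and determinant,
    // matching the pATA[0..3] / ATV[0][0..1] arithmetic above
    inline bool solve2x2( double p, double q, double r, double s,
                          double u, double v, double &a, double &b )
    {
      double det = p * s - q * r;
      if( det == 0 )
        return false;  // degenerate pixel chain, fit undefined
      a = ( s * u - q * v ) / det;
      b = ( p * v - r * u ) / det;
      return true;
    }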
+  /* If the first pixel in this chain is vertical,
+   * then we try to find a vertical line, x=ay+b; */
+  if( pdirImg[yCors[offsetS] * imageWidth + xCors[offsetS]] == Vertical )
+  {
+    /* Build the system and solve it using least squares regression: mat * [a,b]^T = vec
+     * [y0,1]          [x0]
+     * [y1,1]  [a]     [x1]
+     *  .      [b]  =   .
+     * [yn,1]          [xn] */
+    pMatT = fitMatT.ptr<float>(); //fitMatT = [y0, y1, ..., yn; 1, 1, ..., 1];
+    for ( int i = 0; i < minLineLen_; i++ )
+    {
+      //*(pMatT+minLineLen_) = 1; //these values are not changed;
+      * ( pMatT++ ) = (float) yCors[offsetS];
+      fitVec[0][i] = (float) xCors[offsetS++];
+    }
+    ATA = fitMatT * ( fitMatT.t() );
+    ATV = fitMatT * fitVec.t();
+    /* [a,b]^T = Inv(mat^T * mat) * mat^T * vec */
+    pATA = ATA.ptr<float>();
+    coef = 1.0 / ( double( pATA[0] ) * double( pATA[3] ) - double( pATA[1] ) * double( pATA[2] ) );
+    // lineEquation = svd.Invert(ATA) * matT * vec;
+    lineEquation[0] = coef * ( double( pATA[3] ) * double( ATV[0][0] ) - double( pATA[1] ) * double( ATV[0][1] ) );
+    lineEquation[1] = coef * ( double( pATA[0] ) * double( ATV[0][1] ) - double( pATA[2] ) * double( ATV[0][0] ) );
+    /* compute line fit error */
+    for ( int i = 0; i < minLineLen_; i++ )
+    {
+      //coef = double(xCors[offset]) - double(yCors[offset++]) * lineEquation[0] - lineEquation[1];
+      coef = double( xCors[offset] ) - double( yCors[offset] ) * lineEquation[0] - lineEquation[1];
+      offset++;
+      fitError += coef * coef;
+    }
+    return sqrt( fitError );
+  }
+  return 0;
+}
+
+double BinaryDescriptor::EDLineDetector::LeastSquaresLineFit_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS,
+                                                               unsigned int newOffsetS, unsigned int offsetE, std::vector<double> &lineEquation )
+{
+  int length = offsetE - offsetS;
+  int newLength = offsetE - newOffsetS;
+  if( length <= 0 || newLength <= 0 )
+  {
+    std::cout << "EDLineDetector::LeastSquaresLineFit_ Error:"
+                 " the expected line index is wrong...offsetE = " << offsetE << ", offsetS=" << offsetS << ", newOffsetS=" << newOffsetS << std::endl;
+    return -1;
+  }
+  if( lineEquation.size() != 2 )
+  {
+    std::cout << "SHOULD NOT BE != 2" << std::endl;
+  }
+  cv::Mat_<float> matT( 2, newLength );
+  cv::Mat_<float> vec( newLength, 1 );
+  float * pMatT;
+  float * pATA;
+  double coef;
+  unsigned char *pdirImg = dirImg_.data;
+  /* If the first pixel in this chain is horizontal,
+   * then we try to find a horizontal line, y=ax+b; */
+  if( pdirImg[yCors[offsetS] * imageWidth + xCors[offsetS]] == Horizontal )
+  {
+    /* Build the new system and solve it using least squares regression: mat * [a,b]^T = vec
+     * [x0',1]          [y0']
+     * [x1',1]  [a]     [y1']
+     *  .       [b]  =   .
+     * [xn',1]          [yn'] */
+    pMatT = matT.ptr<float>(); //matT = [x0', x1', ..., xn'; 1, 1, ..., 1]
+    for ( int i = 0; i < newLength; i++ )
+    {
+      * ( pMatT + newLength ) = 1;
+      * ( pMatT++ ) = (float) xCors[newOffsetS];
+      vec[0][i] = (float) yCors[newOffsetS++];
+    }
+    /* [a,b]^T = Inv(ATA + mat^T * mat) * (ATV + mat^T * vec) */
+    tempMatLineFit = matT * matT.t();
+    tempVecLineFit = matT * vec;
+    ATA = ATA + tempMatLineFit;
+    ATV = ATV + tempVecLineFit;
+    pATA = ATA.ptr<float>();
+    coef = 1.0 / ( double( pATA[0] ) * double( pATA[3] ) - double( pATA[1] ) * double( pATA[2] ) );
+    lineEquation[0] = coef * ( double( pATA[3] ) * double( ATV[0][0] ) - double( pATA[1] ) * double( ATV[0][1] ) );
+    lineEquation[1] = coef * ( double( pATA[0] ) * double( ATV[0][1] ) - double( pATA[2] ) * double( ATV[0][0] ) );
+
+    return 0;
+  }
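This second overload is the incremental counterpart of the initial fit: rather than rebuilding the normal equations from every pixel of the line, it accumulates only the newly accepted pixels into the running sums ATA += Mnew^T * Mnew and ATV += Mnew^T * vnew and re-solves the same 2x2 system, so each refit during extension costs time proportional to the new pixels rather than the whole line. The idea reduced to a scalar accumulator (an illustrative sketch for the y = a*x + b case, not code from this file):

    // running normal equations for y = a*x + b; addPoint() is O(1) per new pixel
    struct IncrementalLineFit
    {
      double sxx = 0, sx = 0, n = 0;   // entries of M^T M = [sxx sx; sx n]
      double sxy = 0, sy = 0;          // entries of M^T v = [sxy; sy]

      void addPoint( double x, double y )
      {
        sxx += x * x; sx += x; n += 1;
        sxy += x * y; sy += y;
      }

      bool solve( double &a, double &b ) const
      {
        double det = sxx * n - sx * sx;
        if( det == 0 ) return false;
        a = ( n * sxy - sx * sy ) / det;
        b = ( sxx * sy - sx * sxy ) / det;
        return true;
      }
    };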
yn'; 1,1,...,1]
+ for ( int i = 0; i < newLength; i++ )
+ {
+ * ( pMatT + newLength ) = 1;
+ * ( pMatT++ ) = (float) yCors[newOffsetS];
+ vec[0][i] = (float) xCors[newOffsetS++];
+ }
+ /* [a,b]^T = Inv(ATA + mat^T * mat) * (ATV + mat^T * vec) */
+// matT.MultiplyWithTransposeOf(matT, tempMatLineFit);
+ tempMatLineFit = matT * matT.t();
+ tempVecLineFit = matT * vec;
+ ATA = ATA + tempMatLineFit;
+ ATV = ATV + tempVecLineFit;
+// pATA = ATA.GetData();
+ pATA = ATA.ptr<float>();
+ coef = 1.0 / ( double( pATA[0] ) * double( pATA[3] ) - double( pATA[1] ) * double( pATA[2] ) );
+ lineEquation[0] = coef * ( double( pATA[3] ) * double( ATV[0][0] ) - double( pATA[1] ) * double( ATV[0][1] ) );
+ lineEquation[1] = coef * ( double( pATA[0] ) * double( ATV[0][1] ) - double( pATA[2] ) * double( ATV[0][0] ) );
+
+ }
+ return 0;
+}
+
+bool BinaryDescriptor::EDLineDetector::LineValidation_( unsigned int *xCors, unsigned int *yCors, unsigned int offsetS, unsigned int offsetE,
+ std::vector<double> &lineEquation, float &direction )
+{
+ if( bValidate_ )
+ {
+ int n = offsetE - offsetS;
+ /*first compute the direction of the line, making sure that the dark side is always
+ *on the left side of the line.*/
+ int meanGradientX = 0, meanGradientY = 0;
+ short *pdxImg = dxImg_.ptr<short>();
+ short *pdyImg = dyImg_.ptr<short>();
+ double dx, dy;
+ std::vector<double> pointDirection;
+ int index;
+ for ( int i = 0; i < n; i++ )
+ {
+ index = yCors[offsetS] * imageWidth + xCors[offsetS];
+ offsetS++;
+ meanGradientX += pdxImg[index];
+ meanGradientY += pdyImg[index];
+ dx = (double) pdxImg[index];
+ dy = (double) pdyImg[index];
+ pointDirection.push_back( atan2( -dx, dy ) );
+ }
+ dx = fabs( lineEquation[1] );
+ dy = fabs( lineEquation[0] );
+ if( meanGradientX == 0 && meanGradientY == 0 )
+ { //should not happen; if it does, the line must be wrong
+ return false;
+ }
+ if( meanGradientX > 0 && meanGradientY >= 0 )
+ { //first quadrant, and positive direction of X axis.
+ direction = (float) atan2( -dy, dx ); //line direction is in fourth quadrant
+ }
+ if( meanGradientX <= 0 && meanGradientY > 0 )
+ { //second quadrant, and positive direction of Y axis.
+ direction = (float) atan2( dy, dx ); //line direction is in first quadrant
+ }
+ if( meanGradientX < 0 && meanGradientY <= 0 )
+ { //third quadrant, and negative direction of X axis.
+ direction = (float) atan2( dy, -dx ); //line direction is in second quadrant
+ }
+ if( meanGradientX >= 0 && meanGradientY < 0 )
+ { //fourth quadrant, and negative direction of Y axis.
+ direction = (float) atan2( -dy, -dx ); //line direction is in third quadrant
+ }
+ /*then check whether the line is on the border of the image. We don't keep border lines.*/
+ if( fabs( direction ) < 0.15 || M_PI - fabs( direction ) < 0.15 )
+ { //Horizontal line
+ if( fabs( lineEquation[2] ) < 10 || fabs( imageHeight - fabs( lineEquation[2] ) ) < 10 )
+ { //upper border or lower border
+ return false;
+ }
+ }
+ if( fabs( fabs( direction ) - M_PI * 0.5 ) < 0.15 )
+ { //Vertical line
+ if( fabs( lineEquation[2] ) < 10 || fabs( imageWidth - fabs( lineEquation[2] ) ) < 10 )
+ { //left border or right border
+ return false;
+ }
+ }
+ //count the aligned points on the line which have the same direction as the line.
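+ /* Editor's note (hedged): the block below is the a-contrario (Helmholtz
+ principle) validation used by EDLines. A point counts as "aligned" when its
+ gradient direction is within pi/8 of the line direction, so under the null
+ hypothesis a random point aligns with probability p = 1/8 = 0.125. Given n
+ points with k of them aligned, nfa( n, k, 0.125, logNT_ ) returns (up to the
+ sign convention of this implementation) -log10 of the expected number of
+ false alarms; requiring it to be > 0 keeps only lines with NFA < 1, i.e.
+ fewer than one such line expected by chance over the whole image. */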
+ double disDirection; + int k = 0; + for ( int i = 0; i < n; i++ ) + { + disDirection = fabs( direction - pointDirection[i] ); + if( fabs( 2 * M_PI - disDirection ) < 0.392699 || disDirection < 0.392699 ) + { //same direction, pi/8 = 0.392699081698724 + k++; + } + } + //now compute NFA(Number of False Alarms) + double ret = nfa( n, k, 0.125, logNT_ ); + + return ( ret > 0 ); //0 corresponds to 1 mean false alarm + } + else + { + return true; + } +} + +int BinaryDescriptor::EDLineDetector::EDline( cv::Mat &image ) +{ + if( ( EDline( image, lines_/*, smoothed*/) ) != 1 ) + { + return -1; + } + lineSalience_.clear(); + lineSalience_.resize( lines_.numOfLines ); + unsigned char *pgImg = gImgWO_.ptr(); + unsigned int indexInLineArray; + unsigned int *pXCor = &lines_.xCors.front(); + unsigned int *pYCor = &lines_.yCors.front(); + unsigned int *pSID = &lines_.sId.front(); + for ( unsigned int i = 0; i < lineSalience_.size(); i++ ) + { + int salience = 0; + for ( indexInLineArray = pSID[i]; indexInLineArray < pSID[i + 1]; indexInLineArray++ ) + { + salience += pgImg[pYCor[indexInLineArray] * imageWidth + pXCor[indexInLineArray]]; + } + lineSalience_[i] = (float) salience; + } + return 1; +} + +} +} diff --git a/3rdparty/line_descriptor/src/binary_descriptor_matcher.cpp b/3rdparty/line_descriptor/src/binary_descriptor_matcher.cpp new file mode 100644 index 0000000..b4598da --- /dev/null +++ b/3rdparty/line_descriptor/src/binary_descriptor_matcher.cpp @@ -0,0 +1,971 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2014, Biagio Montesano, all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and/or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. + // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. 
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
+ // indirect, incidental, special, exemplary, or consequential damages
+ // (including, but not limited to, procurement of substitute goods or services;
+ // loss of use, data, or profits; or business interruption) however caused
+ // and on any theory of liability, whether in contract, strict liability,
+ // or tort (including negligence or otherwise) arising in any way out of
+ // the use of this software, even if advised of the possibility of such damage.
+ //
+ //M*/
+
+#include "precomp_custom.hpp"
+
+#define MAX_B 37
+double ARRAY_RESIZE_FACTOR = 1.1; // minimum is 1.0
+double ARRAY_RESIZE_ADD_FACTOR = 4; // minimum is 1
+
+//using namespace cv;
+namespace cv
+{
+namespace line_descriptor
+{
+
+/* constructor */
+BinaryDescriptorMatcher::BinaryDescriptorMatcher()
+{
+ dataset = new Mihasher( 256, 32 );
+ nextAddedIndex = 0;
+ numImages = 0;
+ descrInDS = 0;
+}
+
+/* constructor with smart pointer */
+Ptr<BinaryDescriptorMatcher> BinaryDescriptorMatcher::createBinaryDescriptorMatcher()
+{
+ return Ptr < BinaryDescriptorMatcher > ( new BinaryDescriptorMatcher() );
+}
+
+/* store new descriptors to be inserted in dataset */
+void BinaryDescriptorMatcher::add( const std::vector<Mat>& descriptors )
+{
+ for ( size_t i = 0; i < descriptors.size(); i++ )
+ {
+ descriptorsMat.push_back( descriptors[i] );
+
+ indexesMap.insert( std::pair<int, int>( nextAddedIndex, numImages ) );
+ nextAddedIndex += descriptors[i].rows;
+ numImages++;
+ }
+}
+
+/* store new descriptors into dataset */
+void BinaryDescriptorMatcher::train()
+{
+ if( !dataset )
+ dataset = new Mihasher( 256, 32 );
+
+ if( descriptorsMat.rows > 0 )
+ dataset->populate( descriptorsMat, descriptorsMat.rows, descriptorsMat.cols );
+
+ descrInDS = descriptorsMat.rows;
+ descriptorsMat.release();
+}
+
+/* clear dataset and internal data */
+void BinaryDescriptorMatcher::clear()
+{
+ descriptorsMat.release();
+ indexesMap.clear();
+ dataset = 0;
+ nextAddedIndex = 0;
+ numImages = 0;
+ descrInDS = 0;
+}
+
+/* retrieve Hamming distances */
+void BinaryDescriptorMatcher::checkKDistances( UINT32 * numres, int k, std::vector<int> & k_distances, int row, int string_length ) const
+{
+ int k_to_found = k;
+
+ UINT32 * numres_tmp = numres + ( ( string_length + 1 ) * row );
+ for ( int j = 0; j < ( string_length + 1 ) && k_to_found > 0; j++ )
+ {
+ if( ( * ( numres_tmp + j ) ) > 0 )
+ {
+ for ( int i = 0; i < (int) ( * ( numres_tmp + j ) ) && k_to_found > 0; i++ )
+ {
+ k_distances.push_back( j );
+ k_to_found--;
+ }
+ }
+ }
+}
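+
+/* Editor's note (hedged): `numres` is laid out as one (string_length + 1)-bin
+ * histogram per query row: bin d counts how many retrieved candidates lie at
+ * Hamming distance exactly d, for d = 0..string_length (0..256 here).
+ * checkKDistances() above walks the bins in increasing d and emits k values,
+ * so k_distances[i] is the distance of the i-th best match of that row; e.g.
+ * a row histogram with one candidate at d = 0 and two at d = 2, queried with
+ * k = 3, yields k_distances = {0, 2, 2}. */
+
+/* for every input descriptor,
+ find the best matching one (from one image to a set) */
+void BinaryDescriptorMatcher::match( const Mat& queryDescriptors, std::vector<DMatch>& matches, const std::vector<Mat>& masks )
+{
+ /* check data validity */
+ if( queryDescriptors.rows == 0 )
+ {
+ std::cout << "Error: query descriptors' matrix is empty" << std::endl;
+ return;
+ }
+
+ if( masks.size() != 0 && (int) masks.size() != numImages )
+ {
+ std::cout << "Error: the number of images in dataset is " << numImages << " but match function received " << masks.size()
+ << " masks. 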
Program will be terminated" << std::endl;
+
+ return;
+ }
+
+ /* add new descriptors to dataset, if needed */
+ train();
+
+ /* set number of requested matches to return for each query */
+ dataset->setK( 1 );
+
+ /* prepare structures for query */
+ UINT32 *results = new UINT32[queryDescriptors.rows];
+ UINT32 * numres = new UINT32[ ( 256 + 1 ) * ( queryDescriptors.rows )];
+
+ /* execute query */
+ dataset->batchquery( results, numres, queryDescriptors, queryDescriptors.rows, queryDescriptors.cols );
+ /* compose matches */
+ for ( int counter = 0; counter < queryDescriptors.rows; counter++ )
+ {
+ /* create a map iterator */
+ std::map<int, int>::iterator itup;
+
+ /* get info about original image of each returned descriptor */
+ itup = indexesMap.upper_bound( results[counter] - 1 );
+ itup--;
+ /* data validity check */
+ if( !masks.empty() && ( masks[itup->second].rows != queryDescriptors.rows || masks[itup->second].cols != 1 ) )
+ {
+ std::stringstream ss;
+ ss << "Error: mask " << itup->second << " in match function " << "should have " << queryDescriptors.rows << " rows and "
+ << "1 column. Program will be terminated";
+ //throw std::runtime_error( ss.str() );
+ }
+ /* create a DMatch object if required by mask or if there is
+ no mask at all */
+ else if( masks.empty() || masks[itup->second].at<uchar>( counter ) != 0 )
+ {
+ std::vector<int> k_distances;
+ checkKDistances( numres, 1, k_distances, counter, 256 );
+
+ DMatch dm;
+ dm.queryIdx = counter;
+ dm.trainIdx = results[counter] - 1;
+ dm.imgIdx = itup->second;
+ dm.distance = (float) k_distances[0];
+
+ matches.push_back( dm );
+ }
+
+ }
+
+ /* delete data */
+ delete[] results;
+ delete[] numres;
+}
+
+/* for every input descriptor, find the best matching one (for a pair of images) */
+void BinaryDescriptorMatcher::match( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<DMatch>& matches, const Mat& mask ) const
+{
+
+ /* check data validity */
+ if( queryDescriptors.rows == 0 || trainDescriptors.rows == 0 )
+ {
+ std::cout << "Error: descriptors matrices cannot be empty" << std::endl;
+ return;
+ }
+
+ if( !mask.empty() && ( mask.rows != queryDescriptors.rows || mask.cols != 1 ) )
+ {
+ std::cout << "Error: input mask should have " << queryDescriptors.rows << " rows and 1 column. 
" << "Program will be terminated" << std::endl; + + return; + } + + /* create a new mihasher object */ + Mihasher *mh = new Mihasher( 256, 32 ); + + /* populate mihasher */ + cv::Mat copy = trainDescriptors.clone(); + mh->populate( copy, copy.rows, copy.cols ); + mh->setK( 1 ); + + /* prepare structures for query */ + UINT32 *results = new UINT32[queryDescriptors.rows]; + UINT32 * numres = new UINT32[ ( 256 + 1 ) * ( queryDescriptors.rows )]; + + /* execute query */ + mh->batchquery( results, numres, queryDescriptors, queryDescriptors.rows, queryDescriptors.cols ); + + /* compose matches */ + for ( int counter = 0; counter < queryDescriptors.rows; counter++ ) + { + /* create a DMatch object if required by mask or if there is + no mask at all */ + if( mask.empty() || ( !mask.empty() && mask.at < uchar > ( counter ) != 0 ) ) + { + std::vector k_distances; + checkKDistances( numres, 1, k_distances, counter, 256 ); + + DMatch dm; + dm.queryIdx = counter; + dm.trainIdx = results[counter] - 1; + dm.imgIdx = 0; + dm.distance = (float) k_distances[0]; + + matches.push_back( dm ); + } + } + + /* delete data */ + delete mh; + delete[] results; + delete[] numres; + +} + +/* for every input descriptor, + find the best k matching descriptors (for a pair of images) */ +void BinaryDescriptorMatcher::knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector >& matches, int k, + const Mat& mask, bool compactResult ) const + +{ + /* check data validity */ + if( queryDescriptors.rows == 0 || trainDescriptors.rows == 0 ) + { + std::cout << "Error: descriptors matrices cannot be void" << std::endl; + return; + } + + if( !mask.empty() && ( mask.rows != queryDescriptors.rows || mask.cols != 1 ) ) + { + std::cout << "Error: input mask should have " << queryDescriptors.rows << " rows and 1 column. 
" << "Program will be terminated" << std::endl; + + return; + } + + /* create a new mihasher object */ + Mihasher *mh = new Mihasher( 256, 32 ); + + /* populate mihasher */ + cv::Mat copy = trainDescriptors.clone(); + mh->populate( copy, copy.rows, copy.cols ); + + /* set K */ + mh->setK( k ); + + /* prepare structures for query */ + UINT32 *results = new UINT32[k * queryDescriptors.rows]; + UINT32 * numres = new UINT32[ ( 256 + 1 ) * ( queryDescriptors.rows )]; + + /* execute query */ + mh->batchquery( results, numres, queryDescriptors, queryDescriptors.rows, queryDescriptors.cols ); + + /* compose matches */ + int index = 0; + for ( int counter = 0; counter < queryDescriptors.rows; counter++ ) + { + /* initialize a vector of matches */ + std::vector < DMatch > tempVec; + + /* chech whether query should be ignored */ + if( !mask.empty() && mask.at < uchar > ( counter ) == 0 ) + { + /* if compact result is not requested, add an empty vector */ + if( !compactResult ) + matches.push_back( tempVec ); + } + + /* query matches must be considered */ + else + { + std::vector k_distances; + checkKDistances( numres, k, k_distances, counter, 256 ); + for ( int j = index; j < index + k; j++ ) + { + DMatch dm; + dm.queryIdx = counter; + dm.trainIdx = results[j] - 1; + dm.imgIdx = 0; + dm.distance = (float) k_distances[j - index]; + + tempVec.push_back( dm ); + } + + matches.push_back( tempVec ); + } + + /* increment pointer */ + index += k; + } + + /* delete data */ + delete mh; + delete[] results; + delete[] numres; +} + +/* for every input descriptor, + find the best k matching descriptors (from one image to a set) */ +void BinaryDescriptorMatcher::knnMatch( const Mat& queryDescriptors, std::vector >& matches, int k, const std::vector& masks, + bool compactResult ) +{ + + /* check data validity */ + if( queryDescriptors.rows == 0 ) + { + std::cout << "Error: descriptors matrix cannot be void" << std::endl; + return; + } + + if( masks.size() != 0 && (int) masks.size() != numImages ) + { + std::cout << "Error: the number of images in dataset is " << numImages << " but knnMatch function received " << masks.size() + << " masks. Program will be terminated" << std::endl; + + return; + } + + /* add new descriptors to dataset, if needed */ + train(); + + /* set number of requested matches to return for each query */ + dataset->setK( k ); + + /* prepare structures for query */ + UINT32 *results = new UINT32[k * queryDescriptors.rows]; + UINT32 * numres = new UINT32[ ( 256 + 1 ) * ( queryDescriptors.rows )]; + + /* execute query */ + dataset->batchquery( results, numres, queryDescriptors, queryDescriptors.rows, queryDescriptors.cols ); + + /* compose matches */ + int index = 0; + for ( int counter = 0; counter < queryDescriptors.rows; counter++ ) + { + /* create a void vector of matches */ + std::vector < DMatch > tempVector; + + /* loop over k results returned for every query */ + for ( int j = index; j < index + k; j++ ) + { + /* retrieve which image returned index refers to */ + int currentIndex = results[j] - 1; + std::map::iterator itup; + itup = indexesMap.upper_bound( currentIndex ); + itup--; + + /* data validity check */ + if( !masks.empty() && ( masks[itup->second].rows != queryDescriptors.rows || masks[itup->second].cols != 1 ) ) + { + std::cout << "Error: mask " << itup->second << " in knnMatch function " << "should have " << queryDescriptors.rows << " and " + << "1 column. 
Program will be terminated" << std::endl;
+
+ return;
+ }
+
+ /* decide if, according to relative mask, returned match should be
+ considered */
+ else if( masks.size() == 0 || masks[itup->second].at<uchar>( counter ) != 0 )
+ {
+ std::vector<int> k_distances;
+ checkKDistances( numres, k, k_distances, counter, 256 );
+
+ DMatch dm;
+ dm.queryIdx = counter;
+ dm.trainIdx = results[j] - 1;
+ dm.imgIdx = itup->second;
+ dm.distance = (float) k_distances[j - index];
+
+ tempVector.push_back( dm );
+ }
+ }
+
+ /* decide whether temporary vector should be saved */
+ if( ( tempVector.size() == 0 && !compactResult ) || tempVector.size() > 0 )
+ matches.push_back( tempVector );
+
+ /* increment pointer */
+ index += k;
+ }
+
+ /* delete data */
+ delete[] results;
+ delete[] numres;
+}
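+
+/* Editor's note (hedged): the two radiusMatch() overloads below reuse the k-NN
+ * machinery for radius search: K is simply set to the whole training set size
+ * (trainDescriptors.rows here, descrInDS in the dataset overload), every
+ * candidate comes back ordered by Hamming distance, and the loop then keeps
+ * only those with k_distances[j - index] <= maxDistance. Simple, but O(N)
+ * work per query, so it is the slow path of this matcher. */
+
+/* for every input descriptor, find all the ones falling in a
+ certain matching radius (for a pair of images) */
+void BinaryDescriptorMatcher::radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors, std::vector<std::vector<DMatch> >& matches,
+ float maxDistance, const Mat& mask, bool compactResult ) const
+
+{
+
+ /* check data validity */
+ if( queryDescriptors.rows == 0 || trainDescriptors.rows == 0 )
+ {
+ std::cout << "Error: descriptors matrices cannot be empty" << std::endl;
+ return;
+ }
+
+ if( !mask.empty() && ( mask.rows != queryDescriptors.rows || mask.cols != 1 ) )
+ {
+ std::cout << "Error: input mask should have " << queryDescriptors.rows << " rows and 1 column. " << "Program will be terminated" << std::endl;
+
+ return;
+ }
+
+ /* create a new Mihasher */
+ Mihasher* mh = new Mihasher( 256, 32 );
+
+ /* populate Mihasher */
+ //Mat copy = queryDescriptors.clone();
+ Mat copy = trainDescriptors.clone();
+ mh->populate( copy, copy.rows, copy.cols );
+
+ /* set K */
+ mh->setK( trainDescriptors.rows );
+
+ /* prepare structures for query */
+ UINT32 *results = new UINT32[trainDescriptors.rows * queryDescriptors.rows];
+ UINT32 * numres = new UINT32[ ( 256 + 1 ) * ( queryDescriptors.rows )];
+
+ /* execute query */
+ mh->batchquery( results, numres, queryDescriptors, queryDescriptors.rows, queryDescriptors.cols );
+
+ /* compose matches */
+ int index = 0;
+ for ( int i = 0; i < queryDescriptors.rows; i++ )
+ {
+ std::vector<int> k_distances;
+ checkKDistances( numres, trainDescriptors.rows, k_distances, i, 256 );
+
+ std::vector < DMatch > tempVector;
+ for ( int j = index; j < index + trainDescriptors.rows; j++ )
+ {
+// if( numres[j] <= maxDistance )
+ if( k_distances[j - index] <= maxDistance )
+ {
+ if( mask.empty() || mask.at<uchar>( i ) != 0 )
+ {
+ DMatch dm;
+ dm.queryIdx = i;
+ dm.trainIdx = (int) ( results[j] - 1 );
+ dm.imgIdx = 0;
+ dm.distance = (float) k_distances[j - index];
+
+ tempVector.push_back( dm );
+ }
+ }
+ }
+
+ /* decide whether temporary vector should be saved */
+ if( ( tempVector.size() == 0 && !compactResult ) || tempVector.size() > 0 )
+ matches.push_back( tempVector );
+
+ /* increment pointer */
+ index += trainDescriptors.rows;
+
+ }
+
+ /* delete data */
+ delete mh;
+ delete[] results;
+ delete[] numres;
+}
+
+/* for every input descriptor, find all the ones falling in a
+ certain matching radius (from one image to a set) */
+void BinaryDescriptorMatcher::radiusMatch( const Mat& queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
+ const std::vector<Mat>& masks, bool compactResult )
+{
+
+ /* check data validity */
+ if( queryDescriptors.rows == 0 )
+ {
+ std::cout << "Error: descriptors matrices cannot be empty" << std::endl;
+ return;
+ }
+
+ if( masks.size() != 0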
&& (int) masks.size() != numImages )
+ {
+ std::cout << "Error: the number of images in dataset is " << numImages << " but radiusMatch function received " << masks.size()
+ << " masks. Program will be terminated" << std::endl;
+
+ return;
+ }
+
+ /* populate dataset */
+ train();
+
+ /* set K */
+ dataset->setK( descrInDS );
+
+ /* prepare structures for query */
+ UINT32 *results = new UINT32[descrInDS * queryDescriptors.rows];
+ UINT32 * numres = new UINT32[ ( 256 + 1 ) * ( queryDescriptors.rows )];
+
+ /* execute query */
+ dataset->batchquery( results, numres, queryDescriptors, queryDescriptors.rows, queryDescriptors.cols );
+
+ /* compose matches */
+ int index = 0;
+ for ( int counter = 0; counter < queryDescriptors.rows; counter++ )
+ {
+ std::vector < DMatch > tempVector;
+ for ( int j = index; j < index + descrInDS; j++ )
+ {
+ std::vector<int> k_distances;
+ checkKDistances( numres, descrInDS, k_distances, counter, 256 );
+
+ if( k_distances[j - index] <= maxDistance )
+ {
+ int currentIndex = results[j] - 1;
+ std::map<int, int>::iterator itup;
+ itup = indexesMap.upper_bound( currentIndex );
+ itup--;
+
+ /* data validity check */
+ if( !masks.empty() && ( masks[itup->second].rows != queryDescriptors.rows || masks[itup->second].cols != 1 ) )
+ {
+ std::cout << "Error: mask " << itup->second << " in radiusMatch function " << "should have " << queryDescriptors.rows << " rows and "
+ << "1 column. Program will be terminated" << std::endl;
+
+ return;
+ }
+
+ /* add match if necessary */
+ else if( masks.empty() || masks[itup->second].at<uchar>( counter ) != 0 )
+ {
+
+ DMatch dm;
+ dm.queryIdx = counter;
+ dm.trainIdx = results[j] - 1;
+ dm.imgIdx = itup->second;
+ dm.distance = (float) k_distances[j - index];
+
+ tempVector.push_back( dm );
+ }
+ }
+ }
+
+ /* decide whether temporary vector should be saved */
+ if( ( tempVector.size() == 0 && !compactResult ) || tempVector.size() > 0 )
+ matches.push_back( tempVector );
+
+ /* increment pointer */
+ index += descrInDS;
+ }
+
+ /* delete data */
+ delete[] results;
+ delete[] numres;
+
+}
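+
+/* Editor's note (hedged): Mihasher implements the Multi-Index Hashing scheme
+ * of Norouzi, Punjani and Fleet. Each B = 256 bit code is split into m = 32
+ * substrings of b = 8 bits, and every substring is indexed in its own hash
+ * table. By the pigeonhole principle, if two codes differ in at most r bits,
+ * then at least one of the m substrings differs in at most floor(r/m) bits,
+ * so near neighbours can be found by probing each table with all substrings
+ * within a small radius and verifying the full Hamming distance only on the
+ * retrieved candidates. Worked example at these parameters: for r = 32 and
+ * m = 32, some substring must lie within floor(32/32) = 1 bit, and a radius-1
+ * probe of an 8-bit table touches only 1 + 8 = 9 buckets per substring. */
+
+/* execute a batch query */
+void BinaryDescriptorMatcher::Mihasher::batchquery( UINT32 * results, UINT32 *numres, const cv::Mat & queries, UINT32 numq, int dim1queries )
+{
+ /* create and initialize a bitarray */
+ counter = new bitarray;
+ counter->init( N );
+
+ UINT32 *res = new UINT32[K * ( D + 1 )];
+ UINT64 *chunks = new UINT64[m];
+ UINT32 * presults = results;
+ UINT32 *pnumres = numres;
+
+ /* make a copy of input queries */
+ cv::Mat queries_clone = queries.clone();
+
+ /* set a pointer to first query (row) */
+ UINT8 *pq = queries_clone.ptr();
+
+ /* loop over number of descriptors */
+ for ( size_t i = 0; i < numq; i++ )
+ {
+ /* for every descriptor, query database */
+ query( presults, pnumres, pq, chunks, res );
+
+ /* move pointer to write next K indices */
+ presults += K;
+ pnumres += B + 1;
+
+ /* move forward pointer to current row in descriptors matrix */
+ pq += dim1queries;
+
+ }
+
+ delete[] res;
+ delete[] chunks;
+
+ delete counter;
+}
+
+/* execute a single query */
+void BinaryDescriptorMatcher::Mihasher::query( UINT32* results, UINT32* numres, UINT8 * Query, UINT64 *chunks, UINT32 *res )
+{
+ /* if K == 0 that means we want everything to be processed.
+ So maxres = N in that case. Otherwise K limits the results processed */
+ UINT32 maxres = K ?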
K : (UINT32) N; + + /* number of results so far obtained (up to a distance of s per chunk) */ + UINT32 n = 0; + + /* number of candidates tested with full codes (not counting duplicates) */ + UINT32 nc = 0; + + /* counting everything retrieved (duplicates are counted multiple times) + number of lookups (and xors) */ + UINT32 nl = 0; + + UINT32 nd = 0; + UINT32 *arr; + int size = 0; + UINT32 index; + int hammd; + + counter->erase(); + memset( numres, 0, ( B + 1 ) * sizeof ( *numres ) ); + + split( chunks, Query, m, mplus, b ); + + /* the growing search radius per substring */ + int s; + + /* current b: for the first mplus substrings it is b, for the rest it is (b-1) */ + int curb = b; + + for ( s = 0; s <= d && n < maxres; s++ ) + { + for ( int k = 0; k < m; k++ ) + { + if( k < mplus ) + curb = b; + else + curb = b - 1; + UINT64 chunksk = chunks[k]; + /* number of bit-strings with s number of 1s */ + nl += xornum[s + 1] - xornum[s]; + + /* the bit-string with s number of 1s */ + UINT64 bitstr = 0; + for ( int i = 0; i < s; i++ ) + /* power[i] stores the location of the i'th 1 */ + power[i] = i; + /* used for stopping criterion (location of (s+1)th 1) */ + power[s] = curb + 1; + + /* bit determines the 1 that should be moving to the left */ + int bit = s - 1; + + /* start from the left-most 1, and move it to the left until + it touches another one */ + + /* the loop for changing bitstr */ + bool infiniteWhile = true; + while ( infiniteWhile ) + { + if( bit != -1 ) + { + bitstr ^= ( power[bit] == bit ) ? (UINT64) 1 << power[bit] : (UINT64) 3 << ( power[bit] - 1 ); + power[bit]++; + bit--; + } + + else + { /* bit == -1 */ + /* the binary code bitstr is available for processing */ + arr = H[k].query( chunksk ^ bitstr, &size ); // lookup + if( size ) + { /* the corresponding bucket is not empty */ + nd += size; + for ( int c = 0; c < size; c++ ) + { + index = arr[c]; + if( !counter->get( index ) ) + { /* if it is not a duplicate */ + counter->set( index ); + hammd = cv::line_descriptor::match( codes.ptr() + (UINT64) index * ( B_over_8 ), Query, B_over_8 ); + + nc++; + if( hammd <= D && numres[hammd] < maxres ) + res[hammd * K + numres[hammd]] = index + 1; + + numres[hammd]++; + } + } + } + + /* end of processing */ + while ( ++bit < s && power[bit] == power[bit + 1] - 1 ) + { + bitstr ^= (UINT64) 1 << ( power[bit] - 1 ); + power[bit] = bit; + } + if( bit == s ) + break; + } + } + + n = n + numres[s * m + k]; + if( n >= maxres ) + break; + } + } + + n = 0; + for ( s = 0; s <= D && (int) n < K; s++ ) + { + for ( int c = 0; c < (int) numres[s] && (int) n < K; c++ ) + results[n++] = res[s * K + c]; + } + +} + +/* constructor 2 */ +BinaryDescriptorMatcher::Mihasher::Mihasher( int B_val, int _m ) +{ + B = B_val; + B_over_8 = B / 8; + m = _m; + b = (int) ceil( (double) B / m ); + + /* assuming that B/2 is large enough radius to include + all of the k nearest neighbors */ + D = (int) ceil( B / 2.0 ); + d = (int) ceil( (double) D / m ); + + /* mplus is the number of chunks with b bits + (m-mplus) is the number of chunks with (b-1) bits */ + mplus = B - m * ( b - 1 ); + + xornum = new UINT32[d + 2]; + xornum[0] = 0; + for ( int i = 0; i <= d; i++ ) + xornum[i + 1] = xornum[i] + (UINT32) choose( b, i ); + + H = new SparseHashtable[m]; + + /* H[i].init might fail */ + for ( int i = 0; i < mplus; i++ ) + H[i].init( b ); + for ( int i = mplus; i < m; i++ ) + H[i].init( b - 1 ); +} + +/* K setter */ +void BinaryDescriptorMatcher::Mihasher::setK( int K_val ) +{ + K = K_val; +} + +/* desctructor */ 
+BinaryDescriptorMatcher::Mihasher::~Mihasher()
+{
+ delete[] xornum;
+ delete[] H;
+}
+
+/* populate tables */
+void BinaryDescriptorMatcher::Mihasher::populate( cv::Mat & _codes, UINT32 N_val, int dim1codes )
+{
+ N = N_val;
+ codes = _codes;
+ UINT64 * chunks = new UINT64[m];
+
+ UINT8 * pcodes = codes.ptr();
+ for ( UINT64 i = 0; i < N; i++, pcodes += dim1codes )
+ {
+ split( chunks, pcodes, m, mplus, b );
+
+ for ( int k = 0; k < m; k++ )
+ H[k].insert( chunks[k], (UINT32) i );
+
+ if( i % (int) ceil( N / 1000.0 ) == 0 )
+ fflush (stdout);
+ }
+
+ delete[] chunks;
+}
+
+/* constructor */
+BinaryDescriptorMatcher::SparseHashtable::SparseHashtable()
+{
+ table = NULL;
+ size = 0;
+ b = 0;
+}
+
+/* initializer */
+int BinaryDescriptorMatcher::SparseHashtable::init( int _b )
+{
+ b = _b;
+
+ if( b < 5 || b > MAX_B || b > (int) ( sizeof(UINT64) * 8 ) )
+ return 1;
+
+ size = UINT64_1 << ( b - 5 ); // size = 2^(b-5) bucket groups of 32 buckets each, i.e. 2^b buckets
+ table = (BucketGroup*) calloc( (size_t)size, sizeof(BucketGroup) );
+
+ return 0;
+
+}
+
+/* destructor */
+BinaryDescriptorMatcher::SparseHashtable::~SparseHashtable()
+{
+ free (table);
+}
+
+/* insert data */
+void BinaryDescriptorMatcher::SparseHashtable::insert( UINT64 index, UINT32 data )
+{
+ table[index >> 5].insert( (int) ( index % 32 ), data );
+}
+
+/* query data */
+UINT32* BinaryDescriptorMatcher::SparseHashtable::query( UINT64 index, int *Size )
+{
+ return table[index >> 5].query( (int) ( index % 32 ), Size );
+}
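+
+/* Editor's note (hedged): a BucketGroup covers 32 consecutive hash buckets.
+ * `empty` is a 32-bit occupancy mask (bit i set <=> bucket i non-empty) and
+ * `group` packs everything into one vector:
+ *
+ *   group[0] = number of stored values, group[1] = current capacity,
+ *   group[2 .. 2 + popcnt(empty)] = per-bucket start offsets (prefix sums),
+ *   the remainder = the concatenated bucket contents,
+ *
+ * so query() needs only two popcounts and returns a pointer into the vector.
+ * Caveat: init() above obtains the groups via calloc(), relying on zero-filled
+ * std::vector internals; that happens to work with common implementations but
+ * is formally undefined behaviour. */
+
+/* constructor */
+BinaryDescriptorMatcher::BucketGroup::BucketGroup()
+{
+ empty = 0;
+ group = std::vector < uint32_t > ( 2, 0 );
+}
+
+/* destructor */
+BinaryDescriptorMatcher::BucketGroup::~BucketGroup()
+{
+}
+
+void BinaryDescriptorMatcher::BucketGroup::insert_value( std::vector<UINT32>& vec, int index, UINT32 data )
+{
+ if( vec.size() > 1 )
+ {
+ if( vec[0] == vec[1] )
+ {
+ vec[1] = (UINT32) ceil( vec[0] * 1.1 );
+ for ( int i = 0; i < (int) ( 2 + vec[1] - vec.size() ); i++ )
+ vec.push_back( 0 );
+
+ }
+
+ vec.insert( vec.begin() + 2 + index, data );
+ vec[2 + index] = data;
+ vec[0]++;
+ }
+
+ else
+ {
+ vec = std::vector < uint32_t > ( 3, 0 );
+ vec[0] = 1;
+ vec[1] = 1;
+ vec[2] = data;
+ }
+}
+
+void BinaryDescriptorMatcher::BucketGroup::push_value( std::vector<UINT32>& vec, UINT32 Data )
+{
+ if( vec.size() > 0 )
+ {
+ if( vec[0] == vec[1] )
+ {
+ vec[1] = (UINT32) std::max( ceil( vec[1] * ARRAY_RESIZE_FACTOR ), vec[1] + ARRAY_RESIZE_ADD_FACTOR );
+ for ( int i = 0; i < (int) ( 2 + vec[1] - vec.size() ); i++ )
+ vec.push_back( 0 );
+ }
+
+ vec[2 + vec[0]] = Data;
+ vec[0]++;
+
+ }
+
+ else
+ {
+ vec = std::vector < uint32_t > ( 2 + (uint32_t) ARRAY_RESIZE_ADD_FACTOR, 0 );
+ vec[0] = 1;
+ vec[1] = 1;
+ vec[2] = Data;
+ }
+}
+
+/* insert data into the bucket */
+void BinaryDescriptorMatcher::BucketGroup::insert( int subindex, UINT32 data )
+{
+ if( group.size() == 0 )
+ {
+ push_value( group, 0 );
+ }
+
+ UINT32 lowerbits = ( (UINT32) 1 << subindex ) - 1;
+ int end = popcnt( empty & lowerbits );
+
+ if( !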
( empty & ( (UINT32) 1 << subindex ) ) ) + { + insert_value( group, end, group[end + 2] ); + empty |= (UINT32) 1 << subindex; + } + + int totones = popcnt( empty ); + insert_value( group, totones + 1 + group[2 + end + 1], data ); + + for ( int i = end + 1; i < totones + 1; i++ ) + group[2 + i]++; +} + +/* perform a query to the bucket */ +UINT32* BinaryDescriptorMatcher::BucketGroup::query( int subindex, int *size ) +{ + if( empty & ( (UINT32) 1 << subindex ) ) + { + UINT32 lowerbits = ( (UINT32) 1 << subindex ) - 1; + int end = popcnt( empty & lowerbits ); + int totones = popcnt( empty ); + + *size = group[2 + end + 1] - group[2 + end]; + return & ( * ( group.begin() + 2 + totones + 1 + (int) group[2 + end] ) ); + } + + else + { + *size = 0; + return NULL; + } +} + +} +} + diff --git a/3rdparty/line_descriptor/src/bitarray_custom.hpp b/3rdparty/line_descriptor/src/bitarray_custom.hpp new file mode 100644 index 0000000..dedfb8e --- /dev/null +++ b/3rdparty/line_descriptor/src/bitarray_custom.hpp @@ -0,0 +1,115 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2014, Mohammad Norouzi, Ali Punjani, David J. Fleet, + // all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and/or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. + // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. + // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. 
+ //
+ //M*/
+
+#ifndef __OPENCV_BITARRAY_HPP
+#define __OPENCV_BITARRAY_HPP
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4267 )
+#endif
+
+#include "types_custom.hpp"
+#include <stdio.h>
+#include <math.h>
+#include <string.h>
+
+/* class defining a sequence of bits */
+class bitarray
+{
+
+ public:
+ /* pointer to bits sequence and sequence's length */
+ UINT32 *arr;
+ UINT32 length;
+
+ /* constructor setting default values */
+ bitarray()
+ {
+ arr = NULL;
+ length = 0;
+ }
+
+ /* constructor setting sequence's length */
+ bitarray( UINT64 _bits )
+ {
+ init( _bits );
+ }
+
+ /* initializer of private fields */
+ void init( UINT64 _bits )
+ {
+ length = (UINT32) ceil( _bits / 32.00 );
+ arr = new UINT32[length];
+ erase();
+ }
+
+ /* destructor */
+ ~bitarray()
+ {
+ if( arr )
+ delete[] arr;
+ }
+
+ inline void flip( UINT64 index )
+ {
+ arr[index >> 5] ^= ( (UINT32) 0x01 ) << ( index % 32 );
+ }
+
+ inline void set( UINT64 index )
+ {
+ arr[index >> 5] |= ( (UINT32) 0x01 ) << ( index % 32 );
+ }
+
+ inline UINT8 get( UINT64 index )
+ {
+ return ( arr[index >> 5] & ( ( (UINT32) 0x01 ) << ( index % 32 ) ) ) != 0;
+ }
+
+ /* set all bits to zero */
+ inline void erase()
+ {
+ memset( arr, 0, sizeof(UINT32) * length );
+ }
+
+};
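+
+/* Editor's note (hedged usage sketch): Mihasher::batchquery uses this class as
+ * a cheap duplicate marker over database indices, roughly:
+ *
+ *   bitarray seen;
+ *   seen.init( N );           // N = number of database codes, all bits zeroed
+ *   if( !seen.get( index ) )  // first time this candidate is retrieved?
+ *   {
+ *     seen.set( index );      // mark it, then verify its full Hamming distance
+ *   }
+ *
+ * init() rounds the bit count up to whole 32-bit words, and set/get address
+ * bit (index % 32) of word (index >> 5). */
+
+#endif
diff --git a/3rdparty/line_descriptor/src/bitops_custom.hpp b/3rdparty/line_descriptor/src/bitops_custom.hpp
new file mode 100644
index 0000000..02e3678
--- /dev/null
+++ b/3rdparty/line_descriptor/src/bitops_custom.hpp
@@ -0,0 +1,167 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+ //
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+ //
+ // By downloading, copying, installing or using the software you agree to this license.
+ // If you do not agree to this license, do not download, install,
+ // copy or use the software.
+ //
+ //
+ // License Agreement
+ // For Open Source Computer Vision Library
+ //
+ // Copyright (C) 2014, Mohammad Norouzi, Ali Punjani, David J. Fleet,
+ // all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ //
+ // Redistribution and use in source and binary forms, with or without modification,
+ // are permitted provided that the following conditions are met:
+ //
+ // * Redistribution's of source code must retain the above copyright notice,
+ // this list of conditions and the following disclaimer.
+ //
+ // * Redistribution's in binary form must reproduce the above copyright notice,
+ // this list of conditions and the following disclaimer in the documentation
+ // and/or other materials provided with the distribution.
+ //
+ // * The name of the copyright holders may not be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+ //
+ // This software is provided by the copyright holders and contributors "as is" and
+ // any express or implied warranties, including, but not limited to, the implied
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.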
+ // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. + // + //M*/ + +#ifndef __OPENCV_BITOPTS_HPP +#define __OPENCV_BITOPTS_HPP + +#include "precomp_custom.hpp" + +#ifdef _MSC_VER +# include +# define popcnt __popcnt +# pragma warning( disable : 4267 ) +#else +# define popcnt __builtin_popcount + +#endif + +/* LUT */ +const int lookup[] = +{ + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 +}; + +namespace cv +{ +namespace line_descriptor +{ +/*matching function */ +inline int match( UINT8*P, UINT8*Q, int codelb ) +{ + int i, output = 0; + for( i = 0; i <= codelb - 16; i += 16 ) + { + output += popcnt( *(UINT32*) (P+i) ^ *(UINT32*) (Q+i) ) + + popcnt( *(UINT32*) (P+i+4) ^ *(UINT32*) (Q+i+4) ) + + popcnt( *(UINT32*) (P+i+8) ^ *(UINT32*) (Q+i+8) ) + + popcnt( *(UINT32*) (P+i+12) ^ *(UINT32*) (Q+i+12) ); + } + for( ; i < codelb; i++ ) + output += lookup[P[i] ^ Q[i]]; + return output; +} + +/* splitting function (b <= 64) */ +inline void split( UINT64 *chunks, UINT8 *code, int m, int mplus, int b ) +{ + UINT64 temp = 0x0; + int nbits = 0; + int nbyte = 0; + UINT64 mask = (b == 64) ? 0xFFFFFFFFFFFFFFFFull : ( ( UINT64_1 << b ) - UINT64_1 ); + + for ( int i = 0; i < m; i++ ) + { + while ( nbits < b ) + { + temp |= ( (UINT64) code[nbyte++] << nbits ); + nbits += 8; + } + + chunks[i] = temp & mask; + temp = b == 64 ? 0x0 : temp >> b; + nbits -= b; + + if( i == mplus - 1 ) + { + b--; /* b <= 63 */ + mask = ( ( UINT64_1 << b ) - UINT64_1 ); + } + } +} + +/* generates the next binary code (in alphabetical order) with the + same number of ones as the input x. 
Taken from + http://www.geeksforgeeks.org/archives/10375 */ +inline UINT64 next_set_of_n_elements( UINT64 x ) +{ + UINT64 smallest, ripple, new_smallest; + + smallest = x & -(signed) x; + ripple = x + smallest; + new_smallest = x ^ ripple; + new_smallest = new_smallest / smallest; + new_smallest >>= 2; + return ripple | new_smallest; +} + +/* print code */ +inline void print_code( UINT64 tmp, int b ) +{ + for ( long long int j = ( b - 1 ); j >= 0; j-- ) + { + printf( "%llu", (long long int) tmp / (UINT64) ( 1 << j ) ); + tmp = tmp - ( tmp / (UINT64) ( 1 << j ) ) * (UINT64) ( 1 << j ); + } + + printf( "\n" ); +} + +inline UINT64 choose( int n, int r ) +{ + UINT64 nchooser = 1; + for ( int k = 0; k < r; k++ ) + { + nchooser *= n - k; + nchooser /= k + 1; + } + + return nchooser; +} +} +} + +#endif diff --git a/3rdparty/line_descriptor/src/draw_custom.cpp b/3rdparty/line_descriptor/src/draw_custom.cpp new file mode 100644 index 0000000..8e9281d --- /dev/null +++ b/3rdparty/line_descriptor/src/draw_custom.cpp @@ -0,0 +1,190 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2014, Biagio Montesano, all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and/or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. + // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. + // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. 
+ // + //M*/ + +#include "precomp_custom.hpp" + +namespace cv +{ +namespace line_descriptor +{ +/* draw matches between two images */ +void drawLineMatches( const Mat& img1, const std::vector& keylines1, const Mat& img2, const std::vector& keylines2, + const std::vector& matches1to2, Mat& outImg, const Scalar& matchColor, const Scalar& singleLineColor, + const std::vector& matchesMask, int flags ) +{ + + if(img1.type() != img2.type()) + { + std::cout << "Input images have different types" << std::endl; + CV_Assert(img1.type() == img2.type()); + } + + /* initialize output matrix (if necessary) */ + if( flags == DrawLinesMatchesFlags::DEFAULT ) + { + /* check how many rows are necessary for output matrix */ + int totalRows = img1.rows >= img2.rows ? img1.rows : img2.rows; + + /* initialize output matrix */ + outImg = Mat::zeros( totalRows, img1.cols + img2.cols, img1.type() ); + + } + + /* initialize random seed: */ + srand( (unsigned int) time( NULL ) ); + + Scalar singleLineColorRGB; + if( singleLineColor == Scalar::all( -1 ) ) + { + int R = ( rand() % (int) ( 255 + 1 ) ); + int G = ( rand() % (int) ( 255 + 1 ) ); + int B = ( rand() % (int) ( 255 + 1 ) ); + + singleLineColorRGB = Scalar( R, G, B ); + } + + else + singleLineColorRGB = singleLineColor; + + /* copy input images to output images */ + Mat roi_left( outImg, Rect( 0, 0, img1.cols, img1.rows ) ); + Mat roi_right( outImg, Rect( img1.cols, 0, img2.cols, img2.rows ) ); + img1.copyTo( roi_left ); + img2.copyTo( roi_right ); + + /* get columns offset */ + int offset = img1.cols; + + /* if requested, draw lines from both images */ + if( flags != DrawLinesMatchesFlags::NOT_DRAW_SINGLE_LINES ) + { + for ( size_t i = 0; i < keylines1.size(); i++ ) + { + KeyLine k1 = keylines1[i]; + //line( outImg, Point2f( k1.startPointX, k1.startPointY ), Point2f( k1.endPointX, k1.endPointY ), singleLineColorRGB, 2 ); + line( outImg, Point2f( k1.sPointInOctaveX, k1.sPointInOctaveY ), Point2f( k1.ePointInOctaveX, k1.ePointInOctaveY ), singleLineColorRGB, 2 ); + + } + + for ( size_t j = 0; j < keylines2.size(); j++ ) + { + KeyLine k2 = keylines2[j]; + line( outImg, Point2f( k2.sPointInOctaveX + offset, k2.sPointInOctaveY ), Point2f( k2.ePointInOctaveX + offset, k2.ePointInOctaveY ), singleLineColorRGB, 2 ); + } + } + + /* draw matches */ + for ( size_t counter = 0; counter < matches1to2.size(); counter++ ) + { + if( matchesMask[counter] != 0 ) + { + DMatch dm = matches1to2[counter]; + KeyLine left = keylines1[dm.queryIdx]; + KeyLine right = keylines2[dm.trainIdx]; + + Scalar matchColorRGB; + if( matchColor == Scalar::all( -1 ) ) + { + int R = ( rand() % (int) ( 255 + 1 ) ); + int G = ( rand() % (int) ( 255 + 1 ) ); + int B = ( rand() % (int) ( 255 + 1 ) ); + + matchColorRGB = Scalar( R, G, B ); + + if( singleLineColor == Scalar::all( -1 ) ) + singleLineColorRGB = matchColorRGB; + } + + else + matchColorRGB = matchColor; + + /* draw lines if necessary */ +// line( outImg, Point2f( left.startPointX, left.startPointY ), Point2f( left.endPointX, left.endPointY ), singleLineColorRGB, 2 ); +// +// line( outImg, Point2f( right.startPointX + offset, right.startPointY ), Point2f( right.endPointX + offset, right.endPointY ), singleLineColorRGB, +// 2 ); +// +// /* link correspondent lines */ +// line( outImg, Point2f( left.startPointX, left.startPointY ), Point2f( right.startPointX + offset, right.startPointY ), matchColorRGB, 1 ); + + line( outImg, Point2f( left.sPointInOctaveX, left.sPointInOctaveY ), Point2f( left.ePointInOctaveX, left.ePointInOctaveY ), 
singleLineColorRGB, 2 ); + + line( outImg, Point2f( right.sPointInOctaveX + offset, right.sPointInOctaveY ), Point2f( right.ePointInOctaveX + offset, right.ePointInOctaveY ), singleLineColorRGB, + 2 ); + + /* link correspondent lines */ + line( outImg, Point2f( left.sPointInOctaveX, left.sPointInOctaveY ), Point2f( right.sPointInOctaveX + offset, right.sPointInOctaveY ), matchColorRGB, 1 ); + } + } +} + +/* draw extracted lines on original image */ +void drawKeylines( const Mat& image, const std::vector& keylines, Mat& outImage, const Scalar& color, int flags ) +{ + if( flags == DrawLinesMatchesFlags::DEFAULT ) + outImage = image.clone(); + + for ( size_t i = 0; i < keylines.size(); i++ ) + { + /* decide lines' color */ + Scalar lineColor; + if( color == Scalar::all( -1 ) ) + { + int R = ( rand() % (int) ( 255 + 1 ) ); + int G = ( rand() % (int) ( 255 + 1 ) ); + int B = ( rand() % (int) ( 255 + 1 ) ); + + lineColor = Scalar( R, G, B ); + } + + else + lineColor = color; + + /* get line */ + KeyLine k = keylines[i]; + + /* draw line */ + line( outImage, Point2f( k.startPointX, k.startPointY ), Point2f( k.endPointX, k.endPointY ), lineColor, 1 ); + } +} + +} +} diff --git a/3rdparty/line_descriptor/src/precomp_custom.hpp b/3rdparty/line_descriptor/src/precomp_custom.hpp new file mode 100644 index 0000000..11565f1 --- /dev/null +++ b/3rdparty/line_descriptor/src/precomp_custom.hpp @@ -0,0 +1,76 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2014, Biagio Montesano, all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and/or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. + // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. + // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. 
+ // + //M*/ + +#ifndef __OPENCV_PRECOMP_H__ +#define __OPENCV_PRECOMP_H__ + +#ifdef _MSC_VER +#pragma warning( disable : 4267 ) +#endif + +#define _USE_MATH_DEFINES + +#include +#include "opencv2/core/utility.hpp" +#include +#include +#include +#include "opencv2/core.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bitarray_custom.hpp" +#include "bitops_custom.hpp" +#include "types_custom.hpp" + +#include "line_descriptor_custom.hpp" + +#endif diff --git a/3rdparty/line_descriptor/src/types_custom.hpp b/3rdparty/line_descriptor/src/types_custom.hpp new file mode 100644 index 0000000..80a17f0 --- /dev/null +++ b/3rdparty/line_descriptor/src/types_custom.hpp @@ -0,0 +1,66 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// + // + // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + // + // By downloading, copying, installing or using the software you agree to this license. + // If you do not agree to this license, do not download, install, + // copy or use the software. + // + // + // License Agreement + // For Open Source Computer Vision Library + // + // Copyright (C) 2014, Mohammad Norouzi, Ali Punjani, David J. Fleet, + // all rights reserved. + // Third party copyrights are property of their respective owners. + // + // Redistribution and use in source and binary forms, with or without modification, + // are permitted provided that the following conditions are met: + // + // * Redistribution's of source code must retain the above copyright notice, + // this list of conditions and the following disclaimer. + // + // * Redistribution's in binary form must reproduce the above copyright notice, + // this list of conditions and the following disclaimer in the documentation + // and/or other materials provided with the distribution. + // + // * The name of the copyright holders may not be used to endorse or promote products + // derived from this software without specific prior written permission. + // + // This software is provided by the copyright holders and contributors "as is" and + // any express or implied warranties, including, but not limited to, the implied + // warranties of merchantability and fitness for a particular purpose are disclaimed. + // In no event shall the Intel Corporation or contributors be liable for any direct, + // indirect, incidental, special, exemplary, or consequential damages + // (including, but not limited to, procurement of substitute goods or services; + // loss of use, data, or profits; or business interruption) however caused + // and on any theory of liability, whether in contract, strict liability, + // or tort (including negligence or otherwise) arising in any way out of + // the use of this software, even if advised of the possibility of such damage. 
+ // + //M*/ + +#if defined _MSC_VER && _MSC_VER <= 1700 +#include +#else +#include +#endif + +#ifndef __OPENCV_TYPES_HPP +#define __OPENCV_TYPES_HPP + +#ifdef _MSC_VER +#pragma warning( disable : 4267 ) +#endif + +/* define data types */ +typedef uint64_t UINT64; +typedef uint32_t UINT32; +typedef uint16_t UINT16; +typedef uint8_t UINT8; + +/* define constants */ +#define UINT64_1 ((UINT64)0x01) +#define UINT32_1 ((UINT32)0x01) + +#endif diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..c8c56a1 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,128 @@ +################################################################################ +# user build settings +SET(TRACE TRUE) +SET(HAVE_G2O FALSE) +SET(HAVE_MRPT TRUE) + +################################################################################ + +SET(DEFAULT_HAS_MRPT ON) +SET(HAS_MRPT ${DEFAULT_HAS_MRPT} CACHE BOOL "Build the representation api which uses the MRPT library") + +SET(PROJECT_NAME plsvo) +PROJECT(${PROJECT_NAME}) +CMAKE_MINIMUM_REQUIRED (VERSION 2.8.3) +SET(CMAKE_BUILD_TYPE Release) # Release, RelWithDebInfo -> Set in cmake-gui option +SET(CMAKE_VERBOSE_MAKEFILE OFF) +SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/CMakeModules/") + +# Set definitions +IF(HAVE_MRPT) + ADD_DEFINITIONS(-DHAS_MRPT) +ENDIF() + +# Set build flags, set ARM_ARCHITECTURE environment variable on Odroid +SET(CMAKE_CXX_FLAGS "-Wall -D_LINUX -D_REENTRANT -march=native -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unknown-pragmas -Wno-reorder") +IF(DEFINED ENV{ARM_ARCHITECTURE}) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mfpu=neon -march=armv7-a") +ELSE() + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mmmx -msse -msse -msse2 -msse3 -mssse3") +ENDIF() +IF(CMAKE_COMPILER_IS_GNUCC) + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") +ELSE() + SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +ENDIF() +SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS} -O3 -fsee -fomit-frame-pointer -fno-signed-zeros -fno-math-errno -funroll-loops") + +# Add plain cmake packages +FIND_PACKAGE(OpenCV 3 REQUIRED) +FIND_PACKAGE(Eigen3 REQUIRED) +FIND_PACKAGE(Sophus REQUIRED) +FIND_PACKAGE(fast REQUIRED) +FIND_PACKAGE(Boost REQUIRED COMPONENTS thread system filesystem) + +IF(HAVE_MRPT) + SET(MRPT_DONT_USE_DBG_LIBS 1) #use release libraries for linking even if "Debug" CMake build + FIND_PACKAGE(MRPT REQUIRED base opengl gui) +ENDIF() + +FIND_LIBRARY(YAML_CPP_LIBRARIES yaml-cpp) +if(NOT YAML_CPP_LIBRARIES) + # If yaml-cpp not found in the system, try finding it as a user CMake-generated project + FIND_PACKAGE(yaml-cpp REQUIRED) + INCLUDE_DIRECTORIES(${YAML_CPP_INCLUDE_DIRS}) +endif(NOT YAML_CPP_LIBRARIES) + +FIND_PACKAGE(vikit_common REQUIRED) +SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) +SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) + +# Include dirs +INCLUDE_DIRECTORIES( + include + ${Eigen3_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ${Sophus_INCLUDE_DIRS} + ${Boost_INCLUDE_DIRS} + ${fast_INCLUDE_DIRS} + ${YAML_CPP_INCLUDE_DIRS} + ${PROJECT_SOURCE_DIR}/3rdparty/line_descriptor/include/ +) + +# Set link libraries +LIST(APPEND LINK_LIBS + ${OpenCV_LIBS} + #${Sophus_LIBRARIES} + ${Boost_LIBRARIES} + ${fast_LIBRARIES} + ${YAML_CPP_LIBRARIES} + ~/libs/Sophus/build/libSophus.so + ${PROJECT_SOURCE_DIR}/3rdparty/line_descriptor/lib/liblinedesc.so +) + +INCLUDE_DIRECTORIES(${vikit_common_INCLUDE_DIRS}) +LIST(APPEND LINK_LIBS ${vikit_common_LIBRARIES}) + +# Set sourcefiles +LIST(APPEND SOURCEFILES + 
src/frame_handler_mono.cpp
+ src/frame_handler_base.cpp
+ src/frame.cpp
+ src/feature.cpp
+ src/feature3D.cpp
+ src/feature3D_impl.cpp
+ src/map.cpp
+ src/pose_optimizer.cpp
+ src/initialization.cpp
+ src/matcher.cpp
+ src/reprojector.cpp
+ src/feature_alignment.cpp
+ src/feature_detection.cpp
+ src/depth_filter.cpp
+ src/config.cpp
+ src/sparse_img_align.cpp)
+
+IF(HAVE_MRPT)
+ LIST(APPEND SOURCEFILES src/sceneRepresentation.cpp)
+ENDIF()
+
+# The code below exists only to make the headers browsable in IDE editors
+# List all files (headers) contained by the svo library
+FILE(GLOB_RECURSE all_include_files RELATIVE "${CMAKE_SOURCE_DIR}" *.h *.hpp)
+
+# Visualize the files of this directory in the IDE by creating a custom empty target
+add_custom_target( plsvo_includes DEPENDS ${all_include_files} SOURCES ${all_include_files} )
+
+# Create svo library
+ADD_LIBRARY(plsvo SHARED ${SOURCEFILES})
+TARGET_LINK_LIBRARIES(plsvo ${LINK_LIBS} ${MRPT_LIBS})
+
+################################################################################
+
+IF(HAVE_MRPT)
+ ADD_EXECUTABLE( run_pipeline app/run_pipeline.cpp )
+ TARGET_LINK_LIBRARIES( run_pipeline plsvo)
+ENDIF()
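+
+# Editor's note (hedged): linking Sophus through the hard-coded path
+# ~/libs/Sophus/build/libSophus.so in LINK_LIBS only works on machines with
+# that exact checkout location. Since FIND_PACKAGE(Sophus REQUIRED) already
+# runs above, a more portable alternative (assuming the Sophus package sets
+# Sophus_LIBRARIES, as the commented-out entry in LINK_LIBS suggests) is:
+#
+#   LIST(APPEND LINK_LIBS ${Sophus_LIBRARIES})
diff --git a/LICENCE b/LICENCE
new file mode 100644
index 0000000..70fc6da
--- /dev/null
+++ b/LICENCE
@@ -0,0 +1,675 @@
+GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. {http://fsf.org/}
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.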
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see {http://www.gnu.org/licenses/}. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + PTAM-GPL Copyright (C) 2013 Oxford-PTAM + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +{http://www.gnu.org/licenses/}. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +{http://www.gnu.org/philosophy/why-not-lgpl.html}. + diff --git a/README.md b/README.md new file mode 100644 index 0000000..70ef305 --- /dev/null +++ b/README.md @@ -0,0 +1,57 @@ +# PL-SVO # + +This code contains an algorithm to compute monocular visual odometry by using both point and line segment features, based on the open source version of [SVO](https://github.com/uzh-rpg/rpg_svo). 
+
+**Authors:** [Ruben Gomez-Ojeda](http://mapir.isa.uma.es/mapirwebsite/index.php/people/164-ruben-gomez), [Jesus Briales](http://mapir.isa.uma.es/mapirwebsite/index.php/people/165-jesus-briales) and [Javier Gonzalez-Jimenez](http://mapir.isa.uma.es/mapirwebsite/index.php/people/95-javier-gonzalez-jimenez)
+
+**Related publication:** [*PL-SVO: Semi-direct monocular visual odometry by combining points and line segments*](http://mapir.isa.uma.es/mapirwebsite/index.php/people/164-ruben-gomez)
+
+If you use PL-SVO in your research work, please cite:
+
+    @inproceedings{ gomez2016plsvo,
+      title        = {{PL-SVO: Semi-direct monocular visual odometry by combining points and line segments}},
+      author       = {Gomez-Ojeda, Ruben and Briales, Jesus and Gonzalez-Jimenez, Javier},
+      booktitle    = {Intelligent Robots and Systems (IROS), 2016 IEEE/RSJ International Conference on},
+      pages        = {4211--4216},
+      year         = {2016},
+      organization = {IEEE}
+    }
+
+The provided code is published under the General Public License Version 3 (GPL v3). More information can be found in the "LICENCE" file, also included in the repository.
+
+Please do not hesitate to contact the authors if you have any further questions.
+
+
+## 1. Prerequisites and dependencies
+
+We have tested PL-SVO on Ubuntu 12.04, 14.04 and 16.04, but it should be straightforward to compile it on other platforms. Note that several internal processes, such as feature detection and matching, can run in parallel, so a powerful computer is recommended when using the parallel configuration (change the corresponding flags in the config file).
+
+### SVO
+See the SVO Wiki for more instructions: https://github.com/uzh-rpg/rpg_svo/wiki
+
+### MRPT
+Required only if you use the provided scene representation:
+```
+sudo apt-get install libmrpt-dev
+```
+
+Download and install instructions can also be found at: http://www.mrpt.org/
+
+### Line descriptor (in 3rdparty folder)
+We have modified the [*line_descriptor*](https://github.com/opencv/opencv_contrib/tree/master/modules/line_descriptor) module from the [OpenCV/contrib](https://github.com/opencv/opencv_contrib) library (both BSD licensed), which is included in the *3rdparty* folder.
+
+
+## 2. Configuration and generation
+
+Executing the script *build.sh* first configures and builds the *line_descriptor* module, and then configures and builds the *PL-SVO* library, producing **libplsvo.so** in the *lib* folder and the application **run_pipeline**, which works with our dataset format (explained in the next section).
+
+
+## 3. Dataset format and usage
+
+The basic usage of **run_pipeline** is:
+```
+./run_pipeline <dataset_name>
+```
+
+where **<dataset_name>** refers to the sequence folder relative to the environment variable *${DATASETS_DIR}*, which must be set beforehand. The sequence folder must contain a dataset configuration file named **dataset_params.yaml**, following the examples in **pl-svo/config**, whose **images_subfolder** entry names the image subfolder.
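+
+For illustration, a minimal sequence layout could look like this (all names are
+placeholders except **dataset_params.yaml** and the **images_subfolder** key):
+```
+${DATASETS_DIR}/
+└── my_sequence/              # run as: ./run_pipeline my_sequence
+    ├── dataset_params.yaml   # camera model and dataset configuration
+    └── images/               # subfolder named by images_subfolder
+        ├── 00000.png
+        └── ...
+```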
+ diff --git a/app/run_pipeline.cpp b/app/run_pipeline.cpp new file mode 100644 index 0000000..b71b17f --- /dev/null +++ b/app/run_pipeline.cpp @@ -0,0 +1,830 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + +#ifdef HAS_MRPT +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "opencv2/core/utility.hpp" +#include "yaml-cpp/yaml.h" + +#include +#include + +using namespace std; + +struct svo_options { + int seq_offset; + int seq_step; + int seq_length; + bool has_points; + bool has_lines ; + bool is_tum; + string dataset_dir; + string images_dir; + string traj_out; + string map_out; +}; + +namespace plsvo { + +struct ConvergedSeed { + int x_, y_; + Vector3d pos_; + cv::Vec3b col_; + ConvergedSeed(int x, int y, Vector3d pos, cv::Vec3b col) : + x_(x), y_(y), pos_(pos), col_(col) + {} +}; + +class BenchmarkNode +{ + vk::AbstractCamera* cam_; + FrameHandlerMono* vo_; + DepthFilter* depth_filter_; + std::list results_; + +public: + BenchmarkNode(vk::AbstractCamera *cam_); + BenchmarkNode(vk::AbstractCamera *cam_, const plsvo::FrameHandlerMono::Options& handler_opts); + ~BenchmarkNode(); + void depthFilterCbPt(plsvo::Point* point); + void depthFilterCbLs(plsvo::LineSeg* ls); + int runFromFolder(svo_options opts); + int runFromFolder(vk::PinholeCamera* cam_, svo_options opts); + int runFromFolder(vk::ATANCamera* cam_, svo_options opts); +}; + +BenchmarkNode::BenchmarkNode(vk::AbstractCamera* cam_) +{ + vo_ = new plsvo::FrameHandlerMono(cam_); + vo_->start(); +} + +BenchmarkNode::BenchmarkNode( + vk::AbstractCamera* cam_, + const plsvo::FrameHandlerMono::Options& handler_opts) +{ + vo_ = new plsvo::FrameHandlerMono(cam_, handler_opts); + vo_->start(); +} + +BenchmarkNode::~BenchmarkNode() +{ + delete vo_; + delete cam_; +} + +void BenchmarkNode::depthFilterCbPt(plsvo::Point* point) +{ + cv::Vec3b color = point->obs_.front()->frame->img_pyr_[0].at(point->obs_.front()->px[0], point->obs_.front()->px[1]); + results_.push_back(ConvergedSeed( + point->obs_.front()->px[0], point->obs_.front()->px[1], point->pos_, color)); + delete point->obs_.front(); +} + +void BenchmarkNode::depthFilterCbLs(plsvo::LineSeg* ls) +{ + 
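+  // Depth-filter callback for line-segment seeds, analogous to depthFilterCbPt:
+  // when a segment seed converges, sample the image colour at the segment's
+  // start-point pixel and store it, together with that pixel and the 3D start
+  // position, as a ConvergedSeed; then release the first observation. Only the
+  // start point of the segment is recorded here, as the inline note below says.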
cv::Vec3b color = ls->obs_.front()->frame->img_pyr_[0].at(ls->obs_.front()->spx[0], ls->obs_.front()->spx[1]); + results_.push_back(ConvergedSeed( + ls->obs_.front()->spx[0], ls->obs_.front()->spx[1], ls->spos_, color)); // test only with spoint + delete ls->obs_.front(); +} + +int BenchmarkNode::runFromFolder(svo_options opts) +{ + // grab options + int seq_offset = opts.seq_offset; + int seq_step = opts.seq_step; + int seq_length = opts.seq_length; + bool has_points = opts.has_points; + bool has_lines = opts.has_lines; + bool is_tum = opts.is_tum; + string dataset_dir = opts.dataset_dir; + string images_dir = opts.images_dir; + string map_out = opts.map_out; + string traj_out = opts.traj_out; + int fps_ = 30; + + // Read content of the .yaml dataset configuration file + YAML::Node dset_config = YAML::LoadFile(dataset_dir+"/dataset_params.yaml"); + + // get a sorted list of files in the img directory + boost::filesystem::path img_dir_path(images_dir.c_str()); + if (!boost::filesystem::exists(img_dir_path)) + { + cout << endl << "Image directory does not exist: \t" << images_dir << endl; + return -1; + } + + // get all files in the img directory + size_t max_len = 0; + std::list imgs; + boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end + for (boost::filesystem::directory_iterator file(img_dir_path); file != end_itr; ++file) + { + boost::filesystem::path filename_path = file->path().filename(); + if (boost::filesystem::is_regular_file(file->status()) && + (filename_path.extension() == ".png" || + filename_path.extension() == ".jpg" || + filename_path.extension() == ".jpeg" || + filename_path.extension() == ".tiff") ) + { + std::string filename(filename_path.string()); + imgs.push_back(filename); + max_len = max(max_len, filename.length()); + } + } + + // sort them by filename; add leading zeros to make filename-lengths equal if needed + std::map sorted_imgs; + int n_imgs = 0; + for (std::list::iterator img = imgs.begin(); img != imgs.end(); ++img) + { + sorted_imgs[std::string(max_len - img->length(), '0') + (*img)] = *img; + n_imgs++; + } + + // add offset / step / length + int seq_end = n_imgs; + if( seq_length != 0 ) + int seq_end = std::min( seq_length*seq_step+seq_offset, n_imgs ); + std::map sorted_imgs_aux = sorted_imgs; + sorted_imgs.clear(); + int k = 0; + for (auto img = sorted_imgs_aux.begin(); img != sorted_imgs_aux.end(); std::advance(img,seq_step) ) + { + if( k >= seq_offset && k <= seq_end ) + sorted_imgs.insert( *img ); + k++; + } + + // create scene + sceneRepresentation scene("../app/scene_config.ini"); + Matrix T_c_w, T_f_w = Matrix::Identity(), T_f_w_prev = Matrix::Identity(), T_inc; + T_c_w = Matrix::Identity(); + scene.initializeScene(T_f_w); + + // run SVO (pose estimation) + std::list frames; + int frame_counter = 1; + std::ofstream ofs_traj(traj_out.c_str()); + for (std::map::iterator it = sorted_imgs.begin(); it != sorted_imgs.end(); ++it) + { + // load image + boost::filesystem::path img_path = img_dir_path / boost::filesystem::path(it->second.c_str()); + if (frame_counter == 1) + std::cout << "reading image " << img_path.string() << std::endl; + cv::Mat img(cv::imread(img_path.string(), CV_8UC3)); + //cv::cvtColor( img, img, cv::COLOR_BGR2GRAY); + // IMPORTANT: The image must be flipped if focal length is negative + // since the optimization code assumes that both f_x and f_y are positive + { + // get camera config + YAML::Node cam_config = dset_config["cam0"]; + // check f_y sign + if(cam_config["cam_fy"].as()<0) + 
cv::flip(img, img, 0); // Vertical flipping (around x axis, changes y coordinate) + } + assert(!img.empty()); + + // process frame + vo_->addImage(img, frame_counter / (double)fps_); + + // display tracking quality + if (vo_->lastFrame() != NULL) { + std::cout << "Frame-Id: " << vo_->lastFrame()->id_ << " \t" + << "#PointFeatures: " << vo_->lastNumPtObservations() << " \t" + << "#LineFeatures: " << vo_->lastNumLsObservations() << " \t" + << "Proc. Time: " << vo_->lastProcessingTime()*1000 << "ms" << std::endl << std::endl; + frames.push_back(vo_->lastFrame()); + // access the pose of the camera via vo_->lastFrame()->T_f_w_. + T_f_w = vo_->lastFrame()->T_f_w_.matrix(); + T_inc = T_f_w_prev * T_f_w.inverse(); + T_f_w_prev = T_f_w; + scene.setPose( T_inc ); + scene.setText( vo_->lastFrame()->id_, vo_->lastProcessingTime()*1000, vo_->lastNumPtObservations(), 0, vo_->lastNumLsObservations(), 0 ); + cv::Mat dbg_img = vo_->get_debug_image(); + if( !dbg_img.empty() ) + scene.setImage( dbg_img ); + else + scene.setImage("./empty_img.png"); + + // save trajectory + Eigen::Matrix cov = vo_->lastFrame()->Cov_; + bool skip_frame = false; + for (int i = 0; i < 6; i++) + for (int j = 0; j < 6; j++) + if (! ((1.e-16 < fabs(cov(i,j))) && (fabs(cov(i,j)) < 1.e+16)) ) // likely an invalid pose -> seriously? + skip_frame = true; + + // access the pose of the camera via vo_->lastFrame()->T_f_w_. + Sophus::SE3 world_transf = vo_->lastFrame()->T_f_w_.inverse(); + Eigen::Quaterniond quat = world_transf.unit_quaternion(); + Eigen::Vector3d transl = world_transf.translation(); + if ( ((transl(0) == 0.) && (transl(1) == 0.) && (transl(2) == 0.)) && + ((quat.x() == -0.) && (quat.y() == -0.) && (quat.z() == -0.) && (quat.w() == 1.)) ) + skip_frame = true; + + if (skip_frame) + continue; + //cout << endl << "Frame skipped, watch out" << endl; + + string filename = (it->second).c_str(); + int size = filename.size(); + string timestamp = filename.substr(0,size-4); + ofs_traj << timestamp << " " + << transl(0) << " " << transl(1) << " " << transl(2) << " " + << quat.x() << " " << quat.y() << " " << quat.z() << " " << quat.w() + << std::endl; + + // introduce 3d features to the scene + vector< Matrix > points3d; + for(list::iterator it=vo_->lastFrame()->pt_fts_.begin(); it!=vo_->lastFrame()->pt_fts_.end();++it) + if((*it)->feat3D != NULL) + points3d.push_back( (*it)->feat3D->pos_ ); + scene.setPointsSVO(points3d); + // introduce 3d line segments to the scene + vector< Matrix > lines3d; + Matrix line3d_; + for(list::iterator it=vo_->lastFrame()->seg_fts_.begin(); it!=vo_->lastFrame()->seg_fts_.end(); ++it) + { + if((*it)->feat3D != NULL) + { + line3d_ << (*it)->feat3D->spos_, (*it)->feat3D->epos_; + lines3d.push_back( line3d_ ); + } + } + scene.setLinesSVO(lines3d); + // update scene + scene.updateScene(); + } + frame_counter++; + } + cout << endl << "End of the SVO pipeline" << endl << endl; + + std::cout << "Done." 
<< std::endl; + return 0; +} + +int BenchmarkNode::runFromFolder(vk::PinholeCamera* cam_, svo_options opts) +{ + + // grab options + int seq_offset = opts.seq_offset; + int seq_step = opts.seq_step; + int seq_length = opts.seq_length; + bool has_points = opts.has_points; + bool has_lines = opts.has_lines; + bool is_tum = opts.is_tum; + string dataset_dir = opts.dataset_dir; + string images_dir = opts.images_dir; + string map_out = opts.map_out; + string traj_out = opts.traj_out; + int fps_ = 30; + + // Read content of the .yaml dataset configuration file + YAML::Node dset_config = YAML::LoadFile(dataset_dir+"/dataset_params.yaml"); + + // get a sorted list of files in the img directory + boost::filesystem::path img_dir_path(images_dir.c_str()); + if (!boost::filesystem::exists(img_dir_path)) + { + cout << endl << "Image directory does not exist: \t" << images_dir << endl; + return -1; + } + + // get all files in the img directory + size_t max_len = 0; + std::list imgs; + boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end + for (boost::filesystem::directory_iterator file(img_dir_path); file != end_itr; ++file) + { + boost::filesystem::path filename_path = file->path().filename(); + if (boost::filesystem::is_regular_file(file->status()) && + (filename_path.extension() == ".png" || + filename_path.extension() == ".jpg" || + filename_path.extension() == ".jpeg" || + filename_path.extension() == ".tiff") ) + { + std::string filename(filename_path.string()); + imgs.push_back(filename); + max_len = max(max_len, filename.length()); + } + } + + // sort them by filename; add leading zeros to make filename-lengths equal if needed + std::map sorted_imgs; + int n_imgs = 0; + for (std::list::iterator img = imgs.begin(); img != imgs.end(); ++img) + { + sorted_imgs[std::string(max_len - img->length(), '0') + (*img)] = *img; + n_imgs++; + } + + // add offset / step / length + int seq_end = n_imgs; + if( seq_length != 0 ) + int seq_end = std::min( seq_length*seq_step+seq_offset, n_imgs ); + std::map sorted_imgs_aux = sorted_imgs; + sorted_imgs.clear(); + int k = 0; + for (auto img = sorted_imgs_aux.begin(); img != sorted_imgs_aux.end(); std::advance(img,seq_step) ) + { + if( k >= seq_offset && k <= seq_end ) + sorted_imgs.insert( *img ); + k++; + } + + // create scene + sceneRepresentation scene("../app/scene_config.ini"); + Matrix T_c_w, T_f_w = Matrix::Identity(), T_f_w_prev = Matrix::Identity(), T_inc; + T_c_w = Matrix::Identity(); + scene.initializeScene(T_f_w); + + // run SVO (pose estimation) + std::list frames; + int frame_counter = 1; + std::ofstream ofs_traj(traj_out.c_str()); + for (std::map::iterator it = sorted_imgs.begin(); it != sorted_imgs.end(); ++it ) + { + // load image + boost::filesystem::path img_path = img_dir_path / boost::filesystem::path(it->second.c_str()); + if (frame_counter == 1) + std::cout << "reading image " << img_path.string() << std::endl; + cv::Mat img(cv::imread(img_path.string(), CV_8UC1)); + // IMPORTANT: The image must be flipped if focal length is negative + // since the optimization code assumes that both f_x and f_y are positive + { + // get camera config + YAML::Node cam_config = dset_config["cam0"]; + // check f_y sign + if(cam_config["cam_fy"].as()<0) + cv::flip(img, img, 0); // Vertical flipping (around x axis, changes y coordinate) + } + assert(!img.empty()); + + // undistort image + cv::Mat img_rec; + cam_->undistortImage(img,img_rec); + + // process frame + vo_->addImage(img_rec, frame_counter / (double)fps_); + + // 
display tracking quality + if (vo_->lastFrame() != NULL) { + std::cout << "Frame-Id: " << vo_->lastFrame()->id_ << " \t" + << "#PointFeatures: " << vo_->lastNumPtObservations() << " \t" + << "#LineFeatures: " << vo_->lastNumLsObservations() << " \t" + << "Proc. Time: " << vo_->lastProcessingTime()*1000 << "ms" << std::endl << std::endl; + frames.push_back(vo_->lastFrame()); + + // save trajectory + Eigen::Matrix cov = vo_->lastFrame()->Cov_; + bool skip_frame = false; + for (int i = 0; i < 6; i++) + for (int j = 0; j < 6; j++) + if (! ((1.e-16 < fabs(cov(i,j))) && (fabs(cov(i,j)) < 1.e+16)) ) // likely an invalid pose -> seriously? + skip_frame = true; + + // access the pose of the camera via vo_->lastFrame()->T_f_w_. + Sophus::SE3 world_transf = vo_->lastFrame()->T_f_w_.inverse(); + Eigen::Quaterniond quat = world_transf.unit_quaternion(); + Eigen::Vector3d transl = world_transf.translation(); + Matrix3d rot = world_transf.rotation_matrix(); + if ( ((transl(0) == 0.) && (transl(1) == 0.) && (transl(2) == 0.)) && + ((quat.x() == -0.) && (quat.y() == -0.) && (quat.z() == -0.) && (quat.w() == 1.)) ) + skip_frame = true; + + if (skip_frame) + continue; + //cout << endl << "Frame skipped, watch out" << endl; + + string filename = (it->second).c_str(); + int size = filename.size(); + string timestamp = filename.substr(0,size-4); + ofs_traj << timestamp << " " + << transl(0) << " " << transl(1) << " " << transl(2) << " " + << quat.x() << " " << quat.y() << " " << quat.z() << " " << quat.w() + << std::endl; + + // access the pose of the camera via vo_->lastFrame()->T_f_w_. + T_f_w = vo_->lastFrame()->T_f_w_.matrix(); + T_inc = T_f_w_prev * T_f_w.inverse(); + T_f_w_prev = T_f_w; + scene.setPose( T_inc ); + scene.setText( vo_->lastFrame()->id_, vo_->lastProcessingTime()*1000, vo_->lastNumPtObservations(), 0, vo_->lastNumLsObservations(), 0 ); + cv::Mat dbg_img = vo_->get_debug_image(); + if( !dbg_img.empty() ) + scene.setImage( dbg_img ); + else + scene.setImage("./empty_img.png"); + + // introduce 3d features to the scene + vector< Matrix > points3d; + for(list::iterator it=vo_->lastFrame()->pt_fts_.begin(); it!=vo_->lastFrame()->pt_fts_.end();++it) + if((*it)->feat3D != NULL) + points3d.push_back( (*it)->feat3D->pos_ ); + scene.setPointsSVO(points3d); + + // introduce 3d line segments to the scene + vector< Matrix > lines3d; + Matrix line3d_; + for(list::iterator it=vo_->lastFrame()->seg_fts_.begin(); it!=vo_->lastFrame()->seg_fts_.end(); ++it) + { + if((*it)->feat3D != NULL) + { + line3d_ << (*it)->feat3D->spos_, (*it)->feat3D->epos_; + lines3d.push_back( line3d_ ); + } + } + scene.setLinesSVO(lines3d); + + // update scene + scene.updateScene(); + + } + frame_counter++; + + } + cout << endl << "End of the SVO pipeline" << endl << endl; + + std::cout << "Done." 
<< std::endl; + + return 0; + +} + +int BenchmarkNode::runFromFolder(vk::ATANCamera* cam_, svo_options opts) +{ + + // grab options + int seq_offset = opts.seq_offset; + int seq_step = opts.seq_step; + int seq_length = opts.seq_length; + bool has_points = opts.has_points; + bool has_lines = opts.has_lines; + bool is_tum = opts.is_tum; + string dataset_dir = opts.dataset_dir; + string images_dir = opts.images_dir; + string map_out = opts.map_out; + string traj_out = opts.traj_out; + int fps_ = 30; + + // Read content of the .yaml dataset configuration file + YAML::Node dset_config = YAML::LoadFile(dataset_dir+"/dataset_params.yaml"); + + // get a sorted list of files in the img directory + boost::filesystem::path img_dir_path(images_dir.c_str()); + if (!boost::filesystem::exists(img_dir_path)) + { + cout << endl << "Image directory does not exist: \t" << images_dir << endl; + return -1; + } + + // get all files in the img directory + size_t max_len = 0; + std::list imgs; + boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end + for (boost::filesystem::directory_iterator file(img_dir_path); file != end_itr; ++file) + { + boost::filesystem::path filename_path = file->path().filename(); + if (boost::filesystem::is_regular_file(file->status()) && + (filename_path.extension() == ".png" || + filename_path.extension() == ".jpg" || + filename_path.extension() == ".jpeg" || + filename_path.extension() == ".tiff") ) + { + std::string filename(filename_path.string()); + imgs.push_back(filename); + max_len = max(max_len, filename.length()); + } + } + + // sort them by filename; add leading zeros to make filename-lengths equal if needed + std::map sorted_imgs; + int n_imgs = 0; + for (std::list::iterator img = imgs.begin(); img != imgs.end(); ++img) + { + sorted_imgs[std::string(max_len - img->length(), '0') + (*img)] = *img; + n_imgs++; + } + + // add offset / step / length + int seq_end = n_imgs; + if( seq_length != 0 ) + int seq_end = std::min( seq_length*seq_step+seq_offset, n_imgs ); + std::map sorted_imgs_aux = sorted_imgs; + sorted_imgs.clear(); + int k = 0; + for (auto img = sorted_imgs_aux.begin(); img != sorted_imgs_aux.end(); std::advance(img,seq_step) ) + { + if( k >= seq_offset && k <= seq_end ) + sorted_imgs.insert( *img ); + k++; + } + + // create scene + sceneRepresentation scene("../app/scene_config.ini"); + Matrix T_c_w, T_f_w = Matrix::Identity(), T_f_w_prev = Matrix::Identity(), T_inc; + T_c_w = Matrix::Identity(); + scene.initializeScene(T_f_w); + + // run SVO (pose estimation) + std::list frames; + int frame_counter = 1; + std::ofstream ofs_traj(traj_out.c_str()); + for (std::map::iterator it = sorted_imgs.begin(); it != sorted_imgs.end(); ++it ) + { + // load image + boost::filesystem::path img_path = img_dir_path / boost::filesystem::path(it->second.c_str()); + if (frame_counter == 1) + std::cout << "reading image " << img_path.string() << std::endl; + cv::Mat img(cv::imread(img_path.string(), CV_8UC1)); + // IMPORTANT: The image must be flipped if focal length is negative + // since the optimization code assumes that both f_x and f_y are positive + { + // get camera config + YAML::Node cam_config = dset_config["cam0"]; + // check f_y sign + if(cam_config["cam_fy"].as()<0) + cv::flip(img, img, 0); // Vertical flipping (around x axis, changes y coordinate) + } + assert(!img.empty()); + + // undistort image + cv::Mat img_rec; + //cam_->undistortImage(img,img_rec); + + // process frame + vo_->addImage(img, frame_counter / (double)fps_); + + // 
display tracking quality + if (vo_->lastFrame() != NULL) { + std::cout << "Frame-Id: " << vo_->lastFrame()->id_ << " \t" + << "#PointFeatures: " << vo_->lastNumPtObservations() << " \t" + << "#LineFeatures: " << vo_->lastNumLsObservations() << " \t" + << "Proc. Time: " << vo_->lastProcessingTime()*1000 << "ms" << std::endl << std::endl; + frames.push_back(vo_->lastFrame()); + + // save trajectory + Eigen::Matrix cov = vo_->lastFrame()->Cov_; + bool skip_frame = false; + for (int i = 0; i < 6; i++) + for (int j = 0; j < 6; j++) + if (! ((1.e-16 < fabs(cov(i,j))) && (fabs(cov(i,j)) < 1.e+16)) ) // likely an invalid pose -> seriously? + skip_frame = true; + + // access the pose of the camera via vo_->lastFrame()->T_f_w_. + Sophus::SE3 world_transf = vo_->lastFrame()->T_f_w_.inverse(); + Eigen::Quaterniond quat = world_transf.unit_quaternion(); + Eigen::Vector3d transl = world_transf.translation(); + Matrix3d rot = world_transf.rotation_matrix(); + if ( ((transl(0) == 0.) && (transl(1) == 0.) && (transl(2) == 0.)) && + ((quat.x() == -0.) && (quat.y() == -0.) && (quat.z() == -0.) && (quat.w() == 1.)) ) + skip_frame = true; + + if (skip_frame) + continue; + //cout << endl << "Frame skipped, watch out" << endl; + + string filename = (it->second).c_str(); + int size = filename.size(); + string timestamp = filename.substr(0,size-4); + ofs_traj << timestamp << " " + << transl(0) << " " << transl(1) << " " << transl(2) << " " + << quat.x() << " " << quat.y() << " " << quat.z() << " " << quat.w() + << std::endl; + + // access the pose of the camera via vo_->lastFrame()->T_f_w_. + T_f_w = vo_->lastFrame()->T_f_w_.matrix(); + T_inc = T_f_w_prev * T_f_w.inverse(); + T_f_w_prev = T_f_w; + scene.setPose( T_inc ); + scene.setText( vo_->lastFrame()->id_, vo_->lastProcessingTime()*1000, vo_->lastNumPtObservations(), 0, vo_->lastNumLsObservations(), 0 ); + cv::Mat dbg_img = vo_->get_debug_image(); + if( !dbg_img.empty() ) + scene.setImage( dbg_img ); + else + scene.setImage("./empty_img.png"); + + // introduce 3d features to the scene + vector< Matrix > points3d; + for(list::iterator it=vo_->lastFrame()->pt_fts_.begin(); it!=vo_->lastFrame()->pt_fts_.end();++it) + if((*it)->feat3D != NULL) + points3d.push_back( (*it)->feat3D->pos_ ); + scene.setPointsSVO(points3d); + + // introduce 3d line segments to the scene + vector< Matrix > lines3d; + Matrix line3d_; + for(list::iterator it=vo_->lastFrame()->seg_fts_.begin(); it!=vo_->lastFrame()->seg_fts_.end(); ++it) + { + if((*it)->feat3D != NULL) + { + line3d_ << (*it)->feat3D->spos_, (*it)->feat3D->epos_; + lines3d.push_back( line3d_ ); + } + } + scene.setLinesSVO(lines3d); + + // update scene + scene.updateScene(); + + } + frame_counter++; + + } + cout << endl << "End of the SVO pipeline" << endl << endl; + + std::cout << "Done." << std::endl; + + return 0; + +} + +} // namespace plsvo + +const cv::String keys = + "{help h usage ? 
+ "{help h usage ? | | print this message }"
+ "{@dset |sin2_tex2_h1_v8_d | dataset folder inside $DATASETS_DIR }"
+ "{expname | | name of the experiment }"
+ "{seqoff seqo |0 | start position in the sequence }"
+ "{seql seqlength |0 | number of frames to test (from 1st ref frame) }"
+ "{seqs seqstep |1 | step size in sequence (nr of frames to advance) }"
+ "{verbose v | | show more verbose information in optimization (akin to debug) }"
+ "{display-optim disp-optim|false | display sparse residue images in optimization }"
+ "{display disp |false | display residue images (debug) }"
+ "{init initialization |2 | value to take as initialization of transformation }"
+ "{haspt haspoints |true | bool to employ points or not }"
+ "{hasls haslines |true | bool to employ line segments or not }"
+ "{mapout | | name of the pcd output file for the map }"
+ "{trajout |trajout.txt | name of the output file for the trajectory }"
+ ;
+
+// Examples of use:
+// sin2_tex2_h1_v8_d ./run_pipeline_comp sin2_tex2_h1_v8_d
+// ICL-NUIM ./run_pipeline_comp ICL-NUIM/lrkt0-retinex
+// ASL_Dataset ./run_pipeline_comp ASL_Dataset/MH_01_easy
+
+int main(int argc, char** argv)
+{
+
+ cv::CommandLineParser parser(argc, argv, keys);
+ parser.about("SVO test: run_pipeline");
+ if (parser.has("help"))
+ {
+ parser.printMessage();
+ return 0;
+ }
+ std::string dataset_name = parser.get<std::string>(0);
+ std::string experiment_name, map_output, traj_output;
+ if(parser.has("expname"))
+ experiment_name = parser.get<std::string>("expname");
+ else
+ experiment_name = dataset_name;
+ if(parser.has("mapout"))
+ map_output = parser.get<std::string>("mapout");
+ else
+ map_output = "map_out.pcd";
+ traj_output = parser.get<std::string>("trajout");
+
+ int init = parser.get<int>("init");
+ bool verbose = parser.has("verbose");
+ bool display_optim = parser.has("disp-optim");
+ bool display = parser.has("disp");
+
+ svo_options opts;
+ opts.seq_offset = parser.get<int>("seqoff");
+ opts.seq_step = parser.get<int>("seqs");
+ opts.seq_length = parser.get<int>("seql");
+ opts.has_points = parser.get<bool>("haspt");
+ opts.has_lines = parser.get<bool>("hasls");
+ opts.traj_out = traj_output;
+ opts.map_out = map_output;
+
+ opts.is_tum = false;
+ if( dataset_name.find("TUM") != std::string::npos )
+ {
+ opts.is_tum = true;
+ }
+
+ if (!parser.check())
+ {
+ parser.printErrors();
+ return 0;
+ }
+ const char* datasets_env = std::getenv("DATASETS_DIR");
+ if( datasets_env == NULL )
+ {
+ std::cerr << "Environment variable DATASETS_DIR is not set" << std::endl;
+ return -1;
+ }
+ // a duplicated '/' is harmless if $DATASETS_DIR already ends with one
+ std::string dataset_dir = std::string(datasets_env) + "/" + dataset_name;
+ opts.dataset_dir = dataset_dir;
+
+ // Read content of the .yaml dataset configuration file
+ YAML::Node dset_config = YAML::LoadFile(dataset_dir+"/dataset_params.yaml");
+ string img_dir = dataset_dir + "/" + dset_config["images_subfolder"].as<string>();
+ opts.images_dir = img_dir;
+
+ // Setup camera and run node
+ YAML::Node cam_config = dset_config["cam0"];
+ string camera_model = cam_config["cam_model"].as<string>();
+ if( camera_model == "Pinhole" )
+ {
+ // setup cameras
+ vk::PinholeCamera* cam_pin;
+ vk::PinholeCamera* cam_pin_und;
+ cam_pin = new vk::PinholeCamera(
+ cam_config["cam_width"].as<double>(),
+ cam_config["cam_height"].as<double>(),
+ fabs(cam_config["cam_fx"].as<double>()),
+ fabs(cam_config["cam_fy"].as<double>()),
+ cam_config["cam_cx"].as<double>(),
+ cam_config["cam_cy"].as<double>(),
+ cam_config["cam_d0"].as<double>(),
+ cam_config["cam_d1"].as<double>(),
+ cam_config["cam_d2"].as<double>(),
+ cam_config["cam_d3"].as<double>() );
+ cam_pin_und = new vk::PinholeCamera(
+ cam_config["cam_width"].as<double>(),
+ cam_config["cam_height"].as<double>(),
+ fabs(cam_config["cam_fx"].as<double>()),
+ fabs(cam_config["cam_fy"].as<double>()),
+ cam_config["cam_cx"].as<double>(),
+ cam_config["cam_cy"].as<double>() );
+ // Set options for FrameHandlerMono
+ plsvo::FrameHandlerMono::Options handler_opts(opts.has_points,opts.has_lines);
+ plsvo::BenchmarkNode benchmark(cam_pin_und);
+ // run pipeline
+ benchmark.runFromFolder(cam_pin,opts);
+ }
+ else if( camera_model == "ATAN" || camera_model == "Atan" )
+ {
+ // setup cameras
+ vk::ATANCamera* cam_atan;
+ vk::ATANCamera* cam_atan_und;
+ cam_atan = new vk::ATANCamera(
+ cam_config["cam_width"].as<double>(),
+ cam_config["cam_height"].as<double>(),
+ fabs(cam_config["cam_fx"].as<double>()),
+ fabs(cam_config["cam_fy"].as<double>()),
+ cam_config["cam_cx"].as<double>(),
+ cam_config["cam_cy"].as<double>(),
+ cam_config["cam_d0"].as<double>() );
+ cam_atan_und = new vk::ATANCamera(
+ cam_config["cam_width"].as<double>(),
+ cam_config["cam_height"].as<double>(),
+ fabs(cam_config["cam_fx"].as<double>()),
+ fabs(cam_config["cam_fy"].as<double>()),
+ cam_config["cam_cx"].as<double>(),
+ cam_config["cam_d0"].as<double>() );
+ // Set options for FrameHandlerMono
+ plsvo::FrameHandlerMono::Options handler_opts(opts.has_points,opts.has_lines);
+ plsvo::BenchmarkNode benchmark(cam_atan_und);
+ // run pipeline
+ benchmark.runFromFolder(cam_atan,opts);
+ }
+
+ printf("BenchmarkNode finished.\n");
+ return 0;
+
+}
diff --git a/app/scene_config.ini b/app/scene_config.ini
new file mode 100644
index 0000000..f669ad5
--- /dev/null
+++ b/app/scene_config.ini
@@ -0,0 +1,20 @@
+[Scene]
+hasCamFix = true
+hasGT = false
+hasChange = true
+hasComparison = true
+hasText = true
+hasAxes = true
+hasLegend = false
+hasHelp = false
+hasTraj = true
+hasCov = false
+hasImg = true
+hasLines = false
+hasPoints = false
+hasFrustum = false
+isKitti = false
+selli = 1.0;
+szoom = 25.0;
+selev = 90.0;
+sazim = -90.0;
diff --git a/build.sh b/build.sh
new file mode 100644
index 0000000..b6a5bef
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+echo "Building 3rdparty/line_descriptor ... "
+cd 3rdparty/line_descriptor
+mkdir -p build
+cd build
+cmake ..
+make -j
+cd ../../../
+
+echo "Building PL-SVO ... "
+mkdir -p build
+cd build
+cmake ..
+make -j
diff --git a/config/dataset_params.yaml b/config/dataset_params.yaml
new file mode 100755
index 0000000..67f9f82
--- /dev/null
+++ b/config/dataset_params.yaml
@@ -0,0 +1,19 @@
+cam0:
+ cam_cx: 385.554786
+ cam_cy: 237.640332
+ cam_d0: -0.277970
+ cam_d1: 0.060647
+ cam_d2: -0.002097
+ cam_d3: 0.000373
+ cam_fx: 416.401549
+ cam_fy: 416.375319
+ cam_height: 480
+ cam_model: Pinhole
+ cam_width: 752
+ rx: 0.0
+ ry: 0.0
+ rz: 0.0
+ tx: 0.0
+ ty: 0.0
+ tz: 0.0
+images_subfolder: images/
diff --git a/include/plsvo/bundle_adjustment.h b/include/plsvo/bundle_adjustment.h
new file mode 100644
index 0000000..4badf6f
--- /dev/null
+++ b/include/plsvo/bundle_adjustment.h
@@ -0,0 +1,120 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_BUNDLE_ADJUSTMENT_H_
+#define SVO_BUNDLE_ADJUSTMENT_H_
+
+#include
+
+namespace g2o {
+class EdgeProjectXYZ2UV;
+class SparseOptimizer;
+class VertexSE3Expmap;
+class VertexSBAPointXYZ;
+}
+
+namespace plsvo {
+
+typedef g2o::EdgeProjectXYZ2UV g2oEdgeSE3;
+typedef g2o::VertexSE3Expmap g2oFrameSE3;
+typedef g2o::VertexSBAPointXYZ g2oPoint;
+
+class Frame;
+class Point;
+class Feature;
+class Map;
+
+/// Local, global and 2-view bundle adjustment with g2o
+namespace ba {
+
+/// Temporary container to hold the g2o edge with reference to frame and point.
+struct EdgeContainerSE3
+{
+ g2oEdgeSE3* edge;
+ Frame* frame;
+ Feature* feature;
+ bool is_deleted;
+ EdgeContainerSE3(g2oEdgeSE3* e, Frame* frame, Feature* feature) :
+ edge(e), frame(frame), feature(feature), is_deleted(false)
+ {}
+};
+
+/// Optimize two camera frames and their observed 3D points.
+/// Is used after initialization.
+void twoViewBA(Frame* frame1, Frame* frame2, double reproj_thresh, Map* map);
+
+/// Local bundle adjustment.
+/// Optimizes core_kfs and their observed map points while keeping the
+/// neighbourhood fixed.
+void localBA(
+ Frame* center_kf,
+ set<Frame*>* core_kfs,
+ Map* map,
+ size_t& n_incorrect_edges_1,
+ size_t& n_incorrect_edges_2,
+ double& init_error,
+ double& final_error);
+
+/// Global bundle adjustment.
+/// Optimizes the whole map. Is currently not used in SVO.
+void globalBA(Map* map);
+
+/// Initialize g2o with solver type, optimization strategy and camera model.
+void setupG2o(g2o::SparseOptimizer * optimizer);
+
+/// Run the optimization on the provided graph.
+void runSparseBAOptimizer(
+ g2o::SparseOptimizer* optimizer,
+ unsigned int num_iter,
+ double& init_error,
+ double& final_error);
+
+/// Creates a g2o vertex from a keyframe object.
+g2oFrameSE3* createG2oFrameSE3(
+ Frame* kf,
+ size_t id,
+ bool fixed);
+
+/// Creates a g2o vertex from a mappoint object.
+g2oPoint* createG2oPoint(
+ Vector3d pos,
+ size_t id,
+ bool fixed);
+
+/// Creates a g2o edge between a g2o keyframe and mappoint vertex with the provided measurement.
+g2oEdgeSE3* createG2oEdgeSE3(
+ g2oFrameSE3* v_kf,
+ g2oPoint* v_mp,
+ const Vector2d& f_up,
+ bool robust_kernel,
+ double huber_width,
+ double weight = 1);
+
+} // namespace ba
+} // namespace plsvo
+
+#endif // SVO_BUNDLE_ADJUSTMENT_H_
diff --git a/include/plsvo/config.h b/include/plsvo/config.h
new file mode 100644
index 0000000..7fcd706
--- /dev/null
+++ b/include/plsvo/config.h
@@ -0,0 +1,230 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_CONFIG_H_
+#define SVO_CONFIG_H_
+
+#include
+#include
+#include
+
+namespace plsvo {
+
+using std::string;
+
+/// Global configuration file of SVO.
+/// Implements the Singleton design pattern to allow global access and to ensure
+/// that only one instance exists.
+class Config
+{
+public:
+ static Config& getInstance();
+
+ /// Has points
+ static bool& hasPoints() { return getInstance().has_pt; }
+
+ /// Has line segments
+ static bool& hasLines() { return getInstance().has_ls; }
+
+ /// Employ points in initialization
+ static bool& initPoints() { return getInstance().init_pt; }
+
+ /// Employ line segments in initialization
+ static bool& initLines() { return getInstance().init_ls; }
+
+ /// Base-name of the tracefiles.
+ static string& traceName() { return getInstance().trace_name; }
+
+ /// Directory where the tracefiles are saved.
+ static string& traceDir() { return getInstance().trace_dir; }
+
+ /// Number of pyramid levels used for features.
+ static size_t& nPyrLevels() { return getInstance().n_pyr_levels; }
+
+ /// Number of pyramid levels used for features (line segments).
+ static size_t& nPyrLevelsSegs() { return getInstance().n_pyr_levels_segs; }
+
+ /// Use the IMU to get relative rotations.
+ static bool& useImu() { return getInstance().use_imu; }
+
+ /// Number of keyframes in the core. The core-kfs are optimized through bundle adjustment.
+ static size_t& coreNKfs() { return getInstance().core_n_kfs; }
+
+ /// Initial scale of the map. Depends on the distance the camera is moved for the initialization.
+ static double& mapScale() { return getInstance().map_scale; }
+
+ /// Feature grid size of a cell in [px].
+ static size_t& gridSize() { return getInstance().grid_size; }
+
+ /// Feature grid size of a cell in [px] (line segments).
+ static size_t& gridSizeSegs() { return getInstance().grid_size_segs; }
+
+ /// Initialization: Minimum required disparity between the first two frames.
+ static double& initMinDisparity() { return getInstance().init_min_disparity; }
+
+ /// Initialization: Minimum number of tracked features.
+ static size_t& initMinTracked() { return getInstance().init_min_tracked; }
+
+ /// Initialization: Minimum number of inliers after RANSAC.
+ static size_t& initMinInliers() { return getInstance().init_min_inliers; }
+
+ /// Maximum level of the Lucas Kanade tracker.
+ static size_t& kltMaxLevel() { return getInstance().klt_max_level; }
+
+ /// Minimum level of the Lucas Kanade tracker.
+ static size_t& kltMinLevel() { return getInstance().klt_min_level; }
+
+ /// Reprojection threshold [px].
+ static double& reprojThresh() { return getInstance().reproj_thresh; }
+
+ /// Whether to run an additional refinement step in pose optimization.
+ static bool& hasRefinement() { return getInstance().has_refinement; }
+
+ /// Reprojection threshold after pose optimization.
+ static double& poseOptimThresh() { return getInstance().poseoptim_thresh; }
+
+ /// Number of iterations in pose optimization.
+ static size_t& poseOptimNumIter() { return getInstance().poseoptim_num_iter; }
+
+ /// Number of iterations in the refinement step of pose optimization.
+ static size_t& poseOptimNumIterRef() { return getInstance().poseoptim_num_iter_ref; }
+
+ /// Maximum number of points to optimize at every iteration.
+ static size_t& structureOptimMaxPts() { return getInstance().structureoptim_max_pts; }
+
+ /// Maximum number of line segments to optimize at every iteration.
+ static size_t& structureOptimMaxSegs() { return getInstance().structureoptim_max_segs; }
+
+ /// Number of iterations in structure optimization.
+ static size_t& structureOptimNumIter() { return getInstance().structureoptim_num_iter; }
+
+ /// Number of iterations in structure optimization (lines).
+ static size_t& structureOptimNumIterSegs() { return getInstance().structureoptim_num_iter_segs; }
+
+ /// Reprojection threshold after bundle adjustment.
+ static double& lobaThresh() { return getInstance().loba_thresh; }
+
+ /// Threshold for the robust Huber kernel of the local bundle adjustment.
+ static double& lobaRobustHuberWidth() { return getInstance().loba_robust_huber_width; }
+
+ /// Number of iterations in the local bundle adjustment.
+ static size_t& lobaNumIter() { return getInstance().loba_num_iter; }
+
+ /// Minimum translation distance between two keyframes. Relative to the average height in the map.
+ static double& kfSelectMinDistT() { return getInstance().kfselect_mindist_t; }
+
+ /// Minimum rotation distance between two keyframes. Relative to the average height in the map.
+ static double& kfSelectMinDistR() { return getInstance().kfselect_mindist_r; }
+
+ /// Select only features with a minimum Harris corner score for triangulation.
+ static double& triangMinCornerScore() { return getInstance().triang_min_corner_score; }
+
+ /// Select only line segments with a minimum length
+ static double& lsdMinLength() { return getInstance().lsd_min_length; }
+
+ /// Subpixel refinement of reprojection and triangulation. Set to 0 if no subpix refinement required!
+ static size_t& subpixNIter() { return getInstance().subpix_n_iter; }
+
+ /// Limit the number of keyframes in the map. This essentially turns the
+ /// system into a visual odometry. Set to 0 to allow an unlimited number
+ /// of keyframes. The minimum number of keyframes is 3.
+ static size_t& maxNKfs() { return getInstance().max_n_kfs; }
+
+ /// How much (in milliseconds) is the camera delayed with respect to the imu.
+ static double& imgImuDelay() { return getInstance().img_imu_delay; }
+
+ /// Maximum number of point features that should be tracked.
+ static size_t& maxFts() { return getInstance().max_fts; }
+
+ /// Maximum number of segment features that should be tracked.
+ static size_t& maxFtsSegs() { return getInstance().max_fts_segs; }
+
+ /// If the number of tracked features drops below this threshold, tracking quality is bad.
+ static size_t& qualityMinFts() { return getInstance().quality_min_fts; }
+
+ /// If the number of tracked features drops below this threshold, tracking quality is bad. (line segments)
+ static size_t& qualityMinFtsSegs() { return getInstance().quality_min_fts_segs; }
+
+ /// If this number of features is dropped within one frame, tracking quality is bad.
+ static double& qualityMaxFtsDrop() { return getInstance().quality_max_drop_fts; }
+
+ /// If this number of features is dropped within one frame, tracking quality is bad. (line segments)
+ static double& qualityMaxFtsDropSegs() { return getInstance().quality_max_drop_fts_segs; }
+
+private:
+ Config();
+ Config(Config const&);
+ void operator=(Config const&);
+ bool has_pt;
+ bool has_ls;
+ bool init_pt;
+ bool init_ls;
+ string trace_name;
+ string trace_dir;
+ size_t n_pyr_levels;
+ size_t n_pyr_levels_segs; // always n_pyr_levels > n_pyr_levels_segs --> TODO: check matcher.cpp
+ bool use_imu;
+ size_t core_n_kfs;
+ double map_scale;
+ size_t grid_size;
+ size_t grid_size_segs;
+ double init_min_disparity;
+ size_t init_min_tracked;
+ size_t init_min_inliers;
+ size_t klt_max_level;
+ size_t klt_min_level;
+ bool has_refinement;
+ double reproj_thresh;
+ double poseoptim_thresh;
+ size_t poseoptim_num_iter;
+ size_t poseoptim_num_iter_ref;
+ size_t structureoptim_max_pts;
+ size_t structureoptim_num_iter;
+ size_t structureoptim_max_segs;
+ size_t structureoptim_num_iter_segs;
+ double loba_thresh;
+ double loba_robust_huber_width;
+ size_t loba_num_iter;
+ double kfselect_mindist_t;
+ double kfselect_mindist_r;
+ double triang_min_corner_score;
+ double lsd_min_length; // TODO: change for some heuristic (the longest N lines or something like that)
+ size_t triang_half_patch_size;
+ size_t subpix_n_iter;
+ size_t max_n_kfs;
+ double img_imu_delay;
+ size_t max_fts;
+ size_t max_fts_segs; // pending
+ size_t quality_min_fts;
+ size_t quality_min_fts_segs; // pending
+ double quality_max_drop_fts;
+ double quality_max_drop_fts_segs; // pending
+};
+
+} // namespace plsvo
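+
+// Usage note (illustrative): the getters return mutable references, so the
+// parameters can be read and also overridden at startup, e.g.
+//   plsvo::Config::hasLines() = false;     // disable line-segment features
+//   size_t n_max = plsvo::Config::maxFts(); // read a parameter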
+
+#endif // SVO_CONFIG_H_
diff --git a/include/plsvo/depth_filter.h b/include/plsvo/depth_filter.h
new file mode 100644
index 0000000..17bce13
--- /dev/null
+++ b/include/plsvo/depth_filter.h
@@ -0,0 +1,234 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_DEPTH_FILTER_H_
+#define SVO_DEPTH_FILTER_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace plsvo {
+
+class Frame;
+class Feature;
+class PointFeat;
+class LineFeat;
+class Point;
+class Line;
+
+/// A seed is a probabilistic depth estimate.
+struct Seed
+{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ static int batch_counter; //!< static counter for Keyframe Id (shared by all seed types)
+
+ int batch_id; //!< Batch id is the id of the keyframe for which the seed was created.
+ int id; //!< Seed ID, only used for visualization.
+ Seed(int batch_id, int id);
+};
+
+/// A seed for a single pixel.
+struct PointSeed : public Seed
+{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ static int seed_counter; //!< static counter for the number of seeds of Point type
+
+ PointFeat* ftr; //!< Feature in the keyframe for which the depth should be computed.
+ float a; //!< a of Beta distribution: When high, probability of inlier is large.
+ float b; //!< b of Beta distribution: When high, probability of outlier is large.
+ float mu; //!< Mean of normal distribution.
+ float z_range; //!< Max range of the possible depth.
+ float sigma2; //!< Variance of normal distribution.
+ Matrix2d patch_cov; //!< Patch covariance in reference image.
+ PointSeed(PointFeat* ftr, float depth_mean, float depth_min);
+};
+
+/// A seed for a segment.
+struct LineSeed : public Seed
+{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ static int seed_counter;
+
+ LineFeat* ftr; //!< Feature in the keyframe for which the depth should be computed.
+ float a; //!< a of Beta distribution: When high, probability of inlier is large.
+ float b; //!< b of Beta distribution: When high, probability of outlier is large.
+ float mu_s; //!< Mean of normal distribution (start point).
+ float mu_e; //!< Mean of normal distribution (end point).
+ float z_range_s; //!< Max range of the possible depth (start point).
+ float z_range_e; //!< Max range of the possible depth (end point).
+ float sigma2_s; //!< Variance of normal distribution (start point).
+ float sigma2_e; //!< Variance of normal distribution (end point).
+ Matrix2d patch_cov_s; //!< Patch covariance in reference image (start point).
+ Matrix2d patch_cov_e; //!< Patch covariance in reference image (end point).
+
+ LineSeed(LineFeat* ftr, float depth_mean, float depth_min);
+};
+
+/// Depth filter implements the Bayesian Update proposed in:
+/// "Video-based, Real-Time Multi View Stereo" by G. Vogiatzis and C. Hernández.
+/// In Image and Vision Computing, 29(7):434-441, 2011.
+///
+/// The class uses a callback mechanism such that it can also be used by other
+/// algorithms than nslam and for simplified testing.
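+///
+/// Sketch of one update step (illustrative, not necessarily the exact
+/// implementation): the depth posterior is modeled as a Gaussian x Beta
+/// mixture. For a new depth measurement x with variance tau2,
+///   s2 = 1/(1/sigma2 + 1/tau2);             // fused Gaussian variance
+///   m  = s2*(mu/sigma2 + x/tau2);           // fused Gaussian mean
+///   C1 = a/(a+b) * N(x; mu, sigma2+tau2);   // inlier hypothesis weight
+///   C2 = b/(a+b) * U(x; 0, z_range);        // outlier (uniform) weight
+/// after normalizing C1+C2 = 1, the new (mu, sigma2) are the moments of the
+/// mixture C1*N(m, s2) + C2*N(mu, sigma2), and (a, b) are increased in
+/// proportion to C1 and C2.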
+class DepthFilter
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ typedef boost::unique_lock<boost::mutex> lock_t;
+ typedef boost::function<void ( Point*, double )> callback_t;
+ typedef boost::function<void ( LineSeg*, double, double )> callback_t_ls;
+
+ /// Depth-filter config parameters
+ struct Options
+ {
+ bool check_ftr_angle; //!< gradient features are only updated if the epipolar line is orthogonal to the gradient.
+ bool epi_search_1d; //!< restrict Gauss Newton in the epipolar search to the epipolar line.
+ bool verbose; //!< display output.
+ bool use_photometric_disparity_error; //!< use photometric disparity error instead of 1px error in tau computation.
+ int max_n_kfs; //!< maximum number of keyframes for which we maintain seeds.
+ double sigma_i_sq; //!< image noise.
+ double seed_convergence_sigma2_thresh; //!< threshold on depth uncertainty for convergence.
+ Options()
+ : check_ftr_angle(false),
+ epi_search_1d(false),
+ verbose(false),
+ use_photometric_disparity_error(false),
+ max_n_kfs(3),
+ sigma_i_sq(5e-4),
+ seed_convergence_sigma2_thresh(200.0)
+ {}
+ } options_;
+
+ DepthFilter(feature_detection::DetectorPtr<PointFeat> pt_feature_detector,
+ feature_detection::DetectorPtr<LineFeat> seg_feature_detector,
+ callback_t seed_converged_cb,
+ callback_t_ls seed_converged_cb_ls);
+
+ virtual ~DepthFilter();
+
+ /// Start this thread when seed updating should be in a parallel thread.
+ void startThread();
+
+ /// Stop the parallel thread that is running.
+ void stopThread();
+
+ /// Add frame to the queue to be processed.
+ void addFrame(FramePtr frame);
+
+ /// Add new keyframe to the queue
+ void addKeyframe(FramePtr frame, double depth_mean, double depth_min);
+ void addKeyframe(FramePtr frame, double depth_mean, double depth_min, cv::Mat depth_image_, bool has_lines);
+
+ /// Remove all seeds which are initialized from the specified keyframe. This
+ /// function is used to make sure that no seed points to a non-existent frame
+ /// when a frame is removed from the map.
+ void removeKeyframe(FramePtr frame);
+
+ /// If the map is reset, call this function such that we don't have pointers
+ /// to old frames.
+ void reset();
+
+ /// Returns a copy of the seeds belonging to frame. Thread-safe.
+ /// Can be used to compute the Next-Best-View in parallel.
+ /// IMPORTANT! Make sure you hold a valid reference counting pointer to frame
+ /// so it is not being deleted while you use it.
+ void getSeedsCopy(const FramePtr& frame, std::list<PointSeed>& seeds);
+
+ /// Return a reference to the seeds. This is NOT THREAD SAFE!
+ std::list<PointSeed, aligned_allocator<PointSeed> >& getSeeds() { return pt_seeds_; }
+
+ /// Bayes update of the seed, x is the measurement, tau2 the measurement uncertainty
+ static void updatePointSeed(
+ const float x,
+ const float tau2,
+ PointSeed* seed);
+
+ /// Bayes update of the seed, x is the measurement, tau2 the measurement uncertainty
+ static void updateLineSeed(const float x_s,
+ const float tau2_s, const float x_e, const float tau2_e,
+ LineSeed *seed);
+
+ /// Compute the uncertainty of the measurement.
+ static double computeTau(
+ const SE3& T_ref_cur,
+ const Vector3d& f,
+ const double z,
+ const double px_error_angle);
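+ // Illustrative note: computeTau perturbs the observation ray by one pixel
+ // of disparity (px_error_angle), re-triangulates the depth against the
+ // baseline encoded in T_ref_cur, and returns |z_plus - z|, a first-order
+ // estimate of the depth standard deviation.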
+
+ feature_detection::DetectorPtr<LineFeat> seg_feature_detector_;
+
+protected:
+
+ feature_detection::DetectorPtr<PointFeat> pt_feature_detector_;
+ callback_t seed_converged_cb_;
+ callback_t_ls seed_converged_cb_ls_;
+ std::list<PointSeed, aligned_allocator<PointSeed> > pt_seeds_;
+ std::list<LineSeed, aligned_allocator<LineSeed> > seg_seeds_;
+ boost::mutex seeds_mut_;
+ bool seeds_updating_halt_; //!< Set this value to true when seeds updating should be interrupted.
+ boost::thread* thread_;
+ std::queue<FramePtr> frame_queue_;
+ boost::mutex frame_queue_mut_;
+ boost::condition_variable frame_queue_cond_;
+ FramePtr new_keyframe_; //!< Next keyframe to extract new seeds.
+ bool new_keyframe_set_; //!< Do we have a new keyframe to process?
+ double new_keyframe_min_depth_; //!< Minimum depth in the new keyframe. Used for range in new seeds.
+ double new_keyframe_mean_depth_; //!< Mean depth in the new keyframe. Used for range in new seeds.
+ vk::PerformanceMonitor permon_; //!< Separate performance monitor since the DepthFilter runs in a parallel thread.
+ Matcher matcher_, matcherls_;
+
+ /// Initialize new seeds from a frame.
+ void initializeSeeds(FramePtr frame);
+ void initializeSeeds(FramePtr frame, cv::Mat depth_image_, bool has_lines);
+
+ /// Update all seeds with a new measurement frame (call feature-specific methods).
+ virtual void updateSeeds(FramePtr frame);
+
+ /// Update the point seeds with the new measurement frame
+ void updatePointSeeds(FramePtr frame);
+
+ /// Update the segment seeds with the new measurement frame
+ void updateLineSeeds(FramePtr frame);
+
+ /// When a new keyframe arrives, the frame queue should be cleared.
+ void clearFrameQueue();
+
+ /// A thread that is continuously updating the seeds.
+ void updateSeedsLoop();
+};
+
+} // namespace plsvo
+
+#endif // SVO_DEPTH_FILTER_H_
diff --git a/include/plsvo/feature.h b/include/plsvo/feature.h
new file mode 100644
index 0000000..d650658
--- /dev/null
+++ b/include/plsvo/feature.h
@@ -0,0 +1,197 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_FEATURE_H_
+#define SVO_FEATURE_H_
+
+#include
+
+namespace plsvo {
+
+/// A salient image region that is tracked across frames.
+/// This class is abstract and is only handled through pointers to the
+/// derived feature types.
+struct Feature
+{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ Frame* frame; //!< Pointer to frame in which the feature was detected.
+ Vector2d px; //!< Any feature must have a center or origin. Coordinates in pixels on pyramid level 0.
+ Vector3d f; //!< Unit-bearing vector of the feature center.
+ int level; //!< Image pyramid level where feature was extracted.
+
+ Feature(const Vector2d& _px);
+ Feature(Frame* _frame, const Vector2d& _px, int _level);
+ Feature(Frame* _frame, const Vector2d& _px, const Vector3d& _f, int _level);
+ virtual ~Feature() = 0; // the pure virtual destructor makes this class abstract
+};
+inline Feature::~Feature() {} // default destructor for inheriting classes
+
+/// Point Feature in 2D
+struct PointFeat : public Feature
+{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ enum FeatureType {
+ CORNER,
+ EDGELET,
+ };
+
+ FeatureType type; //!< Type can be corner or edgelet.
+ Vector2d grad; //!< Dominant gradient direction for edgelets, normalized.
+
+ Point* feat3D; //!< Pointer to 3D point which corresponds to the point feature.
+
+ // Constructors
+ PointFeat(const Vector2d& _px);
+ PointFeat(Frame* _frame, const Vector2d& _px, int _level);
+ PointFeat(Frame* _frame, const Vector2d& _px, const Vector3d& _f, int _level);
+ PointFeat(Frame* _frame, Point* _point, const Vector2d& _px, const Vector3d& _f, int _level);
+};
+
+/// Segment Feature in 2D
+struct LineFeat : public Feature
+{
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ enum FeatureType {
+ LINE_SEGMENT
+ };
+
+ FeatureType type; //!< Type can be line_segment only (for now)
+ Vector2d spx; //!< Start PiXel: Coordinates of start point in pixels on pyramid level 0.
+ Vector2d epx; //!< End PiXel: Coordinates of end point in pixels on pyramid level 0.
+ Vector3d sf; //!< Unit-bearing vector of the start point.
+ Vector3d ef; //!< Unit-bearing vector of the end point.
+ Vector3d line; //!< Normalized vector of the line segment observation.
+ LineSeg* feat3D; //!< Pointer to 3D line segment which corresponds to the feature.
+ Vector2d grad; //!< Dominant gradient direction for edgelets and segments, normalized.
+ double length; //!< Line segment length.
+ double angle; //!< Line segment orientation angle.
+
+ // Constructors
+ LineFeat(const Vector2d& _spx, const Vector2d& _epx);
+ LineFeat(Frame* _frame, const Vector2d& _spx, const Vector2d& _epx, int _level);
+ LineFeat(Frame* _frame, const Vector2d& _spx, const Vector2d& _epx, int _level, double angle);
+ LineFeat(Frame* _frame, const Vector2d& _spx, const Vector2d& _epx, const Vector3d& _sf, const Vector3d& _ef, int _level);
+ LineFeat(Frame* _frame, LineSeg* _ls, const Vector2d& _spx, const Vector2d& _epx, const Vector3d& _sf, const Vector3d& _ef, int _level);
+
+ // Set up uniform sampling of points along the segment
+ size_t setupSampling(size_t patch_size, Vector2d& dif);
+};
+
+/// Patch class for an image region associated to a certain feature
+struct Patch
+{
+ // patch geometry
+ int size;
+ int halfsize;
+ int area;
+ int border;
+ cv::Rect rect; // higher-level geometric object defining the patch
+ // this rectangle is extended to contain the subpixel rectangle (float)
+
+ // patch position
+ float u_ref, v_ref;
+ Vector2i pos_ref_i;
+ int u_ref_i, v_ref_i;
+ // bilinear interpolation weights
+ float wTL, wTR, wBL, wBR;
+
+ // image parameters
+ cv::Mat full_img; // the complete input image
+ cv::Mat roi; // the interest block in the image (Region Of Interest)
+ int stride; // stride to jump between row indices in vectorized image
+
+ // level and scale remain as something external
+ Patch() {} // empty constructor
+ Patch( int _size, const cv::Mat& _img );
+
+ /// Set exact and floor position of the patch reference (center)
+ void setPosition( const Vector2d& px );
+ /// Compute bilinear interpolation weights for a certain location in the image with subpixel precision
+ void computeInterpWeights();
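+ // Illustrative note: for a subpixel position (u,v) with integer floor
+ // (u_i,v_i), the bilinear weights follow as
+ //   subpix_u = u - u_i;  subpix_v = v - v_i;
+ //   wTL = (1-subpix_u)*(1-subpix_v);  wTR = subpix_u*(1-subpix_v);
+ //   wBL = (1-subpix_u)*subpix_v;      wBR = subpix_u*subpix_v;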
+ /// Set interface objects for the region of interest inside the image
+ void setRoi();
+ inline bool isInFrame( int boundary=0 )
+ {
+ // TODO: use cv::Rect to check if patch is contained? See answer http://stackoverflow.com/a/32324568
+ // TODO: invert boolean operations to positive
+ return !( u_ref_i < boundary || v_ref_i < boundary || u_ref_i >= full_img.cols-boundary || v_ref_i >= full_img.rows-boundary );
+ }
+ uchar* begin( int i );
+ uchar* end( int i );
+};
+
+struct RotatedRectPatch
+{
+ // patch geometry
+ float length;
+ float width;
+ float angle;
+ int area;
+ float bSize_x, bSize_y;
+ cv::RotatedRect rect; // higher-level geometric object defining the patch
+ // this rectangle is extended to contain the subpixel rectangle (float)
+ cv::Rect2f brect; // bounding rectangle aligned with axes
+
+ // patch position
+ float u_ref, v_ref;
+ Vector2i pos_ref_i;
+ int u_ref_i, v_ref_i;
+ // bilinear interpolation weights
+ float wTL, wTR, wBL, wBR;
+
+ // image parameters
+ cv::Mat full_img; // the complete input image
+ cv::Mat roi; // the interest block in the image (Region Of Interest)
+ int stride; // stride to jump between row indices in vectorized image
+
+ // level and scale remain as something external
+ RotatedRectPatch() {} // empty constructor
+ RotatedRectPatch( float length, float width, float angle, const cv::Mat& _img ); // angle in radians
+
+ /// Set exact and floor position of the patch reference (center)
+ void setPosition( const Vector2d& px );
+ /// Compute bilinear interpolation weights for a certain location in the image with subpixel precision
+ void computeInterpWeights();
+ /// Set interface objects for the region of interest inside the image
+ void setRoi();
+ inline bool isInFrame( int boundary=0 )
+ {
+ // TODO: use cv::Rect to check if patch is contained? See answer http://stackoverflow.com/a/32324568
+ // TODO: invert boolean operations to positive
+ return !( u_ref_i < boundary || v_ref_i < boundary || u_ref_i >= full_img.cols-boundary || v_ref_i >= full_img.rows-boundary );
+ }
+
+ float horBorderDist(float y);
+ uchar* begin( int i );
+ uchar* end( int i );
+};
+
+} // namespace plsvo
+
+#endif // SVO_FEATURE_H_
diff --git a/include/plsvo/feature3D.h b/include/plsvo/feature3D.h
new file mode 100644
index 0000000..3bf4368
--- /dev/null
+++ b/include/plsvo/feature3D.h
@@ -0,0 +1,233 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_POINT_H_
+#define SVO_POINT_H_
+
+#include
+#include
+
+namespace g2o {
+ class VertexSBAPointXYZ;
+}
+typedef g2o::VertexSBAPointXYZ g2oPoint;
+
+namespace plsvo {
+
+class Feature;
+class PointFeat;
+class LineFeat;
+
+typedef Matrix<double, 2, 3> Matrix23d;
+
+/// A generic geometric feature in 3D from which particular elements inherit (points, segments, all 3D objects)
+template<class FeatureT>
+class Feature3D : boost::noncopyable
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ /// Current status of the 3D object (measure of quality)
+ enum StatusType {
+ TYPE_DELETED,
+ TYPE_CANDIDATE,
+ TYPE_UNKNOWN,
+ TYPE_GOOD
+ };
+
+ static int counter_; //!< Counts the number of created 3D features. Used to set the unique id.
+ int id_; //!< Unique ID of the 3D feature.
+
+ list<FeatureT*> obs_; //!< References to keyframes which observe the 3D feature: 2D features derived from its observation
+ size_t n_obs_; //!< Number of observations: Keyframes AND successful reprojections in intermediate frames.
+
+ int last_published_ts_; //!< Timestamp of last publishing.
+ int last_projected_kf_id_; //!< Flag for the reprojection: don't reproject a 3D feature twice.
+ StatusType type_; //!< Quality of the 3D feature.
+ int n_failed_reproj_; //!< Number of failed reprojections. Used to assess the quality of the 3D feature.
+ int n_succeeded_reproj_; //!< Number of succeeded reprojections. Used to assess the quality of the 3D feature.
+ int last_structure_optim_; //!< Timestamp of last 3D feature optimization
+
+ Feature3D(int _id);
+ virtual ~Feature3D() = 0; // Make this class abstract
+
+ /// Add a reference to a frame.
+ void addFrameRef(FeatureT *ftr);
+
+ /// Remove reference to a frame.
+ bool deleteFrameRef(Frame* frame);
+
+ /// Check whether mappoint has reference to a frame.
+ Feature* findFrameRef(Frame* frame);
+
+ /// Get Frame with similar viewpoint.
+ virtual bool getCloseViewObs(const Vector3d& framepos, Feature*& obs) const = 0;
+
+ /// Optimize point position through minimizing the reprojection error.
+ virtual void optimize(const size_t n_iter) = 0;
+};
+
+// Set default trivial implementation for inheriting classes (non-abstract)
+template<class FeatureT> inline Feature3D<FeatureT>::~Feature3D() {}
+
+/// A 3D point on the surface of the scene.
+class Point : public Feature3D<PointFeat>
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ Vector3d pos_; //!< 3d pos of the point in the world coordinate frame.
+ Vector3d normal_; //!< Surface normal at point.
+ Matrix3d normal_information_; //!< Inverse covariance matrix of normal estimation.
+ bool normal_set_; //!< Flag whether the surface normal was estimated or not.
+
+ g2oPoint* v_g2o_; //!< Temporary pointer to the point-vertex in g2o during bundle adjustment.
+
+ Point(const Vector3d& pos);
+ Point(const Vector3d& pos, PointFeat *ftr);
+
+ /// Initialize point normal. The initial estimate will point towards the frame.
+ void initNormal();
+
+ /// Get Frame with similar viewpoint.
+ bool getCloseViewObs(const Vector3d& framepos, Feature*& obs) const;
+
+ /// Optimize point position through minimizing the reprojection error.
+ void optimize(const size_t n_iter);
+
+ /// Get number of observations.
+ inline size_t nRefs() const { return obs_.size(); }
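+
+ // Illustrative derivation: projecting p = (x,y,z) onto the unit plane gives
+ // u = x/z and v = y/z, hence
+ //   du/dp = [1/z, 0, -x/z^2],   dv/dp = [0, 1/z, -y/z^2];
+ // chaining with dp/dX_w = R_f_w for a world point X_w, and applying the
+ // residual sign convention used here, yields point_jac = -[du/dp; dv/dp]*R_f_w.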
+
+ /// Jacobian of point projection on unit plane (focal length = 1) in frame (f).
+ inline static void jacobian_xyz2uv(
+ const Vector3d& p_in_f,
+ const Matrix3d& R_f_w,
+ Matrix23d& point_jac)
+ {
+ const double z_inv = 1.0/p_in_f[2];
+ const double z_inv_sq = z_inv*z_inv;
+ point_jac(0, 0) = z_inv;
+ point_jac(0, 1) = 0.0;
+ point_jac(0, 2) = -p_in_f[0] * z_inv_sq;
+ point_jac(1, 0) = 0.0;
+ point_jac(1, 1) = z_inv;
+ point_jac(1, 2) = -p_in_f[1] * z_inv_sq;
+ point_jac = - point_jac * R_f_w;
+ }
+};
+
+/// A 3D line segment on the surface of the scene.
+class LineSeg : public Feature3D<LineFeat>
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ Vector3d spos_; //!< 3d pos of the start point in the world coordinate frame.
+ Vector3d epos_; //!< 3d pos of the end point in the world coordinate frame.
+
+ g2oPoint* v_g2o_; //!< Temporary pointer to the point-vertex in g2o during bundle adjustment.
+
+ LineSeg(const Vector3d& spos, const Vector3d& epos);
+ LineSeg(const Vector3d& spos, const Vector3d& epos, LineFeat *ftr);
+
+ /// Get Frame with similar viewpoint.
+ bool getCloseViewObs(const Vector3d& framepos, Feature*& obs) const;
+
+ /// Optimize the segment endpoints by minimizing the reprojection error.
+ void optimize(const size_t n_iter);
+
+ /// Get number of observations.
+ inline size_t nRefs() const { return obs_.size(); }
+
+ /// Jacobian of endpoint projection on unit plane (focal length = 1) in frame (f).
+ inline static void jacobian_xyz2uv(
+ const Vector3d& p_in_f,
+ const Matrix3d& R_f_w,
+ Matrix23d& point_jac)
+ {
+ const double z_inv = 1.0/p_in_f[2];
+ const double z_inv_sq = z_inv*z_inv;
+ point_jac(0, 0) = z_inv;
+ point_jac(0, 1) = 0.0;
+ point_jac(0, 2) = -p_in_f[0] * z_inv_sq;
+ point_jac(1, 0) = 0.0;
+ point_jac(1, 1) = z_inv;
+ point_jac(1, 2) = -p_in_f[1] * z_inv_sq;
+ point_jac = - point_jac * R_f_w;
+ }
+};
+
+/* Define template methods in parent class */
+/* --------------------------------------- */
+template<class FeatureT>
+Feature3D<FeatureT>::Feature3D(int _id) :
+ id_(_id),
+ n_obs_(0),
+ last_published_ts_(0),
+ last_projected_kf_id_(-1),
+ type_(TYPE_UNKNOWN),
+ n_failed_reproj_(0),
+ n_succeeded_reproj_(0),
+ last_structure_optim_(0)
+{}
+
+template<class FeatureT>
+int Feature3D<FeatureT>::counter_ = 0;
+
+template<class FeatureT>
+void Feature3D<FeatureT>::addFrameRef(FeatureT* ftr)
+{
+ obs_.push_front(ftr);
+ ++n_obs_;
+}
+
+template<class FeatureT>
+Feature* Feature3D<FeatureT>::findFrameRef(Frame* frame)
+{
+ for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it)
+ if((*it)->frame == frame)
+ return *it;
+ return NULL; // no keyframe found
+}
+
+template<class FeatureT>
+bool Feature3D<FeatureT>::deleteFrameRef(Frame* frame)
+{
+ for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it)
+ {
+ if((*it)->frame == frame)
+ {
+ obs_.erase(it);
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace plsvo
+
+#endif // SVO_POINT_H_
diff --git a/include/plsvo/feature_alignment.h b/include/plsvo/feature_alignment.h
new file mode 100644
index 0000000..a53cf75
--- /dev/null
+++ b/include/plsvo/feature_alignment.h
@@ -0,0 +1,74 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_FEATURE_ALIGNMENT_H_
+#define SVO_FEATURE_ALIGNMENT_H_
+
+#include
+#include // For the Patch auxiliary class
+
+namespace plsvo {
+
+/// Subpixel refinement of a reference feature patch with the current image.
+/// Implements the inverse-compositional approach (see the "Lucas-Kanade 20
+/// Years On" paper by Baker and Matthews).
+namespace feature_alignment {
+
+bool align1D(
+ const cv::Mat& cur_img,
+ const Vector2f& dir, // direction in which the patch is allowed to move
+ uint8_t* ref_patch_with_border,
+ uint8_t* ref_patch,
+ const int n_iter,
+ Vector2d& cur_px_estimate,
+ double& h_inv);
+
+bool align2D(
+ const cv::Mat& cur_img,
+ uint8_t* ref_patch_with_border,
+ uint8_t* ref_patch,
+ const int n_iter,
+ Vector2d& cur_px_estimate,
+ bool no_simd = false);
+
+bool align2D_SSE2(
+ const cv::Mat& cur_img,
+ uint8_t* ref_patch_with_border,
+ uint8_t* ref_patch,
+ const int n_iter,
+ Vector2d& cur_px_estimate);
+
+bool align2D_NEON(
+ const cv::Mat& cur_img,
+ uint8_t* ref_patch_with_border,
+ uint8_t* ref_patch,
+ const int n_iter,
+ Vector2d& cur_px_estimate);
+
+} // namespace feature_alignment
+} // namespace plsvo
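+
+// Usage sketch (illustrative; the patch sizes and iteration count are
+// assumptions, not fixed by this header): with an 8x8 reference patch plus a
+// 1-px border copied around the feature in the reference image, the position
+// in the current image is refined by
+//   Vector2d px_cur = px_init;
+//   bool converged = feature_alignment::align2D(
+//       cur_img, ref_patch_with_border, ref_patch, 10, px_cur);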
+
+#endif // SVO_FEATURE_ALIGNMENT_H_
diff --git a/include/plsvo/feature_detection.h b/include/plsvo/feature_detection.h
new file mode 100644
index 0000000..7a0fcce
--- /dev/null
+++ b/include/plsvo/feature_detection.h
@@ -0,0 +1,184 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_FEATURE_DETECTION_H_
+#define SVO_FEATURE_DETECTION_H_
+
+#include
+#include
+#include
+
+namespace plsvo {
+
+/// Implementation of various feature detectors.
+namespace feature_detection {
+
+/// Temporary container used for corner detection. Features are initialized from these.
+struct Corner
+{
+ int x; //!< x-coordinate of corner in the image.
+ int y; //!< y-coordinate of corner in the image.
+ int level; //!< pyramid level of the corner.
+ float score; //!< shi-tomasi score of the corner.
+ float angle; //!< for gradient-features: dominant gradient angle.
+ Corner(int x, int y, float score, int level, float angle) :
+ x(x), y(y), level(level), score(score), angle(angle)
+ {}
+};
+typedef vector<Corner> Corners;
+
+/// All detectors should derive from this abstract class.
+template<class FeatureT>
+class AbstractDetector
+{
+public:
+ AbstractDetector(
+ const int img_width,
+ const int img_height,
+ const int cell_size,
+ const int n_pyr_levels);
+
+ virtual ~AbstractDetector() {}
+
+ virtual void detect(
+ Frame* frame,
+ const ImgPyr& img_pyr,
+ const double detection_threshold,
+ list<FeatureT*>& fts) {}
+ // Default no-op: allows instantiating AbstractDetector as a detector that detects nothing
+
+ /// Flag the grid cell as occupied
+ virtual void setGridOccpuancy(const FeatureT& ft) {} // Default does nothing
+
+ /// Set grid cells of existing features as occupied
+ virtual void setExistingFeatures(const list<FeatureT*>& fts) {} // Default does nothing
+
+ const int img_width_;
+ const int img_height_;
+
+protected:
+
+ static const int border_ = 8; //!< no feature should be within 8px of border.
+ const int cell_size_;
+ const int n_pyr_levels_;
+ const int grid_n_cols_;
+ const int grid_n_rows_;
+ vector<bool> grid_occupancy_;
+
+ void resetGrid();
+
+ inline int getCellIndex(int x, int y, int level)
+ {
+ // index of the grid cell containing (x,y), scaled from the pyramid level to level 0
+ const int scale = (1<<level);
+ return (scale*y)/cell_size_*grid_n_cols_ + (scale*x)/cell_size_;
+ }
+};
+
+template<class FeatureT>
+AbstractDetector<FeatureT>::AbstractDetector(
+ const int img_width,
+ const int img_height,
+ const int cell_size,
+ const int n_pyr_levels) :
+ img_width_(img_width),
+ img_height_(img_height),
+ cell_size_(cell_size),
+ n_pyr_levels_(n_pyr_levels),
+ grid_n_cols_(ceil(static_cast<double>(img_width)/cell_size_)),
+ grid_n_rows_(ceil(static_cast<double>(img_height)/cell_size_)),
+ grid_occupancy_(grid_n_cols_*grid_n_rows_, false)
+{}
+
+template<class FeatureT>
+using DetectorPtr = boost::shared_ptr<AbstractDetector<FeatureT> >;
+
+template<class FeatureT>
+void AbstractDetector<FeatureT>::resetGrid()
+{
+ std::fill(grid_occupancy_.begin(), grid_occupancy_.end(), false);
+}
+
+/// FAST detector by Edward Rosten.
+class FastDetector : public AbstractDetector<PointFeat>
+{
+public:
+ FastDetector(
+ const int img_width,
+ const int img_height,
+ const int cell_size,
+ const int n_pyr_levels);
+
+ virtual ~FastDetector() {}
+
+ virtual void detect(
+ Frame* frame,
+ const ImgPyr& img_pyr,
+ const double detection_threshold,
+ list<PointFeat*>& fts);
+
+ virtual void setGridOccpuancy(const PointFeat& ft);
+ virtual void setExistingFeatures(const list<PointFeat*>& fts);
+};
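+
+// Usage sketch (illustrative; the wiring and the threshold choice are
+// assumptions):
+//   feature_detection::FastDetector detector(
+//       img.cols, img.rows, Config::gridSize(), Config::nPyrLevels());
+//   list<PointFeat*> new_features;
+//   detector.detect(frame, frame->img_pyr_,
+//                   Config::triangMinCornerScore(), new_features);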
+
+/// LSD detector from OpenCV.
+class LsdDetector : public AbstractDetector<LineFeat>
+{
+public:
+ LsdDetector(
+ const int img_width,
+ const int img_height,
+ const int cell_size,
+ const int n_pyr_levels);
+
+ virtual ~LsdDetector() {}
+
+ virtual void detect(
+ Frame* frame,
+ const ImgPyr& img_pyr,
+ const double detection_threshold,
+ list<LineFeat*>& fts);
+
+ virtual void detect(
+ Frame* frame,
+ const cv::Mat& rec_img,
+ const double detection_threshold,
+ list<LineFeat*>& fts);
+
+ virtual void setGridOccpuancy(const LineFeat& ft);
+ virtual void setExistingFeatures(const list<LineFeat*>& fts);
+
+protected:
+
+ void resetGridLs(){
+ std::fill(grid_occupancy_.begin(), grid_occupancy_.end(), false);
+ }
+
+};
+
+} // namespace feature_detection
+} // namespace plsvo
+
+#endif // SVO_FEATURE_DETECTION_H_
diff --git a/include/plsvo/frame.h b/include/plsvo/frame.h
new file mode 100644
index 0000000..fe3b392
--- /dev/null
+++ b/include/plsvo/frame.h
@@ -0,0 +1,176 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining              **
+** Points and Line Segments                                                **
+******************************************************************************
+**                                                                         **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+** Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                         **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                         **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                         **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_FRAME_H_
+#define SVO_FRAME_H_
+
+#include
+#include
+#include
+#include
+#include
+
+namespace g2o {
+class VertexSE3Expmap;
+}
+typedef g2o::VertexSE3Expmap g2oFrameSE3;
+
+namespace plsvo {
+
+class Point;
+class LineSeg;
+struct Feature;
+struct PointFeat;
+struct LineFeat;
+
+typedef vector<cv::Mat> ImgPyr;
+
+/// A frame saves the image, the associated features and the estimated pose.
+class Frame : boost::noncopyable
+{
+public:
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ static int frame_counter_; //!< Counts the number of created frames. Used to set the unique id.
+ int id_; //!< Unique id of the frame.
+ double timestamp_; //!< Timestamp of when the image was recorded.
+ vk::AbstractCamera* cam_; //!< Camera model.
+ Sophus::SE3 T_f_w_; //!< Transform (f)rame from (w)orld.
+ Matrix<double, 6, 6> Cov_; //!< Covariance.
+ ImgPyr img_pyr_; //!< Image Pyramid.
+ list<PointFeat*> pt_fts_; //!< List of point features in the image.
+ list<LineFeat*> seg_fts_; //!< List of segment features in the image.
+ vector<PointFeat*> key_pts_; //!< Five features and associated 3D points which are used to detect if two frames have overlapping field of view.
+ bool is_keyframe_; //!< Was this frame selected as a keyframe?
+ g2oFrameSE3* v_kf_; //!< Temporary pointer to the g2o node object of the keyframe.
+ int last_published_ts_; //!< Timestamp of last publishing.
+ cv::Mat rec_img;
+
+ Frame(vk::AbstractCamera* cam, const cv::Mat& img, double timestamp);
+ ~Frame();
+
+ /// Initialize new frame and create image pyramid.
+ void initFrame(const cv::Mat& img);
+
+ /// Select this frame as keyframe.
+ void setKeyframe();
+
+ /// Add a feature to the image
+ void addFeature(PointFeat* ftr);
+ void addFeature(LineFeat* ftr);
+
+ /// The KeyPoints are those five features which are closest to the 4 image corners
+ /// and to the center and which have a 3D point assigned. These points are used
+ /// to quickly check whether two frames have overlapping field of view.
+ void setKeyPoints();
+
+ /// Check if we can select five better key-points.
+ void checkKeyPoints(PointFeat *ftr);
+
+ /// If a point is deleted, we must remove the corresponding key-point.
+ void removeKeyPoint(PointFeat* ftr);
+
+ /// Return number of point observations.
+ inline size_t nObs() const { return pt_fts_.size(); }
+
+ /// Return number of segment observations.
+ inline size_t nLsObs() const { return seg_fts_.size(); }
+
+ /// Check if a point in (w)orld coordinate frame is visible in the image.
+ bool isVisible(const Vector3d& xyz_w) const;
+
+ /// Full resolution image stored in the frame.
+ inline const cv::Mat& img() const { return img_pyr_[0]; }
+
+ /// Was this frame selected as keyframe?
+ inline bool isKeyframe() const { return is_keyframe_; }
+
+ /// Transforms point coordinates in world-frame (w) to camera pixel coordinates (c).
+ inline Vector2d w2c(const Vector3d& xyz_w) const { return cam_->world2cam( T_f_w_ * xyz_w ); }
+
+ /// Transforms pixel coordinates (c) to frame unit sphere coordinates (f).
+ inline Vector3d c2f(const Vector2d& px) const { return cam_->cam2world(px[0], px[1]); }
+
+ /// Transforms pixel coordinates (c) to frame unit sphere coordinates (f).
+ inline Vector3d c2f(const double x, const double y) const { return cam_->cam2world(x, y); }
+
+ /// Transforms point coordinates in world-frame (w) to camera-frame (f).
+ inline Vector3d w2f(const Vector3d& xyz_w) const { return T_f_w_ * xyz_w; }
+
+ /// Transforms point from frame unit sphere (f) frame to world coordinate frame (w).
+ inline Vector3d f2w(const Vector3d& f) const { return T_f_w_.inverse() * f; }
+
+ /// Projects a point from the unit sphere (f) into camera pixels (c).
+ inline Vector2d f2c(const Vector3d& f) const { return cam_->world2cam( f ); }
+
+ /// Return the position of the frame in the (w)orld coordinate frame.
+ inline Vector3d pos() const { return T_f_w_.inverse().translation(); }
+
+ /// Return the rotation of the frame in the (w)orld coordinate frame.
+ inline SO3 rot() const { return T_f_w_.inverse().so3(); }
+
+ /// Frame jacobian for projection of 3D point in (f)rame coordinate to
+ /// unit plane coordinates uv (focal length = 1).
+ inline static void jacobian_xyz2uv(
+ const Vector3d& xyz_in_f,
+ Matrix<double, 2, 6>& J)
+ {
+ const double x = xyz_in_f[0];
+ const double y = xyz_in_f[1];
+ const double z_inv = 1./xyz_in_f[2];
+ const double z_inv_2 = z_inv*z_inv;
+
+ J(0,0) = -z_inv; // -1/z
+ J(0,1) = 0.0; // 0
+ J(0,2) = x*z_inv_2; // x/z^2
+ J(0,3) = y*J(0,2); // x*y/z^2
+ J(0,4) = -(1.0 + x*J(0,2)); // -(1.0 + x^2/z^2)
+ J(0,5) = y*z_inv; // y/z
+
+ J(1,0) = 0.0; // 0
+ J(1,1) = -z_inv; // -1/z
+ J(1,2) = y*z_inv_2; // y/z^2
+ J(1,3) = 1.0 + y*J(1,2); // 1.0 + y^2/z^2
+ J(1,4) = -J(0,3); // -x*y/z^2
+ J(1,5) = -x*z_inv; // x/z
+ }
+};
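+
+// Usage sketch (illustrative): chaining the coordinate helpers for a world
+// point xyz_w observed by a frame:
+//   if(frame->isVisible(xyz_w)) {
+//     Vector2d px    = frame->w2c(xyz_w);   // world -> pixel coordinates
+//     Vector3d f     = frame->c2f(px);      // pixel -> unit bearing vector
+//     Vector3d xyz_f = frame->w2f(xyz_w);   // world -> camera frame
+//   }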
+
+
+/// Some helper functions for the frame object.
+namespace frame_utils {
+
+/// Creates an image pyramid of half-sampled images.
+void createImgPyramid(const cv::Mat& img_level_0, int n_levels, ImgPyr& pyr);
+
+/// Get the average depth of the features in the image.
+bool getSceneDepth(const Frame& frame, double& depth_mean, double& depth_min);
+
+} // namespace frame_utils
+} // namespace plsvo
+
+#endif // SVO_FRAME_H_
diff --git a/include/plsvo/frame_handler_base.h b/include/plsvo/frame_handler_base.h
new file mode 100644
index 0000000..16471f3
--- /dev/null
+++ b/include/plsvo/frame_handler_base.h
@@ -0,0 +1,138 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_FRAME_HANDLER_BASE_H_
+#define SVO_FRAME_HANDLER_BASE_H_
+
+#include <queue>
+#include <vikit/timer.h>
+#include <vikit/ring_buffer.h>
+#include <boost/noncopyable.hpp>
+#include <boost/function.hpp>
+#include <boost/thread.hpp>
+#include <plsvo/global.h>
+#include <plsvo/map.h>
+
+namespace vk
+{
+class AbstractCamera;
+class PerformanceMonitor;
+}
+
+namespace plsvo
+{
+class Point;
+class Matcher;
+class DepthFilter;
+
+/// Base class for various VO pipelines. Manages the map and the state machine.
+class FrameHandlerBase : boost::noncopyable
+{
+public:
+  enum Stage {
+    STAGE_PAUSED,
+    STAGE_FIRST_FRAME,
+    STAGE_SECOND_FRAME,
+    STAGE_DEFAULT_FRAME,
+    STAGE_RELOCALIZING
+  };
+  enum TrackingQuality {
+    TRACKING_INSUFFICIENT,
+    TRACKING_BAD,
+    TRACKING_GOOD
+  };
+  enum UpdateResult {
+    RESULT_NO_KEYFRAME,
+    RESULT_IS_KEYFRAME,
+    RESULT_FAILURE
+  };
+
+  FrameHandlerBase();
+
+  virtual ~FrameHandlerBase();
+
+  /// Get the current map.
+  const Map& map() const { return map_; }
+
+  /// Will reset the map as soon as the current frame is finished processing.
+  void reset() { set_reset_ = true; }
+
+  /// Start processing.
+  void start() { set_start_ = true; }
+
+  /// Get the current stage of the algorithm.
+  Stage stage() const { return stage_; }
+
+  /// Get tracking quality.
+  TrackingQuality trackingQuality() const { return tracking_quality_; }
+
+  /// Get the processing time of the previous iteration.
+  double lastProcessingTime() const { return timer_.getTime(); }
+
+  /// Get the number of feature observations of the last frame.
+  size_t lastNumObservations() const { return num_obs_last_; }
+  size_t lastNumPtObservations() const { return num_obs_last_pt; }
+  size_t lastNumLsObservations() const { return num_obs_last_ls; }
+
+protected:
+  Stage stage_;                 //!< Current stage of the algorithm.
+  bool set_reset_;              //!< Flag that the user can set.
Will reset the system before the next iteration.
+  bool set_start_;              //!< Flag the user can set to start the system when the next image is received.
+  Map map_;                     //!< Map of keyframes created by the slam system.
+  vk::Timer timer_;             //!< Stopwatch to measure time to process frame.
+  vk::RingBuffer<double> acc_frame_timings_; //!< Total processing time of the last 10 frames, used to give some user feedback on the performance.
+  vk::RingBuffer<size_t> acc_num_obs_;       //!< Number of observed features of the last 10 frames, used to give some user feedback on the tracking performance.
+  size_t num_obs_last_;         //!< Number of observations in the previous frame.
+  size_t num_obs_last_pt;       //!< Number of KP observations in the previous frame.
+  size_t num_obs_last_ls;       //!< Number of LS observations in the previous frame.
+  TrackingQuality tracking_quality_; //!< An estimate of the tracking quality based on the number of tracked features.
+
+  /// Before a frame is processed, this function is called.
+  bool startFrameProcessingCommon(const double timestamp);
+
+  /// When a frame is finished processing, this function is called.
+  int finishFrameProcessingCommon(
+      const size_t update_id,
+      const UpdateResult dropout,
+      const size_t num_pt_observations,
+      const size_t num_ls_observations);
+
+  /// Reset the map and frame handler to start from scratch.
+  void resetCommon();
+
+  /// Reset the frame handler. Implement in derived class.
+  virtual void resetAll() { resetCommon(); }
+
+  /// Set the tracking quality based on the number of tracked features.
+  virtual void setTrackingQuality(const size_t num_observations_pt, const size_t num_observations_ls);
+
+  /// Optimize some of the observed 3D points.
+  virtual void optimizeStructure(FramePtr frame, size_t max_n_pts, int max_iter, size_t max_n_segs, int max_iter_segs);
+};
+
+} // namespace plsvo
+
+#endif // SVO_FRAME_HANDLER_BASE_H_
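FrameHandlerBase exposes the pipeline as a small state machine: derived pipelines implement the processing hooks while clients only poll the accessors after each image. A minimal sketch of such a client-side status check (the printf formatting is illustrative, not part of the library):

#include <cstdio>
#include <plsvo/frame_handler_base.h>

// Print a one-line report for any FrameHandlerBase-derived pipeline.
void reportStatus(const plsvo::FrameHandlerBase& vo)
{
  if(vo.stage() == plsvo::FrameHandlerBase::STAGE_DEFAULT_FRAME &&
     vo.trackingQuality() == plsvo::FrameHandlerBase::TRACKING_GOOD)
  {
    std::printf("tracked %zu features (%zu points, %zu segments) in %.3f s\n",
                vo.lastNumObservations(),
                vo.lastNumPtObservations(),
                vo.lastNumLsObservations(),
                vo.lastProcessingTime());
  }
}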
diff --git a/include/plsvo/frame_handler_mono.h b/include/plsvo/frame_handler_mono.h
new file mode 100644
index 0000000..c5f0247
--- /dev/null
+++ b/include/plsvo/frame_handler_mono.h
@@ -0,0 +1,137 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_FRAME_HANDLER_H_
+#define SVO_FRAME_HANDLER_H_
+
+#include <plsvo/frame_handler_base.h>
+#include <plsvo/reprojector.h>
+#include <plsvo/initialization.h>
+#include <plsvo/feature_detection.h>
+#include <plsvo/depth_filter.h>
+
+namespace plsvo {
+
+/// Monocular Visual Odometry Pipeline as described in the SVO paper.
+class FrameHandlerMono : public FrameHandlerBase
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  struct Options
+  {
+    bool has_pt;    //!< use point features in the pipeline
+    bool has_ls;    //!< use line segment features in the pipeline
+    Options() :
+      has_pt(true),
+      has_ls(true)
+    {}
+    Options(bool has_pt, bool has_ls) :
+      has_pt(has_pt),
+      has_ls(has_ls)
+    {}
+  } options_;
+
+  FrameHandlerMono(vk::AbstractCamera* cam);
+  FrameHandlerMono(vk::AbstractCamera* cam, const Options& opts);
+  virtual ~FrameHandlerMono();
+
+  /// Auxiliary functions
+  cv::Mat get_debug_image()
+  {
+    return debug_img;
+  }
+
+  /// Provide an image.
+  void addImage(const cv::Mat& img, double timestamp);
+  void addImage(const cv::Mat& img, double timestamp, cv::Mat& rec);
+
+  /// Set the first frame (used for synthetic datasets in benchmark node)
+  void setFirstFrame(const FramePtr& first_frame);
+
+  /// Get the last frame that has been processed.
+  FramePtr lastFrame() { return last_frame_; }
+
+  /// Get the set of spatially closest keyframes of the last frame.
+  const set<FramePtr>& coreKeyframes() { return core_kfs_; }
+
+  /// Return the feature track to visualize the KLT tracking during initialization.
+  const vector<cv::Point2f>& initFeatureTrackRefPx() const { return klt_homography_init_.px_ref_; }
+  const vector<cv::Point2f>& initFeatureTrackCurPx() const { return klt_homography_init_.px_cur_; }
+
+  /// Access the depth filter.
+  DepthFilter* depthFilter() const { return depth_filter_; }
+
+  /// An external place recognition module may know where to relocalize.
+  bool relocalizeFrameAtPose(
+      const int keyframe_id,
+      const SE3& T_kf_f,
+      const cv::Mat& img,
+      const double timestamp);
+
+protected:
+  vk::AbstractCamera* cam_;         //!< Camera model, can be ATAN, Pinhole or Ocam (see vikit).
+  Reprojector reprojector_;         //!< Projects points from other keyframes into the current frame
+  FramePtr new_frame_;              //!< Current frame.
+  FramePtr last_frame_;             //!< Last frame, not necessarily a keyframe.
+  set<FramePtr> core_kfs_;          //!< Keyframes in the closer neighbourhood.
+  vector< pair<FramePtr,size_t> > overlap_kfs_; //!< All keyframes with overlapping field of view. The paired number specifies how many common mappoints are observed. TODO: why vector!?
+  initialization::KltHomographyInit klt_homography_init_; //!< Used to estimate pose of the first two keyframes by estimating a homography.
+  DepthFilter* depth_filter_;       //!< Depth estimation algorithm runs in a parallel thread and is used to initialize new 3D points.
+  cv::Mat debug_img;
+
+  /// Initialize the visual odometry algorithm.
+  virtual void initialize();
+
+  virtual void initialize(const Options opts);
+
+
+  /// Processes the first frame and sets it as a keyframe.
+  virtual UpdateResult processFirstFrame();
+
+  /// Processes all frames after the first frame until a keyframe is selected.
+  virtual UpdateResult processSecondFrame();
+
+  /// Processes all frames after the first two keyframes.
+  virtual UpdateResult processFrame();
+
+  /// Try relocalizing the frame at relative position to provided keyframe.
+  virtual UpdateResult relocalizeFrame(
+      const SE3& T_cur_ref,
+      FramePtr ref_keyframe);
+
+  /// Reset the frame handler. Implement in derived class.
+  virtual void resetAll();
+
+  /// Keyframe selection criterion.
+  virtual bool needNewKf(double scene_depth_mean);
+
+  void setCoreKfs(size_t n_closest);
+};
+
+} // namespace plsvo
+
+#endif // SVO_FRAME_HANDLER_H_
diff --git a/include/plsvo/global.h b/include/plsvo/global.h
new file mode 100644
index 0000000..20d1ece
--- /dev/null
+++ b/include/plsvo/global.h
@@ -0,0 +1,123 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_GLOBAL_H_
+#define SVO_GLOBAL_H_
+
+#include <list>
+#include <vector>
+#include <string>
+#include <cstdlib>
+#include <stdint.h>
+#include <stdexcept>
+
+#include <Eigen/Core>
+#include <Eigen/StdVector>
+#include <boost/shared_ptr.hpp>
+#include <opencv2/opencv.hpp>
+#include <sophus/se3.h>
+#include <vikit/performance_monitor.h>
+
+// Define macro for TODO messages
+// Statements like:
+//   #pragma message(TODO "Fix this problem!")
+// Which will cause messages like:
+//   C:\Source\Project\main.cpp(47): TODO: Fix this problem!
+// to show up during compiles. Note that you can NOT use the
+// words "error" or "warning" in your reminders, since it will
+// make the IDE think it should abort execution. You can double
+// click on these messages and jump to the line in question.
+#define Stringize( L ) #L
+#define MakeString( M, L ) M(L)
+#define $Line \
+  MakeString( Stringize, __LINE__ )
+#define TODO \
+  __FILE__ "(" $Line ") : TODO: "
+
+#ifdef SVO_USE_ROS
+  #include <ros/console.h>
+  #define SVO_DEBUG_STREAM(x) ROS_DEBUG_STREAM(x)
+  #define SVO_INFO_STREAM(x) ROS_INFO_STREAM(x)
+  #define SVO_WARN_STREAM(x) ROS_WARN_STREAM(x)
+  #define SVO_WARN_STREAM_THROTTLE(rate, x) ROS_WARN_STREAM_THROTTLE(rate, x)
+  #define SVO_ERROR_STREAM(x) ROS_ERROR_STREAM(x)
+#else
+  #define SVO_INFO_STREAM(x) std::cerr<<"\033[0;0m[INFO] "<<x<<"\033[0;0m"<<std::endl;
+  #ifdef SVO_DEBUG_OUTPUT
+    #define SVO_DEBUG_STREAM(x) SVO_INFO_STREAM(x)
+  #else
+    #define SVO_DEBUG_STREAM(x)
+  #endif
+  #define SVO_WARN_STREAM(x) std::cerr<<"\033[0;33m[WARN] "<<x<<"\033[0;0m"<<std::endl;
+  #define SVO_ERROR_STREAM(x) std::cerr<<"\033[1;31m[ERROR] "<<x<<"\033[0;0m"<<std::endl;
+  #include <chrono> // Adapted from rosconsole. Copyright (c) 2008, Willow Garage, Inc.
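+  // SVO_WARN_STREAM_THROTTLE(rate, x) below emits the warning at most once every
+  // `rate` seconds: the time of the last emission is kept in a function-local
+  // static and compared against the wall clock obtained from std::chrono.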
+  #define SVO_WARN_STREAM_THROTTLE(rate, x) \
+    do { \
+      static double __log_stream_throttle__last_hit__ = 0.0; \
+      std::chrono::time_point<std::chrono::system_clock> __log_stream_throttle__now__ = \
+          std::chrono::system_clock::now(); \
+      if (__log_stream_throttle__last_hit__ + rate <= \
+          std::chrono::duration_cast<std::chrono::seconds>( \
+              __log_stream_throttle__now__.time_since_epoch()).count()) { \
+        __log_stream_throttle__last_hit__ = \
+            std::chrono::duration_cast<std::chrono::seconds>( \
+                __log_stream_throttle__now__.time_since_epoch()).count(); \
+        SVO_WARN_STREAM(x); \
+      } \
+    } while(0)
+#endif
+
+namespace plsvo
+{
+  using namespace Eigen;
+  using namespace Sophus;
+
+  const double EPS = 0.0000000001;
+  const double PI = 3.14159265;
+
+#ifdef SVO_TRACE
+  extern vk::PerformanceMonitor* g_permon;
+  #define SVO_LOG(value) g_permon->log(std::string((#value)),(value))
+  #define SVO_LOG2(value1, value2) SVO_LOG(value1); SVO_LOG(value2)
+  #define SVO_LOG3(value1, value2, value3) SVO_LOG2(value1, value2); SVO_LOG(value3)
+  #define SVO_LOG4(value1, value2, value3, value4) SVO_LOG2(value1, value2); SVO_LOG2(value3, value4)
+  #define SVO_START_TIMER(name) g_permon->startTimer((name))
+  #define SVO_STOP_TIMER(name) g_permon->stopTimer((name))
+#else
+  #define SVO_LOG(v)
+  #define SVO_LOG2(v1, v2)
+  #define SVO_LOG3(v1, v2, v3)
+  #define SVO_LOG4(v1, v2, v3, v4)
+  #define SVO_START_TIMER(name)
+  #define SVO_STOP_TIMER(name)
+#endif
+
+  class Frame;
+  typedef boost::shared_ptr<Frame> FramePtr;
+} // namespace plsvo
+
+#endif // SVO_GLOBAL_H_
diff --git a/include/plsvo/initialization.h b/include/plsvo/initialization.h
new file mode 100644
index 0000000..d255f71
--- /dev/null
+++ b/include/plsvo/initialization.h
@@ -0,0 +1,93 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_INITIALIZATION_H
+#define SVO_INITIALIZATION_H
+
+#include <plsvo/global.h>
+
+namespace plsvo {
+
+class FrameHandlerMono;
+
+/// Bootstrapping the map from the first two views.
+namespace initialization {
+
+enum InitResult { FAILURE, NO_KEYFRAME, SUCCESS };
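The class declared next wraps this two-frame bootstrap: corners are tracked with Lucas-Kanade and a homography is estimated once enough disparity has accumulated. As a rough illustration of the tracking step only (a plain OpenCV call with made-up window size and pyramid depth, not the library's trackKlt itself):

#include <opencv2/opencv.hpp>
#include <vector>

// Track keypoints from a reference image into the current one; drop failures.
void trackSketch(const cv::Mat& img_ref, const cv::Mat& img_cur,
                 std::vector<cv::Point2f>& px_ref, std::vector<cv::Point2f>& px_cur)
{
  std::vector<unsigned char> status;
  std::vector<float> error;
  cv::calcOpticalFlowPyrLK(img_ref, img_cur, px_ref, px_cur, status, error,
                           cv::Size(30, 30), 4); // window size and levels are arbitrary here
  // Keep only successfully tracked points, which is what trackKlt() must do
  // before disparities and bearing vectors can be computed.
  size_t j = 0;
  for(size_t i = 0; i < status.size(); ++i)
    if(status[i]) { px_ref[j] = px_ref[i]; px_cur[j] = px_cur[i]; ++j; }
  px_ref.resize(j);
  px_cur.resize(j);
}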
+
+/// Tracks features using Lucas-Kanade tracker and then estimates a homography.
+class KltHomographyInit {
+  friend class plsvo::FrameHandlerMono;
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  FramePtr frame_ref_;
+  KltHomographyInit() {}
+  ~KltHomographyInit() {}
+  InitResult addFirstFrame(FramePtr frame_ref);
+  InitResult addSecondFrame(FramePtr frame_cur);
+  void reset();
+
+protected:
+  vector<cv::Point2f> px_ref_;      //!< keypoints to be tracked in reference frame.
+  vector<cv::Point2f> px_cur_;      //!< tracked keypoints in current frame.
+  vector<Vector3d> f_ref_;          //!< bearing vectors corresponding to the keypoints in the reference image.
+  vector<Vector3d> f_cur_;          //!< bearing vectors corresponding to the keypoints in the current image.
+  vector<double> disparities_;      //!< disparity between (correctly tracked) points of first and second frame.
+  vector<int> inliers_;             //!< inliers after the geometric check (e.g., Homography).
+  vector<Vector3d> xyz_in_cur_;     //!< 3D points computed during the geometric check.
+  SE3 T_cur_from_ref_;              //!< computed transformation between the first two frames.
+};
+
+/// Detect FAST corners in the image.
+void detectFeatures(
+    FramePtr frame,
+    vector<cv::Point2f>& px_vec,
+    vector<Vector3d>& f_vec);
+
+/// Compute optical flow (Lucas Kanade) for selected keypoints.
+void trackKlt(
+    FramePtr frame_ref,
+    FramePtr frame_cur,
+    vector<cv::Point2f>& px_ref,
+    vector<cv::Point2f>& px_cur,
+    vector<Vector3d>& f_ref,
+    vector<Vector3d>& f_cur,
+    vector<double>& disparities);
+
+void computeHomography(
+    const vector<Vector3d>& f_ref,
+    const vector<Vector3d>& f_cur,
+    double focal_length,
+    double reprojection_threshold,
+    vector<int>& inliers,
+    vector<Vector3d>& xyz_in_cur,
+    SE3& T_cur_from_ref);
+
+} // namespace initialization
+} // namespace plsvo
+
+#endif // SVO_INITIALIZATION_H
diff --git a/include/plsvo/map.h b/include/plsvo/map.h
new file mode 100644
index 0000000..96e85db
--- /dev/null
+++ b/include/plsvo/map.h
@@ -0,0 +1,195 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_MAP_H_
+#define SVO_MAP_H_
+
+#include <queue>
+#include <boost/noncopyable.hpp>
+#include <boost/thread.hpp>
+#include <plsvo/global.h>
+
+namespace plsvo {
+
+class Point;
+class LineSeg;
+class Feature;
+class PointFeat;
+class LineFeat;
+class Seed;
+class PointSeed;
+class LineSeed;
+
+/// Container for converged 3D points that are not already assigned to two keyframes.
+class MapPointCandidates
+{
+public:
+  typedef pair<Point*, PointFeat*> PointCandidate;
+  typedef list<PointCandidate> PointCandidateList;
+
+  /// The depth-filter is running in a parallel thread and fills the candidate list.
+ /// This mutex controls concurrent access to point_candidates. + boost::mutex mut_; + + /// Candidate points are created from converged seeds. + /// Until the next keyframe, these points can be used for reprojection and pose optimization. + PointCandidateList candidates_; + list< Point* > trash_points_; + + MapPointCandidates(); + ~MapPointCandidates(); + + /// Add a candidate point. + void newCandidatePoint(Point* point, double depth_sigma2); + + /// Adds the feature to the frame and deletes candidate from list. + void addCandidatePointToFrame(FramePtr frame); + + /// Remove a candidate point from the list of candidates. + bool deleteCandidatePoint(Point* point); + + /// Remove all candidates that belong to a frame. + void removeFrameCandidates(FramePtr frame); + + /// Reset the candidate list, remove and delete all points. + void reset(); + + void deleteCandidate(PointCandidate& c); + + void emptyTrash(); +}; + +/// Container for converged 3D points that are not already assigned to two keyframes. +class MapSegmentCandidates +{ +public: + typedef pair SegmentCandidate; + typedef list SegmentCandidateList; + + /// The depth-filter is running in a parallel thread and fills the canidate list. + /// This mutex controls concurrent access to point_candidates. + boost::mutex mut_; + + /// Candidate points are created from converged seeds. + /// Until the next keyframe, these points can be used for reprojection and pose optimization. + SegmentCandidateList candidates_; + list< LineSeg* > trash_segments_; + + MapSegmentCandidates(); + ~MapSegmentCandidates(); + + /// Add a candidate point. + void newCandidateSegment(LineSeg* ls, double depth_sigma2_s, double depth_sigma2_e); + + /// Adds the feature to the frame and deletes candidate from list. + void addCandidateSegmentToFrame(FramePtr frame); + + /// Remove a candidate point from the list of candidates. + bool deleteCandidateSegment(LineSeg* ls); + + /// Remove all candidates that belong to a frame. + void removeFrameCandidates(FramePtr frame); + + /// Reset the candidate list, remove and delete all points. + void reset(); + + void deleteCandidate(SegmentCandidate& c); + + void emptyTrash(); +}; + +/// Map object which saves all keyframes which are in a map. +class Map : boost::noncopyable +{ +public: + list< FramePtr > keyframes_; //!< List of keyframes in the map. + list< Point* > trash_points_; //!< A deleted point is moved to the trash bin. Now and then this is cleaned. One reason is that the visualizer must remove the points also. + MapPointCandidates point_candidates_; + list< LineSeg* > trash_segments_; //!< A deleted point is moved to the trash bin. Now and then this is cleaned. One reason is that the visualizer must remove the points also. + MapSegmentCandidates segment_candidates_; + + Map(); + ~Map(); + + /// Reset the map. Delete all keyframes and reset the frame and point counters. + void reset(); + + /// Delete a point in the map and remove all references in keyframes to it. + void safeDeletePoint(Point* pt); + void safeDeleteSegment(LineSeg* ls); + + /// Moves the point to the trash queue which is cleaned now and then. + void deletePoint(Point* pt); + void deleteSegment(LineSeg* ls); + + /// Moves the frame to the trash queue which is cleaned now and then. + bool safeDeleteFrame(FramePtr frame); + + /// Remove the references between a point and a frame. + void removePtFrameRef(Frame* frame, PointFeat* ftr); + void removeLsFrameRef(Frame* frame, LineFeat* ftr); + + /// Add a new keyframe to the map. 
+ void addKeyframe(FramePtr new_keyframe); + + /// Given a frame, return all keyframes which have an overlapping field of view. + void getCloseKeyframes(const FramePtr& frame, list< pair >& close_kfs) const; + + /// Return the keyframe which is spatially closest and has overlapping field of view. + FramePtr getClosestKeyframe(const FramePtr& frame) const; + + /// Return the keyframe which is furthest apart from pos. + FramePtr getFurthestKeyframe(const Vector3d& pos) const; + + bool getKeyframeById(const int id, FramePtr& frame) const; + + /// Transform the whole map with rotation R, translation t and scale s. + void transform(const Matrix3d& R, const Vector3d& t, const double& s); + + /// Empty trash bin of deleted keyframes and map points. We don't delete the + /// points immediately to ensure proper cleanup and to provide the visualizer + /// a list of objects which must be removed. + void emptyTrash(); + + /// Return the keyframe which was last inserted in the map. + inline FramePtr lastKeyframe() { return keyframes_.back(); } + + /// Return the number of keyframes in the map + inline size_t size() const { return keyframes_.size(); } +}; + +/// A collection of debug functions to check the data consistency. +namespace map_debug { + +void mapStatistics(Map* map); +void mapValidation(Map* map, int id); +void frameValidation(Frame* frame, int id); +void pointValidation(Point* point, int id); + +} // namespace map_debug +} // namespace plsvo + +#endif // SVO_MAP_H_ diff --git a/include/plsvo/matcher.h b/include/plsvo/matcher.h new file mode 100644 index 0000000..4713017 --- /dev/null +++ b/include/plsvo/matcher.h @@ -0,0 +1,170 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + + +#ifndef SVO_MATCHER_H_ +#define SVO_MATCHER_H_ + +#include + +namespace vk { + class AbstractCamera; + namespace patch_score { + template class ZMSSD; + } +} + +namespace plsvo { + +class Point; +class LineSeg; +class Frame; +class Feature; +class PointFeat; +class LineFeat; + +/// Warp a patch from the reference view to the current view. 
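+/// The warp is a local affine approximation of the view change: it is computed from
+/// the relative pose, the reference depth and the pyramid levels of both views.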
+namespace warp { + +void getWarpMatrixAffine( + const vk::AbstractCamera& cam_ref, + const vk::AbstractCamera& cam_cur, + const Vector2d& px_ref, + const Vector3d& f_ref, + const double depth_ref, + const SE3& T_cur_ref, + const int level_ref, + Matrix2d& A_cur_ref); + +int getBestSearchLevel( + const Matrix2d& A_cur_ref, + const int max_level); + +void warpAffine( + const Matrix2d& A_cur_ref, + const cv::Mat& img_ref, + const Vector2d& px_ref, + const int level_ref, + const int level_cur, + const int halfpatch_size, + uint8_t* patch); + +} // namespace warp + +/// Patch-matcher for reprojection-matching and epipolar search in triangulation. +class Matcher +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + static const int halfpatch_size_ = 4; + static const int patch_size_ = 8; + + typedef vk::patch_score::ZMSSD PatchScore; + + struct Options + { + bool align_1d; //!< in epipolar search: align patch 1D along epipolar line + int align_max_iter; //!< number of iterations for aligning the feature patches in gauss newton + double max_epi_length_optim;//!< max length of epipolar line to skip epipolar search and directly go to img align + size_t max_epi_search_steps;//!< max number of evaluations along epipolar line + bool subpix_refinement; //!< do gauss newton feature patch alignment after epipolar search + bool epi_search_edgelet_filtering; + double epi_search_edgelet_max_angle; + Options() : + align_1d(false), + align_max_iter(10), + max_epi_length_optim(2.0), + max_epi_search_steps(1000), + subpix_refinement(true), + epi_search_edgelet_filtering(true), + epi_search_edgelet_max_angle(0.7) + {} + } options_; + + uint8_t patch_[patch_size_*patch_size_] __attribute__ ((aligned (16))); + uint8_t patch_with_border_[(patch_size_+2)*(patch_size_+2)] __attribute__ ((aligned (16))); + Matrix2d A_cur_ref_; //!< affine warp matrix + Vector2d epi_dir_; + double epi_length_; //!< length of epipolar line segment in pixels (only used for epipolar search) + double h_inv_; //!< hessian of 1d image alignment along epipolar line + int search_level_; + bool reject_; + Feature* ref_ftr_; + Vector2d px_cur_; + + Matcher() = default; + ~Matcher() = default; + + /// Find a match by directly applying subpix refinement. + /// IMPORTANT! This function assumes that px_cur is already set to an estimate that is within ~2-3 pixel of the final result! + bool findMatchDirect( + const Point& pt, + const Frame& frame, + Vector2d& px_cur); + + /// Find a match for a line segment by directly applying subpix refinement in the extreme points. + /// IMPORTANT! This function assumes that px_cur is already set to an estimate that is within ~2-3 pixel of the final result! + bool findMatchDirect( + const LineSeg& ls, + const Frame& frame, + Vector2d& spx_cur, + Vector2d& epx_cur); + + /// Find a match by searching along the epipolar line without using any features. + bool findEpipolarMatchDirect( + const Frame& ref_frame, + const Frame& cur_frame, + const Feature& ref_ftr, + const double d_estimate, + const double d_min, + const double d_max, + double& depth); + + /// Find a match by searching along the epipolar line without using any features. 
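+/// This variant refines one endpoint of a line segment; on success the endpoint's
+/// depth in the reference frame is written to `depth`.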
+  bool findEpipolarMatchDirectSegmentEndpoint(
+      const Frame& ref_frame,
+      const Frame& cur_frame,
+      const Feature& ref_ftr,
+      const double d_estimate,
+      const double d_min,
+      const double d_max,
+      double& depth);
+
+  /// Compute affine warping according to current parameters and the feature position
+  int precomputeRefPatch(
+      const Vector3d& pos,
+      const Vector2d& px,
+      const Vector3d& f,
+      const Frame& cur_frame);
+
+  /// Copy inner content from patch with border to patch
+  void createPatchFromPatchWithBorder();
+};
+
+} // namespace plsvo
+
+#endif // SVO_MATCHER_H_
diff --git a/include/plsvo/point.h b/include/plsvo/point.h
new file mode 100644
index 0000000..b9353cc
--- /dev/null
+++ b/include/plsvo/point.h
@@ -0,0 +1,197 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+
+#ifndef SVO_POINT_H_
+#define SVO_POINT_H_
+
+#include <boost/noncopyable.hpp>
+#include <plsvo/global.h>
+
+namespace g2o {
+  class VertexSBAPointXYZ;
+}
+typedef g2o::VertexSBAPointXYZ g2oPoint;
+
+namespace plsvo {
+
+class Feature;
+
+typedef Matrix<double, 2, 3> Matrix23d;
+
+/// A 3D point on the surface of the scene.
+class Point : boost::noncopyable
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  enum PointType {
+    TYPE_DELETED,
+    TYPE_CANDIDATE,
+    TYPE_UNKNOWN,
+    TYPE_GOOD
+  };
+
+  static int point_counter_;        //!< Counts the number of created points. Used to set the unique id.
+  int id_;                          //!< Unique ID of the point.
+  Vector3d pos_;                    //!< 3d pos of the point in the world coordinate frame.
+  Vector3d normal_;                 //!< Surface normal at point.
+  Matrix3d normal_information_;     //!< Inverse covariance matrix of normal estimation.
+  bool normal_set_;                 //!< Flag whether the surface normal was estimated or not.
+  list<Feature*> obs_;              //!< References to keyframes which observe the point.
+  size_t n_obs_;                    //!< Number of observations: Keyframes AND successful reprojections in intermediate frames.
+  g2oPoint* v_pt_;                  //!< Temporary pointer to the point-vertex in g2o during bundle adjustment.
+  int last_published_ts_;           //!< Timestamp of last publishing.
+  int last_projected_kf_id_;        //!< Flag for the reprojection: don't reproject a pt twice.
+  PointType type_;                  //!< Quality of the point.
+  int n_failed_reproj_;             //!< Number of failed reprojections. Used to assess the quality of the point.
+  int n_succeeded_reproj_;          //!< Number of succeeded reprojections. Used to assess the quality of the point.
+  int last_structure_optim_;        //!< Timestamp of last point optimization.
+
+  Point(const Vector3d& pos);
+  Point(const Vector3d& pos, Feature* ftr);
+  ~Point();
+
+  /// Add a reference to a frame.
+  void addFrameRef(Feature* ftr);
+
+  /// Remove reference to a frame.
+  bool deleteFrameRef(Frame* frame);
+
+  /// Initialize point normal. The initial estimate will point towards the frame.
+  void initNormal();
+
+  /// Check whether mappoint has reference to a frame.
+  Feature* findFrameRef(Frame* frame);
+
+  /// Get Frame with similar viewpoint.
+  bool getCloseViewObs(const Vector3d& pos, Feature*& obs) const;
+
+  /// Get number of observations.
+  inline size_t nRefs() const { return obs_.size(); }
+
+  /// Optimize point position through minimizing the reprojection error.
+  void optimize(const size_t n_iter);
+
+  /// Jacobian of point projection on unit plane (focal length = 1) in frame (f).
+  inline static void jacobian_xyz2uv(
+      const Vector3d& p_in_f,
+      const Matrix3d& R_f_w,
+      Matrix23d& point_jac)
+  {
+    const double z_inv = 1.0/p_in_f[2];
+    const double z_inv_sq = z_inv*z_inv;
+    point_jac(0, 0) = z_inv;
+    point_jac(0, 1) = 0.0;
+    point_jac(0, 2) = -p_in_f[0] * z_inv_sq;
+    point_jac(1, 0) = 0.0;
+    point_jac(1, 1) = z_inv;
+    point_jac(1, 2) = -p_in_f[1] * z_inv_sq;
+    point_jac = - point_jac * R_f_w;
+  }
+};
+
+/// A 3D line on the surface of the scene.
+class LineSeg : boost::noncopyable
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  enum PointType {
+    TYPE_DELETED,
+    TYPE_CANDIDATE,
+    TYPE_UNKNOWN,
+    TYPE_GOOD
+  };
+
+  static int point_counter_;        //!< Counts the number of created points. Used to set the unique id.
+  int id_;                          //!< Unique ID of the point.
+  Vector3d spos_;                   //!< 3d pos of the start point in the world coordinate frame.
+  Vector3d epos_;                   //!< 3d pos of the end point in the world coordinate frame.
+  Vector3d normal_;                 //!< Surface normal at point.
+  Matrix3d normal_information_;     //!< Inverse covariance matrix of normal estimation.
+  bool normal_set_;                 //!< Flag whether the surface normal was estimated or not.
+  list<Feature*> obs_;              //!< References to keyframes which observe the point.
+  size_t n_obs_;                    //!< Number of observations: Keyframes AND successful reprojections in intermediate frames.
+  g2oPoint* v_pt_;                  //!< Temporary pointer to the point-vertex in g2o during bundle adjustment.
+  int last_published_ts_;           //!< Timestamp of last publishing.
+  int last_projected_kf_id_;        //!< Flag for the reprojection: don't reproject a pt twice.
+  PointType type_;                  //!< Quality of the point.
+  int n_failed_reproj_;             //!< Number of failed reprojections. Used to assess the quality of the point.
+  int n_succeeded_reproj_;          //!< Number of succeeded reprojections. Used to assess the quality of the point.
+  int last_structure_optim_;        //!< Timestamp of last point optimization.
+
+  LineSeg(const Vector3d& spos, const Vector3d& epos);
+  LineSeg(const Vector3d& spos, const Vector3d& epos, LineFeat *ftr);
+  ~LineSeg();
+
+  /// Add a reference to a frame.
+  void addFrameRef(Feature* ftr);
+
+  /// Remove reference to a frame.
+  bool deleteFrameRef(Frame* frame);
+
+  /// Initialize point normal. The initial estimate will point towards the frame.
+  void initNormal();
+
+  /// Check whether mappoint has reference to a frame.
+  Feature* findFrameRef(Frame* frame);
+
+  /// Get Frame with similar viewpoint.
+  bool getCloseViewObs(const Vector3d& pos, Feature*& obs) const;
+
+  /// Get number of observations.
+ inline size_t nRefs() const { return obs_.size(); } + + /// Optimize point position through minimizing the reprojection error. + void optimize(const size_t n_iter); + + /// Jacobian of point projection on unit plane (focal length = 1) in frame (f). + inline static void jacobian_xyz2uv( + const Vector3d& p_in_f, + const Matrix3d& R_f_w, + Matrix23d& point_jac) + { + const double z_inv = 1.0/p_in_f[2]; + const double z_inv_sq = z_inv*z_inv; + point_jac(0, 0) = z_inv; + point_jac(0, 1) = 0.0; + point_jac(0, 2) = -p_in_f[0] * z_inv_sq; + point_jac(1, 0) = 0.0; + point_jac(1, 1) = z_inv; + point_jac(1, 2) = -p_in_f[1] * z_inv_sq; + point_jac = - point_jac * R_f_w; + } +}; + + + + + + +} // namespace plsvo + +#endif // SVO_POINT_H_ diff --git a/include/plsvo/pose_optimizer.h b/include/plsvo/pose_optimizer.h new file mode 100644 index 0000000..b5dca8d --- /dev/null +++ b/include/plsvo/pose_optimizer.h @@ -0,0 +1,69 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + +#ifndef SVO_POSE_OPTIMIZER_H_ +#define SVO_POSE_OPTIMIZER_H_ + +#include + +namespace plsvo { + +using namespace Eigen; +using namespace Sophus; +using namespace std; + +typedef Matrix Matrix6d; +typedef Matrix Matrix26d; +typedef Matrix Vector6d; + +class Point; + +/// Motion-only bundle adjustment. Minimize the reprojection error of a single frame. 
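+/// Both overloads below re-estimate the pose of `frame` in place from its point and
+/// segment features; the second one additionally runs n_iter_ref refinement iterations.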
+namespace pose_optimizer {
+
+void optimizeGaussNewton(const double reproj_thresh,
+                         const size_t n_iter,
+                         const bool verbose,
+                         FramePtr& frame,
+                         double& estimated_scale,
+                         double& error_init,
+                         double& error_final,
+                         size_t& num_obs_pt, size_t &num_obs_ls);
+
+void optimizeGaussNewton(const double reproj_thresh,
+                         const size_t n_iter,
+                         const size_t n_iter_ref,
+                         const bool verbose,
+                         FramePtr& frame,
+                         double& estimated_scale,
+                         double& error_init,
+                         double& error_final,
+                         size_t& num_obs_pt, size_t &num_obs_ls);
+
+} // namespace pose_optimizer
+} // namespace plsvo
+
+#endif // SVO_POSE_OPTIMIZER_H_
diff --git a/include/plsvo/reprojector.h b/include/plsvo/reprojector.h
new file mode 100644
index 0000000..063c376
--- /dev/null
+++ b/include/plsvo/reprojector.h
@@ -0,0 +1,163 @@
+/*****************************************************************************
+**   PL-SVO: Semi-direct Monocular Visual Odometry by Combining            **
+**                   Points and Line Segments                              **
+******************************************************************************
+**                                                                         **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                         **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                         **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                         **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                         **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                         **
+*****************************************************************************/
+
+#ifndef SVO_REPROJECTION_H_
+#define SVO_REPROJECTION_H_
+
+#include <plsvo/global.h>
+#include <plsvo/matcher.h>
+
+namespace vk {
+class AbstractCamera;
+}
+
+namespace plsvo {
+
+class Map;
+class Point;
+class LineSeg;
+
+/// Project points from the map into the image and find the corresponding
+/// feature (corner). We don't search a match for every point but only for one
+/// point per cell. Thereby, we achieve a homogeneously distributed set of
+/// matched features and at the same time we can save processing time by not
+/// projecting all points.
+class Reprojector
+{
+public:
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+  /// Reprojector config parameters
+  struct Options {
+    size_t max_n_kfs;   //!< max number of keyframes to reproject from
+    bool find_match_direct;
+    Options()
+    : max_n_kfs(10),
+      find_match_direct(true)
+    {}
+  } options_;
+
+  size_t n_matches_;
+  size_t n_trials_;
+  size_t n_ls_matches_;
+
+  Reprojector(vk::AbstractCamera* cam, Map& map);
+
+  ~Reprojector();
+
+  /// Project points from the map into the image.
+  /// First find keyframes with overlapping field of view
+  /// and project only those map-points.
+  void reprojectMap(
+      FramePtr frame,
+      std::vector< std::pair<FramePtr, size_t> >& overlap_kfs); // pairs contain Frame ptr and number of candidates found from that frame
+
+private:
+
+  /// A candidate is a feature that projects into the image plane
+  /// and for which we will search a matching feature in the image.
+ // template + // struct Candidate + // { + + // }; + + struct PointCandidate { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + Point* pt; //!< 3D point. + Vector2d px; //!< projected 2D pixel location. + PointCandidate(Point* pt, Vector2d& px) : pt(pt), px(px) {} + }; + // A cell is just a list of Reprojector::Candidate + typedef std::list > Cell; + typedef std::vector CandidateGrid; + + struct LineCandidate { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + LineSeg* ls; //!< 3D point. + Vector2d spx; + Vector2d epx; + LineCandidate(LineSeg* ls, Vector2d& spx, Vector2d& epx) : ls(ls), spx(spx), epx(epx) {} + }; + /// The candidate segments are collected in a single list for the whole image. + /// There is no clear heuristic about how to discretize the image space for the segment case. + typedef std::list > LineCandidates; + typedef std::vector LineCandidateGrid; + + /// The grid stores a set of candidate matches. For every grid cell we try to find one match. + struct Grid + { + CandidateGrid cells; + vector cell_order; + int cell_size; + int grid_n_cols; + int grid_n_rows; + }; + + struct GridLs + { + LineCandidateGrid cells; + vector cell_order; + int cell_size; + int grid_n_cols; + int grid_n_rows; + }; + + Grid grid_; + GridLs gridls_; + Matcher matcher_; + Map& map_; + + // Comparison operator (candidate1 > candidate2) + // that returns quality(cand1) > quality(cand2) + static bool pointQualityComparator(PointCandidate& lhs, PointCandidate& rhs); + static bool lineQualityComparator(LineCandidate& lhs, LineCandidate& rhs); + void initializeGrid(vk::AbstractCamera* cam); + void resetReprojGrid(); + /// Get candidates for refinement from the features in an overlapping keyframe + template + int setKfCandidates(FramePtr frame, list fts); + /// Get candidates for refinement from the converged seeds in the map + template + void setMapCandidates(FramePtr frame, MapCandidatesT& map_candidates); + /// Add a new Reprojector::Candidate in the cell of the frame (if any) within which point projects + bool reproject(FramePtr frame, Point* point); + /// Add a new segment candidate to refine + bool reproject(FramePtr frame, LineSeg* segment); + /// Find highest quality candidate that matches to its closest reference feature observation + /// and adds the refined feature to input (current) frame + bool refineBestCandidate(Cell& cell, FramePtr frame); + bool refineBestCandidate(LineCandidates& cell, FramePtr frame); + /// Refine feature in frame (taking previous view as reference) +// template // TODO + bool refine(Point* pt, Vector2d& px_est, FramePtr frame); + /// Refine line segment and in case of success, add it to frame as feature + bool refine(LineSeg* pt, Vector2d& spx_est, Vector2d& epx_est, FramePtr frame); +}; + +} // namespace plsvo + +#endif // SVO_REPROJECTION_H_ diff --git a/include/plsvo/sceneRepresentation.h b/include/plsvo/sceneRepresentation.h new file mode 100644 index 0000000..fdb60d3 --- /dev/null +++ b/include/plsvo/sceneRepresentation.h @@ -0,0 +1,137 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** 
https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + +#include +#include + +#ifdef HAS_MRPT +#include +#include +#include +#include +using namespace mrpt::math; +using namespace mrpt::poses; +using namespace mrpt::opengl; +using namespace mrpt::gui; +using namespace mrpt::utils; +#endif +#include +#include + +using namespace std; +using namespace Eigen; +using namespace cv; + +// Auxiliar functions +template +std::string to_string_with_precision(const T a_value, const int n = 6) +{ + std::ostringstream out; + out << std::setprecision(n) << a_value; + return out.str(); +} + +class sceneRepresentation{ + +public: + + sceneRepresentation(); + sceneRepresentation(string configFile); + ~sceneRepresentation(); + void initialize3DScene(Matrix x_0); + void initialize3DSceneLines(Matrix x_0); + void initialize3DSceneImg(Matrix x_0); + void initialize3DSceneGT(Matrix x_0); + + void initializeScene(Matrix x_0); + void initializeScene(Matrix x_0, Matrix x_0gt); + + bool updateScene(); + + void setText(int frame_, float time_, int nPoints_, int nPointsH_=0, int nLines_=0, int nLinesH_=0); + void setPose(Matrix x_); + void setGT(Matrix xgt_); + void setComparison(Matrix xcomp_); + void setImage(Mat image_); + void setImage(string image_); + void setLegend(); + void setHelp(); + void setPoints(CMatrixFloat pData_); + void setLines(CMatrixFloat lData_); + + void setPointsSVO(vector< Matrix > pointsSVO_); + void setLinesSVO( vector< Matrix > linesSVO_); + + void setStereoCalibration(Matrix3f K_, float b_); + + + bool waitUntilClose(); + bool isOpen(); + void getYPR(float &yaw, float &pitch, float &roll); + void getPose(Matrix &T); + + CImage img_mrpt_legend, img_mrpt_image, img_mrpt_help; + bool hasGT; + +private: + + CMatrixDouble getPoseFormat(Matrix T); + CMatrixDouble33 getCovFormat(MatrixXf cov_); + CPose3D getPoseXYZ(VectorXf x); + + CDisplayWindow3D* win; + COpenGLScenePtr theScene; + COpenGLViewportPtr image, legend, help; + CSetOfObjectsPtr srefObj, srefObj1, gtObj, srefObjGT, elliObjL, elliObjP; + CEllipsoidPtr elliObj; + CSetOfLinesPtr lineObj; + CPointCloudPtr pointObj; + CFrustumPtr frustObj, frustObj1, bbObj, bbObj1; + CAxisPtr axesObj; + + vector< Matrix > pointsSVO; + vector< Matrix > linesSVO; + + float sbb, saxis, srad, sref, sline, sfreq, szoom, selli, selev, sazim, sfrust, slinef; + CVectorDouble v_aux, v_aux_, v_aux1, v_aux1_, v_auxgt, gt_aux_, v_auxgt_; + CPose3D pose, pose_0, pose_gt, pose_ini, ellPose, pose1, change, frustumL_, frustumR_; + Matrix x_ini; + mrptKeyModifier kmods; + int key; + CMatrixDouble33 cov3D; + bool hasText, hasCov, hasChange, hasImg, hasLines, hasPoints, hasFrustum, hasComparison, hasLegend, hasHelp, hasAxes, hasTraj, isKitti; + + Matrix x, xgt, xcomp; + MatrixXf cov, W; + unsigned int frame, nPoints, nPointsH, nLines, nLinesH; + float time; + string img, img_legend, img_help; + 
CMatrixFloat lData, pData; + + float b, sigmaP, sigmaL, f, cx, cy, bsigmaL, bsigmaP; + +}; + diff --git a/include/plsvo/sparse_img_align.h b/include/plsvo/sparse_img_align.h new file mode 100644 index 0000000..ebaddf6 --- /dev/null +++ b/include/plsvo/sparse_img_align.h @@ -0,0 +1,121 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + + +#ifndef SVO_SPARSE_IMG_ALIGN_H_ +#define SVO_SPARSE_IMG_ALIGN_H_ + +#include +#include +#include + +namespace vk { +class AbstractCamera; +} + +namespace plsvo { + +class Feature; +class PointFeat; +class LineFeat; + +/// Optimize the pose of the frame by minimizing the photometric error of feature patches. +class SparseImgAlign : public vk::NLLSSolver<6, SE3> +{ + static const int patch_halfsize_ = 2; + static const int patch_size_ = 2*patch_halfsize_; + static const int patch_area_ = patch_size_*patch_size_; +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + cv::Mat resimg_; + + SparseImgAlign( + int n_levels, + int min_level, + int n_iter, + Method method, + bool display, + bool verbose); + + size_t run( + FramePtr ref_frame, + FramePtr cur_frame); + + /// Return fisher information matrix, i.e. the Hessian of the log-likelihood + /// at the converged state. + Matrix getFisherInformation(); + +protected: + FramePtr ref_frame_; //!< reference frame, has depth for gradient pixels. + FramePtr cur_frame_; //!< only the image is known! + int level_; //!< current pyramid level on which the optimization runs. + bool display_; //!< display residual image. + int max_level_; //!< coarsest pyramid level for the alignment. + int min_level_; //!< finest pyramid level for the alignment. + double scale_ls, scale_pt; + + // cache: + /// Cached values for all pixels corresponding to a list of feature-patches + struct Cache + { + Matrix jacobian; // cached jacobian + cv::Mat ref_patch; // cached patch intensity values (with subpixel precision) + std::vector visible_fts; // mask of visible features + Cache() {} // default constructor + Cache( size_t num_fts, int patch_area ) // constructor + { + // resize cache variables according to the maximum number of incoming features + ref_patch = cv::Mat(num_fts, patch_area, CV_32F); + jacobian.resize(Eigen::NoChange, num_fts*patch_area); + visible_fts.resize(num_fts, false); // TODO: should it be reset at each level? 
+ } + }; + Cache pt_cache_; + Cache seg_cache_; + std::vector patch_offset; // offset for the segment cache + bool have_ref_patch_cache_; // flag to avoid recomputing cache + + void precomputeReferencePatches(); + void precomputeGaussNewtonParamsPoints(Cache &cache, list &fts); + void precomputeGaussNewtonParamsSegments(Cache &cache, list &fts); + virtual double computeResiduals(const SE3& model, bool linearize_system, bool compute_weight_scale = true); + void computeGaussNewtonParamsPoints( + const SE3 &T_cur_from_ref, bool linearize_system, bool compute_weight_scale, + Cache &cache, list &fts, Matrix &H, Matrix &Jres, + std::vector &errors, float &chi2); + void computeGaussNewtonParamsSegments(const SE3 &T_cur_from_ref, bool linearize_system, bool compute_weight_scale, + Cache &cache, list &fts, Matrix &H, Matrix &Jres, + std::vector &errors, float &chi2); + virtual int solve(); + virtual void update (const ModelType& old_model, ModelType& new_model); + virtual void startIteration(); + virtual void finishIteration(); +}; + +} // namespace plsvo + +#endif // SVO_SPARSE_IMG_ALIGN_H_ diff --git a/src/bundle_adjustment.cpp b/src/bundle_adjustment.cpp new file mode 100644 index 0000000..1df3d95 --- /dev/null +++ b/src/bundle_adjustment.cpp @@ -0,0 +1,442 @@ +// This file is part of SVO - Semi-direct Visual Odometry. +// +// Copyright (C) 2014 Christian Forster +// (Robotics and Perception Group, University of Zurich, Switzerland). +// +// SVO is free software: you can redistribute it and/or modify it under the +// terms of the GNU General Public License as published by the Free Software +// Foundation, either version 3 of the License, or any later version. +// +// SVO is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SCHUR_TRICK 1 + +namespace svo { +namespace ba { + +void twoViewBA( + Frame* frame1, + Frame* frame2, + double reproj_thresh, + Map* map) +{ + // scale reprojection threshold in pixels to unit plane + reproj_thresh /= frame1->cam_->errorMultiplier2(); + + // init g2o + g2o::SparseOptimizer optimizer; + setupG2o(&optimizer); + + list edges; + size_t v_id = 0; + + // New Keyframe Vertex 1: This Keyframe is set to fixed! + g2oFrameSE3* v_frame1 = createG2oFrameSE3(frame1, v_id++, true); + optimizer.addVertex(v_frame1); + + // New Keyframe Vertex 2 + g2oFrameSE3* v_frame2 = createG2oFrameSE3(frame2, v_id++, false); + optimizer.addVertex(v_frame2); + + // Create Point Vertices + for(list::iterator it_ftr=frame1->pt_fts_.begin(); it_ftr!=frame1->pt_fts_.end(); ++it_ftr) + { + Point* pt = (*it_ftr)->feat3D; + if(pt == NULL) + continue; + g2oPoint* v_pt = createG2oPoint(pt->pos_, v_id++, false); + optimizer.addVertex(v_pt); + pt->v_g2o_ = v_pt; + g2oEdgeSE3* e = createG2oEdgeSE3(v_frame1, v_pt, vk::project2d((*it_ftr)->f), true, reproj_thresh*Config::lobaRobustHuberWidth()); + optimizer.addEdge(e); + edges.push_back(EdgeContainerSE3(e, frame1, *it_ftr)); // TODO feature now links to frame, so we can simplify edge container! 
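+    // Each matched 3D point contributes one reprojection edge per keyframe vertex;
+    // the measurement is the bearing vector projected onto the unit plane, and the
+    // Huber kernel width is the unit-plane threshold scaled by Config::lobaRobustHuberWidth().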
+ + // find at which index the second frame observes the point + Feature* ftr_frame2 = pt->findFrameRef(frame2); + e = createG2oEdgeSE3(v_frame2, v_pt, vk::project2d(ftr_frame2->f), true, reproj_thresh*Config::lobaRobustHuberWidth()); + optimizer.addEdge(e); + edges.push_back(EdgeContainerSE3(e, frame2, ftr_frame2)); + } + + // Optimization + double init_error, final_error; + runSparseBAOptimizer(&optimizer, Config::lobaNumIter(), init_error, final_error); + printf("2-View BA: Error before/after = %f / %f\n", init_error, final_error); + + // Update Keyframe Positions + frame1->T_f_w_.rotation_matrix() = v_frame1->estimate().rotation().toRotationMatrix(); + frame1->T_f_w_.translation() = v_frame1->estimate().translation(); + frame2->T_f_w_.rotation_matrix() = v_frame2->estimate().rotation().toRotationMatrix(); + frame2->T_f_w_.translation() = v_frame2->estimate().translation(); + + // Update Mappoint Positions + for(list::iterator it=frame1->pt_fts_.begin(); it!=frame1->pt_fts_.end(); ++it) + { + if((*it)->feat3D == NULL) + continue; + (*it)->feat3D->pos_ = (*it)->feat3D->v_g2o_->estimate(); + (*it)->feat3D->v_g2o_ = NULL; + } + + // Find Mappoints with too large reprojection error + const double reproj_thresh_squared = reproj_thresh*reproj_thresh; + size_t n_incorrect_edges = 0; + for(list::iterator it_e = edges.begin(); it_e != edges.end(); ++it_e) + if(it_e->edge->chi2() > reproj_thresh_squared) + { + PointFeat* it_aux = static_cast(it_e->feature); + if(it_aux->feat3D != NULL) + { + map->safeDeletePoint(it_aux->feat3D); + it_aux->feat3D = NULL; + } + ++n_incorrect_edges; + } + + printf("2-View BA: Wrong edges = %zu\n", n_incorrect_edges); +} + +void localBA( + Frame* center_kf, + set* core_kfs, + Map* map, + size_t& n_incorrect_edges_1, + size_t& n_incorrect_edges_2, + double& init_error, + double& final_error) +{ + + // init g2o + g2o::SparseOptimizer optimizer; + setupG2o(&optimizer); + + list edges; + set mps; + list neib_kfs; + size_t v_id = 0; + size_t n_mps = 0; + size_t n_fix_kfs = 0; + size_t n_var_kfs = 1; + size_t n_edges = 0; + n_incorrect_edges_1 = 0; + n_incorrect_edges_2 = 0; + + // Add all core keyframes + for(set::iterator it_kf = core_kfs->begin(); it_kf != core_kfs->end(); ++it_kf) + { + g2oFrameSE3* v_kf = createG2oFrameSE3(it_kf->get(), v_id++, false); + (*it_kf)->v_kf_ = v_kf; + ++n_var_kfs; + assert(optimizer.addVertex(v_kf)); + + // all points that the core keyframes observe are also optimized: + for(list::iterator it_pt=(*it_kf)->pt_fts_.begin(); it_pt!=(*it_kf)->pt_fts_.end(); ++it_pt) + if((*it_pt)->feat3D != NULL) + mps.insert((*it_pt)->feat3D); + } + + // Now go throug all the points and add a measurement. 
Add a fixed neighbour + // Keyframe if it is not in the set of core kfs + double reproj_thresh_2 = Config::lobaThresh() / center_kf->cam_->errorMultiplier2(); + double reproj_thresh_1 = Config::poseOptimThresh() / center_kf->cam_->errorMultiplier2(); + double reproj_thresh_1_squared = reproj_thresh_1*reproj_thresh_1; + for(set::iterator it_pt = mps.begin(); it_pt!=mps.end(); ++it_pt) + { + // Create point vertex + g2oPoint* v_pt = createG2oPoint((*it_pt)->pos_, v_id++, false); + (*it_pt)->v_g2o_ = v_pt; + assert(optimizer.addVertex(v_pt)); + ++n_mps; + + // Add edges + list::iterator it_obs=(*it_pt)->obs_.begin(); + while(it_obs!=(*it_pt)->obs_.end()) + { + Vector2d error = vk::project2d((*it_obs)->f) - vk::project2d((*it_obs)->frame->w2f((*it_pt)->pos_)); + + if((*it_obs)->frame->v_kf_ == NULL) + { + // frame does not have a vertex yet -> it belongs to the neib kfs and + // is fixed. create one: + g2oFrameSE3* v_kf = createG2oFrameSE3((*it_obs)->frame, v_id++, true); + (*it_obs)->frame->v_kf_ = v_kf; + ++n_fix_kfs; + assert(optimizer.addVertex(v_kf)); + neib_kfs.push_back((*it_obs)->frame); + } + + // create edge + g2oEdgeSE3* e = createG2oEdgeSE3((*it_obs)->frame->v_kf_, v_pt, + vk::project2d((*it_obs)->f), + true, + reproj_thresh_2*Config::lobaRobustHuberWidth(), + 1.0 / (1<<(*it_obs)->level)); + assert(optimizer.addEdge(e)); + edges.push_back(EdgeContainerSE3(e, (*it_obs)->frame, *it_obs)); + ++n_edges; + ++it_obs; + } + } + + // structure only + g2o::StructureOnlySolver<3> structure_only_ba; + g2o::OptimizableGraph::VertexContainer points; + for (g2o::OptimizableGraph::VertexIDMap::const_iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); ++it) + { + g2o::OptimizableGraph::Vertex* v = static_cast(it->second); + if (v->dimension() == 3 && v->edges().size() >= 2) + points.push_back(v); + } + structure_only_ba.calc(points, 10); + + // Optimization + if(Config::lobaNumIter() > 0) + runSparseBAOptimizer(&optimizer, Config::lobaNumIter(), init_error, final_error); + + // Update Keyframes + for(set::iterator it = core_kfs->begin(); it != core_kfs->end(); ++it) + { + (*it)->T_f_w_ = SE3( (*it)->v_kf_->estimate().rotation(), + (*it)->v_kf_->estimate().translation()); + (*it)->v_kf_ = NULL; + } + + for(list::iterator it = neib_kfs.begin(); it != neib_kfs.end(); ++it) + (*it)->v_kf_ = NULL; + + // Update Mappoints + for(set::iterator it = mps.begin(); it != mps.end(); ++it) + { + (*it)->pos_ = (*it)->v_g2o_->estimate(); + (*it)->v_g2o_ = NULL; + } + + // Remove Measurements with too large reprojection error + double reproj_thresh_2_squared = reproj_thresh_2*reproj_thresh_2; + for(list::iterator it = edges.begin(); it != edges.end(); ++it) + { + //PointFeat* it_aux = static_cast(it_e->feature); + if(it->edge->chi2() > reproj_thresh_2_squared) //*(1<feature_->level)) + { + PointFeat* feat_aux = static_cast(it->feature); + map->removePtFrameRef(it->frame, feat_aux); + ++n_incorrect_edges_2; + } + } + + // TODO: delete points and edges! 
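+  // Note: activeChi2() sums squared errors on the unit image plane (the
+  // thresholds above were divided by errorMultiplier2(), the focal-length
+  // factor), so taking sqrt() and multiplying by errorMultiplier2() below
+  // converts the accumulated error back to pixel scale.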
+ init_error = sqrt(init_error)*center_kf->cam_->errorMultiplier2(); + final_error = sqrt(final_error)*center_kf->cam_->errorMultiplier2(); +} + +void globalBA(Map* map) +{ + // init g2o + g2o::SparseOptimizer optimizer; + setupG2o(&optimizer); + + list edges; + list< pair > incorrect_edges; + + // Go through all Keyframes + size_t v_id = 0; + double reproj_thresh_2 = Config::lobaThresh() / map->lastKeyframe()->cam_->errorMultiplier2(); + double reproj_thresh_1_squared = Config::poseOptimThresh() / map->lastKeyframe()->cam_->errorMultiplier2(); + reproj_thresh_1_squared *= reproj_thresh_1_squared; + for(list::iterator it_kf = map->keyframes_.begin(); + it_kf != map->keyframes_.end(); ++it_kf) + { + // New Keyframe Vertex + g2oFrameSE3* v_kf = createG2oFrameSE3(it_kf->get(), v_id++, false); + (*it_kf)->v_kf_ = v_kf; + optimizer.addVertex(v_kf); + for(list::iterator it_ftr=(*it_kf)->pt_fts_.begin(); it_ftr!=(*it_kf)->pt_fts_.end(); ++it_ftr) + { + // for each keyframe add edges to all observed mapoints + Point* mp = (*it_ftr)->feat3D; + if(mp == NULL) + continue; + g2oPoint* v_mp = mp->v_g2o_; + if(v_mp == NULL) + { + // mappoint-vertex doesn't exist yet. create a new one: + v_mp = createG2oPoint(mp->pos_, v_id++, false); + mp->v_g2o_ = v_mp; + optimizer.addVertex(v_mp); + } + + // Due to merging of mappoints it is possible that references in kfs suddenly + // have a very large reprojection error which may result in distorted results. + Vector2d error = vk::project2d((*it_ftr)->f) - vk::project2d((*it_kf)->w2f(mp->pos_)); + if(error.squaredNorm() > reproj_thresh_1_squared) + incorrect_edges.push_back(pair(*it_kf, *it_ftr)); + else + { + g2oEdgeSE3* e = createG2oEdgeSE3(v_kf, v_mp, vk::project2d((*it_ftr)->f), + true, + reproj_thresh_2*Config::lobaRobustHuberWidth()); + + edges.push_back(EdgeContainerSE3(e, it_kf->get(), *it_ftr)); + optimizer.addEdge(e); + } + } + } + + // Optimization + double init_error=0.0, final_error=0.0; + if(Config::lobaNumIter() > 0) + runSparseBAOptimizer(&optimizer, Config::lobaNumIter(), init_error, final_error); + + // Update Keyframe and MapPoint Positions + for(list::iterator it_kf = map->keyframes_.begin(); + it_kf != map->keyframes_.end(); ++it_kf) + { + (*it_kf)->T_f_w_ = SE3( (*it_kf)->v_kf_->estimate().rotation(), + (*it_kf)->v_kf_->estimate().translation()); + (*it_kf)->v_kf_ = NULL; + for(list::iterator it_ftr=(*it_kf)->pt_fts_.begin(); it_ftr!=(*it_kf)->pt_fts_.end(); ++it_ftr) + { + Point* mp = (*it_ftr)->feat3D; + if(mp == NULL) + continue; + if(mp->v_g2o_ == NULL) + continue; // mp was updated before + mp->pos_ = mp->v_g2o_->estimate(); + mp->v_g2o_ = NULL; + } + } + + // Remove Measurements with too large reprojection error + for(list< pair >::iterator it=incorrect_edges.begin(); it!=incorrect_edges.end(); ++it) + { + PointFeat* feat_aux = static_cast(it->second); + map->removePtFrameRef(it->first.get(), feat_aux); + } + + double reproj_thresh_2_squared = reproj_thresh_2*reproj_thresh_2; + for(list::iterator it = edges.begin(); it != edges.end(); ++it) + { + if(it->edge->chi2() > reproj_thresh_2_squared) + { + PointFeat* feat_aux = static_cast(it->feature); + map->removePtFrameRef(it->frame, feat_aux); + } + } +} + +void setupG2o(g2o::SparseOptimizer * optimizer) +{ + // TODO: What's happening with all this HEAP stuff? Memory Leak? 
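+  // Note on SCHUR_TRICK (explanatory, standard bundle-adjustment practice):
+  // the 6x3 block solver below exploits that point vertices are marginalized
+  // (see setMarginalized(true) in createG2oPoint) to first solve the much
+  // smaller reduced camera system and then recover the point increments by
+  // back-substitution; the BlockSolverX branch solves the full system jointly.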
+ optimizer->setVerbose(false); + +#if SCHUR_TRICK + // solver + g2o::BlockSolver_6_3::LinearSolverType* linearSolver; + linearSolver = new g2o::LinearSolverCholmod(); + //linearSolver = new g2o::LinearSolverCSparse(); + + g2o::BlockSolver_6_3 * solver_ptr = new g2o::BlockSolver_6_3(linearSolver); + g2o::OptimizationAlgorithmLevenberg * solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); +#else + g2o::BlockSolverX::LinearSolverType * linearSolver; + linearSolver = new g2o::LinearSolverCholmod(); + //linearSolver = new g2o::LinearSolverCSparse(); + g2o::BlockSolverX * solver_ptr = new g2o::BlockSolverX(linearSolver); + g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); +#endif + + solver->setMaxTrialsAfterFailure(5); + optimizer->setAlgorithm(solver); + + // setup camera + g2o::CameraParameters * cam_params = new g2o::CameraParameters(1.0, Vector2d(0.,0.), 0.); + cam_params->setId(0); + if (!optimizer->addParameter(cam_params)) { + assert(false); + } +} + +void +runSparseBAOptimizer(g2o::SparseOptimizer* optimizer, + unsigned int num_iter, + double& init_error, double& final_error) +{ + optimizer->initializeOptimization(); + optimizer->computeActiveErrors(); + init_error = optimizer->activeChi2(); + optimizer->optimize(num_iter); + final_error = optimizer->activeChi2(); +} + +g2oFrameSE3* +createG2oFrameSE3(Frame* frame, size_t id, bool fixed) +{ + g2oFrameSE3* v = new g2oFrameSE3(); + v->setId(id); + v->setFixed(fixed); + v->setEstimate(g2o::SE3Quat(frame->T_f_w_.unit_quaternion(), frame->T_f_w_.translation())); + return v; +} + +g2oPoint* +createG2oPoint(Vector3d pos, + size_t id, + bool fixed) +{ + g2oPoint* v = new g2oPoint(); + v->setId(id); +#if SCHUR_TRICK + v->setMarginalized(true); +#endif + v->setFixed(fixed); + v->setEstimate(pos); + return v; +} + +g2oEdgeSE3* +createG2oEdgeSE3( g2oFrameSE3* v_frame, + g2oPoint* v_point, + const Vector2d& f_up, + bool robust_kernel, + double huber_width, + double weight) +{ + g2oEdgeSE3* e = new g2oEdgeSE3(); + e->setVertex(0, dynamic_cast(v_point)); + e->setVertex(1, dynamic_cast(v_frame)); + e->setMeasurement(f_up); + e->information() = weight * Eigen::Matrix2d::Identity(2,2); + g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber(); // TODO: memory leak + rk->setDelta(huber_width); + e->setRobustKernel(rk); + e->setParameterId(0, 0); //old: e->setId(v_point->id()); + return e; +} + +} // namespace ba +} // namespace svo diff --git a/src/config.cpp b/src/config.cpp new file mode 100644 index 0000000..8fc6396 --- /dev/null +++ b/src/config.cpp @@ -0,0 +1,136 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. 
** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + + +#ifdef SVO_USE_ROS +#include +#endif +#include + +namespace plsvo { + +Config::Config() : +#ifdef SVO_USE_ROS + has_pt(vk::getParam( "plsvo/has_pt",true)), + has_ls(vk::getParam( "plsvo/has_ls",true)), + init_pt(vk::getParam( "plsvo/init_pt",true)), + init_ls(vk::getParam( "plsvo/init_ls",true)), + use_imu(vk::getParam( "plsvo/use_imu",false)), + trace_name(vk::getParam( "plsvo/trace_name","svo")), + trace_dir(vk::getParam( "plsvo/trace_dir","/tmp")), + n_pyr_levels(vk::getParam( "plsvo/n_pyr_levels",3)), + n_pyr_levels_segs(vk::getParam( "plsvo/n_pyr_levels_segs",1)), + core_n_kfs(vk::getParam( "plsvo/core_n_kfs",5)), + map_scale(vk::getParam( "plsvo/map_scale",1.0)), + grid_size(vk::getParam( "plsvo/grid_size",25)), + grid_size_segs(vk::getParam( "plsvo/grid_size_segs",25)), + init_min_disparity(vk::getParam( "plsvo/init_min_disparity",40.0)), + init_min_tracked(vk::getParam( "plsvo/init_min_tracked",40)), + init_min_inliers(vk::getParam( "plsvo/init_min_inliers",30)), + klt_max_level(vk::getParam( "plsvo/klt_max_level",4)), + klt_min_level(vk::getParam( "plsvo/klt_min_level",2)), + has_refinement(vk::getParam( "plsvo/has_refinement",true)), + reproj_thresh(vk::getParam( "plsvo/reproj_thresh",2.0)), + poseoptim_thresh(vk::getParam( "plsvo/poseoptim_thresh",2.0)), + poseoptim_num_iter(vk::getParam( "plsvo/poseoptim_num_iter",10)), + poseoptim_num_iter_ref(vk::getParam( "plsvo/poseoptim_num_iter_ref",3)), + structureoptim_max_pts(vk::getParam( "plsvo/structureoptim_max_pts",20)), + structureoptim_num_iter(vk::getParam( "plsvo/structureoptim_num_iter",5)), + structureoptim_max_segs(vk::getParam( "plsvo/structureoptim_max_segs",20)), + structureoptim_num_iter_segs(vk::getParam( "plsvo/structureoptim_num_iter_segs",5)), + loba_thresh(vk::getParam( "plsvo/loba_thresh",2.0)), + loba_robust_huber_width(vk::getParam( "plsvo/loba_robust_huber_width",1.0)), + loba_num_iter(vk::getParam( "plsvo/loba_num_iter",0)), + kfselect_mindist_t(vk::getParam( "plsvo/kfselect_mindist_t",0.06)), + kfselect_mindist_r(vk::getParam( "plsvo/kfselect_mindist_r",3.0)), + triang_min_corner_score(vk::getParam( "plsvo/triang_min_corner_score",20.0)), + lsd_min_length(vk::getParam( "plsvo/lsd_min_length",0.15)), + img_imu_delay(vk::getParam( "plsvo/img_imu_delay",0.0)), + triang_half_patch_size(vk::getParam( "plsvo/triang_half_patch_size",4)), + subpix_n_iter(vk::getParam( "plsvo/subpix_n_iter",10)), + max_n_kfs(vk::getParam( "plsvo/max_n_kfs",0)), + max_fts(vk::getParam( "plsvo/max_fts",100)), + max_fts_segs(vk::getParam( "plsvo/max_fts_segs",100)), + quality_min_fts(vk::getParam( "plsvo/quality_min_fts",20)), + quality_max_drop_fts(vk::getParam( "plsvo/quality_max_drop_fts",50)), + quality_min_fts_segs(vk::getParam( "plsvo/quality_min_fts_segs",20)), + quality_max_drop_fts_segs(vk::getParam( "plsvo/quality_max_drop_fts_segs",20)) +#else + has_pt(true), + has_ls(true), + init_pt(true), + init_ls(true), + trace_name("svo"), + trace_dir("/tmp"), + n_pyr_levels(3), + n_pyr_levels_segs(1), + use_imu(false), + core_n_kfs(5), + map_scale(1.0), + 
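+  // Note: the hard-coded defaults below are meant to mirror the ROS-parameter
+  // branch above; quality_max_drop_fts_segs differs, however (20 via the ROS
+  // default, 50 here), which looks like an unintended mismatch between the
+  // two branches.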
grid_size(25), + grid_size_segs(25), + init_min_disparity(40.0), + init_min_tracked(40), + init_min_inliers(30), + klt_max_level(4), + klt_min_level(2), + has_refinement(true), + reproj_thresh(2.0), + poseoptim_thresh(2.0), + poseoptim_num_iter(10), + poseoptim_num_iter_ref(3), + structureoptim_max_pts(20), + structureoptim_num_iter(5), + structureoptim_max_segs(20), + structureoptim_num_iter_segs(5), + loba_thresh(2.0), + loba_robust_huber_width(1.0), + loba_num_iter(0), + kfselect_mindist_t(0.06), + kfselect_mindist_r(3.0), + triang_min_corner_score(20.0), + lsd_min_length(0.15), + triang_half_patch_size(4), + subpix_n_iter(10), + max_n_kfs(0), + img_imu_delay(0.0), + max_fts(100), + max_fts_segs(100), + quality_min_fts(20), + quality_max_drop_fts(50), + quality_min_fts_segs(20), + quality_max_drop_fts_segs(50) +#endif +{} + +Config& Config::getInstance() +{ + static Config instance; // Instantiated on first use and guaranteed to be destroyed + return instance; +} + +} // namespace plsvo + diff --git a/src/depth_filter.cpp b/src/depth_filter.cpp new file mode 100644 index 0000000..bdbde1b --- /dev/null +++ b/src/depth_filter.cpp @@ -0,0 +1,586 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . 
** +** ** +*****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace plsvo { + +int Seed::batch_counter = 0; +int PointSeed::seed_counter = 0; +int LineSeed::seed_counter = 0; + +Seed::Seed(int _batch_id, int _id) : + batch_id(_batch_id), + id(_id) +{} + +PointSeed::PointSeed(PointFeat* ftr, float depth_mean, float depth_min) : + Seed(batch_counter,seed_counter++), + ftr(ftr), + a(10), + b(10), + mu(1.0/depth_mean), + z_range(1.0/depth_min), + sigma2(z_range*z_range/36) +{} + +LineSeed::LineSeed(LineFeat *ftr, float depth_mean, float depth_min) : + Seed(batch_counter,seed_counter++), + ftr(ftr), + a(10), + b(10), + mu_s(1.0/depth_mean), + mu_e(1.0/depth_mean), + z_range_s(1.0/depth_min), + z_range_e(1.0/depth_min), + sigma2_s(z_range_s*z_range_s/36), + sigma2_e(z_range_e*z_range_e/36) +{} + +DepthFilter::DepthFilter( + feature_detection::DetectorPtr pt_feature_detector, + feature_detection::DetectorPtr seg_feature_detector, + callback_t seed_converged_cb, + callback_t_ls seed_converged_cb_ls) : + pt_feature_detector_(pt_feature_detector), + seg_feature_detector_(seg_feature_detector), + seed_converged_cb_(seed_converged_cb), + seed_converged_cb_ls_(seed_converged_cb_ls), + seeds_updating_halt_(false), + thread_(NULL), + new_keyframe_set_(false), + new_keyframe_min_depth_(0.0), + new_keyframe_mean_depth_(0.0) +{} + +DepthFilter::~DepthFilter() +{ + stopThread(); + SVO_INFO_STREAM("DepthFilter destructed."); +} + +void DepthFilter::startThread() +{ + thread_ = new boost::thread(&DepthFilter::updateSeedsLoop, this); +} + +void DepthFilter::stopThread() +{ + SVO_INFO_STREAM("DepthFilter stop thread invoked."); + if(thread_ != NULL) + { + SVO_INFO_STREAM("DepthFilter interrupt and join thread... 
"); + seeds_updating_halt_ = true; + thread_->interrupt(); + thread_->join(); + thread_ = NULL; + } +} + +void DepthFilter::addFrame(FramePtr frame) +{ + if(thread_ != NULL) + { + { + lock_t lock(frame_queue_mut_); + if(frame_queue_.size() > 2) + frame_queue_.pop(); + frame_queue_.push(frame); + } + seeds_updating_halt_ = false; + frame_queue_cond_.notify_one(); + } + else + updateSeeds(frame); +} + +void DepthFilter::addKeyframe(FramePtr frame, double depth_mean, double depth_min) +{ + new_keyframe_min_depth_ = depth_min; + new_keyframe_mean_depth_ = depth_mean; + // if there exists an parallel thread for Depth-Filter just setup the control variables to jump into it + if(thread_ != NULL) + { + new_keyframe_ = frame; + new_keyframe_set_ = true; + seeds_updating_halt_ = true; + frame_queue_cond_.notify_one(); + } + // if there is no independent thread for Depth-Filter call the initialization of seeds explicitly (same effect) + else + initializeSeeds(frame); + +} + +void DepthFilter::initializeSeeds(FramePtr frame) +{ + + list new_pt_features; + list new_seg_features; + + if(Config::hasPoints()) + { + /* detect new point features in point-unpopulated cells of the grid */ + // populate the occupancy grid of the detector with current features + pt_feature_detector_->setExistingFeatures(frame->pt_fts_); + // detect features to fill the free cells in the image + pt_feature_detector_->detect(frame.get(), frame->img_pyr_, + Config::triangMinCornerScore(), new_pt_features); + } + + if(Config::hasLines()) + { + /* detect new segment features in line-unpopulated cells of the grid */ + // populate the occupancy grid of the detector with current features + seg_feature_detector_->setExistingFeatures(frame->seg_fts_); + // detect features + seg_feature_detector_->detect(frame.get(), frame->img_pyr_, + Config::lsdMinLength(), new_seg_features); + } + + // lock the parallel updating thread? 
+ seeds_updating_halt_ = true; + lock_t lock(seeds_mut_); // by locking the updateSeeds function stops + // increase by one the keyframe counter (to account for this new one) + ++PointSeed::batch_counter; + + // initialize a point seed for every new point feature + std::for_each(new_pt_features.begin(), new_pt_features.end(), [&](PointFeat* ftr){ + pt_seeds_.push_back(PointSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_)); + }); + + // initialize a segment seed for every new segment feature + std::for_each(new_seg_features.begin(), new_seg_features.end(), [&](LineFeat* ftr){ + seg_seeds_.push_back(LineSeed(ftr, new_keyframe_mean_depth_, new_keyframe_min_depth_)); + }); + + if(options_.verbose) + SVO_INFO_STREAM("DepthFilter: Initialized "<::iterator it=pt_seeds_.begin(); + size_t n_removed = 0; + while(it!=pt_seeds_.end()) + { + if(it->ftr->frame == frame.get()) + { + it = pt_seeds_.erase(it); + ++n_removed; + } + else + ++it; + } + seeds_updating_halt_ = false; +} + +void DepthFilter::reset() +{ + seeds_updating_halt_ = true; + { + lock_t lock(seeds_mut_); + pt_seeds_.clear(); + } + lock_t lock(); + while(!frame_queue_.empty()) + frame_queue_.pop(); + seeds_updating_halt_ = false; + + if(options_.verbose) + SVO_INFO_STREAM("DepthFilter: RESET."); +} + +void DepthFilter::updateSeedsLoop() +{ + while(!boost::this_thread::interruption_requested()) + { + FramePtr frame; + { + lock_t lock(frame_queue_mut_); + while(frame_queue_.empty() && new_keyframe_set_ == false) + frame_queue_cond_.wait(lock); + if(new_keyframe_set_) + { + new_keyframe_set_ = false; + seeds_updating_halt_ = false; + clearFrameQueue(); + frame = new_keyframe_; + } + else + { + frame = frame_queue_.front(); + frame_queue_.pop(); + } + } + updateSeeds(frame); + if(frame->isKeyframe()) + initializeSeeds(frame); + } +} + +void DepthFilter::updateSeeds(FramePtr frame) +{ + // update the point-type seeds + updatePointSeeds(frame); + // update the line-type seeds + updateLineSeeds(frame); +} + +void DepthFilter::updatePointSeeds(FramePtr frame) +{ + // update only a limited number of seeds, because we don't have time to do it + // for all the seeds in every frame! 
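+  // Note (the focal length here is an illustrative number, not from the
+  // source): px_error_angle is the angle subtended by px_noise pixels at the
+  // focal length, 2*atan(px_noise/(2*f)); e.g. f = 315 px and px_noise = 1
+  // give about 0.0032 rad. computeTau() uses it to turn a one-pixel matching
+  // error into a depth standard deviation tau.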
+ size_t n_updates=0, n_failed_matches=0, n_seeds = pt_seeds_.size(); + lock_t lock(seeds_mut_); + list::iterator it=pt_seeds_.begin(); + + const double focal_length = frame->cam_->errorMultiplier2(); + double px_noise = 1.0; + double px_error_angle = atan(px_noise/(2.0*focal_length))*2.0; // law of chord (sehnensatz) + + while( it!=pt_seeds_.end()) + { + // set this value true when seeds updating should be interrupted + if(seeds_updating_halt_) + return; + + // check if seed is not already too old + if((PointSeed::batch_counter - it->batch_id) > options_.max_n_kfs) { + it = pt_seeds_.erase(it); + continue; + } + + // check if point is visible in the current image + SE3 T_ref_cur = it->ftr->frame->T_f_w_ * frame->T_f_w_.inverse(); + const Vector3d xyz_f(T_ref_cur.inverse()*(1.0/it->mu * it->ftr->f) ); + if(xyz_f.z() < 0.0) { + ++it; // behind the camera + continue; + } + if(!frame->cam_->isInFrame(frame->f2c(xyz_f).cast())) { + ++it; // point does not project in image + continue; + } + + // we are using inverse depth coordinates + float z_inv_min = it->mu + sqrt(it->sigma2); + float z_inv_max = max(it->mu - sqrt(it->sigma2), 0.00000001f); + double z; + if(!matcher_.findEpipolarMatchDirect( + *it->ftr->frame, *frame, *it->ftr, 1.0/it->mu, 1.0/z_inv_min, 1.0/z_inv_max, z)) + { + it->b++; // increase outlier probability when no match was found + ++it; + ++n_failed_matches; + continue; + } + + // compute tau + double tau = computeTau(T_ref_cur, it->ftr->f, z, px_error_angle); + double tau_inverse = 0.5 * (1.0/max(0.0000001, z-tau) - 1.0/(z+tau)); + + // update the estimate + updatePointSeed(1./z, tau_inverse*tau_inverse, &*it); + ++n_updates; + + if(frame->isKeyframe()) + { + // The feature detector should not initialize new seeds close to this location + pt_feature_detector_->setGridOccpuancy(PointFeat(matcher_.px_cur_)); + } + + // if the seed has converged, we initialize a new candidate point and remove the seed + if(sqrt(it->sigma2) < it->z_range/options_.seed_convergence_sigma2_thresh) + { + assert(it->ftr->feat3D == NULL); // TODO this should not happen anymore + Vector3d xyz_world(it->ftr->frame->T_f_w_.inverse() * (it->ftr->f * (1.0/it->mu))); + Point* point = new Point(xyz_world, it->ftr); + it->ftr->feat3D = point; + /* FIXME it is not threadsafe to add a feature to the frame here. + if(frame->isKeyframe()) + { + Feature* ftr = new PointFeat(frame.get(), matcher_.px_cur_, matcher_.search_level_); + ftr->point = point; + point->addFrameRef(ftr); + frame->addFeature(ftr); + it->ftr->frame->addFeature(it->ftr); + } + else + */ + { + seed_converged_cb_(point, it->sigma2); // put in candidate list + } + it = pt_seeds_.erase(it); + } + else if(isnan(z_inv_min)) + { + SVO_WARN_STREAM("z_min is NaN"); + it = pt_seeds_.erase(it); + } + else + ++it; + } + +} + +void DepthFilter::updateLineSeeds(FramePtr frame) +{ + // update only a limited number of seeds, because we don't have time to do it + // for all the seeds in every frame! 
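+  // Note: a line seed runs the same Bayesian depth update as a point seed,
+  // once per endpoint (mu_s/sigma2_s and mu_e/sigma2_e), while sharing a
+  // single inlier/outlier Beta count (a, b); the recursion follows the
+  // parametric depth filter of Vogiatzis & Hernandez ("Video-based,
+  // real-time multi view stereo"), as inherited from SVO.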
+ size_t n_updates=0, n_failed_matches=0, n_seeds = seg_seeds_.size(); + lock_t lock(seeds_mut_); + list::iterator it=seg_seeds_.begin(); + + const double focal_length = frame->cam_->errorMultiplier2(); + double px_noise = 1.0; + double px_error_angle = atan(px_noise/(2.0*focal_length))*2.0; // law of chord (sehnensatz) + + while( it!=seg_seeds_.end()) + { + // set this value true when seeds updating should be interrupted + if(seeds_updating_halt_) + return; + + // check if seed is not already too old + if((LineSeed::batch_counter - it->batch_id) > options_.max_n_kfs) { + it = seg_seeds_.erase(it); + continue; + } + + // check if segment is visible in the current image + SE3 T_ref_cur = it->ftr->frame->T_f_w_ * frame->T_f_w_.inverse(); + const Vector3d xyz_f_s(T_ref_cur.inverse()*(1.0/it->mu_s * static_cast(it->ftr)->sf) ); + const Vector3d xyz_f_e(T_ref_cur.inverse()*(1.0/it->mu_e * static_cast(it->ftr)->ef) ); + if( xyz_f_s.z() < 0.0 || xyz_f_e.z() < 0.0 ) { + ++it; // behind the camera + continue; + } + if( !frame->cam_->isInFrame(frame->f2c(xyz_f_s).cast()) || + !frame->cam_->isInFrame(frame->f2c(xyz_f_e).cast()) ) { + ++it; // segment does not project in image + continue; + } + + // we are using inverse depth coordinates + float z_inv_min_s = it->mu_s + sqrt(it->sigma2_s); + float z_inv_max_s = max(it->mu_s - sqrt(it->sigma2_s), 0.00000001f); + float z_inv_min_e = it->mu_e + sqrt(it->sigma2_e); + float z_inv_max_e = max(it->mu_e - sqrt(it->sigma2_e), 0.00000001f); + double z_s, z_e; + if(!matcherls_.findEpipolarMatchDirectSegmentEndpoint( + *it->ftr->frame, *frame, *it->ftr, 1.0/it->mu_s, 1.0/z_inv_min_s, 1.0/z_inv_max_s, z_s) || + !matcherls_.findEpipolarMatchDirectSegmentEndpoint( + *it->ftr->frame, *frame, *it->ftr, 1.0/it->mu_e, 1.0/z_inv_min_e, 1.0/z_inv_max_e, z_e) ) + { + it->b++; // increase outlier probability when no match was found + ++it; + ++n_failed_matches; + continue; + } + + // compute tau + double tau_s = computeTau(T_ref_cur, static_cast(it->ftr)->sf, z_s, px_error_angle); + double tau_inverse_s = 0.5 * (1.0/max(0.0000001, z_s-tau_s) - 1.0/(z_s+tau_s)); + double tau_e = computeTau(T_ref_cur, static_cast(it->ftr)->ef, z_e, px_error_angle); + double tau_inverse_e = 0.5 * (1.0/max(0.0000001, z_e-tau_e) - 1.0/(z_e+tau_e)); + + // update the estimate + updateLineSeed(1./z_s, tau_inverse_s*tau_inverse_s, 1./z_e, tau_inverse_e*tau_inverse_e, &*it); + ++n_updates; + + if(frame->isKeyframe()) + { + // The feature detector should not initialize new seeds close to this location + seg_feature_detector_->setGridOccpuancy(LineFeat(matcher_.px_cur_,matcherls_.px_cur_)); + } + + // if the seed has converged, we initialize a new candidate point and remove the seed + if(sqrt(it->sigma2_s) < it->z_range_s/options_.seed_convergence_sigma2_thresh && + sqrt(it->sigma2_e) < it->z_range_e/options_.seed_convergence_sigma2_thresh ) + { + assert(static_cast(it->ftr)->feat3D == NULL); // TODO this should not happen anymore + Vector3d xyz_world_s(it->ftr->frame->T_f_w_.inverse() * (static_cast(it->ftr)->sf * (1.0/it->mu_s))); + Vector3d xyz_world_e(it->ftr->frame->T_f_w_.inverse() * (static_cast(it->ftr)->ef * (1.0/it->mu_e))); + LineSeg* line = new LineSeg(xyz_world_s, xyz_world_e, it->ftr); + static_cast(it->ftr)->feat3D = line; + /* FIXME it is not threadsafe to add a feature to the frame here. 
+ if(frame->isKeyframe()) + { + Feature* ftr = new PointFeat(frame.get(), matcher_.px_cur_, matcher_.search_level_); + ftr->point = point; + point->addFrameRef(ftr); + frame->addFeature(ftr); + it->ftr->frame->addFeature(it->ftr); + } + else + */ + { + seed_converged_cb_ls_(line, it->sigma2_s, it->sigma2_e); // put in candidate list + } + it = seg_seeds_.erase(it); + } + else if( isnan(z_inv_min_s) || isnan(z_inv_min_e) ) + { + SVO_WARN_STREAM("z_min_s or z_min_e is NaN"); + it = seg_seeds_.erase(it); + } + else + ++it; + } +} + +void DepthFilter::clearFrameQueue() +{ + while(!frame_queue_.empty()) + frame_queue_.pop(); +} + +void DepthFilter::getSeedsCopy(const FramePtr& frame, std::list& seeds) +{ + lock_t lock(seeds_mut_); + for(std::list::iterator it=pt_seeds_.begin(); it!=pt_seeds_.end(); ++it) + { + if (it->ftr->frame == frame.get()) + seeds.push_back(*it); + } +} + +void DepthFilter::updatePointSeed(const float x, const float tau2, PointSeed* seed) +{ + float norm_scale = sqrt(seed->sigma2 + tau2); + if(std::isnan(norm_scale)) + return; + boost::math::normal_distribution nd(seed->mu, norm_scale); + float s2 = 1./(1./seed->sigma2 + 1./tau2); + float m = s2*(seed->mu/seed->sigma2 + x/tau2); + float C1 = seed->a/(seed->a+seed->b) * boost::math::pdf(nd, x); + float C2 = seed->b/(seed->a+seed->b) * 1./seed->z_range; + float normalization_constant = C1 + C2; + C1 /= normalization_constant; + C2 /= normalization_constant; + float f = C1*(seed->a+1.)/(seed->a+seed->b+1.) + C2*seed->a/(seed->a+seed->b+1.); + float e = C1*(seed->a+1.)*(seed->a+2.)/((seed->a+seed->b+1.)*(seed->a+seed->b+2.)) + + C2*seed->a*(seed->a+1.0f)/((seed->a+seed->b+1.0f)*(seed->a+seed->b+2.0f)); + + // update parameters + float mu_new = C1*m+C2*seed->mu; + seed->sigma2 = C1*(s2 + m*m) + C2*(seed->sigma2 + seed->mu*seed->mu) - mu_new*mu_new; + seed->mu = mu_new; + seed->a = (e-f)/(f-e/f); + seed->b = seed->a*(1.0f-f)/f; +} + +void DepthFilter::updateLineSeed(const float x_s, const float tau2_s, const float x_e, const float tau2_e, LineSeed* seed) +{ + float norm_scale_s = sqrt(seed->sigma2_s + tau2_s); + float norm_scale_e = sqrt(seed->sigma2_e + tau2_e); + if(std::isnan(norm_scale_s)||std::isnan(norm_scale_e)) + return; + boost::math::normal_distribution nd_s(seed->mu_s, norm_scale_s); + boost::math::normal_distribution nd_e(seed->mu_e, norm_scale_e); + + /* update first endpoint of the line segment*/ + float s2 = 1./(1./seed->sigma2_s + 1./tau2_s); + float m = s2*(seed->mu_s/seed->sigma2_s + x_s/tau2_s); + float C1 = seed->a/(seed->a+seed->b) * boost::math::pdf(nd_s, x_s); + float C2 = seed->b/(seed->a+seed->b) * 1./seed->z_range_s; + float normalization_constant = C1 + C2; + C1 /= normalization_constant; + C2 /= normalization_constant; + float f_s = C1*(seed->a+1.)/(seed->a+seed->b+1.) 
+ C2*seed->a/(seed->a+seed->b+1.); + float e_s = C1*(seed->a+1.)*(seed->a+2.)/((seed->a+seed->b+1.)*(seed->a+seed->b+2.)) + + C2*seed->a*(seed->a+1.0f)/((seed->a+seed->b+1.0f)*(seed->a+seed->b+2.0f)); + // update parameters of first endpoint + float mu_new_s = C1*m+C2*seed->mu_s; + seed->sigma2_s = C1*(s2 + m*m) + C2*(seed->sigma2_s + seed->mu_s*seed->mu_s) - mu_new_s*mu_new_s; + seed->mu_s = mu_new_s; + + /* update second endpoint of the line segment*/ + s2 = 1./(1./seed->sigma2_e + 1./tau2_e); + m = s2*(seed->mu_e/seed->sigma2_e + x_e/tau2_e); + C1 = seed->a/(seed->a+seed->b) * boost::math::pdf(nd_e, x_e); + C2 = seed->b/(seed->a+seed->b) * 1./seed->z_range_e; + normalization_constant = C1 + C2; + C1 /= normalization_constant; + C2 /= normalization_constant; + float f_e = C1*(seed->a+1.)/(seed->a+seed->b+1.) + C2*seed->a/(seed->a+seed->b+1.); + float e_e = C1*(seed->a+1.)*(seed->a+2.)/((seed->a+seed->b+1.)*(seed->a+seed->b+2.)) + + C2*seed->a*(seed->a+1.0f)/((seed->a+seed->b+1.0f)*(seed->a+seed->b+2.0f)); + // update parameters of second endpoint + float mu_new_e = C1*m+C2*seed->mu_e; + seed->sigma2_e = C1*(s2 + m*m) + C2*(seed->sigma2_e + seed->mu_e*seed->mu_e) - mu_new_e*mu_new_e; + seed->mu_e = mu_new_e; + + /* update probability of inlier and outlier (a & b parameters of Beta distribution)*/ + float a_s = (e_s-f_s)/(f_s-e_s/f_s); + float a_e = (e_e-f_e)/(f_e-e_e/f_e); + //seed->a = sqrt(a_s*a_s+a_e*a_e); + seed->a = std::max(a_s,a_e); + + float b_s = a_s*(1.f-f_s)/f_s; + float b_e = a_e*(1.f-f_e)/f_e; + //seed->b = sqrt(b_s*b_s+b_e*b_e); + seed->b = std::min(b_s,b_e); + +} + +double DepthFilter::computeTau( + const SE3& T_ref_cur, + const Vector3d& f, + const double z, + const double px_error_angle) +{ + Vector3d t(T_ref_cur.translation()); + Vector3d a = f*z-t; + double t_norm = t.norm(); + double a_norm = a.norm(); + double alpha = acos(f.dot(t)/t_norm); // dot product + double beta = acos(a.dot(-t)/(t_norm*a_norm)); // dot product + double beta_plus = beta + px_error_angle; + double gamma_plus = PI-alpha-beta_plus; // triangle angles sum to PI + double z_plus = t_norm*sin(beta_plus)/sin(gamma_plus); // law of sines + return (z_plus - z); // tau +} + +} // namespace plsvo diff --git a/src/feature.cpp b/src/feature.cpp new file mode 100644 index 0000000..66d0262 --- /dev/null +++ b/src/feature.cpp @@ -0,0 +1,303 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see .
** +** ** +*****************************************************************************/ + + +#include + +namespace plsvo { + +Feature::Feature(const Vector2d &_px) : + frame(NULL), + px(_px), + f(Vector3d()), + level(-1) +{} + +Feature::Feature(Frame *_frame, const Eigen::Vector2d &_px, int _level) : + frame(_frame), + px(_px), + f(frame->cam_->cam2world(px)), + level(_level) +{} + +Feature::Feature(Frame *_frame, const Vector2d &_px, const Vector3d &_f, int _level) : + frame(_frame), + px(_px), + f(_f), + level(_level) +{} + +PointFeat::PointFeat(const Vector2d &_px) : + Feature(_px), + type(CORNER), + grad(1.0,0.0), + feat3D(NULL) +{ } + +PointFeat::PointFeat(Frame *_frame, const Vector2d &_px, int _level) : + Feature(_frame, _px, _level), + type(CORNER), + grad(1.0,0.0), + feat3D(NULL) +{} + +PointFeat::PointFeat(Frame *_frame, const Vector2d &_px, const Vector3d &_f, int _level) : + Feature(_frame, _px, _f, _level), + type(CORNER), + grad(1.0,0.0), + feat3D(NULL) +{} + +PointFeat::PointFeat(Frame *_frame, Point *_point, const Vector2d &_px, const Vector3d &_f, int _level) : + Feature(_frame, _px, _f, _level), + type(CORNER), + grad(1.0,0.0), + feat3D(_point) +{} + +LineFeat::LineFeat(const Vector2d &_spx, const Vector2d &_epx) : + Feature(0.5f*(_spx+_epx)), + type(LINE_SEGMENT), + spx(_spx), + epx(_epx), + sf(Vector3d()), + ef(Vector3d()), + feat3D(NULL), + grad(1.0,0.0), + length((_epx-_spx).norm()) +{ } + +LineFeat::LineFeat(Frame *_frame, const Vector2d &_spx, const Vector2d &_epx, int _level) : + Feature(_frame, 0.5f*(_spx+_epx), _level), + type(LINE_SEGMENT), + spx(_spx), + epx(_epx), + sf(frame->cam_->cam2world(spx)), + ef(frame->cam_->cam2world(epx)), + feat3D(NULL), + length((_epx-_spx).norm()) +{ + line = sf.cross(ef) ; + line = line / sqrt(line(0)*line(0)+line(1)*line(1)); + // 2 first coordinates in line are the normalized normal in 2D (same direction as gradient) + grad = Vector2d(line[0],line[1]); +} + +LineFeat::LineFeat(Frame *_frame, const Vector2d &_spx, const Vector2d &_epx, int _level, double _angle) : + Feature(_frame, 0.5f*(_spx+_epx), _level), + type(LINE_SEGMENT), + spx(_spx), + epx(_epx), + sf(frame->cam_->cam2world(spx)), + ef(frame->cam_->cam2world(epx)), + feat3D(NULL), + length((_epx-_spx).norm()), + angle(_angle) +{ + line = sf.cross(ef) ; + line = line / sqrt(line(0)*line(0)+line(1)*line(1)); + // 2 first coordinates in line are the normalized normal in 2D (same direction as gradient) + grad = Vector2d(line[0],line[1]); +} + +LineFeat::LineFeat(Frame *_frame, const Vector2d &_spx, const Vector2d &_epx, const Vector3d &_sf, const Vector3d &_ef, int _level) : + Feature(_frame, 0.5f*(_spx+_epx), _level), + type(LINE_SEGMENT), + spx(_spx), + epx(_epx), + sf(_sf), + ef(_ef), + feat3D(NULL), + grad(1.0,0.0), + length((_epx-_spx).norm()) +{ + line = sf.cross(ef) ; + line = line / sqrt(line(0)*line(0)+line(1)*line(1)); + // 2 first coordinates in line are the normalized normal in 2D (same direction as gradient) + grad = Vector2d(line[0],line[1]); +} + +LineFeat::LineFeat(Frame *_frame, LineSeg *_ls, const Vector2d &_spx, const Vector2d &_epx, const Vector3d &_sf, const Vector3d &_ef, int _level) : + Feature(_frame, 0.5f*(_spx+_epx), _level), + type(LINE_SEGMENT), + spx(_spx), + epx(_epx), + sf(_sf), + ef(_ef), + feat3D(_ls), + grad(1.0,0.0), + length((_epx-_spx).norm()) +{ + line = sf.cross(ef) ; + line = line / sqrt(line(0)*line(0)+line(1)*line(1)); + // 2 first coordinates in line are the normalized normal in 2D (same direction as gradient) + grad = 
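+      // Note: 'line' = sf x ef, rescaled so that (line[0], line[1]) is a unit
+      // 2D normal; for a unit-plane point p = (x, y, 1), line.dot(p) is then
+      // the signed perpendicular distance to the segment's supporting line.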
Vector2d(line[0],line[1]); +} + +size_t LineFeat::setupSampling(size_t patch_size, Vector2d &dif) +{ + // complete sampling of the segment surroundings, + // with minimum overlap of the square patches + // if segment is horizontal or vertical, N is seg_length/patch_size + // if the segment has angle theta, we need to correct according to the distance from center to unit-square border: *corr + // scale (pyramid level) is accounted for later + dif = epx - spx; // difference vector from start to end point + double tan_dir = std::min(fabs(dif[0]),fabs(dif[1])) / std::max(fabs(dif[0]),fabs(dif[1])); + double sin_dir = tan_dir / sqrt( 1.0+tan_dir*tan_dir ); + double correction = 2.0 * sqrt( 1.0 + sin_dir*sin_dir ); + return std::max( 1.0, length / (2.0*patch_size*correction) ); + // If length is very low the segment approaches a point and the minimum 1 sample is taken (the central point) +} + +Patch::Patch(int _size, const cv::Mat &_img) +{ + // assert the size is an even number for symmetry in the window + assert( _size%2 == 0 ); + size = _size; + halfsize = size/2; + area = size*size; + border = halfsize+1; + + // from image get stride + full_img = _img; + stride = _img.step[0]; // take as stride between rows the corresponding Mat property +} + +void Patch::setPosition(const Vector2d &px) +{ + // set patch center position + u_ref = px[0]; + v_ref = px[1]; + u_ref_i = floorf(u_ref); + v_ref_i = floorf(v_ref); + pos_ref_i = px.cast(); // Casting to int truncates non-integer part (in >=0 equivalent to floor) +} + +void Patch::computeInterpWeights() +{ + // compute bilateral interpolation weights for patch subpixel position + const float subpix_u_ref = u_ref-u_ref_i; + const float subpix_v_ref = v_ref-v_ref_i; + wTL = (1.0-subpix_u_ref) * (1.0-subpix_v_ref); + wTR = subpix_u_ref * (1.0-subpix_v_ref); + wBL = (1.0-subpix_u_ref) * subpix_v_ref; + wBR = subpix_u_ref * subpix_v_ref; +} + +void Patch::setRoi() +{ + // set interest rectangle + rect = cv::Rect( u_ref_i - halfsize, v_ref_i - halfsize, size, size ); + + // copy submatrix of the full image corresponding to this patch + // (OR NOT?) 
due to subpixel precision, we need to extend the size by 1 in each direction + roi = cv::Mat(full_img, rect); +} + +uchar* Patch::begin(int i) +{ + return roi.ptr(i); +} +uchar* Patch::end(int i) +{ + return roi.ptr(i) + roi.cols; +} + +RotatedRectPatch::RotatedRectPatch(float length, float width, float angle, const cv::Mat &_img) : + length(length), + width(width), + angle(angle) +{ + // set bounding dimensions + bSize_y = cos(angle) * width + sin(angle) * length; + bSize_x = sin(angle) * width + cos(angle) * length; + // set bounding area + area = (int) ceil(bSize_x) * ceil(bSize_y); // Upper bound in number of pixels that the rotated region can contain + + // from image get stride + full_img = _img; + stride = _img.step[0]; // take as stride between rows the corresponding Mat property +} + +void RotatedRectPatch::setPosition(const Vector2d &px) +{ + // set patch center position + u_ref = px[0]; + v_ref = px[1]; + u_ref_i = floorf(u_ref); + v_ref_i = floorf(v_ref); + pos_ref_i = px.cast(); // Casting to int truncates non-integer part (in >=0 equivalent to floor) +} + +void RotatedRectPatch::computeInterpWeights() +{ + // compute bilateral interpolation weights for patch subpixel position + const float subpix_u_ref = u_ref-u_ref_i; + const float subpix_v_ref = v_ref-v_ref_i; + wTL = (1.0-subpix_u_ref) * (1.0-subpix_v_ref); + wTR = subpix_u_ref * (1.0-subpix_v_ref); + wBL = (1.0-subpix_u_ref) * subpix_v_ref; + wBR = subpix_u_ref * subpix_v_ref; +} + +void RotatedRectPatch::setRoi() +{ + // set interest rectangle + rect = cv::RotatedRect(cv::Point2f(u_ref_i,v_ref_i), cv::Size2f(length,width), angle*180.0/M_PI); + + // copy submatrix of the full image corresponding to the bounding box of this patch + roi = cv::Mat(full_img, rect.boundingRect()); +} + +float RotatedRectPatch::horBorderDist(float y) +{ + // The geometric index function resembles an odd function: f(-x)=-f(x) + // All results can be obtained from the output for positive angle (0. ** +** ** +*****************************************************************************/ + + +#include +#include +#include +#include +#include + +namespace plsvo { + +Point::Point(const Vector3d& pos) : + Feature3D(counter_++), + pos_(pos), + normal_set_(false), + v_g2o_(NULL) +{} + +Point::Point(const Vector3d& pos, PointFeat* ftr) : + Feature3D(counter_++), + pos_(pos), + normal_set_(false), + v_g2o_(NULL) +{ + obs_.push_front(ftr); + ++n_obs_; +} + +LineSeg::LineSeg(const Vector3d& spos, const Vector3d& epos) : + Feature3D(counter_++), + spos_(spos), + epos_(epos), + v_g2o_(NULL) +{} + +LineSeg::LineSeg(const Vector3d& spos, const Vector3d& epos, LineFeat* ftr) : + Feature3D(counter_++), + spos_(spos), + epos_(epos), + v_g2o_(NULL) +{ + obs_.push_front(ftr); + ++n_obs_; +} + +void Point::initNormal() +{ + assert(!obs_.empty()); + const Feature* ftr = obs_.back(); + assert(ftr->frame != NULL); + normal_ = ftr->frame->T_f_w_.rotation_matrix().transpose()*(-ftr->f); + normal_information_ = DiagonalMatrix(pow(20/(pos_-ftr->frame->pos()).norm(),2), 1.0, 1.0); + normal_set_ = true; +} + +bool Point::getCloseViewObs(const Vector3d& framepos, Feature* &ftr) const +{ + // TODO: get frame with same point of view AND same pyramid level! 
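+  // Note: the loop keeps the observation whose viewing ray is closest in
+  // angle to the current one; min_cos_angle < 0.5 then rejects a best match
+  // separated by more than 60 degrees, since cos(60 deg) = 0.5.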
+ Vector3d obs_dir(framepos - pos_); obs_dir.normalize(); + auto min_it=obs_.begin(); + double min_cos_angle = 0; + for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it) + { + Vector3d dir((*it)->frame->pos() - pos_); dir.normalize(); + double cos_angle = obs_dir.dot(dir); + if(cos_angle > min_cos_angle) + { + min_cos_angle = cos_angle; + min_it = it; + } + } + ftr = *min_it; + if(min_cos_angle < 0.5) // assume that observations larger than 60° are useless + return false; + return true; +} + +bool LineSeg::getCloseViewObs(const Vector3d& framepos, Feature *&ftr) const +{ + // TODO: get frame with same point of view AND same pyramid level! + // For now, use angle central point to measure observation distance + // Further rotation from that central point can be corrected by in-plane optimization? + Vector3d cpos = 0.5*(spos_+epos_); + Vector3d obs_dir(framepos - cpos); obs_dir.normalize(); + auto min_it=obs_.begin(); + double min_cos_angle = 0; + for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it) + { + Vector3d dir((*it)->frame->pos() - cpos); dir.normalize(); + double cos_angle = obs_dir.dot(dir); + if(cos_angle > min_cos_angle) + { + min_cos_angle = cos_angle; + min_it = it; + } + } + ftr = *min_it; + if(min_cos_angle < 0.5) // assume that observations larger than 60° are useless + return false; + return true; +} + +} // namespace plsvo + + diff --git a/src/feature3D_impl.cpp b/src/feature3D_impl.cpp new file mode 100644 index 0000000..3efabb0 --- /dev/null +++ b/src/feature3D_impl.cpp @@ -0,0 +1,175 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . 
** +** ** +*****************************************************************************/ + + +#include +#include +#include +#include +#include + +namespace plsvo { + +void Point::optimize(const size_t n_iter) +{ + Vector3d old_point = pos_; + double chi2 = 0.0; + Matrix3d A; + Vector3d b; + + for(size_t i=0; iframe->T_f_w_ * pos_); + Point::jacobian_xyz2uv(p_in_f, (*it)->frame->T_f_w_.rotation_matrix(), J); + const Vector2d e(vk::project2d((*it)->f) - vk::project2d(p_in_f)); + new_chi2 += e.squaredNorm(); + A.noalias() += J.transpose() * J; + b.noalias() -= J.transpose() * e; + } + + // solve linear system + const Vector3d dp(A.ldlt().solve(b)); + + // check if error increased + if((i > 0 && new_chi2 > chi2) || (bool) std::isnan((double)dp[0])) + { +#ifdef POINT_OPTIMIZER_DEBUG + cout << "it " << i + << "\t FAILURE \t new_chi2 = " << new_chi2 << endl; +#endif + pos_ = old_point; // roll-back + break; + } + + // update the model + Vector3d new_point = pos_ + dp; + old_point = pos_; + pos_ = new_point; + chi2 = new_chi2; +#ifdef POINT_OPTIMIZER_DEBUG + cout << "it " << i + << "\t Success \t new_chi2 = " << new_chi2 + << "\t norm(b) = " << vk::norm_max(b) + << endl; +#endif + + // stop when converged + if(vk::norm_max(dp) <= EPS) + break; + + } +#ifdef POINT_OPTIMIZER_DEBUG + cout << endl; +#endif +} + +void LineSeg::optimize(const size_t n_iter) +{ + Vector3d old_spoint = spos_; + Vector3d old_epoint = epos_; + double chi2s = 0.0; + double chi2e = 0.0; + Matrix3d As,Ae; + Vector3d bs,be; + + for(size_t i=0; iframe->T_f_w_ * spos_); + Point::jacobian_xyz2uv( sp_in_f, (*it)->frame->T_f_w_.rotation_matrix(), Js); + const Vector3d ep_in_f( (*it)->frame->T_f_w_ * epos_); + Point::jacobian_xyz2uv( ep_in_f, (*it)->frame->T_f_w_.rotation_matrix(), Je); + const Vector2d es(vk::project2d(static_cast(*it)->sf) - vk::project2d(sp_in_f)); + const Vector2d ee(vk::project2d(static_cast(*it)->ef) - vk::project2d(ep_in_f)); + new_chi2_s += es.squaredNorm(); + As.noalias() += Js.transpose() * Js; + bs.noalias() -= Js.transpose() * es; + new_chi2_e += ee.squaredNorm(); + Ae.noalias() += Je.transpose() * Je; + be.noalias() -= Je.transpose() * ee; + } + + // solve linear system + const Vector3d dps(As.ldlt().solve(bs)); + const Vector3d dpe(Ae.ldlt().solve(be)); + + // check if error increased + if( (i > 0 && new_chi2_s > chi2s) || (bool) std::isnan((double)dps[0]) || (i > 0 && new_chi2_e > chi2e) || (bool) std::isnan((double)dpe[0]) ) + { +#ifdef POINT_OPTIMIZER_DEBUG + cout << "it " << i + << "\t FAILURE \t new_chi2 = " << new_chi2 << endl; +#endif + spos_ = old_spoint; // roll-back + epos_ = old_epoint; // roll-back + break; + } + + // update the model + Vector3d new_spoint = spos_ + dps; + old_spoint = spos_; + spos_ = new_spoint; + chi2s = new_chi2_s; + Vector3d new_epoint = epos_ + dpe; + old_epoint = epos_; + epos_ = new_epoint; + chi2e = new_chi2_e; + +#ifdef POINT_OPTIMIZER_DEBUG + cout << "it " << i + << "\t Success \t new_chi2 = " << new_chi2 + << "\t norm(b) = " << vk::norm_max(b) + << endl; +#endif + + // stop when converged + if( vk::norm_max(dps) <= EPS || vk::norm_max(dpe) <= EPS ) + break; + } +#ifdef POINT_OPTIMIZER_DEBUG + cout << endl; +#endif + +} +} // namespace plsvo diff --git a/src/feature_alignment.cpp b/src/feature_alignment.cpp new file mode 100644 index 0000000..ff01cb1 --- /dev/null +++ b/src/feature_alignment.cpp @@ -0,0 +1,614 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** 
Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + + +#ifdef __SSE2__ +#include +#endif +#ifdef __ARM_NEON__ +#include +#endif +#include + +namespace plsvo { +namespace feature_alignment { + +#define SUBPIX_VERBOSE 0 + +bool align1D( + const cv::Mat& cur_img, + const Vector2f& dir, // direction in which the patch is allowed to move + uint8_t* ref_patch_with_border, + uint8_t* ref_patch, + const int n_iter, + Vector2d& cur_px_estimate, + double& h_inv) +{ + const int halfpatch_size_ = 4; + const int patch_size = 8; + const int patch_area = 64; + bool converged = false; + + // compute derivative of template and prepare inverse compositional + float __attribute__((__aligned__(16))) ref_patch_dv[patch_area]; + Matrix2f H; H.setZero(); + + // compute gradient and hessian + const int ref_step = patch_size+2; + float* it_dv = ref_patch_dv; + for(int y=0; y= cur_img.cols-halfpatch_size_ || v_r >= cur_img.rows-halfpatch_size_) + break; + + if(isnan(u) || isnan(v)) // TODO very rarely this can happen, maybe H is singular? should not be at corner.. check + return false; + + // compute interpolation weights + float subpix_x = u-u_r; + float subpix_y = v-v_r; + float wTL = (1.0-subpix_x)*(1.0-subpix_y); + float wTR = subpix_x * (1.0-subpix_y); + float wBL = (1.0-subpix_x)*subpix_y; + float wBR = subpix_x * subpix_y; + + // loop through search_patch, interpolate + uint8_t* it_ref = ref_patch; + float* it_ref_dv = ref_patch_dv; + float new_chi2 = 0.0; + Vector2f Jres; Jres.setZero(); + for(int y=0; y 0 && new_chi2 > chi2) + { +#if SUBPIX_VERBOSE + cout << "error increased." << endl; +#endif + u -= update[0]; + v -= update[1]; + break; + } + + chi2 = new_chi2; + update = Hinv * Jres; + u += update[0]*dir[0]; + v += update[0]*dir[1]; + mean_diff += update[1]; + +#if SUBPIX_VERBOSE + cout << "Iter " << iter << ":" + << "\t u=" << u << ", v=" << v + << "\t update = " << update[0] << ", " << update[1] + << "\t new chi2 = " << new_chi2 << endl; +#endif + + if(update[0]*update[0]+update[1]*update[1] < min_update_squared) + { +#if SUBPIX_VERBOSE + cout << "converged." 
<< endl; +#endif + converged=true; + break; + } + } + + cur_px_estimate << u, v; + return converged; +} + +bool align2D( + const cv::Mat& cur_img, + uint8_t* ref_patch_with_border, + uint8_t* ref_patch, + const int n_iter, + Vector2d& cur_px_estimate, + bool no_simd) +{ +#ifdef __ARM_NEON__ + if(!no_simd) + return align2D_NEON(cur_img, ref_patch_with_border, ref_patch, n_iter, cur_px_estimate); +#endif + + const int patch_size_ = 8; + Patch patch( patch_size_, cur_img ); + bool converged=false; + + /* Precomputation step: derivatives in reference patch */ + // compute derivative of template and prepare inverse compositional + float __attribute__((__aligned__(16))) ref_patch_dx[patch.area]; + float __attribute__((__aligned__(16))) ref_patch_dy[patch.area]; + Matrix3f H; H.setZero(); + + // compute gradient and hessian + const int ref_step = patch_size_+2; // assumes ref_patch_with_border comes from a specific Mat object with certain size!!! Bad way to do it? + float* it_dx = ref_patch_dx; + float* it_dy = ref_patch_dy; + for(int y=0; y 0 && new_chi2 > chi2) + { +#if SUBPIX_VERBOSE + cout << "error increased." << endl; +#endif + u -= update[0]; + v -= update[1]; + break; + } + chi2 = new_chi2; +*/ + update = Hinv * Jres; + u += update[0]; + v += update[1]; + cur_px_estimate = Vector2d(u,v); + mean_diff += update[2]; + +#if SUBPIX_VERBOSE + cout << "Iter " << iter << ":" + << "\t u=" << u << ", v=" << v + << "\t update = " << update[0] << ", " << update[1] +// << "\t new chi2 = " << new_chi2 << endl; +#endif + + if(update[0]*update[0]+update[1]*update[1] < min_update_squared) + { +#if SUBPIX_VERBOSE + cout << "converged." << endl; +#endif + converged=true; + break; + } + } + + cur_px_estimate << u, v; + return converged; +} + +#define DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n)) // rounds to closest integer and descales + +bool align2D_SSE2( + const cv::Mat& cur_img, + uint8_t* ref_patch_with_border, + uint8_t* ref_patch, + const int n_iter, + Vector2d& cur_px_estimate) +{ + // TODO: This function should not be used as the alignment is not robust to illumination changes! + const int halfpatch_size = 4; + const int patch_size = 8; + const int patch_area = 64; + bool converged=false; + const int W_BITS = 14; + + // compute derivative of template and prepare inverse compositional + int16_t __attribute__((__aligned__(16))) ref_patch_dx[patch_area]; + int16_t __attribute__((__aligned__(16))) ref_patch_dy[patch_area]; + + // compute gradient and hessian + const int ref_step = patch_size+2; + int16_t* it_dx = ref_patch_dx; + int16_t* it_dy = ref_patch_dy; + float A11=0, A12=0, A22=0; + for(int y=0; y(it[1]) - it[-1]; + int16_t dy = static_cast(it[ref_step]) - it[-ref_step]; + *it_dx = dx; + *it_dy = dy; // we are missing a factor 1/2 + A11 += static_cast(dx*dx); // we are missing a factor 1/4 + A12 += static_cast(dx*dy); + A22 += static_cast(dy*dy); + } + } + + // Compute pixel location in new image: + float u = cur_px_estimate.x(); + float v = cur_px_estimate.y(); + + // termination condition + const float min_update_squared = 0.03*0.03; + const int cur_step = cur_img.step.p[0]; + const float Dinv = 1.0f/(A11*A22 - A12*A12); // we are missing an extra factor 16 + float chi2 = 0; + float update_u = 0, update_v = 0; + + for(int iter = 0; iter= cur_img.cols-halfpatch_size || v_r >= cur_img.rows-halfpatch_size) + break; + + if(isnan(u) || isnan(v)) // TODO very rarely this can happen, maybe H is singular? should not be at corner.. 
check + return false; + + float subpix_x = u-u_r; + float subpix_y = v-v_r; + float b1=0, b2=0; + float new_chi2 = 0.0; + +#ifdef __SSE2__ + // compute bilinear interpolation weights + int wTL = static_cast((1.0f-subpix_x)*(1.0f-subpix_y)*(1 << W_BITS)); + int wTR = static_cast(subpix_x * (1.0f-subpix_y)*(1 << W_BITS)); + int wBL = static_cast((1.0f-subpix_x)*subpix_y*(1 << W_BITS)); + int wBR = (1 << W_BITS) - wTL - wTR - wBL; + + __m128i qw0 = _mm_set1_epi32(wTL + (wTR << 16)); // Sets the 4 signed 32-bit integer values to [wTL, wTR]. + __m128i qw1 = _mm_set1_epi32(wBL + (wBR << 16)); + __m128i z = _mm_setzero_si128(); + __m128 qb0 = _mm_setzero_ps(); // 4 floats + __m128 qb1 = _mm_setzero_ps(); // 4 floats + __m128i qdelta = _mm_set1_epi32(1 << (W_BITS-1)); + for(int y=0; y(it[1] - it[-1]); + *it_dy = static_cast(it[ref_step] - it[-ref_step]); // divide by 2 missing + Vector3f J(*it_dx, *it_dy, 1.0f); + H += J*J.transpose(); + } + } + Matrix3f Hinv = H.inverse(); + float mean_diff = 0.0; + + // Compute pixel location in new image: + float u = cur_px_estimate.x(); + float v = cur_px_estimate.y(); + + // termination condition + const float min_update_squared = 0.03*0.03; + const int cur_step = cur_img.step.p[0]; + Vector3f update; + Vector3f Jres; + for(int iter = 0; iter= cur_img.cols-halfpatch_size || v_r >= cur_img.rows-halfpatch_size) + break; + + if(isnan(u) || isnan(v)) // TODO very rarely this can happen, maybe H is singular? should not be at corner.. check + return false; + + float subpix_x = u-u_r; + float subpix_y = v-v_r; + float b1=0, b2=0; + +#ifdef __ARM_NEON__ + const int SHIFT_BITS = 7; + const uint16_t wTL = static_cast((1.0f-subpix_x)*(1.0f-subpix_y)*(1<(subpix_x*(1.0f-subpix_y)*(1<((1.0f-subpix_x)*subpix_y*(1<((1 << SHIFT_BITS) - wTL - wTR - wBL); + + // initialize result to zero + int32x4_t vb1 = vdupq_n_s32(0); + int32x4_t vb2 = vdupq_n_s32(0); + int16x8_t vmean_diff = vdupq_n_s16( (int16_t) (mean_diff+0.5) ); + int16x8_t vres_sum = vdupq_n_s16(0); + for(int y=0; y Vr[i] := Va[i] + Vb[i] * Vc[i] + // int32x4_t vmlal_s16(int32x4_t a, int16x4_t b, int16x4_t c); // VMLAL.S16 q0,d0,d0 + int16x8_t grad = vld1q_s16(ref_patch_dx + y*patch_size); + vb1 = vmlal_s16(vb1, vget_low_s16(grad), vget_low_s16(res)); + vb1 = vmlal_s16(vb1, vget_high_s16(grad), vget_high_s16(res)); + + grad = vld1q_s16(ref_patch_dy + y*patch_size); + vb2 = vmlal_s16(vb2, vget_low_s16(grad), vget_low_s16(res)); + vb2 = vmlal_s16(vb2, vget_high_s16(grad), vget_high_s16(res)); + } + + // finally, sum results of vb1, vb2 and vres_sum + int32x2_t tmp; + tmp = vpadd_s32(vget_low_s32(vb1), vget_high_s32(vb1)); + Jres[0] = -vget_lane_s32(tmp, 0) - vget_lane_s32(tmp, 1); + + tmp = vpadd_s32(vget_low_s32(vb2), vget_high_s32(vb2)); + Jres[1] = -vget_lane_s32(tmp, 0) - vget_lane_s32(tmp, 1); + + int32x4_t vres_sum1 = vpaddlq_s16(vres_sum); + tmp = vpadd_s32(vget_low_s32(vres_sum1), vget_high_s32(vres_sum1)); + Jres[2] = -vget_lane_s32(tmp, 0) - vget_lane_s32(tmp, 1); +#endif + + update = Hinv * Jres * 2; // * 2 to compensate because above, we did not compute the derivative correctly + u += update[0]; + v += update[1]; + mean_diff += update[2]; + +#if SUBPIX_VERBOSE + cout << "Iter " << iter << ":" + << "\t u=" << u << ", v=" << v + << "\t update = " << update[0] << ", " << update[1] << endl; +#endif + + if(update[0]*update[0]+update[1]*update[1] < min_update_squared) + { +#if SUBPIX_VERBOSE + cout << "converged." 
<< endl; +#endif + converged=true; + break; + } + } + + cur_px_estimate << u, v; + return converged; +} + +} // namespace feature_alignment +} // namespace plsvo diff --git a/src/feature_detection.cpp b/src/feature_detection.cpp new file mode 100644 index 0000000..f474049 --- /dev/null +++ b/src/feature_detection.cpp @@ -0,0 +1,272 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + +#include +#include +#include +#include + +using namespace cv; +using namespace cv::line_descriptor; + +struct compare_line_by_unscaled_length +{ + inline bool operator()(const KeyLine& a, const KeyLine& b){ + return (a.lineLength > b.lineLength); + } +}; + +namespace plsvo { +namespace feature_detection { + +FastDetector::FastDetector( + const int img_width, + const int img_height, + const int cell_size, + const int n_pyr_levels) : + AbstractDetector(img_width, img_height, cell_size, n_pyr_levels) +{} + +void FastDetector::detect( + Frame* frame, + const ImgPyr& img_pyr, + const double detection_threshold, + list& fts) +{ + Corners corners(grid_n_cols_*grid_n_rows_, Corner(0,0,detection_threshold,0,0.0f)); + for(int L=0; L fast_corners; +#if __SSE2__ + fast::fast_corner_detect_10_sse2( + (fast::fast_byte*) img_pyr[L].data, img_pyr[L].cols, + img_pyr[L].rows, img_pyr[L].cols, 20, fast_corners); +#elif HAVE_FAST_NEON + fast::fast_corner_detect_9_neon( + (fast::fast_byte*) img_pyr[L].data, img_pyr[L].cols, + img_pyr[L].rows, img_pyr[L].cols, 20, fast_corners); +#else + fast::fast_corner_detect_10( + (fast::fast_byte*) img_pyr[L].data, img_pyr[L].cols, + img_pyr[L].rows, img_pyr[L].cols, 20, fast_corners); +#endif + // nm stands for non-maximal + vector scores, nm_corners; + // compute scores for all fast corners + fast::fast_corner_score_10((fast::fast_byte*) img_pyr[L].data, img_pyr[L].cols, fast_corners, 20, scores); + // get list of nonmax corners in a 3x3 window + fast::fast_nonmax_3x3(fast_corners, scores, nm_corners); + + for(auto it=nm_corners.begin(), ite=nm_corners.end(); it!=ite; ++it) + { + fast::fast_xy& xy = fast_corners.at(*it); + const int k = static_cast((xy.y*scale)/cell_size_)*grid_n_cols_ + + static_cast((xy.x*scale)/cell_size_); + if(grid_occupancy_[k]) + continue; + const float score = vk::shiTomasiScore(img_pyr[L], xy.x, xy.y); + if(score > corners.at(k).score) + corners.at(k) = 
Corner(xy.x*scale, xy.y*scale, score, L, 0.0f); + } + } + + // Create feature for every corner that has high enough corner score + std::for_each(corners.begin(), corners.end(), [&](Corner& c) { + if(c.score > detection_threshold) + fts.push_back(new PointFeat(frame, Vector2d(c.x, c.y), c.level)); + }); + + resetGrid(); +} + +void FastDetector::setExistingFeatures(const list &fts) +{ + std::for_each(fts.begin(), fts.end(), [&](PointFeat* i){ + grid_occupancy_.at( + static_cast(i->px[1]/cell_size_)*grid_n_cols_ + + static_cast(i->px[0]/cell_size_)) = true; + }); +} + +void FastDetector::setGridOccpuancy(const PointFeat &ft) +{ + const Vector2d& px = ft.px; + grid_occupancy_.at( + static_cast(px[1]/cell_size_)*grid_n_cols_ + + static_cast(px[0]/cell_size_)) = true; +} + +LsdDetector::LsdDetector( + const int img_width, + const int img_height, + const int cell_size, + const int n_pyr_levels) : + AbstractDetector(img_width, img_height, cell_size, n_pyr_levels) +{} + +void LsdDetector::detect(Frame* frame, + const ImgPyr& img_pyr, + const double detection_threshold, + list &fts) +{ + // Parameters (TODO: include into config file or whatever and commit it to opencv) + vector keyline, keylines, keylines_sort; + + // Define the LSD detector object + Ptr lsd = LSDDetectorC::createLSDDetectorC(); + + // TODO: grab from config file + LSDDetectorC::LSDOptions opts; + opts.refine = LSD_REFINE_ADV; + opts.scale = 1.2; + opts.sigma_scale = 0.6; + opts.quant = 2.0; + opts.ang_th = 22.5; + opts.log_eps = 1.0; + opts.density_th = 0.6; + opts.n_bins = 1024; + + // Detect features on each pyramid level + double detection_threshold_ = detection_threshold * double(img_height_*img_width_) / double(img_height_+img_width_); // min length relative to size of image (change) + for(int L=0; Ldetect( img_pyr[L], keyline, 1, 1, opts); + // sort lines according to their unscaled length + sort( keyline.begin(), keyline.end(), compare_line_by_unscaled_length() ); + //detection_threshold_ = keyline[int(0.25*double(keyline.size()))].lineLength ; + std::for_each( keyline.begin(), keyline.end(), [&](KeyLine kl){ + if( scale*kl.lineLength > detection_threshold_ ){ + const int sk = static_cast((kl.startPointY*scale)/cell_size_)*grid_n_cols_ + + static_cast((kl.startPointX*scale)/cell_size_); + const int mk = static_cast(((kl.startPointY+kl.endPointY)*scale/2)/cell_size_)*grid_n_cols_ + + static_cast(((kl.startPointY+kl.endPointY)*scale/2)/cell_size_); + const int ek = static_cast((kl.endPointY*scale)/cell_size_)*grid_n_cols_ + + static_cast((kl.endPointX*scale)/cell_size_); + if( !grid_occupancy_[sk] && !grid_occupancy_[ek] && !grid_occupancy_[mk] ) + //if( !grid_occupancy_[sk] && !grid_occupancy_[ek] ) + { + fts.push_back( new LineFeat(frame,Vector2d(kl.startPointX*scale,kl.startPointY*scale),Vector2d(kl.endPointX*scale,kl.endPointY*scale),L,kl.angle) ); + } + } + }); + keyline.clear(); + } + resetGridLs(); + +} + +void LsdDetector::detect(Frame* frame, + const cv::Mat& rec_img, + const double detection_threshold, + list &fts) +{ + // Parameters (TODO: include into config file or whatever and commit it to opencv) + vector keyline, keylines, keylines_sort; + + // Define the LSD detector object + Ptr lsd = LSDDetectorC::createLSDDetectorC(); + + // TODO: grab from config file + LSDDetectorC::LSDOptions opts; + opts.refine = LSD_REFINE_STD; + opts.scale = 1.0; + opts.sigma_scale = 0.6; + opts.quant = 2.0; + opts.ang_th = 22.5; + opts.log_eps = 0.0; + opts.density_th = 0.7; + opts.n_bins = 1024; + + // Detect features on each 
pyramid level + double detection_threshold_ = detection_threshold * double(img_height_*img_width_) / double(img_height_+img_width_); // min length relative to size of image (change) + + lsd->detect( rec_img, keyline, 1, 1, opts); + // sort lines according to their unscaled length + sort( keyline.begin(), keyline.end(), compare_line_by_unscaled_length() ); + + detection_threshold_ = keyline[int(0.5*double(keyline.size()))].lineLength * 2.0; + + std::for_each( keyline.begin(), keyline.end(), [&](KeyLine kl){ + if( kl.lineLength > detection_threshold_ ){ + const int sk = static_cast((kl.startPointY)/cell_size_)*grid_n_cols_ + + static_cast((kl.startPointX)/cell_size_); + /*const int mk = static_cast(((kl.startPointY+kl.endPointY)*scale/2)/cell_size_)*grid_n_cols_ + + static_cast(((kl.startPointY+kl.endPointY)*scale/2)/cell_size_);*/ + const int ek = static_cast((kl.endPointY)/cell_size_)*grid_n_cols_ + + static_cast((kl.endPointX)/cell_size_); + //if( !grid_occupancy_[sk] && !grid_occupancy_[ek] && !grid_occupancy_[mk] ) + if( !grid_occupancy_[sk] && !grid_occupancy_[ek] ) + { + fts.push_back( new LineFeat(frame,Vector2d(kl.startPointX,kl.startPointY),Vector2d(kl.endPointX,kl.endPointY),kl.octave) ); + } + } + }); + keyline.clear(); + + resetGridLs(); + +} + + + +void LsdDetector::setGridOccpuancy(const LineFeat& ft) +{ + const Vector2d& spx = ft.spx; + grid_occupancy_.at( + static_cast(spx[1]/cell_size_)*grid_n_cols_ + + static_cast(spx[0]/cell_size_)) = true; + const Vector2d& epx = ft.epx; + grid_occupancy_.at( + static_cast(epx[1]/cell_size_)*grid_n_cols_ + + static_cast(epx[0]/cell_size_)) = true; + // mid point + /*grid_occupancy_.at( + static_cast((epx[1]+spx[1])/(2*cell_size_))*grid_n_cols_ + + static_cast((epx[0]+spx[0])/(2*cell_size_))) = true;*/ +} + +void LsdDetector::setExistingFeatures(const list& fts) +{ + std::for_each(fts.begin(), fts.end(), [&](LineFeat* i){ + grid_occupancy_.at( + static_cast(i->spx[1]/cell_size_)*grid_n_cols_ + + static_cast(i->spx[0]/cell_size_)) = true; + grid_occupancy_.at( + static_cast(i->epx[1]/cell_size_)*grid_n_cols_ + + static_cast(i->epx[0]/cell_size_)) = true; + // mid point + /*grid_occupancy_.at( + static_cast((i->epx[1]+i->spx[1])/(2*cell_size_))*grid_n_cols_ + + static_cast((i->epx[0]+i->spx[0])/(2*cell_size_))) = true;*/ + }); +} + +} // namespace feature_detection +} // namespace plsvo + diff --git a/src/frame.cpp b/src/frame.cpp new file mode 100644 index 0000000..22985e2 --- /dev/null +++ b/src/frame.cpp @@ -0,0 +1,220 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace plsvo { + +int Frame::frame_counter_ = 0; + +Frame::Frame(vk::AbstractCamera* cam, const cv::Mat& img, double timestamp) : + id_(frame_counter_++), + timestamp_(timestamp), + cam_(cam), + key_pts_(5), + is_keyframe_(false), + v_kf_(NULL) +{ + initFrame(img); +} + +Frame::~Frame() +{ + std::for_each(pt_fts_.begin(), pt_fts_.end(), [&](Feature* i){delete i;}); +} + +void Frame::initFrame(const cv::Mat& img) +{ + // check image + if(img.empty() || img.type() != CV_8UC1 || img.cols != cam_->width() || img.rows != cam_->height()) + throw std::runtime_error("Frame: provided image has not the same size as the camera model or image is not grayscale"); + + // Set keypoints to NULL + std::for_each(key_pts_.begin(), key_pts_.end(), [&](Feature* ftr){ ftr=NULL; }); + + // Build Image Pyramid + frame_utils::createImgPyramid(img, max(Config::nPyrLevels(), Config::kltMaxLevel()+1), img_pyr_); +} + +void Frame::setKeyframe() +{ + is_keyframe_ = true; + setKeyPoints(); +} + +void Frame::addFeature(PointFeat *ftr) +{ + pt_fts_.push_back(ftr); +} +void Frame::addFeature(LineFeat *ftr) +{ + seg_fts_.push_back(ftr); +} + +void Frame::setKeyPoints() +{ + for(size_t i = 0; i < 5; ++i) + if(key_pts_[i] != NULL) + if(key_pts_[i]->feat3D == NULL) + key_pts_[i] = NULL; + + std::for_each(pt_fts_.begin(), pt_fts_.end(), [&](PointFeat* ftr){ if(ftr->feat3D != NULL) checkKeyPoints(ftr); }); +} + +void Frame::checkKeyPoints(PointFeat* ftr) +{ + const int cu = cam_->width()/2; + const int cv = cam_->height()/2; + + // center pixel + if(key_pts_[0] == NULL) + key_pts_[0] = ftr; + else if(std::max(std::fabs(ftr->px[0]-cu), std::fabs(ftr->px[1]-cv)) + < std::max(std::fabs(key_pts_[0]->px[0]-cu), std::fabs(key_pts_[0]->px[1]-cv))) + key_pts_[0] = ftr; + + if(ftr->px[0] >= cu && ftr->px[1] >= cv) + { + if(key_pts_[1] == NULL) + key_pts_[1] = ftr; + else if((ftr->px[0]-cu) * (ftr->px[1]-cv) + > (key_pts_[1]->px[0]-cu) * (key_pts_[1]->px[1]-cv)) + key_pts_[1] = ftr; + } + if(ftr->px[0] >= cu && ftr->px[1] < cv) + { + if(key_pts_[2] == NULL) + key_pts_[2] = ftr; + else if((ftr->px[0]-cu) * (ftr->px[1]-cv) + > (key_pts_[2]->px[0]-cu) * (key_pts_[2]->px[1]-cv)) + key_pts_[2] = ftr; + } + if(ftr->px[0] < cv && ftr->px[1] < cv) + { + if(key_pts_[3] == NULL) + key_pts_[3] = ftr; + else if((ftr->px[0]-cu) * (ftr->px[1]-cv) + > (key_pts_[3]->px[0]-cu) * (key_pts_[3]->px[1]-cv)) + key_pts_[3] = ftr; + } + if(ftr->px[0] < cv && ftr->px[1] >= cv) + { + if(key_pts_[4] == NULL) + key_pts_[4] = ftr; + else if((ftr->px[0]-cu) * (ftr->px[1]-cv) + > (key_pts_[4]->px[0]-cu) * (key_pts_[4]->px[1]-cv)) + key_pts_[4] = ftr; + } +} + +void Frame::removeKeyPoint(PointFeat* ftr) +{ + bool found = false; + std::for_each(key_pts_.begin(), key_pts_.end(), [&](PointFeat*& i){ + if(i == ftr) { + i = NULL; + found = true; + } + }); + if(found) + setKeyPoints(); +} + +bool Frame::isVisible(const Vector3d& xyz_w) const +{ + Vector3d xyz_f = T_f_w_*xyz_w; + if(xyz_f.z() < 0.0) + return false; // point is behind the camera + Vector2d px = f2c(xyz_f); + if(px[0] >= 0.0 && px[1] >= 0.0 && px[0] < cam_->width() && px[1] < cam_->height()) + return true; + return false; +} + + +/// 
Utility functions for the Frame class +namespace frame_utils { + +void createImgPyramid(const cv::Mat& img_level_0, int n_levels, ImgPyr& pyr) +{ + pyr.resize(n_levels); + pyr[0] = img_level_0; + for(int i=1; i depth_vec; + depth_vec.reserve(frame.pt_fts_.size()); + depth_min = std::numeric_limits::max(); + // Points + for(auto it=frame.pt_fts_.begin(), ite=frame.pt_fts_.end(); it!=ite; ++it) + { + if((*it)->feat3D != NULL) + { + const double z = frame.w2f((*it)->feat3D->pos_).z(); + depth_vec.push_back(z); + depth_min = fmin(z, depth_min); + } + } + // Line segments + for(auto it=frame.seg_fts_.begin(), ite=frame.seg_fts_.end(); it!=ite; ++it) + { + if((*it)->feat3D != NULL) + { + const double zs = frame.w2f((*it)->feat3D->spos_).z(); + const double ze = frame.w2f((*it)->feat3D->epos_).z(); + depth_vec.push_back(zs); + depth_vec.push_back(ze); + depth_min = fmin(zs, depth_min); + depth_min = fmin(ze, depth_min); + } + } + if(depth_vec.empty()) + { + SVO_WARN_STREAM("Cannot set scene depth. Frame has no point-observations!"); + return false; + } + depth_mean = vk::getMedian(depth_vec); + return true; +} + +} // namespace frame_utils +} // namespace plsvo diff --git a/src/frame_handler_base.cpp b/src/frame_handler_base.cpp new file mode 100644 index 0000000..61f3ac5 --- /dev/null +++ b/src/frame_handler_base.cpp @@ -0,0 +1,240 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . 
** +** ** +*****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace plsvo +{ + +// definition of global and static variables which were declared in the header +#ifdef SVO_TRACE +vk::PerformanceMonitor* g_permon = NULL; +#endif + +FrameHandlerBase::FrameHandlerBase() : + stage_(STAGE_PAUSED), + set_reset_(false), + set_start_(false), + acc_frame_timings_(10), + acc_num_obs_(10), + num_obs_last_(0), + num_obs_last_pt(0), + num_obs_last_ls(0), + tracking_quality_(TRACKING_INSUFFICIENT) +{ +#ifdef SVO_TRACE + // Initialize Performance Monitor + g_permon = new vk::PerformanceMonitor(); + g_permon->addTimer("pyramid_creation"); + g_permon->addTimer("sparse_img_align"); + g_permon->addTimer("reproject"); + g_permon->addTimer("reproject_kfs"); + g_permon->addTimer("reproject_candidates"); + g_permon->addTimer("feature_align"); + g_permon->addTimer("pose_optimizer"); + g_permon->addTimer("point_optimizer"); + g_permon->addTimer("local_ba"); + g_permon->addTimer("tot_time"); + g_permon->addLog("timestamp"); + g_permon->addLog("img_align_n_tracked"); + g_permon->addLog("repr_n_mps"); + g_permon->addLog("repr_n_new_references"); + g_permon->addLog("sfba_thresh"); + g_permon->addLog("sfba_error_init"); + g_permon->addLog("sfba_error_final"); + g_permon->addLog("sfba_n_edges_final"); + g_permon->addLog("loba_n_erredges_init"); + g_permon->addLog("loba_n_erredges_fin"); + g_permon->addLog("loba_err_init"); + g_permon->addLog("loba_err_fin"); + g_permon->addLog("n_candidates"); + g_permon->addLog("dropout"); + g_permon->init(Config::traceName(), Config::traceDir()); +#endif + + SVO_INFO_STREAM("SVO initialized"); +} + +FrameHandlerBase::~FrameHandlerBase() +{ + SVO_INFO_STREAM("SVO destructor invoked"); +#ifdef SVO_TRACE + delete g_permon; +#endif +} + +bool FrameHandlerBase::startFrameProcessingCommon(const double timestamp) +{ + if(set_start_) + { + resetAll(); + stage_ = STAGE_FIRST_FRAME; + } + + if(stage_ == STAGE_PAUSED) + return false; + + SVO_LOG(timestamp); + SVO_DEBUG_STREAM("New Frame"); + SVO_START_TIMER("tot_time"); + timer_.start(); + + // some cleanup from last iteration, can't do before because of visualization + map_.emptyTrash(); + return true; +} + +int FrameHandlerBase::finishFrameProcessingCommon(const size_t update_id, + const UpdateResult dropout, + const size_t num_pt_observations, const size_t num_ls_observations) +{ + SVO_DEBUG_STREAM("Frame: "<writeToFile(); + { + boost::unique_lock lock(map_.point_candidates_.mut_); + size_t n_candidates = map_.point_candidates_.candidates_.size(); + SVO_LOG(n_candidates); + } +#endif + + if(dropout == RESULT_FAILURE && + (stage_ == STAGE_DEFAULT_FRAME || stage_ == STAGE_RELOCALIZING )) + { + stage_ = STAGE_RELOCALIZING; + tracking_quality_ = TRACKING_INSUFFICIENT; + } + else if (dropout == RESULT_FAILURE) + resetAll(); + if(set_reset_) + resetAll(); + + return 0; +} + +void FrameHandlerBase::resetCommon() +{ + map_.reset(); + stage_ = STAGE_PAUSED; + set_reset_ = false; + set_start_ = false; + tracking_quality_ = TRACKING_INSUFFICIENT; + num_obs_last_ = 0; + num_obs_last_pt = 0; + num_obs_last_ls = 0; + SVO_INFO_STREAM("RESET"); +} + +void FrameHandlerBase::setTrackingQuality(const size_t num_pt_observations, size_t num_ls_observations) +{ + tracking_quality_ = TRACKING_GOOD; + if( num_pt_observations + num_ls_observations < Config::qualityMinFts() ) + { + SVO_WARN_STREAM_THROTTLE(0.5, "Tracking 
less than "<< Config::qualityMinFts() << " point and line segment features!"); + tracking_quality_ = TRACKING_BAD; + } + // Drop features only for the case of points + const int feature_drop_pt = static_cast(std::min(num_obs_last_pt, Config::maxFts())) - num_pt_observations; + const int feature_drop_ls = static_cast(std::min(num_obs_last_ls, Config::maxFtsSegs())) - num_ls_observations; + //if( feature_drop_pt > int(Config::qualityMaxFtsDrop()) && feature_drop_ls > int(Config::qualityMaxFtsDropSegs()) ) + if( feature_drop_pt > int(Config::qualityMaxFtsDrop()) ) + { + SVO_WARN_STREAM("Lost "<< feature_drop_pt << " point and " << feature_drop_ls << " line segment features!"); + tracking_quality_ = TRACKING_BAD; + } +} + +bool ptLastOptimComparator(Point* lhs, Point* rhs) +{ + return (lhs->last_structure_optim_ < rhs->last_structure_optim_); +} + +bool lsLastOptimComparator(LineSeg* lhs, LineSeg* rhs) +{ + return (lhs->last_structure_optim_ < rhs->last_structure_optim_); +} + +void FrameHandlerBase::optimizeStructure( + FramePtr frame, + size_t max_n_pts, + int max_iter, + size_t max_n_segs, + int max_iter_segs) +{ + // Point features + deque pts; + for(list::iterator it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it) + { + if((*it)->feat3D != NULL) + pts.push_back((*it)->feat3D); + } + max_n_pts = min(max_n_pts, pts.size()); + nth_element(pts.begin(), pts.begin() + max_n_pts, pts.end(), ptLastOptimComparator); + for(deque::iterator it=pts.begin(); it!=pts.begin()+max_n_pts; ++it) + { + (*it)->optimize(max_iter); + (*it)->last_structure_optim_ = frame->id_; + } + // Line features + deque segs; + for(list::iterator it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it) + { + if((*it)->feat3D != NULL) + segs.push_back((*it)->feat3D); + } + max_n_segs = min(max_n_segs, segs.size()); + nth_element(segs.begin(), segs.begin() + max_n_segs, segs.end(), lsLastOptimComparator); + for(deque::iterator it=segs.begin(); it!=segs.begin()+max_n_segs; ++it) + { + (*it)->optimize(max_iter_segs); + (*it)->last_structure_optim_ = frame->id_; + } +} + + +} // namespace plsvo diff --git a/src/frame_handler_mono.cpp b/src/frame_handler_mono.cpp new file mode 100644 index 0000000..b393d88 --- /dev/null +++ b/src/frame_handler_mono.cpp @@ -0,0 +1,510 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . 
** +** ** +*****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef USE_BUNDLE_ADJUSTMENT +#include +#endif + +namespace plsvo { + +FrameHandlerMono::FrameHandlerMono(vk::AbstractCamera* cam) : + FrameHandlerBase(), + cam_(cam), + reprojector_(cam_, map_), + depth_filter_(NULL) +{ + initialize(); +} + +FrameHandlerMono::FrameHandlerMono(vk::AbstractCamera *cam, const FrameHandlerMono::Options& opts) : + FrameHandlerBase(), + cam_(cam), + reprojector_(cam_, map_), + depth_filter_(NULL), + options_(opts) +{ + initialize(opts); +} + +void FrameHandlerMono::initialize() +{ + // create a point feature detector instance + feature_detection::DetectorPtr pt_feature_detector; + if(Config::hasPoints()) + pt_feature_detector = feature_detection::DetectorPtr( + new feature_detection::FastDetector( + cam_->width(), cam_->height(), Config::gridSize(), Config::nPyrLevels())); + else + // create an abstract (void) detector that detects nothing to deactivate use of points + pt_feature_detector = feature_detection::DetectorPtr( + new feature_detection::AbstractDetector( + cam_->width(), cam_->height(), Config::gridSize(), Config::nPyrLevels())); + + // create a segment feature detector instance + feature_detection::DetectorPtr seg_feature_detector; + if(Config::hasLines()) + seg_feature_detector = feature_detection::DetectorPtr( + new feature_detection::LsdDetector( + cam_->width(), cam_->height(), Config::gridSizeSegs(), Config::nPyrLevelsSegs())); + else + // create an abstract (void) detector that detects nothing to deactivate use of line segs + seg_feature_detector = feature_detection::DetectorPtr( + new feature_detection::AbstractDetector( + cam_->width(), cam_->height(), Config::gridSizeSegs(), Config::nPyrLevelsSegs())); + + // create the callback object for the Depth-Filter + DepthFilter::callback_t depth_filter_cb = boost::bind( + &MapPointCandidates::newCandidatePoint, &map_.point_candidates_, _1, _2); + + DepthFilter::callback_t_ls depth_filter_cb_ls = boost::bind( + &MapSegmentCandidates::newCandidateSegment, &map_.segment_candidates_, _1, _2, _3); + + // Setup the Depth-Filter object + depth_filter_ = new DepthFilter(pt_feature_detector, seg_feature_detector, depth_filter_cb, depth_filter_cb_ls ); + depth_filter_->startThread(); +} + +void FrameHandlerMono::initialize(const Options opts) +{ + // create a point feature detector instance + feature_detection::DetectorPtr pt_feature_detector; + if(opts.has_pt) + pt_feature_detector = feature_detection::DetectorPtr( + new feature_detection::FastDetector( + cam_->width(), cam_->height(), Config::gridSize(), Config::nPyrLevels())); + else + // create an abstract (void) detector that detects nothing to deactivate use of points + pt_feature_detector = feature_detection::DetectorPtr( + new feature_detection::AbstractDetector( + cam_->width(), cam_->height(), Config::gridSize(), Config::nPyrLevels())); + + // create a segment feature detector instance + feature_detection::DetectorPtr seg_feature_detector; + if(opts.has_ls) + seg_feature_detector = feature_detection::DetectorPtr( + new feature_detection::LsdDetector( + cam_->width(), cam_->height(), Config::gridSizeSegs(), Config::nPyrLevelsSegs())); + else + // create an abstract (void) detector that detects nothing to deactivate use of line segs + seg_feature_detector = feature_detection::DetectorPtr( + new feature_detection::AbstractDetector( + cam_->width(), 
cam_->height(), Config::gridSizeSegs(), Config::nPyrLevelsSegs())); + + // create the callback object for the Depth-Filter + DepthFilter::callback_t depth_filter_cb = boost::bind( + &MapPointCandidates::newCandidatePoint, &map_.point_candidates_, _1, _2); + + DepthFilter::callback_t_ls depth_filter_cb_ls = boost::bind( + &MapSegmentCandidates::newCandidateSegment, &map_.segment_candidates_, _1, _2, _3); + + // Setup the Depth-Filter object + depth_filter_ = new DepthFilter(pt_feature_detector, seg_feature_detector, depth_filter_cb, depth_filter_cb_ls ); + depth_filter_->startThread(); +} + +FrameHandlerMono::~FrameHandlerMono() +{ + delete depth_filter_; +} + +void FrameHandlerMono::addImage(const cv::Mat& img, const double timestamp) +{ + + img.copyTo(debug_img); + + if(!startFrameProcessingCommon(timestamp)) + return; + + // some cleanup from last iteration, can't do before because of visualization + core_kfs_.clear(); + overlap_kfs_.clear(); + + // create new frame + SVO_START_TIMER("pyramid_creation"); + // The Frame constructor initializes new_frame_ + // and creates the image pyramid (also stored in Frame as img_pyr_) + new_frame_.reset(new Frame(cam_, img.clone(), timestamp)); + SVO_STOP_TIMER("pyramid_creation"); + + // process frame + UpdateResult res = RESULT_FAILURE; + if(stage_ == STAGE_DEFAULT_FRAME) + res = processFrame(); + else if(stage_ == STAGE_SECOND_FRAME) + res = processSecondFrame(); + else if(stage_ == STAGE_FIRST_FRAME) + res = processFirstFrame(); + else if(stage_ == STAGE_RELOCALIZING) + res = relocalizeFrame(SE3(Matrix3d::Identity(), Vector3d::Zero()), + map_.getClosestKeyframe(last_frame_)); + + // set last frame + last_frame_ = new_frame_; + new_frame_.reset(); + + // finish processing + finishFrameProcessingCommon(last_frame_->id_, res, last_frame_->nObs(), last_frame_->nLsObs()); +} + +void FrameHandlerMono::addImage(const cv::Mat& img, const double timestamp, cv::Mat& rec) +{ + + img.copyTo(debug_img); + + if(!startFrameProcessingCommon(timestamp)) + return; + + // some cleanup from last iteration, can't do before because of visualization + core_kfs_.clear(); + overlap_kfs_.clear(); + + // create new frame + SVO_START_TIMER("pyramid_creation"); + // The Frame constructor initializes new_frame_ + // and creates the image pyramid (also stored in Frame as img_pyr_) + new_frame_.reset(new Frame(cam_, img.clone(), timestamp)); + rec.copyTo(new_frame_->rec_img); + SVO_STOP_TIMER("pyramid_creation"); + + // process frame + UpdateResult res = RESULT_FAILURE; + if(stage_ == STAGE_DEFAULT_FRAME) + res = processFrame(); + else if(stage_ == STAGE_SECOND_FRAME) + res = processSecondFrame(); + else if(stage_ == STAGE_FIRST_FRAME) + res = processFirstFrame(); + else if(stage_ == STAGE_RELOCALIZING) + res = relocalizeFrame(SE3(Matrix3d::Identity(), Vector3d::Zero()), + map_.getClosestKeyframe(last_frame_)); + + // set last frame + last_frame_ = new_frame_; + new_frame_.reset(); + + // finish processing + finishFrameProcessingCommon(last_frame_->id_, res, last_frame_->nObs(), last_frame_->nLsObs()); +} + +FrameHandlerMono::UpdateResult FrameHandlerMono::processFirstFrame() +{ + // set first frame to identity transformation + new_frame_->T_f_w_ = SE3(Matrix3d::Identity(), Vector3d::Zero()); + // for now the initialization is done with points and endpoints only (consider use lines) + if(klt_homography_init_.addFirstFrame(new_frame_) == initialization::FAILURE) + return RESULT_NO_KEYFRAME; + new_frame_->setKeyframe(); + map_.addKeyframe(new_frame_); + stage_ = 
STAGE_SECOND_FRAME; + SVO_INFO_STREAM("Init: Selected first frame."); + return RESULT_IS_KEYFRAME; +} + +FrameHandlerBase::UpdateResult FrameHandlerMono::processSecondFrame() +{ + initialization::InitResult res = klt_homography_init_.addSecondFrame(new_frame_); + if(res == initialization::FAILURE) + return RESULT_FAILURE; + else if(res == initialization::NO_KEYFRAME) + return RESULT_NO_KEYFRAME; + + // two-frame bundle adjustment +#ifdef USE_BUNDLE_ADJUSTMENT + ba::twoViewBA(new_frame_.get(), map_.lastKeyframe().get(), Config::lobaThresh(), &map_); +#endif + + new_frame_->setKeyframe(); + double depth_mean, depth_min; + frame_utils::getSceneDepth(*new_frame_, depth_mean, depth_min); + depth_filter_->addKeyframe(new_frame_, depth_mean, 0.5*depth_min); + + // add frame to map + map_.addKeyframe(new_frame_); + stage_ = STAGE_DEFAULT_FRAME; + klt_homography_init_.reset(); + SVO_INFO_STREAM("Init: Selected second frame, triangulated initial map."); + return RESULT_IS_KEYFRAME; +} + +FrameHandlerBase::UpdateResult FrameHandlerMono::processFrame() +{ + // Set initial pose TODO use prior + new_frame_->T_f_w_ = last_frame_->T_f_w_; + + // sparse image align + SVO_START_TIMER("sparse_img_align"); + bool display = false; + bool verbose = false; + SparseImgAlign img_align(Config::kltMaxLevel(), Config::kltMinLevel(), + 30, SparseImgAlign::GaussNewton, display, verbose); + size_t img_align_n_tracked = img_align.run(last_frame_, new_frame_); + SVO_STOP_TIMER("sparse_img_align"); + SVO_LOG(img_align_n_tracked); + SVO_DEBUG_STREAM("Img Align:\t Tracked = " << img_align_n_tracked); + + // show reference features + cv::cvtColor(last_frame_->img(), FrameHandlerMono::debug_img, cv::COLOR_GRAY2BGR); + { + // draw point features + { + auto fts = last_frame_->pt_fts_; + Patch patch( 4, debug_img ); + for(auto it=fts.begin(); it!=fts.end(); ++it) + { + patch.setPosition((*it)->px); + patch.setRoi(); + cv::rectangle(debug_img,patch.rect,cv::Scalar(0,255,0)); + } + } + // draw segment features + { + auto fts = last_frame_->seg_fts_; + std::for_each(fts.begin(), fts.end(), [&](plsvo::LineFeat* i){ + if( i->feat3D != NULL ) + cv::line(debug_img,cv::Point2f(i->spx[0],i->spx[1]),cv::Point2f(i->epx[0],i->epx[1]),cv::Scalar(0,255,0)); + }); + } + //cv::imshow("cv: Ref image", debug_img); + //cv::waitKey(30); + } + + // map reprojection & feature alignment + SVO_START_TIMER("reproject"); + reprojector_.reprojectMap(new_frame_, overlap_kfs_); + SVO_STOP_TIMER("reproject"); + const size_t repr_n_new_references_pt = reprojector_.n_matches_; + const size_t repr_n_new_references = repr_n_new_references_pt; + const size_t repr_n_new_references_ls = reprojector_.n_ls_matches_; + const size_t repr_n_mps = reprojector_.n_trials_; + SVO_LOG2(repr_n_mps, repr_n_new_references); + SVO_DEBUG_STREAM( "Reprojection:\t nPoints & nLines = " << repr_n_mps << "\t \t nMatches = " << repr_n_new_references_pt + repr_n_new_references_ls ); + if( repr_n_new_references_pt + repr_n_new_references_ls < Config::qualityMinFts() ) + { + SVO_WARN_STREAM_THROTTLE(1.0, "Not enough matched features."); + new_frame_->T_f_w_ = last_frame_->T_f_w_; // reset to avoid crazy pose jumps + tracking_quality_ = TRACKING_INSUFFICIENT; + return RESULT_FAILURE; + } + + // pose optimization + SVO_START_TIMER("pose_optimizer"); + size_t sfba_n_edges_final, sfba_n_edges_final_pt, sfba_n_edges_final_ls; + double sfba_thresh, sfba_error_init, sfba_error_final; + pose_optimizer::optimizeGaussNewton( + Config::poseOptimThresh(), Config::poseOptimNumIter(), false, + 
new_frame_, sfba_thresh, sfba_error_init, sfba_error_final, sfba_n_edges_final_pt, sfba_n_edges_final_ls); + SVO_STOP_TIMER("pose_optimizer"); + sfba_n_edges_final = sfba_n_edges_final_pt + sfba_n_edges_final_ls; + SVO_LOG4(sfba_thresh, sfba_error_init, sfba_error_final,sfba_n_edges_final); + SVO_DEBUG_STREAM("PoseOptimizer:\t ErrInit = "<T_f_w_ = last_frame_->T_f_w_; // reset to avoid crazy pose jumps + return RESULT_FAILURE; + } + double depth_mean, depth_min; + frame_utils::getSceneDepth(*new_frame_, depth_mean, depth_min); + if( !needNewKf(depth_mean) || tracking_quality_ == TRACKING_BAD ) + { + depth_filter_->addFrame(new_frame_); + return RESULT_NO_KEYFRAME; + } + new_frame_->setKeyframe(); + SVO_DEBUG_STREAM("New keyframe selected."); + + // new keyframe selected + for(list::iterator it=new_frame_->pt_fts_.begin(); it!=new_frame_->pt_fts_.end(); ++it) + if((*it)->feat3D != NULL) + (*it)->feat3D->addFrameRef(*it); + map_.point_candidates_.addCandidatePointToFrame(new_frame_); + + for(list::iterator it=new_frame_->seg_fts_.begin(); it!=new_frame_->seg_fts_.end(); ++it) + if((*it)->feat3D != NULL) + (*it)->feat3D->addFrameRef(*it); + map_.segment_candidates_.addCandidateSegmentToFrame(new_frame_); + + // optional bundle adjustment +#ifdef USE_BUNDLE_ADJUSTMENT + if(Config::lobaNumIter() > 0) + { + SVO_START_TIMER("local_ba"); + setCoreKfs(Config::coreNKfs()); + size_t loba_n_erredges_init, loba_n_erredges_fin; + double loba_err_init, loba_err_fin; + ba::localBA(new_frame_.get(), &core_kfs_, &map_, + loba_n_erredges_init, loba_n_erredges_fin, + loba_err_init, loba_err_fin); + SVO_STOP_TIMER("local_ba"); + SVO_LOG4(loba_n_erredges_init, loba_n_erredges_fin, loba_err_init, loba_err_fin); + SVO_DEBUG_STREAM("Local BA:\t RemovedEdges {"<addKeyframe(new_frame_, depth_mean*2.0, 0.1*depth_min); + + // if limited number of keyframes, remove the one furthest apart + if(Config::maxNKfs() > 2 && map_.size() >= Config::maxNKfs()) + { + FramePtr furthest_frame = map_.getFurthestKeyframe(new_frame_->pos()); + depth_filter_->removeKeyframe(furthest_frame); // TODO this interrupts the mapper thread, maybe we can solve this better + map_.safeDeleteFrame(furthest_frame); + } + + // add keyframe to map + map_.addKeyframe(new_frame_); + + return RESULT_IS_KEYFRAME; + +} + +FrameHandlerMono::UpdateResult FrameHandlerMono::relocalizeFrame( + const SE3& T_cur_ref, + FramePtr ref_keyframe) +{ + SVO_WARN_STREAM_THROTTLE(1.0, "Relocalizing frame"); + if(ref_keyframe == nullptr) + { + SVO_INFO_STREAM("No reference keyframe."); + return RESULT_FAILURE; + } + SparseImgAlign img_align(Config::kltMaxLevel(), Config::kltMinLevel(), + 30, SparseImgAlign::GaussNewton, false, false); + size_t img_align_n_tracked = img_align.run(ref_keyframe, new_frame_); + if(img_align_n_tracked > 30) + { + SE3 T_f_w_last = last_frame_->T_f_w_; + last_frame_ = ref_keyframe; + FrameHandlerMono::UpdateResult res = processFrame(); + if(res != RESULT_FAILURE) + { + stage_ = STAGE_DEFAULT_FRAME; + SVO_INFO_STREAM("Relocalization successful."); + } + else + new_frame_->T_f_w_ = T_f_w_last; // reset to last well localized pose + return res; + } + return RESULT_FAILURE; +} + +bool FrameHandlerMono::relocalizeFrameAtPose( + const int keyframe_id, + const SE3& T_f_kf, + const cv::Mat& img, + const double timestamp) +{ + FramePtr ref_keyframe; + if(!map_.getKeyframeById(keyframe_id, ref_keyframe)) + return false; + new_frame_.reset(new Frame(cam_, img.clone(), timestamp)); + UpdateResult res = relocalizeFrame(T_f_kf, ref_keyframe); + if(res != 
RESULT_FAILURE) { + last_frame_ = new_frame_; + return true; + } + return false; +} + +void FrameHandlerMono::resetAll() +{ + resetCommon(); + last_frame_.reset(); + new_frame_.reset(); + core_kfs_.clear(); + overlap_kfs_.clear(); + depth_filter_->reset(); +} + +void FrameHandlerMono::setFirstFrame(const FramePtr& first_frame) +{ + resetAll(); + last_frame_ = first_frame; + last_frame_->setKeyframe(); + map_.addKeyframe(last_frame_); + stage_ = STAGE_DEFAULT_FRAME; +} + +bool FrameHandlerMono::needNewKf(double scene_depth_mean) +{ + for(auto it=overlap_kfs_.begin(), ite=overlap_kfs_.end(); it!=ite; ++it) + { + // Estimate rotation distance + SE3 T_last, T_curr, delta_T; + T_last = last_frame_->T_f_w_; + T_curr = it->first->T_f_w_; + delta_T = T_last.inverse() * T_curr; + + Vector6d relpos_ = delta_T.log(); + double delta_t = sqrt( pow(relpos_(0),2) + pow(relpos_(1),2) + pow(relpos_(2),2) ); + double delta_r = sqrt( pow(relpos_(3),2) + pow(relpos_(4),2) + pow(relpos_(5),2) ) * 180.0 / 3.1416; + if( delta_t < Config::kfSelectMinDistT() && delta_r < Config::kfSelectMinDistR() ) + return false; + + /*// they estimated with the scene depth... + Vector3d relpos = new_frame_->w2f(it->first->pos()); + if(fabs(relpos.x())/scene_depth_mean < Config::kfSelectMinDist() && + fabs(relpos.y())/scene_depth_mean < Config::kfSelectMinDist()*0.8 && + fabs(relpos.z())/scene_depth_mean < Config::kfSelectMinDist()*1.3) + return false;*/ + } + return true; +} + +void FrameHandlerMono::setCoreKfs(size_t n_closest) +{ + size_t n = min(n_closest, overlap_kfs_.size()-1); + std::partial_sort(overlap_kfs_.begin(), overlap_kfs_.begin()+n, overlap_kfs_.end(), + boost::bind(&pair::second, _1) > + boost::bind(&pair::second, _2)); + std::for_each(overlap_kfs_.begin(), overlap_kfs_.end(), [&](pair& i){ core_kfs_.insert(i.first); }); +} + +} // namespace plsvo diff --git a/src/initialization.cpp b/src/initialization.cpp new file mode 100644 index 0000000..d6c0fff --- /dev/null +++ b/src/initialization.cpp @@ -0,0 +1,245 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . 
** +** ** +*****************************************************************************/ + + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace plsvo { +namespace initialization { + +InitResult KltHomographyInit::addFirstFrame(FramePtr frame_ref) +{ + reset(); + detectFeatures(frame_ref, px_ref_, f_ref_); + if(px_ref_.size() < 100) + //if(px_ref_.size() < 80) + { + SVO_WARN_STREAM_THROTTLE(2.0, "First image has less than 80 features. Retry in more textured environment."); + return FAILURE; + } + frame_ref_ = frame_ref; + // initialize points in current frame (query or second frame) with points in ref frame + px_cur_.insert(px_cur_.begin(), px_ref_.begin(), px_ref_.end()); + return SUCCESS; +} + +InitResult KltHomographyInit::addSecondFrame(FramePtr frame_cur) +{ + trackKlt(frame_ref_, frame_cur, px_ref_, px_cur_, f_ref_, f_cur_, disparities_); + SVO_INFO_STREAM("Init: KLT tracked "<< disparities_.size() <<" features"); + + // check the number of points tracked is high enough + if(disparities_.size() < Config::initMinTracked()) + return FAILURE; + + // check the median disparity is high enough to compute homography robustly + double disparity = vk::getMedian(disparities_); + SVO_INFO_STREAM("Init: KLT "<cam_->errorMultiplier2(), Config::poseOptimThresh(), + inliers_, xyz_in_cur_, T_cur_from_ref_); + SVO_INFO_STREAM("Init: Homography RANSAC "< depth_vec; + for(size_t i=0; iT_f_w_ = T_cur_from_ref_ * frame_ref_->T_f_w_; + frame_cur->T_f_w_.translation() = + -frame_cur->T_f_w_.rotation_matrix()*(frame_ref_->pos() + scale*(frame_cur->pos() - frame_ref_->pos())); + + // For each inlier create 3D point and add feature in both frames + SE3 T_world_cur = frame_cur->T_f_w_.inverse(); + for(vector::iterator it=inliers_.begin(); it!=inliers_.end(); ++it) + { + Vector2d px_cur(px_cur_[*it].x, px_cur_[*it].y); + Vector2d px_ref(px_ref_[*it].x, px_ref_[*it].y); + // add 3D point (in (w)olrd coordinates) and features if + // BOTH ref and cur points lie within the image (with a margin) + // AND the 3D point lies in front of the camera + if(frame_ref_->cam_->isInFrame(px_cur.cast(), 10) && frame_ref_->cam_->isInFrame(px_ref.cast(), 10) && xyz_in_cur_[*it].z() > 0) + { + Vector3d pos = T_world_cur * (xyz_in_cur_[*it]*scale); + Point* new_point = new Point(pos); + + PointFeat* ftr_cur(new PointFeat(frame_cur.get(), new_point, px_cur, f_cur_[*it], 0)); + frame_cur->addFeature(ftr_cur); + new_point->addFrameRef(ftr_cur); + + PointFeat* ftr_ref(new PointFeat(frame_ref_.get(), new_point, px_ref, f_ref_[*it], 0)); + frame_ref_->addFeature(ftr_ref); + new_point->addFrameRef(ftr_ref); + } + } + return SUCCESS; +} + +void KltHomographyInit::reset() +{ + px_cur_.clear(); + frame_ref_.reset(); +} + +void detectFeatures( + FramePtr frame, + vector& px_vec, + vector& f_vec) +{ + list new_features; + list new_features_ls; + + if(Config::initPoints()) + { + feature_detection::FastDetector detector( + frame->img().cols, frame->img().rows, Config::gridSize(), Config::nPyrLevels()); + detector.detect(frame.get(), frame->img_pyr_, Config::triangMinCornerScore(), new_features); + } + + if(Config::initLines()) + { + feature_detection::LsdDetector detector_ls( + frame->img().cols, frame->img().rows, Config::gridSizeSegs(), Config::nPyrLevelsSegs()); + detector_ls.detect(frame.get(), frame->img_pyr_, Config::lsdMinLength(), new_features_ls); + } + + // now for all maximum corners, initialize a new seed + px_vec.clear(); px_vec.reserve(new_features.size()+2*new_features_ls.size()); + 
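+  // NOTE: each detected line segment contributes *three* seed points below
+  // (start point, midpoint, end point), so an exact reserve would be
+  // new_features.size()+3*new_features_ls.size(); the +2* variant used here
+  // only under-reserves, which is harmless since the vectors grow on demand.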
f_vec.clear(); f_vec.reserve(new_features.size()+2*new_features_ls.size()); + + // we know features here are really PointFeat but this should work with the parent Feature type + std::for_each(new_features.begin(), new_features.end(), [&](Feature* ftr){ + px_vec.push_back(cv::Point2f(ftr->px[0], ftr->px[1])); + f_vec.push_back(ftr->f); + delete ftr; + }); + + // First try, introduce endpoints (line segments usually belongs to planes) + std::for_each(new_features_ls.begin(), new_features_ls.end(), [&](LineFeat* ftr){ + px_vec.push_back(cv::Point2f(ftr->spx[0], ftr->spx[1])); + f_vec.push_back(ftr->sf); + px_vec.push_back(cv::Point2f((ftr->spx[0]+ftr->epx[0])/2.0, (ftr->spx[1]+ftr->epx[1])/2.0)); + f_vec.push_back((ftr->sf+ftr->ef)/2.0); + px_vec.push_back(cv::Point2f(ftr->epx[0], ftr->epx[1])); + f_vec.push_back(ftr->ef); + delete ftr; + }); +} + +void trackKlt( + FramePtr frame_ref, + FramePtr frame_cur, + vector& px_ref, + vector& px_cur, + vector& f_ref, + vector& f_cur, + vector& disparities) +{ + const double klt_win_size = 30.0; + const int klt_max_iter = 30; + const double klt_eps = 0.001; + vector status; + vector error; + vector min_eig_vec; + cv::TermCriteria termcrit(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, klt_max_iter, klt_eps); + cv::calcOpticalFlowPyrLK(frame_ref->img_pyr_[0], frame_cur->img_pyr_[0], + px_ref, px_cur, + status, error, + cv::Size2i(klt_win_size, klt_win_size), + 4, termcrit, cv::OPTFLOW_USE_INITIAL_FLOW); + + // for correctly tracked points, compute 3D vectors and disparities in image (distance between ref and cur point) + vector::iterator px_ref_it = px_ref.begin(); + vector::iterator px_cur_it = px_cur.begin(); + vector::iterator f_ref_it = f_ref.begin(); + f_cur.clear(); f_cur.reserve(px_cur.size()); + disparities.clear(); disparities.reserve(px_cur.size()); + for(size_t i=0; px_ref_it != px_ref.end(); ++i) + { + // if the point has not been correctly tracked, + // remove all occurrences: ref px, ref f, and cur px + if(!status[i]) + { + px_ref_it = px_ref.erase(px_ref_it); + px_cur_it = px_cur.erase(px_cur_it); + f_ref_it = f_ref.erase(f_ref_it); + continue; + } + f_cur.push_back(frame_cur->c2f(px_cur_it->x, px_cur_it->y)); + disparities.push_back(Vector2d(px_ref_it->x - px_cur_it->x, px_ref_it->y - px_cur_it->y).norm()); + ++px_ref_it; + ++px_cur_it; + ++f_ref_it; + } +} + +void computeHomography( + const vector& f_ref, + const vector& f_cur, + double focal_length, + double reprojection_threshold, + vector& inliers, + vector& xyz_in_cur, + SE3& T_cur_from_ref) +{ + vector > uv_ref(f_ref.size()); + vector > uv_cur(f_cur.size()); + for(size_t i=0, i_max=f_ref.size(); i outliers; + vk::computeInliers(f_cur, f_ref, + Homography.T_c2_from_c1.rotation_matrix(), Homography.T_c2_from_c1.translation(), + reprojection_threshold, focal_length, + xyz_in_cur, inliers, outliers); + T_cur_from_ref = Homography.T_c2_from_c1; +} + + +} // namespace initialization +} // namespace plsvo diff --git a/src/map.cpp b/src/map.cpp new file mode 100644 index 0000000..5122dde --- /dev/null +++ b/src/map.cpp @@ -0,0 +1,532 @@ +/***************************************************************************** +** PL-SVO: Semi-direct Monocular Visual Odometry by Combining ** +** Points and Line Segments ** +****************************************************************************** +** ** +** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using 
line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + + +#include +#include +#include +#include +#include +#include + +namespace plsvo { + +Map::Map() {} + +Map::~Map() +{ + reset(); + SVO_INFO_STREAM("Map destructed"); +} + +void Map::reset() +{ + keyframes_.clear(); + point_candidates_.reset(); + segment_candidates_.reset(); + emptyTrash(); +} + +bool Map::safeDeleteFrame(FramePtr frame) +{ + bool found = false; + for(auto it=keyframes_.begin(), ite=keyframes_.end(); it!=ite; ++it) + { + if(*it == frame) + { + std::for_each((*it)->pt_fts_.begin(), (*it)->pt_fts_.end(), [&](PointFeat* ftr){ + removePtFrameRef(it->get(), ftr); + }); + std::for_each((*it)->seg_fts_.begin(), (*it)->seg_fts_.end(), [&](LineFeat* ftr){ + removeLsFrameRef(it->get(), ftr); + }); + + keyframes_.erase(it); + found = true; + break; + } + } + + point_candidates_.removeFrameCandidates(frame); + + if(found) + return true; + + SVO_ERROR_STREAM("Tried to delete Keyframe in map which was not there."); + return false; +} + +void Map::removePtFrameRef(Frame* frame, PointFeat* ftr) +{ + if(ftr->feat3D == NULL) + return; // mappoint may have been deleted in a previous ref. removal + Point* pt = ftr->feat3D; + ftr->feat3D = NULL; + if(pt->obs_.size() <= 2) + { + // If the references list of mappoint has only size=2, delete mappoint + safeDeletePoint(pt); + return; + } + pt->deleteFrameRef(frame); // Remove reference from map_point + frame->removeKeyPoint(ftr); // Check if mp was keyMp in keyframe +} + +void Map::removeLsFrameRef(Frame* frame, LineFeat* ftr) +{ + if(ftr->feat3D == NULL) + return; // map segment may have been deleted in a previous ref. 
removal
+  LineSeg* ls = ftr->feat3D;
+  ftr->feat3D = NULL;
+  if(ls->obs_.size() <= 2)
+  {
+    // If the references list of the map segment has only size=2, delete it
+    safeDeleteSegment(ls);
+    return;
+  }
+  ls->deleteFrameRef(frame);  // Remove reference from map segment
+  // TODO: For now, there are no KeySegments in the frame (only points are used to check overlapping)
+  // frame->removeKeyPoint(ftr); // Check if mp was keyMp in keyframe
+
+}
+
+void Map::safeDeletePoint(Point* pt)
+{
+  // Delete references to mappoints in all keyframes
+  std::for_each(pt->obs_.begin(), pt->obs_.end(), [&](PointFeat* ftr){
+    ftr->feat3D=NULL;
+    ftr->frame->removeKeyPoint(ftr);
+  });
+  pt->obs_.clear();
+
+  // Delete mappoint
+  deletePoint(pt);
+}
+
+void Map::safeDeleteSegment(LineSeg* ls)
+{
+  // Delete references to map segments in all keyframes
+  std::for_each(ls->obs_.begin(), ls->obs_.end(), [&](LineFeat* ftr){
+    ftr->feat3D=NULL;
+    //ftr_->frame->removeKeyPoint(ftr); // TODO: There are no "KeySegments" in frame
+  });
+  ls->obs_.clear();
+  // Delete map segment
+  deleteSegment(ls);
+}
+
+void Map::deletePoint(Point* pt)
+{
+  pt->type_ = Point::TYPE_DELETED;
+  trash_points_.push_back(pt);
+}
+
+void Map::deleteSegment(LineSeg* ls)
+{
+  ls->type_ = LineSeg::TYPE_DELETED;
+  trash_segments_.push_back(ls);
+}
+
+void Map::addKeyframe(FramePtr new_keyframe)
+{
+  keyframes_.push_back(new_keyframe);
+}
+
+void Map::getCloseKeyframes(
+    const FramePtr& frame,
+    std::list< std::pair<FramePtr,double> >& close_kfs) const
+{
+  for(auto kf : keyframes_)
+  {
+    // check if kf has an overlapping field of view with frame; therefore use the KeyPoints
+    for(auto keypoint : kf->key_pts_)
+    {
+      if(keypoint == nullptr)
+        continue;
+      if(frame->isVisible(keypoint->feat3D->pos_))
+      {
+        // store a pair formed by the keyframe pointer and the distance between keyframes
+        close_kfs.push_back(
+            std::make_pair(
+                kf, (frame->T_f_w_.translation()-kf->T_f_w_.translation()).norm()));
+        break; // this keyframe has an overlapping field of view -> add to close_kfs
+      }
+    }
+  }
+}
+
+FramePtr Map::getClosestKeyframe(const FramePtr& frame) const
+{
+  list< pair<FramePtr,double> > close_kfs;
+  getCloseKeyframes(frame, close_kfs);
+  if(close_kfs.empty())
+  {
+    return nullptr;
+  }
+
+  // Sort KFs with overlap according to their closeness
+  close_kfs.sort(boost::bind(&std::pair<FramePtr,double>::second, _1) <
+                 boost::bind(&std::pair<FramePtr,double>::second, _2));
+
+  if(close_kfs.front().first != frame)
+    return close_kfs.front().first;
+  close_kfs.pop_front();
+  return close_kfs.front().first;
+}
+
+FramePtr Map::getFurthestKeyframe(const Vector3d& pos) const
+{
+  FramePtr furthest_kf;
+  double maxdist = 0.0;
+  for(auto it=keyframes_.begin(), ite=keyframes_.end(); it!=ite; ++it)
+  {
+    double dist = ((*it)->pos()-pos).norm();
+    if(dist > maxdist) {
+      maxdist = dist;
+      furthest_kf = *it;
+    }
+  }
+  return furthest_kf;
+}
+
+bool Map::getKeyframeById(const int id, FramePtr& frame) const
+{
+  bool found = false;
+  for(auto it=keyframes_.begin(), ite=keyframes_.end(); it!=ite; ++it)
+    if((*it)->id_ == id) {
+      found = true;
+      frame = *it;
+      break;
+    }
+  return found;
+}
+
+void Map::transform(const Matrix3d& R, const Vector3d& t, const double& s)
+{
+  for(auto it=keyframes_.begin(), ite=keyframes_.end(); it!=ite; ++it)
+  {
+    Vector3d pos = s*R*(*it)->pos() + t;
+    Matrix3d rot = R*(*it)->T_f_w_.rotation_matrix().inverse();
+    (*it)->T_f_w_ = SE3(rot, pos).inverse();
+    // Key Points
+    for(auto ftr=(*it)->pt_fts_.begin(); ftr!=(*it)->pt_fts_.end(); ++ftr)
+    {
+      if((*ftr)->feat3D == NULL)
+        continue;
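+      // A 3D point can be observed from several keyframes, so without a
+      // guard it would be transformed once per observing feature. Here
+      // last_published_ts_ doubles as a visited flag: the first visit stamps
+      // the point with -1000 and applies the similarity transform, and later
+      // visits in this loop skip it. As far as this file shows, the flag is
+      // never cleared again, so Map::transform() acts once per point.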
+void Map::transform(const Matrix3d& R, const Vector3d& t, const double& s)
+{
+  for(auto it=keyframes_.begin(), ite=keyframes_.end(); it!=ite; ++it)
+  {
+    Vector3d pos = s*R*(*it)->pos() + t;
+    Matrix3d rot = R*(*it)->T_f_w_.rotation_matrix().inverse();
+    (*it)->T_f_w_ = SE3(rot, pos).inverse();
+    // Key Points
+    for(auto ftr=(*it)->pt_fts_.begin(); ftr!=(*it)->pt_fts_.end(); ++ftr)
+    {
+      if((*ftr)->feat3D == NULL)
+        continue;
+      if((*ftr)->feat3D->last_published_ts_ == -1000)
+        continue;
+      (*ftr)->feat3D->last_published_ts_ = -1000;
+      (*ftr)->feat3D->pos_ = s*R*(*ftr)->feat3D->pos_ + t;
+    }
+    // Line Segments
+    for(auto ftr=(*it)->seg_fts_.begin(); ftr!=(*it)->seg_fts_.end(); ++ftr)
+    {
+      if((*ftr)->feat3D == NULL)
+        continue;
+      if((*ftr)->feat3D->last_published_ts_ == -1000)
+        continue;
+      (*ftr)->feat3D->last_published_ts_ = -1000;
+      (*ftr)->feat3D->spos_ = s*R*(*ftr)->feat3D->spos_ + t;
+      (*ftr)->feat3D->epos_ = s*R*(*ftr)->feat3D->epos_ + t;
+    }
+  }
+}
+
+void Map::emptyTrash()
+{
+  // Key Points
+  std::for_each(trash_points_.begin(), trash_points_.end(), [&](Point*& pt){
+    delete pt;
+    pt=NULL;
+  });
+  trash_points_.clear();
+  point_candidates_.emptyTrash();
+  // Line Segments
+  std::for_each(trash_segments_.begin(), trash_segments_.end(), [&](LineSeg*& ls){
+    delete ls;
+    ls=NULL;
+  });
+  trash_segments_.clear();
+  segment_candidates_.emptyTrash();
+}
+
+MapPointCandidates::MapPointCandidates()
+{}
+
+MapPointCandidates::~MapPointCandidates()
+{
+  reset();
+}
+
+void MapPointCandidates::newCandidatePoint(Point* point, double depth_sigma2)
+{
+  point->type_ = Point::TYPE_CANDIDATE;
+  boost::unique_lock<boost::mutex> lock(mut_);
+  candidates_.push_back(PointCandidate(point, point->obs_.front()));
+}
+
+void MapPointCandidates::addCandidatePointToFrame(FramePtr frame)
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  PointCandidateList::iterator it=candidates_.begin();
+  while(it != candidates_.end())
+  {
+    if(it->first->obs_.front()->frame == frame.get())
+    {
+      // insert feature in the frame
+      it->first->type_ = Point::TYPE_UNKNOWN;
+      it->first->n_failed_reproj_ = 0;
+      it->second->frame->addFeature(it->second);
+      it = candidates_.erase(it);
+    }
+    else
+      ++it;
+  }
+}
+
+bool MapPointCandidates::deleteCandidatePoint(Point* point)
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  for(auto it=candidates_.begin(), ite=candidates_.end(); it!=ite; ++it)
+  {
+    if(it->first == point)
+    {
+      deleteCandidate(*it);
+      candidates_.erase(it);
+      return true;
+    }
+  }
+  return false;
+}
+
+void MapPointCandidates::removeFrameCandidates(FramePtr frame)
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  auto it=candidates_.begin();
+  while(it!=candidates_.end())
+  {
+    if(it->second->frame == frame.get())
+    {
+      deleteCandidate(*it);
+      it = candidates_.erase(it);
+    }
+    else
+      ++it;
+  }
+}
+
+void MapPointCandidates::reset()
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  std::for_each(candidates_.begin(), candidates_.end(), [&](PointCandidate& c){
+    delete c.first;
+    delete c.second;
+  });
+  candidates_.clear();
+}
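+// Candidate lifecycle, as a sketch (assumes a converged depth-filter seed;
+// xyz, ftr and sigma2 are placeholders for the caller's values):
+//
+//   Point* p = new Point(xyz, ftr);                    // first observation
+//   map.point_candidates_.newCandidatePoint(p, sigma2);
+//   // ... later, when the observing frame is promoted to keyframe:
+//   map.point_candidates_.addCandidatePointToFrame(kf);
+//
+// All list mutations above go through mut_, so producers (depth filter) and
+// consumers (reprojector) can touch the candidate list concurrently.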
+void MapPointCandidates::deleteCandidate(PointCandidate& c)
+{
+  // camera-rig: another frame might still be pointing to the candidate point,
+  // therefore we can't delete it right now.
+  delete c.second; c.second=NULL;
+  c.first->type_ = Point::TYPE_DELETED;
+  trash_points_.push_back(c.first);
+}
+
+void MapPointCandidates::emptyTrash()
+{
+  std::for_each(trash_points_.begin(), trash_points_.end(), [&](Point*& p){
+    delete p; p=NULL;
+  });
+  trash_points_.clear();
+}
+
+MapSegmentCandidates::MapSegmentCandidates()
+{}
+
+MapSegmentCandidates::~MapSegmentCandidates()
+{
+  reset();
+}
+
+void MapSegmentCandidates::newCandidateSegment(LineSeg* ls, double depth_sigma2_s, double depth_sigma2_e)
+{
+  ls->type_ = LineSeg::TYPE_CANDIDATE;
+  boost::unique_lock<boost::mutex> lock(mut_);
+  candidates_.push_back(SegmentCandidate(ls, ls->obs_.front()));
+}
+
+void MapSegmentCandidates::addCandidateSegmentToFrame(FramePtr frame)
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  SegmentCandidateList::iterator it=candidates_.begin();
+  while(it != candidates_.end())
+  {
+    if(it->first->obs_.front()->frame == frame.get())
+    {
+      // insert feature in the frame
+      it->first->type_ = LineSeg::TYPE_UNKNOWN;
+      it->first->n_failed_reproj_ = 0;
+      it->second->frame->addFeature(it->second);
+      it = candidates_.erase(it);
+    }
+    else
+      ++it;
+  }
+}
+
+bool MapSegmentCandidates::deleteCandidateSegment(LineSeg* ls)
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  for(auto it=candidates_.begin(), ite=candidates_.end(); it!=ite; ++it)
+  {
+    if(it->first == ls)
+    {
+      deleteCandidate(*it);
+      candidates_.erase(it);
+      return true;
+    }
+  }
+  return false;
+}
+
+void MapSegmentCandidates::removeFrameCandidates(FramePtr frame)
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  auto it=candidates_.begin();
+  while(it!=candidates_.end())
+  {
+    if(it->second->frame == frame.get())
+    {
+      deleteCandidate(*it);
+      it = candidates_.erase(it);
+    }
+    else
+      ++it;
+  }
+}
+
+void MapSegmentCandidates::reset()
+{
+  boost::unique_lock<boost::mutex> lock(mut_);
+  std::for_each(candidates_.begin(), candidates_.end(), [&](SegmentCandidate& c){
+    delete c.first;
+    delete c.second;
+  });
+  candidates_.clear();
+}
+
+void MapSegmentCandidates::deleteCandidate(SegmentCandidate& c)
+{
+  // camera-rig: another frame might still be pointing to the candidate segment,
+  // therefore we can't delete it right now.
+  delete c.second; c.second=NULL;
+  c.first->type_ = LineSeg::TYPE_DELETED;
+  trash_segments_.push_back(c.first);
+}
+
+void MapSegmentCandidates::emptyTrash()
+{
+  std::for_each(trash_segments_.begin(), trash_segments_.end(), [&](LineSeg*& p){
+    delete p; p=NULL;
+  });
+  trash_segments_.clear();
+}
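+// Deferred deletion, sketched: deleteCandidate() only unlinks the feature and
+// parks the 3D landmark in the trash list (marked TYPE_DELETED), because a
+// concurrent reader may still hold a pointer to it. Memory is reclaimed later
+// from a single, safe point:
+//
+//   map.emptyTrash();   // frees trashed points, segments and candidates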
+namespace map_debug {
+
+void mapValidation(Map* map, int id)
+{
+  for(auto it=map->keyframes_.begin(); it!=map->keyframes_.end(); ++it)
+    frameValidation(it->get(), id);
+}
+
+void frameValidation(Frame* frame, int id)
+{
+  for(auto it = frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+  {
+    if((*it)->feat3D==NULL)
+      continue;
+
+    if((*it)->feat3D->type_ == Point::TYPE_DELETED)
+      printf("ERROR DataValidation %i: Referenced point was deleted.\n", id);
+
+    if(!(*it)->feat3D->findFrameRef(frame))
+      printf("ERROR DataValidation %i: Frame has reference but point does not have a reference back.\n", id);
+
+    pointValidation((*it)->feat3D, id);
+  }
+  for(auto it=frame->key_pts_.begin(); it!=frame->key_pts_.end(); ++it)
+    if(*it != NULL)
+      if((*it)->feat3D == NULL)
+        printf("ERROR DataValidation %i: KeyPoints not correct!\n", id);
+}
+
+void pointValidation(Point* point, int id)
+{
+  for(auto it=point->obs_.begin(); it!=point->obs_.end(); ++it)
+  {
+    bool found=false;
+    for(auto it_ftr=(*it)->frame->pt_fts_.begin(); it_ftr!=(*it)->frame->pt_fts_.end(); ++it_ftr)
+      if((*it_ftr)->feat3D == point) {
+        found=true; break;
+      }
+    if(!found)
+      printf("ERROR DataValidation %i: Point %i has inconsistent reference in frame %i, is candidate = %i\n", id, point->id_, (*it)->frame->id_, (int) point->type_);
+  }
+}
+
+void mapStatistics(Map* map)
+{
+  // compute the average number of point features observed by each keyframe
+  size_t n_pt_obs(0);
+  for(auto it=map->keyframes_.begin(); it!=map->keyframes_.end(); ++it)
+    n_pt_obs += (*it)->nObs();
+  printf("\n\nMap Statistics: Frame avg. point obs = %f\n", (float) n_pt_obs/map->size());
+
+  // compute the average number of observations of each point
+  size_t n_frame_obs(0);
+  size_t n_pts(0);
+  std::set<Point*> points;
+  for(auto it=map->keyframes_.begin(); it!=map->keyframes_.end(); ++it)
+  {
+    for(auto ftr=(*it)->pt_fts_.begin(); ftr!=(*it)->pt_fts_.end(); ++ftr)
+    {
+      if((*ftr)->feat3D == NULL)
+        continue;
+      if(points.insert((*ftr)->feat3D).second) {
+        ++n_pts;
+        n_frame_obs += (*ftr)->feat3D->nRefs();
+      }
+    }
+  }
+  printf("Map Statistics: Point avg. frame obs = %f\n\n", (float) n_frame_obs/n_pts);
+}
+
+} // namespace map_debug
+} // namespace plsvo
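A plausible call site for the validation helpers above (a sketch; the SVO_DEBUG_OUTPUT guard is hypothetical, not part of this patch):

    #ifdef SVO_DEBUG_OUTPUT
      plsvo::map_debug::mapValidation(&map, frame->id_);  // cross-check refs
      plsvo::map_debug::mapStatistics(&map);              // print obs averages
    #endif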
diff --git a/src/matcher.cpp b/src/matcher.cpp
new file mode 100644
index 0000000..d5cdf15
--- /dev/null
+++ b/src/matcher.cpp
@@ -0,0 +1,589 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining             **
+**         Points and Line Segments                                       **
+******************************************************************************
+**                                                                        **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+** Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                        **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                        **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                        **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                        **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.   **
+**                                                                        **
+*****************************************************************************/
+
+
+#include <cstdlib>
+#include <vikit/abstract_camera.h>
+#include <vikit/vision.h>
+#include <vikit/math_utils.h>
+#include <vikit/patch_score.h>
+#include <plsvo/matcher.h>
+#include <plsvo/frame.h>
+#include <plsvo/feature.h>
+#include <plsvo/point.h>
+#include <plsvo/config.h>
+#include <plsvo/feature_alignment.h>
+
+namespace plsvo {
+
+namespace warp {
+
+void getWarpMatrixAffine(
+    const vk::AbstractCamera& cam_ref,
+    const vk::AbstractCamera& cam_cur,
+    const Vector2d& px_ref,
+    const Vector3d& f_ref,
+    const double depth_ref,
+    const SE3& T_cur_ref,
+    const int level_ref,
+    Matrix2d& A_cur_ref)
+{
+  // Compute affine warp matrix A_ref_cur
+  const int halfpatch_size = 5;
+  // Central 3D point in reference frame
+  const Vector3d xyz_ref(f_ref*depth_ref);
+  // Bearing vectors for the pixels at the right and bottom of the patch
+  Vector3d xyz_du_ref(cam_ref.cam2world(px_ref + Vector2d(halfpatch_size,0)*(1<<level_ref)));
+  Vector3d xyz_dv_ref(cam_ref.cam2world(px_ref + Vector2d(0,halfpatch_size)*(1<<level_ref)));
+  xyz_du_ref *= xyz_ref[2]/xyz_du_ref[2];
+  xyz_dv_ref *= xyz_ref[2]/xyz_dv_ref[2];
+  // Project the three points into the current frame and take pixel differences
+  const Vector2d px_cur(cam_cur.world2cam(T_cur_ref*(xyz_ref)));
+  const Vector2d px_du(cam_cur.world2cam(T_cur_ref*(xyz_du_ref)));
+  const Vector2d px_dv(cam_cur.world2cam(T_cur_ref*(xyz_dv_ref)));
+  A_cur_ref.col(0) = (px_du - px_cur)/halfpatch_size;
+  A_cur_ref.col(1) = (px_dv - px_cur)/halfpatch_size;
+}
+
+int getBestSearchLevel(
+    const Matrix2d& A_cur_ref,
+    const int max_level)
+{
+  // Compute patch level in the other image
+  int search_level = 0;
+  double D = A_cur_ref.determinant();
+  while(D > 3.0 && search_level < max_level)
+  {
+    search_level += 1;
+    D *= 0.25;
+  }
+  return search_level;
+}
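+// Worked example (a sketch): the determinant D of A_cur_ref measures the area
+// magnification from the reference patch to the current image. With D = 16
+// the loop runs twice (16 -> 4 -> 1) and returns search_level = 2, i.e. the
+// match is searched on the third pyramid level, where the warp is closest to
+// the identity and the fixed-size patch comparison stays valid.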
+void warpAffine(
+    const Matrix2d& A_cur_ref,
+    const cv::Mat& img_ref,
+    const Vector2d& px_ref,
+    const int level_ref,
+    const int search_level,
+    const int halfpatch_size,
+    uint8_t* patch)
+{
+  const int patch_size = halfpatch_size*2;
+  const Matrix2f A_ref_cur = A_cur_ref.inverse().cast<float>();
+  if(std::isnan(A_ref_cur(0,0)))
+  {
+    printf("Affine warp is NaN, probably camera has no translation\n"); // TODO
+    return;
+  }
+
+  // Perform the warp on a larger patch.
+  // The approach is inverse: we assume that the *result* patch after the affine
+  // transformation is square and its elements are px_patch. We then warp back by
+  // the affinity the locations in this *final* patch to their original pixel
+  // locations in the not-warped image, and interpolate the intensity values
+  // from those original locations.
+  // TODO: A segment patch is not square, but a RotatedRect.
+  // Nevertheless, once the segment is warped (transformation of the end points)
+  // and the ROI determined, the same approach as here can be used
+  // to fill the intensity values in the ROI pixels.
+  uint8_t* patch_ptr = patch;
+  const Vector2f px_ref_pyr = px_ref.cast<float>() / (1<<level_ref);
+  for(int y=0; y<patch_size; ++y)
+  {
+    for(int x=0; x<patch_size; ++x, ++patch_ptr)
+    {
+      Vector2f px_patch(x-halfpatch_size, y-halfpatch_size);
+      px_patch *= (1<<search_level);
+      const Vector2f px(A_ref_cur*px_patch + px_ref_pyr);
+      if(px[0]<0 || px[1]<0 || px[0]>=img_ref.cols-1 || px[1]>=img_ref.rows-1)
+        *patch_ptr = 0;
+      else
+        *patch_ptr = (uint8_t) vk::interpolateMat_8u(img_ref, px[0], px[1]);
+    }
+  }
+}
+
+} // namespace warp
+
+bool depthFromTriangulation(
+    const SE3& T_search_ref,
+    const Vector3d& f_ref,
+    const Vector3d& f_cur,
+    double& depth)
+{
+  Matrix<double,3,2> A; A << T_search_ref.rotation_matrix() * f_ref, f_cur;
+  const Matrix2d AtA = A.transpose()*A;
+  if(AtA.determinant() < 0.000001)
+    return false;
+  const Vector2d depth2 = - AtA.inverse()*A.transpose()*T_search_ref.translation();
+  depth = fabs(depth2[0]);
+  return true;
+}
+
+void Matcher::createPatchFromPatchWithBorder()
+{
+  uint8_t* ref_patch_ptr = patch_;
+  for(int y=1; y<patch_size_+1; ++y, ref_patch_ptr += patch_size_)
+  {
+    uint8_t* ref_patch_border_ptr = patch_with_border_ + y*(patch_size_+2) + 1;
+    for(int x=0; x<patch_size_; ++x)
+      ref_patch_ptr[x] = ref_patch_border_ptr[x];
+  }
+}
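+// The depthFromTriangulation() helper above solves, in the least-squares
+// sense, the 3x2 system  d_ref*(R*f_ref) - d_cur*f_cur = -t  via the normal
+// equations, with unknowns x = (d_ref, -d_cur); a near-zero determinant means
+// the two bearing vectors are almost parallel. Usage sketch (f_ref/f_cur are
+// unit bearing vectors, T_cur_ref the relative pose):
+//
+//   double depth;
+//   if(depthFromTriangulation(T_cur_ref, f_ref, f_cur, depth)) {
+//     Vector3d xyz_ref = f_ref * depth;  // reconstructed point on the ref ray
+//   }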
+bool Matcher::findMatchDirect(
+    const Point& pt,
+    const Frame& cur_frame,
+    Vector2d& px_cur)
+{
+  // get the reference feature in the closest view (frame)
+  if(!pt.getCloseViewObs(cur_frame.pos(), ref_ftr_))
+    return false;
+
+  if(!ref_ftr_->frame->cam_->isInFrame(
+      ref_ftr_->px.cast<int>()/(1<<ref_ftr_->level), halfpatch_size_+2, ref_ftr_->level))
+    return false;
+
+  // warp affine
+  warp::getWarpMatrixAffine(
+      *ref_ftr_->frame->cam_, *cur_frame.cam_, ref_ftr_->px, ref_ftr_->f,
+      (ref_ftr_->frame->pos() - pt.pos_).norm(),
+      cur_frame.T_f_w_ * ref_ftr_->frame->T_f_w_.inverse(), ref_ftr_->level, A_cur_ref_);
+  search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);
+  // is img of ref_frame fully available at any time? that means keeping stored previous images, for how long?
+  warp::warpAffine(A_cur_ref_, ref_ftr_->frame->img_pyr_[ref_ftr_->level], ref_ftr_->px,
+                   ref_ftr_->level, search_level_, halfpatch_size_+1, patch_with_border_);
+  // patch_with_border_ stores the square patch (of pixel intensities) around the reference feature
+  // once the affine transformation is applied to the original reference image;
+  // the border is necessary for gradient operations (intensities at the border must be precomputed by interpolation too!)
+  createPatchFromPatchWithBorder();
+
+  // px_cur should be set
+  Vector2d px_scaled(px_cur/(1<<search_level_));
+
+  bool success = false;
+  PointFeat* pt_ftr = static_cast<PointFeat*>(ref_ftr_);
+  if(pt_ftr->type == PointFeat::EDGELET)
+  {
+    Vector2d dir_cur(A_cur_ref_*pt_ftr->grad);
+    dir_cur.normalize();
+    success = feature_alignment::align1D(
+        cur_frame.img_pyr_[search_level_], dir_cur.cast<float>(),
+        patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
+  }
+  else
+  {
+    success = feature_alignment::align2D(
+        cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+        options_.align_max_iter, px_scaled);
+  }
+  px_cur = px_scaled * (1<<search_level_);
+  return success;
+}
+
+int Matcher::precomputeRefPatch(
+    const Vector3d& pos,
+    const Vector2d& px,
+    const Vector3d& f,
+    const Frame& cur_frame)
+{
+  // warp affine
+  warp::getWarpMatrixAffine(
+      *ref_ftr_->frame->cam_, *cur_frame.cam_, px, f,
+      (ref_ftr_->frame->pos() - pos).norm(),
+      cur_frame.T_f_w_ * ref_ftr_->frame->T_f_w_.inverse(), ref_ftr_->level, A_cur_ref_);
+  search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);
+  // is img of ref_frame fully available at any time? that means keeping stored previous images, for how long?
+  warp::warpAffine(A_cur_ref_, ref_ftr_->frame->img_pyr_[ref_ftr_->level], px,
+                   ref_ftr_->level, search_level_, halfpatch_size_+1, patch_with_border_);
+  // patch_with_border_ stores the square patch (of pixel intensities) around the reference feature
+  // once the affine transformation is applied to the original reference image;
+  // the border is necessary for gradient operations (intensities at the border must be precomputed by interpolation too!)
+  createPatchFromPatchWithBorder();
+
+  return search_level_;
+}
+
+bool Matcher::findMatchDirect(
+    const LineSeg& ls,
+    const Frame& cur_frame,
+    Vector2d& spx_cur,
+    Vector2d& epx_cur)
+{
+  // get the reference feature in the closest view (frame)
+  if(!ls.getCloseViewObs(cur_frame.pos(), ref_ftr_))
+    return false;
+
+  LineFeat* ref_ls_ftr_ = static_cast<LineFeat*>(ref_ftr_);
+  // abort if any of the segment extreme points does not lie inside the reference image
+  if(!ref_ftr_->frame->cam_->isInFrame(
+       ref_ls_ftr_->spx.cast<int>()/(1<<ref_ftr_->level), halfpatch_size_+2, ref_ftr_->level) ||
+     !ref_ftr_->frame->cam_->isInFrame(
+       ref_ls_ftr_->epx.cast<int>()/(1<<ref_ftr_->level), halfpatch_size_+2, ref_ftr_->level))
+    return false;
+
+  bool success = true;
+  // Optimize start point
+  {
+    search_level_ = precomputeRefPatch(ls.spos_, ref_ls_ftr_->spx, ref_ls_ftr_->sf, cur_frame);
+
+    // optimize feature position in the search_level_ of the pyramid
+    Vector2d px_scaled(spx_cur/(1<<search_level_));
+    success = success && feature_alignment::align2D(
+        cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+        options_.align_max_iter, px_scaled);
+    spx_cur = px_scaled * (1<<search_level_);
+  }
+  // Optimize end point
+  {
+    search_level_ = precomputeRefPatch(ls.epos_, ref_ls_ftr_->epx, ref_ls_ftr_->ef, cur_frame);
+
+    // optimize feature position in the search_level_ of the pyramid
+    Vector2d px_scaled(epx_cur/(1<<search_level_));
+    success = success && feature_alignment::align2D(
+        cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+        options_.align_max_iter, px_scaled);
+    epx_cur = px_scaled * (1<<search_level_);
+  }
+  return success;
+}
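+// Usage sketch (hypothetical call site): both endpoint alignments must
+// succeed for the segment match to count, so a caller typically seeds both
+// estimates from the current pose and checks the combined result:
+//
+//   Vector2d spx = frame->w2c(ls->spos_);
+//   Vector2d epx = frame->w2c(ls->epos_);
+//   if(matcher.findMatchDirect(*ls, *frame, spx, epx))
+//   { /* spx and epx now hold the refined endpoint positions */ }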
+bool Matcher::findEpipolarMatchDirect(
+    const Frame& ref_frame,
+    const Frame& cur_frame,
+    const Feature& ref_ftr,
+    const double d_estimate,
+    const double d_min,
+    const double d_max,
+    double& depth)
+{
+  SE3 T_cur_ref = cur_frame.T_f_w_ * ref_frame.T_f_w_.inverse();
+  int zmssd_best = PatchScore::threshold();
+  Vector2d uv_best;
+
+  // Compute start and end of the epipolar line in the old kf for match search, on the unit plane!
+  Vector2d A = vk::project2d(T_cur_ref * (ref_ftr.f*d_min));
+  Vector2d B = vk::project2d(T_cur_ref * (ref_ftr.f*d_max));
+  epi_dir_ = A - B;
+
+  // Compute affine warp matrix
+  warp::getWarpMatrixAffine(
+      *ref_frame.cam_, *cur_frame.cam_, ref_ftr.px, ref_ftr.f,
+      d_estimate, T_cur_ref, ref_ftr.level, A_cur_ref_);
+
+  // feature pre-selection
+  reject_ = false;
+  const PointFeat* pt_ftr = static_cast<const PointFeat*>(&ref_ftr);
+  if(pt_ftr->type == PointFeat::EDGELET && options_.epi_search_edgelet_filtering)
+  {
+    const Vector2d grad_cur = (A_cur_ref_ * pt_ftr->grad).normalized();
+    const double cosangle = fabs(grad_cur.dot(epi_dir_.normalized()));
+    if(cosangle < options_.epi_search_edgelet_max_angle) {
+      reject_ = true;
+      return false;
+    }
+  }
+
+  search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);
+
+  // Find length of search range on epipolar line
+  Vector2d px_A(cur_frame.cam_->world2cam(A));
+  Vector2d px_B(cur_frame.cam_->world2cam(B));
+  epi_length_ = (px_A-px_B).norm() / (1<<search_level_);
+
+  // Warp reference patch at ref_level
+  warp::warpAffine(A_cur_ref_, ref_frame.img_pyr_[ref_ftr.level], ref_ftr.px,
+                   ref_ftr.level, search_level_, halfpatch_size_+1, patch_with_border_);
+  createPatchFromPatchWithBorder();
+
+  if(epi_length_ < 2.0)
+  {
+    px_cur_ = (px_A+px_B)/2.0;
+    Vector2d px_scaled(px_cur_/(1<<search_level_));
+    bool res;
+    if(options_.align_1d)
+      res = feature_alignment::align1D(
+          cur_frame.img_pyr_[search_level_], (px_A-px_B).cast<float>().normalized(),
+          patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
+    else
+      res = feature_alignment::align2D(
+          cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+          options_.align_max_iter, px_scaled);
+    if(res)
+    {
+      px_cur_ = px_scaled*(1<<search_level_);
+      if(depthFromTriangulation(T_cur_ref, ref_ftr.f, cur_frame.cam_->cam2world(px_cur_), depth))
+        return true;
+    }
+    return false;
+  }
+
+  size_t n_steps = epi_length_/0.7; // one step per pixel
+  Vector2d step = epi_dir_/n_steps;
+
+  if(n_steps > options_.max_epi_search_steps)
+  {
+    printf("WARNING: skip epipolar search: %zu evaluations, px_length=%f, d_min=%f, d_max=%f.\n",
+           n_steps, epi_length_, d_min, d_max);
+    return false;
+  }
+
+  // for matching, precompute sum and sum2 of the warped reference patch
+  int pixel_sum = 0;
+  int pixel_sum_square = 0;
+  PatchScore patch_score(patch_);
+
+  // now we sample along the epipolar line
+  Vector2d uv = B-step;
+  Vector2i last_checked_pxi(0,0);
+  ++n_steps;
+  for(size_t i=0; i<n_steps; ++i, uv+=step)
+  {
+    Vector2d px(cur_frame.cam_->world2cam(uv));
+    Vector2i pxi(px[0]/(1<<search_level_)+0.5,
+                 px[1]/(1<<search_level_)+0.5); // +0.5 to round to closest int
+
+    if(pxi == last_checked_pxi)
+      continue;
+    last_checked_pxi = pxi;
+
+    // check if the patch is fully within the new frame
+    if(!cur_frame.cam_->isInFrame(pxi, patch_size_, search_level_))
+      continue;
+
+    // TODO interpolation would probably be a good idea
+    uint8_t* cur_patch_ptr = cur_frame.img_pyr_[search_level_].data
+                             + (pxi[1]-halfpatch_size_)*cur_frame.img_pyr_[search_level_].cols
+                             + (pxi[0]-halfpatch_size_);
+    int zmssd = patch_score.computeScore(cur_patch_ptr, cur_frame.img_pyr_[search_level_].cols);
+
+    if(zmssd < zmssd_best) {
+      zmssd_best = zmssd;
+      uv_best = uv;
+    }
+  }
+
+  if(zmssd_best < PatchScore::threshold())
+  {
+    if(options_.subpix_refinement)
+    {
+      px_cur_ = cur_frame.cam_->world2cam(uv_best);
+      Vector2d px_scaled(px_cur_/(1<<search_level_));
+      bool res;
+      if(options_.align_1d)
+        res = feature_alignment::align1D(
+            cur_frame.img_pyr_[search_level_], (px_A-px_B).cast<float>().normalized(),
+            patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
+      else
+        res = feature_alignment::align2D(
+            cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+            options_.align_max_iter, px_scaled);
+      if(res)
+      {
+        px_cur_ = px_scaled*(1<<search_level_);
+        if(depthFromTriangulation(T_cur_ref, ref_ftr.f, cur_frame.cam_->cam2world(px_cur_), depth))
+          return true;
+      }
+      return false;
+    }
+    px_cur_ = cur_frame.cam_->world2cam(uv_best);
+    if(depthFromTriangulation(T_cur_ref, ref_ftr.f, vk::unproject2d(uv_best).normalized(), depth))
+      return true;
+  }
+  return false;
+}
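+// Sampling density, sketched: the search walks the epipolar segment with one
+// patch comparison roughly every 0.7 px at the chosen pyramid level, e.g.
+//
+//   size_t n_steps = epi_length_ / 0.7;   // 35 px -> 50 steps
+//   Vector2d step  = epi_dir_ / n_steps;  // increment on the unit plane
+//
+// and the search is skipped entirely when n_steps exceeds
+// options_.max_epi_search_steps, since the depth interval is then too
+// uncertain for an exhaustive scan to be worthwhile.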
+bool Matcher::findEpipolarMatchDirectSegmentEndpoint(
+    const Frame& ref_frame,
+    const Frame& cur_frame,
+    const Feature& ref_ftr,
+    const double d_estimate,
+    const double d_min,
+    const double d_max,
+    double& depth)
+{
+  SE3 T_cur_ref = cur_frame.T_f_w_ * ref_frame.T_f_w_.inverse();
+  int zmssd_best = PatchScore::threshold();
+  Vector2d uv_best;
+
+  // Fix this
+  if( std::isnan(d_min) || std::isnan(d_max) )
+  {
+    reject_ = true;
+    return false;
+  }
+
+  // Compute start and end of the epipolar line in the old kf for match search, on the unit plane!
+  Vector2d A = vk::project2d(T_cur_ref * (ref_ftr.f*d_min));
+  Vector2d B = vk::project2d(T_cur_ref * (ref_ftr.f*d_max));
+  epi_dir_ = A - B;
+
+  // Compute affine warp matrix
+  warp::getWarpMatrixAffine(
+      *ref_frame.cam_, *cur_frame.cam_, ref_ftr.px, ref_ftr.f,
+      d_estimate, T_cur_ref, ref_ftr.level, A_cur_ref_);
+
+  // feature pre-selection
+  reject_ = false;
+  const LineFeat* ls_ftr = static_cast<const LineFeat*>(&ref_ftr);
+
+  /*const Vector2d grad_cur = (A_cur_ref_ * pt_ftr->grad).normalized();
+  const double cosangle = fabs(grad_cur.dot(epi_dir_.normalized()));
+  if(cosangle < options_.epi_search_edgelet_max_angle) {
+    reject_ = true;
+    return false;
+  }*/
+
+  /*double epi_ang_ = std::min(fabs(epi_dir_[0]),fabs(epi_dir_[1])) / std::max(fabs(epi_dir_[0]),fabs(epi_dir_[1]));
+  double ang_dif_ = epi_ang_*180.0/3.1416 - ls_ftr->angle*180.0/3.1416;
+  if(ang_dif_ >  180.0) ang_dif_ -= 360.0;
+  if(ang_dif_ < -180.0) ang_dif_ += 360.0;
+  if( fabsf(ang_dif_) < 30.0 ) // skip lines aligned with the epipolar line
+  {
+    reject_ = true;
+    return false;
+  }*/
+
+  search_level_ = warp::getBestSearchLevel(A_cur_ref_, Config::nPyrLevels()-1);
+
+  // Find length of search range on epipolar line
+  Vector2d px_A(cur_frame.cam_->world2cam(A));
+  Vector2d px_B(cur_frame.cam_->world2cam(B));
+  epi_length_ = (px_A-px_B).norm() / (1<<search_level_);
+
+  // Warp reference patch at ref_level
+  warp::warpAffine(A_cur_ref_, ref_frame.img_pyr_[ref_ftr.level], ref_ftr.px,
+                   ref_ftr.level, search_level_, halfpatch_size_+1, patch_with_border_);
+  createPatchFromPatchWithBorder();
+
+  if(epi_length_ < 2.0)
+  {
+    px_cur_ = (px_A+px_B)/2.0;
+    Vector2d px_scaled(px_cur_/(1<<search_level_));
+    bool res;
+    if(options_.align_1d)
+      res = feature_alignment::align1D(
+          cur_frame.img_pyr_[search_level_], (px_A-px_B).cast<float>().normalized(),
+          patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
+    else
+      res = feature_alignment::align2D(
+          cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+          options_.align_max_iter, px_scaled);
+    if(res)
+    {
+      px_cur_ = px_scaled*(1<<search_level_);
+      if(depthFromTriangulation(T_cur_ref, ref_ftr.f, cur_frame.cam_->cam2world(px_cur_), depth))
+        return true;
+    }
+    return false;
+  }
+
+  size_t n_steps = epi_length_/0.7; // one step per pixel
+  Vector2d step = epi_dir_/n_steps;
+
+  if(n_steps > options_.max_epi_search_steps)
+  {
+    printf("WARNING: skip epipolar search: %zu evaluations, px_length=%f, d_min=%f, d_max=%f.\n",
+           n_steps, epi_length_, d_min, d_max);
+    return false;
+  }
+
+  // for matching, precompute sum and sum2 of the warped reference patch
+  int pixel_sum = 0;
+  int pixel_sum_square = 0;
+  PatchScore patch_score(patch_);
+
+  // now we sample along the epipolar line
+  Vector2d uv = B-step;
+  Vector2i last_checked_pxi(0,0);
+  ++n_steps;
+  for(size_t i=0; i<n_steps; ++i, uv+=step)
+  {
+    Vector2d px(cur_frame.cam_->world2cam(uv));
+    Vector2i pxi(px[0]/(1<<search_level_)+0.5,
+                 px[1]/(1<<search_level_)+0.5); // +0.5 to round to closest int
+
+    if(pxi == last_checked_pxi)
+      continue;
+    last_checked_pxi = pxi;
+
+    // check if the patch is fully within the new frame
+    if(!cur_frame.cam_->isInFrame(pxi, patch_size_, search_level_))
+      continue;
+
+    // TODO interpolation would probably be a good idea
+    uint8_t* cur_patch_ptr = cur_frame.img_pyr_[search_level_].data
+                             + (pxi[1]-halfpatch_size_)*cur_frame.img_pyr_[search_level_].cols
+                             + (pxi[0]-halfpatch_size_);
+    int zmssd = patch_score.computeScore(cur_patch_ptr, cur_frame.img_pyr_[search_level_].cols);
+
+    if(zmssd < zmssd_best) {
+      zmssd_best = zmssd;
+      uv_best = uv;
+    }
+  }
+
+  if(zmssd_best < PatchScore::threshold())
+  {
+    if(options_.subpix_refinement)
+    {
+      px_cur_ = cur_frame.cam_->world2cam(uv_best);
+      Vector2d px_scaled(px_cur_/(1<<search_level_));
+      bool res;
+      if(options_.align_1d)
+        res = feature_alignment::align1D(
+            cur_frame.img_pyr_[search_level_], (px_A-px_B).cast<float>().normalized(),
+            patch_with_border_, patch_, options_.align_max_iter, px_scaled, h_inv_);
+      else
+        res = feature_alignment::align2D(
+            cur_frame.img_pyr_[search_level_], patch_with_border_, patch_,
+            options_.align_max_iter, px_scaled);
+      if(res)
+      {
+        px_cur_ = px_scaled*(1<<search_level_);
+        if(depthFromTriangulation(T_cur_ref, ref_ftr.f, cur_frame.cam_->cam2world(px_cur_), depth))
+          return true;
+      }
+      return false;
+    }
+    px_cur_ = cur_frame.cam_->world2cam(uv_best);
+    if(depthFromTriangulation(T_cur_ref, ref_ftr.f, vk::unproject2d(uv_best).normalized(), depth))
+      return true;
+  }
+  return false;
+}
+
+
+} // namespace plsvo
diff --git a/src/point.cpp b/src/point.cpp
new file mode 100644
index 0000000..b4cc67b
--- /dev/null
+++ b/src/point.cpp
@@ -0,0 +1,221 @@
+// This file is part of SVO - Semi-direct Visual Odometry.
+//
+// Copyright (C) 2014 Christian Forster <forster at ifi dot uzh dot ch>
+// (Robotics and Perception Group, University of Zurich, Switzerland).
+//
+// SVO is free software: you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the Free Software
+// Foundation, either version 3 of the License, or any later version.
+//
+// SVO is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#include <stdexcept>
+#include <vikit/math_utils.h>
+#include <plsvo/point.h>
+#include <plsvo/frame.h>
+#include <plsvo/feature.h>
+
+namespace plsvo {
+
+int Point::point_counter_ = 0;
+
+Point::Point(const Vector3d& pos) :
+  id_(point_counter_++),
+  pos_(pos),
+  normal_set_(false),
+  n_obs_(0),
+  v_pt_(NULL),
+  last_published_ts_(0),
+  last_projected_kf_id_(-1),
+  type_(TYPE_UNKNOWN),
+  n_failed_reproj_(0),
+  n_succeeded_reproj_(0),
+  last_structure_optim_(0)
+{}
+
+Point::Point(const Vector3d& pos, Feature* ftr) :
+  id_(point_counter_++),
+  pos_(pos),
+  normal_set_(false),
+  n_obs_(1),
+  v_pt_(NULL),
+  last_published_ts_(0),
+  last_projected_kf_id_(-1),
+  type_(TYPE_UNKNOWN),
+  n_failed_reproj_(0),
+  n_succeeded_reproj_(0),
+  last_structure_optim_(0)
+{
+  obs_.push_front(ftr);
+}
+
+Point::~Point()
+{}
+
+void Point::addFrameRef(Feature* ftr)
+{
+  obs_.push_front(ftr);
+  ++n_obs_;
+}
+
+Feature* Point::findFrameRef(Frame* frame)
+{
+  for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it)
+    if((*it)->frame == frame)
+      return *it;
+  return NULL; // no keyframe found
+}
+
+bool Point::deleteFrameRef(Frame* frame)
+{
+  for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it)
+  {
+    if((*it)->frame == frame)
+    {
+      obs_.erase(it);
+      return true;
+    }
+  }
+  return false;
+}
+
+void Point::initNormal()
+{
+  assert(!obs_.empty());
+  const Feature* ftr = obs_.back();
+  assert(ftr->frame != NULL);
+  normal_ = ftr->frame->T_f_w_.rotation_matrix().transpose()*(-ftr->f);
+  normal_information_ = DiagonalMatrix<double,3,3>(pow(20/(pos_-ftr->frame->pos()).norm(),2), 1.0, 1.0);
+  normal_set_ = true;
+}
+
+bool Point::getCloseViewObs(const Vector3d& framepos, Feature*& ftr) const
+{
+  // TODO: get frame with same point of view AND same pyramid level!
+  Vector3d obs_dir(framepos - pos_); obs_dir.normalize();
+  auto min_it=obs_.begin();
+  double min_cos_angle = 0;
+  for(auto it=obs_.begin(), ite=obs_.end(); it!=ite; ++it)
+  {
+    Vector3d dir((*it)->frame->pos() - pos_); dir.normalize();
+    double cos_angle = obs_dir.dot(dir);
+    if(cos_angle > min_cos_angle)
+    {
+      min_cos_angle = cos_angle;
+      min_it = it;
+    }
+  }
+  ftr = *min_it;
+  if(min_cos_angle < 0.5) // assume that observation angles larger than 60 degrees are useless
+    return false;
+  return true;
+}
+
+void Point::optimize(const size_t n_iter)
+{
+  Vector3d old_point = pos_;
+  double chi2 = 0.0;
+  Matrix3d A;
+  Vector3d b;
+
+  for(size_t i=0; i<n_iter; i++)
+  {
+    A.setZero();
+    b.setZero();
+    double new_chi2 = 0.0;
+
+    // compute residuals
+    for(auto it=obs_.begin(); it!=obs_.end(); ++it)
+    {
+      Matrix23d J;
+      const Vector3d p_in_f((*it)->frame->T_f_w_ * pos_);
+      Point::jacobian_xyz2uv(p_in_f, (*it)->frame->T_f_w_.rotation_matrix(), J);
+      const Vector2d e(vk::project2d((*it)->f) - vk::project2d(p_in_f));
+      new_chi2 += e.squaredNorm();
+      A.noalias() += J.transpose() * J;
+      b.noalias() -= J.transpose() * e;
+    }
+
+    // solve linear system
+    const Vector3d dp(A.ldlt().solve(b));
+
+    // check if error increased
+    if((i > 0 && new_chi2 > chi2) || (bool) std::isnan((double)dp[0]))
+    {
+#ifdef POINT_OPTIMIZER_DEBUG
+      cout << "it " << i
+           << "\t FAILURE \t new_chi2 = " << new_chi2 << endl;
+#endif
+      pos_ = old_point; // roll-back
+      break;
+    }
+
+    // update the model
+    Vector3d new_point = pos_ + dp;
+    old_point = pos_;
+    pos_ = new_point;
+    chi2 = new_chi2;
+#ifdef POINT_OPTIMIZER_DEBUG
+    cout << "it " << i
+         << "\t Success \t new_chi2 = " << new_chi2
+         << "\t norm(b) = " << vk::norm_max(b)
+         << endl;
+#endif
+
+    // stop when converged
+    if(vk::norm_max(dp) <= EPS)
+      break;
+  }
+#ifdef POINT_OPTIMIZER_DEBUG
+  cout << endl;
+#endif
+}
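+// A sketch of a typical call site (assumed, not shown in this file): because
+// the state is only 3-dimensional, a handful of Gauss-Newton iterations is
+// enough during structure-only refinement, and the roll-back above guards
+// against divergence:
+//
+//   pt->optimize(5);   // refines pos_; reverts if the chi2 error increases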
+// Line segment
+
+int LineSeg::point_counter_ = 0;
+
+LineSeg::LineSeg(const Vector3d& spos, const Vector3d& epos) :
+  id_(point_counter_++),
+  spos_(spos),
+  epos_(epos),
+  normal_set_(false),
+  n_obs_(0),
+  v_pt_(NULL),
+  last_published_ts_(0),
+  last_projected_kf_id_(-1),
+  type_(TYPE_UNKNOWN),
+  n_failed_reproj_(0),
+  n_succeeded_reproj_(0),
+  last_structure_optim_(0)
+{}
+
+LineSeg::LineSeg(const Vector3d& spos, const Vector3d& epos, Feature* ftr) :
+  id_(point_counter_++),
+  spos_(spos),
+  epos_(epos),
+  normal_set_(false),
+  n_obs_(1),
+  v_pt_(NULL),
+  last_published_ts_(0),
+  last_projected_kf_id_(-1),
+  type_(TYPE_UNKNOWN),
+  n_failed_reproj_(0),
+  n_succeeded_reproj_(0),
+  last_structure_optim_(0)
+{
+  obs_.push_front(ftr);
+}
+
+LineSeg::~LineSeg()
+{}
+
+} // namespace plsvo
diff --git a/src/pose_optimizer.cpp b/src/pose_optimizer.cpp
new file mode 100644
index 0000000..de27e2a
--- /dev/null
+++ b/src/pose_optimizer.cpp
@@ -0,0 +1,586 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining             **
+**         Points and Line Segments                                       **
+******************************************************************************
+**                                                                        **
+** Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga             **
+** Copyright(c) 2016, MAPIR group, University of Malaga                   **
+**                                                                        **
+** This library extends SVO to the case of also using line-segments, thus **
+** it is highly based on the previous implementation of SVO:              **
+** https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                        **
+** This program is free software: you can redistribute it and/or modify   **
+** it under the terms of the GNU General Public License (version 3) as    **
+** published by the Free Software Foundation.                             **
+**                                                                        **
+** This program is distributed in the hope that it will be useful, but    **
+** WITHOUT ANY WARRANTY; without even the implied warranty of             **
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+** GNU General Public License for more details.                           **
+**                                                                        **
+** You should have received a copy of the GNU General Public License      **
+** along with this program. If not, see <http://www.gnu.org/licenses/>.
**
+**                                                                        **
+*****************************************************************************/
+
+#include <stdexcept>
+#include <plsvo/pose_optimizer.h>
+#include <plsvo/frame.h>
+#include <plsvo/feature.h>
+#include <plsvo/point.h>
+#include <vikit/robust_cost.h>
+#include <vikit/math_utils.h>
+
+namespace plsvo {
+namespace pose_optimizer {
+
+void optimizeGaussNewton(
+    const double reproj_thresh,
+    const size_t n_iter,
+    const bool verbose,
+    FramePtr& frame,
+    double& estimated_scale,
+    double& error_init,
+    double& error_final,
+    size_t& num_obs_pt,
+    size_t& num_obs_ls)
+{
+
+  // init
+  double chi2(0.0);
+  vector<float> chi2_vec_init, chi2_vec_final;
+  vk::robust_cost::TukeyWeightFunction weight_function;
+  SE3 T_old(frame->T_f_w_);
+  Matrix6d A;
+  Vector6d b;
+
+  // compute the scale of the error for robust estimation
+  vk::robust_cost::MADScaleEstimator scale_estimator;
+  std::vector<float> errors; errors.reserve(frame->pt_fts_.size()+frame->seg_fts_.size());
+  for(auto it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+  {
+    if((*it)->feat3D == NULL)
+      continue;
+    Vector2d e = vk::project2d((*it)->f)
+               - vk::project2d(frame->T_f_w_ * (*it)->feat3D->pos_);
+    e *= 1.0 / (1<<(*it)->level);
+    errors.push_back(e.norm());
+  }
+  double estimated_scale_pt = scale_estimator.compute(errors);
+  num_obs_pt = errors.size();
+
+  std::vector<float> errors_ls; errors_ls.reserve(frame->seg_fts_.size());
+  for(auto it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+  {
+    LineFeat* it_ = static_cast<LineFeat*>( *it );
+    if(it_->feat3D == NULL)
+      continue;
+    // Check distance function
+    Vector3d line   = it_->line;
+    Vector3d s_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->spos_) );
+    Vector3d e_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->epos_) );
+    float es = line.dot(s_proj);
+    float ee = line.dot(e_proj);
+    errors.push_back( sqrt(es*es+ee*ee) );
+    errors_ls.push_back( sqrt(es*es+ee*ee) );
+  }
+  if(errors.empty())
+    return;
+  num_obs_ls = errors_ls.size();
+
+  // Estimate the scale of the line segments' errors
+  double estimated_scale_ls = 1.0;
+  if(!errors_ls.empty())
+    estimated_scale_ls = scale_estimator.compute(errors_ls);
+  estimated_scale = estimated_scale_pt;
+
+  size_t num_obs = num_obs_pt + num_obs_ls;
+  chi2_vec_init.reserve(num_obs);
+  chi2_vec_final.reserve(num_obs);
+  double scale_pt = estimated_scale_pt;
+  double scale_ls = estimated_scale_ls;
+  for(size_t iter=0; iter<n_iter; iter++)
+  {
+    b.setZero();
+    A.setZero();
+    double new_chi2(0.0);
+
+    // compute residual for points
+    for(auto it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+    {
+      if((*it)->feat3D == NULL)
+        continue;
+      Matrix26d J;
+      Vector3d xyz_f(frame->T_f_w_ * (*it)->feat3D->pos_);
+      Frame::jacobian_xyz2uv(xyz_f, J);
+      Vector2d e = vk::project2d((*it)->f) - vk::project2d(xyz_f);
+      double sqrt_inv_cov = 1.0 / (1<<(*it)->level);
+      e *= sqrt_inv_cov;
+      if(iter == 0)
+        chi2_vec_init.push_back(e.squaredNorm()); // just for debug
+      J *= sqrt_inv_cov;
+      double weight = weight_function.value(e.norm()/scale_pt);
+      //double weight = 1.0 / ( 1.0 + e.norm()/scale_pt );
+      A.noalias() += J.transpose()*J*weight;
+      b.noalias() -= J.transpose()*e*weight;
+      new_chi2 += e.squaredNorm()*weight;
+    }
+    // compute residual for line segments
+    for(auto it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+    {
+      LineFeat* it_ = static_cast<LineFeat*>( *it );
+      if((it_)->feat3D == NULL)
+        continue;
+      Vector2d e, l_aux;
+      Matrix26d J_s, J_e, J;
+      Vector3d xyz_f_s(frame->T_f_w_ * (it_)->feat3D->spos_);
+      Vector3d xyz_f_e(frame->T_f_w_ * (it_)->feat3D->epos_);
+      Frame::jacobian_xyz2uv(xyz_f_s, J_s);
+      Frame::jacobian_xyz2uv(xyz_f_e, J_e);
+
+      Vector3d s_proj = vk::unproject2d( vk::project2d(xyz_f_s) );
+      Vector3d e_proj = vk::unproject2d( vk::project2d(xyz_f_e) );
+      Vector3d line = it_->line;
+      float ds = line.dot(s_proj);
+      float de = line.dot(e_proj);
+      e << ds, de;
+
+      double sqrt_inv_cov = 1.0 / (1<<(it_)->level); // CHECK
+      e *= sqrt_inv_cov;
+      if(iter == 0)
+        chi2_vec_init.push_back(e.squaredNorm()); // just for debug
+
+      l_aux << line(0), line(1);
+      J_s *= sqrt_inv_cov * ds / e.norm();
+      J_e *= sqrt_inv_cov * de / e.norm(); // end-point row scales with de
+      J.block(0,0,1,6) = l_aux.transpose() * J_s;
+      J.block(1,0,1,6) = l_aux.transpose() * J_e;
+
+      double weight = weight_function.value(e.norm()/scale_ls);
+      //double weight = 1.0 / ( 1.0 + e.norm()/scale_ls );
+      A.noalias() += J.transpose()*J*weight;
+      b.noalias() -= J.transpose()*e*weight;
+      new_chi2 += e.squaredNorm()*weight;
+    }
+
+    // solve linear system
+    const Vector6d dT(A.ldlt().solve(b));
+
+    // check if error increased
+    if((iter > 0 && new_chi2 > chi2) || (bool) std::isnan((double)dT[0]))
+    {
+      if(verbose)
+        std::cout << "it " << iter
+                  << "\t FAILURE \t new_chi2 = " << new_chi2 << std::endl;
+      frame->T_f_w_ = T_old; // roll-back
+      break;
+    }
+
+    // update the model
+    SE3 T_new = SE3::exp(dT)*frame->T_f_w_;
+    T_old = frame->T_f_w_;
+    frame->T_f_w_ = T_new;
+    chi2 = new_chi2;
+    if(verbose)
+      std::cout << "it " << iter
+                << "\t Success \t new_chi2 = " << new_chi2
+                << "\t norm(dT) = " << vk::norm_max(dT) << std::endl;
+
+    // stop when converged
+    if(vk::norm_max(dT) <= EPS)
+      break;
+  }
+
+  // Set covariance as inverse information matrix. Optimistic estimator!
+  const double pixel_variance = 1.0;
+  frame->Cov_ = pixel_variance*(A*std::pow(frame->cam_->errorMultiplier2(),2)).inverse();
+
+  // Remove measurements with too large a reprojection error
+  double reproj_thresh_scaled_pt = reproj_thresh / frame->cam_->errorMultiplier2();
+  double reproj_thresh_scaled_ls = reproj_thresh_scaled_pt * estimated_scale_ls / estimated_scale_pt;
+  size_t n_deleted_refs_pt = 0;
+  size_t n_deleted_refs_ls = 0;
+  for(list<PointFeat*>::iterator it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+  {
+    if((*it)->feat3D == NULL)
+      continue;
+
+    Vector2d e = vk::project2d((*it)->f) - vk::project2d(frame->T_f_w_ * (*it)->feat3D->pos_ );
+    double sqrt_inv_cov = 1.0 / (1<<(*it)->level);
+    e *= sqrt_inv_cov;
+    chi2_vec_final.push_back(e.squaredNorm());
+    if(e.norm() > reproj_thresh_scaled_pt)
+    {
+      // we don't need to delete a reference in the point since it was not created yet
+      (*it)->feat3D = NULL;
+      ++n_deleted_refs_pt;
+    }
+  }
+
+  for(list<LineFeat*>::iterator it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+  {
+    LineFeat* it_ = static_cast<LineFeat*>( *it );
+    if((it_)->feat3D == NULL)
+      continue;
+    Vector2d e;
+    Vector3d line = it_->line;
+    Vector3d s_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->spos_) );
+    Vector3d e_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->epos_) );
+    e << line.dot(s_proj), line.dot(e_proj);
+    double sqrt_inv_cov = 1.0 / (1<<(it_)->level);
+    e *= sqrt_inv_cov;
+    chi2_vec_final.push_back(e.squaredNorm());
+    if(e.norm() > reproj_thresh_scaled_ls)
+    {
+      // we don't need to delete a reference in the segment since it was not created yet
+      (it_)->feat3D = NULL;
+      ++n_deleted_refs_ls;
+    }
+  }
+
+  error_init=0.0;
+  error_final=0.0;
+  if(!chi2_vec_init.empty())
+    error_init = sqrt(vk::getMedian(chi2_vec_init))*frame->cam_->errorMultiplier2();
+  if(!chi2_vec_final.empty())
+    error_final = sqrt(vk::getMedian(chi2_vec_final))*frame->cam_->errorMultiplier2();
+
+  estimated_scale *= frame->cam_->errorMultiplier2();
+  if(verbose)
+    std::cout << "n deleted obs = " << n_deleted_refs_pt << " points \t "
+              << n_deleted_refs_ls << " lines"
+              << "\t scale = " << estimated_scale
+              << "\t error init = " << error_init
+              << "\t error end = " << error_final << std::endl;
+  num_obs -= (n_deleted_refs_pt+n_deleted_refs_ls);
+  num_obs_pt -= n_deleted_refs_pt;
+  num_obs_ls -= n_deleted_refs_ls;
+}
+
+void optimizeGaussNewton(
+    const double reproj_thresh,
+    const size_t n_iter,
+    const size_t n_iter_ref,
+    const bool verbose,
+    FramePtr& frame,
+    double& estimated_scale,
+    double& error_init,
+    double& error_final,
+    size_t& num_obs_pt,
+    size_t& num_obs_ls)
+{
+
+  // init
+  double chi2(0.0);
+  vector<float> chi2_vec_init, chi2_vec_final;
+  vk::robust_cost::TukeyWeightFunction weight_function;
+  SE3 T_old(frame->T_f_w_);
+  Matrix6d A;
+  Vector6d b;
+
+  // compute the scale of the error for robust estimation
+  vk::robust_cost::MADScaleEstimator scale_estimator;
+  std::vector<float> errors; errors.reserve(frame->pt_fts_.size()+frame->seg_fts_.size());
+  for(auto it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+  {
+    if((*it)->feat3D == NULL)
+      continue;
+    Vector2d e = vk::project2d((*it)->f)
+               - vk::project2d(frame->T_f_w_ * (*it)->feat3D->pos_);
+    e *= 1.0 / (1<<(*it)->level);
+    errors.push_back(e.norm());
+  }
+  double estimated_scale_pt = scale_estimator.compute(errors);
+  num_obs_pt = errors.size();
+
+  std::vector<float> errors_ls; errors_ls.reserve(frame->seg_fts_.size());
+  for(auto it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+  {
+    LineFeat* it_ = static_cast<LineFeat*>( *it );
+    if(it_->feat3D == NULL)
+      continue;
+    // Check distance function
+    Vector3d line   = it_->line;
+    Vector3d s_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->spos_) );
+    Vector3d e_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->epos_) );
+    float es = line.dot(s_proj);
+    float ee = line.dot(e_proj);
+    errors.push_back( sqrt(es*es+ee*ee) );
+    errors_ls.push_back( sqrt(es*es+ee*ee) );
+  }
+  if(errors.empty())
+    return;
+  num_obs_ls = errors_ls.size();
+
+  // Estimate the scale of the line segments' errors
+  double estimated_scale_ls = 1.0;
+  if(!errors_ls.empty())
+    estimated_scale_ls = scale_estimator.compute(errors_ls);
+  estimated_scale = estimated_scale_pt;
+
+  size_t num_obs = num_obs_pt + num_obs_ls;
+  chi2_vec_init.reserve(num_obs);
+  chi2_vec_final.reserve(num_obs);
+  double scale_pt = estimated_scale_pt;
+  double scale_ls = estimated_scale_ls;
+  for(size_t iter=0; iter<n_iter; iter++)
+  {
+    b.setZero();
+    A.setZero();
+    double new_chi2(0.0);
+
+    // compute residual for points
+    for(auto it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+    {
+      if((*it)->feat3D == NULL)
+        continue;
+      Matrix26d J;
+      Vector3d xyz_f(frame->T_f_w_ * (*it)->feat3D->pos_);
+      Frame::jacobian_xyz2uv(xyz_f, J);
+      Vector2d e = vk::project2d((*it)->f) - vk::project2d(xyz_f);
+      double sqrt_inv_cov = 1.0 / (1<<(*it)->level);
+      e *= sqrt_inv_cov;
+      if(iter == 0)
+        chi2_vec_init.push_back(e.squaredNorm()); // just for debug
+      J *= sqrt_inv_cov;
+      double weight = weight_function.value(e.norm()/scale_pt);
+      //double weight = 1.0 / ( 1.0 + e.norm()/scale_pt );
+      A.noalias() += J.transpose()*J*weight;
+      b.noalias() -= J.transpose()*e*weight;
+      new_chi2 += e.squaredNorm()*weight;
+    }
+
+    // compute residual for line segments
+    for(auto it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+    {
+      LineFeat* it_ = static_cast<LineFeat*>( *it );
+      if((it_)->feat3D == NULL)
+        continue;
+      Vector2d e, l_aux;
+      Matrix26d J_s, J_e, J;
+      Vector3d xyz_f_s(frame->T_f_w_ * (it_)->feat3D->spos_);
+      Vector3d xyz_f_e(frame->T_f_w_ * (it_)->feat3D->epos_);
+      Frame::jacobian_xyz2uv(xyz_f_s, J_s);
+      Frame::jacobian_xyz2uv(xyz_f_e, J_e);
+
+      Vector3d s_proj = vk::unproject2d( vk::project2d(xyz_f_s) );
+      Vector3d e_proj = vk::unproject2d( vk::project2d(xyz_f_e) );
+      Vector3d line = it_->line;
+      float ds = line.dot(s_proj);
+      float de = line.dot(e_proj);
+      e << ds, de;
+
+      double sqrt_inv_cov = 1.0 / (1<<(it_)->level); // CHECK
+      e *= sqrt_inv_cov;
+      if(iter == 0)
+        chi2_vec_init.push_back(e.squaredNorm()); // just for debug
+
+      l_aux << line(0), line(1);
+      J_s *= sqrt_inv_cov * ds / e.norm();
+      J_e *= sqrt_inv_cov * de / e.norm(); // end-point row scales with de
+      J.block(0,0,1,6) = l_aux.transpose() * J_s;
+      J.block(1,0,1,6) = l_aux.transpose() * J_e;
+
+      double weight = weight_function.value(e.norm()/scale_ls);
+      //double weight = 1.0 / ( 1.0 + e.norm()/scale_ls );
+      A.noalias() += J.transpose()*J*weight;
+      b.noalias() -= J.transpose()*e*weight;
+      new_chi2 += e.squaredNorm()*weight;
+    }
+
+    // solve linear system
+    const Vector6d dT(A.ldlt().solve(b));
+
+    // check if error increased
+    if((iter > 0 && new_chi2 > chi2) || (bool) std::isnan((double)dT[0]))
+    {
+      if(verbose)
+        std::cout << "it " << iter
+                  << "\t FAILURE \t new_chi2 = " << new_chi2 << std::endl;
+      frame->T_f_w_ = T_old; // roll-back
+      break;
+    }
+
+    // update the model
+    SE3 T_new = SE3::exp(dT)*frame->T_f_w_;
+    T_old = frame->T_f_w_;
+    frame->T_f_w_ = T_new;
+    chi2 = new_chi2;
+    if(verbose)
+      std::cout << "it " << iter
+                << "\t Success \t new_chi2 = " << new_chi2
+                << "\t norm(dT) = " << vk::norm_max(dT) << std::endl;
+
+    // stop when converged
+    if(vk::norm_max(dT) <= EPS)
+      break;
+  }
+
+  // Set covariance as inverse information matrix. Optimistic estimator!
+  const double pixel_variance = 1.0;
+  frame->Cov_ = pixel_variance*(A*std::pow(frame->cam_->errorMultiplier2(),2)).inverse();
+
+  // Remove measurements with too large a reprojection error
+  double reproj_thresh_scaled_pt = reproj_thresh / frame->cam_->errorMultiplier2();
+  double reproj_thresh_scaled_ls = reproj_thresh_scaled_pt * estimated_scale_ls / estimated_scale_pt;
+  size_t n_deleted_refs_pt = 0;
+  size_t n_deleted_refs_ls = 0;
+  for(list<PointFeat*>::iterator it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+  {
+    if((*it)->feat3D == NULL)
+      continue;
+
+    Vector2d e = vk::project2d((*it)->f) - vk::project2d(frame->T_f_w_ * (*it)->feat3D->pos_ );
+    double sqrt_inv_cov = 1.0 / (1<<(*it)->level);
+    e *= sqrt_inv_cov;
+    chi2_vec_final.push_back(e.squaredNorm());
+    if(e.norm() > reproj_thresh_scaled_pt)
+    {
+      // we don't need to delete a reference in the point since it was not created yet
+      (*it)->feat3D = NULL;
+      ++n_deleted_refs_pt;
+    }
+  }
+
+  for(list<LineFeat*>::iterator it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+  {
+    LineFeat* it_ = static_cast<LineFeat*>( *it );
+    if((it_)->feat3D == NULL)
+      continue;
+    Vector2d e;
+    Vector3d line = it_->line;
+    Vector3d s_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->spos_) );
+    Vector3d e_proj = vk::unproject2d( vk::project2d(frame->T_f_w_ * it_->feat3D->epos_) );
+    e << line.dot(s_proj), line.dot(e_proj);
+    double sqrt_inv_cov = 1.0 / (1<<(it_)->level);
+    e *= sqrt_inv_cov;
+    chi2_vec_final.push_back(e.squaredNorm());
+    if(e.norm() > reproj_thresh_scaled_ls)
+    {
+      // we don't need to delete a reference in the segment since it was not created yet
+      (it_)->feat3D = NULL;
+      ++n_deleted_refs_ls;
+    }
+  }
+
+  // refinement with inliers
+  num_obs = errors.size();
+  chi2_vec_init.reserve(num_obs);
+  chi2_vec_final.reserve(num_obs);
+  for(size_t iter=0; iter<n_iter_ref; iter++)
+  {
+    b.setZero();
+    A.setZero();
+    double new_chi2(0.0);
+
+    // compute residual for points
+    for(auto it=frame->pt_fts_.begin(); it!=frame->pt_fts_.end(); ++it)
+    {
+      if((*it)->feat3D == NULL)
+        continue;
+      Matrix26d J;
+      Vector3d xyz_f(frame->T_f_w_ * (*it)->feat3D->pos_);
+      Frame::jacobian_xyz2uv(xyz_f, J);
+      Vector2d e = vk::project2d((*it)->f) - vk::project2d(xyz_f);
+      double sqrt_inv_cov = 1.0 / (1<<(*it)->level);
+      e *= sqrt_inv_cov;
+      if(iter == 0)
+        chi2_vec_init.push_back(e.squaredNorm()); // just for debug
+      J *= sqrt_inv_cov;
+      double weight = weight_function.value(e.norm()/scale_pt);
+      A.noalias() += J.transpose()*J*weight;
+      b.noalias() -= J.transpose()*e*weight;
+      new_chi2 += e.squaredNorm()*weight;
+    }
+
+    // compute residual for line segments
+    for(auto it=frame->seg_fts_.begin(); it!=frame->seg_fts_.end(); ++it)
+    {
+      LineFeat* it_ = static_cast<LineFeat*>( *it );
+      if((it_)->feat3D == NULL)
+        continue;
+      Vector2d e, l_aux;
+      Matrix26d J_s, J_e, J;
+      Vector3d xyz_f_s(frame->T_f_w_ * (it_)->feat3D->spos_);
+      Vector3d xyz_f_e(frame->T_f_w_ * (it_)->feat3D->epos_);
+      Frame::jacobian_xyz2uv(xyz_f_s, J_s);
+      Frame::jacobian_xyz2uv(xyz_f_e, J_e);
+
+      Vector3d s_proj = vk::unproject2d( vk::project2d(xyz_f_s) );
+      Vector3d e_proj = vk::unproject2d( vk::project2d(xyz_f_e) );
+      Vector3d line = it_->line;
+      float ds = line.dot(s_proj);
+      float de = line.dot(e_proj);
+      e << ds, de;
+
+      double sqrt_inv_cov = 1.0 / (1<<(it_)->level); // CHECK
+      e *= sqrt_inv_cov;
+      if(iter == 0)
+        chi2_vec_init.push_back(e.squaredNorm()); // just for debug
+
+      l_aux << line(0), line(1);
+      J_s *= sqrt_inv_cov * ds / e.norm();
+      J_e *= sqrt_inv_cov * de / e.norm(); // end-point row scales with de
+      J.block(0,0,1,6) = l_aux.transpose() * J_s;
+      J.block(1,0,1,6) = l_aux.transpose() * J_e;
+
+      double weight = weight_function.value(e.norm()/scale_ls);
+      A.noalias() += J.transpose()*J*weight;
+      b.noalias() -= J.transpose()*e*weight;
+      new_chi2 += e.squaredNorm()*weight;
+    }
+
+    // solve linear system
+    const Vector6d dT(A.ldlt().solve(b));
+
+    // check if error increased
+    if((iter > 0 && new_chi2 > chi2) || (bool) std::isnan((double)dT[0]))
+    {
+      if(verbose)
+        std::cout << "it " << iter
+                  << "\t FAILURE \t new_chi2 = " << new_chi2 << std::endl;
+      frame->T_f_w_ = T_old; // roll-back
+      break;
+    }
+
+    // update the model
+    SE3 T_new = SE3::exp(dT)*frame->T_f_w_;
+    T_old = frame->T_f_w_;
+    frame->T_f_w_ = T_new;
+    chi2 = new_chi2;
+    if(verbose)
+      std::cout << "it " << iter
+                << "\t Success \t new_chi2 = " << new_chi2
+                << "\t norm(dT) = " << vk::norm_max(dT) << std::endl;
+
+    // stop when converged
+    if(vk::norm_max(dT) <= EPS)
+      break;
+  }
+
+
+  error_init=0.0;
+  error_final=0.0;
+  if(!chi2_vec_init.empty())
+    error_init = sqrt(vk::getMedian(chi2_vec_init))*frame->cam_->errorMultiplier2();
+  if(!chi2_vec_final.empty())
+    error_final = sqrt(vk::getMedian(chi2_vec_final))*frame->cam_->errorMultiplier2();
+
+  estimated_scale *= frame->cam_->errorMultiplier2();
+  if(verbose)
+    std::cout << "n deleted obs = " << n_deleted_refs_pt << " points \t " << n_deleted_refs_ls << " lines"
+              << "\t scale = " << estimated_scale
+              << "\t error init = " << error_init
+              << "\t error end = " << error_final << std::endl;
+  num_obs -= (n_deleted_refs_pt+n_deleted_refs_ls);
+  num_obs_pt -= n_deleted_refs_pt;
+  num_obs_ls -= n_deleted_refs_ls;
+}
+
+} // namespace pose_optimizer
+} // namespace plsvo
+
diff --git a/src/reprojector.cpp b/src/reprojector.cpp
new file mode 100644
index 0000000..4036140
--- /dev/null
+++ b/src/reprojector.cpp
@@ -0,0 +1,425 @@
+/*****************************************************************************
+** PL-SVO: Semi-direct Monocular Visual Odometry by Combining             **
+**         Points and Line Segments                                       **
+******************************************************************************
+**                                                                        **
+** Copyright(c) 2016, Ruben
Gomez-Ojeda, University of Malaga ** +** Copyright(c) 2016, MAPIR group, University of Malaga ** +** ** +** This library extends SVO to the case of also using line-segments, thus ** +** it is highly based on the previous implementation of SVO: ** +** https://github.com/uzh-rpg/rpg_svo ** +** ** +** This program is free software: you can redistribute it and/or modify ** +** it under the terms of the GNU General Public License (version 3) as ** +** published by the Free Software Foundation. ** +** ** +** This program is distributed in the hope that it will be useful, but ** +** WITHOUT ANY WARRANTY; without even the implied warranty of ** +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** +** GNU General Public License for more details. ** +** ** +** You should have received a copy of the GNU General Public License ** +** along with this program. If not, see . ** +** ** +*****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace plsvo { + +Reprojector::Reprojector(vk::AbstractCamera* cam, Map& map) : + map_(map) +{ + initializeGrid(cam); +} + +Reprojector::~Reprojector() +{ + std::for_each(grid_.cells.begin(), grid_.cells.end(), [&](Cell* c){ delete c; }); + std::for_each(gridls_.cells.begin(), gridls_.cells.end(), [&](LineCandidates* c){ delete c; }); +} + +void Reprojector::initializeGrid(vk::AbstractCamera* cam) +{ + // Point features + grid_.cell_size = Config::gridSize(); + grid_.grid_n_cols = ceil(static_cast(cam->width())/grid_.cell_size); + grid_.grid_n_rows = ceil(static_cast(cam->height())/grid_.cell_size); + grid_.cells.resize(grid_.grid_n_cols*grid_.grid_n_rows); + std::for_each(grid_.cells.begin(), grid_.cells.end(), [&](Cell*& c){ c = new Cell; }); + grid_.cell_order.resize(grid_.cells.size()); + for(size_t i=0; i(cam->width())/gridls_.cell_size); + gridls_.grid_n_rows = ceil(static_cast(cam->height())/gridls_.cell_size); + gridls_.cells.resize(gridls_.grid_n_cols*gridls_.grid_n_rows); + std::for_each(gridls_.cells.begin(), gridls_.cells.end(), [&](LineCandidates*& c){ c = new LineCandidates; }); + gridls_.cell_order.resize(gridls_.cells.size()); + for(size_t i=0; iclear(); }); + if(Config::hasLines()) std::for_each(gridls_.cells.begin(), gridls_.cells.end(), [&](LineCandidates* c){ c->clear(); }); +} + +template +int Reprojector::setKfCandidates(FramePtr frame, list fts) +{ + int candidate_counter = 0; + for(auto it=fts.begin(), ite_ftr=fts.end(); it!=ite_ftr; ++it) + { + // check if the feature has a 3D object assigned + if((*it)->feat3D == NULL) + continue; + // make sure we project a point only once + if((*it)->feat3D->last_projected_kf_id_ == frame->id_) + continue; + (*it)->feat3D->last_projected_kf_id_ = frame->id_; + if(reproject(frame, (*it)->feat3D)) + // increment the number of candidates taken successfully + candidate_counter++; + } + return candidate_counter; +} + +template +void Reprojector::setMapCandidates(FramePtr frame, MapCandidatesT &map_candidates) +{ + boost::unique_lock lock(map_candidates.mut_); // the mutex will be unlocked when out of scope + auto it=map_candidates.candidates_.begin(); + while(it!=map_candidates.candidates_.end()) + { + if(!reproject(frame, it->first)) + { + // if the reprojection of the map candidate point failed, + // increment the counter of failed reprojections (assess the point quality) + it->first->n_failed_reproj_ += 3; + 
if(it->first->n_failed_reproj_ > 30) + { + // if the reprojection failed too many times, remove the map candidate point + map_candidates.deleteCandidate(*it); + it = map_candidates.candidates_.erase(it); + continue; + } + } + ++it; + } // end-while-loop +} + +void Reprojector::reprojectMap( + FramePtr frame, + std::vector< std::pair >& overlap_kfs) +{ + resetReprojGrid(); + + // Reset candidate lines: TODO in the future this could be fused into resetGrid() + n_ls_matches_ = 0; + //lines_.clear(); + + // Identify those Keyframes which share a common field of view. + SVO_START_TIMER("reproject_kfs"); + list< pair > close_kfs; + map_.getCloseKeyframes(frame, close_kfs); + + // Sort KFs with overlap according to their closeness (2nd value of pairs in the list) + close_kfs.sort(boost::bind(&std::pair::second, _1) < + boost::bind(&std::pair::second, _2)); + + // Reproject all map features of the closest N kfs with overlap. + size_t n_kfs = 0; + overlap_kfs.reserve(options_.max_n_kfs); + for(auto it_frame=close_kfs.begin(), ite_frame=close_kfs.end(); + it_frame!=ite_frame && n_kfsfirst; + // add the current frame to the (output) list of keyframes with overlap + // initialize the counter of candidates from this frame (2nd element in pair) to zero + overlap_kfs.push_back(pair(ref_frame,0)); + // Consider for candidate each mappoint in the ref KF that the current (input) KF observes + // We only store in which grid cell the points fall. + // Add each corresponding valid new Candidate to its cell in the grid. + int num_pt_success = setKfCandidates( frame, ref_frame->pt_fts_ ); + overlap_kfs.back().second += num_pt_success; + // Add each line segment in the ref KF that the current (input) KF observes + int num_seg_success = setKfCandidates( frame, ref_frame->seg_fts_ ); + overlap_kfs.back().second += num_seg_success; + } + SVO_STOP_TIMER("reproject_kfs"); + + // Now project all map candidates + // (candidates in the map are created from converged seeds) + SVO_START_TIMER("reproject_candidates"); + // Point candidates + // (same logic as above to populate the cell grid but taking candidate points from the map object) + setMapCandidates(frame, map_.point_candidates_); + // Segment candidates + setMapCandidates(frame, map_.segment_candidates_); + SVO_STOP_TIMER("reproject_candidates"); + + // Now we go through each grid cell and select one point to match. + // At the end, we should have at maximum one reprojected point per cell. 
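+  // Sketch of the cell bookkeeping used below (assuming row-major cell
+  // storage, as in Reprojector::reproject): a point at pixel (u,v) falls in
+  //
+  //   const int k = static_cast<int>(v/grid_.cell_size)*grid_.grid_n_cols
+  //               + static_cast<int>(u/grid_.cell_size);  // grid_.cells.at(k)
+  //
+  // and grid_.cell_order holds a shuffled visiting order, so that an early
+  // break (once Config::maxFts() is reached) still samples cells uniformly
+  // over the image.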
+ SVO_START_TIMER("feature_align"); + for(size_t i=0; i (size_t) Config::maxFts()) + break; // the random visit order of cells assures uniform distribution + // of the features even if we break early (maxFts reached soon) + } + + // Try to refine every segment candidate + for(size_t i=0; i (size_t) Config::maxFtsSegs()) + break; // the random visit order of cells assures uniform distribution + // of the features even if we break early (maxFts reached soon) + } + /*for(auto it = lines_.begin(), ite = lines_.end(); it!=ite; ++it) + { + if(refine(it->ls,it->spx,it->epx,frame)) + ++n_ls_matches_; + if(n_ls_matches_ > (size_t) Config::maxFtsSegs()) + break; + }*/ + SVO_STOP_TIMER("feature_align"); +} + +bool Reprojector::pointQualityComparator(PointCandidate& lhs, PointCandidate& rhs) +{ + // point quality is given by the Point::PointType enum + // so that DELETED < CANDIDATE < UNKNOWN < GOOD + if(lhs.pt->type_ > rhs.pt->type_) + return true; + return false; +} + +bool Reprojector::lineQualityComparator(LineCandidate& lhs, LineCandidate& rhs) +{ + // DELETED < CANDIDATE < UNKNOWN < GOOD + if(lhs.ls->type_ > rhs.ls->type_) + return true; + return false; +} + +bool Reprojector::refineBestCandidate(Cell& cell, FramePtr frame) +{ + // sort the candidates inside the cell according to their quality + cell.sort(boost::bind(&Reprojector::pointQualityComparator, _1, _2)); + Cell::iterator it=cell.begin(); + // in principle, iterate through the whole list of features in the cell + // in reality, there is maximum one point per cell, so the loop returns if successful + while(it!=cell.end()) + { + ++n_trials_; + // Try to refine the point feature in frame from current initial estimate + bool success = refine( it->pt, it->px, frame ); + // Failed or not, this candidate was finally being erased in original code + it = cell.erase(it); // it takes next position in the list as output of .erase + if(success) + // Maximum one point per cell. + return true; + } + return false; +} + +bool Reprojector::refineBestCandidate(LineCandidates& cell, FramePtr frame) +{ + // sort the candidates inside the cell according to their quality + cell.sort(boost::bind(&Reprojector::lineQualityComparator, _1, _2)); + LineCandidates::iterator it=cell.begin(); + // in principle, iterate through the whole list of features in the cell + // in reality, there is maximum one point per cell, so the loop returns if successful + while(it!=cell.end()) + { + ++n_trials_; + // Try to refine the point feature in frame from current initial estimate + bool success = refine( it->ls, it->spx, it->epx, frame ); + // Failed or not, this candidate was finally being erased in original code + it = cell.erase(it); // it takes next position in the list as output of .erase + if(success) + // Maximum one point per cell. + return true; + } + return false; +} + +bool Reprojector::refine(Point* pt, Vector2d& px_est, FramePtr frame) +{ + if(pt->type_ == Point::TYPE_DELETED) + return false; + + bool found_match = true; + if(options_.find_match_direct) + // refine px position in the candidate by directly applying subpix refinement + // internally, it is optimizing photometric error + // of the candidate px patch wrt the closest-view reference feature patch + found_match = matcher_.findMatchDirect(*pt, *frame, px_est); + // TODO: What happens if options_.find_match_direct is false??? Shouldn't findEpipolarMatchDirect be here? 
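+  // Quality bookkeeping, sketched: every landmark carries two counters,
+  // n_failed_reproj_ and n_succeeded_reproj_. The thresholds used below
+  // (15/30 failures, 10 successes) implement a simple hysteresis that
+  // promotes UNKNOWN landmarks to GOOD and culls persistently bad ones.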
+ + // apply quality logic + { + if(!found_match) + { + // if there is no match found for this point, decrease quality + pt->n_failed_reproj_++; + // consider removing the point from map depending on point type and quality + if(pt->type_ == Point::TYPE_UNKNOWN && pt->n_failed_reproj_ > 15) + map_.safeDeletePoint(pt); + if(pt->type_ == Point::TYPE_CANDIDATE && pt->n_failed_reproj_ > 30) + map_.point_candidates_.deleteCandidatePoint(pt); + return false; + } + // if there is successful match found for this point, increase quality + pt->n_succeeded_reproj_++; + if(pt->type_ == Point::TYPE_UNKNOWN && pt->n_succeeded_reproj_ > 10) + pt->type_ = Point::TYPE_GOOD; + } + + // create new point feature for this frame with the refined (aligned) candidate position in this image + PointFeat* new_feature = new PointFeat(frame.get(), px_est, matcher_.search_level_); + frame->addFeature(new_feature); + + // Here we add a reference in the feature to the 3D point, the other way + // round is only done if this frame is selected as keyframe. + // TODO: why not give it directly to the constructor PointFeat(frame.get(), pt, it->px, matcher_.serach_level_) + new_feature->feat3D = pt; + + PointFeat* pt_ftr = static_cast( matcher_.ref_ftr_ ); + if(pt_ftr != NULL) + { + if(pt_ftr->type == PointFeat::EDGELET) + { + new_feature->type = PointFeat::EDGELET; + new_feature->grad = matcher_.A_cur_ref_*pt_ftr->grad; + new_feature->grad.normalize(); + } + } + + // If the keyframe is selected and we reproject the rest, we don't have to + // check this point anymore. +// it = cell.erase(it); + + // Maximum one point per cell. + return true; +} + +bool Reprojector::refine(LineSeg* ls, Vector2d& spx_est, Vector2d& epx_est, FramePtr frame) +{ + + if(ls->type_ == LineSeg::TYPE_DELETED) + return false; + + bool found_match = true; + if(options_.find_match_direct) + { + // refine start and end points in the segment independently + found_match = matcher_.findMatchDirect(*ls, *frame, spx_est, epx_est); + } + // TODO: What happens if options_.find_match_direct is false??? Shouldn't findEpipolarMatchDirect be here? + + // apply quality logic - don't like the n_failed_reproj and n_succeeded_reproj_ numbers + { + if(!found_match) + { + // if there is no match found for this point, decrease quality + ls->n_failed_reproj_++; + // consider removing the point from map depending on point type and quality + if(ls->type_ == LineSeg::TYPE_UNKNOWN && ls->n_failed_reproj_ > 15) + map_.safeDeleteSegment(ls); + if(ls->type_ == LineSeg::TYPE_CANDIDATE && ls->n_failed_reproj_ > 30) + map_.segment_candidates_.deleteCandidateSegment(ls); + return false; + } + // if there is successful match found for this point, increase quality + ls->n_succeeded_reproj_++; + if(ls->type_ == LineSeg::TYPE_UNKNOWN && ls->n_succeeded_reproj_ > 10){ + ls->type_ = LineSeg::TYPE_GOOD; + } + } + + // create new segment feature for this frame with the refined (aligned) candidate position in this image + LineFeat* new_feature = new LineFeat(frame.get(), spx_est, epx_est, matcher_.search_level_); + frame->addFeature(new_feature); + + // Here we add a reference in the feature to the 3D point, the other way + // round is only done if this frame is selected as keyframe. + // TODO: why not give it directly to the constructor PointFeat(frame.get(), pt, it->px, matcher_.serach_level_) + new_feature->feat3D = ls; + + // If the keyframe is selected and we reproject the rest, we don't have to + // check this point anymore. + // it = cell.erase(it); + + // Maximum one point per cell. 
+  return true;
+}
+
+bool Reprojector::reproject(FramePtr frame, Point* point)
+{
+  // get the position in the current frame image of the world 3D point
+  Vector2d cur_px(frame->w2c(point->pos_));
+  if(frame->cam_->isInFrame(cur_px.cast<int>(), 8)) // 8px is the patch size in the matcher
+  {
+    // get the linear index (wrt the row-wise vectorized grid matrix)
+    // of the image grid cell in which the point px lies
+    const int k = static_cast<int>(cur_px[1]/grid_.cell_size)*grid_.grid_n_cols
+                + static_cast<int>(cur_px[0]/grid_.cell_size);
+    grid_.cells.at(k)->push_back(PointCandidate(point, cur_px));
+    return true;
+  }
+  return false;
+}
+
+bool Reprojector::reproject(FramePtr frame, LineSeg* segment)
+{
+  // get the position in the current frame image of the world 3D segment endpoints
+  Vector2d cur_spx(frame->w2c(segment->spos_));
+  Vector2d cur_epx(frame->w2c(segment->epos_));
+  if(frame->cam_->isInFrame(cur_spx.cast<int>(), 8) &&
+     frame->cam_->isInFrame(cur_epx.cast<int>(), 8)) // 8px is the patch size in the matcher
+  {
+    const int sk = static_cast<int>(cur_spx[1]/gridls_.cell_size)*gridls_.grid_n_cols
+                 + static_cast<int>(cur_spx[0]/gridls_.cell_size);
+    const int ek = static_cast<int>(cur_epx[1]/gridls_.cell_size)*gridls_.grid_n_cols
+                 + static_cast<int>(cur_epx[0]/gridls_.cell_size);
+    // in the case of segments, add the candidate to the grid cells
+    // of both the start and end points
+    gridls_.cells.at(sk)->push_back(LineCandidate(segment, cur_spx, cur_epx));
+    gridls_.cells.at(ek)->push_back(LineCandidate(segment, cur_spx, cur_epx));
+    return true;
+  }
+  return false;
+}
+
+} // namespace plsvo
diff --git a/src/sceneRepresentation.cpp b/src/sceneRepresentation.cpp
new file mode 100644
index 0000000..3a1a060
--- /dev/null
+++ b/src/sceneRepresentation.cpp
@@ -0,0 +1,654 @@
+/*****************************************************************************
+**      PL-SVO: Semi-direct Monocular Visual Odometry by Combining         **
+**                     Points and Line Segments                            **
+******************************************************************************
+**                                                                          **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                          **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                          **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                          **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                          **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program.  If not, see <http://www.gnu.org/licenses/>.
** +** ** +*****************************************************************************/ + + +#include + +// Constructors and destructor + +sceneRepresentation::sceneRepresentation(){ + + sbb = 1.0f; + sref = 0.2f; + srad = 0.005f; + sline = 2.0f; + saxis = 0.5f; + sfreq = 3.0f; + szoom = 3.0f; + selli = 2.0f; + selev = 30.f; + sazim = -135.f; + sfrust = 0.2f; + slinef = 0.1f; + win = new CDisplayWindow3D("3D Scene",1920,1080); + + hasText = true; + hasAxes = false; + hasLegend = false; + hasHelp = false; + hasCov = false; + hasTraj = true; + hasGT = false; + hasChange = false; + hasComparison = false; + hasImg = false; + hasLines = false; + hasPoints = false; + hasFrustum = false; + +} + +sceneRepresentation::sceneRepresentation(string configFile){ + + CConfigFile config(configFile); + sbb = config.read_double("Scene","sbb",1.f); + sref = config.read_double("Scene","sref",0.2f); + srad = config.read_double("Scene","srad",0.005f); + sline = config.read_double("Scene","sline",2.f); + saxis = config.read_double("Scene","saxis",0.5f); + sfreq = config.read_double("Scene","sfreq",3.f); + szoom = config.read_double("Scene","szoom",3.f); + selli = config.read_double("Scene","selli",1.f); + selev = config.read_double("Scene","selev",30.f); + sazim = config.read_double("Scene","sazim",-135.f); + sfrust = config.read_double("Scene","sfrust",0.2f); + slinef = config.read_double("Scene","slinef",0.1f); + win = new CDisplayWindow3D("3D Scene",1920,1080); + + hasText = config.read_bool("Scene","hasText",true); + hasAxes = config.read_bool("Scene","hasAxes",true); + hasLegend = config.read_bool("Scene","hasLegend",false); + hasHelp = config.read_bool("Scene","hasHelp",false); + hasCov = config.read_bool("Scene","hasCov",false); + hasGT = config.read_bool("Scene","hasGT",false); + hasTraj = config.read_bool("Scene","hasTraj",true); + hasChange = config.read_bool("Scene","hasChange",false); + hasComparison = config.read_bool("Scene","hasComparison",false); + hasImg = config.read_bool("Scene","hasImg",true); + hasLines = config.read_bool("Scene","hasLines",false); + hasPoints = config.read_bool("Scene","hasPoints",false); + hasFrustum = config.read_bool("Scene","hasFrustum",false); + isKitti = config.read_bool("Scene","isKitti",false); + + Matrix x_cw; + x_cw << 1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1; + CPose3D x_aux(getPoseFormat(x_cw)); + pose_ini = x_aux; + +} + +sceneRepresentation::~sceneRepresentation(){ + +} + +// Initialize the scene + +void sceneRepresentation::initializeScene(Matrix x_0){ + + // Initialize the scene and window + win->setCameraElevationDeg(selev); + win->setCameraAzimuthDeg(sazim); + win->setCameraZoom(szoom); + theScene = win->get3DSceneAndLock(); + + // Initialize poses of different objects + if(hasChange) + change.setFromValues(0,0,0,0,0,-90*CV_PI/180.f); + else + change.setFromValues(0,0,0,0,0,0); + CPose3D x_aux(getPoseFormat(x_0)); + pose = x_aux; + pose1 = x_aux; + pose_0 = x_aux; + pose_gt = pose_ini - change; + x_ini = x_0; + pose.getAsVector(v_aux); + pose1.getAsVector(v_aux1); + pose_gt.getAsVector(v_auxgt); + + // Initialize the camera object + //bbObj = stock_objects::BumblebeeCamera(); + bbObj = CFrustum::Create(); + { + bbObj->setPose(pose_0+frustumL_); + bbObj->setScale(sbb/15.0); + bbObj->setColor(0.0,0.0,0.0); + theScene->insert(bbObj); + } + srefObj = stock_objects::CornerXYZSimple(); + srefObj->setPose(pose_0); + srefObj->setScale(sref); + theScene->insert(srefObj); + frustumL_.setFromValues(0,0,0, 0, -90.f*CV_PI/180.f, -90.f*CV_PI/180.f); + 
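+    // frustumL_ and frustumR_ are fixed offsets that place the drawn camera
+    // frusta relative to the pose: the two -90 deg rotations align the frustum
+    // with the camera optical axis, and the 0.12 m x-offset of the right
+    // frustum presumably corresponds to a stereo baseline (the scene class is
+    // shared with the stereo version of the pipeline). Note that
+    // bbObj->setPose(pose_0+frustumL_) above runs before this line, i.e. with
+    // a default-constructed (identity) frustumL_.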
frustumR_.setFromValues(0.12f,0,0, 0, -90.f*CV_PI/180.f, -90.f*CV_PI/180.f); + if(hasFrustum){ + frustObj = CFrustum::Create(); + { + frustObj->setPose(pose_0+frustumL_); + frustObj->setLineWidth (slinef); + frustObj->setScale(sfrust); + theScene->insert(frustObj); + } + frustObj1 = CFrustum::Create(); + { + frustObj1->setPose(pose_0+frustumR_); + frustObj1->setLineWidth (slinef); + frustObj1->setScale(sfrust); + theScene->insert(frustObj1); + } + } + + // Initialize the axes + if(hasAxes){ + axesObj = CAxis::Create(); + axesObj->setFrequency(sfreq); + axesObj->enableTickMarks(false); + axesObj->setAxisLimits(-saxis,-saxis,-saxis, saxis,saxis,saxis); + theScene->insert( axesObj ); + } + + // Initialize the ground truth camera object + if(hasGT){ + gtObj = stock_objects::BumblebeeCamera(); + { + gtObj->setPose(pose_ini); + gtObj->setScale(sbb); + theScene->insert(gtObj); + } + srefObjGT = stock_objects::CornerXYZSimple(); + { + srefObjGT->setPose(pose_ini); + srefObjGT->setScale(sref); + theScene->insert(srefObjGT); + } + } + + // Initialize a second camera to compare when changing parameters + if(hasComparison){ + //bbObj1 = stock_objects::BumblebeeCamera(); + bbObj1 = CFrustum::Create(); + { + bbObj1->setPose(pose_0+frustumL_); + bbObj1->setScale(sbb/15.0); + bbObj1->setColor(0.0,0.0,0.0); + theScene->insert(bbObj1); + } + srefObj1 = stock_objects::CornerXYZSimple(); + { + srefObj1->setPose(pose_0); + srefObj1->setScale(sref); + theScene->insert(srefObj1); + } + } + + // Initialize the text + if(hasText){ + string text = "Frame: \t \t0 \nFrequency: \t0 Hz \nLines: \t0 (0)\nPoints: \t0 (0)"; + win->addTextMessage(0.85,0.95, text, TColorf(1.0,1.0,1.0), 0, MRPT_GLUT_BITMAP_HELVETICA_10 ); + } + + // Initialize the lines + lineObj = CSetOfLines::Create(); + lineObj->setLineWidth(1.0); + lineObj->setColor(0,0,0); + theScene->insert( lineObj ); + + // Initialize the point cloud + pointObj = CPointCloud::Create(); + pointObj->setPointSize(3.0); + pointObj->setColor(0,0,0); + theScene->insert( pointObj ); + + // Initialize the viewports + setHelp(); + setLegend(); + + image = theScene->createViewport("image"); + if(isKitti){ + if(hasImg) + image->setViewportPosition(20, 20, 621, 188); + else + image->setViewportPosition(2000, 2000, 621, 188); + } + else{ + if(hasImg) + image->setViewportPosition(20, 20, 640, 480); + else + image->setViewportPosition(2000, 2000, 640, 480); + } + + // BUG: i cannot check if CImage is empty + if(hasImg){ + img_mrpt_image.loadFromFile("./empty_img.png",1); + } + + // Re-paint the scene + win->unlockAccess3DScene(); + win->repaint(); + +} + +// Update the scene + +bool sceneRepresentation::updateScene(){ + + theScene = win->get3DSceneAndLock(); + bool restart = false; + + // Update camera pose + CPose3D x_aux(getPoseFormat(x)); + pose = pose + x_aux; + v_aux_ = v_aux; + pose.getAsVector(v_aux); + if(hasTraj){ + CSimpleLinePtr obj = CSimpleLine::Create(); + obj->setLineCoords(v_aux_(0),v_aux_(1),v_aux_(2), v_aux(0),v_aux(1),v_aux(2)); + obj->setLineWidth(sline); + obj->setColor(0,0,0.7); + theScene->insert( obj ); + } + bbObj->setPose(pose+frustumL_); + srefObj->setPose(pose); + if(hasFrustum){ + frustObj->setPose(pose+frustumL_); + frustObj1->setPose(pose+frustumR_); + } + + // Update the GT camera pose + if(hasGT){ + CPose3D x_auxgt(getPoseFormat(xgt)); + //pose_gt = pose_gt + x_auxgt; + pose_gt = x_auxgt; + v_auxgt_ = v_auxgt; + + pose_gt.getAsVector(v_auxgt); + float y_ = v_auxgt(1); + float z_ = v_auxgt(2); + float b_ = v_auxgt(4); + float c_ = v_auxgt(5); + 
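+        // remap the ground-truth pose from the camera convention (y down,
+        // z forward, as in the KITTI poses) to the scene convention
+        // (y forward, z up): y <- z, z <- -y, and for the rotation
+        // pitch <- -roll, roll <- pitch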
v_auxgt(1) = z_; + v_auxgt(2) = -y_; + v_auxgt(4) = -c_; + v_auxgt(5) = b_; + pose_gt = TPose3D(v_auxgt(0),v_auxgt(1),v_auxgt(2),v_auxgt(3),v_auxgt(4),v_auxgt(5)); + + if(hasTraj){ + CSimpleLinePtr obj = CSimpleLine::Create(); + obj->setLineCoords(v_auxgt_(0),v_auxgt_(1),v_auxgt_(2), v_auxgt(0),v_auxgt(1),v_auxgt(2)); + obj->setLineWidth(sline); + obj->setColor(0,0,0); + theScene->insert( obj ); + } + gtObj->setPose(pose_gt); + srefObjGT->setPose(pose_gt); + } + + // Update the comparison camera pose + if(hasComparison){ + CPose3D x_aux1(getPoseFormat(xcomp)); + pose1 = pose1 + x_aux1; + v_aux1_ = v_aux1; + pose1.getAsVector(v_aux1); + if(hasTraj){ + CSimpleLinePtr obj = CSimpleLine::Create(); + obj->setLineCoords(v_aux1_(0),v_aux1_(1),v_aux1_(2), v_aux1(0),v_aux1(1),v_aux1(2)); + obj->setLineWidth(sline); + obj->setColor(0,0.7,0); + theScene->insert( obj ); + } + bbObj1->setPose(pose1+frustumL_); + srefObj1->setPose(pose1); + } + + // Update the text + if(hasText){ + string text = "Frame: \t \t" + to_string(frame) + " \n" + "Frequency: \t" + to_string_with_precision(1000.f/time,4) + " Hz \n" + "Lines: \t" + to_string(nLines) + " (" + to_string(nLinesH) + ") \nPoints: \t" + to_string(nPoints) + " (" + to_string(nPointsH) + ")"; + win->addTextMessage(0.85,0.95, text, TColorf(1.0,1.0,1.0), 0, MRPT_GLUT_BITMAP_HELVETICA_10 ); + } + + // Update the image + if(hasImg){ + //image->setImageView_fast( img_mrpt_image ); + image->setImageView( img_mrpt_image ); + } + + // Update the lines + lineObj->clear(); + if(hasLines){ + for( vector>::iterator it = linesSVO.begin(); it!= linesSVO.end();++it) + lineObj->appendLine((*it)(0),(*it)(1),(*it)(2),(*it)(3),(*it)(4),(*it)(5)); + lineObj->setPose(pose_0); + } + + // Update the points + pointObj->clear(); + if(hasPoints){ + for( vector>::iterator it = pointsSVO.begin(); it!= pointsSVO.end();++it) + pointObj->insertPoint( (*it)(0),(*it)(1),(*it)(2) ); + pointObj->setPose(pose_0); + } + + // Re-paint the scene + win->unlockAccess3DScene(); + win->repaint(); + + // Key events - TODO: change the trick to employ viewports + if(win->keyHit()){ + key = win->getPushedKey(&kmods); + if(key == MRPTK_SPACE){ // Space Reset VO + theScene->clear(); + initializeScene(x_ini); + } + else if (key == MRPTK_ESCAPE){ // Esc Restart VO + theScene->clear(); + initializeScene(x_ini); + restart = true; + } + else if ( (key == 104) || (key == 72) ){ // H help + hasHelp = !hasHelp; + if(!hasHelp) + help->setViewportPosition(2000, 2000, 300, 376); + else + help->setViewportPosition(1600, 20, 300, 376); + } + else if ( (key == 103) || (key == 71) ){ // G legend + hasLegend = !hasLegend; + if(!hasLegend) + legend->setViewportPosition(2000, 2000, 250, 97); + else + legend->setViewportPosition(20, 900, 250, 97); + } + else if ( (key == 97) || (key == 65) ){ // A axes + hasAxes = !hasAxes; + if(!hasAxes){ + axesObj.clear(); + } + else{ + axesObj = CAxis::Create(); + axesObj->setFrequency(sfreq); + axesObj->enableTickMarks(false); + axesObj->setAxisLimits(-saxis,-saxis,-saxis, saxis,saxis,saxis); + theScene->insert( axesObj ); + } + } + else if ( (key == 102) || (key == 70) ){ // F frustum + hasFrustum = !hasFrustum; + if(!hasFrustum){ + frustObj.clear(); + frustObj1.clear(); + } + else{ + frustObj = CFrustum::Create(); + frustObj->setLineWidth (slinef); + frustObj->setScale(sfrust); + frustObj->setPose(pose+frustumL_); + theScene->insert(frustObj); + frustObj1 = CFrustum::Create(); + frustObj1->setLineWidth (slinef); + frustObj1->setScale(sfrust); + frustObj1->setPose(pose+frustumR_); + 
theScene->insert(frustObj1); + } + } + else if ( (key == 112) || (key == 80) ){ // P points + hasPoints = !hasPoints; + if(!hasPoints){ + elliObjP.clear(); + } + else{ + elliObjP = CSetOfObjects::Create(); + elliObjP->setScale(selli); + elliObjP->setPose(pose); + theScene->insert(elliObjP); + } + } + else if ( (key == 108) || (key == 76) ){ // L lines + hasLines = !hasLines; + if(!hasLines){ + elliObjL.clear(); + } + else{ + elliObjL = CSetOfObjects::Create(); + elliObjL->setScale(selli); + elliObjL->setPose(pose); + theScene->insert(elliObjL); + } + } + else if ( (key == 116) || (key == 84) ){ // T text + hasText = !hasText; + if(!hasText){ + string text = ""; + win->addTextMessage(0.85,0.95, text, TColorf(1.0,1.0,1.0), 0, MRPT_GLUT_BITMAP_HELVETICA_10 ); + } + } + else if ( (key == 99) || (key == 67) ){ // C covariance + hasCov = !hasCov; + if(!hasCov){ + elliObj.clear(); + } + else{ + elliObj = CEllipsoid::Create(); + elliObj->setScale(selli); + elliObj->setQuantiles(2.0); + elliObj->setColor(0,0.3,0); + elliObj->enableDrawSolid3D(true); + elliObj->setPose(pose); + theScene->insert(elliObj); + } + } + else if ( key == 43){ // + Increases the scale of the covariance ellipse + selli += 0.5f; + if(hasCov) + elliObj->setScale(selli); + } + else if ( key == 45){ // - Decreases the scale of the covariance ellipse + selli -= 0.5f; + if(hasCov) + elliObj->setScale(selli); + } + else if ( (key == 105) || (key == 73) ){ // I image + hasImg = !hasImg; + if(isKitti){ + if(hasImg) + image->setViewportPosition(20, 20, 621, 188); + else + image->setViewportPosition(2000, 2000, 621, 188); + } + else{ + if(hasImg) + image->setViewportPosition(20, 20, 400, 300); + else + image->setViewportPosition(2000, 2000, 400, 300); + } + + } + } + + return restart; + +} + +// Setters + +void sceneRepresentation::setText(int frame_, float time_, int nPoints_, int nPointsH_, int nLines_, int nLinesH_){ + frame = frame_; + time = time_; + nPoints = nPoints_; + nPointsH = nPointsH_; + nLines = nLines_; + nLinesH = nLinesH_; + +} + +void sceneRepresentation::setPose(Matrix x_){ + x = x_; +} + +void sceneRepresentation::setGT(Matrix xgt_){ + xgt = xgt_; +} + +void sceneRepresentation::setComparison(Matrix xcomp_){ + xcomp = xcomp_; +} + +void sceneRepresentation::setImage(Mat image_){ + imwrite("./img_aux.jpg",image_); + img_mrpt_image.loadFromFile("./img_aux.jpg",1); +} + +void sceneRepresentation::setImage(string image_){ + img_mrpt_image.loadFromFile(image_,1); +} + +void sceneRepresentation::setLegend(){ + // Initialize the legend + legend = theScene->createViewport("legend"); + if(hasLegend) + legend->setViewportPosition(20, 900, 250, 97); + else + legend->setViewportPosition(2000, 2000, 250, 97); + + if(!hasGT) { + if(hasComparison){ + img_legend = "aux/legend_comp.png"; + img_mrpt_legend.loadFromFile(img_legend,1); + legend->setImageView_fast( img_mrpt_legend ); + } + else + img_mrpt_legend.loadFromFile("",1); + } + else if(hasComparison){ + img_legend = "aux/legend_full.png"; + img_mrpt_legend.loadFromFile(img_legend,1); + legend->setImageView_fast( img_mrpt_legend ); + } + else{ + img_legend = "aux/legend.png"; + img_mrpt_legend.loadFromFile(img_legend,1); + legend->setImageView_fast( img_mrpt_legend ); + } +} + +void sceneRepresentation::setHelp(){ + // Initialize the legend + help = theScene->createViewport("help"); + img_help = "aux/help.png"; + img_mrpt_help.loadFromFile(img_help,1); + help->setImageView_fast( img_mrpt_help ); + if(hasHelp) + help->setViewportPosition(1600, 20, 300, 376); + else + 
+        help->setViewportPosition(2000, 2000, 300, 376);
+}
+
+void sceneRepresentation::setPoints(CMatrixFloat pData_){
+    pData = pData_;
+}
+
+void sceneRepresentation::setLines(CMatrixFloat lData_){
+    lData = lData_;
+}
+
+void sceneRepresentation::setPointsSVO(vector< Matrix<float,3,1> > pointsSVO_){
+    pointsSVO = pointsSVO_;
+}
+
+void sceneRepresentation::setLinesSVO(vector< Matrix<float,6,1> > linesSVO_){
+    linesSVO = linesSVO_;
+}
+
+void sceneRepresentation::setStereoCalibration(Matrix3f K_, float b_){
+    f  = K_(0,0);
+    cx = K_(0,2);
+    cy = K_(1,2);
+    b  = b_;
+    sigmaP = 1.f; // TODO: READ FROM CONFIG FILE
+    sigmaL = 1.f;
+
+    bsigmaL = b*b*sigmaL*sigmaL;
+    bsigmaP = b*b*sigmaP*sigmaP;
+}
+
+// Public methods
+
+bool sceneRepresentation::waitUntilClose(){
+    while(win->isOpen());   // busy-wait until the user closes the window
+    return true;
+}
+
+bool sceneRepresentation::isOpen(){
+    return win->isOpen();
+}
+
+// Auxiliary methods
+
+CPose3D sceneRepresentation::getPoseXYZ(VectorXf x){
+    CPose3D pose(x(0),x(1),x(2),x(3),x(4),x(5));
+    return pose;
+}
+
+CMatrixDouble sceneRepresentation::getPoseFormat(Matrix<float,4,4> T){
+    CMatrixDouble T_(4,4);
+    for(unsigned int i = 0; i < 4; i++){
+        for(unsigned int j = 0; j < 4; j++){
+            T_(i,j) = T(i,j);
+        }
+    }
+    return T_;
+}
+
+CMatrixDouble33 sceneRepresentation::getCovFormat(MatrixXf cov_){
+    CMatrixDouble33 cov3;
+
+    Matrix3f cov3_eigen = cov_.block(0,0,3,3);
+
+    for(unsigned int i = 0; i < 3; i++){
+        for(unsigned int j = 0; j < 3; j++){
+            cov3(i,j) = cov3_eigen(i,j);
+        }
+    }
+    return cov3;
+}
+
+void sceneRepresentation::getYPR(float &yaw, float &pitch, float &roll){
+    double y, p, r;
+    pose.getYawPitchRoll(y,p,r);
+    yaw   = y;
+    pitch = p;
+    roll  = r;
+}
+
+void sceneRepresentation::getPose(Matrix<float,4,4> &T){
+    CMatrixDouble44 T_;
+    pose.getHomogeneousMatrix(T_);
+    for(unsigned int i = 0; i < 4; i++){
+        for(unsigned int j = 0; j < 4; j++){
+            T(i,j) = T_(i,j);
+        }
+    }
+}
+
diff --git a/src/sparse_img_align.cpp b/src/sparse_img_align.cpp
new file mode 100644
index 0000000..28019fd
--- /dev/null
+++ b/src/sparse_img_align.cpp
@@ -0,0 +1,725 @@
+/*****************************************************************************
+**      PL-SVO: Semi-direct Monocular Visual Odometry by Combining         **
+**                     Points and Line Segments                            **
+******************************************************************************
+**                                                                          **
+**  Copyright(c) 2016, Ruben Gomez-Ojeda, University of Malaga              **
+**  Copyright(c) 2016, MAPIR group, University of Malaga                    **
+**                                                                          **
+**  This library extends SVO to the case of also using line-segments, thus **
+**  it is highly based on the previous implementation of SVO:              **
+**  https://github.com/uzh-rpg/rpg_svo                                     **
+**                                                                          **
+**  This program is free software: you can redistribute it and/or modify   **
+**  it under the terms of the GNU General Public License (version 3) as    **
+**  published by the Free Software Foundation.                             **
+**                                                                          **
+**  This program is distributed in the hope that it will be useful, but    **
+**  WITHOUT ANY WARRANTY; without even the implied warranty of             **
+**  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           **
+**  GNU General Public License for more details.                           **
+**                                                                          **
+**  You should have received a copy of the GNU General Public License      **
+**  along with this program.  If not, see <http://www.gnu.org/licenses/>.  **
+**                                                                          **
+*****************************************************************************/
+
+
+#include <algorithm>
+#include <plsvo/sparse_img_align.h>
+#include <plsvo/frame.h>
+#include <plsvo/feature.h>
+#include <plsvo/feature3D.h>
+#include <plsvo/config.h>
+#include <vikit/abstract_camera.h>
+#include <vikit/vision.h>
+#include <vikit/math_utils.h>
+
+namespace plsvo {
+
+SparseImgAlign::SparseImgAlign(
+    int max_level, int min_level, int n_iter,
+    Method method, bool display, bool verbose) :
+  display_(display),
+  max_level_(max_level),
+  min_level_(min_level)
+{
+  n_iter_ = n_iter;
+  n_iter_init_ = n_iter_;
+  method_ = method;
+  verbose_ = verbose;
+  eps_ = 0.000001;
+}
+
+size_t SparseImgAlign::run(FramePtr ref_frame, FramePtr cur_frame)
+{
+  reset();
+
+  if(ref_frame->pt_fts_.empty() && ref_frame->seg_fts_.empty())
+  {
+    SVO_WARN_STREAM("SparseImgAlign: no features (points or segments) to track!");
+    return 0;
+  }
+
+  ref_frame_ = ref_frame;
+  cur_frame_ = cur_frame;
+
+  // The maximum number of segment samples (without overlapping) is proportional to
+  // the accumulated length of the segment features
+  float total_length = 0;
+  std::for_each(ref_frame->seg_fts_.begin(), ref_frame->seg_fts_.end(), [&](plsvo::Feature* i){
+    LineFeat* seg = static_cast<LineFeat*>(i);
+    total_length += seg->length;
+  });
+  int max_num_seg_samples = std::ceil( total_length / patch_size_ );
+
+  // setup cache structures to allocate the optimization data (residues, jacobians, etc.)
+  pt_cache_  = Cache( ref_frame_->pt_fts_.size(), patch_area_ );
+  seg_cache_ = Cache( max_num_seg_samples, patch_area_ );
+
+  SE3 T_cur_from_ref(cur_frame_->T_f_w_ * ref_frame_->T_f_w_.inverse());
+
+  for(level_=max_level_; level_>=min_level_; --level_)
+  {
+    mu_ = 0.1;
+    pt_cache_.jacobian.setZero();
+    seg_cache_.jacobian.setZero();
+    have_ref_patch_cache_ = false;
+    if(verbose_)
+      printf("\nPYRAMID LEVEL %i\n---------------\n", level_);
+    optimize(T_cur_from_ref);
+  }
+  cur_frame_->T_f_w_ = T_cur_from_ref * ref_frame_->T_f_w_;
+
+  return n_meas_/patch_area_;
+}
+
+Matrix<double,6,6> SparseImgAlign::getFisherInformation()
+{
+  double sigma_i_sq = 5e-4*255*255; // image noise
+  Matrix<double,6,6> I = H_/sigma_i_sq;
+  return I;
+}
+
+void SparseImgAlign::precomputeReferencePatches()
+{
+  precomputeGaussNewtonParamsPoints(pt_cache_, ref_frame_->pt_fts_);
+  precomputeGaussNewtonParamsSegments(seg_cache_, ref_frame_->seg_fts_);
+  // set flag to true to avoid repeating unnecessary computations in the following iterations
+  have_ref_patch_cache_ = true;
+}
+
+double SparseImgAlign::computeResiduals(
+    const SE3& T_cur_from_ref,
+    bool linearize_system,
+    bool compute_weight_scale)
+{
+  // Warp the (cur)rent image such that it aligns with the (ref)erence image
+
+  // setup residues image if display is active
+  const cv::Mat& cur_img = cur_frame_->img_pyr_.at(level_);
+  if(linearize_system && display_)
+    resimg_ = cv::Mat(cur_img.size(), CV_32F, cv::Scalar(0));
+
+  // do the precomputation of reference parameters (patch intensities and jacobian)
+  // for the Inverse Compositional approach if not done yet
+  if(have_ref_patch_cache_ == false)
+    precomputeReferencePatches();
+
+  // note: the flags passed as arguments are overridden here; the system is
+  // always linearized and weights are always used, but the weight scale is
+  // not recomputed (the original SVO did not use weights at all)
+  linearize_system = true;
+  compute_weight_scale = false;
+  use_weights_ = true;
+
+  /* computations for the point patches */
+  /* ---------------------------------- */
+  // declare the GN parameters H=sum(J_i*J_i') and Jres=sum(J_i*res_i)
+  Matrix<double,6,6> pt_H;
+  Matrix<double,6,1> pt_Jres;
+  // define other interest variables
+  std::vector<float> pt_errors;
+  float pt_chi2 = 0.0;
+
+  // compute the parameters for Gauss-Newton update related to point patches
+  computeGaussNewtonParamsPoints(
+        T_cur_from_ref, linearize_system, compute_weight_scale,
+        pt_cache_,
ref_frame_->pt_fts_, pt_H, pt_Jres, pt_errors, pt_chi2 ); + + /* computations for the segment patches */ + /* ------------------------------------ */ + // declare the GN parameters H=sum(J_i*J_i') and Jres=sum(J_i*res_i) + Matrix seg_H; + Matrix seg_Jres; + // define other interest variables + std::vector seg_errors; + float seg_chi2 = 0.0; + + // compute the parameters for Gauss-Newton update + computeGaussNewtonParamsSegments( + T_cur_from_ref, linearize_system, compute_weight_scale, + seg_cache_, ref_frame_->seg_fts_, seg_H, seg_Jres, seg_errors, seg_chi2 ); + + /* fuse optimization variables coming from both points and segments */ + /* ---------------------------------------------------------------- */ + if(linearize_system) + { + // sum the contribution from both points and segments + H_ = pt_H + seg_H; + Jres_ = pt_Jres + seg_Jres; + } + + float chi2 = pt_chi2 + seg_chi2; + + // compute total scale and compare with the rest + std::vector errors = pt_errors; + for(vector::iterator it = seg_errors.begin(); it != seg_errors.end(); ++it){ + errors.push_back(*it); + } + // compute the weights on the first iteration + vk::robust_cost::MADScaleEstimator scale_estimator; + if( compute_weight_scale && iter_ == 0 ){ + if(pt_errors.size()!=0) scale_pt = scale_estimator.compute(pt_errors); + if(seg_errors.size()!=0) scale_ls = scale_estimator.compute(seg_errors); + if(errors.size()!=0) scale_ = scale_estimator.compute(errors); + if(scale_ls < 0.0001) scale_ls = 1.0; // after initializing there's no LS + } + else if(iter_== 0){ + scale_ = 1.0; + scale_ls = 1.0; + scale_pt = 1.0; + } + + return chi2/n_meas_; +} + +void SparseImgAlign::precomputeGaussNewtonParamsPoints(Cache &cache, list &fts) +{ + + // initialize patch parameters (mainly define its geometry) + Patch patch( patch_size_, ref_frame_->img_pyr_.at(level_) ); + const float scale = 1.0f/(1<pos(); + const double focal_length = ref_frame_->cam_->errorMultiplier2(); + + /* precompute intensity and jacobian for the point patches in the reference image */ + { + size_t feature_counter = 0; + std::vector::iterator visiblity_it = cache.visible_fts.begin(); + for(auto it=fts.begin(), ite=fts.end(); + it!=ite; ++it, ++feature_counter, ++visiblity_it) + { + // if point is not valid (empty or null) skip this feature + if((*it)->feat3D == NULL) + continue; + + // set patch position for current feature + patch.setPosition((*it)->px*scale); + // skip this feature if the patch (with extra pixel for border in derivatives) does not fully lie within the image + if(!patch.isInFrame(patch.halfsize+1)) + continue; + // compute the bilinear interpolation weights constant along the patch scan + patch.computeInterpWeights(); + // set the patch at the corresponding ROI in the image + patch.setRoi(); + + // flag the feature as valid/visible + *visiblity_it = true; + + // cannot just take the 3d points coordinate because of the reprojection errors in the reference image!!! 
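+      // instead, recover the depth of the 3D point along the unit bearing
+      // vector f of the reference feature, so that the patch stays anchored
+      // at the pixel where the feature was actually observed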
+ const double depth(((*it)->feat3D->pos_ - ref_pos).norm()); + const Vector3d xyz_ref((*it)->f*depth); + + // evaluate projection jacobian + Matrix frame_jac; + Frame::jacobian_xyz2uv(xyz_ref, frame_jac); + + // iterate through all points in the Region Of Interest defined by the patch + // the pointer points to the data in the original image matrix + // (this is efficient C-like row-wise scanning of patch, see OpenCV tutorial "How to scan images") + size_t pixel_counter = 0; + float* cache_ptr = reinterpret_cast(cache.ref_patch.data) + patch.area*feature_counter; + uint8_t* img_ptr; // pointer that will point to memory locations of the ROI (same memory as for the original full ref_img) + const int stride = patch.stride; // the stride stored in the patch is that necessary to jump between the full matrix rows + for(int y=0; y &fts) +{ + // initialize patch parameters (mainly define its geometry) + Patch patch( patch_size_, ref_frame_->img_pyr_.at(level_) ); + + const float scale = 1.0f/(1<pos(); + const double focal_length = ref_frame_->cam_->errorMultiplier2(); + + // TODO: feature_counter is no longer valid because each segment + // has a variable number of patches (and total pixels) + std::vector::iterator visiblity_it = cache.visible_fts.begin(); + patch_offset = std::vector(fts.size(),0); // vector of offsets in cache for each patch + std::vector::iterator offset_it = patch_offset.begin(); + size_t cache_idx = 0; // index of the current pixel as stored in cache + for(auto ft_it=fts.begin(), ft_ite=fts.end(); + ft_it!=ft_ite; ++ft_it, ++visiblity_it, ++offset_it) + { + // cast generic Feature* iterator to a LineFeat* pointer here to fully control the object + LineFeat* it = static_cast( *ft_it ); + + // set cache index to current feature offset + *offset_it = cache_idx; + + // if line segment is not valid (empty or null) skip this feature + if( it->feat3D == NULL ) + continue; + + // skip this feature if the patches for start or end points do not fully lie within the image + if(!ref_frame_->cam_->isInFrame((it->spx*scale).cast(),patch.halfsize+1,level_) || + !ref_frame_->cam_->isInFrame((it->epx*scale).cast(),patch.halfsize+1,level_)) + continue; + + // flag the feature as valid/visible + *visiblity_it = true; + + // 1. Estimate number of samples (TODO: first, we have implemented for a fixed N_samples) + // 2. Compute discrete increment between 2D and 3D points from start to end + // i. Estimate depth at extreme points + // ii. Estimate xyz_ref at extreme points + // iii. Get 3D increment + // 3. Iterate over the segment + // i. Estimate jacobian + // ii. 
Iterate around the points of the patch + + // Compute the number of samples and total increment + Vector2d inc2d; // will store the difference vector from start to end points in the segment first + // later will parameterize the 2D step to sample the segment + size_t N_samples = it->setupSampling(patch.size, inc2d); + // Adjust the number of samples in terms of the current pyramid level + N_samples = 1 + (N_samples-1) / (1<spx * scale; // 2D point in the image segment (to update in the loop), initialize at start 2D point + + // Parameterize 3D segment with start point and discrete 3D increment + double p_depth = (it->feat3D->spos_-ref_pos).norm(); // depth (norm of the vector) of segment 3D start point + Vector3d p_ref = it->sf * p_depth; // 3D start point in the frame coordinates + double q_depth = (it->feat3D->epos_-ref_pos).norm(); // depth (norm of the vector) of segment 3D end point + Vector3d q_ref = it->ef * q_depth; // 3D start point in the frame coordinates + Vector3d inc3d = (q_ref-p_ref) / (N_samples-1); // 3D increment to go from start to end of segment in N steps + Vector3d xyz_ref = p_ref; // 3D point in the segment (to update it in the loop), initialized at start 3D point + + // Evaluate over the patch for each point sampled in the segment (including extremes) + for(unsigned int sample = 0; sample frame_jac; + Frame::jacobian_xyz2uv(xyz_ref, frame_jac); + + // iterate through all points in the Region Of Interest defined by the patch + // the pointer points to the data in the original image matrix + // (this is efficient C-like row-wise scanning of patch, see OpenCV tutorial "How to scan images") + float* cache_ptr = reinterpret_cast(cache.ref_patch.data) + cache_idx; + uint8_t* img_ptr; // pointer that will point to memory locations of the ROI (same memory as for the original full ref_img) + const int stride = patch.stride; // the stride stored in the patch is that necessary to jump between the full matrix rows + for(int y=0; y &fts, + Matrix &H, + Matrix &Jres, + std::vector& errors, + float& chi2) +{ + // initialize patch parameters (mainly define its geometry) + Patch patch( patch_size_, cur_frame_->img_pyr_.at(level_) ); + Patch resPatch; + if(linearize_system && display_) + resPatch = Patch( patch_size_, resimg_ ); + + // compute the weights on the first iteration + + if(compute_weight_scale) + errors.reserve(cache.visible_fts.size()); + + const float scale = 1.0f/(1<pos()); + + // reset chi2 variable to zero + chi2 = 0.0; + vk::robust_cost::TukeyWeightFunction weight_estimator; + + // set GN parameters to zero prior to accumulate results + H.setZero(); + Jres.setZero(); + size_t feature_counter = 0; // is used to compute the index of the cached jacobian + std::vector::iterator visiblity_it = cache.visible_fts.begin(); + for(auto it=fts.begin(); it!=fts.end(); + ++it, ++feature_counter, ++visiblity_it) + { + // check if feature is within image + if(!*visiblity_it) + continue; + + // compute pixel location in cur img + const double depth = ((*it)->feat3D->pos_ - ref_pos).norm(); + const Vector3d xyz_ref((*it)->f*depth); + const Vector3d xyz_cur(T_cur_from_ref * xyz_ref); + const Vector2d uv_cur_pyr(cur_frame_->cam_->world2cam(xyz_cur) * scale); + + // set patch position for current feature + patch.setPosition(uv_cur_pyr); + // skip this feature if the patch (with extra pixel for border in derivatives) does not fully lie within the image + if(!patch.isInFrame(patch.halfsize)) + continue; + // compute the bilinear interpolation weights constant along the patch scan + 
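+    // (for a subpixel position (u+sx, v+sy) these are the four weights
+    //  (1-sx)(1-sy), sx(1-sy), (1-sx)sy and sx*sy applied to the
+    //  corresponding integer-pixel neighbours)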
patch.computeInterpWeights(); + // set the patch at the corresponding ROI in the image + patch.setRoi(); + // iterate through all points in the Region Of Interest defined by the patch + // the pointer points to the data in the original image matrix + // (this is efficient C-like row-wise scanning of patch, see OpenCV tutorial "How to scan images") + size_t pixel_counter = 0; + float* cache_ptr = reinterpret_cast(cache.ref_patch.data) + patch.area*feature_counter; + uint8_t* img_ptr; // pointer that will point to memory locations of the ROI (same memory as for the original full ref_img) + const int stride = patch.stride; // the stride stored in the patch is that necessary to jump between the full matrix rows + cv::MatIterator_ itDisp; + if(linearize_system && display_) + { + resPatch.setPosition(uv_cur_pyr); + resPatch.setRoi(); + itDisp = resPatch.roi.begin(); + } + for(int y=0; y &fts, + Matrix &H, + Matrix &Jres, + std::vector &errors, + float &chi2) +{ + + // initialize patch parameters (mainly define its geometry) + Patch patch( patch_size_, cur_frame_->img_pyr_.at(level_) ); + Patch resPatch; + if(linearize_system && display_) + resPatch = Patch( patch_size_, resimg_ ); + + // compute the weights on the first iteration + if(compute_weight_scale) + errors.reserve(cache.visible_fts.size()); + const float scale = 1.0f/(1<pos()); + + // reset chi2 variable to zero + chi2 = 0.0; + vk::robust_cost::TukeyWeightFunction weight_estimator; + + // set GN parameters to zero prior to accumulate results + H.setZero(); + Jres.setZero(); + std::vector::iterator offset_it = patch_offset.begin(); + size_t cache_idx = 0; // index of the current pixel as stored in cache + std::vector::iterator visiblity_it = cache.visible_fts.begin(); + + Matrix H_ls = Matrix::Zero(); + Matrix Jres_ls = Matrix::Zero(); + + for(auto ft_it=fts.begin(), ft_ite=fts.end(); + ft_it!=ft_ite; ++ft_it, ++offset_it, ++visiblity_it) + { + // cast generic Feature* iterator to a LineFeat* pointer here to fully control the object + LineFeat* it = static_cast( *ft_it ); + + // check if we have already removed this line feature + if( it->feat3D == NULL) + continue; + + // check if feature is within image + if(!*visiblity_it) + continue; + + // setup current index in cache according to stored offset values + cache_idx = *offset_it; + + // Compute the number of samples and total increment + Vector2d inc2d; // will store the difference vector from start to end points in the segment first + // later will parameterize the 2D step to sample the segment + size_t N_samples = it->setupSampling(patch.size, inc2d); + + // Adjust the number of samples in terms of the current pyramid level + N_samples = 1 + (N_samples-1) / (1<feat3D->spos_-ref_pos).norm(); // depth (norm of the vector) of segment 3D start point + Vector3d p_ref = it->sf * p_depth; // 3D start point in the frame coordinates + double q_depth = (it->feat3D->epos_-ref_pos).norm(); // depth (norm of the vector) of segment 3D end point + Vector3d q_ref = it->ef * q_depth; // 3D start point in the frame coordinates + Vector3d inc3d = (q_ref-p_ref) / (N_samples-1); // 3D increment to go from start to end of segment in N steps + Vector3d xyz_ref = p_ref; // 3D point in the segment (to update it in the loop), initialized at start 3D point + + // Evaluate over the patch for each point sampled in the segment (including extremes) + Matrix H_ = Matrix::Zero(); + Matrix Jres_ = Matrix::Zero(); + vector ls_res; + bool good_line = true; + for(unsigned int sample = 0; sample < N_samples; ++sample, 
xyz_ref+=inc3d ) + { + // compute pixel location in cur img + const Vector3d xyz_cur(T_cur_from_ref * xyz_ref); + const Vector2d uv_cur_pyr(cur_frame_->cam_->world2cam(xyz_cur) * scale); + // set patch position for current feature + patch.setPosition(uv_cur_pyr); + // skip this feature if the patch (with extra pixel for border in derivatives) does not fully lie within the image + if(!patch.isInFrame(patch.halfsize)) + { + cache_idx += patch.size; // Do not lose position of the next patch in cache! + good_line = false; + sample = N_samples; + continue; + } + // compute the bilinear interpolation weights constant along the patch scan + patch.computeInterpWeights(); + // set the patch at the corresponding ROI in the image + patch.setRoi(); + // iterate through all points in the Region Of Interest defined by the patch + // the pointer points to the data in the original image matrix + // (this is efficient C-like row-wise scanning of patch, see OpenCV tutorial "How to scan images") + float* cache_ptr = reinterpret_cast(cache.ref_patch.data) + cache_idx; + uint8_t* img_ptr; // pointer that will point to memory locations of the ROI (same memory as for the original full ref_img) + const int stride = patch.stride; // the stride stored in the patch is that necessary to jump between the full matrix rows + cv::MatIterator_ itDisp; + if(linearize_system && display_) + { + resPatch.setPosition(uv_cur_pyr); + resPatch.setRoi(); + itDisp = resPatch.roi.begin(); + } + for(int y=0; y::iterator it = ls_res.begin(); it != ls_res.end(); ++it){ + //res_ += pow(*it,2); + res_ += fabsf(*it); + //res_ += pow(*it,2); + } + //res_ = sqrt(res_)/double(N_samples) ; + res_ = res_ / double(N_samples); + if( good_line && res_ < 200.0) // debug + { + /*float res_ = 0.0, res2_; + for(vector::iterator it = ls_res.begin(); it != ls_res.end(); ++it){ + //res_ += pow(*it,2); + res_ += fabsf(*it); + //res_ += pow(*it,2); + } + //res_ = sqrt(res_)/double(N_samples) ; + res_ = res_ / double(N_samples);*/ + + // used to compute scale for robust cost + if(compute_weight_scale) + errors.push_back(res_); + // robustification + float weight = 1.0; + if(use_weights_) + { + if(compute_weight_scale && iter_ != 0) + { + //weight = 2.0*fabsf(res) / (1.0+res2/scale_pt); + weight = 1.0 / (1.0+res_/scale_ls); + //weight = weight_estimator.value(res_/scale_ls); + } + else + { + //weight = 2.0*fabsf(res) / (1.0+res2); + weight = 1.0 / (1.0+res_); + //weight = weight_estimator.value(res_); + } + } + + // update total H and J + H.noalias() += H_ * weight / res_; // only divide hessian once (H/res2 g/res) + Jres.noalias() += Jres_ * weight ; // it is already negative + chi2 += res_*res_*weight; + n_meas_++; + + } + else + it->feat3D = NULL; + + good_line = true; + ls_res.clear(); + H_.setZero(); + Jres_.setZero(); + }//end feature-sweep +} + +int SparseImgAlign::solve() +{ + x_ = H_.ldlt().solve(Jres_); + if((bool) std::isnan((double) x_[0])) + return 0; + return 1; +} + +void SparseImgAlign::update( + const ModelType& T_curold_from_ref, + ModelType& T_curnew_from_ref) +{ + T_curnew_from_ref = T_curold_from_ref * SE3::exp(-x_); +} + +void SparseImgAlign::startIteration() +{} + +void SparseImgAlign::finishIteration() +{ + if(display_) + { + //cv::namedWindow("residuals", cv::WINDOW_AUTOSIZE); + //cv::imshow("residuals", resimg_*10); + //cv::waitKey(0); + } +} + +} // namespace plsvo
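+
+// ---------------------------------------------------------------------------
+// Illustration (standalone sketch, not part of the library): the robust
+// re-weighting applied to the segment residuals above, extracted into a tiny
+// program. The scale mirrors vk::robust_cost::MADScaleEstimator (roughly 1.48
+// times the median of the absolute residuals) and each residual is then
+// down-weighted with the same 1/(1 + res/scale) formula used in
+// computeGaussNewtonParamsSegments(). The residual values are made up.
+#include <algorithm>
+#include <cstdio>
+#include <vector>
+
+static double median(std::vector<double> v)
+{
+  std::nth_element(v.begin(), v.begin() + v.size()/2, v.end());
+  return v[v.size()/2];
+}
+
+int main()
+{
+  // per-segment residuals as accumulated above (already absolute values)
+  std::vector<double> errors = {0.8, 1.1, 0.9, 1.3, 1.0, 9.5}; // one outlier
+  const double scale = 1.48 * median(errors);
+  for(double r : errors)
+    std::printf("res %.2f -> weight %.3f\n", r, 1.0/(1.0 + r/scale));
+  return 0;
+}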