diff --git a/CMakeLists.txt b/CMakeLists.txt index fd8470d..7a40971 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,22 +5,22 @@ project(modcma) # Set the version of C/C++ (here C++17) set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) file(GLOB SRC_FILES ${PROJECT_SOURCE_DIR}/src/*.cpp) list(FILTER SRC_FILES EXCLUDE REGEX ".*interface.cpp$") - -add_executable(main ${SRC_FILES}) -#add_subdirectory(${PROJECT_SOURCE_DIR}/../IOHexperimenter ${CMAKE_CURRENT_BINARY_DIR}/ioh) +add_executable(main ${SRC_FILES}) target_include_directories(main PUBLIC - ${PROJECT_SOURCE_DIR}/include + ${PROJECT_SOURCE_DIR}/include ${PROJECT_SOURCE_DIR}/external -# ${PROJECT_SOURCE_DIR}/../IOHexperimenter/include ) -#target_link_libraries(main PUBLIC ioh) - if (MSVC) target_compile_options(main PRIVATE /bigobj) -endif() + +else() + target_compile_options(main PRIVATE -march=native) +endif() + \ No newline at end of file diff --git a/README.md b/README.md index 38429a2..4acb02f 100644 --- a/README.md +++ b/README.md @@ -202,7 +202,7 @@ The CMA-ES Modular package provides various modules, grouped into 13 categories. 
| -------- | ------ | ------ | ---- | | [Matrix Adaptation](#matrix-adaptation) | Covariance | :green_circle: | :green_circle: | | | Matrix | :red_circle: | :green_circle: | -| | Seperable | :red_circle: | :green_circle: | +| | Separable | :red_circle: | :green_circle: | | | None | :red_circle: | :green_circle: | | [Active Update](#active-update) | Off/On | :green_circle: | :green_circle: | | [Elitism](#elitism) | Off/On | :green_circle: | :green_circle: | @@ -256,7 +256,12 @@ modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.MATRIX # We can also only perform step-size-adaptation modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.NONE # Or use the seperable CMA-ES -modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.SEPERABLE +modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.SEPARABLE +# Other variants: +modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.CHOLESKY +modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.CMSA +modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.COVARIANCE_NO_EIGV +modules.matrix_adaptation = c_maes.options.MatrixAdaptationType.NATURAL_GRADIENT ``` ### Active Update diff --git a/external/Eigen/version.txt b/external/Eigen/version.txt new file mode 100644 index 0000000..fbcbf73 --- /dev/null +++ b/external/Eigen/version.txt @@ -0,0 +1 @@ +3.4.0 \ No newline at end of file diff --git a/external/unsupported/CMakeLists.txt b/external/unsupported/CMakeLists.txt new file mode 100644 index 0000000..34408c0 --- /dev/null +++ b/external/unsupported/CMakeLists.txt @@ -0,0 +1,11 @@ +add_subdirectory(Eigen) +if(EIGEN_BUILD_DOC) + add_subdirectory(doc EXCLUDE_FROM_ALL) +endif() +if(BUILD_TESTING) + if(EIGEN_LEAVE_TEST_IN_ALL_TARGET) + add_subdirectory(test) # can't do EXCLUDE_FROM_ALL here, breaks CTest + else() + add_subdirectory(test EXCLUDE_FROM_ALL) + endif() +endif() diff --git a/external/unsupported/Eigen/AdolcForward 
b/external/unsupported/Eigen/AdolcForward new file mode 100644 index 0000000..56caeae --- /dev/null +++ b/external/unsupported/Eigen/AdolcForward @@ -0,0 +1,159 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ADLOC_FORWARD +#define EIGEN_ADLOC_FORWARD + +//-------------------------------------------------------------------------------- +// +// This file provides support for adolc's adouble type in forward mode. +// ADOL-C is a C++ automatic differentiation library, +// see https://projects.coin-or.org/ADOL-C for more information. +// +// Note that the maximal number of directions is controlled by +// the preprocessor token NUMBER_DIRECTIONS. The default is 2. +// +//-------------------------------------------------------------------------------- + +#define ADOLC_TAPELESS +#ifndef NUMBER_DIRECTIONS +# define NUMBER_DIRECTIONS 2 +#endif +#include + +// adolc defines some very stupid macros: +#if defined(malloc) +# undef malloc +#endif + +#if defined(calloc) +# undef calloc +#endif + +#if defined(realloc) +# undef realloc +#endif + +#include "../../Eigen/Core" + +namespace Eigen { + +/** + * \defgroup AdolcForward_Module Adolc forward module + * This module provides support for adolc's adouble type in forward mode. + * ADOL-C is a C++ automatic differentiation library, + * see https://projects.coin-or.org/ADOL-C for more information. + * It mainly consists in: + * - a struct Eigen::NumTraits specialization + * - overloads of internal::* math function for adtl::adouble type. + * + * Note that the maximal number of directions is controlled by + * the preprocessor token NUMBER_DIRECTIONS. The default is 2. 
+ * + * \code + * #include + * \endcode + */ + //@{ + +} // namespace Eigen + +// Eigen's require a few additional functions which must be defined in the same namespace +// than the custom scalar type own namespace +namespace adtl { + +inline const adouble& conj(const adouble& x) { return x; } +inline const adouble& real(const adouble& x) { return x; } +inline adouble imag(const adouble&) { return 0.; } +inline adouble abs(const adouble& x) { return fabs(x); } +inline adouble abs2(const adouble& x) { return x*x; } + +inline bool (isinf)(const adouble& x) { return (Eigen::numext::isinf)(x.getValue()); } +inline bool (isnan)(const adouble& x) { return (Eigen::numext::isnan)(x.getValue()); } + +} + +namespace Eigen { + +template<> struct NumTraits + : NumTraits +{ + typedef adtl::adouble Real; + typedef adtl::adouble NonInteger; + typedef adtl::adouble Nested; + enum { + IsComplex = 0, + IsInteger = 0, + IsSigned = 1, + RequireInitialization = 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template class AdolcForwardJacobian : public Functor +{ + typedef adtl::adouble ActiveScalar; +public: + + AdolcForwardJacobian() : Functor() {} + AdolcForwardJacobian(const Functor& f) : Functor(f) {} + + // forward constructors + template + AdolcForwardJacobian(const T0& a0) : Functor(a0) {} + template + AdolcForwardJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} + template + AdolcForwardJacobian(const T0& a0, const T1& a1, const T1& a2) : Functor(a0, a1, a2) {} + + typedef typename Functor::InputType InputType; + typedef typename Functor::ValueType ValueType; + typedef typename Functor::JacobianType JacobianType; + + typedef Matrix ActiveInput; + typedef Matrix ActiveValue; + + void operator() (const InputType& x, ValueType* v, JacobianType* _jac) const + { + eigen_assert(v!=0); + if (!_jac) + { + Functor::operator()(x, v); + return; + } + + JacobianType& jac = *_jac; + + ActiveInput ax = x.template cast(); + ActiveValue av(jac.rows()); + + for (int j=0; j 
+// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ALIGNED_VECTOR3 +#define EIGEN_ALIGNED_VECTOR3 + +#include "../../Eigen/Geometry" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** + * \defgroup AlignedVector3_Module Aligned vector3 module + * + * \code + * #include + * \endcode + */ + //@{ + + +/** \class AlignedVector3 + * + * \brief A vectorization friendly 3D vector + * + * This class represents a 3D vector internally using a 4D vector + * such that vectorization can be seamlessly enabled. Of course, + * the same result can be achieved by directly using a 4D vector. + * This class makes this process simpler. + * + */ +// TODO specialize Cwise +template class AlignedVector3; + +namespace internal { +template struct traits > + : traits > +{ +}; +} + +template class AlignedVector3 + : public MatrixBase > +{ + typedef Matrix<_Scalar,4,1> CoeffType; + CoeffType m_coeffs; + public: + + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(AlignedVector3) + using Base::operator*; + + inline Index rows() const { return 3; } + inline Index cols() const { return 1; } + + Scalar* data() { return m_coeffs.data(); } + const Scalar* data() const { return m_coeffs.data(); } + Index innerStride() const { return 1; } + Index outerStride() const { return 3; } + + inline const Scalar& coeff(Index row, Index col) const + { return m_coeffs.coeff(row, col); } + + inline Scalar& coeffRef(Index row, Index col) + { return m_coeffs.coeffRef(row, col); } + + inline const Scalar& coeff(Index index) const + { return m_coeffs.coeff(index); } + + inline Scalar& coeffRef(Index index) + { return m_coeffs.coeffRef(index);} + + + inline AlignedVector3() + {} + + inline AlignedVector3(const Scalar& x, const Scalar& y, const Scalar& z) + : m_coeffs(x, y, z, Scalar(0)) + {} + + 
inline AlignedVector3(const AlignedVector3& other) + : Base(), m_coeffs(other.m_coeffs) + {} + + template + struct generic_assign_selector {}; + + template struct generic_assign_selector + { + inline static void run(AlignedVector3& dest, const XprType& src) + { + dest.m_coeffs = src; + } + }; + + template struct generic_assign_selector + { + inline static void run(AlignedVector3& dest, const XprType& src) + { + dest.m_coeffs.template head<3>() = src; + dest.m_coeffs.w() = Scalar(0); + } + }; + + template + inline AlignedVector3(const MatrixBase& other) + { + generic_assign_selector::run(*this,other.derived()); + } + + inline AlignedVector3& operator=(const AlignedVector3& other) + { m_coeffs = other.m_coeffs; return *this; } + + template + inline AlignedVector3& operator=(const MatrixBase& other) + { + generic_assign_selector::run(*this,other.derived()); + return *this; + } + + inline AlignedVector3 operator+(const AlignedVector3& other) const + { return AlignedVector3(m_coeffs + other.m_coeffs); } + + inline AlignedVector3& operator+=(const AlignedVector3& other) + { m_coeffs += other.m_coeffs; return *this; } + + inline AlignedVector3 operator-(const AlignedVector3& other) const + { return AlignedVector3(m_coeffs - other.m_coeffs); } + + inline AlignedVector3 operator-() const + { return AlignedVector3(-m_coeffs); } + + inline AlignedVector3 operator-=(const AlignedVector3& other) + { m_coeffs -= other.m_coeffs; return *this; } + + inline AlignedVector3 operator*(const Scalar& s) const + { return AlignedVector3(m_coeffs * s); } + + inline friend AlignedVector3 operator*(const Scalar& s,const AlignedVector3& vec) + { return AlignedVector3(s * vec.m_coeffs); } + + inline AlignedVector3& operator*=(const Scalar& s) + { m_coeffs *= s; return *this; } + + inline AlignedVector3 operator/(const Scalar& s) const + { return AlignedVector3(m_coeffs / s); } + + inline AlignedVector3& operator/=(const Scalar& s) + { m_coeffs /= s; return *this; } + + inline Scalar dot(const 
AlignedVector3& other) const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + eigen_assert(other.m_coeffs.w()==Scalar(0)); + return m_coeffs.dot(other.m_coeffs); + } + + inline void normalize() + { + m_coeffs /= norm(); + } + + inline AlignedVector3 normalized() const + { + return AlignedVector3(m_coeffs / norm()); + } + + inline Scalar sum() const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + return m_coeffs.sum(); + } + + inline Scalar squaredNorm() const + { + eigen_assert(m_coeffs.w()==Scalar(0)); + return m_coeffs.squaredNorm(); + } + + inline Scalar norm() const + { + using std::sqrt; + return sqrt(squaredNorm()); + } + + inline AlignedVector3 cross(const AlignedVector3& other) const + { + return AlignedVector3(m_coeffs.cross3(other.m_coeffs)); + } + + template + inline bool isApprox(const MatrixBase& other, const RealScalar& eps=NumTraits::dummy_precision()) const + { + return m_coeffs.template head<3>().isApprox(other,eps); + } + + CoeffType& coeffs() { return m_coeffs; } + const CoeffType& coeffs() const { return m_coeffs; } +}; + +namespace internal { + +template +struct eval, Dense> +{ + typedef const AlignedVector3<_Scalar>& type; +}; + +template +struct evaluator > + : evaluator > +{ + typedef AlignedVector3 XprType; + typedef evaluator > Base; + + evaluator(const XprType &m) : Base(m.coeffs()) {} +}; + +} + +//@} + +} + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ALIGNED_VECTOR3 diff --git a/external/unsupported/Eigen/ArpackSupport b/external/unsupported/Eigen/ArpackSupport new file mode 100644 index 0000000..67c4ac8 --- /dev/null +++ b/external/unsupported/Eigen/ArpackSupport @@ -0,0 +1,30 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_ARPACKSUPPORT_MODULE_H +#define EIGEN_ARPACKSUPPORT_MODULE_H + +#include "../../Eigen/Core" + +/** \defgroup ArpackSupport_Module Arpack support module + * + * This module provides a wrapper to Arpack, a library for sparse eigenvalue decomposition. + * + * \code + * #include + * \endcode + */ + +#include "../../Eigen/SparseCholesky" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" +#include "src/Eigenvalues/ArpackSelfAdjointEigenSolver.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ARPACKSUPPORT_MODULE_H diff --git a/external/unsupported/Eigen/AutoDiff b/external/unsupported/Eigen/AutoDiff new file mode 100644 index 0000000..7a4ff46 --- /dev/null +++ b/external/unsupported/Eigen/AutoDiff @@ -0,0 +1,46 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_AUTODIFF_MODULE +#define EIGEN_AUTODIFF_MODULE + +namespace Eigen { + +/** + * \defgroup AutoDiff_Module Auto Diff module + * + * This module features forward automatic differentation via a simple + * templated scalar type wrapper AutoDiffScalar. + * + * Warning : this should NOT be confused with numerical differentiation, which + * is a different method and has its own module in Eigen : \ref NumericalDiff_Module. 
+ * + * \code + * #include + * \endcode + */ +//@{ + +} +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + + +#include "src/AutoDiff/AutoDiffScalar.h" +// #include "src/AutoDiff/AutoDiffVector.h" +#include "src/AutoDiff/AutoDiffJacobian.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + + + +namespace Eigen { +//@} +} + +#endif // EIGEN_AUTODIFF_MODULE diff --git a/external/unsupported/Eigen/BVH b/external/unsupported/Eigen/BVH new file mode 100644 index 0000000..666c983 --- /dev/null +++ b/external/unsupported/Eigen/BVH @@ -0,0 +1,95 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Ilya Baran +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BVH_MODULE_H +#define EIGEN_BVH_MODULE_H + +#include "../../Eigen/Core" +#include "../../Eigen/Geometry" +#include "../../Eigen/StdVector" +#include +#include + +namespace Eigen { + +/** + * \defgroup BVH_Module BVH module + * \brief This module provides generic bounding volume hierarchy algorithms + * and reference tree implementations. + * + * + * \code + * #include + * \endcode + * + * A bounding volume hierarchy (BVH) can accelerate many geometric queries. This module provides a generic implementation + * of the two basic algorithms over a BVH: intersection of a query object against all objects in the hierarchy and minimization + * of a function over the objects in the hierarchy. It also provides intersection and minimization over a cartesian product of + * two BVH's. A BVH accelerates intersection by using the fact that if a query object does not intersect a volume, then it cannot + * intersect any object contained in that volume. 
Similarly, a BVH accelerates minimization because the minimum of a function + * over a volume is no greater than the minimum of a function over any object contained in it. + * + * Some sample queries that can be written in terms of intersection are: + * - Determine all points where a ray intersects a triangle mesh + * - Given a set of points, determine which are contained in a query sphere + * - Given a set of spheres, determine which contain the query point + * - Given a set of disks, determine if any is completely contained in a query rectangle (represent each 2D disk as a point \f$(x,y,r)\f$ + * in 3D and represent the rectangle as a pyramid based on the original rectangle and shrinking in the \f$r\f$ direction) + * - Given a set of points, count how many pairs are \f$d\pm\epsilon\f$ apart (done by looking at the cartesian product of the set + * of points with itself) + * + * Some sample queries that can be written in terms of function minimization over a set of objects are: + * - Find the intersection between a ray and a triangle mesh closest to the ray origin (function is infinite off the ray) + * - Given a polyline and a query point, determine the closest point on the polyline to the query + * - Find the diameter of a point cloud (done by looking at the cartesian product and using negative distance as the function) + * - Determine how far two meshes are from colliding (this is also a cartesian product query) + * + * This implementation decouples the basic algorithms both from the type of hierarchy (and the types of the bounding volumes) and + * from the particulars of the query. To enable abstraction from the BVH, the BVH is required to implement a generic mechanism + * for traversal. To abstract from the query, the query is responsible for keeping track of results. 
+ * + * To be used in the algorithms, a hierarchy must implement the following traversal mechanism (see KdBVH for a sample implementation): \code + typedef Volume //the type of bounding volume + typedef Object //the type of object in the hierarchy + typedef Index //a reference to a node in the hierarchy--typically an int or a pointer + typedef VolumeIterator //an iterator type over node children--returns Index + typedef ObjectIterator //an iterator over object (leaf) children--returns const Object & + Index getRootIndex() const //returns the index of the hierarchy root + const Volume &getVolume(Index index) const //returns the bounding volume of the node at given index + void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, + ObjectIterator &outOBegin, ObjectIterator &outOEnd) const + //getChildren takes a node index and makes [outVBegin, outVEnd) range over its node children + //and [outOBegin, outOEnd) range over its object children + \endcode + * + * To use the hierarchy, call BVIntersect or BVMinimize, passing it a BVH (or two, for cartesian product) and a minimizer or intersector. + * For an intersection query on a single BVH, the intersector encapsulates the query and must provide two functions: + * \code + bool intersectVolume(const Volume &volume) //returns true if the query intersects the volume + bool intersectObject(const Object &object) //returns true if the intersection search should terminate immediately + \endcode + * The guarantee that BVIntersect provides is that intersectObject will be called on every object whose bounding volume + * intersects the query (but possibly on other objects too) unless the search is terminated prematurely. It is the + * responsibility of the intersectObject function to keep track of the results in whatever manner is appropriate. + * The cartesian product intersection and the BVMinimize queries are similar--see their individual documentation. 
+ * + * The following is a simple but complete example for how to use the BVH to accelerate the search for a closest red-blue point pair: + * \include BVH_Example.cpp + * Output: \verbinclude BVH_Example.out + */ +} + +//@{ + +#include "src/BVH/BVAlgorithms.h" +#include "src/BVH/KdBVH.h" + +//@} + +#endif // EIGEN_BVH_MODULE_H diff --git a/external/unsupported/Eigen/CMakeLists.txt b/external/unsupported/Eigen/CMakeLists.txt new file mode 100644 index 0000000..631a060 --- /dev/null +++ b/external/unsupported/Eigen/CMakeLists.txt @@ -0,0 +1,32 @@ +set(Eigen_HEADERS + AdolcForward + AlignedVector3 + ArpackSupport + AutoDiff + BVH + EulerAngles + FFT + IterativeSolvers + KroneckerProduct + LevenbergMarquardt + MatrixFunctions + MoreVectorization + MPRealSupport + NonLinearOptimization + NumericalDiff + OpenGLSupport + Polynomials + Skyline + SparseExtra + SpecialFunctions + Splines + ) + +install(FILES + ${Eigen_HEADERS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel + ) + +install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen COMPONENT Devel FILES_MATCHING PATTERN "*.h") + +add_subdirectory(CXX11) diff --git a/external/unsupported/Eigen/CXX11/CMakeLists.txt b/external/unsupported/Eigen/CXX11/CMakeLists.txt new file mode 100644 index 0000000..385ed24 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/CMakeLists.txt @@ -0,0 +1,8 @@ +set(Eigen_CXX11_HEADERS Tensor TensorSymmetry ThreadPool) + +install(FILES + ${Eigen_CXX11_HEADERS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/CXX11 COMPONENT Devel + ) + +install(DIRECTORY src DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/CXX11 COMPONENT Devel FILES_MATCHING PATTERN "*.h") diff --git a/external/unsupported/Eigen/CXX11/Tensor b/external/unsupported/Eigen/CXX11/Tensor new file mode 100644 index 0000000..0938bb5 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/Tensor @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for 
linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +//#ifndef EIGEN_CXX11_TENSOR_MODULE +//#define EIGEN_CXX11_TENSOR_MODULE + +#include "../../../Eigen/Core" + +#if EIGEN_HAS_CXX11 + +#include "../SpecialFunctions" + +#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h" +#include "src/util/CXX11Meta.h" +#include "src/util/MaxSizeVector.h" + +/** \defgroup CXX11_Tensor_Module Tensor Module + * + * This module provides a Tensor class for storing arbitrarily indexed + * objects. + * + * \code + * #include + * \endcode + * + * Much of the documentation can be found \ref eigen_tensors "here". + */ + +#include +#include +#include +#include +#include +#include +#include + +#if defined(EIGEN_USE_THREADS) || defined(EIGEN_USE_SYCL) +#include "ThreadPool" +#endif + +#ifdef EIGEN_USE_GPU + #include + #if defined(EIGEN_USE_HIP) + #include + #else + #include + #endif +#endif + +#include "src/Tensor/TensorMacros.h" +#include "src/Tensor/TensorForwardDeclarations.h" +#include "src/Tensor/TensorMeta.h" +#include "src/Tensor/TensorFunctors.h" +#include "src/Tensor/TensorCostModel.h" +#include "src/Tensor/TensorDeviceDefault.h" +#include "src/Tensor/TensorDeviceThreadPool.h" +#include "src/Tensor/TensorDeviceGpu.h" +#ifndef gpu_assert +#define gpu_assert(x) +#endif +#include "src/Tensor/TensorDeviceSycl.h" +#include "src/Tensor/TensorIndexList.h" +#include "src/Tensor/TensorDimensionList.h" +#include "src/Tensor/TensorDimensions.h" +#include "src/Tensor/TensorInitializer.h" +#include "src/Tensor/TensorTraits.h" +#include "src/Tensor/TensorRandom.h" +#include "src/Tensor/TensorUInt128.h" +#include "src/Tensor/TensorIntDiv.h" +#include "src/Tensor/TensorGlobalFunctions.h" + +#include "src/Tensor/TensorBase.h" +#include 
"src/Tensor/TensorBlock.h" + +#include "src/Tensor/TensorEvaluator.h" +#include "src/Tensor/TensorExpr.h" +#include "src/Tensor/TensorReduction.h" +#include "src/Tensor/TensorReductionGpu.h" +#include "src/Tensor/TensorArgMax.h" +#include "src/Tensor/TensorConcatenation.h" +#include "src/Tensor/TensorContractionMapper.h" +#include "src/Tensor/TensorContractionBlocking.h" +#include "src/Tensor/TensorContraction.h" +#include "src/Tensor/TensorContractionThreadPool.h" +#include "src/Tensor/TensorContractionGpu.h" +#include "src/Tensor/TensorConversion.h" +#include "src/Tensor/TensorConvolution.h" +#include "src/Tensor/TensorFFT.h" +#include "src/Tensor/TensorPatch.h" +#include "src/Tensor/TensorImagePatch.h" +#include "src/Tensor/TensorVolumePatch.h" +#include "src/Tensor/TensorBroadcasting.h" +#include "src/Tensor/TensorChipping.h" +#include "src/Tensor/TensorInflation.h" +#include "src/Tensor/TensorLayoutSwap.h" +#include "src/Tensor/TensorMorphing.h" +#include "src/Tensor/TensorPadding.h" +#include "src/Tensor/TensorReverse.h" +#include "src/Tensor/TensorShuffling.h" +#include "src/Tensor/TensorStriding.h" +#include "src/Tensor/TensorCustomOp.h" +#include "src/Tensor/TensorEvalTo.h" +#include "src/Tensor/TensorForcedEval.h" +#include "src/Tensor/TensorGenerator.h" +#include "src/Tensor/TensorAssign.h" +#include "src/Tensor/TensorScan.h" +#include "src/Tensor/TensorTrace.h" + +#ifdef EIGEN_USE_SYCL +#include "src/Tensor/TensorReductionSycl.h" +#include "src/Tensor/TensorConvolutionSycl.h" +#include "src/Tensor/TensorContractionSycl.h" +#include "src/Tensor/TensorScanSycl.h" +#endif + +#include "src/Tensor/TensorExecutor.h" +#include "src/Tensor/TensorDevice.h" + +#include "src/Tensor/TensorStorage.h" +#include "src/Tensor/Tensor.h" +#include "src/Tensor/TensorFixedSize.h" +#include "src/Tensor/TensorMap.h" +#include "src/Tensor/TensorRef.h" + +#include "src/Tensor/TensorIO.h" + +#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // 
EIGEN_HAS_CXX11 +//#endif // EIGEN_CXX11_TENSOR_MODULE diff --git a/external/unsupported/Eigen/CXX11/TensorSymmetry b/external/unsupported/Eigen/CXX11/TensorSymmetry new file mode 100644 index 0000000..b09c5e4 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/TensorSymmetry @@ -0,0 +1,42 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE +#define EIGEN_CXX11_TENSORSYMMETRY_MODULE + +#include "Tensor" + +#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "src/util/CXX11Meta.h" + +/** \defgroup CXX11_TensorSymmetry_Module Tensor Symmetry Module + * + * This module provides a classes that allow for the definition of + * symmetries w.r.t. tensor indices. + * + * Including this module will implicitly include the Tensor module. + * + * \code + * #include + * \endcode + */ + +#include "src/TensorSymmetry/util/TemplateGroupTheory.h" +#include "src/TensorSymmetry/Symmetry.h" +#include "src/TensorSymmetry/StaticSymmetry.h" +#include "src/TensorSymmetry/DynamicSymmetry.h" + +#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CXX11_TENSORSYMMETRY_MODULE + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/external/unsupported/Eigen/CXX11/ThreadPool b/external/unsupported/Eigen/CXX11/ThreadPool new file mode 100644 index 0000000..c5cafb2 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/ThreadPool @@ -0,0 +1,74 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_MODULE +#define EIGEN_CXX11_THREADPOOL_MODULE + +#include "../../../Eigen/Core" + +#include "../../../Eigen/src/Core/util/DisableStupidWarnings.h" + +/** \defgroup CXX11_ThreadPool_Module C++11 ThreadPool Module + * + * This module provides 2 threadpool implementations + * - a simple reference implementation + * - a faster non blocking implementation + * + * This module requires C++11. + * + * \code + * #include + * \endcode + */ + + +// The code depends on CXX11, so only include the module if the +// compiler supports it. +#if (EIGEN_COMP_CXXVER >= 11) +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// There are non-parenthesized calls to "max" in the header, +// which trigger a check in test/main.h causing compilation to fail. +// We work around the check here by removing the check for max in +// the case where we have to emulate thread_local. 
+#ifdef max +#undef max +#endif +#include + +#include "src/util/CXX11Meta.h" +#include "src/util/MaxSizeVector.h" + +#include "src/ThreadPool/ThreadLocal.h" +#include "src/ThreadPool/ThreadYield.h" +#include "src/ThreadPool/ThreadCancel.h" +#include "src/ThreadPool/EventCount.h" +#include "src/ThreadPool/RunQueue.h" +#include "src/ThreadPool/ThreadPoolInterface.h" +#include "src/ThreadPool/ThreadEnvironment.h" +#include "src/ThreadPool/Barrier.h" +#include "src/ThreadPool/NonBlockingThreadPool.h" + +#endif + +#include "../../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CXX11_THREADPOOL_MODULE diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/README.md b/external/unsupported/Eigen/CXX11/src/Tensor/README.md new file mode 100644 index 0000000..2f65b1b --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/README.md @@ -0,0 +1,1815 @@ +# Eigen Tensors {#eigen_tensors} + +Tensors are multidimensional arrays of elements. Elements are typically scalars, +but more complex types such as strings are also supported. + +## Tensor Classes + +You can manipulate a tensor with one of the following classes. They all are in +the namespace `::Eigen.` + + +### Class Tensor + +This is the class to use to create a tensor and allocate memory for it. The +class is templatized with the tensor datatype, such as float or int, and the +tensor rank. The rank is the number of dimensions, for example rank 2 is a +matrix. + +Tensors of this class are resizable. For example, if you assign a tensor of a +different size to a Tensor, that tensor is resized to match its new value. + +#### Constructor Tensor(size0, size1, ...) + +Constructor for a Tensor. The constructor must be passed `rank` integers +indicating the sizes of the instance along each of the the `rank` +dimensions. + + // Create a tensor of rank 3 of sizes 2, 3, 4. This tensor owns + // memory to hold 24 floating point values (24 = 2 x 3 x 4). 
+ Tensor t_3d(2, 3, 4); + + // Resize t_3d by assigning a tensor of different sizes, but same rank. + t_3d = Tensor(3, 4, 3); + +#### Constructor Tensor(size_array) + +Constructor where the sizes for the constructor are specified as an array of +values instead of an explicitly list of parameters. The array type to use is +`Eigen::array`. The array can be constructed automatically +from an initializer list. + + // Create a tensor of strings of rank 2 with sizes 5, 7. + Tensor t_2d({5, 7}); + + +### Class TensorFixedSize> + +Class to use for tensors of fixed size, where the size is known at compile +time. Fixed sized tensors can provide very fast computations because all their +dimensions are known by the compiler. FixedSize tensors are not resizable. + +If the total number of elements in a fixed size tensor is small enough the +tensor data is held onto the stack and does not cause heap allocation and free. + + // Create a 4 x 3 tensor of floats. + TensorFixedSize> t_4x3; + +### Class TensorMap> + +This is the class to use to create a tensor on top of memory allocated and +owned by another part of your code. It allows to view any piece of allocated +memory as a Tensor. Instances of this class do not own the memory where the +data are stored. + +A TensorMap is not resizable because it does not own the memory where its data +are stored. + +#### Constructor TensorMap>(data, size0, size1, ...) + +Constructor for a Tensor. The constructor must be passed a pointer to the +storage for the data, and "rank" size attributes. The storage has to be +large enough to hold all the data. + + // Map a tensor of ints on top of stack-allocated storage. + int storage[128]; // 2 x 4 x 2 x 8 = 128 + TensorMap> t_4d(storage, 2, 4, 2, 8); + + // The same storage can be viewed as a different tensor. + // You can also pass the sizes as an array. + TensorMap> t_2d(storage, 16, 8); + + // You can also map fixed-size tensors. Here we get a 1d view of + // the 2d fixed-size tensor. 
+ TensorFixedSize> t_4x3; + TensorMap> t_12(t_4x3.data(), 12); + + +#### Class TensorRef + +See Assigning to a TensorRef below. + +## Accessing Tensor Elements + +#### tensor(index0, index1...) + +Return the element at position `(index0, index1...)` in tensor +`tensor`. You must pass as many parameters as the rank of `tensor`. +The expression can be used as an l-value to set the value of the element at the +specified position. The value returned is of the datatype of the tensor. + + // Set the value of the element at position (0, 1, 0); + Tensor t_3d(2, 3, 4); + t_3d(0, 1, 0) = 12.0f; + + // Initialize all elements to random values. + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 3; ++j) { + for (int k = 0; k < 4; ++k) { + t_3d(i, j, k) = ...some random value...; + } + } + } + + // Print elements of a tensor. + for (int i = 0; i < 2; ++i) { + LOG(INFO) << t_3d(i, 0, 0); + } + + +## TensorLayout + +The tensor library supports 2 layouts: `ColMajor` (the default) and +`RowMajor`. Only the default column major layout is currently fully +supported, and it is therefore not recommended to attempt to use the row major +layout at the moment. + +The layout of a tensor is optionally specified as part of its type. If not +specified explicitly column major is assumed. + + Tensor col_major; // equivalent to Tensor + TensorMap > row_major(data, ...); + +All the arguments to an expression must use the same layout. Attempting to mix +different layouts will result in a compilation error. + +It is possible to change the layout of a tensor or an expression using the +`swap_layout()` method. Note that this will also reverse the order of the +dimensions. 
+ + Tensor col_major(2, 4); + Tensor row_major(2, 4); + + Tensor col_major_result = col_major; // ok, layouts match + Tensor col_major_result = row_major; // will not compile + + // Simple layout swap + col_major_result = row_major.swap_layout(); + eigen_assert(col_major_result.dimension(0) == 4); + eigen_assert(col_major_result.dimension(1) == 2); + + // Swap the layout and preserve the order of the dimensions + array shuffle(1, 0); + col_major_result = row_major.swap_layout().shuffle(shuffle); + eigen_assert(col_major_result.dimension(0) == 2); + eigen_assert(col_major_result.dimension(1) == 4); + + +## Tensor Operations + +The Eigen Tensor library provides a vast library of operations on Tensors: +numerical operations such as addition and multiplication, geometry operations +such as slicing and shuffling, etc. These operations are available as methods +of the Tensor classes, and in some cases as operator overloads. For example +the following code computes the elementwise addition of two tensors: + + Tensor t1(2, 3, 4); + ...set some values in t1... + Tensor t2(2, 3, 4); + ...set some values in t2... + // Set t3 to the element wise sum of t1 and t2 + Tensor t3 = t1 + t2; + +While the code above looks easy enough, it is important to understand that the +expression `t1 + t2` is not actually adding the values of the tensors. The +expression instead constructs a "tensor operator" object of the class +TensorCwiseBinaryOp, which has references to the tensors +`t1` and `t2`. This is a small C++ object that knows how to add +`t1` and `t2`. It is only when the value of the expression is assigned +to the tensor `t3` that the addition is actually performed. Technically, +this happens through the overloading of `operator=()` in the Tensor class. + +This mechanism for computing tensor expressions allows for lazy evaluation and +optimizations which are what make the tensor library very fast. 
+ +Of course, the tensor operators do nest, and the expression `t1 + t2 * 0.3f` +is actually represented with the (approximate) tree of operators: + + TensorCwiseBinaryOp(t1, TensorCwiseUnaryOp(t2, 0.3f)) + + +### Tensor Operations and C++ "auto" + +Because Tensor operations create tensor operators, the C++ `auto` keyword +does not have its intuitive meaning. Consider these 2 lines of code: + + Tensor t3 = t1 + t2; + auto t4 = t1 + t2; + +In the first line we allocate the tensor `t3` and it will contain the +result of the addition of `t1` and `t2`. In the second line, `t4` +is actually the tree of tensor operators that will compute the addition of +`t1` and `t2`. In fact, `t4` is *not* a tensor and you cannot get +the values of its elements: + + Tensor t3 = t1 + t2; + cout << t3(0, 0, 0); // OK prints the value of t1(0, 0, 0) + t2(0, 0, 0) + + auto t4 = t1 + t2; + cout << t4(0, 0, 0); // Compilation error! + +When you use `auto` you do not get a Tensor as a result but instead a +non-evaluated expression. So only use `auto` to delay evaluation. + +Unfortunately, there is no single underlying concrete type for holding +non-evaluated expressions, hence you have to use auto in the case when you do +want to hold non-evaluated expressions. + +When you need the results of set of tensor computations you have to assign the +result to a Tensor that will be capable of holding onto them. This can be +either a normal Tensor, a fixed size Tensor, or a TensorMap on an existing +piece of memory. All the following will work: + + auto t4 = t1 + t2; + + Tensor result = t4; // Could also be: result(t4); + cout << result(0, 0, 0); + + TensorMap result(, , ...) = t4; + cout << result(0, 0, 0); + + TensorFixedSize> result = t4; + cout << result(0, 0, 0); + +Until you need the results, you can keep the operation around, and even reuse +it for additional operations. As long as you keep the expression as an +operation, no computation is performed. 
+ + // One way to compute exp((t1 + t2) * 0.2f); + auto t3 = t1 + t2; + auto t4 = t3 * 0.2f; + auto t5 = t4.exp(); + Tensor result = t5; + + // Another way, exactly as efficient as the previous one: + Tensor result = ((t1 + t2) * 0.2f).exp(); + +### Controlling When Expression are Evaluated + +There are several ways to control when expressions are evaluated: + +* Assignment to a Tensor, TensorFixedSize, or TensorMap. +* Use of the eval() method. +* Assignment to a TensorRef. + +#### Assigning to a Tensor, TensorFixedSize, or TensorMap. + +The most common way to evaluate an expression is to assign it to a Tensor. In +the example below, the `auto` declarations make the intermediate values +"Operations", not Tensors, and do not cause the expressions to be evaluated. +The assignment to the Tensor `result` causes the evaluation of all the +operations. + + auto t3 = t1 + t2; // t3 is an Operation. + auto t4 = t3 * 0.2f; // t4 is an Operation. + auto t5 = t4.exp(); // t5 is an Operation. + Tensor result = t5; // The operations are evaluated. + +If you know the ranks and sizes of the Operation value you can assign the +Operation to a TensorFixedSize instead of a Tensor, which is a bit more +efficient. + + // We know that the result is a 4x4x2 tensor! + TensorFixedSize> result = t5; + +Simiarly, assigning an expression to a TensorMap causes its evaluation. Like +tensors of type TensorFixedSize, TensorMaps cannot be resized so they have to +have the rank and sizes of the expression that are assigned to them. + +#### Calling eval(). + +When you compute large composite expressions, you sometimes want to tell Eigen +that an intermediate value in the expression tree is worth evaluating ahead of +time. This is done by inserting a call to the `eval()` method of the +expression Operation. 
+ + // The previous example could have been written: + Tensor result = ((t1 + t2) * 0.2f).exp(); + + // If you want to compute (t1 + t2) once ahead of time you can write: + Tensor result = ((t1 + t2).eval() * 0.2f).exp(); + +Semantically, calling `eval()` is equivalent to materializing the value of +the expression in a temporary Tensor of the right size. The code above in +effect does: + + // .eval() knows the size! + TensorFixedSize> tmp = t1 + t2; + Tensor result = (tmp * 0.2f).exp(); + +Note that the return value of `eval()` is itself an Operation, so the +following code does not do what you may think: + + // Here t3 is an evaluation Operation. t3 has not been evaluated yet. + auto t3 = (t1 + t2).eval(); + + // You can use t3 in another expression. Still no evaluation. + auto t4 = (t3 * 0.2f).exp(); + + // The value is evaluated when you assign the Operation to a Tensor, using + // an intermediate tensor to represent t3.x + Tensor result = t4; + +While in the examples above calling `eval()` does not make a difference in +performance, in other cases it can make a huge difference. In the expression +below the `broadcast()` expression causes the `X.maximum()` expression +to be evaluated many times: + + Tensor<...> X ...; + Tensor<...> Y = ((X - X.maximum(depth_dim).reshape(dims2d).broadcast(bcast)) + * beta).exp(); + +Inserting a call to `eval()` between the `maximum()` and +`reshape()` calls guarantees that maximum() is only computed once and +greatly speeds-up execution: + + Tensor<...> Y = + ((X - X.maximum(depth_dim).eval().reshape(dims2d).broadcast(bcast)) + * beta).exp(); + +In the other example below, the tensor `Y` is both used in the expression +and its assignment. 
This is an aliasing problem and if the evaluation is not +done in the right order Y will be updated incrementally during the evaluation +resulting in bogus results: + + Tensor<...> Y ...; + Y = Y / (Y.sum(depth_dim).reshape(dims2d).broadcast(bcast)); + +Inserting a call to `eval()` between the `sum()` and `reshape()` +expressions ensures that the sum is computed before any updates to `Y` are +done. + + Y = Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast)); + +Note that an eval around the full right hand side expression is not needed +because the generated has to compute the i-th value of the right hand side +before assigning it to the left hand side. + +However, if you were assigning the expression value to a shuffle of `Y` +then you would need to force an eval for correctness by adding an `eval()` +call for the right hand side: + + Y.shuffle(...) = + (Y / (Y.sum(depth_dim).eval().reshape(dims2d).broadcast(bcast))).eval(); + + +#### Assigning to a TensorRef. + +If you need to access only a few elements from the value of an expression you +can avoid materializing the value in a full tensor by using a TensorRef. + +A TensorRef is a small wrapper class for any Eigen Operation. It provides +overloads for the `()` operator that let you access individual values in +the expression. TensorRef is convenient, because the Operation themselves do +not provide a way to access individual elements. + + // Create a TensorRef for the expression. The expression is not + // evaluated yet. + TensorRef > ref = ((t1 + t2) * 0.2f).exp(); + + // Use "ref" to access individual elements. The expression is evaluated + // on the fly. + float at_0 = ref(0, 0, 0); + cout << ref(0, 1, 0); + +Only use TensorRef when you need a subset of the values of the expression. +TensorRef only computes the values you access. However note that if you are +going to access all the values it will be much faster to materialize the +results in a Tensor first. 
+ +In some cases, if the full Tensor result would be very large, you may save +memory by accessing it as a TensorRef. But not always. So don't count on it. + + +### Controlling How Expressions Are Evaluated + +The tensor library provides several implementations of the various operations +such as contractions and convolutions. The implementations are optimized for +different environments: single threaded on CPU, multi threaded on CPU, or on a +GPU using cuda. Additional implementations may be added later. + +You can choose which implementation to use with the `device()` call. If +you do not choose an implementation explicitly the default implementation that +uses a single thread on the CPU is used. + +The default implementation has been optimized for recent Intel CPUs, taking +advantage of SSE, AVX, and FMA instructions. Work is ongoing to tune the +library on ARM CPUs. Note that you need to pass compiler-dependent flags +to enable the use of SSE, AVX, and other instructions. + +For example, the following code adds two tensors using the default +single-threaded CPU implementation: + + Tensor a(30, 40); + Tensor b(30, 40); + Tensor c = a + b; + +To choose a different implementation you have to insert a `device()` call +before the assignment of the result. For technical C++ reasons this requires +that the Tensor for the result be declared on its own. This means that you +have to know the size of the result. + + Eigen::Tensor c(30, 40); + c.device(...) = a + b; + +The call to `device()` must be the last call on the left of the operator=. + +You must pass to the `device()` call an Eigen device object. There are +presently three devices you can use: DefaultDevice, ThreadPoolDevice and +GpuDevice. + + +#### Evaluating With the DefaultDevice + +This is exactly the same as not inserting a `device()` call. 
+ + DefaultDevice my_device; + c.device(my_device) = a + b; + +#### Evaluating with a Thread Pool + + // Create the Eigen ThreadPool + Eigen::ThreadPool pool(8 /* number of threads in pool */) + + // Create the Eigen ThreadPoolDevice. + Eigen::ThreadPoolDevice my_device(&pool, 4 /* number of threads to use */); + + // Now just use the device when evaluating expressions. + Eigen::Tensor c(30, 50); + c.device(my_device) = a.contract(b, dot_product_dims); + + +#### Evaluating On GPU + +This is presently a bit more complicated than just using a thread pool device. +You need to create a GPU device but you also need to explicitly allocate the +memory for tensors with cuda. + + +## API Reference + +### Datatypes + +In the documentation of the tensor methods and Operation we mention datatypes +that are tensor-type specific: + +#### ::Dimensions + +Acts like an array of ints. Has an `int size` attribute, and can be +indexed like an array to access individual values. Used to represent the +dimensions of a tensor. See `dimensions()`. + +#### ::Index + +Acts like an `int`. Used for indexing tensors along their dimensions. See +`operator()`, `dimension()`, and `size()`. + +#### ::Scalar + +Represents the datatype of individual tensor elements. For example, for a +`Tensor`, `Scalar` is the type `float`. See +`setConstant()`. + +#### + +We use this pseudo type to indicate that a tensor Operation is returned by a +method. We indicate in the text the type and dimensions of the tensor that the +Operation returns after evaluation. + +The Operation will have to be evaluated, for example by assigning it to a +tensor, before you can access the values of the resulting tensor. You can also +access the values through a TensorRef. + + +## Built-in Tensor Methods + +These are usual C++ methods that act on tensors immediately. They are not +Operations which provide delayed evaluation of their results. 
Unless specified +otherwise, all the methods listed below are available on all tensor classes: +Tensor, TensorFixedSize, and TensorMap. + +## Metadata + +### int NumDimensions + +Constant value indicating the number of dimensions of a Tensor. This is also +known as the tensor "rank". + + Eigen::Tensor a(3, 4); + cout << "Dims " << a.NumDimensions; + => Dims 2 + +### Dimensions dimensions() + +Returns an array-like object representing the dimensions of the tensor. +The actual type of the `dimensions()` result is `::``Dimensions`. + + Eigen::Tensor a(3, 4); + const Eigen::Tensor::Dimensions& d = a.dimensions(); + cout << "Dim size: " << d.size << ", dim 0: " << d[0] + << ", dim 1: " << d[1]; + => Dim size: 2, dim 0: 3, dim 1: 4 + +If you use a C++11 compiler, you can use `auto` to simplify the code: + + const auto& d = a.dimensions(); + cout << "Dim size: " << d.size << ", dim 0: " << d[0] + << ", dim 1: " << d[1]; + => Dim size: 2, dim 0: 3, dim 1: 4 + +### Index dimension(Index n) + +Returns the n-th dimension of the tensor. The actual type of the +`dimension()` result is `::``Index`, but you can +always use it like an int. + + Eigen::Tensor a(3, 4); + int dim1 = a.dimension(1); + cout << "Dim 1: " << dim1; + => Dim 1: 4 + +### Index size() + +Returns the total number of elements in the tensor. This is the product of all +the tensor dimensions. The actual type of the `size()` result is +`::``Index`, but you can always use it like an int. + + Eigen::Tensor a(3, 4); + cout << "Size: " << a.size(); + => Size: 12 + + +### Getting Dimensions From An Operation + +A few operations provide `dimensions()` directly, +e.g. `TensorReslicingOp`. Most operations defer calculating dimensions +until the operation is being evaluated. If you need access to the dimensions +of a deferred operation, you can wrap it in a TensorRef (see Assigning to a +TensorRef above), which provides `dimensions()` and `dimension()` as +above. 
+ +TensorRef can also wrap the plain Tensor types, so this is a useful idiom in +templated contexts where the underlying object could be either a raw Tensor +or some deferred operation (e.g. a slice of a Tensor). In this case, the +template code can wrap the object in a TensorRef and reason about its +dimensionality while remaining agnostic to the underlying type. + + +## Constructors + +### Tensor + +Creates a tensor of the specified size. The number of arguments must be equal +to the rank of the tensor. The content of the tensor is not initialized. + + Eigen::Tensor a(3, 4); + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + +### TensorFixedSize + +Creates a tensor of the specified size. The number of arguments in the Sizes<> +template parameter determines the rank of the tensor. The content of the tensor +is not initialized. + + Eigen::TensorFixedSize> a; + cout << "Rank: " << a.rank() << endl; + => Rank: 2 + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + +### TensorMap + +Creates a tensor mapping an existing array of data. The data must not be freed +until the TensorMap is discarded, and the size of the data must be large enough +to accommodate the coefficients of the tensor. + + float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; + Eigen::TensorMap> a(data, 3, 4); + cout << "NumRows: " << a.dimension(0) << " NumCols: " << a.dimension(1) << endl; + => NumRows: 3 NumCols: 4 + cout << "a(1, 2): " << a(1, 2) << endl; + => a(1, 2): 7 + + +## Contents Initialization + +When a new Tensor or a new TensorFixedSize are created, memory is allocated to +hold all the tensor elements, but the memory is not initialized. Similarly, +when a new TensorMap is created on top of non-initialized memory the memory its +contents are not initialized. + +You can use one of the methods below to initialize the tensor memory. 
These +have an immediate effect on the tensor and return the tensor itself as a +result. These are not tensor Operations which delay evaluation. + +### setConstant(const Scalar& val) + +Sets all elements of the tensor to the constant value `val`. `Scalar` +is the type of data stored in the tensor. You can pass any value that is +convertible to that type. + +Returns the tensor itself in case you want to chain another call. + + a.setConstant(12.3f); + cout << "Constant: " << endl << a << endl << endl; + => + Constant: + 12.3 12.3 12.3 12.3 + 12.3 12.3 12.3 12.3 + 12.3 12.3 12.3 12.3 + +Note that `setConstant()` can be used on any tensor where the element type +has a copy constructor and an `operator=()`: + + Eigen::Tensor a(2, 3); + a.setConstant("yolo"); + cout << "String tensor: " << endl << a << endl << endl; + => + String tensor: + yolo yolo yolo + yolo yolo yolo + + +### setZero() + +Fills the tensor with zeros. Equivalent to `setConstant(Scalar(0))`. +Returns the tensor itself in case you want to chain another call. + + a.setZero(); + cout << "Zeros: " << endl << a << endl << endl; + => + Zeros: + 0 0 0 0 + 0 0 0 0 + 0 0 0 0 + + +### setValues({..initializer_list}) + +Fills the tensor with explicit values specified in a std::initializer_list. +The type of the initializer list depends on the type and rank of the tensor. + +If the tensor has rank N, the initializer list must be nested N times. The +most deeply nested lists must contains P scalars of the Tensor type where P is +the size of the last dimension of the Tensor. + +For example, for a `TensorFixedSize` the initializer list must +contains 2 lists of 3 floats each. + +`setValues()` returns the tensor itself in case you want to chain another +call. + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}}); + cout << "a" << endl << a << endl << endl; + => + a + 0 1 2 + 3 4 5 + +If a list is too short, the corresponding elements of the tensor will not be +changed. 
This is valid at each level of nesting. For example the following +code only sets the values of the first row of the tensor. + + Eigen::Tensor a(2, 3); + a.setConstant(1000); + a.setValues({{10, 20, 30}}); + cout << "a" << endl << a << endl << endl; + => + a + 10 20 30 + 1000 1000 1000 + +### setRandom() + +Fills the tensor with random values. Returns the tensor itself in case you +want to chain another call. + + a.setRandom(); + cout << "Random: " << endl << a << endl << endl; + => + Random: + 0.680375 0.59688 -0.329554 0.10794 + -0.211234 0.823295 0.536459 -0.0452059 + 0.566198 -0.604897 -0.444451 0.257742 + +You can customize `setRandom()` by providing your own random number +generator as a template argument: + + a.setRandom(); + +Here, `MyRandomGenerator` must be a struct with the following member +functions, where Scalar and Index are the same as `::``Scalar` +and `::``Index`. + +See `struct UniformRandomGenerator` in TensorFunctors.h for an example. + + // Custom number generator for use with setRandom(). + struct MyRandomGenerator { + // Default and copy constructors. Both are needed + MyRandomGenerator() { } + MyRandomGenerator(const MyRandomGenerator& ) { } + + // Return a random value to be used. "element_location" is the + // location of the entry to set in the tensor, it can typically + // be ignored. + Scalar operator()(Eigen::DenseIndex element_location, + Eigen::DenseIndex /*unused*/ = 0) const { + return ; + } + + // Same as above but generates several numbers at a time. 
+ typename internal::packet_traits::type packetOp( + Eigen::DenseIndex packet_location, Eigen::DenseIndex /*unused*/ = 0) const { + return ; + } + }; + +You can also use one of the 2 random number generators that are part of the +tensor library: +* UniformRandomGenerator +* NormalRandomGenerator + + +## Data Access + +The Tensor, TensorFixedSize, and TensorRef classes provide the following +accessors to access the tensor coefficients: + + const Scalar& operator()(const array& indices) + const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + Scalar& operator()(const array& indices) + Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + +The number of indices must be equal to the rank of the tensor. Moreover, these +accessors are not available on tensor expressions. In order to access the +values of a tensor expression, the expression must either be evaluated or +wrapped in a TensorRef. + + +### Scalar* data() and const Scalar* data() const + +Returns a pointer to the storage for the tensor. The pointer is const if the +tensor was const. This allows direct access to the data. The layout of the +data depends on the tensor layout: RowMajor or ColMajor. + +This access is usually only needed for special cases, for example when mixing +Eigen Tensor code with other libraries. + +Scalar is the type of data stored in the tensor. + + Eigen::Tensor a(3, 4); + float* a_data = a.data(); + a_data[0] = 123.45f; + cout << "a(0, 0): " << a(0, 0); + => a(0, 0): 123.45 + + +## Tensor Operations + +All the methods documented below return non evaluated tensor `Operations`. +These can be chained: you can apply another Tensor Operation to the value +returned by the method. + +The chain of Operation is evaluated lazily, typically when it is assigned to a +tensor. See "Controlling when Expression are Evaluated" for more details about +their evaluation. 
+ +### constant(const Scalar& val) + +Returns a tensor of the same type and dimensions as the original tensor but +where all elements have the value `val`. + +This is useful, for example, when you want to add or subtract a constant from a +tensor, or multiply every element of a tensor by a scalar. + + Eigen::Tensor a(2, 3); + a.setConstant(1.0f); + Eigen::Tensor b = a + a.constant(2.0f); + Eigen::Tensor c = b * b.constant(0.2f); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + cout << "c" << endl << c << endl << endl; + => + a + 1 1 1 + 1 1 1 + + b + 3 3 3 + 3 3 3 + + c + 0.6 0.6 0.6 + 0.6 0.6 0.6 + +### random() + +Returns a tensor of the same type and dimensions as the current tensor +but where all elements have random values. + +This is for example useful to add random values to an existing tensor. +The generation of random values can be customized in the same manner +as for `setRandom()`. + + Eigen::Tensor a(2, 3); + a.setConstant(1.0f); + Eigen::Tensor b = a + a.random(); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 1 1 + 1 1 1 + + b + 1.68038 1.5662 1.82329 + 0.788766 1.59688 0.395103 + + +## Unary Element Wise Operations + +All these operations take a single input tensor as argument and return a tensor +of the same type and dimensions as the tensor to which they are applied. The +requested operations are applied to each element independently. + +### operator-() + +Returns a tensor of the same type and dimensions as the original tensor +containing the opposite values of the original tensor. + + Eigen::Tensor a(2, 3); + a.setConstant(1.0f); + Eigen::Tensor b = -a; + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 1 1 + 1 1 1 + + b + -1 -1 -1 + -1 -1 -1 + +### sqrt() + +Returns a tensor of the same type and dimensions as the original tensor +containing the square roots of the original tensor. 
+ +### rsqrt() + +Returns a tensor of the same type and dimensions as the original tensor +containing the inverse square roots of the original tensor. + +### square() + +Returns a tensor of the same type and dimensions as the original tensor +containing the squares of the original tensor values. + +### inverse() + +Returns a tensor of the same type and dimensions as the original tensor +containing the inverse of the original tensor values. + +### exp() + +Returns a tensor of the same type and dimensions as the original tensor +containing the exponential of the original tensor. + +### log() + +Returns a tensor of the same type and dimensions as the original tensor +containing the natural logarithms of the original tensor. + +### abs() + +Returns a tensor of the same type and dimensions as the original tensor +containing the absolute values of the original tensor. + +### pow(Scalar exponent) + +Returns a tensor of the same type and dimensions as the original tensor +containing the coefficients of the original tensor to the power of the +exponent. + +The type of the exponent, Scalar, is always the same as the type of the +tensor coefficients. For example, only integer exponents can be used in +conjuntion with tensors of integer values. + +You can use cast() to lift this restriction. For example this computes +cubic roots of an int Tensor: + + Eigen::Tensor a(2, 3); + a.setValues({{0, 1, 8}, {27, 64, 125}}); + Eigen::Tensor b = a.cast().pow(1.0 / 3.0); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 0 1 8 + 27 64 125 + + b + 0 1 2 + 3 4 5 + +### operator * (Scalar scale) + +Multiplies all the coefficients of the input tensor by the provided scale. + +### cwiseMax(Scalar threshold) +TODO + +### cwiseMin(Scalar threshold) +TODO + +### unaryExpr(const CustomUnaryOp& func) +TODO + + +## Binary Element Wise Operations + +These operations take two input tensors as arguments. 
The 2 input tensors should +be of the same type and dimensions. The result is a tensor of the same +dimensions as the tensors to which they are applied, and unless otherwise +specified it is also of the same type. The requested operations are applied to +each pair of elements independently. + +### operator+(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise sums of the inputs. + +### operator-(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise differences of the inputs. + +### operator*(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise products of the inputs. + +### operator/(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise quotients of the inputs. + +This operator is not supported for integer types. + +### cwiseMax(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise maximums of the inputs. + +### cwiseMin(const OtherDerived& other) + +Returns a tensor of the same type and dimensions as the input tensors +containing the coefficient wise mimimums of the inputs. + +### Logical operators + +The following logical operators are supported as well: + +* operator&&(const OtherDerived& other) +* operator||(const OtherDerived& other) +* operator<(const OtherDerived& other) +* operator<=(const OtherDerived& other) +* operator>(const OtherDerived& other) +* operator>=(const OtherDerived& other) +* operator==(const OtherDerived& other) +* operator!=(const OtherDerived& other) + +They all return a tensor of boolean values. 
+## Selection (select(const ThenDerived& thenTensor, const ElseDerived& elseTensor))
+ Eigen::array, 2> double_contraction_product_dims = { Eigen::IndexPair(0, 0), Eigen::IndexPair(1, 1) }; + Eigen::Tensor AdoubleContractedA = a.contract(a, double_contraction_product_dims); + + // Extracting the scalar value of the tensor contraction for further usage + int value = AdoubleContractedA(0); + +## Reduction Operations + +A *Reduction* operation returns a tensor with fewer dimensions than the +original tensor. The values in the returned tensor are computed by applying a +*reduction operator* to slices of values from the original tensor. You specify +the dimensions along which the slices are made. + +The Eigen Tensor library provides a set of predefined reduction operators such +as `maximum()` and `sum()` and lets you define additional operators by +implementing a few methods from a reductor template. + +### Reduction Dimensions + +All reduction operations take a single parameter of type +`::``Dimensions` which can always be specified as an array of +ints. These are called the "reduction dimensions." The values are the indices +of the dimensions of the input tensor over which the reduction is done. The +parameter can have at most as many element as the rank of the input tensor; +each element must be less than the tensor rank, as it indicates one of the +dimensions to reduce. + +Each dimension of the input tensor should occur at most once in the reduction +dimensions as the implementation does not remove duplicates. + +The order of the values in the reduction dimensions does not affect the +results, but the code may execute faster if you list the dimensions in +increasing order. + +Example: Reduction along one dimension. + + // Create a tensor of 2 dimensions + Eigen::Tensor a(2, 3); + a.setValues({{1, 2, 3}, {6, 5, 4}}); + // Reduce it along the second dimension (1)... + Eigen::array dims({1 /* dimension to reduce */}); + // ...using the "maximum" operator. + // The result is a tensor with one dimension. 
The size of + // that dimension is the same as the first (non-reduced) dimension of a. + Eigen::Tensor b = a.maximum(dims); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 1 2 3 + 6 5 4 + + b + 3 + 6 + +Example: Reduction along two dimensions. + + Eigen::Tensor a(2, 3, 4); + a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f}, + {7.0f, 6.0f, 5.0f, 4.0f}, + {8.0f, 9.0f, 10.0f, 11.0f}}, + {{12.0f, 13.0f, 14.0f, 15.0f}, + {19.0f, 18.0f, 17.0f, 16.0f}, + {20.0f, 21.0f, 22.0f, 23.0f}}}); + // The tensor a has 3 dimensions. We reduce along the + // first 2, resulting in a tensor with a single dimension + // of size 4 (the last dimension of a.) + // Note that we pass the array of reduction dimensions + // directly to the maximum() call. + Eigen::Tensor b = + a.maximum(Eigen::array({0, 1})); + cout << "b" << endl << b << endl << endl; + => + b + 20 + 21 + 22 + 23 + +#### Reduction along all dimensions + +As a special case, if you pass no parameter to a reduction operation the +original tensor is reduced along *all* its dimensions. The result is a +scalar, represented as a zero-dimension tensor. + + Eigen::Tensor a(2, 3, 4); + a.setValues({{{0.0f, 1.0f, 2.0f, 3.0f}, + {7.0f, 6.0f, 5.0f, 4.0f}, + {8.0f, 9.0f, 10.0f, 11.0f}}, + {{12.0f, 13.0f, 14.0f, 15.0f}, + {19.0f, 18.0f, 17.0f, 16.0f}, + {20.0f, 21.0f, 22.0f, 23.0f}}}); + // Reduce along all dimensions using the sum() operator. + Eigen::Tensor b = a.sum(); + cout << "b" << endl << b << endl << endl; + => + b + 276 + + +### sum(const Dimensions& new_dims) +### sum() + +Reduce a tensor using the sum() operator. The resulting values +are the sum of the reduced values. + +### mean(const Dimensions& new_dims) +### mean() + +Reduce a tensor using the mean() operator. The resulting values +are the mean of the reduced values. + +### maximum(const Dimensions& new_dims) +### maximum() + +Reduce a tensor using the maximum() operator. The resulting values are the +largest of the reduced values. 
+ +### minimum(const Dimensions& new_dims) +### minimum() + +Reduce a tensor using the minimum() operator. The resulting values +are the smallest of the reduced values. + +### prod(const Dimensions& new_dims) +### prod() + +Reduce a tensor using the prod() operator. The resulting values +are the product of the reduced values. + +### all(const Dimensions& new_dims) +### all() +Reduce a tensor using the all() operator. Casts tensor to bool and then checks +whether all elements are true. Runs through all elements rather than +short-circuiting, so may be significantly inefficient. + +### any(const Dimensions& new_dims) +### any() +Reduce a tensor using the any() operator. Casts tensor to bool and then checks +whether any element is true. Runs through all elements rather than +short-circuiting, so may be significantly inefficient. + + +### reduce(const Dimensions& new_dims, const Reducer& reducer) + +Reduce a tensor using a user-defined reduction operator. See `SumReducer` +in TensorFunctors.h for information on how to implement a reduction operator. + + +## Trace + +A *Trace* operation returns a tensor with fewer dimensions than the original +tensor. It returns a tensor whose elements are the sum of the elements of the +original tensor along the main diagonal for a list of specified dimensions, the +"trace dimensions". Similar to the `Reduction Dimensions`, the trace dimensions +are passed as an input parameter to the operation, are of type `::``Dimensions` +, and have the same requirements when passed as an input parameter. In addition, +the trace dimensions must have the same size. + +Example: Trace along 2 dimensions. + + // Create a tensor of 3 dimensions + Eigen::Tensor a(2, 2, 3); + a.setValues({{{1, 2, 3}, {4, 5, 6}}, {{7, 8, 9}, {10, 11, 12}}}); + // Specify the dimensions along which the trace will be computed. 
+    // In this example, the trace can only be computed along the dimensions
+    // with indices 0 and 1
+    Eigen::array<int, 2> dims({0, 1});
+    // The output tensor contains all but the trace dimensions.
+    Tensor<int, 1> a_trace = a.trace(dims);
+    cout << "a_trace:" << endl;
+    cout << a_trace << endl;
+    =>
+    a_trace:
+    11
+    13
+    15
+
+
+### trace(const Dimensions& new_dims)
+### trace()
+
+As a special case, if no parameter is passed to the operation, trace is computed
+along *all* dimensions of the input tensor.
+
+Example: Trace along all dimensions.
+
+    // Create a tensor of 3 dimensions, with all dimensions having the same size.
+    Eigen::Tensor<int, 3> a(3, 3, 3);
+    a.setValues({{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}},
+                 {{10, 11, 12}, {13, 14, 15}, {16, 17, 18}},
+                 {{19, 20, 21}, {22, 23, 24}, {25, 26, 27}}});
+    // Result is a zero dimension tensor
+    Tensor<int, 0> a_trace = a.trace();
+    cout << "a_trace:" << endl;
+    cout << a_trace << endl;
+    =>
+    a_trace:
+    42
+
+
+## Scan Operations
+
+A *Scan* operation returns a tensor with the same dimensions as the original
+tensor. The operation performs an inclusive scan along the specified
+axis, which means it computes a running total along the axis for a given
+reduction operation.
+If the reduction operation corresponds to summation, then this computes the
+prefix sum of the tensor along the given axis.
+
+Example:
+
+    // Create a tensor of 2 dimensions
+    Eigen::Tensor<int, 2> a(2, 3);
+    a.setValues({{1, 2, 3}, {4, 5, 6}});
+    // Scan it along the second dimension (1) using summation
+    Eigen::Tensor<int, 2> b = a.cumsum(1);
+    // The result is a tensor with the same size as the input
+    cout << "a" << endl << a << endl << endl;
+    cout << "b" << endl << b << endl << endl;
+    =>
+    a
+    1 2 3
+    4 5 6
+
+    b
+    1 3 6
+    4 9 15
+
+### cumsum(const Index& axis)
+
+Perform a scan by summing consecutive entries.
+
+### cumprod(const Index& axis)
+
+Perform a scan by multiplying consecutive entries.
+ + +## Convolutions + +### convolve(const Kernel& kernel, const Dimensions& dims) + +Returns a tensor that is the output of the convolution of the input tensor with the kernel, +along the specified dimensions of the input tensor. The dimension size for dimensions of the output tensor +which were part of the convolution will be reduced by the formula: +output_dim_size = input_dim_size - kernel_dim_size + 1 (requires: input_dim_size >= kernel_dim_size). +The dimension sizes for dimensions that were not part of the convolution will remain the same. +Performance of the convolution can depend on the length of the stride(s) of the input tensor dimension(s) along which the +convolution is computed (the first dimension has the shortest stride for ColMajor, whereas RowMajor's shortest stride is +for the last dimension). + + // Compute convolution along the second and third dimension. + Tensor input(3, 3, 7, 11); + Tensor kernel(2, 2); + Tensor output(3, 2, 6, 11); + input.setRandom(); + kernel.setRandom(); + + Eigen::array dims({1, 2}); // Specify second and third dimension for convolution. + output = input.convolve(kernel, dims); + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 2; ++j) { + for (int k = 0; k < 6; ++k) { + for (int l = 0; l < 11; ++l) { + const float result = output(i,j,k,l); + const float expected = input(i,j+0,k+0,l) * kernel(0,0) + + input(i,j+1,k+0,l) * kernel(1,0) + + input(i,j+0,k+1,l) * kernel(0,1) + + input(i,j+1,k+1,l) * kernel(1,1); + VERIFY_IS_APPROX(result, expected); + } + } + } + } + + +## Geometrical Operations + +These operations return a Tensor with different dimensions than the original +Tensor. They can be used to access slices of tensors, see them with different +dimensions, or pad tensors with additional data. + +### reshape(const Dimensions& new_dims) + +Returns a view of the input tensor that has been reshaped to the specified +new dimensions. The argument new_dims is an array of Index values. 
The +rank of the resulting tensor is equal to the number of elements in new_dims. + +The product of all the sizes in the new dimension array must be equal to +the number of elements in the input tensor. + + // Increase the rank of the input tensor by introducing a new dimension + // of size 1. + Tensor input(7, 11); + array three_dims{{7, 11, 1}}; + Tensor result = input.reshape(three_dims); + + // Decrease the rank of the input tensor by merging 2 dimensions; + array one_dim{{7 * 11}}; + Tensor result = input.reshape(one_dim); + +This operation does not move any data in the input tensor, so the resulting +contents of a reshaped Tensor depend on the data layout of the original Tensor. + +For example this is what happens when you `reshape()` a 2D ColMajor tensor +to one dimension: + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}}); + Eigen::array one_dim({3 * 2}); + Eigen::Tensor b = a.reshape(one_dim); + cout << "b" << endl << b << endl; + => + b + 0 + 300 + 100 + 400 + 200 + 500 + +This is what happens when the 2D Tensor is RowMajor: + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}}); + Eigen::array one_dim({3 * 2}); + Eigen::Tensor b = a.reshape(one_dim); + cout << "b" << endl << b << endl; + => + b + 0 + 100 + 200 + 300 + 400 + 500 + +The reshape operation is a lvalue. In other words, it can be used on the left +side of the assignment operator. + +The previous example can be rewritten as follow: + + Eigen::Tensor a(2, 3); + a.setValues({{0.0f, 100.0f, 200.0f}, {300.0f, 400.0f, 500.0f}}); + Eigen::array two_dim({2, 3}); + Eigen::Tensor b(6); + b.reshape(two_dim) = a; + cout << "b" << endl << b << endl; + => + b + 0 + 300 + 100 + 400 + 200 + 500 + +Note that "b" itself was not reshaped but that instead the assignment is done to +the reshape view of b. 
+ + +### shuffle(const Shuffle& shuffle) + +Returns a copy of the input tensor whose dimensions have been +reordered according to the specified permutation. The argument shuffle +is an array of Index values. Its size is the rank of the input +tensor. It must contain a permutation of 0, 1, ..., rank - 1. The i-th +dimension of the output tensor equals to the size of the shuffle[i]-th +dimension of the input tensor. For example: + + // Shuffle all dimensions to the left by 1. + Tensor input(20, 30, 50); + // ... set some values in input. + Tensor output = input.shuffle({1, 2, 0}) + + eigen_assert(output.dimension(0) == 30); + eigen_assert(output.dimension(1) == 50); + eigen_assert(output.dimension(2) == 20); + +Indices into the output tensor are shuffled accordingly to formulate +indices into the input tensor. For example, one can assert in the above +code snippet that: + + eigen_assert(output(3, 7, 11) == input(11, 3, 7)); + +In general, one can assert that + + eigen_assert(output(..., indices[shuffle[i]], ...) == + input(..., indices[i], ...)) + +The shuffle operation results in a lvalue, which means that it can be assigned +to. In other words, it can be used on the left side of the assignment operator. + +Let's rewrite the previous example to take advantage of this feature: + + // Shuffle all dimensions to the left by 1. + Tensor input(20, 30, 50); + // ... set some values in input. + Tensor output(30, 50, 20); + output.shuffle({2, 0, 1}) = input; + + +### stride(const Strides& strides) + +Returns a view of the input tensor that strides (skips stride-1 +elements) along each of the dimensions. The argument strides is an +array of Index values. The dimensions of the resulting tensor are +ceil(input_dimensions[i] / strides[i]). 
+ +For example this is what happens when you `stride()` a 2D tensor: + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, {600, 700, 800}, {900, 1000, 1100}}); + Eigen::array strides({3, 2}); + Eigen::Tensor b = a.stride(strides); + cout << "b" << endl << b << endl; + => + b + 0 200 + 900 1100 + +It is possible to assign a tensor to a stride: + Tensor input(20, 30, 50); + // ... set some values in input. + Tensor output(40, 90, 200); + output.stride({2, 3, 4}) = input; + + +### slice(const StartIndices& offsets, const Sizes& extents) + +Returns a sub-tensor of the given tensor. For each dimension i, the slice is +made of the coefficients stored between offset[i] and offset[i] + extents[i] in +the input tensor. + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, + {600, 700, 800}, {900, 1000, 1100}}); + Eigen::array offsets = {1, 0}; + Eigen::array extents = {2, 2}; + Eigen::Tensor slice = a.slice(offsets, extents); + cout << "a" << endl << a << endl; + => + a + 0 100 200 + 300 400 500 + 600 700 800 + 900 1000 1100 + cout << "slice" << endl << slice << endl; + => + slice + 300 400 + 600 700 + + +### chip(const Index offset, const Index dim) + +A chip is a special kind of slice. It is the subtensor at the given offset in +the dimension dim. The returned tensor has one fewer dimension than the input +tensor: the dimension dim is removed. + +For example, a matrix chip would be either a row or a column of the input +matrix. 
+ + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, + {600, 700, 800}, {900, 1000, 1100}}); + Eigen::Tensor row_3 = a.chip(2, 0); + Eigen::Tensor col_2 = a.chip(1, 1); + cout << "a" << endl << a << endl; + => + a + 0 100 200 + 300 400 500 + 600 700 800 + 900 1000 1100 + cout << "row_3" << endl << row_3 << endl; + => + row_3 + 600 700 800 + cout << "col_2" << endl << col_2 << endl; + => + col_2 + 100 400 700 1000 + +It is possible to assign values to a tensor chip since the chip operation is a +lvalue. For example: + + Eigen::Tensor a(3); + a.setValues({{100, 200, 300}}); + Eigen::Tensor b(2, 3); + b.setZero(); + b.chip(0, 0) = a; + cout << "a" << endl << a << endl; + => + a + 100 + 200 + 300 + cout << "b" << endl << b << endl; + => + b + 100 200 300 + 0 0 0 + + +### reverse(const ReverseDimensions& reverse) + +Returns a view of the input tensor that reverses the order of the coefficients +along a subset of the dimensions. The argument reverse is an array of boolean +values that indicates whether or not the order of the coefficients should be +reversed along each of the dimensions. This operation preserves the dimensions +of the input tensor. + +For example this is what happens when you `reverse()` the first dimension +of a 2D tensor: + + Eigen::Tensor a(4, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}, + {600, 700, 800}, {900, 1000, 1100}}); + Eigen::array reverse({true, false}); + Eigen::Tensor b = a.reverse(reverse); + cout << "a" << endl << a << endl << "b" << endl << b << endl; + => + a + 0 100 200 + 300 400 500 + 600 700 800 + 900 1000 1100 + b + 900 1000 1100 + 600 700 800 + 300 400 500 + 0 100 200 + + +### broadcast(const Broadcast& broadcast) + +Returns a view of the input tensor in which the input is replicated one to many +times. +The broadcast argument specifies how many copies of the input tensor need to be +made in each of the dimensions. 
+ + Eigen::Tensor a(2, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}}); + Eigen::array bcast({3, 2}); + Eigen::Tensor b = a.broadcast(bcast); + cout << "a" << endl << a << endl << "b" << endl << b << endl; + => + a + 0 100 200 + 300 400 500 + b + 0 100 200 0 100 200 + 300 400 500 300 400 500 + 0 100 200 0 100 200 + 300 400 500 300 400 500 + 0 100 200 0 100 200 + 300 400 500 300 400 500 + +### concatenate(const OtherDerived& other, Axis axis) + +TODO + +### pad(const PaddingDimensions& padding) + +Returns a view of the input tensor in which the input is padded with zeros. + + Eigen::Tensor a(2, 3); + a.setValues({{0, 100, 200}, {300, 400, 500}}); + Eigen::array, 2> paddings; + paddings[0] = make_pair(0, 1); + paddings[1] = make_pair(2, 3); + Eigen::Tensor b = a.pad(paddings); + cout << "a" << endl << a << endl << "b" << endl << b << endl; + => + a + 0 100 200 + 300 400 500 + b + 0 0 0 0 + 0 0 0 0 + 0 100 200 0 + 300 400 500 0 + 0 0 0 0 + 0 0 0 0 + 0 0 0 0 + + +### extract_patches(const PatchDims& patch_dims) + +Returns a tensor of coefficient patches extracted from the input tensor, where +each patch is of dimension specified by 'patch_dims'. The returned tensor has +one greater dimension than the input tensor, which is used to index each patch. +The patch index in the output tensor depends on the data layout of the input +tensor: the patch index is the last dimension ColMajor layout, and the first +dimension in RowMajor layout. 
+ +For example, given the following input tensor: + + Eigen::Tensor tensor(3,4); + tensor.setValues({{0.0f, 1.0f, 2.0f, 3.0f}, + {4.0f, 5.0f, 6.0f, 7.0f}, + {8.0f, 9.0f, 10.0f, 11.0f}}); + + cout << "tensor: " << endl << tensor << endl; + => + tensor: + 0 1 2 3 + 4 5 6 7 + 8 9 10 11 + +Six 2x2 patches can be extracted and indexed using the following code: + + Eigen::Tensor patch; + Eigen::array patch_dims; + patch_dims[0] = 2; + patch_dims[1] = 2; + patch = tensor.extract_patches(patch_dims); + for (int k = 0; k < 6; ++k) { + cout << "patch index: " << k << endl; + for (int i = 0; i < 2; ++i) { + for (int j = 0; j < 2; ++j) { + if (DataLayout == ColMajor) { + cout << patch(i, j, k) << " "; + } else { + cout << patch(k, i, j) << " "; + } + } + cout << endl; + } + } + +This code results in the following output when the data layout is ColMajor: + + patch index: 0 + 0 1 + 4 5 + patch index: 1 + 4 5 + 8 9 + patch index: 2 + 1 2 + 5 6 + patch index: 3 + 5 6 + 9 10 + patch index: 4 + 2 3 + 6 7 + patch index: 5 + 6 7 + 10 11 + +This code results in the following output when the data layout is RowMajor: +(NOTE: the set of patches is the same as in ColMajor, but are indexed differently). + + patch index: 0 + 0 1 + 4 5 + patch index: 1 + 1 2 + 5 6 + patch index: 2 + 2 3 + 6 7 + patch index: 3 + 4 5 + 8 9 + patch index: 4 + 5 6 + 9 10 + patch index: 5 + 6 7 + 10 11 + +### extract_image_patches(const Index patch_rows, const Index patch_cols, const Index row_stride, const Index col_stride, const PaddingType padding_type) + +Returns a tensor of coefficient image patches extracted from the input tensor, +which is expected to have dimensions ordered as follows (depending on the data +layout of the input tensor, and the number of additional dimensions 'N'): + +*) ColMajor +1st dimension: channels (of size d) +2nd dimension: rows (of size r) +3rd dimension: columns (of size c) +4th-Nth dimension: time (for video) or batch (for bulk processing). 
+ +*) RowMajor (reverse order of ColMajor) +1st-Nth dimension: time (for video) or batch (for bulk processing). +N+1'th dimension: columns (of size c) +N+2'th dimension: rows (of size r) +N+3'th dimension: channels (of size d) + +The returned tensor has one greater dimension than the input tensor, which is +used to index each patch. The patch index in the output tensor depends on the +data layout of the input tensor: the patch index is the 4'th dimension in +ColMajor layout, and the 4'th from the last dimension in RowMajor layout. + +For example, given the following input tensor with the following dimension +sizes: + *) depth: 2 + *) rows: 3 + *) columns: 5 + *) batch: 7 + + Tensor tensor(2,3,5,7); + Tensor tensor_row_major = tensor.swap_layout(); + +2x2 image patches can be extracted and indexed using the following code: + +*) 2D patch: ColMajor (patch indexed by second-to-last dimension) + + Tensor twod_patch; + twod_patch = tensor.extract_image_patches<2, 2>(); + // twod_patch.dimension(0) == 2 + // twod_patch.dimension(1) == 2 + // twod_patch.dimension(2) == 2 + // twod_patch.dimension(3) == 3*5 + // twod_patch.dimension(4) == 7 + +*) 2D patch: RowMajor (patch indexed by the second dimension) + + Tensor twod_patch_row_major; + twod_patch_row_major = tensor_row_major.extract_image_patches<2, 2>(); + // twod_patch_row_major.dimension(0) == 7 + // twod_patch_row_major.dimension(1) == 3*5 + // twod_patch_row_major.dimension(2) == 2 + // twod_patch_row_major.dimension(3) == 2 + // twod_patch_row_major.dimension(4) == 2 + +## Special Operations + +### cast() + +Returns a tensor of type T with the same dimensions as the original tensor. +The returned tensor contains the values of the original tensor converted to +type T. + + Eigen::Tensor a(2, 3); + Eigen::Tensor b = a.cast(); + +This can be useful for example if you need to do element-wise division of +Tensors of integers. 
This is not currently supported by the Tensor library +but you can easily cast the tensors to floats to do the division: + + Eigen::Tensor a(2, 3); + a.setValues({{0, 1, 2}, {3, 4, 5}}); + Eigen::Tensor b = + (a.cast() / a.constant(2).cast()).cast(); + cout << "a" << endl << a << endl << endl; + cout << "b" << endl << b << endl << endl; + => + a + 0 1 2 + 3 4 5 + + b + 0 0 1 + 1 2 2 + + +### eval() + +TODO + + +## Representation of scalar values + +Scalar values are often represented by tensors of size 1 and rank 0.For example +Tensor::maximum() currently returns a Tensor. Similarly, the inner +product of 2 1d tensors (through contractions) returns a 0d tensor. + +## Limitations + +* The number of tensor dimensions is currently limited to 250 when using a + compiler that supports cxx11. It is limited to only 5 for older compilers. +* The IndexList class requires a cxx11 compliant compiler. You can use an + array of indices instead if you don't have access to a modern compiler. +* On GPUs only floating point values are properly tested and optimized for. +* Complex and integer values are known to be broken on GPUs. If you try to use + them you'll most likely end up triggering a static assertion failure such as + EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + + diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/Tensor.h b/external/unsupported/Eigen/CXX11/src/Tensor/Tensor.h new file mode 100644 index 0000000..8cac2bb --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/Tensor.h @@ -0,0 +1,554 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_H + +namespace Eigen { + +/** \class Tensor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor class. + * + * The %Tensor class is the work-horse for all \em dense tensors within Eigen. + * + * The %Tensor class encompasses only dynamic-size objects so far. + * + * The first two template parameters are required: + * \tparam Scalar_ Numeric type, e.g. float, double, int or `std::complex`. + * User defined scalar types are supported as well (see \ref user_defined_scalars "here"). + * \tparam NumIndices_ Number of indices (i.e. rank of the tensor) + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \tparam Options_ A combination of either \b #RowMajor or \b #ColMajor, and of either + * \b #AutoAlign or \b #DontAlign. + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required + * for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization. + * Support for such operations (i.e. adding two tensors etc.) is planned. + * + * You can access elements of tensors using normal subscripting: + * + * \code + * Eigen::Tensor t(10, 10, 10, 10); + * t(0, 1, 2, 3) = 42.0; + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN. + * + * Some notes: + * + *
+ *
Relation to other parts of Eigen:
+ *
The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that + * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code + * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor + * class does not provide any of these features and is only available as a stand-alone class that just allows for + * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to + * change dramatically.
+ *
+ * + * \ref TopicStorageOrders + */ + +template +class Tensor : public TensorBase > +{ + public: + typedef Tensor Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef Scalar_ Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + enum { + IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign), + Layout = Options_ & RowMajor ? RowMajor : ColMajor, + CoordAccess = true, + RawAccess = true + }; + + static const int Options = Options_; + static const int NumIndices = NumIndices_; + typedef DSizes Dimensions; + + protected: + TensorStorage m_storage; + +#ifdef EIGEN_HAS_SFINAE + template + struct isOfNormalIndex{ + static const bool is_array = internal::is_base_of, CustomIndices>::value; + static const bool is_int = NumTraits::IsInteger; + static const bool value = is_array | is_int; + }; +#endif + + public: + // Metadata + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } + + // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + // work, because that uses base().coeffRef() - and we don't yet + // implement a similar class hierarchy + inline Self& base() { return *this; } + inline const Self& base() const { return *this; } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, secondIndex, otherIndices...}}); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array& indices) const + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const + { + return coeff(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, secondIndex, otherIndices...}}); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices) + { + return coeffRef(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, secondIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + return coeff(array(i0, i1)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + return coeff(array(i0, i1, i2)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + return coeff(array(i0, i1, i2, i3)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + return coeff(array(i0, i1, i2, i3, i4)); + } +#endif + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const + { + return coeff(internal::customIndices2Array(indices)); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + return coeff(indices); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const + { + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... 
otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, secondIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + return coeffRef(array(i0, i1)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + return coeffRef(array(i0, i1, i2)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + return coeffRef(array(i0, i1, i2, i3)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + return coeffRef(array(i0, i1, i2, i3, i4)); + } +#endif + + // normal indices + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array& indices) + { + return coeffRef(indices); + } + + // custom indices +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices) + { + return coeffRef(internal::customIndices2Array(indices)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeffRef(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index) + { + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor() + : m_storage() + { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const Self& other) + : 
m_storage(other.m_storage) + { + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions) + : m_storage(firstDimension, otherDimensions...) + { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1) + : m_storage(dim1, array(dim1)) + { + EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2) + : m_storage(dim1*dim2, array(dim1, dim2)) + { + EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3) + : m_storage(dim1*dim2*dim3, array(dim1, dim2, dim3)) + { + EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4) + : m_storage(dim1*dim2*dim3*dim4, array(dim1, dim2, dim3, dim4)) + { + EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) + : m_storage(dim1*dim2*dim3*dim4*dim5, array(dim1, dim2, dim3, dim4, dim5)) + { + EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + /** Normal Dimension */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array& dimensions) + : m_storage(internal::array_prod(dimensions), dimensions) + { + EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + resize(TensorEvaluator(assign, 
DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + #if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor(Self&& other) + : m_storage(std::move(other.m_storage)) + { + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(Self&& other) + { + m_storage = std::move(other.m_storage); + return *this; + } + #endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other); + resize(TensorEvaluator(assign, DefaultDevice()).dimensions()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return *this; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + void resize(Index firstDimension, IndexTypes... otherDimensions) + { + // The number of dimensions used to resize a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + resize(array{{firstDimension, otherDimensions...}}); + } +#endif + + /** Normal Dimension */ + EIGEN_DEVICE_FUNC void resize(const array& dimensions) + { + int i; + Index size = Index(1); + for (i = 0; i < NumIndices; i++) { + internal::check_rows_cols_for_overflow::run(size, dimensions[i]); + size *= dimensions[i]; + } + #ifdef EIGEN_INITIALIZE_COEFFS + bool size_changed = size != this->size(); + m_storage.resize(size, dimensions); + if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + #else + m_storage.resize(size, dimensions); + #endif + } + + // Why this overload, DSizes is derived from array ??? // + EIGEN_DEVICE_FUNC void resize(const DSizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = dimensions[i]; + } + resize(dims); + } + + EIGEN_DEVICE_FUNC + void resize() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + // Nothing to do: rank 0 tensors have fixed size + } + +#ifdef EIGEN_HAS_INDEX_LIST + template + EIGEN_DEVICE_FUNC + void resize(const Eigen::IndexList& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#endif + + /** Custom Dimension */ +#ifdef EIGEN_HAS_SFINAE + template::value) ) + > + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions) + { + resize(internal::customIndices2Array(dimensions)); + } +#endif + +#ifndef EIGEN_EMULATE_CXX11_META_H + template + EIGEN_DEVICE_FUNC + void resize(const Sizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#else + template + EIGEN_DEVICE_FUNC + void resize(const Sizes& dimensions) { + array dims; + for (int i = 0; i < NumIndices; ++i) { + dims[i] = static_cast(dimensions[i]); + } + resize(dims); + } +#endif + + protected: + + bool 
checkIndexRange(const array& indices) const + { + using internal::array_apply_and_reduce; + using internal::array_zip_and_reduce; + using internal::greater_equal_zero_op; + using internal::logical_and_op; + using internal::lesser_op; + + return + // check whether the indices are all >= 0 + array_apply_and_reduce(indices) && + // check whether the indices fit in the dimensions + array_zip_and_reduce(indices, m_storage.dimensions()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array& indices) const + { + if (Options&RowMajor) { + return m_storage.dimensions().IndexOfRowMajor(indices); + } else { + return m_storage.dimensions().IndexOfColMajor(indices); + } + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h new file mode 100644 index 0000000..8b8fb92 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h @@ -0,0 +1,329 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Eugene Brevdo +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H +#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H + +namespace Eigen { +namespace internal { + +/** \class TensorIndexTuple + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor + Index Tuple class. 
+ * + * + */ +template +struct traits > : public traits +{ + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef Tuple Scalar; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorIndexTupleOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorIndexTupleOp type; +}; + +} // end namespace internal + +template +class TensorIndexTupleOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + typedef Tuple CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorIndexTupleOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size::value; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = /*TensorEvaluator::PacketAccess*/ false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = 
TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return CoeffReturnType(index, m_impl.coeff(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + TensorEvaluator m_impl; +}; + +namespace internal { + +/** \class TensorTupleIndex + * \ingroup CXX11_Tensor_Module + * + * \brief Converts to Tensor > and reduces to Tensor. 
+ * + */ +template +struct traits > : public traits +{ + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef Index Scalar; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorTupleReducerOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorTupleReducerOp type; +}; + +} // end namespace internal + +template +class TensorTupleReducerOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + typedef Index CoeffReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr, + const ReduceOp& reduce_op, + const Index return_dim, + const Dims& reduce_dims) + : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + const ReduceOp& reduce_op() const { return m_reduce_op; } + + EIGEN_DEVICE_FUNC + const Dims& reduce_dims() const { return m_reduce_dims; } + + EIGEN_DEVICE_FUNC + Index return_dim() const { return m_return_dim; } + + protected: + typename XprType::Nested m_xpr; + const ReduceOp m_reduce_op; + const Index m_return_dim; + const Dims m_reduce_dims; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorTupleReducerOp XprType; + typedef typename XprType::Index Index; + typedef 
typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename TensorIndexTupleOp::CoeffReturnType TupleType; + typedef typename TensorEvaluator >, Device>::Dimensions Dimensions; + typedef typename TensorEvaluator , Device>::Dimensions InputDimensions; + static const int NumDims = internal::array_size::value; + typedef array StrideDims; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef StorageMemory TupleStorageMem; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = /*TensorEvaluator::PacketAccess*/ false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator >, Device>::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_orig_impl(op.expression(), device), + m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device), + m_return_dim(op.return_dim()) + { + gen_strides(m_orig_impl.dimensions(), m_strides); + if (Layout == static_cast(ColMajor)) { + const Index total_size = internal::array_prod(m_orig_impl.dimensions()); + m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size; + } else { + const Index total_size = internal::array_prod(m_orig_impl.dimensions()); + m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size; + } + // If m_return_dim is not a valid index, returns 1 or this can crash on Windows. + m_stride_div = ((m_return_dim >= 0) && + (m_return_dim < static_cast(m_strides.size()))) + ? 
m_strides[m_return_dim] : 1; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + const TupleType v = m_impl.coeff(index); + return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div; + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } +#ifdef EIGEN_USE_SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_orig_impl.bind(cgh); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = 1.0 + + (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost() + TensorOpCost::DivCost())); + return m_orig_impl.costPerCoeff(vectorized) + + m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost); + } + + private: + EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) { + if (m_return_dim < 0) { + return; // Won't be using the strides. + } + eigen_assert(m_return_dim < NumDims && + "Asking to convert index to a dimension outside of the rank"); + + // Calculate m_stride_div and m_stride_mod, which are used to + // calculate the value of an index w.r.t. the m_return_dim. 
+ if (Layout == static_cast(ColMajor)) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i-1] * dims[i-1]; + } + } else { + strides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i+1] * dims[i+1]; + } + } + } + + protected: + TensorEvaluator, Device> m_orig_impl; + TensorEvaluator >, Device> m_impl; + const Index m_return_dim; + StrideDims m_strides; + Index m_stride_mod; + Index m_stride_div; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h new file mode 100644 index 0000000..e5811d6 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h @@ -0,0 +1,247 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H +#define EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H + +namespace Eigen { + +/** \class TensorAssign + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor assignment class. + * + * This class is represents the assignment of the values resulting from the evaluation of + * the rhs expression to the memory locations denoted by the lhs expression. 
+ */ +namespace internal { +template +struct traits > +{ + typedef typename LhsXprType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const std::size_t NumDimensions = internal::traits::NumDimensions; + static const int Layout = internal::traits::Layout; + typedef typename traits::PointerType PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorAssignOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorAssignOp type; +}; + +} // end namespace internal + + + +template +class TensorAssignOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename LhsXprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + static const int NumDims = Eigen::internal::traits::NumDimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorAssignOp(LhsXprType& lhs, const RhsXprType& rhs) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs) {} + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + typename internal::remove_all::type& + lhsExpression() const { return *((typename internal::remove_all::type*)&m_lhs_xpr); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename internal::remove_all::type& m_lhs_xpr; + const typename internal::remove_all::type& m_rhs_xpr; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef 
TensorAssignOp XprType; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + static const int PacketSize = PacketType::size; + static const int NumDims = XprType::NumDims; + + enum { + IsAligned = int(TensorEvaluator::IsAligned) & + int(TensorEvaluator::IsAligned), + PacketAccess = int(TensorEvaluator::PacketAccess) & + int(TensorEvaluator::PacketAccess), + BlockAccess = int(TensorEvaluator::BlockAccess) & + int(TensorEvaluator::BlockAccess), + PreferBlockAccess = int(TensorEvaluator::PreferBlockAccess) | + int(TensorEvaluator::PreferBlockAccess), + Layout = TensorEvaluator::Layout, + RawAccess = TensorEvaluator::RawAccess + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + RightTensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType& op, const Device& device) : + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), device) + { + EIGEN_STATIC_ASSERT( + (static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // The dimensions of the lhs and the rhs tensors should be equal to prevent + // overflows and ensure the result is fully initialized. + // TODO: use left impl instead if right impl dimensions are known at compile time. 
+ return m_rightImpl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions())); + m_leftImpl.evalSubExprsIfNeeded(NULL); + // If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non + // null value), attempt to evaluate the rhs expression in place. Returns true iff in place + // evaluation isn't supported and the caller still needs to manually assign the values generated + // by the rhs to the lhs. + return m_rightImpl.evalSubExprsIfNeeded(m_leftImpl.data()); + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) { + m_rightImpl.evalSubExprsIfNeededAsync( + m_leftImpl.data(), [done](bool need_assign) { done(need_assign); }); + }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_leftImpl.coeffRef(i) = m_rightImpl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + + const int LhsStoreMode = TensorEvaluator::IsAligned ? Aligned : Unaligned; + const int RhsLoadMode = TensorEvaluator::IsAligned ? 
Aligned : Unaligned; + m_leftImpl.template writePacket(i, m_rightImpl.template packet(i)); + } + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_leftImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + return m_leftImpl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // We assume that evalPacket or evalScalar is called to perform the + // assignment and account for the cost of the write here, but reduce left + // cost by one load because we are using m_leftImpl.coeffRef. + TensorOpCost left = m_leftImpl.costPerCoeff(vectorized); + return m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost( + numext::maxi(0.0, left.bytes_loaded() - sizeof(CoeffReturnType)), + left.bytes_stored(), left.compute_cycles()) + + TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::merge( + m_leftImpl.getResourceRequirements(), + m_rightImpl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock( + TensorBlockDesc& desc, TensorBlockScratch& scratch) { + if (TensorEvaluator::RawAccess && + m_leftImpl.data() != NULL) { + // If destination has raw data access, we pass it as a potential + // destination for a block descriptor evaluation. + desc.template AddDestinationBuffer( + /*dst_base=*/m_leftImpl.data() + desc.offset(), + /*dst_strides=*/internal::strides(m_leftImpl.dimensions())); + } + + RightTensorBlock block = m_rightImpl.block(desc, scratch, /*root_of_expr_ast=*/true); + // If block was evaluated into a destination, there is no need to do assignment. 
+ if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) { + m_leftImpl.writeBlock(desc, block); + } + block.cleanup(); + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_leftImpl.bind(cgh); + m_rightImpl.bind(cgh); + } +#endif + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_leftImpl.data(); } + + private: + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + +} + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_ASSIGN_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h new file mode 100644 index 0000000..35b6458 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h @@ -0,0 +1,1176 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BASE_H +#define EIGEN_CXX11_TENSOR_TENSOR_BASE_H + +// clang-format off + +namespace Eigen { + +/** \class TensorBase + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor base class. + * + * This class is the common parent of the Tensor and TensorMap class, thus + * making it possible to use either class interchangeably in expressions. 
+ */ +#ifndef EIGEN_PARSED_BY_DOXYGEN +// FIXME Doxygen does not like the inheritance with different template parameters +// Since there is no doxygen documentation inside, we disable it for now +template +class TensorBase +{ + public: + typedef internal::traits DerivedTraits; + typedef typename DerivedTraits::Scalar Scalar; + typedef typename DerivedTraits::Index Index; + typedef typename internal::remove_const::type CoeffReturnType; + static const int NumDimensions = DerivedTraits::NumDimensions; + + // Generic nullary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp + nullaryExpr(const CustomNullaryOp& func) const { + return TensorCwiseNullaryOp(derived(), func); + } + + // Coefficient-wise nullary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp, const Derived> + constant(const Scalar& value) const { + return nullaryExpr(internal::scalar_constant_op(value)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp, const Derived> + random() const { + return nullaryExpr(internal::UniformRandomGenerator()); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseNullaryOp + random(const RandomGenerator& gen = RandomGenerator()) const { + return nullaryExpr(gen); + } + + // Tensor generation + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorGeneratorOp + generate(const Generator& generator) const { + return TensorGeneratorOp(derived(), generator); + } + + // Generic unary operation support. 
+ template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp + unaryExpr(const CustomUnaryOp& func) const { + return TensorCwiseUnaryOp(derived(), func); + } + + // Coefficient-wise unary operators + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator-() const { + return unaryExpr(internal::scalar_opposite_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sqrt() const { + return unaryExpr(internal::scalar_sqrt_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sign() const { + return unaryExpr(internal::scalar_sign_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + rsqrt() const { + return unaryExpr(internal::scalar_rsqrt_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + square() const { + return unaryExpr(internal::scalar_square_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + cube() const { + return unaryExpr(internal::scalar_cube_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + inverse() const { + return unaryExpr(internal::scalar_inverse_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + tanh() const { + return unaryExpr(internal::scalar_tanh_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + lgamma() const { + return unaryExpr(internal::scalar_lgamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + digamma() const { + return unaryExpr(internal::scalar_digamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i0() const { + return unaryExpr(internal::scalar_bessel_i0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const 
TensorCwiseUnaryOp, const Derived> + bessel_i0e() const { + return unaryExpr(internal::scalar_bessel_i0e_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i1() const { + return unaryExpr(internal::scalar_bessel_i1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_i1e() const { + return unaryExpr(internal::scalar_bessel_i1e_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_j0() const { + return unaryExpr(internal::scalar_bessel_j0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_y0() const { + return unaryExpr(internal::scalar_bessel_y0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_j1() const { + return unaryExpr(internal::scalar_bessel_j1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_y1() const { + return unaryExpr(internal::scalar_bessel_y1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k0() const { + return unaryExpr(internal::scalar_bessel_k0_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k0e() const { + return unaryExpr(internal::scalar_bessel_k0e_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k1() const { + return unaryExpr(internal::scalar_bessel_k1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + bessel_k1e() const { + return unaryExpr(internal::scalar_bessel_k1e_op()); + } + + // igamma(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igamma(const OtherDerived& other) const { + return binaryExpr(other.derived(), 
internal::scalar_igamma_op()); + } + + // igamma_der_a(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igamma_der_a(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_igamma_der_a_op()); + } + + // gamma_sample_der_alpha(alpha = this, sample = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + gamma_sample_der_alpha(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_gamma_sample_der_alpha_op()); + } + + // igammac(a = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + igammac(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_igammac_op()); + } + + // zeta(x = this, q = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + zeta(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_zeta_op()); + } + + // polygamma(n = this, x = other) + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + polygamma(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_polygamma_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + erf() const { + return unaryExpr(internal::scalar_erf_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + erfc() const { + return unaryExpr(internal::scalar_erfc_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + ndtri() const { + return unaryExpr(internal::scalar_ndtri_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + sigmoid() 
const { + return unaryExpr(internal::scalar_logistic_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + exp() const { + return unaryExpr(internal::scalar_exp_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + expm1() const { + return unaryExpr(internal::scalar_expm1_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log() const { + return unaryExpr(internal::scalar_log_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log1p() const { + return unaryExpr(internal::scalar_log1p_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + log2() const { + return unaryExpr(internal::scalar_log2_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + abs() const { + return unaryExpr(internal::scalar_abs_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + clip(Scalar min, Scalar max) const { + return unaryExpr(internal::scalar_clamp_op(min, max)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const typename internal::conditional::IsComplex, + TensorCwiseUnaryOp, const Derived>, + Derived>::type + conjugate() const { + return choose(Cond::IsComplex>(), unaryExpr(internal::scalar_conjugate_op()), derived()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + pow(Scalar exponent) const { + return unaryExpr(internal::bind2nd_op >(exponent)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + real() const { + return unaryExpr(internal::scalar_real_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + imag() const { + return unaryExpr(internal::scalar_imag_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const 
Derived> + operator+ (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator+ (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator- (Scalar rhs) const { + EIGEN_STATIC_ASSERT((NumTraits::IsSigned || internal::is_same >::value), YOU_MADE_A_PROGRAMMING_MISTAKE); + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator- (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator* (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator* (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp >, const Derived> + operator/ (Scalar rhs) const { + return unaryExpr(internal::bind2nd_op >(rhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE friend + const TensorCwiseUnaryOp >, const Derived> + operator/ (Scalar lhs, const Derived& rhs) { + return rhs.unaryExpr(internal::bind1st_op >(lhs)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + operator% (Scalar rhs) const { + EIGEN_STATIC_ASSERT(NumTraits::IsInteger, YOU_MADE_A_PROGRAMMING_MISTAKE_TRY_MOD); + return unaryExpr(internal::scalar_mod_op(rhs)); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + cwiseMax(Scalar threshold) const { + return cwiseMax(constant(threshold)); + 
} + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + cwiseMin(Scalar threshold) const { + return cwiseMin(constant(threshold)); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const typename internal::conditional::value, + Derived, + TensorConversionOp >::type + cast() const { + return choose(Cond::value>(), derived(), TensorConversionOp(derived())); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + round() const { + return unaryExpr(internal::scalar_round_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + rint() const { + return unaryExpr(internal::scalar_rint_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + ceil() const { + return unaryExpr(internal::scalar_ceil_op()); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + floor() const { + return unaryExpr(internal::scalar_floor_op()); + } + + // Generic binary operation support. + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp + binaryExpr(const OtherDerived& other, const CustomBinaryOp& func) const { + return TensorCwiseBinaryOp(derived(), other, func); + } + + // Coefficient-wise binary operators. 
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator+(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_sum_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator-(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_difference_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator*(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_product_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator/(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_quotient_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + cwiseMax(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_max_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + cwiseMin(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_min_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator&&(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_and_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator||(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_boolean_or_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp + operator^(const OtherDerived& other) const { + return binaryExpr(other.derived(), 
internal::scalar_boolean_xor_op()); + } + + // Comparisons and tests. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator<(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator<=(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator>(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator>=(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator==(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCwiseBinaryOp, const Derived, const OtherDerived> + operator!=(const OtherDerived& other) const { + return binaryExpr(other.derived(), internal::scalar_cmp_op()); + } + + // comparisons and tests for Scalars + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator<(Scalar threshold) const { + return operator<(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator<=(Scalar threshold) const { + return operator<=(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const 
Derived, const TensorCwiseNullaryOp, const Derived> > + operator>(Scalar threshold) const { + return operator>(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator>=(Scalar threshold) const { + return operator>=(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator==(Scalar threshold) const { + return operator==(constant(threshold)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseBinaryOp, const Derived, const TensorCwiseNullaryOp, const Derived> > + operator!=(Scalar threshold) const { + return operator!=(constant(threshold)); + } + + // Checks + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isnan)() const { + return unaryExpr(internal::scalar_isnan_op()); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isinf)() const { + return unaryExpr(internal::scalar_isinf_op()); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const TensorCwiseUnaryOp, const Derived> + (isfinite)() const { + return unaryExpr(internal::scalar_isfinite_op()); + } + + // Coefficient-wise ternary operators. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSelectOp + select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const { + return TensorSelectOp(derived(), thenTensor.derived(), elseTensor.derived()); + } + + // Contractions. 
+ typedef Eigen::IndexPair DimensionPair; + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorContractionOp + contract(const OtherDerived& other, const Dimensions& dims) const { + return TensorContractionOp(derived(), other.derived(), dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorContractionOp + contract(const OtherDerived& other, const Dimensions& dims, const OutputKernel& output_kernel) const { + return TensorContractionOp(derived(), other.derived(), dims, output_kernel); + } + + // Convolutions. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConvolutionOp + convolve(const KernelDerived& kernel, const Dimensions& dims) const { + return TensorConvolutionOp(derived(), kernel.derived(), dims); + } + + // Fourier transforms + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorFFTOp + fft(const FFT& dims) const { + return TensorFFTOp(derived(), dims); + } + + // Scan. + typedef TensorScanOp, const Derived> TensorScanSumOp; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanSumOp + cumsum(const Index& axis, bool exclusive = false) const { + return TensorScanSumOp(derived(), axis, exclusive); + } + + typedef TensorScanOp, const Derived> TensorScanProdOp; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanProdOp + cumprod(const Index& axis, bool exclusive = false) const { + return TensorScanProdOp(derived(), axis, exclusive); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorScanOp + scan(const Index& axis, const Reducer& reducer, bool exclusive = false) const { + return TensorScanOp(derived(), axis, exclusive, reducer); + } + + // Reductions. 
+ template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + sum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::SumReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + sum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::SumReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + mean(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MeanReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + mean() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MeanReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + prod(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::ProdReducer()); + } + + const TensorReductionOp, const DimensionList, const Derived> + prod() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::ProdReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + maximum(const Dims& dims) const { + return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MaxReducer()); + } + + template + const TensorReductionOp, const DimensionList, const Derived> + maximum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MaxReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const Dims, const Derived> + minimum(const Dims& dims) const { + 
return TensorReductionOp, const Dims, const Derived>(derived(), dims, internal::MinReducer()); + } + + template + const TensorReductionOp, const DimensionList, const Derived> + minimum() const { + DimensionList in_dims; + return TensorReductionOp, const DimensionList, const Derived>(derived(), in_dims, internal::MinReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp::value, Derived, TensorConversionOp >::type > + all(const Dims& dims) const { + return cast().reduce(dims, internal::AndReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const typename internal::conditional::value, Derived, TensorConversionOp >::type > + all() const { + DimensionList in_dims; + return cast().reduce(in_dims, internal::AndReducer()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp::value, Derived, TensorConversionOp >::type > + any(const Dims& dims) const { + return cast().reduce(dims, internal::OrReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp, const typename internal::conditional::value, Derived, TensorConversionOp >::type > + any() const { + DimensionList in_dims; + return cast().reduce(in_dims, internal::OrReducer()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, const Derived> + argmax() const { + array in_dims; + for (Index d = 0; d < NumDimensions; ++d) in_dims[d] = d; + return TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMaxTupleReducer >(), -1, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, const Derived> + argmin() const { + array in_dims; + for (Index d = 0; d < NumDimensions; ++d) in_dims[d] = d; + return TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, + const Derived>(derived(), 
internal::ArgMinTupleReducer >(), -1, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, const Derived> + argmax(const Index return_dim) const { + array in_dims; + in_dims[0] = return_dim; + return TensorTupleReducerOp< + internal::ArgMaxTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMaxTupleReducer >(), return_dim, in_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, const Derived> + argmin(const Index return_dim) const { + array in_dims; + in_dims[0] = return_dim; + return TensorTupleReducerOp< + internal::ArgMinTupleReducer >, + const array, + const Derived>(derived(), internal::ArgMinTupleReducer >(), return_dim, in_dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReductionOp + reduce(const Dims& dims, const Reducer& reducer) const { + return TensorReductionOp(derived(), dims, reducer); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorTraceOp + trace(const Dims& dims) const { + return TensorTraceOp(derived(), dims); + } + + const TensorTraceOp, const Derived> + trace() const { + DimensionList in_dims; + return TensorTraceOp, const Derived>(derived(), in_dims); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorBroadcastingOp + broadcast(const Broadcast& bcast) const { + return TensorBroadcastingOp(derived(), bcast); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConcatenationOp + concatenate(const OtherDerived& other, Axis axis) const { + return TensorConcatenationOp(derived(), other.derived(), axis); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPatchOp + extract_patches(const PatchDims& patch_dims) const { + return TensorPatchOp(derived(), patch_dims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorImagePatchOp + extract_image_patches(const Index 
patch_rows = 1, const Index patch_cols = 1, + const Index row_stride = 1, const Index col_stride = 1, + const Index in_row_stride = 1, const Index in_col_stride = 1, + const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const { + return TensorImagePatchOp(derived(), patch_rows, patch_cols, row_stride, col_stride, + in_row_stride, in_col_stride, 1, 1, padding_type, padding_value); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorImagePatchOp + extract_image_patches(const Index patch_rows, const Index patch_cols, + const Index row_stride, const Index col_stride, + const Index in_row_stride, const Index in_col_stride, + const Index row_inflate_stride, const Index col_inflate_stride, + const Index padding_top, const Index padding_bottom, + const Index padding_left,const Index padding_right, + const Scalar padding_value) const { + return TensorImagePatchOp(derived(), patch_rows, patch_cols, row_stride, col_stride, + in_row_stride, in_col_stride, row_inflate_stride, col_inflate_stride, + padding_top, padding_bottom, padding_left, padding_right, padding_value); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorVolumePatchOp + extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols, + const Index plane_stride = 1, const Index row_stride = 1, const Index col_stride = 1, + const PaddingType padding_type = PADDING_SAME, const Scalar padding_value = Scalar(0)) const { + return TensorVolumePatchOp(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, 1, 1, 1, padding_type, padding_value); + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorVolumePatchOp + extract_volume_patches(const Index patch_planes, const Index patch_rows, const Index patch_cols, + const Index plane_stride, const Index row_stride, const Index col_stride, + const Index plane_inflate_stride, const Index row_inflate_stride, const Index col_inflate_stride, + const 
Index padding_top_z, const Index padding_bottom_z, + const Index padding_top, const Index padding_bottom, + const Index padding_left, const Index padding_right, const Scalar padding_value = Scalar(0)) const { + return TensorVolumePatchOp(derived(), patch_planes, patch_rows, patch_cols, plane_stride, row_stride, col_stride, 1, 1, 1, plane_inflate_stride, row_inflate_stride, col_inflate_stride, padding_top_z, padding_bottom_z, padding_top, padding_bottom, padding_left, padding_right, padding_value); + } + + // Morphing operators. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorLayoutSwapOp + swap_layout() const { + return TensorLayoutSwapOp(derived()); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReshapingOp + reshape(const NewDimensions& newDimensions) const { + return TensorReshapingOp(derived(), newDimensions); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) const { + return TensorSlicingOp(derived(), startIndices, sizes); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) const { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset) const { + return TensorChippingOp(derived(), offset, DimId); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset, const Index dim) const { + return TensorChippingOp(derived(), offset, dim); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReverseOp + reverse(const ReverseDimensions& rev) const { + return TensorReverseOp(derived(), rev); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPaddingOp + pad(const PaddingDimensions& padding) const { + return 
TensorPaddingOp(derived(), padding, internal::scalar_cast_op()(0)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorPaddingOp + pad(const PaddingDimensions& padding, const Scalar padding_value) const { + return TensorPaddingOp(derived(), padding, padding_value); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorShufflingOp + shuffle(const Shuffle& shfl) const { + return TensorShufflingOp(derived(), shfl); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingOp + stride(const Strides& strides) const { + return TensorStridingOp(derived(), strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorInflationOp + inflate(const Strides& strides) const { + return TensorInflationOp(derived(), strides); + } + + // Returns a tensor containing index/value tuples + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorIndexTupleOp + index_tuples() const { + return TensorIndexTupleOp(derived()); + } + + // Support for custom unary and binary operations + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCustomUnaryOp customOp(const CustomUnaryFunc& op) const { + return TensorCustomUnaryOp(derived(), op); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorCustomBinaryOp customOp(const OtherDerived& other, const CustomBinaryFunc& op) const { + return TensorCustomBinaryOp(derived(), other, op); + } + + // Force the evaluation of the expression. 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorForcedEvalOp eval() const { + return TensorForcedEvalOp(derived()); + } + + protected: + template friend class Tensor; + template friend class TensorFixedSize; + // the Eigen:: prefix is required to workaround a compilation issue with nvcc 9.0 + template friend class Eigen::TensorBase; + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; + +template::value> +class TensorBase : public TensorBase { + public: + typedef TensorBase Base; + typedef internal::traits DerivedTraits; + typedef typename DerivedTraits::Scalar Scalar; + typedef typename DerivedTraits::Index Index; + typedef Scalar CoeffReturnType; + static const int NumDimensions = DerivedTraits::NumDimensions; + + template friend class Tensor; + template friend class TensorFixedSize; + // the Eigen:: prefix is required to workaround a compilation issue with nvcc 9.0 + template friend class Eigen::TensorBase; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setZero() { + return setConstant(Scalar(0)); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setConstant(const Scalar& val) { + return derived() = this->constant(val); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setRandom() { + return derived() = this->random(); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setRandom() { + return derived() = this->template random(); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& setValues( + const typename internal::Initializer::InitList& vals) { + TensorEvaluator eval(derived(), DefaultDevice()); + internal::initialize_tensor(eval, vals); + return derived(); + } +#endif // EIGEN_HAS_VARIADIC_TEMPLATES + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator+=(const OtherDerived& other) { + return derived() = derived() + other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator-=(const 
OtherDerived& other) { + return derived() = derived() - other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator*=(const OtherDerived& other) { + return derived() = derived() * other.derived(); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Derived& operator/=(const OtherDerived& other) { + return derived() = derived() / other.derived(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorLayoutSwapOp + swap_layout() const { + return TensorLayoutSwapOp(derived()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorLayoutSwapOp + swap_layout() { + return TensorLayoutSwapOp(derived()); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorConcatenationOp + concatenate(const OtherDerived& other, const Axis& axis) const { + return TensorConcatenationOp(derived(), other, axis); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorConcatenationOp + concatenate(const OtherDerived& other, const Axis& axis) { + return TensorConcatenationOp(derived(), other, axis); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReshapingOp + reshape(const NewDimensions& newDimensions) const { + return TensorReshapingOp(derived(), newDimensions); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReshapingOp + reshape(const NewDimensions& newDimensions) { + return TensorReshapingOp(derived(), newDimensions); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) const { + return TensorSlicingOp(derived(), startIndices, sizes); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorSlicingOp + slice(const StartIndices& startIndices, const Sizes& sizes) { + return TensorSlicingOp(derived(), startIndices, sizes); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) 
const { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorStridingSlicingOp + stridedSlice(const StartIndices& startIndices, const StopIndices& stopIndices, const Strides& strides) { + return TensorStridingSlicingOp(derived(), startIndices, stopIndices, strides); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset) const { + return TensorChippingOp(derived(), offset, DimId); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorChippingOp + chip(const Index offset) { + return TensorChippingOp(derived(), offset, DimId); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorChippingOp + chip(const Index offset, const Index dim) const { + return TensorChippingOp(derived(), offset, dim); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorChippingOp + chip(const Index offset, const Index dim) { + return TensorChippingOp(derived(), offset, dim); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorReverseOp + reverse(const ReverseDimensions& rev) const { + return TensorReverseOp(derived(), rev); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReverseOp + reverse(const ReverseDimensions& rev) { + return TensorReverseOp(derived(), rev); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorShufflingOp + shuffle(const Shuffle& shfl) const { + return TensorShufflingOp(derived(), shfl); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorShufflingOp + shuffle(const Shuffle& shfl) { + return TensorShufflingOp(derived(), shfl); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const TensorStridingOp + stride(const Strides& strides) const { + return TensorStridingOp(derived(), strides); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorStridingOp + stride(const Strides& strides) { + return TensorStridingOp(derived(), strides); + } + + // Select the 
device on which to evaluate the expression. + template + TensorDevice device(const DeviceType& dev) { + return TensorDevice(dev, derived()); + } + + // Select the async device on which to evaluate the expression. + template + TensorAsyncDevice device(const DeviceType& dev, DoneCallback done) { + return TensorAsyncDevice(dev, derived(), std::move(done)); + } + + protected: + EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(TensorBase) + EIGEN_DEFAULT_COPY_CONSTRUCTOR(TensorBase) + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& operator=(const OtherDerived& other) + { + typedef TensorAssignOp Assign; + Assign assign(derived(), other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + return derived(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Derived& derived() { return *static_cast(this); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast(this); } +}; +#endif // EIGEN_PARSED_BY_DOXYGEN +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BASE_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h new file mode 100644 index 0000000..1e55d12 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h @@ -0,0 +1,1559 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H +#define EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H + +namespace Eigen { +namespace internal { + +// -------------------------------------------------------------------------- // +// Forward declarations for templates defined below. 
+template +class TensorBlockIO; + +// -------------------------------------------------------------------------- // +// Helper function to compute strides for densely stored buffer of given +// dimensions. + +// TODO(ezhulenev): We compute strides 1000 times in different evaluators, use +// this function instead everywhere. +template +EIGEN_ALWAYS_INLINE DSizes strides( + const DSizes& dimensions) { + DSizes strides; + if (NumDims == 0) return strides; + + // TODO(ezhulenev): Use templates to unroll this loop (similar to + // h_array_reduce in CXX11meta.h)? Benchmark it. + if (static_cast(Layout) == static_cast(ColMajor)) { + strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + strides[i] = strides[i - 1] * dimensions[i - 1]; + } + } else { + strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + strides[i] = strides[i + 1] * dimensions[i + 1]; + } + } + + return strides; +} + +template +EIGEN_ALWAYS_INLINE DSizes strides( + const Eigen::array& dimensions) { + return strides(DSizes(dimensions)); +} + +template +EIGEN_STRONG_INLINE DSizes strides( + const Sizes& sizes) { + return strides(DSizes(sizes)); +} + +// -------------------------------------------------------------------------- // + +// Tensor block shape type defines what are the shape preference for the blocks +// extracted from the larger tensor. 
+// +// Example: blocks of 100 elements from the large 100x100 tensor: +// - tensor: 100x100 +// - target_block_size: 100 +// +// TensorBlockShapeType: +// - kUniformAllDims: 100 blocks of size 10x10 +// - kSkewedInnerDims: 100 blocks of size 100x1 (or 1x100 depending on a column +// or row major layout) +enum class TensorBlockShapeType { kUniformAllDims, kSkewedInnerDims }; + +struct TensorBlockResourceRequirements { + TensorBlockShapeType shape_type; // target block shape + size_t size; // target block size + TensorOpCost cost_per_coeff; // cost of computing a single block element + +#ifdef EIGEN_HIPCC + // For HIPCC, we need to explicitly declare as a "device fun", the constructor + // which is implicitly invoked in the "merge" / "any" routines. else HIPCC + // errors out complaining about the lack of a matching constructor + EIGEN_DEVICE_FUNC + TensorBlockResourceRequirements(TensorBlockShapeType shape_type_, size_t size_, + TensorOpCost cost_) + : shape_type(shape_type_), size(size_), cost_per_coeff(cost_) + {} +#endif + + template + EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize( + TensorBlockShapeType shape_type, size_t size_in_bytes, + TensorOpCost cost) { + const size_t size = numext::maxi(size_t(1), size_in_bytes / sizeof(Scalar)); + return {shape_type, size, cost}; + } + + template + EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize( + TensorBlockShapeType shape_type, size_t size_in_bytes) { + // This default cost per coefficient is valid for most materialized tensor + // block evaluation implementations, because they typically just read + // coefficients from the underlying tensor storage, and write to the tensor + // block buffer (scratch or destination memory, reads and writes have linear + // access pattern). We ignore the fixed cost of block evaluation, because in + // practice it should negligible. + // + // Lazy block evaluation adds the cost of calling a functor for each + // coefficient. 
+ // + // All non-trivial block evaluation implementations must provide their own + // cost approximation (e.g. shuffling inner dimension has a much higher cost + // because it reads memory randomly, although the total number of moved + // bytes is the same). + return withShapeAndSize(shape_type, size_in_bytes, + {/*bytes_loaded=*/sizeof(Scalar), + /*bytes_stored=*/sizeof(Scalar), + /*compute_cycles=*/0}); + } + + template + EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements skewed( + size_t size_in_bytes) { + return withShapeAndSize(TensorBlockShapeType::kSkewedInnerDims, + size_in_bytes); + } + + template + EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements uniform( + size_t size_in_bytes) { + return withShapeAndSize(TensorBlockShapeType::kUniformAllDims, + size_in_bytes); + } + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE TensorBlockResourceRequirements + merge(const TensorBlockResourceRequirements& lhs, + const TensorBlockResourceRequirements& rhs) { + return {merge(lhs.shape_type, rhs.shape_type), // shape_type + merge(lhs.size, rhs.size), // size + merge(lhs.cost_per_coeff, rhs.cost_per_coeff)}; // cost_per_coeff + } + + EIGEN_DEVICE_FUNC TensorBlockResourceRequirements& addCostPerCoeff( + TensorOpCost cost) { + cost_per_coeff += cost; + return *this; + } + + // This is a resource requirement that should be returned from expressions + // that do not have any block evaluation preference (e.g. default tensor + // expression with raw buffer access). 
+ EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE TensorBlockResourceRequirements any() { + return {TensorBlockShapeType::kUniformAllDims, 1, {0, 0, 0}}; + } + + private: + using Requirements = TensorBlockResourceRequirements; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE size_t merge(size_t lhs_size, size_t rhs_size) { + return numext::maxi(lhs_size, rhs_size); + } + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE TensorBlockShapeType + merge(TensorBlockShapeType lhs, TensorBlockShapeType rhs) { + return (lhs == TensorBlockShapeType::kSkewedInnerDims || + rhs == TensorBlockShapeType::kSkewedInnerDims) + ? TensorBlockShapeType::kSkewedInnerDims + : TensorBlockShapeType::kUniformAllDims; + } + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE TensorOpCost merge(TensorOpCost lhs_cost, + TensorOpCost rhs_cost) { + return lhs_cost + rhs_cost; + } +}; + +// -------------------------------------------------------------------------- // +// TensorBlockDescriptor specifies a block offset within a tensor and the block +// sizes along each of the tensor dimensions. + +template +class TensorBlockDescriptor { + public: + typedef DSizes Dimensions; + + // If we evaluate a Tensor assignment, and expression on the left, already has + // a memory buffer, then we might do performance optimization, and evaluate + // the root expression directly into the final output memory. Some time it's + // possible to reuse it for materializing subexpressions inside an expression + // tree, to to avoid dynamic memory allocation. + // + // The pointer type of the underlying storage is erased, because passing + // Scalar type through all the expression evaluation layers is way too many + // templates. In practice destination buffer type should always match the + // evaluated expression scalar type. 
+ class DestinationBuffer { + public: + enum DestinationBufferKind : int { + // The above explicit specification of "int" as the enum basetype is + // needed to get around a HIPCC link error ("the field type is not + // amp-compatible") + // which is issued for class members with the enum type. + // TODO(rocm): + // remove the "int" basetype once HIPCC has been fixed to not error out + // in the above scenario. + + // Destination buffer is not defined (`m_data` == nullptr). + kEmpty, + + // Tensor block defined by an owning tensor block descriptor can fit + // contiguously into the destination buffer. In this case it's safe to + // materialize tensor block in the destination buffer, wrap it in a + // TensorMap, and use to build Eigen expression on top of it. + kContiguous, + + // Destination buffer strides do not match strides of the contiguously + // stored block, and it's impossible to define a TensorMap over this + // buffer. However if we are evaluating a root of an expression tree, we + // still can materialize an output into this destination, because we can + // guarantee that no one will ever access it through block API. + // + // In theory it is possible to build valid TensorStriding + // expression on top of this destination buffer, however it has + // inefficient coeff/packet access, and defeats the purpose of fast block + // evaluation API. 
+ kStrided + }; + + template + Scalar* data() const { + eigen_assert(m_data_type_size == sizeof(Scalar)); + return static_cast(m_data); + } + + const Dimensions& strides() const { return m_strides; } + const DestinationBufferKind& kind() const { return m_kind; } + + private: + friend class TensorBlockDescriptor; + + DestinationBuffer() : m_data(NULL), m_data_type_size(0), m_kind(kEmpty) {} + + template + DestinationBuffer(Scalar* data, const Dimensions& strides, + DestinationBufferKind kind) + : m_data(static_cast(data)), + m_data_type_size(sizeof(Scalar)), + m_strides(strides), + m_kind(kind) {} + + template + static DestinationBuffer make(const TensorBlockDescriptor& desc, + Scalar* data, const Dimensions& strides) { + return DestinationBuffer(data, strides, kind(desc, strides)); + } + + template + static DestinationBufferKind kind(const TensorBlockDescriptor& desc, + const Dimensions& strides) { + const Dimensions& desc_dims = desc.dimensions(); + const Dimensions& desc_strides = internal::strides(desc_dims); + for (int i = 0; i < NumDims; ++i) { + if (desc_dims[i] == 1) continue; + if (desc_strides[i] != strides[i]) return kStrided; + } + return kContiguous; + } + + // Storage pointer is type erased, to reduce template bloat, but we still + // keep the size of the underlying element type for error checking. + void* m_data; + size_t m_data_type_size; + + // Destination buffer dimensions always match the dimensions of a tensor + // block descriptor it belongs to, however strides might be different. 
+ Dimensions m_strides; + + DestinationBufferKind m_kind; + }; + + TensorBlockDescriptor(const IndexType offset, const Dimensions& dimensions, + const DestinationBuffer& destination) + : m_offset(offset), + m_dimensions(dimensions), + m_destination(destination) {} + + TensorBlockDescriptor(const IndexType offset, const Dimensions& dimensions) + : m_offset(offset), + m_dimensions(dimensions), + m_destination(DestinationBuffer()) {} + + IndexType offset() const { return m_offset; } + const Dimensions& dimensions() const { return m_dimensions; } + IndexType dimension(int index) const { return m_dimensions[index]; } + IndexType size() const { return array_prod(m_dimensions); } + + const DestinationBuffer& destination() const { return m_destination; } + + template + void AddDestinationBuffer(Scalar* dst_base, const Dimensions& dst_strides) { + eigen_assert(dst_base != NULL); + m_destination = + DestinationBuffer::template make(*this, dst_base, dst_strides); + } + + template + void AddDestinationBuffer( + Scalar* dst_base, + const DSizes& dst_strides) { + // DSizes constructor will do index type promotion if it's safe. + AddDestinationBuffer(dst_base, Dimensions(dst_strides)); + } + + TensorBlockDescriptor& DropDestinationBuffer() { + m_destination.m_data = NULL; + m_destination.m_kind = DestinationBuffer::kEmpty; + return *this; + } + + bool HasDestinationBuffer() const { + return m_destination.kind() != DestinationBuffer::kEmpty; + } + + // Returns a copy of `*this` with updated offset. + TensorBlockDescriptor WithOffset(IndexType offset) const { + return TensorBlockDescriptor(offset, m_dimensions, m_destination); + } + + private: + // Offset and dimensions are immutable after construction. Block descriptor + // can only be mutated by adding or dropping destination. 
+ const IndexType m_offset; + const Dimensions m_dimensions; + DestinationBuffer m_destination; +}; + +// -------------------------------------------------------------------------- // +// TensorBlockMapper is responsible for iterating over the blocks of a tensor. + +template +class TensorBlockMapper { + typedef TensorBlockDescriptor BlockDescriptor; + + public: + typedef DSizes Dimensions; + + TensorBlockMapper() = default; + TensorBlockMapper(const DSizes& dimensions, + const TensorBlockResourceRequirements& requirements) + : m_tensor_dimensions(dimensions), m_requirements(requirements) { + // Compute block dimensions and the total number of blocks. + InitializeBlockDimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType blockCount() const { + return m_total_block_count; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType blockTotalSize() const { + return m_block_dimensions.TotalSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DSizes& + blockDimensions() const { + return m_block_dimensions; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockDescriptor + blockDescriptor(IndexType block_index) const { + static const bool isColMajor = Layout == static_cast(ColMajor); + + IndexType offset = 0; + DSizes dimensions; + + if (NumDims == 0) return BlockDescriptor(offset, dimensions); + + // Iterate outer -> inner dimensions. + for (int i = NumDims - 1; i >= 0; --i) { + const int dim = isColMajor ? i : NumDims - i - 1; + + const IndexType idx = block_index / m_block_strides[dim]; + block_index -= idx * m_block_strides[dim]; + + const IndexType coord = idx * m_block_dimensions[dim]; + dimensions[dim] = numext::mini(m_tensor_dimensions[dim] - coord, + m_block_dimensions[dim]); + offset += coord * m_tensor_strides[dim]; + } + + return {offset, dimensions}; + } + + private: + void InitializeBlockDimensions() { + // Requested block shape and size. 
+ const TensorBlockShapeType shape_type = m_requirements.shape_type; + IndexType target_block_size = + numext::maxi(1, static_cast(m_requirements.size)); + + IndexType tensor_size = m_tensor_dimensions.TotalSize(); + + // Corner case: one of the dimensions is zero. Logic below is too complex + // to handle this case on a general basis, just use unit block size. + // Note: we must not yield blocks with zero dimensions (recipe for + // overflows/underflows, divisions by zero and NaNs later). + if (tensor_size == 0) { + for (int i = 0; i < NumDims; ++i) { + m_block_dimensions[i] = 1; + } + m_total_block_count = 0; + return; + } + + // If tensor fits into a target block size, evaluate it as a single block. + if (tensor_size <= target_block_size) { + m_block_dimensions = m_tensor_dimensions; + m_total_block_count = 1; + // The only valid block index is `0`, and in this case we do not need + // to compute real strides for tensor or blocks (see blockDescriptor). + for (int i = 0; i < NumDims; ++i) { + m_tensor_strides[i] = 0; + m_block_strides[i] = 1; + } + return; + } + + static const bool isColMajor = Layout == static_cast(ColMajor); + + // Block shape skewed towards inner dimension. + if (shape_type == TensorBlockShapeType::kSkewedInnerDims) { + IndexType coeff_to_allocate = target_block_size; + + for (int i = 0; i < NumDims; ++i) { + const int dim = isColMajor ? i : NumDims - i - 1; + m_block_dimensions[dim] = + numext::mini(coeff_to_allocate, m_tensor_dimensions[dim]); + coeff_to_allocate = divup( + coeff_to_allocate, + numext::maxi(static_cast(1), m_block_dimensions[dim])); + } + eigen_assert(coeff_to_allocate == 1); + + } else if (shape_type == TensorBlockShapeType::kUniformAllDims) { + // Tensor will not fit within 'target_block_size' budget: calculate tensor + // block dimension sizes based on "square" dimension size target. 
+ const IndexType dim_size_target = convert_index( + std::pow(static_cast(target_block_size), + 1.0f / static_cast(m_block_dimensions.rank()))); + + for (int i = 0; i < NumDims; ++i) { + // TODO(andydavis) Adjust the inner most 'block_dim_size' to make it + // a multiple of the packet size. Note that reducing + // 'block_dim_size' in this manner can increase the number of + // blocks, and so will amplify any per-block overhead. + m_block_dimensions[i] = + numext::mini(dim_size_target, m_tensor_dimensions[i]); + } + + // Add any un-allocated coefficients to inner dimension(s). + IndexType total_size = m_block_dimensions.TotalSize(); + for (int i = 0; i < NumDims; ++i) { + const int dim = isColMajor ? i : NumDims - i - 1; + + if (m_block_dimensions[dim] < m_tensor_dimensions[dim]) { + const IndexType total_size_other_dims = + total_size / m_block_dimensions[dim]; + const IndexType alloc_avail = + divup(target_block_size, total_size_other_dims); + if (alloc_avail == m_block_dimensions[dim]) { + // Insufficient excess coefficients to allocate. + break; + } + m_block_dimensions[dim] = + numext::mini(m_tensor_dimensions[dim], alloc_avail); + total_size = total_size_other_dims * m_block_dimensions[dim]; + } + } + + } else { + eigen_assert(false); // unknown block shape + } + + eigen_assert(m_block_dimensions.TotalSize() >= + numext::mini(target_block_size, + m_tensor_dimensions.TotalSize())); + + // Calculate block counts by dimension and total block count. + DSizes block_count; + for (int i = 0; i < NumDims; ++i) { + block_count[i] = divup(m_tensor_dimensions[i], m_block_dimensions[i]); + } + m_total_block_count = array_prod(block_count); + + // Calculate block strides (used for enumerating blocks). 
+ m_tensor_strides = strides(m_tensor_dimensions); + m_block_strides = strides(block_count); + } + + DSizes m_tensor_dimensions; + TensorBlockResourceRequirements m_requirements; + + DSizes m_block_dimensions; + IndexType m_total_block_count; + + DSizes m_tensor_strides; + DSizes m_block_strides; +}; + +// -------------------------------------------------------------------------- // +// TensorBlockScratchAllocator is responsible for allocating temporary buffers +// for block evaluation (output or input block materialization). Given that +// Eigen expression traversal order is deterministic, all temporary allocations +// are happening in the same order, and usually have exactly the same size. +// Scratch allocator keeps a trace of all dynamic allocations, and after the +// first block evaluation is completed, we should be able to reuse all the +// temporary buffers for the next block evaluation. + +template +class TensorBlockScratchAllocator { + public: + explicit TensorBlockScratchAllocator(const Device& device) + : m_device(device), m_allocation_index(0) {} + + ~TensorBlockScratchAllocator() { + for (size_t i = 0; i < m_allocations.size(); ++i) { + m_device.deallocate(m_allocations[i].ptr); + } + } + + void* allocate(size_t size) { + // TODO(ezhulenev): Remove when replaced with inlined vector. + if (m_allocations.capacity() == 0) m_allocations.reserve(8); + + // Check if we already have an existing allocation att current index. + const int num_allocations = static_cast(m_allocations.size()); + const bool has_allocation = m_allocation_index < num_allocations; + + // Allocation index can't be larger than the number of allocations. + eigen_assert(m_allocation_index <= num_allocations); + + // If we have existing allocation, and its size is larger or equal to + // requested size, we do nothing. + + // If current allocation can't fit requested size, we deallocate it, and + // replace with a larger allocation. 
+ if (has_allocation && m_allocations[m_allocation_index].size < size) { + m_device.deallocate(m_allocations[m_allocation_index].ptr); + m_allocations[m_allocation_index].ptr = m_device.allocate(size); + m_allocations[m_allocation_index].size = size; + } + + // Make a new allocation if we don't have and existing one. + if (!has_allocation) { + Allocation allocation; + allocation.ptr = m_device.allocate(size); + allocation.size = size; + m_allocations.push_back(allocation); + } + + eigen_assert(m_allocations[m_allocation_index].ptr != NULL); + eigen_assert(m_allocations[m_allocation_index].size >= size); + + return m_allocations[m_allocation_index++].ptr; + } + + void reset() { m_allocation_index = 0; } + + private: + struct Allocation { + void* ptr; + size_t size; + }; + + const Device& m_device; + int m_allocation_index; + // TODO(ezhulenev): This should be an inlined vector. + std::vector m_allocations; +}; + +// -------------------------------------------------------------------------- // +// TensorBlockKind represents all possible block kinds, that can be produced by +// TensorEvaluator::evalBlock function. +enum TensorBlockKind { + // Tensor block that is a lazy expression that must be assigned to a + // destination using TensorBlockAssign. + kExpr, + + // Tensor block that is a view into a memory buffer owned by an underlying + // Tensor expression (e.g. it can be a view into a Tensor buffer). + kView, + + // Tensor block that was materialized in a scratch memory buffer, allocated + // with TensorBlockScratchAllocator. This block must be copied to a + // destination, similar to a block of `kExpr` type. + kMaterializedInScratch, + + // Tensor block that was materialized directly into the final output memory + // buffer. For example if the left side of an assignment is a Tensor, we can + // directly materialize the block in the destination memory. 
+ // + // If strides in the output buffer do not match tensor block strides, the + // Tensor expression will be invalid, and should not be used by + // TensorBlockAssign or for constructing another block expression. + kMaterializedInOutput +}; + +// -------------------------------------------------------------------------- // +// TensorBlockNotImplemented should be used to defined TensorBlock typedef in +// TensorEvaluators that do not support block evaluation. + +class TensorBlockNotImplemented { + public: + typedef void XprType; +}; + +// -------------------------------------------------------------------------- // +// XprScalar extracts Scalar type from the Eigen expressions (if expression type +// is not void). It's required to be able to define lazy block expression for +// argument types, that do not support block evaluation. + +template +struct XprScalar { + typedef typename XprType::Scalar type; +}; +template <> +struct XprScalar { + typedef void type; +}; + +// -------------------------------------------------------------------------- // +// TensorMaterializedBlock is a fully evaluated block of the original tensor, +// and XprType is just a TensorMap over the data. This block type is typically +// used to materialize blocks of tensor expressions, that can't be efficiently +// represented as lazy Tensor expressions with fast coeff/packet operations, +// e.g. we materialize all broadcasts into evaluated blocks. +// +// TensorMaterializedBlock does not own its memory buffer, it's either a memory +// buffer that backs the original expression (e.g. block is just a view into a +// Tensor), or a memory buffer allocated with scratch allocator, and in this +// case the scratch allocator will deallocate it at the end of block based +// expression execution. 
+// +// If the block was evaluated directly into the output buffer, and strides in +// the output buffer do not match block strides, the TensorMap expression will +// be invalid, and should never be used in block assignment or any other tensor +// expression. + +template +class TensorMaterializedBlock { + public: + typedef DSizes Dimensions; + typedef TensorMap > XprType; + + TensorMaterializedBlock(TensorBlockKind kind, const Scalar* data, + const Dimensions& dimensions, bool valid_expr = true) + : m_kind(kind), + m_data(data), + m_dimensions(dimensions), + m_expr(m_data, m_dimensions), + m_valid_expr(valid_expr) { + eigen_assert(m_kind == internal::TensorBlockKind::kView || + m_kind == internal::TensorBlockKind::kMaterializedInScratch || + m_kind == internal::TensorBlockKind::kMaterializedInOutput); + } + + TensorBlockKind kind() const { return m_kind; } + // NOTE(ezhulenev): Returning XprType by value like in other block types + // causes asan failures. The theory is that XprType::Nested doesn't work + // properly for TensorMap. + const XprType& expr() const { + eigen_assert(m_valid_expr); + return m_expr; + } + const Scalar* data() const { return m_data; } + void cleanup() {} + + typedef internal::TensorBlockDescriptor TensorBlockDesc; + + // TensorMaterializedBlock can be backed by different types of storage: + // + // (1) Contiguous block of memory allocated with scratch allocator. + // (2) Contiguous block of memory reused from tensor block descriptor + // destination buffer. + // (3) Strided block of memory reused from tensor block descriptor + // destination buffer. + // + class Storage { + public: + Scalar* data() const { return m_data; } + const Dimensions& dimensions() const { return m_dimensions; } + const Dimensions& strides() const { return m_strides; } + + TensorMaterializedBlock AsTensorMaterializedBlock() const { + return TensorMaterializedBlock( + m_materialized_in_output + ? 
internal::TensorBlockKind::kMaterializedInOutput + : internal::TensorBlockKind::kMaterializedInScratch, + m_data, m_dimensions, !m_strided_storage); + } + + private: + friend class TensorMaterializedBlock; + + Storage(Scalar* data, const Dimensions& dimensions, + const Dimensions& strides, bool materialized_in_output, + bool strided_storage) + : m_data(data), + m_dimensions(dimensions), + m_strides(strides), + m_materialized_in_output(materialized_in_output), + m_strided_storage(strided_storage) {} + + Scalar* m_data; + Dimensions m_dimensions; + Dimensions m_strides; + bool m_materialized_in_output; + bool m_strided_storage; + }; + + // Creates a storage for materialized block either from the block descriptor + // destination buffer, or allocates a new buffer with scratch allocator. + template + EIGEN_STRONG_INLINE static Storage prepareStorage( + TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool allow_strided_storage = false) { + // Try to reuse destination as an output block buffer. 
+ typedef typename TensorBlockDesc::DestinationBuffer DestinationBuffer; + + if (desc.destination().kind() == DestinationBuffer::kContiguous) { + Scalar* buffer = desc.destination().template data(); + desc.DropDestinationBuffer(); + return Storage(buffer, desc.dimensions(), + internal::strides(desc.dimensions()), + /*materialized_in_output=*/true, + /*strided_storage=*/false); + + } else if (desc.destination().kind() == DestinationBuffer::kStrided && + allow_strided_storage) { + Scalar* buffer = desc.destination().template data(); + desc.DropDestinationBuffer(); + return Storage(buffer, desc.dimensions(), desc.destination().strides(), + /*materialized_in_output=*/true, /*strided_storage=*/true); + + } else { + void* mem = scratch.allocate(desc.size() * sizeof(Scalar)); + return Storage(static_cast(mem), desc.dimensions(), + internal::strides(desc.dimensions()), + /*materialized_in_output=*/false, + /*strided_storage=*/false); + } + } + + // Creates a materialized block for the given descriptor from a memory buffer. + template + EIGEN_STRONG_INLINE static TensorMaterializedBlock materialize( + const Scalar* data, const DataDimensions& data_dims, + TensorBlockDesc& desc, TensorBlockScratch& scratch) { + eigen_assert(array_size::value == desc.dimensions().size()); + + // If a tensor block dimensions covers a contiguous block of the underlying + // memory, we can skip block buffer memory allocation, and construct a block + // from existing `data` memory buffer. + // + // Example: (RowMajor layout) + // data_dims: [11, 12, 13, 14] + // desc.dimensions(): [1, 1, 3, 14] + // + // In this case we can construct a TensorBlock starting at + // `data + desc.offset()`, with a `desc.dimensions()` block sizes. + static const bool is_col_major = Layout == ColMajor; + + // Find out how many inner dimensions have a matching size. + int num_matching_inner_dims = 0; + for (int i = 0; i < NumDims; ++i) { + int dim = is_col_major ? 
i : NumDims - i - 1; + if (data_dims[dim] != desc.dimensions()[dim]) break; + ++num_matching_inner_dims; + } + + // All the outer dimensions must be of size `1`, except a single dimension + // before the matching inner dimension (`3` in the example above). + bool can_use_direct_access = true; + for (int i = num_matching_inner_dims + 1; i < NumDims; ++i) { + int dim = is_col_major ? i : NumDims - i - 1; + if (desc.dimension(dim) != 1) { + can_use_direct_access = false; + break; + } + } + + if (can_use_direct_access) { + const Scalar* block_start = data + desc.offset(); + return TensorMaterializedBlock(internal::TensorBlockKind::kView, + block_start, desc.dimensions()); + + } else { + // Reuse destination buffer or allocate new buffer with scratch allocator. + const Storage storage = prepareStorage(desc, scratch); + + typedef internal::TensorBlockIO + TensorBlockIO; + typedef typename TensorBlockIO::Dst TensorBlockIODst; + typedef typename TensorBlockIO::Src TensorBlockIOSrc; + + TensorBlockIOSrc src(internal::strides(Dimensions(data_dims)), + data, desc.offset()); + TensorBlockIODst dst(storage.dimensions(), storage.strides(), + storage.data()); + + TensorBlockIO::Copy(dst, src); + return storage.AsTensorMaterializedBlock(); + } + } + + private: + TensorBlockKind m_kind; + const Scalar* m_data; + Dimensions m_dimensions; + XprType m_expr; + bool m_valid_expr; +}; + +// -------------------------------------------------------------------------- // +// TensorCwiseUnaryBlock is a lazy tensor expression block that applies UnaryOp +// functor to the blocks produced by the underlying Tensor expression. 
+ +template +class TensorCwiseUnaryBlock { + static const bool NoArgBlockAccess = + internal::is_void::value; + + public: + typedef typename conditional< + NoArgBlockAccess, void, + TensorCwiseUnaryOp >:: + type XprType; + + typedef typename XprScalar::type Scalar; + + TensorCwiseUnaryBlock(const ArgTensorBlock& arg_block, const UnaryOp& functor) + : m_arg_block(arg_block), m_functor(functor) {} + + TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; } + + XprType expr() const { return XprType(m_arg_block.expr(), m_functor); } + const Scalar* data() const { return NULL; } + void cleanup() { m_arg_block.cleanup(); } + + private: + ArgTensorBlock m_arg_block; + UnaryOp m_functor; +}; + +// -------------------------------------------------------------------------- // +// TensorCwiseUnaryBlock is a lazy tensor expression block that applies BinaryOp +// functor to the blocks produced by the underlying Tensor expression. + +template +class TensorCwiseBinaryBlock { + static const bool NoArgBlockAccess = + internal::is_void::value || + internal::is_void::value; + + public: + typedef typename conditional< + NoArgBlockAccess, void, + TensorCwiseBinaryOp >::type + XprType; + + typedef typename XprScalar::type Scalar; + + TensorCwiseBinaryBlock(const LhsTensorBlock& left_block, + const RhsTensorBlock& right_block, + const BinaryOp& functor) + : m_left_block(left_block), + m_right_block(right_block), + m_functor(functor) {} + + TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; } + + XprType expr() const { + return XprType(m_left_block.expr(), m_right_block.expr(), m_functor); + } + + const Scalar* data() const { return NULL; } + + void cleanup() { + m_left_block.cleanup(); + m_right_block.cleanup(); + } + + private: + LhsTensorBlock m_left_block; + RhsTensorBlock m_right_block; + BinaryOp m_functor; +}; + +// -------------------------------------------------------------------------- // +// TensorUnaryExprBlock is a lazy tensor 
expression block that can construct +// an arbitrary tensor expression from a block of the underlying type (this is a +// generalization of the TensorCwiseUnaryBlock for arbitrary expressions). + +template +class TensorUnaryExprBlock { + typedef typename ArgTensorBlock::XprType ArgXprType; + static const bool NoArgBlockAccess = internal::is_void::value; + + public: + typedef typename conditional< + NoArgBlockAccess, void, + typename BlockFactory::template XprType::type>::type XprType; + + typedef typename XprScalar::type Scalar; + + TensorUnaryExprBlock(const ArgTensorBlock& arg_block, + const BlockFactory& factory) + : m_arg_block(arg_block), m_factory(factory) {} + + TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; } + XprType expr() const { return m_factory.expr(m_arg_block.expr()); } + const Scalar* data() const { return NULL; } + void cleanup() { m_arg_block.cleanup(); } + + private: + ArgTensorBlock m_arg_block; + BlockFactory m_factory; +}; + +// -------------------------------------------------------------------------- // +// TensorTernaryExprBlock is a lazy tensor expression block that can construct +// an arbitrary tensor expression from three blocks of the underlying type. 
+ +template +class TensorTernaryExprBlock { + typedef typename Arg1TensorBlock::XprType Arg1XprType; + typedef typename Arg2TensorBlock::XprType Arg2XprType; + typedef typename Arg3TensorBlock::XprType Arg3XprType; + + static const bool NoArgBlockAccess = internal::is_void::value || + internal::is_void::value || + internal::is_void::value; + + public: + typedef typename conditional< + NoArgBlockAccess, void, + typename BlockFactory::template XprType::type>::type XprType; + + typedef typename XprScalar::type Scalar; + + TensorTernaryExprBlock(const Arg1TensorBlock& arg1_block, + const Arg2TensorBlock& arg2_block, + const Arg3TensorBlock& arg3_block, + const BlockFactory& factory) + : m_arg1_block(arg1_block), + m_arg2_block(arg2_block), + m_arg3_block(arg3_block), + m_factory(factory) {} + + TensorBlockKind kind() const { return internal::TensorBlockKind::kExpr; } + XprType expr() const { + return m_factory.expr(m_arg1_block.expr(), m_arg2_block.expr(), + m_arg3_block.expr()); + } + const Scalar* data() const { return NULL; } + void cleanup() { + m_arg1_block.cleanup(); + m_arg2_block.cleanup(); + m_arg3_block.cleanup(); + } + + private: + Arg1TensorBlock m_arg1_block; + Arg2TensorBlock m_arg2_block; + Arg3TensorBlock m_arg3_block; + BlockFactory m_factory; +}; + +// -------------------------------------------------------------------------- // +// StridedLinearBufferCopy provides a method to copy data between two linear +// buffers with different strides, with optimized paths for scatter/gather. + +template +class StridedLinearBufferCopy { + typedef typename packet_traits::type Packet; + enum { + Vectorizable = packet_traits::Vectorizable, + PacketSize = packet_traits::size + }; + + public: + // Specifying linear copy kind statically gives ~30% speedup for small sizes. 
+ enum class Kind { + Linear = 0, // src_stride == 1 && dst_stride == 1 + Scatter = 1, // src_stride == 1 && dst_stride != 1 + FillLinear = 2, // src_stride == 0 && dst_stride == 1 + FillScatter = 3, // src_stride == 0 && dst_stride != 1 + Gather = 4, // dst_stride == 1 + Random = 5 // everything else + }; + + struct Dst { + Dst(IndexType o, IndexType s, Scalar* d) : offset(o), stride(s), data(d) {} + + IndexType offset; + IndexType stride; + Scalar* data; + }; + + struct Src { + Src(IndexType o, IndexType s, const Scalar* d) + : offset(o), stride(s), data(d) {} + + IndexType offset; + IndexType stride; + const Scalar* data; + }; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Dst& dst, + const Src& src, + const size_t count) { + Run(count, dst.offset, dst.stride, dst.data, src.offset, src.stride, + src.data); + } + + private: + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const IndexType count, const IndexType dst_offset, + const IndexType dst_stride, Scalar* EIGEN_RESTRICT dst_data, + const IndexType src_offset, const IndexType src_stride, + const Scalar* EIGEN_RESTRICT src_data) { + const Scalar* src = &src_data[src_offset]; + Scalar* dst = &dst_data[dst_offset]; + + if (!Vectorizable) { + for (Index i = 0; i < count; ++i) { + dst[i * dst_stride] = src[i * src_stride]; + } + return; + } + + const IndexType vectorized_size = count - PacketSize; + IndexType i = 0; + + if (kind == StridedLinearBufferCopy::Kind::Linear) { + // ******************************************************************** // + // Linear copy from `src` to `dst`. 
+ const IndexType unrolled_size = count - 4 * PacketSize; + eigen_assert(src_stride == 1 && dst_stride == 1); + for (; i <= unrolled_size; i += 4 * PacketSize) { + for (int j = 0; j < 4; ++j) { + Packet p = ploadu(src + i + j * PacketSize); + pstoreu(dst + i + j * PacketSize, p); + } + } + for (; i <= vectorized_size; i += PacketSize) { + Packet p = ploadu(src + i); + pstoreu(dst + i, p); + } + for (; i < count; ++i) { + dst[i] = src[i]; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::Scatter) { + // Scatter from `src` to `dst`. + eigen_assert(src_stride == 1 && dst_stride != 1); + for (; i <= vectorized_size; i += PacketSize) { + Packet p = ploadu(src + i); + pscatter(dst + i * dst_stride, p, dst_stride); + } + for (; i < count; ++i) { + dst[i * dst_stride] = src[i]; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::FillLinear) { + // Fill `dst` with value at `*src`. + eigen_assert(src_stride == 0 && dst_stride == 1); + const IndexType unrolled_size = count - 4 * PacketSize; + Packet p = pload1(src); + for (; i <= unrolled_size; i += 4 * PacketSize) { + for (int j = 0; j < 4; ++j) { + pstoreu(dst + i + j * PacketSize, p); + } + } + for (; i <= vectorized_size; i += PacketSize) { + pstoreu(dst + i, p); + } + for (; i < count; ++i) { + dst[i] = *src; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::FillScatter) { + // Scatter `*src` into `dst`. 
+ eigen_assert(src_stride == 0 && dst_stride != 1); + Packet p = pload1(src); + for (; i <= vectorized_size; i += PacketSize) { + pscatter(dst + i * dst_stride, p, dst_stride); + } + for (; i < count; ++i) { + dst[i * dst_stride] = *src; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::Gather) { + // Gather from `src` into `dst`. + eigen_assert(dst_stride == 1); + for (; i <= vectorized_size; i += PacketSize) { + Packet p = pgather(src + i * src_stride, src_stride); + pstoreu(dst + i, p); + } + for (; i < count; ++i) { + dst[i] = src[i * src_stride]; + } + // ******************************************************************** // + } else if (kind == StridedLinearBufferCopy::Kind::Random) { + // Random. + for (; i < count; ++i) { + dst[i * dst_stride] = src[i * src_stride]; + } + } else { + eigen_assert(false); + } + } +}; + +// -------------------------------------------------------------------------- // +// TensorBlockIO copies data from `src` tensor block, to the `dst` tensor block. +// It's possible to specify src->dst dimension mapping for the copy operation. +// Dimensions of `dst` specify how many elements have to be copied, for the +// `src` we need to know only stride to navigate through source memory buffer. 
+ +template +class TensorBlockIO { + static const bool IsColMajor = (Layout == ColMajor); + + typedef StridedLinearBufferCopy LinCopy; + + public: + typedef DSizes Dimensions; + typedef DSizes DimensionsMap; + + struct Dst { + Dst(const Dimensions& dst_dims, const Dimensions& dst_strides, Scalar* dst, + IndexType dst_offset = 0) + : dims(dst_dims), strides(dst_strides), data(dst), offset(dst_offset) {} + + Dimensions dims; + Dimensions strides; + Scalar* data; + IndexType offset; + }; + + struct Src { + Src(const Dimensions& src_strides, const Scalar* src, + IndexType src_offset = 0) + : strides(src_strides), data(src), offset(src_offset) {} + + Dimensions strides; + const Scalar* data; + IndexType offset; + }; + + // Copies data to `dst` from `src`, using provided dimensions mapping: + // + // src_dimension_index = dst_to_src_dim_map[dst_dimension_index] + // + // Returns the number of copied elements. + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType Copy( + const Dst& dst, const Src& src, const DimensionsMap& dst_to_src_dim_map) { + // Copy single scalar value from `src` to `dst`. + if (NumDims == 0) { + *(dst.data + dst.offset) = *(src.data + src.offset); + return 1; + } + + // Both `dst` and `src` must have contiguous innermost dimension. We also + // accept the special case with stride '0', because it's used as a trick to + // implement broadcasting. + { + int inner_dim = IsColMajor ? 0 : NumDims - 1; + EIGEN_UNUSED_VARIABLE(inner_dim); + eigen_assert(dst.strides[inner_dim] == 1 || dst.strides[inner_dim] == 0); + eigen_assert(src.strides[inner_dim] == 1 || src.strides[inner_dim] == 0); + } + + // Give a shorter name to `dst_to_src_dim_map`. + const DimensionsMap& dim_map = dst_to_src_dim_map; + + // Do not squeeze reordered inner dimensions. 
+ int num_squeezable_dims = NumSqueezableInnerDims(dim_map); + + // NOTE: We find the innermost dimension (contiguous in memory) in the dst + // block, and we write data linearly into that dimension, reading it from + // the src. If dimensions are reordered, we might end up reading data from + // the src with `stride != 1`. + // + // NOTE: Random-Read/Linear-Write can be up to ~2X faster than + // Linear-Read/Random-Write: https://stackoverflow.com/a/54935680 + + // Find the innermost dimension in the dst whose size is not 1. This is the + // effective inner dim. + int num_size_one_inner_dims = 0; + for (int i = 0; i < num_squeezable_dims; ++i) { + const int dst_dim = IsColMajor ? i : NumDims - i - 1; + if (dst.dims[dst_dim] != 1) break; + num_size_one_inner_dims++; + } + + // If all dimensions are of size 1, just copy a scalar from `src` to `dst`. + if (num_size_one_inner_dims == NumDims) { + *(dst.data + dst.offset) = *(src.data + src.offset); + return 1; + } + + // Outermost dimension in the dst with `stride == 1` (contiguous in memory). + const int dst_stride1_dim = IsColMajor + ? num_size_one_inner_dims + : NumDims - num_size_one_inner_dims - 1; + + // Dimension in the src that corresponds to the dst innermost dimension. + const int src_dim_for_dst_stride1_dim = + NumDims == 0 ? 1 : dim_map[dst_stride1_dim]; + + // Size of the innermost dimension (length of contiguous blocks of memory). + IndexType dst_inner_dim_size = NumDims == 0 ? 1 : dst.dims[dst_stride1_dim]; + + // Squeeze multiple inner dims into one if they are contiguous in `dst` and + // `src` memory, so we can do less linear copy calls. + for (int i = num_size_one_inner_dims + 1; i < num_squeezable_dims; ++i) { + const int dst_dim = IsColMajor ? 
i : NumDims - i - 1; + const IndexType dst_stride = dst.strides[dst_dim]; + const IndexType src_stride = src.strides[dim_map[dst_dim]]; + if (dst_inner_dim_size == dst_stride && dst_stride == src_stride) { + dst_inner_dim_size *= dst.dims[dst_dim]; + ++num_size_one_inner_dims; + } else { + break; + } + } + + // Setup strides to read data from `src` and write to `dst`. + IndexType input_offset = src.offset; + IndexType output_offset = dst.offset; + IndexType input_stride = + NumDims == 0 ? 1 : src.strides[src_dim_for_dst_stride1_dim]; + IndexType output_stride = NumDims == 0 ? 1 : dst.strides[dst_stride1_dim]; + + const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1; + array it; + + // Initialize block iterator state. Squeeze away any dimension of size 1. + int idx = 0; // currently initialized iterator state index + for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) { + const int dst_dim = IsColMajor ? i + 1 : NumDims - i - 2; + if (dst.dims[dst_dim] == 1) continue; + + it[idx].size = dst.dims[dst_dim]; + it[idx].input_stride = src.strides[dim_map[dst_dim]]; + it[idx].output_stride = dst.strides[dst_dim]; + + it[idx].input_span = it[idx].input_stride * (it[idx].size - 1); + it[idx].output_span = it[idx].output_stride * (it[idx].size - 1); + + idx++; + } + + // Iterate copying data from src to dst. + const IndexType block_total_size = NumDims == 0 ? 
1 : dst.dims.TotalSize(); + +#define COPY_INNER_DIM(KIND) \ + IndexType num_copied = 0; \ + for (num_copied = 0; num_copied < block_total_size; \ + num_copied += dst_inner_dim_size) { \ + LinCopy::template Run( \ + typename LinCopy::Dst(output_offset, output_stride, dst.data), \ + typename LinCopy::Src(input_offset, input_stride, src.data), \ + dst_inner_dim_size); \ + \ + for (int j = 0; j < idx; ++j) { \ + if (++it[j].count < it[j].size) { \ + input_offset += it[j].input_stride; \ + output_offset += it[j].output_stride; \ + break; \ + } \ + it[j].count = 0; \ + input_offset -= it[j].input_span; \ + output_offset -= it[j].output_span; \ + } \ + } \ + return num_copied; + + if (input_stride == 1 && output_stride == 1) { + COPY_INNER_DIM(LinCopy::Kind::Linear); + } else if (input_stride == 1 && output_stride != 1) { + COPY_INNER_DIM(LinCopy::Kind::Scatter); + } else if (input_stride == 0 && output_stride == 1) { + COPY_INNER_DIM(LinCopy::Kind::FillLinear); + } else if (input_stride == 0 && output_stride != 1) { + COPY_INNER_DIM(LinCopy::Kind::FillScatter); + } else if (output_stride == 1) { + COPY_INNER_DIM(LinCopy::Kind::Gather); + } else { + COPY_INNER_DIM(LinCopy::Kind::Random); + } + +#undef COPY_INNER_DIM + } + + // Copy from `src` to `dst` with an identity src->dst dimension map. Returns + // the number of copied elements. 
+ static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexType Copy(const Dst& dst, + const Src& src) { + DimensionsMap dst_to_src_map; + for (int i = 0; i < NumDims; ++i) dst_to_src_map[i] = i; + return Copy(dst, src, dst_to_src_map); + } + + private: + struct BlockIteratorState { + BlockIteratorState() + : size(0), + count(0), + input_stride(0), + output_stride(0), + input_span(0), + output_span(0) {} + + IndexType size; + IndexType count; + IndexType input_stride; + IndexType output_stride; + IndexType input_span; + IndexType output_span; + }; + + // Compute how many inner dimensions it's allowed to squeeze when doing IO + // between two tensor blocks. It's safe to squeeze inner dimensions, only + // if they are not reordered. + static int NumSqueezableInnerDims(const DimensionsMap& dim_map) { + int num_squeezable_dims = 0; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + if (dim_map[dim] != dim) break; + num_squeezable_dims++; + } + return num_squeezable_dims; + } +}; + +// -------------------------------------------------------------------------- // +// TensorBlockAssignment assigns a block expression of type `TensorBlockExpr` to +// a Tensor block defined by `desc`, backed by a memory buffer at `target`. +// +// Currently there is no way to write from a Tensor expression to a block of +// memory, if dimensions are reordered. If you need to do that, you should +// materialize a Tensor block expression into a memory buffer, and then use +// TensorBlockIO to copy data between two memory buffers with a custom +// `target->src` dimension map (see definition above). +// +// Also currently the innermost dimension of `target` must have a stride '1' +// (contiguous in memory). This restriction could be lifted with a `pscatter`, +// but in practice it's never needed, and there is a similar TensorBlockIO +// workaround for that. 
+// +// TODO(ezhulenev): TensorBlockAssignment is a special case of TensorBlockIO +// where `src` is a tensor expression. Explore if it is possible to rewrite IO +// to use expressions instead of pointers, and after that TensorBlockAssignment +// will become an alias to IO. +template +class TensorBlockAssignment { + // We will use coeff/packet path to evaluate block expressions. + typedef TensorEvaluator + TensorBlockEvaluator; + + typedef DSizes Dimensions; + + enum { + Vectorizable = packet_traits::Vectorizable, + PacketSize = packet_traits::size + }; + + template + struct InnerDimAssign { + EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count, + const Evaluator& eval, + IndexType eval_offset) { + for (IndexType i = 0; i < count; ++i) { + target[i] = eval.coeff(eval_offset + i); + } + } + }; + + template + struct InnerDimAssign { + EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count, + const Evaluator& eval, + IndexType eval_offset) { + typedef typename packet_traits::type Packet; + + const IndexType unrolled_size = count - 4 * PacketSize; + const IndexType vectorized_size = count - PacketSize; + IndexType i = 0; + + for (; i <= unrolled_size; i += 4 * PacketSize) { + for (int j = 0; j < 4; ++j) { + const IndexType idx = eval_offset + i + j * PacketSize; + Packet p = eval.template packet(idx); + pstoreu(target + i + j * PacketSize, p); + } + } + + for (; i <= vectorized_size; i += PacketSize) { + Packet p = eval.template packet(eval_offset + i); + pstoreu(target + i, p); + } + + for (; i < count; ++i) { + target[i] = eval.coeff(eval_offset + i); + } + } + }; + + public: + struct Target { + Target(const Dimensions& target_dims, const Dimensions& target_strides, + Scalar* target_data, IndexType target_offset = 0) + : dims(target_dims), + strides(target_strides), + data(target_data), + offset(target_offset) {} + + Dimensions dims; + Dimensions strides; + Scalar* data; + IndexType offset; + }; + + static Target target(const Dimensions& 
target_dims, + const Dimensions& target_strides, Scalar* target_data, + IndexType target_offset = 0) { + return Target(target_dims, target_strides, target_data, target_offset); + } + + template + static Target target( + const DSizes& target_dims, + const DSizes& target_strides, + Scalar* target_data, IndexType target_offset = 0) { + // DSizes constructor will do index type promotion if it's safe. + return Target(Dimensions(target_dims), Dimensions(target_strides), + target_data, target_offset); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Target& target, const TensorBlockExpr& expr) { + // Prepare evaluator for block expression. + DefaultDevice default_device; + TensorBlockEvaluator eval(expr, default_device); + + // Tensor block expression dimension should match destination dimensions. + eigen_assert(dimensions_match(target.dims, eval.dimensions())); + + static const int Layout = TensorBlockEvaluator::Layout; + static const bool is_col_major = Layout == ColMajor; + + // Initialize output inner dimension size based on a layout. + const IndexType output_size = NumDims == 0 ? 1 : target.dims.TotalSize(); + const int inner_dim_idx = is_col_major ? 0 : NumDims - 1; + IndexType output_inner_dim_size = target.dims[inner_dim_idx]; + + // Target inner dimension stride must be '1'. + eigen_assert(target.strides[inner_dim_idx] == 1); + + // Squeeze multiple inner dims into one if they are contiguous in `target`. + IndexType num_squeezed_dims = 0; + for (Index i = 1; i < NumDims; ++i) { + const Index dim = is_col_major ? i : NumDims - i - 1; + const IndexType target_stride = target.strides[dim]; + + if (output_inner_dim_size == target_stride) { + output_inner_dim_size *= target.dims[dim]; + num_squeezed_dims++; + } else { + break; + } + } + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). 
+ array it; + + int idx = 0; // currently initialized iterator state index + for (Index i = num_squeezed_dims; i < NumDims - 1; ++i) { + const Index dim = is_col_major ? i + 1 : NumDims - i - 2; + + it[idx].count = 0; + it[idx].size = target.dims[dim]; + it[idx].output_stride = target.strides[dim]; + it[idx].output_span = it[idx].output_stride * (it[idx].size - 1); + idx++; + } + + // We read block expression from the beginning, and start writing data to + // `target` at given offset. + IndexType input_offset = 0; + IndexType output_offset = target.offset; + + // Iterate copying data from `eval` to `target`. + for (IndexType i = 0; i < output_size; i += output_inner_dim_size) { + // Assign to `target` at current offset. + InnerDimAssign::Run(target.data + output_offset, + output_inner_dim_size, eval, + input_offset); + + // Move input offset forward by the number of assigned coefficients. + input_offset += output_inner_dim_size; + + // Update index. + for (int j = 0; j < idx; ++j) { + if (++it[j].count < it[j].size) { + output_offset += it[j].output_stride; + break; + } + it[j].count = 0; + output_offset -= it[j].output_span; + } + } + } + + private: + struct BlockIteratorState { + BlockIteratorState() + : count(0), size(0), output_stride(0), output_span(0) {} + + IndexType count; + IndexType size; + IndexType output_stride; + IndexType output_span; + }; +}; + +// -------------------------------------------------------------------------- // + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h new file mode 100644 index 0000000..a354132 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h @@ -0,0 +1,1093 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H +#define EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H + +namespace Eigen { + +/** \class TensorBroadcasting + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor broadcasting class. + * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorBroadcastingOp EIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorBroadcastingOp type; +}; + +template +struct is_input_scalar { + static const bool value = false; +}; +template <> +struct is_input_scalar > { + static const bool value = true; +}; +#ifndef EIGEN_EMULATE_CXX11_META_H +template +struct is_input_scalar > { + static const bool value = (Sizes::total_size == 1); +}; +#endif + +} // end namespace internal + + + +template +class TensorBroadcastingOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE TensorBroadcastingOp(const XprType& expr, const Broadcast& broadcast) + : m_xpr(expr), m_broadcast(broadcast) {} + + EIGEN_DEVICE_FUNC + const Broadcast& broadcast() const { return m_broadcast; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Broadcast m_broadcast; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorBroadcastingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions InputDimensions; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + protected: // all the non-static fields must have the same access control, otherwise the TensorEvaluator wont be standard layout; + bool isCopy, nByOne, oneByN; + public: + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + // We do block based broadcasting using a trick with 2x tensor rank and 0 + // strides. See block method implementation for details. 
+ typedef DSizes BroadcastDimensions; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : isCopy(false), nByOne(false), oneByN(false), + m_device(device), m_broadcast(op.broadcast()), m_impl(op.expression(), device) + { + + // The broadcasting op doesn't change the rank of the tensor. One can't broadcast a scalar + // and store the result in a scalar. Instead one should reshape the scalar into a a N-D + // tensor with N >= 1 of 1 element first and then broadcast. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + const InputDimensions& input_dims = m_impl.dimensions(); + isCopy = true; + for (int i = 0; i < NumDims; ++i) { + eigen_assert(input_dims[i] > 0); + m_dimensions[i] = input_dims[i] * m_broadcast[i]; + if (m_broadcast[i] != 1) { + isCopy = false; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + } else { + m_inputStrides[NumDims-1] = 1; + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims-2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + } + } + + if (input_dims[0] == 1) { + oneByN = true; + for (int i = 1; i < NumDims; ++i) { + if (m_broadcast[i] != 1) { + oneByN = false; + break; + } + } + } else if (input_dims[NumDims-1] == 1) { + nByOne = true; + for 
(int i = 0; i < NumDims-1; ++i) { + if (m_broadcast[i] != 1) { + nByOne = false; + break; + } + } + } + + // Handle special format like NCHW, its input shape is '[1, N..., 1]' and + // broadcast shape is '[N, 1..., N]' + if (!oneByN && !nByOne) { + if (input_dims[0] == 1 && input_dims[NumDims-1] == 1 && NumDims > 2) { + nByOne = true; + oneByN = true; + for (int i = 1; i < NumDims-1; ++i) { + if (m_broadcast[i] != 1) { + nByOne = false; + oneByN = false; + break; + } + } + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const + { + if (internal::is_input_scalar::type>::value) { + return m_impl.coeff(0); + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + if (isCopy) { + return m_impl.coeff(index); + } else { + return coeffColMajor(index); + } + } else { + if (isCopy) { + return m_impl.coeff(index); + } else { + return coeffRowMajor(index); + } + } + } + + // TODO: attempt to speed this up. 
The integer divisions and modulo are slow + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index indexColMajor(Index index) const { + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index < m_impl.dimensions()[0]); + inputIndex += index; + } else { + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index % m_impl.dimensions()[0] == 0); + } else { + inputIndex += (index % m_impl.dimensions()[0]); + } + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const + { + return m_impl.coeff(indexColMajor(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index indexRowMajor(Index index) const { + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + if (internal::index_statically_eq(NumDims - 1, 1)) { + eigen_assert(index < m_impl.dimensions()[NumDims - 1]); + inputIndex += index; + } else { + if (internal::index_statically_eq(NumDims - 1, 1)) { + eigen_assert(index % m_impl.dimensions()[NumDims - 1] == 0); + } else { + inputIndex += (index % m_impl.dimensions()[NumDims 
- 1]); + } + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const + { + return m_impl.coeff(indexRowMajor(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const + { + if (internal::is_input_scalar::type>::value) { + return internal::pset1(m_impl.coeff(0)); + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + if (isCopy) { + #ifdef EIGEN_GPU_COMPILE_PHASE + // See PR 437: on NVIDIA P100 and K20m we observed a x3-4 speed up by enforcing + // unaligned loads here. The reason is unclear though. + return m_impl.template packet(index); + #else + return m_impl.template packet(index); + #endif + } else if (oneByN && !nByOne) { + return packetNByOne(index); + } else if (!oneByN && nByOne) { + return packetOneByN(index); + } else if (oneByN && nByOne) { + return packetOneByNByOne(index); + } else { + return packetColMajor(index); + } + } else { + if (isCopy) { + #ifdef EIGEN_GPU_COMPILE_PHASE + // See above. 
+ return m_impl.template packet(index); + #else + return m_impl.template packet(index); + #endif + } else if (oneByN && !nByOne) { + return packetOneByN(index); + } else if (!oneByN && nByOne) { + return packetNByOne(index); + } else if (oneByN && nByOne) { + return packetOneByNByOne(index); + } else { + return packetRowMajor(index); + } + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetOneByNByOne + (Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + Index startDim, endDim; + Index inputIndex, outputOffset, batchedIndex; + + if (static_cast(Layout) == static_cast(ColMajor)) { + startDim = NumDims - 1; + endDim = 1; + } else { + startDim = 0; + endDim = NumDims - 2; + } + + batchedIndex = index % m_outputStrides[startDim]; + inputIndex = batchedIndex / m_outputStrides[endDim]; + outputOffset = batchedIndex % m_outputStrides[endDim]; + + if (outputOffset + PacketSize <= m_outputStrides[endDim]) { + values[0] = m_impl.coeff(inputIndex); + return internal::pload1(values); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0, cur = 0; i < PacketSize; ++i, ++cur) { + if (outputOffset + cur < m_outputStrides[endDim]) { + values[i] = m_impl.coeff(inputIndex); + } else { + ++inputIndex; + inputIndex = (inputIndex == m_inputStrides[startDim] ? 
0 : inputIndex); + values[i] = m_impl.coeff(inputIndex); + outputOffset = 0; + cur = 0; + } + } + return internal::pload(values); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetOneByN(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + Index dim, inputIndex; + + if (static_cast(Layout) == static_cast(ColMajor)) { + dim = NumDims - 1; + } else { + dim = 0; + } + + inputIndex = index % m_inputStrides[dim]; + if (inputIndex + PacketSize <= m_inputStrides[dim]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + if (inputIndex > m_inputStrides[dim]-1) { + inputIndex = 0; + } + values[i] = m_impl.coeff(inputIndex++); + } + return internal::pload(values); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetNByOne(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + Index dim, inputIndex, outputOffset; + + if (static_cast(Layout) == static_cast(ColMajor)) { + dim = 1; + } else { + dim = NumDims - 2; + } + + inputIndex = index / m_outputStrides[dim]; + outputOffset = index % m_outputStrides[dim]; + if (outputOffset + PacketSize <= m_outputStrides[dim]) { + values[0] = m_impl.coeff(inputIndex); + return internal::pload1(values); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0, cur = 0; i < PacketSize; ++i, ++cur) { + if (outputOffset + cur < m_outputStrides[dim]) { + values[i] = m_impl.coeff(inputIndex); + } else { + values[i] = m_impl.coeff(++inputIndex); + outputOffset = 0; + cur = 0; + } + } + return internal::pload(values); + } + } + + // Ignore the LoadMode and always 
use unaligned loads since we can't guarantee + // the alignment at compile time. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index originalIndex = index; + + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + Index innermostLoc; + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index < m_impl.dimensions()[0]); + innermostLoc = index; + } else { + if (internal::index_statically_eq(0, 1)) { + eigen_assert(index % m_impl.dimensions()[0] == 0); + innermostLoc = 0; + } else { + innermostLoc = index % m_impl.dimensions()[0]; + } + } + inputIndex += innermostLoc; + + // Todo: this could be extended to the second dimension if we're not + // broadcasting alongside the first dimension, and so on. 
+ if (innermostLoc + PacketSize <= m_impl.dimensions()[0]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndex); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize; ++i) { + if (innermostLoc + i < m_impl.dimensions()[0]) { + values[i] = m_impl.coeff(inputIndex+i); + } else { + values[i] = coeffColMajor(originalIndex+i); + } + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index originalIndex = index; + + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx < m_impl.dimensions()[i]); + inputIndex += idx * m_inputStrides[i]; + } else { + if (internal::index_statically_eq(i, 1)) { + eigen_assert(idx % m_impl.dimensions()[i] == 0); + } else { + inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i]; + } + } + index -= idx * m_outputStrides[i]; + } + Index innermostLoc; + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index < m_impl.dimensions()[NumDims-1]); + innermostLoc = index; + } else { + if (internal::index_statically_eq(NumDims-1, 1)) { + eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0); + innermostLoc = 0; + } else { + innermostLoc = index % m_impl.dimensions()[NumDims-1]; + } + } + inputIndex += innermostLoc; + + // Todo: this could be extended to the second dimension if we're not + // broadcasting alongside the first dimension, and so on. 
+ if (innermostLoc + PacketSize <= m_impl.dimensions()[NumDims-1]) { + return m_impl.template packet(inputIndex); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndex); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize; ++i) { + if (innermostLoc + i < m_impl.dimensions()[NumDims-1]) { + values[i] = m_impl.coeff(inputIndex+i); + } else { + values[i] = coeffRowMajor(originalIndex+i); + } + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + double compute_cost = TensorOpCost::AddCost(); + if (!isCopy && NumDims > 0) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + compute_cost += TensorOpCost::DivCost(); + if (internal::index_statically_eq(i, 1)) { + compute_cost += + TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } else { + if (!internal::index_statically_eq(i, 1)) { + compute_cost += TensorOpCost::MulCost() + + TensorOpCost::ModCost() + + TensorOpCost::AddCost(); + } + } + compute_cost += + TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } + } + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + // TODO(wuke): Targeting L1 size is 30% faster than targeting L{-1} on large + // tensors. But this might need further tuning. 
+ const size_t target_size = m_device.firstLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + m_impl.getResourceRequirements(), + internal::TensorBlockResourceRequirements::skewed(target_size)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + BlockBroadcastingParams params = blockBroadcastingParams(desc); + + if (params.inner_dim_size == 0 || params.bcast_dim_size == 0) { + return emptyBlock(); + } + + // Prepare storage for the materialized broadcasting result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + ScalarNoConst* materialized_output = block_storage.data(); + + // We potentially will need to materialize input blocks. + size_t materialized_input_size = 0; + ScalarNoConst* materialized_input = NULL; + + // Initialize block broadcating iterator state for outer dimensions (outer + // with regard to bcast dimension). Dimension in this array are always in + // inner_most -> outer_most order (col major layout). + array it; + int idx = 0; + + for (int i = params.inner_dim_count + 1; i < NumDims; ++i) { + const Index dim = IsColMajor ? i : NumDims - 1 - i; + it[idx].size = params.output_dims[dim]; + it[idx].count = 0; + it[idx].output_stride = m_outputStrides[dim]; + it[idx].output_span = it[idx].output_stride * (it[idx].size - 1); + idx++; + } + + // Write output into the beginning of `materialized_output`. + Index output_offset = 0; + + // We will fill output block by broadcasting along the bcast dim, and + // iterating over outer dimension. + const Index output_size = NumDims == 0 ? 1 : params.output_dims.TotalSize(); + + for (Index num_output_coeffs = 0; num_output_coeffs < output_size;) { + ScalarNoConst* bcast_output = materialized_output + num_output_coeffs; + Index bcast_offset = desc.offset() + output_offset; + + // Broadcast along the bcast dimension. 
+ num_output_coeffs += BroadcastBlockAlongBcastDim( + params, bcast_offset, scratch, bcast_output, &materialized_input, + &materialized_input_size); + + // Switch to the next outer dimension. + for (int j = 0; j < idx; ++j) { + if (++it[j].count < it[j].size) { + output_offset += it[j].output_stride; + break; + } + it[j].count = 0; + output_offset -= it[j].output_span; + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + const TensorEvaluator& impl() const { return m_impl; } + + Broadcast functor() const { return m_broadcast; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind( + cl::sycl::handler& cgh) const { + m_impl.bind(cgh); + } +#endif + private: + static const bool IsColMajor = + static_cast(Layout) == static_cast(ColMajor); + + // We will build a general case block broadcasting on top of broadcasting + // primitive that will do broadcasting only for the inner dimension(s) along + // the first dimension smaller than the input size (it's called `bcast_dim`). + // + // Example: + // dim: 0 1 2 (ColMajor) + // input size: [9, 3, 6] + // block size: [9, 2, 6] + // + // We will compute broadcasted block by iterating over the outer dimensions + // before `bcast_dim` (only dimension `2` in this example) and computing + // broadcasts along the `bcast_dim` (dimension `1` in this example). + + // BlockBroadcastingParams holds precomputed parameters for broadcasting a + // single block along the broadcasting dimension. Sizes and strides along the + // `bcast_dim` might be invalid, they will be adjusted later in + // `BroadcastBlockAlongBcastDim`. 
+ struct BlockBroadcastingParams { + Dimensions input_dims; // input expression dimensions + Dimensions output_dims; // output block sizes + Dimensions output_strides; // output block strides + + int inner_dim_count; // count inner dimensions matching in size + int bcast_dim; // broadcasting dimension index + Index bcast_dim_size; // broadcasting dimension size + Index inner_dim_size; // inner dimensions size + + // Block sizes and strides for the input block where all dimensions before + // `bcast_dim` are equal to `1`. + Dimensions input_block_sizes; + Dimensions input_block_strides; + + // Block sizes and strides for blocks with extra dimensions and strides `0`. + BroadcastDimensions bcast_block_sizes; + BroadcastDimensions bcast_block_strides; + BroadcastDimensions bcast_input_strides; + }; + + struct BlockBroadcastingIteratorState { + Index size; + Index count; + Index output_stride; + Index output_span; + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlockBroadcastingParams + blockBroadcastingParams(TensorBlockDesc& desc) const { + BlockBroadcastingParams params; + + params.input_dims = Dimensions(m_impl.dimensions()); + + // Output block sizes and strides. + params.output_dims = desc.dimensions(); + params.output_strides = internal::strides(params.output_dims); + + // Find the broadcasting dimension (first dimension with output size smaller + // that the input size). + params.bcast_dim = 0; + params.bcast_dim_size = 1; + params.inner_dim_size = 1; + + // Count the number of inner dimensions that have the same size in the block + // and in the broadcast expression. + params.inner_dim_count = 0; + + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + + if (params.output_dims[dim] == m_dimensions[dim]) { + params.inner_dim_size *= params.output_dims[dim]; + ++params.inner_dim_count; + continue; + } + + // First non-matching dimension is the broadcasting dimension. 
+ eigen_assert(params.output_dims[dim] < m_dimensions[dim]); + params.bcast_dim = dim; + params.bcast_dim_size = params.output_dims[dim]; + break; + } + + // Calculate the input block size for looking into the input. + for (int i = 0; i < params.inner_dim_count; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + params.input_block_sizes[dim] = params.input_dims[dim]; + } + for (int i = params.inner_dim_count; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + params.input_block_sizes[dim] = 1; + } + params.input_block_strides = + internal::strides(params.input_block_sizes); + + // Broadcast with the 0-stride trick: Create 1 extra dim for each + // broadcast, set the input stride to 0. + // + // When ColMajor: + // + // - bcast_block_sizes: + // [d_0, b_0, d_1, b_1, ...] + // + // - bcast_block_strides: + // [output_block_strides[0], output_block_strides[0] * d_0, + // output_block_strides[1], output_block_strides[1] * d_1, + // ...] + // + // - bcast_input_strides: + // [input_block_strides[0], 0, + // input_block_strides[1], 0, + // ...]. + // + for (int i = 0; i < params.inner_dim_count; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + + const int copy_dim = IsColMajor ? 2 * i : 2 * NumDims - 2 * i - 1; + const int broadcast_dim = IsColMajor ? copy_dim + 1 : copy_dim - 1; + + params.bcast_block_sizes[copy_dim] = params.input_dims[dim]; + params.bcast_block_sizes[broadcast_dim] = m_broadcast[dim]; + params.bcast_block_strides[copy_dim] = params.output_strides[dim]; + params.bcast_block_strides[broadcast_dim] = + params.output_strides[dim] * params.input_dims[dim]; + params.bcast_input_strides[copy_dim] = params.input_block_strides[dim]; + params.bcast_input_strides[broadcast_dim] = 0; + } + + for (int i = 2 * params.inner_dim_count; i < 2 * NumDims; ++i) { + const int dim = IsColMajor ? 
i : 2 * NumDims - i - 1; + params.bcast_block_sizes[dim] = 1; + params.bcast_block_strides[dim] = 0; + params.bcast_input_strides[dim] = 0; + } + + return params; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock emptyBlock() const { + DSizes dimensions; + for (int i = 0; i < NumDims; ++i) dimensions[i] = 0; + return TensorBlock(internal::TensorBlockKind::kView, NULL, dimensions); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index BroadcastBlockAlongBcastDim( + BlockBroadcastingParams params, Index bcast_offset, + TensorBlockScratch& scratch, ScalarNoConst* materialized_output, + ScalarNoConst** materialized_input, + size_t* materialized_input_size) const { + if (params.bcast_dim_size == 1) { + // We just need one block read using the ready-set values above. + return BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + + } else if (params.input_dims[params.bcast_dim] == 1) { + // Broadcast bcast dimension (< NumDims) by bcast_dim_size. + const int broadcast_bcast_dim = + IsColMajor ? 2 * params.inner_dim_count + 1 + : 2 * NumDims - 2 * params.inner_dim_count - 2; + + params.bcast_block_sizes[broadcast_bcast_dim] = params.bcast_dim_size; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim]; + + return BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + + } else { + // Keep track of the total number of the coefficients written to the + // output block. + Index num_output_coeffs = 0; + + // The general case. 
Let's denote the output block as + // + // x[..., a:a+bcast_dim_size, :, ..., :] + // + // where a:a+bcast_dim_size is a slice on the bcast_dim dimension + // (< NumDims). We need to split the a:a+bcast_dim_size into possibly 3 + // sub-blocks: + // + // (1) a:b, where b is the smallest multiple of + // input_dims[bcast_dim_start] in [a, a+bcast_dim_size]. + // + // (2) b:c, where c is the largest multiple of input_dims[bcast_dim_start] + // in [a, a+bcast_dim_size]. + // + // (3) c:a+bcast_dim_size . + // + // Or, when b and c do not exist, we just need to process the whole block + // together. + + // Find a. + const Index bcast_dim_left_index = + bcast_offset / m_outputStrides[params.bcast_dim]; + + // Find b and c. + const Index input_bcast_dim_size = params.input_dims[params.bcast_dim]; + + // First multiple after a. This is b when <= bcast_dim_left_index + + // bcast_dim_size. + const Index first_multiple = + divup(bcast_dim_left_index, input_bcast_dim_size) * + input_bcast_dim_size; + + if (first_multiple <= bcast_dim_left_index + params.bcast_dim_size) { + // b exists, so does c. Find it. + const Index last_multiple = + (bcast_dim_left_index + params.bcast_dim_size) / + input_bcast_dim_size * input_bcast_dim_size; + const int copy_bcast_dim = + IsColMajor ? 2 * params.inner_dim_count + : 2 * NumDims - 2 * params.inner_dim_count - 1; + const int broadcast_bcast_dim = + IsColMajor ? 
2 * params.inner_dim_count + 1 + : 2 * NumDims - 2 * params.inner_dim_count - 2; + + if (first_multiple > bcast_dim_left_index) { + const Index head_size = first_multiple - bcast_dim_left_index; + params.input_block_sizes[params.bcast_dim] = head_size; + params.bcast_block_sizes[copy_bcast_dim] = head_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + params.bcast_block_sizes[broadcast_bcast_dim] = 1; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim] * + params.input_dims[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + } + if (first_multiple < last_multiple) { + params.input_block_sizes[params.bcast_dim] = input_bcast_dim_size; + params.bcast_block_sizes[copy_bcast_dim] = input_bcast_dim_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + params.bcast_block_sizes[broadcast_bcast_dim] = + (last_multiple - first_multiple) / input_bcast_dim_size; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim] * + params.input_dims[params.bcast_dim]; + const Index offset = (first_multiple - bcast_dim_left_index) * + m_outputStrides[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, offset, scratch, + 
materialized_output, materialized_input, materialized_input_size); + } + if (last_multiple < bcast_dim_left_index + params.bcast_dim_size) { + const Index tail_size = + bcast_dim_left_index + params.bcast_dim_size - last_multiple; + params.input_block_sizes[params.bcast_dim] = tail_size; + params.bcast_block_sizes[copy_bcast_dim] = tail_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + params.bcast_block_sizes[broadcast_bcast_dim] = 1; + params.bcast_input_strides[broadcast_bcast_dim] = 0; + params.bcast_block_strides[broadcast_bcast_dim] = + params.output_strides[params.bcast_dim] * + params.input_dims[params.bcast_dim]; + const Index offset = (last_multiple - bcast_dim_left_index) * + m_outputStrides[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, offset, scratch, + materialized_output, materialized_input, materialized_input_size); + } + } else { + // b and c do not exist. + const int copy_bcast_dim = + IsColMajor ? 
2 * params.inner_dim_count + : 2 * NumDims - 2 * params.inner_dim_count - 1; + params.input_block_sizes[params.bcast_dim] = params.bcast_dim_size; + params.bcast_block_sizes[copy_bcast_dim] = params.bcast_dim_size; + params.bcast_input_strides[copy_bcast_dim] = + params.input_block_strides[params.bcast_dim]; + params.bcast_block_strides[copy_bcast_dim] = + params.output_strides[params.bcast_dim]; + + num_output_coeffs += BroadcastBlock( + params.input_block_sizes, params.input_block_strides, + params.bcast_block_sizes, params.bcast_block_strides, + params.bcast_input_strides, bcast_offset, 0, scratch, + materialized_output, materialized_input, materialized_input_size); + } + + return num_output_coeffs; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index BroadcastBlock( + const Dimensions& input_block_sizes, + const Dimensions& input_block_strides, + const BroadcastDimensions& bcast_block_sizes, + const BroadcastDimensions& bcast_block_strides, + const BroadcastDimensions& bcast_input_strides, Index bcast_offset, + Index offset, TensorBlockScratch& scratch, + ScalarNoConst* materialized_output, ScalarNoConst** materialized_input, + size_t* materialized_input_size) const { + // ---------------------------------------------------------------------- // + // Tensor block descriptor for reading block from the input. + const Index input_offset = bcast_offset + offset; + TensorBlockDesc input_desc( + IsColMajor ? indexColMajor(input_offset) : indexRowMajor(input_offset), + input_block_sizes); + + ArgTensorBlock input_block = m_impl.block(input_desc, scratch); + + // ---------------------------------------------------------------------- // + // Materialize input block into a temporary memory buffer only if it's not + // already available in the arg block. + const ScalarNoConst* input_buffer = NULL; + + if (input_block.data() != NULL) { + // Input block already has raw data, there is no need to materialize it. 
+ input_buffer = input_block.data(); + + } else { + // Otherwise we have to do block assignment into a temporary buffer. + + // Maybe reuse previously allocated buffer, or allocate a new one with a + // scratch allocator. + const size_t input_total_size = input_block_sizes.TotalSize(); + if (*materialized_input == NULL || + *materialized_input_size < input_total_size) { + *materialized_input_size = input_total_size; + void* mem = scratch.allocate(*materialized_input_size * sizeof(Scalar)); + *materialized_input = static_cast(mem); + } + + typedef internal::TensorBlockAssignment< + ScalarNoConst, NumDims, typename ArgTensorBlock::XprType, Index> + TensorBlockAssignment; + + TensorBlockAssignment::Run( + TensorBlockAssignment::target(input_block_sizes, input_block_strides, + *materialized_input), + input_block.expr()); + + input_buffer = *materialized_input; + } + + // ---------------------------------------------------------------------- // + // Copy data from materialized input block to the materialized output, using + // given broadcast strides (strides with zeroes). 
+ typedef internal::TensorBlockIO + TensorBlockIO; + + typename TensorBlockIO::Src src(bcast_input_strides, input_buffer); + typename TensorBlockIO::Dst dst(bcast_block_sizes, bcast_block_strides, + materialized_output + offset); + + return TensorBlockIO::Copy(dst, src); + } + +protected: + const Device EIGEN_DEVICE_REF m_device; + const typename internal::remove_reference::type m_broadcast; + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_BROADCASTING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h new file mode 100644 index 0000000..3764573 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h @@ -0,0 +1,518 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H +#define EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H + +namespace Eigen { + +/** \class TensorKChippingReshaping + * \ingroup CXX11_Tensor_Module + * + * \brief A chip is a thin slice, corresponding to a column or a row in a 2-d tensor. 
+ * + * + */ + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorChippingOp EIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorChippingOp type; +}; + +template +struct DimensionId +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) { + EIGEN_UNUSED_VARIABLE(dim); + eigen_assert(dim == DimId); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { + return DimId; + } +}; +template <> +struct DimensionId +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DimensionId(DenseIndex dim) : actual_dim(dim) { + eigen_assert(dim >= 0); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const { + return actual_dim; + } + private: + const DenseIndex actual_dim; +}; + + +} // end namespace internal + + + +template +class TensorChippingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim) + : m_xpr(expr), m_offset(offset), m_dim(dim) { + } + + EIGEN_DEVICE_FUNC + const Index offset() const { return 
m_offset; } + EIGEN_DEVICE_FUNC + const Index dim() const { return m_dim.actualDim(); } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorChippingOp) + + protected: + typename XprType::Nested m_xpr; + const Index m_offset; + const internal::DimensionId m_dim; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorChippingOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims-1; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets. + IsAligned = false, + Layout = TensorEvaluator::Layout, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess, + // Chipping of outer-most dimension is a trivial operation, because we can + // read and write directly from the underlying tensor using single offset. + IsOuterChipping = (static_cast(Layout) == ColMajor && DimId == NumInputDims - 1) || + (static_cast(Layout) == RowMajor && DimId == 0), + // Chipping inner-most dimension. + IsInnerChipping = (static_cast(Layout) == ColMajor && DimId == 0) || + (static_cast(Layout) == RowMajor && DimId == NumInputDims - 1), + // Prefer block access if the underlying expression prefers it, otherwise + // only if chipping is not trivial. 
+ PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + !IsOuterChipping, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef internal::TensorBlockDescriptor + ArgTensorBlockDesc; + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device) + { + EIGEN_STATIC_ASSERT((NumInputDims >= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(NumInputDims > m_dim.actualDim()); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + eigen_assert(op.offset() < input_dims[m_dim.actualDim()]); + + int j = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (i != m_dim.actualDim()) { + m_dimensions[j] = input_dims[i]; + ++j; + } + } + + m_stride = 1; + m_inputStride = 1; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < m_dim.actualDim(); ++i) { + m_stride *= input_dims[i]; + m_inputStride *= input_dims[i]; + } + } else { + for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) { + m_stride *= input_dims[i]; + m_inputStride *= input_dims[i]; + } + } + m_inputStride *= input_dims[m_dim.actualDim()]; + m_inputOffset = m_stride * op.offset(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + 
m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(srcCoeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (isInnerChipping()) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(m_stride == 1); + Index inputIndex = index * m_inputStride + m_inputOffset; + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = m_impl.coeff(inputIndex); + inputIndex += m_inputStride; + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } else if (isOuterChipping()) { + // m_stride is always greater than index, so let's avoid the integer division. + eigen_assert(m_stride > index); + return m_impl.template packet(index + m_inputOffset); + } else { + const Index idx = index / m_stride; + const Index rem = index - idx * m_stride; + if (rem + PacketSize <= m_stride) { + Index inputIndex = idx * m_inputStride + m_inputOffset + rem; + return m_impl.template packet(inputIndex); + } else { + // Cross the stride boundary. Fallback to slow path. 
+ EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index); + ++index; + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + double cost = 0; + if ((static_cast(Layout) == static_cast(ColMajor) && + m_dim.actualDim() == 0) || + (static_cast(Layout) == static_cast(RowMajor) && + m_dim.actualDim() == NumInputDims - 1)) { + cost += TensorOpCost::MulCost() + TensorOpCost::AddCost(); + } else if ((static_cast(Layout) == static_cast(ColMajor) && + m_dim.actualDim() == NumInputDims - 1) || + (static_cast(Layout) == static_cast(RowMajor) && + m_dim.actualDim() == 0)) { + cost += TensorOpCost::AddCost(); + } else { + cost += 3 * TensorOpCost::MulCost() + TensorOpCost::DivCost() + + 3 * TensorOpCost::AddCost(); + } + + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + internal::TensorBlockResourceRequirements::skewed(target_size), + m_impl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool root_of_expr_ast = false) const { + const Index chip_dim = m_dim.actualDim(); + + DSizes input_block_dims; + for (int i = 0; i < NumInputDims; ++i) { + input_block_dims[i] + = i < chip_dim ? desc.dimension(i) + : i > chip_dim ? desc.dimension(i - 1) + : 1; + } + + ArgTensorBlockDesc arg_desc(srcCoeff(desc.offset()), input_block_dims); + + // Try to reuse destination buffer for materializing argument block. 
+ if (desc.HasDestinationBuffer()) { + DSizes arg_destination_strides; + for (int i = 0; i < NumInputDims; ++i) { + arg_destination_strides[i] + = i < chip_dim ? desc.destination().strides()[i] + : i > chip_dim ? desc.destination().strides()[i - 1] + : 0; // for dimensions of size `1` stride should never be used. + } + + arg_desc.template AddDestinationBuffer( + desc.destination().template data(), + arg_destination_strides); + } + + ArgTensorBlock arg_block = m_impl.block(arg_desc, scratch, root_of_expr_ast); + if (!arg_desc.HasDestinationBuffer()) desc.DropDestinationBuffer(); + + if (arg_block.data() != NULL) { + // Forward argument block buffer if possible. + return TensorBlock(arg_block.kind(), arg_block.data(), + desc.dimensions()); + + } else { + // Assign argument block expression to a buffer. + + // Prepare storage for the materialized chipping result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + + typedef internal::TensorBlockAssignment< + ScalarNoConst, NumInputDims, typename ArgTensorBlock::XprType, Index> + TensorBlockAssignment; + + TensorBlockAssignment::Run( + TensorBlockAssignment::target( + arg_desc.dimensions(), + internal::strides(arg_desc.dimensions()), + block_storage.data()), + arg_block.expr()); + + return block_storage.AsTensorMaterializedBlock(); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const { + typename Storage::Type result = constCast(m_impl.data()); + if (isOuterChipping() && result) { + return result + m_inputOffset; + } else { + return NULL; + } + } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex; + if (isInnerChipping()) { + // m_stride is equal to 1, so let's avoid the 
integer division. + eigen_assert(m_stride == 1); + inputIndex = index * m_inputStride + m_inputOffset; + } else if (isOuterChipping()) { + // m_stride is always greater than index, so let's avoid the integer + // division. + eigen_assert(m_stride > index); + inputIndex = index + m_inputOffset; + } else { + const Index idx = index / m_stride; + inputIndex = idx * m_inputStride + m_inputOffset; + index -= idx * m_stride; + inputIndex += index; + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool isInnerChipping() const { + return IsInnerChipping || + (static_cast(Layout) == ColMajor && m_dim.actualDim() == 0) || + (static_cast(Layout) == RowMajor && m_dim.actualDim() == NumInputDims - 1); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool isOuterChipping() const { + return IsOuterChipping || + (static_cast(Layout) == ColMajor && m_dim.actualDim() == NumInputDims-1) || + (static_cast(Layout) == RowMajor && m_dim.actualDim() == 0); + } + + Dimensions m_dimensions; + Index m_stride; + Index m_inputOffset; + Index m_inputStride; + TensorEvaluator m_impl; + const internal::DimensionId m_dim; + const Device EIGEN_DEVICE_REF m_device; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorChippingOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims-1; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::RawAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see 
TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + + if (this->isInnerChipping()) { + // m_stride is equal to 1, so let's avoid the integer division. + eigen_assert(this->m_stride == 1); + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + Index inputIndex = index * this->m_inputStride + this->m_inputOffset; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->m_impl.coeffRef(inputIndex) = values[i]; + inputIndex += this->m_inputStride; + } + } else if (this->isOuterChipping()) { + // m_stride is always greater than index, so let's avoid the integer division. + eigen_assert(this->m_stride > index); + this->m_impl.template writePacket(index + this->m_inputOffset, x); + } else { + const Index idx = index / this->m_stride; + const Index rem = index - idx * this->m_stride; + if (rem + PacketSize <= this->m_stride) { + const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem; + this->m_impl.template writePacket(inputIndex, x); + } else { + // Cross stride boundary. Fallback to slow path. 
+ EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + internal::pstore(values, x); + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index) = values[i]; + ++index; + } + } + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + assert(this->m_impl.data() != NULL); + + const Index chip_dim = this->m_dim.actualDim(); + + DSizes input_block_dims; + for (int i = 0; i < NumInputDims; ++i) { + input_block_dims[i] = i < chip_dim ? desc.dimension(i) + : i > chip_dim ? desc.dimension(i - 1) + : 1; + } + + typedef TensorReshapingOp, + const typename TensorBlock::XprType> + TensorBlockExpr; + + typedef internal::TensorBlockAssignment + TensorBlockAssign; + + TensorBlockAssign::Run( + TensorBlockAssign::target( + input_block_dims, + internal::strides(this->m_impl.dimensions()), + this->m_impl.data(), this->srcCoeff(desc.offset())), + block.expr().reshape(input_block_dims)); + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CHIPPING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h new file mode 100644 index 0000000..5235a8e --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h @@ -0,0 +1,377 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H + +namespace Eigen { + +/** \class TensorConcatenationOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor concatenation class. 
+ * + * + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename promote_storage_type::ret Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + enum { Flags = 0 }; + typedef typename conditional::val, + typename traits::PointerType, typename traits::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConcatenationOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConcatenationOp type; +}; + +} // end namespace internal + + +template +class TensorConcatenationOp : public TensorBase, WriteAccessors> +{ + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::nested::type Nested; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConcatenationOp(const LhsXprType& lhs, const RhsXprType& rhs, Axis axis) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_axis(axis) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + EIGEN_DEVICE_FUNC const Axis& axis() const { return 
m_axis; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorConcatenationOp) + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const Axis m_axis; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorConcatenationOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + static const int RightNumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis()) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumDims == RightNumDims), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + eigen_assert(0 <= m_axis && m_axis < NumDims); + const Dimensions& lhs_dims = m_leftImpl.dimensions(); + const Dimensions& rhs_dims = m_rightImpl.dimensions(); + { + int i = 0; + for (; i < m_axis; ++i) { + 
eigen_assert(lhs_dims[i] > 0); + eigen_assert(lhs_dims[i] == rhs_dims[i]); + m_dimensions[i] = lhs_dims[i]; + } + eigen_assert(lhs_dims[i] > 0); // Now i == m_axis. + eigen_assert(rhs_dims[i] > 0); + m_dimensions[i] = lhs_dims[i] + rhs_dims[i]; + for (++i; i < NumDims; ++i) { + eigen_assert(lhs_dims[i] > 0); + eigen_assert(lhs_dims[i] == rhs_dims[i]); + m_dimensions[i] = lhs_dims[i]; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_leftStrides[0] = 1; + m_rightStrides[0] = 1; + m_outputStrides[0] = 1; + + for (int j = 1; j < NumDims; ++j) { + m_leftStrides[j] = m_leftStrides[j-1] * lhs_dims[j-1]; + m_rightStrides[j] = m_rightStrides[j-1] * rhs_dims[j-1]; + m_outputStrides[j] = m_outputStrides[j-1] * m_dimensions[j-1]; + } + } else { + m_leftStrides[NumDims - 1] = 1; + m_rightStrides[NumDims - 1] = 1; + m_outputStrides[NumDims - 1] = 1; + + for (int j = NumDims - 2; j >= 0; --j) { + m_leftStrides[j] = m_leftStrides[j+1] * lhs_dims[j+1]; + m_rightStrides[j] = m_rightStrides[j+1] * rhs_dims[j+1]; + m_outputStrides[j] = m_outputStrides[j+1] * m_dimensions[j+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + // TODO(phli): Add short-circuit memcpy evaluation if underlying data are linear? + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) + { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() + { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + // TODO(phli): attempt to speed this up. The integer divisions and modulo are slow. + // See CL/76180724 comments for more ideas. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Collect dimension-wise indices (subs). 
+ array subs; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + subs[i] = index / m_outputStrides[i]; + index -= subs[i] * m_outputStrides[i]; + } + subs[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + subs[i] = index / m_outputStrides[i]; + index -= subs[i] * m_outputStrides[i]; + } + subs[NumDims - 1] = index; + } + + const Dimensions& left_dims = m_leftImpl.dimensions(); + if (subs[m_axis] < left_dims[m_axis]) { + Index left_index; + if (static_cast(Layout) == static_cast(ColMajor)) { + left_index = subs[0]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < NumDims; ++i) { + left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; + } + } else { + left_index = subs[NumDims - 1]; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i >= 0; --i) { + left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; + } + } + return m_leftImpl.coeff(left_index); + } else { + subs[m_axis] -= left_dims[m_axis]; + const Dimensions& right_dims = m_rightImpl.dimensions(); + Index right_index; + if (static_cast(Layout) == static_cast(ColMajor)) { + right_index = subs[0]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < NumDims; ++i) { + right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; + } + } else { + right_index = subs[NumDims - 1]; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i >= 0; --i) { + right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; + } + } + return m_rightImpl.coeff(right_index); + } + } + + // TODO(phli): Add a real vectorization. 
+ template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + packetSize - 1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < packetSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost() + + TensorOpCost::ModCost()); + const double lhs_size = m_leftImpl.dimensions().TotalSize(); + const double rhs_size = m_rightImpl.dimensions().TotalSize(); + return (lhs_size / (lhs_size + rhs_size)) * + m_leftImpl.costPerCoeff(vectorized) + + (rhs_size / (lhs_size + rhs_size)) * + m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_leftImpl.bind(cgh); + m_rightImpl.bind(cgh); + } + #endif + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_leftStrides; + array m_rightStrides; + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; + const Axis m_axis; +}; + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorConcatenationOp XprType; + typedef typename Base::Dimensions Dimensions; + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = 
TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(XprType& op, const Device& device) + : Base(op, device) + { + EIGEN_STATIC_ASSERT((static_cast(Layout) == static_cast(ColMajor)), YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + // Collect dimension-wise indices (subs). + array subs; + for (int i = Base::NumDims - 1; i > 0; --i) { + subs[i] = index / this->m_outputStrides[i]; + index -= subs[i] * this->m_outputStrides[i]; + } + subs[0] = index; + + const Dimensions& left_dims = this->m_leftImpl.dimensions(); + if (subs[this->m_axis] < left_dims[this->m_axis]) { + Index left_index = subs[0]; + for (int i = 1; i < Base::NumDims; ++i) { + left_index += (subs[i] % left_dims[i]) * this->m_leftStrides[i]; + } + return this->m_leftImpl.coeffRef(left_index); + } else { + subs[this->m_axis] -= left_dims[this->m_axis]; + const Dimensions& right_dims = this->m_rightImpl.dimensions(); + Index right_index = subs[0]; + for (int i = 1; i < Base::NumDims; ++i) { + right_index += (subs[i] % right_dims[i]) * this->m_rightStrides[i]; + } + return this->m_rightImpl.coeffRef(right_index); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + packetSize - 1 < 
this->dimensions().TotalSize()); + + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + internal::pstore(values, x); + for (int i = 0; i < packetSize; ++i) { + coeffRef(index+i) = values[i]; + } + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONCATENATION_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h new file mode 100644 index 0000000..8b35f79 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h @@ -0,0 +1,1023 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H + +namespace Eigen { + +/** \class TensorContraction + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor contraction class. + * + * + */ +namespace internal { + +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename gebp_traits::type, + typename remove_const::type>::ResScalar Scalar; + + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + + // From NumDims below. 
+ static const int NumDimensions = traits::NumDimensions + traits::NumDimensions - 2 * array_size::value; + static const int Layout = traits::Layout; + typedef typename conditional::val, + typename traits::PointerType, + typename traits::PointerType>::type + PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorContractionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorContractionOp type; +}; + +template +struct traits, Device_> > { + typedef Indices_ Indices; + typedef LeftArgType_ LeftArgType; + typedef RightArgType_ RightArgType; + typedef OutputKernelType_ OutputKernelType; + typedef Device_ Device; + + // From NumDims below. + static const int NumDimensions = traits::NumDimensions + traits::NumDimensions - 2 * array_size::value; +}; + +// Helper class to allocate and deallocate temporary memory for packed buffers. +template +struct TensorContractionBlockMemAllocator { + typedef void* BlockMemHandle; + + template + EIGEN_DEVICE_FUNC static BlockMemHandle allocate(Device& d, const Index bm, + const Index bk, + const Index bn, + LhsScalar** lhs_block, + RhsScalar** rhs_block) { + eigen_assert(lhs_block); + eigen_assert(rhs_block); + BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, bn); + char* block_mem = static_cast(d.allocate(sz.lhs_size + sz.rhs_size)); + eigen_assert(block_mem); + *lhs_block = reinterpret_cast(block_mem); + *rhs_block = reinterpret_cast(block_mem + sz.lhs_size); + return block_mem; + } + + template + EIGEN_DEVICE_FUNC static BlockMemHandle allocateSlices( + Device& d, const Index bm, const Index bk, const Index bn, + const Index num_lhs, const Index num_rhs, const Index num_slices, + std::vector* lhs_blocks, + std::vector* rhs_blocks) { + eigen_assert(num_slices > 0); + eigen_assert(num_lhs >= 0 && num_rhs >= 0); + eigen_assert(num_lhs == 0 || lhs_blocks); + eigen_assert(num_rhs == 0 || rhs_blocks); + BlockSizes sz = ComputeLhsRhsBlockSizes(bm, bk, 
bn); + void* block_mem = d.allocate( + (num_lhs * sz.lhs_size + num_rhs * sz.rhs_size) * num_slices); + eigen_assert(block_mem); + char* mem = static_cast(block_mem); + + for (Index x = 0; x < num_slices; x++) { + if (num_lhs > 0) lhs_blocks[x].resize(num_lhs); + for (Index m = 0; m < num_lhs; m++) { + lhs_blocks[x][m] = reinterpret_cast(mem); + mem += sz.lhs_size; + } + if (num_rhs > 0) rhs_blocks[x].resize(num_rhs); + for (Index n = 0; n < num_rhs; n++) { + rhs_blocks[x][n] = reinterpret_cast(mem); + mem += sz.rhs_size; + } + } + + return block_mem; + } + + template + EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) { + d.deallocate(handle); + } + + private: + struct BlockSizes { + Index lhs_size; + Index rhs_size; + }; + EIGEN_DEVICE_FUNC static BlockSizes ComputeLhsRhsBlockSizes(const Index bm, + const Index bk, + const Index bn) { + Index align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1); + BlockSizes sz; + sz.lhs_size = divup(bm * bk * sizeof(LhsScalar), align) * align; + sz.rhs_size = divup(bn * bk * sizeof(RhsScalar), align) * align; + return sz; + } +}; + +// WARNING: In this code we assume that Lhs and Rhs tensor expressions are in +// ColMajor storage order. This property is guaranteed by the +// TensorContractionOp evaluator. TensorContractionKernel specifies how we pack +// blocks of Lhs and Rhs tensor expressions, and how we invoke matrix +// multiplication for these blocks. Default tensor contraction uses +// gemm_pack_rhs, gemm_pack_lhs and gebp_kernel from Eigen Core (see +// GeneralBlocPanelKernel.h for details). +// +// By specializing contraction kernels we can use other low level libraries to +// perform matrix multiplication, and still rely on Eigen contraction evaluator. +// This also includes full support in TensorContractionThreadPool, assuming that +// underlying gemm do not use it's own threading. 
+// +// - ResScalar/LhsScalar/RhsScalar - scalar type for the result of +// multiplication, lhs tensor and rhs tensor respectively. +// +// - StorageIndex - index type for the tensor expressions. In practice almost +// always is Eigen::Index. +// +// - OutputMapper provides access to the memory of the output matrix. In +// practice it's always column major blas_data_mapper (it must be of ResScalar +// type). +// +// - LhsMapper/RhsMapper similarly to blas_data_mapper provide a two dimensional +// view into the Lhs/Rhs tensor expressions. In practice it's +// TensorContractionInputMapper, or some specialization of it based on the +// type of tensor expression (e.g. TensorImagePatchOp has optimized input +// mapper). +template +struct TensorContractionKernel { + // True if `invoke()` supports `beta` in `C <- alpha * A * B + beta * C` + // (otherwise beta should be always equal to 1). + enum { HasBeta = false }; + + EIGEN_DEVICE_FUNC + TensorContractionKernel(StorageIndex m_, StorageIndex k_, StorageIndex n_, + StorageIndex bm_, StorageIndex bk_, StorageIndex bn_) + : m(m_), k(k_), n(n_), bm(bm_), bk(bk_), bn(bn_) {} + + // Pack blocks of Lhs and Rhs into contiguous blocks in memory. + typedef LhsScalar* LhsBlock; + typedef RhsScalar* RhsBlock; + + // Packed Lhs/Rhs block memory allocator. 
+ typedef TensorContractionBlockMemAllocator + BlockMemAllocator; + typedef typename BlockMemAllocator::BlockMemHandle BlockMemHandle; + + typedef typename internal::gebp_traits Traits; + + typedef internal::gemm_pack_lhs< + LhsScalar, StorageIndex, typename LhsMapper::SubMapper, Traits::mr, + Traits::LhsProgress, typename Traits::LhsPacket4Packing, ColMajor> + LhsPacker; + + typedef internal::gemm_pack_rhs + RhsPacker; + + typedef internal::gebp_kernel + GebpKernel; + + template + EIGEN_DEVICE_FUNC BlockMemHandle allocate(Device& d, LhsBlock* lhs_block, + RhsBlock* rhs_block) { + return BlockMemAllocator::allocate(d, bm, bk, bn, lhs_block, rhs_block); + } + + template + EIGEN_DEVICE_FUNC BlockMemHandle allocateSlices( + Device& d, const StorageIndex num_lhs, const StorageIndex num_rhs, + const StorageIndex num_slices, std::vector* lhs_blocks, + std::vector* rhs_blocks) { + return BlockMemAllocator::allocateSlices( + d, bm, bk, bn, num_lhs, num_rhs, num_slices, lhs_blocks, rhs_blocks); + } + + template + EIGEN_DEVICE_FUNC static void deallocate(Device& d, BlockMemHandle handle) { + BlockMemAllocator::deallocate(d, handle); + } + + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packLhs( + LhsBlock* lhsBlock, const typename LhsMapper::SubMapper& data_mapper, + const StorageIndex depth, const StorageIndex rows) { + LhsPacker()(*lhsBlock, data_mapper, depth, rows, /*stride*/ 0, + /*offset*/ 0); + } + + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void packRhs( + RhsBlock* rhsBlock, const typename RhsMapper::SubMapper& data_mapper, + const StorageIndex depth, const StorageIndex cols) { + RhsPacker()(*rhsBlock, data_mapper, depth, cols); + } + + EIGEN_DEVICE_FUNC EIGEN_DONT_INLINE void invoke( + const OutputMapper& output_mapper, const LhsBlock& lhsBlock, + const RhsBlock& rhsBlock, const StorageIndex rows, + const StorageIndex depth, const StorageIndex cols, + const ResScalar alpha, const ResScalar beta) { + // Default GEBP kernel does not support beta. 
+ eigen_assert(beta == ResScalar(1)); + static const int kComputeStrideFromBlockDimensions = -1; + GebpKernel()(output_mapper, lhsBlock, rhsBlock, rows, depth, cols, alpha, + /*strideA*/ kComputeStrideFromBlockDimensions, + /*strideB*/ kComputeStrideFromBlockDimensions, + /*offsetA*/ 0, /*offsetB*/ 0); + } + + private: + // These are dimensions of the original Tensors, and selected block sizes. The + // actual block sizes passed to all function above might be smaller because of + // the partial blocks at the end. + const StorageIndex m; + const StorageIndex k; + const StorageIndex n; + const StorageIndex bm; + const StorageIndex bk; + const StorageIndex bn; +}; + +} // end namespace internal + +// Tensor contraction params that should enable to get from output matrix +// 2-dimensional coordinates to the output tensor dimensions. +struct TensorContractionParams { + // TensorContraction evaluator assumes that both tensors are in ColMajor + // layout, if tensors are in RowMajor evaluator swap lhs with rhs. + bool swapped_arguments; +}; + +// Output kernel allows to fuse operations into the tensor contraction. +// +// Examples: +// 1. Elementwise Relu transformation following Conv2D. +// 2. AddBias to the Conv2D output channels dimension. +// +// The NoOpOutputKernel implements an output kernel that does absolutely nothing. +struct NoOpOutputKernel { + /** + * Tensor contraction evaluator calls this kernel after finishing each block + * of output matrix. Output blocks belong to the 2-dimensional output tensor. + * + * TensorContractionParams contains contraction dimensions information + * required to map output 2-d space into the expected output tensor space + * (potentially higher dimensional). 
+ * + * \param[in] output_mapper Access to output tensor memory + * \param[in] params Tensor contraction parameters + * \param[in] i Index of a first row available through output_mapper + * \param[in] j Index of a first column available through output_mapper + * \param[in] num_rows Number of available rows + * \param[in] num_cols Number of available columns + */ + template + EIGEN_ALWAYS_INLINE void operator()( + const internal::blas_data_mapper& output_mapper, + const TensorContractionParams& params, Index i, + Index j, Index num_rows, Index num_cols) const { + EIGEN_UNUSED_VARIABLE(output_mapper); + EIGEN_UNUSED_VARIABLE(params); + EIGEN_UNUSED_VARIABLE(i); + EIGEN_UNUSED_VARIABLE(j); + EIGEN_UNUSED_VARIABLE(num_rows); + EIGEN_UNUSED_VARIABLE(num_cols); + } +}; + +template +class TensorContractionOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename internal::gebp_traits::ResScalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionOp( + const LhsXprType& lhs, const RhsXprType& rhs, const Indices& dims, + const OutputKernelType& output_kernel = OutputKernelType()) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_indices(dims), + m_output_kernel(output_kernel) {} + + EIGEN_DEVICE_FUNC + const Indices& indices() const { return m_indices; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + EIGEN_DEVICE_FUNC + const OutputKernelType& outputKernel() const { return m_output_kernel; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + 
const Indices m_indices; + const OutputKernelType m_output_kernel; +}; + + +template +struct TensorContractionEvaluatorBase : internal::no_assignment_operator +{ + typedef typename internal::traits::Indices Indices; + typedef typename internal::traits::LeftArgType LeftArgType; + typedef typename internal::traits::RightArgType RightArgType; + typedef typename internal::traits::OutputKernelType OutputKernelType; + typedef typename internal::traits::Device Device; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. 
+ typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + typedef TensorEvaluator LeftEvaluatorType; + typedef TensorEvaluator RightEvaluatorType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + typedef DSizes Dimensions; + + EIGEN_STRONG_INLINE + TensorContractionEvaluatorBase(const XprType& op, const Device& device) + : m_leftImpl(choose(Cond(Layout) == static_cast(ColMajor)>(), + op.lhsExpression(), op.rhsExpression()), device), + m_rightImpl(choose(Cond(Layout) == static_cast(ColMajor)>(), + op.rhsExpression(), op.lhsExpression()), device), + m_device(device), + m_output_kernel(op.outputKernel()), + m_result(NULL) { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + + DSizes eval_left_dims; + DSizes eval_right_dims; + array, ContractDims> eval_op_indices; + if (static_cast(Layout) == static_cast(ColMajor)) { + // For ColMajor, we keep using the existing dimensions + for (int i = 0; i < LDims; i++) { + eval_left_dims[i] = m_leftImpl.dimensions()[i]; + } + for (int i = 0; i < RDims; i++) { + eval_right_dims[i] = m_rightImpl.dimensions()[i]; + } + // We keep the pairs of contracting indices. 
+ for (int i = 0; i < ContractDims; i++) { + eval_op_indices[i].first = op.indices()[i].first; + eval_op_indices[i].second = op.indices()[i].second; + } + } else { + // For RowMajor, we need to reverse the existing dimensions + for (int i = 0; i < LDims; i++) { + eval_left_dims[i] = m_leftImpl.dimensions()[LDims - i - 1]; + } + for (int i = 0; i < RDims; i++) { + eval_right_dims[i] = m_rightImpl.dimensions()[RDims - i - 1]; + } + // We need to flip all the pairs of contracting indices as well as + // reversing the dimensions. + for (int i = 0; i < ContractDims; i++) { + eval_op_indices[i].first = LDims - 1 - op.indices()[ContractDims - 1 - i].second; + eval_op_indices[i].second = RDims - 1 - op.indices()[ContractDims - 1 - i].first; + } + } + + // Check for duplicate axes and make sure the first index in eval_op_indices + // is increasing. Using O(n^2) sorting is OK since ContractDims is small + for (int i = 0; i < ContractDims; i++) { + for (int j = i + 1; j < ContractDims; j++) { + eigen_assert(eval_op_indices[j].first != eval_op_indices[i].first && + eval_op_indices[j].second != eval_op_indices[i].second && + "contraction axes should be unique"); + if (eval_op_indices[j].first < eval_op_indices[i].first) { + numext::swap(eval_op_indices[j], eval_op_indices[i]); + } + } + } + + array lhs_strides; + lhs_strides[0] = 1; + for (int i = 0; i < LDims-1; ++i) { + lhs_strides[i+1] = lhs_strides[i] * eval_left_dims[i]; + } + + array rhs_strides; + rhs_strides[0] = 1; + for (int i = 0; i < RDims-1; ++i) { + rhs_strides[i+1] = rhs_strides[i] * eval_right_dims[i]; + } + + if (m_i_strides.size() > 0) m_i_strides[0] = 1; + if (m_j_strides.size() > 0) m_j_strides[0] = 1; + if (m_k_strides.size() > 0) m_k_strides[0] = 1; + + m_i_size = 1; + m_j_size = 1; + m_k_size = 1; + + // To compute the dimension, we simply concatenate the non-contracting + // dimensions of the left and then the right tensor. 
Additionally, we also + // compute the strides corresponding to the left non-contracting + // dimensions and right non-contracting dimensions. + m_lhs_inner_dim_contiguous = true; + int dim_idx = 0; + Index nocontract_idx = 0; + + for (int i = 0; i < LDims; i++) { + // find if we are contracting on index i of left tensor + bool contracting = false; + for (int j = 0; j < ContractDims; j++) { + if (eval_op_indices[j].first == i) { + contracting = true; + break; + } + } + if (!contracting) { + // add dimension size to output dimensions + m_dimensions[dim_idx] = eval_left_dims[i]; + m_left_nocontract_strides[nocontract_idx] = lhs_strides[i]; + if (dim_idx != i) { + m_lhs_inner_dim_contiguous = false; + } + if (nocontract_idx+1 < internal::array_size::value) { + m_i_strides[nocontract_idx+1] = + m_i_strides[nocontract_idx] * eval_left_dims[i]; + } else { + m_i_size = m_i_strides[nocontract_idx] * eval_left_dims[i]; + } + dim_idx++; + nocontract_idx++; + } + } + + nocontract_idx = 0; + for (int i = 0; i < RDims; i++) { + bool contracting = false; + // find if we are contracting on index i of right tensor + for (int j = 0; j < ContractDims; j++) { + if (eval_op_indices[j].second == i) { + contracting = true; + break; + } + } + if (!contracting) { + m_dimensions[dim_idx] = eval_right_dims[i]; + if (nocontract_idx+1 < internal::array_size::value) { + m_j_strides[nocontract_idx+1] = + m_j_strides[nocontract_idx] * eval_right_dims[i]; + } else { + m_j_size = m_j_strides[nocontract_idx] * eval_right_dims[i]; + } + m_right_nocontract_strides[nocontract_idx] = rhs_strides[i]; + dim_idx++; + nocontract_idx++; + } + } + + // Now compute the strides corresponding to the contracting dimensions. We + // assumed above that non-contracting axes are represented in the same order + // in the matrix as they are in the tensor. This is not the case for + // contracting axes. As the contracting axes must be of the same size in + // each tensor, we'll only look at the first tensor here. 
+ m_rhs_inner_dim_contiguous = true; + m_rhs_inner_dim_reordered = false; + for (int i = 0; i < ContractDims; i++) { + Index left = eval_op_indices[i].first; + Index right = eval_op_indices[i].second; + + Index size = eval_left_dims[left]; + eigen_assert(size == eval_right_dims[right] && + "Contraction axes must be same size"); + + if (i+1 < static_cast(internal::array_size::value)) { + m_k_strides[i+1] = m_k_strides[i] * size; + } else { + m_k_size = m_k_strides[i] * size; + } + m_left_contracting_strides[i] = lhs_strides[left]; + m_right_contracting_strides[i] = rhs_strides[right]; + + if (i > 0 && right < eval_op_indices[i-1].second) { + m_rhs_inner_dim_reordered = true; + } + if (right != i) { + m_rhs_inner_dim_contiguous = false; + } + } + + // If the layout is RowMajor, we need to reverse the m_dimensions + if (static_cast(Layout) == static_cast(RowMajor)) { + for (int i = 0, j = NumDims - 1; i < j; i++, j--) { + numext::swap(m_dimensions[i], m_dimensions[j]); + } + } + + // A set of parameters that will allow output kernel to get from output + // tensor dimensions (i, j) into the original tensor dimensions. + // TODO(ezhulenev): Add parameters required to infer output tensor index for + // more complex contractions than 2x2 on internal dimension. 
+ m_tensor_contraction_params.swapped_arguments = static_cast(Layout) == RowMajor; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalTo(m_result); + return true; + } + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType dest, EvalSubExprsCallback done) { + m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) { + m_rightImpl.evalSubExprsIfNeededAsync(nullptr, [this, done, dest](bool) { + if (dest) { + evalToAsync(dest, [done]() { done(false); }); + } else { + m_result = static_cast( + m_device.allocate(dimensions().TotalSize() * sizeof(Scalar))); + evalToAsync(m_result, [done]() { done(true); }); + } + }); + }); + } +#endif // EIGEN_USE_THREADS + +#ifndef TENSOR_CONTRACTION_DISPATCH +#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \ + if (this->m_lhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } \ + } else { \ + if (this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + METHOD ARGS; \ + } else { \ + METHOD ARGS; \ + } \ + } \ + } +#endif + +#ifndef TENSOR_CONTRACTION_ASYNC_DISPATCH +#define TENSOR_CONTRACTION_ASYNC_DISPATCH(METHOD, DONE, ALIGNMENT, ARGS, FN) \ + if (this->m_lhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_contiguous) { 
\ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } \ + } else { \ + if (this->m_rhs_inner_dim_contiguous) { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } else { \ + if (this->m_rhs_inner_dim_reordered) { \ + (new METHOD ARGS)->FN; \ + } else { \ + (new METHOD ARGS)->FN; \ + } \ + } \ + } +#endif + + EIGEN_DEVICE_FUNC void evalTo(Scalar* buffer) const { + static_cast(this)->template evalProduct(buffer); + } + +#ifdef EIGEN_USE_THREADS + template + void evalToAsync(Scalar* buffer, EvalToCallback done) const { + static_cast(this) + ->template evalProductAsync(buffer, + std::move(done)); + } +#endif // EIGEN_USE_THREADS + + template + void evalProductSequential(Scalar* buffer) const { + if (this->m_j_size == 1) { + this->template evalGemv(buffer); + } else { + this->template evalGemm(buffer); + } + } + + template + #if !defined(EIGEN_HIPCC) + EIGEN_DEVICE_FUNC + #endif + void evalGemv(Scalar* buffer) const { + const Index rows = m_i_size; + const Index cols = m_k_size; + + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + const Index lhs_packet_size = internal::unpacket_traits::size; + const Index rhs_packet_size = internal::unpacket_traits::size; + const int lhs_alignment = LeftEvaluator::IsAligned ? Aligned : Unaligned; + const int rhs_alignment = RightEvaluator::IsAligned ? 
Aligned : Unaligned; + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + LhsMapper lhs(m_leftImpl, m_left_nocontract_strides, m_i_strides, + m_left_contracting_strides, m_k_strides); + RhsMapper rhs(m_rightImpl, m_right_nocontract_strides, m_j_strides, + m_right_contracting_strides, m_k_strides); + + const Scalar alpha(1); + const Index resIncr(1); + + // zero out the result buffer (which must be of size at least rows * sizeof(Scalar) + m_device.memset(buffer, 0, rows * sizeof(Scalar)); + + internal::general_matrix_vector_product::run( + rows, cols, lhs, rhs, + buffer, resIncr, alpha); + + typedef internal::blas_data_mapper OutputMapper; + m_output_kernel(OutputMapper(buffer, rows), m_tensor_contraction_params, + static_cast(0), static_cast(0), rows, + static_cast(1)); + } + + template + #if !defined(EIGEN_HIPCC) + EIGEN_DEVICE_FUNC + #endif + void evalGemm(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + this->template evalGemmPartial(buffer, 0, k, 1); + } + + template + EIGEN_DEVICE_FUNC void evalGemmPartialWithoutOutputKernel( + Scalar* buffer, Index k_start, Index k_end, int num_threads) const { + evalGemmPartial(buffer, k_start, k_end, + num_threads); + } + + template + EIGEN_DEVICE_FUNC void evalGemmPartial(Scalar* buffer, Index k_start, Index k_end, int num_threads) const { + eigen_assert(k_end >= k_start && k_start >= 0 && k_end <= this->m_k_size); + // columns in slice on left side, rows on right side + const Index k_slice = k_end - k_start; + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // define data mappers for Lhs and Rhs + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + const Index 
lhs_packet_size = internal::unpacket_traits::size; + const Index rhs_packet_size = internal::unpacket_traits::size; + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + typedef internal::TensorContractionKernel< + Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper> + TensorContractionKernel; + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + + // Sizes of the blocks to load in cache. See the Goto paper for details. + internal::TensorContractionBlocking + blocking(k_slice, m, n, num_threads); + const Index kc = blocking.kc(); + const Index mc = numext::mini(m, blocking.mc()); + const Index nc = numext::mini(n, blocking.nc()); + + typedef typename TensorContractionKernel::LhsBlock LhsBlock; + typedef typename TensorContractionKernel::RhsBlock RhsBlock; + + LhsBlock blockA; + RhsBlock blockB; + + TensorContractionKernel kernel(m, k_slice, n, mc, kc, nc); + + typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle; + const BlockMemHandle packed_mem = + kernel.allocate(this->m_device, &blockA, &blockB); + + // If a contraction kernel does not support beta, explicitly initialize + // output buffer with zeroes. 
+ if (!TensorContractionKernel::HasBeta) { + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + } + + for(Index i2=0; i2= k_end) { + m_output_kernel(output_mapper, m_tensor_contraction_params, i2, j2, + actual_mc, actual_nc); + } + } + } + } + + kernel.deallocate(this->m_device, packed_mem); + } + + EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + + if (m_result != NULL) { + m_device.deallocate(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_result; } + +protected: + Dimensions m_dimensions; + + contract_t m_k_strides; + contract_t m_left_contracting_strides; + contract_t m_right_contracting_strides; + + bool m_lhs_inner_dim_contiguous; + bool m_rhs_inner_dim_contiguous; + bool m_rhs_inner_dim_reordered; + + left_nocontract_t m_i_strides; + right_nocontract_t m_j_strides; + left_nocontract_t m_left_nocontract_strides; + right_nocontract_t m_right_nocontract_strides; + + Index m_i_size; + Index m_j_size; + Index m_k_size; + + TensorContractionParams m_tensor_contraction_params; + + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; + const Device EIGEN_DEVICE_REF m_device; + OutputKernelType m_output_kernel; + EvaluatorPointerType m_result; +}; + + +// evaluator for default device +template +struct TensorEvaluator, Device> : + public TensorContractionEvaluatorBase< + TensorEvaluator, Device> > { + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename 
internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + // Could we use NumDimensions here? + typedef DSizes Dimensions; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) { } + + template + void evalProduct(Scalar* buffer) const { + TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, Alignment, (buffer)); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h new file mode 100644 index 0000000..974feb0 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h @@ -0,0 +1,73 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H + + +namespace Eigen { +namespace internal { + +enum { + ShardByRow = 0, + ShardByCol = 1 +}; + + +// Default Blocking Strategy +template +class TensorContractionBlocking { + public: + + /* + adding EIGEN_DEVICE_FUNC unconditionally to 'TensorContractionBlocking' constructor in `TensorContractionBlocking.h` + requires adding EIGEN_DEVICE_FUNC to `computeProductBlockingSizes` in `GeneralBlockPanelKernel.h` + which in turn, requires adding EIGEN_DEVICE_FUNC to `evaluateProductBlockingSizesHeuristic` in `GeneralBlockPanelKernel.h` + which in turn, requires adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h` + (else HIPCC will error out) + + However adding EIGEN_DEVICE_FUNC to `manage_caching_sizes` in `GeneralBlockPanelKernel.h` + results in NVCC erroring out with the following error + + ../Eigen/src/Core/products/GeneralBlockPanelKernel.h(57): error #2901: + dynamic initialization is not supported for function-scope static variables within a __device__/__global__ function + */ + + #if !defined(EIGEN_HIPCC) + EIGEN_DEVICE_FUNC + #endif + TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n, StorageIndex num_threads = 1) : + kc_(k), mc_(m), nc_(n) + { + if (ShardingType == ShardByCol) { + computeProductBlockingSizes(kc_, mc_, nc_, num_threads); + } + else { + computeProductBlockingSizes(kc_, nc_, mc_, num_threads); + } + + const int rhs_packet_size = internal::packet_traits::size; + kc_ = (rhs_packet_size <= 8 || kc_ <= rhs_packet_size) ? 
+ kc_ : (kc_ / rhs_packet_size) * rhs_packet_size; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex kc() const { return kc_; } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex mc() const { return mc_; } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE StorageIndex nc() const { return nc_; } + + private: + StorageIndex kc_; + StorageIndex mc_; + StorageIndex nc_; +}; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_BLOCKING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h new file mode 100644 index 0000000..3f315fe --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionCuda.h @@ -0,0 +1,6 @@ + +#if defined(__clang__) || defined(__GNUC__) +#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorContractionGpu.h file" +#endif + +#include "TensorContractionGpu.h" diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h new file mode 100644 index 0000000..c818038 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h @@ -0,0 +1,1413 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014-2015 Benoit Steiner +// Copyright (C) 2015 Navdeep Jaitly +// Copyright (C) 2014 Eric Martin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H + +#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) + +namespace Eigen { + +template +__device__ EIGEN_STRONG_INLINE void +EigenContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, Scalar* lhs_shmem, Scalar* rhs_shmem, + const Index m_size, const Index n_size, const Index k_size) { + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + // declare and initialize 64 registers for output 8x8 block + + // prefetch registers + Scalar lhs_pf0; + Scalar lhs_pf1; + Scalar lhs_pf2; + Scalar lhs_pf3; + Scalar lhs_pf4; + Scalar lhs_pf5; + Scalar lhs_pf6; + Scalar lhs_pf7; + + Scalar rhs_pf0; + Scalar rhs_pf1; + Scalar rhs_pf2; + Scalar rhs_pf3; + Scalar rhs_pf4; + Scalar rhs_pf5; + Scalar rhs_pf6; + Scalar rhs_pf7; + + // shared memory is formatted + // (contract idx in block, nocontract idx in block, block idx) + // where block idx is column major. This transposition limits the number of + // bank conflicts when reading the LHS. The core idea is that since the contracting + // index is shared by both sides, then the contracting index should be in threadIdx.x. + + // On the LHS, we pad each row inside of each block with an extra element. This makes + // each block 8 rows of 9 elements, which is 72 elements. This gives no bank conflicts + // on writes and very few 2-way conflicts on reads. There is an 8x8 grid of these blocks. + + // On the RHS we just add 8 padding elements to the end of each block. This gives no bank + // conflicts on writes and also none on reads. 
+ + // storage indices + const Index lhs_store_idx_base = threadIdx.y * 72 + threadIdx.x * 9 + threadIdx.z; + const Index rhs_store_idx_base = threadIdx.y * 72 + threadIdx.z * 8 + threadIdx.x; + + const Index lhs_store_idx_0 = lhs_store_idx_base + 576 * 0; + const Index lhs_store_idx_1 = lhs_store_idx_base + 576 * 1; + const Index lhs_store_idx_2 = lhs_store_idx_base + 576 * 2; + const Index lhs_store_idx_3 = lhs_store_idx_base + 576 * 3; + const Index lhs_store_idx_4 = lhs_store_idx_base + 576 * 4; + const Index lhs_store_idx_5 = lhs_store_idx_base + 576 * 5; + const Index lhs_store_idx_6 = lhs_store_idx_base + 576 * 6; + const Index lhs_store_idx_7 = lhs_store_idx_base + 576 * 7; + + const Index rhs_store_idx_0 = rhs_store_idx_base + 576 * 0; + const Index rhs_store_idx_1 = rhs_store_idx_base + 576 * 1; + const Index rhs_store_idx_2 = rhs_store_idx_base + 576 * 2; + const Index rhs_store_idx_3 = rhs_store_idx_base + 576 * 3; + const Index rhs_store_idx_4 = rhs_store_idx_base + 576 * 4; + const Index rhs_store_idx_5 = rhs_store_idx_base + 576 * 5; + const Index rhs_store_idx_6 = rhs_store_idx_base + 576 * 6; + const Index rhs_store_idx_7 = rhs_store_idx_base + 576 * 7; + + // in the loading code, the following variables are important: + // threadIdx.x: the vertical position in an 8x8 block + // threadIdx.y: the vertical index of the 8x8 block in the grid + // threadIdx.z: the horizontal position in an 8x8 block + // k: the horizontal index of the 8x8 block in the grid + // + // The k parameter is implicit (it was the loop counter for a loop that went + // from 0 to <8, but now that loop is unrolled in the below code. 
+ + const Index load_idx_vert = threadIdx.x + 8 * threadIdx.y; + const Index lhs_vert = base_m + load_idx_vert; + +#define prefetchIntoRegisters(base_k) \ + { \ + lhs_pf0 = conv(0); \ + lhs_pf1 = conv(0); \ + lhs_pf2 = conv(0); \ + lhs_pf3 = conv(0); \ + lhs_pf4 = conv(0); \ + lhs_pf5 = conv(0); \ + lhs_pf6 = conv(0); \ + lhs_pf7 = conv(0); \ + \ + rhs_pf0 = conv(0); \ + rhs_pf1 = conv(0); \ + rhs_pf2 = conv(0); \ + rhs_pf3 = conv(0); \ + rhs_pf4 = conv(0); \ + rhs_pf5 = conv(0); \ + rhs_pf6 = conv(0); \ + rhs_pf7 = conv(0); \ + \ + if (!needs_edge_check || lhs_vert < m_size) { \ + const Index lhs_horiz_0 = base_k + threadIdx.z + 0 * 8; \ + const Index lhs_horiz_1 = base_k + threadIdx.z + 1 * 8; \ + const Index lhs_horiz_2 = base_k + threadIdx.z + 2 * 8; \ + const Index lhs_horiz_3 = base_k + threadIdx.z + 3 * 8; \ + const Index lhs_horiz_4 = base_k + threadIdx.z + 4 * 8; \ + const Index lhs_horiz_5 = base_k + threadIdx.z + 5 * 8; \ + const Index lhs_horiz_6 = base_k + threadIdx.z + 6 * 8; \ + const Index lhs_horiz_7 = base_k + threadIdx.z + 7 * 8; \ + \ + if (!needs_edge_check || lhs_horiz_7 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \ + lhs_pf7 = lhs(lhs_vert, lhs_horiz_7); \ + } else if (lhs_horiz_6 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + lhs_pf6 = lhs(lhs_vert, lhs_horiz_6); \ + } else if (lhs_horiz_5 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ 
+ lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + lhs_pf5 = lhs(lhs_vert, lhs_horiz_5); \ + } else if (lhs_horiz_4 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + lhs_pf4 = lhs(lhs_vert, lhs_horiz_4); \ + } else if (lhs_horiz_3 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + lhs_pf3 = lhs(lhs_vert, lhs_horiz_3); \ + } else if (lhs_horiz_2 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + lhs_pf2 = lhs(lhs_vert, lhs_horiz_2); \ + } else if (lhs_horiz_1 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + lhs_pf1 = lhs(lhs_vert, lhs_horiz_1); \ + } else if (lhs_horiz_0 < k_size) { \ + lhs_pf0 = lhs(lhs_vert, lhs_horiz_0); \ + } \ + } \ + \ + const Index rhs_vert = base_k + load_idx_vert; \ + if (!needs_edge_check || rhs_vert < k_size) { \ + const Index rhs_horiz_0 = base_n + threadIdx.z + 0 * 8; \ + const Index rhs_horiz_1 = base_n + threadIdx.z + 1 * 8; \ + const Index rhs_horiz_2 = base_n + threadIdx.z + 2 * 8; \ + const Index rhs_horiz_3 = base_n + threadIdx.z + 3 * 8; \ + const Index rhs_horiz_4 = base_n + threadIdx.z + 4 * 8; \ + const Index rhs_horiz_5 = base_n + threadIdx.z + 5 * 8; \ + const Index rhs_horiz_6 = base_n + threadIdx.z + 6 * 8; \ + const Index rhs_horiz_7 = base_n + threadIdx.z + 7 * 8; \ + \ + if (rhs_horiz_7 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \ + rhs_pf7 = rhs(rhs_vert, rhs_horiz_7); \ + } else if (rhs_horiz_6 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); 
\ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + rhs_pf6 = rhs(rhs_vert, rhs_horiz_6); \ + } else if (rhs_horiz_5 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + rhs_pf5 = rhs(rhs_vert, rhs_horiz_5); \ + } else if (rhs_horiz_4 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + rhs_pf4 = rhs(rhs_vert, rhs_horiz_4); \ + } else if (rhs_horiz_3 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + rhs_pf3 = rhs(rhs_vert, rhs_horiz_3); \ + } else if (rhs_horiz_2 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + rhs_pf2 = rhs(rhs_vert, rhs_horiz_2); \ + } else if (rhs_horiz_1 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + rhs_pf1 = rhs(rhs_vert, rhs_horiz_1); \ + } else if (rhs_horiz_0 < n_size) { \ + rhs_pf0 = rhs(rhs_vert, rhs_horiz_0); \ + } \ + } \ + } \ + +#define writeRegToShmem(_) \ + lhs_shmem[lhs_store_idx_0] = lhs_pf0; \ + rhs_shmem[rhs_store_idx_0] = rhs_pf0; \ + \ + lhs_shmem[lhs_store_idx_1] = lhs_pf1; \ + rhs_shmem[rhs_store_idx_1] = rhs_pf1; \ + \ + lhs_shmem[lhs_store_idx_2] = lhs_pf2; \ + rhs_shmem[rhs_store_idx_2] = rhs_pf2; \ + \ + lhs_shmem[lhs_store_idx_3] = lhs_pf3; \ + rhs_shmem[rhs_store_idx_3] = rhs_pf3; \ + \ + lhs_shmem[lhs_store_idx_4] = lhs_pf4; \ + rhs_shmem[rhs_store_idx_4] = rhs_pf4; \ + \ + lhs_shmem[lhs_store_idx_5] = lhs_pf5; \ + rhs_shmem[rhs_store_idx_5] = rhs_pf5; \ + \ + lhs_shmem[lhs_store_idx_6] = lhs_pf6; \ + rhs_shmem[rhs_store_idx_6] = rhs_pf6; \ + \ + 
lhs_shmem[lhs_store_idx_7] = lhs_pf7; \ + rhs_shmem[rhs_store_idx_7] = rhs_pf7; \ + + // declare and initialize result array +#define res(i, j) _res_##i##j +#define initResultRow(i) \ + Scalar res(i, 0) = conv(0); \ + Scalar res(i, 1) = conv(0); \ + Scalar res(i, 2) = conv(0); \ + Scalar res(i, 3) = conv(0); \ + Scalar res(i, 4) = conv(0); \ + Scalar res(i, 5) = conv(0); \ + Scalar res(i, 6) = conv(0); \ + Scalar res(i, 7) = conv(0); \ + + internal::scalar_cast_op conv; + initResultRow(0); + initResultRow(1); + initResultRow(2); + initResultRow(3); + initResultRow(4); + initResultRow(5); + initResultRow(6); + initResultRow(7); +#undef initResultRow + + for (Index base_k = 0; base_k < k_size; base_k += 64) { + // wait for previous iteration to finish with shmem. Despite common sense, + // the code is a bit faster with this here then at bottom of loop + __syncthreads(); + + prefetchIntoRegisters(base_k); + writeRegToShmem(); + + #undef prefetchIntoRegisters + #undef writeRegToShmem + + // wait for shared mem packing to be done before starting computation + __syncthreads(); + + // compute 8x8 matrix product by outer product. This involves packing one column + // of LHS and one row of RHS into registers (takes 16 registers). 
+ +#define lcol(i) _lcol##i + Scalar lcol(0); + Scalar lcol(1); + Scalar lcol(2); + Scalar lcol(3); + Scalar lcol(4); + Scalar lcol(5); + Scalar lcol(6); + Scalar lcol(7); + +#define rrow(j) _rrow##j + Scalar rrow(0); + Scalar rrow(1); + Scalar rrow(2); + Scalar rrow(3); + Scalar rrow(4); + Scalar rrow(5); + Scalar rrow(6); + Scalar rrow(7); + + // Now x corresponds to k, y to m, and z to n + const Scalar* lhs_block = &lhs_shmem[threadIdx.x + 9 * threadIdx.y]; + const Scalar* rhs_block = &rhs_shmem[threadIdx.x + 8 * threadIdx.z]; + +#define lhs_element(i, j) lhs_block[72 * ((i) + 8 * (j))] +#define rhs_element(i, j) rhs_block[72 * ((i) + 8 * (j))] + +#define loadData(i, j) \ + lcol(0) = lhs_element(0, j); \ + rrow(0) = rhs_element(i, 0); \ + lcol(1) = lhs_element(1, j); \ + rrow(1) = rhs_element(i, 1); \ + lcol(2) = lhs_element(2, j); \ + rrow(2) = rhs_element(i, 2); \ + lcol(3) = lhs_element(3, j); \ + rrow(3) = rhs_element(i, 3); \ + lcol(4) = lhs_element(4, j); \ + rrow(4) = rhs_element(i, 4); \ + lcol(5) = lhs_element(5, j); \ + rrow(5) = rhs_element(i, 5); \ + lcol(6) = lhs_element(6, j); \ + rrow(6) = rhs_element(i, 6); \ + lcol(7) = lhs_element(7, j); \ + rrow(7) = rhs_element(i, 7); \ + +#define computeCol(j) \ + res(0, j) += lcol(0) * rrow(j); \ + res(1, j) += lcol(1) * rrow(j); \ + res(2, j) += lcol(2) * rrow(j); \ + res(3, j) += lcol(3) * rrow(j); \ + res(4, j) += lcol(4) * rrow(j); \ + res(5, j) += lcol(5) * rrow(j); \ + res(6, j) += lcol(6) * rrow(j); \ + res(7, j) += lcol(7) * rrow(j); \ + +#define computePass(i) \ + loadData(i, i); \ + \ + computeCol(0); \ + computeCol(1); \ + computeCol(2); \ + computeCol(3); \ + computeCol(4); \ + computeCol(5); \ + computeCol(6); \ + computeCol(7); \ + + computePass(0); + computePass(1); + computePass(2); + computePass(3); + computePass(4); + computePass(5); + computePass(6); + computePass(7); + +#undef lcol +#undef rrow +#undef lhs_element +#undef rhs_element +#undef loadData +#undef computeCol +#undef 
computePass + } // end loop over k + + // we've now iterated over all of the large (ie width 64) k blocks and + // accumulated results in registers. At this point thread (x, y, z) contains + // the sum across all big k blocks of the product of little k block of index (x, y) + // with block of index (y, z). To compute the final output, we need to reduce + // the 8 threads over y by summation. +#if defined(EIGEN_HIPCC) || (defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000) +#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor(res(i, j), mask) +#else +#define shuffleInc(i, j, mask) res(i, j) += __shfl_xor_sync(0xFFFFFFFF, res(i, j), mask) +#endif + +#define reduceRow(i, mask) \ + shuffleInc(i, 0, mask); \ + shuffleInc(i, 1, mask); \ + shuffleInc(i, 2, mask); \ + shuffleInc(i, 3, mask); \ + shuffleInc(i, 4, mask); \ + shuffleInc(i, 5, mask); \ + shuffleInc(i, 6, mask); \ + shuffleInc(i, 7, mask); \ + +#define reduceMatrix(mask) \ + reduceRow(0, mask); \ + reduceRow(1, mask); \ + reduceRow(2, mask); \ + reduceRow(3, mask); \ + reduceRow(4, mask); \ + reduceRow(5, mask); \ + reduceRow(6, mask); \ + reduceRow(7, mask); \ + + // actually perform the reduction, now each thread of index (_, y, z) + // contains the correct values in its registers that belong in the output + // block + reduceMatrix(1); + reduceMatrix(2); + reduceMatrix(4); + +#undef shuffleInc +#undef reduceRow +#undef reduceMatrix + + // now we need to copy the 64 values into main memory. We can't split work + // among threads because all variables are in registers. There's 2 ways + // to do this: + // (1) have 1 thread do 64 writes from registers into global memory + // (2) have 1 thread do 64 writes into shared memory, and then 8 threads + // each do 8 writes into global memory. We can just overwrite the shared + // memory from the problem we just solved. 
+ // (2) is slightly faster than (1) due to less branching and more ILP + + // TODO: won't yield much gain, but could just use currently unused shared mem + // and then we won't have to sync + // wait for shared mem to be out of use + __syncthreads(); + +#define writeResultShmem(i, j) \ + lhs_shmem[i + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j] = res(i, j); \ + +#define writeRow(i) \ + writeResultShmem(i, 0); \ + writeResultShmem(i, 1); \ + writeResultShmem(i, 2); \ + writeResultShmem(i, 3); \ + writeResultShmem(i, 4); \ + writeResultShmem(i, 5); \ + writeResultShmem(i, 6); \ + writeResultShmem(i, 7); \ + + if (threadIdx.x == 0) { + writeRow(0); + writeRow(1); + writeRow(2); + writeRow(3); + writeRow(4); + writeRow(5); + writeRow(6); + writeRow(7); + } +#undef writeResultShmem +#undef writeRow + + const int max_i_write = numext::mini((int)((m_size - base_m - threadIdx.y + 7) / 8), 8); + const int max_j_write = numext::mini((int)((n_size - base_n - threadIdx.z + 7) / 8), 8); + + if (threadIdx.x < max_i_write) { + if (max_j_write == 8) { + // TODO: can i trade bank conflicts for coalesced writes? 
+ Scalar val0 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 0]; + Scalar val1 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 1]; + Scalar val2 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 2]; + Scalar val3 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 3]; + Scalar val4 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 4]; + Scalar val5 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 5]; + Scalar val6 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 6]; + Scalar val7 = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * 7]; + + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 0) = val0; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 1) = val1; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 2) = val2; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 3) = val3; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 4) = val4; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 5) = val5; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 6) = val6; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * 7) = val7; + } else { +#pragma unroll 7 + for (int j = 0; j < max_j_write; j++) { + Scalar val = lhs_shmem[threadIdx.x + 8 * threadIdx.y + 64 * threadIdx.z + 512 * j]; + output(base_m + threadIdx.y + 8 * threadIdx.x, base_n + threadIdx.z + 8 * j) = val; + } + } + } +#undef res +} + + +template +__global__ void +#if defined(EIGEN_HIPCC) +__launch_bounds__(512, 1) +#else +__launch_bounds__(512) +#endif +EigenContractionKernel(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ Scalar lhs_shmem[72 * 64]; + __shared__ 
Scalar rhs_shmem[72 * 64]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + if (base_m + 63 < m_size && base_n + 63 < n_size) { + EigenContractionKernelInternal(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size); + } else { + EigenContractionKernelInternal(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size); + } +} + + +template +__device__ __forceinline__ void +EigenFloatContractionKernelInternal16x16(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, float2 lhs_shmem2[][16], + float2 rhs_shmem2[][8], const Index m_size, + const Index n_size, const Index k_size, + const Index base_m, const Index base_n) { + + // prefetch registers + float4 lhs_pf0, rhs_pf0; + + float4 results[4]; + for (int i=0; i < 4; i++) { + results[i].x = results[i].y = results[i].z = results[i].w = 0; + } + +#define prefetch_lhs(reg, row, col) \ + if (!CHECK_LHS_BOUNDARY) { \ + if (col < k_size) { \ + reg =lhs.template loadPacket(row, col); \ + } \ + } else { \ + if (col < k_size) { \ + if (row + 3 < m_size) { \ + reg =lhs.template loadPacket(row, col); \ + } else if (row + 2 < m_size) { \ + reg.x =lhs(row + 0, col); \ + reg.y =lhs(row + 1, col); \ + reg.z =lhs(row + 2, col); \ + } else if (row + 1 < m_size) { \ + reg.x =lhs(row + 0, col); \ + reg.y =lhs(row + 1, col); \ + } else if (row < m_size) { \ + reg.x =lhs(row + 0, col); \ + } \ + } \ + } \ + + Index lhs_vert = base_m+threadIdx.x*4; + + for (Index k = 0; k < k_size; k += 16) { + + lhs_pf0 = internal::pset1(0); + rhs_pf0 = internal::pset1(0); + + Index lhs_horiz = threadIdx.y+k; + prefetch_lhs(lhs_pf0, lhs_vert, lhs_horiz) + + Index rhs_vert = k+(threadIdx.x%4)*4; + Index rhs_horiz0 = (threadIdx.x>>2)+threadIdx.y*4+base_n; + + if (!CHECK_RHS_BOUNDARY) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); 
+ } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if (rhs_vert + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } else { + if (rhs_horiz0 < n_size) { + if ((rhs_vert + 3) < k_size) { + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + } else if ((rhs_vert + 2) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if ((rhs_vert + 1) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } + } + float x1, x2 ; + // the following can be a bitwise operation..... some day. + if((threadIdx.x%8) < 4) { + x1 = rhs_pf0.y; + x2 = rhs_pf0.w; + } else { + x1 = rhs_pf0.x; + x2 = rhs_pf0.z; + } + #if defined(EIGEN_HIPCC) || (defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000) + x1 = __shfl_xor(x1, 4); + x2 = __shfl_xor(x2, 4); + #else + x1 = __shfl_xor_sync(0xFFFFFFFF, x1, 4); + x2 = __shfl_xor_sync(0xFFFFFFFF, x2, 4); + #endif + if((threadIdx.x%8) < 4) { + rhs_pf0.y = x1; + rhs_pf0.w = x2; + } else { + rhs_pf0.x = x1; + rhs_pf0.z = x2; + } + + // We have 64 features. + // Row 0 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 0, 1. + // Row 1 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 2, 3. + // ... + // Row 31 -> times (0, 4, 8, 12, 1, 5, 9, 13) for features 62, 63 + // Row 32 -> times (2, 6, 10, 14, 3, 7, 11, 15) for features 0, 1 + // ... 
+ rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2][threadIdx.x%8] = make_float2(rhs_pf0.x, rhs_pf0.y); + rhs_shmem2[(threadIdx.x>>3)+ threadIdx.y*2+32][threadIdx.x%8] = make_float2(rhs_pf0.z, rhs_pf0.w); + + // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // ... + // Row 15 (time 15) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) + // Row 16 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) + // ... + + lhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(lhs_pf0.x, lhs_pf0.y); + lhs_shmem2[threadIdx.y+16][threadIdx.x] = make_float2(lhs_pf0.z, lhs_pf0.w); + + +#define add_vals(fl1, fl2, fr1, fr2)\ + results[0].x += fl1.x * fr1.x;\ + results[0].y += fl1.y * fr1.x;\ + results[0].z += fl2.x * fr1.x;\ + results[0].w += fl2.y * fr1.x;\ +\ + results[1].x += fl1.x * fr1.y;\ + results[1].y += fl1.y * fr1.y;\ + results[1].z += fl2.x * fr1.y;\ + results[1].w += fl2.y * fr1.y;\ +\ + results[2].x += fl1.x * fr2.x;\ + results[2].y += fl1.y * fr2.x;\ + results[2].z += fl2.x * fr2.x;\ + results[2].w += fl2.y * fr2.x;\ +\ + results[3].x += fl1.x * fr2.y;\ + results[3].y += fl1.y * fr2.y;\ + results[3].z += fl2.x * fr2.y;\ + results[3].w += fl2.y * fr2.y;\ + + __syncthreads(); + + // Do the multiplies. + #pragma unroll + for (int koff = 0; koff < 16; koff ++) { + // 32 x threads. 
+ float2 fl1 = lhs_shmem2[koff][threadIdx.x]; + float2 fl2 = lhs_shmem2[koff + 16][threadIdx.x]; + + int start_feature = threadIdx.y * 4; + float2 fr1 = rhs_shmem2[(start_feature>>1) + 32*((koff%4)/2)][koff/4 + (koff%2)*4]; + float2 fr2 = rhs_shmem2[(start_feature>>1) + 1 + 32*((koff%4)/2)][koff/4 + (koff%2)*4]; + + add_vals(fl1, fl2, fr1, fr2) + } + __syncthreads(); + } + +#undef prefetch_lhs +#undef add_vals + + Index horiz_base = threadIdx.y*4+base_n; + if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (!CHECK_RHS_BOUNDARY) { + // CHECK LHS + if (lhs_vert + 3 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (lhs_vert + 2 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + } + } else if (lhs_vert + 1 < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + } + } else if (lhs_vert < m_size) { + for (int i = 0; i < 4; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + } + } + } else if (!CHECK_LHS_BOUNDARY) { + // CHECK RHS + /* + int ncols_rem = fminf(n_size- horiz_base, 4); + for (int i = 0; i < ncols_rem; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + 
}*/ + for (int i = 0; i < 4; i++) { + if (horiz_base+i < n_size) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } else { + // CHECK both boundaries. + for (int i = 0; i < 4; i++) { + if (horiz_base+i < n_size) { + if (lhs_vert < m_size) + output(lhs_vert, horiz_base + i) = results[i].x; + if (lhs_vert + 1 < m_size) + output(lhs_vert + 1, horiz_base + i) = results[i].y; + if (lhs_vert + 2 < m_size) + output(lhs_vert + 2, horiz_base + i) = results[i].z; + if (lhs_vert + 3 < m_size) + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } +} + + +template +__device__ __forceinline__ void +EigenFloatContractionKernelInternal(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, float2 lhs_shmem2[][32], + float2 rhs_shmem2[][8], const Index m_size, + const Index n_size, const Index k_size, + const Index base_m, const Index base_n) { + + // prefetch registers + float4 lhs_pf0, lhs_pf1, lhs_pf2, lhs_pf3; + float4 rhs_pf0, rhs_pf1; + + float4 results[8]; + for (int i=0; i < 8; i++) { + results[i].x = results[i].y = results[i].z = results[i].w = 0; + } + + Index lhs_vert = base_m+threadIdx.x*4+(threadIdx.y%4)*32; + for (Index k = 0; k < k_size; k += 32) { + lhs_pf0 = internal::pset1(0); + lhs_pf1 = internal::pset1(0); + lhs_pf2 = internal::pset1(0); + lhs_pf3 = internal::pset1(0); + + rhs_pf0 = internal::pset1(0); + rhs_pf1 = internal::pset1(0); + + if (!CHECK_LHS_BOUNDARY) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + lhs_pf3 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, 
(threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + } + } else { + // just CHECK_LHS_BOUNDARY + if (lhs_vert + 3 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + lhs_pf3 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + lhs_pf2 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + lhs_pf1 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0 =lhs.template loadPacket(lhs_vert, (threadIdx.y/4+k)); + } + } else if (lhs_vert + 2 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + lhs_pf3.y =lhs(lhs_vert + 1, 
(threadIdx.y/4+k+24)); + lhs_pf3.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf2.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf1.z =lhs(lhs_vert + 2, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf0.z =lhs(lhs_vert + 2, (threadIdx.y/4+k)); + } + } else if (lhs_vert + 1 < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + lhs_pf3.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf2.y =lhs(lhs_vert + 1, 
(threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf1.y =lhs(lhs_vert + 1, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf0.y =lhs(lhs_vert + 1, (threadIdx.y/4+k)); + } + } else if (lhs_vert < m_size) { + if ((threadIdx.y/4+k+24) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + lhs_pf3.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+24)); + } else if ((threadIdx.y/4+k+16) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + lhs_pf2.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+16)); + } else if ((threadIdx.y/4+k+8) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + lhs_pf1.x =lhs(lhs_vert + 0, (threadIdx.y/4+k+8)); + } else if ((threadIdx.y/4+k) < k_size) { + lhs_pf0.x =lhs(lhs_vert + 0, (threadIdx.y/4+k)); + } + } + } + __syncthreads(); + Index rhs_vert = k+threadIdx.x*4; + Index rhs_horiz0 = threadIdx.y*2+base_n; + Index rhs_horiz1 = threadIdx.y*2+1+base_n; + if (!CHECK_RHS_BOUNDARY) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + rhs_pf1 = rhs.template loadPacket(rhs_vert, rhs_horiz1); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1); + } else if (rhs_vert + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf1.x = 
rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + } + } else { + if (rhs_horiz1 < n_size) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + rhs_pf1 = rhs.template loadPacket(rhs_vert, rhs_horiz1); + } else if (rhs_vert + 2 < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + rhs_pf1.z = rhs(rhs_vert + 2, rhs_horiz1); + } else if (k+threadIdx.x*4 + 1 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + rhs_pf1.y = rhs(rhs_vert + 1, rhs_horiz1); + } else if (k+threadIdx.x*4 < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf1.x = rhs(rhs_vert, rhs_horiz1); + } + } else if (rhs_horiz0 < n_size) { + if ((rhs_vert + 3) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0 = rhs.template loadPacket(rhs_vert, rhs_horiz0); + } else if ((rhs_vert + 2) < k_size) { + // just CHECK_RHS_BOUNDARY + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + rhs_pf0.z = rhs(rhs_vert + 2, rhs_horiz0); + } else if ((rhs_vert + 1) < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + rhs_pf0.y = rhs(rhs_vert + 1, rhs_horiz0); + } else if (rhs_vert < k_size) { + rhs_pf0.x = rhs(rhs_vert, rhs_horiz0); + } + } + } + __syncthreads(); + // Loaded. Do computation + // Row 0 -> times (0, 4, 8, .. 28) for features 0, 1. + // Row 1 -> times (0, 4, 8, .. 28) for features 2, 3. + // .. + // Row 31 -> times (0, 4, 8, .. 28) for features 62, 63 + rhs_shmem2[threadIdx.y][threadIdx.x] = make_float2(rhs_pf0.x, rhs_pf1.x); + // Row 32 -> times (1, 5, 9, .. 
29) for features 0, 1. + // Row 33 -> times (1, 5, 9, .. 29) for features 2, 3. + // .. + rhs_shmem2[threadIdx.y+32][threadIdx.x] = make_float2(rhs_pf0.y, rhs_pf1.y); + // Row 64 -> times (2, 6, 10, .. 30) for features 0, 1. + // Row 65 -> times (2, 6, 10, .. 30) for features 2, 3. + rhs_shmem2[threadIdx.y+64][threadIdx.x] = make_float2(rhs_pf0.z, rhs_pf1.z); + // Row 96 -> times (3, 7, 11, .. 31) for features 0, 1. + // Row 97 -> times (3, 7, 11, .. 31) for features 2, 3. + rhs_shmem2[threadIdx.y+96][threadIdx.x] = make_float2(rhs_pf0.w, rhs_pf1.w); + + // LHS. + // Row 0 (time 0) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125) + // Row 1 (time 1) -> features (0, 1), (4, 5), .. (28, 29), (32, 33), .. (60, 61) .. (124, 125) + // ... + // Row 8 (time 0) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127) + // Row 15 (time 7) -> features (2, 3), (6, 7), .. (30, 31), (34, 35), .. (62, 63) .. (126, 127) + + +#define add_vals(a_feat1, a_feat2, f1, f2, f3, f4)\ + results[0].x += a_feat1.x * f1.x;\ + results[1].x += a_feat1.x * f1.y;\ + results[2].x += a_feat1.x * f2.x;\ + results[3].x += a_feat1.x * f2.y;\ + results[4].x += a_feat1.x * f3.x;\ + results[5].x += a_feat1.x * f3.y;\ + results[6].x += a_feat1.x * f4.x;\ + results[7].x += a_feat1.x * f4.y;\ +\ + results[0].y += a_feat1.y * f1.x;\ + results[1].y += a_feat1.y * f1.y;\ + results[2].y += a_feat1.y * f2.x;\ + results[3].y += a_feat1.y * f2.y;\ + results[4].y += a_feat1.y * f3.x;\ + results[5].y += a_feat1.y * f3.y;\ + results[6].y += a_feat1.y * f4.x;\ + results[7].y += a_feat1.y * f4.y;\ +\ + results[0].z += a_feat2.x * f1.x;\ + results[1].z += a_feat2.x * f1.y;\ + results[2].z += a_feat2.x * f2.x;\ + results[3].z += a_feat2.x * f2.y;\ + results[4].z += a_feat2.x * f3.x;\ + results[5].z += a_feat2.x * f3.y;\ + results[6].z += a_feat2.x * f4.x;\ + results[7].z += a_feat2.x * f4.y;\ +\ + results[0].w += a_feat2.y * f1.x;\ + results[1].w += a_feat2.y * f1.y;\ + 
results[2].w += a_feat2.y * f2.x;\ + results[3].w += a_feat2.y * f2.y;\ + results[4].w += a_feat2.y * f3.x;\ + results[5].w += a_feat2.y * f3.y;\ + results[6].w += a_feat2.y * f4.x;\ + results[7].w += a_feat2.y * f4.y;\ + + lhs_shmem2[threadIdx.y/4][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.x, lhs_pf0.y); + lhs_shmem2[threadIdx.y/4+8][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.x, lhs_pf1.y); + lhs_shmem2[threadIdx.y/4+16][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.x, lhs_pf2.y); + lhs_shmem2[threadIdx.y/4+24][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.x, lhs_pf3.y); + + lhs_shmem2[threadIdx.y/4 + 32][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf0.z, lhs_pf0.w); + lhs_shmem2[threadIdx.y/4 + 40][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf1.z, lhs_pf1.w); + lhs_shmem2[threadIdx.y/4 + 48][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf2.z, lhs_pf2.w); + lhs_shmem2[threadIdx.y/4 + 56][threadIdx.x+(threadIdx.y%4)*8] = make_float2(lhs_pf3.z, lhs_pf3.w); + + __syncthreads(); + + // Do the multiplies. + #pragma unroll + for (int koff = 0; koff < 32; koff ++) { + float2 a3 = lhs_shmem2[koff][threadIdx.x + (threadIdx.y % 4) * 8]; + float2 a4 = lhs_shmem2[koff + 32][threadIdx.x + (threadIdx.y % 4) * 8]; + + // first feature is at (threadIdx.y/4) * 8 last is at start + 8. 
+ int start_feature = (threadIdx.y / 4) * 8; + + float2 br1 = rhs_shmem2[start_feature/2 + (koff % 4) * 32][koff/4]; + float2 br2 = rhs_shmem2[start_feature/2 + 1 + (koff % 4) * 32][koff/4]; + float2 br3 = rhs_shmem2[start_feature/2 + 2 + (koff % 4) * 32][koff/4]; + float2 br4 = rhs_shmem2[start_feature/2 + 3 + (koff % 4) * 32][koff/4]; + + add_vals(a3, a4, br1, br2, br3, br4) + } + __syncthreads(); + } // end loop over k + + __syncthreads(); + Index horiz_base = (threadIdx.y/4)*8+base_n; + if (!CHECK_LHS_BOUNDARY && !CHECK_RHS_BOUNDARY) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (!CHECK_RHS_BOUNDARY) { + if (lhs_vert + 3 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } else if (lhs_vert + 2 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + } + } else if (lhs_vert + 1 < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + } + } else if (lhs_vert < m_size) { + for (int i = 0; i < 8; i++) { + output(lhs_vert, horiz_base + i) = results[i].x; + } + } + } else if (!CHECK_LHS_BOUNDARY) { + // CHECK BOUNDARY_B + for (int i = 0; i < 8; i++) { + if (horiz_base + i < n_size) { + output(lhs_vert, horiz_base + i) = results[i].x; + output(lhs_vert + 1, horiz_base + i) = results[i].y; + output(lhs_vert + 2, horiz_base + i) = results[i].z; + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } 
+ } + } else { + // CHECK both boundaries. + for (int i = 0; i < 8; i++) { + if (horiz_base + i < n_size) { + if (lhs_vert < m_size) + output(lhs_vert, horiz_base + i) = results[i].x; + if (lhs_vert + 1 < m_size) + output(lhs_vert + 1, horiz_base + i) = results[i].y; + if (lhs_vert + 2 < m_size) + output(lhs_vert + 2, horiz_base + i) = results[i].z; + if (lhs_vert + 3 < m_size) + output(lhs_vert + 3, horiz_base + i) = results[i].w; + } + } + } +} + + +template +__global__ void +#if defined(EIGEN_HIPCC) +__launch_bounds__(256, 1) +#else +__launch_bounds__(256) +#endif +EigenFloatContractionKernel(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ float2 lhs_shmem[64*32]; + __shared__ float2 rhs_shmem[128*8]; + + typedef float2 LHS_MEM[64][32]; + typedef float2 RHS_MEM[128][8]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 128 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + bool check_rhs = (base_n + 63) >= n_size; + bool check_lhs128 = (base_m + 127) >= m_size; + + if (!check_rhs) { + if (!check_lhs128) { + // >= 128 rows left + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } + } else { + if (!check_lhs128) { + // >= 128 rows left + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal( + lhs, rhs, output, *((LHS_MEM *) lhs_shmem), *((RHS_MEM *) rhs_shmem), m_size, n_size, k_size, base_m, base_n); + } + } +} + +template +__global__ void +#if defined(EIGEN_HIPCC) +__launch_bounds__(256, 1) 
+#else +__launch_bounds__(256) +#endif +EigenFloatContractionKernel16x16(const LhsMapper lhs, const RhsMapper rhs, + const OutputMapper output, + const Index m_size, const Index n_size, const Index k_size) { + __shared__ float2 lhs_shmem[32][16]; + __shared__ float2 rhs_shmem[64][8]; + + const Index m_block_idx = blockIdx.x; + const Index n_block_idx = blockIdx.y; + + const Index base_m = 64 * m_block_idx; + const Index base_n = 64 * n_block_idx; + + if (base_m + 63 < m_size) { + if (base_n + 63 < n_size) { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } + } else { + if (base_n + 63 < n_size) { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } else { + EigenFloatContractionKernelInternal16x16(lhs, rhs, output, lhs_shmem, rhs_shmem, m_size, n_size, k_size, base_m, base_n); + } + } +} + + +template +struct TensorEvaluator, GpuDevice> : + public TensorContractionEvaluatorBase, GpuDevice> > { + + typedef GpuDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout, + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. 
+ typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + // typedefs needed in evalTo + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + typedef typename LeftEvaluator::Dimensions LeftDimensions; + typedef typename RightEvaluator::Dimensions RightDimensions; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) + { + EIGEN_STATIC_ASSERT( (internal::is_same::value), + GPU_TENSOR_CONTRACTION_DOES_NOT_SUPPORT_OUTPUT_KERNELS); + } + + // We need to redefine this method to make nvcc happy + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { + this->m_leftImpl.evalSubExprsIfNeeded(NULL); + this->m_rightImpl.evalSubExprsIfNeeded(NULL); + if (data) { + evalTo(data); + return false; + } else { + this->m_result = static_cast(this->m_device.allocate(this->dimensions().TotalSize() * sizeof(Scalar))); + evalTo(this->m_result); + return true; + } + } + + void evalTo(Scalar* buffer) const { + if (this->m_lhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + else { + if 
(this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + } + else { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } + else { + evalTyped(buffer); + } + } + } + } + + template struct LaunchKernels { + static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) { + const Index m_blocks = (m + 63) / 64; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(8, 8, 8); + LAUNCH_GPU_KERNEL((EigenContractionKernel), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } + }; + + template struct LaunchKernels { + static void Run(const LhsMapper& lhs, const RhsMapper& rhs, const OutputMapper& output, Index m, Index n, Index k, const GpuDevice& device) { + if (m < 768 || n < 768) { + const Index m_blocks = (m + 63) / 64; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(16, 16, 1); + LAUNCH_GPU_KERNEL((EigenFloatContractionKernel16x16), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } else { + const Index m_blocks = (m + 127) / 128; + const Index n_blocks = (n + 63) / 64; + const dim3 num_blocks(m_blocks, n_blocks, 1); + const dim3 block_size(8, 32, 1); + LAUNCH_GPU_KERNEL((EigenFloatContractionKernel), num_blocks, block_size, 0, device, lhs, rhs, output, m, n, k); + } + } + }; + + template + void evalTyped(Scalar* buffer) const { + // columns in left side, rows in right side + const Index k = this->m_k_size; + EIGEN_UNUSED_VARIABLE(k) + + // rows in left side + const Index m = this->m_i_size; + + // columns in right side + const Index n = this->m_j_size; + + // zero out the result buffer (which must be of size at least m * n * 
sizeof(Scalar) + this->m_device.memset(buffer, 0, m * n * sizeof(Scalar)); + + typedef internal::TensorContractionInputMapper LhsMapper; + + typedef internal::TensorContractionInputMapper RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + + OutputMapper output(buffer, m); + +#if defined(EIGEN_USE_HIP) + setGpuSharedMemConfig(hipSharedMemBankSizeEightByte); +#else + setGpuSharedMemConfig(cudaSharedMemBankSizeEightByte); +#endif + + LaunchKernels::Run(lhs, rhs, output, m, n, k, this->m_device); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_USE_GPU and EIGEN_GPUCC +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_GPU_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h new file mode 100644 index 0000000..9ab900b --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h @@ -0,0 +1,575 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H + +namespace Eigen { + +namespace internal { + +enum { + Rhs = 0, + Lhs = 1 +}; + +/* + * Implementation of the Eigen blas_data_mapper class for tensors. + */ +/// The make pointer class is used by sycl in order to build the mapper class on the device. 
For other platform the default make pointer is used which +/// is scalar * for CoeffLoader. +template class MakePointer_ = MakePointer> +struct CoeffLoader; + +template class MakePointer_ = MakePointer> +class BaseTensorContractionMapper; + +template class MakePointer_> +struct CoeffLoader { + enum { + DirectOffsets = false + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) { + eigen_assert(false && "unsupported"); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename MakePointer_::Type + data() const { + eigen_assert(false && "unsupported"); + return NULL; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return m_tensor.coeff(index); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename Tensor::PacketReturnType packet(typename Tensor::Index index) const + { + return m_tensor.template packet(index); + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_tensor.bind(cgh); + } + #endif + + private: + const Tensor m_tensor; +}; + +template class MakePointer_> +struct CoeffLoader { + enum { + DirectOffsets = true + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) { + m_data += offset; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const typename MakePointer_::Type + data() const { + return m_data; + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) const { return loadConstant(m_data+index); } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename Tensor::PacketReturnType packet(typename Tensor::Index 
index) const + { + return internal::ploadt_ro(m_data + index); + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } + #endif + private: + typedef typename Tensor::Scalar Scalar; + + typename MakePointer_::Type m_data; +}; + +template class MakePointer_ = MakePointer> +class SimpleTensorContractionMapper { + public: + EIGEN_DEVICE_FUNC + SimpleTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + m_tensor(tensor), + m_nocontract_strides(nocontract_strides), + m_ij_strides(ij_strides), + m_contract_strides(contract_strides), + m_k_strides(k_strides) { } + + enum { + DirectOffsets = CoeffLoader::DirectOffsets + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) { + m_tensor.offsetBuffer(offset); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE void prefetch(Index /*i*/) { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row) const { + // column major assumption + return operator()(row, 0); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar operator()(Index row, Index col) const { + return m_tensor.coeff(computeIndex(row, col)); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index computeIndex(Index row, Index col) const { + const bool left = (side == Lhs); + EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963 + Index nocontract_val = left ? 
row : col; + Index linidx = 0; + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx = nocontract_val / m_ij_strides[i]; + linidx += idx * m_nocontract_strides[i]; + nocontract_val -= idx * m_ij_strides[i]; + } + if (array_size::value > array_size::value) { + if (side == Lhs && inner_dim_contiguous) { + eigen_assert(m_nocontract_strides[0] == 1); + linidx += nocontract_val; + } else { + linidx += nocontract_val * m_nocontract_strides[0]; + } + } + + Index contract_val = left ? col : row; + if(array_size::value > 0) { + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx = contract_val / m_k_strides[i]; + linidx += idx * m_contract_strides[i]; + contract_val -= idx * m_k_strides[i]; + } + + if (side == Rhs && inner_dim_contiguous) { + eigen_assert(m_contract_strides[0] == 1); + linidx += contract_val; + } else { + linidx += contract_val * m_contract_strides[0]; + } + } + + return linidx; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE IndexPair computeIndexPair(Index row, Index col, const Index distance) const { + const bool left = (side == Lhs); + EIGEN_UNUSED_VARIABLE(left); // annoying bug in g++8.1: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85963 + Index nocontract_val[2] = {left ? row : col, left ? 
row + distance : col}; + Index linidx[2] = {0, 0}; + if (array_size::value > array_size::value) { + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx0 = nocontract_val[0] / m_ij_strides[i]; + const Index idx1 = nocontract_val[1] / m_ij_strides[i]; + linidx[0] += idx0 * m_nocontract_strides[i]; + linidx[1] += idx1 * m_nocontract_strides[i]; + nocontract_val[0] -= idx0 * m_ij_strides[i]; + nocontract_val[1] -= idx1 * m_ij_strides[i]; + } + if (side == Lhs && inner_dim_contiguous) { + eigen_assert(m_nocontract_strides[0] == 1); + linidx[0] += nocontract_val[0]; + linidx[1] += nocontract_val[1]; + } else { + linidx[0] += nocontract_val[0] * m_nocontract_strides[0]; + linidx[1] += nocontract_val[1] * m_nocontract_strides[0]; + } + } + + Index contract_val[2] = {left ? col : row, left ? col : row + distance}; + if (array_size::value> 0) { + EIGEN_UNROLL_LOOP + for (int i = static_cast(array_size::value) - 1; i > 0; i--) { + const Index idx0 = contract_val[0] / m_k_strides[i]; + const Index idx1 = contract_val[1] / m_k_strides[i]; + linidx[0] += idx0 * m_contract_strides[i]; + linidx[1] += idx1 * m_contract_strides[i]; + contract_val[0] -= idx0 * m_k_strides[i]; + contract_val[1] -= idx1 * m_k_strides[i]; + } + + if (side == Rhs && inner_dim_contiguous) { + eigen_assert(m_contract_strides[0] == 1); + linidx[0] += contract_val[0]; + linidx[1] += contract_val[1]; + } else { + linidx[0] += contract_val[0] * m_contract_strides[0]; + linidx[1] += contract_val[1] * m_contract_strides[0]; + } + } + return IndexPair(linidx[0], linidx[1]); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const { + // Only claim alignment when we can compute the actual stride (ie when we're + // dealing with the lhs with inner_dim_contiguous. This is because the + // matrix-vector product relies on the stride when dealing with aligned inputs. + return (Alignment == Aligned) && (side == Lhs) && inner_dim_contiguous ? 
0 : size; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const { + return ((side == Lhs) && inner_dim_contiguous && array_size::value > 0) ? m_contract_strides[0] : 1; + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_tensor.bind(cgh); + } + #endif + + const CoeffLoader& tensor() const { + return m_tensor; + } + + const nocontract_t& nocontract_strides() const { + return m_nocontract_strides; + } + const nocontract_t& ij_strides() const { return m_ij_strides; } + const contract_t& contract_strides() const { return m_contract_strides; } + const contract_t& k_strides() const { return m_k_strides; } + + protected: + CoeffLoader m_tensor; + const nocontract_t m_nocontract_strides; + const nocontract_t m_ij_strides; + const contract_t m_contract_strides; + const contract_t m_k_strides; +}; + +template class MakePointer_> +class BaseTensorContractionMapper : public SimpleTensorContractionMapper +{ + public: + typedef SimpleTensorContractionMapper ParentMapper; + + EIGEN_DEVICE_FUNC + BaseTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::size==packet_size,PacketT>::type + load(Index i, Index j) const + { + // whole method makes column major assumption + + // don't need to add offsets for now (because operator handles that) + // current code assumes packet size must be a multiple of 2 + EIGEN_STATIC_ASSERT(packet_size % 2 == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + + if (Tensor::PacketAccess && inner_dim_contiguous && !inner_dim_reordered) { + const Index index = this->computeIndex(i, j); + 
eigen_assert(this->computeIndex(i+packet_size-1, j) == index + packet_size-1); + return this->m_tensor.template packet(index); + } + + const IndexPair indexPair = this->computeIndexPair(i, j, packet_size - 1); + const Index first = indexPair.first; + const Index lastIdx = indexPair.second; + + // We can always do optimized packet reads from left hand side right now, because + // the vertical matrix dimension on the left hand side is never contracting. + // On the right hand side we need to check if the contracting dimensions may have + // been shuffled first. + if (Tensor::PacketAccess && + (side == Lhs || internal::array_size::value <= 1 || !inner_dim_reordered) && + (lastIdx - first) == (packet_size - 1)) { + + return this->m_tensor.template packet(first); + } + + EIGEN_ALIGN_MAX Scalar data[packet_size]; + + data[0] = this->m_tensor.coeff(first); + EIGEN_UNROLL_LOOP + for (Index k = 1; k < packet_size - 1; k += 2) { + const IndexPair internal_pair = this->computeIndexPair(i + k, j, 1); + data[k] = this->m_tensor.coeff(internal_pair.first); + data[k + 1] = this->m_tensor.coeff(internal_pair.second); + } + data[packet_size - 1] = this->m_tensor.coeff(lastIdx); + + return pload(data); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::size!=packet_size,PacketT>::type + load(Index i, Index j) const + { + const Index requested_packet_size = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX Scalar data[requested_packet_size]; + + const IndexPair indexPair = this->computeIndexPair(i, j, requested_packet_size - 1); + const Index first = indexPair.first; + const Index lastIdx = indexPair.second; + + data[0] = this->m_tensor.coeff(first); + for (Index k = 1; k < requested_packet_size - 1; k += 2) { + const IndexPair internal_pair = this->computeIndexPair(i + k, j, 1); + data[k] = this->m_tensor.coeff(internal_pair.first); + data[k + 1] = this->m_tensor.coeff(internal_pair.second); + } + data[requested_packet_size - 1] = 
this->m_tensor.coeff(lastIdx); + + return pload(data); + } + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const { + return this->load(i,j); + } +}; + + +template class MakePointer_> +class BaseTensorContractionMapper + : public SimpleTensorContractionMapper +{ + public: + typedef SimpleTensorContractionMapper ParentMapper; + + EIGEN_DEVICE_FUNC + BaseTensorContractionMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) : + ParentMapper(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PacketT loadPacket(Index i, Index j) const { + EIGEN_ALIGN_MAX Scalar data[1]; + data[0] = this->m_tensor.coeff(this->computeIndex(i, j)); + return pload(data); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE PacketT load(Index i, Index j) const { + EIGEN_ALIGN_MAX Scalar data[1]; + data[0] = this->m_tensor.coeff(this->computeIndex(i, j)); + return pload(data); + } +}; + + +template class MakePointer_=MakePointer> +class TensorContractionSubMapper { + public: + + typedef BaseTensorContractionMapper ParentMapper; + typedef TensorContractionSubMapper Self; + typedef Self LinearMapper; + + enum { + // We can use direct offsets iff the parent mapper supports then and we can compute the strides. + // TODO: we should also enable direct offsets for the Rhs case. + UseDirectOffsets = ParentMapper::DirectOffsets && (side == Lhs) && inner_dim_contiguous && (array_size::value > 0) + }; + + EIGEN_DEVICE_FUNC TensorContractionSubMapper(const ParentMapper& base_mapper, Index vert_offset, Index horiz_offset) + : m_base_mapper(base_mapper), m_vert_offset(vert_offset), m_horiz_offset(horiz_offset) { + // Bake the offsets into the buffer used by the base mapper whenever possible. 
This avoids the need to recompute + // this offset every time we attempt to access a coefficient. + if (UseDirectOffsets) { + Index stride = m_base_mapper.stride(); + m_base_mapper.offsetBuffer(vert_offset + horiz_offset * stride); + } + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper(i, 0); + } + return m_base_mapper(i + m_vert_offset, m_horiz_offset); + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper(i, j); + } + return m_base_mapper(i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, 0); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, j); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT loadPacket(Index i, Index j) const { + if (UseDirectOffsets) { + return m_base_mapper.template load(i, j); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const PacketT& p) const { + if (UseDirectOffsets) { + m_base_mapper.storePacket(i, 0, p); + } + m_base_mapper.storePacket(i + m_vert_offset, m_horiz_offset, p); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LinearMapper getLinearMapper(Index i, Index j) const { + if (UseDirectOffsets) { + return LinearMapper(m_base_mapper, i, j); + } + return LinearMapper(m_base_mapper, i + m_vert_offset, j + m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketT load(Index i) 
const { + EIGEN_STATIC_ASSERT((internal::is_same::value), YOU_MADE_A_PROGRAMMING_MISTAKE); + const int ActualAlignment = (AlignmentType == Aligned) && (Alignment == Aligned) ? Aligned : Unaligned; + if (UseDirectOffsets) { + return m_base_mapper.template loadPacket(i, 0); + } + return m_base_mapper.template loadPacket(i + m_vert_offset, m_horiz_offset); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool aligned(Index) const { + return false; + } + + #ifdef EIGEN_USE_SYCL + // The placeholder accessors require to be bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_base_mapper.bind(cgh); + } + #endif + + const ParentMapper& base_mapper() const { return m_base_mapper; } + Index vert_offset() const { return m_vert_offset; } + Index horiz_offset() const { return m_horiz_offset; } + + private: + ParentMapper m_base_mapper; + const Index m_vert_offset; + const Index m_horiz_offset; +}; + + +template class MakePointer_=MakePointer> +class TensorContractionInputMapper + : public BaseTensorContractionMapper { + + public: + typedef Scalar_ Scalar; + typedef BaseTensorContractionMapper Base; + typedef TensorContractionSubMapper SubMapper; + typedef SubMapper VectorMapper; + + EIGEN_DEVICE_FUNC TensorContractionInputMapper(const Tensor& tensor, + const nocontract_t& nocontract_strides, + const nocontract_t& ij_strides, + const contract_t& contract_strides, + const contract_t& k_strides) + : Base(tensor, nocontract_strides, ij_strides, contract_strides, k_strides) { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE SubMapper getSubMapper(Index i, Index j) const { + return SubMapper(*this, i, j); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE VectorMapper getVectorMapper(Index i, Index j) const { + return VectorMapper(*this, i, j); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE const CoeffLoader& get_tensor() const { + return Base::m_tensor; + } +}; + + +template struct 
TensorContractionInputMapperTrait; + +template class MakePointer_> +struct TensorContractionInputMapperTrait > { + + typedef Tensor_ XprType; + static const bool inner_dim_contiguous = inner_dim_contiguous_; + static const bool inner_dim_reordered = inner_dim_reordered_; + }; + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_MAPPER_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h new file mode 100644 index 0000000..473c228 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionSycl.h @@ -0,0 +1,1650 @@ +// This file is part of Eigen, a lightweight C++ template library for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla Public License v. 2.0. If a copy of the MPL was not +// distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * TensorContractionSycl.h + * + * \brief: + * TensorContractionSycl.h, provides various tensor contraction kernel for SYCL backend + * + *****************************************************************/ + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_SYCL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_SYCL_H + +namespace Eigen { + +namespace TensorSycl { +namespace internal { + +#ifndef EIGEN_SYCL_DISABLE_GEMV +/*! + * \brief TVPanelSize, a template class used for setting the panel size required for launching General TensorVector + * contraction kernel on various hardware devices. + * + * \tparam Scalar: determines the element type of the tensor/vector + * + * \tparam StorageIndex determines the Index type. 
+ * + * \tparam NCWindow: determines the number of non-contracting element to be process by each work-group + * + * \tparam CFactor: determines the number of contracting element to be process by each thread + * + * \tparam NCFactor: determines the number of non-contracting element to be process by each thread + */ +template +struct TVPanelSize { + // LocalThreadSizeC: determines total number of thread per workgroup for the contracting dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeC = EIGEN_SYCL_LOCAL_THREAD_DIM0; + // LocalThreadSizeNC: determines total number of thread per workgroup for the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeNC = EIGEN_SYCL_LOCAL_THREAD_DIM1; + // TileSizeDimNC: determines the tile size for the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimNC = NCWindow / NCFactor; + // TileSizeDimC: determines the tile size for the contracting dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimC = CFactor * LocalThreadSizeNC * LocalThreadSizeC; + // WorkLoadPerThreadNC : determines workload per thread for loading the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadNC = TileSizeDimNC / LocalThreadSizeNC; + // WorkLoadPerThreadC: determines workload per thread for loading the non-contracting dimension + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadC = TileSizeDimC / LocalThreadSizeC; + // BC : determines if supporting bank conflict is required + static EIGEN_CONSTEXPR bool BC = false; +}; +#endif + +/*! + * \brief TTPanelSize, a template class used for setting the panel size required for launching General Tensor Tensor + contraction kernel on various hardware devices. + * + * \tparam Scalar: determines the element type of the tensor + * + * \tparam StorageIndex: determines the Index type. 
+ * + * \tparam REG_SIZE_M: determines workload per thread for loading the M dimension This can be varied based on the + available register on a chosen device(can be controlled by EIGEN_SYCL_REG_M macro). + * + * \tparam REG_SIZE_N: determines workload per thread for loading the N dimension This can be varied based on the + available register on a chosen device(can be controlled by EIGEN_SYCL_REG_N macro). + * + * \tparam TSDK: determines Tile size for dimension K. The packet size is assumed to be considered + */ + +template +struct TTPanelSize { + // TileSizeDimK: determines Tile size for dimension K. The packet size is assumed to be considered + static EIGEN_CONSTEXPR StorageIndex TileSizeDimK = TSDK; + // WorkLoadPerThreadM : determines workload per thread for loading the M dimension This can be varied based on the + // available register on a chosen device(can be controlled by EIGEN_SYCL_REG_M macro// +#ifndef EIGEN_SYCL_REG_M + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadM = REG_SIZE_M; +#else + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadM = EIGEN_SYCL_REG_M; +#endif +// WorkLoadPerThreadN : determines workload per thread for loading the N dimension This can be varied based on the +// available register on a chosen device(can be controlled by EIGEN_SYCL_REG_N macro +#ifndef EIGEN_SYCL_REG_N + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadN = REG_SIZE_N; +#else + static EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadN = EIGEN_SYCL_REG_N; +#endif + // LocalThreadSizeM: determines total number of thread per workgroup for the m dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeM = EIGEN_SYCL_LOCAL_THREAD_DIM0; + // LocalThreadSizeN: determines total number of thread per workgroup for the n dimension + static EIGEN_CONSTEXPR StorageIndex LocalThreadSizeN = EIGEN_SYCL_LOCAL_THREAD_DIM1; + // TileSizeDimM: determines the tile size for the m dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimM = LocalThreadSizeM * 
WorkLoadPerThreadM; + // TileSizeDimN: determines the tile size for the n dimension + static EIGEN_CONSTEXPR StorageIndex TileSizeDimN = LocalThreadSizeN * WorkLoadPerThreadN; + // LoadPerThreadLhs: determines workload per thread for loading Lhs Tensor. This must be divisable by packetsize + static EIGEN_CONSTEXPR StorageIndex LoadPerThreadLhs = + ((TileSizeDimK * WorkLoadPerThreadM * WorkLoadPerThreadN) / (TileSizeDimN)); + // LoadPerThreadRhs: determines workload per thread for loading Rhs Tensor. This must be divisable by packetsize + static EIGEN_CONSTEXPR StorageIndex LoadPerThreadRhs = + ((TileSizeDimK * WorkLoadPerThreadM * WorkLoadPerThreadN) / (TileSizeDimM)); + // BC : determines if supporting bank conflict is required + static EIGEN_CONSTEXPR bool BC = true; + // DoubleBuffer: determines if double buffering technique should be used (This can be disabled by + // EIGEN_SYCL_DISABLE_DOUBLE_BUFFER macro when the device doesnot have sufficient local memory) + static EIGEN_CONSTEXPR bool DoubleBuffer = +#ifdef EIGEN_SYCL_DISABLE_DOUBLE_BUFFER + false; +#else + true; +#endif +}; + +/* ! + * \brief contraction_type: an enum class representing the Tensor Contraction implementation algorithm. This is used to + * specialize the contraction algorithm based on device support for dedicated local memory. + */ +enum class contraction_type { local, no_local }; +/* ! + * \brief data_source an enum class determining the location of the data in a memory hierarchy (global, local, private). + */ +enum class data_source { global_mem, local_mem, private_mem }; + +/*! + * \brief read, a template function used for loading the data from global + memory. 
This function is used to guarantee coalesced and vectorized load whenever possible
+ *
+ * \tparam PacketLoad: determines if each element of this tensor block should be loaded in a packet mode
+ *
+ * \tparam is_coalesced_layout: determines whether or not the Tensor data in a memory can be accessed coalesced and
+ vectorized when possible. Coalesced memory access is a key factor in Kernel performance. When a tensor is 2d and the
+ contracting dimension is 1, it is always possible to access tensor data coalesced and vectorized. This is the case
+ when the RHS(right hand side) Tensor is transposed or when the LHS(left hand side) Tensor is not transposed.
+ *
+ * \tparam PacketType: determines the type of packet
+ *
+ * \tparam TensorMapper: determines the input tensor mapper type
+ *
+ * \tparam StorageIndex: determines the Index type
+
+ * \param tensorMapper: is the input tensor
+ *
+ * \param NCIndex: is the non-contracting dim index
+ *
+ * \param CIndex: is the contracting dim index
+ *
+ * \param ld: is the leading dimension of the flattened tensor
+ */
+template
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type read(
+    const TensorMapper &tensorMapper, const StorageIndex &NCIndex, const StorageIndex &CIndex, const StorageIndex &ld) {
+  // Pick the (row, col) order that yields a coalesced, vectorized packet access.
+  const StorageIndex row = (is_coalesced_layout) ? NCIndex : CIndex;
+  const StorageIndex col = (is_coalesced_layout) ? CIndex : NCIndex;
+  return tensorMapper.get_tensor().template packet(row + (col * ld));
+}
+
+/*!
+ * \brief read, special overload of read function, when the read access is not vectorized
+ *
+ * \tparam PacketLoad: determines if each element of this tensor block should be loaded in a packet mode
+ *
+ * \tparam IsRhs: true when the mapped tensor is the RHS operand (swaps the row/col roles of the indices)
+ *
+ * \tparam is_coalesced_layout: determines whether or not the Tensor data in a memory can be accessed coalesced and
+ vectorized when possible. Coalesced memory access is a key factor in Kernel performance. When a tensor is 2d and the
+ contracting dimension is 1, it is always possible to access tensor data coalesced and vectorized. This is the case
+ when the RHS(right hand side) Tensor is transposed or when the LHS(left hand side) Tensor is not transposed.
+ *
+ * \tparam PacketType: determines the type of packet
+ *
+ * \tparam TensorMapper: determines the input tensor mapper type
+ *
+ * \tparam StorageIndex: determines the Index type
+
+ * \param tensorMapper: is the input tensor
+ *
+ * \param NCIndex: is the non-contracting dim index
+ *
+ * \param CIndex: is the contracting dim index
+ */
+template
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type read(
+    const TensorMapper &tensorMapper, const StorageIndex &NCIndex, const StorageIndex &CIndex, const StorageIndex &) {
+  const StorageIndex row = (IsRhs) ? CIndex : NCIndex;
+  const StorageIndex col = (IsRhs) ? NCIndex : CIndex;
+  return tensorMapper(row, col);
+}
+
+/*!
+ * \brief write, a template function used for storing the data to local memory. This function is used to guarantee
+ * coalesced and vectorized store whenever possible.
+ *
+ * \tparam StorageIndex: determines the Index type
+ *
+ * \param ld is the leading dimension of the local memory. ld is a compile time value for the local memory
+ *
+ * \tparam data_source: an enum value representing if the location of the data in a memory hierarchy.
+ *
+ * \tparam PacketType: determines the type of packet
+ *
+ * \tparam DataScalar: determines the output data type
+ *
+ * \param packet_data: the data to be written in the local memory
+ *
+ * \param ptr: a pointer to the local memory
+ */
+
+template
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+    typename ::Eigen::internal::enable_if::type
+    write(PacketType &packet_data, DataScalar ptr) {
+  EIGEN_CONSTEXPR int PacketSize = Eigen::internal::unpacket_traits::size;
+  // Scatter each packet lane into local memory one element at a time, ld elements apart.
+  EIGEN_UNROLL_LOOP
+  for (int i = 0; i < PacketSize; i++) {
+    *ptr = PacketWrapper::scalarize(i, packet_data);
+    ptr += ld;
+  }
+}
+
+/*!
+ * \brief Overloading the write function for storing the data to global memory, when vectorization is enabled. This
+ * function is used to guarantee coalesced and vectorized store whenever possible.
+ *
+ * \tparam data_source: an enum value representing if the location of the data in a memory hierarchy.
+ *
+ * \tparam PacketType: determines the type of packet
+ *
+ * \tparam DataScalar: determines the output data type
+ *
+ * \param packet_data: the data to be written in the local memory
+ *
+ * \param ptr: a pointer to the local memory
+ */
+
+template
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if<
+    Eigen::internal::unpacket_traits::size != 1 && dt == data_source::global_mem, void>::type
+write(PacketType &packet_data, DataScalar *ptr) {
+  ::Eigen::internal::pstoreu(ptr, packet_data);
+}
+
+/*!
+ * \brief Overloading the write function for storing the data to global memory, when vectorization is disabled.
+ *
+ * \tparam data_source: an enum value representing if the location of the data in a memory hierarchy.
+ *
+ * \tparam PacketType: determines the type of packet
+ *
+ * \tparam DataScalar: determines the output data type
+ *
+ * \param packet_data: the data to be written in the local memory
+ *
+ * \param ptr: a pointer to the local memory
+ */
+template
+static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if<
+    Eigen::internal::unpacket_traits::size == 1 && dt == data_source::global_mem, void>::type
+write(PacketType &packet_data, DataScalar *ptr) {
+  *ptr = packet_data;
+}
+
+/*!
+ * \brief check_boundary: is used to check the edge condition for non-internal blocks.
+ *
+ * \tparam is_internal: determines if the block is internal
+ */
+template
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_boundary(bool) {
+  // Internal blocks are always fully in range, so the runtime condition is ignored.
+  return true;
+}
+
+/*!
+ * \brief check_boundary: specialization of the check_boundary for non-internal blocks.
+ *
+ * \param cond: true when the data is in range. Otherwise false
+ */
+template <>
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool check_boundary(bool cond) {
+  return cond;
+}
+
+/*!
+ * \brief BlockProperties is a template class that provides different characteristics of a block of each Tensor
+ * processed by each workgroup.
+ *
+ * \tparam is_transposed: iff true, determines whether or not the block of the Tensor is transposed
+ *
+ * \tparam packet_load_: determines if each element of this tensor block should be loaded in a packet mode
+ *
+ * \tparam PacketType: determines the type of packet
+ *
+ * \tparam OutType: determines the type of each element for this block of tensor. If packet load is true, it will be
+ * packetType; Otherwise it will be scalar Type
+ *
+ * \param elements_per_access determines the size of each element based on OutType
+ *
+ * \param is_coalesced_layout determines whether or not the Tensor data in a memory can be accessed coalesced and
+ * vectorized when possible. Coalesced memory access is a key factor in Kernel performance. When a tensor is 2d and the
+ * contracting dimension is 1, it is always possible to access tensor data coalesced and vectorized. This is the case
+ * when the RHS(right hand side) Tensor is transposed or when the LHS(left hand side) Tensor is not transposed.
+ *
+ * \param nc_stride determines the stride of the non-contracting dimension to access the next adjacent element within
+ * the Tensor Block for each workgroup
+ *
+ * \param c_stride determines the stride of the contracting dimension to access the next adjacent element within the
+ * Tensor Block for each workgroup
+ */
+template
+struct BlockProperties {
+  static EIGEN_CONSTEXPR bool packet_load = packet_load_;
+  typedef typename Eigen::internal::unpacket_traits::type OutScalar;
+  static EIGEN_CONSTEXPR bool is_rhs = is_rhs_;
+  typedef typename Eigen::internal::conditional::type OutType;
+  static EIGEN_CONSTEXPR int elements_per_access = Eigen::internal::unpacket_traits::size;
+  // The layout is coalesced when transposition and operand side cancel out
+  // (LHS not transposed, or RHS transposed).
+  static EIGEN_CONSTEXPR bool is_coalesced_layout = !(is_transposed ^ is_rhs);
+  static EIGEN_CONSTEXPR int nc_stride = (is_coalesced_layout ? elements_per_access : 1);
+  static EIGEN_CONSTEXPR int c_stride = (is_coalesced_layout ? 1 : elements_per_access);
+};
+
+/*!
+ * \brief ThreadProperties is a template class that provides each thread's properties within a workgroup. Please see
+ * the sycl-1.2.1 specification (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for the workgroup,
+ * work-items
+ *
+ * \tparam StorageIndex: determines the StorageIndex Type
+ *
+ * \param linearLocalThreadId: determines the linearized location of a thread within a work-group
+ *
+ * \param kGroupId: determines the logical group id in a k dimension of the flattened tensor. It will be > 1 when
+ * tall/skinny algorithm is used
+ *
+ * \param mGroupOffset: determines the logical start position of all thread within a workgroup for the m dimension of
+ * the flattened tensor.
+ *
+ * \param nGroupOffset: determines the logical start position of all thread within a workgroup for the n dimension of
+ * the flattened tensor.
+ *
+ * \param kGroupOffset determines the logical start position of all thread within a workgroup for the k dimension of the
+ * flattened tensor. It will be > 1 when tall/skinny algorithm is used.
+ *
+ * \param mLocalOffset: determines the logical start position of each thread within a workgroup for the m dimension of a
+ * flattened tensor. The position determines the distance of each thread within the workgroup from each other
+ * independent from their global position.
+ *
+ * \param nLocalOffset: determines the logical start position of each thread within a workgroup for the n dimension of a
+ * flattened tensor. The position determines the distance of each thread within the workgroup from each other
+ * independent from their global position.
+ *
+ * \param mGlobalOffset: determines the logical start position of each thread for the m dimension on a
+ * flattened tensor
+ *
+ * \param nGlobalOffset: determines the logical start position of each thread for the n dimension on a
+ * flattened tensor
+ *
+ * \param kSize : determines the number of the k elements of the flattened Tensor to be processed by each thread for the
+ * given tensor block. This is != K dimension of Flattened Tensor when the Tall/Skinny algorithm is used.
+ *
+ * \param is_internal : determines if the thread within the work-group computes an internal block of tensor or
+ * the edge blocks. When it is internal, there is no need to check the boundaries and all the if statements can be
+ * resolved by the compiler.
+ */
+template
+struct ThreadProperties {
+  const StorageIndex linearLocalThreadId;
+  const StorageIndex kGroupId;
+  const StorageIndex mGroupOffset;
+  const StorageIndex nGroupOffset;
+  const StorageIndex kGroupOffset;
+  const StorageIndex mLocalOffset;
+  const StorageIndex nLocalOffset;
+  const StorageIndex mGlobalOffset;
+  const StorageIndex nGlobalOffset;
+  // kSize is deliberately non-const: it is used to adjust the last (possibly partial) block.
+  StorageIndex kSize;
+  const bool is_internal;
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ThreadProperties(
+      const StorageIndex linearLocalThreadId_, const StorageIndex kGroupId_, const StorageIndex mGroupOffset_,
+      const StorageIndex nGroupOffset_, const StorageIndex kGroupOffset_, const StorageIndex mLocalOffset_,
+      const StorageIndex nLocalOffset_, const StorageIndex mGlobalOffset_, const StorageIndex nGlobalOffset_,
+      StorageIndex kSize_, const bool is_internal_)
+      : linearLocalThreadId(linearLocalThreadId_),
+        kGroupId(kGroupId_),
+        mGroupOffset(mGroupOffset_),
+        nGroupOffset(nGroupOffset_),
+        kGroupOffset(kGroupOffset_),
+        mLocalOffset(mLocalOffset_),
+        nLocalOffset(nLocalOffset_),
+        mGlobalOffset(mGlobalOffset_),
+        nGlobalOffset(nGlobalOffset_),
+        kSize(kSize_),
+        is_internal(is_internal_) {}
+};
+
+/*!
+ * \brief TensorContractionKernel is a template class that provides the Tensor-Tensor contraction operation.
+ * + * \tparam OutScalar: determines the output scalar type + * + * \tparam LhsScalar: determines the left-hand-side scalar type + * + * \tparam RhsScalar: determines the right-hand-side scalar type + * + * \tparam OutAccessor: determines the sycl accessor type for out put (please see the sycl-1.2.1 specification + (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for accessor definition) + * + * \tparam LhsMapper determines the tensor contraction mapper type for left-hand-side matrix + * + * \tparam RhsMapper determines the tensor contraction mapper type for right-hand-side matrix + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \tparam Properties: determines the Contraction Panel properties + * + * \tparam TripleDim: determines the M, K, N dimensions for the flatten tensors in order to treat them as a matrix + * + * \tparam Vectorizable: determines whether or not the vectorization is enabled for the Eigen expression. + * + * \tparam input_mapper_properties : determine if the input tensors are matrix. If they are matrix, special memory + access is used to guarantee that always the memory access are coalesced. + * + * \tptaram IsFinal : determine if this is the final kernel. If so, the result will be written in a final output. + Otherwise, the result of contraction will be written iin a temporary buffer. This is the case when Tall/Skinny + contraction is used. So in this case, a final reduction step is required to compute final output. 
+ + * \tparam contraction_tp: it is an enum value representing whether the local memroy/no local memory implementation of + the algorithm to be used + * + * \param scratch: local memory containing tiles of LHS and RHS tensors for each work-group + * + * \param lhs: determines the left-hand-side flattened tensor (tensor mapper) + * + * \param rhs: determines the right-hand-side flattened tensor (tensor mapper) + * + * \param out_res: determines the output tensor containing the contraction result + * + * \param groupSizeM: a logical number determining the number of work-group for m dimension + * + * \param groupSizeN: a logical number determining the number of work-group for n dimension + * + * \param numTiles: determines total number of tiles on the k dimension + * + * \param TripleDim: determines the M, K, N dimensions for the flatten tensors in order to treat them as a matrix + */ +template +class TensorContractionKernel { + public: + typedef typename Eigen::TensorSycl::internal::Vectorise::PacketReturnType + PacketReturnType; + static EIGEN_CONSTEXPR int PacketSize = + Eigen::TensorSycl::internal::Vectorise::PacketSize; + static EIGEN_CONSTEXPR bool is_lhs_transposed = + !::Eigen::internal::TensorContractionInputMapperTrait::inner_dim_contiguous; + static EIGEN_CONSTEXPR bool is_rhs_transposed = + !::Eigen::internal::TensorContractionInputMapperTrait::inner_dim_contiguous; + + typedef BlockProperties + LHSBlockProperties; + + typedef BlockProperties + RHSBlockProperties; + + static EIGEN_CONSTEXPR StorageIndex NStride = + contraction_tp == contraction_type::local ? Properties::WorkLoadPerThreadN : RHSBlockProperties::nc_stride; + + typedef cl::sycl::accessor Scratch; + typedef cl::sycl::multi_ptr local_ptr; + typedef OutScalar * /*cl::sycl::multi_ptr*/ private_ptr; + typedef + typename ::Eigen::internal::conditional::type + tile_ptr; + static EIGEN_CONSTEXPR StorageIndex LSDL = contraction_tp == contraction_type::local + ? 
Properties::TileSizeDimM + Properties::BC + : Properties::WorkLoadPerThreadM; + static EIGEN_CONSTEXPR StorageIndex LSDR = contraction_tp == contraction_type::local + ? Properties::TileSizeDimN + Properties::BC + : Properties::WorkLoadPerThreadN; + static EIGEN_CONSTEXPR StorageIndex LocalOffset = Properties::LocalThreadSizeM * Properties::LocalThreadSizeN; + + /** + * \brief MemHolder this is a place holder struct for creating memory hierarchy in SYCL. Inside SYCL kernel it is not + * allowed to have dynamic memory allocation. While the local memory is created outside of the kernel and passed to + * the kernel as an accessor, the private memory can only allowed to be allocated statically. Since we are abstracting + * the TiledMemory for both local and private memory, the MemHolder structs is used as a helper to abstract out + * different type of memory needed when local/no_local memory computation is called. + * + * \tparam contraction_type: it is an enum value representing whether the local memroy/no local memory implementation + of the algorithm to be used + * \tparam the private memory size + * \param ptr the tile memory pointer type + */ + template + struct MemHolder { + tile_ptr ptr; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE MemHolder(local_ptr block_start_ptr) : ptr(block_start_ptr) {} + }; + /** + * \brief specialization of memHolder class when no local memory kernel is used. + */ + template + struct MemHolder { + OutScalar ptr[MemSize] = {OutScalar{0}}; + }; + /** + * \brief TiledMemory: contains required memory pointer for loading each tile of the TensorContraction panel from + * global memory to local/private memory when local/no_local algorithm used. + * + * \param lhs_scratch_extract : determines the LHS tile memory. It is either private or local memory based on the + * selected contraction_type. + * + * \param rhs_scratch_extract : determines the RHS tile memory. It is either private or local memory based on the + * selected contraction_type. 
+ * + * \param lhs_extract_index: determins the position of each thread on a local memory for lhs input. When private + * memory is used this is set to zero as this is not applicable in case of private memory. + * + * \param rhs_extract_index: determins the position of each thread on a local memory for rhs input. When private + * memory is used this is set to zero as this is not applicable in case of private memory. + * + * \param lhs_scratch_compute : determines the location to load for computation for lhs_local memory. This is the + * same as lhs_scratch_extract for private memory. + * + * \param rhs_scratch_compute : determines the location to load for computation for rhs_local memory. This is the + * same as rhs_scratch_extract for private memory. + */ + struct TiledMemory { + MemHolder lhs_scratch_extract; + MemHolder rhs_scratch_extract; + tile_ptr lhs_scratch_ptr_compute; + tile_ptr rhs_scratch_ptr_compute; + const std::pair lhs_extract_index; + const std::pair rhs_extract_index; + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TiledMemory(const ThreadProperties &, local_ptr, + typename ::Eigen::internal::enable_if::type * = 0) + : lhs_scratch_extract{}, + rhs_scratch_extract{}, + lhs_scratch_ptr_compute(lhs_scratch_extract.ptr), + rhs_scratch_ptr_compute(rhs_scratch_extract.ptr), + lhs_extract_index(std::pair(StorageIndex{0}, StorageIndex{0})), + rhs_extract_index(std::pair(StorageIndex{0}, StorageIndex{0})) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TiledMemory(const ThreadProperties &thread_properties, local_ptr block_start_ptr, + typename ::Eigen::internal::enable_if::type * = 0) + : lhs_scratch_extract{block_start_ptr}, + rhs_scratch_extract{lhs_scratch_extract.ptr + + ((Properties::DoubleBuffer + 1) * LSDL * Properties::TileSizeDimK)}, + lhs_scratch_ptr_compute(lhs_scratch_extract.ptr + thread_properties.mLocalOffset), + rhs_scratch_ptr_compute(rhs_scratch_extract.ptr + thread_properties.nLocalOffset), + lhs_extract_index( + 
local_id_extract(thread_properties.linearLocalThreadId)), + rhs_extract_index( + local_id_extract(thread_properties.linearLocalThreadId)) {} + }; + + Scratch scratch; + const LhsMapper lhs; + const RhsMapper rhs; + OutAccessor out_res; + const StorageIndex groupSizeM; + const StorageIndex groupSizeN; + const StorageIndex numTiles; + const TripleDim triple_dim; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionKernel(Scratch scratch_, const LhsMapper lhs_, + const RhsMapper rhs_, OutAccessor out_res_, + const StorageIndex groupSizeM_, + const StorageIndex groupSizeN_, + const StorageIndex numTiles_, + const TripleDim triple_dim_) + : scratch(scratch_), + lhs(lhs_), + rhs(rhs_), + out_res(out_res_), + groupSizeM(groupSizeM_), + groupSizeN(groupSizeN_), + numTiles(numTiles_), + triple_dim(triple_dim_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorContractionKernel(Scratch scratch_, const LhsMapper lhs_, + const RhsMapper rhs_, OutAccessor out_res_, + const StorageIndex groupSizeM_, + const StorageIndex numTiles_, + const TripleDim triple_dim_) + : TensorContractionKernel(scratch_, lhs_, rhs_, out_res_, groupSizeM_, 1, numTiles_, triple_dim_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + const StorageIndex linearLocalThreadId = itemID.get_local_id(0); + const StorageIndex nLocalThreadId = linearLocalThreadId / Properties::LocalThreadSizeM; + const StorageIndex mLocalThreadId = linearLocalThreadId % Properties::LocalThreadSizeM; + const StorageIndex mGroupId = itemID.get_group(0) % groupSizeM; + const StorageIndex tmp = itemID.get_group(0) / groupSizeM; + const StorageIndex nGroupId = IsFinal ? tmp : tmp % groupSizeN; + const StorageIndex kGroupId = IsFinal ? 
0 : tmp / groupSizeN; + const StorageIndex mGroupOffset = mGroupId * Properties::TileSizeDimM; + const StorageIndex nGroupOffset = nGroupId * Properties::TileSizeDimN; + const StorageIndex mLocalOffset = PacketSize * mLocalThreadId; + const StorageIndex nLocalOffset = NStride * nLocalThreadId; + const StorageIndex mGlobalOffset = mGroupOffset + mLocalOffset; + const StorageIndex nGlobalOffset = nGroupOffset + nLocalOffset; + + const StorageIndex kSizePerWG = IsFinal ? triple_dim.K : numTiles * Properties::TileSizeDimK; + StorageIndex kGroupOffset = kGroupId * kSizePerWG; + const bool is_internal = triple_dim.M - mGroupOffset >= Properties::TileSizeDimM && + triple_dim.N - nGroupOffset >= Properties::TileSizeDimN && + triple_dim.K - kGroupOffset >= kSizePerWG; + // this is used to adjust the last block + StorageIndex kSize = IsFinal ? triple_dim.K : std::min(kSizePerWG, triple_dim.K - kGroupOffset); + // This is used to find out the lats K offset so that kGroupOffset -kSize can compute the coffset for loading to + // tile + kGroupOffset += kSize; + + auto thread_properties = + ThreadProperties(linearLocalThreadId, kGroupId, mGroupOffset, nGroupOffset, kGroupOffset, + mLocalOffset, nLocalOffset, mGlobalOffset, nGlobalOffset, kSize, is_internal); + + auto out_ptr = out_res.get_pointer() + (IsFinal ? 0 : thread_properties.kGroupId * triple_dim.M * triple_dim.N); + + (thread_properties.is_internal) ? 
compute_panel(itemID, thread_properties, out_ptr) + : compute_panel(itemID, thread_properties, out_ptr); + } + // The compute block computes the contraction operation private block for each thread and store the resutl in the + // privateRes memory of Each computation the compute block function is independent of local and no local concepts as + // it only compute the block on each thread's private memory space + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_block_per_tile(OutScalar *lhs_block_ptr, OutScalar *rhs_block_ptr, + PacketReturnType *privateRes) { + StorageIndex idx = 0; + EIGEN_CONSTEXPR StorageIndex lhs_stride = + contraction_tp == contraction_type::local ? (PacketSize * Properties::LocalThreadSizeM) : 1; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTN = 0; wLPTN < Properties::WorkLoadPerThreadN; wLPTN++) { + auto rhsPacket = PacketReturnType{*(rhs_block_ptr + wLPTN)}; + StorageIndex lhs_index = 0; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTM = 0; wLPTM < Properties::WorkLoadPerThreadM / PacketSize; wLPTM++) { + PacketReturnType lhsPack{}; + Eigen::TensorSycl::internal::PacketWrapper::set_packet(lhsPack, + lhs_block_ptr + lhs_index); + privateRes[idx] = ::Eigen::internal::pmadd(lhsPack, rhsPacket, privateRes[idx]); + + lhs_index += lhs_stride; + idx++; + } + } + } + // The store function write the computed contraction operation in the private memory of each thread to the global + // memory. The store function is independent of local and no local concepts s that it can be abstract out in the base + // class. + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void store(OutPtr *out_ptr, PacketReturnType *privateRes, + StorageIndex mGlobalOffset, StorageIndex nGlobalOffset) { + auto chk_bound = [&](const StorageIndex &mIndex, const StorageIndex &nIndex) EIGEN_DEVICE_FUNC { + return (mIndex + PacketSize - 1 < triple_dim.M && nGlobalOffset + nIndex < triple_dim.N); + }; + // when local memory is not used M and N are both accessed in a coalesced way. 
However, when local memory is + // available the k*N is transposed in the local to N*K therefore, each blocks operates on blockId* + // WorkLoadPerThreadN slice of N + EIGEN_CONSTEXPR StorageIndex GlobalNStride = + contraction_tp == contraction_type::local ? 1 : Properties::LocalThreadSizeN; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTN = 0; wLPTN < Properties::WorkLoadPerThreadN / PrivateNStride; wLPTN++) { + // output leading dimension + StorageIndex outputLD = 0; + // When local memory is used the PrivateNstride is always 1 because the coalesed access on N is loaded into Local + // memory and extracting from local to global is the same as no transposed version. However, when local memory is + // not used and RHS is transposed we packetize the load for RHS. + EIGEN_UNROLL_LOOP + for (StorageIndex nId = 0; nId < PrivateNStride; nId++) { + StorageIndex globalRow = mGlobalOffset; + EIGEN_UNROLL_LOOP + for (StorageIndex wLPTM = 0; wLPTM < Properties::WorkLoadPerThreadM / PacketSize; wLPTM++) { + PacketReturnType privetOut = privateRes[wLPTM]; + if (check_boundary(chk_bound(globalRow, nId))) { + // Store the final results in C. 
The C matrix has always M as a first StorageIndex and N as a second + // StorageIndex Therefore it is always coalesced layout + write(privetOut, out_ptr + outputLD + globalRow); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex mId = 0; mId < PacketSize; mId++) { + StorageIndex mOffset = globalRow + mId; + if (mOffset < triple_dim.M && (nGlobalOffset + nId < triple_dim.N)) { + out_ptr[mOffset + outputLD] = + Eigen::TensorSycl::internal::PacketWrapper::scalarize(mId, privetOut); + } + } + } + globalRow += (PacketSize * Properties::LocalThreadSizeM); + } + outputLD += triple_dim.M; + privateRes += Properties::WorkLoadPerThreadM / PacketSize; + } + out_ptr += (GlobalNStride * outputLD); + + nGlobalOffset += (PrivateNStride * GlobalNStride); + } + } + // when no local memory is used the following extract_block will be enabled + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + extract_block(const Input &inpt, PrivateReg private_ptr, const std::pair &, + const StorageIndex &ncOffset, const StorageIndex cOffset) { + EIGEN_CONSTEXPR StorageIndex LocalThreadSizeNC = + InputBlockProperties::is_rhs ? Properties::LocalThreadSizeN : Properties::LocalThreadSizeM; + EIGEN_CONSTEXPR StorageIndex WorkLoadPerThreadNC = + InputBlockProperties::is_rhs ? Properties::WorkLoadPerThreadN : Properties::WorkLoadPerThreadM; + const StorageIndex &NC = InputBlockProperties::is_rhs ? triple_dim.N : triple_dim.M; + + auto chk_bound = [&](const StorageIndex &CIndex, const StorageIndex &NCIndex) EIGEN_DEVICE_FUNC { + return ((CIndex + InputBlockProperties::c_stride - 1 < triple_dim.K) && + (NCIndex + InputBlockProperties::nc_stride - 1 < NC)); + }; + const StorageIndex ld = InputBlockProperties::is_coalesced_layout ? 
NC : triple_dim.K; + StorageIndex cIndex = cOffset; + + EIGEN_UNROLL_LOOP + for (StorageIndex cId = 0; cId < Properties::TileSizeDimK / InputBlockProperties::c_stride; cId++) { + StorageIndex ncIndex = ncOffset; + EIGEN_UNROLL_LOOP + for (StorageIndex ncId = 0; ncId < WorkLoadPerThreadNC / InputBlockProperties::nc_stride; ncId++) { + if (check_boundary(chk_bound(cIndex, ncIndex))) { + auto val = + read(inpt, ncIndex, cIndex, ld); + + write(val, private_ptr); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < InputBlockProperties::elements_per_access; i++) { + const StorageIndex ncInd = ncIndex + (InputBlockProperties::is_coalesced_layout ? i : 0); + const StorageIndex cInd = cIndex + (InputBlockProperties::is_coalesced_layout ? 0 : i); + OutScalar val = + (ncInd < NC && cInd < triple_dim.K) + ? read( + inpt, ncInd, cInd, ld) + : OutScalar(0); + write( + val, private_ptr + (InputBlockProperties::is_coalesced_layout ? i : 0) + + ((InputBlockProperties::is_coalesced_layout ? 0 : i) * WorkLoadPerThreadNC)); + } + } + + // if it is lhs we have to load it packetised when the packet size is > 1, because the output is coalesced. So + // even if M is not accessed in a coalesced mode, we have to load packet_size number of m per thread. + ncIndex = (!InputBlockProperties::is_rhs && InputBlockProperties::nc_stride == 1 && PacketSize != 1) + ? 
ncOffset + (ncId + 1) % PacketSize + ((ncId + 1) / PacketSize) * LocalThreadSizeNC + : (ncIndex + InputBlockProperties::nc_stride * LocalThreadSizeNC); + private_ptr += InputBlockProperties::nc_stride; + } + // the previous for loop ( private_ptr += (ncId * nc_stride)) has already moved ptr with one WorkLoadPerThreadNC + private_ptr += (InputBlockProperties::c_stride - 1) * WorkLoadPerThreadNC; + cIndex += InputBlockProperties::c_stride; + } + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::pair local_id_extract( + const StorageIndex &linearLocalThreadId) { + const StorageIndex localThreadNC = + (InputBlockProperties::is_coalesced_layout) + ? linearLocalThreadId % (TileSizeDimNC / InputBlockProperties::nc_stride) + : linearLocalThreadId / (Properties::TileSizeDimK / InputBlockProperties::c_stride); + const StorageIndex localThreadC = + (InputBlockProperties::is_coalesced_layout) + ? linearLocalThreadId / (TileSizeDimNC / InputBlockProperties::nc_stride) + : linearLocalThreadId % (Properties::TileSizeDimK / InputBlockProperties::c_stride); + return std::pair(localThreadNC, localThreadC); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_mem(const cl::sycl::nd_item<1> &, bool &db_offset) noexcept { + db_offset = !db_offset; + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_mem(const cl::sycl::nd_item<1> &itemID, bool &) noexcept { + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_mem(const cl::sycl::nd_item<1> &, bool &) noexcept { + return; + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_thread(const cl::sycl::nd_item<1> & +#ifdef EIGEN_SYCL_ARM_GPU_CACHE_OPTIMISATION + itemID +#endif + ) noexcept { +#ifdef 
EIGEN_SYCL_ARM_GPU_CACHE_OPTIMISATION + itemID.barrier(cl::sycl::access::fence_spacce::local_space); +#else + return; +#endif + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + sync_thread(const cl::sycl::nd_item<1> &itemID) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type sync_thread( + const cl::sycl::nd_item<1> &) { + return; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_tile_per_panel(const cl::sycl::nd_item<1> &itemID, + ThreadProperties &thread_properties, + TiledMemory &tiled_input_block, + PacketReturnType *privateRes, bool &db_offset) { + // Tiling the Rhs block from global to local memory + extract_block( + rhs, tiled_input_block.rhs_scratch_extract.ptr + (db_offset * Properties::TileSizeDimK * LSDR), + tiled_input_block.rhs_extract_index, + contraction_tp == contraction_type::local ? thread_properties.nGroupOffset : thread_properties.nGlobalOffset, + thread_properties.kGroupOffset - thread_properties.kSize); + + sync_thread(itemID); + + // Tiling the Lhs block from global to local memory + extract_block( + lhs, tiled_input_block.lhs_scratch_extract.ptr + (db_offset * LSDL * Properties::TileSizeDimK), + tiled_input_block.lhs_extract_index, + contraction_tp == contraction_type::local ? 
thread_properties.mGroupOffset : thread_properties.mGlobalOffset, + thread_properties.kGroupOffset - thread_properties.kSize); + + // itemID.barrier(cl::sycl::access::fence_space::local_space); + sync_thread(itemID); + // switch to compute mode + StorageIndex lhs_offset = (db_offset * LSDL * Properties::TileSizeDimK); + StorageIndex rhs_offset = (db_offset * Properties::TileSizeDimK * LSDR); + // Loop over the values of a single tile + for (StorageIndex k = 0; k < Properties::TileSizeDimK; k++) { + compute_block_per_tile(tiled_input_block.lhs_scratch_ptr_compute + lhs_offset, + tiled_input_block.rhs_scratch_ptr_compute + rhs_offset, privateRes); + lhs_offset += LSDL; + rhs_offset += LSDR; + } + // computing the K index for the next tile + thread_properties.kSize -= Properties::TileSizeDimK; + sync_mem(itemID, db_offset); + } + + // when local memory is available the following compute_panel will be enabled + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_panel(const cl::sycl::nd_item<1> &itemID, + ThreadProperties &thread_properties, + OutPtr out_ptr) { + auto tiled_input_block = TiledMemory{thread_properties, scratch.get_pointer()}; + // Allocate register space + PacketReturnType privateRes[Properties::WorkLoadPerThreadM * Properties::WorkLoadPerThreadN / PacketSize] = { + PacketReturnType{0}}; + bool db_offset = 0; + + while (thread_properties.kSize >= Properties::TileSizeDimK) { + compute_tile_per_panel(itemID, thread_properties, tiled_input_block, privateRes, db_offset); + } + if (thread_properties.kSize > 0) { + compute_tile_per_panel(itemID, thread_properties, tiled_input_block, privateRes, db_offset); + } + + // Storing the final results in the output + store(1) : RHSBlockProperties::nc_stride>( + out_ptr + thread_properties.nGlobalOffset * triple_dim.M, privateRes, thread_properties.mGlobalOffset, + thread_properties.nGlobalOffset); + } + // When local memory is available the following extract_block will be enabled + template + 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename ::Eigen::internal::enable_if::type + extract_block(const Input &inpt, Local local_ptr, const std::pair& local_index, + const StorageIndex &ncOffset, const StorageIndex cOffset) { + EIGEN_CONSTEXPR StorageIndex TileSizeDimNC = + InputBlockProperties::is_rhs ? Properties::TileSizeDimN : Properties::TileSizeDimM; + EIGEN_CONSTEXPR StorageIndex LoadPerThread = + InputBlockProperties::is_rhs ? Properties::LoadPerThreadRhs : Properties::LoadPerThreadLhs; + EIGEN_CONSTEXPR StorageIndex LSD = InputBlockProperties::is_rhs ? LSDR : LSDL; + static_assert(((LocalOffset % (TileSizeDimNC / InputBlockProperties::nc_stride) == 0) && + (LocalOffset % (Properties::TileSizeDimK / InputBlockProperties::c_stride) == 0)), + " LocalOffset must be divisible by stride"); + const StorageIndex &NC = InputBlockProperties::is_rhs ? triple_dim.N : triple_dim.M; + StorageIndex localThreadNC = local_index.first; + StorageIndex localThreadC = local_index.second; + auto chk_bound = [&](const StorageIndex &CIndex, const StorageIndex &NCIndex) EIGEN_DEVICE_FUNC { + return ((CIndex + InputBlockProperties::c_stride - 1 < triple_dim.K) && + (NCIndex + InputBlockProperties::nc_stride - 1 < NC)); + }; + EIGEN_UNROLL_LOOP + for (StorageIndex lPT = 0; lPT < LoadPerThread / InputBlockProperties::elements_per_access; lPT++) { + const StorageIndex CIndex = cOffset + (InputBlockProperties::c_stride * localThreadC); + const StorageIndex NCIndex = ncOffset + (InputBlockProperties::nc_stride * localThreadNC); + const StorageIndex ld = InputBlockProperties::is_coalesced_layout ? 
NC : triple_dim.K; + if (check_boundary(chk_bound(CIndex, NCIndex))) { + auto val = + read(inpt, NCIndex, CIndex, ld); + write( + val, local_ptr + (InputBlockProperties::nc_stride * localThreadNC) + + (InputBlockProperties::c_stride * localThreadC * LSD)); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < InputBlockProperties::elements_per_access; i++) { + const StorageIndex nCInd = NCIndex + (InputBlockProperties::is_coalesced_layout ? i : 0); + const StorageIndex cInd = CIndex + (InputBlockProperties::is_coalesced_layout ? 0 : i); + OutScalar val = + (nCInd < NC && cInd < triple_dim.K) + ? read( + inpt, nCInd, cInd, ld) + : OutScalar(0); + + write( + val, local_ptr + (InputBlockProperties::nc_stride * localThreadNC) + + (InputBlockProperties::is_coalesced_layout ? i : 0) + + ((InputBlockProperties::c_stride * localThreadC + + (InputBlockProperties::is_coalesced_layout ? 0 : i)) * + LSD)); + } + } + localThreadNC += (InputBlockProperties::is_coalesced_layout) + ? LocalOffset % (TileSizeDimNC / InputBlockProperties::nc_stride) + : LocalOffset / (Properties::TileSizeDimK / InputBlockProperties::c_stride); + localThreadC += (InputBlockProperties::is_coalesced_layout) + ? LocalOffset / (TileSizeDimNC / InputBlockProperties::nc_stride) + : LocalOffset % (Properties::TileSizeDimK / InputBlockProperties::c_stride); + } + } +}; + +#ifndef EIGEN_SYCL_DISABLE_GEMV + +/*! + * \brief GeneralVectorTensor is a template class that provides Tensor -vector contraction operation, which is a special + * case of Tensor Tensor contraction. 
+ * + * \tparam OutScalar: determines the output scalar type + * + * \tparam OutAccessor: determines the sycl accessor type for output (please see the sycl-1.2.1 specification + * (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for accessor definition) + * + * \tparam VectorMapper: determines the tensor contraction mapper for the vector input (can be lhs or rhs) + * + * \tparam TensorMapper: determines the tensor contraction mapper for the tensor input (can be lhs or rhs) + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \tparam Properties: determines the Contraction Panel properties + * + * \tparam KFactor: determines the number of elements in K dimension in a Tile + * + * \tparam Vectorizable: determines whether or not the vectorization is enabled for the Eigen expression. + * + * \tparam is_lhs_vec: determines whether lhs is a vector or rhs is a vector + * + * \tparam IsFinal: determines if this is the final kernel. If so, the result will be written in a final output. + * Otherwise, the result of contraction will be written in a temporary buffer. 
+ * + * \param scratch: determines the local memory containing the vector block for each work-group + * + * \param vec: determines the vector input (tensor mapper) + * + * \param mat: determines the tensor input (tensor mapper) + * + * \param out_res: determines the output vector containing the contraction result + * + * \param nonContractGroupSize: a logical number determining the number of work-group for non-contracting dimension + * + * \param nonContractDim: determines the size of non contracting dimension for the flattened tensor + * + * \param contractDim: determines the size of the contracting dimension for the flattened tensor + * + */ +template +struct GeneralVectorTensor { + typedef typename Eigen::TensorSycl::internal::Vectorise::PacketReturnType + PacketReturnType; + static EIGEN_CONSTEXPR int PacketSize = + Eigen::TensorSycl::internal::Vectorise::PacketSize; + typedef cl::sycl::accessor Scratch; + + static EIGEN_CONSTEXPR StorageIndex OutScratchOffset = + KFactor * Properties::LocalThreadSizeC * Properties::LocalThreadSizeNC; + + // Since the access layout for a vector can always be coalesced, when LHS is a vector, we pass false and false to make + // sure that the !^ is true When RHS is a vector, we pass true and true to make sure that the !^ is true. 
+ typedef BlockProperties + VecBlockProperties; + + Scratch scratch; + const VectorMapper vec; + const TensorMapper mat; + OutAccessor out_res; + const StorageIndex nonContractGroupSize; + const StorageIndex nonContractDim; + const StorageIndex contractDim; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE GeneralVectorTensor(Scratch scratch_, const VectorMapper vec_, + const TensorMapper mat_, OutAccessor out_res_, + const StorageIndex nonContractGroupSize_, + const StorageIndex nonContractDim_, + const StorageIndex contractDim_) + : scratch(scratch_), + vec(vec_), + mat(mat_), + out_res(out_res_), + nonContractGroupSize(nonContractGroupSize_), + nonContractDim(nonContractDim_), + contractDim(contractDim_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + auto scratch_ptr = scratch.get_pointer(); + const StorageIndex linearLocalThreadId = itemID.get_local_id(0); + StorageIndex nonContractId = is_lhs_vec ? linearLocalThreadId / Properties::LocalThreadSizeC + : linearLocalThreadId % Properties::LocalThreadSizeNC; + StorageIndex contractId = is_lhs_vec ? linearLocalThreadId % Properties::LocalThreadSizeC + : linearLocalThreadId / Properties::LocalThreadSizeNC; + const StorageIndex cGroupSize = itemID.get_group_range(0) / nonContractGroupSize; + const StorageIndex nonContractGroupId = + is_lhs_vec ? itemID.get_group(0) / cGroupSize : itemID.get_group(0) % nonContractGroupSize; + const StorageIndex contractGroupId = + is_lhs_vec ? itemID.get_group(0) % cGroupSize : itemID.get_group(0) / nonContractGroupSize; + auto out_ptr = out_res.get_pointer() + (IsFinal ? 
0 : contractGroupId * nonContractDim); + + const StorageIndex nonContractGroupOffset = nonContractGroupId * Properties::TileSizeDimNC; + const StorageIndex contractGroupOffset = contractGroupId * Properties::TileSizeDimC; + auto outScratchIndex = nonContractId + contractId * Properties::LocalThreadSizeNC; + const StorageIndex globalNonContractDimOffset = nonContractGroupOffset + nonContractId; + const StorageIndex globalContractDimOffset = contractGroupOffset + contractId; + auto local_output = scratch_ptr + OutScratchOffset; + const bool is_internal = nonContractDim - nonContractGroupOffset >= Properties::TileSizeDimNC && + contractDim - contractGroupOffset >= Properties::TileSizeDimC; + is_internal + ? compute_panel(itemID, vec, mat, local_output, out_ptr, +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + scratch_ptr, contractGroupOffset, +#endif + nonContractGroupOffset, linearLocalThreadId, contractDim, nonContractDim, contractId, + nonContractId, globalContractDimOffset, globalNonContractDimOffset, outScratchIndex) + : compute_panel(itemID, vec, mat, local_output, out_ptr, +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + scratch_ptr, contractGroupOffset, +#endif + nonContractGroupOffset, linearLocalThreadId, contractDim, nonContractDim, contractId, + nonContractId, globalContractDimOffset, globalNonContractDimOffset, outScratchIndex); + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_panel( + const cl::sycl::nd_item<1> &itemID, const VectorMapper &vec, const TensorMapper &mat, OutScalar *local_output, + OutPtr out_ptr, +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + OutScalar *scratch_ptr, const StorageIndex contractGroupOffset, +#endif + const StorageIndex nonContractGroupOffset, const StorageIndex linearLocalThreadId, StorageIndex contractDim, + StorageIndex nonContractDim, StorageIndex contractId, StorageIndex nonContractId, + StorageIndex globalContractDimOffset, StorageIndex globalNonContractDimOffset, StorageIndex outScratchIndex) { + OutScalar 
outScalar[Properties::WorkLoadPerThreadNC] = {OutScalar(0)}; + // Reading the vector +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + const StorageIndex vectorOffset = contractGroupOffset + linearLocalThreadId; + extract_block(vec, scratch_ptr, linearLocalThreadId, + vectorOffset, contractDim); + + itemID.barrier(cl::sycl::access::fence_space::local_space); + auto in_scratch_ptr = scratch_ptr + contractId; +#endif + + StorageIndex privateOffsetC = 0; + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < Properties::WorkLoadPerThreadC; i++) { + StorageIndex privateOffsetNC = 0; + bool contract_conds = ((globalContractDimOffset + privateOffsetC) < contractDim); +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + auto vecScalar = *in_scratch_ptr; +#else + auto vecScalar = (check_boundary(contract_conds)) + ? vec(is_lhs_vec ? StorageIndex(0) : globalContractDimOffset + privateOffsetC, + is_lhs_vec ? globalContractDimOffset + privateOffsetC : StorageIndex(0)) + : OutScalar(0); +#endif + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + auto matScalar = (check_boundary( + contract_conds && ((globalNonContractDimOffset + privateOffsetNC) < nonContractDim))) + ? mat(is_lhs_vec ? globalContractDimOffset + privateOffsetC + : globalNonContractDimOffset + privateOffsetNC, + is_lhs_vec ? 
globalNonContractDimOffset + privateOffsetNC + : globalContractDimOffset + privateOffsetC) + : OutScalar(0); + + outScalar[j] = cl::sycl::mad(matScalar, vecScalar, outScalar[j]); + privateOffsetNC += Properties::LocalThreadSizeNC; + } + privateOffsetC += Properties::LocalThreadSizeC; +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + in_scratch_ptr += Properties::LocalThreadSizeC; +#endif + } + + auto out_scratch_ptr = local_output + outScratchIndex; + // Each block of 16*16 element in shared memory should reduce to 16*1 + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + *out_scratch_ptr = outScalar[j]; + + out_scratch_ptr += (Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC); + } + if (is_lhs_vec) { + nonContractId = linearLocalThreadId % Properties::LocalThreadSizeNC; + contractId = linearLocalThreadId / Properties::LocalThreadSizeNC; + outScratchIndex = nonContractId + contractId * Properties::LocalThreadSizeNC; + } + + out_scratch_ptr = local_output + outScratchIndex; + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + EIGEN_UNROLL_LOOP + for (StorageIndex offset = Properties::LocalThreadSizeC >> 1; offset > 0; offset >>= 1) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (contractId < offset) { + StorageIndex myNeigbourId = (Properties::LocalThreadSizeNC * offset); + *out_scratch_ptr += out_scratch_ptr[myNeigbourId]; + } + } + // moving to next 16 by 16 block + out_scratch_ptr += (Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC); + } + + if (contractId == 0) { + out_scratch_ptr = local_output + nonContractId; + StorageIndex global_final_offset = nonContractGroupOffset + nonContractId; + out_ptr += global_final_offset; + EIGEN_UNROLL_LOOP + for (StorageIndex j = 0; j < Properties::WorkLoadPerThreadNC; j++) { + if (check_boundary(global_final_offset < nonContractDim)) { + auto res = *out_scratch_ptr; + + *out_ptr = res; + out_ptr += 
Properties::LocalThreadSizeNC; + } + // moving to next 16 by 16 block to ge the next 16 reduced elements + out_scratch_ptr += (Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC); + if (!(is_internal_block)) global_final_offset += Properties::LocalThreadSizeNC; + } + } + } + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void extract_block(const Input &inpt, Local *local_ptr, + const StorageIndex &linearLocalThreadId, + const StorageIndex &cOffset, const StorageIndex &C) { + local_ptr += InputBlockProperties::c_stride * linearLocalThreadId; + StorageIndex cIndex = cOffset; + for (StorageIndex cId = 0; cId < CFactor / InputBlockProperties::c_stride; cId++) { + if (check_boundary(cIndex + InputBlockProperties::c_stride - 1 < C)) { + auto val = read(inpt, StorageIndex(0), + cIndex, StorageIndex(1)); + write(val, local_ptr); + } else { + EIGEN_UNROLL_LOOP + for (StorageIndex i = 0; i < InputBlockProperties::elements_per_access; i++) { + OutScalar val = + (cIndex + i < C) + ? read( + inpt, StorageIndex(0), cIndex + i, StorageIndex(1)) + : OutScalar(0); + write(val, local_ptr + i); + } + } + local_ptr += InputBlockProperties::c_stride * GroupSize; + cIndex += InputBlockProperties::c_stride * GroupSize; + } + } +}; +#endif + +#ifndef EIGEN_SYCL_DISABLE_SCALAR + +/*! + * \brief GeneralScalarContraction is a template class that provides the scalar value of Tensor -Tensor contraction + * operation, when all the dimensions are contracting dimensions. 
This Kernel reduces two tensors to an scalar + * + * \tparam OutScalar: determines the output scalar type + * + * \tparam LhsScalar: determines the left-hand-side scalar type + * + * \tparam RhsScalar: determines the right-hand-side scalar type + * + * \tparam OutAccessor: determines the sycl accessor type for out put (please see the sycl-1.2.1 specification + * (https://www.khronos.org/registry/SYCL/specs/sycl-1.2.1.pdf) for accessor definition) + * + * \tparam LhsMapper: determines the tensor contraction mapper type for left-hand-side matrix + * + * \tparam RhsMapper: determines the tensor contraction mapper type for right-hand-side matrix + * + * \tparam StorageIndex: determines the StorageIndex Type + * + * \tparam Vectorizable: determines whether or not the vectorization is enabled for the Eigen expression. + * + * \param scratch: local memory containing tiles of LHS and RHS tensors for each work-group + * + * \param lhs: determines the left-hand-side flattened tensor (tensor mapper) + * + * \param rhs: determines the right-hand-side flattened tensor (tensor mapper) + * + * \param out_res: determines the output tensor containing the contraction result + * + * \param rng: determins the total input data size + */ +template +struct GeneralScalarContraction { + typedef cl::sycl::accessor Scratch; + Scratch scratch; + const LhsMapper lhs; + const RhsMapper rhs; + OutAccessor out_res; + const StorageIndex rng; + + EIGEN_DEVICE_FUNC + GeneralScalarContraction(Scratch scratch_, const LhsMapper lhs_, const RhsMapper rhs_, OutAccessor out_res_, + const StorageIndex rng_) + : scratch(scratch_), lhs(lhs_), rhs(rhs_), out_res(out_res_), rng(rng_) {} + + EIGEN_DEVICE_FUNC void operator()(cl::sycl::nd_item<1> itemID) { + auto out_ptr = out_res.get_pointer(); + auto scratch_ptr = scratch.get_pointer().get(); + + StorageIndex globalid = itemID.get_global_id(0); + StorageIndex localid = itemID.get_local_id(0); + OutScalar accumulator = OutScalar(0); + for (StorageIndex i = 
globalid; i < rng; i += itemID.get_global_range(0)) { + accumulator = cl::sycl::mad(lhs(0, i), rhs(i, 0), accumulator); + } + auto out_scratch_ptr = scratch_ptr + localid; + *out_scratch_ptr = accumulator; + for (StorageIndex offset = itemID.get_local_range(0) >> 1; offset > 0; offset >>= 1) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + *out_scratch_ptr = (accumulator += out_scratch_ptr[offset]); + } + } + if (localid == 0) { + out_ptr[itemID.get_group(0)] = accumulator; + } + } +}; +#endif + +} // namespace internal +} // namespace TensorSycl + +template +struct TensorEvaluator, + Eigen::SyclDevice> + : public TensorContractionEvaluatorBase, Eigen::SyclDevice>> { + static_assert(std::is_same::value, + "SYCL tensor contraction does not support output kernels."); + + typedef Eigen::SyclDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index StorageIndex; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Base::Storage Storage; + typedef typename Base::EvaluatorPointerType EvaluatorPointerType; + struct TripleDim { + const StorageIndex M; + const StorageIndex N; + const StorageIndex K; + TripleDim(const StorageIndex M_, const StorageIndex N_, const StorageIndex K_) : M(M_), N(N_), K(K_) {} + }; + enum { + Layout = TensorEvaluator::Layout, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + }; + + static EIGEN_CONSTEXPR int LDims = Base::LDims; + static EIGEN_CONSTEXPR int RDims = Base::RDims; + static EIGEN_CONSTEXPR int ContractDims = Base::ContractDims; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int 
NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + typedef typename Eigen::internal::remove_const::type LhsScalar; + typedef typename Eigen::internal::remove_const::type RhsScalar; + + typedef typename LeftEvaluator::Dimensions LeftDimensions; + typedef typename RightEvaluator::Dimensions RightDimensions; + + template + struct input_mapper_propertis { + static EIGEN_CONSTEXPR bool is_lhs_matrix = (LDims == 2 && ContractDims == 1) || lhs_inner_dim_contiguous; + static EIGEN_CONSTEXPR bool is_rhs_matrix = + (RDims == 2 && ContractDims == 1) || (rhs_inner_dim_contiguous && !rhs_inner_dim_reordered); + }; + + TensorEvaluator(const XprType &op, const Device &device) : Base(op, device) {} + + // We need to redefine this method to make nvcc happy + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(typename Base::EvaluatorPointerType data) { + this->m_leftImpl.evalSubExprsIfNeeded(NULL); + this->m_rightImpl.evalSubExprsIfNeeded(NULL); + if (!data) { + this->m_result = this->m_device.get( + static_cast(this->m_device.allocate_temp(this->dimensions().TotalSize() * sizeof(Scalar)))); + data = this->m_result; + } + evalToSycl(data); + return (this->m_result != NULL); + } + const Eigen::SyclDevice &device() const { return this->m_device; } + void evalToSycl(typename Base::EvaluatorPointerType buffer) const { + if (this->m_lhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } + } else { + if (this->m_rhs_inner_dim_contiguous) { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } else { + if (this->m_rhs_inner_dim_reordered) { + evalTyped(buffer); + } else { + evalTyped(buffer); + } + } + } + } + + 
template + void evalTyped(typename Base::EvaluatorPointerType buffer) const { + const auto triple_dim = TripleDim{this->m_i_size, this->m_j_size, this->m_k_size}; + typedef internal::TensorContractionInputMapper< + LhsScalar, StorageIndex, internal::Lhs, LeftEvaluator, left_nocontract_t, contract_t, + PacketType::size, lhs_inner_dim_contiguous, false, Unaligned, MakeSYCLPointer> + LhsMapper; + + typedef internal::TensorContractionInputMapper::size, rhs_inner_dim_contiguous, + rhs_inner_dim_reordered, Unaligned, MakeSYCLPointer> + RhsMapper; + + // initialize data mappers + LhsMapper lhs(this->m_leftImpl, this->m_left_nocontract_strides, this->m_i_strides, + this->m_left_contracting_strides, this->m_k_strides); + + RhsMapper rhs(this->m_rightImpl, this->m_right_nocontract_strides, this->m_j_strides, + this->m_right_contracting_strides, this->m_k_strides); + +#ifndef EIGEN_SYCL_DISABLE_SCALAR + if (triple_dim.M == 1 && triple_dim.N == 1) { + launchSC(buffer, lhs, rhs, triple_dim.K); + } else +#endif +#ifndef EIGEN_SYCL_DISABLE_GEMV + if (triple_dim.M != 1 && triple_dim.N == 1) { + LaunchVT(buffer, rhs, lhs, triple_dim.M, triple_dim.K); + } else if (triple_dim.M == 1 && triple_dim.N != 1) { + LaunchVT(buffer, lhs, rhs, triple_dim.N, triple_dim.K); + } else // This is equivalent of if (m!=1 && n!=1) +#endif + { + typedef input_mapper_propertis + inpt_mapper_properties; +#ifndef EIGEN_SYCL_DISABLE_SKINNY + bool skinny = false; + auto platform_name = this->device().getPlatformName(); + // This is based on empirical calculation for AMD r9-nano and Fiji + if (platform_name.find("AMD") == 0) { + skinny = (triple_dim.M < triple_dim.K || triple_dim.N < triple_dim.K) && + ((triple_dim.M < 1024 && triple_dim.N < 1024) || + (uint64_t(triple_dim.M * triple_dim.N) < uint64_t(triple_dim.K))); + } else { + skinny = (((std::max(triple_dim.K, triple_dim.N) / std::min(triple_dim.K, triple_dim.N)) > 100) || + ((std::max(triple_dim.K, triple_dim.M) / std::min(triple_dim.K, triple_dim.M)) 
> 100) || + ((std::max(triple_dim.N, triple_dim.M) / std::min(triple_dim.N, triple_dim.M)) > 100)); + } + if (skinny) + adjustTT(buffer, lhs, rhs, triple_dim); + else +#endif // EIGEN_SYCL_DISABLE_SKINNY + adjustTT(buffer, lhs, rhs, triple_dim); + } + } + + template + void EIGEN_ALWAYS_INLINE adjustTT(EvaluatorPointerType buffer, const LhsMapper &lhs, const RhsMapper &rhs, + const TripleDim &triple_dim) const { +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON + if (device().has_local_memory()) { + typedef TensorSycl::internal::TTPanelSize PanelParameters; + launchTT( + buffer, lhs, rhs, triple_dim); + } +#endif +#ifdef EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF + if (!(device().has_local_memory())) { + typedef TensorSycl::internal::TTPanelSize PanelParameters; + launchTT( + buffer, lhs, rhs, triple_dim); + } +#endif + } + + template + void launchTT(EvaluatorPointerType buffer, const LhsMapper &lhs, const RhsMapper &rhs, + const TripleDim &triple_dim) const { + const StorageIndex roundUpM = Eigen::TensorSycl::internal::roundUp(triple_dim.M, Properties::TileSizeDimM); + const StorageIndex roundUpN = Eigen::TensorSycl::internal::roundUp(triple_dim.N, Properties::TileSizeDimN); + const StorageIndex groupSizeM = roundUpM / Properties::TileSizeDimM; + const StorageIndex groupSizeN = roundUpN / Properties::TileSizeDimN; + + const StorageIndex roundUpK = Eigen::TensorSycl::internal::roundUp(triple_dim.K, Properties::TileSizeDimK); + StorageIndex totalTilesK = roundUpK / Properties::TileSizeDimK; + StorageIndex groupSizeK = + skinny + ? 
std::max(std::min(totalTilesK, + (StorageIndex)(device().getPowerOfTwo(device().getNumSyclMultiProcessors(), true) * 4) / + (groupSizeM * groupSizeN)), + StorageIndex(1)) + : StorageIndex(1); + + const StorageIndex numTilesPerGroup = Eigen::TensorSycl::internal::roundUp(totalTilesK, groupSizeK) / groupSizeK; + + const StorageIndex totalGroupSize = groupSizeM * groupSizeN * groupSizeK; + + const StorageIndex localRange = Properties::LocalThreadSizeM * Properties::LocalThreadSizeN; + const StorageIndex globalRange = totalGroupSize * localRange; + + const StorageIndex scratchSize = (ct == TensorSycl::internal::contraction_type::local) + ? ((Properties::DoubleBuffer + 1) * + (Properties::TileSizeDimM + Properties::BC) * (Properties::TileSizeDimK)) + + ((Properties::DoubleBuffer + 1) * (Properties::TileSizeDimK) * + (Properties::TileSizeDimN + Properties::BC)) + : StorageIndex(1); + + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(globalRange), cl::sycl::range<1>(localRange)); + if (groupSizeK == 1) { + typedef TensorSycl::internal::TensorContractionKernel + ContractKernelName; + device().template binary_kernel_launcher( + lhs, rhs, buffer, thread_range, scratchSize, groupSizeM, groupSizeN, numTilesPerGroup, triple_dim); + } else { + typedef TensorSycl::internal::TensorContractionKernel + ContractKernelName; + CoeffReturnType *temp_pointer = static_cast( + device().allocate_temp(triple_dim.M * triple_dim.N * groupSizeK * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = device().get(temp_pointer); + + device().template binary_kernel_launcher( + lhs, rhs, tmp_global_accessor, thread_range, scratchSize, groupSizeM, groupSizeN, numTilesPerGroup, + triple_dim); + + typedef Eigen::internal::SumReducer Op; + auto op = Op(); + typedef TensorSycl::internal::SecondStepPartialReduction + ReductionKernel; + + device().template unary_kernel_launcher( + tmp_global_accessor, buffer, + cl::sycl::nd_range<1>(cl::sycl::range<1>(StorageIndex( + 
Eigen::TensorSycl::internal::roundUp(triple_dim.M * triple_dim.N, localRange))), + cl::sycl::range<1>(localRange)), + StorageIndex(1), op, StorageIndex(triple_dim.M * triple_dim.N), groupSizeK); + + device().deallocate_temp(temp_pointer); + } + } + +#ifndef EIGEN_SYCL_DISABLE_GEMV + template + void EIGEN_ALWAYS_INLINE LaunchVT(EvaluatorPointerType buffer, const VectorMapper &vec, const TensorMapper &mat, + StorageIndex NC, StorageIndex C) const { + const StorageIndex nonContractDim = NC; + EIGEN_CONSTEXPR StorageIndex NCFactor = 1; + EIGEN_CONSTEXPR StorageIndex CFactor = 1; + EIGEN_CONSTEXPR StorageIndex NCWindow = 16; + typedef Eigen::TensorSycl::internal::TVPanelSize + Properties; + const StorageIndex roundUpC = Eigen::TensorSycl::internal::roundUp(C, Properties::TileSizeDimC); + const StorageIndex cNumGroups = roundUpC / (Properties::LocalThreadSizeC * Properties::WorkLoadPerThreadC); + const StorageIndex roundUpNC = Eigen::TensorSycl::internal::roundUp(nonContractDim, Properties::TileSizeDimNC); + const StorageIndex nCNumGroups = roundUpNC / (Properties::LocalThreadSizeNC * Properties::WorkLoadPerThreadNC); + const StorageIndex globalRange = + (roundUpNC / (Properties::WorkLoadPerThreadNC)) * (roundUpC / (Properties::WorkLoadPerThreadC)); + const StorageIndex localRange = Properties::LocalThreadSizeNC * Properties::LocalThreadSizeC; + const StorageIndex scratchSize = + (Properties::WorkLoadPerThreadNC + CFactor) * Properties::LocalThreadSizeC * Properties::LocalThreadSizeNC; + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(globalRange), cl::sycl::range<1>(localRange)); + if (cNumGroups > 1) { + typedef Eigen::TensorSycl::internal::GeneralVectorTensor + ContractKernelName; + CoeffReturnType *temp_pointer = + static_cast(device().allocate_temp(nonContractDim * cNumGroups * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = device().get(temp_pointer); + + device().template binary_kernel_launcher( + vec, mat, tmp_global_accessor, 
thread_range, scratchSize, nCNumGroups, nonContractDim, C); + + typedef Eigen::internal::SumReducer Op; + typedef TensorSycl::internal::SecondStepPartialReduction + ReductionKernel; + + device().template unary_kernel_launcher( + tmp_global_accessor, buffer, + cl::sycl::nd_range<1>(cl::sycl::range<1>(Eigen::TensorSycl::internal::roundUp(nonContractDim, localRange)), + cl::sycl::range<1>(localRange)), + StorageIndex(1), Op(), nonContractDim, cNumGroups); + + device().deallocate_temp(temp_pointer); + } else { + typedef Eigen::TensorSycl::internal::GeneralVectorTensor + ContractKernelName; + device().template binary_kernel_launcher( + vec, mat, buffer, thread_range, scratchSize, nCNumGroups, nonContractDim, C); + } + } +#endif + +#ifndef EIGEN_SYCL_DISABLE_SCALAR + template + EIGEN_ALWAYS_INLINE void launchSC(EvaluatorPointerType buffer, const LhsMapper &lhs, const RhsMapper &rhs, + StorageIndex K) const { + EIGEN_STATIC_ASSERT(!((EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1) & + (EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1 - 1)), + "The Local thread size must be a power of 2 for the reduction " + "operation"); + EIGEN_CONSTEXPR StorageIndex local_range = EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1; + + // Here we force the code not to be more than 2-step reduction: Our empirical research shows that if each thread + // reduces at least 512 elementss individually, we get better performance. + const StorageIndex num_work_group = ((K + (512 * local_range - 1)) / (512 * local_range) > 1 ? 
local_range : 1); + const StorageIndex global_range = num_work_group * local_range; + + typedef Eigen::TensorSycl::internal::GeneralScalarContraction< + CoeffReturnType, LhsScalar, RhsScalar, EvaluatorPointerType, LhsMapper, RhsMapper, StorageIndex, false> + ContractKernelName; + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(global_range), cl::sycl::range<1>(local_range)); + if (num_work_group > 1) { + CoeffReturnType *temp_pointer = + static_cast(device().allocate_temp(num_work_group * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = device().get(temp_pointer); + device().template binary_kernel_launcher(lhs, rhs, tmp_global_accessor, + thread_range, local_range, K); + typedef Eigen::internal::SumReducer Op; + typedef TensorSycl::internal::SecondStepFullReducer + GenericRKernel; + device().template unary_kernel_launcher( + tmp_global_accessor, buffer, + cl::sycl::nd_range<1>(cl::sycl::range<1>(local_range), cl::sycl::range<1>(local_range)), local_range, Op()); + + device().deallocate_temp(temp_pointer); + } else { + device().template binary_kernel_launcher(lhs, rhs, buffer, thread_range, + local_range, K); + } + } +#endif + + EIGEN_STRONG_INLINE void cleanup() { + this->m_leftImpl.cleanup(); + this->m_rightImpl.cleanup(); + + if (this->m_result) { + this->m_device.deallocate_temp(this->m_result); + this->m_result = NULL; + } + } + // The placeholder accessors must bound to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + this->m_leftImpl.bind(cgh); + this->m_rightImpl.bind(cgh); + this->m_result.bind(cgh); + } +}; +} // namespace Eigen +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_SYCL_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h new file mode 100644 index 0000000..21be6ea --- /dev/null +++ 
b/external/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h @@ -0,0 +1,1679 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H + +// evaluator for thread pool device +#ifdef EIGEN_USE_THREADS + +namespace Eigen { + +template +struct TensorEvaluator, ThreadPoolDevice> : + public TensorContractionEvaluatorBase, ThreadPoolDevice> > { + + typedef ThreadPoolDevice Device; + + typedef TensorEvaluator, Device> Self; + typedef TensorContractionEvaluatorBase Base; + + typedef TensorContractionOp XprType; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + enum { + Layout = TensorEvaluator::Layout, + }; + + // Most of the code is assuming that both input tensors are ColMajor. If the + // inputs are RowMajor, we will "cheat" by swapping the LHS and RHS: + // If we want to compute A * B = C, where A is LHS and B is RHS, the code + // will pretend B is LHS and A is RHS. 
+ typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType; + typedef typename internal::conditional< + static_cast(Layout) == static_cast(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType; + + static const int LDims = + internal::array_size::Dimensions>::value; + static const int RDims = + internal::array_size::Dimensions>::value; + static const int ContractDims = internal::array_size::value; + + typedef array left_dim_mapper_t; + typedef array right_dim_mapper_t; + + typedef array contract_t; + typedef array left_nocontract_t; + typedef array right_nocontract_t; + + static const int NumDims = LDims + RDims - 2 * ContractDims; + + typedef DSizes Dimensions; + + // typedefs needed in evalTo + typedef typename internal::remove_const::type LhsScalar; + typedef typename internal::remove_const::type RhsScalar; + typedef typename internal::gebp_traits Traits; + + typedef TensorEvaluator LeftEvaluator; + typedef TensorEvaluator RightEvaluator; + + TensorEvaluator(const XprType& op, const Device& device) : + Base(op, device) {} + + template + void evalProduct(Scalar* buffer) const { + evalProductImpl(buffer, NoCallback()); + } + + template + void evalProductAsync(Scalar* buffer, EvalToCallback done) const { + evalProductImpl(buffer, std::move(done)); + } + + template + void evalProductImpl(Scalar* buffer, DoneCallback done) const { + // This function computes a lot of heuristics in multiple steps, and it + // also has multiple exit points. To keep it sane, readable and all in one + // place, sync/async execution decision is made at runtime at the very end. + // + // (1) In sync mode we allocate Context on the stack, submit computations + // to the device thread pool, and block on a barrier until it is + // completed. 
+ // + // (2) In async mode we allocate Context on the heap, and after all tasks + // are finished, we call provided the done callback, and delete a + // context from the heap. + // + // (*) EvalParallelContext & EvalShardedByInnerDimContext owns all the state + // and temporary buffers, requried for executing the tensor contraction. + // They are responsible for cleaning it up after contraction is done. + static const bool IsEvalInSyncMode = + std::is_same::value; + + const Index m = this->m_i_size; + const Index n = this->m_j_size; + const Index k = this->m_k_size; + if (m == 0 || n == 0 || k == 0) return; + + // Compute a set of algorithm parameters: + // - kernel block sizes (bm, bn, bk) + // - task grain sizes (number of kernels executed per task: gm, gn) + // - number of threads + // - sharding by row/column + // - parallel packing or first lhs then rhs + // and some derived parameters: + // - number of tasks (nm, nn, nk) + // - number of kernels (nm0, nn0) + // Unfortunately, all these parameters are tightly interdependent. + // So in some cases we first compute approximate values, then compute other + // values based on these approximations and then refine the approximations. + + // There are lots of heuristics here. There is some reasoning behind them, + // but ultimately they are just tuned on contraction benchmarks for + // different input configurations, thread counts and instruction sets. + // So feel free to question any of them. + + // Compute whether we want to shard by row or by column. + // This is a first approximation, it will be refined later. Since we don't + // know number of threads yet we use 2, because what's we are most + // interested in at this point is whether it makes sense to use + // parallelization at all or not. + bool shard_by_col = shardByCol(m, n, 2); + + // First approximation of kernel blocking sizes. + // Again, we don't know number of threads yet, so we use 2. 
+ Index bm, bn, bk; + if (shard_by_col) { + internal::TensorContractionBlocking + blocking(k, m, n, 2); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } else { + internal::TensorContractionBlocking + blocking(k, m, n, 2); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } + + // Compute optimal number of threads. + // Note: we use bk instead of k here because we are interested in amount of + // _parallelizable_ computations, and computations are not parallelizable + // across k dimension. + const TensorOpCost cost = + contractionCost(m, n, bm, bn, bk, shard_by_col, false); + int num_threads = TensorCostModel::numThreads( + static_cast(n) * m, cost, this->m_device.numThreads()); + int num_threads_by_k = numThreadsInnerDim(m, n, k); + if (shardByInnerDim(m, n, k, num_threads, num_threads_by_k)) { + // We are in the scenario where it is more effective to shard by the + // inner dimension. + if (IsEvalInSyncMode) { + EvalShardedByInnerDimContext ctx( + this, num_threads_by_k, buffer, m, n, k, std::move(done)); + ctx.template run(); + } else { + auto* ctx = new EvalShardedByInnerDimContext( + this, num_threads_by_k, buffer, m, n, k, std::move(done)); + ctx->template runAsync(); + } + + return; + } + + // TODO(dvyukov): this is a stop-gap to prevent regressions while the cost + // model is not tuned. Remove this when the cost model is tuned. + if (n == 1) num_threads = 1; + + if (num_threads == 1) { + TENSOR_CONTRACTION_DISPATCH(this->template evalProductSequential, + Unaligned, (buffer)); + if (!IsEvalInSyncMode) done(); + return; + } + + // Now that we know number of threads, recalculate sharding and blocking. 
+ shard_by_col = shardByCol(m, n, num_threads); + if (shard_by_col) { + internal::TensorContractionBlocking + blocking(k, m, n, num_threads); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } else { + internal::TensorContractionBlocking + blocking(k, m, n, num_threads); + bm = blocking.mc(); + bn = blocking.nc(); + bk = blocking.kc(); + } + + // Number of kernels for each dimension. + Index nm0 = divup(m, bm); + Index nn0 = divup(n, bn); + Index nk = divup(k, bk); + + // Calculate task grain size (number of kernels executed per task). + // This task size coarsening serves two purposes: + // 1. It reduces per-task overheads including synchronization overheads. + // 2. It allows to use caches better (reuse the same packed rhs in several + // consecutive kernels). + Index gm = 1; + Index gn = 1; + // If we are sharding by column, then we prefer to reduce rows first. + if (shard_by_col) { + gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col); + gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col); + } else { + gn = coarsenN(m, n, bm, bn, bk, gm, num_threads, shard_by_col); + gm = coarsenM(m, n, bm, bn, bk, gn, num_threads, shard_by_col); + } + // Number of tasks in each dimension. + Index nm = divup(nm0, gm); + Index nn = divup(nn0, gn); + + // If there is enough concurrency in the sharding dimension, we choose not + // to paralellize by the other dimension, and execute all kernels in sync + // mode. This reduces parallelism from the nm x nn down to nn + // (shard_by_col==true) or nm (shard_by_col==false). + const Index sharding_dim_tasks = shard_by_col ? nn : nm; + const int num_worker_threads = this->m_device.numThreadsInPool(); + + // With small number of threads we want to make sure that we do not reduce + // parallelism too much. With large number of threads we trade maximum + // parallelism for better memory locality. + const float oversharding_factor = + num_worker_threads <= 4 ? 8.0 : + num_worker_threads <= 8 ? 
4.0 : + num_worker_threads <= 16 ? 2.0 : + num_worker_threads <= 32 ? 1.0 : + num_worker_threads <= 64 ? 0.8 : /* num_worker_threads > 64 */ 0.6; + + const bool parallelize_by_sharding_dim_only = + sharding_dim_tasks >= oversharding_factor * num_worker_threads; + + // Last by not least, decide whether we want to issue both lhs and rhs + // packing in parallel; or issue lhs packing first, and then issue rhs + // packing when lhs packing completes (for !shard_by_col lhs and rhs are + // swapped). Parallel packing allows more parallelism (for both packing and + // kernels), while sequential packing provides better locality (once + // a thread finishes rhs packing it proceed to kernels with that rhs). + // First, we are interested in parallel packing if there are few tasks. + bool parallel_pack = num_threads >= nm * nn; + // Also do parallel packing if all data fits into L2$. + if (m * bk * Index(sizeof(LhsScalar)) + n * bk * Index(sizeof(RhsScalar)) <= + l2CacheSize() * num_threads) + parallel_pack = true; + // But don't do it if we will use each rhs only once. Locality seems to be + // more important in this case. + if ((shard_by_col ? nm : nn) == 1) parallel_pack = false; + // Also don't get in the way of parallelize_by_sharding_dim_only + // optimization. + if (parallelize_by_sharding_dim_only) parallel_pack = false; + + // TODO(ezhulnev): With if contexpr we don't need SyncEvalParallelContext. 
+ if (IsEvalInSyncMode) { +#define CONTEXT_ARGS \ + (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, \ + nn0, shard_by_col, parallel_pack, parallelize_by_sharding_dim_only, \ + NoCallback()) \ + .run() + TENSOR_CONTRACTION_DISPATCH(SyncEvalParallelContext, Alignment, + CONTEXT_ARGS); +#undef CONTEXT_ARGS + + } else { +#define CONTEXT_ARGS \ + (this, num_threads, buffer, m, n, k, bm, bn, bk, nm, nn, nk, gm, gn, nm0, \ + nn0, shard_by_col, parallel_pack, parallelize_by_sharding_dim_only, \ + std::move(done)) + TENSOR_CONTRACTION_ASYNC_DISPATCH(EvalParallelContext, DoneCallback, + Alignment, CONTEXT_ARGS, run()); +#undef CONTEXT_ARGS + } + } + + // ------------------------------------------------------------------------ // + + // Dummy struct to represent an empty DoneCallback. + + struct NoCallback { + void operator()() { + eigen_assert(false && "NoCallback should never be called"); + } + }; + + // ------------------------------------------------------------------------ // + + template + class EvalParallelNotification; + + // Synchronous evaluation notification that blocks caller thread in Wait(). + template + class EvalParallelNotification { + public: + EvalParallelNotification(Context*, NoCallback) {} + void Notify() { done_.Notify(); } + void Wait() { done_.Wait(); } + private: + Eigen::Notification done_; + }; + + // Asynchronous evaluation notification that does not block in Wait(). + template + class EvalParallelNotification { + public: + EvalParallelNotification(Context* ctx, DoneCallback done) + : ctx_(ctx), done_(std::move(done)) {} + + void Notify() { + // Make a copy of done callback, because it will be destructed when we + // will delete context in the next line (EvalParallelNotification is a + // data member of EvalParallelContext class). + DoneCallback done_copy = std::move(done_); + + // Delete parallel evaluation context. + delete ctx_; + + // Now safely call the done callback. 
+ done_copy(); + } + + void Wait() {} + + private: + Context* ctx_; + DoneCallback done_; + }; + + // Context orchestrates sync/async parallel contraction evaluation. When it is + // executed in asynchronous mode, it owns all the shared state that might be + // accessible by block packing and kernel tasks. + + template + class EvalParallelContext { + public: + typedef internal::TensorContractionInputMapper< + LhsScalar, Index, internal::Lhs, LeftEvaluator, left_nocontract_t, + contract_t, internal::packet_traits::size, + lhs_inner_dim_contiguous, false, Unaligned> + LhsMapper; + typedef internal::TensorContractionInputMapper< + RhsScalar, Index, internal::Rhs, RightEvaluator, right_nocontract_t, + contract_t, internal::packet_traits::size, + rhs_inner_dim_contiguous, rhs_inner_dim_reordered, Unaligned> + RhsMapper; + + typedef internal::blas_data_mapper OutputMapper; + + typedef internal::TensorContractionKernel< + Scalar, LhsScalar, RhsScalar, Index, OutputMapper, LhsMapper, RhsMapper> + TensorContractionKernel; + + typedef typename TensorContractionKernel::LhsBlock LhsBlock; + typedef typename TensorContractionKernel::RhsBlock RhsBlock; + typedef typename TensorContractionKernel::BlockMemHandle BlockMemHandle; + + EvalParallelContext(const Self* self, int num_threads, Scalar* buffer, + Index tm, Index tn, Index tk, Index bm, Index bn, + Index bk, Index nm, Index nn, Index nk, Index gm, + Index gn, Index nm0, Index nn0, bool shard_by_col, + bool parallel_pack, + bool parallelize_by_sharding_dim_only, + DoneCallback done) + : created_by_thread_id_(std::this_thread::get_id()), + done_(this, std::move(done)), + device_(self->m_device), + lhs_(self->m_leftImpl, self->m_left_nocontract_strides, + self->m_i_strides, self->m_left_contracting_strides, + self->m_k_strides), + rhs_(self->m_rightImpl, self->m_right_nocontract_strides, + self->m_j_strides, self->m_right_contracting_strides, + self->m_k_strides), + buffer_(buffer), + output_(buffer, tm), + 
output_kernel_(self->m_output_kernel), + tensor_contraction_params_(self->m_tensor_contraction_params), + num_threads_(num_threads), + shard_by_col_(shard_by_col), + parallel_pack_(parallel_pack), + parallelize_by_sharding_dim_only_(parallelize_by_sharding_dim_only), + m_(tm), + n_(tn), + k_(tk), + bm_(bm), + bn_(bn), + bk_(bk), + nm_(nm), + nn_(nn), + nk_(nk), + gm_(gm), + gn_(gn), + nm0_(nm0), + nn0_(nn0), + kernel_(m_, k_, n_, bm_, bk_, bn_), + num_thread_local_allocations_(0), + // We reserve 2X more capacity for a thread local values, than the + // number of threads in the pool to efficiently handle task stealing + // by threads that are not managed by the pool. + thread_local_capacity(2 * (parallelize_by_sharding_dim_only_ + ? device_.numThreadsInPool() + : 0)), + // We will use only one of the Lhs/Rhs thread local storage depending + // on the shard_by_col value and we parallelize by sharding dim ONLY. + lhs_thread_local_blocks_(shard_by_col_ ? 0 : thread_local_capacity, + {*this}, {*this}), + rhs_thread_local_blocks_(shard_by_col_ ? thread_local_capacity : 0, + {*this}, {*this}) { + // These two options are mutually exclusive. + eigen_assert(!(parallel_pack && parallelize_by_sharding_dim_only)); + + for (Index x = 0; x < P; x++) { + // Normal number of notifications for k slice switch is + // nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only + // nm_ + nn_ notifications, because they will not receive notifications + // from preceding kernels. + state_switch_[x] = + x == 0 + ? 1 + : (parallel_pack_ ? nn_ + nm_ : (shard_by_col_ ? nn_ : nm_)) + + (x == P - 1 ? nm_ * nn_ : 0); + state_packing_ready_[x] = + parallel_pack_ ? 0 : (shard_by_col_ ? nm_ : nn_); + state_kernel_[x] = new std::atomic*[nm_]; + for (Index m = 0; m < nm_; m++) { + state_kernel_[x][m] = new std::atomic[nn_]; + // Kernels generally receive 3 notifications (previous kernel + 2 + // packing), but the first slice won't get notifications from previous + // kernels. 
+ for (Index n = 0; n < nn_; n++) + state_kernel_[x][m][n].store( + (x == 0 ? 0 : 1) + (parallel_pack_ ? 2 : 1), + std::memory_order_relaxed); + } + } + + // Allocate memory for packed rhs/lhs matrices. + packed_mem_ = kernel_.allocateSlices( // + device_, // + /*num_lhs=*/nm0_, // + /*num_rhs=*/nn0_, // + /*num_slices=*/std::min(nk_, P - 1), // + packed_lhs_, packed_rhs_); + + if (parallelize_by_sharding_dim_only_) { + const int num_worker_threads = device_.numThreadsInPool(); + + if (shard_by_col) { + can_use_thread_local_packed_ = new std::atomic[nn_]; + for (int i = 0; i < nn_; ++i) + can_use_thread_local_packed_[i].store(true, + std::memory_order_relaxed); + + Index num_blocks = num_worker_threads * gn_; + thread_local_pre_alocated_mem_ = kernel_.allocateSlices( // + device_, // + /*num_lhs=*/0, // + /*num_rhs=*/num_blocks, // + /*num_slices=*/1, // + /*lhs_blocks=*/nullptr, &rhs_thread_local_pre_allocated_); + + } else { + can_use_thread_local_packed_ = new std::atomic[nm_]; + for (int i = 0; i < nm_; ++i) + can_use_thread_local_packed_[i].store(true, + std::memory_order_relaxed); + + Index num_blocks = num_worker_threads * gm_; + thread_local_pre_alocated_mem_ = kernel_.allocateSlices( // + device_, // + /*num_lhs=*/num_blocks, // + /*num_rhs=*/0, // + /*num_slices=*/1, &lhs_thread_local_pre_allocated_, // + /*rhs_blocks=*/nullptr); + } + } + } + + ~EvalParallelContext() { + for (Index x = 0; x < P; x++) { + for (Index m = 0; m < nm_; m++) delete[] state_kernel_[x][m]; + delete[] state_kernel_[x]; + } + kernel_.deallocate(device_, packed_mem_); + if (parallelize_by_sharding_dim_only_) { + kernel_.deallocate(device_, thread_local_pre_alocated_mem_); + delete[] can_use_thread_local_packed_; + } + } + + void run() { + // Kick off packing of the first slice. + signal_switch(0, 1); + + // Wait for overall completion. + // + // If parallel evaluation is executed in async mode, this is a no-op, and + // Wait() will return immediately. 
In synchronous mode it will block the + // caller thread until it will receive notification from last task. + // + // In async mode, last task when completed will call done callback from + // the same thread, and will delete this context. + // + // TODO(dvyukov): This wait can lead to deadlock if contraction is + // evaluated in synchronous mode. If nthreads contractions are + // concurrently submitted from worker threads, this wait will block all + // worker threads and the system will deadlock. + done_.Wait(); + } + + private: + std::thread::id created_by_thread_id_; + + // This notification is specialized on the type of DoneCallback and can be + // blocking or non-blocking. + EvalParallelNotification done_; + + const Device& device_; + LhsMapper lhs_; + RhsMapper rhs_; + Scalar* const buffer_; + OutputMapper output_; + OutputKernelType output_kernel_; + TensorContractionParams tensor_contraction_params_; + const int num_threads_; + const bool shard_by_col_; + const bool parallel_pack_; + const bool parallelize_by_sharding_dim_only_; + // Matrix sizes. + const Index m_; + const Index n_; + const Index k_; + // Block sizes. + const Index bm_; + const Index bn_; + const Index bk_; + // Number of tasks. + const Index nm_; + const Index nn_; + const Index nk_; + // Task grain sizes (number of kernels executed per task). + const Index gm_; + const Index gn_; + // Number of blocks (this is different from ni_/nn_ because of task size + // coarsening). + const Index nm0_; + const Index nn0_; + // Tensor contraction kernel. + TensorContractionKernel kernel_; + + // Parallelization strategy. + // + // Blocks related to the same k block can run in parallel because they write + // to different output blocks. So we parallelize within k slices, this + // gives us parallelism level of m x n. Before we can start any kernels + // related to k-th slice, we need to issue m lhs packing tasks and n rhs + // packing tasks. 
+ // + // However, there is a bottleneck when we are finishing kernels for k-th + // slice (at the very end there is only 1 runnable kernel). To mitigate this + // bottleneck we allow kernels from k-th and k+1-th slices to run in + // parallel. Note that (m, n, k) and (m, n, k+1) kernels write to the same + // output block, so they must not run in parallel. + // + // This gives us the following dependency graph. + // On each k slice we have m x n kernel tasks, m lhs paking tasks and n rhs + // packing tasks. + // Kernel (m, n, k) can start when: + // - kernel (m, n, k-1) has finished + // - lhs packing (m, k) has finished + // - rhs packing (n, k) has finished + // Lhs/rhs packing can start when: + // - all k-1 packing has finished (artificially imposed to limit amount of + // parallel packing) + // + // On top of that we limit runnable tasks to two consecutive k slices. + // This is done to limit amount of memory we need for packed lhs/rhs + // (for each k slice we need m*bk + n*bk memory in packed_lhs_/packed_rhs_). + // + // state_switch_ tracks when we are ready to switch to the next k slice. + // state_kernel_[m][n] tracks when we are ready to kick off kernel (m, n). + // These variable are rolling over 3 consecutive k slices: first two we are + // actively executing + one to track completion of kernels in the second + // slice. + static const Index P = 3; + + // Handle to the allocated temporary storage for Lhs/Rhs blocks. + BlockMemHandle packed_mem_; + std::vector packed_lhs_[P - 1]; + std::vector packed_rhs_[P - 1]; + + // If we choose to parallelize only by the sharding dimension, each thread + // will have it's own "thead local" (not a c++ thread local storage) memory + // for packed_lhs or packed_rhs (shard_by_col = false of true). This memory + // can't be passed to a kernel that might execute on a different thread. 
+ // + // In practice when we are ready to pack memory for the sharding dimension + // (rhs if shard_by_col==true) of the K-th slice, all kernels for K-1 slice + // already computed (99% of the time), and we can pack data into the thread + // local storage, and guarantee that all the kernels will be executed + // immediately in the same thread. This significantly increases L1 cache hit + // ratio and reduces pressure on the memory bus. + // + // It's still possible that kernel for the K-th slice will be ready before + // completion of the K-1 kernel, so we have to allocate "global" packed_lhs_ + // and packed_rhs_ to allow kernels to be executed later on a thread + // different from the thread that was used for packing. + + // Handle for pre-allocated thread local memory buffers. + BlockMemHandle thread_local_pre_alocated_mem_; + + // Only one of these will be initialized depending on shard_by_col value + // (the size will be `num_worker_threads * num_grains_in_the_sharding_dim`). + std::vector lhs_thread_local_pre_allocated_; + std::vector rhs_thread_local_pre_allocated_; + + // How many thread local blocks were already allocated. + std::atomic num_thread_local_allocations_; + const int thread_local_capacity; + + // We will use pre-allocated Lhs/Rhs blocks defined above, if the number of + // unique threads in a system is below or equal to the number of threads in + // a thread pool. We will fallback on dynamic memory allocation after that. + + // ThreadLocalBlocks is a container for Lhs or Rhs thread local buffers. Its + // size is equal to the grain size in Lhs/Rhs sharding dimension. 
+ template + class ThreadLocalBlocks { + public: + ThreadLocalBlocks() = default; + + ThreadLocalBlocks(BlockType* base, size_t grain_size) + : is_pre_allocated_(true), + thread_local_pre_allocated_base_(base), + grain_size_(grain_size) {} + + ThreadLocalBlocks(BlockMemHandle mem_handle, + std::vector blocks) + : is_pre_allocated_(false), + mem_handle_(std::move(mem_handle)), + blocks_(std::move(blocks)) {} + + BlockType& block(int grain_index) { + eigen_assert(grain_index >= 0); + eigen_assert(static_cast(grain_index) < size()); + return is_pre_allocated_ ? thread_local_pre_allocated_base_[grain_index] + : blocks_[grain_index]; + } + + void Release(EvalParallelContext& ctx) const { + if (!is_pre_allocated_) { + ctx.kernel_.deallocate(ctx.device_, mem_handle_); + } + } + + size_t size() const { + return is_pre_allocated_ ? grain_size_ : blocks_.size(); + } + + private: + bool is_pre_allocated_; + + // Reuse pre-allocated thread local buffers. + BlockType* thread_local_pre_allocated_base_ = nullptr; + size_t grain_size_ = 0; + + // These will be initialized only if `is_pre_allocated == false`. + BlockMemHandle mem_handle_{}; + std::vector blocks_; + }; + + // ThreadLocalBlocksInitialize callable does custom thread local blocks + // initialization, and will reuse pre-allocated buffers if possible, or will + // dynamically allocate new memory. + // + // Lhs/Rhs blocks might be of the same type, so we have to pass explicitly + // for what side do we plan to do block allocation. 
+ template + class ThreadLocalBlocksInitialize { + static constexpr bool kIsLhs = + !is_rhs && std::is_same::value; + static const bool kIsRhs = + is_rhs && std::is_same::value; + static_assert(kIsLhs || kIsRhs, "Unkown block type"); + + using Blocks = ThreadLocalBlocks; + + public: + ThreadLocalBlocksInitialize(EvalParallelContext& ctx) + : ctx_(ctx), + num_worker_threads_(ctx_.device_.numThreadsInPool()) {} + + void operator()(Blocks& blocks) { + const int n = ctx_.num_thread_local_allocations_.fetch_add( + 1, std::memory_order_relaxed); + + if (n >= num_worker_threads_) { + ThreadLocalBlocksAllocator::allocate(ctx_, blocks); + } else { + ThreadLocalBlocksAllocator::reuse(ctx_, n, blocks); + } + } + + private: + // NOTE(ezhulenev): Without 'if constexpr' we have to put calls to + // TensorContractionKernel::allocateSlices into template specializations. + // Also explicit specializations are not allowed at class scope in C++03, + // EvalCtx type parameter is just a workaround for that limitation. 
+ template + struct ThreadLocalBlocksAllocator; + + template + struct ThreadLocalBlocksAllocator { + static void allocate(EvalCtx& ctx, Blocks& blocks) { + std::vector rhs_blocks; + BlockMemHandle mem_handle = ctx.kernel_.allocateSlices( + ctx.device_, + /*num_lhs=*/0, + /*num_rhs=*/ctx.gn_, + /*num_slices=*/1, + /*lhs_blocks=*/nullptr, /*rhs_blocks=*/&rhs_blocks); + + blocks = ThreadLocalBlocks(std::move(mem_handle), + std::move(rhs_blocks)); + } + + static void reuse(EvalCtx& ctx, int index, Blocks& blocks) { + RhsBlock* ptr = &ctx.rhs_thread_local_pre_allocated_[ctx.gn_ * index]; + blocks = ThreadLocalBlocks(ptr, ctx.gn_); + } + }; + + template + struct ThreadLocalBlocksAllocator { + static void allocate(EvalCtx& ctx, Blocks& blocks) { + std::vector lhs_blocks; + BlockMemHandle mem_handle = ctx.kernel_.allocateSlices( + ctx.device_, + /*num_lhs=*/ctx.gm_, + /*num_rhs=*/0, + /*num_slices=*/1, + /*lhs_blocks=*/&lhs_blocks, /*rhs_blocks=*/nullptr); + + blocks = ThreadLocalBlocks(std::move(mem_handle), + std::move(lhs_blocks)); + } + + static void reuse(EvalCtx& ctx, int index, Blocks& blocks) { + LhsBlock* ptr = &ctx.lhs_thread_local_pre_allocated_[ctx.gm_ * index]; + blocks = ThreadLocalBlocks(ptr, ctx.gm_); + } + }; + + EvalParallelContext& ctx_; + const int num_worker_threads_; + }; + + template + class ThreadLocalBlocksRelease { + public: + using Blocks = ThreadLocalBlocks; + ThreadLocalBlocksRelease(EvalParallelContext& ctx) : ctx_(ctx) {} + void operator()(Blocks& blocks) { blocks.Release(ctx_); } + + private: + EvalParallelContext& ctx_; + }; + + // ThreadLocalBlocks initialization callables. + using ThreadLocalLhsInit = + ThreadLocalBlocksInitialize; + using ThreadLocalRhsInit = + ThreadLocalBlocksInitialize; + + // ThreadLocalBlocks release callables. + using ThreadLocalLhsRelease = ThreadLocalBlocksRelease; + using ThreadLocalRhsRelease = ThreadLocalBlocksRelease; + + // Thread local containers for Lhs/Rhs block packs. 
In practice only one of + // them will be used, depending on the shard_by_col value. + Eigen::ThreadLocal, ThreadLocalLhsInit, + ThreadLocalLhsRelease> + lhs_thread_local_blocks_; + Eigen::ThreadLocal, ThreadLocalRhsInit, + ThreadLocalRhsRelease> + rhs_thread_local_blocks_; + + // After a particular shard for Kth slice missed thread local execution + // opportunity (K-1 slice didn't complete kernels execution), we can no + // longer schedule K+1 and following slices in thread local mode, because + // there is no more guarantee that previous kernels were executed + // sequentially in the same thread (size is nn_ or nm_). + std::atomic* can_use_thread_local_packed_; + + std::atomic** state_kernel_[P]; + // state_switch_ is frequently modified by worker threads, while other + // fields are read-only after constructor. Let's move it to a separate cache + // line to reduce cache-coherency traffic. + char pad_[128]; + std::atomic state_packing_ready_[P]; + std::atomic state_switch_[P]; + + LhsBlock& packed_lhs(Index m, Index k, Index m1, bool use_thread_local) { + if (use_thread_local) { + eigen_assert(!shard_by_col_); + ThreadLocalBlocks& blocks = lhs_thread_local_blocks_.local(); + + Index grain_index = m1 - m * gm_; + return blocks.block(internal::convert_index(grain_index)); // FIXME better make ThreadLocalBlocks use Eigen::Index? + } else { + return packed_lhs_[k % (P - 1)][m1]; + } + } + + RhsBlock& packed_rhs(Index n, Index k, Index n1, bool use_thread_local) { + if (use_thread_local) { + eigen_assert(shard_by_col_); + ThreadLocalBlocks& blocks = rhs_thread_local_blocks_.local(); + + Index grain_index = n1 - n * gn_; + return blocks.block(internal::convert_index(grain_index)); // FIXME better make ThreadLocalBlocks use Eigen::Index? 
+ } else { + return packed_rhs_[k % (P - 1)][n1]; + } + } + + // In following two methods (pack_lhs and pack_rhs), if we know for sure + // that we'll be able to immediately call a kernel with packed data, and do + // not submit it to the thread pool, we can use thread local memory for + // packed data. + // + // We can only reliably check it if we are running all kernels in sync mode + // (parallelize only by sharding dim). If kernel for m==0 (n==0) is ready to + // run, it's guaranteed that all kernels with larger values of m (n) are + // also ready, because we execute them in the same order for all K slices. + + void pack_lhs(Index m, Index k) { + bool use_thread_local = false; + + if (parallelize_by_sharding_dim_only_ && !shard_by_col_ && + can_use_thread_local_packed_[m].load(std::memory_order_relaxed)) { + if (state_kernel_[k % P][m][0].load(std::memory_order_relaxed) == 1) { + use_thread_local = true; + } else { + // If we can't guarantee that all kernels in `k` slice will be + // executed sequentially in current thread, it's no longer safe to use + // thread local memory in following slices along the k dimensions. 
+ eigen_assert(k > 0); + can_use_thread_local_packed_[m].store(false, + std::memory_order_relaxed); + } + } + + const Index mend = m * gm_ + gm(m); + for (Index m1 = m * gm_; m1 < mend; m1++) + kernel_.packLhs(&packed_lhs(m, k, m1, use_thread_local), + lhs_.getSubMapper(m1 * bm_, k * bk_), bk(k), bm(m1)); + + if (!parallel_pack_ && shard_by_col_) { + assert(!use_thread_local); + signal_packing(k); + } else { + signal_switch(k + 1); + for (Index n = nn_ - 1; n >= 0; n--) { + bool sync = parallelize_by_sharding_dim_only_ || n == 0; + signal_kernel(m, n, k, sync, use_thread_local); + } + } + } + + void pack_rhs(Index n, Index k) { + bool use_thread_local = false; + + if (parallelize_by_sharding_dim_only_ && shard_by_col_ && + can_use_thread_local_packed_[n].load(std::memory_order_relaxed)) { + if (state_kernel_[k % P][0][n].load(std::memory_order_relaxed) == 1) { + use_thread_local = true; + } else { + // If we can't guarantee that all kernels in `k` slice will be + // executed sequentially in current thread, it's no longer safe to use + // thread local memory in followig slices along the k dimensions. + eigen_assert(k > 0); + can_use_thread_local_packed_[n].store(false, + std::memory_order_relaxed); + } + } + + const Index nend = n * gn_ + gn(n); + for (Index n1 = n * gn_; n1 < nend; n1++) { + if (!TensorContractionKernel::HasBeta && k == 0) { + // Zero the output memory in parallel, only if contraction kernel does + // not support `beta`. Otherwise we will pass beta 0.0 to the first + // call to the `TensorContractionKernel::invoke()`. + // + // On 10000x2x10000 mm zeroing can easily take half of time. Zero (bn + // x m) row. Safe to do here because all kernels that will write to + // this memory depend on completion of this task. Note: don't call + // device_.memset() here. device_.memset() blocks on thread pool + // worker thread, which can lead to underutilization and deadlocks. 
+ memset(buffer_ + n1 * bn_ * m_, 0, bn(n1) * m_ * sizeof(Scalar)); + } + kernel_.packRhs(&packed_rhs(n, k, n1, use_thread_local), + rhs_.getSubMapper(k * bk_, n1 * bn_), bk(k), bn(n1)); + } + + if (parallel_pack_ || shard_by_col_) { + signal_switch(k + 1); + for (Index m = nm_ - 1; m >= 0; m--) { + bool sync = parallelize_by_sharding_dim_only_ || m == 0; + signal_kernel(m, n, k, sync, use_thread_local); + } + } else { + assert(!use_thread_local); + signal_packing(k); + } + } + + void kernel(Index m, Index n, Index k, bool use_thread_local) { + // Note: order of iteration matters here. Iteration over m is innermost + // because we want to reuse the same packed rhs in consecutive tasks + // (rhs fits into L2$ while lhs only into L3$). + const Index nend = n * gn_ + gn(n); + const Index mend = m * gm_ + gm(m); + + // NOTE: output = alpha * LHS * RHS + beta * output. + const Scalar alpha = Scalar(1); + const Scalar beta = + (TensorContractionKernel::HasBeta && k == 0) ? Scalar(0) : Scalar(1); + + if (shard_by_col_) { + for (Index n1 = n * gn_; n1 < nend; n1++) { + for (Index m1 = m * gm_; m1 < mend; m1++) { + const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_); + kernel_.invoke( + output_mapper, + packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local), + packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1), + bk(k), bn(n1), alpha, beta); + + // We are done with the last task for the [m1, n1] block. 
+ if (k + 1 == nk_) { + output_kernel_(output_mapper, tensor_contraction_params_, + m1 * bm_, n1 * bn_, bm(m1), bn(n1)); + } + } + } + } else { + for (Index m1 = m * gm_; m1 < mend; m1++) + for (Index n1 = n * gn_; n1 < nend; n1++) { + const auto output_mapper = output_.getSubMapper(m1 * bm_, n1 * bn_); + kernel_.invoke( + output_mapper, + packed_lhs(m, k, m1, !shard_by_col_ && use_thread_local), + packed_rhs(n, k, n1, shard_by_col_ && use_thread_local), bm(m1), + bk(k), bn(n1), alpha, beta); + + // We are done with the last task for the [m1, n1] block. + if (k + 1 == nk_) { + output_kernel_(output_mapper, tensor_contraction_params_, + m1 * bm_, n1 * bn_, bm(m1), bn(n1)); + } + } + } + signal_kernel(m, n, k + 1, /*sync=*/false, /*use_thread_local=*/false); + signal_switch(k + 2); + } + + void signal_packing(Index k) { + eigen_assert(!parallel_pack_); + Index s = state_packing_ready_[k % P].fetch_sub(1); + eigen_assert(s > 0); + if (s != 1) return; + state_packing_ready_[k % P] = shard_by_col_ ? nm_ : nn_; + enqueue_packing(k, shard_by_col_); + } + + void signal_kernel(Index m, Index n, Index k, bool sync, + bool use_thread_local) { + std::atomic* state = &state_kernel_[k % P][m][n]; + Index s = state->load(); + eigen_assert(s > 0); + if (s != 1 && state->fetch_sub(1) != 1) { + eigen_assert(!use_thread_local); + return; + } + state->store(parallel_pack_ ? 3 : 2, std::memory_order_relaxed); + if (sync) { + kernel(m, n, k, use_thread_local); + } else { + eigen_assert(!use_thread_local); + device_.enqueueNoNotification( + [=]() { kernel(m, n, k, use_thread_local); }); + } + } + + void signal_switch(Index k, Index v = 1) { + Index s = state_switch_[k % P].fetch_sub(v); + eigen_assert(s >= v); + if (s != v) return; + + // Ready to switch to the next k slice. + // Reset counter for the next iteration. + state_switch_[k % P] = + (parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)) + + nm_ * nn_; + if (k < nk_) { + // Issue lhs/rhs packing. 
Their completion will in turn kick off + // kernels. + if (parallel_pack_) { + enqueue_packing(k, !shard_by_col_); + enqueue_packing(k, shard_by_col_); + } else if (shard_by_col_) { + enqueue_packing(k, false); + } else { + enqueue_packing(k, true); + } + + // Termination handling. + // Because kernel completion signals k + 2 switch, we need to finish nk + // + 2 slices without issuing any tasks on nk + 1 slice. So here we + // pretend that all nk + 1 packing tasks just finish instantly; so that + // nk + 2 switch only waits for completion of nk kernels. + } else if (k == nk_) { + signal_switch(k + 1, + parallel_pack_ ? nm_ + nn_ : (shard_by_col_ ? nn_ : nm_)); + } else { + done_.Notify(); + } + } + + // Enqueue all rhs/lhs packing for k-th slice. + void enqueue_packing(Index k, bool rhs) { + enqueue_packing_helper(0, rhs ? nn_ : nm_, k, rhs); + } + + void enqueue_packing_helper(Index start, Index end, Index k, bool rhs) { + if (end - start == 1) { + if (rhs) + pack_rhs(start, k); + else + pack_lhs(start, k); + } else { + while (end - start > 1) { + Index mid = (start + end) / 2; + device_.enqueueNoNotification( + [=]() { enqueue_packing_helper(mid, end, k, rhs); }); + end = mid; + } + + // Decide if we want to run first packing task (start == 0) in + // async mode if we parallelize only by sharding dim: + // (1) pack_lhs and pack_rhs call signal_switch before completing + // all calls to signal_kernel, which in sync mode might lead + // to the execution of the first kernel of the k+1 slice, before + // completing a call to the last kernel of the k slice. + // (2) all pack tasks for sharded dim must be executed in a thread + // pool to get pre-allocated thead local buffers. 
+ bool pack_async = + (start == 0) && + (parallelize_by_sharding_dim_only_&& shard_by_col_ == rhs) && + (k > 0 || std::this_thread::get_id() == created_by_thread_id_); + + if (pack_async) { + device_.enqueueNoNotification( + [=]() { enqueue_packing_helper(start, end, k, rhs); }); + } else { + enqueue_packing_helper(start, end, k, rhs); + } + } + } + + // Block sizes with accounting for potentially incomplete last block. + Index bm(Index m) const { return m + 1 < nm0_ ? bm_ : m_ + bm_ - bm_ * nm0_; } + Index bn(Index n) const { return n + 1 < nn0_ ? bn_ : n_ + bn_ - bn_ * nn0_; } + Index bk(Index k) const { return k + 1 < nk_ ? bk_ : k_ + bk_ - bk_ * nk_; } + // Task grain sizes accounting for potentially incomplete last task. + Index gm(Index m) const { return m + 1 < nm_ ? gm_ : nm0_ + gm_ - gm_ * nm_; } + Index gn(Index n) const { return n + 1 < nn_ ? gn_ : nn0_ + gn_ - gn_ * nn_; } + + EvalParallelContext(const EvalParallelContext&) = delete; + void operator=(const EvalParallelContext&) = delete; + }; + + template + using SyncEvalParallelContext = + EvalParallelContext; + + // ------------------------------------------------------------------------ // + + // EvalShardedByInnerDimContext orchestrates sync/async contraction + // evaluation, when we shard by inner dimension. When it is executed in + // asynchronous mode, it owns all the shared state that might be accessible by + // block processing tasks. 
+ + template + struct EvalShardedByInnerDimContext { + EvalShardedByInnerDimContext(const Self* self, int num_threads, + Scalar* result_buffer, + Index m_size, Index n_size, Index k_size, + DoneCallback done_callback) + : evaluator(self), + m_lhs_inner_dim_contiguous(evaluator->m_lhs_inner_dim_contiguous), + m_rhs_inner_dim_contiguous(evaluator->m_rhs_inner_dim_contiguous), + m_rhs_inner_dim_reordered(evaluator->m_rhs_inner_dim_reordered), + result(result_buffer), + m(m_size), + n(n_size), + k(k_size), + done(std::move(done_callback)), + buffer_size_bytes(m * n * sizeof(Scalar)), + block_size(blockSize(k, num_threads)), + num_blocks(divup(k, block_size)), + num_pending_blocks(internal::convert_index(num_blocks)), + l0_ranges(divup(num_blocks, l0_size)), + l0_state(l0_ranges), + block_buffers(num_blocks) { + // Keep count of pending gemm tasks for each l0 range. + for (int i = 0; i < l0_ranges; ++i) { + const Index num_pending_tasks = actualRangeSize(l0_ranges, l0_size, i); + l0_state.emplace_back(internal::convert_index(num_pending_tasks)); + } + + // Allocate temporary buffers for each block. + for (Index block_idx = 0; block_idx < num_blocks; ++block_idx) { + Scalar* buf = block_idx == 0 + ? result + : static_cast(evaluator->m_device.allocate( + buffer_size_bytes)); + block_buffers.emplace_back(buf); + } + } + + ~EvalShardedByInnerDimContext() { + for (Index i = 1; i < num_blocks; ++i) { + evaluator->m_device.deallocate(block_buffers[i]); + } + } + + template + void run() { + Barrier barrier(internal::convert_index(num_blocks)); + eval(barrier, 0, num_blocks); + barrier.Wait(); + + // Aggregate partial sums from l0 ranges. + aggregateL0Blocks(); + + // Apply output kernel. + applyOutputKernel(); + } + + template + void runAsync() { + evalAsync(0, num_blocks); + } + + private: + // The underlying GEMM kernel assumes that k is a multiple of + // the packet size and subtle breakage occurs if this is violated. 
+ static const Index packet_size = internal::packet_traits::size; + + const Self* evaluator; // TensorContraction evaluator + + // These fields required fromTENSOR_CONTRACTION_DISPATCH macro. + bool m_lhs_inner_dim_contiguous; + bool m_rhs_inner_dim_contiguous; + bool m_rhs_inner_dim_reordered; + + Scalar* result; + + Index m; + Index n; + Index k; + + DoneCallback done; + + // ----------------------------------------------------------------------// + // Algorithm parameters. + + // We will compute partial results into the buffers of this size. + Index buffer_size_bytes; + + Index block_size; + Index num_blocks; + + // Keep track of pending tasks when evaluate in async mode. + std::atomic num_pending_blocks; + + // We compute partial gemm results in parallel, and to get the final result + // we need to add them all together. For the large number of threads (>= 48) + // this adds a very expensive sequential step at the end. + // + // We split the [0, num_blocks) into small ranges, and when a task for the + // block finishes its partial gemm computation, it checks if it was the last + // gemm in the range, and if so, it will add all blocks of the range. + // + // After all tasks done, we need to add only these pre-aggregated blocks. + + // For now we use just a single level of ranges to compute pre-aggregated + // partial sums, but in general we can use more layers to compute tree + // aggregation in parallel and reduce the size of the sequential step. + // + // TODO(ezhulenev): Add multilevel tree aggregation? Probably will make + // sense only if number of threads >= ~128? + static const Index l0_size = 4; + Index l0_ranges; + + // Keep count of pending gemm tasks for each l0 range. + MaxSizeVector> l0_state; // [0, l0_ranges) + + // Buffers allocated for each temporary block computation. 
+ MaxSizeVector block_buffers; // [0, num_blocks) + + template + void processBlock(Index block_idx, Index begin, Index end) { + Scalar* buf = block_buffers[block_idx]; + + TENSOR_CONTRACTION_DISPATCH( + evaluator->template evalGemmPartialWithoutOutputKernel, Alignment, + (buf, begin, end, + /*num_threads=*/internal::convert_index(num_blocks))); + + // Check if it was the last task in l0 range. + const Index l0_index = block_idx / l0_size; + const int v = l0_state[l0_index].fetch_sub(1); + eigen_assert(v >= 1); + + // If we processed the last block of the range, we can aggregate all + // partial results into the first block of the range. + if (v == 1) { + const Index rng_size = actualRangeSize(l0_ranges, l0_size, l0_index); + const Index dst_block_idx = l0_index * l0_size; + + if (rng_size == l0_size) { + addAllToBuffer( + m * n, + /*src_buf0=*/block_buffers[dst_block_idx + 1], + /*src_buf1=*/block_buffers[dst_block_idx + 2], + /*src_buf2=*/block_buffers[dst_block_idx + 3], + /*dst_buf= */ block_buffers[dst_block_idx]); + } else { + // Aggregate blocks of potentially incomplete last range. + for (int i = 1; i < rng_size; ++i) { + addToBuffer(m * n, + /*src_buf=*/block_buffers[dst_block_idx + i], + /*dst_buf=*/block_buffers[dst_block_idx]); + } + } + } + } + + // Aggregate partial sums from l0 ranges. 
+ template + void aggregateL0Blocks() const { + Index l0_index = 1; + + for (; l0_index + 2 < l0_ranges; l0_index += 3) { + addAllToBuffer( + m * n, + /*src_buf0=*/block_buffers[(l0_index + 0) * l0_size], + /*src_buf1=*/block_buffers[(l0_index + 1) * l0_size], + /*src_buf2=*/block_buffers[(l0_index + 2) * l0_size], + /*dst_buf= */ block_buffers[0]); + } + + for (; l0_index < l0_ranges; ++l0_index) { + addToBuffer(m * n, block_buffers[l0_index * l0_size], + block_buffers[0]); + } + } + + void applyOutputKernel() const { + typedef internal::blas_data_mapper OutputMapper; + evaluator->m_output_kernel( + OutputMapper(result, m), evaluator->m_tensor_contraction_params, + static_cast(0), static_cast(0), m, n); + } + + // Compute block size with accounting for potentially incomplete last block. + Index actualBlockSize(Index block_idx) const { + return block_idx + 1 < num_blocks + ? block_size + : k + block_size - block_size * num_blocks; + }; + + // Compute range size with accounting for potentially incomplete last range. + Index actualRangeSize(Index num_ranges, Index range_size, + Index range_idx) const { + eigen_assert(range_idx < num_ranges); + return range_idx + 1 < num_ranges + ? 
range_size + : num_blocks + range_size - range_size * num_ranges; + }; + + template + EIGEN_STRONG_INLINE static void addToBuffer(size_t n, const Scalar* src_buf, + Scalar* tgt_buf) { + const int output_packet_size = + internal::unpacket_traits::size; + size_t i = 0; + const size_t num_packets = n / output_packet_size; + for (; i < output_packet_size * num_packets; i += output_packet_size) { + const PacketReturnType src_val = + internal::pload(src_buf + i); + const PacketReturnType tgt_val = + internal::ploadt(tgt_buf + i); + const PacketReturnType sum = internal::padd(src_val, tgt_val); + internal::pstoret(tgt_buf + i, + sum); + } + for (; i < n; ++i) { + tgt_buf[i] += src_buf[i]; + } + } + + template + EIGEN_STRONG_INLINE static void addAllToBuffer(size_t n, + const Scalar* src_buf0, + const Scalar* src_buf1, + const Scalar* src_buf2, + Scalar* dst_buf) { + using ::Eigen::internal::padd; + using ::Eigen::internal::pload; + using ::Eigen::internal::ploadt; + using ::Eigen::internal::pstoret; + + const int output_packet_size = + internal::unpacket_traits::size; + + size_t i = 0; + const size_t num_packets = n / output_packet_size; + for (; i < output_packet_size * num_packets; i += output_packet_size) { + const auto src_val0 = pload(src_buf0 + i); + const auto src_val1 = pload(src_buf1 + i); + const auto src_val2 = pload(src_buf2 + i); + + const auto dst_val = ploadt(dst_buf + i); + const auto sum = + padd(padd(dst_val, src_val0), padd(src_val1, src_val2)); + + pstoret(dst_buf + i, sum); + } + for (; i < n; ++i) { + dst_buf[i] += src_buf0[i] + src_buf1[i] + src_buf2[i]; + } + } + + template + void eval(Barrier& barrier, Index start_block_idx, Index end_block_idx) { + while (end_block_idx - start_block_idx > 1) { + Index mid_block_idx = (start_block_idx + end_block_idx) / 2; + evaluator->m_device.enqueueNoNotification( + [this, &barrier, mid_block_idx, end_block_idx]() { + eval(barrier, mid_block_idx, end_block_idx); + }); + end_block_idx = mid_block_idx; + } + + 
Index block_idx = start_block_idx; + Index block_start = block_idx * block_size; + Index block_end = block_start + actualBlockSize(block_idx); + + processBlock(block_idx, block_start, block_end); + barrier.Notify(); + } + + template + void evalAsync(Index start_block_idx, Index end_block_idx) { + while (end_block_idx - start_block_idx > 1) { + Index mid_block_idx = (start_block_idx + end_block_idx) / 2; + evaluator->m_device.enqueueNoNotification( + [this, mid_block_idx, end_block_idx]() { + evalAsync(mid_block_idx, end_block_idx); + }); + end_block_idx = mid_block_idx; + } + + Index block_idx = start_block_idx; + + Index block_start = block_idx * block_size; + Index block_end = block_start + actualBlockSize(block_idx); + + processBlock(block_idx, block_start, block_end); + + int v = num_pending_blocks.fetch_sub(1); + eigen_assert(v >= 1); + + if (v == 1) { + // Aggregate partial sums from l0 ranges. + aggregateL0Blocks(); + + // Apply output kernel. + applyOutputKernel(); + + // NOTE: If we call `done` callback before deleting this (context), + // it might deallocate Self* pointer captured by context, and we'll + // fail in destructor trying to deallocate temporary buffers. + + // Move done call back from context before it will be destructed. + DoneCallback done_copy = std::move(done); + + // We are confident that we are the last one who touches context. + delete this; + + // Now safely call the done callback. + done_copy(); + } + } + + // Cost model doesn't capture well the cost associated with constructing + // tensor contraction mappers and computing loop bounds in gemm_pack_lhs + // and gemm_pack_rhs, so we specify minimum desired block size. + static Index blockSize(Index k, int num_threads) { + const auto round_up = [=](Index index) -> Index { + const Index kmultiple = packet_size <= 8 ? 
8 : packet_size; + return divup(index, kmultiple) * kmultiple; + }; + + const Index target_block_size = round_up(divup(k, num_threads)); + const Index desired_min_block_size = 12 * packet_size; + + return numext::mini( + k, numext::maxi(desired_min_block_size, target_block_size)); + } + + EvalShardedByInnerDimContext(const EvalShardedByInnerDimContext&) = delete; + void operator=(const EvalShardedByInnerDimContext&) = delete; + }; + + // ------------------------------------------------------------------------ // + + // Below are the function used by evalProductImpl heuristics, trying to select + // optimcal parameters for parallelization algorithm. + + // Decide whether we want to shard m x n contraction by columns or by rows. + static bool shardByCol(Index m, Index n, Index num_threads) { + // Note: we are comparing both n and m against Traits::nr, it is not + // a mistake. We are trying to figure out how both n and m will fit into + // the main sharding dimension. + + // Sharding by column is the default + // ... unless there is enough data for vectorization over rows + if (m / num_threads >= Traits::nr && + // and not enough data for vectorization over columns + (n / num_threads < Traits::nr || + // ... or barely enough data for vectorization over columns, + // but it is not evenly dividable across threads + (n / num_threads < 4 * Traits::nr && + (n % (num_threads * Traits::nr)) != 0 && + // ... and it is evenly dividable across threads for rows + ((m % (num_threads * Traits::nr)) == 0 || + // .. or it is not evenly dividable for both dimensions but + // there is much more data over rows so that corner effects are + // mitigated. + (m / n >= 6))))) + return false; + // Wait, or if matrices are just substantially prolonged over the other + // dimension. 
+ if (n / num_threads < 16 * Traits::nr && m > n * 32) return false; + return true; + } + + Index coarsenM(Index m, Index n, Index bm, Index bn, Index bk, Index gn, + int num_threads, bool shard_by_col) const { + Index gm = 1; + Index gm1 = 1; + Index nm0 = divup(m, bm); + Index nm1 = nm0; + for (;;) { + // Find the next candidate for m grain size. It needs to result in + // different number of blocks. E.g. if we have 10 kernels, we want to try + // 5 and 10, but not 6, 7, 8 and 9. + while (gm1 <= nm0 && nm1 == divup(nm0, gm1)) gm1++; + if (gm1 > nm0) break; + // Check the candidate. + int res = checkGrain(m, n, bm, bn, bk, gm1, gn, gm, gn, num_threads, + shard_by_col); + if (res < 0) break; + nm1 = divup(nm0, gm1); + if (res == 0) continue; + // Commit new grain size. + gm = gm1; + } + return gm; + } + + Index coarsenN(Index m, Index n, Index bm, Index bn, Index bk, Index gm, + int num_threads, bool shard_by_col) const { + Index gn = 1; + Index gn1 = 1; + Index nn0 = divup(n, bn); + Index nn1 = nn0; + for (;;) { + while (gn1 <= nn0 && nn1 == divup(nn0, gn1)) gn1++; + if (gn1 > nn0) break; + int res = checkGrain(m, n, bm, bn, bk, gm, gn1, gm, gn, num_threads, + shard_by_col); + if (res < 0) break; + nn1 = divup(nn0, gn1); + if (res == 0) continue; + gn = gn1; + } + return gn; + } + + // checkGrain checks whether grain (gm, gn) is suitable and is better than + // (oldgm, oldgn). + int checkGrain(Index m, Index n, Index bm, Index bn, Index bk, Index gm, + Index gn, Index oldgm, Index oldgn, int num_threads, + bool shard_by_col) const { + const TensorOpCost cost = + contractionCost(bm * gm, bn * gn, bm, bn, bk, shard_by_col, true); + double taskSize = TensorCostModel::taskSize( + static_cast(bm) * gm * bn * gn, cost); + // If the task is too small, then we agree on it regardless of anything + // else. Otherwise synchronization overheads will dominate. + if (taskSize < 1) return 1; + // If it is too large, then we reject it and all larger tasks. 
+ if (taskSize > 2) return -1; + // Now we are in presumably good task size range. + // The main deciding factor here is parallelism. Consider that we have 12 + // kernels and 4 threads. Grains of 2, 3 and 4 all yield good task sizes. + // But 2/4 yield 6/3 tasks, which gives us parallelism of 0.75 (at most 3/4 + // of cores will be busy). While grain size 3 gives us 4 tasks, which gives + // us parallelism of 1 (we can load all cores). + Index nm0 = divup(m, bm); + Index nn0 = divup(n, bn); + Index new_tasks = divup(nm0, gm) * divup(nn0, gn); + double new_parallelism = static_cast(new_tasks) / + (divup(new_tasks, num_threads) * num_threads); + Index old_tasks = divup(nm0, oldgm) * divup(nn0, oldgn); + double old_parallelism = static_cast(old_tasks) / + (divup(old_tasks, num_threads) * num_threads); + if (new_parallelism > old_parallelism || new_parallelism == 1) return 1; + return 0; + } + + TensorOpCost contractionCost(Index m, Index n, Index bm, Index bn, Index bk, + bool shard_by_col, bool prepacked) const { + const int packed_size = std::min(PacketType::size, + PacketType::size); + const int output_packet_size = internal::unpacket_traits::size; + const double kd = static_cast(bk); + double compute_bandwidth = computeBandwidth(false, bm, bn, bk); + // Computations. + TensorOpCost cost = TensorOpCost(0, 0, kd * compute_bandwidth, true, packed_size); + // Output stores. + cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size); + if (prepacked) { + // Packing and kernels are executed in different tasks. When we calculate + // task grain size we look only at kernel cost assuming that kernel + // is more expensive than packing. + return cost; + } + // Lhs/rhs loads + computations. 
+ TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * (kd / n); + TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * (kd / m); + // Lhs packing memory cost does not contribute considerably to overall + // execution time because lhs is prefetched early and accessed sequentially. + if (shard_by_col) + lhsCost.dropMemoryCost(); + else + rhsCost.dropMemoryCost(); + return cost + lhsCost + rhsCost; + } + + // Decide whether we want to shard m x k x n contraction over the inner + // (contraction) dimension (k). + static bool shardByInnerDim(Index m, Index n, Index k, int num_threads, + int num_threads_by_k) { + std::ptrdiff_t bufsize = m * n * sizeof(Scalar); + bool shard_by_k = false; + if (n == 1 || // If mat*vec or... + num_threads_by_k < 2 || // running single threaded or... + num_threads_by_k < + num_threads || // sharding by k gives less parallelism or... + bufsize > l3CacheSize() / num_threads_by_k || // need more buffer space + // than L3 cache or... + k / num_threads_by_k < 2 * Traits::nr) { // k per thread is tiny. + shard_by_k = false; + } else if (numext::maxi(m, n) / num_threads < + Traits::nr || // both other dimensions are tiny or... + // k per thread is not small and... + (k / num_threads_by_k > 8 * Traits::nr && + // one of the outer dimensions is tiny or sharding by k offers + // more parallelism. + (numext::mini(m, n) < 2 * Traits::nr || + num_threads_by_k > num_threads))) { + shard_by_k = true; + } + return shard_by_k; + } + + TensorOpCost contractionCostPerInnerDim(Index m, Index n, Index k) const { + // Compute cost. + const int output_packet_size = internal::unpacket_traits::size; + TensorOpCost cost(0, 0, (computeBandwidth(true, m, n, k) * m) * n, true, output_packet_size); + // Output stores. 
+ cost += TensorOpCost(0, sizeof(CoeffReturnType), 0, true, output_packet_size); + TensorOpCost lhsCost = this->m_leftImpl.costPerCoeff(true) * m; + TensorOpCost rhsCost = this->m_rightImpl.costPerCoeff(true) * n; + // Since the inner gemm kernel is always sharded by column, the lhs + // load cost is negligible. + lhsCost.dropMemoryCost(); + return cost + lhsCost + rhsCost; + } + + int numThreadsInnerDim(Index m, Index n, Index k) const { + const int output_packet_size = internal::unpacket_traits::size; + TensorOpCost cost = contractionCostPerInnerDim(m, n, k); + double total_parallel_cost = + TensorCostModel::totalCost(k, cost); + // Cost of reduction step accumulating the m*n per-thread buffers into the + // result. + double reduction_cost = TensorCostModel::totalCost( + m * n, TensorOpCost(2, 1, 1, true, output_packet_size)); + int num_threads = 1; + double min_cost = total_parallel_cost; + double kPerThreadOverHead = 3000; + double kFixedOverHead = 100000; + for (int nt = 2; nt <= this->m_device.numThreads(); nt += 2) { + double sequential_cost = + kFixedOverHead + nt * (reduction_cost + kPerThreadOverHead); + double parallel_cost = total_parallel_cost / nt + sequential_cost; + if (parallel_cost < min_cost) { + num_threads = nt; + min_cost = parallel_cost; + } + } + return num_threads; + } + + double computeBandwidth(bool shard_by_col, Index bm, Index bn, + Index bk) const { + // Peak VFMA bandwidth is 0.5. However if we have not enough data for + // vectorization bandwidth drops. The 4.0 and 2.0 bandwidth is determined + // experimentally. + double computeBandwidth = + bk == 1 ? 4.0 + : (shard_by_col ? bn : bm) < Traits::nr || + (shard_by_col ? bm : bn) < Traits::mr + ? 2.0 + : 0.5; +#ifndef EIGEN_VECTORIZE_FMA + // Bandwidth of all of VFMA/MULPS/ADDPS is 0.5 on latest Intel processors. + // However for MULPS/ADDPS we have dependent sequence of 2 such + // instructions, + // so overall bandwidth is 1.0. 
+ if (computeBandwidth == 0.5) computeBandwidth = 1.0; +#endif + return computeBandwidth; + } + +}; + +} // end namespace Eigen + +#endif // EIGEN_USE_THREADS +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONTRACTION_THREAD_POOL_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h new file mode 100644 index 0000000..09d2da9 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h @@ -0,0 +1,456 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H + +namespace Eigen { + +/** \class TensorConversionOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor conversion class. This class makes it possible to vectorize + * type casting operations when the number of scalars per packet in the source + * and the destination type differ + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
+ typedef TargetType Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + enum { Flags = 0 }; + typedef typename TypeConversion::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConversionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConversionOp type; +}; + +} // end namespace internal + + +template +struct PacketConverter; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + return internal::pcast(m_impl.template packet(index)); + } + + private: + const TensorEvaluator& m_impl; +}; + + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + SrcPacketSize); + SrcPacket src3 = m_impl.template 
packet(index + 2 * SrcPacketSize); + SrcPacket src4 = m_impl.template packet(index + 3 * SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2, src3, src4); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + + SrcPacket src1 = m_impl.template packet(index); + SrcPacket src2 = m_impl.template packet(index + 1 * SrcPacketSize); + SrcPacket src3 = m_impl.template packet(index + 2 * SrcPacketSize); + SrcPacket src4 = m_impl.template packet(index + 3 * SrcPacketSize); + SrcPacket src5 = m_impl.template packet(index + 4 * SrcPacketSize); + SrcPacket src6 = m_impl.template packet(index + 5 * SrcPacketSize); + SrcPacket src7 = m_impl.template packet(index + 6 * SrcPacketSize); + SrcPacket src8 = m_impl.template packet(index + 7 * SrcPacketSize); + TgtPacket result = internal::pcast(src1, src2, src3, src4, src5, src6, src7, src8); + return result; + } + + private: + const TensorEvaluator& m_impl; +}; + +template +struct PacketConverter { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketConverter(const TensorEvaluator& impl) + : m_impl(impl), m_maxIndex(impl.dimensions().TotalSize()) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TgtPacket packet(Index index) const { + const int SrcPacketSize = internal::unpacket_traits::size; + // Only call m_impl.packet() when we have direct access to the underlying data. This + // ensures that we don't compute the subexpression twice. We may however load some + // coefficients twice, but in practice this doesn't negatively impact performance. 
+ if (m_impl.data() && (index + SrcPacketSize < m_maxIndex)) { + // Force unaligned memory loads since we can't ensure alignment anymore + return internal::pcast(m_impl.template packet(index)); + } else { + const int TgtPacketSize = internal::unpacket_traits::size; + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TgtType; + internal::scalar_cast_op converter; + EIGEN_ALIGN_MAX typename internal::unpacket_traits::type values[TgtPacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < TgtPacketSize; ++i) { + values[i] = converter(m_impl.coeff(index+i)); + } + TgtPacket rslt = internal::pload(values); + return rslt; + } + } + + private: + const TensorEvaluator& m_impl; + const typename TensorEvaluator::Index m_maxIndex; +}; + +template +class TensorConversionOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::nested::type Nested; + typedef Scalar CoeffReturnType; + typedef typename NumTraits::Real RealScalar; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConversionOp(const XprType& xpr) + : m_xpr(xpr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +template struct ConversionSubExprEval { + static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType) { + impl.evalSubExprsIfNeeded(NULL); + return true; + } +}; + +template struct ConversionSubExprEval { + static EIGEN_STRONG_INLINE bool run(Eval& impl, EvalPointerType data) { + return impl.evalSubExprsIfNeeded(data); + } +}; + +#ifdef EIGEN_USE_THREADS +template +struct ConversionSubExprEvalAsync { + static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType, EvalSubExprsCallback done) { + impl.evalSubExprsIfNeededAsync(nullptr, 
std::move(done)); + } +}; + +template +struct ConversionSubExprEvalAsync { + static EIGEN_STRONG_INLINE void run(Eval& impl, EvalPointerType data, EvalSubExprsCallback done) { + impl.evalSubExprsIfNeededAsync(data, std::move(done)); + } +}; +#endif + +namespace internal { + +template +struct CoeffConv { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator& impl, Index index) { + internal::scalar_cast_op converter; + return converter(impl.coeff(index)); + } +}; + +template +struct CoeffConv { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetType run(const TensorEvaluator& impl, Index index) { + return impl.coeff(index); + } +}; + +template +struct PacketConv { + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TargetType; + + static const int PacketSize = internal::unpacket_traits::size; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + internal::scalar_cast_op converter; + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = converter(impl.coeff(index+i)); + } + TargetPacket rslt = internal::pload(values); + return rslt; + } +}; + +template +struct PacketConv { + typedef typename internal::unpacket_traits::type SrcType; + typedef typename internal::unpacket_traits::type TargetType; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + const int SrcCoeffRatio = internal::type_casting_traits::SrcCoeffRatio; + const int TgtCoeffRatio = internal::type_casting_traits::TgtCoeffRatio; + PacketConverter, SrcPacket, TargetPacket, + SrcCoeffRatio, TgtCoeffRatio> converter(impl); + return converter.template packet(index); + } +}; + +template +struct PacketConv { + typedef typename internal::unpacket_traits::type TargetType; + 
static const int PacketSize = internal::unpacket_traits::size; + + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) values[i] = impl.coeff(index+i); + return internal::pload(values); + } +}; + +template +struct PacketConv { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TargetPacket run(const TensorEvaluator& impl, Index index) { + return impl.template packet(index); + } +}; + +} // namespace internal + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorConversionOp XprType; + typedef typename XprType::Index Index; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef TargetType Scalar; + typedef TargetType CoeffReturnType; + typedef typename internal::remove_all::Scalar>::type SrcType; + typedef typename PacketType::type PacketReturnType; + typedef typename PacketType::type PacketSourceType; + static const int PacketSize = PacketType::size; + static const bool IsSameType = internal::is_same::value; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = + #ifndef EIGEN_USE_SYCL + true, + #else + TensorEvaluator::PacketAccess & + internal::type_casting_traits::VectorizedCast, + #endif + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + static const int NumDims = internal::array_size::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + struct TensorConversionOpBlockFactory { + template + struct XprType { 
+ typedef TensorConversionOp type; + }; + + template + typename XprType::type expr(const ArgXprType& expr) const { + return typename XprType::type(expr); + } + }; + + typedef internal::TensorUnaryExprBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) + { + return ConversionSubExprEval, EvaluatorPointerType>::run(m_impl, data); + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType data, EvalSubExprsCallback done) { + ConversionSubExprEvalAsync, + EvaluatorPointerType, + EvalSubExprsCallback>::run(m_impl, data, std::move(done)); + } +#endif + + EIGEN_STRONG_INLINE void cleanup() + { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return internal::CoeffConv::run(m_impl,index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType + packet(Index index) const { + // If we are not going to do the cast, we just need to check that base + // TensorEvaluator has packet access. Otherwise we also need to make sure, + // that we have an implementation of vectorized cast. + const bool Vectorizable = + IsSameType + ? 
TensorEvaluator::PacketAccess + : int(TensorEvaluator::PacketAccess) & + int(internal::type_casting_traits::VectorizedCast); + + return internal::PacketConv::run(m_impl, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double cast_cost = TensorOpCost::CastCost(); + if (vectorized) { + const double SrcCoeffRatio = + internal::type_casting_traits::SrcCoeffRatio; + const double TgtCoeffRatio = + internal::type_casting_traits::TgtCoeffRatio; + return m_impl.costPerCoeff(vectorized) * (SrcCoeffRatio / PacketSize) + + TensorOpCost(0, 0, TgtCoeffRatio * (cast_cost / PacketSize)); + } else { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, cast_cost); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return m_impl.getResourceRequirements(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + return TensorBlock(m_impl.block(desc, scratch), + TensorConversionOpBlockFactory()); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + /// required by sycl in order to extract the sycl accessor + const TensorEvaluator& impl() const { return m_impl; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + TensorEvaluator m_impl; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVERSION_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h new file mode 100644 index 0000000..b20f80b --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h @@ -0,0 +1,1132 @@ +// This file is part of 
Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H + +namespace Eigen { + +/** \class TensorConvolution + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor convolution class. + * + * + */ +namespace internal { + +template +class IndexMapper { + public: + IndexMapper(const InputDims& input_dims, const array& kernel_dims, + const array& indices) { + + array dimensions = input_dims; + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = indices[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + dimensions[index] = result_dim; + } + + array inputStrides; + array outputStrides; + if (static_cast(Layout) == static_cast(ColMajor)) { + inputStrides[0] = 1; + outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + inputStrides[i] = inputStrides[i-1] * input_dims[i-1]; + outputStrides[i] = outputStrides[i-1] * dimensions[i-1]; + } + } else { + inputStrides[NumDims - 1] = 1; + outputStrides[NumDims - 1] = 1; + for (int i = static_cast(NumDims) - 2; i >= 0; --i) { + inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1]; + outputStrides[i] = outputStrides[i + 1] * dimensions[i + 1]; + } + } + + array gpuInputDimensions; + array gpuOutputDimensions; + array tmp = dimensions; + array ordering; + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 
0 + : NumDims - NumKernelDims; + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = i + offset; + ordering[index] = indices[i]; + tmp[indices[i]] = -1; + gpuInputDimensions[index] = input_dims[indices[i]]; + gpuOutputDimensions[index] = dimensions[indices[i]]; + } + + int written = static_cast(Layout) == static_cast(ColMajor) + ? NumKernelDims + : 0; + for (int i = 0; i < NumDims; ++i) { + if (tmp[i] >= 0) { + ordering[written] = i; + gpuInputDimensions[written] = input_dims[i]; + gpuOutputDimensions[written] = dimensions[i]; + ++written; + } + } + + for (int i = 0; i < NumDims; ++i) { + m_inputStrides[i] = inputStrides[ordering[i]]; + m_outputStrides[i] = outputStrides[ordering[i]]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + if (i > NumKernelDims) { + m_gpuInputStrides[i] = + m_gpuInputStrides[i - 1] * gpuInputDimensions[i - 1]; + m_gpuOutputStrides[i] = + m_gpuOutputStrides[i - 1] * gpuOutputDimensions[i - 1]; + } else { + m_gpuInputStrides[i] = 1; + m_gpuOutputStrides[i] = 1; + } + } + } else { + for (int i = NumDims - 1; i >= 0; --i) { + if (static_cast(i + 1) < offset) { + m_gpuInputStrides[i] = + m_gpuInputStrides[i + 1] * gpuInputDimensions[i + 1]; + m_gpuOutputStrides[i] = + m_gpuOutputStrides[i + 1] * gpuOutputDimensions[i + 1]; + } else { + m_gpuInputStrides[i] = 1; + m_gpuOutputStrides[i] = 1; + } + } + } + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputPlaneToTensorInputOffset(Index p) const { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int d = NumDims - 1; d > NumKernelDims; --d) { + const Index idx = p / m_gpuInputStrides[d]; + inputIndex += idx * m_inputStrides[d]; + p -= idx * m_gpuInputStrides[d]; + } + inputIndex += p * m_inputStrides[NumKernelDims]; + } else { + std::ptrdiff_t limit = 0; + if (NumKernelDims < NumDims) { + limit = NumDims - NumKernelDims - 1; + } + for (int d = 0; d < limit; ++d) { + const Index idx = p / 
m_gpuInputStrides[d]; + inputIndex += idx * m_inputStrides[d]; + p -= idx * m_gpuInputStrides[d]; + } + inputIndex += p * m_inputStrides[limit]; + } + return inputIndex; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputPlaneToTensorOutputOffset(Index p) const { + Index outputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int d = NumDims - 1; d > NumKernelDims; --d) { + const Index idx = p / m_gpuOutputStrides[d]; + outputIndex += idx * m_outputStrides[d]; + p -= idx * m_gpuOutputStrides[d]; + } + outputIndex += p * m_outputStrides[NumKernelDims]; + } else { + std::ptrdiff_t limit = 0; + if (NumKernelDims < NumDims) { + limit = NumDims - NumKernelDims - 1; + } + for (int d = 0; d < limit; ++d) { + const Index idx = p / m_gpuOutputStrides[d]; + outputIndex += idx * m_outputStrides[d]; + p -= idx * m_gpuOutputStrides[d]; + } + outputIndex += p * m_outputStrides[limit]; + } + return outputIndex; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i, Index j) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i, Index j) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 
0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuInputKernelToTensorInputOffset(Index i, Index j, Index k) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_inputStrides[offset] + j * m_inputStrides[offset + 1] + + k * m_inputStrides[offset + 2]; + } + + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Index mapGpuOutputKernelToTensorOutputOffset(Index i, Index j, Index k) const { + const size_t offset = static_cast(Layout) == static_cast(ColMajor) + ? 0 + : NumDims - NumKernelDims; + return i * m_outputStrides[offset] + j * m_outputStrides[offset + 1] + + k * m_outputStrides[offset + 2]; + } + + private: + static const int NumDims = internal::array_size::value; + array m_inputStrides; + array m_outputStrides; + array m_gpuInputStrides; + array m_gpuOutputStrides; +}; + + + +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
+ typedef typename promote_storage_type::ret Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename InputXprType::Nested LhsNested; + typedef typename KernelXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + typedef typename conditional::val, + typename traits::PointerType, typename traits::PointerType>::type PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorConvolutionOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorConvolutionOp type; +}; + +} // end namespace internal + + + +template +class TensorConvolutionOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorConvolutionOp(const InputXprType& input, const KernelXprType& kernel, const Indices& dims) + : m_input_xpr(input), m_kernel_xpr(kernel), m_indices(dims) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Indices& indices() const { return m_indices; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + inputExpression() const { return m_input_xpr; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& + kernelExpression() const { return 
m_kernel_xpr; } + + protected: + typename InputXprType::Nested m_input_xpr; + typename KernelXprType::Nested m_kernel_xpr; + const Indices m_indices; +}; + + +template +struct TensorEvaluator, Device> +{ + typedef TensorConvolutionOp XprType; + + static const int NumDims = internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = int(TensorEvaluator::IsAligned) & int(TensorEvaluator::IsAligned), + PacketAccess = int(TensorEvaluator::PacketAccess) & int(TensorEvaluator::PacketAccess), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions& input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions& kernel_dims = m_kernelImpl.dimensions(); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStride[0] = 1; + for (int i = 1; i < NumDims; 
++i) { + m_inputStride[i] = m_inputStride[i - 1] * input_dims[i - 1]; + } + } else { + m_inputStride[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStride[i] = m_inputStride[i + 1] * input_dims[i + 1]; + } + } + + m_dimensions = m_inputImpl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + if (i > 0) { + m_kernelStride[i] = m_kernelStride[i - 1] * kernel_dims[i - 1]; + } else { + m_kernelStride[0] = 1; + } + m_indexStride[i] = m_inputStride[index]; + } + + m_outputStride[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStride[i] = m_outputStride[i - 1] * m_dimensions[i - 1]; + } + } else { + for (int i = NumKernelDims - 1; i >= 0; --i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + if (i < NumKernelDims - 1) { + m_kernelStride[i] = m_kernelStride[i + 1] * kernel_dims[i + 1]; + } else { + m_kernelStride[NumKernelDims - 1] = 1; + } + m_indexStride[i] = m_inputStride[index]; + } + + m_outputStride[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStride[i] = m_outputStride[i + 1] * m_dimensions[i + 1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { + m_inputImpl.evalSubExprsIfNeeded(NULL); + preloadKernel(); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_local_kernel) { + m_device.deallocate((void*)m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + + void evalTo(typename 
XprType::Scalar* buffer) { + evalSubExprsIfNeeded(NULL); + for (int i = 0; i < dimensions().TotalSize(); ++i) { + buffer[i] += coeff(i); + } + cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + CoeffReturnType result = CoeffReturnType(0); + convolve(firstInput(index), 0, NumKernelDims-1, result); + return result; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(const Index index) const + { + Index indices[2] = {index, index+PacketSize-1}; + Index startInputs[2] = {0, 0}; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_outputStride[i]; + const Index idx1 = indices[1] / m_outputStride[i]; + startInputs[0] += idx0 * m_inputStride[i]; + startInputs[1] += idx1 * m_inputStride[i]; + indices[0] -= idx0 * m_outputStride[i]; + indices[1] -= idx1 * m_outputStride[i]; + } + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_outputStride[i]; + const Index idx1 = indices[1] / m_outputStride[i]; + startInputs[0] += idx0 * m_inputStride[i]; + startInputs[1] += idx1 * m_inputStride[i]; + indices[0] -= idx0 * m_outputStride[i]; + indices[1] -= idx1 * m_outputStride[i]; + } + } + startInputs[0] += indices[0]; + startInputs[1] += indices[1]; + + if (startInputs[1]-startInputs[0] == PacketSize-1) { + PacketReturnType result = internal::pset1(0); + convolvePacket(startInputs[0], 0, NumKernelDims-1, result); + return result; + } else { + EIGEN_ALIGN_MAX Scalar data[PacketSize]; + data[0] = Scalar(0); + convolve(startInputs[0], 0, NumKernelDims-1, data[0]); + for (int i = 1; i < PacketSize-1; ++i) { + data[i] = Scalar(0); + convolve(firstInput(index+i), 0, NumKernelDims-1, data[i]); + } + data[PacketSize-1] = Scalar(0); + convolve(startInputs[1], 0, NumKernelDims-1, data[PacketSize-1]); + return internal::pload(data); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool 
vectorized) const { + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. + const double convolve_compute_cost = + TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, + PacketSize)); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStride[i]; + startInput += idx * m_inputStride[i]; + index -= idx * m_outputStride[i]; + } + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStride[i]; + startInput += idx * m_inputStride[i]; + index -= idx * m_outputStride[i]; + } + } + startInput += index; + return startInput; + } + + EIGEN_DEVICE_FUNC void convolve(Index firstIndex, Index firstKernel, int DimIndex, CoeffReturnType& accum) const { + for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) { + const Index input = firstIndex + j * m_indexStride[DimIndex]; + const Index kernel = firstKernel + j * m_kernelStride[DimIndex]; + if (DimIndex > 0) { + convolve(input, kernel, DimIndex-1, accum); + } else { + accum += m_inputImpl.coeff(input) * m_kernel[kernel]; + } + } + } + + template + EIGEN_DEVICE_FUNC void convolvePacket(Index firstIndex, Index firstKernel, int DimIndex, Packet& accum) const { + for (int j = 0; j < m_kernelImpl.dimensions()[DimIndex]; ++j) { + const Index input = firstIndex + j * m_indexStride[DimIndex]; + const Index 
kernel = firstKernel + j * m_kernelStride[DimIndex]; + if (DimIndex > 0) { + convolvePacket(input, kernel, DimIndex-1, accum); + } else { + accum = internal::pmadd(m_inputImpl.template packet(input), internal::pset1(m_kernel[kernel]), accum); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. it's an + // expression that needs to be evaluated) + const Scalar* in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + Scalar* local = (Scalar*)m_device.allocate_temp(kernel_sz); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(local, m_kernelArg); + const bool Vectorize = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + + m_kernel = local; + m_local_kernel = true; + } + } + + array m_inputStride; + array m_outputStride; + + array m_indexStride; + array m_kernelStride; + TensorEvaluator m_inputImpl; + TensorEvaluator m_kernelImpl; + Dimensions m_dimensions; + + KernelArgType m_kernelArg; + const Scalar* m_kernel; + bool m_local_kernel; + const Device EIGEN_DEVICE_REF m_device; +}; + + + + +// Use an optimized implementation of the evaluation code for GPUs whenever possible. 
+#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) + +template +struct GetKernelSize { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int /*kernelSize*/) const { + return StaticKernelSize; + } +}; +template <> +struct GetKernelSize { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const { + return kernelSize; + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void EigenConvolutionKernel1D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const int numPlanes, const int numX, + const int maxX, const int kernelSize, float* buffer) { +#if defined(EIGEN_HIPCC) + HIP_DYNAMIC_SHARED(float, s) +#else + extern __shared__ float s[]; +#endif + + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + GetKernelSize()(kernelSize); + const int num_x_output = last_x - first_x + 1; + + const int first_plane = blockIdx.y * blockDim.y; + const int plane_stride = blockDim.y * gridDim.y; + + for (int p = first_plane + threadIdx.y; p < numPlanes; p += plane_stride) { + // Load inputs to shared memory + const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = threadIdx.y * num_x_input; + #pragma unroll + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x); + s[i + plane_kernel_offset] = eval.coeff(tensor_index); + } + + __syncthreads(); + + // Compute the convolution + const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p); + + #pragma unroll + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + const int kernel_offset = plane_kernel_offset + i; + float result = 0.0f; + #pragma unroll + for (int k = 0; k < GetKernelSize()(kernelSize); ++k) { + result += s[k + 
kernel_offset] * kernel[k]; + } + const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x); + buffer[tensor_index] = result; + } + __syncthreads(); + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void EigenConvolutionKernel2D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const int numPlanes, const int numX, + const int maxX, const int numY, const int maxY, const int kernelSizeX, + const int kernelSizeY, float* buffer) { +#if defined(EIGEN_HIPCC) + HIP_DYNAMIC_SHARED(float, s) +#else + extern __shared__ float s[]; +#endif + + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + GetKernelSize()(kernelSizeX); + const int num_x_output = last_x - first_x + 1; + + const int first_y = blockIdx.y * maxY; + const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1; + const int num_y_input = last_y - first_y + GetKernelSize()(kernelSizeY); + const int num_y_output = last_y - first_y + 1; + + const int first_plane = blockIdx.z * blockDim.z; + const int plane_stride = blockDim.z * gridDim.z; + + for (int p = first_plane + threadIdx.z; p < numPlanes; p += plane_stride) { + + const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = threadIdx.z * num_y_input; + + // Load inputs to shared memory + #pragma unroll + for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) { + const int input_offset = num_x_input * (j + plane_kernel_offset); + #pragma unroll + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x, j+first_y); + s[i + input_offset] = eval.coeff(tensor_index); + } + } + + __syncthreads(); + + // Convolution + const int plane_output_offset = 
indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p); + + #pragma unroll + for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) { + #pragma unroll + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + float result = 0.0f; + #pragma unroll + for (int l = 0; l < GetKernelSize()(kernelSizeY); ++l) { + const int kernel_offset = kernelSizeX * l; + const int input_offset = i + num_x_input * (j + l + plane_kernel_offset); + #pragma unroll + for (int k = 0; k < GetKernelSize()(kernelSizeX); ++k) { + result += s[k + input_offset] * kernel[k + kernel_offset]; + } + } + const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x, j+first_y); + buffer[tensor_index] = result; + } + } + + __syncthreads(); + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void EigenConvolutionKernel3D( + InputEvaluator eval, + const internal::IndexMapper + indexMapper, + const float* __restrict kernel, const size_t numPlanes, const size_t numX, + const size_t maxX, const size_t numY, const size_t maxY, const size_t numZ, + const size_t maxZ, const size_t kernelSizeX, const size_t kernelSizeY, + const size_t kernelSizeZ, float* buffer) { +#if defined(EIGEN_HIPCC) + HIP_DYNAMIC_SHARED(float, s) +#else + extern __shared__ float s[]; +#endif + + // Load inputs to shared memory + const int first_x = blockIdx.x * maxX; + const int last_x = (first_x + maxX < numX ? first_x + maxX : numX) - 1; + const int num_x_input = last_x - first_x + kernelSizeX; + + const int first_y = blockIdx.y * maxY; + const int last_y = (first_y + maxY < numY ? first_y + maxY : numY) - 1; + const int num_y_input = last_y - first_y + kernelSizeY; + + const int first_z = blockIdx.z * maxZ; + const int last_z = (first_z + maxZ < numZ ? 
first_z + maxZ : numZ) - 1; + const int num_z_input = last_z - first_z + kernelSizeZ; + + for (int p = 0; p < numPlanes; ++p) { + + const int plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + const int plane_kernel_offset = 0; + + for (int k = threadIdx.z; k < num_z_input; k += blockDim.z) { + for (int j = threadIdx.y; j < num_y_input; j += blockDim.y) { + for (int i = threadIdx.x; i < num_x_input; i += blockDim.x) { + const int tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i+first_x, j+first_y, k+first_z); + s[i + num_x_input * (j + num_y_input * (k + plane_kernel_offset))] = eval.coeff(tensor_index); + } + } + } + + __syncthreads(); + + // Convolution + const int num_z_output = last_z - first_z + 1; + const int num_y_output = last_y - first_y + 1; + const int num_x_output = last_x - first_x + 1; + const int plane_output_offset = indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p); + + for (int k = threadIdx.z; k < num_z_output; k += blockDim.z) { + for (int j = threadIdx.y; j < num_y_output; j += blockDim.y) { + for (int i = threadIdx.x; i < num_x_output; i += blockDim.x) { + float result = 0.0f; + for (int n = 0; n < kernelSizeZ; ++n) { + for (int m = 0; m < kernelSizeY; ++m) { + for (int l = 0; l < kernelSizeX; ++l) { + result += s[i + l + num_x_input * (j + m + num_y_input * (k + n + plane_kernel_offset))] * kernel[l + kernelSizeX * (m + kernelSizeY * n)]; + } + } + } + const int tensor_index = plane_output_offset + indexMapper.mapGpuOutputKernelToTensorOutputOffset(i+first_x, j+first_y, k+first_z); + buffer[tensor_index] = result; + } + } + } + __syncthreads(); + } +}; + + + +template +struct TensorEvaluator, GpuDevice> +{ + typedef TensorConvolutionOp XprType; + + static const int NumDims = internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename 
TensorEvaluator::Dimensions KernelDimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType& op, const GpuDevice& device) + : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions& input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions& kernel_dims = m_kernelImpl.dimensions(); + + m_dimensions = m_inputImpl.dimensions(); + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + } + } + + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename InputArgType::Scalar Scalar; + static const int PacketSize = internal::unpacket_traits::size; + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { + preloadKernel(); + m_inputImpl.evalSubExprsIfNeeded(NULL); + if (data) { + executeEval(data); + return false; + } else { + m_buf = 
(Scalar*)m_device.allocate(dimensions().TotalSize() * sizeof(Scalar)); + executeEval(m_buf); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_buf) { + m_device.deallocate(m_buf); + m_buf = NULL; + } + if (m_local_kernel) { + m_device.deallocate((void*)m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + + EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. it's an + // expression that needs to be evaluated) + const Scalar* in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + size_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + Scalar* local = (Scalar*)m_device.allocate(kernel_sz); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(local, m_kernelArg); + const bool PacketAccess = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + + m_kernel = local; + m_local_kernel = true; + } + } + + static unsigned int ceil(unsigned int num, unsigned int denom) { + const unsigned int rounded_toward_zero = num / denom; + if (num > rounded_toward_zero * denom) { + return rounded_toward_zero + 1; + } + return rounded_toward_zero; + } + + void executeEval(Scalar* data) const { + typedef typename TensorEvaluator::Dimensions InputDims; + + const int maxSharedMem = m_device.sharedMemPerBlock(); + const int maxThreadsPerBlock = m_device.maxGpuThreadsPerBlock(); + const int maxBlocksPerProcessor = m_device.maxGpuThreadsPerMultiProcessor() / maxThreadsPerBlock; + const int numMultiProcessors = m_device.getNumGpuMultiProcessors(); + const int warpSize = 32; + + switch (NumKernelDims) { + case 1: { + const int kernel_size = m_kernelImpl.dimensions().TotalSize(); + + const int numX = dimensions()[m_indices[0]]; + const int numP = dimensions().TotalSize() / numX; + int maxX; + dim3 block_size; + + const int single_stride_dim = + static_cast(Layout) == 
static_cast(ColMajor) + ? 0 + : m_inputImpl.dimensions().rank() - 1; + if (m_indices[0] == single_stride_dim) { + // Maximum the reuse + const int inner_dim = ((maxSharedMem / (sizeof(Scalar)) - kernel_size + 1 + 31) / 32) * 32; + maxX = numext::mini(inner_dim, numX); + const int maxP = numext::mini(maxSharedMem / ((kernel_size - 1 + maxX) * sizeof(Scalar)), numP); + block_size.x = numext::mini(maxThreadsPerBlock, maxX); + block_size.y = numext::mini(maxThreadsPerBlock / block_size.x, maxP); + } + else { + // Read as much as possible alongside the inner most dimension, that is the plane + const int inner_dim = maxSharedMem / ((warpSize + kernel_size) * sizeof(Scalar)); + const int maxP = numext::mini(inner_dim, numP); + maxX = numext::mini(maxSharedMem / (inner_dim * sizeof(Scalar)) - kernel_size + 1, numX); + + block_size.x = numext::mini(warpSize, maxX); + block_size.y = numext::mini(maxThreadsPerBlock/block_size.x, maxP); + } + + const int shared_mem = block_size.y * (maxX + kernel_size - 1) * sizeof(Scalar); + gpu_assert(shared_mem <= maxSharedMem); + + const int num_x_blocks = ceil(numX, maxX); + const int blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem); + const int num_y_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks); + + dim3 num_blocks(num_x_blocks, numext::mini(num_y_blocks, ceil(numP, block_size.y))); + + + //cout << "launching 1D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " maxX: " << maxX << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + + const array indices(m_indices[0]); + const array kernel_dims(m_kernelImpl.dimensions()[0]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + switch(kernel_size) { + case 4: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D, Index, InputDims, 4>), num_blocks, block_size, 
shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data); + break; + } + case 7: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data); + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel1D, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data); + } + } + break; + } + + case 2: { + const int idxX = + static_cast(Layout) == static_cast(ColMajor) ? 0 : 1; + const int idxY = + static_cast(Layout) == static_cast(ColMajor) ? 1 : 0; + const int kernel_size_x = m_kernelImpl.dimensions()[idxX]; + const int kernel_size_y = m_kernelImpl.dimensions()[idxY]; + + const int numX = dimensions()[m_indices[idxX]]; + const int numY = dimensions()[m_indices[idxY]]; + const int numP = dimensions().TotalSize() / (numX*numY); + + const float scaling_factor = sqrtf(static_cast(maxSharedMem) / (sizeof(Scalar) * kernel_size_y * kernel_size_x)); + + // Snap maxX to warp size + int inner_dim = ((static_cast(scaling_factor * kernel_size_x) - kernel_size_x + 1 + 32) / 32) * 32; + const int maxX = numext::mini(inner_dim, numX); + const int maxY = numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1)) - kernel_size_y + 1, numY); + const int maxP = numext::mini(maxSharedMem / ((kernel_size_x - 1 + maxX) * (kernel_size_y - 1 + maxY) * sizeof(Scalar)), numP); + + dim3 block_size; + block_size.x = numext::mini(1024, maxX); + block_size.y = numext::mini(1024/block_size.x, maxY); + block_size.z = numext::mini(1024/(block_size.x*block_size.y), maxP); + + const int shared_mem = block_size.z * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * sizeof(Scalar); + gpu_assert(shared_mem <= maxSharedMem); + + const int num_x_blocks = ceil(numX, maxX); + const int num_y_blocks = ceil(numY, maxY); + const int 
blocksPerProcessor = numext::mini(maxBlocksPerProcessor, maxSharedMem / shared_mem); + const int num_z_blocks = ceil(numMultiProcessors * blocksPerProcessor, num_x_blocks * num_y_blocks); + + dim3 num_blocks(num_x_blocks, num_y_blocks, numext::mini(num_z_blocks, ceil(numP, block_size.z))); + + + //cout << "launching 2D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " maxX: " << maxX << " maxY: " << maxY << " maxP: " << maxP << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + + const array indices(m_indices[idxX], m_indices[idxY]); + const array kernel_dims(m_kernelImpl.dimensions()[idxX], + m_kernelImpl.dimensions()[idxY]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + switch (kernel_size_x) { + case 4: { + switch (kernel_size_y) { + case 7: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data); + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data); + break; + } + } + break; + } + case 7: { + switch (kernel_size_y) { + case 4: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data); + break; + } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data); + break; + } + } + break; 
+ } + default: { + LAUNCH_GPU_KERNEL((EigenConvolutionKernel2D, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data); + break; + } + } + break; + } + + case 3: { + const int idxX = + static_cast(Layout) == static_cast(ColMajor) ? 0 : 2; + const int idxY = + static_cast(Layout) == static_cast(ColMajor) ? 1 : 1; + const int idxZ = + static_cast(Layout) == static_cast(ColMajor) ? 2 : 0; + + const int kernel_size_x = m_kernelImpl.dimensions()[idxX]; + const int kernel_size_y = m_kernelImpl.dimensions()[idxY]; + const int kernel_size_z = m_kernelImpl.dimensions()[idxZ]; + + const int numX = dimensions()[m_indices[idxX]]; + const int numY = dimensions()[m_indices[idxY]]; + const int numZ = dimensions()[m_indices[idxZ]]; + const int numP = dimensions().TotalSize() / (numX*numY*numZ); + + const int maxX = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * kernel_size_y * kernel_size_z) - kernel_size_x + 1, numX)); + const int maxY = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * kernel_size_z) - kernel_size_y + 1, numY)); + const int maxZ = numext::mini(128, numext::mini(maxSharedMem / (sizeof(Scalar) * (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1)) - kernel_size_z + 1, numZ)); + + dim3 block_size; + block_size.x = numext::mini(32, maxX); + block_size.y = numext::mini(32, maxY); + block_size.z = numext::mini(1024/(block_size.x*block_size.y), maxZ); + dim3 num_blocks(ceil(numX, maxX), ceil(numY, maxY), ceil(numZ, maxZ)); + + const int shared_mem = (maxX + kernel_size_x - 1) * (maxY + kernel_size_y - 1) * (maxZ + kernel_size_z - 1) * sizeof(Scalar); + gpu_assert(shared_mem <= maxSharedMem); + + //cout << "launching 3D kernel with block_size.x: " << block_size.x << " block_size.y: " << block_size.y << " block_size.z: " << block_size.z << " num_blocks.x: " << num_blocks.x << " 
num_blocks.y: " << num_blocks.y << " num_blocks.z: " << num_blocks.z << " shared_mem: " << shared_mem << " in stream " << m_device.stream() << endl; + const array indices(m_indices[idxX], m_indices[idxY], + m_indices[idxZ]); + const array kernel_dims(m_kernelImpl.dimensions()[idxX], + m_kernelImpl.dimensions()[idxY], + m_kernelImpl.dimensions()[idxZ]); + internal::IndexMapper indexMapper( + m_inputImpl.dimensions(), kernel_dims, indices); + + LAUNCH_GPU_KERNEL((EigenConvolutionKernel3D, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data); + break; + } + + default: { + EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + eigen_assert(m_buf); + eigen_assert(index < m_dimensions.TotalSize()); + return m_buf[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(const Index index) const + { + eigen_assert(m_buf); + eigen_assert(index < m_dimensions.TotalSize()); + return internal::ploadt(m_buf+index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost + // model. + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. 
+ const double convolve_compute_cost = + TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, + PacketSize)); + } + + private: + // No assignment (copies are needed by the kernels) + TensorEvaluator& operator = (const TensorEvaluator&); + + TensorEvaluator m_inputImpl; + TensorEvaluator m_kernelImpl; + KernelArgType m_kernelArg; + Indices m_indices; + Dimensions m_dimensions; + Scalar* m_buf; + const Scalar* m_kernel; + bool m_local_kernel; + + const GpuDevice& m_device; +}; +#endif + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h new file mode 100644 index 0000000..033318f --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorConvolutionSycl.h @@ -0,0 +1,544 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Copyright (C) 2016 Benoit Steiner + +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_SYCL_H +#define EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_SYCL_H + +namespace Eigen { + +/** \class TensorConvolution + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor convolution class. 
+ * + * + */ + +enum class convolution_type { CONV1D, CONV2D, CONV3D }; +template +struct EigenConvolutionKernel; +template +struct EigenConvolutionKernel { + typedef cl::sycl::accessor + Local_accessor; + Local_accessor local_acc; + Evaluator device_evaluator; + Kernel_accessor kernel_filter; + Buffer_accessor buffer_acc; + internal::IndexMapper indexMapper; + const size_t kernelSize; + const cl::sycl::range<2> input_range; + EigenConvolutionKernel(Local_accessor local_acc_, Evaluator device_evaluator_, Kernel_accessor kernel_filter_, + Buffer_accessor buffer_acc_, + internal::IndexMapper indexMapper_, + const size_t kernelSize_, const cl::sycl::range<2> input_range_) + : local_acc(local_acc_), + device_evaluator(device_evaluator_), + kernel_filter(kernel_filter_), + buffer_acc(buffer_acc_), + indexMapper(indexMapper_), + kernelSize(kernelSize_), + input_range(input_range_) {} + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool boundary_check(const BooleanDim2 boolean_check) { + return (boolean_check[0] && boolean_check[1]); + } + void operator()(cl::sycl::nd_item<2> itemID) { + auto buffer_ptr = buffer_acc.get_pointer(); + auto kernel_ptr = kernel_filter.get_pointer(); + // the required row to be calculated for the for each plane in shered memory + const size_t num_input = (itemID.get_local_range()[0] + kernelSize - 1); + const size_t plane_kernel_offset = itemID.get_local_id(1) * num_input; + const size_t input_offset = itemID.get_group(0) * itemID.get_local_range()[0]; + const size_t plane_tensor_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(itemID.get_global_id(1)); + /// fill the shared memory + for (size_t i = itemID.get_local_id(0); i < num_input; i += itemID.get_local_range()[0]) { + const size_t local_index = i + plane_kernel_offset; + const size_t tensor_index = + plane_tensor_offset + indexMapper.mapGpuInputKernelToTensorInputOffset(i + input_offset); + + local_acc[local_index] = + (((i + input_offset) < (input_range[0] + kernelSize - 
1)) && itemID.get_global_id(1) < input_range[1]) + ? device_evaluator.coeff(tensor_index) + : CoeffReturnType(0); + } + + itemID.barrier(cl::sycl::access::fence_space::local_space); + + // calculate the convolution // output start x + const size_t first_output_start = itemID.get_group(0) * (itemID.get_local_range()[0]); + if (boundary_check(itemID.get_global_id() < input_range)) { + CoeffReturnType result = static_cast(0); + const size_t index = plane_kernel_offset + itemID.get_local_id(0); + for (size_t k = 0; k < kernelSize; ++k) { + result += (local_acc[k + index] * kernel_ptr[k]); + } + const size_t tensor_index = + indexMapper.mapGpuOutputPlaneToTensorOutputOffset(itemID.get_global_id(1)) + + indexMapper.mapGpuOutputKernelToTensorOutputOffset(itemID.get_local_id(0) + first_output_start); + buffer_ptr[tensor_index] = result; + } + } +}; + +template +struct EigenConvolutionKernel { + typedef cl::sycl::accessor + Local_accessor; + Local_accessor local_acc; + Evaluator device_evaluator; + Kernel_accessor kernel_filter; + Buffer_accessor buffer_acc; + internal::IndexMapper indexMapper; + const cl::sycl::range<2> kernel_size; + const cl::sycl::range<3> input_range; + EigenConvolutionKernel(Local_accessor local_acc_, Evaluator device_evaluator_, Kernel_accessor kernel_filter_, + Buffer_accessor buffer_acc_, + internal::IndexMapper indexMapper_, + const cl::sycl::range<2> kernel_size_, const cl::sycl::range<3> input_range_) + : local_acc(local_acc_), + device_evaluator(device_evaluator_), + kernel_filter(kernel_filter_), + buffer_acc(buffer_acc_), + indexMapper(indexMapper_), + kernel_size(kernel_size_), + input_range(input_range_) {} + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool boundary_check(const BooleanDim3 boolean_check) { + return (boolean_check[0] && boolean_check[1] && boolean_check[2]); + } + + void operator()(cl::sycl::nd_item<3> itemID) { + auto buffer_ptr = buffer_acc.get_pointer(); + auto kernel_ptr = kernel_filter.get_pointer(); + // the 
required row to be calculated for the for each plane in shered memory + const auto num_input = cl::sycl::range<2>{ + (cl::sycl::range<2>(itemID.get_local_range()[0], itemID.get_local_range()[1]) + kernel_size - 1)}; + + const size_t plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(itemID.get_global_id(2)); + const size_t plane_kernel_offset = itemID.get_local_id(2) * num_input[1]; + + const auto input_offset = cl::sycl::range<2>{itemID.get_group(0) * itemID.get_local_range()[0], + itemID.get_group(1) * itemID.get_local_range()[1]}; + + // fill the local memory + bool in_range_dim2 = itemID.get_global_id(2) < input_range[2]; + for (size_t j = itemID.get_local_id(1); j < num_input[1]; j += itemID.get_local_range()[1]) { + const size_t local_input_offset = num_input[0] * (j + plane_kernel_offset); + bool in_range_dim1 = ((j + input_offset[1]) < (input_range[1] + kernel_size[1] - 1)); + for (size_t i = itemID.get_local_id(0); i < num_input[0]; i += itemID.get_local_range()[0]) { + const size_t local_index = i + local_input_offset; + const size_t tensor_index = plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset( + i + input_offset[0], j + input_offset[1]); + local_acc[local_index] = (((i + input_offset[0]) < (input_range[0] + kernel_size[0] - 1)) && + in_range_dim1 && in_range_dim2) + ? 
device_evaluator.coeff(tensor_index) + : CoeffReturnType(0); + } + } + + itemID.barrier(cl::sycl::access::fence_space::local_space); + + // output offset start for each thread + const auto output_offset = cl::sycl::range<2>{itemID.get_group(0) * itemID.get_local_range()[0], + itemID.get_group(1) * itemID.get_local_range()[1]}; + + if (boundary_check(itemID.get_global_id() < input_range)) { + CoeffReturnType result = static_cast(0); + + for (size_t j = 0; j < kernel_size[1]; j++) { + size_t kernel_offset = kernel_size[0] * j; + const size_t index = + (num_input[0] * (plane_kernel_offset + j + itemID.get_local_id(1))) + itemID.get_local_id(0); + for (size_t i = 0; i < kernel_size[0]; i++) { + result += (local_acc[i + index] * kernel_ptr[i + kernel_offset]); + } + } + const size_t tensor_index = + indexMapper.mapGpuOutputPlaneToTensorOutputOffset(itemID.get_global_id(2)) + + indexMapper.mapGpuOutputKernelToTensorOutputOffset(itemID.get_local_id(0) + output_offset[0], + itemID.get_local_id(1) + output_offset[1]); + + buffer_ptr[tensor_index] = result; + } + } +}; + +template +struct EigenConvolutionKernel { + typedef cl::sycl::accessor + Local_accessor; + Local_accessor local_acc; + Evaluator device_evaluator; + Kernel_accessor kernel_filter; + Buffer_accessor buffer_acc; + internal::IndexMapper indexMapper; + const cl::sycl::range<3> kernel_size; + const cl::sycl::range<3> input_range; + const size_t numP; + + EigenConvolutionKernel(Local_accessor local_acc_, Evaluator device_evaluator_, Kernel_accessor kernel_filter_, + Buffer_accessor buffer_acc_, + internal::IndexMapper indexMapper_, + const cl::sycl::range<3> kernel_size_, const cl::sycl::range<3> input_range_, + const size_t numP_) + : local_acc(local_acc_), + device_evaluator(device_evaluator_), + kernel_filter(kernel_filter_), + buffer_acc(buffer_acc_), + indexMapper(indexMapper_), + kernel_size(kernel_size_), + input_range(input_range_), + numP(numP_) {} + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool 
boundary_check(const BooleanDim3 boolean_check) { + return (boolean_check[0] && boolean_check[1] && boolean_check[2]); + } + void operator()(cl::sycl::nd_item<3> itemID) { + auto buffer_ptr = buffer_acc.get_pointer(); + auto kernel_ptr = kernel_filter.get_pointer(); + const auto num_input = cl::sycl::range<3>{itemID.get_local_range() + kernel_size - 1}; + + const auto input_offset = cl::sycl::range<3>{itemID.get_group().get_id() * itemID.get_local_range()}; + + const auto output_offset = + cl::sycl::range<3>{itemID.get_group().get_id() * itemID.get_local_range() + itemID.get_local_id()}; + + for (size_t p = 0; p < numP; p++) { + /// fill the shared memory + const size_t plane_input_offset = indexMapper.mapGpuInputPlaneToTensorInputOffset(p); + for (size_t k = itemID.get_local_id(2); k < num_input[2]; k += itemID.get_local_range()[2]) { + size_t local_index_dim2 = num_input[0] * num_input[1] * k; + bool cond_k_dim = (k + input_offset[2] < (input_range[2] + kernel_size[2] - 1)); + for (size_t j = itemID.get_local_id(1); j < num_input[1]; j += itemID.get_local_range()[1]) { + bool cond_j_dim = cond_k_dim && (j + input_offset[1] < (input_range[1] + kernel_size[1] - 1)); + size_t local_index_dim1 = (num_input[0] * j) + local_index_dim2; + for (size_t i = itemID.get_local_id(0); i < num_input[0]; i += itemID.get_local_range()[0]) { + bool conds = cond_j_dim && (i + input_offset[0] < (input_range[0] + kernel_size[0] - 1)); + const size_t local_index = local_index_dim1 + i; + const size_t tensor_index = + plane_input_offset + indexMapper.mapGpuInputKernelToTensorInputOffset( + i + input_offset[0], j + input_offset[1], k + input_offset[2]); + local_acc[local_index] = conds ? 
device_evaluator.coeff(tensor_index) : CoeffReturnType(0); + } + } + } + itemID.barrier(cl::sycl::access::fence_space::local_space); + + // calculate the convolution + + if (boundary_check(itemID.get_global_id() < input_range)) { + CoeffReturnType result = static_cast(0); + for (size_t k = 0; k < kernel_size[2]; k++) { + for (size_t j = 0; j < kernel_size[1]; j++) { + for (size_t i = 0; i < kernel_size[0]; i++) { + const size_t kernel_index = i + kernel_size[0] * (j + kernel_size[1] * k); + const size_t local_index = + ((i + itemID.get_local_id(0)) + + num_input[0] * ((j + itemID.get_local_id(1)) + num_input[1] * (k + itemID.get_local_id(2)))); + + result += (local_acc[local_index] * kernel_ptr[kernel_index]); + } + } + } + const size_t tensor_index = + indexMapper.mapGpuOutputPlaneToTensorOutputOffset(p) + + indexMapper.mapGpuOutputKernelToTensorOutputOffset(output_offset[0], output_offset[1], output_offset[2]); + buffer_ptr[tensor_index] = result; + } + + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + } +}; + +template +struct TensorEvaluator, Eigen::SyclDevice> { + typedef TensorConvolutionOp XprType; + + static const int NumDims = + internal::array_size::Dimensions>::value; + static const int NumKernelDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename TensorEvaluator::Dimensions KernelDimensions; + typedef const Eigen::SyclDevice Device; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename InputArgType::Scalar Scalar; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef StorageMemory KernelStorage; + + enum { + IsAligned = TensorEvaluator::IsAligned & + TensorEvaluator::IsAligned, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + 
CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType &op, const Eigen::SyclDevice &device) + : m_inputImpl(op.inputExpression(), device), + m_kernelArg(op.kernelExpression()), + m_kernelImpl(op.kernelExpression(), device), + m_indices(op.indices()), + m_buf(NULL), + m_kernel(NULL), + m_local_kernel(false), + m_device(device) { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == + static_cast(TensorEvaluator::Layout)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + const typename TensorEvaluator::Dimensions &input_dims = m_inputImpl.dimensions(); + const typename TensorEvaluator::Dimensions &kernel_dims = + m_kernelImpl.dimensions(); + + m_dimensions = m_inputImpl.dimensions(); + for (int i = 0; i < NumKernelDims; ++i) { + const Index index = op.indices()[i]; + const Index input_dim = input_dims[index]; + const Index kernel_dim = kernel_dims[i]; + const Index result_dim = input_dim - kernel_dim + 1; + m_dimensions[index] = result_dim; + } + } + + EIGEN_DEVICE_FUNC const Dimensions &dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + preloadKernel(); + m_inputImpl.evalSubExprsIfNeeded(NULL); + if (data) { + executeEval(data); + return false; + } else { + m_buf = (EvaluatorPointerType)m_device.get( + (Scalar *)m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar))); + executeEval(m_buf); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + m_inputImpl.cleanup(); + if (m_buf) { + m_device.deallocate_temp(m_buf); + m_buf = NULL; + } + if (m_local_kernel) { + m_device.deallocate_temp(m_kernel); + m_local_kernel = false; + } + m_kernel = NULL; + } + /// used by sycl in order to build the sycl buffer + 
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device &device() const { return m_device; } + /// used by sycl in order to build the sycl buffer + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return m_buf; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void preloadKernel() { + // Don't make a local copy of the kernel unless we have to (i.e. it's an + // expression that needs to be evaluated) + typename KernelStorage::Type in_place = m_kernelImpl.data(); + if (in_place) { + m_kernel = in_place; + m_local_kernel = false; + } else { + ptrdiff_t kernel_sz = m_kernelImpl.dimensions().TotalSize() * sizeof(Scalar); + EvaluatorPointerType local = (EvaluatorPointerType)m_device.get((Scalar *)m_device.allocate_temp(kernel_sz)); + typedef TensorEvalToOp EvalTo; + EvalTo evalToTmp(m_device.get(local), m_kernelArg); + const bool PacketAccess = internal::IsVectorizable::value; + internal::TensorExecutor::run(evalToTmp, m_device); + m_kernel = local; + m_local_kernel = true; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void executeEval(EvaluatorPointerType data) const { + typedef TensorEvaluator InputEvaluator; + typedef typename InputEvaluator::Dimensions InputDims; + switch (NumKernelDims) { + case 1: { + const size_t numX = dimensions()[m_indices[0]]; + const size_t numP = dimensions().TotalSize() / numX; + const auto input_dim = std::array{numX, numP}; + auto global_range = cl::sycl::range<2>{}; + auto local_range = cl::sycl::range<2>{}; + const size_t kernel_size = m_kernelImpl.dimensions().TotalSize(); + + m_device.parallel_for_setup(input_dim, global_range, local_range); + const size_t local_memory_size = (local_range[0] + kernel_size - 1) * (local_range[1]); + gpu_assert(static_cast(local_memory_size) <= m_device.sharedMemPerBlock()); + const array indices{{m_indices[0]}}; + const array kernel_dims{{m_kernelImpl.dimensions()[0]}}; + internal::IndexMapper indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); + + typedef 
EigenConvolutionKernel + ConvKernel; + + m_device.template binary_kernel_launcher( + m_inputImpl, m_kernel, data, cl::sycl::nd_range<2>(global_range, local_range), local_memory_size, + indexMapper, kernel_size, cl::sycl::range<2>(input_dim[0], input_dim[1])); + break; + } + + case 2: { + auto kernel_index = std::array{static_cast(Layout) == static_cast(ColMajor) ? 0 : 1, + static_cast(Layout) == static_cast(ColMajor) ? 1 : 0}; + auto kernel_size = cl::sycl::range<2>{(size_t)m_kernelImpl.dimensions()[kernel_index[0]], + (size_t)m_kernelImpl.dimensions()[kernel_index[1]]}; + const size_t numX = dimensions()[m_indices[kernel_index[0]]]; + const size_t numY = dimensions()[m_indices[kernel_index[1]]]; + const size_t numP = dimensions().TotalSize() / (numX * numY); + auto input_dim = std::array{numX, numY, numP}; + + auto global_range = cl::sycl::range<3>{}; + auto local_range = cl::sycl::range<3>{}; + + m_device.parallel_for_setup(input_dim, global_range, local_range); + + const size_t local_memory_size = + (local_range[0] + kernel_size[0] - 1) * (local_range[1] + kernel_size[1] - 1) * local_range[2]; + gpu_assert(static_cast(local_memory_size) <= m_device.sharedMemPerBlock()); + const array indices{{m_indices[kernel_index[0]], m_indices[kernel_index[1]]}}; + const array kernel_dims{ + {m_kernelImpl.dimensions()[kernel_index[0]], m_kernelImpl.dimensions()[kernel_index[1]]}}; + internal::IndexMapper indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); + typedef EigenConvolutionKernel + ConvKernel; + m_device.template binary_kernel_launcher( + m_inputImpl, m_kernel, data, cl::sycl::nd_range<3>(global_range, local_range), local_memory_size, + indexMapper, kernel_size, cl::sycl::range<3>{input_dim[0], input_dim[1], input_dim[2]}); + break; + } + + case 3: { + auto kernel_index = std::array{static_cast(Layout) == static_cast(ColMajor) ? 0 : 2, + static_cast(Layout) == static_cast(ColMajor) ? 1 : 1, + static_cast(Layout) == static_cast(ColMajor) ? 
2 : 0}; + + auto kernel_size = cl::sycl::range<3>{(size_t)m_kernelImpl.dimensions()[kernel_index[0]], + (size_t)m_kernelImpl.dimensions()[kernel_index[1]], + (size_t)m_kernelImpl.dimensions()[kernel_index[2]]}; + + const size_t numX = dimensions()[m_indices[kernel_index[0]]]; + const size_t numY = dimensions()[m_indices[kernel_index[1]]]; + const size_t numZ = dimensions()[m_indices[kernel_index[2]]]; + auto input_dim = std::array{numX, numY, numZ}; + const size_t numP = dimensions().TotalSize() / (numX * numY * numZ); + + const array indices{ + {m_indices[kernel_index[0]], m_indices[kernel_index[1]], m_indices[kernel_index[2]]}}; + const array kernel_dims{{m_kernelImpl.dimensions()[kernel_index[0]], + m_kernelImpl.dimensions()[kernel_index[1]], + m_kernelImpl.dimensions()[kernel_index[2]]}}; + + internal::IndexMapper indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); + + auto global_range = cl::sycl::range<3>{}; + auto local_range = cl::sycl::range<3>{}; + + m_device.parallel_for_setup(input_dim, global_range, local_range); + auto local_memory_range = (local_range + kernel_size - 1); + const size_t local_memory_size = local_memory_range[0] * local_memory_range[1] * local_memory_range[2]; + + gpu_assert(static_cast(local_memory_size) <= m_device.sharedMemPerBlock()); + typedef EigenConvolutionKernel + ConvKernel; + m_device.template binary_kernel_launcher( + m_inputImpl, m_kernel, data, cl::sycl::nd_range<3>(global_range, local_range), local_memory_size, + indexMapper, kernel_size, cl::sycl::range<3>(input_dim[0], input_dim[1], input_dim[2]), numP); + break; + } + + default: { + EIGEN_STATIC_ASSERT((NumKernelDims >= 1 && NumKernelDims <= 3), + THIS_METHOD_IS_ONLY_FOR_OBJECTS_OF_A_SPECIFIC_SIZE); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_buf != NULL); + eigen_assert(index < m_dimensions.TotalSize()); + return m_buf[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
PacketReturnType packet(const Index index) const { + eigen_assert(m_buf != NULL); + eigen_assert(index < m_dimensions.TotalSize()); + return internal::ploadt(m_buf + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): FIXME: For now, this is just a copy of the CPU cost + // model. + const double kernel_size = m_kernelImpl.dimensions().TotalSize(); + // We ignore the use of fused multiply-add. + const double convolve_compute_cost = TensorOpCost::AddCost() + TensorOpCost::MulCost(); + const double firstIndex_compute_cost = + NumDims * + (2 * TensorOpCost::AddCost() + 2 * TensorOpCost::MulCost() + TensorOpCost::DivCost()); + return TensorOpCost(0, 0, firstIndex_compute_cost, vectorized, PacketSize) + + kernel_size * (m_inputImpl.costPerCoeff(vectorized) + m_kernelImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, convolve_compute_cost, vectorized, PacketSize)); + } + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_kernelImpl.bind(cgh); + m_inputImpl.bind(cgh); + m_buf.bind(cgh); + m_kernel.bind(cgh); + } + + private: + // No assignment (copies are needed by the kernels) + TensorEvaluator &operator=(const TensorEvaluator &); + TensorEvaluator m_inputImpl; + KernelArgType m_kernelArg; + TensorEvaluator m_kernelImpl; + Indices m_indices; + Dimensions m_dimensions; + EvaluatorPointerType m_buf; + typename KernelStorage::Type m_kernel; + bool m_local_kernel; + const Eigen::SyclDevice EIGEN_DEVICE_REF m_device; +}; // namespace Eigen + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CONVOLUTION_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h new file mode 100644 index 0000000..195267c --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h @@ -0,0 +1,214 @@ +// 
This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Rasmus Munk Larsen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H +#define EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H + +namespace Eigen { + +/** \class TensorEvaluator + * \ingroup CXX11_Tensor_Module + * + * \brief A cost model used to limit the number of threads used for evaluating + * tensor expression. + * + */ + +// Class storing the cost of evaluating a tensor expression in terms of the +// estimated number of operand bytes loads, bytes stored, and compute cycles. +class TensorOpCost { + public: + // TODO(rmlarsen): Fix the scalar op costs in Eigen proper. Even a simple + // model based on minimal reciprocal throughput numbers from Intel or + // Agner Fog's tables would be better than what is there now. 
+ template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int MulCost() { + return internal::functor_traits< + internal::scalar_product_op >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int AddCost() { + return internal::functor_traits >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int DivCost() { + return internal::functor_traits< + internal::scalar_quotient_op >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int ModCost() { + return internal::functor_traits >::Cost; + } + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int CastCost() { + return internal::functor_traits< + internal::scalar_cast_op >::Cost; + } + + EIGEN_DEVICE_FUNC + TensorOpCost() : bytes_loaded_(0), bytes_stored_(0), compute_cycles_(0) {} + EIGEN_DEVICE_FUNC + TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles) + : bytes_loaded_(bytes_loaded), + bytes_stored_(bytes_stored), + compute_cycles_(compute_cycles) {} + + EIGEN_DEVICE_FUNC + TensorOpCost(double bytes_loaded, double bytes_stored, double compute_cycles, + bool vectorized, double packet_size) + : bytes_loaded_(bytes_loaded), + bytes_stored_(bytes_stored), + compute_cycles_(vectorized ? 
compute_cycles / packet_size + : compute_cycles) { + eigen_assert(bytes_loaded >= 0 && (numext::isfinite)(bytes_loaded)); + eigen_assert(bytes_stored >= 0 && (numext::isfinite)(bytes_stored)); + eigen_assert(compute_cycles >= 0 && (numext::isfinite)(compute_cycles)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_loaded() const { + return bytes_loaded_; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double bytes_stored() const { + return bytes_stored_; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double compute_cycles() const { + return compute_cycles_; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double total_cost( + double load_cost, double store_cost, double compute_cost) const { + return load_cost * bytes_loaded_ + store_cost * bytes_stored_ + + compute_cost * compute_cycles_; + } + + // Drop memory access component. Intended for cases when memory accesses are + // sequential or are completely masked by computations. + EIGEN_DEVICE_FUNC void dropMemoryCost() { + bytes_loaded_ = 0; + bytes_stored_ = 0; + } + + // TODO(rmlarsen): Define min in terms of total cost, not elementwise. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMin( + const TensorOpCost& rhs) const { + double bytes_loaded = numext::mini(bytes_loaded_, rhs.bytes_loaded()); + double bytes_stored = numext::mini(bytes_stored_, rhs.bytes_stored()); + double compute_cycles = numext::mini(compute_cycles_, rhs.compute_cycles()); + return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles); + } + + // TODO(rmlarsen): Define max in terms of total cost, not elementwise. 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost cwiseMax( + const TensorOpCost& rhs) const { + double bytes_loaded = numext::maxi(bytes_loaded_, rhs.bytes_loaded()); + double bytes_stored = numext::maxi(bytes_stored_, rhs.bytes_stored()); + double compute_cycles = numext::maxi(compute_cycles_, rhs.compute_cycles()); + return TensorOpCost(bytes_loaded, bytes_stored, compute_cycles); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator+=( + const TensorOpCost& rhs) { + bytes_loaded_ += rhs.bytes_loaded(); + bytes_stored_ += rhs.bytes_stored(); + compute_cycles_ += rhs.compute_cycles(); + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost& operator*=(double rhs) { + bytes_loaded_ *= rhs; + bytes_stored_ *= rhs; + compute_cycles_ *= rhs; + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator+( + TensorOpCost lhs, const TensorOpCost& rhs) { + lhs += rhs; + return lhs; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*( + TensorOpCost lhs, double rhs) { + lhs *= rhs; + return lhs; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE friend TensorOpCost operator*( + double lhs, TensorOpCost rhs) { + rhs *= lhs; + return rhs; + } + + friend std::ostream& operator<<(std::ostream& os, const TensorOpCost& tc) { + return os << "[bytes_loaded = " << tc.bytes_loaded() + << ", bytes_stored = " << tc.bytes_stored() + << ", compute_cycles = " << tc.compute_cycles() << "]"; + } + + private: + double bytes_loaded_; + double bytes_stored_; + double compute_cycles_; +}; + +// TODO(rmlarsen): Implement a policy that chooses an "optimal" number of theads +// in [1:max_threads] instead of just switching multi-threading off for small +// work units. +template +class TensorCostModel { + public: + // Scaling from Eigen compute cost to device cycles. + static const int kDeviceCyclesPerComputeCycle = 1; + + // Costs in device cycles. 
+ static const int kStartupCycles = 100000; + static const int kPerThreadCycles = 100000; + static const int kTaskSize = 40000; + + // Returns the number of threads in [1:max_threads] to use for + // evaluating an expression with the given output size and cost per + // coefficient. + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int numThreads( + double output_size, const TensorOpCost& cost_per_coeff, int max_threads) { + double cost = totalCost(output_size, cost_per_coeff); + double threads = (cost - kStartupCycles) / kPerThreadCycles + 0.9; + // Make sure we don't invoke undefined behavior when we convert to an int. + threads = numext::mini(threads, GenericNumTraits::highest()); + return numext::mini(max_threads, + numext::maxi(1, static_cast(threads))); + } + + // taskSize assesses parallel task size. + // Value of 1.0 means ideal parallel task size. Values < 1.0 mean that task + // granularity needs to be increased to mitigate parallelization overheads. + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double taskSize( + double output_size, const TensorOpCost& cost_per_coeff) { + return totalCost(output_size, cost_per_coeff) / kTaskSize; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double totalCost( + double output_size, const TensorOpCost& cost_per_coeff) { + // Cost of memory fetches from L2 cache. 64 is typical cache line size. + // 11 is L2 cache latency on Haswell. + // We don't know whether data is in L1, L2 or L3. But we are most interested + // in single-threaded computational time around 100us-10ms (smaller time + // is too small for parallelization, larger time is not interesting + // either because we are probably using all available threads already). + // And for the target time range, L2 seems to be what matters. Data set + // fitting into L1 is too small to take noticeable time. Data set fitting + // only into L3 presumably will take more than 10ms to load and process. 
+ const double kLoadCycles = 1.0 / 64 * 11; + const double kStoreCycles = 1.0 / 64 * 11; + // Scaling from Eigen compute cost to device cycles. + return output_size * + cost_per_coeff.total_cost(kLoadCycles, kStoreCycles, + kDeviceCyclesPerComputeCycle); + } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_COST_MODEL_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h new file mode 100644 index 0000000..95a8a84 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h @@ -0,0 +1,347 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H +#define EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H + +namespace Eigen { + +/** \class TensorCustomUnaryOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor custom class. 
+ * + * + */ +namespace internal { +template +struct traits > +{ + typedef typename XprType::Scalar Scalar; + typedef typename XprType::StorageKind StorageKind; + typedef typename XprType::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + typedef typename traits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCustomUnaryOpEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef TensorCustomUnaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCustomUnaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomUnaryOp(const XprType& expr, const CustomUnaryFunc& func) + : m_expr(expr), m_func(func) {} + + EIGEN_DEVICE_FUNC + const CustomUnaryFunc& func() const { return m_func; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_expr; } + + protected: + typename XprType::Nested m_expr; + const CustomUnaryFunc m_func; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorCustomUnaryOp ArgType; + typedef typename internal::traits::Index Index; + static const int NumDims = internal::traits::NumDimensions; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename 
Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const ArgType& op, const Device& device) + : m_op(op), m_device(device), m_result(NULL) + { + m_dimensions = op.func().dimensions(op.expression()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.get( (CoeffReturnType*) + m_device.allocate_temp(dimensions().TotalSize() * sizeof(Scalar)))); + evalTo(m_result); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): Extend CustomOp API to return its cost estimate. 
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_result.bind(cgh); + } +#endif + + protected: + void evalTo(EvaluatorPointerType data) { + TensorMap > result(m_device.get(data), m_dimensions); + m_op.func().eval(m_op.expression(), result, m_device); + } + + Dimensions m_dimensions; + const ArgType m_op; + const Device EIGEN_DEVICE_REF m_device; + EvaluatorPointerType m_result; +}; + + + +/** \class TensorCustomBinaryOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor custom class. + * + * + */ +namespace internal { +template +struct traits > +{ + typedef typename internal::promote_storage_type::ret Scalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = traits::Layout; + typedef typename conditional::val, + typename traits::PointerType, typename traits::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCustomBinaryOp& type; +}; + +template +struct nested > +{ + typedef TensorCustomBinaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCustomBinaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real 
RealScalar; + typedef typename internal::traits::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCustomBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const CustomBinaryFunc& func) + + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_func(func) {} + + EIGEN_DEVICE_FUNC + const CustomBinaryFunc& func() const { return m_func; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const CustomBinaryFunc m_func; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorCustomBinaryOp XprType; + typedef typename internal::traits::Index Index; + static const int NumDims = internal::traits::NumDimensions; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE 
TensorEvaluator(const XprType& op, const Device& device) + : m_op(op), m_device(device), m_result(NULL) + { + m_dimensions = op.func().dimensions(op.lhsExpression(), op.rhsExpression()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + if (data) { + evalTo(data); + return false; + } else { + m_result = static_cast(m_device.get( (CoeffReturnType*) + m_device.allocate_temp(dimensions().TotalSize() * sizeof(CoeffReturnType)))); + evalTo(m_result); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_result != NULL) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_result[index]; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_result + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // TODO(rmlarsen): Extend CustomOp API to return its cost estimate. 
+ return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_result.bind(cgh); + } +#endif + + protected: + void evalTo(EvaluatorPointerType data) { + TensorMap > result(m_device.get(data), m_dimensions); + m_op.func().eval(m_op.lhsExpression(), m_op.rhsExpression(), result, m_device); + } + + Dimensions m_dimensions; + const XprType m_op; + const Device EIGEN_DEVICE_REF m_device; + EvaluatorPointerType m_result; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_CUSTOM_OP_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h new file mode 100644 index 0000000..96fa46c --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H + +namespace Eigen { + +/** \class TensorDevice + * \ingroup CXX11_Tensor_Module + * + * \brief Pseudo expression providing an operator = that will evaluate its argument + * on the specified computing 'device' (GPU, thread pool, ...) + * + * Example: + * C.device(EIGEN_GPU) = A + B; + * + * Todo: operator *= and /=. 
+ */ + +template class TensorDevice { + public: + TensorDevice(const DeviceType& device, ExpressionType& expression) : m_device(device), m_expression(expression) {} + + EIGEN_DEFAULT_COPY_CONSTRUCTOR(TensorDevice) + + template + EIGEN_STRONG_INLINE TensorDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + Assign assign(m_expression, other); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + template + EIGEN_STRONG_INLINE TensorDevice& operator+=(const OtherDerived& other) { + typedef typename OtherDerived::Scalar Scalar; + typedef TensorCwiseBinaryOp, const ExpressionType, const OtherDerived> Sum; + Sum sum(m_expression, other); + typedef TensorAssignOp Assign; + Assign assign(m_expression, sum); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + template + EIGEN_STRONG_INLINE TensorDevice& operator-=(const OtherDerived& other) { + typedef typename OtherDerived::Scalar Scalar; + typedef TensorCwiseBinaryOp, const ExpressionType, const OtherDerived> Difference; + Difference difference(m_expression, other); + typedef TensorAssignOp Assign; + Assign assign(m_expression, difference); + internal::TensorExecutor::run(assign, m_device); + return *this; + } + + protected: + const DeviceType& m_device; + ExpressionType& m_expression; +}; + +/** \class TensorAsyncDevice + * \ingroup CXX11_Tensor_Module + * + * \brief Pseudo expression providing an operator = that will evaluate its + * argument asynchronously on the specified device. Currently only + * ThreadPoolDevice implements proper asynchronous execution, while the default + * and GPU devices just run the expression synchronously and call m_done() on + * completion.. + * + * Example: + * auto done = []() { ... expression evaluation done ... 
}; + * C.device(thread_pool_device, std::move(done)) = A + B; + */ + +template +class TensorAsyncDevice { + public: + TensorAsyncDevice(const DeviceType& device, ExpressionType& expression, + DoneCallback done) + : m_device(device), m_expression(expression), m_done(std::move(done)) {} + + template + EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + typedef internal::TensorExecutor Executor; + + Assign assign(m_expression, other); + Executor::run(assign, m_device); + m_done(); + + return *this; + } + + protected: + const DeviceType& m_device; + ExpressionType& m_expression; + DoneCallback m_done; +}; + + +#ifdef EIGEN_USE_THREADS +template +class TensorAsyncDevice { + public: + TensorAsyncDevice(const ThreadPoolDevice& device, ExpressionType& expression, + DoneCallback done) + : m_device(device), m_expression(expression), m_done(std::move(done)) {} + + template + EIGEN_STRONG_INLINE TensorAsyncDevice& operator=(const OtherDerived& other) { + typedef TensorAssignOp Assign; + typedef internal::TensorAsyncExecutor Executor; + + // WARNING: After assignment 'm_done' callback will be in undefined state. 
+ Assign assign(m_expression, other); + Executor::runAsync(assign, m_device, std::move(m_done)); + + return *this; + } + + protected: + const ThreadPoolDevice& m_device; + ExpressionType& m_expression; + DoneCallback m_done; +}; +#endif + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h new file mode 100644 index 0000000..f779239 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceCuda.h @@ -0,0 +1,6 @@ + +#if defined(__clang__) || defined(__GNUC__) +#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorDeviceGpu.h file" +#endif + +#include "TensorDeviceGpu.h" diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h new file mode 100644 index 0000000..46b9d3a --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h @@ -0,0 +1,104 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H + + +namespace Eigen { + +// Default device for the machine (typically a single cpu core) +struct DefaultDevice { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return internal::aligned_malloc(num_bytes); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + internal::aligned_free(buffer); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const { + return allocate(num_bytes); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const { + deallocate(buffer); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { + ::memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { + ::memset(buffer, c, n); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const { + return data; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t numThreads() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) + // Running on the host CPU + return 1; +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + return 64; +#else + // Running on a CUDA device + return 32; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) + // Running on the host CPU + return l1CacheSize(); +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + return 48*1024; // FIXME : update this number for HIP +#else + // Running on a CUDA device, return the amount of 
shared memory available. + return 48*1024; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) + // Running single threaded on the host CPU + return l3CacheSize(); +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + return firstLevelCacheSize(); // FIXME : update this number for HIP +#else + // Running on a CUDA device + return firstLevelCacheSize(); +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { +#if !defined(EIGEN_GPU_COMPILE_PHASE) + // Running single threaded on the host CPU + // Should return an enum that encodes the ISA supported by the CPU + return 1; +#elif defined(EIGEN_HIP_DEVICE_COMPILE) + // Running on a HIP device + // return 1 as major for HIP + return 1; +#else + // Running on a CUDA device + return EIGEN_CUDA_ARCH / 100; +#endif + } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_DEFAULT_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h new file mode 100644 index 0000000..ec2e3cb --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h @@ -0,0 +1,389 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H + +// This header file container defines fo gpu* macros which will resolve to +// their equivalent hip* or cuda* versions depending on the compiler in use +// A separate header (included at the end of this file) will undefine all +#include "TensorGpuHipCudaDefines.h" + +namespace Eigen { + +static const int kGpuScratchSize = 1024; + +// This defines an interface that GPUDevice can take to use +// HIP / CUDA streams underneath. +class StreamInterface { + public: + virtual ~StreamInterface() {} + + virtual const gpuStream_t& stream() const = 0; + virtual const gpuDeviceProp_t& deviceProperties() const = 0; + + // Allocate memory on the actual device where the computation will run + virtual void* allocate(size_t num_bytes) const = 0; + virtual void deallocate(void* buffer) const = 0; + + // Return a scratchpad buffer of size 1k + virtual void* scratchpad() const = 0; + + // Return a semaphore. The semaphore is initially initialized to 0, and + // each kernel using it is responsible for resetting to 0 upon completion + // to maintain the invariant that the semaphore is always equal to 0 upon + // each kernel start. + virtual unsigned int* semaphore() const = 0; +}; + +class GpuDeviceProperties { + public: + GpuDeviceProperties() : + initialized_(false), first_(true), device_properties_(nullptr) {} + + ~GpuDeviceProperties() { + if (device_properties_) { + delete[] device_properties_; + } + } + + EIGEN_STRONG_INLINE const gpuDeviceProp_t& get(int device) const { + return device_properties_[device]; + } + + EIGEN_STRONG_INLINE bool isInitialized() const { + return initialized_; + } + + void initialize() { + if (!initialized_) { + // Attempts to ensure proper behavior in the case of multiple threads + // calling this function simultaneously. 
This would be trivial to + // implement if we could use std::mutex, but unfortunately mutex don't + // compile with nvcc, so we resort to atomics and thread fences instead. + // Note that if the caller uses a compiler that doesn't support c++11 we + // can't ensure that the initialization is thread safe. + if (first_.exchange(false)) { + // We're the first thread to reach this point. + int num_devices; + gpuError_t status = gpuGetDeviceCount(&num_devices); + if (status != gpuSuccess) { + std::cerr << "Failed to get the number of GPU devices: " + << gpuGetErrorString(status) + << std::endl; + gpu_assert(status == gpuSuccess); + } + device_properties_ = new gpuDeviceProp_t[num_devices]; + for (int i = 0; i < num_devices; ++i) { + status = gpuGetDeviceProperties(&device_properties_[i], i); + if (status != gpuSuccess) { + std::cerr << "Failed to initialize GPU device #" + << i + << ": " + << gpuGetErrorString(status) + << std::endl; + gpu_assert(status == gpuSuccess); + } + } + + std::atomic_thread_fence(std::memory_order_release); + initialized_ = true; + } else { + // Wait for the other thread to inititialize the properties. 
+ while (!initialized_) { + std::atomic_thread_fence(std::memory_order_acquire); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + } + } + } + + private: + volatile bool initialized_; + std::atomic first_; + gpuDeviceProp_t* device_properties_; +}; + +EIGEN_ALWAYS_INLINE const GpuDeviceProperties& GetGpuDeviceProperties() { + static GpuDeviceProperties* deviceProperties = new GpuDeviceProperties(); + if (!deviceProperties->isInitialized()) { + deviceProperties->initialize(); + } + return *deviceProperties; +} + +EIGEN_ALWAYS_INLINE const gpuDeviceProp_t& GetGpuDeviceProperties(int device) { + return GetGpuDeviceProperties().get(device); +} + +static const gpuStream_t default_stream = gpuStreamDefault; + +class GpuStreamDevice : public StreamInterface { + public: + // Use the default stream on the current device + GpuStreamDevice() : stream_(&default_stream), scratch_(NULL), semaphore_(NULL) { + gpuGetDevice(&device_); + } + // Use the default stream on the specified device + GpuStreamDevice(int device) : stream_(&default_stream), device_(device), scratch_(NULL), semaphore_(NULL) {} + // Use the specified stream. Note that it's the + // caller responsibility to ensure that the stream can run on + // the specified device. If no device is specified the code + // assumes that the stream is associated to the current gpu device. 
+ GpuStreamDevice(const gpuStream_t* stream, int device = -1) + : stream_(stream), device_(device), scratch_(NULL), semaphore_(NULL) { + if (device < 0) { + gpuGetDevice(&device_); + } else { + int num_devices; + gpuError_t err = gpuGetDeviceCount(&num_devices); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + gpu_assert(device < num_devices); + device_ = device; + } + } + + virtual ~GpuStreamDevice() { + if (scratch_) { + deallocate(scratch_); + } + } + + const gpuStream_t& stream() const { return *stream_; } + const gpuDeviceProp_t& deviceProperties() const { + return GetGpuDeviceProperties(device_); + } + virtual void* allocate(size_t num_bytes) const { + gpuError_t err = gpuSetDevice(device_); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + void* result; + err = gpuMalloc(&result, num_bytes); + gpu_assert(err == gpuSuccess); + gpu_assert(result != NULL); + return result; + } + virtual void deallocate(void* buffer) const { + gpuError_t err = gpuSetDevice(device_); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + gpu_assert(buffer != NULL); + err = gpuFree(buffer); + gpu_assert(err == gpuSuccess); + } + + virtual void* scratchpad() const { + if (scratch_ == NULL) { + scratch_ = allocate(kGpuScratchSize + sizeof(unsigned int)); + } + return scratch_; + } + + virtual unsigned int* semaphore() const { + if (semaphore_ == NULL) { + char* scratch = static_cast(scratchpad()) + kGpuScratchSize; + semaphore_ = reinterpret_cast(scratch); + gpuError_t err = gpuMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + } + return semaphore_; + } + + private: + const gpuStream_t* stream_; + int device_; + mutable void* scratch_; + mutable unsigned int* semaphore_; +}; + +struct GpuDevice { + // The StreamInterface is not owned: the caller is + // responsible for its initialization and eventual destruction. 
+ explicit GpuDevice(const StreamInterface* stream) : stream_(stream), max_blocks_(INT_MAX) { + eigen_assert(stream); + } + explicit GpuDevice(const StreamInterface* stream, int num_blocks) : stream_(stream), max_blocks_(num_blocks) { + eigen_assert(stream); + } + // TODO(bsteiner): This is an internal API, we should not expose it. + EIGEN_STRONG_INLINE const gpuStream_t& stream() const { + return stream_->stream(); + } + + EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return stream_->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + stream_->deallocate(buffer); + } + + EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const { + return stream_->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const { + stream_->deallocate(buffer); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const { + return data; + } + + EIGEN_STRONG_INLINE void* scratchpad() const { + return stream_->scratchpad(); + } + + EIGEN_STRONG_INLINE unsigned int* semaphore() const { + return stream_->semaphore(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t err = gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToDevice, + stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); +#else + EIGEN_UNUSED_VARIABLE(dst); + EIGEN_UNUSED_VARIABLE(src); + EIGEN_UNUSED_VARIABLE(n); + eigen_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + gpuError_t err = + gpuMemcpyAsync(dst, src, n, gpuMemcpyHostToDevice, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + } + + EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + gpuError_t err = + 
gpuMemcpyAsync(dst, src, n, gpuMemcpyDeviceToHost, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t err = gpuMemsetAsync(buffer, c, n, stream_->stream()); + EIGEN_UNUSED_VARIABLE(err) + gpu_assert(err == gpuSuccess); +#else + eigen_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE size_t numThreads() const { + // FIXME + return 32; + } + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { + // FIXME + return 48*1024; + } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // We won't try to take advantage of the l2 cache for the time being, and + // there is no l3 cache on hip/cuda devices. + return firstLevelCacheSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void synchronize() const { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t err = gpuStreamSynchronize(stream_->stream()); + if (err != gpuSuccess) { + std::cerr << "Error detected in GPU stream: " + << gpuGetErrorString(err) + << std::endl; + gpu_assert(err == gpuSuccess); + } +#else + gpu_assert(false && "The default device should be used instead to generate kernel code"); +#endif + } + + EIGEN_STRONG_INLINE int getNumGpuMultiProcessors() const { + return stream_->deviceProperties().multiProcessorCount; + } + EIGEN_STRONG_INLINE int maxGpuThreadsPerBlock() const { + return stream_->deviceProperties().maxThreadsPerBlock; + } + EIGEN_STRONG_INLINE int maxGpuThreadsPerMultiProcessor() const { + return stream_->deviceProperties().maxThreadsPerMultiProcessor; + } + EIGEN_STRONG_INLINE int sharedMemPerBlock() const { + return stream_->deviceProperties().sharedMemPerBlock; + } + EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return stream_->deviceProperties().major; + } + EIGEN_STRONG_INLINE int minorDeviceVersion() const { + return 
stream_->deviceProperties().minor; + } + + EIGEN_STRONG_INLINE int maxBlocks() const { + return max_blocks_; + } + + // This function checks if the GPU runtime recorded an error for the + // underlying stream device. + inline bool ok() const { +#ifdef EIGEN_GPUCC + gpuError_t error = gpuStreamQuery(stream_->stream()); + return (error == gpuSuccess) || (error == gpuErrorNotReady); +#else + return false; +#endif + } + + private: + const StreamInterface* stream_; + int max_blocks_; +}; + +#if defined(EIGEN_HIPCC) + +#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ + hipLaunchKernelGGL(kernel, dim3(gridsize), dim3(blocksize), (sharedmem), (device).stream(), __VA_ARGS__); \ + gpu_assert(hipGetLastError() == hipSuccess); + +#else + +#define LAUNCH_GPU_KERNEL(kernel, gridsize, blocksize, sharedmem, device, ...) \ + (kernel) <<< (gridsize), (blocksize), (sharedmem), (device).stream() >>> (__VA_ARGS__); \ + gpu_assert(cudaGetLastError() == cudaSuccess); + +#endif + +// FIXME: Should be device and kernel specific. +#ifdef EIGEN_GPUCC +static EIGEN_DEVICE_FUNC inline void setGpuSharedMemConfig(gpuSharedMemConfig config) { +#ifndef EIGEN_GPU_COMPILE_PHASE + gpuError_t status = gpuDeviceSetSharedMemConfig(config); + EIGEN_UNUSED_VARIABLE(status) + gpu_assert(status == gpuSuccess); +#else + EIGEN_UNUSED_VARIABLE(config) +#endif +} +#endif + +} // end namespace Eigen + +// undefine all the gpu* macros we defined at the beginning of the file +#include "TensorGpuHipCudaUndefines.h" + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_GPU_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h new file mode 100644 index 0000000..df591c2 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceSycl.h @@ -0,0 +1,1048 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. 
+// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// Copyright (C) 2016 Benoit Steiner + +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_USE_SYCL) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H +#include + +namespace Eigen { + +namespace TensorSycl { +namespace internal { + +/// Cache all the device information needed +struct SyclDeviceInfo { + SyclDeviceInfo(cl::sycl::queue queue) + : local_mem_type( + queue.get_device() + .template get_info()), + max_work_item_sizes( + queue.get_device() + .template get_info< + cl::sycl::info::device::max_work_item_sizes>()), + max_mem_alloc_size( + queue.get_device() + .template get_info< + cl::sycl::info::device::max_mem_alloc_size>()), + max_compute_units(queue.get_device() + .template get_info< + cl::sycl::info::device::max_compute_units>()), + max_work_group_size( + queue.get_device() + .template get_info< + cl::sycl::info::device::max_work_group_size>()), + local_mem_size( + queue.get_device() + .template get_info()), + platform_name(queue.get_device() + .get_platform() + .template get_info()), + device_name(queue.get_device() + .template get_info()), + device_vendor( + queue.get_device() + .template get_info()) {} + + cl::sycl::info::local_mem_type local_mem_type; + cl::sycl::id<3> max_work_item_sizes; + unsigned long max_mem_alloc_size; + unsigned long max_compute_units; + unsigned long max_work_group_size; + size_t local_mem_size; + std::string platform_name; + std::string device_name; + std::string device_vendor; +}; + +} // end namespace internal +} // end namespace TensorSycl + +typedef TensorSycl::internal::buffer_data_type_t buffer_scalar_t; +// All devices (even AMD CPU with intel OpenCL runtime) that support OpenCL and +// can consume SPIR 
or SPIRV can use the Eigen SYCL backend and consequently +// TensorFlow via the Eigen SYCL Backend. +EIGEN_STRONG_INLINE auto get_sycl_supported_devices() + -> decltype(cl::sycl::device::get_devices()) { +#ifdef EIGEN_SYCL_USE_DEFAULT_SELECTOR + return {cl::sycl::device(cl::sycl::default_selector())}; +#else + std::vector supported_devices; + auto platform_list = cl::sycl::platform::get_platforms(); + for (const auto &platform : platform_list) { + auto device_list = platform.get_devices(); + auto platform_name = + platform.template get_info(); + std::transform(platform_name.begin(), platform_name.end(), + platform_name.begin(), ::tolower); + for (const auto &device : device_list) { + auto vendor = device.template get_info(); + std::transform(vendor.begin(), vendor.end(), vendor.begin(), ::tolower); + bool unsupported_condition = + (device.is_cpu() && platform_name.find("amd") != std::string::npos && + vendor.find("apu") == std::string::npos) || + (platform_name.find("experimental") != std::string::npos) || + device.is_host(); + if (!unsupported_condition) { + supported_devices.push_back(device); + } + } + } + return supported_devices; +#endif +} + +class QueueInterface { + public: + /// Creating device by using cl::sycl::selector or cl::sycl::device. 
+ template + explicit QueueInterface( + const DeviceOrSelector &dev_or_sel, cl::sycl::async_handler handler, + unsigned num_threads = std::thread::hardware_concurrency()) + : m_queue(dev_or_sel, handler), +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + m_prog(m_queue.get_context(), get_sycl_supported_devices()), +#endif + m_thread_pool(num_threads), + m_device_info(m_queue) { +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + m_prog.build_with_kernel_type(); + auto f = [&](cl::sycl::handler &cgh) { + cgh.single_task(m_prog.get_kernel(), + [=]() {}) + }; + EIGEN_SYCL_TRY_CATCH(m_queue.submit(f)); +#endif + } + + template + explicit QueueInterface( + const DeviceOrSelector &dev_or_sel, + unsigned num_threads = std::thread::hardware_concurrency()) + : QueueInterface(dev_or_sel, + [this](cl::sycl::exception_list l) { + this->exception_caught_ = this->sycl_async_handler(l); + }, + num_threads) {} + +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + EIGEN_STRONG_INLINE cl::sycl::program &program() const { return m_prog; } +#endif + + /// Attach an existing buffer to the pointer map, Eigen will not reuse it + EIGEN_STRONG_INLINE void *attach_buffer( + cl::sycl::buffer &buf) const { + std::lock_guard lock(pmapper_mutex_); + return static_cast(pMapper.add_pointer(buf)); + } + + /// Detach previously attached buffer + EIGEN_STRONG_INLINE void detach_buffer(void *p) const { + std::lock_guard lock(pmapper_mutex_); + TensorSycl::internal::SYCLfree(p, pMapper); + } + + /// Allocating device pointer. This pointer is actually an 8 bytes host + /// pointer used as key to access the sycl device buffer. The reason is that + /// we cannot use device buffer as a pointer as a m_data in Eigen leafNode + /// expressions. So we create a key pointer to be used in Eigen expression + /// construction. When we convert the Eigen construction into the sycl + /// construction we use this pointer as a key in our buffer_map and we make + /// sure that we dedicate only one buffer only for this pointer. 
The device + /// pointer would be deleted by calling deallocate function. + EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const { +#if EIGEN_MAX_ALIGN_BYTES > 0 + size_t align = num_bytes % EIGEN_MAX_ALIGN_BYTES; + if (align > 0) { + num_bytes += EIGEN_MAX_ALIGN_BYTES - align; + } +#endif + std::lock_guard lock(pmapper_mutex_); + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); + } + + EIGEN_STRONG_INLINE void *allocate_temp(size_t num_bytes) const { +#if EIGEN_MAX_ALIGN_BYTES > 0 + size_t align = num_bytes % EIGEN_MAX_ALIGN_BYTES; + if (align > 0) { + num_bytes += EIGEN_MAX_ALIGN_BYTES - align; + } +#endif + std::lock_guard lock(pmapper_mutex_); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + if (scratch_buffers.empty()) { + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); + ; + } else { + for (auto it = scratch_buffers.begin(); it != scratch_buffers.end();) { + auto buff = pMapper.get_buffer(*it); + if (buff.get_size() >= num_bytes) { + auto ptr = *it; + scratch_buffers.erase(it); + return ptr; + } else { + ++it; + } + } + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); + } +#else + return TensorSycl::internal::SYCLmalloc(num_bytes, pMapper); +#endif + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, data_t> + get(data_t *data) const { + return get_range_accessor(data); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE data_t *get( + TensorSycl::internal::RangeAccess + data) const { + return static_cast(data.get_virtual_pointer()); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void *p) const { + std::lock_guard lock(pmapper_mutex_); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + scratch_buffers.insert(p); +#else + TensorSycl::internal::SYCLfree(p, pMapper); +#endif + } + template + EIGEN_STRONG_INLINE void deallocate_temp( + const TensorSycl::internal::RangeAccess &p) const { + deallocate_temp(p.get_virtual_pointer()); + } + + /// This is used to 
deallocate the device pointer. p is used as a key inside + /// the map to find the device buffer and delete it. + EIGEN_STRONG_INLINE void deallocate(void *p) const { + std::lock_guard lock(pmapper_mutex_); + TensorSycl::internal::SYCLfree(p, pMapper); + } + + EIGEN_STRONG_INLINE void deallocate_all() const { + std::lock_guard lock(pmapper_mutex_); + TensorSycl::internal::SYCLfreeAll(pMapper); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + scratch_buffers.clear(); +#endif + } + + /// The memcpyHostToDevice is used to copy the data from host to device + /// The destination pointer could be deleted before the copy happend which is + /// why a callback function is needed. By default if none is provided, the + /// function is blocking. + EIGEN_STRONG_INLINE void memcpyHostToDevice( + void *dst, const void *src, size_t n, + std::function callback) const { + static const auto write_mode = cl::sycl::access::mode::discard_write; + static const auto global_access = cl::sycl::access::target::global_buffer; + typedef cl::sycl::accessor + write_accessor; + if (n == 0) { + if (callback) callback(); + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + write_accessor dst_acc = get_range_accessor(cgh, dst, n); + buffer_scalar_t const *ptr = static_cast(src); + auto non_deleter = [](buffer_scalar_t const *) {}; + std::shared_ptr s_ptr(ptr, non_deleter); + cgh.copy(s_ptr, dst_acc); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + synchronize_and_callback(e, callback); + } + + /// The memcpyDeviceToHost is used to copy the data from device to host. + /// The source pointer could be deleted before the copy happend which is + /// why a callback function is needed. By default if none is provided, the + /// function is blocking. 
+ EIGEN_STRONG_INLINE void memcpyDeviceToHost( + void *dst, const void *src, size_t n, + std::function callback) const { + static const auto read_mode = cl::sycl::access::mode::read; + static const auto global_access = cl::sycl::access::target::global_buffer; + typedef cl::sycl::accessor + read_accessor; + if (n == 0) { + if (callback) callback(); + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + read_accessor src_acc = get_range_accessor(cgh, src, n); + buffer_scalar_t *ptr = static_cast(dst); + auto non_deleter = [](buffer_scalar_t *) {}; + std::shared_ptr s_ptr(ptr, non_deleter); + cgh.copy(src_acc, s_ptr); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + synchronize_and_callback(e, callback); + } + + /// The memcpy function. + /// No callback is required here as both arguments are on the device + /// and SYCL can handle the dependency. + EIGEN_STRONG_INLINE void memcpy(void *dst, const void *src, size_t n) const { + static const auto read_mode = cl::sycl::access::mode::read; + static const auto write_mode = cl::sycl::access::mode::discard_write; + if (n == 0) { + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + auto src_acc = get_range_accessor(cgh, src, n); + auto dst_acc = get_range_accessor(cgh, dst, n); + cgh.copy(src_acc, dst_acc); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + async_synchronize(e); + } + + /// the memset function. + /// No callback is required here as both arguments are on the device + /// and SYCL can handle the dependency. + EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const { + static const auto write_mode = cl::sycl::access::mode::discard_write; + if (n == 0) { + return; + } + n /= sizeof(buffer_scalar_t); + auto f = [&](cl::sycl::handler &cgh) { + auto dst_acc = get_range_accessor(cgh, data, n); + // The cast to uint8_t is here to match the behaviour of the standard + // memset. 
The cast to buffer_scalar_t is needed to match the type of the + // accessor (in case buffer_scalar_t is not uint8_t) + cgh.fill(dst_acc, static_cast(static_cast(c))); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(f)); + async_synchronize(e); + } + + /// Get a range accessor to the virtual pointer's device memory. This range + /// accessor will allow access to the memory from the pointer to the end of + /// the buffer. + /// + /// NOTE: Inside a kernel the range accessor will always be indexed from the + /// start of the buffer, so the offset in the accessor is only used by + /// methods like handler::copy and will not be available inside a kernel. + template + EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess + get_range_accessor(const void *ptr) const { + static const auto global_access = cl::sycl::access::target::global_buffer; + static const auto is_place_holder = cl::sycl::access::placeholder::true_t; + typedef TensorSycl::internal::RangeAccess ret_type; + typedef const TensorSycl::internal::buffer_data_type_t *internal_ptr_t; + + std::lock_guard lock(pmapper_mutex_); + + auto original_buffer = pMapper.get_buffer(ptr); + const ptrdiff_t offset = pMapper.get_offset(ptr); + const ptrdiff_t typed_offset = offset / sizeof(T); + eigen_assert(typed_offset >= 0); + const auto typed_size = original_buffer.get_size() / sizeof(T); + auto buffer = original_buffer.template reinterpret< + typename Eigen::internal::remove_const::type>( + cl::sycl::range<1>(typed_size)); + const ptrdiff_t size = buffer.get_count() - typed_offset; + eigen_assert(size >= 0); + typedef cl::sycl::accessor::type, + 1, AcMd, global_access, is_place_holder> + placeholder_accessor_t; + const auto start_ptr = static_cast(ptr) - offset; + return ret_type(placeholder_accessor_t(buffer, cl::sycl::range<1>(size), + cl::sycl::id<1>(typed_offset)), + static_cast(typed_offset), + reinterpret_cast(start_ptr)); + } + + /// Get a range accessor to the virtual pointer's device memory 
with a + /// specified size. + template + EIGEN_STRONG_INLINE cl::sycl::accessor< + buffer_scalar_t, 1, AcMd, cl::sycl::access::target::global_buffer> + get_range_accessor(cl::sycl::handler &cgh, const void *ptr, + const Index n_bytes) const { + static const auto global_access = cl::sycl::access::target::global_buffer; + eigen_assert(n_bytes >= 0); + std::lock_guard lock(pmapper_mutex_); + auto buffer = pMapper.get_buffer(ptr); + const ptrdiff_t offset = pMapper.get_offset(ptr); + eigen_assert(offset >= 0); + eigen_assert(offset + n_bytes <= buffer.get_size()); + return buffer.template get_access( + cgh, cl::sycl::range<1>(n_bytes), cl::sycl::id<1>(offset)); + } + + /// Creation of sycl accessor for a buffer. This function first tries to find + /// the buffer in the buffer_map. If found it gets the accessor from it, if + /// not, the function then adds an entry by creating a sycl buffer for that + /// particular pointer. + template + EIGEN_STRONG_INLINE cl::sycl::accessor< + buffer_scalar_t, 1, AcMd, cl::sycl::access::target::global_buffer> + get_sycl_accessor(cl::sycl::handler &cgh, const void *ptr) const { + std::lock_guard lock(pmapper_mutex_); + return pMapper.get_buffer(ptr) + .template get_access( + cgh); + } + + EIGEN_STRONG_INLINE cl::sycl::buffer get_sycl_buffer( + const void *ptr) const { + std::lock_guard lock(pmapper_mutex_); + return pMapper.get_buffer(ptr); + } + + EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const { + std::lock_guard lock(pmapper_mutex_); + return pMapper.get_offset(ptr); + } + + template + EIGEN_ALWAYS_INLINE void binary_kernel_launcher(const Lhs &lhs, + const Rhs &rhs, OutPtr outptr, + Range thread_range, + Index scratchSize, + T... 
var) const { + auto kernel_functor = [=](cl::sycl::handler &cgh) { + // binding the placeholder accessors to a commandgroup handler + lhs.bind(cgh); + rhs.bind(cgh); + outptr.bind(cgh); + typedef cl::sycl::accessor + LocalAccessor; + + LocalAccessor scratch(cl::sycl::range<1>(scratchSize), cgh); + cgh.parallel_for( +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + program().template get_kernel(), +#endif + thread_range, sycl_kernel(scratch, lhs, rhs, outptr, var...)); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(kernel_functor)); + async_synchronize(e); + } + + template + EIGEN_ALWAYS_INLINE void unary_kernel_launcher(const InPtr &inptr, + OutPtr &outptr, + Range thread_range, + Index scratchSize, + T... var) const { + auto kernel_functor = [=](cl::sycl::handler &cgh) { + // binding the placeholder accessors to a commandgroup handler + inptr.bind(cgh); + outptr.bind(cgh); + typedef cl::sycl::accessor + LocalAccessor; + + LocalAccessor scratch(cl::sycl::range<1>(scratchSize), cgh); + cgh.parallel_for( +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + program().template get_kernel(), +#endif + thread_range, sycl_kernel(scratch, inptr, outptr, var...)); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(kernel_functor)); + async_synchronize(e); + } + + template + EIGEN_ALWAYS_INLINE void nullary_kernel_launcher(const InPtr &inptr, + Range thread_range, + Index scratchSize, + T... 
var) const { + auto kernel_functor = [=](cl::sycl::handler &cgh) { + // binding the placeholder accessors to a commandgroup handler + inptr.bind(cgh); + typedef cl::sycl::accessor + LocalAccessor; + + LocalAccessor scratch(cl::sycl::range<1>(scratchSize), cgh); + cgh.parallel_for( +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + program().template get_kernel(), +#endif + thread_range, sycl_kernel(scratch, inptr, var...)); + }; + cl::sycl::event e; + EIGEN_SYCL_TRY_CATCH(e = m_queue.submit(kernel_functor)); + async_synchronize(e); + } + + + EIGEN_STRONG_INLINE void synchronize() const { +#ifdef EIGEN_EXCEPTIONS + m_queue.wait_and_throw(); +#else + m_queue.wait(); +#endif + } + + + EIGEN_STRONG_INLINE void async_synchronize(cl::sycl::event e) const { + set_latest_event(e); +#ifndef EIGEN_SYCL_ASYNC_EXECUTION + synchronize(); +#endif + } + + template + EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, + Index &rng, Index &GRange) const { + tileSize = static_cast(getNearestPowerOfTwoWorkGroupSize()); + tileSize = std::min(static_cast(EIGEN_SYCL_LOCAL_THREAD_DIM0 * + EIGEN_SYCL_LOCAL_THREAD_DIM1), + static_cast(tileSize)); + rng = n; + if (rng == 0) rng = static_cast(1); + GRange = rng; + if (tileSize > GRange) + tileSize = GRange; + else if (GRange > tileSize) { + Index xMode = static_cast(GRange % tileSize); + if (xMode != 0) GRange += static_cast(tileSize - xMode); + } + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<2> &global_range, + cl::sycl::range<2> &local_range) const { + std::array input_range = input_dim; + Index max_workgroup_Size = + static_cast(getNearestPowerOfTwoWorkGroupSize()); + max_workgroup_Size = + std::min(static_cast(EIGEN_SYCL_LOCAL_THREAD_DIM0 * + EIGEN_SYCL_LOCAL_THREAD_DIM1), + static_cast(max_workgroup_Size)); + Index pow_of_2 = 
static_cast(std::log2(max_workgroup_Size)); + local_range[1] = + static_cast(std::pow(2, static_cast(pow_of_2 / 2))); + input_range[1] = input_dim[1]; + if (input_range[1] == 0) input_range[1] = static_cast(1); + global_range[1] = input_range[1]; + if (local_range[1] > global_range[1]) + local_range[1] = global_range[1]; + else if (global_range[1] > local_range[1]) { + Index xMode = static_cast(global_range[1] % local_range[1]); + if (xMode != 0) + global_range[1] += static_cast(local_range[1] - xMode); + } + local_range[0] = static_cast(max_workgroup_Size / local_range[1]); + input_range[0] = input_dim[0]; + if (input_range[0] == 0) input_range[0] = static_cast(1); + global_range[0] = input_range[0]; + if (local_range[0] > global_range[0]) + local_range[0] = global_range[0]; + else if (global_range[0] > local_range[0]) { + Index xMode = static_cast(global_range[0] % local_range[0]); + if (xMode != 0) + global_range[0] += static_cast(local_range[0] - xMode); + } + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<3> &global_range, + cl::sycl::range<3> &local_range) const { + std::array input_range = input_dim; + Index max_workgroup_Size = + static_cast(getNearestPowerOfTwoWorkGroupSize()); + max_workgroup_Size = + std::min(static_cast(EIGEN_SYCL_LOCAL_THREAD_DIM0 * + EIGEN_SYCL_LOCAL_THREAD_DIM1), + static_cast(max_workgroup_Size)); + Index pow_of_2 = static_cast(std::log2(max_workgroup_Size)); + local_range[2] = + static_cast(std::pow(2, static_cast(pow_of_2 / 3))); + input_range[2] = input_dim[2]; + if (input_range[2] == 0) input_range[1] = static_cast(1); + global_range[2] = input_range[2]; + if (local_range[2] > global_range[2]) + local_range[2] = global_range[2]; + else if (global_range[2] > local_range[2]) { + Index xMode = static_cast(global_range[2] % local_range[2]); + if (xMode != 0) 
+ global_range[2] += static_cast(local_range[2] - xMode); + } + pow_of_2 = static_cast( + std::log2(static_cast(max_workgroup_Size / local_range[2]))); + local_range[1] = + static_cast(std::pow(2, static_cast(pow_of_2 / 2))); + input_range[1] = input_dim[1]; + if (input_range[1] == 0) input_range[1] = static_cast(1); + global_range[1] = input_range[1]; + if (local_range[1] > global_range[1]) + local_range[1] = global_range[1]; + else if (global_range[1] > local_range[1]) { + Index xMode = static_cast(global_range[1] % local_range[1]); + if (xMode != 0) + global_range[1] += static_cast(local_range[1] - xMode); + } + local_range[0] = static_cast(max_workgroup_Size / + (local_range[1] * local_range[2])); + input_range[0] = input_dim[0]; + if (input_range[0] == 0) input_range[0] = static_cast(1); + global_range[0] = input_range[0]; + if (local_range[0] > global_range[0]) + local_range[0] = global_range[0]; + else if (global_range[0] > local_range[0]) { + Index xMode = static_cast(global_range[0] % local_range[0]); + if (xMode != 0) + global_range[0] += static_cast(local_range[0] - xMode); + } + } + + EIGEN_STRONG_INLINE bool has_local_memory() const { +#if !defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM) + return false; +#elif defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM) + return true; +#else + return m_device_info.local_mem_type == + cl::sycl::info::local_mem_type::local; +#endif + } + + EIGEN_STRONG_INLINE unsigned long max_buffer_size() const { + return m_device_info.max_mem_alloc_size; + } + + EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const { + return m_device_info.max_compute_units; + } + + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const { + return m_device_info.max_work_group_size; + } + + EIGEN_STRONG_INLINE cl::sycl::id<3> maxWorkItemSizes() const { + return m_device_info.max_work_item_sizes; + } + + /// No need for sycl it should act the same as CPU version + EIGEN_STRONG_INLINE int 
majorDeviceVersion() const { return 1; } + + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const { + // OpenCL doesnot have such concept + return 2; + } + + EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const { + return m_device_info.local_mem_size; + } + + // This function returns the nearest power of 2 Work-group size which is <= + // maximum device workgroup size. + EIGEN_STRONG_INLINE size_t getNearestPowerOfTwoWorkGroupSize() const { + return getPowerOfTwo(m_device_info.max_work_group_size, false); + } + + EIGEN_STRONG_INLINE std::string getPlatformName() const { + return m_device_info.platform_name; + } + + EIGEN_STRONG_INLINE std::string getDeviceName() const { + return m_device_info.device_name; + } + + EIGEN_STRONG_INLINE std::string getDeviceVendor() const { + return m_device_info.device_vendor; + } + + // This function returns the nearest power of 2 + // if roundup is true returns result>=wgsize + // else it return result <= wgsize + EIGEN_STRONG_INLINE size_t getPowerOfTwo(size_t wGSize, bool roundUp) const { + if (roundUp) --wGSize; + wGSize |= (wGSize >> 1); + wGSize |= (wGSize >> 2); + wGSize |= (wGSize >> 4); + wGSize |= (wGSize >> 8); + wGSize |= (wGSize >> 16); +#if EIGEN_ARCH_x86_64 || EIGEN_ARCH_ARM64 || EIGEN_OS_WIN64 + wGSize |= (wGSize >> 32); +#endif + return ((!roundUp) ? (wGSize - (wGSize >> 1)) : ++wGSize); + } + + EIGEN_STRONG_INLINE cl::sycl::queue &sycl_queue() const { return m_queue; } + + // This function checks if the runtime recorded an error for the + // underlying stream device. 
+ EIGEN_STRONG_INLINE bool ok() const { + if (!exception_caught_) { + synchronize(); + } + return !exception_caught_; + } + + EIGEN_STRONG_INLINE cl::sycl::event get_latest_event() const { +#ifdef EIGEN_SYCL_STORE_LATEST_EVENT + std::lock_guard lock(event_mutex_); + return latest_events_[std::this_thread::get_id()]; +#else + eigen_assert(false); + return cl::sycl::event(); +#endif + } + + // destructor + ~QueueInterface() { + pMapper.clear(); +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + scratch_buffers.clear(); +#endif + } + + protected: + EIGEN_STRONG_INLINE void set_latest_event(cl::sycl::event e) const { +#ifdef EIGEN_SYCL_STORE_LATEST_EVENT + std::lock_guard lock(event_mutex_); + latest_events_[std::this_thread::get_id()] = e; +#else + EIGEN_UNUSED_VARIABLE(e); +#endif + } + + void synchronize_and_callback(cl::sycl::event e, + const std::function &callback) const { + set_latest_event(e); + if (callback) { + auto callback_ = [=]() { +#ifdef EIGEN_EXCEPTIONS + cl::sycl::event(e).wait_and_throw(); +#else + cl::sycl::event(e).wait(); +#endif + callback(); + }; + m_thread_pool.Schedule(std::move(callback_)); + } else { +#ifdef EIGEN_EXCEPTIONS + m_queue.wait_and_throw(); +#else + m_queue.wait(); +#endif + } + } + + bool sycl_async_handler(cl::sycl::exception_list exceptions) const { + bool exception_caught = false; + for (const auto &e : exceptions) { + if (e) { + exception_caught = true; + EIGEN_THROW_X(e); + } + } + return exception_caught; + } + + /// class members: + bool exception_caught_ = false; + + mutable std::mutex pmapper_mutex_; + +#ifdef EIGEN_SYCL_STORE_LATEST_EVENT + mutable std::mutex event_mutex_; + mutable std::unordered_map latest_events_; +#endif + + /// std::map is the container used to make sure that we create only one buffer + /// per pointer. The lifespan of the buffer now depends on the lifespan of + /// SyclDevice. If a non-read-only pointer is needed to be accessed on the + /// host we should manually deallocate it. 
+ mutable TensorSycl::internal::PointerMapper pMapper; +#ifndef EIGEN_SYCL_NO_REUSE_BUFFERS + mutable std::unordered_set scratch_buffers; +#endif + /// sycl queue + mutable cl::sycl::queue m_queue; +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + mutable cl::sycl::program m_prog; +#endif + + /// The thread pool is used to wait on events and call callbacks + /// asynchronously + mutable Eigen::ThreadPool m_thread_pool; + + const TensorSycl::internal::SyclDeviceInfo m_device_info; +}; + +struct SyclDeviceBase { + /// QueueInterface is not owned. it is the caller's responsibility to destroy + /// it + const QueueInterface *m_queue_stream; + explicit SyclDeviceBase(const QueueInterface *queue_stream) + : m_queue_stream(queue_stream) {} + EIGEN_STRONG_INLINE const QueueInterface *queue_stream() const { + return m_queue_stream; + } +}; + +// Here is a sycl device struct which accept the sycl queue interface +// as an input +struct SyclDevice : public SyclDeviceBase { + explicit SyclDevice(const QueueInterface *queue_stream) + : SyclDeviceBase(queue_stream) {} + + // this is the accessor used to construct the evaluator + template + EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess + get_range_accessor(const void *ptr) const { + return queue_stream()->template get_range_accessor(ptr); + } + + // get sycl accessor + template + EIGEN_STRONG_INLINE cl::sycl::accessor< + buffer_scalar_t, 1, AcMd, cl::sycl::access::target::global_buffer> + get_sycl_accessor(cl::sycl::handler &cgh, const void *ptr) const { + return queue_stream()->template get_sycl_accessor(cgh, ptr); + } + + /// Accessing the created sycl device buffer for the device pointer + EIGEN_STRONG_INLINE cl::sycl::buffer get_sycl_buffer( + const void *ptr) const { + return queue_stream()->get_sycl_buffer(ptr); + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup(Index n, Index &tileSize, + Index &rng, 
Index &GRange) const { + queue_stream()->parallel_for_setup(n, tileSize, rng, GRange); + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<2> &global_range, + cl::sycl::range<2> &local_range) const { + queue_stream()->parallel_for_setup(input_dim, global_range, local_range); + } + + /// This is used to prepare the number of threads and also the number of + /// threads per block for sycl kernels + template + EIGEN_STRONG_INLINE void parallel_for_setup( + const std::array &input_dim, cl::sycl::range<3> &global_range, + cl::sycl::range<3> &local_range) const { + queue_stream()->parallel_for_setup(input_dim, global_range, local_range); + } + + /// allocate device memory + EIGEN_STRONG_INLINE void *allocate(size_t num_bytes) const { + return queue_stream()->allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void *allocate_temp(size_t num_bytes) const { + return queue_stream()->allocate_temp(num_bytes); + } + + /// deallocate device memory + EIGEN_STRONG_INLINE void deallocate(void *p) const { + queue_stream()->deallocate(p); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void *buffer) const { + queue_stream()->deallocate_temp(buffer); + } + template + EIGEN_STRONG_INLINE void deallocate_temp( + const TensorSycl::internal::RangeAccess &buffer) const { + queue_stream()->deallocate_temp(buffer); + } + EIGEN_STRONG_INLINE void deallocate_all() const { + queue_stream()->deallocate_all(); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSycl::internal::RangeAccess< + cl::sycl::access::mode::read_write, data_t> + get(data_t *data) const { + return queue_stream()->get(data); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE data_t *get( + TensorSycl::internal::RangeAccess + data) const { + return queue_stream()->get(data); + } + + /// attach existing buffer + EIGEN_STRONG_INLINE void 
*attach_buffer( + cl::sycl::buffer &buf) const { + return queue_stream()->attach_buffer(buf); + } + /// detach buffer + EIGEN_STRONG_INLINE void detach_buffer(void *p) const { + queue_stream()->detach_buffer(p); + } + EIGEN_STRONG_INLINE ptrdiff_t get_offset(const void *ptr) const { + return queue_stream()->get_offset(ptr); + } + + // some runtime conditions that can be applied here + EIGEN_STRONG_INLINE bool isDeviceSuitable() const { return true; } + + /// memcpyHostToDevice + template + EIGEN_STRONG_INLINE void memcpyHostToDevice( + Index *dst, const Index *src, size_t n, + std::function callback = {}) const { + queue_stream()->memcpyHostToDevice(dst, src, n, callback); + } + /// memcpyDeviceToHost + template + EIGEN_STRONG_INLINE void memcpyDeviceToHost( + void *dst, const Index *src, size_t n, + std::function callback = {}) const { + queue_stream()->memcpyDeviceToHost(dst, src, n, callback); + } + /// the memcpy function + template + EIGEN_STRONG_INLINE void memcpy(void *dst, const Index *src, size_t n) const { + queue_stream()->memcpy(dst, src, n); + } + /// the memset function + EIGEN_STRONG_INLINE void memset(void *data, int c, size_t n) const { + queue_stream()->memset(data, c, n); + } + /// returning the sycl queue + EIGEN_STRONG_INLINE cl::sycl::queue &sycl_queue() const { + return queue_stream()->sycl_queue(); + } +#ifdef EIGEN_SYCL_USE_PROGRAM_CLASS + EIGEN_STRONG_INLINE cl::sycl::program &program() const { + return queue_stream()->program(); + } +#endif + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { return 48 * 1024; } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // We won't try to take advantage of the l2 cache for the time being, and + // there is no l3 cache on sycl devices. 
+ return firstLevelCacheSize(); + } + EIGEN_STRONG_INLINE unsigned long getNumSyclMultiProcessors() const { + return queue_stream()->getNumSyclMultiProcessors(); + } + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerBlock() const { + return queue_stream()->maxSyclThreadsPerBlock(); + } + EIGEN_STRONG_INLINE cl::sycl::id<3> maxWorkItemSizes() const { + return queue_stream()->maxWorkItemSizes(); + } + EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const { + // OpenCL doesnot have such concept + return queue_stream()->maxSyclThreadsPerMultiProcessor(); + } + EIGEN_STRONG_INLINE size_t sharedMemPerBlock() const { + return queue_stream()->sharedMemPerBlock(); + } + EIGEN_STRONG_INLINE size_t getNearestPowerOfTwoWorkGroupSize() const { + return queue_stream()->getNearestPowerOfTwoWorkGroupSize(); + } + + EIGEN_STRONG_INLINE size_t getPowerOfTwo(size_t val, bool roundUp) const { + return queue_stream()->getPowerOfTwo(val, roundUp); + } + /// No need for sycl it should act the same as CPU version + EIGEN_STRONG_INLINE int majorDeviceVersion() const { + return queue_stream()->majorDeviceVersion(); + } + + EIGEN_STRONG_INLINE void synchronize() const { + queue_stream()->synchronize(); + } + EIGEN_STRONG_INLINE void async_synchronize( + cl::sycl::event e = cl::sycl::event()) const { + queue_stream()->async_synchronize(e); + } + EIGEN_STRONG_INLINE cl::sycl::event get_latest_event() const { + return queue_stream()->get_latest_event(); + } + + // This function checks if the runtime recorded an error for the + // underlying stream device. 
+ EIGEN_STRONG_INLINE bool ok() const { return queue_stream()->ok(); } + + EIGEN_STRONG_INLINE bool has_local_memory() const { + return queue_stream()->has_local_memory(); + } + EIGEN_STRONG_INLINE long max_buffer_size() const { + return queue_stream()->max_buffer_size(); + } + EIGEN_STRONG_INLINE std::string getPlatformName() const { + return queue_stream()->getPlatformName(); + } + EIGEN_STRONG_INLINE std::string getDeviceName() const { + return queue_stream()->getDeviceName(); + } + EIGEN_STRONG_INLINE std::string getDeviceVendor() const { + return queue_stream()->getDeviceVendor(); + } + template + EIGEN_ALWAYS_INLINE void binary_kernel_launcher(T... var) const { + queue_stream()->template binary_kernel_launcher( + var...); + } + template + EIGEN_ALWAYS_INLINE void unary_kernel_launcher(T... var) const { + queue_stream()->template unary_kernel_launcher( + var...); + } + + template + EIGEN_ALWAYS_INLINE void nullary_kernel_launcher(T... var) const { + queue_stream()->template nullary_kernel_launcher( + var...); + } +}; +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_SYCL_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h new file mode 100644 index 0000000..e524b53 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h @@ -0,0 +1,409 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#if defined(EIGEN_USE_THREADS) && !defined(EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H) +#define EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H + +namespace Eigen { + +// Runs an arbitrary function and then calls Notify() on the passed in +// Notification. +template struct FunctionWrapperWithNotification +{ + static void run(Notification* n, Function f, Args... args) { + f(args...); + if (n) { + n->Notify(); + } + } +}; + +template struct FunctionWrapperWithBarrier +{ + static void run(Barrier* b, Function f, Args... args) { + f(args...); + if (b) { + b->Notify(); + } + } +}; + +template +static EIGEN_STRONG_INLINE void wait_until_ready(SyncType* n) { + if (n) { + n->Wait(); + } +} + +// An abstract interface to a device specific memory allocator. +class Allocator { + public: + virtual ~Allocator() {} + virtual void* allocate(size_t num_bytes) const = 0; + virtual void deallocate(void* buffer) const = 0; +}; + +// Build a thread pool device on top the an existing pool of threads. +struct ThreadPoolDevice { + // The ownership of the thread pool remains with the caller. + ThreadPoolDevice(ThreadPoolInterface* pool, int num_cores, Allocator* allocator = nullptr) + : pool_(pool), num_threads_(num_cores), allocator_(allocator) { } + + EIGEN_STRONG_INLINE void* allocate(size_t num_bytes) const { + return allocator_ ? 
allocator_->allocate(num_bytes) + : internal::aligned_malloc(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate(void* buffer) const { + if (allocator_) { + allocator_->deallocate(buffer); + } else { + internal::aligned_free(buffer); + } + } + + EIGEN_STRONG_INLINE void* allocate_temp(size_t num_bytes) const { + return allocate(num_bytes); + } + + EIGEN_STRONG_INLINE void deallocate_temp(void* buffer) const { + deallocate(buffer); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Type get(Type data) const { + return data; + } + + EIGEN_STRONG_INLINE void memcpy(void* dst, const void* src, size_t n) const { +#ifdef __ANDROID__ + ::memcpy(dst, src, n); +#else + // TODO(rmlarsen): Align blocks on cache lines. + // We have observed that going beyond 4 threads usually just wastes + // CPU cycles due to the threads competing for memory bandwidth, so we + // statically schedule at most 4 block copies here. + const size_t kMinBlockSize = 32768; + const size_t num_threads = CostModel::numThreads(n, TensorOpCost(1.0, 1.0, 0), 4); + if (n <= kMinBlockSize || num_threads < 2) { + ::memcpy(dst, src, n); + } else { + const char* src_ptr = static_cast(src); + char* dst_ptr = static_cast(dst); + const size_t blocksize = (n + (num_threads - 1)) / num_threads; + Barrier barrier(static_cast(num_threads - 1)); + // Launch the last 3 blocks on worker threads. + for (size_t i = 1; i < num_threads; ++i) { + enqueue_with_barrier(&barrier, [n, i, src_ptr, dst_ptr, blocksize] { + ::memcpy(dst_ptr + i * blocksize, src_ptr + i * blocksize, + numext::mini(blocksize, n - (i * blocksize))); + }); + } + // Launch the first block on the main thread. 
+ ::memcpy(dst_ptr, src_ptr, blocksize); + barrier.Wait(); + } +#endif + } + EIGEN_STRONG_INLINE void memcpyHostToDevice(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + EIGEN_STRONG_INLINE void memcpyDeviceToHost(void* dst, const void* src, size_t n) const { + memcpy(dst, src, n); + } + + EIGEN_STRONG_INLINE void memset(void* buffer, int c, size_t n) const { + ::memset(buffer, c, n); + } + + EIGEN_STRONG_INLINE int numThreads() const { + return num_threads_; + } + + // Number of theads available in the underlying thread pool. This number can + // be different from the value returned by numThreads(). + EIGEN_STRONG_INLINE int numThreadsInPool() const { + return pool_->NumThreads(); + } + + EIGEN_STRONG_INLINE size_t firstLevelCacheSize() const { + return l1CacheSize(); + } + + EIGEN_STRONG_INLINE size_t lastLevelCacheSize() const { + // The l3 cache size is shared between all the cores. + return l3CacheSize() / num_threads_; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int majorDeviceVersion() const { + // Should return an enum that encodes the ISA supported by the CPU + return 1; + } + + template + EIGEN_STRONG_INLINE Notification* enqueue(Function&& f, + Args&&... args) const { + Notification* n = new Notification(); + pool_->Schedule( + std::bind(&FunctionWrapperWithNotification::run, n, + std::move(f), args...)); + return n; + } + + template + EIGEN_STRONG_INLINE void enqueue_with_barrier(Barrier* b, Function&& f, + Args&&... args) const { + pool_->Schedule( + std::bind(&FunctionWrapperWithBarrier::run, b, + std::move(f), args...)); + } + + template + EIGEN_STRONG_INLINE void enqueueNoNotification(Function&& f, + Args&&... args) const { + if (sizeof...(args) > 0) { + pool_->Schedule(std::bind(std::move(f), args...)); + } else { + pool_->Schedule(std::move(f)); + } + } + + // Returns a logical thread index between 0 and pool_->NumThreads() - 1 if + // called from one of the threads in pool_. Returns -1 otherwise. 
+ EIGEN_STRONG_INLINE int currentThreadId() const { + return pool_->CurrentThreadId(); + } + + // WARNING: This function is synchronous and will block the calling thread. + // + // Synchronous parallelFor executes f with [0, n) arguments in parallel and + // waits for completion. F accepts a half-open interval [first, last). Block + // size is chosen based on the iteration cost and resulting parallel + // efficiency. If block_align is not nullptr, it is called to round up the + // block size. + void parallelFor(Index n, const TensorOpCost& cost, + std::function block_align, + std::function f) const { + if (EIGEN_PREDICT_FALSE(n <= 0)){ + return; + // Compute small problems directly in the caller thread. + } else if (n == 1 || numThreads() == 1 || + CostModel::numThreads(n, cost, static_cast(numThreads())) == 1) { + f(0, n); + return; + } + + // Compute block size and total count of blocks. + ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align); + + // Recursively divide size into halves until we reach block_size. + // Division code rounds mid to block_size, so we are guaranteed to get + // block_count leaves that do actual computations. + Barrier barrier(static_cast(block.count)); + std::function handleRange; + handleRange = [=, &handleRange, &barrier, &f](Index firstIdx, + Index lastIdx) { + while (lastIdx - firstIdx > block.size) { + // Split into halves and schedule the second half on a different thread. + const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size; + pool_->Schedule([=, &handleRange]() { handleRange(midIdx, lastIdx); }); + lastIdx = midIdx; + } + // Single block or less, execute directly. + f(firstIdx, lastIdx); + barrier.Notify(); + }; + + if (block.count <= numThreads()) { + // Avoid a thread hop by running the root of the tree and one block on the + // main thread. 
+ handleRange(0, n); + } else { + // Execute the root in the thread pool to avoid running work on more than + // numThreads() threads. + pool_->Schedule([=, &handleRange]() { handleRange(0, n); }); + } + + barrier.Wait(); + } + + // Convenience wrapper for parallelFor that does not align blocks. + void parallelFor(Index n, const TensorOpCost& cost, + std::function f) const { + parallelFor(n, cost, nullptr, std::move(f)); + } + + // WARNING: This function is asynchronous and will not block the calling thread. + // + // Asynchronous parallelFor executes f with [0, n) arguments in parallel + // without waiting for completion. When the last block finished, it will call + // 'done' callback. F accepts a half-open interval [first, last). Block size + // is chosen based on the iteration cost and resulting parallel efficiency. If + // block_align is not nullptr, it is called to round up the block size. + void parallelForAsync(Index n, const TensorOpCost& cost, + std::function block_align, + std::function f, + std::function done) const { + // Compute small problems directly in the caller thread. + if (n <= 1 || numThreads() == 1 || + CostModel::numThreads(n, cost, static_cast(numThreads())) == 1) { + f(0, n); + done(); + return; + } + + // Compute block size and total count of blocks. + ParallelForBlock block = CalculateParallelForBlock(n, cost, block_align); + + ParallelForAsyncContext* const ctx = + new ParallelForAsyncContext(block.count, std::move(f), std::move(done)); + + // Recursively divide size into halves until we reach block_size. + // Division code rounds mid to block_size, so we are guaranteed to get + // block_count leaves that do actual computations. + ctx->handle_range = [this, ctx, block](Index firstIdx, Index lastIdx) { + while (lastIdx - firstIdx > block.size) { + // Split into halves and schedule the second half on a different thread. 
+ const Index midIdx = firstIdx + divup((lastIdx - firstIdx) / 2, block.size) * block.size; + pool_->Schedule( + [ctx, midIdx, lastIdx]() { ctx->handle_range(midIdx, lastIdx); }); + lastIdx = midIdx; + } + + // Single block or less, execute directly. + ctx->f(firstIdx, lastIdx); + + // Delete async context if it was the last block. + if (ctx->count.fetch_sub(1) == 1) delete ctx; + }; + + if (block.count <= numThreads()) { + // Avoid a thread hop by running the root of the tree and one block on the + // main thread. + ctx->handle_range(0, n); + } else { + // Execute the root in the thread pool to avoid running work on more than + // numThreads() threads. + pool_->Schedule([ctx, n]() { ctx->handle_range(0, n); }); + } + } + + // Convenience wrapper for parallelForAsync that does not align blocks. + void parallelForAsync(Index n, const TensorOpCost& cost, + std::function f, + std::function done) const { + parallelForAsync(n, cost, nullptr, std::move(f), std::move(done)); + } + + // Thread pool accessor. + ThreadPoolInterface* getPool() const { return pool_; } + + // Allocator accessor. + Allocator* allocator() const { return allocator_; } + + private: + typedef TensorCostModel CostModel; + + // For parallelForAsync we must keep passed in closures on the heap, and + // delete them only after `done` callback finished. + struct ParallelForAsyncContext { + ParallelForAsyncContext(Index block_count, + std::function block_f, + std::function done_callback) + : count(block_count), + f(std::move(block_f)), + done(std::move(done_callback)) {} + ~ParallelForAsyncContext() { done(); } + + std::atomic count; + std::function f; + std::function done; + + std::function handle_range; + }; + + struct ParallelForBlock { + Index size; // block size + Index count; // number of blocks + }; + + // Calculates block size based on (1) the iteration cost and (2) parallel + // efficiency. 
We want blocks to be not too small to mitigate parallelization + // overheads; not too large to mitigate tail effect and potential load + // imbalance and we also want number of blocks to be evenly dividable across + // threads. + ParallelForBlock CalculateParallelForBlock( + const Index n, const TensorOpCost& cost, + std::function block_align) const { + const double block_size_f = 1.0 / CostModel::taskSize(1, cost); + const Index max_oversharding_factor = 4; + Index block_size = numext::mini( + n, numext::maxi( + divup(n, max_oversharding_factor * numThreads()), + block_size_f)); + const Index max_block_size = numext::mini(n, 2 * block_size); + + if (block_align) { + Index new_block_size = block_align(block_size); + eigen_assert(new_block_size >= block_size); + block_size = numext::mini(n, new_block_size); + } + + Index block_count = divup(n, block_size); + + // Calculate parallel efficiency as fraction of total CPU time used for + // computations: + double max_efficiency = + static_cast(block_count) / + (divup(block_count, numThreads()) * numThreads()); + + // Now try to increase block size up to max_block_size as long as it + // doesn't decrease parallel efficiency. + for (Index prev_block_count = block_count; + max_efficiency < 1.0 && prev_block_count > 1;) { + // This is the next block size that divides size into a smaller number + // of blocks than the current block_size. + Index coarser_block_size = divup(n, prev_block_count - 1); + if (block_align) { + Index new_block_size = block_align(coarser_block_size); + eigen_assert(new_block_size >= coarser_block_size); + coarser_block_size = numext::mini(n, new_block_size); + } + if (coarser_block_size > max_block_size) { + break; // Reached max block size. Stop. + } + // Recalculate parallel efficiency. 
+ const Index coarser_block_count = divup(n, coarser_block_size); + eigen_assert(coarser_block_count < prev_block_count); + prev_block_count = coarser_block_count; + const double coarser_efficiency = + static_cast(coarser_block_count) / + (divup(coarser_block_count, numThreads()) * numThreads()); + if (coarser_efficiency + 0.01 >= max_efficiency) { + // Taking it. + block_size = coarser_block_size; + block_count = coarser_block_count; + if (max_efficiency < coarser_efficiency) { + max_efficiency = coarser_efficiency; + } + } + } + + return {block_size, block_count}; + } + + ThreadPoolInterface* pool_; + int num_threads_; + Allocator* allocator_; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DEVICE_THREAD_POOL_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h new file mode 100644 index 0000000..1a30e45 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h @@ -0,0 +1,236 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H +#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H + +namespace Eigen { + +/** \internal + * + * \class TensorDimensionList + * \ingroup CXX11_Tensor_Module + * + * \brief Special case of tensor index list used to list all the dimensions of a tensor of rank n. 
+ * + * \sa Tensor + */ + +template struct DimensionList { + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + const Index operator[] (const Index i) const { return i; } +}; + +namespace internal { + +template struct array_size > { + static const size_t value = Rank; +}; +template struct array_size > { + static const size_t value = Rank; +}; + +template const Index array_get(DimensionList&) { + return n; +} +template const Index array_get(const DimensionList&) { + return n; +} + + +#if EIGEN_HAS_CONSTEXPR +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { + return true; + } +}; +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex) { + return true; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; + +template +struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; +template +struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return true; + } +}; + +template +struct index_statically_eq_impl > { + static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i == value; + } +}; +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i == value; + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i != value; + } +}; +template +struct index_statically_ne_impl > { + static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i != value; + } +}; + +template +struct index_statically_gt_impl 
> { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i > value; + } +}; +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i > value; + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i < value; + } +}; +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const DenseIndex i, const DenseIndex value) { + return i < value; + } +}; + +#else +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { + return true; + } +}; +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) { + return true; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; + +template +struct indices_statically_known_to_increase_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; +template +struct indices_statically_known_to_increase_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return true; + } +}; + +template +struct index_statically_eq_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +template +struct index_statically_eq_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; + +template +struct index_statically_ne_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex){ + return false; 
+ } +}; +template +struct index_statically_ne_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; + +template +struct index_statically_gt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +template +struct index_statically_gt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; + +template +struct index_statically_lt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +template +struct index_statically_lt_impl > { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) { + return false; + } +}; +#endif + +} // end namespace internal +} // end namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSION_LIST_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h new file mode 100644 index 0000000..f0f1e83 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h @@ -0,0 +1,490 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H + + +namespace Eigen { + +/** \internal + * + * \class TensorDimensions + * \ingroup CXX11_Tensor_Module + * + * \brief Set of classes used to encode and store the dimensions of a Tensor. 
+ * + * The Sizes class encodes as part of the type the number of dimensions and the + * sizes corresponding to each dimension. It uses no storage space since it is + * entirely known at compile time. + * The DSizes class is its dynamic sibling: the number of dimensions is known + * at compile time but the sizes are set during execution. + * + * \sa Tensor + */ + +// Boilerplate code +namespace internal { + +template struct dget { + static const std::ptrdiff_t value = get::value; +}; + + +template +struct fixed_size_tensor_index_linearization_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(array const& indices, + const Dimensions& dimensions) + { + return array_get(indices) + + dget::value * + fixed_size_tensor_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct fixed_size_tensor_index_linearization_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(array const&, const Dimensions&) + { + return 0; + } +}; + +template +struct fixed_size_tensor_index_extraction_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(const Index index, + const Dimensions& dimensions) + { + const Index mult = (index == n-1) ? 
1 : 0; + return array_get(dimensions) * mult + + fixed_size_tensor_index_extraction_helper::run(index, dimensions); + } +}; + +template +struct fixed_size_tensor_index_extraction_helper +{ + template EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Index run(const Index, + const Dimensions&) + { + return 0; + } + }; + +} // end namespace internal + + +// Fixed size +#ifndef EIGEN_EMULATE_CXX11_META_H +template +struct Sizes { + typedef internal::numeric_list Base; + const Base t = Base(); + static const std::ptrdiff_t total_size = internal::arg_prod(Indices...); + static const ptrdiff_t count = Base::count; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t rank() const { + return Base::count; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t TotalSize() { + return internal::arg_prod(Indices...); + } + + EIGEN_DEVICE_FUNC Sizes() { } + template + explicit EIGEN_DEVICE_FUNC Sizes(const array& /*indices*/) { + // todo: add assertion + } +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC Sizes(DenseIndex...) 
{ } + explicit EIGEN_DEVICE_FUNC Sizes(std::initializer_list /*l*/) { + // todo: add assertion + } +#endif + + template Sizes& operator = (const T& /*other*/) { + // add assertion failure if the size of other is different + return *this; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t operator[] (const std::ptrdiff_t index) const { + return internal::fixed_size_tensor_index_extraction_helper::run(index, t); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfColMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, t); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfRowMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, t); + } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes&) { + return Sizes::total_size; +} +} + +#else + +template +struct non_zero_size { + typedef internal::type2val type; +}; +template <> +struct non_zero_size<0> { + typedef internal::null_type type; +}; + +template struct Sizes { + typedef typename internal::make_type_list::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type, typename non_zero_size::type >::type Base; + static const std::ptrdiff_t count = Base::count; + static const std::ptrdiff_t total_size = internal::arg_prod::value; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t rank() const { + return count; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ptrdiff_t TotalSize() { + return internal::arg_prod::value; + } + + Sizes() { } + template + explicit Sizes(const array& /*indices*/) { + // todo: add assertion + } + template Sizes& operator = (const T& /*other*/) { + // add assertion failure if the size of other is different + return *this; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template Sizes(DenseIndex... 
/*indices*/) { } + explicit Sizes(std::initializer_list) { + // todo: add assertion + } +#else + EIGEN_DEVICE_FUNC explicit Sizes(const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) { + } + EIGEN_DEVICE_FUNC Sizes(const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex, const DenseIndex) { + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index operator[] (const Index index) const { + switch (index) { + case 0: + return internal::get<0, Base>::value; + case 1: + return internal::get<1, Base>::value; + case 2: + return internal::get<2, Base>::value; + case 3: + return internal::get<3, Base>::value; + case 4: + return internal::get<4, Base>::value; + default: + eigen_assert(false && "index overflow"); + return static_cast(-1); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfColMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this)); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ptrdiff_t IndexOfRowMajor(const array& indices) const { + return internal::fixed_size_tensor_index_linearization_helper::run(indices, *reinterpret_cast(this)); + } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_prod(const Sizes&) { + return Sizes::total_size; +} +} + +#endif + +// Boilerplate +namespace internal { +template +struct tensor_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, array const& dimensions) + { + return array_get(indices) + + array_get(dimensions) * + tensor_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct tensor_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE + Index run(array const& indices, array const&) + { + return array_get(indices); + } +}; +} // end namespace internal + + + +// Dynamic size +template +struct DSizes : array { + typedef array Base; + static const int count = NumDims; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { + return NumDims; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex TotalSize() const { + return (NumDims == 0) ? 1 : internal::array_prod(*static_cast(this)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DSizes() { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = 0; + } + } + EIGEN_DEVICE_FUNC explicit DSizes(const array& a) : Base(a) { } + + EIGEN_DEVICE_FUNC explicit DSizes(const DenseIndex i0) { + eigen_assert(NumDims == 1); + (*this)[0] = i0; + } + + EIGEN_DEVICE_FUNC DSizes(const DimensionList& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } + + // Enable DSizes index type promotion only if we are promoting to the + // larger type, e.g. allow to promote dimensions of type int to long. + template + EIGEN_DEVICE_FUNC + explicit DSizes(const array& other, + // Default template parameters require c++11. 
+ typename internal::enable_if< + internal::is_same< + DenseIndex, + typename internal::promote_index_type< + DenseIndex, + OtherIndex + >::type + >::value, void*>::type = 0) { + for (int i = 0; i < NumDims; ++i) { + (*this)[i] = static_cast(other[i]); + } + } + +#ifdef EIGEN_HAS_INDEX_LIST + template + EIGEN_DEVICE_FUNC + explicit DSizes(const Eigen::IndexList& dimensions) { + for (int i = 0; i < dimensions.count; ++i) { + (*this)[i] = dimensions[i]; + } + } +#endif + +#ifndef EIGEN_EMULATE_CXX11_META_H + template + EIGEN_DEVICE_FUNC DSizes(const Sizes& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } +#else + template + EIGEN_DEVICE_FUNC DSizes(const Sizes& a) { + for (int i = 0 ; i < NumDims; ++i) { + (*this)[i] = a[i]; + } + } +#endif + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE explicit DSizes(DenseIndex firstDimension, DenseIndex secondDimension, IndexTypes... otherDimensions) : Base({{firstDimension, secondDimension, otherDimensions...}}) { + EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 2 == NumDims, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1) { + eigen_assert(NumDims == 2); + (*this)[0] = i0; + (*this)[1] = i1; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2) { + eigen_assert(NumDims == 3); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3) { + eigen_assert(NumDims == 4); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + (*this)[3] = i3; + } + EIGEN_DEVICE_FUNC DSizes(const DenseIndex i0, const DenseIndex i1, const DenseIndex i2, const DenseIndex i3, const DenseIndex i4) { + eigen_assert(NumDims == 5); + (*this)[0] = i0; + (*this)[1] = i1; + (*this)[2] = i2; + (*this)[3] = i3; + (*this)[4] = i4; + } +#endif + + EIGEN_DEVICE_FUNC DSizes& operator = 
(const array& other) { + *static_cast(this) = other; + return *this; + } + + // A constexpr would be so much better here + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfColMajor(const array& indices) const { + return internal::tensor_index_linearization_helper::run(indices, *static_cast(this)); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex IndexOfRowMajor(const array& indices) const { + return internal::tensor_index_linearization_helper::run(indices, *static_cast(this)); + } +}; + +template +std::ostream& operator<<(std::ostream& os, + const DSizes& dims) { + os << "["; + for (int i = 0; i < NumDims; ++i) { + if (i > 0) os << ", "; + os << dims[i]; + } + os << "]"; + return os; +} + +// Boilerplate +namespace internal { +template +struct tensor_vsize_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, std::vector const& dimensions) + { + return array_get(indices) + + array_get(dimensions) * + tensor_vsize_index_linearization_helper::run(indices, dimensions); + } +}; + +template +struct tensor_vsize_index_linearization_helper +{ + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Index run(array const& indices, std::vector const&) + { + return array_get(indices); + } +}; +} // end namespace internal + + +namespace internal { + +template struct array_size > { + static const ptrdiff_t value = NumDims; +}; +template struct array_size > { + static const ptrdiff_t value = NumDims; +}; +#ifndef EIGEN_EMULATE_CXX11_META_H +template struct array_size > { +static const std::ptrdiff_t value = Sizes::count; +}; +template struct array_size > { +static const std::ptrdiff_t value = Sizes::count; +}; +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) { + return get >::value; +} +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes<>&) { + eigen_assert(false && "should never be called"); + return -1; +} +#else +template struct array_size > { 
+ static const ptrdiff_t value = Sizes::count; +}; +template struct array_size > { + static const ptrdiff_t value = Sizes::count; +}; +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE std::ptrdiff_t array_get(const Sizes&) { + return get::Base>::value; +} + +#endif + + +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) { + return false; + } +}; +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1& dims1, Dims2& dims2) { + return (array_get(dims1) == array_get(dims2)) && + sizes_match_below_dim::run(dims1, dims2); + } +}; +template +struct sizes_match_below_dim { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool run(Dims1&, Dims2&) { + return true; + } +}; + +} // end namespace internal + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool dimensions_match(Dims1 dims1, Dims2 dims2) { + return internal::sizes_match_below_dim::value, internal::array_size::value>::run(dims1, dims2); +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_DIMENSIONS_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h new file mode 100644 index 0000000..a48d035 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h @@ -0,0 +1,236 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H + +namespace Eigen { + +/** \class TensorForcedEval + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. 
+ * + * + */ +namespace internal { +template class MakePointer_> +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename MakePointer_::Type PointerType; + + enum { + Flags = 0 + }; + template + struct MakePointer { + // Intermediate typedef to workaround MSVC issue. + typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + + + }; +}; + +template class MakePointer_> +struct eval, Eigen::Dense> +{ + typedef const TensorEvalToOp& type; +}; + +template class MakePointer_> +struct nested, 1, typename eval >::type> +{ + typedef TensorEvalToOp type; +}; + +} // end namespace internal + + + + +template class MakePointer_> +class TensorEvalToOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename MakePointer_::Type PointerType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + static const int NumDims = Eigen::internal::traits::NumDimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(PointerType buffer, const XprType& expr) + : m_xpr(expr), m_buffer(buffer) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC PointerType buffer() const { return m_buffer; } + + protected: + typename 
XprType::Nested m_xpr; + PointerType m_buffer; +}; + + + +template class MakePointer_> +struct TensorEvaluator, Device> +{ + typedef TensorEvalToOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename XprType::Index Index; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = true, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = true + }; + + static const int NumDims = internal::traits::NumDimensions; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef internal::TensorBlockAssignment< + CoeffReturnType, NumDims, typename ArgTensorBlock::XprType, Index> + TensorBlockAssignment; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_buffer(device.get(op.buffer())), m_expression(op.expression()){} + + + EIGEN_STRONG_INLINE ~TensorEvaluator() { + } + + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType scalar) { + EIGEN_UNUSED_VARIABLE(scalar); + eigen_assert(scalar == NULL); + return m_impl.evalSubExprsIfNeeded(m_buffer); + } + +#ifdef 
EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType scalar, EvalSubExprsCallback done) { + EIGEN_UNUSED_VARIABLE(scalar); + eigen_assert(scalar == NULL); + m_impl.evalSubExprsIfNeededAsync(m_buffer, std::move(done)); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalScalar(Index i) { + m_buffer[i] = m_impl.coeff(i); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { + internal::pstoret(m_buffer + i, m_impl.template packet::IsAligned ? Aligned : Unaligned>(i)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return m_impl.getResourceRequirements(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalBlock( + TensorBlockDesc& desc, TensorBlockScratch& scratch) { + // Add `m_buffer` as destination buffer to the block descriptor. + desc.template AddDestinationBuffer( + /*dst_base=*/m_buffer + desc.offset(), + /*dst_strides=*/internal::strides(m_impl.dimensions())); + + ArgTensorBlock block = + m_impl.block(desc, scratch, /*root_of_expr_ast=*/true); + + // If block was evaluated into a destination buffer, there is no need to do + // an assignment. 
+ if (block.kind() != internal::TensorBlockKind::kMaterializedInOutput) { + TensorBlockAssignment::Run( + TensorBlockAssignment::target( + desc.dimensions(), internal::strides(m_impl.dimensions()), + m_buffer, desc.offset()), + block.expr()); + } + block.cleanup(); + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + // We assume that evalPacket or evalScalar is called to perform the + // assignment and account for the cost of the write here. + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, sizeof(CoeffReturnType), 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_buffer; } + ArgType expression() const { return m_expression; } + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_buffer.bind(cgh); + } + #endif + + + private: + TensorEvaluator m_impl; + EvaluatorPointerType m_buffer; + const ArgType m_expression; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVAL_TO_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h new file mode 100644 index 0000000..3aff7fa --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h @@ -0,0 +1,983 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 
2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H + +namespace Eigen { + +/** \class TensorEvaluator + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor evaluator classes. + * + * These classes are responsible for the evaluation of the tensor expression. + * + * TODO: add support for more types of expressions, in particular expressions + * leading to lvalues (slicing, reshaping, etc...) + */ + +// Generic evaluator +template +struct TensorEvaluator +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + typedef Derived XprType; + static const int PacketSize = PacketType::size; + typedef typename internal::traits::template MakePointer::Type TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + // NumDimensions is -1 for variable dim tensors + static const int NumCoords = internal::traits::NumDimensions > 0 ? 
+ internal::traits::NumDimensions : 0; + + enum { + IsAligned = Derived::IsAligned, + PacketAccess = (PacketType::size > 1), + BlockAccess = internal::is_arithmetic::type>::value, + PreferBlockAccess = false, + Layout = Derived::Layout, + CoordAccess = NumCoords > 0, + RawAccess = true + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) + : m_data(device.get((const_cast(m.data())))), + m_dims(m.dimensions()), + m_device(device) + { } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType dest) { + if (!NumTraits::type>::RequireInitialization && dest) { + m_device.memcpy((void*)(m_device.get(dest)), m_device.get(m_data), m_dims.TotalSize() * sizeof(Scalar)); + return false; + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType dest, EvalSubExprsCallback done) { + // TODO(ezhulenev): ThreadPoolDevice memcpy is blockign operation. 
+ done(evalSubExprsIfNeeded(dest)); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data != NULL); + return m_data[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) { + eigen_assert(m_data != NULL); + return m_data[index]; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + return internal::ploadt(m_data + index); + } + + // Return a packet starting at `index` where `umask` specifies which elements + // have to be loaded. Type/size of mask depends on PacketReturnType, e.g. for + // Packet16f, `umask` is of type uint16_t and if a bit is 1, corresponding + // float element will be loaded, otherwise 0 will be loaded. + // Function has been templatized to enable Sfinae. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::masked_load_available, PacketReturnTypeT>::type + partialPacket(Index index, typename internal::unpacket_traits::mask_t umask) const + { + return internal::ploadu(m_data + index, umask); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + return internal::pstoret(m_data + index, x); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array& coords) const { + eigen_assert(m_data != NULL); + if (static_cast(Layout) == static_cast(ColMajor)) { + return m_data[m_dims.IndexOfColMajor(coords)]; + } else { + return m_data[m_dims.IndexOfRowMajor(coords)]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& + coeffRef(const array& coords) { + eigen_assert(m_data != NULL); + if (static_cast(Layout) == static_cast(ColMajor)) { + return m_data[m_dims.IndexOfColMajor(coords)]; + } else { + return m_data[m_dims.IndexOfRowMajor(coords)]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost 
costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + PacketType::size); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + assert(m_data != NULL); + return TensorBlock::materialize(m_data, m_dims, desc, scratch); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + assert(m_data != NULL); + + typedef typename TensorBlock::XprType TensorBlockExpr; + typedef internal::TensorBlockAssignment + TensorBlockAssign; + + TensorBlockAssign::Run( + TensorBlockAssign::target(desc.dimensions(), + internal::strides(m_dims), m_data, + desc.offset()), + block.expr()); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } +#endif + protected: + EvaluatorPointerType m_data; + Dimensions m_dims; + const Device EIGEN_DEVICE_REF m_device; +}; + +namespace { +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T loadConstant(const T* address) { + return *address; +} +// Use the texture cache on CUDA devices whenever possible +#if defined(EIGEN_CUDA_ARCH) && EIGEN_CUDA_ARCH >= 350 +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +float loadConstant(const float* address) { + return __ldg(address); +} +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +double loadConstant(const double* address) { + return __ldg(address); +} +template <> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +Eigen::half loadConstant(const Eigen::half* address) { + return 
Eigen::half(half_impl::raw_uint16_to_half(__ldg(&address->x))); +} +#endif +#ifdef EIGEN_USE_SYCL +// overload of load constant should be implemented here based on range access +template +T &loadConstant(const Eigen::TensorSycl::internal::RangeAccess &address) { + return *address; +} +#endif +} + + +// Default evaluator for rvalues +template +struct TensorEvaluator +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + typedef const Derived XprType; + typedef typename internal::traits::template MakePointer::Type TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + typedef typename internal::remove_const::type ScalarNoConst; + + // NumDimensions is -1 for variable dim tensors + static const int NumCoords = internal::traits::NumDimensions > 0 ? 
+ internal::traits::NumDimensions : 0; + static const int PacketSize = PacketType::size; + + enum { + IsAligned = Derived::IsAligned, + PacketAccess = (PacketType::size > 1), + BlockAccess = internal::is_arithmetic::value, + PreferBlockAccess = false, + Layout = Derived::Layout, + CoordAccess = NumCoords > 0, + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const Derived& m, const Device& device) + : m_data(device.get(m.data())), m_dims(m.dimensions()), m_device(device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dims; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + if (!NumTraits::type>::RequireInitialization && data) { + m_device.memcpy((void*)(m_device.get(data)),m_device.get(m_data), m_dims.TotalSize() * sizeof(Scalar)); + return false; + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType dest, EvalSubExprsCallback done) { + // TODO(ezhulenev): ThreadPoolDevice memcpy is a blockign operation. + done(evalSubExprsIfNeeded(dest)); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + eigen_assert(m_data != NULL); + return loadConstant(m_data+index); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + return internal::ploadt_ro(m_data + index); + } + + // Return a packet starting at `index` where `umask` specifies which elements + // have to be loaded. 
Type/size of mask depends on PacketReturnType, e.g. for + // Packet16f, `umask` is of type uint16_t and if a bit is 1, corresponding + // float element will be loaded, otherwise 0 will be loaded. + // Function has been templatized to enable Sfinae. + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + typename internal::enable_if::masked_load_available, PacketReturnTypeT>::type + partialPacket(Index index, typename internal::unpacket_traits::mask_t umask) const + { + return internal::ploadu(m_data + index, umask); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array& coords) const { + eigen_assert(m_data != NULL); + const Index index = (static_cast(Layout) == static_cast(ColMajor)) ? m_dims.IndexOfColMajor(coords) + : m_dims.IndexOfRowMajor(coords); + return loadConstant(m_data+index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + PacketType::size); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + assert(m_data != NULL); + return TensorBlock::materialize(m_data, m_dims, desc, scratch); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } +#endif + protected: + EvaluatorPointerType m_data; + Dimensions m_dims; + const Device EIGEN_DEVICE_REF m_device; +}; + + + + +// -------------------- CwiseNullaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseNullaryOp XprType; + 
+ TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), m_argImpl(op.nestedExpression(), device), m_wrapper() + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = internal::functor_traits::PacketAccess + #ifdef EIGEN_USE_SYCL + && (PacketType::size >1) + #endif + , + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_argImpl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { return true; } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + done(true); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_wrapper(m_functor, index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_wrapper.template packetOp(m_functor, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, + PacketType::size); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() 
const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_argImpl.bind(cgh); + } +#endif + + private: + const NullaryOp m_functor; + TensorEvaluator m_argImpl; + const internal::nullary_wrapper m_wrapper; +}; + + + +// -------------------- CwiseUnaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseUnaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = int(TensorEvaluator::PacketAccess) & + int(internal::functor_traits::PacketAccess), + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), + m_functor(op.functor()), + m_argImpl(op.nestedExpression(), device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::remove_const::type ScalarNoConst; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + static const int NumDims = internal::array_size::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef internal::TensorCwiseUnaryBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const 
Dimensions& dimensions() const { return m_argImpl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_argImpl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_argImpl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_argImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_argImpl.coeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_argImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_argImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + static const double functor_cost = internal::functor_traits::Cost; + return m_argImpl.getResourceRequirements().addCostPerCoeff( + {0, 0, functor_cost / PacketSize}); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + return TensorBlock(m_argImpl.block(desc, scratch), m_functor); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const{ + m_argImpl.bind(cgh); + } +#endif + + + private: + const Device EIGEN_DEVICE_REF m_device; + const UnaryOp m_functor; + TensorEvaluator m_argImpl; +}; + + 
+// -------------------- CwiseBinaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseBinaryOp XprType; + + enum { + IsAligned = int(TensorEvaluator::IsAligned) & + int(TensorEvaluator::IsAligned), + PacketAccess = int(TensorEvaluator::PacketAccess) & + int(TensorEvaluator::PacketAccess) & + int(internal::functor_traits::PacketAccess), + BlockAccess = int(TensorEvaluator::BlockAccess) & + int(TensorEvaluator::BlockAccess), + PreferBlockAccess = int(TensorEvaluator::PreferBlockAccess) | + int(TensorEvaluator::PreferBlockAccess), + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), + m_functor(op.functor()), + m_leftImpl(op.lhsExpression(), device), + m_rightImpl(op.rhsExpression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || internal::traits::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + static const int NumDims = internal::array_size< + typename TensorEvaluator::Dimensions>::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + LeftTensorBlock; + typedef typename TensorEvaluator::TensorBlock + RightTensorBlock; + + 
typedef internal::TensorCwiseBinaryBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use right impl instead if right impl dimensions are known at compile time. + return m_leftImpl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_leftImpl.evalSubExprsIfNeeded(NULL); + m_rightImpl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + // TODO(ezhulenev): Evaluate two expression in parallel? + m_leftImpl.evalSubExprsIfNeededAsync(nullptr, [this, done](bool) { + m_rightImpl.evalSubExprsIfNeededAsync(nullptr, + [done](bool) { done(true); }); + }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_leftImpl.cleanup(); + m_rightImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_leftImpl.coeff(index), m_rightImpl.coeff(index)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_leftImpl.template packet(index), m_rightImpl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_leftImpl.costPerCoeff(vectorized) + + m_rightImpl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + static const double functor_cost = internal::functor_traits::Cost; + return internal::TensorBlockResourceRequirements::merge( + m_leftImpl.getResourceRequirements(), + m_rightImpl.getResourceRequirements()) + .addCostPerCoeff({0, 0, 
functor_cost / PacketSize}); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + desc.DropDestinationBuffer(); + return TensorBlock(m_leftImpl.block(desc, scratch), + m_rightImpl.block(desc, scratch), m_functor); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_leftImpl.bind(cgh); + m_rightImpl.bind(cgh); + } + #endif + private: + const Device EIGEN_DEVICE_REF m_device; + const BinaryOp m_functor; + TensorEvaluator m_leftImpl; + TensorEvaluator m_rightImpl; +}; + +// -------------------- CwiseTernaryOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorCwiseTernaryOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned & TensorEvaluator::IsAligned & TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess && + TensorEvaluator::PacketAccess && + internal::functor_traits::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_functor(op.functor()), + m_arg1Impl(op.arg1Expression(), device), + m_arg2Impl(op.arg2Expression(), device), + m_arg3Impl(op.arg3Expression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout) || internal::traits::NumDimensions <= 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + + EIGEN_STATIC_ASSERT((internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value), + 
STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::StorageKind, + typename internal::traits::StorageKind>::value), + STORAGE_KIND_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::Index, + typename internal::traits::Index>::value), + STORAGE_INDEX_MUST_MATCH) + EIGEN_STATIC_ASSERT((internal::is_same::Index, + typename internal::traits::Index>::value), + STORAGE_INDEX_MUST_MATCH) + + eigen_assert(dimensions_match(m_arg1Impl.dimensions(), m_arg2Impl.dimensions()) && dimensions_match(m_arg1Impl.dimensions(), m_arg3Impl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use arg2 or arg3 dimensions if they are known at compile time. 
+ return m_arg1Impl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_arg1Impl.evalSubExprsIfNeeded(NULL); + m_arg2Impl.evalSubExprsIfNeeded(NULL); + m_arg3Impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_arg1Impl.cleanup(); + m_arg2Impl.cleanup(); + m_arg3Impl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_functor.packetOp(m_arg1Impl.template packet(index), + m_arg2Impl.template packet(index), + m_arg3Impl.template packet(index)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double functor_cost = internal::functor_traits::Cost; + return m_arg1Impl.costPerCoeff(vectorized) + + m_arg2Impl.costPerCoeff(vectorized) + + m_arg3Impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, functor_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_arg1Impl.bind(cgh); + m_arg2Impl.bind(cgh); + m_arg3Impl.bind(cgh); + } +#endif + + private: + const TernaryOp m_functor; + TensorEvaluator m_arg1Impl; + TensorEvaluator m_arg2Impl; + TensorEvaluator m_arg3Impl; +}; + + +// -------------------- SelectOp -------------------- + +template +struct TensorEvaluator, Device> +{ + typedef TensorSelectOp XprType; + typedef typename XprType::Scalar Scalar; + + enum { + IsAligned = TensorEvaluator::IsAligned & + TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess & + TensorEvaluator::PacketAccess & + PacketType::HasBlend, + BlockAccess = 
TensorEvaluator::BlockAccess && + TensorEvaluator::BlockAccess && + TensorEvaluator::BlockAccess, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess || + TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + TensorEvaluator(const XprType& op, const Device& device) + : m_condImpl(op.ifExpression(), device), + m_thenImpl(op.thenExpression(), device), + m_elseImpl(op.elseExpression(), device) + { + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((static_cast(TensorEvaluator::Layout) == static_cast(TensorEvaluator::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(dimensions_match(m_condImpl.dimensions(), m_thenImpl.dimensions())); + eigen_assert(dimensions_match(m_thenImpl.dimensions(), m_elseImpl.dimensions())); + } + + typedef typename XprType::Index Index; + typedef typename internal::traits::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + static const int NumDims = internal::array_size::value; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + IfArgTensorBlock; + typedef typename TensorEvaluator::TensorBlock + ThenArgTensorBlock; + typedef typename TensorEvaluator::TensorBlock + ElseArgTensorBlock; + + struct TensorSelectOpBlockFactory { + template + struct XprType { + typedef TensorSelectOp type; + }; + + template + typename XprType::type expr( + const IfArgXprType& if_expr, const 
ThenArgXprType& then_expr, const ElseArgXprType& else_expr) const { + return typename XprType::type(if_expr, then_expr, else_expr); + } + }; + + typedef internal::TensorTernaryExprBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const + { + // TODO: use then or else impl instead if they happen to be known at compile time. + return m_condImpl.dimensions(); + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_condImpl.evalSubExprsIfNeeded(NULL); + m_thenImpl.evalSubExprsIfNeeded(NULL); + m_elseImpl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_condImpl.evalSubExprsIfNeeded(nullptr, [this, done](bool) { + m_thenImpl.evalSubExprsIfNeeded(nullptr, [this, done](bool) { + m_elseImpl.evalSubExprsIfNeeded(nullptr, [done](bool) { done(true); }); + }); + }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_condImpl.cleanup(); + m_thenImpl.cleanup(); + m_elseImpl.cleanup(); + } + + EIGEN_DEVICE_FUNC CoeffReturnType coeff(Index index) const + { + return m_condImpl.coeff(index) ? 
m_thenImpl.coeff(index) : m_elseImpl.coeff(index); + } + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const + { + internal::Selector select; + EIGEN_UNROLL_LOOP + for (Index i = 0; i < PacketSize; ++i) { + select.select[i] = m_condImpl.coeff(index+i); + } + return internal::pblend(select, + m_thenImpl.template packet(index), + m_elseImpl.template packet(index)); + + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return m_condImpl.costPerCoeff(vectorized) + + m_thenImpl.costPerCoeff(vectorized) + .cwiseMax(m_elseImpl.costPerCoeff(vectorized)); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + auto then_req = m_thenImpl.getResourceRequirements(); + auto else_req = m_elseImpl.getResourceRequirements(); + + auto merged_req = + internal::TensorBlockResourceRequirements::merge(then_req, else_req); + merged_req.cost_per_coeff = + then_req.cost_per_coeff.cwiseMax(else_req.cost_per_coeff); + + return internal::TensorBlockResourceRequirements::merge( + m_condImpl.getResourceRequirements(), merged_req); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + // It's unsafe to pass destination buffer to underlying expressions, because + // output might be aliased with one of the inputs. 
+ desc.DropDestinationBuffer(); + + return TensorBlock( + m_condImpl.block(desc, scratch), m_thenImpl.block(desc, scratch), + m_elseImpl.block(desc, scratch), TensorSelectOpBlockFactory()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_condImpl.bind(cgh); + m_thenImpl.bind(cgh); + m_elseImpl.bind(cgh); + } +#endif + private: + TensorEvaluator m_condImpl; + TensorEvaluator m_thenImpl; + TensorEvaluator m_elseImpl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EVALUATOR_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h new file mode 100644 index 0000000..c52fb77 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h @@ -0,0 +1,703 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H + +namespace Eigen { + +/** + * \class TensorExecutor + * \ingroup CXX11_Tensor_Module + * + * \brief The tensor executor class. + * + * This class is responsible for launch the evaluation of the expression on + * the specified computing device. + * + * @tparam Vectorizable can use packet math (SSE/AVX/etc... 
registers and + * instructions) + * @tparam Tiling can use block based tensor evaluation + * (see TensorBlock.h) + */ +namespace internal { + +/** + * Evaluating TensorBroadcastingOp via coefficient of packet path is extremely + * expensive. If expression has at least one broadcast op in it, and it supports + * block based evaluation, we always prefer it, even for the small tensors. For + * all other tileable ops, block evaluation overhead for small tensors (fits + * into L1) is too large, and we fallback on vectorized evaluation. + */ + +// TODO(ezhulenev): Add specializations for all other types of Tensor ops. + +template +struct ExpressionHasTensorBroadcastingOp { + enum { value = false }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorAssignOp > { + enum { value = ExpressionHasTensorBroadcastingOp::value }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorCwiseUnaryOp > { + enum { value = ExpressionHasTensorBroadcastingOp::value }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorCwiseBinaryOp > { + enum { + value = ExpressionHasTensorBroadcastingOp::value || + ExpressionHasTensorBroadcastingOp::value + }; +}; + +template +struct ExpressionHasTensorBroadcastingOp< + const TensorBroadcastingOp > { + enum { value = true }; +}; + +// -------------------------------------------------------------------------- // + +/** + * Default strategy: the expression is evaluated sequentially with a single cpu + * thread, without vectorization and block evaluation. + */ +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + + // Including `unsupported/Eigen/CXX11/Tensor` in different translation units + // with/without `EIGEN_USE_THREADS` or `EIGEN_USE_GPU` is a potential ODR + // violation. 
If this template is instantiated with a non-default device, it + // means that this header file was included without defining + // `EIGEN_USE_THREADS`, `EIGEN_USE_GPU` or `EIGEN_USE_SYCL`. + static_assert(std::is_same::value, + "Default executor instantiated with non-default device. " + "You must #define EIGEN_USE_THREADS, EIGEN_USE_GPU or " + "EIGEN_USE_SYCL before including Eigen headers."); + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const Device& device = Device()) { + TensorEvaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); + for (StorageIndex i = 0; i < size; ++i) { + evaluator.evalScalar(i); + } + } + evaluator.cleanup(); + } +}; + +/** + * Default async execution strategy is not implemented. Currently it's only + * available for ThreadPoolDevice (see definition below). + */ +template +class TensorAsyncExecutor {}; + +/** + * Process all the data with a single cpu thread, using vectorized instructions. + */ +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run( + const Expression& expr, const DefaultDevice& device = DefaultDevice()) { + TensorEvaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); + const int PacketSize = unpacket_traits::PacketReturnType>::size; + + // Give compiler a strong possibility to unroll the loop. But don't insist + // on unrolling, because if the function is expensive compiler should not + // unroll the loop at the expense of inlining. 
+ const StorageIndex UnrolledSize = + (size / (4 * PacketSize)) * 4 * PacketSize; + for (StorageIndex i = 0; i < UnrolledSize; i += 4 * PacketSize) { + for (StorageIndex j = 0; j < 4; j++) { + evaluator.evalPacket(i + j * PacketSize); + } + } + const StorageIndex VectorizedSize = (size / PacketSize) * PacketSize; + for (StorageIndex i = UnrolledSize; i < VectorizedSize; i += PacketSize) { + evaluator.evalPacket(i); + } + for (StorageIndex i = VectorizedSize; i < size; ++i) { + evaluator.evalScalar(i); + } + } + evaluator.cleanup(); + } +}; + +/** + * Process all the data with a single cpu thread, using blocks of data. By + * sizing a block to fit L1 cache we get better cache performance. + */ +template +class TensorExecutor { + public: + typedef typename traits::Scalar Scalar; + typedef typename remove_const::type ScalarNoConst; + + typedef TensorEvaluator Evaluator; + typedef typename traits::Index StorageIndex; + + static const int NumDims = traits::NumDimensions; + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const DefaultDevice& device = DefaultDevice()) { + typedef TensorBlockMapper + TensorBlockMapper; + + typedef internal::TensorBlockDescriptor + TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator + TensorBlockScratch; + + Evaluator evaluator(expr, device); + + // TODO(ezhulenev): Do not use tiling for small tensors? + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + + if (needs_assign) { + // Query expression tree for desired block size/shape. + const TensorBlockResourceRequirements requirements = + evaluator.getResourceRequirements(); + + const TensorBlockMapper block_mapper( + typename TensorBlockDesc::Dimensions(evaluator.dimensions()), + requirements); + + // Share scratch memory allocator between all blocks. 
+ TensorBlockScratch scratch(device); + + const StorageIndex total_block_count = block_mapper.blockCount(); + for (StorageIndex i = 0; i < total_block_count; ++i) { + TensorBlockDesc desc = block_mapper.blockDescriptor(i); + evaluator.evalBlock(desc, scratch); + scratch.reset(); + } + } + evaluator.cleanup(); + } +}; + +/** + * Multicore strategy: the index space is partitioned and each partition is + * executed on a single core. + * + * (1) TensorExecutor will submit work to the ThreadPoolDevice managed thread + * pool, and will block the caller thread until all tasks are finished. + * + * (2) TensorAsyncExecutor is a non-blocking version, that will submit work to + * the ThreadPoolDevice managed thread pool, and will return immediately. + * It will call 'done' callback after all tasks are finished. + */ +#ifdef EIGEN_USE_THREADS + +template +struct TensorExecutorTilingContext { + TensorExecutorTilingContext() = default; + TensorExecutorTilingContext(const TensorBlockMapper& b_mapper, + const TensorOpCost& b_cost, size_t b_aligned_size) + : block_mapper(b_mapper), + cost(b_cost), + aligned_blocksize(b_aligned_size) {} + + TensorBlockMapper block_mapper; // navigate through blocks + TensorOpCost cost; // cost of computing a single block + size_t aligned_blocksize; // block size after memory alignment +}; + +// Computes a block evaluation parameters, and allocates temporary memory buffer +// for blocks. See TensorExecutor/TensorAsyncExecutor (Tiling=On) below. +template +TensorExecutorTilingContext GetTensorExecutorTilingContext( + const Evaluator& evaluator) { + // Query expression tree for desired block size/shape. + TensorBlockResourceRequirements requirements = + evaluator.getResourceRequirements(); + + // Update target block size based on cost model. 
+ double taskSize = TensorCostModel::taskSize( + 1, requirements.cost_per_coeff); + requirements.size = static_cast(1.0 / taskSize); + + TensorBlockMapper block_mapper( + typename TensorBlockMapper::Dimensions(evaluator.dimensions()), + requirements); + + size_t block_size = block_mapper.blockTotalSize(); + const size_t align = numext::maxi(EIGEN_MAX_ALIGN_BYTES, 1); + const size_t aligned_blocksize = + align * + divup(block_size * sizeof(typename Evaluator::Scalar), align); + + return {block_mapper, requirements.cost_per_coeff * block_size, + aligned_blocksize}; +} + +template +struct EvalRange { + static void run(Evaluator* evaluator_in, const StorageIndex firstIdx, + const StorageIndex lastIdx) { + Evaluator evaluator = *evaluator_in; + eigen_assert(lastIdx >= firstIdx); + for (StorageIndex i = firstIdx; i < lastIdx; ++i) { + evaluator.evalScalar(i); + } + } + + static StorageIndex alignBlockSize(StorageIndex size) { return size; } +}; + +template +struct EvalRange { + static const int PacketSize = + unpacket_traits::size; + + static void run(Evaluator* evaluator_in, const StorageIndex firstIdx, + const StorageIndex lastIdx) { + Evaluator evaluator = *evaluator_in; + eigen_assert(lastIdx >= firstIdx); + StorageIndex i = firstIdx; + if (lastIdx - firstIdx >= PacketSize) { + eigen_assert(firstIdx % PacketSize == 0); + StorageIndex last_chunk_offset = lastIdx - 4 * PacketSize; + // Give compiler a strong possibility to unroll the loop. But don't insist + // on unrolling, because if the function is expensive compiler should not + // unroll the loop at the expense of inlining. 
+ for (; i <= last_chunk_offset; i += 4 * PacketSize) { + for (StorageIndex j = 0; j < 4; j++) { + evaluator.evalPacket(i + j * PacketSize); + } + } + last_chunk_offset = lastIdx - PacketSize; + for (; i <= last_chunk_offset; i += PacketSize) { + evaluator.evalPacket(i); + } + } + for (; i < lastIdx; ++i) { + evaluator.evalScalar(i); + } + } + + static StorageIndex alignBlockSize(StorageIndex size) { + // Align block size to packet size and account for unrolling in run above. + if (size >= 16 * PacketSize) { + return (size + 4 * PacketSize - 1) & ~(4 * PacketSize - 1); + } + // Aligning to 4 * PacketSize would increase block size by more than 25%. + return (size + PacketSize - 1) & ~(PacketSize - 1); + } +}; + +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const ThreadPoolDevice& device) { + typedef TensorEvaluator Evaluator; + typedef EvalRange EvalRange; + + Evaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + const StorageIndex size = array_prod(evaluator.dimensions()); + device.parallelFor(size, evaluator.costPerCoeff(Vectorizable), + EvalRange::alignBlockSize, + [&evaluator](StorageIndex firstIdx, StorageIndex lastIdx) { + EvalRange::run(&evaluator, firstIdx, lastIdx); + }); + } + evaluator.cleanup(); + } +}; + +template +class TensorExecutor { + public: + typedef typename traits::Index IndexType; + typedef typename traits::Scalar Scalar; + typedef typename remove_const::type ScalarNoConst; + + static const int NumDims = traits::NumDimensions; + + typedef TensorEvaluator Evaluator; + typedef TensorBlockMapper BlockMapper; + typedef TensorExecutorTilingContext TilingContext; + + typedef internal::TensorBlockDescriptor + TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator + TensorBlockScratch; + + static EIGEN_STRONG_INLINE void run(const Expression& expr, + const 
ThreadPoolDevice& device) { + Evaluator evaluator(expr, device); + + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + const TilingContext tiling = + internal::GetTensorExecutorTilingContext(evaluator); + + auto eval_block = [&device, &evaluator, &tiling](IndexType firstBlockIdx, + IndexType lastBlockIdx) { + TensorBlockScratch scratch(device); + + for (IndexType block_idx = firstBlockIdx; block_idx < lastBlockIdx; + ++block_idx) { + TensorBlockDesc desc = tiling.block_mapper.blockDescriptor(block_idx); + evaluator.evalBlock(desc, scratch); + scratch.reset(); + } + }; + + // Evaluate small expressions directly as a single block. + if (tiling.block_mapper.blockCount() == 1) { + TensorBlockScratch scratch(device); + TensorBlockDesc desc(0, tiling.block_mapper.blockDimensions()); + evaluator.evalBlock(desc, scratch); + } else { + device.parallelFor(tiling.block_mapper.blockCount(), tiling.cost, + eval_block); + } + } + evaluator.cleanup(); + } +}; + +template +class TensorAsyncExecutor { + public: + typedef typename Expression::Index StorageIndex; + typedef TensorEvaluator Evaluator; + + static EIGEN_STRONG_INLINE void runAsync(const Expression& expr, + const ThreadPoolDevice& device, + DoneCallback done) { + TensorAsyncExecutorContext* const ctx = + new TensorAsyncExecutorContext(expr, device, std::move(done)); + + const auto on_eval_subexprs = [ctx, &device](bool need_assign) -> void { + if (!need_assign) { + delete ctx; + return; + } + + typedef EvalRange EvalRange; + const StorageIndex size = array_prod(ctx->evaluator.dimensions()); + device.parallelForAsync( + size, ctx->evaluator.costPerCoeff(Vectorizable), + EvalRange::alignBlockSize, + [ctx](StorageIndex firstIdx, StorageIndex lastIdx) { + EvalRange::run(&ctx->evaluator, firstIdx, lastIdx); + }, + [ctx]() { delete ctx; }); + }; + + ctx->evaluator.evalSubExprsIfNeededAsync(nullptr, on_eval_subexprs); + } + + private: + struct TensorAsyncExecutorContext { + 
TensorAsyncExecutorContext(const Expression& expr, + const ThreadPoolDevice& thread_pool, + DoneCallback done) + : evaluator(expr, thread_pool), on_done(std::move(done)) {} + + ~TensorAsyncExecutorContext() { + evaluator.cleanup(); + on_done(); + } + + Evaluator evaluator; + + private: + DoneCallback on_done; + }; +}; + +template +class TensorAsyncExecutor { + public: + typedef typename traits::Index IndexType; + typedef typename traits::Scalar Scalar; + typedef typename remove_const::type ScalarNoConst; + + static const int NumDims = traits::NumDimensions; + + typedef TensorEvaluator Evaluator; + typedef TensorBlockMapper BlockMapper; + typedef TensorExecutorTilingContext TilingContext; + + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator + TensorBlockScratch; + + static EIGEN_STRONG_INLINE void runAsync(const Expression& expr, + const ThreadPoolDevice& device, + DoneCallback done) { + + TensorAsyncExecutorContext* const ctx = + new TensorAsyncExecutorContext(expr, device, std::move(done)); + + const auto on_eval_subexprs = [ctx](bool need_assign) -> void { + if (!need_assign) { + delete ctx; + return; + } + + ctx->tiling = internal::GetTensorExecutorTilingContext< + Evaluator, BlockMapper, Vectorizable>(ctx->evaluator); + + auto eval_block = [ctx](IndexType firstBlockIdx, IndexType lastBlockIdx) { + TensorBlockScratch scratch(ctx->device); + + for (IndexType block_idx = firstBlockIdx; block_idx < lastBlockIdx; + ++block_idx) { + TensorBlockDesc desc = + ctx->tiling.block_mapper.blockDescriptor(block_idx); + ctx->evaluator.evalBlock(desc, scratch); + scratch.reset(); + } + }; + + // Evaluate small expressions directly as a single block. 
+ if (ctx->tiling.block_mapper.blockCount() == 1) { + TensorBlockScratch scratch(ctx->device); + TensorBlockDesc desc(0, ctx->tiling.block_mapper.blockDimensions()); + ctx->evaluator.evalBlock(desc, scratch); + delete ctx; + } else { + ctx->device.parallelForAsync(ctx->tiling.block_mapper.blockCount(), + ctx->tiling.cost, eval_block, + [ctx]() { delete ctx; }); + } + }; + + ctx->evaluator.evalSubExprsIfNeededAsync(nullptr, on_eval_subexprs); + } + + private: + struct TensorAsyncExecutorContext { + TensorAsyncExecutorContext(const Expression& expr, + const ThreadPoolDevice& thread_pool, + DoneCallback done) + : device(thread_pool), + evaluator(expr, thread_pool), + on_done(std::move(done)) {} + + ~TensorAsyncExecutorContext() { + evaluator.cleanup(); + on_done(); + } + + const ThreadPoolDevice& device; + Evaluator evaluator; + TilingContext tiling; + + private: + DoneCallback on_done; + }; +}; + +#endif // EIGEN_USE_THREADS + +// GPU: the evaluation of the expression is offloaded to a GPU. 
+#if defined(EIGEN_USE_GPU) + +template +class TensorExecutor { + public: + typedef typename Expression::Index StorageIndex; + static void run(const Expression& expr, const GpuDevice& device); +}; + +#if defined(EIGEN_GPUCC) +template +struct EigenMetaKernelEval { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) { + for (StorageIndex i = firstIdx; i < lastIdx; i += step_size) { + eval.evalScalar(i); + } + } +}; + +template +struct EigenMetaKernelEval { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + void run(Evaluator& eval, StorageIndex firstIdx, StorageIndex lastIdx, StorageIndex step_size) { + const StorageIndex PacketSize = unpacket_traits::size; + const StorageIndex vectorized_size = (lastIdx / PacketSize) * PacketSize; + const StorageIndex vectorized_step_size = step_size * PacketSize; + + // Use the vector path + for (StorageIndex i = firstIdx * PacketSize; i < vectorized_size; + i += vectorized_step_size) { + eval.evalPacket(i); + } + for (StorageIndex i = vectorized_size + firstIdx; i < lastIdx; i += step_size) { + eval.evalScalar(i); + } + } +}; + +template +__global__ void +__launch_bounds__(1024) +EigenMetaKernel(Evaluator eval, StorageIndex size) { + + const StorageIndex first_index = blockIdx.x * blockDim.x + threadIdx.x; + const StorageIndex step_size = blockDim.x * gridDim.x; + + const bool vectorizable = Evaluator::PacketAccess & Evaluator::IsAligned; + EigenMetaKernelEval::run(eval, first_index, size, step_size); +} + +/*static*/ +template +EIGEN_STRONG_INLINE void TensorExecutor::run( + const Expression& expr, const GpuDevice& device) { + TensorEvaluator evaluator(expr, device); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(nullptr); + if (needs_assign) { + + const int block_size = device.maxGpuThreadsPerBlock(); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const 
StorageIndex size = array_prod(evaluator.dimensions()); + // Create a least one block to ensure we won't crash when tensorflow calls with tensors of size 0. + const int num_blocks = numext::maxi(numext::mini(max_blocks, divup(size, block_size)), 1); + + LAUNCH_GPU_KERNEL( + (EigenMetaKernel, StorageIndex>), + num_blocks, block_size, 0, device, evaluator, size); + } + evaluator.cleanup(); +} + +#endif // EIGEN_GPUCC +#endif // EIGEN_USE_GPU + +// SYCL Executor policy +#ifdef EIGEN_USE_SYCL + +template +struct ExecExprFunctorKernel { + typedef typename Evaluator::Index Index; + Evaluator evaluator; + const Index range; + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE ExecExprFunctorKernel( + const Scratch, Evaluator evaluator_, const Index range_) + : evaluator(evaluator_), range(range_) {} + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void operator()( + cl::sycl::nd_item<1> itemID) { + compute(itemID); + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if::type + compute(const cl::sycl::nd_item<1>& itemID) { + Index gId = static_cast(itemID.get_global_linear_id()); + Index total_threads = itemID.get_global_range(0); + + for (Index i = gId; i < range; i += total_threads) { + evaluator.evalScalar(i); + } + } + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename std::enable_if::type + compute(const cl::sycl::nd_item<1>& itemID) { + const Index vectorizedRange = + (range / Evaluator::PacketSize) * Evaluator::PacketSize; + Index gId = static_cast(itemID.get_global_linear_id()); + const Index step = Evaluator::PacketSize * itemID.get_global_range(0); + const Index start = Evaluator::PacketSize * gId; + for (Index i = start; i < vectorizedRange; i += step) { + evaluator.evalPacket(i); + } + gId += vectorizedRange; + for (Index i = gId; i < range; i += itemID.get_global_range(0)) { + evaluator.evalScalar(i); + } + } +}; + +template +class TensorExecutor { + public: + typedef typename Expression::Index Index; + static EIGEN_STRONG_INLINE void 
run(const Expression& expr, + const Eigen::SyclDevice& dev) { + typedef Eigen::TensorEvaluator Evaluator; + Evaluator evaluator(expr, dev); + const bool needs_assign = evaluator.evalSubExprsIfNeeded(NULL); + if (needs_assign) { + Index range, GRange, tileSize; + Index total_size = ::Eigen::internal::array_prod(evaluator.dimensions()); + total_size = (total_size == 0) ? 1 : total_size; + const int PacketSize = + Eigen::PacketType::size; + Index vectorizable_threads = static_cast(total_size / PacketSize); + dev.parallel_for_setup(vectorizable_threads, tileSize, range, GRange); + range = total_size; + + dev.template nullary_kernel_launcher< + typename Evaluator::CoeffReturnType, + ExecExprFunctorKernel >( + evaluator, + cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), + cl::sycl::range<1>(tileSize)), + Index(1), range); + } + evaluator.cleanup(); + } +}; + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXECUTOR_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h new file mode 100644 index 0000000..c9bccfc --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h @@ -0,0 +1,388 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_EXPR_H +#define EIGEN_CXX11_TENSOR_TENSOR_EXPR_H + +namespace Eigen { + +/** \class TensorExpr + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor expression classes. + * + * The TensorCwiseNullaryOp class applies a nullary operators to an expression. + * This is typically used to generate constants. 
+ * + * The TensorCwiseUnaryOp class represents an expression where a unary operator + * (e.g. cwiseSqrt) is applied to an expression. + * + * The TensorCwiseBinaryOp class represents an expression where a binary + * operator (e.g. addition) is applied to a lhs and a rhs expression. + * + */ +namespace internal { +template +struct traits > + : traits +{ + typedef traits XprTraits; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + enum { + Flags = 0 + }; +}; + +} // end namespace internal + + + +template +class TensorCwiseNullaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef TensorCwiseNullaryOp Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseNullaryOp(const XprType& xpr, const NullaryOp& func = NullaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + EIGEN_DEVICE_FUNC + const NullaryOp& functor() const { return m_functor; } + + protected: + typename XprType::Nested m_xpr; + const NullaryOp m_functor; +}; + + + +namespace internal { +template +struct traits > + : traits +{ + // TODO(phli): Add InputScalar, InputPacket. Check references to + // current Scalar/Packet to see if the intent is Input or Output. 
+ typedef typename result_of::type Scalar; + typedef traits XprTraits; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename TypeConversion::type + PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseUnaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseUnaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseUnaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + // TODO(phli): Add InputScalar, InputPacket. Check references to + // current Scalar/Packet to see if the intent is Input or Output. + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const UnaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs + // are different. + // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to + // current Scalar/Packet to see if the intent is Inputs or Output. 
+ typedef typename result_of< + BinaryOp(typename LhsXprType::Scalar, + typename RhsXprType::Scalar)>::type Scalar; + typedef traits XprTraits; + typedef typename promote_storage_type< + typename traits::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type< + typename traits::Index, + typename traits::Index>::type Index; + typedef typename LhsXprType::Nested LhsNested; + typedef typename RhsXprType::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename TypeConversion::val, + typename traits::PointerType, + typename traits::PointerType>::type + >::type + PointerType; + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseBinaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseBinaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseBinaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + // TODO(phli): Add Lhs/RhsScalar, Lhs/RhsPacket. Check references to + // current Scalar/Packet to see if the intent is Inputs or Output. 
+ typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseBinaryOp(const LhsXprType& lhs, const RhsXprType& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs_xpr(lhs), m_rhs_xpr(rhs), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const BinaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + lhsExpression() const { return m_lhs_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + rhsExpression() const { return m_rhs_xpr; } + + protected: + typename LhsXprType::Nested m_lhs_xpr; + typename RhsXprType::Nested m_rhs_xpr; + const BinaryOp m_functor; +}; + + +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the args are different. 
+ typedef typename result_of< + TernaryOp(typename Arg1XprType::Scalar, + typename Arg2XprType::Scalar, + typename Arg3XprType::Scalar)>::type Scalar; + typedef traits XprTraits; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename Arg1XprType::Nested Arg1Nested; + typedef typename Arg2XprType::Nested Arg2Nested; + typedef typename Arg3XprType::Nested Arg3Nested; + typedef typename remove_reference::type _Arg1Nested; + typedef typename remove_reference::type _Arg2Nested; + typedef typename remove_reference::type _Arg3Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename TypeConversion::val, + typename traits::PointerType, + typename traits::PointerType>::type + >::type + PointerType; + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorCwiseTernaryOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorCwiseTernaryOp type; +}; + +} // end namespace internal + + + +template +class TensorCwiseTernaryOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef Scalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorCwiseTernaryOp(const Arg1XprType& arg1, const Arg2XprType& arg2, const Arg3XprType& arg3, const TernaryOp& func = TernaryOp()) + : m_arg1_xpr(arg1), m_arg2_xpr(arg2), m_arg3_xpr(arg3), m_functor(func) {} + + EIGEN_DEVICE_FUNC + const TernaryOp& functor() const { return m_functor; } + + /** \returns the nested expressions */ + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg1Expression() const { return m_arg1_xpr; } + + 
EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg2Expression() const { return m_arg2_xpr; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + arg3Expression() const { return m_arg3_xpr; } + + protected: + typename Arg1XprType::Nested m_arg1_xpr; + typename Arg2XprType::Nested m_arg2_xpr; + typename Arg3XprType::Nested m_arg3_xpr; + const TernaryOp m_functor; +}; + + +namespace internal { +template +struct traits > + : traits +{ + typedef typename traits::Scalar Scalar; + typedef traits XprTraits; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename IfXprType::Nested IfNested; + typedef typename ThenXprType::Nested ThenNested; + typedef typename ElseXprType::Nested ElseNested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename conditional::val, + typename traits::PointerType, + typename traits::PointerType>::type PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorSelectOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorSelectOp type; +}; + +} // end namespace internal + + +template +class TensorSelectOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::promote_storage_type::ret CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC + TensorSelectOp(const IfXprType& a_condition, + const ThenXprType& a_then, + const ElseXprType& a_else) + : m_condition(a_condition), m_then(a_then), m_else(a_else) + { } + + EIGEN_DEVICE_FUNC + const 
IfXprType& ifExpression() const { return m_condition; } + + EIGEN_DEVICE_FUNC + const ThenXprType& thenExpression() const { return m_then; } + + EIGEN_DEVICE_FUNC + const ElseXprType& elseExpression() const { return m_else; } + + protected: + typename IfXprType::Nested m_condition; + typename ThenXprType::Nested m_then; + typename ElseXprType::Nested m_else; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_EXPR_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h new file mode 100644 index 0000000..4a1a068 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h @@ -0,0 +1,669 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Jianwei Cui +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FFT_H +#define EIGEN_CXX11_TENSOR_TENSOR_FFT_H + +namespace Eigen { + +/** \class TensorFFT + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor FFT class. 
+ * + * TODO: + * Vectorize the Cooley Tukey and the Bluestein algorithm + * Add support for multithreaded evaluation + * Improve the performance on GPU + */ + +template struct MakeComplex { + template + EIGEN_DEVICE_FUNC + T operator() (const T& val) const { return val; } +}; + +template <> struct MakeComplex { + template + EIGEN_DEVICE_FUNC + std::complex operator() (const T& val) const { return std::complex(val, 0); } +}; + +template <> struct MakeComplex { + template + EIGEN_DEVICE_FUNC + std::complex operator() (const std::complex& val) const { return val; } +}; + +template struct PartOf { + template T operator() (const T& val) const { return val; } +}; + +template <> struct PartOf { + template T operator() (const std::complex& val) const { return val.real(); } +}; + +template <> struct PartOf { + template T operator() (const std::complex& val) const { return val.imag(); } +}; + +namespace internal { +template +struct traits > : public traits { + typedef traits XprTraits; + typedef typename NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename XprTraits::Scalar InputScalar; + typedef typename conditional::type OutputScalar; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename traits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> { + typedef const TensorFFTOp& type; +}; + +template +struct nested, 1, typename eval >::type> { + typedef TensorFFTOp type; +}; + +} // end namespace internal + +template +class TensorFFTOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef 
typename internal::conditional::type OutputScalar; + typedef OutputScalar CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFFTOp(const XprType& expr, const FFT& fft) + : m_xpr(expr), m_fft(fft) {} + + EIGEN_DEVICE_FUNC + const FFT& fft() const { return m_fft; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& expression() const { + return m_xpr; + } + + protected: + typename XprType::Nested m_xpr; + const FFT m_fft; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> { + typedef TensorFFTOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + typedef typename TensorEvaluator::Dimensions InputDimensions; + typedef internal::traits XprTraits; + typedef typename XprTraits::Scalar InputScalar; + typedef typename internal::conditional::type OutputScalar; + typedef OutputScalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = true, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : m_fft(op.fft()), 
m_impl(op.expression(), device), m_data(NULL), m_device(device) { + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + eigen_assert(input_dims[i] > 0); + m_dimensions[i] = input_dims[i]; + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; + } + } else { + m_strides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; + } + } + m_size = m_dimensions.TotalSize(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_dimensions; + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + if (data) { + evalToBuf(data); + return false; + } else { + m_data = (EvaluatorPointerType)m_device.get((CoeffReturnType*)(m_device.allocate_temp(sizeof(CoeffReturnType) * m_size))); + evalToBuf(m_data); + return true; + } + } + + EIGEN_STRONG_INLINE void cleanup() { + if (m_data) { + m_device.deallocate(m_data); + m_data = NULL; + } + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const { + return m_data[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType + packet(Index index) const { + return internal::ploadt(m_data + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_data; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_data.bind(cgh); + } +#endif + + private: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void 
evalToBuf(EvaluatorPointerType data) { + const bool write_to_out = internal::is_same::value; + ComplexScalar* buf = write_to_out ? (ComplexScalar*)data : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * m_size); + + for (Index i = 0; i < m_size; ++i) { + buf[i] = MakeComplex::value>()(m_impl.coeff(i)); + } + + for (size_t i = 0; i < m_fft.size(); ++i) { + Index dim = m_fft[i]; + eigen_assert(dim >= 0 && dim < NumDims); + Index line_len = m_dimensions[dim]; + eigen_assert(line_len >= 1); + ComplexScalar* line_buf = (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * line_len); + const bool is_power_of_two = isPowerOfTwo(line_len); + const Index good_composite = is_power_of_two ? 0 : findGoodComposite(line_len); + const Index log_len = is_power_of_two ? getLog2(line_len) : getLog2(good_composite); + + ComplexScalar* a = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite); + ComplexScalar* b = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * good_composite); + ComplexScalar* pos_j_base_powered = is_power_of_two ? NULL : (ComplexScalar*)m_device.allocate(sizeof(ComplexScalar) * (line_len + 1)); + if (!is_power_of_two) { + // Compute twiddle factors + // t_n = exp(sqrt(-1) * pi * n^2 / line_len) + // for n = 0, 1,..., line_len-1. + // For n > 2 we use the recurrence t_n = t_{n-1}^2 / t_{n-2} * t_1^2 + + // The recurrence is correct in exact arithmetic, but causes + // numerical issues for large transforms, especially in + // single-precision floating point. 
+ // + // pos_j_base_powered[0] = ComplexScalar(1, 0); + // if (line_len > 1) { + // const ComplexScalar pos_j_base = ComplexScalar( + // numext::cos(M_PI / line_len), numext::sin(M_PI / line_len)); + // pos_j_base_powered[1] = pos_j_base; + // if (line_len > 2) { + // const ComplexScalar pos_j_base_sq = pos_j_base * pos_j_base; + // for (int i = 2; i < line_len + 1; ++i) { + // pos_j_base_powered[i] = pos_j_base_powered[i - 1] * + // pos_j_base_powered[i - 1] / + // pos_j_base_powered[i - 2] * + // pos_j_base_sq; + // } + // } + // } + // TODO(rmlarsen): Find a way to use Eigen's vectorized sin + // and cosine functions here. + for (int j = 0; j < line_len + 1; ++j) { + double arg = ((EIGEN_PI * j) * j) / line_len; + std::complex tmp(numext::cos(arg), numext::sin(arg)); + pos_j_base_powered[j] = static_cast(tmp); + } + } + + for (Index partial_index = 0; partial_index < m_size / line_len; ++partial_index) { + const Index base_offset = getBaseOffsetFromIndex(partial_index, dim); + + // get data into line_buf + const Index stride = m_strides[dim]; + if (stride == 1) { + m_device.memcpy(line_buf, &buf[base_offset], line_len*sizeof(ComplexScalar)); + } else { + Index offset = base_offset; + for (int j = 0; j < line_len; ++j, offset += stride) { + line_buf[j] = buf[offset]; + } + } + + // process the line + if (is_power_of_two) { + processDataLineCooleyTukey(line_buf, line_len, log_len); + } + else { + processDataLineBluestein(line_buf, line_len, good_composite, log_len, a, b, pos_j_base_powered); + } + + // write back + if (FFTDir == FFT_FORWARD && stride == 1) { + m_device.memcpy(&buf[base_offset], line_buf, line_len*sizeof(ComplexScalar)); + } else { + Index offset = base_offset; + const ComplexScalar div_factor = ComplexScalar(1.0 / line_len, 0); + for (int j = 0; j < line_len; ++j, offset += stride) { + buf[offset] = (FFTDir == FFT_FORWARD) ? 
line_buf[j] : line_buf[j] * div_factor; + } + } + } + m_device.deallocate(line_buf); + if (!is_power_of_two) { + m_device.deallocate(a); + m_device.deallocate(b); + m_device.deallocate(pos_j_base_powered); + } + } + + if(!write_to_out) { + for (Index i = 0; i < m_size; ++i) { + data[i] = PartOf()(buf[i]); + } + m_device.deallocate(buf); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static bool isPowerOfTwo(Index x) { + eigen_assert(x > 0); + return !(x & (x - 1)); + } + + // The composite number for padding, used in Bluestein's FFT algorithm + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index findGoodComposite(Index n) { + Index i = 2; + while (i < 2 * n - 1) i *= 2; + return i; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static Index getLog2(Index m) { + Index log2m = 0; + while (m >>= 1) log2m++; + return log2m; + } + + // Call Cooley Tukey algorithm directly, data length must be power of 2 + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineCooleyTukey(ComplexScalar* line_buf, Index line_len, Index log_len) { + eigen_assert(isPowerOfTwo(line_len)); + scramble_FFT(line_buf, line_len); + compute_1D_Butterfly(line_buf, line_len, log_len); + } + + // Call Bluestein's FFT algorithm, m is a good composite number greater than (2 * n - 1), used as the padding length + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void processDataLineBluestein(ComplexScalar* line_buf, Index line_len, Index good_composite, Index log_len, ComplexScalar* a, ComplexScalar* b, const ComplexScalar* pos_j_base_powered) { + Index n = line_len; + Index m = good_composite; + ComplexScalar* data = line_buf; + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + a[i] = data[i] * numext::conj(pos_j_base_powered[i]); + } + else { + a[i] = data[i] * pos_j_base_powered[i]; + } + } + for (Index i = n; i < m; ++i) { + a[i] = ComplexScalar(0, 0); + } + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + b[i] = pos_j_base_powered[i]; + } + else { + b[i] = 
numext::conj(pos_j_base_powered[i]); + } + } + for (Index i = n; i < m - n; ++i) { + b[i] = ComplexScalar(0, 0); + } + for (Index i = m - n; i < m; ++i) { + if(FFTDir == FFT_FORWARD) { + b[i] = pos_j_base_powered[m-i]; + } + else { + b[i] = numext::conj(pos_j_base_powered[m-i]); + } + } + + scramble_FFT(a, m); + compute_1D_Butterfly(a, m, log_len); + + scramble_FFT(b, m); + compute_1D_Butterfly(b, m, log_len); + + for (Index i = 0; i < m; ++i) { + a[i] *= b[i]; + } + + scramble_FFT(a, m); + compute_1D_Butterfly(a, m, log_len); + + //Do the scaling after ifft + for (Index i = 0; i < m; ++i) { + a[i] /= m; + } + + for (Index i = 0; i < n; ++i) { + if(FFTDir == FFT_FORWARD) { + data[i] = a[i] * numext::conj(pos_j_base_powered[i]); + } + else { + data[i] = a[i] * pos_j_base_powered[i]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE static void scramble_FFT(ComplexScalar* data, Index n) { + eigen_assert(isPowerOfTwo(n)); + Index j = 1; + for (Index i = 1; i < n; ++i){ + if (j > i) { + std::swap(data[j-1], data[i-1]); + } + Index m = n >> 1; + while (m >= 2 && j > m) { + j -= m; + m >>= 1; + } + j += m; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_2(ComplexScalar* data) { + ComplexScalar tmp = data[1]; + data[1] = data[0] - data[1]; + data[0] += tmp; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_4(ComplexScalar* data) { + ComplexScalar tmp[4]; + tmp[0] = data[0] + data[1]; + tmp[1] = data[0] - data[1]; + tmp[2] = data[2] + data[3]; + if (Dir == FFT_FORWARD) { + tmp[3] = ComplexScalar(0.0, -1.0) * (data[2] - data[3]); + } else { + tmp[3] = ComplexScalar(0.0, 1.0) * (data[2] - data[3]); + } + data[0] = tmp[0] + tmp[2]; + data[1] = tmp[1] + tmp[3]; + data[2] = tmp[0] - tmp[2]; + data[3] = tmp[1] - tmp[3]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_8(ComplexScalar* data) { + ComplexScalar tmp_1[8]; + ComplexScalar tmp_2[8]; + + tmp_1[0] = data[0] + data[1]; + tmp_1[1] = data[0] - 
data[1]; + tmp_1[2] = data[2] + data[3]; + if (Dir == FFT_FORWARD) { + tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, -1); + } else { + tmp_1[3] = (data[2] - data[3]) * ComplexScalar(0, 1); + } + tmp_1[4] = data[4] + data[5]; + tmp_1[5] = data[4] - data[5]; + tmp_1[6] = data[6] + data[7]; + if (Dir == FFT_FORWARD) { + tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, -1); + } else { + tmp_1[7] = (data[6] - data[7]) * ComplexScalar(0, 1); + } + tmp_2[0] = tmp_1[0] + tmp_1[2]; + tmp_2[1] = tmp_1[1] + tmp_1[3]; + tmp_2[2] = tmp_1[0] - tmp_1[2]; + tmp_2[3] = tmp_1[1] - tmp_1[3]; + tmp_2[4] = tmp_1[4] + tmp_1[6]; +// SQRT2DIV2 = sqrt(2)/2 +#define SQRT2DIV2 0.7071067811865476 + if (Dir == FFT_FORWARD) { + tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, -SQRT2DIV2); + tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, -1); + tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, -SQRT2DIV2); + } else { + tmp_2[5] = (tmp_1[5] + tmp_1[7]) * ComplexScalar(SQRT2DIV2, SQRT2DIV2); + tmp_2[6] = (tmp_1[4] - tmp_1[6]) * ComplexScalar(0, 1); + tmp_2[7] = (tmp_1[5] - tmp_1[7]) * ComplexScalar(-SQRT2DIV2, SQRT2DIV2); + } + data[0] = tmp_2[0] + tmp_2[4]; + data[1] = tmp_2[1] + tmp_2[5]; + data[2] = tmp_2[2] + tmp_2[6]; + data[3] = tmp_2[3] + tmp_2[7]; + data[4] = tmp_2[0] - tmp_2[4]; + data[5] = tmp_2[1] - tmp_2[5]; + data[6] = tmp_2[2] - tmp_2[6]; + data[7] = tmp_2[3] - tmp_2[7]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void butterfly_1D_merge( + ComplexScalar* data, Index n, Index n_power_of_2) { + // Original code: + // RealScalar wtemp = std::sin(M_PI/n); + // RealScalar wpi = -std::sin(2 * M_PI/n); + const RealScalar wtemp = m_sin_PI_div_n_LUT[n_power_of_2]; + const RealScalar wpi = (Dir == FFT_FORWARD) + ? 
m_minus_sin_2_PI_div_n_LUT[n_power_of_2] + : -m_minus_sin_2_PI_div_n_LUT[n_power_of_2]; + + const ComplexScalar wp(wtemp, wpi); + const ComplexScalar wp_one = wp + ComplexScalar(1, 0); + const ComplexScalar wp_one_2 = wp_one * wp_one; + const ComplexScalar wp_one_3 = wp_one_2 * wp_one; + const ComplexScalar wp_one_4 = wp_one_3 * wp_one; + const Index n2 = n / 2; + ComplexScalar w(1.0, 0.0); + for (Index i = 0; i < n2; i += 4) { + ComplexScalar temp0(data[i + n2] * w); + ComplexScalar temp1(data[i + 1 + n2] * w * wp_one); + ComplexScalar temp2(data[i + 2 + n2] * w * wp_one_2); + ComplexScalar temp3(data[i + 3 + n2] * w * wp_one_3); + w = w * wp_one_4; + + data[i + n2] = data[i] - temp0; + data[i] += temp0; + + data[i + 1 + n2] = data[i + 1] - temp1; + data[i + 1] += temp1; + + data[i + 2 + n2] = data[i + 2] - temp2; + data[i + 2] += temp2; + + data[i + 3 + n2] = data[i + 3] - temp3; + data[i + 3] += temp3; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void compute_1D_Butterfly( + ComplexScalar* data, Index n, Index n_power_of_2) { + eigen_assert(isPowerOfTwo(n)); + if (n > 8) { + compute_1D_Butterfly(data, n / 2, n_power_of_2 - 1); + compute_1D_Butterfly(data + n / 2, n / 2, n_power_of_2 - 1); + butterfly_1D_merge(data, n, n_power_of_2); + } else if (n == 8) { + butterfly_8(data); + } else if (n == 4) { + butterfly_4(data); + } else if (n == 2) { + butterfly_2(data); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getBaseOffsetFromIndex(Index index, Index omitted_dim) const { + Index result = 0; + + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > omitted_dim; --i) { + const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim]; + const Index idx = index / partial_m_stride; + index -= idx * partial_m_stride; + result += idx * m_strides[i]; + } + result += index; + } + else { + for (Index i = 0; i < omitted_dim; ++i) { + const Index partial_m_stride = m_strides[i] / m_dimensions[omitted_dim]; + 
const Index idx = index / partial_m_stride; + index -= idx * partial_m_stride; + result += idx * m_strides[i]; + } + result += index; + } + // Value of index_coords[omitted_dim] is not determined to this step + return result; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index getIndexFromOffset(Index base, Index omitted_dim, Index offset) const { + Index result = base + offset * m_strides[omitted_dim] ; + return result; + } + + protected: + Index m_size; + const FFT EIGEN_DEVICE_REF m_fft; + Dimensions m_dimensions; + array m_strides; + TensorEvaluator m_impl; + EvaluatorPointerType m_data; + const Device EIGEN_DEVICE_REF m_device; + + // This will support a maximum FFT size of 2^32 for each dimension + // m_sin_PI_div_n_LUT[i] = (-2) * std::sin(M_PI / std::pow(2,i)) ^ 2; + const RealScalar m_sin_PI_div_n_LUT[32] = { + RealScalar(0.0), + RealScalar(-2), + RealScalar(-0.999999999999999), + RealScalar(-0.292893218813453), + RealScalar(-0.0761204674887130), + RealScalar(-0.0192147195967696), + RealScalar(-0.00481527332780311), + RealScalar(-0.00120454379482761), + RealScalar(-3.01181303795779e-04), + RealScalar(-7.52981608554592e-05), + RealScalar(-1.88247173988574e-05), + RealScalar(-4.70619042382852e-06), + RealScalar(-1.17654829809007e-06), + RealScalar(-2.94137117780840e-07), + RealScalar(-7.35342821488550e-08), + RealScalar(-1.83835707061916e-08), + RealScalar(-4.59589268710903e-09), + RealScalar(-1.14897317243732e-09), + RealScalar(-2.87243293150586e-10), + RealScalar( -7.18108232902250e-11), + RealScalar(-1.79527058227174e-11), + RealScalar(-4.48817645568941e-12), + RealScalar(-1.12204411392298e-12), + RealScalar(-2.80511028480785e-13), + RealScalar(-7.01277571201985e-14), + RealScalar(-1.75319392800498e-14), + RealScalar(-4.38298482001247e-15), + RealScalar(-1.09574620500312e-15), + RealScalar(-2.73936551250781e-16), + RealScalar(-6.84841378126949e-17), + RealScalar(-1.71210344531737e-17), + RealScalar(-4.28025861329343e-18) + }; + + // 
m_minus_sin_2_PI_div_n_LUT[i] = -std::sin(2 * M_PI / std::pow(2,i)); + const RealScalar m_minus_sin_2_PI_div_n_LUT[32] = { + RealScalar(0.0), + RealScalar(0.0), + RealScalar(-1.00000000000000e+00), + RealScalar(-7.07106781186547e-01), + RealScalar(-3.82683432365090e-01), + RealScalar(-1.95090322016128e-01), + RealScalar(-9.80171403295606e-02), + RealScalar(-4.90676743274180e-02), + RealScalar(-2.45412285229123e-02), + RealScalar(-1.22715382857199e-02), + RealScalar(-6.13588464915448e-03), + RealScalar(-3.06795676296598e-03), + RealScalar(-1.53398018628477e-03), + RealScalar(-7.66990318742704e-04), + RealScalar(-3.83495187571396e-04), + RealScalar(-1.91747597310703e-04), + RealScalar(-9.58737990959773e-05), + RealScalar(-4.79368996030669e-05), + RealScalar(-2.39684498084182e-05), + RealScalar(-1.19842249050697e-05), + RealScalar(-5.99211245264243e-06), + RealScalar(-2.99605622633466e-06), + RealScalar(-1.49802811316901e-06), + RealScalar(-7.49014056584716e-07), + RealScalar(-3.74507028292384e-07), + RealScalar(-1.87253514146195e-07), + RealScalar(-9.36267570730981e-08), + RealScalar(-4.68133785365491e-08), + RealScalar(-2.34066892682746e-08), + RealScalar(-1.17033446341373e-08), + RealScalar(-5.85167231706864e-09), + RealScalar(-2.92583615853432e-09) + }; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FFT_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h new file mode 100644 index 0000000..ca39bb8 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h @@ -0,0 +1,379 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H +#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H + +namespace Eigen { + +/** \class TensorFixedSize + * \ingroup CXX11_Tensor_Module + * + * \brief The fixed sized version of the tensor class. + * + * The fixed sized equivalent of + * Eigen::Tensor t(3, 5, 7); + * is + * Eigen::TensorFixedSize> t; + */ + +template +class TensorFixedSize : public TensorBase > +{ + public: + typedef TensorFixedSize Self; + typedef TensorBase > Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef Scalar_ Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + + static const int Options = Options_; + + enum { + IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0), + PacketAccess = (internal::packet_traits::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = Options_ & RowMajor ? 
RowMajor : ColMajor, + CoordAccess = true, + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + typedef Dimensions_ Dimensions; + static const std::size_t NumIndices = Dimensions::count; + + protected: + TensorStorage m_storage; + + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); } + + // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED + // work, because that uses base().coeffRef() - and we don't yet + // implement a similar class hierarchy + inline Self& base() { return *this; } + inline const Self& base() const { return *this; } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeff(array{{firstIndex, otherIndices...}}); + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(const array& indices) const + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& coeff() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(array{{firstIndex, otherIndices...}}); + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + eigen_internal_assert(checkIndexRange(indices)); + return m_storage.data()[linearizedIndex(indices)]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_storage.data()[index]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return m_storage.data()[0]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return this->operator()(array{{firstIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const + { + if (Options&RowMajor) { + const Index index = i1 + i0 * m_storage.dimensions()[1]; + return m_storage.data()[index]; + } else { + const Index index = i0 + i1 * m_storage.dimensions()[0]; + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const + { + if (Options&RowMajor) { + const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const + { + if (Options&RowMajor) { + const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3)); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + if (Options&RowMajor) { + const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4))); + return m_storage.data()[index]; + } + } +#endif + + + EIGEN_DEVICE_FUNC + 
EIGEN_STRONG_INLINE const Scalar& operator()(const array& indices) const + { + eigen_assert(checkIndexRange(indices)); + return coeff(indices); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return coeff(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const + { + // The bracket operator is only for vectors, use the parenthesis operator instead. + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) + { + // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + return operator()(array{{firstIndex, otherIndices...}}); + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) + { + if (Options&RowMajor) { + const Index index = i1 + i0 * m_storage.dimensions()[1]; + return m_storage.data()[index]; + } else { + const Index index = i0 + i1 * m_storage.dimensions()[0]; + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) + { + if (Options&RowMajor) { + const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + if (Options&RowMajor) { + const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3)); + return m_storage.data()[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + if (Options&RowMajor) { + const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))); + return m_storage.data()[index]; + } else { + const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4))); + return m_storage.data()[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(const array& 
indices) + { + eigen_assert(checkIndexRange(indices)); + return coeffRef(indices); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE); + return coeffRef(); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator[](Index index) + { + // The bracket operator is only for vectors, use the parenthesis operator instead + EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE) + return coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize() + : m_storage() + { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const Self& other) + : m_storage(other.m_storage) + { + } + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other) + : m_storage(other.m_storage) + { + } +#endif + + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + template + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase& other) + { + typedef TensorAssignOp Assign; + Assign assign(*this, other.derived()); + internal::TensorExecutor::run(assign, DefaultDevice()); + } + + // FIXME: check that the dimensions of other match the dimensions of *this. + // Unfortunately this isn't possible yet when the rhs is an expression. 
+ EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(TensorFixedSize) + + + protected: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE bool checkIndexRange(const array& /*indices*/) const + { + using internal::array_apply_and_reduce; + using internal::array_zip_and_reduce; + using internal::greater_equal_zero_op; + using internal::logical_and_op; + using internal::lesser_op; + + return true; + // check whether the indices are all >= 0 + /* array_apply_and_reduce(indices) && + // check whether the indices fit in the dimensions + array_zip_and_reduce(indices, m_storage.dimensions());*/ + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index linearizedIndex(const array& indices) const + { + if (Options&RowMajor) { + return m_storage.dimensions().IndexOfRowMajor(indices); + } else { + return m_storage.dimensions().IndexOfColMajor(indices); + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h new file mode 100644 index 0000000..e800ded --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h @@ -0,0 +1,237 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H + +namespace Eigen { + +/** \class TensorForcedEval + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. + * + * + */ +namespace internal { +template +struct traits > +{ + // Type promotion to handle the case where the types of the lhs and the rhs are different. 
+ typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + + enum { + Flags = 0 + }; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorForcedEvalOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorForcedEvalOp type; +}; + +} // end namespace internal + + + +template +class TensorForcedEvalOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorForcedEvalOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; +}; + +namespace internal { +template +struct non_integral_type_placement_new{ + template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index numValues, StorageType m_buffer) { + // Initialize non-trivially constructible types. 
+ if (!internal::is_arithmetic::value) { + for (Index i = 0; i < numValues; ++i) new (m_buffer + i) CoeffReturnType(); + } +} +}; + +// SYCL does not support non-integral types +// having new (m_buffer + i) CoeffReturnType() causes the following compiler error for SYCL Devices +// no matching function for call to 'operator new' +template +struct non_integral_type_placement_new { + template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(Index, StorageType) { +} +}; +} // end namespace internal + +template +struct TensorEvaluator, Device> +{ + typedef const typename internal::remove_all::type ArgType; + typedef TensorForcedEvalOp XprType; + typedef typename ArgType::Scalar Scalar; + typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename XprType::Index Index; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = (PacketType::size > 1), + BlockAccess = internal::is_arithmetic::value, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + RawAccess = true + }; + + static const int NumDims = internal::traits::NumDimensions; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_op(op.expression()), + m_device(device), m_buffer(NULL) + { } + + EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return 
m_impl.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + const Index numValues = internal::array_prod(m_impl.dimensions()); + m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType))); + + internal::non_integral_type_placement_new()(numValues, m_buffer); + + typedef TensorEvalToOp< const typename internal::remove_const::type > EvalTo; + EvalTo evalToTmp(m_device.get(m_buffer), m_op); + + internal::TensorExecutor< + const EvalTo, typename internal::remove_const::type, + /*Vectorizable=*/internal::IsVectorizable::value, + /*Tiling=*/internal::IsTileable::value>:: + run(evalToTmp, m_device); + + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + const Index numValues = internal::array_prod(m_impl.dimensions()); + m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp( + numValues * sizeof(CoeffReturnType))); + typedef TensorEvalToOp::type> + EvalTo; + EvalTo evalToTmp(m_device.get(m_buffer), m_op); + + auto on_done = std::bind([](EvalSubExprsCallback done_) { done_(true); }, + std::move(done)); + internal::TensorAsyncExecutor< + const EvalTo, typename internal::remove_const::type, + decltype(on_done), + /*Vectorizable=*/internal::IsVectorizable::value, + /*Tiling=*/internal::IsTileable::value>:: + runAsync(evalToTmp, m_device, std::move(on_done)); + } +#endif + + EIGEN_STRONG_INLINE void cleanup() { + m_device.deallocate_temp(m_buffer); + m_buffer = NULL; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_buffer[index]; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return internal::ploadt(m_buffer + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + return 
internal::TensorBlockResourceRequirements::any(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + assert(m_buffer != NULL); + return TensorBlock::materialize(m_buffer, m_impl.dimensions(), desc, scratch); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + EvaluatorPointerType data() const { return m_buffer; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_buffer.bind(cgh); + m_impl.bind(cgh); + } +#endif + private: + TensorEvaluator m_impl; + const ArgType m_op; + const Device EIGEN_DEVICE_REF m_device; + EvaluatorPointerType m_buffer; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORCED_EVAL_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h new file mode 100644 index 0000000..246ebe4 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h @@ -0,0 +1,191 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H + +namespace Eigen { + +// MakePointer class is used as a container of the address space of the pointer +// on the host and on the device. 
From the host side it generates the T* pointer +// and when EIGEN_USE_SYCL is used it construct a buffer with a map_allocator to +// T* m_data on the host. It is always called on the device. +// Specialisation of MakePointer class for creating the sycl buffer with +// map_allocator. +template struct MakePointer { + typedef T* Type; + typedef const T* ConstType; +}; + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T* constCast(const T* data) { + return const_cast(data); +} + +// The StorageMemory class is a container of the device specific pointer +// used for refering to a Pointer on TensorEvaluator class. While the TensorExpression +// is a device-agnostic type and need MakePointer class for type conversion, +// the TensorEvaluator class can be specialized for a device, hence it is possible +// to construct different types of temproray storage memory in TensorEvaluator +// for different devices by specializing the following StorageMemory class. +template struct StorageMemory: MakePointer {}; + +namespace internal{ +template struct Pointer_type_promotion { + static const bool val=false; +}; +template struct Pointer_type_promotion { + static const bool val = true; +}; +template struct TypeConversion { + typedef A* type; +}; +} + + +template class MakePointer_ = MakePointer> class TensorMap; +template class Tensor; +template class TensorFixedSize; +template class TensorRef; +template class TensorBase; + +template class TensorCwiseNullaryOp; +template class TensorCwiseUnaryOp; +template class TensorCwiseBinaryOp; +template class TensorCwiseTernaryOp; +template class TensorSelectOp; +template class MakePointer_ = MakePointer > class TensorReductionOp; +template class TensorIndexTupleOp; +template class TensorTupleReducerOp; +template class TensorConcatenationOp; +template class TensorContractionOp; +template class TensorConversionOp; +template class TensorConvolutionOp; +template class TensorFFTOp; +template class TensorPatchOp; +template class TensorImagePatchOp; 
+template class TensorVolumePatchOp; +template class TensorBroadcastingOp; +template class TensorChippingOp; +template class TensorReshapingOp; +template class TensorLayoutSwapOp; +template class TensorSlicingOp; +template class TensorReverseOp; +template class TensorPaddingOp; +template class TensorShufflingOp; +template class TensorStridingOp; +template class TensorStridingSlicingOp; +template class TensorInflationOp; +template class TensorGeneratorOp; +template class TensorAssignOp; +template class TensorScanOp; +template class TensorTraceOp; + +template class TensorCustomUnaryOp; +template class TensorCustomBinaryOp; + +template class MakePointer_ = MakePointer> class TensorEvalToOp; +template class TensorForcedEvalOp; + +template class TensorDevice; +template class TensorAsyncDevice; +template struct TensorEvaluator; + +struct NoOpOutputKernel; + +struct DefaultDevice; +struct ThreadPoolDevice; +struct GpuDevice; +struct SyclDevice; + +#ifdef EIGEN_USE_SYCL + +template struct MakeSYCLPointer { + typedef Eigen::TensorSycl::internal::RangeAccess Type; +}; + +template +EIGEN_STRONG_INLINE const Eigen::TensorSycl::internal::RangeAccess& +constCast(const Eigen::TensorSycl::internal::RangeAccess& data) { + return data; +} + +template +struct StorageMemory : MakeSYCLPointer {}; +template +struct StorageMemory : StorageMemory {}; + +namespace TensorSycl { +namespace internal{ +template class GenericNondeterministicReducer; +} +} +#endif + + +enum FFTResultType { + RealPart = 0, + ImagPart = 1, + BothParts = 2 +}; + +enum FFTDirection { + FFT_FORWARD = 0, + FFT_REVERSE = 1 +}; + + +namespace internal { + +template +struct IsVectorizable { + static const bool value = TensorEvaluator::PacketAccess; +}; + +template +struct IsVectorizable { + static const bool value = TensorEvaluator::PacketAccess && + TensorEvaluator::IsAligned; +}; + +// Tiled evaluation strategy. 
+enum TiledEvaluation { + Off = 0, // tiled evaluation is not supported + On = 1, // still work in progress (see TensorBlock.h) +}; + +template +struct IsTileable { + // Check that block evaluation is supported and it's a preferred option (at + // least one sub-expression has much faster block evaluation, e.g. + // broadcasting). + static const bool BlockAccess = + TensorEvaluator::BlockAccess && + TensorEvaluator::PreferBlockAccess; + + static const TiledEvaluation value = + BlockAccess ? TiledEvaluation::On : TiledEvaluation::Off; +}; + +template ::value, + TiledEvaluation Tiling = IsTileable::value> +class TensorExecutor; + +template ::value, + TiledEvaluation Tiling = IsTileable::value> +class TensorAsyncExecutor; + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h new file mode 100644 index 0000000..d963032 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h @@ -0,0 +1,488 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H +#define EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H + +namespace Eigen { +namespace internal { + + +/** \internal + * \brief Template functor to compute the modulo between an array and a scalar. 
+ */ +template +struct scalar_mod_op { + EIGEN_DEVICE_FUNC scalar_mod_op(const Scalar& divisor) : m_divisor(divisor) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a % m_divisor; } + const Scalar m_divisor; +}; +template +struct functor_traits > +{ enum { Cost = scalar_div_cost::value, PacketAccess = false }; }; + + +/** \internal + * \brief Template functor to compute the modulo between 2 arrays. + */ +template +struct scalar_mod2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_mod2_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a, const Scalar& b) const { return a % b; } +}; +template +struct functor_traits > +{ enum { Cost = scalar_div_cost::value, PacketAccess = false }; }; + +template +struct scalar_fmod_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_fmod_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar + operator()(const Scalar& a, const Scalar& b) const { + return numext::fmod(a, b); + } +}; +template +struct functor_traits > { + enum { Cost = 13, // Reciprocal throughput of FPREM on Haswell. 
+ PacketAccess = false }; +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + +// Standard reduction functors +template struct SumReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + internal::scalar_sum_op sum_op; + *accum = sum_op(*accum, t); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = padd(*accum, p); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(0); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_sum_op sum_op; + return sum_op(saccum, predux(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasAdd, + IsStateful = false, + IsExactlyAssociative = NumTraits::IsInteger + }; +}; + +template struct MeanReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + MeanReducer() : scalarCount_(0), packetCount_(0) { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) { + internal::scalar_sum_op sum_op; + *accum = sum_op(*accum, t); + scalarCount_++; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) { + (*accum) = padd(*accum, p); + packetCount_++; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(0); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + internal::scalar_quotient_op quotient_op; + return quotient_op(accum, T(scalarCount_)); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return pdiv(vaccum, pset1(T(packetCount_))); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_sum_op sum_op; + internal::scalar_quotient_op quotient_op; + return quotient_op( + sum_op(saccum, predux(vaccum)), + T(scalarCount_ + packetCount_ * unpacket_traits::size)); + } + + protected: + DenseIndex scalarCount_; + DenseIndex packetCount_; +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasAdd && + PacketType::HasDiv && !NumTraits::IsInteger, + IsStateful = true, + IsExactlyAssociative = NumTraits::IsInteger + }; +}; + + +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::lowest(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return -Eigen::NumTraits::infinity(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::highest(); + } +}; +template +struct MinMaxBottomValue { + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE T bottom_value() { + return Eigen::NumTraits::infinity(); + } +}; + + +template struct MaxReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + scalar_max_op op; + *accum = op(t, *accum); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + scalar_max_op op; + (*accum) = op.packetOp(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE 
T initialize() const { + return MinMaxBottomValue::IsInteger>::bottom_value(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + scalar_max_op op; + return op(saccum, op.predux(vaccum)); + } +}; + +template + struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasMax, + IsStateful = false, + IsExactlyAssociative = (NaNPropagation!=PropagateFast) + }; +}; + +template struct MinReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + scalar_min_op op; + *accum = op(t, *accum); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + scalar_min_op op; + (*accum) = op.packetOp(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return MinMaxBottomValue::IsInteger>::bottom_value(); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + scalar_min_op op; + return op(saccum, op.predux(vaccum)); + } +}; + +template + struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = PacketType::HasMin, + IsStateful = false, + IsExactlyAssociative = (NaNPropagation!=PropagateFast) + }; +}; + 
+template struct ProdReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + internal::scalar_product_op prod_op; + (*accum) = prod_op(*accum, t); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p, Packet* accum) const { + (*accum) = pmul(*accum, p); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + internal::scalar_cast_op conv; + return conv(1); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const { + return pset1(initialize()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const { + return accum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet finalizePacket(const Packet& vaccum) const { + return vaccum; + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalizeBoth(const T saccum, const Packet& vaccum) const { + internal::scalar_product_op prod_op; + return prod_op(saccum, predux_mul(vaccum)); + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::MulCost, + PacketAccess = PacketType::HasMul, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +struct AndReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { + *accum = *accum && t; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { + return true; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { + return accum; + } +}; + +template +struct reducer_traits { + enum { + Cost = 1, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +struct OrReducer { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(bool t, bool* accum) const { + *accum = *accum || t; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool initialize() const { + return false; + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool finalize(bool accum) const { + return accum; + } +}; + +template +struct reducer_traits { + enum { + 
Cost = 1, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + +// Argmin/Argmax reducers. Returns the first occurrence if multiple locations +// contain the same min/max value. +template struct ArgMaxTupleReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const { + if (t.second < accum->second) { + return; + } else if (t.second > accum->second || accum->first > t.first ) { + *accum = t; + } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return T(0, NumTraits::lowest()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { + return accum; + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +template struct ArgMinTupleReducer +{ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T& t, T* accum) const { + if (t.second > accum->second) { + return; + } else if (t.second < accum->second || accum->first > t.first) { + *accum = t; + } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const { + return T(0, NumTraits::highest()); + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T& accum) const { + return accum; + } +}; + +template +struct reducer_traits, Device> { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false, + IsStateful = false, + IsExactlyAssociative = true + }; +}; + + +template +class GaussianGenerator { + public: + static const bool PacketAccess = false; + + EIGEN_DEVICE_FUNC GaussianGenerator(const array& means, + const array& std_devs) + : m_means(means) + { + EIGEN_UNROLL_LOOP + for (size_t i = 0; i < NumDims; ++i) { + m_two_sigmas[i] = std_devs[i] * std_devs[i] * 2; + } + } + + EIGEN_DEVICE_FUNC T operator()(const array& coordinates) const { + T tmp = T(0); + EIGEN_UNROLL_LOOP + for (size_t i = 0; i < NumDims; ++i) { + T offset = coordinates[i] - m_means[i]; + tmp += offset * 
offset / m_two_sigmas[i]; + } + return numext::exp(-tmp); + } + + private: + array m_means; + array m_two_sigmas; +}; + +template +struct functor_traits > { + enum { + Cost = NumDims * (2 * NumTraits::AddCost + NumTraits::MulCost + + functor_traits >::Cost) + + functor_traits >::Cost, + PacketAccess = GaussianGenerator::PacketAccess + }; +}; + +template +struct scalar_clamp_op { + EIGEN_DEVICE_FUNC inline scalar_clamp_op(const Scalar& _min, const Scalar& _max) : m_min(_min), m_max(_max) {} + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar + operator()(const Scalar& x) const { + return numext::mini(numext::maxi(x, m_min), m_max); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet + packetOp(const Packet& x) const { + return internal::pmin(internal::pmax(x, pset1(m_min)), pset1(m_max)); + } + const Scalar m_min; + const Scalar m_max; +}; +template +struct functor_traits > +{ enum { Cost = 2 * NumTraits::AddCost, PacketAccess = (packet_traits::HasMin && packet_traits::HasMax)}; }; + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_FUNCTORS_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h new file mode 100644 index 0000000..174bf06 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h @@ -0,0 +1,302 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H +#define EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H + +namespace Eigen { + +/** \class TensorGeneratorOp + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor generator class. 
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorGeneratorOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorGeneratorOp type; +}; + +} // end namespace internal + + + +template +class TensorGeneratorOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorGeneratorOp(const XprType& expr, const Generator& generator) + : m_xpr(expr), m_generator(generator) {} + + EIGEN_DEVICE_FUNC + const Generator& generator() const { return m_generator; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Generator m_generator; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorGeneratorOp XprType; + typedef typename XprType::Index Index; + typedef typename TensorEvaluator::Dimensions Dimensions; + static const int NumDims = internal::array_size::value; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename 
PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = true, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef internal::TensorIntDivisor IndexDivisor; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), m_generator(op.generator()) + { + TensorEvaluator argImpl(op.expression(), device); + m_dimensions = argImpl.dimensions(); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + EIGEN_UNROLL_LOOP + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i - 1] * m_dimensions[i - 1]; + if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]); + } + } else { + m_strides[NumDims - 1] = 1; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i + 1] * m_dimensions[i + 1]; + if (m_strides[i] != 0) m_fast_strides[i] = IndexDivisor(m_strides[i]); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + array coords; + extract_coordinates(index, coords); + return m_generator(coords); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType 
packet(Index index) const + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+packetSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[packetSize]; + for (int i = 0; i < packetSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.firstLevelCacheSize(); + // TODO(ezhulenev): Generator should have a cost. + return internal::TensorBlockResourceRequirements::skewed( + target_size); + } + + struct BlockIteratorState { + Index stride; + Index span; + Index size; + Index count; + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + static const bool is_col_major = + static_cast(Layout) == static_cast(ColMajor); + + // Compute spatial coordinates for the first block element. + array coords; + extract_coordinates(desc.offset(), coords); + array initial_coords = coords; + + // Offset in the output block buffer. + Index offset = 0; + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + for (int i = 0; i < NumDims; ++i) { + const int dim = is_col_major ? i : NumDims - 1 - i; + it[i].size = desc.dimension(dim); + it[i].stride = i == 0 ? 1 : (it[i - 1].size * it[i - 1].stride); + it[i].span = it[i].stride * (it[i].size - 1); + it[i].count = 0; + } + eigen_assert(it[0].stride == 1); + + // Prepare storage for the materialized generator result. 
+ const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + + CoeffReturnType* block_buffer = block_storage.data(); + + static const int packet_size = PacketType::size; + + static const int inner_dim = is_col_major ? 0 : NumDims - 1; + const Index inner_dim_size = it[0].size; + const Index inner_dim_vectorized = inner_dim_size - packet_size; + + while (it[NumDims - 1].count < it[NumDims - 1].size) { + Index i = 0; + // Generate data for the vectorized part of the inner-most dimension. + for (; i <= inner_dim_vectorized; i += packet_size) { + for (Index j = 0; j < packet_size; ++j) { + array j_coords = coords; // Break loop dependence. + j_coords[inner_dim] += j; + *(block_buffer + offset + i + j) = m_generator(j_coords); + } + coords[inner_dim] += packet_size; + } + // Finalize non-vectorized part of the inner-most dimension. + for (; i < inner_dim_size; ++i) { + *(block_buffer + offset + i) = m_generator(coords); + coords[inner_dim]++; + } + coords[inner_dim] = initial_coords[inner_dim]; + + // For the 1d tensor we need to generate only one inner-most dimension. + if (NumDims == 1) break; + + // Update offset. + for (i = 1; i < NumDims; ++i) { + if (++it[i].count < it[i].size) { + offset += it[i].stride; + coords[is_col_major ? i : NumDims - 1 - i]++; + break; + } + if (i != NumDims - 1) it[i].count = 0; + coords[is_col_major ? i : NumDims - 1 - i] = + initial_coords[is_col_major ? i : NumDims - 1 - i]; + offset -= it[i].span; + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool) const { + // TODO(rmlarsen): This is just a placeholder. Define interface to make + // generators return their cost. 
+ return TensorOpCost(0, 0, TensorOpCost::AddCost() + + TensorOpCost::MulCost()); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler&) const {} +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void extract_coordinates(Index index, array& coords) const { + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fast_strides[i]; + index -= idx * m_strides[i]; + coords[i] = idx; + } + coords[0] = index; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fast_strides[i]; + index -= idx * m_strides[i]; + coords[i] = idx; + } + coords[NumDims-1] = index; + } + } + + const Device EIGEN_DEVICE_REF m_device; + Dimensions m_dimensions; + array m_strides; + array m_fast_strides; + Generator m_generator; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_GENERATOR_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h new file mode 100644 index 0000000..665b861 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h @@ -0,0 +1,33 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H +#define EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H + +namespace Eigen { + +/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given tensors. 
+ * + * This function computes the regularized incomplete beta function (integral). + * + */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const + TensorCwiseTernaryOp, + const ADerived, const BDerived, const XDerived> + betainc(const ADerived& a, const BDerived& b, const XDerived& x) { + return TensorCwiseTernaryOp< + internal::scalar_betainc_op, const ADerived, + const BDerived, const XDerived>( + a, b, x, internal::scalar_betainc_op()); +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_GLOBAL_FUNCTIONS_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h new file mode 100644 index 0000000..cb53ce2 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaDefines.h @@ -0,0 +1,99 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2018 Deven Desai +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#if defined(EIGEN_USE_GPU) && !defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H) +#define EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H + +// Note that we are using EIGEN_USE_HIP here instead of EIGEN_HIPCC...this is by design +// There is code in the Tensorflow codebase that will define EIGEN_USE_GPU, but +// for some reason gets sent to the gcc/host compiler instead of the gpu/nvcc/hipcc compiler +// When compiling such files, gcc will end up trying to pick up the CUDA headers by +// default (see the code within "unsupported/Eigen/CXX11/Tensor" that is guarded by EIGEN_USE_GPU) +// This will obviously not work when trying to compile tensorflow on a system with no CUDA +// To work around this issue for HIP systems (and leave the default behaviour intact), the +// HIP tensorflow build defines EIGEN_USE_HIP when compiling all source files, and +// "unsupported/Eigen/CXX11/Tensor" has been updated to use HIP header when EIGEN_USE_HIP is +// defined. In continuation of that requirement, the guard here needs to be EIGEN_USE_HIP as well + +#if defined(EIGEN_USE_HIP) + +#define gpuStream_t hipStream_t +#define gpuDeviceProp_t hipDeviceProp_t +#define gpuError_t hipError_t +#define gpuSuccess hipSuccess +#define gpuErrorNotReady hipErrorNotReady +#define gpuGetDeviceCount hipGetDeviceCount +#define gpuGetLastError hipGetLastError +#define gpuPeekAtLastError hipPeekAtLastError +#define gpuGetErrorName hipGetErrorName +#define gpuGetErrorString hipGetErrorString +#define gpuGetDeviceProperties hipGetDeviceProperties +#define gpuStreamDefault hipStreamDefault +#define gpuGetDevice hipGetDevice +#define gpuSetDevice hipSetDevice +#define gpuMalloc hipMalloc +#define gpuFree hipFree +#define gpuMemsetAsync hipMemsetAsync +#define gpuMemcpyAsync hipMemcpyAsync +#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice +#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost +#define gpuMemcpyHostToDevice hipMemcpyHostToDevice +#define gpuStreamQuery hipStreamQuery +#define 
gpuSharedMemConfig hipSharedMemConfig +#define gpuDeviceSetSharedMemConfig hipDeviceSetSharedMemConfig +#define gpuStreamSynchronize hipStreamSynchronize +#define gpuDeviceSynchronize hipDeviceSynchronize +#define gpuMemcpy hipMemcpy + +#else + +#define gpuStream_t cudaStream_t +#define gpuDeviceProp_t cudaDeviceProp +#define gpuError_t cudaError_t +#define gpuSuccess cudaSuccess +#define gpuErrorNotReady cudaErrorNotReady +#define gpuGetDeviceCount cudaGetDeviceCount +#define gpuGetLastError cudaGetLastError +#define gpuPeekAtLastError cudaPeekAtLastError +#define gpuGetErrorName cudaGetErrorName +#define gpuGetErrorString cudaGetErrorString +#define gpuGetDeviceProperties cudaGetDeviceProperties +#define gpuStreamDefault cudaStreamDefault +#define gpuGetDevice cudaGetDevice +#define gpuSetDevice cudaSetDevice +#define gpuMalloc cudaMalloc +#define gpuFree cudaFree +#define gpuMemsetAsync cudaMemsetAsync +#define gpuMemcpyAsync cudaMemcpyAsync +#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice +#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost +#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice +#define gpuStreamQuery cudaStreamQuery +#define gpuSharedMemConfig cudaSharedMemConfig +#define gpuDeviceSetSharedMemConfig cudaDeviceSetSharedMemConfig +#define gpuStreamSynchronize cudaStreamSynchronize +#define gpuDeviceSynchronize cudaDeviceSynchronize +#define gpuMemcpy cudaMemcpy + +#endif + +// gpu_assert can be overridden +#ifndef gpu_assert + +#if defined(EIGEN_HIP_DEVICE_COMPILE) +// HIPCC do not support the use of assert on the GPU side. 
+#define gpu_assert(COND) +#else +#define gpu_assert(COND) assert(COND) +#endif + +#endif // gpu_assert + +#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h new file mode 100644 index 0000000..1d142f2 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorGpuHipCudaUndefines.h @@ -0,0 +1,44 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2018 Deven Desai +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#if defined(EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H) + +#ifndef EIGEN_PERMANENTLY_ENABLE_GPU_HIP_CUDA_DEFINES + +#undef gpuStream_t +#undef gpuDeviceProp_t +#undef gpuError_t +#undef gpuSuccess +#undef gpuErrorNotReady +#undef gpuGetDeviceCount +#undef gpuGetErrorString +#undef gpuGetDeviceProperties +#undef gpuStreamDefault +#undef gpuGetDevice +#undef gpuSetDevice +#undef gpuMalloc +#undef gpuFree +#undef gpuMemsetAsync +#undef gpuMemcpyAsync +#undef gpuMemcpyDeviceToDevice +#undef gpuMemcpyDeviceToHost +#undef gpuMemcpyHostToDevice +#undef gpuStreamQuery +#undef gpuSharedMemConfig +#undef gpuDeviceSetSharedMemConfig +#undef gpuStreamSynchronize +#undef gpuDeviceSynchronize +#undef gpuMemcpy + +#endif // EIGEN_PERMANENTLY_ENABLE_GPU_HIP_CUDA_DEFINES + +#undef EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H + +#endif // EIGEN_CXX11_TENSOR_GPU_HIP_CUDA_DEFINES_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h new file mode 100644 index 0000000..a901c5d --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h @@ -0,0 +1,79 @@ +// This file is 
part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_IO_H +#define EIGEN_CXX11_TENSOR_TENSOR_IO_H + +namespace Eigen { + +namespace internal { + +// Print the tensor as a 2d matrix +template +struct TensorPrinter { + static void run (std::ostream& os, const Tensor& tensor) { + typedef typename internal::remove_const::type Scalar; + typedef typename Tensor::Index Index; + const Index total_size = internal::array_prod(tensor.dimensions()); + if (total_size > 0) { + const Index first_dim = Eigen::internal::array_get<0>(tensor.dimensions()); + static const int layout = Tensor::Layout; + Map > matrix(const_cast(tensor.data()), first_dim, total_size/first_dim); + os << matrix; + } + } +}; + + +// Print the tensor as a vector +template +struct TensorPrinter { + static void run (std::ostream& os, const Tensor& tensor) { + typedef typename internal::remove_const::type Scalar; + typedef typename Tensor::Index Index; + const Index total_size = internal::array_prod(tensor.dimensions()); + if (total_size > 0) { + Map > array(const_cast(tensor.data()), total_size); + os << array; + } + } +}; + + +// Print the tensor as a scalar +template +struct TensorPrinter { + static void run (std::ostream& os, const Tensor& tensor) { + os << tensor.coeff(0); + } +}; +} + +template +std::ostream& operator << (std::ostream& os, const TensorBase& expr) { + typedef TensorEvaluator, DefaultDevice> Evaluator; + typedef typename Evaluator::Dimensions Dimensions; + + // Evaluate the expression if needed + TensorForcedEvalOp eval = expr.eval(); + Evaluator tensor(eval, DefaultDevice()); + tensor.evalSubExprsIfNeeded(NULL); + + // Print the result + static const int rank = 
internal::array_size::value; + internal::TensorPrinter::run(os, tensor); + + // Cleanup. + tensor.cleanup(); + return os; +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_IO_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h new file mode 100644 index 0000000..dd51850 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h @@ -0,0 +1,603 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H +#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H + +namespace Eigen { + +/** \class TensorImagePatch + * \ingroup CXX11_Tensor_Module + * + * \brief Patch extraction specialized for image processing. + * This assumes that the input has a least 3 dimensions ordered as follow: + * 1st dimension: channels (of size d) + * 2nd dimension: rows (of size r) + * 3rd dimension: columns (of size c) + * There can be additional dimensions such as time (for video) or batch (for + * bulk processing after the first 3. + * Calling the image patch code with patch_rows and patch_cols is equivalent + * to calling the regular patch extraction code with parameters d, patch_rows, + * patch_cols, and 1 for all the additional dimensions. 
+ */ +namespace internal { + +template +struct traits > : public traits +{ + typedef typename internal::remove_const::type Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorImagePatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorImagePatchOp type; +}; + +template +struct ImagePatchCopyOp { + typedef typename Self::Index Index; + typedef typename Self::Scalar Scalar; + typedef typename Self::Impl Impl; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Self& self, const Index num_coeff_to_copy, const Index dst_index, + Scalar* dst_data, const Index src_index) { + const Impl& impl = self.impl(); + for (Index i = 0; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = impl.coeff(src_index + i); + } + } +}; + +template +struct ImagePatchCopyOp { + typedef typename Self::Index Index; + typedef typename Self::Scalar Scalar; + typedef typename Self::Impl Impl; + typedef typename packet_traits::type Packet; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Self& self, const Index num_coeff_to_copy, const Index dst_index, + Scalar* dst_data, const Index src_index) { + const Impl& impl = self.impl(); + const Index packet_size = internal::unpacket_traits::size; + const Index vectorized_size = + (num_coeff_to_copy / packet_size) * packet_size; + for (Index i = 0; i < vectorized_size; i += packet_size) { + Packet p = impl.template packet(src_index + i); + internal::pstoret(dst_data + dst_index + i, p); + } + for (Index i = vectorized_size; i < num_coeff_to_copy; ++i) { + dst_data[dst_index + i] = 
impl.coeff(src_index + i); + } + } +}; + +template +struct ImagePatchPaddingOp { + typedef typename Self::Index Index; + typedef typename Self::Scalar Scalar; + typedef typename packet_traits::type Packet; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run( + const Index num_coeff_to_pad, const Scalar padding_value, + const Index dst_index, Scalar* dst_data) { + const Index packet_size = internal::unpacket_traits::size; + const Packet padded_packet = internal::pset1(padding_value); + const Index vectorized_size = + (num_coeff_to_pad / packet_size) * packet_size; + for (Index i = 0; i < vectorized_size; i += packet_size) { + internal::pstoret(dst_data + dst_index + i, + padded_packet); + } + for (Index i = vectorized_size; i < num_coeff_to_pad; ++i) { + dst_data[dst_index + i] = padding_value; + } + } +}; + +} // end namespace internal + +template +class TensorImagePatchOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + PaddingType padding_type, Scalar padding_value) + : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_row_strides(row_strides), m_col_strides(col_strides), + m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(false), m_padding_top(0), 
m_padding_bottom(0), m_padding_left(0), m_padding_right(0), + m_padding_type(padding_type), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + DenseIndex padding_top, DenseIndex padding_bottom, + DenseIndex padding_left, DenseIndex padding_right, + Scalar padding_value) + : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_row_strides(row_strides), m_col_strides(col_strides), + m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom), + m_padding_left(padding_left), m_padding_right(padding_right), + m_padding_type(PADDING_VALID), m_padding_value(padding_value) {} + + + EIGEN_DEVICE_FUNC + DenseIndex patch_rows() const { return m_patch_rows; } + EIGEN_DEVICE_FUNC + DenseIndex patch_cols() const { return m_patch_cols; } + EIGEN_DEVICE_FUNC + DenseIndex row_strides() const { return m_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_strides() const { return m_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_row_strides() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_col_strides() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_inflate_strides() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_inflate_strides() const { return m_col_inflate_strides; } + EIGEN_DEVICE_FUNC + bool padding_explicit() const { return m_padding_explicit; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top() const { return m_padding_top; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom() const { return m_padding_bottom; } + 
EIGEN_DEVICE_FUNC + DenseIndex padding_left() const { return m_padding_left; } + EIGEN_DEVICE_FUNC + DenseIndex padding_right() const { return m_padding_right; } + EIGEN_DEVICE_FUNC + PaddingType padding_type() const { return m_padding_type; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const DenseIndex m_patch_rows; + const DenseIndex m_patch_cols; + const DenseIndex m_row_strides; + const DenseIndex m_col_strides; + const DenseIndex m_in_row_strides; + const DenseIndex m_in_col_strides; + const DenseIndex m_row_inflate_strides; + const DenseIndex m_col_inflate_strides; + const bool m_padding_explicit; + const DenseIndex m_padding_top; + const DenseIndex m_padding_bottom; + const DenseIndex m_padding_left; + const DenseIndex m_padding_right; + const PaddingType m_padding_type; + const Scalar m_padding_value; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorImagePatchOp XprType; + typedef typename XprType::Index Index; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims + 1; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef TensorEvaluator, + Device> Self; + typedef TensorEvaluator Impl; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + 
typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator( const XprType& op, const Device& device) + : m_device(device), m_impl(op.expression(), device) + { + EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE); + + m_paddingValue = op.padding_value(); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + // Caches a few variables. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputDepth = input_dims[0]; + m_inputRows = input_dims[1]; + m_inputCols = input_dims[2]; + } else { + m_inputDepth = input_dims[NumInputDims-1]; + m_inputRows = input_dims[NumInputDims-2]; + m_inputCols = input_dims[NumInputDims-3]; + } + + m_row_strides = op.row_strides(); + m_col_strides = op.col_strides(); + + // Input strides and effective input/patch size + m_in_row_strides = op.in_row_strides(); + m_in_col_strides = op.in_col_strides(); + m_row_inflate_strides = op.row_inflate_strides(); + m_col_inflate_strides = op.col_inflate_strides(); + // The "effective" input rows and input cols are the input rows and cols + // after inflating them with zeros. + // For examples, a 2x3 matrix with row_inflate_strides and + // col_inflate_strides of 2 comes from: + // A B C + // D E F + // + // to a matrix is 3 x 5: + // + // A . B . C + // . . . . . + // D . E . 
F + + m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1; + m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1; + m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1); + m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1); + + if (op.padding_explicit()) { + m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_rowPaddingTop = op.padding_top(); + m_colPaddingLeft = op.padding_left(); + } else { + // Computing padding from the type + switch (op.padding_type()) { + case PADDING_VALID: + m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + // Calculate the padding + m_rowPaddingTop = numext::maxi(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2); + m_colPaddingLeft = numext::maxi(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2); + break; + case PADDING_SAME: + m_outputRows = numext::ceil(m_input_rows_eff / static_cast(m_row_strides)); + m_outputCols = numext::ceil(m_input_cols_eff / static_cast(m_col_strides)); + // Calculate the padding + m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2; + m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2; + // The padding size calculation for PADDING_SAME has been updated to + // be consistent with how TensorFlow extracts its paddings. 
+ m_rowPaddingTop = numext::maxi(0, m_rowPaddingTop); + m_colPaddingLeft = numext::maxi(0, m_colPaddingLeft); + break; + default: + eigen_assert(false && "unexpected padding"); + m_outputCols=0; // silence the uninitialised warning; + m_outputRows=0; //// silence the uninitialised warning; + } + } + eigen_assert(m_outputRows > 0); + eigen_assert(m_outputCols > 0); + + // Dimensions for result of extraction. + if (static_cast(Layout) == static_cast(ColMajor)) { + // ColMajor + // 0: depth + // 1: patch_rows + // 2: patch_cols + // 3: number of patches + // 4 and beyond: anything else (such as batch). + m_dimensions[0] = input_dims[0]; + m_dimensions[1] = op.patch_rows(); + m_dimensions[2] = op.patch_cols(); + m_dimensions[3] = m_outputRows * m_outputCols; + for (int i = 4; i < NumDims; ++i) { + m_dimensions[i] = input_dims[i-1]; + } + } else { + // RowMajor + // NumDims-1: depth + // NumDims-2: patch_rows + // NumDims-3: patch_cols + // NumDims-4: number of patches + // NumDims-5 and beyond: anything else (such as batch). + m_dimensions[NumDims-1] = input_dims[NumInputDims-1]; + m_dimensions[NumDims-2] = op.patch_rows(); + m_dimensions[NumDims-3] = op.patch_cols(); + m_dimensions[NumDims-4] = m_outputRows * m_outputCols; + for (int i = NumDims-5; i >= 0; --i) { + m_dimensions[i] = input_dims[i]; + } + } + + // Strides for moving the patch in various dimensions. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_colStride = m_dimensions[1]; + m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0]; + m_otherStride = m_patchStride * m_dimensions[3]; + } else { + m_colStride = m_dimensions[NumDims-2]; + m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1]; + m_otherStride = m_patchStride * m_dimensions[NumDims-4]; + } + + // Strides for navigating through the input tensor. 
+ m_rowInputStride = m_inputDepth; + m_colInputStride = m_inputDepth * m_inputRows; + m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols; + + // Fast representations of different variables. + m_fastOtherStride = internal::TensorIntDivisor(m_otherStride); + m_fastPatchStride = internal::TensorIntDivisor(m_patchStride); + m_fastColStride = internal::TensorIntDivisor(m_colStride); + m_fastInflateRowStride = internal::TensorIntDivisor(m_row_inflate_strides); + m_fastInflateColStride = internal::TensorIntDivisor(m_col_inflate_strides); + m_fastInputColsEff = internal::TensorIntDivisor(m_input_cols_eff); + + // Number of patches in the width dimension. + m_fastOutputRows = internal::TensorIntDivisor(m_outputRows); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[0]); + } else { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[NumDims-1]); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Patch index corresponding to the passed in index. + const Index patchIndex = index / m_fastPatchStride; + // Find the offset of the element wrt the location of the first element. + const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth; + + // Other ways to index this element. + const Index otherIndex = (NumDims == 4) ? 
0 : index / m_fastOtherStride; + const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride; + + // Calculate col index in the input original tensor. + const Index colIndex = patch2DIndex / m_fastOutputRows; + const Index colOffset = patchOffset / m_fastColStride; + const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft; + const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0); + if (inputCol < 0 || inputCol >= m_input_cols_eff || + ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate row index in the original input tensor. + const Index rowIndex = patch2DIndex - colIndex * m_outputRows; + const Index rowOffset = patchOffset - colOffset * m_colStride; + const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop; + const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0); + if (inputRow < 0 || inputRow >= m_input_rows_eff || + ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) { + return Scalar(m_paddingValue); + } + + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + + const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride; + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) { + return packetWithPossibleZero(index); + } + + const Index indices[2] = {index, index + PacketSize - 1}; + const Index patchIndex = indices[0] / m_fastPatchStride; + if (patchIndex != indices[1] / m_fastPatchStride) { + return packetWithPossibleZero(index); + } + const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride; + eigen_assert(otherIndex == indices[1] / m_fastOtherStride); + + // Find the offset of the element wrt the location of the first element. + const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth, + (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth}; + + const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride; + eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride); + + const Index colIndex = patch2DIndex / m_fastOutputRows; + const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride}; + + // Calculate col indices in the original input tensor. 
+ const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] - + m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft}; + if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputCols[0] == inputCols[1]) { + const Index rowIndex = patch2DIndex - colIndex * m_outputRows; + const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride}; + eigen_assert(rowOffsets[0] <= rowOffsets[1]); + // Calculate col indices in the original input tensor. + const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] - + m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop}; + + if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) { + // no padding + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride; + return m_impl.template packet(inputIndex); + } + } + + return packetWithPossibleZero(index); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator& impl() const { return m_impl; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + // We conservatively estimate the cost for the code path where the computed + // index is inside the original image and + // TensorEvaluator::CoordAccess is false. 
+ const double compute_cost = 3 * TensorOpCost::DivCost() + + 6 * TensorOpCost::MulCost() + + 8 * TensorOpCost::MulCost(); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + + Index m_otherStride; + Index m_patchStride; + Index m_colStride; + Index m_row_strides; + Index m_col_strides; + + Index m_in_row_strides; + Index m_in_col_strides; + Index m_row_inflate_strides; + Index m_col_inflate_strides; + + Index m_input_rows_eff; + Index m_input_cols_eff; + Index m_patch_rows_eff; + Index m_patch_cols_eff; + + internal::TensorIntDivisor m_fastOtherStride; + internal::TensorIntDivisor m_fastPatchStride; + internal::TensorIntDivisor m_fastColStride; + internal::TensorIntDivisor m_fastInflateRowStride; + internal::TensorIntDivisor m_fastInflateColStride; + internal::TensorIntDivisor m_fastInputColsEff; + + Index m_rowInputStride; + Index m_colInputStride; + Index m_patchInputStride; + + Index m_inputDepth; + Index m_inputRows; + Index m_inputCols; + + Index m_outputRows; + Index m_outputCols; + + Index m_rowPaddingTop; + Index m_colPaddingLeft; + + internal::TensorIntDivisor m_fastOutputRows; + internal::TensorIntDivisor m_fastOutputDepth; + + Scalar m_paddingValue; + + const Device EIGEN_DEVICE_REF m_device; + TensorEvaluator m_impl; +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h new file mode 100644 index 0000000..2d8c7b9 --- /dev/null +++ 
b/external/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h @@ -0,0 +1,738 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H +#define EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H + + +#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES + +#define EIGEN_HAS_INDEX_LIST + +namespace Eigen { + +/** \internal + * + * \class TensorIndexList + * \ingroup CXX11_Tensor_Module + * + * \brief Set of classes used to encode a set of Tensor dimensions/indices. + * + * The indices in the list can be known at compile time or at runtime. A mix + * of static and dynamic indices can also be provided if needed. The tensor + * code will attempt to take advantage of the indices that are known at + * compile time to optimize the code it generates. + * + * This functionality requires a c++11 compliant compiler. If your compiler + * is older you need to use arrays of indices instead. + * + * Several examples are provided in the cxx11_tensor_index_list.cpp file. + * + * \sa Tensor + */ + +template +struct type2index { + static const Index value = n; + EIGEN_DEVICE_FUNC constexpr operator Index() const { return n; } + EIGEN_DEVICE_FUNC void set(Index val) { + eigen_assert(val == n); + } +}; + +// This can be used with IndexPairList to get compile-time constant pairs, +// such as IndexPairList, type2indexpair<3,4>>(). 
+template +struct type2indexpair { + static const Index first = f; + static const Index second = s; + + constexpr EIGEN_DEVICE_FUNC operator IndexPair() const { + return IndexPair(f, s); + } + + EIGEN_DEVICE_FUNC void set(const IndexPair& val) { + eigen_assert(val.first == f); + eigen_assert(val.second == s); + } +}; + + +template struct NumTraits > +{ + typedef Index Real; + enum { + IsComplex = 0, + RequireInitialization = false, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; + + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real epsilon() { return 0; } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real dummy_precision() { return 0; } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real highest() { return n; } + EIGEN_DEVICE_FUNC static EIGEN_CONSTEXPR EIGEN_STRONG_INLINE Real lowest() { return n; } +}; + +namespace internal { +template +EIGEN_DEVICE_FUNC void update_value(T& val, Index new_val) { + val = internal::convert_index(new_val); +} +template +EIGEN_DEVICE_FUNC void update_value(type2index& val, Index new_val) { + val.set(new_val); +} + +template +EIGEN_DEVICE_FUNC void update_value(T& val, IndexPair new_val) { + val = new_val; +} +template +EIGEN_DEVICE_FUNC void update_value(type2indexpair& val, IndexPair new_val) { + val.set(new_val); +} + + +template +struct is_compile_time_constant { + static constexpr bool value = false; +}; + +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; + +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant > { + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > 
{ + static constexpr bool value = true; +}; +template +struct is_compile_time_constant& > { + static constexpr bool value = true; +}; + + +template +struct IndexTuple; + +template +struct IndexTuple { + EIGEN_DEVICE_FUNC constexpr IndexTuple() : head(), others() { } + EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v, const O... o) : head(v), others(o...) { } + + constexpr static int count = 1 + sizeof...(O); + T head; + IndexTuple others; + typedef T Head; + typedef IndexTuple Other; +}; + +template + struct IndexTuple { + EIGEN_DEVICE_FUNC constexpr IndexTuple() : head() { } + EIGEN_DEVICE_FUNC constexpr IndexTuple(const T& v) : head(v) { } + + constexpr static int count = 1; + T head; + typedef T Head; +}; + + +template +struct IndexTupleExtractor; + +template +struct IndexTupleExtractor { + + typedef typename IndexTupleExtractor::ValType ValType; + + EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple& val) { + return IndexTupleExtractor::get_val(val.others); + } + + EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple& val) { + return IndexTupleExtractor::get_val(val.others); + } + template + EIGEN_DEVICE_FUNC static void set_val(IndexTuple& val, V& new_val) { + IndexTupleExtractor::set_val(val.others, new_val); + } + +}; + +template + struct IndexTupleExtractor<0, T, O...> { + + typedef T ValType; + + EIGEN_DEVICE_FUNC static constexpr ValType& get_val(IndexTuple& val) { + return val.head; + } + EIGEN_DEVICE_FUNC static constexpr const ValType& get_val(const IndexTuple& val) { + return val.head; + } + template + EIGEN_DEVICE_FUNC static void set_val(IndexTuple& val, V& new_val) { + val.head = new_val; + } +}; + + + +template +EIGEN_DEVICE_FUNC constexpr typename IndexTupleExtractor::ValType& array_get(IndexTuple& tuple) { + return IndexTupleExtractor::get_val(tuple); +} +template +EIGEN_DEVICE_FUNC constexpr const typename IndexTupleExtractor::ValType& array_get(const IndexTuple& tuple) { + return 
IndexTupleExtractor::get_val(tuple); +} +template + struct array_size > { + static const size_t value = IndexTuple::count; +}; +template + struct array_size > { + static const size_t value = IndexTuple::count; +}; + + + + +template +struct tuple_coeff { + template + EIGEN_DEVICE_FUNC static constexpr ValueT get(const Index i, const IndexTuple& t) { + // return array_get(t) * (i == Idx) + tuple_coeff::get(i, t) * (i != Idx); + return (i == Idx ? array_get(t) : tuple_coeff::get(i, t)); + } + template + EIGEN_DEVICE_FUNC static void set(const Index i, IndexTuple& t, const ValueT& value) { + if (i == Idx) { + update_value(array_get(t), value); + } else { + tuple_coeff::set(i, t, value); + } + } + + template + EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const Index i, const IndexTuple& t) { + return ((i == Idx) & is_compile_time_constant::ValType>::value) || + tuple_coeff::value_known_statically(i, t); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple& t) { + return is_compile_time_constant::ValType>::value && + tuple_coeff::values_up_to_known_statically(t); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple& t) { + return is_compile_time_constant::ValType>::value && + is_compile_time_constant::ValType>::value && + array_get(t) > array_get(t) && + tuple_coeff::values_up_to_statically_known_to_increase(t); + } +}; + +template +struct tuple_coeff<0, ValueT> { + template + EIGEN_DEVICE_FUNC static constexpr ValueT get(const Index /*i*/, const IndexTuple& t) { + // eigen_assert (i == 0); // gcc fails to compile assertions in constexpr + return array_get<0>(t)/* * (i == 0)*/; + } + template + EIGEN_DEVICE_FUNC static void set(const Index i, IndexTuple& t, const ValueT value) { + eigen_assert (i == 0); + update_value(array_get<0>(t), value); + } + template + EIGEN_DEVICE_FUNC static constexpr bool value_known_statically(const Index i, 
const IndexTuple&) { + return is_compile_time_constant::ValType>::value && (i == 0); + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_known_statically(const IndexTuple&) { + return is_compile_time_constant::ValType>::value; + } + + template + EIGEN_DEVICE_FUNC static constexpr bool values_up_to_statically_known_to_increase(const IndexTuple&) { + return true; + } +}; +} // namespace internal + + + +template +struct IndexList : internal::IndexTuple { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Index operator[] (const Index i) const { + return internal::tuple_coeff >::value-1, Index>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr Index get(const Index i) const { + return internal::tuple_coeff >::value-1, Index>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const Index i, const Index value) { + return internal::tuple_coeff >::value-1, Index>::set(i, *this, value); + } + + EIGEN_DEVICE_FUNC constexpr IndexList(const internal::IndexTuple& other) : internal::IndexTuple(other) { } + EIGEN_DEVICE_FUNC constexpr IndexList(FirstType& first, OtherTypes... other) : internal::IndexTuple(first, other...) 
{ } + EIGEN_DEVICE_FUNC constexpr IndexList() : internal::IndexTuple() { } + + EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const Index i) const { + return internal::tuple_coeff >::value-1, Index>::value_known_statically(i, *this); + } + EIGEN_DEVICE_FUNC constexpr bool all_values_known_statically() const { + return internal::tuple_coeff >::value-1, Index>::values_up_to_known_statically(*this); + } + + EIGEN_DEVICE_FUNC constexpr bool values_statically_known_to_increase() const { + return internal::tuple_coeff >::value-1, Index>::values_up_to_statically_known_to_increase(*this); + } +}; + +template +std::ostream& operator<<(std::ostream& os, + const IndexList& dims) { + os << "["; + for (size_t i = 0; i < 1 + sizeof...(OtherTypes); ++i) { + if (i > 0) os << ", "; + os << dims[i]; + } + os << "]"; + return os; +} + +template +constexpr IndexList make_index_list(FirstType val1, OtherTypes... other_vals) { + return IndexList(val1, other_vals...); +} + + +template +struct IndexPairList : internal::IndexTuple { + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC constexpr IndexPair operator[] (const Index i) const { + return internal::tuple_coeff >::value-1, IndexPair>::get(i, *this); + } + EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC void set(const Index i, const IndexPair value) { + return internal::tuple_coeff>::value-1, IndexPair >::set(i, *this, value); + } + + EIGEN_DEVICE_FUNC constexpr IndexPairList(const internal::IndexTuple& other) : internal::IndexTuple(other) { } + EIGEN_DEVICE_FUNC constexpr IndexPairList() : internal::IndexTuple() { } + + EIGEN_DEVICE_FUNC constexpr bool value_known_statically(const Index i) const { + return internal::tuple_coeff >::value-1, Index>::value_known_statically(i, *this); + } +}; + +namespace internal { + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index array_prod(const IndexList& sizes) { + Index result = 1; + EIGEN_UNROLL_LOOP + for (size_t i = 0; i < array_size >::value; ++i) { + result *= sizes[i]; + } + return result; +} + 
+template struct array_size > { + static const size_t value = array_size >::value; +}; +template struct array_size > { + static const size_t value = array_size >::value; +}; + +template struct array_size > { + static const size_t value = std::tuple_size >::value; +}; +template struct array_size > { + static const size_t value = std::tuple_size >::value; +}; + +template EIGEN_DEVICE_FUNC constexpr Index array_get(IndexList& a) { + return IndexTupleExtractor::get_val(a); +} +template EIGEN_DEVICE_FUNC constexpr Index array_get(const IndexList& a) { + return IndexTupleExtractor::get_val(a); +} + +template +struct index_known_statically_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index) { + return false; + } +}; + +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i) { + return IndexList().value_known_statically(i); + } +}; + +template +struct index_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i) { + return IndexList().value_known_statically(i); + } +}; + + +template +struct all_indices_known_statically_impl { + static constexpr bool run() { + return false; + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return IndexList().all_values_known_statically(); + } +}; + +template +struct all_indices_known_statically_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return IndexList().all_values_known_statically(); + } +}; + + +template +struct indices_statically_known_to_increase_impl { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return false; + } +}; + +template + struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return Eigen::IndexList().values_statically_known_to_increase(); + } +}; + +template + struct indices_statically_known_to_increase_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run() { + return 
Eigen::IndexList().values_statically_known_to_increase(); + } +}; + + +template +struct index_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) == value); + } +}; + +template +struct index_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) == value); + } +}; + + +template +struct index_statically_ne_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) != value); + } +}; + +template +struct index_statically_ne_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) != value); + } +}; + + +template +struct index_statically_gt_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) > value); + } +}; + +template +struct index_statically_gt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) > value); + } +}; + + + +template +struct index_statically_lt_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_lt_impl > { + 
EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) < value); + } +}; + +template +struct index_statically_lt_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexList().value_known_statically(i) & + (IndexList().get(i) < value); + } +}; + + + +template +struct index_pair_first_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_first_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).first == value); + } +}; + +template +struct index_pair_first_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).first == value); + } +}; + + + +template +struct index_pair_second_statically_eq_impl { + EIGEN_DEVICE_FUNC static constexpr bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_second_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).second == value); + } +}; + +template +struct index_pair_second_statically_eq_impl > { + EIGEN_DEVICE_FUNC static constexpr bool run(const Index i, const Index value) { + return IndexPairList().value_known_statically(i) & + (IndexPairList().operator[](i).second == value); + } +}; + + +} // end namespace internal +} // end namespace Eigen + +#else + +namespace Eigen { +namespace internal { + +template +struct index_known_statically_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const Index) { + return false; + } +}; + +template +struct 
all_indices_known_statically_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return false; + } +}; + +template +struct indices_statically_known_to_increase_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() { + return false; + } +}; + +template +struct index_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_ne_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_gt_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_statically_lt_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_first_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + +template +struct index_pair_second_statically_eq_impl { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(Index, Index) { + return false; + } +}; + + + +} // end namespace internal +} // end namespace Eigen + +#endif + + +namespace Eigen { +namespace internal { +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_known_statically(Index i) { + return index_known_statically_impl::run(i); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool all_indices_known_statically() { + return all_indices_known_statically_impl::run(); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool indices_statically_known_to_increase() { + return indices_statically_known_to_increase_impl::run(); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_eq(Index i, Index value) { + return index_statically_eq_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_ne(Index i, Index value) { + return 
index_statically_ne_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_gt(Index i, Index value) { + return index_statically_gt_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_statically_lt(Index i, Index value) { + return index_statically_lt_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_first_statically_eq(Index i, Index value) { + return index_pair_first_statically_eq_impl::run(i, value); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR bool index_pair_second_statically_eq(Index i, Index value) { + return index_pair_second_statically_eq_impl::run(i, value); +} + +} // end namespace internal +} // end namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INDEX_LIST_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h new file mode 100644 index 0000000..c5cb61a --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h @@ -0,0 +1,247 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Ke Yang +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H +#define EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H + +namespace Eigen { + +/** \class TensorInflation + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor inflation class. 
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorInflationOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorInflationOp type; +}; + +} // end namespace internal + +template +class TensorInflationOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorInflationOp(const XprType& expr, const Strides& strides) + : m_xpr(expr), m_strides(strides) {} + + EIGEN_DEVICE_FUNC + const Strides& strides() const { return m_strides; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Strides m_strides; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorInflationOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const 
int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/ false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_strides(op.strides()) + { + m_dimensions = m_impl.dimensions(); + // Expand each dimension to the inflated dimension. + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] = (m_dimensions[i] - 1) * op.strides()[i] + 1; + } + + // Remember the strides for fast division. 
+ for (int i = 0; i < NumDims; ++i) { + m_fastStrides[i] = internal::TensorIntDivisor(m_strides[i]); + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + } + } else { // RowMajor + m_outputStrides[NumDims-1] = 1; + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + // Computes the input index given the output index. Returns true if the output + // index doesn't fall into a hole. 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool getInputIndex(Index index, Index* inputIndex) const + { + eigen_assert(index < dimensions().TotalSize()); + *inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (idx != idx / m_fastStrides[i] * m_strides[i]) { + return false; + } + *inputIndex += idx / m_strides[i] * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (index != index / m_fastStrides[0] * m_strides[0]) { + return false; + } + *inputIndex += index / m_strides[0]; + return true; + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + if (idx != idx / m_fastStrides[i] * m_strides[i]) { + return false; + } + *inputIndex += idx / m_strides[i] * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (index != index / m_fastStrides[NumDims-1] * m_strides[NumDims-1]) { + return false; + } + *inputIndex += index / m_strides[NumDims - 1]; + } + return true; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + Index inputIndex = 0; + if (getInputIndex(index, &inputIndex)) { + return m_impl.coeff(inputIndex); + } else { + return Scalar(0); + } + } + + // TODO(yangke): optimize this function so that we can detect and produce + // all-zero packets + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + 
const double compute_cost = NumDims * (3 * TensorOpCost::DivCost() + + 3 * TensorOpCost::MulCost() + + 2 * TensorOpCost::AddCost()); + const double input_size = m_impl.dimensions().TotalSize(); + const double output_size = m_dimensions.TotalSize(); + if (output_size == 0) + return TensorOpCost(); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(sizeof(CoeffReturnType) * input_size / output_size, 0, + compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + const Strides m_strides; + array, NumDims> m_fastStrides; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INFLATION_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h new file mode 100644 index 0000000..26a3818 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h @@ -0,0 +1,82 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H +#define EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H + +#if EIGEN_HAS_VARIADIC_TEMPLATES + +#include + +namespace Eigen { + +/** \class TensorInitializer + * \ingroup CXX11_Tensor_Module + * + * \brief Helper template to initialize Tensors from std::initializer_lists. 
+ */ +namespace internal { + +template +struct Initializer { + typedef std::initializer_list< + typename Initializer::InitList> InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>* indices, + const InitList& vals) { + int i = 0; + for (const auto& v : vals) { + (*indices)[traits::NumDimensions - N] = i++; + Initializer::run(tensor, indices, v); + } + } +}; + +template +struct Initializer { + typedef std::initializer_list::Scalar> InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>* indices, + const InitList& vals) { + int i = 0; + // There is likely a faster way to do that than iterating. + for (const auto& v : vals) { + (*indices)[traits::NumDimensions - 1] = i++; + tensor.coeffRef(*indices) = v; + } + } +}; + +template +struct Initializer { + typedef typename traits::Scalar InitList; + + static void run(TensorEvaluator& tensor, + Eigen::array::Index, traits::NumDimensions>*, + const InitList& v) { + tensor.coeffRef(0) = v; + } +}; + + +template +void initialize_tensor(TensorEvaluator& tensor, + const typename Initializer::NumDimensions>::InitList& vals) { + Eigen::array::Index, traits::NumDimensions> indices; + Initializer::NumDimensions>::run(tensor, &indices, vals); +} + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_HAS_VARIADIC_TEMPLATES + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INITIALIZER_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h new file mode 100644 index 0000000..6d5cce4 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h @@ -0,0 +1,263 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H +#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H + + +namespace Eigen { + +/** \internal + * + * \class TensorIntDiv + * \ingroup CXX11_Tensor_Module + * + * \brief Fast integer division by a constant. + * + * See the paper from Granlund and Montgomery for explanation. + * (at https://doi.org/10.1145/773473.178249) + * + * \sa Tensor + */ + +namespace internal { + +namespace { + + // Note: result is undefined if val == 0 + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename internal::enable_if::type count_leading_zeros(const T val) + { +#ifdef EIGEN_GPU_COMPILE_PHASE + return __clz(val); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::clz(val); +#elif EIGEN_COMP_MSVC + unsigned long index; + _BitScanReverse(&index, val); + return 31 - index; +#else + EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); + return __builtin_clz(static_cast(val)); +#endif + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + typename internal::enable_if::type count_leading_zeros(const T val) + { +#ifdef EIGEN_GPU_COMPILE_PHASE + return __clzll(val); +#elif defined(SYCL_DEVICE_ONLY) + return static_cast(cl::sycl::clz(val)); +#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64 + unsigned long index; + _BitScanReverse64(&index, val); + return 63 - index; +#elif EIGEN_COMP_MSVC + // MSVC's _BitScanReverse64 is not available for 32bits builds. 
+ unsigned int lo = (unsigned int)(val&0xffffffff); + unsigned int hi = (unsigned int)((val>>32)&0xffffffff); + int n; + if(hi==0) + n = 32 + count_leading_zeros(lo); + else + n = count_leading_zeros(hi); + return n; +#else + EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); + return __builtin_clzll(static_cast(val)); +#endif + } + + template + struct UnsignedTraits { + typedef typename conditional::type type; + }; + + template + struct DividerTraits { + typedef typename UnsignedTraits::type type; + static const int N = sizeof(T) * 8; + }; + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) { +#if defined(EIGEN_GPU_COMPILE_PHASE) + return __umulhi(a, b); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::mul_hi(a, static_cast(b)); +#else + return (static_cast(a) * b) >> 32; +#endif + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) { +#if defined(EIGEN_GPU_COMPILE_PHASE) + return __umul64hi(a, b); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::mul_hi(a, static_cast(b)); +#elif EIGEN_HAS_BUILTIN_INT128 + __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b); + return static_cast(v >> 64); +#else + return (TensorUInt128, uint64_t>(a) * TensorUInt128, uint64_t>(b)).upper(); +#endif + } + + template + struct DividerHelper { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) { + EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE); + return static_cast((static_cast(1) << (N+log_div)) / divider - (static_cast(1) << N) + 1); + } + }; + + template + struct DividerHelper<64, T> { + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) { +#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) + return static_cast((static_cast<__uint128_t>(1) << (64+log_div)) / 
static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1); +#else + const uint64_t shift = 1ULL << log_div; + TensorUInt128 result = TensorUInt128 >(shift, 0) / TensorUInt128, uint64_t>(divider) + - TensorUInt128, static_val<0> >(1, 0) + + TensorUInt128, static_val<1> >(1); + return static_cast(result); +#endif + } + }; +} + + +template +struct TensorIntDivisor { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { + multiplier = 0; + shift1 = 0; + shift2 = 0; + } + + // Must have 0 < divider < 2^31. This is relaxed to + // 0 < divider < 2^63 when using 64-bit indices on platforms that support + // the __uint128_t type. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) { + const int N = DividerTraits::N; + eigen_assert(static_cast::type>(divider) < NumTraits::highest()/2); + eigen_assert(divider > 0); + + // fast ln2 + const int leading_zeros = count_leading_zeros(static_cast(divider)); + int log_div = N - leading_zeros; + // if divider is a power of two then log_div is 1 more than it should be. + if ((static_cast::type>(1) << (log_div-1)) == static_cast::type>(divider)) + log_div--; + + multiplier = DividerHelper::computeMultiplier(log_div, divider); + shift1 = log_div > 1 ? 1 : log_div; + shift2 = log_div > 1 ? log_div-1 : 0; + } + + // Must have 0 <= numerator. On platforms that don't support the __uint128_t + // type numerator should also be less than 2^32-1. 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const { + eigen_assert(static_cast::type>(numerator) < NumTraits::highest()/2); + //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above + + UnsignedType t1 = muluh(multiplier, numerator); + UnsignedType t = (static_cast(numerator) - t1) >> shift1; + return (t1 + t) >> shift2; + } + + private: + typedef typename DividerTraits::type UnsignedType; + UnsignedType multiplier; + int32_t shift1; + int32_t shift2; +}; + + +// Optimized version for signed 32 bit integers. +// Derived from Hacker's Delight. +// Only works for divisors strictly greater than one +template <> +class TensorIntDivisor { + public: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { + magic = 0; + shift = 0; + } + // Must have 2 <= divider + EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) { + eigen_assert(divider >= 2); + calcMagic(divider); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const { +#ifdef EIGEN_GPU_COMPILE_PHASE + return (__umulhi(magic, n) >> shift); +#elif defined(SYCL_DEVICE_ONLY) + return (cl::sycl::mul_hi(magic, static_cast(n)) >> shift); +#else + uint64_t v = static_cast(magic) * static_cast(n); + return (static_cast(v >> 32) >> shift); +#endif + } + +private: + // Compute the magic numbers. See Hacker's Delight section 10 for an in + // depth explanation. + EIGEN_DEVICE_FUNC void calcMagic(int32_t d) { + const unsigned two31 = 0x80000000; // 2**31. + unsigned ad = d; + unsigned t = two31 + (ad >> 31); + unsigned anc = t - 1 - t%ad; // Absolute value of nc. + int p = 31; // Init. p. + unsigned q1 = two31/anc; // Init. q1 = 2**p/|nc|. + unsigned r1 = two31 - q1*anc; // Init. r1 = rem(2**p, |nc|). + unsigned q2 = two31/ad; // Init. q2 = 2**p/|d|. + unsigned r2 = two31 - q2*ad; // Init. r2 = rem(2**p, |d|). + unsigned delta = 0; + do { + p = p + 1; + q1 = 2*q1; // Update q1 = 2**p/|nc|. + r1 = 2*r1; // Update r1 = rem(2**p, |nc|). 
+ if (r1 >= anc) { // (Must be an unsigned + q1 = q1 + 1; // comparison here). + r1 = r1 - anc;} + q2 = 2*q2; // Update q2 = 2**p/|d|. + r2 = 2*r2; // Update r2 = rem(2**p, |d|). + if (r2 >= ad) { // (Must be an unsigned + q2 = q2 + 1; // comparison here). + r2 = r2 - ad;} + delta = ad - r2; + } while (q1 < delta || (q1 == delta && r1 == 0)); + + magic = (unsigned)(q2 + 1); + shift = p - 32; + } + + uint32_t magic; + int32_t shift; +}; + + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor& divisor) { + return divisor.divide(numerator); +} + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h new file mode 100644 index 0000000..80106c1 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h @@ -0,0 +1,216 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H + +namespace Eigen { + +/** \class TensorLayoutSwap + * \ingroup CXX11_Tensor_Module + * + * \brief Swap the layout from col-major to row-major, or row-major + * to col-major, and invert the order of the dimensions. + * + * Beware: the dimensions are reversed by this operation. If you want to + * preserve the ordering of the dimensions, you need to combine this + * operation with a shuffle. 
+ * + * \example: + * Tensor input(2, 4); + * Tensor output = input.swap_layout(); + * eigen_assert(output.dimension(0) == 4); + * eigen_assert(output.dimension(1) == 2); + * + * array shuffle(1, 0); + * output = input.swap_layout().shuffle(shuffle); + * eigen_assert(output.dimension(0) == 2); + * eigen_assert(output.dimension(1) == 4); + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = traits::NumDimensions; + static const int Layout = (traits::Layout == ColMajor) ? RowMajor : ColMajor; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorLayoutSwapOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorLayoutSwapOp type; +}; + +} // end namespace internal + + + +template +class TensorLayoutSwapOp : public TensorBase, WriteAccessors> +{ + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorLayoutSwapOp(const XprType& expr) + : m_xpr(expr) {} + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorLayoutSwapOp) + protected: + typename XprType::Nested m_xpr; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef 
TensorLayoutSwapOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? RowMajor : ColMajor, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + for(int i = 0; i < NumDims; ++i) { + m_dimensions[i] = m_impl.dimensions()[NumDims-1-i]; + } + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + return m_impl.evalSubExprsIfNeeded(data); + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_impl.template packet(index); + } 
+ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { + return constCast(m_impl.data()); + } + + const TensorEvaluator& impl() const { return m_impl; } + + protected: + TensorEvaluator m_impl; + Dimensions m_dimensions; +}; + + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorLayoutSwapOp XprType; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = (static_cast(TensorEvaluator::Layout) == static_cast(ColMajor)) ? RowMajor : ColMajor, + CoordAccess = false // to be implemented + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(index); + } + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + this->m_impl.template writePacket(index, x); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h new file mode 100644 index 0000000..73ff3d2 --- /dev/null +++ 
b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h @@ -0,0 +1,98 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H +#define EIGEN_CXX11_TENSOR_TENSOR_META_MACROS_H + + +/** use this macro in sfinae selection in templated functions + * + * template::value , int >::type = 0 + * > + * void foo(){} + * + * becomes => + * + * template::value ) + * > + * void foo(){} + */ + +// SFINAE requires variadic templates +#if !defined(EIGEN_GPUCC) +#if EIGEN_HAS_VARIADIC_TEMPLATES + // SFINAE doesn't work for gcc <= 4.7 + #ifdef EIGEN_COMP_GNUC + #if EIGEN_GNUC_AT_LEAST(4,8) + #define EIGEN_HAS_SFINAE + #endif + #else + #define EIGEN_HAS_SFINAE + #endif +#endif +#endif + +#define EIGEN_SFINAE_ENABLE_IF( __condition__ ) \ + typename internal::enable_if< ( __condition__ ) , int >::type = 0 + +// Define a macro to use a reference on the host but a value on the device +#if defined(SYCL_DEVICE_ONLY) + #define EIGEN_DEVICE_REF +#else + #define EIGEN_DEVICE_REF & +#endif + +// Define a macro for catching SYCL exceptions if exceptions are enabled +#define EIGEN_SYCL_TRY_CATCH(X) \ + do { \ + EIGEN_TRY {X;} \ + EIGEN_CATCH(const cl::sycl::exception& e) { \ + EIGEN_THROW_X(std::runtime_error("SYCL exception at " + \ + std::string(__FILE__) + ":" + \ + std::to_string(__LINE__) + "\n" + \ + e.what())); \ + } \ + } while (false) + +// Define a macro if local memory flags are unset or one of them is set +// Setting both flags is the same as unsetting them +#if (!defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM)) || \ + (defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM)) + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON 
1 + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF 1 +#elif defined(EIGEN_SYCL_LOCAL_MEM) && !defined(EIGEN_SYCL_NO_LOCAL_MEM) + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_ON 1 +#elif !defined(EIGEN_SYCL_LOCAL_MEM) && defined(EIGEN_SYCL_NO_LOCAL_MEM) + #define EIGEN_SYCL_LOCAL_MEM_UNSET_OR_OFF 1 +#endif + +#if EIGEN_COMP_CLANG // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653) + #define EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + using Base::operator =; \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \ + template \ + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const OtherDerived& other) { Base::operator=(other); return *this; } +#else + #define EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) +#endif + +/** \internal + * \brief Macro to manually inherit assignment operators. + * This is necessary, because the implicitly defined assignment operator gets deleted when a custom operator= is defined. + * This also inherits template operator=(const OtherDerived&) assignments. + * With C++11 or later this also default-implements the copy-constructor + */ +#define EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ + EIGEN_TENSOR_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \ + EIGEN_DEFAULT_COPY_CONSTRUCTOR(Derived) + +#endif diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h new file mode 100644 index 0000000..6834c97 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h @@ -0,0 +1,327 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MAP_H +#define EIGEN_CXX11_TENSOR_TENSOR_MAP_H + +namespace Eigen { + +// FIXME use proper doxygen documentation (e.g. \tparam MakePointer_) + +/** \class TensorMap + * \ingroup CXX11_Tensor_Module + * + * \brief A tensor expression mapping an existing array of data. + * + */ +/// `template class MakePointer_` is added to convert the host pointer to the device pointer. +/// It is added due to the fact that for our device compiler `T*` is not allowed. +/// If we wanted to use the same Evaluator functions we have to convert that type to our pointer `T`. +/// This is done through our `MakePointer_` class. By default the Type in the `MakePointer_` is `T*` . +/// Therefore, by adding the default value, we managed to convert the type and it does not break any +/// existing code as its default value is `T*`. +template class MakePointer_> class TensorMap : public TensorBase > +{ + public: + typedef TensorMap Self; + typedef TensorBase > Base; + #ifdef EIGEN_USE_SYCL + typedef typename Eigen::internal::remove_reference::type>::type Nested; + #else + typedef typename Eigen::internal::nested::type Nested; + #endif + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename PlainObjectType::Base::CoeffReturnType CoeffReturnType; + + typedef typename MakePointer_::Type PointerType; + typedef typename MakePointer_::ConstType PointerConstType; + + // WARN: PointerType still can be a pointer to const (const Scalar*), for + // example in TensorMap> expression. This type of + // expression should be illegal, but adding this restriction is not possible + // in practice (see https://bitbucket.org/eigen/eigen/pull-requests/488). 
+ typedef typename internal::conditional< + bool(internal::is_lvalue::value), + PointerType, // use simple pointer in lvalue expressions + PointerConstType // use const pointer in rvalue expressions + >::type StoragePointerType; + + // If TensorMap was constructed over rvalue expression (e.g. const Tensor), + // we should return a reference to const from operator() (and others), even + // if TensorMap itself is not const. + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar&, + const Scalar& + >::type StorageRefType; + + static const int Options = Options_; + + static const Index NumIndices = PlainObjectType::NumIndices; + typedef typename PlainObjectType::Dimensions Dimensions; + + enum { + IsAligned = ((int(Options_)&Aligned)==Aligned), + Layout = PlainObjectType::Layout, + CoordAccess = true, + RawAccess = true + }; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr) : m_data(dataPtr), m_dimensions() { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT((0 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension, IndexTypes... otherDimensions) : m_data(dataPtr), m_dimensions(firstDimension, otherDimensions...) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. + EIGEN_STATIC_ASSERT((sizeof...(otherDimensions) + 1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index firstDimension) : m_data(dataPtr), m_dimensions(firstDimension) { + // The number of dimensions used to construct a tensor must be equal to the rank of the tensor. 
+ EIGEN_STATIC_ASSERT((1 == NumIndices || NumIndices == Dynamic), YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2) : m_data(dataPtr), m_dimensions(dim1, dim2) { + EIGEN_STATIC_ASSERT(2 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3) { + EIGEN_STATIC_ASSERT(3 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4) { + EIGEN_STATIC_ASSERT(4 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, Index dim1, Index dim2, Index dim3, Index dim4, Index dim5) : m_data(dataPtr), m_dimensions(dim1, dim2, dim3, dim4, dim5) { + EIGEN_STATIC_ASSERT(5 == NumIndices || NumIndices == Dynamic, YOU_MADE_A_PROGRAMMING_MISTAKE) + } +#endif + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const array& dimensions) + : m_data(dataPtr), m_dimensions(dimensions) + { } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(StoragePointerType dataPtr, const Dimensions& dimensions) + : m_data(dataPtr), m_dimensions(dimensions) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorMap(PlainObjectType& tensor) + : m_data(tensor.data()), m_dimensions(tensor.dimensions()) + { } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rank() const { return m_dimensions.rank(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_dimensions[n]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + 
EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StoragePointerType data() { return m_data; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StoragePointerType data() const { return m_data; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(const array& indices) const + { + // eigen_assert(checkIndexRange(indices)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(indices); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(indices); + return m_data[index]; + } + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()() const + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE) + return m_data[0]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... 
otherIndices) const + { + EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(internal::all((Eigen::NumTraits::highest() >= otherIndices)...)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i1 + i0 * m_dimensions[1]; + return m_data[index]; + } else { + const Index index = i0 + i1 * m_dimensions[0]; + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3)); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0))); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * 
(i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4))); + return m_data[index]; + } + } +#endif + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(const array& indices) + { + // eigen_assert(checkIndexRange(indices)); + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(indices); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(indices); + return m_data[index]; + } + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()() + { + EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE) + return m_data[0]; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return m_data[index]; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) + { + static_assert(sizeof...(otherIndices) + 2 == NumIndices || NumIndices == Dynamic, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); + eigen_assert(internal::all((Eigen::NumTraits::highest() >= otherIndices)...)); + const std::size_t NumDims = sizeof...(otherIndices) + 2; + if (PlainObjectType::Options&RowMajor) { + const Index index = m_dimensions.IndexOfRowMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } else { + const Index index = m_dimensions.IndexOfColMajor(array{{firstIndex, secondIndex, otherIndices...}}); + return m_data[index]; + } + } +#else + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i1 + i0 * m_dimensions[1]; + return m_data[index]; + } else { + const Index index = i0 + i1 * m_dimensions[0]; + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + 
EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * i2); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0)); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * i3)); + return m_data[index]; + } + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE StorageRefType operator()(Index i0, Index i1, Index i2, Index i3, Index i4) + { + if (PlainObjectType::Options&RowMajor) { + const Index index = i4 + m_dimensions[4] * (i3 + m_dimensions[3] * (i2 + m_dimensions[2] * (i1 + m_dimensions[1] * i0))); + return m_data[index]; + } else { + const Index index = i0 + m_dimensions[0] * (i1 + m_dimensions[1] * (i2 + m_dimensions[2] * (i3 + m_dimensions[3] * i4))); + return m_data[index]; + } + } +#endif + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorMap) + + private: + StoragePointerType m_data; + Dimensions m_dimensions; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_MAP_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h new file mode 100644 index 0000000..a6181d3 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h @@ -0,0 +1,311 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_META_H +#define EIGEN_CXX11_TENSOR_TENSOR_META_H + +namespace Eigen { + +template struct Cond {}; + +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +const T1& choose(Cond, const T1& first, const T2&) { + return first; +} + +template EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +const T2& choose(Cond, const T1&, const T2& second) { + return second; +} + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T divup(const X x, const Y y) { + return static_cast((x + y - 1) / y); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T divup(const T x, const T y) { + return static_cast((x + y - 1) / y); +} + +template struct max_n_1 { + static const size_t size = n; +}; +template <> struct max_n_1<0> { + static const size_t size = 1; +}; + + +// Default packet types +template +struct PacketType : internal::packet_traits { + typedef typename internal::packet_traits::type type; +}; + +// For CUDA packet types when using a GpuDevice +#if defined(EIGEN_USE_GPU) && defined(EIGEN_HAS_GPU_FP16) + +typedef ulonglong2 Packet4h2; +template<> +struct PacketType { + typedef Packet4h2 type; + static const int size = 8; + enum { + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasArg = 0, + HasAbs2 = 0, + HasMin = 1, + HasMax = 1, + HasConj = 0, + HasSetLinear = 0, + HasBlend = 0, + + HasDiv = 1, + HasSqrt = 1, + HasRsqrt = 1, + HasExp = 1, + HasExpm1 = 0, + HasLog = 1, + HasLog1p = 0, + HasLog10 = 0, + HasPow = 1, + }; +}; +#endif + +#if defined(EIGEN_USE_SYCL) + +namespace TensorSycl { +namespace internal { + +template struct PlusOp { + static constexpr Index Value = A + B; +}; + +template struct DivOp { + static constexpr Index Value = A / B; +}; + +template class StepOp> +struct static_for { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void loop(UnaryOperator op) { + op(start); + static_for::Value, end, 
step, + StepOp>::loop(op); + } +}; +template class StepOp> +struct static_for { + template + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void loop(UnaryOperator) {} +}; + +template +struct Vectorise { + static const int PacketSize = 1; + typedef OutScalar PacketReturnType; +}; + +template +struct Vectorise { + static const int PacketSize = Eigen::PacketType::size; + typedef typename Eigen::PacketType::type PacketReturnType; +}; + +static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index roundUp(Index x, Index y) { + return ((((x) + (y)-1) / (y)) * (y)); +} + +} // namespace internal +} // namespace TensorSycl + +template <> + struct PacketType { + typedef half type; + static const int size = 1; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasArg = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0, + HasBlend = 0 + }; +}; +template +struct PacketType : internal::default_packet_traits { + typedef Scalar type; + typedef Scalar half; + enum { + Vectorizable = 0, + size = 1, + AlignedOnScalar = 0, + HasHalfPacket = 0 + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0 + }; + +}; + +template +struct PacketType : PacketType{}; + +#ifndef EIGEN_DONT_VECTORIZE_SYCL +#define PACKET_TYPE(CVQual, Type, val, lengths, DEV)\ +template<> struct PacketType : internal::sycl_packet_traits \ +{\ + typedef typename internal::packet_traits::type type;\ + typedef typename internal::packet_traits::half half;\ +}; + + +PACKET_TYPE(const, float, 1, 4, SyclDevice) +PACKET_TYPE(, float, 1, 4, SyclDevice) +PACKET_TYPE(const, float, 1, 4, const SyclDevice) +PACKET_TYPE(, float, 1, 4, const SyclDevice) + +PACKET_TYPE(const, double, 0, 2, SyclDevice) +PACKET_TYPE(, double, 0, 2, SyclDevice) +PACKET_TYPE(const, double, 0, 2, const SyclDevice) +PACKET_TYPE(, double, 0, 2, const SyclDevice) +#undef PACKET_TYPE + +template<> 
struct PacketType: PacketType{}; +template<> struct PacketType: PacketType{}; +#endif +#endif + +// Tuple mimics std::pair but works on e.g. nvcc. +template struct Tuple { + public: + U first; + V second; + + typedef U first_type; + typedef V second_type; + + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple() : first(), second() {} + + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Tuple(const U& f, const V& s) : first(f), second(s) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void swap(Tuple& rhs) { + using numext::swap; + swap(first, rhs.first); + swap(second, rhs.second); + } +}; + +template +EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +bool operator==(const Tuple& x, const Tuple& y) { + return (x.first == y.first && x.second == y.second); +} + +template +EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +bool operator!=(const Tuple& x, const Tuple& y) { + return !(x == y); +} + + +// Can't use std::pairs on cuda devices +template struct IndexPair { + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {} + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s) {} + + EIGEN_DEVICE_FUNC void set(IndexPair val) { + first = val.first; + second = val.second; + } + + Idx first; + Idx second; +}; + + +#ifdef EIGEN_HAS_SFINAE +namespace internal { + + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType& idx, numeric_list) { + return { idx[Is]... 
}; + } + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType&, numeric_list) { + return array(); + } + + /** Make an array (for index/dimensions) out of a custom index */ + template + EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + array customIndices2Array(IndexType& idx) { + return customIndices2Array(idx, typename gen_numeric_list::type{}); + } + + + template + struct is_base_of + { + + typedef char (&yes)[1]; + typedef char (&no)[2]; + + template + struct Host + { + operator BB*() const; + operator DD*(); + }; + + template + static yes check(D*, T); + static no check(B*, int); + + static const bool value = sizeof(check(Host(), int())) == sizeof(yes); + }; + +} +#endif + + + +} // namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_META_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h new file mode 100644 index 0000000..b3f00f7 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h @@ -0,0 +1,1102 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H +#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H + +namespace Eigen { + +/** \class TensorReshaping + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reshaping class. 
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorReshapingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorReshapingOp type; +}; + +} // end namespace internal + + + +template +class TensorReshapingOp : public TensorBase, WriteAccessors> +{ + public: + typedef TensorBase, WriteAccessors> Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims) + : m_xpr(expr), m_dims(dims) {} + + EIGEN_DEVICE_FUNC + const NewDimensions& dimensions() const { return m_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReshapingOp) + + protected: + typename XprType::Nested m_xpr; + const NewDimensions m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorReshapingOp XprType; + typedef NewDimensions Dimensions; + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + 
typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef StorageMemory::type, Device> ConstCastStorage; + + static const int NumOutputDims = internal::array_size::value; + static const int NumInputDims = internal::array_size::Dimensions>::value; + + enum ReshapingKind { + // We do not use layout information to determine reshaping kind. + // Depending on the layout `N` can be inner or outer dimension. + OneByN = 0, // expr.reshape(1, N) + NByOne = 1, // expr.reshape(N, 1) + Runtime = 2 // Reshape dimensions are dynamic (specified at runtime). + }; + + // clang-format off + static const ReshapingKind kind = +#if defined(EIGEN_HAS_INDEX_LIST) + (NumOutputDims == 2 && internal::index_statically_eq(/*index=*/0, /*value=*/1)) ? OneByN + : (NumOutputDims == 2 && internal::index_statically_eq(/*index=*/1, /*value=*/1)) ? NByOne + : Runtime; +#else + Runtime; +#endif + // clang-format on + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + // For trivial reshapes with raw access to underlying data we will provide + // zero overhead block access. + // TODO(ezhulenev): Consider adding block access without raw access? 
+ BlockAccess = TensorEvaluator::RawAccess && + NumInputDims > 0 && NumOutputDims > 0, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef + typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_dimensions(op.dimensions()) + { + // The total size of the reshaped tensor must be equal to the total size + // of the input tensor. + eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions())); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType data, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(data, std::move(done)); + } +#endif + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + return m_impl.evalSubExprsIfNeeded(data); + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(index); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + return m_impl.template packet(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + 
internal::TensorBlockResourceRequirements getResourceRequirements() const { + return internal::TensorBlockResourceRequirements::any(); + } + + // required in block(OutputTensorBlock* output_block) const + // For C++03 compatibility this must be defined outside the method + struct BlockIteratorState { + Index stride; + Index span; + Index size; + Index count; + }; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + eigen_assert(m_impl.data() != NULL); + eigen_assert((kind == Runtime) || + (kind == OneByN && desc.dimensions()[0] == 1) || + (kind == NByOne && desc.dimensions()[1] == 1)); + + if (kind == OneByN || kind == NByOne) { + // We can guarantee at compile time that block is just a contiguous slice + // of the underlying expression memory buffer. + return TensorBlock(internal::TensorBlockKind::kView, + m_impl.data() + desc.offset(), desc.dimensions()); + } else { + // This will do additional runtime checks, and in the end it might be also + // a view, or it might be a block materialized in the temporary buffer. 
+ return TensorBlock::materialize(m_impl.data(), m_dimensions, desc, + scratch); + } + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { + return constCast(m_impl.data()); + } + + EIGEN_DEVICE_FUNC const TensorEvaluator& impl() const { return m_impl; } + + #ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } + #endif + protected: + TensorEvaluator m_impl; + NewDimensions m_dimensions; +}; + + +// Eval as lvalue +template + struct TensorEvaluator, Device> + : public TensorEvaluator, Device> + +{ + typedef TensorEvaluator, Device> Base; + typedef TensorReshapingOp XprType; + typedef NewDimensions Dimensions; + + enum { + IsAligned = TensorEvaluator::IsAligned, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = TensorEvaluator::RawAccess + }; + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor + TensorBlockDesc; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(index); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + this->m_impl.template writePacket(index, x); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, 
const TensorBlock& block) { + assert(this->m_impl.data() != NULL); + + typedef typename TensorBlock::XprType TensorBlockExpr; + typedef internal::TensorBlockAssignment< + Scalar, TensorEvaluator::NumOutputDims, TensorBlockExpr, Index> + TensorBlockAssign; + + TensorBlockAssign::Run( + TensorBlockAssign::target(desc.dimensions(), + internal::strides(this->dimensions()), + this->m_impl.data(), desc.offset()), + block.expr()); + } +}; + + +/** \class TensorSlicing + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor slicing class. + * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorSlicingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorSlicingOp type; +}; + +} // end namespace internal + + + +template +class TensorSlicingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes) + : m_xpr(expr), m_indices(indices), m_sizes(sizes) {} + + EIGEN_DEVICE_FUNC + const StartIndices& startIndices() const { return m_indices; } + EIGEN_DEVICE_FUNC + const Sizes& sizes() const { return 
m_sizes; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorSlicingOp) + + protected: + typename XprType::Nested m_xpr; + const StartIndices m_indices; + const Sizes m_sizes; +}; + + +// Fixme: figure out the exact threshold +namespace { +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { } + EIGEN_DEVICE_FUNC bool operator ()(Index total, Index contiguous) const { + const bool prefer_block_evaluation = BlockAccess && total > 32*1024; + return !prefer_block_evaluation && contiguous > threshold_; + } + + private: + Index threshold_; +}; + +// It is very expensive to start the memcpy kernel on GPU: we therefore only +// use it for large copies. +#ifdef EIGEN_USE_GPU +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { } + EIGEN_DEVICE_FUNC bool operator ()(Index, Index contiguous) const { return contiguous > 4*1024*1024; } +}; +#endif + +// It is very expensive to start the memcpy kernel on GPU: we therefore only +// use it for large copies. 
+#ifdef EIGEN_USE_SYCL +template struct MemcpyTriggerForSlicing { + EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const SyclDevice&) { } + EIGEN_DEVICE_FUNC bool operator ()(Index, Index contiguous) const { return contiguous > 4*1024*1024; } +}; +#endif + +} + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Sizes Dimensions; + typedef StorageMemory Storage; + typedef StorageMemory::type, Device> ConstCastStorage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets and sizes. + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess && + // FIXME: Temporary workaround for bug in slicing of bool tensors. + !internal::is_same::type, bool>::value, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + // Tensor slicing does not change the block type. 
+ typedef typename TensorEvaluator::TensorBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices()) + { + m_is_identity = true; + for (int i = 0; i < internal::array_size::value; ++i) { + eigen_assert(m_impl.dimensions()[i] >= + op.sizes()[i] + op.startIndices()[i]); + if (m_impl.dimensions()[i] != op.sizes()[i] || + op.startIndices()[i] != 0) { + m_is_identity = false; + } + } + + // No strides for scalars. + if (NumDims == 0) return; + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const Sizes& output_dims = op.sizes(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + } + + // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed. + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? m_outputStrides[i] : 1); + } + } else { + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + } + + // Don't initialize m_fastOutputStrides[NumDims-1] since it won't ever be accessed. + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? 
m_outputStrides[i] : 1); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + if (!NumTraits::type>::RequireInitialization + && data && m_impl.data()) { + Index contiguous_values = 1; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + contiguous_values *= dimensions()[i]; + if (dimensions()[i] != m_impl.dimensions()[i]) { + break; + } + } + } else { + for (int i = NumDims-1; i >= 0; --i) { + contiguous_values *= dimensions()[i]; + if (dimensions()[i] != m_impl.dimensions()[i]) { + break; + } + } + } + // Use memcpy if it's going to be faster than using the regular evaluation. + const MemcpyTriggerForSlicing trigger(m_device); + if (trigger(internal::array_prod(dimensions()), contiguous_values)) { + EvaluatorPointerType src = (EvaluatorPointerType)m_impl.data(); + for (Index i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) { + Index offset = srcCoeff(i); + m_device.memcpy((void*)(m_device.get(data + i)), m_device.get(src+offset), contiguous_values * sizeof(Scalar)); + } + return false; + } + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType /*data*/, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (m_is_identity) { + return m_impl.coeff(index); + } else { + return m_impl.coeff(srcCoeff(index)); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + const int packetSize = PacketType::size; + EIGEN_STATIC_ASSERT((packetSize > 1), 
YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+packetSize-1 < internal::array_prod(dimensions())); + + if (m_is_identity) { + return m_impl.template packet(index); + } + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + packetSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_fastOutputStrides[i]; + const Index idx1 = indices[1] / m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i]; + inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + m_offsets[0]); + inputIndices[1] += (indices[1] + m_offsets[0]); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_fastOutputStrides[i]; + const Index idx1 = indices[1] / m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i]; + inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + m_offsets[NumDims-1]); + inputIndices[1] += (indices[1] + m_offsets[NumDims-1]); + } + if (inputIndices[1] - inputIndices[0] == packetSize - 1) { + PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[packetSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[packetSize-1] = m_impl.coeff(inputIndices[1]); + EIGEN_UNROLL_LOOP + for (int i = 1; i < packetSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 
m_is_identity ? 1 : NumDims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + internal::TensorBlockResourceRequirements::skewed(target_size), + m_impl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + TensorBlockDesc arg_desc = desc.WithOffset(srcCoeff(desc.offset())); + TensorBlock block = m_impl.block(arg_desc, scratch); + if (!arg_desc.HasDestinationBuffer()) desc.DropDestinationBuffer(); + return block; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const { + typename Storage::Type result = constCast(m_impl.data()); + if (result) { + Index offset = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims; ++i) { + if (m_dimensions[i] != m_impl.dimensions()[i]) { + offset += m_offsets[i] * m_inputStrides[i]; + for (int j = i+1; j < NumDims; ++j) { + if (m_dimensions[j] > 1) { + return NULL; + } + offset += m_offsets[j] * m_inputStrides[j]; + } + break; + } + } + } else { + for (int i = NumDims - 1; i >= 0; --i) { + if (m_dimensions[i] != m_impl.dimensions()[i]) { + offset += m_offsets[i] * m_inputStrides[i]; + for (int j = i-1; j >= 0; --j) { + if (m_dimensions[j] > 1) { + return NULL; + } + offset += m_offsets[j] * m_inputStrides[j]; + } + break; + } + } + } + return result + offset; + } + return NULL; + } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == 
static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += (index + m_offsets[0]); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += (index + m_offsets[NumDims-1]); + } + return inputIndex; + } + + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + const Device EIGEN_DEVICE_REF m_device; + Dimensions m_dimensions; + bool m_is_identity; + const StartIndices m_offsets; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Sizes Dimensions; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::BlockAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = (NumDims == 1) & TensorEvaluator::RawAccess + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& 
device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + if (this->m_is_identity) { + return this->m_impl.coeffRef(index); + } else { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + if (this->m_is_identity) { + this->m_impl.template writePacket(index, x); + return; + } + + const int packetSize = PacketType::size; + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + packetSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; + const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i]; + inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + this->m_offsets[0]); + inputIndices[1] += (indices[1] + this->m_offsets[0]); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; + const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; + inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i]; + inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]); + inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]); + } + if (inputIndices[1] - inputIndices[0] == packetSize - 1) { + this->m_impl.template writePacket(inputIndices[0], x); + } + else { + EIGEN_ALIGN_MAX CoeffReturnType values[packetSize]; + 
internal::pstore(values, x); + this->m_impl.coeffRef(inputIndices[0]) = values[0]; + this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < packetSize-1; ++i) { + this->coeffRef(index+i) = values[i]; + } + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + TensorBlockDesc arg_desc = desc.WithOffset(this->srcCoeff(desc.offset())); + this->m_impl.writeBlock(arg_desc, block); + } +}; + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorStridingSlicingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorStridingSlicingOp type; +}; + +} // end namespace internal + + +template +class TensorStridingSlicingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename internal::traits::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp( + const XprType& expr, const StartIndices& startIndices, + const StopIndices& stopIndices, const Strides& strides) + : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices), + m_strides(strides) {} + + EIGEN_DEVICE_FUNC + const StartIndices& startIndices() const { return 
m_startIndices; } + EIGEN_DEVICE_FUNC + const StartIndices& stopIndices() const { return m_stopIndices; } + EIGEN_DEVICE_FUNC + const StartIndices& strides() const { return m_strides; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingSlicingOp) + + protected: + typename XprType::Nested m_xpr; + const StartIndices m_startIndices; + const StopIndices m_stopIndices; + const Strides m_strides; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorStridingSlicingOp XprType; + static const int NumDims = internal::array_size::value; + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef Strides Dimensions; + + enum { + // Alignment can't be guaranteed at compile time since it depends on the + // slice offsets and sizes. 
+ IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), + m_device(device), + m_strides(op.strides()) + { + // Handle degenerate intervals by gracefully clamping and allowing m_dimensions to be zero + DSizes startIndicesClamped, stopIndicesClamped; + for (ptrdiff_t i = 0; i < internal::array_size::value; ++i) { + eigen_assert(m_strides[i] != 0 && "0 stride is invalid"); + if (m_strides[i] > 0) { + startIndicesClamped[i] = + clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]); + stopIndicesClamped[i] = + clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]); + } else { + /* implies m_strides[i] < 0 by assert */ + startIndicesClamped[i] = + clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1); + stopIndicesClamped[i] = + clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1); + } + m_startIndices[i] = startIndicesClamped[i]; + } + + typedef typename TensorEvaluator::Dimensions InputDimensions; + const InputDimensions& input_dims = m_impl.dimensions(); + + // compute output tensor shape + m_is_identity = true; + for (int i = 0; i < NumDims; i++) { + Index interval = stopIndicesClamped[i] - startIndicesClamped[i]; + if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) { + m_dimensions[i] = 0; + } else { + m_dimensions[i] = + (interval / m_strides[i]) + (interval % m_strides[i] != 0 ? 
1 : 0); + eigen_assert(m_dimensions[i] >= 0); + } + if (m_strides[i] != 1 || interval != m_impl.dimensions()[i]) { + m_is_identity = false; + } + } + + Strides output_dims = m_dimensions; + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = m_strides[0]; + m_offsets[0] = startIndicesClamped[0]; + Index previousDimProduct = 1; + for (int i = 1; i < NumDims; ++i) { + previousDimProduct *= input_dims[i-1]; + m_inputStrides[i] = previousDimProduct * m_strides[i]; + m_offsets[i] = startIndicesClamped[i] * previousDimProduct; + } + + // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed. + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? m_outputStrides[i] : 1); + } + } else { + m_inputStrides[NumDims-1] = m_strides[NumDims-1]; + m_offsets[NumDims-1] = startIndicesClamped[NumDims-1]; + Index previousDimProduct = 1; + for (int i = NumDims - 2; i >= 0; --i) { + previousDimProduct *= input_dims[i+1]; + m_inputStrides[i] = previousDimProduct * m_strides[i]; + m_offsets[i] = startIndicesClamped[i] * previousDimProduct; + } + + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i] > 0 ? 
m_outputStrides[i] : 1); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (m_is_identity) { + return m_impl.coeff(index); + } else { + return m_impl.coeff(srcCoeff(index)); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, m_is_identity ? 1 : NumDims); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Storage::Type data() const { + return NULL; + } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i >= 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i] + m_offsets[i]; + index -= idx * m_outputStrides[i]; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i] + m_offsets[i]; + index -= idx * m_outputStrides[i]; + } + } + return inputIndex; + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) { +#ifndef SYCL_DEVICE_ONLY + return numext::maxi(min, numext::mini(max,value)); +#else + return cl::sycl::clamp(value, min, max); +#endif + } + + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + bool 
m_is_identity; + TensorEvaluator m_impl; + const Device EIGEN_DEVICE_REF m_device; + DSizes m_startIndices; // clamped startIndices + DSizes m_dimensions; + DSizes m_offsets; // offset in a flattened shape + const Strides m_strides; +}; + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + typedef TensorStridingSlicingOp XprType; + static const int NumDims = internal::array_size::value; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = TensorEvaluator::CoordAccess, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef Strides Dimensions; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + if (this->m_is_identity) { + return this->m_impl.coeffRef(index); + } else { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h new file mode 100644 index 0000000..ee44382 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h @@ -0,0 +1,708 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_PADDING_H +#define EIGEN_CXX11_TENSOR_TENSOR_PADDING_H + +namespace Eigen { + +/** \class TensorPadding + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor padding class. + * At the moment only padding with a constant value is supported. + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorPaddingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorPaddingOp type; +}; + +} // end namespace internal + + + +template +class TensorPaddingOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPaddingOp(const XprType& expr, const PaddingDimensions& padding_dims, const Scalar padding_value) + : m_xpr(expr), m_padding_dims(padding_dims), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC + const PaddingDimensions& padding() const { return 
m_padding_dims; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const PaddingDimensions m_padding_dims; + const Scalar m_padding_value; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorPaddingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = true, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = true, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_padding(op.padding()), m_paddingValue(op.padding_value()), m_device(device) + { + // The padding op doesn't change the rank of the tensor. Directly padding a scalar would lead + // to a vector, which doesn't make sense. Instead one should reshape the scalar into a vector + // of 1 element first and then pad. 
+ EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + // Compute dimensions + m_dimensions = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] += m_padding[i].first + m_padding[i].second; + } + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputStrides[0] = 1; + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1]; + } else { + m_inputStrides[NumDims - 1] = 1; + m_outputStrides[NumDims] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1]; + } + m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + eigen_assert(index < dimensions().TotalSize()); + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + if (isPaddingAtIndexForDim(idx, i)) { + return m_paddingValue; + } + inputIndex += (idx - m_padding[i].first) * 
m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (isPaddingAtIndexForDim(index, 0)) { + return m_paddingValue; + } + inputIndex += (index - m_padding[0].first); + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i+1]; + if (isPaddingAtIndexForDim(idx, i)) { + return m_paddingValue; + } + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i+1]; + } + if (isPaddingAtIndexForDim(index, NumDims-1)) { + return m_paddingValue; + } + inputIndex += (index - m_padding[NumDims-1].first); + } + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + if (static_cast(Layout) == static_cast(ColMajor)) { + return packetColMajor(index); + } + return packetRowMajor(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + TensorOpCost cost = m_impl.costPerCoeff(vectorized); + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims; ++i) + updateCostPerDimension(cost, i, i == 0); + } else { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i >= 0; --i) + updateCostPerDimension(cost, i, i == NumDims - 1); + } + return cost; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + return internal::TensorBlockResourceRequirements::merge( + internal::TensorBlockResourceRequirements::skewed(target_size), + m_impl.getResourceRequirements()); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + // If one of the dimensions is zero, return empty block view. 
+ if (desc.size() == 0) { + return TensorBlock(internal::TensorBlockKind::kView, NULL, + desc.dimensions()); + } + + static const bool IsColMajor = Layout == static_cast(ColMajor); + const int inner_dim_idx = IsColMajor ? 0 : NumDims - 1; + + Index offset = desc.offset(); + + // Compute offsets in the output tensor corresponding to the desc.offset(). + DSizes output_offsets; + for (int i = NumDims - 1; i > 0; --i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + const int stride_dim = IsColMajor ? dim : dim + 1; + output_offsets[dim] = offset / m_outputStrides[stride_dim]; + offset -= output_offsets[dim] * m_outputStrides[stride_dim]; + } + output_offsets[inner_dim_idx] = offset; + + // Offsets in the input corresponding to output offsets. + DSizes input_offsets = output_offsets; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + input_offsets[dim] = input_offsets[dim] - m_padding[dim].first; + } + + // Compute offset in the input buffer (at this point it might be illegal and + // point outside of the input buffer, because we don't check for negative + // offsets, it will be autocorrected in the block iteration loop below). + Index input_offset = 0; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + input_offset += input_offsets[dim] * m_inputStrides[dim]; + } + + // Destination buffer and scratch buffer both indexed from 0 and have the + // same dimensions as the requested block (for destination buffer this + // property is guaranteed by `desc.destination()`). + Index output_offset = 0; + const DSizes output_strides = + internal::strides(desc.dimensions()); + + // NOTE(ezhulenev): We initialize bock iteration state for `NumDims - 1` + // dimensions, skipping innermost dimension. In theory it should be possible + // to squeeze matching innermost dimensions, however in practice that did + // not show any improvements in benchmarks. 
Also in practice first outer + // dimension usually has padding, and will prevent squeezing. + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + for (int i = 0; i < NumDims - 1; ++i) { + const int dim = IsColMajor ? i + 1 : NumDims - i - 2; + it[i].count = 0; + it[i].size = desc.dimension(dim); + + it[i].input_stride = m_inputStrides[dim]; + it[i].input_span = it[i].input_stride * (it[i].size - 1); + + it[i].output_stride = output_strides[dim]; + it[i].output_span = it[i].output_stride * (it[i].size - 1); + } + + const Index input_inner_dim_size = + static_cast(m_impl.dimensions()[inner_dim_idx]); + + // Total output size. + const Index output_size = desc.size(); + + // We will fill inner dimension of this size in the output. It might be + // larger than the inner dimension in the input, so we might have to pad + // before/after we copy values from the input inner dimension. + const Index output_inner_dim_size = desc.dimension(inner_dim_idx); + + // How many values to fill with padding BEFORE reading from the input inner + // dimension. + const Index output_inner_pad_before_size = + input_offsets[inner_dim_idx] < 0 + ? numext::mini(numext::abs(input_offsets[inner_dim_idx]), + output_inner_dim_size) + : 0; + + // How many values we can actually copy from the input inner dimension. + const Index output_inner_copy_size = numext::mini( + // Want to copy from input. + (output_inner_dim_size - output_inner_pad_before_size), + // Can copy from input. + numext::maxi(input_inner_dim_size - (input_offsets[inner_dim_idx] + + output_inner_pad_before_size), + Index(0))); + + eigen_assert(output_inner_copy_size >= 0); + + // How many values to fill with padding AFTER reading from the input inner + // dimension. 
+ const Index output_inner_pad_after_size = + (output_inner_dim_size - output_inner_copy_size - + output_inner_pad_before_size); + + // Sanity check, sum of all sizes must be equal to the output size. + eigen_assert(output_inner_dim_size == + (output_inner_pad_before_size + output_inner_copy_size + + output_inner_pad_after_size)); + + // Keep track of current coordinates and padding in the output. + DSizes output_coord = output_offsets; + DSizes output_padded; + for (int i = 0; i < NumDims; ++i) { + const int dim = IsColMajor ? i : NumDims - i - 1; + output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim); + } + + typedef internal::StridedLinearBufferCopy LinCopy; + + // Prepare storage for the materialized padding result. + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + + // TODO(ezhulenev): Squeeze multiple non-padded inner dimensions into a + // single logical inner dimension. + + // When possible we squeeze writes for the innermost (only if non-padded) + // dimension with the first padded dimension. This allows to reduce the + // number of calls to LinCopy and better utilize vector instructions. + const bool squeeze_writes = + NumDims > 1 && + // inner dimension is not padded + (input_inner_dim_size == m_dimensions[inner_dim_idx]) && + // and equal to the block inner dimension + (input_inner_dim_size == output_inner_dim_size); + + const int squeeze_dim = IsColMajor ? inner_dim_idx + 1 : inner_dim_idx - 1; + + // Maximum coordinate on a squeeze dimension that we can write to. + const Index squeeze_max_coord = + squeeze_writes ? numext::mini( + // max non-padded element in the input + static_cast(m_dimensions[squeeze_dim] - + m_padding[squeeze_dim].second), + // max element in the output buffer + static_cast(output_offsets[squeeze_dim] + + desc.dimension(squeeze_dim))) + : static_cast(0); + + // Iterate copying data from `m_impl.data()` to the output buffer. 
+ for (Index size = 0; size < output_size;) { + // Detect if we are in the padded region (exclude innermost dimension). + bool is_padded = false; + for (int j = 1; j < NumDims; ++j) { + const int dim = IsColMajor ? j : NumDims - j - 1; + is_padded = output_padded[dim]; + if (is_padded) break; + } + + if (is_padded) { + // Fill single innermost dimension with padding value. + size += output_inner_dim_size; + + LinCopy::template Run( + typename LinCopy::Dst(output_offset, 1, block_storage.data()), + typename LinCopy::Src(0, 0, &m_paddingValue), + output_inner_dim_size); + + + } else if (squeeze_writes) { + // Squeeze multiple reads from innermost dimensions. + const Index squeeze_num = squeeze_max_coord - output_coord[squeeze_dim]; + size += output_inner_dim_size * squeeze_num; + + // Copy `squeeze_num` inner dimensions from input to output. + LinCopy::template Run( + typename LinCopy::Dst(output_offset, 1, block_storage.data()), + typename LinCopy::Src(input_offset, 1, m_impl.data()), + output_inner_dim_size * squeeze_num); + + // Update iteration state for only `squeeze_num - 1` processed inner + // dimensions, because we have another iteration state update at the end + // of the loop that will update iteration state for the last inner + // processed dimension. + it[0].count += (squeeze_num - 1); + input_offset += it[0].input_stride * (squeeze_num - 1); + output_offset += it[0].output_stride * (squeeze_num - 1); + output_coord[squeeze_dim] += (squeeze_num - 1); + + } else { + // Single read from innermost dimension. + size += output_inner_dim_size; + + { // Fill with padding before copying from input inner dimension. + const Index out = output_offset; + + LinCopy::template Run( + typename LinCopy::Dst(out, 1, block_storage.data()), + typename LinCopy::Src(0, 0, &m_paddingValue), + output_inner_pad_before_size); + } + + { // Copy data from input inner dimension. 
+ const Index out = output_offset + output_inner_pad_before_size; + const Index in = input_offset + output_inner_pad_before_size; + + eigen_assert(output_inner_copy_size == 0 || m_impl.data() != NULL); + + LinCopy::template Run( + typename LinCopy::Dst(out, 1, block_storage.data()), + typename LinCopy::Src(in, 1, m_impl.data()), + output_inner_copy_size); + } + + { // Fill with padding after copying from input inner dimension. + const Index out = output_offset + output_inner_pad_before_size + + output_inner_copy_size; + + LinCopy::template Run( + typename LinCopy::Dst(out, 1, block_storage.data()), + typename LinCopy::Src(0, 0, &m_paddingValue), + output_inner_pad_after_size); + } + } + + for (int j = 0; j < NumDims - 1; ++j) { + const int dim = IsColMajor ? j + 1 : NumDims - j - 2; + + if (++it[j].count < it[j].size) { + input_offset += it[j].input_stride; + output_offset += it[j].output_stride; + output_coord[dim] += 1; + output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim); + break; + } + it[j].count = 0; + input_offset -= it[j].input_span; + output_offset -= it[j].output_span; + output_coord[dim] -= it[j].size - 1; + output_padded[dim] = isPaddingAtIndexForDim(output_coord[dim], dim); + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + private: + struct BlockIteratorState { + BlockIteratorState() + : count(0), + size(0), + input_stride(0), + input_span(0), + output_stride(0), + output_span(0) {} + + Index count; + Index size; + Index input_stride; + Index input_span; + Index output_stride; + Index output_span; + }; + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isPaddingAtIndexForDim( + Index index, int dim_index) const { +#if 
defined(EIGEN_HAS_INDEX_LIST) + return (!internal::index_pair_first_statically_eq(dim_index, 0) && + index < m_padding[dim_index].first) || + (!internal::index_pair_second_statically_eq(dim_index, 0) && + index >= m_dimensions[dim_index] - m_padding[dim_index].second); +#else + return (index < m_padding[dim_index].first) || + (index >= m_dimensions[dim_index] - m_padding[dim_index].second); +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isLeftPaddingCompileTimeZero( + int dim_index) const { +#if defined(EIGEN_HAS_INDEX_LIST) + return internal::index_pair_first_statically_eq(dim_index, 0); +#else + EIGEN_UNUSED_VARIABLE(dim_index); + return false; +#endif + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool isRightPaddingCompileTimeZero( + int dim_index) const { +#if defined(EIGEN_HAS_INDEX_LIST) + return internal::index_pair_second_statically_eq(dim_index, 0); +#else + EIGEN_UNUSED_VARIABLE(dim_index); + return false; +#endif + } + + + void updateCostPerDimension(TensorOpCost& cost, int i, bool first) const { + const double in = static_cast(m_impl.dimensions()[i]); + const double out = in + m_padding[i].first + m_padding[i].second; + if (out == 0) + return; + const double reduction = in / out; + cost *= reduction; + if (first) { + cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost() + + reduction * (1 * TensorOpCost::AddCost())); + } else { + cost += TensorOpCost(0, 0, 2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + reduction * (2 * TensorOpCost::MulCost() + + 1 * TensorOpCost::DivCost())); + } + } + + protected: + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index initialIndex = index; + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index firstIdx = index; + const Index lastIdx = index + PacketSize - 
1; + const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i]; + const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i]; + const Index lastPaddedRight = m_outputStrides[i+1]; + + if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. + const Index idx = index / m_outputStrides[i]; + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + else { + // Every other case + return packetWithPossibleZero(initialIndex); + } + } + + const Index lastIdx = index + PacketSize - 1; + const Index firstIdx = index; + const Index lastPaddedLeft = m_padding[0].first; + const Index firstPaddedRight = (m_dimensions[0] - m_padding[0].second); + const Index lastPaddedRight = m_outputStrides[1]; + + if (!isLeftPaddingCompileTimeZero(0) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(0) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(0) && isRightPaddingCompileTimeZero(0)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. 
+ inputIndex += (index - m_padding[0].first); + return m_impl.template packet(inputIndex); + } + // Every other case + return packetWithPossibleZero(initialIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + const Index initialIndex = index; + Index inputIndex = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index firstIdx = index; + const Index lastIdx = index + PacketSize - 1; + const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1]; + const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1]; + const Index lastPaddedRight = m_outputStrides[i]; + + if (!isLeftPaddingCompileTimeZero(i) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(i) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(i) && isRightPaddingCompileTimeZero(i)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. 
+ const Index idx = index / m_outputStrides[i+1]; + inputIndex += (idx - m_padding[i].first) * m_inputStrides[i]; + index -= idx * m_outputStrides[i+1]; + } + else { + // Every other case + return packetWithPossibleZero(initialIndex); + } + } + + const Index lastIdx = index + PacketSize - 1; + const Index firstIdx = index; + const Index lastPaddedLeft = m_padding[NumDims-1].first; + const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second); + const Index lastPaddedRight = m_outputStrides[NumDims-1]; + + if (!isLeftPaddingCompileTimeZero(NumDims-1) && lastIdx < lastPaddedLeft) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if (!isRightPaddingCompileTimeZero(NumDims-1) && firstIdx >= firstPaddedRight && lastIdx < lastPaddedRight) { + // all the coefficient are in the padding zone. + return internal::pset1(m_paddingValue); + } + else if ((isLeftPaddingCompileTimeZero(NumDims-1) && isRightPaddingCompileTimeZero(NumDims-1)) || (firstIdx >= lastPaddedLeft && lastIdx < firstPaddedRight)) { + // all the coefficient are between the 2 padding zones. 
+ inputIndex += (index - m_padding[NumDims-1].first); + return m_impl.template packet(inputIndex); + } + // Every other case + return packetWithPossibleZero(initialIndex); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; + PaddingDimensions m_padding; + + Scalar m_paddingValue; + + const Device EIGEN_DEVICE_REF m_device; +}; + + + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_PADDING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h new file mode 100644 index 0000000..413d25d --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h @@ -0,0 +1,291 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_PATCH_H +#define EIGEN_CXX11_TENSOR_TENSOR_PATCH_H + +namespace Eigen { + +/** \class TensorPatch + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor patch class. 
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorPatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorPatchOp type; +}; + +} // end namespace internal + + + +template +class TensorPatchOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorPatchOp(const XprType& expr, const PatchDim& patch_dims) + : m_xpr(expr), m_patch_dims(patch_dims) {} + + EIGEN_DEVICE_FUNC + const PatchDim& patch_dims() const { return m_patch_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const PatchDim m_patch_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorPatchOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value + 1; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + 
static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + Index num_patches = 1; + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const PatchDim& patch_dims = op.patch_dims(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < NumDims-1; ++i) { + m_dimensions[i] = patch_dims[i]; + num_patches *= (input_dims[i] - patch_dims[i] + 1); + } + m_dimensions[NumDims-1] = num_patches; + + m_inputStrides[0] = 1; + m_patchStrides[0] = 1; + for (int i = 1; i < NumDims-1; ++i) { + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_patchStrides[i] = m_patchStrides[i-1] * (input_dims[i-1] - patch_dims[i-1] + 1); + } + m_outputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + } + } else { + for (int i = 0; i < NumDims-1; ++i) { + m_dimensions[i+1] = patch_dims[i]; + num_patches *= (input_dims[i] - patch_dims[i] + 1); + } + m_dimensions[0] = num_patches; + + m_inputStrides[NumDims-2] = 1; + m_patchStrides[NumDims-2] = 1; + for (int i = NumDims-3; i >= 0; --i) { + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_patchStrides[i] = m_patchStrides[i+1] * (input_dims[i+1] - patch_dims[i+1] + 1); + } + m_outputStrides[NumDims-1] = 1; + for (int i = NumDims-2; i >= 0; --i) { + 
m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + Index output_stride_index = (static_cast(Layout) == static_cast(ColMajor)) ? NumDims - 1 : 0; + // Find the location of the first element of the patch. + Index patchIndex = index / m_outputStrides[output_stride_index]; + // Find the offset of the element wrt the location of the first element. + Index patchOffset = index - patchIndex * m_outputStrides[output_stride_index]; + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i > 0; --i) { + const Index patchIdx = patchIndex / m_patchStrides[i]; + patchIndex -= patchIdx * m_patchStrides[i]; + const Index offsetIdx = patchOffset / m_outputStrides[i]; + patchOffset -= offsetIdx * m_outputStrides[i]; + inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i]; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 2; ++i) { + const Index patchIdx = patchIndex / m_patchStrides[i]; + patchIndex -= patchIdx * m_patchStrides[i]; + const Index offsetIdx = patchOffset / m_outputStrides[i+1]; + patchOffset -= offsetIdx * m_outputStrides[i+1]; + inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i]; + } + } + inputIndex += (patchIndex + patchOffset); + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + Index output_stride_index = (static_cast(Layout) == 
static_cast(ColMajor)) ? NumDims - 1 : 0; + Index indices[2] = {index, index + PacketSize - 1}; + Index patchIndices[2] = {indices[0] / m_outputStrides[output_stride_index], + indices[1] / m_outputStrides[output_stride_index]}; + Index patchOffsets[2] = {indices[0] - patchIndices[0] * m_outputStrides[output_stride_index], + indices[1] - patchIndices[1] * m_outputStrides[output_stride_index]}; + + Index inputIndices[2] = {0, 0}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 2; i > 0; --i) { + const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i], + patchIndices[1] / m_patchStrides[i]}; + patchIndices[0] -= patchIdx[0] * m_patchStrides[i]; + patchIndices[1] -= patchIdx[1] * m_patchStrides[i]; + + const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i], + patchOffsets[1] / m_outputStrides[i]}; + patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i]; + patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i]; + + inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i]; + inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i]; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 2; ++i) { + const Index patchIdx[2] = {patchIndices[0] / m_patchStrides[i], + patchIndices[1] / m_patchStrides[i]}; + patchIndices[0] -= patchIdx[0] * m_patchStrides[i]; + patchIndices[1] -= patchIdx[1] * m_patchStrides[i]; + + const Index offsetIdx[2] = {patchOffsets[0] / m_outputStrides[i+1], + patchOffsets[1] / m_outputStrides[i+1]}; + patchOffsets[0] -= offsetIdx[0] * m_outputStrides[i+1]; + patchOffsets[1] -= offsetIdx[1] * m_outputStrides[i+1]; + + inputIndices[0] += (patchIdx[0] + offsetIdx[0]) * m_inputStrides[i]; + inputIndices[1] += (patchIdx[1] + offsetIdx[1]) * m_inputStrides[i]; + } + } + inputIndices[0] += (patchIndices[0] + patchOffsets[0]); + inputIndices[1] += (patchIndices[1] + patchOffsets[1]); + + if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { + 
PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[PacketSize-1] = m_impl.coeff(inputIndices[1]); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double compute_cost = NumDims * (TensorOpCost::DivCost() + + TensorOpCost::MulCost() + + 2 * TensorOpCost::AddCost()); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + array m_patchStrides; + + TensorEvaluator m_impl; + +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h new file mode 100644 index 0000000..37c1d1c --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h @@ -0,0 +1,322 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// Copyright (C) 2018 Mehdi Goli Codeplay Software Ltd. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H +#define EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H + +namespace Eigen { +namespace internal { + +namespace { + +EIGEN_DEVICE_FUNC uint64_t get_random_seed() { +#if defined(EIGEN_GPU_COMPILE_PHASE) + // We don't support 3d kernels since we currently only use 1 and + // 2d kernels. + gpu_assert(threadIdx.z == 0); + return blockIdx.x * blockDim.x + threadIdx.x + + gridDim.x * blockDim.x * (blockIdx.y * blockDim.y + threadIdx.y); +#else + // Rely on Eigen's random implementation. + return random(); +#endif +} + +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE unsigned PCG_XSH_RS_generator(uint64_t* state, uint64_t stream) { + // TODO: Unify with the implementation in the non blocking thread pool. + uint64_t current = *state; + // Update the internal state + *state = current * 6364136223846793005ULL + (stream << 1 | 1); + // Generate the random output (using the PCG-XSH-RS scheme) + return static_cast((current ^ (current >> 22)) >> (22 + (current >> 61))); +} + +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE uint64_t PCG_XSH_RS_state(uint64_t seed) { + seed = seed ? seed : get_random_seed(); + return seed * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL; +} + +} // namespace + + +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +T RandomToTypeUniform(uint64_t* state, uint64_t stream) { + unsigned rnd = PCG_XSH_RS_generator(state, stream); + return static_cast(rnd); +} + + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Eigen::half RandomToTypeUniform(uint64_t* state, uint64_t stream) { + // Generate 10 random bits for the mantissa, merge with exponent. 
+ unsigned rnd = PCG_XSH_RS_generator(state, stream); + const uint16_t half_bits = static_cast(rnd & 0x3ffu) | (static_cast(15) << 10); + Eigen::half result = Eigen::numext::bit_cast(half_bits); + // Return the final result + return result - Eigen::half(1.0f); +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Eigen::bfloat16 RandomToTypeUniform(uint64_t* state, uint64_t stream) { + + // Generate 7 random bits for the mantissa, merge with exponent. + unsigned rnd = PCG_XSH_RS_generator(state, stream); + const uint16_t half_bits = static_cast(rnd & 0x7fu) | (static_cast(127) << 7); + Eigen::bfloat16 result = Eigen::numext::bit_cast(half_bits); + // Return the final result + return result - Eigen::bfloat16(1.0f); +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float RandomToTypeUniform(uint64_t* state, uint64_t stream) { + typedef union { + uint32_t raw; + float fp; + } internal; + internal result; + // Generate 23 random bits for the mantissa mantissa + const unsigned rnd = PCG_XSH_RS_generator(state, stream); + result.raw = rnd & 0x7fffffu; + // Set the exponent + result.raw |= (static_cast(127) << 23); + // Return the final result + return result.fp - 1.0f; +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double RandomToTypeUniform(uint64_t* state, uint64_t stream) { + typedef union { + uint64_t raw; + double dp; + } internal; + internal result; + result.raw = 0; + // Generate 52 random bits for the mantissa + // First generate the upper 20 bits + unsigned rnd1 = PCG_XSH_RS_generator(state, stream) & 0xfffffu; + // The generate the lower 32 bits + unsigned rnd2 = PCG_XSH_RS_generator(state, stream); + result.raw = (static_cast(rnd1) << 32) | rnd2; + // Set the exponent + result.raw |= (static_cast(1023) << 52); + // Return the final result + return result.dp - 1.0; +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeUniform >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeUniform(state, 
stream), + RandomToTypeUniform(state, stream)); +} +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeUniform >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeUniform(state, stream), + RandomToTypeUniform(state, stream)); +} + +template class UniformRandomGenerator { + public: + static const bool PacketAccess = true; + + // Uses the given "seed" if non-zero, otherwise uses a random seed. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator( + uint64_t seed = 0) { + m_state = PCG_XSH_RS_state(seed); + #ifdef EIGEN_USE_SYCL + // In SYCL it is not possible to build PCG_XSH_RS_state in one step. + // Therefor, we need two step to initializate the m_state. + // IN SYCL, the constructor of the functor is s called on the CPU + // and we get the clock seed here from the CPU. However, This seed is + //the same for all the thread. As unlike CUDA, the thread.ID, BlockID, etc is not a global function. + // and only available on the Operator() function (which is called on the GPU). + // Thus for CUDA (((CLOCK + global_thread_id)* 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed to each thread + // but for SYCL ((CLOCK * 6364136223846793005ULL) + 0xda3e39cb94b95bdbULL) is passed to each thread and each thread adds + // the (global_thread_id* 6364136223846793005ULL) for itself only once, in order to complete the construction + // similar to CUDA Therefore, the thread Id injection is not available at this stage. + //However when the operator() is called the thread ID will be avilable. So inside the opeator, + // we add the thrreadID, BlockId,... (which is equivalent of i) + //to the seed and construct the unique m_state per thead similar to cuda. 
+ m_exec_once =false; + #endif + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE UniformRandomGenerator( + const UniformRandomGenerator& other) { + m_state = other.m_state; + #ifdef EIGEN_USE_SYCL + m_exec_once =other.m_exec_once; + #endif + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T operator()(Index i) const { + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + // The (i * 6364136223846793005ULL) is the remaining part of the PCG_XSH_RS_state on the GPU side + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + T result = RandomToTypeUniform(&m_state, i); + return result; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Packet packetOp(Index i) const { + const int packetSize = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX T values[packetSize]; + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + EIGEN_UNROLL_LOOP + for (int j = 0; j < packetSize; ++j) { + values[j] = RandomToTypeUniform(&m_state, i); + } + return internal::pload(values); + } + + private: + mutable uint64_t m_state; + #ifdef EIGEN_USE_SYCL + mutable bool m_exec_once; + #endif +}; + +template +struct functor_traits > { + enum { + // Rough estimate for floating point, multiplied by ceil(sizeof(T) / sizeof(float)). + Cost = 12 * NumTraits::AddCost * + ((sizeof(Scalar) + sizeof(float) - 1) / sizeof(float)), + PacketAccess = UniformRandomGenerator::PacketAccess + }; +}; + + + +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +T RandomToTypeNormal(uint64_t* state, uint64_t stream) { + // Use the ratio of uniform method to generate numbers following a normal + // distribution. See for example Numerical Recipes chapter 7.3.9 for the + // details. 
+ T u, v, q; + do { + u = RandomToTypeUniform(state, stream); + v = T(1.7156) * (RandomToTypeUniform(state, stream) - T(0.5)); + const T x = u - T(0.449871); + const T y = numext::abs(v) + T(0.386595); + q = x*x + y * (T(0.196)*y - T(0.25472)*x); + } while (q > T(0.27597) && + (q > T(0.27846) || v*v > T(-4) * numext::log(u) * u*u)); + + return v/u; +} + +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeNormal >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeNormal(state, stream), + RandomToTypeNormal(state, stream)); +} +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +std::complex RandomToTypeNormal >(uint64_t* state, uint64_t stream) { + return std::complex(RandomToTypeNormal(state, stream), + RandomToTypeNormal(state, stream)); +} + + +template class NormalRandomGenerator { + public: + static const bool PacketAccess = true; + + // Uses the given "seed" if non-zero, otherwise uses a random seed. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator(uint64_t seed = 0) { + m_state = PCG_XSH_RS_state(seed); + #ifdef EIGEN_USE_SYCL + // In SYCL it is not possible to build PCG_XSH_RS_state in one step. + // Therefor, we need two steps to initializate the m_state. + // IN SYCL, the constructor of the functor is s called on the CPU + // and we get the clock seed here from the CPU. However, This seed is + //the same for all the thread. As unlike CUDA, the thread.ID, BlockID, etc is not a global function. + // and only available on the Operator() function (which is called on the GPU). + // Therefore, the thread Id injection is not available at this stage. However when the operator() + //is called the thread ID will be avilable. So inside the opeator, + // we add the thrreadID, BlockId,... (which is equivalent of i) + //to the seed and construct the unique m_state per thead similar to cuda. 
+ m_exec_once =false; + #endif + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NormalRandomGenerator( + const NormalRandomGenerator& other) { + m_state = other.m_state; +#ifdef EIGEN_USE_SYCL + m_exec_once=other.m_exec_once; +#endif + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T operator()(Index i) const { + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + T result = RandomToTypeNormal(&m_state, i); + return result; + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + Packet packetOp(Index i) const { + const int packetSize = internal::unpacket_traits::size; + EIGEN_ALIGN_MAX T values[packetSize]; + #ifdef EIGEN_USE_SYCL + if(!m_exec_once) { + // This is the second stage of adding thread Id to the CPU clock seed and build unique seed per thread + m_state += (i * 6364136223846793005ULL); + m_exec_once =true; + } + #endif + EIGEN_UNROLL_LOOP + for (int j = 0; j < packetSize; ++j) { + values[j] = RandomToTypeNormal(&m_state, i); + } + return internal::pload(values); + } + + private: + mutable uint64_t m_state; + #ifdef EIGEN_USE_SYCL + mutable bool m_exec_once; + #endif +}; + + +template +struct functor_traits > { + enum { + // On average, we need to generate about 3 random numbers + // 15 mul, 8 add, 1.5 logs + Cost = 3 * functor_traits >::Cost + + 15 * NumTraits::AddCost + 8 * NumTraits::AddCost + + 3 * functor_traits >::Cost / 2, + PacketAccess = NormalRandomGenerator::PacketAccess + }; +}; + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_RANDOM_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h new file mode 100644 index 0000000..583f462 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h @@ -0,0 +1,998 @@ +// 
This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// Copyright (C) 2016 Mehdi Goli, Codeplay Software Ltd +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H +#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H + +// clang is incompatible with the CUDA syntax wrt making a kernel a class friend, +// so we'll use a macro to make clang happy. +#ifndef KERNEL_FRIEND +#if defined(__clang__) && (defined(__CUDA__) || defined(__HIP__)) +#define KERNEL_FRIEND friend __global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 +#else +#define KERNEL_FRIEND friend +#endif +#endif + + +namespace Eigen { + + +/** \class TensorReduction + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reduction class. + * + */ + +namespace internal { + template class MakePointer_ > + struct traits > + : traits +{ + typedef traits XprTraits; + typedef typename XprTraits::Scalar Scalar; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + + template struct MakePointer { + // Intermediate typedef to workaround MSVC issue. 
+ typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + }; +}; + +template class MakePointer_> +struct eval, Eigen::Dense> +{ + typedef const TensorReductionOp& type; +}; + +template class MakePointer_> +struct nested, 1, typename eval >::type> +{ + typedef TensorReductionOp type; +}; + + +template struct DimInitializer { + template EIGEN_DEVICE_FUNC + static void run(const InputDims& input_dims, + const array::value>& reduced, + OutputDims* output_dims, ReducedDims* reduced_dims) { + const int NumInputDims = internal::array_size::value; + int outputIndex = 0; + int reduceIndex = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (reduced[i]) { + (*reduced_dims)[reduceIndex] = input_dims[i]; + ++reduceIndex; + } else { + (*output_dims)[outputIndex] = input_dims[i]; + ++outputIndex; + } + } + } +}; + +template <> struct DimInitializer > { + template EIGEN_DEVICE_FUNC + static void run(const InputDims& input_dims, const array&, + Sizes<>*, array* reduced_dims) { + const int NumInputDims = internal::array_size::value; + for (int i = 0; i < NumInputDims; ++i) { + (*reduced_dims)[i] = input_dims[i]; + } + } +}; + + +template +struct are_inner_most_dims { + static const bool value = false; +}; +template +struct preserve_inner_most_dims { + static const bool value = false; +}; + +#if EIGEN_HAS_CONSTEXPR && EIGEN_HAS_VARIADIC_TEMPLATES +template +struct are_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_eq(0, 0); + static const bool tmp3 = index_statically_eq(array_size::value-1, array_size::value-1); + static const bool value = tmp1 & tmp2 & tmp3; +}; +template +struct are_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_eq(0, NumTensorDims - array_size::value); + static const bool tmp3 = index_statically_eq(array_size::value - 1, NumTensorDims - 1); + static const bool value = tmp1 & 
tmp2 & tmp3; + +}; +template +struct preserve_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_gt(0, 0); + static const bool value = tmp1 & tmp2; + +}; +template +struct preserve_inner_most_dims{ + static const bool tmp1 = indices_statically_known_to_increase(); + static const bool tmp2 = index_statically_lt(array_size::value - 1, NumTensorDims - 1); + static const bool value = tmp1 & tmp2; +}; +#endif + + +template +struct GenericDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) { + EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + for (int j = 0; j < self.m_reducedDims[DimIndex]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex]; + GenericDimReducer::reduce(self, input, reducer, accum); + } + } +}; +template +struct GenericDimReducer<0, Self, Op> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::CoeffReturnType* accum) { + for (int j = 0; j < self.m_reducedDims[0]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0]; + reducer.reduce(self.m_impl.coeff(input), accum); + } + } +}; +template +struct GenericDimReducer<-1, Self, Op> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index index, Op& reducer, typename Self::CoeffReturnType* accum) { + reducer.reduce(self.m_impl.coeff(index), accum); + } +}; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) { + typename Self::CoeffReturnType accum = reducer.initialize(); + for (typename Self::Index j = 0; j < 
numValuesToReduce; ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + return reducer.finalize(accum); + } +}; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType reduce(const Self& self, typename Self::Index firstIndex, typename Self::Index numValuesToReduce, Op& reducer) { + const typename Self::Index packetSize = internal::unpacket_traits::size; + const typename Self::Index VectorizedSize = (numValuesToReduce / packetSize) * packetSize; + typename Self::PacketReturnType paccum = reducer.template initializePacket(); + for (typename Self::Index j = 0; j < VectorizedSize; j += packetSize) { + reducer.reducePacket(self.m_impl.template packet(firstIndex + j), &paccum); + } + typename Self::CoeffReturnType accum = reducer.initialize(); + for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + return reducer.finalizeBoth(accum, paccum); + } +}; + +#if !defined(EIGEN_HIPCC) +static const int kLeafSize = 1024; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType + reduce(const Self& self, typename Self::Index firstIndex, + typename Self::Index numValuesToReduce, Op& reducer) { + typename Self::CoeffReturnType accum = reducer.initialize(); + if (numValuesToReduce > kLeafSize) { + const typename Self::Index half = numValuesToReduce / 2; + reducer.reduce(reduce(self, firstIndex, half, reducer), &accum); + reducer.reduce( + reduce(self, firstIndex + half, numValuesToReduce - half, reducer), + &accum); + } else { + for (typename Self::Index j = 0; j < numValuesToReduce; ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + } + return reducer.finalize(accum); + } +}; + +template +struct InnerMostDimReducer { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Self::CoeffReturnType + reduce(const Self& self, 
typename Self::Index firstIndex, + typename Self::Index numValuesToReduce, Op& reducer) { + const typename Self::Index packetSize = + internal::unpacket_traits::size; + typename Self::CoeffReturnType accum = reducer.initialize(); + if (numValuesToReduce > packetSize * kLeafSize) { + // Make sure the split point is aligned on a packet boundary. + const typename Self::Index split = + packetSize * + divup(firstIndex + divup(numValuesToReduce, typename Self::Index(2)), + packetSize); + const typename Self::Index num_left = + numext::mini(split - firstIndex, numValuesToReduce); + reducer.reduce(reduce(self, firstIndex, num_left, reducer), &accum); + if (num_left < numValuesToReduce) { + reducer.reduce( + reduce(self, split, numValuesToReduce - num_left, reducer), &accum); + } + return reducer.finalize(accum); + } else { + const typename Self::Index UnrollSize = + (numValuesToReduce / (2*packetSize)) * 2*packetSize; + const typename Self::Index VectorizedSize = + (numValuesToReduce / packetSize) * packetSize; + typename Self::PacketReturnType paccum = + reducer.template initializePacket(); + typename Self::PacketReturnType paccum2 = + reducer.template initializePacket(); + for (typename Self::Index j = 0; j < UnrollSize; j += packetSize * 2) { + reducer.reducePacket( + self.m_impl.template packet(firstIndex + j), &paccum); + reducer.reducePacket( + self.m_impl.template packet(firstIndex + j + packetSize), + &paccum2); + } + for (typename Self::Index j = UnrollSize; j < VectorizedSize; j+= packetSize) { + reducer.reducePacket(self.m_impl.template packet( + firstIndex + j), &paccum); + } + reducer.reducePacket(paccum2, &paccum); + for (typename Self::Index j = VectorizedSize; j < numValuesToReduce; + ++j) { + reducer.reduce(self.m_impl.coeff(firstIndex + j), &accum); + } + return reducer.finalizeBoth(accum, paccum); + } + } +}; +#endif + +template +struct InnerMostDimPreserver { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, 
Op&, typename Self::PacketReturnType*) { + eigen_assert(false && "should never be called"); + } +}; + +template +struct InnerMostDimPreserver { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) { + EIGEN_STATIC_ASSERT((DimIndex > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + for (typename Self::Index j = 0; j < self.m_reducedDims[DimIndex]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[DimIndex]; + InnerMostDimPreserver::reduce(self, input, reducer, accum); + } + } +}; + +template +struct InnerMostDimPreserver<0, Self, Op, true> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self& self, typename Self::Index firstIndex, Op& reducer, typename Self::PacketReturnType* accum) { + for (typename Self::Index j = 0; j < self.m_reducedDims[0]; ++j) { + const typename Self::Index input = firstIndex + j * self.m_reducedStrides[0]; + reducer.reducePacket(self.m_impl.template packet(input), accum); + } + } +}; +template +struct InnerMostDimPreserver<-1, Self, Op, true> { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const Self&, typename Self::Index, Op&, typename Self::PacketReturnType*) { + eigen_assert(false && "should never be called"); + } +}; + +// Default full reducer +template +struct FullReducer { + static const bool HasOptimizedImplementation = false; + + static EIGEN_DEVICE_FUNC void run(const Self& self, Op& reducer, const Device&, typename Self::EvaluatorPointerType output) { + const typename Self::Index num_coeffs = array_prod(self.m_impl.dimensions()); + *output = InnerMostDimReducer::reduce(self, 0, num_coeffs, reducer); + } +}; + + +#ifdef EIGEN_USE_THREADS +// Multithreaded full reducers +template +struct FullReducerShard { + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void run(const Self& self, typename Self::Index firstIndex, + typename Self::Index numValuesToReduce, Op& reducer, + 
typename Self::CoeffReturnType* output) { + *output = InnerMostDimReducer::reduce( + self, firstIndex, numValuesToReduce, reducer); + } +}; + +// Multithreaded full reducer +template +struct FullReducer { + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful; + static const Index PacketSize = + unpacket_traits::size; + + // launch one reducer per thread and accumulate the result. + static void run(const Self& self, Op& reducer, const ThreadPoolDevice& device, + typename Self::CoeffReturnType* output) { + typedef typename Self::Index Index; + const Index num_coeffs = array_prod(self.m_impl.dimensions()); + if (num_coeffs == 0) { + *output = reducer.finalize(reducer.initialize()); + return; + } + const TensorOpCost cost = + self.m_impl.costPerCoeff(Vectorizable) + + TensorOpCost(0, 0, internal::functor_traits::Cost, Vectorizable, + PacketSize); + const int num_threads = TensorCostModel::numThreads( + num_coeffs, cost, device.numThreads()); + if (num_threads == 1) { + *output = + InnerMostDimReducer::reduce(self, 0, num_coeffs, reducer); + return; + } + const Index blocksize = + std::floor(static_cast(num_coeffs) / num_threads); + const Index numblocks = blocksize > 0 ? 
num_coeffs / blocksize : 0; + eigen_assert(num_coeffs >= numblocks * blocksize); + + Barrier barrier(internal::convert_index(numblocks)); + MaxSizeVector shards(numblocks, reducer.initialize()); + for (Index i = 0; i < numblocks; ++i) { + device.enqueue_with_barrier(&barrier, &FullReducerShard::run, + self, i * blocksize, blocksize, reducer, + &shards[i]); + } + typename Self::CoeffReturnType finalShard; + if (numblocks * blocksize < num_coeffs) { + finalShard = InnerMostDimReducer::reduce( + self, numblocks * blocksize, num_coeffs - numblocks * blocksize, + reducer); + } else { + finalShard = reducer.initialize(); + } + barrier.Wait(); + + for (Index i = 0; i < numblocks; ++i) { + reducer.reduce(shards[i], &finalShard); + } + *output = reducer.finalize(finalShard); + } +}; + +#endif + + +// Default inner reducer +template +struct InnerReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; + +// Default outer reducer +template +struct OuterReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; + +#ifdef EIGEN_USE_SYCL +// Default Generic reducer +template +struct GenericReducer { + static const bool HasOptimizedImplementation = false; + + EIGEN_DEVICE_FUNC static bool run(const Self&, Op&, const Device&, typename Self::CoeffReturnType*, typename Self::Index, typename Self::Index) { + eigen_assert(false && "Not implemented"); + return true; + } +}; +#endif + +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernel(R, const S, I_, typename 
S::CoeffReturnType*, unsigned int*); + + +#if defined(EIGEN_HAS_GPU_FP16) +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitFullReduxKernelHalfFloat(R, const S, I_, internal::packet_traits::type*); +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernelHalfFloat(R, const S, I_, half*, internal::packet_traits::type*); +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernelHalfFloat(R, const S, I_, I_, half*); + +#endif + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); +#endif + +/** + * For SYCL, the return type of the reduction is deduced from the initialize method of the given Op. + * This allows the reduction to have a different type for the accumulator than the input data type. + * If this is the case, the functor needs to have two reduce method: one for reducing an element of the input + * with the accumulator and the other for reducing two accumulators. + * Such a reducer can be useful for instance when the accumulator is a boolean or a bitset that checks for + * some properties of the input. 
+ */ +template +struct ReductionReturnType { +#if defined(EIGEN_USE_SYCL) + typedef typename remove_const().initialize())>::type type; +#else + typedef typename remove_const::type type; +#endif +}; + +} // end namespace internal + + +template class MakePointer_> +class TensorReductionOp : public TensorBase, ReadOnlyAccessors> { + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename internal::remove_const::type CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReductionOp(const XprType& expr, const Dims& dims) : m_expr(expr), m_dims(dims) + { } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + TensorReductionOp(const XprType& expr, const Dims& dims, const Op& reducer) : m_expr(expr), m_dims(dims), m_reducer(reducer) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const XprType& expression() const { return m_expr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dims& dims() const { return m_dims; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Op& reducer() const { return m_reducer; } + + protected: + typename XprType::Nested m_expr; + const Dims m_dims; + const Op m_reducer; +}; + +template +struct TensorReductionEvaluatorBase; + +// Eval as rvalue +template class MakePointer_, typename Device> +struct TensorReductionEvaluatorBase, Device> +{ + typedef internal::reducer_traits ReducerTraits; + typedef Dims ReducedDims; + typedef TensorReductionOp XprType; + typedef typename XprType::Index Index; + typedef ArgType ChildType; + typedef typename TensorEvaluator::Dimensions InputDimensions; + static const int NumInputDims = internal::array_size::value; + static const int NumReducedDims = internal::array_size::value; + static const int NumOutputDims = NumInputDims - NumReducedDims; + typedef 
typename internal::conditional, DSizes >::type Dimensions; + typedef typename XprType::Scalar Scalar; + typedef TensorReductionEvaluatorBase, Device> Self; + static const bool InputPacketAccess = TensorEvaluator::PacketAccess; + typedef typename internal::ReductionReturnType::type CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const Index PacketSize = PacketType::size; + + typedef typename Eigen::internal::traits::PointerType TensorPointerType; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + // Subset of strides of the input tensor for the non-reduced dimensions. + // Indexed by output dimensions. + static const int NumPreservedStrides = max_n_1::size; + + enum { + IsAligned = false, + PacketAccess = Self::InputPacketAccess && ReducerTraits::PacketAccess, + BlockAccess = false, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + static const bool ReducingInnerMostDims = internal::are_inner_most_dims::value; + static const bool PreservingInnerMostDims = internal::preserve_inner_most_dims::value; + static const bool RunningFullReduction = (NumOutputDims==0); + + EIGEN_STRONG_INLINE TensorReductionEvaluatorBase(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_reducer(op.reducer()), m_result(NULL), m_device(device) + { + EIGEN_STATIC_ASSERT((NumInputDims >= NumReducedDims), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((!ReducingInnerMostDims | !PreservingInnerMostDims | (NumReducedDims == NumInputDims)), + YOU_MADE_A_PROGRAMMING_MISTAKE); + + // Build the bitmap indicating if an input 
dimension is reduced or not. + for (int i = 0; i < NumInputDims; ++i) { + m_reduced[i] = false; + } + for (int i = 0; i < NumReducedDims; ++i) { + eigen_assert(op.dims()[i] >= 0); + eigen_assert(op.dims()[i] < NumInputDims); + m_reduced[op.dims()[i]] = true; + } + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + internal::DimInitializer::run(input_dims, m_reduced, &m_dimensions, &m_reducedDims); + + // Precompute output strides. + if (NumOutputDims > 0) { + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + for (int i = 1; i < NumOutputDims; ++i) { + m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i]); + } + } else { + m_outputStrides[NumOutputDims - 1] = 1; + for (int i = NumOutputDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor(m_outputStrides[i]); + } + } + } + + // Precompute input strides. + if (NumInputDims > 0) { + array input_strides; + if (static_cast(Layout) == static_cast(ColMajor)) { + input_strides[0] = 1; + for (int i = 1; i < NumInputDims; ++i) { + input_strides[i] = input_strides[i-1] * input_dims[i-1]; + } + } else { + input_strides.back() = 1; + for (int i = NumInputDims - 2; i >= 0; --i) { + input_strides[i] = input_strides[i + 1] * input_dims[i + 1]; + } + } + + int outputIndex = 0; + int reduceIndex = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (m_reduced[i]) { + m_reducedStrides[reduceIndex] = input_strides[i]; + ++reduceIndex; + } else { + m_preservedStrides[outputIndex] = input_strides[i]; + m_output_to_input_dim_map[outputIndex] = i; + ++outputIndex; + } + } + } + + // Special case for full reductions + if (NumOutputDims == 0) { + m_preservedStrides[0] = internal::array_prod(input_dims); + } + + m_numValuesToReduce = + NumOutputDims == 0 + ? 
internal::array_prod(input_dims) + : (static_cast(Layout) == static_cast(ColMajor)) + ? m_preservedStrides[0] + : m_preservedStrides[NumOutputDims - 1]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE + bool evalSubExprsIfNeededCommon(EvaluatorPointerType data) { + // Use the FullReducer if possible. + if ((RunningFullReduction && RunningOnSycl) ||(RunningFullReduction && + internal::FullReducer::HasOptimizedImplementation && + ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) || + !RunningOnGPU))) { + bool need_assign = false; + if (!data) { + m_result = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType)))); + data = m_result; + need_assign = true; + } + Op reducer(m_reducer); + internal::FullReducer::run(*this, reducer, m_device, data); + return need_assign; + } + + // Attempt to use an optimized reduction. + else if ((RunningOnGPU && (m_device.majorDeviceVersion() >= 3)) || (RunningOnSycl)) { + bool reducing_inner_dims = true; + for (int i = 0; i < NumReducedDims; ++i) { + if (static_cast(Layout) == static_cast(ColMajor)) { + reducing_inner_dims &= m_reduced[i]; + } else { + reducing_inner_dims &= m_reduced[NumInputDims - 1 - i]; + } + } + if (internal::InnerReducer::HasOptimizedImplementation && + (reducing_inner_dims || ReducingInnerMostDims)) { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions); + if (!data) { + if ((num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 128) || (RunningOnSycl)) { + data = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve))); + m_result = data; + } + else { + return true; + } + } + Op reducer(m_reducer); + // For SYCL this if always return false + if (internal::InnerReducer::run(*this, 
reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) { + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + return true; + } else { + return (m_result != NULL); + } + } + + bool preserving_inner_dims = true; + for (int i = 0; i < NumReducedDims; ++i) { + if (static_cast(Layout) == static_cast(ColMajor)) { + preserving_inner_dims &= m_reduced[NumInputDims - 1 - i]; + } else { + preserving_inner_dims &= m_reduced[i]; + } + } + if (internal::OuterReducer::HasOptimizedImplementation && + preserving_inner_dims) { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions); + if (!data) { + if ((num_coeffs_to_preserve < 1024 && num_values_to_reduce > num_coeffs_to_preserve && num_values_to_reduce > 32) || (RunningOnSycl)) { + data = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve))); + m_result = data; + } + else { + return true; + } + } + Op reducer(m_reducer); + // For SYCL this if always return false + if (internal::OuterReducer::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve)) { + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + return true; + } else { + return (m_result != NULL); + } + } + #if defined(EIGEN_USE_SYCL) + // If there is no Optimised version for SYCL, the reduction expression + // must break into two subexpression and use the SYCL generic Reducer on the device. 
+ if(RunningOnSycl) { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const Index num_coeffs_to_preserve = internal::array_prod(m_dimensions); + if (!data) { + data = static_cast(m_device.get((CoeffReturnType*)m_device.allocate_temp(sizeof(CoeffReturnType) * num_coeffs_to_preserve))); + m_result = data; + } + Op reducer(m_reducer); + internal::GenericReducer::run(*this, reducer, m_device, data, num_values_to_reduce, num_coeffs_to_preserve); + return (m_result != NULL); + } + #endif + } + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE + void + evalSubExprsIfNeededAsync(EvaluatorPointerType data, + EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(NULL, [this, data, done](bool) { + done(evalSubExprsIfNeededCommon(data)); + }); + } +#endif + + EIGEN_STRONG_INLINE + bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + return evalSubExprsIfNeededCommon(data); + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + if (m_result) { + m_device.deallocate_temp(m_result); + m_result = NULL; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (( RunningFullReduction || RunningOnGPU) && m_result ) { + return *(m_result + index); + } + Op reducer(m_reducer); + if (ReducingInnerMostDims || RunningFullReduction) { + const Index num_values_to_reduce = + (static_cast(Layout) == static_cast(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1]; + return internal::InnerMostDimReducer::reduce(*this, firstInput(index), + num_values_to_reduce, reducer); + } else { + typename Self::CoeffReturnType accum = reducer.initialize(); + internal::GenericDimReducer::reduce(*this, firstInput(index), reducer, &accum); + return reducer.finalize(accum); + } + } + + // TODO(bsteiner): provide a more efficient implementation. 
+ template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + PacketSize - 1 < Index(internal::array_prod(dimensions()))); + + if (RunningOnGPU && m_result) { + return internal::pload(m_result + index); + } + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + if (ReducingInnerMostDims) { + const Index num_values_to_reduce = + (static_cast(Layout) == static_cast(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumPreservedStrides - 1]; + const Index firstIndex = firstInput(index); + for (Index i = 0; i < PacketSize; ++i) { + Op reducer(m_reducer); + values[i] = internal::InnerMostDimReducer::reduce(*this, firstIndex + i * num_values_to_reduce, + num_values_to_reduce, reducer); + } + } else if (PreservingInnerMostDims) { + const Index firstIndex = firstInput(index); + const int innermost_dim = (static_cast(Layout) == static_cast(ColMajor)) ? 0 : NumOutputDims - 1; + // TBD: extend this the the n innermost dimensions that we preserve. + if (((firstIndex % m_dimensions[innermost_dim]) + PacketSize - 1) < m_dimensions[innermost_dim]) { + Op reducer(m_reducer); + typename Self::PacketReturnType accum = reducer.template initializePacket(); + internal::InnerMostDimPreserver::reduce(*this, firstIndex, reducer, &accum); + return reducer.finalizePacket(accum); + } else { + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index + i); + } + } + } else { + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index + i); + } + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + // Must be called after evalSubExprsIfNeeded(). 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + if (RunningFullReduction && m_result) { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize); + } else { + const Index num_values_to_reduce = internal::array_prod(m_reducedDims); + const double compute_cost = num_values_to_reduce * internal::functor_traits::Cost; + return m_impl.costPerCoeff(vectorized) * num_values_to_reduce + + TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return m_result; } + EIGEN_DEVICE_FUNC const TensorEvaluator& impl() const { return m_impl; } + EIGEN_DEVICE_FUNC const Device& device() const { return m_device; } +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_result.bind(cgh); + } +#endif + + private: + template friend struct internal::GenericDimReducer; + template friend struct internal::InnerMostDimReducer; + template friend struct internal::InnerMostDimPreserver; + template friend struct internal::FullReducer; +#ifdef EIGEN_USE_THREADS + template friend struct internal::FullReducerShard; +#endif +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) + template KERNEL_FRIEND void internal::FullReductionKernel(R, const S, I_, typename S::CoeffReturnType*, unsigned int*); +#if defined(EIGEN_HAS_GPU_FP16) + template KERNEL_FRIEND void internal::ReductionInitFullReduxKernelHalfFloat(R, const S, I_, internal::packet_traits::type*); + template KERNEL_FRIEND void internal::FullReductionKernelHalfFloat(R, const S, I_, half*, internal::packet_traits::type*); + template KERNEL_FRIEND void internal::InnerReductionKernelHalfFloat(R, const S, I_, I_, half*); +#endif + template KERNEL_FRIEND void internal::InnerReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); + + template KERNEL_FRIEND void 
internal::OuterReductionKernel(R, const S, I_, I_, typename S::CoeffReturnType*); +#endif + +#if defined(EIGEN_USE_SYCL) + template < typename Evaluator_, typename Op__> friend class TensorSycl::internal::GenericNondeterministicReducer; + // SYCL need the Generic reducer for the case the recution algorithm is neither inner, outer, and full reducer + template friend struct internal::GenericReducer; +#endif + + + template friend struct internal::InnerReducer; + + struct BlockIteratorState { + Index input_dim; + Index output_size; + Index output_count; + }; + + // Returns the Index in the input tensor of the first value that needs to be + // used to compute the reduction at output index "index". + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + if (ReducingInnerMostDims) { + if (static_cast(Layout) == static_cast(ColMajor)) { + return index * m_preservedStrides[0]; + } else { + return index * m_preservedStrides[NumPreservedStrides - 1]; + } + } + // TBD: optimize the case where we preserve the innermost dimensions. + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumOutputDims - 1; i > 0; --i) { + // This is index_i in the output tensor. + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (PreservingInnerMostDims) { + eigen_assert(m_preservedStrides[0] == 1); + startInput += index; + } else { + startInput += index * m_preservedStrides[0]; + } + } else { + for (int i = 0; i < NumOutputDims - 1; ++i) { + // This is index_i in the output tensor. 
+ const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + if (PreservingInnerMostDims) { + eigen_assert(m_preservedStrides[NumPreservedStrides - 1] == 1); + startInput += index; + } else { + startInput += index * m_preservedStrides[NumPreservedStrides - 1]; + } + } + return startInput; + } + + // Bitmap indicating if an input dimension is reduced or not. + array m_reduced; + // Dimensions of the output of the operation. + Dimensions m_dimensions; + // Precomputed strides for the output tensor. + array m_outputStrides; + array, NumOutputDims> m_fastOutputStrides; + array m_preservedStrides; + // Map from output to input dimension index. + array m_output_to_input_dim_map; + // How many values go into each reduction + Index m_numValuesToReduce; + + // Subset of strides of the input tensor for the reduced dimensions. + // Indexed by reduced dimensions. + array m_reducedStrides; + // Size of the input dimensions that are reduced. + // Indexed by reduced dimensions. + array m_reducedDims; + + // Evaluator for the input expression. + TensorEvaluator m_impl; + + // Operation to apply for computing the reduction. 
+ Op m_reducer; + + // For full reductions +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) + static const bool RunningOnGPU = internal::is_same::value; + static const bool RunningOnSycl = false; +#elif defined(EIGEN_USE_SYCL) +static const bool RunningOnSycl = internal::is_same::type, Eigen::SyclDevice>::value; +static const bool RunningOnGPU = false; +#else + static const bool RunningOnGPU = false; + static const bool RunningOnSycl = false; +#endif + EvaluatorPointerType m_result; + + const Device EIGEN_DEVICE_REF m_device; +}; + +template class MakePointer_, typename Device> +struct TensorEvaluator, Device> +: public TensorReductionEvaluatorBase, Device> { + typedef TensorReductionEvaluatorBase, Device> Base; + EIGEN_STRONG_INLINE TensorEvaluator(const typename Base::XprType& op, const Device& device) : Base(op, device){} +}; + + +template class MakePointer_> +struct TensorEvaluator, Eigen::SyclDevice> +: public TensorReductionEvaluatorBase, Eigen::SyclDevice> { + + typedef TensorReductionEvaluatorBase, Eigen::SyclDevice> Base; + EIGEN_STRONG_INLINE TensorEvaluator(const typename Base::XprType& op, const Eigen::SyclDevice& device) : Base(op, device){} + // The coeff function in the base the recursive method which is not an standard layout and cannot be used in the SYCL kernel + //Therefore the coeff function should be overridden by for SYCL kernel + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Base::CoeffReturnType coeff(typename Base::Index index) const { + return *(this->data() + index); + } + // The packet function in the base the recursive method which is not an standard layout and cannot be used in the SYCL kernel + //Therefore the packet function should be overridden by for SYCL kernel + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename Base::PacketReturnType packet(typename Base::Index index) const { + return internal::pload(this->data() + index); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_H diff 
--git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h new file mode 100644 index 0000000..68780cd --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h @@ -0,0 +1,6 @@ + +#if defined(__clang__) || defined(__GNUC__) +#warning "Deprecated header file, please either include the main Eigen/CXX11/Tensor header or the respective TensorReductionGpu.h file" +#endif + +#include "TensorReductionGpu.h" diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h new file mode 100644 index 0000000..db4e8d8 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h @@ -0,0 +1,966 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H +#define EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H + +namespace Eigen { +namespace internal { + + +#if defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) +// Full reducers for GPU, don't vectorize for now + +// Reducer function that enables multiple gpu thread to safely accumulate at the same +// output address. It basically reads the current value of the output variable, and +// attempts to update it with the new value. If in the meantime another gpu thread +// updated the content of the output address it will try again. 
+template +__device__ EIGEN_ALWAYS_INLINE void atomicReduce(T* output, T accum, R& reducer) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + if (sizeof(T) == 4) + { + unsigned int oldval = *reinterpret_cast(output); + unsigned int newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + unsigned int readback; + while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) { + oldval = readback; + newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + } + } + else if (sizeof(T) == 8) { + unsigned long long oldval = *reinterpret_cast(output); + unsigned long long newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + unsigned long long readback; + while ((readback = atomicCAS((unsigned long long*)output, oldval, newval)) != oldval) { + oldval = readback; + newval = oldval; + reducer.reduce(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + } + } + else { + gpu_assert(0 && "Wordsize not supported"); + } +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + +// We extend atomicExch to support extra data types +template +__device__ inline Type atomicExchCustom(Type* address, Type val) { + return atomicExch(address, val); +} + +template <> +__device__ inline double atomicExchCustom(double* address, double val) { + unsigned long long int* address_as_ull = reinterpret_cast(address); + return __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(val))); +} + +#ifdef EIGEN_HAS_GPU_FP16 +template +__device__ inline void atomicReduce(half2* output, half2 accum, R& reducer) { + unsigned int oldval = *reinterpret_cast(output); + unsigned int newval = oldval; + reducer.reducePacket(accum, 
reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + unsigned int readback; + while ((readback = atomicCAS((unsigned int*)output, oldval, newval)) != oldval) { + oldval = readback; + newval = oldval; + reducer.reducePacket(accum, reinterpret_cast(&newval)); + if (newval == oldval) { + return; + } + } +} +// reduction should be associative since reduction is not atomic in wide vector but atomic in half2 operations +template +__device__ inline void atomicReduce(Packet4h2* output, Packet4h2 accum, R& reducer) { + half2* houtput=reinterpret_cast(output); + half2* haccum=reinterpret_cast(&accum); + for(int i=0;i<4;++i){ + atomicReduce(houtput+i,*(haccum+i),reducer); + } +} +#endif // EIGEN_HAS_GPU_FP16 + +template <> +__device__ inline void atomicReduce(float* output, float accum, SumReducer&) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + atomicAdd(output, accum); +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitKernel(const CoeffType val, Index num_preserved_coeffs, CoeffType* output) { + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + const Index num_threads = blockDim.x * gridDim.x; + for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) { + output[i] = val; + } +} + + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernel(Reducer reducer, const Self input, Index num_coeffs, + typename Self::CoeffReturnType* output, unsigned int* semaphore) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + // Initialize the output value + const Index first_index = blockIdx.x * BlockSize * NumPerThread + threadIdx.x; + if (gridDim.x == 1) { + if (first_index == 0) { + *output = reducer.initialize(); + } + } + else { + if 
(threadIdx.x == 0) { + unsigned int block = atomicCAS(semaphore, 0u, 1u); + if (block == 0) { + // We're the first block to run, initialize the output value + atomicExchCustom(output, reducer.initialize()); + __threadfence(); + atomicExch(semaphore, 2u); + } + else { + // Wait for the first block to initialize the output value. + // Use atomicCAS here to ensure that the reads aren't cached + unsigned int val; + do { + val = atomicCAS(semaphore, 2u, 2u); + } + while (val < 2u); + } + } + } + + __syncthreads(); + + eigen_assert(gridDim.x == 1 || *semaphore >= 2u); + + typename Self::CoeffReturnType accum = reducer.initialize(); + Index max_iter = numext::mini(num_coeffs - first_index, NumPerThread*BlockSize); + for (Index i = 0; i < max_iter; i+=BlockSize) { + const Index index = first_index + i; + eigen_assert(index < num_coeffs); + typename Self::CoeffReturnType val = input.m_impl.coeff(index); + reducer.reduce(val, &accum); + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + // use std::is_floating_point to determine the type of reduced_val + // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambguous" error + // and list the float and int versions of __shfl_down as the candidate functions. 
+ if (std::is_floating_point::value) { + reducer.reduce(__shfl_down(static_cast(accum), offset, warpSize), &accum); + } else { + reducer.reduce(__shfl_down(static_cast(accum), offset, warpSize), &accum); + } + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + reducer.reduce(__shfl_down(accum, offset, warpSize), &accum); + #else + reducer.reduce(__shfl_down_sync(0xFFFFFFFF, accum, offset, warpSize), &accum); + #endif + } + + if ((threadIdx.x & (warpSize - 1)) == 0) { + atomicReduce(output, accum, reducer); + } + + if (gridDim.x > 1 && threadIdx.x == 0) { + // Let the last block reset the semaphore + atomicInc(semaphore, gridDim.x + 1); +#if defined(EIGEN_HIPCC) + __threadfence_system(); +#endif + } +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + + +#ifdef EIGEN_HAS_GPU_FP16 +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitFullReduxKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, + packet_traits::type* scratch) { + eigen_assert(blockDim.x == 1); + eigen_assert(gridDim.x == 1); + typedef packet_traits::type packet_type; + Index packet_remainder = + num_coeffs % Index(unpacket_traits::size); + if (packet_remainder != 0) { + half2* h2scratch = reinterpret_cast(scratch); + for (Index i = num_coeffs - packet_remainder; i + 2 <= num_coeffs; i += 2) { + *h2scratch = + __halves2half2(input.m_impl.coeff(i), input.m_impl.coeff(i + 1)); + h2scratch++; + } + if ((num_coeffs & 1) != 0) { + half lastCoeff = input.m_impl.coeff(num_coeffs - 1); + *h2scratch = __halves2half2(lastCoeff, reducer.initialize()); + } + } else { + *scratch = reducer.template initializePacket(); + } +} + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionInitKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, half* output) { + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + const Index num_threads = blockDim.x * 
gridDim.x; + typedef typename packet_traits::type PacketType; + + const Index num_packets = + num_coeffs / Index(unpacket_traits::size); + PacketType* p_output = reinterpret_cast(output); + for (Index i = thread_id; i < num_packets; i += num_threads) { + p_output[i] = reducer.template initializePacket(); + } + Index packet_remainder = + num_coeffs % Index(unpacket_traits::size); + if (thread_id < packet_remainder) { + output[num_coeffs - packet_remainder + thread_id] = reducer.initialize(); + } +} + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void FullReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs, + half* output, packet_traits::type* scratch) { + typedef typename packet_traits::type PacketType; + const int packet_width = unpacket_traits::size; + eigen_assert(NumPerThread % packet_width == 0); + const Index first_index = + blockIdx.x * BlockSize * NumPerThread + packet_width * threadIdx.x; + + // Initialize the output value if it wasn't initialized by the ReductionInitKernel + + if (gridDim.x == 1) { + if (first_index == 0) { + int rem = num_coeffs % packet_width; + if (rem != 0) { + half2* p_scratch = reinterpret_cast(scratch); + *scratch = reducer.template initializePacket(); + for (int i = 0; i < rem / 2; i++) { + *p_scratch = __halves2half2( + input.m_impl.coeff(num_coeffs - packet_width + 2 * i), + input.m_impl.coeff(num_coeffs - packet_width + 2 * i + 1)); + p_scratch++; + } + if ((num_coeffs & 1) != 0) { + half last = input.m_impl.coeff(num_coeffs - 1); + *p_scratch = __halves2half2(last, reducer.initialize()); + } + } else { + *scratch = reducer.template initializePacket(); + } + } + __syncthreads(); + } + + PacketType accum = reducer.template initializePacket(); + const Index max_iter = + numext::mini((num_coeffs - first_index) / packet_width, + NumPerThread * BlockSize / packet_width); + for (Index i = 0; i < max_iter; i += BlockSize) { + const Index index = first_index + packet_width * i; + eigen_assert(index + 
packet_width < num_coeffs); + PacketType val = input.m_impl.template packet(index); + reducer.reducePacket(val, &accum); + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + PacketType r1; + half2* hr = reinterpret_cast(&r1); + half2* hacc = reinterpret_cast(&accum); + for (int i = 0; i < packet_width / 2; i++) { + // FIXME : remove this workaround once we have native half/half2 support for __shfl_down + union { int i; half2 h; } wka_in, wka_out; + wka_in.h = hacc[i]; + wka_out.i = __shfl_down(wka_in.i, offset, warpSize); + hr[i] = wka_out.h; + } + reducer.reducePacket(r1, &accum); + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + PacketType r1; + half2* hr = reinterpret_cast(&r1); + half2* hacc = reinterpret_cast(&accum); + for (int i = 0; i < packet_width / 2; i++) { + hr[i] = __shfl_down(hacc[i], offset, warpSize); + } + reducer.reducePacket(r1, &accum); + #else + PacketType r1; + half2* hr = reinterpret_cast(&r1); + half2* hacc = reinterpret_cast(&accum); + for (int i = 0; i < packet_width / 2; i++) { + hr[i] = __shfl_down_sync(0xFFFFFFFF, hacc[i], (unsigned)offset, warpSize); + } + reducer.reducePacket(r1, &accum); + + #endif + } + + if ((threadIdx.x & (warpSize - 1)) == 0) { + atomicReduce(scratch, accum, reducer); + } + + __syncthreads(); + half2* rv1 = reinterpret_cast(scratch); + if (packet_width > 2) { + reducer.reducePacket(rv1[2], rv1); + reducer.reducePacket(rv1[3], rv1 + 1); + reducer.reducePacket(rv1[1], rv1); + } + if (gridDim.x == 1) { + if (first_index == 0) { + half tmp = __low2half(*rv1); + reducer.reduce(__high2half(*rv1), &tmp); + *output = tmp; + } + } +} + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ReductionCleanupKernelHalfFloat(Op reducer, half* output, packet_traits::type* scratch) { + eigen_assert(threadIdx.x == 1); + half2* pscratch = reinterpret_cast(scratch); + half tmp = __float2half(0.f); + typedef packet_traits::type packet_type; + for (int i = 
0; i < unpacket_traits::size; i += 2) { + reducer.reduce(__low2half(*pscratch), &tmp); + reducer.reduce(__high2half(*pscratch), &tmp); + pscratch++; + } + *output = tmp; +} + +#endif // EIGEN_HAS_GPU_FP16 + +template +struct FullReductionLauncher { + static void run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index) { + gpu_assert(false && "Should only be called on doubles, floats and half floats"); + } +}; + +// Specialization for float and double +template +struct FullReductionLauncher< + Self, Op, OutputType, PacketAccess, + typename internal::enable_if< + internal::is_same::value || + internal::is_same::value, + void>::type> { + static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs) { + + typedef typename Self::Index Index; + const int block_size = 256; + const int num_per_thread = 128; + const int num_blocks = divup(num_coeffs, block_size * num_per_thread); + + unsigned int* semaphore = NULL; + if (num_blocks > 1) { + semaphore = device.semaphore(); + } + + LAUNCH_GPU_KERNEL((FullReductionKernel), + num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, semaphore); + } +}; + +#ifdef EIGEN_HAS_GPU_FP16 +template +struct FullReductionLauncher { + static void run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index) { + gpu_assert(false && "Should not be called since there is no packet accessor"); + } +}; + +template +struct FullReductionLauncher { + static void run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs) { + typedef typename Self::Index Index; + typedef typename packet_traits::type PacketType; + + const int block_size = 256; + const int num_per_thread = 128; + const int num_blocks = divup(num_coeffs, block_size * num_per_thread); + PacketType* scratch = static_cast(device.scratchpad()); + // half2* scratch = static_cast(device.scratchpad()); + + if (num_blocks > 1) { + // We initialize 
the output and the scrathpad outside the reduction kernel when we can't be sure that there + // won't be a race conditions between multiple thread blocks. + LAUNCH_GPU_KERNEL((ReductionInitFullReduxKernelHalfFloat), + 1, 1, 0, device, reducer, self, num_coeffs, scratch); + } + + LAUNCH_GPU_KERNEL((FullReductionKernelHalfFloat), + num_blocks, block_size, 0, device, reducer, self, num_coeffs, output, scratch); + + if (num_blocks > 1) { + LAUNCH_GPU_KERNEL((ReductionCleanupKernelHalfFloat), + 1, 1, 0, device, reducer, output, scratch); + } + } +}; +#endif // EIGEN_HAS_GPU_FP16 + + +template +struct FullReducer { + // Unfortunately nvidia doesn't support well exotic types such as complex, + // so reduce the scope of the optimized version of the code to the simple cases + // of doubles, floats and half floats +#ifdef EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value || + (internal::is_same::value && reducer_traits::PacketAccess)); +#else // EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value); +#endif // EIGEN_HAS_GPU_FP16 + + template + static void run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output) { + gpu_assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats"); + const Index num_coeffs = array_prod(self.m_impl.dimensions()); + // Don't crash when we're called with an input tensor of size 0. 
+ if (num_coeffs == 0) { + return; + } + + FullReductionLauncher::PacketAccess>::run(self, reducer, device, output, num_coeffs); + } +}; + + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, + typename Self::CoeffReturnType* output) { +#if (defined(EIGEN_HIP_DEVICE_COMPILE) && defined(__HIP_ARCH_HAS_WARP_SHUFFLE__)) || (EIGEN_CUDA_ARCH >= 300) + typedef typename Self::CoeffReturnType Type; + eigen_assert(blockDim.y == 1); + eigen_assert(blockDim.z == 1); + eigen_assert(gridDim.y == 1); + eigen_assert(gridDim.z == 1); + + const int unroll_times = 16; + eigen_assert(NumPerThread % unroll_times == 0); + + const Index input_col_blocks = divup(num_coeffs_to_reduce, blockDim.x * NumPerThread); + const Index num_input_blocks = input_col_blocks * num_preserved_coeffs; + + const Index num_threads = blockDim.x * gridDim.x; + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + // Initialize the output values if they weren't initialized by the ReductionInitKernel + if (gridDim.x == 1) { + for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) { + output[i] = reducer.initialize(); + } + __syncthreads(); + } + + for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) { + const Index row = i / input_col_blocks; + + if (row < num_preserved_coeffs) { + const Index col_block = i % input_col_blocks; + const Index col_begin = col_block * blockDim.x * NumPerThread + threadIdx.x; + + Type reduced_val = reducer.initialize(); + + for (Index j = 0; j < NumPerThread; j += unroll_times) { + const Index last_col = col_begin + blockDim.x * (j + unroll_times - 1); + if (last_col >= num_coeffs_to_reduce) { + for (Index col = col_begin + blockDim.x * j; col < num_coeffs_to_reduce; col += blockDim.x) { + const Type val = input.m_impl.coeff(row * num_coeffs_to_reduce + col); + reducer.reduce(val, &reduced_val); + } + break; + } else { + // 
Faster version of the loop with no branches after unrolling. +#pragma unroll + for (int k = 0; k < unroll_times; ++k) { + const Index col = col_begin + blockDim.x * (j + k); + reducer.reduce(input.m_impl.coeff(row * num_coeffs_to_reduce + col), &reduced_val); + } + } + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + // use std::is_floating_point to determine the type of reduced_val + // This is needed because when Type == double, hipcc will give a "call to __shfl_down is ambguous" error + // and list the float and int versions of __shfl_down as the candidate functions. + if (std::is_floating_point::value) { + reducer.reduce(__shfl_down(static_cast(reduced_val), offset), &reduced_val); + } else { + reducer.reduce(__shfl_down(static_cast(reduced_val), offset), &reduced_val); + } + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + reducer.reduce(__shfl_down(reduced_val, offset), &reduced_val); + #else + reducer.reduce(__shfl_down_sync(0xFFFFFFFF, reduced_val, offset), &reduced_val); + #endif + } + + if ((threadIdx.x & (warpSize - 1)) == 0) { + atomicReduce(&(output[row]), reduced_val, reducer); + } + } + } +#else // EIGEN_CUDA_ARCH >= 300 + gpu_assert(0 && "Shouldn't be called on unsupported device"); +#endif // EIGEN_CUDA_ARCH >= 300 +} + +#ifdef EIGEN_HAS_GPU_FP16 + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void InnerReductionKernelHalfFloat(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, + half* output) { + eigen_assert(blockDim.y == 1); + eigen_assert(blockDim.z == 1); + eigen_assert(gridDim.y == 1); + eigen_assert(gridDim.z == 1); + + typedef typename packet_traits::type PacketType; + const int packet_width = unpacket_traits::size; + const int unroll_times = 16 / packet_width; + eigen_assert(NumPerThread % unroll_times == 0); + eigen_assert(unroll_times % 2 == 0); + + const Index input_col_blocks = divup(num_coeffs_to_reduce, blockDim.x 
* NumPerThread * 2); + const Index num_input_blocks = divup(input_col_blocks * num_preserved_coeffs, 2); + + const Index num_threads = blockDim.x * gridDim.x; + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + + // Initialize the output values if they weren't initialized by the ReductionInitKernel + if (gridDim.x == 1) { + Index i = packet_width * thread_id; + for (; i + packet_width <= num_preserved_coeffs; + i += packet_width * num_threads) { + PacketType* poutput = reinterpret_cast(output + i); + *poutput = reducer.template initializePacket(); + } + if (i < num_preserved_coeffs) { + output[i] = reducer.initialize(); + } + __syncthreads(); + } + + for (Index i = blockIdx.x; i < num_input_blocks; i += gridDim.x) { + const Index row = 2 * (i / input_col_blocks); // everybody takes 2 rows + + if (row + 1 < num_preserved_coeffs) { + const Index col_block = i % input_col_blocks; + const Index col_begin = + packet_width * (col_block * blockDim.x * NumPerThread + threadIdx.x); + + PacketType reduced_val1 = reducer.template initializePacket(); + PacketType reduced_val2 = reducer.template initializePacket(); + + for (Index j = 0; j < NumPerThread; j += unroll_times) { + const Index last_col = + col_begin + blockDim.x * (j + unroll_times - 1) * packet_width; + if (last_col >= num_coeffs_to_reduce) { + Index col = col_begin + blockDim.x * j; + for (; col + packet_width <= num_coeffs_to_reduce; + col += blockDim.x) { + const PacketType val1 = input.m_impl.template packet( + row * num_coeffs_to_reduce + col); + reducer.reducePacket(val1, &reduced_val1); + const PacketType val2 = input.m_impl.template packet( + (row + 1) * num_coeffs_to_reduce + col); + reducer.reducePacket(val2, &reduced_val2); + } + if (col < num_coeffs_to_reduce) { + PacketType r1 = reducer.template initializePacket(); + PacketType r2 = reducer.template initializePacket(); + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + while (col + 1 < num_coeffs_to_reduce) { + 
*hr1 = __halves2half2( + input.m_impl.coeff(row * num_coeffs_to_reduce + col), + input.m_impl.coeff(row * num_coeffs_to_reduce + col + 1)); + *hr2 = __halves2half2( + input.m_impl.coeff((row + 1) * num_coeffs_to_reduce + col), + input.m_impl.coeff((row + 1) * num_coeffs_to_reduce + col + + 1)); + hr1++; + hr2++; + col += 2; + } + if (col < num_coeffs_to_reduce) { + // Peel; + const half last1 = + input.m_impl.coeff(row * num_coeffs_to_reduce + col); + *hr1 = __halves2half2(last1, reducer.initialize()); + const half last2 = + input.m_impl.coeff((row + 1) * num_coeffs_to_reduce + col); + *hr2 = __halves2half2(last2, reducer.initialize()); + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + } + break; + } else { + // Faster version of the loop with no branches after unrolling. +#pragma unroll + for (int k = 0; k < unroll_times; ++k) { + const Index col = col_begin + blockDim.x * (j + k) * packet_width; + reducer.reducePacket(input.m_impl.template packet( + row * num_coeffs_to_reduce + col), + &reduced_val1); + reducer.reducePacket(input.m_impl.template packet( + (row + 1) * num_coeffs_to_reduce + col), + &reduced_val2); + } + } + } + +#pragma unroll + for (int offset = warpSize/2; offset > 0; offset /= 2) { + #if defined(EIGEN_HIPCC) + PacketType r1; + PacketType r2; + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + half2* rv1 = reinterpret_cast(&reduced_val1); + half2* rv2 = reinterpret_cast(&reduced_val2); + for (int i = 0; i < packet_width / 2; i++) { + // FIXME : remove this workaround once we have native half/half2 support for __shfl_down + union { int i; half2 h; } wka_in1, wka_out1; + wka_in1.h = rv1[i]; + wka_out1.i = __shfl_down(wka_in1.i, offset, warpSize); + hr1[i] = wka_out1.h; + + union { int i; half2 h; } wka_in2, wka_out2; + wka_in2.h = rv2[i]; + wka_out2.i = __shfl_down(wka_in2.i, offset, warpSize); + hr2[i] = wka_out2.h; + } + reducer.reducePacket(r1, &reduced_val1); + 
reducer.reducePacket(r2, &reduced_val2); + #elif defined(EIGEN_CUDA_SDK_VER) && EIGEN_CUDA_SDK_VER < 90000 + PacketType r1; + PacketType r2; + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + half2* rv1 = reinterpret_cast(&reduced_val1); + half2* rv2 = reinterpret_cast(&reduced_val2); + for (int i = 0; i < packet_width / 2; i++) { + hr1[i] = __shfl_down(rv1[i], offset, warpSize); + hr2[i] = __shfl_down(rv2[i], offset, warpSize); + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + #else + PacketType r1; + PacketType r2; + half2* hr1 = reinterpret_cast(&r1); + half2* hr2 = reinterpret_cast(&r2); + half2* rr1 = reinterpret_cast(&reduced_val1); + half2* rr2 = reinterpret_cast(&reduced_val2); + for (int i = 0; i < packet_width / 2; i++) { + hr1[i] = + __shfl_down_sync(0xFFFFFFFF, rr1[i], (unsigned)offset, warpSize); + hr2[i] = + __shfl_down_sync(0xFFFFFFFF, rr2[i], (unsigned)offset, warpSize); + } + reducer.reducePacket(r1, &reduced_val1); + reducer.reducePacket(r2, &reduced_val2); + + #endif + } + half2* rv1 = reinterpret_cast(&reduced_val1); + half2* rv2 = reinterpret_cast(&reduced_val2); + half2 val; + if (packet_width > 2) { + reducer.reducePacket(rv1[2], rv1); + reducer.reducePacket(rv1[3], rv1 + 1); + reducer.reducePacket(rv1[1], rv1); + reducer.reducePacket(rv2[2], rv2); + reducer.reducePacket(rv2[3], rv2 + 1); + reducer.reducePacket(rv2[1], rv2); + } + half val1 = __low2half(*rv1); + reducer.reduce(__high2half(*rv1), &val1); + half val2 = __low2half(*rv2); + reducer.reduce(__high2half(*rv2), &val2); + val = __halves2half2(val1, val2); + if ((threadIdx.x & (warpSize - 1)) == 0) { + half* loc = output + row; + atomicReduce((half2*)loc, val, reducer); + } + } + } +} + +#endif // EIGEN_HAS_GPU_FP16 + +template +struct InnerReductionLauncher { + static EIGEN_DEVICE_FUNC bool run(const Self&, Op&, const GpuDevice&, OutputType*, typename Self::Index, typename Self::Index) { + gpu_assert(false && "Should 
only be called to reduce doubles, floats and half floats on a gpu device"); + return true; + } +}; + +// Specialization for float and double +template +struct InnerReductionLauncher< + Self, Op, OutputType, PacketAccess, + typename internal::enable_if< + internal::is_same::value || + internal::is_same::value, + void>::type> { + static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + typedef typename Self::Index Index; + + const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals; + const int block_size = 256; + const int num_per_thread = 128; + const int dyn_blocks = divup(num_coeffs, block_size * num_per_thread); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + + if (num_blocks > 1) { + // We initialize the outputs outside the reduction kernel when we can't be sure that there + // won't be a race conditions between multiple thread blocks. 
+ const int dyn_blocks = divup(num_preserved_vals, 1024); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / 1024; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + LAUNCH_GPU_KERNEL((ReductionInitKernel), + num_blocks, 1024, 0, device, reducer.initialize(), + num_preserved_vals, output); + } + + LAUNCH_GPU_KERNEL((InnerReductionKernel), + num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output); + + return false; + } +}; + +#ifdef EIGEN_HAS_GPU_FP16 +template +struct InnerReductionLauncher { + static bool run(const Self&, Op&, const GpuDevice&, half*, typename Self::Index, typename Self::Index) { + gpu_assert(false && "Should not be called since there is no packet accessor"); + return true; + } +}; + +template +struct InnerReductionLauncher { + static bool run(const Self& self, Op& reducer, const GpuDevice& device, half* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + typedef typename Self::Index Index; + + if (num_preserved_vals % 2 != 0) { + // Not supported yet, revert to the slower code path + return true; + } + + const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals; + const int block_size = /*256*/128; + const int num_per_thread = /*128*/64; + const int dyn_blocks = divup(num_coeffs, block_size * num_per_thread); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + + if (num_blocks > 1) { + // We initialize the outputs outside the reduction kernel when we can't be sure that there + // won't be a race conditions between multiple thread blocks. 
+ LAUNCH_GPU_KERNEL((ReductionInitKernelHalfFloat), + 1, 1, 0, device, reducer, self, num_preserved_vals, output); + } + + LAUNCH_GPU_KERNEL((InnerReductionKernelHalfFloat), + num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output); + + return false; + } +}; +#endif // EIGEN_HAS_GPU_FP16 + + +template +struct InnerReducer { + // Unfortunately nvidia doesn't support well exotic types such as complex, + // so reduce the scope of the optimized version of the code to the simple case + // of floats and half floats. +#ifdef EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value || + (internal::is_same::value && reducer_traits::PacketAccess)); +#else // EIGEN_HAS_GPU_FP16 + static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value); +#endif // EIGEN_HAS_GPU_FP16 + + template + static bool run(const Self& self, Op& reducer, const GpuDevice& device, OutputType* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + gpu_assert(HasOptimizedImplementation && "Should only be called on doubles, floats or half floats"); + const Index num_coeffs = array_prod(self.m_impl.dimensions()); + // Don't crash when we're called with an input tensor of size 0. + if (num_coeffs == 0) { + return true; + } + // It's faster to use the usual code. 
+ if (num_coeffs_to_reduce <= 128) { + return true; + } + + return InnerReductionLauncher::PacketAccess>::run(self, reducer, device, output, num_coeffs_to_reduce, num_preserved_vals); + } +}; + +template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void OuterReductionKernel(Reducer reducer, const Self input, Index num_coeffs_to_reduce, Index num_preserved_coeffs, + typename Self::CoeffReturnType* output) { + const Index num_threads = blockDim.x * gridDim.x; + const Index thread_id = blockIdx.x * blockDim.x + threadIdx.x; + // Initialize the output values if they weren't initialized by the ReductionInitKernel + if (gridDim.x == 1) { + for (Index i = thread_id; i < num_preserved_coeffs; i += num_threads) { + output[i] = reducer.initialize(); + } + __syncthreads(); + } + + // Do the reduction. + const Index max_iter = num_preserved_coeffs * divup(num_coeffs_to_reduce, NumPerThread); + for (Index i = thread_id; i < max_iter; i += num_threads) { + const Index input_col = i % num_preserved_coeffs; + const Index input_row = (i / num_preserved_coeffs) * NumPerThread; + typename Self::CoeffReturnType reduced_val = reducer.initialize(); + const Index max_row = numext::mini(input_row + NumPerThread, num_coeffs_to_reduce); + for (Index j = input_row; j < max_row; j++) { + typename Self::CoeffReturnType val = input.m_impl.coeff(j * num_preserved_coeffs + input_col); + reducer.reduce(val, &reduced_val); + } + atomicReduce(&(output[input_col]), reduced_val, reducer); + } +} + + +template +struct OuterReducer { + // Unfortunately nvidia doesn't support well exotic types such as complex, + // so reduce the scope of the optimized version of the code to the simple case + // of floats. 
+ static const bool HasOptimizedImplementation = !Self::ReducerTraits::IsStateful && + (internal::is_same::value || + internal::is_same::value); + template + static + #if !defined(EIGEN_HIPCC) + // FIXME : leaving this EIGEN_DEVICE_FUNC in, results in the following runtime error + // (in the cxx11_tensor_reduction_gpu test) + // + // terminate called after throwing an instance of 'std::runtime_error' + // what(): No device code available for function: _ZN5Eigen8internal20OuterReductionKernelIL... + // + // don't know why this happens (and why is it a runtime error instead of a compile time error) + // + // this will be fixed by HIP PR#457 + EIGEN_DEVICE_FUNC + #endif + bool run(const Self&, Op&, const Device&, OutputType*, typename Self::Index, typename Self::Index) { + gpu_assert(false && "Should only be called to reduce doubles or floats on a gpu device"); + return true; + } + + static bool run(const Self& self, Op& reducer, const GpuDevice& device, float* output, typename Self::Index num_coeffs_to_reduce, typename Self::Index num_preserved_vals) { + typedef typename Self::Index Index; + + // It's faster to use the usual code. + if (num_coeffs_to_reduce <= 32) { + return true; + } + + const Index num_coeffs = num_coeffs_to_reduce * num_preserved_vals; + const int block_size = 256; + const int num_per_thread = 16; + const int dyn_blocks = divup(num_coeffs, block_size * num_per_thread); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / block_size; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + + if (num_blocks > 1) { + // We initialize the outputs in the reduction kernel itself when we don't have to worry + // about race conditions between multiple thread blocks. 
+ const int dyn_blocks = divup(num_preserved_vals, 1024); + const int max_blocks = device.getNumGpuMultiProcessors() * + device.maxGpuThreadsPerMultiProcessor() / 1024; + const int num_blocks = numext::mini(max_blocks, dyn_blocks); + LAUNCH_GPU_KERNEL((ReductionInitKernel), + num_blocks, 1024, 0, device, reducer.initialize(), + num_preserved_vals, output); + } + + LAUNCH_GPU_KERNEL((OuterReductionKernel), + num_blocks, block_size, 0, device, reducer, self, num_coeffs_to_reduce, num_preserved_vals, output); + + return false; + } +}; + +#endif // defined(EIGEN_USE_GPU) && defined(EIGEN_GPUCC) + + +} // end namespace internal +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REDUCTION_GPU_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h new file mode 100644 index 0000000..474eba0 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReductionSycl.h @@ -0,0 +1,582 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * TensorReductionSycl.h + * + * \brief: + * This is the specialization of the reduction operation. Two phase reduction approach + * is used since the GPU does not have Global Synchronization for global memory among + * different work-group/thread block. To solve the problem, we need to create two kernels + * to reduce the data, where the first kernel reduce the data locally and each local + * workgroup/thread-block save the input data into global memory. 
In the second phase (global reduction) + * one work-group uses one work-group/thread-block to reduces the intermediate data into one single element. + * Here is an NVIDIA presentation explaining the optimized two phase reduction algorithm on GPU: + * https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf + * + *****************************************************************/ + +#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP +#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP +namespace Eigen { +namespace TensorSycl { +namespace internal { + +template +struct OpDefiner { + typedef typename Vectorise::PacketReturnType PacketReturnType; + typedef Op type; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE type get_op(Op &op) { return op; } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType finalise_op(const PacketReturnType &accumulator, + const Index &) { + return accumulator; + } +}; + +template +struct OpDefiner, CoeffReturnType, Index, false> { + typedef Eigen::internal::SumReducer type; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE type get_op(Eigen::internal::MeanReducer &) { + return type(); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType finalise_op(const CoeffReturnType &accumulator, + const Index &scale) { + ::Eigen::internal::scalar_quotient_op quotient_op; + return quotient_op(accumulator, CoeffReturnType(scale)); + } +}; + +template +struct OpDefiner, CoeffReturnType, Index, true> { + typedef typename Vectorise::PacketReturnType PacketReturnType; + typedef Eigen::internal::SumReducer type; + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE type get_op(Eigen::internal::MeanReducer &) { + return type(); + } + + static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType finalise_op(const PacketReturnType &accumulator, + const Index &scale) { + return ::Eigen::internal::pdiv(accumulator, ::Eigen::internal::pset1(CoeffReturnType(scale))); + } +}; + +template +struct 
SecondStepFullReducer { + typedef cl::sycl::accessor + LocalAccessor; + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + LocalAccessor scratch; + InputAccessor aI; + OutputAccessor outAcc; + Op op; + SecondStepFullReducer(LocalAccessor scratch_, InputAccessor aI_, OutputAccessor outAcc_, OpType op_) + : scratch(scratch_), aI(aI_), outAcc(outAcc_), op(OpDef::get_op(op_)) {} + + void operator()(cl::sycl::nd_item<1> itemID) { + // Our empirical research shows that the best performance will be achieved + // when there is only one element per thread to reduce in the second step. + // in this step the second step reduction time is almost negligible. + // Hence, in the second step of reduction the input size is fixed to the + // local size, thus, there is only one element read per thread. The + // algorithm must be changed if the number of reduce per thread in the + // second step is greater than 1. Otherwise, the result will be wrong. + const Index localid = itemID.get_local_id(0); + auto aInPtr = aI.get_pointer() + localid; + auto aOutPtr = outAcc.get_pointer(); + CoeffReturnType *scratchptr = scratch.get_pointer(); + CoeffReturnType accumulator = *aInPtr; + + scratchptr[localid] = op.finalize(accumulator); + for (Index offset = itemID.get_local_range(0) / 2; offset > 0; offset /= 2) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + op.reduce(scratchptr[localid + offset], &accumulator); + scratchptr[localid] = op.finalize(accumulator); + } + } + if (localid == 0) *aOutPtr = op.finalize(accumulator); + } +}; + +// Full reduction first phase. In this version the vectorization is true and the reduction accept +// any generic reducerOp e.g( max, min, sum, mean, iamax, iamin, etc ). 
+template +class FullReductionKernelFunctor { + public: + typedef typename Evaluator::CoeffReturnType CoeffReturnType; + typedef typename Evaluator::Index Index; + typedef OpDefiner + OpDef; + + typedef typename OpDef::type Op; + typedef typename Evaluator::EvaluatorPointerType EvaluatorPointerType; + typedef typename Evaluator::PacketReturnType PacketReturnType; + typedef + typename ::Eigen::internal::conditional<(Evaluator::ReducerTraits::PacketAccess & Evaluator::InputPacketAccess), + PacketReturnType, CoeffReturnType>::type OutType; + typedef cl::sycl::accessor + LocalAccessor; + LocalAccessor scratch; + Evaluator evaluator; + EvaluatorPointerType final_output; + Index rng; + Op op; + + FullReductionKernelFunctor(LocalAccessor scratch_, Evaluator evaluator_, EvaluatorPointerType final_output_, + Index rng_, OpType op_) + : scratch(scratch_), evaluator(evaluator_), final_output(final_output_), rng(rng_), op(OpDef::get_op(op_)) {} + + void operator()(cl::sycl::nd_item<1> itemID) { compute_reduction(itemID); } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type compute_reduction( + const cl::sycl::nd_item<1> &itemID) { + auto output_ptr = final_output.get_pointer(); + Index VectorizedRange = (rng / Evaluator::PacketSize) * Evaluator::PacketSize; + Index globalid = itemID.get_global_id(0); + Index localid = itemID.get_local_id(0); + Index step = Evaluator::PacketSize * itemID.get_global_range(0); + Index start = Evaluator::PacketSize * globalid; + // vectorizable parts + PacketReturnType packetAccumulator = op.template initializePacket(); + for (Index i = start; i < VectorizedRange; i += step) { + op.template reducePacket(evaluator.impl().template packet(i), &packetAccumulator); + } + globalid += VectorizedRange; + // non vectorizable parts + for (Index i = globalid; i < rng; i += itemID.get_global_range(0)) { + op.template reducePacket( + ::Eigen::TensorSycl::internal::PacketWrapper::convert_to_packet_type( + 
evaluator.impl().coeff(i), op.initialize()), + &packetAccumulator); + } + scratch[localid] = packetAccumulator = + OpDef::finalise_op(op.template finalizePacket(packetAccumulator), rng); + // reduction parts // Local size is always power of 2 + EIGEN_UNROLL_LOOP + for (Index offset = local_range / 2; offset > 0; offset /= 2) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + op.template reducePacket(scratch[localid + offset], &packetAccumulator); + scratch[localid] = op.template finalizePacket(packetAccumulator); + } + } + if (localid == 0) { + output_ptr[itemID.get_group(0)] = + op.finalizeBoth(op.initialize(), op.template finalizePacket(packetAccumulator)); + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE typename ::Eigen::internal::enable_if::type compute_reduction( + const cl::sycl::nd_item<1> &itemID) { + auto output_ptr = final_output.get_pointer(); + Index globalid = itemID.get_global_id(0); + Index localid = itemID.get_local_id(0); + // vectorizable parts + CoeffReturnType accumulator = op.initialize(); + // non vectorizable parts + for (Index i = globalid; i < rng; i += itemID.get_global_range(0)) { + op.reduce(evaluator.impl().coeff(i), &accumulator); + } + scratch[localid] = accumulator = OpDef::finalise_op(op.finalize(accumulator), rng); + + // reduction parts. 
the local size is always power of 2 + EIGEN_UNROLL_LOOP + for (Index offset = local_range / 2; offset > 0; offset /= 2) { + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (localid < offset) { + op.reduce(scratch[localid + offset], &accumulator); + scratch[localid] = op.finalize(accumulator); + } + } + if (localid == 0) { + output_ptr[itemID.get_group(0)] = op.finalize(accumulator); + } + } +}; + +template +class GenericNondeterministicReducer { + public: + typedef typename Evaluator::CoeffReturnType CoeffReturnType; + typedef typename Evaluator::EvaluatorPointerType EvaluatorPointerType; + typedef typename Evaluator::Index Index; + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + template + GenericNondeterministicReducer(Scratch, Evaluator evaluator_, EvaluatorPointerType output_accessor_, OpType functor_, + Index range_, Index num_values_to_reduce_) + : evaluator(evaluator_), + output_accessor(output_accessor_), + functor(OpDef::get_op(functor_)), + range(range_), + num_values_to_reduce(num_values_to_reduce_) {} + + void operator()(cl::sycl::nd_item<1> itemID) { + auto output_accessor_ptr = output_accessor.get_pointer(); + /// const cast added as a naive solution to solve the qualifier drop error + Index globalid = static_cast(itemID.get_global_linear_id()); + if (globalid < range) { + CoeffReturnType accum = functor.initialize(); + Eigen::internal::GenericDimReducer::reduce( + evaluator, evaluator.firstInput(globalid), functor, &accum); + output_accessor_ptr[globalid] = OpDef::finalise_op(functor.finalize(accum), num_values_to_reduce); + } + } + + private: + Evaluator evaluator; + EvaluatorPointerType output_accessor; + Op functor; + Index range; + Index num_values_to_reduce; +}; + +enum class reduction_dim { inner_most, outer_most }; +// default is preserver +template +struct PartialReductionKernel { + typedef typename Evaluator::CoeffReturnType CoeffReturnType; + typedef typename Evaluator::EvaluatorPointerType 
EvaluatorPointerType; + typedef typename Evaluator::Index Index; + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + typedef cl::sycl::accessor + ScratchAcc; + ScratchAcc scratch; + Evaluator evaluator; + EvaluatorPointerType output_accessor; + Op op; + const Index preserve_elements_num_groups; + const Index reduce_elements_num_groups; + const Index num_coeffs_to_preserve; + const Index num_coeffs_to_reduce; + + PartialReductionKernel(ScratchAcc scratch_, Evaluator evaluator_, EvaluatorPointerType output_accessor_, OpType op_, + const Index preserve_elements_num_groups_, const Index reduce_elements_num_groups_, + const Index num_coeffs_to_preserve_, const Index num_coeffs_to_reduce_) + : scratch(scratch_), + evaluator(evaluator_), + output_accessor(output_accessor_), + op(OpDef::get_op(op_)), + preserve_elements_num_groups(preserve_elements_num_groups_), + reduce_elements_num_groups(reduce_elements_num_groups_), + num_coeffs_to_preserve(num_coeffs_to_preserve_), + num_coeffs_to_reduce(num_coeffs_to_reduce_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void element_wise_reduce(Index globalRId, Index globalPId, + CoeffReturnType &accumulator) { + if (globalPId >= num_coeffs_to_preserve) { + return; + } + Index global_offset = rt == reduction_dim::outer_most ? globalPId + (globalRId * num_coeffs_to_preserve) + : globalRId + (globalPId * num_coeffs_to_reduce); + Index localOffset = globalRId; + + const Index per_thread_local_stride = PannelParameters::LocalThreadSizeR * reduce_elements_num_groups; + const Index per_thread_global_stride = + rt == reduction_dim::outer_most ? 
num_coeffs_to_preserve * per_thread_local_stride : per_thread_local_stride; + for (Index i = globalRId; i < num_coeffs_to_reduce; i += per_thread_local_stride) { + op.reduce(evaluator.impl().coeff(global_offset), &accumulator); + localOffset += per_thread_local_stride; + global_offset += per_thread_global_stride; + } + } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + const Index linearLocalThreadId = itemID.get_local_id(0); + Index pLocalThreadId = rt == reduction_dim::outer_most ? linearLocalThreadId % PannelParameters::LocalThreadSizeP + : linearLocalThreadId / PannelParameters::LocalThreadSizeR; + Index rLocalThreadId = rt == reduction_dim::outer_most ? linearLocalThreadId / PannelParameters::LocalThreadSizeP + : linearLocalThreadId % PannelParameters::LocalThreadSizeR; + const Index pGroupId = rt == reduction_dim::outer_most ? itemID.get_group(0) % preserve_elements_num_groups + : itemID.get_group(0) / reduce_elements_num_groups; + const Index rGroupId = rt == reduction_dim::outer_most ? itemID.get_group(0) / preserve_elements_num_groups + : itemID.get_group(0) % reduce_elements_num_groups; + + Index globalPId = pGroupId * PannelParameters::LocalThreadSizeP + pLocalThreadId; + const Index globalRId = rGroupId * PannelParameters::LocalThreadSizeR + rLocalThreadId; + auto scratchPtr = scratch.get_pointer().get(); + auto outPtr = + output_accessor.get_pointer() + (reduce_elements_num_groups > 1 ? 
rGroupId * num_coeffs_to_preserve : 0); + CoeffReturnType accumulator = op.initialize(); + + element_wise_reduce(globalRId, globalPId, accumulator); + + accumulator = OpDef::finalise_op(op.finalize(accumulator), num_coeffs_to_reduce); + scratchPtr[pLocalThreadId + rLocalThreadId * (PannelParameters::LocalThreadSizeP + PannelParameters::BC)] = + accumulator; + if (rt == reduction_dim::inner_most) { + pLocalThreadId = linearLocalThreadId % PannelParameters::LocalThreadSizeP; + rLocalThreadId = linearLocalThreadId / PannelParameters::LocalThreadSizeP; + globalPId = pGroupId * PannelParameters::LocalThreadSizeP + pLocalThreadId; + } + + /* Apply the reduction operation between the current local + * id and the one on the other half of the vector. */ + auto out_scratch_ptr = + scratchPtr + (pLocalThreadId + (rLocalThreadId * (PannelParameters::LocalThreadSizeP + PannelParameters::BC))); + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (rt == reduction_dim::inner_most) { + accumulator = *out_scratch_ptr; + } + // The Local LocalThreadSizeR is always power of 2 + EIGEN_UNROLL_LOOP + for (Index offset = PannelParameters::LocalThreadSizeR >> 1; offset > 0; offset >>= 1) { + if (rLocalThreadId < offset) { + op.reduce(out_scratch_ptr[(PannelParameters::LocalThreadSizeP + PannelParameters::BC) * offset], &accumulator); + // The result has already been divided for mean reducer in the + // previous reduction so no need to divide furthermore + *out_scratch_ptr = op.finalize(accumulator); + } + /* All threads collectively read from global memory into local. + * The barrier ensures all threads' IO is resolved before + * execution continues (strictly speaking, all threads within + * a single work-group - there is no co-ordination between + * work-groups, only work-items). 
*/ + itemID.barrier(cl::sycl::access::fence_space::local_space); + } + + if (rLocalThreadId == 0 && (globalPId < num_coeffs_to_preserve)) { + outPtr[globalPId] = op.finalize(accumulator); + } + } +}; + +template +struct SecondStepPartialReduction { + typedef OpDefiner OpDef; + typedef typename OpDef::type Op; + typedef cl::sycl::accessor + ScratchAccessor; + InputAccessor input_accessor; + OutputAccessor output_accessor; + Op op; + const Index num_coeffs_to_preserve; + const Index num_coeffs_to_reduce; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE SecondStepPartialReduction(ScratchAccessor, InputAccessor input_accessor_, + OutputAccessor output_accessor_, OpType op_, + const Index num_coeffs_to_preserve_, + const Index num_coeffs_to_reduce_) + : input_accessor(input_accessor_), + output_accessor(output_accessor_), + op(OpDef::get_op(op_)), + num_coeffs_to_preserve(num_coeffs_to_preserve_), + num_coeffs_to_reduce(num_coeffs_to_reduce_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + const Index globalId = itemID.get_global_id(0); + + if (globalId >= num_coeffs_to_preserve) return; + + auto in_ptr = input_accessor.get_pointer() + globalId; + + OutScalar accumulator = op.initialize(); +// num_coeffs_to_reduce is not bigger that 256 + for (Index i = 0; i < num_coeffs_to_reduce; i++) { + op.reduce(*in_ptr, &accumulator); + in_ptr += num_coeffs_to_preserve; + } + output_accessor.get_pointer()[globalId] = op.finalize(accumulator); + } +}; // namespace internal + +template +struct ReductionPannel { + static EIGEN_CONSTEXPR Index LocalThreadSizeP = LTP; + static EIGEN_CONSTEXPR Index LocalThreadSizeR = LTR; + static EIGEN_CONSTEXPR bool BC = BC_; +}; + +template +struct PartialReducerLauncher { + typedef typename Self::EvaluatorPointerType EvaluatorPointerType; + typedef typename Self::CoeffReturnType CoeffReturnType; + typedef typename Self::Storage Storage; + typedef typename Self::Index Index; + typedef ReductionPannel + 
PannelParameters; + + typedef PartialReductionKernel SyclReducerKerneType; + + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, EvaluatorPointerType output, + Index num_coeffs_to_reduce, Index num_coeffs_to_preserve) { + Index roundUpP = roundUp(num_coeffs_to_preserve, PannelParameters::LocalThreadSizeP); + + // getPowerOfTwo makes sure local range is power of 2 and <= + // maxSyclThreadPerBlock this will help us to avoid extra check on the + // kernel + static_assert(!((PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR) & + (PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR - 1)), + "The Local thread size must be a power of 2 for the reduction " + "operation"); + + EIGEN_CONSTEXPR Index localRange = PannelParameters::LocalThreadSizeP * PannelParameters::LocalThreadSizeR; + // In this step, we force the code not to be more than 2-step reduction: + // Our empirical research shows that if each thread reduces at least 64 + // elemnts individually, we get better performance. However, this can change + // on different platforms. In this step we force the code not to be + // morthan step reduction: Our empirical research shows that for inner_most + // dim reducer, it is better to have 8 group in a reduce dimension for sizes + // > 1024 to achieve the best performance. + const Index reductionPerThread = 64; + Index cu = dev.getPowerOfTwo(dev.getNumSyclMultiProcessors(), true); + const Index pNumGroups = roundUpP / PannelParameters::LocalThreadSizeP; + Index rGroups = (cu + pNumGroups - 1) / pNumGroups; + const Index rNumGroups = num_coeffs_to_reduce > reductionPerThread * localRange ? 
std::min(rGroups, localRange) : 1; + const Index globalRange = pNumGroups * rNumGroups * localRange; + + EIGEN_CONSTEXPR Index scratchSize = + PannelParameters::LocalThreadSizeR * (PannelParameters::LocalThreadSizeP + PannelParameters::BC); + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(globalRange), cl::sycl::range<1>(localRange)); + if (rNumGroups > 1) { + CoeffReturnType *temp_pointer = static_cast( + dev.allocate_temp(num_coeffs_to_preserve * rNumGroups * sizeof(CoeffReturnType))); + EvaluatorPointerType temp_accessor = dev.get(temp_pointer); + dev.template unary_kernel_launcher( + self, temp_accessor, thread_range, scratchSize, reducer, pNumGroups, rNumGroups, num_coeffs_to_preserve, + num_coeffs_to_reduce); + + typedef SecondStepPartialReduction + SecondStepPartialReductionKernel; + + dev.template unary_kernel_launcher( + temp_accessor, output, + cl::sycl::nd_range<1>(cl::sycl::range<1>(pNumGroups * localRange), cl::sycl::range<1>(localRange)), Index(1), + reducer, num_coeffs_to_preserve, rNumGroups); + + self.device().deallocate_temp(temp_pointer); + } else { + dev.template unary_kernel_launcher( + self, output, thread_range, scratchSize, reducer, pNumGroups, rNumGroups, num_coeffs_to_preserve, + num_coeffs_to_reduce); + } + return false; + } +}; +} // namespace internal +} // namespace TensorSycl + +namespace internal { + +template +struct FullReducer { + typedef typename Self::CoeffReturnType CoeffReturnType; + typedef typename Self::EvaluatorPointerType EvaluatorPointerType; + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = true; + static EIGEN_CONSTEXPR int PacketSize = Self::PacketAccess ? 
Self::PacketSize : 1; + static void run(const Self &self, Op &reducer, const Eigen::SyclDevice &dev, EvaluatorPointerType data) { + typedef typename conditional::type OutType; + static_assert(!((EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1) & + (EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1 - 1)), + "The Local thread size must be a power of 2 for the reduction " + "operation"); + EIGEN_CONSTEXPR Index local_range = EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1; + + typename Self::Index inputSize = self.impl().dimensions().TotalSize(); + // In this step we force the code not to be more than 2-step reduction: + // Our empirical research shows that if each thread reduces at least 512 + // elemnts individually, we get better performance. + const Index reductionPerThread = 2048; + // const Index num_work_group = + Index reductionGroup = dev.getPowerOfTwo( + (inputSize + (reductionPerThread * local_range - 1)) / (reductionPerThread * local_range), true); + const Index num_work_group = std::min(reductionGroup, local_range); + // 1 + // ? 
local_range + // : 1); + const Index global_range = num_work_group * local_range; + + auto thread_range = cl::sycl::nd_range<1>(cl::sycl::range<1>(global_range), cl::sycl::range<1>(local_range)); + typedef TensorSycl::internal::FullReductionKernelFunctor reduction_kernel_t; + if (num_work_group > 1) { + CoeffReturnType *temp_pointer = + static_cast(dev.allocate_temp(num_work_group * sizeof(CoeffReturnType))); + typename Self::EvaluatorPointerType tmp_global_accessor = dev.get(temp_pointer); + dev.template unary_kernel_launcher(self, tmp_global_accessor, thread_range, + local_range, inputSize, reducer); + + typedef TensorSycl::internal::SecondStepFullReducer + GenericRKernel; + dev.template unary_kernel_launcher( + tmp_global_accessor, data, + cl::sycl::nd_range<1>(cl::sycl::range<1>(num_work_group), cl::sycl::range<1>(num_work_group)), num_work_group, + reducer); + + dev.deallocate_temp(temp_pointer); + } else { + dev.template unary_kernel_launcher(self, data, thread_range, local_range, inputSize, + reducer); + } + } +}; +// vectorizable inner_most most dim preserver +// col reduction +template +struct OuterReducer { + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = true; + + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, + typename Self::EvaluatorPointerType output, typename Self::Index num_coeffs_to_reduce, + typename Self::Index num_coeffs_to_preserve) { + return ::Eigen::TensorSycl::internal::PartialReducerLauncher< + Self, Op, ::Eigen::TensorSycl::internal::reduction_dim::outer_most>::run(self, reducer, dev, output, + num_coeffs_to_reduce, + num_coeffs_to_preserve); + } +}; +// row reduction +template +struct InnerReducer { + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = true; + + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, + typename Self::EvaluatorPointerType output, typename Self::Index num_coeffs_to_reduce, + typename Self::Index num_coeffs_to_preserve) { + 
return ::Eigen::TensorSycl::internal::PartialReducerLauncher< + Self, Op, ::Eigen::TensorSycl::internal::reduction_dim::inner_most>::run(self, reducer, dev, output, + num_coeffs_to_reduce, + num_coeffs_to_preserve); + } +}; + +// ArmgMax uses this kernel for partial reduction// +// TODO(@mehdi.goli) come up with a better kernel +// generic partial reduction +template +struct GenericReducer { + static EIGEN_CONSTEXPR bool HasOptimizedImplementation = false; + static bool run(const Self &self, const Op &reducer, const Eigen::SyclDevice &dev, + typename Self::EvaluatorPointerType output, typename Self::Index num_values_to_reduce, + typename Self::Index num_coeffs_to_preserve) { + typename Self::Index range, GRange, tileSize; + dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange); + + dev.template unary_kernel_launcher>( + self, output, cl::sycl::nd_range<1>(cl::sycl::range<1>(GRange), cl::sycl::range<1>(tileSize)), Index(1), + reducer, range, (num_values_to_reduce != 0) ? num_values_to_reduce : static_cast(1)); + return false; + } +}; + +} // namespace internal +} // namespace Eigen + +#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_REDUCTION_SYCL_HPP diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h new file mode 100644 index 0000000..a27d364 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h @@ -0,0 +1,454 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REF_H +#define EIGEN_CXX11_TENSOR_TENSOR_REF_H + +namespace Eigen { + +namespace internal { + +template +class TensorLazyBaseEvaluator { + public: + TensorLazyBaseEvaluator() : m_refcount(0) { } + virtual ~TensorLazyBaseEvaluator() { } + + EIGEN_DEVICE_FUNC virtual const Dimensions& dimensions() const = 0; + EIGEN_DEVICE_FUNC virtual const Scalar* data() const = 0; + + EIGEN_DEVICE_FUNC virtual const Scalar coeff(DenseIndex index) const = 0; + EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex index) = 0; + + void incrRefCount() { ++m_refcount; } + void decrRefCount() { --m_refcount; } + int refCount() const { return m_refcount; } + + private: + // No copy, no assignment; + TensorLazyBaseEvaluator(const TensorLazyBaseEvaluator& other); + TensorLazyBaseEvaluator& operator = (const TensorLazyBaseEvaluator& other); + + int m_refcount; +}; + + +template +class TensorLazyEvaluatorReadOnly : public TensorLazyBaseEvaluator::Scalar> { + public: + // typedef typename TensorEvaluator::Dimensions Dimensions; + typedef typename TensorEvaluator::Scalar Scalar; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + typedef TensorEvaluator EvalType; + + TensorLazyEvaluatorReadOnly(const Expr& expr, const Device& device) : m_impl(expr, device), m_dummy(Scalar(0)) { + m_dims = m_impl.dimensions(); + m_impl.evalSubExprsIfNeeded(NULL); + } + virtual ~TensorLazyEvaluatorReadOnly() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC virtual const Dimensions& dimensions() const { + return m_dims; + } + EIGEN_DEVICE_FUNC virtual const Scalar* data() const { + return m_impl.data(); + } + + EIGEN_DEVICE_FUNC virtual const Scalar coeff(DenseIndex index) const { + return m_impl.coeff(index); + } + EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex /*index*/) { + eigen_assert(false && "can't reference the coefficient of a rvalue"); + return m_dummy; + }; + + protected: + TensorEvaluator m_impl; + Dimensions m_dims; + 
Scalar m_dummy; +}; + +template +class TensorLazyEvaluatorWritable : public TensorLazyEvaluatorReadOnly { + public: + typedef TensorLazyEvaluatorReadOnly Base; + typedef typename Base::Scalar Scalar; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + TensorLazyEvaluatorWritable(const Expr& expr, const Device& device) : Base(expr, device) { + } + virtual ~TensorLazyEvaluatorWritable() { + } + + EIGEN_DEVICE_FUNC virtual Scalar& coeffRef(DenseIndex index) { + return this->m_impl.coeffRef(index); + } +}; + +template +class TensorLazyEvaluator : public internal::conditional::value), + TensorLazyEvaluatorWritable, + TensorLazyEvaluatorReadOnly >::type { + public: + typedef typename internal::conditional::value), + TensorLazyEvaluatorWritable, + TensorLazyEvaluatorReadOnly >::type Base; + typedef typename Base::Scalar Scalar; + + TensorLazyEvaluator(const Expr& expr, const Device& device) : Base(expr, device) { + } + virtual ~TensorLazyEvaluator() { + } +}; + +} // namespace internal + + +/** \class TensorRef + * \ingroup CXX11_Tensor_Module + * + * \brief A reference to a tensor expression + * The expression will be evaluated lazily (as much as possible). 
+ * + */ +template class TensorRef : public TensorBase > +{ + public: + typedef TensorRef Self; + typedef typename PlainObjectType::Base Base; + typedef typename Eigen::internal::nested::type Nested; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename Base::CoeffReturnType CoeffReturnType; + typedef Scalar* PointerType; + typedef PointerType PointerArgType; + + static const Index NumIndices = PlainObjectType::NumIndices; + typedef typename PlainObjectType::Dimensions Dimensions; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = PlainObjectType::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -----------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorRef() : m_evaluator(NULL) { + } + + template + EIGEN_STRONG_INLINE TensorRef(const Expression& expr) : m_evaluator(new internal::TensorLazyEvaluator(expr, DefaultDevice())) { + m_evaluator->incrRefCount(); + } + + template + EIGEN_STRONG_INLINE TensorRef& operator = (const Expression& expr) { + unrefEvaluator(); + m_evaluator = new internal::TensorLazyEvaluator(expr, DefaultDevice()); + m_evaluator->incrRefCount(); + return *this; + } + + ~TensorRef() { + unrefEvaluator(); + } + + TensorRef(const TensorRef& other) : m_evaluator(other.m_evaluator) { + eigen_assert(m_evaluator->refCount() > 0); + m_evaluator->incrRefCount(); + } + + TensorRef& operator = (const TensorRef& other) { + if (this != &other) { + unrefEvaluator(); + m_evaluator = other.m_evaluator; + eigen_assert(m_evaluator->refCount() > 0); + m_evaluator->incrRefCount(); + } + return *this; + } + + 
EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index rank() const { return m_evaluator->dimensions().size(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index dimension(Index n) const { return m_evaluator->dimensions()[n]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_evaluator->dimensions(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Index size() const { return m_evaluator->dimensions().TotalSize(); } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar* data() const { return m_evaluator->data(); } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index index) const + { + return m_evaluator->coeff(index); + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index firstIndex, IndexTypes... otherIndices) const + { + const std::size_t num_indices = (sizeof...(otherIndices) + 1); + const array indices{{firstIndex, otherIndices...}}; + return coeff(indices); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... 
otherIndices) + { + const std::size_t num_indices = (sizeof...(otherIndices) + 1); + const array indices{{firstIndex, otherIndices...}}; + return coeffRef(indices); + } +#else + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + indices[4] = i4; + return coeff(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1) + { + array indices; + indices[0] = i0; + indices[1] = i1; + return coeffRef(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2) + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + return coeffRef(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + return coeffRef(indices); + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index i0, Index i1, Index i2, Index i3, Index i4) + { + array indices; + indices[0] = i0; + indices[1] = i1; + indices[2] = i2; + indices[3] = i3; + indices[4] = i4; + return coeffRef(indices); + } +#endif + + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const 
Scalar coeff(const array& indices) const + { + const Dimensions& dims = this->dimensions(); + Index index = 0; + if (PlainObjectType::Options & RowMajor) { + index += indices[0]; + for (size_t i = 1; i < NumIndices; ++i) { + index = index * dims[i] + indices[i]; + } + } else { + index += indices[NumIndices-1]; + for (int i = NumIndices-2; i >= 0; --i) { + index = index * dims[i] + indices[i]; + } + } + return m_evaluator->coeff(index); + } + template EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(const array& indices) + { + const Dimensions& dims = this->dimensions(); + Index index = 0; + if (PlainObjectType::Options & RowMajor) { + index += indices[0]; + for (size_t i = 1; i < NumIndices; ++i) { + index = index * dims[i] + indices[i]; + } + } else { + index += indices[NumIndices-1]; + for (int i = NumIndices-2; i >= 0; --i) { + index = index * dims[i] + indices[i]; + } + } + return m_evaluator->coeffRef(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const + { + return m_evaluator->coeff(index); + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return m_evaluator->coeffRef(index); + } + + private: + EIGEN_STRONG_INLINE void unrefEvaluator() { + if (m_evaluator) { + m_evaluator->decrRefCount(); + if (m_evaluator->refCount() == 0) { + delete m_evaluator; + } + } + } + + internal::TensorLazyBaseEvaluator* m_evaluator; +}; + + +// evaluator for rvalues +template +struct TensorEvaluator, Device> +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorRef::Layout, + CoordAccess = false, // to 
be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const TensorRef& m, const Device&) + : m_ref(m) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_ref.dimensions(); } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const { + return m_ref.coeff(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return m_ref.coeffRef(index); + } + + EIGEN_DEVICE_FUNC const Scalar* data() const { return m_ref.data(); } + + protected: + TensorRef m_ref; +}; + + +// evaluator for lvalues +template +struct TensorEvaluator, Device> : public TensorEvaluator, Device> +{ + typedef typename Derived::Index Index; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Scalar CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef typename Derived::Dimensions Dimensions; + + typedef TensorEvaluator, Device> Base; + + enum { + IsAligned = false, + PacketAccess = false, + BlockAccess = false, + PreferBlockAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(TensorRef& m, const Device& d) : Base(m, d) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return this->m_ref.coeffRef(index); + } +}; + + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REF_H diff --git 
a/external/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h new file mode 100644 index 0000000..586ce68 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h @@ -0,0 +1,465 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Navdeep Jaitly +// Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H +#define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H +namespace Eigen { + +/** \class TensorReverse + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor reverse elements class. + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorReverseOp& type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorReverseOp type; +}; + +} // end namespace internal + +template +class TensorReverseOp : public TensorBase, WriteAccessors> +{ + public: + typedef TensorBase, WriteAccessors>Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind + StorageKind; + 
typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp( + const XprType& expr, const ReverseDimensions& reverse_dims) + : m_xpr(expr), m_reverse_dims(reverse_dims) { } + + EIGEN_DEVICE_FUNC + const ReverseDimensions& reverse() const { return m_reverse_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReverseOp) + + + protected: + typename XprType::Nested m_xpr; + const ReverseDimensions m_reverse_dims; +}; + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorReverseOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = NumDims > 0, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef internal::TensorIntDivisor IndexDivisor; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename TensorEvaluator::TensorBlock + ArgTensorBlock; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), + m_reverse(op.reverse()), + m_device(device) + 
{ + // Reversing a scalar isn't supported yet. It would be a no-op anyway. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + + // Compute strides + m_dimensions = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_strides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_strides[i] = m_strides[i-1] * m_dimensions[i-1]; + if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]); + } + } else { + m_strides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_strides[i] = m_strides[i+1] * m_dimensions[i+1]; + if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]); + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex( + Index index) const { + eigen_assert(index < dimensions().TotalSize()); + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + Index idx = index / m_fastStrides[i]; + index -= idx * m_strides[i]; + if (m_reverse[i]) { + idx = m_dimensions[i] - idx - 1; + } + inputIndex += idx * m_strides[i] ; + } + if (m_reverse[0]) { + inputIndex += (m_dimensions[0] - index - 1); + } else { + inputIndex += index; + } + } else { + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + Index idx = index / m_fastStrides[i]; + index -= idx * m_strides[i]; + if (m_reverse[i]) { + idx = m_dimensions[i] - idx - 1; + } + inputIndex += idx 
* m_strides[i] ; + } + if (m_reverse[NumDims-1]) { + inputIndex += (m_dimensions[NumDims-1] - index - 1); + } else { + inputIndex += index; + } + } + return inputIndex; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff( + Index index) const { + return m_impl.coeff(reverseIndex(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + // TODO(ndjaitly): write a better packing routine that uses + // local structure. + EIGEN_ALIGN_MAX typename internal::remove_const::type + values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + const size_t target_size = m_device.lastLevelCacheSize(); + // Block evaluation reads underlying memory in reverse order, and default + // cost model does not properly catch this in bytes stored/loaded. + return internal::TensorBlockResourceRequirements::skewed( + target_size) + .addCostPerCoeff({0, 0, 24}); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool /*root_of_expr_ast*/ = false) const { + // TODO(ezhulenev): If underlying tensor expression supports and prefers + // block evaluation we must use it. Currently we use coeff and packet + // access into the underlying tensor expression. + // static const bool useBlockAccessForArgType = + // TensorEvaluator::BlockAccess && + // TensorEvaluator::PreferBlockAccess; + + static const bool isColMajor = + static_cast(Layout) == static_cast(ColMajor); + + static const Index inner_dim_idx = isColMajor ? 
0 : NumDims - 1; + const bool inner_dim_reversed = m_reverse[inner_dim_idx]; + + // Offset in the output block. + Index block_offset = 0; + + // Offset in the input Tensor. + Index input_offset = reverseIndex(desc.offset()); + + // Initialize output block iterator state. Dimension in this array are + // always in inner_most -> outer_most order (col major layout). + array it; + for (int i = 0; i < NumDims; ++i) { + const int dim = isColMajor ? i : NumDims - 1 - i; + it[i].size = desc.dimension(dim); + it[i].count = 0; + it[i].reverse = m_reverse[dim]; + + it[i].block_stride = + i == 0 ? 1 : (it[i - 1].size * it[i - 1].block_stride); + it[i].block_span = it[i].block_stride * (it[i].size - 1); + + it[i].input_stride = m_strides[dim]; + it[i].input_span = it[i].input_stride * (it[i].size - 1); + + if (it[i].reverse) { + it[i].input_stride = -1 * it[i].input_stride; + it[i].input_span = -1 * it[i].input_span; + } + } + + // If multiple inner dimensions have the same reverse flag, check if we can + // merge them into a single virtual inner dimension. + int effective_inner_dim = 0; + for (int i = 1; i < NumDims; ++i) { + if (it[i].reverse != it[effective_inner_dim].reverse) break; + if (it[i].block_stride != it[effective_inner_dim].size) break; + if (it[i].block_stride != numext::abs(it[i].input_stride)) break; + + it[i].size = it[effective_inner_dim].size * it[i].size; + + it[i].block_stride = 1; + it[i].input_stride = (inner_dim_reversed ? -1 : 1); + + it[i].block_span = it[i].block_stride * (it[i].size - 1); + it[i].input_span = it[i].input_stride * (it[i].size - 1); + + effective_inner_dim = i; + } + + eigen_assert(it[effective_inner_dim].block_stride == 1); + eigen_assert(it[effective_inner_dim].input_stride == + (inner_dim_reversed ? -1 : 1)); + + const Index inner_dim_size = it[effective_inner_dim].size; + + // Prepare storage for the materialized reverse result. 
+ const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage(desc, scratch); + CoeffReturnType* block_buffer = block_storage.data(); + + while (it[NumDims - 1].count < it[NumDims - 1].size) { + // Copy inner-most dimension data from reversed location in input. + Index dst = block_offset; + Index src = input_offset; + + // NOTE(ezhulenev): Adding vectorized path with internal::preverse showed + // worse results in benchmarks than a simple coefficient loop. + if (inner_dim_reversed) { + for (Index i = 0; i < inner_dim_size; ++i) { + block_buffer[dst] = m_impl.coeff(src); + ++dst; + --src; + } + } else { + for (Index i = 0; i < inner_dim_size; ++i) { + block_buffer[dst] = m_impl.coeff(src); + ++dst; + ++src; + } + } + + // For the 1d tensor we need to generate only one inner-most dimension. + if ((NumDims - effective_inner_dim) == 1) break; + + // Update offset. + for (Index i = effective_inner_dim + 1; i < NumDims; ++i) { + if (++it[i].count < it[i].size) { + block_offset += it[i].block_stride; + input_offset += it[i].input_stride; + break; + } + if (i != NumDims - 1) it[i].count = 0; + block_offset -= it[i].block_span; + input_offset -= it[i].input_span; + } + } + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + double compute_cost = NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + for (int i = 0; i < NumDims; ++i) { + if (m_reverse[i]) { + compute_cost += 2 * TensorOpCost::AddCost(); + } + } + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + 
+ protected: + Dimensions m_dimensions; + array m_strides; + array m_fastStrides; + TensorEvaluator m_impl; + ReverseDimensions m_reverse; + const Device EIGEN_DEVICE_REF m_device; + + private: + struct BlockIteratorState { + BlockIteratorState() + : size(0), + count(0), + reverse(false), + block_stride(0), + block_span(0), + input_stride(0), + input_span(0) {} + + Index size; + Index count; + bool reverse; + Index block_stride; + Index block_span; + Index input_stride; + Index input_span; + }; +}; + +// Eval as lvalue + +template +struct TensorEvaluator, Device> + : public TensorEvaluator, + Device> { + typedef TensorEvaluator, + Device> Base; + typedef TensorReverseOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::value; + typedef DSizes Dimensions; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) {} + + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dimensions& dimensions() const { return this->m_dimensions; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { + return this->m_impl.coeffRef(this->reverseIndex(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) { + EIGEN_STATIC_ASSERT((PacketSize > 1), 
YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + // This code is pilfered from TensorMorphing.h + EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize]; + internal::pstore(values, x); + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index+i) = values[i]; + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h new file mode 100644 index 0000000..beae854 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h @@ -0,0 +1,528 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Igor Babuschkin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H +#define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H + +namespace Eigen { + +namespace internal { + +template +struct traits > + : public traits { + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorScanOp& type; +}; + +template +struct nested, 1, + typename eval >::type> +{ + typedef TensorScanOp type; +}; +} // end namespace internal + +/** \class TensorScan + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor scan class. 
+ */ +template +class TensorScanOp + : public TensorBase, ReadOnlyAccessors> { +public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorScanOp( + const XprType& expr, const Index& axis, bool exclusive = false, const Op& op = Op()) + : m_expr(expr), m_axis(axis), m_accumulator(op), m_exclusive(exclusive) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Index axis() const { return m_axis; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const XprType& expression() const { return m_expr; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Op accumulator() const { return m_accumulator; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + bool exclusive() const { return m_exclusive; } + +protected: + typename XprType::Nested m_expr; + const Index m_axis; + const Op m_accumulator; + const bool m_exclusive; +}; + + +namespace internal { + +template +EIGEN_STRONG_INLINE void ReduceScalar(Self& self, Index offset, + typename Self::CoeffReturnType* data) { + // Compute the scan along the axis, starting at the given offset + typename Self::CoeffReturnType accum = self.accumulator().initialize(); + if (self.stride() == 1) { + if (self.exclusive()) { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + data[curr] = self.accumulator().finalize(accum); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + } + } else { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + self.accumulator().reduce(self.inner().coeff(curr), &accum); + data[curr] = self.accumulator().finalize(accum); + } + } + } else { + if (self.exclusive()) { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + Index curr = offset + 
idx3 * self.stride(); + data[curr] = self.accumulator().finalize(accum); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + } + } else { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + Index curr = offset + idx3 * self.stride(); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + data[curr] = self.accumulator().finalize(accum); + } + } + } +} + +template +EIGEN_STRONG_INLINE void ReducePacket(Self& self, Index offset, + typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + using Packet = typename Self::PacketReturnType; + // Compute the scan along the axis, starting at the calculated offset + Packet accum = self.accumulator().template initializePacket(); + if (self.stride() == 1) { + if (self.exclusive()) { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + } + } else { + for (Index curr = offset; curr < offset + self.size(); ++curr) { + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + } + } + } else { + if (self.exclusive()) { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + const Index curr = offset + idx3 * self.stride(); + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + } + } else { + for (Index idx3 = 0; idx3 < self.size(); idx3++) { + const Index curr = offset + idx3 * self.stride(); + self.accumulator().reducePacket(self.inner().template packet(curr), &accum); + internal::pstoreu(data + curr, self.accumulator().finalizePacket(accum)); + } + } + } +} + +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + for (Index idx2 = 
0; idx2 < self.stride(); idx2++) { + // Calculate the starting offset for the scan + Index offset = idx1 + idx2; + ReduceScalar(self, offset, data); + } + } +}; + +// Specialization for vectorized reduction. +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + using Packet = typename Self::PacketReturnType; + const int PacketSize = internal::unpacket_traits::size; + Index idx2 = 0; + for (; idx2 + PacketSize <= self.stride(); idx2 += PacketSize) { + // Calculate the starting offset for the packet scan + Index offset = idx1 + idx2; + ReducePacket(self, offset, data); + } + for (; idx2 < self.stride(); idx2++) { + // Calculate the starting offset for the scan + Index offset = idx1 + idx2; + ReduceScalar(self, offset, data); + } + } +}; + +// Single-threaded CPU implementation of scan +template ::PacketAccess && + internal::reducer_traits::PacketAccess)> +struct ScanLauncher { + void operator()(Self& self, typename Self::CoeffReturnType* data) { + Index total_size = internal::array_prod(self.dimensions()); + + // We fix the index along the scan axis to 0 and perform a + // scan per remaining entry. The iteration is split into two nested + // loops to avoid an integer division by keeping track of each idx1 and + // idx2. + for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) { + ReduceBlock block_reducer; + block_reducer(self, idx1, data); + } + } +}; + +#ifdef EIGEN_USE_THREADS + +// Adjust block_size to avoid false sharing of cachelines among +// threads. Currently set to twice the cache line size on Intel and ARM +// processors. 
+EIGEN_STRONG_INLINE Index AdjustBlockSize(Index item_size, Index block_size) { + EIGEN_CONSTEXPR Index kBlockAlignment = 128; + const Index items_per_cacheline = + numext::maxi(1, kBlockAlignment / item_size); + return items_per_cacheline * divup(block_size, items_per_cacheline); +} + +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + using Packet = typename Self::PacketReturnType; + const int PacketSize = internal::unpacket_traits::size; + Index num_scalars = self.stride(); + Index num_packets = 0; + if (self.stride() >= PacketSize) { + num_packets = self.stride() / PacketSize; + self.device().parallelFor( + num_packets, + TensorOpCost(PacketSize * self.size(), PacketSize * self.size(), + 16 * PacketSize * self.size(), true, PacketSize), + // Make the shard size large enough that two neighboring threads + // won't write to the same cacheline of `data`. + [=](Index blk_size) { + return AdjustBlockSize(PacketSize * sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index packet = first; packet < last; ++packet) { + const Index idx2 = packet * PacketSize; + ReducePacket(self, idx1 + idx2, data); + } + }); + num_scalars -= num_packets * PacketSize; + } + self.device().parallelFor( + num_scalars, TensorOpCost(self.size(), self.size(), 16 * self.size()), + // Make the shard size large enough that two neighboring threads + // won't write to the same cacheline of `data`. 
+ [=](Index blk_size) { + return AdjustBlockSize(sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index scalar = first; scalar < last; ++scalar) { + const Index idx2 = num_packets * PacketSize + scalar; + ReduceScalar(self, idx1 + idx2, data); + } + }); + } +}; + +template +struct ReduceBlock { + EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1, + typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + self.device().parallelFor( + self.stride(), TensorOpCost(self.size(), self.size(), 16 * self.size()), + // Make the shard size large enough that two neighboring threads + // won't write to the same cacheline of `data`. + [=](Index blk_size) { + return AdjustBlockSize(sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index idx2 = first; idx2 < last; ++idx2) { + ReduceScalar(self, idx1 + idx2, data); + } + }); + } +}; + +// Specialization for multi-threaded execution. +template +struct ScanLauncher { + void operator()(Self& self, typename Self::CoeffReturnType* data) { + using Scalar = typename Self::CoeffReturnType; + using Packet = typename Self::PacketReturnType; + const int PacketSize = internal::unpacket_traits::size; + const Index total_size = internal::array_prod(self.dimensions()); + const Index inner_block_size = self.stride() * self.size(); + bool parallelize_by_outer_blocks = (total_size >= (self.stride() * inner_block_size)); + + if ((parallelize_by_outer_blocks && total_size <= 4096) || + (!parallelize_by_outer_blocks && self.stride() < PacketSize)) { + ScanLauncher launcher; + launcher(self, data); + return; + } + + if (parallelize_by_outer_blocks) { + // Parallelize over outer blocks. 
+ const Index num_outer_blocks = total_size / inner_block_size; + self.device().parallelFor( + num_outer_blocks, + TensorOpCost(inner_block_size, inner_block_size, + 16 * PacketSize * inner_block_size, Vectorize, + PacketSize), + [=](Index blk_size) { + return AdjustBlockSize(inner_block_size * sizeof(Scalar), blk_size); + }, + [&](Index first, Index last) { + for (Index idx1 = first; idx1 < last; ++idx1) { + ReduceBlock block_reducer; + block_reducer(self, idx1 * inner_block_size, data); + } + }); + } else { + // Parallelize over inner packets/scalars dimensions when the reduction + // axis is not an inner dimension. + ReduceBlock block_reducer; + for (Index idx1 = 0; idx1 < total_size; + idx1 += self.stride() * self.size()) { + block_reducer(self, idx1, data); + } + } + } +}; +#endif // EIGEN_USE_THREADS + +#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC)) + +// GPU implementation of scan +// TODO(ibab) This placeholder implementation performs multiple scans in +// parallel, but it would be better to use a parallel scan algorithm and +// optimize memory access. 
+template +__global__ EIGEN_HIP_LAUNCH_BOUNDS_1024 void ScanKernel(Self self, Index total_size, typename Self::CoeffReturnType* data) { + // Compute offset as in the CPU version + Index val = threadIdx.x + blockIdx.x * blockDim.x; + Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride(); + + if (offset + (self.size() - 1) * self.stride() < total_size) { + // Compute the scan along the axis, starting at the calculated offset + typename Self::CoeffReturnType accum = self.accumulator().initialize(); + for (Index idx = 0; idx < self.size(); idx++) { + Index curr = offset + idx * self.stride(); + if (self.exclusive()) { + data[curr] = self.accumulator().finalize(accum); + self.accumulator().reduce(self.inner().coeff(curr), &accum); + } else { + self.accumulator().reduce(self.inner().coeff(curr), &accum); + data[curr] = self.accumulator().finalize(accum); + } + } + } + __syncthreads(); + +} + +template +struct ScanLauncher { + void operator()(const Self& self, typename Self::CoeffReturnType* data) { + Index total_size = internal::array_prod(self.dimensions()); + Index num_blocks = (total_size / self.size() + 63) / 64; + Index block_size = 64; + + LAUNCH_GPU_KERNEL((ScanKernel), num_blocks, block_size, 0, self.device(), self, total_size, data); + } +}; +#endif // EIGEN_USE_GPU && (EIGEN_GPUCC) + +} // namespace internal + +// Eval as rvalue +template +struct TensorEvaluator, Device> { + + typedef TensorScanOp XprType; + typedef typename XprType::Index Index; + typedef const ArgType ChildTypeNoConst; + typedef const ArgType ChildType; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + typedef TensorEvaluator, Device> Self; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + 
IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = false, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = true + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), + m_device(device), + m_exclusive(op.exclusive()), + m_accumulator(op.accumulator()), + m_size(m_impl.dimensions()[op.axis()]), + m_stride(1), m_consume_dim(op.axis()), + m_output(NULL) { + + // Accumulating a scalar isn't supported. + EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(op.axis() >= 0 && op.axis() < NumDims); + + // Compute stride of scan axis + const Dimensions& dims = m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = 0; i < op.axis(); ++i) { + m_stride = m_stride * dims[i]; + } + } else { + // dims can only be indexed through unsigned integers, + // so let's use an unsigned type to let the compiler knows. 
+ // This prevents stupid warnings: ""'*((void*)(& evaluator)+64)[18446744073709551615]' may be used uninitialized in this function" + unsigned int axis = internal::convert_index(op.axis()); + for (unsigned int i = NumDims - 1; i > axis; --i) { + m_stride = m_stride * dims[i]; + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_impl.dimensions(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& stride() const { + return m_stride; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& consume_dim() const { + return m_consume_dim; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& size() const { + return m_size; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const { + return m_accumulator; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool exclusive() const { + return m_exclusive; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const TensorEvaluator& inner() const { + return m_impl; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Device& device() const { + return m_device; + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) { + m_impl.evalSubExprsIfNeeded(NULL); + internal::ScanLauncher launcher; + if (data) { + launcher(*this, data); + return false; + } + + const Index total_size = internal::array_prod(dimensions()); + m_output = static_cast(m_device.get((Scalar*) m_device.allocate_temp(total_size * sizeof(Scalar)))); + launcher(*this, m_output); + return true; + } + + template + EIGEN_DEVICE_FUNC PacketReturnType packet(Index index) const { + return internal::ploadt(m_output + index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EvaluatorPointerType data() const + { + return m_output; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_output[index]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool) const { + return TensorOpCost(sizeof(CoeffReturnType), 0, 0); + 
} + + EIGEN_STRONG_INLINE void cleanup() { + if (m_output) { + m_device.deallocate_temp(m_output); + m_output = NULL; + } + m_impl.cleanup(); + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + m_output.bind(cgh); + } +#endif +protected: + TensorEvaluator m_impl; + const Device EIGEN_DEVICE_REF m_device; + const bool m_exclusive; + Op m_accumulator; + const Index m_size; + Index m_stride; + Index m_consume_dim; + EvaluatorPointerType m_output; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h new file mode 100644 index 0000000..7f68ecb --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorScanSycl.h @@ -0,0 +1,513 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Mehdi Goli Codeplay Software Ltd. +// Ralph Potter Codeplay Software Ltd. +// Luke Iwanski Codeplay Software Ltd. +// Contact: +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +/***************************************************************** + * TensorScanSycl.h + * + * \brief: + * Tensor Scan Sycl implement the extend version of + * "Efficient parallel scan algorithms for GPUs." .for Tensor operations. + * The algorithm requires up to 3 stage (consequently 3 kernels) depending on + * the size of the tensor. In the first kernel (ScanKernelFunctor), each + * threads within the work-group individually reduces the allocated elements per + * thread in order to reduces the total number of blocks. 
In the next step all + * thread within the work-group will reduce the associated blocks into the + * temporary buffers. In the next kernel(ScanBlockKernelFunctor), the temporary + * buffer is given as an input and all the threads within a work-group scan and + * reduces the boundaries between the blocks (generated from the previous + * kernel). and write the data on the temporary buffer. If the second kernel is + * required, the third and final kerenl (ScanAdjustmentKernelFunctor) will + * adjust the final result into the output buffer. + * The original algorithm for the parallel prefix sum can be found here: + * + * Sengupta, Shubhabrata, Mark Harris, and Michael Garland. "Efficient parallel + * scan algorithms for GPUs." NVIDIA, Santa Clara, CA, Tech. Rep. NVR-2008-003 + *1, no. 1 (2008): 1-17. + *****************************************************************/ + +#ifndef UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_SYCL_SYCL_HPP +#define UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_SYCL_SYCL_HPP + +namespace Eigen { +namespace TensorSycl { +namespace internal { + +#ifndef EIGEN_SYCL_MAX_GLOBAL_RANGE +#define EIGEN_SYCL_MAX_GLOBAL_RANGE (EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1 * 4) +#endif + +template +struct ScanParameters { + // must be power of 2 + static EIGEN_CONSTEXPR index_t ScanPerThread = 8; + const index_t total_size; + const index_t non_scan_size; + const index_t scan_size; + const index_t non_scan_stride; + const index_t scan_stride; + const index_t panel_threads; + const index_t group_threads; + const index_t block_threads; + const index_t elements_per_group; + const index_t elements_per_block; + const index_t loop_range; + + ScanParameters(index_t total_size_, index_t non_scan_size_, index_t scan_size_, index_t non_scan_stride_, + index_t scan_stride_, index_t panel_threads_, index_t group_threads_, index_t block_threads_, + index_t elements_per_group_, index_t elements_per_block_, index_t loop_range_) + : total_size(total_size_), + 
non_scan_size(non_scan_size_), + scan_size(scan_size_), + non_scan_stride(non_scan_stride_), + scan_stride(scan_stride_), + panel_threads(panel_threads_), + group_threads(group_threads_), + block_threads(block_threads_), + elements_per_group(elements_per_group_), + elements_per_block(elements_per_block_), + loop_range(loop_range_) {} +}; + +enum class scan_step { first, second }; +template +struct ScanKernelFunctor { + typedef cl::sycl::accessor + LocalAccessor; + static EIGEN_CONSTEXPR int PacketSize = ScanParameters::ScanPerThread / 2; + + LocalAccessor scratch; + Evaluator dev_eval; + OutAccessor out_accessor; + OutAccessor temp_accessor; + const ScanParameters scanParameters; + Op accumulator; + const bool inclusive; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ScanKernelFunctor(LocalAccessor scratch_, const Evaluator dev_eval_, + OutAccessor out_accessor_, OutAccessor temp_accessor_, + const ScanParameters scanParameters_, Op accumulator_, + const bool inclusive_) + : scratch(scratch_), + dev_eval(dev_eval_), + out_accessor(out_accessor_), + temp_accessor(temp_accessor_), + scanParameters(scanParameters_), + accumulator(accumulator_), + inclusive(inclusive_) {} + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE + read(const Input &inpt, Index global_id) { + return inpt.coeff(global_id); + } + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE + read(const Input &inpt, Index global_id) { + return inpt[global_id]; + } + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + first_step_inclusive_Operation(InclusiveOp inclusive_op) { + inclusive_op(); + } + + template + typename ::Eigen::internal::enable_if::type EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + first_step_inclusive_Operation(InclusiveOp) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + auto out_ptr = out_accessor.get_pointer(); + 
auto tmp_ptr = temp_accessor.get_pointer(); + auto scratch_ptr = scratch.get_pointer().get(); + + for (Index loop_offset = 0; loop_offset < scanParameters.loop_range; loop_offset++) { + Index data_offset = (itemID.get_global_id(0) + (itemID.get_global_range(0) * loop_offset)); + Index tmp = data_offset % scanParameters.panel_threads; + const Index panel_id = data_offset / scanParameters.panel_threads; + const Index group_id = tmp / scanParameters.group_threads; + tmp = tmp % scanParameters.group_threads; + const Index block_id = tmp / scanParameters.block_threads; + const Index local_id = tmp % scanParameters.block_threads; + // we put one element per packet in scratch_mem + const Index scratch_stride = scanParameters.elements_per_block / PacketSize; + const Index scratch_offset = (itemID.get_local_id(0) / scanParameters.block_threads) * scratch_stride; + CoeffReturnType private_scan[ScanParameters::ScanPerThread]; + CoeffReturnType inclusive_scan; + // the actual panel size is scan_size * non_scan_size. + // elements_per_panel is roundup to power of 2 for binary tree + const Index panel_offset = panel_id * scanParameters.scan_size * scanParameters.non_scan_size; + const Index group_offset = group_id * scanParameters.non_scan_stride; + // This will be effective when the size is bigger than elements_per_block + const Index block_offset = block_id * scanParameters.elements_per_block * scanParameters.scan_stride; + const Index thread_offset = (ScanParameters::ScanPerThread * local_id * scanParameters.scan_stride); + const Index global_offset = panel_offset + group_offset + block_offset + thread_offset; + Index next_elements = 0; + EIGEN_UNROLL_LOOP + for (int i = 0; i < ScanParameters::ScanPerThread; i++) { + Index global_id = global_offset + next_elements; + private_scan[i] = ((((block_id * scanParameters.elements_per_block) + + (ScanParameters::ScanPerThread * local_id) + i) < scanParameters.scan_size) && + (global_id < scanParameters.total_size)) + ? 
read(dev_eval, global_id) + : accumulator.initialize(); + next_elements += scanParameters.scan_stride; + } + first_step_inclusive_Operation([&]() EIGEN_DEVICE_FUNC { + if (inclusive) { + inclusive_scan = private_scan[ScanParameters::ScanPerThread - 1]; + } + }); + // This for loop must be 2 + EIGEN_UNROLL_LOOP + for (int packetIndex = 0; packetIndex < ScanParameters::ScanPerThread; packetIndex += PacketSize) { + Index private_offset = 1; + // build sum in place up the tree + EIGEN_UNROLL_LOOP + for (Index d = PacketSize >> 1; d > 0; d >>= 1) { + EIGEN_UNROLL_LOOP + for (Index l = 0; l < d; l++) { + Index ai = private_offset * (2 * l + 1) - 1 + packetIndex; + Index bi = private_offset * (2 * l + 2) - 1 + packetIndex; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(private_scan[ai], &accum); + accumulator.reduce(private_scan[bi], &accum); + private_scan[bi] = accumulator.finalize(accum); + } + private_offset *= 2; + } + scratch_ptr[2 * local_id + (packetIndex / PacketSize) + scratch_offset] = + private_scan[PacketSize - 1 + packetIndex]; + private_scan[PacketSize - 1 + packetIndex] = accumulator.initialize(); + // traverse down tree & build scan + EIGEN_UNROLL_LOOP + for (Index d = 1; d < PacketSize; d *= 2) { + private_offset >>= 1; + EIGEN_UNROLL_LOOP + for (Index l = 0; l < d; l++) { + Index ai = private_offset * (2 * l + 1) - 1 + packetIndex; + Index bi = private_offset * (2 * l + 2) - 1 + packetIndex; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(private_scan[ai], &accum); + accumulator.reduce(private_scan[bi], &accum); + private_scan[ai] = private_scan[bi]; + private_scan[bi] = accumulator.finalize(accum); + } + } + } + + Index offset = 1; + // build sum in place up the tree + for (Index d = scratch_stride >> 1; d > 0; d >>= 1) { + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (local_id < d) { + Index ai = offset * (2 * local_id + 1) - 1 + scratch_offset; + Index bi = offset 
* (2 * local_id + 2) - 1 + scratch_offset; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(scratch_ptr[ai], &accum); + accumulator.reduce(scratch_ptr[bi], &accum); + scratch_ptr[bi] = accumulator.finalize(accum); + } + offset *= 2; + } + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + // next step optimisation + if (local_id == 0) { + if (((scanParameters.elements_per_group / scanParameters.elements_per_block) > 1)) { + const Index temp_id = panel_id * (scanParameters.elements_per_group / scanParameters.elements_per_block) * + scanParameters.non_scan_size + + group_id * (scanParameters.elements_per_group / scanParameters.elements_per_block) + + block_id; + tmp_ptr[temp_id] = scratch_ptr[scratch_stride - 1 + scratch_offset]; + } + // clear the last element + scratch_ptr[scratch_stride - 1 + scratch_offset] = accumulator.initialize(); + } + // traverse down tree & build scan + for (Index d = 1; d < scratch_stride; d *= 2) { + offset >>= 1; + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + if (local_id < d) { + Index ai = offset * (2 * local_id + 1) - 1 + scratch_offset; + Index bi = offset * (2 * local_id + 2) - 1 + scratch_offset; + CoeffReturnType accum = accumulator.initialize(); + accumulator.reduce(scratch_ptr[ai], &accum); + accumulator.reduce(scratch_ptr[bi], &accum); + scratch_ptr[ai] = scratch_ptr[bi]; + scratch_ptr[bi] = accumulator.finalize(accum); + } + } + // Synchronise + itemID.barrier(cl::sycl::access::fence_space::local_space); + // This for loop must be 2 + EIGEN_UNROLL_LOOP + for (int packetIndex = 0; packetIndex < ScanParameters::ScanPerThread; packetIndex += PacketSize) { + EIGEN_UNROLL_LOOP + for (Index i = 0; i < PacketSize; i++) { + CoeffReturnType accum = private_scan[packetIndex + i]; + accumulator.reduce(scratch_ptr[2 * local_id + (packetIndex / PacketSize) + scratch_offset], &accum); + private_scan[packetIndex + i] = accumulator.finalize(accum); + } + } 
+ first_step_inclusive_Operation([&]() EIGEN_DEVICE_FUNC { + if (inclusive) { + accumulator.reduce(private_scan[ScanParameters::ScanPerThread - 1], &inclusive_scan); + private_scan[0] = accumulator.finalize(inclusive_scan); + } + }); + next_elements = 0; + // right the first set of private param + EIGEN_UNROLL_LOOP + for (Index i = 0; i < ScanParameters::ScanPerThread; i++) { + Index global_id = global_offset + next_elements; + if ((((block_id * scanParameters.elements_per_block) + (ScanParameters::ScanPerThread * local_id) + i) < + scanParameters.scan_size) && + (global_id < scanParameters.total_size)) { + Index private_id = (i * !inclusive) + (((i + 1) % ScanParameters::ScanPerThread) * (inclusive)); + out_ptr[global_id] = private_scan[private_id]; + } + next_elements += scanParameters.scan_stride; + } + } // end for loop + } +}; + +template +struct ScanAdjustmentKernelFunctor { + typedef cl::sycl::accessor + LocalAccessor; + static EIGEN_CONSTEXPR int PacketSize = ScanParameters::ScanPerThread / 2; + InAccessor in_accessor; + OutAccessor out_accessor; + const ScanParameters scanParameters; + Op accumulator; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ScanAdjustmentKernelFunctor(LocalAccessor, InAccessor in_accessor_, + OutAccessor out_accessor_, + const ScanParameters scanParameters_, + Op accumulator_) + : in_accessor(in_accessor_), + out_accessor(out_accessor_), + scanParameters(scanParameters_), + accumulator(accumulator_) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void operator()(cl::sycl::nd_item<1> itemID) { + auto in_ptr = in_accessor.get_pointer(); + auto out_ptr = out_accessor.get_pointer(); + + for (Index loop_offset = 0; loop_offset < scanParameters.loop_range; loop_offset++) { + Index data_offset = (itemID.get_global_id(0) + (itemID.get_global_range(0) * loop_offset)); + Index tmp = data_offset % scanParameters.panel_threads; + const Index panel_id = data_offset / scanParameters.panel_threads; + const Index group_id = tmp / 
scanParameters.group_threads; + tmp = tmp % scanParameters.group_threads; + const Index block_id = tmp / scanParameters.block_threads; + const Index local_id = tmp % scanParameters.block_threads; + + // the actual panel size is scan_size * non_scan_size. + // elements_per_panel is roundup to power of 2 for binary tree + const Index panel_offset = panel_id * scanParameters.scan_size * scanParameters.non_scan_size; + const Index group_offset = group_id * scanParameters.non_scan_stride; + // This will be effective when the size is bigger than elements_per_block + const Index block_offset = block_id * scanParameters.elements_per_block * scanParameters.scan_stride; + const Index thread_offset = ScanParameters::ScanPerThread * local_id * scanParameters.scan_stride; + + const Index global_offset = panel_offset + group_offset + block_offset + thread_offset; + const Index block_size = scanParameters.elements_per_group / scanParameters.elements_per_block; + const Index in_id = (panel_id * block_size * scanParameters.non_scan_size) + (group_id * block_size) + block_id; + CoeffReturnType adjust_val = in_ptr[in_id]; + + Index next_elements = 0; + EIGEN_UNROLL_LOOP + for (Index i = 0; i < ScanParameters::ScanPerThread; i++) { + Index global_id = global_offset + next_elements; + if ((((block_id * scanParameters.elements_per_block) + (ScanParameters::ScanPerThread * local_id) + i) < + scanParameters.scan_size) && + (global_id < scanParameters.total_size)) { + CoeffReturnType accum = adjust_val; + accumulator.reduce(out_ptr[global_id], &accum); + out_ptr[global_id] = accumulator.finalize(accum); + } + next_elements += scanParameters.scan_stride; + } + } + } +}; + +template +struct ScanInfo { + const Index &total_size; + const Index &scan_size; + const Index &panel_size; + const Index &non_scan_size; + const Index &scan_stride; + const Index &non_scan_stride; + + Index max_elements_per_block; + Index block_size; + Index panel_threads; + Index group_threads; + Index block_threads; + 
Index elements_per_group; + Index elements_per_block; + Index loop_range; + Index global_range; + Index local_range; + const Eigen::SyclDevice &dev; + EIGEN_STRONG_INLINE ScanInfo(const Index &total_size_, const Index &scan_size_, const Index &panel_size_, + const Index &non_scan_size_, const Index &scan_stride_, const Index &non_scan_stride_, + const Eigen::SyclDevice &dev_) + : total_size(total_size_), + scan_size(scan_size_), + panel_size(panel_size_), + non_scan_size(non_scan_size_), + scan_stride(scan_stride_), + non_scan_stride(non_scan_stride_), + dev(dev_) { + // must be power of 2 + local_range = std::min(Index(dev.getNearestPowerOfTwoWorkGroupSize()), + Index(EIGEN_SYCL_LOCAL_THREAD_DIM0 * EIGEN_SYCL_LOCAL_THREAD_DIM1)); + + max_elements_per_block = local_range * ScanParameters::ScanPerThread; + + elements_per_group = + dev.getPowerOfTwo(Index(roundUp(Index(scan_size), ScanParameters::ScanPerThread)), true); + const Index elements_per_panel = elements_per_group * non_scan_size; + elements_per_block = std::min(Index(elements_per_group), Index(max_elements_per_block)); + panel_threads = elements_per_panel / ScanParameters::ScanPerThread; + group_threads = elements_per_group / ScanParameters::ScanPerThread; + block_threads = elements_per_block / ScanParameters::ScanPerThread; + block_size = elements_per_group / elements_per_block; +#ifdef EIGEN_SYCL_MAX_GLOBAL_RANGE + const Index max_threads = std::min(Index(panel_threads * panel_size), Index(EIGEN_SYCL_MAX_GLOBAL_RANGE)); +#else + const Index max_threads = panel_threads * panel_size; +#endif + global_range = roundUp(max_threads, local_range); + loop_range = Index( + std::ceil(double(elements_per_panel * panel_size) / (global_range * ScanParameters::ScanPerThread))); + } + inline ScanParameters get_scan_parameter() { + return ScanParameters(total_size, non_scan_size, scan_size, non_scan_stride, scan_stride, panel_threads, + group_threads, block_threads, elements_per_group, elements_per_block, loop_range); + 
} + inline cl::sycl::nd_range<1> get_thread_range() { + return cl::sycl::nd_range<1>(cl::sycl::range<1>(global_range), cl::sycl::range<1>(local_range)); + } +}; + +template +struct SYCLAdjustBlockOffset { + EIGEN_STRONG_INLINE static void adjust_scan_block_offset(EvaluatorPointerType in_ptr, EvaluatorPointerType out_ptr, + Reducer &accumulator, const Index total_size, + const Index scan_size, const Index panel_size, + const Index non_scan_size, const Index scan_stride, + const Index non_scan_stride, const Eigen::SyclDevice &dev) { + auto scan_info = + ScanInfo(total_size, scan_size, panel_size, non_scan_size, scan_stride, non_scan_stride, dev); + + typedef ScanAdjustmentKernelFunctor + AdjustFuctor; + dev.template unary_kernel_launcher(in_ptr, out_ptr, scan_info.get_thread_range(), + scan_info.max_elements_per_block, + scan_info.get_scan_parameter(), accumulator); + } +}; + +template +struct ScanLauncher_impl { + template + EIGEN_STRONG_INLINE static void scan_block(Input in_ptr, EvaluatorPointerType out_ptr, Reducer &accumulator, + const Index total_size, const Index scan_size, const Index panel_size, + const Index non_scan_size, const Index scan_stride, + const Index non_scan_stride, const bool inclusive, + const Eigen::SyclDevice &dev) { + auto scan_info = + ScanInfo(total_size, scan_size, panel_size, non_scan_size, scan_stride, non_scan_stride, dev); + const Index temp_pointer_size = scan_info.block_size * non_scan_size * panel_size; + const Index scratch_size = scan_info.max_elements_per_block / (ScanParameters::ScanPerThread / 2); + CoeffReturnType *temp_pointer = + static_cast(dev.allocate_temp(temp_pointer_size * sizeof(CoeffReturnType))); + EvaluatorPointerType tmp_global_accessor = dev.get(temp_pointer); + + typedef ScanKernelFunctor ScanFunctor; + dev.template binary_kernel_launcher( + in_ptr, out_ptr, tmp_global_accessor, scan_info.get_thread_range(), scratch_size, + scan_info.get_scan_parameter(), accumulator, inclusive); + + if (scan_info.block_size > 
1) { + ScanLauncher_impl::scan_block( + tmp_global_accessor, tmp_global_accessor, accumulator, temp_pointer_size, scan_info.block_size, panel_size, + non_scan_size, Index(1), scan_info.block_size, false, dev); + + SYCLAdjustBlockOffset::adjust_scan_block_offset( + tmp_global_accessor, out_ptr, accumulator, total_size, scan_size, panel_size, non_scan_size, scan_stride, + non_scan_stride, dev); + } + dev.deallocate_temp(temp_pointer); + } +}; + +} // namespace internal +} // namespace TensorSycl +namespace internal { +template +struct ScanLauncher { + typedef typename Self::Index Index; + typedef typename Self::CoeffReturnType CoeffReturnType; + typedef typename Self::Storage Storage; + typedef typename Self::EvaluatorPointerType EvaluatorPointerType; + void operator()(Self &self, EvaluatorPointerType data) { + const Index total_size = internal::array_prod(self.dimensions()); + const Index scan_size = self.size(); + const Index scan_stride = self.stride(); + // this is the scan op (can be sum or ...) + auto accumulator = self.accumulator(); + auto inclusive = !self.exclusive(); + auto consume_dim = self.consume_dim(); + auto dev = self.device(); + + auto dims = self.inner().dimensions(); + + Index non_scan_size = 1; + Index panel_size = 1; + if (static_cast(Self::Layout) == static_cast(ColMajor)) { + for (int i = 0; i < consume_dim; i++) { + non_scan_size *= dims[i]; + } + for (int i = consume_dim + 1; i < Self::NumDims; i++) { + panel_size *= dims[i]; + } + } else { + for (int i = Self::NumDims - 1; i > consume_dim; i--) { + non_scan_size *= dims[i]; + } + for (int i = consume_dim - 1; i >= 0; i--) { + panel_size *= dims[i]; + } + } + const Index non_scan_stride = (scan_stride > 1) ? 
1 : scan_size; + auto eval_impl = self.inner(); + TensorSycl::internal::ScanLauncher_impl::scan_block( + eval_impl, data, accumulator, total_size, scan_size, panel_size, non_scan_size, scan_stride, non_scan_stride, + inclusive, dev); + } +}; +} // namespace internal +} // namespace Eigen + +#endif // UNSUPPORTED_EIGEN_CXX11_SRC_TENSOR_TENSOR_SYCL_SYCL_HPP diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h new file mode 100644 index 0000000..e5e5efd --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h @@ -0,0 +1,471 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H +#define EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H + +namespace Eigen { + +/** \class TensorShuffling + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor shuffling class. 
+ * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorShufflingOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorShufflingOp type; +}; + +} // end namespace internal + + + +template +class TensorShufflingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorShufflingOp(const XprType& expr, const Shuffle& shfl) + : m_xpr(expr), m_shuffle(shfl) {} + + EIGEN_DEVICE_FUNC + const Shuffle& shufflePermutation() const { return m_shuffle; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorShufflingOp) + + + protected: + typename XprType::Nested m_xpr; + const Shuffle m_shuffle; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Self; + typedef TensorShufflingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + 
typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + typedef internal::TensorBlockScratchAllocator TensorBlockScratch; + + typedef typename internal::TensorMaterializedBlock + TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_device(device), + m_impl(op.expression(), device) + { + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + const Shuffle& shuffle = op.shufflePermutation(); + m_is_identity = true; + for (int i = 0; i < NumDims; ++i) { + m_shuffle[i] = static_cast(shuffle[i]); + m_dimensions[i] = input_dims[shuffle[i]]; + m_inverseShuffle[shuffle[i]] = i; + if (m_is_identity && shuffle[i] != i) { + m_is_identity = false; + } + } + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_unshuffledInputStrides[0] = 1; + m_outputStrides[0] = 1; + + for (int i = 1; i < NumDims; ++i) { + m_unshuffledInputStrides[i] = + m_unshuffledInputStrides[i - 1] * input_dims[i - 1]; + m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor( + m_outputStrides[i] > 0 ? 
m_outputStrides[i] : Index(1)); + } + } else { + m_unshuffledInputStrides[NumDims - 1] = 1; + m_outputStrides[NumDims - 1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_unshuffledInputStrides[i] = + m_unshuffledInputStrides[i + 1] * input_dims[i + 1]; + m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; + m_fastOutputStrides[i] = internal::TensorIntDivisor( + m_outputStrides[i] > 0 ? m_outputStrides[i] : Index(1)); + } + } + + for (int i = 0; i < NumDims; ++i) { + m_inputStrides[i] = m_unshuffledInputStrides[shuffle[i]]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + +#ifdef EIGEN_USE_THREADS + template + EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync( + EvaluatorPointerType, EvalSubExprsCallback done) { + m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); }); + } +#endif // EIGEN_USE_THREADS + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + if (m_is_identity) { + return m_impl.coeff(index); + } else { + return m_impl.coeff(srcCoeff(index)); + } + } + + template + struct PacketLoader { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + static PacketReturnType Run(const Self& self, Index index) { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = self.coeff(index + i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + }; + + template + struct PacketLoader { + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + static PacketReturnType Run(const Self& self, Index index) { + if (self.m_is_identity) { + return self.m_impl.template packet(index); + } else { + EIGEN_ALIGN_MAX typename internal::remove_const::type 
values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = self.coeff(index + i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + }; + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index + PacketSize - 1 < dimensions().TotalSize()); + return PacketLoader::PacketAccess>::Run(*this, index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + internal::TensorBlockResourceRequirements getResourceRequirements() const { + static const int inner_dim = + Layout == static_cast(ColMajor) ? 0 : NumDims - 1; + + const size_t target_size = m_device.firstLevelCacheSize(); + const bool inner_dim_shuffled = m_shuffle[inner_dim] != inner_dim; + + // Shuffled inner dimensions leads to a random memory access, which is not + // captured by default cost model bytes loaded/stored. We add this cost + // explicitly. The number of cycles picked based on the benchmarks. + // TODO(ezhulenev): This number was picked based on a very questionable + // benchmarks, add benchmarks that are representative of real workloads. 
+ using BlockRequirements = internal::TensorBlockResourceRequirements; + if (inner_dim_shuffled) { + return BlockRequirements::uniform(target_size) + .addCostPerCoeff({0, 0, NumDims * 28}); + } else { + return BlockRequirements::skewed(target_size); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock + block(TensorBlockDesc& desc, TensorBlockScratch& scratch, + bool root_of_expr_ast = false) const { + assert(m_impl.data() != NULL); + + typedef internal::TensorBlockIO + TensorBlockIO; + typedef typename TensorBlockIO::Dst TensorBlockIODst; + typedef typename TensorBlockIO::Src TensorBlockIOSrc; + + const typename TensorBlock::Storage block_storage = + TensorBlock::prepareStorage( + desc, scratch, /*allow_strided_storage=*/root_of_expr_ast); + + typename TensorBlockIO::Dimensions input_strides(m_unshuffledInputStrides); + TensorBlockIOSrc src(input_strides, m_impl.data(), srcCoeff(desc.offset())); + + TensorBlockIODst dst(block_storage.dimensions(), block_storage.strides(), + block_storage.data()); + + typename TensorBlockIO::DimensionsMap dst_to_src_dim_map(m_shuffle); + TensorBlockIO::Copy(dst, src, dst_to_src_dim_map); + + return block_storage.AsTensorMaterializedBlock(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + const double compute_cost = m_is_identity ? 
TensorOpCost::AddCost() : + NumDims * (2 * TensorOpCost::AddCost() + + 2 * TensorOpCost::MulCost() + + TensorOpCost::DivCost()); + return m_impl.costPerCoeff(vectorized) + + TensorOpCost(0, 0, compute_cost, m_is_identity /* vectorized */, PacketSize); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index GetBlockOutputIndex( + Index input_index, + const DSizes& input_block_strides, + const DSizes& output_block_strides, + const DSizes, NumDims>& fast_input_block_strides) const { + Index output_index = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = input_index / fast_input_block_strides[i]; + output_index += idx * output_block_strides[m_inverseShuffle[i]]; + input_index -= idx * input_block_strides[i]; + } + return output_index + input_index * + output_block_strides[m_inverseShuffle[0]]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = input_index / fast_input_block_strides[i]; + output_index += idx * output_block_strides[m_inverseShuffle[i]]; + input_index -= idx * input_block_strides[i]; + } + return output_index + input_index * + output_block_strides[m_inverseShuffle[NumDims - 1]]; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + return inputIndex + index * m_inputStrides[0]; + } else { + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_fastOutputStrides[i]; + inputIndex += 
idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + return inputIndex + index * m_inputStrides[NumDims - 1]; + } + } + + Dimensions m_dimensions; + bool m_is_identity; + array m_shuffle; + array m_inverseShuffle; // TODO(ezhulenev): Make it int type. + array m_outputStrides; + array, NumDims> m_fastOutputStrides; + array m_inputStrides; + array m_unshuffledInputStrides; + + const Device EIGEN_DEVICE_REF m_device; + TensorEvaluator m_impl; +}; + + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorEvaluator, Device> Base; + + typedef TensorShufflingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + enum { + IsAligned = false, + PacketAccess = (PacketType::size > 1), + BlockAccess = TensorEvaluator::RawAccess, + PreferBlockAccess = true, + Layout = TensorEvaluator::Layout, + RawAccess = false + }; + + typedef typename internal::remove_const::type ScalarNoConst; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockDescriptor TensorBlockDesc; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) + { } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + 
internal::pstore(values, x); + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + this->coeffRef(index+i) = values[i]; + } + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock( + const TensorBlockDesc& desc, const TensorBlock& block) { + eigen_assert(this->m_impl.data() != NULL); + + typedef internal::TensorBlockIO + TensorBlockIO; + typedef typename TensorBlockIO::Dst TensorBlockIODst; + typedef typename TensorBlockIO::Src TensorBlockIOSrc; + + const Scalar* block_buffer = block.data(); + + // TODO(ezhulenev): TensorBlockIO should be able to read from any Eigen + // expression with coefficient and packet access as `src`. + void* mem = NULL; + if (block_buffer == NULL) { + mem = this->m_device.allocate(desc.size() * sizeof(Scalar)); + ScalarNoConst* buf = static_cast(mem); + + typedef internal::TensorBlockAssignment< + ScalarNoConst, NumDims, typename TensorBlock::XprType, Index> + TensorBlockAssignment; + + TensorBlockAssignment::Run( + TensorBlockAssignment::target( + desc.dimensions(), internal::strides(desc.dimensions()), + buf), + block.expr()); + + block_buffer = buf; + } + + // Read from block. + TensorBlockIOSrc src(internal::strides(desc.dimensions()), + block_buffer); + + // Write to the output buffer. + typename TensorBlockIO::Dimensions output_strides( + this->m_unshuffledInputStrides); + typename TensorBlockIO::Dimensions output_dimensions; + for (int i = 0; i < NumDims; ++i) { + output_dimensions[this->m_shuffle[i]] = desc.dimension(i); + } + TensorBlockIODst dst(output_dimensions, output_strides, this->m_impl.data(), + this->srcCoeff(desc.offset())); + + // Reorder dimensions according to the shuffle. + typename TensorBlockIO::DimensionsMap dst_to_src_dim_map; + for (int i = 0; i < NumDims; ++i) { + dst_to_src_dim_map[i] = static_cast(this->m_inverseShuffle[i]); + } + TensorBlockIO::Copy(dst, src, dst_to_src_dim_map); + + // Deallocate temporary buffer used for the block materialization. 
+ if (mem != NULL) this->m_device.deallocate(mem); + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_SHUFFLING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h new file mode 100644 index 0000000..5ff0880 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h @@ -0,0 +1,161 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// Copyright (C) 2014-2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSORSTORAGE_H +#define EIGEN_CXX11_TENSOR_TENSORSTORAGE_H + +#ifdef EIGEN_TENSOR_STORAGE_CTOR_PLUGIN + #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN EIGEN_TENSOR_STORAGE_CTOR_PLUGIN; +#else + #define EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN +#endif + +namespace Eigen { + +/** \internal + * + * \class TensorStorage + * \ingroup CXX11_Tensor_Module + * + * \brief Stores the data of a tensor + * + * This class stores the data of fixed-size, dynamic-size or mixed tensors + * in a way as compact as possible. + * + * \sa Tensor + */ +template class TensorStorage; + + +// Pure fixed-size storage +template +class TensorStorage +{ + private: + static const std::size_t Size = FixedDimensions::total_size; + + // Allocate an array of size at least one to prevent compiler warnings. 
+ static const std::size_t MinSize = max_n_1::size; + EIGEN_ALIGN_MAX T m_data[MinSize]; + + public: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE TensorStorage() { + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T *data() { return m_data; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T *data() const { return m_data; } + + static EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const FixedDimensions& dimensions() + { + static const FixedDimensions* singleton_dimensions = new FixedDimensions(); + return *singleton_dimensions; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE DenseIndex size() const { return Size; } +}; + +// pure dynamic +template +class TensorStorage, Options_> +{ + public: + typedef IndexType Index; + typedef DSizes Dimensions; + typedef TensorStorage, Options_> Self; + + EIGEN_DEVICE_FUNC TensorStorage() : m_data(0), m_dimensions() { + if (NumIndices_ == 0) { + m_data = internal::conditional_aligned_new_auto(1); + } + } + EIGEN_DEVICE_FUNC TensorStorage(internal::constructor_without_unaligned_array_assert) + : m_data(0), m_dimensions(internal::template repeat(0)) {} + EIGEN_DEVICE_FUNC TensorStorage(Index size, const array& dimensions) + : m_data(internal::conditional_aligned_new_auto(size)), m_dimensions(dimensions) + { EIGEN_INTERNAL_TENSOR_STORAGE_CTOR_PLUGIN } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + EIGEN_DEVICE_FUNC TensorStorage(DenseIndex... indices) : m_dimensions(indices...) 
{ + m_data = internal::conditional_aligned_new_auto(internal::array_prod(m_dimensions)); + } +#endif + + EIGEN_DEVICE_FUNC TensorStorage(const Self& other) + : m_data(internal::conditional_aligned_new_auto(internal::array_prod(other.m_dimensions))) + , m_dimensions(other.m_dimensions) + { + internal::smart_copy(other.m_data, other.m_data+internal::array_prod(other.m_dimensions), m_data); + } + EIGEN_DEVICE_FUNC Self& operator=(const Self& other) + { + if (this != &other) { + Self tmp(other); + this->swap(tmp); + } + return *this; + } + +#if EIGEN_HAS_RVALUE_REFERENCES + EIGEN_DEVICE_FUNC TensorStorage(Self&& other) : TensorStorage() + { + *this = std::move(other); + } + + EIGEN_DEVICE_FUNC Self& operator=(Self&& other) + { + numext::swap(m_data, other.m_data); + numext::swap(m_dimensions, other.m_dimensions); + return *this; + } +#endif + + EIGEN_DEVICE_FUNC ~TensorStorage() { internal::conditional_aligned_delete_auto(m_data, internal::array_prod(m_dimensions)); } + EIGEN_DEVICE_FUNC void swap(Self& other) + { numext::swap(m_data,other.m_data); numext::swap(m_dimensions,other.m_dimensions); } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {return m_dimensions;} + + EIGEN_DEVICE_FUNC void resize(Index size, const array& nbDimensions) + { + const Index currentSz = internal::array_prod(m_dimensions); + if(size != currentSz) + { + internal::conditional_aligned_delete_auto(m_data, currentSz); + if (size) + m_data = internal::conditional_aligned_new_auto(size); + else if (NumIndices_ == 0) { + m_data = internal::conditional_aligned_new_auto(1); + } + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN({}) + } + m_dimensions = nbDimensions; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T *data() { return m_data; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T *data() const { return m_data; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_dimensions.TotalSize(); } + + private: + T *m_data; + Dimensions 
m_dimensions; +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSORSTORAGE_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h new file mode 100644 index 0000000..2f62a66 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h @@ -0,0 +1,346 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H +#define EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H + +namespace Eigen { + +/** \class TensorStriding + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor striding class. + * + * + */ +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorStridingOpEIGEN_DEVICE_REF type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorStridingOp type; +}; + +} // end namespace internal + + + +template +class TensorStridingOp : public TensorBase > +{ + public: + typedef TensorBase > Base; + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type 
Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingOp(const XprType& expr, const Strides& dims) + : m_xpr(expr), m_dims(dims) {} + + EIGEN_DEVICE_FUNC + const Strides& strides() const { return m_dims; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingOp) + + protected: + typename XprType::Nested m_xpr; + const Strides m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorStridingOp XprType; + typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device) + { + m_dimensions = m_impl.dimensions(); + for (int i = 0; i < NumDims; ++i) { + m_dimensions[i] =Eigen::numext::ceil(static_cast(m_dimensions[i]) / op.strides()[i]); + } + + const typename TensorEvaluator::Dimensions& input_dims = 
m_impl.dimensions(); + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + m_inputStrides[0] = 1; + for (int i = 1; i < NumDims; ++i) { + m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; + m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1]; + m_inputStrides[i-1] *= op.strides()[i-1]; + } + m_inputStrides[NumDims-1] *= op.strides()[NumDims-1]; + } else { // RowMajor + m_outputStrides[NumDims-1] = 1; + m_inputStrides[NumDims-1] = 1; + for (int i = NumDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1]; + m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1]; + m_inputStrides[i+1] *= op.strides()[i+1]; + } + m_inputStrides[0] *= op.strides()[0]; + } + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType/*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return m_impl.coeff(srcCoeff(index)); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + PacketSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / m_outputStrides[i]; + const Index idx1 = indices[1] / m_outputStrides[i]; + inputIndices[0] += idx0 * m_inputStrides[i]; + inputIndices[1] += idx1 * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += indices[0] * m_inputStrides[0]; + inputIndices[1] += 
indices[1] * m_inputStrides[0]; + } else { // RowMajor + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / m_outputStrides[i]; + const Index idx1 = indices[1] / m_outputStrides[i]; + inputIndices[0] += idx0 * m_inputStrides[i]; + inputIndices[1] += idx1 * m_inputStrides[i]; + indices[0] -= idx0 * m_outputStrides[i]; + indices[1] -= idx1 * m_outputStrides[i]; + } + inputIndices[0] += indices[0] * m_inputStrides[NumDims-1]; + inputIndices[1] += indices[1] * m_inputStrides[NumDims-1]; + } + if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { + PacketReturnType rslt = m_impl.template packet(inputIndices[0]); + return rslt; + } + else { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + values[0] = m_impl.coeff(inputIndices[0]); + values[PacketSize-1] = m_impl.coeff(inputIndices[1]); + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize-1; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const { + double compute_cost = (NumDims - 1) * (TensorOpCost::AddCost() + + TensorOpCost::MulCost() + + TensorOpCost::DivCost()) + + TensorOpCost::MulCost(); + if (vectorized) { + compute_cost *= 2; // packet() computes two indices + } + const int innerDim = (static_cast(Layout) == static_cast(ColMajor)) ? 0 : (NumDims - 1); + return m_impl.costPerCoeff(vectorized && m_inputStrides[innerDim] == 1) + + // Computation is not vectorized per se, but it is done once per packet. 
+ TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const + { + Index inputIndex = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += index * m_inputStrides[0]; + } else { // RowMajor + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + inputIndex += idx * m_inputStrides[i]; + index -= idx * m_outputStrides[i]; + } + inputIndex += index * m_inputStrides[NumDims-1]; + } + return inputIndex; + } + + Dimensions m_dimensions; + array m_outputStrides; + array m_inputStrides; + TensorEvaluator m_impl; +}; + +// Eval as lvalue +template +struct TensorEvaluator, Device> + : public TensorEvaluator, Device> +{ + typedef TensorStridingOp XprType; + typedef TensorEvaluator Base; + // typedef typename XprType::Index Index; + static const int NumDims = internal::array_size::Dimensions>::value; + // typedef DSizes Dimensions; + + enum { + IsAligned = /*TensorEvaluator::IsAligned*/false, + PacketAccess = TensorEvaluator::PacketAccess, + PreferBlockAccess = false, + Layout = TensorEvaluator::Layout, + CoordAccess = false, // to be implemented + RawAccess = false + }; + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : Base(op, device) { } + + typedef typename XprType::Index Index; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename 
PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return this->m_impl.coeffRef(this->srcCoeff(index)); + } + + template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void writePacket(Index index, const PacketReturnType& x) + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < this->dimensions().TotalSize()); + + Index inputIndices[] = {0, 0}; + Index indices[] = {index, index + PacketSize - 1}; + if (static_cast(Layout) == static_cast(ColMajor)) { + EIGEN_UNROLL_LOOP + for (int i = NumDims - 1; i > 0; --i) { + const Index idx0 = indices[0] / this->m_outputStrides[i]; + const Index idx1 = indices[1] / this->m_outputStrides[i]; + inputIndices[0] += idx0 * this->m_inputStrides[i]; + inputIndices[1] += idx1 * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += indices[0] * this->m_inputStrides[0]; + inputIndices[1] += indices[1] * this->m_inputStrides[0]; + } else { // RowMajor + EIGEN_UNROLL_LOOP + for (int i = 0; i < NumDims - 1; ++i) { + const Index idx0 = indices[0] / this->m_outputStrides[i]; + const Index idx1 = indices[1] / this->m_outputStrides[i]; + inputIndices[0] += idx0 * this->m_inputStrides[i]; + inputIndices[1] += idx1 * this->m_inputStrides[i]; + indices[0] -= idx0 * this->m_outputStrides[i]; + indices[1] -= idx1 * this->m_outputStrides[i]; + } + inputIndices[0] += indices[0] * this->m_inputStrides[NumDims-1]; + inputIndices[1] += indices[1] * this->m_inputStrides[NumDims-1]; + } + if (inputIndices[1] - inputIndices[0] == PacketSize - 1) { + this->m_impl.template writePacket(inputIndices[0], x); + } + else { + EIGEN_ALIGN_MAX Scalar values[PacketSize]; + internal::pstore(values, x); + this->m_impl.coeffRef(inputIndices[0]) = values[0]; + this->m_impl.coeffRef(inputIndices[1]) = 
values[PacketSize-1]; + EIGEN_UNROLL_LOOP + for (int i = 1; i < PacketSize-1; ++i) { + this->coeffRef(index+i) = values[i]; + } + } + } +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h new file mode 100644 index 0000000..926ecdd --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h @@ -0,0 +1,303 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2017 Gagan Goel +// Copyright (C) 2017 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H +#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H + +namespace Eigen { + +/** \class TensorTrace + * \ingroup CXX11_Tensor_Module + * + * \brief Tensor Trace class. 
+ * + * + */ + +namespace internal { +template +struct traits > : public traits +{ + typedef typename XprType::Scalar Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions - array_size::value; + static const int Layout = XprTraits::Layout; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorTraceOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorTraceOp type; +}; + +} // end namespace internal + + +template +class TensorTraceOp : public TensorBase > +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims) + : m_xpr(expr), m_dims(dims) { + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const Dims& dims() const { return m_dims; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const typename internal::remove_all::type& expression() const { return m_xpr; } + + protected: + typename XprType::Nested m_xpr; + const Dims m_dims; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorTraceOp XprType; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumReducedDims = internal::array_size::value; + static const int NumOutputDims = NumInputDims - NumReducedDims; + typedef typename XprType::Index Index; + typedef DSizes Dimensions; + typedef typename XprType::Scalar Scalar; + typedef typename XprType::CoeffReturnType 
CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = internal::unpacket_traits::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) + : m_impl(op.expression(), device), m_traceDim(1), m_device(device) + { + + EIGEN_STATIC_ASSERT((NumOutputDims >= 0), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE); + + for (int i = 0; i < NumInputDims; ++i) { + m_reduced[i] = false; + } + + const Dims& op_dims = op.dims(); + for (int i = 0; i < NumReducedDims; ++i) { + eigen_assert(op_dims[i] >= 0); + eigen_assert(op_dims[i] < NumInputDims); + m_reduced[op_dims[i]] = true; + } + + // All the dimensions should be distinct to compute the trace + int num_distinct_reduce_dims = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (m_reduced[i]) { + ++num_distinct_reduce_dims; + } + } + + eigen_assert(num_distinct_reduce_dims == NumReducedDims); + + // Compute the dimensions of the result. 
+ const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + int output_index = 0; + int reduced_index = 0; + for (int i = 0; i < NumInputDims; ++i) { + if (m_reduced[i]) { + m_reducedDims[reduced_index] = input_dims[i]; + if (reduced_index > 0) { + // All the trace dimensions must have the same size + eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]); + } + ++reduced_index; + } + else { + m_dimensions[output_index] = input_dims[i]; + ++output_index; + } + } + + if (NumReducedDims != 0) { + m_traceDim = m_reducedDims[0]; + } + + // Compute the output strides + if (NumOutputDims > 0) { + if (static_cast(Layout) == static_cast(ColMajor)) { + m_outputStrides[0] = 1; + for (int i = 1; i < NumOutputDims; ++i) { + m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1]; + } + } + else { + m_outputStrides.back() = 1; + for (int i = NumOutputDims - 2; i >= 0; --i) { + m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1]; + } + } + } + + // Compute the input strides + if (NumInputDims > 0) { + array input_strides; + if (static_cast(Layout) == static_cast(ColMajor)) { + input_strides[0] = 1; + for (int i = 1; i < NumInputDims; ++i) { + input_strides[i] = input_strides[i - 1] * input_dims[i - 1]; + } + } + else { + input_strides.back() = 1; + for (int i = NumInputDims - 2; i >= 0; --i) { + input_strides[i] = input_strides[i + 1] * input_dims[i + 1]; + } + } + + output_index = 0; + reduced_index = 0; + for (int i = 0; i < NumInputDims; ++i) { + if(m_reduced[i]) { + m_reducedStrides[reduced_index] = input_strides[i]; + ++reduced_index; + } + else { + m_preservedStrides[output_index] = input_strides[i]; + ++output_index; + } + } + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { + return m_dimensions; + } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + 
m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Initialize the result + CoeffReturnType result = internal::cast(0); + Index index_stride = 0; + for (int i = 0; i < NumReducedDims; ++i) { + index_stride += m_reducedStrides[i]; + } + + // If trace is requested along all dimensions, starting index would be 0 + Index cur_index = 0; + if (NumOutputDims != 0) + cur_index = firstInput(index); + for (Index i = 0; i < m_traceDim; ++i) { + result += m_impl.coeff(cur_index); + cur_index += index_stride; + } + + return result; + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const { + + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE); + eigen_assert(index + PacketSize - 1 < dimensions().TotalSize()); + + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index + i); + } + PacketReturnType result = internal::ploadt(values); + return result; + } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + + protected: + // Given the output index, finds the first index in the input tensor used to compute the trace + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { + Index startInput = 0; + if (static_cast(Layout) == static_cast(ColMajor)) { + for (int i = NumOutputDims - 1; i > 0; --i) { + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + startInput += index * m_preservedStrides[0]; + } + else { + for (int i = 0; i < NumOutputDims - 1; ++i) { + const Index idx = index / m_outputStrides[i]; + startInput += idx * m_preservedStrides[i]; + index -= idx * m_outputStrides[i]; + } + startInput += index * 
m_preservedStrides[NumOutputDims - 1]; + } + return startInput; + } + + Dimensions m_dimensions; + TensorEvaluator m_impl; + // Initialize the size of the trace dimension + Index m_traceDim; + const Device EIGEN_DEVICE_REF m_device; + array m_reduced; + array m_reducedDims; + array m_outputStrides; + array m_reducedStrides; + array m_preservedStrides; +}; + + +} // End namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h new file mode 100644 index 0000000..4f7fd34 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h @@ -0,0 +1,264 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H +#define EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H + +namespace Eigen { +namespace internal { + + +template +class compute_tensor_flags +{ + enum { + is_dynamic_size_storage = 1, + + is_aligned = + ( + ((Options&DontAlign)==0) && ( +#if EIGEN_MAX_STATIC_ALIGN_BYTES>0 + (!is_dynamic_size_storage) +#else + 0 +#endif + | +#if EIGEN_MAX_ALIGN_BYTES>0 + is_dynamic_size_storage +#else + 0 +#endif + ) + ), + packet_access_bit = packet_traits::Vectorizable && is_aligned ? PacketAccessBit : 0 + }; + + public: + enum { ret = packet_access_bit }; +}; + + +template +struct traits > +{ + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef IndexType_ Index; + static const int NumDimensions = NumIndices_; + static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor; + enum { + Options = Options_, + Flags = compute_tensor_flags::ret | (is_const::value ? 
0 : LvalueBit) + }; + template struct MakePointer { + typedef T* Type; + }; + typedef typename MakePointer::Type PointerType; +}; + + +template +struct traits > +{ + typedef Scalar_ Scalar; + typedef Dense StorageKind; + typedef IndexType_ Index; + static const int NumDimensions = array_size::value; + static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor; + enum { + Options = Options_, + Flags = compute_tensor_flags::ret | (is_const::value ? 0: LvalueBit) + }; + template struct MakePointer { + typedef T* Type; + }; + typedef typename MakePointer::Type PointerType; +}; + + +template class MakePointer_> +struct traits > + : public traits +{ + typedef traits BaseTraits; + typedef typename BaseTraits::Scalar Scalar; + typedef typename BaseTraits::StorageKind StorageKind; + typedef typename BaseTraits::Index Index; + static const int NumDimensions = BaseTraits::NumDimensions; + static const int Layout = BaseTraits::Layout; + enum { + Options = Options_, + Flags = BaseTraits::Flags + }; + template struct MakePointer { + // Intermediate typedef to workaround MSVC issue. 
+ typedef MakePointer_ MakePointerT; + typedef typename MakePointerT::Type Type; + }; + typedef typename MakePointer::Type PointerType; +}; + +template +struct traits > + : public traits +{ + typedef traits BaseTraits; + typedef typename BaseTraits::Scalar Scalar; + typedef typename BaseTraits::StorageKind StorageKind; + typedef typename BaseTraits::Index Index; + static const int NumDimensions = BaseTraits::NumDimensions; + static const int Layout = BaseTraits::Layout; + enum { + Options = BaseTraits::Options, + Flags = BaseTraits::Flags + }; + typedef typename BaseTraits::PointerType PointerType; +}; + + +template +struct eval, Eigen::Dense> +{ + typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>EIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const Tensor<_Scalar, NumIndices_, Options, IndexType_>EIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + +template class MakePointer> +struct eval, Eigen::Dense> +{ + typedef const TensorMapEIGEN_DEVICE_REF type; +}; + +template class MakePointer> +struct eval, Eigen::Dense> +{ + typedef const TensorMapEIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +// TODO nested<> does not exist anymore in Eigen/Core, and it thus has to be removed in favor of ref_selector. 
+template struct nested +{ + typedef typename ref_selector::type type; +}; + +template +struct nested > +{ + typedef const TensorEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorFixedSizeEIGEN_DEVICE_REF type; +}; + + +template +struct nested > +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +template +struct nested > +{ + typedef const TensorRefEIGEN_DEVICE_REF type; +}; + +} // end namespace internal + +// Convolutional layers take in an input tensor of shape (D, R, C, B), or (D, C, +// R, B), and convolve it with a set of filters, which can also be presented as +// a tensor (D, K, K, M), where M is the number of filters, K is the filter +// size, and each 3-dimensional tensor of size (D, K, K) is a filter. For +// simplicity we assume that we always use square filters (which is usually the +// case in images), hence the two Ks in the tensor dimension. It also takes in +// a few additional parameters: +// Stride (S): The convolution stride is the offset between locations where we +// apply the filters. A larger stride means that the output will be +// spatially smaller. +// Padding (P): The padding we apply to the input tensor along the R and C +// dimensions. This is usually used to make sure that the spatial +// dimensions of the output matches our intention. +// +// Two types of padding are often used: +// SAME: The pad value is computed so that the output will have size +// R/S and C/S. +// VALID: no padding is carried out. +// When we do padding, the padded values at the padded locations are usually +// zero. 
+// +// The output dimensions for convolution, when given all the parameters above, +// are as follows: +// When Padding = SAME: the output size is (B, R', C', M), where +// R' = ceil(float(R) / float(S)) +// C' = ceil(float(C) / float(S)) +// where ceil is the ceiling function. The input tensor is padded with 0 as +// needed. The number of padded rows and columns are computed as: +// Pr = ((R' - 1) * S + K - R) / 2 +// Pc = ((C' - 1) * S + K - C) / 2 +// when the stride is 1, we have the simplified case R'=R, C'=C, Pr=Pc=(K-1)/2. +// This is where SAME comes from - the output has the same size as the input has. +// When Padding = VALID: the output size is computed as +// R' = ceil(float(R - K + 1) / float(S)) +// C' = ceil(float(C - K + 1) / float(S)) +// and the number of padded rows and columns are computed in the same way as in +// the SAME case. +// When the stride is 1, we have the simplified case R'=R-K+1, C'=C-K+1, Pr=0, +// Pc=0. +typedef enum { + PADDING_VALID = 1, + PADDING_SAME = 2 +} PaddingType; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h new file mode 100644 index 0000000..d23f2e4 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSOR_TENSOR_UINT128_H +#define EIGEN_CXX11_TENSOR_TENSOR_UINT128_H + +namespace Eigen { +namespace internal { + + +template +struct static_val { + static const uint64_t value = n; + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator uint64_t() const { return n; } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val() { } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) { + EIGEN_UNUSED_VARIABLE(v); + eigen_assert(v == n); + } +}; + + +template +struct TensorUInt128 +{ + HIGH high; + LOW low; + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + TensorUInt128(const TensorUInt128& other) : high(other.high), low(other.low) { + EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE); + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + TensorUInt128& operator = (const TensorUInt128& other) { + EIGEN_STATIC_ASSERT(sizeof(OTHER_HIGH) <= sizeof(HIGH), YOU_MADE_A_PROGRAMMING_MISTAKE); + EIGEN_STATIC_ASSERT(sizeof(OTHER_LOW) <= sizeof(LOW), YOU_MADE_A_PROGRAMMING_MISTAKE); + high = other.high; + low = other.low; + return *this; + } + + template + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + explicit TensorUInt128(const T& x) : high(0), low(x) { + eigen_assert((static_cast::type>(x) <= NumTraits::highest())); + eigen_assert(x >= 0); + } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + TensorUInt128(HIGH y, LOW x) : high(y), low(x) { } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator LOW() const { + return low; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LOW lower() const { + return low; + } + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HIGH upper() const { + return high; + } +}; + + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator == (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + return (lhs.high == rhs.high) & (lhs.low == rhs.low); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE 
+bool operator != (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + return (lhs.high != rhs.high) | (lhs.low != rhs.low); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator >= (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + if (lhs.high != rhs.high) { + return lhs.high > rhs.high; + } + return lhs.low >= rhs.low; +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +bool operator < (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + if (lhs.high != rhs.high) { + return lhs.high < rhs.high; + } + return lhs.low < rhs.low; +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +TensorUInt128 operator + (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + TensorUInt128 result(lhs.high + rhs.high, lhs.low + rhs.low); + if (result.low < rhs.low) { + result.high += 1; + } + return result; +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +TensorUInt128 operator - (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + TensorUInt128 result(lhs.high - rhs.high, lhs.low - rhs.low); + if (result.low > lhs.low) { + result.high -= 1; + } + return result; +} + + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +TensorUInt128 operator * (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + // Split each 128-bit integer into 4 32-bit integers, and then do the + // multiplications by hand as follow: + // lhs a b c d + // rhs e f g h + // ----------- + // ah bh ch dh + // bg cg dg + // cf df + // de + // The result is stored in 2 64bit integers, high and low. 
+ + const uint64_t LOW = 0x00000000FFFFFFFFLL; + const uint64_t HIGH = 0xFFFFFFFF00000000LL; + + uint64_t d = lhs.low & LOW; + uint64_t c = (lhs.low & HIGH) >> 32LL; + uint64_t b = lhs.high & LOW; + uint64_t a = (lhs.high & HIGH) >> 32LL; + + uint64_t h = rhs.low & LOW; + uint64_t g = (rhs.low & HIGH) >> 32LL; + uint64_t f = rhs.high & LOW; + uint64_t e = (rhs.high & HIGH) >> 32LL; + + // Compute the low 32 bits of low + uint64_t acc = d * h; + uint64_t low = acc & LOW; + // Compute the high 32 bits of low. Add a carry every time we wrap around + acc >>= 32LL; + uint64_t carry = 0; + uint64_t acc2 = acc + c * h; + if (acc2 < acc) { + carry++; + } + acc = acc2 + d * g; + if (acc < acc2) { + carry++; + } + low |= (acc << 32LL); + + // Carry forward the high bits of acc to initiate the computation of the + // low 32 bits of high + acc2 = (acc >> 32LL) | (carry << 32LL); + carry = 0; + + acc = acc2 + b * h; + if (acc < acc2) { + carry++; + } + acc2 = acc + c * g; + if (acc2 < acc) { + carry++; + } + acc = acc2 + d * f; + if (acc < acc2) { + carry++; + } + uint64_t high = acc & LOW; + + // Start to compute the high 32 bits of high. 
+ acc2 = (acc >> 32LL) | (carry << 32LL); + + acc = acc2 + a * h; + acc2 = acc + b * g; + acc = acc2 + c * f; + acc2 = acc + d * e; + high |= (acc2 << 32LL); + + return TensorUInt128(high, low); +} + +template +static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +TensorUInt128 operator / (const TensorUInt128& lhs, const TensorUInt128& rhs) +{ + if (rhs == TensorUInt128, static_val<1> >(1)) { + return TensorUInt128(lhs.high, lhs.low); + } else if (lhs < rhs) { + return TensorUInt128(0); + } else { + // calculate the biggest power of 2 times rhs that's less than or equal to lhs + TensorUInt128 power2(1); + TensorUInt128 d(rhs); + TensorUInt128 tmp(lhs - d); + while (lhs >= d) { + tmp = tmp - d; + d = d + d; + power2 = power2 + power2; + } + + tmp = TensorUInt128(lhs.high, lhs.low); + TensorUInt128 result(0); + while (power2 != TensorUInt128, static_val<0> >(0)) { + if (tmp >= d) { + tmp = tmp - d; + result = result + power2; + } + // Shift right + power2 = TensorUInt128(power2.high >> 1, (power2.low >> 1) | (power2.high << 63)); + d = TensorUInt128(d.high >> 1, (d.low >> 1) | (d.high << 63)); + } + + return result; + } +} + + +} // namespace internal +} // namespace Eigen + + +#endif // EIGEN_CXX11_TENSOR_TENSOR_UINT128_H diff --git a/external/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h b/external/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h new file mode 100644 index 0000000..0beb9ff --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h @@ -0,0 +1,629 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. + +#ifndef EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H +#define EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H + +namespace Eigen { + +/** \class TensorVolumePatch + * \ingroup CXX11_Tensor_Module + * + * \brief Patch extraction specialized for processing of volumetric data. 
+ * This assumes that the input has a least 4 dimensions ordered as follows: + * - channels + * - planes + * - rows + * - columns + * - (optional) additional dimensions such as time or batch size. + * Calling the volume patch code with patch_planes, patch_rows, and patch_cols + * is equivalent to calling the regular patch extraction code with parameters + * d, patch_planes, patch_rows, patch_cols, and 1 for all the additional + * dimensions. + */ +namespace internal { + +template +struct traits > : public traits +{ + typedef typename internal::remove_const::type Scalar; + typedef traits XprTraits; + typedef typename XprTraits::StorageKind StorageKind; + typedef typename XprTraits::Index Index; + typedef typename XprType::Nested Nested; + typedef typename remove_reference::type _Nested; + static const int NumDimensions = XprTraits::NumDimensions + 1; + static const int Layout = XprTraits::Layout; + typedef typename XprTraits::PointerType PointerType; + +}; + +template +struct eval, Eigen::Dense> +{ + typedef const TensorVolumePatchOp& type; +}; + +template +struct nested, 1, typename eval >::type> +{ + typedef TensorVolumePatchOp type; +}; + +} // end namespace internal + +template +class TensorVolumePatchOp : public TensorBase, ReadOnlyAccessors> +{ + public: + typedef typename Eigen::internal::traits::Scalar Scalar; + typedef typename Eigen::NumTraits::Real RealScalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename Eigen::internal::nested::type Nested; + typedef typename Eigen::internal::traits::StorageKind StorageKind; + typedef typename Eigen::internal::traits::Index Index; + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex plane_inflate_strides, DenseIndex 
row_inflate_strides, DenseIndex col_inflate_strides, + PaddingType padding_type, Scalar padding_value) + : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides), + m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0), + m_padding_type(padding_type), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols, + DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides, + DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides, + DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, + DenseIndex padding_top_z, DenseIndex padding_bottom_z, + DenseIndex padding_top, DenseIndex padding_bottom, + DenseIndex padding_left, DenseIndex padding_right, + Scalar padding_value) + : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols), + m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides), + m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides), + m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides), + m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom), + m_padding_left(padding_left), 
m_padding_right(padding_right), + m_padding_type(PADDING_VALID), m_padding_value(padding_value) {} + + EIGEN_DEVICE_FUNC + DenseIndex patch_planes() const { return m_patch_planes; } + EIGEN_DEVICE_FUNC + DenseIndex patch_rows() const { return m_patch_rows; } + EIGEN_DEVICE_FUNC + DenseIndex patch_cols() const { return m_patch_cols; } + EIGEN_DEVICE_FUNC + DenseIndex plane_strides() const { return m_plane_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_strides() const { return m_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_strides() const { return m_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_plane_strides() const { return m_in_plane_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_row_strides() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC + DenseIndex in_col_strides() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC + DenseIndex plane_inflate_strides() const { return m_plane_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex row_inflate_strides() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC + DenseIndex col_inflate_strides() const { return m_col_inflate_strides; } + EIGEN_DEVICE_FUNC + bool padding_explicit() const { return m_padding_explicit; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top_z() const { return m_padding_top_z; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom_z() const { return m_padding_bottom_z; } + EIGEN_DEVICE_FUNC + DenseIndex padding_top() const { return m_padding_top; } + EIGEN_DEVICE_FUNC + DenseIndex padding_bottom() const { return m_padding_bottom; } + EIGEN_DEVICE_FUNC + DenseIndex padding_left() const { return m_padding_left; } + EIGEN_DEVICE_FUNC + DenseIndex padding_right() const { return m_padding_right; } + EIGEN_DEVICE_FUNC + PaddingType padding_type() const { return m_padding_type; } + EIGEN_DEVICE_FUNC + Scalar padding_value() const { return m_padding_value; } + + EIGEN_DEVICE_FUNC + const typename internal::remove_all::type& + expression() const { return m_xpr; } + + protected: + 
typename XprType::Nested m_xpr; + const DenseIndex m_patch_planes; + const DenseIndex m_patch_rows; + const DenseIndex m_patch_cols; + const DenseIndex m_plane_strides; + const DenseIndex m_row_strides; + const DenseIndex m_col_strides; + const DenseIndex m_in_plane_strides; + const DenseIndex m_in_row_strides; + const DenseIndex m_in_col_strides; + const DenseIndex m_plane_inflate_strides; + const DenseIndex m_row_inflate_strides; + const DenseIndex m_col_inflate_strides; + const bool m_padding_explicit; + const DenseIndex m_padding_top_z; + const DenseIndex m_padding_bottom_z; + const DenseIndex m_padding_top; + const DenseIndex m_padding_bottom; + const DenseIndex m_padding_left; + const DenseIndex m_padding_right; + const PaddingType m_padding_type; + const Scalar m_padding_value; +}; + + +// Eval as rvalue +template +struct TensorEvaluator, Device> +{ + typedef TensorVolumePatchOp XprType; + typedef typename XprType::Index Index; + static const int NumInputDims = internal::array_size::Dimensions>::value; + static const int NumDims = NumInputDims + 1; + typedef DSizes Dimensions; + typedef typename internal::remove_const::type Scalar; + typedef typename XprType::CoeffReturnType CoeffReturnType; + typedef typename PacketType::type PacketReturnType; + static const int PacketSize = PacketType::size; + typedef StorageMemory Storage; + typedef typename Storage::Type EvaluatorPointerType; + + enum { + IsAligned = false, + PacketAccess = TensorEvaluator::PacketAccess, + BlockAccess = false, + PreferBlockAccess = TensorEvaluator::PreferBlockAccess, + Layout = TensorEvaluator::Layout, + CoordAccess = false, + RawAccess = false + }; + + //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===// + typedef internal::TensorBlockNotImplemented TensorBlock; + //===--------------------------------------------------------------------===// + + EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) : + m_impl(op.expression(), device) + { 
+ EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE); + + m_paddingValue = op.padding_value(); + + const typename TensorEvaluator::Dimensions& input_dims = m_impl.dimensions(); + + // Cache a few variables. + if (static_cast(Layout) == static_cast(ColMajor)) { + m_inputDepth = input_dims[0]; + m_inputPlanes = input_dims[1]; + m_inputRows = input_dims[2]; + m_inputCols = input_dims[3]; + } else { + m_inputDepth = input_dims[NumInputDims-1]; + m_inputPlanes = input_dims[NumInputDims-2]; + m_inputRows = input_dims[NumInputDims-3]; + m_inputCols = input_dims[NumInputDims-4]; + } + + m_plane_strides = op.plane_strides(); + m_row_strides = op.row_strides(); + m_col_strides = op.col_strides(); + + // Input strides and effective input/patch size + m_in_plane_strides = op.in_plane_strides(); + m_in_row_strides = op.in_row_strides(); + m_in_col_strides = op.in_col_strides(); + m_plane_inflate_strides = op.plane_inflate_strides(); + m_row_inflate_strides = op.row_inflate_strides(); + m_col_inflate_strides = op.col_inflate_strides(); + + // The "effective" spatial size after inflating data with zeros. 
+ m_input_planes_eff = (m_inputPlanes - 1) * m_plane_inflate_strides + 1; + m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1; + m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1; + m_patch_planes_eff = op.patch_planes() + (op.patch_planes() - 1) * (m_in_plane_strides - 1); + m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1); + m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1); + + if (op.padding_explicit()) { + m_outputPlanes = numext::ceil((m_input_planes_eff + op.padding_top_z() + op.padding_bottom_z() - m_patch_planes_eff + 1.f) / static_cast(m_plane_strides)); + m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_planePaddingTop = op.padding_top_z(); + m_rowPaddingTop = op.padding_top(); + m_colPaddingLeft = op.padding_left(); + } else { + // Computing padding from the type + switch (op.padding_type()) { + case PADDING_VALID: + m_outputPlanes = numext::ceil((m_input_planes_eff - m_patch_planes_eff + 1.f) / static_cast(m_plane_strides)); + m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast(m_row_strides)); + m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast(m_col_strides)); + m_planePaddingTop = 0; + m_rowPaddingTop = 0; + m_colPaddingLeft = 0; + break; + case PADDING_SAME: { + m_outputPlanes = numext::ceil(m_input_planes_eff / static_cast(m_plane_strides)); + m_outputRows = numext::ceil(m_input_rows_eff / static_cast(m_row_strides)); + m_outputCols = numext::ceil(m_input_cols_eff / static_cast(m_col_strides)); + const Index dz = (m_outputPlanes - 1) * m_plane_strides + m_patch_planes_eff - m_input_planes_eff; + const Index dy = (m_outputRows - 1) * 
m_row_strides + m_patch_rows_eff - m_input_rows_eff; + const Index dx = (m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff; + m_planePaddingTop = dz / 2; + m_rowPaddingTop = dy / 2; + m_colPaddingLeft = dx / 2; + break; + } + default: + eigen_assert(false && "unexpected padding"); + } + } + eigen_assert(m_outputRows > 0); + eigen_assert(m_outputCols > 0); + eigen_assert(m_outputPlanes > 0); + + // Dimensions for result of extraction. + if (static_cast(Layout) == static_cast(ColMajor)) { + // ColMajor + // 0: depth + // 1: patch_planes + // 2: patch_rows + // 3: patch_cols + // 4: number of patches + // 5 and beyond: anything else (such as batch). + m_dimensions[0] = input_dims[0]; + m_dimensions[1] = op.patch_planes(); + m_dimensions[2] = op.patch_rows(); + m_dimensions[3] = op.patch_cols(); + m_dimensions[4] = m_outputPlanes * m_outputRows * m_outputCols; + for (int i = 5; i < NumDims; ++i) { + m_dimensions[i] = input_dims[i-1]; + } + } else { + // RowMajor + // NumDims-1: depth + // NumDims-2: patch_planes + // NumDims-3: patch_rows + // NumDims-4: patch_cols + // NumDims-5: number of patches + // NumDims-6 and beyond: anything else (such as batch). + m_dimensions[NumDims-1] = input_dims[NumInputDims-1]; + m_dimensions[NumDims-2] = op.patch_planes(); + m_dimensions[NumDims-3] = op.patch_rows(); + m_dimensions[NumDims-4] = op.patch_cols(); + m_dimensions[NumDims-5] = m_outputPlanes * m_outputRows * m_outputCols; + for (int i = NumDims-6; i >= 0; --i) { + m_dimensions[i] = input_dims[i]; + } + } + + // Strides for the output tensor. 
+ if (static_cast(Layout) == static_cast(ColMajor)) { + m_rowStride = m_dimensions[1]; + m_colStride = m_dimensions[2] * m_rowStride; + m_patchStride = m_colStride * m_dimensions[3] * m_dimensions[0]; + m_otherStride = m_patchStride * m_dimensions[4]; + } else { + m_rowStride = m_dimensions[NumDims-2]; + m_colStride = m_dimensions[NumDims-3] * m_rowStride; + m_patchStride = m_colStride * m_dimensions[NumDims-4] * m_dimensions[NumDims-1]; + m_otherStride = m_patchStride * m_dimensions[NumDims-5]; + } + + // Strides for navigating through the input tensor. + m_planeInputStride = m_inputDepth; + m_rowInputStride = m_inputDepth * m_inputPlanes; + m_colInputStride = m_inputDepth * m_inputRows * m_inputPlanes; + m_otherInputStride = m_inputDepth * m_inputRows * m_inputCols * m_inputPlanes; + + m_outputPlanesRows = m_outputPlanes * m_outputRows; + + // Fast representations of different variables. + m_fastOtherStride = internal::TensorIntDivisor(m_otherStride); + + m_fastPatchStride = internal::TensorIntDivisor(m_patchStride); + m_fastColStride = internal::TensorIntDivisor(m_colStride); + m_fastRowStride = internal::TensorIntDivisor(m_rowStride); + m_fastInputRowStride = internal::TensorIntDivisor(m_row_inflate_strides); + m_fastInputColStride = internal::TensorIntDivisor(m_col_inflate_strides); + m_fastInputPlaneStride = internal::TensorIntDivisor(m_plane_inflate_strides); + m_fastInputColsEff = internal::TensorIntDivisor(m_input_cols_eff); + m_fastOutputPlanes = internal::TensorIntDivisor(m_outputPlanes); + m_fastOutputPlanesRows = internal::TensorIntDivisor(m_outputPlanesRows); + + if (static_cast(Layout) == static_cast(ColMajor)) { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[0]); + } else { + m_fastOutputDepth = internal::TensorIntDivisor(m_dimensions[NumDims-1]); + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } + + EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType 
/*data*/) { + m_impl.evalSubExprsIfNeeded(NULL); + return true; + } + + EIGEN_STRONG_INLINE void cleanup() { + m_impl.cleanup(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + // Patch index corresponding to the passed in index. + const Index patchIndex = index / m_fastPatchStride; + + // Spatial offset within the patch. This has to be translated into 3D + // coordinates within the patch. + const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth; + + // Batch, etc. + const Index otherIndex = (NumDims == 5) ? 0 : index / m_fastOtherStride; + const Index patch3DIndex = (NumDims == 5) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride; + + // Calculate column index in the input original tensor. + const Index colIndex = patch3DIndex / m_fastOutputPlanesRows; + const Index colOffset = patchOffset / m_fastColStride; + const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft; + const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0); + if (inputCol < 0 || inputCol >= m_input_cols_eff || + ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate row index in the original input tensor. + const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes; + const Index rowOffset = (patchOffset - colOffset * m_colStride) / m_fastRowStride; + const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop; + const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? 
(inputRow / m_fastInputRowStride) : 0); + if (inputRow < 0 || inputRow >= m_input_rows_eff || + ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) { + return Scalar(m_paddingValue); + } + + // Calculate plane index in the original input tensor. + const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex)); + const Index planeOffset = patchOffset - colOffset * m_colStride - rowOffset * m_rowStride; + const Index inputPlane = planeIndex * m_plane_strides + planeOffset * m_in_plane_strides - m_planePaddingTop; + const Index origInputPlane = (m_plane_inflate_strides == 1) ? inputPlane : ((inputPlane >= 0) ? (inputPlane / m_fastInputPlaneStride) : 0); + if (inputPlane < 0 || inputPlane >= m_input_planes_eff || + ((m_plane_inflate_strides != 1) && (inputPlane != origInputPlane * m_plane_inflate_strides))) { + return Scalar(m_paddingValue); + } + + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + + const Index inputIndex = depth + + origInputRow * m_rowInputStride + + origInputCol * m_colInputStride + + origInputPlane * m_planeInputStride + + otherIndex * m_otherInputStride; + + return m_impl.coeff(inputIndex); + } + + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE) + eigen_assert(index+PacketSize-1 < dimensions().TotalSize()); + + if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1 || + m_in_plane_strides != 1 || m_plane_inflate_strides != 1) { + return packetWithPossibleZero(index); + } + + const Index indices[2] = {index, index + PacketSize - 1}; + const Index patchIndex = indices[0] / m_fastPatchStride; + if (patchIndex != indices[1] / m_fastPatchStride) { + return packetWithPossibleZero(index); + 
} + const Index otherIndex = (NumDims == 5) ? 0 : indices[0] / m_fastOtherStride; + eigen_assert(otherIndex == indices[1] / m_fastOtherStride); + + // Find the offset of the element wrt the location of the first element. + const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth, + (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth}; + + const Index patch3DIndex = (NumDims == 5) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride; + eigen_assert(patch3DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride); + + const Index colIndex = patch3DIndex / m_fastOutputPlanesRows; + const Index colOffsets[2] = { + patchOffsets[0] / m_fastColStride, + patchOffsets[1] / m_fastColStride}; + + // Calculate col indices in the original input tensor. + const Index inputCols[2] = { + colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft, + colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft}; + if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputCols[0] != inputCols[1]) { + return packetWithPossibleZero(index); + } + + const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes; + const Index rowOffsets[2] = { + (patchOffsets[0] - colOffsets[0] * m_colStride) / m_fastRowStride, + (patchOffsets[1] - colOffsets[1] * m_colStride) / m_fastRowStride}; + eigen_assert(rowOffsets[0] <= rowOffsets[1]); + // Calculate col indices in the original input tensor. 
+ const Index inputRows[2] = { + rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop, + rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop}; + + if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputRows[0] != inputRows[1]) { + return packetWithPossibleZero(index); + } + + const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex)); + const Index planeOffsets[2] = { + patchOffsets[0] - colOffsets[0] * m_colStride - rowOffsets[0] * m_rowStride, + patchOffsets[1] - colOffsets[1] * m_colStride - rowOffsets[1] * m_rowStride}; + eigen_assert(planeOffsets[0] <= planeOffsets[1]); + const Index inputPlanes[2] = { + planeIndex * m_plane_strides + planeOffsets[0] - m_planePaddingTop, + planeIndex * m_plane_strides + planeOffsets[1] - m_planePaddingTop}; + + if (inputPlanes[1] < 0 || inputPlanes[0] >= m_inputPlanes) { + return internal::pset1(Scalar(m_paddingValue)); + } + + if (inputPlanes[0] >= 0 && inputPlanes[1] < m_inputPlanes) { + // no padding + const int depth_index = static_cast(Layout) == static_cast(ColMajor) ? 
0 : NumDims - 1; + const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index]; + const Index inputIndex = depth + + inputRows[0] * m_rowInputStride + + inputCols[0] * m_colInputStride + + m_planeInputStride * inputPlanes[0] + + otherIndex * m_otherInputStride; + return m_impl.template packet(inputIndex); + } + + return packetWithPossibleZero(index); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost + costPerCoeff(bool vectorized) const { + const double compute_cost = + 10 * TensorOpCost::DivCost() + 21 * TensorOpCost::MulCost() + + 8 * TensorOpCost::AddCost(); + return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize); + } + + EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; } + + const TensorEvaluator& impl() const { return m_impl; } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planePaddingTop() const { return m_planePaddingTop; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputPlanes() const { return m_outputPlanes; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userPlaneStride() const { return m_plane_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInPlaneStride() const { return m_in_plane_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; } + EIGEN_DEVICE_FUNC 
EIGEN_STRONG_INLINE Index planeInflateStride() const { return m_plane_inflate_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; } + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; } + +#ifdef EIGEN_USE_SYCL + // binding placeholder accessors to a command group handler for SYCL + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const { + m_impl.bind(cgh); + } +#endif + protected: + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const + { + EIGEN_ALIGN_MAX typename internal::remove_const::type values[PacketSize]; + EIGEN_UNROLL_LOOP + for (int i = 0; i < PacketSize; ++i) { + values[i] = coeff(index+i); + } + PacketReturnType rslt = internal::pload(values); + return rslt; + } + + Dimensions m_dimensions; + + // Parameters passed to the constructor. + Index m_plane_strides; + Index m_row_strides; + Index m_col_strides; + + Index m_outputPlanes; + Index m_outputRows; + Index m_outputCols; + + Index m_planePaddingTop; + Index m_rowPaddingTop; + Index m_colPaddingLeft; + + Index m_in_plane_strides; + Index m_in_row_strides; + Index m_in_col_strides; + + Index m_plane_inflate_strides; + Index m_row_inflate_strides; + Index m_col_inflate_strides; + + // Cached input size. + Index m_inputDepth; + Index m_inputPlanes; + Index m_inputRows; + Index m_inputCols; + + // Other cached variables. + Index m_outputPlanesRows; + + // Effective input/patch post-inflation size. + Index m_input_planes_eff; + Index m_input_rows_eff; + Index m_input_cols_eff; + Index m_patch_planes_eff; + Index m_patch_rows_eff; + Index m_patch_cols_eff; + + // Strides for the output tensor. + Index m_otherStride; + Index m_patchStride; + Index m_rowStride; + Index m_colStride; + + // Strides for the input tensor. 
+ Index m_planeInputStride; + Index m_rowInputStride; + Index m_colInputStride; + Index m_otherInputStride; + + internal::TensorIntDivisor m_fastOtherStride; + internal::TensorIntDivisor m_fastPatchStride; + internal::TensorIntDivisor m_fastColStride; + internal::TensorIntDivisor m_fastRowStride; + internal::TensorIntDivisor m_fastInputPlaneStride; + internal::TensorIntDivisor m_fastInputRowStride; + internal::TensorIntDivisor m_fastInputColStride; + internal::TensorIntDivisor m_fastInputColsEff; + internal::TensorIntDivisor m_fastOutputPlanesRows; + internal::TensorIntDivisor m_fastOutputPlanes; + internal::TensorIntDivisor m_fastOutputDepth; + + Scalar m_paddingValue; + + TensorEvaluator m_impl; + + +}; + + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H diff --git a/external/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h new file mode 100644 index 0000000..bc4f202 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/DynamicSymmetry.h @@ -0,0 +1,293 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSORSYMMETRY_DYNAMICSYMMETRY_H +#define EIGEN_CXX11_TENSORSYMMETRY_DYNAMICSYMMETRY_H + +namespace Eigen { + +class DynamicSGroup +{ + public: + inline explicit DynamicSGroup() : m_numIndices(1), m_elements(), m_generators(), m_globalFlags(0) { m_elements.push_back(ge(Generator(0, 0, 0))); } + inline DynamicSGroup(const DynamicSGroup& o) : m_numIndices(o.m_numIndices), m_elements(o.m_elements), m_generators(o.m_generators), m_globalFlags(o.m_globalFlags) { } + inline DynamicSGroup(DynamicSGroup&& o) : m_numIndices(o.m_numIndices), m_elements(), m_generators(o.m_generators), m_globalFlags(o.m_globalFlags) { std::swap(m_elements, o.m_elements); } + inline DynamicSGroup& operator=(const DynamicSGroup& o) { m_numIndices = o.m_numIndices; m_elements = o.m_elements; m_generators = o.m_generators; m_globalFlags = o.m_globalFlags; return *this; } + inline DynamicSGroup& operator=(DynamicSGroup&& o) { m_numIndices = o.m_numIndices; std::swap(m_elements, o.m_elements); m_generators = o.m_generators; m_globalFlags = o.m_globalFlags; return *this; } + + void add(int one, int two, int flags = 0); + + template + inline void add(Gen_) { add(Gen_::One, Gen_::Two, Gen_::Flags); } + inline void addSymmetry(int one, int two) { add(one, two, 0); } + inline void addAntiSymmetry(int one, int two) { add(one, two, NegationFlag); } + inline void addHermiticity(int one, int two) { add(one, two, ConjugationFlag); } + inline void addAntiHermiticity(int one, int two) { add(one, two, NegationFlag | ConjugationFlag); } + + template + inline RV apply(const std::array& idx, RV initial, Args&&... 
args) const + { + eigen_assert(N >= m_numIndices && "Can only apply symmetry group to objects that have at least the required amount of indices."); + for (std::size_t i = 0; i < size(); i++) + initial = Op::run(h_permute(i, idx, typename internal::gen_numeric_list::type()), m_elements[i].flags, initial, std::forward(args)...); + return initial; + } + + template + inline RV apply(const std::vector& idx, RV initial, Args&&... args) const + { + eigen_assert(idx.size() >= m_numIndices && "Can only apply symmetry group to objects that have at least the required amount of indices."); + for (std::size_t i = 0; i < size(); i++) + initial = Op::run(h_permute(i, idx), m_elements[i].flags, initial, std::forward(args)...); + return initial; + } + + inline int globalFlags() const { return m_globalFlags; } + inline std::size_t size() const { return m_elements.size(); } + + template + inline internal::tensor_symmetry_value_setter operator()(Tensor_& tensor, typename Tensor_::Index firstIndex, IndexTypes... 
otherIndices) const + { + static_assert(sizeof...(otherIndices) + 1 == Tensor_::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); + return operator()(tensor, std::array{{firstIndex, otherIndices...}}); + } + + template + inline internal::tensor_symmetry_value_setter operator()(Tensor_& tensor, std::array const& indices) const + { + return internal::tensor_symmetry_value_setter(tensor, *this, indices); + } + private: + struct GroupElement { + std::vector representation; + int flags; + bool isId() const + { + for (std::size_t i = 0; i < representation.size(); i++) + if (i != (size_t)representation[i]) + return false; + return true; + } + }; + struct Generator { + int one; + int two; + int flags; + constexpr inline Generator(int one_, int two_, int flags_) : one(one_), two(two_), flags(flags_) {} + }; + + std::size_t m_numIndices; + std::vector m_elements; + std::vector m_generators; + int m_globalFlags; + + template + inline std::array h_permute(std::size_t which, const std::array& idx, internal::numeric_list) const + { + return std::array{{ idx[n >= m_numIndices ? n : m_elements[which].representation[n]]... 
}}; + } + + template + inline std::vector h_permute(std::size_t which, std::vector idx) const + { + std::vector result; + result.reserve(idx.size()); + for (auto k : m_elements[which].representation) + result.push_back(idx[k]); + for (std::size_t i = m_numIndices; i < idx.size(); i++) + result.push_back(idx[i]); + return result; + } + + inline GroupElement ge(Generator const& g) const + { + GroupElement result; + result.representation.reserve(m_numIndices); + result.flags = g.flags; + for (std::size_t k = 0; k < m_numIndices; k++) { + if (k == (std::size_t)g.one) + result.representation.push_back(g.two); + else if (k == (std::size_t)g.two) + result.representation.push_back(g.one); + else + result.representation.push_back(int(k)); + } + return result; + } + + GroupElement mul(GroupElement, GroupElement) const; + inline GroupElement mul(Generator g1, GroupElement g2) const + { + return mul(ge(g1), g2); + } + + inline GroupElement mul(GroupElement g1, Generator g2) const + { + return mul(g1, ge(g2)); + } + + inline GroupElement mul(Generator g1, Generator g2) const + { + return mul(ge(g1), ge(g2)); + } + + inline int findElement(GroupElement e) const + { + for (auto ee : m_elements) { + if (ee.representation == e.representation) + return ee.flags ^ e.flags; + } + return -1; + } + + void updateGlobalFlags(int flagDiffOfSameGenerator); +}; + +// dynamic symmetry group that auto-adds the template parameters in the constructor +template +class DynamicSGroupFromTemplateArgs : public DynamicSGroup +{ + public: + inline DynamicSGroupFromTemplateArgs() : DynamicSGroup() + { + add_all(internal::type_list()); + } + inline DynamicSGroupFromTemplateArgs(DynamicSGroupFromTemplateArgs const& other) : DynamicSGroup(other) { } + inline DynamicSGroupFromTemplateArgs(DynamicSGroupFromTemplateArgs&& other) : DynamicSGroup(other) { } + inline DynamicSGroupFromTemplateArgs& operator=(const DynamicSGroupFromTemplateArgs& o) { DynamicSGroup::operator=(o); return *this; } + inline 
DynamicSGroupFromTemplateArgs& operator=(DynamicSGroupFromTemplateArgs&& o) { DynamicSGroup::operator=(o); return *this; } + + private: + template + inline void add_all(internal::type_list) + { + add(Gen1()); + add_all(internal::type_list()); + } + + inline void add_all(internal::type_list<>) + { + } +}; + +inline DynamicSGroup::GroupElement DynamicSGroup::mul(GroupElement g1, GroupElement g2) const +{ + eigen_internal_assert(g1.representation.size() == m_numIndices); + eigen_internal_assert(g2.representation.size() == m_numIndices); + + GroupElement result; + result.representation.reserve(m_numIndices); + for (std::size_t i = 0; i < m_numIndices; i++) { + int v = g2.representation[g1.representation[i]]; + eigen_assert(v >= 0); + result.representation.push_back(v); + } + result.flags = g1.flags ^ g2.flags; + return result; +} + +inline void DynamicSGroup::add(int one, int two, int flags) +{ + eigen_assert(one >= 0); + eigen_assert(two >= 0); + eigen_assert(one != two); + + if ((std::size_t)one >= m_numIndices || (std::size_t)two >= m_numIndices) { + std::size_t newNumIndices = (one > two) ? 
one : two + 1; + for (auto& gelem : m_elements) { + gelem.representation.reserve(newNumIndices); + for (std::size_t i = m_numIndices; i < newNumIndices; i++) + gelem.representation.push_back(i); + } + m_numIndices = newNumIndices; + } + + Generator g{one, two, flags}; + GroupElement e = ge(g); + + /* special case for first generator */ + if (m_elements.size() == 1) { + while (!e.isId()) { + m_elements.push_back(e); + e = mul(e, g); + } + + if (e.flags > 0) + updateGlobalFlags(e.flags); + + // only add in case we didn't have identity + if (m_elements.size() > 1) + m_generators.push_back(g); + return; + } + + int p = findElement(e); + if (p >= 0) { + updateGlobalFlags(p); + return; + } + + std::size_t coset_order = m_elements.size(); + m_elements.push_back(e); + for (std::size_t i = 1; i < coset_order; i++) + m_elements.push_back(mul(m_elements[i], e)); + m_generators.push_back(g); + + std::size_t coset_rep = coset_order; + do { + for (auto g : m_generators) { + e = mul(m_elements[coset_rep], g); + p = findElement(e); + if (p < 0) { + // element not yet in group + m_elements.push_back(e); + for (std::size_t i = 1; i < coset_order; i++) + m_elements.push_back(mul(m_elements[i], e)); + } else if (p > 0) { + updateGlobalFlags(p); + } + } + coset_rep += coset_order; + } while (coset_rep < m_elements.size()); +} + +inline void DynamicSGroup::updateGlobalFlags(int flagDiffOfSameGenerator) +{ + switch (flagDiffOfSameGenerator) { + case 0: + default: + // nothing happened + break; + case NegationFlag: + // every element is it's own negative => whole tensor is zero + m_globalFlags |= GlobalZeroFlag; + break; + case ConjugationFlag: + // every element is it's own conjugate => whole tensor is real + m_globalFlags |= GlobalRealFlag; + break; + case (NegationFlag | ConjugationFlag): + // every element is it's own negative conjugate => whole tensor is imaginary + m_globalFlags |= GlobalImagFlag; + break; + /* NOTE: + * since GlobalZeroFlag == GlobalRealFlag | GlobalImagFlag, if 
one generator + * causes the tensor to be real and the next one to be imaginary, this will + * trivially give the correct result + */ + } +} + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSORSYMMETRY_DYNAMICSYMMETRY_H + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/external/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h new file mode 100644 index 0000000..942293b --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/StaticSymmetry.h @@ -0,0 +1,236 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSORSYMMETRY_STATICSYMMETRY_H +#define EIGEN_CXX11_TENSORSYMMETRY_STATICSYMMETRY_H + +namespace Eigen { + +namespace internal { + +template struct tensor_static_symgroup_permutate; + +template +struct tensor_static_symgroup_permutate> +{ + constexpr static std::size_t N = sizeof...(nn); + + template + constexpr static inline std::array run(const std::array& indices) + { + return {{indices[nn]...}}; + } +}; + +template +struct tensor_static_symgroup_element +{ + typedef indices_ indices; + constexpr static int flags = flags_; +}; + +template +struct tensor_static_symgroup_element_ctor +{ + typedef tensor_static_symgroup_element< + typename gen_numeric_list_swapped_pair::type, + Gen::Flags + > type; +}; + +template +struct tensor_static_symgroup_identity_ctor +{ + typedef tensor_static_symgroup_element< + typename gen_numeric_list::type, + 0 + > type; +}; + +template +struct tensor_static_symgroup_multiply_helper +{ + template + constexpr static inline numeric_list::value...> helper(numeric_list) 
{ + return numeric_list::value...>(); + } +}; + +template +struct tensor_static_symgroup_multiply +{ + private: + typedef typename A::indices iia; + typedef typename B::indices iib; + constexpr static int ffa = A::flags; + constexpr static int ffb = B::flags; + + public: + static_assert(iia::count == iib::count, "Cannot multiply symmetry elements with different number of indices."); + + typedef tensor_static_symgroup_element< + decltype(tensor_static_symgroup_multiply_helper::helper(iia())), + ffa ^ ffb + > type; +}; + +template +struct tensor_static_symgroup_equality +{ + typedef typename A::indices iia; + typedef typename B::indices iib; + constexpr static int ffa = A::flags; + constexpr static int ffb = B::flags; + static_assert(iia::count == iib::count, "Cannot compare symmetry elements with different number of indices."); + + constexpr static bool value = is_same::value; + + private: + /* this should be zero if they are identical, or else the tensor + * will be forced to be pure real, pure imaginary or even pure zero + */ + constexpr static int flags_cmp_ = ffa ^ ffb; + + /* either they are not equal, then we don't care whether the flags + * match, or they are equal, and then we have to check + */ + constexpr static bool is_zero = value && flags_cmp_ == NegationFlag; + constexpr static bool is_real = value && flags_cmp_ == ConjugationFlag; + constexpr static bool is_imag = value && flags_cmp_ == (NegationFlag | ConjugationFlag); + + public: + constexpr static int global_flags = + (is_real ? GlobalRealFlag : 0) | + (is_imag ? GlobalImagFlag : 0) | + (is_zero ? GlobalZeroFlag : 0); +}; + +template +struct tensor_static_symgroup +{ + typedef StaticSGroup type; + constexpr static std::size_t size = type::static_size; +}; + +template +constexpr static inline std::array tensor_static_symgroup_index_permute(std::array idx, internal::numeric_list, internal::numeric_list) +{ + return {{ idx[ii]..., idx[jj]... 
}}; +} + +template +static inline std::vector tensor_static_symgroup_index_permute(std::vector idx, internal::numeric_list) +{ + std::vector result{{ idx[ii]... }}; + std::size_t target_size = idx.size(); + for (std::size_t i = result.size(); i < target_size; i++) + result.push_back(idx[i]); + return result; +} + +template struct tensor_static_symgroup_do_apply; + +template +struct tensor_static_symgroup_do_apply> +{ + template + static inline RV run(const std::array& idx, RV initial, Args&&... args) + { + static_assert(NumIndices >= SGNumIndices, "Can only apply symmetry group to objects that have at least the required amount of indices."); + typedef typename internal::gen_numeric_list::type remaining_indices; + initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices(), remaining_indices()), first::flags, initial, std::forward(args)...); + return tensor_static_symgroup_do_apply>::template run(idx, initial, args...); + } + + template + static inline RV run(const std::vector& idx, RV initial, Args&&... args) + { + eigen_assert(idx.size() >= SGNumIndices && "Can only apply symmetry group to objects that have at least the required amount of indices."); + initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices()), first::flags, initial, std::forward(args)...); + return tensor_static_symgroup_do_apply>::template run(idx, initial, args...); + } +}; + +template +struct tensor_static_symgroup_do_apply> +{ + template + static inline RV run(const std::array&, RV initial, Args&&...) + { + // do nothing + return initial; + } + + template + static inline RV run(const std::vector&, RV initial, Args&&...) 
+ { + // do nothing + return initial; + } +}; + +} // end namespace internal + +template +class StaticSGroup +{ + constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices::value; + typedef internal::group_theory::enumerate_group_elements< + internal::tensor_static_symgroup_multiply, + internal::tensor_static_symgroup_equality, + typename internal::tensor_static_symgroup_identity_ctor::type, + internal::type_list::type...> + > group_elements; + typedef typename group_elements::type ge; + public: + constexpr inline StaticSGroup() {} + constexpr inline StaticSGroup(const StaticSGroup&) {} + constexpr inline StaticSGroup(StaticSGroup&&) {} + + template + static inline RV apply(const std::array& idx, RV initial, Args&&... args) + { + return internal::tensor_static_symgroup_do_apply::template run(idx, initial, args...); + } + + template + static inline RV apply(const std::vector& idx, RV initial, Args&&... args) + { + eigen_assert(idx.size() == NumIndices); + return internal::tensor_static_symgroup_do_apply::template run(idx, initial, args...); + } + + constexpr static std::size_t static_size = ge::count; + + constexpr static inline std::size_t size() { + return ge::count; + } + constexpr static inline int globalFlags() { return group_elements::global_flags; } + + template + inline internal::tensor_symmetry_value_setter> operator()(Tensor_& tensor, typename Tensor_::Index firstIndex, IndexTypes... 
otherIndices) const + { + static_assert(sizeof...(otherIndices) + 1 == Tensor_::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor."); + return operator()(tensor, std::array{{firstIndex, otherIndices...}}); + } + + template + inline internal::tensor_symmetry_value_setter> operator()(Tensor_& tensor, std::array const& indices) const + { + return internal::tensor_symmetry_value_setter>(tensor, *this, indices); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSORSYMMETRY_STATICSYMMETRY_H + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/external/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h new file mode 100644 index 0000000..879d6cd --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/Symmetry.h @@ -0,0 +1,338 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H +#define EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H + +namespace Eigen { + +enum { + NegationFlag = 0x01, + ConjugationFlag = 0x02 +}; + +enum { + GlobalRealFlag = 0x01, + GlobalImagFlag = 0x02, + GlobalZeroFlag = 0x03 +}; + +namespace internal { + +template struct tensor_symmetry_pre_analysis; +template struct tensor_static_symgroup; +template struct tensor_static_symgroup_if; +template struct tensor_symmetry_calculate_flags; +template struct tensor_symmetry_assign_value; +template struct tensor_symmetry_num_indices; + +} // end namespace internal + +template +struct Symmetry +{ + static_assert(One_ != Two_, "Symmetries must cover distinct indices."); + constexpr static int One = One_; + constexpr static int Two = Two_; + constexpr static int Flags = 0; +}; + +template +struct AntiSymmetry +{ + static_assert(One_ != Two_, "Symmetries must cover distinct indices."); + constexpr static int One = One_; + constexpr static int Two = Two_; + constexpr static int Flags = NegationFlag; +}; + +template +struct Hermiticity +{ + static_assert(One_ != Two_, "Symmetries must cover distinct indices."); + constexpr static int One = One_; + constexpr static int Two = Two_; + constexpr static int Flags = ConjugationFlag; +}; + +template +struct AntiHermiticity +{ + static_assert(One_ != Two_, "Symmetries must cover distinct indices."); + constexpr static int One = One_; + constexpr static int Two = Two_; + constexpr static int Flags = ConjugationFlag | NegationFlag; +}; + +/** \class DynamicSGroup + * \ingroup TensorSymmetry_Module + * + * \brief Dynamic symmetry group + * + * The %DynamicSGroup class represents a symmetry group that need not be known at + * compile time. 
It is useful if one wants to support arbitrary run-time defineable + * symmetries for tensors, but it is also instantiated if a symmetry group is defined + * at compile time that would be either too large for the compiler to reasonably + * generate (using templates to calculate this at compile time is very inefficient) + * or that the compiler could generate the group but that it wouldn't make sense to + * unroll the loop for setting coefficients anymore. + */ +class DynamicSGroup; + +/** \internal + * + * \class DynamicSGroupFromTemplateArgs + * \ingroup TensorSymmetry_Module + * + * \brief Dynamic symmetry group, initialized from template arguments + * + * This class is a child class of DynamicSGroup. It uses the template arguments + * specified to initialize itself. + */ +template +class DynamicSGroupFromTemplateArgs; + +/** \class StaticSGroup + * \ingroup TensorSymmetry_Module + * + * \brief Static symmetry group + * + * This class represents a symmetry group that is known and resolved completely + * at compile time. Ideally, no run-time penalty is incurred compared to the + * manual unrolling of the symmetry. + * + * CAUTION: + * + * Do not use this class directly for large symmetry groups. The compiler + * may run into a limit, or segfault or in the very least will take a very, + * very, very long time to compile the code. Use the SGroup class instead + * if you want a static group. That class contains logic that will + * automatically select the DynamicSGroup class instead if the symmetry + * group becomes too large. (In that case, unrolling may not even be + * beneficial.) + */ +template +class StaticSGroup; + +/** \class SGroup + * \ingroup TensorSymmetry_Module + * + * \brief Symmetry group, initialized from template arguments + * + * This class represents a symmetry group whose generators are already + * known at compile time. It may or may not be resolved at compile time, + * depending on the estimated size of the group. 
+ * + * \sa StaticSGroup + * \sa DynamicSGroup + */ +template +class SGroup : public internal::tensor_symmetry_pre_analysis::value, Gen...>::root_type +{ + public: + constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices::value; + typedef typename internal::tensor_symmetry_pre_analysis::root_type Base; + + // make standard constructors + assignment operators public + inline SGroup() : Base() { } + inline SGroup(const SGroup& other) : Base(other) { } + inline SGroup(SGroup&& other) : Base(other) { } + inline SGroup& operator=(const SGroup& other) { Base::operator=(other); return *this; } + inline SGroup& operator=(SGroup&& other) { Base::operator=(other); return *this; } + + // all else is defined in the base class +}; + +namespace internal { + +template struct tensor_symmetry_num_indices +{ + constexpr static std::size_t value = 1; +}; + +template struct tensor_symmetry_num_indices, Sym...> +{ +private: + constexpr static std::size_t One = static_cast(One_); + constexpr static std::size_t Two = static_cast(Two_); + constexpr static std::size_t Three = tensor_symmetry_num_indices::value; + + // don't use std::max, since it's not constexpr until C++14... + constexpr static std::size_t maxOneTwoPlusOne = ((One > Two) ? One : Two) + 1; +public: + constexpr static std::size_t value = (maxOneTwoPlusOne > Three) ? 
maxOneTwoPlusOne : Three; +}; + +template struct tensor_symmetry_num_indices, Sym...> + : public tensor_symmetry_num_indices, Sym...> {}; +template struct tensor_symmetry_num_indices, Sym...> + : public tensor_symmetry_num_indices, Sym...> {}; +template struct tensor_symmetry_num_indices, Sym...> + : public tensor_symmetry_num_indices, Sym...> {}; + +/** \internal + * + * \class tensor_symmetry_pre_analysis + * \ingroup TensorSymmetry_Module + * + * \brief Pre-select whether to use a static or dynamic symmetry group + * + * When a symmetry group could in principle be determined at compile time, + * this template implements the logic whether to actually do that or whether + * to rather defer that to runtime. + * + * The logic is as follows: + *
+ *
No generators (trivial symmetry):
+ *
Use a trivial static group. Ideally, this has no performance impact + * compared to not using symmetry at all. In practice, this might not + * be the case.
+ *
More than 4 generators:
+ *
Calculate the group at run time, it is likely far too large for the + * compiler to be able to properly generate it in a realistic time.
+ *
Up to and including 4 generators:
+ *
Actually enumerate all group elements, but then check how many there + * are. If there are more than 16, it is unlikely that unrolling the + * loop (as is done in the static compile-time case) is sensible, so + * use a dynamic group instead. If there are at most 16 elements, actually + * use that static group. Note that the largest group with 4 generators + * still compiles with reasonable resources.
+ *
+ * + * Note: Example compile time performance with g++-4.6 on an Intenl Core i5-3470 + * with 16 GiB RAM (all generators non-redundant and the subgroups don't + * factorize): + * + * # Generators -O0 -ggdb -O2 + * ------------------------------------------------------------------- + * 1 0.5 s / 250 MiB 0.45s / 230 MiB + * 2 0.5 s / 260 MiB 0.5 s / 250 MiB + * 3 0.65s / 310 MiB 0.62s / 310 MiB + * 4 2.2 s / 860 MiB 1.7 s / 770 MiB + * 5 130 s / 13000 MiB 120 s / 11000 MiB + * + * It is clear that everything is still very efficient up to 4 generators, then + * the memory and CPU requirements become unreasonable. Thus we only instantiate + * the template group theory logic if the number of generators supplied is 4 or + * lower, otherwise this will be forced to be done during runtime, where the + * algorithm is reasonably fast. + */ +template +struct tensor_symmetry_pre_analysis +{ + typedef StaticSGroup<> root_type; +}; + +template +struct tensor_symmetry_pre_analysis +{ + constexpr static std::size_t max_static_generators = 4; + constexpr static std::size_t max_static_elements = 16; + typedef tensor_static_symgroup_if<(sizeof...(Gens_) + 1 <= max_static_generators), NumIndices, Gen_, Gens_...> helper; + constexpr static std::size_t possible_size = helper::size; + + typedef typename conditional< + possible_size == 0 || possible_size >= max_static_elements, + DynamicSGroupFromTemplateArgs, + typename helper::type + >::type root_type; +}; + +template +struct tensor_static_symgroup_if +{ + constexpr static std::size_t size = 0; + typedef void type; +}; + +template +struct tensor_static_symgroup_if : tensor_static_symgroup {}; + +template +struct tensor_symmetry_assign_value +{ + typedef typename Tensor_::Index Index; + typedef typename Tensor_::Scalar Scalar; + constexpr static std::size_t NumIndices = Tensor_::NumIndices; + + static inline int run(const std::array& transformed_indices, int transformation_flags, int dummy, Tensor_& tensor, const Scalar& value_) + { + 
Scalar value(value_); + if (transformation_flags & ConjugationFlag) + value = numext::conj(value); + if (transformation_flags & NegationFlag) + value = -value; + tensor.coeffRef(transformed_indices) = value; + return dummy; + } +}; + +template +struct tensor_symmetry_calculate_flags +{ + typedef typename Tensor_::Index Index; + constexpr static std::size_t NumIndices = Tensor_::NumIndices; + + static inline int run(const std::array& transformed_indices, int transform_flags, int current_flags, const std::array& orig_indices) + { + if (transformed_indices == orig_indices) { + if (transform_flags & (ConjugationFlag | NegationFlag)) + return current_flags | GlobalImagFlag; // anti-hermitian diagonal + else if (transform_flags & ConjugationFlag) + return current_flags | GlobalRealFlag; // hermitian diagonal + else if (transform_flags & NegationFlag) + return current_flags | GlobalZeroFlag; // anti-symmetric diagonal + } + return current_flags; + } +}; + +template +class tensor_symmetry_value_setter +{ + public: + typedef typename Tensor_::Index Index; + typedef typename Tensor_::Scalar Scalar; + constexpr static std::size_t NumIndices = Tensor_::NumIndices; + + inline tensor_symmetry_value_setter(Tensor_& tensor, Symmetry_ const& symmetry, std::array const& indices) + : m_tensor(tensor), m_symmetry(symmetry), m_indices(indices) { } + + inline tensor_symmetry_value_setter& operator=(Scalar const& value) + { + doAssign(value); + return *this; + } + private: + Tensor_& m_tensor; + Symmetry_ m_symmetry; + std::array m_indices; + + inline void doAssign(Scalar const& value) + { + #ifdef EIGEN_TENSOR_SYMMETRY_CHECK_VALUES + int value_flags = m_symmetry.template apply, int>(m_indices, m_symmetry.globalFlags(), m_indices); + if (value_flags & GlobalRealFlag) + eigen_assert(numext::imag(value) == 0); + if (value_flags & GlobalImagFlag) + eigen_assert(numext::real(value) == 0); + #endif + m_symmetry.template apply, int>(m_indices, 0, m_tensor, value); + } +}; + +} // end namespace 
internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSORSYMMETRY_SYMMETRY_H + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/external/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h new file mode 100644 index 0000000..54bf9db --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h @@ -0,0 +1,669 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_TENSORSYMMETRY_TEMPLATEGROUPTHEORY_H +#define EIGEN_CXX11_TENSORSYMMETRY_TEMPLATEGROUPTHEORY_H + +namespace Eigen { + +namespace internal { + +namespace group_theory { + +/** \internal + * \file CXX11/src/TensorSymmetry/util/TemplateGroupTheory.h + * This file contains C++ templates that implement group theory algorithms. + * + * The algorithms allow for a compile-time analysis of finite groups. + * + * Currently only Dimino's algorithm is implemented, which returns a list + * of all elements in a group given a set of (possibly redundant) generators. + * (One could also do that with the so-called orbital algorithm, but that + * is much more expensive and usually has no advantages.) + */ + +/********************************************************************** + * "Ok kid, here is where it gets complicated." 
+ * - Amelia Pond in the "Doctor Who" episode + * "The Big Bang" + * + * Dimino's algorithm + * ================== + * + * The following is Dimino's algorithm in sequential form: + * + * Input: identity element, list of generators, equality check, + * multiplication operation + * Output: list of group elements + * + * 1. add identity element + * 2. remove identities from list of generators + * 3. add all powers of first generator that aren't the + * identity element + * 4. go through all remaining generators: + * a. if generator is already in the list of elements + * -> do nothing + * b. otherwise + * i. remember current # of elements + * (i.e. the size of the current subgroup) + * ii. add all current elements (which includes + * the identity) each multiplied from right + * with the current generator to the group + * iii. add all remaining cosets that are generated + * by products of the new generator with itself + * and all other generators seen so far + * + * In functional form, this is implemented as a long set of recursive + * templates that have a complicated relationship. + * + * The main interface for Dimino's algorithm is the template + * enumerate_group_elements. All lists are implemented as variadic + * type_list and numeric_list + * templates. + * + * 'Calling' templates is usually done via typedefs. + * + * This algorithm is an extended version of the basic version. The + * extension consists in the fact that each group element has a set + * of flags associated with it. Multiplication of two group elements + * with each other results in a group element whose flags are the + * XOR of the flags of the previous elements. Each time the algorithm + * notices that a group element it just calculated is already in the + * list of current elements, the flags of both will be compared and + * added to the so-called 'global flags' of the group. 
+ * + * The rationale behind this extension is that this allows not only + * for the description of symmetries between tensor indices, but + * also allows for the description of hermiticity, antisymmetry and + * antihermiticity. Negation and conjugation each are specific bit + * in the flags value and if two different ways to reach a group + * element lead to two different flags, this poses a constraint on + * the allowed values of the resulting tensor. For example, if a + * group element is reach both with and without the conjugation + * flags, it is clear that the resulting tensor has to be real. + * + * Note that this flag mechanism is quite generic and may have other + * uses beyond tensor properties. + * + * IMPORTANT: + * This algorithm assumes the group to be finite. If you try to + * run it with a group that's infinite, the algorithm will only + * terminate once you hit a compiler limit (max template depth). + * Also note that trying to use this implementation to create a + * very large group will probably either make you hit the same + * limit, cause the compiler to segfault or at the very least + * take a *really* long time (hours, days, weeks - sic!) to + * compile. It is not recommended to plug in more than 4 + * generators, unless they are independent of each other. + */ + +/** \internal + * + * \class strip_identities + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Cleanse a list of group elements of the identity element + * + * This template is used to make a first pass through all initial + * generators of Dimino's algorithm and remove the identity + * elements. + * + * \sa enumerate_group_elements + */ +template class Equality, typename id, typename L> struct strip_identities; + +template< + template class Equality, + typename id, + typename t, + typename... 
ts +> +struct strip_identities> +{ + typedef typename conditional< + Equality::value, + typename strip_identities>::type, + typename concat, typename strip_identities>::type>::type + >::type type; + constexpr static int global_flags = Equality::global_flags | strip_identities>::global_flags; +}; + +template< + template class Equality, + typename id + EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, ts) +> +struct strip_identities> +{ + typedef type_list<> type; + constexpr static int global_flags = 0; +}; + +/** \internal + * + * \class dimino_first_step_elements_helper + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Recursive template that adds powers of the first generator to the list of group elements + * + * This template calls itself recursively to add powers of the first + * generator to the list of group elements. It stops if it reaches + * the identity element again. + * + * \sa enumerate_group_elements, dimino_first_step_elements + */ +template< + template class Multiply, + template class Equality, + typename id, + typename g, + typename current_element, + typename elements, + bool dont_add_current_element // = false +> +struct dimino_first_step_elements_helper +#ifndef EIGEN_PARSED_BY_DOXYGEN + : // recursive inheritance is too difficult for Doxygen + public dimino_first_step_elements_helper< + Multiply, + Equality, + id, + g, + typename Multiply::type, + typename concat>::type, + Equality::type, id>::value + > {}; + +template< + template class Multiply, + template class Equality, + typename id, + typename g, + typename current_element, + typename elements +> +struct dimino_first_step_elements_helper +#endif // EIGEN_PARSED_BY_DOXYGEN +{ + typedef elements type; + constexpr static int global_flags = Equality::global_flags; +}; + +/** \internal + * + * \class dimino_first_step_elements + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Add all powers of the first generator to the list of group elements + * + * This template takes the first non-identity 
generator and generates the initial + * list of elements which consists of all powers of that generator. For a group + * with just one generated, it would be enumerated after this. + * + * \sa enumerate_group_elements + */ +template< + template class Multiply, + template class Equality, + typename id, + typename generators +> +struct dimino_first_step_elements +{ + typedef typename get<0, generators>::type first_generator; + typedef typename skip<1, generators>::type next_generators; + typedef type_list generators_done; + + typedef dimino_first_step_elements_helper< + Multiply, + Equality, + id, + first_generator, + first_generator, + type_list, + false + > helper; + typedef typename helper::type type; + constexpr static int global_flags = helper::global_flags; +}; + +/** \internal + * + * \class dimino_get_coset_elements + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Generate all elements of a specific coset + * + * This template generates all the elements of a specific coset by + * multiplying all elements in the given subgroup with the new + * coset representative. Note that the first element of the + * subgroup is always the identity element, so the first element of + * the result of this template is going to be the coset + * representative itself. + * + * Note that this template accepts an additional boolean parameter + * that specifies whether to actually generate the coset (true) or + * just return an empty list (false). 
+ * + * \sa enumerate_group_elements, dimino_add_cosets_for_rep + */ +template< + template class Multiply, + typename sub_group_elements, + typename new_coset_rep, + bool generate_coset // = true +> +struct dimino_get_coset_elements +{ + typedef typename apply_op_from_right::type type; +}; + +template< + template class Multiply, + typename sub_group_elements, + typename new_coset_rep +> +struct dimino_get_coset_elements +{ + typedef type_list<> type; +}; + +/** \internal + * + * \class dimino_add_cosets_for_rep + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Recursive template for adding coset spaces + * + * This template multiplies the coset representative with a generator + * from the list of previous generators. If the new element is not in + * the group already, it adds the corresponding coset. Finally it + * proceeds to call itself with the next generator from the list. + * + * \sa enumerate_group_elements, dimino_add_all_coset_spaces + */ +template< + template class Multiply, + template class Equality, + typename id, + typename sub_group_elements, + typename elements, + typename generators, + typename rep_element, + int sub_group_size +> +struct dimino_add_cosets_for_rep; + +template< + template class Multiply, + template class Equality, + typename id, + typename sub_group_elements, + typename elements, + typename g, + typename... 
gs, + typename rep_element, + int sub_group_size +> +struct dimino_add_cosets_for_rep, rep_element, sub_group_size> +{ + typedef typename Multiply::type new_coset_rep; + typedef contained_in_list_gf _cil; + constexpr static bool add_coset = !_cil::value; + + typedef typename dimino_get_coset_elements< + Multiply, + sub_group_elements, + new_coset_rep, + add_coset + >::type coset_elements; + + typedef dimino_add_cosets_for_rep< + Multiply, + Equality, + id, + sub_group_elements, + typename concat::type, + type_list, + rep_element, + sub_group_size + > _helper; + + typedef typename _helper::type type; + constexpr static int global_flags = _cil::global_flags | _helper::global_flags; + + /* Note that we don't have to update global flags here, since + * we will only add these elements if they are not part of + * the group already. But that only happens if the coset rep + * is not already in the group, so the check for the coset rep + * will catch this. + */ +}; + +template< + template class Multiply, + template class Equality, + typename id, + typename sub_group_elements, + typename elements + EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, empty), + typename rep_element, + int sub_group_size +> +struct dimino_add_cosets_for_rep, rep_element, sub_group_size> +{ + typedef elements type; + constexpr static int global_flags = 0; +}; + +/** \internal + * + * \class dimino_add_all_coset_spaces + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Recursive template for adding all coset spaces for a new generator + * + * This template tries to go through the list of generators (with + * the help of the dimino_add_cosets_for_rep template) as long as + * it still finds elements that are not part of the group and add + * the corresponding cosets. 
+ * + * \sa enumerate_group_elements, dimino_add_cosets_for_rep + */ +template< + template class Multiply, + template class Equality, + typename id, + typename sub_group_elements, + typename elements, + typename generators, + int sub_group_size, + int rep_pos, + bool stop_condition // = false +> +struct dimino_add_all_coset_spaces +{ + typedef typename get::type rep_element; + typedef dimino_add_cosets_for_rep< + Multiply, + Equality, + id, + sub_group_elements, + elements, + generators, + rep_element, + sub_group_elements::count + > _ac4r; + typedef typename _ac4r::type new_elements; + + constexpr static int new_rep_pos = rep_pos + sub_group_elements::count; + constexpr static bool new_stop_condition = new_rep_pos >= new_elements::count; + + typedef dimino_add_all_coset_spaces< + Multiply, + Equality, + id, + sub_group_elements, + new_elements, + generators, + sub_group_size, + new_rep_pos, + new_stop_condition + > _helper; + + typedef typename _helper::type type; + constexpr static int global_flags = _helper::global_flags | _ac4r::global_flags; +}; + +template< + template class Multiply, + template class Equality, + typename id, + typename sub_group_elements, + typename elements, + typename generators, + int sub_group_size, + int rep_pos +> +struct dimino_add_all_coset_spaces +{ + typedef elements type; + constexpr static int global_flags = 0; +}; + +/** \internal + * + * \class dimino_add_generator + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Enlarge the group by adding a new generator. + * + * It accepts a boolean parameter that determines if the generator is redundant, + * i.e. was already seen in the group. In that case, it reduces to a no-op. 
+ * + * \sa enumerate_group_elements, dimino_add_all_coset_spaces + */ +template< + template class Multiply, + template class Equality, + typename id, + typename elements, + typename generators_done, + typename current_generator, + bool redundant // = false +> +struct dimino_add_generator +{ + /* this template is only called if the generator is not redundant + * => all elements of the group multiplied with the new generator + * are going to be new elements of the most trivial coset space + */ + typedef typename apply_op_from_right::type multiplied_elements; + typedef typename concat::type new_elements; + + constexpr static int rep_pos = elements::count; + + typedef dimino_add_all_coset_spaces< + Multiply, + Equality, + id, + elements, // elements of previous subgroup + new_elements, + typename concat>::type, + elements::count, // size of previous subgroup + rep_pos, + false // don't stop (because rep_pos >= new_elements::count is always false at this point) + > _helper; + typedef typename _helper::type type; + constexpr static int global_flags = _helper::global_flags; +}; + +template< + template class Multiply, + template class Equality, + typename id, + typename elements, + typename generators_done, + typename current_generator +> +struct dimino_add_generator +{ + // redundant case + typedef elements type; + constexpr static int global_flags = 0; +}; + +/** \internal + * + * \class dimino_add_remaining_generators + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Recursive template that adds all remaining generators to a group + * + * Loop through the list of generators that remain and successively + * add them to the group. 
+ * + * \sa enumerate_group_elements, dimino_add_generator + */ +template< + template class Multiply, + template class Equality, + typename id, + typename generators_done, + typename remaining_generators, + typename elements +> +struct dimino_add_remaining_generators +{ + typedef typename get<0, remaining_generators>::type first_generator; + typedef typename skip<1, remaining_generators>::type next_generators; + + typedef contained_in_list_gf _cil; + + typedef dimino_add_generator< + Multiply, + Equality, + id, + elements, + generators_done, + first_generator, + _cil::value + > _helper; + + typedef typename _helper::type new_elements; + + typedef dimino_add_remaining_generators< + Multiply, + Equality, + id, + typename concat>::type, + next_generators, + new_elements + > _next_iter; + + typedef typename _next_iter::type type; + constexpr static int global_flags = + _cil::global_flags | + _helper::global_flags | + _next_iter::global_flags; +}; + +template< + template class Multiply, + template class Equality, + typename id, + typename generators_done, + typename elements +> +struct dimino_add_remaining_generators, elements> +{ + typedef elements type; + constexpr static int global_flags = 0; +}; + +/** \internal + * + * \class enumerate_group_elements_noid + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Helper template that implements group element enumeration + * + * This is a helper template that implements the actual enumeration + * of group elements. This has been split so that the list of + * generators can be cleansed of the identity element before + * performing the actual operation. 
+ * + * \sa enumerate_group_elements + */ +template< + template class Multiply, + template class Equality, + typename id, + typename generators, + int initial_global_flags = 0 +> +struct enumerate_group_elements_noid +{ + typedef dimino_first_step_elements first_step; + typedef typename first_step::type first_step_elements; + + typedef dimino_add_remaining_generators< + Multiply, + Equality, + id, + typename first_step::generators_done, + typename first_step::next_generators, // remaining_generators + typename first_step::type // first_step elements + > _helper; + + typedef typename _helper::type type; + constexpr static int global_flags = + initial_global_flags | + first_step::global_flags | + _helper::global_flags; +}; + +// in case when no generators are specified +template< + template class Multiply, + template class Equality, + typename id, + int initial_global_flags +> +struct enumerate_group_elements_noid, initial_global_flags> +{ + typedef type_list type; + constexpr static int global_flags = initial_global_flags; +}; + +/** \internal + * + * \class enumerate_group_elements + * \ingroup CXX11_TensorSymmetry_Module + * + * \brief Enumerate all elements in a finite group + * + * This template enumerates all elements in a finite group. It accepts + * the following template parameters: + * + * \tparam Multiply The multiplication operation that multiplies two group elements + * with each other. + * \tparam Equality The equality check operation that checks if two group elements + * are equal to another. 
+ * \tparam id The identity element + * \tparam _generators A list of (possibly redundant) generators of the group + */ +template< + template class Multiply, + template class Equality, + typename id, + typename _generators +> +struct enumerate_group_elements + : public enumerate_group_elements_noid< + Multiply, + Equality, + id, + typename strip_identities::type, + strip_identities::global_flags + > +{ +}; + +} // end namespace group_theory + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11_TENSORSYMMETRY_TEMPLATEGROUPTHEORY_H + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h new file mode 100644 index 0000000..e4c59dc --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/Barrier.h @@ -0,0 +1,67 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2018 Rasmus Munk Larsen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +// Barrier is an object that allows one or more threads to wait until +// Notify has been called a specified number of times. + +#ifndef EIGEN_CXX11_THREADPOOL_BARRIER_H +#define EIGEN_CXX11_THREADPOOL_BARRIER_H + +namespace Eigen { + +class Barrier { + public: + Barrier(unsigned int count) : state_(count << 1), notified_(false) { + eigen_plain_assert(((count << 1) >> 1) == count); + } + ~Barrier() { eigen_plain_assert((state_ >> 1) == 0); } + + void Notify() { + unsigned int v = state_.fetch_sub(2, std::memory_order_acq_rel) - 2; + if (v != 1) { + // Clear the lowest bit (waiter flag) and check that the original state + // value was not zero. 
If it was zero, it means that notify was called + // more times than the original count. + eigen_plain_assert(((v + 2) & ~1) != 0); + return; // either count has not dropped to 0, or waiter is not waiting + } + std::unique_lock l(mu_); + eigen_plain_assert(!notified_); + notified_ = true; + cv_.notify_all(); + } + + void Wait() { + unsigned int v = state_.fetch_or(1, std::memory_order_acq_rel); + if ((v >> 1) == 0) return; + std::unique_lock l(mu_); + while (!notified_) { + cv_.wait(l); + } + } + + private: + std::mutex mu_; + std::condition_variable cv_; + std::atomic state_; // low bit is waiter flag + bool notified_; +}; + +// Notification is an object that allows a user to to wait for another +// thread to signal a notification that an event has occurred. +// +// Multiple threads can wait on the same Notification object, +// but only one caller must call Notify() on the object. +struct Notification : Barrier { + Notification() : Barrier(1){}; +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_BARRIER_H diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h new file mode 100644 index 0000000..4549aa0 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/EventCount.h @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Dmitry Vyukov +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ +#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ + +namespace Eigen { + +// EventCount allows to wait for arbitrary predicates in non-blocking +// algorithms. Think of condition variable, but wait predicate does not need to +// be protected by a mutex. 
Usage: +// Waiting thread does: +// +// if (predicate) +// return act(); +// EventCount::Waiter& w = waiters[my_index]; +// ec.Prewait(&w); +// if (predicate) { +// ec.CancelWait(&w); +// return act(); +// } +// ec.CommitWait(&w); +// +// Notifying thread does: +// +// predicate = true; +// ec.Notify(true); +// +// Notify is cheap if there are no waiting threads. Prewait/CommitWait are not +// cheap, but they are executed only if the preceding predicate check has +// failed. +// +// Algorithm outline: +// There are two main variables: predicate (managed by user) and state_. +// Operation closely resembles Dekker mutual algorithm: +// https://en.wikipedia.org/wiki/Dekker%27s_algorithm +// Waiting thread sets state_ then checks predicate, Notifying thread sets +// predicate then checks state_. Due to seq_cst fences in between these +// operations it is guaranteed than either waiter will see predicate change +// and won't block, or notifying thread will see state_ change and will unblock +// the waiter, or both. But it can't happen that both threads don't see each +// other changes, which would lead to deadlock. +class EventCount { + public: + class Waiter; + + EventCount(MaxSizeVector& waiters) + : state_(kStackMask), waiters_(waiters) { + eigen_plain_assert(waiters.size() < (1 << kWaiterBits) - 1); + } + + ~EventCount() { + // Ensure there are no waiters. + eigen_plain_assert(state_.load() == kStackMask); + } + + // Prewait prepares for waiting. + // After calling Prewait, the thread must re-check the wait predicate + // and then call either CancelWait or CommitWait. + void Prewait() { + uint64_t state = state_.load(std::memory_order_relaxed); + for (;;) { + CheckState(state); + uint64_t newstate = state + kWaiterInc; + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_seq_cst)) + return; + } + } + + // CommitWait commits waiting after Prewait. 
+ void CommitWait(Waiter* w) { + eigen_plain_assert((w->epoch & ~kEpochMask) == 0); + w->state = Waiter::kNotSignaled; + const uint64_t me = (w - &waiters_[0]) | w->epoch; + uint64_t state = state_.load(std::memory_order_seq_cst); + for (;;) { + CheckState(state, true); + uint64_t newstate; + if ((state & kSignalMask) != 0) { + // Consume the signal and return immidiately. + newstate = state - kWaiterInc - kSignalInc; + } else { + // Remove this thread from pre-wait counter and add to the waiter stack. + newstate = ((state & kWaiterMask) - kWaiterInc) | me; + w->next.store(state & (kStackMask | kEpochMask), + std::memory_order_relaxed); + } + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_acq_rel)) { + if ((state & kSignalMask) == 0) { + w->epoch += kEpochInc; + Park(w); + } + return; + } + } + } + + // CancelWait cancels effects of the previous Prewait call. + void CancelWait() { + uint64_t state = state_.load(std::memory_order_relaxed); + for (;;) { + CheckState(state, true); + uint64_t newstate = state - kWaiterInc; + // We don't know if the thread was also notified or not, + // so we should not consume a signal unconditionaly. + // Only if number of waiters is equal to number of signals, + // we know that the thread was notified and we must take away the signal. + if (((state & kWaiterMask) >> kWaiterShift) == + ((state & kSignalMask) >> kSignalShift)) + newstate -= kSignalInc; + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_acq_rel)) + return; + } + } + + // Notify wakes one or all waiting threads. + // Must be called after changing the associated wait predicate. 
+ void Notify(bool notifyAll) { + std::atomic_thread_fence(std::memory_order_seq_cst); + uint64_t state = state_.load(std::memory_order_acquire); + for (;;) { + CheckState(state); + const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift; + const uint64_t signals = (state & kSignalMask) >> kSignalShift; + // Easy case: no waiters. + if ((state & kStackMask) == kStackMask && waiters == signals) return; + uint64_t newstate; + if (notifyAll) { + // Empty wait stack and set signal to number of pre-wait threads. + newstate = + (state & kWaiterMask) | (waiters << kSignalShift) | kStackMask; + } else if (signals < waiters) { + // There is a thread in pre-wait state, unblock it. + newstate = state + kSignalInc; + } else { + // Pop a waiter from list and unpark it. + Waiter* w = &waiters_[state & kStackMask]; + uint64_t next = w->next.load(std::memory_order_relaxed); + newstate = (state & (kWaiterMask | kSignalMask)) | next; + } + CheckState(newstate); + if (state_.compare_exchange_weak(state, newstate, + std::memory_order_acq_rel)) { + if (!notifyAll && (signals < waiters)) + return; // unblocked pre-wait thread + if ((state & kStackMask) == kStackMask) return; + Waiter* w = &waiters_[state & kStackMask]; + if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed); + Unpark(w); + return; + } + } + } + + class Waiter { + friend class EventCount; + // Align to 128 byte boundary to prevent false sharing with other Waiter + // objects in the same vector. + EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic next; + std::mutex mu; + std::condition_variable cv; + uint64_t epoch = 0; + unsigned state = kNotSignaled; + enum { + kNotSignaled, + kWaiting, + kSignaled, + }; + }; + + private: + // State_ layout: + // - low kWaiterBits is a stack of waiters committed wait + // (indexes in waiters_ array are used as stack elements, + // kStackMask means empty stack). + // - next kWaiterBits is count of waiters in prewait state. + // - next kWaiterBits is count of pending signals. 
+ // - remaining bits are ABA counter for the stack. + // (stored in Waiter node and incremented on push). + static const uint64_t kWaiterBits = 14; + static const uint64_t kStackMask = (1ull << kWaiterBits) - 1; + static const uint64_t kWaiterShift = kWaiterBits; + static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1) + << kWaiterShift; + static const uint64_t kWaiterInc = 1ull << kWaiterShift; + static const uint64_t kSignalShift = 2 * kWaiterBits; + static const uint64_t kSignalMask = ((1ull << kWaiterBits) - 1) + << kSignalShift; + static const uint64_t kSignalInc = 1ull << kSignalShift; + static const uint64_t kEpochShift = 3 * kWaiterBits; + static const uint64_t kEpochBits = 64 - kEpochShift; + static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift; + static const uint64_t kEpochInc = 1ull << kEpochShift; + std::atomic state_; + MaxSizeVector& waiters_; + + static void CheckState(uint64_t state, bool waiter = false) { + static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem"); + const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift; + const uint64_t signals = (state & kSignalMask) >> kSignalShift; + eigen_plain_assert(waiters >= signals); + eigen_plain_assert(waiters < (1 << kWaiterBits) - 1); + eigen_plain_assert(!waiter || waiters > 0); + (void)waiters; + (void)signals; + } + + void Park(Waiter* w) { + std::unique_lock lock(w->mu); + while (w->state != Waiter::kSignaled) { + w->state = Waiter::kWaiting; + w->cv.wait(lock); + } + } + + void Unpark(Waiter* w) { + for (Waiter* next; w; w = next) { + uint64_t wnext = w->next.load(std::memory_order_relaxed) & kStackMask; + next = wnext == kStackMask ? nullptr : &waiters_[wnext]; + unsigned state; + { + std::unique_lock lock(w->mu); + state = w->state; + w->state = Waiter::kSignaled; + } + // Avoid notifying if it wasn't waiting. 
+ if (state == Waiter::kWaiting) w->cv.notify_one(); + } + } + + EventCount(const EventCount&) = delete; + void operator=(const EventCount&) = delete; +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_ diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h new file mode 100644 index 0000000..23a2b54 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h @@ -0,0 +1,486 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Dmitry Vyukov +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H +#define EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H + +namespace Eigen { + +template +class ThreadPoolTempl : public Eigen::ThreadPoolInterface { + public: + typedef typename Environment::Task Task; + typedef RunQueue Queue; + + ThreadPoolTempl(int num_threads, Environment env = Environment()) + : ThreadPoolTempl(num_threads, true, env) {} + + ThreadPoolTempl(int num_threads, bool allow_spinning, + Environment env = Environment()) + : env_(env), + num_threads_(num_threads), + allow_spinning_(allow_spinning), + thread_data_(num_threads), + all_coprimes_(num_threads), + waiters_(num_threads), + global_steal_partition_(EncodePartition(0, num_threads_)), + blocked_(0), + spinning_(0), + done_(false), + cancelled_(false), + ec_(waiters_) { + waiters_.resize(num_threads_); + // Calculate coprimes of all numbers [1, num_threads]. + // Coprimes are used for random walks over all threads in Steal + // and NonEmptyQueueIndex. 
Iteration is based on the fact that if we take + // a random starting thread index t and calculate num_threads - 1 subsequent + // indices as (t + coprime) % num_threads, we will cover all threads without + // repetitions (effectively getting a presudo-random permutation of thread + // indices). + eigen_plain_assert(num_threads_ < kMaxThreads); + for (int i = 1; i <= num_threads_; ++i) { + all_coprimes_.emplace_back(i); + ComputeCoprimes(i, &all_coprimes_.back()); + } +#ifndef EIGEN_THREAD_LOCAL + init_barrier_.reset(new Barrier(num_threads_)); +#endif + thread_data_.resize(num_threads_); + for (int i = 0; i < num_threads_; i++) { + SetStealPartition(i, EncodePartition(0, num_threads_)); + thread_data_[i].thread.reset( + env_.CreateThread([this, i]() { WorkerLoop(i); })); + } +#ifndef EIGEN_THREAD_LOCAL + // Wait for workers to initialize per_thread_map_. Otherwise we might race + // with them in Schedule or CurrentThreadId. + init_barrier_->Wait(); +#endif + } + + ~ThreadPoolTempl() { + done_ = true; + + // Now if all threads block without work, they will start exiting. + // But note that threads can continue to work arbitrary long, + // block, submit new work, unblock and otherwise live full life. + if (!cancelled_) { + ec_.Notify(true); + } else { + // Since we were cancelled, there might be entries in the queues. + // Empty them to prevent their destructor from asserting. + for (size_t i = 0; i < thread_data_.size(); i++) { + thread_data_[i].queue.Flush(); + } + } + // Join threads explicitly (by destroying) to avoid destruction order within + // this class. + for (size_t i = 0; i < thread_data_.size(); ++i) + thread_data_[i].thread.reset(); + } + + void SetStealPartitions(const std::vector>& partitions) { + eigen_plain_assert(partitions.size() == static_cast(num_threads_)); + + // Pass this information to each thread queue. 
+ for (int i = 0; i < num_threads_; i++) { + const auto& pair = partitions[i]; + unsigned start = pair.first, end = pair.second; + AssertBounds(start, end); + unsigned val = EncodePartition(start, end); + SetStealPartition(i, val); + } + } + + void Schedule(std::function fn) EIGEN_OVERRIDE { + ScheduleWithHint(std::move(fn), 0, num_threads_); + } + + void ScheduleWithHint(std::function fn, int start, + int limit) override { + Task t = env_.CreateTask(std::move(fn)); + PerThread* pt = GetPerThread(); + if (pt->pool == this) { + // Worker thread of this pool, push onto the thread's queue. + Queue& q = thread_data_[pt->thread_id].queue; + t = q.PushFront(std::move(t)); + } else { + // A free-standing thread (or worker of another pool), push onto a random + // queue. + eigen_plain_assert(start < limit); + eigen_plain_assert(limit <= num_threads_); + int num_queues = limit - start; + int rnd = Rand(&pt->rand) % num_queues; + eigen_plain_assert(start + rnd < limit); + Queue& q = thread_data_[start + rnd].queue; + t = q.PushBack(std::move(t)); + } + // Note: below we touch this after making w available to worker threads. + // Strictly speaking, this can lead to a racy-use-after-free. Consider that + // Schedule is called from a thread that is neither main thread nor a worker + // thread of this pool. Then, execution of w directly or indirectly + // completes overall computations, which in turn leads to destruction of + // this. We expect that such scenario is prevented by program, that is, + // this is kept alive while any threads can potentially be in Schedule. + if (!t.f) { + ec_.Notify(false); + } else { + env_.ExecuteTask(t); // Push failed, execute directly. + } + } + + void Cancel() EIGEN_OVERRIDE { + cancelled_ = true; + done_ = true; + + // Let each thread know it's been cancelled. 
+#ifdef EIGEN_THREAD_ENV_SUPPORTS_CANCELLATION + for (size_t i = 0; i < thread_data_.size(); i++) { + thread_data_[i].thread->OnCancel(); + } +#endif + + // Wake up the threads without work to let them exit on their own. + ec_.Notify(true); + } + + int NumThreads() const EIGEN_FINAL { return num_threads_; } + + int CurrentThreadId() const EIGEN_FINAL { + const PerThread* pt = const_cast(this)->GetPerThread(); + if (pt->pool == this) { + return pt->thread_id; + } else { + return -1; + } + } + + private: + // Create a single atomic that encodes start and limit information for + // each thread. + // We expect num_threads_ < 65536, so we can store them in a single + // std::atomic. + // Exposed publicly as static functions so that external callers can reuse + // this encode/decode logic for maintaining their own thread-safe copies of + // scheduling and steal domain(s). + static const int kMaxPartitionBits = 16; + static const int kMaxThreads = 1 << kMaxPartitionBits; + + inline unsigned EncodePartition(unsigned start, unsigned limit) { + return (start << kMaxPartitionBits) | limit; + } + + inline void DecodePartition(unsigned val, unsigned* start, unsigned* limit) { + *limit = val & (kMaxThreads - 1); + val >>= kMaxPartitionBits; + *start = val; + } + + void AssertBounds(int start, int end) { + eigen_plain_assert(start >= 0); + eigen_plain_assert(start < end); // non-zero sized partition + eigen_plain_assert(end <= num_threads_); + } + + inline void SetStealPartition(size_t i, unsigned val) { + thread_data_[i].steal_partition.store(val, std::memory_order_relaxed); + } + + inline unsigned GetStealPartition(int i) { + return thread_data_[i].steal_partition.load(std::memory_order_relaxed); + } + + void ComputeCoprimes(int N, MaxSizeVector* coprimes) { + for (int i = 1; i <= N; i++) { + unsigned a = i; + unsigned b = N; + // If GCD(a, b) == 1, then a and b are coprimes. 
+ while (b != 0) { + unsigned tmp = a; + a = b; + b = tmp % b; + } + if (a == 1) { + coprimes->push_back(i); + } + } + } + + typedef typename Environment::EnvThread Thread; + + struct PerThread { + constexpr PerThread() : pool(NULL), rand(0), thread_id(-1) {} + ThreadPoolTempl* pool; // Parent pool, or null for normal threads. + uint64_t rand; // Random generator state. + int thread_id; // Worker thread index in pool. +#ifndef EIGEN_THREAD_LOCAL + // Prevent false sharing. + char pad_[128]; +#endif + }; + + struct ThreadData { + constexpr ThreadData() : thread(), steal_partition(0), queue() {} + std::unique_ptr thread; + std::atomic steal_partition; + Queue queue; + }; + + Environment env_; + const int num_threads_; + const bool allow_spinning_; + MaxSizeVector thread_data_; + MaxSizeVector> all_coprimes_; + MaxSizeVector waiters_; + unsigned global_steal_partition_; + std::atomic blocked_; + std::atomic spinning_; + std::atomic done_; + std::atomic cancelled_; + EventCount ec_; +#ifndef EIGEN_THREAD_LOCAL + std::unique_ptr init_barrier_; + std::mutex per_thread_map_mutex_; // Protects per_thread_map_. + std::unordered_map> per_thread_map_; +#endif + + // Main worker thread loop. 
+ void WorkerLoop(int thread_id) { +#ifndef EIGEN_THREAD_LOCAL + std::unique_ptr new_pt(new PerThread()); + per_thread_map_mutex_.lock(); + bool insertOK = per_thread_map_.emplace(GlobalThreadIdHash(), std::move(new_pt)).second; + eigen_plain_assert(insertOK); + EIGEN_UNUSED_VARIABLE(insertOK); + per_thread_map_mutex_.unlock(); + init_barrier_->Notify(); + init_barrier_->Wait(); +#endif + PerThread* pt = GetPerThread(); + pt->pool = this; + pt->rand = GlobalThreadIdHash(); + pt->thread_id = thread_id; + Queue& q = thread_data_[thread_id].queue; + EventCount::Waiter* waiter = &waiters_[thread_id]; + // TODO(dvyukov,rmlarsen): The time spent in NonEmptyQueueIndex() is + // proportional to num_threads_ and we assume that new work is scheduled at + // a constant rate, so we set spin_count to 5000 / num_threads_. The + // constant was picked based on a fair dice roll, tune it. + const int spin_count = + allow_spinning_ && num_threads_ > 0 ? 5000 / num_threads_ : 0; + if (num_threads_ == 1) { + // For num_threads_ == 1 there is no point in going through the expensive + // steal loop. Moreover, since NonEmptyQueueIndex() calls PopBack() on the + // victim queues it might reverse the order in which ops are executed + // compared to the order in which they are scheduled, which tends to be + // counter-productive for the types of I/O workloads the single thread + // pools tend to be used for. + while (!cancelled_) { + Task t = q.PopFront(); + for (int i = 0; i < spin_count && !t.f; i++) { + if (!cancelled_.load(std::memory_order_relaxed)) { + t = q.PopFront(); + } + } + if (!t.f) { + if (!WaitForWork(waiter, &t)) { + return; + } + } + if (t.f) { + env_.ExecuteTask(t); + } + } + } else { + while (!cancelled_) { + Task t = q.PopFront(); + if (!t.f) { + t = LocalSteal(); + if (!t.f) { + t = GlobalSteal(); + if (!t.f) { + // Leave one thread spinning. This reduces latency. 
+ if (allow_spinning_ && !spinning_ && !spinning_.exchange(true)) { + for (int i = 0; i < spin_count && !t.f; i++) { + if (!cancelled_.load(std::memory_order_relaxed)) { + t = GlobalSteal(); + } else { + return; + } + } + spinning_ = false; + } + if (!t.f) { + if (!WaitForWork(waiter, &t)) { + return; + } + } + } + } + } + if (t.f) { + env_.ExecuteTask(t); + } + } + } + } + + // Steal tries to steal work from other worker threads in the range [start, + // limit) in best-effort manner. + Task Steal(unsigned start, unsigned limit) { + PerThread* pt = GetPerThread(); + const size_t size = limit - start; + unsigned r = Rand(&pt->rand); + // Reduce r into [0, size) range, this utilizes trick from + // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ + eigen_plain_assert(all_coprimes_[size - 1].size() < (1<<30)); + unsigned victim = ((uint64_t)r * (uint64_t)size) >> 32; + unsigned index = ((uint64_t) all_coprimes_[size - 1].size() * (uint64_t)r) >> 32; + unsigned inc = all_coprimes_[size - 1][index]; + + for (unsigned i = 0; i < size; i++) { + eigen_plain_assert(start + victim < limit); + Task t = thread_data_[start + victim].queue.PopBack(); + if (t.f) { + return t; + } + victim += inc; + if (victim >= size) { + victim -= size; + } + } + return Task(); + } + + // Steals work within threads belonging to the partition. + Task LocalSteal() { + PerThread* pt = GetPerThread(); + unsigned partition = GetStealPartition(pt->thread_id); + // If thread steal partition is the same as global partition, there is no + // need to go through the steal loop twice. + if (global_steal_partition_ == partition) return Task(); + unsigned start, limit; + DecodePartition(partition, &start, &limit); + AssertBounds(start, limit); + + return Steal(start, limit); + } + + // Steals work from any other thread in the pool. 
+ Task GlobalSteal() { + return Steal(0, num_threads_); + } + + + // WaitForWork blocks until new work is available (returns true), or if it is + // time to exit (returns false). Can optionally return a task to execute in t + // (in such case t.f != nullptr on return). + bool WaitForWork(EventCount::Waiter* waiter, Task* t) { + eigen_plain_assert(!t->f); + // We already did best-effort emptiness check in Steal, so prepare for + // blocking. + ec_.Prewait(); + // Now do a reliable emptiness check. + int victim = NonEmptyQueueIndex(); + if (victim != -1) { + ec_.CancelWait(); + if (cancelled_) { + return false; + } else { + *t = thread_data_[victim].queue.PopBack(); + return true; + } + } + // Number of blocked threads is used as termination condition. + // If we are shutting down and all worker threads blocked without work, + // that's we are done. + blocked_++; + // TODO is blocked_ required to be unsigned? + if (done_ && blocked_ == static_cast(num_threads_)) { + ec_.CancelWait(); + // Almost done, but need to re-check queues. + // Consider that all queues are empty and all worker threads are preempted + // right after incrementing blocked_ above. Now a free-standing thread + // submits work and calls destructor (which sets done_). If we don't + // re-check queues, we will exit leaving the work unexecuted. + if (NonEmptyQueueIndex() != -1) { + // Note: we must not pop from queues before we decrement blocked_, + // otherwise the following scenario is possible. Consider that instead + // of checking for emptiness we popped the only element from queues. + // Now other worker threads can start exiting, which is bad if the + // work item submits other work. So we just check emptiness here, + // which ensures that all worker threads exit at the same time. + blocked_--; + return true; + } + // Reached stable termination state. 
+ ec_.Notify(true); + return false; + } + ec_.CommitWait(waiter); + blocked_--; + return true; + } + + int NonEmptyQueueIndex() { + PerThread* pt = GetPerThread(); + // We intentionally design NonEmptyQueueIndex to steal work from + // anywhere in the queue so threads don't block in WaitForWork() forever + // when all threads in their partition go to sleep. Steal is still local. + const size_t size = thread_data_.size(); + unsigned r = Rand(&pt->rand); + unsigned inc = all_coprimes_[size - 1][r % all_coprimes_[size - 1].size()]; + unsigned victim = r % size; + for (unsigned i = 0; i < size; i++) { + if (!thread_data_[victim].queue.Empty()) { + return victim; + } + victim += inc; + if (victim >= size) { + victim -= size; + } + } + return -1; + } + + static EIGEN_STRONG_INLINE uint64_t GlobalThreadIdHash() { + return std::hash()(std::this_thread::get_id()); + } + + EIGEN_STRONG_INLINE PerThread* GetPerThread() { +#ifndef EIGEN_THREAD_LOCAL + static PerThread dummy; + auto it = per_thread_map_.find(GlobalThreadIdHash()); + if (it == per_thread_map_.end()) { + return &dummy; + } else { + return it->second.get(); + } +#else + EIGEN_THREAD_LOCAL PerThread per_thread_; + PerThread* pt = &per_thread_; + return pt; +#endif + } + + static EIGEN_STRONG_INLINE unsigned Rand(uint64_t* state) { + uint64_t current = *state; + // Update the internal state + *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL; + // Generate the random output (using the PCG-XSH-RS scheme) + return static_cast((current ^ (current >> 22)) >> + (22 + (current >> 61))); + } +}; + +typedef ThreadPoolTempl ThreadPool; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_NONBLOCKING_THREAD_POOL_H diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h new file mode 100644 index 0000000..b572ebc --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/RunQueue.h @@ -0,0 +1,236 @@ +// This file is 
part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Dmitry Vyukov +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_ +#define EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_ + +namespace Eigen { + +// RunQueue is a fixed-size, partially non-blocking deque or Work items. +// Operations on front of the queue must be done by a single thread (owner), +// operations on back of the queue can be done by multiple threads concurrently. +// +// Algorithm outline: +// All remote threads operating on the queue back are serialized by a mutex. +// This ensures that at most two threads access state: owner and one remote +// thread (Size aside). The algorithm ensures that the occupied region of the +// underlying array is logically continuous (can wraparound, but no stray +// occupied elements). Owner operates on one end of this region, remote thread +// operates on the other end. Synchronization between these threads +// (potential consumption of the last element and take up of the last empty +// element) happens by means of state variable in each element. States are: +// empty, busy (in process of insertion of removal) and ready. Threads claim +// elements (empty->busy and ready->busy transitions) by means of a CAS +// operation. The finishing transition (busy->empty and busy->ready) are done +// with plain store as the element is exclusively owned by the current thread. +// +// Note: we could permit only pointers as elements, then we would not need +// separate state variable as null/non-null pointer value would serve as state, +// but that would require malloc/free per operation for large, complex values +// (and this is designed to store std::function<()>). 
+template +class RunQueue { + public: + RunQueue() : front_(0), back_(0) { + // require power-of-two for fast masking + eigen_plain_assert((kSize & (kSize - 1)) == 0); + eigen_plain_assert(kSize > 2); // why would you do this? + eigen_plain_assert(kSize <= (64 << 10)); // leave enough space for counter + for (unsigned i = 0; i < kSize; i++) + array_[i].state.store(kEmpty, std::memory_order_relaxed); + } + + ~RunQueue() { eigen_plain_assert(Size() == 0); } + + // PushFront inserts w at the beginning of the queue. + // If queue is full returns w, otherwise returns default-constructed Work. + Work PushFront(Work w) { + unsigned front = front_.load(std::memory_order_relaxed); + Elem* e = &array_[front & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kEmpty || + !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) + return w; + front_.store(front + 1 + (kSize << 1), std::memory_order_relaxed); + e->w = std::move(w); + e->state.store(kReady, std::memory_order_release); + return Work(); + } + + // PopFront removes and returns the first element in the queue. + // If the queue was empty returns default-constructed Work. + Work PopFront() { + unsigned front = front_.load(std::memory_order_relaxed); + Elem* e = &array_[(front - 1) & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kReady || + !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) + return Work(); + Work w = std::move(e->w); + e->state.store(kEmpty, std::memory_order_release); + front = ((front - 1) & kMask2) | (front & ~kMask2); + front_.store(front, std::memory_order_relaxed); + return w; + } + + // PushBack adds w at the end of the queue. + // If queue is full returns w, otherwise returns default-constructed Work. 
+ Work PushBack(Work w) { + std::unique_lock lock(mutex_); + unsigned back = back_.load(std::memory_order_relaxed); + Elem* e = &array_[(back - 1) & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kEmpty || + !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) + return w; + back = ((back - 1) & kMask2) | (back & ~kMask2); + back_.store(back, std::memory_order_relaxed); + e->w = std::move(w); + e->state.store(kReady, std::memory_order_release); + return Work(); + } + + // PopBack removes and returns the last elements in the queue. + Work PopBack() { + if (Empty()) return Work(); + std::unique_lock lock(mutex_); + unsigned back = back_.load(std::memory_order_relaxed); + Elem* e = &array_[back & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (s != kReady || + !e->state.compare_exchange_strong(s, kBusy, std::memory_order_acquire)) + return Work(); + Work w = std::move(e->w); + e->state.store(kEmpty, std::memory_order_release); + back_.store(back + 1 + (kSize << 1), std::memory_order_relaxed); + return w; + } + + // PopBackHalf removes and returns half last elements in the queue. + // Returns number of elements removed. + unsigned PopBackHalf(std::vector* result) { + if (Empty()) return 0; + std::unique_lock lock(mutex_); + unsigned back = back_.load(std::memory_order_relaxed); + unsigned size = Size(); + unsigned mid = back; + if (size > 1) mid = back + (size - 1) / 2; + unsigned n = 0; + unsigned start = 0; + for (; static_cast(mid - back) >= 0; mid--) { + Elem* e = &array_[mid & kMask]; + uint8_t s = e->state.load(std::memory_order_relaxed); + if (n == 0) { + if (s != kReady || !e->state.compare_exchange_strong( + s, kBusy, std::memory_order_acquire)) + continue; + start = mid; + } else { + // Note: no need to store temporal kBusy, we exclusively own these + // elements. 
+ eigen_plain_assert(s == kReady); + } + result->push_back(std::move(e->w)); + e->state.store(kEmpty, std::memory_order_release); + n++; + } + if (n != 0) + back_.store(start + 1 + (kSize << 1), std::memory_order_relaxed); + return n; + } + + // Size returns current queue size. + // Can be called by any thread at any time. + unsigned Size() const { return SizeOrNotEmpty(); } + + // Empty tests whether container is empty. + // Can be called by any thread at any time. + bool Empty() const { return SizeOrNotEmpty() == 0; } + + // Delete all the elements from the queue. + void Flush() { + while (!Empty()) { + PopFront(); + } + } + + private: + static const unsigned kMask = kSize - 1; + static const unsigned kMask2 = (kSize << 1) - 1; + struct Elem { + std::atomic state; + Work w; + }; + enum { + kEmpty, + kBusy, + kReady, + }; + std::mutex mutex_; + // Low log(kSize) + 1 bits in front_ and back_ contain rolling index of + // front/back, respectively. The remaining bits contain modification counters + // that are incremented on Push operations. This allows us to (1) distinguish + // between empty and full conditions (if we would use log(kSize) bits for + // position, these conditions would be indistinguishable); (2) obtain + // consistent snapshot of front_/back_ for Size operation using the + // modification counters. + std::atomic front_; + std::atomic back_; + Elem array_[kSize]; + + // SizeOrNotEmpty returns current queue size; if NeedSizeEstimate is false, + // only whether the size is 0 is guaranteed to be correct. + // Can be called by any thread at any time. + template + unsigned SizeOrNotEmpty() const { + // Emptiness plays critical role in thread pool blocking. So we go to great + // effort to not produce false positives (claim non-empty queue as empty). + unsigned front = front_.load(std::memory_order_acquire); + for (;;) { + // Capture a consistent snapshot of front/tail. 
+ unsigned back = back_.load(std::memory_order_acquire); + unsigned front1 = front_.load(std::memory_order_relaxed); + if (front != front1) { + front = front1; + std::atomic_thread_fence(std::memory_order_acquire); + continue; + } + if (NeedSizeEstimate) { + return CalculateSize(front, back); + } else { + // This value will be 0 if the queue is empty, and undefined otherwise. + unsigned maybe_zero = ((front ^ back) & kMask2); + // Queue size estimate must agree with maybe zero check on the queue + // empty/non-empty state. + eigen_assert((CalculateSize(front, back) == 0) == (maybe_zero == 0)); + return maybe_zero; + } + } + } + + EIGEN_ALWAYS_INLINE + unsigned CalculateSize(unsigned front, unsigned back) const { + int size = (front & kMask2) - (back & kMask2); + // Fix overflow. + if (size < 0) size += 2 * kSize; + // Order of modification in push/pop is crafted to make the queue look + // larger than it is during concurrent modifications. E.g. push can + // increment size before the corresponding pop has decremented it. + // So the computed size can be up to kSize + 1, fix it. + if (size > static_cast(kSize)) size = kSize; + return static_cast(size); + } + + RunQueue(const RunQueue&) = delete; + void operator=(const RunQueue&) = delete; +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_RUNQUEUE_H_ diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h new file mode 100644 index 0000000..a05685f --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadCancel.h @@ -0,0 +1,23 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H +#define EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H + +// Try to come up with a portable way to cancel a thread +#if EIGEN_OS_GNULINUX + #define EIGEN_THREAD_CANCEL(t) \ + pthread_cancel(t.native_handle()); + #define EIGEN_SUPPORTS_THREAD_CANCELLATION 1 +#else +#define EIGEN_THREAD_CANCEL(t) +#endif + + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_CANCEL_H diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h new file mode 100644 index 0000000..d94a064 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadEnvironment.h @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H +#define EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H + +namespace Eigen { + +struct StlThreadEnvironment { + struct Task { + std::function f; + }; + + // EnvThread constructor must start the thread, + // destructor must join the thread. + class EnvThread { + public: + EnvThread(std::function f) : thr_(std::move(f)) {} + ~EnvThread() { thr_.join(); } + // This function is called when the threadpool is cancelled. 
+ void OnCancel() { } + + private: + std::thread thr_; + }; + + EnvThread* CreateThread(std::function f) { return new EnvThread(std::move(f)); } + Task CreateTask(std::function f) { return Task{std::move(f)}; } + void ExecuteTask(const Task& t) { t.f(); } +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_ENVIRONMENT_H diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h new file mode 100644 index 0000000..4e68474 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadLocal.h @@ -0,0 +1,301 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H +#define EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H + +#ifdef EIGEN_AVOID_THREAD_LOCAL + +#ifdef EIGEN_THREAD_LOCAL +#undef EIGEN_THREAD_LOCAL +#endif + +#else + +#if EIGEN_MAX_CPP_VER >= 11 && \ + ((EIGEN_COMP_GNUC && EIGEN_GNUC_AT_LEAST(4, 8)) || \ + __has_feature(cxx_thread_local) || \ + (EIGEN_COMP_MSVC >= 1900) ) +#define EIGEN_THREAD_LOCAL static thread_local +#endif + +// Disable TLS for Apple and Android builds with older toolchains. +#if defined(__APPLE__) +// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, +// __IPHONE_8_0. +#include +#include +#endif +// Checks whether C++11's `thread_local` storage duration specifier is +// supported. +#if defined(__apple_build_version__) && \ + ((__apple_build_version__ < 8000042) || \ + (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)) +// Notes: Xcode's clang did not support `thread_local` until version +// 8, and even then not for all iOS < 9.0. 
+#undef EIGEN_THREAD_LOCAL + +#elif defined(__ANDROID__) && EIGEN_COMP_CLANG +// There are platforms for which TLS should not be used even though the compiler +// makes it seem like it's supported (Android NDK < r12b for example). +// This is primarily because of linker problems and toolchain misconfiguration: +// TLS isn't supported until NDK r12b per +// https://developer.android.com/ndk/downloads/revision_history.html +// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in +// . For NDK < r16, users should define these macros, +// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. +#if __has_include() +#include +#endif // __has_include() +#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \ + defined(__NDK_MINOR__) && \ + ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1))) +#undef EIGEN_THREAD_LOCAL +#endif +#endif // defined(__ANDROID__) && defined(__clang__) + +#endif // EIGEN_AVOID_THREAD_LOCAL + +namespace Eigen { + +namespace internal { +template +struct ThreadLocalNoOpInitialize { + void operator()(T&) const {} +}; + +template +struct ThreadLocalNoOpRelease { + void operator()(T&) const {} +}; + +} // namespace internal + +// Thread local container for elements of type T, that does not use thread local +// storage. As long as the number of unique threads accessing this storage +// is smaller than `capacity_`, it is lock-free and wait-free. Otherwise it will +// use a mutex for synchronization. +// +// Type `T` has to be default constructible, and by default each thread will get +// a default constructed value. It is possible to specify custom `initialize` +// callable, that will be called lazily from each thread accessing this object, +// and will be passed a default initialized object of type `T`. Also it's +// possible to pass a custom `release` callable, that will be invoked before +// calling ~T(). 
+// +// Example: +// +// struct Counter { +// int value = 0; +// } +// +// Eigen::ThreadLocal counter(10); +// +// // Each thread will have access to it's own counter object. +// Counter& cnt = counter.local(); +// cnt++; +// +// WARNING: Eigen::ThreadLocal uses the OS-specific value returned by +// std::this_thread::get_id() to identify threads. This value is not guaranteed +// to be unique except for the life of the thread. A newly created thread may +// get an OS-specific ID equal to that of an already destroyed thread. +// +// Somewhat similar to TBB thread local storage, with similar restrictions: +// https://www.threadingbuildingblocks.org/docs/help/reference/thread_local_storage/enumerable_thread_specific_cls.html +// +template , + typename Release = internal::ThreadLocalNoOpRelease> +class ThreadLocal { + // We preallocate default constructed elements in MaxSizedVector. + static_assert(std::is_default_constructible::value, + "ThreadLocal data type must be default constructible"); + + public: + explicit ThreadLocal(int capacity) + : ThreadLocal(capacity, internal::ThreadLocalNoOpInitialize(), + internal::ThreadLocalNoOpRelease()) {} + + ThreadLocal(int capacity, Initialize initialize) + : ThreadLocal(capacity, std::move(initialize), + internal::ThreadLocalNoOpRelease()) {} + + ThreadLocal(int capacity, Initialize initialize, Release release) + : initialize_(std::move(initialize)), + release_(std::move(release)), + capacity_(capacity), + data_(capacity_), + ptr_(capacity_), + filled_records_(0) { + eigen_assert(capacity_ >= 0); + data_.resize(capacity_); + for (int i = 0; i < capacity_; ++i) { + ptr_.emplace_back(nullptr); + } + } + + T& local() { + std::thread::id this_thread = std::this_thread::get_id(); + if (capacity_ == 0) return SpilledLocal(this_thread); + + std::size_t h = std::hash()(this_thread); + const int start_idx = h % capacity_; + + // NOTE: From the definition of `std::this_thread::get_id()` it is + // guaranteed that we never can have 
concurrent insertions with the same key + // to our hash-map like data structure. If we didn't find an element during + // the initial traversal, it's guaranteed that no one else could have + // inserted it while we are in this function. This allows to massively + // simplify out lock-free insert-only hash map. + + // Check if we already have an element for `this_thread`. + int idx = start_idx; + while (ptr_[idx].load() != nullptr) { + ThreadIdAndValue& record = *(ptr_[idx].load()); + if (record.thread_id == this_thread) return record.value; + + idx += 1; + if (idx >= capacity_) idx -= capacity_; + if (idx == start_idx) break; + } + + // If we are here, it means that we found an insertion point in lookup + // table at `idx`, or we did a full traversal and table is full. + + // If lock-free storage is full, fallback on mutex. + if (filled_records_.load() >= capacity_) return SpilledLocal(this_thread); + + // We double check that we still have space to insert an element into a lock + // free storage. If old value in `filled_records_` is larger than the + // records capacity, it means that some other thread added an element while + // we were traversing lookup table. + int insertion_index = + filled_records_.fetch_add(1, std::memory_order_relaxed); + if (insertion_index >= capacity_) return SpilledLocal(this_thread); + + // At this point it's guaranteed that we can access to + // data_[insertion_index_] without a data race. + data_[insertion_index].thread_id = this_thread; + initialize_(data_[insertion_index].value); + + // That's the pointer we'll put into the lookup table. + ThreadIdAndValue* inserted = &data_[insertion_index]; + + // We'll use nullptr pointer to ThreadIdAndValue in a compare-and-swap loop. + ThreadIdAndValue* empty = nullptr; + + // Now we have to find an insertion point into the lookup table. 
We start + // from the `idx` that was identified as an insertion point above, it's + // guaranteed that we will have an empty record somewhere in a lookup table + // (because we created a record in the `data_`). + const int insertion_idx = idx; + + do { + // Always start search from the original insertion candidate. + idx = insertion_idx; + while (ptr_[idx].load() != nullptr) { + idx += 1; + if (idx >= capacity_) idx -= capacity_; + // If we did a full loop, it means that we don't have any free entries + // in the lookup table, and this means that something is terribly wrong. + eigen_assert(idx != insertion_idx); + } + // Atomic CAS of the pointer guarantees that any other thread, that will + // follow this pointer will see all the mutations in the `data_`. + } while (!ptr_[idx].compare_exchange_weak(empty, inserted)); + + return inserted->value; + } + + // WARN: It's not thread safe to call it concurrently with `local()`. + void ForEach(std::function f) { + // Reading directly from `data_` is unsafe, because only CAS to the + // record in `ptr_` makes all changes visible to other threads. + for (auto& ptr : ptr_) { + ThreadIdAndValue* record = ptr.load(); + if (record == nullptr) continue; + f(record->thread_id, record->value); + } + + // We did not spill into the map based storage. + if (filled_records_.load(std::memory_order_relaxed) < capacity_) return; + + // Adds a happens before edge from the last call to SpilledLocal(). + std::unique_lock lock(mu_); + for (auto& kv : per_thread_map_) { + f(kv.first, kv.second); + } + } + + // WARN: It's not thread safe to call it concurrently with `local()`. + ~ThreadLocal() { + // Reading directly from `data_` is unsafe, because only CAS to the record + // in `ptr_` makes all changes visible to other threads. + for (auto& ptr : ptr_) { + ThreadIdAndValue* record = ptr.load(); + if (record == nullptr) continue; + release_(record->value); + } + + // We did not spill into the map based storage. 
+ if (filled_records_.load(std::memory_order_relaxed) < capacity_) return; + + // Adds a happens before edge from the last call to SpilledLocal(). + std::unique_lock lock(mu_); + for (auto& kv : per_thread_map_) { + release_(kv.second); + } + } + + private: + struct ThreadIdAndValue { + std::thread::id thread_id; + T value; + }; + + // Use unordered map guarded by a mutex when lock free storage is full. + T& SpilledLocal(std::thread::id this_thread) { + std::unique_lock lock(mu_); + + auto it = per_thread_map_.find(this_thread); + if (it == per_thread_map_.end()) { + auto result = per_thread_map_.emplace(this_thread, T()); + eigen_assert(result.second); + initialize_((*result.first).second); + return (*result.first).second; + } else { + return it->second; + } + } + + Initialize initialize_; + Release release_; + const int capacity_; + + // Storage that backs lock-free lookup table `ptr_`. Records stored in this + // storage contiguously starting from index 0. + MaxSizeVector data_; + + // Atomic pointers to the data stored in `data_`. Used as a lookup table for + // linear probing hash map (https://en.wikipedia.org/wiki/Linear_probing). + MaxSizeVector> ptr_; + + // Number of records stored in the `data_`. + std::atomic filled_records_; + + // We fallback on per thread map if lock-free storage is full. In practice + // this should never happen, if `capacity_` is a reasonable estimate of the + // number of threads running in a system. + std::mutex mu_; // Protects per_thread_map_. 
+ std::unordered_map per_thread_map_; +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_LOCAL_H diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h new file mode 100644 index 0000000..25030dc --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadPoolInterface.h @@ -0,0 +1,48 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H +#define EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H + +namespace Eigen { + +// This defines an interface that ThreadPoolDevice can take to use +// custom thread pools underneath. +class ThreadPoolInterface { + public: + // Submits a closure to be run by a thread in the pool. + virtual void Schedule(std::function fn) = 0; + + // Submits a closure to be run by threads in the range [start, end) in the + // pool. + virtual void ScheduleWithHint(std::function fn, int /*start*/, + int /*end*/) { + // Just defer to Schedule in case sub-classes aren't interested in + // overriding this functionality. + Schedule(fn); + } + + // If implemented, stop processing the closures that have been enqueued. + // Currently running closures may still be processed. + // If not implemented, does nothing. + virtual void Cancel() {} + + // Returns the number of threads in the pool. + virtual int NumThreads() const = 0; + + // Returns a logical thread index between 0 and NumThreads() - 1 if called + // from one of the threads in the pool. Returns -1 otherwise. 
+ virtual int CurrentThreadId() const = 0; + + virtual ~ThreadPoolInterface() {} +}; + +} // namespace Eigen + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_POOL_INTERFACE_H diff --git a/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h new file mode 100644 index 0000000..a859c7b --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/ThreadPool/ThreadYield.h @@ -0,0 +1,20 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H +#define EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H + +// Try to come up with a portable way to yield +#if EIGEN_COMP_GNUC && EIGEN_GNUC_AT_MOST(4, 7) +#define EIGEN_THREAD_YIELD() sched_yield() +#else +#define EIGEN_THREAD_YIELD() std::this_thread::yield() +#endif + +#endif // EIGEN_CXX11_THREADPOOL_THREAD_YIELD_H diff --git a/external/unsupported/Eigen/CXX11/src/util/CXX11Meta.h b/external/unsupported/Eigen/CXX11/src/util/CXX11Meta.h new file mode 100644 index 0000000..149ceaf --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/util/CXX11Meta.h @@ -0,0 +1,537 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_CXX11META_H +#define EIGEN_CXX11META_H + +#include +#include "EmulateArray.h" + +#include "CXX11Workarounds.h" + +namespace Eigen { + +namespace internal { + +/** \internal + * \file CXX11/util/CXX11Meta.h + * This file contains generic metaprogramming classes which are not specifically related to Eigen. + * This file expands upon Core/util/Meta.h and adds support for C++11 specific features. + */ + +template +struct type_list { constexpr static int count = sizeof...(tt); }; + +template +struct type_list { constexpr static int count = sizeof...(tt) + 1; typedef t first_type; }; + +template +struct numeric_list { constexpr static std::size_t count = sizeof...(nn); }; + +template +struct numeric_list { static const std::size_t count = sizeof...(nn) + 1; const static T first_value = n; }; + +#ifndef EIGEN_PARSED_BY_DOXYGEN +/* numeric list constructors + * + * equivalencies: + * constructor result + * typename gen_numeric_list::type numeric_list + * typename gen_numeric_list_reversed::type numeric_list + * typename gen_numeric_list_swapped_pair::type numeric_list + * typename gen_numeric_list_repeated::type numeric_list + */ + +template struct gen_numeric_list : gen_numeric_list {}; +template struct gen_numeric_list { typedef numeric_list type; }; + +template struct gen_numeric_list_reversed : gen_numeric_list_reversed {}; +template struct gen_numeric_list_reversed { typedef numeric_list type; }; + +template struct gen_numeric_list_swapped_pair : gen_numeric_list_swapped_pair {}; +template struct gen_numeric_list_swapped_pair { typedef numeric_list type; }; + +template struct gen_numeric_list_repeated : gen_numeric_list_repeated {}; +template struct gen_numeric_list_repeated { typedef numeric_list type; }; + +/* list manipulation: concatenate */ + +template struct concat; + +template struct concat, type_list> { typedef type_list type; }; +template struct concat, numeric_list > { typedef numeric_list type; }; + +template struct mconcat; +template 
struct mconcat
{ typedef a type; }; +template struct mconcat : concat {}; +template struct mconcat : concat::type> {}; + +/* list manipulation: extract slices */ + +template struct take; +template struct take> : concat, typename take>::type> {}; +template struct take> { typedef type_list<> type; }; +template struct take<0, type_list> { typedef type_list<> type; }; +template<> struct take<0, type_list<>> { typedef type_list<> type; }; + +template struct take> : concat, typename take>::type> {}; +template struct take> { typedef numeric_list type; }; +template struct take<0, numeric_list> { typedef numeric_list type; }; +template struct take<0, numeric_list> { typedef numeric_list type; }; + +template struct h_skip_helper_numeric; +template struct h_skip_helper_numeric : h_skip_helper_numeric {}; +template struct h_skip_helper_numeric { typedef numeric_list type; }; +template struct h_skip_helper_numeric { typedef numeric_list type; }; +template struct h_skip_helper_numeric { typedef numeric_list type; }; + +template struct h_skip_helper_type; +template struct h_skip_helper_type : h_skip_helper_type {}; +template struct h_skip_helper_type<0, t, tt...> { typedef type_list type; }; +template struct h_skip_helper_type { typedef type_list<> type; }; +template<> struct h_skip_helper_type<0> { typedef type_list<> type; }; +#endif //not EIGEN_PARSED_BY_DOXYGEN + +template +struct h_skip { + template + constexpr static EIGEN_STRONG_INLINE typename h_skip_helper_numeric::type helper(numeric_list) { return typename h_skip_helper_numeric::type(); } + template + constexpr static EIGEN_STRONG_INLINE typename h_skip_helper_type::type helper(type_list) { return typename h_skip_helper_type::type(); } +}; + +template struct skip { typedef decltype(h_skip::helper(a())) type; }; + +template struct slice : take::type> {}; + +/* list manipulation: retrieve single element from list */ + +template struct get; + +template struct get> : get> {}; +template struct get<0, type_list> { typedef a type; }; + 
+template struct get> : get> {}; +template struct get<0, numeric_list> { constexpr static T value = a; }; + +template constexpr T array_get(const numeric_list&) { + return get<(int)n, numeric_list>::value; +} + +/* always get type, regardless of dummy; good for parameter pack expansion */ + +template struct id_numeric { typedef t type; }; +template struct id_type { typedef t type; }; + +/* equality checking, flagged version */ + +template struct is_same_gf : is_same { constexpr static int global_flags = 0; }; + +/* apply_op to list */ + +template< + bool from_left, // false + template class op, + typename additional_param, + typename... values +> +struct h_apply_op_helper { typedef type_list::type...> type; }; +template< + template class op, + typename additional_param, + typename... values +> +struct h_apply_op_helper { typedef type_list::type...> type; }; + +template< + bool from_left, + template class op, + typename additional_param +> +struct h_apply_op +{ + template + constexpr static typename h_apply_op_helper::type helper(type_list) + { return typename h_apply_op_helper::type(); } +}; + +template< + template class op, + typename additional_param, + typename a +> +struct apply_op_from_left { typedef decltype(h_apply_op::helper(a())) type; }; + +template< + template class op, + typename additional_param, + typename a +> +struct apply_op_from_right { typedef decltype(h_apply_op::helper(a())) type; }; + +/* see if an element is in a list */ + +template< + template class test, + typename check_against, + typename h_list, + bool last_check_positive = false +> +struct contained_in_list; + +template< + template class test, + typename check_against, + typename h_list +> +struct contained_in_list +{ + constexpr static bool value = true; +}; + +template< + template class test, + typename check_against, + typename a, + typename... 
as +> +struct contained_in_list, false> : contained_in_list, test::value> {}; + +template< + template class test, + typename check_against + EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, empty) +> +struct contained_in_list, false> { constexpr static bool value = false; }; + +/* see if an element is in a list and check for global flags */ + +template< + template class test, + typename check_against, + typename h_list, + int default_flags = 0, + bool last_check_positive = false, + int last_check_flags = default_flags +> +struct contained_in_list_gf; + +template< + template class test, + typename check_against, + typename h_list, + int default_flags, + int last_check_flags +> +struct contained_in_list_gf +{ + constexpr static bool value = true; + constexpr static int global_flags = last_check_flags; +}; + +template< + template class test, + typename check_against, + typename a, + typename... as, + int default_flags, + int last_check_flags +> +struct contained_in_list_gf, default_flags, false, last_check_flags> : contained_in_list_gf, default_flags, test::value, test::global_flags> {}; + +template< + template class test, + typename check_against + EIGEN_TPL_PP_SPEC_HACK_DEFC(typename, empty), + int default_flags, + int last_check_flags +> +struct contained_in_list_gf, default_flags, false, last_check_flags> { constexpr static bool value = false; constexpr static int global_flags = default_flags; }; + +/* generic reductions */ + +template< + typename Reducer, + typename... Ts +> struct reduce; + +template< + typename Reducer +> struct reduce +{ + EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE int run() { return Reducer::Identity; } +}; + +template< + typename Reducer, + typename A +> struct reduce +{ + EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE A run(A a) { return a; } +}; + +template< + typename Reducer, + typename A, + typename... Ts +> struct reduce +{ + EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(A a, Ts... 
ts) -> decltype(Reducer::run(a, reduce::run(ts...))) { + return Reducer::run(a, reduce::run(ts...)); + } +}; + +/* generic binary operations */ + +struct sum_op { + template EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a + b) { return a + b; } + static constexpr int Identity = 0; +}; +struct product_op { + template EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a * b) { return a * b; } + static constexpr int Identity = 1; +}; + +struct logical_and_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a && b) { return a && b; } }; +struct logical_or_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a || b) { return a || b; } }; + +struct equal_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a == b) { return a == b; } }; +struct not_equal_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a != b) { return a != b; } }; +struct lesser_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a < b) { return a < b; } }; +struct lesser_equal_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a <= b) { return a <= b; } }; +struct greater_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a > b) { return a > b; } }; +struct greater_equal_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a, B b) -> decltype(a >= b) { return a >= b; } }; + +/* generic unary operations */ + +struct not_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a) -> decltype(!a) { return !a; } }; +struct negation_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a) -> decltype(-a) { return -a; } }; +struct greater_equal_zero_op { template constexpr static EIGEN_STRONG_INLINE auto run(A a) -> decltype(a >= 0) { return a >= 0; } }; + + +/* reductions for lists */ + +// using auto -> 
return value spec makes ICC 13.0 and 13.1 crash here, so we have to hack it +// together in front... (13.0 doesn't work with array_prod/array_reduce/... anyway, but 13.1 +// does... +template +EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE decltype(reduce::run((*((Ts*)0))...)) arg_prod(Ts... ts) +{ + return reduce::run(ts...); +} + +template +constexpr EIGEN_STRONG_INLINE decltype(reduce::run((*((Ts*)0))...)) arg_sum(Ts... ts) +{ + return reduce::run(ts...); +} + +/* reverse arrays */ + +template +constexpr EIGEN_STRONG_INLINE Array h_array_reverse(Array arr, numeric_list) +{ + return {{array_get(arr)...}}; +} + +template +constexpr EIGEN_STRONG_INLINE array array_reverse(array arr) +{ + return h_array_reverse(arr, typename gen_numeric_list::type()); +} + + +/* generic array reductions */ + +// can't reuse standard reduce() interface above because Intel's Compiler +// *really* doesn't like it, so we just reimplement the stuff +// (start from N - 1 and work down to 0 because specialization for +// n == N - 1 also doesn't work in Intel's compiler, so it goes into +// an infinite loop) +template +struct h_array_reduce { + EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE auto run(array arr, T identity) -> decltype(Reducer::run(h_array_reduce::run(arr, identity), array_get(arr))) + { + return Reducer::run(h_array_reduce::run(arr, identity), array_get(arr)); + } +}; + +template +struct h_array_reduce +{ + EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE T run(const array& arr, T) + { + return array_get<0>(arr); + } +}; + +template +struct h_array_reduce +{ + EIGEN_DEVICE_FUNC constexpr static EIGEN_STRONG_INLINE T run(const array&, T identity) + { + return identity; + } +}; + +template +EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE auto array_reduce(const array& arr, T identity) -> decltype(h_array_reduce::run(arr, identity)) +{ + return h_array_reduce::run(arr, identity); +} + +/* standard array reductions */ + +template +EIGEN_DEVICE_FUNC constexpr 
EIGEN_STRONG_INLINE auto array_sum(const array& arr) -> decltype(array_reduce(arr, static_cast(0))) +{ + return array_reduce(arr, static_cast(0)); +} + +template +EIGEN_DEVICE_FUNC constexpr EIGEN_STRONG_INLINE auto array_prod(const array& arr) -> decltype(array_reduce(arr, static_cast(1))) +{ + return array_reduce(arr, static_cast(1)); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE t array_prod(const std::vector& a) { + eigen_assert(a.size() > 0); + t prod = 1; + for (size_t i = 0; i < a.size(); ++i) { prod *= a[i]; } + return prod; +} + +/* zip an array */ + +template +constexpr EIGEN_STRONG_INLINE array h_array_zip(array a, array b, numeric_list) +{ + return array{{ Op::run(array_get(a), array_get(b))... }}; +} + +template +constexpr EIGEN_STRONG_INLINE array array_zip(array a, array b) +{ + return h_array_zip(a, b, typename gen_numeric_list::type()); +} + +/* zip an array and reduce the result */ + +template +constexpr EIGEN_STRONG_INLINE auto h_array_zip_and_reduce(array a, array b, numeric_list) -> decltype(reduce::type...>::run(Op::run(array_get(a), array_get(b))...)) +{ + return reduce::type...>::run(Op::run(array_get(a), array_get(b))...); +} + +template +constexpr EIGEN_STRONG_INLINE auto array_zip_and_reduce(array a, array b) -> decltype(h_array_zip_and_reduce(a, b, typename gen_numeric_list::type())) +{ + return h_array_zip_and_reduce(a, b, typename gen_numeric_list::type()); +} + +/* apply stuff to an array */ + +template +constexpr EIGEN_STRONG_INLINE array h_array_apply(array a, numeric_list) +{ + return array{{ Op::run(array_get(a))... 
}}; +} + +template +constexpr EIGEN_STRONG_INLINE array array_apply(array a) +{ + return h_array_apply(a, typename gen_numeric_list::type()); +} + +/* apply stuff to an array and reduce */ + +template +constexpr EIGEN_STRONG_INLINE auto h_array_apply_and_reduce(array arr, numeric_list) -> decltype(reduce::type...>::run(Op::run(array_get(arr))...)) +{ + return reduce::type...>::run(Op::run(array_get(arr))...); +} + +template +constexpr EIGEN_STRONG_INLINE auto array_apply_and_reduce(array a) -> decltype(h_array_apply_and_reduce(a, typename gen_numeric_list::type())) +{ + return h_array_apply_and_reduce(a, typename gen_numeric_list::type()); +} + +/* repeat a value n times (and make an array out of it + * usage: + * array = repeat<16>(42); + */ + +template +struct h_repeat +{ + template + constexpr static EIGEN_STRONG_INLINE array run(t v, numeric_list) + { + return {{ typename id_numeric::type(v)... }}; + } +}; + +template +constexpr array repeat(t v) { return h_repeat::run(v, typename gen_numeric_list::type()); } + +/* instantiate a class by a C-style array */ +template +struct h_instantiate_by_c_array; + +template +struct h_instantiate_by_c_array +{ + static InstType run(ArrType* arr, Ps... args) + { + return h_instantiate_by_c_array::run(arr + 1, args..., arr[0]); + } +}; + +template +struct h_instantiate_by_c_array +{ + static InstType run(ArrType* arr, Ps... args) + { + return h_instantiate_by_c_array::run(arr + 1, arr[0], args...); + } +}; + +template +struct h_instantiate_by_c_array +{ + static InstType run(ArrType* arr, Ps... args) + { + (void)arr; + return InstType(args...); + } +}; + +template +struct h_instantiate_by_c_array +{ + static InstType run(ArrType* arr, Ps... 
args) + { + (void)arr; + return InstType(args...); + } +}; + +template +InstType instantiate_by_c_array(ArrType* arr) +{ + return h_instantiate_by_c_array::run(arr); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11META_H diff --git a/external/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h b/external/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h new file mode 100644 index 0000000..056736c --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/util/CXX11Workarounds.h @@ -0,0 +1,88 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Christian Seiler +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CXX11WORKAROUNDS_H +#define EIGEN_CXX11WORKAROUNDS_H + +/* COMPATIBILITY CHECKS + * (so users of compilers that are too old get some realistic error messages) + */ +#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1310) +#error Intel Compiler only supports required C++ features since version 13.1. +// note that most stuff in principle works with 13.0 but when combining +// some features, at some point 13.0 will just fail with an internal assertion +#elif defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 6)) +// G++ < 4.6 by default will continue processing the source files - even if we use #error to make +// it error out. For this reason, we use the pragma to make sure G++ aborts at the first error +// it sees. Unfortunately, that is still not our #error directive, but at least the output is +// short enough the user has a chance to see that the compiler version is not sufficient for +// the funky template mojo we use. 
+#pragma GCC diagnostic error "-Wfatal-errors" +#error GNU C++ Compiler (g++) only supports required C++ features since version 4.6. +#endif + +/* Check that the compiler at least claims to support C++11. It might not be sufficient + * because the compiler may not implement it correctly, but at least we'll know. + * On the other hand, visual studio still doesn't claim to support C++11 although it's + * compliant enugh for our purpose. + */ +#if (EIGEN_COMP_CXXVER < 11) +#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) +#pragma GCC diagnostic error "-Wfatal-errors" +#endif +#error This library needs at least a C++11 compliant compiler. If you use g++/clang, please enable the -std=c++11 compiler flag. (-std=c++0x on older versions.) +#endif + +namespace Eigen { + +namespace internal { + +/* std::get is only constexpr in C++14, not yet in C++11 + */ + + +template constexpr inline T& array_get(std::vector& a) { return a[I_]; } +template constexpr inline T&& array_get(std::vector&& a) { return a[I_]; } +template constexpr inline T const& array_get(std::vector const& a) { return a[I_]; } + +/* Suppose you have a template of the form + * template struct X; + * And you want to specialize it in such a way: + * template struct X> { ::: }; + * template<> struct X> { ::: }; + * This will work in Intel's compiler 13.0, but only to some extent in g++ 4.6, since + * g++ can only match templates called with parameter packs if the number of template + * arguments is not a fixed size (so inside the first specialization, referencing + * X> will fail in g++). On the other hand, g++ will accept the following: + * template struct X> { ::: }: + * as an additional (!) specialization, which will then only match the empty case. + * But Intel's compiler 13.0 won't accept that, it will only accept the empty syntax, + * so we have to create a workaround for this. + */ +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) +#define EIGEN_TPL_PP_SPEC_HACK_DEF(mt, n) mt... 
n +#define EIGEN_TPL_PP_SPEC_HACK_DEFC(mt, n) , EIGEN_TPL_PP_SPEC_HACK_DEF(mt, n) +#define EIGEN_TPL_PP_SPEC_HACK_USE(n) n... +#define EIGEN_TPL_PP_SPEC_HACK_USEC(n) , n... +#else +#define EIGEN_TPL_PP_SPEC_HACK_DEF(mt, n) +#define EIGEN_TPL_PP_SPEC_HACK_DEFC(mt, n) +#define EIGEN_TPL_PP_SPEC_HACK_USE(n) +#define EIGEN_TPL_PP_SPEC_HACK_USEC(n) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CXX11WORKAROUNDS_H + +/* + * kate: space-indent on; indent-width 2; mixedindent off; indent-mode cstyle; + */ diff --git a/external/unsupported/Eigen/CXX11/src/util/EmulateArray.h b/external/unsupported/Eigen/CXX11/src/util/EmulateArray.h new file mode 100644 index 0000000..834b20b --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/util/EmulateArray.h @@ -0,0 +1,261 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EMULATE_ARRAY_H +#define EIGEN_EMULATE_ARRAY_H + + + +// The array class is only available starting with cxx11. Emulate our own here +// if needed. Beware, msvc still doesn't advertise itself as a c++11 compiler! +// Moreover, CUDA doesn't support the STL containers, so we use our own instead. 
+#if (__cplusplus <= 199711L && EIGEN_COMP_MSVC < 1900) || defined(EIGEN_GPUCC) || defined(EIGEN_AVOID_STL_ARRAY) + +namespace Eigen { +template class array { + public: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& operator[] (size_t index) { eigen_internal_assert(index < size()); return values[index]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T& operator[] (size_t index) const { eigen_internal_assert(index < size()); return values[index]; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& at(size_t index) { eigen_assert(index < size()); return values[index]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T& at(size_t index) const { eigen_assert(index < size()); return values[index]; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& front() { return values[0]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T& front() const { return values[0]; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& back() { return values[n-1]; } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T& back() const { return values[n-1]; } + + EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE + static std::size_t size() { return n; } + + T values[n]; + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array() { } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(const T& v) { + EIGEN_STATIC_ASSERT(n==1, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(const T& v1, const T& v2) { + EIGEN_STATIC_ASSERT(n==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3) { + EIGEN_STATIC_ASSERT(n==3, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + values[2] = v3; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, + const T& v4) { + EIGEN_STATIC_ASSERT(n==4, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + values[2] = v3; + values[3] = v4; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE 
array(const T& v1, const T& v2, const T& v3, const T& v4, + const T& v5) { + EIGEN_STATIC_ASSERT(n==5, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + values[2] = v3; + values[3] = v4; + values[4] = v5; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4, + const T& v5, const T& v6) { + EIGEN_STATIC_ASSERT(n==6, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + values[2] = v3; + values[3] = v4; + values[4] = v5; + values[5] = v6; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(const T& v1, const T& v2, const T& v3, const T& v4, + const T& v5, const T& v6, const T& v7) { + EIGEN_STATIC_ASSERT(n==7, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + values[2] = v3; + values[3] = v4; + values[4] = v5; + values[5] = v6; + values[6] = v7; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array( + const T& v1, const T& v2, const T& v3, const T& v4, + const T& v5, const T& v6, const T& v7, const T& v8) { + EIGEN_STATIC_ASSERT(n==8, YOU_MADE_A_PROGRAMMING_MISTAKE) + values[0] = v1; + values[1] = v2; + values[2] = v3; + values[3] = v4; + values[4] = v5; + values[5] = v6; + values[6] = v7; + values[7] = v8; + } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array(std::initializer_list l) { + eigen_assert(l.size() == n); + internal::smart_copy(l.begin(), l.end(), values); + } +#endif +}; + + +// Specialize array for zero size +template class array { + public: + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& operator[] (size_t) { + eigen_assert(false && "Can't index a zero size array"); + return dummy; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T& operator[] (size_t) const { + eigen_assert(false && "Can't index a zero size array"); + return dummy; + } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& front() { + eigen_assert(false && "Can't index a zero size array"); + return dummy; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const 
T& front() const { + eigen_assert(false && "Can't index a zero size array"); + return dummy; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE T& back() { + eigen_assert(false && "Can't index a zero size array"); + return dummy; + } + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE const T& back() const { + eigen_assert(false && "Can't index a zero size array"); + return dummy; + } + + static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE std::size_t size() { return 0; } + + EIGEN_DEVICE_FUNC + EIGEN_STRONG_INLINE array() : dummy() { } + +#if EIGEN_HAS_VARIADIC_TEMPLATES + EIGEN_DEVICE_FUNC array(std::initializer_list l) : dummy() { + EIGEN_UNUSED_VARIABLE(l); + eigen_assert(l.size() == 0); + } +#endif + + private: + T dummy; +}; + +// Comparison operator +// Todo: implement !=, <, <=, >, and >= +template +EIGEN_DEVICE_FUNC bool operator==(const array& lhs, const array& rhs) { + for (std::size_t i = 0; i < N; ++i) { + if (lhs[i] != rhs[i]) { + return false; + } + } + return true; +} + + +namespace internal { +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T& array_get(array& a) { + return a[I_]; +} +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const T& array_get(const array& a) { + return a[I_]; +} + +template struct array_size > { + enum { value = N }; +}; +template struct array_size& > { + enum { value = N }; +}; +template struct array_size > { + enum { value = N }; +}; +template struct array_size& > { + enum { value = N }; +}; + +} // end namespace internal +} // end namespace Eigen + +#else + +// The compiler supports c++11, and we're not targeting cuda: use std::array as Eigen::array +#include +namespace Eigen { + +template using array = std::array; + +namespace internal { +/* std::get is only constexpr in C++14, not yet in C++11 + * - libstdc++ from version 4.7 onwards has it nevertheless, + * so use that + * - libstdc++ older versions: use _M_instance directly + * - libc++ all versions so far: use __elems_ directly + * - all other libs: use std::get to be portable, but + * 
this may not be constexpr + */ +#if defined(__GLIBCXX__) && __GLIBCXX__ < 20120322 +#define STD_GET_ARR_HACK a._M_instance[I_] +#elif defined(_LIBCPP_VERSION) +#define STD_GET_ARR_HACK a.__elems_[I_] +#else +#define STD_GET_ARR_HACK std::template get(a) +#endif + +template constexpr inline T& array_get(std::array& a) { return (T&) STD_GET_ARR_HACK; } +template constexpr inline T&& array_get(std::array&& a) { return (T&&) STD_GET_ARR_HACK; } +template constexpr inline T const& array_get(std::array const& a) { return (T const&) STD_GET_ARR_HACK; } + +#undef STD_GET_ARR_HACK + +} // end namespace internal +} // end namespace Eigen + +#endif + +#endif // EIGEN_EMULATE_ARRAY_H diff --git a/external/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h b/external/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h new file mode 100644 index 0000000..277ab14 --- /dev/null +++ b/external/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h @@ -0,0 +1,158 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FIXEDSIZEVECTOR_H +#define EIGEN_FIXEDSIZEVECTOR_H + +namespace Eigen { + +/** \class MaxSizeVector + * \ingroup Core + * + * \brief The MaxSizeVector class. + * + * The %MaxSizeVector provides a subset of std::vector functionality. + * + * The goal is to provide basic std::vector operations when using + * std::vector is not an option (e.g. on GPU or when compiling using + * FMA/AVX, as this can cause either compilation failures or illegal + * instruction failures). + * + * Beware: The constructors are not API compatible with these of + * std::vector. 
+ */ +template +class MaxSizeVector { + static const size_t alignment = EIGEN_PLAIN_ENUM_MAX(EIGEN_ALIGNOF(T), sizeof(void*)); + public: + // Construct a new MaxSizeVector, reserve n elements. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + explicit MaxSizeVector(size_t n) + : reserve_(n), size_(0), + data_(static_cast(internal::handmade_aligned_malloc(n * sizeof(T), alignment))) { + } + + // Construct a new MaxSizeVector, reserve and resize to n. + // Copy the init value to all elements. + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + MaxSizeVector(size_t n, const T& init) + : reserve_(n), size_(n), + data_(static_cast(internal::handmade_aligned_malloc(n * sizeof(T), alignment))) { + size_t i = 0; + EIGEN_TRY + { + for(; i < size_; ++i) { new (&data_[i]) T(init); } + } + EIGEN_CATCH(...) + { + // Construction failed, destruct in reverse order: + for(; (i+1) > 0; --i) { data_[i-1].~T(); } + internal::handmade_aligned_free(data_); + EIGEN_THROW; + } + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + ~MaxSizeVector() { + for (size_t i = size_; i > 0; --i) { + data_[i-1].~T(); + } + internal::handmade_aligned_free(data_); + } + + void resize(size_t n) { + eigen_assert(n <= reserve_); + for (; size_ < n; ++size_) { + new (&data_[size_]) T; + } + for (; size_ > n; --size_) { + data_[size_-1].~T(); + } + eigen_assert(size_ == n); + } + + // Append new elements (up to reserved size). 
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void push_back(const T& t) { + eigen_assert(size_ < reserve_); + new (&data_[size_++]) T(t); + } + + // For C++03 compatibility this only takes one argument + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void emplace_back(const X& x) { + eigen_assert(size_ < reserve_); + new (&data_[size_++]) T(x); + } + + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const T& operator[] (size_t i) const { + eigen_assert(i < size_); + return data_[i]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T& operator[] (size_t i) { + eigen_assert(i < size_); + return data_[i]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T& back() { + eigen_assert(size_ > 0); + return data_[size_ - 1]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const T& back() const { + eigen_assert(size_ > 0); + return data_[size_ - 1]; + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + void pop_back() { + eigen_assert(size_ > 0); + data_[--size_].~T(); + } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + size_t size() const { return size_; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + bool empty() const { return size_ == 0; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T* data() { return data_; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const T* data() const { return data_; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T* begin() { return data_; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + T* end() { return data_ + size_; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const T* begin() const { return data_; } + + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE + const T* end() const { return data_ + size_; } + + private: + size_t reserve_; + size_t size_; + T* data_; +}; + +} // namespace Eigen + +#endif // EIGEN_FIXEDSIZEVECTOR_H diff --git a/external/unsupported/Eigen/EulerAngles b/external/unsupported/Eigen/EulerAngles new file mode 100644 index 0000000..f8f1c5d --- /dev/null +++ b/external/unsupported/Eigen/EulerAngles @@ -0,0 +1,43 @@ +// This file is part of Eigen, a lightweight 
C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Tal Hadad +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EULERANGLES_MODULE_H +#define EIGEN_EULERANGLES_MODULE_H + + +#include "../../Eigen/Core" +#include "../../Eigen/Geometry" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** + * \defgroup EulerAngles_Module EulerAngles module + * \brief This module provides generic euler angles rotation. + * + * Euler angles are a way to represent 3D rotation. + * + * In order to use this module in your code, include this header: + * \code + * #include + * \endcode + * + * See \ref EulerAngles for more information. + * + */ + +} + +#include "src/EulerAngles/EulerSystem.h" +#include "src/EulerAngles/EulerAngles.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_EULERANGLES_MODULE_H diff --git a/external/unsupported/Eigen/FFT b/external/unsupported/Eigen/FFT new file mode 100644 index 0000000..c8c311a --- /dev/null +++ b/external/unsupported/Eigen/FFT @@ -0,0 +1,419 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Mark Borgerding mark a borgerding net +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FFT_H +#define EIGEN_FFT_H + +#include +#include +#include +#include "../../Eigen/Core" + + +/** + * \defgroup FFT_Module Fast Fourier Transform module + * + * \code + * #include + * \endcode + * + * This module provides Fast Fourier transformation, with a configurable backend + * implementation. + * + * The default implementation is based on kissfft. 
It is a small, free, and + * reasonably efficient default. + * + * There are currently two implementation backend: + * + * - fftw (http://www.fftw.org) : faster, GPL -- incompatible with Eigen in LGPL form, bigger code size. + * - MKL (http://en.wikipedia.org/wiki/Math_Kernel_Library) : fastest, commercial -- may be incompatible with Eigen in GPL form. + * + * \section FFTDesign Design + * + * The following design decisions were made concerning scaling and + * half-spectrum for real FFT. + * + * The intent is to facilitate generic programming and ease migrating code + * from Matlab/octave. + * We think the default behavior of Eigen/FFT should favor correctness and + * generality over speed. Of course, the caller should be able to "opt-out" from this + * behavior and get the speed increase if they want it. + * + * 1) %Scaling: + * Other libraries (FFTW,IMKL,KISSFFT) do not perform scaling, so there + * is a constant gain incurred after the forward&inverse transforms , so + * IFFT(FFT(x)) = Kx; this is done to avoid a vector-by-value multiply. + * The downside is that algorithms that worked correctly in Matlab/octave + * don't behave the same way once implemented in C++. + * + * How Eigen/FFT differs: invertible scaling is performed so IFFT( FFT(x) ) = x. + * + * 2) Real FFT half-spectrum + * Other libraries use only half the frequency spectrum (plus one extra + * sample for the Nyquist bin) for a real FFT, the other half is the + * conjugate-symmetric of the first half. This saves them a copy and some + * memory. The downside is the caller needs to have special logic for the + * number of bins in complex vs real. + * + * How Eigen/FFT differs: The full spectrum is returned from the forward + * transform. This facilitates generic template programming by obviating + * separate specializations for real vs complex. On the inverse + * transform, only half the spectrum is actually used if the output type is real. 
+ */ + + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#ifdef EIGEN_FFTW_DEFAULT +// FFTW: faster, GPL -- incompatible with Eigen in LGPL form, bigger code size +# include +# include "src/FFT/ei_fftw_impl.h" + namespace Eigen { + //template typedef struct internal::fftw_impl default_fft_impl; this does not work + template struct default_fft_impl : public internal::fftw_impl {}; + } +#elif defined EIGEN_MKL_DEFAULT +// TODO +// intel Math Kernel Library: fastest, commercial -- may be incompatible with Eigen in GPL form +# include "src/FFT/ei_imklfft_impl.h" + namespace Eigen { + template struct default_fft_impl : public internal::imklfft_impl {}; + } +#else +// internal::kissfft_impl: small, free, reasonably efficient default, derived from kissfft +// +# include "src/FFT/ei_kissfft_impl.h" + namespace Eigen { + template + struct default_fft_impl : public internal::kissfft_impl {}; + } +#endif + +namespace Eigen { + + +// +template struct fft_fwd_proxy; +template struct fft_inv_proxy; + +namespace internal { +template +struct traits< fft_fwd_proxy > +{ + typedef typename T_SrcMat::PlainObject ReturnType; +}; +template +struct traits< fft_inv_proxy > +{ + typedef typename T_SrcMat::PlainObject ReturnType; +}; +} + +template +struct fft_fwd_proxy + : public ReturnByValue > +{ + typedef DenseIndex Index; + + fft_fwd_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} + + template void evalTo(T_DestMat& dst) const; + + Index rows() const { return m_src.rows(); } + Index cols() const { return m_src.cols(); } +protected: + const T_SrcMat & m_src; + T_FftIfc & m_ifc; + Index m_nfft; +}; + +template +struct fft_inv_proxy + : public ReturnByValue > +{ + typedef DenseIndex Index; + + fft_inv_proxy(const T_SrcMat& src,T_FftIfc & fft, Index nfft) : m_src(src),m_ifc(fft), m_nfft(nfft) {} + + template void evalTo(T_DestMat& dst) const; + + Index rows() const { return m_src.rows(); } + Index cols() const { return 
m_src.cols(); } +protected: + const T_SrcMat & m_src; + T_FftIfc & m_ifc; + Index m_nfft; +}; + + +template > +class FFT +{ + public: + typedef T_Impl impl_type; + typedef DenseIndex Index; + typedef typename impl_type::Scalar Scalar; + typedef typename impl_type::Complex Complex; + + enum Flag { + Default=0, // goof proof + Unscaled=1, + HalfSpectrum=2, + // SomeOtherSpeedOptimization=4 + Speedy=32767 + }; + + FFT( const impl_type & impl=impl_type() , Flag flags=Default ) :m_impl(impl),m_flag(flags) { } + + inline + bool HasFlag(Flag f) const { return (m_flag & (int)f) == f;} + + inline + void SetFlag(Flag f) { m_flag |= (int)f;} + + inline + void ClearFlag(Flag f) { m_flag &= (~(int)f);} + + inline + void fwd( Complex * dst, const Scalar * src, Index nfft) + { + m_impl.fwd(dst,src,static_cast(nfft)); + if ( HasFlag(HalfSpectrum) == false) + ReflectSpectrum(dst,nfft); + } + + inline + void fwd( Complex * dst, const Complex * src, Index nfft) + { + m_impl.fwd(dst,src,static_cast(nfft)); + } + + /* + inline + void fwd2(Complex * dst, const Complex * src, int n0,int n1) + { + m_impl.fwd2(dst,src,n0,n1); + } + */ + + template + inline + void fwd( std::vector & dst, const std::vector<_Input> & src) + { + if ( NumTraits<_Input>::IsComplex == 0 && HasFlag(HalfSpectrum) ) + dst.resize( (src.size()>>1)+1); // half the bins + Nyquist bin + else + dst.resize(src.size()); + fwd(&dst[0],&src[0],src.size()); + } + + template + inline + void fwd( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) + { + typedef typename ComplexDerived::Scalar dst_type; + typedef typename InputDerived::Scalar src_type; + EIGEN_STATIC_ASSERT_VECTOR_ONLY(InputDerived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,InputDerived) // size at compile-time + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + 
EIGEN_STATIC_ASSERT(int(InputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, + THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) + + if (nfft<1) + nfft = src.size(); + + if ( NumTraits< src_type >::IsComplex == 0 && HasFlag(HalfSpectrum) ) + dst.derived().resize( (nfft>>1)+1); + else + dst.derived().resize(nfft); + + if ( src.innerStride() != 1 || src.size() < nfft ) { + Matrix tmp; + if (src.size() + inline + fft_fwd_proxy< MatrixBase, FFT > + fwd( const MatrixBase & src, Index nfft=-1) + { + return fft_fwd_proxy< MatrixBase ,FFT >( src, *this,nfft ); + } + + template + inline + fft_inv_proxy< MatrixBase, FFT > + inv( const MatrixBase & src, Index nfft=-1) + { + return fft_inv_proxy< MatrixBase ,FFT >( src, *this,nfft ); + } + + inline + void inv( Complex * dst, const Complex * src, Index nfft) + { + m_impl.inv( dst,src,static_cast(nfft) ); + if ( HasFlag( Unscaled ) == false) + scale(dst,Scalar(1./nfft),nfft); // scale the time series + } + + inline + void inv( Scalar * dst, const Complex * src, Index nfft) + { + m_impl.inv( dst,src,static_cast(nfft) ); + if ( HasFlag( Unscaled ) == false) + scale(dst,Scalar(1./nfft),nfft); // scale the time series + } + + template + inline + void inv( MatrixBase & dst, const MatrixBase & src, Index nfft=-1) + { + typedef typename ComplexDerived::Scalar src_type; + typedef typename ComplexDerived::RealScalar real_type; + typedef typename OutputDerived::Scalar dst_type; + const bool realfft= (NumTraits::IsComplex == 0); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OutputDerived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(ComplexDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(ComplexDerived,OutputDerived) // size at compile-time + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + EIGEN_STATIC_ASSERT(int(OutputDerived::Flags)&int(ComplexDerived::Flags)&DirectAccessBit, + 
THIS_METHOD_IS_ONLY_FOR_EXPRESSIONS_WITH_DIRECT_MEMORY_ACCESS_SUCH_AS_MAP_OR_PLAIN_MATRICES) + + if (nfft<1) { //automatic FFT size determination + if ( realfft && HasFlag(HalfSpectrum) ) + nfft = 2*(src.size()-1); //assume even fft size + else + nfft = src.size(); + } + dst.derived().resize( nfft ); + + // check for nfft that does not fit the input data size + Index resize_input= ( realfft && HasFlag(HalfSpectrum) ) + ? ( (nfft/2+1) - src.size() ) + : ( nfft - src.size() ); + + if ( src.innerStride() != 1 || resize_input ) { + // if the vector is strided, then we need to copy it to a packed temporary + Matrix tmp; + if ( resize_input ) { + size_t ncopy = (std::min)(src.size(),src.size() + resize_input); + tmp.setZero(src.size() + resize_input); + if ( realfft && HasFlag(HalfSpectrum) ) { + // pad at the Nyquist bin + tmp.head(ncopy) = src.head(ncopy); + tmp(ncopy-1) = real(tmp(ncopy-1)); // enforce real-only Nyquist bin + }else{ + size_t nhead,ntail; + nhead = 1+ncopy/2-1; // range [0:pi) + ntail = ncopy/2-1; // range (-pi:0) + tmp.head(nhead) = src.head(nhead); + tmp.tail(ntail) = src.tail(ntail); + if (resize_input<0) { //shrinking -- create the Nyquist bin as the average of the two bins that fold into it + tmp(nhead) = ( src(nfft/2) + src( src.size() - nfft/2 ) )*real_type(.5); + }else{ // expanding -- split the old Nyquist bin into two halves + tmp(nhead) = src(nhead) * real_type(.5); + tmp(tmp.size()-nhead) = tmp(nhead); + } + } + }else{ + tmp = src; + } + inv( &dst[0],&tmp[0], nfft); + }else{ + inv( &dst[0],&src[0], nfft); + } + } + + template + inline + void inv( std::vector<_Output> & dst, const std::vector & src,Index nfft=-1) + { + if (nfft<1) + nfft = ( NumTraits<_Output>::IsComplex == 0 && HasFlag(HalfSpectrum) ) ? 
2*(src.size()-1) : src.size(); + dst.resize( nfft ); + inv( &dst[0],&src[0],nfft); + } + + + /* + // TODO: multi-dimensional FFTs + inline + void inv2(Complex * dst, const Complex * src, int n0,int n1) + { + m_impl.inv2(dst,src,n0,n1); + if ( HasFlag( Unscaled ) == false) + scale(dst,1./(n0*n1),n0*n1); + } + */ + + inline + impl_type & impl() {return m_impl;} + private: + + template + inline + void scale(T_Data * x,Scalar s,Index nx) + { +#if 1 + for (int k=0;k::Map(x,nx) *= s; + else + Matrix::MapAligned(x,nx) *= s; + //Matrix::Map(x,nx) * s; +#endif + } + + inline + void ReflectSpectrum(Complex * freq, Index nfft) + { + // create the implicit right-half spectrum (conjugate-mirror of the left-half) + Index nhbins=(nfft>>1)+1; + for (Index k=nhbins;k < nfft; ++k ) + freq[k] = conj(freq[nfft-k]); + } + + impl_type m_impl; + int m_flag; +}; + +template +template inline +void fft_fwd_proxy::evalTo(T_DestMat& dst) const +{ + m_ifc.fwd( dst, m_src, m_nfft); +} + +template +template inline +void fft_inv_proxy::evalTo(T_DestMat& dst) const +{ + m_ifc.inv( dst, m_src, m_nfft); +} + +} + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif diff --git a/external/unsupported/Eigen/IterativeSolvers b/external/unsupported/Eigen/IterativeSolvers new file mode 100644 index 0000000..a3f58d6 --- /dev/null +++ b/external/unsupported/Eigen/IterativeSolvers @@ -0,0 +1,51 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_ITERATIVE_SOLVERS_MODULE_H +#define EIGEN_ITERATIVE_SOLVERS_MODULE_H + +#include "../../Eigen/Sparse" +#include "../../Eigen/Jacobi" +#include "../../Eigen/Householder" + + +/** + * \defgroup IterativeLinearSolvers_Module Iterative solvers module + * This module aims to provide various iterative linear and non linear solver algorithms. + * It currently provides: + * - a constrained conjugate gradient + * - a Householder GMRES implementation + * - an IDR(s) implementation + * - a DGMRES implementation + * - a MINRES implementation + * + * \code + * #include + * \endcode + */ + + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#ifndef EIGEN_MPL2_ONLY +#include "src/IterativeSolvers/IterationController.h" +#include "src/IterativeSolvers/ConstrainedConjGrad.h" +#endif + +#include "src/IterativeSolvers/IncompleteLU.h" +#include "src/IterativeSolvers/GMRES.h" +#include "src/IterativeSolvers/DGMRES.h" +//#include "src/IterativeSolvers/SSORPreconditioner.h" +#include "src/IterativeSolvers/MINRES.h" +#include "src/IterativeSolvers/IDRS.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + + +#endif // EIGEN_ITERATIVE_SOLVERS_MODULE_H diff --git a/external/unsupported/Eigen/KroneckerProduct b/external/unsupported/Eigen/KroneckerProduct new file mode 100644 index 0000000..5f5afb8 --- /dev/null +++ b/external/unsupported/Eigen/KroneckerProduct @@ -0,0 +1,36 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_KRONECKER_PRODUCT_MODULE_H +#define EIGEN_KRONECKER_PRODUCT_MODULE_H + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "../../Eigen/src/SparseCore/SparseUtil.h" + +namespace Eigen { + +/** + * \defgroup KroneckerProduct_Module KroneckerProduct module + * + * This module contains an experimental Kronecker product implementation. + * + * \code + * #include + * \endcode + */ + +} // namespace Eigen + +#include "src/KroneckerProduct/KroneckerTensorProduct.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_KRONECKER_PRODUCT_MODULE_H diff --git a/external/unsupported/Eigen/LevenbergMarquardt b/external/unsupported/Eigen/LevenbergMarquardt new file mode 100644 index 0000000..1090505 --- /dev/null +++ b/external/unsupported/Eigen/LevenbergMarquardt @@ -0,0 +1,49 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_LEVENBERGMARQUARDT_MODULE +#define EIGEN_LEVENBERGMARQUARDT_MODULE + +// #include + +#include "../../Eigen/Core" +#include "../../Eigen/Jacobi" +#include "../../Eigen/QR" +#include "NumericalDiff" + +#include "../../Eigen/SparseQR" + +/** + * \defgroup LevenbergMarquardt_Module Levenberg-Marquardt module + * + * \code + * #include + * \endcode + * + * + */ + +#include "../../Eigen/SparseCore" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +#include "src/LevenbergMarquardt/LMqrsolv.h" +#include "src/LevenbergMarquardt/LMcovar.h" +#include "src/LevenbergMarquardt/LMpar.h" + +#endif + +#include "src/LevenbergMarquardt/LevenbergMarquardt.h" +#include "src/LevenbergMarquardt/LMonestep.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_LEVENBERGMARQUARDT_MODULE diff --git a/external/unsupported/Eigen/MPRealSupport b/external/unsupported/Eigen/MPRealSupport new file mode 100644 index 0000000..c4ea4ec --- /dev/null +++ b/external/unsupported/Eigen/MPRealSupport @@ -0,0 +1,213 @@ +// This file is part of a joint effort between Eigen, a lightweight C++ template library +// for linear algebra, and MPFR C++, a C++ interface to MPFR library (http://www.holoborodko.com/pavel/) +// +// Copyright (C) 2010-2012 Pavel Holoborodko +// Copyright (C) 2010 Konstantin Holoborodko +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_MPREALSUPPORT_MODULE_H +#define EIGEN_MPREALSUPPORT_MODULE_H + +#include "../../Eigen/Core" +#include + +namespace Eigen { + +/** + * \defgroup MPRealSupport_Module MPFRC++ Support module + * \code + * #include + * \endcode + * + * This module provides support for multi precision floating point numbers + * via the MPFR C++ + * library which itself is built upon MPFR/GMP. + * + * \warning MPFR C++ is licensed under the GPL. + * + * You can find a copy of MPFR C++ that is known to be compatible in the unsupported/test/mpreal folder. + * + * Here is an example: + * +\code +#include +#include +#include +using namespace mpfr; +using namespace Eigen; +int main() +{ + // set precision to 256 bits (double has only 53 bits) + mpreal::set_default_prec(256); + // Declare matrix and vector types with multi-precision scalar type + typedef Matrix MatrixXmp; + typedef Matrix VectorXmp; + + MatrixXmp A = MatrixXmp::Random(100,100); + VectorXmp b = VectorXmp::Random(100); + + // Solve Ax=b using LU + VectorXmp x = A.lu().solve(b); + std::cout << "relative error: " << (A*x - b).norm() / b.norm() << std::endl; + return 0; +} +\endcode + * + */ + + template<> struct NumTraits + : GenericNumTraits + { + enum { + IsInteger = 0, + IsSigned = 1, + IsComplex = 0, + RequireInitialization = 1, + ReadCost = HugeCost, + AddCost = HugeCost, + MulCost = HugeCost + }; + + typedef mpfr::mpreal Real; + typedef mpfr::mpreal NonInteger; + + static inline Real highest (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::maxval(Precision); } + static inline Real lowest (long Precision = mpfr::mpreal::get_default_prec()) { return -mpfr::maxval(Precision); } + + // Constants + static inline Real Pi (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_pi(Precision); } + static inline Real Euler (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_euler(Precision); } + static inline Real Log2 (long Precision = 
mpfr::mpreal::get_default_prec()) { return mpfr::const_log2(Precision); } + static inline Real Catalan (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::const_catalan(Precision); } + + static inline Real epsilon (long Precision = mpfr::mpreal::get_default_prec()) { return mpfr::machine_epsilon(Precision); } + static inline Real epsilon (const Real& x) { return mpfr::machine_epsilon(x); } + +#ifdef MPREAL_HAVE_DYNAMIC_STD_NUMERIC_LIMITS + static inline int digits10 (long Precision = mpfr::mpreal::get_default_prec()) { return std::numeric_limits::digits10(Precision); } + static inline int digits10 (const Real& x) { return std::numeric_limits::digits10(x); } + + static inline int digits () { return std::numeric_limits::digits(); } + static inline int digits (const Real& x) { return std::numeric_limits::digits(x); } +#endif + + static inline Real dummy_precision() + { + mpfr_prec_t weak_prec = ((mpfr::mpreal::get_default_prec()-1) * 90) / 100; + return mpfr::machine_epsilon(weak_prec); + } + }; + + namespace internal { + + template<> inline mpfr::mpreal random() + { + return mpfr::random(); + } + + template<> inline mpfr::mpreal random(const mpfr::mpreal& a, const mpfr::mpreal& b) + { + return a + (b-a) * random(); + } + + inline bool isMuchSmallerThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) + { + return mpfr::abs(a) <= mpfr::abs(b) * eps; + } + + inline bool isApprox(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) + { + return mpfr::isEqualFuzzy(a,b,eps); + } + + inline bool isApproxOrLessThan(const mpfr::mpreal& a, const mpfr::mpreal& b, const mpfr::mpreal& eps) + { + return a <= b || mpfr::isEqualFuzzy(a,b,eps); + } + + template<> inline long double cast(const mpfr::mpreal& x) + { return x.toLDouble(); } + + template<> inline double cast(const mpfr::mpreal& x) + { return x.toDouble(); } + + template<> inline long cast(const mpfr::mpreal& x) + { return x.toLong(); } + + template<> inline int 
cast(const mpfr::mpreal& x) + { return int(x.toLong()); } + + // Specialize GEBP kernel and traits for mpreal (no need for peeling, nor complicated stuff) + // This also permits to directly call mpfr's routines and avoid many temporaries produced by mpreal + template<> + class gebp_traits + { + public: + typedef mpfr::mpreal ResScalar; + enum { + Vectorizable = false, + LhsPacketSize = 1, + RhsPacketSize = 1, + ResPacketSize = 1, + NumberOfRegisters = 1, + nr = 1, + mr = 1, + LhsProgress = 1, + RhsProgress = 1 + }; + typedef ResScalar LhsPacket; + typedef ResScalar RhsPacket; + typedef ResScalar ResPacket; + typedef LhsPacket LhsPacket4Packing; + + }; + + + + template + struct gebp_kernel + { + typedef mpfr::mpreal mpreal; + + EIGEN_DONT_INLINE + void operator()(const DataMapper& res, const mpreal* blockA, const mpreal* blockB, + Index rows, Index depth, Index cols, const mpreal& alpha, + Index strideA=-1, Index strideB=-1, Index offsetA=0, Index offsetB=0) + { + if(rows==0 || cols==0 || depth==0) + return; + + mpreal acc1(0,mpfr_get_prec(blockA[0].mpfr_srcptr())), + tmp (0,mpfr_get_prec(blockA[0].mpfr_srcptr())); + + if(strideA==-1) strideA = depth; + if(strideB==-1) strideB = depth; + + for(Index i=0; i +// Copyright (C) 2012 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_FUNCTIONS +#define EIGEN_MATRIX_FUNCTIONS + +#include +#include + +#include "../../Eigen/Core" +#include "../../Eigen/LU" +#include "../../Eigen/Eigenvalues" + +/** + * \defgroup MatrixFunctions_Module Matrix functions module + * \brief This module aims to provide various methods for the computation of + * matrix functions. + * + * To use this module, add + * \code + * #include + * \endcode + * at the start of your source file. + * + * This module defines the following MatrixBase methods. 
+ * - \ref matrixbase_cos "MatrixBase::cos()", for computing the matrix cosine + * - \ref matrixbase_cosh "MatrixBase::cosh()", for computing the matrix hyperbolic cosine + * - \ref matrixbase_exp "MatrixBase::exp()", for computing the matrix exponential + * - \ref matrixbase_log "MatrixBase::log()", for computing the matrix logarithm + * - \ref matrixbase_pow "MatrixBase::pow()", for computing the matrix power + * - \ref matrixbase_matrixfunction "MatrixBase::matrixFunction()", for computing general matrix functions + * - \ref matrixbase_sin "MatrixBase::sin()", for computing the matrix sine + * - \ref matrixbase_sinh "MatrixBase::sinh()", for computing the matrix hyperbolic sine + * - \ref matrixbase_sqrt "MatrixBase::sqrt()", for computing the matrix square root + * + * These methods are the main entry points to this module. + * + * %Matrix functions are defined as follows. Suppose that \f$ f \f$ + * is an entire function (that is, a function on the complex plane + * that is everywhere complex differentiable). Then its Taylor + * series + * \f[ f(0) + f'(0) x + \frac{f''(0)}{2} x^2 + \frac{f'''(0)}{3!} x^3 + \cdots \f] + * converges to \f$ f(x) \f$. In this case, we can define the matrix + * function by the same series: + * \f[ f(M) = f(0) + f'(0) M + \frac{f''(0)}{2} M^2 + \frac{f'''(0)}{3!} M^3 + \cdots \f] + * + */ + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "src/MatrixFunctions/MatrixExponential.h" +#include "src/MatrixFunctions/MatrixFunction.h" +#include "src/MatrixFunctions/MatrixSquareRoot.h" +#include "src/MatrixFunctions/MatrixLogarithm.h" +#include "src/MatrixFunctions/MatrixPower.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + + +/** +\page matrixbaseextra_page +\ingroup MatrixFunctions_Module + +\section matrixbaseextra MatrixBase methods defined in the MatrixFunctions module + +The remainder of the page documents the following MatrixBase methods +which are defined in the MatrixFunctions module. 
+ + + +\subsection matrixbase_cos MatrixBase::cos() + +Compute the matrix cosine. + +\code +const MatrixFunctionReturnValue MatrixBase::cos() const +\endcode + +\param[in] M a square matrix. +\returns expression representing \f$ \cos(M) \f$. + +This function computes the matrix cosine. Use ArrayBase::cos() for computing the entry-wise cosine. + +The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cos(). + +\sa \ref matrixbase_sin "sin()" for an example. + + + +\subsection matrixbase_cosh MatrixBase::cosh() + +Compute the matrix hyberbolic cosine. + +\code +const MatrixFunctionReturnValue MatrixBase::cosh() const +\endcode + +\param[in] M a square matrix. +\returns expression representing \f$ \cosh(M) \f$ + +This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::cosh(). + +\sa \ref matrixbase_sinh "sinh()" for an example. + + + +\subsection matrixbase_exp MatrixBase::exp() + +Compute the matrix exponential. + +\code +const MatrixExponentialReturnValue MatrixBase::exp() const +\endcode + +\param[in] M matrix whose exponential is to be computed. +\returns expression representing the matrix exponential of \p M. + +The matrix exponential of \f$ M \f$ is defined by +\f[ \exp(M) = \sum_{k=0}^\infty \frac{M^k}{k!}. \f] +The matrix exponential can be used to solve linear ordinary +differential equations: the solution of \f$ y' = My \f$ with the +initial condition \f$ y(0) = y_0 \f$ is given by +\f$ y(t) = \exp(M) y_0 \f$. + +The matrix exponential is different from applying the exp function to all the entries in the matrix. +Use ArrayBase::exp() if you want to do the latter. + +The cost of the computation is approximately \f$ 20 n^3 \f$ for +matrices of size \f$ n \f$. The number 20 depends weakly on the +norm of the matrix. + +The matrix exponential is computed using the scaling-and-squaring +method combined with Padé approximation. 
The matrix is first +rescaled, then the exponential of the reduced matrix is computed +approximant, and then the rescaling is undone by repeated +squaring. The degree of the Padé approximant is chosen such +that the approximation error is less than the round-off +error. However, errors may accumulate during the squaring phase. + +Details of the algorithm can be found in: Nicholas J. Higham, "The +scaling and squaring method for the matrix exponential revisited," +SIAM J. %Matrix Anal. Applic., 26:1179–1193, +2005. + +Example: The following program checks that +\f[ \exp \left[ \begin{array}{ccc} + 0 & \frac14\pi & 0 \\ + -\frac14\pi & 0 & 0 \\ + 0 & 0 & 0 + \end{array} \right] = \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right]. \f] +This corresponds to a rotation of \f$ \frac14\pi \f$ radians around +the z-axis. + +\include MatrixExponential.cpp +Output: \verbinclude MatrixExponential.out + +\note \p M has to be a matrix of \c float, \c double, `long double` +\c complex, \c complex, or `complex` . + + +\subsection matrixbase_log MatrixBase::log() + +Compute the matrix logarithm. + +\code +const MatrixLogarithmReturnValue MatrixBase::log() const +\endcode + +\param[in] M invertible matrix whose logarithm is to be computed. +\returns expression representing the matrix logarithm root of \p M. + +The matrix logarithm of \f$ M \f$ is a matrix \f$ X \f$ such that +\f$ \exp(X) = M \f$ where exp denotes the matrix exponential. As for +the scalar logarithm, the equation \f$ \exp(X) = M \f$ may have +multiple solutions; this function returns a matrix whose eigenvalues +have imaginary part in the interval \f$ (-\pi,\pi] \f$. + +The matrix logarithm is different from applying the log function to all the entries in the matrix. +Use ArrayBase::log() if you want to do the latter. 
+ +In the real case, the matrix \f$ M \f$ should be invertible and +it should have no eigenvalues which are real and negative (pairs of +complex conjugate eigenvalues are allowed). In the complex case, it +only needs to be invertible. + +This function computes the matrix logarithm using the Schur-Parlett +algorithm as implemented by MatrixBase::matrixFunction(). The +logarithm of an atomic block is computed by MatrixLogarithmAtomic, +which uses direct computation for 1-by-1 and 2-by-2 blocks and an +inverse scaling-and-squaring algorithm for bigger blocks, with the +square roots computed by MatrixBase::sqrt(). + +Details of the algorithm can be found in Section 11.6.2 of: +Nicholas J. Higham, +Functions of Matrices: Theory and Computation, +SIAM 2008. ISBN 978-0-898716-46-7. + +Example: The following program checks that +\f[ \log \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right] = \left[ \begin{array}{ccc} + 0 & \frac14\pi & 0 \\ + -\frac14\pi & 0 & 0 \\ + 0 & 0 & 0 + \end{array} \right]. \f] +This corresponds to a rotation of \f$ \frac14\pi \f$ radians around +the z-axis. This is the inverse of the example used in the +documentation of \ref matrixbase_exp "exp()". + +\include MatrixLogarithm.cpp +Output: \verbinclude MatrixLogarithm.out + +\note \p M has to be a matrix of \c float, \c double, `long +double`, \c complex, \c complex, or `complex`. + +\sa MatrixBase::exp(), MatrixBase::matrixFunction(), + class MatrixLogarithmAtomic, MatrixBase::sqrt(). + + +\subsection matrixbase_pow MatrixBase::pow() + +Compute the matrix raised to arbitrary real power. + +\code +const MatrixPowerReturnValue MatrixBase::pow(RealScalar p) const +\endcode + +\param[in] M base of the matrix power, should be a square matrix. +\param[in] p exponent of the matrix power. 
+ +The matrix power \f$ M^p \f$ is defined as \f$ \exp(p \log(M)) \f$, +where exp denotes the matrix exponential, and log denotes the matrix +logarithm. This is different from raising all the entries in the matrix +to the p-th power. Use ArrayBase::pow() if you want to do the latter. + +If \p p is complex, the scalar type of \p M should be the type of \p +p . \f$ M^p \f$ simply evaluates into \f$ \exp(p \log(M)) \f$. +Therefore, the matrix \f$ M \f$ should meet the conditions to be an +argument of matrix logarithm. + +If \p p is real, it is casted into the real scalar type of \p M. Then +this function computes the matrix power using the Schur-Padé +algorithm as implemented by class MatrixPower. The exponent is split +into integral part and fractional part, where the fractional part is +in the interval \f$ (-1, 1) \f$. The main diagonal and the first +super-diagonal is directly computed. + +If \p M is singular with a semisimple zero eigenvalue and \p p is +positive, the Schur factor \f$ T \f$ is reordered with Givens +rotations, i.e. + +\f[ T = \left[ \begin{array}{cc} + T_1 & T_2 \\ + 0 & 0 + \end{array} \right] \f] + +where \f$ T_1 \f$ is invertible. Then \f$ T^p \f$ is given by + +\f[ T^p = \left[ \begin{array}{cc} + T_1^p & T_1^{-1} T_1^p T_2 \\ + 0 & 0 + \end{array}. \right] \f] + +\warning Fractional power of a matrix with a non-semisimple zero +eigenvalue is not well-defined. We introduce an assertion failure +against inaccurate result, e.g. \code +#include +#include + +int main() +{ + Eigen::Matrix4d A; + A << 0, 0, 2, 3, + 0, 0, 4, 5, + 0, 0, 6, 7, + 0, 0, 8, 9; + std::cout << A.pow(0.37) << std::endl; + + // The 1 makes eigenvalue 0 non-semisimple. + A.coeffRef(0, 1) = 1; + + // This fails if EIGEN_NO_DEBUG is undefined. + std::cout << A.pow(0.37) << std::endl; + + return 0; +} +\endcode + +Details of the algorithm can be found in: Nicholas J. Higham and +Lijing Lin, "A Schur-Padé algorithm for fractional powers of a +matrix," SIAM J. %Matrix Anal. 
Applic., +32(3):1056–1078, 2011. + +Example: The following program checks that +\f[ \left[ \begin{array}{ccc} + \cos1 & -\sin1 & 0 \\ + \sin1 & \cos1 & 0 \\ + 0 & 0 & 1 + \end{array} \right]^{\frac14\pi} = \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right]. \f] +This corresponds to \f$ \frac14\pi \f$ rotations of 1 radian around +the z-axis. + +\include MatrixPower.cpp +Output: \verbinclude MatrixPower.out + +MatrixBase::pow() is user-friendly. However, there are some +circumstances under which you should use class MatrixPower directly. +MatrixPower can save the result of Schur decomposition, so it's +better for computing various powers for the same matrix. + +Example: +\include MatrixPower_optimal.cpp +Output: \verbinclude MatrixPower_optimal.out + +\note \p M has to be a matrix of \c float, \c double, `long +double`, \c complex, \c complex, or +\c complex . + +\sa MatrixBase::exp(), MatrixBase::log(), class MatrixPower. + + +\subsection matrixbase_matrixfunction MatrixBase::matrixFunction() + +Compute a matrix function. + +\code +const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const +\endcode + +\param[in] M argument of matrix function, should be a square matrix. +\param[in] f an entire function; \c f(x,n) should compute the n-th +derivative of f at x. +\returns expression representing \p f applied to \p M. + +Suppose that \p M is a matrix whose entries have type \c Scalar. +Then, the second argument, \p f, should be a function with prototype +\code +ComplexScalar f(ComplexScalar, int) +\endcode +where \c ComplexScalar = \c std::complex if \c Scalar is +real (e.g., \c float or \c double) and \c ComplexScalar = +\c Scalar if \c Scalar is complex. The return value of \c f(x,n) +should be \f$ f^{(n)}(x) \f$, the n-th derivative of f at x. + +This routine uses the algorithm described in: +Philip Davies and Nicholas J. 
Higham, +"A Schur-Parlett algorithm for computing matrix functions", +SIAM J. %Matrix Anal. Applic., 25:464–485, 2003. + +The actual work is done by the MatrixFunction class. + +Example: The following program checks that +\f[ \exp \left[ \begin{array}{ccc} + 0 & \frac14\pi & 0 \\ + -\frac14\pi & 0 & 0 \\ + 0 & 0 & 0 + \end{array} \right] = \left[ \begin{array}{ccc} + \frac12\sqrt2 & -\frac12\sqrt2 & 0 \\ + \frac12\sqrt2 & \frac12\sqrt2 & 0 \\ + 0 & 0 & 1 + \end{array} \right]. \f] +This corresponds to a rotation of \f$ \frac14\pi \f$ radians around +the z-axis. This is the same example as used in the documentation +of \ref matrixbase_exp "exp()". + +\include MatrixFunction.cpp +Output: \verbinclude MatrixFunction.out + +Note that the function \c expfn is defined for complex numbers +\c x, even though the matrix \c A is over the reals. Instead of +\c expfn, we could also have used StdStemFunctions::exp: +\code +A.matrixFunction(StdStemFunctions >::exp, &B); +\endcode + + + +\subsection matrixbase_sin MatrixBase::sin() + +Compute the matrix sine. + +\code +const MatrixFunctionReturnValue MatrixBase::sin() const +\endcode + +\param[in] M a square matrix. +\returns expression representing \f$ \sin(M) \f$. + +This function computes the matrix sine. Use ArrayBase::sin() for computing the entry-wise sine. + +The implementation calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sin(). + +Example: \include MatrixSine.cpp +Output: \verbinclude MatrixSine.out + + + +\subsection matrixbase_sinh MatrixBase::sinh() + +Compute the matrix hyperbolic sine. + +\code +MatrixFunctionReturnValue MatrixBase::sinh() const +\endcode + +\param[in] M a square matrix. +\returns expression representing \f$ \sinh(M) \f$ + +This function calls \ref matrixbase_matrixfunction "matrixFunction()" with StdStemFunctions::sinh(). 
+ +Example: \include MatrixSinh.cpp +Output: \verbinclude MatrixSinh.out + + +\subsection matrixbase_sqrt MatrixBase::sqrt() + +Compute the matrix square root. + +\code +const MatrixSquareRootReturnValue MatrixBase::sqrt() const +\endcode + +\param[in] M invertible matrix whose square root is to be computed. +\returns expression representing the matrix square root of \p M. + +The matrix square root of \f$ M \f$ is the matrix \f$ M^{1/2} \f$ +whose square is the original matrix; so if \f$ S = M^{1/2} \f$ then +\f$ S^2 = M \f$. This is different from taking the square root of all +the entries in the matrix; use ArrayBase::sqrt() if you want to do the +latter. + +In the real case, the matrix \f$ M \f$ should be invertible and +it should have no eigenvalues which are real and negative (pairs of +complex conjugate eigenvalues are allowed). In that case, the matrix +has a square root which is also real, and this is the square root +computed by this function. + +The matrix square root is computed by first reducing the matrix to +quasi-triangular form with the real Schur decomposition. The square +root of the quasi-triangular matrix can then be computed directly. The +cost is approximately \f$ 25 n^3 \f$ real flops for the real Schur +decomposition and \f$ 3\frac13 n^3 \f$ real flops for the remainder +(though the computation time in practice is likely more than this +indicates). + +Details of the algorithm can be found in: Nicholas J. Highan, +"Computing real square roots of a real matrix", Linear Algebra +Appl., 88/89:405–430, 1987. + +If the matrix is positive-definite symmetric, then the square +root is also positive-definite symmetric. In this case, it is best to +use SelfAdjointEigenSolver::operatorSqrt() to compute it. + +In the complex case, the matrix \f$ M \f$ should be invertible; +this is a restriction of the algorithm. The square root computed by +this algorithm is the one whose eigenvalues have an argument in the +interval \f$ (-\frac12\pi, \frac12\pi] \f$. 
This is the usual branch +cut. + +The computation is the same as in the real case, except that the +complex Schur decomposition is used to reduce the matrix to a +triangular matrix. The theoretical cost is the same. Details are in: +Åke Björck and Sven Hammarling, "A Schur method for the +square root of a matrix", Linear Algebra Appl., +52/53:127–140, 1983. + +Example: The following program checks that the square root of +\f[ \left[ \begin{array}{cc} + \cos(\frac13\pi) & -\sin(\frac13\pi) \\ + \sin(\frac13\pi) & \cos(\frac13\pi) + \end{array} \right], \f] +corresponding to a rotation over 60 degrees, is a rotation over 30 degrees: +\f[ \left[ \begin{array}{cc} + \cos(\frac16\pi) & -\sin(\frac16\pi) \\ + \sin(\frac16\pi) & \cos(\frac16\pi) + \end{array} \right]. \f] + +\include MatrixSquareRoot.cpp +Output: \verbinclude MatrixSquareRoot.out + +\sa class RealSchur, class ComplexSchur, class MatrixSquareRoot, + SelfAdjointEigenSolver::operatorSqrt(). + +*/ + +#endif // EIGEN_MATRIX_FUNCTIONS + diff --git a/external/unsupported/Eigen/MoreVectorization b/external/unsupported/Eigen/MoreVectorization new file mode 100644 index 0000000..7662b47 --- /dev/null +++ b/external/unsupported/Eigen/MoreVectorization @@ -0,0 +1,24 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_MOREVECTORIZATION_MODULE_H +#define EIGEN_MOREVECTORIZATION_MODULE_H + +#include "../../Eigen/Core" + +namespace Eigen { + +/** + * \defgroup MoreVectorization More vectorization module + */ + +} + +#include "src/MoreVectorization/MathFunctions.h" + +#endif // EIGEN_MOREVECTORIZATION_MODULE_H diff --git a/external/unsupported/Eigen/NonLinearOptimization b/external/unsupported/Eigen/NonLinearOptimization new file mode 100644 index 0000000..961f192 --- /dev/null +++ b/external/unsupported/Eigen/NonLinearOptimization @@ -0,0 +1,140 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NONLINEAROPTIMIZATION_MODULE +#define EIGEN_NONLINEAROPTIMIZATION_MODULE + +#include + +#include "../../Eigen/Core" +#include "../../Eigen/Jacobi" +#include "../../Eigen/QR" +#include "NumericalDiff" + +/** + * \defgroup NonLinearOptimization_Module Non linear optimization module + * + * \code + * #include + * \endcode + * + * This module provides implementation of two important algorithms in non linear + * optimization. In both cases, we consider a system of non linear functions. Of + * course, this should work, and even work very well if those functions are + * actually linear. But if this is so, you should probably better use other + * methods more fitted to this special case. + * + * One algorithm allows to find a least-squares solution of such a system + * (Levenberg-Marquardt algorithm) and the second one is used to find + * a zero for the system (Powell hybrid "dogleg" method). + * + * This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK). + * Minpack is a very famous, old, robust and well renowned package, written in + * fortran. 
Those implementations have been carefully tuned, tested, and used + * for several decades. + * + * The original fortran code was automatically translated using f2c (http://en.wikipedia.org/wiki/F2c) in C, + * then c++, and then cleaned by several different authors. + * The last one of those cleanings being our starting point : + * http://devernay.free.fr/hacks/cminpack.html + * + * Finally, we ported this code to Eigen, creating classes and API + * coherent with Eigen. When possible, we switched to Eigen + * implementation, such as most linear algebra (vectors, matrices, stable norms). + * + * Doing so, we were very careful to check the tests we setup at the very + * beginning, which ensure that the same results are found. + * + * \section Tests Tests + * + * The tests are placed in the file unsupported/test/NonLinear.cpp. + * + * There are two kinds of tests : those that come from examples bundled with cminpack. + * They guaranty we get the same results as the original algorithms (value for 'x', + * for the number of evaluations of the function, and for the number of evaluations + * of the Jacobian if ever). + * + * Other tests were added by myself at the very beginning of the + * process and check the results for Levenberg-Marquardt using the reference data + * on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then i've + * carefully checked that the same results were obtained when modifying the + * code. Please note that we do not always get the exact same decimals as they do, + * but this is ok : they use 128bits float, and we do the tests using the C type 'double', + * which is 64 bits on most platforms (x86 and amd64, at least). + * I've performed those tests on several other implementations of Levenberg-Marquardt, and + * (c)minpack performs VERY well compared to those, both in accuracy and speed. 
+ * + * The documentation for running the tests is on the wiki + * http://eigen.tuxfamily.org/index.php?title=Tests + * + * \section API API: overview of methods + * + * Both algorithms needs a functor computing the Jacobian. It can be computed by + * hand, using auto-differentiation (see \ref AutoDiff_Module), or using numerical + * differences (see \ref NumericalDiff_Module). For instance: + *\code + * MyFunc func; + * NumericalDiff func_with_num_diff(func); + * LevenbergMarquardt > lm(func_with_num_diff); + * \endcode + * For HybridNonLinearSolver, the method solveNumericalDiff() does the above wrapping for + * you. + * + * The methods LevenbergMarquardt.lmder1()/lmdif1()/lmstr1() and + * HybridNonLinearSolver.hybrj1()/hybrd1() are specific methods from the original + * minpack package that you probably should NOT use until you are porting a code that + * was previously using minpack. They just define a 'simple' API with default values + * for some parameters. + * + * All algorithms are provided using two APIs : + * - one where the user inits the algorithm, and uses '*OneStep()' as much as he wants : + * this way the caller have control over the steps + * - one where the user just calls a method (optimize() or solve()) which will + * handle the loop: init + loop until a stop condition is met. Those are provided for + * convenience. + * + * As an example, the method LevenbergMarquardt::minimize() is + * implemented as follow: + * \code + * Status LevenbergMarquardt::minimize(FVectorType &x, const int mode) + * { + * Status status = minimizeInit(x, mode); + * do { + * status = minimizeOneStep(x, mode); + * } while (status==Running); + * return status; + * } + * \endcode + * + * \section examples Examples + * + * The easiest way to understand how to use this module is by looking at the many examples in the file + * unsupported/test/NonLinearOptimization.cpp. 
+ */ + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +#include "src/NonLinearOptimization/qrsolv.h" +#include "src/NonLinearOptimization/r1updt.h" +#include "src/NonLinearOptimization/r1mpyq.h" +#include "src/NonLinearOptimization/rwupdt.h" +#include "src/NonLinearOptimization/fdjac1.h" +#include "src/NonLinearOptimization/lmpar.h" +#include "src/NonLinearOptimization/dogleg.h" +#include "src/NonLinearOptimization/covar.h" + +#include "src/NonLinearOptimization/chkder.h" + +#endif + +#include "src/NonLinearOptimization/HybridNonLinearSolver.h" +#include "src/NonLinearOptimization/LevenbergMarquardt.h" + + +#endif // EIGEN_NONLINEAROPTIMIZATION_MODULE diff --git a/external/unsupported/Eigen/NumericalDiff b/external/unsupported/Eigen/NumericalDiff new file mode 100644 index 0000000..0668f96 --- /dev/null +++ b/external/unsupported/Eigen/NumericalDiff @@ -0,0 +1,56 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NUMERICALDIFF_MODULE +#define EIGEN_NUMERICALDIFF_MODULE + +#include "../../Eigen/Core" + +namespace Eigen { + +/** + * \defgroup NumericalDiff_Module Numerical differentiation module + * + * \code + * #include + * \endcode + * + * See http://en.wikipedia.org/wiki/Numerical_differentiation + * + * Warning : this should NOT be confused with automatic differentiation, which + * is a different method and has its own module in Eigen : \ref + * AutoDiff_Module. + * + * Currently only "Forward" and "Central" schemes are implemented. Those + * are basic methods, and there exist some more elaborated way of + * computing such approximates. They are implemented using both + * proprietary and free software, and usually requires linking to an + * external library. 
It is very easy for you to write a functor + * using such software, and the purpose is quite orthogonal to what we + * want to achieve with Eigen. + * + * This is why we will not provide wrappers for every great numerical + * differentiation software that exist, but should rather stick with those + * basic ones, that still are useful for testing. + * + * Also, the \ref NonLinearOptimization_Module needs this in order to + * provide full features compatibility with the original (c)minpack + * package. + * + */ +} + +//@{ + +#include "src/NumericalDiff/NumericalDiff.h" + +//@} + + +#endif // EIGEN_NUMERICALDIFF_MODULE diff --git a/external/unsupported/Eigen/OpenGLSupport b/external/unsupported/Eigen/OpenGLSupport new file mode 100644 index 0000000..f8c2130 --- /dev/null +++ b/external/unsupported/Eigen/OpenGLSupport @@ -0,0 +1,322 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_OPENGL_MODULE +#define EIGEN_OPENGL_MODULE + +#include "../../Eigen/Geometry" + +#if defined(__APPLE_CC__) + #include +#else + #include +#endif + +namespace Eigen { + +/** + * \defgroup OpenGLSUpport_Module OpenGL Support module + * + * This module provides wrapper functions for a couple of OpenGL functions + * which simplify the way to pass Eigen's object to openGL. + * Here is an example: + * + * \code + * // You need to add path_to_eigen/unsupported to your include path. + * #include + * // ... + * Vector3f x, y; + * Matrix3f rot; + * + * glVertex(y + x * rot); + * + * Quaternion q; + * glRotate(q); + * + * // ... 
+ * \endcode + * + */ +//@{ + +#define EIGEN_GL_FUNC_DECLARATION(FUNC) \ +namespace internal { \ + template< typename XprType, \ + typename Scalar = typename XprType::Scalar, \ + int Rows = XprType::RowsAtCompileTime, \ + int Cols = XprType::ColsAtCompileTime, \ + bool IsGLCompatible = bool(internal::evaluator::Flags&LinearAccessBit) \ + && bool(XprType::Flags&DirectAccessBit) \ + && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ + \ + template \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { \ + EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(p); } \ + }; \ +} \ + \ +template inline void FUNC(const Eigen::DenseBase& p) { \ + EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(p.derived()); \ +} + + +#define EIGEN_GL_FUNC_SPECIALIZATION_MAT(FUNC,SCALAR,ROWS,COLS,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ + }; \ +} + + +#define EIGEN_GL_FUNC_SPECIALIZATION_VEC(FUNC,SCALAR,SIZE,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ + }; \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(const XprType& p) { FUNC##SUFFIX(p.data()); } \ + }; \ +} + + +EIGEN_GL_FUNC_DECLARATION (glVertex) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 2,2iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 2,2sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 
3,3dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,int, 4,4iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,short, 4,4sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,float, 4,4fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glVertex,double, 4,4dv) + +EIGEN_GL_FUNC_DECLARATION (glTexCoord) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 2,2iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 2,2sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 3,3dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,int, 4,4iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,short, 4,4sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,float, 4,4fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTexCoord,double, 4,4dv) + +EIGEN_GL_FUNC_DECLARATION (glColor) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 2,2iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 2,2sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 3,3dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,int, 4,4iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,short, 4,4sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,float, 4,4fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glColor,double, 4,4dv) + +EIGEN_GL_FUNC_DECLARATION (glNormal) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,int, 3,3iv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,short, 3,3sv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glNormal,double, 3,3dv) + +inline void glScale2fv(const float* v) { 
glScalef(v[0], v[1], 1.f); } +inline void glScale2dv(const double* v) { glScaled(v[0], v[1], 1.0); } +inline void glScale3fv(const float* v) { glScalef(v[0], v[1], v[2]); } +inline void glScale3dv(const double* v) { glScaled(v[0], v[1], v[2]); } + +EIGEN_GL_FUNC_DECLARATION (glScale) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glScale,double, 3,3dv) + +template void glScale(const UniformScaling& s) { glScale(Matrix::Constant(s.factor())); } + +inline void glTranslate2fv(const float* v) { glTranslatef(v[0], v[1], 0.f); } +inline void glTranslate2dv(const double* v) { glTranslated(v[0], v[1], 0.0); } +inline void glTranslate3fv(const float* v) { glTranslatef(v[0], v[1], v[2]); } +inline void glTranslate3dv(const double* v) { glTranslated(v[0], v[1], v[2]); } + +EIGEN_GL_FUNC_DECLARATION (glTranslate) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 2,2fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 2,2dv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,float, 3,3fv) +EIGEN_GL_FUNC_SPECIALIZATION_VEC(glTranslate,double, 3,3dv) + +template void glTranslate(const Translation& t) { glTranslate(t.vector()); } +template void glTranslate(const Translation& t) { glTranslate(t.vector()); } + +EIGEN_GL_FUNC_DECLARATION (glMultMatrix) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,float, 4,4,f) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glMultMatrix,double, 4,4,d) + +template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } +template void glMultMatrix(const Transform& t) { glMultMatrix(t.matrix()); } +template void glMultMatrix(const Transform& t) { glMultMatrix(Transform(t).matrix()); } + +EIGEN_GL_FUNC_DECLARATION (glLoadMatrix) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,float, 4,4,f) +EIGEN_GL_FUNC_SPECIALIZATION_MAT(glLoadMatrix,double, 4,4,d) + +template void glLoadMatrix(const Transform& t) 
{ glLoadMatrix(t.matrix()); } +template void glLoadMatrix(const Transform& t) { glLoadMatrix(t.matrix()); } +template void glLoadMatrix(const Transform& t) { glLoadMatrix(Transform(t).matrix()); } + +inline void glRotate(const Rotation2D& rot) +{ + glRotatef(rot.angle()*180.f/float(EIGEN_PI), 0.f, 0.f, 1.f); +} +inline void glRotate(const Rotation2D& rot) +{ + glRotated(rot.angle()*180.0/double(EIGEN_PI), 0.0, 0.0, 1.0); +} + +template void glRotate(const RotationBase& rot) +{ + Transform tr(rot); + glMultMatrix(tr.matrix()); +} + +#define EIGEN_GL_MAKE_CONST_const const +#define EIGEN_GL_MAKE_CONST__ +#define EIGEN_GL_EVAL(X) X + +#define EIGEN_GL_FUNC1_DECLARATION(FUNC,ARG1,CONST) \ +namespace internal { \ + template< typename XprType, \ + typename Scalar = typename XprType::Scalar, \ + int Rows = XprType::RowsAtCompileTime, \ + int Cols = XprType::ColsAtCompileTime, \ + bool IsGLCompatible = bool(internal::evaluator::Flags&LinearAccessBit) \ + && bool(XprType::Flags&DirectAccessBit) \ + && (XprType::IsVectorAtCompileTime || (XprType::Flags&RowMajorBit)==0)> \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl); \ + \ + template \ + struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { \ + EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl)::type>::run(a,p); } \ + }; \ +} \ + \ +template inline void FUNC(ARG1 a,EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) Eigen::DenseBase& p) { \ + EIGEN_CAT(EIGEN_CAT(internal::gl_,FUNC),_impl)::run(a,p.derived()); \ +} + + +#define EIGEN_GL_FUNC1_SPECIALIZATION_MAT(FUNC,ARG1,CONST,SCALAR,ROWS,COLS,SUFFIX) \ +namespace internal { \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ + }; \ +} + + +#define EIGEN_GL_FUNC1_SPECIALIZATION_VEC(FUNC,ARG1,CONST,SCALAR,SIZE,SUFFIX) \ +namespace internal { \ + template< typename 
XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ + }; \ + template< typename XprType> struct EIGEN_CAT(EIGEN_CAT(gl_,FUNC),_impl) { \ + inline static void run(ARG1 a, EIGEN_GL_EVAL(EIGEN_GL_MAKE_CONST_##CONST) XprType& p) { FUNC##SUFFIX(a,p.data()); } \ + }; \ +} + +EIGEN_GL_FUNC1_DECLARATION (glGet,GLenum,_) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,float, 4,4,Floatv) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glGet,GLenum,_,double, 4,4,Doublev) + +// glUniform API + +#ifdef GL_VERSION_2_0 + +inline void glUniform2fv_ei (GLint loc, const float* v) { glUniform2fv(loc,1,v); } +inline void glUniform2iv_ei (GLint loc, const int* v) { glUniform2iv(loc,1,v); } + +inline void glUniform3fv_ei (GLint loc, const float* v) { glUniform3fv(loc,1,v); } +inline void glUniform3iv_ei (GLint loc, const int* v) { glUniform3iv(loc,1,v); } + +inline void glUniform4fv_ei (GLint loc, const float* v) { glUniform4fv(loc,1,v); } +inline void glUniform4iv_ei (GLint loc, const int* v) { glUniform4iv(loc,1,v); } + +inline void glUniformMatrix2fv_ei (GLint loc, const float* v) { glUniformMatrix2fv(loc,1,false,v); } +inline void glUniformMatrix3fv_ei (GLint loc, const float* v) { glUniformMatrix3fv(loc,1,false,v); } +inline void glUniformMatrix4fv_ei (GLint loc, const float* v) { glUniformMatrix4fv(loc,1,false,v); } + + +EIGEN_GL_FUNC1_DECLARATION (glUniform,GLint,const) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 2,2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 2,2iv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 3,3fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 3,3iv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,float, 4,4fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,int, 4,4iv_ei) + +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 
2,2,Matrix2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,3,Matrix3fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,4,Matrix4fv_ei) + +#endif + +#ifdef GL_VERSION_2_1 + +inline void glUniformMatrix2x3fv_ei(GLint loc, const float* v) { glUniformMatrix2x3fv(loc,1,false,v); } +inline void glUniformMatrix3x2fv_ei(GLint loc, const float* v) { glUniformMatrix3x2fv(loc,1,false,v); } +inline void glUniformMatrix2x4fv_ei(GLint loc, const float* v) { glUniformMatrix2x4fv(loc,1,false,v); } +inline void glUniformMatrix4x2fv_ei(GLint loc, const float* v) { glUniformMatrix4x2fv(loc,1,false,v); } +inline void glUniformMatrix3x4fv_ei(GLint loc, const float* v) { glUniformMatrix3x4fv(loc,1,false,v); } +inline void glUniformMatrix4x3fv_ei(GLint loc, const float* v) { glUniformMatrix4x3fv(loc,1,false,v); } + +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,3,Matrix2x3fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,2,Matrix3x2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 2,4,Matrix2x4fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,2,Matrix4x2fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 3,4,Matrix3x4fv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_MAT(glUniform,GLint,const,float, 4,3,Matrix4x3fv_ei) + +#endif + +#ifdef GL_VERSION_3_0 + +inline void glUniform2uiv_ei (GLint loc, const unsigned int* v) { glUniform2uiv(loc,1,v); } +inline void glUniform3uiv_ei (GLint loc, const unsigned int* v) { glUniform3uiv(loc,1,v); } +inline void glUniform4uiv_ei (GLint loc, const unsigned int* v) { glUniform4uiv(loc,1,v); } + +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 2,2uiv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 3,3uiv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,unsigned int, 4,4uiv_ei) + +#endif + +#ifdef GL_ARB_gpu_shader_fp64 +inline void glUniform2dv_ei (GLint 
loc, const double* v) { glUniform2dv(loc,1,v); } +inline void glUniform3dv_ei (GLint loc, const double* v) { glUniform3dv(loc,1,v); } +inline void glUniform4dv_ei (GLint loc, const double* v) { glUniform4dv(loc,1,v); } + +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 2,2dv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 3,3dv_ei) +EIGEN_GL_FUNC1_SPECIALIZATION_VEC(glUniform,GLint,const,double, 4,4dv_ei) +#endif + + +//@} + +} + +#endif // EIGEN_OPENGL_MODULE diff --git a/external/unsupported/Eigen/Polynomials b/external/unsupported/Eigen/Polynomials new file mode 100644 index 0000000..32ce2a2 --- /dev/null +++ b/external/unsupported/Eigen/Polynomials @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_POLYNOMIALS_MODULE_H +#define EIGEN_POLYNOMIALS_MODULE_H + +#include "../../Eigen/Core" + +#include "../../Eigen/Eigenvalues" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module +#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) + #ifndef EIGEN_HIDE_HEAVY_CODE + #define EIGEN_HIDE_HEAVY_CODE + #endif +#elif defined EIGEN_HIDE_HEAVY_CODE + #undef EIGEN_HIDE_HEAVY_CODE +#endif + +/** + * \defgroup Polynomials_Module Polynomials module + * \brief This module provides a QR based polynomial solver. + * + * To use this module, add + * \code + * #include + * \endcode + * at the start of your source file. 
+ */ + +#include "src/Polynomials/PolynomialUtils.h" +#include "src/Polynomials/Companion.h" +#include "src/Polynomials/PolynomialSolver.h" + +/** + \page polynomials Polynomials defines functions for dealing with polynomials + and a QR based polynomial solver. + \ingroup Polynomials_Module + + The remainder of the page documents first the functions for evaluating, computing + polynomials, computing estimates about polynomials and next the QR based polynomial + solver. + + \section polynomialUtils convenient functions to deal with polynomials + \subsection roots_to_monicPolynomial + The function + \code + void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) + \endcode + computes the coefficients \f$ a_i \f$ of + + \f$ p(x) = a_0 + a_{1}x + ... + a_{n-1}x^{n-1} + x^n \f$ + + where \f$ p \f$ is known through its roots i.e. \f$ p(x) = (x-r_1)(x-r_2)...(x-r_n) \f$. + + \subsection poly_eval + The function + \code + T poly_eval( const Polynomials& poly, const T& x ) + \endcode + evaluates a polynomial at a given point using stabilized Hörner method. + + The following code: first computes the coefficients in the monomial basis of the monic polynomial that has the provided roots; + then, it evaluates the computed polynomial, using a stabilized Hörner method. + + \include PolynomialUtils1.cpp + Output: \verbinclude PolynomialUtils1.out + + \subsection Cauchy bounds + The function + \code + Real cauchy_max_bound( const Polynomial& poly ) + \endcode + provides a maximum bound (the Cauchy one: \f$C(p)\f$) for the absolute value of a root of the given polynomial i.e. + \f$ \forall r_i \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, + \f$ |r_i| \le C(p) = \sum_{k=0}^{d} \left | \frac{a_k}{a_d} \right | \f$ + The leading coefficient \f$ p \f$: should be non zero \f$a_d \neq 0\f$. 
+ + + The function + \code + Real cauchy_min_bound( const Polynomial& poly ) + \endcode + provides a minimum bound (the Cauchy one: \f$c(p)\f$) for the absolute value of a non zero root of the given polynomial i.e. + \f$ \forall r_i \neq 0 \f$ root of \f$ p(x) = \sum_{k=0}^d a_k x^k \f$, + \f$ |r_i| \ge c(p) = \left( \sum_{k=0}^{d} \left | \frac{a_k}{a_0} \right | \right)^{-1} \f$ + + + + + \section QR polynomial solver class + Computes the complex roots of a polynomial by computing the eigenvalues of the associated companion matrix with the QR algorithm. + + The roots of \f$ p(x) = a_0 + a_1 x + a_2 x^2 + a_{3} x^3 + x^4 \f$ are the eigenvalues of + \f$ + \left [ + \begin{array}{cccc} + 0 & 0 & 0 & a_0 \\ + 1 & 0 & 0 & a_1 \\ + 0 & 1 & 0 & a_2 \\ + 0 & 0 & 1 & a_3 + \end{array} \right ] + \f$ + + However, the QR algorithm is not guaranteed to converge when there are several eigenvalues with same modulus. + + Therefore the current polynomial solver is guaranteed to provide a correct result only when the complex roots \f$r_1,r_2,...,r_d\f$ have distinct moduli i.e. + + \f$ \forall i,j \in [1;d],~ \| r_i \| \neq \| r_j \| \f$. + + With 32bit (float) floating types this problem shows up frequently. + However, almost always, correct accuracy is reached even in these cases for 64bit + (double) floating types and small polynomial degree (<20). + + \include PolynomialSolver1.cpp + + In the above example: + + -# a simple use of the polynomial solver is shown; + -# the accuracy problem with the QR algorithm is presented: a polynomial with almost conjugate roots is provided to the solver. + Those roots have almost same module therefore the QR algorithm failed to converge: the accuracy + of the last root is bad; + -# a simple way to circumvent the problem is shown: use doubles instead of floats. 
+ + Output: \verbinclude PolynomialSolver1.out +*/ + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_POLYNOMIALS_MODULE_H diff --git a/external/unsupported/Eigen/Skyline b/external/unsupported/Eigen/Skyline new file mode 100644 index 0000000..ebdf143 --- /dev/null +++ b/external/unsupported/Eigen/Skyline @@ -0,0 +1,39 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SKYLINE_MODULE_H +#define EIGEN_SKYLINE_MODULE_H + + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include + +/** + * \defgroup Skyline_Module Skyline module + * + * + * + * + */ + +#include "src/Skyline/SkylineUtil.h" +#include "src/Skyline/SkylineMatrixBase.h" +#include "src/Skyline/SkylineStorage.h" +#include "src/Skyline/SkylineMatrix.h" +#include "src/Skyline/SkylineInplaceLU.h" +#include "src/Skyline/SkylineProduct.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SKYLINE_MODULE_H diff --git a/external/unsupported/Eigen/SparseExtra b/external/unsupported/Eigen/SparseExtra new file mode 100644 index 0000000..ba5cbd6 --- /dev/null +++ b/external/unsupported/Eigen/SparseExtra @@ -0,0 +1,54 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPARSE_EXTRA_MODULE_H +#define EIGEN_SPARSE_EXTRA_MODULE_H + +#include "../../Eigen/Sparse" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include +#include +#include +#include + +#ifdef EIGEN_GOOGLEHASH_SUPPORT + #include + #include +#endif + +/** + * \defgroup SparseExtra_Module SparseExtra module + * + * This module contains some experimental features extending the sparse module. + * + * \code + * #include + * \endcode + */ + + +#include "src/SparseExtra/DynamicSparseMatrix.h" +#include "src/SparseExtra/BlockOfDynamicSparseMatrix.h" +#include "src/SparseExtra/RandomSetter.h" + +#include "src/SparseExtra/MarketIO.h" + +#if !defined(_WIN32) +#include +#include "src/SparseExtra/MatrixMarketIterator.h" +#endif + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSE_EXTRA_MODULE_H diff --git a/external/unsupported/Eigen/SpecialFunctions b/external/unsupported/Eigen/SpecialFunctions new file mode 100644 index 0000000..f6a2460 --- /dev/null +++ b/external/unsupported/Eigen/SpecialFunctions @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPECIALFUNCTIONS_MODULE +#define EIGEN_SPECIALFUNCTIONS_MODULE + +#include + +#include "../../Eigen/Core" + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +namespace Eigen { + +/** + * \defgroup SpecialFunctions_Module Special math functions module + * + * This module features additional coefficient-wise math functions available + * within the numext:: namespace for the scalar version, and as method and/or free + * functions of Array. 
Those include: + * + * - erf + * - erfc + * - lgamma + * - igamma + * - igamma_der_a + * - gamma_sample_der_alpha + * - igammac + * - digamma + * - ndtri + * - polygamma + * - zeta + * - betainc + * + * Bessel Functions + * - bessel_i0 + * - bessel_i0e + * - bessel_i1 + * - bessel_i1e + * - bessel_j0 + * - bessel_j1 + * - bessel_k0 + * - bessel_k0e + * - bessel_k1 + * - bessel_k1e + * - bessel_y0 + * - bessel_y1 + * + * \code + * #include + * \endcode + */ +//@{ + +} + +#include "src/SpecialFunctions/BesselFunctionsImpl.h" +#include "src/SpecialFunctions/BesselFunctionsBFloat16.h" +#include "src/SpecialFunctions/BesselFunctionsHalf.h" +#include "src/SpecialFunctions/BesselFunctionsPacketMath.h" +#include "src/SpecialFunctions/BesselFunctionsFunctors.h" +#include "src/SpecialFunctions/BesselFunctionsArrayAPI.h" +#include "src/SpecialFunctions/SpecialFunctionsImpl.h" +#if defined(EIGEN_HIPCC) +#include "src/SpecialFunctions/HipVectorCompatibility.h" +#endif +#include "src/SpecialFunctions/SpecialFunctionsBFloat16.h" +#include "src/SpecialFunctions/SpecialFunctionsHalf.h" +#include "src/SpecialFunctions/SpecialFunctionsPacketMath.h" +#include "src/SpecialFunctions/SpecialFunctionsFunctors.h" +#include "src/SpecialFunctions/SpecialFunctionsArrayAPI.h" + +#if defined EIGEN_VECTORIZE_AVX512 + #include "src/SpecialFunctions/arch/AVX/BesselFunctions.h" + #include "src/SpecialFunctions/arch/AVX/SpecialFunctions.h" + #include "src/SpecialFunctions/arch/AVX512/BesselFunctions.h" + #include "src/SpecialFunctions/arch/AVX512/SpecialFunctions.h" +#elif defined EIGEN_VECTORIZE_AVX + #include "src/SpecialFunctions/arch/AVX/BesselFunctions.h" + #include "src/SpecialFunctions/arch/AVX/SpecialFunctions.h" +#elif defined EIGEN_VECTORIZE_NEON + #include "src/SpecialFunctions/arch/NEON/BesselFunctions.h" + #include "src/SpecialFunctions/arch/NEON/SpecialFunctions.h" +#endif + +#if defined EIGEN_VECTORIZE_GPU + #include "src/SpecialFunctions/arch/GPU/SpecialFunctions.h" +#endif + 
+namespace Eigen { +//@} +} + + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPECIALFUNCTIONS_MODULE diff --git a/external/unsupported/Eigen/Splines b/external/unsupported/Eigen/Splines new file mode 100644 index 0000000..2ca5813 --- /dev/null +++ b/external/unsupported/Eigen/Splines @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPLINES_MODULE_H +#define EIGEN_SPLINES_MODULE_H + +namespace Eigen +{ +/** + * \defgroup Splines_Module Spline and spline fitting module + * + * This module provides a simple multi-dimensional spline class while + * offering most basic functionality to fit a spline to point sets. + * + * \code + * #include + * \endcode + */ +} + +#include "../../Eigen/src/Core/util/DisableStupidWarnings.h" + +#include "src/Splines/SplineFwd.h" +#include "src/Splines/Spline.h" +#include "src/Splines/SplineFitting.h" + +#include "../../Eigen/src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPLINES_MODULE_H diff --git a/external/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h b/external/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h new file mode 100644 index 0000000..33b6c39 --- /dev/null +++ b/external/unsupported/Eigen/src/AutoDiff/AutoDiffJacobian.h @@ -0,0 +1,108 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_AUTODIFF_JACOBIAN_H +#define EIGEN_AUTODIFF_JACOBIAN_H + +namespace Eigen +{ + +template class AutoDiffJacobian : public Functor +{ +public: + AutoDiffJacobian() : Functor() {} + AutoDiffJacobian(const Functor& f) : Functor(f) {} + + // forward constructors +#if EIGEN_HAS_VARIADIC_TEMPLATES + template + AutoDiffJacobian(const T& ...Values) : Functor(Values...) {} +#else + template + AutoDiffJacobian(const T0& a0) : Functor(a0) {} + template + AutoDiffJacobian(const T0& a0, const T1& a1) : Functor(a0, a1) {} + template + AutoDiffJacobian(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2) {} +#endif + + typedef typename Functor::InputType InputType; + typedef typename Functor::ValueType ValueType; + typedef typename ValueType::Scalar Scalar; + + enum { + InputsAtCompileTime = InputType::RowsAtCompileTime, + ValuesAtCompileTime = ValueType::RowsAtCompileTime + }; + + typedef Matrix JacobianType; + typedef typename JacobianType::Index Index; + + typedef Matrix DerivativeType; + typedef AutoDiffScalar ActiveScalar; + + typedef Matrix ActiveInput; + typedef Matrix ActiveValue; + +#if EIGEN_HAS_VARIADIC_TEMPLATES + // Some compilers don't accept variadic parameters after a default parameter, + // i.e., we can't just write _jac=0 but we need to overload operator(): + EIGEN_STRONG_INLINE + void operator() (const InputType& x, ValueType* v) const + { + this->operator()(x, v, 0); + } + template + void operator() (const InputType& x, ValueType* v, JacobianType* _jac, + const ParamsType&... 
Params) const +#else + void operator() (const InputType& x, ValueType* v, JacobianType* _jac=0) const +#endif + { + eigen_assert(v!=0); + + if (!_jac) + { +#if EIGEN_HAS_VARIADIC_TEMPLATES + Functor::operator()(x, v, Params...); +#else + Functor::operator()(x, v); +#endif + return; + } + + JacobianType& jac = *_jac; + + ActiveInput ax = x.template cast(); + ActiveValue av(jac.rows()); + + if(InputsAtCompileTime==Dynamic) + for (Index j=0; j +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_AUTODIFF_SCALAR_H +#define EIGEN_AUTODIFF_SCALAR_H + +namespace Eigen { + +namespace internal { + +template +struct make_coherent_impl { + static void run(A&, B&) {} +}; + +// resize a to match b is a.size()==0, and conversely. +template +void make_coherent(const A& a, const B&b) +{ + make_coherent_impl::run(a.const_cast_derived(), b.const_cast_derived()); +} + +template struct auto_diff_special_op; + +} // end namespace internal + +template class AutoDiffScalar; + +template +inline AutoDiffScalar MakeAutoDiffScalar(const typename NewDerType::Scalar& value, const NewDerType &der) { + return AutoDiffScalar(value,der); +} + +/** \class AutoDiffScalar + * \brief A scalar type replacement with automatic differentiation capability + * + * \param DerivativeType the vector type used to store/represent the derivatives. The base scalar type + * as well as the number of derivatives to compute are determined from this type. + * Typical choices include, e.g., \c Vector4f for 4 derivatives, or \c VectorXf + * if the number of derivatives is not known at compile time, and/or, the number + * of derivatives is large. + * Note that DerivativeType can also be a reference (e.g., \c VectorXf&) to wrap a + * existing vector into an AutoDiffScalar. + * Finally, DerivativeType can also be any Eigen compatible expression. 
+ * + * This class represents a scalar value while tracking its respective derivatives using Eigen's expression + * template mechanism. + * + * It supports the following list of global math function: + * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos, + * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos, + * - internal::conj, internal::real, internal::imag, numext::abs2. + * + * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However, + * in that case, the expression template mechanism only occurs at the top Matrix level, + * while derivatives are computed right away. + * + */ + +template +class AutoDiffScalar + : public internal::auto_diff_special_op + ::type>::Scalar, + typename NumTraits::type>::Scalar>::Real>::value> +{ + public: + typedef internal::auto_diff_special_op + ::type>::Scalar, + typename NumTraits::type>::Scalar>::Real>::value> Base; + typedef typename internal::remove_all::type DerType; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real Real; + + using Base::operator+; + using Base::operator*; + + /** Default constructor without any initialization. */ + AutoDiffScalar() {} + + /** Constructs an active scalar from its \a value, + and initializes the \a nbDer derivatives such that it corresponds to the \a derNumber -th variable */ + AutoDiffScalar(const Scalar& value, int nbDer, int derNumber) + : m_value(value), m_derivatives(DerType::Zero(nbDer)) + { + m_derivatives.coeffRef(derNumber) = Scalar(1); + } + + /** Conversion from a scalar constant to an active scalar. + * The derivatives are set to zero. 
*/ + /*explicit*/ AutoDiffScalar(const Real& value) + : m_value(value) + { + if(m_derivatives.size()>0) + m_derivatives.setZero(); + } + + /** Constructs an active scalar from its \a value and derivatives \a der */ + AutoDiffScalar(const Scalar& value, const DerType& der) + : m_value(value), m_derivatives(der) + {} + + template + AutoDiffScalar(const AutoDiffScalar& other +#ifndef EIGEN_PARSED_BY_DOXYGEN + , typename internal::enable_if< + internal::is_same::type>::Scalar>::value + && internal::is_convertible::value , void*>::type = 0 +#endif + ) + : m_value(other.value()), m_derivatives(other.derivatives()) + {} + + friend std::ostream & operator << (std::ostream & s, const AutoDiffScalar& a) + { + return s << a.value(); + } + + AutoDiffScalar(const AutoDiffScalar& other) + : m_value(other.value()), m_derivatives(other.derivatives()) + {} + + template + inline AutoDiffScalar& operator=(const AutoDiffScalar& other) + { + m_value = other.value(); + m_derivatives = other.derivatives(); + return *this; + } + + inline AutoDiffScalar& operator=(const AutoDiffScalar& other) + { + m_value = other.value(); + m_derivatives = other.derivatives(); + return *this; + } + + inline AutoDiffScalar& operator=(const Scalar& other) + { + m_value = other; + if(m_derivatives.size()>0) + m_derivatives.setZero(); + return *this; + } + +// inline operator const Scalar& () const { return m_value; } +// inline operator Scalar& () { return m_value; } + + inline const Scalar& value() const { return m_value; } + inline Scalar& value() { return m_value; } + + inline const DerType& derivatives() const { return m_derivatives; } + inline DerType& derivatives() { return m_derivatives; } + + inline bool operator< (const Scalar& other) const { return m_value < other; } + inline bool operator<=(const Scalar& other) const { return m_value <= other; } + inline bool operator> (const Scalar& other) const { return m_value > other; } + inline bool operator>=(const Scalar& other) const { return m_value >= 
other; } + inline bool operator==(const Scalar& other) const { return m_value == other; } + inline bool operator!=(const Scalar& other) const { return m_value != other; } + + friend inline bool operator< (const Scalar& a, const AutoDiffScalar& b) { return a < b.value(); } + friend inline bool operator<=(const Scalar& a, const AutoDiffScalar& b) { return a <= b.value(); } + friend inline bool operator> (const Scalar& a, const AutoDiffScalar& b) { return a > b.value(); } + friend inline bool operator>=(const Scalar& a, const AutoDiffScalar& b) { return a >= b.value(); } + friend inline bool operator==(const Scalar& a, const AutoDiffScalar& b) { return a == b.value(); } + friend inline bool operator!=(const Scalar& a, const AutoDiffScalar& b) { return a != b.value(); } + + template inline bool operator< (const AutoDiffScalar& b) const { return m_value < b.value(); } + template inline bool operator<=(const AutoDiffScalar& b) const { return m_value <= b.value(); } + template inline bool operator> (const AutoDiffScalar& b) const { return m_value > b.value(); } + template inline bool operator>=(const AutoDiffScalar& b) const { return m_value >= b.value(); } + template inline bool operator==(const AutoDiffScalar& b) const { return m_value == b.value(); } + template inline bool operator!=(const AutoDiffScalar& b) const { return m_value != b.value(); } + + inline const AutoDiffScalar operator+(const Scalar& other) const + { + return AutoDiffScalar(m_value + other, m_derivatives); + } + + friend inline const AutoDiffScalar operator+(const Scalar& a, const AutoDiffScalar& b) + { + return AutoDiffScalar(a + b.value(), b.derivatives()); + } + +// inline const AutoDiffScalar operator+(const Real& other) const +// { +// return AutoDiffScalar(m_value + other, m_derivatives); +// } + +// friend inline const AutoDiffScalar operator+(const Real& a, const AutoDiffScalar& b) +// { +// return AutoDiffScalar(a + b.value(), b.derivatives()); +// } + + inline AutoDiffScalar& 
operator+=(const Scalar& other) + { + value() += other; + return *this; + } + + template + inline const AutoDiffScalar,const DerType,const typename internal::remove_all::type> > + operator+(const AutoDiffScalar& other) const + { + internal::make_coherent(m_derivatives, other.derivatives()); + return AutoDiffScalar,const DerType,const typename internal::remove_all::type> >( + m_value + other.value(), + m_derivatives + other.derivatives()); + } + + template + inline AutoDiffScalar& + operator+=(const AutoDiffScalar& other) + { + (*this) = (*this) + other; + return *this; + } + + inline const AutoDiffScalar operator-(const Scalar& b) const + { + return AutoDiffScalar(m_value - b, m_derivatives); + } + + friend inline const AutoDiffScalar, const DerType> > + operator-(const Scalar& a, const AutoDiffScalar& b) + { + return AutoDiffScalar, const DerType> > + (a - b.value(), -b.derivatives()); + } + + inline AutoDiffScalar& operator-=(const Scalar& other) + { + value() -= other; + return *this; + } + + template + inline const AutoDiffScalar, const DerType,const typename internal::remove_all::type> > + operator-(const AutoDiffScalar& other) const + { + internal::make_coherent(m_derivatives, other.derivatives()); + return AutoDiffScalar, const DerType,const typename internal::remove_all::type> >( + m_value - other.value(), + m_derivatives - other.derivatives()); + } + + template + inline AutoDiffScalar& + operator-=(const AutoDiffScalar& other) + { + *this = *this - other; + return *this; + } + + inline const AutoDiffScalar, const DerType> > + operator-() const + { + return AutoDiffScalar, const DerType> >( + -m_value, + -m_derivatives); + } + + inline const AutoDiffScalar + operator*(const Scalar& other) const + { + return MakeAutoDiffScalar(m_value * other, m_derivatives * other); + } + + friend inline const AutoDiffScalar + operator*(const Scalar& other, const AutoDiffScalar& a) + { + return MakeAutoDiffScalar(a.value() * other, a.derivatives() * other); + } + +// inline 
const AutoDiffScalar, DerType>::Type > +// operator*(const Real& other) const +// { +// return AutoDiffScalar, DerType>::Type >( +// m_value * other, +// (m_derivatives * other)); +// } +// +// friend inline const AutoDiffScalar, DerType>::Type > +// operator*(const Real& other, const AutoDiffScalar& a) +// { +// return AutoDiffScalar, DerType>::Type >( +// a.value() * other, +// a.derivatives() * other); +// } + + inline const AutoDiffScalar + operator/(const Scalar& other) const + { + return MakeAutoDiffScalar(m_value / other, (m_derivatives * (Scalar(1)/other))); + } + + friend inline const AutoDiffScalar + operator/(const Scalar& other, const AutoDiffScalar& a) + { + return MakeAutoDiffScalar(other / a.value(), a.derivatives() * (Scalar(-other) / (a.value()*a.value()))); + } + +// inline const AutoDiffScalar, DerType>::Type > +// operator/(const Real& other) const +// { +// return AutoDiffScalar, DerType>::Type >( +// m_value / other, +// (m_derivatives * (Real(1)/other))); +// } +// +// friend inline const AutoDiffScalar, DerType>::Type > +// operator/(const Real& other, const AutoDiffScalar& a) +// { +// return AutoDiffScalar, DerType>::Type >( +// other / a.value(), +// a.derivatives() * (-Real(1)/other)); +// } + + template + inline const AutoDiffScalar EIGEN_COMMA + const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product) EIGEN_COMMA + const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all::type,Scalar,product) >,Scalar,product) > + operator/(const AutoDiffScalar& other) const + { + internal::make_coherent(m_derivatives, other.derivatives()); + return MakeAutoDiffScalar( + m_value / other.value(), + ((m_derivatives * other.value()) - (other.derivatives() * m_value)) + * (Scalar(1)/(other.value()*other.value()))); + } + + template + inline const AutoDiffScalar, + const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType,Scalar,product), + const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename 
internal::remove_all::type,Scalar,product) > > + operator*(const AutoDiffScalar& other) const + { + internal::make_coherent(m_derivatives, other.derivatives()); + return MakeAutoDiffScalar( + m_value * other.value(), + (m_derivatives * other.value()) + (other.derivatives() * m_value)); + } + + inline AutoDiffScalar& operator*=(const Scalar& other) + { + *this = *this * other; + return *this; + } + + template + inline AutoDiffScalar& operator*=(const AutoDiffScalar& other) + { + *this = *this * other; + return *this; + } + + inline AutoDiffScalar& operator/=(const Scalar& other) + { + *this = *this / other; + return *this; + } + + template + inline AutoDiffScalar& operator/=(const AutoDiffScalar& other) + { + *this = *this / other; + return *this; + } + + protected: + Scalar m_value; + DerType m_derivatives; + +}; + +namespace internal { + +template +struct auto_diff_special_op +// : auto_diff_scalar_op::Real, +// is_same::Real>::value> +{ + typedef typename remove_all::type DerType; + typedef typename traits::Scalar Scalar; + typedef typename NumTraits::Real Real; + +// typedef auto_diff_scalar_op::Real, +// is_same::Real>::value> Base; + +// using Base::operator+; +// using Base::operator+=; +// using Base::operator-; +// using Base::operator-=; +// using Base::operator*; +// using Base::operator*=; + + const AutoDiffScalar& derived() const { return *static_cast*>(this); } + AutoDiffScalar& derived() { return *static_cast*>(this); } + + + inline const AutoDiffScalar operator+(const Real& other) const + { + return AutoDiffScalar(derived().value() + other, derived().derivatives()); + } + + friend inline const AutoDiffScalar operator+(const Real& a, const AutoDiffScalar& b) + { + return AutoDiffScalar(a + b.value(), b.derivatives()); + } + + inline AutoDiffScalar& operator+=(const Real& other) + { + derived().value() += other; + return derived(); + } + + + inline const AutoDiffScalar >, DerType>::Type > + operator*(const Real& other) const + { + return AutoDiffScalar 
>, DerType>::Type >( + derived().value() * other, + derived().derivatives() * other); + } + + friend inline const AutoDiffScalar >, DerType>::Type > + operator*(const Real& other, const AutoDiffScalar& a) + { + return AutoDiffScalar >, DerType>::Type >( + a.value() * other, + a.derivatives() * other); + } + + inline AutoDiffScalar& operator*=(const Scalar& other) + { + *this = *this * other; + return derived(); + } +}; + +template +struct auto_diff_special_op +{ + void operator*() const; + void operator-() const; + void operator+() const; +}; + +template +void make_coherent_expression(CwiseBinaryOp xpr, const RefType &ref) +{ + make_coherent(xpr.const_cast_derived().lhs(), ref); + make_coherent(xpr.const_cast_derived().rhs(), ref); +} + +template +void make_coherent_expression(const CwiseUnaryOp &xpr, const RefType &ref) +{ + make_coherent(xpr.nestedExpression().const_cast_derived(), ref); +} + +// needed for compilation only +template +void make_coherent_expression(const CwiseNullaryOp &, const RefType &) +{} + +template +struct make_coherent_impl, B> { + typedef Matrix A; + static void run(A& a, B& b) { + if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0)) + { + a.resize(b.size()); + a.setZero(); + } + else if (B::SizeAtCompileTime==Dynamic && a.size()!=0 && b.size()==0) + { + make_coherent_expression(b,a); + } + } +}; + +template +struct make_coherent_impl > { + typedef Matrix B; + static void run(A& a, B& b) { + if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) + { + b.resize(a.size()); + b.setZero(); + } + else if (A::SizeAtCompileTime==Dynamic && b.size()!=0 && a.size()==0) + { + make_coherent_expression(a,b); + } + } +}; + +template +struct make_coherent_impl, + Matrix > { + typedef Matrix A; + typedef Matrix B; + static void run(A& a, B& b) { + if((A_Rows==Dynamic || A_Cols==Dynamic) && (a.size()==0)) + { + a.resize(b.size()); + a.setZero(); + } + else if((B_Rows==Dynamic || B_Cols==Dynamic) && (b.size()==0)) + { + b.resize(a.size()); + 
b.setZero(); + } + } +}; + +} // end namespace internal + +template +struct ScalarBinaryOpTraits,typename DerType::Scalar,BinOp> +{ + typedef AutoDiffScalar ReturnType; +}; + +template +struct ScalarBinaryOpTraits, BinOp> +{ + typedef AutoDiffScalar ReturnType; +}; + + +// The following is an attempt to let Eigen's known about expression template, but that's more tricky! + +// template +// struct ScalarBinaryOpTraits,AutoDiffScalar, BinOp> +// { +// enum { Defined = 1 }; +// typedef AutoDiffScalar ReturnType; +// }; +// +// template +// struct ScalarBinaryOpTraits,AutoDiffScalar, BinOp> +// { +// enum { Defined = 1 };//internal::is_same::value }; +// typedef AutoDiffScalar ReturnType; +// }; + +#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC,CODE) \ + template \ + inline const Eigen::AutoDiffScalar< \ + EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename Eigen::internal::remove_all::type, typename Eigen::internal::traits::type>::Scalar, product) > \ + FUNC(const Eigen::AutoDiffScalar& x) { \ + using namespace Eigen; \ + typedef typename Eigen::internal::traits::type>::Scalar Scalar; \ + EIGEN_UNUSED_VARIABLE(sizeof(Scalar)); \ + CODE; \ + } + +template +struct CleanedUpDerType { + typedef AutoDiffScalar::type::PlainObject> type; +}; + +template +inline const AutoDiffScalar& conj(const AutoDiffScalar& x) { return x; } +template +inline const AutoDiffScalar& real(const AutoDiffScalar& x) { return x; } +template +inline typename DerType::Scalar imag(const AutoDiffScalar&) { return 0.; } +template +inline typename CleanedUpDerType::type (min)(const AutoDiffScalar& x, const T& y) { + typedef typename CleanedUpDerType::type ADS; + return (x <= y ? ADS(x) : ADS(y)); +} +template +inline typename CleanedUpDerType::type (max)(const AutoDiffScalar& x, const T& y) { + typedef typename CleanedUpDerType::type ADS; + return (x >= y ? 
ADS(x) : ADS(y)); +} +template +inline typename CleanedUpDerType::type (min)(const T& x, const AutoDiffScalar& y) { + typedef typename CleanedUpDerType::type ADS; + return (x < y ? ADS(x) : ADS(y)); +} +template +inline typename CleanedUpDerType::type (max)(const T& x, const AutoDiffScalar& y) { + typedef typename CleanedUpDerType::type ADS; + return (x > y ? ADS(x) : ADS(y)); +} +template +inline typename CleanedUpDerType::type (min)(const AutoDiffScalar& x, const AutoDiffScalar& y) { + return (x.value() < y.value() ? x : y); +} +template +inline typename CleanedUpDerType::type (max)(const AutoDiffScalar& x, const AutoDiffScalar& y) { + return (x.value() >= y.value() ? x : y); +} + + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs, + using std::abs; + return Eigen::MakeAutoDiffScalar(abs(x.value()), x.derivatives() * (x.value()<0 ? -1 : 1) );) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs2, + using numext::abs2; + return Eigen::MakeAutoDiffScalar(abs2(x.value()), x.derivatives() * (Scalar(2)*x.value()));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sqrt, + using std::sqrt; + Scalar sqrtx = sqrt(x.value()); + return Eigen::MakeAutoDiffScalar(sqrtx,x.derivatives() * (Scalar(0.5) / sqrtx));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cos, + using std::cos; + using std::sin; + return Eigen::MakeAutoDiffScalar(cos(x.value()), x.derivatives() * (-sin(x.value())));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sin, + using std::sin; + using std::cos; + return Eigen::MakeAutoDiffScalar(sin(x.value()),x.derivatives() * cos(x.value()));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(exp, + using std::exp; + Scalar expx = exp(x.value()); + return Eigen::MakeAutoDiffScalar(expx,x.derivatives() * expx);) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log, + using std::log; + return Eigen::MakeAutoDiffScalar(log(x.value()),x.derivatives() * (Scalar(1)/x.value()));) + +template +inline const Eigen::AutoDiffScalar< +EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(typename internal::remove_all::type,typename 
internal::traits::type>::Scalar,product) > +pow(const Eigen::AutoDiffScalar &x, const typename internal::traits::type>::Scalar &y) +{ + using namespace Eigen; + using std::pow; + return Eigen::MakeAutoDiffScalar(pow(x.value(),y), x.derivatives() * (y * pow(x.value(),y-1))); +} + + +template +inline const AutoDiffScalar::type>::Scalar,Dynamic,1> > +atan2(const AutoDiffScalar& a, const AutoDiffScalar& b) +{ + using std::atan2; + typedef typename internal::traits::type>::Scalar Scalar; + typedef AutoDiffScalar > PlainADS; + PlainADS ret; + ret.value() = atan2(a.value(), b.value()); + + Scalar squared_hypot = a.value() * a.value() + b.value() * b.value(); + + // if (squared_hypot==0) the derivation is undefined and the following results in a NaN: + ret.derivatives() = (a.derivatives() * b.value() - a.value() * b.derivatives()) / squared_hypot; + + return ret; +} + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tan, + using std::tan; + using std::cos; + return Eigen::MakeAutoDiffScalar(tan(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cos(x.value()))));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(asin, + using std::sqrt; + using std::asin; + return Eigen::MakeAutoDiffScalar(asin(x.value()),x.derivatives() * (Scalar(1)/sqrt(1-numext::abs2(x.value()))));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(acos, + using std::sqrt; + using std::acos; + return Eigen::MakeAutoDiffScalar(acos(x.value()),x.derivatives() * (Scalar(-1)/sqrt(1-numext::abs2(x.value()))));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(tanh, + using std::cosh; + using std::tanh; + return Eigen::MakeAutoDiffScalar(tanh(x.value()),x.derivatives() * (Scalar(1)/numext::abs2(cosh(x.value()))));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(sinh, + using std::sinh; + using std::cosh; + return Eigen::MakeAutoDiffScalar(sinh(x.value()),x.derivatives() * cosh(x.value()));) + +EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(cosh, + using std::sinh; + using std::cosh; + return Eigen::MakeAutoDiffScalar(cosh(x.value()),x.derivatives() * 
sinh(x.value()));) + +#undef EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY + +template struct NumTraits > + : NumTraits< typename NumTraits::type::Scalar>::Real > +{ + typedef typename internal::remove_all::type DerTypeCleaned; + typedef AutoDiffScalar::Real,DerTypeCleaned::RowsAtCompileTime,DerTypeCleaned::ColsAtCompileTime, + 0, DerTypeCleaned::MaxRowsAtCompileTime, DerTypeCleaned::MaxColsAtCompileTime> > Real; + typedef AutoDiffScalar NonInteger; + typedef AutoDiffScalar Nested; + typedef typename NumTraits::Literal Literal; + enum{ + RequireInitialization = 1 + }; +}; + +} + +namespace std { + +template +class numeric_limits > + : public numeric_limits {}; + +template +class numeric_limits > + : public numeric_limits {}; + +} // namespace std + +#endif // EIGEN_AUTODIFF_SCALAR_H diff --git a/external/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h b/external/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h new file mode 100644 index 0000000..8c2d048 --- /dev/null +++ b/external/unsupported/Eigen/src/AutoDiff/AutoDiffVector.h @@ -0,0 +1,220 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_AUTODIFF_VECTOR_H +#define EIGEN_AUTODIFF_VECTOR_H + +namespace Eigen { + +/* \class AutoDiffScalar + * \brief A scalar type replacement with automatic differentation capability + * + * \param DerType the vector type used to store/represent the derivatives (e.g. Vector3f) + * + * This class represents a scalar value while tracking its respective derivatives. 
+ * + * It supports the following list of global math function: + * - std::abs, std::sqrt, std::pow, std::exp, std::log, std::sin, std::cos, + * - internal::abs, internal::sqrt, numext::pow, internal::exp, internal::log, internal::sin, internal::cos, + * - internal::conj, internal::real, internal::imag, numext::abs2. + * + * AutoDiffScalar can be used as the scalar type of an Eigen::Matrix object. However, + * in that case, the expression template mechanism only occurs at the top Matrix level, + * while derivatives are computed right away. + * + */ +template +class AutoDiffVector +{ + public: + //typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::Scalar BaseScalar; + typedef AutoDiffScalar > ActiveScalar; + typedef ActiveScalar Scalar; + typedef AutoDiffScalar CoeffType; + typedef typename JacobianType::Index Index; + + inline AutoDiffVector() {} + + inline AutoDiffVector(const ValueType& values) + : m_values(values) + { + m_jacobian.setZero(); + } + + + CoeffType operator[] (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } + const CoeffType operator[] (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } + + CoeffType operator() (Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } + const CoeffType operator() (Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } + + CoeffType coeffRef(Index i) { return CoeffType(m_values[i], m_jacobian.col(i)); } + const CoeffType coeffRef(Index i) const { return CoeffType(m_values[i], m_jacobian.col(i)); } + + Index size() const { return m_values.size(); } + + // FIXME here we could return an expression of the sum + Scalar sum() const { /*std::cerr << "sum \n\n";*/ /*std::cerr << m_jacobian.rowwise().sum() << "\n\n";*/ return Scalar(m_values.sum(), m_jacobian.rowwise().sum()); } + + + inline AutoDiffVector(const ValueType& values, const JacobianType& jac) + : m_values(values), m_jacobian(jac) + {} + + template + inline 
AutoDiffVector(const AutoDiffVector& other) + : m_values(other.values()), m_jacobian(other.jacobian()) + {} + + inline AutoDiffVector(const AutoDiffVector& other) + : m_values(other.values()), m_jacobian(other.jacobian()) + {} + + template + inline AutoDiffVector& operator=(const AutoDiffVector& other) + { + m_values = other.values(); + m_jacobian = other.jacobian(); + return *this; + } + + inline AutoDiffVector& operator=(const AutoDiffVector& other) + { + m_values = other.values(); + m_jacobian = other.jacobian(); + return *this; + } + + inline const ValueType& values() const { return m_values; } + inline ValueType& values() { return m_values; } + + inline const JacobianType& jacobian() const { return m_jacobian; } + inline JacobianType& jacobian() { return m_jacobian; } + + template + inline const AutoDiffVector< + typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, + typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type > + operator+(const AutoDiffVector& other) const + { + return AutoDiffVector< + typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, + typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type >( + m_values + other.values(), + m_jacobian + other.jacobian()); + } + + template + inline AutoDiffVector& + operator+=(const AutoDiffVector& other) + { + m_values += other.values(); + m_jacobian += other.jacobian(); + return *this; + } + + template + inline const AutoDiffVector< + typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, + typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type > + operator-(const AutoDiffVector& other) const + { + return AutoDiffVector< + typename MakeCwiseBinaryOp,ValueType,OtherValueType>::Type, + typename MakeCwiseBinaryOp,JacobianType,OtherJacobianType>::Type >( + m_values - other.values(), + m_jacobian - other.jacobian()); + } + + template + inline AutoDiffVector& + operator-=(const AutoDiffVector& other) + { + m_values -= other.values(); + m_jacobian -= other.jacobian(); 
+ return *this; + } + + inline const AutoDiffVector< + typename MakeCwiseUnaryOp, ValueType>::Type, + typename MakeCwiseUnaryOp, JacobianType>::Type > + operator-() const + { + return AutoDiffVector< + typename MakeCwiseUnaryOp, ValueType>::Type, + typename MakeCwiseUnaryOp, JacobianType>::Type >( + -m_values, + -m_jacobian); + } + + inline const AutoDiffVector< + typename MakeCwiseUnaryOp, ValueType>::Type, + typename MakeCwiseUnaryOp, JacobianType>::Type> + operator*(const BaseScalar& other) const + { + return AutoDiffVector< + typename MakeCwiseUnaryOp, ValueType>::Type, + typename MakeCwiseUnaryOp, JacobianType>::Type >( + m_values * other, + m_jacobian * other); + } + + friend inline const AutoDiffVector< + typename MakeCwiseUnaryOp, ValueType>::Type, + typename MakeCwiseUnaryOp, JacobianType>::Type > + operator*(const Scalar& other, const AutoDiffVector& v) + { + return AutoDiffVector< + typename MakeCwiseUnaryOp, ValueType>::Type, + typename MakeCwiseUnaryOp, JacobianType>::Type >( + v.values() * other, + v.jacobian() * other); + } + +// template +// inline const AutoDiffVector< +// CwiseBinaryOp, ValueType, OtherValueType> +// CwiseBinaryOp, +// CwiseUnaryOp, JacobianType>, +// CwiseUnaryOp, OtherJacobianType> > > +// operator*(const AutoDiffVector& other) const +// { +// return AutoDiffVector< +// CwiseBinaryOp, ValueType, OtherValueType> +// CwiseBinaryOp, +// CwiseUnaryOp, JacobianType>, +// CwiseUnaryOp, OtherJacobianType> > >( +// m_values.cwise() * other.values(), +// (m_jacobian * other.values()) + (m_values * other.jacobian())); +// } + + inline AutoDiffVector& operator*=(const Scalar& other) + { + m_values *= other; + m_jacobian *= other; + return *this; + } + + template + inline AutoDiffVector& operator*=(const AutoDiffVector& other) + { + *this = *this * other; + return *this; + } + + protected: + ValueType m_values; + JacobianType m_jacobian; + +}; + +} + +#endif // EIGEN_AUTODIFF_VECTOR_H diff --git 
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Ilya Baran <ibaran@mit.edu>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_BVALGORITHMS_H
#define EIGEN_BVALGORITHMS_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_PARSED_BY_DOXYGEN
// Runs the intersection query encapsulated by \a intersector on the subtree
// of \a tree rooted at \a root, using an explicit stack instead of recursion.
// Returns true iff the intersector requested early termination.
template<typename BVH, typename Intersector>
bool intersect_helper(const BVH &tree, Intersector &intersector, typename BVH::Index root)
{
  typedef typename BVH::Index Index;
  typedef typename BVH::VolumeIterator VolIter;
  typedef typename BVH::ObjectIterator ObjIter;

  VolIter vBegin = VolIter(), vEnd = VolIter();
  ObjIter oBegin = ObjIter(), oEnd = ObjIter();

  std::vector<Index> todo(1, root);

  while(!todo.empty()) {
    tree.getChildren(todo.back(), vBegin, vEnd, oBegin, oEnd);
    todo.pop_back();

    for(; vBegin != vEnd; ++vBegin) //go through child volumes
      if(intersector.intersectVolume(tree.getVolume(*vBegin)))
        todo.push_back(*vBegin);

    for(; oBegin != oEnd; ++oBegin) //go through child objects
      if(intersector.intersectObject(*oBegin))
        return true; //intersector said to stop query
  }
  return false;
}
#endif //not EIGEN_PARSED_BY_DOXYGEN

// Adapts a two-tree intersector into a one-tree intersector by fixing its
// second argument to a stored object of the second tree.
template<typename Volume1, typename Object1, typename Object2, typename Intersector>
struct intersector_helper1
{
  intersector_helper1(const Object2 &inStored, Intersector &in) : stored(inStored), intersector(in) {}
  bool intersectVolume(const Volume1 &vol) { return intersector.intersectVolumeObject(vol, stored); }
  bool intersectObject(const Object1 &obj) { return intersector.intersectObjectObject(obj, stored); }
  Object2 stored;
  Intersector &intersector;
private:
  intersector_helper1& operator=(const intersector_helper1&); //not assignable (holds a reference)
};

// Mirror of intersector_helper1 with the roles of the two trees swapped.
template<typename Volume2, typename Object2, typename Object1, typename Intersector>
struct intersector_helper2
{
  intersector_helper2(const Object1 &inStored, Intersector &in) : stored(inStored), intersector(in) {}
  bool intersectVolume(const Volume2 &vol) { return intersector.intersectObjectVolume(stored, vol); }
  bool intersectObject(const Object2 &obj) { return intersector.intersectObjectObject(stored, obj); }
  Object1 stored;
  Intersector &intersector;
private:
  intersector_helper2& operator=(const intersector_helper2&); //not assignable (holds a reference)
};

} // end namespace internal

/** Given a BVH, runs the query encapsulated by \a intersector.
  * The Intersector type must provide the following members: \code
     bool intersectVolume(const BVH::Volume &volume) //returns true if volume intersects the query
     bool intersectObject(const BVH::Object &object) //returns true if the search should terminate immediately
    \endcode
  */
template<typename BVH, typename Intersector>
void BVIntersect(const BVH &tree, Intersector &intersector)
{
  internal::intersect_helper(tree, intersector, tree.getRootIndex());
}

/** Given two BVH's, runs the query on their Cartesian product encapsulated by \a intersector.
  * The Intersector type must provide the following members: \code
     bool intersectVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2) //returns true if product of volumes intersects the query
     bool intersectVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2) //returns true if the volume-object product intersects the query
     bool intersectObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2) //returns true if the volume-object product intersects the query
     bool intersectObjectObject(const BVH1::Object &o1, const BVH2::Object &o2) //returns true if the search should terminate immediately
    \endcode
  */
template<typename BVH1, typename BVH2, typename Intersector>
void BVIntersect(const BVH1 &tree1, const BVH2 &tree2, Intersector &intersector) //TODO: tandem descent when it makes sense
{
  typedef typename BVH1::Index Index1;
  typedef typename BVH2::Index Index2;
  typedef internal::intersector_helper1<typename BVH1::Volume, typename BVH1::Object, typename BVH2::Object, Intersector> Helper1;
  typedef internal::intersector_helper2<typename BVH2::Volume, typename BVH2::Object, typename BVH1::Object, Intersector> Helper2;
  typedef typename BVH1::VolumeIterator VolIter1;
  typedef typename BVH1::ObjectIterator ObjIter1;
  typedef typename BVH2::VolumeIterator VolIter2;
  typedef typename BVH2::ObjectIterator ObjIter2;

  VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1();
  ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1();
  VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2();
  ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2();

  std::vector<std::pair<Index1, Index2> > todo(1, std::make_pair(tree1.getRootIndex(), tree2.getRootIndex()));

  while(!todo.empty()) {
    tree1.getChildren(todo.back().first, vBegin1, vEnd1, oBegin1, oEnd1);
    tree2.getChildren(todo.back().second, vBegin2, vEnd2, oBegin2, oEnd2);
    todo.pop_back();

    for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree
      const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1);
      for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
        if(intersector.intersectVolumeVolume(vol1, tree2.getVolume(*vCur2)))
          todo.push_back(std::make_pair(*vBegin1, *vCur2));
      }

      for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
        Helper1 helper(*oCur2, intersector);
        if(internal::intersect_helper(tree1, helper, *vBegin1))
          return; //intersector said to stop query
      }
    }

    for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
      for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
        Helper2 helper(*oBegin1, intersector);
        if(internal::intersect_helper(tree2, helper, *vCur2))
          return; //intersector said to stop query
      }

      for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
        if(intersector.intersectObjectObject(*oBegin1, *oCur2))
          return; //intersector said to stop query
      }
    }
  }
}

namespace internal {

#ifndef EIGEN_PARSED_BY_DOXYGEN
// Best-first minimization over the subtree rooted at \a root: a min-priority
// queue keyed on each volume's lower bound prunes every branch whose bound is
// not strictly below the running minimum.
template<typename BVH, typename Minimizer>
typename Minimizer::Scalar minimize_helper(const BVH &tree, Minimizer &minimizer, typename BVH::Index root, typename Minimizer::Scalar minimum)
{
  typedef typename Minimizer::Scalar Scalar;
  typedef typename BVH::Index Index;
  typedef std::pair<Scalar, Index> QueueElement; //first element is priority
  typedef typename BVH::VolumeIterator VolIter;
  typedef typename BVH::ObjectIterator ObjIter;

  VolIter vBegin = VolIter(), vEnd = VolIter();
  ObjIter oBegin = ObjIter(), oEnd = ObjIter();
  std::priority_queue<QueueElement, std::vector<QueueElement>, std::greater<QueueElement> > todo; //smallest is at the top

  todo.push(std::make_pair(Scalar(), root));

  while(!todo.empty()) {
    tree.getChildren(todo.top().second, vBegin, vEnd, oBegin, oEnd);
    todo.pop();

    for(; oBegin != oEnd; ++oBegin) //go through child objects
      minimum = (std::min)(minimum, minimizer.minimumOnObject(*oBegin));

    for(; vBegin != vEnd; ++vBegin) { //go through child volumes
      Scalar val = minimizer.minimumOnVolume(tree.getVolume(*vBegin));
      if(val < minimum)
        todo.push(std::make_pair(val, *vBegin));
    }
  }

  return minimum;
}
#endif //not EIGEN_PARSED_BY_DOXYGEN

// Adapts a two-tree minimizer into a one-tree minimizer by fixing its second
// argument to a stored object of the second tree.
template<typename Volume1, typename Object1, typename Object2, typename Minimizer>
struct minimizer_helper1
{
  typedef typename Minimizer::Scalar Scalar;
  minimizer_helper1(const Object2 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {}
  Scalar minimumOnVolume(const Volume1 &vol) { return minimizer.minimumOnVolumeObject(vol, stored); }
  Scalar minimumOnObject(const Object1 &obj) { return minimizer.minimumOnObjectObject(obj, stored); }
  Object2 stored;
  Minimizer &minimizer;
private:
  minimizer_helper1& operator=(const minimizer_helper1&); //not assignable (holds a reference)
};

// Mirror of minimizer_helper1 with the roles of the two trees swapped.
template<typename Volume2, typename Object2, typename Object1, typename Minimizer>
struct minimizer_helper2
{
  typedef typename Minimizer::Scalar Scalar;
  minimizer_helper2(const Object1 &inStored, Minimizer &m) : stored(inStored), minimizer(m) {}
  Scalar minimumOnVolume(const Volume2 &vol) { return minimizer.minimumOnObjectVolume(stored, vol); }
  Scalar minimumOnObject(const Object2 &obj) { return minimizer.minimumOnObjectObject(stored, obj); }
  Object1 stored;
  Minimizer &minimizer;
private:
  minimizer_helper2& operator=(const minimizer_helper2&); //not assignable (holds a reference)
};

} // end namespace internal

/** Given a BVH, runs the query encapsulated by \a minimizer.
  * \returns the minimum value.
  * The Minimizer type must provide the following members: \code
     typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one)
     Scalar minimumOnVolume(const BVH::Volume &volume)
     Scalar minimumOnObject(const BVH::Object &object)
    \endcode
  */
template<typename BVH, typename Minimizer>
typename Minimizer::Scalar BVMinimize(const BVH &tree, Minimizer &minimizer)
{
  return internal::minimize_helper(tree, minimizer, tree.getRootIndex(), (std::numeric_limits<typename Minimizer::Scalar>::max)());
}

/** Given two BVH's, runs the query on their cartesian product encapsulated by \a minimizer.
  * \returns the minimum value.
  * The Minimizer type must provide the following members: \code
     typedef Scalar //the numeric type of what is being minimized--not necessarily the Scalar type of the BVH (if it has one)
     Scalar minimumOnVolumeVolume(const BVH1::Volume &v1, const BVH2::Volume &v2)
     Scalar minimumOnVolumeObject(const BVH1::Volume &v1, const BVH2::Object &o2)
     Scalar minimumOnObjectVolume(const BVH1::Object &o1, const BVH2::Volume &v2)
     Scalar minimumOnObjectObject(const BVH1::Object &o1, const BVH2::Object &o2)
    \endcode
  */
template<typename BVH1, typename BVH2, typename Minimizer>
typename Minimizer::Scalar BVMinimize(const BVH1 &tree1, const BVH2 &tree2, Minimizer &minimizer)
{
  typedef typename Minimizer::Scalar Scalar;
  typedef typename BVH1::Index Index1;
  typedef typename BVH2::Index Index2;
  typedef internal::minimizer_helper1<typename BVH1::Volume, typename BVH1::Object, typename BVH2::Object, Minimizer> Helper1;
  typedef internal::minimizer_helper2<typename BVH2::Volume, typename BVH2::Object, typename BVH1::Object, Minimizer> Helper2;
  typedef std::pair<Scalar, std::pair<Index1, Index2> > QueueElement; //first element is priority
  typedef typename BVH1::VolumeIterator VolIter1;
  typedef typename BVH1::ObjectIterator ObjIter1;
  typedef typename BVH2::VolumeIterator VolIter2;
  typedef typename BVH2::ObjectIterator ObjIter2;

  VolIter1 vBegin1 = VolIter1(), vEnd1 = VolIter1();
  ObjIter1 oBegin1 = ObjIter1(), oEnd1 = ObjIter1();
  VolIter2 vBegin2 = VolIter2(), vEnd2 = VolIter2(), vCur2 = VolIter2();
  ObjIter2 oBegin2 = ObjIter2(), oEnd2 = ObjIter2(), oCur2 = ObjIter2();
  std::priority_queue<QueueElement, std::vector<QueueElement>, std::greater<QueueElement> > todo; //smallest is at the top

  Scalar minimum = (std::numeric_limits<Scalar>::max)();
  todo.push(std::make_pair(Scalar(), std::make_pair(tree1.getRootIndex(), tree2.getRootIndex())));

  while(!todo.empty()) {
    tree1.getChildren(todo.top().second.first, vBegin1, vEnd1, oBegin1, oEnd1);
    tree2.getChildren(todo.top().second.second, vBegin2, vEnd2, oBegin2, oEnd2);
    todo.pop();

    for(; oBegin1 != oEnd1; ++oBegin1) { //go through child objects of first tree
      for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
        minimum = (std::min)(minimum, minimizer.minimumOnObjectObject(*oBegin1, *oCur2));
      }

      for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
        Helper2 helper(*oBegin1, minimizer);
        minimum = (std::min)(minimum, internal::minimize_helper(tree2, helper, *vCur2, minimum));
      }
    }

    for(; vBegin1 != vEnd1; ++vBegin1) { //go through child volumes of first tree
      const typename BVH1::Volume &vol1 = tree1.getVolume(*vBegin1);

      for(oCur2 = oBegin2; oCur2 != oEnd2; ++oCur2) {//go through child objects of second tree
        Helper1 helper(*oCur2, minimizer);
        minimum = (std::min)(minimum, internal::minimize_helper(tree1, helper, *vBegin1, minimum));
      }

      for(vCur2 = vBegin2; vCur2 != vEnd2; ++vCur2) { //go through child volumes of second tree
        Scalar val = minimizer.minimumOnVolumeVolume(vol1, tree2.getVolume(*vCur2));
        if(val < minimum)
          todo.push(std::make_pair(val, std::make_pair(*vBegin1, *vCur2)));
      }
    }
  }
  return minimum;
}

} // end namespace Eigen

#endif // EIGEN_BVALGORITHMS_H
+ +#ifndef KDBVH_H_INCLUDED +#define KDBVH_H_INCLUDED + +namespace Eigen { + +namespace internal { + +//internal pair class for the BVH--used instead of std::pair because of alignment +template +struct vector_int_pair +{ +EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar, Dim) + typedef Matrix VectorType; + + vector_int_pair(const VectorType &v, int i) : first(v), second(i) {} + + VectorType first; + int second; +}; + +//these templates help the tree initializer get the bounding boxes either from a provided +//iterator range or using bounding_box in a unified way +template +struct get_boxes_helper { + void operator()(const ObjectList &objects, BoxIter boxBegin, BoxIter boxEnd, VolumeList &outBoxes) + { + outBoxes.insert(outBoxes.end(), boxBegin, boxEnd); + eigen_assert(outBoxes.size() == objects.size()); + EIGEN_ONLY_USED_FOR_DEBUG(objects); + } +}; + +template +struct get_boxes_helper { + void operator()(const ObjectList &objects, int, int, VolumeList &outBoxes) + { + outBoxes.reserve(objects.size()); + for(int i = 0; i < (int)objects.size(); ++i) + outBoxes.push_back(bounding_box(objects[i])); + } +}; + +} // end namespace internal + + +/** \class KdBVH + * \brief A simple bounding volume hierarchy based on AlignedBox + * + * \param _Scalar The underlying scalar type of the bounding boxes + * \param _Dim The dimension of the space in which the hierarchy lives + * \param _Object The object type that lives in the hierarchy. It must have value semantics. Either bounding_box(_Object) must + * be defined and return an AlignedBox<_Scalar, _Dim> or bounding boxes must be provided to the tree initializer. + * + * This class provides a simple (as opposed to optimized) implementation of a bounding volume hierarchy analogous to a Kd-tree. + * Given a sequence of objects, it computes their bounding boxes, constructs a Kd-tree of their centers + * and builds a BVH with the structure of that Kd-tree. 
When the elements of the tree are too expensive to be copied around, + * it is useful for _Object to be a pointer. + */ +template class KdBVH +{ +public: + enum { Dim = _Dim }; + typedef _Object Object; + typedef std::vector > ObjectList; + typedef _Scalar Scalar; + typedef AlignedBox Volume; + typedef std::vector > VolumeList; + typedef int Index; + typedef const int *VolumeIterator; //the iterators are just pointers into the tree's vectors + typedef const Object *ObjectIterator; + + KdBVH() {} + + /** Given an iterator range over \a Object references, constructs the BVH. Requires that bounding_box(Object) return a Volume. */ + template KdBVH(Iter begin, Iter end) { init(begin, end, 0, 0); } //int is recognized by init as not being an iterator type + + /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, constructs the BVH */ + template KdBVH(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) { init(begin, end, boxBegin, boxEnd); } + + /** Given an iterator range over \a Object references, constructs the BVH, overwriting whatever is in there currently. + * Requires that bounding_box(Object) return a Volume. */ + template void init(Iter begin, Iter end) { init(begin, end, 0, 0); } + + /** Given an iterator range over \a Object references and an iterator range over their bounding boxes, + * constructs the BVH, overwriting whatever is in there currently. 
*/ + template void init(OIter begin, OIter end, BIter boxBegin, BIter boxEnd) + { + objects.clear(); + boxes.clear(); + children.clear(); + + objects.insert(objects.end(), begin, end); + int n = static_cast(objects.size()); + + if(n < 2) + return; //if we have at most one object, we don't need any internal nodes + + VolumeList objBoxes; + VIPairList objCenters; + + //compute the bounding boxes depending on BIter type + internal::get_boxes_helper()(objects, boxBegin, boxEnd, objBoxes); + + objCenters.reserve(n); + boxes.reserve(n - 1); + children.reserve(2 * n - 2); + + for(int i = 0; i < n; ++i) + objCenters.push_back(VIPair(objBoxes[i].center(), i)); + + build(objCenters, 0, n, objBoxes, 0); //the recursive part of the algorithm + + ObjectList tmp(n); + tmp.swap(objects); + for(int i = 0; i < n; ++i) + objects[i] = tmp[objCenters[i].second]; + } + + /** \returns the index of the root of the hierarchy */ + inline Index getRootIndex() const { return (int)boxes.size() - 1; } + + /** Given an \a index of a node, on exit, \a outVBegin and \a outVEnd range over the indices of the volume children of the node + * and \a outOBegin and \a outOEnd range over the object children of the node */ + EIGEN_STRONG_INLINE void getChildren(Index index, VolumeIterator &outVBegin, VolumeIterator &outVEnd, + ObjectIterator &outOBegin, ObjectIterator &outOEnd) const + { //inlining this function should open lots of optimization opportunities to the compiler + if(index < 0) { + outVBegin = outVEnd; + if(!objects.empty()) + outOBegin = &(objects[0]); + outOEnd = outOBegin + objects.size(); //output all objects--necessary when the tree has only one object + return; + } + + int numBoxes = static_cast(boxes.size()); + + int idx = index * 2; + if(children[idx + 1] < numBoxes) { //second index is always bigger + outVBegin = &(children[idx]); + outVEnd = outVBegin + 2; + outOBegin = outOEnd; + } + else if(children[idx] >= numBoxes) { //if both children are objects + outVBegin = outVEnd; + 
outOBegin = &(objects[children[idx] - numBoxes]); + outOEnd = outOBegin + 2; + } else { //if the first child is a volume and the second is an object + outVBegin = &(children[idx]); + outVEnd = outVBegin + 1; + outOBegin = &(objects[children[idx + 1] - numBoxes]); + outOEnd = outOBegin + 1; + } + } + + /** \returns the bounding box of the node at \a index */ + inline const Volume &getVolume(Index index) const + { + return boxes[index]; + } + +private: + typedef internal::vector_int_pair VIPair; + typedef std::vector > VIPairList; + typedef Matrix VectorType; + struct VectorComparator //compares vectors, or more specifically, VIPairs along a particular dimension + { + VectorComparator(int inDim) : dim(inDim) {} + inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; } + int dim; + }; + + //Build the part of the tree between objects[from] and objects[to] (not including objects[to]). + //This routine partitions the objCenters in [from, to) along the dimension dim, recursively constructs + //the two halves, and adds their parent node. 
TODO: a cache-friendlier layout + void build(VIPairList &objCenters, int from, int to, const VolumeList &objBoxes, int dim) + { + eigen_assert(to - from > 1); + if(to - from == 2) { + boxes.push_back(objBoxes[objCenters[from].second].merged(objBoxes[objCenters[from + 1].second])); + children.push_back(from + (int)objects.size() - 1); //there are objects.size() - 1 tree nodes + children.push_back(from + (int)objects.size()); + } + else if(to - from == 3) { + int mid = from + 2; + std::nth_element(objCenters.begin() + from, objCenters.begin() + mid, + objCenters.begin() + to, VectorComparator(dim)); //partition + build(objCenters, from, mid, objBoxes, (dim + 1) % Dim); + int idx1 = (int)boxes.size() - 1; + boxes.push_back(boxes[idx1].merged(objBoxes[objCenters[mid].second])); + children.push_back(idx1); + children.push_back(mid + (int)objects.size() - 1); + } + else { + int mid = from + (to - from) / 2; + nth_element(objCenters.begin() + from, objCenters.begin() + mid, + objCenters.begin() + to, VectorComparator(dim)); //partition + build(objCenters, from, mid, objBoxes, (dim + 1) % Dim); + int idx1 = (int)boxes.size() - 1; + build(objCenters, mid, to, objBoxes, (dim + 1) % Dim); + int idx2 = (int)boxes.size() - 1; + boxes.push_back(boxes[idx1].merged(boxes[idx2])); + children.push_back(idx1); + children.push_back(idx2); + } + } + + std::vector children; //children of x are children[2x] and children[2x+1], indices bigger than boxes.size() index into objects. + VolumeList boxes; + ObjectList objects; +}; + +} // end namespace Eigen + +#endif //KDBVH_H_INCLUDED diff --git a/external/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h b/external/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h new file mode 100644 index 0000000..0fbd847 --- /dev/null +++ b/external/unsupported/Eigen/src/Eigenvalues/ArpackSelfAdjointEigenSolver.h @@ -0,0 +1,790 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2012 David Harmon +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H +#define EIGEN_ARPACKGENERALIZEDSELFADJOINTEIGENSOLVER_H + +#include "../../../../Eigen/Dense" + +namespace Eigen { + +namespace internal { + template struct arpack_wrapper; + template struct OP; +} + + + +template, bool BisSPD=false> +class ArpackGeneralizedSelfAdjointEigenSolver +{ +public: + //typedef typename MatrixSolver::MatrixType MatrixType; + + /** \brief Scalar type for matrices of type \p MatrixType. */ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + + /** \brief Real scalar type for \p MatrixType. + * + * This is just \c Scalar if #Scalar is real (e.g., \c float or + * \c Scalar), and the type of the real part of \c Scalar if #Scalar is + * complex. + */ + typedef typename NumTraits::Real RealScalar; + + /** \brief Type for vector of eigenvalues as returned by eigenvalues(). + * + * This is a column vector with entries of type #RealScalar. + * The length of the vector is the size of \p nbrEigenvalues. + */ + typedef typename internal::plain_col_type::type RealVectorType; + + /** \brief Default constructor. + * + * The default constructor is for cases in which the user intends to + * perform decompositions via compute(). + * + */ + ArpackGeneralizedSelfAdjointEigenSolver() + : m_eivec(), + m_eivalues(), + m_isInitialized(false), + m_eigenvectorsOk(false), + m_nbrConverged(0), + m_nbrIterations(0) + { } + + /** \brief Constructor; computes generalized eigenvalues of given matrix with respect to another matrix. + * + * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will + * computed. By default, the upper triangular part is used, but can be changed + * through the template parameter. 
+ * \param[in] B Self-adjoint matrix for the generalized eigenvalue problem. + * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. + * Must be less than the size of the input matrix, or an error is returned. + * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with + * respective meanings to find the largest magnitude , smallest magnitude, + * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this + * value can contain floating point value in string form, in which case the + * eigenvalues closest to this value will be found. + * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which + * means machine precision. + * + * This constructor calls compute(const MatrixType&, const MatrixType&, Index, string, int, RealScalar) + * to compute the eigenvalues of the matrix \p A with respect to \p B. The eigenvectors are computed if + * \p options equals #ComputeEigenvectors. + * + */ + ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, const MatrixType& B, + Index nbrEigenvalues, std::string eigs_sigma="LM", + int options=ComputeEigenvectors, RealScalar tol=0.0) + : m_eivec(), + m_eivalues(), + m_isInitialized(false), + m_eigenvectorsOk(false), + m_nbrConverged(0), + m_nbrIterations(0) + { + compute(A, B, nbrEigenvalues, eigs_sigma, options, tol); + } + + /** \brief Constructor; computes eigenvalues of given matrix. + * + * \param[in] A Self-adjoint matrix whose eigenvalues / eigenvectors will + * computed. By default, the upper triangular part is used, but can be changed + * through the template parameter. + * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. + * Must be less than the size of the input matrix, or an error is returned. 
+ * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with + * respective meanings to find the largest magnitude , smallest magnitude, + * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this + * value can contain floating point value in string form, in which case the + * eigenvalues closest to this value will be found. + * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which + * means machine precision. + * + * This constructor calls compute(const MatrixType&, Index, string, int, RealScalar) + * to compute the eigenvalues of the matrix \p A. The eigenvectors are computed if + * \p options equals #ComputeEigenvectors. + * + */ + + ArpackGeneralizedSelfAdjointEigenSolver(const MatrixType& A, + Index nbrEigenvalues, std::string eigs_sigma="LM", + int options=ComputeEigenvectors, RealScalar tol=0.0) + : m_eivec(), + m_eivalues(), + m_isInitialized(false), + m_eigenvectorsOk(false), + m_nbrConverged(0), + m_nbrIterations(0) + { + compute(A, nbrEigenvalues, eigs_sigma, options, tol); + } + + + /** \brief Computes generalized eigenvalues / eigenvectors of given matrix using the external ARPACK library. + * + * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed. + * \param[in] B Selfadjoint matrix for generalized eigenvalues. + * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. + * Must be less than the size of the input matrix, or an error is returned. + * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with + * respective meanings to find the largest magnitude , smallest magnitude, + * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this + * value can contain floating point value in string form, in which case the + * eigenvalues closest to this value will be found. 
+ * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which + * means machine precision. + * + * \returns Reference to \c *this + * + * This function computes the generalized eigenvalues of \p A with respect to \p B using ARPACK. The eigenvalues() + * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, + * then the eigenvectors are also computed and can be retrieved by + * calling eigenvectors(). + * + */ + ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, const MatrixType& B, + Index nbrEigenvalues, std::string eigs_sigma="LM", + int options=ComputeEigenvectors, RealScalar tol=0.0); + + /** \brief Computes eigenvalues / eigenvectors of given matrix using the external ARPACK library. + * + * \param[in] A Selfadjoint matrix whose eigendecomposition is to be computed. + * \param[in] nbrEigenvalues The number of eigenvalues / eigenvectors to compute. + * Must be less than the size of the input matrix, or an error is returned. + * \param[in] eigs_sigma String containing either "LM", "SM", "LA", or "SA", with + * respective meanings to find the largest magnitude , smallest magnitude, + * largest algebraic, or smallest algebraic eigenvalues. Alternatively, this + * value can contain floating point value in string form, in which case the + * eigenvalues closest to this value will be found. + * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly. + * \param[in] tol What tolerance to find the eigenvalues to. Default is 0, which + * means machine precision. + * + * \returns Reference to \c *this + * + * This function computes the eigenvalues of \p A using ARPACK. The eigenvalues() + * function can be used to retrieve them. If \p options equals #ComputeEigenvectors, + * then the eigenvectors are also computed and can be retrieved by + * calling eigenvectors(). 
+ * + */ + ArpackGeneralizedSelfAdjointEigenSolver& compute(const MatrixType& A, + Index nbrEigenvalues, std::string eigs_sigma="LM", + int options=ComputeEigenvectors, RealScalar tol=0.0); + + + /** \brief Returns the eigenvectors of given matrix. + * + * \returns A const reference to the matrix whose columns are the eigenvectors. + * + * \pre The eigenvectors have been computed before. + * + * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding + * to eigenvalue number \f$ k \f$ as returned by eigenvalues(). The + * eigenvectors are normalized to have (Euclidean) norm equal to one. If + * this object was used to solve the eigenproblem for the selfadjoint + * matrix \f$ A \f$, then the matrix returned by this function is the + * matrix \f$ V \f$ in the eigendecomposition \f$ A V = D V \f$. + * For the generalized eigenproblem, the matrix returned is the solution \f$ A V = D B V \f$ + * + * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp + * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out + * + * \sa eigenvalues() + */ + const Matrix& eigenvectors() const + { + eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + return m_eivec; + } + + /** \brief Returns the eigenvalues of given matrix. + * + * \returns A const reference to the column vector containing the eigenvalues. + * + * \pre The eigenvalues have been computed before. + * + * The eigenvalues are repeated according to their algebraic multiplicity, + * so there are as many eigenvalues as rows in the matrix. The eigenvalues + * are sorted in increasing order. 
+ * + * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp + * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out + * + * \sa eigenvectors(), MatrixBase::eigenvalues() + */ + const Matrix& eigenvalues() const + { + eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); + return m_eivalues; + } + + /** \brief Computes the positive-definite square root of the matrix. + * + * \returns the positive-definite square root of the matrix + * + * \pre The eigenvalues and eigenvectors of a positive-definite matrix + * have been computed before. + * + * The square root of a positive-definite matrix \f$ A \f$ is the + * positive-definite matrix whose square equals \f$ A \f$. This function + * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the + * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$. + * + * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp + * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out + * + * \sa operatorInverseSqrt(), + * \ref MatrixFunctions_Module "MatrixFunctions Module" + */ + Matrix operatorSqrt() const + { + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint(); + } + + /** \brief Computes the inverse square root of the matrix. + * + * \returns the inverse positive-definite square root of the matrix + * + * \pre The eigenvalues and eigenvectors of a positive-definite matrix + * have been computed before. + * + * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to + * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is + * cheaper than first computing the square root with operatorSqrt() and + * then its inverse with MatrixBase::inverse(). 
+ * + * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp + * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out + * + * \sa operatorSqrt(), MatrixBase::inverse(), + * \ref MatrixFunctions_Module "MatrixFunctions Module" + */ + Matrix operatorInverseSqrt() const + { + eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized."); + eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues."); + return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint(); + } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was successful, \c NoConvergence otherwise. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "ArpackGeneralizedSelfAdjointEigenSolver is not initialized."); + return m_info; + } + + size_t getNbrConvergedEigenValues() const + { return m_nbrConverged; } + + size_t getNbrIterations() const + { return m_nbrIterations; } + +protected: + Matrix m_eivec; + Matrix m_eivalues; + ComputationInfo m_info; + bool m_isInitialized; + bool m_eigenvectorsOk; + + size_t m_nbrConverged; + size_t m_nbrIterations; +}; + + + + + +template +ArpackGeneralizedSelfAdjointEigenSolver& + ArpackGeneralizedSelfAdjointEigenSolver +::compute(const MatrixType& A, Index nbrEigenvalues, + std::string eigs_sigma, int options, RealScalar tol) +{ + MatrixType B(0,0); + compute(A, B, nbrEigenvalues, eigs_sigma, options, tol); + + return *this; +} + + +template +ArpackGeneralizedSelfAdjointEigenSolver& + ArpackGeneralizedSelfAdjointEigenSolver +::compute(const MatrixType& A, const MatrixType& B, Index nbrEigenvalues, + std::string eigs_sigma, int options, RealScalar tol) +{ + eigen_assert(A.cols() == A.rows()); + eigen_assert(B.cols() == B.rows()); + eigen_assert(B.rows() == 0 || A.cols() == B.rows()); + eigen_assert((options &~ (EigVecMask | GenEigMask)) == 0 + && (options & EigVecMask) != 
EigVecMask + && "invalid option parameter"); + + bool isBempty = (B.rows() == 0) || (B.cols() == 0); + + // For clarity, all parameters match their ARPACK name + // + // Always 0 on the first call + // + int ido = 0; + + int n = (int)A.cols(); + + // User options: "LA", "SA", "SM", "LM", "BE" + // + char whch[3] = "LM"; + + // Specifies the shift if iparam[6] = { 3, 4, 5 }, not used if iparam[6] = { 1, 2 } + // + RealScalar sigma = 0.0; + + if (eigs_sigma.length() >= 2 && isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) + { + eigs_sigma[0] = toupper(eigs_sigma[0]); + eigs_sigma[1] = toupper(eigs_sigma[1]); + + // In the following special case we're going to invert the problem, since solving + // for larger magnitude is much much faster + // i.e., if 'SM' is specified, we're going to really use 'LM', the default + // + if (eigs_sigma.substr(0,2) != "SM") + { + whch[0] = eigs_sigma[0]; + whch[1] = eigs_sigma[1]; + } + } + else + { + eigen_assert(false && "Specifying clustered eigenvalues is not yet supported!"); + + // If it's not scalar values, then the user may be explicitly + // specifying the sigma value to cluster the evs around + // + sigma = atof(eigs_sigma.c_str()); + + // If atof fails, it returns 0.0, which is a fine default + // + } + + // "I" means normal eigenvalue problem, "G" means generalized + // + char bmat[2] = "I"; + if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1])) || (!isBempty && !BisSPD)) + bmat[0] = 'G'; + + // Now we determine the mode to use + // + int mode = (bmat[0] == 'G') + 1; + if (eigs_sigma.substr(0,2) == "SM" || !(isalpha(eigs_sigma[0]) && isalpha(eigs_sigma[1]))) + { + // We're going to use shift-and-invert mode, and basically find + // the largest eigenvalues of the inverse operator + // + mode = 3; + } + + // The user-specified number of eigenvalues/vectors to compute + // + int nev = (int)nbrEigenvalues; + + // Allocate space for ARPACK to store the residual + // + Scalar *resid = new 
Scalar[n]; + + // Number of Lanczos vectors, must satisfy nev < ncv <= n + // Note that this indicates that nev != n, and we cannot compute + // all eigenvalues of a mtrix + // + int ncv = std::min(std::max(2*nev, 20), n); + + // The working n x ncv matrix, also store the final eigenvectors (if computed) + // + Scalar *v = new Scalar[n*ncv]; + int ldv = n; + + // Working space + // + Scalar *workd = new Scalar[3*n]; + int lworkl = ncv*ncv+8*ncv; // Must be at least this length + Scalar *workl = new Scalar[lworkl]; + + int *iparam= new int[11]; + iparam[0] = 1; // 1 means we let ARPACK perform the shifts, 0 means we'd have to do it + iparam[2] = std::max(300, (int)std::ceil(2*n/std::max(ncv,1))); + iparam[6] = mode; // The mode, 1 is standard ev problem, 2 for generalized ev, 3 for shift-and-invert + + // Used during reverse communicate to notify where arrays start + // + int *ipntr = new int[11]; + + // Error codes are returned in here, initial value of 0 indicates a random initial + // residual vector is used, any other values means resid contains the initial residual + // vector, possibly from a previous run + // + int info = 0; + + Scalar scale = 1.0; + //if (!isBempty) + //{ + //Scalar scale = B.norm() / std::sqrt(n); + //scale = std::pow(2, std::floor(std::log(scale+1))); + ////M /= scale; + //for (size_t i=0; i<(size_t)B.outerSize(); i++) + // for (typename MatrixType::InnerIterator it(B, i); it; ++it) + // it.valueRef() /= scale; + //} + + MatrixSolver OP; + if (mode == 1 || mode == 2) + { + if (!isBempty) + OP.compute(B); + } + else if (mode == 3) + { + if (sigma == 0.0) + { + OP.compute(A); + } + else + { + // Note: We will never enter here because sigma must be 0.0 + // + if (isBempty) + { + MatrixType AminusSigmaB(A); + for (Index i=0; i::saupd(&ido, bmat, &n, whch, &nev, &tol, resid, + &ncv, v, &ldv, iparam, ipntr, workd, workl, + &lworkl, &info); + + if (ido == -1 || ido == 1) + { + Scalar *in = workd + ipntr[0] - 1; + Scalar *out = workd + ipntr[1] - 
1; + + if (ido == 1 && mode != 2) + { + Scalar *out2 = workd + ipntr[2] - 1; + if (isBempty || mode == 1) + Matrix::Map(out2, n) = Matrix::Map(in, n); + else + Matrix::Map(out2, n) = B * Matrix::Map(in, n); + + in = workd + ipntr[2] - 1; + } + + if (mode == 1) + { + if (isBempty) + { + // OP = A + // + Matrix::Map(out, n) = A * Matrix::Map(in, n); + } + else + { + // OP = L^{-1}AL^{-T} + // + internal::OP::applyOP(OP, A, n, in, out); + } + } + else if (mode == 2) + { + if (ido == 1) + Matrix::Map(in, n) = A * Matrix::Map(in, n); + + // OP = B^{-1} A + // + Matrix::Map(out, n) = OP.solve(Matrix::Map(in, n)); + } + else if (mode == 3) + { + // OP = (A-\sigmaB)B (\sigma could be 0, and B could be I) + // The B * in is already computed and stored at in if ido == 1 + // + if (ido == 1 || isBempty) + Matrix::Map(out, n) = OP.solve(Matrix::Map(in, n)); + else + Matrix::Map(out, n) = OP.solve(B * Matrix::Map(in, n)); + } + } + else if (ido == 2) + { + Scalar *in = workd + ipntr[0] - 1; + Scalar *out = workd + ipntr[1] - 1; + + if (isBempty || mode == 1) + Matrix::Map(out, n) = Matrix::Map(in, n); + else + Matrix::Map(out, n) = B * Matrix::Map(in, n); + } + } while (ido != 99); + + if (info == 1) + m_info = NoConvergence; + else if (info == 3) + m_info = NumericalIssue; + else if (info < 0) + m_info = InvalidInput; + else if (info != 0) + eigen_assert(false && "Unknown ARPACK return value!"); + else + { + // Do we compute eigenvectors or not? 
+ // + int rvec = (options & ComputeEigenvectors) == ComputeEigenvectors; + + // "A" means "All", use "S" to choose specific eigenvalues (not yet supported in ARPACK)) + // + char howmny[2] = "A"; + + // if howmny == "S", specifies the eigenvalues to compute (not implemented in ARPACK) + // + int *select = new int[ncv]; + + // Final eigenvalues + // + m_eivalues.resize(nev, 1); + + internal::arpack_wrapper::seupd(&rvec, howmny, select, m_eivalues.data(), v, &ldv, + &sigma, bmat, &n, whch, &nev, &tol, resid, &ncv, + v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info); + + if (info == -14) + m_info = NoConvergence; + else if (info != 0) + m_info = InvalidInput; + else + { + if (rvec) + { + m_eivec.resize(A.rows(), nev); + for (int i=0; i::project(OP, n, nev, m_eivec.data()); + + m_eigenvectorsOk = true; + } + + m_nbrIterations = iparam[2]; + m_nbrConverged = iparam[4]; + + m_info = Success; + } + + delete[] select; + } + + delete[] v; + delete[] iparam; + delete[] ipntr; + delete[] workd; + delete[] workl; + delete[] resid; + + m_isInitialized = true; + + return *this; +} + + +// Single precision +// +extern "C" void ssaupd_(int *ido, char *bmat, int *n, char *which, + int *nev, float *tol, float *resid, int *ncv, + float *v, int *ldv, int *iparam, int *ipntr, + float *workd, float *workl, int *lworkl, + int *info); + +extern "C" void sseupd_(int *rvec, char *All, int *select, float *d, + float *z, int *ldz, float *sigma, + char *bmat, int *n, char *which, int *nev, + float *tol, float *resid, int *ncv, float *v, + int *ldv, int *iparam, int *ipntr, float *workd, + float *workl, int *lworkl, int *ierr); + +// Double precision +// +extern "C" void dsaupd_(int *ido, char *bmat, int *n, char *which, + int *nev, double *tol, double *resid, int *ncv, + double *v, int *ldv, int *iparam, int *ipntr, + double *workd, double *workl, int *lworkl, + int *info); + +extern "C" void dseupd_(int *rvec, char *All, int *select, double *d, + double *z, int *ldz, double *sigma, + 
char *bmat, int *n, char *which, int *nev, + double *tol, double *resid, int *ncv, double *v, + int *ldv, int *iparam, int *ipntr, double *workd, + double *workl, int *lworkl, int *ierr); + + +namespace internal { + +template struct arpack_wrapper +{ + static inline void saupd(int *ido, char *bmat, int *n, char *which, + int *nev, RealScalar *tol, Scalar *resid, int *ncv, + Scalar *v, int *ldv, int *iparam, int *ipntr, + Scalar *workd, Scalar *workl, int *lworkl, int *info) + { + EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) + } + + static inline void seupd(int *rvec, char *All, int *select, Scalar *d, + Scalar *z, int *ldz, RealScalar *sigma, + char *bmat, int *n, char *which, int *nev, + RealScalar *tol, Scalar *resid, int *ncv, Scalar *v, + int *ldv, int *iparam, int *ipntr, Scalar *workd, + Scalar *workl, int *lworkl, int *ierr) + { + EIGEN_STATIC_ASSERT(!NumTraits::IsComplex, NUMERIC_TYPE_MUST_BE_REAL) + } +}; + +template <> struct arpack_wrapper +{ + static inline void saupd(int *ido, char *bmat, int *n, char *which, + int *nev, float *tol, float *resid, int *ncv, + float *v, int *ldv, int *iparam, int *ipntr, + float *workd, float *workl, int *lworkl, int *info) + { + ssaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info); + } + + static inline void seupd(int *rvec, char *All, int *select, float *d, + float *z, int *ldz, float *sigma, + char *bmat, int *n, char *which, int *nev, + float *tol, float *resid, int *ncv, float *v, + int *ldv, int *iparam, int *ipntr, float *workd, + float *workl, int *lworkl, int *ierr) + { + sseupd_(rvec, All, select, d, z, ldz, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, + workd, workl, lworkl, ierr); + } +}; + +template <> struct arpack_wrapper +{ + static inline void saupd(int *ido, char *bmat, int *n, char *which, + int *nev, double *tol, double *resid, int *ncv, + double *v, int *ldv, int *iparam, int *ipntr, + double *workd, 
double *workl, int *lworkl, int *info) + { + dsaupd_(ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, workd, workl, lworkl, info); + } + + static inline void seupd(int *rvec, char *All, int *select, double *d, + double *z, int *ldz, double *sigma, + char *bmat, int *n, char *which, int *nev, + double *tol, double *resid, int *ncv, double *v, + int *ldv, int *iparam, int *ipntr, double *workd, + double *workl, int *lworkl, int *ierr) + { + dseupd_(rvec, All, select, d, v, ldv, sigma, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam, ipntr, + workd, workl, lworkl, ierr); + } +}; + + +template +struct OP +{ + static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out); + static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs); +}; + +template +struct OP +{ + static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out) +{ + // OP = L^{-1} A L^{-T} (B = LL^T) + // + // First solve L^T out = in + // + Matrix::Map(out, n) = OP.matrixU().solve(Matrix::Map(in, n)); + Matrix::Map(out, n) = OP.permutationPinv() * Matrix::Map(out, n); + + // Then compute out = A out + // + Matrix::Map(out, n) = A * Matrix::Map(out, n); + + // Then solve L out = out + // + Matrix::Map(out, n) = OP.permutationP() * Matrix::Map(out, n); + Matrix::Map(out, n) = OP.matrixL().solve(Matrix::Map(out, n)); +} + + static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs) +{ + // Solve L^T out = in + // + Matrix::Map(vecs, n, k) = OP.matrixU().solve(Matrix::Map(vecs, n, k)); + Matrix::Map(vecs, n, k) = OP.permutationPinv() * Matrix::Map(vecs, n, k); +} + +}; + +template +struct OP +{ + static inline void applyOP(MatrixSolver &OP, const MatrixType &A, int n, Scalar *in, Scalar *out) +{ + eigen_assert(false && "Should never be in here..."); +} + + static inline void project(MatrixSolver &OP, int n, int k, Scalar *vecs) +{ + eigen_assert(false && "Should never be in here..."); +} + 
+}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ARPACKSELFADJOINTEIGENSOLVER_H + diff --git a/external/unsupported/Eigen/src/EulerAngles/CMakeLists.txt b/external/unsupported/Eigen/src/EulerAngles/CMakeLists.txt new file mode 100644 index 0000000..22088eb --- /dev/null +++ b/external/unsupported/Eigen/src/EulerAngles/CMakeLists.txt @@ -0,0 +1,6 @@ +file(GLOB Eigen_EulerAngles_SRCS "*.h") + +install(FILES + ${Eigen_EulerAngles_SRCS} + DESTINATION ${INCLUDE_INSTALL_DIR}/unsupported/Eigen/src/EulerAngles COMPONENT Devel + ) diff --git a/external/unsupported/Eigen/src/EulerAngles/EulerAngles.h b/external/unsupported/Eigen/src/EulerAngles/EulerAngles.h new file mode 100644 index 0000000..e43cdb7 --- /dev/null +++ b/external/unsupported/Eigen/src/EulerAngles/EulerAngles.h @@ -0,0 +1,355 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Tal Hadad +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EULERANGLESCLASS_H// TODO: Fix previous "EIGEN_EULERANGLES_H" definition? +#define EIGEN_EULERANGLESCLASS_H + +namespace Eigen +{ + /** \class EulerAngles + * + * \ingroup EulerAngles_Module + * + * \brief Represents a rotation in a 3 dimensional space as three Euler angles. + * + * Euler rotation is a set of three rotation of three angles over three fixed axes, defined by the EulerSystem given as a template parameter. 
+ * + * Here is how intrinsic Euler angles work: + * - first, rotate the axes system over the alpha axis in angle alpha + * - then, rotate the axes system over the beta axis(which was rotated in the first stage) in angle beta + * - then, rotate the axes system over the gamma axis(which was rotated in the two stages above) in angle gamma + * + * \note This class supports only intrinsic Euler angles for simplicity, + * see EulerSystem how to easily overcome this for extrinsic systems. + * + * ### Rotation representation and conversions ### + * + * It has been proved (see Wikipedia link below) that every rotation can be represented + * by Euler angles, but there is no single representation (e.g. unlike rotation matrices). + * Therefore, you can convert from Eigen rotation and to them + * (including rotation matrices, which is not called "rotations" by Eigen design). + * + * Euler angles are usually used for: + * - convenient human representation of rotation, especially in interactive GUI. + * - gimbal systems and robotics + * - efficient encoding(i.e. 3 floats only) of rotation for network protocols. + * + * However, Euler angles are slow compared to quaternions or matrices, + * because of their unnatural math definition, although it's simple for humans. + * To overcome this, this class provides easy movement from the math friendly representation + * to the human friendly representation, and vice versa. + * + * All the user needs to do is a safe simple C++ type conversion, + * and this class takes care of the math. + * Additionally, some axes related computation is done at compile time. + * + * #### Euler angles ranges in conversions #### + * The representation of a rotation as EulerAngles is not unique (unlike matrices), + * and a rotation even has infinitely many EulerAngles representations.
+ * For example, add or subtract 2*PI from either angle of EulerAngles + * and you'll get the same rotation. + * This is the general reason for infinite representation, + * but it's not the only general reason for not having a single representation. + * + * When converting a rotation to EulerAngles, this class converts it to angles within specific ranges. + * When converting some rotation to EulerAngles, the rules for ranges are as follows: + * - If the rotation we are converting from is an EulerAngles + * (even when it is represented as RotationBase explicitly), angle ranges are __undefined__. + * - otherwise, alpha and gamma angles will be in the range [-PI, PI].
+ * As for Beta angle: + * - If the system is Tait-Bryan, the beta angle will be in the range [-PI/2, PI/2]. + * - otherwise: + * - If the beta axis is positive, the beta angle will be in the range [0, PI] + * - If the beta axis is negative, the beta angle will be in the range [-PI, 0] + * + * \sa EulerAngles(const MatrixBase&) + * \sa EulerAngles(const RotationBase&) + * + * ### Convenient user typedefs ### + * + * Convenient typedefs for EulerAngles exist for float and double scalar, + * in a form of EulerAngles{A}{B}{C}{scalar}, + * e.g. \ref EulerAnglesXYZd, \ref EulerAnglesZYZf. + * + * Only positive axes{+x,+y,+z} Euler systems have a convenient typedef. + * If you need negative axes{-x,-y,-z}, it is recommended to create your own typedef with + * a word that represents what you need. + * + * ### Example ### + * + * \include EulerAngles.cpp + * Output: \verbinclude EulerAngles.out + * + * ### Additional reading ### + * + * If you want to get more insight into how Euler systems work in Eigen, see EulerSystem. + * + * More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles + * + * \tparam _Scalar the scalar type, i.e. the type of the angles. + * + * \tparam _System the EulerSystem to use, which represents the axes of rotation. + */ + template + class EulerAngles : public RotationBase, 3> + { + public: + typedef RotationBase, 3> Base; + + /** the scalar type of the angles */ + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + /** the EulerSystem to use, which represents the axes of rotation.
*/ + typedef _System System; + + typedef Matrix Matrix3; /*!< the equivalent rotation matrix type */ + typedef Matrix Vector3; /*!< the equivalent 3 dimension vector type */ + typedef Quaternion QuaternionType; /*!< the equivalent quaternion type */ + typedef AngleAxis AngleAxisType; /*!< the equivalent angle-axis type */ + + /** \returns the axis vector of the first (alpha) rotation */ + static Vector3 AlphaAxisVector() { + const Vector3& u = Vector3::Unit(System::AlphaAxisAbs - 1); + return System::IsAlphaOpposite ? -u : u; + } + + /** \returns the axis vector of the second (beta) rotation */ + static Vector3 BetaAxisVector() { + const Vector3& u = Vector3::Unit(System::BetaAxisAbs - 1); + return System::IsBetaOpposite ? -u : u; + } + + /** \returns the axis vector of the third (gamma) rotation */ + static Vector3 GammaAxisVector() { + const Vector3& u = Vector3::Unit(System::GammaAxisAbs - 1); + return System::IsGammaOpposite ? -u : u; + } + + private: + Vector3 m_angles; + + public: + /** Default constructor without initialization. */ + EulerAngles() {} + /** Constructs and initialize an EulerAngles (\p alpha, \p beta, \p gamma). */ + EulerAngles(const Scalar& alpha, const Scalar& beta, const Scalar& gamma) : + m_angles(alpha, beta, gamma) {} + + // TODO: Test this constructor + /** Constructs and initialize an EulerAngles from the array data {alpha, beta, gamma} */ + explicit EulerAngles(const Scalar* data) : m_angles(data) {} + + /** Constructs and initializes an EulerAngles from either: + * - a 3x3 rotation matrix expression(i.e. pure orthogonal matrix with determinant of +1), + * - a 3D vector expression representing Euler angles. + * + * \note If \p other is a 3x3 rotation matrix, the angles range rules will be as follow:
+ * Alpha and gamma angles will be in the range [-PI, PI].
+ * As for Beta angle: + * - If the system is Tait-Bryan, the beta angle will be in the range [-PI/2, PI/2]. + * - otherwise: + * - If the beta axis is positive, the beta angle will be in the range [0, PI] + * - If the beta axis is negative, the beta angle will be in the range [-PI, 0] + */ + template + explicit EulerAngles(const MatrixBase& other) { *this = other; } + + /** Constructs and initialize Euler angles from a rotation \p rot. + * + * \note If \p rot is an EulerAngles (even when it represented as RotationBase explicitly), + * angles ranges are __undefined__. + * Otherwise, alpha and gamma angles will be in the range [-PI, PI].
+ * As for Beta angle: + * - If the system is Tait-Bryan, the beta angle will be in the range [-PI/2, PI/2]. + * - otherwise: + * - If the beta axis is positive, the beta angle will be in the range [0, PI] + * - If the beta axis is negative, the beta angle will be in the range [-PI, 0] + */ + template + EulerAngles(const RotationBase& rot) { System::CalcEulerAngles(*this, rot.toRotationMatrix()); } + + /*EulerAngles(const QuaternionType& q) + { + // TODO: Implement it in a faster way for quaternions + // According to http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/ + // we can compute only the needed matrix cells and then convert to euler angles. (see ZYX example below) + // Currently we compute all matrix cells from quaternion. + + // Special case only for ZYX + //Scalar y2 = q.y() * q.y(); + //m_angles[0] = std::atan2(2*(q.w()*q.z() + q.x()*q.y()), (1 - 2*(y2 + q.z()*q.z()))); + //m_angles[1] = std::asin( 2*(q.w()*q.y() - q.z()*q.x())); + //m_angles[2] = std::atan2(2*(q.w()*q.x() + q.y()*q.z()), (1 - 2*(q.x()*q.x() + y2))); + }*/ + + /** \returns The angle values stored in a vector (alpha, beta, gamma). */ + const Vector3& angles() const { return m_angles; } + /** \returns A read-write reference to the angle values stored in a vector (alpha, beta, gamma). */ + Vector3& angles() { return m_angles; } + + /** \returns The value of the first angle. */ + Scalar alpha() const { return m_angles[0]; } + /** \returns A read-write reference to the angle of the first angle. */ + Scalar& alpha() { return m_angles[0]; } + + /** \returns The value of the second angle. */ + Scalar beta() const { return m_angles[1]; } + /** \returns A read-write reference to the angle of the second angle. */ + Scalar& beta() { return m_angles[1]; } + + /** \returns The value of the third angle. */ + Scalar gamma() const { return m_angles[2]; } + /** \returns A read-write reference to the angle of the third angle. 
*/ + Scalar& gamma() { return m_angles[2]; } + + /** \returns The Euler angles rotation inverse (which is as same as the negative), + * (-alpha, -beta, -gamma). + */ + EulerAngles inverse() const + { + EulerAngles res; + res.m_angles = -m_angles; + return res; + } + + /** \returns The Euler angles rotation negative (which is as same as the inverse), + * (-alpha, -beta, -gamma). + */ + EulerAngles operator -() const + { + return inverse(); + } + + /** Set \c *this from either: + * - a 3x3 rotation matrix expression(i.e. pure orthogonal matrix with determinant of +1), + * - a 3D vector expression representing Euler angles. + * + * See EulerAngles(const MatrixBase&) for more information about + * angles ranges output. + */ + template + EulerAngles& operator=(const MatrixBase& other) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + internal::eulerangles_assign_impl::run(*this, other.derived()); + return *this; + } + + // TODO: Assign and construct from another EulerAngles (with different system) + + /** Set \c *this from a rotation. + * + * See EulerAngles(const RotationBase&) for more information about + * angles ranges output. + */ + template + EulerAngles& operator=(const RotationBase& rot) { + System::CalcEulerAngles(*this, rot.toRotationMatrix()); + return *this; + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const EulerAngles& other, + const RealScalar& prec = NumTraits::dummy_precision()) const + { return angles().isApprox(other.angles(), prec); } + + /** \returns an equivalent 3x3 rotation matrix. */ + Matrix3 toRotationMatrix() const + { + // TODO: Calc it faster + return static_cast(*this).toRotationMatrix(); + } + + /** Convert the Euler angles to quaternion. 
*/ + operator QuaternionType() const + { + return + AngleAxisType(alpha(), AlphaAxisVector()) * + AngleAxisType(beta(), BetaAxisVector()) * + AngleAxisType(gamma(), GammaAxisVector()); + } + + friend std::ostream& operator<<(std::ostream& s, const EulerAngles& eulerAngles) + { + s << eulerAngles.angles().transpose(); + return s; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType */ + template + EulerAngles cast() const + { + EulerAngles e; + e.angles() = angles().template cast(); + return e; + } + }; + +#define EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(AXES, SCALAR_TYPE, SCALAR_POSTFIX) \ + /** \ingroup EulerAngles_Module */ \ + typedef EulerAngles EulerAngles##AXES##SCALAR_POSTFIX; + +#define EIGEN_EULER_ANGLES_TYPEDEFS(SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XYZ, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XYX, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XZY, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(XZX, SCALAR_TYPE, SCALAR_POSTFIX) \ + \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YZX, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YZY, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YXZ, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(YXY, SCALAR_TYPE, SCALAR_POSTFIX) \ + \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZXY, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZXZ, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZYX, SCALAR_TYPE, SCALAR_POSTFIX) \ + EIGEN_EULER_ANGLES_SINGLE_TYPEDEF(ZYZ, SCALAR_TYPE, SCALAR_POSTFIX) + +EIGEN_EULER_ANGLES_TYPEDEFS(float, f) +EIGEN_EULER_ANGLES_TYPEDEFS(double, d) + + namespace internal + { + template + struct traits > + { + typedef _Scalar Scalar; + }; + + // set from a rotation matrix + template + struct eulerangles_assign_impl + { + typedef typename Other::Scalar Scalar; + static void run(EulerAngles& e, const Other& m) + { + 
System::CalcEulerAngles(e, m); + } + }; + + // set from a vector of Euler angles + template + struct eulerangles_assign_impl + { + typedef typename Other::Scalar Scalar; + static void run(EulerAngles& e, const Other& vec) + { + e.angles() = vec; + } + }; + } +} + +#endif // EIGEN_EULERANGLESCLASS_H diff --git a/external/unsupported/Eigen/src/EulerAngles/EulerSystem.h b/external/unsupported/Eigen/src/EulerAngles/EulerSystem.h new file mode 100644 index 0000000..2a833b0 --- /dev/null +++ b/external/unsupported/Eigen/src/EulerAngles/EulerSystem.h @@ -0,0 +1,305 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Tal Hadad +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EULERSYSTEM_H +#define EIGEN_EULERSYSTEM_H + +namespace Eigen +{ + // Forward declarations + template + class EulerAngles; + + namespace internal + { + // TODO: Add this trait to the Eigen internal API? + template 0)> + struct Abs + { + enum { value = Num }; + }; + + template + struct Abs + { + enum { value = -Num }; + }; + + template + struct IsValidAxis + { + enum { value = Axis != 0 && Abs::value <= 3 }; + }; + + template + struct eulerangles_assign_impl; + } + + #define EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(COND,MSG) typedef char static_assertion_##MSG[(COND)?1:-1] + + /** \brief Representation of a fixed signed rotation axis for EulerSystem. + * + * \ingroup EulerAngles_Module + * + * Values here represent: + * - The axis of the rotation: X, Y or Z. + * - The sign (i.e. direction of the rotation along the axis): positive(+) or negative(-) + * + * Therefore, this could express all the axes {+X,+Y,+Z,-X,-Y,-Z} + * + * For positive axis, use +EULER_{axis}, and for negative axis use -EULER_{axis}. 
+ */ + enum EulerAxis + { + EULER_X = 1, /*!< the X axis */ + EULER_Y = 2, /*!< the Y axis */ + EULER_Z = 3 /*!< the Z axis */ + }; + + /** \class EulerSystem + * + * \ingroup EulerAngles_Module + * + * \brief Represents a fixed Euler rotation system. + * + * The goal of this meta-class is to represent the Euler system at compile time, for EulerAngles. + * + * You can use this class to get two things: + * - Build an Euler system, and then pass it as a template parameter to EulerAngles. + * - Query some compile time data about an Euler system. (e.g. Whether it's Tait-Bryan) + * + * Euler rotation is a set of three rotations on fixed axes. (see \ref EulerAngles) + * This meta-class stores those signed axes as constants. (see \ref EulerAxis) + * + * ### Types of Euler systems ### + * + * All and only the valid 3 dimension Euler rotations over standard + * signed axes{+X,+Y,+Z,-X,-Y,-Z} are supported: + * - all axes X, Y, Z in each valid order (see below what order is valid) + * - rotation over the axis is supported both over the positive and negative directions. + * - both Tait-Bryan and proper/classic Euler angles (i.e. the opposite). + * + * Since EulerSystem supports both positive and negative directions, + * you may call this rotation distinction in other names: + * - _right handed_ or _left handed_ + * - _counterclockwise_ or _clockwise_ + * + * Notice that not all axis combinations are valid; an invalid combination will trigger a static assertion: + * same unsigned axes can't be neighbors, e.g. {X,X,Y} is invalid. + * This yields two and only two classes: + * - _Tait-Bryan_ - all unsigned axes are distinct, e.g. {X,Y,Z} + * - _proper/classic Euler angles_ - The first and the third unsigned axes are equal, + * and the second is different, e.g. {X,Y,X} + * + * ### Intrinsic vs extrinsic Euler systems ### + * + * Only intrinsic Euler systems are supported for simplicity. + * If you want to use extrinsic Euler systems, + * just use the equal intrinsic opposite order for axes and angles.
+ * I.e axes (A,B,C) becomes (C,B,A), and angles (a,b,c) becomes (c,b,a). + * + * ### Convenient user typedefs ### + * + * Convenient typedefs for EulerSystem exist (only for positive axes Euler systems), + * in a form of EulerSystem{A}{B}{C}, e.g. \ref EulerSystemXYZ. + * + * ### Additional reading ### + * + * More information about Euler angles: https://en.wikipedia.org/wiki/Euler_angles + * + * \tparam _AlphaAxis the first fixed EulerAxis + * + * \tparam _BetaAxis the second fixed EulerAxis + * + * \tparam _GammaAxis the third fixed EulerAxis + */ + template + class EulerSystem + { + public: + // It's defined this way and not as enum, because I think + // that enum is not guerantee to support negative numbers + + /** The first rotation axis */ + static const int AlphaAxis = _AlphaAxis; + + /** The second rotation axis */ + static const int BetaAxis = _BetaAxis; + + /** The third rotation axis */ + static const int GammaAxis = _GammaAxis; + + enum + { + AlphaAxisAbs = internal::Abs::value, /*!< the first rotation axis unsigned */ + BetaAxisAbs = internal::Abs::value, /*!< the second rotation axis unsigned */ + GammaAxisAbs = internal::Abs::value, /*!< the third rotation axis unsigned */ + + IsAlphaOpposite = (AlphaAxis < 0) ? 1 : 0, /*!< whether alpha axis is negative */ + IsBetaOpposite = (BetaAxis < 0) ? 1 : 0, /*!< whether beta axis is negative */ + IsGammaOpposite = (GammaAxis < 0) ? 1 : 0, /*!< whether gamma axis is negative */ + + // Parity is even if alpha axis X is followed by beta axis Y, or Y is followed + // by Z, or Z is followed by X; otherwise it is odd. + IsOdd = ((AlphaAxisAbs)%3 == (BetaAxisAbs - 1)%3) ? 0 : 1, /*!< whether the Euler system is odd */ + IsEven = IsOdd ? 0 : 1, /*!< whether the Euler system is even */ + + IsTaitBryan = ((unsigned)AlphaAxisAbs != (unsigned)GammaAxisAbs) ? 
1 : 0 /*!< whether the Euler system is Tait-Bryan */ + }; + + private: + + EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis::value, + ALPHA_AXIS_IS_INVALID); + + EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis::value, + BETA_AXIS_IS_INVALID); + + EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT(internal::IsValidAxis::value, + GAMMA_AXIS_IS_INVALID); + + EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)AlphaAxisAbs != (unsigned)BetaAxisAbs, + ALPHA_AXIS_CANT_BE_EQUAL_TO_BETA_AXIS); + + EIGEN_EULER_ANGLES_CLASS_STATIC_ASSERT((unsigned)BetaAxisAbs != (unsigned)GammaAxisAbs, + BETA_AXIS_CANT_BE_EQUAL_TO_GAMMA_AXIS); + + static const int + // I, J, K are the pivot indexes permutation for the rotation matrix, that match this Euler system. + // They are used in this class converters. + // They are always different from each other, and their possible values are: 0, 1, or 2. + I_ = AlphaAxisAbs - 1, + J_ = (AlphaAxisAbs - 1 + 1 + IsOdd)%3, + K_ = (AlphaAxisAbs - 1 + 2 - IsOdd)%3 + ; + + // TODO: Get @mat parameter in form that avoids double evaluation. + template + static void CalcEulerAngles_imp(Matrix::Scalar, 3, 1>& res, const MatrixBase& mat, internal::true_type /*isTaitBryan*/) + { + using std::atan2; + using std::sqrt; + + typedef typename Derived::Scalar Scalar; + + const Scalar plusMinus = IsEven? 1 : -1; + const Scalar minusPlus = IsOdd? 
1 : -1; + + const Scalar Rsum = sqrt((mat(I_,I_) * mat(I_,I_) + mat(I_,J_) * mat(I_,J_) + mat(J_,K_) * mat(J_,K_) + mat(K_,K_) * mat(K_,K_))/2); + res[1] = atan2(plusMinus * mat(I_,K_), Rsum); + + // There is a singularity when cos(beta) == 0 + if(Rsum > 4 * NumTraits::epsilon()) {// cos(beta) != 0 + res[0] = atan2(minusPlus * mat(J_, K_), mat(K_, K_)); + res[2] = atan2(minusPlus * mat(I_, J_), mat(I_, I_)); + } + else if(plusMinus * mat(I_, K_) > 0) {// cos(beta) == 0 and sin(beta) == 1 + Scalar spos = mat(J_, I_) + plusMinus * mat(K_, J_); // 2*sin(alpha + plusMinus * gamma + Scalar cpos = mat(J_, J_) + minusPlus * mat(K_, I_); // 2*cos(alpha + plusMinus * gamma) + Scalar alphaPlusMinusGamma = atan2(spos, cpos); + res[0] = alphaPlusMinusGamma; + res[2] = 0; + } + else {// cos(beta) == 0 and sin(beta) == -1 + Scalar sneg = plusMinus * (mat(K_, J_) + minusPlus * mat(J_, I_)); // 2*sin(alpha + minusPlus*gamma) + Scalar cneg = mat(J_, J_) + plusMinus * mat(K_, I_); // 2*cos(alpha + minusPlus*gamma) + Scalar alphaMinusPlusBeta = atan2(sneg, cneg); + res[0] = alphaMinusPlusBeta; + res[2] = 0; + } + } + + template + static void CalcEulerAngles_imp(Matrix::Scalar,3,1>& res, + const MatrixBase& mat, internal::false_type /*isTaitBryan*/) + { + using std::atan2; + using std::sqrt; + + typedef typename Derived::Scalar Scalar; + + const Scalar plusMinus = IsEven? 1 : -1; + const Scalar minusPlus = IsOdd? 
1 : -1; + + const Scalar Rsum = sqrt((mat(I_, J_) * mat(I_, J_) + mat(I_, K_) * mat(I_, K_) + mat(J_, I_) * mat(J_, I_) + mat(K_, I_) * mat(K_, I_)) / 2); + + res[1] = atan2(Rsum, mat(I_, I_)); + + // There is a singularity when sin(beta) == 0 + if(Rsum > 4 * NumTraits::epsilon()) {// sin(beta) != 0 + res[0] = atan2(mat(J_, I_), minusPlus * mat(K_, I_)); + res[2] = atan2(mat(I_, J_), plusMinus * mat(I_, K_)); + } + else if(mat(I_, I_) > 0) {// sin(beta) == 0 and cos(beta) == 1 + Scalar spos = plusMinus * mat(K_, J_) + minusPlus * mat(J_, K_); // 2*sin(alpha + gamma) + Scalar cpos = mat(J_, J_) + mat(K_, K_); // 2*cos(alpha + gamma) + res[0] = atan2(spos, cpos); + res[2] = 0; + } + else {// sin(beta) == 0 and cos(beta) == -1 + Scalar sneg = plusMinus * mat(K_, J_) + plusMinus * mat(J_, K_); // 2*sin(alpha - gamma) + Scalar cneg = mat(J_, J_) - mat(K_, K_); // 2*cos(alpha - gamma) + res[0] = atan2(sneg, cneg); + res[2] = 0; + } + } + + template + static void CalcEulerAngles( + EulerAngles& res, + const typename EulerAngles::Matrix3& mat) + { + CalcEulerAngles_imp( + res.angles(), mat, + typename internal::conditional::type()); + + if (IsAlphaOpposite) + res.alpha() = -res.alpha(); + + if (IsBetaOpposite) + res.beta() = -res.beta(); + + if (IsGammaOpposite) + res.gamma() = -res.gamma(); + } + + template + friend class Eigen::EulerAngles; + + template + friend struct internal::eulerangles_assign_impl; + }; + +#define EIGEN_EULER_SYSTEM_TYPEDEF(A, B, C) \ + /** \ingroup EulerAngles_Module */ \ + typedef EulerSystem EulerSystem##A##B##C; + + EIGEN_EULER_SYSTEM_TYPEDEF(X,Y,Z) + EIGEN_EULER_SYSTEM_TYPEDEF(X,Y,X) + EIGEN_EULER_SYSTEM_TYPEDEF(X,Z,Y) + EIGEN_EULER_SYSTEM_TYPEDEF(X,Z,X) + + EIGEN_EULER_SYSTEM_TYPEDEF(Y,Z,X) + EIGEN_EULER_SYSTEM_TYPEDEF(Y,Z,Y) + EIGEN_EULER_SYSTEM_TYPEDEF(Y,X,Z) + EIGEN_EULER_SYSTEM_TYPEDEF(Y,X,Y) + + EIGEN_EULER_SYSTEM_TYPEDEF(Z,X,Y) + EIGEN_EULER_SYSTEM_TYPEDEF(Z,X,Z) + EIGEN_EULER_SYSTEM_TYPEDEF(Z,Y,X) + EIGEN_EULER_SYSTEM_TYPEDEF(Z,Y,Z) +} 
+ +#endif // EIGEN_EULERSYSTEM_H diff --git a/external/unsupported/Eigen/src/FFT/ei_fftw_impl.h b/external/unsupported/Eigen/src/FFT/ei_fftw_impl.h new file mode 100644 index 0000000..1c2cd24 --- /dev/null +++ b/external/unsupported/Eigen/src/FFT/ei_fftw_impl.h @@ -0,0 +1,261 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Mark Borgerding mark a borgerding net +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +namespace Eigen { + +namespace internal { + + // FFTW uses non-const arguments + // so we must use ugly const_cast calls for all the args it uses + // + // This should be safe as long as + // 1. we use FFTW_ESTIMATE for all our planning + // see the FFTW docs section 4.3.2 "Planner Flags" + // 2. fftw_complex is compatible with std::complex + // This assumes std::complex layout is array of size 2 with real,imag + template + inline + T * fftw_cast(const T* p) + { + return const_cast( p); + } + + inline + fftw_complex * fftw_cast( const std::complex * p) + { + return const_cast( reinterpret_cast(p) ); + } + + inline + fftwf_complex * fftw_cast( const std::complex * p) + { + return const_cast( reinterpret_cast(p) ); + } + + inline + fftwl_complex * fftw_cast( const std::complex * p) + { + return const_cast( reinterpret_cast(p) ); + } + + template + struct fftw_plan {}; + + template <> + struct fftw_plan + { + typedef float scalar_type; + typedef fftwf_complex complex_type; + fftwf_plan m_plan; + fftw_plan() :m_plan(NULL) {} + ~fftw_plan() {if (m_plan) fftwf_destroy_plan(m_plan);} + + inline + void fwd(complex_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwf_execute_dft( m_plan, src,dst); + } + inline + void inv(complex_type 
* dst,complex_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftwf_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwf_execute_dft( m_plan, src,dst); + } + inline + void fwd(complex_type * dst,scalar_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftwf_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwf_execute_dft_r2c( m_plan,src,dst); + } + inline + void inv(scalar_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) + m_plan = fftwf_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwf_execute_dft_c2r( m_plan, src,dst); + } + + inline + void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { + if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwf_execute_dft( m_plan, src,dst); + } + inline + void inv2( complex_type * dst,complex_type * src,int n0,int n1) { + if (m_plan==NULL) m_plan = fftwf_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwf_execute_dft( m_plan, src,dst); + } + + }; + template <> + struct fftw_plan + { + typedef double scalar_type; + typedef fftw_complex complex_type; + ::fftw_plan m_plan; + fftw_plan() :m_plan(NULL) {} + ~fftw_plan() {if (m_plan) fftw_destroy_plan(m_plan);} + + inline + void fwd(complex_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftw_execute_dft( m_plan, src,dst); + } + inline + void inv(complex_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftw_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftw_execute_dft( m_plan, src,dst); + } + inline + void fwd(complex_type * dst,scalar_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftw_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftw_execute_dft_r2c( m_plan,src,dst); + } + inline + void 
inv(scalar_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) + m_plan = fftw_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftw_execute_dft_c2r( m_plan, src,dst); + } + inline + void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { + if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftw_execute_dft( m_plan, src,dst); + } + inline + void inv2( complex_type * dst,complex_type * src,int n0,int n1) { + if (m_plan==NULL) m_plan = fftw_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftw_execute_dft( m_plan, src,dst); + } + }; + template <> + struct fftw_plan + { + typedef long double scalar_type; + typedef fftwl_complex complex_type; + fftwl_plan m_plan; + fftw_plan() :m_plan(NULL) {} + ~fftw_plan() {if (m_plan) fftwl_destroy_plan(m_plan);} + + inline + void fwd(complex_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_FORWARD, FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwl_execute_dft( m_plan, src,dst); + } + inline + void inv(complex_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftwl_plan_dft_1d(nfft,src,dst, FFTW_BACKWARD , FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwl_execute_dft( m_plan, src,dst); + } + inline + void fwd(complex_type * dst,scalar_type * src,int nfft) { + if (m_plan==NULL) m_plan = fftwl_plan_dft_r2c_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwl_execute_dft_r2c( m_plan,src,dst); + } + inline + void inv(scalar_type * dst,complex_type * src,int nfft) { + if (m_plan==NULL) + m_plan = fftwl_plan_dft_c2r_1d(nfft,src,dst,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwl_execute_dft_c2r( m_plan, src,dst); + } + inline + void fwd2( complex_type * dst,complex_type * src,int n0,int n1) { + if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_FORWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwl_execute_dft( m_plan, src,dst); + } + 
inline + void inv2( complex_type * dst,complex_type * src,int n0,int n1) { + if (m_plan==NULL) m_plan = fftwl_plan_dft_2d(n0,n1,src,dst,FFTW_BACKWARD,FFTW_ESTIMATE|FFTW_PRESERVE_INPUT); + fftwl_execute_dft( m_plan, src,dst); + } + }; + + template + struct fftw_impl + { + typedef _Scalar Scalar; + typedef std::complex Complex; + + inline + void clear() + { + m_plans.clear(); + } + + // complex-to-complex forward FFT + inline + void fwd( Complex * dst,const Complex *src,int nfft) + { + get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src),nfft ); + } + + // real-to-complex forward FFT + inline + void fwd( Complex * dst,const Scalar * src,int nfft) + { + get_plan(nfft,false,dst,src).fwd(fftw_cast(dst), fftw_cast(src) ,nfft); + } + + // 2-d complex-to-complex + inline + void fwd2(Complex * dst, const Complex * src, int n0,int n1) + { + get_plan(n0,n1,false,dst,src).fwd2(fftw_cast(dst), fftw_cast(src) ,n0,n1); + } + + // inverse complex-to-complex + inline + void inv(Complex * dst,const Complex *src,int nfft) + { + get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft ); + } + + // half-complex to scalar + inline + void inv( Scalar * dst,const Complex * src,int nfft) + { + get_plan(nfft,true,dst,src).inv(fftw_cast(dst), fftw_cast(src),nfft ); + } + + // 2-d complex-to-complex + inline + void inv2(Complex * dst, const Complex * src, int n0,int n1) + { + get_plan(n0,n1,true,dst,src).inv2(fftw_cast(dst), fftw_cast(src) ,n0,n1); + } + + + protected: + typedef fftw_plan PlanData; + + typedef Eigen::numext::int64_t int64_t; + + typedef std::map PlanMap; + + PlanMap m_plans; + + inline + PlanData & get_plan(int nfft,bool inverse,void * dst,const void * src) + { + bool inplace = (dst==src); + bool aligned = ( (reinterpret_cast(src)&15) | (reinterpret_cast(dst)&15) ) == 0; + int64_t key = ( (nfft<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1; + return m_plans[key]; + } + + inline + PlanData & get_plan(int n0,int n1,bool inverse,void * dst,const void 
* src) + { + bool inplace = (dst==src); + bool aligned = ( (reinterpret_cast(src)&15) | (reinterpret_cast(dst)&15) ) == 0; + int64_t key = ( ( (((int64_t)n0) << 30)|(n1<<3 ) | (inverse<<2) | (inplace<<1) | aligned ) << 1 ) + 1; + return m_plans[key]; + } + }; + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/FFT/ei_kissfft_impl.h b/external/unsupported/Eigen/src/FFT/ei_kissfft_impl.h new file mode 100644 index 0000000..430953a --- /dev/null +++ b/external/unsupported/Eigen/src/FFT/ei_kissfft_impl.h @@ -0,0 +1,449 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Mark Borgerding mark a borgerding net +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +namespace Eigen { + +namespace internal { + + // This FFT implementation was derived from kissfft http:sourceforge.net/projects/kissfft + // Copyright 2003-2009 Mark Borgerding + +template +struct kiss_cpx_fft +{ + typedef _Scalar Scalar; + typedef std::complex Complex; + std::vector m_twiddles; + std::vector m_stageRadix; + std::vector m_stageRemainder; + std::vector m_scratchBuf; + bool m_inverse; + + inline void make_twiddles(int nfft, bool inverse) + { + using numext::sin; + using numext::cos; + m_inverse = inverse; + m_twiddles.resize(nfft); + double phinc = 0.25 * double(EIGEN_PI) / nfft; + Scalar flip = inverse ? 
Scalar(1) : Scalar(-1); + m_twiddles[0] = Complex(Scalar(1), Scalar(0)); + if ((nfft&1)==0) + m_twiddles[nfft/2] = Complex(Scalar(-1), Scalar(0)); + int i=1; + for (;i*8n) + p=n;// impossible to have a factor > sqrt(n) + } + n /= p; + m_stageRadix.push_back(p); + m_stageRemainder.push_back(n); + if ( p > 5 ) + m_scratchBuf.resize(p); // scratchbuf will be needed in bfly_generic + }while(n>1); + } + + template + inline + void work( int stage,Complex * xout, const _Src * xin, size_t fstride,size_t in_stride) + { + int p = m_stageRadix[stage]; + int m = m_stageRemainder[stage]; + Complex * Fout_beg = xout; + Complex * Fout_end = xout + p*m; + + if (m>1) { + do{ + // recursive call: + // DFT of size m*p performed by doing + // p instances of smaller DFTs of size m, + // each one takes a decimated version of the input + work(stage+1, xout , xin, fstride*p,in_stride); + xin += fstride*in_stride; + }while( (xout += m) != Fout_end ); + }else{ + do{ + *xout = *xin; + xin += fstride*in_stride; + }while(++xout != Fout_end ); + } + xout=Fout_beg; + + // recombine the p smaller DFTs + switch (p) { + case 2: bfly2(xout,fstride,m); break; + case 3: bfly3(xout,fstride,m); break; + case 4: bfly4(xout,fstride,m); break; + case 5: bfly5(xout,fstride,m); break; + default: bfly_generic(xout,fstride,m,p); break; + } + } + + inline + void bfly2( Complex * Fout, const size_t fstride, int m) + { + for (int k=0;kreal() - Scalar(.5)*scratch[3].real() , Fout->imag() - Scalar(.5)*scratch[3].imag() ); + scratch[0] *= epi3.imag(); + *Fout += scratch[3]; + Fout[m2] = Complex( Fout[m].real() + scratch[0].imag() , Fout[m].imag() - scratch[0].real() ); + Fout[m] += Complex( -scratch[0].imag(),scratch[0].real() ); + ++Fout; + }while(--k); + } + + inline + void bfly5( Complex * Fout, const size_t fstride, const size_t m) + { + Complex *Fout0,*Fout1,*Fout2,*Fout3,*Fout4; + size_t u; + Complex scratch[13]; + Complex * twiddles = &m_twiddles[0]; + Complex *tw; + Complex ya,yb; + ya = twiddles[fstride*m]; 
+ yb = twiddles[fstride*2*m]; + + Fout0=Fout; + Fout1=Fout0+m; + Fout2=Fout0+2*m; + Fout3=Fout0+3*m; + Fout4=Fout0+4*m; + + tw=twiddles; + for ( u=0; u(m_twiddles.size()); + Complex * scratchbuf = &m_scratchBuf[0]; + + for ( u=0; u(fstride) * k; + if (twidx>=Norig) twidx-=Norig; + t=scratchbuf[q] * twiddles[twidx]; + Fout[ k ] += t; + } + k += m; + } + } + } +}; + +template +struct kissfft_impl +{ + typedef _Scalar Scalar; + typedef std::complex Complex; + + void clear() + { + m_plans.clear(); + m_realTwiddles.clear(); + } + + inline + void fwd( Complex * dst,const Complex *src,int nfft) + { + get_plan(nfft,false).work(0, dst, src, 1,1); + } + + inline + void fwd2( Complex * dst,const Complex *src,int n0,int n1) + { + EIGEN_UNUSED_VARIABLE(dst); + EIGEN_UNUSED_VARIABLE(src); + EIGEN_UNUSED_VARIABLE(n0); + EIGEN_UNUSED_VARIABLE(n1); + } + + inline + void inv2( Complex * dst,const Complex *src,int n0,int n1) + { + EIGEN_UNUSED_VARIABLE(dst); + EIGEN_UNUSED_VARIABLE(src); + EIGEN_UNUSED_VARIABLE(n0); + EIGEN_UNUSED_VARIABLE(n1); + } + + // real-to-complex forward FFT + // perform two FFTs of src even and src odd + // then twiddle to recombine them into the half-spectrum format + // then fill in the conjugate symmetric half + inline + void fwd( Complex * dst,const Scalar * src,int nfft) + { + if ( nfft&3 ) { + // use generic mode for odd + m_tmpBuf1.resize(nfft); + get_plan(nfft,false).work(0, &m_tmpBuf1[0], src, 1,1); + std::copy(m_tmpBuf1.begin(),m_tmpBuf1.begin()+(nfft>>1)+1,dst ); + }else{ + int ncfft = nfft>>1; + int ncfft2 = nfft>>2; + Complex * rtw = real_twiddles(ncfft2); + + // use optimized mode for even real + fwd( dst, reinterpret_cast (src), ncfft); + Complex dc(dst[0].real() + dst[0].imag()); + Complex nyquist(dst[0].real() - dst[0].imag()); + int k; + for ( k=1;k <= ncfft2 ; ++k ) { + Complex fpk = dst[k]; + Complex fpnk = conj(dst[ncfft-k]); + Complex f1k = fpk + fpnk; + Complex f2k = fpk - fpnk; + Complex tw= f2k * rtw[k-1]; + dst[k] = (f1k + tw) * 
Scalar(.5); + dst[ncfft-k] = conj(f1k -tw)*Scalar(.5); + } + dst[0] = dc; + dst[ncfft] = nyquist; + } + } + + // inverse complex-to-complex + inline + void inv(Complex * dst,const Complex *src,int nfft) + { + get_plan(nfft,true).work(0, dst, src, 1,1); + } + + // half-complex to scalar + inline + void inv( Scalar * dst,const Complex * src,int nfft) + { + if (nfft&3) { + m_tmpBuf1.resize(nfft); + m_tmpBuf2.resize(nfft); + std::copy(src,src+(nfft>>1)+1,m_tmpBuf1.begin() ); + for (int k=1;k<(nfft>>1)+1;++k) + m_tmpBuf1[nfft-k] = conj(m_tmpBuf1[k]); + inv(&m_tmpBuf2[0],&m_tmpBuf1[0],nfft); + for (int k=0;k>1; + int ncfft2 = nfft>>2; + Complex * rtw = real_twiddles(ncfft2); + m_tmpBuf1.resize(ncfft); + m_tmpBuf1[0] = Complex( src[0].real() + src[ncfft].real(), src[0].real() - src[ncfft].real() ); + for (int k = 1; k <= ncfft / 2; ++k) { + Complex fk = src[k]; + Complex fnkc = conj(src[ncfft-k]); + Complex fek = fk + fnkc; + Complex tmp = fk - fnkc; + Complex fok = tmp * conj(rtw[k-1]); + m_tmpBuf1[k] = fek + fok; + m_tmpBuf1[ncfft-k] = conj(fek - fok); + } + get_plan(ncfft,true).work(0, reinterpret_cast(dst), &m_tmpBuf1[0], 1,1); + } + } + + protected: + typedef kiss_cpx_fft PlanData; + typedef std::map PlanMap; + + PlanMap m_plans; + std::map > m_realTwiddles; + std::vector m_tmpBuf1; + std::vector m_tmpBuf2; + + inline + int PlanKey(int nfft, bool isinverse) const { return (nfft<<1) | int(isinverse); } + + inline + PlanData & get_plan(int nfft, bool inverse) + { + // TODO look for PlanKey(nfft, ! 
inverse) and conjugate the twiddles + PlanData & pd = m_plans[ PlanKey(nfft,inverse) ]; + if ( pd.m_twiddles.size() == 0 ) { + pd.make_twiddles(nfft,inverse); + pd.factorize(nfft); + } + return pd; + } + + inline + Complex * real_twiddles(int ncfft2) + { + using std::acos; + std::vector & twidref = m_realTwiddles[ncfft2];// creates new if not there + if ( (int)twidref.size() != ncfft2 ) { + twidref.resize(ncfft2); + int ncfft= ncfft2<<1; + Scalar pi = acos( Scalar(-1) ); + for (int k=1;k<=ncfft2;++k) + twidref[k-1] = exp( Complex(0,-pi * (Scalar(k) / ncfft + Scalar(.5)) ) ); + } + return &twidref[0]; + } +}; + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h b/external/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h new file mode 100644 index 0000000..e7d70f3 --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/ConstrainedConjGrad.h @@ -0,0 +1,187 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud + +/* NOTE The functions of this file have been adapted from the GMM++ library */ + +//======================================================================== +// +// Copyright (C) 2002-2007 Yves Renard +// +// This file is a part of GETFEM++ +// +// Getfem++ is free software; you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation; version 2.1 of the License. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// You should have received a copy of the GNU Lesser General Public +// License along with this program; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, +// USA. +// +//======================================================================== + +#include "../../../../Eigen/src/Core/util/NonMPL2.h" + +#ifndef EIGEN_CONSTRAINEDCG_H +#define EIGEN_CONSTRAINEDCG_H + +#include "../../../../Eigen/Core" + +namespace Eigen { + +namespace internal { + +/** \ingroup IterativeLinearSolvers_Module + * Compute the pseudo inverse of the non-square matrix C such that + * \f$ CINV = (C * C^T)^{-1} * C \f$ based on a conjugate gradient method. + * + * This function is internally used by constrained_cg. + */ +template +void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV) +{ + // optimisable : copie de la ligne, precalcul de C * trans(C). + typedef typename CMatrix::Scalar Scalar; + typedef typename CMatrix::Index Index; + // FIXME use sparse vectors ? 
+ typedef Matrix TmpVec; + + Index rows = C.rows(), cols = C.cols(); + + TmpVec d(rows), e(rows), l(cols), p(rows), q(rows), r(rows); + Scalar rho, rho_1, alpha; + d.setZero(); + + typedef Triplet T; + std::vector tripletList; + + for (Index i = 0; i < rows; ++i) + { + d[i] = 1.0; + rho = 1.0; + e.setZero(); + r = d; + p = d; + + while (rho >= 1e-38) + { /* conjugate gradient to compute e */ + /* which is the i-th row of inv(C * trans(C)) */ + l = C.transpose() * p; + q = C * l; + alpha = rho / p.dot(q); + e += alpha * p; + r += -alpha * q; + rho_1 = rho; + rho = r.dot(r); + p = (rho/rho_1) * p + r; + } + + l = C.transpose() * e; // l is the i-th row of CINV + // FIXME add a generic "prune/filter" expression for both dense and sparse object to sparse + for (Index j=0; j +void constrained_cg(const TMatrix& A, const CMatrix& C, VectorX& x, + const VectorB& b, const VectorF& f, IterationController &iter) +{ + using std::sqrt; + typedef typename TMatrix::Scalar Scalar; + typedef typename TMatrix::Index Index; + typedef Matrix TmpVec; + + Scalar rho = 1.0, rho_1, lambda, gamma; + Index xSize = x.size(); + TmpVec p(xSize), q(xSize), q2(xSize), + r(xSize), old_z(xSize), z(xSize), + memox(xSize); + std::vector satured(C.rows()); + p.setZero(); + iter.setRhsNorm(sqrt(b.dot(b))); // gael vect_sp(PS, b, b) + if (iter.rhsNorm() == 0.0) iter.setRhsNorm(1.0); + + SparseMatrix CINV(C.rows(), C.cols()); + pseudo_inverse(C, CINV); + + while(true) + { + // computation of residual + old_z = z; + memox = x; + r = b; + r += A * -x; + z = r; + bool transition = false; + for (Index i = 0; i < C.rows(); ++i) + { + Scalar al = C.row(i).dot(x) - f.coeff(i); + if (al >= -1.0E-15) + { + if (!satured[i]) + { + satured[i] = true; + transition = true; + } + Scalar bb = CINV.row(i).dot(z); + if (bb > 0.0) + // FIXME: we should allow that: z += -bb * C.row(i); + for (typename CMatrix::InnerIterator it(C,i); it; ++it) + z.coeffRef(it.index()) -= bb*it.value(); + } + else + satured[i] = false; + } + 
+ // descent direction + rho_1 = rho; + rho = r.dot(z); + + if (iter.finished(rho)) break; + if (transition || iter.first()) gamma = 0.0; + else gamma = (std::max)(0.0, (rho - old_z.dot(z)) / rho_1); + p = z + gamma*p; + + ++iter; + // one dimensionnal optimization + q = A * p; + lambda = rho / q.dot(p); + for (Index i = 0; i < C.rows(); ++i) + { + if (!satured[i]) + { + Scalar bb = C.row(i).dot(p) - f[i]; + if (bb > 0.0) + lambda = (std::min)(lambda, (f.coeff(i)-C.row(i).dot(x)) / bb); + } + } + x += lambda * p; + memox -= x; + } +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CONSTRAINEDCG_H diff --git a/external/unsupported/Eigen/src/IterativeSolvers/DGMRES.h b/external/unsupported/Eigen/src/IterativeSolvers/DGMRES.h new file mode 100644 index 0000000..5ae011b --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/DGMRES.h @@ -0,0 +1,511 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Désiré Nuentsa-Wakam +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DGMRES_H +#define EIGEN_DGMRES_H + +#include "../../../../Eigen/Eigenvalues" + +namespace Eigen { + +template< typename _MatrixType, + typename _Preconditioner = DiagonalPreconditioner > +class DGMRES; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +/** \brief Computes a permutation vector to have a sorted sequence + * \param vec The vector to reorder. + * \param perm gives the sorted sequence on output. 
Must be initialized with 0..n-1 + * \param ncut Put the ncut smallest elements at the end of the vector + * WARNING This is an expensive sort, so should be used only + * for small size vectors + * TODO Use modified QuickSplit or std::nth_element to get the smallest values + */ +template +void sortWithPermutation (VectorType& vec, IndexType& perm, typename IndexType::Scalar& ncut) +{ + eigen_assert(vec.size() == perm.size()); + bool flag; + for (Index k = 0; k < ncut; k++) + { + flag = false; + for (Index j = 0; j < vec.size()-1; j++) + { + if ( vec(perm(j)) < vec(perm(j+1)) ) + { + std::swap(perm(j),perm(j+1)); + flag = true; + } + if (!flag) break; // The vector is in sorted order + } + } +} + +} +/** + * \ingroup IterativeLinearSolvers_Module + * \brief A Restarted GMRES with deflation. + * This class implements a modification of the GMRES solver for + * sparse linear systems. The basis is built with modified + * Gram-Schmidt. At each restart, a few approximated eigenvectors + * corresponding to the smallest eigenvalues are used to build a + * preconditioner for the next cycle. This preconditioner + * for deflation can be combined with any other preconditioner, + * the IncompleteLUT for instance. The preconditioner is applied + * at right of the matrix and the combination is multiplicative. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * Typical usage : + * \code + * SparseMatrix A; + * VectorXd x, b; + * //Fill A and b ... + * DGMRES > solver; + * solver.set_restart(30); // Set restarting value + * solver.setEigenv(1); // Set the number of eigenvalues to deflate + * solver.compute(A); + * x = solver.solve(b); + * \endcode + * + * DGMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * References : + * [1] D. NUENTSA WAKAM and F. 
PACULL, Memory Efficient Hybrid + * Algebraic Solvers for Linear Systems Arising from Compressible + * Flows, Computers and Fluids, In Press, + * https://doi.org/10.1016/j.compfluid.2012.03.023 + * [2] K. Burrage and J. Erhel, On the performance of various + * adaptive preconditioned GMRES strategies, 5(1998), 101-121. + * [3] J. Erhel, K. Burrage and B. Pohl, Restarted GMRES + * preconditioned by deflation,J. Computational and Applied + * Mathematics, 69(1996), 303-318. + + * + */ +template< typename _MatrixType, typename _Preconditioner> +class DGMRES : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; + using Base::m_tolerance; + public: + using Base::_solve_impl; + using Base::_solve_with_guess_impl; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::StorageIndex StorageIndex; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + typedef Matrix DenseMatrix; + typedef Matrix DenseRealMatrix; + typedef Matrix DenseVector; + typedef Matrix DenseRealVector; + typedef Matrix, Dynamic, 1> ComplexVector; + + + /** Default constructor. */ + DGMRES() : Base(),m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. 
+ */ + template + explicit DGMRES(const EigenBase& A) : Base(A.derived()), m_restart(30),m_neig(0),m_r(0),m_maxNeig(5),m_isDeflAllocated(false),m_isDeflInitialized(false) {} + + ~DGMRES() {} + + /** \internal */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + EIGEN_STATIC_ASSERT(Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1, YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX); + + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + dgmres(matrix(), b, x, Base::m_preconditioner); + } + + /** + * Get the restart value + */ + Index restart() { return m_restart; } + + /** + * Set the restart value (default is 30) + */ + void set_restart(const Index restart) { m_restart=restart; } + + /** + * Set the number of eigenvalues to deflate at each restart + */ + void setEigenv(const Index neig) + { + m_neig = neig; + if (neig+1 > m_maxNeig) m_maxNeig = neig+1; // To allow for complex conjugates + } + + /** + * Get the size of the deflation subspace size + */ + Index deflSize() {return m_r; } + + /** + * Set the maximum size of the deflation subspace + */ + void setMaxEigenv(const Index maxNeig) { m_maxNeig = maxNeig; } + + protected: + // DGMRES algorithm + template + void dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, const Preconditioner& precond) const; + // Perform one cycle of GMRES + template + Index dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const; + // Compute data to use for deflation + Index dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const; + // Apply deflation to a vector + template + Index dgmresApplyDeflation(const RhsType& In, DestType& Out) const; + ComplexVector schurValues(const ComplexSchur& schurofH) const; + ComplexVector schurValues(const RealSchur& schurofH) const; + // Init data for deflation + void 
dgmresInitDeflation(Index& rows) const; + mutable DenseMatrix m_V; // Krylov basis vectors + mutable DenseMatrix m_H; // Hessenberg matrix + mutable DenseMatrix m_Hes; // Initial hessenberg matrix without Givens rotations applied + mutable Index m_restart; // Maximum size of the Krylov subspace + mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace + mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles) + mutable DenseMatrix m_T; /* T=U^T*M^{-1}*A*U */ + mutable PartialPivLU m_luT; // LU factorization of m_T + mutable StorageIndex m_neig; //Number of eigenvalues to extract at each restart + mutable Index m_r; // Current number of deflated eigenvalues, size of m_U + mutable Index m_maxNeig; // Maximum number of eigenvalues to deflate + mutable RealScalar m_lambdaN; //Modulus of the largest eigenvalue of A + mutable bool m_isDeflAllocated; + mutable bool m_isDeflInitialized; + + //Adaptive strategy + mutable RealScalar m_smv; // Smaller multiple of the remaining number of steps allowed + mutable bool m_force; // Force the use of deflation at each restart + +}; +/** + * \brief Perform several cycles of restarted GMRES with modified Gram Schmidt, + * + * A right preconditioner is used combined with deflation. 
+ * + */ +template< typename _MatrixType, typename _Preconditioner> +template +void DGMRES<_MatrixType, _Preconditioner>::dgmres(const MatrixType& mat,const Rhs& rhs, Dest& x, + const Preconditioner& precond) const +{ + const RealScalar considerAsZero = (std::numeric_limits::min)(); + + RealScalar normRhs = rhs.norm(); + if(normRhs <= considerAsZero) + { + x.setZero(); + m_error = 0; + return; + } + + //Initialization + m_isDeflInitialized = false; + Index n = mat.rows(); + DenseVector r0(n); + Index nbIts = 0; + m_H.resize(m_restart+1, m_restart); + m_Hes.resize(m_restart, m_restart); + m_V.resize(n,m_restart+1); + //Initial residual vector and initial norm + if(x.squaredNorm()==0) + x = precond.solve(rhs); + r0 = rhs - mat * x; + RealScalar beta = r0.norm(); + + m_error = beta/normRhs; + if(m_error < m_tolerance) + m_info = Success; + else + m_info = NoConvergence; + + // Iterative process + while (nbIts < m_iterations && m_info == NoConvergence) + { + dgmresCycle(mat, precond, x, r0, beta, normRhs, nbIts); + + // Compute the new residual vector for the restart + if (nbIts < m_iterations && m_info == NoConvergence) { + r0 = rhs - mat * x; + beta = r0.norm(); + } + } +} + +/** + * \brief Perform one restart cycle of DGMRES + * \param mat The coefficient matrix + * \param precond The preconditioner + * \param x the new approximated solution + * \param r0 The initial residual vector + * \param beta The norm of the residual computed so far + * \param normRhs The norm of the right hand side vector + * \param nbIts The number of iterations + */ +template< typename _MatrixType, typename _Preconditioner> +template +Index DGMRES<_MatrixType, _Preconditioner>::dgmresCycle(const MatrixType& mat, const Preconditioner& precond, Dest& x, DenseVector& r0, RealScalar& beta, const RealScalar& normRhs, Index& nbIts) const +{ + //Initialization + DenseVector g(m_restart+1); // Right hand side of the least square problem + g.setZero(); + g(0) = Scalar(beta); + m_V.col(0) = r0/beta; 
+ m_info = NoConvergence; + std::vector >gr(m_restart); // Givens rotations + Index it = 0; // Number of inner iterations + Index n = mat.rows(); + DenseVector tv1(n), tv2(n); //Temporary vectors + while (m_info == NoConvergence && it < m_restart && nbIts < m_iterations) + { + // Apply preconditioner(s) at right + if (m_isDeflInitialized ) + { + dgmresApplyDeflation(m_V.col(it), tv1); // Deflation + tv2 = precond.solve(tv1); + } + else + { + tv2 = precond.solve(m_V.col(it)); // User's selected preconditioner + } + tv1 = mat * tv2; + + // Orthogonalize it with the previous basis in the basis using modified Gram-Schmidt + Scalar coef; + for (Index i = 0; i <= it; ++i) + { + coef = tv1.dot(m_V.col(i)); + tv1 = tv1 - coef * m_V.col(i); + m_H(i,it) = coef; + m_Hes(i,it) = coef; + } + // Normalize the vector + coef = tv1.norm(); + m_V.col(it+1) = tv1/coef; + m_H(it+1, it) = coef; +// m_Hes(it+1,it) = coef; + + // FIXME Check for happy breakdown + + // Update Hessenberg matrix with Givens rotations + for (Index i = 1; i <= it; ++i) + { + m_H.col(it).applyOnTheLeft(i-1,i,gr[i-1].adjoint()); + } + // Compute the new plane rotation + gr[it].makeGivens(m_H(it, it), m_H(it+1,it)); + // Apply the new rotation + m_H.col(it).applyOnTheLeft(it,it+1,gr[it].adjoint()); + g.applyOnTheLeft(it,it+1, gr[it].adjoint()); + + beta = std::abs(g(it+1)); + m_error = beta/normRhs; + // std::cerr << nbIts << " Relative Residual Norm " << m_error << std::endl; + it++; nbIts++; + + if (m_error < m_tolerance) + { + // The method has converged + m_info = Success; + break; + } + } + + // Compute the new coefficients by solving the least square problem +// it++; + //FIXME Check first if the matrix is singular ... 
zero diagonal + DenseVector nrs(m_restart); + nrs = m_H.topLeftCorner(it,it).template triangularView().solve(g.head(it)); + + // Form the new solution + if (m_isDeflInitialized) + { + tv1 = m_V.leftCols(it) * nrs; + dgmresApplyDeflation(tv1, tv2); + x = x + precond.solve(tv2); + } + else + x = x + precond.solve(m_V.leftCols(it) * nrs); + + // Go for a new cycle and compute data for deflation + if(nbIts < m_iterations && m_info == NoConvergence && m_neig > 0 && (m_r+m_neig) < m_maxNeig) + dgmresComputeDeflationData(mat, precond, it, m_neig); + return 0; + +} + + +template< typename _MatrixType, typename _Preconditioner> +void DGMRES<_MatrixType, _Preconditioner>::dgmresInitDeflation(Index& rows) const +{ + m_U.resize(rows, m_maxNeig); + m_MU.resize(rows, m_maxNeig); + m_T.resize(m_maxNeig, m_maxNeig); + m_lambdaN = 0.0; + m_isDeflAllocated = true; +} + +template< typename _MatrixType, typename _Preconditioner> +inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const ComplexSchur& schurofH) const +{ + return schurofH.matrixT().diagonal(); +} + +template< typename _MatrixType, typename _Preconditioner> +inline typename DGMRES<_MatrixType, _Preconditioner>::ComplexVector DGMRES<_MatrixType, _Preconditioner>::schurValues(const RealSchur& schurofH) const +{ + const DenseMatrix& T = schurofH.matrixT(); + Index it = T.rows(); + ComplexVector eig(it); + Index j = 0; + while (j < it-1) + { + if (T(j+1,j) ==Scalar(0)) + { + eig(j) = std::complex(T(j,j),RealScalar(0)); + j++; + } + else + { + eig(j) = std::complex(T(j,j),T(j+1,j)); + eig(j+1) = std::complex(T(j,j+1),T(j+1,j+1)); + j++; + } + } + if (j < it-1) eig(j) = std::complex(T(j,j),RealScalar(0)); + return eig; +} + +template< typename _MatrixType, typename _Preconditioner> +Index DGMRES<_MatrixType, _Preconditioner>::dgmresComputeDeflationData(const MatrixType& mat, const Preconditioner& precond, const Index& it, StorageIndex& neig) const +{ + // First, 
find the Schur form of the Hessenberg matrix H + typename internal::conditional::IsComplex, ComplexSchur, RealSchur >::type schurofH; + bool computeU = true; + DenseMatrix matrixQ(it,it); + matrixQ.setIdentity(); + schurofH.computeFromHessenberg(m_Hes.topLeftCorner(it,it), matrixQ, computeU); + + ComplexVector eig(it); + Matrixperm(it); + eig = this->schurValues(schurofH); + + // Reorder the absolute values of Schur values + DenseRealVector modulEig(it); + for (Index j=0; j(it-1)); + internal::sortWithPermutation(modulEig, perm, neig); + + if (!m_lambdaN) + { + m_lambdaN = (std::max)(modulEig.maxCoeff(), m_lambdaN); + } + //Count the real number of extracted eigenvalues (with complex conjugates) + Index nbrEig = 0; + while (nbrEig < neig) + { + if(eig(perm(it-nbrEig-1)).imag() == RealScalar(0)) nbrEig++; + else nbrEig += 2; + } + // Extract the Schur vectors corresponding to the smallest Ritz values + DenseMatrix Sr(it, nbrEig); + Sr.setZero(); + for (Index j = 0; j < nbrEig; j++) + { + Sr.col(j) = schurofH.matrixU().col(perm(it-j-1)); + } + + // Form the Schur vectors of the initial matrix using the Krylov basis + DenseMatrix X; + X = m_V.leftCols(it) * Sr; + if (m_r) + { + // Orthogonalize X against m_U using modified Gram-Schmidt + for (Index j = 0; j < nbrEig; j++) + for (Index k =0; k < m_r; k++) + X.col(j) = X.col(j) - (m_U.col(k).dot(X.col(j)))*m_U.col(k); + } + + // Compute m_MX = A * M^-1 * X + Index m = m_V.rows(); + if (!m_isDeflAllocated) + dgmresInitDeflation(m); + DenseMatrix MX(m, nbrEig); + DenseVector tv1(m); + for (Index j = 0; j < nbrEig; j++) + { + tv1 = mat * X.col(j); + MX.col(j) = precond.solve(tv1); + } + + //Update m_T = [U'MU U'MX; X'MU X'MX] + m_T.block(m_r, m_r, nbrEig, nbrEig) = X.transpose() * MX; + if(m_r) + { + m_T.block(0, m_r, m_r, nbrEig) = m_U.leftCols(m_r).transpose() * MX; + m_T.block(m_r, 0, nbrEig, m_r) = X.transpose() * m_MU.leftCols(m_r); + } + + // Save X into m_U and m_MX in m_MU + for (Index j = 0; j < nbrEig; j++) 
m_U.col(m_r+j) = X.col(j); + for (Index j = 0; j < nbrEig; j++) m_MU.col(m_r+j) = MX.col(j); + // Increase the size of the invariant subspace + m_r += nbrEig; + + // Factorize m_T into m_luT + m_luT.compute(m_T.topLeftCorner(m_r, m_r)); + + //FIXME CHeck if the factorization was correctly done (nonsingular matrix) + m_isDeflInitialized = true; + return 0; +} +template +template +Index DGMRES<_MatrixType, _Preconditioner>::dgmresApplyDeflation(const RhsType &x, DestType &y) const +{ + DenseVector x1 = m_U.leftCols(m_r).transpose() * x; + y = x + m_U.leftCols(m_r) * ( m_lambdaN * m_luT.solve(x1) - x1); + return 0; +} + +} // end namespace Eigen +#endif diff --git a/external/unsupported/Eigen/src/IterativeSolvers/GMRES.h b/external/unsupported/Eigen/src/IterativeSolvers/GMRES.h new file mode 100644 index 0000000..ff91209 --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/GMRES.h @@ -0,0 +1,335 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012, 2014 Kolja Brix +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GMRES_H +#define EIGEN_GMRES_H + +namespace Eigen { + +namespace internal { + +/** +* Generalized Minimal Residual Algorithm based on the +* Arnoldi algorithm implemented with Householder reflections. 
+* +* Parameters: +* \param mat matrix of linear system of equations +* \param rhs right hand side vector of linear system of equations +* \param x on input: initial guess, on output: solution +* \param precond preconditioner used +* \param iters on input: maximum number of iterations to perform +* on output: number of iterations performed +* \param restart number of iterations for a restart +* \param tol_error on input: relative residual tolerance +* on output: residuum achieved +* +* \sa IterativeMethods::bicgstab() +* +* +* For references, please see: +* +* Saad, Y. and Schultz, M. H. +* GMRES: A Generalized Minimal Residual Algorithm for Solving Nonsymmetric Linear Systems. +* SIAM J.Sci.Stat.Comp. 7, 1986, pp. 856 - 869. +* +* Saad, Y. +* Iterative Methods for Sparse Linear Systems. +* Society for Industrial and Applied Mathematics, Philadelphia, 2003. +* +* Walker, H. F. +* Implementations of the GMRES method. +* Comput.Phys.Comm. 53, 1989, pp. 311 - 320. +* +* Walker, H. F. +* Implementation of the GMRES Method using Householder Transformations. +* SIAM J.Sci.Stat.Comp. 9, 1988, pp. 152 - 163. 
+* +*/ +template +bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond, + Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) { + + using std::sqrt; + using std::abs; + + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix < Scalar, Dynamic, 1 > VectorType; + typedef Matrix < Scalar, Dynamic, Dynamic, ColMajor> FMatrixType; + + const RealScalar considerAsZero = (std::numeric_limits::min)(); + + if(rhs.norm() <= considerAsZero) + { + x.setZero(); + tol_error = 0; + return true; + } + + RealScalar tol = tol_error; + const Index maxIters = iters; + iters = 0; + + const Index m = mat.rows(); + + // residual and preconditioned residual + VectorType p0 = rhs - mat*x; + VectorType r0 = precond.solve(p0); + + const RealScalar r0Norm = r0.norm(); + + // is initial guess already good enough? + if(r0Norm == 0) + { + tol_error = 0; + return true; + } + + // storage for Hessenberg matrix and Householder data + FMatrixType H = FMatrixType::Zero(m, restart + 1); + VectorType w = VectorType::Zero(restart + 1); + VectorType tau = VectorType::Zero(restart + 1); + + // storage for Jacobi rotations + std::vector < JacobiRotation < Scalar > > G(restart); + + // storage for temporaries + VectorType t(m), v(m), workspace(m), x_new(m); + + // generate first Householder vector + Ref H0_tail = H.col(0).tail(m - 1); + RealScalar beta; + r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta); + w(0) = Scalar(beta); + + for (Index k = 1; k <= restart; ++k) + { + ++iters; + + v = VectorType::Unit(m, k - 1); + + // apply Householder reflections H_{1} ... H_{k-1} to v + // TODO: use a HouseholderSequence + for (Index i = k - 1; i >= 0; --i) { + v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); + } + + // apply matrix M to v: v = mat * v; + t.noalias() = mat * v; + v = precond.solve(t); + + // apply Householder reflections H_{k-1} ... 
H_{1} to v + // TODO: use a HouseholderSequence + for (Index i = 0; i < k; ++i) { + v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); + } + + if (v.tail(m - k).norm() != 0.0) + { + if (k <= restart) + { + // generate new Householder vector + Ref Hk_tail = H.col(k).tail(m - k - 1); + v.tail(m - k).makeHouseholder(Hk_tail, tau.coeffRef(k), beta); + + // apply Householder reflection H_{k} to v + v.tail(m - k).applyHouseholderOnTheLeft(Hk_tail, tau.coeffRef(k), workspace.data()); + } + } + + if (k > 1) + { + for (Index i = 0; i < k - 1; ++i) + { + // apply old Givens rotations to v + v.applyOnTheLeft(i, i + 1, G[i].adjoint()); + } + } + + if (k y = w.head(k); + H.topLeftCorner(k, k).template triangularView ().solveInPlace(y); + + // use Horner-like scheme to calculate solution vector + x_new.setZero(); + for (Index i = k - 1; i >= 0; --i) + { + x_new(i) += y(i); + // apply Householder reflection H_{i} to x_new + x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); + } + + x += x_new; + + if(stop) + { + return true; + } + else + { + k=0; + + // reset data for restart + p0.noalias() = rhs - mat*x; + r0 = precond.solve(p0); + + // clear Hessenberg matrix and Householder data + H.setZero(); + w.setZero(); + tau.setZero(); + + // generate first Householder vector + r0.makeHouseholder(H0_tail, tau.coeffRef(0), beta); + w(0) = Scalar(beta); + } + } + } + + return false; + +} + +} + +template< typename _MatrixType, + typename _Preconditioner = DiagonalPreconditioner > +class GMRES; + +namespace internal { + +template< typename _MatrixType, typename _Preconditioner> +struct traits > +{ + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; +}; + +} + +/** \ingroup IterativeLinearSolvers_Module + * \brief A GMRES solver for sparse square problems + * + * This class allows to solve for A.x = b sparse linear problems using a generalized minimal + * residual 
method. The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * GMRES > solver(A); + * x = solver.solve(b); + * std::cout << "#iterations: " << solver.iterations() << std::endl; + * std::cout << "estimated error: " << solver.error() << std::endl; + * // update b, and solve again + * x = solver.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * GMRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ +template< typename _MatrixType, typename _Preconditioner> +class GMRES : public IterativeSolverBase > +{ + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; + +private: + Index m_restart; + +public: + using Base::_solve_impl; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + +public: + + /** Default constructor. */ + GMRES() : Base(), m_restart(30) {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. 
+ * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + explicit GMRES(const EigenBase& A) : Base(A.derived()), m_restart(30) {} + + ~GMRES() {} + + /** Get the number of iterations after that a restart is performed. + */ + Index get_restart() { return m_restart; } + + /** Set the number of iterations after that a restart is performed. + * \param restart number of iterations for a restarti, default is 30. + */ + void set_restart(const Index restart) { m_restart=restart; } + + /** \internal */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + bool ret = internal::gmres(matrix(), b, x, Base::m_preconditioner, m_iterations, m_restart, m_error); + m_info = (!ret) ? NumericalIssue + : m_error <= Base::m_tolerance ? Success + : NoConvergence; + } + +protected: + +}; + +} // end namespace Eigen + +#endif // EIGEN_GMRES_H diff --git a/external/unsupported/Eigen/src/IterativeSolvers/IDRS.h b/external/unsupported/Eigen/src/IterativeSolvers/IDRS.h new file mode 100644 index 0000000..90d20fa --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/IDRS.h @@ -0,0 +1,436 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2020 Chris Schoutrop +// Copyright (C) 2020 Jens Wehner +// Copyright (C) 2020 Jan van Dijk +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ + +#ifndef EIGEN_IDRS_H +#define EIGEN_IDRS_H + +namespace Eigen +{ + + namespace internal + { + /** \internal Low-level Induced Dimension Reduction algoritm + \param A The matrix A + \param b The right hand side vector b + \param x On input and initial solution, on output the computed solution. + \param precond A preconditioner being able to efficiently solve for an + approximation of Ax=b (regardless of b) + \param iter On input the max number of iteration, on output the number of performed iterations. + \param relres On input the tolerance error, on output an estimation of the relative error. + \param S On input Number of the dimension of the shadow space. + \param smoothing switches residual smoothing on. + \param angle small omega lead to faster convergence at the expense of numerical stability + \param replacement switches on a residual replacement strategy to increase accuracy of residual at the expense of more Mat*vec products + \return false in the case of numerical issue, for example a break down of IDRS. 
+ */ + template + typename Vector::Scalar omega(const Vector& t, const Vector& s, RealScalar angle) + { + using numext::abs; + typedef typename Vector::Scalar Scalar; + const RealScalar ns = s.norm(); + const RealScalar nt = t.norm(); + const Scalar ts = t.dot(s); + const RealScalar rho = abs(ts / (nt * ns)); + + if (rho < angle) { + if (ts == Scalar(0)) { + return Scalar(0); + } + // Original relation for om is given by + // om = om * angle / rho; + // To alleviate potential (near) division by zero this can be rewritten as + // om = angle * (ns / nt) * (ts / abs(ts)) = angle * (ns / nt) * sgn(ts) + return angle * (ns / nt) * (ts / abs(ts)); + } + return ts / (nt * nt); + } + + template + bool idrs(const MatrixType& A, const Rhs& b, Dest& x, const Preconditioner& precond, + Index& iter, + typename Dest::RealScalar& relres, Index S, bool smoothing, typename Dest::RealScalar angle, bool replacement) + { + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + typedef Matrix DenseMatrixType; + const Index N = b.size(); + S = S < x.rows() ? S : x.rows(); + const RealScalar tol = relres; + const Index maxit = iter; + + Index replacements = 0; + bool trueres = false; + + FullPivLU lu_solver; + + DenseMatrixType P; + { + HouseholderQR qr(DenseMatrixType::Random(N, S)); + P = (qr.householderQ() * DenseMatrixType::Identity(N, S)); + } + + const RealScalar normb = b.norm(); + + if (internal::isApprox(normb, RealScalar(0))) + { + //Solution is the zero vector + x.setZero(); + iter = 0; + relres = 0; + return true; + } + // from http://homepage.tudelft.nl/1w5b5/IDRS/manual.pdf + // A peak in the residual is considered dangerously high if‖ri‖/‖b‖> C(tol/epsilon). + // With epsilon the + // relative machine precision. 
The factor tol/epsilon corresponds to the size of a + // finite precision number that is so large that the absolute round-off error in + // this number, when propagated through the process, makes it impossible to + // achieve the required accuracy.The factor C accounts for the accumulation of + // round-off errors. This parameter has beenset to 10−3. + // mp is epsilon/C + // 10^3 * eps is very conservative, so normally no residual replacements will take place. + // It only happens if things go very wrong. Too many restarts may ruin the convergence. + const RealScalar mp = RealScalar(1e3) * NumTraits::epsilon(); + + + + //Compute initial residual + const RealScalar tolb = tol * normb; //Relative tolerance + VectorType r = b - A * x; + + VectorType x_s, r_s; + + if (smoothing) + { + x_s = x; + r_s = r; + } + + RealScalar normr = r.norm(); + + if (normr <= tolb) + { + //Initial guess is a good enough solution + iter = 0; + relres = normr / normb; + return true; + } + + DenseMatrixType G = DenseMatrixType::Zero(N, S); + DenseMatrixType U = DenseMatrixType::Zero(N, S); + DenseMatrixType M = DenseMatrixType::Identity(S, S); + VectorType t(N), v(N); + Scalar om = 1.; + + //Main iteration loop, guild G-spaces: + iter = 0; + + while (normr > tolb && iter < maxit) + { + //New right hand size for small system: + VectorType f = (r.adjoint() * P).adjoint(); + + for (Index k = 0; k < S; ++k) + { + //Solve small system and make v orthogonal to P: + //c = M(k:s,k:s)\f(k:s); + lu_solver.compute(M.block(k , k , S -k, S - k )); + VectorType c = lu_solver.solve(f.segment(k , S - k )); + //v = r - G(:,k:s)*c; + v = r - G.rightCols(S - k ) * c; + //Preconditioning + v = precond.solve(v); + + //Compute new U(:,k) and G(:,k), G(:,k) is in space G_j + U.col(k) = U.rightCols(S - k ) * c + om * v; + G.col(k) = A * U.col(k ); + + //Bi-Orthogonalise the new basis vectors: + for (Index i = 0; i < k-1 ; ++i) + { + //alpha = ( P(:,i)'*G(:,k) )/M(i,i); + Scalar alpha = P.col(i ).dot(G.col(k )) / 
M(i, i ); + G.col(k ) = G.col(k ) - alpha * G.col(i ); + U.col(k ) = U.col(k ) - alpha * U.col(i ); + } + + //New column of M = P'*G (first k-1 entries are zero) + //M(k:s,k) = (G(:,k)'*P(:,k:s))'; + M.block(k , k , S - k , 1) = (G.col(k ).adjoint() * P.rightCols(S - k )).adjoint(); + + if (internal::isApprox(M(k,k), Scalar(0))) + { + return false; + } + + //Make r orthogonal to q_i, i = 0..k-1 + Scalar beta = f(k ) / M(k , k ); + r = r - beta * G.col(k ); + x = x + beta * U.col(k ); + normr = r.norm(); + + if (replacement && normr > tolb / mp) + { + trueres = true; + } + + //Smoothing: + if (smoothing) + { + t = r_s - r; + //gamma is a Scalar, but the conversion is not allowed + Scalar gamma = t.dot(r_s) / t.norm(); + r_s = r_s - gamma * t; + x_s = x_s - gamma * (x_s - x); + normr = r_s.norm(); + } + + if (normr < tolb || iter == maxit) + { + break; + } + + //New f = P'*r (first k components are zero) + if (k < S-1) + { + f.segment(k + 1, S - (k + 1) ) = f.segment(k + 1 , S - (k + 1)) - beta * M.block(k + 1 , k , S - (k + 1), 1); + } + }//end for + + if (normr < tolb || iter == maxit) + { + break; + } + + //Now we have sufficient vectors in G_j to compute residual in G_j+1 + //Note: r is already perpendicular to P so v = r + //Preconditioning + v = r; + v = precond.solve(v); + + //Matrix-vector multiplication: + t = A * v; + + //Computation of a new omega + om = internal::omega(t, r, angle); + + if (om == RealScalar(0.0)) + { + return false; + } + + r = r - om * t; + x = x + om * v; + normr = r.norm(); + + if (replacement && normr > tolb / mp) + { + trueres = true; + } + + //Residual replacement? 
+ if (trueres && normr < normb) + { + r = b - A * x; + trueres = false; + replacements++; + } + + //Smoothing: + if (smoothing) + { + t = r_s - r; + Scalar gamma = t.dot(r_s) /t.norm(); + r_s = r_s - gamma * t; + x_s = x_s - gamma * (x_s - x); + normr = r_s.norm(); + } + + iter++; + + }//end while + + if (smoothing) + { + x = x_s; + } + relres=normr/normb; + return true; + } + + } // namespace internal + + template > + class IDRS; + + namespace internal + { + + template + struct traits > + { + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; + }; + + } // namespace internal + + +/** \ingroup IterativeLinearSolvers_Module + * \brief The Induced Dimension Reduction method (IDR(s)) is a short-recurrences Krylov method for sparse square problems. + * + * This class allows to solve for A.x = b sparse linear problems. The vectors x and b can be either dense or sparse. + * he Induced Dimension Reduction method, IDR(), is a robust and efficient short-recurrence Krylov subspace method for + * solving large nonsymmetric systems of linear equations. + * + * For indefinite systems IDR(S) outperforms both BiCGStab and BiCGStab(L). Additionally, IDR(S) can handle matrices + * with complex eigenvalues more efficiently than BiCGStab. + * + * Many problems that do not converge for BiCGSTAB converge for IDR(s) (for larger values of s). And if both methods + * converge the convergence for IDR(s) is typically much faster for difficult systems (for example indefinite problems). + * + * IDR(s) is a limited memory finite termination method. In exact arithmetic it converges in at most N+N/s iterations, + * with N the system size. It uses a fixed number of 4+3s vector. In comparison, BiCGSTAB terminates in 2N iterations + * and uses 7 vectors. GMRES terminates in at most N iterations, and uses I+3 vectors, with I the number of iterations. + * Restarting GMRES limits the memory consumption, but destroys the finite termination property. 
+ * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * \implsparsesolverconcept + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * The tolerance corresponds to the relative residual error: |Ax-b|/|b| + * + * \b Performance: when using sparse matrices, best performance is achied for a row-major sparse matrix format. + * Moreover, in this case multi-threading can be exploited if the user code is compiled with OpenMP enabled. + * See \ref TopicMultiThreading for details. + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * IDR(s) can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * \sa class SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ + template + class IDRS : public IterativeSolverBase > + { + + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + private: + typedef IterativeSolverBase Base; + using Base::m_error; + using Base::m_info; + using Base::m_isInitialized; + using Base::m_iterations; + using Base::matrix; + Index m_S; + bool m_smoothing; + RealScalar m_angle; + bool m_residual; + + public: + /** Default constructor. */ + IDRS(): m_S(4), m_smoothing(false), m_angle(RealScalar(0.7)), m_residual(false) {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + + This constructor is a shortcut for the default constructor followed + by a call to compute(). 
+ + \warning this class stores a reference to the matrix A as well as some + precomputed values that depend on it. Therefore, if \a A is changed + this class becomes invalid. Call compute() to update it with the new + matrix A, or modify a copy of A. + */ + template + explicit IDRS(const EigenBase& A) : Base(A.derived()), m_S(4), m_smoothing(false), + m_angle(RealScalar(0.7)), m_residual(false) {} + + + /** \internal */ + /** Loops over the number of columns of b and does the following: + 1. sets the tolerence and maxIterations + 2. Calls the function that has the core solver routine + */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + + bool ret = internal::idrs(matrix(), b, x, Base::m_preconditioner, m_iterations, m_error, m_S,m_smoothing,m_angle,m_residual); + + m_info = (!ret) ? NumericalIssue : m_error <= Base::m_tolerance ? Success : NoConvergence; + } + + /** Sets the parameter S, indicating the dimension of the shadow space. Default is 4*/ + void setS(Index S) + { + if (S < 1) + { + S = 4; + } + + m_S = S; + } + + /** Switches off and on smoothing. + Residual smoothing results in monotonically decreasing residual norms at + the expense of two extra vectors of storage and a few extra vector + operations. Although monotonic decrease of the residual norms is a + desirable property, the rate of convergence of the unsmoothed process and + the smoothed process is basically the same. Default is off */ + void setSmoothing(bool smoothing) + { + m_smoothing=smoothing; + } + + /** The angle must be a real scalar. In IDR(s), a value for the + iteration parameter omega must be chosen in every s+1th step. The most + natural choice is to select a value to minimize the norm of the next residual. + This corresponds to the parameter omega = 0. 
In practice, this may lead to + values of omega that are so small that the other iteration parameters + cannot be computed with sufficient accuracy. In such cases it is better to + increase the value of omega sufficiently such that a compromise is reached + between accurate computations and reduction of the residual norm. The + parameter angle =0.7 (”maintaining the convergence strategy”) + results in such a compromise. */ + void setAngle(RealScalar angle) + { + m_angle=angle; + } + + /** The parameter replace is a logical that determines whether a + residual replacement strategy is employed to increase the accuracy of the + solution. */ + void setResidualUpdate(bool update) + { + m_residual=update; + } + + }; + +} // namespace Eigen + +#endif /* EIGEN_IDRS_H */ diff --git a/external/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h b/external/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h new file mode 100644 index 0000000..7d08c35 --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/IncompleteLU.h @@ -0,0 +1,90 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_INCOMPLETE_LU_H +#define EIGEN_INCOMPLETE_LU_H + +namespace Eigen { + +template +class IncompleteLU : public SparseSolverBase > +{ + protected: + typedef SparseSolverBase > Base; + using Base::m_isInitialized; + + typedef _Scalar Scalar; + typedef Matrix Vector; + typedef typename Vector::Index Index; + typedef SparseMatrix FactorType; + + public: + typedef Matrix MatrixType; + + IncompleteLU() {} + + template + IncompleteLU(const MatrixType& mat) + { + compute(mat); + } + + Index rows() const { return m_lu.rows(); } + Index cols() const { return m_lu.cols(); } + + template + IncompleteLU& compute(const MatrixType& mat) + { + m_lu = mat; + int size = mat.cols(); + Vector diag(size); + for(int i=0; i + void _solve_impl(const Rhs& b, Dest& x) const + { + x = m_lu.template triangularView().solve(b); + x = m_lu.template triangularView().solve(x); + } + + protected: + FactorType m_lu; +}; + +} // end namespace Eigen + +#endif // EIGEN_INCOMPLETE_LU_H diff --git a/external/unsupported/Eigen/src/IterativeSolvers/IterationController.h b/external/unsupported/Eigen/src/IterativeSolvers/IterationController.h new file mode 100644 index 0000000..a116e09 --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/IterationController.h @@ -0,0 +1,154 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud + +/* NOTE The class IterationController has been adapted from the iteration + * class of the GMM++ and ITL libraries. + */ + +//======================================================================= +// Copyright (C) 1997-2001 +// Authors: Andrew Lumsdaine +// Lie-Quan Lee +// +// This file is part of the Iterative Template Library +// +// You should have received a copy of the License Agreement for the +// Iterative Template Library along with the software; see the +// file LICENSE. 
+// +// Permission to modify the code and to distribute modified code is +// granted, provided the text of this NOTICE is retained, a notice that +// the code was modified is included with the above COPYRIGHT NOTICE and +// with the COPYRIGHT NOTICE in the LICENSE file, and that the LICENSE +// file is distributed with the modified code. +// +// LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. +// By way of example, but not limitation, Licensor MAKES NO +// REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY +// PARTICULAR PURPOSE OR THAT THE USE OF THE LICENSED SOFTWARE COMPONENTS +// OR DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS +// OR OTHER RIGHTS. +//======================================================================= + +//======================================================================== +// +// Copyright (C) 2002-2007 Yves Renard +// +// This file is a part of GETFEM++ +// +// Getfem++ is free software; you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation; version 2.1 of the License. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// You should have received a copy of the GNU Lesser General Public +// License along with this program; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, +// USA. 
+// +//======================================================================== + +#include "../../../../Eigen/src/Core/util/NonMPL2.h" + +#ifndef EIGEN_ITERATION_CONTROLLER_H +#define EIGEN_ITERATION_CONTROLLER_H + +namespace Eigen { + +/** \ingroup IterativeLinearSolvers_Module + * \class IterationController + * + * \brief Controls the iterations of the iterative solvers + * + * This class has been adapted from the iteration class of GMM++ and ITL libraries. + * + */ +class IterationController +{ + protected : + double m_rhsn; ///< Right hand side norm + size_t m_maxiter; ///< Max. number of iterations + int m_noise; ///< if noise > 0 iterations are printed + double m_resmax; ///< maximum residual + double m_resminreach, m_resadd; + size_t m_nit; ///< iteration number + double m_res; ///< last computed residual + bool m_written; + void (*m_callback)(const IterationController&); + public : + + void init() + { + m_nit = 0; m_res = 0.0; m_written = false; + m_resminreach = 1E50; m_resadd = 0.0; + m_callback = 0; + } + + IterationController(double r = 1.0E-8, int noi = 0, size_t mit = size_t(-1)) + : m_rhsn(1.0), m_maxiter(mit), m_noise(noi), m_resmax(r) { init(); } + + void operator ++(int) { m_nit++; m_written = false; m_resadd += m_res; } + void operator ++() { (*this)++; } + + bool first() { return m_nit == 0; } + + /* get/set the "noisyness" (verbosity) of the solvers */ + int noiseLevel() const { return m_noise; } + void setNoiseLevel(int n) { m_noise = n; } + void reduceNoiseLevel() { if (m_noise > 0) m_noise--; } + + double maxResidual() const { return m_resmax; } + void setMaxResidual(double r) { m_resmax = r; } + + double residual() const { return m_res; } + + /* change the user-definable callback, called after each iteration */ + void setCallback(void (*t)(const IterationController&)) + { + m_callback = t; + } + + size_t iteration() const { return m_nit; } + void setIteration(size_t i) { m_nit = i; } + + size_t maxIterarions() const { return m_maxiter; } + 
void setMaxIterations(size_t i) { m_maxiter = i; } + + double rhsNorm() const { return m_rhsn; } + void setRhsNorm(double r) { m_rhsn = r; } + + bool converged() const { return m_res <= m_rhsn * m_resmax; } + bool converged(double nr) + { + using std::abs; + m_res = abs(nr); + m_resminreach = (std::min)(m_resminreach, m_res); + return converged(); + } + template bool converged(const VectorType &v) + { return converged(v.squaredNorm()); } + + bool finished(double nr) + { + if (m_callback) m_callback(*this); + if (m_noise > 0 && !m_written) + { + converged(nr); + m_written = true; + } + return (m_nit >= m_maxiter || converged(nr)); + } + template + bool finished(const MatrixBase &v) + { return finished(double(v.squaredNorm())); } + +}; + +} // end namespace Eigen + +#endif // EIGEN_ITERATION_CONTROLLER_H diff --git a/external/unsupported/Eigen/src/IterativeSolvers/MINRES.h b/external/unsupported/Eigen/src/IterativeSolvers/MINRES.h new file mode 100644 index 0000000..5db454d --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/MINRES.h @@ -0,0 +1,267 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Giacomo Po +// Copyright (C) 2011-2014 Gael Guennebaud +// Copyright (C) 2018 David Hyde +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_MINRES_H_ +#define EIGEN_MINRES_H_ + + +namespace Eigen { + + namespace internal { + + /** \internal Low-level MINRES algorithm + * \param mat The matrix A + * \param rhs The right hand side vector b + * \param x On input and initial solution, on output the computed solution. 
+ * \param precond A right preconditioner being able to efficiently solve for an + * approximation of Ax=b (regardless of b) + * \param iters On input the max number of iteration, on output the number of performed iterations. + * \param tol_error On input the tolerance error, on output an estimation of the relative error. + */ + template + EIGEN_DONT_INLINE + void minres(const MatrixType& mat, const Rhs& rhs, Dest& x, + const Preconditioner& precond, Index& iters, + typename Dest::RealScalar& tol_error) + { + using std::sqrt; + typedef typename Dest::RealScalar RealScalar; + typedef typename Dest::Scalar Scalar; + typedef Matrix VectorType; + + // Check for zero rhs + const RealScalar rhsNorm2(rhs.squaredNorm()); + if(rhsNorm2 == 0) + { + x.setZero(); + iters = 0; + tol_error = 0; + return; + } + + // initialize + const Index maxIters(iters); // initialize maxIters to iters + const Index N(mat.cols()); // the size of the matrix + const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) + + // Initialize preconditioned Lanczos + VectorType v_old(N); // will be initialized inside loop + VectorType v( VectorType::Zero(N) ); //initialize v + VectorType v_new(rhs-mat*x); //initialize v_new + RealScalar residualNorm2(v_new.squaredNorm()); + VectorType w(N); // will be initialized inside loop + VectorType w_new(precond.solve(v_new)); // initialize w_new +// RealScalar beta; // will be initialized inside loop + RealScalar beta_new2(v_new.dot(w_new)); + eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); + RealScalar beta_new(sqrt(beta_new2)); + const RealScalar beta_one(beta_new); + // Initialize other variables + RealScalar c(1.0); // the cosine of the Givens rotation + RealScalar c_old(1.0); + RealScalar s(0.0); // the sine of the Givens rotation + RealScalar s_old(0.0); // the sine of the Givens rotation + VectorType p_oold(N); // will be initialized in loop + VectorType 
p_old(VectorType::Zero(N)); // initialize p_old=0 + VectorType p(p_old); // initialize p=0 + RealScalar eta(1.0); + + iters = 0; // reset iters + while ( iters < maxIters ) + { + // Preconditioned Lanczos + /* Note that there are 4 variants on the Lanczos algorithm. These are + * described in Paige, C. C. (1972). Computational variants of + * the Lanczos method for the eigenproblem. IMA Journal of Applied + * Mathematics, 10(3), 373-381. The current implementation corresponds + * to the case A(2,7) in the paper. It also corresponds to + * algorithm 6.14 in Y. Saad, Iterative Methods for Sparse Linear + * Systems, 2003 p.173. For the preconditioned version see + * A. Greenbaum, Iterative Methods for Solving Linear Systems, SIAM (1987). + */ + const RealScalar beta(beta_new); + v_old = v; // update: at first time step, this makes v_old = 0 so value of beta doesn't matter + v_new /= beta_new; // overwrite v_new for next iteration + w_new /= beta_new; // overwrite w_new for next iteration + v = v_new; // update + w = w_new; // update + v_new.noalias() = mat*w - beta*v_old; // compute v_new + const RealScalar alpha = v_new.dot(w); + v_new -= alpha*v; // overwrite v_new + w_new = precond.solve(v_new); // overwrite w_new + beta_new2 = v_new.dot(w_new); // compute beta_new + eigen_assert(beta_new2 >= 0.0 && "PRECONDITIONER IS NOT POSITIVE DEFINITE"); + beta_new = sqrt(beta_new2); // compute beta_new + + // Givens rotation + const RealScalar r2 =s*alpha+c*c_old*beta; // s, s_old, c and c_old are still from previous iteration + const RealScalar r3 =s_old*beta; // s, s_old, c and c_old are still from previous iteration + const RealScalar r1_hat=c*alpha-c_old*s*beta; + const RealScalar r1 =sqrt( std::pow(r1_hat,2) + std::pow(beta_new,2) ); + c_old = c; // store for next iteration + s_old = s; // store for next iteration + c=r1_hat/r1; // new cosine + s=beta_new/r1; // new sine + + // Update solution + p_oold = p_old; + p_old = p; + p.noalias()=(w-r2*p_old-r3*p_oold) /r1; // IS 
NOALIAS REQUIRED? + x += beta_one*c*eta*p; + + /* Update the squared residual. Note that this is the estimated residual. + The real residual |Ax-b|^2 may be slightly larger */ + residualNorm2 *= s*s; + + if ( residualNorm2 < threshold2) + { + break; + } + + eta=-s*eta; // update eta + iters++; // increment iteration number (for output purposes) + } + + /* Compute error. Note that this is the estimated error. The real + error |Ax-b|/|b| may be slightly larger */ + tol_error = std::sqrt(residualNorm2 / rhsNorm2); + } + + } + + template< typename _MatrixType, int _UpLo=Lower, + typename _Preconditioner = IdentityPreconditioner> + class MINRES; + + namespace internal { + + template< typename _MatrixType, int _UpLo, typename _Preconditioner> + struct traits > + { + typedef _MatrixType MatrixType; + typedef _Preconditioner Preconditioner; + }; + + } + + /** \ingroup IterativeLinearSolvers_Module + * \brief A minimal residual solver for sparse symmetric problems + * + * This class allows to solve for A.x = b sparse linear problems using the MINRES algorithm + * of Paige and Saunders (1975). The sparse matrix A must be symmetric (possibly indefinite). + * The vectors x and b can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, can be a dense or a sparse matrix. + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower, + * Upper, or Lower|Upper in which the full matrix entries will be considered. Default is Lower. + * \tparam _Preconditioner the type of the preconditioner. Default is DiagonalPreconditioner + * + * The maximal number of iterations and tolerance value can be controlled via the setMaxIterations() + * and setTolerance() methods. The defaults are the size of the problem for the maximal number of iterations + * and NumTraits::epsilon() for the tolerance. + * + * This class can be used as the direct solver classes. 
Here is a typical usage example: + * \code + * int n = 10000; + * VectorXd x(n), b(n); + * SparseMatrix A(n,n); + * // fill A and b + * MINRES > mr; + * mr.compute(A); + * x = mr.solve(b); + * std::cout << "#iterations: " << mr.iterations() << std::endl; + * std::cout << "estimated error: " << mr.error() << std::endl; + * // update b, and solve again + * x = mr.solve(b); + * \endcode + * + * By default the iterations start with x=0 as an initial guess of the solution. + * One can control the start using the solveWithGuess() method. + * + * MINRES can also be used in a matrix-free context, see the following \link MatrixfreeSolverExample example \endlink. + * + * \sa class ConjugateGradient, BiCGSTAB, SimplicialCholesky, DiagonalPreconditioner, IdentityPreconditioner + */ + template< typename _MatrixType, int _UpLo, typename _Preconditioner> + class MINRES : public IterativeSolverBase > + { + + typedef IterativeSolverBase Base; + using Base::matrix; + using Base::m_error; + using Base::m_iterations; + using Base::m_info; + using Base::m_isInitialized; + public: + using Base::_solve_impl; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef _Preconditioner Preconditioner; + + enum {UpLo = _UpLo}; + + public: + + /** Default constructor. */ + MINRES() : Base() {} + + /** Initialize the solver with matrix \a A for further \c Ax=b solving. + * + * This constructor is a shortcut for the default constructor followed + * by a call to compute(). + * + * \warning this class stores a reference to the matrix A as well as some + * precomputed values that depend on it. Therefore, if \a A is changed + * this class becomes invalid. Call compute() to update it with the new + * matrix A, or modify a copy of A. + */ + template + explicit MINRES(const EigenBase& A) : Base(A.derived()) {} + + /** Destructor. 
*/ + ~MINRES(){} + + /** \internal */ + template + void _solve_vector_with_guess_impl(const Rhs& b, Dest& x) const + { + typedef typename Base::MatrixWrapper MatrixWrapper; + typedef typename Base::ActualMatrixType ActualMatrixType; + enum { + TransposeInput = (!MatrixWrapper::MatrixFree) + && (UpLo==(Lower|Upper)) + && (!MatrixType::IsRowMajor) + && (!NumTraits::IsComplex) + }; + typedef typename internal::conditional, ActualMatrixType const&>::type RowMajorWrapper; + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(MatrixWrapper::MatrixFree,UpLo==(Lower|Upper)),MATRIX_FREE_CONJUGATE_GRADIENT_IS_COMPATIBLE_WITH_UPPER_UNION_LOWER_MODE_ONLY); + typedef typename internal::conditional::Type + >::type SelfAdjointWrapper; + + m_iterations = Base::maxIterations(); + m_error = Base::m_tolerance; + RowMajorWrapper row_mat(matrix()); + internal::minres(SelfAdjointWrapper(row_mat), b, x, + Base::m_preconditioner, m_iterations, m_error); + m_info = m_error <= Base::m_tolerance ? Success : NoConvergence; + } + + protected: + + }; + +} // end namespace Eigen + +#endif // EIGEN_MINRES_H diff --git a/external/unsupported/Eigen/src/IterativeSolvers/Scaling.h b/external/unsupported/Eigen/src/IterativeSolvers/Scaling.h new file mode 100644 index 0000000..9b3eb53 --- /dev/null +++ b/external/unsupported/Eigen/src/IterativeSolvers/Scaling.h @@ -0,0 +1,193 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire NUENTSA WAKAM A; + * // fill A and b; + * IterScaling > scal; + * // Compute the left and right scaling vectors. The matrix is equilibrated at output + * scal.computeRef(A); + * // Scale the right hand side + * b = scal.LeftScaling().cwiseProduct(b); + * // Now, solve the equilibrated linear system with any available solver + * + * // Scale back the computed solution + * x = scal.RightScaling().cwiseProduct(x); + * \endcode + * + * \tparam _MatrixType the type of the matrix. 
It should be a real square sparsematrix + * + * References : D. Ruiz and B. Ucar, A Symmetry Preserving Algorithm for Matrix Scaling, INRIA Research report RR-7552 + * + * \sa \ref IncompleteLUT + */ +template +class IterScaling +{ + public: + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + + public: + IterScaling() { init(); } + + IterScaling(const MatrixType& matrix) + { + init(); + compute(matrix); + } + + ~IterScaling() { } + + /** + * Compute the left and right diagonal matrices to scale the input matrix @p mat + * + * FIXME This algorithm will be modified such that the diagonal elements are permuted on the diagonal. + * + * \sa LeftScaling() RightScaling() + */ + void compute (const MatrixType& mat) + { + using std::abs; + int m = mat.rows(); + int n = mat.cols(); + eigen_assert((m>0 && m == n) && "Please give a non - empty matrix"); + m_left.resize(m); + m_right.resize(n); + m_left.setOnes(); + m_right.setOnes(); + m_matrix = mat; + VectorXd Dr, Dc, DrRes, DcRes; // Temporary Left and right scaling vectors + Dr.resize(m); Dc.resize(n); + DrRes.resize(m); DcRes.resize(n); + double EpsRow = 1.0, EpsCol = 1.0; + int its = 0; + do + { // Iterate until the infinite norm of each row and column is approximately 1 + // Get the maximum value in each row and column + Dr.setZero(); Dc.setZero(); + for (int k=0; km_tol || EpsCol > m_tol) && (its < m_maxits) ); + m_isInitialized = true; + } + /** Compute the left and right vectors to scale the vectors + * the input matrix is scaled with the computed vectors at output + * + * \sa compute() + */ + void computeRef (MatrixType& mat) + { + compute (mat); + mat = m_matrix; + } + /** Get the vector to scale the rows of the matrix + */ + VectorXd& LeftScaling() + { + return m_left; + } + + /** Get the vector to scale the columns of the matrix + */ + VectorXd& RightScaling() + { + return m_right; + } + + /** Set the tolerance for the convergence of the 
iterative scaling algorithm + */ + void setTolerance(double tol) + { + m_tol = tol; + } + + protected: + + void init() + { + m_tol = 1e-10; + m_maxits = 5; + m_isInitialized = false; + } + + MatrixType m_matrix; + mutable ComputationInfo m_info; + bool m_isInitialized; + VectorXd m_left; // Left scaling vector + VectorXd m_right; // m_right scaling vector + double m_tol; + int m_maxits; // Maximum number of iterations allowed +}; +} +#endif diff --git a/external/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h b/external/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h new file mode 100644 index 0000000..6a9b0be --- /dev/null +++ b/external/unsupported/Eigen/src/KroneckerProduct/KroneckerTensorProduct.h @@ -0,0 +1,305 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Kolja Brix +// Copyright (C) 2011 Andreas Platen +// Copyright (C) 2012 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef KRONECKER_TENSOR_PRODUCT_H +#define KRONECKER_TENSOR_PRODUCT_H + +namespace Eigen { + +/*! + * \ingroup KroneckerProduct_Module + * + * \brief The base class of dense and sparse Kronecker product. + * + * \tparam Derived is the derived type. + */ +template +class KroneckerProductBase : public ReturnByValue +{ + private: + typedef typename internal::traits Traits; + typedef typename Traits::Scalar Scalar; + + protected: + typedef typename Traits::Lhs Lhs; + typedef typename Traits::Rhs Rhs; + + public: + /*! \brief Constructor. */ + KroneckerProductBase(const Lhs& A, const Rhs& B) + : m_A(A), m_B(B) + {} + + inline Index rows() const { return m_A.rows() * m_B.rows(); } + inline Index cols() const { return m_A.cols() * m_B.cols(); } + + /*! 
+ * This overrides ReturnByValue::coeff because this function is + * efficient enough. + */ + Scalar coeff(Index row, Index col) const + { + return m_A.coeff(row / m_B.rows(), col / m_B.cols()) * + m_B.coeff(row % m_B.rows(), col % m_B.cols()); + } + + /*! + * This overrides ReturnByValue::coeff because this function is + * efficient enough. + */ + Scalar coeff(Index i) const + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived); + return m_A.coeff(i / m_A.size()) * m_B.coeff(i % m_A.size()); + } + + protected: + typename Lhs::Nested m_A; + typename Rhs::Nested m_B; +}; + +/*! + * \ingroup KroneckerProduct_Module + * + * \brief Kronecker tensor product helper class for dense matrices + * + * This class is the return value of kroneckerProduct(MatrixBase, + * MatrixBase). Use the function rather than construct this class + * directly to avoid specifying template prarameters. + * + * \tparam Lhs Type of the left-hand side, a matrix expression. + * \tparam Rhs Type of the rignt-hand side, a matrix expression. + */ +template +class KroneckerProduct : public KroneckerProductBase > +{ + private: + typedef KroneckerProductBase Base; + using Base::m_A; + using Base::m_B; + + public: + /*! \brief Constructor. */ + KroneckerProduct(const Lhs& A, const Rhs& B) + : Base(A, B) + {} + + /*! \brief Evaluate the Kronecker tensor product. */ + template void evalTo(Dest& dst) const; +}; + +/*! + * \ingroup KroneckerProduct_Module + * + * \brief Kronecker tensor product helper class for sparse matrices + * + * If at least one of the operands is a sparse matrix expression, + * then this class is returned and evaluates into a sparse matrix. + * + * This class is the return value of kroneckerProduct(EigenBase, + * EigenBase). Use the function rather than construct this class + * directly to avoid specifying template prarameters. + * + * \tparam Lhs Type of the left-hand side, a matrix expression. + * \tparam Rhs Type of the rignt-hand side, a matrix expression. 
+ */ +template +class KroneckerProductSparse : public KroneckerProductBase > +{ + private: + typedef KroneckerProductBase Base; + using Base::m_A; + using Base::m_B; + + public: + /*! \brief Constructor. */ + KroneckerProductSparse(const Lhs& A, const Rhs& B) + : Base(A, B) + {} + + /*! \brief Evaluate the Kronecker tensor product. */ + template void evalTo(Dest& dst) const; +}; + +template +template +void KroneckerProduct::evalTo(Dest& dst) const +{ + const int BlockRows = Rhs::RowsAtCompileTime, + BlockCols = Rhs::ColsAtCompileTime; + const Index Br = m_B.rows(), + Bc = m_B.cols(); + for (Index i=0; i < m_A.rows(); ++i) + for (Index j=0; j < m_A.cols(); ++j) + Block(dst,i*Br,j*Bc,Br,Bc) = m_A.coeff(i,j) * m_B; +} + +template +template +void KroneckerProductSparse::evalTo(Dest& dst) const +{ + Index Br = m_B.rows(), Bc = m_B.cols(); + dst.resize(this->rows(), this->cols()); + dst.resizeNonZeros(0); + + // 1 - evaluate the operands if needed: + typedef typename internal::nested_eval::type Lhs1; + typedef typename internal::remove_all::type Lhs1Cleaned; + const Lhs1 lhs1(m_A); + typedef typename internal::nested_eval::type Rhs1; + typedef typename internal::remove_all::type Rhs1Cleaned; + const Rhs1 rhs1(m_B); + + // 2 - construct respective iterators + typedef Eigen::InnerIterator LhsInnerIterator; + typedef Eigen::InnerIterator RhsInnerIterator; + + // compute number of non-zeros per innervectors of dst + { + // TODO VectorXi is not necessarily big enough! + VectorXi nnzA = VectorXi::Zero(Dest::IsRowMajor ? m_A.rows() : m_A.cols()); + for (Index kA=0; kA < m_A.outerSize(); ++kA) + for (LhsInnerIterator itA(lhs1,kA); itA; ++itA) + nnzA(Dest::IsRowMajor ? itA.row() : itA.col())++; + + VectorXi nnzB = VectorXi::Zero(Dest::IsRowMajor ? m_B.rows() : m_B.cols()); + for (Index kB=0; kB < m_B.outerSize(); ++kB) + for (RhsInnerIterator itB(rhs1,kB); itB; ++itB) + nnzB(Dest::IsRowMajor ? 
itB.row() : itB.col())++; + + Matrix nnzAB = nnzB * nnzA.transpose(); + dst.reserve(VectorXi::Map(nnzAB.data(), nnzAB.size())); + } + + for (Index kA=0; kA < m_A.outerSize(); ++kA) + { + for (Index kB=0; kB < m_B.outerSize(); ++kB) + { + for (LhsInnerIterator itA(lhs1,kA); itA; ++itA) + { + for (RhsInnerIterator itB(rhs1,kB); itB; ++itB) + { + Index i = itA.row() * Br + itB.row(), + j = itA.col() * Bc + itB.col(); + dst.insert(i,j) = itA.value() * itB.value(); + } + } + } + } +} + +namespace internal { + +template +struct traits > +{ + typedef typename remove_all<_Lhs>::type Lhs; + typedef typename remove_all<_Rhs>::type Rhs; + typedef typename ScalarBinaryOpTraits::ReturnType Scalar; + typedef typename promote_index_type::type StorageIndex; + + enum { + Rows = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, + Cols = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, + MaxRows = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, + MaxCols = size_at_compile_time::MaxColsAtCompileTime, traits::MaxColsAtCompileTime>::ret + }; + + typedef Matrix ReturnType; +}; + +template +struct traits > +{ + typedef MatrixXpr XprKind; + typedef typename remove_all<_Lhs>::type Lhs; + typedef typename remove_all<_Rhs>::type Rhs; + typedef typename ScalarBinaryOpTraits::ReturnType Scalar; + typedef typename cwise_promote_storage_type::StorageKind, typename traits::StorageKind, scalar_product_op >::ret StorageKind; + typedef typename promote_index_type::type StorageIndex; + + enum { + LhsFlags = Lhs::Flags, + RhsFlags = Rhs::Flags, + + RowsAtCompileTime = size_at_compile_time::RowsAtCompileTime, traits::RowsAtCompileTime>::ret, + ColsAtCompileTime = size_at_compile_time::ColsAtCompileTime, traits::ColsAtCompileTime>::ret, + MaxRowsAtCompileTime = size_at_compile_time::MaxRowsAtCompileTime, traits::MaxRowsAtCompileTime>::ret, + MaxColsAtCompileTime = size_at_compile_time::MaxColsAtCompileTime, 
traits::MaxColsAtCompileTime>::ret, + + EvalToRowMajor = (int(LhsFlags) & int(RhsFlags) & RowMajorBit), + RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit), + + Flags = ((int(LhsFlags) | int(RhsFlags)) & HereditaryBits & RemovedBits) + | EvalBeforeNestingBit, + CoeffReadCost = HugeCost + }; + + typedef SparseMatrix ReturnType; +}; + +} // end namespace internal + +/*! + * \ingroup KroneckerProduct_Module + * + * Computes Kronecker tensor product of two dense matrices + * + * \warning If you want to replace a matrix by its Kronecker product + * with some matrix, do \b NOT do this: + * \code + * A = kroneckerProduct(A,B); // bug!!! caused by aliasing effect + * \endcode + * instead, use eval() to work around this: + * \code + * A = kroneckerProduct(A,B).eval(); + * \endcode + * + * \param a Dense matrix a + * \param b Dense matrix b + * \return Kronecker tensor product of a and b + */ +template +KroneckerProduct kroneckerProduct(const MatrixBase& a, const MatrixBase& b) +{ + return KroneckerProduct(a.derived(), b.derived()); +} + +/*! + * \ingroup KroneckerProduct_Module + * + * Computes Kronecker tensor product of two matrices, at least one of + * which is sparse + * + * \warning If you want to replace a matrix by its Kronecker product + * with some matrix, do \b NOT do this: + * \code + * A = kroneckerProduct(A,B); // bug!!! 
caused by aliasing effect + * \endcode + * instead, use eval() to work around this: + * \code + * A = kroneckerProduct(A,B).eval(); + * \endcode + * + * \param a Dense/sparse matrix a + * \param b Dense/sparse matrix b + * \return Kronecker tensor product of a and b, stored in a sparse + * matrix + */ +template +KroneckerProductSparse kroneckerProduct(const EigenBase& a, const EigenBase& b) +{ + return KroneckerProductSparse(a.derived(), b.derived()); +} + +} // end namespace Eigen + +#endif // KRONECKER_TENSOR_PRODUCT_H diff --git a/external/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt b/external/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt new file mode 100644 index 0000000..ae7984d --- /dev/null +++ b/external/unsupported/Eigen/src/LevenbergMarquardt/CopyrightMINPACK.txt @@ -0,0 +1,52 @@ +Minpack Copyright Notice (1999) University of Chicago. All rights reserved + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the +following conditions are met: + +1. Redistributions of source code must retain the above +copyright notice, this list of conditions and the following +disclaimer. + +2. Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following +disclaimer in the documentation and/or other materials +provided with the distribution. + +3. The end-user documentation included with the +redistribution, if any, must include the following +acknowledgment: + + "This product includes software developed by the + University of Chicago, as Operator of Argonne National + Laboratory. + +Alternately, this acknowledgment may appear in the software +itself, if and wherever such third-party acknowledgments +normally appear. + +4. WARRANTY DISCLAIMER. THE SOFTWARE IS SUPPLIED "AS IS" +WITHOUT WARRANTY OF ANY KIND. 
THE COPYRIGHT HOLDER, THE +UNITED STATES, THE UNITED STATES DEPARTMENT OF ENERGY, AND +THEIR EMPLOYEES: (1) DISCLAIM ANY WARRANTIES, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO ANY IMPLIED WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE +OR NON-INFRINGEMENT, (2) DO NOT ASSUME ANY LEGAL LIABILITY +OR RESPONSIBILITY FOR THE ACCURACY, COMPLETENESS, OR +USEFULNESS OF THE SOFTWARE, (3) DO NOT REPRESENT THAT USE OF +THE SOFTWARE WOULD NOT INFRINGE PRIVATELY OWNED RIGHTS, (4) +DO NOT WARRANT THAT THE SOFTWARE WILL FUNCTION +UNINTERRUPTED, THAT IT IS ERROR-FREE OR THAT ANY ERRORS WILL +BE CORRECTED. + +5. LIMITATION OF LIABILITY. IN NO EVENT WILL THE COPYRIGHT +HOLDER, THE UNITED STATES, THE UNITED STATES DEPARTMENT OF +ENERGY, OR THEIR EMPLOYEES: BE LIABLE FOR ANY INDIRECT, +INCIDENTAL, CONSEQUENTIAL, SPECIAL OR PUNITIVE DAMAGES OF +ANY KIND OR NATURE, INCLUDING BUT NOT LIMITED TO LOSS OF +PROFITS OR LOSS OF DATA, FOR ANY REASON WHATSOEVER, WHETHER +SUCH LIABILITY IS ASSERTED ON THE BASIS OF CONTRACT, TORT +(INCLUDING NEGLIGENCE OR STRICT LIABILITY), OR OTHERWISE, +EVEN IF ANY OF SAID PARTIES HAS BEEN WARNED OF THE +POSSIBILITY OF SUCH LOSS OR DAMAGES. + diff --git a/external/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h b/external/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h new file mode 100644 index 0000000..b75bea2 --- /dev/null +++ b/external/unsupported/Eigen/src/LevenbergMarquardt/LMcovar.h @@ -0,0 +1,84 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This code initially comes from MINPACK whose original authors are: +// Copyright Jorge More - Argonne National Laboratory +// Copyright Burt Garbow - Argonne National Laboratory +// Copyright Ken Hillstrom - Argonne National Laboratory +// +// This Source Code Form is subject to the terms of the Minpack license +// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. 
+ +#ifndef EIGEN_LMCOVAR_H +#define EIGEN_LMCOVAR_H + +namespace Eigen { + +namespace internal { + +template +void covar( + Matrix< Scalar, Dynamic, Dynamic > &r, + const VectorXi& ipvt, + Scalar tol = std::sqrt(NumTraits::epsilon()) ) +{ + using std::abs; + /* Local variables */ + Index i, j, k, l, ii, jj; + bool sing; + Scalar temp; + + /* Function Body */ + const Index n = r.cols(); + const Scalar tolr = tol * abs(r(0,0)); + Matrix< Scalar, Dynamic, 1 > wa(n); + eigen_assert(ipvt.size()==n); + + /* form the inverse of r in the full upper triangle of r. */ + l = -1; + for (k = 0; k < n; ++k) + if (abs(r(k,k)) > tolr) { + r(k,k) = 1. / r(k,k); + for (j = 0; j <= k-1; ++j) { + temp = r(k,k) * r(j,k); + r(j,k) = 0.; + r.col(k).head(j+1) -= r.col(j).head(j+1) * temp; + } + l = k; + } + + /* form the full upper triangle of the inverse of (r transpose)*r */ + /* in the full upper triangle of r. */ + for (k = 0; k <= l; ++k) { + for (j = 0; j <= k-1; ++j) + r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k); + r.col(k).head(k+1) *= r(k,k); + } + + /* form the full lower triangle of the covariance matrix */ + /* in the strict lower triangle of r and in wa. */ + for (j = 0; j < n; ++j) { + jj = ipvt[j]; + sing = j > l; + for (i = 0; i <= j; ++i) { + if (sing) + r(i,j) = 0.; + ii = ipvt[i]; + if (ii > jj) + r(ii,jj) = r(i,j); + if (ii < jj) + r(jj,ii) = r(i,j); + } + wa[jj] = r(j,j); + } + + /* symmetrize the covariance matrix in r. 
*/ + r.topLeftCorner(n,n).template triangularView() = r.topLeftCorner(n,n).transpose(); + r.diagonal() = wa; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LMCOVAR_H diff --git a/external/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h b/external/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h new file mode 100644 index 0000000..25b32ec --- /dev/null +++ b/external/unsupported/Eigen/src/LevenbergMarquardt/LMonestep.h @@ -0,0 +1,202 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This code initially comes from MINPACK whose original authors are: +// Copyright Jorge More - Argonne National Laboratory +// Copyright Burt Garbow - Argonne National Laboratory +// Copyright Ken Hillstrom - Argonne National Laboratory +// +// This Source Code Form is subject to the terms of the Minpack license +// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. + +#ifndef EIGEN_LMONESTEP_H +#define EIGEN_LMONESTEP_H + +namespace Eigen { + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeOneStep(FVectorType &x) +{ + using std::abs; + using std::sqrt; + RealScalar temp, temp1,temp2; + RealScalar ratio; + RealScalar pnorm, xnorm, fnorm1, actred, dirder, prered; + eigen_assert(x.size()==n); // check the caller is not cheating us + + temp = 0.0; xnorm = 0.0; + /* calculate the jacobian matrix. */ + Index df_ret = m_functor.df(x, m_fjac); + if (df_ret<0) + return LevenbergMarquardtSpace::UserAsked; + if (df_ret>0) + // numerical diff, we evaluated the function df_ret times + m_nfev += df_ret; + else m_njev++; + + /* compute the qr factorization of the jacobian. 
*/ + for (int j = 0; j < x.size(); ++j) + m_wa2(j) = m_fjac.col(j).blueNorm(); + QRSolver qrfac(m_fjac); + if(qrfac.info() != Success) { + m_info = NumericalIssue; + return LevenbergMarquardtSpace::ImproperInputParameters; + } + // Make a copy of the first factor with the associated permutation + m_rfactor = qrfac.matrixR(); + m_permutation = (qrfac.colsPermutation()); + + /* on the first iteration and if external scaling is not used, scale according */ + /* to the norms of the columns of the initial jacobian. */ + if (m_iter == 1) { + if (!m_useExternalScaling) + for (Index j = 0; j < n; ++j) + m_diag[j] = (m_wa2[j]==0.)? 1. : m_wa2[j]; + + /* on the first iteration, calculate the norm of the scaled x */ + /* and initialize the step bound m_delta. */ + xnorm = m_diag.cwiseProduct(x).stableNorm(); + m_delta = m_factor * xnorm; + if (m_delta == 0.) + m_delta = m_factor; + } + + /* form (q transpose)*m_fvec and store the first n components in */ + /* m_qtf. */ + m_wa4 = m_fvec; + m_wa4 = qrfac.matrixQ().adjoint() * m_fvec; + m_qtf = m_wa4.head(n); + + /* compute the norm of the scaled gradient. */ + m_gnorm = 0.; + if (m_fnorm != 0.) + for (Index j = 0; j < n; ++j) + if (m_wa2[m_permutation.indices()[j]] != 0.) + m_gnorm = (std::max)(m_gnorm, abs( m_rfactor.col(j).head(j+1).dot(m_qtf.head(j+1)/m_fnorm) / m_wa2[m_permutation.indices()[j]])); + + /* test for convergence of the gradient norm. */ + if (m_gnorm <= m_gtol) { + m_info = Success; + return LevenbergMarquardtSpace::CosinusTooSmall; + } + + /* rescale if necessary. */ + if (!m_useExternalScaling) + m_diag = m_diag.cwiseMax(m_wa2); + + do { + /* determine the levenberg-marquardt parameter. */ + internal::lmpar2(qrfac, m_diag, m_qtf, m_delta, m_par, m_wa1); + + /* store the direction p and x + p. calculate the norm of p. */ + m_wa1 = -m_wa1; + m_wa2 = x + m_wa1; + pnorm = m_diag.cwiseProduct(m_wa1).stableNorm(); + + /* on the first iteration, adjust the initial step bound. 
*/ + if (m_iter == 1) + m_delta = (std::min)(m_delta,pnorm); + + /* evaluate the function at x + p and calculate its norm. */ + if ( m_functor(m_wa2, m_wa4) < 0) + return LevenbergMarquardtSpace::UserAsked; + ++m_nfev; + fnorm1 = m_wa4.stableNorm(); + + /* compute the scaled actual reduction. */ + actred = -1.; + if (Scalar(.1) * fnorm1 < m_fnorm) + actred = 1. - numext::abs2(fnorm1 / m_fnorm); + + /* compute the scaled predicted reduction and */ + /* the scaled directional derivative. */ + m_wa3 = m_rfactor.template triangularView() * (m_permutation.inverse() *m_wa1); + temp1 = numext::abs2(m_wa3.stableNorm() / m_fnorm); + temp2 = numext::abs2(sqrt(m_par) * pnorm / m_fnorm); + prered = temp1 + temp2 / Scalar(.5); + dirder = -(temp1 + temp2); + + /* compute the ratio of the actual to the predicted */ + /* reduction. */ + ratio = 0.; + if (prered != 0.) + ratio = actred / prered; + + /* update the step bound. */ + if (ratio <= Scalar(.25)) { + if (actred >= 0.) + temp = RealScalar(.5); + if (actred < 0.) + temp = RealScalar(.5) * dirder / (dirder + RealScalar(.5) * actred); + if (RealScalar(.1) * fnorm1 >= m_fnorm || temp < RealScalar(.1)) + temp = Scalar(.1); + /* Computing MIN */ + m_delta = temp * (std::min)(m_delta, pnorm / RealScalar(.1)); + m_par /= temp; + } else if (!(m_par != 0. && ratio < RealScalar(.75))) { + m_delta = pnorm / RealScalar(.5); + m_par = RealScalar(.5) * m_par; + } + + /* test for successful iteration. */ + if (ratio >= RealScalar(1e-4)) { + /* successful iteration. update x, m_fvec, and their norms. */ + x = m_wa2; + m_wa2 = m_diag.cwiseProduct(x); + m_fvec = m_wa4; + xnorm = m_wa2.stableNorm(); + m_fnorm = fnorm1; + ++m_iter; + } + + /* tests for convergence. */ + if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1. && m_delta <= m_xtol * xnorm) + { + m_info = Success; + return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; + } + if (abs(actred) <= m_ftol && prered <= m_ftol && Scalar(.5) * ratio <= 1.) 
+ { + m_info = Success; + return LevenbergMarquardtSpace::RelativeReductionTooSmall; + } + if (m_delta <= m_xtol * xnorm) + { + m_info = Success; + return LevenbergMarquardtSpace::RelativeErrorTooSmall; + } + + /* tests for termination and stringent tolerances. */ + if (m_nfev >= m_maxfev) + { + m_info = NoConvergence; + return LevenbergMarquardtSpace::TooManyFunctionEvaluation; + } + if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) + { + m_info = Success; + return LevenbergMarquardtSpace::FtolTooSmall; + } + if (m_delta <= NumTraits::epsilon() * xnorm) + { + m_info = Success; + return LevenbergMarquardtSpace::XtolTooSmall; + } + if (m_gnorm <= NumTraits::epsilon()) + { + m_info = Success; + return LevenbergMarquardtSpace::GtolTooSmall; + } + + } while (ratio < Scalar(1e-4)); + + return LevenbergMarquardtSpace::Running; +} + + +} // end namespace Eigen + +#endif // EIGEN_LMONESTEP_H diff --git a/external/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h b/external/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h new file mode 100644 index 0000000..9a48365 --- /dev/null +++ b/external/unsupported/Eigen/src/LevenbergMarquardt/LMpar.h @@ -0,0 +1,160 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This code initially comes from MINPACK whose original authors are: +// Copyright Jorge More - Argonne National Laboratory +// Copyright Burt Garbow - Argonne National Laboratory +// Copyright Ken Hillstrom - Argonne National Laboratory +// +// This Source Code Form is subject to the terms of the Minpack license +// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. 
+ +#ifndef EIGEN_LMPAR_H +#define EIGEN_LMPAR_H + +namespace Eigen { + +namespace internal { + + template + void lmpar2( + const QRSolver &qr, + const VectorType &diag, + const VectorType &qtb, + typename VectorType::Scalar m_delta, + typename VectorType::Scalar &par, + VectorType &x) + + { + using std::sqrt; + using std::abs; + typedef typename QRSolver::MatrixType MatrixType; + typedef typename QRSolver::Scalar Scalar; +// typedef typename QRSolver::StorageIndex StorageIndex; + + /* Local variables */ + Index j; + Scalar fp; + Scalar parc, parl; + Index iter; + Scalar temp, paru; + Scalar gnorm; + Scalar dxnorm; + + // Make a copy of the triangular factor. + // This copy is modified during call the qrsolv + MatrixType s; + s = qr.matrixR(); + + /* Function Body */ + const Scalar dwarf = (std::numeric_limits::min)(); + const Index n = qr.matrixR().cols(); + eigen_assert(n==diag.size()); + eigen_assert(n==qtb.size()); + + VectorType wa1, wa2; + + /* compute and store in x the gauss-newton direction. if the */ + /* jacobian is rank-deficient, obtain a least squares solution. */ + + // const Index rank = qr.nonzeroPivots(); // exactly double(0.) + const Index rank = qr.rank(); // use a threshold + wa1 = qtb; + wa1.tail(n-rank).setZero(); + //FIXME There is no solve in place for sparse triangularView + wa1.head(rank) = s.topLeftCorner(rank,rank).template triangularView().solve(qtb.head(rank)); + + x = qr.colsPermutation()*wa1; + + /* initialize the iteration counter. */ + /* evaluate the function at the origin, and test */ + /* for acceptance of the gauss-newton direction. */ + iter = 0; + wa2 = diag.cwiseProduct(x); + dxnorm = wa2.blueNorm(); + fp = dxnorm - m_delta; + if (fp <= Scalar(0.1) * m_delta) { + par = 0; + return; + } + + /* if the jacobian is not rank deficient, the newton */ + /* step provides a lower bound, parl, for the zero of */ + /* the function. otherwise set this bound to zero. 
*/ + parl = 0.; + if (rank==n) { + wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm; + s.topLeftCorner(n,n).transpose().template triangularView().solveInPlace(wa1); + temp = wa1.blueNorm(); + parl = fp / m_delta / temp / temp; + } + + /* calculate an upper bound, paru, for the zero of the function. */ + for (j = 0; j < n; ++j) + wa1[j] = s.col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)]; + + gnorm = wa1.stableNorm(); + paru = gnorm / m_delta; + if (paru == 0.) + paru = dwarf / (std::min)(m_delta,Scalar(0.1)); + + /* if the input par lies outside of the interval (parl,paru), */ + /* set par to the closer endpoint. */ + par = (std::max)(par,parl); + par = (std::min)(par,paru); + if (par == 0.) + par = gnorm / dxnorm; + + /* beginning of an iteration. */ + while (true) { + ++iter; + + /* evaluate the function at the current value of par. */ + if (par == 0.) + par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ + wa1 = sqrt(par)* diag; + + VectorType sdiag(n); + lmqrsolv(s, qr.colsPermutation(), wa1, qtb, x, sdiag); + + wa2 = diag.cwiseProduct(x); + dxnorm = wa2.blueNorm(); + temp = fp; + fp = dxnorm - m_delta; + + /* if the function is small enough, accept the current value */ + /* of par. also test for the exceptional cases where parl */ + /* is zero or the number of iterations has reached 10. */ + if (abs(fp) <= Scalar(0.1) * m_delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) + break; + + /* compute the newton correction. */ + wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm); + // we could almost use this here, but the diagonal is outside qr, in sdiag[] + for (j = 0; j < n; ++j) { + wa1[j] /= sdiag[j]; + temp = wa1[j]; + for (Index i = j+1; i < n; ++i) + wa1[i] -= s.coeff(i,j) * temp; + } + temp = wa1.blueNorm(); + parc = fp / m_delta / temp / temp; + + /* depending on the sign of the function, update parl or paru. */ + if (fp > 0.) 
+ parl = (std::max)(parl,par); + if (fp < 0.) + paru = (std::min)(paru,par); + + /* compute an improved estimate for par. */ + par = (std::max)(parl,par+parc); + } + if (iter == 0) + par = 0.; + return; + } +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LMPAR_H diff --git a/external/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h b/external/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h new file mode 100644 index 0000000..1234858 --- /dev/null +++ b/external/unsupported/Eigen/src/LevenbergMarquardt/LMqrsolv.h @@ -0,0 +1,188 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// Copyright (C) 2012 Desire Nuentsa +// +// This code initially comes from MINPACK whose original authors are: +// Copyright Jorge More - Argonne National Laboratory +// Copyright Burt Garbow - Argonne National Laboratory +// Copyright Ken Hillstrom - Argonne National Laboratory +// +// This Source Code Form is subject to the terms of the Minpack license +// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. + +#ifndef EIGEN_LMQRSOLV_H +#define EIGEN_LMQRSOLV_H + +namespace Eigen { + +namespace internal { + +template +void lmqrsolv( + Matrix &s, + const PermutationMatrix &iPerm, + const Matrix &diag, + const Matrix &qtb, + Matrix &x, + Matrix &sdiag) +{ + /* Local variables */ + Index i, j, k; + Scalar temp; + Index n = s.cols(); + Matrix wa(n); + JacobiRotation givens; + + /* Function Body */ + // the following will only change the lower triangular part of s, including + // the diagonal, though the diagonal is restored afterward + + /* copy r and (q transpose)*b to preserve input and initialize s. */ + /* in particular, save the diagonal elements of r in x. */ + x = s.diagonal(); + wa = qtb; + + + s.topLeftCorner(n,n).template triangularView() = s.topLeftCorner(n,n).transpose(); + /* eliminate the diagonal matrix d using a givens rotation. 
*/ + for (j = 0; j < n; ++j) { + + /* prepare the row of d to be eliminated, locating the */ + /* diagonal element using p from the qr factorization. */ + const PermIndex l = iPerm.indices()(j); + if (diag[l] == 0.) + break; + sdiag.tail(n-j).setZero(); + sdiag[j] = diag[l]; + + /* the transformations to eliminate the row of d */ + /* modify only a single element of (q transpose)*b */ + /* beyond the first n, which is initially zero. */ + Scalar qtbpj = 0.; + for (k = j; k < n; ++k) { + /* determine a givens rotation which eliminates the */ + /* appropriate element in the current row of d. */ + givens.makeGivens(-s(k,k), sdiag[k]); + + /* compute the modified diagonal element of r and */ + /* the modified element of ((q transpose)*b,0). */ + s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k]; + temp = givens.c() * wa[k] + givens.s() * qtbpj; + qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj; + wa[k] = temp; + + /* accumulate the transformation in the row of s. */ + for (i = k+1; i().solveInPlace(wa.head(nsing)); + + // restore + sdiag = s.diagonal(); + s.diagonal() = x; + + /* permute the components of z back to components of x. */ + x = iPerm * wa; +} + +template +void lmqrsolv( + SparseMatrix &s, + const PermutationMatrix &iPerm, + const Matrix &diag, + const Matrix &qtb, + Matrix &x, + Matrix &sdiag) +{ + /* Local variables */ + typedef SparseMatrix FactorType; + Index i, j, k, l; + Scalar temp; + Index n = s.cols(); + Matrix wa(n); + JacobiRotation givens; + + /* Function Body */ + // the following will only change the lower triangular part of s, including + // the diagonal, though the diagonal is restored afterward + + /* copy r and (q transpose)*b to preserve input and initialize R. 
*/ + wa = qtb; + FactorType R(s); + // Eliminate the diagonal matrix d using a givens rotation + for (j = 0; j < n; ++j) + { + // Prepare the row of d to be eliminated, locating the + // diagonal element using p from the qr factorization + l = iPerm.indices()(j); + if (diag(l) == Scalar(0)) + break; + sdiag.tail(n-j).setZero(); + sdiag[j] = diag[l]; + // the transformations to eliminate the row of d + // modify only a single element of (q transpose)*b + // beyond the first n, which is initially zero. + + Scalar qtbpj = 0; + // Browse the nonzero elements of row j of the upper triangular s + for (k = j; k < n; ++k) + { + typename FactorType::InnerIterator itk(R,k); + for (; itk; ++itk){ + if (itk.index() < k) continue; + else break; + } + //At this point, we have the diagonal element R(k,k) + // Determine a givens rotation which eliminates + // the appropriate element in the current row of d + givens.makeGivens(-itk.value(), sdiag(k)); + + // Compute the modified diagonal element of r and + // the modified element of ((q transpose)*b,0). + itk.valueRef() = givens.c() * itk.value() + givens.s() * sdiag(k); + temp = givens.c() * wa(k) + givens.s() * qtbpj; + qtbpj = -givens.s() * wa(k) + givens.c() * qtbpj; + wa(k) = temp; + + // Accumulate the transformation in the remaining k row/column of R + for (++itk; itk; ++itk) + { + i = itk.index(); + temp = givens.c() * itk.value() + givens.s() * sdiag(i); + sdiag(i) = -givens.s() * itk.value() + givens.c() * sdiag(i); + itk.valueRef() = temp; + } + } + } + + // Solve the triangular system for z. 
If the system is + // singular, then obtain a least squares solution + Index nsing; + for(nsing = 0; nsing().solve/*InPlace*/(wa.head(nsing)); + + sdiag = R.diagonal(); + // Permute the components of z back to components of x + x = iPerm * wa; +} +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LMQRSOLV_H diff --git a/external/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h b/external/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h new file mode 100644 index 0000000..62561da --- /dev/null +++ b/external/unsupported/Eigen/src/LevenbergMarquardt/LevenbergMarquardt.h @@ -0,0 +1,396 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// Copyright (C) 2012 Desire Nuentsa +// +// The algorithm of this class initially comes from MINPACK whose original authors are: +// Copyright Jorge More - Argonne National Laboratory +// Copyright Burt Garbow - Argonne National Laboratory +// Copyright Ken Hillstrom - Argonne National Laboratory +// +// This Source Code Form is subject to the terms of the Minpack license +// (a BSD-like license) described in the campaigned CopyrightMINPACK.txt file. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_LEVENBERGMARQUARDT_H +#define EIGEN_LEVENBERGMARQUARDT_H + + +namespace Eigen { +namespace LevenbergMarquardtSpace { + enum Status { + NotStarted = -2, + Running = -1, + ImproperInputParameters = 0, + RelativeReductionTooSmall = 1, + RelativeErrorTooSmall = 2, + RelativeErrorAndReductionTooSmall = 3, + CosinusTooSmall = 4, + TooManyFunctionEvaluation = 5, + FtolTooSmall = 6, + XtolTooSmall = 7, + GtolTooSmall = 8, + UserAsked = 9 + }; +} + +template +struct DenseFunctor +{ + typedef _Scalar Scalar; + enum { + InputsAtCompileTime = NX, + ValuesAtCompileTime = NY + }; + typedef Matrix InputType; + typedef Matrix ValueType; + typedef Matrix JacobianType; + typedef ColPivHouseholderQR QRSolver; + const int m_inputs, m_values; + + DenseFunctor() : m_inputs(InputsAtCompileTime), m_values(ValuesAtCompileTime) {} + DenseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {} + + int inputs() const { return m_inputs; } + int values() const { return m_values; } + + //int operator()(const InputType &x, ValueType& fvec) { } + // should be defined in derived classes + + //int df(const InputType &x, JacobianType& fjac) { } + // should be defined in derived classes +}; + +template +struct SparseFunctor +{ + typedef _Scalar Scalar; + typedef _Index Index; + typedef Matrix InputType; + typedef Matrix ValueType; + typedef SparseMatrix JacobianType; + typedef SparseQR > QRSolver; + enum { + InputsAtCompileTime = Dynamic, + ValuesAtCompileTime = Dynamic + }; + + SparseFunctor(int inputs, int values) : m_inputs(inputs), m_values(values) {} + + int inputs() const { return m_inputs; } + int values() const { return m_values; } + + const int m_inputs, m_values; + //int operator()(const InputType &x, ValueType& fvec) { } + // to be defined in the functor + + //int df(const InputType &x, JacobianType& fjac) { } + // to be defined in the functor if no automatic differentiation + +}; +namespace internal { +template +void lmpar2(const QRSolver &qr, const 
VectorType &diag, const VectorType &qtb, + typename VectorType::Scalar m_delta, typename VectorType::Scalar &par, + VectorType &x); + } +/** + * \ingroup NonLinearOptimization_Module + * \brief Performs non linear optimization over a non-linear function, + * using a variant of the Levenberg Marquardt algorithm. + * + * Check wikipedia for more information. + * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm + */ +template +class LevenbergMarquardt : internal::no_assignment_operator +{ + public: + typedef _FunctorType FunctorType; + typedef typename FunctorType::QRSolver QRSolver; + typedef typename FunctorType::JacobianType JacobianType; + typedef typename JacobianType::Scalar Scalar; + typedef typename JacobianType::RealScalar RealScalar; + typedef typename QRSolver::StorageIndex PermIndex; + typedef Matrix FVectorType; + typedef PermutationMatrix PermutationType; + public: + LevenbergMarquardt(FunctorType& functor) + : m_functor(functor),m_nfev(0),m_njev(0),m_fnorm(0.0),m_gnorm(0), + m_isInitialized(false),m_info(InvalidInput) + { + resetParameters(); + m_useExternalScaling=false; + } + + LevenbergMarquardtSpace::Status minimize(FVectorType &x); + LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x); + LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x); + LevenbergMarquardtSpace::Status lmder1( + FVectorType &x, + const Scalar tol = std::sqrt(NumTraits::epsilon()) + ); + static LevenbergMarquardtSpace::Status lmdif1( + FunctorType &functor, + FVectorType &x, + Index *nfev, + const Scalar tol = std::sqrt(NumTraits::epsilon()) + ); + + /** Sets the default parameters */ + void resetParameters() + { + using std::sqrt; + + m_factor = 100.; + m_maxfev = 400; + m_ftol = sqrt(NumTraits::epsilon()); + m_xtol = sqrt(NumTraits::epsilon()); + m_gtol = 0. ; + m_epsfcn = 0. 
; + } + + /** Sets the tolerance for the norm of the solution vector*/ + void setXtol(RealScalar xtol) { m_xtol = xtol; } + + /** Sets the tolerance for the norm of the vector function*/ + void setFtol(RealScalar ftol) { m_ftol = ftol; } + + /** Sets the tolerance for the norm of the gradient of the error vector*/ + void setGtol(RealScalar gtol) { m_gtol = gtol; } + + /** Sets the step bound for the diagonal shift */ + void setFactor(RealScalar factor) { m_factor = factor; } + + /** Sets the error precision */ + void setEpsilon (RealScalar epsfcn) { m_epsfcn = epsfcn; } + + /** Sets the maximum number of function evaluation */ + void setMaxfev(Index maxfev) {m_maxfev = maxfev; } + + /** Use an external Scaling. If set to true, pass a nonzero diagonal to diag() */ + void setExternalScaling(bool value) {m_useExternalScaling = value; } + + /** \returns the tolerance for the norm of the solution vector */ + RealScalar xtol() const {return m_xtol; } + + /** \returns the tolerance for the norm of the vector function */ + RealScalar ftol() const {return m_ftol; } + + /** \returns the tolerance for the norm of the gradient of the error vector */ + RealScalar gtol() const {return m_gtol; } + + /** \returns the step bound for the diagonal shift */ + RealScalar factor() const {return m_factor; } + + /** \returns the error precision */ + RealScalar epsilon() const {return m_epsfcn; } + + /** \returns the maximum number of function evaluation */ + Index maxfev() const {return m_maxfev; } + + /** \returns a reference to the diagonal of the jacobian */ + FVectorType& diag() {return m_diag; } + + /** \returns the number of iterations performed */ + Index iterations() { return m_iter; } + + /** \returns the number of functions evaluation */ + Index nfev() { return m_nfev; } + + /** \returns the number of jacobian evaluation */ + Index njev() { return m_njev; } + + /** \returns the norm of current vector function */ + RealScalar fnorm() {return m_fnorm; } + + /** \returns the norm 
of the gradient of the error */ + RealScalar gnorm() {return m_gnorm; } + + /** \returns the LevenbergMarquardt parameter */ + RealScalar lm_param(void) { return m_par; } + + /** \returns a reference to the current vector function + */ + FVectorType& fvec() {return m_fvec; } + + /** \returns a reference to the matrix where the current Jacobian matrix is stored + */ + JacobianType& jacobian() {return m_fjac; } + + /** \returns a reference to the triangular matrix R from the QR of the jacobian matrix. + * \sa jacobian() + */ + JacobianType& matrixR() {return m_rfactor; } + + /** the permutation used in the QR factorization + */ + PermutationType permutation() {return m_permutation; } + + /** + * \brief Reports whether the minimization was successful + * \returns \c Success if the minimization was successful, + * \c NumericalIssue if a numerical problem arises during the + * minimization process, for example during the QR factorization + * \c NoConvergence if the minimization did not converge after + * the maximum number of function evaluation allowed + * \c InvalidInput if the input matrix is invalid + */ + ComputationInfo info() const + { + + return m_info; + } + private: + JacobianType m_fjac; + JacobianType m_rfactor; // The triangular matrix R from the QR of the jacobian matrix m_fjac + FunctorType &m_functor; + FVectorType m_fvec, m_qtf, m_diag; + Index n; + Index m; + Index m_nfev; + Index m_njev; + RealScalar m_fnorm; // Norm of the current vector function + RealScalar m_gnorm; //Norm of the gradient of the error + RealScalar m_factor; // + Index m_maxfev; // Maximum number of function evaluation + RealScalar m_ftol; //Tolerance in the norm of the vector function + RealScalar m_xtol; // + RealScalar m_gtol; //tolerance of the norm of the error gradient + RealScalar m_epsfcn; // + Index m_iter; // Number of iterations performed + RealScalar m_delta; + bool m_useExternalScaling; + PermutationType m_permutation; + FVectorType m_wa1, m_wa2, m_wa3, m_wa4; 
//Temporary vectors + RealScalar m_par; + bool m_isInitialized; // Check whether the minimization step has been called + ComputationInfo m_info; +}; + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimize(FVectorType &x) +{ + LevenbergMarquardtSpace::Status status = minimizeInit(x); + if (status==LevenbergMarquardtSpace::ImproperInputParameters) { + m_isInitialized = true; + return status; + } + do { +// std::cout << " uv " << x.transpose() << "\n"; + status = minimizeOneStep(x); + } while (status==LevenbergMarquardtSpace::Running); + m_isInitialized = true; + return status; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeInit(FVectorType &x) +{ + n = x.size(); + m = m_functor.values(); + + m_wa1.resize(n); m_wa2.resize(n); m_wa3.resize(n); + m_wa4.resize(m); + m_fvec.resize(m); + //FIXME Sparse Case : Allocate space for the jacobian + m_fjac.resize(m, n); +// m_fjac.reserve(VectorXi::Constant(n,5)); // FIXME Find a better alternative + if (!m_useExternalScaling) + m_diag.resize(n); + eigen_assert( (!m_useExternalScaling || m_diag.size()==n) && "When m_useExternalScaling is set, the caller must provide a valid 'm_diag'"); + m_qtf.resize(n); + + /* Function Body */ + m_nfev = 0; + m_njev = 0; + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || m_ftol < 0. || m_xtol < 0. || m_gtol < 0. || m_maxfev <= 0 || m_factor <= 0.){ + m_info = InvalidInput; + return LevenbergMarquardtSpace::ImproperInputParameters; + } + + if (m_useExternalScaling) + for (Index j = 0; j < n; ++j) + if (m_diag[j] <= 0.) + { + m_info = InvalidInput; + return LevenbergMarquardtSpace::ImproperInputParameters; + } + + /* evaluate the function at the starting point */ + /* and calculate its norm. */ + m_nfev = 1; + if ( m_functor(x, m_fvec) < 0) + return LevenbergMarquardtSpace::UserAsked; + m_fnorm = m_fvec.stableNorm(); + + /* initialize levenberg-marquardt parameter and iteration counter. 
*/ + m_par = 0.; + m_iter = 1; + + return LevenbergMarquardtSpace::NotStarted; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::lmder1( + FVectorType &x, + const Scalar tol + ) +{ + n = x.size(); + m = m_functor.values(); + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || tol < 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + resetParameters(); + m_ftol = tol; + m_xtol = tol; + m_maxfev = 100*(n+1); + + return minimize(x); +} + + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::lmdif1( + FunctorType &functor, + FVectorType &x, + Index *nfev, + const Scalar tol + ) +{ + Index n = x.size(); + Index m = functor.values(); + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || tol < 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + NumericalDiff numDiff(functor); + // embedded LevenbergMarquardt + LevenbergMarquardt > lm(numDiff); + lm.setFtol(tol); + lm.setXtol(tol); + lm.setMaxfev(200*(n+1)); + + LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x)); + if (nfev) + * nfev = lm.nfev(); + return info; +} + +} // end namespace Eigen + +#endif // EIGEN_LEVENBERGMARQUARDT_H diff --git a/external/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h b/external/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h new file mode 100644 index 0000000..02284b0 --- /dev/null +++ b/external/unsupported/Eigen/src/MatrixFunctions/MatrixExponential.h @@ -0,0 +1,441 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009, 2010, 2013 Jitse Niesen +// Copyright (C) 2011, 2013 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_MATRIX_EXPONENTIAL +#define EIGEN_MATRIX_EXPONENTIAL + +#include "StemFunction.h" + +namespace Eigen { +namespace internal { + +/** \brief Scaling operator. + * + * This struct is used by CwiseUnaryOp to scale a matrix by \f$ 2^{-s} \f$. + */ +template +struct MatrixExponentialScalingOp +{ + /** \brief Constructor. + * + * \param[in] squarings The integer \f$ s \f$ in this document. + */ + MatrixExponentialScalingOp(int squarings) : m_squarings(squarings) { } + + + /** \brief Scale a matrix coefficient. + * + * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. + */ + inline const RealScalar operator() (const RealScalar& x) const + { + using std::ldexp; + return ldexp(x, -m_squarings); + } + + typedef std::complex ComplexScalar; + + /** \brief Scale a matrix coefficient. + * + * \param[in,out] x The scalar to be scaled, becoming \f$ 2^{-s} x \f$. + */ + inline const ComplexScalar operator() (const ComplexScalar& x) const + { + using std::ldexp; + return ComplexScalar(ldexp(x.real(), -m_squarings), ldexp(x.imag(), -m_squarings)); + } + + private: + int m_squarings; +}; + +/** \brief Compute the (3,3)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. + */ +template +void matrix_exp_pade3(const MatA& A, MatU& U, MatV& V) +{ + typedef typename MatA::PlainObject MatrixType; + typedef typename NumTraits::Scalar>::Real RealScalar; + const RealScalar b[] = {120.L, 60.L, 12.L, 1.L}; + const MatrixType A2 = A * A; + const MatrixType tmp = b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); + U.noalias() = A * tmp; + V = b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); +} + +/** \brief Compute the (5,5)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. 
+ */ +template +void matrix_exp_pade5(const MatA& A, MatU& U, MatV& V) +{ + typedef typename MatA::PlainObject MatrixType; + typedef typename NumTraits::Scalar>::Real RealScalar; + const RealScalar b[] = {30240.L, 15120.L, 3360.L, 420.L, 30.L, 1.L}; + const MatrixType A2 = A * A; + const MatrixType A4 = A2 * A2; + const MatrixType tmp = b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); + U.noalias() = A * tmp; + V = b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); +} + +/** \brief Compute the (7,7)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. + */ +template +void matrix_exp_pade7(const MatA& A, MatU& U, MatV& V) +{ + typedef typename MatA::PlainObject MatrixType; + typedef typename NumTraits::Scalar>::Real RealScalar; + const RealScalar b[] = {17297280.L, 8648640.L, 1995840.L, 277200.L, 25200.L, 1512.L, 56.L, 1.L}; + const MatrixType A2 = A * A; + const MatrixType A4 = A2 * A2; + const MatrixType A6 = A4 * A2; + const MatrixType tmp = b[7] * A6 + b[5] * A4 + b[3] * A2 + + b[1] * MatrixType::Identity(A.rows(), A.cols()); + U.noalias() = A * tmp; + V = b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); + +} + +/** \brief Compute the (9,9)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. 
+ */ +template +void matrix_exp_pade9(const MatA& A, MatU& U, MatV& V) +{ + typedef typename MatA::PlainObject MatrixType; + typedef typename NumTraits::Scalar>::Real RealScalar; + const RealScalar b[] = {17643225600.L, 8821612800.L, 2075673600.L, 302702400.L, 30270240.L, + 2162160.L, 110880.L, 3960.L, 90.L, 1.L}; + const MatrixType A2 = A * A; + const MatrixType A4 = A2 * A2; + const MatrixType A6 = A4 * A2; + const MatrixType A8 = A6 * A2; + const MatrixType tmp = b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2 + + b[1] * MatrixType::Identity(A.rows(), A.cols()); + U.noalias() = A * tmp; + V = b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); +} + +/** \brief Compute the (13,13)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. + */ +template +void matrix_exp_pade13(const MatA& A, MatU& U, MatV& V) +{ + typedef typename MatA::PlainObject MatrixType; + typedef typename NumTraits::Scalar>::Real RealScalar; + const RealScalar b[] = {64764752532480000.L, 32382376266240000.L, 7771770303897600.L, + 1187353796428800.L, 129060195264000.L, 10559470521600.L, 670442572800.L, + 33522128640.L, 1323241920.L, 40840800.L, 960960.L, 16380.L, 182.L, 1.L}; + const MatrixType A2 = A * A; + const MatrixType A4 = A2 * A2; + const MatrixType A6 = A4 * A2; + V = b[13] * A6 + b[11] * A4 + b[9] * A2; // used for temporary storage + MatrixType tmp = A6 * V; + tmp += b[7] * A6 + b[5] * A4 + b[3] * A2 + b[1] * MatrixType::Identity(A.rows(), A.cols()); + U.noalias() = A * tmp; + tmp = b[12] * A6 + b[10] * A4 + b[8] * A2; + V.noalias() = A6 * tmp; + V += b[6] * A6 + b[4] * A4 + b[2] * A2 + b[0] * MatrixType::Identity(A.rows(), A.cols()); +} + +/** \brief Compute the (17,17)-Padé approximant to the exponential. + * + * After exit, \f$ (V+U)(V-U)^{-1} \f$ is the Padé + * approximant of \f$ \exp(A) \f$ around \f$ A = 0 \f$. 
+ * + * This function activates only if your long double is double-double or quadruple. + */ +#if LDBL_MANT_DIG > 64 +template +void matrix_exp_pade17(const MatA& A, MatU& U, MatV& V) +{ + typedef typename MatA::PlainObject MatrixType; + typedef typename NumTraits::Scalar>::Real RealScalar; + const RealScalar b[] = {830034394580628357120000.L, 415017197290314178560000.L, + 100610229646136770560000.L, 15720348382208870400000.L, + 1774878043152614400000.L, 153822763739893248000.L, 10608466464820224000.L, + 595373117923584000.L, 27563570274240000.L, 1060137318240000.L, + 33924394183680.L, 899510451840.L, 19554575040.L, 341863200.L, 4651200.L, + 46512.L, 306.L, 1.L}; + const MatrixType A2 = A * A; + const MatrixType A4 = A2 * A2; + const MatrixType A6 = A4 * A2; + const MatrixType A8 = A4 * A4; + V = b[17] * A8 + b[15] * A6 + b[13] * A4 + b[11] * A2; // used for temporary storage + MatrixType tmp = A8 * V; + tmp += b[9] * A8 + b[7] * A6 + b[5] * A4 + b[3] * A2 + + b[1] * MatrixType::Identity(A.rows(), A.cols()); + U.noalias() = A * tmp; + tmp = b[16] * A8 + b[14] * A6 + b[12] * A4 + b[10] * A2; + V.noalias() = tmp * A8; + V += b[8] * A8 + b[6] * A6 + b[4] * A4 + b[2] * A2 + + b[0] * MatrixType::Identity(A.rows(), A.cols()); +} +#endif + +template ::Scalar>::Real> +struct matrix_exp_computeUV +{ + /** \brief Compute Padé approximant to the exponential. + * + * Computes \c U, \c V and \c squarings such that \f$ (V+U)(V-U)^{-1} \f$ is a Padé + * approximant of \f$ \exp(2^{-\mbox{squarings}}M) \f$ around \f$ M = 0 \f$, where \f$ M \f$ + * denotes the matrix \c arg. The degree of the Padé approximant and the value of squarings + * are chosen such that the approximation error is no more than the round-off error. 
+ */ + static void run(const MatrixType& arg, MatrixType& U, MatrixType& V, int& squarings); +}; + +template +struct matrix_exp_computeUV +{ + template + static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings) + { + using std::frexp; + using std::pow; + const float l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); + squarings = 0; + if (l1norm < 4.258730016922831e-001f) { + matrix_exp_pade3(arg, U, V); + } else if (l1norm < 1.880152677804762e+000f) { + matrix_exp_pade5(arg, U, V); + } else { + const float maxnorm = 3.925724783138660f; + frexp(l1norm / maxnorm, &squarings); + if (squarings < 0) squarings = 0; + MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); + matrix_exp_pade7(A, U, V); + } + } +}; + +template +struct matrix_exp_computeUV +{ + typedef typename NumTraits::Scalar>::Real RealScalar; + template + static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings) + { + using std::frexp; + using std::pow; + const RealScalar l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); + squarings = 0; + if (l1norm < 1.495585217958292e-002) { + matrix_exp_pade3(arg, U, V); + } else if (l1norm < 2.539398330063230e-001) { + matrix_exp_pade5(arg, U, V); + } else if (l1norm < 9.504178996162932e-001) { + matrix_exp_pade7(arg, U, V); + } else if (l1norm < 2.097847961257068e+000) { + matrix_exp_pade9(arg, U, V); + } else { + const RealScalar maxnorm = 5.371920351148152; + frexp(l1norm / maxnorm, &squarings); + if (squarings < 0) squarings = 0; + MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); + matrix_exp_pade13(A, U, V); + } + } +}; + +template +struct matrix_exp_computeUV +{ + template + static void run(const ArgType& arg, MatrixType& U, MatrixType& V, int& squarings) + { +#if LDBL_MANT_DIG == 53 // double precision + matrix_exp_computeUV::run(arg, U, V, squarings); + +#else + + using std::frexp; + using std::pow; + const long double l1norm = arg.cwiseAbs().colwise().sum().maxCoeff(); + 
squarings = 0; + +#if LDBL_MANT_DIG <= 64 // extended precision + + if (l1norm < 4.1968497232266989671e-003L) { + matrix_exp_pade3(arg, U, V); + } else if (l1norm < 1.1848116734693823091e-001L) { + matrix_exp_pade5(arg, U, V); + } else if (l1norm < 5.5170388480686700274e-001L) { + matrix_exp_pade7(arg, U, V); + } else if (l1norm < 1.3759868875587845383e+000L) { + matrix_exp_pade9(arg, U, V); + } else { + const long double maxnorm = 4.0246098906697353063L; + frexp(l1norm / maxnorm, &squarings); + if (squarings < 0) squarings = 0; + MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); + matrix_exp_pade13(A, U, V); + } + +#elif LDBL_MANT_DIG <= 106 // double-double + + if (l1norm < 3.2787892205607026992947488108213e-005L) { + matrix_exp_pade3(arg, U, V); + } else if (l1norm < 6.4467025060072760084130906076332e-003L) { + matrix_exp_pade5(arg, U, V); + } else if (l1norm < 6.8988028496595374751374122881143e-002L) { + matrix_exp_pade7(arg, U, V); + } else if (l1norm < 2.7339737518502231741495857201670e-001L) { + matrix_exp_pade9(arg, U, V); + } else if (l1norm < 1.3203382096514474905666448850278e+000L) { + matrix_exp_pade13(arg, U, V); + } else { + const long double maxnorm = 3.2579440895405400856599663723517L; + frexp(l1norm / maxnorm, &squarings); + if (squarings < 0) squarings = 0; + MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); + matrix_exp_pade17(A, U, V); + } + +#elif LDBL_MANT_DIG <= 113 // quadruple precision + + if (l1norm < 1.639394610288918690547467954466970e-005L) { + matrix_exp_pade3(arg, U, V); + } else if (l1norm < 4.253237712165275566025884344433009e-003L) { + matrix_exp_pade5(arg, U, V); + } else if (l1norm < 5.125804063165764409885122032933142e-002L) { + matrix_exp_pade7(arg, U, V); + } else if (l1norm < 2.170000765161155195453205651889853e-001L) { + matrix_exp_pade9(arg, U, V); + } else if (l1norm < 1.125358383453143065081397882891878e+000L) { + matrix_exp_pade13(arg, U, V); + } else { + const long double maxnorm = 
2.884233277829519311757165057717815L; + frexp(l1norm / maxnorm, &squarings); + if (squarings < 0) squarings = 0; + MatrixType A = arg.unaryExpr(MatrixExponentialScalingOp(squarings)); + matrix_exp_pade17(A, U, V); + } + +#else + + // this case should be handled in compute() + eigen_assert(false && "Bug in MatrixExponential"); + +#endif +#endif // LDBL_MANT_DIG + } +}; + +template struct is_exp_known_type : false_type {}; +template<> struct is_exp_known_type : true_type {}; +template<> struct is_exp_known_type : true_type {}; +#if LDBL_MANT_DIG <= 113 +template<> struct is_exp_known_type : true_type {}; +#endif + +template +void matrix_exp_compute(const ArgType& arg, ResultType &result, true_type) // natively supported scalar type +{ + typedef typename ArgType::PlainObject MatrixType; + MatrixType U, V; + int squarings; + matrix_exp_computeUV::run(arg, U, V, squarings); // Pade approximant is (U+V) / (-U+V) + MatrixType numer = U + V; + MatrixType denom = -U + V; + result = denom.partialPivLu().solve(numer); + for (int i=0; i +void matrix_exp_compute(const ArgType& arg, ResultType &result, false_type) // default +{ + typedef typename ArgType::PlainObject MatrixType; + typedef typename traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename std::complex ComplexScalar; + result = arg.matrixFunction(internal::stem_function_exp); +} + +} // end namespace Eigen::internal + +/** \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix exponential of some matrix (expression). + * + * \tparam Derived Type of the argument to the matrix exponential. + * + * This class holds the argument to the matrix exponential until it is assigned or evaluated for + * some other reason (so the argument should not be changed in the meantime). It is the return type + * of MatrixBase::exp() and most of the time this is the only way it is used. 
+ */ +template struct MatrixExponentialReturnValue +: public ReturnByValue > +{ + public: + /** \brief Constructor. + * + * \param src %Matrix (expression) forming the argument of the matrix exponential. + */ + MatrixExponentialReturnValue(const Derived& src) : m_src(src) { } + + /** \brief Compute the matrix exponential. + * + * \param result the matrix exponential of \p src in the constructor. + */ + template + inline void evalTo(ResultType& result) const + { + const typename internal::nested_eval::type tmp(m_src); + internal::matrix_exp_compute(tmp, result, internal::is_exp_known_type()); + } + + Index rows() const { return m_src.rows(); } + Index cols() const { return m_src.cols(); } + + protected: + const typename internal::ref_selector::type m_src; +}; + +namespace internal { +template +struct traits > +{ + typedef typename Derived::PlainObject ReturnType; +}; +} + +template +const MatrixExponentialReturnValue MatrixBase::exp() const +{ + eigen_assert(rows() == cols()); + return MatrixExponentialReturnValue(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_EXPONENTIAL diff --git a/external/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h b/external/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h new file mode 100644 index 0000000..cc12ab6 --- /dev/null +++ b/external/unsupported/Eigen/src/MatrixFunctions/MatrixFunction.h @@ -0,0 +1,569 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2011, 2013 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_FUNCTION_H +#define EIGEN_MATRIX_FUNCTION_H + +#include "StemFunction.h" + + +namespace Eigen { + +namespace internal { + +/** \brief Maximum distance allowed between eigenvalues to be considered "close". 
*/ +static const float matrix_function_separation = 0.1f; + +/** \ingroup MatrixFunctions_Module + * \class MatrixFunctionAtomic + * \brief Helper class for computing matrix functions of atomic matrices. + * + * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other. + */ +template +class MatrixFunctionAtomic +{ + public: + + typedef typename MatrixType::Scalar Scalar; + typedef typename stem_function::type StemFunction; + + /** \brief Constructor + * \param[in] f matrix function to compute. + */ + MatrixFunctionAtomic(StemFunction f) : m_f(f) { } + + /** \brief Compute matrix function of atomic matrix + * \param[in] A argument of matrix function, should be upper triangular and atomic + * \returns f(A), the matrix function evaluated at the given matrix + */ + MatrixType compute(const MatrixType& A); + + private: + StemFunction* m_f; +}; + +template +typename NumTraits::Real matrix_function_compute_mu(const MatrixType& A) +{ + typedef typename plain_col_type::type VectorType; + Index rows = A.rows(); + const MatrixType N = MatrixType::Identity(rows, rows) - A; + VectorType e = VectorType::Ones(rows); + N.template triangularView().solveInPlace(e); + return e.cwiseAbs().maxCoeff(); +} + +template +MatrixType MatrixFunctionAtomic::compute(const MatrixType& A) +{ + // TODO: Use that A is upper triangular + typedef typename NumTraits::Real RealScalar; + Index rows = A.rows(); + Scalar avgEival = A.trace() / Scalar(RealScalar(rows)); + MatrixType Ashifted = A - avgEival * MatrixType::Identity(rows, rows); + RealScalar mu = matrix_function_compute_mu(Ashifted); + MatrixType F = m_f(avgEival, 0) * MatrixType::Identity(rows, rows); + MatrixType P = Ashifted; + MatrixType Fincr; + for (Index s = 1; double(s) < 1.1 * double(rows) + 10.0; s++) { // upper limit is fairly arbitrary + Fincr = m_f(avgEival, static_cast(s)) * P; + F += Fincr; + P = Scalar(RealScalar(1)/RealScalar(s + 1)) * P * Ashifted; + + // test whether Taylor series 
converged + const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff(); + const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff(); + if (Fincr_norm < NumTraits::epsilon() * F_norm) { + RealScalar delta = 0; + RealScalar rfactorial = 1; + for (Index r = 0; r < rows; r++) { + RealScalar mx = 0; + for (Index i = 0; i < rows; i++) + mx = (std::max)(mx, std::abs(m_f(Ashifted(i, i) + avgEival, static_cast(s+r)))); + if (r != 0) + rfactorial *= RealScalar(r); + delta = (std::max)(delta, mx / rfactorial); + } + const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff(); + if (mu * delta * P_norm < NumTraits::epsilon() * F_norm) // series converged + break; + } + } + return F; +} + +/** \brief Find cluster in \p clusters containing some value + * \param[in] key Value to find + * \returns Iterator to cluster containing \p key, or \c clusters.end() if no cluster in \p m_clusters + * contains \p key. + */ +template +typename ListOfClusters::iterator matrix_function_find_cluster(Index key, ListOfClusters& clusters) +{ + typename std::list::iterator j; + for (typename ListOfClusters::iterator i = clusters.begin(); i != clusters.end(); ++i) { + j = std::find(i->begin(), i->end(), key); + if (j != i->end()) + return i; + } + return clusters.end(); +} + +/** \brief Partition eigenvalues in clusters of ei'vals close to each other + * + * \param[in] eivals Eigenvalues + * \param[out] clusters Resulting partition of eigenvalues + * + * The partition satisfies the following two properties: + * # Any eigenvalue in a certain cluster is at most matrix_function_separation() away from another eigenvalue + * in the same cluster. + * # The distance between two eigenvalues in different clusters is more than matrix_function_separation(). + * The implementation follows Algorithm 4.1 in the paper of Davies and Higham. 
+ */ +template +void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list& clusters) +{ + typedef typename EivalsType::RealScalar RealScalar; + for (Index i=0; i::iterator qi = matrix_function_find_cluster(i, clusters); + if (qi == clusters.end()) { + Cluster l; + l.push_back(i); + clusters.push_back(l); + qi = clusters.end(); + --qi; + } + + // Look for other element to add to the set + for (Index j=i+1; jbegin(), qi->end(), j) == qi->end()) { + typename std::list::iterator qj = matrix_function_find_cluster(j, clusters); + if (qj == clusters.end()) { + qi->push_back(j); + } else { + qi->insert(qi->end(), qj->begin(), qj->end()); + clusters.erase(qj); + } + } + } + } +} + +/** \brief Compute size of each cluster given a partitioning */ +template +void matrix_function_compute_cluster_size(const ListOfClusters& clusters, Matrix& clusterSize) +{ + const Index numClusters = static_cast(clusters.size()); + clusterSize.setZero(numClusters); + Index clusterIndex = 0; + for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) { + clusterSize[clusterIndex] = cluster->size(); + ++clusterIndex; + } +} + +/** \brief Compute start of each block using clusterSize */ +template +void matrix_function_compute_block_start(const VectorType& clusterSize, VectorType& blockStart) +{ + blockStart.resize(clusterSize.rows()); + blockStart(0) = 0; + for (Index i = 1; i < clusterSize.rows(); i++) { + blockStart(i) = blockStart(i-1) + clusterSize(i-1); + } +} + +/** \brief Compute mapping of eigenvalue indices to cluster indices */ +template +void matrix_function_compute_map(const EivalsType& eivals, const ListOfClusters& clusters, VectorType& eivalToCluster) +{ + eivalToCluster.resize(eivals.rows()); + Index clusterIndex = 0; + for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) { + for (Index i = 0; i < eivals.rows(); ++i) { + if (std::find(cluster->begin(), 
cluster->end(), i) != cluster->end()) { + eivalToCluster[i] = clusterIndex; + } + } + ++clusterIndex; + } +} + +/** \brief Compute permutation which groups ei'vals in same cluster together */ +template +void matrix_function_compute_permutation(const DynVectorType& blockStart, const DynVectorType& eivalToCluster, VectorType& permutation) +{ + DynVectorType indexNextEntry = blockStart; + permutation.resize(eivalToCluster.rows()); + for (Index i = 0; i < eivalToCluster.rows(); i++) { + Index cluster = eivalToCluster[i]; + permutation[i] = indexNextEntry[cluster]; + ++indexNextEntry[cluster]; + } +} + +/** \brief Permute Schur decomposition in U and T according to permutation */ +template +void matrix_function_permute_schur(VectorType& permutation, MatrixType& U, MatrixType& T) +{ + for (Index i = 0; i < permutation.rows() - 1; i++) { + Index j; + for (j = i; j < permutation.rows(); j++) { + if (permutation(j) == i) break; + } + eigen_assert(permutation(j) == i); + for (Index k = j-1; k >= i; k--) { + JacobiRotation rotation; + rotation.makeGivens(T(k, k+1), T(k+1, k+1) - T(k, k)); + T.applyOnTheLeft(k, k+1, rotation.adjoint()); + T.applyOnTheRight(k, k+1, rotation); + U.applyOnTheRight(k, k+1, rotation); + std::swap(permutation.coeffRef(k), permutation.coeffRef(k+1)); + } + } +} + +/** \brief Compute block diagonal part of matrix function. + * + * This routine computes the matrix function applied to the block diagonal part of \p T (which should be + * upper triangular), with the blocking given by \p blockStart and \p clusterSize. The matrix function of + * each diagonal block is computed by \p atomic. The off-diagonal parts of \p fT are set to zero. 
+ */ +template +void matrix_function_compute_block_atomic(const MatrixType& T, AtomicType& atomic, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT) +{ + fT.setZero(T.rows(), T.cols()); + for (Index i = 0; i < clusterSize.rows(); ++i) { + fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)) + = atomic.compute(T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))); + } +} + +/** \brief Solve a triangular Sylvester equation AX + XB = C + * + * \param[in] A the matrix A; should be square and upper triangular + * \param[in] B the matrix B; should be square and upper triangular + * \param[in] C the matrix C; should have correct size. + * + * \returns the solution X. + * + * If A is m-by-m and B is n-by-n, then both C and X are m-by-n. The (i,j)-th component of the Sylvester + * equation is + * \f[ + * \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}. + * \f] + * This can be re-arranged to yield: + * \f[ + * X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij} + * - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr). + * \f] + * It is assumed that A and B are such that the numerator is never zero (otherwise the Sylvester equation + * does not have a unique solution). In that case, these equations can be evaluated in the order + * \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$. 
+ */ +template +MatrixType matrix_function_solve_triangular_sylvester(const MatrixType& A, const MatrixType& B, const MatrixType& C) +{ + eigen_assert(A.rows() == A.cols()); + eigen_assert(A.isUpperTriangular()); + eigen_assert(B.rows() == B.cols()); + eigen_assert(B.isUpperTriangular()); + eigen_assert(C.rows() == A.rows()); + eigen_assert(C.cols() == B.rows()); + + typedef typename MatrixType::Scalar Scalar; + + Index m = A.rows(); + Index n = B.rows(); + MatrixType X(m, n); + + for (Index i = m - 1; i >= 0; --i) { + for (Index j = 0; j < n; ++j) { + + // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj} + Scalar AX; + if (i == m - 1) { + AX = 0; + } else { + Matrix AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i); + AX = AXmatrix(0,0); + } + + // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj} + Scalar XB; + if (j == 0) { + XB = 0; + } else { + Matrix XBmatrix = X.row(i).head(j) * B.col(j).head(j); + XB = XBmatrix(0,0); + } + + X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j)); + } + } + return X; +} + +/** \brief Compute part of matrix function above block diagonal. + * + * This routine completes the computation of \p fT, denoting a matrix function applied to the triangular + * matrix \p T. It assumes that the block diagonal part of \p fT has already been computed. The part below + * the diagonal is zero, because \p T is upper triangular. 
+ */ +template +void matrix_function_compute_above_diagonal(const MatrixType& T, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT) +{ + typedef internal::traits Traits; + typedef typename MatrixType::Scalar Scalar; + static const int Options = MatrixType::Options; + typedef Matrix DynMatrixType; + + for (Index k = 1; k < clusterSize.rows(); k++) { + for (Index i = 0; i < clusterSize.rows() - k; i++) { + // compute (i, i+k) block + DynMatrixType A = T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)); + DynMatrixType B = -T.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k)); + DynMatrixType C = fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)) + * T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k)); + C -= T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k)) + * fT.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k)); + for (Index m = i + 1; m < i + k; m++) { + C += fT.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m)) + * T.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k)); + C -= T.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m)) + * fT.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k)); + } + fT.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k)) + = matrix_function_solve_triangular_sylvester(A, B, C); + } + } +} + +/** \ingroup MatrixFunctions_Module + * \brief Class for computing matrix functions. + * \tparam MatrixType type of the argument of the matrix function, + * expected to be an instantiation of the Matrix class template. + * \tparam AtomicType type for computing matrix function of atomic blocks. + * \tparam IsComplex used internally to select correct specialization. + * + * This class implements the Schur-Parlett algorithm for computing matrix functions. 
The spectrum of the + * matrix is divided in clustered of eigenvalues that lies close together. This class delegates the + * computation of the matrix function on every block corresponding to these clusters to an object of type + * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class + * \p AtomicType should have a \p compute() member function for computing the matrix function of a block. + * + * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic + */ +template ::Scalar>::IsComplex> +struct matrix_function_compute +{ + /** \brief Compute the matrix function. + * + * \param[in] A argument of matrix function, should be a square matrix. + * \param[in] atomic class for computing matrix function of atomic blocks. + * \param[out] result the function \p f applied to \p A, as + * specified in the constructor. + * + * See MatrixBase::matrixFunction() for details on how this computation + * is implemented. + */ + template + static void run(const MatrixType& A, AtomicType& atomic, ResultType &result); +}; + +/** \internal \ingroup MatrixFunctions_Module + * \brief Partial specialization of MatrixFunction for real matrices + * + * This converts the real matrix to a complex matrix, compute the matrix function of that matrix, and then + * converts the result back to a real matrix. 
+ */ +template +struct matrix_function_compute +{ + template + static void run(const MatA& A, AtomicType& atomic, ResultType &result) + { + typedef internal::traits Traits; + typedef typename Traits::Scalar Scalar; + static const int Rows = Traits::RowsAtCompileTime, Cols = Traits::ColsAtCompileTime; + static const int MaxRows = Traits::MaxRowsAtCompileTime, MaxCols = Traits::MaxColsAtCompileTime; + + typedef std::complex ComplexScalar; + typedef Matrix ComplexMatrix; + + ComplexMatrix CA = A.template cast(); + ComplexMatrix Cresult; + matrix_function_compute::run(CA, atomic, Cresult); + result = Cresult.real(); + } +}; + +/** \internal \ingroup MatrixFunctions_Module + * \brief Partial specialization of MatrixFunction for complex matrices + */ +template +struct matrix_function_compute +{ + template + static void run(const MatA& A, AtomicType& atomic, ResultType &result) + { + typedef internal::traits Traits; + + // compute Schur decomposition of A + const ComplexSchur schurOfA(A); + eigen_assert(schurOfA.info()==Success); + MatrixType T = schurOfA.matrixT(); + MatrixType U = schurOfA.matrixU(); + + // partition eigenvalues into clusters of ei'vals "close" to each other + std::list > clusters; + matrix_function_partition_eigenvalues(T.diagonal(), clusters); + + // compute size of each cluster + Matrix clusterSize; + matrix_function_compute_cluster_size(clusters, clusterSize); + + // blockStart[i] is row index at which block corresponding to i-th cluster starts + Matrix blockStart; + matrix_function_compute_block_start(clusterSize, blockStart); + + // compute map so that eivalToCluster[i] = j means that i-th ei'val is in j-th cluster + Matrix eivalToCluster; + matrix_function_compute_map(T.diagonal(), clusters, eivalToCluster); + + // compute permutation which groups ei'vals in same cluster together + Matrix permutation; + matrix_function_compute_permutation(blockStart, eivalToCluster, permutation); + + // permute Schur decomposition + 
matrix_function_permute_schur(permutation, U, T); + + // compute result + MatrixType fT; // matrix function applied to T + matrix_function_compute_block_atomic(T, atomic, blockStart, clusterSize, fT); + matrix_function_compute_above_diagonal(T, blockStart, clusterSize, fT); + result = U * (fT.template triangularView() * U.adjoint()); + } +}; + +} // end of namespace internal + +/** \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix function of some matrix (expression). + * + * \tparam Derived Type of the argument to the matrix function. + * + * This class holds the argument to the matrix function until it is assigned or evaluated for some other + * reason (so the argument should not be changed in the meantime). It is the return type of + * matrixBase::matrixFunction() and related functions and most of the time this is the only way it is used. + */ +template class MatrixFunctionReturnValue +: public ReturnByValue > +{ + public: + typedef typename Derived::Scalar Scalar; + typedef typename internal::stem_function::type StemFunction; + + protected: + typedef typename internal::ref_selector::type DerivedNested; + + public: + + /** \brief Constructor. + * + * \param[in] A %Matrix (expression) forming the argument of the matrix function. + * \param[in] f Stem function for matrix function under consideration. + */ + MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { } + + /** \brief Compute the matrix function. + * + * \param[out] result \p f applied to \p A, where \p f and \p A are as in the constructor. 
+ */ + template + inline void evalTo(ResultType& result) const + { + typedef typename internal::nested_eval::type NestedEvalType; + typedef typename internal::remove_all::type NestedEvalTypeClean; + typedef internal::traits Traits; + typedef std::complex::Real> ComplexScalar; + typedef Matrix DynMatrixType; + + typedef internal::MatrixFunctionAtomic AtomicType; + AtomicType atomic(m_f); + + internal::matrix_function_compute::run(m_A, atomic, result); + } + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + + private: + const DerivedNested m_A; + StemFunction *m_f; +}; + +namespace internal { +template +struct traits > +{ + typedef typename Derived::PlainObject ReturnType; +}; +} + + +/********** MatrixBase methods **********/ + + +template +const MatrixFunctionReturnValue MatrixBase::matrixFunction(typename internal::stem_function::Scalar>::type f) const +{ + eigen_assert(rows() == cols()); + return MatrixFunctionReturnValue(derived(), f); +} + +template +const MatrixFunctionReturnValue MatrixBase::sin() const +{ + eigen_assert(rows() == cols()); + typedef typename internal::stem_function::ComplexScalar ComplexScalar; + return MatrixFunctionReturnValue(derived(), internal::stem_function_sin); +} + +template +const MatrixFunctionReturnValue MatrixBase::cos() const +{ + eigen_assert(rows() == cols()); + typedef typename internal::stem_function::ComplexScalar ComplexScalar; + return MatrixFunctionReturnValue(derived(), internal::stem_function_cos); +} + +template +const MatrixFunctionReturnValue MatrixBase::sinh() const +{ + eigen_assert(rows() == cols()); + typedef typename internal::stem_function::ComplexScalar ComplexScalar; + return MatrixFunctionReturnValue(derived(), internal::stem_function_sinh); +} + +template +const MatrixFunctionReturnValue MatrixBase::cosh() const +{ + eigen_assert(rows() == cols()); + typedef typename internal::stem_function::ComplexScalar ComplexScalar; + return MatrixFunctionReturnValue(derived(), 
internal::stem_function_cosh); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_FUNCTION_H diff --git a/external/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h b/external/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h new file mode 100644 index 0000000..e917013 --- /dev/null +++ b/external/unsupported/Eigen/src/MatrixFunctions/MatrixLogarithm.h @@ -0,0 +1,373 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011, 2013 Jitse Niesen +// Copyright (C) 2011 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_LOGARITHM +#define EIGEN_MATRIX_LOGARITHM + +namespace Eigen { + +namespace internal { + +template +struct matrix_log_min_pade_degree +{ + static const int value = 3; +}; + +template +struct matrix_log_max_pade_degree +{ + typedef typename NumTraits::Real RealScalar; + static const int value = std::numeric_limits::digits<= 24? 5: // single precision + std::numeric_limits::digits<= 53? 7: // double precision + std::numeric_limits::digits<= 64? 8: // extended precision + std::numeric_limits::digits<=106? 10: // double-double + 11; // quadruple precision +}; + +/** \brief Compute logarithm of 2x2 triangular matrix. 
*/ +template +void matrix_log_compute_2x2(const MatrixType& A, MatrixType& result) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + using std::abs; + using std::ceil; + using std::imag; + using std::log; + + Scalar logA00 = log(A(0,0)); + Scalar logA11 = log(A(1,1)); + + result(0,0) = logA00; + result(1,0) = Scalar(0); + result(1,1) = logA11; + + Scalar y = A(1,1) - A(0,0); + if (y==Scalar(0)) + { + result(0,1) = A(0,1) / A(0,0); + } + else if ((abs(A(0,0)) < RealScalar(0.5)*abs(A(1,1))) || (abs(A(0,0)) > 2*abs(A(1,1)))) + { + result(0,1) = A(0,1) * (logA11 - logA00) / y; + } + else + { + // computation in previous branch is inaccurate if A(1,1) \approx A(0,0) + RealScalar unwindingNumber = ceil((imag(logA11 - logA00) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI)); + result(0,1) = A(0,1) * (numext::log1p(y/A(0,0)) + Scalar(0,RealScalar(2*EIGEN_PI)*unwindingNumber)) / y; + } +} + +/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = float) */ +inline int matrix_log_get_pade_degree(float normTminusI) +{ + const float maxNormForPade[] = { 2.5111573934555054e-1 /* degree = 3 */ , 4.0535837411880493e-1, + 5.3149729967117310e-1 }; + const int minPadeDegree = matrix_log_min_pade_degree::value; + const int maxPadeDegree = matrix_log_max_pade_degree::value; + int degree = minPadeDegree; + for (; degree <= maxPadeDegree; ++degree) + if (normTminusI <= maxNormForPade[degree - minPadeDegree]) + break; + return degree; +} + +/* \brief Get suitable degree for Pade approximation. 
(specialized for RealScalar = double) */ +inline int matrix_log_get_pade_degree(double normTminusI) +{ + const double maxNormForPade[] = { 1.6206284795015624e-2 /* degree = 3 */ , 5.3873532631381171e-2, + 1.1352802267628681e-1, 1.8662860613541288e-1, 2.642960831111435e-1 }; + const int minPadeDegree = matrix_log_min_pade_degree::value; + const int maxPadeDegree = matrix_log_max_pade_degree::value; + int degree = minPadeDegree; + for (; degree <= maxPadeDegree; ++degree) + if (normTminusI <= maxNormForPade[degree - minPadeDegree]) + break; + return degree; +} + +/* \brief Get suitable degree for Pade approximation. (specialized for RealScalar = long double) */ +inline int matrix_log_get_pade_degree(long double normTminusI) +{ +#if LDBL_MANT_DIG == 53 // double precision + const long double maxNormForPade[] = { 1.6206284795015624e-2L /* degree = 3 */ , 5.3873532631381171e-2L, + 1.1352802267628681e-1L, 1.8662860613541288e-1L, 2.642960831111435e-1L }; +#elif LDBL_MANT_DIG <= 64 // extended precision + const long double maxNormForPade[] = { 5.48256690357782863103e-3L /* degree = 3 */, 2.34559162387971167321e-2L, + 5.84603923897347449857e-2L, 1.08486423756725170223e-1L, 1.68385767881294446649e-1L, + 2.32777776523703892094e-1L }; +#elif LDBL_MANT_DIG <= 106 // double-double + const long double maxNormForPade[] = { 8.58970550342939562202529664318890e-5L /* degree = 3 */, + 9.34074328446359654039446552677759e-4L, 4.26117194647672175773064114582860e-3L, + 1.21546224740281848743149666560464e-2L, 2.61100544998339436713088248557444e-2L, + 4.66170074627052749243018566390567e-2L, 7.32585144444135027565872014932387e-2L, + 1.05026503471351080481093652651105e-1L }; +#else // quadruple precision + const long double maxNormForPade[] = { 4.7419931187193005048501568167858103e-5L /* degree = 3 */, + 5.8853168473544560470387769480192666e-4L, 2.9216120366601315391789493628113520e-3L, + 8.8415758124319434347116734705174308e-3L, 1.9850836029449446668518049562565291e-2L, + 
3.6688019729653446926585242192447447e-2L, 5.9290962294020186998954055264528393e-2L, + 8.6998436081634343903250580992127677e-2L, 1.1880960220216759245467951592883642e-1L }; +#endif + const int minPadeDegree = matrix_log_min_pade_degree::value; + const int maxPadeDegree = matrix_log_max_pade_degree::value; + int degree = minPadeDegree; + for (; degree <= maxPadeDegree; ++degree) + if (normTminusI <= maxNormForPade[degree - minPadeDegree]) + break; + return degree; +} + +/* \brief Compute Pade approximation to matrix logarithm */ +template +void matrix_log_compute_pade(MatrixType& result, const MatrixType& T, int degree) +{ + typedef typename NumTraits::Real RealScalar; + const int minPadeDegree = 3; + const int maxPadeDegree = 11; + assert(degree >= minPadeDegree && degree <= maxPadeDegree); + // FIXME this creates float-conversion-warnings if these are enabled. + // Either manually convert each value, or disable the warning locally + const RealScalar nodes[][maxPadeDegree] = { + { 0.1127016653792583114820734600217600L, 0.5000000000000000000000000000000000L, // degree 3 + 0.8872983346207416885179265399782400L }, + { 0.0694318442029737123880267555535953L, 0.3300094782075718675986671204483777L, // degree 4 + 0.6699905217924281324013328795516223L, 0.9305681557970262876119732444464048L }, + { 0.0469100770306680036011865608503035L, 0.2307653449471584544818427896498956L, // degree 5 + 0.5000000000000000000000000000000000L, 0.7692346550528415455181572103501044L, + 0.9530899229693319963988134391496965L }, + { 0.0337652428984239860938492227530027L, 0.1693953067668677431693002024900473L, // degree 6 + 0.3806904069584015456847491391596440L, 0.6193095930415984543152508608403560L, + 0.8306046932331322568306997975099527L, 0.9662347571015760139061507772469973L }, + { 0.0254460438286207377369051579760744L, 0.1292344072003027800680676133596058L, // degree 7 + 0.2970774243113014165466967939615193L, 0.5000000000000000000000000000000000L, + 0.7029225756886985834533032060384807L, 
0.8707655927996972199319323866403942L, + 0.9745539561713792622630948420239256L }, + { 0.0198550717512318841582195657152635L, 0.1016667612931866302042230317620848L, // degree 8 + 0.2372337950418355070911304754053768L, 0.4082826787521750975302619288199080L, + 0.5917173212478249024697380711800920L, 0.7627662049581644929088695245946232L, + 0.8983332387068133697957769682379152L, 0.9801449282487681158417804342847365L }, + { 0.0159198802461869550822118985481636L, 0.0819844463366821028502851059651326L, // degree 9 + 0.1933142836497048013456489803292629L, 0.3378732882980955354807309926783317L, + 0.5000000000000000000000000000000000L, 0.6621267117019044645192690073216683L, + 0.8066857163502951986543510196707371L, 0.9180155536633178971497148940348674L, + 0.9840801197538130449177881014518364L }, + { 0.0130467357414141399610179939577740L, 0.0674683166555077446339516557882535L, // degree 10 + 0.1602952158504877968828363174425632L, 0.2833023029353764046003670284171079L, + 0.4255628305091843945575869994351400L, 0.5744371694908156054424130005648600L, + 0.7166976970646235953996329715828921L, 0.8397047841495122031171636825574368L, + 0.9325316833444922553660483442117465L, 0.9869532642585858600389820060422260L }, + { 0.0108856709269715035980309994385713L, 0.0564687001159523504624211153480364L, // degree 11 + 0.1349239972129753379532918739844233L, 0.2404519353965940920371371652706952L, + 0.3652284220238275138342340072995692L, 0.5000000000000000000000000000000000L, + 0.6347715779761724861657659927004308L, 0.7595480646034059079628628347293048L, + 0.8650760027870246620467081260155767L, 0.9435312998840476495375788846519636L, + 0.9891143290730284964019690005614287L } }; + + const RealScalar weights[][maxPadeDegree] = { + { 0.2777777777777777777777777777777778L, 0.4444444444444444444444444444444444L, // degree 3 + 0.2777777777777777777777777777777778L }, + { 0.1739274225687269286865319746109997L, 0.3260725774312730713134680253890003L, // degree 4 + 0.3260725774312730713134680253890003L, 
0.1739274225687269286865319746109997L }, + { 0.1184634425280945437571320203599587L, 0.2393143352496832340206457574178191L, // degree 5 + 0.2844444444444444444444444444444444L, 0.2393143352496832340206457574178191L, + 0.1184634425280945437571320203599587L }, + { 0.0856622461895851725201480710863665L, 0.1803807865240693037849167569188581L, // degree 6 + 0.2339569672863455236949351719947755L, 0.2339569672863455236949351719947755L, + 0.1803807865240693037849167569188581L, 0.0856622461895851725201480710863665L }, + { 0.0647424830844348466353057163395410L, 0.1398526957446383339507338857118898L, // degree 7 + 0.1909150252525594724751848877444876L, 0.2089795918367346938775510204081633L, + 0.1909150252525594724751848877444876L, 0.1398526957446383339507338857118898L, + 0.0647424830844348466353057163395410L }, + { 0.0506142681451881295762656771549811L, 0.1111905172266872352721779972131204L, // degree 8 + 0.1568533229389436436689811009933007L, 0.1813418916891809914825752246385978L, + 0.1813418916891809914825752246385978L, 0.1568533229389436436689811009933007L, + 0.1111905172266872352721779972131204L, 0.0506142681451881295762656771549811L }, + { 0.0406371941807872059859460790552618L, 0.0903240803474287020292360156214564L, // degree 9 + 0.1303053482014677311593714347093164L, 0.1561735385200014200343152032922218L, + 0.1651196775006298815822625346434870L, 0.1561735385200014200343152032922218L, + 0.1303053482014677311593714347093164L, 0.0903240803474287020292360156214564L, + 0.0406371941807872059859460790552618L }, + { 0.0333356721543440687967844049466659L, 0.0747256745752902965728881698288487L, // degree 10 + 0.1095431812579910219977674671140816L, 0.1346333596549981775456134607847347L, + 0.1477621123573764350869464973256692L, 0.1477621123573764350869464973256692L, + 0.1346333596549981775456134607847347L, 0.1095431812579910219977674671140816L, + 0.0747256745752902965728881698288487L, 0.0333356721543440687967844049466659L }, + { 0.0278342835580868332413768602212743L, 
0.0627901847324523123173471496119701L, // degree 11 + 0.0931451054638671257130488207158280L, 0.1165968822959952399592618524215876L, + 0.1314022722551233310903444349452546L, 0.1364625433889503153572417641681711L, + 0.1314022722551233310903444349452546L, 0.1165968822959952399592618524215876L, + 0.0931451054638671257130488207158280L, 0.0627901847324523123173471496119701L, + 0.0278342835580868332413768602212743L } }; + + MatrixType TminusI = T - MatrixType::Identity(T.rows(), T.rows()); + result.setZero(T.rows(), T.rows()); + for (int k = 0; k < degree; ++k) { + RealScalar weight = weights[degree-minPadeDegree][k]; + RealScalar node = nodes[degree-minPadeDegree][k]; + result += weight * (MatrixType::Identity(T.rows(), T.rows()) + node * TminusI) + .template triangularView().solve(TminusI); + } +} + +/** \brief Compute logarithm of triangular matrices with size > 2. + * \details This uses a inverse scale-and-square algorithm. */ +template +void matrix_log_compute_big(const MatrixType& A, MatrixType& result) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + using std::pow; + + int numberOfSquareRoots = 0; + int numberOfExtraSquareRoots = 0; + int degree; + MatrixType T = A, sqrtT; + + const int maxPadeDegree = matrix_log_max_pade_degree::value; + const RealScalar maxNormForPade = RealScalar( + maxPadeDegree<= 5? 5.3149729967117310e-1L: // single precision + maxPadeDegree<= 7? 2.6429608311114350e-1L: // double precision + maxPadeDegree<= 8? 2.32777776523703892094e-1L: // extended precision + maxPadeDegree<=10? 
1.05026503471351080481093652651105e-1L: // double-double + 1.1880960220216759245467951592883642e-1L); // quadruple precision + + while (true) { + RealScalar normTminusI = (T - MatrixType::Identity(T.rows(), T.rows())).cwiseAbs().colwise().sum().maxCoeff(); + if (normTminusI < maxNormForPade) { + degree = matrix_log_get_pade_degree(normTminusI); + int degree2 = matrix_log_get_pade_degree(normTminusI / RealScalar(2)); + if ((degree - degree2 <= 1) || (numberOfExtraSquareRoots == 1)) + break; + ++numberOfExtraSquareRoots; + } + matrix_sqrt_triangular(T, sqrtT); + T = sqrtT.template triangularView(); + ++numberOfSquareRoots; + } + + matrix_log_compute_pade(result, T, degree); + result *= pow(RealScalar(2), RealScalar(numberOfSquareRoots)); // TODO replace by bitshift if possible +} + +/** \ingroup MatrixFunctions_Module + * \class MatrixLogarithmAtomic + * \brief Helper class for computing matrix logarithm of atomic matrices. + * + * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other. + * + * \sa class MatrixFunctionAtomic, MatrixBase::log() + */ +template +class MatrixLogarithmAtomic +{ +public: + /** \brief Compute matrix logarithm of atomic matrix + * \param[in] A argument of matrix logarithm, should be upper triangular and atomic + * \returns The logarithm of \p A. + */ + MatrixType compute(const MatrixType& A); +}; + +template +MatrixType MatrixLogarithmAtomic::compute(const MatrixType& A) +{ + using std::log; + MatrixType result(A.rows(), A.rows()); + if (A.rows() == 1) + result(0,0) = log(A(0,0)); + else if (A.rows() == 2) + matrix_log_compute_2x2(A, result); + else + matrix_log_compute_big(A, result); + return result; +} + +} // end of namespace internal + +/** \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix logarithm of some matrix (expression). + * + * \tparam Derived Type of the argument to the matrix function. 
+ * + * This class holds the argument to the matrix function until it is + * assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * MatrixBase::log() and most of the time this is the only way it + * is used. + */ +template class MatrixLogarithmReturnValue +: public ReturnByValue > +{ +public: + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Index Index; + +protected: + typedef typename internal::ref_selector::type DerivedNested; + +public: + + /** \brief Constructor. + * + * \param[in] A %Matrix (expression) forming the argument of the matrix logarithm. + */ + explicit MatrixLogarithmReturnValue(const Derived& A) : m_A(A) { } + + /** \brief Compute the matrix logarithm. + * + * \param[out] result Logarithm of \c A, where \c A is as specified in the constructor. + */ + template + inline void evalTo(ResultType& result) const + { + typedef typename internal::nested_eval::type DerivedEvalType; + typedef typename internal::remove_all::type DerivedEvalTypeClean; + typedef internal::traits Traits; + typedef std::complex::Real> ComplexScalar; + typedef Matrix DynMatrixType; + typedef internal::MatrixLogarithmAtomic AtomicType; + AtomicType atomic; + + internal::matrix_function_compute::run(m_A, atomic, result); + } + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + +private: + const DerivedNested m_A; +}; + +namespace internal { + template + struct traits > + { + typedef typename Derived::PlainObject ReturnType; + }; +} + + +/********** MatrixBase method **********/ + + +template +const MatrixLogarithmReturnValue MatrixBase::log() const +{ + eigen_assert(rows() == cols()); + return MatrixLogarithmReturnValue(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_LOGARITHM diff --git a/external/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h b/external/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h new file mode 
100644 index 0000000..d7672d7 --- /dev/null +++ b/external/unsupported/Eigen/src/MatrixFunctions/MatrixPower.h @@ -0,0 +1,705 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012, 2013 Chen-Pang He +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_POWER +#define EIGEN_MATRIX_POWER + +namespace Eigen { + +template class MatrixPower; + +/** + * \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix power of some matrix. + * + * \tparam MatrixType type of the base, a matrix. + * + * This class holds the arguments to the matrix power until it is + * assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * MatrixPower::operator() and related functions and most of the + * time this is the only way it is used. + */ +/* TODO This class is only used by MatrixPower, so it should be nested + * into MatrixPower, like MatrixPower::ReturnValue. However, my + * compiler complained about unused template parameter in the + * following declaration in namespace internal. + * + * template + * struct traits::ReturnValue>; + */ +template +class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParenthesesReturnValue > +{ + public: + typedef typename MatrixType::RealScalar RealScalar; + + /** + * \brief Constructor. + * + * \param[in] pow %MatrixPower storing the base. + * \param[in] p scalar, the exponent of the matrix power. + */ + MatrixPowerParenthesesReturnValue(MatrixPower& pow, RealScalar p) : m_pow(pow), m_p(p) + { } + + /** + * \brief Compute the matrix power. 
+ * + * \param[out] result + */ + template + inline void evalTo(ResultType& result) const + { m_pow.compute(result, m_p); } + + Index rows() const { return m_pow.rows(); } + Index cols() const { return m_pow.cols(); } + + private: + MatrixPower& m_pow; + const RealScalar m_p; +}; + +/** + * \ingroup MatrixFunctions_Module + * + * \brief Class for computing matrix powers. + * + * \tparam MatrixType type of the base, expected to be an instantiation + * of the Matrix class template. + * + * This class is capable of computing triangular real/complex matrices + * raised to a power in the interval \f$ (-1, 1) \f$. + * + * \note Currently this class is only used by MatrixPower. One may + * insist that this be nested into MatrixPower. This class is here to + * facilitate future development of triangular matrix functions. + */ +template +class MatrixPowerAtomic : internal::noncopyable +{ + private: + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef std::complex ComplexScalar; + typedef Block ResultType; + + const MatrixType& m_A; + RealScalar m_p; + + void computePade(int degree, const MatrixType& IminusT, ResultType& res) const; + void compute2x2(ResultType& res, RealScalar p) const; + void computeBig(ResultType& res) const; + static int getPadeDegree(float normIminusT); + static int getPadeDegree(double normIminusT); + static int getPadeDegree(long double normIminusT); + static ComplexScalar computeSuperDiag(const ComplexScalar&, const ComplexScalar&, RealScalar p); + static RealScalar computeSuperDiag(RealScalar, RealScalar, RealScalar p); + + public: + /** + * \brief Constructor. + * + * \param[in] T the base of the matrix power. + * \param[in] p the exponent of the matrix power, should be in + * \f$ (-1, 1) \f$. 
+ * + * The class stores a reference to T, so it should not be changed + * (or destroyed) before evaluation. Only the upper triangular + * part of T is read. + */ + MatrixPowerAtomic(const MatrixType& T, RealScalar p); + + /** + * \brief Compute the matrix power. + * + * \param[out] res \f$ A^p \f$ where A and p are specified in the + * constructor. + */ + void compute(ResultType& res) const; +}; + +template +MatrixPowerAtomic::MatrixPowerAtomic(const MatrixType& T, RealScalar p) : + m_A(T), m_p(p) +{ + eigen_assert(T.rows() == T.cols()); + eigen_assert(p > -1 && p < 1); +} + +template +void MatrixPowerAtomic::compute(ResultType& res) const +{ + using std::pow; + switch (m_A.rows()) { + case 0: + break; + case 1: + res(0,0) = pow(m_A(0,0), m_p); + break; + case 2: + compute2x2(res, m_p); + break; + default: + computeBig(res); + } +} + +template +void MatrixPowerAtomic::computePade(int degree, const MatrixType& IminusT, ResultType& res) const +{ + int i = 2*degree; + res = (m_p-RealScalar(degree)) / RealScalar(2*i-2) * IminusT; + + for (--i; i; --i) { + res = (MatrixType::Identity(IminusT.rows(), IminusT.cols()) + res).template triangularView() + .solve((i==1 ? -m_p : i&1 ? 
(-m_p-RealScalar(i/2))/RealScalar(2*i) : (m_p-RealScalar(i/2))/RealScalar(2*i-2)) * IminusT).eval(); + } + res += MatrixType::Identity(IminusT.rows(), IminusT.cols()); +} + +// This function assumes that res has the correct size (see bug 614) +template +void MatrixPowerAtomic::compute2x2(ResultType& res, RealScalar p) const +{ + using std::abs; + using std::pow; + res.coeffRef(0,0) = pow(m_A.coeff(0,0), p); + + for (Index i=1; i < m_A.cols(); ++i) { + res.coeffRef(i,i) = pow(m_A.coeff(i,i), p); + if (m_A.coeff(i-1,i-1) == m_A.coeff(i,i)) + res.coeffRef(i-1,i) = p * pow(m_A.coeff(i,i), p-1); + else if (2*abs(m_A.coeff(i-1,i-1)) < abs(m_A.coeff(i,i)) || 2*abs(m_A.coeff(i,i)) < abs(m_A.coeff(i-1,i-1))) + res.coeffRef(i-1,i) = (res.coeff(i,i)-res.coeff(i-1,i-1)) / (m_A.coeff(i,i)-m_A.coeff(i-1,i-1)); + else + res.coeffRef(i-1,i) = computeSuperDiag(m_A.coeff(i,i), m_A.coeff(i-1,i-1), p); + res.coeffRef(i-1,i) *= m_A.coeff(i-1,i); + } +} + +template +void MatrixPowerAtomic::computeBig(ResultType& res) const +{ + using std::ldexp; + const int digits = std::numeric_limits::digits; + const RealScalar maxNormForPade = RealScalar( + digits <= 24? 4.3386528e-1L // single precision + : digits <= 53? 2.789358995219730e-1L // double precision + : digits <= 64? 2.4471944416607995472e-1L // extended precision + : digits <= 106? 
1.1016843812851143391275867258512e-1L // double-double + : 9.134603732914548552537150753385375e-2L); // quadruple precision + MatrixType IminusT, sqrtT, T = m_A.template triangularView(); + RealScalar normIminusT; + int degree, degree2, numberOfSquareRoots = 0; + bool hasExtraSquareRoot = false; + + for (Index i=0; i < m_A.cols(); ++i) + eigen_assert(m_A(i,i) != RealScalar(0)); + + while (true) { + IminusT = MatrixType::Identity(m_A.rows(), m_A.cols()) - T; + normIminusT = IminusT.cwiseAbs().colwise().sum().maxCoeff(); + if (normIminusT < maxNormForPade) { + degree = getPadeDegree(normIminusT); + degree2 = getPadeDegree(normIminusT/2); + if (degree - degree2 <= 1 || hasExtraSquareRoot) + break; + hasExtraSquareRoot = true; + } + matrix_sqrt_triangular(T, sqrtT); + T = sqrtT.template triangularView(); + ++numberOfSquareRoots; + } + computePade(degree, IminusT, res); + + for (; numberOfSquareRoots; --numberOfSquareRoots) { + compute2x2(res, ldexp(m_p, -numberOfSquareRoots)); + res = res.template triangularView() * res; + } + compute2x2(res, m_p); +} + +template +inline int MatrixPowerAtomic::getPadeDegree(float normIminusT) +{ + const float maxNormForPade[] = { 2.8064004e-1f /* degree = 3 */ , 4.3386528e-1f }; + int degree = 3; + for (; degree <= 4; ++degree) + if (normIminusT <= maxNormForPade[degree - 3]) + break; + return degree; +} + +template +inline int MatrixPowerAtomic::getPadeDegree(double normIminusT) +{ + const double maxNormForPade[] = { 1.884160592658218e-2 /* degree = 3 */ , 6.038881904059573e-2, 1.239917516308172e-1, + 1.999045567181744e-1, 2.789358995219730e-1 }; + int degree = 3; + for (; degree <= 7; ++degree) + if (normIminusT <= maxNormForPade[degree - 3]) + break; + return degree; +} + +template +inline int MatrixPowerAtomic::getPadeDegree(long double normIminusT) +{ +#if LDBL_MANT_DIG == 53 + const int maxPadeDegree = 7; + const double maxNormForPade[] = { 1.884160592658218e-2L /* degree = 3 */ , 6.038881904059573e-2L, 1.239917516308172e-1L, + 
1.999045567181744e-1L, 2.789358995219730e-1L }; +#elif LDBL_MANT_DIG <= 64 + const int maxPadeDegree = 8; + const long double maxNormForPade[] = { 6.3854693117491799460e-3L /* degree = 3 */ , 2.6394893435456973676e-2L, + 6.4216043030404063729e-2L, 1.1701165502926694307e-1L, 1.7904284231268670284e-1L, 2.4471944416607995472e-1L }; +#elif LDBL_MANT_DIG <= 106 + const int maxPadeDegree = 10; + const double maxNormForPade[] = { 1.0007161601787493236741409687186e-4L /* degree = 3 */ , + 1.0007161601787493236741409687186e-3L, 4.7069769360887572939882574746264e-3L, 1.3220386624169159689406653101695e-2L, + 2.8063482381631737920612944054906e-2L, 4.9625993951953473052385361085058e-2L, 7.7367040706027886224557538328171e-2L, + 1.1016843812851143391275867258512e-1L }; +#else + const int maxPadeDegree = 10; + const double maxNormForPade[] = { 5.524506147036624377378713555116378e-5L /* degree = 3 */ , + 6.640600568157479679823602193345995e-4L, 3.227716520106894279249709728084626e-3L, + 9.619593944683432960546978734646284e-3L, 2.134595382433742403911124458161147e-2L, + 3.908166513900489428442993794761185e-2L, 6.266780814639442865832535460550138e-2L, + 9.134603732914548552537150753385375e-2L }; +#endif + int degree = 3; + for (; degree <= maxPadeDegree; ++degree) + if (normIminusT <= maxNormForPade[degree - 3]) + break; + return degree; +} + +template +inline typename MatrixPowerAtomic::ComplexScalar +MatrixPowerAtomic::computeSuperDiag(const ComplexScalar& curr, const ComplexScalar& prev, RealScalar p) +{ + using std::ceil; + using std::exp; + using std::log; + using std::sinh; + + ComplexScalar logCurr = log(curr); + ComplexScalar logPrev = log(prev); + RealScalar unwindingNumber = ceil((numext::imag(logCurr - logPrev) - RealScalar(EIGEN_PI)) / RealScalar(2*EIGEN_PI)); + ComplexScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2) + ComplexScalar(0, RealScalar(EIGEN_PI)*unwindingNumber); + return RealScalar(2) * exp(RealScalar(0.5) * p * (logCurr + logPrev)) * sinh(p * w) / 
(curr - prev); +} + +template +inline typename MatrixPowerAtomic::RealScalar +MatrixPowerAtomic::computeSuperDiag(RealScalar curr, RealScalar prev, RealScalar p) +{ + using std::exp; + using std::log; + using std::sinh; + + RealScalar w = numext::log1p((curr-prev)/prev)/RealScalar(2); + return 2 * exp(p * (log(curr) + log(prev)) / 2) * sinh(p * w) / (curr - prev); +} + +/** + * \ingroup MatrixFunctions_Module + * + * \brief Class for computing matrix powers. + * + * \tparam MatrixType type of the base, expected to be an instantiation + * of the Matrix class template. + * + * This class is capable of computing real/complex matrices raised to + * an arbitrary real power. Meanwhile, it saves the result of Schur + * decomposition if an non-integral power has even been calculated. + * Therefore, if you want to compute multiple (>= 2) matrix powers + * for the same matrix, using the class directly is more efficient than + * calling MatrixBase::pow(). + * + * Example: + * \include MatrixPower_optimal.cpp + * Output: \verbinclude MatrixPower_optimal.out + */ +template +class MatrixPower : internal::noncopyable +{ + private: + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + + public: + /** + * \brief Constructor. + * + * \param[in] A the base of the matrix power. + * + * The class stores a reference to A, so it should not be changed + * (or destroyed) before evaluation. + */ + explicit MatrixPower(const MatrixType& A) : + m_A(A), + m_conditionNumber(0), + m_rank(A.cols()), + m_nulls(0) + { eigen_assert(A.rows() == A.cols()); } + + /** + * \brief Returns the matrix power. + * + * \param[in] p exponent, a real scalar. + * \return The expression \f$ A^p \f$, where A is specified in the + * constructor. + */ + const MatrixPowerParenthesesReturnValue operator()(RealScalar p) + { return MatrixPowerParenthesesReturnValue(*this, p); } + + /** + * \brief Compute the matrix power. + * + * \param[in] p exponent, a real scalar. 
+ * \param[out] res \f$ A^p \f$ where A is specified in the + * constructor. + */ + template + void compute(ResultType& res, RealScalar p); + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + + private: + typedef std::complex ComplexScalar; + typedef Matrix ComplexMatrix; + + /** \brief Reference to the base of matrix power. */ + typename MatrixType::Nested m_A; + + /** \brief Temporary storage. */ + MatrixType m_tmp; + + /** \brief Store the result of Schur decomposition. */ + ComplexMatrix m_T, m_U; + + /** \brief Store fractional power of m_T. */ + ComplexMatrix m_fT; + + /** + * \brief Condition number of m_A. + * + * It is initialized as 0 to avoid performing unnecessary Schur + * decomposition, which is the bottleneck. + */ + RealScalar m_conditionNumber; + + /** \brief Rank of m_A. */ + Index m_rank; + + /** \brief Rank deficiency of m_A. */ + Index m_nulls; + + /** + * \brief Split p into integral part and fractional part. + * + * \param[in] p The exponent. + * \param[out] p The fractional part ranging in \f$ (-1, 1) \f$. + * \param[out] intpart The integral part. + * + * Only if the fractional part is nonzero, it calls initialize(). + */ + void split(RealScalar& p, RealScalar& intpart); + + /** \brief Perform Schur decomposition for fractional power. 
*/ + void initialize(); + + template + void computeIntPower(ResultType& res, RealScalar p); + + template + void computeFracPower(ResultType& res, RealScalar p); + + template + static void revertSchur( + Matrix& res, + const ComplexMatrix& T, + const ComplexMatrix& U); + + template + static void revertSchur( + Matrix& res, + const ComplexMatrix& T, + const ComplexMatrix& U); +}; + +template +template +void MatrixPower::compute(ResultType& res, RealScalar p) +{ + using std::pow; + switch (cols()) { + case 0: + break; + case 1: + res(0,0) = pow(m_A.coeff(0,0), p); + break; + default: + RealScalar intpart; + split(p, intpart); + + res = MatrixType::Identity(rows(), cols()); + computeIntPower(res, intpart); + if (p) computeFracPower(res, p); + } +} + +template +void MatrixPower::split(RealScalar& p, RealScalar& intpart) +{ + using std::floor; + using std::pow; + + intpart = floor(p); + p -= intpart; + + // Perform Schur decomposition if it is not yet performed and the power is + // not an integer. + if (!m_conditionNumber && p) + initialize(); + + // Choose the more stable of intpart = floor(p) and intpart = ceil(p). + if (p > RealScalar(0.5) && p > (1-p) * pow(m_conditionNumber, p)) { + --p; + ++intpart; + } +} + +template +void MatrixPower::initialize() +{ + const ComplexSchur schurOfA(m_A); + JacobiRotation rot; + ComplexScalar eigenvalue; + + m_fT.resizeLike(m_A); + m_T = schurOfA.matrixT(); + m_U = schurOfA.matrixU(); + m_conditionNumber = m_T.diagonal().array().abs().maxCoeff() / m_T.diagonal().array().abs().minCoeff(); + + // Move zero eigenvalues to the bottom right corner. 
+ for (Index i = cols()-1; i>=0; --i) { + if (m_rank <= 2) + return; + if (m_T.coeff(i,i) == RealScalar(0)) { + for (Index j=i+1; j < m_rank; ++j) { + eigenvalue = m_T.coeff(j,j); + rot.makeGivens(m_T.coeff(j-1,j), eigenvalue); + m_T.applyOnTheRight(j-1, j, rot); + m_T.applyOnTheLeft(j-1, j, rot.adjoint()); + m_T.coeffRef(j-1,j-1) = eigenvalue; + m_T.coeffRef(j,j) = RealScalar(0); + m_U.applyOnTheRight(j-1, j, rot); + } + --m_rank; + } + } + + m_nulls = rows() - m_rank; + if (m_nulls) { + eigen_assert(m_T.bottomRightCorner(m_nulls, m_nulls).isZero() + && "Base of matrix power should be invertible or with a semisimple zero eigenvalue."); + m_fT.bottomRows(m_nulls).fill(RealScalar(0)); + } +} + +template +template +void MatrixPower::computeIntPower(ResultType& res, RealScalar p) +{ + using std::abs; + using std::fmod; + RealScalar pp = abs(p); + + if (p<0) + m_tmp = m_A.inverse(); + else + m_tmp = m_A; + + while (true) { + if (fmod(pp, 2) >= 1) + res = m_tmp * res; + pp /= 2; + if (pp < 1) + break; + m_tmp *= m_tmp; + } +} + +template +template +void MatrixPower::computeFracPower(ResultType& res, RealScalar p) +{ + Block blockTp(m_fT, 0, 0, m_rank, m_rank); + eigen_assert(m_conditionNumber); + eigen_assert(m_rank + m_nulls == rows()); + + MatrixPowerAtomic(m_T.topLeftCorner(m_rank, m_rank), p).compute(blockTp); + if (m_nulls) { + m_fT.topRightCorner(m_rank, m_nulls) = m_T.topLeftCorner(m_rank, m_rank).template triangularView() + .solve(blockTp * m_T.topRightCorner(m_rank, m_nulls)); + } + revertSchur(m_tmp, m_fT, m_U); + res = m_tmp * res; +} + +template +template +inline void MatrixPower::revertSchur( + Matrix& res, + const ComplexMatrix& T, + const ComplexMatrix& U) +{ res.noalias() = U * (T.template triangularView() * U.adjoint()); } + +template +template +inline void MatrixPower::revertSchur( + Matrix& res, + const ComplexMatrix& T, + const ComplexMatrix& U) +{ res.noalias() = (U * (T.template triangularView() * U.adjoint())).real(); } + +/** + * \ingroup 
MatrixFunctions_Module + * + * \brief Proxy for the matrix power of some matrix (expression). + * + * \tparam Derived type of the base, a matrix (expression). + * + * This class holds the arguments to the matrix power until it is + * assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * MatrixBase::pow() and related functions and most of the + * time this is the only way it is used. + */ +template +class MatrixPowerReturnValue : public ReturnByValue< MatrixPowerReturnValue > +{ + public: + typedef typename Derived::PlainObject PlainObject; + typedef typename Derived::RealScalar RealScalar; + + /** + * \brief Constructor. + * + * \param[in] A %Matrix (expression), the base of the matrix power. + * \param[in] p real scalar, the exponent of the matrix power. + */ + MatrixPowerReturnValue(const Derived& A, RealScalar p) : m_A(A), m_p(p) + { } + + /** + * \brief Compute the matrix power. + * + * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the + * constructor. + */ + template + inline void evalTo(ResultType& result) const + { MatrixPower(m_A.eval()).compute(result, m_p); } + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + + private: + const Derived& m_A; + const RealScalar m_p; +}; + +/** + * \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix power of some matrix (expression). + * + * \tparam Derived type of the base, a matrix (expression). + * + * This class holds the arguments to the matrix power until it is + * assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * MatrixBase::pow() and related functions and most of the + * time this is the only way it is used. 
+ */ +template +class MatrixComplexPowerReturnValue : public ReturnByValue< MatrixComplexPowerReturnValue > +{ + public: + typedef typename Derived::PlainObject PlainObject; + typedef typename std::complex ComplexScalar; + + /** + * \brief Constructor. + * + * \param[in] A %Matrix (expression), the base of the matrix power. + * \param[in] p complex scalar, the exponent of the matrix power. + */ + MatrixComplexPowerReturnValue(const Derived& A, const ComplexScalar& p) : m_A(A), m_p(p) + { } + + /** + * \brief Compute the matrix power. + * + * Because \p p is complex, \f$ A^p \f$ is simply evaluated as \f$ + * \exp(p \log(A)) \f$. + * + * \param[out] result \f$ A^p \f$ where \p A and \p p are as in the + * constructor. + */ + template + inline void evalTo(ResultType& result) const + { result = (m_p * m_A.log()).exp(); } + + Index rows() const { return m_A.rows(); } + Index cols() const { return m_A.cols(); } + + private: + const Derived& m_A; + const ComplexScalar m_p; +}; + +namespace internal { + +template +struct traits< MatrixPowerParenthesesReturnValue > +{ typedef typename MatrixPowerType::PlainObject ReturnType; }; + +template +struct traits< MatrixPowerReturnValue > +{ typedef typename Derived::PlainObject ReturnType; }; + +template +struct traits< MatrixComplexPowerReturnValue > +{ typedef typename Derived::PlainObject ReturnType; }; + +} + +template +const MatrixPowerReturnValue MatrixBase::pow(const RealScalar& p) const +{ return MatrixPowerReturnValue(derived(), p); } + +template +const MatrixComplexPowerReturnValue MatrixBase::pow(const std::complex& p) const +{ return MatrixComplexPowerReturnValue(derived(), p); } + +} // namespace Eigen + +#endif // EIGEN_MATRIX_POWER diff --git a/external/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h b/external/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h new file mode 100644 index 0000000..e363e77 --- /dev/null +++ b/external/unsupported/Eigen/src/MatrixFunctions/MatrixSquareRoot.h @@ -0,0 
+1,368 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011, 2013 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_SQUARE_ROOT +#define EIGEN_MATRIX_SQUARE_ROOT + +namespace Eigen { + +namespace internal { + +// pre: T.block(i,i,2,2) has complex conjugate eigenvalues +// post: sqrtT.block(i,i,2,2) is square root of T.block(i,i,2,2) +template +void matrix_sqrt_quasi_triangular_2x2_diagonal_block(const MatrixType& T, Index i, ResultType& sqrtT) +{ + // TODO: This case (2-by-2 blocks with complex conjugate eigenvalues) is probably hidden somewhere + // in EigenSolver. If we expose it, we could call it directly from here. + typedef typename traits::Scalar Scalar; + Matrix block = T.template block<2,2>(i,i); + EigenSolver > es(block); + sqrtT.template block<2,2>(i,i) + = (es.eigenvectors() * es.eigenvalues().cwiseSqrt().asDiagonal() * es.eigenvectors().inverse()).real(); +} + +// pre: block structure of T is such that (i,j) is a 1x1 block, +// all blocks of sqrtT to left of and below (i,j) are correct +// post: sqrtT(i,j) has the correct value +template +void matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT) +{ + typedef typename traits::Scalar Scalar; + Scalar tmp = (sqrtT.row(i).segment(i+1,j-i-1) * sqrtT.col(j).segment(i+1,j-i-1)).value(); + sqrtT.coeffRef(i,j) = (T.coeff(i,j) - tmp) / (sqrtT.coeff(i,i) + sqrtT.coeff(j,j)); +} + +// similar to compute1x1offDiagonalBlock() +template +void matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT) +{ + typedef typename traits::Scalar Scalar; + Matrix rhs = T.template block<1,2>(i,j); + if (j-i > 1) + rhs -= sqrtT.block(i, i+1, 1, j-i-1) * 
sqrtT.block(i+1, j, j-i-1, 2); + Matrix A = sqrtT.coeff(i,i) * Matrix::Identity(); + A += sqrtT.template block<2,2>(j,j).transpose(); + sqrtT.template block<1,2>(i,j).transpose() = A.fullPivLu().solve(rhs.transpose()); +} + +// similar to compute1x1offDiagonalBlock() +template +void matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT) +{ + typedef typename traits::Scalar Scalar; + Matrix rhs = T.template block<2,1>(i,j); + if (j-i > 2) + rhs -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 1); + Matrix A = sqrtT.coeff(j,j) * Matrix::Identity(); + A += sqrtT.template block<2,2>(i,i); + sqrtT.template block<2,1>(i,j) = A.fullPivLu().solve(rhs); +} + +// solves the equation A X + X B = C where all matrices are 2-by-2 +template +void matrix_sqrt_quasi_triangular_solve_auxiliary_equation(MatrixType& X, const MatrixType& A, const MatrixType& B, const MatrixType& C) +{ + typedef typename traits::Scalar Scalar; + Matrix coeffMatrix = Matrix::Zero(); + coeffMatrix.coeffRef(0,0) = A.coeff(0,0) + B.coeff(0,0); + coeffMatrix.coeffRef(1,1) = A.coeff(0,0) + B.coeff(1,1); + coeffMatrix.coeffRef(2,2) = A.coeff(1,1) + B.coeff(0,0); + coeffMatrix.coeffRef(3,3) = A.coeff(1,1) + B.coeff(1,1); + coeffMatrix.coeffRef(0,1) = B.coeff(1,0); + coeffMatrix.coeffRef(0,2) = A.coeff(0,1); + coeffMatrix.coeffRef(1,0) = B.coeff(0,1); + coeffMatrix.coeffRef(1,3) = A.coeff(0,1); + coeffMatrix.coeffRef(2,0) = A.coeff(1,0); + coeffMatrix.coeffRef(2,3) = B.coeff(1,0); + coeffMatrix.coeffRef(3,1) = A.coeff(1,0); + coeffMatrix.coeffRef(3,2) = B.coeff(0,1); + + Matrix rhs; + rhs.coeffRef(0) = C.coeff(0,0); + rhs.coeffRef(1) = C.coeff(0,1); + rhs.coeffRef(2) = C.coeff(1,0); + rhs.coeffRef(3) = C.coeff(1,1); + + Matrix result; + result = coeffMatrix.fullPivLu().solve(rhs); + + X.coeffRef(0,0) = result.coeff(0); + X.coeffRef(0,1) = result.coeff(1); + X.coeffRef(1,0) = result.coeff(2); + X.coeffRef(1,1) = result.coeff(3); +} + +// similar 
to compute1x1offDiagonalBlock() +template +void matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(const MatrixType& T, Index i, Index j, ResultType& sqrtT) +{ + typedef typename traits::Scalar Scalar; + Matrix A = sqrtT.template block<2,2>(i,i); + Matrix B = sqrtT.template block<2,2>(j,j); + Matrix C = T.template block<2,2>(i,j); + if (j-i > 2) + C -= sqrtT.block(i, i+2, 2, j-i-2) * sqrtT.block(i+2, j, j-i-2, 2); + Matrix X; + matrix_sqrt_quasi_triangular_solve_auxiliary_equation(X, A, B, C); + sqrtT.template block<2,2>(i,j) = X; +} + +// pre: T is quasi-upper-triangular and sqrtT is a zero matrix of the same size +// post: the diagonal blocks of sqrtT are the square roots of the diagonal blocks of T +template +void matrix_sqrt_quasi_triangular_diagonal(const MatrixType& T, ResultType& sqrtT) +{ + using std::sqrt; + const Index size = T.rows(); + for (Index i = 0; i < size; i++) { + if (i == size - 1 || T.coeff(i+1, i) == 0) { + eigen_assert(T(i,i) >= 0); + sqrtT.coeffRef(i,i) = sqrt(T.coeff(i,i)); + } + else { + matrix_sqrt_quasi_triangular_2x2_diagonal_block(T, i, sqrtT); + ++i; + } + } +} + +// pre: T is quasi-upper-triangular and diagonal blocks of sqrtT are square root of diagonal blocks of T. +// post: sqrtT is the square root of T. 
+template +void matrix_sqrt_quasi_triangular_off_diagonal(const MatrixType& T, ResultType& sqrtT) +{ + const Index size = T.rows(); + for (Index j = 1; j < size; j++) { + if (T.coeff(j, j-1) != 0) // if T(j-1:j, j-1:j) is a 2-by-2 block + continue; + for (Index i = j-1; i >= 0; i--) { + if (i > 0 && T.coeff(i, i-1) != 0) // if T(i-1:i, i-1:i) is a 2-by-2 block + continue; + bool iBlockIs2x2 = (i < size - 1) && (T.coeff(i+1, i) != 0); + bool jBlockIs2x2 = (j < size - 1) && (T.coeff(j+1, j) != 0); + if (iBlockIs2x2 && jBlockIs2x2) + matrix_sqrt_quasi_triangular_2x2_off_diagonal_block(T, i, j, sqrtT); + else if (iBlockIs2x2 && !jBlockIs2x2) + matrix_sqrt_quasi_triangular_2x1_off_diagonal_block(T, i, j, sqrtT); + else if (!iBlockIs2x2 && jBlockIs2x2) + matrix_sqrt_quasi_triangular_1x2_off_diagonal_block(T, i, j, sqrtT); + else if (!iBlockIs2x2 && !jBlockIs2x2) + matrix_sqrt_quasi_triangular_1x1_off_diagonal_block(T, i, j, sqrtT); + } + } +} + +} // end of namespace internal + +/** \ingroup MatrixFunctions_Module + * \brief Compute matrix square root of quasi-triangular matrix. + * + * \tparam MatrixType type of \p arg, the argument of matrix square root, + * expected to be an instantiation of the Matrix class template. + * \tparam ResultType type of \p result, where result is to be stored. + * \param[in] arg argument of matrix square root. + * \param[out] result matrix square root of upper Hessenberg part of \p arg. + * + * This function computes the square root of the upper quasi-triangular matrix stored in the upper + * Hessenberg part of \p arg. Only the upper Hessenberg part of \p result is updated, the rest is + * not touched. See MatrixBase::sqrt() for details on how this computation is implemented. 
+ * + * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular + */ +template +void matrix_sqrt_quasi_triangular(const MatrixType &arg, ResultType &result) +{ + eigen_assert(arg.rows() == arg.cols()); + result.resize(arg.rows(), arg.cols()); + internal::matrix_sqrt_quasi_triangular_diagonal(arg, result); + internal::matrix_sqrt_quasi_triangular_off_diagonal(arg, result); +} + + +/** \ingroup MatrixFunctions_Module + * \brief Compute matrix square root of triangular matrix. + * + * \tparam MatrixType type of \p arg, the argument of matrix square root, + * expected to be an instantiation of the Matrix class template. + * \tparam ResultType type of \p result, where result is to be stored. + * \param[in] arg argument of matrix square root. + * \param[out] result matrix square root of upper triangular part of \p arg. + * + * Only the upper triangular part (including the diagonal) of \p result is updated, the rest is not + * touched. See MatrixBase::sqrt() for details on how this computation is implemented. + * + * \sa MatrixSquareRoot, MatrixSquareRootQuasiTriangular + */ +template +void matrix_sqrt_triangular(const MatrixType &arg, ResultType &result) +{ + using std::sqrt; + typedef typename MatrixType::Scalar Scalar; + + eigen_assert(arg.rows() == arg.cols()); + + // Compute square root of arg and store it in upper triangular part of result + // This uses that the square root of triangular matrices can be computed directly. 
+ result.resize(arg.rows(), arg.cols()); + for (Index i = 0; i < arg.rows(); i++) { + result.coeffRef(i,i) = sqrt(arg.coeff(i,i)); + } + for (Index j = 1; j < arg.cols(); j++) { + for (Index i = j-1; i >= 0; i--) { + // if i = j-1, then segment has length 0 so tmp = 0 + Scalar tmp = (result.row(i).segment(i+1,j-i-1) * result.col(j).segment(i+1,j-i-1)).value(); + // denominator may be zero if original matrix is singular + result.coeffRef(i,j) = (arg.coeff(i,j) - tmp) / (result.coeff(i,i) + result.coeff(j,j)); + } + } +} + + +namespace internal { + +/** \ingroup MatrixFunctions_Module + * \brief Helper struct for computing matrix square roots of general matrices. + * \tparam MatrixType type of the argument of the matrix square root, + * expected to be an instantiation of the Matrix class template. + * + * \sa MatrixSquareRootTriangular, MatrixSquareRootQuasiTriangular, MatrixBase::sqrt() + */ +template ::Scalar>::IsComplex> +struct matrix_sqrt_compute +{ + /** \brief Compute the matrix square root + * + * \param[in] arg matrix whose square root is to be computed. + * \param[out] result square root of \p arg. + * + * See MatrixBase::sqrt() for details on how this computation is implemented. 
+ */ + template static void run(const MatrixType &arg, ResultType &result); +}; + + +// ********** Partial specialization for real matrices ********** + +template +struct matrix_sqrt_compute +{ + typedef typename MatrixType::PlainObject PlainType; + template + static void run(const MatrixType &arg, ResultType &result) + { + eigen_assert(arg.rows() == arg.cols()); + + // Compute Schur decomposition of arg + const RealSchur schurOfA(arg); + const PlainType& T = schurOfA.matrixT(); + const PlainType& U = schurOfA.matrixU(); + + // Compute square root of T + PlainType sqrtT = PlainType::Zero(arg.rows(), arg.cols()); + matrix_sqrt_quasi_triangular(T, sqrtT); + + // Compute square root of arg + result = U * sqrtT * U.adjoint(); + } +}; + + +// ********** Partial specialization for complex matrices ********** + +template +struct matrix_sqrt_compute +{ + typedef typename MatrixType::PlainObject PlainType; + template + static void run(const MatrixType &arg, ResultType &result) + { + eigen_assert(arg.rows() == arg.cols()); + + // Compute Schur decomposition of arg + const ComplexSchur schurOfA(arg); + const PlainType& T = schurOfA.matrixT(); + const PlainType& U = schurOfA.matrixU(); + + // Compute square root of T + PlainType sqrtT; + matrix_sqrt_triangular(T, sqrtT); + + // Compute square root of arg + result = U * (sqrtT.template triangularView() * U.adjoint()); + } +}; + +} // end namespace internal + +/** \ingroup MatrixFunctions_Module + * + * \brief Proxy for the matrix square root of some matrix (expression). + * + * \tparam Derived Type of the argument to the matrix square root. + * + * This class holds the argument to the matrix square root until it + * is assigned or evaluated for some other reason (so the argument + * should not be changed in the meantime). It is the return type of + * MatrixBase::sqrt() and most of the time this is the only way it is + * used. 
+ */ +template class MatrixSquareRootReturnValue +: public ReturnByValue > +{ + protected: + typedef typename internal::ref_selector::type DerivedNested; + + public: + /** \brief Constructor. + * + * \param[in] src %Matrix (expression) forming the argument of the + * matrix square root. + */ + explicit MatrixSquareRootReturnValue(const Derived& src) : m_src(src) { } + + /** \brief Compute the matrix square root. + * + * \param[out] result the matrix square root of \p src in the + * constructor. + */ + template + inline void evalTo(ResultType& result) const + { + typedef typename internal::nested_eval::type DerivedEvalType; + typedef typename internal::remove_all::type DerivedEvalTypeClean; + DerivedEvalType tmp(m_src); + internal::matrix_sqrt_compute::run(tmp, result); + } + + Index rows() const { return m_src.rows(); } + Index cols() const { return m_src.cols(); } + + protected: + const DerivedNested m_src; +}; + +namespace internal { +template +struct traits > +{ + typedef typename Derived::PlainObject ReturnType; +}; +} + +template +const MatrixSquareRootReturnValue MatrixBase::sqrt() const +{ + eigen_assert(rows() == cols()); + return MatrixSquareRootReturnValue(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_FUNCTION diff --git a/external/unsupported/Eigen/src/MatrixFunctions/StemFunction.h b/external/unsupported/Eigen/src/MatrixFunctions/StemFunction.h new file mode 100644 index 0000000..7604df9 --- /dev/null +++ b/external/unsupported/Eigen/src/MatrixFunctions/StemFunction.h @@ -0,0 +1,117 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010, 2013 Jitse Niesen +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_STEM_FUNCTION +#define EIGEN_STEM_FUNCTION + +namespace Eigen { + +namespace internal { + +/** \brief The exponential function (and its derivatives). */ +template +Scalar stem_function_exp(Scalar x, int) +{ + using std::exp; + return exp(x); +} + +/** \brief Cosine (and its derivatives). */ +template +Scalar stem_function_cos(Scalar x, int n) +{ + using std::cos; + using std::sin; + Scalar res; + + switch (n % 4) { + case 0: + res = std::cos(x); + break; + case 1: + res = -std::sin(x); + break; + case 2: + res = -std::cos(x); + break; + case 3: + res = std::sin(x); + break; + } + return res; +} + +/** \brief Sine (and its derivatives). */ +template +Scalar stem_function_sin(Scalar x, int n) +{ + using std::cos; + using std::sin; + Scalar res; + + switch (n % 4) { + case 0: + res = std::sin(x); + break; + case 1: + res = std::cos(x); + break; + case 2: + res = -std::sin(x); + break; + case 3: + res = -std::cos(x); + break; + } + return res; +} + +/** \brief Hyperbolic cosine (and its derivatives). */ +template +Scalar stem_function_cosh(Scalar x, int n) +{ + using std::cosh; + using std::sinh; + Scalar res; + + switch (n % 2) { + case 0: + res = std::cosh(x); + break; + case 1: + res = std::sinh(x); + break; + } + return res; +} + +/** \brief Hyperbolic sine (and its derivatives). 
*/ +template +Scalar stem_function_sinh(Scalar x, int n) +{ + using std::cosh; + using std::sinh; + Scalar res; + + switch (n % 2) { + case 0: + res = std::sinh(x); + break; + case 1: + res = std::cosh(x); + break; + } + return res; +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_STEM_FUNCTION diff --git a/external/unsupported/Eigen/src/MoreVectorization/MathFunctions.h b/external/unsupported/Eigen/src/MoreVectorization/MathFunctions.h new file mode 100644 index 0000000..63cb28d --- /dev/null +++ b/external/unsupported/Eigen/src/MoreVectorization/MathFunctions.h @@ -0,0 +1,95 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Rohit Garg +// Copyright (C) 2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H +#define EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H + +namespace Eigen { + +namespace internal { + +/** \internal \returns the arcsin of \a a (coeff-wise) */ +template inline static Packet pasin(Packet a) { return std::asin(a); } + +#ifdef EIGEN_VECTORIZE_SSE + +template<> EIGEN_DONT_INLINE Packet4f pasin(Packet4f x) +{ + _EIGEN_DECLARE_CONST_Packet4f(half, 0.5); + _EIGEN_DECLARE_CONST_Packet4f(minus_half, -0.5); + _EIGEN_DECLARE_CONST_Packet4f(3half, 1.5); + + _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(sign_mask, 0x80000000); + + _EIGEN_DECLARE_CONST_Packet4f(pi, 3.141592654); + _EIGEN_DECLARE_CONST_Packet4f(pi_over_2, 3.141592654*0.5); + + _EIGEN_DECLARE_CONST_Packet4f(asin1, 4.2163199048E-2); + _EIGEN_DECLARE_CONST_Packet4f(asin2, 2.4181311049E-2); + _EIGEN_DECLARE_CONST_Packet4f(asin3, 4.5470025998E-2); + _EIGEN_DECLARE_CONST_Packet4f(asin4, 7.4953002686E-2); + _EIGEN_DECLARE_CONST_Packet4f(asin5, 1.6666752422E-1); + + Packet4f a = pabs(x);//got 
the absolute value + + Packet4f sign_bit= _mm_and_ps(x, p4f_sign_mask);//extracted the sign bit + + Packet4f z1,z2;//will need them during computation + + +//will compute the two branches for asin +//so first compare with half + + Packet4f branch_mask= _mm_cmpgt_ps(a, p4f_half);//this is to select which branch to take +//both will be taken, and finally results will be merged +//the branch for values >0.5 + + { +//the core series expansion + z1=pmadd(p4f_minus_half,a,p4f_half); + Packet4f x1=psqrt(z1); + Packet4f s1=pmadd(p4f_asin1, z1, p4f_asin2); + Packet4f s2=pmadd(s1, z1, p4f_asin3); + Packet4f s3=pmadd(s2,z1, p4f_asin4); + Packet4f s4=pmadd(s3,z1, p4f_asin5); + Packet4f temp=pmul(s4,z1);//not really a madd but a mul by z so that the next term can be a madd + z1=pmadd(temp,x1,x1); + z1=padd(z1,z1); + z1=psub(p4f_pi_over_2,z1); + } + + { +//the core series expansion + Packet4f x2=a; + z2=pmul(x2,x2); + Packet4f s1=pmadd(p4f_asin1, z2, p4f_asin2); + Packet4f s2=pmadd(s1, z2, p4f_asin3); + Packet4f s3=pmadd(s2,z2, p4f_asin4); + Packet4f s4=pmadd(s3,z2, p4f_asin5); + Packet4f temp=pmul(s4,z2);//not really a madd but a mul by z so that the next term can be a madd + z2=pmadd(temp,x2,x2); + } + +/* select the correct result from the two branch evaluations */ + z1 = _mm_and_ps(branch_mask, z1); + z2 = _mm_andnot_ps(branch_mask, z2); + Packet4f z = _mm_or_ps(z1,z2); + +/* update the sign */ + return _mm_xor_ps(z, sign_bit); +} + +#endif // EIGEN_VECTORIZE_SSE + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MOREVECTORIZATION_MATHFUNCTIONS_H diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h b/external/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h new file mode 100644 index 0000000..07c5ef0 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/HybridNonLinearSolver.h @@ -0,0 +1,601 @@ +// -*- coding: utf-8 +// vim: set fileencoding=utf-8 + +// This file is part of 
Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_HYBRIDNONLINEARSOLVER_H +#define EIGEN_HYBRIDNONLINEARSOLVER_H + +namespace Eigen { + +namespace HybridNonLinearSolverSpace { + enum Status { + Running = -1, + ImproperInputParameters = 0, + RelativeErrorTooSmall = 1, + TooManyFunctionEvaluation = 2, + TolTooSmall = 3, + NotMakingProgressJacobian = 4, + NotMakingProgressIterations = 5, + UserAsked = 6 + }; +} + +/** + * \ingroup NonLinearOptimization_Module + * \brief Finds a zero of a system of n + * nonlinear functions in n variables by a modification of the Powell + * hybrid method ("dogleg"). + * + * The user must provide a subroutine which calculates the + * functions. The Jacobian is either provided by the user, or approximated + * using a forward-difference method. 
+ * + */ +template +class HybridNonLinearSolver +{ +public: + typedef DenseIndex Index; + + HybridNonLinearSolver(FunctorType &_functor) + : functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;} + + struct Parameters { + Parameters() + : factor(Scalar(100.)) + , maxfev(1000) + , xtol(numext::sqrt(NumTraits::epsilon())) + , nb_of_subdiagonals(-1) + , nb_of_superdiagonals(-1) + , epsfcn(Scalar(0.)) {} + Scalar factor; + Index maxfev; // maximum number of function evaluation + Scalar xtol; + Index nb_of_subdiagonals; + Index nb_of_superdiagonals; + Scalar epsfcn; + }; + typedef Matrix< Scalar, Dynamic, 1 > FVectorType; + typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType; + /* TODO: if eigen provides a triangular storage, use it here */ + typedef Matrix< Scalar, Dynamic, Dynamic > UpperTriangularType; + + HybridNonLinearSolverSpace::Status hybrj1( + FVectorType &x, + const Scalar tol = numext::sqrt(NumTraits::epsilon()) + ); + + HybridNonLinearSolverSpace::Status solveInit(FVectorType &x); + HybridNonLinearSolverSpace::Status solveOneStep(FVectorType &x); + HybridNonLinearSolverSpace::Status solve(FVectorType &x); + + HybridNonLinearSolverSpace::Status hybrd1( + FVectorType &x, + const Scalar tol = numext::sqrt(NumTraits::epsilon()) + ); + + HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x); + HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(FVectorType &x); + HybridNonLinearSolverSpace::Status solveNumericalDiff(FVectorType &x); + + void resetParameters(void) { parameters = Parameters(); } + Parameters parameters; + FVectorType fvec, qtf, diag; + JacobianType fjac; + UpperTriangularType R; + Index nfev; + Index njev; + Index iter; + Scalar fnorm; + bool useExternalScaling; +private: + FunctorType &functor; + Index n; + Scalar sum; + bool sing; + Scalar temp; + Scalar delta; + bool jeval; + Index ncsuc; + Scalar ratio; + Scalar pnorm, xnorm, fnorm1; + Index nslow1, nslow2; + Index ncfail; + Scalar 
actred, prered; + FVectorType wa1, wa2, wa3, wa4; + + HybridNonLinearSolver& operator=(const HybridNonLinearSolver&); +}; + + + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::hybrj1( + FVectorType &x, + const Scalar tol + ) +{ + n = x.size(); + + /* check the input parameters for errors. */ + if (n <= 0 || tol < 0.) + return HybridNonLinearSolverSpace::ImproperInputParameters; + + resetParameters(); + parameters.maxfev = 100*(n+1); + parameters.xtol = tol; + diag.setConstant(n, 1.); + useExternalScaling = true; + return solve(x); +} + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::solveInit(FVectorType &x) +{ + n = x.size(); + + wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n); + fvec.resize(n); + qtf.resize(n); + fjac.resize(n, n); + if (!useExternalScaling) + diag.resize(n); + eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); + + /* Function Body */ + nfev = 0; + njev = 0; + + /* check the input parameters for errors. */ + if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. ) + return HybridNonLinearSolverSpace::ImproperInputParameters; + if (useExternalScaling) + for (Index j = 0; j < n; ++j) + if (diag[j] <= 0.) + return HybridNonLinearSolverSpace::ImproperInputParameters; + + /* evaluate the function at the starting point */ + /* and calculate its norm. */ + nfev = 1; + if ( functor(x, fvec) < 0) + return HybridNonLinearSolverSpace::UserAsked; + fnorm = fvec.stableNorm(); + + /* initialize iteration counter and monitors. 
*/ + iter = 1; + ncsuc = 0; + ncfail = 0; + nslow1 = 0; + nslow2 = 0; + + return HybridNonLinearSolverSpace::Running; +} + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::solveOneStep(FVectorType &x) +{ + using std::abs; + + eigen_assert(x.size()==n); // check the caller is not cheating us + + Index j; + std::vector > v_givens(n), w_givens(n); + + jeval = true; + + /* calculate the jacobian matrix. */ + if ( functor.df(x, fjac) < 0) + return HybridNonLinearSolverSpace::UserAsked; + ++njev; + + wa2 = fjac.colwise().blueNorm(); + + /* on the first iteration and if external scaling is not used, scale according */ + /* to the norms of the columns of the initial jacobian. */ + if (iter == 1) { + if (!useExternalScaling) + for (j = 0; j < n; ++j) + diag[j] = (wa2[j]==0.) ? 1. : wa2[j]; + + /* on the first iteration, calculate the norm of the scaled x */ + /* and initialize the step bound delta. */ + xnorm = diag.cwiseProduct(x).stableNorm(); + delta = parameters.factor * xnorm; + if (delta == 0.) + delta = parameters.factor; + } + + /* compute the qr factorization of the jacobian. */ + HouseholderQR qrfac(fjac); // no pivoting: + + /* copy the triangular factor of the qr factorization into r. */ + R = qrfac.matrixQR(); + + /* accumulate the orthogonal factor in fjac. */ + fjac = qrfac.householderQ(); + + /* form (q transpose)*fvec and store in qtf. */ + qtf = fjac.transpose() * fvec; + + /* rescale if necessary. */ + if (!useExternalScaling) + diag = diag.cwiseMax(wa2); + + while (true) { + /* determine the direction p. */ + internal::dogleg(R, diag, qtf, delta, wa1); + + /* store the direction p and x + p. calculate the norm of p. */ + wa1 = -wa1; + wa2 = x + wa1; + pnorm = diag.cwiseProduct(wa1).stableNorm(); + + /* on the first iteration, adjust the initial step bound. */ + if (iter == 1) + delta = (std::min)(delta,pnorm); + + /* evaluate the function at x + p and calculate its norm. 
*/ + if ( functor(wa2, wa4) < 0) + return HybridNonLinearSolverSpace::UserAsked; + ++nfev; + fnorm1 = wa4.stableNorm(); + + /* compute the scaled actual reduction. */ + actred = -1.; + if (fnorm1 < fnorm) /* Computing 2nd power */ + actred = 1. - numext::abs2(fnorm1 / fnorm); + + /* compute the scaled predicted reduction. */ + wa3 = R.template triangularView()*wa1 + qtf; + temp = wa3.stableNorm(); + prered = 0.; + if (temp < fnorm) /* Computing 2nd power */ + prered = 1. - numext::abs2(temp / fnorm); + + /* compute the ratio of the actual to the predicted reduction. */ + ratio = 0.; + if (prered > 0.) + ratio = actred / prered; + + /* update the step bound. */ + if (ratio < Scalar(.1)) { + ncsuc = 0; + ++ncfail; + delta = Scalar(.5) * delta; + } else { + ncfail = 0; + ++ncsuc; + if (ratio >= Scalar(.5) || ncsuc > 1) + delta = (std::max)(delta, pnorm / Scalar(.5)); + if (abs(ratio - 1.) <= Scalar(.1)) { + delta = pnorm / Scalar(.5); + } + } + + /* test for successful iteration. */ + if (ratio >= Scalar(1e-4)) { + /* successful iteration. update x, fvec, and their norms. */ + x = wa2; + wa2 = diag.cwiseProduct(x); + fvec = wa4; + xnorm = wa2.stableNorm(); + fnorm = fnorm1; + ++iter; + } + + /* determine the progress of the iteration. */ + ++nslow1; + if (actred >= Scalar(.001)) + nslow1 = 0; + if (jeval) + ++nslow2; + if (actred >= Scalar(.1)) + nslow2 = 0; + + /* test for convergence. */ + if (delta <= parameters.xtol * xnorm || fnorm == 0.) + return HybridNonLinearSolverSpace::RelativeErrorTooSmall; + + /* tests for termination and stringent tolerances. 
*/ + if (nfev >= parameters.maxfev) + return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; + if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) + return HybridNonLinearSolverSpace::TolTooSmall; + if (nslow2 == 5) + return HybridNonLinearSolverSpace::NotMakingProgressJacobian; + if (nslow1 == 10) + return HybridNonLinearSolverSpace::NotMakingProgressIterations; + + /* criterion for recalculating jacobian. */ + if (ncfail == 2) + break; // leave inner loop and go for the next outer loop iteration + + /* calculate the rank one modification to the jacobian */ + /* and update qtf if necessary. */ + wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm ); + wa2 = fjac.transpose() * wa4; + if (ratio >= Scalar(1e-4)) + qtf = wa2; + wa2 = (wa2-wa3)/pnorm; + + /* compute the qr factorization of the updated jacobian. */ + internal::r1updt(R, wa1, v_givens, w_givens, wa2, wa3, &sing); + internal::r1mpyq(n, n, fjac.data(), v_givens, w_givens); + internal::r1mpyq(1, n, qtf.data(), v_givens, w_givens); + + jeval = false; + } + return HybridNonLinearSolverSpace::Running; +} + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::solve(FVectorType &x) +{ + HybridNonLinearSolverSpace::Status status = solveInit(x); + if (status==HybridNonLinearSolverSpace::ImproperInputParameters) + return status; + while (status==HybridNonLinearSolverSpace::Running) + status = solveOneStep(x); + return status; +} + + + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::hybrd1( + FVectorType &x, + const Scalar tol + ) +{ + n = x.size(); + + /* check the input parameters for errors. */ + if (n <= 0 || tol < 0.) 
+ return HybridNonLinearSolverSpace::ImproperInputParameters; + + resetParameters(); + parameters.maxfev = 200*(n+1); + parameters.xtol = tol; + + diag.setConstant(n, 1.); + useExternalScaling = true; + return solveNumericalDiff(x); +} + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::solveNumericalDiffInit(FVectorType &x) +{ + n = x.size(); + + if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1; + if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1; + + wa1.resize(n); wa2.resize(n); wa3.resize(n); wa4.resize(n); + qtf.resize(n); + fjac.resize(n, n); + fvec.resize(n); + if (!useExternalScaling) + diag.resize(n); + eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); + + /* Function Body */ + nfev = 0; + njev = 0; + + /* check the input parameters for errors. */ + if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. ) + return HybridNonLinearSolverSpace::ImproperInputParameters; + if (useExternalScaling) + for (Index j = 0; j < n; ++j) + if (diag[j] <= 0.) + return HybridNonLinearSolverSpace::ImproperInputParameters; + + /* evaluate the function at the starting point */ + /* and calculate its norm. */ + nfev = 1; + if ( functor(x, fvec) < 0) + return HybridNonLinearSolverSpace::UserAsked; + fnorm = fvec.stableNorm(); + + /* initialize iteration counter and monitors. 
*/ + iter = 1; + ncsuc = 0; + ncfail = 0; + nslow1 = 0; + nslow2 = 0; + + return HybridNonLinearSolverSpace::Running; +} + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::solveNumericalDiffOneStep(FVectorType &x) +{ + using std::sqrt; + using std::abs; + + assert(x.size()==n); // check the caller is not cheating us + + Index j; + std::vector > v_givens(n), w_givens(n); + + jeval = true; + if (parameters.nb_of_subdiagonals<0) parameters.nb_of_subdiagonals= n-1; + if (parameters.nb_of_superdiagonals<0) parameters.nb_of_superdiagonals= n-1; + + /* calculate the jacobian matrix. */ + if (internal::fdjac1(functor, x, fvec, fjac, parameters.nb_of_subdiagonals, parameters.nb_of_superdiagonals, parameters.epsfcn) <0) + return HybridNonLinearSolverSpace::UserAsked; + nfev += (std::min)(parameters.nb_of_subdiagonals+parameters.nb_of_superdiagonals+ 1, n); + + wa2 = fjac.colwise().blueNorm(); + + /* on the first iteration and if external scaling is not used, scale according */ + /* to the norms of the columns of the initial jacobian. */ + if (iter == 1) { + if (!useExternalScaling) + for (j = 0; j < n; ++j) + diag[j] = (wa2[j]==0.) ? 1. : wa2[j]; + + /* on the first iteration, calculate the norm of the scaled x */ + /* and initialize the step bound delta. */ + xnorm = diag.cwiseProduct(x).stableNorm(); + delta = parameters.factor * xnorm; + if (delta == 0.) + delta = parameters.factor; + } + + /* compute the qr factorization of the jacobian. */ + HouseholderQR qrfac(fjac); // no pivoting: + + /* copy the triangular factor of the qr factorization into r. */ + R = qrfac.matrixQR(); + + /* accumulate the orthogonal factor in fjac. */ + fjac = qrfac.householderQ(); + + /* form (q transpose)*fvec and store in qtf. */ + qtf = fjac.transpose() * fvec; + + /* rescale if necessary. */ + if (!useExternalScaling) + diag = diag.cwiseMax(wa2); + + while (true) { + /* determine the direction p. 
*/ + internal::dogleg(R, diag, qtf, delta, wa1); + + /* store the direction p and x + p. calculate the norm of p. */ + wa1 = -wa1; + wa2 = x + wa1; + pnorm = diag.cwiseProduct(wa1).stableNorm(); + + /* on the first iteration, adjust the initial step bound. */ + if (iter == 1) + delta = (std::min)(delta,pnorm); + + /* evaluate the function at x + p and calculate its norm. */ + if ( functor(wa2, wa4) < 0) + return HybridNonLinearSolverSpace::UserAsked; + ++nfev; + fnorm1 = wa4.stableNorm(); + + /* compute the scaled actual reduction. */ + actred = -1.; + if (fnorm1 < fnorm) /* Computing 2nd power */ + actred = 1. - numext::abs2(fnorm1 / fnorm); + + /* compute the scaled predicted reduction. */ + wa3 = R.template triangularView()*wa1 + qtf; + temp = wa3.stableNorm(); + prered = 0.; + if (temp < fnorm) /* Computing 2nd power */ + prered = 1. - numext::abs2(temp / fnorm); + + /* compute the ratio of the actual to the predicted reduction. */ + ratio = 0.; + if (prered > 0.) + ratio = actred / prered; + + /* update the step bound. */ + if (ratio < Scalar(.1)) { + ncsuc = 0; + ++ncfail; + delta = Scalar(.5) * delta; + } else { + ncfail = 0; + ++ncsuc; + if (ratio >= Scalar(.5) || ncsuc > 1) + delta = (std::max)(delta, pnorm / Scalar(.5)); + if (abs(ratio - 1.) <= Scalar(.1)) { + delta = pnorm / Scalar(.5); + } + } + + /* test for successful iteration. */ + if (ratio >= Scalar(1e-4)) { + /* successful iteration. update x, fvec, and their norms. */ + x = wa2; + wa2 = diag.cwiseProduct(x); + fvec = wa4; + xnorm = wa2.stableNorm(); + fnorm = fnorm1; + ++iter; + } + + /* determine the progress of the iteration. */ + ++nslow1; + if (actred >= Scalar(.001)) + nslow1 = 0; + if (jeval) + ++nslow2; + if (actred >= Scalar(.1)) + nslow2 = 0; + + /* test for convergence. */ + if (delta <= parameters.xtol * xnorm || fnorm == 0.) + return HybridNonLinearSolverSpace::RelativeErrorTooSmall; + + /* tests for termination and stringent tolerances. 
*/ + if (nfev >= parameters.maxfev) + return HybridNonLinearSolverSpace::TooManyFunctionEvaluation; + if (Scalar(.1) * (std::max)(Scalar(.1) * delta, pnorm) <= NumTraits::epsilon() * xnorm) + return HybridNonLinearSolverSpace::TolTooSmall; + if (nslow2 == 5) + return HybridNonLinearSolverSpace::NotMakingProgressJacobian; + if (nslow1 == 10) + return HybridNonLinearSolverSpace::NotMakingProgressIterations; + + /* criterion for recalculating jacobian. */ + if (ncfail == 2) + break; // leave inner loop and go for the next outer loop iteration + + /* calculate the rank one modification to the jacobian */ + /* and update qtf if necessary. */ + wa1 = diag.cwiseProduct( diag.cwiseProduct(wa1)/pnorm ); + wa2 = fjac.transpose() * wa4; + if (ratio >= Scalar(1e-4)) + qtf = wa2; + wa2 = (wa2-wa3)/pnorm; + + /* compute the qr factorization of the updated jacobian. */ + internal::r1updt(R, wa1, v_givens, w_givens, wa2, wa3, &sing); + internal::r1mpyq(n, n, fjac.data(), v_givens, w_givens); + internal::r1mpyq(1, n, qtf.data(), v_givens, w_givens); + + jeval = false; + } + return HybridNonLinearSolverSpace::Running; +} + +template +HybridNonLinearSolverSpace::Status +HybridNonLinearSolver::solveNumericalDiff(FVectorType &x) +{ + HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x); + if (status==HybridNonLinearSolverSpace::ImproperInputParameters) + return status; + while (status==HybridNonLinearSolverSpace::Running) + status = solveNumericalDiffOneStep(x); + return status; +} + +} // end namespace Eigen + +#endif // EIGEN_HYBRIDNONLINEARSOLVER_H + +//vim: ai ts=4 sts=4 et sw=4 diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h b/external/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h new file mode 100644 index 0000000..fe3b79c --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/LevenbergMarquardt.h @@ -0,0 +1,657 @@ +// -*- coding: utf-8 +// vim: set fileencoding=utf-8 + +// This file is 
part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LEVENBERGMARQUARDT__H +#define EIGEN_LEVENBERGMARQUARDT__H + +namespace Eigen { + +namespace LevenbergMarquardtSpace { + enum Status { + NotStarted = -2, + Running = -1, + ImproperInputParameters = 0, + RelativeReductionTooSmall = 1, + RelativeErrorTooSmall = 2, + RelativeErrorAndReductionTooSmall = 3, + CosinusTooSmall = 4, + TooManyFunctionEvaluation = 5, + FtolTooSmall = 6, + XtolTooSmall = 7, + GtolTooSmall = 8, + UserAsked = 9 + }; +} + + + +/** + * \ingroup NonLinearOptimization_Module + * \brief Performs non linear optimization over a non-linear function, + * using a variant of the Levenberg Marquardt algorithm. + * + * Check wikipedia for more information. 
+ * http://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm + */ +template +class LevenbergMarquardt +{ + static Scalar sqrt_epsilon() + { + using std::sqrt; + return sqrt(NumTraits::epsilon()); + } + +public: + LevenbergMarquardt(FunctorType &_functor) + : functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; } + + typedef DenseIndex Index; + + struct Parameters { + Parameters() + : factor(Scalar(100.)) + , maxfev(400) + , ftol(sqrt_epsilon()) + , xtol(sqrt_epsilon()) + , gtol(Scalar(0.)) + , epsfcn(Scalar(0.)) {} + Scalar factor; + Index maxfev; // maximum number of function evaluation + Scalar ftol; + Scalar xtol; + Scalar gtol; + Scalar epsfcn; + }; + + typedef Matrix< Scalar, Dynamic, 1 > FVectorType; + typedef Matrix< Scalar, Dynamic, Dynamic > JacobianType; + + LevenbergMarquardtSpace::Status lmder1( + FVectorType &x, + const Scalar tol = sqrt_epsilon() + ); + + LevenbergMarquardtSpace::Status minimize(FVectorType &x); + LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x); + LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x); + + static LevenbergMarquardtSpace::Status lmdif1( + FunctorType &functor, + FVectorType &x, + Index *nfev, + const Scalar tol = sqrt_epsilon() + ); + + LevenbergMarquardtSpace::Status lmstr1( + FVectorType &x, + const Scalar tol = sqrt_epsilon() + ); + + LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x); + LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(FVectorType &x); + LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(FVectorType &x); + + void resetParameters(void) { parameters = Parameters(); } + + Parameters parameters; + FVectorType fvec, qtf, diag; + JacobianType fjac; + PermutationMatrix permutation; + Index nfev; + Index njev; + Index iter; + Scalar fnorm, gnorm; + bool useExternalScaling; + + Scalar lm_param(void) { return par; } +private: + + FunctorType &functor; + Index n; + Index m; + FVectorType wa1, wa2, wa3, 
wa4; + + Scalar par, sum; + Scalar temp, temp1, temp2; + Scalar delta; + Scalar ratio; + Scalar pnorm, xnorm, fnorm1, actred, dirder, prered; + + LevenbergMarquardt& operator=(const LevenbergMarquardt&); +}; + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::lmder1( + FVectorType &x, + const Scalar tol + ) +{ + n = x.size(); + m = functor.values(); + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || tol < 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + resetParameters(); + parameters.ftol = tol; + parameters.xtol = tol; + parameters.maxfev = 100*(n+1); + + return minimize(x); +} + + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimize(FVectorType &x) +{ + LevenbergMarquardtSpace::Status status = minimizeInit(x); + if (status==LevenbergMarquardtSpace::ImproperInputParameters) + return status; + do { + status = minimizeOneStep(x); + } while (status==LevenbergMarquardtSpace::Running); + return status; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeInit(FVectorType &x) +{ + n = x.size(); + m = functor.values(); + + wa1.resize(n); wa2.resize(n); wa3.resize(n); + wa4.resize(m); + fvec.resize(m); + fjac.resize(m, n); + if (!useExternalScaling) + diag.resize(n); + eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); + qtf.resize(n); + + /* Function Body */ + nfev = 0; + njev = 0; + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + if (useExternalScaling) + for (Index j = 0; j < n; ++j) + if (diag[j] <= 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + /* evaluate the function at the starting point */ + /* and calculate its norm. 
*/ + nfev = 1; + if ( functor(x, fvec) < 0) + return LevenbergMarquardtSpace::UserAsked; + fnorm = fvec.stableNorm(); + + /* initialize levenberg-marquardt parameter and iteration counter. */ + par = 0.; + iter = 1; + + return LevenbergMarquardtSpace::NotStarted; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeOneStep(FVectorType &x) +{ + using std::abs; + using std::sqrt; + + eigen_assert(x.size()==n); // check the caller is not cheating us + + /* calculate the jacobian matrix. */ + Index df_ret = functor.df(x, fjac); + if (df_ret<0) + return LevenbergMarquardtSpace::UserAsked; + if (df_ret>0) + // numerical diff, we evaluated the function df_ret times + nfev += df_ret; + else njev++; + + /* compute the qr factorization of the jacobian. */ + wa2 = fjac.colwise().blueNorm(); + ColPivHouseholderQR qrfac(fjac); + fjac = qrfac.matrixQR(); + permutation = qrfac.colsPermutation(); + + /* on the first iteration and if external scaling is not used, scale according */ + /* to the norms of the columns of the initial jacobian. */ + if (iter == 1) { + if (!useExternalScaling) + for (Index j = 0; j < n; ++j) + diag[j] = (wa2[j]==0.)? 1. : wa2[j]; + + /* on the first iteration, calculate the norm of the scaled x */ + /* and initialize the step bound delta. */ + xnorm = diag.cwiseProduct(x).stableNorm(); + delta = parameters.factor * xnorm; + if (delta == 0.) + delta = parameters.factor; + } + + /* form (q transpose)*fvec and store the first n components in */ + /* qtf. */ + wa4 = fvec; + wa4.applyOnTheLeft(qrfac.householderQ().adjoint()); + qtf = wa4.head(n); + + /* compute the norm of the scaled gradient. */ + gnorm = 0.; + if (fnorm != 0.) + for (Index j = 0; j < n; ++j) + if (wa2[permutation.indices()[j]] != 0.) + gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); + + /* test for convergence of the gradient norm. 
*/ + if (gnorm <= parameters.gtol) + return LevenbergMarquardtSpace::CosinusTooSmall; + + /* rescale if necessary. */ + if (!useExternalScaling) + diag = diag.cwiseMax(wa2); + + do { + + /* determine the levenberg-marquardt parameter. */ + internal::lmpar2(qrfac, diag, qtf, delta, par, wa1); + + /* store the direction p and x + p. calculate the norm of p. */ + wa1 = -wa1; + wa2 = x + wa1; + pnorm = diag.cwiseProduct(wa1).stableNorm(); + + /* on the first iteration, adjust the initial step bound. */ + if (iter == 1) + delta = (std::min)(delta,pnorm); + + /* evaluate the function at x + p and calculate its norm. */ + if ( functor(wa2, wa4) < 0) + return LevenbergMarquardtSpace::UserAsked; + ++nfev; + fnorm1 = wa4.stableNorm(); + + /* compute the scaled actual reduction. */ + actred = -1.; + if (Scalar(.1) * fnorm1 < fnorm) + actred = 1. - numext::abs2(fnorm1 / fnorm); + + /* compute the scaled predicted reduction and */ + /* the scaled directional derivative. */ + wa3 = fjac.template triangularView() * (qrfac.colsPermutation().inverse() *wa1); + temp1 = numext::abs2(wa3.stableNorm() / fnorm); + temp2 = numext::abs2(sqrt(par) * pnorm / fnorm); + prered = temp1 + temp2 / Scalar(.5); + dirder = -(temp1 + temp2); + + /* compute the ratio of the actual to the predicted */ + /* reduction. */ + ratio = 0.; + if (prered != 0.) + ratio = actred / prered; + + /* update the step bound. */ + if (ratio <= Scalar(.25)) { + if (actred >= 0.) + temp = Scalar(.5); + if (actred < 0.) + temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred); + if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) + temp = Scalar(.1); + /* Computing MIN */ + delta = temp * (std::min)(delta, pnorm / Scalar(.1)); + par /= temp; + } else if (!(par != 0. && ratio < Scalar(.75))) { + delta = pnorm / Scalar(.5); + par = Scalar(.5) * par; + } + + /* test for successful iteration. */ + if (ratio >= Scalar(1e-4)) { + /* successful iteration. update x, fvec, and their norms. 
*/ + x = wa2; + wa2 = diag.cwiseProduct(x); + fvec = wa4; + xnorm = wa2.stableNorm(); + fnorm = fnorm1; + ++iter; + } + + /* tests for convergence. */ + if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) + return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; + if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) + return LevenbergMarquardtSpace::RelativeReductionTooSmall; + if (delta <= parameters.xtol * xnorm) + return LevenbergMarquardtSpace::RelativeErrorTooSmall; + + /* tests for termination and stringent tolerances. */ + if (nfev >= parameters.maxfev) + return LevenbergMarquardtSpace::TooManyFunctionEvaluation; + if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) + return LevenbergMarquardtSpace::FtolTooSmall; + if (delta <= NumTraits::epsilon() * xnorm) + return LevenbergMarquardtSpace::XtolTooSmall; + if (gnorm <= NumTraits::epsilon()) + return LevenbergMarquardtSpace::GtolTooSmall; + + } while (ratio < Scalar(1e-4)); + + return LevenbergMarquardtSpace::Running; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::lmstr1( + FVectorType &x, + const Scalar tol + ) +{ + n = x.size(); + m = functor.values(); + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || tol < 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + resetParameters(); + parameters.ftol = tol; + parameters.xtol = tol; + parameters.maxfev = 100*(n+1); + + return minimizeOptimumStorage(x); +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeOptimumStorageInit(FVectorType &x) +{ + n = x.size(); + m = functor.values(); + + wa1.resize(n); wa2.resize(n); wa3.resize(n); + wa4.resize(m); + fvec.resize(m); + // Only R is stored in fjac. Q is only used to compute 'qtf', which is + // Q.transpose()*rhs. 
qtf will be updated using givens rotation, + // instead of storing them in Q. + // The purpose it to only use a nxn matrix, instead of mxn here, so + // that we can handle cases where m>>n : + fjac.resize(n, n); + if (!useExternalScaling) + diag.resize(n); + eigen_assert( (!useExternalScaling || diag.size()==n) && "When useExternalScaling is set, the caller must provide a valid 'diag'"); + qtf.resize(n); + + /* Function Body */ + nfev = 0; + njev = 0; + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + if (useExternalScaling) + for (Index j = 0; j < n; ++j) + if (diag[j] <= 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + /* evaluate the function at the starting point */ + /* and calculate its norm. */ + nfev = 1; + if ( functor(x, fvec) < 0) + return LevenbergMarquardtSpace::UserAsked; + fnorm = fvec.stableNorm(); + + /* initialize levenberg-marquardt parameter and iteration counter. */ + par = 0.; + iter = 1; + + return LevenbergMarquardtSpace::NotStarted; +} + + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeOptimumStorageOneStep(FVectorType &x) +{ + using std::abs; + using std::sqrt; + + eigen_assert(x.size()==n); // check the caller is not cheating us + + Index i, j; + bool sing; + + /* compute the qr factorization of the jacobian matrix */ + /* calculated one row at a time, while simultaneously */ + /* forming (q transpose)*fvec and storing the first */ + /* n components in qtf. 
*/ + qtf.fill(0.); + fjac.fill(0.); + Index rownb = 2; + for (i = 0; i < m; ++i) { + if (functor.df(x, wa3, rownb) < 0) return LevenbergMarquardtSpace::UserAsked; + internal::rwupdt(fjac, wa3, qtf, fvec[i]); + ++rownb; + } + ++njev; + + /* if the jacobian is rank deficient, call qrfac to */ + /* reorder its columns and update the components of qtf. */ + sing = false; + for (j = 0; j < n; ++j) { + if (fjac(j,j) == 0.) + sing = true; + wa2[j] = fjac.col(j).head(j).stableNorm(); + } + permutation.setIdentity(n); + if (sing) { + wa2 = fjac.colwise().blueNorm(); + // TODO We have no unit test covering this code path, do not modify + // until it is carefully tested + ColPivHouseholderQR qrfac(fjac); + fjac = qrfac.matrixQR(); + wa1 = fjac.diagonal(); + fjac.diagonal() = qrfac.hCoeffs(); + permutation = qrfac.colsPermutation(); + // TODO : avoid this: + for(Index ii=0; ii< fjac.cols(); ii++) fjac.col(ii).segment(ii+1, fjac.rows()-ii-1) *= fjac(ii,ii); // rescale vectors + + for (j = 0; j < n; ++j) { + if (fjac(j,j) != 0.) { + sum = 0.; + for (i = j; i < n; ++i) + sum += fjac(i,j) * qtf[i]; + temp = -sum / fjac(j,j); + for (i = j; i < n; ++i) + qtf[i] += fjac(i,j) * temp; + } + fjac(j,j) = wa1[j]; + } + } + + /* on the first iteration and if external scaling is not used, scale according */ + /* to the norms of the columns of the initial jacobian. */ + if (iter == 1) { + if (!useExternalScaling) + for (j = 0; j < n; ++j) + diag[j] = (wa2[j]==0.)? 1. : wa2[j]; + + /* on the first iteration, calculate the norm of the scaled x */ + /* and initialize the step bound delta. */ + xnorm = diag.cwiseProduct(x).stableNorm(); + delta = parameters.factor * xnorm; + if (delta == 0.) + delta = parameters.factor; + } + + /* compute the norm of the scaled gradient. */ + gnorm = 0.; + if (fnorm != 0.) + for (j = 0; j < n; ++j) + if (wa2[permutation.indices()[j]] != 0.) 
+ gnorm = (std::max)(gnorm, abs( fjac.col(j).head(j+1).dot(qtf.head(j+1)/fnorm) / wa2[permutation.indices()[j]])); + + /* test for convergence of the gradient norm. */ + if (gnorm <= parameters.gtol) + return LevenbergMarquardtSpace::CosinusTooSmall; + + /* rescale if necessary. */ + if (!useExternalScaling) + diag = diag.cwiseMax(wa2); + + do { + + /* determine the levenberg-marquardt parameter. */ + internal::lmpar(fjac, permutation.indices(), diag, qtf, delta, par, wa1); + + /* store the direction p and x + p. calculate the norm of p. */ + wa1 = -wa1; + wa2 = x + wa1; + pnorm = diag.cwiseProduct(wa1).stableNorm(); + + /* on the first iteration, adjust the initial step bound. */ + if (iter == 1) + delta = (std::min)(delta,pnorm); + + /* evaluate the function at x + p and calculate its norm. */ + if ( functor(wa2, wa4) < 0) + return LevenbergMarquardtSpace::UserAsked; + ++nfev; + fnorm1 = wa4.stableNorm(); + + /* compute the scaled actual reduction. */ + actred = -1.; + if (Scalar(.1) * fnorm1 < fnorm) + actred = 1. - numext::abs2(fnorm1 / fnorm); + + /* compute the scaled predicted reduction and */ + /* the scaled directional derivative. */ + wa3 = fjac.topLeftCorner(n,n).template triangularView() * (permutation.inverse() * wa1); + temp1 = numext::abs2(wa3.stableNorm() / fnorm); + temp2 = numext::abs2(sqrt(par) * pnorm / fnorm); + prered = temp1 + temp2 / Scalar(.5); + dirder = -(temp1 + temp2); + + /* compute the ratio of the actual to the predicted */ + /* reduction. */ + ratio = 0.; + if (prered != 0.) + ratio = actred / prered; + + /* update the step bound. */ + if (ratio <= Scalar(.25)) { + if (actred >= 0.) + temp = Scalar(.5); + if (actred < 0.) + temp = Scalar(.5) * dirder / (dirder + Scalar(.5) * actred); + if (Scalar(.1) * fnorm1 >= fnorm || temp < Scalar(.1)) + temp = Scalar(.1); + /* Computing MIN */ + delta = temp * (std::min)(delta, pnorm / Scalar(.1)); + par /= temp; + } else if (!(par != 0. 
&& ratio < Scalar(.75))) { + delta = pnorm / Scalar(.5); + par = Scalar(.5) * par; + } + + /* test for successful iteration. */ + if (ratio >= Scalar(1e-4)) { + /* successful iteration. update x, fvec, and their norms. */ + x = wa2; + wa2 = diag.cwiseProduct(x); + fvec = wa4; + xnorm = wa2.stableNorm(); + fnorm = fnorm1; + ++iter; + } + + /* tests for convergence. */ + if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1. && delta <= parameters.xtol * xnorm) + return LevenbergMarquardtSpace::RelativeErrorAndReductionTooSmall; + if (abs(actred) <= parameters.ftol && prered <= parameters.ftol && Scalar(.5) * ratio <= 1.) + return LevenbergMarquardtSpace::RelativeReductionTooSmall; + if (delta <= parameters.xtol * xnorm) + return LevenbergMarquardtSpace::RelativeErrorTooSmall; + + /* tests for termination and stringent tolerances. */ + if (nfev >= parameters.maxfev) + return LevenbergMarquardtSpace::TooManyFunctionEvaluation; + if (abs(actred) <= NumTraits::epsilon() && prered <= NumTraits::epsilon() && Scalar(.5) * ratio <= 1.) 
+ return LevenbergMarquardtSpace::FtolTooSmall; + if (delta <= NumTraits::epsilon() * xnorm) + return LevenbergMarquardtSpace::XtolTooSmall; + if (gnorm <= NumTraits::epsilon()) + return LevenbergMarquardtSpace::GtolTooSmall; + + } while (ratio < Scalar(1e-4)); + + return LevenbergMarquardtSpace::Running; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::minimizeOptimumStorage(FVectorType &x) +{ + LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x); + if (status==LevenbergMarquardtSpace::ImproperInputParameters) + return status; + do { + status = minimizeOptimumStorageOneStep(x); + } while (status==LevenbergMarquardtSpace::Running); + return status; +} + +template +LevenbergMarquardtSpace::Status +LevenbergMarquardt::lmdif1( + FunctorType &functor, + FVectorType &x, + Index *nfev, + const Scalar tol + ) +{ + Index n = x.size(); + Index m = functor.values(); + + /* check the input parameters for errors. */ + if (n <= 0 || m < n || tol < 0.) + return LevenbergMarquardtSpace::ImproperInputParameters; + + NumericalDiff numDiff(functor); + // embedded LevenbergMarquardt + LevenbergMarquardt, Scalar > lm(numDiff); + lm.parameters.ftol = tol; + lm.parameters.xtol = tol; + lm.parameters.maxfev = 200*(n+1); + + LevenbergMarquardtSpace::Status info = LevenbergMarquardtSpace::Status(lm.minimize(x)); + if (nfev) + * nfev = lm.nfev; + return info; +} + +} // end namespace Eigen + +#endif // EIGEN_LEVENBERGMARQUARDT__H + +//vim: ai ts=4 sts=4 et sw=4 diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/chkder.h b/external/unsupported/Eigen/src/NonLinearOptimization/chkder.h new file mode 100644 index 0000000..db8ff7d --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/chkder.h @@ -0,0 +1,66 @@ +#define chkder_log10e 0.43429448190325182765 +#define chkder_factor 100. 
+ +namespace Eigen { + +namespace internal { + +template +void chkder( + const Matrix< Scalar, Dynamic, 1 > &x, + const Matrix< Scalar, Dynamic, 1 > &fvec, + const Matrix< Scalar, Dynamic, Dynamic > &fjac, + Matrix< Scalar, Dynamic, 1 > &xp, + const Matrix< Scalar, Dynamic, 1 > &fvecp, + int mode, + Matrix< Scalar, Dynamic, 1 > &err + ) +{ + using std::sqrt; + using std::abs; + using std::log; + + typedef DenseIndex Index; + + const Scalar eps = sqrt(NumTraits::epsilon()); + const Scalar epsf = chkder_factor * NumTraits::epsilon(); + const Scalar epslog = chkder_log10e * log(eps); + Scalar temp; + + const Index m = fvec.size(), n = x.size(); + + if (mode != 2) { + /* mode = 1. */ + xp.resize(n); + for (Index j = 0; j < n; ++j) { + temp = eps * abs(x[j]); + if (temp == 0.) + temp = eps; + xp[j] = x[j] + temp; + } + } + else { + /* mode = 2. */ + err.setZero(m); + for (Index j = 0; j < n; ++j) { + temp = abs(x[j]); + if (temp == 0.) + temp = 1.; + err += temp * fjac.col(j); + } + for (Index i = 0; i < m; ++i) { + temp = 1.; + if (fvec[i] != 0. && fvecp[i] != 0. 
&& abs(fvecp[i] - fvec[i]) >= epsf * abs(fvec[i])) + temp = eps * abs((fvecp[i] - fvec[i]) / eps - err[i]) / (abs(fvec[i]) + abs(fvecp[i])); + err[i] = 1.; + if (temp > NumTraits::epsilon() && temp < eps) + err[i] = (chkder_log10e * log(temp) - epslog) / epslog; + if (temp >= eps) + err[i] = 0.; + } + } +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/covar.h b/external/unsupported/Eigen/src/NonLinearOptimization/covar.h new file mode 100644 index 0000000..68260d1 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/covar.h @@ -0,0 +1,70 @@ +namespace Eigen { + +namespace internal { + +template +void covar( + Matrix< Scalar, Dynamic, Dynamic > &r, + const VectorXi &ipvt, + Scalar tol = std::sqrt(NumTraits::epsilon()) ) +{ + using std::abs; + typedef DenseIndex Index; + + /* Local variables */ + Index i, j, k, l, ii, jj; + bool sing; + Scalar temp; + + /* Function Body */ + const Index n = r.cols(); + const Scalar tolr = tol * abs(r(0,0)); + Matrix< Scalar, Dynamic, 1 > wa(n); + eigen_assert(ipvt.size()==n); + + /* form the inverse of r in the full upper triangle of r. */ + l = -1; + for (k = 0; k < n; ++k) + if (abs(r(k,k)) > tolr) { + r(k,k) = 1. / r(k,k); + for (j = 0; j <= k-1; ++j) { + temp = r(k,k) * r(j,k); + r(j,k) = 0.; + r.col(k).head(j+1) -= r.col(j).head(j+1) * temp; + } + l = k; + } + + /* form the full upper triangle of the inverse of (r transpose)*r */ + /* in the full upper triangle of r. */ + for (k = 0; k <= l; ++k) { + for (j = 0; j <= k-1; ++j) + r.col(j).head(j+1) += r.col(k).head(j+1) * r(j,k); + r.col(k).head(k+1) *= r(k,k); + } + + /* form the full lower triangle of the covariance matrix */ + /* in the strict lower triangle of r and in wa. 
*/ + for (j = 0; j < n; ++j) { + jj = ipvt[j]; + sing = j > l; + for (i = 0; i <= j; ++i) { + if (sing) + r(i,j) = 0.; + ii = ipvt[i]; + if (ii > jj) + r(ii,jj) = r(i,j); + if (ii < jj) + r(jj,ii) = r(i,j); + } + wa[jj] = r(j,j); + } + + /* symmetrize the covariance matrix in r. */ + r.topLeftCorner(n,n).template triangularView() = r.topLeftCorner(n,n).transpose(); + r.diagonal() = wa; +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/dogleg.h b/external/unsupported/Eigen/src/NonLinearOptimization/dogleg.h new file mode 100644 index 0000000..80c5d27 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/dogleg.h @@ -0,0 +1,107 @@ +namespace Eigen { + +namespace internal { + +template +void dogleg( + const Matrix< Scalar, Dynamic, Dynamic > &qrfac, + const Matrix< Scalar, Dynamic, 1 > &diag, + const Matrix< Scalar, Dynamic, 1 > &qtb, + Scalar delta, + Matrix< Scalar, Dynamic, 1 > &x) +{ + using std::abs; + using std::sqrt; + + typedef DenseIndex Index; + + /* Local variables */ + Index i, j; + Scalar sum, temp, alpha, bnorm; + Scalar gnorm, qnorm; + Scalar sgnorm; + + /* Function Body */ + const Scalar epsmch = NumTraits::epsilon(); + const Index n = qrfac.cols(); + eigen_assert(n==qtb.size()); + eigen_assert(n==x.size()); + eigen_assert(n==diag.size()); + Matrix< Scalar, Dynamic, 1 > wa1(n), wa2(n); + + /* first, calculate the gauss-newton direction. */ + for (j = n-1; j >=0; --j) { + temp = qrfac(j,j); + if (temp == 0.) { + temp = epsmch * qrfac.col(j).head(j+1).maxCoeff(); + if (temp == 0.) + temp = epsmch; + } + if (j==n-1) + x[j] = qtb[j] / temp; + else + x[j] = (qtb[j] - qrfac.row(j).tail(n-j-1).dot(x.tail(n-j-1))) / temp; + } + + /* test whether the gauss-newton direction is acceptable. 
*/ + qnorm = diag.cwiseProduct(x).stableNorm(); + if (qnorm <= delta) + return; + + // TODO : this path is not tested by Eigen unit tests + + /* the gauss-newton direction is not acceptable. */ + /* next, calculate the scaled gradient direction. */ + + wa1.fill(0.); + for (j = 0; j < n; ++j) { + wa1.tail(n-j) += qrfac.row(j).tail(n-j) * qtb[j]; + wa1[j] /= diag[j]; + } + + /* calculate the norm of the scaled gradient and test for */ + /* the special case in which the scaled gradient is zero. */ + gnorm = wa1.stableNorm(); + sgnorm = 0.; + alpha = delta / qnorm; + if (gnorm == 0.) + goto algo_end; + + /* calculate the point along the scaled gradient */ + /* at which the quadratic is minimized. */ + wa1.array() /= (diag*gnorm).array(); + // TODO : once unit tests cover this part,: + // wa2 = qrfac.template triangularView() * wa1; + for (j = 0; j < n; ++j) { + sum = 0.; + for (i = j; i < n; ++i) { + sum += qrfac(j,i) * wa1[i]; + } + wa2[j] = sum; + } + temp = wa2.stableNorm(); + sgnorm = gnorm / temp / temp; + + /* test whether the scaled gradient direction is acceptable. */ + alpha = 0.; + if (sgnorm >= delta) + goto algo_end; + + /* the scaled gradient direction is not acceptable. */ + /* finally, calculate the point along the dogleg */ + /* at which the quadratic is minimized. */ + bnorm = qtb.stableNorm(); + temp = bnorm / gnorm * (bnorm / qnorm) * (sgnorm / delta); + temp = temp - delta / qnorm * numext::abs2(sgnorm / delta) + sqrt(numext::abs2(temp - delta / qnorm) + (1.-numext::abs2(delta / qnorm)) * (1.-numext::abs2(sgnorm / delta))); + alpha = delta / qnorm * (1. - numext::abs2(sgnorm / delta)) / temp; +algo_end: + + /* form appropriate convex combination of the gauss-newton */ + /* direction and the scaled gradient direction. 
*/ + temp = (1.-alpha) * (std::min)(sgnorm,delta); + x = temp * wa1 + alpha * x; +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h b/external/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h new file mode 100644 index 0000000..bb7cf26 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/fdjac1.h @@ -0,0 +1,79 @@ +namespace Eigen { + +namespace internal { + +template +DenseIndex fdjac1( + const FunctorType &Functor, + Matrix< Scalar, Dynamic, 1 > &x, + Matrix< Scalar, Dynamic, 1 > &fvec, + Matrix< Scalar, Dynamic, Dynamic > &fjac, + DenseIndex ml, DenseIndex mu, + Scalar epsfcn) +{ + using std::sqrt; + using std::abs; + + typedef DenseIndex Index; + + /* Local variables */ + Scalar h; + Index j, k; + Scalar eps, temp; + Index msum; + int iflag; + Index start, length; + + /* Function Body */ + const Scalar epsmch = NumTraits::epsilon(); + const Index n = x.size(); + eigen_assert(fvec.size()==n); + Matrix< Scalar, Dynamic, 1 > wa1(n); + Matrix< Scalar, Dynamic, 1 > wa2(n); + + eps = sqrt((std::max)(epsfcn,epsmch)); + msum = ml + mu + 1; + if (msum >= n) { + /* computation of dense approximate jacobian. */ + for (j = 0; j < n; ++j) { + temp = x[j]; + h = eps * abs(temp); + if (h == 0.) + h = eps; + x[j] = temp + h; + iflag = Functor(x, wa1); + if (iflag < 0) + return iflag; + x[j] = temp; + fjac.col(j) = (wa1-fvec)/h; + } + + }else { + /* computation of banded approximate jacobian. */ + for (k = 0; k < msum; ++k) { + for (j = k; (msum<0) ? 
(j>n): (jn): (j(0,j-mu); + length = (std::min)(n-1, j+ml) - start + 1; + fjac.col(j).segment(start, length) = ( wa1.segment(start, length)-fvec.segment(start, length))/h; + } + } + } + return 0; +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/lmpar.h b/external/unsupported/Eigen/src/NonLinearOptimization/lmpar.h new file mode 100644 index 0000000..4c17d4c --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/lmpar.h @@ -0,0 +1,298 @@ +namespace Eigen { + +namespace internal { + +template +void lmpar( + Matrix< Scalar, Dynamic, Dynamic > &r, + const VectorXi &ipvt, + const Matrix< Scalar, Dynamic, 1 > &diag, + const Matrix< Scalar, Dynamic, 1 > &qtb, + Scalar delta, + Scalar &par, + Matrix< Scalar, Dynamic, 1 > &x) +{ + using std::abs; + using std::sqrt; + typedef DenseIndex Index; + + /* Local variables */ + Index i, j, l; + Scalar fp; + Scalar parc, parl; + Index iter; + Scalar temp, paru; + Scalar gnorm; + Scalar dxnorm; + + + /* Function Body */ + const Scalar dwarf = (std::numeric_limits::min)(); + const Index n = r.cols(); + eigen_assert(n==diag.size()); + eigen_assert(n==qtb.size()); + eigen_assert(n==x.size()); + + Matrix< Scalar, Dynamic, 1 > wa1, wa2; + + /* compute and store in x the gauss-newton direction. if the */ + /* jacobian is rank-deficient, obtain a least squares solution. */ + Index nsing = n-1; + wa1 = qtb; + for (j = 0; j < n; ++j) { + if (r(j,j) == 0. && nsing == n-1) + nsing = j - 1; + if (nsing < n-1) + wa1[j] = 0.; + } + for (j = nsing; j>=0; --j) { + wa1[j] /= r(j,j); + temp = wa1[j]; + for (i = 0; i < j ; ++i) + wa1[i] -= r(i,j) * temp; + } + + for (j = 0; j < n; ++j) + x[ipvt[j]] = wa1[j]; + + /* initialize the iteration counter. */ + /* evaluate the function at the origin, and test */ + /* for acceptance of the gauss-newton direction. 
*/ + iter = 0; + wa2 = diag.cwiseProduct(x); + dxnorm = wa2.blueNorm(); + fp = dxnorm - delta; + if (fp <= Scalar(0.1) * delta) { + par = 0; + return; + } + + /* if the jacobian is not rank deficient, the newton */ + /* step provides a lower bound, parl, for the zero of */ + /* the function. otherwise set this bound to zero. */ + parl = 0.; + if (nsing >= n-1) { + for (j = 0; j < n; ++j) { + l = ipvt[j]; + wa1[j] = diag[l] * (wa2[l] / dxnorm); + } + // it's actually a triangularView.solveInplace(), though in a weird + // way: + for (j = 0; j < n; ++j) { + Scalar sum = 0.; + for (i = 0; i < j; ++i) + sum += r(i,j) * wa1[i]; + wa1[j] = (wa1[j] - sum) / r(j,j); + } + temp = wa1.blueNorm(); + parl = fp / delta / temp / temp; + } + + /* calculate an upper bound, paru, for the zero of the function. */ + for (j = 0; j < n; ++j) + wa1[j] = r.col(j).head(j+1).dot(qtb.head(j+1)) / diag[ipvt[j]]; + + gnorm = wa1.stableNorm(); + paru = gnorm / delta; + if (paru == 0.) + paru = dwarf / (std::min)(delta,Scalar(0.1)); + + /* if the input par lies outside of the interval (parl,paru), */ + /* set par to the closer endpoint. */ + par = (std::max)(par,parl); + par = (std::min)(par,paru); + if (par == 0.) + par = gnorm / dxnorm; + + /* beginning of an iteration. */ + while (true) { + ++iter; + + /* evaluate the function at the current value of par. */ + if (par == 0.) + par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ + wa1 = sqrt(par)* diag; + + Matrix< Scalar, Dynamic, 1 > sdiag(n); + qrsolv(r, ipvt, wa1, qtb, x, sdiag); + + wa2 = diag.cwiseProduct(x); + dxnorm = wa2.blueNorm(); + temp = fp; + fp = dxnorm - delta; + + /* if the function is small enough, accept the current value */ + /* of par. also test for the exceptional cases where parl */ + /* is zero or the number of iterations has reached 10. */ + if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) + break; + + /* compute the newton correction. 
*/ + for (j = 0; j < n; ++j) { + l = ipvt[j]; + wa1[j] = diag[l] * (wa2[l] / dxnorm); + } + for (j = 0; j < n; ++j) { + wa1[j] /= sdiag[j]; + temp = wa1[j]; + for (i = j+1; i < n; ++i) + wa1[i] -= r(i,j) * temp; + } + temp = wa1.blueNorm(); + parc = fp / delta / temp / temp; + + /* depending on the sign of the function, update parl or paru. */ + if (fp > 0.) + parl = (std::max)(parl,par); + if (fp < 0.) + paru = (std::min)(paru,par); + + /* compute an improved estimate for par. */ + /* Computing MAX */ + par = (std::max)(parl,par+parc); + + /* end of an iteration. */ + } + + /* termination. */ + if (iter == 0) + par = 0.; + return; +} + +template +void lmpar2( + const ColPivHouseholderQR > &qr, + const Matrix< Scalar, Dynamic, 1 > &diag, + const Matrix< Scalar, Dynamic, 1 > &qtb, + Scalar delta, + Scalar &par, + Matrix< Scalar, Dynamic, 1 > &x) + +{ + using std::sqrt; + using std::abs; + typedef DenseIndex Index; + + /* Local variables */ + Index j; + Scalar fp; + Scalar parc, parl; + Index iter; + Scalar temp, paru; + Scalar gnorm; + Scalar dxnorm; + + + /* Function Body */ + const Scalar dwarf = (std::numeric_limits::min)(); + const Index n = qr.matrixQR().cols(); + eigen_assert(n==diag.size()); + eigen_assert(n==qtb.size()); + + Matrix< Scalar, Dynamic, 1 > wa1, wa2; + + /* compute and store in x the gauss-newton direction. if the */ + /* jacobian is rank-deficient, obtain a least squares solution. */ + +// const Index rank = qr.nonzeroPivots(); // exactly double(0.) + const Index rank = qr.rank(); // use a threshold + wa1 = qtb; + wa1.tail(n-rank).setZero(); + qr.matrixQR().topLeftCorner(rank, rank).template triangularView().solveInPlace(wa1.head(rank)); + + x = qr.colsPermutation()*wa1; + + /* initialize the iteration counter. */ + /* evaluate the function at the origin, and test */ + /* for acceptance of the gauss-newton direction. 
*/ + iter = 0; + wa2 = diag.cwiseProduct(x); + dxnorm = wa2.blueNorm(); + fp = dxnorm - delta; + if (fp <= Scalar(0.1) * delta) { + par = 0; + return; + } + + /* if the jacobian is not rank deficient, the newton */ + /* step provides a lower bound, parl, for the zero of */ + /* the function. otherwise set this bound to zero. */ + parl = 0.; + if (rank==n) { + wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2)/dxnorm; + qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView().solveInPlace(wa1); + temp = wa1.blueNorm(); + parl = fp / delta / temp / temp; + } + + /* calculate an upper bound, paru, for the zero of the function. */ + for (j = 0; j < n; ++j) + wa1[j] = qr.matrixQR().col(j).head(j+1).dot(qtb.head(j+1)) / diag[qr.colsPermutation().indices()(j)]; + + gnorm = wa1.stableNorm(); + paru = gnorm / delta; + if (paru == 0.) + paru = dwarf / (std::min)(delta,Scalar(0.1)); + + /* if the input par lies outside of the interval (parl,paru), */ + /* set par to the closer endpoint. */ + par = (std::max)(par,parl); + par = (std::min)(par,paru); + if (par == 0.) + par = gnorm / dxnorm; + + /* beginning of an iteration. */ + Matrix< Scalar, Dynamic, Dynamic > s = qr.matrixQR(); + while (true) { + ++iter; + + /* evaluate the function at the current value of par. */ + if (par == 0.) + par = (std::max)(dwarf,Scalar(.001) * paru); /* Computing MAX */ + wa1 = sqrt(par)* diag; + + Matrix< Scalar, Dynamic, 1 > sdiag(n); + qrsolv(s, qr.colsPermutation().indices(), wa1, qtb, x, sdiag); + + wa2 = diag.cwiseProduct(x); + dxnorm = wa2.blueNorm(); + temp = fp; + fp = dxnorm - delta; + + /* if the function is small enough, accept the current value */ + /* of par. also test for the exceptional cases where parl */ + /* is zero or the number of iterations has reached 10. */ + if (abs(fp) <= Scalar(0.1) * delta || (parl == 0. && fp <= temp && temp < 0.) || iter == 10) + break; + + /* compute the newton correction. 
*/ + wa1 = qr.colsPermutation().inverse() * diag.cwiseProduct(wa2/dxnorm); + // we could almost use this here, but the diagonal is outside qr, in sdiag[] + // qr.matrixQR().topLeftCorner(n, n).transpose().template triangularView().solveInPlace(wa1); + for (j = 0; j < n; ++j) { + wa1[j] /= sdiag[j]; + temp = wa1[j]; + for (Index i = j+1; i < n; ++i) + wa1[i] -= s(i,j) * temp; + } + temp = wa1.blueNorm(); + parc = fp / delta / temp / temp; + + /* depending on the sign of the function, update parl or paru. */ + if (fp > 0.) + parl = (std::max)(parl,par); + if (fp < 0.) + paru = (std::min)(paru,par); + + /* compute an improved estimate for par. */ + par = (std::max)(parl,par+parc); + } + if (iter == 0) + par = 0.; + return; +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h b/external/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h new file mode 100644 index 0000000..4f2f560 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/qrsolv.h @@ -0,0 +1,91 @@ +namespace Eigen { + +namespace internal { + +// TODO : once qrsolv2 is removed, use ColPivHouseholderQR or PermutationMatrix instead of ipvt +template +void qrsolv( + Matrix< Scalar, Dynamic, Dynamic > &s, + // TODO : use a PermutationMatrix once lmpar is no more: + const VectorXi &ipvt, + const Matrix< Scalar, Dynamic, 1 > &diag, + const Matrix< Scalar, Dynamic, 1 > &qtb, + Matrix< Scalar, Dynamic, 1 > &x, + Matrix< Scalar, Dynamic, 1 > &sdiag) + +{ + typedef DenseIndex Index; + + /* Local variables */ + Index i, j, k, l; + Scalar temp; + Index n = s.cols(); + Matrix< Scalar, Dynamic, 1 > wa(n); + JacobiRotation givens; + + /* Function Body */ + // the following will only change the lower triangular part of s, including + // the diagonal, though the diagonal is restored afterward + + /* copy r and (q transpose)*b to preserve input and initialize s. */ + /* in particular, save the diagonal elements of r in x. 
*/ + x = s.diagonal(); + wa = qtb; + + s.topLeftCorner(n,n).template triangularView() = s.topLeftCorner(n,n).transpose(); + + /* eliminate the diagonal matrix d using a givens rotation. */ + for (j = 0; j < n; ++j) { + + /* prepare the row of d to be eliminated, locating the */ + /* diagonal element using p from the qr factorization. */ + l = ipvt[j]; + if (diag[l] == 0.) + break; + sdiag.tail(n-j).setZero(); + sdiag[j] = diag[l]; + + /* the transformations to eliminate the row of d */ + /* modify only a single element of (q transpose)*b */ + /* beyond the first n, which is initially zero. */ + Scalar qtbpj = 0.; + for (k = j; k < n; ++k) { + /* determine a givens rotation which eliminates the */ + /* appropriate element in the current row of d. */ + givens.makeGivens(-s(k,k), sdiag[k]); + + /* compute the modified diagonal element of r and */ + /* the modified element of ((q transpose)*b,0). */ + s(k,k) = givens.c() * s(k,k) + givens.s() * sdiag[k]; + temp = givens.c() * wa[k] + givens.s() * qtbpj; + qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj; + wa[k] = temp; + + /* accumulate the transformation in the row of s. */ + for (i = k+1; i().solveInPlace(wa.head(nsing)); + + // restore + sdiag = s.diagonal(); + s.diagonal() = x; + + /* permute the components of z back to components of x. 
*/ + for (j = 0; j < n; ++j) x[ipvt[j]] = wa[j]; +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h b/external/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h new file mode 100644 index 0000000..36ff700 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/r1mpyq.h @@ -0,0 +1,30 @@ +namespace Eigen { + +namespace internal { + +// TODO : move this to GivensQR once there's such a thing in Eigen + +template +void r1mpyq(DenseIndex m, DenseIndex n, Scalar *a, const std::vector > &v_givens, const std::vector > &w_givens) +{ + typedef DenseIndex Index; + + /* apply the first set of givens rotations to a. */ + for (Index j = n-2; j>=0; --j) + for (Index i = 0; i +void r1updt( + Matrix< Scalar, Dynamic, Dynamic > &s, + const Matrix< Scalar, Dynamic, 1> &u, + std::vector > &v_givens, + std::vector > &w_givens, + Matrix< Scalar, Dynamic, 1> &v, + Matrix< Scalar, Dynamic, 1> &w, + bool *sing) +{ + typedef DenseIndex Index; + const JacobiRotation IdentityRotation = JacobiRotation(1,0); + + /* Local variables */ + const Index m = s.rows(); + const Index n = s.cols(); + Index i, j=1; + Scalar temp; + JacobiRotation givens; + + // r1updt had a broader usecase, but we don't use it here. And, more + // importantly, we can not test it. + eigen_assert(m==n); + eigen_assert(u.size()==m); + eigen_assert(v.size()==n); + eigen_assert(w.size()==n); + + /* move the nontrivial part of the last column of s into w. */ + w[n-1] = s(n-1,n-1); + + /* rotate the vector v into a multiple of the n-th unit vector */ + /* in such a way that a spike is introduced into w. */ + for (j=n-2; j>=0; --j) { + w[j] = 0.; + if (v[j] != 0.) { + /* determine a givens rotation which eliminates the */ + /* j-th element of v. */ + givens.makeGivens(-v[n-1], v[j]); + + /* apply the transformation to v and store the information */ + /* necessary to recover the givens rotation. 
*/ + v[n-1] = givens.s() * v[j] + givens.c() * v[n-1]; + v_givens[j] = givens; + + /* apply the transformation to s and extend the spike in w. */ + for (i = j; i < m; ++i) { + temp = givens.c() * s(j,i) - givens.s() * w[i]; + w[i] = givens.s() * s(j,i) + givens.c() * w[i]; + s(j,i) = temp; + } + } else + v_givens[j] = IdentityRotation; + } + + /* add the spike from the rank 1 update to w. */ + w += v[n-1] * u; + + /* eliminate the spike. */ + *sing = false; + for (j = 0; j < n-1; ++j) { + if (w[j] != 0.) { + /* determine a givens rotation which eliminates the */ + /* j-th element of the spike. */ + givens.makeGivens(-s(j,j), w[j]); + + /* apply the transformation to s and reduce the spike in w. */ + for (i = j; i < m; ++i) { + temp = givens.c() * s(j,i) + givens.s() * w[i]; + w[i] = -givens.s() * s(j,i) + givens.c() * w[i]; + s(j,i) = temp; + } + + /* store the information necessary to recover the */ + /* givens rotation. */ + w_givens[j] = givens; + } else + v_givens[j] = IdentityRotation; + + /* test for zero diagonal elements in the output s. */ + if (s(j,j) == 0.) { + *sing = true; + } + } + /* move w back into the last column of the output s. */ + s(n-1,n-1) = w[n-1]; + + if (s(j,j) == 0.) 
{ + *sing = true; + } + return; +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h b/external/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h new file mode 100644 index 0000000..6ebf856 --- /dev/null +++ b/external/unsupported/Eigen/src/NonLinearOptimization/rwupdt.h @@ -0,0 +1,49 @@ +namespace Eigen { + +namespace internal { + +template +void rwupdt( + Matrix< Scalar, Dynamic, Dynamic > &r, + const Matrix< Scalar, Dynamic, 1> &w, + Matrix< Scalar, Dynamic, 1> &b, + Scalar alpha) +{ + typedef DenseIndex Index; + + const Index n = r.cols(); + eigen_assert(r.rows()>=n); + std::vector > givens(n); + + /* Local variables */ + Scalar temp, rowj; + + /* Function Body */ + for (Index j = 0; j < n; ++j) { + rowj = w[j]; + + /* apply the previous transformations to */ + /* r(i,j), i=0,1,...,j-1, and to w(j). */ + for (Index i = 0; i < j; ++i) { + temp = givens[i].c() * r(i,j) + givens[i].s() * rowj; + rowj = -givens[i].s() * r(i,j) + givens[i].c() * rowj; + r(i,j) = temp; + } + + /* determine a givens rotation which eliminates w(j). */ + givens[j].makeGivens(-r(j,j), rowj); + + if (rowj == 0.) + continue; // givens[j] is identity + + /* apply the current transformation to r(j,j), b(j), and alpha. */ + r(j,j) = givens[j].c() * r(j,j) + givens[j].s() * rowj; + temp = givens[j].c() * b[j] + givens[j].s() * alpha; + alpha = -givens[j].s() * b[j] + givens[j].c() * alpha; + b[j] = temp; + } +} + +} // end namespace internal + +} // end namespace Eigen diff --git a/external/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h b/external/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h new file mode 100644 index 0000000..ea5d8bc --- /dev/null +++ b/external/unsupported/Eigen/src/NumericalDiff/NumericalDiff.h @@ -0,0 +1,130 @@ +// -*- coding: utf-8 +// vim: set fileencoding=utf-8 + +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2009 Thomas Capricelli +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NUMERICAL_DIFF_H +#define EIGEN_NUMERICAL_DIFF_H + +namespace Eigen { + +enum NumericalDiffMode { + Forward, + Central +}; + + +/** + * This class allows you to add a method df() to your functor, which will + * use numerical differentiation to compute an approximate of the + * derivative for the functor. Of course, if you have an analytical form + * for the derivative, you should rather implement df() by yourself. + * + * More information on + * http://en.wikipedia.org/wiki/Numerical_differentiation + * + * Currently only "Forward" and "Central" scheme are implemented. + */ +template +class NumericalDiff : public _Functor +{ +public: + typedef _Functor Functor; + typedef typename Functor::Scalar Scalar; + typedef typename Functor::InputType InputType; + typedef typename Functor::ValueType ValueType; + typedef typename Functor::JacobianType JacobianType; + + NumericalDiff(Scalar _epsfcn=0.) : Functor(), epsfcn(_epsfcn) {} + NumericalDiff(const Functor& f, Scalar _epsfcn=0.) 
: Functor(f), epsfcn(_epsfcn) {} + + // forward constructors + template + NumericalDiff(const T0& a0) : Functor(a0), epsfcn(0) {} + template + NumericalDiff(const T0& a0, const T1& a1) : Functor(a0, a1), epsfcn(0) {} + template + NumericalDiff(const T0& a0, const T1& a1, const T2& a2) : Functor(a0, a1, a2), epsfcn(0) {} + + enum { + InputsAtCompileTime = Functor::InputsAtCompileTime, + ValuesAtCompileTime = Functor::ValuesAtCompileTime + }; + + /** + * return the number of evaluation of functor + */ + int df(const InputType& _x, JacobianType &jac) const + { + using std::sqrt; + using std::abs; + /* Local variables */ + Scalar h; + int nfev=0; + const typename InputType::Index n = _x.size(); + const Scalar eps = sqrt(((std::max)(epsfcn,NumTraits::epsilon() ))); + ValueType val1, val2; + InputType x = _x; + // TODO : we should do this only if the size is not already known + val1.resize(Functor::values()); + val2.resize(Functor::values()); + + // initialization + switch(mode) { + case Forward: + // compute f(x) + Functor::operator()(x, val1); nfev++; + break; + case Central: + // do nothing + break; + default: + eigen_assert(false); + }; + + // Function Body + for (int j = 0; j < n; ++j) { + h = eps * abs(x[j]); + if (h == 0.) 
{ + h = eps; + } + switch(mode) { + case Forward: + x[j] += h; + Functor::operator()(x, val2); + nfev++; + x[j] = _x[j]; + jac.col(j) = (val2-val1)/h; + break; + case Central: + x[j] += h; + Functor::operator()(x, val2); nfev++; + x[j] -= 2*h; + Functor::operator()(x, val1); nfev++; + x[j] = _x[j]; + jac.col(j) = (val2-val1)/(2*h); + break; + default: + eigen_assert(false); + }; + } + return nfev; + } +private: + Scalar epsfcn; + + NumericalDiff& operator=(const NumericalDiff&); +}; + +} // end namespace Eigen + +//vim: ai ts=4 sts=4 et sw=4 +#endif // EIGEN_NUMERICAL_DIFF_H + diff --git a/external/unsupported/Eigen/src/Polynomials/Companion.h b/external/unsupported/Eigen/src/Polynomials/Companion.h new file mode 100644 index 0000000..59a15b0 --- /dev/null +++ b/external/unsupported/Eigen/src/Polynomials/Companion.h @@ -0,0 +1,280 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Manuel Yguel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_COMPANION_H +#define EIGEN_COMPANION_H + +// This file requires the user to include +// * Eigen/Core +// * Eigen/src/PolynomialSolver.h + +namespace Eigen { + +namespace internal { + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +template +struct decrement_if_fixed_size +{ + enum { + ret = (Size == Dynamic) ? Dynamic : Size-1 }; +}; + +#endif + +template< typename _Scalar, int _Deg > +class companion +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? 
Dynamic : _Deg) + + enum { + Deg = _Deg, + Deg_1=decrement_if_fixed_size::ret + }; + + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix RightColumn; + //typedef DiagonalMatrix< Scalar, Deg_1, Deg_1 > BottomLeftDiagonal; + typedef Matrix BottomLeftDiagonal; + + typedef Matrix DenseCompanionMatrixType; + typedef Matrix< Scalar, _Deg, Deg_1 > LeftBlock; + typedef Matrix< Scalar, Deg_1, Deg_1 > BottomLeftBlock; + typedef Matrix< Scalar, 1, Deg_1 > LeftBlockFirstRow; + + typedef DenseIndex Index; + + public: + EIGEN_STRONG_INLINE const _Scalar operator()(Index row, Index col ) const + { + if( m_bl_diag.rows() > col ) + { + if( 0 < row ){ return m_bl_diag[col]; } + else{ return 0; } + } + else{ return m_monic[row]; } + } + + public: + template + void setPolynomial( const VectorType& poly ) + { + const Index deg = poly.size()-1; + m_monic = -poly.head(deg)/poly[deg]; + m_bl_diag.setOnes(deg-1); + } + + template + companion( const VectorType& poly ){ + setPolynomial( poly ); } + + public: + DenseCompanionMatrixType denseMatrix() const + { + const Index deg = m_monic.size(); + const Index deg_1 = deg-1; + DenseCompanionMatrixType companMat(deg,deg); + companMat << + ( LeftBlock(deg,deg_1) + << LeftBlockFirstRow::Zero(1,deg_1), + BottomLeftBlock::Identity(deg-1,deg-1)*m_bl_diag.asDiagonal() ).finished() + , m_monic; + return companMat; + } + + + + protected: + /** Helper function for the balancing algorithm. + * \returns true if the row and the column, having colNorm and rowNorm + * as norms, are balanced, false otherwise. + * colB and rowB are respectively the multipliers for + * the column and the row in order to balance them. + * */ + bool balanced( RealScalar colNorm, RealScalar rowNorm, + bool& isBalanced, RealScalar& colB, RealScalar& rowB ); + + /** Helper function for the balancing algorithm. + * \returns true if the row and the column, having colNorm and rowNorm + * as norms, are balanced, false otherwise. 
+ * colB and rowB are respectively the multipliers for + * the column and the row in order to balance them. + * */ + bool balancedR( RealScalar colNorm, RealScalar rowNorm, + bool& isBalanced, RealScalar& colB, RealScalar& rowB ); + + public: + /** + * Balancing algorithm from B. N. PARLETT and C. REINSCH (1969) + * "Balancing a matrix for calculation of eigenvalues and eigenvectors" + * adapted to the case of companion matrices. + * A matrix with non zero row and non zero column is balanced + * for a certain norm if the i-th row and the i-th column + * have same norm for all i. + */ + void balance(); + + protected: + RightColumn m_monic; + BottomLeftDiagonal m_bl_diag; +}; + + + +template< typename _Scalar, int _Deg > +inline +bool companion<_Scalar,_Deg>::balanced( RealScalar colNorm, RealScalar rowNorm, + bool& isBalanced, RealScalar& colB, RealScalar& rowB ) +{ + if( RealScalar(0) == colNorm || RealScalar(0) == rowNorm + || !(numext::isfinite)(colNorm) || !(numext::isfinite)(rowNorm)){ + return true; + } + else + { + //To find the balancing coefficients, if the radix is 2, + //one finds \f$ \sigma \f$ such that + // \f$ 2^{2\sigma-1} < rowNorm / colNorm \le 2^{2\sigma+1} \f$ + // then the balancing coefficient for the row is \f$ 1/2^{\sigma} \f$ + // and the balancing coefficient for the column is \f$ 2^{\sigma} \f$ + const RealScalar radix = RealScalar(2); + const RealScalar radix2 = RealScalar(4); + + rowB = rowNorm / radix; + colB = RealScalar(1); + const RealScalar s = colNorm + rowNorm; + + // Find sigma s.t. rowNorm / 2 <= 2^(2*sigma) * colNorm + RealScalar scout = colNorm; + while (scout < rowB) + { + colB *= radix; + scout *= radix2; + } + + // We now have an upper-bound for sigma, try to lower it. + // Find sigma s.t. 2^(2*sigma) * colNorm / 2 < rowNorm + scout = colNorm * (colB / radix) * colB; // Avoid overflow. + while (scout >= rowNorm) + { + colB /= radix; + scout /= radix2; + } + + // This line is used to avoid insubstantial balancing. 
+ if ((rowNorm + radix * scout) < RealScalar(0.95) * s * colB) + { + isBalanced = false; + rowB = RealScalar(1) / colB; + return false; + } + else + { + return true; + } + } +} + +template< typename _Scalar, int _Deg > +inline +bool companion<_Scalar,_Deg>::balancedR( RealScalar colNorm, RealScalar rowNorm, + bool& isBalanced, RealScalar& colB, RealScalar& rowB ) +{ + if( RealScalar(0) == colNorm || RealScalar(0) == rowNorm ){ return true; } + else + { + /** + * Set the norm of the column and the row to the geometric mean + * of the row and column norm + */ + const RealScalar q = colNorm/rowNorm; + if( !isApprox( q, _Scalar(1) ) ) + { + rowB = sqrt( colNorm/rowNorm ); + colB = RealScalar(1)/rowB; + + isBalanced = false; + return false; + } + else{ + return true; } + } +} + + +template< typename _Scalar, int _Deg > +void companion<_Scalar,_Deg>::balance() +{ + using std::abs; + EIGEN_STATIC_ASSERT( Deg == Dynamic || 1 < Deg, YOU_MADE_A_PROGRAMMING_MISTAKE ); + const Index deg = m_monic.size(); + const Index deg_1 = deg-1; + + bool hasConverged=false; + while( !hasConverged ) + { + hasConverged = true; + RealScalar colNorm,rowNorm; + RealScalar colB,rowB; + + //First row, first column excluding the diagonal + //============================================== + colNorm = abs(m_bl_diag[0]); + rowNorm = abs(m_monic[0]); + + //Compute balancing of the row and the column + if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) ) + { + m_bl_diag[0] *= colB; + m_monic[0] *= rowB; + } + + //Middle rows and columns excluding the diagonal + //============================================== + for( Index i=1; i headMonic( m_monic, 0, deg_1 ); + colNorm = headMonic.array().abs().sum(); + rowNorm = abs( m_bl_diag[ebl] ); + + //Compute balancing of the row and the column + if( !balanced( colNorm, rowNorm, hasConverged, colB, rowB ) ) + { + headMonic *= colB; + m_bl_diag[ebl] *= rowB; + } + } +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // 
EIGEN_COMPANION_H diff --git a/external/unsupported/Eigen/src/Polynomials/PolynomialSolver.h b/external/unsupported/Eigen/src/Polynomials/PolynomialSolver.h new file mode 100644 index 0000000..5e0ecbb --- /dev/null +++ b/external/unsupported/Eigen/src/Polynomials/PolynomialSolver.h @@ -0,0 +1,428 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Manuel Yguel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_POLYNOMIAL_SOLVER_H +#define EIGEN_POLYNOMIAL_SOLVER_H + +namespace Eigen { + +/** \ingroup Polynomials_Module + * \class PolynomialSolverBase. + * + * \brief Defined to be inherited by polynomial solvers: it provides + * convenient methods such as + * - real roots, + * - greatest, smallest complex roots, + * - real roots with greatest, smallest absolute real value, + * - greatest, smallest real roots. + * + * It stores the set of roots as a vector of complexes. + * + */ +template< typename _Scalar, int _Deg > +class PolynomialSolverBase +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? 
Dynamic : _Deg) + + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef std::complex RootType; + typedef Matrix RootsType; + + typedef DenseIndex Index; + + protected: + template< typename OtherPolynomial > + inline void setPolynomial( const OtherPolynomial& poly ){ + m_roots.resize(poly.size()-1); } + + public: + template< typename OtherPolynomial > + inline PolynomialSolverBase( const OtherPolynomial& poly ){ + setPolynomial( poly() ); } + + inline PolynomialSolverBase(){} + + public: + /** \returns the complex roots of the polynomial */ + inline const RootsType& roots() const { return m_roots; } + + public: + /** Clear and fills the back insertion sequence with the real roots of the polynomial + * i.e. the real part of the complex roots that have an imaginary part which + * absolute value is smaller than absImaginaryThreshold. + * absImaginaryThreshold takes the dummy_precision associated + * with the _Scalar template parameter of the PolynomialSolver class as the default value. + * + * \param[out] bi_seq : the back insertion sequence (stl concept) + * \param[in] absImaginaryThreshold : the maximum bound of the imaginary part of a complex + * number that is considered as real. + * */ + template + inline void realRoots( Stl_back_insertion_sequence& bi_seq, + const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const + { + using std::abs; + bi_seq.clear(); + for(Index i=0; i + inline const RootType& selectComplexRoot_withRespectToNorm( squaredNormBinaryPredicate& pred ) const + { + Index res=0; + RealScalar norm2 = numext::abs2( m_roots[0] ); + for( Index i=1; i greater; + return selectComplexRoot_withRespectToNorm( greater ); + } + + /** + * \returns the complex root with smallest norm. 
+ */ + inline const RootType& smallestRoot() const + { + std::less less; + return selectComplexRoot_withRespectToNorm( less ); + } + + protected: + template + inline const RealScalar& selectRealRoot_withRespectToAbsRealPart( + squaredRealPartBinaryPredicate& pred, + bool& hasArealRoot, + const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const + { + using std::abs; + hasArealRoot = false; + Index res=0; + RealScalar abs2(0); + + for( Index i=0; i + inline const RealScalar& selectRealRoot_withRespectToRealPart( + RealPartBinaryPredicate& pred, + bool& hasArealRoot, + const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const + { + using std::abs; + hasArealRoot = false; + Index res=0; + RealScalar val(0); + + for( Index i=0; i::dummy_precision() ) const + { + std::greater greater; + return selectRealRoot_withRespectToAbsRealPart( greater, hasArealRoot, absImaginaryThreshold ); + } + + + /** + * \returns a real root with smallest absolute magnitude. + * A real root is defined as the real part of a complex root with absolute imaginary + * part smallest than absImaginaryThreshold. + * absImaginaryThreshold takes the dummy_precision associated + * with the _Scalar template parameter of the PolynomialSolver class as the default value. + * If no real root is found the boolean hasArealRoot is set to false and the real part of + * the root with smallest absolute imaginary part is returned instead. + * + * \param[out] hasArealRoot : boolean true if a real root is found according to the + * absImaginaryThreshold criterion, false otherwise. + * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide + * whether or not a root is real. 
+ */ + inline const RealScalar& absSmallestRealRoot( + bool& hasArealRoot, + const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const + { + std::less less; + return selectRealRoot_withRespectToAbsRealPart( less, hasArealRoot, absImaginaryThreshold ); + } + + + /** + * \returns the real root with greatest value. + * A real root is defined as the real part of a complex root with absolute imaginary + * part smallest than absImaginaryThreshold. + * absImaginaryThreshold takes the dummy_precision associated + * with the _Scalar template parameter of the PolynomialSolver class as the default value. + * If no real root is found the boolean hasArealRoot is set to false and the real part of + * the root with smallest absolute imaginary part is returned instead. + * + * \param[out] hasArealRoot : boolean true if a real root is found according to the + * absImaginaryThreshold criterion, false otherwise. + * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide + * whether or not a root is real. + */ + inline const RealScalar& greatestRealRoot( + bool& hasArealRoot, + const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const + { + std::greater greater; + return selectRealRoot_withRespectToRealPart( greater, hasArealRoot, absImaginaryThreshold ); + } + + + /** + * \returns the real root with smallest value. + * A real root is defined as the real part of a complex root with absolute imaginary + * part smallest than absImaginaryThreshold. + * absImaginaryThreshold takes the dummy_precision associated + * with the _Scalar template parameter of the PolynomialSolver class as the default value. + * If no real root is found the boolean hasArealRoot is set to false and the real part of + * the root with smallest absolute imaginary part is returned instead. + * + * \param[out] hasArealRoot : boolean true if a real root is found according to the + * absImaginaryThreshold criterion, false otherwise. 
+ * \param[in] absImaginaryThreshold : threshold on the absolute imaginary part to decide + * whether or not a root is real. + */ + inline const RealScalar& smallestRealRoot( + bool& hasArealRoot, + const RealScalar& absImaginaryThreshold = NumTraits::dummy_precision() ) const + { + std::less less; + return selectRealRoot_withRespectToRealPart( less, hasArealRoot, absImaginaryThreshold ); + } + + protected: + RootsType m_roots; +}; + +#define EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( BASE ) \ + typedef typename BASE::Scalar Scalar; \ + typedef typename BASE::RealScalar RealScalar; \ + typedef typename BASE::RootType RootType; \ + typedef typename BASE::RootsType RootsType; + + + +/** \ingroup Polynomials_Module + * + * \class PolynomialSolver + * + * \brief A polynomial solver + * + * Computes the complex roots of a real polynomial. + * + * \param _Scalar the scalar type, i.e., the type of the polynomial coefficients + * \param _Deg the degree of the polynomial, can be a compile time value or Dynamic. + * Notice that the number of polynomial coefficients is _Deg+1. + * + * This class implements a polynomial solver and provides convenient methods such as + * - real roots, + * - greatest, smallest complex roots, + * - real roots with greatest, smallest absolute real value. + * - greatest, smallest real roots. + * + * WARNING: this polynomial solver is experimental, part of the unsupported Eigen modules. + * + * + * Currently a QR algorithm is used to compute the eigenvalues of the companion matrix of + * the polynomial to compute its roots. + * This supposes that the complex moduli of the roots are all distinct: e.g. there should + * be no multiple roots or conjugate roots for instance. + * With 32bit (float) floating types this problem shows up frequently. + * However, almost always, correct accuracy is reached even in these cases for 64bit + * (double) floating types and small polynomial degree (<20). 
+ */ +template +class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg> +{ + public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Deg==Dynamic ? Dynamic : _Deg) + + typedef PolynomialSolverBase<_Scalar,_Deg> PS_Base; + EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base ) + + typedef Matrix CompanionMatrixType; + typedef typename internal::conditional::IsComplex, + ComplexEigenSolver, + EigenSolver >::type EigenSolverType; + typedef typename internal::conditional::IsComplex, Scalar, std::complex >::type ComplexScalar; + + public: + /** Computes the complex roots of a new polynomial. */ + template< typename OtherPolynomial > + void compute( const OtherPolynomial& poly ) + { + eigen_assert( Scalar(0) != poly[poly.size()-1] ); + eigen_assert( poly.size() > 1 ); + if(poly.size() > 2 ) + { + internal::companion companion( poly ); + companion.balance(); + m_eigenSolver.compute( companion.denseMatrix() ); + m_roots = m_eigenSolver.eigenvalues(); + // cleanup noise in imaginary part of real roots: + // if the imaginary part is rather small compared to the real part + // and that cancelling the imaginary part yield a smaller evaluation, + // then it's safe to keep the real part only. + RealScalar coarse_prec = RealScalar(std::pow(4,poly.size()+1))*NumTraits::epsilon(); + for(Index i = 0; i + inline PolynomialSolver( const OtherPolynomial& poly ){ + compute( poly ); } + + inline PolynomialSolver(){} + + protected: + using PS_Base::m_roots; + EigenSolverType m_eigenSolver; +}; + + +template< typename _Scalar > +class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1> +{ + public: + typedef PolynomialSolverBase<_Scalar,1> PS_Base; + EIGEN_POLYNOMIAL_SOLVER_BASE_INHERITED_TYPES( PS_Base ) + + public: + /** Computes the complex roots of a new polynomial. 
*/ + template< typename OtherPolynomial > + void compute( const OtherPolynomial& poly ) + { + eigen_assert( poly.size() == 2 ); + eigen_assert( Scalar(0) != poly[1] ); + m_roots[0] = -poly[0]/poly[1]; + } + + public: + template< typename OtherPolynomial > + inline PolynomialSolver( const OtherPolynomial& poly ){ + compute( poly ); } + + inline PolynomialSolver(){} + + protected: + using PS_Base::m_roots; +}; + +} // end namespace Eigen + +#endif // EIGEN_POLYNOMIAL_SOLVER_H diff --git a/external/unsupported/Eigen/src/Polynomials/PolynomialUtils.h b/external/unsupported/Eigen/src/Polynomials/PolynomialUtils.h new file mode 100644 index 0000000..394e857 --- /dev/null +++ b/external/unsupported/Eigen/src/Polynomials/PolynomialUtils.h @@ -0,0 +1,143 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Manuel Yguel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_POLYNOMIAL_UTILS_H +#define EIGEN_POLYNOMIAL_UTILS_H + +namespace Eigen { + +/** \ingroup Polynomials_Module + * \returns the evaluation of the polynomial at x using Horner algorithm. + * + * \param[in] poly : the vector of coefficients of the polynomial ordered + * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial + * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$. + * \param[in] x : the value to evaluate the polynomial at. + * + * \note for stability: + * \f$ |x| \le 1 \f$ + */ +template +inline +T poly_eval_horner( const Polynomials& poly, const T& x ) +{ + T val=poly[poly.size()-1]; + for(DenseIndex i=poly.size()-2; i>=0; --i ){ + val = val*x + poly[i]; } + return val; +} + +/** \ingroup Polynomials_Module + * \returns the evaluation of the polynomial at x using stabilized Horner algorithm. 
+ * + * \param[in] poly : the vector of coefficients of the polynomial ordered + * by degrees i.e. poly[i] is the coefficient of degree i of the polynomial + * e.g. \f$ 1 + 3x^2 \f$ is stored as a vector \f$ [ 1, 0, 3 ] \f$. + * \param[in] x : the value to evaluate the polynomial at. + */ +template +inline +T poly_eval( const Polynomials& poly, const T& x ) +{ + typedef typename NumTraits::Real Real; + + if( numext::abs2( x ) <= Real(1) ){ + return poly_eval_horner( poly, x ); } + else + { + T val=poly[0]; + T inv_x = T(1)/x; + for( DenseIndex i=1; i +inline +typename NumTraits::Real cauchy_max_bound( const Polynomial& poly ) +{ + using std::abs; + typedef typename Polynomial::Scalar Scalar; + typedef typename NumTraits::Real Real; + + eigen_assert( Scalar(0) != poly[poly.size()-1] ); + const Scalar inv_leading_coeff = Scalar(1)/poly[poly.size()-1]; + Real cb(0); + + for( DenseIndex i=0; i +inline +typename NumTraits::Real cauchy_min_bound( const Polynomial& poly ) +{ + using std::abs; + typedef typename Polynomial::Scalar Scalar; + typedef typename NumTraits::Real Real; + + DenseIndex i=0; + while( i +void roots_to_monicPolynomial( const RootVector& rv, Polynomial& poly ) +{ + + typedef typename Polynomial::Scalar Scalar; + + poly.setZero( rv.size()+1 ); + poly[0] = -rv[0]; poly[1] = Scalar(1); + for( DenseIndex i=1; i< rv.size(); ++i ) + { + for( DenseIndex j=i+1; j>0; --j ){ poly[j] = poly[j-1] - rv[i]*poly[j]; } + poly[0] = -rv[i]*poly[0]; + } +} + +} // end namespace Eigen + +#endif // EIGEN_POLYNOMIAL_UTILS_H diff --git a/external/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h b/external/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h new file mode 100644 index 0000000..6d0370d --- /dev/null +++ b/external/unsupported/Eigen/src/Skyline/SkylineInplaceLU.h @@ -0,0 +1,352 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 2008 Guillaume Saupin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SKYLINEINPLACELU_H +#define EIGEN_SKYLINEINPLACELU_H + +namespace Eigen { + +/** \ingroup Skyline_Module + * + * \class SkylineInplaceLU + * + * \brief Inplace LU decomposition of a skyline matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LU factorization + * + */ +template +class SkylineInplaceLU { +protected: + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::Index Index; + + typedef typename NumTraits::Real RealScalar; + +public: + + /** Creates a LU object and compute the respective factorization of \a matrix using + * flags \a flags. */ + SkylineInplaceLU(MatrixType& matrix, int flags = 0) + : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0), m_lu(matrix) { + m_precision = RealScalar(0.1) * Eigen::dummy_precision (); + m_lu.IsRowMajor ? computeRowMajor() : compute(); + } + + /** Sets the relative threshold value used to prune zero coefficients during the decomposition. + * + * Setting a value greater than zero speeds up computation, and yields to an incomplete + * factorization with fewer non zero coefficients. Such approximate factors are especially + * useful to initialize an iterative solver. + * + * Note that the exact meaning of this parameter might depends on the actual + * backend. Moreover, not all backends support this feature. + * + * \sa precision() */ + void setPrecision(RealScalar v) { + m_precision = v; + } + + /** \returns the current precision. + * + * \sa setPrecision() */ + RealScalar precision() const { + return m_precision; + } + + /** Sets the flags. 
Possible values are: + * - CompleteFactorization + * - IncompleteFactorization + * - MemoryEfficient + * - one of the ordering methods + * - etc... + * + * \sa flags() */ + void setFlags(int f) { + m_flags = f; + } + + /** \returns the current flags */ + int flags() const { + return m_flags; + } + + void setOrderingMethod(int m) { + m_flags = m; + } + + int orderingMethod() const { + return m_flags; + } + + /** Computes/re-computes the LU factorization */ + void compute(); + void computeRowMajor(); + + /** \returns the lower triangular matrix L */ + //inline const MatrixType& matrixL() const { return m_matrixL; } + + /** \returns the upper triangular matrix U */ + //inline const MatrixType& matrixU() const { return m_matrixU; } + + template + bool solve(const MatrixBase &b, MatrixBase* x, + const int transposed = 0) const; + + /** \returns true if the factorization succeeded */ + inline bool succeeded(void) const { + return m_succeeded; + } + +protected: + RealScalar m_precision; + int m_flags; + mutable int m_status; + bool m_succeeded; + MatrixType& m_lu; +}; + +/** Computes / recomputes the in place LU decomposition of the SkylineInplaceLU. + * using the default algorithm. 
+ */ +template +//template +void SkylineInplaceLU::compute() { + const size_t rows = m_lu.rows(); + const size_t cols = m_lu.cols(); + + eigen_assert(rows == cols && "We do not (yet) support rectangular LU."); + eigen_assert(!m_lu.IsRowMajor && "LU decomposition does not work with rowMajor Storage"); + + for (Index row = 0; row < rows; row++) { + const double pivot = m_lu.coeffDiag(row); + + //Lower matrix Columns update + const Index& col = row; + for (typename MatrixType::InnerLowerIterator lIt(m_lu, col); lIt; ++lIt) { + lIt.valueRef() /= pivot; + } + + //Upper matrix update -> contiguous memory access + typename MatrixType::InnerLowerIterator lIt(m_lu, col); + for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { + typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); + typename MatrixType::InnerUpperIterator uIt(m_lu, rrow); + const double coef = lIt.value(); + + uItPivot += (rrow - row - 1); + + //update upper part -> contiguous memory access + for (++uItPivot; uIt && uItPivot;) { + uIt.valueRef() -= uItPivot.value() * coef; + + ++uIt; + ++uItPivot; + } + ++lIt; + } + + //Upper matrix update -> non contiguous memory access + typename MatrixType::InnerLowerIterator lIt3(m_lu, col); + for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { + typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); + const double coef = lIt3.value(); + + //update lower part -> non contiguous memory access + for (Index i = 0; i < rrow - row - 1; i++) { + m_lu.coeffRefLower(rrow, row + i + 1) -= uItPivot.value() * coef; + ++uItPivot; + } + ++lIt3; + } + //update diag -> contiguous + typename MatrixType::InnerLowerIterator lIt2(m_lu, col); + for (Index rrow = row + 1; rrow < m_lu.rows(); rrow++) { + + typename MatrixType::InnerUpperIterator uItPivot(m_lu, row); + typename MatrixType::InnerUpperIterator uIt(m_lu, rrow); + const double coef = lIt2.value(); + + uItPivot += (rrow - row - 1); + m_lu.coeffRefDiag(rrow) -= uItPivot.value() * coef; + ++lIt2; + } + } +} + 
+template +void SkylineInplaceLU::computeRowMajor() { + const size_t rows = m_lu.rows(); + const size_t cols = m_lu.cols(); + + eigen_assert(rows == cols && "We do not (yet) support rectangular LU."); + eigen_assert(m_lu.IsRowMajor && "You're trying to apply rowMajor decomposition on a ColMajor matrix !"); + + for (Index row = 0; row < rows; row++) { + typename MatrixType::InnerLowerIterator llIt(m_lu, row); + + + for (Index col = llIt.col(); col < row; col++) { + if (m_lu.coeffExistLower(row, col)) { + const double diag = m_lu.coeffDiag(col); + + typename MatrixType::InnerLowerIterator lIt(m_lu, row); + typename MatrixType::InnerUpperIterator uIt(m_lu, col); + + + const Index offset = lIt.col() - uIt.row(); + + + Index stop = offset > 0 ? col - lIt.col() : col - uIt.row(); + + //#define VECTORIZE +#ifdef VECTORIZE + Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); + Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); + + + Scalar newCoeff = m_lu.coeffLower(row, col) - rowVal.dot(colVal); +#else + if (offset > 0) //Skip zero value of lIt + uIt += offset; + else //Skip zero values of uIt + lIt += -offset; + Scalar newCoeff = m_lu.coeffLower(row, col); + + for (Index k = 0; k < stop; ++k) { + const Scalar tmp = newCoeff; + newCoeff = tmp - lIt.value() * uIt.value(); + ++lIt; + ++uIt; + } +#endif + + m_lu.coeffRefLower(row, col) = newCoeff / diag; + } + } + + //Upper matrix update + const Index col = row; + typename MatrixType::InnerUpperIterator uuIt(m_lu, col); + for (Index rrow = uuIt.row(); rrow < col; rrow++) { + + typename MatrixType::InnerLowerIterator lIt(m_lu, rrow); + typename MatrixType::InnerUpperIterator uIt(m_lu, col); + const Index offset = lIt.col() - uIt.row(); + + Index stop = offset > 0 ? rrow - lIt.col() : rrow - uIt.row(); + +#ifdef VECTORIZE + Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); + Map colVal(uIt.valuePtr() + (offset > 0 ? 
offset : 0), stop); + + Scalar newCoeff = m_lu.coeffUpper(rrow, col) - rowVal.dot(colVal); +#else + if (offset > 0) //Skip zero value of lIt + uIt += offset; + else //Skip zero values of uIt + lIt += -offset; + Scalar newCoeff = m_lu.coeffUpper(rrow, col); + for (Index k = 0; k < stop; ++k) { + const Scalar tmp = newCoeff; + newCoeff = tmp - lIt.value() * uIt.value(); + + ++lIt; + ++uIt; + } +#endif + m_lu.coeffRefUpper(rrow, col) = newCoeff; + } + + + //Diag matrix update + typename MatrixType::InnerLowerIterator lIt(m_lu, row); + typename MatrixType::InnerUpperIterator uIt(m_lu, row); + + const Index offset = lIt.col() - uIt.row(); + + + Index stop = offset > 0 ? lIt.size() : uIt.size(); +#ifdef VECTORIZE + Map rowVal(lIt.valuePtr() + (offset > 0 ? 0 : -offset), stop); + Map colVal(uIt.valuePtr() + (offset > 0 ? offset : 0), stop); + Scalar newCoeff = m_lu.coeffDiag(row) - rowVal.dot(colVal); +#else + if (offset > 0) //Skip zero value of lIt + uIt += offset; + else //Skip zero values of uIt + lIt += -offset; + Scalar newCoeff = m_lu.coeffDiag(row); + for (Index k = 0; k < stop; ++k) { + const Scalar tmp = newCoeff; + newCoeff = tmp - lIt.value() * uIt.value(); + ++lIt; + ++uIt; + } +#endif + m_lu.coeffRefDiag(row) = newCoeff; + } +} + +/** Computes *x = U^-1 L^-1 b + * + * If \a transpose is set to SvTranspose or SvAdjoint, the solution + * of the transposed/adjoint system is computed instead. + * + * Not all backends implement the solution of the transposed or + * adjoint system. 
+ */ +template +template +bool SkylineInplaceLU::solve(const MatrixBase &b, MatrixBase* x, const int transposed) const { + const size_t rows = m_lu.rows(); + const size_t cols = m_lu.cols(); + + + for (Index row = 0; row < rows; row++) { + x->coeffRef(row) = b.coeff(row); + Scalar newVal = x->coeff(row); + typename MatrixType::InnerLowerIterator lIt(m_lu, row); + + Index col = lIt.col(); + while (lIt.col() < row) { + + newVal -= x->coeff(col++) * lIt.value(); + ++lIt; + } + + x->coeffRef(row) = newVal; + } + + + for (Index col = rows - 1; col > 0; col--) { + x->coeffRef(col) = x->coeff(col) / m_lu.coeffDiag(col); + + const Scalar x_col = x->coeff(col); + + typename MatrixType::InnerUpperIterator uIt(m_lu, col); + uIt += uIt.size()-1; + + + while (uIt) { + x->coeffRef(uIt.row()) -= x_col * uIt.value(); + //TODO : introduce --operator + uIt += -1; + } + + + } + x->coeffRef(0) = x->coeff(0) / m_lu.coeffDiag(0); + + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_SKYLINEINPLACELU_H diff --git a/external/unsupported/Eigen/src/Skyline/SkylineMatrix.h b/external/unsupported/Eigen/src/Skyline/SkylineMatrix.h new file mode 100644 index 0000000..7c7eace --- /dev/null +++ b/external/unsupported/Eigen/src/Skyline/SkylineMatrix.h @@ -0,0 +1,862 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Guillaume Saupin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SKYLINEMATRIX_H +#define EIGEN_SKYLINEMATRIX_H + +#include "SkylineStorage.h" +#include "SkylineMatrixBase.h" + +namespace Eigen { + +/** \ingroup Skyline_Module + * + * \class SkylineMatrix + * + * \brief The main skyline matrix class + * + * This class implements a skyline matrix using the very uncommon storage + * scheme. 
+ * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * \param _Options Union of bit flags controlling the storage scheme. Currently the only possibility + * is RowMajor. The default is 0 which means column-major. + * + * + */ +namespace internal { +template +struct traits > { + typedef _Scalar Scalar; + typedef Sparse StorageKind; + + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = SkylineBit | _Options, + CoeffReadCost = NumTraits::ReadCost, + }; +}; +} + +template +class SkylineMatrix +: public SkylineMatrixBase > { +public: + EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(SkylineMatrix) + EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, +=) + EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(SkylineMatrix, -=) + + using Base::IsRowMajor; + +protected: + + typedef SkylineMatrix TransposedSkylineMatrix; + + Index m_outerSize; + Index m_innerSize; + +public: + Index* m_colStartIndex; + Index* m_rowStartIndex; + SkylineStorage m_data; + +public: + + inline Index rows() const { + return IsRowMajor ? m_outerSize : m_innerSize; + } + + inline Index cols() const { + return IsRowMajor ? 
m_innerSize : m_outerSize; + } + + inline Index innerSize() const { + return m_innerSize; + } + + inline Index outerSize() const { + return m_outerSize; + } + + inline Index upperNonZeros() const { + return m_data.upperSize(); + } + + inline Index lowerNonZeros() const { + return m_data.lowerSize(); + } + + inline Index upperNonZeros(Index j) const { + return m_colStartIndex[j + 1] - m_colStartIndex[j]; + } + + inline Index lowerNonZeros(Index j) const { + return m_rowStartIndex[j + 1] - m_rowStartIndex[j]; + } + + inline const Scalar* _diagPtr() const { + return &m_data.diag(0); + } + + inline Scalar* _diagPtr() { + return &m_data.diag(0); + } + + inline const Scalar* _upperPtr() const { + return &m_data.upper(0); + } + + inline Scalar* _upperPtr() { + return &m_data.upper(0); + } + + inline const Scalar* _lowerPtr() const { + return &m_data.lower(0); + } + + inline Scalar* _lowerPtr() { + return &m_data.lower(0); + } + + inline const Index* _upperProfilePtr() const { + return &m_data.upperProfile(0); + } + + inline Index* _upperProfilePtr() { + return &m_data.upperProfile(0); + } + + inline const Index* _lowerProfilePtr() const { + return &m_data.lowerProfile(0); + } + + inline Index* _lowerProfilePtr() { + return &m_data.lowerProfile(0); + } + + inline Scalar coeff(Index row, Index col) const { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + + if (outer == inner) + return this->m_data.diag(outer); + + if (IsRowMajor) { + if (inner > outer) //upper matrix + { + const Index minOuterIndex = inner - m_data.upperProfile(inner); + if (outer >= minOuterIndex) + return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); + else + return Scalar(0); + } + if (inner < outer) //lower matrix + { + const Index minInnerIndex = outer - m_data.lowerProfile(outer); + if (inner >= minInnerIndex) + return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); + else + return Scalar(0); + } + return m_data.upper(m_colStartIndex[inner] + outer - inner); + } else { + if (outer > inner) //upper matrix + { + const Index maxOuterIndex = inner + m_data.upperProfile(inner); + if (outer <= maxOuterIndex) + return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); + else + return Scalar(0); + } + if (outer < inner) //lower matrix + { + const Index maxInnerIndex = outer + m_data.lowerProfile(outer); + + if (inner <= maxInnerIndex) + return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); + else + return Scalar(0); + } + } + } + + inline Scalar& coeffRef(Index row, Index col) { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + + if (outer == inner) + return this->m_data.diag(outer); + + if (IsRowMajor) { + if (col > row) //upper matrix + { + const Index minOuterIndex = inner - m_data.upperProfile(inner); + eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); + } + if (col < row) //lower matrix + { + const Index minInnerIndex = outer - m_data.lowerProfile(outer); + eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); + } + } else { + if (outer > inner) //upper matrix + { + const Index maxOuterIndex = inner + m_data.upperProfile(inner); + eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); + } + if (outer < inner) //lower matrix + { + const Index maxInnerIndex = outer + m_data.lowerProfile(outer); + eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); + } + } + } + + inline Scalar coeffDiag(Index idx) const { + eigen_assert(idx < outerSize()); + eigen_assert(idx < innerSize()); + return this->m_data.diag(idx); + } + + inline Scalar coeffLower(Index row, Index col) const { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + eigen_assert(inner != outer); + + if (IsRowMajor) { + const Index minInnerIndex = outer - m_data.lowerProfile(outer); + if (inner >= minInnerIndex) + return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); + else + return Scalar(0); + + } else { + const Index maxInnerIndex = outer + m_data.lowerProfile(outer); + if (inner <= maxInnerIndex) + return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); + else + return Scalar(0); + } + } + + inline Scalar coeffUpper(Index row, Index col) const { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + eigen_assert(inner != outer); + + if (IsRowMajor) { + const Index minOuterIndex = inner - m_data.upperProfile(inner); + if (outer >= minOuterIndex) + return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); + else + return Scalar(0); + } else { + const Index maxOuterIndex = inner + m_data.upperProfile(inner); + if (outer <= maxOuterIndex) + return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); + else + return Scalar(0); + } + } + + inline Scalar& coeffRefDiag(Index idx) { + eigen_assert(idx < outerSize()); + eigen_assert(idx < innerSize()); + return this->m_data.diag(idx); + } + + inline Scalar& coeffRefLower(Index row, Index col) { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + eigen_assert(inner != outer); + + if (IsRowMajor) { + const Index minInnerIndex = outer - m_data.lowerProfile(outer); + eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); + } else { + const Index maxInnerIndex = outer + m_data.lowerProfile(outer); + eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer)); + } + } + + inline bool coeffExistLower(Index row, Index col) { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + eigen_assert(inner != outer); + + if (IsRowMajor) { + const Index minInnerIndex = outer - m_data.lowerProfile(outer); + return inner >= minInnerIndex; + } else { + const Index maxInnerIndex = outer + m_data.lowerProfile(outer); + return inner <= maxInnerIndex; + } + } + + inline Scalar& coeffRefUpper(Index row, Index col) { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + eigen_assert(inner != outer); + + if (IsRowMajor) { + const Index minOuterIndex = inner - m_data.upperProfile(inner); + eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); + } else { + const Index maxOuterIndex = inner + m_data.upperProfile(inner); + eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage"); + return this->m_data.upper(m_colStartIndex[inner] + (outer - inner)); + } + } + + inline bool coeffExistUpper(Index row, Index col) { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + eigen_assert(inner != outer); + + if (IsRowMajor) { + const Index minOuterIndex = inner - m_data.upperProfile(inner); + return outer >= minOuterIndex; + } else { + const Index maxOuterIndex = inner + m_data.upperProfile(inner); + return outer <= maxOuterIndex; + } + } + + +protected: + +public: + class InnerUpperIterator; + class InnerLowerIterator; + + class OuterUpperIterator; + class OuterLowerIterator; + + /** Removes all non zeros */ + inline void setZero() { + m_data.clear(); + memset(m_colStartIndex, 0, (m_outerSize + 1) * sizeof (Index)); + memset(m_rowStartIndex, 0, (m_outerSize + 1) * sizeof (Index)); + } + + /** \returns the number of non zero coefficients */ + inline Index nonZeros() const { + return m_data.diagSize() + m_data.upperSize() + m_data.lowerSize(); + } + + /** Preallocates \a reserveSize non zeros */ + inline void reserve(Index reserveSize, Index reserveUpperSize, Index reserveLowerSize) { + m_data.reserve(reserveSize, reserveUpperSize, reserveLowerSize); + } + + /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col. 
+ + * + * \warning This function can be extremely slow if the non zero coefficients + * are not inserted in a coherent order. + * + * After an insertion session, you should call the finalize() function. + */ + EIGEN_DONT_INLINE Scalar & insert(Index row, Index col) { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + eigen_assert(outer < outerSize()); + eigen_assert(inner < innerSize()); + + if (outer == inner) + return m_data.diag(col); + + if (IsRowMajor) { + if (outer < inner) //upper matrix + { + Index minOuterIndex = 0; + minOuterIndex = inner - m_data.upperProfile(inner); + + if (outer < minOuterIndex) //The value does not yet exist + { + const Index previousProfile = m_data.upperProfile(inner); + + m_data.upperProfile(inner) = inner - outer; + + + const Index bandIncrement = m_data.upperProfile(inner) - previousProfile; + //shift data stored after this new one + const Index stop = m_colStartIndex[cols()]; + const Index start = m_colStartIndex[inner]; + + + for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { + m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx); + } + + for (Index innerIdx = cols(); innerIdx > inner; innerIdx--) { + m_colStartIndex[innerIdx] += bandIncrement; + } + + //zeros new data + memset(this->_upperPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar)); + + return m_data.upper(m_colStartIndex[inner]); + } else { + return m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner))); + } + } + + if (outer > inner) //lower matrix + { + const Index minInnerIndex = outer - m_data.lowerProfile(outer); + if (inner < minInnerIndex) //The value does not yet exist + { + const Index previousProfile = m_data.lowerProfile(outer); + m_data.lowerProfile(outer) = outer - inner; + + const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile; + //shift data stored after this new one + const Index stop = m_rowStartIndex[rows()]; + const Index start = 
m_rowStartIndex[outer]; + + + for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { + m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx); + } + + for (Index innerIdx = rows(); innerIdx > outer; innerIdx--) { + m_rowStartIndex[innerIdx] += bandIncrement; + } + + //zeros new data + memset(this->_lowerPtr() + start, 0, (bandIncrement - 1) * sizeof (Scalar)); + return m_data.lower(m_rowStartIndex[outer]); + } else { + return m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer))); + } + } + } else { + if (outer > inner) //upper matrix + { + const Index maxOuterIndex = inner + m_data.upperProfile(inner); + if (outer > maxOuterIndex) //The value does not yet exist + { + const Index previousProfile = m_data.upperProfile(inner); + m_data.upperProfile(inner) = outer - inner; + + const Index bandIncrement = m_data.upperProfile(inner) - previousProfile; + //shift data stored after this new one + const Index stop = m_rowStartIndex[rows()]; + const Index start = m_rowStartIndex[inner + 1]; + + for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { + m_data.upper(innerIdx + bandIncrement) = m_data.upper(innerIdx); + } + + for (Index innerIdx = inner + 1; innerIdx < outerSize() + 1; innerIdx++) { + m_rowStartIndex[innerIdx] += bandIncrement; + } + memset(this->_upperPtr() + m_rowStartIndex[inner] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar)); + return m_data.upper(m_rowStartIndex[inner] + m_data.upperProfile(inner)); + } else { + return m_data.upper(m_rowStartIndex[inner] + (outer - inner)); + } + } + + if (outer < inner) //lower matrix + { + const Index maxInnerIndex = outer + m_data.lowerProfile(outer); + if (inner > maxInnerIndex) //The value does not yet exist + { + const Index previousProfile = m_data.lowerProfile(outer); + m_data.lowerProfile(outer) = inner - outer; + + const Index bandIncrement = m_data.lowerProfile(outer) - previousProfile; + //shift data stored after this new one + const Index stop 
= m_colStartIndex[cols()]; + const Index start = m_colStartIndex[outer + 1]; + + for (Index innerIdx = stop; innerIdx >= start; innerIdx--) { + m_data.lower(innerIdx + bandIncrement) = m_data.lower(innerIdx); + } + + for (Index innerIdx = outer + 1; innerIdx < outerSize() + 1; innerIdx++) { + m_colStartIndex[innerIdx] += bandIncrement; + } + memset(this->_lowerPtr() + m_colStartIndex[outer] + previousProfile + 1, 0, (bandIncrement - 1) * sizeof (Scalar)); + return m_data.lower(m_colStartIndex[outer] + m_data.lowerProfile(outer)); + } else { + return m_data.lower(m_colStartIndex[outer] + (inner - outer)); + } + } + } + } + + /** Must be called after inserting a set of non zero entries. + */ + inline void finalize() { + if (IsRowMajor) { + if (rows() > cols()) + m_data.resize(cols(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1); + else + m_data.resize(rows(), cols(), rows(), m_colStartIndex[cols()] + 1, m_rowStartIndex[rows()] + 1); + + // eigen_assert(rows() == cols() && "memory reorganisatrion only works with suare matrix"); + // + // Scalar* newArray = new Scalar[m_colStartIndex[cols()] + 1 + m_rowStartIndex[rows()] + 1]; + // Index dataIdx = 0; + // for (Index row = 0; row < rows(); row++) { + // + // const Index nbLowerElts = m_rowStartIndex[row + 1] - m_rowStartIndex[row]; + // // std::cout << "nbLowerElts" << nbLowerElts << std::endl; + // memcpy(newArray + dataIdx, m_data.m_lower + m_rowStartIndex[row], nbLowerElts * sizeof (Scalar)); + // m_rowStartIndex[row] = dataIdx; + // dataIdx += nbLowerElts; + // + // const Index nbUpperElts = m_colStartIndex[row + 1] - m_colStartIndex[row]; + // memcpy(newArray + dataIdx, m_data.m_upper + m_colStartIndex[row], nbUpperElts * sizeof (Scalar)); + // m_colStartIndex[row] = dataIdx; + // dataIdx += nbUpperElts; + // + // + // } + // //todo : don't access m_data profile directly : add an accessor from SkylineMatrix + // m_rowStartIndex[rows()] = m_rowStartIndex[rows()-1] + 
m_data.lowerProfile(rows()-1); + // m_colStartIndex[cols()] = m_colStartIndex[cols()-1] + m_data.upperProfile(cols()-1); + // + // delete[] m_data.m_lower; + // delete[] m_data.m_upper; + // + // m_data.m_lower = newArray; + // m_data.m_upper = newArray; + } else { + if (rows() > cols()) + m_data.resize(cols(), rows(), cols(), m_rowStartIndex[cols()] + 1, m_colStartIndex[cols()] + 1); + else + m_data.resize(rows(), rows(), cols(), m_rowStartIndex[rows()] + 1, m_colStartIndex[rows()] + 1); + } + } + + inline void squeeze() { + finalize(); + m_data.squeeze(); + } + + void prune(Scalar reference, RealScalar epsilon = dummy_precision ()) { + //TODO + } + + /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero + * \sa resizeNonZeros(Index), reserve(), setZero() + */ + void resize(size_t rows, size_t cols) { + const Index diagSize = rows > cols ? cols : rows; + m_innerSize = IsRowMajor ? cols : rows; + + eigen_assert(rows == cols && "Skyline matrix must be square matrix"); + + if (diagSize % 2) { // diagSize is odd + const Index k = (diagSize - 1) / 2; + + m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? rows : cols, + 2 * k * k + k + 1, + 2 * k * k + k + 1); + + } else // diagSize is even + { + const Index k = diagSize / 2; + m_data.resize(diagSize, IsRowMajor ? cols : rows, IsRowMajor ? 
rows : cols, + 2 * k * k - k + 1, + 2 * k * k - k + 1); + } + + if (m_colStartIndex && m_rowStartIndex) { + delete[] m_colStartIndex; + delete[] m_rowStartIndex; + } + m_colStartIndex = new Index [cols + 1]; + m_rowStartIndex = new Index [rows + 1]; + m_outerSize = diagSize; + + m_data.reset(); + m_data.clear(); + + m_outerSize = diagSize; + memset(m_colStartIndex, 0, (cols + 1) * sizeof (Index)); + memset(m_rowStartIndex, 0, (rows + 1) * sizeof (Index)); + } + + void resizeNonZeros(Index size) { + m_data.resize(size); + } + + inline SkylineMatrix() + : m_outerSize(-1), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { + resize(0, 0); + } + + inline SkylineMatrix(size_t rows, size_t cols) + : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { + resize(rows, cols); + } + + template + inline SkylineMatrix(const SkylineMatrixBase& other) + : m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { + *this = other.derived(); + } + + inline SkylineMatrix(const SkylineMatrix & other) + : Base(), m_outerSize(0), m_innerSize(0), m_colStartIndex(0), m_rowStartIndex(0) { + *this = other.derived(); + } + + inline void swap(SkylineMatrix & other) { + //EIGEN_DBG_SKYLINE(std::cout << "SkylineMatrix:: swap\n"); + std::swap(m_colStartIndex, other.m_colStartIndex); + std::swap(m_rowStartIndex, other.m_rowStartIndex); + std::swap(m_innerSize, other.m_innerSize); + std::swap(m_outerSize, other.m_outerSize); + m_data.swap(other.m_data); + } + + inline SkylineMatrix & operator=(const SkylineMatrix & other) { + std::cout << "SkylineMatrix& operator=(const SkylineMatrix& other)\n"; + if (other.isRValue()) { + swap(other.const_cast_derived()); + } else { + resize(other.rows(), other.cols()); + memcpy(m_colStartIndex, other.m_colStartIndex, (m_outerSize + 1) * sizeof (Index)); + memcpy(m_rowStartIndex, other.m_rowStartIndex, (m_outerSize + 1) * sizeof (Index)); + m_data = other.m_data; + } + return *this; + } + + template + inline SkylineMatrix 
& operator=(const SkylineMatrixBase& other) { + const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + if (needToTranspose) { + // TODO + // return *this; + } else { + // there is no special optimization + return SkylineMatrixBase::operator=(other.derived()); + } + } + + friend std::ostream & operator <<(std::ostream & s, const SkylineMatrix & m) { + + EIGEN_DBG_SKYLINE( + std::cout << "upper elements : " << std::endl; + for (Index i = 0; i < m.m_data.upperSize(); i++) + std::cout << m.m_data.upper(i) << "\t"; + std::cout << std::endl; + std::cout << "upper profile : " << std::endl; + for (Index i = 0; i < m.m_data.upperProfileSize(); i++) + std::cout << m.m_data.upperProfile(i) << "\t"; + std::cout << std::endl; + std::cout << "lower startIdx : " << std::endl; + for (Index i = 0; i < m.m_data.upperProfileSize(); i++) + std::cout << (IsRowMajor ? m.m_colStartIndex[i] : m.m_rowStartIndex[i]) << "\t"; + std::cout << std::endl; + + + std::cout << "lower elements : " << std::endl; + for (Index i = 0; i < m.m_data.lowerSize(); i++) + std::cout << m.m_data.lower(i) << "\t"; + std::cout << std::endl; + std::cout << "lower profile : " << std::endl; + for (Index i = 0; i < m.m_data.lowerProfileSize(); i++) + std::cout << m.m_data.lowerProfile(i) << "\t"; + std::cout << std::endl; + std::cout << "lower startIdx : " << std::endl; + for (Index i = 0; i < m.m_data.lowerProfileSize(); i++) + std::cout << (IsRowMajor ? 
m.m_rowStartIndex[i] : m.m_colStartIndex[i]) << "\t"; + std::cout << std::endl; + ); + for (Index rowIdx = 0; rowIdx < m.rows(); rowIdx++) { + for (Index colIdx = 0; colIdx < m.cols(); colIdx++) { + s << m.coeff(rowIdx, colIdx) << "\t"; + } + s << std::endl; + } + return s; + } + + /** Destructor */ + inline ~SkylineMatrix() { + delete[] m_colStartIndex; + delete[] m_rowStartIndex; + } + + /** Overloaded for performance */ + Scalar sum() const; +}; + +template +class SkylineMatrix::InnerUpperIterator { +public: + + InnerUpperIterator(const SkylineMatrix& mat, Index outer) + : m_matrix(mat), m_outer(outer), + m_id(_Options == RowMajor ? mat.m_colStartIndex[outer] : mat.m_rowStartIndex[outer] + 1), + m_start(m_id), + m_end(_Options == RowMajor ? mat.m_colStartIndex[outer + 1] : mat.m_rowStartIndex[outer + 1] + 1) { + } + + inline InnerUpperIterator & operator++() { + m_id++; + return *this; + } + + inline InnerUpperIterator & operator+=(Index shift) { + m_id += shift; + return *this; + } + + inline Scalar value() const { + return m_matrix.m_data.upper(m_id); + } + + inline Scalar* valuePtr() { + return const_cast (&(m_matrix.m_data.upper(m_id))); + } + + inline Scalar& valueRef() { + return const_cast (m_matrix.m_data.upper(m_id)); + } + + inline Index index() const { + return IsRowMajor ? m_outer - m_matrix.m_data.upperProfile(m_outer) + (m_id - m_start) : + m_outer + (m_id - m_start) + 1; + } + + inline Index row() const { + return IsRowMajor ? index() : m_outer; + } + + inline Index col() const { + return IsRowMajor ? 
m_outer : index(); + } + + inline size_t size() const { + return m_matrix.m_data.upperProfile(m_outer); + } + + inline operator bool() const { + return (m_id < m_end) && (m_id >= m_start); + } + +protected: + const SkylineMatrix& m_matrix; + const Index m_outer; + Index m_id; + const Index m_start; + const Index m_end; +}; + +template +class SkylineMatrix::InnerLowerIterator { +public: + + InnerLowerIterator(const SkylineMatrix& mat, Index outer) + : m_matrix(mat), + m_outer(outer), + m_id(_Options == RowMajor ? mat.m_rowStartIndex[outer] : mat.m_colStartIndex[outer] + 1), + m_start(m_id), + m_end(_Options == RowMajor ? mat.m_rowStartIndex[outer + 1] : mat.m_colStartIndex[outer + 1] + 1) { + } + + inline InnerLowerIterator & operator++() { + m_id++; + return *this; + } + + inline InnerLowerIterator & operator+=(Index shift) { + m_id += shift; + return *this; + } + + inline Scalar value() const { + return m_matrix.m_data.lower(m_id); + } + + inline Scalar* valuePtr() { + return const_cast (&(m_matrix.m_data.lower(m_id))); + } + + inline Scalar& valueRef() { + return const_cast (m_matrix.m_data.lower(m_id)); + } + + inline Index index() const { + return IsRowMajor ? m_outer - m_matrix.m_data.lowerProfile(m_outer) + (m_id - m_start) : + m_outer + (m_id - m_start) + 1; + ; + } + + inline Index row() const { + return IsRowMajor ? m_outer : index(); + } + + inline Index col() const { + return IsRowMajor ? 
index() : m_outer; + } + + inline size_t size() const { + return m_matrix.m_data.lowerProfile(m_outer); + } + + inline operator bool() const { + return (m_id < m_end) && (m_id >= m_start); + } + +protected: + const SkylineMatrix& m_matrix; + const Index m_outer; + Index m_id; + const Index m_start; + const Index m_end; +}; + +} // end namespace Eigen + +#endif // EIGEN_SKYLINEMATRIX_H diff --git a/external/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h b/external/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h new file mode 100644 index 0000000..b0d5e10 --- /dev/null +++ b/external/unsupported/Eigen/src/Skyline/SkylineMatrixBase.h @@ -0,0 +1,212 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Guillaume Saupin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SKYLINEMATRIXBASE_H +#define EIGEN_SKYLINEMATRIXBASE_H + +#include "SkylineUtil.h" + +namespace Eigen { + +/** \ingroup Skyline_Module + * + * \class SkylineMatrixBase + * + * \brief Base class of any skyline matrices or skyline expressions + * + * \param Derived + * + */ +template class SkylineMatrixBase : public EigenBase { +public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::index::type Index; + + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + /**< The number of rows at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + /**< The number of columns at compile-time. 
This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + + MaxSizeAtCompileTime = (internal::size_at_compile_time::ret), + + IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + Flags = internal::traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + CoeffReadCost = internal::traits::CoeffReadCost, + /**< This is a rough measure of how expensive it is to read one coefficient from + * this expression. + */ + + IsRowMajor = Flags & RowMajorBit ? 1 : 0 + }; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is the "real scalar" type; if the \a Scalar type is already real numbers + * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If + * \a Scalar is \a std::complex then RealScalar is \a T. 
+ * + * \sa class NumTraits + */ + typedef typename NumTraits::Real RealScalar; + + /** type of the equivalent square matrix */ + typedef Matrix SquareMatrixType; + + inline const Derived& derived() const { + return *static_cast (this); + } + + inline Derived& derived() { + return *static_cast (this); + } + + inline Derived& const_cast_derived() const { + return *static_cast (const_cast (this)); + } +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ + inline EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT { + return derived().rows(); + } + + /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + inline EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT { + return derived().cols(); + } + + /** \returns the number of coefficients, which is \a rows()*cols(). + * \sa rows(), cols(), SizeAtCompileTime. */ + inline EIGEN_CONSTEXPR Index size() const EIGEN_NOEXCEPT { + return rows() * cols(); + } + + /** \returns the number of nonzero coefficients which is in practice the number + * of stored coefficients. */ + inline Index nonZeros() const { + return derived().nonZeros(); + } + + /** \returns the size of the storage major dimension, + * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ + Index outerSize() const { + return (int(Flags) & RowMajorBit) ? this->rows() : this->cols(); + } + + /** \returns the size of the inner dimension according to the storage order, + * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ + Index innerSize() const { + return (int(Flags) & RowMajorBit) ? 
this->cols() : this->rows(); + } + + bool isRValue() const { + return m_isRValue; + } + + Derived& markAsRValue() { + m_isRValue = true; + return derived(); + } + + SkylineMatrixBase() : m_isRValue(false) { + /* TODO check flags */ + } + + inline Derived & operator=(const Derived& other) { + this->operator= (other); + return derived(); + } + + template + inline void assignGeneric(const OtherDerived& other) { + derived().resize(other.rows(), other.cols()); + for (Index row = 0; row < rows(); row++) + for (Index col = 0; col < cols(); col++) { + if (other.coeff(row, col) != Scalar(0)) + derived().insert(row, col) = other.coeff(row, col); + } + derived().finalize(); + } + + template + inline Derived & operator=(const SkylineMatrixBase& other) { + //TODO + } + + template + inline Derived & operator=(const SkylineProduct& product); + + friend std::ostream & operator <<(std::ostream & s, const SkylineMatrixBase& m) { + s << m.derived(); + return s; + } + + template + const typename SkylineProductReturnType::Type + operator*(const MatrixBase &other) const; + + /** \internal use operator= */ + template + void evalTo(MatrixBase& dst) const { + dst.setZero(); + for (Index i = 0; i < rows(); i++) + for (Index j = 0; j < rows(); j++) + dst(i, j) = derived().coeff(i, j); + } + + Matrix toDense() const { + return derived(); + } + + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. 
+ */ + EIGEN_STRONG_INLINE const typename internal::eval::type eval() const { + return typename internal::eval::type(derived()); + } + +protected: + bool m_isRValue; +}; + +} // end namespace Eigen + +#endif // EIGEN_SKYLINEMATRIXBASE_H diff --git a/external/unsupported/Eigen/src/Skyline/SkylineProduct.h b/external/unsupported/Eigen/src/Skyline/SkylineProduct.h new file mode 100644 index 0000000..d9eb814 --- /dev/null +++ b/external/unsupported/Eigen/src/Skyline/SkylineProduct.h @@ -0,0 +1,295 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Guillaume Saupin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SKYLINEPRODUCT_H +#define EIGEN_SKYLINEPRODUCT_H + +namespace Eigen { + +template +struct SkylineProductReturnType { + typedef const typename internal::nested_eval::type LhsNested; + typedef const typename internal::nested_eval::type RhsNested; + + typedef SkylineProduct Type; +}; + +template +struct internal::traits > { + // clean the nested types: + typedef typename internal::remove_all::type _LhsNested; + typedef typename internal::remove_all::type _RhsNested; + typedef typename _LhsNested::Scalar Scalar; + + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + + RowsAtCompileTime = _LhsNested::RowsAtCompileTime, + ColsAtCompileTime = _RhsNested::ColsAtCompileTime, + InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), + + MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, + MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, + + EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), + ResultIsSkyline = ProductMode == 
SkylineTimeSkylineProduct, + + RemovedBits = ~((EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSkyline ? 0 : SkylineBit)), + + Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) + | EvalBeforeAssigningBit + | EvalBeforeNestingBit, + + CoeffReadCost = HugeCost + }; + + typedef typename internal::conditional >, + MatrixBase > >::type Base; +}; + +namespace internal { +template +class SkylineProduct : no_assignment_operator, +public traits >::Base { +public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(SkylineProduct) + +private: + + typedef typename traits::_LhsNested _LhsNested; + typedef typename traits::_RhsNested _RhsNested; + +public: + + template + EIGEN_STRONG_INLINE SkylineProduct(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) { + eigen_assert(lhs.cols() == rhs.rows()); + + enum { + ProductIsValid = _LhsNested::ColsAtCompileTime == Dynamic + || _RhsNested::RowsAtCompileTime == Dynamic + || int(_LhsNested::ColsAtCompileTime) == int(_RhsNested::RowsAtCompileTime), + AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested, _RhsNested) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwise()*v2 + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + } + + EIGEN_STRONG_INLINE Index rows() const { + return m_lhs.rows(); + } + + EIGEN_STRONG_INLINE Index cols() const { + return m_rhs.cols(); + } + + EIGEN_STRONG_INLINE const _LhsNested& lhs() const { + return m_lhs; + } + + EIGEN_STRONG_INLINE const _RhsNested& rhs() const { + return m_rhs; + 
} + +protected: + LhsNested m_lhs; + RhsNested m_rhs; +}; + +// dense = skyline * dense +// Note that here we force no inlining and separate the setZero() because GCC messes up otherwise + +template +EIGEN_DONT_INLINE void skyline_row_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) { + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; + typedef typename traits::Scalar Scalar; + + enum { + LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit, + LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit, + ProcessFirstHalf = LhsIsSelfAdjoint + && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0) + || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor) + || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)), + ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) + }; + + //Use matrix diagonal part <- Improvement : use inner iterator on dense matrix. + for (Index col = 0; col < rhs.cols(); col++) { + for (Index row = 0; row < lhs.rows(); row++) { + dst(row, col) = lhs.coeffDiag(row) * rhs(row, col); + } + } + //Use matrix lower triangular part + for (Index row = 0; row < lhs.rows(); row++) { + typename _Lhs::InnerLowerIterator lIt(lhs, row); + const Index stop = lIt.col() + lIt.size(); + for (Index col = 0; col < rhs.cols(); col++) { + + Index k = lIt.col(); + Scalar tmp = 0; + while (k < stop) { + tmp += + lIt.value() * + rhs(k++, col); + ++lIt; + } + dst(row, col) += tmp; + lIt += -lIt.size(); + } + + } + + //Use matrix upper triangular part + for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) { + typename _Lhs::InnerUpperIterator uIt(lhs, lhscol); + const Index stop = uIt.size() + uIt.row(); + for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) { + + + const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol); + Index k = uIt.row(); + while (k < stop) { + dst(k++, rhscol) += + uIt.value() * + rhsCoeff; + ++uIt; + } + uIt += -uIt.size(); + } + } + +} + +template 
+EIGEN_DONT_INLINE void skyline_col_major_time_dense_product(const Lhs& lhs, const Rhs& rhs, Dest& dst) { + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; + typedef typename traits::Scalar Scalar; + + enum { + LhsIsRowMajor = (_Lhs::Flags & RowMajorBit) == RowMajorBit, + LhsIsSelfAdjoint = (_Lhs::Flags & SelfAdjointBit) == SelfAdjointBit, + ProcessFirstHalf = LhsIsSelfAdjoint + && (((_Lhs::Flags & (UpperTriangularBit | LowerTriangularBit)) == 0) + || ((_Lhs::Flags & UpperTriangularBit) && !LhsIsRowMajor) + || ((_Lhs::Flags & LowerTriangularBit) && LhsIsRowMajor)), + ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) + }; + + //Use matrix diagonal part <- Improvement : use inner iterator on dense matrix. + for (Index col = 0; col < rhs.cols(); col++) { + for (Index row = 0; row < lhs.rows(); row++) { + dst(row, col) = lhs.coeffDiag(row) * rhs(row, col); + } + } + + //Use matrix upper triangular part + for (Index row = 0; row < lhs.rows(); row++) { + typename _Lhs::InnerUpperIterator uIt(lhs, row); + const Index stop = uIt.col() + uIt.size(); + for (Index col = 0; col < rhs.cols(); col++) { + + Index k = uIt.col(); + Scalar tmp = 0; + while (k < stop) { + tmp += + uIt.value() * + rhs(k++, col); + ++uIt; + } + + + dst(row, col) += tmp; + uIt += -uIt.size(); + } + } + + //Use matrix lower triangular part + for (Index lhscol = 0; lhscol < lhs.cols(); lhscol++) { + typename _Lhs::InnerLowerIterator lIt(lhs, lhscol); + const Index stop = lIt.size() + lIt.row(); + for (Index rhscol = 0; rhscol < rhs.cols(); rhscol++) { + + const Scalar rhsCoeff = rhs.coeff(lhscol, rhscol); + Index k = lIt.row(); + while (k < stop) { + dst(k++, rhscol) += + lIt.value() * + rhsCoeff; + ++lIt; + } + lIt += -lIt.size(); + } + } + +} + +template::Flags&RowMajorBit> + struct skyline_product_selector; + +template +struct skyline_product_selector { + typedef typename traits::type>::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, 
ResultType & res) { + skyline_row_major_time_dense_product (lhs, rhs, res); + } +}; + +template +struct skyline_product_selector { + typedef typename traits::type>::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType & res) { + skyline_col_major_time_dense_product (lhs, rhs, res); + } +}; + +} // end namespace internal + +// template +// template +// Derived & MatrixBase::lazyAssign(const SkylineProduct& product) { +// typedef typename internal::remove_all::type _Lhs; +// internal::skyline_product_selector::type, +// typename internal::remove_all::type, +// Derived>::run(product.lhs(), product.rhs(), derived()); +// +// return derived(); +// } + +// skyline * dense + +template +template +EIGEN_STRONG_INLINE const typename SkylineProductReturnType::Type +SkylineMatrixBase::operator*(const MatrixBase &other) const { + + return typename SkylineProductReturnType::Type(derived(), other.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_SKYLINEPRODUCT_H diff --git a/external/unsupported/Eigen/src/Skyline/SkylineStorage.h b/external/unsupported/Eigen/src/Skyline/SkylineStorage.h new file mode 100644 index 0000000..cc7514f --- /dev/null +++ b/external/unsupported/Eigen/src/Skyline/SkylineStorage.h @@ -0,0 +1,259 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Guillaume Saupin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SKYLINE_STORAGE_H +#define EIGEN_SKYLINE_STORAGE_H + +namespace Eigen { + +/** Stores a skyline set of values in three structures : + * The diagonal elements + * The upper elements + * The lower elements + * + */ +template +class SkylineStorage { + typedef typename NumTraits::Real RealScalar; + typedef SparseIndex Index; +public: + + SkylineStorage() + : m_diag(0), + m_lower(0), + m_upper(0), + m_lowerProfile(0), + m_upperProfile(0), + m_diagSize(0), + m_upperSize(0), + m_lowerSize(0), + m_upperProfileSize(0), + m_lowerProfileSize(0), + m_allocatedSize(0) { + } + + SkylineStorage(const SkylineStorage& other) + : m_diag(0), + m_lower(0), + m_upper(0), + m_lowerProfile(0), + m_upperProfile(0), + m_diagSize(0), + m_upperSize(0), + m_lowerSize(0), + m_upperProfileSize(0), + m_lowerProfileSize(0), + m_allocatedSize(0) { + *this = other; + } + + SkylineStorage & operator=(const SkylineStorage& other) { + resize(other.diagSize(), other.m_upperProfileSize, other.m_lowerProfileSize, other.upperSize(), other.lowerSize()); + memcpy(m_diag, other.m_diag, m_diagSize * sizeof (Scalar)); + memcpy(m_upper, other.m_upper, other.upperSize() * sizeof (Scalar)); + memcpy(m_lower, other.m_lower, other.lowerSize() * sizeof (Scalar)); + memcpy(m_upperProfile, other.m_upperProfile, m_upperProfileSize * sizeof (Index)); + memcpy(m_lowerProfile, other.m_lowerProfile, m_lowerProfileSize * sizeof (Index)); + return *this; + } + + void swap(SkylineStorage& other) { + std::swap(m_diag, other.m_diag); + std::swap(m_upper, other.m_upper); + std::swap(m_lower, other.m_lower); + std::swap(m_upperProfile, other.m_upperProfile); + std::swap(m_lowerProfile, other.m_lowerProfile); + std::swap(m_diagSize, other.m_diagSize); + std::swap(m_upperSize, other.m_upperSize); + std::swap(m_lowerSize, other.m_lowerSize); + std::swap(m_allocatedSize, other.m_allocatedSize); + } + + ~SkylineStorage() { + delete[] m_diag; + delete[] m_upper; + if (m_upper != m_lower) + delete[] m_lower; + delete[] 
m_upperProfile; + delete[] m_lowerProfile; + } + + void reserve(Index size, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) { + Index newAllocatedSize = size + upperSize + lowerSize; + if (newAllocatedSize > m_allocatedSize) + reallocate(size, upperProfileSize, lowerProfileSize, upperSize, lowerSize); + } + + void squeeze() { + if (m_allocatedSize > m_diagSize + m_upperSize + m_lowerSize) + reallocate(m_diagSize, m_upperProfileSize, m_lowerProfileSize, m_upperSize, m_lowerSize); + } + + void resize(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize, float reserveSizeFactor = 0) { + if (m_allocatedSize < diagSize + upperSize + lowerSize) + reallocate(diagSize, upperProfileSize, lowerProfileSize, upperSize + Index(reserveSizeFactor * upperSize), lowerSize + Index(reserveSizeFactor * lowerSize)); + m_diagSize = diagSize; + m_upperSize = upperSize; + m_lowerSize = lowerSize; + m_upperProfileSize = upperProfileSize; + m_lowerProfileSize = lowerProfileSize; + } + + inline Index diagSize() const { + return m_diagSize; + } + + inline Index upperSize() const { + return m_upperSize; + } + + inline Index lowerSize() const { + return m_lowerSize; + } + + inline Index upperProfileSize() const { + return m_upperProfileSize; + } + + inline Index lowerProfileSize() const { + return m_lowerProfileSize; + } + + inline Index allocatedSize() const { + return m_allocatedSize; + } + + inline void clear() { + m_diagSize = 0; + } + + inline Scalar& diag(Index i) { + return m_diag[i]; + } + + inline const Scalar& diag(Index i) const { + return m_diag[i]; + } + + inline Scalar& upper(Index i) { + return m_upper[i]; + } + + inline const Scalar& upper(Index i) const { + return m_upper[i]; + } + + inline Scalar& lower(Index i) { + return m_lower[i]; + } + + inline const Scalar& lower(Index i) const { + return m_lower[i]; + } + + inline Index& upperProfile(Index i) { + return m_upperProfile[i]; + } + + inline const 
Index& upperProfile(Index i) const { + return m_upperProfile[i]; + } + + inline Index& lowerProfile(Index i) { + return m_lowerProfile[i]; + } + + inline const Index& lowerProfile(Index i) const { + return m_lowerProfile[i]; + } + + static SkylineStorage Map(Index* upperProfile, Index* lowerProfile, Scalar* diag, Scalar* upper, Scalar* lower, Index size, Index upperSize, Index lowerSize) { + SkylineStorage res; + res.m_upperProfile = upperProfile; + res.m_lowerProfile = lowerProfile; + res.m_diag = diag; + res.m_upper = upper; + res.m_lower = lower; + res.m_allocatedSize = res.m_diagSize = size; + res.m_upperSize = upperSize; + res.m_lowerSize = lowerSize; + return res; + } + + inline void reset() { + memset(m_diag, 0, m_diagSize * sizeof (Scalar)); + memset(m_upper, 0, m_upperSize * sizeof (Scalar)); + memset(m_lower, 0, m_lowerSize * sizeof (Scalar)); + memset(m_upperProfile, 0, m_diagSize * sizeof (Index)); + memset(m_lowerProfile, 0, m_diagSize * sizeof (Index)); + } + + void prune(Scalar reference, RealScalar epsilon = dummy_precision()) { + //TODO + } + +protected: + + inline void reallocate(Index diagSize, Index upperProfileSize, Index lowerProfileSize, Index upperSize, Index lowerSize) { + + Scalar* diag = new Scalar[diagSize]; + Scalar* upper = new Scalar[upperSize]; + Scalar* lower = new Scalar[lowerSize]; + Index* upperProfile = new Index[upperProfileSize]; + Index* lowerProfile = new Index[lowerProfileSize]; + + Index copyDiagSize = (std::min)(diagSize, m_diagSize); + Index copyUpperSize = (std::min)(upperSize, m_upperSize); + Index copyLowerSize = (std::min)(lowerSize, m_lowerSize); + Index copyUpperProfileSize = (std::min)(upperProfileSize, m_upperProfileSize); + Index copyLowerProfileSize = (std::min)(lowerProfileSize, m_lowerProfileSize); + + // copy + memcpy(diag, m_diag, copyDiagSize * sizeof (Scalar)); + memcpy(upper, m_upper, copyUpperSize * sizeof (Scalar)); + memcpy(lower, m_lower, copyLowerSize * sizeof (Scalar)); + memcpy(upperProfile, 
m_upperProfile, copyUpperProfileSize * sizeof (Index)); + memcpy(lowerProfile, m_lowerProfile, copyLowerProfileSize * sizeof (Index)); + + + + // delete old stuff + delete[] m_diag; + delete[] m_upper; + delete[] m_lower; + delete[] m_upperProfile; + delete[] m_lowerProfile; + m_diag = diag; + m_upper = upper; + m_lower = lower; + m_upperProfile = upperProfile; + m_lowerProfile = lowerProfile; + m_allocatedSize = diagSize + upperSize + lowerSize; + m_upperSize = upperSize; + m_lowerSize = lowerSize; + } + +public: + Scalar* m_diag; + Scalar* m_upper; + Scalar* m_lower; + Index* m_upperProfile; + Index* m_lowerProfile; + Index m_diagSize; + Index m_upperSize; + Index m_lowerSize; + Index m_upperProfileSize; + Index m_lowerProfileSize; + Index m_allocatedSize; + +}; + +} // end namespace Eigen + +#endif // EIGEN_SKYLINE_STORAGE_H diff --git a/external/unsupported/Eigen/src/Skyline/SkylineUtil.h b/external/unsupported/Eigen/src/Skyline/SkylineUtil.h new file mode 100644 index 0000000..75eb612 --- /dev/null +++ b/external/unsupported/Eigen/src/Skyline/SkylineUtil.h @@ -0,0 +1,89 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Guillaume Saupin +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SKYLINEUTIL_H +#define EIGEN_SKYLINEUTIL_H + +namespace Eigen { + +#ifdef NDEBUG +#define EIGEN_DBG_SKYLINE(X) +#else +#define EIGEN_DBG_SKYLINE(X) X +#endif + +const unsigned int SkylineBit = 0x1200; +template class SkylineProduct; +enum AdditionalProductEvaluationMode {SkylineTimeDenseProduct, SkylineTimeSkylineProduct, DenseTimeSkylineProduct}; +enum {IsSkyline = SkylineBit}; + + +#define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SkylineMatrixBase& other) \ +{ \ + return Base::operator Op(other.derived()); \ +} \ +EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ +{ \ + return Base::operator Op(other); \ +} + +#define EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ +{ \ + return Base::operator Op(scalar); \ +} + +#define EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ + EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ + EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ + EIGEN_SKYLINE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ + EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ + EIGEN_SKYLINE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) + +#define _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \ + typedef BaseClass Base; \ + typedef typename Eigen::internal::traits::Scalar Scalar; \ + typedef typename Eigen::NumTraits::Real RealScalar; \ + typedef typename Eigen::internal::traits::StorageKind StorageKind; \ + typedef typename Eigen::internal::index::type Index; \ + enum { Flags = Eigen::internal::traits::Flags, }; + +#define EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived) \ + _EIGEN_SKYLINE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SkylineMatrixBase) + +template class SkylineMatrixBase; +template class SkylineMatrix; +template class DynamicSkylineMatrix; +template class SkylineVector; +template class 
MappedSkylineMatrix; + +namespace internal { + +template struct skyline_product_mode; +template::value> struct SkylineProductReturnType; + +template class eval +{ + typedef typename traits::Scalar _Scalar; + enum { + _Flags = traits::Flags + }; + + public: + typedef SkylineMatrix<_Scalar, _Flags> type; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SKYLINEUTIL_H diff --git a/external/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h b/external/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h new file mode 100644 index 0000000..e9ec746 --- /dev/null +++ b/external/unsupported/Eigen/src/SparseExtra/BlockOfDynamicSparseMatrix.h @@ -0,0 +1,122 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H +#define EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H + +namespace Eigen { + +#if 0 + +// NOTE Have to be reimplemented as a specialization of BlockImpl< DynamicSparseMatrix<_Scalar, _Options, _Index>, ... 
> +// See SparseBlock.h for an example + + +/*************************************************************************** +* specialisation for DynamicSparseMatrix +***************************************************************************/ + +template +class SparseInnerVectorSet, Size> + : public SparseMatrixBase, Size> > +{ + typedef DynamicSparseMatrix<_Scalar, _Options, _Index> MatrixType; + public: + + enum { IsRowMajor = internal::traits::IsRowMajor }; + + EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet) + class InnerIterator: public MatrixType::InnerIterator + { + public: + inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer) + : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer) + {} + inline Index row() const { return IsRowMajor ? m_outer : this->index(); } + inline Index col() const { return IsRowMajor ? this->index() : m_outer; } + protected: + Index m_outer; + }; + + inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize) + : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) + { + eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + } + + inline SparseInnerVectorSet(const MatrixType& matrix, Index outer) + : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) + { + eigen_assert(Size!=Dynamic); + eigen_assert( (outer>=0) && (outer + inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) + { + if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) + { + // need to transpose => perform a block evaluation followed by a big swap + DynamicSparseMatrix aux(other); + *this = aux.markAsRValue(); + } + else + { + // evaluate/copy vector per vector + for (Index j=0; j aux(other.innerVector(j)); + m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); + } + } + return *this; + } + + inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) + { + return operator=(other); + } 
+ + Index nonZeros() const + { + Index count = 0; + for (Index j=0; j0); + return m_matrix.data()[m_outerStart].vale(m_matrix.data()[m_outerStart].size()-1); + } + +// template +// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) +// { +// return *this; +// } + + EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + const typename MatrixType::Nested m_matrix; + Index m_outerStart; + const internal::variable_if_dynamic m_outerSize; + +}; + +#endif + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_BLOCKFORDYNAMICMATRIX_H diff --git a/external/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h b/external/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h new file mode 100644 index 0000000..536a0c3 --- /dev/null +++ b/external/unsupported/Eigen/src/SparseExtra/BlockSparseMatrix.h @@ -0,0 +1,1079 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2013 Desire Nuentsa +// Copyright (C) 2013 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPARSEBLOCKMATRIX_H +#define EIGEN_SPARSEBLOCKMATRIX_H + +namespace Eigen { +/** \ingroup SparseCore_Module + * + * \class BlockSparseMatrix + * + * \brief A versatile sparse matrix representation where each element is a block + * + * This class provides routines to manipulate block sparse matrices stored in a + * BSR-like representation. There are two main types : + * + * 1. All blocks have the same number of rows and columns, called block size + * in the following. 
In this case, if this block size is known at compile time, + * it can be given as a template parameter like + * \code + * BlockSparseMatrix bmat(b_rows, b_cols); + * \endcode + * Here, bmat is a b_rows x b_cols block sparse matrix + * where each coefficient is a 3x3 dense matrix. + * If the block size is fixed but will be given at runtime, + * \code + * BlockSparseMatrix bmat(b_rows, b_cols); + * bmat.setBlockSize(block_size); + * \endcode + * + * 2. The second case is for variable-block sparse matrices. + * Here each block has its own dimensions. The only restriction is that all the blocks + * in a row (resp. a column) should have the same number of rows (resp. of columns). + * It is thus required in this case to describe the layout of the matrix by calling + * setBlockLayout(rowBlocks, colBlocks). + * + * In any of the previous case, the matrix can be filled by calling setFromTriplets(). + * A regular sparse matrix can be converted to a block sparse matrix and vice versa. + * It is obviously required to describe the block layout beforehand by calling either + * setBlockSize() for fixed-size blocks or setBlockLayout for variable-size blocks. + * + * \tparam _Scalar The Scalar type + * \tparam _BlockAtCompileTime The block layout option. It takes the following values + * Dynamic : block size known at runtime + * a numeric number : fixed-size block known at compile time + */ +template class BlockSparseMatrix; + +template class BlockSparseMatrixView; + +namespace internal { +template +struct traits > +{ + typedef _Scalar Scalar; + typedef _Index Index; + typedef Sparse StorageKind; // FIXME Where is it used ?? 
+ typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + BlockSize = _BlockAtCompileTime, + Flags = _Options | NestByRefBit | LvalueBit, + CoeffReadCost = NumTraits::ReadCost, + SupportedAccessPatterns = InnerRandomAccessPattern + }; +}; +template +struct traits > +{ + typedef Ref > Scalar; + typedef Ref > RealScalar; + +}; + +// Function object to sort a triplet list +template +struct TripletComp +{ + typedef typename Iterator::value_type Triplet; + bool operator()(const Triplet& a, const Triplet& b) + { if(IsColMajor) + return ((a.col() == b.col() && a.row() < b.row()) || (a.col() < b.col())); + else + return ((a.row() == b.row() && a.col() < b.col()) || (a.row() < b.row())); + } +}; +} // end namespace internal + + +/* Proxy to view the block sparse matrix as a regular sparse matrix */ +template +class BlockSparseMatrixView : public SparseMatrixBase +{ + public: + typedef Ref Scalar; + typedef Ref RealScalar; + typedef typename BlockSparseMatrixT::Index Index; + typedef BlockSparseMatrixT Nested; + enum { + Flags = BlockSparseMatrixT::Options, + Options = BlockSparseMatrixT::Options, + RowsAtCompileTime = BlockSparseMatrixT::RowsAtCompileTime, + ColsAtCompileTime = BlockSparseMatrixT::ColsAtCompileTime, + MaxColsAtCompileTime = BlockSparseMatrixT::MaxColsAtCompileTime, + MaxRowsAtCompileTime = BlockSparseMatrixT::MaxRowsAtCompileTime + }; + public: + BlockSparseMatrixView(const BlockSparseMatrixT& spblockmat) + : m_spblockmat(spblockmat) + {} + + Index outerSize() const + { + return (Flags&RowMajorBit) == 1 ? 
this->rows() : this->cols(); + } + Index cols() const + { + return m_spblockmat.blockCols(); + } + Index rows() const + { + return m_spblockmat.blockRows(); + } + Scalar coeff(Index row, Index col) + { + return m_spblockmat.coeff(row, col); + } + Scalar coeffRef(Index row, Index col) + { + return m_spblockmat.coeffRef(row, col); + } + // Wrapper to iterate over all blocks + class InnerIterator : public BlockSparseMatrixT::BlockInnerIterator + { + public: + InnerIterator(const BlockSparseMatrixView& mat, Index outer) + : BlockSparseMatrixT::BlockInnerIterator(mat.m_spblockmat, outer) + {} + + }; + + protected: + const BlockSparseMatrixT& m_spblockmat; +}; + +// Proxy to view a regular vector as a block vector +template +class BlockVectorView +{ + public: + enum { + BlockSize = BlockSparseMatrixT::BlockSize, + ColsAtCompileTime = VectorType::ColsAtCompileTime, + RowsAtCompileTime = VectorType::RowsAtCompileTime, + Flags = VectorType::Flags + }; + typedef Ref >Scalar; + typedef typename BlockSparseMatrixT::Index Index; + public: + BlockVectorView(const BlockSparseMatrixT& spblockmat, const VectorType& vec) + : m_spblockmat(spblockmat),m_vec(vec) + { } + inline Index cols() const + { + return m_vec.cols(); + } + inline Index size() const + { + return m_spblockmat.blockRows(); + } + inline Scalar coeff(Index bi) const + { + Index startRow = m_spblockmat.blockRowsIndex(bi); + Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; + return m_vec.middleRows(startRow, rowSize); + } + inline Scalar coeff(Index bi, Index j) const + { + Index startRow = m_spblockmat.blockRowsIndex(bi); + Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; + return m_vec.block(startRow, j, rowSize, 1); + } + protected: + const BlockSparseMatrixT& m_spblockmat; + const VectorType& m_vec; +}; + +template class BlockVectorReturn; + + +// Proxy to view a regular vector as a block vector +template +class BlockVectorReturn +{ + public: + enum { + ColsAtCompileTime = 
VectorType::ColsAtCompileTime, + RowsAtCompileTime = VectorType::RowsAtCompileTime, + Flags = VectorType::Flags + }; + typedef Ref > Scalar; + typedef typename BlockSparseMatrixT::Index Index; + public: + BlockVectorReturn(const BlockSparseMatrixT& spblockmat, VectorType& vec) + : m_spblockmat(spblockmat),m_vec(vec) + { } + inline Index size() const + { + return m_spblockmat.blockRows(); + } + inline Scalar coeffRef(Index bi) + { + Index startRow = m_spblockmat.blockRowsIndex(bi); + Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; + return m_vec.middleRows(startRow, rowSize); + } + inline Scalar coeffRef(Index bi, Index j) + { + Index startRow = m_spblockmat.blockRowsIndex(bi); + Index rowSize = m_spblockmat.blockRowsIndex(bi+1) - startRow; + return m_vec.block(startRow, j, rowSize, 1); + } + + protected: + const BlockSparseMatrixT& m_spblockmat; + VectorType& m_vec; +}; + +// Block version of the sparse dense product +template +class BlockSparseTimeDenseProduct; + +namespace internal { + +template +struct traits > +{ + typedef Dense StorageKind; + typedef MatrixXpr XprKind; + typedef typename BlockSparseMatrixT::Scalar Scalar; + typedef typename BlockSparseMatrixT::Index Index; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = 0, + CoeffReadCost = internal::traits::CoeffReadCost + }; +}; +} // end namespace internal + +template +class BlockSparseTimeDenseProduct + : public ProductBase, Lhs, Rhs> +{ + public: + EIGEN_PRODUCT_PUBLIC_INTERFACE(BlockSparseTimeDenseProduct) + + BlockSparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) + {} + + template void scaleAndAddTo(Dest& dest, const typename Rhs::Scalar& alpha) const + { + BlockVectorReturn tmpDest(m_lhs, dest); + internal::sparse_time_dense_product( BlockSparseMatrixView(m_lhs), BlockVectorView(m_lhs, m_rhs), tmpDest, alpha); + } + + private: + BlockSparseTimeDenseProduct& 
operator=(const BlockSparseTimeDenseProduct&); +}; + +template +class BlockSparseMatrix : public SparseMatrixBase > +{ + public: + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef _StorageIndex StorageIndex; + typedef typename internal::ref_selector >::type Nested; + + enum { + Options = _Options, + Flags = Options, + BlockSize=_BlockAtCompileTime, + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + IsVectorAtCompileTime = 0, + IsColMajor = Flags&RowMajorBit ? 0 : 1 + }; + typedef Matrix BlockScalar; + typedef Matrix BlockRealScalar; + typedef typename internal::conditional<_BlockAtCompileTime==Dynamic, Scalar, BlockScalar>::type BlockScalarReturnType; + typedef BlockSparseMatrix PlainObject; + public: + // Default constructor + BlockSparseMatrix() + : m_innerBSize(0),m_outerBSize(0),m_innerOffset(0),m_outerOffset(0), + m_nonzerosblocks(0),m_values(0),m_blockPtr(0),m_indices(0), + m_outerIndex(0),m_blockSize(BlockSize) + { } + + + /** + * \brief Construct and resize + * + */ + BlockSparseMatrix(Index brow, Index bcol) + : m_innerBSize(IsColMajor ? brow : bcol), + m_outerBSize(IsColMajor ? bcol : brow), + m_innerOffset(0),m_outerOffset(0),m_nonzerosblocks(0), + m_values(0),m_blockPtr(0),m_indices(0), + m_outerIndex(0),m_blockSize(BlockSize) + { } + + /** + * \brief Copy-constructor + */ + BlockSparseMatrix(const BlockSparseMatrix& other) + : m_innerBSize(other.m_innerBSize),m_outerBSize(other.m_outerBSize), + m_nonzerosblocks(other.m_nonzerosblocks),m_nonzeros(other.m_nonzeros), + m_blockPtr(0),m_blockSize(other.m_blockSize) + { + // should we allow copying between variable-size blocks and fixed-size blocks ?? 
+ eigen_assert(m_blockSize == BlockSize && " CAN NOT COPY BETWEEN FIXED-SIZE AND VARIABLE-SIZE BLOCKS"); + + std::copy(other.m_innerOffset, other.m_innerOffset+m_innerBSize+1, m_innerOffset); + std::copy(other.m_outerOffset, other.m_outerOffset+m_outerBSize+1, m_outerOffset); + std::copy(other.m_values, other.m_values+m_nonzeros, m_values); + + if(m_blockSize != Dynamic) + std::copy(other.m_blockPtr, other.m_blockPtr+m_nonzerosblocks, m_blockPtr); + + std::copy(other.m_indices, other.m_indices+m_nonzerosblocks, m_indices); + std::copy(other.m_outerIndex, other.m_outerIndex+m_outerBSize, m_outerIndex); + } + + friend void swap(BlockSparseMatrix& first, BlockSparseMatrix& second) + { + std::swap(first.m_innerBSize, second.m_innerBSize); + std::swap(first.m_outerBSize, second.m_outerBSize); + std::swap(first.m_innerOffset, second.m_innerOffset); + std::swap(first.m_outerOffset, second.m_outerOffset); + std::swap(first.m_nonzerosblocks, second.m_nonzerosblocks); + std::swap(first.m_nonzeros, second.m_nonzeros); + std::swap(first.m_values, second.m_values); + std::swap(first.m_blockPtr, second.m_blockPtr); + std::swap(first.m_indices, second.m_indices); + std::swap(first.m_outerIndex, second.m_outerIndex); + std::swap(first.m_BlockSize, second.m_blockSize); + } + + BlockSparseMatrix& operator=(BlockSparseMatrix other) + { + //Copy-and-swap paradigm ... 
avoid leaked data if thrown + swap(*this, other); + return *this; + } + + // Destructor + ~BlockSparseMatrix() + { + delete[] m_outerIndex; + delete[] m_innerOffset; + delete[] m_outerOffset; + delete[] m_indices; + delete[] m_blockPtr; + delete[] m_values; + } + + + /** + * \brief Constructor from a sparse matrix + * + */ + template + inline BlockSparseMatrix(const MatrixType& spmat) : m_blockSize(BlockSize) + { + EIGEN_STATIC_ASSERT((m_blockSize != Dynamic), THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE); + + *this = spmat; + } + + /** + * \brief Assignment from a sparse matrix with the same storage order + * + * Convert from a sparse matrix to block sparse matrix. + * \warning Before calling this function, tt is necessary to call + * either setBlockLayout() (matrices with variable-size blocks) + * or setBlockSize() (for fixed-size blocks). + */ + template + inline BlockSparseMatrix& operator=(const MatrixType& spmat) + { + eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) + && "Trying to assign to a zero-size matrix, call resize() first"); + eigen_assert(((MatrixType::Options&RowMajorBit) != IsColMajor) && "Wrong storage order"); + typedef SparseMatrix MatrixPatternType; + MatrixPatternType blockPattern(blockRows(), blockCols()); + m_nonzeros = 0; + + // First, compute the number of nonzero blocks and their locations + for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) + { + // Browse each outer block and compute the structure + std::vector nzblocksFlag(m_innerBSize,false); // Record the existing blocks + blockPattern.startVec(bj); + for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j) + { + typename MatrixType::InnerIterator it_spmat(spmat, j); + for(; it_spmat; ++it_spmat) + { + StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block + if(!nzblocksFlag[bi]) + { + // Save the index of this nonzero block + nzblocksFlag[bi] = true; + blockPattern.insertBackByOuterInnerUnordered(bj, bi) = true; + // Compute the total 
number of nonzeros (including explicit zeros in blocks) + m_nonzeros += blockOuterSize(bj) * blockInnerSize(bi); + } + } + } // end current outer block + } + blockPattern.finalize(); + + // Allocate the internal arrays + setBlockStructure(blockPattern); + + for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0); + for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) + { + // Now copy the values + for(StorageIndex j = blockOuterIndex(bj); j < blockOuterIndex(bj+1); ++j) + { + // Browse the outer block column by column (for column-major matrices) + typename MatrixType::InnerIterator it_spmat(spmat, j); + for(; it_spmat; ++it_spmat) + { + StorageIndex idx = 0; // Position of this block in the column block + StorageIndex bi = innerToBlock(it_spmat.index()); // Index of the current nonzero block + // Go to the inner block where this element belongs to + while(bi > m_indices[m_outerIndex[bj]+idx]) ++idx; // Not expensive for ordered blocks + StorageIndex idxVal;// Get the right position in the array of values for this element + if(m_blockSize == Dynamic) + { + // Offset from all blocks before ... + idxVal = m_blockPtr[m_outerIndex[bj]+idx]; + // ... and offset inside the block + idxVal += (j - blockOuterIndex(bj)) * blockOuterSize(bj) + it_spmat.index() - m_innerOffset[bi]; + } + else + { + // All blocks before + idxVal = (m_outerIndex[bj] + idx) * m_blockSize * m_blockSize; + // inside the block + idxVal += (j - blockOuterIndex(bj)) * m_blockSize + (it_spmat.index()%m_blockSize); + } + // Insert the value + m_values[idxVal] = it_spmat.value(); + } // end of this column + } // end of this block + } // end of this outer block + + return *this; + } + + /** + * \brief Set the nonzero block pattern of the matrix + * + * Given a sparse matrix describing the nonzero block pattern, + * this function prepares the internal pointers for values. + * After calling this function, any *nonzero* block (bi, bj) can be set + * with a simple call to coeffRef(bi,bj). 
+ * + * + * \warning Before calling this function, tt is necessary to call + * either setBlockLayout() (matrices with variable-size blocks) + * or setBlockSize() (for fixed-size blocks). + * + * \param blockPattern Sparse matrix of boolean elements describing the block structure + * + * \sa setBlockLayout() \sa setBlockSize() + */ + template + void setBlockStructure(const MatrixType& blockPattern) + { + resize(blockPattern.rows(), blockPattern.cols()); + reserve(blockPattern.nonZeros()); + + // Browse the block pattern and set up the various pointers + m_outerIndex[0] = 0; + if(m_blockSize == Dynamic) m_blockPtr[0] = 0; + for(StorageIndex nz = 0; nz < m_nonzeros; ++nz) m_values[nz] = Scalar(0); + for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) + { + //Browse each outer block + + //First, copy and save the indices of nonzero blocks + //FIXME : find a way to avoid this ... + std::vector nzBlockIdx; + typename MatrixType::InnerIterator it(blockPattern, bj); + for(; it; ++it) + { + nzBlockIdx.push_back(it.index()); + } + std::sort(nzBlockIdx.begin(), nzBlockIdx.end()); + + // Now, fill block indices and (eventually) pointers to blocks + for(StorageIndex idx = 0; idx < nzBlockIdx.size(); ++idx) + { + StorageIndex offset = m_outerIndex[bj]+idx; // offset in m_indices + m_indices[offset] = nzBlockIdx[idx]; + if(m_blockSize == Dynamic) + m_blockPtr[offset] = m_blockPtr[offset-1] + blockInnerSize(nzBlockIdx[idx]) * blockOuterSize(bj); + // There is no blockPtr for fixed-size blocks... not needed !??? + } + // Save the pointer to the next outer block + m_outerIndex[bj+1] = m_outerIndex[bj] + nzBlockIdx.size(); + } + } + + /** + * \brief Set the number of rows and columns blocks + */ + inline void resize(Index brow, Index bcol) + { + m_innerBSize = IsColMajor ? brow : bcol; + m_outerBSize = IsColMajor ? 
bcol : brow; + } + + /** + * \brief set the block size at runtime for fixed-size block layout + * + * Call this only for fixed-size blocks + */ + inline void setBlockSize(Index blockSize) + { + m_blockSize = blockSize; + } + + /** + * \brief Set the row and column block layouts, + * + * This function set the size of each row and column block. + * So this function should be used only for blocks with variable size. + * \param rowBlocks : Number of rows per row block + * \param colBlocks : Number of columns per column block + * \sa resize(), setBlockSize() + */ + inline void setBlockLayout(const VectorXi& rowBlocks, const VectorXi& colBlocks) + { + const VectorXi& innerBlocks = IsColMajor ? rowBlocks : colBlocks; + const VectorXi& outerBlocks = IsColMajor ? colBlocks : rowBlocks; + eigen_assert(m_innerBSize == innerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS"); + eigen_assert(m_outerBSize == outerBlocks.size() && "CHECK THE NUMBER OF ROW OR COLUMN BLOCKS"); + m_outerBSize = outerBlocks.size(); + // starting index of blocks... cumulative sums + m_innerOffset = new StorageIndex[m_innerBSize+1]; + m_outerOffset = new StorageIndex[m_outerBSize+1]; + m_innerOffset[0] = 0; + m_outerOffset[0] = 0; + std::partial_sum(&innerBlocks[0], &innerBlocks[m_innerBSize-1]+1, &m_innerOffset[1]); + std::partial_sum(&outerBlocks[0], &outerBlocks[m_outerBSize-1]+1, &m_outerOffset[1]); + + // Compute the total number of nonzeros + m_nonzeros = 0; + for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) + for(StorageIndex bi = 0; bi < m_innerBSize; ++bi) + m_nonzeros += outerBlocks[bj] * innerBlocks[bi]; + + } + + /** + * \brief Allocate the internal array of pointers to blocks and their inner indices + * + * \note For fixed-size blocks, call setBlockSize() to set the block. + * And For variable-size blocks, call setBlockLayout() before using this function + * + * \param nonzerosblocks Number of nonzero blocks. 
The total number of nonzeros is + * is computed in setBlockLayout() for variable-size blocks + * \sa setBlockSize() + */ + inline void reserve(const Index nonzerosblocks) + { + eigen_assert((m_innerBSize != 0 && m_outerBSize != 0) && + "TRYING TO RESERVE ZERO-SIZE MATRICES, CALL resize() first"); + + //FIXME Should free if already allocated + m_outerIndex = new StorageIndex[m_outerBSize+1]; + + m_nonzerosblocks = nonzerosblocks; + if(m_blockSize != Dynamic) + { + m_nonzeros = nonzerosblocks * (m_blockSize * m_blockSize); + m_blockPtr = 0; + } + else + { + // m_nonzeros is already computed in setBlockLayout() + m_blockPtr = new StorageIndex[m_nonzerosblocks+1]; + } + m_indices = new StorageIndex[m_nonzerosblocks+1]; + m_values = new Scalar[m_nonzeros]; + } + + + /** + * \brief Fill values in a matrix from a triplet list. + * + * Each triplet item has a block stored in an Eigen dense matrix. + * The InputIterator class should provide the functions row(), col() and value() + * + * \note For fixed-size blocks, call setBlockSize() before this function. 
+ * + * FIXME Do not accept duplicates + */ + template + void setFromTriplets(const InputIterator& begin, const InputIterator& end) + { + eigen_assert((m_innerBSize!=0 && m_outerBSize !=0) && "ZERO BLOCKS, PLEASE CALL resize() before"); + + /* First, sort the triplet list + * FIXME This can be unnecessarily expensive since only the inner indices have to be sorted + * The best approach is like in SparseMatrix::setFromTriplets() + */ + internal::TripletComp tripletcomp; + std::sort(begin, end, tripletcomp); + + /* Count the number of rows and column blocks, + * and the number of nonzero blocks per outer dimension + */ + VectorXi rowBlocks(m_innerBSize); // Size of each block row + VectorXi colBlocks(m_outerBSize); // Size of each block column + rowBlocks.setZero(); colBlocks.setZero(); + VectorXi nzblock_outer(m_outerBSize); // Number of nz blocks per outer vector + VectorXi nz_outer(m_outerBSize); // Number of nz per outer vector...for variable-size blocks + nzblock_outer.setZero(); + nz_outer.setZero(); + for(InputIterator it(begin); it !=end; ++it) + { + eigen_assert(it->row() >= 0 && it->row() < this->blockRows() && it->col() >= 0 && it->col() < this->blockCols()); + eigen_assert((it->value().rows() == it->value().cols() && (it->value().rows() == m_blockSize)) + || (m_blockSize == Dynamic)); + + if(m_blockSize == Dynamic) + { + eigen_assert((rowBlocks[it->row()] == 0 || rowBlocks[it->row()] == it->value().rows()) && + "NON CORRESPONDING SIZES FOR ROW BLOCKS"); + eigen_assert((colBlocks[it->col()] == 0 || colBlocks[it->col()] == it->value().cols()) && + "NON CORRESPONDING SIZES FOR COLUMN BLOCKS"); + rowBlocks[it->row()] =it->value().rows(); + colBlocks[it->col()] = it->value().cols(); + } + nz_outer(IsColMajor ? it->col() : it->row()) += it->value().rows() * it->value().cols(); + nzblock_outer(IsColMajor ? 
it->col() : it->row())++; + } + // Allocate member arrays + if(m_blockSize == Dynamic) setBlockLayout(rowBlocks, colBlocks); + StorageIndex nzblocks = nzblock_outer.sum(); + reserve(nzblocks); + + // Temporary markers + VectorXi block_id(m_outerBSize); // To be used as a block marker during insertion + + // Setup outer index pointers and markers + m_outerIndex[0] = 0; + if (m_blockSize == Dynamic) m_blockPtr[0] = 0; + for(StorageIndex bj = 0; bj < m_outerBSize; ++bj) + { + m_outerIndex[bj+1] = m_outerIndex[bj] + nzblock_outer(bj); + block_id(bj) = m_outerIndex[bj]; + if(m_blockSize==Dynamic) + { + m_blockPtr[m_outerIndex[bj+1]] = m_blockPtr[m_outerIndex[bj]] + nz_outer(bj); + } + } + + // Fill the matrix + for(InputIterator it(begin); it!=end; ++it) + { + StorageIndex outer = IsColMajor ? it->col() : it->row(); + StorageIndex inner = IsColMajor ? it->row() : it->col(); + m_indices[block_id(outer)] = inner; + StorageIndex block_size = it->value().rows()*it->value().cols(); + StorageIndex nz_marker = blockPtr(block_id[outer]); + memcpy(&(m_values[nz_marker]), it->value().data(), block_size * sizeof(Scalar)); + if(m_blockSize == Dynamic) + { + m_blockPtr[block_id(outer)+1] = m_blockPtr[block_id(outer)] + block_size; + } + block_id(outer)++; + } + + // An alternative when the outer indices are sorted...no need to use an array of markers +// for(Index bcol = 0; bcol < m_outerBSize; ++bcol) +// { +// Index id = 0, id_nz = 0, id_nzblock = 0; +// for(InputIterator it(begin); it!=end; ++it) +// { +// while (idvalue().rows()*it->value().cols(); +// m_blockPtr[id_nzblock+1] = m_blockPtr[id_nzblock] + block_size; +// id_nzblock++; +// memcpy(&(m_values[id_nz]),it->value().data(), block_size*sizeof(Scalar)); +// id_nz += block_size; +// } +// while(id < m_outerBSize-1) // Empty columns at the end +// { +// id++; +// m_outerIndex[id+1]=m_outerIndex[id]; +// } +// } + } + + + /** + * \returns the number of rows + */ + inline Index rows() const + { +// return blockRows(); + return 
(IsColMajor ? innerSize() : outerSize()); + } + + /** + * \returns the number of cols + */ + inline Index cols() const + { +// return blockCols(); + return (IsColMajor ? outerSize() : innerSize()); + } + + inline Index innerSize() const + { + if(m_blockSize == Dynamic) return m_innerOffset[m_innerBSize]; + else return (m_innerBSize * m_blockSize) ; + } + + inline Index outerSize() const + { + if(m_blockSize == Dynamic) return m_outerOffset[m_outerBSize]; + else return (m_outerBSize * m_blockSize) ; + } + /** \returns the number of rows grouped by blocks */ + inline Index blockRows() const + { + return (IsColMajor ? m_innerBSize : m_outerBSize); + } + /** \returns the number of columns grouped by blocks */ + inline Index blockCols() const + { + return (IsColMajor ? m_outerBSize : m_innerBSize); + } + + inline Index outerBlocks() const { return m_outerBSize; } + inline Index innerBlocks() const { return m_innerBSize; } + + /** \returns the block index where outer belongs to */ + inline Index outerToBlock(Index outer) const + { + eigen_assert(outer < outerSize() && "OUTER INDEX OUT OF BOUNDS"); + + if(m_blockSize != Dynamic) + return (outer / m_blockSize); // Integer division + + StorageIndex b_outer = 0; + while(m_outerOffset[b_outer] <= outer) ++b_outer; + return b_outer - 1; + } + /** \returns the block index where inner belongs to */ + inline Index innerToBlock(Index inner) const + { + eigen_assert(inner < innerSize() && "OUTER INDEX OUT OF BOUNDS"); + + if(m_blockSize != Dynamic) + return (inner / m_blockSize); // Integer division + + StorageIndex b_inner = 0; + while(m_innerOffset[b_inner] <= inner) ++b_inner; + return b_inner - 1; + } + + /** + *\returns a reference to the (i,j) block as an Eigen Dense Matrix + */ + Ref coeffRef(Index brow, Index bcol) + { + eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); + eigen_assert(bcol < blockCols() && "BLOCK nzblocksFlagCOLUMN OUT OF BOUNDS"); + + StorageIndex rsize = IsColMajor ? 
blockInnerSize(brow): blockOuterSize(bcol); + StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); + StorageIndex inner = IsColMajor ? brow : bcol; + StorageIndex outer = IsColMajor ? bcol : brow; + StorageIndex offset = m_outerIndex[outer]; + while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) + offset++; + if(m_indices[offset] == inner) + { + return Map(&(m_values[blockPtr(offset)]), rsize, csize); + } + else + { + //FIXME the block does not exist, Insert it !!!!!!!!! + eigen_assert("DYNAMIC INSERTION IS NOT YET SUPPORTED"); + } + } + + /** + * \returns the value of the (i,j) block as an Eigen Dense Matrix + */ + Map coeff(Index brow, Index bcol) const + { + eigen_assert(brow < blockRows() && "BLOCK ROW INDEX OUT OF BOUNDS"); + eigen_assert(bcol < blockCols() && "BLOCK COLUMN OUT OF BOUNDS"); + + StorageIndex rsize = IsColMajor ? blockInnerSize(brow): blockOuterSize(bcol); + StorageIndex csize = IsColMajor ? blockOuterSize(bcol) : blockInnerSize(brow); + StorageIndex inner = IsColMajor ? brow : bcol; + StorageIndex outer = IsColMajor ? 
bcol : brow; + StorageIndex offset = m_outerIndex[outer]; + while(offset < m_outerIndex[outer+1] && m_indices[offset] != inner) offset++; + if(m_indices[offset] == inner) + { + return Map (&(m_values[blockPtr(offset)]), rsize, csize); + } + else +// return BlockScalar::Zero(rsize, csize); + eigen_assert("NOT YET SUPPORTED"); + } + + // Block Matrix times vector product + template + BlockSparseTimeDenseProduct operator*(const VecType& lhs) const + { + return BlockSparseTimeDenseProduct(*this, lhs); + } + + /** \returns the number of nonzero blocks */ + inline Index nonZerosBlocks() const { return m_nonzerosblocks; } + /** \returns the total number of nonzero elements, including eventual explicit zeros in blocks */ + inline Index nonZeros() const { return m_nonzeros; } + + inline BlockScalarReturnType *valuePtr() {return static_cast(m_values);} +// inline Scalar *valuePtr(){ return m_values; } + inline StorageIndex *innerIndexPtr() {return m_indices; } + inline const StorageIndex *innerIndexPtr() const {return m_indices; } + inline StorageIndex *outerIndexPtr() {return m_outerIndex; } + inline const StorageIndex* outerIndexPtr() const {return m_outerIndex; } + + /** \brief for compatibility purposes with the SparseMatrix class */ + inline bool isCompressed() const {return true;} + /** + * \returns the starting index of the bi row block + */ + inline Index blockRowsIndex(Index bi) const + { + return IsColMajor ? blockInnerIndex(bi) : blockOuterIndex(bi); + } + + /** + * \returns the starting index of the bj col block + */ + inline Index blockColsIndex(Index bj) const + { + return IsColMajor ? blockOuterIndex(bj) : blockInnerIndex(bj); + } + + inline Index blockOuterIndex(Index bj) const + { + return (m_blockSize == Dynamic) ? m_outerOffset[bj] : (bj * m_blockSize); + } + inline Index blockInnerIndex(Index bi) const + { + return (m_blockSize == Dynamic) ? m_innerOffset[bi] : (bi * m_blockSize); + } + + // Not needed ??? 
+ inline Index blockInnerSize(Index bi) const + { + return (m_blockSize == Dynamic) ? (m_innerOffset[bi+1] - m_innerOffset[bi]) : m_blockSize; + } + inline Index blockOuterSize(Index bj) const + { + return (m_blockSize == Dynamic) ? (m_outerOffset[bj+1]- m_outerOffset[bj]) : m_blockSize; + } + + /** + * \brief Browse the matrix by outer index + */ + class InnerIterator; // Browse column by column + + /** + * \brief Browse the matrix by block outer index + */ + class BlockInnerIterator; // Browse block by block + + friend std::ostream & operator << (std::ostream & s, const BlockSparseMatrix& m) + { + for (StorageIndex j = 0; j < m.outerBlocks(); ++j) + { + BlockInnerIterator itb(m, j); + for(; itb; ++itb) + { + s << "("<::type()); + } + + + protected: +// inline Index blockDynIdx(Index id, internal::true_type) const +// { +// return m_blockPtr[id]; +// } +// inline Index blockDynIdx(Index id, internal::false_type) const +// { +// return id * BlockSize * BlockSize; +// } + + // To be implemented + // Insert a block at a particular location... need to make a room for that + Map insert(Index brow, Index bcol); + + Index m_innerBSize; // Number of block rows + Index m_outerBSize; // Number of block columns + StorageIndex *m_innerOffset; // Starting index of each inner block (size m_innerBSize+1) + StorageIndex *m_outerOffset; // Starting index of each outer block (size m_outerBSize+1) + Index m_nonzerosblocks; // Total nonzeros blocks (lower than m_innerBSize x m_outerBSize) + Index m_nonzeros; // Total nonzeros elements + Scalar *m_values; //Values stored block column after block column (size m_nonzeros) + StorageIndex *m_blockPtr; // Pointer to the beginning of each block in m_values, size m_nonzeroblocks ... null for fixed-size blocks + StorageIndex *m_indices; //Inner block indices, size m_nonzerosblocks ... OK + StorageIndex *m_outerIndex; // Starting pointer of each block column in m_indices (size m_outerBSize)... 
OK + Index m_blockSize; // Size of a block for fixed-size blocks, otherwise -1 +}; + +template +class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::BlockInnerIterator +{ + public: + + enum{ + Flags = _Options + }; + + BlockInnerIterator(const BlockSparseMatrix& mat, const Index outer) + : m_mat(mat),m_outer(outer), + m_id(mat.m_outerIndex[outer]), + m_end(mat.m_outerIndex[outer+1]) + { + } + + inline BlockInnerIterator& operator++() {m_id++; return *this; } + + inline const Map value() const + { + return Map(&(m_mat.m_values[m_mat.blockPtr(m_id)]), + rows(),cols()); + } + inline Map valueRef() + { + return Map(&(m_mat.m_values[m_mat.blockPtr(m_id)]), + rows(),cols()); + } + // Block inner index + inline Index index() const {return m_mat.m_indices[m_id]; } + inline Index outer() const { return m_outer; } + // block row index + inline Index row() const {return index(); } + // block column index + inline Index col() const {return outer(); } + // FIXME Number of rows in the current block + inline Index rows() const { return (m_mat.m_blockSize==Dynamic) ? (m_mat.m_innerOffset[index()+1] - m_mat.m_innerOffset[index()]) : m_mat.m_blockSize; } + // Number of columns in the current block ... + inline Index cols() const { return (m_mat.m_blockSize==Dynamic) ? 
(m_mat.m_outerOffset[m_outer+1]-m_mat.m_outerOffset[m_outer]) : m_mat.m_blockSize;} + inline operator bool() const { return (m_id < m_end); } + + protected: + const BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, StorageIndex>& m_mat; + const Index m_outer; + Index m_id; + Index m_end; +}; + +template +class BlockSparseMatrix<_Scalar, _BlockAtCompileTime, _Options, _StorageIndex>::InnerIterator +{ + public: + InnerIterator(const BlockSparseMatrix& mat, Index outer) + : m_mat(mat),m_outerB(mat.outerToBlock(outer)),m_outer(outer), + itb(mat, mat.outerToBlock(outer)), + m_offset(outer - mat.blockOuterIndex(m_outerB)) + { + if (itb) + { + m_id = m_mat.blockInnerIndex(itb.index()); + m_start = m_id; + m_end = m_mat.blockInnerIndex(itb.index()+1); + } + } + inline InnerIterator& operator++() + { + m_id++; + if (m_id >= m_end) + { + ++itb; + if (itb) + { + m_id = m_mat.blockInnerIndex(itb.index()); + m_start = m_id; + m_end = m_mat.blockInnerIndex(itb.index()+1); + } + } + return *this; + } + inline const Scalar& value() const + { + return itb.value().coeff(m_id - m_start, m_offset); + } + inline Scalar& valueRef() + { + return itb.valueRef().coeff(m_id - m_start, m_offset); + } + inline Index index() const { return m_id; } + inline Index outer() const {return m_outer; } + inline Index col() const {return outer(); } + inline Index row() const { return index();} + inline operator bool() const + { + return itb; + } + protected: + const BlockSparseMatrix& m_mat; + const Index m_outer; + const Index m_outerB; + BlockInnerIterator itb; // Iterator through the blocks + const Index m_offset; // Position of this column in the block + Index m_start; // starting inner index of this block + Index m_id; // current inner index in the block + Index m_end; // starting inner index of the next block + +}; +} // end namespace Eigen + +#endif // EIGEN_SPARSEBLOCKMATRIX_H diff --git a/external/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h 
b/external/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h new file mode 100644 index 0000000..42c99e4 --- /dev/null +++ b/external/unsupported/Eigen/src/SparseExtra/DynamicSparseMatrix.h @@ -0,0 +1,404 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H +#define EIGEN_DYNAMIC_SPARSEMATRIX_H + +namespace Eigen { + +/** \deprecated use a SparseMatrix in an uncompressed mode + * + * \class DynamicSparseMatrix + * + * \brief A sparse matrix class designed for matrix assembly purpose + * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * + * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows + * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is + * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows + * otherwise. + * + * Internally, the data are stored as a std::vector of compressed vector. The performances of random writes might + * decrease as the number of nonzeros per inner-vector increase. In practice, we observed very good performance + * till about 100 nonzeros/vector, and the performance remains relatively good till 500 nonzeros/vectors. 
+ * + * \see SparseMatrix + */ + +namespace internal { +template +struct traits > +{ + typedef _Scalar Scalar; + typedef _StorageIndex StorageIndex; + typedef Sparse StorageKind; + typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = _Options | NestByRefBit | LvalueBit, + CoeffReadCost = NumTraits::ReadCost, + SupportedAccessPatterns = OuterRandomAccessPattern + }; +}; +} + +template + class DynamicSparseMatrix + : public SparseMatrixBase > +{ + typedef SparseMatrixBase Base; + using Base::convert_index; + public: + EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix) + // FIXME: why are these operator already alvailable ??? + // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=) + // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=) + typedef MappedSparseMatrix Map; + using Base::IsRowMajor; + using Base::operator=; + enum { + Options = _Options + }; + + protected: + + typedef DynamicSparseMatrix TransposedSparseMatrix; + + Index m_innerSize; + std::vector > m_data; + + public: + + inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; } + inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); } + inline Index innerSize() const { return m_innerSize; } + inline Index outerSize() const { return convert_index(m_data.size()); } + inline Index innerNonZeros(Index j) const { return m_data[j].size(); } + + std::vector >& _data() { return m_data; } + const std::vector >& _data() const { return m_data; } + + /** \returns the coefficient value at given position \a row, \a col + * This operation involes a log(rho*outer_size) binary search. + */ + inline Scalar coeff(Index row, Index col) const + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + return m_data[outer].at(inner); + } + + /** \returns a reference to the coefficient value at given position \a row, \a col + * This operation involes a log(rho*outer_size) binary search. If the coefficient does not + * exist yet, then a sorted insertion into a sequential buffer is performed. + */ + inline Scalar& coeffRef(Index row, Index col) + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + return m_data[outer].atWithInsertion(inner); + } + + class InnerIterator; + class ReverseInnerIterator; + + void setZero() + { + for (Index j=0; j0) + { + Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4)); + for (Index j=0; j(m_data[outer].size()) - 1; + m_data[outer].resize(id+2,1); + + while ( (id >= startId) && (m_data[outer].index(id) > inner) ) + { + m_data[outer].index(id+1) = m_data[outer].index(id); + m_data[outer].value(id+1) = m_data[outer].value(id); + --id; + } + m_data[outer].index(id+1) = inner; + m_data[outer].value(id+1) = 0; + return m_data[outer].value(id+1); + } + + /** Does nothing: provided for compatibility with SparseMatrix */ + inline void finalize() {} + + /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */ + void prune(Scalar reference, RealScalar epsilon = NumTraits::dummy_precision()) + { + for (Index j=0; jinnerSize) + { + // remove all coefficients with innerCoord>=innerSize + // TODO + //std::cerr << "not implemented yet\n"; + exit(2); + } + if (m_data.size() != outerSize) + { + m_data.resize(outerSize); + } + } + + /** The class DynamicSparseMatrix is deprecated */ + EIGEN_DEPRECATED inline DynamicSparseMatrix() + : m_innerSize(0), m_data(0) + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + eigen_assert(innerSize()==0 && outerSize()==0); + } + + /** The class DynamicSparseMatrix is deprecated */ + EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols) + : 
m_innerSize(0) + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + resize(rows, cols); + } + + /** The class DynamicSparseMatrix is deprecated */ + template + EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase& other) + : m_innerSize(0) + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + Base::operator=(other.derived()); + } + + inline DynamicSparseMatrix(const DynamicSparseMatrix& other) + : Base(), m_innerSize(0) + { + #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN + #endif + *this = other.derived(); + } + + inline void swap(DynamicSparseMatrix& other) + { + //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); + std::swap(m_innerSize, other.m_innerSize); + //std::swap(m_outerSize, other.m_outerSize); + m_data.swap(other.m_data); + } + + inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other) + { + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else + { + resize(other.rows(), other.cols()); + m_data = other.m_data; + } + return *this; + } + + /** Destructor */ + inline ~DynamicSparseMatrix() {} + + public: + + /** \deprecated + * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */ + EIGEN_DEPRECATED void startFill(Index reserveSize = 1000) + { + setZero(); + reserve(reserveSize); + } + + /** \deprecated use insert() + * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that: + * 1 - the coefficient does not exist yet + * 2 - this the coefficient with greater inner coordinate for the given outer coordinate. + * In other words, assuming \c *this is column-major, then there must not exists any nonzero coefficient of coordinates + * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid. 
+ * + * \see fillrand(), coeffRef() + */ + EIGEN_DEPRECATED Scalar& fill(Index row, Index col) + { + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + return insertBack(outer,inner); + } + + /** \deprecated use insert() + * Like fill() but with random inner coordinates. + * Compared to the generic coeffRef(), the unique limitation is that we assume + * the coefficient does not exist yet. + */ + EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col) + { + return insert(row,col); + } + + /** \deprecated use finalize() + * Does nothing. Provided for compatibility with SparseMatrix. */ + EIGEN_DEPRECATED void endFill() {} + +# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN +# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN +# endif + }; + +template +class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator +{ + typedef typename SparseVector::InnerIterator Base; + public: + InnerIterator(const DynamicSparseMatrix& mat, Index outer) + : Base(mat.m_data[outer]), m_outer(outer) + {} + + inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } + inline Index col() const { return IsRowMajor ? Base::index() : m_outer; } + inline Index outer() const { return m_outer; } + + protected: + const Index m_outer; +}; + +template +class DynamicSparseMatrix::ReverseInnerIterator : public SparseVector::ReverseInnerIterator +{ + typedef typename SparseVector::ReverseInnerIterator Base; + public: + ReverseInnerIterator(const DynamicSparseMatrix& mat, Index outer) + : Base(mat.m_data[outer]), m_outer(outer) + {} + + inline Index row() const { return IsRowMajor ? m_outer : Base::index(); } + inline Index col() const { return IsRowMajor ? 
Base::index() : m_outer; } + inline Index outer() const { return m_outer; } + + protected: + const Index m_outer; +}; + +namespace internal { + +template +struct evaluator > + : evaluator_base > +{ + typedef _Scalar Scalar; + typedef DynamicSparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType; + typedef typename SparseMatrixType::InnerIterator InnerIterator; + typedef typename SparseMatrixType::ReverseInnerIterator ReverseInnerIterator; + + enum { + CoeffReadCost = NumTraits<_Scalar>::ReadCost, + Flags = SparseMatrixType::Flags + }; + + evaluator() : m_matrix(0) {} + evaluator(const SparseMatrixType &mat) : m_matrix(&mat) {} + + operator SparseMatrixType&() { return m_matrix->const_cast_derived(); } + operator const SparseMatrixType&() const { return *m_matrix; } + + Scalar coeff(Index row, Index col) const { return m_matrix->coeff(row,col); } + + Index nonZerosEstimate() const { return m_matrix->nonZeros(); } + + const SparseMatrixType *m_matrix; +}; + +} + +} // end namespace Eigen + +#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H diff --git a/external/unsupported/Eigen/src/SparseExtra/MarketIO.h b/external/unsupported/Eigen/src/SparseExtra/MarketIO.h new file mode 100644 index 0000000..dd786d5 --- /dev/null +++ b/external/unsupported/Eigen/src/SparseExtra/MarketIO.h @@ -0,0 +1,282 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2011 Gael Guennebaud +// Copyright (C) 2012 Desire NUENTSA WAKAM +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPARSE_MARKET_IO_H +#define EIGEN_SPARSE_MARKET_IO_H + +#include +#include + +namespace Eigen { + +namespace internal +{ + template + inline void GetMarketLine (const char* line, StorageIndex& i, StorageIndex& j, Scalar& value) + { + std::stringstream sline(line); + sline >> i >> j >> value; + } + + template<> inline void GetMarketLine (const char* line, int& i, int& j, float& value) + { std::sscanf(line, "%d %d %g", &i, &j, &value); } + + template<> inline void GetMarketLine (const char* line, int& i, int& j, double& value) + { std::sscanf(line, "%d %d %lg", &i, &j, &value); } + + template<> inline void GetMarketLine (const char* line, int& i, int& j, std::complex& value) + { std::sscanf(line, "%d %d %g %g", &i, &j, &numext::real_ref(value), &numext::imag_ref(value)); } + + template<> inline void GetMarketLine (const char* line, int& i, int& j, std::complex& value) + { std::sscanf(line, "%d %d %lg %lg", &i, &j, &numext::real_ref(value), &numext::imag_ref(value)); } + + template + inline void GetMarketLine (const char* line, StorageIndex& i, StorageIndex& j, std::complex& value) + { + std::stringstream sline(line); + Scalar valR, valI; + sline >> i >> j >> valR >> valI; + value = std::complex(valR,valI); + } + + template + inline void GetVectorElt (const std::string& line, RealScalar& val) + { + std::istringstream newline(line); + newline >> val; + } + + template + inline void GetVectorElt (const std::string& line, std::complex& val) + { + RealScalar valR, valI; + std::istringstream newline(line); + newline >> valR >> valI; + val = std::complex(valR, valI); + } + + template + inline void putMarketHeader(std::string& header,int sym) + { + header= "%%MatrixMarket matrix coordinate "; + if(internal::is_same >::value || internal::is_same >::value) + { + header += " complex"; + if(sym == Symmetric) header += " symmetric"; + else if (sym == SelfAdjoint) header += " Hermitian"; + else header += " general"; + } + else + { + header += " real"; + if(sym == 
Symmetric) header += " symmetric"; + else header += " general"; + } + } + + template + inline void PutMatrixElt(Scalar value, StorageIndex row, StorageIndex col, std::ofstream& out) + { + out << row << " "<< col << " " << value << "\n"; + } + template + inline void PutMatrixElt(std::complex value, StorageIndex row, StorageIndex col, std::ofstream& out) + { + out << row << " " << col << " " << value.real() << " " << value.imag() << "\n"; + } + + + template + inline void putVectorElt(Scalar value, std::ofstream& out) + { + out << value << "\n"; + } + template + inline void putVectorElt(std::complex value, std::ofstream& out) + { + out << value.real() << " " << value.imag()<< "\n"; + } + +} // end namespace internal + +inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector) +{ + sym = 0; + iscomplex = false; + isvector = false; + std::ifstream in(filename.c_str(),std::ios::in); + if(!in) + return false; + + std::string line; + // The matrix header is always the first line in the file + std::getline(in, line); eigen_assert(in.good()); + + std::stringstream fmtline(line); + std::string substr[5]; + fmtline>> substr[0] >> substr[1] >> substr[2] >> substr[3] >> substr[4]; + if(substr[2].compare("array") == 0) isvector = true; + if(substr[3].compare("complex") == 0) iscomplex = true; + if(substr[4].compare("symmetric") == 0) sym = Symmetric; + else if (substr[4].compare("Hermitian") == 0) sym = SelfAdjoint; + + return true; +} + +template +bool loadMarket(SparseMatrixType& mat, const std::string& filename) +{ + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::StorageIndex StorageIndex; + std::ifstream input(filename.c_str(),std::ios::in); + if(!input) + return false; + + char rdbuffer[4096]; + input.rdbuf()->pubsetbuf(rdbuffer, 4096); + + const int maxBuffersize = 2048; + char buffer[maxBuffersize]; + + bool readsizes = false; + + typedef Triplet T; + std::vector elements; + + Index M(-1), 
N(-1), NNZ(-1); + Index count = 0; + while(input.getline(buffer, maxBuffersize)) + { + // skip comments + //NOTE An appropriate test should be done on the header to get the symmetry + if(buffer[0]=='%') + continue; + + if(!readsizes) + { + std::stringstream line(buffer); + line >> M >> N >> NNZ; + if(M > 0 && N > 0) + { + readsizes = true; + mat.resize(M,N); + mat.reserve(NNZ); + } + } + else + { + StorageIndex i(-1), j(-1); + Scalar value; + internal::GetMarketLine(buffer, i, j, value); + + i--; + j--; + if(i>=0 && j>=0 && i +bool loadMarketVector(VectorType& vec, const std::string& filename) +{ + typedef typename VectorType::Scalar Scalar; + std::ifstream in(filename.c_str(), std::ios::in); + if(!in) + return false; + + std::string line; + int n(0), col(0); + do + { // Skip comments + std::getline(in, line); eigen_assert(in.good()); + } while (line[0] == '%'); + std::istringstream newline(line); + newline >> n >> col; + eigen_assert(n>0 && col>0); + vec.resize(n); + int i = 0; + Scalar value; + while ( std::getline(in, line) && (i < n) ){ + internal::GetVectorElt(line, value); + vec(i++) = value; + } + in.close(); + if (i!=n){ + std::cerr<< "Unable to read all elements from file " << filename << "\n"; + return false; + } + return true; +} + +template +bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sym = 0) +{ + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::RealScalar RealScalar; + std::ofstream out(filename.c_str(),std::ios::out); + if(!out) + return false; + + out.flags(std::ios_base::scientific); + out.precision(std::numeric_limits::digits10 + 2); + std::string header; + internal::putMarketHeader(header, sym); + out << header << std::endl; + out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n"; + int count = 0; + for(int j=0; j +bool saveMarketVector (const VectorType& vec, const std::string& filename) +{ + typedef typename VectorType::Scalar Scalar; + typedef typename 
VectorType::RealScalar RealScalar; + std::ofstream out(filename.c_str(),std::ios::out); + if(!out) + return false; + + out.flags(std::ios_base::scientific); + out.precision(std::numeric_limits::digits10 + 2); + if(internal::is_same >::value || internal::is_same >::value) + out << "%%MatrixMarket matrix array complex general\n"; + else + out << "%%MatrixMarket matrix array real general\n"; + out << vec.size() << " "<< 1 << "\n"; + for (int i=0; i < vec.size(); i++){ + internal::putVectorElt(vec(i), out); + } + out.close(); + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_SPARSE_MARKET_IO_H diff --git a/external/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h b/external/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h new file mode 100644 index 0000000..02916ea --- /dev/null +++ b/external/unsupported/Eigen/src/SparseExtra/MatrixMarketIterator.h @@ -0,0 +1,247 @@ + +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2012 Desire NUENTSA WAKAM +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BROWSE_MATRICES_H +#define EIGEN_BROWSE_MATRICES_H + +namespace Eigen { + +enum { + SPD = 0x100, + NonSymmetric = 0x0 +}; + +/** + * @brief Iterator to browse matrices from a specified folder + * + * This is used to load all the matrices from a folder. + * The matrices should be in Matrix Market format + * It is assumed that the matrices are named as matname.mtx + * and matname_SPD.mtx if the matrix is Symmetric and positive definite (or Hermitian) + * The right hand side vectors are loaded as well, if they exist. + * They should be named as matname_b.mtx. + * Note that the right hand side for a SPD matrix is named as matname_SPD_b.mtx + * + * Sometimes a reference solution is available. 
In this case, it should be named as matname_x.mtx + * + * Sample code + * \code + * + * \endcode + * + * \tparam Scalar The scalar type + */ +template +class MatrixMarketIterator +{ + typedef typename NumTraits::Real RealScalar; + public: + typedef Matrix VectorType; + typedef SparseMatrix MatrixType; + + public: + MatrixMarketIterator(const std::string &folder) + : m_sym(0), m_isvalid(false), m_matIsLoaded(false), m_hasRhs(false), m_hasrefX(false), m_folder(folder) + { + m_folder_id = opendir(folder.c_str()); + if(m_folder_id) + Getnextvalidmatrix(); + } + + ~MatrixMarketIterator() + { + if (m_folder_id) closedir(m_folder_id); + } + + inline MatrixMarketIterator& operator++() + { + m_matIsLoaded = false; + m_hasrefX = false; + m_hasRhs = false; + Getnextvalidmatrix(); + return *this; + } + inline operator bool() const { return m_isvalid;} + + /** Return the sparse matrix corresponding to the current file */ + inline MatrixType& matrix() + { + // Read the matrix + if (m_matIsLoaded) return m_mat; + + std::string matrix_file = m_folder + "/" + m_matname + ".mtx"; + if ( !loadMarket(m_mat, matrix_file)) + { + std::cerr << "Warning loadMarket failed when loading \"" << matrix_file << "\"" << std::endl; + m_matIsLoaded = false; + return m_mat; + } + m_matIsLoaded = true; + + if (m_sym != NonSymmetric) + { + // Check whether we need to restore a full matrix: + RealScalar diag_norm = m_mat.diagonal().norm(); + RealScalar lower_norm = m_mat.template triangularView().norm(); + RealScalar upper_norm = m_mat.template triangularView().norm(); + if(lower_norm>diag_norm && upper_norm==diag_norm) + { + // only the lower part is stored + MatrixType tmp(m_mat); + m_mat = tmp.template selfadjointView(); + } + else if(upper_norm>diag_norm && lower_norm==diag_norm) + { + // only the upper part is stored + MatrixType tmp(m_mat); + m_mat = tmp.template selfadjointView(); + } + } + return m_mat; + } + + /** Return the right hand side corresponding to the current matrix. 
+ * If the rhs file is not provided, a random rhs is generated + */ + inline VectorType& rhs() + { + // Get the right hand side + if (m_hasRhs) return m_rhs; + + std::string rhs_file; + rhs_file = m_folder + "/" + m_matname + "_b.mtx"; // The pattern is matname_b.mtx + m_hasRhs = Fileexists(rhs_file); + if (m_hasRhs) + { + m_rhs.resize(m_mat.cols()); + m_hasRhs = loadMarketVector(m_rhs, rhs_file); + } + if (!m_hasRhs) + { + // Generate a random right hand side + if (!m_matIsLoaded) this->matrix(); + m_refX.resize(m_mat.cols()); + m_refX.setRandom(); + m_rhs = m_mat * m_refX; + m_hasrefX = true; + m_hasRhs = true; + } + return m_rhs; + } + + /** Return a reference solution + * If it is not provided and if the right hand side is not available + * then refX is randomly generated such that A*refX = b + * where A and b are the matrix and the rhs. + * Note that when a rhs is provided, refX is not available + */ + inline VectorType& refX() + { + // Check if a reference solution is provided + if (m_hasrefX) return m_refX; + + std::string lhs_file; + lhs_file = m_folder + "/" + m_matname + "_x.mtx"; + m_hasrefX = Fileexists(lhs_file); + if (m_hasrefX) + { + m_refX.resize(m_mat.cols()); + m_hasrefX = loadMarketVector(m_refX, lhs_file); + } + else + m_refX.resize(0); + return m_refX; + } + + inline std::string& matname() { return m_matname; } + + inline int sym() { return m_sym; } + + bool hasRhs() {return m_hasRhs; } + bool hasrefX() {return m_hasrefX; } + bool isFolderValid() { return bool(m_folder_id); } + + protected: + + inline bool Fileexists(std::string file) + { + std::ifstream file_id(file.c_str()); + if (!file_id.good() ) + { + return false; + } + else + { + file_id.close(); + return true; + } + } + + void Getnextvalidmatrix( ) + { + m_isvalid = false; + // Here, we return with the next valid matrix in the folder + while ( (m_curs_id = readdir(m_folder_id)) != NULL) { + m_isvalid = false; + std::string curfile; + curfile = m_folder + "/" + m_curs_id->d_name; + // 
Discard if it is a folder + if (m_curs_id->d_type == DT_DIR) continue; //FIXME This may not be available on non BSD systems +// struct stat st_buf; +// stat (curfile.c_str(), &st_buf); +// if (S_ISDIR(st_buf.st_mode)) continue; + + // Determine from the header if it is a matrix or a right hand side + bool isvector,iscomplex=false; + if(!getMarketHeader(curfile,m_sym,iscomplex,isvector)) continue; + if(isvector) continue; + if (!iscomplex) + { + if(internal::is_same >::value || internal::is_same >::value) + continue; + } + if (iscomplex) + { + if(internal::is_same::value || internal::is_same::value) + continue; + } + + + // Get the matrix name + std::string filename = m_curs_id->d_name; + m_matname = filename.substr(0, filename.length()-4); + + // Find if the matrix is SPD + size_t found = m_matname.find("SPD"); + if( (found!=std::string::npos) && (m_sym != NonSymmetric) ) + m_sym = SPD; + + m_isvalid = true; + break; + } + } + int m_sym; // Symmetry of the matrix + MatrixType m_mat; // Current matrix + VectorType m_rhs; // Current vector + VectorType m_refX; // The reference solution, if exists + std::string m_matname; // Matrix Name + bool m_isvalid; + bool m_matIsLoaded; // Determine if the matrix has already been loaded from the file + bool m_hasRhs; // The right hand side exists + bool m_hasrefX; // A reference solution is provided + std::string m_folder; + DIR * m_folder_id; + struct dirent *m_curs_id; + +}; + +} // end namespace Eigen + +#endif diff --git a/external/unsupported/Eigen/src/SparseExtra/RandomSetter.h b/external/unsupported/Eigen/src/SparseExtra/RandomSetter.h new file mode 100644 index 0000000..985702b --- /dev/null +++ b/external/unsupported/Eigen/src/SparseExtra/RandomSetter.h @@ -0,0 +1,349 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RANDOMSETTER_H +#define EIGEN_RANDOMSETTER_H + +#if defined(EIGEN_GOOGLEHASH_SUPPORT) +// Ensure the ::google namespace exists, required for checking existence of +// ::google::dense_hash_map and ::google::sparse_hash_map. +namespace google {} +#endif + +namespace Eigen { + +/** Represents a std::map + * + * \see RandomSetter + */ +template struct StdMapTraits +{ + typedef int KeyType; + typedef std::map Type; + enum { + IsSorted = 1 + }; + + static void setInvalidKey(Type&, const KeyType&) {} +}; + +#ifdef EIGEN_UNORDERED_MAP_SUPPORT +/** Represents a std::unordered_map + * + * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file + * yourself making sure that unordered_map is defined in the std namespace. + * + * For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do: + * \code + * #include + * #define EIGEN_UNORDERED_MAP_SUPPORT + * namespace std { + * using std::tr1::unordered_map; + * } + * \endcode + * + * \see RandomSetter + */ +template struct StdUnorderedMapTraits +{ + typedef int KeyType; + typedef std::unordered_map Type; + enum { + IsSorted = 0 + }; + + static void setInvalidKey(Type&, const KeyType&) {} +}; +#endif // EIGEN_UNORDERED_MAP_SUPPORT + +#if defined(EIGEN_GOOGLEHASH_SUPPORT) + +namespace google { + +// Namespace work-around, since sometimes dense_hash_map and sparse_hash_map +// are in the global namespace, and other times they are under ::google. 
+using namespace ::google; + +template +struct DenseHashMap { + typedef dense_hash_map type; +}; + +template +struct SparseHashMap { + typedef sparse_hash_map type; +}; + +} // namespace google + +/** Represents a google::dense_hash_map + * + * \see RandomSetter + */ +template struct GoogleDenseHashMapTraits +{ + typedef int KeyType; + typedef typename google::DenseHashMap::type Type; + enum { + IsSorted = 0 + }; + + static void setInvalidKey(Type& map, const KeyType& k) + { map.set_empty_key(k); } +}; + +/** Represents a google::sparse_hash_map + * + * \see RandomSetter + */ +template struct GoogleSparseHashMapTraits +{ + typedef int KeyType; + typedef typename google::SparseHashMap::type Type; + enum { + IsSorted = 0 + }; + + static void setInvalidKey(Type&, const KeyType&) {} +}; +#endif + +/** \class RandomSetter + * + * \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access + * + * \tparam SparseMatrixType the type of the sparse matrix we are updating + * \tparam MapTraits a traits class representing the map implementation used for the temporary sparse storage. + * Its default value depends on the system. + * \tparam OuterPacketBits defines the number of rows (or columns) manage by a single map object + * as a power of two exponent. + * + * This class temporarily represents a sparse matrix object using a generic map implementation allowing for + * efficient random access. The conversion from the compressed representation to a hash_map object is performed + * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy + * suggest the use of nested blocks as in this example: + * + * \code + * SparseMatrix m(rows,cols); + * { + * RandomSetter > w(m); + * // don't use m but w instead with read/write random access to the coefficients: + * for(;;) + * w(rand(),rand()) = rand; + * } + * // when w is deleted, the data are copied back to m + * // and m is ready to use. 
+ * \endcode + * + * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would + * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter + * use multiple hash_map, each representing 2^OuterPacketBits columns or rows according to the storage order. + * To reach optimal performance, this value should be adjusted according to the average number of nonzeros + * per rows/columns. + * + * The possible values for the template parameter MapTraits are: + * - \b StdMapTraits: corresponds to std::map. (does not perform very well) + * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC) + * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption) + * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance) + * + * The default map implementation depends on the availability, and the preferred order is: + * GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits. + * + * For performance and memory consumption reasons it is highly recommended to use one of + * Google's hash_map implementations. To enable the support for them, you must define + * EIGEN_GOOGLEHASH_SUPPORT. This will include both and + * for you. 
+ * + * \see https://github.com/sparsehash/sparsehash + */ +template class MapTraits = +#if defined(EIGEN_GOOGLEHASH_SUPPORT) + GoogleDenseHashMapTraits +#elif defined(_HASH_MAP) + GnuHashMapTraits +#else + StdMapTraits +#endif + ,int OuterPacketBits = 6> +class RandomSetter +{ + typedef typename SparseMatrixType::Scalar Scalar; + typedef typename SparseMatrixType::StorageIndex StorageIndex; + + struct ScalarWrapper + { + ScalarWrapper() : value(0) {} + Scalar value; + }; + typedef typename MapTraits::KeyType KeyType; + typedef typename MapTraits::Type HashMapType; + static const int OuterPacketMask = (1 << OuterPacketBits) - 1; + enum { + SwapStorage = 1 - MapTraits::IsSorted, + TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0, + SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor + }; + + public: + + /** Constructs a random setter object from the sparse matrix \a target + * + * Note that the initial value of \a target are imported. If you want to re-set + * a sparse matrix from scratch, then you must set it to zero first using the + * setZero() function. + */ + inline RandomSetter(SparseMatrixType& target) + : mp_target(&target) + { + const Index outerSize = SwapStorage ? target.innerSize() : target.outerSize(); + const Index innerSize = SwapStorage ? 
target.outerSize() : target.innerSize(); + m_outerPackets = outerSize >> OuterPacketBits; + if (outerSize&OuterPacketMask) + m_outerPackets += 1; + m_hashmaps = new HashMapType[m_outerPackets]; + // compute number of bits needed to store inner indices + Index aux = innerSize - 1; + m_keyBitsOffset = 0; + while (aux) + { + ++m_keyBitsOffset; + aux = aux >> 1; + } + KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset)); + for (Index k=0; k::setInvalidKey(m_hashmaps[k],ik); + + // insert current coeffs + for (Index j=0; jouterSize(); ++j) + for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it) + (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value(); + } + + /** Destructor updating back the sparse matrix target */ + ~RandomSetter() + { + KeyType keyBitsMask = (1<setZero(); + mp_target->makeCompressed(); + mp_target->reserve(nonZeros()); + Index prevOuter = -1; + for (Index k=0; kfirst >> m_keyBitsOffset) + outerOffset; + const Index inner = it->first & keyBitsMask; + if (prevOuter!=outer) + { + for (Index j=prevOuter+1;j<=outer;++j) + mp_target->startVec(j); + prevOuter = outer; + } + mp_target->insertBackByOuterInner(outer, inner) = it->second.value; + } + } + mp_target->finalize(); + } + else + { + VectorXi positions(mp_target->outerSize()); + positions.setZero(); + // pass 1 + for (Index k=0; kfirst & keyBitsMask; + ++positions[outer]; + } + } + // prefix sum + StorageIndex count = 0; + for (Index j=0; jouterSize(); ++j) + { + StorageIndex tmp = positions[j]; + mp_target->outerIndexPtr()[j] = count; + positions[j] = count; + count += tmp; + } + mp_target->makeCompressed(); + mp_target->outerIndexPtr()[mp_target->outerSize()] = count; + mp_target->resizeNonZeros(count); + // pass 2 + for (Index k=0; kfirst >> m_keyBitsOffset) + outerOffset; + const Index outer = it->first & keyBitsMask; + // sorted insertion + // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients, + // moreover those 
2^OuterPacketBits coeffs are likely to be sparse, an so only a + // small fraction of them have to be sorted, whence the following simple procedure: + Index posStart = mp_target->outerIndexPtr()[outer]; + Index i = (positions[outer]++) - 1; + while ( (i >= posStart) && (mp_target->innerIndexPtr()[i] > inner) ) + { + mp_target->valuePtr()[i+1] = mp_target->valuePtr()[i]; + mp_target->innerIndexPtr()[i+1] = mp_target->innerIndexPtr()[i]; + --i; + } + mp_target->innerIndexPtr()[i+1] = internal::convert_index(inner); + mp_target->valuePtr()[i+1] = it->second.value; + } + } + } + delete[] m_hashmaps; + } + + /** \returns a reference to the coefficient at given coordinates \a row, \a col */ + Scalar& operator() (Index row, Index col) + { + const Index outer = SetterRowMajor ? row : col; + const Index inner = SetterRowMajor ? col : row; + const Index outerMajor = outer >> OuterPacketBits; // index of the packet/map + const Index outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet + const KeyType key = internal::convert_index((outerMinor<(m_hashmaps[k].size()); + return nz; + } + + + protected: + + HashMapType* m_hashmaps; + SparseMatrixType* mp_target; + Index m_outerPackets; + unsigned char m_keyBitsOffset; +}; + +} // end namespace Eigen + +#endif // EIGEN_RANDOMSETTER_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsArrayAPI.h b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsArrayAPI.h new file mode 100644 index 0000000..41d2bf6 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsArrayAPI.h @@ -0,0 +1,286 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ + +#ifndef EIGEN_BESSELFUNCTIONS_ARRAYAPI_H +#define EIGEN_BESSELFUNCTIONS_ARRAYAPI_H + +namespace Eigen { + +/** \returns an expression of the coefficient-wise i0(\a x) to the given + * arrays. + * + * It returns the modified Bessel function of the first kind of order zero. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of i0(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_i0() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i0_op, const Derived> +bessel_i0(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i0_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise i0e(\a x) to the given + * arrays. + * + * It returns the exponentially scaled modified Bessel + * function of the first kind of order zero. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of i0e(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_i0e() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i0e_op, const Derived> +bessel_i0e(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i0e_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise i1(\a x) to the given + * arrays. + * + * It returns the modified Bessel function of the first kind of order one. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of i1(T) for + * any scalar type T to be supported. 
+ * + * \sa ArrayBase::bessel_i1() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i1_op, const Derived> +bessel_i1(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i1_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise i1e(\a x) to the given + * arrays. + * + * It returns the exponentially scaled modified Bessel + * function of the first kind of order one. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of i1e(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_i1e() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i1e_op, const Derived> +bessel_i1e(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_i1e_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise k0(\a x) to the given + * arrays. + * + * It returns the modified Bessel function of the second kind of order zero. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of k0(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_k0() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k0_op, const Derived> +bessel_k0(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k0_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise k0e(\a x) to the given + * arrays. + * + * It returns the exponentially scaled modified Bessel + * function of the second kind of order zero. 
+ * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of k0e(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_k0e() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k0e_op, const Derived> +bessel_k0e(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k0e_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise k1(\a x) to the given + * arrays. + * + * It returns the modified Bessel function of the second kind of order one. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of k1(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_k1() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k1_op, const Derived> +bessel_k1(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k1_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise k1e(\a x) to the given + * arrays. + * + * It returns the exponentially scaled modified Bessel + * function of the second kind of order one. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of k1e(T) for + * any scalar type T to be supported. 
+ * + * \sa ArrayBase::bessel_k1e() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k1e_op, const Derived> +bessel_k1e(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_k1e_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise j0(\a x) to the given + * arrays. + * + * It returns the Bessel function of the first kind of order zero. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of j0(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_j0() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_j0_op, const Derived> +bessel_j0(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_j0_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise y0(\a x) to the given + * arrays. + * + * It returns the Bessel function of the second kind of order zero. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of y0(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_y0() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_y0_op, const Derived> +bessel_y0(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_y0_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise j1(\a x) to the given + * arrays. + * + * It returns the modified Bessel function of the first kind of order one. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. 
To support + * other scalar types, the user has to provide implementations of j1(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_j1() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_j1_op, const Derived> +bessel_j1(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_j1_op, + const Derived>(x.derived()); +} + +/** \returns an expression of the coefficient-wise y1(\a x) to the given + * arrays. + * + * It returns the Bessel function of the second kind of order one. + * + * \param x is the argument + * + * \note This function supports only float and double scalar types. To support + * other scalar types, the user has to provide implementations of y1(T) for + * any scalar type T to be supported. + * + * \sa ArrayBase::bessel_y1() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_y1_op, const Derived> +bessel_y1(const Eigen::ArrayBase& x) { + return Eigen::CwiseUnaryOp< + Eigen::internal::scalar_bessel_y1_op, + const Derived>(x.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_BESSELFUNCTIONS_ARRAYAPI_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsBFloat16.h b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsBFloat16.h new file mode 100644 index 0000000..6049cc2 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsBFloat16.h @@ -0,0 +1,68 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_BESSELFUNCTIONS_BFLOAT16_H +#define EIGEN_BESSELFUNCTIONS_BFLOAT16_H + +namespace Eigen { +namespace numext { + +#if EIGEN_HAS_C99_MATH +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_i0(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_i0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_i0e(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_i0e(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_i1(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_i1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_i1e(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_i1e(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_j0(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_j0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_j1(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_j1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_y0(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_y0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_y1(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_y1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_k0(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_k0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_k0e(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_k0e(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 
bessel_k1(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_k1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 bessel_k1e(const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::bessel_k1e(static_cast(x))); +} +#endif + +} // end namespace numext +} // end namespace Eigen + +#endif // EIGEN_BESSELFUNCTIONS_BFLOAT16_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsFunctors.h b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsFunctors.h new file mode 100644 index 0000000..8606a9f --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsFunctors.h @@ -0,0 +1,357 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Eugene Brevdo +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BESSELFUNCTIONS_FUNCTORS_H +#define EIGEN_BESSELFUNCTIONS_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +/** \internal + * \brief Template functor to compute the modified Bessel function of the first + * kind of order zero. + * \sa class CwiseUnaryOp, Cwise::bessel_i0() + */ +template +struct scalar_bessel_i0_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i0_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_i0; + return bessel_i0(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_i0(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=20 is computed. + // The cost is N multiplications and 2N additions. 
We also add + // the cost of an additional exp over i0e. + Cost = 28 * NumTraits::MulCost + 48 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the exponentially scaled modified Bessel + * function of the first kind of order zero + * \sa class CwiseUnaryOp, Cwise::bessel_i0e() + */ +template +struct scalar_bessel_i0e_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i0e_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_i0e; + return bessel_i0e(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_i0e(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=20 is computed. + // The cost is N multiplications and 2N additions. + Cost = 20 * NumTraits::MulCost + 40 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the modified Bessel function of the first + * kind of order one + * \sa class CwiseUnaryOp, Cwise::bessel_i1() + */ +template +struct scalar_bessel_i1_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i1_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_i1; + return bessel_i1(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_i1(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=20 is computed. + // The cost is N multiplications and 2N additions. We also add + // the cost of an additional exp over i1e. 
+ Cost = 28 * NumTraits::MulCost + 48 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the exponentially scaled modified Bessel + * function of the first kind of order zero + * \sa class CwiseUnaryOp, Cwise::bessel_i1e() + */ +template +struct scalar_bessel_i1e_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_i1e_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_i1e; + return bessel_i1e(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_i1e(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=20 is computed. + // The cost is N multiplications and 2N additions. + Cost = 20 * NumTraits::MulCost + 40 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the Bessel function of the second kind of + * order zero + * \sa class CwiseUnaryOp, Cwise::bessel_j0() + */ +template +struct scalar_bessel_j0_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_j0_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_j0; + return bessel_j0(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_j0(x); + } +}; +template +struct functor_traits > { + enum { + // 6 polynomial of order ~N=8 is computed. + // The cost is N multiplications and N additions each, along with a + // sine, cosine and rsqrt cost. 
+ Cost = 63 * NumTraits::MulCost + 48 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the Bessel function of the second kind of + * order zero + * \sa class CwiseUnaryOp, Cwise::bessel_y0() + */ +template +struct scalar_bessel_y0_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_y0_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_y0; + return bessel_y0(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_y0(x); + } +}; +template +struct functor_traits > { + enum { + // 6 polynomial of order ~N=8 is computed. + // The cost is N multiplications and N additions each, along with a + // sine, cosine, rsqrt and j0 cost. + Cost = 126 * NumTraits::MulCost + 96 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the Bessel function of the first kind of + * order one + * \sa class CwiseUnaryOp, Cwise::bessel_j1() + */ +template +struct scalar_bessel_j1_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_j1_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_j1; + return bessel_j1(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_j1(x); + } +}; +template +struct functor_traits > { + enum { + // 6 polynomial of order ~N=8 is computed. + // The cost is N multiplications and N additions each, along with a + // sine, cosine and rsqrt cost. 
+ Cost = 63 * NumTraits::MulCost + 48 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the Bessel function of the second kind of + * order one + * \sa class CwiseUnaryOp, Cwise::bessel_j1e() + */ +template +struct scalar_bessel_y1_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_y1_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_y1; + return bessel_y1(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_y1(x); + } +}; +template +struct functor_traits > { + enum { + // 6 polynomial of order ~N=8 is computed. + // The cost is N multiplications and N additions each, along with a + // sine, cosine, rsqrt and j1 cost. + Cost = 126 * NumTraits::MulCost + 96 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the modified Bessel function of the second + * kind of order zero + * \sa class CwiseUnaryOp, Cwise::bessel_k0() + */ +template +struct scalar_bessel_k0_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k0_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_k0; + return bessel_k0(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_k0(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=10 is computed. + // The cost is N multiplications and 2N additions. In addition we compute + // i0, a log, exp and prsqrt and sin and cos. 
+ Cost = 68 * NumTraits::MulCost + 88 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the exponentially scaled modified Bessel + * function of the second kind of order zero + * \sa class CwiseUnaryOp, Cwise::bessel_k0e() + */ +template +struct scalar_bessel_k0e_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k0e_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_k0e; + return bessel_k0e(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_k0e(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=10 is computed. + // The cost is N multiplications and 2N additions. In addition we compute + // i0, a log, exp and prsqrt and sin and cos. + Cost = 68 * NumTraits::MulCost + 88 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the modified Bessel function of the + * second kind of order one + * \sa class CwiseUnaryOp, Cwise::bessel_k1() + */ +template +struct scalar_bessel_k1_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k1_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_k1; + return bessel_k1(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_k1(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=10 is computed. + // The cost is N multiplications and 2N additions. In addition we compute + // i1, a log, exp and prsqrt and sin and cos. 
+ Cost = 68 * NumTraits::MulCost + 88 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + +/** \internal + * \brief Template functor to compute the exponentially scaled modified Bessel + * function of the second kind of order one + * \sa class CwiseUnaryOp, Cwise::bessel_k1e() + */ +template +struct scalar_bessel_k1e_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_bessel_k1e_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& x) const { + using numext::bessel_k1e; + return bessel_k1e(x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return internal::pbessel_k1e(x); + } +}; +template +struct functor_traits > { + enum { + // On average, a Chebyshev polynomial of order N=10 is computed. + // The cost is N multiplications and 2N additions. In addition we compute + // i1, a log, exp and prsqrt and sin and cos. + Cost = 68 * NumTraits::MulCost + 88 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBessel + }; +}; + + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BESSELFUNCTIONS_FUNCTORS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsHalf.h b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsHalf.h new file mode 100644 index 0000000..8930d1a --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsHalf.h @@ -0,0 +1,66 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_BESSELFUNCTIONS_HALF_H +#define EIGEN_BESSELFUNCTIONS_HALF_H + +namespace Eigen { +namespace numext { + +#if EIGEN_HAS_C99_MATH +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_i0(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_i0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_i0e(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_i0e(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_i1(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_i1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_i1e(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_i1e(static_cast(x))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_j0(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_j0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_j1(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_j1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_y0(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_y0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_y1(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_y1(static_cast(x))); +} +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_k0(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_k0(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_k0e(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_k0e(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half bessel_k1(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_k1(static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC 
Eigen::half bessel_k1e(const Eigen::half& x) { + return Eigen::half(Eigen::numext::bessel_k1e(static_cast(x))); +} +#endif + +} // end namespace numext +} // end namespace Eigen + +#endif // EIGEN_BESSELFUNCTIONS_HALF_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsImpl.h b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsImpl.h new file mode 100644 index 0000000..24812be --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsImpl.h @@ -0,0 +1,1959 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BESSEL_FUNCTIONS_H +#define EIGEN_BESSEL_FUNCTIONS_H + +namespace Eigen { +namespace internal { + +// Parts of this code are based on the Cephes Math Library. +// +// Cephes Math Library Release 2.8: June, 2000 +// Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier +// +// Permission has been kindly provided by the original author +// to incorporate the Cephes software into the Eigen codebase: +// +// From: Stephen Moshier +// To: Eugene Brevdo +// Subject: Re: Permission to wrap several cephes functions in Eigen +// +// Hello Eugene, +// +// Thank you for writing. +// +// If your licensing is similar to BSD, the formal way that has been +// handled is simply to add a statement to the effect that you are incorporating +// the Cephes software by permission of the author. 
+// +// Good luck with your project, +// Steve + + +/**************************************************************************** + * Implementation of Bessel function, based on Cephes * + ****************************************************************************/ + +template +struct bessel_i0e_retval { + typedef Scalar type; +}; + +template ::type> +struct generic_i0e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_i0e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* i0ef.c + * + * Modified Bessel function of order zero, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * float x, y, i0ef(); + * + * y = i0ef( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of order zero of the argument. + * + * The function is defined as i0e(x) = exp(-|x|) j0( ix ). + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0,30 100000 3.7e-7 7.0e-8 + * See i0f(). 
+ * + */ + + const float A[] = {-1.30002500998624804212E-8f, 6.04699502254191894932E-8f, + -2.67079385394061173391E-7f, 1.11738753912010371815E-6f, + -4.41673835845875056359E-6f, 1.64484480707288970893E-5f, + -5.75419501008210370398E-5f, 1.88502885095841655729E-4f, + -5.76375574538582365885E-4f, 1.63947561694133579842E-3f, + -4.32430999505057594430E-3f, 1.05464603945949983183E-2f, + -2.37374148058994688156E-2f, 4.93052842396707084878E-2f, + -9.49010970480476444210E-2f, 1.71620901522208775349E-1f, + -3.04682672343198398683E-1f, 6.76795274409476084995E-1f}; + + const float B[] = {3.39623202570838634515E-9f, 2.26666899049817806459E-8f, + 2.04891858946906374183E-7f, 2.89137052083475648297E-6f, + 6.88975834691682398426E-5f, 3.36911647825569408990E-3f, + 8.04490411014108831608E-1f}; + T y = pabs(x); + T y_le_eight = internal::pchebevl::run( + pmadd(pset1(0.5f), y, pset1(-2.0f)), A); + T y_gt_eight = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(32.0f), y), pset1(2.0f)), B), + prsqrt(y)); + // TODO: Perhaps instead check whether all packet elements are in + // [-8, 8] and evaluate a branch based off of that. It's possible + // in practice most elements are in this region. + return pselect(pcmp_le(y, pset1(8.0f)), y_le_eight, y_gt_eight); + } +}; + +template +struct generic_i0e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* i0e.c + * + * Modified Bessel function of order zero, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * double x, y, i0e(); + * + * y = i0e( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of order zero of the argument. + * + * The function is defined as i0e(x) = exp(-|x|) j0( ix ). + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0,30 30000 5.4e-16 1.2e-16 + * See i0(). 
+ * + */ + + const double A[] = {-4.41534164647933937950E-18, 3.33079451882223809783E-17, + -2.43127984654795469359E-16, 1.71539128555513303061E-15, + -1.16853328779934516808E-14, 7.67618549860493561688E-14, + -4.85644678311192946090E-13, 2.95505266312963983461E-12, + -1.72682629144155570723E-11, 9.67580903537323691224E-11, + -5.18979560163526290666E-10, 2.65982372468238665035E-9, + -1.30002500998624804212E-8, 6.04699502254191894932E-8, + -2.67079385394061173391E-7, 1.11738753912010371815E-6, + -4.41673835845875056359E-6, 1.64484480707288970893E-5, + -5.75419501008210370398E-5, 1.88502885095841655729E-4, + -5.76375574538582365885E-4, 1.63947561694133579842E-3, + -4.32430999505057594430E-3, 1.05464603945949983183E-2, + -2.37374148058994688156E-2, 4.93052842396707084878E-2, + -9.49010970480476444210E-2, 1.71620901522208775349E-1, + -3.04682672343198398683E-1, 6.76795274409476084995E-1}; + const double B[] = { + -7.23318048787475395456E-18, -4.83050448594418207126E-18, + 4.46562142029675999901E-17, 3.46122286769746109310E-17, + -2.82762398051658348494E-16, -3.42548561967721913462E-16, + 1.77256013305652638360E-15, 3.81168066935262242075E-15, + -9.55484669882830764870E-15, -4.15056934728722208663E-14, + 1.54008621752140982691E-14, 3.85277838274214270114E-13, + 7.18012445138366623367E-13, -1.79417853150680611778E-12, + -1.32158118404477131188E-11, -3.14991652796324136454E-11, + 1.18891471078464383424E-11, 4.94060238822496958910E-10, + 3.39623202570838634515E-9, 2.26666899049817806459E-8, + 2.04891858946906374183E-7, 2.89137052083475648297E-6, + 6.88975834691682398426E-5, 3.36911647825569408990E-3, + 8.04490411014108831608E-1}; + T y = pabs(x); + T y_le_eight = internal::pchebevl::run( + pmadd(pset1(0.5), y, pset1(-2.0)), A); + T y_gt_eight = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(32.0), y), pset1(2.0)), B), + prsqrt(y)); + // TODO: Perhaps instead check whether all packet elements are in + // [-8, 8] and evaluate a branch based off of that. 
It's possible + // in practice most elements are in this region. + return pselect(pcmp_le(y, pset1(8.0)), y_le_eight, y_gt_eight); + } +}; + +template +struct bessel_i0e_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_i0e::run(x); + } +}; + +template +struct bessel_i0_retval { + typedef Scalar type; +}; + +template ::type> +struct generic_i0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + return pmul( + pexp(pabs(x)), + generic_i0e::run(x)); + } +}; + +template +struct bessel_i0_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_i0::run(x); + } +}; + +template +struct bessel_i1e_retval { + typedef Scalar type; +}; + +template ::type > +struct generic_i1e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_i1e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* i1ef.c + * + * Modified Bessel function of order one, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * float x, y, i1ef(); + * + * y = i1ef( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of order one of the argument. + * + * The function is defined as i1(x) = -i exp(-|x|) j1( ix ). + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 1.5e-6 1.5e-7 + * See i1(). 
+ * + */ + const float A[] = {9.38153738649577178388E-9f, -4.44505912879632808065E-8f, + 2.00329475355213526229E-7f, -8.56872026469545474066E-7f, + 3.47025130813767847674E-6f, -1.32731636560394358279E-5f, + 4.78156510755005422638E-5f, -1.61760815825896745588E-4f, + 5.12285956168575772895E-4f, -1.51357245063125314899E-3f, + 4.15642294431288815669E-3f, -1.05640848946261981558E-2f, + 2.47264490306265168283E-2f, -5.29459812080949914269E-2f, + 1.02643658689847095384E-1f, -1.76416518357834055153E-1f, + 2.52587186443633654823E-1f}; + + const float B[] = {-3.83538038596423702205E-9f, -2.63146884688951950684E-8f, + -2.51223623787020892529E-7f, -3.88256480887769039346E-6f, + -1.10588938762623716291E-4f, -9.76109749136146840777E-3f, + 7.78576235018280120474E-1f}; + + + T y = pabs(x); + T y_le_eight = pmul(y, internal::pchebevl::run( + pmadd(pset1(0.5f), y, pset1(-2.0f)), A)); + T y_gt_eight = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(32.0f), y), + pset1(2.0f)), B), + prsqrt(y)); + // TODO: Perhaps instead check whether all packet elements are in + // [-8, 8] and evaluate a branch based off of that. It's possible + // in practice most elements are in this region. + y = pselect(pcmp_le(y, pset1(8.0f)), y_le_eight, y_gt_eight); + return pselect(pcmp_lt(x, pset1(0.0f)), pnegate(y), y); + } +}; + +template +struct generic_i1e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* i1e.c + * + * Modified Bessel function of order one, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * double x, y, i1e(); + * + * y = i1e( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of order one of the argument. + * + * The function is defined as i1(x) = -i exp(-|x|) j1( ix ). + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 2.0e-15 2.0e-16 + * See i1(). 
+ * + */ + const double A[] = {2.77791411276104639959E-18, -2.11142121435816608115E-17, + 1.55363195773620046921E-16, -1.10559694773538630805E-15, + 7.60068429473540693410E-15, -5.04218550472791168711E-14, + 3.22379336594557470981E-13, -1.98397439776494371520E-12, + 1.17361862988909016308E-11, -6.66348972350202774223E-11, + 3.62559028155211703701E-10, -1.88724975172282928790E-9, + 9.38153738649577178388E-9, -4.44505912879632808065E-8, + 2.00329475355213526229E-7, -8.56872026469545474066E-7, + 3.47025130813767847674E-6, -1.32731636560394358279E-5, + 4.78156510755005422638E-5, -1.61760815825896745588E-4, + 5.12285956168575772895E-4, -1.51357245063125314899E-3, + 4.15642294431288815669E-3, -1.05640848946261981558E-2, + 2.47264490306265168283E-2, -5.29459812080949914269E-2, + 1.02643658689847095384E-1, -1.76416518357834055153E-1, + 2.52587186443633654823E-1}; + const double B[] = { + 7.51729631084210481353E-18, 4.41434832307170791151E-18, + -4.65030536848935832153E-17, -3.20952592199342395980E-17, + 2.96262899764595013876E-16, 3.30820231092092828324E-16, + -1.88035477551078244854E-15, -3.81440307243700780478E-15, + 1.04202769841288027642E-14, 4.27244001671195135429E-14, + -2.10154184277266431302E-14, -4.08355111109219731823E-13, + -7.19855177624590851209E-13, 2.03562854414708950722E-12, + 1.41258074366137813316E-11, 3.25260358301548823856E-11, + -1.89749581235054123450E-11, -5.58974346219658380687E-10, + -3.83538038596423702205E-9, -2.63146884688951950684E-8, + -2.51223623787020892529E-7, -3.88256480887769039346E-6, + -1.10588938762623716291E-4, -9.76109749136146840777E-3, + 7.78576235018280120474E-1}; + T y = pabs(x); + T y_le_eight = pmul(y, internal::pchebevl::run( + pmadd(pset1(0.5), y, pset1(-2.0)), A)); + T y_gt_eight = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(32.0), y), + pset1(2.0)), B), + prsqrt(y)); + // TODO: Perhaps instead check whether all packet elements are in + // [-8, 8] and evaluate a branch based off of that. 
It's possible + // in practice most elements are in this region. + y = pselect(pcmp_le(y, pset1(8.0)), y_le_eight, y_gt_eight); + return pselect(pcmp_lt(x, pset1(0.0)), pnegate(y), y); + } +}; + +template +struct bessel_i1e_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_i1e::run(x); + } +}; + +template +struct bessel_i1_retval { + typedef T type; +}; + +template ::type> +struct generic_i1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + return pmul( + pexp(pabs(x)), + generic_i1e::run(x)); + } +}; + +template +struct bessel_i1_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_i1::run(x); + } +}; + +template +struct bessel_k0e_retval { + typedef T type; +}; + +template ::type> +struct generic_k0e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_k0e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k0ef.c + * Modified Bessel function, third kind, order zero, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * float x, y, k0ef(); + * + * y = k0ef( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of the third kind of order zero of the argument. + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 8.1e-7 7.8e-8 + * See k0(). 
+ * + */ + + const float A[] = {1.90451637722020886025E-9f, 2.53479107902614945675E-7f, + 2.28621210311945178607E-5f, 1.26461541144692592338E-3f, + 3.59799365153615016266E-2f, 3.44289899924628486886E-1f, + -5.35327393233902768720E-1f}; + + const float B[] = {-1.69753450938905987466E-9f, 8.57403401741422608519E-9f, + -4.66048989768794782956E-8f, 2.76681363944501510342E-7f, + -1.83175552271911948767E-6f, 1.39498137188764993662E-5f, + -1.28495495816278026384E-4f, 1.56988388573005337491E-3f, + -3.14481013119645005427E-2f, 2.44030308206595545468E0f}; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A); + x_le_two = pmadd( + generic_i0::run(x), pnegate( + plog(pmul(pset1(0.5), x))), x_le_two); + x_le_two = pmul(pexp(x), x_le_two); + T x_gt_two = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B), + prsqrt(x)); + return pselect( + pcmp_le(x, pset1(0.0)), + MAXNUM, + pselect(pcmp_le(x, two), x_le_two, x_gt_two)); + } +}; + +template +struct generic_k0e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k0e.c + * Modified Bessel function, third kind, order zero, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * double x, y, k0e(); + * + * y = k0e( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of the third kind of order zero of the argument. + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 1.4e-15 1.4e-16 + * See k0(). 
+ * + */ + + const double A[] = { + 1.37446543561352307156E-16, + 4.25981614279661018399E-14, + 1.03496952576338420167E-11, + 1.90451637722020886025E-9, + 2.53479107902614945675E-7, + 2.28621210311945178607E-5, + 1.26461541144692592338E-3, + 3.59799365153615016266E-2, + 3.44289899924628486886E-1, + -5.35327393233902768720E-1}; + const double B[] = { + 5.30043377268626276149E-18, -1.64758043015242134646E-17, + 5.21039150503902756861E-17, -1.67823109680541210385E-16, + 5.51205597852431940784E-16, -1.84859337734377901440E-15, + 6.34007647740507060557E-15, -2.22751332699166985548E-14, + 8.03289077536357521100E-14, -2.98009692317273043925E-13, + 1.14034058820847496303E-12, -4.51459788337394416547E-12, + 1.85594911495471785253E-11, -7.95748924447710747776E-11, + 3.57739728140030116597E-10, -1.69753450938905987466E-9, + 8.57403401741422608519E-9, -4.66048989768794782956E-8, + 2.76681363944501510342E-7, -1.83175552271911948767E-6, + 1.39498137188764993662E-5, -1.28495495816278026384E-4, + 1.56988388573005337491E-3, -3.14481013119645005427E-2, + 2.44030308206595545468E0 + }; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A); + x_le_two = pmadd( + generic_i0::run(x), pmul( + pset1(-1.0), plog(pmul(pset1(0.5), x))), x_le_two); + x_le_two = pmul(pexp(x), x_le_two); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B), + prsqrt(x)); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct bessel_k0e_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_k0e::run(x); + } +}; + +template +struct bessel_k0_retval { + typedef T type; +}; + +template ::type> +struct generic_k0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + 
THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_k0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k0f.c + * Modified Bessel function, third kind, order zero + * + * + * + * SYNOPSIS: + * + * float x, y, k0f(); + * + * y = k0f( x ); + * + * + * + * DESCRIPTION: + * + * Returns modified Bessel function of the third kind + * of order zero of the argument. + * + * The range is partitioned into the two intervals [0,8] and + * (8, infinity). Chebyshev polynomial expansions are employed + * in each interval. + * + * + * + * ACCURACY: + * + * Tested at 2000 random points between 0 and 8. Peak absolute + * error (relative when K0 > 1) was 1.46e-14; rms, 4.26e-15. + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 7.8e-7 8.5e-8 + * + * ERROR MESSAGES: + * + * message condition value returned + * K0 domain x <= 0 MAXNUM + * + */ + + const float A[] = {1.90451637722020886025E-9f, 2.53479107902614945675E-7f, + 2.28621210311945178607E-5f, 1.26461541144692592338E-3f, + 3.59799365153615016266E-2f, 3.44289899924628486886E-1f, + -5.35327393233902768720E-1f}; + + const float B[] = {-1.69753450938905987466E-9f, 8.57403401741422608519E-9f, + -4.66048989768794782956E-8f, 2.76681363944501510342E-7f, + -1.83175552271911948767E-6f, 1.39498137188764993662E-5f, + -1.28495495816278026384E-4f, 1.56988388573005337491E-3f, + -3.14481013119645005427E-2f, 2.44030308206595545468E0f}; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A); + x_le_two = pmadd( + generic_i0::run(x), pnegate( + plog(pmul(pset1(0.5), x))), x_le_two); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = pmul( + pmul( + pexp(pnegate(x)), + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B)), + prsqrt(x)); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct 
generic_k0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* + * + * Modified Bessel function, third kind, order zero, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * double x, y, k0(); + * + * y = k0( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of the third kind of order zero of the argument. + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 1.4e-15 1.4e-16 + * See k0(). + * + */ + const double A[] = { + 1.37446543561352307156E-16, + 4.25981614279661018399E-14, + 1.03496952576338420167E-11, + 1.90451637722020886025E-9, + 2.53479107902614945675E-7, + 2.28621210311945178607E-5, + 1.26461541144692592338E-3, + 3.59799365153615016266E-2, + 3.44289899924628486886E-1, + -5.35327393233902768720E-1}; + const double B[] = { + 5.30043377268626276149E-18, -1.64758043015242134646E-17, + 5.21039150503902756861E-17, -1.67823109680541210385E-16, + 5.51205597852431940784E-16, -1.84859337734377901440E-15, + 6.34007647740507060557E-15, -2.22751332699166985548E-14, + 8.03289077536357521100E-14, -2.98009692317273043925E-13, + 1.14034058820847496303E-12, -4.51459788337394416547E-12, + 1.85594911495471785253E-11, -7.95748924447710747776E-11, + 3.57739728140030116597E-10, -1.69753450938905987466E-9, + 8.57403401741422608519E-9, -4.66048989768794782956E-8, + 2.76681363944501510342E-7, -1.83175552271911948767E-6, + 1.39498137188764993662E-5, -1.28495495816278026384E-4, + 1.56988388573005337491E-3, -3.14481013119645005427E-2, + 2.44030308206595545468E0 + }; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A); + x_le_two = pmadd( + generic_i0::run(x), pnegate( + plog(pmul(pset1(0.5), x))), x_le_two); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = pmul( + pmul( + pexp(-x), + internal::pchebevl::run( + 
psub(pdiv(pset1(8.0), x), two), B)), + prsqrt(x)); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct bessel_k0_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_k0::run(x); + } +}; + +template +struct bessel_k1e_retval { + typedef T type; +}; + +template ::type> +struct generic_k1e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_k1e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k1ef.c + * + * Modified Bessel function, third kind, order one, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * float x, y, k1ef(); + * + * y = k1ef( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of the third kind of order one of the argument: + * + * k1e(x) = exp(x) * k1(x). + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 4.9e-7 6.7e-8 + * See k1(). 
+ * + */ + + const float A[] = {-2.21338763073472585583E-8f, -2.43340614156596823496E-6f, + -1.73028895751305206302E-4f, -6.97572385963986435018E-3f, + -1.22611180822657148235E-1f, -3.53155960776544875667E-1f, + 1.52530022733894777053E0f}; + const float B[] = {2.01504975519703286596E-9f, -1.03457624656780970260E-8f, + 5.74108412545004946722E-8f, -3.50196060308781257119E-7f, + 2.40648494783721712015E-6f, -1.93619797416608296024E-5f, + 1.95215518471351631108E-4f, -2.85781685962277938680E-3f, + 1.03923736576817238437E-1f, 2.72062619048444266945E0f}; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = pdiv(internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A), x); + x_le_two = pmadd( + generic_i1::run(x), plog(pmul(pset1(0.5), x)), x_le_two); + x_le_two = pmul(x_le_two, pexp(x)); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B), + prsqrt(x)); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct generic_k1e { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k1e.c + * + * Modified Bessel function, third kind, order one, + * exponentially scaled + * + * + * + * SYNOPSIS: + * + * double x, y, k1e(); + * + * y = k1e( x ); + * + * + * + * DESCRIPTION: + * + * Returns exponentially scaled modified Bessel function + * of the third kind of order one of the argument: + * + * k1e(x) = exp(x) * k1(x). + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 7.8e-16 1.2e-16 + * See k1(). 
+ * + */ + const double A[] = {-7.02386347938628759343E-18, -2.42744985051936593393E-15, + -6.66690169419932900609E-13, -1.41148839263352776110E-10, + -2.21338763073472585583E-8, -2.43340614156596823496E-6, + -1.73028895751305206302E-4, -6.97572385963986435018E-3, + -1.22611180822657148235E-1, -3.53155960776544875667E-1, + 1.52530022733894777053E0}; + const double B[] = {-5.75674448366501715755E-18, 1.79405087314755922667E-17, + -5.68946255844285935196E-17, 1.83809354436663880070E-16, + -6.05704724837331885336E-16, 2.03870316562433424052E-15, + -7.01983709041831346144E-15, 2.47715442448130437068E-14, + -8.97670518232499435011E-14, 3.34841966607842919884E-13, + -1.28917396095102890680E-12, 5.13963967348173025100E-12, + -2.12996783842756842877E-11, 9.21831518760500529508E-11, + -4.19035475934189648750E-10, 2.01504975519703286596E-9, + -1.03457624656780970260E-8, 5.74108412545004946722E-8, + -3.50196060308781257119E-7, 2.40648494783721712015E-6, + -1.93619797416608296024E-5, 1.95215518471351631108E-4, + -2.85781685962277938680E-3, 1.03923736576817238437E-1, + 2.72062619048444266945E0}; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = pdiv(internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A), x); + x_le_two = pmadd( + generic_i1::run(x), plog(pmul(pset1(0.5), x)), x_le_two); + x_le_two = pmul(x_le_two, pexp(x)); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = pmul( + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B), + prsqrt(x)); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct bessel_k1e_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_k1e::run(x); + } +}; + +template +struct bessel_k1_retval { + typedef T type; +}; + +template ::type> +struct generic_k1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + 
THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_k1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k1f.c + * Modified Bessel function, third kind, order one + * + * + * + * SYNOPSIS: + * + * float x, y, k1f(); + * + * y = k1f( x ); + * + * + * + * DESCRIPTION: + * + * Computes the modified Bessel function of the third kind + * of order one of the argument. + * + * The range is partitioned into the two intervals [0,2] and + * (2, infinity). Chebyshev polynomial expansions are employed + * in each interval. + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 4.6e-7 7.6e-8 + * + * ERROR MESSAGES: + * + * message condition value returned + * k1 domain x <= 0 MAXNUM + * + */ + + const float A[] = {-2.21338763073472585583E-8f, -2.43340614156596823496E-6f, + -1.73028895751305206302E-4f, -6.97572385963986435018E-3f, + -1.22611180822657148235E-1f, -3.53155960776544875667E-1f, + 1.52530022733894777053E0f}; + const float B[] = {2.01504975519703286596E-9f, -1.03457624656780970260E-8f, + 5.74108412545004946722E-8f, -3.50196060308781257119E-7f, + 2.40648494783721712015E-6f, -1.93619797416608296024E-5f, + 1.95215518471351631108E-4f, -2.85781685962277938680E-3f, + 1.03923736576817238437E-1f, 2.72062619048444266945E0f}; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = pdiv(internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A), x); + x_le_two = pmadd( + generic_i1::run(x), plog(pmul(pset1(0.5), x)), x_le_two); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = pmul( + pexp(pnegate(x)), + pmul( + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B), + prsqrt(x))); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct generic_k1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* k1.c + * Modified Bessel function, third kind, 
order one + * + * + * + * SYNOPSIS: + * + * float x, y, k1f(); + * + * y = k1f( x ); + * + * + * + * DESCRIPTION: + * + * Computes the modified Bessel function of the third kind + * of order one of the argument. + * + * The range is partitioned into the two intervals [0,2] and + * (2, infinity). Chebyshev polynomial expansions are employed + * in each interval. + * + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0, 30 30000 4.6e-7 7.6e-8 + * + * ERROR MESSAGES: + * + * message condition value returned + * k1 domain x <= 0 MAXNUM + * + */ + const double A[] = {-7.02386347938628759343E-18, -2.42744985051936593393E-15, + -6.66690169419932900609E-13, -1.41148839263352776110E-10, + -2.21338763073472585583E-8, -2.43340614156596823496E-6, + -1.73028895751305206302E-4, -6.97572385963986435018E-3, + -1.22611180822657148235E-1, -3.53155960776544875667E-1, + 1.52530022733894777053E0}; + const double B[] = {-5.75674448366501715755E-18, 1.79405087314755922667E-17, + -5.68946255844285935196E-17, 1.83809354436663880070E-16, + -6.05704724837331885336E-16, 2.03870316562433424052E-15, + -7.01983709041831346144E-15, 2.47715442448130437068E-14, + -8.97670518232499435011E-14, 3.34841966607842919884E-13, + -1.28917396095102890680E-12, 5.13963967348173025100E-12, + -2.12996783842756842877E-11, 9.21831518760500529508E-11, + -4.19035475934189648750E-10, 2.01504975519703286596E-9, + -1.03457624656780970260E-8, 5.74108412545004946722E-8, + -3.50196060308781257119E-7, 2.40648494783721712015E-6, + -1.93619797416608296024E-5, 1.95215518471351631108E-4, + -2.85781685962277938680E-3, 1.03923736576817238437E-1, + 2.72062619048444266945E0}; + const T MAXNUM = pset1(NumTraits::infinity()); + const T two = pset1(2.0); + T x_le_two = pdiv(internal::pchebevl::run( + pmadd(x, x, pset1(-2.0)), A), x); + x_le_two = pmadd( + generic_i1::run(x), plog(pmul(pset1(0.5), x)), x_le_two); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), MAXNUM, x_le_two); + T x_gt_two = 
pmul( + pexp(-x), + pmul( + internal::pchebevl::run( + psub(pdiv(pset1(8.0), x), two), B), + prsqrt(x))); + return pselect(pcmp_le(x, two), x_le_two, x_gt_two); + } +}; + +template +struct bessel_k1_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_k1::run(x); + } +}; + +template +struct bessel_j0_retval { + typedef T type; +}; + +template ::type> +struct generic_j0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_j0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j0f.c + * Bessel function of order zero + * + * + * + * SYNOPSIS: + * + * float x, y, j0f(); + * + * y = j0f( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of order zero of the argument. + * + * The domain is divided into the intervals [0, 2] and + * (2, infinity). In the first interval the following polynomial + * approximation is used: + * + * + * 2 2 2 + * (w - r ) (w - r ) (w - r ) P(w) + * 1 2 3 + * + * 2 + * where w = x and the three r's are zeros of the function. + * + * In the second interval, the modulus and phase are approximated + * by polynomials of the form Modulus(x) = sqrt(1/x) Q(1/x) + * and Phase(x) = x + 1/x R(1/x^2) - pi/4. The function is + * + * j0(x) = Modulus(x) cos( Phase(x) ). 
+ * + * + * + * ACCURACY: + * + * Absolute error: + * arithmetic domain # trials peak rms + * IEEE 0, 2 100000 1.3e-7 3.6e-8 + * IEEE 2, 32 100000 1.9e-7 5.4e-8 + * + */ + + const float JP[] = {-6.068350350393235E-008f, 6.388945720783375E-006f, + -3.969646342510940E-004f, 1.332913422519003E-002f, + -1.729150680240724E-001f}; + const float MO[] = {-6.838999669318810E-002f, 1.864949361379502E-001f, + -2.145007480346739E-001f, 1.197549369473540E-001f, + -3.560281861530129E-003f, -4.969382655296620E-002f, + -3.355424622293709E-006f, 7.978845717621440E-001f}; + const float PH[] = {3.242077816988247E+001f, -3.630592630518434E+001f, + 1.756221482109099E+001f, -4.974978466280903E+000f, + 1.001973420681837E+000f, -1.939906941791308E-001f, + 6.490598792654666E-002f, -1.249992184872738E-001f}; + const T DR1 = pset1(5.78318596294678452118f); + const T NEG_PIO4F = pset1(-0.7853981633974483096f); /* -pi / 4 */ + T y = pabs(x); + T z = pmul(y, y); + T y_le_two = pselect( + pcmp_lt(y, pset1(1.0e-3f)), + pmadd(z, pset1(-0.25f), pset1(1.0f)), + pmul(psub(z, DR1), internal::ppolevl::run(z, JP))); + T q = pdiv(pset1(1.0f), y); + T w = prsqrt(y); + T p = pmul(w, internal::ppolevl::run(q, MO)); + w = pmul(q, q); + T yn = pmadd(q, internal::ppolevl::run(w, PH), NEG_PIO4F); + T y_gt_two = pmul(p, pcos(padd(yn, y))); + return pselect(pcmp_le(y, pset1(2.0)), y_le_two, y_gt_two); + } +}; + +template +struct generic_j0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j0.c + * Bessel function of order zero + * + * + * + * SYNOPSIS: + * + * double x, y, j0(); + * + * y = j0( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of order zero of the argument. + * + * The domain is divided into the intervals [0, 5] and + * (5, infinity). In the first interval the following rational + * approximation is used: + * + * + * 2 2 + * (w - r ) (w - r ) P (w) / Q (w) + * 1 2 3 8 + * + * 2 + * where w = x and the two r's are zeros of the function. 
+ * + * In the second interval, the Hankel asymptotic expansion + * is employed with two rational functions of degree 6/6 + * and 7/7. + * + * + * + * ACCURACY: + * + * Absolute error: + * arithmetic domain # trials peak rms + * DEC 0, 30 10000 4.4e-17 6.3e-18 + * IEEE 0, 30 60000 4.2e-16 1.1e-16 + * + */ + const double PP[] = {7.96936729297347051624E-4, 8.28352392107440799803E-2, + 1.23953371646414299388E0, 5.44725003058768775090E0, + 8.74716500199817011941E0, 5.30324038235394892183E0, + 9.99999999999999997821E-1}; + const double PQ[] = {9.24408810558863637013E-4, 8.56288474354474431428E-2, + 1.25352743901058953537E0, 5.47097740330417105182E0, + 8.76190883237069594232E0, 5.30605288235394617618E0, + 1.00000000000000000218E0}; + const double QP[] = {-1.13663838898469149931E-2, -1.28252718670509318512E0, + -1.95539544257735972385E1, -9.32060152123768231369E1, + -1.77681167980488050595E2, -1.47077505154951170175E2, + -5.14105326766599330220E1, -6.05014350600728481186E0}; + const double QQ[] = {1.00000000000000000000E0, 6.43178256118178023184E1, + 8.56430025976980587198E2, 3.88240183605401609683E3, + 7.24046774195652478189E3, 5.93072701187316984827E3, + 2.06209331660327847417E3, 2.42005740240291393179E2}; + const double RP[] = {-4.79443220978201773821E9, 1.95617491946556577543E12, + -2.49248344360967716204E14, 9.70862251047306323952E15}; + const double RQ[] = {1.00000000000000000000E0, 4.99563147152651017219E2, + 1.73785401676374683123E5, 4.84409658339962045305E7, + 1.11855537045356834862E10, 2.11277520115489217587E12, + 3.10518229857422583814E14, 3.18121955943204943306E16, + 1.71086294081043136091E18}; + const T DR1 = pset1(5.78318596294678452118E0); + const T DR2 = pset1(3.04712623436620863991E1); + const T SQ2OPI = pset1(7.9788456080286535587989E-1); /* sqrt(2 / pi) */ + const T NEG_PIO4 = pset1(-0.7853981633974483096); /* pi / 4 */ + + T y = pabs(x); + T z = pmul(y, y); + T y_le_five = pselect( + pcmp_lt(y, pset1(1.0e-5)), + pmadd(z, pset1(-0.25), pset1(1.0)), + 
pmul(pmul(psub(z, DR1), psub(z, DR2)), + pdiv(internal::ppolevl::run(z, RP), + internal::ppolevl::run(z, RQ)))); + T s = pdiv(pset1(25.0), z); + T p = pdiv( + internal::ppolevl::run(s, PP), + internal::ppolevl::run(s, PQ)); + T q = pdiv( + internal::ppolevl::run(s, QP), + internal::ppolevl::run(s, QQ)); + T yn = padd(y, NEG_PIO4); + T w = pdiv(pset1(-5.0), y); + p = pmadd(p, pcos(yn), pmul(w, pmul(q, psin(yn)))); + T y_gt_five = pmul(p, pmul(SQ2OPI, prsqrt(y))); + return pselect(pcmp_le(y, pset1(5.0)), y_le_five, y_gt_five); + } +}; + +template +struct bessel_j0_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_j0::run(x); + } +}; + +template +struct bessel_y0_retval { + typedef T type; +}; + +template ::type> +struct generic_y0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_y0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j0f.c + * Bessel function of the second kind, order zero + * + * + * + * SYNOPSIS: + * + * float x, y, y0f(); + * + * y = y0f( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of the second kind, of order + * zero, of the argument. + * + * The domain is divided into the intervals [0, 2] and + * (2, infinity). In the first interval a rational approximation + * R(x) is employed to compute + * + * 2 2 2 + * y0(x) = (w - r ) (w - r ) (w - r ) R(x) + 2/pi ln(x) j0(x). + * 1 2 3 + * + * Thus a call to j0() is required. The three zeros are removed + * from R(x) to improve its numerical stability. + * + * In the second interval, the modulus and phase are approximated + * by polynomials of the form Modulus(x) = sqrt(1/x) Q(1/x) + * and Phase(x) = x + 1/x S(1/x^2) - pi/4. Then the function is + * + * y0(x) = Modulus(x) sin( Phase(x) ). 
+ * + * + * + * + * ACCURACY: + * + * Absolute error, when y0(x) < 1; else relative error: + * + * arithmetic domain # trials peak rms + * IEEE 0, 2 100000 2.4e-7 3.4e-8 + * IEEE 2, 32 100000 1.8e-7 5.3e-8 + * + */ + + const float YP[] = {9.454583683980369E-008f, -9.413212653797057E-006f, + 5.344486707214273E-004f, -1.584289289821316E-002f, + 1.707584643733568E-001f}; + const float MO[] = {-6.838999669318810E-002f, 1.864949361379502E-001f, + -2.145007480346739E-001f, 1.197549369473540E-001f, + -3.560281861530129E-003f, -4.969382655296620E-002f, + -3.355424622293709E-006f, 7.978845717621440E-001f}; + const float PH[] = {3.242077816988247E+001f, -3.630592630518434E+001f, + 1.756221482109099E+001f, -4.974978466280903E+000f, + 1.001973420681837E+000f, -1.939906941791308E-001f, + 6.490598792654666E-002f, -1.249992184872738E-001f}; + const T YZ1 = pset1(0.43221455686510834878f); + const T TWOOPI = pset1(0.636619772367581343075535f); /* 2 / pi */ + const T NEG_PIO4F = pset1(-0.7853981633974483096f); /* -pi / 4 */ + const T NEG_MAXNUM = pset1(-NumTraits::infinity()); + T z = pmul(x, x); + T x_le_two = pmul(TWOOPI, pmul(plog(x), generic_j0::run(x))); + x_le_two = pmadd( + psub(z, YZ1), internal::ppolevl::run(z, YP), x_le_two); + x_le_two = pselect(pcmp_le(x, pset1(0.0)), NEG_MAXNUM, x_le_two); + T q = pdiv(pset1(1.0), x); + T w = prsqrt(x); + T p = pmul(w, internal::ppolevl::run(q, MO)); + T u = pmul(q, q); + T xn = pmadd(q, internal::ppolevl::run(u, PH), NEG_PIO4F); + T x_gt_two = pmul(p, psin(padd(xn, x))); + return pselect(pcmp_le(x, pset1(2.0)), x_le_two, x_gt_two); + } +}; + +template +struct generic_y0 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j0.c + * Bessel function of the second kind, order zero + * + * + * + * SYNOPSIS: + * + * double x, y, y0(); + * + * y = y0( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of the second kind, of order + * zero, of the argument. 
+ * + * The domain is divided into the intervals [0, 5] and + * (5, infinity). In the first interval a rational approximation + * R(x) is employed to compute + * y0(x) = R(x) + 2 * log(x) * j0(x) / PI. + * Thus a call to j0() is required. + * + * In the second interval, the Hankel asymptotic expansion + * is employed with two rational functions of degree 6/6 + * and 7/7. + * + * + * + * ACCURACY: + * + * Absolute error, when y0(x) < 1; else relative error: + * + * arithmetic domain # trials peak rms + * DEC 0, 30 9400 7.0e-17 7.9e-18 + * IEEE 0, 30 30000 1.3e-15 1.6e-16 + * + */ + const double PP[] = {7.96936729297347051624E-4, 8.28352392107440799803E-2, + 1.23953371646414299388E0, 5.44725003058768775090E0, + 8.74716500199817011941E0, 5.30324038235394892183E0, + 9.99999999999999997821E-1}; + const double PQ[] = {9.24408810558863637013E-4, 8.56288474354474431428E-2, + 1.25352743901058953537E0, 5.47097740330417105182E0, + 8.76190883237069594232E0, 5.30605288235394617618E0, + 1.00000000000000000218E0}; + const double QP[] = {-1.13663838898469149931E-2, -1.28252718670509318512E0, + -1.95539544257735972385E1, -9.32060152123768231369E1, + -1.77681167980488050595E2, -1.47077505154951170175E2, + -5.14105326766599330220E1, -6.05014350600728481186E0}; + const double QQ[] = {1.00000000000000000000E0, 6.43178256118178023184E1, + 8.56430025976980587198E2, 3.88240183605401609683E3, + 7.24046774195652478189E3, 5.93072701187316984827E3, + 2.06209331660327847417E3, 2.42005740240291393179E2}; + const double YP[] = {1.55924367855235737965E4, -1.46639295903971606143E7, + 5.43526477051876500413E9, -9.82136065717911466409E11, + 8.75906394395366999549E13, -3.46628303384729719441E15, + 4.42733268572569800351E16, -1.84950800436986690637E16}; + const double YQ[] = {1.00000000000000000000E0, 1.04128353664259848412E3, + 6.26107330137134956842E5, 2.68919633393814121987E8, + 8.64002487103935000337E10, 2.02979612750105546709E13, + 3.17157752842975028269E15, 2.50596256172653059228E17}; + const T 
SQ2OPI = pset1(7.9788456080286535587989E-1); /* sqrt(2 / pi) */ + const T TWOOPI = pset1(0.636619772367581343075535); /* 2 / pi */ + const T NEG_PIO4 = pset1(-0.7853981633974483096); /* -pi / 4 */ + const T NEG_MAXNUM = pset1(-NumTraits::infinity()); + + T z = pmul(x, x); + T x_le_five = pdiv(internal::ppolevl::run(z, YP), + internal::ppolevl::run(z, YQ)); + x_le_five = pmadd( + pmul(TWOOPI, plog(x)), generic_j0::run(x), x_le_five); + x_le_five = pselect(pcmp_le(x, pset1(0.0)), NEG_MAXNUM, x_le_five); + T s = pdiv(pset1(25.0), z); + T p = pdiv( + internal::ppolevl::run(s, PP), + internal::ppolevl::run(s, PQ)); + T q = pdiv( + internal::ppolevl::run(s, QP), + internal::ppolevl::run(s, QQ)); + T xn = padd(x, NEG_PIO4); + T w = pdiv(pset1(5.0), x); + p = pmadd(p, psin(xn), pmul(w, pmul(q, pcos(xn)))); + T x_gt_five = pmul(p, pmul(SQ2OPI, prsqrt(x))); + return pselect(pcmp_le(x, pset1(5.0)), x_le_five, x_gt_five); + } +}; + +template +struct bessel_y0_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_y0::run(x); + } +}; + +template +struct bessel_j1_retval { + typedef T type; +}; + +template ::type> +struct generic_j1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_j1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j1f.c + * Bessel function of order one + * + * + * + * SYNOPSIS: + * + * float x, y, j1f(); + * + * y = j1f( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of order one of the argument. + * + * The domain is divided into the intervals [0, 2] and + * (2, infinity). In the first interval a polynomial approximation + * 2 + * (w - r ) x P(w) + * 1 + * 2 + * is used, where w = x and r is the first zero of the function. 
+ * + * In the second interval, the modulus and phase are approximated + * by polynomials of the form Modulus(x) = sqrt(1/x) Q(1/x) + * and Phase(x) = x + 1/x R(1/x^2) - 3pi/4. The function is + * + * j0(x) = Modulus(x) cos( Phase(x) ). + * + * + * + * ACCURACY: + * + * Absolute error: + * arithmetic domain # trials peak rms + * IEEE 0, 2 100000 1.2e-7 2.5e-8 + * IEEE 2, 32 100000 2.0e-7 5.3e-8 + * + * + */ + + const float JP[] = {-4.878788132172128E-009f, 6.009061827883699E-007f, + -4.541343896997497E-005f, 1.937383947804541E-003f, + -3.405537384615824E-002f}; + const float MO1[] = {6.913942741265801E-002f, -2.284801500053359E-001f, + 3.138238455499697E-001f, -2.102302420403875E-001f, + 5.435364690523026E-003f, 1.493389585089498E-001f, + 4.976029650847191E-006f, 7.978845453073848E-001f}; + const float PH1[] = {-4.497014141919556E+001f, 5.073465654089319E+001f, + -2.485774108720340E+001f, 7.222973196770240E+000f, + -1.544842782180211E+000f, 3.503787691653334E-001f, + -1.637986776941202E-001f, 3.749989509080821E-001f}; + const T Z1 = pset1(1.46819706421238932572E1f); + const T NEG_THPIO4F = pset1(-2.35619449019234492885f); /* -3*pi/4 */ + + T y = pabs(x); + T z = pmul(y, y); + T y_le_two = pmul( + psub(z, Z1), + pmul(x, internal::ppolevl::run(z, JP))); + T q = pdiv(pset1(1.0f), y); + T w = prsqrt(y); + T p = pmul(w, internal::ppolevl::run(q, MO1)); + w = pmul(q, q); + T yn = pmadd(q, internal::ppolevl::run(w, PH1), NEG_THPIO4F); + T y_gt_two = pmul(p, pcos(padd(yn, y))); + // j1 is an odd function. This implementation differs from cephes to + // take this fact in to account. Cephes returns -j1(x) for y > 2 range. 
+ y_gt_two = pselect( + pcmp_lt(x, pset1(0.0f)), pnegate(y_gt_two), y_gt_two); + return pselect(pcmp_le(y, pset1(2.0f)), y_le_two, y_gt_two); + } +}; + +template +struct generic_j1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j1.c + * Bessel function of order one + * + * + * + * SYNOPSIS: + * + * double x, y, j1(); + * + * y = j1( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of order one of the argument. + * + * The domain is divided into the intervals [0, 8] and + * (8, infinity). In the first interval a 24 term Chebyshev + * expansion is used. In the second, the asymptotic + * trigonometric representation is employed using two + * rational functions of degree 5/5. + * + * + * + * ACCURACY: + * + * Absolute error: + * arithmetic domain # trials peak rms + * DEC 0, 30 10000 4.0e-17 1.1e-17 + * IEEE 0, 30 30000 2.6e-16 1.1e-16 + * + */ + const double PP[] = {7.62125616208173112003E-4, 7.31397056940917570436E-2, + 1.12719608129684925192E0, 5.11207951146807644818E0, + 8.42404590141772420927E0, 5.21451598682361504063E0, + 1.00000000000000000254E0}; + const double PQ[] = {5.71323128072548699714E-4, 6.88455908754495404082E-2, + 1.10514232634061696926E0, 5.07386386128601488557E0, + 8.39985554327604159757E0, 5.20982848682361821619E0, + 9.99999999999999997461E-1}; + const double QP[] = {5.10862594750176621635E-2, 4.98213872951233449420E0, + 7.58238284132545283818E1, 3.66779609360150777800E2, + 7.10856304998926107277E2, 5.97489612400613639965E2, + 2.11688757100572135698E2, 2.52070205858023719784E1}; + const double QQ[] = {1.00000000000000000000E0, 7.42373277035675149943E1, + 1.05644886038262816351E3, 4.98641058337653607651E3, + 9.56231892404756170795E3, 7.99704160447350683650E3, + 2.82619278517639096600E3, 3.36093607810698293419E2}; + const double RP[] = {-8.99971225705559398224E8, 4.52228297998194034323E11, + -7.27494245221818276015E13, 3.68295732863852883286E15}; + const double RQ[] = {1.00000000000000000000E0, 
6.20836478118054335476E2, + 2.56987256757748830383E5, 8.35146791431949253037E7, + 2.21511595479792499675E10, 4.74914122079991414898E12, + 7.84369607876235854894E14, 8.95222336184627338078E16, + 5.32278620332680085395E18}; + const T Z1 = pset1(1.46819706421238932572E1); + const T Z2 = pset1(4.92184563216946036703E1); + const T NEG_THPIO4 = pset1(-2.35619449019234492885); /* -3*pi/4 */ + const T SQ2OPI = pset1(7.9788456080286535587989E-1); /* sqrt(2 / pi) */ + T y = pabs(x); + T z = pmul(y, y); + T y_le_five = pdiv(internal::ppolevl::run(z, RP), + internal::ppolevl::run(z, RQ)); + y_le_five = pmul(pmul(pmul(y_le_five, x), psub(z, Z1)), psub(z, Z2)); + T s = pdiv(pset1(25.0), z); + T p = pdiv( + internal::ppolevl::run(s, PP), + internal::ppolevl::run(s, PQ)); + T q = pdiv( + internal::ppolevl::run(s, QP), + internal::ppolevl::run(s, QQ)); + T yn = padd(y, NEG_THPIO4); + T w = pdiv(pset1(-5.0), y); + p = pmadd(p, pcos(yn), pmul(w, pmul(q, psin(yn)))); + T y_gt_five = pmul(p, pmul(SQ2OPI, prsqrt(y))); + // j1 is an odd function. This implementation differs from cephes to + // take this fact in to account. Cephes returns -j1(x) for y > 5 range. 
+ y_gt_five = pselect( + pcmp_lt(x, pset1(0.0)), pnegate(y_gt_five), y_gt_five); + return pselect(pcmp_le(y, pset1(5.0)), y_le_five, y_gt_five); + } +}; + +template +struct bessel_j1_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_j1::run(x); + } +}; + +template +struct bessel_y1_retval { + typedef T type; +}; + +template ::type> +struct generic_y1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T&) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return ScalarType(0); + } +}; + +template +struct generic_y1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j1f.c + * Bessel function of second kind of order one + * + * + * + * SYNOPSIS: + * + * double x, y, y1(); + * + * y = y1( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of the second kind of order one + * of the argument. + * + * The domain is divided into the intervals [0, 2] and + * (2, infinity). In the first interval a rational approximation + * R(x) is employed to compute + * + * 2 + * y0(x) = (w - r ) x R(x^2) + 2/pi (ln(x) j1(x) - 1/x) . + * 1 + * + * Thus a call to j1() is required. + * + * In the second interval, the modulus and phase are approximated + * by polynomials of the form Modulus(x) = sqrt(1/x) Q(1/x) + * and Phase(x) = x + 1/x S(1/x^2) - 3pi/4. Then the function is + * + * y0(x) = Modulus(x) sin( Phase(x) ). + * + * + * + * + * ACCURACY: + * + * Absolute error: + * arithmetic domain # trials peak rms + * IEEE 0, 2 100000 2.2e-7 4.6e-8 + * IEEE 2, 32 100000 1.9e-7 5.3e-8 + * + * (error criterion relative when |y1| > 1). 
+ * + */ + + const float YP[] = {8.061978323326852E-009f, -9.496460629917016E-007f, + 6.719543806674249E-005f, -2.641785726447862E-003f, + 4.202369946500099E-002f}; + const float MO1[] = {6.913942741265801E-002f, -2.284801500053359E-001f, + 3.138238455499697E-001f, -2.102302420403875E-001f, + 5.435364690523026E-003f, 1.493389585089498E-001f, + 4.976029650847191E-006f, 7.978845453073848E-001f}; + const float PH1[] = {-4.497014141919556E+001f, 5.073465654089319E+001f, + -2.485774108720340E+001f, 7.222973196770240E+000f, + -1.544842782180211E+000f, 3.503787691653334E-001f, + -1.637986776941202E-001f, 3.749989509080821E-001f}; + const T YO1 = pset1(4.66539330185668857532f); + const T NEG_THPIO4F = pset1(-2.35619449019234492885f); /* -3*pi/4 */ + const T TWOOPI = pset1(0.636619772367581343075535f); /* 2/pi */ + const T NEG_MAXNUM = pset1(-NumTraits::infinity()); + + T z = pmul(x, x); + T x_le_two = pmul(psub(z, YO1), internal::ppolevl::run(z, YP)); + x_le_two = pmadd( + x_le_two, x, + pmul(TWOOPI, pmadd( + generic_j1::run(x), plog(x), + pdiv(pset1(-1.0f), x)))); + x_le_two = pselect(pcmp_lt(x, pset1(0.0f)), NEG_MAXNUM, x_le_two); + + T q = pdiv(pset1(1.0), x); + T w = prsqrt(x); + T p = pmul(w, internal::ppolevl::run(q, MO1)); + w = pmul(q, q); + T xn = pmadd(q, internal::ppolevl::run(w, PH1), NEG_THPIO4F); + T x_gt_two = pmul(p, psin(padd(xn, x))); + return pselect(pcmp_le(x, pset1(2.0)), x_le_two, x_gt_two); + } +}; + +template +struct generic_y1 { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + /* j1.c + * Bessel function of second kind of order one + * + * + * + * SYNOPSIS: + * + * double x, y, y1(); + * + * y = y1( x ); + * + * + * + * DESCRIPTION: + * + * Returns Bessel function of the second kind of order one + * of the argument. + * + * The domain is divided into the intervals [0, 8] and + * (8, infinity). In the first interval a 25 term Chebyshev + * expansion is used, and a call to j1() is required. 
+ * In the second, the asymptotic trigonometric representation + * is employed using two rational functions of degree 5/5. + * + * + * + * ACCURACY: + * + * Absolute error: + * arithmetic domain # trials peak rms + * DEC 0, 30 10000 8.6e-17 1.3e-17 + * IEEE 0, 30 30000 1.0e-15 1.3e-16 + * + * (error criterion relative when |y1| > 1). + * + */ + const double PP[] = {7.62125616208173112003E-4, 7.31397056940917570436E-2, + 1.12719608129684925192E0, 5.11207951146807644818E0, + 8.42404590141772420927E0, 5.21451598682361504063E0, + 1.00000000000000000254E0}; + const double PQ[] = {5.71323128072548699714E-4, 6.88455908754495404082E-2, + 1.10514232634061696926E0, 5.07386386128601488557E0, + 8.39985554327604159757E0, 5.20982848682361821619E0, + 9.99999999999999997461E-1}; + const double QP[] = {5.10862594750176621635E-2, 4.98213872951233449420E0, + 7.58238284132545283818E1, 3.66779609360150777800E2, + 7.10856304998926107277E2, 5.97489612400613639965E2, + 2.11688757100572135698E2, 2.52070205858023719784E1}; + const double QQ[] = {1.00000000000000000000E0, 7.42373277035675149943E1, + 1.05644886038262816351E3, 4.98641058337653607651E3, + 9.56231892404756170795E3, 7.99704160447350683650E3, + 2.82619278517639096600E3, 3.36093607810698293419E2}; + const double YP[] = {1.26320474790178026440E9, -6.47355876379160291031E11, + 1.14509511541823727583E14, -8.12770255501325109621E15, + 2.02439475713594898196E17, -7.78877196265950026825E17}; + const double YQ[] = {1.00000000000000000000E0, 5.94301592346128195359E2, + 2.35564092943068577943E5, 7.34811944459721705660E7, + 1.87601316108706159478E10, 3.88231277496238566008E12, + 6.20557727146953693363E14, 6.87141087355300489866E16, + 3.97270608116560655612E18}; + const T SQ2OPI = pset1(.79788456080286535588); + const T NEG_THPIO4 = pset1(-2.35619449019234492885); /* -3*pi/4 */ + const T TWOOPI = pset1(0.636619772367581343075535); /* 2/pi */ + const T NEG_MAXNUM = pset1(-NumTraits::infinity()); + + T z = pmul(x, x); + T x_le_five = 
pdiv(internal::ppolevl::run(z, YP), + internal::ppolevl::run(z, YQ)); + x_le_five = pmadd( + x_le_five, x, pmul( + TWOOPI, pmadd(generic_j1::run(x), plog(x), + pdiv(pset1(-1.0), x)))); + + x_le_five = pselect(pcmp_le(x, pset1(0.0)), NEG_MAXNUM, x_le_five); + T s = pdiv(pset1(25.0), z); + T p = pdiv( + internal::ppolevl::run(s, PP), + internal::ppolevl::run(s, PQ)); + T q = pdiv( + internal::ppolevl::run(s, QP), + internal::ppolevl::run(s, QQ)); + T xn = padd(x, NEG_THPIO4); + T w = pdiv(pset1(5.0), x); + p = pmadd(p, psin(xn), pmul(w, pmul(q, pcos(xn)))); + T x_gt_five = pmul(p, pmul(SQ2OPI, prsqrt(x))); + return pselect(pcmp_le(x, pset1(5.0)), x_le_five, x_gt_five); + } +}; + +template +struct bessel_y1_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T x) { + return generic_y1::run(x); + } +}; + +} // end namespace internal + +namespace numext { + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_i0, Scalar) + bessel_i0(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_i0, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_i0e, Scalar) + bessel_i0e(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_i0e, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_i1, Scalar) + bessel_i1(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_i1, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_i1e, Scalar) + bessel_i1e(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_i1e, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_k0, Scalar) + bessel_k0(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_k0, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_k0e, Scalar) + bessel_k0e(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_k0e, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_k1, Scalar) + 
bessel_k1(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_k1, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_k1e, Scalar) + bessel_k1e(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_k1e, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_j0, Scalar) + bessel_j0(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_j0, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_y0, Scalar) + bessel_y0(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_y0, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_j1, Scalar) + bessel_j1(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_j1, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(bessel_y1, Scalar) + bessel_y1(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(bessel_y1, Scalar)::run(x); +} + +} // end namespace numext + +} // end namespace Eigen + +#endif // EIGEN_BESSEL_FUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsPacketMath.h b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsPacketMath.h new file mode 100644 index 0000000..943d10f --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/BesselFunctionsPacketMath.h @@ -0,0 +1,118 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_BESSELFUNCTIONS_PACKETMATH_H +#define EIGEN_BESSELFUNCTIONS_PACKETMATH_H + +namespace Eigen { + +namespace internal { + +/** \internal \returns the exponentially scaled modified Bessel function of + * order zero i0(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_i0(const Packet& x) { + return numext::bessel_i0(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order zero i0e(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_i0e(const Packet& x) { + return numext::bessel_i0e(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order one i1(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_i1(const Packet& x) { + return numext::bessel_i1(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order one i1e(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_i1e(const Packet& x) { + return numext::bessel_i1e(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order zero j0(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_j0(const Packet& x) { + return numext::bessel_j0(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order zero j1(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_j1(const Packet& x) { + return numext::bessel_j1(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order one y0(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet 
pbessel_y0(const Packet& x) { + return numext::bessel_y0(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order one y1(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_y1(const Packet& x) { + return numext::bessel_y1(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order zero k0(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_k0(const Packet& x) { + return numext::bessel_k0(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order zero k0e(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_k0e(const Packet& x) { + return numext::bessel_k0e(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order one k1e(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_k1(const Packet& x) { + return numext::bessel_k1(x); +} + +/** \internal \returns the exponentially scaled modified Bessel function of + * order one k1e(\a a) (coeff-wise) */ +template +EIGEN_DEVICE_FUNC EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pbessel_k1e(const Packet& x) { + return numext::bessel_k1e(x); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BESSELFUNCTIONS_PACKETMATH_H + diff --git a/external/unsupported/Eigen/src/SpecialFunctions/HipVectorCompatibility.h b/external/unsupported/Eigen/src/SpecialFunctions/HipVectorCompatibility.h new file mode 100644 index 0000000..d7b231a --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/HipVectorCompatibility.h @@ -0,0 +1,67 @@ +#ifndef HIP_VECTOR_COMPATIBILITY_H +#define HIP_VECTOR_COMPATIBILITY_H + +namespace hip_impl { + template struct Scalar_accessor; +} // end 
namespace hip_impl + +namespace Eigen { +namespace internal { + +#define HIP_SCALAR_ACCESSOR_BUILDER(NAME) \ +template \ +struct NAME > : NAME {}; + +#define HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(NAME) \ +template \ +struct NAME##_impl > : NAME##_impl {}; \ +template \ +struct NAME##_retval > : NAME##_retval {}; + +#define HIP_SCALAR_ACCESSOR_BUILDER_IGAMMA(NAME) \ +template \ +struct NAME , mode> : NAME {}; + +#if EIGEN_HAS_C99_MATH +HIP_SCALAR_ACCESSOR_BUILDER(betainc_helper) +HIP_SCALAR_ACCESSOR_BUILDER(incbeta_cfe) + +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(erf) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(erfc) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(igammac) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(lgamma) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(ndtri) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(polygamma) + +HIP_SCALAR_ACCESSOR_BUILDER_IGAMMA(igamma_generic_impl) +#endif + +HIP_SCALAR_ACCESSOR_BUILDER(digamma_impl_maybe_poly) +HIP_SCALAR_ACCESSOR_BUILDER(zeta_impl_series) + +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_i0) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_i0e) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_i1) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_i1e) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_j0) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_j1) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_k0) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_k0e) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_k1) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_k1e) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_y0) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(bessel_y1) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(betainc) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(digamma) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(gamma_sample_der_alpha) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(igamma_der_a) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(igamma) +HIP_SCALAR_ACCESSOR_BUILDER_RETVAL(zeta) + +HIP_SCALAR_ACCESSOR_BUILDER_IGAMMA(igamma_series_impl) +HIP_SCALAR_ACCESSOR_BUILDER_IGAMMA(igammac_cf_impl) + +} // end namespace internal +} // end namespace Eigen + +#endif // 
HIP_VECTOR_COMPATIBILITY_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h new file mode 100644 index 0000000..691ff4d --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsArrayAPI.h @@ -0,0 +1,167 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + + +#ifndef EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H +#define EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H + +namespace Eigen { + +/** \cpp11 \returns an expression of the coefficient-wise igamma(\a a, \a x) to the given arrays. + * + * This function computes the coefficient-wise incomplete gamma function. + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of igammac(T,T) for any scalar + * type T to be supported. + * + * \sa Eigen::igammac(), Eigen::lgamma() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> +igamma(const Eigen::ArrayBase& a, const Eigen::ArrayBase& x) +{ + return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( + a.derived(), + x.derived() + ); +} + +/** \cpp11 \returns an expression of the coefficient-wise igamma_der_a(\a a, \a x) to the given arrays. + * + * This function computes the coefficient-wise derivative of the incomplete + * gamma function with respect to the parameter a. + * + * \note This function supports only float and double scalar types in c++11 + * mode. 
To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations + * of igamma_der_a(T,T) for any scalar + * type T to be supported. + * + * \sa Eigen::igamma(), Eigen::lgamma() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> +igamma_der_a(const Eigen::ArrayBase& a, const Eigen::ArrayBase& x) { + return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( + a.derived(), + x.derived()); +} + +/** \cpp11 \returns an expression of the coefficient-wise gamma_sample_der_alpha(\a alpha, \a sample) to the given arrays. + * + * This function computes the coefficient-wise derivative of the sample + * of a Gamma(alpha, 1) random variable with respect to the parameter alpha. + * + * \note This function supports only float and double scalar types in c++11 + * mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations + * of gamma_sample_der_alpha(T,T) for any scalar + * type T to be supported. + * + * \sa Eigen::igamma(), Eigen::lgamma() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp, const AlphaDerived, const SampleDerived> +gamma_sample_der_alpha(const Eigen::ArrayBase& alpha, const Eigen::ArrayBase& sample) { + return Eigen::CwiseBinaryOp, const AlphaDerived, const SampleDerived>( + alpha.derived(), + sample.derived()); +} + +/** \cpp11 \returns an expression of the coefficient-wise igammac(\a a, \a x) to the given arrays. + * + * This function computes the coefficient-wise complementary incomplete gamma function. + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of igammac(T,T) for any scalar + * type T to be supported. 
+ * + * \sa Eigen::igamma(), Eigen::lgamma() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp, const Derived, const ExponentDerived> +igammac(const Eigen::ArrayBase& a, const Eigen::ArrayBase& x) +{ + return Eigen::CwiseBinaryOp, const Derived, const ExponentDerived>( + a.derived(), + x.derived() + ); +} + +/** \cpp11 \returns an expression of the coefficient-wise polygamma(\a n, \a x) to the given arrays. + * + * It returns the \a n -th derivative of the digamma(psi) evaluated at \c x. + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of polygamma(T,T) for any scalar + * type T to be supported. + * + * \sa Eigen::digamma() + */ +// * \warning Be careful with the order of the parameters: x.polygamma(n) is equivalent to polygamma(n,x) +// * \sa ArrayBase::polygamma() +template +EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp, const DerivedN, const DerivedX> +polygamma(const Eigen::ArrayBase& n, const Eigen::ArrayBase& x) +{ + return Eigen::CwiseBinaryOp, const DerivedN, const DerivedX>( + n.derived(), + x.derived() + ); +} + +/** \cpp11 \returns an expression of the coefficient-wise betainc(\a x, \a a, \a b) to the given arrays. + * + * This function computes the regularized incomplete beta function (integral). + * + * \note This function supports only float and double scalar types in c++11 mode. To support other scalar types, + * or float/double in non c++11 mode, the user has to provide implementations of betainc(T,T,T) for any scalar + * type T to be supported. 
+ * + * \sa Eigen::betainc(), Eigen::lgamma() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseTernaryOp, const ArgADerived, const ArgBDerived, const ArgXDerived> +betainc(const Eigen::ArrayBase& a, const Eigen::ArrayBase& b, const Eigen::ArrayBase& x) +{ + return Eigen::CwiseTernaryOp, const ArgADerived, const ArgBDerived, const ArgXDerived>( + a.derived(), + b.derived(), + x.derived() + ); +} + + +/** \returns an expression of the coefficient-wise zeta(\a x, \a q) to the given arrays. + * + * It returns the Riemann zeta function of two arguments \a x and \a q: + * + * \param x is the exponent, it must be > 1 + * \param q is the shift, it must be > 0 + * + * \note This function supports only float and double scalar types. To support other scalar types, the user has + * to provide implementations of zeta(T,T) for any scalar type T to be supported. + * + * \sa ArrayBase::zeta() + */ +template +EIGEN_STRONG_INLINE const Eigen::CwiseBinaryOp, const DerivedX, const DerivedQ> +zeta(const Eigen::ArrayBase& x, const Eigen::ArrayBase& q) +{ + return Eigen::CwiseBinaryOp, const DerivedX, const DerivedQ>( + x.derived(), + q.derived() + ); +} + + +} // end namespace Eigen + +#endif // EIGEN_SPECIALFUNCTIONS_ARRAYAPI_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsBFloat16.h b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsBFloat16.h new file mode 100644 index 0000000..2d94231 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsBFloat16.h @@ -0,0 +1,58 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPECIALFUNCTIONS_BFLOAT16_H +#define EIGEN_SPECIALFUNCTIONS_BFLOAT16_H + +namespace Eigen { +namespace numext { + +#if EIGEN_HAS_C99_MATH +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 lgamma(const Eigen::bfloat16& a) { + return Eigen::bfloat16(Eigen::numext::lgamma(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 digamma(const Eigen::bfloat16& a) { + return Eigen::bfloat16(Eigen::numext::digamma(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 zeta(const Eigen::bfloat16& x, const Eigen::bfloat16& q) { + return Eigen::bfloat16(Eigen::numext::zeta(static_cast(x), static_cast(q))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 polygamma(const Eigen::bfloat16& n, const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::polygamma(static_cast(n), static_cast(x))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 erf(const Eigen::bfloat16& a) { + return Eigen::bfloat16(Eigen::numext::erf(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 erfc(const Eigen::bfloat16& a) { + return Eigen::bfloat16(Eigen::numext::erfc(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 ndtri(const Eigen::bfloat16& a) { + return Eigen::bfloat16(Eigen::numext::ndtri(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 igamma(const Eigen::bfloat16& a, const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::igamma(static_cast(a), static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 igamma_der_a(const Eigen::bfloat16& a, const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::igamma_der_a(static_cast(a), static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 gamma_sample_der_alpha(const Eigen::bfloat16& alpha, const Eigen::bfloat16& 
sample) { + return Eigen::bfloat16(Eigen::numext::gamma_sample_der_alpha(static_cast(alpha), static_cast(sample))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 igammac(const Eigen::bfloat16& a, const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::igammac(static_cast(a), static_cast(x))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::bfloat16 betainc(const Eigen::bfloat16& a, const Eigen::bfloat16& b, const Eigen::bfloat16& x) { + return Eigen::bfloat16(Eigen::numext::betainc(static_cast(a), static_cast(b), static_cast(x))); +} +#endif + +} // end namespace numext +} // end namespace Eigen + +#endif // EIGEN_SPECIALFUNCTIONS_BFLOAT16_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h new file mode 100644 index 0000000..abefe99 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsFunctors.h @@ -0,0 +1,330 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Eugene Brevdo +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPECIALFUNCTIONS_FUNCTORS_H +#define EIGEN_SPECIALFUNCTIONS_FUNCTORS_H + +namespace Eigen { + +namespace internal { + + +/** \internal + * \brief Template functor to compute the incomplete gamma function igamma(a, x) + * + * \sa class CwiseBinaryOp, Cwise::igamma + */ +template struct scalar_igamma_op : binary_op_base +{ + EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const { + using numext::igamma; return igamma(a, x); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const { + return internal::pigamma(a, x); + } +}; +template +struct functor_traits > { + enum { + // Guesstimate + Cost = 20 * NumTraits::MulCost + 10 * NumTraits::AddCost, + PacketAccess = packet_traits::HasIGamma + }; +}; + +/** \internal + * \brief Template functor to compute the derivative of the incomplete gamma + * function igamma_der_a(a, x) + * + * \sa class CwiseBinaryOp, Cwise::igamma_der_a + */ +template +struct scalar_igamma_der_a_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_igamma_der_a_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& a, const Scalar& x) const { + using numext::igamma_der_a; + return igamma_der_a(a, x); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const { + return internal::pigamma_der_a(a, x); + } +}; +template +struct functor_traits > { + enum { + // 2x the cost of igamma + Cost = 40 * NumTraits::MulCost + 20 * NumTraits::AddCost, + PacketAccess = packet_traits::HasIGammaDerA + }; +}; + +/** \internal + * \brief Template functor to compute the derivative of the sample + * of a Gamma(alpha, 1) random variable with respect to the parameter alpha + * gamma_sample_der_alpha(alpha, sample) + * + * \sa class CwiseBinaryOp, Cwise::gamma_sample_der_alpha + */ +template +struct scalar_gamma_sample_der_alpha_op { + 
EIGEN_EMPTY_STRUCT_CTOR(scalar_gamma_sample_der_alpha_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator()(const Scalar& alpha, const Scalar& sample) const { + using numext::gamma_sample_der_alpha; + return gamma_sample_der_alpha(alpha, sample); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& alpha, const Packet& sample) const { + return internal::pgamma_sample_der_alpha(alpha, sample); + } +}; +template +struct functor_traits > { + enum { + // 2x the cost of igamma, minus the lgamma cost (the lgamma cancels out) + Cost = 30 * NumTraits::MulCost + 15 * NumTraits::AddCost, + PacketAccess = packet_traits::HasGammaSampleDerAlpha + }; +}; + +/** \internal + * \brief Template functor to compute the complementary incomplete gamma function igammac(a, x) + * + * \sa class CwiseBinaryOp, Cwise::igammac + */ +template struct scalar_igammac_op : binary_op_base +{ + EIGEN_EMPTY_STRUCT_CTOR(scalar_igammac_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& x) const { + using numext::igammac; return igammac(a, x); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& x) const + { + return internal::pigammac(a, x); + } +}; +template +struct functor_traits > { + enum { + // Guesstimate + Cost = 20 * NumTraits::MulCost + 10 * NumTraits::AddCost, + PacketAccess = packet_traits::HasIGammac + }; +}; + + +/** \internal + * \brief Template functor to compute the incomplete beta integral betainc(a, b, x) + * + */ +template struct scalar_betainc_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_betainc_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& a, const Scalar& b) const { + using numext::betainc; return betainc(x, a, b); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Packet packetOp(const Packet& x, const Packet& a, const Packet& b) const + { + return internal::pbetainc(x, 
a, b); + } +}; +template +struct functor_traits > { + enum { + // Guesstimate + Cost = 400 * NumTraits::MulCost + 400 * NumTraits::AddCost, + PacketAccess = packet_traits::HasBetaInc + }; +}; + + +/** \internal + * \brief Template functor to compute the natural log of the absolute + * value of Gamma of a scalar + * \sa class CwiseUnaryOp, Cwise::lgamma() + */ +template struct scalar_lgamma_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_lgamma_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { + using numext::lgamma; return lgamma(a); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::plgamma(a); } +}; +template +struct functor_traits > +{ + enum { + // Guesstimate + Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, + PacketAccess = packet_traits::HasLGamma + }; +}; + +/** \internal + * \brief Template functor to compute psi, the derivative of lgamma of a scalar. + * \sa class CwiseUnaryOp, Cwise::digamma() + */ +template struct scalar_digamma_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_digamma_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { + using numext::digamma; return digamma(a); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::pdigamma(a); } +}; +template +struct functor_traits > +{ + enum { + // Guesstimate + Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, + PacketAccess = packet_traits::HasDiGamma + }; +}; + +/** \internal + * \brief Template functor to compute the Riemann Zeta function of two arguments. 
+ * \sa class CwiseUnaryOp, Cwise::zeta() + */ +template struct scalar_zeta_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_zeta_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& x, const Scalar& q) const { + using numext::zeta; return zeta(x, q); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x, const Packet& q) const { return internal::pzeta(x, q); } +}; +template +struct functor_traits > +{ + enum { + // Guesstimate + Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, + PacketAccess = packet_traits::HasZeta + }; +}; + +/** \internal + * \brief Template functor to compute the polygamma function. + * \sa class CwiseUnaryOp, Cwise::polygamma() + */ +template struct scalar_polygamma_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_polygamma_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& n, const Scalar& x) const { + using numext::polygamma; return polygamma(n, x); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& n, const Packet& x) const { return internal::ppolygamma(n, x); } +}; +template +struct functor_traits > +{ + enum { + // Guesstimate + Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, + PacketAccess = packet_traits::HasPolygamma + }; +}; + +/** \internal + * \brief Template functor to compute the error function of a scalar + * \sa class CwiseUnaryOp, ArrayBase::erf() + */ +template struct scalar_erf_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_erf_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar + operator()(const Scalar& a) const { + return numext::erf(a); + } + template + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& x) const { + return perf(x); + } +}; +template +struct functor_traits > { + enum { + PacketAccess = packet_traits::HasErf, + Cost = + (PacketAccess +#ifdef EIGEN_VECTORIZE_FMA + // TODO(rmlarsen): Move the FMA cost 
model to a central location. + // Haswell can issue 2 add/mul/madd per cycle. + // 10 pmadd, 2 pmul, 1 div, 2 other + ? (2 * NumTraits::AddCost + + 7 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value) +#else + ? (12 * NumTraits::AddCost + + 12 * NumTraits::MulCost + + scalar_div_cost::HasDiv>::value) +#endif + // Assume for simplicity that this is as expensive as an exp(). + : (functor_traits >::Cost)) + }; +}; + +/** \internal + * \brief Template functor to compute the Complementary Error Function + * of a scalar + * \sa class CwiseUnaryOp, Cwise::erfc() + */ +template struct scalar_erfc_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_erfc_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { + using numext::erfc; return erfc(a); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::perfc(a); } +}; +template +struct functor_traits > +{ + enum { + // Guesstimate + Cost = 10 * NumTraits::MulCost + 5 * NumTraits::AddCost, + PacketAccess = packet_traits::HasErfc + }; +}; + +/** \internal + * \brief Template functor to compute the Inverse of the normal distribution + * function of a scalar + * \sa class CwiseUnaryOp, Cwise::ndtri() + */ +template struct scalar_ndtri_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_ndtri_op) + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { + using numext::ndtri; return ndtri(a); + } + typedef typename packet_traits::type Packet; + EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet packetOp(const Packet& a) const { return internal::pndtri(a); } +}; +template +struct functor_traits > +{ + enum { + // On average, We are evaluating rational functions with degree N=9 in the + // numerator and denominator. This results in 2*N additions and 2*N + // multiplications. 
+ Cost = 18 * NumTraits::MulCost + 18 * NumTraits::AddCost, + PacketAccess = packet_traits::HasNdtri + }; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPECIALFUNCTIONS_FUNCTORS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h new file mode 100644 index 0000000..2a3a531 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsHalf.h @@ -0,0 +1,58 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPECIALFUNCTIONS_HALF_H +#define EIGEN_SPECIALFUNCTIONS_HALF_H + +namespace Eigen { +namespace numext { + +#if EIGEN_HAS_C99_MATH +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half lgamma(const Eigen::half& a) { + return Eigen::half(Eigen::numext::lgamma(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half digamma(const Eigen::half& a) { + return Eigen::half(Eigen::numext::digamma(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half zeta(const Eigen::half& x, const Eigen::half& q) { + return Eigen::half(Eigen::numext::zeta(static_cast(x), static_cast(q))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half polygamma(const Eigen::half& n, const Eigen::half& x) { + return Eigen::half(Eigen::numext::polygamma(static_cast(n), static_cast(x))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erf(const Eigen::half& a) { + return Eigen::half(Eigen::numext::erf(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half erfc(const Eigen::half& a) { + return Eigen::half(Eigen::numext::erfc(static_cast(a))); +} +template<> 
EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half ndtri(const Eigen::half& a) { + return Eigen::half(Eigen::numext::ndtri(static_cast(a))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma(const Eigen::half& a, const Eigen::half& x) { + return Eigen::half(Eigen::numext::igamma(static_cast(a), static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igamma_der_a(const Eigen::half& a, const Eigen::half& x) { + return Eigen::half(Eigen::numext::igamma_der_a(static_cast(a), static_cast(x))); +} +template <> +EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half gamma_sample_der_alpha(const Eigen::half& alpha, const Eigen::half& sample) { + return Eigen::half(Eigen::numext::gamma_sample_der_alpha(static_cast(alpha), static_cast(sample))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half igammac(const Eigen::half& a, const Eigen::half& x) { + return Eigen::half(Eigen::numext::igammac(static_cast(a), static_cast(x))); +} +template<> EIGEN_STRONG_INLINE EIGEN_DEVICE_FUNC Eigen::half betainc(const Eigen::half& a, const Eigen::half& b, const Eigen::half& x) { + return Eigen::half(Eigen::numext::betainc(static_cast(a), static_cast(b), static_cast(x))); +} +#endif + +} // end namespace numext +} // end namespace Eigen + +#endif // EIGEN_SPECIALFUNCTIONS_HALF_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h new file mode 100644 index 0000000..f1c260e --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsImpl.h @@ -0,0 +1,2045 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2015 Eugene Brevdo +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPECIAL_FUNCTIONS_H +#define EIGEN_SPECIAL_FUNCTIONS_H + +namespace Eigen { +namespace internal { + +// Parts of this code are based on the Cephes Math Library. +// +// Cephes Math Library Release 2.8: June, 2000 +// Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier +// +// Permission has been kindly provided by the original author +// to incorporate the Cephes software into the Eigen codebase: +// +// From: Stephen Moshier +// To: Eugene Brevdo +// Subject: Re: Permission to wrap several cephes functions in Eigen +// +// Hello Eugene, +// +// Thank you for writing. +// +// If your licensing is similar to BSD, the formal way that has been +// handled is simply to add a statement to the effect that you are incorporating +// the Cephes software by permission of the author. +// +// Good luck with your project, +// Steve + + +/**************************************************************************** + * Implementation of lgamma, requires C++11/C99 * + ****************************************************************************/ + +template +struct lgamma_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Scalar) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +template +struct lgamma_retval { + typedef Scalar type; +}; + +#if EIGEN_HAS_C99_MATH +// Since glibc 2.19 +#if defined(__GLIBC__) && ((__GLIBC__>=2 && __GLIBC_MINOR__ >= 19) || __GLIBC__>2) \ + && (defined(_DEFAULT_SOURCE) || defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) +#define EIGEN_HAS_LGAMMA_R +#endif + +// Glibc versions before 2.19 +#if defined(__GLIBC__) && ((__GLIBC__==2 && __GLIBC_MINOR__ < 19) || __GLIBC__<2) \ + && (defined(_BSD_SOURCE) || defined(_SVID_SOURCE)) +#define EIGEN_HAS_LGAMMA_R +#endif + +template <> +struct lgamma_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float run(float x) { +#if !defined(EIGEN_GPU_COMPILE_PHASE) && defined (EIGEN_HAS_LGAMMA_R) && 
!defined(__APPLE__) + int dummy; + return ::lgammaf_r(x, &dummy); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::lgamma(x); +#else + return ::lgammaf(x); +#endif + } +}; + +template <> +struct lgamma_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double run(double x) { +#if !defined(EIGEN_GPU_COMPILE_PHASE) && defined(EIGEN_HAS_LGAMMA_R) && !defined(__APPLE__) + int dummy; + return ::lgamma_r(x, &dummy); +#elif defined(SYCL_DEVICE_ONLY) + return cl::sycl::lgamma(x); +#else + return ::lgamma(x); +#endif + } +}; + +#undef EIGEN_HAS_LGAMMA_R +#endif + +/**************************************************************************** + * Implementation of digamma (psi), based on Cephes * + ****************************************************************************/ + +template +struct digamma_retval { + typedef Scalar type; +}; + +/* + * + * Polynomial evaluation helper for the Psi (digamma) function. + * + * digamma_impl_maybe_poly::run(s) evaluates the asymptotic Psi expansion for + * input Scalar s, assuming s is above 10.0. + * + * If s is above a certain threshold for the given Scalar type, zero + * is returned. Otherwise the polynomial is evaluated with enough + * coefficients for results matching Scalar machine precision. 
+ * + * + */ +template +struct digamma_impl_maybe_poly { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Scalar) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + + +template <> +struct digamma_impl_maybe_poly { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float run(const float s) { + const float A[] = { + -4.16666666666666666667E-3f, + 3.96825396825396825397E-3f, + -8.33333333333333333333E-3f, + 8.33333333333333333333E-2f + }; + + float z; + if (s < 1.0e8f) { + z = 1.0f / (s * s); + return z * internal::ppolevl::run(z, A); + } else return 0.0f; + } +}; + +template <> +struct digamma_impl_maybe_poly { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double run(const double s) { + const double A[] = { + 8.33333333333333333333E-2, + -2.10927960927960927961E-2, + 7.57575757575757575758E-3, + -4.16666666666666666667E-3, + 3.96825396825396825397E-3, + -8.33333333333333333333E-3, + 8.33333333333333333333E-2 + }; + + double z; + if (s < 1.0e17) { + z = 1.0 / (s * s); + return z * internal::ppolevl::run(z, A); + } + else return 0.0; + } +}; + +template +struct digamma_impl { + EIGEN_DEVICE_FUNC + static Scalar run(Scalar x) { + /* + * + * Psi (digamma) function (modified for Eigen) + * + * + * SYNOPSIS: + * + * double x, y, psi(); + * + * y = psi( x ); + * + * + * DESCRIPTION: + * + * d - + * psi(x) = -- ln | (x) + * dx + * + * is the logarithmic derivative of the gamma function. + * For integer x, + * n-1 + * - + * psi(n) = -EUL + > 1/k. + * - + * k=1 + * + * If x is negative, it is transformed to a positive argument by the + * reflection formula psi(1-x) = psi(x) + pi cot(pi x). + * For general positive x, the argument is made greater than 10 + * using the recurrence psi(x+1) = psi(x) + 1/x. + * Then the following asymptotic expansion is applied: + * + * inf. B + * - 2k + * psi(x) = log(x) - 1/2x - > ------- + * - 2k + * k=1 2k x + * + * where the B2k are Bernoulli numbers. 
+ * + * ACCURACY (float): + * Relative error (except absolute when |psi| < 1): + * arithmetic domain # trials peak rms + * IEEE 0,30 30000 1.3e-15 1.4e-16 + * IEEE -30,0 40000 1.5e-15 2.2e-16 + * + * ACCURACY (double): + * Absolute error, relative when |psi| > 1 : + * arithmetic domain # trials peak rms + * IEEE -33,0 30000 8.2e-7 1.2e-7 + * IEEE 0,33 100000 7.3e-7 7.7e-8 + * + * ERROR MESSAGES: + * message condition value returned + * psi singularity x integer <=0 INFINITY + */ + + Scalar p, q, nz, s, w, y; + bool negative = false; + + const Scalar nan = NumTraits::quiet_NaN(); + const Scalar m_pi = Scalar(EIGEN_PI); + + const Scalar zero = Scalar(0); + const Scalar one = Scalar(1); + const Scalar half = Scalar(0.5); + nz = zero; + + if (x <= zero) { + negative = true; + q = x; + p = numext::floor(q); + if (p == q) { + return nan; + } + /* Remove the zeros of tan(m_pi x) + * by subtracting the nearest integer from x + */ + nz = q - p; + if (nz != half) { + if (nz > half) { + p += one; + nz = q - p; + } + nz = m_pi / numext::tan(m_pi * nz); + } + else { + nz = zero; + } + x = one - x; + } + + /* use the recurrence psi(x+1) = psi(x) + 1/x. */ + s = x; + w = zero; + while (s < Scalar(10)) { + w += one / s; + s += one; + } + + y = digamma_impl_maybe_poly::run(s); + + y = numext::log(s) - (half / s) - y - w; + + return (negative) ? y - nz : y; + } +}; + +/**************************************************************************** + * Implementation of erf, requires C++11/C99 * + ****************************************************************************/ + +/** \internal \returns the error function of \a a (coeff-wise) + Doesn't do anything fancy, just a 13/8-degree rational interpolant which + is accurate up to a couple of ulp in the range [-4, 4], outside of which + fl(erf(x)) = +/-1. + + This implementation works on both scalars and Ts. 
+*/ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T generic_fast_erf_float(const T& a_x) { + // Clamp the inputs to the range [-4, 4] since anything outside + // this range is +/-1.0f in single-precision. + const T plus_4 = pset1(4.f); + const T minus_4 = pset1(-4.f); + const T x = pmax(pmin(a_x, plus_4), minus_4); + // The monomial coefficients of the numerator polynomial (odd). + const T alpha_1 = pset1(-1.60960333262415e-02f); + const T alpha_3 = pset1(-2.95459980854025e-03f); + const T alpha_5 = pset1(-7.34990630326855e-04f); + const T alpha_7 = pset1(-5.69250639462346e-05f); + const T alpha_9 = pset1(-2.10102402082508e-06f); + const T alpha_11 = pset1(2.77068142495902e-08f); + const T alpha_13 = pset1(-2.72614225801306e-10f); + + // The monomial coefficients of the denominator polynomial (even). + const T beta_0 = pset1(-1.42647390514189e-02f); + const T beta_2 = pset1(-7.37332916720468e-03f); + const T beta_4 = pset1(-1.68282697438203e-03f); + const T beta_6 = pset1(-2.13374055278905e-04f); + const T beta_8 = pset1(-1.45660718464996e-05f); + + // Since the polynomials are odd/even, we need x^2. + const T x2 = pmul(x, x); + + // Evaluate the numerator polynomial p. + T p = pmadd(x2, alpha_13, alpha_11); + p = pmadd(x2, p, alpha_9); + p = pmadd(x2, p, alpha_7); + p = pmadd(x2, p, alpha_5); + p = pmadd(x2, p, alpha_3); + p = pmadd(x2, p, alpha_1); + p = pmul(x, p); + + // Evaluate the denominator polynomial p. + T q = pmadd(x2, beta_8, beta_6); + q = pmadd(x2, q, beta_4); + q = pmadd(x2, q, beta_2); + q = pmadd(x2, q, beta_0); + + // Divide the numerator by the denominator. 
+ return pdiv(p, q); +} + +template +struct erf_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE T run(const T& x) { + return generic_fast_erf_float(x); + } +}; + +template +struct erf_retval { + typedef Scalar type; +}; + +#if EIGEN_HAS_C99_MATH +template <> +struct erf_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float run(float x) { +#if defined(SYCL_DEVICE_ONLY) + return cl::sycl::erf(x); +#else + return generic_fast_erf_float(x); +#endif + } +}; + +template <> +struct erf_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double run(double x) { +#if defined(SYCL_DEVICE_ONLY) + return cl::sycl::erf(x); +#else + return ::erf(x); +#endif + } +}; +#endif // EIGEN_HAS_C99_MATH + +/*************************************************************************** +* Implementation of erfc, requires C++11/C99 * +****************************************************************************/ + +template +struct erfc_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Scalar) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +template +struct erfc_retval { + typedef Scalar type; +}; + +#if EIGEN_HAS_C99_MATH +template <> +struct erfc_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float run(const float x) { +#if defined(SYCL_DEVICE_ONLY) + return cl::sycl::erfc(x); +#else + return ::erfcf(x); +#endif + } +}; + +template <> +struct erfc_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double run(const double x) { +#if defined(SYCL_DEVICE_ONLY) + return cl::sycl::erfc(x); +#else + return ::erfc(x); +#endif + } +}; +#endif // EIGEN_HAS_C99_MATH + + +/*************************************************************************** +* Implementation of ndtri. * +****************************************************************************/ + +/* Inverse of Normal distribution function (modified for Eigen). 
+ * + * + * SYNOPSIS: + * + * double x, y, ndtri(); + * + * x = ndtri( y ); + * + * + * + * DESCRIPTION: + * + * Returns the argument, x, for which the area under the + * Gaussian probability density function (integrated from + * minus infinity to x) is equal to y. + * + * + * For small arguments 0 < y < exp(-2), the program computes + * z = sqrt( -2.0 * log(y) ); then the approximation is + * x = z - log(z)/z - (1/z) P(1/z) / Q(1/z). + * There are two rational functions P/Q, one for 0 < y < exp(-32) + * and the other for y up to exp(-2). For larger arguments, + * w = y - 0.5, and x/sqrt(2pi) = w + w**3 R(w**2)/S(w**2)). + * + * + * ACCURACY: + * + * Relative error: + * arithmetic domain # trials peak rms + * DEC 0.125, 1 5500 9.5e-17 2.1e-17 + * DEC 6e-39, 0.135 3500 5.7e-17 1.3e-17 + * IEEE 0.125, 1 20000 7.2e-16 1.3e-16 + * IEEE 3e-308, 0.135 50000 4.6e-16 9.8e-17 + * + * + * ERROR MESSAGES: + * + * message condition value returned + * ndtri domain x <= 0 -MAXNUM + * ndtri domain x >= 1 MAXNUM + * + */ + /* + Cephes Math Library Release 2.2: June, 1992 + Copyright 1985, 1987, 1992 by Stephen L. Moshier + Direct inquiries to 30 Frost Street, Cambridge, MA 02140 + */ + + +// TODO: Add a cheaper approximation for float. + + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T flipsign( + const T& should_flipsign, const T& x) { + typedef typename unpacket_traits::type Scalar; + const T sign_mask = pset1(Scalar(-0.0)); + T sign_bit = pand(should_flipsign, sign_mask); + return pxor(sign_bit, x); +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double flipsign( + const double& should_flipsign, const double& x) { + return should_flipsign == 0 ? x : -x; +} + +template<> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float flipsign( + const float& should_flipsign, const float& x) { + return should_flipsign == 0 ? 
x : -x; +} + +// We split this computation in to two so that in the scalar path +// only one branch is evaluated (due to our template specialization of pselect +// being an if statement.) + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T generic_ndtri_gt_exp_neg_two(const T& b) { + const ScalarType p0[] = { + ScalarType(-5.99633501014107895267e1), + ScalarType(9.80010754185999661536e1), + ScalarType(-5.66762857469070293439e1), + ScalarType(1.39312609387279679503e1), + ScalarType(-1.23916583867381258016e0) + }; + const ScalarType q0[] = { + ScalarType(1.0), + ScalarType(1.95448858338141759834e0), + ScalarType(4.67627912898881538453e0), + ScalarType(8.63602421390890590575e1), + ScalarType(-2.25462687854119370527e2), + ScalarType(2.00260212380060660359e2), + ScalarType(-8.20372256168333339912e1), + ScalarType(1.59056225126211695515e1), + ScalarType(-1.18331621121330003142e0) + }; + const T sqrt2pi = pset1(ScalarType(2.50662827463100050242e0)); + const T half = pset1(ScalarType(0.5)); + T c, c2, ndtri_gt_exp_neg_two; + + c = psub(b, half); + c2 = pmul(c, c); + ndtri_gt_exp_neg_two = pmadd(c, pmul( + c2, pdiv( + internal::ppolevl::run(c2, p0), + internal::ppolevl::run(c2, q0))), c); + return pmul(ndtri_gt_exp_neg_two, sqrt2pi); +} + +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T generic_ndtri_lt_exp_neg_two( + const T& b, const T& should_flipsign) { + /* Approximation for interval z = sqrt(-2 log a ) between 2 and 8 + * i.e., a between exp(-2) = .135 and exp(-32) = 1.27e-14. 
+ */ + const ScalarType p1[] = { + ScalarType(4.05544892305962419923e0), + ScalarType(3.15251094599893866154e1), + ScalarType(5.71628192246421288162e1), + ScalarType(4.40805073893200834700e1), + ScalarType(1.46849561928858024014e1), + ScalarType(2.18663306850790267539e0), + ScalarType(-1.40256079171354495875e-1), + ScalarType(-3.50424626827848203418e-2), + ScalarType(-8.57456785154685413611e-4) + }; + const ScalarType q1[] = { + ScalarType(1.0), + ScalarType(1.57799883256466749731e1), + ScalarType(4.53907635128879210584e1), + ScalarType(4.13172038254672030440e1), + ScalarType(1.50425385692907503408e1), + ScalarType(2.50464946208309415979e0), + ScalarType(-1.42182922854787788574e-1), + ScalarType(-3.80806407691578277194e-2), + ScalarType(-9.33259480895457427372e-4) + }; + /* Approximation for interval z = sqrt(-2 log a ) between 8 and 64 + * i.e., a between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890. + */ + const ScalarType p2[] = { + ScalarType(3.23774891776946035970e0), + ScalarType(6.91522889068984211695e0), + ScalarType(3.93881025292474443415e0), + ScalarType(1.33303460815807542389e0), + ScalarType(2.01485389549179081538e-1), + ScalarType(1.23716634817820021358e-2), + ScalarType(3.01581553508235416007e-4), + ScalarType(2.65806974686737550832e-6), + ScalarType(6.23974539184983293730e-9) + }; + const ScalarType q2[] = { + ScalarType(1.0), + ScalarType(6.02427039364742014255e0), + ScalarType(3.67983563856160859403e0), + ScalarType(1.37702099489081330271e0), + ScalarType(2.16236993594496635890e-1), + ScalarType(1.34204006088543189037e-2), + ScalarType(3.28014464682127739104e-4), + ScalarType(2.89247864745380683936e-6), + ScalarType(6.79019408009981274425e-9) + }; + const T eight = pset1(ScalarType(8.0)); + const T one = pset1(ScalarType(1)); + const T neg_two = pset1(ScalarType(-2)); + T x, x0, x1, z; + + x = psqrt(pmul(neg_two, plog(b))); + x0 = psub(x, pdiv(plog(x), x)); + z = pdiv(one, x); + x1 = pmul( + z, pselect( + pcmp_lt(x, eight), + 
pdiv(internal::ppolevl::run(z, p1), + internal::ppolevl::run(z, q1)), + pdiv(internal::ppolevl::run(z, p2), + internal::ppolevl::run(z, q2)))); + return flipsign(should_flipsign, psub(x0, x1)); +} + +template +EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE +T generic_ndtri(const T& a) { + const T maxnum = pset1(NumTraits::infinity()); + const T neg_maxnum = pset1(-NumTraits::infinity()); + + const T zero = pset1(ScalarType(0)); + const T one = pset1(ScalarType(1)); + // exp(-2) + const T exp_neg_two = pset1(ScalarType(0.13533528323661269189)); + T b, ndtri, should_flipsign; + + should_flipsign = pcmp_le(a, psub(one, exp_neg_two)); + b = pselect(should_flipsign, a, psub(one, a)); + + ndtri = pselect( + pcmp_lt(exp_neg_two, b), + generic_ndtri_gt_exp_neg_two(b), + generic_ndtri_lt_exp_neg_two(b, should_flipsign)); + + return pselect( + pcmp_le(a, zero), neg_maxnum, + pselect(pcmp_le(one, a), maxnum, ndtri)); +} + +template +struct ndtri_retval { + typedef Scalar type; +}; + +#if !EIGEN_HAS_C99_MATH + +template +struct ndtri_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Scalar) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +# else + +template +struct ndtri_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Scalar x) { + return generic_ndtri(x); + } +}; + +#endif // EIGEN_HAS_C99_MATH + + +/************************************************************************************************************** + * Implementation of igammac (complemented incomplete gamma integral), based on Cephes but requires C++11/C99 * + **************************************************************************************************************/ + +template +struct igammac_retval { + typedef Scalar type; +}; + +// NOTE: cephes_helper is also used to implement zeta +template +struct cephes_helper { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar machep() { assert(false && 
"machep not supported for this type"); return 0.0; } + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar big() { assert(false && "big not supported for this type"); return 0.0; } + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar biginv() { assert(false && "biginv not supported for this type"); return 0.0; } +}; + +template <> +struct cephes_helper { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float machep() { + return NumTraits::epsilon() / 2; // 1.0 - machep == 1.0 + } + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float big() { + // use epsneg (1.0 - epsneg == 1.0) + return 1.0f / (NumTraits::epsilon() / 2); + } + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float biginv() { + // epsneg + return machep(); + } +}; + +template <> +struct cephes_helper { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double machep() { + return NumTraits::epsilon() / 2; // 1.0 - machep == 1.0 + } + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double big() { + return 1.0 / NumTraits::epsilon(); + } + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double biginv() { + // inverse of eps + return NumTraits::epsilon(); + } +}; + +enum IgammaComputationMode { VALUE, DERIVATIVE, SAMPLE_DERIVATIVE }; + +template +EIGEN_DEVICE_FUNC +static EIGEN_STRONG_INLINE Scalar main_igamma_term(Scalar a, Scalar x) { + /* Compute x**a * exp(-x) / gamma(a) */ + Scalar logax = a * numext::log(x) - x - lgamma_impl::run(a); + if (logax < -numext::log(NumTraits::highest()) || + // Assuming x and a aren't Nan. + (numext::isnan)(logax)) { + return Scalar(0); + } + return numext::exp(logax); +} + +template +EIGEN_DEVICE_FUNC +int igamma_num_iterations() { + /* Returns the maximum number of internal iterations for igamma computation. 
+ */ + if (mode == VALUE) { + return 2000; + } + + if (internal::is_same::value) { + return 200; + } else if (internal::is_same::value) { + return 500; + } else { + return 2000; + } +} + +template +struct igammac_cf_impl { + /* Computes igamc(a, x) or derivative (depending on the mode) + * using the continued fraction expansion of the complementary + * incomplete Gamma function. + * + * Preconditions: + * a > 0 + * x >= 1 + * x >= a + */ + EIGEN_DEVICE_FUNC + static Scalar run(Scalar a, Scalar x) { + const Scalar zero = 0; + const Scalar one = 1; + const Scalar two = 2; + const Scalar machep = cephes_helper::machep(); + const Scalar big = cephes_helper::big(); + const Scalar biginv = cephes_helper::biginv(); + + if ((numext::isinf)(x)) { + return zero; + } + + Scalar ax = main_igamma_term(a, x); + // This is independent of mode. If this value is zero, + // then the function value is zero. If the function value is zero, + // then we are in a neighborhood where the function value evalutes to zero, + // so the derivative is zero. 
+ if (ax == zero) { + return zero; + } + + // continued fraction + Scalar y = one - a; + Scalar z = x + y + one; + Scalar c = zero; + Scalar pkm2 = one; + Scalar qkm2 = x; + Scalar pkm1 = x + one; + Scalar qkm1 = z * x; + Scalar ans = pkm1 / qkm1; + + Scalar dpkm2_da = zero; + Scalar dqkm2_da = zero; + Scalar dpkm1_da = zero; + Scalar dqkm1_da = -x; + Scalar dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1; + + for (int i = 0; i < igamma_num_iterations(); i++) { + c += one; + y += one; + z += two; + + Scalar yc = y * c; + Scalar pk = pkm1 * z - pkm2 * yc; + Scalar qk = qkm1 * z - qkm2 * yc; + + Scalar dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c; + Scalar dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c; + + if (qk != zero) { + Scalar ans_prev = ans; + ans = pk / qk; + + Scalar dans_da_prev = dans_da; + dans_da = (dpk_da - ans * dqk_da) / qk; + + if (mode == VALUE) { + if (numext::abs(ans_prev - ans) <= machep * numext::abs(ans)) { + break; + } + } else { + if (numext::abs(dans_da - dans_da_prev) <= machep) { + break; + } + } + } + + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + dpkm2_da = dpkm1_da; + dpkm1_da = dpk_da; + dqkm2_da = dqkm1_da; + dqkm1_da = dqk_da; + + if (numext::abs(pk) > big) { + pkm2 *= biginv; + pkm1 *= biginv; + qkm2 *= biginv; + qkm1 *= biginv; + + dpkm2_da *= biginv; + dpkm1_da *= biginv; + dqkm2_da *= biginv; + dqkm1_da *= biginv; + } + } + + /* Compute x**a * exp(-x) / gamma(a) */ + Scalar dlogax_da = numext::log(x) - digamma_impl::run(a); + Scalar dax_da = ax * dlogax_da; + + switch (mode) { + case VALUE: + return ans * ax; + case DERIVATIVE: + return ans * dax_da + dans_da * ax; + case SAMPLE_DERIVATIVE: + default: // this is needed to suppress clang warning + return -(dans_da + ans * dlogax_da) * x; + } + } +}; + +template +struct igamma_series_impl { + /* Computes igam(a, x) or its derivative (depending on the mode) + * using the series expansion of the incomplete Gamma function. 
+ * + * Preconditions: + * x > 0 + * a > 0 + * !(x > 1 && x > a) + */ + EIGEN_DEVICE_FUNC + static Scalar run(Scalar a, Scalar x) { + const Scalar zero = 0; + const Scalar one = 1; + const Scalar machep = cephes_helper::machep(); + + Scalar ax = main_igamma_term(a, x); + + // This is independent of mode. If this value is zero, + // then the function value is zero. If the function value is zero, + // then we are in a neighborhood where the function value evalutes to zero, + // so the derivative is zero. + if (ax == zero) { + return zero; + } + + ax /= a; + + /* power series */ + Scalar r = a; + Scalar c = one; + Scalar ans = one; + + Scalar dc_da = zero; + Scalar dans_da = zero; + + for (int i = 0; i < igamma_num_iterations(); i++) { + r += one; + Scalar term = x / r; + Scalar dterm_da = -x / (r * r); + dc_da = term * dc_da + dterm_da * c; + dans_da += dc_da; + c *= term; + ans += c; + + if (mode == VALUE) { + if (c <= machep * ans) { + break; + } + } else { + if (numext::abs(dc_da) <= machep * numext::abs(dans_da)) { + break; + } + } + } + + Scalar dlogax_da = numext::log(x) - digamma_impl::run(a + one); + Scalar dax_da = ax * dlogax_da; + + switch (mode) { + case VALUE: + return ans * ax; + case DERIVATIVE: + return ans * dax_da + dans_da * ax; + case SAMPLE_DERIVATIVE: + default: // this is needed to suppress clang warning + return -(dans_da + ans * dlogax_da) * x / a; + } + } +}; + +#if !EIGEN_HAS_C99_MATH + +template +struct igammac_impl { + EIGEN_DEVICE_FUNC + static Scalar run(Scalar a, Scalar x) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +#else + +template +struct igammac_impl { + EIGEN_DEVICE_FUNC + static Scalar run(Scalar a, Scalar x) { + /* igamc() + * + * Incomplete gamma integral (modified for Eigen) + * + * + * + * SYNOPSIS: + * + * double a, x, y, igamc(); + * + * y = igamc( a, x ); + * + * DESCRIPTION: + * + * The function is defined by + * + * + * igamc(a,x) = 1 - 
igam(a,x) + * + * inf. + * - + * 1 | | -t a-1 + * = ----- | e t dt. + * - | | + * | (a) - + * x + * + * + * In this implementation both arguments must be positive. + * The integral is evaluated by either a power series or + * continued fraction expansion, depending on the relative + * values of a and x. + * + * ACCURACY (float): + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0,30 30000 7.8e-6 5.9e-7 + * + * + * ACCURACY (double): + * + * Tested at random a, x. + * a x Relative error: + * arithmetic domain domain # trials peak rms + * IEEE 0.5,100 0,100 200000 1.9e-14 1.7e-15 + * IEEE 0.01,0.5 0,100 200000 1.4e-13 1.6e-15 + * + */ + /* + Cephes Math Library Release 2.2: June, 1992 + Copyright 1985, 1987, 1992 by Stephen L. Moshier + Direct inquiries to 30 Frost Street, Cambridge, MA 02140 + */ + const Scalar zero = 0; + const Scalar one = 1; + const Scalar nan = NumTraits::quiet_NaN(); + + if ((x < zero) || (a <= zero)) { + // domain error + return nan; + } + + if ((numext::isnan)(a) || (numext::isnan)(x)) { // propagate nans + return nan; + } + + if ((x < one) || (x < a)) { + return (one - igamma_series_impl::run(a, x)); + } + + return igammac_cf_impl::run(a, x); + } +}; + +#endif // EIGEN_HAS_C99_MATH + +/************************************************************************************************ + * Implementation of igamma (incomplete gamma integral), based on Cephes but requires C++11/C99 * + ************************************************************************************************/ + +#if !EIGEN_HAS_C99_MATH + +template +struct igamma_generic_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar x) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +#else + +template +struct igamma_generic_impl { + EIGEN_DEVICE_FUNC + static Scalar run(Scalar a, Scalar x) { + /* Depending on the mode, returns + * - VALUE: incomplete Gamma 
function igamma(a, x) + * - DERIVATIVE: derivative of incomplete Gamma function d/da igamma(a, x) + * - SAMPLE_DERIVATIVE: implicit derivative of a Gamma random variable + * x ~ Gamma(x | a, 1), dx/da = -1 / Gamma(x | a, 1) * d igamma(a, x) / dx + * + * Derivatives are implemented by forward-mode differentiation. + */ + const Scalar zero = 0; + const Scalar one = 1; + const Scalar nan = NumTraits::quiet_NaN(); + + if (x == zero) return zero; + + if ((x < zero) || (a <= zero)) { // domain error + return nan; + } + + if ((numext::isnan)(a) || (numext::isnan)(x)) { // propagate nans + return nan; + } + + if ((x > one) && (x > a)) { + Scalar ret = igammac_cf_impl::run(a, x); + if (mode == VALUE) { + return one - ret; + } else { + return -ret; + } + } + + return igamma_series_impl::run(a, x); + } +}; + +#endif // EIGEN_HAS_C99_MATH + +template +struct igamma_retval { + typedef Scalar type; +}; + +template +struct igamma_impl : igamma_generic_impl { + /* igam() + * Incomplete gamma integral. + * + * The CDF of Gamma(a, 1) random variable at the point x. + * + * Accuracy estimation. For each a in [10^-2, 10^-1...10^3] we sample + * 50 Gamma random variables x ~ Gamma(x | a, 1), a total of 300 points. + * The ground truth is computed by mpmath. Mean absolute error: + * float: 1.26713e-05 + * double: 2.33606e-12 + * + * Cephes documentation below. + * + * SYNOPSIS: + * + * double a, x, y, igam(); + * + * y = igam( a, x ); + * + * DESCRIPTION: + * + * The function is defined by + * + * x + * - + * 1 | | -t a-1 + * igam(a,x) = ----- | e t dt. + * - | | + * | (a) - + * 0 + * + * + * In this implementation both arguments must be positive. + * The integral is evaluated by either a power series or + * continued fraction expansion, depending on the relative + * values of a and x. 
+ * + * ACCURACY (double): + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0,30 200000 3.6e-14 2.9e-15 + * IEEE 0,100 300000 9.9e-14 1.5e-14 + * + * + * ACCURACY (float): + * + * Relative error: + * arithmetic domain # trials peak rms + * IEEE 0,30 20000 7.8e-6 5.9e-7 + * + */ + /* + Cephes Math Library Release 2.2: June, 1992 + Copyright 1985, 1987, 1992 by Stephen L. Moshier + Direct inquiries to 30 Frost Street, Cambridge, MA 02140 + */ + + /* left tail of incomplete gamma function: + * + * inf. k + * a -x - x + * x e > ---------- + * - - + * k=0 | (a+k+1) + * + */ +}; + +template +struct igamma_der_a_retval : igamma_retval {}; + +template +struct igamma_der_a_impl : igamma_generic_impl { + /* Derivative of the incomplete Gamma function with respect to a. + * + * Computes d/da igamma(a, x) by forward differentiation of the igamma code. + * + * Accuracy estimation. For each a in [10^-2, 10^-1...10^3] we sample + * 50 Gamma random variables x ~ Gamma(x | a, 1), a total of 300 points. + * The ground truth is computed by mpmath. Mean absolute error: + * float: 6.17992e-07 + * double: 4.60453e-12 + * + * Reference: + * R. Moore. "Algorithm AS 187: Derivatives of the incomplete gamma + * integral". Journal of the Royal Statistical Society. 1982 + */ +}; + +template +struct gamma_sample_der_alpha_retval : igamma_retval {}; + +template +struct gamma_sample_der_alpha_impl + : igamma_generic_impl { + /* Derivative of a Gamma random variable sample with respect to alpha. + * + * Consider a sample of a Gamma random variable with the concentration + * parameter alpha: sample ~ Gamma(alpha, 1). The reparameterization + * derivative that we want to compute is dsample / dalpha = + * d igammainv(alpha, u) / dalpha, where u = igamma(alpha, sample). + * However, this formula is numerically unstable and expensive, so instead + * we use implicit differentiation: + * + * igamma(alpha, sample) = u, where u ~ Uniform(0, 1). 
+ * Apply d / dalpha to both sides: + * d igamma(alpha, sample) / dalpha + * + d igamma(alpha, sample) / dsample * dsample/dalpha = 0 + * d igamma(alpha, sample) / dalpha + * + Gamma(sample | alpha, 1) dsample / dalpha = 0 + * dsample/dalpha = - (d igamma(alpha, sample) / dalpha) + * / Gamma(sample | alpha, 1) + * + * Here Gamma(sample | alpha, 1) is the PDF of the Gamma distribution + * (note that the derivative of the CDF w.r.t. sample is the PDF). + * See the reference below for more details. + * + * The derivative of igamma(alpha, sample) is computed by forward + * differentiation of the igamma code. Division by the Gamma PDF is performed + * in the same code, increasing the accuracy and speed due to cancellation + * of some terms. + * + * Accuracy estimation. For each alpha in [10^-2, 10^-1...10^3] we sample + * 50 Gamma random variables sample ~ Gamma(sample | alpha, 1), a total of 300 + * points. The ground truth is computed by mpmath. Mean absolute error: + * float: 2.1686e-06 + * double: 1.4774e-12 + * + * Reference: + * M. Figurnov, S. Mohamed, A. Mnih "Implicit Reparameterization Gradients". 
+ * 2018 + */ +}; + +/***************************************************************************** + * Implementation of Riemann zeta function of two arguments, based on Cephes * + *****************************************************************************/ + +template +struct zeta_retval { + typedef Scalar type; +}; + +template +struct zeta_impl_series { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(const Scalar) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +template <> +struct zeta_impl_series { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE bool run(float& a, float& b, float& s, const float x, const float machep) { + int i = 0; + while(i < 9) + { + i += 1; + a += 1.0f; + b = numext::pow( a, -x ); + s += b; + if( numext::abs(b/s) < machep ) + return true; + } + + //Return whether we are done + return false; + } +}; + +template <> +struct zeta_impl_series { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE bool run(double& a, double& b, double& s, const double x, const double machep) { + int i = 0; + while( (i < 9) || (a <= 9.0) ) + { + i += 1; + a += 1.0; + b = numext::pow( a, -x ); + s += b; + if( numext::abs(b/s) < machep ) + return true; + } + + //Return whether we are done + return false; + } +}; + +template +struct zeta_impl { + EIGEN_DEVICE_FUNC + static Scalar run(Scalar x, Scalar q) { + /* zeta.c + * + * Riemann zeta function of two arguments + * + * + * + * SYNOPSIS: + * + * double x, q, y, zeta(); + * + * y = zeta( x, q ); + * + * + * + * DESCRIPTION: + * + * + * + * inf. + * - -x + * zeta(x,q) = > (k+q) + * - + * k=0 + * + * where x > 1 and q is not a negative integer or zero. + * The Euler-Maclaurin summation formula is used to obtain + * the expansion + * + * n + * - -x + * zeta(x,q) = > (k+q) + * - + * k=1 + * + * 1-x inf. B x(x+1)...(x+2j) + * (n+q) 1 - 2j + * + --------- - ------- + > -------------------- + * x-1 x - x+2j+1 + * 2(n+q) j=1 (2j)! 
(n+q) + * + * where the B2j are Bernoulli numbers. Note that (see zetac.c) + * zeta(x,1) = zetac(x) + 1. + * + * + * + * ACCURACY: + * + * Relative error for single precision: + * arithmetic domain # trials peak rms + * IEEE 0,25 10000 6.9e-7 1.0e-7 + * + * Large arguments may produce underflow in powf(), in which + * case the results are inaccurate. + * + * REFERENCE: + * + * Gradshteyn, I. S., and I. M. Ryzhik, Tables of Integrals, + * Series, and Products, p. 1073; Academic Press, 1980. + * + */ + + int i; + Scalar p, r, a, b, k, s, t, w; + + const Scalar A[] = { + Scalar(12.0), + Scalar(-720.0), + Scalar(30240.0), + Scalar(-1209600.0), + Scalar(47900160.0), + Scalar(-1.8924375803183791606e9), /*1.307674368e12/691*/ + Scalar(7.47242496e10), + Scalar(-2.950130727918164224e12), /*1.067062284288e16/3617*/ + Scalar(1.1646782814350067249e14), /*5.109094217170944e18/43867*/ + Scalar(-4.5979787224074726105e15), /*8.028576626982912e20/174611*/ + Scalar(1.8152105401943546773e17), /*1.5511210043330985984e23/854513*/ + Scalar(-7.1661652561756670113e18) /*1.6938241367317436694528e27/236364091*/ + }; + + const Scalar maxnum = NumTraits::infinity(); + const Scalar zero = 0.0, half = 0.5, one = 1.0; + const Scalar machep = cephes_helper::machep(); + const Scalar nan = NumTraits::quiet_NaN(); + + if( x == one ) + return maxnum; + + if( x < one ) + { + return nan; + } + + if( q <= zero ) + { + if(q == numext::floor(q)) + { + if (x == numext::floor(x) && long(x) % 2 == 0) { + return maxnum; + } + else { + return nan; + } + } + p = x; + r = numext::floor(p); + if (p != r) + return nan; + } + + /* Permit negative q but continue sum until n+q > +9 . + * This case should be handled by a reflection formula. + * If q<0 and x is an integer, there is a relation to + * the polygamma function. 
+ */ + s = numext::pow( q, -x ); + a = q; + b = zero; + // Run the summation in a helper function that is specific to the floating precision + if (zeta_impl_series::run(a, b, s, x, machep)) { + return s; + } + + w = a; + s += b*w/(x-one); + s -= half * b; + a = one; + k = zero; + for( i=0; i<12; i++ ) + { + a *= x + k; + b /= w; + t = a*b/A[i]; + s = s + t; + t = numext::abs(t/s); + if( t < machep ) { + break; + } + k += one; + a *= x + k; + b /= w; + k += one; + } + return s; + } +}; + +/**************************************************************************** + * Implementation of polygamma function, requires C++11/C99 * + ****************************************************************************/ + +template +struct polygamma_retval { + typedef Scalar type; +}; + +#if !EIGEN_HAS_C99_MATH + +template +struct polygamma_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(Scalar n, Scalar x) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +#else + +template +struct polygamma_impl { + EIGEN_DEVICE_FUNC + static Scalar run(Scalar n, Scalar x) { + Scalar zero = 0.0, one = 1.0; + Scalar nplus = n + one; + const Scalar nan = NumTraits::quiet_NaN(); + + // Check that n is a non-negative integer + if (numext::floor(n) != n || n < zero) { + return nan; + } + // Just return the digamma function for n = 0 + else if (n == zero) { + return digamma_impl::run(x); + } + // Use the same implementation as scipy + else { + Scalar factorial = numext::exp(lgamma_impl::run(nplus)); + return numext::pow(-one, nplus) * factorial * zeta_impl::run(nplus, x); + } + } +}; + +#endif // EIGEN_HAS_C99_MATH + +/************************************************************************************************ + * Implementation of betainc (incomplete beta integral), based on Cephes but requires C++11/C99 * + ************************************************************************************************/ + 
+template +struct betainc_retval { + typedef Scalar type; +}; + +#if !EIGEN_HAS_C99_MATH + +template +struct betainc_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x) { + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +#else + +template +struct betainc_impl { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(Scalar, Scalar, Scalar) { + /* betaincf.c + * + * Incomplete beta integral + * + * + * SYNOPSIS: + * + * float a, b, x, y, betaincf(); + * + * y = betaincf( a, b, x ); + * + * + * DESCRIPTION: + * + * Returns incomplete beta integral of the arguments, evaluated + * from zero to x. The function is defined as + * + * x + * - - + * | (a+b) | | a-1 b-1 + * ----------- | t (1-t) dt. + * - - | | + * | (a) | (b) - + * 0 + * + * The domain of definition is 0 <= x <= 1. In this + * implementation a and b are restricted to positive values. + * The integral from x to 1 may be obtained by the symmetry + * relation + * + * 1 - betainc( a, b, x ) = betainc( b, a, 1-x ). + * + * The integral is evaluated by a continued fraction expansion. + * If a < 1, the function calls itself recursively after a + * transformation to increase a to a+1. + * + * ACCURACY (float): + * + * Tested at random points (a,b,x) with a and b in the indicated + * interval and x between 0 and 1. + * + * arithmetic domain # trials peak rms + * Relative error: + * IEEE 0,30 10000 3.7e-5 5.1e-6 + * IEEE 0,100 10000 1.7e-4 2.5e-5 + * The useful domain for relative error is limited by underflow + * of the single precision exponential function. + * Absolute error: + * IEEE 0,30 100000 2.2e-5 9.6e-7 + * IEEE 0,100 10000 6.5e-5 3.7e-6 + * + * Larger errors may occur for extreme ratios of a and b. 
+ * + * ACCURACY (double): + * arithmetic domain # trials peak rms + * IEEE 0,5 10000 6.9e-15 4.5e-16 + * IEEE 0,85 250000 2.2e-13 1.7e-14 + * IEEE 0,1000 30000 5.3e-12 6.3e-13 + * IEEE 0,10000 250000 9.3e-11 7.1e-12 + * IEEE 0,100000 10000 8.7e-10 4.8e-11 + * Outputs smaller than the IEEE gradual underflow threshold + * were excluded from these statistics. + * + * ERROR MESSAGES: + * message condition value returned + * incbet domain x<0, x>1 nan + * incbet underflow nan + */ + + EIGEN_STATIC_ASSERT((internal::is_same::value == false), + THIS_TYPE_IS_NOT_SUPPORTED); + return Scalar(0); + } +}; + +/* Continued fraction expansion #1 for incomplete beta integral (small_branch = True) + * Continued fraction expansion #2 for incomplete beta integral (small_branch = False) + */ +template +struct incbeta_cfe { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE Scalar run(Scalar a, Scalar b, Scalar x, bool small_branch) { + EIGEN_STATIC_ASSERT((internal::is_same::value || + internal::is_same::value), + THIS_TYPE_IS_NOT_SUPPORTED); + const Scalar big = cephes_helper::big(); + const Scalar machep = cephes_helper::machep(); + const Scalar biginv = cephes_helper::biginv(); + + const Scalar zero = 0; + const Scalar one = 1; + const Scalar two = 2; + + Scalar xk, pk, pkm1, pkm2, qk, qkm1, qkm2; + Scalar k1, k2, k3, k4, k5, k6, k7, k8, k26update; + Scalar ans; + int n; + + const int num_iters = (internal::is_same::value) ? 100 : 300; + const Scalar thresh = + (internal::is_same::value) ? machep : Scalar(3) * machep; + Scalar r = (internal::is_same::value) ? 
zero : one; + + if (small_branch) { + k1 = a; + k2 = a + b; + k3 = a; + k4 = a + one; + k5 = one; + k6 = b - one; + k7 = k4; + k8 = a + two; + k26update = one; + } else { + k1 = a; + k2 = b - one; + k3 = a; + k4 = a + one; + k5 = one; + k6 = a + b; + k7 = a + one; + k8 = a + two; + k26update = -one; + x = x / (one - x); + } + + pkm2 = zero; + qkm2 = one; + pkm1 = one; + qkm1 = one; + ans = one; + n = 0; + + do { + xk = -(x * k1 * k2) / (k3 * k4); + pk = pkm1 + pkm2 * xk; + qk = qkm1 + qkm2 * xk; + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + xk = (x * k5 * k6) / (k7 * k8); + pk = pkm1 + pkm2 * xk; + qk = qkm1 + qkm2 * xk; + pkm2 = pkm1; + pkm1 = pk; + qkm2 = qkm1; + qkm1 = qk; + + if (qk != zero) { + r = pk / qk; + if (numext::abs(ans - r) < numext::abs(r) * thresh) { + return r; + } + ans = r; + } + + k1 += one; + k2 += k26update; + k3 += two; + k4 += two; + k5 += one; + k6 -= k26update; + k7 += two; + k8 += two; + + if ((numext::abs(qk) + numext::abs(pk)) > big) { + pkm2 *= biginv; + pkm1 *= biginv; + qkm2 *= biginv; + qkm1 *= biginv; + } + if ((numext::abs(qk) < biginv) || (numext::abs(pk) < biginv)) { + pkm2 *= big; + pkm1 *= big; + qkm2 *= big; + qkm1 *= big; + } + } while (++n < num_iters); + + return ans; + } +}; + +/* Helper functions depending on the Scalar type */ +template +struct betainc_helper {}; + +template <> +struct betainc_helper { + /* Core implementation, assumes a large (> 1.0) */ + EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE float incbsa(float aa, float bb, + float xx) { + float ans, a, b, t, x, onemx; + bool reversed_a_b = false; + + onemx = 1.0f - xx; + + /* see if x is greater than the mean */ + if (xx > (aa / (aa + bb))) { + reversed_a_b = true; + a = bb; + b = aa; + t = xx; + x = onemx; + } else { + a = aa; + b = bb; + t = onemx; + x = xx; + } + + /* Choose expansion for optimal convergence */ + if (b > 10.0f) { + if (numext::abs(b * x / a) < 0.3f) { + t = betainc_helper::incbps(a, b, x); + if (reversed_a_b) t = 1.0f - t; + 
return t; + } + } + + ans = x * (a + b - 2.0f) / (a - 1.0f); + if (ans < 1.0f) { + ans = incbeta_cfe::run(a, b, x, true /* small_branch */); + t = b * numext::log(t); + } else { + ans = incbeta_cfe::run(a, b, x, false /* small_branch */); + t = (b - 1.0f) * numext::log(t); + } + + t += a * numext::log(x) + lgamma_impl::run(a + b) - + lgamma_impl::run(a) - lgamma_impl::run(b); + t += numext::log(ans / a); + t = numext::exp(t); + + if (reversed_a_b) t = 1.0f - t; + return t; + } + + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE float incbps(float a, float b, float x) { + float t, u, y, s; + const float machep = cephes_helper::machep(); + + y = a * numext::log(x) + (b - 1.0f) * numext::log1p(-x) - numext::log(a); + y -= lgamma_impl::run(a) + lgamma_impl::run(b); + y += lgamma_impl::run(a + b); + + t = x / (1.0f - x); + s = 0.0f; + u = 1.0f; + do { + b -= 1.0f; + if (b == 0.0f) { + break; + } + a += 1.0f; + u *= t * b / a; + s += u; + } while (numext::abs(u) > machep); + + return numext::exp(y) * (1.0f + s); + } +}; + +template <> +struct betainc_impl { + EIGEN_DEVICE_FUNC + static float run(float a, float b, float x) { + const float nan = NumTraits::quiet_NaN(); + float ans, t; + + if (a <= 0.0f) return nan; + if (b <= 0.0f) return nan; + if ((x <= 0.0f) || (x >= 1.0f)) { + if (x == 0.0f) return 0.0f; + if (x == 1.0f) return 1.0f; + // mtherr("betaincf", DOMAIN); + return nan; + } + + /* transformation for small aa */ + if (a <= 1.0f) { + ans = betainc_helper::incbsa(a + 1.0f, b, x); + t = a * numext::log(x) + b * numext::log1p(-x) + + lgamma_impl::run(a + b) - lgamma_impl::run(a + 1.0f) - + lgamma_impl::run(b); + return (ans + numext::exp(t)); + } else { + return betainc_helper::incbsa(a, b, x); + } + } +}; + +template <> +struct betainc_helper { + EIGEN_DEVICE_FUNC + static EIGEN_STRONG_INLINE double incbps(double a, double b, double x) { + const double machep = cephes_helper::machep(); + + double s, t, u, v, n, t1, z, ai; + + ai = 1.0 / a; + u = (1.0 - b) * x; + v 
= u / (a + 1.0); + t1 = v; + t = u; + n = 2.0; + s = 0.0; + z = machep * ai; + while (numext::abs(v) > z) { + u = (n - b) * x / n; + t *= u; + v = t / (a + n); + s += v; + n += 1.0; + } + s += t1; + s += ai; + + u = a * numext::log(x); + // TODO: gamma() is not directly implemented in Eigen. + /* + if ((a + b) < maxgam && numext::abs(u) < maxlog) { + t = gamma(a + b) / (gamma(a) * gamma(b)); + s = s * t * pow(x, a); + } + */ + t = lgamma_impl::run(a + b) - lgamma_impl::run(a) - + lgamma_impl::run(b) + u + numext::log(s); + return s = numext::exp(t); + } +}; + +template <> +struct betainc_impl { + EIGEN_DEVICE_FUNC + static double run(double aa, double bb, double xx) { + const double nan = NumTraits::quiet_NaN(); + const double machep = cephes_helper::machep(); + // const double maxgam = 171.624376956302725; + + double a, b, t, x, xc, w, y; + bool reversed_a_b = false; + + if (aa <= 0.0 || bb <= 0.0) { + return nan; // goto domerr; + } + + if ((xx <= 0.0) || (xx >= 1.0)) { + if (xx == 0.0) return (0.0); + if (xx == 1.0) return (1.0); + // mtherr("incbet", DOMAIN); + return nan; + } + + if ((bb * xx) <= 1.0 && xx <= 0.95) { + return betainc_helper::incbps(aa, bb, xx); + } + + w = 1.0 - xx; + + /* Reverse a and b if x is greater than the mean. */ + if (xx > (aa / (aa + bb))) { + reversed_a_b = true; + a = bb; + b = aa; + xc = xx; + x = w; + } else { + a = aa; + b = bb; + xc = w; + x = xx; + } + + if (reversed_a_b && (b * x) <= 1.0 && x <= 0.95) { + t = betainc_helper::incbps(a, b, x); + if (t <= machep) { + t = 1.0 - machep; + } else { + t = 1.0 - t; + } + return t; + } + + /* Choose expansion for better convergence. */ + y = x * (a + b - 2.0) - (a - 1.0); + if (y < 0.0) { + w = incbeta_cfe::run(a, b, x, true /* small_branch */); + } else { + w = incbeta_cfe::run(a, b, x, false /* small_branch */) / xc; + } + + /* Multiply w by the factor + a b _ _ _ + x (1-x) | (a+b) / ( a | (a) | (b) ) . 
*/ + + y = a * numext::log(x); + t = b * numext::log(xc); + // TODO: gamma is not directly implemented in Eigen. + /* + if ((a + b) < maxgam && numext::abs(y) < maxlog && numext::abs(t) < maxlog) + { + t = pow(xc, b); + t *= pow(x, a); + t /= a; + t *= w; + t *= gamma(a + b) / (gamma(a) * gamma(b)); + } else { + */ + /* Resort to logarithms. */ + y += t + lgamma_impl::run(a + b) - lgamma_impl::run(a) - + lgamma_impl::run(b); + y += numext::log(w / a); + t = numext::exp(y); + + /* } */ + // done: + + if (reversed_a_b) { + if (t <= machep) { + t = 1.0 - machep; + } else { + t = 1.0 - t; + } + } + return t; + } +}; + +#endif // EIGEN_HAS_C99_MATH + +} // end namespace internal + +namespace numext { + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(lgamma, Scalar) + lgamma(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(lgamma, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(digamma, Scalar) + digamma(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(digamma, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(zeta, Scalar) +zeta(const Scalar& x, const Scalar& q) { + return EIGEN_MATHFUNC_IMPL(zeta, Scalar)::run(x, q); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(polygamma, Scalar) +polygamma(const Scalar& n, const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(polygamma, Scalar)::run(n, x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(erf, Scalar) + erf(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(erf, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(erfc, Scalar) + erfc(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(erfc, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(ndtri, Scalar) + ndtri(const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(ndtri, Scalar)::run(x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igamma, Scalar) + igamma(const Scalar& a, const Scalar& x) { + return 
EIGEN_MATHFUNC_IMPL(igamma, Scalar)::run(a, x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igamma_der_a, Scalar) + igamma_der_a(const Scalar& a, const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(igamma_der_a, Scalar)::run(a, x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(gamma_sample_der_alpha, Scalar) + gamma_sample_der_alpha(const Scalar& a, const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(gamma_sample_der_alpha, Scalar)::run(a, x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(igammac, Scalar) + igammac(const Scalar& a, const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(igammac, Scalar)::run(a, x); +} + +template +EIGEN_DEVICE_FUNC inline EIGEN_MATHFUNC_RETVAL(betainc, Scalar) + betainc(const Scalar& a, const Scalar& b, const Scalar& x) { + return EIGEN_MATHFUNC_IMPL(betainc, Scalar)::run(a, b, x); +} + +} // end namespace numext +} // end namespace Eigen + +#endif // EIGEN_SPECIAL_FUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h new file mode 100644 index 0000000..2bb0179 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/SpecialFunctionsPacketMath.h @@ -0,0 +1,79 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2016 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPECIALFUNCTIONS_PACKETMATH_H +#define EIGEN_SPECIALFUNCTIONS_PACKETMATH_H + +namespace Eigen { + +namespace internal { + +/** \internal \returns the ln(|gamma(\a a)|) (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet plgamma(const Packet& a) { using numext::lgamma; return lgamma(a); } + +/** \internal \returns the derivative of lgamma, psi(\a a) (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pdigamma(const Packet& a) { using numext::digamma; return digamma(a); } + +/** \internal \returns the zeta function of two arguments (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pzeta(const Packet& x, const Packet& q) { using numext::zeta; return zeta(x, q); } + +/** \internal \returns the polygamma function (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet ppolygamma(const Packet& n, const Packet& x) { using numext::polygamma; return polygamma(n, x); } + +/** \internal \returns the erf(\a a) (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet perf(const Packet& a) { using numext::erf; return erf(a); } + +/** \internal \returns the erfc(\a a) (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet perfc(const Packet& a) { using numext::erfc; return erfc(a); } + +/** \internal \returns the ndtri(\a a) (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pndtri(const Packet& a) { + typedef typename unpacket_traits::type ScalarType; + using internal::generic_ndtri; return generic_ndtri(a); +} + +/** \internal \returns the incomplete gamma function igamma(\a a, \a x) */ +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Packet pigamma(const Packet& a, const Packet& x) { using numext::igamma; return igamma(a, x); } + +/** \internal \returns the derivative of the incomplete gamma function + * igamma_der_a(\a a, \a x) 
*/ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pigamma_der_a(const Packet& a, const Packet& x) { + using numext::igamma_der_a; return igamma_der_a(a, x); +} + +/** \internal \returns compute the derivative of the sample + * of Gamma(alpha, 1) random variable with respect to the parameter a + * gamma_sample_der_alpha(\a alpha, \a sample) */ +template +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet pgamma_sample_der_alpha(const Packet& alpha, const Packet& sample) { + using numext::gamma_sample_der_alpha; return gamma_sample_der_alpha(alpha, sample); +} + +/** \internal \returns the complementary incomplete gamma function igammac(\a a, \a x) */ +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Packet pigammac(const Packet& a, const Packet& x) { using numext::igammac; return igammac(a, x); } + +/** \internal \returns the complementary incomplete gamma function betainc(\a a, \a b, \a x) */ +template EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +Packet pbetainc(const Packet& a, const Packet& b,const Packet& x) { using numext::betainc; return betainc(a, b, x); } + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_SPECIALFUNCTIONS_PACKETMATH_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX/BesselFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX/BesselFunctions.h new file mode 100644 index 0000000..2d76692 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX/BesselFunctions.h @@ -0,0 +1,46 @@ +#ifndef EIGEN_AVX_BESSELFUNCTIONS_H +#define EIGEN_AVX_BESSELFUNCTIONS_H + +namespace Eigen { +namespace internal { + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_i0) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_i0) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_i0e) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_i0e) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_i1) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_i1) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, 
pbessel_i1e) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_i1e) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_j0) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_j0) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_j1) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_j1) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_k0) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_k0) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_k0e) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_k0e) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_k1) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_k1) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_k1e) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_k1e) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_y0) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_y0) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pbessel_y1) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pbessel_y1) + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_AVX_BESSELFUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX/SpecialFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX/SpecialFunctions.h new file mode 100644 index 0000000..35e62a8 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX/SpecialFunctions.h @@ -0,0 +1,16 @@ +#ifndef EIGEN_AVX_SPECIALFUNCTIONS_H +#define EIGEN_AVX_SPECIALFUNCTIONS_H + +namespace Eigen { +namespace internal { + +F16_PACKET_FUNCTION(Packet8f, Packet8h, perf) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, perf) + +F16_PACKET_FUNCTION(Packet8f, Packet8h, pndtri) +BF16_PACKET_FUNCTION(Packet8f, Packet8bf, pndtri) + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_AVX_SPECIAL_FUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/BesselFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/BesselFunctions.h new file mode 100644 index 0000000..7dd3c3e --- 
/dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/BesselFunctions.h @@ -0,0 +1,46 @@ +#ifndef EIGEN_AVX512_BESSELFUNCTIONS_H +#define EIGEN_AVX512_BESSELFUNCTIONS_H + +namespace Eigen { +namespace internal { + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_i0) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_i0) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_i0e) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_i0e) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_i1) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_i1) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_i1e) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_i1e) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_j0) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_j0) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_j1) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_j1) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_k0) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_k0) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_k0e) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_k0e) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_k1) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_k1) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_k1e) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_k1e) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_y0) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_y0) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pbessel_y1) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pbessel_y1) + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_AVX512_BESSELFUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/SpecialFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/SpecialFunctions.h new file mode 100644 index 0000000..79878f2 --- /dev/null +++ 
b/external/unsupported/Eigen/src/SpecialFunctions/arch/AVX512/SpecialFunctions.h @@ -0,0 +1,16 @@ +#ifndef EIGEN_AVX512_SPECIALFUNCTIONS_H +#define EIGEN_AVX512_SPECIALFUNCTIONS_H + +namespace Eigen { +namespace internal { + +F16_PACKET_FUNCTION(Packet16f, Packet16h, perf) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, perf) + +F16_PACKET_FUNCTION(Packet16f, Packet16h, pndtri) +BF16_PACKET_FUNCTION(Packet16f, Packet16bf, pndtri) + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_AVX512_SPECIAL_FUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/arch/GPU/SpecialFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/GPU/SpecialFunctions.h new file mode 100644 index 0000000..dd3bf4d --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/arch/GPU/SpecialFunctions.h @@ -0,0 +1,369 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2014 Benoit Steiner +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_GPU_SPECIALFUNCTIONS_H +#define EIGEN_GPU_SPECIALFUNCTIONS_H + +namespace Eigen { + +namespace internal { + +// Make sure this is only available when targeting a GPU: we don't want to +// introduce conflicts between these packet_traits definitions and the ones +// we'll use on the host side (SSE, AVX, ...) 
+#if defined(EIGEN_GPUCC) && defined(EIGEN_USE_GPU) + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 plgamma(const float4& a) +{ + return make_float4(lgammaf(a.x), lgammaf(a.y), lgammaf(a.z), lgammaf(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 plgamma(const double2& a) +{ + using numext::lgamma; + return make_double2(lgamma(a.x), lgamma(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pdigamma(const float4& a) +{ + using numext::digamma; + return make_float4(digamma(a.x), digamma(a.y), digamma(a.z), digamma(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pdigamma(const double2& a) +{ + using numext::digamma; + return make_double2(digamma(a.x), digamma(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pzeta(const float4& x, const float4& q) +{ + using numext::zeta; + return make_float4(zeta(x.x, q.x), zeta(x.y, q.y), zeta(x.z, q.z), zeta(x.w, q.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pzeta(const double2& x, const double2& q) +{ + using numext::zeta; + return make_double2(zeta(x.x, q.x), zeta(x.y, q.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 ppolygamma(const float4& n, const float4& x) +{ + using numext::polygamma; + return make_float4(polygamma(n.x, x.x), polygamma(n.y, x.y), polygamma(n.z, x.z), polygamma(n.w, x.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 ppolygamma(const double2& n, const double2& x) +{ + using numext::polygamma; + return make_double2(polygamma(n.x, x.x), polygamma(n.y, x.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 perf(const float4& a) +{ + return make_float4(erff(a.x), erff(a.y), erff(a.z), erff(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 perf(const double2& a) +{ + using numext::erf; + return make_double2(erf(a.x), erf(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 perfc(const float4& a) +{ + 
using numext::erfc; + return make_float4(erfc(a.x), erfc(a.y), erfc(a.z), erfc(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 perfc(const double2& a) +{ + using numext::erfc; + return make_double2(erfc(a.x), erfc(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pndtri(const float4& a) +{ + using numext::ndtri; + return make_float4(ndtri(a.x), ndtri(a.y), ndtri(a.z), ndtri(a.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pndtri(const double2& a) +{ + using numext::ndtri; + return make_double2(ndtri(a.x), ndtri(a.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pigamma(const float4& a, const float4& x) +{ + using numext::igamma; + return make_float4( + igamma(a.x, x.x), + igamma(a.y, x.y), + igamma(a.z, x.z), + igamma(a.w, x.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pigamma(const double2& a, const double2& x) +{ + using numext::igamma; + return make_double2(igamma(a.x, x.x), igamma(a.y, x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pigamma_der_a( + const float4& a, const float4& x) { + using numext::igamma_der_a; + return make_float4(igamma_der_a(a.x, x.x), igamma_der_a(a.y, x.y), + igamma_der_a(a.z, x.z), igamma_der_a(a.w, x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pigamma_der_a(const double2& a, const double2& x) { + using numext::igamma_der_a; + return make_double2(igamma_der_a(a.x, x.x), igamma_der_a(a.y, x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pgamma_sample_der_alpha( + const float4& alpha, const float4& sample) { + using numext::gamma_sample_der_alpha; + return make_float4( + gamma_sample_der_alpha(alpha.x, sample.x), + gamma_sample_der_alpha(alpha.y, sample.y), + gamma_sample_der_alpha(alpha.z, sample.z), + gamma_sample_der_alpha(alpha.w, sample.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pgamma_sample_der_alpha(const double2& alpha, 
const double2& sample) { + using numext::gamma_sample_der_alpha; + return make_double2( + gamma_sample_der_alpha(alpha.x, sample.x), + gamma_sample_der_alpha(alpha.y, sample.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pigammac(const float4& a, const float4& x) +{ + using numext::igammac; + return make_float4( + igammac(a.x, x.x), + igammac(a.y, x.y), + igammac(a.z, x.z), + igammac(a.w, x.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pigammac(const double2& a, const double2& x) +{ + using numext::igammac; + return make_double2(igammac(a.x, x.x), igammac(a.y, x.y)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +float4 pbetainc(const float4& a, const float4& b, const float4& x) +{ + using numext::betainc; + return make_float4( + betainc(a.x, b.x, x.x), + betainc(a.y, b.y, x.y), + betainc(a.z, b.z, x.z), + betainc(a.w, b.w, x.w)); +} + +template<> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE +double2 pbetainc(const double2& a, const double2& b, const double2& x) +{ + using numext::betainc; + return make_double2(betainc(a.x, b.x, x.x), betainc(a.y, b.y, x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_i0e(const float4& x) { + using numext::bessel_i0e; + return make_float4(bessel_i0e(x.x), bessel_i0e(x.y), bessel_i0e(x.z), bessel_i0e(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_i0e(const double2& x) { + using numext::bessel_i0e; + return make_double2(bessel_i0e(x.x), bessel_i0e(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_i0(const float4& x) { + using numext::bessel_i0; + return make_float4(bessel_i0(x.x), bessel_i0(x.y), bessel_i0(x.z), bessel_i0(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_i0(const double2& x) { + using numext::bessel_i0; + return make_double2(bessel_i0(x.x), bessel_i0(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_i1e(const float4& x) { 
+ using numext::bessel_i1e; + return make_float4(bessel_i1e(x.x), bessel_i1e(x.y), bessel_i1e(x.z), bessel_i1e(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_i1e(const double2& x) { + using numext::bessel_i1e; + return make_double2(bessel_i1e(x.x), bessel_i1e(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_i1(const float4& x) { + using numext::bessel_i1; + return make_float4(bessel_i1(x.x), bessel_i1(x.y), bessel_i1(x.z), bessel_i1(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_i1(const double2& x) { + using numext::bessel_i1; + return make_double2(bessel_i1(x.x), bessel_i1(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_k0e(const float4& x) { + using numext::bessel_k0e; + return make_float4(bessel_k0e(x.x), bessel_k0e(x.y), bessel_k0e(x.z), bessel_k0e(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_k0e(const double2& x) { + using numext::bessel_k0e; + return make_double2(bessel_k0e(x.x), bessel_k0e(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_k0(const float4& x) { + using numext::bessel_k0; + return make_float4(bessel_k0(x.x), bessel_k0(x.y), bessel_k0(x.z), bessel_k0(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_k0(const double2& x) { + using numext::bessel_k0; + return make_double2(bessel_k0(x.x), bessel_k0(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_k1e(const float4& x) { + using numext::bessel_k1e; + return make_float4(bessel_k1e(x.x), bessel_k1e(x.y), bessel_k1e(x.z), bessel_k1e(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_k1e(const double2& x) { + using numext::bessel_k1e; + return make_double2(bessel_k1e(x.x), bessel_k1e(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_k1(const float4& x) { + using numext::bessel_k1; + return 
make_float4(bessel_k1(x.x), bessel_k1(x.y), bessel_k1(x.z), bessel_k1(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_k1(const double2& x) { + using numext::bessel_k1; + return make_double2(bessel_k1(x.x), bessel_k1(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_j0(const float4& x) { + using numext::bessel_j0; + return make_float4(bessel_j0(x.x), bessel_j0(x.y), bessel_j0(x.z), bessel_j0(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_j0(const double2& x) { + using numext::bessel_j0; + return make_double2(bessel_j0(x.x), bessel_j0(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_j1(const float4& x) { + using numext::bessel_j1; + return make_float4(bessel_j1(x.x), bessel_j1(x.y), bessel_j1(x.z), bessel_j1(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_j1(const double2& x) { + using numext::bessel_j1; + return make_double2(bessel_j1(x.x), bessel_j1(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_y0(const float4& x) { + using numext::bessel_y0; + return make_float4(bessel_y0(x.x), bessel_y0(x.y), bessel_y0(x.z), bessel_y0(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_y0(const double2& x) { + using numext::bessel_y0; + return make_double2(bessel_y0(x.x), bessel_y0(x.y)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE float4 pbessel_y1(const float4& x) { + using numext::bessel_y1; + return make_float4(bessel_y1(x.x), bessel_y1(x.y), bessel_y1(x.z), bessel_y1(x.w)); +} + +template <> +EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE double2 +pbessel_y1(const double2& x) { + using numext::bessel_y1; + return make_double2(bessel_y1(x.x), bessel_y1(x.y)); +} + +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GPU_SPECIALFUNCTIONS_H diff --git 
a/external/unsupported/Eigen/src/SpecialFunctions/arch/NEON/BesselFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/NEON/BesselFunctions.h new file mode 100644 index 0000000..67433b0 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/arch/NEON/BesselFunctions.h @@ -0,0 +1,54 @@ +#ifndef EIGEN_NEON_BESSELFUNCTIONS_H +#define EIGEN_NEON_BESSELFUNCTIONS_H + +namespace Eigen { +namespace internal { + +#if EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC + +#define NEON_HALF_TO_FLOAT_FUNCTIONS(METHOD) \ +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +Packet8hf METHOD(const Packet8hf& x) { \ + const Packet4f lo = METHOD(vcvt_f32_f16(vget_low_f16(x))); \ + const Packet4f hi = METHOD(vcvt_f32_f16(vget_high_f16(x))); \ + return vcombine_f16(vcvt_f16_f32(lo), vcvt_f16_f32(hi)); \ +} \ + \ +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +Packet4hf METHOD(const Packet4hf& x) { \ + return vcvt_f16_f32(METHOD(vcvt_f32_f16(x))); \ +} + +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_i0) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_i0e) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_i1) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_i1e) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_j0) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_j1) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_k0) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_k0e) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_k1) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_k1e) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_y0) +NEON_HALF_TO_FLOAT_FUNCTIONS(pbessel_y1) + +#undef NEON_HALF_TO_FLOAT_FUNCTIONS +#endif + +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_i0) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_i0e) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_i1) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_i1e) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_j0) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_j1) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_k0) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_k0e) 
+BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_k1) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_k1e) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_y0) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pbessel_y1) + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_NEON_BESSELFUNCTIONS_H diff --git a/external/unsupported/Eigen/src/SpecialFunctions/arch/NEON/SpecialFunctions.h b/external/unsupported/Eigen/src/SpecialFunctions/arch/NEON/SpecialFunctions.h new file mode 100644 index 0000000..ec92951 --- /dev/null +++ b/external/unsupported/Eigen/src/SpecialFunctions/arch/NEON/SpecialFunctions.h @@ -0,0 +1,34 @@ +#ifndef EIGEN_NEON_SPECIALFUNCTIONS_H +#define EIGEN_NEON_SPECIALFUNCTIONS_H + +namespace Eigen { +namespace internal { + +#if EIGEN_HAS_ARM64_FP16_VECTOR_ARITHMETIC + +#define NEON_HALF_TO_FLOAT_FUNCTIONS(METHOD) \ +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +Packet8hf METHOD(const Packet8hf& x) { \ + const Packet4f lo = METHOD(vcvt_f32_f16(vget_low_f16(x))); \ + const Packet4f hi = METHOD(vcvt_f32_f16(vget_high_f16(x))); \ + return vcombine_f16(vcvt_f16_f32(lo), vcvt_f16_f32(hi)); \ +} \ + \ +template <> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE \ +Packet4hf METHOD(const Packet4hf& x) { \ + return vcvt_f16_f32(METHOD(vcvt_f32_f16(x))); \ +} + +NEON_HALF_TO_FLOAT_FUNCTIONS(perf) +NEON_HALF_TO_FLOAT_FUNCTIONS(pndtri) + +#undef NEON_HALF_TO_FLOAT_FUNCTIONS +#endif + +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, perf) +BF16_PACKET_FUNCTION(Packet4f, Packet4bf, pndtri) + +} // namespace internal +} // namespace Eigen + +#endif // EIGEN_NEON_SPECIALFUNCTIONS_H diff --git a/external/unsupported/Eigen/src/Splines/Spline.h b/external/unsupported/Eigen/src/Splines/Spline.h new file mode 100644 index 0000000..79edd52 --- /dev/null +++ b/external/unsupported/Eigen/src/Splines/Spline.h @@ -0,0 +1,507 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
+// +// Copyright (C) 20010-2011 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPLINE_H +#define EIGEN_SPLINE_H + +#include "SplineFwd.h" + +namespace Eigen +{ + /** + * \ingroup Splines_Module + * \class Spline + * \brief A class representing multi-dimensional spline curves. + * + * The class represents B-splines with non-uniform knot vectors. Each control + * point of the B-spline is associated with a basis function + * \f{align*} + * C(u) & = \sum_{i=0}^{n}N_{i,p}(u)P_i + * \f} + * + * \tparam _Scalar The underlying data type (typically float or double) + * \tparam _Dim The curve dimension (e.g. 2 or 3) + * \tparam _Degree Per default set to Dynamic; could be set to the actual desired + * degree for optimization purposes (would result in stack allocation + * of several temporary variables). + **/ + template + class Spline + { + public: + typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ + enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; + enum { Degree = _Degree /*!< The spline curve's degree. */ }; + + /** \brief The point type the spline is representing. */ + typedef typename SplineTraits::PointType PointType; + + /** \brief The data type used to store knot vectors. */ + typedef typename SplineTraits::KnotVectorType KnotVectorType; + + /** \brief The data type used to store parameter vectors. */ + typedef typename SplineTraits::ParameterVectorType ParameterVectorType; + + /** \brief The data type used to store non-zero basis functions. */ + typedef typename SplineTraits::BasisVectorType BasisVectorType; + + /** \brief The data type used to store the values of the basis function derivatives. */ + typedef typename SplineTraits::BasisDerivativeType BasisDerivativeType; + + /** \brief The data type representing the spline's control points. 
*/ + typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; + + /** + * \brief Creates a (constant) zero spline. + * For Splines with dynamic degree, the resulting degree will be 0. + **/ + Spline() + : m_knots(1, (Degree==Dynamic ? 2 : 2*Degree+2)) + , m_ctrls(ControlPointVectorType::Zero(Dimension,(Degree==Dynamic ? 1 : Degree+1))) + { + // in theory this code can go to the initializer list but it will get pretty + // much unreadable ... + enum { MinDegree = (Degree==Dynamic ? 0 : Degree) }; + m_knots.template segment(0) = Array::Zero(); + m_knots.template segment(MinDegree+1) = Array::Ones(); + } + + /** + * \brief Creates a spline from a knot vector and control points. + * \param knots The spline's knot vector. + * \param ctrls The spline's control point vector. + **/ + template + Spline(const OtherVectorType& knots, const OtherArrayType& ctrls) : m_knots(knots), m_ctrls(ctrls) {} + + /** + * \brief Copy constructor for splines. + * \param spline The input spline. + **/ + template + Spline(const Spline& spline) : + m_knots(spline.knots()), m_ctrls(spline.ctrls()) {} + + /** + * \brief Returns the knots of the underlying spline. + **/ + const KnotVectorType& knots() const { return m_knots; } + + /** + * \brief Returns the ctrls of the underlying spline. + **/ + const ControlPointVectorType& ctrls() const { return m_ctrls; } + + /** + * \brief Returns the spline value at a given site \f$u\f$. + * + * The function returns + * \f{align*} + * C(u) & = \sum_{i=0}^{n}N_{i,p}P_i + * \f} + * + * \param u Parameter \f$u \in [0;1]\f$ at which the spline is evaluated. + * \return The spline value at the given location \f$u\f$. + **/ + PointType operator()(Scalar u) const; + + /** + * \brief Evaluation of spline derivatives of up-to given order. + * + * The function returns + * \f{align*} + * \frac{d^i}{du^i}C(u) & = \sum_{i=0}^{n} \frac{d^i}{du^i} N_{i,p}(u)P_i + * \f} + * for i ranging between 0 and order. 
+ * + * \param u Parameter \f$u \in [0;1]\f$ at which the spline derivative is evaluated. + * \param order The order up to which the derivatives are computed. + **/ + typename SplineTraits::DerivativeType + derivatives(Scalar u, DenseIndex order) const; + + /** + * \copydoc Spline::derivatives + * Using the template version of this function is more efficieent since + * temporary objects are allocated on the stack whenever this is possible. + **/ + template + typename SplineTraits::DerivativeType + derivatives(Scalar u, DenseIndex order = DerivativeOrder) const; + + /** + * \brief Computes the non-zero basis functions at the given site. + * + * Splines have local support and a point from their image is defined + * by exactly \f$p+1\f$ control points \f$P_i\f$ where \f$p\f$ is the + * spline degree. + * + * This function computes the \f$p+1\f$ non-zero basis function values + * for a given parameter value \f$u\f$. It returns + * \f{align*}{ + * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) + * \f} + * + * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis functions + * are computed. + **/ + typename SplineTraits::BasisVectorType + basisFunctions(Scalar u) const; + + /** + * \brief Computes the non-zero spline basis function derivatives up to given order. + * + * The function computes + * \f{align*}{ + * \frac{d^i}{du^i} N_{i,p}(u), \hdots, \frac{d^i}{du^i} N_{i+p+1,p}(u) + * \f} + * with i ranging from 0 up to the specified order. + * + * \param u Parameter \f$u \in [0;1]\f$ at which the non-zero basis function + * derivatives are computed. + * \param order The order up to which the basis function derivatives are computes. + **/ + typename SplineTraits::BasisDerivativeType + basisFunctionDerivatives(Scalar u, DenseIndex order) const; + + /** + * \copydoc Spline::basisFunctionDerivatives + * Using the template version of this function is more efficieent since + * temporary objects are allocated on the stack whenever this is possible. 
+ **/ + template + typename SplineTraits::BasisDerivativeType + basisFunctionDerivatives(Scalar u, DenseIndex order = DerivativeOrder) const; + + /** + * \brief Returns the spline degree. + **/ + DenseIndex degree() const; + + /** + * \brief Returns the span within the knot vector in which u is falling. + * \param u The site for which the span is determined. + **/ + DenseIndex span(Scalar u) const; + + /** + * \brief Computes the span within the provided knot vector in which u is falling. + **/ + static DenseIndex Span(typename SplineTraits::Scalar u, DenseIndex degree, const typename SplineTraits::KnotVectorType& knots); + + /** + * \brief Returns the spline's non-zero basis functions. + * + * The function computes and returns + * \f{align*}{ + * N_{i,p}(u), \hdots, N_{i+p+1,p}(u) + * \f} + * + * \param u The site at which the basis functions are computed. + * \param degree The degree of the underlying spline. + * \param knots The underlying spline's knot vector. + **/ + static BasisVectorType BasisFunctions(Scalar u, DenseIndex degree, const KnotVectorType& knots); + + /** + * \copydoc Spline::basisFunctionDerivatives + * \param degree The degree of the underlying spline + * \param knots The underlying spline's knot vector. + **/ + static BasisDerivativeType BasisFunctionDerivatives( + const Scalar u, const DenseIndex order, const DenseIndex degree, const KnotVectorType& knots); + + private: + KnotVectorType m_knots; /*!< Knot vector. */ + ControlPointVectorType m_ctrls; /*!< Control points. 
*/ + + template + static void BasisFunctionDerivativesImpl( + const typename Spline<_Scalar, _Dim, _Degree>::Scalar u, + const DenseIndex order, + const DenseIndex p, + const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U, + DerivativeType& N_); + }; + + template + DenseIndex Spline<_Scalar, _Dim, _Degree>::Span( + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::Scalar u, + DenseIndex degree, + const typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::KnotVectorType& knots) + { + // Piegl & Tiller, "The NURBS Book", A2.1 (p. 68) + if (u <= knots(0)) return degree; + const Scalar* pos = std::upper_bound(knots.data()+degree-1, knots.data()+knots.size()-degree-1, u); + return static_cast( std::distance(knots.data(), pos) - 1 ); + } + + template + typename Spline<_Scalar, _Dim, _Degree>::BasisVectorType + Spline<_Scalar, _Dim, _Degree>::BasisFunctions( + typename Spline<_Scalar, _Dim, _Degree>::Scalar u, + DenseIndex degree, + const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots) + { + const DenseIndex p = degree; + const DenseIndex i = Spline::Span(u, degree, knots); + + const KnotVectorType& U = knots; + + BasisVectorType left(p+1); left(0) = Scalar(0); + BasisVectorType right(p+1); right(0) = Scalar(0); + + VectorBlock(left,1,p) = u - VectorBlock(U,i+1-p,p).reverse(); + VectorBlock(right,1,p) = VectorBlock(U,i+1,p) - u; + + BasisVectorType N(1,p+1); + N(0) = Scalar(1); + for (DenseIndex j=1; j<=p; ++j) + { + Scalar saved = Scalar(0); + for (DenseIndex r=0; r + DenseIndex Spline<_Scalar, _Dim, _Degree>::degree() const + { + if (_Degree == Dynamic) + return m_knots.size() - m_ctrls.cols() - 1; + else + return _Degree; + } + + template + DenseIndex Spline<_Scalar, _Dim, _Degree>::span(Scalar u) const + { + return Spline::Span(u, degree(), knots()); + } + + template + typename Spline<_Scalar, _Dim, _Degree>::PointType Spline<_Scalar, _Dim, _Degree>::operator()(Scalar u) const + { + enum { Order = SplineTraits::OrderAtCompileTime 
}; + + const DenseIndex span = this->span(u); + const DenseIndex p = degree(); + const BasisVectorType basis_funcs = basisFunctions(u); + + const Replicate ctrl_weights(basis_funcs); + const Block ctrl_pts(ctrls(),0,span-p,Dimension,p+1); + return (ctrl_weights * ctrl_pts).rowwise().sum(); + } + + /* --------------------------------------------------------------------------------------------- */ + + template + void derivativesImpl(const SplineType& spline, typename SplineType::Scalar u, DenseIndex order, DerivativeType& der) + { + enum { Dimension = SplineTraits::Dimension }; + enum { Order = SplineTraits::OrderAtCompileTime }; + enum { DerivativeOrder = DerivativeType::ColsAtCompileTime }; + + typedef typename SplineTraits::ControlPointVectorType ControlPointVectorType; + typedef typename SplineTraits::BasisDerivativeType BasisDerivativeType; + typedef typename BasisDerivativeType::ConstRowXpr BasisDerivativeRowXpr; + + const DenseIndex p = spline.degree(); + const DenseIndex span = spline.span(u); + + const DenseIndex n = (std::min)(p, order); + + der.resize(Dimension,n+1); + + // Retrieve the basis function derivatives up to the desired order... + const BasisDerivativeType basis_func_ders = spline.template basisFunctionDerivatives(u, n+1); + + // ... and perform the linear combinations of the control points. 
+ for (DenseIndex der_order=0; der_order ctrl_weights( basis_func_ders.row(der_order) ); + const Block ctrl_pts(spline.ctrls(),0,span-p,Dimension,p+1); + der.col(der_order) = (ctrl_weights * ctrl_pts).rowwise().sum(); + } + } + + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::DerivativeType + Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline >::DerivativeType res; + derivativesImpl(*this, u, order, res); + return res; + } + + template + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::DerivativeType + Spline<_Scalar, _Dim, _Degree>::derivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline, DerivativeOrder >::DerivativeType res; + derivativesImpl(*this, u, order, res); + return res; + } + + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisVectorType + Spline<_Scalar, _Dim, _Degree>::basisFunctions(Scalar u) const + { + return Spline::BasisFunctions(u, degree(), knots()); + } + + /* --------------------------------------------------------------------------------------------- */ + + + template + template + void Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivativesImpl( + const typename Spline<_Scalar, _Dim, _Degree>::Scalar u, + const DenseIndex order, + const DenseIndex p, + const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& U, + DerivativeType& N_) + { + typedef Spline<_Scalar, _Dim, _Degree> SplineType; + enum { Order = SplineTraits::OrderAtCompileTime }; + + const DenseIndex span = SplineType::Span(u, p, U); + + const DenseIndex n = (std::min)(p, order); + + N_.resize(n+1, p+1); + + BasisVectorType left = BasisVectorType::Zero(p+1); + BasisVectorType right = BasisVectorType::Zero(p+1); + + Matrix ndu(p+1,p+1); + + Scalar saved, temp; // FIXME These were double instead of Scalar. Was there a reason for that? 
+ + ndu(0,0) = 1.0; + + DenseIndex j; + for (j=1; j<=p; ++j) + { + left[j] = u-U[span+1-j]; + right[j] = U[span+j]-u; + saved = 0.0; + + for (DenseIndex r=0; r(saved+right[r+1] * temp); + saved = left[j-r] * temp; + } + + ndu(j,j) = static_cast(saved); + } + + for (j = p; j>=0; --j) + N_(0,j) = ndu(j,p); + + // Compute the derivatives + DerivativeType a(n+1,p+1); + DenseIndex r=0; + for (; r<=p; ++r) + { + DenseIndex s1,s2; + s1 = 0; s2 = 1; // alternate rows in array a + a(0,0) = 1.0; + + // Compute the k-th derivative + for (DenseIndex k=1; k<=static_cast(n); ++k) + { + Scalar d = 0.0; + DenseIndex rk,pk,j1,j2; + rk = r-k; pk = p-k; + + if (r>=k) + { + a(s2,0) = a(s1,0)/ndu(pk+1,rk); + d = a(s2,0)*ndu(rk,pk); + } + + if (rk>=-1) j1 = 1; + else j1 = -rk; + + if (r-1 <= pk) j2 = k-1; + else j2 = p-r; + + for (j=j1; j<=j2; ++j) + { + a(s2,j) = (a(s1,j)-a(s1,j-1))/ndu(pk+1,rk+j); + d += a(s2,j)*ndu(rk+j,pk); + } + + if (r<=pk) + { + a(s2,k) = -a(s1,k-1)/ndu(pk+1,r); + d += a(s2,k)*ndu(r,pk); + } + + N_(k,r) = static_cast(d); + j = s1; s1 = s2; s2 = j; // Switch rows + } + } + + /* Multiply through by the correct factors */ + /* (Eq. 
[2.9]) */ + r = p; + for (DenseIndex k=1; k<=static_cast(n); ++k) + { + for (j=p; j>=0; --j) N_(k,j) *= r; + r *= p-k; + } + } + + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree> >::BasisDerivativeType + Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits >::BasisDerivativeType der; + BasisFunctionDerivativesImpl(u, order, degree(), knots(), der); + return der; + } + + template + template + typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType + Spline<_Scalar, _Dim, _Degree>::basisFunctionDerivatives(Scalar u, DenseIndex order) const + { + typename SplineTraits< Spline<_Scalar, _Dim, _Degree>, DerivativeOrder >::BasisDerivativeType der; + BasisFunctionDerivativesImpl(u, order, degree(), knots(), der); + return der; + } + + template + typename SplineTraits >::BasisDerivativeType + Spline<_Scalar, _Dim, _Degree>::BasisFunctionDerivatives( + const typename Spline<_Scalar, _Dim, _Degree>::Scalar u, + const DenseIndex order, + const DenseIndex degree, + const typename Spline<_Scalar, _Dim, _Degree>::KnotVectorType& knots) + { + typename SplineTraits::BasisDerivativeType der; + BasisFunctionDerivativesImpl(u, order, degree, knots, der); + return der; + } +} + +#endif // EIGEN_SPLINE_H diff --git a/external/unsupported/Eigen/src/Splines/SplineFitting.h b/external/unsupported/Eigen/src/Splines/SplineFitting.h new file mode 100644 index 0000000..9f6e8af --- /dev/null +++ b/external/unsupported/Eigen/src/Splines/SplineFitting.h @@ -0,0 +1,431 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_SPLINE_FITTING_H +#define EIGEN_SPLINE_FITTING_H + +#include +#include +#include +#include + +#include "SplineFwd.h" + +#include "../../../../Eigen/LU" +#include "../../../../Eigen/QR" + +namespace Eigen +{ + /** + * \brief Computes knot averages. + * \ingroup Splines_Module + * + * The knots are computed as + * \f{align*} + * u_0 & = \hdots = u_p = 0 \\ + * u_{m-p} & = \hdots = u_{m} = 1 \\ + * u_{j+p} & = \frac{1}{p}\sum_{i=j}^{j+p-1}\bar{u}_i \quad\quad j=1,\hdots,n-p + * \f} + * where \f$p\f$ is the degree and \f$m+1\f$ the number knots + * of the desired interpolating spline. + * + * \param[in] parameters The input parameters. During interpolation one for each data point. + * \param[in] degree The spline degree which is used during the interpolation. + * \param[out] knots The output knot vector. + * + * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data + **/ + template + void KnotAveraging(const KnotVectorType& parameters, DenseIndex degree, KnotVectorType& knots) + { + knots.resize(parameters.size()+degree+1); + + for (DenseIndex j=1; j + void KnotAveragingWithDerivatives(const ParameterVectorType& parameters, + const unsigned int degree, + const IndexArray& derivativeIndices, + KnotVectorType& knots) + { + typedef typename ParameterVectorType::Scalar Scalar; + + DenseIndex numParameters = parameters.size(); + DenseIndex numDerivatives = derivativeIndices.size(); + + if (numDerivatives < 1) + { + KnotAveraging(parameters, degree, knots); + return; + } + + DenseIndex startIndex; + DenseIndex endIndex; + + DenseIndex numInternalDerivatives = numDerivatives; + + if (derivativeIndices[0] == 0) + { + startIndex = 0; + --numInternalDerivatives; + } + else + { + startIndex = 1; + } + if (derivativeIndices[numDerivatives - 1] == numParameters - 1) + { + endIndex = numParameters - degree; + --numInternalDerivatives; + } + else + { + endIndex = numParameters - degree - 1; + } + + // There are 
(endIndex - startIndex + 1) knots obtained from the averaging + // and 2 for the first and last parameters. + DenseIndex numAverageKnots = endIndex - startIndex + 3; + KnotVectorType averageKnots(numAverageKnots); + averageKnots[0] = parameters[0]; + + int newKnotIndex = 0; + for (DenseIndex i = startIndex; i <= endIndex; ++i) + averageKnots[++newKnotIndex] = parameters.segment(i, degree).mean(); + averageKnots[++newKnotIndex] = parameters[numParameters - 1]; + + newKnotIndex = -1; + + ParameterVectorType temporaryParameters(numParameters + 1); + KnotVectorType derivativeKnots(numInternalDerivatives); + for (DenseIndex i = 0; i < numAverageKnots - 1; ++i) + { + temporaryParameters[0] = averageKnots[i]; + ParameterVectorType parameterIndices(numParameters); + int temporaryParameterIndex = 1; + for (DenseIndex j = 0; j < numParameters; ++j) + { + Scalar parameter = parameters[j]; + if (parameter >= averageKnots[i] && parameter < averageKnots[i + 1]) + { + parameterIndices[temporaryParameterIndex] = j; + temporaryParameters[temporaryParameterIndex++] = parameter; + } + } + temporaryParameters[temporaryParameterIndex] = averageKnots[i + 1]; + + for (int j = 0; j <= temporaryParameterIndex - 2; ++j) + { + for (DenseIndex k = 0; k < derivativeIndices.size(); ++k) + { + if (parameterIndices[j + 1] == derivativeIndices[k] + && parameterIndices[j + 1] != 0 + && parameterIndices[j + 1] != numParameters - 1) + { + derivativeKnots[++newKnotIndex] = temporaryParameters.segment(j, 3).mean(); + break; + } + } + } + } + + KnotVectorType temporaryKnots(averageKnots.size() + derivativeKnots.size()); + + std::merge(averageKnots.data(), averageKnots.data() + averageKnots.size(), + derivativeKnots.data(), derivativeKnots.data() + derivativeKnots.size(), + temporaryKnots.data()); + + // Number of knots (one for each point and derivative) plus spline order. 
+ DenseIndex numKnots = numParameters + numDerivatives + degree + 1; + knots.resize(numKnots); + + knots.head(degree).fill(temporaryKnots[0]); + knots.tail(degree).fill(temporaryKnots.template tail<1>()[0]); + knots.segment(degree, temporaryKnots.size()) = temporaryKnots; + } + + /** + * \brief Computes chord length parameters which are required for spline interpolation. + * \ingroup Splines_Module + * + * \param[in] pts The data points to which a spline should be fit. + * \param[out] chord_lengths The resulting chord length vector. + * + * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data + **/ + template + void ChordLengths(const PointArrayType& pts, KnotVectorType& chord_lengths) + { + typedef typename KnotVectorType::Scalar Scalar; + + const DenseIndex n = pts.cols(); + + // 1. compute the column-wise norms + chord_lengths.resize(pts.cols()); + chord_lengths[0] = 0; + chord_lengths.rightCols(n-1) = (pts.array().leftCols(n-1) - pts.array().rightCols(n-1)).matrix().colwise().norm(); + + // 2. compute the partial sums + std::partial_sum(chord_lengths.data(), chord_lengths.data()+n, chord_lengths.data()); + + // 3. normalize the data + chord_lengths /= chord_lengths(n-1); + chord_lengths(n-1) = Scalar(1); + } + + /** + * \brief Spline fitting methods. + * \ingroup Splines_Module + **/ + template + struct SplineFitting + { + typedef typename SplineType::KnotVectorType KnotVectorType; + typedef typename SplineType::ParameterVectorType ParameterVectorType; + + /** + * \brief Fits an interpolating Spline to the given data points. + * + * \param pts The points for which an interpolating spline will be computed. + * \param degree The degree of the interpolating spline. + * + * \returns A spline interpolating the initially provided points. + **/ + template + static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree); + + /** + * \brief Fits an interpolating Spline to the given data points. 
+ * + * \param pts The points for which an interpolating spline will be computed. + * \param degree The degree of the interpolating spline. + * \param knot_parameters The knot parameters for the interpolation. + * + * \returns A spline interpolating the initially provided points. + **/ + template + static SplineType Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters); + + /** + * \brief Fits an interpolating spline to the given data points and + * derivatives. + * + * \param points The points for which an interpolating spline will be computed. + * \param derivatives The desired derivatives of the interpolating spline at interpolation + * points. + * \param derivativeIndices An array indicating which point each derivative belongs to. This + * must be the same size as @a derivatives. + * \param degree The degree of the interpolating spline. + * + * \returns A spline interpolating @a points with @a derivatives at those points. + * + * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008. + * Curve interpolation with directional constraints for engineering design. + * Engineering with Computers + **/ + template + static SplineType InterpolateWithDerivatives(const PointArrayType& points, + const PointArrayType& derivatives, + const IndexArray& derivativeIndices, + const unsigned int degree); + + /** + * \brief Fits an interpolating spline to the given data points and derivatives. + * + * \param points The points for which an interpolating spline will be computed. + * \param derivatives The desired derivatives of the interpolating spline at interpolation points. + * \param derivativeIndices An array indicating which point each derivative belongs to. This + * must be the same size as @a derivatives. + * \param degree The degree of the interpolating spline. + * \param parameters The parameters corresponding to the interpolation points. + * + * \returns A spline interpolating @a points with @a derivatives at those points. 
+ * + * \sa Les A. Piegl, Khairan Rajab, Volha Smarodzinana. 2008. + * Curve interpolation with directional constraints for engineering design. + * Engineering with Computers + */ + template + static SplineType InterpolateWithDerivatives(const PointArrayType& points, + const PointArrayType& derivatives, + const IndexArray& derivativeIndices, + const unsigned int degree, + const ParameterVectorType& parameters); + }; + + template + template + SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree, const KnotVectorType& knot_parameters) + { + typedef typename SplineType::KnotVectorType::Scalar Scalar; + typedef typename SplineType::ControlPointVectorType ControlPointVectorType; + + typedef Matrix MatrixType; + + KnotVectorType knots; + KnotAveraging(knot_parameters, degree, knots); + + DenseIndex n = pts.cols(); + MatrixType A = MatrixType::Zero(n,n); + for (DenseIndex i=1; i qr(A); + + // Here, we are creating a temporary due to an Eigen issue. + ControlPointVectorType ctrls = qr.solve(MatrixType(pts.transpose())).transpose(); + + return SplineType(knots, ctrls); + } + + template + template + SplineType SplineFitting::Interpolate(const PointArrayType& pts, DenseIndex degree) + { + KnotVectorType chord_lengths; // knot parameters + ChordLengths(pts, chord_lengths); + return Interpolate(pts, degree, chord_lengths); + } + + template + template + SplineType + SplineFitting::InterpolateWithDerivatives(const PointArrayType& points, + const PointArrayType& derivatives, + const IndexArray& derivativeIndices, + const unsigned int degree, + const ParameterVectorType& parameters) + { + typedef typename SplineType::KnotVectorType::Scalar Scalar; + typedef typename SplineType::ControlPointVectorType ControlPointVectorType; + + typedef Matrix MatrixType; + + const DenseIndex n = points.cols() + derivatives.cols(); + + KnotVectorType knots; + + KnotAveragingWithDerivatives(parameters, degree, derivativeIndices, knots); + + // fill matrix + MatrixType A 
= MatrixType::Zero(n, n); + + // Use these dimensions for quicker populating, then transpose for solving. + MatrixType b(points.rows(), n); + + DenseIndex startRow; + DenseIndex derivativeStart; + + // End derivatives. + if (derivativeIndices[0] == 0) + { + A.template block<1, 2>(1, 0) << -1, 1; + + Scalar y = (knots(degree + 1) - knots(0)) / degree; + b.col(1) = y*derivatives.col(0); + + startRow = 2; + derivativeStart = 1; + } + else + { + startRow = 1; + derivativeStart = 0; + } + if (derivativeIndices[derivatives.cols() - 1] == points.cols() - 1) + { + A.template block<1, 2>(n - 2, n - 2) << -1, 1; + + Scalar y = (knots(knots.size() - 1) - knots(knots.size() - (degree + 2))) / degree; + b.col(b.cols() - 2) = y*derivatives.col(derivatives.cols() - 1); + } + + DenseIndex row = startRow; + DenseIndex derivativeIndex = derivativeStart; + for (DenseIndex i = 1; i < parameters.size() - 1; ++i) + { + const DenseIndex span = SplineType::Span(parameters[i], degree, knots); + + if (derivativeIndex < derivativeIndices.size() && derivativeIndices[derivativeIndex] == i) + { + A.block(row, span - degree, 2, degree + 1) + = SplineType::BasisFunctionDerivatives(parameters[i], 1, degree, knots); + + b.col(row++) = points.col(i); + b.col(row++) = derivatives.col(derivativeIndex++); + } + else + { + A.row(row).segment(span - degree, degree + 1) + = SplineType::BasisFunctions(parameters[i], degree, knots); + b.col(row++) = points.col(i); + } + } + b.col(0) = points.col(0); + b.col(b.cols() - 1) = points.col(points.cols() - 1); + A(0,0) = 1; + A(n - 1, n - 1) = 1; + + // Solve + FullPivLU lu(A); + ControlPointVectorType controlPoints = lu.solve(MatrixType(b.transpose())).transpose(); + + SplineType spline(knots, controlPoints); + + return spline; + } + + template + template + SplineType + SplineFitting::InterpolateWithDerivatives(const PointArrayType& points, + const PointArrayType& derivatives, + const IndexArray& derivativeIndices, + const unsigned int degree) + { + 
ParameterVectorType parameters; + ChordLengths(points, parameters); + return InterpolateWithDerivatives(points, derivatives, derivativeIndices, degree, parameters); + } +} + +#endif // EIGEN_SPLINE_FITTING_H diff --git a/external/unsupported/Eigen/src/Splines/SplineFwd.h b/external/unsupported/Eigen/src/Splines/SplineFwd.h new file mode 100644 index 0000000..00d6b49 --- /dev/null +++ b/external/unsupported/Eigen/src/Splines/SplineFwd.h @@ -0,0 +1,93 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 20010-2011 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SPLINES_FWD_H +#define EIGEN_SPLINES_FWD_H + +#include "../../../../Eigen/Core" + +namespace Eigen +{ + template class Spline; + + template < typename SplineType, int DerivativeOrder = Dynamic > struct SplineTraits {}; + + /** + * \ingroup Splines_Module + * \brief Compile-time attributes of the Spline class for Dynamic degree. + **/ + template + struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, Dynamic > + { + typedef _Scalar Scalar; /*!< The spline curve's scalar type. */ + enum { Dimension = _Dim /*!< The spline curve's dimension. */ }; + enum { Degree = _Degree /*!< The spline curve's degree. */ }; + + enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; + enum { NumOfDerivativesAtCompileTime = OrderAtCompileTime /*!< The number of derivatives defined for the current spline. */ }; + + enum { DerivativeMemoryLayout = Dimension==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; + + /** \brief The data type used to store non-zero basis functions. */ + typedef Array BasisVectorType; + + /** \brief The data type used to store the values of the basis function derivatives. 
*/ + typedef Array BasisDerivativeType; + + /** \brief The data type used to store the spline's derivative values. */ + typedef Array DerivativeType; + + /** \brief The point type the spline is representing. */ + typedef Array PointType; + + /** \brief The data type used to store knot vectors. */ + typedef Array KnotVectorType; + + /** \brief The data type used to store parameter vectors. */ + typedef Array ParameterVectorType; + + /** \brief The data type representing the spline's control points. */ + typedef Array ControlPointVectorType; + }; + + /** + * \ingroup Splines_Module + * \brief Compile-time attributes of the Spline class for fixed degree. + * + * The traits class inherits all attributes from the SplineTraits of Dynamic degree. + **/ + template < typename _Scalar, int _Dim, int _Degree, int _DerivativeOrder > + struct SplineTraits< Spline<_Scalar, _Dim, _Degree>, _DerivativeOrder > : public SplineTraits< Spline<_Scalar, _Dim, _Degree> > + { + enum { OrderAtCompileTime = _Degree==Dynamic ? Dynamic : _Degree+1 /*!< The spline curve's order at compile-time. */ }; + enum { NumOfDerivativesAtCompileTime = _DerivativeOrder==Dynamic ? Dynamic : _DerivativeOrder+1 /*!< The number of derivatives defined for the current spline. */ }; + + enum { DerivativeMemoryLayout = _Dim==1 ? RowMajor : ColMajor /*!< The derivative type's memory layout. */ }; + + /** \brief The data type used to store the values of the basis function derivatives. */ + typedef Array<_Scalar,Dynamic,Dynamic,RowMajor,NumOfDerivativesAtCompileTime,OrderAtCompileTime> BasisDerivativeType; + + /** \brief The data type used to store the spline's derivative values. */ + typedef Array<_Scalar,_Dim,Dynamic,DerivativeMemoryLayout,_Dim,NumOfDerivativesAtCompileTime> DerivativeType; + }; + + /** \brief 2D float B-spline with dynamic degree. */ + typedef Spline Spline2f; + + /** \brief 3D float B-spline with dynamic degree. */ + typedef Spline Spline3f; + + /** \brief 2D double B-spline with dynamic degree. 
*/ + typedef Spline Spline2d; + + /** \brief 3D double B-spline with dynamic degree. */ + typedef Spline Spline3d; +} + +#endif // EIGEN_SPLINES_FWD_H diff --git a/external/unsupported/README.txt b/external/unsupported/README.txt new file mode 100644 index 0000000..70793bf --- /dev/null +++ b/external/unsupported/README.txt @@ -0,0 +1,50 @@ +This directory contains contributions from various users. +They are provided "as is", without any support. Nevertheless, +most of them are subject to be included in Eigen in the future. + +In order to use an unsupported module you have to do either: + + - add the path_to_eigen/unsupported directory to your include path and do: + #include + + - or directly do: + #include + + +If you are interested in contributing to one of them, or have other stuff +you would like to share, feel free to contact us: +http://eigen.tuxfamily.org/index.php?title=Main_Page#Mailing_list + +Any kind of contributions are much appreciated, even very preliminary ones. +However, it: + - must rely on Eigen, + - must be highly related to math, + - should have some general purpose in the sense that it could + potentially become an official Eigen module (or be merged into another one). + +In doubt feel free to contact us. For instance, if your addons is very too specific +but it shows an interesting way of using Eigen, then it could be a nice demo. + + +This directory is organized as follow: + +unsupported/Eigen/ModuleHeader1 +unsupported/Eigen/ModuleHeader2 +unsupported/Eigen/... +unsupported/Eigen/src/Module1/SourceFile1.h +unsupported/Eigen/src/Module1/SourceFile2.h +unsupported/Eigen/src/Module1/... +unsupported/Eigen/src/Module2/SourceFile1.h +unsupported/Eigen/src/Module2/SourceFile2.h +unsupported/Eigen/src/Module2/... +unsupported/Eigen/src/... +unsupported/doc/snippets/.cpp <- code snippets for the doc +unsupported/doc/examples/.cpp <- examples for the doc +unsupported/doc/TutorialModule1.dox +unsupported/doc/TutorialModule2.dox +unsupported/doc/... 
+unsupported/test/.cpp <- unit test files + +The documentation is generated at the same time than the main Eigen documentation. +The .html files are generated in: build_dir/doc/html/unsupported/ + diff --git a/include/bounds.hpp b/include/bounds.hpp index b822097..95cadd7 100644 --- a/include/bounds.hpp +++ b/include/bounds.hpp @@ -15,8 +15,8 @@ namespace bounds { using Mask = Eigen::Array; - Mask is_out_of_bounds(const Vector &xi, const Vector &lb, const Vector &ub); - bool any_out_of_bounds(const Vector &xi, const Vector &lb, const Vector &ub); + Mask is_out_of_bounds(const Vector& xi, const Vector& lb, const Vector& ub); + bool any_out_of_bounds(const Vector& xi, const Vector& lb, const Vector& ub); struct BoundCorrection { @@ -24,17 +24,27 @@ namespace bounds Vector lb, ub, db; Float diameter; size_t n_out_of_bounds = 0; + bool has_bounds; - BoundCorrection(const Vector &lb, const Vector &ub) : lb(lb), ub(ub), db(ub - lb), - diameter((ub - lb).norm()) {} + BoundCorrection(const Vector& lb, const Vector& ub) : lb(lb), ub(ub), db(ub - lb), + diameter((ub - lb).norm()), + has_bounds(true) + { + //! 
find a better way + if (!std::isfinite(diameter)) + { + diameter = 10; + has_bounds = false; + } + } - void correct(const Eigen::Index i, parameters::Parameters &p); + void correct(const Eigen::Index i, parameters::Parameters& p); - virtual Vector correct_x(const Vector &xi, const Mask &oob) = 0; + virtual Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) = 0; - [[nodiscard]] Mask is_out_of_bounds(const Vector &xi) const; + [[nodiscard]] Mask is_out_of_bounds(const Vector& xi) const; - [[nodiscard]] Vector delta_out_of_bounds(const Vector &xi, const Mask &oob) const; + [[nodiscard]] Vector delta_out_of_bounds(const Vector& xi, const Mask& oob) const; [[nodiscard]] bool any_out_of_bounds() const { @@ -46,7 +56,7 @@ namespace bounds { using BoundCorrection::BoundCorrection; - Vector correct_x(const Vector &xi, const Mask &oob) override + Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) override { return xi; } @@ -63,14 +73,14 @@ namespace bounds COTN(Eigen::Ref lb, Eigen::Ref ub) : BoundCorrection(lb, ub), sampler(static_cast(lb.size()), rng::normal(0, 1.0 / 3.)) {} - Vector correct_x(const Vector &xi, const Mask &oob) override; + Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) override; }; struct Mirror final : BoundCorrection { using BoundCorrection::BoundCorrection; - Vector correct_x(const Vector &xi, const Mask &oob) override; + Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) override; }; struct UniformResample final : BoundCorrection @@ -79,24 +89,24 @@ namespace bounds UniformResample(Eigen::Ref lb, Eigen::Ref ub) : BoundCorrection(lb, ub), sampler(static_cast(lb.size())) {} - Vector correct_x(const Vector &xi, const Mask &oob) override; + Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) override; }; struct Saturate final : BoundCorrection { using BoundCorrection::BoundCorrection; - Vector correct_x(const Vector &xi, const Mask &oob) override; + 
Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) override; }; struct Toroidal final : BoundCorrection { using BoundCorrection::BoundCorrection; - Vector correct_x(const Vector &xi, const Mask &oob) override; + Vector correct_x(const Vector& xi, const Mask& oob, const Float sigma) override; }; - inline std::shared_ptr get(const parameters::CorrectionMethod &m, const Vector &lb, const Vector &ub) + inline std::shared_ptr get(const parameters::CorrectionMethod& m, const Vector& lb, const Vector& ub) { using namespace parameters; switch (m) diff --git a/include/common.hpp b/include/common.hpp index 1aa6e5e..905a350 100644 --- a/include/common.hpp +++ b/include/common.hpp @@ -21,8 +21,9 @@ #include #include #include +#include -using Float = long double; +using Float = double; using Matrix = Eigen::Matrix; using Vector = Eigen::Matrix; using Array = Eigen::Array; @@ -39,6 +40,7 @@ namespace constants extern size_t cache_min_samples; extern bool cache_samples; extern bool clip_sigma; + extern bool use_box_muller; } /** @@ -336,6 +338,49 @@ namespace rng namespace functions { Float sphere(const Vector &x); - Float rastrigin(const Vector &x); Float ellipse(const Vector& x); + Float rastrigin(const Vector &x); + Float rosenbrock(const Vector& x); + Matrix random_rotation_matrix(int n, int seed); + + enum ObjectiveFunction { + ELLIPSE, + ROSENBROCK, + SPHERE, + RASTRIGIN + }; + + inline FunctionType get(const ObjectiveFunction f) + { + switch (f) + { + case ELLIPSE: + return ellipse; + case RASTRIGIN: + return rastrigin; + case ROSENBROCK: + return rosenbrock; + case SPHERE: + return sphere; + default: + return sphere; + } + } + + inline std::string to_string(const ObjectiveFunction f) + { + switch (f) + { + case ELLIPSE: + return "ellipse"; + case RASTRIGIN: + return "rastrigin"; + case ROSENBROCK: + return "rosenbrock"; + case SPHERE: + return "sphere"; + default: + return "unknown"; + } + } } diff --git a/include/matrix_adaptation.hpp 
b/include/matrix_adaptation.hpp index 778acac..24642d8 100644 --- a/include/matrix_adaptation.hpp +++ b/include/matrix_adaptation.hpp @@ -10,27 +10,43 @@ namespace matrix_adaptation { struct Adaptation { - Vector m, m_old, dm, ps; + Vector m, m_old, dm, ps, dz; Float dd; Float expected_length_z; - Matrix inv_C; Adaptation(const size_t dim, const Vector& x0, const Vector& ps, const Float expected_length_z) : m(x0), m_old(dim), dm(Vector::Zero(dim)), ps(ps), dd(static_cast(dim)), - expected_length_z(expected_length_z), - inv_C(Matrix::Identity(dim, dim)) + expected_length_z(expected_length_z) { } - virtual void adapt_evolution_paths(const Population& pop, const parameters::Weights& w, - const std::shared_ptr& mutation, - const parameters::Stats& stats, size_t mu, size_t lambda) = 0; + virtual void adapt_ps(const parameters::Weights& w); + + void adapt_evolution_paths( + const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, + const parameters::Settings& settings, + size_t lambda, size_t mu + ); + + virtual void adapt_evolution_paths_inner(const Population& pop, const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) = 0; + + bool adapt_matrix( + const parameters::Weights& w, const parameters::Modules& m, const Population& pop, + size_t mu, const parameters::Settings& settings, parameters::Stats& stats) + { + if (settings.one_plus_one and !stats.has_improved) + return true; + return adapt_matrix_inner(w, m, pop, mu, settings, stats); - virtual bool adapt_matrix(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, - size_t mu, const parameters::Settings& settings, const parameters::Stats& stats) = 0; + } - virtual void restart(const parameters::Settings& settings) = 0; + virtual bool adapt_matrix_inner( + const parameters::Weights& w, const parameters::Modules& m, const Population& pop, + size_t mu, const parameters::Settings& 
settings, parameters::Stats& stats) = 0; virtual Vector compute_y(const Vector&) = 0; @@ -38,6 +54,24 @@ namespace matrix_adaptation virtual Vector invert_y(const Vector&) = 0; + virtual void restart(const parameters::Settings& settings, const Float sigma) + { + m = settings.x0.value_or(Vector::Zero(settings.dim)); + m_old.setZero(); + dm.setZero(); + ps.setZero(); + dz.setZero(); + } + + Float distance(const Vector u, const Vector& v) + { + const auto& delta = u - v; + return invert_y(delta).norm(); + } + + Float distance_from_center(const Vector& xi) { + return distance(m, xi); + } }; struct None final : Adaptation @@ -46,17 +80,16 @@ namespace matrix_adaptation { } - bool adapt_matrix(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, - const size_t mu, const parameters::Settings& settings, const parameters::Stats& stats) override + bool adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, + const size_t mu, const parameters::Settings& settings, parameters::Stats& stats) override { return true; } - void adapt_evolution_paths(const Population& pop, const parameters::Weights& w, - const std::shared_ptr& mutation, const parameters::Stats& stats, - size_t mu, size_t lambda) override; + void adapt_evolution_paths_inner(const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; - void restart(const parameters::Settings& settings) override; Vector compute_y(const Vector&) override; @@ -67,8 +100,8 @@ namespace matrix_adaptation { Vector pc, d; Matrix B, C; + Matrix A; Matrix inv_root_C; - bool hs = true; CovarianceAdaptation(const size_t dim, const Vector& x0, const Float expected_length_z) : Adaptation(dim, x0, Vector::Zero(dim), expected_length_z), @@ -76,6 +109,7 @@ namespace matrix_adaptation d(Vector::Ones(dim)), B(Matrix::Identity(dim, dim)), C(Matrix::Identity(dim, dim)), + 
A(Matrix::Identity(dim, dim)), inv_root_C(Matrix::Identity(dim, dim)) { } @@ -85,41 +119,45 @@ namespace matrix_adaptation virtual bool perform_eigendecomposition(const parameters::Settings& settings); - void adapt_evolution_paths(const Population& pop, const parameters::Weights& w, - const std::shared_ptr& mutation, const parameters::Stats& stats, - size_t mu, size_t lambda) override; + void adapt_ps(const parameters::Weights& w) override; + + void adapt_evolution_paths_inner(const Population& pop, const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; - bool adapt_matrix(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, - const parameters::Settings& settings, const parameters::Stats& stats) override; + bool adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) override; - void restart(const parameters::Settings& settings) override; + void restart(const parameters::Settings& settings, const Float sigma) override; Vector compute_y(const Vector&) override; Vector invert_y(const Vector&) override; }; - struct SeperableAdaptation : CovarianceAdaptation + struct SeparableAdaptation : Adaptation { - using CovarianceAdaptation::CovarianceAdaptation; - - bool perform_eigendecomposition(const parameters::Settings& settings) override; - }; + Vector pc, d, c; + bool hs; + SeparableAdaptation(const size_t dim, const Vector& x0, const Float expected_length_z) : Adaptation(dim, x0, Vector::Zero(dim), expected_length_z), + pc(Vector::Zero(dim)), + d(Vector::Ones(dim)), + c(Vector::Ones(dim)), + hs(true) + { + } - struct OnePlusOneAdaptation: CovarianceAdaptation - { - constexpr static Float max_success_ratio = 0.44; + void adapt_evolution_paths_inner(const Population& pop, const parameters::Weights& w, + const parameters::Stats& 
stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; - using CovarianceAdaptation::CovarianceAdaptation; + bool adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) override; - void adapt_evolution_paths(const Population& pop, const parameters::Weights& w, - const std::shared_ptr& mutation, const parameters::Stats& stats, - size_t mu, size_t lambda) override; + void restart(const parameters::Settings& settings, const Float sigma) override; - bool adapt_matrix(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, - const parameters::Settings& settings, const parameters::Stats& stats) override; + Vector compute_y(const Vector&) override; + Vector invert_y(const Vector&) override; }; @@ -130,25 +168,137 @@ namespace matrix_adaptation MatrixAdaptation(const size_t dim, const Vector& x0, const Float expected_length_z) : Adaptation(dim, x0, Vector::Ones(dim), expected_length_z), M(Matrix::Identity(dim, dim)), - M_inv(Matrix::Identity(dim, dim)) + M_inv(Matrix::Identity(dim, dim)), + outdated_M_inv(false) { } - void adapt_evolution_paths(const Population& pop, const parameters::Weights& w, - const std::shared_ptr& mutation, const parameters::Stats& stats, - size_t mu, size_t lambda) override; + void adapt_evolution_paths_inner(const Population& pop, const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; - bool adapt_matrix(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, - const parameters::Settings& settings, const parameters::Stats& stats) override; + bool adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) override; - void 
restart(const parameters::Settings& settings) override; + void restart(const parameters::Settings& settings, const Float sigma) override; Vector compute_y(const Vector&) override; Vector invert_y(const Vector&) override; + + private: + bool outdated_M_inv; + }; + + struct CholeskyAdaptation final : Adaptation + { + Matrix A; + Vector pc; + /* + First, as only triangular matrices have to be stored, the storage complexity is optimal. + Second, the diagonal elements of a triangular Cholesky factor are the square roots of the eigenvalues + of the factorized matrix, that is, we get the eigenvalues of the covariance matrix for free*/ + + CholeskyAdaptation(const size_t dim, const Vector& x0, const Float expected_length_z) + : Adaptation(dim, x0, Vector::Ones(dim), expected_length_z), + A(Matrix::Identity(dim, dim)), + pc(Vector::Zero(dim)) + { + } + + void adapt_evolution_paths_inner(const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; + + bool adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) override; + + void restart(const parameters::Settings& settings, const Float sigma) override; + + Vector compute_y(const Vector&) override; + + Vector invert_y(const Vector&) override; + }; + + struct SelfAdaptation final : Adaptation + { + Matrix A; + Matrix C; + + SelfAdaptation(const size_t dim, const Vector& x0, const Float expected_length_z) + : Adaptation(dim, x0, Vector::Ones(dim), expected_length_z), + A(Matrix::Identity(dim, dim)), + C(Matrix::Identity(dim, dim)) + {} + + void adapt_evolution_paths_inner(const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; + + bool adapt_matrix_inner(const parameters::Weights& w, const 
parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) override; + + void restart(const parameters::Settings& settings, const Float sigma) override; + + Vector compute_y(const Vector&) override; + + Vector invert_y(const Vector&) override; + + }; + + struct CovarianceNoEigvAdaptation : CovarianceAdaptation + { + using CovarianceAdaptation::CovarianceAdaptation; + + void adapt_ps(const parameters::Weights& w) override; + + bool perform_eigendecomposition(const parameters::Settings& settings) override; + + Vector invert_y(const Vector&) override; + }; + + struct NaturalGradientAdaptation final : Adaptation + { + Matrix A; + Matrix G; + Matrix A_inv; + Float sigma_g; + + NaturalGradientAdaptation(const size_t dim, const Vector& x0, const Float expected_length_z, const Float sigma0) + : Adaptation(dim, x0, Vector::Ones(dim), expected_length_z), + A(Matrix::Identity(dim, dim) / sigma0), + G(Matrix::Zero(dim, dim)), + A_inv(Matrix::Identity(dim, dim)), + sigma_g(0), + outdated_A_inv(false) + {} + + void compute_gradients( + const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, + const parameters::Settings& settings, + size_t mu, + size_t lambda + ); + + void adapt_evolution_paths_inner(const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) override; + + bool adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) override; + + void restart(const parameters::Settings& settings, const Float sigma) override; + + Vector compute_y(const Vector&) override; + + Vector invert_y(const Vector&) override; + + private: + bool outdated_A_inv; }; - inline std::shared_ptr get(const parameters::Modules& m, const size_t dim, const Vector& x0, const Float expected_z) + 
inline std::shared_ptr get(const parameters::Modules& m, const size_t dim, const Vector& x0, const Float expected_z, const Float sigma0) { using namespace parameters; switch (m.matrix_adaptation) @@ -157,10 +307,16 @@ namespace matrix_adaptation return std::make_shared(dim, x0, expected_z); case MatrixAdaptationType::NONE: return std::make_shared(dim, x0, expected_z); - case MatrixAdaptationType::SEPERABLE: - return std::make_shared(dim, x0, expected_z); - case MatrixAdaptationType::ONEPLUSONE: - return std::make_shared(dim, x0, expected_z); + case MatrixAdaptationType::SEPARABLE: + return std::make_shared(dim, x0, expected_z); + case MatrixAdaptationType::CHOLESKY: + return std::make_shared(dim, x0, expected_z); + case MatrixAdaptationType::CMSA: + return std::make_shared(dim, x0, expected_z); + case MatrixAdaptationType::COVARIANCE_NO_EIGV: + return std::make_shared(dim, x0, expected_z); + case MatrixAdaptationType::NATURAL_GRADIENT: + return std::make_shared(dim, x0, expected_z, sigma0); default: case MatrixAdaptationType::COVARIANCE: return std::make_shared(dim, x0, expected_z); diff --git a/include/modules.hpp b/include/modules.hpp index a78724c..e64b4a1 100644 --- a/include/modules.hpp +++ b/include/modules.hpp @@ -6,7 +6,7 @@ namespace parameters { DEFAULT, EQUAL, - HALF_POWER_LAMBDA + EXPONENTIAL }; enum class BaseSampler @@ -44,7 +44,8 @@ namespace parameters MXNES, LPXNES, PSR, - SR + SR, + SA, }; enum class CorrectionMethod @@ -61,8 +62,8 @@ namespace parameters enum class RestartStrategyType { NONE, - STOP, RESTART, + STOP, IPOP, BIPOP }; @@ -72,8 +73,11 @@ namespace parameters NONE, COVARIANCE, MATRIX, - SEPERABLE, - ONEPLUSONE + SEPARABLE, + CHOLESKY, + CMSA, + COVARIANCE_NO_EIGV, + NATURAL_GRADIENT }; enum class CenterPlacement diff --git a/include/mutation.hpp b/include/mutation.hpp index 876cf3d..ec2d80a 100644 --- a/include/mutation.hpp +++ b/include/mutation.hpp @@ -7,191 +7,196 @@ namespace parameters { - struct Stats; - struct Parameters; - 
struct Weights; - struct Strategy; - struct Modules; + struct Stats; + struct Parameters; + struct Weights; + struct Strategy; + struct Modules; } namespace matrix_adaptation { - struct Adaptation; + struct Adaptation; } namespace bounds { - struct BoundCorrection; + struct BoundCorrection; } namespace mutation { - struct ThresholdConvergence - { - Float init_threshold = 0.1; - Float decay_factor = 0.995; - virtual Vector scale(const Vector &zi, const Float diameter, const size_t budget, const size_t evaluations); - }; - - struct NoThresholdConvergence : ThresholdConvergence - { - Vector scale(const Vector &zi, const Float diameter, const size_t budget, const size_t evaluations) override - { - return zi; - } - }; - - class SequentialSelection - { - Float seq_cutoff_factor; - size_t seq_cutoff; - - public: - SequentialSelection(const parameters::Mirror &m, const size_t mu, const Float seq_cutoff_factor = 1.0) : seq_cutoff_factor(m == parameters::Mirror::PAIRWISE ? std::max(Float{2.}, seq_cutoff_factor) : seq_cutoff_factor), - seq_cutoff(static_cast(mu * seq_cutoff_factor)) - { - } - virtual bool break_conditions(const size_t i, const Float f, Float fopt, const parameters::Mirror &m); - }; - - struct NoSequentialSelection : SequentialSelection - { - - using SequentialSelection::SequentialSelection; - - bool break_conditions(const size_t i, const Float f, Float fopt, const parameters::Mirror &m) override { return false; } - }; - - struct SigmaSampler - { - Float beta; - - SigmaSampler(const Float d) : beta(std::log(2.0) / std::max((std::sqrt(d) * std::log(d)), Float{1.0})) {} - - virtual void sample(const Float sigma, Population &pop) const - { - pop.s = sampling::Random>(pop.s.size(), - std::lognormal_distribution<>(std::log(sigma), beta))(); - } - }; - - struct NoSigmaSampler : SigmaSampler - { - using SigmaSampler::SigmaSampler; - - void sample(const Float sigma, Population &pop) const override - { - pop.s.setConstant(sigma); - } - }; - - struct Strategy - { - 
std::shared_ptr tc; - std::shared_ptr sq; - std::shared_ptr ss; - Float cs; - Float sigma; - Float s = 0; - - Strategy( - const std::shared_ptr &threshold_covergence, - const std::shared_ptr &sequential_selection, - const std::shared_ptr &sigma_sampler, - const Float cs, const Float sigma0) : tc(threshold_covergence), sq(sequential_selection), ss(sigma_sampler), cs(cs), sigma(sigma0) {} - - virtual void mutate(FunctionType &objective, const size_t n_offspring, parameters::Parameters &p) = 0; - - virtual void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) = 0; - }; - - struct CSA : Strategy - { - Float damps; - Float expected_length_z; - - CSA(const std::shared_ptr &threshold_covergence, - const std::shared_ptr &sequential_selection, - const std::shared_ptr &sigma_sampler, - const Float cs, const Float damps, const Float sigma0, const Float expected_z) : Strategy(threshold_covergence, sequential_selection, sigma_sampler, cs, sigma0), damps(damps), expected_length_z(expected_z) {} - - void mutate(FunctionType &objective, const size_t n_offspring, parameters::Parameters &p) override; - - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; - - struct TPA : CSA - { - using CSA::CSA; - - Float a_tpa = 0.5; - Float b_tpa = 0.0; - Float rank_tpa = 0.0; - - void mutate(FunctionType &objective, const size_t n_offspring, parameters::Parameters &p) override; - - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; - - struct MSR : CSA - { - using CSA::CSA; + struct ThresholdConvergence + { + Float init_threshold = 0.1; + Float decay_factor = 0.995; + virtual Vector scale(const Vector& zi, const Float 
diameter, const size_t budget, const size_t evaluations); + }; + + struct NoThresholdConvergence : ThresholdConvergence + { + Vector scale(const Vector& zi, const Float diameter, const size_t budget, const size_t evaluations) override + { + return zi; + } + }; + + class SequentialSelection + { + Float seq_cutoff_factor; + size_t seq_cutoff; + + public: + SequentialSelection(const parameters::Mirror& m, const size_t mu, const Float seq_cutoff_factor = 1.0) : seq_cutoff_factor(m == parameters::Mirror::PAIRWISE ? std::max(Float{ 2. }, seq_cutoff_factor) : seq_cutoff_factor), + seq_cutoff(static_cast(mu* seq_cutoff_factor)) + {} + virtual bool break_conditions(const size_t i, const Float f, Float fopt, const parameters::Mirror& m); + }; + + struct NoSequentialSelection : SequentialSelection + { + using SequentialSelection::SequentialSelection; + + bool break_conditions(const size_t i, const Float f, Float fopt, const parameters::Mirror& m) override { return false; } + }; + + struct SigmaSampler + { + sampling::GaussianTransformer sampler; + + SigmaSampler(const Float d) : sampler{ std::make_shared(1) } + {} + + virtual void sample(const Float sigma, Population& pop, const Float tau) + { + sampler.sampler->d = pop.s.rows(); + pop.s.noalias() = (sigma * (tau * sampler().array()).exp()).matrix().eval(); + } + }; + + struct NoSigmaSampler : SigmaSampler + { + using SigmaSampler::SigmaSampler; + + void sample(const Float sigma, Population& pop, const Float tau) override + { + pop.s.setConstant(sigma); + } + }; - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; + struct Strategy + { + std::shared_ptr tc; + std::shared_ptr sq; + std::shared_ptr ss; + Float sigma; + Float s = 0; - struct PSR : CSA - { - Float success_ratio = .25; + Strategy( + const std::shared_ptr& threshold_covergence, + const std::shared_ptr& sequential_selection, + const 
std::shared_ptr& sigma_sampler, + const Float sigma0) : tc(threshold_covergence), sq(sequential_selection), ss(sigma_sampler), sigma(sigma0) + {} - Vector combined; + virtual void mutate(FunctionType& objective, const size_t n_offspring, parameters::Parameters& p); - using CSA::CSA; + virtual void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) {}; + }; - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; + struct CSA : Strategy + { + using Strategy::Strategy; - struct XNES : CSA - { - using CSA::CSA; + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; + struct TPA : Strategy + { + using Strategy::Strategy; - struct MXNES : CSA - { - using CSA::CSA; + Float a_tpa = 0.5; + Float b_tpa = 0.0; + Float rank_tpa = 0.0; - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; + void mutate(FunctionType& objective, const size_t n_offspring, parameters::Parameters& p) override; - struct LPXNES : CSA - { - using CSA::CSA; + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; - void adapt(const parameters::Weights &w, std::shared_ptr adaptation, Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) override; - }; + struct 
MSR : Strategy + { + using Strategy::Strategy; + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; - struct SR : CSA - { - constexpr static Float tgt_success_ratio = 2.0 / 11.0; - - using CSA::CSA; + struct PSR : Strategy + { + Float success_ratio = .25; - void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, - const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; - }; - + Vector combined; - std::shared_ptr get(const parameters::Modules &m, const size_t mu, - const Float mueff, const Float d, const Float sigma, const std::optional cs, const Float expected_z); + using Strategy::Strategy; + + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; + + struct XNES : Strategy + { + using Strategy::Strategy; + + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; + + struct MXNES : Strategy + { + using Strategy::Strategy; + + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; + + struct LPXNES : Strategy + { + using Strategy::Strategy; + + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + }; + + + struct SR : Strategy + { + constexpr static Float tgt_success_ratio = 2.0 / 11.0; + + using Strategy::Strategy; + + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const 
size_t lambda) override; + }; + + + struct SA : Strategy + { + using Strategy::Strategy; + + void adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) override; + + void mutate(FunctionType& objective, const size_t n_offspring, parameters::Parameters& p) override; + + private: + Float mean_sigma; + }; + + + + std::shared_ptr get(const parameters::Modules& m, const size_t mu, const Float d, const Float sigma); } \ No newline at end of file diff --git a/include/parameters.hpp b/include/parameters.hpp index 1f4d0a5..fd625db 100644 --- a/include/parameters.hpp +++ b/include/parameters.hpp @@ -24,13 +24,14 @@ namespace parameters Settings settings; Stats stats; + std::shared_ptr sampler; + Weights weights; Population pop; Population old_pop; restart::Criteria criteria; - std::shared_ptr sampler; std::shared_ptr adaptation; std::shared_ptr mutation; std::shared_ptr selection; diff --git a/include/population.hpp b/include/population.hpp index c7a3c08..f6f068a 100644 --- a/include/population.hpp +++ b/include/population.hpp @@ -9,15 +9,16 @@ struct Population Matrix Y; Vector f; Vector s; + Vector t; size_t d; size_t n; Population(const size_t d, const size_t n) - : X(d, n), Z(d, n), Y(d, n), f(Vector::Constant(n, std::numeric_limits::infinity())), s(n), d(d), n(n) {} + : X(d, n), Z(d, n), Y(d, n), f(Vector::Constant(n, std::numeric_limits::infinity())), s(n), t(n), d(d), n(n) {} Population(const Matrix &X, const Matrix &Z, const Matrix &Y, const Vector &f, const Vector &s) - : X(X), Z(Z), Y(Y), f(f), s(s), d(X.rows()), n(X.cols()) {} + : X(X), Z(Z), Y(Y), f(f), s(s), t(f.rows()), d(X.rows()), n(X.cols()) {} Population() : Population(0, 0) {} diff --git a/include/repelling.hpp b/include/repelling.hpp index c8cdbb7..7d1ba8f 100644 --- a/include/repelling.hpp +++ b/include/repelling.hpp @@ -12,22 +12,22 @@ namespace repelling { namespace distance { - Float 
manhattan(const Vector &u, const Vector &v); - Float euclidian(const Vector &u, const Vector &v); - Float mahanolobis(const Vector &u, const Vector &v, const Matrix &C_inv); + Float manhattan(const Vector& u, const Vector& v); + Float euclidian(const Vector& u, const Vector& v); + Float mahanolobis(const Vector& u, const Vector& v, const Matrix& C_inv); bool hill_valley_test( - const Solution &u, - const Solution &v, - FunctionType &f, + const Solution& u, + const Solution& v, + FunctionType& f, const size_t n_evals); bool hill_valley_test_p( - const Solution &u, - const Solution &v, - FunctionType &f, + const Solution& u, + const Solution& v, + FunctionType& f, const size_t n_evals, - parameters::Parameters &p); + parameters::Parameters& p); } struct TabooPoint @@ -37,20 +37,17 @@ namespace repelling Float shrinkage; int n_rep; Float criticality; - // Matrix C; - // Matrix C_inv; TabooPoint( - const Solution &s, - const Float radius/*, - const Matrix& C, const Matrix& C_inv*/ ) : solution(s), - radius(radius), - shrinkage(std::pow(0.99, 1. / static_cast(s.x.size()))), - n_rep(1), - criticality(0.0) {} - /*, - C(C), - C_inv(C_inv) {}*/ + const Solution& s, + const Float radius) : + solution(s), + radius(radius), + shrinkage(std::pow(0.95, 1. 
/ static_cast(s.x.size()))), + n_rep(1), + criticality(0.0) + {} + /** * \brief Rejection rule for a taboo point for a given xi @@ -59,11 +56,11 @@ namespace repelling * \param attempts determines the amount of shrinkage applied; radius = pow(shrinkage, attempts) * radius * \return */ - bool rejects(const Vector &xi, const parameters::Parameters &p, const int attempts) const; + bool rejects(const Vector& xi, const parameters::Parameters& p, const int attempts) const; - bool shares_basin(FunctionType &objective, const Solution &sol, parameters::Parameters &p) const; + bool shares_basin(FunctionType& objective, const Solution& sol, parameters::Parameters& p) const; - void calculate_criticality(const parameters::Parameters &p); + void calculate_criticality(const parameters::Parameters& p); }; struct Repelling @@ -71,8 +68,6 @@ namespace repelling std::vector archive; int attempts = 0; Float coverage = 20.0; - // Matrix C; - // Matrix C_inv; virtual ~Repelling() = default; @@ -83,38 +78,36 @@ namespace repelling * \param p * \return */ - virtual bool is_rejected(const Vector &xi, parameters::Parameters &p); + virtual bool is_rejected(const Vector& xi, parameters::Parameters& p); /** * \brief Update the archive of points * \param p */ - virtual void update_archive(FunctionType &objective, parameters::Parameters &p); + virtual void update_archive(FunctionType& objective, parameters::Parameters& p); /** * \brief Hook before sampling starts */ - virtual void prepare_sampling(const parameters::Parameters &p); + virtual void prepare_sampling(const parameters::Parameters& p); }; struct NoRepelling final : Repelling { - bool is_rejected(const Vector &xi, parameters::Parameters &p) override + bool is_rejected(const Vector& xi, parameters::Parameters& p) override { return false; } - void update_archive(FunctionType &objective, parameters::Parameters &p) override - { - } + void update_archive(FunctionType& objective, parameters::Parameters& p) override + {} - void 
prepare_sampling(const parameters::Parameters &p) override - { - } + void prepare_sampling(const parameters::Parameters& p) override + {} }; - inline std::shared_ptr get(const parameters::Modules &m) + inline std::shared_ptr get(const parameters::Modules& m) { if (m.repelling_restart) return std::make_shared(); diff --git a/include/restart_criteria.hpp b/include/restart_criteria.hpp index 2d84462..d892575 100644 --- a/include/restart_criteria.hpp +++ b/include/restart_criteria.hpp @@ -10,153 +10,174 @@ namespace parameters namespace restart { - struct Criterion { - bool met; - std::string name; - size_t last_restart; + struct Criterion + { + bool met; + std::string name; + size_t last_restart; - Criterion(const std::string& name): met(false), name(name) {} + Criterion(const std::string& name) : met(false), name(name), last_restart(0) {} - virtual ~Criterion() = default; + virtual ~Criterion() = default; - void reset(const parameters::Parameters &p); + void reset(const parameters::Parameters& p); - virtual void update(const parameters::Parameters &p) = 0; + virtual void update(const parameters::Parameters& p) = 0; - virtual void on_reset(const parameters::Parameters &p){}; - }; + virtual void on_reset(const parameters::Parameters& p) {}; + }; + + using vCriteria = std::vector>; + + struct Criteria + { + Criteria(const vCriteria& c) : items(c) {} - using vCriteria = std::vector>; - - struct Criteria { - Criteria(const vCriteria& c): items(c){} - - void update(const parameters::Parameters &p) + void update(const parameters::Parameters& p) { - any = false; - for (const auto& c: items) + for (auto& c : items) { c->update(p); - any = any or c->met; } } - - void reset(const parameters::Parameters &p) + + void reset(const parameters::Parameters& p) { - for (const auto& c: items) - c->reset(p); + for (auto& c : items) + c->reset(p); + } + + bool any() const + { + for (const auto& c : items) + if (c->met) + return true; + return false; + } + + std::string reason() const + { 
+ std::string res; + for (const auto& c : items) + if (c->met) + res += c->name + "; "; + return res; } vCriteria items; - bool any; - - static Criteria get(const parameters::Modules modules); - }; - - - struct ExceededMaxIter: Criterion - { - size_t max_iter; - ExceededMaxIter(): Criterion("ExceededMaxIter"){} - void update(const parameters::Parameters &p) override; - void on_reset(const parameters::Parameters &p) override; - }; - - struct NoImprovement: Criterion - { - size_t n_bin; - std::vector best_fitnesses; - NoImprovement(): Criterion("NoImprovement"){} - void update(const parameters::Parameters &p) override; - void on_reset(const parameters::Parameters &p) override; - }; - - struct MaxSigma: Criterion - { - static inline Float tolerance = 1e4; - MaxSigma(): Criterion("MaxSigma"){} - void update(const parameters::Parameters &p) override; - }; - - struct MinSigma: Criterion - { - static inline Float tolerance = 1e-20; - MinSigma(): Criterion("MinSigma"){} - void update(const parameters::Parameters &p) override; - }; - - struct UnableToAdapt: Criterion - { - UnableToAdapt(): Criterion("UnableToAdapt"){} - void update(const parameters::Parameters &p) override; - }; - - struct FlatFitness: Criterion - { - size_t max_flat_fitness; - size_t flat_fitness_index; - Eigen::Array flat_fitnesses; - - FlatFitness(): Criterion("FlatFitness"){} - void update(const parameters::Parameters &p) override; - void on_reset(const parameters::Parameters &p) override; - }; - - struct TolX: Criterion - { - static inline Float tolerance = 10e-12; - Vector tolx_vector; - TolX(): Criterion("TolX"){} - void update(const parameters::Parameters &p) override; - void on_reset(const parameters::Parameters &p) override; - }; - - - struct MaxDSigma: Criterion - { - static inline Float tolerance = std::pow(10., 20.); - MaxDSigma(): Criterion("MaxDSigma"){} - void update(const parameters::Parameters &p) override; - }; - - struct MinDSigma: Criterion - { - static inline Float tolerance = 1e-8; - 
MinDSigma(): Criterion("MinDSigma"){} - void update(const parameters::Parameters &p) override; - }; - - - struct ConditionC: Criterion - { - static inline Float tolerance = std::pow(10., 14.); - ConditionC(): Criterion("ConditionC"){} - void update(const parameters::Parameters &p) override; - }; - - struct NoEffectAxis: Criterion - { - static inline Float tolerance = 0.; - NoEffectAxis(): Criterion("NoEffectAxis"){} - void update(const parameters::Parameters &p) override; - }; - - struct NoEffectCoord: Criterion - { - static inline Float tolerance = 0.; - NoEffectCoord(): Criterion("NoEffectCoord"){} - void update(const parameters::Parameters &p) override; - }; - - struct Stagnation: Criterion - { - static inline Float tolerance = 0.3; - - size_t n_stagnation; - std::vector median_fitnesses; + + static Criteria get(const parameters::Modules modules); + }; + + struct ExceededMaxIter : Criterion + { + size_t max_iter; + ExceededMaxIter() : Criterion("ExceededMaxIter") {} + void update(const parameters::Parameters& p) override; + void on_reset(const parameters::Parameters& p) override; + }; + + struct NoImprovement : Criterion + { + size_t n_bin; + std::vector best_fitnesses; + NoImprovement() : Criterion("NoImprovement") {} + void update(const parameters::Parameters& p) override; + void on_reset(const parameters::Parameters& p) override; + }; + + struct MaxSigma : Criterion + { + static inline Float tolerance = 1e4; + MaxSigma() : Criterion("MaxSigma") {} + void update(const parameters::Parameters& p) override; + }; + + struct MinSigma : Criterion + { + static inline Float tolerance = 1e-20; + MinSigma() : Criterion("MinSigma") {} + void update(const parameters::Parameters& p) override; + }; + + struct UnableToAdapt : Criterion + { + UnableToAdapt() : Criterion("UnableToAdapt") {} + void update(const parameters::Parameters& p) override; + }; + + struct FlatFitness : Criterion + { + size_t max_flat_fitness; + size_t flat_fitness_index; + Eigen::Array flat_fitnesses; + 
+ FlatFitness() : Criterion("FlatFitness") {} + void update(const parameters::Parameters& p) override; + void on_reset(const parameters::Parameters& p) override; + }; + + struct TolX : Criterion + { + static inline Float tolerance = 1e-11; + Vector tolx_vector; + TolX() : Criterion("TolX") {} + void update(const parameters::Parameters& p) override; + void on_reset(const parameters::Parameters& p) override; + }; + + struct MaxDSigma : Criterion + { + static inline Float tolerance = std::pow(10., 20.); + MaxDSigma() : Criterion("MaxDSigma") {} + void update(const parameters::Parameters& p) override; + }; + + struct MinDSigma : Criterion + { + static inline Float tolerance = std::pow(10., -20.); + MinDSigma() : Criterion("MinDSigma") {} + void update(const parameters::Parameters& p) override; + }; + + struct ConditionC : Criterion + { + static inline Float tolerance = std::pow(10., 14.); + ConditionC() : Criterion("ConditionC") {} + void update(const parameters::Parameters& p) override; + }; + + struct NoEffectAxis : Criterion + { + static inline Float tolerance = 10.0 * std::numeric_limits::epsilon(); + NoEffectAxis() : Criterion("NoEffectAxis") {} + void update(const parameters::Parameters& p) override; + }; + + struct NoEffectCoord : Criterion + { + static inline Float tolerance = 10.0 * std::numeric_limits::epsilon(); + NoEffectCoord() : Criterion("NoEffectCoord") {} + void update(const parameters::Parameters& p) override; + }; + + struct Stagnation : Criterion + { + static inline Float tolerance = 0.3; + + size_t n_stagnation; + std::vector median_fitnesses; std::vector best_fitnesses; - Stagnation(): Criterion("Stagnation"){} - void update(const parameters::Parameters &p) override; - void on_reset(const parameters::Parameters &p) override; - }; + Stagnation() : Criterion("Stagnation") {} + void update(const parameters::Parameters& p) override; + void on_reset(const parameters::Parameters& p) override; + }; + + struct TooMuchRepelling : Criterion + { + static 
inline Float tolerance = 50; + TooMuchRepelling() : Criterion("TooMuchRepelling") {} + void update(const parameters::Parameters& p) override; + }; + } \ No newline at end of file diff --git a/include/sampling.hpp b/include/sampling.hpp index 4db1e2d..c1bea50 100644 --- a/include/sampling.hpp +++ b/include/sampling.hpp @@ -17,23 +17,23 @@ namespace sampling struct Sampler { Sampler(const size_t d) : d(d) {} - [[nodiscard]] virtual Vector operator()() = 0; size_t d; - + virtual void reset(const parameters::Modules &, const size_t) { } - + virtual Float expected_length() { return std::sqrt(static_cast(d)); } + [[nodiscard]] virtual Vector operator()() = 0; }; /** * @brief Testing sampler, simple incrementing generator. */ - struct Tester : Sampler + struct Tester : Sampler { Tester(const size_t d) : Sampler(d) {} @@ -219,7 +219,7 @@ namespace sampling /** * Should be overwritten, transforms U(0,1) indep samples into something else */ - [[nodiscard]] virtual Vector transform(Vector x) = 0; + [[nodiscard]] virtual Vector transform(const Vector& x) = 0; [[nodiscard]] virtual Vector operator()() override { @@ -236,7 +236,6 @@ namespace sampling return (*sampler)(); } - protected: std::shared_ptr sampler; }; @@ -244,7 +243,7 @@ namespace sampling { IdentityTransformer(const std::shared_ptr sampler) : SampleTransformer(sampler) {} - [[nodiscard]] virtual Vector transform(Vector x) + [[nodiscard]] virtual Vector transform(const Vector& x) { return x; } @@ -262,11 +261,62 @@ namespace sampling return sqrt(dd) * (1.0 - 1.0 / (4.0 * dd) + 1.0 / (21.0 * pow(dd, 2.0))); } - [[nodiscard]] virtual Vector transform(Vector x) override + [[nodiscard]] inline std::pair box_muller(const Float u1, const Float u2) { - for (auto &xi : x) - xi = ppf(xi); - return x; + const Float r = std::sqrt(-2.0 * std::log(u1)); + const Float theta = 2.0 * M_PI * u2; + return { r * std::cos(theta), r * std::sin(theta) }; + } + + + [[nodiscard]] inline Vector box_muller(const Vector& u) + { + static 
Vector u_extra; + static int n_extra_used = 0; + static bool needs_new_sample = true; + static Float extra_sample; + + size_t n = u.size(); + size_t m = n / 2; + + Vector z(n); + for (size_t i = 0; i < m; ++i) { + const auto&[n1, n2] = box_muller(u(2 * i), u(2 * i + 1)); + z(2 * i) = n1; + z(2 * i + 1) = n2; + } + + if (n % 2 != 0) + { + if (u_extra.size() <= n_extra_used) + { + u_extra = (*sampler)(); + n_extra_used = 0; + } + + if (needs_new_sample) + { + const auto&[n1, n2] = box_muller(u(n - 1), u_extra(n_extra_used++)); + z(n - 1) = n1; + extra_sample = n2; + needs_new_sample = false; + } + else + { + z(n -1) = extra_sample; + needs_new_sample = true; + } + } + + return z; + } + + + [[nodiscard]] virtual Vector transform(const Vector& x) override + { + if (constants::use_box_muller) + return box_muller(x); + return x.unaryExpr(&ppf); } }; @@ -276,11 +326,9 @@ namespace sampling UniformScaler(const std::shared_ptr sampler) : SampleTransformer(sampler) {} - [[nodiscard]] virtual Vector transform(Vector x) override + [[nodiscard]] virtual Vector transform(const Vector& x) override { - for (auto &xi : x) - xi = -scale + (2.0 * scale) * xi; - return x; + return (-scale + (2.0 * scale) * x.array()).matrix(); } }; @@ -290,17 +338,11 @@ namespace sampling LaplaceTransformer(const std::shared_ptr sampler) : SampleTransformer(sampler) {} - [[nodiscard]] virtual Vector transform(Vector x) override + [[nodiscard]] virtual Vector transform(const Vector& x) override { - for (auto &xi : x) - { - if (xi < 0.5) - xi = b * std::log(2.0 * xi); - else - xi = -b * std::log(2.0 * (1.0 - xi)); - - } - return x; + return ((x.array() < 0.5) + .select(b * (2.0 * x.array()).log(), + -b * (2.0 * (1.0 - x.array())).log())).matrix(); } }; @@ -310,11 +352,9 @@ namespace sampling LogisticTransformer(const std::shared_ptr sampler) : SampleTransformer(sampler) {} - [[nodiscard]] virtual Vector transform(Vector x) override + [[nodiscard]] virtual Vector transform(const Vector& x) override { - 
for (auto &xi : x) - xi = s * std::log(xi / (1 - xi)); - return x; + return (s * (x.array() / (1.0 - x.array())).log()).matrix(); } }; @@ -335,11 +375,9 @@ namespace sampling return median_z; } - [[nodiscard]] virtual Vector transform(Vector x) override + [[nodiscard]] virtual Vector transform(const Vector& x) override { - for (auto &xi : x) - xi = gamma * std::tan(M_PI * (xi - 0.5)); - return x; + return (gamma * (M_PI * (x.array() - 0.5)).tan()).matrix(); } }; @@ -347,14 +385,13 @@ namespace sampling { DoubleWeibullTransformer(const std::shared_ptr sampler) : SampleTransformer(sampler) {} - [[nodiscard]] virtual Vector transform(Vector x) override + [[nodiscard]] virtual Vector transform(const Vector& x) override { - for (auto &xi : x) - if (xi < 0.5) - xi = -std::sqrt(-std::log(2.0 * xi)); - else - xi = std::sqrt(-std::log(2.0 * (1.0 - xi))); - return x; + return ((x.array() < 0.5) + .select( + -(-((2.0 * x.array()).log())).sqrt(), + (-((2.0 * (1.0 - x.array())).log())).sqrt() + )).matrix(); } }; diff --git a/include/settings.hpp b/include/settings.hpp index ae475f4..810f8ed 100644 --- a/include/settings.hpp +++ b/include/settings.hpp @@ -5,86 +5,116 @@ namespace parameters { - struct Settings - { - size_t dim; - Modules modules; + struct Settings + { + size_t dim; + Modules modules; - std::optional target; - std::optional max_generations; - size_t budget; + std::optional target; + std::optional max_generations; + size_t budget; - Float sigma0; - size_t lambda0; - size_t mu0; + Float sigma0; + size_t lambda0; + size_t mu0; - std::optional x0; - Vector lb; - Vector ub; - std::optional cs; - std::optional cc; - std::optional cmu; - std::optional c1; - bool verbose; - Float volume; + std::optional x0; + Vector lb; + Vector ub; + std::optional cs; + std::optional cc; + std::optional cmu; + std::optional c1; + std::optional damps; + std::optional acov; + bool verbose; + Float volume; + bool one_plus_one; - Settings(size_t dim, - std::optional mod = std::nullopt, - 
std::optional target = std::nullopt, - std::optional max_generations = std::nullopt, - std::optional budget = std::nullopt, - std::optional sigma = std::nullopt, - std::optional lambda = std::nullopt, - std::optional mu = std::nullopt, - std::optional x0 = std::nullopt, - std::optional lb = std::nullopt, - std::optional ub = std::nullopt, - std::optional cs = std::nullopt, - std::optional cc = std::nullopt, - std::optional cmu = std::nullopt, - std::optional c1 = std::nullopt, - bool verbose = false) : dim(dim), - modules(mod.value_or(Modules())), - target(target), - max_generations(max_generations), - budget(budget.value_or(dim * 1e4)), - sigma0(sigma.value_or(2.0)), - lambda0(lambda.value_or(4 + std::floor(3 * std::log(dim)))), - mu0(mu.value_or(lambda0 / 2)), - x0(x0), - lb(lb.value_or(Vector::Ones(dim) * -5)), - ub(ub.value_or(Vector::Ones(dim) * 5)), - cs(cs), - cc(cc), - cmu(cmu), - c1(c1), - verbose(verbose), - volume(0.0) - { - if (modules.mirrored == Mirror::PAIRWISE and lambda0 % 2 != 0) - lambda0++; + Settings(size_t dim, + std::optional mod = std::nullopt, + std::optional target = std::nullopt, + std::optional max_generations = std::nullopt, + std::optional budget = std::nullopt, + std::optional sigma = std::nullopt, + std::optional lambda = std::nullopt, + std::optional mu = std::nullopt, + std::optional x0 = std::nullopt, + std::optional lb = std::nullopt, + std::optional ub = std::nullopt, + std::optional cs = std::nullopt, + std::optional cc = std::nullopt, + std::optional cmu = std::nullopt, + std::optional c1 = std::nullopt, + std::optional damps = std::nullopt, + std::optional acov = std::nullopt, + bool verbose = true, + bool always_compute_eigv = false + ) : dim(dim), + modules(mod.value_or(Modules())), + target(target), + max_generations(max_generations), + budget(budget.value_or(dim * 1e4)), + sigma0(sigma.value_or(2.0)), + lambda0(lambda.value_or(4 + std::floor(3 * std::log(dim)))), + mu0(mu.value_or(lambda0 / 2)), + x0(x0), + 
lb(lb.value_or(Vector::Ones(dim) * -5)), + ub(ub.value_or(Vector::Ones(dim)* 5)), + cs(cs), + cc(cc), + cmu(cmu), + c1(c1), + damps(damps), + acov(acov), + verbose(verbose), + volume(0.0), + one_plus_one(false) + { + if (modules.mirrored == Mirror::PAIRWISE and lambda0 % 2 != 0) + lambda0++; - if (mu0 > lambda0) - { - mu0 = lambda0 / 2; - } + if (mu0 > lambda0) + { + mu0 = lambda0 / 2; + } - if (lambda0 == 1) - { - mu0 = 1; - modules.elitist = true; - modules.active = false; - modules.weights = RecombinationWeights::EQUAL; - modules.ssa = StepSizeAdaptation::SR; - modules.matrix_adaptation = MatrixAdaptationType::ONEPLUSONE; - cc = 2.0 / (static_cast(dim) + 2.0); - c1 = 2.0 / (pow(static_cast(dim),2) + 6.0); + if (modules.ssa == StepSizeAdaptation::SA || modules.matrix_adaptation == MatrixAdaptationType::CMSA) + { + mu0 = mu.value_or(lambda0 / 4); + } - if (modules.restart_strategy == RestartStrategyType::BIPOP || modules.restart_strategy == RestartStrategyType::IPOP) - modules.restart_strategy = RestartStrategyType::RESTART; - } - volume = (this->ub - this->lb).prod(); - } - }; + + if (modules.ssa != StepSizeAdaptation::CSA + and modules.matrix_adaptation == MatrixAdaptationType::COVARIANCE + and not always_compute_eigv + ) + { + modules.matrix_adaptation = MatrixAdaptationType::COVARIANCE_NO_EIGV; + } + + if ( + modules.matrix_adaptation == MatrixAdaptationType::NONE + ) + { + modules.active = false; + } + + if (lambda0 == 1) + { + mu0 = 1; + one_plus_one = true; + modules.elitist = true; + modules.active = false; + modules.sequential_selection = false; + modules.weights = RecombinationWeights::EQUAL; + modules.ssa = StepSizeAdaptation::SR; + + if (modules.restart_strategy == RestartStrategyType::BIPOP || modules.restart_strategy == RestartStrategyType::IPOP) + modules.restart_strategy = RestartStrategyType::RESTART; + } + volume = (this->ub.cwiseMin(10 * sigma0) - this->lb.cwiseMax(-10 * sigma0)).prod(); + } + }; } \ No newline at end of file diff --git 
a/include/stats.hpp b/include/stats.hpp index 9aa108d..f41ef03 100644 --- a/include/stats.hpp +++ b/include/stats.hpp @@ -16,7 +16,9 @@ namespace parameters Solution global_best = {}; bool has_improved = false; Float success_ratio = 2.0 / 11.0; - Float cs = 1.0 / 12.0; + Float cp = 1.0 / 12.0; + size_t last_update = 0; + size_t n_updates = 0; void update_best(const Vector &x, const Float y) { @@ -30,7 +32,7 @@ namespace parameters has_improved = true; } - success_ratio = (1 - cs) * success_ratio + (cs * has_improved); + success_ratio = (1 - cp) * success_ratio + (cp * has_improved); } }; } diff --git a/include/to_string.hpp b/include/to_string.hpp index 5315a95..ed968e0 100644 --- a/include/to_string.hpp +++ b/include/to_string.hpp @@ -11,8 +11,8 @@ namespace parameters { case RecombinationWeights::EQUAL: return "EQUAL"; - case RecombinationWeights::HALF_POWER_LAMBDA: - return "HALF_POWER_LAMBDA"; + case RecombinationWeights::EXPONENTIAL: + return "EXPONENTIAL"; default: case RecombinationWeights::DEFAULT: return "DEFAULT"; @@ -63,9 +63,14 @@ namespace parameters return "MXNES"; case StepSizeAdaptation::LPXNES: return "LPXNES"; - default: + case StepSizeAdaptation::SR: + return "SR"; case StepSizeAdaptation::PSR: return "PSR"; + case StepSizeAdaptation::SA: + return "SA"; + default: + return "unknown"; } } inline std::string to_string(const CorrectionMethod &s) @@ -111,9 +116,22 @@ namespace parameters { case MatrixAdaptationType::MATRIX: return "MATRIX"; - default: + case MatrixAdaptationType::CHOLESKY: + return "CHOLESKY"; + case MatrixAdaptationType::NONE: + return "NONE"; + case MatrixAdaptationType::SEPARABLE: + return "SEPARABLE"; case MatrixAdaptationType::COVARIANCE: return "COVARIANCE"; + case MatrixAdaptationType::CMSA: + return "CMSA"; + case MatrixAdaptationType::COVARIANCE_NO_EIGV: + return "COVARIANCE_NO_EIGV"; + case MatrixAdaptationType::NATURAL_GRADIENT: + return "NATURAL_GRADIENT"; + default: + return "unkown"; } } diff --git a/include/weights.hpp 
b/include/weights.hpp index 17ed571..966f998 100644 --- a/include/weights.hpp +++ b/include/weights.hpp @@ -1,26 +1,31 @@ #pragma once - #include "settings.hpp" namespace parameters { - struct Weights + struct Weights { Vector weights; Vector positive; Vector negative; - Float mueff, mueff_neg; - Float c1, cmu, cc; + Float c1, cmu, cc, cs; + Float damps, acov; + Float sqrt_cc_mueff, sqrt_cs_mueff; + Float lazy_update_interval; + Float expected_length_z; + Float expected_length_ps; + Float beta; - Weights(const size_t dim, const size_t mu, const size_t lambda, const Settings &settings); + Weights(const size_t dim, const size_t mu, const size_t lambda, const Settings &settings, + const Float expected_length_z); - void weights_default(const size_t lambda); + void weights_default(const size_t mu, const size_t lambda); void weights_equal(const size_t mu); - void weights_half_power_lambda(const size_t mu, const size_t lambda); + void weights_exponential(const size_t mu, const size_t lambda); Vector clipped() const; }; diff --git a/modcma/__main__.py b/modcma/__main__.py index bf1b207..c9bbef1 100644 --- a/modcma/__main__.py +++ b/modcma/__main__.py @@ -18,7 +18,7 @@ type=int, help="number of iterations per agent", required=False, - default=50, + default=1, ) parser.add_argument( "-l", "--logging", required=False, action="store_true", default=False @@ -27,6 +27,12 @@ parser.add_argument("-s", "--seed", type=int, required=False, default=42) parser.add_argument("-p", "--data_folder", type=str, required=False) parser.add_argument("-a", "--arguments", nargs="+", required=False) +parser.add_argument( + "-c", "--cpp", required=False, action="store_true", default=False +) +parser.add_argument( + "--plot", required=False, action="store_true", default=False +) args = vars(parser.parse_args()) for arg in args.pop("arguments") or []: @@ -34,4 +40,4 @@ exec(arg, None, args) -evaluate_bbob(**args, active=True, step_size_adaptation="psr") +evaluate_bbob(**args) diff --git 
a/modcma/c_maes/cmaescpp/__init__.pyi b/modcma/c_maes/cmaescpp/__init__.pyi index c8ae86c..b2ea6f8 100644 --- a/modcma/c_maes/cmaescpp/__init__.pyi +++ b/modcma/c_maes/cmaescpp/__init__.pyi @@ -34,6 +34,7 @@ class Population: f: numpy.ndarray n: int s: numpy.ndarray + t: numpy.ndarray @overload def __init__(self, dimension: int, n: int) -> None: ... @overload @@ -56,7 +57,7 @@ class Parameters: adaptation: ( matrix_adaptation.MatrixAdaptation | matrix_adaptation.CovarianceAdaptation - | matrix_adaptation.SeperableAdaptation + | matrix_adaptation.SeparableAdaptation | matrix_adaptation.OnePlusOneAdaptation | matrix_adaptation.NoAdaptation ) diff --git a/modcma/c_maes/cmaescpp/matrix_adaptation.pyi b/modcma/c_maes/cmaescpp/matrix_adaptation.pyi index 40afdb4..e3de1ee 100644 --- a/modcma/c_maes/cmaescpp/matrix_adaptation.pyi +++ b/modcma/c_maes/cmaescpp/matrix_adaptation.pyi @@ -10,7 +10,14 @@ class Adaptation: m_old: numpy.ndarray ps: numpy.ndarray def __init__(self, *args, **kwargs) -> None: ... - def adapt_evolution_paths(self, pop: modcma.c_maes.cmaescpp.Population, weights, mutation: modcma.c_maes.cmaescpp.mutation.Strategy, stats, mu: int, lamb: int) -> None: ... + def adapt_evolution_paths( + self, + pop: modcma.c_maes.cmaescpp.Population, + weights: modcma.c_maes.cmaescpp.parameters.Weights, + stats: modcma.c_maes.cmaescpp.parameters.Stats, + settings: modcma.c_maes.cmaescpp.parameters.Settings, + mu: int, lamb: int + ) -> None: ... def adapt_matrix(self, weights, modules, population: modcma.c_maes.cmaescpp.Population, mu: int, settings, stats) -> bool: ... def compute_y(self, zi: numpy.ndarray) -> numpy.ndarray: ... def invert_x(self, xi: numpy.ndarray, sigma: float) -> numpy.ndarray: ... @@ -39,5 +46,27 @@ class NoAdaptation(Adaptation): class OnePlusOneAdaptation(CovarianceAdaptation): def __init__(self, dimension: int, x0: numpy.ndarray, expected_length_z: float) -> None: ... 
-class SeperableAdaptation(CovarianceAdaptation): +class SeparableAdaptation(CovarianceAdaptation): + c: numpy.ndarray + pc: numpy.ndarray + d: numpy.ndarray + def __init__(self, dimension: int, x0: numpy.ndarray, expected_length_z: float) -> None: ... + +class CovarainceNoEigvAdaptation(CovarainceNoEigvAdaptation): + def __init__(self, dimension: int, x0: numpy.ndarray, expected_length_z: float) -> None: ... + +class CholeskyAdaptation(Adaptation): + A: numpy.ndarray + pc: numpy.ndarray + def __init__(self, dimension: int, x0: numpy.ndarray, expected_length_z: float) -> None: ... + +class SelfAdaptation(Adaptation): + A: numpy.ndarray + C: numpy.ndarray def __init__(self, dimension: int, x0: numpy.ndarray, expected_length_z: float) -> None: ... + +class NaturalGradientAdaptation(Adaptation): + A: numpy.ndarray + G: numpy.ndarray + A_inv: numpy.ndarray + def __init__(self, dimension: int, x0: numpy.ndarray, expected_length_z: float) -> None: ... \ No newline at end of file diff --git a/modcma/c_maes/cmaescpp/mutation.pyi b/modcma/c_maes/cmaescpp/mutation.pyi index 454745c..72be370 100644 --- a/modcma/c_maes/cmaescpp/mutation.pyi +++ b/modcma/c_maes/cmaescpp/mutation.pyi @@ -97,11 +97,10 @@ class SigmaSampler: beta: float def __init__(self, dimension: float) -> None: ... def sample( - self, sigma: float, population: modcma.c_maes.cmaescpp.Population + self, sigma: float, population: modcma.c_maes.cmaescpp.Population, beta: float ) -> None: ... 
class Strategy: - cs: float s: float sequential_selection: SequentialSelection sigma: float diff --git a/modcma/c_maes/cmaescpp/options.pyi b/modcma/c_maes/cmaescpp/options.pyi index 1420703..2de7091 100644 --- a/modcma/c_maes/cmaescpp/options.pyi +++ b/modcma/c_maes/cmaescpp/options.pyi @@ -21,7 +21,7 @@ from typing import ClassVar # PSR: StepSizeAdaptation # RESTART: RestartStrategy # SATURATE: CorrectionMethod -# SEPERABLE: MatrixAdaptationType +# SEPARABLE: MatrixAdaptationType # SOBOL: BaseSampler # STOP: RestartStrategy # TOROIDAL: CorrectionMethod @@ -112,10 +112,16 @@ class CorrectionMethod: class MatrixAdaptationType: __members__: ClassVar[dict] = ... # read-only + NONE: ClassVar[MatrixAdaptationType] = ... COVARIANCE: ClassVar[MatrixAdaptationType] = ... MATRIX: ClassVar[MatrixAdaptationType] = ... - NONE: ClassVar[MatrixAdaptationType] = ... - SEPERABLE: ClassVar[MatrixAdaptationType] = ... + SEPARABLE: ClassVar[MatrixAdaptationType] = ... + ONEPLUSONE: ClassVar[MatrixAdaptationType] = ... + CHOLESKY: ClassVar[MatrixAdaptationType] = ... + CMSA: ClassVar[MatrixAdaptationType] = ... + COVARIANCE_NO_EIGV: ClassVar[MatrixAdaptationType] = ... + NATURAL_GRADIENT: ClassVar[MatrixAdaptationType] = ... + __entries: ClassVar[dict] = ... def __init__(self, value: int) -> None: ... def __eq__(self, other: object) -> bool: ... @@ -184,12 +190,15 @@ class RestartStrategy: class StepSizeAdaptation: __members__: ClassVar[dict] = ... # read-only CSA: ClassVar[StepSizeAdaptation] = ... - LPXNES: ClassVar[StepSizeAdaptation] = ... + TPA: ClassVar[StepSizeAdaptation] = ... MSR: ClassVar[StepSizeAdaptation] = ... + XNES: ClassVar[StepSizeAdaptation] = ... MXNES: ClassVar[StepSizeAdaptation] = ... + LPXNES: ClassVar[StepSizeAdaptation] = ... PSR: ClassVar[StepSizeAdaptation] = ... - TPA: ClassVar[StepSizeAdaptation] = ... - XNES: ClassVar[StepSizeAdaptation] = ... + SR: ClassVar[StepSizeAdaptation] = ... + SA: ClassVar[StepSizeAdaptation] = ... 
+ __entries: ClassVar[dict] = ... def __init__(self, value: int) -> None: ... def __eq__(self, other: object) -> bool: ... diff --git a/modcma/c_maes/cmaescpp/parameters.pyi b/modcma/c_maes/cmaescpp/parameters.pyi index a538a9f..4197318 100644 --- a/modcma/c_maes/cmaescpp/parameters.pyi +++ b/modcma/c_maes/cmaescpp/parameters.pyi @@ -56,6 +56,7 @@ class Settings: cmu: float | None = ..., c1: float | None = ..., verbose: bool = ..., + always_compute_eigv: bool | False = ... ) -> None: ... class Solution: @@ -66,23 +67,34 @@ class Solution: def __init__(self) -> None: ... class Stats: - centers: list[Solution] - current_avg: float - current_best: Solution + t: int evaluations: int + current_avg: float + solutions: list[Solution] + centers: list[Solution] global_best: Solution + current_best: Solution has_improved: bool - solutions: list[Solution] success_ratio: float - t: int + cs: float + last_update: int + n_updates: int def __init__(self) -> None: ... class Weights: - c1: float - cc: float - cmu: float mueff: float mueff_neg: float + c1: float + cmu: float + cc: float + cs: float + damps: float + sqrt_cc_mueff: float + sqrt_cs_mueff: float + lazy_update_interval: float + expected_length_z: float + expected_length_ps: float + beta: float negative: numpy.ndarray positive: numpy.ndarray weights: numpy.ndarray diff --git a/modcma/modularcmaes.py b/modcma/modularcmaes.py index 782dcdc..9c41ebe 100644 --- a/modcma/modularcmaes.py +++ b/modcma/modularcmaes.py @@ -264,6 +264,7 @@ def break_conditions(self) -> List[bool]: return [ self.parameters.target >= self.parameters.fopt, self.parameters.used_budget >= self.parameters.budget, + self.parameters.should_stop ] def fitness_func(self, x: np.ndarray) -> float: @@ -433,6 +434,8 @@ def evaluate_bbob( instance=1, target_precision=1e-8, return_optimizer=False, + cpp=False, + plot=False, **kwargs, ): """Helper function to evaluate a ModularCMAES on the BBOB test suite. 
@@ -459,6 +462,10 @@ def evaluate_bbob( The target precision for the objective function value return_optimizer: bool = False Whether to return the optimizer + cpp: bool = False + Wheter to run the C++ backend + plot: bool = False + Plotting stats **kwargs These are directly passed into the instance of ModularCMAES, in this manner parameters can be specified for the optimizer. @@ -474,10 +481,13 @@ def evaluate_bbob( # This speeds up the import, this import is quite slow, so import it lazy here # pylint: disable=import-outside-toplevel import ioh + from modcma.c_maes import Settings, parameters, options, constants, utils + from modcma.c_maes import ModularCMAES as cModularCMAES evals, fopts = np.array([]), np.array([]) if seed: np.random.seed(seed) + utils.set_seed(seed) fitness_func = ioh.get_problem( fid, dimension=dim, instance=instance ) @@ -491,15 +501,114 @@ def evaluate_bbob( f"Optimizing function {fid} in {dim}D for target " f"{target_precision} with {iterations} iterations." ) - + n_succ = 0 + if plot: + iterations = 1 + for idx in range(iterations): if idx > 0: fitness_func.reset() target = fitness_func.optimum.y + target_precision - - optimizer = ModularCMAES(fitness_func, dim, x0 = np.zeros(dim), target=target, **kwargs).run() + + ps_norm = [] + pc_norm = [] + eigvals = [] + f_values = [] + dm = [] + sigma = [] + hs = [] + + if cpp: + # TODO: map c++ types to python types + modules = parameters.Modules() + modules.matrix_adaptation = options.COVARIANCE + modules.ssa = options.StepSizeAdaptation.CSA + modules.restart_strategy = options.RestartStrategy.NONE + + settings = Settings( + fitness_func.meta_data.n_variables, + x0=np.zeros(dim), + modules=modules, + lb=fitness_func.bounds.lb, + ub=fitness_func.bounds.ub, + verbose=True, + sigma0=2.0, + target=fitness_func.optimum.y + 1e-8, + budget=fitness_func.meta_data.n_variables * 10_000, + ) + optimizer = cModularCMAES(settings) + while not optimizer.break_conditions(): + optimizer.step(fitness_func) + 
ps_norm.append(np.linalg.norm(optimizer.p.adaptation.ps)) + pc_norm.append(np.linalg.norm(optimizer.p.adaptation.pc)) + eigvals.append( + optimizer.p.adaptation.d**2 + ) + sigma.append(optimizer.p.mutation.sigma) + f_values.append(optimizer.p.pop.f.mean()) + dm.append(optimizer.p.adaptation.dm.copy()) + hs.append(optimizer.p.adaptation.hs) + title = "modcmacpp" + else: + optimizer = ModularCMAES(fitness_func, dim, x0 = np.zeros(dim), target=target, **kwargs) + + + while optimizer.step(): + ps_norm.append(np.linalg.norm(optimizer.parameters.ps)) + pc_norm.append(np.linalg.norm(optimizer.parameters.pc)) + eigvals.append( + (optimizer.parameters.D**2).ravel() + ) + sigma.append(optimizer.parameters.sigma) + f_values.append(optimizer.parameters.population.f.mean()) + dm.append(optimizer.parameters.dm.ravel()) + hs.append(optimizer.parameters.hs) + title = "modcmapy" + + if plot: + import matplotlib.pyplot as plt + + f, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(5, figsize=(13, 10), sharex=True) + f.suptitle(title) + ax0.plot(f_values, label=f"fmin: {fitness_func.state.current_best_internal.y}") + ax0.legend() + axs = ax0.twinx() + axs.plot(sigma, color='red') + + axs.set_ylabel("sigma") + + ax1.plot(ps_norm) + ax12 = ax1.twinx() + ax12.plot(hs, color="red") + ax12.set_ylabel("hs") + + ax2.plot(pc_norm) + + for i, v in enumerate(np.array(eigvals).T): + ax3.plot(v, label=i) + + for i, v in enumerate(np.array(dm).T): + ax4.plot(v, label=i) + + ax0.set_ylabel("f") + ax1.set_ylabel("ps") + ax2.set_ylabel("pc") + ax3.set_ylabel("eigvals") + ax3.legend() + + ax4.set_ylabel("dm") + ax4.legend() + + for ax in ax0, ax1, ax2, ax3, ax4: + ax.grid() + ax.set_yscale("log", base=10) + plt.show() + evals = np.append(evals, fitness_func.state.evaluations) fopts = np.append(fopts, fitness_func.state.current_best_internal.y) + + if fitness_func.state.current_best_internal.y <= target_precision: + n_succ += 1 result_string = ( "FCE:\t{:10.8f}\t{:10.4f}\n" @@ -510,7 +619,7 @@ def 
evaluate_bbob( result_string.format( np.mean(fopts), np.std(fopts), - *ert(evals, optimizer.parameters.budget), + *ert(evals, n_succ), iterations, ) ) diff --git a/modcma/parameters.py b/modcma/parameters.py index 8a96e8e..492647b 100644 --- a/modcma/parameters.py +++ b/modcma/parameters.py @@ -267,7 +267,7 @@ class Parameters(AnnotatedStruct): bound_correction: ( None, "saturate", "unif_resample", "COTN", "toroidal", "mirror") = None orthogonal: bool = False - local_restart: (None, "restart", "IPOP", "BIPOP") = None + local_restart: (None, "restart", "IPOP", "BIPOP", "STOP") = None base_sampler: ("gaussian", "sobol", "halton") = "gaussian" mirrored: (None, "mirrored", "mirrored pairwise") = None weights_option: ("default", "equal", "1/2^lambda") = "default" @@ -442,6 +442,9 @@ def init_adaptation_parameters(self) -> None: / ((self.d + 2) ** 2 + (2 * self.mueff / 2)) ))) + acov = 2.0 + cmu2 = (acov * (0.25 + self.mueff + 1.0 / self.mueff - 2.0) / (pow(self.d + 2., 2.0) + acov * self.mueff / 2.0)) + amu_neg = 1 + (self.c1 / self.mu) amueff_neg = 1 + ((2 * mueff_neg) / (self.mueff + 2)) aposdef_neg = (1 - self.c1 - self.cmu) / (self.d * self.cmu) @@ -505,9 +508,13 @@ def adapt(self) -> None: self.record_statistics() self.calculate_termination_criteria() self.old_population = self.population.copy() - if any(self.termination_criteria.values()): + if any(self.termination_criteria.values()) and self.local_restart != "STOP": self.perform_local_restart() + @property + def should_stop(self): + return any(self.termination_criteria.values()) and self.local_restart == "STOP" + def adapt_sigma(self) -> None: """Method to adapt the step size sigma. 
@@ -577,16 +584,15 @@ def adapt_covariance_matrix(self) -> None: 1 - (self.c1 * dhs) - self.c1 - (self.cmu * self.pweights.sum()) ) * self.C - if self.active: - weights = self.weights[::].copy() - weights = weights[: self.population.y.shape[1]] - rank_mu = self.cmu * (weights * self.population.y @ self.population.y.T) - else: - rank_mu = self.cmu * ( - self.pweights - * self.population.y[:, : self.mu] - @ self.population.y[:, : self.mu].T - ) + + weights = self.weights[: self.population.y.shape[1]].copy() if self.active else self.pweights + n = len(weights.ravel()) + rank_mu = self.cmu * ( + weights + * self.population.y[:, : n] + @ self.population.y[:, : n].T + ) + self.C = old_C + rank_one + rank_mu def perform_eigendecomposition(self) -> None: diff --git a/modcma/utils.py b/modcma/utils.py index a9d1d84..ae0cbfe 100644 --- a/modcma/utils.py +++ b/modcma/utils.py @@ -228,14 +228,14 @@ def inner(*args, **kwargs): return inner -def ert(evals, budget): +def ert(evals, n_succ): """Computed the expected running time of a list of evaluations. 
Parameters ---------- evals: list a list of running times (number of evaluations) - budget: int + n_succ: int the maximum number of evaluations Returns @@ -253,7 +253,6 @@ def ert(evals, budget): with warnings.catch_warnings(): warnings.simplefilter("ignore") evals = np.array(evals) - n_succ = (evals < budget).sum() _ert = float(evals.sum()) / int(n_succ) return _ert, np.std(evals), n_succ except ZeroDivisionError: diff --git a/perf.data b/perf.data new file mode 100644 index 0000000..7da2c82 Binary files /dev/null and b/perf.data differ diff --git a/scripts/distributions/plots.ipynb b/scripts/distributions/plots.ipynb index ff077f9..feb57cc 100644 --- a/scripts/distributions/plots.ipynb +++ b/scripts/distributions/plots.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 6, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -482,18 +482,25 @@ }, { "cell_type": "code", - "execution_count": 85, + "execution_count": 3, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "1000000" - ] - }, - "execution_count": 85, - "metadata": {}, - "output_type": "execute_result" + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[3], line 22\u001b[0m\n\u001b[1;32m 20\u001b[0m t \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m label, sampler \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(labels, samplers):\n\u001b[0;32m---> 22\u001b[0m times \u001b[38;5;241m=\u001b[39m [time_sampler(sampler) \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m1000\u001b[39m)]\n\u001b[1;32m 23\u001b[0m t\u001b[38;5;241m.\u001b[39mappend((label, 
np\u001b[38;5;241m.\u001b[39mmean(times), np\u001b[38;5;241m.\u001b[39mstd(times)))\n\u001b[1;32m 25\u001b[0m time_data \u001b[38;5;241m=\u001b[39m pl\u001b[38;5;241m.\u001b[39mDataFrame(t, schema\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msampler\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmean\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mstd\u001b[39m\u001b[38;5;124m'\u001b[39m], orient\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrow\u001b[39m\u001b[38;5;124m'\u001b[39m)\n", + "Cell \u001b[0;32mIn[3], line 22\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 20\u001b[0m t \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m label, sampler \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(labels, samplers):\n\u001b[0;32m---> 22\u001b[0m times \u001b[38;5;241m=\u001b[39m [\u001b[43mtime_sampler\u001b[49m\u001b[43m(\u001b[49m\u001b[43msampler\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mfor\u001b[39;00m _ \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m1000\u001b[39m)]\n\u001b[1;32m 23\u001b[0m t\u001b[38;5;241m.\u001b[39mappend((label, np\u001b[38;5;241m.\u001b[39mmean(times), np\u001b[38;5;241m.\u001b[39mstd(times)))\n\u001b[1;32m 25\u001b[0m time_data \u001b[38;5;241m=\u001b[39m pl\u001b[38;5;241m.\u001b[39mDataFrame(t, schema\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msampler\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmean\u001b[39m\u001b[38;5;124m'\u001b[39m, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mstd\u001b[39m\u001b[38;5;124m'\u001b[39m], orient\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrow\u001b[39m\u001b[38;5;124m'\u001b[39m)\n", + "Cell \u001b[0;32mIn[3], line 17\u001b[0m, in \u001b[0;36mtime_sampler\u001b[0;34m(sampler, 
n)\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mtime_sampler\u001b[39m(sampler, n \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1_000_000\u001b[39m):\n\u001b[1;32m 16\u001b[0m start \u001b[38;5;241m=\u001b[39m perf_counter()\n\u001b[0;32m---> 17\u001b[0m \u001b[43msampler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mn\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 18\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m perf_counter() \u001b[38;5;241m-\u001b[39m start\n", + "Cell \u001b[0;32mIn[3], line 5\u001b[0m, in \u001b[0;36m\u001b[0;34m(a)\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mscipy\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m stats\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtime\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m perf_counter\n\u001b[1;32m 4\u001b[0m samplers \u001b[38;5;241m=\u001b[39m [\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m a: \u001b[43mstats\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcauchy\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m2.0\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mscale\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrvs\u001b[49m\u001b[43m(\u001b[49m\u001b[43msize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43ma\u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 6\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m a: stats\u001b[38;5;241m.\u001b[39mdweibull(\u001b[38;5;241m2.0\u001b[39m, scale\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\u001b[38;5;241m.\u001b[39mrvs(size\u001b[38;5;241m=\u001b[39ma),\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m a: stats\u001b[38;5;241m.\u001b[39mnorm()\u001b[38;5;241m.\u001b[39mrvs(size\u001b[38;5;241m=\u001b[39ma),\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m a: 
stats\u001b[38;5;241m.\u001b[39mlaplace()\u001b[38;5;241m.\u001b[39mrvs(size\u001b[38;5;241m=\u001b[39ma),\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m a: stats\u001b[38;5;241m.\u001b[39mlogistic()\u001b[38;5;241m.\u001b[39mrvs(size\u001b[38;5;241m=\u001b[39ma),\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mlambda\u001b[39;00m a: stats\u001b[38;5;241m.\u001b[39muniform()\u001b[38;5;241m.\u001b[39mrvs(size\u001b[38;5;241m=\u001b[39ma),\n\u001b[1;32m 11\u001b[0m ]\n\u001b[1;32m 13\u001b[0m labels\u001b[38;5;241m=\u001b[39m[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCauchy\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdWeibull\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mGaussian\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLaplace\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLogistic\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUniform\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mtime_sampler\u001b[39m(sampler, n \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1_000_000\u001b[39m):\n", + "File \u001b[0;32m~/code/ModularCMAES/venv/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py:493\u001b[0m, in \u001b[0;36mrv_frozen.rvs\u001b[0;34m(self, size, random_state)\u001b[0m\n\u001b[1;32m 491\u001b[0m kwds \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mkwds\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[1;32m 492\u001b[0m kwds\u001b[38;5;241m.\u001b[39mupdate({\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msize\u001b[39m\u001b[38;5;124m'\u001b[39m: size, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mrandom_state\u001b[39m\u001b[38;5;124m'\u001b[39m: random_state})\n\u001b[0;32m--> 493\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdist\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrvs\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwds\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/code/ModularCMAES/venv/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py:1069\u001b[0m, in \u001b[0;36mrv_generic.rvs\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 1066\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1067\u001b[0m random_state \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_random_state\n\u001b[0;32m-> 1069\u001b[0m vals \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_rvs\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msize\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrandom_state\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrandom_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1071\u001b[0m vals \u001b[38;5;241m=\u001b[39m vals \u001b[38;5;241m*\u001b[39m scale \u001b[38;5;241m+\u001b[39m loc\n\u001b[1;32m 1073\u001b[0m \u001b[38;5;66;03m# do not forget to restore the _random_state\u001b[39;00m\n", + "File \u001b[0;32m~/code/ModularCMAES/venv/lib/python3.10/site-packages/scipy/stats/_distn_infrastructure.py:994\u001b[0m, in \u001b[0;36mrv_generic._rvs\u001b[0;34m(self, size, random_state, *args)\u001b[0m\n\u001b[1;32m 987\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_rvs\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, 
size\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m, random_state\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m):\n\u001b[1;32m 988\u001b[0m \u001b[38;5;66;03m# This method must handle size being a tuple, and it must\u001b[39;00m\n\u001b[1;32m 989\u001b[0m \u001b[38;5;66;03m# properly broadcast *args and size. size might be\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 992\u001b[0m \n\u001b[1;32m 993\u001b[0m \u001b[38;5;66;03m# Use basic inverse cdf algorithm for RV generation as default.\u001b[39;00m\n\u001b[0;32m--> 994\u001b[0m U \u001b[38;5;241m=\u001b[39m \u001b[43mrandom_state\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43muniform\u001b[49m\u001b[43m(\u001b[49m\u001b[43msize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msize\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 995\u001b[0m Y \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_ppf(U, \u001b[38;5;241m*\u001b[39margs)\n\u001b[1;32m 996\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m Y\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] } ], "source": [ @@ -516,14 +523,14 @@ " sampler(n)\n", " return perf_counter() - start\n", "\n", - "# t = []\n", - "# for label, sampler in zip(labels, samplers):\n", - "# times = [time_sampler(sampler) for _ in range(1000)]\n", - "# t.append((label, np.mean(times), np.std(times)))\n", + "t = []\n", + "for label, sampler in zip(labels, samplers):\n", + " times = [time_sampler(sampler) for _ in range(1000)]\n", + " t.append((label, np.mean(times), np.std(times)))\n", " \n", - "# time_data = pl.DataFrame(t, schema=['sampler', 'mean', 'std'], orient='row')\n", + "time_data = pl.DataFrame(t, schema=['sampler', 'mean', 'std'], orient='row')\n", "\n", - "# time_sampler(samplers[2])\n", + "time_sampler(samplers[2])\n", "\n", "samplers[2](10**6).size" ] @@ -563,11 +570,116 @@ "plt.tight_layout()\n", "plt.savefig(\"figures/time.pdf\")" ] + }, + { + "cell_type": "code", + "execution_count": 24, + 
"metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAssAAAIHCAYAAABpIhEUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACFv0lEQVR4nO3deVxU1fsH8M8Aw7CjLMqOgiBuIKIm7mKu5YKpZbYY+S2/aWb0q7S0XLMyLVPLzDX7lqalSWkqihuKC6i4ooKAIC4ssi8Dc39/GOgVRplh4A7web9evGKee++ZZ04jPJw59xyZIAgCiIiIiIioEgOpEyAiIiIi0lcslomIiIiI1GCxTERERESkBotlIiIiIiI1WCwTEREREanBYpmIiIiISA0Wy0REREREarBYJiIiIiJSg8UyEREREZEael0sFxYW4pNPPoG3tzdMTEzg5OSEkJAQpKamVruN9evXQyaTPfHrp59+qsVXQkRERET1kUxft7suKipCv379EBUVBUdHR/Tq1QuJiYk4ceIE7O3tERUVBQ8Pjye2c+TIEaxevbrKY9nZ2di+fTsAID4+vlrtEREREVHjobfF8syZM7FgwQIEBgZiz549sLCwAAAsWbIE7733Hvr06YMDBw7U6Dm+//57vPXWW+jRoweOHDmig6yJiIiIqCHRy2K5pKQEzZo1Q3Z2NmJiYuDv7y867ufnh9jYWJw6dQoBAQFaP0+PHj1w9OhRrFy5Em+++WZN0yYiIiKiBkYv5yxHRkYiOzsbnp6elQplABg9ejQAICwsTOvnuH79Oo4ePQpjY2OMHTtW63aIiIiIqOHSy2L57NmzAIBOnTpVebw8Hhsbq/Vz/PzzzwCAZ555Bk2bNtW6HSIiIiJquIykTqAqycnJAAAXF5cqj5fHk5KStH6O8mL55Zdfrtb5xcXFKC4urnisUqmQmZkJW1tbyGQyrfMgIiIiotohCAJyc3Ph5OQEAwPtxoj1sljOy8sDAJiZmVV53NzcHACQm5urVfsnTpzAlStXYGNjg2eeeaZa1yxcuBBz5szR6vmIiIiISDo3btxQOwj7JHpZLNe28lHlsWPHwtjYuFrXzJgxA6GhoRWPs7Oz4ebmVlF005MplUpERESgX79+kMvlUqdTb7DfNMc+0w77TXPsM+2w3zTHPtNOZmYmvL29YWlpqXUbelksly8TV1BQUOXx/Px8ANDqhZeWlmLz5s0Aqj8FAwAUCgUUCkWluI2NDWxtbTXOozFSKpUwMzODra0t/6FrgP2mOfaZdthvmmOfaYf9pjn2Wc3UZMqsXt7g5+bmBgBISUmp8nh53N3dXeO29+zZgzt37sDDwwPdu3fXPkkiIiIiavD0slj28/MDAMTExFR5vDzu6+urcdvlUzBeeuklLbMjIiIiosZCL4vlHj16wNraGvHx8Thz5kyl41u3bgUADBs2TKN28/Ly8OeffwJgsUxERERET6aXxbKxsTGmTJkCAJg8eXLFHGXg/nbXsbGx6NOnj2j3vuXLl8PHxwczZsxQ2+4ff/yBgoICdOvWDV5eXrX3AoiIiIioQdDLG/wAYObMmQgPD8fRo0fh5eWFXr16ISkpCcePH4e9vT3Wrl0rOj89PR1xcXFIS0tT26amaysTERERUeOmlyPLAGBiYoKIiAjMmjULZmZm2L59O5KSkjBhwgTExMTAw8NDo/bS0tKwf/9+yOVyPP/887WUNRERERE1JHo7sgwApqammDt3LubOnfvEc2fPno3Zs2erPe7o6IjS0lIdZkdEREREDZ1eF8tEREQkHaVSibKyslpp18jICEVFRbXSfkPEPgMMDQ0lWWOaxTIRERGJ5OTkID09HcXFxbXSviAIcHBwwI0bN2q0WURjwj67T6FQwM7ODlZWVnX2nCyWiYiIqEJOTg5SU1NhYWEBOzs7yOVyn
RdnKpUKeXl5sLCwgIGB3t4+pVcae58JggClUons7GykpqYCQJ0VzCyWiYiIqEJ6ejosLCzg4uJSayOYKpUKJSUlMDExaZSFnzbYZ/fvZbO0tERKSgrS09PrrFhunL1NRERElSiVShQXF8Pa2rpRf9RP+ksmk8Ha2hrFxcVQKpV18pwslomIiAgAKm4ck+ImKqLqKn9/1tWNjiyWiYiISISjyqTP6vr9yWKZiIiIiEgNFstERERERGqwWCYiIiIiUoPFMhEREdETFBQU4Ntvv8XAgQPh6OgIhUIBS0tLtG3bFhMmTMCOHTsa1M56Bw4cgEwmw4QJE6RORXJcZ5mIiIjoMSIjIzFmzBikpaXBxMQEXbp0gZOTE4qLixEfH48NGzZgw4YNaNu2LS5cuCB1uqRjLJaJiIiI1IiJiUH//v1RXFyM999/HzNnzqy0GcaNGzewZMkSrFy5UqIsda9r1664dOkSrK2tpU5FciyWiYiIiKqgUqnw0ksvobi4GPPmzcPMmTOrPM/V1RVff/01XnrppTrOsPaYmZnBx8dH6jT0AucsExEREVVh586duHTpEtzc3DBjxownnh8QECB6fPjwYUyZMgW+vr5o2rQpTE1N4ePjg+nTp+PevXuVrl+/fj1kMhlmz55dZfvPPvssDA0NkZiYKIqfP38eL730Ejw8PGBiYgJ7e3t07NgR06ZNQ1pamujco0ePYuTIkXB3d4dCoYCDgwO6du2K6dOnIy8vr+I8dXOW7927h2XLlmHQoEEVbdja2mLw4MHYu3dvlXn37dsXMpkMiYmJ2L59O7p16wZzc3PY2Nhg3LhxSElJqbpD9QRHlomIiOixVCoBWQUlOmxPhdwCJZQGxTAwqN1xu6ZmxjAw0G4Ti127dgEAxowZA0NDQ42vf//993H27Fn4+vqif//+KCoqQkxMDL744gv89ddfiIqKgoWFhVa5lYuOjkbPnj1RVFQEX19fjBgxAgUFBUhISMDSpUsxcuRIODo6AgDCwsIwcuRICIKArl27onv37rh37x6uXr2KL774ApMmTXpiPlFRUZg6dSpatGiB1q1bIzAwEMnJydizZw/27NmD1atXIyQkpMprv/vuOyxZsgS9evXC0KFDcfz4cWzatAnR0dE4e/YsTE1Na9QXtYXFMhERET1WVkEJAuaHS52GVqJnPg1bC4VW1549exYA4O/vr9X1n376Kbp37y6a91tcXIypU6di1apVWLJkCT755BOt2i737bffoqioCF999RXee+890bHLly+Lnvurr76CSqXC1q1b8dxzz4nOPXnyJGxtbZ/4fK1bt8axY8fQrVs3Ufz06dMICgrCu+++i7Fjx1ZZdK9YsQKHDx9GYGAggPsrjAwYMABHjx7Fr7/+qrbIlhqnYRARERFVISMjAwBgZ2dX5fHXX38dEyZMEH0dOXKk4viQIUMq3SCnUCjwzTffwMjICH/++WeNc7x79y4A4Omnn650zMfHp2JU+UnndunSBZaWlk98vpYtW1YqlIH7f1BMnjwZOTk5iIiIqPLad999t6JQBu7Piw4NDQUAHDp06InPLRWOLBMRERFpYcOGDZXWVu7bty969uxZ8Tg1NRVhYWG4fPkycnJyoFKpAADGxsa4evVqjXMICAjArl27MHnyZMyfPx89e/aEkVHV5V1AQAAuXbqEl19+GbNmzUJAQIBW02DKysqwb98+HD16FGlpaSguLgaAitej7nUNHDiwUszb2xsAKs2t1icslomIiIiqUD4tIT09vcrjpaWlFd9PmjQJP/zwg+j4kiVLMH36dCiVylrL8f3338eRI0dw4MAB9OvXDxYWFggMDMQzzzyDCRMmiEa2P/vsM5w7dw5hYWEICwtD06ZN0bNnTwwfPhwvvfQSTExMnvh8KSkpePbZZyumqFQlNze3yriLi0ulWPlodnnBrY9YLBMREdFjNTUzRvTMyh/da0ulUiE3Lw+WFhZ1coOftvz8/BAZGYnTp09j/
PjxGl0bFRWF9957D9bW1li6dCn69u0LBwcHKBT35087OTlpPJpaPir9MCsrK+zfvx+RkZEICwvDgQMHsH//fuzduxcLFy7E4cOH4eXlBeD+EnenTp3C/v378ddff+HgwYMVhfOXX36JY8eOPXHe8sSJE3H27Fk899xz+OCDD9C6dWtYWlrCwMAAq1atwptvvglBEKq8trb/X9cWFstERET0WAYGMq1vkquKSqWCXFUMKwuFXhdQQ4YMwXfffYctW7bgiy++0GhFjG3btgEAFixYgFdffVV0rLCwELdu3ap0jbHx/cL+4SXcHpaamlplXCaToWfPnhXTP+7cuYNp06bh119/xccff4zffvut4lwjIyMMHDiwYkpEUlISQkJCsH//fnzxxRf48ssv1b6m/Px87N27F82bN8fmzZsr9UdCQoLaa+sz/X2HEhEREUlo6NChaNOmDZKTk7Fw4UKNrs3KygJQ9dSDLVu2VDn6Wn4z3pUrVyodu3LlSrXXI27WrFnFWs3nz59/7Lnu7u748MMPq3VudnY2VCoVHB0dKxXKSqWy4g+EhobFMhEREVEVDAwMsHHjRigUCsyaNQsffPABsrOzK52XkZGBuLg4Uaz8xrU1a9aI5ixfvHixojh9VJcuXWBmZoZdu3YhOjq6Ip6eno433nijymkYK1euxPXr1yvFd+7cCeD+1ItyX3/9dZUj2lWdW5VmzZrB2toa58+fR2RkZEW8rKwMH374YZVFfkPAYpmIiIhIjYCAAISHh8PBwQGLFi1C8+bN0adPH4wbNw7BwcHo0qULHB0dceDAAfj4+KBz584AgNdeew0ODg4ICwtD69at8fzzz2PAgAHo2LEjevXqBXd390rPZWFhgf/7v/9DaWkpevbsicGDB2PIkCHw9vZGWVkZunTpUumalStXwsPDA+3atcPo0aPxwgsvoGPHjnj33XdhYmIiWsd5zpw5cHZ2RqdOnfD8889j7NixaN26NZYuXQobGxv83//932P7wsjICB988AFKS0vRp08fDBw4EC+88AJatWqFlStXYvLkyTXsbf3EYpmIiIjoMXr27In4+HgsXboUPXv2RFxcHH7//XeEh4cjNzcXY8eOxbZt23Du3Dm0b98ewP2VNE6ePIkXX3wRJSUl2LFjB1JTUzFv3jz8+uuvap9r9uzZWLRoEVxcXLB//36cP38eISEh2L17d8Wc5ofNmzcPISEhkMlk2LdvH8LCwlBYWIiJEyfizJkz6NGjR8W5y5YtwwsvvICCggLs2rUL//zzD4yMjBAaGorY2NiKGwEf56OPPsKGDRvg6+uLyMhIhIeHw8/PD1FRURV/KDQ0MkHdLYv0WDk5ObC2tkZ6enq1dryh+/OZdu7ciaFDh0Iul0udTr3BftMc+0w77DfNNbQ+KyoqwvXr19GyZctqLSOmLZVKhZycHFhZWen1DX76hH32gCbv04yMDNjZ2SE7OxtWVlZaPV/j7m0iIiIiosdgsUxEREREpAaLZSIiIiIiNVgsExERERGpwWKZiIiIiEgNFstERERERGqwWCYiIiIRripL+qyu358slomIiAgAYGhoCACi7ZmJ9E35+7P8/VrbWCwTERERAEAul0OhUCA7O5ujy6SXBEFAdnY2FApFnW0EZFQnz0JERET1gp2dHVJTU5GSkgJra2vI5XLIZDKdPodKpUJJSQmKiooa/W501dXY+0wQBCiVSmRnZyMvLw/Ozs519twslomIiKhC+ZbA6enpSE1NrZXnEAQBhYWFMDU11Xkh3lCxz+5TKBRwdnbWeutqbbBYJiIiIhErKytYWVlBqVSirKxM5+0rlUocOnQIvXv3rrOP0us79tn9OcpSvHYWy0RERFQluVxeK8WJoaEhSktLYWJi0mgLP02xz6TT+Ca9EBERERFVE4tlIiIiIiI1WCwTEREREanBYpmIiIiISA0Wy0REREREarBYJiIiIiJSg8UyEREREZEaLJaJiIiIiNRgsUxER
EREpIZeF8uFhYX45JNP4O3tDRMTEzg5OSEkJETrveoTExMxadIktGzZEgqFAnZ2dggMDMSiRYt0nDkRERERNQR6WywXFRUhKCgI8+bNQ15eHkaMGAFXV1esW7cO/v7+SEhI0Ki9Xbt2oV27dli1ahVsbW0xatQodOrUCYmJifjhhx9q6VUQERERUX1mJHUC6syfPx9RUVEIDAzEnj17YGFhAQBYsmQJ3nvvPYSEhODAgQPVauvy5csYNWoULC0tsXfvXnTv3r3imEqlQkxMTG28BCIiIiKq5/RyZLmkpATLly8HAKxYsaKiUAaA0NBQ+Pr64uDBg4iOjq5We6GhoSgqKsL69etFhTIAGBgYoHPnzrpLnoiIiIgaDL0sliMjI5GdnQ1PT0/4+/tXOj569GgAQFhY2BPbunHjBnbv3g0PDw8MHTpU57kSERERUcOll9Mwzp49CwDo1KlTlcfL47GxsU9s68CBA1CpVOjevTtKS0vxxx9/IDIyEmVlZWjfvj2ef/55NG3aVHfJExEREVGDoZfFcnJyMgDAxcWlyuPl8aSkpCe2dfHiRQCAhYUFevXqhaioKNHxjz/+GFu3bkW/fv0e205xcTGKi4srHufk5AAAlEollErlE/MgVPQT+0sz7DfNsc+0w37THPtMO+w3zbHPtKOL/tLLYjkvLw8AYGZmVuVxc3NzAEBubu4T28rKygIArF69GhYWFvjll18wePBg3L17F/PmzcPPP/+M4OBgXLhwAc7OzmrbWbhwIebMmVMpHhERoTZPqtrevXulTqFeYr9pjn2mHfab5thn2mG/aY59ppmCgoIat6GXxbIuqVQqAEBpaSl++OEHjB07FgDQtGlTbNy4EXFxcTh58iS+++47LFiwQG07M2bMQGhoaMXjnJwcuLq6ol+/frC1ta3dF9FAKJVK7N27FwMGDIBcLpc6nXqD/aY59pl22G+aY59ph/2mOfaZdjIyMmrchl4Wy+WrX6j7ayA/Px8AYGlpWe22LCwsMGbMmErHX3vtNZw8eRIHDx58bDsKhQIKhaJSXC6X802rIfaZdthvmmOfaYf9pjn2mXbYb5pjn2lGF32ll6thuLm5AQBSUlKqPF4ed3d3f2Jb5ee4ublBJpNVOt6iRQsAwJ07d7RJlYiIiIgaML0slv38/ABA7WYh5XFfX98ntlW+9Fz53OVHZWZmAoBoLWciIiIiIkBPi+UePXrA2toa8fHxOHPmTKXjW7duBQAMGzbsiW11794dtra2uHXrFuLi4iodL59+UdV6zkRERETUuOllsWxsbIwpU6YAACZPnlwxRxm4v911bGws+vTpg4CAgIr48uXL4ePjgxkzZojaMjIyQmhoKARBwOTJkyuWfAOA8PBwrF+/HjKZDG+++WYtvyoiIiIiqm/08gY/AJg5cybCw8Nx9OhReHl5oVevXkhKSsLx48dhb2+PtWvXis5PT09HXFwc0tLSKrX1/vvvIyIiAuHh4fD29ka3bt2Qnp6OqKgolJWVYcGCBejatWtdvTQiIiIiqif0cmQZAExMTBAREYFZs2bBzMwM27dvR1JSEiZMmICYmBh4eHhUuy25XI6dO3fiiy++gJ2dHXbv3o1z586hT58+CAsLw0cffVSLr4SIiIiI6iu9HVkGAFNTU8ydOxdz58594rmzZ8/G7Nmz1R6Xy+X44IMP8MEHH+gwQyIiIiJqyPR2ZJmIiIiISGoslomIiIiI1GCxTERERESkBotlIiIiIiI1WCwTEREREanBYpmIiIiISA0Wy0REREREarBYJiIiIiJSg8UyEREREZEaLJaJiIiIiNRgsUxEREREpAaLZSIiIiIiNVgsExERERGpwWKZiIiIiEgNFstERERERGqwWCYiIiIiUoPFMhERERGRGiyWiYiIiIjUYLFMRERERKQGi2UiIiIiIjVYLBMRERERqcFimYiIiIhIDRbLRERERERqsFgmIiIiIlKDxTIRERERkRoslomIiIiI1
GCxTERERESkBotlIiIiIiI1WCwTEREREanBYpmIiIiISA0Wy0REREREarBYJiIiIiJSg8UyEREREZEaLJaJiIiIiNRgsUxEREREpAaLZSIiIiIiNVgsExERERGpwWKZiIiIiEgNFstERERERGqwWCYiIiIiUoPFMhERERGRGiyWiYiIiIjUYLFMRERERKQGi2UiIiIiIjVYLBMRERERqcFimYiIiIhIDRbLRERERERqsFgmIiIiIlKDxTIRERERkRp6XSwXFhbik08+gbe3N0xMTODk5ISQkBCkpqZq1E6LFi0gk8nUfl2+fLmWXgERERER1WdGUiegTlFREYKCghAVFQVHR0eMGDECiYmJWLduHf766y9ERUXBw8NDozZfffXVKuPW1ta6SJmIiIiIGhi9LZbnz5+PqKgoBAYGYs+ePbCwsAAALFmyBO+99x5CQkJw4MABjdpcv3697hMlIiIiogZLL6dhlJSUYPny5QCAFStWVBTKABAaGgpfX18cPHgQ0dHRUqVIRERERI2AXhbLkZGRyM7OhqenJ/z9/SsdHz16NAAgLCysrlMjIiIiokZEL6dhnD17FgDQqVOnKo+Xx2NjYzVqd9GiRYiPj4dCoUC7du0QHBwMe3v7miVLRERERA2WXhbLycnJAAAXF5cqj5fHk5KSNGr3gw8+ED1+9913sWzZMoSEhDzx2uLiYhQXF1c8zsnJAQAolUoolUqN8misyvuJ/aUZ9pvm2GfaYb9pjn2mHfab5thn2tFFf+llsZyXlwcAMDMzq/K4ubk5ACA3N7da7Q0fPhz9+vVDQEAA7O3tkZCQgLVr12Lp0qWYOHEibG1tMWLEiMe2sXDhQsyZM6dSPCIiQm2eVLW9e/dKnUK9xH7THPtMO+w3zbHPtMN+0xz7TDMFBQU1bkMvi2Vd+/bbb0WP27Vrh8WLF8PHxwdvvPEGPvzwwycWyzNmzEBoaGjF45ycHLi6uqJfv36wtbWtlbwbGqVSib1792LAgAGQy+VSp1NvsN80xz7TDvtNc+wz7bDfNMc+005GRkaN29DLYrl89Qt1fw3k5+cDACwtLWv0PK+//jpmzpyJuLg4JCYmokWLFmrPVSgUUCgUleJyuZxvWg2xz7TDftMc+0w77DfNsc+0w37THPtMM7roK61Ww9iwYQOKiopq/OTquLm5AQBSUlKqPF4ed3d3r9HzGBgYwNPTEwCQlpZWo7aIiIiIqOHRqlh+7bXX4OTkhLfffrti5Qpd8vPzAwDExMRUebw87uvrW+PnysrKAvBgHjQRERERUTmtiuWJEyeitLQUK1asQKdOndCtWzesWbOmYnpETfXo0QPW1taIj4/HmTNnKh3funUrAGDYsGE1ep4LFy4gLi4OZmZm8PHxqVFbRERERNTwaFUsr1q1CmlpaVi1ahW6dOmCEydO4I033oCTkxMmTZqEU6dO1SgpY2NjTJkyBQAwefJkURG+ZMkSxMbGok+fPggICKiIL1++HD4+PpgxY4aorZ07d2L//v2VniM2NhZjxoyBIAiYOHEijI2Na5QzERERETU8Wu/gZ25ujokTJyIqKgqxsbGYPHkyjIyMsGrVKjz11FPw9/fHypUrK9Yj1tTMmTPx1FNP4ejRo/Dy8sLzzz+Pbt264b333oO9vT3Wrl0rOj89PR1xcXGV5h6fOHEC/fv3R4sWLTBixAiMGzcOTz31FAICAnDp0iX07dsXn3/+ubbdQEREREQNmE62u27fvj2+/fZb3Lx5Ez///DN69+6Ns2fPYvLkyXBycsLrr7+O6Ohojdo0MTFBREQEZs2aBTMzM2zfvh1JSUmYMGECYmJi4OHhUa12Bg0ahJCQEFhZWSEyMhJbt27FtWvX0LNnT/z4448IDw+HqampNi+biIiIiBo4nS4dp1QqkZubW7FZiCAIUCqVWLduHdavX4/g4GCsXr0aTZo0qVZ7pqammDt3LubOnfvEc2fPno3Zs2dXigcGBiIwMFCTl0FER
EREBEBHI8tRUVF4/fXX4ejoiLfeeguxsbEYNWoU9uzZg5ycHPzvf/9Dhw4dsG3bNkydOlUXT0lEREREVOu0HlnOysrCxo0b8eOPP+LixYsQBAGurq748MMPMXHiRDg4OFScO27cOIwZMwb+/v7YuXOnThInIiIiIqptWhXLL730Ev744w8UFxdDJpNhyJAhmDRpEoYOHQoDg6oHq42MjNClSxds2LChRgkTEREREdUVrYrlX375BQ4ODggJCcEbb7xRsePekwQHB9d41z0iIiIiorqiVbG8ZcsWjBgxAkZGml0+bNiwGm8kQkRERERUV7S6wS8/Px8nTpx44nlRUVH46aeftHkKIiIiIiLJaVUsT5gwAatXr37ieWvWrMFrr72mzVMQEREREUlOJ0vHqaNSqSCTyWrzKYiIiIiIak2tFssJCQmwsrKqzacgIiIiIqo11b5D79Fd9M6cOaN2Z73S0lLExcXh0KFDGDBgQM0yJCIiIiKSSLWL5dmzZ0Mmk0EQBMhkMpw5cwZnzpx57DXNmjXDZ599VtMciYiIiIgkUe1ied26dQAAQRAQEhKCnj174vXXX6/yXGNjYzg5OaFbt25QKBS6yZSIiIiIqI5Vu1h+9dVXK77fsGEDhgwZIooRERERETU0Wm1KEhERoes8iIiIiIj0Tq2uhkFEREREVJ9Va2Q5KCgIMpkMGzZsgIuLC4KCgqr9BDKZDPv27dM6QSIiIiIiqVSrWD5w4ABkMhkKCgoqHlcXNyUhIiIiovqqWsXy9evXAQDOzs6ix0REREREDVm1imV3d/fHPiYiIiIiaoh4gx8RERERkRpaFcu3b9/GoUOHcPv2bVE8Pj4eL7zwAtq3b4+hQ4ciKipKJ0kSEREREUlBq2L5888/R79+/ZCdnV0Ry8nJQc+ePbFlyxZcvHgR//zzD/r374+rV6/qLFkiIiIiorqkVbF84MABtG3bFt7e3hWx9evX4/bt2xg3bhzi4uKwZMkSFBYWYvHixTpLloiIiIioLmlVLKempsLDw0MU+/vvv2FkZIRvvvkGXl5emDZtGvz8/HDw4EGdJEpEREREVNe0KpZzc3NhZmZW8bisrAzHjh1DQEAA7OzsKuI+Pj5ISUmpeZZERERERBLQqlh2cnLC5cuXKx4fOXIEeXl56Nu3r+i80tJSGBsb1yhBIiIiIiKpaFUsBwYGIjY2Ft988w3OnTuHmTNnQiaTYdiwYaLzLl26VLGRCRERERFRfaNVsTxjxgwoFAq899576NixIyIjI9G3b19079694pzExERcvHgRTz31lM6SJSIiIiKqS9Xawe9R7dq1w5EjR7B06VKkp6cjICAA77//vuic3bt3w8/PDyNHjtRFnkREREREdU6rYhkAOnXqhA0bNqg9/uabb+LNN9/UtnkiIiIiIslxu2siIiIiIjW0Hlkul5ycjLS0NBQXF6s9p3fv3jV9GiIiIiKiOqd1sbx27VrMmzcPycnJTzy3rKxM26chIiIiIpKMVsXyunXrMHHiRABA+/bt4e3tDUtLS50mRkREREQkNa2K5SVLlsDIyAhbt27F8OHDdZ0TEREREZFe0OoGv6tXr6J3794slImIiIioQdOqWLaxsYGdnZ2ucyEiIiIi0itaFcsjRoxAZGQklEqlrvMhIiIiItIbWhXLn332GczNzfHaa68hKytL1zkREREREekFrW7we++999C2bVv8+uuv+PvvvxEQEAAXFxcYGFSuvWUyGdasWVPjRImIiIiI6ppWxfL69esrvs/Ozsb+/fvVnstimYiIiIjqK62K5YiICF3nQURERESkd7Qqlvv06aPrPIiIiIiI9I5WN/gRERERETUGWo0sl8vIyMDPP/+MEydOID09Hf3798cHH3wAALhw4QLi4+Px9NNPw8zMTCfJEhERERHVJa2L5S1btmDixInIy8uDIAiQyWRwdnauOJ6amorg4GBs2LABL730kk6SJSIiIiKqS1pNwzh27Bhef
PFFGBkZYfHixThx4gQEQRCd079/f1hbW+OPP/7QSaJERERERHVNq5Hlzz77DAYGBti7dy86depU5TmGhobo1KkTzp8/X6MEiYiIiIikotXI8tGjRxEYGKi2UC7n4OCAtLQ0rRIjIiIiIpKaVsVyQUEB7O3tn3heTbfCLiwsxCeffAJvb2+YmJjAyckJISEhSE1NrVG7V69ehampKWQyGZ5++ukatUVEREREDZdWxbKzszMuXLjw2HMEQcD58+fRsmVLrRIrKipCUFAQ5s2bh7y8PIwYMQKurq5Yt24d/P39kZCQoFW7APDGG2+guLhY6+uJiIiIqHHQqlgePHgw4uLisGnTJrXnrF69Gjdu3MAzzzyjVWLz589HVFQUAgMDceXKFWzevBnHjx/H4sWLcffuXYSEhGjV7po1a3DgwAH85z//0ep6IiIiImo8tCqWp0+fDmtra7zyyiv48MMPERUVBQDIz8/H6dOn8cknn+Dtt9+Gvb093n33XY3bLykpwfLlywEAK1asgIWFRcWx0NBQ+Pr64uDBg4iOjtao3du3b+P999/HgAEDMG7cOI3zIiIiIqLGRati2cXFBX///Tfs7OywaNEi9OjRAzKZDFu3bkXnzp0xf/58NGnSBDt27ECzZs00bj8yMhLZ2dnw9PSEv79/peOjR48GAISFhWnU7jvvvIPCwkJ89913GudERERERI2P1puSBAYGIi4uDmvWrMHevXuRmJgIlUoFFxcXDBgwAG+++Sasra21avvs2bMAoHa1jfJ4bGxstdvcuXMnNm/ejLlz56JVq1ZISUnRKjciooZGpRKQXahEcUkJCkuBklIV5HKpsyIi0g812u7a0tIS06ZNw7Rp03SUzn3JyckA7o9gV6U8npSUVK328vPz8dZbb6F169b48MMPtcqpuLhYdFNgTk4OAECpVEKpVGrVZmNT3k/sL82w3zTHPlPvbm4xDl9Lx/mbubiUloPr6QXIKiiBqmJfKSNMPxkOewtjuNqYwauZObq4N0WXFk3h1MRUytT1Et9r2mG/aY59ph1d9FeNiuXakpeXBwAwMzOr8ri5uTkAIDc3t1rtzZw5E0lJSYiIiICxsbFWOS1cuBBz5sypFI+IiFCbJ1Vt7969UqdQL7HfNMc+uy9XCZy4I8OZDAMk58uqdc3dvBLczStBTPI9bD51f7lOZzMBAXYqdLIT0FRRmxnXP3yvaYf9pjn2mWYKCgpq3IZWxfLRo0cRERGBS5cuISsrCzKZDDY2Nmjbti369euHp556qsaJ6cqpU6fw7bff4pVXXkHfvn21bmfGjBkIDQ2teJyTkwNXV1f069cPtra2Osi04VMqldi7dy8GDBgAOT/jrTb2m+bYZ/dduJmDH48kYs/F21CWCU++4AlSC2RITTbEXzeAwe2aY2LPFujgrN10u4aC7zXtsN80xz7TTkZGRo3b0KhYjo2NRUhICE6fPg3g/lrKD5PJ7o9YdO3aFWvWrEHbtm21Sqp89Qt1fw3k5+cDuD8N5HFKS0vxn//8B02aNMFXX32lVS7lFAoFFIrKQylyuZxvWg2xz7TDftNcY+2zq7dzsWTvFew6f6tW2lcJwM7zt7Hz/G308bbHR0PboLXD438eN3SN9b1WU+w3zbHPNKOLvqp2sXzy5EkEBQUhPz8f5ubmGDJkCDp27Ag7OzsIgoD09HScPn0au3fvxvHjxxEYGIgDBw5UuZrFk7i5uQGA2pvwyuPu7u6PbSclJQVnzpyBg4MDxowZIzp27949AEB0dHTFiPOBAwc0zpWISF/kF5fim/ArWBuZiDKV+pFkC4URunvaooOzNdo6WcHB2gQ25saQqcrwz9596BzYC2m5SiTczcOppCycTMzEvYKq5/0dvHIXh6/exQtd3fDBoNZoYqbdVDciIn1VrWK5rKwM48ePR35+Pl5//XUsXrwYVlZWVZ6bk5OD0NBQrF27Fi+++CIuXrxYMeJcXX5+fgCAmJiYKo+Xx319f
avV3q1bt3DrVtUjLPfu3cPBgwc1yo+ISN9ExN3BR3+cQ1p2UZXHjY0M8KyvI0b5u6BrSxsYG1VeOVSpVKKpAmjtYIn2rnIAzfEmgDKVgOPXM7DjzE38FZuGvOJS0XUqAfjleDL2XryNhcEd8HTb5rXwComIpFGtdZb//PNPXLt2Dc8//zx+/PFHtYUyAFhZWWH16tUYM2YMrly5ovFayADQo0cPWFtbIz4+HmfOnKl0fOvWrQCAYcOGPbadFi1aQBCEKr8iIiIAAP3796+IERHVN0XKMszecQGvrTtZZaFsbSrH/w30xomP+mPJ2I7o6WVXZaH8OIYGMnT3tMPnz/kicnoQPhzsA3vLytPS7uYWY+JPp/Deb2dRUFJaRUtERPVPtX5ihoWFwcDAAJ999lm1G164cCEAYPv27RonZWxsjClTpgAAJk+eXDFHGQCWLFmC2NhY9OnTBwEBARXx5cuXw8fHBzNmzND4+YiI6qPkjAKMXBGJ9UcTKx0zNjTA20GtcPjDfpgS5KWz6RHWpnL8t68nDr7fF+/094KJvPKvkd9jUjByRSTi7+bp5DmJiKRUrWkY0dHRaN26NVq2bFnthj08PODj46PxltTlZs6cifDwcBw9ehReXl7o1asXkpKScPz4cdjb22Pt2rWi89PT0xEXF4e0tDStno+IqD45Gp+Ot/4XU+Vc4p6t7DBvZHu0tDOvtec3MzbCuwO88XwXV3y87Rwi4u6Kjl+5nYfhy47gmxf8MYDTMoioHqvWyHJaWhq8vb01btzb2xs3b97U+DoAMDExQUREBGbNmgUzMzNs374dSUlJmDBhAmJiYuDh4aFVu0RE9d2vJ5Lx8poTlQplhZEB5o1sj42vd63VQvlhTk1MsXZCFywa7QsLhXj8Jb+kDG9uPIUNVYx8ExHVF9UaWc7OztZq62orK6uKne60YWpqirlz52Lu3LlPPHf27NmYPXt2tdvu27cv5ykTUb0iCAKW77+GxXuvVDrm1cwCK8Z3gnfzul/CTSaTYUxnV3RuYYP//hyNy7cebBilEoBPd1zAjcwCfDS0DQwMNLvhm4hIatUaWS4tLYWBgWY3hACAgYEBSkt5kwcRUU2pVALmhF2sslB+uk1zbJvcQ5JC+WEt7cyx7a0eGOXvXOnY6iPX8dG2c49d0o6ISB9pXgETEVGdEgQBs/48X+WNfG/19cSqlwMqTYGQiqmxIRaP9UPogMpT9zadvIH/23IWpWUqCTIjItJOtX+6btiwARs2bKjNXIiI6BGCIOCTPy/gf8eTRXGZDJg9rB1e7d5CmsQeQyaTYWp/Lzg3McUHv8eKRpO3nU6FIAhYMrYjp2QQUb1Q7WJZ2/m9mm5IQkRE9wmCgPl/X8LGqCRR3MhAhsVj/TCiY+XpDvrkuQAXWJoYYcovp1Hy0Gjy9jM3Ya4wwvyR7fk7goj0XrWmYahUKq2/ysrKavs1EBE1SKsPX8eaI9dFMUMDGZaN89f7QrncwHYOWPVKABSPbITyv+PJ+PyfyxJlRURUfZyzTESkh/48k4oFOy+JYoYGMnz7gj+GdHCUKCvt9G3dDKte6Qy5oXgU+YeDCdh4LFGapIiIqonFMhGRnjl6LR3/t+Vspfii0b54xrd+Fcrl+njb49sX/PHoNOVPd1xARNwdaZIiIqoGFstERHok7lYu3twYDWWZ+D6R6UN8MKqTi0RZ6caQDo744jlfUUwlAFP+F4NLadqvyU9EVJtYLBMR6Yl7BSX4z0+nkFssXp/+1UB3vNm7YexaOqazK6b29xLF8kvK8Pr6k7iTUyRRVkRE6rFYJiLSA2UqAe9sOoPkzAJRfHA7B3wyrF2DWjXi3ae9MKKjkyh2M7sI/9kYjeJS3hRORPqFxTIRkR5YsjcOB6/cFcX8XJvgmxc6wrCBrUcsk8nwxXO+6OzeVBQ/e+MeFvx9Sc1VRETSYLFMRCSxf86nYUVEvChmZ6HADy8FwERuKFFWtctEbohVr3SGu62ZK
P7TsST8eSZVoqyIiCpjsUxEJKH4u3l47zfxyhdGBjJ8N74THKxNJMqqbtiYG2PlSwEwkYt/FU3//Ryu3M6VKCsiIjGtiuU5c+YgJSVF17kQETUqxaVlePuX08gvEc/TnfVsW3RtaSNRVnWrjaMV5o/sIIoVKssw6edo5D1yoyMRkRS0LpZbtmyJYcOGYceOHVCpVE++iIiIRL7YFYeLjyyZ9lwnF7wS6C5RRtIYHeCCcV1dRbGEu/mYue2cRBkRET2gVbE8f/58uLm54e+//0ZwcDBcXV0xa9YsJCYm6jg9IqKGKeLyHayNFG9l7dXMAvNHtm9QK19U16fD2qG9s5Uotv3MTc5fJiLJaVUsf/TRR4iPj8eePXswZswYZGRkYMGCBWjVqhUGDx6M33//HaWl/PiMiKgqd3KKKu3QZ2xkgGUv+sPUuGHe0PckJnJDfD8+AJYmRqL4zO3nkXqvUKKsiIhqeIPf008/jU2bNiE1NRVfffUVWrdujT179mDs2LFwcXHB9OnTcfXqVV3lSkRU76lUAt7bchYZ+SWi+Kxn2sDHwUrNVY2Dq40Z5o9sL4rlFpUidPMZlKkENVcREdUunayGYWtri9DQUFy4cAFHjhzBuHHjcOfOHSxatAg+Pj7o378/tm3bpounIiKq1/53IhmHr6aLYgPaNsdL3RrXPGV1RnR0xshHNiw5fj0TPx5OkCgjImrsdLp0XHx8PMLCwrBv376KmIuLCyIiIjB69Gh07doVN27c0OVTEhHVGzcyC7Bwp3jTDQcrE3z5nG+jnKeszpwR7eHcxFQUW7wnDhduZkuUERE1ZjUulpVKJTZt2oT+/fvD29sbX3zxBUpLSxEaGorLly8jKSkJkZGRGDJkCE6dOoUpU6boIm8ionpFpRLw/tazKHhkmbhFY3zR1NxYoqz0k7WpHEvG+uHhvx+UZQI+2BoLZRlXXyKiumX05FOqdunSJfz444/YuHEjMjMzIQgCunfvjkmTJmHMmDFQKBQV5wYGBuKvv/5Ct27dcPDgQZ0kTkRUn2yMSkJUQqYo9uJTbujlZS9RRvrtKQ9bTOrjie8PPNjZ8MLNHKw6lIDJ/VpJmBkRNTZaFcs9e/bEsWPHIAgCrKys8N///heTJk1C+/btH3tdu3btcPLkSa0SJSKqr5Iy8vH5rsuimHMTU3w0tI1EGdUP0572QvjF27h6J68itjT8Kga1a45WzSwlzIyIGhOtpmEcPXoU/v7+WLVqFW7evInly5c/sVAGgIkTJ2Lt2rXaPCURUb0kCAI+/D0WhUrx9IsvR/vCQqH1h3uNgsLIEF+O9oXBQ9MxSspU+GBrLFfHIKI6o9VP6pMnTyIgIEDj6wIDAxEYGKjNUxIR1UtbolMqTb94qZsberSykyij+sXfrSle79kSPx5+sIFLTPI9rD+aiNd7tpQwMyJqLLQaWf7777+xY8eOJ54XFhaGuXPnavMURET1XkZeMT57ZPUL5yammDGE0y80ETqgNVrYmoliX+2Ow43MAokyIqLGRKtiefbs2di+ffsTz9uxYwfmzJmjzVMQEdV7C/6+hHsFSlFsfnB7mHP6hUZMjQ3xxXO+olihsgxzwi5IlBERNSY6XWf5UWVlZTAwqNWnICLSS5HX0vHH6VRR7BlfR/Rr3UyijOq3pzxs8VI3N1Es/NId7LlwS6KMiKixqNVK9sKFC2jatGltPgURkd4pUpbh423nRDFLEyN8+mxbiTJqGN4f5AM7C4UoNifsIgpKSiXKiIgag2p/FhgSEiJ6fOTIkUqxcqWlpYiLi8OpU6cwcuTIGiVIRFTffHcgHokZ4vm0Hw72QTMrE4kyahisTeWY+UwbTNt8piKWeq8QS/dd5TxwIqo11S6W169fX/G9TCbDtWvXcO3atcde4+vri0WLFmmdHBFRfXMjswArD8aLYp3cmuDFrm5qriBNjOjohN9O3cDR+IyK2JrD1zHK3wWtHbj2MhHpXrWL5YiICAD31wwNC
grC4MGD8eGHH1Z5rrGxMZycnODu7q6bLImI6okFf19CSemDLZkNDWT4bFQHGDy8WDBpTSaTYd7I9hjyzWGU/Lv1dalKwKw/z2PzG90gk7GfiUi3ql0s9+nTp+L7V199Fb169RLFiIgauyNX0/HPIzecvdzNHT4OVhJl1DB52lvgzT4eWLb/waebJ65nYue5W3jG11HCzIioIdJq/aJ169bpOg8ionpNWaaqtJSZjbkx3n3aW6KMGrbJ/Vph+5lU3MgsrIh9tvMS+rdpBhO5oYSZEVFDw3XdiIh0YOOxJFy9kyeKvT+oNazN5BJl1LCZyA3x8VDxTX2p9wqx6lCCRBkRUUNVrZFlDw8PyGQyhIeHo2XLlvDw8Kj2E8hkMsTHxz/5RCKieio9rxhfh18Rxdo7W2FsZ1eJMmocBrVzQKCHLY4lPLjZ7/sD8RjT2QWO1qYSZkZEDUm1iuXExEQAgFKpFD0mIiJg0T9xyC0Sr/U7e1g7GPKmvlolk8nwybC2eObbw1AJ92OFyjJ8sesyvnnBX9rkiKjBqNY0DJVKBZVKBW9vb9Hj6n4RETVUsSn38Fv0DVEs2N8ZnVvYSJRR49LG0QrjHlmWb/uZm4hOypQoIyJqaDhnmYhIS4IgYP5flyAID2JmxoaYPsRHuqQaodAB3rAyEX9QOifsIlQqQc0VRETVx2KZiEhLey/exolE8QjmlKBWaM6d+uqUrYUC7zyy6khsSjZ+j0mRKCMiakhYLBMRaUFZpsLnuy6LYi5NTRHSo6VEGTVurwS6w9PeXBRbtDsOBSWlaq4gIqqeahXLhoaGWn8ZGWm1lDMRkV7bdCIZCen5otj7g1pzjV+JyA0NMOvZtqLYndxirD1yXaKMiKihqFYl6+rqyi1EiYj+lVukxDfhV0UxXxdrDPN1kigjAoC+rZuht7c9Dl25WxFbeTAB47q6wdZCIWFmRFSfabR0HBERASsPxiMjv0QU+2hoGxhwqTjJTR/sg8NX71bcdJlXXIpl+69h9vB20iZGRPUW5ywTEWng5r1CrD4s/mj/6TbN0c3DVqKM6GFtnawQ7O8siv0clYTER6bMEBFVF4tlIiINLN5zBcWlD9aPNzSQcak4PfPewNYwNnrw661UJWDRnjgJMyKi+qxa0zCSk5MBAM7OzjA0NKx4XF1ubm5PPomISM9duJmNP06LlyMb19UVrZpZSJQRVcW5iSle694CPxxKqIj9HZuGiT2z4O/WVMLMiKg+qlax3KJFCxgYGODixYvw9vZGixYtqn3Dn0wmQ2kpl+4hovrvy3/iRBuQWCiMMO2R9X1JP7zVtxU2nbyB7EJlRWzhrsvY/EY33rBORBqpVrHcu3dvyGQymJmZiR4TETUWxxMycPChVRYAYFIfD9hxlQW9ZG0mx5R+rbBg56WK2Inrmdh/+Q76t2kuYWZEVN9Uq1g+cODAYx/XlsLCQixcuBCbNm1CcnIybGxsMHjwYMybNw/Ozs5PbgBAaWkp5s+fj5MnT+LSpUu4e/culEolXF1dMWDAAHz44Ydwd3ev5VdCRPWZIAhYtFs859XOQoGQntyARJ+9HOiO9UcTkXqvsCL2xT+X0bd1Mxhy5RIiqia9vcGvqKgIQUFBmDdvHvLy8jBixAi4urpi3bp18Pf3R0JCwpMb+bedOXPm4NChQ3B0dMTgwYMxaNAglJSU4Pvvv4evry9OnTpVy6+GiOqziLg7OJWUJYpN7d8KZsbcdEmfmcgN8f6g1qLYldt5CDt7U6KMiKg+0lmxnJWVhaysLAgPT+irgfnz5yMqKgqBgYG4cuUKNm/ejOPHj2Px4sW4e/cuQkJCqtWOiYkJjhw5gqysLERGRmLLli34888/kZCQgOnTpyMnJweTJk3SSc5E1PCoVAIW7b4iirk0NcULXXjjcn0w3M8JPg6Wotg34VegLFOpuYKISKxGxfKOHTswcOBAWFhYwM7ODnZ2drC0tMTAgQPx559/at1uSUkJli9fDgBYs
WIFLCwe3GkeGhoKX19fHDx4ENHR0U9sy8jICD169Ki07bahoSHmzZsHExMTREdHIzs7W+t8iajhCou9iUtpOaLYu097i5YmI/1lYCDDewPFo8uJGQX4PTpFzRVERGJa/bQXBAEhISEIDg5GeHg4CgoKYG1tDWtraxQUFCA8PByjRo3ChAkTtBppjoyMRHZ2Njw9PeHv71/p+OjRowEAYWFh2qRfQSaTwdDQEDKZDMbGxjVqi4gaHmWZCkv2ikeVvZtbYKR/9e6ZIP3wdJtm8HNtIop9u+8qikvLpEmIiOoVrYrlpUuXYv369XB0dMT333+Pe/fuITMzE5mZmcjOzsbKlSvh6OiIjRs3YunSpRq3f/bsWQBAp06dqjxeHo+NjdUmfQD3C/4vvvgC+fn56NevH0xNTbVui4gapi2nUpCUUSCKvTewNW8Oq2dkMhnef2R0+WZ2EX45rtmeAUTUOGl1d8qqVatgZmaGw4cPo2VL8d3glpaWeOONNzBgwAB06NABq1atwrRp0zRqv3zTExcXlyqPl8eTkpI0avfDDz/E7du3kZOTg9jYWMTHx6NNmzZYvXr1E68tLi5GcXFxxeOcnPsfyyqVSiiVSnWX0UPK+4n9pRn2m+Z00WdFyjIsDRePKvu5WKOfl02D/X/RkN9rXd2t8FTLpjh+/cGNmsv3X8Oojg41ulGzIfdZbWK/aY59ph1d9JdWPyGuX7+OgQMHViqUH9ayZUv0798fe/bs0bj9vLw8AKhY1/lR5ubmAIDc3FyN2v39998RHx9f8djX1xc///zzY19HuYULF2LOnDmV4hEREWrzpKrt3btX6hTqJfab5mrSZxE3ZbidayiK9bTKxK5du2qalt5rqO+1QHPg+EO/9jLyS/Dxhr0Y4FzzG9Mbap/VNvab5thnmikoKHjySU+gVbFsb29frTm+crkcdnZ22jxFrbh27RoAID09HdHR0fj4448REBCAH3/8Ea+++upjr50xYwZCQ0MrHufk5MDV1RX9+vWDra1trebdUCiVSuzduxcDBgyAXC6XOp16g/2muZr2WUFJKeYsOQzgwYhEd08bTBvXWYdZ6p/G8F47XRKDg1fSKx4fvqvAnJd6wcpUu9fbGPqsNrDfNMc+005GRkaN29CqWA4ODsbPP/+MrKwsNG3atMpzMjMzsX//fowfP17j9stXv1D310B+fj6A+1M+tGFnZ4dBgwahW7du6NChA/773/8iKCgIrq6uaq9RKBRQKCrv1CWXy/mm1RD7TDvsN81p22e/Hk1GZr74o7v3B/k0mv5vyO+19wf54OCVIxWPswtLsSHqBkIfmdOsqYbcZ7WJ/aY59plmdNFXWt3gN3/+fHh4eCAoKAj79++vdDwiIgIDBgyAp6cnPvvsM43bd3O7v35pSkrVS/uUx2u68561tTWGDRuGwsJCfqxBRACAvOJS/HAwXhQL8mkGf7eqBwaofmnvbI2hHRxEsTVHriMjr1jNFUTU2FVrZDkoKKhSzNjYGNHR0RgwYABsbGwqCtfk5OSKIe9u3bph5MiR2Ldvn0ZJ+fn5AQBiYmKqPF4e9/X11ajdqpRPE7l7926N2yKi+m/D0URkFYhHld992luibKg2hA7wxj/nb0H171Tl/JIyrDwYj4+faSttYkSkl6pVLB84cEDtMUEQkJGRUeWckGPHjkEm03yJpR49esDa2hrx8fE4c+YMOnbsKDq+detWAMCwYcM0bvtRBw8eBAB4enrWuC0iqt9yi5RYdShBFHu6TXN0cLGWKCOqDa2aWWJkR2f8cTq1IvbTsST8p5cHmlmZSJgZEemjahXL169fr+08RIyNjTFlyhQsWLAAkydPxp49eypWwFiyZAliY2PRp08fBAQEVFyzfPlyLF++HMHBwVi4cGFF/O+//0bTpk3RvXt30XMUFBRgwYIFOHjwIBwcHDB48OC6eXFEpLfWRSYiu1A8qjztaS+JsqHaNO1pb+w4exOl/w4vF5eq8MOhB
Mx6lqPLRCRWrWK5pnODtTFz5kyEh4fj6NGj8PLyQq9evZCUlITjx4/D3t4ea9euFZ2fnp6OuLg4pKWlieInT57EnDlz4OzsjI4dO8La2hq3bt3CmTNnkJmZCWtra/z222+iLbWJqPHJLlRi9WHxqPKgds3R3pmjyg2Rm60ZxnR2wa8nblTEfo5Kwpt9PNDMkqPLRPSAVjf41QUTExNERERg1qxZMDMzw/bt25GUlIQJEyYgJiYGHh4e1Wpn1KhRCA0NhZOTE06ePInffvsNJ0+ehLu7O2bMmIFLly6hV69etfxqiEjfrT1yHTlFpaLYNM5VbtDe6tsKRg/txlhcqsIPBxMecwURNUbab1v0kHv37iE3NxeCUPXC7uWrW2jK1NQUc+fOxdy5c5947uzZszF79uxKcV9fXyxevFir5yeixiG7QIm1R8TTzZ7p4Ig2jlYSZUR1wdXGDKMDXLDp5IPR5f8d5+gyEYlpXSzfunULM2fOxI4dOx674LNMJkNpaana40REUlt9JAG5xQ9+TslkwDucq9woTO7XClujUyrmLhcpVVh1MAEzOXeZiP6l1TSMtLQ0dO7cGWvXroVCoYC9vT0EQUC3bt3QrFmzihHmwMBATnEgIr2WlV9SaVT5WV8neDfXbtMjql/KR5cf9vPxJNzN5brLRHSf1puS3Lx5E3PnzsWNGzcwZMgQyGQyREZGIi0tDQcOHICPjw9kMhl27dql65yJiHTmx8MJyC8pq3hsIAPe6c9R5cZkcj/x3OUipQqrDsU/5goiaky0Kpb/+ecftGzZEjNnzqzyeO/evbFnzx6cPn0a8+bNq1GCRES15V5BCTYcTRTFhvs5oVUzro7TmLjamOG5TuLR5Y1RHF0movu0KpZTU1NFG4UYGhoCAIqLH/xgcXZ2Rr9+/fDbb7/VLEMiolqyNjKx0qjy2xxVbpQ4ukxE6mhVLFtZie8Qb9KkCYD7RfTDTExMKsWIiPRBTpES6yLFc5WH+TnB056jyo2Rm60ZRnVyFsU2RiUhPY+jy0SNnVbFspubG5KTkyset2/fHgCwc+fOilhBQQEiIyPh6OhYwxSJiHRvQ2QicovEK2BM6ddKwoxIalP6ecGw0ugy110mauy0KpaDgoIQGxuLu3fvAgCGDx8Oc3NzvP/++5g+fTqWLVuGfv364fbt2xgyZIhOEyYiqqm84lKseWRUeUh7B3hxBYxGzc3WDKP8xaPLPx1L5OgyUSOnVbE8fvx4jBo1ChcvXgQA2NjY4IcffoAgCPjyyy8xbdo0nDx5Em3btsWCBQt0mjARUU39HJWEewVKUWxKP85VJmBKUKtKo8s/cnSZqFHTalMSPz8//Prrr6LYuHHj0KNHD+zcuRNZWVnw9vbG8OHDIZfLdZIoEZEuFJaUYfVhcfHzdJvmaOvE3foIcLc1xyh/Z2yJTqmI/XQsCf/p7QE7C4WEmRGRVHSy3XU5Nzc3TJo0SZdNEhHp1C8nkpGeVyKKTe3Pucr0wJSgVvjjdCrK/t3Vr1BZhh8PJWDG0DYSZ0ZEUtBqGkZVsrKykJWVVbF7HxGRvilSluGHg+LlwPp428PXpYk0CZFecrc1R7B/5ZUxMvNL1FxBRA1ZjYrlHTt2YODAgbCwsICdnR3s7OxgaWmJgQMH4s8//9RVjkREOvHbqRu488hGE1O5rjJVYUq/Vnho6jIKSsqw5gjnLhM1RloVy4IgICQkBMHBwQgPD0dBQQGsra1hbW2NgoIChIeHY9SoUZgwYQJHmolIL5SUqrDygHhUuUcrWwS4N5UoI9JnLezMMbKjeHR5w9Ek3Cvg6DJRY6NVsbx06VKsX78ejo6O+P7773Hv3j1kZmYiMzMT2dnZWLlyJRwdHbFx40YsXbpU1zkTEWns95gU3MwuEsXeDuKoMqk3OagVZA+NLucVl2JtZKJk+RCRNLQqlletWgUzMzMcPnwYb775pmhHP0tLS7zxxhs4fPgwTE1NsWrVKp0lS0SkDWWZC
t8duCaKdW1hg24ethJlRPWBp70Fhvk6iWLrIq8ju1Cp5goiaoi0KpavX7+O/v37o2XLlmrPadmyJfr374/r16+rPYeIqC78eeYmbmQWimJvcwUMqoYpj4wu5xaVYsPRRMnyIaK6p1WxbG9vD2Nj4yeeJ5fLYWdnp81TEBHpRJlKwIoI8ahyR9cm6NmKP5voybybW2Joe0dRbM2R68gt4ugyUWOhVbEcHByM/fv3IysrS+05mZmZ2L9/P0aOHKltbkRENfZX7E1cT88Xxab2bwXZw8OFRI8xJUj8KUR2oRI/HUuSKBsiqmtaFcvz58+Hh4cHgoKCsH///krHIyIiMGDAAHh6euKzzz6rcZJERNpQqQQs3y8eVW7vbIV+rZtJlBHVR20crTCoXXNRbPXhBOQXl0qUERHVpWrt4BcUFFQpZmxsjOjoaAwYMAA2NjZwd3cHACQnJyMjIwMA0K1bN4wcORL79u3TYcpERNWz++JtXL2TJ4q9HeTFUWXS2NtBXth94XbF46wCJX6OSkJIdzcJsyKiulCtYvnAgQNqjwmCgIyMjIoC+WHHjh3jLyUikoQgAN8fFN9g7ONgiQFtmqu5gki99s7WeLpNM4RfulMRW3UoAeM6Oz/mKiJqCKpVLHNFCyKqb85nyXD5Vq4oNiWoFQwM+Ac8aeftIC9RsZyRX4JNp1LAP7+IGrZqFcvlUyyIiOoDQRCwO0V8S4anvTmGPLKqAZEm/FyboG9rexyIu1sR+/HwdXzQVsKkiKjWaXWDHxGRPjt0NR038sUjyFOCWsGQo8pUQ4/u+ng3rwRRd/i+ImrIqjWyrM7t27exdu1aHD58GKmpqQAAZ2dn9O7dG6+99hqaN+eHU0RUtwRBwIoDCaKYu61ZpZ3YiLQR4N4UvbzscPhqekUsPNUAxcoyyOVyCTMjotqidbH8+++/IyQkBHl5eRAEoSJ+7tw57N69G59//jnWrFmD5557TieJEhFVx9H4DJy+kS2KTe7XCkaG/CCNdGNqfy9RsZytlGHr6ZuY0MNDwqyIqLZo9dvj1KlTGDduHPLz8xEcHIxt27bh9OnTOHPmDLZv345Ro0YhLy8PL774Ik6dOqXrnImI1Pp231XRY5empgj254oFpDtdWtgg0MNWFPvh0HWUlKokyoiIapNWxfLChQtRVlaGLVu2YOvWrRgxYgT8/Pzg6+uL4cOHY8uWLdiyZQuUSiU+//xzXedMRFSl4wkZOH49UxT7b19PyDmqTDo2tb947nJadhF+j0mRKBsiqk1a/QY5cuQIunfvjuDgYLXnBAcHo0ePHjh8+LDWyRERaWLZI7v1OVgpMDrARaJsqCHr5mGDri1sRLEVEdegLOPoMlFDo1WxnJ2dDTe3J+9a5Obmhuzs7CeeR0RUUzHJWThyLV0U+0+vllAYGUqUETVkMpkMb/dvJYqlZBVi2+lUiTIiotqiVbHs4OCA06dPP/G8M2fOwMHBQZunICLSyLJH5ipbygWMDeBcZao9PVvZoaOrtSi2IuIaSjm6TNSgaFUsDxo0CHFxcfjoo49QVlZW6bggCJg5cyYuX76MwYMH1zhJIqLHOZeSjYiHNooAgP5OKpjIOapMtUcmk2FKX/EKGEkZBdhx9qZEGRFRbdBq6bhZs2bhjz/+wBdffIFff/0VY8eORYsWLQAASUlJ2LJlCxITE2Fra4uZM2fqMl8iokqW7RePKjc1k6N781KJsqHGpLeXHVzNBdEmOMv3X8OIjs7cBIeogdCqWHZxccH+/fsxfvx4nD9/HosWLYJMdv+HQvmayx06dMD//vc/uLjw5hoiqj2X0nKw5+JtUez1Hi2gyLskUUbUmMhkMgx2UeHHuAefYiSk5+Ov2JsY0ZHTgIgaAq03JenQoQNiY2Nx4MABHD58GDdv3v/YycnJCb169ULfvn11lSMRkVrLH1kBw9pUjvFPueLQPhbLVDfaNRXQxsESl27lVsSW7b+GYb5OMODoMlG9p1WxPGrUK
Dg6OmLFihXo27cvC2MiksTV27nYeT5NFHu9Z0tYKLQeByDSmEwGTOnngcm/nq2IXbuTh13nb+EZX0cJMyMiXdDqBr+dO3ciIyND17kQEWlkecQ1/DvzCwBgaWKEV7u3kCwfarye9mkGHwdLUWzZ/qtQqQQ1VxBRfaFVsdyyZUvk5+frOhciompLuJuHsEdWHXitewtYm8olyogaMwMDGd4OEu/qd/lWbqX59ERU/2hVLI8bNw4HDx7ErVu3dJ0PEVG1rIiIx8ODdubGhgjp2VK6hKjRG9LeAV7NLESxb/ddrbjxnYjqJ62K5RkzZqBXr17o06cPtm3bBqVSqeu8iIjUSs4owPYz4p3SXuneAk3MjCXKiOj+6PKUIPGufhfTcrDv0h2JMiIiXdDqLpjWrVtDpVLhxo0bGD16NGQyGZo1awYTE5NK58pkMsTHx9c4USKict8duIayh4aVTeWGmMhRZdIDz/o6YWn4VSSkP5iq+O3+q+jfplnFEqtEVL9oVSwnJiaKHguCwCkZRFQnUrIKsDU6RRR7qZsbbC0UEmVE9IDhv6PLob89WBkjNiUbB6/cRd/WzSTMjIi0pdU0DJVKpdEXEZGufH8gHqUPjSorjAzwn94ej7mCqG4N93OCu62ZKLaUc5eJ6i2timUiIimkZRdiyynxqPK4rm5oZll5ChiRVIwMDTC5n3ju8unke4i8xiVXieojjaZh7Ny5E9u3b8eNGzegUCjg6+uL1157DS1bcq4gEdW+Hw4moKTswadVxoYGmNTHU8KMiKoW7O+Mb/ddRUpWYUVs6b4r6NHKlnOXieqZahfL48ePx6ZNmwCg4qOksLAwfPXVV9i0aROGDx9eOxkSEQG4k1OEX08ki2LPd3GFgzVHlUn/yP8dXZ7xx7mK2MnELEQlZCLQ01bCzIhIU9WahrFmzRr8+uuvMDQ0xIQJE/Dtt99iwYIF6NatG4qKivDKK68gOzu7tnMlokZs1aEEFJc+GFWWG8owqS9HlUl/PdfJBU6P/DH37b6rEmVDRNqqVrG8YcMGGBgYYNeuXVizZg2mTJmCGTNmIDIyEq+++ipyc3Pxxx9/1HauRNRIpecV43/HxaPKowNc4NzEVKKMiJ7M2MgA/31k7vKxhAycuJ4pUUZEpI1qFcvnzp1Dt27d0L9//0rHPvroIwiCgHPnzlVxZc0UFhbik08+gbe3N0xMTODk5ISQkBCkpqY++eJ/3bt3D7/88gvGjRuHli1bwtjYGJaWlnjqqaewdOlSbqhCVA+sPnwdhcqyiseGBjL8t0+rx1xBpB/GdnaBg5V4dHnZfo4uE9Un1SqWc3Jy4OlZ9ced5fGcnBzdZQWgqKgIQUFBmDdvHvLy8jBixAi4urpi3bp18Pf3R0JCQrXa+eqrrzB+/Hhs3rwZTZs2xahRo9C1a1ecPXsW06ZNQ1BQEAoKCnSaOxHpTlZ+CTYeSxTFgv2d4fbI0lxE+khhZIhJfcRLGx6+mo7opCyJMiIiTVWrWBYEAYaGhlU3YHC/CV2vpzx//nxERUUhMDAQV65cwebNm3H8+HEsXrwYd+/eRUhISLXaMTc3xwcffIDExETExMRg06ZN2LdvH86dOwc3NzccOXIE8+fP12nuRKQ7ayOvI7/kwaiygQyVluUi0mcvdHWDvaV40xyOLhPVH3q5znJJSQmWL18OAFixYgUsLCwqjoWGhsLX1xcHDx5EdHT0E9uaMWMGvvjiC7i5uYniXl5e+PzzzwEAv/76qw6zJyJdyS5UYn1koig23M8JLe3MpUmISAsmckO8+cjGOQfi7uLsjXvSJEREGql2sbxhwwYYGhpW+SWTydQeNzLSfEftyMhIZGdnw9PTE/7+/pWOjx49GsD9petqws/PDwBw8+bNGrVDRLVjXeR15BaXVjyWyYApQRxVpvpn/FPusLMwFsU4ukxUP1S7WBYEQasvbaZnnD17FgDQqVOnKo+Xx2NjYzVu+2Hl854dHBxq1A4R6V52g
RJrjlwXxYZ2cESrZpYSZUSkPVNjQ/ynl3h0OfzSHZxP5bKrRPquWsO+up6P/CTJyfeXiHJxcanyeHk8KSmpRs+zdOlSAMCIESOeeG5xcTGKi4srHpff0KhUKrmiRjWV9xP7SzONtd9WHbqG3CLxqPJbvVtUqx8aa5/VFPtNc5r02fMBTlh5MB5ZBQ/OXRp+Bd+92LG20tNbfK9pjn2mHV30l+ZzJOpAXl4eAMDMrOq73c3N789XzM3N1fo5Vq5cifDwcDRp0gTTp09/4vkLFy7EnDlzKsUjIiLU5klV27t3r9Qp1EuNqd/ylcDq04YAHmwL3NFGhWvRh3FNg3YaU5/pEvtNc9Xtsx52MvyV/OCG+b2X7uDHLTvh3Ein4fO9pjn2mWZ0seKZXhbLte3w4cN45513IJPJsHbtWjg5OT3xmhkzZiA0NLTicU5ODlxdXdGvXz/Y2nLr0upQKpXYu3cvBgwYALlcLnU69UZj7Lcle6+iuOzBFAyZDFjwYk94NbN4zFUPNMY+0wX2m+Y07bNeRaU4vOQQsgsffGpyrswZ/xnqV5tp6h2+1zTHPtNORkZGjdvQy2K5fPULdX8N5OfnAwAsLTWfu3j+/HmMGDECJSUl+PbbbxEcHFyt6xQKBRQKRaW4XC7nm1ZD7DPtNJZ+y8wvwU9R4t36hvs5oa1zU43baix9pmvsN81Vt89s5HK83tMDS/ZeqYj9c/E2rmcWwbt545uPz/ea5thnmtFFX+nl0nHly7ylpKRUebw87u7urlG7169fx8CBA5GVlYXZs2fj7bffrlmiRKRzqw4lVFpXeWp/LwkzItKtV7u3gKXJg7EqQQCW79dkghER1SW9LJbLl3SLiYmp8nh53NfXt9ptpqWlYcCAAUhLS8M777yDTz/9tOaJEpFOpecVY8PRRFFsZEdneNpXb/oFUX1gbSrHaz1aimJhsTdx7U6eRBkR0ePoZbHco0cPWFtbIz4+HmfOnKl0fOvWrQCAYcOGVau9rKwsDBo0CPHx8Xjttdfw9ddf6zJdItKRVYcSUKh8MKpsaCDD2xxVpgYopEcLWCjEo8vfRXB0mUgf6WWxbGxsjClTpgAAJk+eXDFHGQCWLFmC2NhY9OnTBwEBARXx5cuXw8fHBzNmzBC1VVBQgGeeeQbnzp3D2LFj8eOPP0Imk4GI9Mud3CL8dCxRFAv2d+ZufdQgNTEzxqvdxVMJt59JRWJ6vporiEgqenmDHwDMnDkT4eHhOHr0KLy8vNCrVy8kJSXh+PHjsLe3x9q1a0Xnp6enIy4uDmlpaaL4xx9/jGPHjlXsJvj6669X+Xzr16+vrZdCRNXww8EEFCkfrOluaCDD29ytjxqw13t6YF1kIgr+naOvEoAVEdewaEzjWhmDSN/pbbFsYmKCiIgILFy4EL/88gu2b98OGxsbTJgwAfPmzVO7YcmjsrKyAABlZWX45Zdf1J7HYplIOrdzivBzlHiTodGdXOBuy1FlarhszI3xcqA7fjiYUBH743Qqpvb3gqsN1+8n0hd6OQ2jnKmpKebOnYtr166huLgYaWlpWLduXZWF8uzZsyEIQqWid/369dXalpuIpPP9gXgUlz4YVTYykGEKR5WpEfhPLw+YyB/8Ki5TCVjBuctEekWvi2UiavjSsgvxywnxuspjOrtyZI0aBTsLBV56Sjx3eUt0CucuE+kRFstEJKnvIuJR8tCostyQo8rUuLzR2wMKI/Ho8tfhVx5zBRHVJRbLRCSZG5kF2HRSPKr8fBdXODcxlSgjorrXzMoEE7q3EMV2nL2Jy7dypEmIiERYLBORZL4Jvwpl2YN7BowNDTC5H0eVqfGZ1Mez0rrLi/dwdJlIH7BYJiJJXL2di22nxVvaj+/mBkdrjipT49PU3Bj/6eUhiu29eBunk7MkyoiIyrFYJiJJfLUnDqqHFqIxMzbkqDI1aiE9W6CpmVwU+2pPnETZEFE5FstEVOfO3LiH3Rdui2ITe7aEnYVCooyIp
GdpIsdbfcV/MEZey8DRa+kSZUREAItlIpLAV7vFo2VNzOSY2NtDzdlEjcfLge5obiX+o3HRnjjuB0AkIRbLRFSnjl5Lx5FHRsre6usJKxO5miuIGg8TuSGm9vcSxU4n38O+S3ckyoiIWCwTUZ0RBAFfPDKq7GBlglcCW0iTEJEeGtvZFW6PbMrz1Z44qFQcXSaSAotlIqozey7extkb90Sxqf29YCI3lCYhIj0kNzRA6ABvUezyrVyExd6UKCOixo3FMhHViTKVUGmucgtbM4zp7CJRRkT6a5ifE7ybW4hiX++9AmWZSs0VRFRbWCwTUZ3YfjoVV+/kiWLvDvCG3JA/hogeZWggw3sDW4tiiRkF2HQiWc0VRFRb+FuKiGpdSakKX4eLdyNr42iFYb5OEmVEpP8Gtm0OP9cmotg34VeRV1wqTUJEjRSLZSKqdRujkpCSVSiKvT/IGwYGMokyItJ/MpkMM4b4iGIZ+SVYdTBeooyIGicWy0RUq7ILlVi2/6oo1tm9Kfq1biZRRkT1RzcPWzzdRvxv5cfD13E7p0iijIgaHxbLRFSrvjtwDfcKlKLYjKE+kMk4qkxUHR8O9sHDH8IUKsvwzSPTmoio9rBYJqJak5JVgHWRiaLYkPYOCHC3kSYhonrIq7klnu/iKoptPnkDV2/nSpQRUePCYpmIas2SPVdQUvpgqSsjAxk+GOzzmCuIqCrvPu0N04fWI1cJwBf/XJYwI6LGg8UyEdWK86nZ2HYmVRQb/5QbWtqZS5QRUf3VzMoE/+ntIYqFX7qDqIQMiTIiajxYLBORzgmCgIW7LkF4aHdeS4URpvb3ki4ponrujd4esLMwFsUW7rwEQeA22ES1icUyEencgSt3EXlNPOI1qa8nbC0UEmVEVP9ZKIzwztPibbDPpmTj73NpEmVE1DiwWCYinSpTCfh8p3gupYOVCUJ6tJQoI6KG44UurvB4ZCrTl//Eie4NICLdYrFMRDq1+eQNxD1yl/57A71hamyo5goiqi65oUGlm2STMwuw4WiiNAkRNQIslolIZ3KKlFi8J04U83GwxKhOLhJlRNTwDGrXHAHuTUWxb/ddRXpesUQZETVsLJaJSGeW7buKjPwSUWzWs21hyG2tiXRGJpNh1rNtRbHc4lIs3sONSohqA4tlItKJ6+n5WP/IR8ED2jZHj1Z20iRE1IB1dG2CUf7Ootjmk8m4eDNHooyIGi4Wy0SkEwv+vghl2YMlrOSGMnw8tI2EGRE1bB8M9qm0Ucncvy5wKTkiHWOxTEQ1dvjqXYRfuiOKhfRoiRbcgISo1jhYm+Ctvp6iWFRCJnZfuCVRRkQNE4tlIqqR0jIV5v11URSzszDGlKBWEmVE1Hj8p7cHnJuYimILdl5CkbJMooyIGh4Wy0RUI7+cSMaV23mi2P8NbA1LE7lEGRE1HiZyQ8wYKl5K7kZmIdZGXpcoI6KGh8UyEWktK78ES/aK78Bv62iFMZ1dJcqIqPF5poMjurawEcWW77+G2zlFEmVE1LCwWCYirS3aE4d7BUpR7NNhXCqOqC7JZDJ8MqwtZA/9sysoKcOCvy9JlxRRA8JimYi0cvbGPfx6IlkUG9rBAU952EqUEVHj1d7ZGmMDxJ/o7Dh7E0evpUuUEVHDwWKZiDRWphIwc/t5PLxClZmxIWY+01b9RURUqz4Y3BrWpuJ7BT7ZcQElpSqJMiJqGFgsE5HGfj2RjHOp2aLY1P5ecHrkrnwiqju2Fgq8P6i1KHbtTh5v9iOqIRbLRKSRjLxiLNodJ4q1amaBkB4tJcqIiMqN6+oGXxdrUezbfVdx816hRBkR1X8slolII5/vuozsQvFNfXNHtIOxEX+cEEnN0ECGeSPaV7rZb/7fF9VfRESPxd9uRFRt0UmZ2BKdIooN93NCd087iTIiokf5uTbBC13cRLGd527h0JW7EmVEVL+xWCaiaiktU2Hm9guimIXCCB8/00aijIhInQ8GtUZTM/HNfp/uuMCd/
Yi0wGKZiKplzZHruJSWI4pNe9oLza1MJMqIiNRpam6MDweLd/a7np6P7yKuSZQRUf3FYpmInigxPb/STn2tm1vi1e4tpEmIiJ5obGdX+Ls1EcW+OxCPy7dyqr6AiKrEYpmIHksQBHy8/RyKH1qrVSYDFj7XAXJD/ggh0lcGBjJ8FtwBRg/tqFmqEjD993MoUwmPuZKIHsbfdET0WFujUxB5LUMUezWwBTq5NZUoIyKqrjaOVnizj4codubGPWw8lihNQkT1EItlIlLrbm4x5v99SRRzsjbB/z2y8QER6a+3g7zQ0s5cFPtydxxSufYyUbWwWCYiteb+dbHSmsrzRraHhcJIooyISFMmckMsHNVBFCsoKcPMbecgCJyOQfQkLJaJqEr7Lt1G2NmbotgwPyf0b9NcooyISFvdPGwxrqurKBYRdxdhsWkSZURUf7BYJqJKcoqUmLn9vChmbSrHJ8+2lSgjIqqp6UPawN5SIYrN2XEBGXnFEmVEVD+wWCaiSuaFXURadpEoNvOZyr9oiaj+sDaVY+7wdqJYRn4JZm4/z+kYRI+h18VyYWEhPvnkE3h7e8PExAROTk4ICQlBamqqRu0cPHgQc+bMwTPPPAN7e3vIZDK0aNGidpImqufCL96utKV1j1a2GB3gIlFGRKQrg9s7YGBb8VSqXedvYccjU66I6AG9vUunqKgIQUFBiIqKgqOjI0aMGIHExESsW7cOf/31F6KiouDh4fHkhgC88847OHv2bC1nTFT/ZeWXYPof50QxC4URvnjOFzKZTM1VRFRfyGQyzA9uj5OJmcgqeHDz7id/XkA3D1vuyElUBb0dWZ4/fz6ioqIQGBiIK1euYPPmzTh+/DgWL16Mu3fvIiQkpNptDRw4EPPnz8fu3btx4cKFWsyaqH6b9ed5pD8yf/GTZ9vCpamZRBkRka41szTB/JHi1TGyC5WY/nssp2MQVUEvi+WSkhIsX74cALBixQpYWFhUHAsNDYWvry8OHjyI6OjoarX35Zdf4uOPP8bAgQNhY2NTKzkT1XdhZ2/ir0fujA/yaYYxnTn9gqihecbXEcP8nESxiLi7+O3UDYkyItJfelksR0ZGIjs7G56envD39690fPTo0QCAsLCwuk6NqEG6k1uEWX9WXv3i81EdOP2CqIGaO7xdpZt254ZdxI3MAokyItJPelksl88v7tSpU5XHy+OxsbF1lhNRQyUIAmb8fg73CipvPtKM8xeJGqym5sb44jnxdIz8kjK8v/UsylScjkFUTi9v8EtOTgYAuLhU/fFveTwpKanOciouLkZx8YO5nDk5OQAApVIJpVKp7jJ6SHk/sb80U9v99vPxZOy7fEcUG9KuOQa3sau3/6/4XtMO+01z9b3PennaYEyAM7ZEP1hlKiohE99HXMWbvVvW2vPW936TAvtMO7roL70slvPy8gAAZmZV31Rkbn5/j/vc3Nw6y2nhwoWYM2dOpXhERITaPKlqe/fulTqFeqk2+i01H1hyzhDAg6kWFnIBPU1TsWuXZks06iO+17TDftNcfe6zzgZAuLEhskoe/BxYsvcKytIuoYVl7T53fe43qbDPNFNQUPNpRXpZLOujGTNmIDQ0tOJxTk4OXF1d0a9fP9ja2kqYWf2hVCqxd+9eDBgwAHK5XOp06o3a6reCklIEf38cpUK+KP71C53Q19teZ88jBb7XtMN+01xD6TMX30y8vPYUymdfqCDDlhQL7JgcCEsT3b+uhtJvdYl9pp2MjIwat6GXxXL56hfq/hrIz7//y93Sspb/5H2IQqGAQlF59zK5XM43rYbYZ9rRdb99HnYJCeniQvn1ni0xoJ2TmivqH77XtMN+01x977MeXs3xdpAXlu67WhFLuVeET8IuY9k4/1q70be+95sU2Gea0UVf6eUNfm5ubgCAlJSUKo+Xx93d3essJ6KG5O/YNPx6QrxEVDsnK3wwuLVEGRGR1N4OaoUuLZqKYn/FplXa0ZOosdHLYtnPz
w8AEBMTU+Xx8rivr2+d5UTUUNzILMD0P8QryZgZG2LZOH8ojAwlyoqIpGZkaIBvXvCHtal4JO7TPy/g2p08ibIikp5eFss9evSAtbU14uPjcebMmUrHt27dCgAYNmxYHWdGVL+VlKrwzqbTyC0qFcXnjmgPD3sLNVcRUWPh3MS00nJyhcoyvP3raRSWlEmUFZG09LJYNjY2xpQpUwAAkydPrpijDABLlixBbGws+vTpg4CAgIr48uXL4ePjgxkzZtR5vkT1xWc7LyEm+Z4oNrKjE57r5CxNQkSkdwa3d8T4p9xEsUtpOfh4+zluh02Nkl7e4AcAM2fORHh4OI4ePQovLy/06tULSUlJOH78OOzt7bF27VrR+enp6YiLi0NaWlqltlavXo3Vq1cDeLDeXlpaGrp161Zxznfffad2ExSihmDH2ZtYfzRRFHOzMcO8ke25Sx8Ricx6ti1OJmbiyu0H0y/+iElFJ7emeKkb7xeixkUvR5YBwMTEBBEREZg1axbMzMywfft2JCUlYcKECYiJiYGHh0e120pJScHx48dx/PjxivnOJSUlFbHjx49XbDJC1BBdvZ2L6b+L5ykbGxngu/GdamVZKCKq30zkhvhufADMjcX3McwJu4CY5CyJsiKSht4WywBgamqKuXPn4tq1ayguLkZaWhrWrVtX5c5+s2fPhiAIWL9+vdpjj/vq27dv7b8gIgnkFZdi0s/RKHhkvuH8Ee3R3tlaoqyISN+1amaBr8b4iWLKMgFv/RyD9LxiNVcRNTx6XSwTUc2oVAL+77eziL8rXk/5+c6uGNvFVaKsiKi+GNLBEW/2EX+SeyunCFN+iUFpmUqirIjqFotlogZs6b6r+OfCLVGsvbMV5oxoJ1FGRFTfvD+wNQI9xDvVRiVk4svdcRJlRFS3WCwTNVB/x6aJduMCAGtTOb4fHwATOddTJqLqMTI0wLIX/eFgZSKKrzqUgK3csIQaARbLRA3Q+dRsvLfljChmaCDDihc7wdXGTJqkiKjesrNQ4LuXOkFuKF45Z8YfsTiZmClRVkR1g8UyUQNzN7cYb/x0CkVK8XzCWc+0QU8vO4myIqL6rpNbU8wd0V4UU5YJeHNjNG5kFkiUFVHtY7FM1IAUlJTi9Q0ncTO7SBQf19UVr3ZvIU1SRNRgjOvqhpAeLUWxzPwSvL7hJHKLlBJlRVS7WCwTNRClZSq8/ctpxKZki+JdW9hgznBuPEJEuvHxM23Qr7W9KHbldh7e+l8MSkq5QgY1PCyWiRoAQRAwJ+wi9l2+I4q72pjiu5c6wdiI/9SJSDcMDWT4dpw/vJtbiOKHr6Zj+u+x3BKbGhz+BiVqAH48nICNUUmiWBMzOda/1hV2FgqJsiKihsrSRI41r3aBrbmxKP7H6VQuKUcNDotlonpua3QKPtt5WRQzNjLAj690hqe9hZqriIhqxtXGDGsmdIHpI0tRfn8gHhuOJkqTFFEtYLFMVI/tvnALH/4eWym+ZKwfurSwkSAjImpMOro2wYrx/jA0EN8TMTvsAnacvSlRVkS6xWKZqJ6KvJaOt385jTKVeH7gR0N98Kyvk0RZEVFjE+TTHAuDO4higgCEbj6D8Iu3JcqKSHdYLBPVQ2du3MMbP51CSZn4zvM3+3jgjd6eEmVFRI3V2C6uCB3gLYqVqgS89UsMjlxNlygrIt1gsUxUz8Sm3MMra44jv6RMFB/X1RXTB/tIlBURNXZvB7XChEfWcy8pVeE/P53CKe7yR/UYi2WieuRcSjZeWn0cOUWlovgzHRwxf2QHrqVMRJKRyWT45Nm2GNvZRRQvVJZhwrqTiE5iwUz1E4tlonrifGoOxq+OqlQo9/a2x9fPd6x0gw0RUV0zMJBh4ShfPOvrKIrnFZfilTUncOI6C2aqf1gsE9UDyXnAhA2nKhXKvbzssOrlAG46QkR6w9BAhq+f74in2zQTxfNLyvDq2hM4Fp8hUWZE2uFvWCI9d/x6JpZfNER2obhQ7tnKD
j++0hkmj6xxSkQkNbmhAZa/2AlBPuKCuVBZhtfWn8CRayyYqf5gsUykx/Zduo2Qn2JQXCaeYtGjlS0LZSLSayZyQ6x8KQAD2jYXxYuUKrzxcwxOZ3DqGNUPLJaJ9NSfZ1Lx5sZolJSKl4fr2coOq1/pAlNjFspEpN+MjQzw3fhOGNLeQRRXlgnYcMUA/ztxQ6LMiKqPxTKRnhEEAT8eSsA7m86g9JENRwa3c8CaCZ1ZKBNRvSE3NMC34/wxzE+8WZIAGWaHXcLS8KsQBEHN1UTSY7FMpEfKVAI+3XEBC3ZeqnRslL8Tlr/oD4URC2Uiql/khgb45vmOeLmbe6VjX4dfwYw/zkH5yCZLRPqCxTKRnigoKcWbG0/hp2NJlY71cVBh4ch2MDLkP1kiqp8MDWSYO6Idpj3tVenYppM38OraE8guUEqQGdHj8TcvkR5IvVeIMSuPIfzSnUrH/m+AF4JbqGDAdZSJqJ6TyWSY9rQ3Zj/rAxnEUy+Oxmcg+PtIJKbnS5QdUdVYLBNJ7HhCBoYvO4ILN3NEceN/5/m92bsluDEfETUk459yw2veKpjIxWVIwt18jPwuEoev3pUoM6LKWCwTSUQQBPx0LBHjVx9HRn6J6Ji1qRw/T3wKwx+5IYaIqKHwsxXwv5AusLdUiOL3CpR4de0JrIi4xhv/SC+wWCaSQH5xKd777Sw++fNCpRUvPOzM8cdb3dG1pY1E2RER1Q1fF2v8ObkHfBwsRXGVACzaHYc3N0Yjt4jzmElaLJaJ6tiltBwMX34Ef5xOrXQsyKcZtk/pAU97CwkyIyKqe05NTLH1v90x8JHNSwBgz8XbGLbsCGJT7tV9YkT/YrFMVEcEQcAvx5MxckUk4u9WvoFlSr9W+PGVzrAykUuQHRGRdCwURlj5UgDeH9Qaj97LnJhRgOe+P4ofDyVApeK0DKp7RlInQNQY3M0txow/ziH80u1KxywVRvhytC+GdHCUIDMiIv1gYCDD5H6t4Otijbd/PY17Dy0jpywTsGDnJRy6eheLx/ihmZWJhJlSY8ORZaJa9s/5Wxj8zaEqC+UOztb4a2pPFspERP/q5WWPsCk94e/WpNKxw1fTMeDrQ9h+OpU3/1GdYbFMVEsy80sQuvkMJv0cXWm1CwCY0L0Ftv43EO625hJkR0Skv1xtzPDbm4GY3M+z0tKZ2YVKTNt8Bm9sjMad3CJpEqRGhdMwiHRMEARsO52KeX9dRFYVu1HZmhtj4agOGNjOQYLsiIjqB7mhAd4f5IMennaYtvkM7uQWi47vvXgbJxMzMX2wD8Z2duXGTVRrOLJMpEPX0/Px8poTCP3tbJWF8sC2zbH73d4slImIqql7Kzv8M603hlWx7vy9AiWm/3EOY344hsu3cqq4mqjmOLJMpAPZhUos23cVG44lQllWeR6dpcIInw5vh+c6OUPG7fiIiDRiY26MZeP8MbS9A2ZuP19palt0Uhae+fYIJnRvgbeDWqGJmbFEmVJDxGKZqAZKy1T49eQNfL33CjKrmJcMAEPaO2D28HZozru3iYhqZEgHR3RtaYPZYRcRdvam6FiZSsCaI9exNToFbwe1wsuB7lAYGUqUKTUkLJaJtCAIAvZduoNFu+MQdzu3ynMcrU0wd0R7DKhioX0iItKOrYUCy8b5Y3SACz758zySMgpEx7MLlZj/9yX8dCwJHw72wdAODvxEj2qExTKRBgRBwIErd/H13iuITcmu8hxjQwO81rMF3g7ygoWC/8SIiGpDH2977J7WG98diMfKA/EoKVOJjidnFmDyLzHwdbHG1CAv9G/TjEUzaYW/yYmqQRAEHLmWjiV7r+B08j215w1u54AZQ324HBwRUR0wkRsidIA3RndywRe7L+Pv2LRK58SmZGPiT6fQ1tEKU/u3wsC2Dlw5gzTCYpnoMUpKVdh5Lg2rjyTgfKr6O63bOFph1rNt0N3Trg6zIyIiAHCzNcOKFzshpEcWPtt5CdFJWZXOuZiWg0k/x
6B1c0tM6uuBZzo4wdiIi4LRk7FYJqpCdoESv5xIxoajibiVo37R+xa2Zpja3wsjOjrDkCMVRESSCnBviq2TAvHP+Vv44p/LSHxkPjMAxN3Oxbubz+KznZcx/ik3jH/KHfaWCgmypfqCxTLRvwRBQEzyPWw+mYyws2koVJapPdfN5n6RPLKjE4wMOTJBRKQvZDIZhnRwxIC2zREWexPL9l9Dwt38SufdzS3GN+FX8V1EPJ71c8T4p9zRya0J5zVTJSyWqdHLzC/BttOp2HwyGVdu5z32XHdbM0zu2wrBnZwhZ5FMRKS3jAwNEOzvguF+zvj7XBqW779a5c/4kjIV/ohJxR8xqfCwN8foABc818mFy31SBRbL1CjlFZci/OJthJ29iUNX71a5kcjDurawweu9WuLpNs053YKIqB4xNJBhuJ8Tnu3giL2XbmPtkes4fj2zynMT7ubjy3/i8NXuOPT2tseIjk7o36Y5rEzkdZw16RMWy9Ro5BYpcehKOv6KvYn9l++guFT12PMNDWQY2sERE3u2hJ9rk7pJkoiIaoWBgQyD2jlgUDsHXLyZg/VHr2P7mZsoqeJ3gUoADsTdxYG4uzA2NEAvLzsM7eCIp9s2h7UpC+fGhsUyNWhJGfnYd+kO9l++g+PXM544ggzcn4/8fBdXjAlwQTN+DEdE1OC0dbLCl6P9MH1IG2w6mYzfTt6o8mZA4P40jX2X72Df5TuQG8rQpYUN+rVuhr6t7dGqmQXnODcCLJapQcnML0FUQgaOxqfjaHxGlTd1VMXYyACD2znghS6u6OZhyzU4iYgaARtzY7zVtxX+28cTp5KysOXUDfwdm4b8kqpv8FaWCTgan4Gj8RlYsPMSnJuYom9rewR62qJrSxs0s+QAS0PEYpnqLUEQkJJViJjkLJxOvofj1zNxKU39WsiPMjKQoZeXHZ71dcKAdpyTRkTUWMlk90eMu7Swwezh7bDr3C3sPJeGw1fTK+0M+LDUe4X43/Fk/O94MgDAw84cXVva4CkPG3RtaQvnJqZ19RKoFrFYpnojM78El9JycObGPZxOvoczN7KQnleiURvGhgZ4ysMGz3RwxKB2DmhqblxL2RIRUX1kZmyE5wJc8FyAC3KKlAi/eBs7z93CoSt3H1s4A0BCej4S0vOx6eQNAIC9pQK+ztZo72wNXxdrdHC25vS+ekivi+XCwkIsXLgQmzZtQnJyMmxsbDB48GDMmzcPzs7OGrWVlZWF2bNnY/v27bh16xYcHBwQHByM2bNno0mTJrXzAkgrhSVluJ6ej7jbObiclotLt3JxOS0Hd3KLtWrPzsIY/Vo3Q/82zdDTyx4WCr1+2xMRkZ6wMpFjVCcXjOrkgrziUkReS8eBuDs4EHcXadnqN6wqdze3uGK+c7lmlgq0cbSCd3MLeDWzhFdzC7RqZgFLfrqpt/S2aigqKkJQUBCioqLg6OiIESNGIDExEevWrcNff/2FqKgoeHh4VKut9PR0BAYG4tq1a/Dw8MDIkSNx4cIFLF26FLt27cKxY8dgY2NTy6+IHpZXXIqUrAIkpufjenoBkjLycT09H4kZ+bido11RXM5UboguLW3Q3dMW3T1t0d7JmnOQiYioRiwURhWraQiCgLjbuTgQdxdRCRk4lZiFvOLSarVzJ7cYd3Lv4uCVu6K4k7UJPOwt4GpjBjcbM7jamMLt3++5Aoe09LZYnj9/PqKiohAYGIg9e/bAwsICALBkyRK89957CAkJwYEDB6rV1rRp03Dt2jWMGjUKmzdvhpHR/Zc9depULFu2DKGhoVi/fn0tvZLGpaRUhayCEmTklSAjvxhp2UW4lV2EtOxCpGYV4GqKIWae3o/cour9UKkOG3NjdHRtAn/XJujmaQs/lyYwNuKGIUREVDtkMhl8HKzg42CFSX08UVqmwqW0XBy/noET1zNxIjET9wqUGrV5M7sIN9WMVluaGMGliSlkRQY4qrwAB2szNLcygYO1As0sTdDcygQ25
sbcB6CWyARBePJaWnWspKQEzZo1Q3Z2NmJiYuDv7y867ufnh9jYWJw6dQoBAQGPbSstLQ0uLi4wMjJCcnIymjdvXnGsuLgYrq6uyMzMxM2bN9GsWbNq55iTkwNra2ukp6fD1tZWsxeo5wRBQEFJGXKKlMgtKkVukRI5RaXIKSx/fD+WVaBEZn4xMvJKkJlfgvS8YuTosAiuisLIAD4OlvB3a3q/QHZrAjcbswa9dI9SqcTOnTsxdOhQyOUcXagO9pl22G+aY59pp6H3m0olIDEjH+dSs3EuJRuxqdm4kJqtdpUNXZDJAGtTOWzMjNHETA4bc2M0NTNG0/L/mslhaSKHhYkRLBRGsPz3vxYmRjA3NmqwhXZGRgbs7OyQnZ0NKysrrdrQy5HlyMhIZGdnw9PTs1KhDACjR49GbGwswsLCnlgs//PPP1CpVOjVq5eoUAYAhUKBYcOGYe3atdi5cycmTJigy5chIggCVAKgEgSUqQQI/36vEgSoVA99LzzyvUqAskyFUpWAklJVxffKUhWU//63VKVCSZn4+9Ky++cqywQUK8tQWP5VokJRxfdlKFCWoajkwfGif2NlKun/hnJuYoo2jlZo42h5/y94R0u0sDVvsP+giYioYTAwkMHD3gIe9hYY0fH+PVYqlYCE9HxcuJmNa3fycOV2Lq7eyUNSRoFOfucKAnCvQKnxiHY5M2PD+8WzwggmckOYyA3+/e+/3xsZQvFw3OjB93JDAxgZyiA3lMHIwAByQ9m/MQPIDWQwKj9uID7P6N/zDA1kMJDJYCC7P2pvIENFTCbDv8ceHK9relksnz17FgDQqVOnKo+Xx2NjY3XS1tq1a6vVVlWClhyCgcL8kQIYKBMEUYGsf+P3+sHBygQt7MzQwtYcLezM//2vGdxtzGFqbCh1ekRERDphYCBDq2b3b+Z7WHHp/Zvar97OQ3JmAW5kFuBGVgGSMwtw815RnQ1eFZSUoaCkTOub6evKg+L5ocJa9lBhXVF4338sFFVvv4XH0ctiOTn5/nqFLi4uVR4vjyclJdVZW8XFxSgufvAGys7OBgBkZOXAQFG7Uw/qIwMZ0MRUDjsLYzSzMoGDlQJ25nJkpiagT9eOcLExQzNLE5jIqyqIlSjIvYeq91JqfJRKJQoKCpCRkdEgP66sDewz7bDfNMc+0w77TcxeDti7KNDdRQGgaUVcWabCrZwipN4rQmpmPk7EXoZlM2dk5CtxJ7cE6bnFSM8vgR58GFynNJnMoiq+X03UZNaxXhbLeXl5AAAzM7Mqj5ubmwMAcnNz66ythQsXYs6cOZXiqd9PeGIOjZW6Pz8W12kWRERE1NhlZGTA2tpaq2v1sljWRzNmzEBoaGjF43v37sHd3R3Jyclad35jk5OTA1dXV9y4cUPrSfaNEftNc+wz7bDfNMc+0w77TXPsM+1kZ2fDzc2tRksE62WxXL5MXEFB1R/E5+ffn39iaWlZZ20pFAooFIpKcWtra75pNWRlZcU+0wL7TXPsM+2w3zTHPtMO+01z7DPtGBhov6SsXi5G6+bmBgBISUmp8nh53N3dvU7bIiIiIqLGRS+LZT8/PwBATExMlcfL476+vnXaFhERERE1LnpZLPfo0QPW1taIj4/HmTNnKh3funUrAGDYsGFPbGvw4MEwMDDA4cOHcefOHdGx4uJihIWFwdDQEEOHDtUoR4VCgU8//bTKqRlUNfaZdthvmmOfaYf9pjn2mXbYb5pjn2lHF/2mlzv4AcDMmTOxYMECdO/eHXv27KlYtaJ8u+s+ffqItrtevnw5li9fjuDgYCxcuFDU1ksvvYT//e9/eO6557Bp06aK7a7feecdfPvtt3j11Ve53TURERERVaKXN/gB94vl8PBwHD16FF5eXujVqxeSkpJw/Phx2NvbY+3ataLz09PTERcXh7S0tEptffPNN4iKisLvv/8OHx8fdO7cGRcuXMD58+fh5eWFJUuW1NXLIiIiIqJ6RC+nYQCAiYkJI
iIiMGvWLJiZmWH79u1ISkrChAkTEBMTAw8Pj2q3ZWdnhxMnTuDtt99GSUkJtm3bhuzsbEydOhUnTpyo0XIiRERERNRw6e00DCIiIiIiqentyDIRERERkdRYLNeSefPmQSaTQSaT4eeff5Y6Hb0UGxuLKVOmoFu3bnBycoJCoYC1tTUCAwOxbNkyKJVKqVPUO5cvX8YXX3yBfv36wc7ODnK5HA4ODhg1ahQOHz4sdXp6Kz8/Hxs3bsTbb7+Np556CgqFAjKZDLNnz5Y6NckVFhbik08+gbe3N0xMTODk5ISQkBCkpqZKnZreio6Oxueff45Ro0bBxcWl4mc9Va2goADbt2/H66+/jtatW8PExATm5ubw8/PD3LlzkZeXJ3WKemvJkiUYNWoUvLy8YG1tDYVCAXd3d7zyyis4d+6c1OnVCxkZGWjWrBlkMhlatWqlXSMC6dzly5cFhUIhyGQyAYCwceNGqVPSS8uWLRMACO7u7kL//v2FF154Qejfv79gYmIiABD69OkjFBcXS52mXnF2dhYACBYWFsLTTz8tjB07Vmjfvr0AQJDJZMLXX38tdYp66fTp0wKASl+ffvqp1KlJqrCwUOjWrZsAQHB0dBTGjh0rdO3aVQAg2NvbC/Hx8VKnqJdGjBhR5fuJqvbjjz9W9FGbNm2EMWPGCIMGDRIsLS0FAIKPj49w+/ZtqdPUS7a2toKJiYnQtWtXITg4WAgODha8vb0FAIJcLhfCwsKkTlHvvfrqqxX1mKenp1Zt8F+3jqlUKqF3795C8+bNK36gsliuWnx8fJW/jG/dulVRAC5btkyCzPRX//79hZ9++kkoLCwUxVeuXCkAEAwNDYULFy5IlJ3+unbtmvD6668LK1euFKKjo4W5c+eyWBYE4eOPPxYACIGBgUJubm5FfPHixRV/sFJln3/+uTBr1ixhx44dQlpamqBQKFgsP8b69euFN954Q7h48aIofvPmTcHf318AIIwbN06i7PTbkSNHKv28FwRBWLFihQBAaN68uaBUKiXIrH4IDw8XAAhvvPEGi2V9smrVKgGA8PPPPwuvvvoqi2Utbdy4UQAgBAcHS51KvTFw4EABgDB79mypU9F7CxcubPTFcnFxsWBtbS0AEGJiYiod9/X1FQAIp06dkiC7+oXFsvaOHj0qABAUCgU/SdSQp6enAEA4e/as1KnopYKCAsHT01No27atcOXKlRoVy5yzrEO3bt3CBx98gP79+2P8+PFSp1OvyeVyAICxsbHEmdQf5Vu737x5U+JMqD6IjIxEdnY2PD094e/vX+n46NGjAQBhYWF1nRo1IuU/t4qLi5GRkSFxNvULf08+3pw5c5CQkICVK1dW9JW2WCzr0NSpU1FYWIjvv/9e6lTqtaysLCxevBgA8Mwzz0icTf2RkJAAAHBwcJA4E6oPzp49CwDo1KlTlcfL47GxsXWWEzU+5T+35HI59zzQwMaNGxEXFwcvLy94eXlJnY7eiY2NxeLFi/Haa6+hV69eNW5Pb3fwq2/++usvbNmyBXPmzOEbV0NXr17FggULoFKpcPv2bRw9ehR5eXmYNGkSR+irKT4+Hn/99RcAYPjw4RJnQ/VBcnIyAMDFxaXK4+XxpKSkOsuJGp+lS5cCAAYPHgyFQiFxNvpr0aJFuHDhAvLz83Hp0iVcuHABTk5O+PXXX2FoaCh1enpFpVJh4sSJaNKkCb788kudtMliWQfy8vLw1ltvwdvbGx9++KHU6dQ7t2/fxoYNG0SxqVOnYt68eTAw4IcfT1JaWooJEyaguLgYzz//PAICAqROieqB8uW6zMzMqjxubm4OAMjNza2znKhx2blzJ9asWQO5XI558+ZJnY5e2717N/bt21fx2N3dHT/99BN/3ldh2bJlOHnyJNatWwdbW1udtMliGUBwcDAuXbqk0TU//fQTunbtCgD46KOPcOPGDezbt69R/WVc034r17NnTwiCgLKyMiQnJ2Pbtm2YM2cOdu3ahT179
qBFixY6zFpauuqzh02dOhVHjhyBh4cHvvvuu5qmqJdqo9+ISDqXL1/GSy+9BEEQsGjRooq5y1S18PBwAMC9e/dw7tw5zJ07F3369MH8+fPx8ccfS5yd/khOTsbMmTPRp08fTJgwQWftslgGcP36dcTFxWl0TUFBAQDgxIkTWLFiBV5++WUEBQXVRnp6qyb9VhVDQ0O0bNkSoaGhaNGiBZ577jm8/fbbDeoGI1332YIFC/D999+jefPm2L17d4Od86frfiPAwsICgPp+ys/PBwBYWlrWWU7UOKSmpmLw4MHIyspCaGgo3nnnHalTqjeaNGmCXr16YefOnQgMDMSsWbMwcOBAdOnSRerU9MLkyZNRUlKClStX6rRdFssAzpw5o/W1O3fuhEqlwrlz59C3b1/RscuXLwO4X9CsXr0agwcPxvTp02uQqX6pSb89SXBwMCwsLPDPP/+gpKSkwdztq8s+W7lyJWbOnAlra2v8888/2u9MVA/U5nutsXJzcwMApKSkVHm8PO7u7l5nOVHDl5mZiYEDByIpKQmvvfYavvrqK6lTqpfkcjmef/55REdHIywsjMXyv/766y80adIEkyZNEsWLiooA3P9DrbxW27RpU7VviGexrCOP+2V++fJlXL58uUFNJ6htMpkMNjY2SE5ORlZWFpo3by51Snpl06ZNmDx5MszMzPD333+jY8eOUqdE9Uz5x94xMTFVHi+P+/r61llO1LDl5eVhyJAhuHjxIkaNGoUff/yR24TXgJ2dHQDg7t27EmeiX+7du4eDBw9WeayoqKjiWHkBXR28e6qGZs+eDeH+5i6Vvl599VUA95d4EQQB69evlzbZeiQhIQE3btyAlZVVxQ8Eum/nzp145ZVXYGRkhG3btqFHjx5Sp0T1UI8ePWBtbY34+Pgq/9jfunUrAGDYsGF1nBk1RMXFxRgxYgROnDiBQYMGcRUHHSgv+jw9PSXORH+oq8euX78O4H5flcc0GcBksUySWbZsGW7dulUpHhcXhxdffBGCIOCVV17hD9SHREZGYvTo0RAEAZs3b8bAgQOlTonqKWNjY0yZMgXA/Xl+5XOUAWDJkiWIjY1Fnz59eLc91VhZWRnGjRuH/fv3o1evXvjjjz8azNS62hQZGYl//vkHKpVKFFcqlVi2bBk2btwIU1NTPP/88xJl2HhwGgZJZvHixZg2bRr8/PzQqlUrCIKApKQkREdHQ6VSoXfv3li4cKHUaeqVZ599FoWFhWjZsiW2b9+O7du3VzqnZ8+emDhxYt0np+eCg4ORlpYG4MEuh6tXr8Y///wDAHB0dMS2bdsky08KM2fORHh4OI4ePQovLy/06tULSUlJOH78OOzt7bF27VqpU9RLf//9t2ips5KSEgBAt27dKmKzZs3ipkr/Wr58ecW/LTs7O7z11ltVnvfVV1/xk8SHXL16Fa+99hrs7OwQEBAAW1tbpKen49y5c0hLS4OJiQnWr18PV1dXqVNt8Fgsk2QWLFiAnTt34tSpU9i9ezcKCwthY2ODAQMGYNy4cXj55Ze5zvIj7t27B+D+6hDlHytVhcVyZadPn660wUZqaipSU1MBNM4b2UxMTBAREYGFCxfil19+wfbt22FjY4MJEyZg3rx5ajcsaezu3r2L48ePV4o/HOM80geysrIqvn/cH6SzZ89msfyQPn364KOPPsLBgwcRGxuL9PR0GBsbo0WLFhg9ejSmTp3aoG/s1icyQRAEqZMgIiIiItJHHLYjIiIiIlKDxTIRERERkRoslomIiIiI1GCxTERERESkBotlIiIiIiI1WCwTEREREanBYpmIiIiISA0Wy0REREREarBYJiIiIiJSg8UyEREREZEaLJaJiIiIiNRgsUxEREREpMb/A1N+pH6tCY/OAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAssAAAIHCAYAAABpIhEUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAACpMUlEQVR4nOzdd3QVVdfH8e9NT0ihhBYIVYqUUAJI7yCICCigCCoilkew4WNB0YemWHlFwYJIsSugCIpSQyd0CDX00EKH9HKTe98/YsqkQHLTk99nLdZy9pyZ2TkGsnPumXNMVqvVioiIiIiIZGBX2AmIiIiIiBRVKpZFRERERLKgYllEREREJAsqlkVEREREsqBiWUREREQkCyqWRURERESyoGJZRERERCQLKpZFRERERLKgYllEREREJAtFuliOiYnh7bffpn79+ri4uODj48OoUaM4f/58tu8xf/58TCbTbf98++23+fiViIiIiEhxZCqq213HxsbSrVs3AgMDqVq1Kp06deL06dNs376dihUrEhgYSJ06dW57n02bNjFnzpxMz4WFhbFkyRIATpw4ka37iYiIiEjpUWSL5QkTJvDOO+/Qrl07Vq5cibu7OwDTp0/n5ZdfpkuXLqxbty5Xz/jiiy949tln6dChA5s2bcqDrEVERESkJCmSxXJ8fDyVKlUiLCyM3bt306JFC8P5Zs2aERQUxM6dO/H397f5OR06dGDLli18+eWXPP3007lNW0RERERKmCI5Z3nz5s2EhYVRt27dDIUywODBgwFYtmyZzc84deoUW7ZswcnJiaFDh9p8HxEREREpuYpksbxv3z4AWrZsmen55HhQUJDNz/j+++8B6NevH+XKlbP5PiIiIiJScjkUdgKZOXPmDADVq1fP9HxyPCQkxOZnJBfLjzzySLbax8XFERcXl3JssVi4fv06FSpUwGQy2ZyHiIiIiOQPq9VKREQEPj4+2NnZNkZcJIvlyMhIANzc3DI9X6ZMGQAiIiJsuv/27ds5evQo5cuXp1+/ftm6Ztq0aUyaNMmm54mIiIhI4Tl79myWg7C3UySL5fyWPKo8dOhQnJycsnXN+PHjGTduXMpxWFgYNWrUSCm65fbMZjMBAQF069YNR0fHwk6n2FC/5Zz6zDbqt5xTn9lG/ZZz6jPbXL9+nfr16+Ph4WHzPYpksZy8TFx0dHSm56OiogBs+sITEhL45ZdfgOxPwQBwdnbG2dk5Q7x8+fJUqFAhx3mURmazGTc3NypUqKC/6Dmgfss59Zlt1G85pz6zjfot59RnuZObKbNF8gW/GjVqAHDu3LlMzyfHa9asmeN7r1y5ksuXL1OnTh3at29ve5IiIiIiUuIVyWK5WbNmAOzevTvT88lxPz+/HN87eQrGiBEjbMxOREREREqLIlksd+jQAS8vL06cOMHevXsznF+0aBEA/fv3z9F9IyMj+eOPPwAVyyIiIiJye0WyWHZycmLs2LEAjBkzJmWOMiRtdx0UFESXLl0Mu/fNnDmThg0bMn78+Czv+9tvvxEdHU3btm2pV69e/n0BIiIiIlIiFMkX/AAmTJjA6tWr2bJlC/Xq1aNTp06EhISwbds2KlasyNy5cw3tr169SnBwMKGhoVneM6drK4uIiIhI6VYkR5YBXFxcCAgI4K233sLNzY0lS5YQEhLCyJEj2b17N3Xq1MnR/UJDQ1m7di2Ojo48+OCD+ZS1iIiIiJQkRXZkGcDV1ZXJkyczefLk27adOHEiEydOzPJ81apVSUhIyMPsRERERKSkK9LFsoiIiBQes9lMYmJivtzXwcGB2NjYfLl/SaQ+A3t7+0JZY1rFsoiIiBiEh4dz9epV4uLi8uX+VquVKlWqcPbs2VxtFlGaqM+SODs74+3tjaenZ4E9U8WyiI
iIpAgPD+f8+fO4u7vj7e2No6NjnhdnFouFyMhI3N3dsbMrsq9PFSmlvc+sVitms5mwsDDOnz8PUGAFs4plERERSXH16lXc3d2pXr16vo1gWiwW4uPjcXFxKZWFny3UZ0nvsnl4eHDu3DmuXr1aYMVy6extERERycBsNhMXF4eXl1ep/qhfii6TyYSXlxdxcXGYzeYCeaaKZREREQFIeXGsMF6iEsmu5O/PgnrRUcWyiIiIGGhUWYqygv7+VLEsIiIiIpIFFcsiIiIiIllQsSwiIiIikgUVyyIiIiK3ER0dzaeffkrv3r2pWrUqzs7OeHh40KhRI0aOHMnSpUtL1M5669atw2QyMXLkyMJOpdBpnWURERGRW9i8eTNDhgwhNDQUFxcXWrdujY+PD3FxcZw4cYIFCxawYMECGjVqxMGDBws7XcljKpZFREREsrB792569OhBXFwcr7zyChMmTMiwGcbZs2eZPn06X375ZSFlmffatGnD4cOH8fLyKuxUCp2KZREREZFMWCwWRowYQVxcHFOmTGHChAmZtvP19eX//u//GDFiRAFnmH/c3Nxo2LBhYadRJGjOsoiIiEgmli9fzuHDh6lRowbjx4+/bXt/f3/D8caNGxk7dix+fn6UK1cOV1dXGjZsyOuvv87NmzczXD9//nxMJhMTJ07M9P733nsv9vb2nD592hA/cOAAI0aMoE6dOri4uFCxYkWaN2/Oiy++SGhoqKHtli1bGDhwIDVr1sTZ2ZkqVarQpk0bXn/9dSIjI1PaZTVn+ebNm3z22WfcfffdKfeoUKECffr0YdWqVZnm3bVrV0wmE6dPn2bJkiW0bduWMmXKUL58eYYNG8a5c+cy79AiQiPLIiIicksWi5Ub0fF5eD8LEdFmzHZx2Nnl77hdOTcn7Oxs28Ti77//BmDIkCHY29vn+PpXXnmFffv24efnR48ePYiNjWX37t28//77/PnnnwQGBuLu7m5Tbsl27dpFx44diY2Nxc/PjwEDBhAdHc3JkyeZMWMGAwcOpGrVqgAsW7aMgQMHYrVaadOmDe3bt+fmzZscO3aM999/n2eeeea2+QQGBvL8889Tq1YtGjRoQLt27Thz5gwrV65k5cqVzJkzh1GjRmV67eeff8706dPp1KkT99xzD9u2bePnn39m165d7Nu3D1dX11z1RX5RsSwiIiK3dCM6Hv+pqws7DZvsmtCTCu7ONl27b98+AFq0aGHT9f/73/9o3769Yd5vXFwczz//PLNnz2b69Om8/fbbNt072aeffkpsbCwfffQRL7/8suHckSNHDM/+6KOPsFgsLFq0iAceeMDQdseOHVSoUOG2z2vQoAFbt26lbdu2hviePXvo3r07L730EkOHDs206J41axYbN26kXbt2QNIKI7169WLLli389NNPWRbZhU3TMEREREQyce3aNQC8vb0zPf/EE08wcuRIw59NmzalnO/bt2+GF+ScnZ355JNPcHBw4I8//sh1jleuXAGgZ8+eGc41bNgwZVT5dm1bt26Nh4fHbZ9Xu3btDIUyJP1CMWbMGMLDwwkICMj02pdeeimlUIakedHjxo0DYMOGDbd9dmHRyLKIiIiIDRYsWJBhbeWuXbvSsWPHlOPz58+zbNkyjhw5Qnh4OBaLBQAnJyeOHTuW6xz8/f35+++/GTNmDFOnTqVjx444OGRe3vn7+3P48GEeeeQR3nrrLfz9/W2aBpOYmMiaNWvYsmULoaGhxMXFAaR8PVl9Xb17984Qq1+/PkCGudVFiYplERERkUwkT0u4evVqpucTEhJS/vuZZ57hq6++MpyfPn06r7/+OmazOd9yfOWVV9i0aRPr1q2jW7duuLu7065dO/r168fIkSMNI9vvvvsu+/fvZ9myZSxbtoxy5crRsWNH7rvvPkaMGIGLi8ttn3fu3DnuvffelCkqmYmIiMg0Xr169Qyx5NHs5IK7KFKxLCIiIrdUzs2JXRMyfnRvK4vFQkRkJB7u7g
Xygp+tmjVrxubNm9mzZw/Dhw/P0bWBgYG8/PLLeHl5MWPGDLp27UqVKlVwdk6aP+3j45Pj0dTkUem0PD09Wbt2LZs3b2bZsmWsW7eOtWvXsmrVKqZNm8bGjRupV68ekLTE3c6dO1m7di1//vkn69evTymcP/jgA7Zu3XrbecujR49m3759PPDAA7z66qs0aNAADw8P7OzsmD17Nk8//TRWqzXTa/P7/3V+UbEsIiIit2RnZ7L5JbnMWCwWHC1xeLo7F+kCqm/fvnz++ecsXLiQ999/P0crYvz+++8AvPPOOzz22GOGczExMVy8eDHDNU5OSYV92iXc0jp//nymcZPJRMeOHVOmf1y+fJkXX3yRn376iTfffJNff/01pa2DgwO9e/dOmRIREhLCqFGjWLt2Le+//z4ffPBBll9TVFQUq1atonLlyvzyyy8Z+uPkyZNZXlucFd3vUBEREZFCdM8993DnnXdy5swZpk2blqNrb9y4AWQ+9WDhwoWZjr4mv4x39OjRDOeOHj2a7fWIK1WqlLJW84EDB27ZtmbNmrz22mvZahsWFobFYqFq1aoZCmWz2ZzyC0JJo2JZREREJBN2dnZ89913ODs789Zbb/Hqq68SFhaWod21a9cIDg42xJJfXPvmm28Mc5YPHTqUUpym17p1a9zc3Pj777/ZtWtXSvzq1as89dRTmU7D+PLLLzl16lSG+PLly4GkqRfJ/u///i/TEe3M2mamUqVKeHl5ceDAATZv3pwST0xM5LXXXsu0yC8JVCyLiIiIZMHf35/Vq1dTpUoVPvzwQypXrkyXLl0YNmwYgwYNonXr1lStWpV169bRsGFDWrVqBcDjjz9OlSpVWLZsGQ0aNODBBx+kV69eNG/enE6dOlGzZs0Mz3J3d+e///0vCQkJdOzYkT59+tC3b1/q169PYmIirVu3znDNl19+SZ06dWjcuDGDBw/moYceonnz5rz00ku4uLgY1nGeNGkS1apVo2XLljz44IMMHTqUBg0aMGPGDMqXL89///vfW/aFg4MDr776KgkJCXTp0oXevXvz0EMPcccdd/Dll18yZsyYXPZ20aRiWUREROQWOnbsyIkTJ5gxYwYdO3YkODiYxYsXs3r1aiIiIhg6dCi///47+/fvp0mTJkDSSho7duzg4YcfJj4+nqVLl3L+/HmmTJnCTz/9lOWzJk6cyIcffkj16tVZu3YtBw4cYNSoUaxYsSJlTnNaU6ZMYdSoUZhMJtasWcOyZcuIiYlh9OjR7N27lw4dOqS0/eyzz3jooYeIjo7m77//5p9//sHBwYFx48YRFBSU8iLgrbzxxhssWLAAPz8/Nm/ezOrVq2nWrBmBgYEpvyiUNCZrVq8syi2Fh4fj5eXF1atXs7XjjSTNZ1q+fDn33HMPjo6OhZ1OsaF+yzn1mW3UbzlX0vosNjaWU6dOUbt27WwtI2Yri8VCeHg4np6eRfoFv6JEfZYqJ9+n165dw9vbm7CwMDw9PW16XunubRERERGRW1CxLCIiIiKSBRXLIiIiIiJZULEsIiIiIpIFFcsiIiIiIllQsSwiIiIikgUVyyIiIiIiWVCxLCIiIiKSBRXLIiIiIiJZULEsIiIiIpIFFcsiIiIiIllQsSwiIiIikgUVyyIiIiIiWVCxLCIiIpIFk8mEyWQq7DQMunbtislk4vTp0/n2jFq1ahW5r7uwqFgWERERKUVOnz6NyWSia9euhZ1KseBQ2AmIiIiISPZ9++23REdHU61atXx7xpo1azCbzfl2/+JExbKIiIhIMVKjRo18f0bdunXz/RnFhaZhiIiIiOSRs2fP8vTTT1OzZk2cnZ2pVKkS999/Pzt27Mjymt9++422bdvi5uaGt7c3Q4YM4fjx40ycOBGTycT8+fMN7bOasxwSEsJ//vMf6tevj5ubG+XLl6dx48Y8/fTTBAcHAzBx4kRq164NwPr161PmZJtMJkaOHJlyr1vNWT579izPP/889evXx9XVlf
Lly9OqVSsmTZpEeHh4zjutiNPIsoiIiNyaxQIx1/P0fqboCLCPB7t8HrdzLZ//z/jX/v376d69O1evXqVBgwbcf//9nDlzht9//51ly5bx448/MmTIEMM1M2bM4MUXX8TOzo7OnTtTpUoVtm3bRps2bejfv3+2n3327FlatmzJ9evXqVevHvfccw+JiYmEhITw9ddf065dOxo0aEDz5s154IEHWLx4MZUrV6ZPnz4p9+jYseNtn7Nx40buu+8+bt68Sa1atejfvz8xMTEcOXKEiRMnMmDAAJo3b57tvIsDFcsiIiJyazHX4cO8+1jeDvDKs7vdxisnoIx3vj/GarUyfPhwrl69yquvvsp7772XMjK7ePFihg4dyqhRo+jYsSNVq1YF4OTJk7z66qs4OTnxzz//0K1bNwASEhJ46qmnmDdvXrafP2fOHK5fv87YsWP57LPPDOfOnDmTMv944MCBNG/enMWLF9OwYcMMo9a3cv36dR544AFu3rzJhx9+yLhx47BL84vI1q1b8fHxyfb9igtNwxARERHJpXXr1rF//35q1KjB1KlTDVMYHnjgAQYOHEhkZCRz585Nic+dO5f4+HgeeeSRlEIZwMHBgenTp+Pu7p7t51+5cgWAnj17ZjhXo0aNPJmDPGfOHK5cuUKfPn3473//ayiUAdq1a0elSpVy/ZyiRsWyiIiISC5t3LgRgKFDh+Lo6Jjh/COPPGJoB7B582aADFMzAMqWLUvv3r2z/Xx/f38A3njjDf78809iY2Ozn3w2rV69GoCnn346z+9dlBXpYjkmJoa3336b+vXr4+Ligo+PD6NGjeL8+fM23e/06dM888wz1K5dG2dnZ7y9vWnXrh0ffvhhHmcuIiIipcmFCxeApBfjMpMcT1vDhIaGAuDr65vpNTlZ9WLkyJEMHTqUQ4cO0b9/f8qVK0fnzp159913uXjxYrbvcytnz54FSt9KGUV2znJsbCzdu3cnMDCQqlWrMmDAAE6fPs28efP4888/CQwMpE6dOtm+399//83gwYOJiYmhZcuWtG3blmvXrrF//36++uorXnnllXz8akRERIox1/JJc3/ziMViISIiAg8Pjwwf5ec51/L5e/9syu/d8Ozt7fnll194/fXX+eOPP1i7di3btm1j48aNvPfee/zzzz+0b98+X3MoqYpssTx16lQCAwNp164dK1euTJm3M336dF5++WVGjRrFunXrsnWvI0eOcP/99+Ph4cGqVasM3ywWi4Xdu3fnx5cgIiJSMtjZ5e1LchYL1kQnKONZYCtV5LfkF9tCQkIyPZ+8zFvajUSqVq1KcHAwZ8+epVGjRhmuSR7JzYkWLVrQokULJk6cSHh4OBMnTuT//u//ePHFF9m+fXuO75eWr68vR44c4cSJEzRt2jRX9ypOiuR3aHx8PDNnzgRg1qxZhgnu48aNw8/Pj/Xr17Nr165s3W/cuHHExsYyf/78DL9V2dnZ0apVq7xLXkREREqdTp06AbBw4UISExMznP/+++8N7QA6dOgAJK2WkV5YWBgrV67MVU6enp5MmzYNk8nEgQMHUuJOTk5A0qobOZH88uDs2bNzlVdxUySL5c2bNxMWFkbdunVp0aJFhvODBw8GYNmyZbe919mzZ1mxYgV16tThnnvuyfNcRURERLp27UrTpk05ffo0b7/9NlarNeXc77//zm+//Ya7uzujRo1KiT/++OM4OTnx7bffsmHDhpR4YmIiL7/8MhEREdl+/nfffWcoiJP9/fffWK1Ww7xob29vHB0dOXHiRKaFfVZGjx6Nt7c3f//9N5988onhawQIDAzk8uXL2b5fcVEkp2Hs27cPgJYtW2Z6PjkeFBR023utW7cOi8VC+/btSUhI4LfffmPz5s0kJibSpEkTHnzwQcqVK5d3yYuIiEiJ07Zt2yzPjR49mtGjR/PDDz/QrVs33n33XX7//XeaN2/OmTNn2Lx5Mw4ODnzzzTcpayxD0otyH3zwAS+++C
LdunWjS5cuVK5cme3bt3P9+nVGjBjB999/nzISfCuLFy/m0UcfpW7dujRt2hRXV1dOnTrFtm3bsLOzY+rUqSltnZyc6NOnD8uWLaNZs2a0bNkSJycnOnTowOOPP57lM8qXL8/ChQu57777eOmll/j0009p3bo1MTExHD58mOPHj7Nnz54St3xckSyWz5w5A0D16tUzPZ8cz2peUFqHDh0CwN3dnU6dOhEYGGg4/+abb7Jo0SLD+oaZiYuLIy4uLuU4eTtHs9mcstC33FpyP6m/ckb9lnPqM9uo33KupPWZ2WzGarVisViwWCz59pzkEcnkZxUH27Zty/Lc3XffjcVioXHjxuzcuZN33nmHFStWsGjRIry8vBgwYACvv/46bdq0yfD1Pvfcc/j4+PDRRx8RGBiIi4sLXbt25d133+Wjjz4CoFy5coY+S5b2/9OLL75ItWrV2LJlCxs3biQqKgofHx+GDh3KuHHjaNWqleHZs2fP5pVXXmH16tX8+OOPJCYmYjabeeyxxwz5pc+3c+fO7Nmzhw8//JAVK1awZMkS3N3dqV27NpMmTaJ27dr5/v/UYrFgtVoxm83Y29vfsm1e/N00WdOPoRcBTz31FF9//TVvvvmm4TehZMePH6devXrUq1ePo0eP3vJezzzzDF999RUODg64u7vz+eef06dPH65cucKUKVP4/vvv8fLy4uDBg4ZJ9+lNnDiRSZMmZYj/+OOPuLm55fyLFBERKWIcHByoUqUKvr6+2RrNlPyTmJhIx44dCQ4O5vDhw1SuXLmwUyoy4uPjOXv2LBcvXrztvOvo6GgefvhhwsLC8PT0tOl5RXJkOS8l/3aTkJDAV199xdChQ4Gk39K+++47goOD2bFjB59//jnvvPNOlvcZP34848aNSzkODw/H19eXbt26UaFChfz9IkoIs9nMqlWr6NWrV6YLtkvm1G85pz6zjfot50pan8XGxnL27Fnc3d1xcXHJt+dYrdaUpePye0m1ou7EiRNUqFCBsmXLpsTi4uJ48803OXLkCD169KBevXrqszRiY2NxdXWlc+fOt/0+vXbtWq6fVySL5eTVL6KjozM9HxUVBYCHh0e27+Xu7p7pDjmPP/44O3bsYP369be8j7OzM87Ozhnijo6OJeIfyIKkPrON+i3n1Ge2Ub/lXEnps8TEREwmE3Z2dvm6/nHyQFbys0qzxYsX87///Q9/f398fX0JDw9n3759hIaG4u3tzaxZs7Czs1OfpWFnZ4fJZMrW37u8+HtZJIvl5B1rzp07l+n55HjNmjVve6/kNjVq1Mj0N7HkHXVK4tubIiIiUrT16NGDffv2ERgYSFBQEAkJCVSrVo3//Oc/jB8/Psvd/aTgFMliuVmzZgBZbhaSHPfz87vtvZKXnrtx40am569fvw5gWMtZREREpCC0bt2an376qbDTkFsokuP4HTp0wMvLixMnTrB3794M5xctWgRA//79b3uv9u3bU6FCBS5evEhwcHCG88nTLzJbz1lERERESrciWSw7OTkxduxYAMaMGZMyRxmStrsOCgqiS5cu+Pv7p8RnzpxJw4YNGT9+vOFeDg4OjBs3DqvVypgxY1KWfANYvXo18+fPx2Qy8fTTT+fzVyUiIiIixU2RnIYBMGHCBFavXs2WLVuoV68enTp1IiQkhG3btlGxYkXmzp1raH/16lWCg4MJDQ3NcK9XXnmFgIAAVq9eTf369Wnbti1Xr14lMDCQxMRE3nnnHdq0aVNQX5qIiIiIFBNFcmQZwMXFhYCAAN566y3c3NxYsmQJISEhjBw5kt27d1OnTp1s38vR0ZHly5fz/vvv4+3tzYoVK9i/fz9dunRh2bJlvPHGG/n4lYiIiBQvRXALBpEUBf39WWRHlgFcXV2ZPHkykydPvm3biRMnMnHixCzPOzo68uqrr/Lqq6/mYYYiIiIlR/JuaGazGVdX10LORiRzybvy3W73vrxSZEeWRUREpGA5Ojri7OxMWFiYRp
elSLJarYSFheHs7Fxga5sX6ZFlERERKVje3t6cP3+ec+fO4eXlhaOjY57vGGexWIiPjyc2NrbUb7CRXaW9z6xWK2azmbCwMCIjI6lWrVqBPVvFsoiIiKTw9PQEkl6cP3/+fL48w2q1EhMTg6ura6nfujm71GdJnJ2dqVatWsr3aUFQsSwiIiIGnp6eeHp6YjabSUxMzPP7m81mNmzYQOfOnUvENuEFQX2WNEe5ML52FcsiIiKSKUdHx3wpTuzt7UlISMDFxaXUFn45pT4rPKVv0ouIiIiISDapWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyYKKZRERERGRLKhYFhERERHJgoplEREREZEsqFgWEREREcmCimURERERkSyoWBYRERERyUKRLpZjYmJ4++23qV+/Pi4uLvj4+DBq1CjOnz+fo/vUqlULk8mU5Z8jR47k01cgIiIiIsWZQ2EnkJXY2Fi6d+9OYGAgVatWZcCAAZw+fZp58+bx559/EhgYSJ06dXJ0z8ceeyzTuJeXV16kLCIiIiIlTJEtlqdOnUpgYCDt2rVj5cqVuLu7AzB9+nRefvllRo0axbp163J0z/nz5+d9oiIiIiJSYhXJaRjx8fHMnDkTgFmzZqUUygDjxo3Dz8+P9evXs2vXrsJKUURERERKgSJZLG/evJmwsDDq1q1LixYtMpwfPHgwAMuWLSvo1ERERESkFCmS0zD27dsHQMuWLTM9nxwPCgrK0X0//PBDTpw4gbOzM40bN2bQoEFUrFgxd8mKiIiISIlVJIvlM2fOAFC9evVMzyfHQ0JCcnTfV1991XD80ksv8dlnnzFq1KjbXhsXF0dcXFzKcXh4OABmsxmz2ZyjPEqr5H5Sf+WM+i3n1Ge2Ub/lnPrMNuq3nFOf2SYv+qtIFsuRkZEAuLm5ZXq+TJkyAERERGTrfvfddx/dunXD39+fihUrcvLkSebOncuMGTMYPXo0FSpUYMCAAbe8x7Rp05g0aVKGeEBAQJZ5SuZWrVpV2CkUS+q3nFOf2Ub9lnPqM9uo33JOfZYz0dHRub5HkSyW89qnn35qOG7cuDEff/wxDRs25KmnnuK11167bbE8fvx4xo0bl3IcHh6Or68v3bp1o0KFCvmSd0ljNptZtWoVvXr1wtHRsbDTKTbUbzmnPrON+i3n1Ge2Ub/lnPrMNteuXcv1PYpksZy8+kVWvw1ERUUB4OHhkavnPPHEE0yYMIHg4GBOnz5NrVq1smzr7OyMs7Nzhrijo6O+aXNIfWYb9VvOqc9so37LOfWZbdRvOac+y5m86CubVsNYsGABsbGxuX54VmrUqAHAuXPnMj2fHK9Zs2aunmNnZ0fdunUBCA0NzdW9RERERKTksalYfvzxx/Hx8eG5555LWbkiLzVr1gyA3bt3Z3o+Oe7n55frZ924cQNInQctIiIiIpLMpmJ59OjRJCQkMGvWLFq2bEnbtm355ptvUqZH5FaHDh3w8vLixIkT7N27N8P5RYsWAdC/f/9cPefgwYMEBwfj5uZGw4YNc3UvERERESl5bCqWZ8+eTWhoKLNnz6Z169Zs376dp556Ch8fH5555hl27tyZq6ScnJwYO3YsAGPGjDEU4d
OnTycoKIguXbrg7++fEp85cyYNGzZk/PjxhnstX76ctWvXZnhGUFAQQ4YMwWq1Mnr0aJycnHKVs4iIiIiUPDbv4FemTBlGjx5NYGAgQUFBjBkzBgcHB2bPns1dd91FixYt+PLLL1PWI86pCRMmcNddd7Flyxbq1avHgw8+SNu2bXn55ZepWLEic+fONbS/evUqwcHBGeYeb9++nR49elCrVi0GDBjAsGHDuOuuu/D39+fw4cN07dqV9957z9ZuEBEREZESLE+2u27SpAmffvopFy5c4Pvvv6dz587s27ePMWPG4OPjwxNPPMGuXbtydE8XFxcCAgJ46623cHNzY8mSJYSEhDBy5Eh2795NnTp1snWfu+++m1GjRuHp6cnmzZtZtGgRx48fp2PHjnz99desXr0aV1dXW75sERERESnh8nTpOLPZTERERMpmIVarFbPZzLx585g/fz6DBg1izpw5lC1bNlv3c3V1ZfLkyUyePPm2bSdOnMjEiRMzxNu1a0e7du1y8mWIiIiIiAB5NLIcGBjIE088QdWqVXn22WcJCgri/vvvZ+XKlYSHh/PDDz/QtGlTfv/9d55//vm8eKSIiIiISL6zeWT5xo0bfPfdd3z99dccOnQIq9WKr68vr732GqNHj6ZKlSopbYcNG8aQIUNo0aIFy5cvz5PERURERETym03F8ogRI/jtt9+Ii4vDZDLRt29fnnnmGe655x7s7DIfrHZwcKB169YsWLAgVwmLiIiIiBQUm4rlH3/8kSpVqjBq1CieeuqplB33bmfQoEG53nVPRERERKSg2FQsL1y4kAEDBuDgkLPL+/fvn+uNRERERERECopNL/hFRUWxffv227YLDAzk22+/teURIiIiIiKFzqZieeTIkcyZM+e27b755hsef/xxWx4hIiIiIlLo8mTpuKxYLBZMJlN+PkJEREREJN/ka7F88uRJPD098/MRIiIiIiL5Jttv6KXfRW/v3r1Z7qyXkJBAcHAwGzZsoFevXrnLUERERESkkGS7WJ44cSImkwmr1YrJZGLv3r3s3bv3ltdUqlSJd999N7c5ioiIiIgUimwXy/PmzQPAarUyatQoOnbsyBNPPJFpWycnJ3x8fGjbti3Ozs55k6mIiIiISAHLdrH82GOPpfz3ggUL6Nu3ryEmIiIiIlLS2LQpSUBAQF7nISIiIiJS5OTrahgiIiIiIsVZtkaWu3fvjslkYsGCBVSvXp3u3btn+wEmk4k1a9bYnKCIiIiISGHJVrG8bt06TCYT0dHRKcfZpU1JRERERKS4ylaxfOrUKQCqVatmOBYRERERKcmyVSzXrFnzlsciIlLMmWMgJhJ7S1xhZyIiUqTYtBqGiIgUc6FBcORPCNkCF/dD7E0cgXsB65GXoXITqNkOGvYDn5agKXUiUkrZtBrGpUuX2LBhA5cuXTLET5w4wUMPPUSTJk245557CAwMzJMkRUQkD1itcOQv+KoLfNUJ1r8PpzdC7E1DM1NcOJzZAhs/hq+7w5cd4cDipOtFREoZm4rl9957j27duhEWFpYSCw8Pp2PHjixcuJBDhw7xzz//0KNHD44dO5ZnyYqIiI3O74b5/eDnhyF0b86uvXQAFo2COT3hjAZBRKR0salYXrduHY0aNaJ+/fopsfnz53Pp0iWGDRtGcHAw06dPJyYmho8//jjPkhURkRyyJML6D2FODwjZnLt7nd8Jc++G1ZMgMSFv8hMRKeJsmrN8/vx52rVrZ4j99ddfODg48Mknn+Dt7c2LL77IggULWL9+fZ4kKiIiORR5BX57Ek5mseuqZ3VoPBBqdgDvepjtnFm/ajldm/ricH4bHFwCN0MyXrdpOpzZCoPngqdPfn4FIiKFzqZiOSIiAjc3t5TjxMREtm7dir+/P97e3inxhg0b8ueff+Y+SxERyZkbIfDtALiRyVKfXjWg5/+g8SCws0+Nm81EuVTFWq83NOoHPSYmvQS4+n9w/aTxHme2Jk3LePQP8K
6Xr1+KiEhhsmkaho+PD0eOHEk53rRpE5GRkXTt2tXQLiEhAScnp1wlKCIiOXQlGOb2yVgom+yg63gYuwOaDjYWypmxs4NG98Gz26DXZLBzNJ4PP5/0nNCgvM1fRKQIsalYbteuHUFBQXzyySfs37+fCRMmYDKZ6N+/v6Hd4cOHUzYyERGRAnD5CMzrCxEXjHH3KvDYn9D1dXB0ydk9HZygwwswakXSqHRa0Vdh/r1wYW+u0hYRKapsKpbHjx+Ps7MzL7/8Ms2bN2fz5s107dqV9u3bp7Q5ffo0hw4d4q677sqzZEVE5BbCQ+GHwRB9zRiv1Bie3gC1OuTu/tX94en1UM3fGI8Lgx+GwI3Tubu/iEgRZFOx3LhxYzZt2sSIESPo06cPEyZMYMmSJYY2K1asoFmzZgwcODAP0hQRkVuKDU8qlMPOGuPVWsHIP8Gjct48x6180jzlWp2M8ajL8P1giL6eN88RESkibN7Br2XLlixYsCDL808//TRPP/20rbcXEZHssiTCwpFJ6yGnVa0VPLoEnD3y9nnOHjB8YdJo8umNqfFrx5LWcX5sGdg7Zn29iEgxYtPIsoiIFCEbPoQTa4yx8nXg4V/yvlBO5ugKD34PlRoZ42e2wuqJ+fNMEZFCYPPIcrIzZ84QGhpKXFxclm06d+6c28eIiEhmjq+Bde8ZY27eMGIxlPHO/Jq84loWhi+Cb3olrYyRbOtM8L0raSUNEZFizuZiee7cuUyZMoUzZ87ctm1iYqKtjxERkayEnYfFowFrasxkDw9+lzSyXBC8qsFDP8A3vSExPjX+xxio3Bgq1C2YPERE8olNxfK8efMYPXo0AE2aNKF+/fp4eOTTR30iIpKR1QpLx0JMuhfqek6Emu0zvSTf+LSAvu/Dny+lxuLCYcl/4PG/b7+es4hIEWZTsTx9+nQcHBxYtGgR992nj9lERArcrnlwYq0x1vBeaP9c4eTj/zicCYSgX1JjZ7fB1lnQ4fnCyUlEJA/Y9ILfsWPH6Ny5swplEZHCcP0UrJhgjHlUhQEzwWQqnJxMJug3HcrVMsbXTk3aKEVEpJiyqVguX7483t75/OKIiIhkZLXC0ufAHGWM3zcTXMsVTk7JnN1h4BdAmoI9MQ6WPJO0vJ2ISDFkU7E8YMAANm/ejNlszut8RETkVvb9bFzbGKDlY1CvZ+Hkk17N9tBujDF2YQ/snFs4+YiI5JJNxfK7775LmTJlePzxx7lx40Ze5yQiIpmJuQEr002/8KoBd79TOPlkpfsEqFDPGFszBSIuFU4+IiK5YNMLfi+//DKNGjXip59+4q+//sLf35/q1atjZ5ex9jaZTHzzzTe5TlREpNRbMxmirxpj93yYfxuP2MrRFe79P1hwb2osLiyp0H/g68LLS0TEBjYVy/Pnz0/577CwMNauXZtlWxXLIiJ54Pwu2DnPGGt4LzToUzj53E7tTuD3EAT9nBrb/yu0fARqa6MqESk+bCqWAwIC8joPERHJitUK/7yBYfMRRzfoM63QUsqW3lMg+O+kUeVkK96Ap9Zr7WURKTZsKpa7dOmS13mIiEhWDi+Fs4HGWOdXoGyNwsknu9wrQY+3YPl/U2MX9ye9pNhieOHlJSKSAza94CciIgUkIQ5WvW2Mla2ZccWJosr/cajY0BhbMxniozJvLyJSxOSqWL527RozZsxg+PDh3H333XzwwQcp5w4ePMjSpUuJjo7OdZIiIqXW9q/hxmljrOdEcHAujGxyzt4BeqdbrSPyImz5rHDyERHJIZumYQAsXLiQ0aNHExkZidVqxWQyUa1atZTz58+fZ9CgQSxYsIARI0bkSbIiIqVKbBhs+NAYq94GGg8qnHxsVa8n1O1u3J5786fQ6glwr1h4eYmIZINNI8tbt27l4YcfxsHBgY8//pjt27djtVoNbXr06IGXlxe//fZbniQqIlLqBH4BsTeNsbvfLbwtrXOj91QwpfmRY46CTdMLLx8RkWyyaWT53Xffxc
7OjlWrVtGyZctM29jb29OyZUsOHDiQqwRFREql6OuwdZYx1vBe8G1dOPnkVuXG0GwY7P0hNbbjG2g3FryqZX2diEghs2lkecuWLbRr1y7LQjlZlSpVCA0NtSkxEZFSbessiAtPEzBBtzcKLZ080eU1sHNMPU6Mgw0fZN1eRKQIsKlYjo6OpmLF288zy+1W2DExMbz99tvUr18fFxcXfHx8GDVqFOfPn8/VfY8dO4arqysmk4mePXvm6l4iInku6hps+9IYazwoaXS2OCtXE/wfM8b2fA/XTxZOPiIi2WBTsVytWjUOHjx4yzZWq5UDBw5Qu3ZtmxKLjY2le/fuTJkyhcjISAYMGICvry/z5s2jRYsWnDxp+z+uTz31FHFxcTZfLyKSrzZ/AvGRqccmO+g6vtDSyVOd/gsOLqnHlgTY+HHh5SMichs2Fct9+vQhODiYn3/+Ocs2c+bM4ezZs/Tr18+mxKZOnUpgYCDt2rXj6NGj/PLLL2zbto2PP/6YK1euMGrUKJvu+80337Bu3TqefPJJm64XEclXkZeTlotLq+lQqFi/cPLJa55VoU26f3/3/Qw3zxZOPiIit2FTsfz666/j5eXFo48+ymuvvUZgYNLOUlFRUezZs4e3336b5557jooVK/LSSy/l+P7x8fHMnDkTgFmzZuHu7p5ybty4cfj5+bF+/Xp27dqVo/teunSJV155hV69ejFs2LAc5yUiku82z4CEmNRjkz10ebXw8skP7Z/POLq85dPCy0dE5BZsKparV6/OX3/9hbe3Nx9++CEdOnTAZDKxaNEiWrVqxdSpUylbtixLly6lUqVKOb7/5s2bCQsLo27durRo0SLD+cGDBwOwbNmyHN33hRdeICYmhs8//zzHOYmI5Lvo67BznjHWfBhUqFs4+eQX90rQMt3c5d3fJo2qi4gUMTbv4NeuXTuCg4OZPn06ffr0oWHDhtSvX5/u3bvz3nvvERwczF133WXTvfft2weQ5WobyfGgoKBs33P58uX88ssvvPHGG9xxxx025SUikq+2f520/nAykz10fiXfH2uxWLkRFc+1yDhiEiA+wZLvz6TD88aVMRJiMy6VJyJSBNi8gx+Ah4cHL774Ii+++GIepZPkzJkzQNIIdmaS4yEhIdm6X1RUFM8++ywNGjTgtddesymnuLg4w0uB4eFJSzqZzWbMZrNN9yxtkvtJ/ZUz6recK5Z9Zo7GYftXpN1uxNJoIInu1SAPv44rEXFsPH6VAxciOBwazqmr0dyIjseSsq+UA6/vWE1Fdyd8y7tRr1IZWtcsR+ta5fAp65pneeBWGfumQ7Hbl7rusnXHHBLuGguuZfPuOfmsWH6vFQHqt5xTn9kmL/orV8VyfomMTHoL3M3NLdPzZcqUASAiIiJb95swYQIhISEEBATg5ORkU07Tpk1j0qRJGeIBAQFZ5imZW7VqVWGnUCyp33KuOPVZ7Sur8Iu+ZoitT2xB+PLlub53hBm2Xzax95odZ6Kyt/vflch4rkTGs/vMTX7ZmbRcZzU3K/7eFlp6WynnnOu0KGNuTg9+xERSpW6Kj+T4j69ytOrA3N+8gBWn77WiRP2Wc+qznImOjs71PWwqlrds2UJAQACHDx/mxo0bmEwmypcvT6NGjejWrZvN0y/yw86dO/n000959NFH6dq1q833GT9+POPGjUs5Dg8Px9fXl27dulGhQoU8yLTkM5vNrFq1il69euHo6Hj7CwRQv9mi2PVZohmHL940hCx1e9LxgWdydduDF8L5etNpVh66hDnRevsLbuN8tInzZ+z58yz0aVyZ0R1r0bSaV67uaTVtxXTwt5TjhmEB3PHIdHByv8VVRUex+14rItRvOac+s821a9du3+g2clQsBwUFMWrUKPbs2QMkraWclsmUNGLRpk0bvvnmGxo1amRTUsmrX2T120BUVNKcPg8Pj1veJyEhgSeffJKyZcvy0Ucf2ZRLMmdnZ5ydMw6lODo66ps2h9Rntl
G/5Vyx6bPDv0OYcek0u07jsLMx92OXIpi+6ih/H7iYF9llYLHC8gOXWH7gEl3qV+SNe+6kQZVb/3ucpc7/hTTFsinmBo4HfoW7ns6jbAtGsfleK2LUbzmnPsuZvOirbBfLO3bsoHv37kRFRVGmTBn69u1L8+bN8fb2xmq1cvXqVfbs2cOKFSvYtm0b7dq1Y926dZmuZnE7NWrUAODcuXOZnk+O16xZ85b3OXfuHHv37qVKlSoMGTLEcO7mzZsA7Nq1K2XEed26dTnOVUQkV6xW2PSJMVa9DdRsn+NbRcUl8Mnqo8zdfJpES9Yjye7ODrSvW4Gm1bxo5ONJFS8XypdxwmRJ5J9Va2jVrhOhEWZOXolkZ8gNdpy+zs3ozOf9rT96hY3HrvBQmxq8encDyrrlcKpb5cZQvy8c/Ts1Fvg5tB4NdvY5u5eISD7IVrGcmJjI8OHDiYqK4oknnuDjjz/G09Mz07bh4eGMGzeOuXPn8vDDD3Po0KGUEefsatasGQC7d+/O9Hxy3M/PL1v3u3jxIhcvZj7CcvPmTdavX5+j/ERE8szx1XA53Y6oHV+EHP67GRB8mTd+209oWGym550c7LjXryr3t6hOm9rlcXLIuBiS2WymnDM0qOJBE19HoDJPA4kWK9tOXWPp3gv8GRRKZFyC4TqLFX7cdoZVhy4xbVBTejaqnKPcaT/WWCzfOA3Bf8Od9+bsPiIi+SBbS8f98ccfHD9+nAcffJCvv/46y0IZwNPTkzlz5jBkyBCOHj2a47WQATp06ICXlxcnTpxg7969Gc4vWrQIgP79+9/yPrVq1cJqtWb6JyAgAIAePXqkxEREClz65dK8GySNtGZTrDmRiUsP8vi8HZkWyl6ujvy3d322v9GD6UOb07Ged6aF8q3Y25loX9eb9x7wY/Pr3XmtT0MqemSclnYlIo7R3+7k5V/3ER2fkMmdslCzA1RtZoxpGTkRKSKy9S/msmXLsLOz49133832jadNmwbAkiVLcpyUk5MTY8eOBWDMmDEpc5QBpk+fTlBQEF26dMHf3z8lPnPmTBo2bMj48eNz/DwRkUJx+TCcDDDG2o8Fu+wVs2euRTNw1mbmbzmd4ZyTvR3Pdb+Dja91Y2z3ejmfHpEFL1dH/tO1Lutf6coLPerh4pgx18W7zzFw1mZOXInM3k1NJmg31hg7swXO52yXVhGR/JCtf5F37dpFgwYNqF27drZvXKdOHRo2bJjjLamTTZgwgbvuuostW7ZQr149HnzwQdq2bcvLL79MxYoVmTt3rqH91atXCQ4OJjQ01KbniYgUuG1fGo/dKkDTodm6dMuJq9w3axNHLmZcQrPjHd6seKkzL/dugKdL/rwI5ObkwEu96rP25a50a1Axw/mjlyK577NNrDp0KXs3bDQQPHyMsa3abVVECl+2iuXQ0FDq16+f45vXr1+fCxcu5Pg6ABcXFwICAnjrrbdwc3NjyZIlhISEMHLkSHbv3k2dOnVsuq+ISJEQfR32/WKMtRoFji63vfSn7Wd45JvtGV66c3awY8rAJnz3RBtqe5fJy2yz5FPWlbkjW/PhYD/cnY2vwUTFJ/L0dztZkMnIdwYOTnDXU8bYoSUQlvmL3iIiBSVbxXJYWBheXjlfS9PT0zNlpztbuLq6MnnyZI4fP05cXByhoaHMmzcv0539Jk6ciNVqZf78+dm6d9euXbFaraxevdrm/EREbLZrPiTEpB7bOUCrJ255idVq5bM1xxj/2/4Mq13Uq+TOsuc68kjbmjl+qTq3TCYTQ1r5suy5jjRMt4ScxQr/W3qQqX8ewnKLFToA8B8Jjmk2ebIkwPbZeZ+wiEgOZKtYTkhIwC6bc+gMN7ezIyEhBy95iIiUBolm2DHHGGs8CDyrZnmJxWJl0rJDfLzqaIZzPe+szO9jOlC/so1rHeeR2t5l+P3ZDtzfolqGc3M2neKN3zMW+Qau5aD5cGNs13yIj8q0uYhIQch5BSwiIrlzeBmEnzfG7v
pPls2tVitv/XEg0xf5nu1al9mP+GeYAlFYXJ3s+XhoM8b1yjh17+cdZ/nvwn0kJFqyvkHb/wBpRsZjw2D/orxPVEQkm7JdLC9YsAB7e/sc/fn222/zM3cRkeIp8AvjcfU2UN0/06ZWq5W3/zjID9vOGOImE0y6rzGv9mmInV3BTru4HZPJxPM96vHxkGbYp8vt9z3n+e/CfVlPyahQF+r1Msa2f520eYuISCHIdrGc1XrFt/sjIiJpXNgD57YbY22fybSp1Wpl6l+H+S4wxBB3sDPxyYPNeax9rXxKMm884F+dL4a3xMne+KNmyd4LvPXHgax/RrR+0nh8aT+c3ZZPWYqI3Fq2imWLxWLzn8TExPz+GkREio+dxmUv8fCBO+/LtOmcjaf4ZtMpQ8zezsRnw1owoHnGecFFUe/GVZj9qD/O6TZC+WHbGd7750jmF93RE8rVMsa2f50/CYqI3IbmLIuIFJTM5t+2ehzsM66F/Mfe87yz/LAhZm9n4tOHWtC3adYvAhZFXRtUYvajrXC0N07J+Gr9Sb7bejrjBXZ2GVcGOfQHRF7OvyRFRLKgYllEpKDs+wXM0anHJnto8UiGZluOX+W/C/dliH842I9+fsWrUE7WpX5FPn2oBemnV/9v6UECgjMpgluMAIc0a05bzLBrQf4mKSKSCRXLIiIFwWqFnd8YYw37ZVguLvhiBE9/twtzonE+7+t9G3J/y4xrzBcnfZtW5f0H/AwxixXG/rCbw6Hp1uR3Kw9NBxtju+ZBopYjFZGCpWJZRKQgnNkKV9LN0W01ynB4MzqeJ7/dSUScsSB8rF1Nnu5cMnYtHdLKl+d71DPEouITeWL+Di6Hxxobp3/RL/w8BC/P5wxFRIxULIuIFIQd6UaVy9eF2l1SDhMtVl74eS9nrkcbmvVpXIW3+zcu8F358tNLPesxoLmPIXYhLJYnv9tFXEKal8J9mkP11saL02/mIiKSz1Qsi4jkt8grSS+opdVqVNKLbP+aviqY9UevGJo08y3LJw81z7BWcXFnMpl4/wE/WtUsZ4jvO3uTd/4yvtSYYXT51Hq4fjKfMxQRSaViWUQkv+39PukFtWT2ztD84ZTDfw6EMivghOESb3dnvhrhj4ujfUFlWaBcHO2Z/WgralZwM8S/3RrCH3vT7G7YaAC4lDVevFsbXolIwVGxLCKSnywW2DnPGGtyf9ILbMCJK5G8/Ktx5QsHOxOfD29JFS8XSrLyZZz4coQ/Lo7GH0WvL97P0UsRSQeOLoZfLADY8wMkmhERKQg2FcuTJk3i3LlzeZ2LiEjJc2It3DTuwJf8Yl9cQiLP/biHqHjj5k1v3duINrXLF1SGherOqp5MHdjUEIsxJ/LM97uITH7RseVjxouiLkPw3wWUoYiUdjYXy7Vr16Z///4sXboUi8WS13mJiJQM6ZeLq9w05aW19/8O5lC6JdMeaFmdR9vVLKjsioTB/tUZ1sbXEDt5JYoJv+9POqjUEHzbGi/aNb9gkhORUs+mYnnq1KnUqFGDv/76i0GDBuHr68tbb73F6dOn8zg9EZFiLOw8HP3HGGv1OJhMBBy5zNzNxq2s61VyZ+rAJiVq5Yvs+l//xjSp5mmILdl7IXX+sv9I4wUn1sKNdCP2IiL5wKZi+Y033uDEiROsXLmSIUOGcO3aNd555x3uuOMO+vTpw+LFi0lI0MLxIlLK7f0RrGk+eXNyB7+hXA6PzbBDn5ODHZ893AJXp5L5Qt/tuDja88VwfzxcHAzxCUsOcP5mDDQeCC5eac5YYc93BZqjiJROuXrBr2fPnvz888+cP3+ejz76iAYNGrBy5UqGDh1K9erVef311zl27Fhe5SoiUnxYLBmLuSYPYHF05+WF+7gWFW849Va/O2lYxTiyWtr4lndj6sAmhlhEbALjftlLor0L+D1ovGD3d9rRT0TyXZ6shlGhQgXGjRvHwYMH2bRpE8OGDePy5ct8+OGHNGzYkB49evD777/nxaNERI
qH0xsyvtjX8jF+2H6GjceuGsK9GlVmRNvSNU85KwOaV2Ngug1Ltp26ztcbT2acihF5EY6tKLjkRKRUytOl406cOMGyZctYs2ZNSqx69eoEBAQwePBg2rRpw9mzZ/PykSIiRdPudKPKlRpx1rUh05YbN92o4unCBw/4lcp5ylmZNKAJ1cq6GmIfrwzmYGL1jDv66UU/EclnuS6WzWYzP//8Mz169KB+/fq8//77JCQkMG7cOI4cOUJISAibN2+mb9++7Ny5k7Fjx+ZF3iIiRVf0dTi8zBCytHiEVxYHEZ1umbgPh/hRroxTQWZX5Hm5OjJ9aDPS/v5gTrTy6qIgElo8amx8fDXc1CCMiOQfm4vlw4cPM27cOHx8fBg+fDgBAQG0a9eOb7/9lnPnzvHRRx9Rr149ANq1a8eff/5JmzZtWL9+fZ4lLyJSJO1fCIlxqcf2Tvwa157Ak9cNzR6+qwad6lUs4OSKh7vqVOCZLnUNsYMXwvnmRgtw8kgNWjOZGy4ikodsKpY7duxIkyZN+OSTTzCbzfznP/8hKCiITZs2MWLECJydnTO9rnHjxkREROQqYRGRIs1qzbAdc1Ttu5m05qIhVq2sK2/cc2dBZlbsvNizHvUquRtiHwecI6zeIGPDvT8mvVApIpIPbCqWt2zZQosWLZg9ezYXLlxg5syZNGnS5LbXjR49mrlz59rySBGR4uHCHrh0wBD65Ho7YszG6RcfDPbD3dm4TJoYOTvY88FgP+zSTMeIT7Qw5UIrY8Ows3BKn1qKSP6wqVjesWMHO3fuZPTo0bi5uWX7unbt2vHYY4/dvqGISHGVbkpAlKsPcy7UMMRGtK1Bhzu8CzKrYqtFjXI80bG2IbYotALX3OsbG+79oQCzEpHSxKZi+a+//mLp0qW3bbds2TImT55syyNERIqf+GjYv8gQWhDbEWuaf2qrlXVlfF9Nv8iJcb0aUKtC2oEZE1+FtzM2OrwMYm4WZFoiUkrYVCxPnDiRJUuW3Lbd0qVLmTRpki2PEBEpfg79AXHhKYcWTHwf09HQZOqgJpTR9IsccXWy5/0H/AyxhfHtSCBNPybEwoHFBZyZiJQGebrOcnqJiYnY2eXrI0REio50L/ZtSPTjAqnTLfr5VaVbg0oFnVWJcFedCoxomzqd5QaerExsaWy05/sCzkpESoN8rWQPHjxIuXLl8vMRIiJFw7UTcGaLIfRLYteU//ZwceB/9zYq4KRKllfuboi3e+pqSwsTuxgbXNgNlw8jIpKXsv1Z4KhRowzHmzZtyhBLlpCQQHBwMDt37mTgwIG5SlBEpFjY+6Ph8JrVg9UW/5Tj1/o0pJKnS0FnVaJ4uToyod+dvPjLXgA2WPy4ZC1LZdPN1EZ7voe73ymU/ESkZMp2sTx//vyU/zaZTBw/fpzjx4/f8ho/Pz8+/PBDm5MTESkWLBbY97Mh9EdiB8z//hPbskZZHm5TI7MrJYcGNPfh151n2XLiGonY81tiJ/7jkGa3xKBfoOdEsHcstBxFpGTJdrEcEBAAgNVqpXv37vTp04fXXnst07ZOTk74+PhQs2bNvMlSRKQoO70Bws8ZQosSOwNgb2fi3fubYpd2sWCxmclkYsrAJvT9ZCPxiRYWJnYxFstRV+DYSmjYr/CSFJESJdvFcpcuqXPDHnvsMTp16mSIiYiUWummYBy21OCQtRYAj7StScMqnoWQVMlVt6I7T3epw2drj3PS6sNOS31a2R1NbbDnexXLIpJnbFq/aN68eXmdh4hI8RQbDoeM684njyqXL+PESz3rZ3aV5NKYbnewZO95zl6PYWFiF2OxfHQFRF4Gd608IiK5p3XdRERy49AfkBCTcmi22rMksQMAr9zdAC83zZ3NDy6O9rx5T9LmLn8l3kWM1Sn1pDUxae6yiEgeyNbIcp06dTCZTKxevZratWtTp06dbD/AZDJx4sQJmxMUESnS0k3BWGdpxjW8aFLNk6GtfAspqdLh7sZVaFenAltPwn
LLXTxgvzH15J7vod1YMGmuuIjkTraK5dOnTwNgNpsNxyIipdr1kxnWVk6egjGxf2Ps9VJfvjKZTLzdvxH9Pt3IwsQuxmL5yhE4vxuq+2d9AxGRbMhWsWyxWG55LCJSKqVbLu661Z21lpYMalGNVrXKF1JSpcudVT0Z1qYGP26zEGKpRE27y6kn93ynYllEck1zlkVEbGGxYE03BeOPxA44Ojnzet+GhZRU6TSuV308XJxSRvWTWQ8sBnNMFleJiGSPimUREVuEbMIUdtYQWpzYibHd76CyduorUBXcnXmhZ30WJ3bGYk2d+mKKC4cjfxViZiJSEqhYFhGxgWWPcVT5iMWXm16NGNWhdiFlVLo92q4mrhVrstnS2BBP3P19IWUkIiVFtople3t7m/84ONi0lLOISNEVF0niwSWG0KLEzrzSpyEujvaFk1Mp52hvx1v3NsowFcPu1DoIO184SYlIiZCtStbX1xeTlt8REQEgZt9vuCamzoVNsNpxtHIf3vDzKcSspGuDSnxfpy8RZ+fhYUr6/2PCStTOHyjT49VCzk5EiqscLR0nIiJwZdM8aqQ5Xm9pxrP3dsBOS8UVunH3NOevL9rykH1ASixm+3eU6f6K1lwWEZtozrKISA5cCjlCjfDdhtjRqv1pW6dCIWUkaTXy8eRy3QcMMe+4M4Qe2FBIGYlIcadiWUQkB/b++aXh+Ka1DL0GjSycZCRTDwx4gFPWKobYidWzCykbESnusjUN48yZMwBUq1YNe3v7lOPsqlGjxu0biYgUcQfP3+DOy39Cmk/zj1TsTduqGlUuSqqVc2Od70Bqn0v9xcbv5hr2ngyleZ2qhZiZiBRH2SqWa9WqhZ2dHYcOHaJ+/frUqlUr2y/8mUwmEhIScpWkiEhR8Mcfi3nDdMUQa3j3M4WUjdxKy3v/g+XLr7DDCoCnKYb1S+fR7IXxemFdRHIkW8Vy586dMZlMuLm5GY5FREqLbSevcceFpYZ/Na+51aHCHXcVXlKSJc8qtThX7i6q3whMibW4tpy1R56gx52VCzEzESluslUsr1u37pbH+SUmJoZp06bx888/c+bMGcqXL0+fPn2YMmUK1apVy9Y9EhISmDp1Kjt27ODw4cNcuXIFs9mMr68vvXr14rXXXqNmzZr5/JWISHFmtVr57O+9fGUfaIh7tH1UKywUYZU6j4I/Uv+fdbQ7wCPLN9K1wQPYa+USEcmmIvuCX2xsLN27d2fKlClERkYyYMAAfH19mTdvHi1atODkyZPZvs+kSZPYsGEDVatWpU+fPtx9993Ex8fzxRdf4Ofnx86dO/P5qxGR4iwg+DKVzq+kjCkuJWbBDqcWwwoxK7kdp8b9MTu4pxzbmaw0u76CZfsuFGJWIlLc5FmxfOPGDW7cuIHVas2T+02dOpXAwEDatWvH0aNH+eWXX9i2bRsff/wxV65cYdSoUdm6j4uLC5s2beLGjRts3ryZhQsX8scff3Dy5Elef/11wsPDeeYZzTkUkcxZLFY+XHGUB+w3Gk/U7Q4eVTK/SIoGJzcc/IzLyA2238Anq4IxJ1oKKSkRKW5yVSwvXbqU3r174+7ujre3N97e3nh4eNC7d2/++OMPm+8bHx/PzJkzAZg1axbu7qkjA+PGjcPPz4/169eza9eu297LwcGBDh06ZNh2297enilTpuDi4sKuXbsICwuzOV8RKbmWBV0gPPQEHewPGuJ2LYYXUkaSE6bmxv9PdewuUv7GPhbvOldIGYlIcWNTsWy1Whk1ahSDBg1i9erVREdH4+XlhZeXF9HR0axevZr777+fkSNH2jTSvHnzZsLCwqhbty4tWrTIcH7w4MEALFu2zJb0U5hMJuzt7TGZTDg5OeXqXiJS8pgTLUxfdZT7040qW128oME9hZSV5IhvG6wV7jCEBtuv59M1x4hLSCykpESkOLGpWJ4xYwbz58+natWqfPHFF9y8eZPr169z/fp1wsLC+PLLL6latSrfffcdM2bMyP
H99+3bB0DLli0zPZ8cDwoKsiV9IKngf//994mKiqJbt264urrafC8RKZkW7jxHyLWoDFMwTE0eAEeXQspKcsRkwtT8YUPoXvtAroeF8eO2nO0ZICKlU7ZWw0hv9uzZuLm5sXHjRmrXrm045+HhwVNPPUWvXr1o2rQps2fP5sUXX8zR/ZM3PalevXqm55PjISEhObrva6+9xqVLlwgPDycoKIgTJ05w5513MmfOnNteGxcXR1xc6ss94eHhAJjNZsxmc47yKK2S+0n9lTPqt5zLiz6LNScyY/VRWpmCqWV3yXAuocmDWEvg/48S+73WaDAOa6ZgSrPm8t12O5i51oP7m1fBzcmmH4VACe6zfKZ+yzn1mW3yor9s+hfi1KlT9O7dO0OhnFbt2rXp0aMHK1euzPH9IyMjAVLWdU6vTJkyAEREROTovosXL+bEiRMpx35+fnz//fe3/DqSTZs2jUmTJmWIBwQEZJmnZG7VqlWFnUKxpH7Ludz0WcAFE5ci7HnJYYMhHuFclbV7L8K+5blNr8gqid9r7TwaUyniQMrxYPsN/BHVkTcXrKJXtdy/mF4S+6wgqN9yTn2WM9HR0bm+h03FcsWKFbM1x9fR0RFvb29bHpEvjh8/DsDVq1fZtWsXb775Jv7+/nz99dc89thjt7x2/PjxjBs3LuU4PDwcX19funXrRoUK2uo2O8xmM6tWraJXr144OjoWdjrFhvot53LbZ9HxCUyavhEXIulnv81wzq39aO5p3y+vUi1SSvL3mulgDCx5OuW4g91BfLjKxitVmDSiE56utn29JbnP8pP6LefUZ7a5du1aru9hU7E8aNAgvv/+e27cuEG5cuUybXP9+nXWrl3L8OE5f2M8efWLrH4biIqKApKmfNjC29ubu+++m7Zt29K0aVP+85//0L17d3x9fbO8xtnZGWdn5wxxR0dHfdPmkPrMNuq3nLO1z37acobrUWYG2O3AwxSTesJkh32Lh7Ev4f8fSuT3WuMB8PcrEJc0hc7OZGWQ/SZmxQxkQeBZxvVukKvbl8g+KwDqt5xTn+VMXvSVTS/4TZ06lTp16tC9e3fWrl2b4XxAQAC9evWibt26vPvuuzm+f40aNQA4dy7zpX2S47ndec/Ly4v+/fsTExOjjzVEBIDIuAS+Wp80XWuwvXEKBnW6gqdPwScluefoCk3uN4QG268HrHyz6RTXIuMyv05ESr1sjSx37949Q8zJyYldu3bRq1cvypcvn1K4njlzJmXIu23btgwcOJA1a9bkKKlmzZoBsHv37kzPJ8f9/PxydN/MJE8TuXLlSq7vJSLF34Itp7kRbcaHq3SwM66tTHOtrVysNR8Bu+anHNa2u4S/6Si74hvw5foTvNmvUeHlJiJFVraK5XXr1mV5zmq1cu3atUznhGzduhWTyZTjpDp06ICXlxcnTpxg7969NG/e3HB+0aJFAPTv3z/H905v/fr1ANStWzfX9xKR4i0i1szsDScBGGS/CTtTmhe/nL2gYcmcq1xqVG8FFerBtWMpocH2G9iV0IBvt4bwZKc6VPLUkoAiYpStaRinTp2y+c/JkydznJSTkxNjx44FYMyYMSlzlAGmT59OUFAQXbp0wd/fPyU+c+ZMGjZsyPjx4w33+uuvv9iyZUuGZ0RHR/Pmm2+yfv16qlSpQp8+fXKcp4iULPM2nyYsxgxY//2IPo0m9yd9lC/Fl8kEmay57EIccQkWvtqQ859XIlLyZWtkObdzg20xYcIEVq9ezZYtW6hXrx6dOnUiJCSEbdu2UbFiRebOnWtof/XqVYKDgwkNDTXEd+zYwaRJk6hWrRrNmzfHy8uLixcvsnfvXq5fv46Xlxe//vqrYUttESl9wmLMzNmYVCz5m45SO93aypqCUUI0ewjWTgGrBQAPUwx97HawxNKR7wNDeLpLHSp5aHRZRFLZ9IJfQXBxcSEgIIC33noLNzc3lixZQkhICCNHjmT37t3UqVMnW/e5//77GTduHD
4+PuzYsYNff/2VHTt2ULNmTcaPH8/hw4fp1KlTPn81IlLUzd10ivDYBCCTF/sq1Ev6CF+KP08fqNPNEEr+FCEuwcJX6zW6LCJGtm9blMbNmzeJiIjAas18Yffk1S1yytXVlcmTJzN58uTbtp04cSITJ07MEPfz8+Pjjz+26fkiUjqERZuZu+kUAC7Eca99oLFB84eTPsKXkqH5w3Ai9cXz9naHqMYVzlORH7ZpdFlEjGweWb548SKjR4+mUqVKVKhQgVq1alG7du0Mf7I7AiwiUljmbDpJRFzSqPLdmaytTLOHCikzyRcN+yW9sPmv5DWXAWLNFmZrdFlE0rCpWA4NDaVVq1bMnTsXZ2dnKlasiNVqpW3btlSqVCllhLldu3aa4iAiRdqNqPiUUWXIbG3lblpbuaTJdM3lDUDSz67vt4VwJULrLotIEps3Jblw4QKTJ0/m7Nmz9O3bF5PJxObNmwkNDWXdunU0bNgQk8nE33//ndc5i4jkma83niQqPhEgi7WVH87kKin2WowwHNayu0QrUzDw7+jyhhOFkZWIFEE2Fcv//PMPtWvXZsKECZme79y5MytXrmTPnj1MmTIlVwmKiOSXm9HxLNhyOuVYayuXItX8wbu+IZT2U4XvAjW6LCJJbCqWz58/b9goxN7eHoC4uNR/WKpVq0a3bt349ddfc5ehiEg+mbv5dMqoctLayummYGht5ZIrizWXXYkFNLosIqlsKpY9PT0Nx2XLlgWSiui0XFxcMsRERIqC8Fgz8zanzlVuaTpGbbuLxkZaW7lk83sw6QXOf7mbYuljtyPl+LvAEK5GanRZpLSzqViuUaMGZ86cSTlu0qQJAMuXL0+JRUdHs3nzZqpWrZrLFEVE8t6CzaeJ+HddZYAhDul27NPayiWfpw/U7W4IDXFI/XQhaXRZK2OIlHY2Fcvdu3cnKCiIK1euAHDfffdRpkwZXnnlFV5//XU+++wzunXrxqVLl+jbt2+eJiwikluRcQl8k2ZU2YU4BjpuNzbS2sqlQ7qpGO3tDlKNKynH3249rdFlkVLOpmJ5+PDh3H///Rw6dAiA8uXL89VXX2G1Wvnggw948cUX2bFjB40aNeKdd97J04RFRHLr+8AQbkabU4572+3E1RKV2kBrK5ceDfqBi5chNNhhU8p/x5otfK3RZZFSzaYd/Jo1a8ZPP/1kiA0bNowOHTqwfPlybty4Qf369bnvvvtwdHTMk0RFRPJCTHwiczYai5+nPQP5972uJFpbufRwdIEmg2HnNymhR103MyNiIJD0ycK3W0N4snMdvN2dCydHESlUebLddbIaNWrwzDPP5OUtRUTy1I/bz3A1Mj7luCrXaBS729hIayuXLs2HG4rlCuYL3GV/lG2JDQCIMSfy9YaTjL/nzsLKUEQKkc3bXad348YNbty4kbJ7n4hIURNrTuSr9cblwMZV3o0Jra1cqlVrCd4NDKGXK+4wHH8XGML1qHhEpPTJVbG8dOlSevfujbu7O97e3nh7e+Ph4UHv3r35448/8ipHEZE88evOs1w2bDRhpb91nbFRk0FaW7m0yWTN5VZR63A3pc7NiY5P5JtNmrssUhrZVCxbrVZGjRrFoEGDWL16NdHR0Xh5eeHl5UV0dDSrV6/m/vvvZ+TIkRppFpEiIT7BwpfrjKPKI30v4RJ+ythQayuXTunWXLYzR/NGrWOGJgu2hHAzWqPLIqWNTcXyjBkzmD9/PlWrVuWLL77g5s2bXL9+nevXrxMWFsaXX35J1apV+e6775gxY0Ze5ywikmOLd5/jQlisIfafsumWi6twB1RvXYBZSZHhWRXq9jCEBtmtN6weGBmXwNzNpws2LxEpdDYVy7Nnz8bNzY2NGzfy9NNPG3b08/Dw4KmnnmLjxo24uroye/bsPEtWRMQW5kQLn687boh1rOlG5TPLjQ21tnLplm4qhuv5LTx2p/H7Yd7mU4TFmBGR0sOmYvnUqVP06NGD2rVrZ9mmdu3a9O
jRg1OnTmXZRkSkIPyx9wJnr8cYYm/VOQZx4WkiJvDT2sqlWoN7Mqy5PLbCTsPvTxGxCSzYcrpg8xKRQmVTsVyxYkWcnJxu287R0RFvb29bHiEikicSLVZmBRhHlZv7lqX+hd+NDe/oAV7VCjAzKXIcXaDpEEPI+9gi+jWubIh9s+kUEbEaXRYpLWwqlgcNGsTatWu5ceNGlm2uX7/O2rVrGThwoK25iYjk2p9BFzh1NcoQe621PaaQLcaGLR4pwKykyEq/xvbNEP5753VDKCzGzLdbQwowKREpTDYVy1OnTqVOnTp0796dtWvXZjgfEBBAr169qFu3Lu+++26ukxQRsYXFYmXmWuOocpNqnrQN+9vY0K1C0kfwIj4toWJDQ6jW2SXcnW50ec7Gk0TFJRRkZiJSSLK1g1/37t0zxJycnNi1axe9evWifPny1KxZE4AzZ85w7do1ANq2bcvAgQNZs2ZNHqYsIpI9Kw5d4tjlSEPs+a61Ma34ydiw2TBwuP3UMikFktdcXvV2auzgEl4YPoEVBy+lhG5Em/k+MIRR7WsUQpIiUpCyVSyvW7cuy3NWq5Vr166lFMhpbd26FZPeLBeRQmC1whfrjS8YN6ziQU+HfRB5ydhYUzAkLb8HYfVEsFqSjs1RNLq5jp531mP14cspzWZvOMmwVprnLlLSZatY1ooWIlLcHLhh4sjFCENsbPc7sNv7srFh9dZQyfixu5RyHlXgjp5wbGVqbO+PPNfje0OxfC0qnp93nqNyJrcQkZIjW8Vy8hQLEZHiwGq1suKc8ZWMuhXL0LemCX5fYWysUWXJTPOHjcXy6Y00c79J1wYVWRd8JSX89cZTvNqoEPITkQJj0wt+IiJF2YZjVzkbZZwCNrb7HdgH/QTWxNSgYxlocn8BZyfFQoN7wKWsMbb3J57rXs8QuhIZT+BlTTcUKclyVSxfunSJadOmcc8999CsWTOaNWvGPffcw3vvvcelS5dufwMRkTxmtVqZte6kIVazghv9m1aFPd8bGzcZBM4eBZidFBsOzhnWXGbvj/j7etKpnnH/gNXn7YgzJyIiJZPNxfLixYupX78+EyZM4J9//mH//v3s37+ff/75hzfffJMGDRqwePHivMxVROS2tpy4xp6zYYbYmG534HAuEK6fMDZu8WgBZibFTvo1l8POwIkAnu9hHF0OM5tYtOdCASYmIgXJpmJ5586dDBs2jKioKAYNGsTvv//Onj172Lt3L0uWLOH+++8nMjKShx9+mJ07d+Z1ziIiWfp0zTHDcfVyrgxqUQ32fGds6F0ffNsUYGZS7Pi0gCp+xtiuebSuVZ52dSoYwl9tOEV8gqUAkxORgmJTsTxt2jQSExNZuHAhixYtYsCAATRr1gw/Pz/uu+8+Fi5cyMKFCzGbzbz33nt5nbOISKa2nbzGtlPG3db+07UujuYIOLjE2LjFI0lr6opkxWQC/5HGWPDfEHExw+hyaFgsi3efK7jcRKTA2FQsb9q0ifbt2zNo0KAs2wwaNIgOHTqwceNGm5MTEcmJz9Lt1lfF05nB/tVh/yJIiEk9YeeQtBGJyO00HQKObqnH1kTY8z1t65SnTa3yhqazAo5jTtToskhJY1OxHBYWRo0at9+1qEaNGoSFhd22nYhIbu0+c4NNx68aYk92qo2zg33GKRj1+4B7xQLMTootF8+MK6bs/haT1cpzPe4whM/diOH3PecLMDkRKQg2FctVqlRhz549t223d+9eqlSpYssjRERy5LN0c5U9HK0M9a8GF/fDhXT/XrV8rAAzk2LP/3Hj8c0QOLWOjnd409zXy3BqVsBxEjS6LFKi2FQs33333QQHB/PGG2+QmJhxuRyr1cqECRM4cuQIffr0yXWSIiK3sv9cGAFpNooA6OFjwcXRHnbOMzb28IE7ehRgdlLsVfOHyk2MsV3zMZlMjO1axxAOuRbN0n1aGUOkJMnWDn7pvfXWW/z222+8//77/PTTTwwdOpRatWoBEB
ISwsKFCzl9+jQVKlRgwoQJeZmviEgGn601jiqXc3OkfeUEiI+EoF+NjVs+Anb2BZidFHvJL/ot/29q7MhfEHmZzvW88S1jNWyCM3PtcQY0r4a9nV4gFSkJbCqWq1evztq1axk+fDgHDhzgww8/xPTvW+VWqxWApk2b8sMPP1C9evW8y1ZEJJ3DoeGsPGTcBOmJDrVwjjyM6eBvEB+ResJkBy21trLYoOkQWPlW6ouilgTY+wOmu8bSp7qFr4NTfwE7eTWKP4MuMKB5tUJKVkTykk3FMiQVw0FBQaxbt46NGzdy4ULSx04+Pj506tSJrl275lWOIiJZmpluBQwvV0eG3+XLhjWHsdu9wNi4Xm/w0i/wYgPXstB4EOz7MTW2+1to8yyNy1m5s4oHhy+m/mL22drj9PfzwU6jyyLFnk3F8v3330/VqlWZNWsWXbt2VWEsIoXi2KUIlh8INcSe6Fgbd2cHykafxO7iPuMFrUYVYHZS4viPNBbL109iCtmEyQRju9VhzE+p32/HL0fy94GL9POrWvB5ikiesukFv+XLl3Pt2rW8zkVEJEdmBhzn35lfAHi4OPBY+1oA1LwaYGzs5Qt39Cy45KTk8W0DFe80hOz2fAtAz4aVaFjFw3Dus7XHsFisiEjxZlOxXLt2baKiovI6FxGRbDt5JZJl6VYdeLx9LbxcHSE2nOo3Ao0XtHxML/ZJ7mSyo5/pyF84mcOxszPxXHfjrn5HLkZkmE8vIsWPTcXysGHDWL9+PRcvXszrfEREsmVWwAnSDtqVcbJnVMfaANgdWISDJS71pMkeWowo4AylRPIbCg4uKYcmixnf65sA6NukCvUquRuaf7rmWMqL7yJSPNlULI8fP55OnTrRpUsXfv/9d8xmc17nJSKSpTPXolmy17hT2qPta1HWzQmsVuz2pHuxr0Ff8NTcUckDbuWh0QBDqNa1dUnfd3YmxnY37up3KDScNYcvF2CCIpLXbHrBr0GDBlgsFs6ePcvgwYMxmUxUqlQJFxeXDG1NJhMnTpzIdaIiIsk+X3ecxDTDyq6O9oz+d1SZczsxXT5ovEAv9kle8h8JQb+kHLrHXSTh9Aao35N7/XyYsfoYJ6+mTlX8dO0xetxZKWWJVREpXmwqlk+fPm04tlqtmpIhIgXi3I1oFu06Z4iNaFuDCu7OSQc75xovKFcL6nQrmOSkdKjRDio2hCtHUkJ2O7+B+j2x/3d0edyvqStjBJ0LY/3RK3RtUKkwshWRXLJpGobFYsnRHxGRvPLFuhMkpBlVdnaw48nO/245HHMDDv5mvKDlY2Bn0z91IpkzmaD1aGPo2D8QlvRL3H3NfKhZwc1wfobmLosUW/oJIiLFRmhYDAt3GkeVh7WpQSWPf6eA7fkBEmJTzlntHPVin+QPvwfBKfVlPpPVAjvnAeBgb8eYbsa5y3vO3GTzcS25KlIc5ahYXr58OU899RR9+/Zl4MCBvP3225w6dSq/chMRMfhq/UniE1M/rXKyt+OZLnWTDiwW2PG1ob21YT9w10ffkg9cPKHZQ8bY7gWQkLQKy6AW1aheztVwesaaoxpdFimGsl0sDx8+nP79+/PNN9+wYsUKli5dyjvvvEPjxo1ZunRpfuYoIsLl8Fh+2n7GEHuwtS9VvP4dVT6+Gm6cNpy3tHqygLKTUindVAyirsDhZQA4ZjK6vOP0DQJPXi+o7EQkj2SrWP7mm2/46aefsLe3Z+TIkXz66ae88847tG3bltjYWB599FHCwsLyO1cRKcVmbzhJXELqqLKjvYlnutZNbbB9tqH9TdcaWKu3Kaj0pDSqdCeWGu2Nse2pn2480LI6Pl7GVaI+XXOsIDITkTyUrWJ5wYIF2NnZ8ffff/PNN98wduxYxo8fz+bNm3nssceIiIjgt99+u/2NRERscDUyjh+2GUeVB/tXp1rZfz/mvnYCjq8ynD9VsVfSi1gi+cjS6glj4GwgXNwPgJODHf9JN7q89eQ1tp/S6LJIcZ
KtYnn//v20bduWHj16ZDj3xhtvYLVa2b9/f54nFxMTw9tvv039+vVxcXHBx8eHUaNGcf78+dtf/K+bN2/y448/MmzYMGrXro2TkxMeHh7cddddzJgxQxuqiBQDczaeIsacmHJsb2fiP13SFCE7vjG0t7qU5Xy5tgWVnpRi1vr3EONYzhjcMSflP4e2qk4VT+Po8mdrNbosUpxkq1gODw+nbt26mZ5LjoeHh+ddVkBsbCzdu3dnypQpREZGMmDAAHx9fZk3bx4tWrTg5MmT2brPRx99xPDhw/nll18oV64c999/P23atGHfvn28+OKLdO/enejo6DzNXUTyzo2oeL7betoQG9SiGjWSl+aKj4I93xvOW5oPJ9HOuYAylFLN3pGQCl2NsaBfIeYmAM4O9jzTpY7h9MZjV9kVcqNg8hORXMtWsWy1WrG3t8/8Bv+uX5rX6ylPnTqVwMBA2rVrx9GjR/nll1/Ytm0bH3/8MVeuXGHUqOztyFWmTBleffVVTp8+ze7du/n5559Zs2YN+/fvp0aNGmzatImpU6fmae4iknfmbj5FVHzqqLKdCeOLU0G/QlzadyZMWFo+XnAJSqkX4t0Vq12aPb7M0bDvp5TDh9rUoKKH8Zc3jS6LFB9Fcp3l+Ph4Zs6cCcCsWbNwd09dy3LcuHH4+fmxfv16du3addt7jR8/nvfff58aNWoY4vXq1eO9994D4KeffsrsUhEpZGExZuZvPm2I3dfMh9reZZIOrFbDC1UA1L87adc+kQIS61gOa4N+xuD2r5OWMwRcHO15urNxdHld8BX2nb1ZQBmKSG5ku1hesGAB9vb2mf4xmUxZnndwyPmO2ps3byYsLIy6devSokWLDOcHDx4MwLJly3J877SaNWsGwIULF3J1HxHJH/M2nyIiLiHl2GSCsd3TjCqHbIHLB40XtdFycVLwLP7pPu28fiJpOcN/Db+rJt7uToYmGl0WKR6yXSxbrVab/tgyPWPfvn0AtGzZMtPzyfGgoKAc3zut5HnPVapUydV9RCTvhUWb+WaTcdOje5pW5Y5KHqmBdMvFUb4u1OleANmJGFlrtIdKjY3BwM9T/tPVyZ4nOxlHl1cfvsyB81p2VaSoy9awb17PR76dM2eSloiqXr16pueT4yEhIbl6zowZMwAYMGDAbdvGxcURFxeXcpz8QqPZbNaKGtmU3E/qr5wprf02e8NxImKNo8rPdq6V2g/hF3A4vIy0i8Ml+o/CkphYavsst9RvOZfSZwkJmFo/hcNfL6SePBmA+XwQVLoTgAf9ffhy/QluRKf274zVR/n84eYFmXKRoO+1nFOf2SYv+ivncyQKQGRkJABubm6Zni9TJmm+YkREhM3P+PLLL1m9ejVly5bl9ddfv237adOmMWnSpAzxgICALPOUzK1ater2jSSD0tRvUWaYs8ce0pTCzctbOL5rI8f/PW50/hfqWVNf/Euwc2LFxfIkLF+eEitNfZaX1G85t2rVKuwsZejt4IFzQurPpvO/TWBfjdS1mDt4m/jzTOoL86sOX+brhcupVqZA0y0y9L2Wc+qznMmLFc+KZLGc3zZu3MgLL7yAyWRi7ty5+Pj43Paa8ePHM27cuJTj8PBwfH196datGxUqVMjPdEsMs9nMqlWr6NWrF46OjoWdTrFRGvtt+qpjxCWmTsEwmeCdhztSr9K/L/vGR+Lw6VjDNaYWj9C7zxCgdPZZXlC/5Vz6PrPzOAKbPko5X/NmINUe+QrKeAPQKTaBjdM3EBaT+qnJ/sRqPHlPswLPvTDpey3n1Ge2uXbtWq7vUSSL5eTVL7L6bSAqKgoADw+PTM/fyoEDBxgwYADx8fF8+umnDBo0KFvXOTs74+yccd1WR0dHfdPmkPrMNqWl365HxfNtoHG3vvua+dCoWpqNH3b/CnFp13Y3Yd9+DPbp+qe09FleU7/lXEqf3fUUbJkBlqSPfk2JcTju+x66vAJAeUdHnuhYh+mrjqZc+8+hS5y6Hk
v9yjn/mVbc6Xst59RnOZMXfVUkl45LXubt3LlzmZ5PjtesWTNH9z116hS9e/fmxo0bTJw4keeeey53iYpInpu94WSGdZWf71EvtYEl0fDiFAAN+kKFzDdOEilQHpWh6WBjbMfXkBCfcvhY+1p4uKSOVVmtMHPtcUSkaCqSxXLykm67d+/O9Hxy3M/PL9v3DA0NpVevXoSGhvLCCy/wv//9L/eJikieuhoZx4Itpw2xgc2rUbdi6lrrBC+HG8Y2tBuT77mJZFvb/xiPIy/Bwd9SDr1cHXm8Q21Dk2VBFzh+ObIgshORHCqSxXKHDh3w8vLixIkT7N27N8P5RYsWAdC/f/9s3e/GjRvcfffdnDhxgscff5z/+7//y8t0RSSPzN5wkhhz6qiyvZ2J59KOKgNsTTeqXLUZ1OxQANmJZFPVZlCzozG2dVbSEPK/RnWohbuzcXT58wCNLosURUWyWHZycmLs2KSXd8aMGZMyRxlg+vTpBAUF0aVLF/z9/VPiM2fOpGHDhowfP95wr+joaPr168f+/fsZOnQoX3/9NSaTCREpWi5HxPLt1tOG2KAW1VJ36wM4vwvObDFe2G5s0huAIkVJ+tHli0FwZmvKYVk3Jx5rb5xKuGTveU5fjUJEipYi+YIfwIQJE1i9ejVbtmyhXr16dOrUiZCQELZt20bFihWZO3euof3Vq1cJDg4mNDTUEH/zzTfZunVrym6CTzzxBJmZP39+fn0pIpINX60/Saw5dU13ezsTz6XdrQ8yjip7+ECjgfmfnEhONegLZWvCzTT7AWydBTXbpxw+0bEO8zafJvrfOfoWK8wKOM6HQ0rXyhgiRV2RLZZdXFwICAhg2rRp/PjjjyxZsoTy5cszcuRIpkyZkuWGJenduHEDgMTERH788ccs26lYFik8l8Jj+T7QuMnQ4JbVqVkhzahy2Dk4+LvxwjZPgoNxC2GRIsHOHu56Blak+bTzyF9w9Th4J/0SWL6ME4+0q8lX60+mNPltz3me71EP3/Jav1+kqCiS0zCSubq6MnnyZI4fP05cXByhoaHMmzcv00J54sSJWK3WDEXv/Pnzs7Utt4gUni/WnSAuIXVU2cHOxNj0o8rbvoI0m5Dg6Ab+IwsmQRFbtBgBzp5pAlbY8qmhyZOd6uDimPqjONFiZZbmLosUKUW6WBaRki80LIYftxvXVR7Sytc4shYbDrsWGC9sPhzcyhdAhiI2cvGEVo8bY/t+goiLKYfe7s6MuMs4d3nhrnOauyxShKhYFpFC9XnACeLTjCo72mcyqrxrHsSFpQmYMr5AJVIUtX0W7NNMFUqMh8AvDE2e6lwHZwfj6PL/rT6KiBQNKpZFpNCcvR7NzzuMo8oPtvalWlnX1IA5NunFqLQa9tMmJFI8eFSBZg8ZYzvnQmzqL3+VPF0Y2b6WocnSfRc4cjEcESl8KpZFpNB8svoY5sTUdwac7O0Y0y3dqPK+H5M2dUir47gCyE4kj7R/AUizvGFcOOycZ2jyTJe6GdZd/nilRpdFigIVyyJSKI5diuD3PcYt7Ye3rUFVrzSjyokJsHmG8cLanaG6PyLFhvcdcOe9xljgF5AQl3JYrowTT3aqY2iy6tAl9py5URAZisgtqFgWkULx0cpgLGkWonFzss84qnxoScatrTWqLMVRh5eMx5EXYd/PhtCojrUo5+ZoiH20Mji/MxOR21CxLCIFbu/Zm6w4aJxaMbpjbbzdnVMDVits+sR4oU8LqNM13/MTyXPV/aFWJ2Nsy6dgSV0O0cPFkWe7Gn9h3Hz8GluOXy2IDEUkCyqWRaTAfbTCOFpW1s2R0Z2NH0FzbBVc2m+MdRynra2l+OrwovH42vGkjUrSeKRdTSp7OhtiH64M1n4AIoVIxbKIFKgtx6+yKd1I2bNd6+LpYvz4mU3TjccV6kHDdPM+RYqTO3pA5abG2KbpSZ+i/MvF0Z7ne9QzNNlz5iZrDl8uiAxFJBMqlkWkwFitVt5PN6pcxdOFR9
vVMjYM2QpnthpjHV8EO/2TJcWYyQQdXjDGLuyB42sMoaGtfKmRbrvrj1YGY7FodFmkMOgnj4gUmJWHLrHv7E1D7Pke9XBxtDc23PR/xmPPatB0aP4mJ1IQGg+CcrWNsfXvGUaXHe3tGNervqHJkYsRLAu6UBAZikg6KpZFpEAkWqwZ5irXquDGkFbVjQ1D98GxFcZY++fAwQmRYs/eATr/1xg7twNOrDWE+jfzoX5ld0Ps/1YdxZxoQUQKloplESkQS/ac59jlSEPspV71cbRP98/QuveNx67loeWj+ZydSAHyexDK1jTG1r9vGF22tzPxcu8Ghianr0Xz83bjjpcikv9ULItIvotPsPB/q427kd1Z1ZP+fj7GhqH7INi4OgDtnwOnMvmcoUgBsneETi8bY2e3wan1hlDvRpVp5lvWEPtk9TEi4xLyOUERSUvFsojku+8CQzh3I8YQe+Xu+tjZpVsGLrNR5TZP5nN2IoWg2TDwqmGMrTOOLptMJsb3bWhoci0qntnrTxREhiLyLxXLIpKvwmLMfLb2mCHWqmY5ujWoZGyY1aiys0c+ZyhSCBycoFO63SjPbIHTGw2htnUq0PNO49+Vrzee4lJ4bH5nKCL/UrEsIvnq83XHuRltNsTG39MQU/rNRdZ/YDzWqLKUdM2Hg2e6F1zTf7oCvNanIWk/hIkxJ/JJumlNIpJ/VCyLSL45dyOaeZtPG2J9m1TBv2Z5Y8PQfXDkT2Os/ViNKkvJ5uAEnV4yxkI2welNhlC9yh482NrXEPtlx1mOXYrI7wxFBBXLIpKPpq88SnxC6lJXDnYmXu3TMGPDde8Zj13LQZun8jk7kSKgxSNJ64intc647jLASz3r45pmPXKLFd7/50hBZChS6qlYFpF8ceB8GL/vPW+IDb+rBrW9061scXYHBC83xjRXWUoLB2fomG50+fRGOBlgCFXydOHJznUMsdWHLxN48lp+ZyhS6qlYFpE8Z7Vamfb3YcPgmIezA8/3qJe+IayZZIy5VdCospQumY0ur5mcYXT5qc518HY3bs4zbflhrFZtgy2Sn1Qsi0ieW3f0CpuPG0e8nulalwruzsaGJ9dlePufTi9rVFlKF0cX6PKaMXZhDxxeZgi5OzvwQk/jNtj7zoXx1/7Q/M5QpFRTsSwieSrRYuW95ca5lFU8XRjVobaxodWaNHqWlmc1aPVEPmcoUgQ1Hw4V7jDG1k4FS6Ih9FBrX+qkm8r0wT/BhncDRCRvqVgWkTz1y46zBKd7S//l3vVxdbI3Njy8DC7sNsa6vJY0yiZS2tg7QLc3jbGrwRD0iyHkaG+X4SXZM9ejWbDldD4nKFJ6qVgWkTwTHmvm45XBhljDKh7c3zLdWrKWxKRRs7Qq3JE0uiZSWjUaCFWaGmMB0yAhzhC6u3Fl/GuWM8Q+XXOMq5HGdiKSN1Qsi0ie+WzNMa5FxRtib93bCPv021rv+ylp1Cytbm8mja6JlFZ2dtDjf8ZY2BnYOc8QMplMvHVvI0MsIi6Bj1dqoxKR/KBiWUTyxKmrUcxP91Fwr0aV6XCHt7FhfHTGUeUqfkmjaiKl3R09oUZ7Y2z9+xBz0xBq7luW+1sYV9D4ZccZDl0Iz+cERUofFcsikife+esQ5sTUJawc7U28ec+dGRtunQkR6d7e7/G/pFE1kdLOZIKe6UaXY67Dxo8zNH21T8MMG5VM/vOglpITyWP66SQiubbx2BVWH75siI3qUJta6TcgibgEmz4xxup2h3o98zdBkeKkRlu48z5jbNuXcCPEEKri5cKzXesaYoEnr7Pi4MX8zlCkVFGxLCK5kpBoYcqfhwwxb3cnxna/I2Pjde+COSpNwAS9puRvgiLFUc+JYJdmDn9ifMalFoEnO9ehWllXQ+yd5YeJNSdmaCsitlGxLCK58uP2Mxy9FGmI/bd3AzxcHI0NLx2C3d8aYy2GQ5Um+ZyhSDFUoS60ftIYO7AIzu0yhFwc7Rl/j3EpubPXY5i7+V
R+ZyhSaqhYFhGb3YiKZ/oq4xv4jap6MqSVb8bGq94Ga5qNExzdMq4rKyKpurwKLl7G2Mo3M2yD3a9pVdrUKm+IzVx7nEvhsfmdoUipoGJZRGz24cpgbkabDbH/9c9kqbhjq+H4KmOs/XPg6ZPPGYoUY27lofMrxtiZrXBoiSFkMpl4u38jTGn+2kXHJ/LOX4fzP0eRUkDFsojYZN/Zm/y0/Ywhdk/TKtxVp4KxYUI8/POaMeZeGdo/n88ZipQAbZ6CsjWNsRUTID7KEGpSzYuh/sZPdJbuu8CW41fzO0OREk/FsojkWKLFyoQlBwyfBrs52TOhX6OMjbd9AdeOG2Pd3wJn9/xNUqQkcHCG3ulegg0/B5v+L0PTV/s0wMvV+K7A20sPEp9gydBWRLJPxbKI5NhP28+w/3yYIfZ8j3r4pHsrn/BQWP+BMVbNX9tai+TEnfdB7c7G2OZP4brxJb4K7s68cncDQ+z45Ui97CeSSyqWRSRHrkXG8eEK41bVd1RyZ1SH2hkbr/4fxBtXyqDvh9qARCQnTCbo+wGYUjcgITEOVmR8QXZYmxr4VTe+FPjpmmNcuBmT31mKlFj6iSUiOfLe30cIizG+1Dd5QGOcHNL9c3ImEIJ+McZajIDq/vmcoUgJVOlOuOtpYyz4Lzi+2hCytzMxZUCTDC/7Tf3LuBa6iGSfimURybZdIddZuOucIXZfMx/a1/U2NkxMgOXp3uJ39kza1lpEbNPlNXBL93ft79cgIc4QauZbloda1zDElu+/yIajV/I7Q5ESScWyiGRLQqKFCUsOGmLuzg682e/OjI23fwUXg4yxruPBvVI+ZihSwrmWTdrZL61rxzN/2e/uBpRzM77s97+lB7Wzn4gNVCyLSLZ8s+kUh0PDDbEXe9ajsqeLseHNs7D2HWOs4p3QJt1uZCKSc82HQ7VWxtjGj+HqMUOoXBknXutj3Nnv1NUoPg9ItzKNiNyWimURua3TV6My7NTXoLIHj7WvZWxotSZNvzAb14Cl/ydgn277axHJOTs7uPf/0r3sFw9/vpRhZ7+hrXxpUaOsIfb5uhMcuWj8pVdEbk3FsojcktVq5c0l+4lLs1aryQTTHmiKo326f0IOL4Ojfxtj/iOhRtv8T1SktKjqB+2eNcZOb4R9PxlCdnYm3h3UFIc0O2omWKy8vng/iRZjYS0iWVOxLCK3tGjXOTYfv2aIPdauFi1rlDM2jA2Hv181xspUzDjHUkRyr+t48DLu2MeKNyHK+Hf1zqqePN2ljiG29+xNvtt6Op8TFCk5VCyLSJauRMQx9a/DhpiPlwv/TbfxAQBrJkFEqDHW5z1wLZexrYjkjlMZuOcjYyzmOqx4I0PT57rXo7Z3GUPsgxXBnNfayyLZomJZRLI0+c9DGdZUnjKwCe7ODsaGpzbAjjnGWN3u0OSBfM5QpBRr0AcaDTDGgn6GYONUKBdHe6bd39QQi45PZMLv+7FaNR1D5HZULItIptYcvsSyfRcMsf7NfOhxZ2Vjw7hI+GOsMebgCv0+xrAzgojkvT7vg7Nxxz6WvQgxNwyhtnUqMKyNcdpGQPAVlgWl+zRIRDJQsSwiGYTHmpmw5IAh5uXqyNv3NsrYeM0kuBlijPX8H5Svk7GtiOQtz6rQ511jLPIi/JNxOsbrfe+kooezITZp6UGuRcZlaCsiqVQsi0gGU5YdIjQs1hCb0C/jD1pOb4Lts42xGu2gTbpteUUk/zQfDnf0NMb2/QhHVxhCXq6OTL6vsSF2LSqeCUsOaDqGyC0U6WI5JiaGt99+m/r16+Pi4oKPjw+jRo3i/PnzObrP+vXrmTRpEv369aNixYqYTCZq1aqVP0mLFHOrD13KsKV1hzsqMNi/urFhfBT8McYYc3CFAbOS1oIVkYJhMkH/T5O2lE9r2QsZpmP0aVKF3o2MU6n+PnCRpemmXIlIqiL7Ey02Npbu3bszZcoUIiMjGTBgAL6+vsybN48WLVpw8uTJbN/rhRdeYO
LEiSxfvpyrV6/mY9YixduNqHhe/22/Iebu7MD7D/hhSj//+J/xcOO0MdbjbahQN3+TFJGMvKrB3emmY0SEwl8vGzYrMZlMTB3UJMNW2G//cZBL4cZPk0QkSZEtlqdOnUpgYCDt2rXj6NGj/PLLL2zbto2PP/6YK1euMGrUqGzfq3fv3kydOpUVK1Zw8ODBfMxapHh7648DXE03f/HtextRvZybseHhP2H3AmPMty3cpekXIoWmxYiM0zEOLIagXw2hSh4uTB1oXB0jLMbM64uDNB1DJBNFsliOj49n5syZAMyaNQt3d/eUc+PGjcPPz4/169eza9eubN3vgw8+4M0336R3796UL18+X3IWKe6W7bvAn+nejO/esBJDWqWbfhEeCkufM8Ycy8DAz8HOHhEpJMnTMVzKGuN/vZzhU6B+flXp38zHEAsIvsKvO8/mb44ixVCRLJY3b95MWFgYdevWpUWLFhnODx48GIBly5YVdGoiJdLliFje+iPj6hfv3d/UOP3CYoE/nk3a/CCtvu9r+oVIUeBVDfp/YozFR8BvT0FigiE8+b7GGV7anbzsEGevR+dzkiLFS5Eslvft2wdAy5YtMz2fHA8KCiqwnERKKqvVyvjF+7kZnXHzkUqeLsbG27+CE2uNsTvvS/r4V0SKhsaDklbISOvsNtg03RAqV8aJ9x8wTseIik/klUX7SLRoOoZIMofbNyl4Z86cAaB69eqZnk+Oh4SEZHo+P8TFxREXlzqXMzw8HACz2YzZbM7qMkkjuZ/UXzmT3/32/bYzrDly2RDr27gyfe70Nj4zdC8Oq94m7Wt+VvcqJPT5CBKMI1aFTd9rtlG/5VyR7bOeU3E4vRnTzdMpIeu690is3hZrjXYpsU51yzPEvxoLd6WuMhV48jpfBBzj6c618y29IttvRZj6zDZ50V9FsliOjIwEwM3NLdPzZcok7XEfERFRYDlNmzaNSZMmZYgHBARkmadkbtWqVYWdQrGUH/12Pgqm77eHNCWwu6OVjq7n+fvv1B+eDglRdA1+G8fEeMP1W6o8xtV1gXmeV17R95pt1G85VxT7rFylR+l4cyp2WAAwWRMx//wo6xpMId4xdZm5Vnaw2smeG/Gp/w5MX3WUxNDD1PLI3xyLYr8VdeqznImOzv20oiJZLBdF48ePZ9y4cSnH4eHh+Pr60q1bNypUqFCImRUfZrOZVatW0atXLxwdHW9/gQD512/R8QkM+mIbCdYoQ/z/HmpJ1/oVUwNWK/aLHsMu/oqhXWLbsbTp8Uqe5ZOX9L1mG/VbzhX1PrNujIcN76Ucu5pvcHfUQhIf+tXwQm51v+s8MncnybMvLJhYeM6dpWPa4eGS919XUe+3okh9Zptr167l+h5FslhOXv0iq98GoqKSfrh7eOTzr7xpODs74+zsnCHu6Oiob9ocUp/ZJq/77b1lhzl51VgoP9GxNr0aG9+QZ+ssOLrcGPO9C/teE7G3L9r/H/W9Zhv1W84V2T7r+iqc2wYnA1JCdqfWY7d1BnR9LSXWoV5lnutejxlrjqXEzt2M5e1lR/hsWIuM66znkSLbb0WY+ixn8qKviuQLfjVq1ADg3LlzmZ5PjtesWbPAchIpSf4KCuWn7cYlohr7ePJqnwbGhme3w6q3jTHX8jB4HhTxQllESBo9vv9r8KhqjK+bBicCDKHnut9B61rlDLE/g0Iz7OgpUtoUyWK5WbNmAOzevTvT88lxPz+/AstJpKQ4ez2a138zriTj5mTPZ8Na4OyQZp3kiIvw66NgSfvyninpB69XtYJJVkRyz70iDJ4LprTroFth8RNwI/VFeQd7Oz55qAVersZfhP/3x0GOX44soGRFip4iWSx36NABLy8vTpw4wd69ezOcX7RoEQD9+/cv4MxEirf4BAsv/LyHiNh0660OaEKdiqmb/5AQB7+MSNouN63O/4V66XYIE5Gir2b7pO3o04q+Bj8Ph/jU6VjVyrpmWE4uxpzIcz/tISY+sS
AyFSlyimSx7OTkxNixYwEYM2ZMyhxlgOnTpxMUFESXLl3w9/dPic+cOZOGDRsyfvz4As9XpLh4d/lhdp+5aYgNbO7DAy3TjBRbrfDXODi3w3hx7S7QVX+/RIqt9s9Dg3uMsUv7YcmzSX/v/9WnSVWG31XD0OxwaDhvLtmv7bClVCqSL/gBTJgwgdWrV7Nlyxbq1atHp06dCAkJYdu2bVSsWJG5c+ca2l+9epXg4GBCQ0Mz3GvOnDnMmTMHSF1vLzQ0lLZt26a0+fzzz7PcBEWkJFi67wLzt5w2xGqUd2PKwCbGl3e2fw17vjdeXLYmDJmv7axFijM7Oxj0FczpCVeDU+OHliRtWNLp5ZTQW/c2Ysfp6xy9lDr94rfd52lZoxwj2up9ISldiuTIMoCLiwsBAQG89dZbuLm5sWTJEkJCQhg5ciS7d++mTp062b7XuXPn2LZtG9u2bUuZ7xwfH58S27ZtW8omIyIl0bFLEby+2DhP2cnBjs+HtzQuC3VyHfzzuvFiRzd46EdwK5//iYpI/nLxTPr77OxljK+ZAkdSV71xcbTn8+H+lHEy/oI8adlBdp+5URCZihQZRbZYBnB1dWXy5MkcP36cuLg4QkNDmTdvXqY7+02cOBGr1cr8+fOzPHerP127ds3/L0ikEETGJfDM97uITjffcOqAJjSpluYH5qVD8MsjYE03L3HQl1ClSQFkKiIFwvuOf1/4S1sC/PvC3/nUF+vvqOTOR0OaGS41J1p59vvdXI2MQ6S0KNLFsojkjsVi5b+/7uPEFeN6yg+28mVoa9/UQMRF+HEoxKX7hKXzK9BoQAFkKiIFql5P6PE/Y8wcDT8+aFgho2/TqjzdxfhJ7sXwWMb+uJuEREtBZCpS6FQsi5RgM9Yc45+DFw2xJtU8mTSgcWogLjKpUA4zrrvMnfdB1zcKIEsRKRQdXoBmDxtjUZfhhyEQkzrV4pXeDWhXx7hTbeDJ63ywIhiR0kDFskgJ9VdQqGE3LgAvV0e+GO6Pi+O/8xATzbBoFITuM15cvQ3cPzvphSARKZlMJug/A2p3NsavBidNyUpImmrhYG/HZw+3oIqni6HZ7A0nWaQNS6QU0E9CkRLowPkwXl641xCztzMx6+GW+JZ3SwpYLPDHGDi2wnhxudow7CdwdC2YZEWk8Dg4wdDvoOKdxvjpjUm/SCcmrcnu7e7M5yNa4mhv3PZ6/G9B7Dh9vaCyFSkUKpZFSpgrEXE89e1OYs3G+YRv9buTjvW8kw6s1qRVL4J+MV7sWh5GLIYy3gWUrYgUOteyMHwhuFcxxo/8CcueT/rFGmhZoxyTBxhf9jUnWnn6u12cvR5dQMmKFDwVyyIlSHR8Ak8s2MGFsFhDfFgbXx5rXys1sG4abP/KeLGDa9KIcoW6+Z+oiBQtZX1h+K/g5GGM7/0BVr6ZsmnJsDY1GNWhtqHJ9ah4nliwg4hYc0FlK1KgVCyLlBAJiRae+3EPQefCDPE2tcoz6b40G49snQXr3zdebOcAD34HNdoiIqVU1Wbw8M/gYJybTODnsO69lMM3+91JtwYVDU2OXork2R92E5+gFTKk5FGxLFICWK1WJi07xJojlw1x3/KufD6iJU4O//5V3/o5rEi/woUp6WW+er0KJlkRKbpqdYQhC5J+gU5r/Xuw/gMg6f2HT4e1oH5ld0OTjceu8vriIG2JLSWOimWREuDrjSf5LjDEECvr5sj8x9vg7e6cFNj6OawYn/Hie/8PmjxQAFmKSLHQoA8M/BIwvsxHwDuwLulTKQ8XR755rDUVyjgZmvy257yWlJMSR8WySDG3aNc53l1+xBBzcrDj60dbUbfivyM/W2dlXij3mgytHi+ALEWkWPEbAv0+yhhf925Kwexb3o1vRrbG1dG4JfYX606wYMvpAkhSpGCoWBYpxlYcvMhri4MyxKcPbUbrWuWTDjZ9ksnUC6DnpKRNCUREMtN6NNyTRcG8ZgpYrTT3Lcus4S2wtzOOQk9cdp
Cl+y4UUKIi+UvFskgxtfn4VZ77cQ+JFuP8wDfuaci9fj5Jb6+vfAtW/y/jxT0nQccXCyZRESm+2jyZecG88SNY/l+wWOjesDLTBjU1nLZaYdwve1l96FIBJSqSf1QsixRDe8/e5KlvdxKfaHzz/OkudXiqc92kjQSWPgdbPs14ca/JKpRFJPvaPAn9Ps4Y3zEHfhsNCfEMbe3LuF71DacTLFae/XE3m45dLaBERfKHimWRYibo3E0e/WYbUfGJhviwNr683qchmGNh0UjY813Gi++epqkXIpJzrUdD/0/J8NLfgcXw8zCIi+S57ncwMu167kB8goUnv93JTu3yJ8WYimWRYmT/uTBGzNlGeGyCId6vaVWmDmyKKfoafHsfHF5mvNBkDwO/gHbPFmC2IlKi+D8GQ+aDnaMxfnw1zL8HU8RF3r63EUNbVTecjjEnMnLeDnaFqGCW4knFskgxceB8OMPnBGYolDvXr8j/Pdgc++vHYU4POLvNeKG9c9KGI80fLsBsRaREajwwaac/xzLGeOg+mNMDu8sH/7+9O4+Lstz/P/66ZxhmZI9NQBQFcUlzTXNDKkttMcNsL5fqVKfFetj5ndMpLct82DllncLKb5aZ1jl2WvQcDZeDWbnjDqGggoIiLqyyDstcvz9GMBxQQZwZ4PN8PO4HcF1z337mEoY391z3dTN3Qh/u7BNcp7vYXMWkzxNIOCKBWbQ8EpaFaAEyi2HKlzttgnJUpD+fPjoQ1+Nb4LNbIP9o3R1dPeGR76HHHfYrVgjRukXcDJP+A+2uqdt+NgsWjUWftp737+/HLT0D63SXVFQzeVECW9Ny7VisEFdOwrIQTm77kTzm79dTWFY3KI/o6s/CRwdi2rMIloyH8oK6O3qFwuNroUuU/YoVQrQNHQfB4/HgG163vaII/nkvhq0fMv/B/tzco25gLqusZuriBDYdlsAsWg4Jy0I4sfUHTvHYkt2Yq+teVDO8qx8LH+qNKW7aueWb6gZpgvvBH9ZD+172K1YI0bb4d7UG5o5D6rYrC8S/juk/T7Dgvh7cem37Ot3llRae/Go3e3IvuFhQCCclYVkIJ/WfvVk8tXQXFVV1l4cb0dWfz8cH0+6rO2Dv17Y7dr8DpsaBZ5CdKhVCtFnuftYpGb3vse1LXo7r4tF8fJsPt/Wu+3pUWa348qCOrxOO2alQIZpOwrIQTkYpxcJf03lh2V6qLrjhyNheQXwx9BSmz0fCiT22O0e9ZL2Yz9Xdtk8IIa4Ggwnu+RxunonN0nKn92NYeCOx16Uxrm9InS6FxqyVB/gg/hBK1X2tE8KZSFgWwolUWxSv/zeZOXEHbPru7evPx77LMHz7qO38ZIM73LcERr0GOr19ihVCiBqaBiP/BA/9G4zedfsqinBZ/gc+cFvEY4Pb2+z6fvxB/vpDEpUX3GRJCGchYVkIJ1FaUcVTS3eyZGuGTd+DAcf4W8FL6HZ8arujb4R1fvK14+1QpRBCXES30fDkBgjoadOl27OEmdnPMmeIbShetuMYkxclUFhaaY8qhWgUCctCOIGsgjLuXbCV+AOn67TrsPB1z228VTwD3akk2x2vvdv6iynQ9heTEEI4hF8EPBEPfW3XdtfOpPBw4hSWX/srBuoG4y1pucR8spmjOSX2qlSIyyJhWQgH256ey12xm0g+cbZOezf9SXaGvMvwIx+iVxecbXExwZ3/sN5Ny3TBW55CCOFoRg+I+QRi/s/2BiaWKvqnL+AXz9e5zpBVpyv9TAl3f7yZjYfO2LFYIS5OwrIQDqKUYsnWozz82XZySypq23VY+KNpHatNr+Cbt9d2R//u8Ief4Pqp1nmCQgjhrPo+AE/9CkHX2XSFVB7lP4ZX+ZPbj7hwfvnLgtJKJi9K4KMNh+XCP+EUJCwL4QAl5ipe+vc+XvtPcp0VL/poaax2e52/sBh9dXmdfRQaDH0OnvpF1k8WQrQc/l3hiZ8g6k+g1b0AWWep4DnL1/zPbSaDtJ
TadouCd9am8tTSXRSVyzxm4VgSloWwswPZZ7lr/iZ+2HP+7UdvinnL5XNWGF+juyXNZp9i10CqJ62EMXPA0M6e5QohxJVzcYVRM+GJ/1nfHbtAF0sG3xrf5F3DAvworG1ft/8U42I3kXi8wI7FClGXhGUh7EQpxT+3Z3L3R5tJO2O9gEXDwr36n/nJ+BKPuKxHh+1bjtXXP8HPPeagLrxLlhBCtDQdBsJTv1I95Dnru2UXmKj/1fp6qP8feqoBOJpbyj2fbGHhr+lYLDItQ9ifhGUh7OBMkZk/LNnFK8uTMJ+7I1+ULpEfXV/lHcOn+GlFtjv5d4fJq7CMeZtqvdHOFQshxFViMGEZNYuN3WaiAnvbdHtrpbxl+II1ri9zi24XoKisVsyJO8DkLxI4fbbc9phCXEUSloW4ytb8dpKx//iV+AOnAOilHWWJYS5LXd/mWp3tmsoY3OCWWfD0JugSZd9ihRDCTvLdu1L1eDyMfRtcPW36I3VZfOY6j29cZ9NPOwzAxkM53Pr+r6zYkyUX/wm7cXF0AUK0VnklFby1an/t3ORw7QTPuyxnvG4LOq2BF/ked1p/cfh0tGOlQgjhIDoXGPJH65rx62bAb9/ZPOQGXQorjK/xY/VgPqi6h4NlHXnxm738mJTNnJjeBHqa7F+3aFMkLAvRzJRSLN+TxexV+8kvrSRSO85zLisYp9vacEgO6AmjZ0PkrfYtVgghnIFXMEz8HAZOhrWvwslEm4fcoU/gDn0Cq6sHEVsVw//2w46jebw8tgf3Xd8RnU6W0hRXh4RlIZrRkZwSZq74jU2Hc+ipZfCWYTm36XY0HJI9g+GmV6HfQ6DT1/8YIYRoK7qMhCd/sZ5hXj8bCjNtHnKbfge36Xfwv+qBxJbdzcs/VPLtruPMielNjyAvBxQtWjsJy0I0g8KySmLXH2LJ1nRGqD18bVjNcH1ywzu4esKIF2HIM+DqZrc6hRDC6el00Oc+6HkX7PgMfn0HygtsHnarfhe36neRYOnO58duY9yHeUwaFs7zN3fFx83V/nWLVkvCshBXoKrawr92HOOTdYmMMsezRr+GcN3Jhndw9YQbnoQhz4K7n/0KFUKIlsZggmHPQf9HYPv/wbaPoLzQ5mGDdakMdk3lmCWAxVvHcPvOW3lsVB8eHRqG0UXesRNXTsKyEE2glGL9gdP8EBfH0IJVrNFvxstQ1vAORm8Y8jTc8DS4+dqvUCGEaOna+cCNf7FeCJjwKWydD2X5Ng/rqDvDTN1XTFffsnLtUKZtup27bh/H7X2C0TSZzyyaTsKyEI2glGJjcjpJaxYRdfZHPtYdufhPkXsADH4KBv/B+oIvhBCiaUxeMPJPcMNTsONz2L4AirJtHuaumXnA5WceMP/Mge/n8+m62+l+6+NE942U0CyaRMKyEJdBVZnZ/+v3FGz/msHl2xmpVV58lfLAXjD0Geg90fpWohBCiOZh/N01H/tXwNaPIHtvvQ/tqcukZ8kCzMs/Y8uPg3AbeD99b34AnWs7e1YsWjgJy0I0pLqSyvRNZG36Gr/M1fRSxdb2Bk5MKDS0yNHWkNwlGuQMhhBCXD0urtYLAa+7FzK3wtaPUKmr0VS1zUONWhXDK7fCtq2UbHuFnE5jCBn+MIaIaOtxhLgICctC/J65GNLWU/HbStTBNRiriuh8iV1KTUGYBk9GN+BRuZmIEELYm6ZB2DAIG4Z2Nhu15yvKtn+BW2lWvQ93pxT3zOWQuRyz3h3VdTSm68ZB11utUz2EuICEZSHyMyB9Ayo1DpW2AV11BZc6z1CFnlPtowm66Uncuo2WNZKFEMIZeAWjRf8/3KJeojptAyc3LCDgxE+4UlXvw43VJZC6HFKXY9EZ0MKj0bqNha6jwDfczsULZyVhWbQ95Wfh6EZI2wBpP0FeGmCdXXGpiROJup5UXjuRPmOm0MHT/6qXKoQQogl0OvSRo+gQOYrqknz2xi+FxH/Tp+q3Bm8SpbNUwuF46wbgEw
YRN1u3LiPlIu02TMKyaP3KCuDYduuctowtcHwn1DOnrSH7LOH85hVN6MhHGXH9APRyS1UhhGgx9O7X0G/8NCzjnufX3Ykc+2UpvQo3MEB3+OI7FmTAri+sm6aDkAEQNhQ6DYNOQ2QZ0DZEwrJoXZSC/COQtdsajjO3walkoIHbTdejUunZZulJvBqEpdttTLzpBh7u6HPVShZCCHH16XQaN17fF67vy/4TZ5n9yw4q9scxih0M0/2Gq3aRkyjKAlk7rduWWGtbQA/oNNS6dRhonbahu9gySaKlkrAsWi6loCATTuw5v2XvrfcOT5eSpzzYbOlNfPUADnkP447BPXl2YCiBXrLsmxBCtDbXhnhx7YOjyCuJYtmOTP6ekEp4wVZG6XczQpdEgHb20gc5k2Lddn1h/droBcF9IaQfhPS3btd0kZWRWgEJy6JlKC+E0ylwer/1xen0fjj5G5TlNelwZuXCLks3Nlr68KvlOg7ruzCmVwgPDOrIkHA/dDLVQgghWj1fd1eeubErf4yOYGfGUL7deYyZiVl0rDjKCF0SUbokbtClYNQqL30w87nrYY5uPN9m8oGg66xnoQN7QuC1ENgD2l1z1Z6TaH4SloXzsFjgbJb1gru8dMhNgzOpcPoAnD1+RYc2KwN7VQQ7LN3ZYelBgqU7lbp2REX681ifEG7t1R4vk6GZnogQQoiWRNM0BnX2ZVBnX2bd1YvVSSeJSxrEl4dy0CrLuV6XymBdKoO0FPrrDtNOq7i8A5cX2AZoAM8Qa2gO6Al+4dYpHL4R4B0qqys5IQnLwq5cqsusAbj0pHXJtrz088E4/yhUm5vl3zmjvNhniWCnpTsJlh78prpQgQFXvY4bInx5/bpgxvQK4hp3WYxeCCHEeW6uLtwzMJR7BoZytryS+P2niEvqyEcH+1JRZcFAFb20owzSpTBIl0o/XRqBWkHj/pGiE9Yt7ae67XpXuKbzufB8brumszVEu7VvpmcoGsupw3JZWRlz585l2bJlZGZm4uvry9ixY5k9ezYdOnRo1LHy8/OZNWsWK1as4OTJkwQFBRETE8OsWbPw8fG5Ok+gLVEKKoqh+DQUZUNhlvVscOFx6+eFx3E5e5w7ygshsXn/6TzlQZIlnEQVTpKlC4mWcE7iS81CcP4erozvHsionoGMiAzAw+jU3/ZCCCGchJfJwIQBoUwYEEqxuYrNh3P4OfU0P6d6sLCwKwur7wQgkHz66NK5TpdOHy2d63RH8L+cec8Xqq6AnIPW7QIG4Da9Gy5ZXazh+febZxB4tAePQOvUD5kn3aycNjWUl5dz8803s23bNoKDgxk/fjxHjx7liy++YNWqVWzbto3w8MtbMDwnJ4ehQ4dy+PBhwsPDufvuu0lOTuaDDz5g9erVbN26FV9fWQLGRpUZyvKhNO/cx1woOW0NxDVbyWkoPgXFZ6Cq7KKHu9If3SqlI10Fc1B15KAllFQVSrLqwnHlX+fo7Qx6RnbxZViEH8Mi/Ogd4i1zkIUQQlwRD6MLY3oFMaZXEEopUk8V8XPqGbal57LzqAvx5muItww892hFCLn01h0hUsuiu+4Y3bTjhGsnLr7qxiW4VpfC6WTr1hC98XxwvvCjmy+086370dCuyfW0FU4blt966y22bdvG0KFDWbduHR4eHgC89957vPTSSzz22GP8/PPPl3WsF198kcOHDzNhwgS++eYbXFysT3vatGnExsYyffp0Fi9efJWeiYMoBVXlYC5qYDtrPRNcXlg3EJcVWC+aK8uHylKHlF6sTGSo9hxRQRxRwRy0hHJQhXJEBVOB7bxiX3dX+nX0oX9HH4ZE+NE31AdXF1m+RwghxNWhaRo9grzoEeTF09ERVFVbOJBdxPYjuSQcySPhaB4nSv05YfFnHYPgXD52oYrO2km6a8fppjtGuJZNZ+0knbVTeGoXP+F02arNUJhp3S6HS7tz4fka61YTpE1e1hU+jF7nPvc8t3md/2jyAhdj89TtxD
Sl1OUvQGsnFRUVBAYGUlhYyO7du+nfv3+d/r59+5KYmMjOnTsZOHBgA0exys7OJjQ0FBcXFzIzM2nf/vycH7PZTMeOHcnLy+PEiRMEBgZedo1nz57F29ubnJwc/Pz8Lv/JWSzWt1ku3KoqrOG0ssx6hrayZiuFyvLzfZWl1hBc+/XvHmcutgbhinOB2FL/7T2dQa7y5ITyI0MFcVS156gK4qilPUdVMDl40dB5aKOLjh5BnvTvdI01IHfyoZOvG1orfsupsrKSuLg4br/9dgwGuQjxcsiYNY2MW+PJmDVNax83i0VxNLeEpKxCko4XkphVSHJWISUVDZ1VVvhxljDtlDU866wBOkw7RYiWc3lL2TmK3tUanF3drZuhHRjcrJur2/nPDe0a7ncxWUO33tX60cV0/vPaj8YmrWOdm5uLv78/hYWFeHl5NekpOuWZ5c2bN1NYWEhERIRNUAaYOHEiiYmJrFy58pJhec2aNVgsFqKiouoEZQCj0ci4ceNYtGgRcXFxTJkypdG16n94DIxAdaV12kJ1hfXzanOdNnWuTXPiANtcipWJbOVHtvIlS/mTrfw4gR8nlF9tezmX/ku0g087egZ70TPY0/oXfLAnnf3c5Q56QgghnJpOpxEe4EF4gAfj+1mvsbJYFOk5JSSfKOTw6WIOniri0OliMnJLqbZALt7kKm92q25gqXs8IxUEa7mEaLl00HIIIbf26xAtlwCtAG/NMe8GU10BpTnW7SpTOgNanQDtei5ou1rDtN5g3XQGa5/eBX2Z5dIHvgSnDMv79u0DYMCAAfX217QnJl76SrHLOdaiRYsu61j10aVvAOOlw1tLj3eFyo0C5UEO3uQob84ob3Lw5ozyqf36zLm+Mi7/Rh5BXiY6+7vR2c+dzv7u5z66EebrTjtXWT5HCCFE66DTaXQN9KBroEeddnNVNUdySjh0qpjMvFKO5ZVyLL+UzLxSThSUU21RmHHlqArmqApu8PhGKvCnkACtkACtgECtgACtgACsX/trhfhQzDVaMd6UoNOcbmLBJWmWSqi4jDWvf0dnvvLn6ZRhOTPTOs8mNDS03v6a9oyMDLsdy2w2YzafX9assNB6l7izzfCfYA+VSk8RJkpVO0owUUQ7SpSJEtpRqNwpwJ1C5WENxXhwVrlTiDsFyp0i3KnmcoOrBShFp4FPOwP+Hq4EepkI8jLi724gLyud6MH9CPV1I9DThMlQ33ErKS0qwEF/IzudyspKSktLyc3NbZVvV14NMmZNI+PWeDJmTSPjVleAAQJCjQwLNQLnb1hSWW3h5NlysgrKycorISExBc/ADuSWVHK6qIKcIjM5JRVYFJQBx3DnGO5AyEX/PQ0LnpTioxXjQwne5wK0j1aCj1aEN6V4aKW4U46XVoY7ZXhQhrtWhiflGK7gIkV7q8lpVzLr2CnDcnFxMQBubm719ru7uwNQVFRkt2PNnTuXN954w6a94/vFl6yhrWroz495dq1CCCGEEG1dbm4u3t7eTdrXKcOyM/rrX//K9OnTa78uKCggLCyMzMzMJg9+W3P27Fk6duzIsWPHmjzJvi2ScWs8GbOmkXFrPBmzppFxazwZs6YpLCykU6dOV7REsFOG5Zpl4kpL638jvqSkBABPT0+7HctoNGI02l6U5u3tLd+0jeTl5SVj1gQybo0nY9Y0Mm6NJ2PWNDJujSdj1jS6JqykUbtvM9bRbDp16gTA8ePH6+2vaQ8LC7PrsYQQQgghRNvilGG5b9++AOzevbve/pr2Pn362PVYQgghhBCibXHKsDx8+HC8vb1JS0tj7969Nv3fffcdAOPGjbvkscaOHYtOp2Pjxo2cPn26Tp/ZbGblypXo9Xpuv/32RtVoNBp5/fXX652aIeonY9Y0Mm6NJ2PWNDJujSdj1jQybo0nY9Y0zTFuTnkHP4AZM2YwZ84chg0bxrp162pXrai53XV0dHSd213Pnz+f+fPnExMTw9y5c+sc65FHHuHrr7/mnn
vuYdmyZbW3u37hhRf48MMPmTx5cuu73bUQQgghhLhiTnmBH1jDcnx8PFu2bCEyMpKoqCgyMjLYvn07AQEBLFq0qM7jc3JySE1NJTs72+ZY//jHP9i2bRvff/89PXr04Prrryc5OZnffvuNyMhI3nvvPXs9LSGEEEII0YI45TQMAJPJxIYNG5g5cyZubm6sWLGCjIwMpkyZwu7duwkPD7/sY/n7+5OQkMDzzz9PRUUFy5cvp7CwkGnTppGQkHBFy4kIIYQQQojWy2mnYQghhBBCCOFoTntmWQghhBBCCEeTsHyVzJ49G03T0DSNr776ytHlOKXExESee+45hgwZQkhICEajEW9vb4YOHUpsbCyVlZWOLtHppKSk8Le//Y2bbroJf39/DAYDQUFBTJgwgY0bNzq6PKdVUlLC0qVLef7557nhhhswGo1omsasWbMcXZrDlZWV8dprr9GtWzdMJhMhISE89thjZGVlObo0p7Vr1y7efvttJkyYQGhoaO1rvahfaWkpK1as4PHHH6d79+6YTCbc3d3p27cvb775JsXFxY4u0Wm99957TJgwgcjISLy9vTEajYSFhTFp0iSSkpIcXV6LkJubS2BgIJqm0bVr16YdRIlml5KSooxGo9I0TQFq6dKlji7JKcXGxipAhYWFqVGjRqkHHnhAjRo1SplMJgWo6OhoZTabHV2mU+nQoYMClIeHh7rlllvUfffdp3r37q0ApWmaev/99x1dolPas2ePAmy2119/3dGlOVRZWZkaMmSIAlRwcLC677771ODBgxWgAgICVFpamqNLdErjx4+v9/tJ1G/hwoW1Y9SzZ0917733qjFjxihPT08FqB49eqhTp045ukyn5Ofnp0wmkxo8eLCKiYlRMTExqlu3bgpQBoNBrVy50tElOr3JkyfX5rGIiIgmHUN+upuZxWJRI0eOVO3bt699QZWwXL+0tLR6fxmfPHmyNgDGxsY6oDLnNWrUKLVkyRJVVlZWp33BggUKUHq9XiUnJzuoOud1+PBh9fjjj6sFCxaoXbt2qTfffFPCslLq1VdfVYAaOnSoKioqqm2fN29e7R+swtbbb7+tZs6cqf773/+q7OxsZTQaJSxfxOLFi9WTTz6p9u/fX6f9xIkTqn///gpQDz74oIOqc26bNm2yeb1XSqmPPvpIAap9+/aqsrLSAZW1DPHx8QpQTz75pIRlZ/Lpp58qQH311Vdq8uTJEpabaOnSpQpQMTExji6lxRg9erQC1KxZsxxditObO3dumw/LZrNZeXt7K0Dt3r3bpr9Pnz4KUDt37nRAdS2LhOWm27JliwKU0WiUdxIbKSIiQgFq3759ji7FKZWWlqqIiAh17bXXqoMHD15RWJY5y83o5MmT/PnPf2bUqFE8/PDDji6nRTMYDAC4uro6uJKWo+bW7idOnHBwJaIl2Lx5M4WFhURERNC/f3+b/okTJwKwcuVKe5cm2pCa1y2z2Uxubq6Dq2lZ5Pfkxb3xxhukp6ezYMGC2rFqKgnLzWjatGmUlZXxySefOLqUFi0/P5958+YBcMcddzi4mpYjPT0dgKCgIAdXIlqCffv2ATBgwIB6+2vaExMT7VaTaHtqXrcMBoPc86ARli5dSmpqKpGRkURGRjq6HKeTmJjIvHnzmDp1KlFRUVd8PKe9g19Ls2rVKr799lveeOMN+cZtpEOHDjFnzhwsFgunTp1iy5YtFBcX8/TTT8sZ+suUlpbGqlWrALjrrrscXI1oCTIzMwEIDQ2tt7+mPSMjw241ibbngw8+AGDs2LEYjUYHV+O83nnnHZKTkykpKeHAgQMkJycTEhLCv/71L/R6vaPLcyoWi4UnnngCHx8f/v73vzfLMSUsN4Pi4mKeeeYZunXrxl/+8hdHl9PinDp1ii+//LJO27Rp05g9ezY6nbz5cSlVVVVMmTIFs9nM/fffz8CBAx1dkmgBapbrcnNzq7ff3d0dgKKiIrvVJNqWuLg4Pv/8cwwGA7Nnz3
Z0OU5t7dq1rF+/vvbrsLAwlixZIq/39YiNjWXHjh188cUX+Pn5NcsxJSwDMTExHDhwoFH7LFmyhMGDBwPwyiuvcOzYMdavX9+m/jK+0nGrMWLECJRSVFdXk5mZyfLly3njjTdYvXo169ato3Pnzs1YtWM115j93rRp09i0aRPh4eF8/PHHV1qiU7oa4yaEcJyUlBQeeeQRlFK88847tXOXRf3i4+MBKCgoICkpiTfffJPo6GjeeustXn31VQdX5zwyMzOZMWMG0dHRTJkypdmOK2EZOHLkCKmpqY3ap7S0FICEhAQ++ugjHn30UW6++earUZ7TupJxq49er6dLly5Mnz6dzp07c8899/D888+3qguMmnvM5syZwyeffEL79u1Zu3Ztq53z19zjJsDDwwNoeJxKSkoA8PT0tFtNom3Iyspi7Nix5OfnM336dF544QVHl9Ri+Pj4EBUVRVxcHEOHDmXmzJmMHj2aQYMGObo0p/Dss89SUVHBggULmvW4EpaBvXv3NnnfuLg4LBYLSUlJ3HjjjXX6UlJSAGug+eyzzxg7diwvv/zyFVTqXK5k3C4lJiYGDw8P1qxZQ0VFRau52rc5x2zBggXMmDEDb29v1qxZ0/Q7E7UAV/N7ra3q1KkTAMePH6+3v6Y9LCzMbjWJ1i8vL4/Ro0eTkZHB1KlTeffddx1dUotkMBi4//772bVrFytXrpSwfM6qVavw8fHh6aefrtNeXl4OWP9Qq8lqy5Ytu+wL4iUsN5OL/TJPSUkhJSWlVU0nuNo0TcPX15fMzEzy8/Np3769o0tyKsuWLePZZ5/Fzc2NH3/8kX79+jm6JNHC1LztvXv37nr7a9r79Oljt5pE61ZcXMxtt93G/v37mTBhAgsXLpTbhF8Bf39/AM6cOePgSpxLQUEBv/zyS7195eXltX01AfpyyNVTV2jWrFko681dbLbJkycD1iVelFIsXrzYscW2IOnp6Rw7dgwvL6/aFwRhFRcXx6RJk3BxcWH58uUMHz7c0SWJFmj48OF4e3uTlpZW7x/73333HQDjxo2zc2WiNTKbzYwfP56EhATGjBkjqzg0g5rQFxER4eBKnEdDeezIkSOAdaxq2hpzAlPCsnCY2NhYTp48adOemprKQw89hFKKSZMmyQvq72zevJmJEyeilOKbb75h9OjRji5JtFCurq4899xzgHWeX80cZYD33nuPxMREoqOj5Wp7ccWqq6t58MEH+emnn4iKiuKHH35oNVPrrqbNmzezZs0aLBZLnfbKykpiY2NZunQp7dq14/7773dQhW2HTMMQDjNv3jxefPFF+vbtS9euXVFKkZGRwa5du7BYLIwcOZK5c+c6ukyncuedd1JWVkaXLl1YsWIFK1assHnMiBEjeOKJJ+xfnJOLiYkhOzsbOH+Xw88++4w1a9YAEBwczPLlyx1WnyPMmDGD+Ph4tmzZQmRkJFFRUWRkZLB9+3YCAgJYtGiRo0t0Sj/++GOdpc4qKioAGDJkSG3bzJkz5aZK58yfP7/2Z8vf359nnnmm3se9++678k7i7xw6dIipU6fi7+/PwIED8fPzIycnh6SkJLKzszGZTCxevJiOHTs6utRWT8KycJg5c+YQFxfHzp07Wbt2LWVlZfj6+nLrrbfy4IMP8uijj8o6yxcoKCgArKtD1LytVB8Jy7b27Nljc4ONrKwssrKygLZ5IZvJZGLDhg3MnTuXf/7zn6xYsQJfX1+mTJnC7NmzG7xhSVt35swZtm/fbtP++zaZR3pefn5+7ecX+4N01qxZEpZ/Jzo6mldeeYVffvmFxMREcnJycHV1pXPnzkycOJFp06a16gu7nYmmlFKOLkIIIYQQQghnJKfthBBCCCGEaICEZSGEEEIIIRogYVkIIYQQQogGSFgWQgghhBCiARKWhRBCCCGEaICEZSGEEEIIIRogYVkIIYQQQogGSFgWQgghhBCiARKWhRBCCCGEaICEZSGEEEIIIRogYVkIIYQQQo
gGSFgWQgghhBCiAf8f0V330pVNp6wAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAssAAAIHCAYAAABpIhEUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAADcj0lEQVR4nOzdd3hUVfrA8e9Meoc00kkCCZESShDpVRBURBCxKyKr7oKu4lpQ9AeKbVVW7CLVrqAgIEoHIRB6LwECSSCElt4zmZnfHzET7kzqZJJMkvfzPDy799xzz31ziOTNmVNUer1ejxBCCCGEEMKEurEDEEIIIYQQwlpJsiyEEEIIIUQlJFkWQgghhBCiEpIsCyGEEEIIUQlJloUQQgghhKiEJMtCCCGEEEJUQpJlIYQQQgghKiHJshBCCCGEEJWQZFkIIYQQQohKWHWyXFBQwGuvvUZkZCSOjo4EBAQwadIkUlJSatzG4sWLUalU1f75+uuv6/ErEUIIIYQQTZHKWo+7LiwsZMiQIcTFxeHv78+AAQNITExk9+7d+Pj4EBcXR3h4eLXtbN++nfnz51d4LysrixUrVgCQkJBQo/aEEEIIIUTLYbXJ8owZM3jzzTfp06cP69atw9XVFYA5c+bw3HPPMWjQILZs2VKnd3z++ef861//ol+/fmzfvt0CUQshhBBCiObEKpPl4uJifH19ycrKYv/+/XTv3l1xv2vXrhw+fJi9e/cSExNj9nv69evHjh07+OKLL3jiiSfqGrYQQgghhGhmrHLOcmxsLFlZWbRr184kUQYYP348AKtWrTL7HefOnWPHjh3Y29szYcIEs9sRQgghhBDNl1Umy4cOHQKgR48eFd4vKz98+LDZ7/j2228BuO2222jdurXZ7QghhBBCiObLtrEDqEhycjIAQUFBFd4vK09KSjL7HWXJ8kMPPVSj+kVFRRQVFRmudTod6enpeHl5oVKpzI5DCCGEEELUD71eT05ODgEBAajV5o0RW2WynJubC4Czs3OF911cXADIyckxq/3du3dz6tQpPD09ue2222r0zNtvv82sWbPMep8QQgghhGg858+fr3QQtjpWmSzXt7JR5QkTJmBvb1+jZ6ZPn860adMM11lZWYSEhBiSblE9jUbD5s2bGTJkCHZ2do0dTpMh/VZ70mc1t/vSbp7b9pxJ+Ru932Bw8OCGD6iJke8180i/1Z70mXnS09OJjIzEzc3N7DasMlku2yYuPz+/wvt5eXkAZn3hJSUl/PTTT0DNp2AAODg44ODgYFLu6emJl5dXreNoiTQaDc7Oznh5ecl/6LUg/VZ70mc1l5uWi42TjUl5jm2O/NtWA/K9Zh7pt9qTPqubukyZtcoFfiEhIQBcuHChwvtl5W3btq112+vWrePKlSuEh4fTt29f84MUQohmIDU3tcLyi3kXGzgSIYSwTlaZLHft2hWA/fv3V3i/rDw6OrrWbZdNwXjwwQfNjE4IIZqPlNyUCssv5kqyLIQQYKXJcr9+/fDw8CAhIYGDBw+a3F+2bBkAo0ePrlW7ubm5/Pbbb4Aky0IIAZCaJyPLQghRFatMlu3t7Zk6dSoAU6ZMMcxRhtLjrg8fPsygQYMUp/d98sknREVFMX369Erb/fXXX8nPz6d3795ERETU3xcghBBNRGUjy6m5qVjhAa9CCNHgrHKBH8CMGTPYsGEDO3bsICIiggEDBpCUlMSuXbvw8fFh4cKFivrXrl0jPj6e1NSKR0mg9nsrCyFEc6bRariaf7XCe7maXLKLs/Fw8GjgqIQQwrpY5cgygKOjI5s3b+bVV1/F2dmZFStWkJSUxMSJE9m/fz/h4eG1ai81NZVNmzZhZ2fHPffcU09RCyFE03Ep/xJ6Kh89rmyKhhBCtCRWO7IM4OTkxO
uvv87rr79ebd2ZM2cyc+bMSu/7+/tTUlJiweiEEKJpq24RX0puClGeUQ0UjRBCWCerTpaFEELUn+qS5cq2lRMth0ajQavV1ku7tra2FBYW1kv7zZH0GdjY2DTKHtOSLAshRAtV3Y4XsiNGy5Wdnc21a9coKiqql/b1ej1+fn6cP3++TodFtCTSZ6UcHBzw9vbG3d29wd4pybIQQrRQ1Y0sy17LLVN2djYpKSm4urri7e2NnZ2dxZMznU5Hbm4urq6uqNVWu3zKqrT0PtPr9Wg0GrKyskhJKd3Fp6ESZkmWhRCihTJOhtu6tSUpJ6nS+6JluHbtGq6urgQFBdXbCKZOp6O4uBhHR8cWmfiZQ/qsdC2bm5sbFy5c4Nq1aw2WLLfM3hZCCGGy20UP3x6Ka5mG0fJoNBqKiorw8PBo0R/1C+ulUqnw8PCgqKgIjUbTIO+UZFkIIVqgEl0Jl/IuKcpifGMU11lFWeRp8hAtR9nCscZYRCVETZV9fzbUQkdJloUQogW6mn8VrV75g8Z4ZBlkKkZLJaPKwpo19PenJMtCCNECGR9z7WTrRBvnNriqXBXlcjCJEKKlk2RZCCFaIOMk2N/FH5VKRSt1K0W5jCwLIVo6SZaFEKIFMk6CA1wDACRZFkIII5IsCyFEC2S800WASyXJsuyIIQQA+fn5fPTRR4wYMQJ/f38cHBxwc3OjY8eOTJw4kZUrVzark/W2bNmCSqVi4sSJjR1Ko5N9loUQogUyHjH2d/UHZGRZiIrExsZy9913k5qaiqOjIzfeeCMBAQEUFRWRkJDAkiVLWLJkCR07duTYsWONHa6wMEmWhRCiBTJOggNdAwForW5dZT0hWpr9+/czbNgwioqKeP7555kxY4bJYRjnz59nzpw5fPHFF40UpeX16tWLEydO4OHh0dihNDqZhiGEEC2MTq+rcIEfmI4spxWmUVhS2FChCWFVdDodDz74IEVFRbzxxhv897//rfDUuODgYP73v/+xffv2Roiyfjg7OxMVFYW/v39jh9LoJFkWQogW5lrBNTQ65clXZSPLxskyyPZxouVas2YNJ06cICQkhOnTp1dbPyZGebDPtm3bmDp1KtHR0bRu3RonJyeioqJ46aWXyMzMNHl+8eLFqFQqZs6cWWH7t99+OzY2NiQmJirKjx49yoMPPkh4eDiOjo74+PjQrVs3nnnmGVJTlf/97tixgzvvvJO2bdvi4OCAn58fvXr14qWXXiI3N9dQr7I5y5mZmXz88cfccssthja8vLwYOXIk69evrzDuwYMHo1KpSExMZMWKFfTu3RsXFxc8PT257777uHDhQsUdaiVkGoYQQrQwxlMr7NR2eDl5oS3R4qBywMPeg6ziLMP91NxUwjzCGjpMYUV0Oj0Z+cUWbE9HTr4GjboItbp+x+1aO9ujVpt3iMUff/wBwN13342NjU2tn3/++ec5dOgQ0dHRDBs2jMLCQvbv38+7777L6tWriYuLw9XVtfqGqrBv3z769+9PYWEh0dHRjBkzhvz8fM6ePcvcuXO58847DaPDq1at4s4770Sv19OrVy/69u1LZmYmp0+f5t133+XJJ5+sNp64uDiefvppQkND6dChA3369CE5OZl169axbt065s+fz6RJkyp89rPPPmPOnDkMGDCAW2+9lV27dvHjjz+yb98+Dh06hJOTU536or5IsiyEEC1MRdvGqVVqtJSu5Pd38Vckyyl5ygNMRMuTkV9MzOwNjR2GWfbNuBkvVweznj106BAA3bt3N+v5//u//6Nv376Keb9FRUU8/fTTzJs3jzlz5vDaa6+Z1XaZjz76iMLCQt5//32ee+45xb2TJ08q3v3++++j0+lYtmwZd911l6Lunj178PLyqvZ9HTp0YOfOnfTu3VtRfuDAAYYOHcqzzz7LhAkTKky6P/30U7Zt20afPn2A0h1Ghg8fzo4dO/jhhx8qTbIbm0zDEEKIFsZ4O7iy+cqVXafmyjQM0TKlpaUB4O
3tXeH9xx57jIkTJyr+XD9vedSoUSYL5BwcHPjwww+xtbXlt99+q3OMV69eBeDmm282uWc857iqujfeeCNubm7Vvi8sLMwkUYbSXyimTJlCdnY2mzdvrvDZZ5991pAoQ+m86GnTpgHw119/VfvuxiIjy0II0cIYJ79lB5KUMU6WjY/GFkKUWrJkicneyoMHD6Z///6G65SUFFatWsXJkyfJzs5Gp9MBYG9vz+nTp+scQ0xMDH/88QdTpkxh9uzZ9O/fH1vbitO7mJgYTpw4wUMPPcSrr75KTEyMWdNgtFotGzduZMeOHaSmplJUVARg+Hoq+7pGjBhhUhYZGQlgMrfamkiyLIQQLYzxtIqyA0kqu5YFfqKlKpuWcO3atQrvl5SUGP7/k08+yZdffqm4P2fOHF566SU0Go3xoxbz/PPPs337drZs2cKQIUNwdXWlT58+3HbbbUycOFExsv3WW29x5MgRVq1axapVq2jdujX9+/fnjjvu4MEHH8TR0bHa9124cIHbb7/dMEWlIjk5ORWWBwUFmZSVjWaXJdzWSJJlIYRoYWRkWdRWa2d79s0w/ejeXDqdjpzcXNxcXRtkgZ+5unbtSmxsLAcOHOCBBx6o1bNxcXE899xzeHh4MHfuXAYPHoyfnx8ODqXzpwMCAmo9mlo2Kn09d3d3Nm3aRGxsLKtWrWLLli1s2rSJ9evX8/bbb7Nt2zYiIiKA0i3u9u7dy6ZNm1i9ejVbt241JM7//e9/2blzZ7XzlidPnsyhQ4e46667eOGFF+jQoQNubm6o1WrmzZvHE088gV6vr/DZ+v67ri+SLAshRAui1+tNkl/jZNn4+mr+VYq1xdjbmJ90iKZNrVaZvUiuIjqdDjtdEe6uDladQI0aNYrPPvuMpUuX8u6779ZqR4zly5cD8Oabb/LII48o7hUUFHDp0iWTZ+ztS/8bu34Lt+ulpFT8i6tKpaJ///6G6R9XrlzhmWee4YcffuCVV17h559/NtS1tbVlxIgRhikRSUlJTJo0iU2bNvHuu+/y3//+t9KvKS8vj/Xr19OmTRt++uknk/44e/Zspc82Zdb7HSqEEMLirhVco0ir/LgzyFX50ajxNAw9ejnJT7RIt956KzfccAPJycm8/fbbtXo2IyMDqHjqwdKlSyscfS1bjHfq1CmTe6dOnarxfsS+vr6GvZqPHj1aZd22bdvy4osv1qhuVlYWOp0Of39/k0RZo9EYfkFobiRZFkKIFsR4VNlebY+Ps4+izM3eDQ8H5Qp+mYohWiK1Ws0333yDg4MDr776Ki+88AJZWVkm9dLS0oiPj1eUlS1cW7BggWLO8vHjxw3JqbEbb7wRZ2dn/vjjD/bt22cov3btGo8//niF0zC++OILzp07Z1K+Zs0aoHTqRZn//e9/FY5oV1S3Ir6+vnh4eHD06FFiY2MN5VqtlhdffLHCJL85kGRZCCFakPM55xXXZXssGzMebb6QY90nbAlRX2JiYtiwYQN+fn689957tGnThkGDBnHfffcxduxYbrzxRvz9/dmyZQtRUVH07NkTgEcffRQ/Pz9WrVpFhw4duOeeexg+fDjdunVjwIABtG3b1uRdrq6u/Oc//6GkpIT+/fszcuRIRo0aRWRkJFqtlhtvvNHkmS+++ILw8HA6derE+PHjuffee+nWrRvPPvssjo6Oin2cZ82aRWBgID169OCee+5hwoQJdOjQgblz5+Lp6cl//vOfKvvC1taWF154gZKSEgYNGsSIESO49957ad++PV988QVTpkypY29bJ0mWhRCiBbmQq0x6g9xMPyKuqNz4OSFakv79+5OQkMDcuXPp378/8fHx/PLLL2zYsIGcnBwmTJjA8uXLOXLkCJ07dwZKd9LYs2cP999/P8XFxaxcuZKUlBTeeOMNfvjhh0rfNXPmTN577z2CgoLYtGkTR48eZdKkSaxdu9Ywp/l6b7zxBpMmTUKlUrFx40ZWrVpFQUEBkydP5uDBg/Tr189Q9+
OPP+bee+8lPz+fP/74gz///BNbW1umTZvG4cOHDQsBq/Lyyy+zZMkSoqOjiY2NZcOGDXTt2pW4uDjDLwrNjSzwE0KIFiQlRzmdItA1sMJ6xuUyDUO0dM7Ozjz99NM8/fTTNX4mKCiI7777rsJ7iYmJFZarVCr+85//mIzy6nQ6Vq9ejbu7u2JR5OjRoxk9enSN4nnooYd46KGHalR38ODBle5q8fDDD/Pwww+blEdHRzNx4kST8i1btlT6ntDQ0ErfYy1kZFkIIVoQ4xHiYLeK5yiajCzLNAwhRAslybIQQrQgxkmv8dzkysolWRZCtFSSLAshRAtRrC3mSv4VRVmgW8XTMIyT5RxNDllFprsACCFEcyfJshBCtBAXcy+iRzk3sLI5y36ufia7ZMgiPyFESyTJshBCtBDGyW4rh1a42btVWNdObWdy7LVMxRBCtESSLAshRAtR050wKrsvO2IIIVoiSZaFEKKFqOkey5Xdl5FlIURLJMmyEEK0EDXdCaOy+5IsCyFaIkmWhRCihTCeRlHZThiG+zINQwghJFkWQoiWQK/Xcz7nvKKs2pFlo2kYF3MvotVpLR6bEEJYM0mWhRCiBcguziZXk6soq+2c5RJ9CZfzL1s8NiGEsGaSLAshRAtgvLhPrVLj5+JX5TOtHVrjZOukKJOpGEKIlkaSZSGEaAGMF+f5u/hjp7ar8hmVSiU7YgghWjxJloUQogWo7U4YldUznvcsRHOnUqlQqVSNHYbC4MGDUalUJCYm1ts7QkNDre7rbiySLAshRAtQ250wDPVkRwwhmp3ExERUKhWDBw9u7FCaBNvGDkAIIUT9M3tk2XgaRq5MwxCisX399dfk5+cTGFizX3rNsXHjRjQaTb2135RIsiyEEC1AbU/vKxPsFqxsR+YsC9HoQkJC6v0d7dq1q/d3NBUyDUMIIZo5rU5Lam6qosx4ekVljOulF6aTr8m3WGxCNDfnz5/niSeeoG3btjg4OODr68u4cePYs2dPpc/8+uuv9O7dG2dnZ7y9vbn77rs5c+YMM2fORKVSsXjxYkX9yuYsJyUl8c9//pPIyEicnZ3x9PSkU6dOPPHEE8THxwMwc+ZMwsLCANi6dathTrZKpWLixImGtqqas3z+/HmefvppIiMjcXJywtPTk549ezJr1iyys7Nr32lWTkaWhRCimbucf5kSfYmirKYjywGuASZlKbkpRLSOsEhsoonQ6aAg3aLtqfJzwKYY1PU8bufkWf/v+NuRI0cYOnQo165do0OHDowbN47k5GSWL1/OqlWr+P7777n77rsVz8ydO5dnnnkGtVrNwIED8fPzY9euXfTq1YvRo0fX+N3nz5+nR48epKenExERwa233opWqyUpKYmvvvqKPn360KFDB7p168Zdd93FL7/8Qps2bRg5cqShjf79+1f7nm3btnHHHXeQmZlJaGgoo0ePpqCggJMnTzJz5kzGjBlDt27dahx3UyDJshBCNHPGUyecbZ1p7dC6Rs862Trh4+TD1YKrivYkWW5hCtLhPct9LK8GPCzWWjWeTwAX73p/jV6v54EHHuDatWu88MILvPPOO4aR2V9++YUJEyYwadIk+vfvj7+/PwBnz57lhRdewN7enj///JMhQ4YAUFJSwuOPP86iRYtq/P758+eTnp7O1KlT+fjjjxX3kpOTDfOP77zzTrp168Yvv/xCVFSUyah1VdLT07nrrrvIzMzkvffeY9q0aaiv+0Vk586dBASY/oLd1Mk0DCGEaOYq2gmjNltCyY4YQlRvy5YtHDlyhJCQEGbPnq34b+yuu+7izjvvJDc3l4ULFxrKFy5cSHFxMQ899JAhUQawtbVlzpw5uLq61vj9V6+W/kJ78803m9wLCQmxyBzk+fPnc/XqVUaOHMl//vMfRaIM0KdPH3x9fev8HmsjybIQQjRzxnsj13QnDEN92RFDiGpt27YNgAkTJmBnZ3rgz0MPPaSoBxAbGwtgMjUDoFWrVowYMa
LG74+JiQHg5ZdfZvXq1RQWFtY8+BrasGEDAE888YTF27ZmVp0sFxQU8NprrxEZGYmjoyMBAQFMmjSJlBTzRjUSExN58sknCQsLw8HBAW9vb/r06cN7771n4ciFEMJ6GCe3NV3cV8Y4WZaDSYQwdfHiRaB0YVxFysqvz2FSU0sX3gYHB1f0SK12vZg4cSITJkzg+PHjjB49mtatWzNw4EDeeustLl26VON2qnL+fOl/+y1tpwyrnbNcWFjI0KFDiYuLw9/fnzFjxpCYmMiiRYtYvXo1cXFxhIeH17i9P/74g/Hjx1NQUECPHj3o3bs3aWlpHDlyhC+//JLnn3++Hr8aIYRoPOezlcltiHvttp0y3j5OkuUWyMmzdO6vheh0OnJycnBzczP5KN/inDzrt/0aqu/T8GxsbPjpp5946aWX+O2339i0aRO7du1i27ZtvPPOO/z555/07du3XmNorqw2WZ49ezZxcXH06dOHdevWGebtzJkzh+eee45JkyaxZcuWGrV18uRJxo0bh5ubG+vXr1d8s+h0Ovbv318fX4IQQliF5JxkxXWIW+2SZeP6F3IuoNVpsVHb1Dk20USo1ZZdJKfTodfag4t7g+1UUd/KFrYlJSVVeL9sm7frDxLx9/cnPj6e8+fP07FjR5NnykZya6N79+50796dmTNnkp2dzcyZM/nf//7HM888w+7du2vd3vWCg4M5efIkCQkJdOnSpU5tNSVW+R1aXFzMJ598AsCnn36qmOA+bdo0oqOj2bp1K/v27atRe9OmTaOwsJDFixeb/FalVqvp2bOn5YIXQggrklWURXaxct/T2ibLxiPLGp2Gy/mX6xybEM3JgAEDAFi6dClardbk/rfffquoB9CvXz+gdLcMY1lZWaxbt65OMbm7u/P222+jUqk4evSoodze3h4o3XWjNsoWD86bN69OcTU1Vpksx8bGkpWVRbt27ejevbvJ/fHjxwOwatWqats6f/48a9euJTw8nFtvvdXisQohhDVLzlaOKtuqbPF39a9VG56OnrjYuSjbNRqtFqKlGzx4MF26dCExMZHXXnsNvV5vuLd8+XJ+/fVXXF1dmTRpkqH80Ucfxd7enq+//pq//vrLUK7VannuuefIycmp8fu/+eYbRUJc5o8//kCv1yvmRXt7e2NnZ0dCQkKFiX1lJk+ejLe3N3/88Qcffvih4msEiIuL48qVKzVur6mwymkYhw4dAqBHjx4V3i8rP3z4cLVtbdmyBZ1OR9++fSkpKeHXX38lNjYWrVZL586dueeee2jdumb7jQohRFNjnNQGuAZgq67dP/0qlYoQtxBOpJ8obzc7md7+vS0SoxBNQe/elX+/T548mcmTJ/Pdd98xZMgQ3nrrLZYvX063bt1ITk4mNjYWW1tbFixYYNhjGUoXyv33v//lmWeeYciQIQwaNIg2bdqwe/du0tPTefDBB/n2228NI8FV+eWXX3j44Ydp164dXbp0wcnJiXPnzrFr1y7UajWzZ8821LW3t2fkyJGsWrWKrl270qNHD+zt7enXrx+PPvpope/w9PRk6dKl3HHHHTz77LN89NFH3HjjjRQUFHDixAnOnDnDgQMHmt32cVaZLCcnl/7jHhRU8fZGZeWVzQu63vHjxwFwdXVlwIABxMXFKe6/8sorLFu2TLG/YUWKioooKioyXJcd56jRaAwbfYuqlfWT9FftSL/VnvRZucTMRMV1kGtQpf1SVb8FuQYpkuXEzETpX5rf95pGo0Gv16PT6dDpdPX2nrIRybJ3NQW7du2q9N4tt9yCTqejU6dO7N27lzfffJO1a9eybNkyPDw8GDNmDC+99BK9evUy+XqfeuopAgICeP/994mLi8PR0ZHBgwfz1ltv8f777wPQunVrRZ+Vuf7v6ZlnniEwMJAdO3awbds28vLyCAgIYMKECUybNo2ePXsq3j1v3jyef/55NmzYwPfff49Wq0Wj0fDII48o4jOOd+DAgRw4cID33nuPtW
vXsmLFClxdXQkLC2PWrFmEhYXV+9+pTqdDr9ej0Wiwsal67YQl/ttU6Y3H0K3A448/zldffcUrr7yi+E2ozJkzZ4iIiCAiIoJTp05V2daTTz7Jl19+ia2tLa6urnz22WeMHDmSq1ev8sYbb/Dtt9/i4eHBsWPHFJPujc2cOZNZs2aZlH///fc4OzvX/osUQogGsCxvGQc1Bw3Xve17c7vz7bVuZ13BOv4qKv+Y+AbbG3jA9QFLhCisiK2tLX5+fgQHB9doNFPUH61WS//+/YmPj+fEiRO0adOmsUOyGsXFxZw/f55Lly5VO+86Pz+f+++/n6ysLNzd3c16n1WOLFtS2W83JSUlfPnll0yYMAEo/S3tm2++IT4+nj179vDZZ5/x5ptvVtrO9OnTmTZtmuE6Ozub4OBghgwZgpeXV/1+Ec2ERqNh/fr1DB8+vMIN20XFpN9qT/qs3M/rfoZr5df9O/fn1qiK129U1W/FCcX8tas8WS52KZZ1IDS/77XCwkLOnz+Pq6srjo6O9fYevV5v2DquvrdUs3YJCQl4eXnRqlUrQ1lRURGvvPIKJ0+eZNiwYUREREifXaewsBAnJycGDhxY7fdpWlpand9nlcly2e4X+fn5Fd7Py8sDwM3NrcZtubq6VnhCzqOPPsqePXvYunVrle04ODjg4OBgUm5nZ9cs/oFsSNJn5pF+qz3pM9MDScJah1XbJxX1W1irMJN2bWxtUKuscp14g2su32tarRaVSoVara7X/Y/LBrLK3tWS/fLLL/zf//0fMTExBAcHk52dzaFDh0hNTcXb25tPP/0UtVotfXYdtVqNSqWq0X93lvjv0ip7u+zEmgsXKj5Stay8bdu21bZVVickJKTC38TKTtRpjqs3hRAtW05xDumF6Yoy423gasr4IJMibRFX8uXfTSHqatiwYYwbN47U1FR+//13Nm/ejJOTE//85z/Zv38/HTp0aOwQWzyrHFnu2rUrQKWHhZSVR0dHV9tW2dZzGRkZFd5PTy/9QXL9Xs5CCNEcGJ+0p1apa33UdRkfJx8cbRwp1BYq2vdz8atTjEK0dDfeeCM//PBDY4chqmCVI8v9+vXDw8ODhIQEDh48aHJ/2bJlAIwePbratvr27YuXlxeXLl0iPj7e5H7Z9IuK9nMWQoimzHjbOH8Xf+xtzFu0pVKpCHZXjkob7+EshBDNkVUmy/b29kydOhWAKVOmGOYoQ+lx14cPH2bQoEHExMQYyj/55BOioqKYPn26oi1bW1umTZuGXq9nypQphi3fADZs2MDixYtRqVQ88cQT9fxVCSFEwzqfrRxZru3JfcbauimnvsnBJEKIlsAqp2EAzJgxgw0bNrBjxw4iIiIYMGAASUlJ7Nq1Cx8fHxYuXKiof+3aNeLj40lNTTVp6/nnn2fz5s1s2LCByMhIevfuzbVr14iLi0Or1fLmm2/Sq1evhvrShBCiQRgns8bzjmvLeGTZeJqHEEI0R1Y5sgzg6OjI5s2befXVV3F2dmbFihUkJSUxceJE9u/fT3h4eI3bsrOzY82aNbz77rt4e3uzdu1ajhw5wqBBg1i1ahUvv/xyPX4lQgjROIynSZi7uK+M8ci0TMMQQrQEVjuyDODk5MTrr7/O66+/Xm3dmTNnMnPmzErv29nZ8cILL/DCCy9YMEIhhLBexiO/dZ2GYZIs5ySj1+tb/J6vQojmzWpHloUQQpgvX5PP1YKrirK6TsMwfr6gpIC0wrpv+C+EENZMkmUhhGiGjEeVVagIcguqU5u+zr7Yq5W7achUDCFEcyfJshBCNEPGi/vauLTBwcb0FNLaUKvUJvOeZUcMIURzJ8myEEI0Q8YjvnWdr1xG9loWQrQ0kiwLIUQzZDwNo647YZQxTrpl+zghRHMnybIQQjRDlt5j2dBOBTtiCNGcqVQqq9vxpexAtap2AROWI8myEEI0Qw01DeN89nn0er1F2hZCCGskybIQQjQzhSWFXM6/rCirr2kYOZocMo
oyLNK2EEJYI0mWhRCimbmQc8GkzFLJsp+LH7Zq5XlWsshPCNGcSbIshBDNjPE8Yh8nH5ztnC3Stq3aliBX5X7NMm9ZiFKZmZl8/PHH3HLLLbRt2xYHBwe8vLwYOXIk69evr/CZwYMHo1KpSExM5NtvvyUmJgZnZ2d8fX155JFHSElJqfH7U1NT+e9//8ugQYMIDAzE3t4ePz8/xo0bx549eyp9Li8vj3fffZeePXvi7u6Oi4sLUVFRTJkyhVOnTpnU37VrF3fffTf+/v7Y29sTFBTE5MmTSU5unv8WWPVx10IIIWovKTtJcW2pUeXr20vMTqz0faL50el1ZBZlWq49nY6cohxKCktQq+t33K6VQyvUqoYZG4yLi+Ppp58mNDSUDh060KdPH5KTk1m3bh3r1q1j/vz5TJo0qcJn33//fT777DMGDBjAmDFjiIuL4+uvv2bTpk3s3LmTgICAat//22+/8eKLL9KhQweio6Nxd3fn9OnTLF++nNWrV7N69WpGjBiheCY1NZXhw4dz7NgxWrduzeDBg3FwcODs2bN88cUXREREEBkZaaj/2Wef8dRTTwFw4403MmDAAOLj41mwYAErV65k69at3HDDDXXoResjybIQQjQz1yeyAGEeYRZtP9QjlG0p2wzXkiw3f5lFmQz6aVBjh2GWrfdsxdPRs0He1aFDB3bu3Env3r0V5QcOHGDo0KE8++yzTJgwAVdXV5Nnv/zyS1avXs2tt94KgEaj4dFHH+W7775j6tSp/Prrr9W+v1+/fhw9epROnTopyteuXcsdd9zBv/71L06fPq3Y3eOhhx7i2LFjTJgwgQULFihiS0xMJDs723Bd9suAv78/v/32GzExMYZ7CxYsYPLkyTz66KPExcVVG2tTItMwhBCimUnMSlRct3Vva9H2Q91Dq3yfEC1VWFiYSaIM0L17d6ZMmUJ2djabN2+u8NkJEyYYEmUAOzs75s6di7OzMytXruT8+er3NO/SpYtJogxwyy23cPfdd5OQkMDRo0cN5bt372bjxo34+voyf/58kyQ+NDSU6Ohow/U777yDVqvliy++UCTKAI899hh33HEHu3bt4sCBA9XG2pTIyLIQQjQzxiPLxsltXRm3l5SdhE6va7CPuoWwZlqtlo0bN7Jjxw5SU1MpKioC4PTp04r/NXbvvfealHl5eTFixAhWrFjB9u3bue2226p9f1FREX/++Se7d+/m6tWrFBcXA3DkyBHD+7t06QLAhg0bALjvvvtwc3Orsl2dTsfGjRtxdnbmlltuqbDOgAEDWLlyJbt376Z79+7VxtpUSLIshBDNSHZxNumF6Yqyth6WHVk2Hqku1BZyJf8Kfi5+Fn2PEE3NhQsXuP322zl06FCldXJyciosb9u24v9OQ0NDgdK5xdU5cuQId9xxB4mJiTV6f9lodbt27apt+9q1a+Tm5gJgb29fbd3mRJJlIYRoRpKylPOHbVQ2BLtadoGfr7MvzrbO5JfkG8rOZZ2TZLkZa+XQiq33bLVYezqdjpycHNzc3BpkgV9DmTx5MocOHeKuu+7ihRdeoEOHDoavcd68eTzxxBP1doiPXq9nwoQJJCYm8uSTT/Lkk08SHh6Oq6srKpWKl19+mbffftvs9+t0OgBcXV256667qqxb0VSQpkySZSGEaEaMp2AEuQVhZ2Nn0XeoVCraurflRPoJQ1lSdhJ9AvpY9D3CeqhVaosuktPpdNgW2+Lu6F7vyXJDycvLY/369bRp04affvoJGxsbxf2zZ89W+XxSUpJifvD15QD+/v5VPn/y5ElOnjxJz549+fzzz03uV/T+4ODSX6QTEhKqbBvA29sbR0dH1Go1ixYtsrojwOtT8/gOFUIIAZgmy5Ze3FfGZJGf0XuFaGmysrLQ6XT4+/ubJMoajYbly5dX+fzPP/9sUpaens66detQqVT069evyuczMkpP0gwKCqrwXkX7PN98880A/PDDD4YpFpWxtbVl8ODBZGdns3
HjxirrNjeSLAshRDNivDOFpRf3Gdr1ULYrybJo6Xx9ffHw8ODo0aPExsYayrVaLS+++GKFh3tc76effmLt2rWG65KSEp599lny8vK4/fbbCQkJqeJpaN++PWq1mk2bNikWERYWFvLkk0+Snp5u8kyvXr0YMmQIV65c4fHHHycvL09xPzEx0bAwEOCVV15BrVbz6KOPsmXLFpP2cnNzWbhwIQUFBVXG2tTINAwhhGhGjPc8rq+RZeN2Zfs40dxVtCVcmcmTJzN58mReeOEFXnnlFQYNGsTQoUPx9PRk165dXL58mSlTpvDpp59W2sbjjz/OqFGjGDhwIP7+/uzatYtz584REBDAJ598Um18vr6+PPbYY3z11Vd07dqVoUOH4uTkxLZt29BqtUycOJHFixebPPfNN98wbNgwfvjhB9auXUv//v1xcHAgISGBgwcP8sEHHxh2z+jfvz+ffvopU6dOZciQIXTu3JnIyEjs7OxITEzk4MGDFBUVMW7cOJycnKrv1CZCkmUhhGgmdHqdSbJs6QNJyhiPLF/MvUixthh7m6pXyQvRVO3atavSeyNHjgTg5ZdfJigoiA8//JDY2FicnJzo378/r7/+Ovv376+y/f/85z/07NmTuXPnsmvXLlxcXHjooYd46623CAoKMiywq8rnn39OVFQUCxYsYOPGjXh4eHDzzTfz5ptvsmjRogqfCQwMZM+ePXz44YcsW7aM9evXY2NjQ1BQEP/617+4/fbbFfWffPJJevfuzYcffsiWLVtYvXo1zs7OBAYG8sADDzBu3Dg8PDyqjbUpUenra1lmM5ednY2HhwfXrl3Dy8urscNpEjQaDWvWrOHWW2/Fzs6yC46aM+m32mupfXYp7xLDlw1XlG28eyO+zr41er42/ZZbnEufH5QL+pbfsZz2rdvXLugmrrl9rxUWFnLu3DnCwsJwdHSst/fodDqys7Nxd28+C/zMNXjwYLZu3cq5c+cM28RVRPqsXG2+T9PS0vD29iYrKwt3d3ez3teye1sIIZqRc1nnFNfOts74OPnUy7tc7V1N2pZjr4UQzZEky0II0UyYnNznEVqv2zsZT8U4l32u4opCCNGESbIshBDNREMt7qusfVnkJ4RojmSBnxBCNBPGyWqYe/0s7itjvC2dTMMQovYq2oJNWBcZWRZCiGaioQ4kKSMHkwghWgJJloUQohko0hZxMfeiosx4TrGlGbefWZRJZmFmvb5TCCEamiTLQgjRDCRnJ6NHuRNofY8sB7gGYKtSzuaT0eXmQXaVFdasob8/JVkWQohmwHi+sK+TLy52LvX6Tju1HUFuQVXGIZoWGxsboHT/aCGsVdn3Z9n3a32TZFkIIZoBk/nKHvU7qlxG5i03L3Z2djg4OJCVlSWjy8Iq6fV6srKycHBwaLCDgGQ3DCGEaAaMd8IwTmLrS6hHKFwov5aR5abP29ublJQULly4gIeHB3Z2dhbfr1un01FcXExhYWGLP42uplp6n+n1ejQaDVlZWeTm5hIYGNhg75ZkWQghmgGTA0kaKlk2eo/xKYKi6Sk7EvjatWukpKTUyzv0ej0FBQU4OTnV68E5zYn0WSkHBwcCAwPNPrraHJIsCyFEM2A8olvfO2GUMV5EmJydjE6vQ61qeSNfzYm7uzvu7u5oNBq0Wq3F29doNPz1118MHDiwwT5Kb+qkz0rnKDfG1y7JshBCNHGZhZlkFmUqyhp0GsZ1inXFXMy9aLLwTzRNdnZ29ZKc2NjYUFJSgqOjY4tN/GpL+qzxyK/+QgjRxCVkJSiubdW2BLgGNMi7vRy9cLNzU5SdzTrbIO8WQoiGIMmyEEI0ccbJaah7KLbqhvngUKVSEd4qXFEm85aFEM2JJMtCCNHEnc1UJsvhHuGV1Kwfxu+TkWUhRHMiybIQQjRxxiO5xiO99c0kWc6UZFkI0XxIsiyEEE2c8ZzlBh9ZNkrOE7IS5EALIUSzIcmyEEI0YfmafC7lXVKUNfY0jJziHN
IK0xo0BiGEqC+SLAshRBNmPAVDrVI32B7LZQJcA3C0cVSUyVQMIURzIcmyEEI0YcaL6QJdA3GwcWjQGCpK0GWRnxCiuZBkWQghmrCETOV85XYe7RolDuOpGMZxCSFEUyXJshBCNGHGI7hhrcIaJQ7jZFn2WhZCNBeSLAshRBNmsm1cAy/uM7y3ley1LIRoniRZFkKIJqpYW0xyTrKirNGSZaP3Xi24SnZxdqPEIoQQliTJshBCNFFJ2Uno9DpFWWMlyyFuIdiqlEdsy44YQojmQJJlIYRoooynOvg6++Jq79oosdjZ2BHsHqwok3nLQojmwKqT5YKCAl577TUiIyNxdHQkICCASZMmkZKSUqt2QkNDUalUlf45efJkPX0FQghRf4yT5cYaVa7s/TJvWQjRHNhWX6VxFBYWMnToUOLi4vD392fMmDEkJiayaNEiVq9eTVxcHOHhtfvB8Mgjj1RY7uHhYYmQhRCiQRlPc2jXqnG2jSsT7hHORjYarmX7OCFEc2C1yfLs2bOJi4ujT58+rFu3DlfX0o8W58yZw3PPPcekSZPYsmVLrdpcvHix5QMVQohGYnUjy7IjhhCiGbLKaRjFxcV88sknAHz66aeGRBlg2rRpREdHs3XrVvbt29dYIQohRKPS6rQkZiUqysI8GmeP5TLGyfrF3IsUlhQ2UjRCCGEZVpksx8bGkpWVRbt27ejevbvJ/fHjxwOwatWqhg5NCCGswsXcixTrihVljT2yHOoeqrjWoycxO7FRYhFCCEuxymkYhw4dAqBHjx4V3i8rP3z4cK3afe+990hISMDBwYFOnToxduxYfHx86hasEEI0goQs5XzgVg6t8HT0bKRoSjnbORPoGkhKbvki7ITMBKI8oxoxKiGEqBurTJaTk0s32Q8KCqrwfll5UlJSrdp94YUXFNfPPvssH3/8MZMmTar22aKiIoqKigzX2dmlm+1rNBo0Gk2t4mipyvpJ+qt2pN9qryX02en004rrUPdQSkpK6tSmJfot1C1UkSyfST+DJrj5/j20hO+1+iD9VnvSZ+axRH9ZZbKcm5sLgLOzc4X3XVxcAMjJyalRe3fccQdDhgwhJiYGHx8fzp49y8KFC5k7dy6TJ0/Gy8uLMWPGVNnG22+/zaxZs0zKN2/eXGmcomLr169v7BCaJOm32mvOfbYtf5vi2jbLljVr1lik7Tr1W4HycuepnYSmhNYpnqagOX+v1Sfpt9qTPqud/Pz8OrdhlcmypX300UeK606dOvHBBx8QFRXF448/zosvvlhtsjx9+nSmTZtmuM7OziY4OJghQ4bg5eVVL3E3NxqNhvXr1zN8+HDs7OwaO5wmQ/qt9lpCn/249kdIK78e2Hkgt0bdWqc2LdFvRWeKiN0da7jOd8rn1lvrFpc1awnfa/VB+q32pM/Mk5aWVn2lalhlsly2+0Vlvw3k5eUB4ObmVqf3PPbYY8yYMYP4+HgSExMJDQ2ttK6DgwMODg4m5XZ2dvJNW0vSZ+aRfqu95tpnOr3OZFu2SM9Ii32tdem3SK9IxfX5nPPo1XrsbewtEZrVaq7fa/VN+q32pM9qxxJ9ZdZuGEuWLKGwsP62AwoJCQHgwoULFd4vK2/btm2d3qNWq2nXrnQT/9TU1Dq1JYQQDeVi7kXyS5SDCRGtIxopGqX2rdorrkv0JXLstRCiSTMrWX700UcJCAjgqaeeMuxcYUldu3YFYP/+/RXeLyuPjo6u87syMjKA8nnQQghh7c5knlFcu9u74+NkHTv7uNq74u/irygzjlcIIZoSs5LlyZMnU1JSwqeffkqPHj3o3bs3CxYsMEyPqKt+/frh4eFBQkICBw8eNLm/bNkyAEaPHl2n9xw7doz4+HicnZ2JipKtjYQQTYNx8tm+VXtUKlUjRWPKeHRZkmUhRFNmVrI8b948UlNTmTdvHjfeeCO7d+/m8ccfJyAggCeffJK9e/fWKSh7e3umTp0KwJQpUxRJ+Jw5czh8+DCDBg
0iJibGUP7JJ58QFRXF9OnTFW2tWbOGTZs2mbzj8OHD3H333ej1eiZPnoy9ffOeTyeEaD5OZyi3jbOWKRhljOM5kyHJshCi6TL7BD8XFxcmT55MXFwchw8fZsqUKdja2jJv3jxuuukmunfvzhdffGHYj7i2ZsyYwU033cSOHTuIiIjgnnvuoXfv3jz33HP4+PiwcOFCRf1r164RHx9vMvd49+7dDBs2jNDQUMaMGcN9993HTTfdRExMDCdOnGDw4MG888475naDEEI0OOOR2ohW1pUsG48sn848XUlNIYSwfhY57rpz58589NFHXLx4kW+//ZaBAwdy6NAhpkyZQkBAAI899hj79u2rVZuOjo5s3ryZV199FWdnZ1asWEFSUhITJ05k//79hIfX7FjXW265hUmTJuHu7k5sbCzLli3jzJkz9O/fn6+++ooNGzbg5ORkzpcthBANTqPTmCyYa9+6fSW1G4fxyHJKbgp5GstM0xNCiIZm0a3jNBoNOTk5hsNC9Ho9Go2GRYsWsXjxYsaOHcv8+fNp1apVjdpzcnLi9ddf5/XXX6+27syZM5k5c6ZJeZ8+fejTp09tvgwhhLBaydnJaHTKE6mMR3IbW5hHGGqVGp1eZyhLyEwg2qfui7KFEKKhWWRkOS4ujsceewx/f3/+9a9/cfjwYcaNG8e6devIzs7mu+++o0uXLixfvpynn37aEq8UQogWyXhKg6+TLx4OHo0UTcUcbBwIcQtRlMkiPyFEU2X2yHJGRgbffPMNX331FcePH0ev1xMcHMyLL77I5MmT8fPzM9S97777uPvuu+nevbvFjmMVQoiWyHixnLVNwSgT0TqCxOxEw7XxokQhhGgqzEqWH3zwQX799VeKiopQqVSMGjWKJ598kltvvRW1uuLBaltbW2688UaWLFlSp4CFEKIls/bFfWUiWkWwPmm94VoW+QkhmiqzkuXvv/8ePz8/Jk2axOOPP244ca86Y8eOrfOpe0II0ZKZ7LFspSPLxnHJ9nFCiKbKrGR56dKljBkzBlvb2j0+evToOh8kIoQQLVVhSSHJ2cmKMmsdWTZedJhWmEZ6YTqejp6NFJEQQpjHrAV+eXl57N69u9p6cXFxfP311+a8QgghhJGErAT06A3XKlSEeYQ1YkSVC3YLxl6tPOwpITOhkaIRQgjzmZUsT5w4kfnz51dbb8GCBTz66KPmvEIIIYQR46kMQW5BONs5N1I0VbNV2xLeSrkf/qmMU40UjRBCmM8iW8dVRqfToVKp6vMVQgjRYjSVxX1ljOOT7eOEEE1RvSbLZ8+exd3dvT5fIYQQLYbxjhLWurivjCzyE0I0BzVeoWd8it7BgwcrPVmvpKSE+Ph4/vrrL4YPH163CIUQQgCmyaa1jywbL/I7k3kGvV4vnzgKIZqUGifLM2fORKVSGf6hO3jwIAcPHqzyGV9fX9566626xiiEEC1ednE2l/MvK8qs7ZhrY8bJfK4ml8v5l/Fz8avkCSGEsD41TpYXLVoEgF6vZ9KkSfTv35/HHnuswrr29vYEBATQu3dvHBwcLBOpEEK0YMajyrZqW9q6W/e+9X4ufrjYuZCnyTOUnco4JcmyEKJJqXGy/Mgjjxj+/5IlSxg1apSiTAghRP0xXhwX6h6KnY1dI0VTMyqVivat2nPo6iFD2ZnMMwwMGtiIUQkhRO2YdSjJ5s2bLR2HEEKIKhhvu2bt85XLRLSOUCTLsn2cEKKpqdfdMIQQQlhGfHq84jrSM7KRIqmdyNbKOI2/DiGEsHY1GlkeOnQoKpWKJUuWEBQUxNChQ2v8ApVKxcaNG80OUAghWjqdXmcyItuhdYdGiqZ2jONMzEqkWFuMvY19JU8IIYR1qVGyvGXLFlQqFfn5+YbrmpItgoQQom5SclLIL8lXlHXwbBrJsvHIcom+hITMBG7wuqGRIhJCiNqpUbJ87tw5AAIDAxXXQggh6t/JjJOKa09HT3ycfBopmtpxtXclyDWIC7kXDG
Un009KsiyEaDJqlCy3bdu2ymshhBD1x2S+cutIy39qpymAglxsdEWWbZfSUfDrk2VZ5CeEaErM2g1DCCFEw4nPUCbLFpmvnHoYTq6GpB1w6QgUZmIH3A7oTz4HbTpD2z4QdRsE9IA6JOcdWndgY3L52hXjr0cIIayZWbthXL58mb/++ovLl5WnSSUkJHDvvffSuXNnbr31VuLi4iwSpBBCtGTGI8tmz1fW6+Hk7/DlIPhyAGx9FxK3QWGmopqqKBuSd8C2D+CrofBFfzj6S+nzZjCO92T6SfRmtiWEEA3NrGT5nXfeYciQIWRlZRnKsrOz6d+/P0uXLuX48eP8+eefDBs2jNOnT1ssWCGEaGmyirJIzUtVlJmVLKfsh8W3wY/3Q+rB2j17+SgsmwTzb4bk2g+CGMebU5zDpbxLtW5HCCEag1nJ8pYtW+jYsSORkeWrnBcvXszly5e57777iI+PZ86cORQUFPDBBx9YLFghhGhpjOf32qntCPMIq3kDOi1sfQ/mD4Ok2LoFk7IXFt4CG2aBtqTGjwW4BOBm56Yok6kYQoimwqxkOSUlhfDwcEXZ77//jq2tLR9++CERERE888wzdO3ala1bt1okUCGEaImMp2C0b9UeO3UNj7nOvQrf3gWbZ4NeZ3rfPQj6TIV7f4Cpe9E8fYQNN7xLyYTvod+/oVUli7m3z4Elt0P2xRqFoVKpTA5ROZl+spLaQghhXcxKlnNycnB2djZca7Vadu7cSUxMDN7e3obyqKgoLly4UFETQgghasB4BNZ43+JKZSTBguFwdrPpPY8QuGsBPHMYbnkTom4F7whw8yfP0R99xAgY/jo8fRAmfAOe4aZtJO8snZZxrWZT7YwXJcqOGEKIpsKsZDkgIICTJ8tHBbZv305ubi6DBw9W1CspKcHeXk5pEkIIc5m1uO9qPCwcCRlGe+Kr1DB4OkzdA13Gg9qm6nbUauh4B/xrV2nybDyinZ1S+p7Uw9WGZBy3HHsthGgqzEqW+/Tpw+HDh/nwww85cuQIM2bMQKVSMXr0aEW9EydOGA4yEUIIUTsanYaEzARFWZRnVNUPXTkJi0ZBjtEUCVc/eGQ1DH4J7BxrF4itfem0jElrS0elr5d/DRbfDhcPVtmEcbJ8Puc8eZq82sUhhBCNwKxkefr06Tg4OPDcc8/RrVs3YmNjGTx4MH379jXUSUxM5Pjx49x0000WC1YIIVqSxKxEinXFirIqp2Fkp8J34yE/TVnu2wme+AtC+9UtoKAYeGIrBMYoy4uy4Lu7ISOx0kfbt2qPjap8JFuPntMZsluSEML6mZUsd+rUie3bt/Pggw8ycuRIZsyYwYoVKxR11q5dS9euXbnzzjstEKYQQrQ8xvOV/Vz88HDwqLhyYXZpopx1Xlke2BMmrga3NpYJytkTHv4NQgcoy/OuwLfjIT+9wsccbBwIdQ9VlMlUDCFEU2D2CX49evRgyZIlld5/4okneOKJJ8xtXgghWrxT6cpFcFGtK5mCodPC0oml+yFfL7AnPLwCHNwqesp8Dm7wwNLS0eTEbeXlaadL93F+ZBXYmO7Y0cGzAwlZ5dNKZPs4IURTYNbIshBCiPpnvL2a8fZrBn+9BwkblWWe4XD/T5ZPlMvYOcE934JvR2V58k7YMLPCR2SRnxCiKTJ7ZLlMcnIyqampFBUVVVpn4MCBdX2NEEK0OMYjr8bbrwFwZiNseUdZ5uwND/4CLt6m9S3JqRU8sKx0i7rslPLynZ9A8E2lO2lcxzj+05mn0eq02FS3K4cQQjQis5PlhQsX8sYbb5CcnFxtXa1Wa+5rhBCiRbpWcI30QuX8X5OdMLJS4JfJgL68TGUD91SyN3J98AiEe7+DBSNAe91ixN+mQJtO4NXOUGQ8slxQUsD5nPOEeoQ2TKxCCGEGs5LlRYsWMXnyZAA6d+5MZGQkbm719FGfEEK0QMZTMJxtnQlyCyov0Oth5VQoMFpQd/NMaNuXBhXQHUa9C6ufLS
8ryoYV/4RH/zDs5+zt5I2XoxdpheW7dZxMPynJshDCqpmVLM+ZMwdbW1uWLVvGHXfcUf0DQgghauV42nHFdWTrSNSq65aZ7FsECZuUD0XdDn2faoDoKhDzKCTHweGfysvO74Kdn0K/pw1FUV5RxKbEGq6Ppx9nZNjIhoxUCCFqxawFfqdPn2bgwIGSKAshRD0xTpY7el23kC79HKydoXzAzR/GfAIqVQNEVwGVCm6bA61DleWbZpcelPK3jp7KBYEn0k40QHBCCGE+s5JlT09PvL3reeGIEEK0YMZJpCFZ1uth5VNgfPrdHZ+AU+sGiq4SDq5w5+fAdQm7tghWPFm6vR1GST+lvxTo9XqEEMJamZUsjxkzhtjYWDQajaXjEUKIFi+zMJOLecrjqg1J5qEflXsbA/R4BCJubqDoqtG2L/SZoiy7eAD2LgRMk+Xs4mxSclMQQghrZVay/NZbb+Hi4sKjjz5KRkaGpWMSQogW7Xi6cgqGo40jYR5hUJAB64ymX3iEwC1vNmB0NTB0BnhFKMs2vgE5l/F38Tc5hfBEukzFEEJYL7MW+D333HN07NiRH374gd9//52YmBiCgoJQq01zb5VKxYIFC+ocqBBCtBQmi/s8I7FV28LG1yH/mrLyre/V38Ej5rJzgtv/B0tuLy8ryoJ1M1Dd9RUdPTuyM3Wn4dbxtOMMbzu8EQIVQojqmZUsL1682PD/s7Ky2LRpU6V1JVkWQojaMU6Wb/C8AVL2wd5FyopRt0MHK91JImwARN8Lh38sLzvyM/R4iBu8blAky7LITwhhzcxKljdv3mzpOIQQQvzNOHns5NkR/nwZxeEjds4w8u2GDay2RrwB8X+UjiqXWfsyHW95RVGtbJGfqrF28hBCiCqYlSwPGjTI0nEIIYQAsoqyuJB7QVF2Q/YVOB+nrDjweWgV0oCRmcHVF4a9Cmv+U1526QgdLycoqmUUZXA5/zJ+Ln4NHKAQQlTPrAV+Qggh6ofxYjd7tT3tYj9TVmrV1nTHCWsV8yj4KI/pDtr+MW52ynnWx9KONWRUQghRY3VKltPS0pg7dy4PPPAAt9xyC//9738N944dO8bKlSvJz8+vc5BCCNFSGE/BiLTzwC4jSVnp5plg69BwQdWFjS2MUO7Wocq9REcbZ0WZ8TxtIYSwFmZNwwBYunQpkydPJjc31zDXLDAw0HA/JSWFsWPHsmTJEh588EGLBCuEEM2dyeK+dKM9iIN6QaexDRiRBUTcDO2GKo7nvuFqIrvcnAzXsshPCGGtzBpZ3rlzJ/fffz+2trZ88MEH7N692+QEpmHDhuHh4cGvv/5qkUCFEKIlMDnmuiBHWeGWtxrvSOu6GDEbVOU/cjoWKE8glJP8hBDWyqyR5bfeegu1Ws369evp0aNHhXVsbGzo0aMHR48erVOAQgjRUuQU55Cck6wou6GouPwi6nYIvrGBo7KQNp2g631w8DvA6OsC0grTuFpwFV9n38aITgghKmXWyPKOHTvo06dPpYlyGT8/P1JTU80KTAghWpqT6ScV17Z6PRHFmr+vVDDk5YYPypIGvQhqOwBCSkpw0ekUt2XeshDCGpmVLOfn5+Pj41NtvboehV1QUMBrr71GZGQkjo6OBAQEMGnSJFJSUqp/uAqnT5/GyckJlUrFzTffXKe2hBDCUoyTxYhiDfZlF53Glo7ONmWt20LMI0DpDx/j0WVJloUQ1sisZDkwMJBjx6re5kev13P06FHCwsLMCqywsJChQ4fyxhtvkJuby5gxYwgODmbRokV0796ds2fPmtUuwOOPP05RUZHZzwshRH0wma9c/HcyqVLD4OmNEFE9GPAfsHUE4IZiZbIsi/yEENbIrGR55MiRxMfH8+OPP1ZaZ/78+Zw/f57bbrvNrMBmz55NXFwcffr04dSpU/z000/s2rWLDz74gKtXrzJp0iSz2l2wYAFbtmzhH//4h1nPCyFEfTFJlstGXrtMAJ/IRoioHrj7Q6/Sf387ys
iyEKIJMCtZfumll/Dw8ODhhx/mxRdfJC6u9GSpvLw8Dhw4wGuvvcZTTz2Fj48Pzz77bK3bLy4u5pNPPgHg008/xdXV1XBv2rRpREdHs3XrVvbt21erdi9fvszzzz/P8OHDue+++2odlxBC1Jfc4lySspX7Kd9QVAwqGxj0QiNFVU/6Pg22juUj53+7UnCFK/lXGikoIYSomFnJclBQEL///jve3t6899579OvXD5VKxbJly+jZsyezZ8+mVatWrFy5El/f2q9sjo2NJSsri3bt2tG9e3eT++PHjwdg1apVtWr33//+NwUFBXz22WfVVxZCiAZ0LO0Yesq3TrPV64nUFEO3+8CrXSNGVg9cfaHHI7TVmC7yO3pNdlASQlgXs0/w69OnD/Hx8cyZM4eRI0cSFRVFZGQkQ4cO5Z133iE+Pp6bbrrJrLYPHToEUOluG2Xlhw8frnGba9as4aeffuLll1+mffv2ZsUlhBD15ci1I4rrqOJiHLCBgc/X+7t1Oj0ZecWk5RZRUALFJbrqH6qrfk9jo7ajs9FUDEmWhRDWxuwT/ADc3Nx45plneOaZZywUTqnk5NJ9RoOCgiq8X1aelJRU4X1jeXl5/Otf/6JDhw68+OKLZsVUVFSkWBSYnZ0NgEajQaPRVPaYuE5ZP0l/1Y70W+01xT47fPmA4rpzUTG6jneidQ0EC34dV3OK2HbmGkcv5nAiNZtz1/LJyC9GZxjUtuWlPRvwcbUn2NOZCF8XbmzbmhtDWxPQyqmqpmvHuQ02XSbQOXk1u5wcDcWHL+1vUn9vTfF7zRpIv9We9Jl5LNFfdUqW60tubi4Azs7OFd53cXEBICcnp8L7xmbMmEFSUhKbN2/G3t6++gcq8PbbbzNr1iyT8s2bN1cap6jY+vXrGzuEJkn6rfaaUp8dzIiD6w7m61JUzFZtd7LXrKlz2zka2H1FxcE0Ncl5NTv972puMVdzi9mfnMlPe0u36wx01hPjraOHt57WDnUOCxdNNzoXLVeUHblykNW/r0atMvuDz0bRlL7XrIn0W+1Jn9VOfn5+ndswK1nesWMHmzdv5sSJE2RkZKBSqfD09KRjx44MGTLE7OkX9WHv3r189NFHPPzwwwwePNjsdqZPn860adMM19nZ2QQHBzNkyBC8vLwsEGnzp9FoWL9+PcOHD8fOzq6xw2kypN9qr6n12ZWcFDJWKacjdPKNIeSuJ+vU7rGL2Xy1PZF1xy+j0db9KOmUfBUpyTasPg8jO7Vhcv9QugR61KnNa79uhcLyKXX5Ki2dbgojzLtp7Cnd1L7XrIX0W+1Jn5knLS2tzm3UKlk+fPgwkyZN4sCB0o8L9XrlP74qVemIRa9evViwYAEdO3Y0K6iy3S8q+20gLy8PKJ0GUpWSkhL+8Y9/0KpVK95//32zYinj4OCAg4PpUIqdnZ1809aS9Jl5pN9qr6n02cmjXyuu3bQ6woa8gNrM2E9fzmHO+lP8cfSSJcIzodPDmqOXWXP0MoMifXj51hvo4Ff1v8eV8R/8Mr6rx3PFtvzH0ckjS4gc8T9Lhdsgmsr3mrWRfqs96bPasURf1ThZ3rNnD0OHDiUvLw8XFxdGjRpFt27d8Pb2Rq/Xc+3aNQ4cOMDatWvZtWsXffr0YcuWLRXuZlGdkJAQAC5cuFDh/bLytm3bVtnOhQsXOHjwIH5+ftx9992Ke5mZmQDs27fPMOK8ZcuWWscqhBB1otdzJP43uG6GWCeVI+rQ/rVuKq+ohA83nGJhbCJaXeUjya4OtvRt50WXQA86Brjj5+GIp4s9Kp2WP9dvpGefAaTmaDh7NZe9SRnsSUwnM7/ieX9bT11l2+mr3NsrhBdu6UAr51pOdWvTiS52rdmoL59WdyRxA3fotKC2qV1bQghRD2qULGu1Wh544AHy8vJ47LHH+OCDD3B3d6+wbnZ2NtOmTWPhwoXcf//9HD9+3DDiXFNdu3YFYP/+/RXeLyuPjo6uUXuXLl3i0qWKR1gyMz
PZunVrreITQgiLObOBo9psoHyRW5egflDLfzc3x1/h5V+PkJpVWOF9e1s1t0f7M657EL3CPLG3NZ0TrNFoaO0AHfzc6BxsB7ThCUCr07PrXBorD15k9eFUcotKFM/p9PD9rmTWH7/M22O7cHPHNrWKvXPoMDaeW2G4PkoRxP8BN9xeq3aEEKI+1GgFxW+//caZM2e45557+OqrrypNlAHc3d2ZP38+d999N6dOnar1XsgA/fr1w8PDg4SEBA4ePGhyf9myZQCMHj26ynZCQ0PR6/UV/tm8eTMAw4YNM5QJIURD0+74hKMOytHYzpFjavx8oUbLzJXHeHTRngoTZQ8nO/4zIpLdLw9jzoRu9I/wrjBRroqNWkXfdt68c1c0sS8N5cWRUfi4mU5Lu5pTxOSv9/Lcz4fILy6poKWKdWmvPOn1pL09RTs/qVWMQghRX2r0L+aqVatQq9W89dZbNW747bffBmDFihW1Dsre3p6pU6cCMGXKFMMcZYA5c+Zw+PBhBg0aRExMjKH8k08+ISoqiunTp9f6fUII0SiunCDxwnby1Mp/irv41OxTs+S0fO78NJbFOxJN7tnbqHlqaHu2vTiEqUMjaj89ohIeTnb8c3A7tj4/mH8Pi8DRzvTHyC/7L3Dnp7EkXM2tUZsdvTtdvxEIJSoV8Zf3QUrtTmkVQoj6UKNked++fXTo0IGwsLAaNxweHk5UVFStj6QuM2PGDG666SZ27NhBREQE99xzD7179+a5557Dx8eHhQsXKupfu3aN+Ph4UlNTzXqfEEI0uF1fcMRo4XAbZ198nH2qfXRHwjXu+HQ7Jy+ZbqHZv703a58dyHMjOuDuWD8LgZztbXl2eCSbnhvMkA6m8Z66nMsdH29n/fHL1bblZu9GmLvy58sRB3vYKaetCiEaX42S5dTUVCIjI2vdeGRkJBcvXqz1cwCOjo5s3ryZV199FWdnZ1asWEFSUhITJ05k//79hIeHm9WuEEJYhfx0OPSTyRSMaJ+u1T76w+5kHlqw22TRnYOtmjfu7Mw3j/UizNvFouFWJqCVEwsn3sh746NxdVAug8kr1vLEN3tZUsHItzHj0fSjDg5wfAVkVbzQWwghGkqNkuWsrCw8PGq/l6a7u7vhpDtzODk58frrr3PmzBmKiopITU1l0aJFFZ7sN3PmTPR6PYsXL65R24MHD0av17Nhwwaz4xNCCLPtWwwlBaUjqNfp7N250kf0ej0fbzzN9F+PmOx2EeHryqqn+vNQ77a1XlRdVyqVirt7BrPqqf5EGW0hp9PD/608xuzVx9FVsUNHF+8uiuuj9vagK4Hd8+olZiGEqKkaJcslJSWo1bU/TUmtVlNSUvNFHkII0SJoNbBnPoUqFaeMThU1ThrL6HR6Zq06zgfrT5ncu/mGNiyf0o/INubtdWwpYd4uLP9XP8Z1DzS5N3/7OV5ebprkl+nso/wlIdHejiy1qvSXiuK8Cp8RQoiG0LTOExVCiObgxCrITuGkvR0l140Cq1DR0cv0MCe9Xs+rvx2tcCHfvwa3Y95DMSZTIBqLk70NH0zoyrThplP3ftxznv8sPUSJVmdyL7JVJPZq5S8OxxwcoDALjiyrt3iFEKI6NU6WlyxZgo2NTa3+fP3119U3LIQQLU3c58Df83Kv065VO1zslHON9Xo9r/12jO92JSvKVSqYdUcnXhgZhVrdsNMuqqNSqXh6WAQf3N0VG6PYlh9I4T9LD5lMybCzsSPKK0pRZpiisvsrkO09hRCNpMbJcmX7FVf3RwghxHUuHoALuwFM5isbT8HQ6/XM/v0E38QlKcpt1So+vKcbj/QNrddQ6+qumCA+f6AH9jbKHzUrDl7k1d+OmvyMiPZWLvIz7BRy+Qic31WvsQohRGVqlCzrdDqz/2i12vr+GoQQounYW77t5SGjkWXjxX3zt51jwfZzijIbtYqP7+vOmG6m84Kt0YhOfsx7OAYHo4NQvtuVzDt/nlSUGX/9hx3sMaTTu7
+qxyiFEKJyMmdZCCEaynXzb6/aqEmxU84z7ubbzfD/fzuYwptrTiju26hVfHRvd0Z18a/3UC1pcAdf5j3cEzsb5ZSML7ee5ZudiYbr679+gAwbG5Js/+6j479B7pV6jlQIIUxJsiyEEA3l0E+gyS/9v0ajyq52rrTzaAfAjjPX+M/SQyaPvzc+mtuim1aiXGZQpA8f3dsd4+nV/7fyGJvjS5PgAJcAfJyUB5wcdPy7n3Qa2LekIUIVQggFSZaFEKIh6PWwd4Hh0pAE/i3aJxobtQ3xl3J44pt9aLTK+bwvjYpiXA/TPeabklFd/Hn3LuW8ZJ0epn63nxOp2ahUKpPR5YPX/1KxbxFoZTtSIUTDkmRZCCEaQvJOuFo+R/eg0chyN59uZOYX84+v95JTpEwIH+nTlicGNo9TS+/uGczTwyIUZXnFWh5bvIcr2YV0NTrB8JDjdYsgs1Mgfk1DhCmEEAaSLAshREPYUz6qXKSC40bJchfvrvz7x4Mkp+crykd28uO10Z0a/FS++vTszRGM6RagKLuYVcg/vtlHJy/lyPMZe3uyr5+7sWd+Q4QohBAGkiwLIUR9y71aukDtbyfs7dFcl/+pULH1iCNbT11VPNY1uBUf3tvNZK/ipk6lUvHuXdH0bNtaUX7ofCbL4zA5nOTw9b9YnNsK6WcbIkwhhAAkWRZCiPp38NvSBWpll07Kg0f8nML4amuqoszb1YEvH4zB0c6mQUJsaI52Nsx7uCdtvZwV5d/FXcTPsb2i7KCLh/Lh/XLglRCi4UiyLIQQ9Umng72LFEUHfdoqri9e8lVc26pVfPZAD/w8HOs9vMbk6WLPFw/G4Gin/FGUmGK0I4aX0Z7SB74DrQYhhGgIZiXLs2bN4sKFC5aORQghmp+ETZBZfgKfHjioVi7gK8wNUVy/entHeoV5NkR0je4Gf3dm36k8ubDIqD+OaHNR9FjeFYj/o/6DE0II6pAsh4WFMXr0aFauXIlOp7N0XEII0Txct10cwAW/TqQVZyvKtAXlI8139Qji4T7KkefmbnxMEPf1CjZcawuUyXK+tpAzwTHKh/YtboDIhBDCzGR59uzZhISE8PvvvzN27FiCg4N59dVXSUxMtHB4QgjRhGWlwKk/FUUH2/VVXOtKXNFrSkeRI3xdmX1n52a180VN/d/oTnQOdAdAr3VDV+yluH+wbQ/lAwmbICMJIYSob2Ylyy+//DIJCQmsW7eOu+++m7S0NN58803at2/PyJEj+eWXXygpkY3jhRAt3MHvQX/dJ2/2rhxyVi5oKx1FVWFvq+bj+7vjZN88F/RVx9HOhs8fiMHNsfR4a22+cnT5oJ0KHK9f6KeHA980YIRCiJaqTgv8br75Zn788UdSUlJ4//336dChA+vWrWPChAkEBQXx0ksvcfr0aUvFKoQQTYdOZ5rMdb6Lg2nHFUXa/NIpF6/edgNRfu4NFZ1VCvZ0ZvadnQHl1BSAg9eOQPQ9ygf2fyMn+gkh6p1FdsPw8vJi2rRpHDt2jO3bt3Pfffdx5coV3nvvPaKiohg2bBjLly+3xKuEEKJpSPxLsbAPIDd6AqcylAMIuoK2DO/Yhgd7t6x5ypUZ0y2QO7sFmCTLKbkpXO00Rlk59xKcXtuA0QkhWiKLbh2XkJDAqlWr2Lhxo6EsKCiIzZs3M378eHr16sX58+ct+UohhLBO+41GlX07sqlQg57yaRl6vQ3edu34713RLXKecmVmjemMn1Moeq3ylMPV6ekQdKOysiz0E0LUszonyxqNhh9//JFhw4YRGRnJu+++S0lJCdOmTePkyZMkJSURGxvLqFGj2Lt3L1OnTrVE3EIIYb3y0+HEKkWRrvtDfBi7TllWEMj7d8fQ2kV5Yl1L5+Fkx/8mdEdbqJy3/NWejZR0f1hZ+cwGyJRBGCFE/TE7WT5x4gTTpk0jICCABx54gM2bN9OnTx++/vprLly4wPvvv09ERAQAffr0YfXq1fTq1YutW7daLHghhL
BKR5aCtqj82saen4v6klqknK8c0aoLAyJ8EKZuCveih293RVmmLp4FGd3B3q28UF/B3HAhhLAgs5Ll/v3707lzZz788EM0Gg3//Oc/OXz4MNu3b+fBBx/EwcGhwuc6depETk5OnQIWQgirptebHMecF3YLszZewMYpWVH+5E3DGzKyJudffZT9o3a8yAdbz5AVMVZZ8eD3pQsqhRCiHtia89COHTvo0aMHTz75JPfffz/ORlshVWby5MkMHDjQnFcKIUTTcPEAXD6qKPowvQ9Ftsm4XHdynwoVfQN7NnR0TUpMm27YqGzR6kv7TaXSo7U/xxsXe/I+1/1CknUezm2FdkMaKVIhRHNm1sjynj172Lt3L5MnT65xogyl0zEeeeQRc14phBBNg9GUgDynAOZfDMHW+ZyiPKJ1BB4OHojKOdo6Eu2jPArbxvkcy1K9SHONVFY++F0DRiaEaEnMSpZ///13Vq5cWW29VatW8frrr5vzCiGEaHqK8+HIMkXRksL+6FFjY5Qsx7QxOr5ZVMi4n0r7UcWX2X2UFU+sgoLMBotLCNFymJUsz5w5kxUrVlRbb+XKlcyaNcucVwghRNNz/DcoyjZc6lDxbUF/QIuNU6KiqiTLNWOSLDtdAJWGpcV9KLl+JmFJIRz9pYGjE0K0BBbdZ9mYVqtFra7XVwghhPUwWtj3lzaai3ijdkxFZVOkuCfJcs108+mGWlX+c0Sl0mLjlEwG7qzT9lBWPvBtA0cnhGgJ6jWTPXbsGK1bt67PVwghhHVIS4DkHYqin7SDAUymYIS6h+Lt5N1QkTVprvauRHlGKcrK+nOpdpCy8sX9cOVEQ4UmhGgharwbxqRJkxTX27dvNykrU1JSQnx8PHv37uXOO++sU4BCCNEkHPxecZmmd2ODrnT0WOYr101MmxiOp5XvUW3jnAjAX7poLutb0UaVWV75wLdwy5sNG6AQolmrcbK8ePFiw/9XqVScOXOGM2fOVPlMdHQ07733ntnBCSFEk6DTwaEfFUW/afuhwRbQ4eCaxPW7AEuyXDsxbWL45nj5LiN2zskUUIIWW37VDuCfttedlnj4J7h5JtjYNXygQohmqcbJ8ubNmwHQ6/UMHTqUkSNH8uKLL1ZY197enoCAANq2bWuZKIUQwpol/gXZFxRFy7Sle8rbOl5Fp8pT3JNkuXZ6+CrnJutVxTi4pFKUF8xS7SBlspx3FU6vg6jbGjhKIURzVeNkedCg8rlhjzzyCAMGDFCUCSFEi2U0BeOELoTj+lAA+nTK4mB++b0AlwACXAMaMLimr7Vja9q3as+ZzPJPM/t0ymTL7mDO6gPYq4ukp/pU+QMHvpVkWQhhMWYt8Fu0aFGl85WFEKJFKcyG48p958tGlT1d7PHyUo44y6iyeYz7zdEtiWBPJ6CChX6n1kLulYYKTQjRzMm+bkIIURfHf4OSAsOlRm/DCm0/AP4zIpLD1w4oqkuybJ6ebZRHgx+6epDpo0pP8ftdexMFevvym3pt6dxlIYSwgBpNwwgPD0elUrFhwwbCwsIIDw+v8QtUKhUJCQlmByiEEFbNaArGFl1X0vCgc6A7vTvoefvkVcV9SZbN06ONct5yriaXtv5Z9An3YudZWKO7ibtstpVXOPAt9JkKKlUDRyqEaG5qlCwnJiYCoNFoFNdCCNGipZ812Vu5bArGzNGd2Ht5g+Kel6MXbd1l4bM5fJ19CXELITkn2VC259IeXhs9jts+2sZS7SBlsnz1JKTshyD55UQIUTc1moah0+nQ6XRERkYqrmv6RwghmiWj7eLS9a5s0vVgbPdAeoZ6svvSbsX9Xv69UMlIp9l6+fdSXO+6tIsb/N25r1cIu3RRJOl8lQ8c+AYhhKgrmbMshBDm0OnQG03B+E3bDzt7B14aFYVOr2N3qjJZ7u3fuyEjbHZu8r9Jcb3v8j40Wg3Thkfi5mhvGNUvoz/6C2gKEEKIupBkWQghzJG0HVXWeUXRL9oBTB3anjbujp
zOOE1GUYbifi8/5cioqB3j/isoKeBo2lG8XB34982R/KIdiE5fPnKvKsqGk783dJhCiGZGkmUhhDCD7oByVPmkLphMj45M6hcGwK7UXYr7ga6BBLkFNVh8zZGnoyeRrSMVZXGpcQA83KctTj5tidV1UtzX7v+2weITQjRPNUqWbWxszP5ja1vjc0+EEKJpKMpFe2yFomiZdiDPj4zC0c4GwGS+svEUAmEe49HlsqkudjZqXr29o8lUDPW5LZCV0lDhCSGaoRplssHBwbIoRQgh/lZw6FectOVzYUv0ak61GcnL0aUn85XoSth7ea/imZv8JFm2hN7+vfn2RPlo8aGrhygoKcDJ1onBHXz5NnwUOecX4aYq/ftRoSdv73e4DHuhsUIWQjRxtdo6TgghBFzdvoiQ66636rryr9v7oVaXDiocSztGniZP8YzxTg7CPDFtYrBR2aDVawHQ6DQcuHKAvgF9AZh2azd+/7w399psNjxTsPsbXIY+L3suCyHMInOWhRCiFi4nnSQke7+i7JT/aHqHexmujecrt2/VHm8n7waJr7lztXelk7dyXvL1/d0xwJ0r7e5S3PcuSib16F8NEp8QovmRZFkIIWrh4OovFNeZeheGj52oKDPeMk52wbAs4yktxv1915i7OKf3U5QlbJhX73EJIZqnGk3DSE4uPTEpMDAQGxsbw3VNhYSEVF9JCCGs3LGUDG64shqu+zT/pM8IevuXjyoXlhRy4MoBxXOyuM+ybvK/ia+OfGW4Pp5+nOzibNzt3QEIbO3MluA7CbtQ/otNdOZGDp5NpVu4f4PHK4Ro2mqULIeGhqJWqzl+/DiRkZGEhobWeMGfSqWipKSkTkEKIYQ1+O23X3hZdVVRFnXLk4rrQ1cPUawrNlyrVWp6+vVskPhaiq4+XbFX2xv6WafXsffSXoaGDDXU6XH7P9F98SVq9AC4qwrYunIRXf89XRasCyFqpUbJ8sCBA1GpVDg7OyuuhRCipdh1No32F1cq/tVMcw7Hq71y1Nh4vnJHz46GEU9hGY62jnT37c6uS+V9vfvSbkWy7O4XyoXWNxGUEWco6562hk0nH2PYDW0aNF4hRNNWo2R5y5YtVV7Xl4KCAt5++21+/PFHkpOT8fT0ZOTIkbzxxhsEBgbWqI2SkhJmz57Nnj17OHHiBFevXkWj0RAcHMzw4cN58cUXadu2bT1/JUKIpkyv1/PxHwf50iZOUe7W+2GTHRauT+BAdsGoL738eyn62viXFADfgZPgt/K/s/7qozy0ZhuDO9yFjVoGfIQQNWO1C/wKCwsZOnQob7zxBrm5uYwZM4bg4GAWLVpE9+7dOXv2bI3bmTVrFn/99Rf+/v6MHDmSW265heLiYj7//HOio6PZu3dv9Q0JIVqszfFX8E1Zh4uqyFCmQ4199/sU9bKKsjh67aiiTOYr1w/jfj2TeYbLeZcVZfadRqOxdTVcq1V6uqavZdWhiw0SoxCiebBYspyRkUFGRgZ6vd4i7c2ePZu4uDj69OnDqVOn+Omnn9i1axcffPABV69eZdKkSTVqx9HRke3bt5ORkUFsbCxLly7lt99+4+zZs7z00ktkZ2fz5JNPVt+QEKJF0un0vLf2FHfZbFPeaDcU3JQ7LuxK3YVOrzNcO9g40MO3R0OE2eJ08uqEm52bomzHxR3KSvbO2EYrt5Ebb/MXH66PR6PVIYQQNVGnZHnlypWMGDECV1dXvL298fb2xs3NjREjRvDbb7+Z3W5xcTGffPIJAJ9++imuruUjA9OmTSM6OpqtW7eyb9++atuytbWlX79+Jsdu29jY8MYbb+Do6Mi+ffvIysoyO14hRPO16vBFslMT6GdzTFGu7v6ASV3jZC2mTQyOto71Gl9LZau2pXdAb0WZSbIMqLop/57C1ZfwzDjEL/su1Gt8Qojmw6xkWa/XM2nSJMaOHcuGDRvIz8/Hw8MDDw8P8vPz2bBhA+PGjWPixIlmjTTHxsaSlZVFu3bt6N
69u8n98ePHA7Bq1SpzwjdQqVTY2NigUqmwt7evU1tCiOZHo9UxZ/0pxhmNKusdPaDDrcoyvZ7Yi7GKsn4B/eo9xpbMuH93XNyBVqdVVgruhd6rvaJovM1WPtp4mqISo7pCCFEBs5LluXPnsnjxYvz9/fn888/JzMwkPT2d9PR0srKy+OKLL/D39+ebb75h7ty5tW7/0KFDAPToUfHHl2Xlhw8fNid8oPQH27vvvkteXh5DhgzBycnJ7LaEEM3T0r0XSErLM5mCoep8F9gpR4zPZZ3jUt4lRVm/QEmW65Nx/2YXZ3MsTfkJACoVqm73K4put4kjPSuL73fV7swAIUTLVKPdMIzNmzcPZ2dntm3bRlhYmOKem5sbjz/+OMOHD6dLly7MmzePZ555plbtlx16EhQUVOH9svKkpKRatfviiy9y+fJlsrOzOXz4MAkJCdxwww3Mnz+/2meLioooKipf3JOdnQ2ARqNBo9HUKo6WqqyfpL9qR/qt9izRZ4UaLXM3nKKnKp5QtXLhWEnne9Abtb31/FbFdRvnNgQ7Bzepv7em9r3mZe9FmHsY57LPGcq2nd/GDa1uUFbsOB7bjW+gum7P5VvUe/hkkxvjuvnhbG/Wj0Kg6fWZtZB+qz3pM/NYor/M+hfi3LlzjBgxwiRRvl5YWBjDhg1j3bp1tW4/NzcXwLCvszEXFxcAcnJyatXuL7/8QkJCguE6Ojqab7/9tsqvo8zbb7/NrFmzTMo3b95caZyiYuvXr2/sEJok6bfaq0ufbb6o4nKODc/a/qUoz3HwZ9PBS3BojaJ8Va5yWlhQSRB//PGH2e9vTE3pe82/yJ9zlCfLa46vIei86UBLH7dO+OaU71Qy3uYvfsvrzytL1jM8sO4L05tSn1kT6bfakz6rnfz8/Dq3YVay7OPjU6M5vnZ2dnh7e5vzinpx5swZAK5du8a+fft45ZVXiImJ4auvvuKRRx6p8tnp06czbdo0w3V2djbBwcEMGTIELy+vKp4UZTQaDevXr2f48OHY2dk1djhNhvRb7dW1z/KLS5g1ZxuO5HKbjXL/Xue+k7m1722KssKSQt745Q1F2YReExgeMrz2wTeipvi95nHRgx1byhf2pehSGHDzANzslTtlqI4VwIonDNf91McI4Brbrvox68EBuDuZ9/U2xT6zBtJvtSd9Zp60tLQ6t2FWsjx27Fi+/fZbMjIyaN26dYV10tPT2bRpEw88YLpivDplu19U9ttAXl4eUDrlwxze3t7ccsst9O7dmy5duvDPf/6ToUOHEhwcXOkzDg4OODg4mJTb2dnJN20tSZ+ZR/qt9sztsx92JJOep2GMeg9uqoLyGyo1Nt3vx8aozT1X9lCkLZ+mpVap6RfUr8n+fTWl77Xegb1xsHEw9L9Wr2XftX0Mb2v0i0qnMfDH81BUOoVOrdIz1mY7nxbcyZK480wb0aFOcTSlPrMm0m+1J31WO5boK7MW+M2ePZvw8HCGDh3Kpk2bTO5v3ryZ4cOH065dO956661atx8SEgLAhQsVb+1TVl7Xk/c8PDwYPXo0BQUF8rGGEAKA3KISvtxaOl1rvI1yCgbhg8E9wOQZ410wOnt3xsPBo75CFNdxtHUkpk2Moiw2Jda0op0TdB6nKBpvsxXQs2D7OdJyi0yfEUIIajiyPHToUJMye3t79u3bx/Dhw/H09DQkrsnJyYYh7969e3PnnXeycePGWgXVtWtXAPbv31/h/bLy6OjoWrVbkbJpIlevXq1zW0KIpm/JjkQy8jUEcI1+aqOdFbpV/EmZ8f6+smVcw+ob0FfxdxB7MRa9Xo/K6Chyuj0I+xYbLsPUl4lRnWJfcQe+2JrAK7d1bKCIhRBNSY2S5S1btlR6T6/Xk5aWVuGckJ07d5r+Y1UD/fr1w8PDg4SEBA4ePEi3bt0U95ctWwbA6NGja922sa1bS1ewt2vXrs5tCSGatpxCDfP+OgvAWJvtqFXXLfxy8ICo20yeuZR3iTOZZx
RlfQP61mucQqlfQD/e533D9aW8S5zLOkd4q3BlxaCe4BUBaacNReNt/mJfSQe+3pnEPwaE4+suh8gIIZRqNA3j3LlzZv85e/ZsrYOyt7dn6tSpAEyZMsUwRxlgzpw5HD58mEGDBhETU/7R2yeffEJUVBTTp09XtPX777+zY4fpqU75+fm88sorbN26FT8/P0aOHFnrOIUQzcui2ESyCjSA/u+P6K/TeVzpR/lGdl7cqbh2s3ejs3fneoxSGGvXqh2+zr6KMuOpMQCoVFDBnsuOFFFUouPLv2r/80oI0fzVaGS5rnODzTFjxgw2bNjAjh07iIiIYMCAASQlJbFr1y58fHxYuHChov61a9eIj48nNTVVUb5nzx5mzZpFYGAg3bp1w8PDg0uXLnHw4EHS09Px8PDg559/VhypLYRoebIKNMzfVposxahOEWa0t3JlUzC2pSgPLOnt3xtbtfn79oraU6lU9Avox/Izyw1l2y5s46GOD5lW7novbHoD9DoA3FQFjFTvYYWuP9/GJfHEoHB83WR0WQhRzqwFfg3B0dGRzZs38+qrr+Ls7MyKFStISkpi4sSJ7N+/n/Dw8OobAcaNG8e0adMICAhgz549/Pzzz+zZs4e2bdsyffp0Tpw4wYABA+r5qxFCWLuF28+RXVgCVLCwzyui9CN8IxqtxmS+cv/A/vUWo6iccb/vvbyXPE2eaUX3AAgfoigq+xShqETHl1tldFkIoWSR4Y/MzExycnLQ6yve2L1sd4vacnJy4vXXX+f111+vtu7MmTOZOXOmSXl0dDQffPCBWe8XQrQMWfkaFm4vPdjCkSJut4lTVuh2f+lH+Eb2XdlnkpANDBpYb3GKyvUN6IutypYSfekvPBqdhriLcQxrO8y0crf7IaF84Xlf9XECuUoKPny3S0aXhRBKZo8sX7p0icmTJ+Pr64uXlxehoaGEhYWZ/KnpCLAQQjSW+dvPklNUmmTdUsHeynS9t8LnjI+47uzVGW8n6zmIqSVxtXclxk+5hdzWC1srrhx1W+mCzb+V7bkMUKjRMU9Gl4UQ1zErWU5NTaVnz54sXLgQBwcHfHx80Ov19O7dG19fX8MIc58+fWSKgxDCqmXkFRtGlaGivZWHVLi3sl6vN0nGBgbLqHJjGhQ0SHH914W/0P09N1mhwj2X/wJKf3Z9uyuJqzmy77IQopTZh5JcvHiR119/nfPnzzNq1ChUKhWxsbGkpqayZcsWoqKiUKlU/PHHH5aOWQghLOarbWfJK9YCVLK38v0VPAXnss9xPue8omxw0OD6CFHUkHH/pxWmcTzteMWVuz+ouAxVX6anKh74e3T5r4T6CFEI0QSZlSz/+eefhIWFMWPGjArvDxw4kHXr1nHgwAHeeOONOgUohBD1JTO/mCU7Eg3XNd1bGeCv88oRaF8nX6I8o+ojTFFDwe7BhHmEKcoqnYoRGAPekYqi6z9V+CZORpeFEKXMSpZTUlIUB4XY2NgAUFRU/g9LYGAgQ4YM4eeff65bhEIIUU8WxiYaRpVL91Y2moJRyd7KYJqEDQweaNYhTMKyjKdiGM8rN6hkz2UnCgEZXRZClDMrWXZ3d1dct2rVCihNoq/n6OhoUiaEENYgu1DDotjyuco9VKcJU19SVqpkb+WsoiwOXDmgKDNO0kTjMN6N5ET6CS7nXa64cvQ9pQs4/+aqKmSkeo/h+pu4JK7lyuiyEC2dWclySEgIycnJhuvOnUtPq1qzZo2hLD8/n9jYWPz9/esYohBCWN6S2ERy/t5XGeBuW6MRyEr2VgbYcXEHWr3WcO1g48BN/jfVS5yidrr5dsPN3k1R9lfKXxVXdg+AdkMVRXfbltctHV2WnTGEaOnMSpaHDh3K4cOHuXr1KgB33HEHLi4uPP/887z00kt8/PHHDBkyhMuXLzNq1CiLBiyEEHWVW1TCgutGlR0p4k673cpKleytDLDl/BbFdS+/XjjZVjxdQzQsO7Ud/QOUB5QYzy9XMJqK0V
d9jECuGq6/3pkoo8tCtHBmJcsPPPAA48aN4/jx0lXGnp6efPnll+j1ev773//yzDPPsGfPHjp27Mibb75p0YCFEKKuvo1LIjNfY7geod6Lk+66w0Wq2Fu5RFfC9pTtijKZgmFdjLfwi0uNo7CksOLKHW4DRw9F0Xjb8r/fQo2Or2R0WYgWzaxkuWvXrvzwww8MGlT+A+K+++7j1KlTfPbZZ8yePZulS5eyf/9+PDw8qmhJCCEaVkGxlvnblMnPE+5GJ/ZVsrcywMErB8kuzlaUyal91qV/QH/U181FLtQWsvvS7oor2zlC5/GKooedYinbcxng650yd1mIlszsE/wqEhISwpNPPsn06dO56667sLOzs2TzQghRZ9/vTuZabrHh2p80OhbuV1aqZG9lgE3nNymuI1tH4u8qazOsSSvHVnTz6aYo25S8qeLKYLKQ00tzkZtsThmuCzRaGV0WogWzWLKckZFBRkaG4fQ+IYSwNoUaLV9uVW4HNq3NflTUbG9lvV7PxqSNirKhIUMrrCsal/Hfy+bzm9HqtBVXDuwB3h0URc/57FFcfxOXRHpeMUKIlqdOyfLKlSsZMWIErq6ueHt74+3tjZubGyNGjOC3336zVIxCCGERP+89zxXFQRN6Ruu3KCt1Hlvp3son009yMe+iouzmkJstG6SwCONkOb0w3WS7P4MK9lzumbcFV1X5POf8Yi0LtsvoshAtkVnJsl6vZ9KkSYwdO5YNGzaQn5+Ph4cHHh4e5Ofns2HDBsaNG8fEiRNlpFkIYRWKS3R8sUU5qjwx+DKO2eeUFSvZWxlgQ/IGxXWgayCRrSMrqS0aU7BbMB1aK0eLNyZvrKQ2JnsuqzX5vBx6WlFlyY4kMvNldFmIlsasZHnu3LksXrwYf39/Pv/8czIzM0lPTyc9PZ2srCy++OIL/P39+eabb5g7d66lYxZCiFr7Zf8FLmYpd0T4ZyujRV9e7SHoxkrbMJ73enPIzXJqnxUbFjJMcb0xeWPlAzju/tBOWX+seqti98DcohIWxiZaOEohhLUzK1meN28ezs7ObNu2jSeeeEJxop+bmxuPP/4427Ztw8nJiXnz5lksWCGEMIdGq+OzLWcUZf3bOtMmeY2yYhV7KydmJXImU9nGsLbDKqwrrIPx309qXirH049X/oDRVAynlB08coPy+2FR7DmyCjQIIVoOs5Llc+fOMWzYMMLCwiqtExYWxrBhwzh37lyldYQQoiH8dvAi59MLFGWvhp+Gouu3gFNBdMV7K4PpR/jeTt509elqyTCFhUW0iiDYLVhRZrxAU6HDrSZ7Lk/12qv4/SmnsIQlOxItGKUQwtqZlSz7+Phgb29fbT07Ozu8vb3NeYUQQliEVqfn083KEeFuwa2IvLhcWbH9MPAIrLQd42R5SPAQxV6+wvqoVCqTBZhVzlu2c4QudyuKvE8v47ZObRRlC7afI6dQRpeFaCnM+pd+7NixbNq0iYyMjErrpKens2nTJu68805zYxNCiDpbffgi567lKcpevNEGVdIOZcXuD1XaxqW8Sxy5dkRRJrtgNA3Gu2KczTrL2awqdrUw3mM7M4n/3JCuKMoq0PD1ziRLhSiEsHJmJcuzZ88mPDycoUOHsmmT6UbvmzdvZvjw4bRr14633nqrzkEKIYQ5dDo9n2xSjip3DnSnd9YfyorOXqUfwVfCeGGfm70bN/pVvhBQWI9on2h8nHwUZVUeUBLQA3yiFEWh51dwi9Ho8vxtZ8krKrFYnEII61WjZHno0KGKP2PGjMHe3p5Dhw4xfPhwfHx86NmzJz179sTX15ebb76ZgwcPYm9vLyPLQohGs/b4ZU5fyVWUPT04DNWhH5QVu94HtpVPLTNOrgYFDcLORk4obQrUKrXJ6HKV85Yr2HOZYyv49wDl8ecZ+Rq+jZPRZSFaAtuaVNqyZUul9/R6PWlpaaSlpZnc27lzp2yrJIRoFHo9fL5VucA4ys+Nm20PQe5lZeUqpmBkFG
aw9/JeRZlMwWhahoUM46f4nwzXR9OOcjH3IgGuARU/EH0PbJgJel3ptSaPjplbuPmGCDacuGKoNu+vs9zXs/J57kKI5qFGybLsaCGEaGqOZqg4eSlHUTZ1aHvUB59TVgy6EXyVH7tfb33SerT68mOSHW0c6RvY16KxivrV068n7vbuZBeX736yLnEdEztPrPgBNz9ofzOcXldedvB7nhr2rSJZTssr5se9F2hTQRNCiOajRsly27Zt6zsOIYSwGL1ez9oLyllm7XxcGNVWBcvXKitXMaoMsDZRWX9Q8CCcbCs+DltYJzu1HTe3vZlfT/9qKPsz8c/Kk2UonYpxfbKcuI2urpkM7uDDlvirhuKvtp3jhY71ELQQwmrIvkdCiGbnr9PXOJ+nnAI2dWh7bA7/ANeNEmPnAp3HVdrO1fyr7Lm0R1E2MnSkRWMVDeOW0FsU18fSjpGcnVz5Ax1uBcdWyrKDP/DU0AhF0dXcYuKuyHRDIZqzOiXLly9f5u233+bWW2+la9eudO3alVtvvZV33nmHy5cvV9+AEEJYmF6v59Mtyq3B2no5M7qLPxz4Vlm581hwcKu0rXVJ69BTfjyys60z/QP7WzRe0TB6+fXC09FTUWb8qYGCrYPJnssc/J6YYHcGRCjPD9iQoqZIo0UI0TyZnSz/8ssvREZGMmPGDP7880+OHDnCkSNH+PPPP3nllVfo0KEDv/zyiyVjFUKIau1ISOPA+SxF2ZQh7bG9EAfpCcrK3R+usi3jZGpoyFAcbR0tEqdoWLZqW4a3Ha4o+zPxz6ofMt4VIysZEjbz9DDl6HKWRsWyAxctEaYQwgqZlSzv3buX++67j7y8PMaOHcvy5cs5cOAABw8eZMWKFYwbN47c3Fzuv/9+9u7dW32DQghhIR9tPK24DmrtxNjugXDgG2VF70gI7lVpO5fyLnHgygFFmUzBaNqMp2KcyjjF2cwqDigJ6A5+0cqyfYu4MdSTPuFeiuIv/zpHcYnOUqEKIayIWcny22+/jVarZenSpSxbtowxY8bQtWtXoqOjueOOO1i6dClLly5Fo9HwzjvvWDpmIYSo0K6zaew6pzxt7Z+D22GnyYFjK5SVuz9UuqduJYxHld3s3egbILtgNGU9fHuYHFBS5eiySgUxE5Vl8X9AziWT0eXUrEJ+2X/BQpEKIayJWcny9u3b6du3L2PHjq20ztixY+nXrx/btm0zOzghhKiNj41O6/Nzd2B8TBAcWQYlBeU31LalB5FU4c9zyiRqWMgwOYikibNR2zAidISi7M/EP9Hr9ZU8Qem8ZTvn8mu9Fg58S+9wT3qFKudAf7r5DBqtjC4L0dyYlSxnZWUREhJSbb2QkBCysrKqrSeEEHW1PzmD7WeuKcr+MSAMB1sb0ykYkSPBVTnCeL3zOec5mnZUUTYqdJTFYhWNx3gqzbmsc5zKOFX5A47upjum7P8alV7PU8PaK4ovZBSw/ECKpUIVQlgJs5JlPz8/Dhw4UG29gwcP4ufnZ84rhBCiVj42mqvsZqdnQkwgXDoCF43+verxSJVtGU/BaO3Qml7+lc9vFk1HV5+u+Lv4K8qqXegX86jyOjMJzm2hf3tvugV7KG59uvkMJTK6LESzYlayfMsttxAfH8/LL7+MVmu6XY5er2fGjBmcPHmSkSNlQYwQon4duZDF5usOigAYFqDD0c4G9i5SVnYLgPbDqmxvzbk1iuub296MrbpGZzgJK6dSqUwW+v1x7o+qp2IExkCbzsqyfYtRqVRMHRyuKE5Ky2flIdkZQ4jmxKxk+dVXX8XT05N3332X9u3b8+KLL/L555/z+eef89JLL9G+fXvefvttvLy8mDFjhqVjFkIIhY83KUeVWzvb0beNHopz4fDPyso9HgK1TaVtxafHczpD2d6oMJmC0ZyMDFMO4qTkppjsfKJQ0UK/k79D7hUGRngT7KJMtD/ZdAatrorkWwjRpJiVLAcFBbFp0yY6depEUlIS7733HlOnTm
Xq1Kn897//5dy5c3Tu3JlNmzYRFBRk6ZiFEMLgRGo2644rD0F6rF8oDjagOvYrFOeU31CpoUfVeyuvSliluPZz8SOmTYzF4hWNr6NnR8I8whRlq86uqqT237rcDdcfc64rgYPfoVKpGBmknHZx9loeqw/L6LIQzYXZh5J06dKFw4cPs2nTJmbNmsXjjz/O448/zqxZs9i0aROHDh2ic+fO1TckhBB18InRDhgeTnY8cFMwAOr9S5SVI0aAR+W/wJfoSvj93O+KstvDb0etqtNhp8LKqFQq7mh3h6Js7bm1FGmLKn/IqRV0MtoBav/XoNfRqbWeG/yUJ0F+vOkMOhldFqJZMGsS3rhx4/D39+fTTz9l8ODBDB482MJhCSFE9U5fzmHN0VRF2WP9w3B1sKVV/lnUlw4pH+g5qcr2dqXu4lqBckeN0eGjLRKrsC63hd3G3P1zDdc5mhy2nt9qsrWcQsxEOPR9+XX6WVRJ21GpYOqQcKb8UP79duZKLn8cvcRt0f6m7QghmhSzhkvWrFlDWlqapWMRQoha+WTzGa5fl+XmaMsjfUMBaHtts7KyRzC0v7nK9lYmrFRcd/TqSHir8Epqi6bM39WfG/1uVJQZT8ExEdwLfG5QFKkPfA3AzVG+RJmMLp+W0WUhmgGzkuWwsDDy8vIsHYsQQtTY2au5rDLadeDRvqF4ONlBYTZBGXHKB3o8UuXCvjxNHpuSNynKjD+qF82L8acG21O2k16YXkltKlzopzr5O/aabNRqFU8NVZ7qd/JSjsl8eiFE02NWsnzfffexdetWLl26ZOl4hBCiRj7dnMD1g3Yu9jZM6l+6aEt9dBm2uuvmn6psoPuDVba3IWkDhdpCw7WNysbkAAvRvAxvOxwHGwfDdYm+xOTkRhPRE8DW0XCp0mkITt8OwKjOfkT4uiqqf7TxdNXb0gkhrJ5ZyfL06dMZMGAAgwYNYvny5Wg0GkvHJYQQlUpOy2fFQeVJaQ/3DaWVsz3o9agPGC3s6zAK3KueO2r8EXy/wH54OXlZJF5hnVztXRkaPFRRVu1UDGdP6DhGURSatqX0+06tYupQ5al+x1Oz2XjiiiXCFUI0ErOS5Q4dOnDs2DHOnDnD+PHjcXJyIiAggPDwcJM/7dq1s3TMQogW7rMtyn1snexsmPz3qDIX9qK6ckz5QDUL+y7lXWL3pd2KstHtZGFfS2D893w07Sjnss5V/ZDRVAzXokuoEv8C4PboAMK9XRT3P9oko8tCNGVmJcuJiYkkJyej1+vR6/XodDouXbpEYmKiyZ9z56r5R0cIIWrhQkY+y/ZdUJQ92DsEL9e/P07fu1D5QOtQCB9SZZurz65GT3ky42rnyuCgwRaIVli7PgF98HJUfoJgvNDTREgf8IlSFKn3LgDApoLR5cMXsth6SnnCpBCi6TArWdbpdLX6I4QQlvL5lgRKrhtVdrBV84+Bf+9YUZABx35VPtDjEVBX/k+dXq/n19PKZ0aEjsDxunmpovmyVdtya/itirLfzvxGia6k8odUKrhxsrLo9J+QVfpL3B1dA2jr5ay4P1fmLgvRZMlO+0KIJiM1q4Cle5Wjyvf1CsHX7e/E9sB3UFK+SE+vtqt2Yd/ey3s5n3NeUTa2/dhKaovm6M72dyqurxZcZXvK9qofir4H7MsX86n0Oti7CABbGzVThihHlw8kZxJ7RrZcFaIpqlWyvGbNGh5//HFGjRrFnXfeyWuvvSbTLIQQDebLrWcp1pZ/WmVvo+bJQX+vi9DpYM9Xivr6qNvA1bfKNn85/Yviup1HO7r6dLVMwKJJiGwdSbR3tKLM+PvChKM7dL1XWbZ/CZSU7sIytnsgQa2dFLfnbjwlo8tCNEE1TpYfeOABRo8ezYIFC1i7di0rV67kzTffpFOnTqxcWc38LiGEqKMr2YX8sDtZUXbPjcH4efw9qnxmA2QkKu7rev6jyjazirJYn7heUTYuYhwqlarO8YqmZVzEOMX1tgvbuJJfzS
4WRlMxyLsKJ0p307CrYHR5T2IGcWer2MdZCGGVapQsL1iwgB9++AEbGxsmTpzIRx99xJtvvknv3r0pLCzk4YcfJisrq75jFUK0YPP+OktRSfmosp2NiicHX7fbzu55ivqZTiHog3pV2ebvZ3+nWFdsuLZV28ouGC3UyLCRONmWjwRr9drqF/r53oAupK+ybHf5pxt39QgiwEM59/2jjafrHKsQomHVKFlesmQJarWaP/74gwULFjB16lSmT59ObGwsjzzyCDk5Ofz666/VNySEEGa4llvEd7uUo8rjY4IIbPV3cpOWAGeUI8TnfIaXLsSqhF6vN/mofVjIMFo7trZM0KJJcbFzYVTYKEXZr6d/RaevepG6rudjyoLzcXDpCAD2tmr+aTS6vPNsGrvPyeiyEE1JjZLlI0eO0Lt3b4YNG2Zy7+WXX0av13PkyBGLB1dQUMBrr71GZGQkjo6OBAQEMGnSJFJSUqp/+G+ZmZl8//333HfffYSFhWFvb4+bmxs33XQTc+fOlQNVhGgC5m87R4FGa7i2Uav456DrkpA9CxT19Y6tSGndu8o2j6cd51TGKUWZ8UfxomUx/vs/n3OevZf2VvmMPvJWCuyMfsHaM9/wfyf0DMLPXTm6/PEmGV0WoimpUbKcnZ1d6eEiZeXZ2dmWiwooLCxk6NChvPHGG+Tm5jJmzBiCg4NZtGgR3bt35+zZszVq5/333+eBBx7gp59+onXr1owbN45evXpx6NAhnnnmGYYOHUp+fr5FYxdCWE5GXjHf7ExUlI3tHkhI2dZcxXlw4FvFfV23B9CqHaiK8ahygEsAvf2rTrBF8xbtHU37VsqR4GoX+tnYkeQ1WFl2+GcoyATAwdaGJweFK25vO32NfUkZdYxWCNFQapQs6/V6bGxsKm7g7/1LLb2f8uzZs4mLi6NPnz6cOnWKn376iV27dvHBBx9w9epVJk2q+kSuMi4uLrzwwgskJiayf/9+fvzxRzZu3MiRI0cICQlh+/btzJ4926KxCyEsZ2HsOfKKy0eV1SqUC6cO/wxF16+ZUKHr8WiVbeZr8llzbo2ibGzEWNQq2U2zJVOpVCajyxuSNpBVVPWanCTvwejVtuUFmnw49IPh8t5eIfi4KX95k9FlIZoOq/zJUFxczCeffALAp59+iqtr+V6W06ZNIzo6mq1bt7Jv375q25o+fTrvvvsuISEhivKIiAjeeecdAH744YeKHhVCNLKsAg2LYxMVZXd0DSCs7DhhvV6xoAqAyFtKT+2rwppza8jT5Bmu1Sq1yV67omW6Pfx27NR2hutiXTErzqyo8plCu9boO9ymLNz9Vel2hoCjnQ1PDFSOLm+Jv8qh85mWCFkIUc9qnCwvWbIEGxubCv+oVKpK79va2lbfuJHY2FiysrJo164d3bt3N7k/fvx4AFatWlXrtq/XtWvpXqoXL16sUztCiPqxKPYcOUXlJ6mpVCiPEk7aAVeOKR/qVfV2cXq9np/if1KU9Qvoh5+LX53jFU1fa8fW3Bxys6Ls5/ifq1/oF2P0aWd6Qul2hn974Ka2eLvaK6rI6LIQTUONk2W9Xm/WH3OmZxw6dAiAHj16VHi/rPzw4cO1bvt6ZfOe/fzkh6QQ1iYrX8OC7cpDj27t4k97X7fyAqPt4vBsB+FDq2z30NVDnEw/qSi7N+reSmqLluieqHsU18k5yey8uLPKZ/QhfcG3k7Iw7jPD/3Wyt+EfA5SjyxtOXOFoimy7KoS1q9Gwr6XnI1cnObl0i6igoKAK75eVJyUl1ek9c+fOBWDMmDHV1i0qKqKoqMhwXbagUaPRyI4aNVTWT9JftdNS+23eX2fIKVSOKv9rYGh5P2RfxPbEKq7fHE4bMwmdVltln31//HvFdZBrEDf53tTi+rciLfV7zViX1l1o36o9ZzLPGMq+P/E9vXxN9+029FlJCaobH8f293+X3zy7GU3KYfC9AYB7YgL4YmsCGfnl/Tt3wyk+u79b/XwhVky+12
pP+sw8luiv2s+RaAC5ubkAODs7V3jfxaV0vmJOTo7Z7/jiiy/YsGEDrVq14qWXXqq2/ttvv82sWbNMyjdv3lxpnKJi69evr76SMNGS+i1PA/MP2MB1qXA3Tx1n9m2jLH3pmPITEfryhX8lanvWXvKkZE35wj3jPsvV5bIue52irHNJZ/7840+Lfw1NWUv6XqtMx6KOnKE8Wd6Wso3vVn1Ha5uK9+Fev349ap0LI2zdcCgp/9mU8usMDoWU78Xcz1vF6uTyBfPrT1zhq6VrCHSphy+iCZDvtdqTPqsdS+x4ZpXJcn3btm0b//73v1GpVCxcuJCAgIBqn5k+fTrTpk0zXGdnZxMcHMyQIUPw8vKqz3CbDY1Gw/r16xk+fDh2dnbVPyCAltlvc9afpkhbPgVDpYI37+9PhO/fi32Lc7H9aKriGVX3hxgx8m6g8j6bf3Q+2sPlCbaDjQMv3vYiHg4e9fjVNB0t8XutMoM1g9m0YhO5mtLBGz160oLTeKDbA4p6xn2mdjsJ29833G+bGUfgQ1+CizcAAwpL2DbnL7IKyj81OaIN5B+3dm2Ar8p6yPda7UmfmSctLa3ObVhlsly2+0Vlvw3k5ZWuYndzc6vwflWOHj3KmDFjKC4u5qOPPmLs2LE1es7BwQEHB9N9W+3s7OSbtpakz8zTUvotPa+Yr+OUp/Xd0TWAjoHXjejt/xmKrt/bXYVN3ynYGPXP9X1WoivhlzPKPXNHhY3C29XbovE3By3le60qHnYejGk/hu9OfGcoW5Gwgqk9puJgU8XPgpsehx1zQVf60a9KW4TdoW9h0PMAeNrZ8Vj/cOasLz8Q58/jlzmXXkhkm9r/TGvq5Hut9qTPascSfWWVW8eVbfN24cKFCu+Xlbdt27ZW7Z47d44RI0aQkZHBzJkzeeqpp+oWqBDC4ub9ddZkX+Wnh0WUV9BpFQunAOgwCrwqPjipzNbzW7mcf1lRJgv7RFXu6aBc6JdZlMnaxLVVP+TWBrqMV5bt+QpKig2Xj/QNxc2xfKxKr4dPNp1BCGGdrDJZLtvSbf/+/RXeLyuPjo6ucZupqakMHz6c1NRU/v3vf/N///d/dQ9UCGFR13KLWLIjUVF2Z7dA2vmU77VO/BrIUNahz5Rq2/4hXrmferR3NJ28OlVSWwgI8wgzOdXxhxM12Je/9z+V17mX4divhksPJzse7RemqLLq8EXOXMk1O1YhRP2xymS5X79+eHh4kJCQwMGDB03uL1u2DIDRo0fXqL2MjAxuueUWEhISePTRR/nf//5nyXCFEBYy76+zFGjKR5Vt1Cqeun5UGWCn0aiyf1do26/KduPT49mVuktRJqPKoiaMv0+Oph3l4JWDVT/k3xXa9leW7fy0dAj5b5P6heLqoBxd/myzjC4LYY2sMlm2t7dn6tTSxTtTpkwxzFEGmDNnDocPH2bQoEHExMQYyj/55BOioqKYPn26oq38/Hxuu+02jhw5woQJE/jqq69QqVQIIazLlZxCvt6ZqCgb2z2w/LQ+gJR9kLxD+WCfqaUrAKvwzfFvFNeejp6MCB1Rl3BFCzEoaBD+Lv6Ksq+Pf139g8ajy5cOQ3L5Xs2tnO15pK9yKuGKgykkXstDCGFdrHKBH8CMGTPYsGEDO3bsICIiggEDBpCUlMSuXbvw8fFh4cKFivrXrl0jPj6e1NRURfkrr7zCzp07DacJPvbYY1Rk8eLF9fWlCCFq4MutZynUlO/pbqNW8dT1p/WB6aiyWwB0vLPKdq/mX+X3c78ryu7tcG+Fi7SEMGartuWBGx7g/b3lO1xsTN7I+ZzzBLsFV/5gh1HQqi1kXncewM5PoW1fw+Vj/cNZFJtI/t9z9HV6+HTzGd67u2XtjCGEtbPKkWUAR0dHNm/ezKuvvoqzszMrVqwgKSmJiRMnsn//fsLDw6tvhNIpGABarZbvv/+eJUuWVPhHCNF4LmcX8m2c8pCh8T2CaOt13ahy1gU4tlz5YK9/gK3yCGFjP5
z8gRJd+TZd9mp7kxPahKjKXRF34WJX/r2o0+v49vi3VT+ktoGbnlSWnfwdrpVPtfB0seehPsrR5V8PpHA+ve77wgohLMdqk2UAJycnXn/9dc6cOUNRURGpqaksWrSowpP9Zs6ciV6vNxkhXrx4cY2O5RZCNJ7PtyRQVFI+qmyrVjHVeFR515dw3SEk2DlDzMQq2y0oKeDnUz8ryka3G42no2ddQxYtiKu9K3dF3KUoW35mOVlF1RxV3f1BcHC/rkAPOz5SVPnHgHAc7cp/FGt1ej6VuctCWBWrTpaFEM1falYB3+9W7qt8d89ggj2vOxmzMBv2GX0C1O0BcK466V11dpVJQvNwx4frFK9omR644QFsVOUn7xWUFLDs1LKqH3J0h56PKssO/QA5lwyX3q4OPHiTcnR56b4LMndZCCsiybIQolF9tjmB4utGle1sKhhV3rcIFEmvynQBlRGdXsd3J79TlA0IHEB4q5pN4RLiegGuAQxvO1xR9v2J79FoNVU/2PtfYHPdVCFtMcR9rqjy+MBwHGz/v737jm+qXh84/knSNOkuXbSFskrLpuwhUzYiIogioixxXAf6w3tVRARELyqCA1EUZaq45QoiSwXZm5ZZKKsttIXu3abJ+f0RKD1NW6C78LxfL15NnvM9hyfn1SZPzvkO9dXlDzafQghRPUixLISoMlGJmXy3T31VeVTHAOq4O1wPmLKtA6MKajrkhouQhOeFE5UepYqNbSFXlUXpFb4rcTnrMhsjN5a8k4svhBSapnD/Esi+/uXPx9XI+LsaqJr8FnqJk7GpCCGqnhTLQogq8+Hm05jM18cM2Ou0PHt3oavKod9aF3UoqPuUEo+rKArbsrepYk1qNaGzb+cy5SvubK28W9HOp50qtvzE8huPe7nrBaDA9IY5qbB/qarJ070CbeZdnrdRri4LUR1IsSyEqBKn49L49ZB6SfsxXerh51bgqrI5D3Z8pN6xYU+o256SHLh8gEiz+or12BZjZY51UWaF705EJEcQnhde8k5ejaHZverY7s8gLyf/aS0ne57ooe4itOl4HIcik8qUrxCi7KRYFkJUifc3hmMpcEHO0V5ne1X5+Grbpa1vcFUZYMkx9Tzsfk5+DG44uHSJClHA3QF308hNXdRuyd5y46vL3f5P/Tw9FkK/U4Umdm9ALUe9Kvb+xhsU4kKICifFshCi0h2OSmbDMXXXikndG+LlXGChEEWB7R+qd/RvC416l3jso/FH2R27WxWb0HICeq2+mD2EuHlajZZJrSapYtHmaPbF7St5x7rtoUEPdWznx2C5Ph2ii1HPM73VXxh3RCSwMyK+TDkLIcpGimUhRKV7f4P6apm7o55JPQvNUnF6E8QdUce6T7nh0taLwxarnnsaPRneeHipcxWisEENB1HHuY4qVvhuRpG6vah+nhBhXaikgMe61qe2q3p1ybkbw2U9ACGqkBTLQohKtTMinu2FrpQ90zsQV2OhK7/b56ufewZB00L9Pgs5nXSav6L+UsXGthiL0c5Y6nyFKEyv1TOx5URVbG/cXsKuhJW8Y+O+ULuVOrZ9vvUuylVGvY7JfYNUTQ5FJvPnictlylkIUXpSLAshKo2iKLxb6Kqyr6uRsV0bqBte2AWRu9Sx7i+CtuS3rC+PfKl67qJ3YVQTWdpalL9hjYfh7eCtihW+q2FDo4FuL6hjlw5BxJ+q0EMdAqhXcFEervbxt8jVZSGqghTLQohKs/F4HKFRyarY5L5BGPU6dcPtH6ifu9aBVg+VeOyo1CjWn1+vij3c5GGc9E6lTVeIYhl0Bsa1GKeKbYneQnjiDQbktRgOtRqqY1vfUV1d1uu0TOkfrGpyMjaNNWGXypSzEKJ0pFgWQlQKs0Wx6avcwNORBzvUVTeMCYXTG9Sxu54HO3tKsihsERbl+kqA9tgzusnoMuUsREkeDH4QN3s3VWxR6KKSd9LZQc9/q2PR++CMuvvQ0BB/gms7q2IfbDqFyWxBCF
G5pFgWQlSK1Ycucvpyuir2f/2D0esKvQ1teVf93MED2pW88t65lHOsPbtWFetk6IS7wb206QpxQ456Rx5p8ogqtjlyM8cTjpe8Y+tR4F5fHdv6rurqsk6r4aUBTVRNzidk8t1e9fzhQoiKJ8WyEKLC5eZZ+GCzejWyZn6uDG3tr24YEwrh6tkBuOt5sC+5K8Vnhz9TXVV2sHOgh6FHCXsIUT4ebvIwDhoHVWzh4YXFtL5Kp4ceL6ljUXvg3FZVaEDz2oQEuKtiH24+TXpOXmnTFUKUghTLQogKt3L3BaKTslSx/wwMRqstNA1cUVeVOz1R4rFPJ5227asc/DBOWumrLCqei70L3Q3dVbF/ov8h9EpoyTuGjAa3eurYFvXVZY1Gw9TBTVVNEjJy+WLrmTLlLIS4NVIsCyEqVEqWiQV/nVbFOtSvxd1NfNQNi7uqbHAp8fifHv4UhesFhrPembHNSu62IUR56mLoQi1DLVVs4aEbXF22s4cehVajjNwJ57epj93Ik37N1H8ri7edIy41u9T5CiFujRTLQogK9emWCJIzTarY1Huaoim8uMjW99TPb+Kq8omEE2yO3KyKjW0+FjeDWzF7CFH+DBoDE5pPUMV2xeziQNyBkndsMwZcCw1wLXx3BXhlUFMK3oTJMpn5sFC3JiFExZFiWQhRYaKTMlm647wqNrilL+3re6gbxoTCSfUAPe567oZXlQv3DXW1d+XR5o+WNl0hSm1k0EibeZc/OfRJySvv2dlDj/9Txy5sh/PbVaGg2i6M6higin2/L4rTcWllylkIcXOkWBZCVJj5G0+Rm3d94J2dVsPLg5raNtzyjvq5Qy3o9GSJxz4Yd5Ct0eoBURNaTsDFvuQCW4iKYLQzMqnVJFVsf9x+dlzaUfKObR+zziNe0Bb1vMsA/9cvGIcC85FbFHh3/cky5SyEuDlSLAshKsTRiyn8eviiKjamcz0aehUaeBe1D8LXqWM36KusKArzDsxTxTyMHjzS9JFi9hCi4o0MHomvk68qNv/AfMwWc/E72Rmge6Gry+e3wdm/VSEfVyNP9Gykim0+cZndZxPKlLMQ4sakWBZClDtFUZjzxwnVxTEXgx2T+wYVbgh/zlLHHD1veFV504VNhF0JU8WebP0kjnrHYvYQouLZ6+x5ts2zqtjppNP8dua3kncs6uryn2/aXF1+smcjvJzVi/PMWXei5K4eQogyk2JZCFHutpy6wo4I9RWvp3sH4ulsUDc8u8Vm9D89XirxqrLJbOKjgx+pYvVc6vFQcMnLYQtRGYY2GkpQLfWXwk8Of0JWXlYxewB6I/R6RR27dAhOrFGFnA12vNBPvQx2aHQKvx+JKVPOQoiSSbEshChXZovCO+vUfSl9XY1M7NZQ3VBRrFfPCnKtAx0eL/H4P5z6gcg09SpmL7R7Ab1OX+qchSgvOq2Ol9qrFxy5nHmZr49/XfKObcaAZ2N17K+3oFAXjoc7BtCoUFem99aHq8YGCCHKlxTLQohy9f2+KMILjdJ/aUAwDvY6dcMTa+DSQXWs1yvWq2zFSMtN4/PQz1Wx1t6t6V+/f5lyFqI8davTja5+XVWxr45+RWJ2YvE76ezg7mnqWHw4hH2vCul1WptBspGJmSzfeb4sKQshSiDFshCi3KRmm5i3MVwVa+rrwoh2heaStZitV80K8mxsvbpWgiVHl5CUk6SKvdT+Jds5m4WoYlM6TEHD9d/LDFMGi0IXlbxT8/vBt5U69vccyMtRhQa2qE37+upFUD7+8zTx6ep2QojyIcWyEKLcLPjzNAkZuarY9Huboyu8rHXoKutVs4Lunma9ulaMqLQoVhxboYr1CehDu9rtypSzEBWhqUdThgYOVcV+CP+B00mni9kD0Gqh7wx1LCUS9i9VhTQaDdPvba6KpeXkMW+jLFQiREWQYlkIUS7OxWewrNCt4P7Na9OtsZe6YW6m7VVl39bWq2olmLtvLrmW64W4TqPjhfYvlCFjISrWc2
2ew6C7PqjVrJh5d++7Jc9e0bgf1LtLHdv6LmQlq0JtAtwZ0VY9g8b3+yI5fim1rGkLIQqRYlkIUS7e/v04JvP1IkCv0zDtnma2DXd9AmmFRu/3nWG9qlaMHRd38HeUet7Z0U1H08itUTF7CFH1/Jz9mNBSvQz2ntg9Nku0q2g00K/Q1eWsRNg2z6bpy4Oa2ixU8ubaYzKVnBDlTIplIUSZbTt9hc0nLqtiE7s1pEHhBUjS4mD7h+pYYB8I6lfssU1mE+/sVa/w52H04Jk2z5QlZSEqxcSWE/Fz8lPF5u6bW/JUcvW6QLP71LE9iyDpgirk62bkmd6Bqtjus4lsOBZbppyFEGpSLAshyiTPbGH22uOqmJezPc/1aWzbeMt/wZRRIKCB/rNLPP63J7/lfOp5VezFdi/KstaiRnCwc+DfHf6tisVkxLD06NJi9riq30zQFujDb861nWoReKJnI+q4O6hib687QbaphFUDhRC3RIplIUSZfLs3klNx6arYvwc0wcVYaN7juONwUD1Aj7ZjwLdlsceOz4rns9DPVLGWni0Z1nhYmXIWojL1r9+fTr6dVLElR5dwMf1iMXsAnoHQ8Ql17OhPEH1AFTLqdUy9Rz2VXFRiFkt2nCtTzkKI66RYFkKUWlJGLvM3qUfgN/dz5cEOAbaNN70BSoGFE/SOtvPKFjJ331wyVFeiYWrnqWg18tYlag6NRsMrnV5Bp7nevzjHnMOcPXNK7l/c62UwuqljG6fZLIM9pJUfnRp4qGKf/BVBXGp2mXMXQkixLIQog7kbw0nONKliM4YWMVXc6c0QsUkdu+t5cPUv9tg7Lu5g3bl1qtiwwGG09m5dppyFqArBtYIZ1WSUKrY1emvJg/0cPaDnf9SxyF1wfLUqpNFoeGNocwpON56Za+bt30+UMWshBEixLIQopdCoZFbtVS87fU8rXzo38lQ3zMuF9a+oY8614a7JxR47Ky+L2bvVfZld7F14sf2LZUlZiCr1bNtn8TSq/z7m7JlDWm5aMXsAnZ4E9/rq2IbXIVd9x6VlHTceaq++o/Nb6CV2RsSXKWchhBTLQohSMFsUXl99VHU32NFex+tDmts23vMZJESoY32mg8G52OMvCl1k05/zpfYv4eXgVcweQlR/rvauvNrpVVXsStYVPjr4UfE72RlgQKFBsKnRsP0Dm6YvD2qCm4N6rMAbvx0jN89i01YIcfOkWBZC3LJVeyM5cjFFFZvcNwj/QqPySY2Bre+pY3Xal7isdXhiOMuPLVfF2vm0Y3jQ8DLlLER1MLDBQLrX6a6K/RD+A4cvHy5+p2b3QcOe6tiOjyFRPYjP09nAfwY2UcUiLqfLYD8hykiKZSHELUlIz2HuBvVS1Y19nJnYraFt480zIFc9UwaD5xa7AInZYubNXW9iVq5Pe2WntWNG1xkyqE/cFjQaDa93eR0Hu+tfLBUUZu2ahclsKm4nGPweFBggiDkHNtgOkB3dqR6t66oHBX7852kuJZcwr7MQokTy6SOEuCXv/HGSlCz1h/qbw1pgb1fo7SRyN4R9r461fRTqti/22F+f+Jqw+DBVbFKrSTRyl5X6xO2jjnMdnglRL6oTkRzBl0e+LH4nn2bQ+Sl1LPx3iFAPENRpNcwe1tJmsN9bv6vnQhdC3DwploUQN+3AhUR+PBCtit0X4s9dgYX6EpvzYF2hUfwGV+uy1sU4m3KWBYcWqGINXBswqdWkMuUsRHU0pvkYmtRSd5n4IuwLTiSUMINFr1fAsdDf2h+vQF6OKhQS4M7DHeupYuuOxPLPqStlylmIO5UUy0KIm5JntvD66mOqmLPBjmlDmtk23vs5xKqvENN7Kjj7FHlss8XM9O3TyTFf/9DXoGFG1xkYdIYy5y5EdaPX6pnVbZZq7uU8JY9pO6YV3x3Dwd26sl9BCRFFD/Yb2IRajurBfjN+OyYr+wlRClIsCyFuylfbz3EiJlUVe7FfELVdjeqGyVHw19vqmHcz6FRoNb
IClh9fbtP9YkyzMXTw7VCmnIWozlp4trC5c3I66TSLwhYVv1ObMVCn0N/FtnkQf1oVquVkzyuD1Cv7nYvP4NO/C81MI4S4ISmWhRA3dD4+w2alvia1XRh3VwN1Q0Wxdr8otOoeQz8EXaHlr6+KSIrgk0OfqGL1XeszuV3x8zALcbt4qvVTBNcKVsW+OvIVR+OPFr2DVgv3flBosF8urP0/m5X9HuoQQNt67qrYp1vOcDJW/aVXCFEyKZaFECVSFIVpq4+QU2CuVo0G5jzQCr2u0FvIiTVw6g91rP14qNelyGObzCbrbWfL9dvOGjS81e0t1WwBQtyu9Do9b3d/GzuNXX7MrJiZtn0a2XnFLFft1xq6qgcIcn4bhK5ShbRaDf8d3gq7Aitq5lkUXv35CGZLCctsCyFUpFgWQpTopwPR7IhIUMXGdW1Au3q11A2zU+GPl9UxJ2/bPpYFLDi8gOMJ6lH641qMo41PmzJkLETN0tSjKU+GPKmKnU05y/v73y9+p95TwU29Yh8bpkGG+m+1mZ8rT/VSzyZzOCqZlbvOlyVlIe4oUiwLIYp1JS2Ht35Xj873dzPy70ILHwDw5yxIi1HHBr0DDrVs2wK7Lu1i6dGlqlhDt4Y82+bZMuUsRE00qdUkmnmoB8t+H/49f0X+VfQO9k5wT6FiOisRNrxm0/T5PkE09HJSxd7bEM5FmXtZiJsixbIQolhvrj1uM6fy7Ptb4mywUzc89w/sKzRHbGAfaPlAkcdNyk5i2nb1ggp6rZ53e7yL0c5Y5D5C3M70Wj3v9nzXpvvRGzvfIC4jruidmgyC5sPUsbDvIFzdFcqo1zFnRCtVLDPXzOu/HkFRpDuGEDcixbIQokh/nohjTeglVWxoiD99m9VWN8xJh/89p47ZOcCQeahWRrhKURTe2PEGV7LUc76+2O5FmnkWMQ2dEHeIhm4NeaXjK6pYSk4K07ZPw2wpZsq3Qe+CQb1iH2tehKwkVahLI09Gd1J32/g7/AprwgrdDRJC2JBiWQhhIzXbxOur1aPx3Rz0vHFvc9vGf86C5AvqWL8Z4FH0qnurTq5iS/QWVaybfzcebf5oWVIW4rYwImgE/ev3V8X2xO5h6bGlRe/g6geD/quOpcfCetvuGK8Oboa3i3re8lm/HSMhPcemrRDiOimWhRA2Zq85TkyKeiT+60NsP2g5vx32fqGO1esKnQoty3tV2JUw5u6fq4p5GD14q/tbaDXydiSERmNdjMfXyVcVX3BoAXtj9ha9U5sx0LifOhb6LZzaoAq5Oeh5874WqlhCRi6vrz4q3TGEKEG1/nTKysrijTfeIDg4GKPRiL+/PxMnTuTixYu3dJytW7cya9YshgwZgre3NxqNhgYNGlRM0kLUcJuPx9ksad2tsScj29dVN8zNgP8VGoxn5wDDFlrngi0kKTuJl7a+RJ4lTxWf3W02Xg5eNu2FuFO5Gdx4p8c7qi+QFsXCf/75D5czL9vuoNHA0I+tS8oXtOYFm+4Yg1r6MqC5uivVH0dj+a1QlyshxHXVtljOzs6mT58+zJ49m/T0dIYNG0ZAQABLly6lbdu2nD179qaP9cILLzBz5kzWrVtHfHx8BWYtRM2WlJHLq78cUcWcDXa8+0BrNIX7H6+fCknn1bG+b4BnoM1xzRYzU7dNJTYjVhWf0HICPev2LI/UhbittK/d3mZmmMTsRP699d+qecnzudWBgYW6Y6TFwO8vqRYr0Wg0vDW8pc1S2G/87xhxqcXM6yzEHa7aFstvvfUWu3fvpmvXrpw6dYrvv/+ePXv2MG/ePK5cucLEiRNv+lgDBgzgrbfeYsOGDRw7dqwCsxaiZpv+v6PEF+q/+Ma9zalby1Hd8MRaOLhcHQvoAp2L7n7xRdgX7Li0QxXrULsDk9vKKn1CFGdSq0k2XyYPXT7Ehwc+LHqHto/adsc4+jOE/aAK+bgYeet+9ewYKVkmXv05TLpjCFGEalks5+bm8skn1uVvFy5ciL
Ozc/62KVOm0Lp1a7Zu3cqBAwdu6njvvfce06ZNY8CAAXh4eFRIzkLUdGtCL7G20Mj4Pk19eLBDoe4XqTHw2/PqmN4J7v8UtDoK+yf6Hz4L/UwV83LwYm6vudhp7WzaCyGstBot/+3+X+o411HFVxxfwfpz6213uNYdw+iujv/+ks1doCGt/Rga4q+K/R1+hR/2R5VD5kLcXqplsbxjxw5SUlIIDAykbdu2NttHjhwJwJo1ayo7NSFuS5fTspn+P9vZL94Z0Urd/cJigf89Y138oKDB7xbZ/eJM8hle/udlFK5frdJpdMztOVf6KQtxE9wMbszrPQ+9Vt1t4vUdr3MsoYg7pW51YOiH6lhuGvzyJJjV4wXevK+FzaDdN9ccJyoxszxSF+K2US2L5dDQUADatWtX5PZr8bCwsErLSYjblaIoTP35CMmZtouP+LgWWiBk7+dwptCKYs3us97+LSQ5O5nn/3qeDFOGKv5iuxfp4NuhXHIX4k7QwrMFUztPVcVyzDlM/msyVzKvFLHDcOsMGQVF7YHt81WhWk72vPuAujtGRq6Z//wUitki3TGEuKZa3gONjIwEoG7dukVuvxa/cOFCkdsrQk5ODjk51/typqamAmAymTCZihhsIWxcO09yvm5NRZ+3r/dE8udJ9Qj7wS1qM6iZl/r/jDmM3aY3KDjMT3H2JW/Q+5CnvmJlspiYsmUKUWnqW7qD6w/mkeBHKvx3QH7XSkfO262rrHM2rMEwjl05xs8RP+fHLmdeZvJfk1ncbzEGXaFpHfu9hd35HWiSz+eHlC3vYK7bBaVe1/xYj0APHmxfhx8PXJ9lavfZRD77+zRP9WxYYa9HftdunZyz0imP81Uti+X09HQAHB0di9zu5GRd4z4tLa3ScpozZw6zZs2yif/999/F5imKtmnTpqpOoUaqiPN2MQPmH9FBgRLYWa/Q3eEif/xx/cPTLi+D3uFvoDfnqvbf6TuO+C27VTFFUVibtZZ9uftU8Tq6OnRK7sQff6iX4q1I8rtWOnLebl1lnLPWSmsO6A5w3nw+P3Y04Sj/+uVfPOD4gM2MNbV8xtI9+S20WADQKGZM341lS5PZ5OqvTzPXQQub7XUk5V7ff/6mU5hjTtDApWJfk/yu3To5Z7cmM7Ps3YqqZbFcHU2dOpUpU6bkP09NTSUgIIC7774bT0/PKsys5jCZTGzatIn+/fuj1+tvvIMAKu68ZebmMfyzPeQp6m4SHzzcjt7B3tcDioLup3Foc9W3e81dnqNT3//YHHfliZXsObRHFfNy8OKrgV/h4+hTbvmXRH7XSkfO262r7HPWPbs7YzeM5WLG9S+zh02H6RTQiadbP23TXtmWC/+8k//cwZTEwIwfMT/8g2pAbt3WiTy2ZD/Xel9Y0PBjtDO/PdsVF2P5vy75Xbt1cs5KJyEhoczHqJbF8rXZL4r7NpCRYf1wd3Gp4K+8BRgMBgwGg01cr9fLL+0tknNWOuV93t5Zc4Kz8epC+fHuDenfQj1Cnl0L4dQ6dSygM7r+M9Hp1Pn8ce4PPjj0gSpmr7Xn47s/po6bekR/ZZDftdKR83brKuuc+eh9WNB3AY+ue5TMvOufkV8c/YI6rnUYETRCvUPvlyF6D5z9Oz+kPbcV7a6PoPcr+bFuQbV5vk8QH/15Oj8WnZzNG2tOsmB0W9t51suJ/K7dOjlnt6Y8zlW1HOBXr149AKKjo4vcfi1ev379SstJiNvJ72ExrNqr7k/cwt+Vlwc1UTeM2gub3lDHHDxg5FIoVCjvi93HtO3TbP6v2d1m08q7lU1cCFE6QbWCeLfnuzZLxL+56022RW9TN9bqYMRicPFTx7fMgTN/q0LP92lMxwa1VLG1YTE2K3oKcaeplsVySEgIAAcPHixy+7V469atKy0nIW4XUYmZvPqLeiYZR3sdC0a3xWBXYJ7ktFj4YSyolqfWWD94C10ljkiK4IW/X7BZWez/2v8f9zS6p7xfghB3vN4BvXmt02
uqmFkx89LWlzgWX2hKOWdvGLkENAXnQVfg58ch6fpAeTudlg8fboubg/qL8Iz/HSPicnp5vwQhaoxqWSx369YNNzc3zpw5w+HDh222//TTTwAMHTq0kjMTombLzbPwwneHSMsuNN/qsJY08r6++A95OfD9o9blcgvq+W8IUq8QFp0WzVObniItVz3g9uEmDzOhxYRyzV8Icd2opqN4vOXjqlhWXhb/2vwvziafVTeuf5d1OfqCMhPguzGQe707Vh13B5vp5LJMZp5fdYisXHO55i9ETVEti2V7e3uee+45AJ599tn8PsoA8+fPJywsjF69etG+ffv8+CeffELTpk2ZOnWqzfGEEFb/XXeCg5HJqtj9bfx5oF2BK8WKAr9PgWj1bBY07AW91X9fcRlxTNo4ictZ6qnn7g64m1c7vVph/RyFEFaT201mSKMhqlhSThJPbHqC6LRC3SfumgxNCt3piTsCq5+x/t1fNailH2M611M1OxGTyrTVR2Q5bHFHqpbFMsDrr79O586d2blzJ0FBQYwaNYouXbrw0ksv4e3tzZIlS1Tt4+PjCQ8PJyYmxuZYX375JV26dKFLly4MGWJ9U4mJicmPdenSpdguH0LcLn4LvcSynedVsXoejsy+v6W6qN27GA59rd7ZvT48uEw1ej4pO4knNz3JxfSLqqYh3iG82/NddEUsfS2EKF9ajZbZd82ms19nVfxy5mWe2PgElzMLfJHVamH45+BVaGzC8dU2C5ZMv7c5wbWdVbFfDl7kmz2R5Zm+EDVCtS2WjUYjf//9N9OnT8fR0ZHVq1dz4cIFxo8fz8GDB2nUqNFNHys6Opo9e/awZ8+e/KI4Nzc3P7Znz578RUaEuB2djkvj1Z/V/ZTt7bR8Oqadelqos1tg/avqnfWO8PC34OiRH0rLTeOpTU9xNkV9qze4VjAL+y7Ewc6hvF+CEKIYep2ej+7+iNZe6nE80enRPLHxCRKzCyxPb3S1/j0b3NQH+XM2nLw+641Rr+PTMe1xsld/6Z215hgHI5PK/TUIUZ1V22IZwMHBgTfffJOIiAhycnKIiYlh6dKlRa7sN3PmTBRFYdmyZcVuK+lf7969K/4FCVEF0nPyePrrA2QW6m/41rCWtKxT4AMz7jh8/xgohfolDl8Evi3zn6bmpvLUpqc4kXhC1ayBawM+7/85boU/hIUQFc5J78Sn/T4luFawKn425SyTNk5SF8xeja8O+CtYAlwd8Hfx+l3Wxj7OvP9giOp4JrPCM18fJD49ByHuFNW6WBZClI3FovDvH0I5c0U9n/KoDgE81DHgeiAtFr59CHIK3WHp+R9oPiz/aUpOCk9ufJIj8UdUzfyc/Fg8YDFeDl7l/hqEEDfHzeDG5/0/p76relrV00mneXzD48RnxV8PBvWDvjPUBzBlwrejVDNkDG7lx1O91HdyY1Ozee7bg+SZLeX+GoSojqRYFuI29tGfp1l/LFYVa1nHlVnDWlwP5KRbC+UU9bzLNLsPel+fmiolJ4UnNj7BsQT1tFReDl58OeBLfJ18yz1/IcSt8XLwYnH/xfg7qRcXikiOsC2Yu70AIY+oD5BxGb55ELKud7X4z4AmdG2kXql299lE3tsQXu75C1EdSbEsxG3q97AY1WpcAG4Oej4b0x6j/mo/RLMJfpoIMaHqnet2ghFfWAcEAQlZCUzaOMmm64W3gzdLBi6hnqt65LwQour4OfuxZNAS6jir50M/m3KWiRsmEptx9Qu0RgNDP4KGPdUHiA+3dsnKs3a1sNNpWfBIW3xdjapmX/xzlp9kwRJxB5BiWYjb0NGLKbz042FVTKfVsPCRdgR4OFoDFgv871k4vUG9c62GMHoV6K2D9C6mX2Tc+nGcTDypaubj6MPSQUtp6Nawol6GEKKU6jjXYclA24L5XMo5xv4xlvMp560BO3t4aCV4N1Mf4Pw26xdps3VOdi9nA58+2g69Tj0d5NRfwth3PhEhbmdSLAtxm7mSlsOTK/
aTbVL3J5w+pBndg672KVYU66wXYd+rd3bwgEd/Bidru4ikCMb+MZYLqRdUzWo71mbpwKU2fSOFENWHv7M/ywYtI8AlQBWPyYhh3PpxHE84bg04uMOYH8G5UFeqk2thzWTrF2ugXb1avDmspaqJyazw1MoDRCVmVtTLEKLKSbEsxG0kMzePx5fv41JKtio+ulMA4+5qcD2wZQ7s/Vy9s52D9YqyZyAAYVfCGL9hvHqeVqxXrJYOWipdL4SoAXydfFkycInNHaDE7EQmbpjI3pi91oB7AIz5Aexd1Ac4/A1snJa/aMnoTvWY2K3QsTJyeXz5PtKy1cvdC3G7kGJZiNtEntnC898eIiw6RRXv1MCDWfcVWHhk10LY+q56Z60djFoJ9boAsOnCJh7f8DgpOepjNXZvzIrBK2yuVAkhqi9fJ1+WDVpGC88WqniGKYOnNj/FmjNrrAG/EHjkO7BT901m96ew5Z38p9OGNOPuJt6qJqfi0nnmm4Pk5skMGeL2I8WyELcBRVGYteY4f55UXwUO8HDg00fbYW939U9916ew4bVCe2usg/mC+qMoCkuOLmHKlilkm9VXp0O8Q1g2aBk+jj4V+EqEEBXBw+jBVwO/orOveqW/PEser21/jU8OfWJdyrpBd3hwufULdEFb34Gt7wHW8Q8fj25rs8LfttPxvPpzmCyJLW47UiwLcRtYvO0sK3er+xW7O+pZNqETXs4Ga2DXp7Bhqu3O934ALR/AZDExc9dMPjjwgU2Tbv7d+KL/F7LgiBA1mJPeiYX9FtKvXj+bbZ+Hfc4r214hx5wDTQbB/YsA9WA+/n4btljvSrkY9Xw1riOeTvaqJr8cuihTyonbjhTLQtRwPx2I5r/r1DNV2NtpWTy2A4HeV6/87FpYdKHc/03oMIGUnBT+telf/HL6F5smI4JGsKDvAhz1jhWRvhCiEhl0Bt7v9T6PNnvUZtsf5/5g0oarq/21fhCGvG97gC3/zS+YAzwc+Wp8Rxz06iWxP9tyhuU7z1dE+kJUCSmWhajBNhyL5ZWfw2zi8x8KoWMDD+uT7R8W0fUC6DcLur3AqaRTjFk3hj2xe1SbNWiY0n4KM7vORK/VV0D2QoiqoNPqeKXTK0zrPA2tRl0GHL5ymIfXPsyx+GPQcRLcU0zB/OdsUBTaBLizcExbdFr1VeiZa47xW+ilinwZQlQaKZaFqKF2RMTz/LeHMFvU/QNfu6cp97b2t45e3zgdNs+w3bnfLOj+ImvPrmXM72NspoYz6ozM7z2fCS0nXB8YKIS4rTzc9GEW9l2Ik95JFY/JiOGxPx7j51M/Q6cnii6Yt70P6/4NFgt9mtZmzvBWqs2KAlO+P8zm43EV+RKEqBRSLAtRAx2OSubJFfvJNatHnj/VqxFP9gy0LiTw2/Ow82Pbnfu/ianrs/x3z3+Zum2qzUA+Lwcvlg5aSr/6tv0ahRC3l+51urNi8Ar8nPxU8WtjGN7Y8QbZ7R6DIfNsd973JfwyCfJyeahjAFP6B6s251kUnvn2INtPx9vuK0QNIsWyEDVMWHQyY7/aQ0auWRUf3SmAVwc1BVM2/DQeDq203XngHGLbjGLChgmsOrnKZnNLz5asGrKKll4tbfcVQtyWgmsF8+2Qb2nn085m268RvzL2j7FENx0EQz/GZtDf0Z/hu9GQk87zfRozvuB87kBunoUnVuxnv6zyJ2owKZaFqEGORKfw6Jd7SM3OU8WHtPLjrftboclMgBX3wYk16h01Orj/Mzb7BzNyzUhCr4TaHPvB4AdZPng5vk6+NtuEELc3Lwcvvhz4JY81f8xm24nEEzy45kHW1vKEB5dB4TEMEZth2T1o0mJ5497mPNShrmpzlsnM+KX7OHBBCmZRM0mxLEQNcfRiKmO+3G1TKPcM9uaDUW3QJUbAl30hSj1QD52BzJFfMiMznP/b8n82C40YdAZmd5vNG13fwF6nngZKCHHn0Gv1vNzxZeb2mouDnYNqW7opnanbpv
JKwi7SRi2HQv2ciQmFL/uivXyMOSNac29rdbeO9Jw8xn61l73npGAWNY8Uy0LUAJHpMH75fptCuUeQF1881h776J3wZT9IOq/e0d6Fo8Pm8dCpJUVOC1fHuQ4rB6/k/sb3V1zyQogaZVCDQawasspmiWyAdefWMfLIxxy8by441FJvTL0ISwahO/MnH4xqQ79m6gWMMnLNjFuyl11nEioyfSHKnRTLQlRze84l8slxHSlZ6kK5e2MvFj/WHuOhJbBiGGQnq7abXOvyWa8neezwfJvZLgD61+/P9/d+TzPPZhWZvhCiBgp0D2TVkFXcF3ifzbZLGZeYcOg9Puw2jhyPQgV1bhp8+yD6XR/zyei29GmqLpizTGYmLNvL9ggpmEXNIcWyENXYnyfimLjiIDlm9aCabo09WfxIS4zrJl+dvkldSB/1b8mohoF8GvEjeYp6m4OdA7O7zWZer3myIp8QolhOeife7v42c3vOxUXvotpmUSx8deYXRtbx52BAW/WOigU2z8D4v0kseqgp/ZvXVm3ONll48uuDHEqQaSlFzSDFshDV1P8OX+SplQfIzVNPD9e9sRdfDfPD4eshcPgb1bYsjYb5ge0YY0zndOo5m2O28mrFT0N/4v7G98v8yUKImzKo4SB+vu9n2tdub7PtfHoU4+0S+W9QBzIKv6cc+xX7ZQP4dLA7g1uqBw6bzArLT2n5Zm9URaYuRLmQYlmIakZRFBb/c5YXvjtMXqEFRwa18GVp1ziMX/WES4dU23YZDYxs3IyllngsirrA1ml0PNn6SZYPXk4913oV/hqEELcXP2c/vhrwFS+0ewE7rZ1qm4LCqrzLDA9sylYH9cBALh9Hv7g3C1qdYWiIf6H9NMxcc4KPNp9GUdTvdUJUJ1IsC1GNmC0KM347xtvrTthsezDEi089vkP/42Oq/smX7HRMqV2bJ/1qE5mXbrNfk1pN+GbINzzf9nlZtloIUWo6rY5JrSbxw70/0Mqrlc32GHMGz/l686yfH5F2BQrq3DTsfn2CjxyXMLFTbZv9Pth8iqm/HMFUaJElIaoLKZaFqCYyc/N4auV+VuyyHYw32juKd5NfQrvvi/xYjgY+d3dlWN06bHI02Oyj1+qZ3HYyq+5dRQvPFhWauxDizhFUK4iVg1fy7w7/xqgz2mz/x6jn/rr+fFzLjcwCXTO0h1YwPeZZ3u5iWxR/ty+KcUv2kpJpqtDchSgNKZaFqAYuJmfx4KJdbD5xWRXXYuGbZrt5K/11tHFHAFCATY4O3F/Hj09quZNdRNfjEO8Qfhr6E0+0fkKuJgshyp1Oq2Nci3H8ct8vdPLtZLPdpIHF7m4Mq+vHWidHrpXHmisnGRM2nl+b/4MedWG880wCwz/bwfn4jEp4BULcPCmWhahie84mcN+C7Ry7lKqKB+ti2e//Pt3OfYxOsX6o7DcaeNSvNlNqexOtty2C3Q3uzOg6g+WDltPIvVGl5C+EuHMFuAbw5YAv+W/3/+Jp9LTZHmtnx1QfLx7292Wn8epVaEsebc8uYqvLDFrpL6ran72Swf2f7mDb6SuVkb4QN0WKZSGqiKIorNh1njFf7iEhIzc/rsXCv4wb+cP4Gh6JhwE4rdfzXG1vJvjVJsxo2+VCq9HycJOHWTt8LSODR6LT6irrZQgh7nAajYahgUNZO3wtY5uPxU5jZ9PmhMGep/x8eNLXm+P21i/6/qbz/E8/jX87/o4d16e4TM40MW7JXhb+HSED/0S1YPsbLYSocBk5eUxffZRfDqmvqrTWnGGuwzKaWM6AGc7q7fjc3Y31To5Yipnqra1PW17r/BpNPZpWRupCCFEkZ3tn/tPxP4wIGsGcPXPYE7vHps0uBwd21XGgf0YmTyWl0MSUy3N8wxDH7bycNY59ivV9zKLA3A3hhEYlM++hEFyM0p1MVB0ploWoZCdiUnnu24OcuXK9X54b6fzH7nsesfsLrUUhQq/nc3dXNjg5ohRTJNdzqcfkdpMZUH
+AzJkshKg2At0DWTxgMVuitvDhwQ85m3LWps0mJ0c2OTnSJyOTp5NTaJZ7gR8Nb/KTuSdzTKNJwLpg0sbjcQxdsJ2PR7eldV33yn0hQlwlxbIQlURRFFbtjWLWmmPkXF1oRIOFkbp/eNVuFZ6aNI7Z2/OVuyubnByLPY6H0YN/hfyLB4IfkMF7QohqSaPRcHe9u+lRtwe/nfmNhYcWcjnrsk27v5wc+cvJkd4ZmTyeksrInH/or93P3LxRrDL3wYyO8wmZPPDZTl4e2JTHuzdEq5WLA6JySbEsRCW4kpbD1F+OsPlEXH6shzaMqXaraKq9wDYHI8vcfNjvYDsN0zVOdk6MbTGWcS3G4aR3qoy0hRCiTOy0dowIGsHghoP55sQ3LDm6hLTcNJt2W5wc2eLkSEh2DuNTUpmVuZRxuo28m/cwmy3tMJnh7XUn+Of0FeY9GIKPa/HvlUKUNymWhahg64/GMu3XI/mD+FpozvOK3So62h3ld2dHXnX145x98VeInfXOdNB2YMa9M/By9qqstIUQotw42DkwqdUkRjUZxbcnvmXF8RWk5qbatAs1Gvg/ozcBJhOPpaTwUfp8jpqbMMf0CIeVxmw7HU//D/5h1n0tGNbGX7qgiUohs2EIUUESM3KZ8v1hnv76AAkZuTTSXOID/UI+dJrBTu9o+gbUYZaXZ7GFsovehadDnmbtsLX0c+iHm8Gtkl+BEEKULxd7F54Kecr6vmbsh5t90e9rUXo9//XyoE+9Omz0juNd59ks1H9IsCaKlCwTL35/mCdXHuByWnYlvwJxJ5Iry0KUM0VR+PXQRWavPU5SpokgTTRP2/2K0SWUH12decPBr8T9vR28eaTZIzzU5CFc7V0xmWRFKyHE7cVZ70xvY29m9J/B/879j69PfE1sRqxNu0ytlh9cXfjB1YWQ7As8nzYLJbU5i0wj2HQc9p1P5NVBTXmoQ4D0ZRYVRoplIcrRufgMpq8+yvaIeJpqzvMvpx9JcjvPx86OJOpK7kLR2L0x41qM456G92Cvs6+kjIUQouo46Z0Y12IcjzR7hI3nN7L82HJOJJ4osm2o0UCo0YCbRwyDM+YyIiWA/2WM5NVfTPx4IJq3h7ekqa9rJb8CcSeQYlmIcpCSZWLBn6dZsessnXQ7GOu9kSOu6Sywtwdcit1Pg4a76tzFo80epZt/N+l/J4S4I+m1eoY0GsI9De9hb+xevj7xNf9E/4NFsdi0TdHp+M7VBVyTaZD7GWPTHLgQ04+hHycy9q5GPN+nMe6OcsFBlB8ploUogzyzhVX7oli4eRdB+p/pVPc4YQ4aQjUaoPg3aw+DO8ODHuCB4AcIcAmovISFEKIa02g0dPbrTGe/zsRmxPLTqZ/45dRPXMlOKLL9eXs95z3zwHM9HbN+J+J4EwYdfIBJd9/FY13rY7CT1UxF2UmxLEQpKIrCr2EnWbHtc0y6nWTXzeSwRsONxsy29wphVPMx9K3XV7paCCFECXydfHmu7XM8FfIUW6K28P2Jb9kTt7/Y9kccdOAQgUZ5h1+POvDboY483PUpRndoLXftRJlIsSzETVIUhXMp51h24Fd2nfuNOLtElPweFsW/EfvpnLg3eAT3NhlJI7dGlZKrEELcLvRaPf3r96d//f5Epkay9tTPrAn/keg82/maARSNhkjHbGAbc45vY0moO53r3cu4DsMJqhUkhbO4ZVIsC1GC7Lxs9sbuZVvUFv46s4nL5mTrhhssnOeswACP1tzb7hna1+mKViOzNAohRFnVc63HMx3+j3+1f5HDMftYc3Ah6+MPkaZRit0nTp/MbzFf89uar/HQutCnUT961etDJ99OOOqLXy1ViGukWBaiALPFzMmkk+yN2cuemN3sj91HjuXmpm4zWiz00LkxoMkD9G73DEa9QwVnK4QQdyaNRkNb/0609e/Eq3k5/HN4MRtPfMfWvCQytcVfnEi0pPFTxK/8FPEreo2O9rU70Nm/C518O9Hcszl2Wi
mLhC35rRB3NEVRiEiOYG/sXvbE7GF/7H7STEXf2iuKk8VC9xyFAXV70aPLFBy8giowWyGEEIXZ2xno1+E5+nV4jpykC+zYPY8NF/5kmz2k6YovnE2Kmd2xe9gduwcAJztHOvh2pKNvRzr7dSa4VrDcFRSAFMviDpOdl82xhGMcvnyY0CuhhF4JJTE78ZaO4ZOXR4/MHFrbN2Zwj+dxaHoPaGXEtRBCVDVDrfr0GfwxfSwWsk9vYsM/H3I4O5wdjgZi7EoueTLyMtkavZWt0VsBcDO40ca7DSHeIYR4h9DSq6V027hDSbEsbluKohCdHs3R+KP5xXF4Yjh5St4tHUerKLTJyaFHZja+2X74Nn6QkIcmoHcpeZERIYQQVUSrxdhkIMOaDOTejCTCNq3g0onviDNEs93RyCGjgbwbDPRLyUlRFc86jY7gWsG09m5NG582tPJqRYBLgFx9vgNIsSxuC2aLmQtpFziRcML6L9H6Ly335rtUFNQg10Sn7Gw6ZufgklGb8879qNvzMbp3aIdOllQVQogaQ+dUi7b3v0DIfZP552AY+q0rmRj7F9mOl9hrNLLXwcAZ+xtP5WlWzPmfLd+Hfw9Yl+1u6tGUZp7NaObRjOaezWng2gCd3G28rUixLGoURVFIyE4gIjmCM8lniEiOICIpgvCkcLLyskp9XH9THp2ys+mUlU27LBNnTU3YrHRkR/BgRt7dmTEB7uX3IoQQQlQ6rVZD7w4h0CGE45dS+XnrPnKPr+NF9hGsP06og549V4vnKP0Npjy6Kt2Uzv64/ewvMP+zUWckuFYwjWs1prF7YwLdA2ns3hhvB2+Ztq6GkmJZVEsWxcLlzMtcSL3A2ZSz+YXxmeQzJOckl+nYWkUhONdESE4OIdk5tM3JwdHkwA5LSzab2/GZ210M6dGMZ9vXxcfVWD4vSAghRLXR3N+V5qP7kpjRg+/2RfLe3nAaJe2ir+4gz2qPkGeXwSGjgVCDgcNGe8Lt7THfZKGbbc4mLD6MsPgwVdzF3oUg9yAC3QMJdA+kkVsj6rvWx9fJV7pyVHNSLIsqY1EsXMm8QmRaJBdSLxCZGpn/OCotihxzTrn8P+5mM61zcgnJzqFNTg4tc3LRWXQcsASzzdKaRZZWROgaMrCFPw93DKBLI0+00tVCCCFuex5O9jzTuzH/6hXI/gtd+XF/FNPDLhKQeZ7u2UfooT3CFO1JLNo8jhnsOWwwEGqwJ8xoIFF3a10t0nLTOHj5IAcvH1TF7bX2BLgEUM+1HvVc6lHPtR71XetT37U+Po4+UkhXA1IsiwpjspiIy4gjJiOGS+mXiE6NZm/mXn778zdiMmOIzYjFdJNzGN8sn7w8muWaaJaTS7PcXJrn5FLbbCZX0XNYCWSvpQkLLU3Za2mCSetAjyAvJrb2p3+L2rgab+62mxBCiNuLRqOhYwMPOjbwYOZ9LfjjSCzrjnRk+el4NKZsOmjD6WQKp2PGScZoIzBqcrmi03HCXs9xgz3H7e05YbAn7gYzbhQl15LLmZQznEk5Y7PNTmuHr6Mv/s7+1HasTXpWOqYzJgLcAvB38sfXyRe9Tj67KpoUy+KWWRQLyTnJXMm8wpWsK/k/L2deVj/OuoJFsdgeIK7sORgtFhqa8mica6KxKZfGuSaa5+biZbb+f1cUV0ItTVlmacJeS1OOKg3JRY+9TkvnQA9mtPJjYAtfajndeFCHEEKIO4ejvR0PtK/LA+3rkpptYvPxONYdCWDhqRBy8yzoyaOF5jwdtSfpmBvOA5ln+JcmHoAErZaTBntO6/VE2Fv/ndXrySphoZSS5FnyiE6PJjo9Oj/2956/8x9r0ODt4I2Pow/ejt54O3jj7Xj1+dXH3g7e1DLWkivUZVCti+WsrCzmzJnDd999R2RkJB4eHgwaNIjZs2dTp06dWzpWUlISM2fOZPXq1cTGxuLr68vw4cOZOXMm7u7uFfMCaghFUcjKyyIxO5HknGQSsxNJyk6y/stJyn
+cmJ1oLY6zrpBnubXp10rLwWKhnimPQJPpamFs/emfl8e1G2CJijNHLMGsUhpxxNKQMEsjYvEArF0pvJztGdbEh77NfOge5I2zoVr/2gshhKgmXI16RrSry4h2dUnPyWNHRDxbwi+zJdyZxSmNWWy+FwAfkmitPUsr7Vlam85yr/YcXhrrHP4W4JKdjjP5BbQ9Z/R6LujtSlxt8GYoKFzOuszlrMuQUHw7O40dng6e+Dj64GH0oJaxFrUMtaw/jbXwMHrgbnDPf+xo5yiDEQuotlVDdnY2ffr0Yffu3fj5+TFs2DDOnz/P0qVLWbt2Lbt376ZRo0Y3daz4+Hi6du1KREQEjRo14v777+fYsWN89NFH/PHHH+zatQsPD48KfkUVL9ecS2puKqm5qaTlppGWm0ZqztXHpjTrtpzr25JzkvOL4fLqH1waDhYLAaY86uXlUc9kov7Vx/VNJrzMFq79ueYpWs4qfoQqAfxoqUu4UpdjSkOiFS/g+h+1g15Hz4Ye3BXoyV2BnrT0d5M+yEIIIcrE2WDHwBa+DGzhi6IohMelsSX8CrvPJrD/vB2bc2qx2dL+amsFfxJoqT1HkOYiTcxRBOdG0zXzEvaatKstrFeiI68WzpF6Oy7o9UTaWR+X9mp0UfKUPOIy44jLvLlbu/Zae9yN7vlFtIu9C672rrjau+Ji75L/vOBjV4P1uUFnKLe8q4tqWyy/9dZb7N69m65du7Jx40acnZ0BmD9/Pi+99BITJ05ky5YtN3WsF198kYiICEaMGMH333+P3dU+RZMnT2bBggVMmTKFZcuWVdArKZpFsZCdl02OOYfsvGwy8zLJMGVc/2nKtP4rEL8Wy8izbs8wZZCVl0W6KZ203LQqLXhL4mix4J+Xh1+e+erPPPzzzPk/vc1mCpay6YqRC4ovexRfzil+nLLU5ZRSl3OKH7nY9s3ycLKnTYA7bQPc6RLoSUhdd+zt5HaTEEKIiqHRaGjq60pTX1ee7hVIntnCiZg09pxLYO+5RPaeT+RSpheXLF5spCOYrfvZkUcDTSxNNNEEa6NopImhQV4s/bLjcNGk5B9fARJ0Wi7Z2XHJzo4YO93Vn3ZctNMRY2dHRjkW04XlWnK5nHmZy5mXb3lfe609rgZXnPROONo54qh3zH/spHfCwc7B+lzviJOd9ee1dgVjDnYOGHQGjHbGKu9ColEURanSDIqQm5uLj48PKSkpHDx4kLZt26q2h4SEEBYWxv79+2nfvn0xR7GKiYmhbt262NnZERkZSe3atfO35eTkEBAQQGJiIpcuXcLHx+emc0xNTcXNzY13/n4HnZOObLO18M3Jy8l/fK0YLvg425xNdl52uQ9sqwoaRaGWxYJPnhkvsxkfsxnvq8Wvl/l6MexmsVD4um6C4sIlxZMLii/nldqcV3w5b6nNecWPeFzBZg8rg52Wpr4utK1Xy1og13OnnsftfbvIZDKxbt067rnnHvQ3OffnnU7OWenIebt1cs5K53Y/bxaLwvmEDI5cTOFIdAphF1M4djGFjFxzMXsoeJJKfU0cDTSxNNDG0kATR31NHP6aeLw1qYVaQ6pWQ8zVAvqKTnf9n53152WdjkSdFuU2+HzUa/UYdUYMdgZrAX31sVFnxKAzqB4b7a7Grj7Oy8jj2bueJSUlBVdX11L9/9XyyvKOHTtISUkhMDDQplAGGDlyJGFhYaxZs+aGxfL69euxWCz06NFDVSgDGAwGhg4dypIlS1i3bh3jx4+/5VyXn1yOzuH2WanHTlHwMJupZbbgbrEUeGzGw2zBu0BB7Gk2F3Gd13plOEbxJEbxIFTxIkbx5BKeXFI88+PZ3Pg2TR13B5r5udLMz8X6Dd7PhQaeTrKCnhBCiGpNq9XQyNuZRt7ODGtjHWNlsSicjc/g2KUUIi6ncyoujdOX07mQkInZAgm4kaC4cVAJtnZ0LsBALn6aBPw1CdTRxONPgvW5KYEATQLtNMm4adJt8sgDEnQ64nU6Ll
8topN0WpK01kI6SaclOf+xDlM1LaxNFhMmi4k0062vymvOKu4Lys2rlsVyaGgoAO3atSty+7V4WFhYkdtv9VhLliy5qWPVJA4WCy5X/7laLLhYlOvPzdaYq8VCLbOFWlcL4VpmM06KUuQ13RTFkWTFmXjcuKS4Eaq4EY8bVxR34hU3rihuXMGNeMWNLG5+IQ9fVyMNvBxp4OlEAy+nqz8dqe/hhIP97fMlRAghxJ1Nq9XQ2MeZxj7OqnhOnplz8RmcjksnMjGTqMRMopIyiUzM5FJyNmaLQg72nFf8OK/4FXt8A7l4kYK3JgVvTTI+mmS8Ncl451mfe2lSqE86tTTpuJGGVqPuWKAAmRpNfuGcrNWSqNORotWSptWSqtWSprM+TtNqSL0W02rLtX91dVQti+XIyEgA6tatW+T2a/ELFy5U2rFycnLIybneJzglxdq3qDy+sVzjYLFgVBQcLRYcLQqOioKDRcFBUXCyWKw/FYs1ZrHgeK2touBituB8rSBWLDZXfE2KjjSMZCoOZOBIGg5kKEZicSBccSIZJ1IUZ2tRjDOpihMpOJGsOJGGE2ZutnC1AJloNeDuoMfL2R4fVyO+rga8nPQkXjxLr05tqOvhiI+LEaO+qOOayExLJrNsp/O2YTKZyMzMJCEh4ba8XVkR5JyVjpy3WyfnrHTkvKl568G7roG76hqAWvlxk9lCbGo2F5OzuZiYwd6wk7j41CEhw8TltFzi03KIz8jFokAWEIUTUTgB/iX+fxosuJCJuyYddzJw06TjRgbumgzcNWm4kYmzJhMnsgnQZOFEFs5k4aTJwoVs9Bp17WMC0jRa0rVa0rUa0nRaMjQasrTXf2ZpNWRqtGRpNPmxTK3G+lyrIUtjbVPehfe1Oq0svY6rZbGcnm69leDo6FjkdicnJwDS0m58Ob68jjVnzhxmzZplEz815dQNc7hTFff1Y16lZiGEEEKIO11CQgJubm6l2rdaFsvV0dSpU5kyZUr+8+TkZOrXr09kZGSpT/6dJjU1lYCAAKKiokrdyf5OJOft1sk5Kx05b7dOzlnpyHm7dXLOSiclJYV69eqVaYrgalksX5smLjOz6BvxGRkZALi4uFTasQwGAwaD7aA0Nzc3+aW9Ra6urnLOSkHO262Tc1Y6ct5unZyz0pHzduvknJWOtgzdO6plj+x69eoBEB0dXeT2a/H69etX6rGEEEIIIcSdpVoWyyEhIQAcPHiwyO3X4q1bt67UYwkhhBBCiDtLtSyWu3XrhpubG2fOnOHw4cM223/66ScAhg4desNjDRo0CK1Wy7Zt27h8Wb0STU5ODmvWrEGn03HPPffcUo4Gg4EZM2YU2TVDFE3OWenIebt1cs5KR87brZNzVjpy3m6dnLPSKY/zVi1X8AN4/fXXefvtt7nrrrvYuHFj/qwV15a77tWrl2q5608++YRPPvmE4cOHM2fOHNWxHn30Ub755hseeOABvvvuu/zlrl944QU+/vhjxo0bV+nLXQshhBBCiOqvWg7wA2uxvHnzZnbu3ElQUBA9evTgwoUL7NmzB29vb5YsWaJqHx8fT3h4ODExMTbH+vDDD9m9ezc///wzTZs2pUOHDhw7doyjR48SFBTE/PnzK+tlCSGEEEKIGqRadsMAMBqN/P3330yfPh1HR0dWr17NhQsXGD9+PAcPHqRRo0Y3fSwvLy/27t3L888/T25uLr/++ispKSlMnjyZvXv3lmk6ESGEEEIIcfuqtt0whBBCCCGEqGrV9sqyEEIIIYQQVU2K5Qoye/ZsNBoNGo2Gr7/+uqrTqZbCwsJ47rnn6NKlC/7+/hgMBtzc3OjatSsLFizAZDJVdYrVzsmTJ3n33Xe5++678fLyQq/X4+vry4gRI9i2bVtVp1dtZWRksHLlSp5//nk6d+6MwWBAo9Ewc+bMqk6tymVlZfHGG28QHByM0WjE39+fiRMncvHixapOrdo6cOAA77zzDiNGjKBu3br57/WiaJmZma
xevZrHH3+cJk2aYDQacXJyIiQkhDfffJP09PSqTrHamj9/PiNGjCAoKAg3NzcMBgP169dn7NixHDlypKrTqxESEhLw8fFBo9HQuHHj0h1EEeXu5MmTisFgUDQajQIoK1eurOqUqqUFCxYogFK/fn2lb9++ysMPP6z07dtXMRqNCqD06tVLycnJqeo0q5U6deoogOLs7Kz069dPeeihh5SWLVsqgKLRaJQPPvigqlOslg4dOqQANv9mzJhR1alVqaysLKVLly4KoPj5+SkPPfSQ0qlTJwVQvL29lTNnzlR1itXSsGHDivx9EkVbvHhx/jlq1qyZ8uCDDyoDBw5UXFxcFEBp2rSpEhcXV9VpVkuenp6K0WhUOnXqpAwfPlwZPny4EhwcrACKXq9X1qxZU9UpVnvjxo3Lr8cCAwNLdQz56y5nFotF6dmzp1K7du38N1Qplot25syZIj+MY2Nj8wvABQsWVEFm1Vffvn2VFStWKFlZWar4okWLFEDR6XTKsWPHqii76isiIkJ5/PHHlUWLFikHDhxQ3nzzTSmWFUWZNm2aAihdu3ZV0tLS8uPz5s3L/8IqbL3zzjvK9OnTld9++02JiYlRDAaDFMslWLZsmfLkk08qx48fV8UvXbqktG3bVgGU0aNHV1F21dv27dtt3u8VRVEWLlyoAErt2rUVk8lUBZnVDJs3b1YA5cknn5RiuTr54osvFED5+uuvlXHjxkmxXEorV65UAGX48OFVnUqNMWDAAAVQZs6cWdWpVHtz5sy544vlnJwcxc3NTQGUgwcP2mxv3bq1Aij79++vguxqFimWS2/nzp0KoBgMBrmTeIsCAwMVQAkNDa3qVKqlzMxMJTAwUGnevLly6tSpMhXL0me5HMXGxvLyyy/Tt29fxowZU9Xp1Gh6vR4Ae3v7Ks6k5ri2tPulS5eqOBNRE+zYsYOUlBQCAwNp27atzfaRI0cCsGbNmspOTdxBrr1v5eTkkJCQUMXZ1CzyOVmyWbNmcfbsWRYtWpR/rkpLiuVyNHnyZLKysvjss8+qOpUaLSkpiXnz5gEwZMiQKs6m5jh79iwAvr6+VZyJqAlCQ0MBaNeuXZHbr8XDwsIqLSdx57n2vqXX62XNg1uwcuVKwsPDCQoKIigoqKrTqXbCwsKYN28eEyZMoEePHmU+XrVdwa+mWbt2LT/++COzZs2SX9xbdPr0ad5++20sFgtxcXHs3LmT9PR0nn76ablCf5POnDnD2rVrAbjvvvuqOBtRE0RGRgJQt27dIrdfi1+4cKHSchJ3no8++giAQYMGYTAYqjib6mvu3LkcO3aMjIwMTpw4wbFjx/D392fVqlXodLqqTq9asVgsTJo0CXd3d957771yOaYUy+UgPT2dZ555huDgYF555ZWqTqfGiYuLY/ny5arY5MmTmT17Nlqt3Py4kby8PMaPH09OTg6jRo2iffv2VZ2SqAGuTdfl6OhY5HYnJycA0tLSKi0ncWdZt24dX331FXq9ntmzZ1d1OtXahg0b+PPPP/Of169fnxUrVsj7fREWLFjAvn37WLp0KZ6enuVyTCmWgeHDh3PixIlb2mfFihV06tQJgNdee42oqCj+/PPPO+qbcVnP2zXdu3dHURTMZjORkZH8+uuvzJo1iz/++IONGzfSoEGDcsy6apXXOSto8uTJbN++nUaNGvHpp5+WNcVqqSLOmxCi6pw8eZJHH30URVGYO3duft9lUbTNmzcDkJyczJEjR3jzzTfp1asXb731FtOmTavi7KqPyMhIXn/9dXr16sX48ePL7bhSLAPnzp0jPDz8lvbJzMwEYO/evSxcuJDHHnuMPn36VER61VZZzltRdDodDRs2ZMqUKTRo0IAHHniA559//rYaYFTe5+ztt9/ms88+o3bt2mzYsOG27fNX3udNgLOzM1D8ecrIyADAxcWl0nISd4aLFy8yaNAgkpKSmDJlCi+88EJVp1RjuLu706NHD9atW0fXrl2ZPn06AwYMoG
PHjlWdWrXw7LPPkpuby6JFi8r1uFIsA4cPHy71vuvWrcNisXDkyBF69+6t2nby5EnAWtB8+eWXDBo0iFdffbUMmVYvZTlvNzJ8+HCcnZ1Zv349ubm5t81o3/I8Z4sWLeL111/Hzc2N9evXl35lohqgIn/X7lT16tUDIDo6usjt1+L169evtJzE7S8xMZEBAwZw4cIFJkyYwPvvv1/VKdVIer2eUaNGceDAAdasWSPF8lVr167F3d2dp59+WhXPzs4GrF/UrtVq33333U0PiJdiuZyU9GF+8uRJTp48eVt1J6hoGo0GDw8PIiMjSUpKonbt2lWdUrXy3Xff8eyzz+Lo6Mjvv/9OmzZtqjolUcNcu+198ODBIrdfi7du3brSchK3t/T0dAYPHszx48cZMWIEixcvlmXCy8DLywuAK1euVHEm1UtycjJbt24tclt2dnb+tmsF9M2Q0VNlNHPmTBTr4i42/8aNGwdYp3hRFIVly5ZVbbI1yNmzZ4mKisLV1TX/DUFYrVu3jrFjx2JnZ8evv/5Kt27dqjolUQN169YNNzc3zpw5U+SX/Z9++gmAoUOHVnJm4naUk5PDsGHD2Lt3LwMHDpRZHMrBtaIvMDCwijOpPoqrx86dOwdYz9W12K1cwJRiWVSZBQsWEBsbaxMPDw/nkUceQVEUxo4dK2+oBezYsYORI0eiKArff/89AwYMqOqURA1lb2/Pc889B1j7+V3rowwwf/58wsLC6NWrl4y2F2VmNpsZPXo0f/31Fz169OCXX365bbrWVaQdO3awfv16LBaLKm4ymViwYAErV67EwcGBUaNGVVGGdw7phiGqzLx583jxxRcJCQmhcePGKIrChQsXOHDgABaLhZ49ezJnzpyqTrNauffee8nKyqJhw4asXr2a1atX27Tp3r07kyZNqvzkqrnhw4cTExMDXF/l8Msvv2T9+vUA+Pn58euvv1ZZflXh9ddfZ/PmzezcuZOgoCB69OjBhQsX2LNnD97e3ixZsqSqU6yWfv/9d9VUZ7m5uQB06dIlPzZ9+nRZVOmqTz75JP9vy8vLi2eeeabIdu+//77cSSzg9OnTTJgwAS8vL9q3b4+npyfx8fEcOXKEmJgYjEYjy5YtIyAgoKpTve1JsSyqzNtvv826devYv38/GzZsICsrCw8PD/r378/o0aN57LHHZJ7lQpKTkwHr7BDXbisVRYplW4cOHbJZYOPixYtcvHgRuDMHshmNRv7++2/mzJnDt99+y+rVq/Hw8GD8+PHMnj272AVL7nRXrlxhz549NvGCMelHel1SUlL+45K+kM6cOVOK5QJ69erFa6+9xtatWwkLCyM+Ph57e3saNGjAyJEjmTx58m09sLs60SiKolR1EkIIIYQQQlRHctlOCCGEEEKIYkixLIQQQgghRDGkWBZCCCGEEKIYUiwLIYQQQghRDCmWhRBCCCGEKIYUy0IIIYQQQhRDimUhhBBCCCGKIcWyEEIIIYQQxZBiWQghhBBCiGJIsSyEEEIIIUQxpFgWQgghhBCiGFIsCyGEEEIIUYz/B7azYuFb1pVrAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAssAAAIHCAYAAABpIhEUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAADoMklEQVR4nOzdd3hUxfrA8e9ueockhCQkgVACUkIV6R0EBBFE7IrIFa+iP8Wu6AVFsXJFwYJUuxdQBETpNRh66DWQQggtIb1tdvf3R8yGs7tpm02yyb6f5+HRM2fOnNmT9u7sOzMqvV6vRwghhBBCCGFCXdsdEEIIIYQQwlZJsCyEEEIIIUQpJFgWQgghhBCiFBIsCyGEEEIIUQoJloUQQgghhCiFBMtCCCGEEEKUQoJlIYQQQgghSiHBshBCCCGEEKWQYFkIIYQQQohS2HSwnJuby1tvvUVERASurq4EBwczadIkkpKSKtzG0qVLUalU5f779ttvq/GVCCGEEEKIukhlq9td5+XlMXDgQKKjowkKCqJv377ExcWxd+9eGjVqRHR0NM2bNy+3nV27drFw4UKz59LT01m1ahUAsbGxFWpPCCGEEELYD5sNlqdPn867775Lz5492bBhA56engDMmTOHF154gf79+7Nt27Yq3ePLL7/kqaeeonfv3uzatcsKvRZCCCGEEPWJTQbLBQUFBAQEkJ6ezsGDB+ncubPifMeOHTly5Aj79++na9euFt+nd+/e7N69m6+++oopU6ZUtdtCCCGEEKKescmc5aioKNLT02nRooVJoAwwfvx4ANasWWPxPS5cuMDu3btxdnZmwoQJFrcjhBBCCCHqL5sMlg8fPgxAly5dzJ4vLj9y5IjF9/j+++8BuOOOO2jYsKHF7QghhBBCiPrLsbY7YE5CQgIAISEhZs8Xl8fHx1t8j+Jg+eGHH65Q/fz8fPLz8w3HOp2O1NRU/Pz8UKlUFvdDCCGEEEJUD71eT2ZmJsHBwajVlo0R22SwnJWVBYC7u7vZ8x4eHgBkZmZa1P7evXs5c+YMvr6+3HHHHRW6Zvbs2cycOdOi+wkhhBBCiNqTmJhY6iBseWwyWK5uxaPKEyZMwNnZuULXvPbaa0ybNs1wnJ6eTlhYmCHoFuXTaDRs3bqVgQMH4uTkVNvdqTPkuVWePLOK23t5Ly/sfMGk/J0e7zAgdEDNd6iOke81y8hzqzx5ZpZJTU0lIiICLy8vi9uwyWC5eJm4nJwcs+ezs7MBLHrhhYWF/PLLL0DFUzAAXFxccHFxMSn39fXFz8+v0v2wRxqNBnd3d/z8/OQHvRLkuVWePLOKy0rJwsHNwaQ80zFTfrdVgHyvWUaeW+XJM6uaqqTM2uQEv7CwMAAuXrxo9nxxedOmTSvd9oYNG7h69SrNmzenV69elndSCCHqgeSsZLPll7Iv1XBPhBDCNtlksNyxY0cADh48aPZ8cXlkZGSl2y5OwXjooYcs7J0QQtQfSVlJZssvZUmwLIQQYKPBcu/evfHx8SE2NpaYmBiT8ytWrABg9OjRlWo3KyuL33//HZBgWQghAJKzZWRZCCHKYpPBsrOzM1OnTgXg6aefNuQoQ9F210eOHKF///6K3fvmzZtHmzZteO2110pt99dffyUnJ4cePXrQqlWr6nsBQghRR5Q2spyclYwNbvAqhBA1ziYn+AFMnz6dTZs2sXv3blq1akXfvn2Jj49nz549NGrUiMWLFyvqX79+ndOnT5OcbH6UBCq/trIQQtRnGq2GaznXzJ7L0mSRUZCBj4tPDfdKCCFsi02OLAO4urqydetW3nzzTdzd3Vm1ahXx8fFMnDiRgwcP0rx580q1l5yczJYtW3BycuLee++tpl4LIUTdcTnnMnpKHz0uLUVDCCHsic2OLAO4ub
nx9ttv8/bbb5dbd8aMGcyYMaPU80FBQRQWFlqxd0IIUbeVN4kvKSuJNr5taqg3Qghhm2w6WBZCCFF9yguWS1tWTtgPjUaDVqutlnYdHR3Jy8urlvbrI3lm4ODgUCtrTEuwLIQQdqq8FS9kRQz7lZGRwfXr18nPz6+W9vV6PYGBgSQmJlZpswh7Is+siIuLC/7+/nh7e9fYPSVYFkIIO1XeyLKstWyfMjIySEpKwtPTE39/f5ycnKwenOl0OrKysvD09EStttnpUzbF3p+ZXq9Ho9GQnp5OUlLRKj41FTBLsCyEEHbKOBhu6tWU+Mz4Us8L+3D9+nU8PT0JCQmpthFMnU5HQUEBrq6udhn4WUKeWdFcNi8vLy5evMj169drLFi2z6cthBDCZLWLLgFdFMeShmF/NBoN+fn5+Pj42PVH/cJ2qVQqfHx8yM/PR6PR1Mg9JVgWQgg7VKgr5HL2ZUVZ14CuiuP0/HSyNdkI+1E8caw2JlEJUVHF3581NdFRgmUhhLBD13KuodUr/9AYjyyDpGLYKxlVFraspr8/JVgWQgg7ZLzNtZujG43dG+Op8lSUy8YkQgh7J8GyEELYIeMgOMgjCJVKRQN1A0W5jCwLIeydBMtCCGGHjIPgYM9gAAmWhRDCiATLQghhh4xXugj2KCVYlhUxhAAgJyeHzz77jGHDhhEUFISLiwteXl60bduWiRMnsnr16nq1s962bdtQqVRMnDixtrtS62SdZSGEsEPGI8ZBnkGAjCwLYU5UVBT33HMPycnJuLq6cuuttxIcHEx+fj6xsbEsW7aMZcuW0bZtW44fP17b3RVWJsGyEELYIeMguIlnEwAaqhuWWU8Ie3Pw4EEGDx5Mfn4+L730EtOnTzfZDCMxMZE5c+bw1Vdf1VIvra979+6cPHkSHx+f2u5KrZM0DCGEsDM6vc7sBD8wHVlOyUshrzCvpromhE3R6XQ89NBD5Ofn88477/Dhhx+a3TUuNDSU//73v+zatasWelk93N3dadOmDUFBQbXdlVonwbIQQtiZ67nX0eiUO18VjywbB8sgy8cJ+7Vu3TpOnjxJWFgYr732Wrn1u3ZVbuyzc+dOpk6dSmRkJA0bNsTNzY02bdrw6quvkpaWZnL90qVLUalUzJgxw2z7o0aNwsHBgbi4OEX5sWPHeOihh2jevDmurq40atSITp068dxzz5GcrPz53b17N3fddRdNmzbFxcWFwMBAunfvzquvvkpWVpahXmk5y2lpaXz++efcfvvthjb8/PwYPnw4GzduNNvvAQMGoFKpiIuLY9WqVfTo0QMPDw98fX25//77uXjxovkHaiMkDUMIIeyMcWqFk9oJPzc/tIVaXFQu+Dj7kF6QbjifnJVMuE94TXdT2BCdTs+NnAIrtqcjM0eDRp2PWl2943YN3Z1Rqy3bxOLPP/8E4J577sHBwaHS17/00kscPnyYyMhIBg8eTF5eHgcPHuSDDz5g7dq1REdH4+npWX5DZThw4AB9+vQhLy+PyMhIxowZQ05ODufPn2fu3LncddddhtHhNWvWcNddd6HX6+nevTu9evUiLS2Ns2fP8sEHH/Dkk0+W25/o6GieffZZmjVrRuvWrenZsycJCQls2LCBDRs2sHDhQiZNmmT22i+++II5c+bQt29fRo4cyZ49e/j55585cOAAhw8fxs3NrUrPorpIsCyEEHbG3LJxapUaLUUz+YM8ghTBclK2cgMTYX9u5BTQddam2u6GRQ5MH4Kfp4tF1x4+fBiAzp07W3T9f/7zH3r16qXI+83Pz+fZZ59lwYIFzJkzh7feesuitot99tln5OXl8fHHH/PCCy8ozp06dUpx748//hidTseKFSu4++67FXX37duHn59fufdr3bo1f//9Nz169FCUHzp0iEGDBvH8888zYcIEs0H3/Pnz2blzJz179gSKVhgZOnQou3fv5qeffio1yK5tkoYhhBB2xng5uOJ85dKOk7MkDUPYp5SUFAD8/f
3Nnn/88ceZOHGi4t/NecsjRowwmSDn4uLCp59+iqOjI7///nuV+3jt2jUAhgwZYnLOOOe4rLq33norXl5e5d4vPDzcJFCGojcUTz/9NBkZGWzdutXstc8//7whUIaivOhp06YBsGPHjnLvXVtkZFkIIeyMcfBbvCFJMeNg2XhrbCFEkWXLlpmsrTxgwAD69OljOE5KSmLNmjWcOnWKjIwMdDodAM7Ozpw9e7bKfejatSt//vknTz/9NLNmzaJPnz44OpoP77p27crJkyd5+OGHefPNN+natatFaTBarZbNmzeze/dukpOTyc/PBzC8ntJe17Bhw0zKIiIiAExyq22JBMtCCGFnjNMqijckKe1YJvgJe1WclnD9+nWz5wsLCw3//+STT/L1118rzs+ZM4dXX30VjUZjfKnVvPTSS+zatYtt27YxcOBAPD096dmzJ3fccQcTJ05UjGy/9957HD16lDVr1rBmzRoaNmxInz59uPPOO3nooYdwdXUt934XL15k1KhRhhQVczIzM82Wh4SEmJQVj2YXB9y2SIJlIYSwMzKyLCqrobszB6abfnRvKZ1OR2ZWFl6enjUywc9SHTt2JCoqikOHDvHggw9W6tro6GheeOEFfHx8mDt3LgMGDCAwMBAXl6L86eDg4EqPphaPSt/M29ubLVu2EBUVxZo1a9i2bRtbtmxh48aNzJ49m507d9KqVSugaIm7/fv3s2XLFtauXcv27dsNgfOHH37I33//XW7e8uTJkzl8+DB33303L7/8Mq1bt8bLywu1Ws2CBQuYMmUKer3e7LXV/bWuLhIsCyGEHdHr9SbBr3GwbHx8LecaBdoCnB0sDzpE3aZWqyyeJGeOTqfDSZePt6eLTQdQI0aM4IsvvmD58uV88MEHlVoR47fffgPg3Xff5dFHH1Wcy83N5fLlyybXODsX/YzdvITbzZKSzL9xValU9OnTx5D+cfXqVZ577jl++ukn3njjDf73v/8Z6jo6OjJs2DBDSkR8fDyTJk1iy5YtfPDBB3z44Yelvqbs7Gw2btxI48aN+eWXX0yex/nz50u9ti6z3e9QIYQQVnc99zr5WuXHnSGeyo9GjdMw9OhlJz9hl0aOHMktt9xCQkICs2fPrtS1N27cAMynHixfvtzs6GvxZLwzZ86YnDtz5kyF1yMOCAgwrNV87NixMus2bdqUV155pUJ109PT0el0BAUFmQTKGo3G8AahvpFgWQgh7IjxqLKz2plG7o0UZV7OXvi4KGfwSyqGsEdqtZrvvvsOFxcX3nzzTV5++WXS09NN6qWkpHD69GlFWfHEtUWLFilylk+cOGEITo3deuutuLu78+eff3LgwAFD+fXr13niiSfMpmF89dVXXLhwwaR83bp1QFHqRbH//ve/Zke0zdU1JyAgAB8fH44dO0ZUVJShXKvV8sorr5gN8usDCZaFEMKOJGYmKo6L11g2ZjzafDHTtnfYEqK6dO3alU2bNhEYGMhHH31E48aN6d+/P/fffz9jx47l1ltvJSgoiG3bttGmTRu6desGwGOPPUZgYCBr1qyhdevW3HvvvQwdOpROnTrRt29fmjZtanIvT09PXnzxRQoLC+nTpw/Dhw9nxIgRREREoNVqufXWW02u+eqrr2jevDnt2rVj/Pjx3HfffXTq1Innn38eV1dXxTrOM2fOpEmTJnTp0oV7772XCRMm0Lp1a+bOnYuvry8vvvhimc/C0dGRl19+mcLCQvr378+wYcO47777aNmyJV999RVPP/10FZ+2bZJgWQgh7MjFLGXQG+Jl+hGxuXLj64SwJ3369CE2Npa5c+fSp08fTp8+zcqVK9m0aROZmZlMmDCB3377jaNHj9K+fXugaCWNffv28cADD1BQUMDq1atJSkrinXfe4aeffir1XjNmzOCjjz4iJCSELVu2cOzYMSZNmsT69esNOc03e+edd5g0aRIqlYrNmzezZs0acnNzmTx5MjExMfTu3dtQ9/PPP+e+++
4jJyeHP//8k7/++gtHR0emTZvGkSNHDBMBy/L666+zbNkyIiMjiYqKYtOmTXTs2JHo6GjDG4X6Rib4CSGEHUnKVKZTNPFsYraecbmkYQh75+7uzrPPPsuzzz5b4WtCQkL44YcfzJ6Li4szW65SqXjxxRdNRnl1Oh1r167F29tbMSly9OjRjB49ukL9efjhh3n44YcrVHfAgAGlrmrxyCOP8Mgjj5iUR0ZGMnHiRJPybdu2lXqfZs2alXofWyEjy0IIYUeMR4hDvcznKJqMLEsahhDCTkmwLIQQdsQ46DXOTS6tXIJlIYS9kmBZCCHsRIG2gKs5VxVlTbzMp2EYB8uZmkzS801XARBCiPpOgmUhhLATl7IuoUeZG1haznKgZ6DJKhkyyU8IYY8kWBZCCDthHOw2cGmAl7OX2bpOaieTba8lFUMIYY8kWBZCCDtR0ZUwSjsvK2IIIeyRBMtCCGEnKrrGcmnnZWRZCGGPJFgWQgg7UdGVMEo7L8GyEMIeSbAshBB2wjiNorSVMAznJQ1DCCEkWBZCCHug1+tJzExUlJU7smyUhnEp6xJandbqfRNCCFsmwbIQQtiBjIIMsjRZirLK5iwX6gu5knPF6n0TQghbJsGyEELYAePJfWqVmkCPwDKvaejSEDdHN0WZpGIIIeyNBMtCCGEHjCfnBXkE4aR2KvMalUolK2IIIeyeBMtCCGEHKrsSRmn1jPOehajvVCoVKpWqtruhMGDAAFQqFXFxcdV2j2bNmtnc664tEiwLIYQdqOxKGIZ6siKGEPVOXFwcKpWKAQMG1HZX6gTH2u6AEEKI6mfxyLJxGkaWpGEIUdu+/fZbcnJyaNKkYm96LbF582Y0Gk21tV+XSLAshBB2oLK79xUL9QpVtiM5y0LUurCwsGq/R4sWLar9HnWFpGEIIUQ9p9VpSc5KVpQZp1eUxrheal4qOZocq/VNiPomMTGRKVOm0LRpU1xcXAgICGDcuHHs27ev1Gt+/fVXevTogbu7O/7+/txzzz2cO3eOGTNmoFKpWLp0qaJ+aTnL8fHx/Pvf/yYiIgJ3d3d8fX1p164dU6ZM4fTp0wDMmDGD8PBwALZv327IyVapVEycONHQVlk5y4mJiTz77LNERETg5uaGr68v3bp1Y+bMmWRkZFT+odk4GVkWQoh67krOFQr1hYqyio4sB3sGm5QlZSXRqmErq/RN1BE6HeSmWrU9VU4mOBSAuprH7dx8q/8e/zh69CiDBg3i+vXrtG7dmnHjxpGQkMBvv/3GmjVr+PHHH7nnnnsU18ydO5fnnnsOtVpNv379CAwMZM+ePXTv3p3Ro0dX+N6JiYl06dKF1NRUWrVqxciRI9FqtcTHx/PNN9/Qs2dPWrduTadOnbj77rtZuXIljRs3Zvjw4YY2+vTpU+59du7cyZ133klaWhrNmjVj9OjR5ObmcurUKWbMmMGYMWPo1KlThftdF0iwLIQQ9Zxx6oS7ozsNXRpW6Fo3RzcauTXiWu41RXsSLNuZ3FT4yHofy6sBH6u1Vo6XYsHDv9pvo9frefDBB7l+/Tovv/wy77//vmFkduXKlUyYMIFJkybRp08fgoKCADh//jwvv/wyzs7O/PXXXwwcOBCAwsJCnnjiCZYsWVLh+y9cuJDU1FSmTp3K559/rjiXkJBgyD++66676NSpEytXrqRNmzYmo9ZlSU1N5e677yYtLY2PPvqIadOmob7pjcjff/9NcLDpG+y6TtIwhBCinjO3EkZlloSSFTGEKN+2bds4evQoYWFhzJo1S/Ezdvfdd3PXXXeRlZXF4sWLDeWLFy+moKCAhx9+2BAoAzg6OjJnzhw8PT0rfP9r14re0A4ZMsTkXFhYmFVykBcuXMi1a9cYPnw4L774oiJQBujZsycBAQFVvo+tkWBZCCHqOeO1kSu6EoahvqyIIUS5du7cCcCECRNwcjLd8Ofhhx9W1AOIiooCMEnNAGjQoAHDhg2r8P27du0KwOuvv87atW
vJy8ureOcraNOmTQBMmTLF6m3bMpsOlnNzc3nrrbeIiIjA1dWV4OBgJk2aRFKSZaMacXFxPPnkk4SHh+Pi4oK/vz89e/bko48+snLPhRDCdhgHtxWd3FfMOFiWjUmEMHXp0iWgaGKcOcXlN8cwyclFE29DQ0PNXVKpVS8mTpzIhAkTOHHiBKNHj6Zhw4b069eP9957j8uXL1e4nbIkJhb97NvbShk2m7Ocl5fHoEGDiI6OJigoiDFjxhAXF8eSJUtYu3Yt0dHRNG/evMLt/fnnn4wfP57c3Fy6dOlCjx49SElJ4ejRo3z99de89NJL1fhqhBCi9iRmKIPbMO/KLTtlvHycBMt2yM23KPfXSnQ6HZmZmXh5eZl8lG91br7V234FVfdueA4ODvzyyy+8+uqr/P7772zZsoU9e/awc+dO3n//ff766y969epVrX2or2w2WJ41axbR0dH07NmTDRs2GPJ25syZwwsvvMCkSZPYtm1bhdo6deoU48aNw8vLi40bNyq+WXQ6HQcPHqyOlyCEEDYhITNBcRzmVblg2bj+xcyLaHVaHNQOVe6bqCPUautOktPp0GudwcO7xlaqqG7FE9vi4+PNni9e5u3mjUSCgoI4ffo0iYmJtG3b1uSa4pHcyujcuTOdO3dmxowZZGRkMGPGDP773//y3HPPsXfv3kq3d7PQ0FBOnTpFbGwsHTp0qFJbdYlNfocWFBQwb948AObPn69IcJ82bRqRkZFs376dAwcOVKi9adOmkZeXx9KlS03eVanVarp162a9zgshhA1Jz08no0C57mllg2XjkWWNTsOVnCtV7psQ9Unfvn0BWL58OVqt1uT8999/r6gH0Lt3b6BotQxj6enpbNiwoUp98vb2Zvbs2ahUKo4dO2Yod3Z2BopW3aiM4smDCxYsqFK/6hqbDJajoqJIT0+nRYsWdO7c2eT8+PHjAVizZk25bSUmJrJ+/XqaN2/OyJEjrd5XIYSwZQkZylFlR5UjQZ5BlWrD19UXDycPZbtGo9VC2LsBAwbQoUMH4uLieOutt9Dr9YZzv/32G7/++iuenp5MmjTJUP7YY4/h7OzMt99+y44dOwzlWq2WF154gczMzArf/7vvvlMExMX+/PNP9Hq9Ii/a398fJycnYmNjzQb2pZk8eTL+/v78+eeffPrpp4rXCBAdHc3Vq1cr3F5dYZNpGIcPHwagS5cuZs8Xlx85cqTctrZt24ZOp6NXr14UFhby66+/EhUVhVarpX379tx77700bFix9UaFEKKuMQ5qgz2DcVRX7le/SqUizCuMk6knS9rNSKBHUA+r9FGIuqBHj9K/3ydPnszkyZP54YcfGDhwIO+99x6//fYbnTp1IiEhgaioKBwdHVm0aJFhjWUomij34Ycf8txzzzFw4ED69+9P48aN2bt3L6mpqTz00EN8//33hpHgsqxcuZJHHnmEFi1a0KFDB9zc3Lhw4QJ79uxBrVYza9YsQ11nZ2eGDx/OmjVr6NixI126dMHZ2ZnevXvz2GOPlXoPX19fli9fzp133snzzz/PZ599xq233kpubi4nT57k3LlzHDp0qN4tH2eTwXJCQtEv95AQ88sbFZeXlhd0sxMnTgDg6elJ3759iY6OVpx/4403WLFihWJ9Q3Py8/PJz883HBdv56jRaAwLfYuyFT8neV6VI8+t8uSZlYhLi1Mch3iGlPpcynpuIZ4himA5Li1Oni/173tNo9Gg1+vR6XTodLpqu0/xiGTxveqCPXv2lHru9ttvR6fT0a5dO/bv38+7777L+vXrWbFiBT4+PowZM4ZXX32V7t27m7zeZ555huDgYD7++GOio6NxdXVlwIABvPfee3z88ccANGzYUPHMit38dXruuedo0qQJu3fvZufOnWRnZxMcHMyECROYNm0a3bp1U9x7wYIFvPTSS2zatIkff/wRrVaLRqPh0UcfVfTPuL/9+vXj0KFDfPTRR6xfv55Vq1bh6elJeHg4M2fOJD
w8vNq/pjqdDr1ej0ajwcGh7LkT1vjZVOmNx9BtwBNPPME333zDG2+8oXgnVOzcuXO0atWKVq1acebMmTLbevLJJ/n6669xdHTE09OTL774guHDh3Pt2jXeeecdvv/+e3x8fDh+/Lgi6d7YjBkzmDlzpkn5jz/+iLu7e+VfpBBC1IAV2SuI0cQYjns492CU+6hKt7MhdwM78ks+Jr7F8RYe9HzQGl0UNsTR0ZHAwEBCQ0MrNJopqo9Wq6VPnz6cPn2akydP0rhx49ruks0oKCggMTGRy5cvl5t3nZOTwwMPPEB6ejre3t4W3c8mR5atqfjdTWFhIV9//TUTJkwAit6lfffdd5w+fZp9+/bxxRdf8O6775bazmuvvca0adMMxxkZGYSGhjJw4ED8/Pyq90XUExqNho0bNzJ06FCzC7YL8+S5VZ48sxL/2/A/uF5y3Kd9H0a2MT9/o6znVhBbwI49JcFygUeBzAOh/n2v5eXlkZiYiKenJ66urtV2H71eb1g6rrqXVLN1sbGx+Pn50aBBA0NZfn4+b7zxBqdOnWLw4MG0atVKntlN8vLycHNzo1+/fuV+n6akpFT5fjYZLBevfpGTk2P2fHZ2NgBeXl4VbsvT09PsDjmPPfYY+/btY/v27WW24+LigouLi0m5k5NTvfgFWZPkmVlGnlvlyTMz3ZAkvGF4uc/E3HMLbxBu0q6DowNqlU3OE69x9eV7TavVolKpUKvV1br+cfFAVvG97NnKlSv5z3/+Q9euXQkNDSUjI4PDhw+TnJyMv78/8+fPR61WyzO7iVqtRqVSVejnzho/lzb5tIt3rLl40fyWqsXlTZs2Lbet4jphYWFm34kV76hTH2dvCiHsW2ZBJql5qYoy42XgKsp4I5N8bT5Xc+T3phBVNXjwYMaNG0dycjJ//PEHW7duxc3NjX//+98cPHiQ1q1b13YX7Z5Njix37NgRoNTNQorLIyMjy22reOm5GzdumD2fmlr0h+TmtZyFEKI+MN5pT61SV3qr62KN3Brh6uBKnjZP0X6gR2CV+iiEvbv11lv56aefarsbogw2ObLcu3dvfHx8iI2NJSYmxuT8ihUrABg9enS5bfXq1Qs/Pz8uX77M6dOnTc4Xp1+YW89ZCCHqMuNl44I8gnB2sGzSlkqlItRbOSptvIazEELURzYZLDs7OzN16lQAnn76aUOOMhRtd33kyBH69+9P165dDeXz5s2jTZs2vPbaa4q2HB0dmTZtGnq9nqefftqw5BvApk2bWLp0KSqViilTplTzqxJCiJqVmKEcWa7szn3GmnopU99kYxIhhD2wyTQMgOnTp7Np0yZ2795Nq1at6Nu3L/Hx8ezZs4dGjRqxePFiRf3r169z+vRpkpOTTdp66aWX2Lp1K5s2bSIiIoIePXpw/fp1oqOj0Wq1vPvuu3Tv3r2mXpoQQtQI42DWOO+4soxHlo3TPIQQoj6yyZFlAFdXV7Zu3cqbb76Ju7s7q1atIj4+nokTJ3Lw4EGaN29e4bacnJxYt24dH3zwAf7+/qxfv56jR4/Sv39/1qxZw+uvv16Nr0QIIWqHcZqEpZP7ihmPTEsahhDCHtjsyDKAm5sbb7/9Nm+//Xa5dWfMmMGMGTNKPe/k5MTLL7/Myy+/bMUeCiGE7TIe+a1qGoZJsJyZgF6vt/s1X4UQ9ZvNjiwLIYSwXI4mh2u51xRlVU3DML4+tzCXlLyqL/gvhBC2TIJlIYSoh4xHlVWoCPEKqVKbAe4BOKuVq2lIKoYQor6TYFkIIeoh48l9jT0a4+JgugtpZahVapO8Z1kRQwhR30mwLIQQ9ZDxiG9V85WLyVrLQgh7I8GyEELUQ8ZpGFVdCaOYcdAty8cJIeo7CZaFEKIesvYay4Z2zKyIIUR9plKpbG7Fl+IN1cpaBUxYjwTLQghRD9VUGkZiRiJ6vd4qbQshhC2SYFkIIeqZvMI8ruRcUZRVVxpGpiaTG/k3rNK2EELYIgmWhRCinrmYed
GkzFrBcqBHII5q5X5WMslPCFGfSbAshBD1jHEecSO3Rrg7uVulbUe1IyGeyvWaJW9ZiCJpaWl8/vnn3H777TRt2hQXFxf8/PwYPnw4GzduNHvNgAEDUKlUxMXF8f3339O1a1fc3d0JCAjg0UcfJSkpqcL3T05O5sMPP6R///40adIEZ2dnAgMDGTduHPv27Sv1uuzsbD744AO6deuGt7c3Hh4etGnThqeffpozZ86Y1N+zZw/33HMPQUFBODs7ExISwuTJk0lIqJ+/C2x6u2shhBCVF58Rrzi21qjyze3FZcSVej9R/+j0OtLy06zXnk5HZn4mhXmFqNXVO27XwKUBalXNjA1GR0fz7LPP0qxZM1q3bk3Pnj1JSEhgw4YNbNiwgYULFzJp0iSz13788cd88cUX9O3blzFjxhAdHc23337Lli1b+PvvvwkODi73/r///juvvPIKrVu3JjIyEm9vb86ePctvv/3G2rVrWbt2LcOGDVNck5yczNChQzl+/DgNGzZkwIABuLi4cP78eb766itatWpFRESEof4XX3zBM888A8Ctt95K3759OX36NIsWLWL16tVs376dW265pQpP0fZIsCyEEPXMzYEsQLhPuFXbb+bTjJ1JOw3HEizXf2n5afT/pX9td8Mi2+/djq+rb43cq3Xr1vz999/06NFDUX7o0CEGDRrE888/z4QJE/D09DS59uuvv2bt2rWMHDkSAI1Gw2OPPcYPP/zA1KlT+fXXX8u9f+/evTl27Bjt2rVTlK9fv54777yTp556irNnzypW93j44Yc5fvw4EyZMYNGiRYq+xcXFkZGRYTgufjMQFBTE77//TteuXQ3nFi1axOTJk3nssceIjo4ut691iaRhCCFEPROXHqc4burd1KrtN/NuVub9hLBX4eHhJoEyQOfOnXn66afJyMhg69atZq+dMGGCIVAGcHJyYu7cubi7u7N69WoSE8tf07xDhw4mgTLA7bffzj333ENsbCzHjh0zlO/du5fNmzcTEBDAwoULTYL4Zs2aERkZaTh+//330Wq1fPXVV4pAGeDxxx/nzjvvZM+ePRw6dKjcvtYlMrIshBD1jPHIsnFwW1XG7cVnxKPT62rso24hbJlWq2Xz5s3s3r2b5ORk8vPzATh79qziv8buu+8+kzI/Pz+GDRvGqlWr2LVrF3fccUe598/Pz+evv/5i7969XLt2jYKCAgCOHj1quH+HDh0A2LRpEwD3338/Xl5eZbar0+nYvHkz7u7u3H777Wbr9O3bl9WrV7N37146d+5cbl/rCgmWhRCiHskoyCA1L1VR1tTHuiPLxiPVedo8ruZcJdAj0Kr3EaKuuXjxIqNGjeLw4cOl1snMzDRb3rSp+Z/TZs2aAUW5xeU5evQod955J3FxcRW6f/FodYsWLcpt+/r162RlZQHg7Oxcbt36RIJlIYSoR+LTlfnDDioHQj2tO8EvwD0Ad0d3cgpzDGUX0i9IsFyPNXBpwPZ7t1utPZ1OR2ZmJl5eXjUywa+mTJ48mcOHD3P33Xfz8ssv07p1a8NrXLBgAVOmTKm2TXz0ej0TJkwgLi6OJ598kieffJLmzZvj6emJSqXi9ddfZ/bs2RbfX6fTAeDp6cndd99dZl1zqSB1mQTLQghRjxinYIR4heDk4GTVe6hUKpp6N+Vk6klDWXxGPD2De1r1PsJ2qFVqq06S0+l0OBY44u3qXe3Bck3Jzs5m48aNNG7cmF9++QUHBwfF+fPnz5d5fXx8vCI/+OZygKCgoDKvP3XqFKdOnaJbt258+eWXJufN3T80tOiNdGxsbJltA/j7++Pq6oparWbJkiU2twV4daof36FCCCEA02DZ2pP7iplM8jO6rxD2Jj09HZ1OR1BQkEmgrNFo+O2338q8/n//+59JWWpqKhs2bEClUtG7d+8yr79xo2gnzZCQELPnzK3zPGTIEAB++uknQ4pFaRwdHRkwYAAZGRls3ry5zLr1jQTLQghRjxivTG
HtyX2Gdn2U7UqwLOxdQEAAPj4+HDt2jKioKEO5VqvllVdeMbu5x81++eUX1q9fbzguLCzk+eefJzs7m1GjRhEWFlbG1dCyZUvUajVbtmxRTCLMy8vjySefJDU11eSa7t27M3DgQK5evcoTTzxBdna24nxcXJxhYiDAG2+8gVqt5rHHHmPbtm0m7WVlZbF48WJyc3PL7GtdI2kYQghRjxiveVxdI8vG7crycaK+M7ckXLHJkyczefJkXn75Zd544w369+/PoEGD8PX1Zc+ePVy5coWnn36a+fPnl9rGE088wYgRI+jXrx9BQUHs2bOHCxcuEBwczLx588rtX0BAAI8//jjffPMNHTt2ZNCgQbi5ubFz5060Wi0TJ05k6dKlJtd99913DB48mJ9++on169fTp08fXFxciI2NJSYmhk8++cSwekafPn2YP38+U6dOZeDAgbRv356IiAicnJyIi4sjJiaG/Px8xo0bh5ubW/kPtY6QYFkIIeoJnV5nEixbe0OSYsYjy5eyLlGgLcDZoexZ8kLUVXv27Cn13PDhwwF4/fXXCQkJ4dNPPyUqKgo3Nzf69OnD22+/zcGDB8ts/8UXX6Rbt27MnTuXPXv24OHhwcMPP8x7771HSEiIYYJdWb788kvatGnDokWL2Lx5Mz4+PgwZMoR3332XJUuWmL2mSZMm7Nu3j08//ZQVK1awceNGHBwcCAkJ4amnnmLUqFGK+k8++SQ9evTg008/Zdu2baxduxZ3d3eaNGnCgw8+yLhx4/Dx8Sm3r3WJSl9d0zLruYyMDHx8fLh+/Tp+fn613Z06QaPRsG7dOkaOHImTk3UnHNVn8twqz16f2eXsywxdMVRRtvmezQS4B1To+so8t6yCLHr+pJzQ99udv9GyYcvKdbqOq2/fa3l5eVy4cIHw8HBcXV2r7T46nY6MjAy8vevPBD9LDRgwgO3bt3PhwgXDMnHmyDMrUZnv05SUFPz9/UlPT8fb29ui+9n30xZCiHrkQvoFxbG7ozuN3BpVy708nT1N2pZtr4UQ9ZEEy0IIUU+Y7Nzn06xal3cyTsW4kHHBfEUhhKjDJFgWQoh6oqYm95XWvkzyE0LURzLBTwgh6gnjYDXcu3om9xUzXpZO0jCEqDxzS7AJ2yIjy0IIUU/U1IYkxWRjEiGEPZBgWQgh6oF8bT6Xsi4pyoxziq3NuP20/DTS8tKq9Z5CCFHTJFgWQoh6ICEjAT3KlUCre2Q52DMYR5Uym09Gl4UQ9Y0Ey0IIUQ8Y5wsHuAXg4eRRrfd0UjsR4hVSZj+EEKKuk2BZCCHqAZN8ZZ/qHVUuJnnLQoj6ToJlIYSoB4xXwjAOYquLcd6yjCwLIeobCZaFEKIeMNmQpKaCZaP7GO8iKIQQdZ0Ey0IIUQ8Yj+hW90oYxYwnESZkJKDT62rk3kIIURMkWBZCiDouLS+NtPw0RVltpWEU6ApMlrATQoi6TIJlIYSo42LTYxXHjmpHgj2Da+Tefq5+eDl5KcrOp5+vkXsLURPi4uJQqVSoVKoy602cOBGVSsWMGTOqdL8ZM2agUqlYunSpybmYmBhuv/12GjRoYOhTXFxcle4nyifbXQshRB1nHJw2826Go7pmfr2rVCqaN2jO4WuHDWUX0i/QL6RfjdxfCHuRmZnJAw88wOXLlxkwYAChoaGoVCo8PT1ru2v1ngTLQghRx51PUwbLzX2a1+j9m/sog2UZWRbCclOnTuW+++4jKChIUb5v3z6Sk5N56KGH+O6772qpd/ZJgmUhhKjjjFegaN6g5oPlmxkH70KIivP398ff39+k/OLFiwA0b16zP99CcpaFEKLOM85ZrvGRZaPgPDY9Fr1eX0ptIezHgAEDDHnFq1atokePHnh4eODr68v9999vCIBvZpyzXJwz/dhjjwHw9ttvG/KVJ06cqLj2u+++o0+fPnh7e+Pu7k5kZCSzZ88mLy/P5D7FOdbbtm1j/fr1DBw40JALnZaWxtKlSw052LGxsU
yYMAF/f3+8vb0ZMWIEJ06cAKCwsJD33nuPiIgIXF1dadmyJfPnz7fug6xlMrIshBB1WI4mh8vZlxVltZGGcbPMgkxS8lLwdzMdHRN1k16nQ5uWZrX2dDod2sxMCgsLUaurd9zOoUEDVNV8j/J88cUXzJkzh759+zJy5Ej27NnDzz//zIEDBzh8+DBubm6lXuvp6cmjjz7KuXPniIqKomPHjnTq1AmAPn36GOpNmTKFBQsW4OrqyqBBg3B3d2fbtm28/vrrrFmzhk2bNuHu7m7S/o8//sjChQvp1q0bI0aMIDY2VjGZ8cKFC3Tv3p3GjRszZMgQTpw4wV9//cWBAwc4cuQITz75JNu2bWPgwIE0b96crVu3MnXqVJydnfnXv/5lvYdYiyRYFkKIOsw4BUOtUtfYGsvFgj2DcXVwJU9bMnp1Pu28BMv1iDYtjbO9elu93atWb9FUq91ROPr61sCdSjd//nx27txJz549AcjJyWHo0KHs3r2bn376iUmTJpV6rb+/P0uXLmXx4sVERUUxZswYZs6cqaizcuVKFixYQHBwMNu2baNVq1YApKenM2rUKHbt2sVbb73Fxx9/bNL+N998w88//8y9995r9v7ffvstr776Ku+99x4qlQq9Xs+kSZNYunQpgwcPRq1Wc/bsWRo1agTA5s2bGTJkCO+++269CZYlDUMIIeow48l0TTyb4OLgUqN9MBegyyQ/IUo8//zzhkAZwN3dnWnTpgGwY8eOKrf/2WefAfCf//zHECgD+Pj4MH/+fFQqFV9//bXZdIw77rij1EAZinKki1M/oGgFnOeffx6AEydO8OmnnxoCZYDBgwfTuXNn4uPj682ydhIsCyFEHRabpsxXbuHTolb6YZyKYdwvIezZsGHDTMoiIiIASE5OrlLbGo2G6OhoAB588EGT85GRkURGRpKVlUVMTIzJ+TvvvLPM9gcMGICTk5OirHiSoZOTEwMGDDC5pvh8VV+brZBgWQgh6jDjEdzwBuG10g/jYNk4PUSIuqq8zUiKFU9qNVc/JCTEpMzLq2gzn/z8/Cr0DlJSUigoKMDf3x8PDw+zdZo1awZAUlKSybmwsLAy22/SpIlJWfHazoGBgTg4OJR6vqqvzVZIzrIQQtRhJsvG1fDkPsN9jVbEkDSM+sWhQQNa7Y6yWns6nY7MzEy8vLxqZIJfVdw8KS4nJ8fsJLnic4DZgLW6X2N5ygr4XV1dy7y2rL7X9uuqKRIsCyFEHVWgLSAhM0FRVmvBstF9r+VeI6MgA29n71rpj7AulVpt1UlyOp0OB0dHHL29bT7g8vX1xc3NjdzcXM6fP0/79u3N1jt/vugNorlR5Ork5+eHs7Mz169fJzs722ywXpw7bG6UWJTPtr9DhRBClCo+Ix6dXqcoq61gOcwrDEeVcvxFNicR9YGDgwO9exetBPLHH3+YrZOYmEhMTAxqtdpQt6Y4OTnRo0cPAH7++WeT88eOHePw4cN4enoalpwTlSPBshBC1FHGqQ4B7gF4OnvWSl+cHJwI9Q5VlEnesqgv/u///g+A999/nz179ijOpaenM2nSJHQ6HePGjSM0NNRcE9XqmWeeAYo2NCke4QbIzMxk6tSp6PV6pkyZUm7KhTDPpoPl3Nxc3nrrLcOuMMHBwUyaNMlsgnpZmjVrZtjtxty/U6dOVdMrEEKI6mMcLNfWqHJp95e8ZVFfjBo1ipdffpm0tDR69epFr169ePDBBxk1ahRNmzZl06ZNtG/fni+++KJW+jd+/HieeOIJLl68SPv27Rk1ahQTJkygRYsWbN++nR49evD222/XSt/qA5vNWc7Ly2PQoEFER0cTFBTEmDFjiIuLY8mSJaxdu5bo6OhK74/+6KOPmi338fGxRpeFEKJGGac5tGhQO8vGFWvu05zNbDYcy/Jxoj754IMPGDhwIF999RV79uxh3759uLu706ZNG+6++26efvrpUlejqAlff/01ffr04auvvmL79u0UFhbSok
ULnnvuOZ5//vkydwkUZbPZYHnWrFlER0fTs2dPNmzYYFiGZM6cObzwwgtMmjSJbdu2VarN4n3WhRCiPrC5kWVZEUPUc8OHD2f48OEVrl9WnNKsWTPDcnM3mzFjBjNmzDApnzhxIuPGjcPbu/RJsw8//DAPP/xwhfq2dOnSMuOiiRMnMnHixFLPm+t7Rduua2wyDaOgoIB58+YBRVtEFgfKANOmTSMyMpLt27dz4MCB2uqiEELUKq1OS1x6nKIs3Kd21lguZhysX8q6RF6h6Y5hQghRl9hksBwVFUV6ejotWrSgc+fOJufHjx8PwJo1a2q6a0IIYRMuZV2iQFegKKvtkeVm3s0Ux3r0xGXE1UpfhBDCWmwyDePw4cMAdOnSxez54vIjR45Uqt2PPvqI2NhYXFxcaNeuHWPHjlXsZy6EEHVFbLoyH7iBSwN8Xa23Dq4l3J3caeLZhKSskknYsWmxtPFtU4u9EkKIqrHJYDkhoWiR/dIW9i4uj4+Pr1S7L7/8suL4+eef5/PPP2fSpEnlXpufn6/YtjEjIwMo2pNdo9FUqh/2qvg5yfOqHHlulWcPz+xs6lnFcTPvZhQWFlapTWs8t2ZezRTB8rnUc2hC6+/Xob59r2k0GvR6PTqdDp1OV/4FFirOdy2+lyifPLMSOp0OvV6PRqMxu932zazxs2mTwXJWVhZAqVtKFs82zczMrFB7d955JwMHDqRr1640atSI8+fPs3jxYubOncvkyZPx8/NjzJgxZbYxe/ZsZs6caVK+devWUvspzNu4cWNtd6FOkudWefX5me3M2ak4dkx3ZN26dVZpu0rPLVd5+PeZv2mW1KxK/akL6sv3mqOjI4GBgWRlZVFQUFD+BVVU0b/jooQ8s6K5bbm5uezYsaPcQYLibcirwiaDZWv77LPPFMft2rXjk08+oU2bNjzxxBO88sor5QbLr732GtOmTTMcZ2RkEBoaysCBA/Hz86uWftc3Go2GjRs3MnToUJycnGq7O3WGPLfKs4dn9vP6nyGl5Lhf+36MbDOySm1a47nln8snam+U4TjHLYeRI6vWL1tW377X8vLySExMxNPTs1o3sNDr9WRmZuLl5YVKpaq2+9Qn8sxK5OXl4ebmRr9+/cr9Pk1JSSnzfEXYZLBcvPpFae8GsrOzAfDy8qrSfR5//HGmT5/O6dOniYuLo1mzZqXWdXFxwcXFxaTcycmpXvyCrEnyzCwjz63y6usz0+l1JsuyRfhGWO21VuW5RfhFKI4TMxPRq/U4Ozhbo2s2q758r2m1WsOGXWp19a0BUJxGUN33qU/kmZUo/h6tyM+dNX4uLXray5YtIy+v+pYDCgsLA+DixYtmzxeXN23atEr3UavVtGhRtIh/cnJyldoSQoiacinrEjmFysGEVg1b1VJvlFo2aKk4LtQXyrbXdUhxEKbVamu5J0KUrvj7s6beNFh0l8cee4zg4GCeeeYZw8oV1tSxY0cADh48aPZ8cXlkZGSV73Xjxg2AWt11RwghKuNc2jnFsbezN43cbGNlH09nT4I8ghRlxv0VtsvJyQkHBwdyc3PLryxELcnNzcXBwaHGPs2xKFiePHkyhYWFzJ8/ny5dutCjRw8WLVpkSI+oqt69e+Pj40NsbCwxMTEm51esWAHA6NGjq3Sf48ePc/r0acN2lUIIURcYB58tG7S0qRxG49FlCZbrDpVKhbu7O+np6TK6LGySVqslPT0dd3f3Gvu9Z1HO8oIFC/jvf//LTz/9xMKFC9m7dy/79u1j2rRp3H///UyePJlu3bpZ3ClnZ2emTp3Ku+++y9NPP82GDRsMI79z5szhyJEj9O/fn65duxqumTdvHvPmzWPs2LHMnj3bUL5u3TpcXV0ZNGiQ4h5HjhzhvvvuQ6/XM3nyZJyd63c+nRCi/jh7Q7lsnK2kYBRr1bAVO5NKVus4d0OC5bokICCAuLg44uPj8fX1xcXFxepBiU6no6CggLy8PLvPv60oe39mer2e/P
x8UlNT0el0BAQE1Ni9LZ7g5+HhweTJk5k8eTLHjh1jwYIF/PDDDyxYsIBvvvmGyMhIpkyZwgMPPFDmPualmT59Ops2bWL37t20atWKvn37Eh8fz549e2jUqBGLFy9W1L9+/TqnT582yT3eu3cvM2fOpGnTpnTs2BF3d3fOnz/PwYMHKSwsZMCAAbz//vuWPgYhhKhxxiO1rRrYVrBsPLJ8Nu1sKTWFLXJ2diYkJITr169X23wevV5Pbm4ubm5uNvWpiC2TZ1bEw8ODwMDAGh3ktMpqGO3bt+ezzz7jo48+YuXKlXzzzTds376dp59+mhdffJF7772Xp556SjESXB5XV1e2bt3K7Nmz+fHHH1m1ahW+vr5MnDiRd955p9QNS4zdfvvtJCYmsm/fPsM22t7e3vTp04cHH3yQxx57rNwFrYUQwlZodBqTCXMtG7YspXbtMB7pTspKIluTjYeTzA2pK9zd3QkLC6OwsLDKm92Yo9Fo2LFjB/369asXq4jUBHlmReuAOzrW/EJuVr2jRqMhMzPTsGB28e4qS5YsYenSpYwdO5aFCxfSoEGDCrXn5ubG22+/zdtvv11u3RkzZjBjxgyT8p49e9KzZ8/KvAwhhLBZCRkJaHTKHamMR3JrW7hPOGqVGp2+ZJex2LRYIhtVfVK2qFnVFZw4ODhQWFiIq6ur3QZ+lSXPrPZYJeklOjqaxx9/nKCgIJ566imOHDnCuHHj2LBhAxkZGfzwww906NCB3377jWeffdYatxRCCLtknNIQ4BaAj4tPLfXGPBcHF8K8whRlMslPCFFXWfx28caNG3z33Xd88803nDhxAr1eT2hoKK+88gqTJ08mMDDQUPf+++/nnnvuoXPnzlbbjlUIIeyR8WQ5W0vBKNaqYSviMuIMx8aTEoUQoq6wKFh+6KGH+PXXX8nPz0elUjFixAiefPJJRo4cWeoMTUdHR2699VaWLVtWpQ4LIYQ9s/XJfcVaNWjFxviNhmOZ5CeEqKssCpZ//PFHAgMDmTRpEk888YRhx73yjB07tsq77gkhhD0zWWPZRkeWjfsly8cJIeoqi4Ll5cuXM2bMmEon/Y8ePbrKG4kIIYS9yivMIyEjQVFmqyPLxpMOU/JSSM1LxdfVt5Z6JIQQlrFogl92djZ79+4tt150dDTffvutJbcQQghhJDY9Fj16w7EKFeE+4bXYo9KFeoXirFaugxqbFltLvRFCCMtZFCxPnDiRhQsXlltv0aJFPPbYY5bcQgghhBHjVIYQrxDcndxrqTdlc1Q70rxBc0XZmRtnaqk3QghhuWrdL1Gn09n1LjNCCGFNdWVyXzHj/snycUKIuqhag+Xz589btNW1EEIIU8YrStjq5L5iMslPCFEfVHiGnvEuejExMaXurFdYWMjp06fZsWMHQ4cOrVoPhRBCAKbBpq2PLBtP8juXdg69Xi+fOAoh6pQKB8szZsxApVIZftHFxMQQExNT5jUBAQG89957Ve2jEELYvYyCDK7kXFGU2do218aMg/ksTRZXcq4Q6BFYyhVCCGF7KhwsL1myBAC9Xs+kSZPo06cPjz/+uNm6zs7OBAcH06NHD1xcXKzTUyGEsGPGo8qOakeaetv2uvWBHoF4OHmQrck2lJ25cUaCZSFEnVLhYPnRRx81/P+yZcsYMWKEokwIIUT1MZ4c18y7GU4OTrXUm4pRqVS0bNCSw9cOG8rOpZ2jX0i/WuyVEEJUjkWbkmzdutXa/RBCCFEG42XXbD1fuVirhq0UwbIsHyeEqGuqdTUMIYQQ1nE69bTiOMI3opZ6UjkRDZX9NH4dQghh6yo0sjxo0CBUKhXLli0jJCSEQYMGVfgGKpWKzZs3W9xBIYSwdzq9zmREtnXD1rXUm8ox7mdcehwF2gKcHZxLuUIIIWxLhYLlbdu2oVKpyMnJMRxXlCwRJIQQVZOUmUROYY6irLVv3QiWjUeWC/WFxKbFcovfLbXUIyGEqJwKBcsXLlwAoEmTJopjIY
QQ1e/UjVOKY19XXxq5Naql3lSOp7MnIZ4hXMy6aCg7lXpKgmUhRJ1RoWC5adOmZR4LIYSoPib5yg0jrP+pnSYXcrNw0OVbt12KRsFvDpZlkp8Qoi6xaDUMIYQQNef0DWWwbJV85eQjcGotxO+Gy0chLw0nYBSgP/UCNG4PTXtCmzsguAtUIThv3bA1mxNK5q4Yvx4hhLBlFq2GceXKFXbs2MGVK8rdpGJjY7nvvvto3749I0eOJDo62iqdFEIIe2Y8smxxvrJeD6f+gK/7w9d9YfsHELcT8tIU1VT5GZCwG3Z+At8Mgq/6wLGVRddbwLi/p1JPobewLSGEqGkWBcvvv/8+AwcOJD093VCWkZFBnz59WL58OSdOnOCvv/5i8ODBnD171mqdFUIIe5Oen05ydrKizKJgOekgLL0Dfn4AkmMqd+2VY7BiEiwcAgmVHwQx7m9mQSaXsy9Xuh0hhKgNFgXL27Zto23btkRElMxyXrp0KVeuXOH+++/n9OnTzJkzh9zcXD755BOrdVYIIeyNcX6vk9qJcJ/wijeg08L2j2DhYIiPqlpnkvbD4tth00zQFlb4smCPYLycvBRlkoohhKgrLAqWk5KSaN68uaLsjz/+wNHRkU8//ZRWrVrx3HPP0bFjR7Zv326VjgohhD0yTsFo2aAlTuoKbnOddQ2+vxu2zgK9zvS8dwj0nAr3/QRT96N59iibbvmAwgk/Qu//gwalTObeNQeWjYKMSxXqhkqlMtlE5VTqqVJqCyGEbbEoWM7MzMTd3d1wrNVq+fvvv+natSv+/v6G8jZt2nDx4kVzTQghhKgA4xFY43WLS3UjHhYNhfNbTc/5hMHdi+C5I3D7u9BmJPi3Aq8gsl2D0LcaBkPfhmdjYMJ34NvctI2Ev4vSMq5XLNXOeFKirIghhKgrLAqWg4ODOXWqZFRg165dZGVlMWDAAEW9wsJCnJ1llyYhhLCURZP7rp2GxcPhhtGa+Co1DHgNpu6DDuNB7VB2O2o1tL0TntpTFDwbj2hnJBXdJ/lIuV0y7rdsey2EqCssCpZ79uzJkSNH+PTTTzl69CjTp09HpVIxevRoRb2TJ08aNjIRQghRORqdhti0WEVZG982ZV909RQsGQGZRikSnoHw6FoY8Co4uVauI47ORWkZk9YXjUrfLOc6LB0Fl2LKbMI4WE7MTCRbk125fgghRC2wKFh+7bXXcHFx4YUXXqBTp05ERUUxYMAAevXqZagTFxfHiRMnuO2226zWWSGEsCdx6XEU6AoUZWWmYWQkww/jISdFWR7QDqbsgGa9q9ahkK4wZTs06aosz0+HH+6BG3GlXtqyQUscVCUj2Xr0nL0hqyUJIWyfRcFyu3bt2LVrFw899BDDhw9n+vTprFq1SlFn/fr1dOzYkbvuussK3RRCCPtjnK8c6BGIj4uP+cp5GUWBcnqisrxJN5i4FrwaW6dT7r7wyO/QrK+yPPsqfD8eclLNXubi4EIz72aKMknFEELUBRbv4NelSxeWLVtW6vkpU6YwZcoUS5sXQgi7dyZVOQmuTcNSUjB0Wlg+sWg95Js16QaPrAIXL3NXWc7FCx5cXjSaHLezpDzlbNE6zo+uAQfTFTta+7YmNr0krUSWjxNC1AUWjSwLIYSofsbLqxkvv2aw4yOI3aws820OD/xi/UC5mJMb3Ps9BLRVlif8DZtmmL1EJvkJIeoii0eWiyUkJJCcnEx+fn6pdfr161fV2wghhN0xHnk1Xn4NgHObYdv7yjJ3f3hoJXj4m9a3JrcG8OCKoiXqMpJKyv+eB6G3Fa2kcRPj/p9NO4tWp8WhvFU5hBCiFlkcLC9evJh33nmHhISEcutqtVpLbyOEEHbpeu51UvOU+b8mK2GkJ8HKyYC+pEzlAPeWsjZydfBpAvf9AIuGgfamyYi/Pw2N24FfC0OR8chybmEuiZmJNPNpVjN9FUIIC1gULC9ZsoTJkycD0L59eyIiIvDyqqaP+oQQwg4Zp2C4O7
oT4hVSUqDXw+qpkGs0oW7IDGjaixoV3BlGfABrny8py8+AVf+Gx/40rOfs7+aPn6sfKXklq3WcSj0lwbIQwqZZFCzPmTMHR0dHVqxYwZ133ln+BUIIISrlRMoJxXFEwwjUqpummRxYArFblBe1GQW9nqmB3pnR9TFIiIYjv5SUJe6Bv+dD72cNRW382hCVFGU4PpF6guHhw2uyp0IIUSkWTfA7e/Ys/fr1k0BZCCGqiXGw3Nbvpol0qRdg/XTlBV5BMGYeqFQ10DszVCq4Yw40bKYs3zKraKOUf7T1VU4IPJlysgY6J4QQlrMoWPb19cXfv5onjgghhB0zDiINwbJeD6ufAePd7+6cB24Na6h3pXDxhLu+BG4K2LX5sOrJouXtMAr6KXpToNfrEUIIW2VRsDxmzBiioqLQaDTW7o8QQti9tLw0LmUrt6s2BJmHf1aubQzQ5VFoNaSGeleOpr2g59PKskuHYP9iwDRYzijIICkrCSGEsFUWBcvvvfceHh4ePPbYY9y4ccPafRJCCLt2IlWZguHq4Eq4Tzjk3oANRukXPmFw+7s12LsKGDQd/Fopyza/A5lXCPIIMtmF8GSqpGIIIWyXRRP8XnjhBdq2bctPP/3EH3/8QdeuXQkJCUGtNo29VSoVixYtqnJHhRDCXphM7vONwFHtCJvfhpzrysojP6q+jUcs5eQGo/4Ly0aVlOWnw4bpqO7+hra+bfk7+W/DqRMpJxjadGgtdFQIIcpnUbC8dOlSw/+np6ezZcuWUutKsCyEEJVjHCzf4nsLJB2A/UuUFduMgtY2upJEeF+IvA+O/FxSdvR/0OVhbvG7RREsyyQ/IYQtsyhY3rp1q7X7IYQQ4h/GwWM737bw1+soNh9xcofhs2u2Y5U17B04/WfRqHKx9a/T9vY3FNWKJ/mpamslDyGEKINFwXL//v2t3Q8hhBBAen46F7MuKspuybgKidHKiv1eggZhNdgzC3gGwOA3Yd2LJWWXj9L2Sqyi2o38G1zJuUKgR2ANd1AIIcpn0QQ/IYQQ1cN4spuz2pkWUV8oKzVoarrihK3q+hg0Um7THbLrc7yclHnWx1OO12SvhBCiwqoULKekpDB37lwefPBBbr/9dj788EPDuePHj7N69WpycnKq3EkhhLAXxikYEU4+ON2IV1YaMgMcXWquU1Xh4AjDlKt1qLIu09bBXVFmnKcthBC2wqI0DIDly5czefJksrKyDLlmTZo0MZxPSkpi7NixLFu2jIceesgqnRVCiPrOZHJfqtEaxCHdod3YGuyRFbQaAi0GKbbnvuVaHHu83AzHMslPCGGrLBpZ/vvvv3nggQdwdHTkk08+Ye/evSY7MA0ePBgfHx9+/fVXq3RUCCHsgck217mZygq3v1d7W1pXxbBZoCr5k9M2V7kDoezkJ4SwVRaNLL/33nuo1Wo2btxIly5dzNZxcHCgS5cuHDt2rEodFEIIe5FZkElCZoKi7Jb8gpKDNqMg9NYa7pWVNG4HHe+HmB8Ao9cFpOSlcC33GgHuAbXROyGEKJVFI8u7d++mZ8+epQbKxQIDA0lOTraoY0IIYW9OpZ5SHDvq9bQq0PxzpIKBr9d8p6yp/yugdgIgrLAQD51OcVryloUQtsiiYDknJ4dGjRqVW6+qW2Hn5uby1ltvERERgaurK8HBwUyaNImkpKTyLy7D2bNncXNzQ6VSMWTIkCq1JYQQ1mIcLLYq0OBcfNBubNHobF3WsCl0fRQo+uNjPLoswbIQwhZZFCw3adKE48fLXuZHr9dz7NgxwsPDLepYXl4egwYN4p133iErK4sxY8YQGhrKkiVL6Ny5M+fPn7eoXYAnnniC/Px8i68XQojqYJKvXPBPMKlSw4DXaqFH1aDvi+DoCsAtBcpgWSb5CSFskUXB8vDhwzl9+jQ///xzqXUWLlxIYmIid9xxh0UdmzVrFtHR0fTs2ZMzZ87wyy+/sGfPHj755BOuXbvGpEmTLGp30aJFbNu2jX
/9618WXS+EENXFJFguHnntMAEaRdRCj6qBdxB0L/r921ZGloUQdYBFwfKrr76Kj48PjzzyCK+88grR0UU7S2VnZ3Po0CHeeustnnnmGRo1asTzzz9f6fYLCgqYN28eAPPnz8fT09Nwbtq0aURGRrJ9+3YOHDhQqXavXLnCSy+9xNChQ7n//vsr3S8hhKguWQVZxGco11O+Jb8AVA7Q/+Va6lU16fUsOLqWjJz/42ruVa7mXK2lTgkhhHkWBcshISH88ccf+Pv789FHH9G7d29UKhUrVqygW7duzJo1iwYNGrB69WoCAio/szkqKor09HRatGhB586dTc6PHz8egDVr1lSq3f/7v/8jNzeXL774ovzKQghRg46nHEdPydJpjno9EZoC6HQ/+LWoxZ5VA88A6PIoTTWmk/yOXZcVlIQQtsXiHfx69uzJ6dOnmTNnDsOHD6dNmzZEREQwaNAg3n//fU6fPs1tt91mUduHDx8GKHW1jeLyI0eOVLjNdevW8csvv/D666/TsmVLi/olhBDV5ej1o4rjNgUFuOAA/V6q9nvrdHpuZBeQkpVPbiEUFOrKv6iqej+Lg9qJ9kapGBIsCyFsjcU7+AF4eXnx3HPP8dxzz1mpO0USEorWGQ0JCTF7vrg8Pj7e7Hlj2dnZPPXUU7Ru3ZpXXnnFoj7l5+crJgVmZGQAoNFo0Gg0pV0mblL8nOR5VY48t8qri8/syJVDiuP2+QXo2t6F1rMJWPF1XMvMZ+e56xy7lMnJ5AwuXM/hRk4BOsOgtiOv7ttEI09nQn3daRXgwa1NG3Jrs4YEN3Arq+nKcW+MQ4cJtE9Yyx43V0PxkcsH69TXrS5+r9kCeW6VJ8/MMtZ4XlUKlqtLVlYWAO7u7mbPe3h4AJCZmWn2vLHp06cTHx/P1q1bcXZ2Lv8CM2bPns3MmTNNyrdu3VpqP4V5GzdurO0u1Eny3CqvLj2zmBvRcNPGfB3yC9iu7UzGunVVbjtTA3uvqohJUZOQXbHd/65lFXAtq4CDCWn8sr9ouc4m7nq6+uvo4q+noUuVu4WHphPt839TlB29GsPaP9aiVln8wWetqEvfa7ZEnlvlyTOrnJycnCq3YVGwvHv3brZu3crJkye5ceMGKpUKX19f2rZty8CBAy1Ov6gO+/fv57PPPuORRx5hwIABFrfz2muvMW3aNMNxRkYGoaGhDBw4ED8/Pyv0tP7TaDRs3LiRoUOH4uTkVNvdqTPkuVVeXXtmVzOTuLFGmY7QLqArYXc/WaV2j1/K4JtdcWw4cQWNtupbSSflqEhKcGBtIgxv15jJfZrRoYlPldq8/ut2yCtJqctRaWl3Wzjh/nVjTem69r1mK+S5VZ48M8ukpKRUuY1KBctHjhxh0qRJHDpU9HGhXq/85atSFY1YdO/enUWLFtG2bVuLOlW8+kVp7ways7OBojSQshQWFvKvf/2LBg0a8PHHH1vUl2IuLi64uJgOpTg5Ock3bSXJM7OMPLfKqyvP7NSxbxXHXlod4QNfRm1h389eyWTOxjP8eeyyNbpnQqeHdceusO7YFfpHNOL1kbfQOrDs38elCRrwOgFrx3PVseTP0amjy4gY9l9rdbdG1JXvNVsjz63y5JlVjjWeVYWD5X379jFo0CCys7Px8PBgxIgRdOrUCX9/f/R6PdevX+fQoUOsX7+ePXv20LNnT7Zt22Z2NYvyhIWFAXDx4kWz54vLmzZtWmY7Fy9eJCYmhsDAQO655x7FubS0NAAOHDhgGHHetm1bpfsqhBBVotdz9PTvcFOGWDuVK+pmfSrdVHZ+IZ9uOsPiqDi0utJHkj1dHOnVwo8OTXxoG+xNoI8rvh7OqHRa/tq4mW49+5KcqeH8tSz2x99gX1wqaTnm8/62n7nGzrPXuK97GC/f3poG7pVMdWvcjg5ODdmsL0mrOxq3iTt1WlA7VK4tIYSoBhUKlrVaLQ8++CDZ2dk8/vjjfPLJJ3h7e5utm5GRwbRp01i8eDEPPP
AAJ06cMIw4V1THjh0BOHjwoNnzxeWRkZEVau/y5ctcvmx+hCUtLY3t27dXqn9CCGE15zZxTJsBlExy6xDSGyr5e3Pr6au8/utRktPzzJ53dlQzKjKIcZ1D6B7ui7OjaU6wRqOhoQu0DvSifagT0JgpgFanZ8+FFFbHXGLtkWSy8gsV1+n08OOeBDaeuMLssR0Y0rZxpfrevtlgNl9YZTg+Rj6c/hNuGVWpdoQQojpUaAbF77//zrlz57j33nv55ptvSg2UAby9vVm4cCH33HMPZ86cqfRayAC9e/fGx8eH2NhYYmJiTM6vWLECgNGjR5fZTrNmzdDr9Wb/bd26FYDBgwcbyoQQoqZpd8/jmItyNLZ9xJgKX5+n0TJj9XEeW7LPbKDs4+bEi8Mi2Pv6YOZM6ESfVv5mA+WyOKhV9Grhz/t3RxL16iBeGd6GRl6maWnXMvOZ/O1+XvjfYXIKCs20ZF6HlsqdXk85O5P/97xK9VEIIapLhX5jrlmzBrVazXvvvVfhhmfPng3AqlWrKt0pZ2dnpk6dCsDTTz9tyFEGmDNnDkeOHKF///507drVUD5v3jzatGnDa6+9Vun7CSFErbh6kriLu8hWK38Vd2hUsU/NElJyuGt+FEt3x5mcc3ZQ88yglux8ZSBTB7WqfHpEKXzcnPj3gBZsf2kA/ze4Fa5Opn9GVh68yF3zo4i9llWhNtv6t7t5IRAKVSpOXzkASZXbpVUIIapDhYLlAwcO0Lp1a8LDwyvccPPmzWnTpk2lt6QuNn36dG677TZ2795Nq1atuPfee+nRowcvvPACjRo1YvHixYr6169f5/Tp0yQnJ1t0PyGEqHF7vuKo0cThxu4BNHJvVO6lu2Ovc+f8XZy6bLqEZp+W/qx/vh8vDGuNt2v1TARyd3bk+aERbHlhAANbm/b3zJUs7vx8FxtPXCm3LS9nL8K9lX9fjro4w9+y26oQovZVKFhOTk4mIiKi0o1HRERw6dKlSl8H4OrqytatW3nzzTdxd3dn1apVxMfHM3HiRA4ePEjz5s0talcIIWxCTioc/sUkBSOyUcdyL/1pbwIPL9prMunOxVHNO3e157vHuxPu72HV7pYmuIEbiyfeykfjI/F0UU6DyS7QMuW7/SwzM/JtzHg0/ZiLC5xYBenmJ3oLIURNqVCwnJ6ejo9P5dfS9Pb2Nux0Zwk3Nzfefvttzp07R35+PsnJySxZssTszn4zZsxAr9ezdOnSCrU9YMAA9Ho9mzZtsrh/QghhsQNLoTC3aAT1Ju3925d6iV6v5/PNZ3nt16Mmq120CvBkzTN9eLhH00pPqq4qlUrFPd1CWfNMH9oYLSGn08N/Vh9n1toT6MpYoaODfwfF8TFnZ9AVwt4F1dJnIYSoqAoFy4WFhajVld9NSa1WU1hY8UkeQghhF7Qa2LeQPJWKM0a7ihoHjcV0Oj0z15zgk41nTM4NuaUxvz3dm4jGlq11bC3h/h789lRvxnVuYnJu4a4LvP6baZBfrH0j5ZuEOGcn0tWqojcVBdlmrxFCiJpQt/YTFUKI+uDkGshI4pSzE4U3jQKrUNHWz3QzJ71ez5u/HzM7ke+pAS1Y8HBXkxSI2uLm7MAnEzoybahp6t7P+xJ5cflhCrU6k3MRDSJwVivfOBx3cYG8dDi6otr6K4QQ5alwsLxs2TIcHBwq9e/bb78tv2EhhLA30V8C/+Tl3qRFgxZ4OClzjfV6PW/9fpwf9iQoylUqmHlnO14e3ga1umbTLsqjUql4dnArPrmnIw5GffvtUBIvLj9skpLh5OBEG782ijJDisreb0CW9xRC1JIKB8ulrVdc3j8hhBA3uXQILu4FMMlXNk7B0Ov1zPrjJN9FxyvKHdUqPr23E4/2alatXa2qu7uG8OWDXXB2UP6pWRVziTd/P2byNyLSXznJz7BSyJWjkLinWvsqhBClqVCwrNPpLP6n1Wqr+zUIIUTdsb9k2cvDRiPLxpP7Fu68wKJdFxRlDmoVn9
/fmTGdTPOCbdGwdoEseKQrLkYbofywJ4H3/zqlKDN+/UdcnDGE03u/qcZeCiFE6SRnWQghaspN+bfXHNQkOSnzjDsFdDL8/+8xSby77qTivINaxWf3dWZEh6Bq76o1DWgdwIJHuuHkoEzJ+Hr7eb77O85wfPPrB7jh4EC84z/P6MTvkHW1mnsqhBCmJFgWQoiacvgX0OQU/a/RqLKnkyctfFoAsPvcdV5cftjk8o/GR3JHZN0KlIv1j2jEZ/d1xji9+j+rj7P1dFEQHOwRTCM35QYnMa7/PCedBg4sq4muCiGEggTLQghRE/R62L/IcGgIAv8R2SgSB7UDpy9nMuW7A2i0ynzeV0e0YVwX0zXm65IRHYL44G5lXrJOD1N/OMjJ5AxUKpXJ6HLMzW8qDiwBrSxHKoSoWRIsCyFETUj4G66V5OjGGI0sd2rUibScAv717X4y85UB4aM9mzKlX/3YtfSebqE8O7iVoiy7QMvjS/dxNSOPjkY7GB52vWkSZEYSnF5XE90UQggDCZaFEKIm7CsZVc5XwQmjYLmDf0f+7+cYElJzFOXD2wXy1uh2Nb4rX3V6fkgrxnQKVpRdSs/jX98doJ2fcuT5nLMzGTfnbuxbWBNdFEIIAwmWhRCiumVdK5qg9o+Tzs5obor/VKjYftSV7WeuKS7rGNqAT+/rZLJWcV2nUqn44O5IujVtqCg/nJjGb9GYbE5y5OY3Fhe2Q+r5muimEEIAEiwLIUT1i/m+aIJa8aGbcuORQLdwvtmerCjz93Th64e64urkUCNdrGmuTg4seKQbTf3cFeU/RF8i0LWloizGw0d58UHZ8EoIUXMkWBZCiOqk08H+JYqimEZNFceXLgcojh3VKr54sAuBPq7V3r3a5OvhzFcPdcXVSfmnKC7JaEUMP6M1pQ/9AFoNQghREywKlmfOnMnFixet3RchhKh/YrdAWskOfHogRq2cwJeXFaY4fnNUW7qH+9ZE72rdLUHezLpLuXNhvtHzOKrNQvHEsq/C6T+rv3NCCEEVguXw8HBGjx7N6tWr0el01u6XEELUDzctFwdwMbAdKQUZijJtbslI891dQnikp3Lkub4b3zWE+7uHGo61ucpgOUebx7nQrsqLDiytgZ4JIYSFwfKsWbMICwvjjz/+YOzYsYSGhvLmm28SFxdn5e4JIUQdlp4EZ/5SFMW06KU41hV6otcUjSK3CvBk1l3t69XKFxX1n9HtaN/EGwC91gtdgZ/ifEzTLsoLYrfAjXiEEKK6WRQsv/7668TGxrJhwwbuueceUlJSePfdd2nZsiXDhw9n5cqVFBbKwvFCCDsX8yPob/rkzdmTw+7KCW1Fo6gqnB3VfP5AZ9yc6+eEvvK4Ojnw5YNd8XIt2t5am6McXY5xUoHrzRP99HDouxrsoRDCXlVpgt+QIUP4+eefSUpK4uOPP6Z169Zs2LCBCRMmEBISwquvvsrZs2et1VchhKg7dDrTYK793cSknFAUaXOKUi7evOMW2gR611TvbFKorzuz7moPKFNTAGKuH4XIe5UXHPxOdvQTQlQ7q6yG4efnx7Rp0zh+/Di7du3i/vvv5+rVq3z00Ue0adOGwYMH89tvv1njVkIIUTfE7VBM7APIipzAmRvKAQRdblOGtm3MQz3sK0+5NGM6NeGuTsEmwXJSVhLX2o1RVs66DGfX12DvhBD2yKpLx8XGxrJmzRo2b95sKAsJCWHr1q2MHz+e7t27k5iYaM1bCiGEbTpoNKoc0JYteRr0lKRl6PUO+Du14MO7I+0yT7k0M8e0J9CtGXqtcpfDtampEHKrsrJM9BNCVLMqB8sajYaff/6ZwYMHExERwQcffEBhYSHTpk3j1KlTxMfHExUVxYgRI9i/fz9Tp061Rr+FEMJ25aTCyTWKIl3nh/k0aoOyLLcJH9/TlYYeyh3r7J2PmxP/ndAZbZ4yb/mbfZsp7PyIsvK5TZAmgzBCiOpjcbB88uRJpk2bRnBwMA8++CBbt2
6lZ8+efPvtt1y8eJGPP/6YVq1aAdCzZ0/Wrl1L9+7d2b59u9U6L4QQNunoctDmlxw7OPO//F4k5yvzlVs16EDfVo0Qpm5r7keXgM6KsjTdaRbd6AzOXiWFejO54UIIYUUWBct9+vShffv2fPrpp2g0Gv79739z5MgRdu3axUMPPYSLi4vZ69q1a0dmZmaVOiyEEDZNrzfZjjk7/HZmbr6Ig1uCovzJ24bWZM/qnKd6Kp+P2vUSn2w/R3qrscqKMT8WTagUQohq4GjJRbt376ZLly48+eSTPPDAA7gbLYVUmsmTJ9OvXz9LbimEEHXDpUNw5Zii6NPUnuQ7JuBx0859KlT0atKtpntXp3Rt3AkHlSNafdFzU6n0aJ0v8M6lbnzMTW9I0hPhwnZoMbCWeiqEqM8sGlnet28f+/fvZ/LkyRUOlKEoHePRRx+15JZCCFE3GKUEZLsFs/BSGI7uFxTlrRq2wsfFB1E6V0dXIhspt8J2cL/AimQ/UjwjlJVjfqjBngkh7IlFwfIff/zB6tWry623Zs0a3n77bUtuIYQQdU9BDhxdoShaltcHPWocjILlro2Ntm8WZhk/p6LnqOLrjJ7KiifXQG5ajfVLCGE/LAqWZ8yYwapVq8qtt3r1ambOnGnJLYQQou458TvkZxgOdaj4PrcPoMXBLU5RVYLlijEJlt0ugkrD8oKeFN6cSViYB8dW1nDvhBD2wKrrLBvTarWo1dV6CyGEsB1GE/t2aCO5hD9q12RUDvmKcxIsV0ynRp1Qq0r+jqhUWhzcEriBNxu0XZSVD31fw70TQtiDao1kjx8/TsOGDavzFkIIYRtSYiFht6LoF+0AAJMUjGbezfB386+pntVpns6etPFtoygrfp7Ltf2VlS8dhKsna6prQgg7UeHVMCZNmqQ43rVrl0lZscLCQk6fPs3+/fu56667qtRBIYSoE2J+VBym6L3YpCsaPZZ85arp2rgrJ1JK1qh2cI8DYIcukiv6BjRWpZVUPvQ93P5uzXZQCFGvVThYXrp0qeH/VSoV586d49y5c2VeExkZyUcffWRx54QQok7Q6eDwz4qi37W90eAI6HDxjOfmVYAlWK6cro278t2JklVGnNwTyKUQLY78qu3Lvx1v2i3xyC8wZAY4ONV8R4UQ9VKFg+WtW7cCoNfrGTRoEMOHD+eVV14xW9fZ2Zng4GCaNm1qnV4KIYQti9sBGRcVRSu0RWvKO7peQ6fKVpyTYLlyugQoc5P1qgJcPJLJzw5luba/MljOvgZnN0CbO2q4l0KI+qrCwXL//iW5YY8++ih9+/ZVlAkhhN0ySsE4qQvjhL4ZAD3bpROTU3Iu2COYYM/gGuxc3dfQtSEtG7TkXFrJp5k926WxbW8o5/XB7NdF0E19puSCQ99LsCyEsBqLJvgtWbKk1HxlIYSwK3kZcEK57nzxqLKvhzN+fsoRZxlVtozxc3P1iifU1w0wM9HvzHrIulpTXRNC1HOyrpsQQlTFid+hMNdwqNE7sErbG4AXh0Vw5PohRXUJli3TrbFya/DD12J4bUTRLn5/aG8jV+9cclKvLcpdFkIIK6hQGkbz5s1RqVRs2rSJ8PBwmjdvXuEbqFQqYmNjLe6gEELYNKMUjG26jqTgQ/sm3vRorWf2qWuK8xIsW6ZLY2XecpYmi6ZB6fRs7sff52Gd7jbudthZUuHQ99BzKqhUNdxTIUR9U6FgOS4uDgCNRqM4FkIIu5Z63mRt5eIUjBmj27H/yibFOT9XP5p6y8RnSwS4BxDmFUZCZoKhbN/lfbw1ehx3fLaT5dr+ymD52ilIOggh8uZECFE1FUrD0Ol06HQ6IiIiFMcV/SeEEPWS0XJxqXpPtui6MLZzE7o182Xv5b2K892DuqOSkU6LdQ/qrjjec3kPtwR5c3/3MPbo2hCvC1BecOg7hBCiqiRnWQghLKHToTdKwfhd2xsnZxdeHdEGnV7H3mRlsNwjqEdN9rDeuS3oNsXxgSsH0Gg1TB
sagZers2FUv5j+2ErQ5CKEEFUhwbIQQlgifheq9ERF0UptX6YOakljb1fO3jjLjfwbivPdA5Ujo6JyjJ9fbmEux1KO4efpwv8NiWClth86fcnIvSo/A079UdPdFELUMxIsCyGEBXSHlKPKp3ShpPm0ZVLvcAD2JO9RnG/i2YQQr5Aa61995OvqS0TDCEVZdHI0AI/0bIpbo6ZE6dopzmsPfl9j/RNC1E8VCpYdHBws/ufoWOF9T4QQom7Iz0J7fJWiaIW2Hy8Nb4OrkwOASb6ycQqBsIzx6HJxqouTg5o3R7U1ScVQX9gG6Uk11T0hRD1UoUg2NDRUJqUIIcQ/cg//ipu2JBe2UK/mTOPhvB5ZtDNfoa6Q/Vf2K665LVCCZWvoEdSD70+WjBYfvnaY3MJc3BzdGNA6gO+bjyAzcQleqqKvjwo92ft/wGPwy7XVZSFEHVeppeOEEELAtV1LCLvpeLuuI0+N6o1aXTSocDzlONmabMU1xis5CMt0bdwVB5UDWr0WAI1Ow6Grh+gV3AuAaSM78ceXPbjPYavhmty93+Ex6CVZc1kIYRHJWRZCiEq4En+KsIyDirIzQaPp0dzPcGycr9yyQUv83fxrpH/1naezJ+38lXnJNz/vtsHeXG1xt+K8f34Cycd21Ej/hBD1jwTLQghRCTFrv1Icp+k9GDp2oqLMeMk4WQXDuoxTWoyf991j7uaCPlBRFrtpQbX3SwhRP1UoDSMhoWjHpCZNmuDg4GA4rqiwsLDyKwkhhI07nnSDW66uhZs+zT/VaBg9gkpGlfMK8zh09ZDiOpncZ123Bd3GN0e/MRyfSD1BRkEG3s7eADRp6M620LsIv1jyxiYybTMx55Pp1DyoxvsrhKjbKhQsN2vWDLVazYkTJ4iIiKBZs2YVnvCnUqkoLCysUieFEMIW/P77Sl5XXVOUtbn9ScXx4WuHKdAVGI7VKjXdArvVSP/sRcdGHXFWOxues06vY//l/QwKG2So02XUv9F99TVq9AB4q3LZvnoJHf/vNZmwLoSolAoFy/369UOlUuHu7q44FkIIe7HnfAotL61W/NZMcW+OX0vlqLFxvnJb37aGEU9hHa6OrnQO6MyeyyXPeu/lvYpg2TuwGRcb3kbIjWhDWeeUdWw59TiDb2lco/0VQtRtFQqWt23bVuZxdcnNzWX27Nn8/PPPJCQk4Ovry/Dhw3nnnXdo0qRJhdooLCxk1qxZ7Nu3j5MnT3Lt2jU0Gg2hoaEMHTqUV155haZNm1bzKxFC1GV6vZ7P/4zha4doRblXj0dMVli4OYADWQWjunQP6q541sZvUgAC+k2C30u+Zn3Ux3h43U4GtL4bB7UM+AghKsZmJ/jl5eUxaNAg3nnnHbKyshgzZgyhoaEsWbKEzp07c/78+Qq3M3PmTHbs2EFQUBDDhw/n9ttvp6CggC+//JLIyEj2799ffkNCCLu19fRVApI24KHKN5TpUOPc+X5FvfT8dI5dP6Yok3zl6mH8XM+lneNK9hVFmXO70WgcPQ3HapWejqnrWXP4Uo30UQhRP1gtWL5x4wY3btxAr9dbpb1Zs2YRHR1Nz549OXPmDL/88gt79uzhk08+4dq1a0yaNKlC7bi6urJr1y5u3LhBVFQUy5cv5/fff+f8+fO8+uqrZGRk8OSTT5bfkBDCLul0ej5af4a7HXYqT7QYBF7KFRf2JO9Bp9cZjl0cXOgS0KUmuml32vm1w8vJS1G2+9JuZSVndxwjlcvIjXfYwacbT6PR6hBCiIqoUrC8evVqhg0bhqenJ/7+/vj7++Pl5cWwYcP4/fffLW63oKCAefPmATB//nw8PUtGBqZNm0ZkZCTbt2/nwIED5bbl6OhI7969TbbddnBw4J133sHV1ZUDBw6Qnp5ucX+FEPXXmiOXyEiOpbfDcUW5uvODJnWNg7Wujbvi6uharf2zV45qR3oE91CUmQTLgKqT8uvUXH0Z3xuHWXngYrX2TwhRf1gULOv1eiZNms
TYsWPZtGkTOTk5+Pj44OPjQ05ODps2bWLcuHFMnDjRopHmqKgo0tPTadGiBZ07dzY5P378eADWrFljSfcNVCoVDg4OqFQqnJ2dq9SWEKL+0Wh1zNl4hnFGo8p6Vx9oPVJZptcTdSlKUdY7uHe199GeGT/f3Zd2o9VplZVCu6P3a6koGu+wnc82nyW/0KiuEEKYYVGwPHfuXJYuXUpQUBBffvklaWlppKamkpqaSnp6Ol999RVBQUF89913zJ07t9LtHz58GIAuXcx/fFlcfuTIEUu6DxT9Yfvggw/Izs5m4MCBuLm5WdyWEKJ+Wr7/IvEp2SYpGKr2d4OTcsT4QvoFLmdfVpT1biLBcnUyfr4ZBRkcT1F+AoBKharTA4qiUQ7RpKan8+Oeyu0ZIISwTxVaDcPYggULcHd3Z+fOnYSHhyvOeXl58cQTTzB06FA6dOjAggULeO655yrVfvGmJyEhIWbPF5fHx8dXqt1XXnmFK1eukJGRwZEjR4iNjeWWW25h4cKF5V6bn59Pfn7J5J6MjAwANBoNGo2mUv2wV8XPSZ5X5chzqzxrPLM8jZa5m87QTXWaZmrlxLHC9veiN2p7e+J2xXFj98aEuofWqa9bXfte83P2I9w7nAsZFwxlOxN3ckuDW5QV247HcfM7qG5ac/l29T7mbfFiXKdA3J0t+lMI1L1nZivkuVWePDPLWON5WfQb4sKFCwwbNswkUL5ZeHg4gwcPZsOGDZVuPysrC8CwrrMxDw8PADIzMyvV7sqVK4mNjTUcR0ZG8v3335f5OorNnj2bmTNnmpRv3bq11H4K8zZu3FjbXaiT5LlVXlWe2dZLKq5kOvC84w5FeaZLEFtiLsPhdYryNVnKtLCQwhD+/PNPi+9fm+rS91pQfhAXKAmW151YR0ii6UBLT692BGSWrFQy3mEHv2f34Y1lGxnapOoT0+vSM7Ml8twqT55Z5eTk5FS5DYuC5UaNGlUox9fJyQl/f39LblEtzp07B8D169c5cOAAb7zxBl27duWbb77h0UcfLfPa1157jWnTphmOMzIyCA0NZeDAgfj5+ZVxpSim0WjYuHEjQ4cOxcnJqba7U2fIc6u8qj6znIJCZs7ZiStZ3OGgXL/XvddkRva6Q1GWV5jHOyvfUZRN6D6BoWFDK9/5WlQXv9d8Lvmwe1vJxL4kXRJ9h/TFy1m5UobqeC6smmI47q0+TjDX2XktkJkP9cXbzbLXWxefmS2Q51Z58swsk5KSUuU2LAqWx44dy/fff8+NGzdo2LCh2Tqpqals2bKFBx80nTFenuLVL0p7N5CdnQ0UpXxYwt/fn9tvv50ePXrQoUMH/v3vfzNo0CBCQ0NLvcbFxQUXFxeTcicnJ/mmrSR5ZpaR51Z5lj6zn3YnkJqtYYx6H16q3JITKjUOnR/AwajNfVf3ka8tSdNSq9T0DuldZ79edel7rUeTHrg4uBiev1av5cD1AwxtavRGpd0Y+PMlyC9KoVOr9Ix12MX83LtYFp3ItGGtq9SPuvTMbIk8t8qTZ1Y51nhWFk3wmzVrFs2bN2fQoEFs2bLF5PzWrVsZOnQoLVq04L333qt0+2FhYQBcvGh+aZ/i8qruvOfj48Po0aPJzc2VjzWEEABk5Rfy9faidK3xDsoUDJoPAO9gk2uMV8Fo798eHxef6uqiuImroytdG3dVlEUlRZlWdHKD9uMUReMdtgN6Fu26QEpWvuk1QghBBUeWBw0aZFLm7OzMgQMHGDp0KL6+vobANSEhwTDk3aNHD+666y42b95cqU517NgRgIMHD5o9X1weGRlZqXbNKU4TuXbtWpXbEkLUfct2x3EjR0Mw1+mtNlpZoZP5T8qM1/eVJeNqVq/gXoqvQdSlKPR6PSqjrcjp9BAcWGo4DFdfoavqDAcKWvPV9ljeuKNtDfVYCFGXVChY3rZtW6nn9Ho9KSkpZnNC/v77b9NfVhXQu3dvfHx8iI2NJSYmhk6dOinOr1ixAoDRo0dXum
1j27cXzWBv0aJFldsSoi7Q63TkHTuG5lL93fJXW6jF88gRshwccXB0qPB1uQVaDv9+jD4FWoar95HlcFPqlZM7xDtD0l+Ka9Ly0vCPPsPNszP6uKrJuKysVxdY+txqW88sPT1O3rwj3yXOqb6lsUdjZUW9Hm6EQ2ayoehx3R+4afM4t/wIifmx+LhX7iPbuvrMKsMxoDFuHSNROdTP1ydEeSoULF+4cKH8Slbk7OzM1KlTeffdd3n66afZsGGDYQWMOXPmcOTIEfr370/XriUfvc2bN4958+YxduxYZs+ebSj/448/aNiwIb169VLcIycnh3fffZft27cTGBjI8OHDa+bFCVHLLv/nP6QtX1Hb3ah2wcDlH36s9HXP3/T/SfgqT25/xew104wLVs0lqdJ3tg2WPrfaZvw1KFz1fhlfg5KvaziXeIPvAMiKhiwL7l1Xn1lleI0YTsh//1vb3RCiVlQoWK5qbrAlpk+fzqZNm9i9ezetWrWib9++xMfHs2fPHho1asTixYsV9a9fv87p06dJTk5WlO/bt4+ZM2fSpEkTOnXqhI+PD5cvXyYmJobU1FR8fHz43//+p9hSW4j6SpuRQdqKlbXdDSFEHZP5519oXn0Vp8aNy68sRD1j+Urs1czV1ZWtW7cye/ZsfvzxR1atWoWvry8TJ07knXfeKXXDEmPjxo0jMzOTnTt3sm/fPlJTU3Fzc6Nly5ZMmTKFZ555hqCgoGp+NULYBm1GRtFH0UIIUUnaGzckWBZ2ySrBclpaGpmZmehL+SNcvLpFZbm5ufH222/z9ttvl1t3xowZzJgxw6Q8MjKSTz75xKL7C1Hv6HQmRc7h4aC2aGEcm6XX68nKysLT07NC8yZ0Oj1xKdno9HpU6AlXXUbNTc/KoxG4m66nnluYS3KWMve7qXdTHNQ2Ow5Rpso+N1ui0+uIS48DSv4ONfYIxMPJw7RyXgZkKr9ucfpACnFAhYpm/u44VPBnoi4/s/IU3LSJF2D294cQ9sDi3+iXL19m+vTprF69uswFn1UqFYWFhZbeRghhTWbe0Ib/vgp1BTYZqks0Gg3r1q1j5MiRFVpj85MNp/l8S9GmRWPUu5jr/EXJSZUant9hdsm4D/Z+wPcnvzcct/drz0+jfqr6C6gllX1utuaDDZPZk1yyiczYln15u7eZwRZNLnzcGvLTDUW/aQYwTzsWgMl9wpk+qmIrY9T1Z1aWkx0i4aatgksbEBOivrNoOCk5OZlu3bqxePFiXFxcaNSoEXq9nh49ehAQEGD4gerZsyd9+/a1aoeFEJbTmxkZqm+jYZV1I7uAxbtKJjGbrq080GygrNfr2X5xu6KsX2i/aumjqJj+If0Vxzsu7kCnNzMaanbN5R0Uj0p/vyeea5my7rLJ7wadBMvCPlm8KcmlS5d4++23SUxMZMSIEahUKqKiokhOTmbbtm20adMGlUrFn3/+ae0+CyEsZW5kqJ6lYFTWNzvPk12gBShlbeUHzF53IeMCiZmJirIBIQOqo4uigoyff0peCidSTpiv3PkhxWEz9RW6qU4DkKfRsWBHrLmr7Ivx7wZzbzyEsAMW/ZX866+/CA8PZ/r06WbP9+vXjw0bNnDo0CHeeeedKnVQCGFF5oJlOx5ZTsspYNnuOMPxWIddqFU3PSMXH2hzh9lrdyQqR6AD3AJo49umOropKijUO5Rwn3BFmfHov0GTruAfoSi6+VOF76JldNnkd4OkYQg7ZVGwnJSUpNgoxOGfhcrz80t+sTRp0oSBAwfyv//9r2o9FEJYj7kJOnYcLC+OijOMKoPeNAWj/biij+zNMJeCYe8pLbbAOBVje2IpwbJKZfKpwSiHaNzIA2R0GUzTMMylcQlhDywKlr29vRXHDRo0AIqC6Ju5urqalAkhao/eOOdQpbLbAC8jT8OSqJJc5S6qs4SrLysrlbK9dXp+OoeuHlKUGQdponb0C1HmjZ9MPcmV7CvmK0feWzSB8x+eqjyGq/cZjr
+Ljud6lh2PLpukYcjIsrBPFgXLYWFhJCQkGI7bt28PwLp16wxlOTk5REVFyRrGQtgU02DZXi2LiiMzr2SlnnscjUYg/VpBSDez1+6+tButXms4dnFw4bag26qln6JyOgV0wsvZS1G2I2mH+crewdBikKLoHseSukWjy+et3sc6Q9IwhAAsDJYHDRrEkSNHuHbtGgB33nknHh4evPTSS7z66qt8/vnnDBw4kCtXrjBixAirdlgIUQXGH6Pa6eS+rPxCFt00quxKPnc57VVW6vRAqW8mtiVuUxx3D+yOm6P5dA1Rs5zUTvQJ7qMoM84vVzBKxeilPk4TrhmOv/07zn5Hl41/P0gahrBTFv2lfPDBBxk3bhwnThTNMvb19eXrr79Gr9fz4Ycf8txzz7Fv3z7atm3Lu+++a9UOCyEsZ5xzaK8pGN9Hx5OWU7J+7DD1ftx02SUVVGroeJ/Zawt1hexK2qUokxQM22K8hF90cjR5hXnmK7e+A1x9FEXjHUu+vnkaHd/Y6eiyac6yjCwL+2RRsNyxY0d++ukn+vcv+QNx//33c+bMGb744gtmzZrF8uXLOXjwID4+PmW0JISoUcZ/6+wwWM4t0LJwpzL4meIdraxUytrKADFXY8goyFCUGefJitrVJ7gP6ptykfO0eey9vNd8ZSdXaD9eUfSIWxQ3/7B8+7ed5i5LzrIQgIXBcmnCwsJ48sknee2117j77rvr3W5GQtR5xuuk2mEaxo97E7ieVWA4DiKFtnkHlZVKWVsZYEviFsVxRMMIgjxlboYtaeDagE6NOinKtiRsMV8ZTCZy+mkucZvDGcNxrkZrn6PLJjnLkoYh7JPV/lLeuHGDGzduyHaYQtgyO89ZztNo+Xq7cjmwaY0PoqJiayvr9Xo2x29WlA0KG2S2rqhdxl+XrYlb0eq05is36QL+rRVFLzTapzj+Ljqe1OwC7Ipalo4TAqoYLK9evZphw4bh6emJv78//v7+eHl5MWzYMH7//Xdr9VEIYS1Gb2btLQnjf/sTuarYaELPaP02ZaX2Y0tdW/lU6ikuZV9SlA0JG2LdTgqrMA6WU/NSTZb7MzCz5nK37G14qkrynHMKtCzaZV+jyyqVcRpG7fRDiNpmUbCs1+uZNGkSY8eOZdOmTeTk5ODj44OPjw85OTls2rSJcePGMXHiRBlpFsKGmEzQsaOR5YJCHV9tU44qTwy9gmvGBWXFUtZWBtiUsElx3MSzCRENI0qpLWpTqFcorRsqR4s3J2wupTYmay6rNTm83uysosqy3fGk5djR6LKkYQgBWBgsz507l6VLlxIUFMSXX35JWloaqamppKamkp6ezldffUVQUBDfffcdc+fOtXafhRCWsuOc5ZUHL3IpXbkiwr8bGE368msJIbeW2oZx3uuQsCF2u6JIXTA4bLDieHPC5tIHcLyDoIWy/lj1dkW8mJVfyOKoOCv30obJ0nFCABYGywsWLMDd3Z2dO3cyZcoUxY5+Xl5ePPHEE+zcuRM3NzcWLFhgtc4KIarIOFCwk0BPo9XxxbZzirI+Td1pnLBOWbGMtZXj0uM4l6ZsY3DTwWbrCttg/PVJzk7mROqJ0i8wSsVwS9rNo7covx+WRF0gPVeDPTBZOk4+KRZ2yqJg+cKFCwwePJjw8PBS64SHhzN48GAuXLhQah0hRM2y13WWf4+5RGJqrqLszeZnIf/mJeBUEGl+bWUw/Qjf382fjo06WrObwspaNWhFqFeoosx4gqZC65Emay5P9duveP+UmVfIst1xVuylDTP+/SDrLAs7ZVGw3KhRI5ydncut5+TkhL+/vyW3EEJUBzvMWdbq9MzfqhwR7hTagIhLvykrthwMPk1Kbcc4WB4YOlCxlq+wPSqVymQCZpl5y06u0OEeRZH/2RXc0a6xomzRrgtk5tnB6LLJOsuShiHsk0W/6ceOHcuWLVu4ceNGqXVSU1PZsmULd911l6V9E0JYnf2lYaw9cokL17MVZa/c6oAqfr
eyYueHS23jcvZljl4/qiiTVTDqBuNVMc6nn+d8ehmrWhivsZ0Wz4u3pCqK0nM1fPt3vLW6aLvUxhP8ZGRZ2CeLguVZs2bRvHlzBg0axJYtpgu9b926laFDh9KiRQvee++9KndSCGElJuss1+9gWafTM2+LclS5fRNveqT/qazo7lf0EXwpjCf2eTl7cWtg6RMBhe2IbBRJI7dGirIyNygJ7gKN2iiKmiWu4naj0eWFO8+TnV9otX7aIhWyzrIQUMFgedCgQYp/Y8aMwdnZmcOHDzN06FAaNWpEt27d6NatGwEBAQwZMoSYmBicnZ1lZFkIG2K8dJzJOqr1zPoTVzh7NUtR9uyAcFSHf1JW7Hg/OJaeWmYcXPUP6Y+Tg+xQWheoVWqT0eUy85bNrLnM8VX8X1/l9uc3cjR8H13PR5dNVsOQkWVhnxwrUmnbtm2lntPr9aSkpJCSkmJy7u+//7abCURC1Al2tBqGXg9fbldOMG4T6MUQx8OQdUVZuYwUjBt5N9h/Zb+iTFIw6pbBYYP55fQvhuNjKce4lHWJYM9g8xdE3gubZpTk6GqyaZu2jSG3tGLTyauGagt2nOf+bqXnudd5JnMaJFgW9qlCwbKsaCFEPWFH6ywfu6Hi1OVMRdnUQS1Rx7ygrBhyKwQoP3a/2cb4jWj1Jdskuzq40qtJL6v2VVSvboHd8Hb2JqOgZPWTDXEbmNh+ovkLvAKh5RA4u6GkLOZHnhn8vSJYTsku4Of9F2lspol6wfi9tKRhCDtVoWC5adOm1d0PIUQNsJel4/R6PesvKt8ItGjkwYimKvhtvbJyGaPKAOvjlPX7h/bHzdH8dtjCNjmpnRjSdAi/nv3VUPZX3F+lB8tQlIpxc7Act5OOnmkMaN2IbaevGYq/2XmBl9tWQ6dtgHGalskOoELYifo7rCSEMGUnaRg7zl4nMVv52qYOaonDkZ/gplFinDyg/bhS27mWc419l/cpyoY3G27VvoqacXuz2xXHx1OOk5CRUPoFrUeCawNlWcxPPDOolaLoWlYB0Vfr58+R6dJxEiwL+1SlYPnKlSvMnj2bkSNH0rFjRzp27MjIkSN5//33uXLlSvkNCCFqlvEfu3qYhqHX65m/Tbk0WFM/d0Z3CIJD3ysrtx8LLl6ltrUhfgP6m/I03R3d6dOkj1X7K2pG98Du+Lr6KsqMPzVQcHQxWXOZmB/pGupN31bK/QM2JanJ12ipd4zfTMs6y8JOWfyXcuXKlURERDB9+nT++usvjh49ytGjR/nrr7944403aN26NStXrrRmX4UQVWUHS8ftjk3hUGK6ouzpgS1xvBgNqbHKyp0fKbMt42BqUNggXB1drdJPUbMc1Y4MbTpUUfZX3F9lX2S8KkZ6AsRu5dnBytHldI2KFYcuWaObtkUtS8cJARYGy/v37+f+++8nOzubsWPH8ttvv3Ho0CFiYmJYtWoV48aNIysriwceeID9+/eX36AQokbojUaWjddRrQ8+23xWcRzS0I2xnZvAoe+UFf0jILR7qe1czr7MoauHFGWSglG3GadinLlxhvNpZWxQEtwZAiOVZQeWcGszX3o291MUf73jAgWF9SuYNFlaUrIwhJ2yKFiePXs2Wq2W5cuXs2LFCsaMGUPHjh2JjIzkzjvvZPny5SxfvhyNRsP7779v7T4LISxVz7e73nM+hT0XlLut/XtAC5w0mXB8lbJy54fLzNk2HlX2cvaiV7CsglGXdQnoYrJBSZmjyyoVdJ2oLDv9J2ReNhldTk7PY+XBi1bqqY0w/vmQkWVhpyz6S7lr1y569erF2LFjS60zduxYevfuzc6dOy3unBDCyur50nGfG+3WF+jtwviuIXB0BRTmlpxQOxZtRFKGvy4og6jBYYNlI5I6zkHtwLBmwxRlf8X9ZfKJi0KHe8DJveRYr4VD39OjuS/dmylzoOdvPYdGW48CSpMJfvXotQlRCRb9pUxPTycsLKzcemFhYaSnp5dbTwhRQ4zTMO
pRzvLBhBvsOnddUfavvuG4ODqYpmBEDAdP5QjjzRIzEzmWckxRNqLZCKv1VdQe41SaC+kXOHPjTOkXuHqbrphy8FtUej3PDG6pKL54I5ffDiVZq6u1znhpyTLfVAhRj1kULAcGBnLo0KFy68XExBAYGGjJLYQQ1cB0gk79CZY/N8pV9nLSM6FrE7h8FC4Z/b7q8miZbRmnYDR0aUj3oNLzm0Xd0bFRR4I8ghRl5U706/qY8jgtHi5so09LfzqF+ihOzd96jsL6MrpskoYhwbKwTxYFy7fffjunT5/m9ddfR6s1XS5Hr9czffp0Tp06xfDhMiFGCJtRT3OWj15MZ+tNG0UADA7W4erkAPuXKCt7BUPLwWW2t+7COsXxkKZDcFRXaA8nYeNUKpXJRL8/L/xZ9qhpk67QuL2y7MBSVCoVUwc0VxTHp+Sw+nA9WRlD0jCEACwMlt988018fX354IMPaNmyJa+88gpffvklX375Ja+++iotW7Zk9uzZ+Pn5MX36dGv3WQhhsfoZLH++RTmq3NDdiV6N9VCQBUf+p6zc5WFQO5Ta1unU05y9oWxvRLikYNQnw8OVgzhJWUkmK58omJvod+oPyLpKv1b+hHoof67mbTmHtj6MwhqnaUkahrBTFv2lDAkJYcuWLbRr1474+Hg++ugjpk6dytSpU/nwww+5cOEC7du3Z8uWLYSEhFi7z0IISxmnYdSDLIyTyRlsOKHcBOnx3s1wcQDV8V+hILPkhEoNXcpeW3lN7BrFcaBHIF0bd7Vaf0Xta+vblnCfcEXZmvNrSqn9jw73wM3bnOsKIeYHVCoVw0OUP1fnr2ez9kjdH102XlpS1lkW9sriYaUOHTpw5MgRtmzZwsyZM3niiSd44oknmDlzJlu2bOHw4cO0b9++/IaEEDVGbzTaZbKOah00z2gFDB83Jx68LRQA9cFlysqthoFP6W/gC3WF/HHhD0XZqOajUNeD5yRKqFQq7mxxp6Js/YX15GvzS7/IrQG0M1oB6uC3oNfRrqGeWwKVO0F+vuUcuro+umz8yVNdfz1CWMiiJLxx48YRFBTE/PnzGTBgAAMGDLByt4QQ1aKebXd99kom644lK8oe7xOOp4sjDXLOo758WHlBt0lltrcneQ/Xc5UraoxuPtoqfRW25Y7wO5h7cK7hOFOTyfbE7SZLyyl0nQiHfyw5Tj2PKn4XKhVMHdicp38q+X47dzWLP49d5o7IINN26gqTnGUJloV9sugv5bp160hJSbF2X4QQ1c14gk4Zm3LUBfO2nlP8/fZydeTRXs0AaHp9q7KyTyi0HFJme6tjVyuO2/q1pXmD5qXUFnVZkGcQtwbeqigzTsExEdodGt2iKFIf+haAIW0CaGMyuny2bo8uG/96kAl+wk5ZFCyHh4eTnZ1t7b4IIaqZcc6h8Tqqdcn5a1msMVp14LFezfBxc4K8DEJuRCsv6PJomRP7sjXZbEnYoigz/qhe1C/GnxrsStpFal5qKbUxO9FPdeoPnDUZqNUqnhmk3NXv1OVMk3z6usQ4Tcs4jUsIe2FRsHz//fezfft2Ll++bO3+CCGqUz1Kw5i/NVaRQunh7MCkPkWTttTHVuCouyn/VOUAnR8qs71N8ZvI0+YZjh1UDiYbWIj6ZWjTobg4uBiOC/WFJjs3moicAI6uhkOVTkNo6i4ARrQPpFWAp6L6Z5vP1t3NPCQNQwjAwmD5tddeo2/fvvTv35/ffvsNjUZj7X4JIaqD8chQHR1ZTkjJYVWMcqe0R3o1o4G7M+j1qA8ZTexrPQK8y84dNf4IvneT3vi5+Vmlv8I2eTp7Mih0kKKs3FQMd19oO0ZR1CxlW9H3nVrF1EHKXf1OJGew+eRVa3S35hn/fpA0DGGnLAqWW7duzfHjxzl37hzjx4/Hzc2N4OBgmjdvbvKvRYsW1u6zEMJSxn/s6uh2119sU65j6+bkwOR/RpW5uB/V1ePKC8qZ2Hc5+zJ7L+9VlI1uIRP77IHx1/
lYyjEupF8o+yKjVAzP/Muo4nYAMCoymOb+Horzn22po6PLalk6TgiwMFiOi4sjISEBvV6PXq9Hp9Nx+fJl4uLiTP5duFDOLx0hRI0x/oNdF5eOu3gjhxUHLirKHuoRhp/nPx+n71+svKBhM2g+sMw2155fi/6mDVs8nTwZEDLACr0Vtq5ncE/8XJWfIBhP9DQR1hMatVEUqfcvAsDBzOjykYvpbD+j3GGyLjD5/VAH430hrMGiv5Q6na5S/4QQNqIepGF8uS2Wwpteh4ujmn/1+2fFitwbcPxX5QVdHi0zN1uv1/PrWeU1w5oNw/WmvFRRfzmqHRnZfKSi7Pdzv1OoKyz9IpUKbp2sLDr7F6QXvYm7s2MwTf3cFefn1sXcZePfD/L3XNipujesJISwnPEfuzo2wS85PZfl+5Wjyvd3DyPA65/A9tAPUFgySU+vdip3Yt/+K/tJzExUlI1tObaU2qI+uqvlXYrja7nX2JW0q+yLIu8F55LJfCq9DvYvAcDRQc3TA5Wjy4cS0og6V8eWXDWZ4CfBsrBPlfpLuW7dOp544glGjBjBXXfdxVtvvSVpFkLUKUZpGHUsZ/nr7ecp0Jb8wXZ2UPNk/3/mReh0sO8bRX19mzvAM6DMNleeXak4buHTgo6NOlqnw6JOiGgYQaR/pKLM+PvChKs3dLxPWXZwGRQWrcIytnMTQhq6KU7P3XymTo0uG/9+qEt9F8KaKhwsP/jgg4wePZpFixaxfv16Vq9ezbvvvku7du1Yvbqc/C4hhE0wnaBTd4Llqxl5/LQ3QVF2762hBPr8M6p8bhPciFOc13X7V5ltpuenszFuo6JsXKtxdXr9aWGZca3GKY53XtzJ1ZxyVrEwSsUg+xqcLFpNw8nM6PK+uBtEny9jHWebY5yGIcGysE8VCpYXLVrETz/9hIODAxMnTuSzzz7j3XffpUePHuTl5fHII4+Qnp5e3X0VQlSV8R+7OpSGsWDHefILS4J9JwcVTw64abWdvQsU9dPcwtCHdC+zzT/O/0GBrsBw7Kh2lFUw7NTw8OG4OZaMBGv12vIn+gXcgi6sl7Jsb8mnG3d3CSHYR5n7/tnms1Xua40x/v0gOcvCTlXoL+WyZctQq9X8+eefLFq0iKlTp/Laa68RFRXFo48+SmZmJr/++mv5DQkhalcdXTruelY+P+xRjiqP7xpCkwb/BDcpsXBOOUJ8odHQMicw6vV6k4/aB4cNpqFrQ+t0WtQpHk4ejAgfoSj79eyv6MrJ09V1e1xZkBgNl48C4Oyo5t9Go8t/n09h74U6Mrps8vtBRpaFfapQsHz06FF69OjB4MGDTc69/vrr6PV6jh49avXO5ebm8tZbbxEREYGrqyvBwcFMmjSJpKSk8i/+R1paGj/++CP3338/4eHhODs74+XlxW233cbcuXNlQxVhX4xzDutIusHCnRfI1WgNxw5qFf/uf1MQsm+Ror7etQFJDXuU2eaJlBOcuXFGUWb8UbywL8Zf/8TMRPZf3l/mNfqIkeQ6Gb3B2rfQ8L8TuoUQ6K0cXf58S90YXTZOR5J1loW9qlCwnJGRUermIsXlGRkZ1usVkJeXx6BBg3jnnXfIyspizJgxhIaGsmTJEjp37sz58+cr1M7HH3/Mgw8+yC+//ELDhg0ZN24c3bt35/Dhwzz33HMMGjSInJwcq/ZdCFul19W9dZZvZBfw3d9xirKxnZsQVrw0V0E2HPpecV7X6UG0ahfKYjyqHOwRTI+gsgNsUb9F+kfSsoFyJLjciX4OTsT7DVCWHfkf5KYB4OLowJP9mytO7zx7nQPxN6rY2xpg/PtBcpaFnarQX0q9Xo+Dg4P5Bv7JabL2esqzZs0iOjqanj17cubMGX755Rf27NnDJ598wrVr15g0qewduYp5eHjw8ssvExcXx8GDB/n555/ZvHkzR48eJSwsjF27djFr1iyr9l0Im1UHl45bHHWB7IKSUWW1CuXEqSP/g/yb50yo0HV5rMw2cz
Q5rLuwTlE2ttVY1HXgzYOoPiqVymR0eVP8JtLzy56TE+8/AL3asaRAkwOHfzIc3tc9jEZeyjdvdWJ02WTpOAmWhX2yyb8MBQUFzJs3D4D58+fj6VmyluW0adOIjIxk+/btHDhwoNy2XnvtNT744APCwsIU5a1ateL9998H4KeffjJ3qRD1UN1Kw0jP1bA0Kk5RdmfHYMKLtxPW6xUTqgCIuL1o174yrLuwjmxNtuFYrVKbrLUr7NOo5qNwUjsZjgt0Baw6t6rMa/KcGqJvfYeycO83hjenrk4OTOmnHF3edvoahxPTrNHlamOytKSssyzsVIWD5WXLluHg4GD2n0qlKvW8o6Nj+Y0biYqKIj09nRYtWtC5c2eT8+PHjwdgzZo1lW77Zh07Fq2leunSpSq1I0RdYZxzaOvrLC+JukBmfslOaioVyq2E43fD1ePKi7qXvVycXq/nl9O/KMp6B/cm0COwyv0VdV9D14YMCRuiKPvf6f+VP9Gvq9GnnamxRcsZ/uPB25ri7+msqGL7o8vGOcsysizsU4WDZb1eb9E/S9IzDh8+DECXLl3Mni8uP3LkSKXbvllx3nNgoPyRFHbCZLtrm/xwCYD0HA2Ldik3PRrZIYiWAV4lBUbLxeHbApoPKrPdw9cOcyr1lKLsvjb3lVJb2KN729yrOE7ITODvS3+XeY0+rBcEtFMWRn9h+F83Zwf+1Vc5urzp5FWOJdnwsquShiEEABUa9rV2PnJ5EhKKlogKCQkxe764PD4+vkr3mTt3LgBjxowpt25+fj75+fmG4+IJjRqNRlbUqKDi5yTPq3Ks+dy02kLFsU6vt9mvx4Id58jMU44qP9WvWUl/My7heHKNYuxL23USOq22zGf244kfFcchniHcFnCbzT6HmiQ/o0U6NOxAywYtOZd2zlD248kf6R5gum634ZkVFqK69Qkc//i/kpPnt6JJOgIBtwBwb9dgvtoey42ckuc7d9MZvnigU/W8kCoy/suvLSy02veGfK9Vnjwzy1jjeVU+R6IGZGVlAeDu7m72vIdHUb5iZmamxff46quv2LRpEw0aNODVV18tt/7s2bOZOXOmSfnWrVtL7acwb+PGjeVXEias8dz8zvx/e/cdHlW1NXD4Ny0z6SEFkkDooRfpICAoRRQRQawoRbFcC/rhvSpiAdGLimIBFEWpKna5gkhTQXqHUAMJEBJIAglJSM+U8/0RSHJmUkgjE2a9z5PHzDqFnWNyZs0+a+99nIAirxMSE9m3alWJ+9eUTDN8uU9H0cfAN/jbiNqziSvpS5uz3xOuFA78s2jdWJPgj6XIz2N/zTJsGay9tFYVa2dpx+o/Vlf5z1Cbyd8otMltQxSFyfKms5v4ZsU31NEVPw/3unXr0No8Gaz3xmgpfG86+8urHGhYOBdz70ANK88UDphfd/Q8839cRX3PavghKqleXCy+RV6fPnmSXVV8v5DftfKTa1Y+VTHjmVMmy9Vt06ZNPPfcc2g0GhYsWEBoaGiZx0yePJlJkyYVvL506RJhYWHcfPPNBAQElHKkuMJsNrNu3ToGDRqEwWAo+wABVO11S46KJuXPvwpeh9QPpdPtt1e2iVVu1roT5FoLSzA0Gnj7wT6E17082DcvA/0nz6iO0XR6mMFD7gFKvmZfHvoSa0Rhgm3UGXlp6Ev4Gn0R8jdaVH9zf/5a/hcZ5vzOGwWF5LBkRt8wWrWf/TXTeh+Dze8XbG+Uup36D38OnoEA9M2xsGnWP6RlFz41OWitz2O3d7wGP1X5nN+5i0u7CueZbty4Ed2q6H4hv2vlJ9esYpKTkyt9DqdMlq/MflHSp4HMzPxR7N7e3sVuL82hQ4cYPnw4eXl5fPLJJ4wYMeKqjjMajRiNjvO2GgwG+aUtJ7lmFVMV101rV4Oo1eqc7v/Fxcw8lmxXr9Z3Z8dQ2tQv0qO39wfILTq3uwbdjU+js/tZil4zi83Cz1HqOXNva3IbgV6BVdr+64H8jYKvwZfhzY
fzzdFvCmLLo5fzTOdnMOpKeS/o8Ths/Rhs+Y9+NdZcDAe+hn7/AcDfYODRPk2Zta5wQZzVRxI5dTGHFvXK/55WnbR2A/S1aKr890J+18pPrln5VMW1csrRPVemeYuLiyt2+5V4o0aNynXeU6dOMXjwYFJSUpg6dSrPPvts5RoqRG1TC+ZZ/uKfkw7zKk8cEF64g82qGjgFQMvbIKD4hZOu2Bi7kcSsRFVMBvaJ0tzXUj3QLzU3lTWn15R+kHc9aD9KHds1Hyx5BS/H3tgYb1NhIqooMOevKJyOw2rXMnWccE3O905J4ZRue/fuLXb7lXiHDh2u+pzx8fEMGjSI+Ph4nnvuOd54443KN1SI2kZx7qnjkjJyWbz1tCp21w31aRZUONc6kasgRb0PvZ4u89zLItXzqXcI7EDbgLYl7C0ENPFt4rCq47KjVzEvf89/qV9nJMLhXwpe+robGN+7iWqXFRHniDqfUeG2VgeN3YdpRWbDEC7KKZPl3r174+vrS3R0NPv373fY/tNPPwEwbNiwqzpfSkoKt956K9HR0YwfP54PP/ywKpsrRK3h+GbnXMnyF/+cJNtc2Kus02p4tmivMsA2u17lkI7QqHep5428GMmO+B2qmPQqi6th/3tyKPkQ+8/vL/2gkI7QqI86tm2uauq1R3o3xsuo7l3+9G8n612W5a6FAJw0WXZzc+OZZ/IH7zz99NMFNcoAs2bNIiIign79+tGlS5eC+Jw5c2jVqhWTJ09WnSsrK4uhQ4dy8OBB7r33XubPn4/GyVctE6La2L/ZOVEZxvn0HJZsO62KjehUv3C1PoCze+DMVvWBvZ4pcyXCpUeWql77m/wZ3HhwZZorXES/Bv0I8QxRxZYcWVL2gfa9ywkRcKZwrmY/DzfG3qguJVy+/yynkzJxGvZ/V9d4GlkhnIVTDvADePXVV1m/fj1bt24lPDycvn37EhMTw44dOwgKCmLBggWq/ZOSkoiMjCQ+Pl4VnzJlCtu2bStYTfDRRx+lOIsWLaquH0UI5+FQs+w8Hxw/33iSHHNh+3RaDc8WXa0PHHuVvUOhzV2lnvdC1gV+P/W7KnZ/y/uLHaQlhD29Vs/o1qN5f3fhDBd/nvmT2PRYwrzDSj6w5W3g1whSi6wHsG0uNLqx4OWjfZqycMtpsi7X6NsUmPt3FDPvcZKZMRzuD9KzLFyT83Qr2TGZTPz999+89tpreHh4sHz5cmJiYhg3bhx79+6ladOmZZ+E/BIMAKvVyrfffsvixYuL/RLCNdiv4OccyXLipRy+3q5eZGhU5wY0CijSq5wWB4d/VR/Y/THQq5cQtrfs2DIstsJputy0bg4rtAlRmrvD78bTUPi7aFNsfH3k69IP0uqgx5Pq2LHfIamw1MLf042He6l7l3/Zd5bYi5WfF7YqaOzKMBTpWRYuymmTZQB3d3fefPNNoqKiyM3NJT4+noULFxa7st/UqVNRFMWhh3jRokVXtSy3EK5AsSvDsH8zrCmfbYgm11L4RqzXanjGvld5x+dQZBESDB7QZVyp5822ZPPD8R9UsWHNhuFv8q9sk4UL8XLz4u7wu1WxX6N+JS23jKWqOz0ERp8iAQW2fqLa5bG+TTEZCv8OrTaFuc5Su+xQhiHvlcI1Occ7pRDi2nDCqePi07L5dqd6XuV7uoYR5l9kZcycS7DH7gnQDaPBo/Skd8XJFQ4JzZg2YyrVXuGaRrcejU5TuPJetiWbn47/VPpBJh/oOl4dO7AM0hMKXgZ6GXmoh7p3+cc9cc5Ru2x/f5COJeGiav6dUghx7di/2TlBFcanf0eTV6RX2aArpld5z0JQJb0axwFUdmyKjW+OfaOK9a3fl6Z+V1fCJURRoV6hDGo0SBX79ui3mK3m0g/s+RToipQKWfNg+2eqXR6/qSlGvbp3+cP1x6lpDlNLyjzLwkVJsiyEC1Ec5lmu2VtA7MUsvtul7lW+r1sY9f3cCwPmnPyBUUW1GlrmIiSRlkhiM2JVsT
FtpVdZVJz9U4nz2edZe2Zt6Qd5B0NHu2kKdy+AnMIPf3V9TIy7sbFql98OnONYwiVqlF0Zhn0ZlxCuQpJlIVyJ/ZtdDdcsf7T+BGZrYZvcdFqevtmuV/nAt/mLOhTVZ1Kp51UUhU05m1SxlnVa0iO4R6XaK1xb+6D2dK7bWRVbfHRx2eNebnwO1WOc3Euwe6Fqlyf7NXOYd/mDtTXcu2x/f5AyDOGiJFkWwpU4lGHUXB3GicR0ft2nXtJ+dM+GhPgW6VW2WmDLx+oDm9wEDbpQmj3n93DGqu6xHtN2jMyxLirN/ulEVGoUkZbI0g8KbA6t71DHtn8GltyCl3U83Xisr7pEaN2RRPadSalUeyvFvgxDZsMQLkqSZSFciX3NYQ3Os/z+2khVR7eHm86xV/nIcselrcvoVQZYcFg9D3uIZwi3NbmtYg0Vooibw26mqa86qd2Qs6Hs3uXe/6d+nZEAB75ThR7p05g6HgZV7P21ZSTi1cm+DENqloWLkmRZCBdiP09qTU0dtz82lTWH1aUVE/o0IdCryEIhigKbP1IfGNoJmvYv9dyHkg6xPWG7Kja+3XgMWkMJRwhx9bQaLRPaT1DF4qxx7ErcVfqBDbpA477q2NZPwFY4HaK3ycBT/dUfGLdEJbM1KqlSba4oh/uDVGEIFyXJshCuxP7NrobKEt5fo+4t8/MwMOEmu1kqTqyDxIPqWJ9JZbZ5fsR81esAUwAjmo+ocFuFsDekyRDqe9VXxeyfZhSr9/Pq18lR+QuVFPFwr0bU81GvLjlzbWTNrAdgPwBYyjCEi5JkWQhX4gTzLG+NSmKzXU/ZU/2b4WOy6/ndPEv9OiAcWtnVfdo5kXKCv2L/UsXGtB2DSW+qcHuFsGfQGnik3SOq2M7EnURciCj9wOYDoF57dWzzLNVYApNBx8QB4apd9p1J5c+j5yvV5gpxWO1akmXhmiRZFsKVOEwdd217lhVF4V27XuVgHxNjejVW7xizDc5sU8f6PF9mcv/lwS9Vr70N3tzXUpa2FlVvePPhBLkHqWL2TzUcaDTQ+zl17Nw+iPpTFbq3axgNiy7Kw+Ua/2s8dZv91JKy2q1wVZIsC+FCHN7srnEZxtojiRyITVXFJg4Ix2TQqXfc/KH6tU99aH9vqeeOvRTL6tOrVbH7W96Pp8Gzos0VokRGnZGxbceqYhviNhB5sYwBeW1HQJ0m6tjGd1S9ywadlkmDWqh2OZaQzoqIc5Vqc7nZ1yzLPMvCRUmyLIQrqcF5lq02xaFWuXGAB/d0baDeMf4AnFijjt34LOjdKM28iHnYivScu+HGAy0fqFSbhSjNPS3uwdfNVxWbd2Be6Qfp9HDTv9WxuF0QrS4fGtYxlBb1vFSxD9cdx2y9hqUQ9h+mpWZZuChJloVwJQ41y9euZ3n5vrOcOJ+hiv3foBYYdHa3oQ3vql+7+0Pn0lfeO5V2ipUnV6pi3Y3d8TP6VbS5QpTJw+DBgy0fVMXWn1nPkeQjpR/Y4T7wa6SObXxX1bus02p4YXBL1S6nk7P4bqd6/vBq5bDctfQsC9ckybIQLqVmyjDyLDY+XK9ejax1iA/DOoSqd4w/AJHq2QG48VlwK72U4rP9n6l6ld317vQ19i3lCCGqxv0t78dd466Kzd0/t4S9L9MZoO8L6ljsDji1URUa3KYeHcP8VLGP1p8gI9dS0eaWi2PNsvQsC9ckybIQLkSxK8O4VvMsL90eQ1xKtir2n1tboLXvuSquV7n7Y6We+0TKCcda5Rb346mVWmVR/bzdvOlj7KOK/RP3DwcuHCj9wI4PgG9DdWyDundZo9Ew+bZWql2SM/P4YmN0pdp89ezLMKRnWbgmSZaFcCU1MHVcWraZ2X+dUMW6NqrDzS3rqncsqVfZ6F3q+T/d/ylKkR5zL4MXY1qXXrYhRFXqaexJHWMdVWzuvjJ6l/Vu0NduNcozW+H0JvW5mwYwsLX6b2X+plMkXsqpcHuvmv39QcowhIuSZFkIV+
IwG0b1/5OfbogiNcusik2+vRUa+xKQje+pX19Fr/LR5KOsP7NeFRvTZgy+Rt8SjhCi6hk1Rsa3Ga+KbYvfxp7EPaUfeMNo8LEb4Gr/dAV4aUgrVflwttnKR3ZlTdXBYWpJKcMQLkqSZSFciH3NoX1NYlWLS8li4ZbTqtht7YLp0shfvWP8ATimHqDHjc+U2atsXxvq4+bDQ20eqmhzhaiwUeGjHOZdnrNvTulzE+vdoO//qWMxm+H0ZlUovJ4393ULU8W+3xXLicT0SrW5THYfaO3LuIRwFZIsC+FKrvHUcbPWHifPUpig67UaXhzSynHHDe+oX7vXge6Pl3ruvYl72RinHhA1vt14vN1KT7CFqA4mvYkJ7SeoYrsTd7Pl3JbSD+z0cP484kVteMfhKdD/DWyBe5H5yG0KvLv6WKXaXCb7+4OUYQgXJcmyEK7kGi5KcuhsGr/uP6uKje7RkCaBdgPvYndB5Cp1rIxaZUVR+GDPB6qYv8mfB1s9WMIRQlS/US1GEewZrIrN2jMLq81a8kF6I/Sx610+vQlO/q0K1fUx8dhNTVWx9UfPs/1kcqXaXCr7MgyZZ1m4KEmWhXAl12ieZUVRmPHHUVVu7m3UM3FAuP2O8Oc0dcwjoMxe5XUx64i4EKGKPd7hcTwMHiUcIUT1c9O58fQNT6tiJ1JO8Fv0b6UfWFzv8p9vOny4ffympgR6qRfnmbHqaPUtQ21fhiE1y8JFSbIshAu5VjXLG45fYEuUusfryf7NCPAyqnc8ucFh9D99Xyi1V9lsNfPx3o9VsYbeDbm3RenLYQtxLQxrOozwOuoPhXP2zyHbkl3CEYDBBP1eUsfO7YOjK1QhL6Oe5waql8E+EJfG7wfjK9XmkjjcH6QKQ7goSZaFcCUOb3ZV37NstSm8s0pdSxnsY+KR3k3s2qLk954V5VMfuj5a6vl/OP4DZ9LVq5g91/k5DDpDhdssRFXRaXW80EW94Mj5rPN8feTr0g+8YTQENFfH/noL7Eo47u8WRlO7Uqb3VkeqxgZUGfuaZSnDEC5KkmUhXMk1mGf5+12xRNqN0n9hcAvc3XTqHY+ugHN71bF+L+X3spUgPS+dzw98rop1COrAoEaDKtVmIapS7/q96RXSSxX76tBXXMy5WPJBOj3cPEUdS4qEiO9VIYNO6zBI9szFLBZvPV2ZJhfP/rO0JMvCRUmyLIQrcSjDqNqe5Us5Zj5YG6mKtQr2ZmRnu7lkbdb8XrOiAprn966VYsGhBaTkpqhiL3R5wXHOZiFq2KSuk9AUyTYzzZnMOzCv9IPa3AXB7dWxv2eAJVcVurVtPbo0Ui+C8smfJ0jKUO9XWQ7LXUsdhnBRkiwL4UIcBgJVcZI5+88TJGfmqWKv3dEGnX1SfmBZfq9ZUTdPye9dK0FseixLDi9RxW4Ju4XO9TpXqs1CVIdW/q0Y1myYKvZD5A+cSDlRwhHkP+kZ8IY6lnYGdi9UhTQaDa/d0UYVS8+18MHaKl6oxKEMQ5Jl4ZokWRbClVTjPMunkjJZZPcoeFCbevRuHqjeMS/LsVc5uEN+r1opZu6aSZ6tMBHXaXQ81+W5SrRYiOr1zA3PYNQVDmq1Klbe3flu6bNXNB8IDW9Uxza+C9mpqtANYX6M7KSeQeP7XWc4cu5SZZtdyP7DtJRhCBclybIQrqQap457+/cjmK2FSYBBp2HK7a0dd9w2B9LtRu8PeKPU+uktZ7fwd6x63tkHWj1AU9+mJRwhRM0L8QphfDv1Mtg7EnY4LNGuotHAQLve5eyLsOkDh11fHNLKYaGSN1cerrqp5ByWu5aeZeGaJFkWwpVUUxnGphMXWH/0vCr2SO8mNLZfgCQ9ETZ/pI41uwXCB5Z4brPVzDs71Sv8+Zv8eeqGpyrTZCGuiUfaPUKIZ4gqNnPXzNKnkmvYE1rfqY7tmAcpMapQsK+Jp/o3U8W2n7zImsMJlWrzFQ41y5IsCxclybIQLsT+za4q5lm2WG1MX3lEFQ
v0cuOZW5o77rzhv2DOLNoCGDS91PN/e+xbTl86rYo93/l5WdZa1Aruenf+3fXfqlh8ZjwLDy0s4YjLBk4FbZEafmue41SLwGM3NaW+n7sq9vaqo+SYS1k18GpJGYYQgCTLQrgW+ze7KqhZ/nbnGY4nZqhi/x7cEm+T3bzHiUdgr3qAHp1GQ3C7Es+dlJ3EZwc+U8XaBbRjePPhlWqzENfSoEaD6B7cXRVbcGgBZzPOlnAEENAMuj2mjh36CeL2qEImg47Jt6unkou9mM2CLacq1WbA8f4gPcvCRUmyLIQrcSjDqNzpUjLzmLVOPQK/TYgP93QNc9x53evqqesMHo7zytqZuWsmmaqeaJjcYzLaKhyYKER102g0vNT9JXSawvriXGsuM3bMKL20od+LYPJVx9ZOcfg7Hto+hO6N/VWxOX9FkXgpp3LtdqhZlp5l4ZrkHUcIF1LVy13PXBtJapZZFXtjWDFTxZ1YD1Hr1LEbnwWf0BLPveXsFladWqWKDW82nA5BHSrVZiFqQos6Lbiv5X2q2Ma4jaUP9vPwh5v+o46d2QZHlqtCGo2G14e1UVVNZOVZefv3o5VrtH3NskwdJ1yUJMtCuJIqnDruQGwqy3aql52+vX0wPZoGqHe05MHql9Qxr3pw48QSz51tyWb6dnUts7ebN893eb7C7RWipj3d6WkCTOq/jxk7ZpCel17CEUD3x8GvkTq25lXIUz9xaVffl3u7qJ/o/HbgHFujkirRYpkNQwiQZFkI11JFs2FYbQqvLj+kOp2Hm45Xh7Zx3HnHZ5AcpY7d8hoYvUo8/7wD8xzqOV/o8gKB7oElHCGE8/Nx8+Hl7i+rYheyL/Dx3o9LPkhvhMF2g2AvxcHmDx12fXFIS3zd1WMFXv/tMHmWCpZP2D8hkgF+wkVJsiyEK6mieZaX7TzDwbNpqtjEAeGE2o3K51I8bHxPHavfpdRlrSMvRrL48GJVrHPdzowIH1GhtgrhTG5tfCt96vdRxX6I/IH95/eXfFDrO6HJTerYlk/gonoQX4CXkf/c2lIVizqfUeHBfo5Tx0myLFyTJMtCuJCqmDouOSOXmWvUS1U3r+vFI72bOO68/g3IU8+UwW0zS1yAxGqz8ua2N7EqhdNe6bV63uj1hgzqE9cFjUbDqz1fxV1f+MFSQWHatmmYreaSDoLb3oMiAwSx5sIaxwGyD3RvSIcG6kGBn/x5gnOppczrXHJj1a+lCkO4KHn3EcKVVEEZxjt/HCMtW/2m/ubwtrjp7W4nZ7ZDxPfqWKeHoEGXEs/99dGviUiKUMUmtJ9AUz9ZqU9cP+p71eepjupFdaJSo/jy4JclH1S3NfR4Qh2L/B2i1AMEdVoN04e3cxjs99bv6rnQr4r9B1QpwxAuSpJlIVxJJedZ3hNzkR/3xKlid3YM5cZmdrXEVgusshvFb/TJX9a6BCfTTjJ732xVrLFPYya0n1CuNgpRG4xuM5qWddQlE19EfMHR5FJmsOj3EnjY/a398RJYclWhjmF+3N+toSq26mAC/xy/UL5GSs2yEIAky0K4Foep466+Z9litfHq8sOqmJdRz5ShrR133vk5JKh7iOk/GbzqFntuq83Ka5tfI9da+KavQcMbvd7AqDNedRuFqC0MWgPTek9Tzb1sUSxM2TKl5HIMd7/8lf2KSo4qfrDfrS2p46Ee7PfGb4fLtbKfxu7JkyJ1GMJFSbIshAtxWAChHGUYX20+xdH4S6rY8wPDqedjUu+YGgt/va2OBbWG7narkRWx+Mhih/KL0a1H0zW461W3T4japm1AW4cnJydSTjAvYl7JB90wGurb/V1s+gCSTqhCdTzdeGmIemW/U0mZfPq33cw0pbEfWyDzLAsXJcmyEK6kgvMsn07KdFipr2U9b8be2Fi9o6Lkl1/YrbrHsI9AZ7f89WVRKVHM2TdHFWvk04iJnUueh1mI68UTHZ6gRZ0WqthXB7/iUNKh4g/QauGOD+0G++XByv9zGJNwb9
cwOjX0U8U+3RDNsQT1h94SSc2yEIAky0K4lgpMHacoClOWHyS3yFytGg3MuLs9Bp3dLeToCjj+hzrWZRw07Fnsuc1Wc/5jZ1vhY2cNGt7q/ZZqtgAhrlcGnYG3+7yNXqMviFkVK1M2TyHHUsJy1SEdoJd6gCCnN8GBZaqQVqvhvyPaoy/yd26xKbz880GsV9NLbH97kEVJhIuSZFkIV1KBMoyf9sSxJSpZFRvbqzGdG9ZR75hzCf54UR3zDHKssSxi9v7ZHElWj9If23YsN9S9ocx2CXG9aOXfisc7Pq6KnUw7yfu73y/5oP6TwVe9Yh9rpkCm+m+1dYgPT/RTzyazPzaVpdtOl9kux3mWJVkWrkmSZSFcSHnnWb6Qnstbv6tH54f6mvi33cIHAPw5DdLj1bEh74B7Hcd9gW3ntrHw0EJVrIlvE56+4elS2yTE9WhC+wm09lcPlv0+8nv+OvNX8Qe4ecLtdsl09kVY84rDrs/eEk6TQE9V7L01kZwta+5lKcMQApBkWQjXUs6p495cecRhTuXpd7XDy6hX73jqH9hlN0dss1ug3d3FnjclJ4Upm9ULKhi0Bt7t+y4mvanYY4S4nhm0Bt696V2H8qPXt75OYmZi8Qe1HAJthqtjEd9BpLoUymTQMWNke1UsK8/Kq78eLL232GFREulZFq5JkmUhXIlDGUbJu/55NJEVB86pYsM6hjKgdT31jrkZ8L9n1DG9Owz9oNgyD0VReH3L61zIVs/5+nzn52kdUMw0dEK4iCa+TXip20uqWFpuGlM2T8FqK2HKtyHvglG9Yh8rnofsFFWoZ9MAHuiuLtv4O/ICKyLsngYV4TC1pPQsCxclybIQLkRxmGe5+FvApRwzry5Xj8b3dTfw+h1tHHf+cxqkxqhjA98A/+JX3Vt2bBkb4jaoYr1De/NQm4dKb7wQLmBk+EgGNRqkiu1I2MHCwwuLP8AnBIb8Vx3LSIDVjuUYL9/WmiBv9bzl0347THJGrsO+gMPUcVKzLFyVJMtCuBL7EfAlJMvTVxwhPk09Ev/VoY5vtJzeDDu/UMca9oLudsvyXhZxIYKZu2eqYv4mf97q8xbacq4mKMT1SKPJX4wn2DNYFZ+9bzY743cWf9ANo6H5QHXswLdwfI0q5Otu4M0726piyZl5vLr8UPGJsJRhCAE4ebKcnZ3N66+/TosWLTCZTISGhvLII49w9uzZcp1n48aNTJs2jaFDhxIUFIRGo6Fx48bV02ghnJnDm51jmcT6I4kOS1r3bh7AqC4N1DvmZcL/7Abj6d1h+Nxik/CUnBRe2PgCFptFFZ/eezqB7oEO+wvhqnyNvrzT9x3VB0ibYuM///yH81nnHQ/QaGDYJ/lLyhe14jmHcowh7YIZ3EZdSvXHoQR+syu5KjhvUVKGIVyU0ybLOTk53HLLLUyfPp2MjAyGDx9OWFgYCxcupFOnTpw8efKqz/Xcc88xdepUVq1aRVJSUjW2WggnV8Y8yymZebz8y0FVzMuo5927OzgsfcvqyZByWh0b8DoENHP4Z602K5M3TSYhM0EVH99uPDc1uKlcP4IQrqBLvS4OM8NczLnIvzf+WzUveQHf+nCrXTlGejz8/oLqQ7JGo+GtEe0clsJ+/X+HSbykfpokU8cJkc9pk+W33nqL7du306tXL44fP87333/Pjh07+OCDD7hw4QKPPPLIVZ9r8ODBvPXWW6xZs4bDhw9XY6uFcG5lTR332v8OkWRXv/j6HW1oUMdDfaKjK2HvYnUsrCf0KL784ouIL9hybosq1rVeVyZ2klX6hCjJhPYTHD5M7ju/j4/2fFT8AZ0ecizHOPQzRPygCtX1NvHWXerZMdKyzbz8c4T6HiFlGEIATpos5+XlMWdO/vK3c+fOxcvLq2DbpEmT6NChAxs3bmTPnj1Xdb733nuPKVOmMHjwYPz9/aulzULUCg5TxxW+Ga44cI6VdiPjb2lVl3u62pVfXIqH355VxwyecNenoNVh75
+4f/jswGeqWKB7IDP7zUSv1TvsL4TIp9Vo+W+f/1Lfq74qvuTIElafWu14wJVyDJOfOv77Cw5PgYZ2CGFYx1BV7O/IC/ywO7bI+WSeZSHASZPlLVu2kJaWRrNmzejUqZPD9lGjRgGwYsWKa900IWo3h6nj8m8B59NzeO1/jrNfvDOyvbr8wmaD/z2Vv/hBUbe9W2z5RXRqNC/+8yIKhf+uTqNj5k0zpU5ZiKvga/Tlg/4fYNCqyyZe3fIqh5OLeVLqWx+GfaSO5aXDL4+DVT1e4M072zoM2n1zxRFiL2blv5Cp44QAnDRZPnDgAACdO3cudvuVeERExDVrkxDXBZv91HEaFEVh8s8HSc1yXHykro/dAiE7P4douxXFWt+Z//jXTmpOKs/+9SyZ5kxV/PnOz9M1uGvFfwYhXEzbgLZM7jFZFcu15jLxr4lcyLpQzAEj8mfIKCp2B2yepQrV8XTj3bvV5RiZeVb+89MBrDbFYZyCFGEIV+WUz0DPnDkDQIMGDYrdfiUeExNT7PbqkJubS25uYS3npUuXADCbzZjNxQy2EA6uXCe5XuVTlddNsUuWLVYri7ac5M9j6hH2t7Wtx5DWgep/M34/+nWvq+bPULyCsQx5HyzqHiuzzcykDZOITY9VxW9rdBsPtniw2n8H5HetYuS6ld+1umbDGw/n8IXD/Bz1c0HsfNZ5Jv41kfkD52PU2U3rOPAt9Ke3oEk9XRBSNryDtUFPlIa9CmJ9m/lzT5f6/LincJap7Scv8tnfJxhXz64n2Warsp9TftfKT65ZxVTF9XLKZDkjIwMADw+PYrd7euavcZ+enn7N2jRjxgymTZvmEP/7779LbKco3rp162q6CbVSVVy3ZmYzRauKV2zYwdtpzSg6hZyXQaGP+1n++KPwzVNvyaR/5OsYrHmq820NHkvShu2qmKIorMxeya68Xap4fV19uqd2548/1EvxVif5XasYuW7ldy2uWQelA3t0ezhtPV0QO5R8iH/98i/u9rjboSe4Tt0x9El9Cy35Sa9GsWL+bgwbWk4nz1A4zVxXLax305GSV3j8rHXHca97lhuLnE+x2Vi1alWV/kzyu1Z+cs3KJysrq9LncMpk2RlNnjyZSZMmFby+dOkSYWFh3HzzzQQEBNRgy2oPs9nMunXrGDRoEAaDoewDBFC11y36zemqR6l/JXtj0anfYD+8vzP9WwQVBhQF3U9j0eapH/daez5D9wH/cfg3lh5dyo59O1SxQPdAvrr1K+p61K1U+6+W/K5VjFy38rvW16xPTh/GrBnD2czCD7P7zfvpHtadJzs86bC/sikP/nmn4LW7OYVbM3/Eev8PqgG5DTpc5OEFuwvWLbKhYU2ihypZ1igKt99+e5X8HPK7Vn5yzSomOTm50udwymT5yuwXJX0ayMzMr4H09va+Zm0yGo0YjUaHuMFgkF/acpJrVjFVct3sBvidTcuBIhPEPNqnCYPaqkfIs20uHLfrTQrrgW7QVHQ6dXv+OPUHH+77UBVz07rxyc2fUN9XPaL/WpDftYqR61Z+1+qa1TXUZfaA2Ty06iGyLIXvkV8c+oL6PvUZGT5SfUD/FyFuB5z8uyCkPbUR7baPof9LBbHe4fV49pZwPv7zREEsIV39JAlAr9c7zrleCfK7Vn5yzcqnKq6VUw7wa9iwIQBxcXHFbr8Sb9So0TVrkxDXBbtk2VbkTa9tqA8vDmmp3j92J6x7XR1z94dRC8EuUd6VsIspm6c4/JPTe0+nfVB7h7gQomLC64Tz7k3vOiwR/+a2N9kUt0m9s1YHI+eDd4g6vmEGRP+tCj17S3O6Na5T8NpW3BL0MiOGcEFOmSx37NgRgL179xa7/Uq8Q4cO16xNQlwX7N7olMu1yh5uOmY/0AmjvkhFc3oC/DAGVMtTa/LfeO16iaNSonju7+ccVhb7vy7/x+1Nq+axrRCiUP+w/rzS/RVVzKpYeWHjCxxOsptSzisIRi0ATdERCwr8/CikFA
6U1+u0fHR/J3zdDVf2cCQLkwgX5JTJcu/evfH19SU6Opr9+/c7bP/pp58AGDZs2DVumRC1m/1sGMrlnuU3h7ejaVDh4j9YcuH7h/KXyy3qpn9DuHqFsLj0OJ5Y9wTpeeoBt/e3vJ/xbcdXXeOFECr3tbqPR9s9qoplW7L51/p/cTL1pHrnRjfmL0dfVFYyfDca8gqnd6zv514wnZxSXLmF9CwLF+SUybKbmxvPPPMMAE8//XRBjTLArFmziIiIoF+/fnTp0qUgPmfOHFq1asXkyZMdzieEyGe1qXuFFDTcdUMod3cu0lOsKPD7JIhTz2ZBk37QX/33lZiZyIS1EzifrZ567uawm3m5+8tVWtsohHA0sfNEhjYdqoql5Kbw2LrHiEu3K2W8cSK0tHvSk3gQlj+l6jEe0i6E0T0aFpss2yRZFi7IKZNlgFdffZUePXqwdetWwsPDue++++jZsycvvPACQUFBLFiwQLV/UlISkZGRxMfHO5zryy+/pGfPnvTs2ZOhQ/NvKvHx8QWxnj17lljyIcT14rcD5xx6her5ujP9rnbqpHbnfNj3tfpgv0ZwzyLV6PmUnBQeX/c4ZzPOqnbtGNSRd296F10xS18LIaqWVqNl+o3T6RHSQxU/n3Wex9Y+xvmsIh9ktVoY8TkE2o1NOLLcYcGS1+5oQ+NAT4d/77udZ6qq6ULUGk6bLJtMJv7++29ee+01PDw8WL58OTExMYwbN469e/fStGnTqz5XXFwcO3bsYMeOHQVJcV5eXkFsx44dBYuMCHE9OpGYzss/R6Cxqzd8ZWhrvE1FBuqd3ACrX1YfbPCA+78Fj8JpM9Lz0nli3ROcTFM/6m1RpwVzB8zFXe9e1T+CEKIEBp2Bj2/+mA6B6nE8cRlxPLb2MS7mFFme3uST//ds9FWf5M/pcKxw1huTQcebdzmOC5rx+2H2nkmp0vYL4eycNlkGcHd358033yQqKorc3Fzi4+NZuHBhsSv7TZ06FUVRWLRoUYnbSvvq379/9f9AQtSAjFwLT369h6w8K1q7ITvNgwsXJiDxCHz/MChW9QlGzIPgdgUvL+Vd4ol1T3D04lHVbo19GvP5oM/xtX8TFkJUO0+DJ58O/JQWdVqo4ifTTjJh7QR1whzY/PKAv6IpwOUBf2cLn7I2LjqO4TKrxcZTX+8lKSPXYZsQ1yunTpaFEJVjsyn8+4cDRF/ILHYUu0Z7+RaQngDf3gu5dk9YbvoPtBle8DItN43H1z7OwaSDqt1CPEOYP3g+ge6BVf4zCCGujq/Rl88HfU4jH/W0qidSTvDomkdJyk4qDIYPhAFvqE9gzoJv7yucIaOYmmUNkHAph2e+3YvFKvXLwjVIsizEdezjP0+w+nACgEOvMpD/ZpibkZ8op8Wqt7W+E/oXTk2VlpvGY2sf43CyelqqQPdAvhz8JcGewVXefiFE+QS6BzJ/0HxCPdWLC0WlRjkmzL2fg44Pqk+QeR6+uQeyU/JrnO1cKeXafvIi762JrPL2C+GMJFkW4jr1e0S8ajWuYudHVazw0yMQf0Adb9AdRn5R8GaZnJ3MhLUTHEovgtyDWHDrAhr6NKzq5gshKijEK4QFQxZQ30s9H/rJtJM8suYREjLzP0Cj0cCwj6HJTeoTJEXml2TZzZsOoCnyofuLf07y057iFw8T4noiybIQ16FDZ9N44cf9qpihmL92zV/T4cQadbBOE3hgGRjyB+mdzTjL2NVjOXbxmGq3uh51WThkIU18m1Rl04UQVaC+V30W3OqYMJ9KO8WYP8ZwOu10fkDvBvcuhaDW6hOc3oRm5SSH87rZ3Ucm/xLBrtMXHfYT4noiybIQ15kL6bk8vmQ3OWZ1PeEr9ktZA0T+rn7t7g8P/Qye+bXHUSlRjPljDDGXYlS71fOox8JbFzrURgohnEeoVyiLhiwizDtMFY/PjGfs6rEcST6SH3D3g9E/gpddKVX0Oodzvmx3HzFbFZ5YuofYi1lV2XQhnIoky0
JcR7LyLDy6eBfn0nJU8Qe6h/FQj2JKJYqO39G75/coBzQDIOJCBOPWjFPP00p+j9XCIQul9EKIWiDYM5gFty5weAJ0Mecij6x5hJ3xO/MDfmEw+gdw8y7cqZg1hUbeEMojve3OlZnHo4t3kZ7jWLYhxPVAkmUhrhMWq41nv91HRFyaKt69sT/T7mznMMeyilYP9y2Fhj0BWBezjkfXPEparvpczf2as+S2JQ49VUII5xXsGcyiIYtoG9BWFc80Z/LE+idYEb0iPxDSER78DvSmy3sUN85BYcrQ1tzcMkgVPp6YwVPf7CXPIjNkiOuPJMtCXAcURWHaiiP8eUzdCxzm786nD3XGTa9FsRUzdZwGQJM/mC98EIqisODQAiZtmESOVd073TGoI4uGLKKuR91q/EmEENXB3+TPV7d+RY9g9Up/FpuFVza/wpx9c1AUBRr3gXsWg1Zf3MxxKDYFnVbDJw90okU99TzMm04k8fLPEfnnEeI6IsmyENeB+ZtOsnS7uq7Yz8PAovHdCfQy5gd2zi/mSAXu+BDa3Y3ZZmbqtql8uOdDh716h/bmi0FfyIIjQtRingZP5g6cy8CGAx22fR7xOS9teolcay60HAJ3zaPYOoytcwDwNhn4amw3AjzdVJt/2XdWppQT1x1JloWo5X7aE8d/V6lnqnDTa5k/pivNrqzAtW0urJ/qeHC/f0PX8aTlpvGvdf/ilxO/OOwyMnwkswfMxsPgUQ2tF0JcS0adkff7vc9DrR9y2PbHqT+YsObyan8d7oEhbzmeYOts2PAuAGH+Hnw1rhvuBp1ql882RLN46+nqaL4QNUKSZSFqsTWHE3jp5wiH+Kx7O9KtsX/+i80fwZpXii0/pNtjHE85zuhVo9mRsEO1SYOGSV0mMbXXVAxaQ9U3XghRI3RaHS91f4kpPaag1ajTgP0X9nP/yvs5nHQYTZdxxRytgQ3/hT+ng6JwQ5gfc0d3QqdV90JPXXGY3w6cq74fQohrSJJlIWqpLVFJPPvtPqx2tciv3N6KOzqE5i9CsvY1WJ+/pG1xufL62PWM/n20w9RwJp2JWf1nMb7deDTFFS4KIWq9+1vdz9wBc/E0eKri8ZnxPPzHwyw/+ZvDMQXlyJveh1X/BpuNW1rVY8aI9g77Tfp+P+uPJFZX84W4ZiRZFqIW2h+byuNLdpNnVY88f6JfUx6/qRlYLfDbs7D1k8KNimPS+9bO/zoM5At0D2ThkIUMbORY1yiEuL70qd+HJbctIcQzRBU328y8tfPt0g/e9SX8MgEsedzbLYxJg1qoNltsCk99u5fNJ5JKOIEQtYMky0LUMhFxqYz5ageZeVZV/IHuYbw8pBWYc+CncbBvaZnnss+f2wW0Y9nQZbQLbFeFLRZCOLMWdVrw7dBv6Vy3sype7JwW9jeNQz/Ddw9AbgbP3tKccTc2Vm3Os9h4bMludssqf6IWk2RZiFrkYFwaD325g0s5FlV8aPsQ3rqrPZqsZFhyJxxdoT5Qo4Mh7zqcr+ib4T0t7mHxbYsJ9gx22E8IcX0LdA/ky1u/5OE2DxfEinkYhTLkHbAfwxC1HhbdjiY9gdfvaMO9XRuoNmebrYxbuIs9MZIwi9pJkmUhaolDZy8x+svtDonyTS2C+PC+G9BdjIIvB0CseqAeOiNZo75kZmaUwzkVTf7o+Om9p/N6r9dx07k57COEcA0GrYEXu73IzH4zcde7YysmWX4/ZT/p9y0Guzpn4g/AlwPQnj/MjJEduKODuqwjI9fCmK92svOUJMyi9pFkWYha4EwGjFu82yFR7hseyBcPd8Etbit8ORBSTqsPdPPm0PAPuPf4AlafXOVw3mCvUJbetpS7mt9VfY0XQtQqQxoPYdnQZQ5LZANsit3IqIOfsPfOmeBeR73x0llYMARd9J98eN8NDGytXsAoM8/K2AU72RadXJ3NF6LKSbIshJPbceoic47oSMtWJ8p9mgcy/+EumPYtgCXDISdVtd3s04DP+j
3Ow/tnEXMpprjlBVh42yJaB7SuvsYLIWqlZn7N+OaOZQ5xjQLnMs8xft97fNR7LLn+dgl1Xjp8ew+GbZ8w54FO3NJKnTBnm62MX7STzVGSMIvaQ5JlIZzYn0cTeWTJXnKt6lS3d/MA5j/YDtOqiZenb1In0odC23Ffk2Z8GvUjFiV/m6aY0Tq+JlmRTwhRPC83L+zXvL7yyqbY+Cr6F0bVD2VvWCf1gYoN1r+B6X8TmHdvKwa1qafanGO28fjXe9mXLNNSitpBkmUhnNT/9p/liaV7yLOop4fr0zyQr4aH4P71UNj/jWpbtkbDrGadGW3K4MSlU6ptxSXL9m+EQgihYp8s291HTmfEMk5/kf+GdyXT/n5y+FfcFg3m09v8uK2deuCw2aqw+LiWb3bGVkerhahSkiwL4WQURWH+Pyd57rv9WOwWHBnSNpiFvRIxfXUTnNun2rbNZGRU89YstCVhU9QJtk6j48GW9zv+Y1q5BQghSmF3j3io5YPotXpVTEFhmeU8I5q1YqO7u/r480cwzO/P7PbRDOsYanechqkrjvLx+hMoSrET1QnhFOSdUggnYrUpvPHbYd5eddRh2z0dA/nU/zsMPz6sqk8+p9cxqV49Hg+pxxlLhsNxLeu05Juh3zCm9cMO2yRZFkKUxn4FzxHN7+KHO36gfWB7h33jrZk8ExzE0yEhnNEXSajz0tH/+hgfeyzgke71HI77cP1xJv9yELPdIktCOAt5pxTCSWTlWXhi6W6WbItx2PZAUCzvpr6AdtcXBbFcDXzu58PwBvVZ52F0OMagNTCx00SW3bGMtgFtUWyOb0RShCGEKJVdsqzYFMLrhLP0tqX8u+u/MelMDof8YzJwV4NQPqnjS1aR47X7lvBa/NO83dPxXvTdrljGLthJWpa56n8GISpJkmUhnMDZ1GzumbeN9UfPq+JabHzTejtvZbyKNvEgkL+QyDoPd+6qH8KcOn7kFJPxdgzqyE/DfuKxDo9huLKAQHFPOaVnWQhRGvt7xOUSL51Wx9i2Y/nlzl/oHtzd4TCzBub7+TK8QQgrPT24kh5rLhxjdMQ4fm3zDwbUifHW6GRGfLaF00mZ1fGTCFFh8k4pRA3bcTKZO2dv5vC5S6p4C10Cu0Pfp/epT9Ap+W8qu01GHgqpx6R6QcQZDA7n8jP68UavN1g8ZDFN/ZqqNyrFPOKUZFkIURqHZFn9qTvMJ4wvB3/Jf/v8lwBTgMPhCXo9k+sGcn9oMFtNl3uhbRY6nZzHRu83aG84q9r/5IVM7vp0C5tOXKjSH0OIypB3SiFqiKIoLNl2mtFf7iA5M68grsXGv0xr+cP0Cv4X9wNwwmDgmXpBjA+pR4TJseRCq9Fyf8v7WTliJaNajEKn1Tn+g8WUYchsGEKI0jjcIYor59JoGNZsGCtHrGRMmzHoNXqHfY4a3XgipC6PBwdxxC3/g36o+TT/M0zh3x6/o6dw+svULDNjF+xk7t9RMvBPOAXH32ghRLXLzLXw2vJD/LJP3avSQRPNTPdFtLRFgxVOGvR87ufLak8PbCUktp3qduKVHq/Qyr9Vqf9mcW869oN3hBBCxa5nWbGVnLx6uXnxn27/YWT4SGbsmMGOhB0O+2xzd2dbfXcGZWbxREoaLc15PMM3DPXYzIvZY9ml5N/HbArMXBPJgdhUPri3I94mxydpQlwrkiwLcY0djb/EM9/uJfpCYV2eLxn8R/89D+r/QmtTiDIY+NzPhzWeHiglJLQNvRsysfNEBjcafHVJr32yLCUYQoiyONwnyu7pbebXjPmD57MhdgMf7f2Ik2knHfZZ5+nBOk8PbsnM4snUNFrnxfCj8U1+st7EDPMDJJO/YNLaI4kMm72ZTx7oRIcGfpX/eYSoAEmWhbhGFEVh2c5Ypq04TO7lhUY02Bil+4eX9csI0KRz2M2Nr/x8WOfpUeJ5/E3+/Kvjv7i7xd2Fg/euhv3jU+lVFkKUxf4+UVw5V7GHabi54c30bdCX36
J/Y+6+uZzPPu+w31+eHvzl6UH/zCweTbvEqNx/GKTdzUzLfSyz3oIVHaeTs7j7s628eGsrHu3TBK1W7l3i2pJkWYhr4EJ6LpN/Ocj6o4kFsb7aCCbrl9FKG8MmdxOLfOuy291xGqYrPPWejGk7hrFtx+Jp8Cx/I6RnWQhRTvZPrYqbgrI0eq2ekeEjua3JbXxz9BsWHFpAel66w34bPD3Y4OlBx5xcxqVdYlrWQsbq1vKu5X7W2zpjtsLbq47yz4kLfHBPR+r6lHyvFKKqSbIsRDVbfSiBKb8eLBjE11Zzmpf0y+imP8TvXh687BPCKbeSe4i9DF501XbljTveINArsMLtsK81lHplIUSZHGbDqNhp3PXuTGg/gfta3se3R79lyZElXMq75LDfAZOR/zMFEWY283BaGh9nzOKQtSUzzA+yX2nOphNJDPrwH6bd2ZbhN4TKfUxcE9K1JEQ1uZiZx6Tv9/Pk13tIzsyjqeYcHxrm8pHnG2wNimNAWH2mBQaUmCh7G7x5suOTrBy+koHuA/E1+lauQfZTx8mbjBCiLPb3ieKmoCwHbzdvnuj4RP59zTQQX7fi72uxBgP/DfTnlob1WRuUyLte05lr+IgWmljSss08//1+Hl+6h/PpOZVqjxBXQ3qWhahiiqLw676zTF95hJQsM+GaOJ7U/4rJ+wA/+njxuntIqccHuQfxYOsHubflvfi4+WA2V9GKVlKGIYQoL/v64HKWYZTEy+BFf1N/3hj0Bv879T++Pvo1CZkJDvtlabX84OPNDz7edMyJ4dn0aSiX2jDPPJJ1R2DX6Yu8PKQV93YNk1pmUW0kWRaiCp1KyuS15YfYHJVEK81p/uX5Iym+p/nEy4OLutJLKJr7NWds27Hc3uR23HRuVd84uzc5eXwphCiLRmM3dVwVz3vsafBkbNuxPNj6QdaeXsviw4s5evFosfseMBk5YDLi6x/PbZkzGZkWxv8yR/HyL2Z+3BPH2yPa0SrYp0rbJwRIsixElUjLNjP7zxMs2XaS7rotjAlay0GfDGa7uQHeJR6nQcON9W/kodYP0Tu0d7UmsA7zo0qyLIQoi8NsGNWzSIhBa2Bo06Hc3uR2dibs5OujX/NP3D/Yiin7SNPp+M7HG3xSaZz3GWPS3YmJH8iwTy4y5samPHtLc/w8qqHDQbgsSZaFqASL1cayXbHMXb+NcMPPdG9whAh3DQc0GqDkm7W/0Y8R4Xdzd4u7CfMOu0atlTIMIUQ5OQzwq5oyjJJoNBp6hPSgR0gPEjIT+On4T/xy/Ccu5CQXu/9pNwOnAywQsJpu2b8TdaQlQ/bezYSbb+ThXo0w6otZzVSIcpJkWYgKUBSFXyOOsWTT55h1W8lpkMV+jYayxsx2CezIfW1GM6DhgOoptSiNfa2hJMtCiDI4PO26hstPB3sG80ynZ3ii4xNsiN3A90e/ZUfi7hL3P+iuA/coNMo7/HrInd/2deP+Xk/wQNcOUnYmKkWSZSGukqIonEo7xaI9v7Lt1G8k6i+iFFRYlHwjDtF5ckeLkdzRchRNfZtek7YWx35+VHnrEEKUqZLzLFcFg9bAoEaDGNRoEGcunWHl8Z9ZEfkjcRbH+ZoBFI2GMx45wCZmHNnEggN+9Gh4B2O7jiC8TrgkzqLcJFkWohQ5lhx2JuxkU+wG/opex3lrav6GMhbO81JgsH8H7uj8FF3q90KrcYJeXPsOIelZFkKUxaEM49r1LBenoU9Dnur6f/yry/Psj9/Fir1zWZ20j3RNye1KNKTyW/zX/Lbia/y13tzSdCD9Gt5C9+DueBhKXi1ViCskWRaiCKvNyrGUY+yM38mO+O3sTthFru3qpm4z2Wz01fkyuOXd9O/8FCaDezW3tpzsaw0lWRZClMV+OrYaTpav0Gg0dArtTqfQ7rxsyeWf/fNZe/Q7NlpSyCrl3nbRls5PUb/yU9SvGDQ6utTrSo/QnnQP7k6bgDbotZIWCUfyWyFcmqIoRKVGsTNhJz
vid7A7YTfp5uIf7RXH02ajT67C4Ab96NtzEu6B4dXY2kqyf3wqjyKFEGXQUPNlGGVx0xsZ2PUZBnZ9htyUGLZs/4A1MX+yyQ3SdSUnzmbFyvaEHWxP2AGAp96DrsHd6BbcjR4hPWhRp4VzPBUUNU6SZeFSciw5HE4+zP7z+zlw4QAHLhzgYs7Fcp2jrsVC36xcOrg157a+z+Le6nbQOv+Ia/v5UaVuTwhRJvte2mqaOq6qGOs04pbbPuEWm42cE+tY889H7M+JZIuHkXh96SlPpiWLjXEb2Ri3EQBfoy83BN1Ax6COdAzqSLvAdlK24aIkWRbXLUVRiMuI41DSoYLkOPJiJBbFUq7zaBWFG3Jz6ZuVQ3BOCMHN76HjveMxeJe+yIjTsX+TkzIMIURZnKxm+apptZha3srwlrdyR2YKEeuWcO7odyQa49jsYWKfyYiljA6DtNw0VfKs0+hoUacFHYI6cEPdG2gf2J4w7zDpfXYBkiyL64LVZiUmPYajyUfzvy7mf6XnXX1JRVGN88x0z8mhW04u3pn1OO01kAY3PUyfrp3R1dYlVe1rlqVnWQhRFvvbRDXPs1wddJ516HTXc3S8cyL/7I3AsHEpjyT8RY7HOXaaTOx0NxLtVvZUnlbFWvDe8n3k90D+st2t/FvROqA1rf1b0yagDY19GqOrBU8bxdWTZFnUKoqikJyTTFRqFNGp0USlRhGVEkVkSiTZluwKnzfUbKF7Tg7ds3PonG3mpLkl65VubGlxG6Nu7sHoML+q+yFqin2PUG1N+oUQ14zDctdOWLN8tbRaDf27doSuHTly7hI/b9xF3pFVPM8uWhiOcMDdwI7LyXOsoYwpjy7LMGewO3E3u4vM/2zSmWhRpwXN6zSnuV9zmvk1o7lfc4Lcg6T8rZaSZFk4JZti43zWeWIuxXAy7WRBYhydGk1qbmqlzq1VFFrkmemYm0vHnFw65ebiYXZni60d662d+cz3Rob2bc3TXRpQ18dUNT+QE3CYZ1keHQohyuJQhlEzzahqbUJ9aPPAAC5m9uW7XWd4b2ckTVO2MUC3l6e1B7HoM9lnMnLAaGS/yY1INzesV5no5lhziEiKICIpQhX3dvMm3C+cZn7NaObXjKa+TWnk04hgz2Ap5XBykiyLGmNTbFzIusCZ9DPEXIrhzKUzBd/HpseSa82tkn/Hz2qlQ24eHXNyuSE3l3a5eehsOvbYWrDJ1oF5tvZE6Zpwa9tQ7u8WRs+mAWivx15X+55l6eEQQpTFYQW/2tuzXBx/Tzee6t+cf/Vrxu6YXvy4O5bXIs4SlnWaPjkH6as9yCTtMWxaC4eNbuw3GjlgdCPCZOSirnylFul56ew9v5e95/eq4m5aN8K8w2jo05CG3g1p6NOQRj6NaOTTiLoedSWRdgKSLItqY7aZScxMJD4znnMZ54i7FMfOrJ389udvxGfFk5CZgPkq5zC+WnUtFlrnmWmdm0frvDza5OZRz2olTzGwX2nGTltL5tpasdPWErPWnb7hgTzSIZRBbevhY7q6x261lkMZhtyAhRBlsO84qMVlGKXRaDR0a+xPt8b+TL2zLX8cTGDVwW4sPpGExpxDV20k3c2RdMs8xmhtFCZNHhd0Oo66GThidOOImxtHjW4kljHjRnHybHlEp0UTnRbtsE2v1RPsEUyoVyj1POqRkZ2BOdpMmG8YoZ6hBHsGY9Bd5+9dTkCSZVFuNsVGam4qF7IucCH7QsF/z2edV3+ffQFbcb0QiZVvg8lmo4nZQvM8M83NeTTPM9MmL49Aa/6/d0Hx4YCtFYtsLdlpa8UhpQl5GHDTaenRzJ832odwa9tg6niWPajjeuFYhiE9y0KI0jnULNeW2TAqwcNNz91dGnB3lwZcyjGz/kgiqw6GMfd4R/IsNgxYaKs5TTftMbrlRXJ3VjT/0iQBkKzVcszoxgmDgSi3/K+TBgPZFeycsNgsxGXEEZcRVxD7e8ffBd9r0BDkHkRdj7oEeQQR5B
5EkMfl15e/D3IPoo6pjvRQV4JTJ8vZ2dnMmDGD7777jjNnzuDv78+QIUOYPn069evXL9e5UlJSmDp1KsuXLychIYHg4GBGjBjB1KlT8fPzq54foJZQFIVsSzYXcy6SmpvKxZyLpOSk5H/lphR8fzHnYn5ynH0Bi618069VlLvNRkOzhWZm8+XEOP+/oRYLVx6AXVS8OGhrwTKlKQdtTYiwNSUBf64M4w70cmN4y7oMaF2XPuFBeBmd+te++thPHSfJshCiLPb3CSefZ7mq+ZgMjOzcgJGdG5CRa2FLVBIbIs+zIdKL+WnNmW+9A4C6pNBBe5L22pN0MJ/kDu0pAjX5c/jbgHN6HdEFCbQb0QYDMQZ9qasNXg0FhfPZ5zmffR6SS95Pr9ET4B5AXY+6+Jv8qWOqQx1jnfz/murgb/LHz+hX8L2H3kM6VIpw2qwhJyeHW265he3btxMSEsLw4cM5ffo0CxcuZOXKlWzfvp2mTZte1bmSkpLo1asXUVFRNG3alLvuuovDhw/z8ccf88cff7Bt2zb8/f2r+SeqfnnWPC7lXeJS3iXS89JJz0vnUu7l783p+dtyC7el5qYWJMNVVR9cEe42G2FmCw0tFhqazTS6/H0js5lAq61g5iKLouWkEsIBJYwfbQ2IVBpwWGlCnBJI0fmN3A06bmriz43NArixWQDtQn2vzxrk8pIyDCFEeTkM8Ls+yzCuhpdRz61tg7m1bTCKohCZmM6GyAtsP5nM7tN61ufWYb2ty+W9FUJJpp32FOGas7S0xtIiL45eWedw06Rf3iO/J/rM5cT5jEFPjMHAGX3+9xXtjS6ORbGQmJVIYtbVPdp107rhZ/IrSKK93bzxcfPBx80HbzfvgtdFv/cx5r826oxV1m5n4bTJ8ltvvcX27dvp1asXa9euxcvLC4BZs2bxwgsv8Mgjj7Bhw4arOtfzzz9PVFQUI0eO5Pvvv0d/uaZo4sSJzJ49m0mTJrFo0aJq+kmKZ1Ns5FhyyLXmkmPJIcuSRaY5s/C/5qz8ryLxK7FMS/72THMm2ZZsMswZpOel12jCWxoPm41Qi4UQi/Xyfy2EWqwF/w2yWlVTeWYoJmKUYHYowZxSQjhua8BxpQGnlBDycKzN8vd044YwPzqF+dGzWQAdG/jhppdE0IH9m5x8gBBClMGhd9EFyjCuhkajoVWwD62CfXiyXzMsVhtH49PZcSqZnacusvP0Rc5lBXLOFshauoE1/zg9FhprEmipiaOFNpammngaWxIYmJOItyat4PwKkKzTck6v55xeT7xed/m/es7qdcTr9WRWY4dHni2P81nnOZ91vtzHumnd8DH64GnwxEPvgYfBo+B7T4Mn7nr3/NcGDzz1+f+9sl/RmLveHaPOiElvqvESEqdMlvPy8pgzZw4Ac+fOLUiUASZNmsTixYvZuHEje/bsoUuXLiWdBoD4+HiWLVuGm5sbn376aUGiDDBz5ky+++47vv76a9577z3q1q1b7rZ+efBLdJ46cqz5iW+uJbfg+yvJcNHvc6w55FhyqnxgW03QKAp1bDbqWqwEWq3UtVoJupz8BloLk2Ffm81hXvtkxZtzSl12KsGcVupxWgnmtK0ep5UQkvDBcSb8fEa9llbB3nRqWCc/QW7oR0N/eVx0NaRmWQhRbnb3ido8z3J10uu0tG/gS/sGvkzo2xSbTeF0ciYHz6ZxMC6NiLNpHD6bRmYeRCkNiFIa8LutZ5EzKARwiUaaRBprEmisTaCxNZFG5kS6aJII0lxS/XsKcEmrIf5yAn1Bpyv80uf/97xOx0WdFuUa3+vzbHkkZSeRlJ1UZec0aA2YdCaMemN+An35e5POhFFnVH1v0l+OXf7ekln5slGnTJa3bNlCWloazZo1o1OnTg7bR40aRUREBCtWrCgzWV69ejU2m42+fftSr1491Taj0ciwYcNYsGABq1atYty4ceVu6+Jji9G5Xz8r9egVBX+rlTpWG342W5HvrfhbbQQVSY
gDrNZi+nnze4bjlQDiFX8OKIHEKwGcI4BzSkBBPIeyH9PU93OndYgPrUO88z/Bh3jTOMCz9q6gV9Mcapal910IUQb73ksXq1muKK1WQ9MgL5oGeTH8hvwxVjabwsmkTA6fSyPqfAbHE9M5cT6DmOQsrDZIxpdkxZe9Sov8QucijOQRokkmVJNMfU0SoSTnvzYnE6ZJprMmFV9NhkM7LECyTkeSTsf5y0l0ik5LijY/kU7RaUkt+F6H2Uk7Ucw2M2abmXRz+VfltWZbK/3vO2WyfODAAQA6d+5c7PYr8YiIiGK3l/dcCxYsuKpzFWfaUgseeue7eWgBnQJaFHSAVgEdSmFMAR2gU/K36y//V+sw47wG0GFTDFjQYUWHBR1n0RKDDgt6LIoWy+W4BR02Cm+uXmQQTgbhxBTbToNOg5tei5tOi5teh5tei/Hy6+JyueLPcp1TFBqmXSJ20aJKDcqzpaapA1KzLIQoi13nRNK8eaT88H3lz1tF97XaRgu0v/x1haJAnsVGjtmK2Wojz2Ijz2rDbFXIs5Tek5+GiTTqc5T6aFDQYy34Mmis6LEUvK6LlRBs6C5/oVHI76Mu7Hm1ocEKWDUaLJf/m//68rYi/7VSNOa8sixW7q3kOZwyWT5z5gwADRo0KHb7lXhMTNmpU1WdKzc3l9zcwprgtLT8xCMwwYpXLcw5rvx5WArKHa7mZmW7/JVfQqIF3C5/VRVzwdmFveTYqj1frtlMcnIpw6drMbPZTFZWFsnJyRiuctlaIdetIq73a3bJbCbXWqRn7uzZ/K8qUtX3tdpOAxgvf1UVG5B3+atQ+RIXLfmdaflJo/N1EJYmw5b/+1uZaQ+dMlnOyMh/lODh4VHsdk9PTwDS08vujq+qc82YMYNp06Y5xG85ebLMNgjhlKJOQGBgTbdCCCGEqHbJycn4+vpW6FinTJad0eTJk5k0aVLB69TUVBo1asSZM2cqfPFdzaVLlwgLCyM2NhYfH5+abk6tIdet/OSaVYxct/KTa1Yxct3KT65ZxaSlpdGwYcNKTRHslMnyldkvsrKyit2emZkJgLe39zU7l9FoxGh0fDDi6+srv7Tl5OPjI9esAuS6lZ9cs4qR61Z+cs0qRq5b+ck1qxhtJcbpOGW1bcOGDQGIi4srdvuVeKNGja7puYQQQgghhGtxymS5Y8eOAOzdu7fY7VfiHTp0uKbnEkIIIYQQrsUpk+XevXvj6+tLdHQ0+/fvd9j+008/ATBs2LAyzzVkyBC0Wi2bNm3i/Hn1SjS5ubmsWLECnU7H7bffXq42Go1G3njjjWJLM0Tx5JpVjFy38pNrVjFy3cpPrlnFyHUrP7lmFVMV102jVGYujWr06quv8vbbb3PjjTeydu3aglkrrix33a9fP9Vy13PmzGHOnDmMGDGCGTNmqM710EMP8c0333D33Xfz3XffFazi99xzz/HJJ58wduzYa77ctRBCCCGEcH5OOcAP8pPl9evXs3XrVsLDw+nbty8xMTHs2LGDoKAgFixYoNo/KSmJyMhI4uPjHc710UcfsX37dn7++WdatWpF165dOXz4MIcOHSI8PJxZs2Zdqx9LCCGEEELUIk5ZhgFgMpn4+++/ee211/Dw8GD58uXExMQwbtw49u7dS9OmTa/6XIGBgezcuZNnn32WvLw8fv31V9LS0pg4cSI7d+6s1HQiQgghhBDi+uW0ZRhCCCGEEELUNKftWRZCCCGEEKKmSbJcTaZPn45Go0Gj0fD111/XdHOcUkREBM888ww9e/YkNDQUo9GIr68vvXr1Yvbs2ZjN5ppuotM5duwY7777LjfffDOBgYEYDAaCg4MZOXIkmzZtqunmOa3MzEyWLl3Ks88+S48ePTAajWg0GqZOnVrTTatx2dnZvP7667Ro0QKTyURoaCiPPPIIZ8+eremmOa09e/bwzjvvMHLkSBo0aFBwrxfFy8rKYvny5Tz66KO0bNkSk8mEp6cnHTt25M033yQjI6Omm+
i0Zs2axciRIwkPD8fX1xej0UijRo0YM2YMBw8erOnm1QrJycnUrVsXjUZD8+bNK3YSRVS5Y8eOKUajUdFoNAqgLF26tKab5JRmz56tAEqjRo2UAQMGKPfff78yYMAAxWQyKYDSr18/JTc3t6ab6VTq16+vAIqXl5cycOBA5d5771XatWunAIpGo1E+/PDDmm6iU9q3b58COHy98cYbNd20GpWdna307NlTAZSQkBDl3nvvVbp3764ASlBQkBIdHV3TTXRKw4cPL/b3SRRv/vz5BdeodevWyj333KPceuutire3twIorVq1UhITE2u6mU4pICBAMZlMSvfu3ZURI0YoI0aMUFq0aKEAisFgUFasWFHTTXR6Y8eOLcjHmjVrVqFzyF93FbPZbMpNN92k1KtXr+CGKsly8aKjo4t9M05ISChIAGfPnl0DLXNeAwYMUJYsWaJkZ2er4vPmzVMARafTKYcPH66h1jmvqKgo5dFHH1XmzZun7NmzR3nzzTclWVYUZcqUKQqg9OrVS0lPTy+If/DBBwUfWIWjd955R3nttdeU3377TYmPj1eMRqMky6VYtGiR8vjjjytHjhxRxc+dO6d06tRJAZQHHnighlrn3DZv3uxwv1cURZk7d64CKPXq1VPMZnMNtKx2WL9+vQIojz/+uCTLzuSLL75QAOXrr79Wxo4dK8lyBS1dulQBlBEjRtR0U2qNwYMHK4AyderUmm6K05sxY4bLJ8u5ubmKr6+vAih79+512N6hQwcFUHbv3l0DratdJFmuuK1btyqAYjQa5UliOTVr1kwBlAMHDtR0U5xSVlaW0qxZM6VNmzbK8ePHK5UsS81yFUpISODFF19kwIABjB49uqabU6sZDAYA3NzcargltceVpd3PnTtXwy0RtcGWLVtIS0ujWbNmdOrUyWH7qFGjAFixYsW1bppwIVfuW7m5uSQnJ9dwa2oXeZ8s3bRp0zh58iTz5s0ruFYVJclyFZo4cSLZ2dl89tlnNd2UWi0lJYUPPvgAgKFDh9Zwa2qPkydPAhAcHFzDLRG1wYEDBwDo3LlzsduvxCMiIq5Zm4TruXLfMhgMsuZBOSxdupTIyEjCw8MJDw+v6eY4nYiICD744APGjx9P3759K30+p13Br7ZZuXIlP/74I9OmTZNf3HI6ceIEb7/9NjabjcTERLZu3UpGRgZPPvmk9NBfpejoaFauXAnAnXfeWcOtEbXBmTNnAGjQoEGx26/EY2JirlmbhOv5+OOPARgyZAhGo7GGW+O8Zs6cyeHDh8nMzOTo0aMcPnyY0NBQli1bhk6nq+nmORWbzcaECRPw8/Pjvffeq5JzSrJcBTIyMnjqqado0aIFL730Uk03p9ZJTExk8eLFqtjEiROZPn06Wq08/CiLxWJh3Lhx5Obmct9999GlS5eabpKoBa5M1+Xh4VHsdk9PTwDS09OvWZuEa1m1ahVfffUVBoOB6dOn13RznNqaNWv4888/C143atSIJUuWyP2+GLNnz2bXrl0sXLiQgICAKjmnJMvAiBEjOHr0aLmOWbJkCd27dwfglVdeITY2lj///NOlPhlX9rpd0adPHxRFwWq1cubMGX799VemTZvGH3/8wdq1a2ncuHEVtrpmVdU1K2rixIls3ryZpk2b8umnn1a2iU6pOq6bEKLmHDt2jIceeghFUZg5c2ZB7bIo3vr16wFITU3l4MGDvPnmm/Tr14+33nqLKVOm1HDrnMeZM2d49dVX6devH+PGjauy80qyDJw6dYrIyMhyHZOVlQXAzp07mTt3Lg8//DC33HJLdTTPaVXmuhVHp9PRpEkTJk2aROPGjbn77rt59tlnr6sBRlV9zd5++20+++wz6tWrx5o1a67bmr+qvm4CvLy8gJKvU2ZmJgDe3t7XrE3CNZw9e5YhQ4aQkpLCpEmTeO6552q6SbWGn58fffv2ZdWqVfTq1YvXXnuNwYMH061bt5pumlN4+umnycvLY968eVV6XkmWgf
3791f42FWrVmGz2Th48CD9+/dXbTt27BiQn9B8+eWXDBkyhJdffrkSLXUulbluZRkxYgReXl6sXr2avLy862a0b1Ves3nz5vHqq6/i6+vL6tWrK74yUS1Qnb9rrqphw4YAxMXFFbv9SrxRo0bXrE3i+nfx4kUGDx5MTEwM48eP5/3336/pJtVKBoOB++67jz179rBixQpJli9buXIlfn5+PPnkk6p4Tk4OkP9B7Uqu9t133131gHhJlqtIaW/mx44d49ixY9dVOUF102g0+Pv7c+bMGVJSUqhXr15NN8mpfPfddzz99NN4eHjw+++/c8MNN9R0k0Qtc+Wx9969e4vdfiXeoUOHa9YmcX3LyMjgtttu48iRI4wcOZL58+fLMuGVEBgYCMCFCxdquCXOJTU1lY0bNxa7LScnp2DblQT6asjoqUqaOnUqSv7iLg5fY8eOBfKneFEUhUWLFtVsY2uRkydPEhsbi4+PT8ENQeRbtWoVY8aMQa/X8+uvv9K7d++abpKohXr37o2vry/R0dHFftj/6aefABg2bNg1bpm4HuXm5jJ8+HB27tzJrbfeKrM4VIErSV+zZs1quCXOo6R87NSpU0D+tboSK08HpiTLosbMnj2bhIQEh3hkZCQPPvggiqIwZswYuaEWsWXLFkaNGoWiKHz//fcMHjy4ppskaik3NzeeeeYZIL/O70qNMsCsWbOIiIigX79+MtpeVJrVauWBBx7gr7/+om/fvvzyyy/XTWldddqyZQurV6/GZrOp4mazmdmzZ7N06VLc3d257777aqiFrkPKMESN+eCDD3j++efp2LEjzZs3R1EUYmJi2LNnDzabjZtuuokZM2bUdDOdyh133EF2djZNmjRh+fLlLF++3GGfPn36MGHChGvfOCc3YsQI4uPjgcJVDr/88ktWr14NQEhICL/++muNta8mvPrqq6xfv56tW7cSHh5O3759iYmJYceOHQQFBbFgwYKabqJT+v3331VTneXl5QHQs2fPgthrr70miypdNmfOnIK/rcDAQJ566qli93v//fflSWIRJ06cYPz48QQGBtKlSxcCAgJISkri4MGDxMfHYzKZWLRoEWFhYTXd1OueJMuixrz99tusWrWK3bt3s2bNGrKzs/H392fQoEE88MADPPzwwzLPsp3U1FQgf3aIK4+ViiPJsqN9+/Y5LLBx9uxZzp49C7jmQDaTycTff//NjBkz+Pbbb1m+fDn+/v6MGzeO6dOnl7hgiau7cOECO3bscIgXjUkdaaGUlJSC70v7QDp16lRJlovo168fr7zyChs3biQiIoKkpCTc3Nxo3Lgxo0aNYuLEidf1wG5nolEURanpRgghhBBCCOGMpNtOCCGEEEKIEkiyLIQQQgghRAkkWRZCCCGEEKIEkiwLIYQQQghRAkmWhRBCCCGEKIEky0IIIYQQQpRAkmUhhBBCCCFKIMmyEEIIIYQQJZBkWQghhBBCiBJIsiyEEEIIIUQJJFkWQgghhBCiBJIsCyGEEEIIUYL/B7zrgHzbWJXAAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAssAAAIHCAYAAABpIhEUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd1xT5/4H8M/JAsKSvaeCuHCgVgT3to5WrR12qPV2XL291U5b26t2L2/ttcvW1draYVtXbd2gouAEnKjIFpC9R8b5/cEvgXMSIAmBhOT7fr18tec568lhffPk+3wfhmVZFoQQQgghhBANAlN3gBBCCCGEEHNFwTIhhBBCCCGtoGCZEEIIIYSQVlCwTAghhBBCSCsoWCaEEEIIIaQVFCwTQgghhBDSCgqWCSGEEEIIaQUFy4QQQgghhLSCgmVCCCGEEEJaYdbBcl1dHd58802Eh4fD1tYWvr6+WLx4MfLy8nS+xtatW8EwTLv/vvvuu058JYQQQgghpDtizHW56/r6eowbNw6JiYnw8fHBqFGjkJmZiTNnzsDDwwOJiYkIDQ1t9zonT57Et99+q3VfRUUFdu3aBQBIT0/X6XqEEEIIIcR6mG2wvGrVKrzzzjuIjo7GwYMH4eDgAABYt24dXnjhBYwZMwZxcXEduseXX36Jf/7zn4iJicHJkyeN0GtCCCGEEGJJzDJYbmxshKenJyoqKnDhwgUMHjyYs3/gwIFITU3FuXPnEBUVZfB9YmJicOrUKXz11Vd4+umnO9ptQgghhBBiYcwyZzkhIQEVFRXo2bOnRqAMAPPmzQMA7N271+B7ZGRk4NSpU5BIJJg/f77B1yGEEEIIIZbLLIPllJQUAMCQIUO07le1p6amGnyP7du3AwDuvfdeuLi4GHwdQgghhBBiuUSm7oA22dnZAAB/f3+t+1XtWVlZBt9DFSw/9thjOh3f0NCAhoYG9bZSqURpaSnc3NzAMIzB/SCEEEIIIZ2DZVlUVVXB19cXAoFhY8RmGSxXV1cDAKRSqdb99vb2AICqqiqDrn/mzBncuHEDrq6uuPfee3U657333sOaNWsMuh8hhBBCCDGdnJycVgdh22OWwXJnU40qz58/HxKJRKdzVq5ciRUrVqi3KyoqEBgYqA66SftkMhmOHTuGcePGQSwWm7o73QY9N/3RM9PdmYIzeOHECxrtb414C2MDxnZ9h7oZ+l4zDD03/dEzM0xpaSnCw8Ph6Oho8DXMMlhWlYmrra3Vur+mpgYADHrhcrkcP//8MwDdUzAAwMbGBjY2Nhrtrq6ucHNz07sf1kgmk0EqlcLNzY1+0PVAz01/9Mx0V11SDaGdUKO9SlRFv9t0QN9rhqHnpj96Zh3TkZRZs5zgFxgYCADIzc3Vul/VHhQUpPe1Dx48iLt37yI0NBQjR440vJOEEGIB8qvztbbfqbnTxT0hhBDzZJbB8sCBAwEAFy5c0Lpf1R4ZGan3tVUpGI8++qiBvSOEEMuRV52ntf1ONQXLhBACmGmwHBMTA2dnZ6SnpyM5OVlj/86dOwEAM2fO1Ou61dXV2L17NwAKlgkhBADya2hkmRBC2mKWwbJEIsGyZcsAAEuXLlXnKANNy12npqZizJgxnNX7NmzYgIiICKxcubLV6/7++++ora3FiBEjEBYW1nkvgBBCuonWRpbzq/Nhhgu8EkJIlzPLCX4AsGrVKhw+fBinTp1CWFgYRo0ahaysLCQlJcHDwwObN2/mHF9cXIy0tDTk52sfJQH0r61MCCGWTKaQoai2SOu+alk1Khsr4Wzj3MW9IoQQ82KWI8sAYGtri2PHjuGNN96AVCrFrl27kJWVhYULF+LChQsIDQ3V63r5+fk4evQoxGIxHnzwwU7qNSGEdB8FtQVg0frocWspGoQQYk
3MdmQZAOzs7LB27VqsXbu23WNXr16N1atXt7rfx8cHcrnciL0jhJDurb1JfHnVeYhwjeii3hBCiHky62CZEEJI52kvWG6trByxHjKZDAqFolOuKxKJUF9f3ynXt0T0zAChUGiSGtMULBNCiJVqr+IFVcSwXpWVlSguLkZDQ0OnXJ9lWXh7eyMnJ6dDi0VYE3pmTWxsbODu7g4nJ6cuuycFy4QQYqXaG1mmWsvWqbKyEnl5eXBwcIC7uzvEYrHRgzOlUonq6mo4ODhAIDDb6VNmxdqfGcuykMlkqKioQF5eUxWfrgqYKVgmhBArxQ+GgxyDkFWV1ep+Yh2Ki4vh4OAAf3//ThvBVCqVaGxshK2trVUGfoagZ9Y0l83R0RG5ubkoLi7usmDZOp82IYQQjWoXQzyHcLYpDcP6yGQyNDQ0wNnZ2ao/6ifmi2EYODs7o6GhATKZrEvuScEyIYRYIblSjoKaAk5blGcUZ7uioQI1shoQ66GaOGaKSVSE6Er1/dlVEx0pWCaEECtUVFsEBcv9Q8MfWQYoFcNa0agyMWdd/f1JwTIhhFgh/jLXdiI7eEm94MA4cNppYRJCiLWjYJkQQqwQPwj2sfcBwzDoIejBaaeRZUKItaNgmRBCrBA/CPZ18AUACpYJIYSHgmVCCLFC/EoXvvatBMtUEYMQAEBtbS0+++wzTJ48GT4+PrCxsYGjoyP69u2LhQsXYs+ePRa1sl5cXBwYhsHChQtN3RWTozrLhBBihfgjxj4OPgBoZJkQbRISEvDAAw8gPz8ftra2GDZsGHx9fdHQ0ID09HRs27YN27ZtQ9++fXHlyhVTd5cYGQXLhBBihfhBsJ+DHwDAReDS5nGEWJsLFy5gwoQJaGhowEsvvYRVq1ZpLIaRk5ODdevW4auvvjJRL41v+PDhuHbtGpydnU3dFZOjNAxCCLEySlapdYIfoDmyXFJfgnp5fVd1jRCzolQq8eijj6KhoQFvvfUWPvzwQ62rxgUEBOC///0vTp48aYJedg6pVIqIiAj4+PiYuismR8EyIYRYmeK6YsiU3JWvVCPL/GAZoPJxxHrt378f165dQ2BgIFauXNnu8VFR3IV9Tpw4gWXLliEyMhIuLi6ws7NDREQEXn31VZSXl2ucv3XrVjAMg9WrV2u9/owZMyAUCpGZmclpv3z5Mh599FGEhobC1tYWHh4eGDRoEJ5//nnk53N/fk+dOoX77rsPQUFBsLGxgbe3N4YPH45XX30V1dXV6uNay1kuLy/H//73P0yZMkV9DTc3N0ydOhWHDh3S2u+xY8eCYRhkZmZi165dGDFiBOzt7eHq6oqHH34Yubm52h+omaA0DEIIsTL81AqxQAw3Ozco5ArYMDZwljijorFCvT+/Oh8hziFd3U1iRpRKFmW1jUa8nhJVtTLIBA0QCDp33M5FKoFAYNgiFn/99RcA4IEHHoBQKNT7/JdeegkpKSmIjIzEhAkTUF9fjwsXLuCDDz7Avn37kJiYCAcHh/Yv1Ibz588jNjYW9fX1iIyMxOzZs1FbW4vbt29j/fr1uO+++9Sjw3v37sV9990HlmUxfPhwjBw5EuXl5bh58yY++OADPPPMM+32JzExEc899xyCg4PRu3dvREdHIzs7GwcPHsTBgwfx7bffYvHixVrP/eKLL7Bu3TqMGjUK06dPR1JSEn766SecP38eKSkpsLOz69Cz6CwULBNCiJXRVjZOwAigQNNMfh97H06wnFfDXcCEWJ+y2kZEvX3Y1N0wyPlVE+HmYGPQuSkpKQCAwYMHG3T+f/7zH4wcOZKT99vQ0IDnnnsOGzduxLp16/Dmm28adG2Vzz77DPX19fj444/xwgsvcPZdv36dc++PP/4YSqUSO3fuxNy5cznHnj17Fm5ubu3er3fv3jh9+jRGjBjBab948SLGjx+P5cuXY/78+VqD7s8//xwnTpxAdHQ0gKYKI5MmTcKpU6ewY8eOVoNsU6M0DEIIsTL8cnCqfOXWtv
OrKQ2DWKeSkhIAgLu7u9b9Tz75JBYuXMj51zJvedq0aRoT5GxsbPDpp59CJBJh9+7dHe5jUVERAGDixIka+/g5x20dO2zYMDg6OrZ7v5CQEI1AGWh6Q7F06VJUVlbi2LFjWs9dvny5OlAGmvKiV6xYAQA4fvx4u/c2FRpZJoQQK8MPflULkqjwg2X+0tiEkCbbtm3TqK08duxYxMbGqrfz8vKwd+9eXL9+HZWVlVAqlQAAiUSCmzdvdrgPUVFR+Ouvv7B06VK8/fbbiI2NhUikPbyLiorCtWvX8Nhjj+GNN95AVFSUQWkwCoUCR44cwalTp5Cfn4+GhgYAUL+e1l7X5MmTNdrCw8MBQCO32pxQsEwIIVaGn1ahWpCktW2a4EeslSotobi4WOt+uVyu/v9nnnkGX3/9NWf/unXr8Oqrr0Imk/FPNZqXXnoJJ0+eRFxcHMaNGwcHBwdER0fj3nvvxcKFCzkj2++++y4uXbqEvXv3Yu/evXBxcUFsbCxmzZqFRx99FLa2tu3eLzc3FzNmzFCnqGhTVVWltd3f31+jTTWarQq4zREFy4QQYmVoZJnoy0UqwflVmh/dG0qpVKKquhqODg5dMsHPUAMHDkRCQgIuXryIBQsW6HVuYmIiXnjhBTg7O2P9+vUYO3YsvL29YWPTlD/t6+ur92iqalS6JScnJxw9ehQJCQnYu3cv4uLicPToURw6dAjvvfceTpw4gbCwMABNJe7OnTuHo0ePYt++fYiPj1cHzh9++CFOnz7dbt7ykiVLkJKSgrlz5+Lll19G79694ejoCIFAgI0bN+Lpp58Gy7Jaz+3sr3VnoWCZEEKsCMuyGsEvP1jmbxfVFqFR0QiJ0PCgg3RvAgFj8CQ5bZRKJcTKBjg52Jh1ADVt2jR88cUX+PXXX/HBBx/oVRHjjz/+AAC88847eOKJJzj76urqUFBQoHGORNL0M9ayhFtLeXna37gyDIPY2Fh1+sfdu3fx/PPPY8eOHXj99dfxyy+/qI8ViUSYPHmyOiUiKysLixcvxtGjR/HBBx/gww8/bPU11dTU4NChQ/Dy8sLPP/+s8Txu377d6rndmfl+hxJCCDG64rpiNCi4H3f6O3A/GuWnYbBgaSU/YpWmT5+OPn36IDs7G++9955e55aVlQHQnnrw66+/ah19VU3Gu3Hjhsa+Gzdu6FyP2NPTU12r+fLly20eGxQUhFdeeUWnYysqKqBUKuHj46MRKMtkMvUbBEtDwTIhhFgR/qiyRCCBh9SD0+YocYSzDXcGP6ViEGskEAjw/fffw8bGBm+88QZefvllVFRUaBxXUlKCtLQ0Tptq4tqmTZs4OctXr15VB6d8w4YNg1QqxV9//YXz58+r24uLi/HUU09pTcP46quvkJGRodG+f/9+AE2pFyr//e9/tY5oaztWG09PTzg7O+Py5ctISEhQtysUCrzyyitag3xLQMEyIYRYkZyqHM62qsYyH3+0ObfKvFfYIqSzREVF4fDhw/D29sZHH30ELy8vjBkzBg8//DDuv/9+DBs2DD4+PoiLi0NERASGDh0KAFi0aBG8vb2xd+9e9O7dGw8++CAmTZqEQYMGYdSoUQgKCtK4l4ODA1588UXI5XLExsZi6tSpmDZtGsLDw6FQKDBs2DCNc7766iuEhoaiX79+mDdvHh566CEMGjQIy5cvh62tLaeO85o1a+Dn54chQ4bgwQcfxPz589G7d2+sX78erq6uePHFF9t8FiKRCC+//DLkcjnGjBmDyZMn46GHHkKvXr3w1VdfYenSpR182uaJgmVCCLEiudXcoNffUfMjYm3t/PMIsSaxsbFIT0/H+vXrERsbi7S0NPz22284fPgwqqqqMH/+fPzxxx+4dOkS+vfvD6CpksbZs2fxyCOPoLGxEXv27EFeXh7eeust7Nixo9V7rV69Gh999BH8/f1x9OhRXL58GYsXL8aBAwfUOc0tvfXWW1i8eDEYhsGRI0ewd+9e1NXVYcmSJU
hOTkZMTIz62P/973946KGHUFtbi7/++gt///03RCIRVqxYgdTUVPVEwLa89tpr2LZtGyIjI5GQkIDDhw9j4MCBSExMVL9RsDQ0wY8QQqxIXhU3ncLPwU/rcfx2SsMg1k4qleK5557Dc889p/M5/v7++OGHH7Tuy8zM1NrOMAxefPFFjVFepVKJffv2wcnJiTMpcubMmZg5c6ZO/Xnsscfw2GOP6XTs2LFjW61q8fjjj+Pxxx/XaI+MjMTChQs12uPi4lq9T3BwcKv3MRc0skwIIVaEP0Ic4Kg9R1FjZJnSMAghVoqCZUIIsSL8oJefm9xaOwXLhBBrRcEyIYRYiUZFI+7W3uW0+TlqT8PgB8tVsipUNGhWASCEEEtHwTIhhFiJO9V3wIKbG9hazrK3g7dGlQya5EcIsUYULBNCiJXgB7s9bHrAUeKo9VixQKyx7DWlYhBCrBEFy4QQYiV0rYTR2n6qiEEIsUYULBNCiJXQtcZya/tpZJkQYo0oWCaEECuhayWM1vZTsEwIsUYULBNCiJXgp1G0VglDvZ/SMAghhIJlQgixBizLIqcqh9PW7sgyLw3jTvUdKJQKo/eNEELMGQXLhBBiBSobK1Etq+a06ZuzLGflKKwtNHrfCCHEnFGwTAghVoA/uU/ACOBt793mOS42LrAT2XHaKBWDEGJtKFgmhBArwJ+c52PvA7FA3OY5DMNQRQxCiNWjYJkQQqyAvpUwWjuOn/dMiKVjGAYMw5i6Gxxjx44FwzDIzMzstHsEBweb3es2FQqWCSHECuhbCUN9HFXEIMTiZGZmgmEYjB071tRd6RZEpu4AIYSQzmfwyDI/DaOa0jAIMbXvvvsOtbW18PPT7U2vIY4cOQKZTNZp1+9OKFgmhBAroO/qfSoBjgHc61DOMiEmFxgY2On36NmzZ6ffo7ugNAxCCLFwCqUC+dX5nDZ+ekVr+MeV1peiVlZrtL4RYmlycnLw9NNPIygoCDY2NvD09MScOXNw9uzZVs/5/fffMWLECEilUri7u+OBBx7ArVu3sHr1ajAMg61bt3KOby1nOSsrC88++yzCw8MhlUrh6uqKfv364emnn0ZaWhoAYPXq1QgJCQEAxMfHq3OyGYbBwoUL1ddqK2c5JycHzz33HMLDw2FnZwdXV1cMHToUa9asQWVlpf4PzczRyDIhhFi4wtpCyFk5p03XkWVfB1+NtrzqPIS5hBmlb6SbUCqBulKjXo+prQKEjYCgk8ft7Fw7/x7/79KlSxg/fjyKi4vRu3dvzJkzB9nZ2fjjjz+wd+9e/Pjjj3jggQc456xfvx7PP/88BAIBRo8eDW9vbyQlJWH48OGYOXOmzvfOycnBkCFDUFpairCwMEyfPh0KhQJZWVn45ptvEB0djd69e2PQoEGYO3cufvvtN3h5eWHq1Knqa8TGxrZ7nxMnTmDWrFkoLy9HcHAwZs6cibq6Oly/fh2rV6/G7NmzMWjQIJ373R1QsEwIIRaOnzohFUnhYuOi07l2Ijt42HmgqK6Icz0Klq1MXSnwkfE+lhcAcDba1drxUjpg797pt2FZFgsWLEBxcTFefvllvP/+++qR2d9++w3z58/H4sWLERsbCx8fHwDA7du38fLLL0MikeDvv//GuHHjAAByuRxPPfUUtmzZovP9v/32W5SWlmLZsmX43//+x9mXnZ2tzj++7777MGjQIPz222+IiIjQGLVuS2lpKebOnYvy8nJ89NFHWLFiBQQt3oicPn0avr6ab7C7O0rDIIQQC6etEoY+JaGoIgYh7YuLi8OlS5cQGBiIt99+m/MzNnfuXNx3332orq7G5s2b1e2bN29GY2MjHnvsMXWgDAAikQjr1q2Dg4ODzvcvKmp6Qztx4kSNfYGBgUbJQf72229RVFSEqVOn4sUXX+QEygAQHR0NT0/PDt/H3FCwTAghFo5fG1nXShjq46kiBiHtOnHiBABg/vz5EIs1F/x57LHHOMcBQEJCAgBopGYAQI8ePT
B58mSd7x8VFQUAeO2117Bv3z7U19fr3nkdHT58GADw9NNPG/3a5sysg+W6ujq8+eabCA8Ph62tLXx9fbF48WLk5Rk2qpGZmYlnnnkGISEhsLGxgbu7O6Kjo/HRRx8ZueeEEGI++MGtrpP7VPjBMi1MQoimO3fuAGiaGKeNqr1lDJOf3zTxNiAgQNspelW9WLhwIebPn4+rV69i5syZcHFxwejRo/Huu++ioKBA5+u0JSen6Wff2iplmG3Ocn19PcaPH4/ExET4+Phg9uzZyMzMxJYtW7Bv3z4kJiYiNDRU5+v99ddfmDdvHurq6jBkyBCMGDECJSUluHTpEr7++mu89NJLnfhqCCHEdHIqucFtoJN+Zaf45eMoWLZCdq5Nub9GolQqUVVVBUdHR42P8o3OzrVzr6+jzl4NTygU4ueff8arr76K3bt34+jRo0hKSsKJEyfw/vvv4++//8bIkSM7tQ+WymyD5bfffhuJiYmIjo7GwYMH1Xk769atwwsvvIDFixcjLi5Op2tdv34dc+bMgaOjIw4dOsT5ZlEqlbhw4UJnvARCCDEL2VXZnO1AR/2CZf7xuVW5UCgVEAqEHe4b6SYEAuNOklMqwSokgL1Tl1Wq6GyqiW1ZWVla96vKvLVcSMTHxwdpaWnIyclB3759Nc5RjeTqY/DgwRg8eDBWr16NyspKrF69Gv/973/x/PPP48yZM3pfr6WAgABcv34d6enpGDBgQIeu1Z2Y5XdoY2MjNmzYAAD4/PPPOQnuK1asQGRkJOLj43H+/HmdrrdixQrU19dj69atGu+qBAIBhg4darzOE0KIGaloqEBlI7fuqb7BMn9kWaaUobC2sMN9I8SSjBo1CgDw66+/QqFQaOzfvn075zgAiImJAdBULYOvoqICBw8e7FCfnJyc8N5774FhGFy+fFndLpFIADRV3dCHavLgxo0bO9Sv7sYsg+WEhARUVFSgZ8+eGDx4sMb+efPmAQD27t3b7rVycnJw4MABhIaGYvr06UbvKyGEmLPsSu6osogRwcfBR69ruNq6wl5sz70ub7SaEGs3duxYDBgwAJmZmXjzzTfBsqx63x9//IHff/8dDg4OWLx4sbp90aJFkEgk+O6773D8+HF1u0KhwAsvvICqqiqd7//9999zAmKVv/76CyzLcvKi3d3dIRaLkZ6erjWwb82SJUvg7u6Ov/76C59++innNQJAYmIi7t69q/P1uguzTMNISUkBAAwZMkTrflV7ampqu9eKi4uDUqnEyJEjIZfL8fvvvyMhIQEKhQL9+/fHgw8+CBcX3eqNEkJId8MPan0dfCES6Pern2EYBDoG4lrptebrVmZjhM8Io/SRkO5gxIjWv9+XLFmCJUuW4IcffsC4cePw7rvv4o8//sCgQYOQnZ2NhIQEiEQibNq0SV1jGWiaKPfhhx/i+eefx7hx4zBmzBh4eXnhzJkzKC0txaOPPort27erR4Lb8ttvv+Hxxx9Hz549MWDAANjZ2SEjIwNJSUkQCAR4++231cdKJBJMnToVe/fuxcCBAzFkyBBIJBLExMRg0aJFrd7D1dUVv/76K2bNmoXly5fjs88+w7Bhw1BXV4dr167h1q1buHjxosWVjzPLYDk7u+mXu7+/9vJGqvbW8oJaunr1KgDAwcEBo0aNQmJiImf/66+/jp07d3LqG2rT0NCAhoYG9bZqOUeZTKYu9E3apnpO9Lz0Q89Nf/TMmmWWZ3K2/R38W30ubT03fwd/TrCcWZ5JzxeW970mk8nAsiyUSiWUSmWn3Uc1Iqm6V3eQlJTU6r4pU6ZAqVSiX79+OHfuHN555x0cOHAAO3fuhLOzM2bPno1XX30Vw4cP13i9//rXv+Dr64uPP/4YiYmJsLW1xdixY/Huu+/i448/BgC4uLhwnplKy6/T888/Dz8/P5w6dQonTpxATU0NfH19MX/+fKxYsQJDhw7l3Hvjxo146aWXcPjwYfz4449QKBSQyWR44oknOP3j93
f06NG4ePEiPvroIxw4cAC7du2Cg4MDQkJCsGbNGoSEhHT611SpVIJlWchkMgiFbc+dMMbPJsPyx9DNwFNPPYVvvvkGr7/+OuedkMqtW7cQFhaGsLAw3Lhxo81rPfPMM/j6668hEong4OCAL774AlOnTkVRURHeeustbN++Hc7Ozrhy5Qon6Z5v9erVWLNmjUb7jz/+CKlUqv+LJISQLrCzZieSZcnq7RGSEZghnaH3dQ7WHcTxhuaPifuI+mCBwwJjdJGYEZFIBG9vbwQEBOg0mkk6j0KhQGxsLNLS0nDt2jV4eXmZuktmo7GxETk5OSgoKGg377q2thaPPPIIKioq4OTkZND9zHJk2ZhU727kcjm+/vprzJ8/H0DTu7Tvv/8eaWlpOHv2LL744gu88847rV5n5cqVWLFihXq7srISAQEBGDduHNzc3Dr3RVgImUyGQ4cOYdKkSVoLthPt6Lnpj55Zs18O/gIUN2/H9o/F9Ajt8zfaem6N6Y04ntQcLDfaN9I8EFje91p9fT1ycnLg4OAAW1vbTrsPy7Lq0nGdXVLN3KWnp8PNzQ09evRQtzU0NOD111/H9evXMWHCBISFhdEza6G+vh52dnYYPXp0u9+nJSUlHb6fWQbLquoXtbW1WvfX1NQAABwdHXW+loODg9YVchYtWoSzZ88iPj6+zevY2NjAxsZGo10sFlvEL8iuRM/MMPTc9EfPTHNBkhCXkHafibbnFtIjROO6QpEQAsYs54l3OUv5XlMoFGAYBgKBoFPrH6sGslT3sma//fYb/vOf/yAqKgoBAQGorKxESkoK8vPz4e7ujs8//xwCgYCeWQsCgQAMw+j0c2eMn0uzfNqqFWtyc7UvqapqDwoKavdaqmMCAwO1vhNTrahjibM3CSHWraqxCqX1pZw2fhk4XfEXMmlQNOBuLf3eJKSjJkyYgDlz5iA/Px9//vknjh07Bjs7Ozz77LO4cOECevfubeouWj2zHFkeOHAgALS6WIiqPTIyst1rqUrPlZWVad1fWtr0h6RlLWdCCLEE/JX2BIxA76WuVTzsPGArtEW9op5zfW977w71kRBrN2zYMOzYscPU3SBtMMuR5ZiYGDg7OyM9PR3Jycka+3fu3AkAmDlzZrvXGjlyJNzc3FBQUIC0tDSN/ar0C231nAkhpDvjl43zsfeBRGjYpC2GYRDgxB2V5tdwJoQQS2SWwbJEIsGyZcsAAEuXLlXnKANNy12npqZizJgxiIqKUrdv2LABERERWLlyJedaIpEIK1asAMuyWLp0qbrkGwAcPnwYW7duBcMwePrppzv5VRFCSNfKqeSOLOu7ch9fkCM39Y0WJiGEWAOzTMMAgFWrVuHw4cM4deoUwsLCMGrUKGRlZSEpKQkeHh7YvHkz5/ji4mKkpaUhPz9f41ovvfQSjh07hsOHDyM8PBwjRoxAcXExEhMToVAo8M4772D48OFd9dIIIaRL8INZft6xvvgjy/w0D0IIsURmObIMALa2tjh27BjeeOMNSKVS7Nq1C1lZWVi4cCEuXLiA0NBQna8lFouxf/9+fPDBB3B3d8eBAwdw6dIljBkzBnv37sVrr73Wia+EEEJMg58mYejkPhX+yDSlYRBCrIHZjiwDgJ2dHdauXYu1a9e2e+zq1auxevXqVveLxWK8/PLLePnll43YQ0IIMV/8kd+OpmFoBMtV2WBZ1uprvhJCLJvZjiwTQggxXK2sFkV1RZy2jqZh8M+vk9ehpL7jBf8JIcScUbBMCCEWiD+qzICBv6N/h67pKfWERMCtpkGpGIQQS0fBMiGEWCD+5D4vey/YCDVXIdWHgBFo5D1TRQxCiKWjYJkQQiwQf8S3o/nKKlRrmRBibShYJoQQC8RPw+hoJQwVftBN5eMIIZaOgmVCCLFAxq6xrL6OlooYhFgyhmHMruKLakG1tqqAEeOhYJkQQixQV6Vh5FTmgGVZo1ybEELMEQXLhBBiYerl9SisLeS0dVYaRpWsCm
UNZUa5NiGEmCMKlgkhxMLkVuVqtBkrWPa294ZIwF3Piib5EUIsGQXLhBBiYfh5xB52HpCKpUa5tkgggr8Dt14z5S0T0qS8vBz/+9//MGXKFAQFBcHGxgZubm6YOnUqDh06pPWcsWPHgmEYZGZmYvv27YiKioJUKoWnpyeeeOIJ5OXl6Xz//Px8fPjhhxgzZgz8/PwgkUjg7e2NOXPm4OzZs62eV1NTgw8++ABDhw6Fk5MT7O3tERERgaVLl+LGjRsaxyclJeGBBx6Aj48PJBIJ/P39sWTJEmRnW+bvArNe7poQQoj+siqzONvGGlVueb3MysxW70csj5JVoryh3HjXUypR1VAFeb0cAkHnjtv1sOkBAdM1Y4OJiYl47rnnEBwcjN69eyM6OhrZ2dk4ePAgDh48iG+//RaLFy/Weu7HH3+ML774AqNGjcLs2bORmJiI7777DkePHsXp06fh6+vb7v13796NV155Bb1790ZkZCScnJxw8+ZN/PHHH9i3bx/27duHyZMnc87Jz8/HpEmTcOXKFbi4uGDs2LGwsbHB7du38dVXXyEsLAzh4eHq47/44gv861//AgAMGzYMo0aNQlpaGjZt2oQ9e/YgPj4effr06cBTND8ULBNCiIVpGcgCQIhziFGvH+wcjBN5J9TbFCxbvvKGcoz5eYypu2GQ+Afj4Wrr2iX36t27N06fPo0RI0Zw2i9evIjx48dj+fLlmD9/PhwcHDTO/frrr7Fv3z5Mnz4dACCTybBo0SL88MMPWLZsGX7//fd27x8TE4PLly+jX79+nPYDBw5g1qxZ+Oc//4mbN29yqns89thjuHLlCubPn49NmzZx+paZmYnKykr1turNgI+PD3bv3o2oqCj1vk2bNmHJkiVYtGgREhMT2+1rd0JpGIQQYmEyKzI520FOQUa9frBTcJv3I8RahYSEaATKADB48GAsXboUlZWVOHbsmNZz58+frw6UAUAsFmP9+vWQSqXYs2cPcnLar2k+YMAAjUAZAKZMmYIHHngA6enpuHz5srr9zJkzOHLkCDw9PfHtt99qBPHBwcGIjIxUb7///vtQKBT46quvOIEyADz55JOYNWsWkpKScPHixXb72p3QyDIhhFgY/sgyP7jtKP71siqzoGSVXfZRNyHmTKFQ4MiRIzh16hTy8/PR0NAAALh58ybnv3wPPfSQRpubmxsmT56MXbt24eTJk7j33nvbvX9DQwP+/vtvnDlzBkVFRWhsbAQAXLp0SX3/AQMGAAAOHz4MAHj44Yfh6OjY5nWVSiWOHDkCqVSKKVOmaD1m1KhR2LNnD86cOYPBgwe329fugoJlQgixIJWNlSitL+W0BTkbd2SZP1Jdr6jH3dq78Lb3Nup9COlucnNzMWPGDKSkpLR6TFVVldb2oCDtP6fBwcEAmnKL23Pp0iXMmjULmZmZOt1fNVrds2fPdq9dXFyM6upqAIBEImn3WEtCwTIhhFiQrApu/rCQESLAwbgT/DylnpCKpKiV16rbMioyKFi2YD1seiD+wXijXU+pVKKqqgqOjo5dMsGvqyxZsgQpKSmYO3cuXn75ZfTu3Vv9Gjdu3Iinn3660xbxYVkW8+fPR2ZmJp555hk888wzCA0NhYODAxiGwWuvvYb33nvP4PsrlUoAgIODA+bOndvmsdpSQbozCpYJIcSC8FMw/B39IRaKjXoPhmEQ5BSEa6XX1G1ZlVmI9o026n2I+RAwAqNOklMqlRA1iuBk69TpwXJXqampwaFDh+Dl5YWff/4ZQqGQs//27dttnp+VlcXJD27ZDgA+Pj5tnn/9+nVcv34dQ4cOxZdffqmxX9v9AwKa3kinp6e3eW0AcHd3h62tLQQCAbZs2WJ2S4B3Jsv4DiWEEAJAM1g29uQ+FY1Jfrz7EmJtKioqoFQq4ePjoxEoy2Qy/PHHH22e/8svv2i0lZaW4uDBg2AYBjExMW2eX1bWtJKmv7+/1n3a6jxPnDgRALBjxw51ikVrRC
IRxo4di8rKShw5cqTNYy0NBcuEEGJB+JUpjD25T31dZ+51KVgm1s7T0xPOzs64fPkyEhIS1O0KhQKvvPKK1sU9Wvr5559x4MAB9bZcLsfy5ctRU1ODGTNmIDAwsI2zgV69ekEgEODo0aOcSYT19fV45plnUFpaqnHO8OHDMW7cONy9exdPPfUUampqOPszMzPVEwMB4PXXX4dAIMCiRYsQFxencb3q6mps3rwZdXV1bfa1u6E0DEIIsSD8msedNbLMvy6VjyOWTltJOJUlS5ZgyZIlePnll/H6669jzJgxGD9+PFxdXZGUlITCwkIsXboUn3/+eavXeOqppzBt2jSMHj0aPj4+SEpKQkZGBnx9fbFhw4Z2++fp6Yknn3wS33zzDQYOHIjx48fDzs4OJ06cgEKhwMKFC7F161aN877//ntMmDABO3bswIEDBxAbGwsbGxukp6cjOTkZn3zyibp6RmxsLD7//HMsW7YM48aNQ//+/REeHg6xWIzMzEwkJyejoaEBc+bMgZ2dXfsPtZugYJkQQiyEklVqBMvGXpBEhT+yfKf6DhoVjZAI254lT0h3lZSU1Oq+qVOnAgBee+01+Pv749NPP0VCQgLs7OwQGxuLtWvX4sKFC21e/8UXX8TQoUOxfv16JCUlwd7eHo899hjeffdd+Pv7qyfYteXLL79EREQENm3ahCNHjsDZ2RkTJ07EO++8gy1btmg9x8/PD2fPnsWnn36KnTt34tChQxAKhfD398c///lPzJgxg3P8M888gxEjRuDTTz9FXFwc9u3bB6lUCj8/PyxYsABz5syBs7Nzu33tThi2s6ZlWrjKyko4OzujuLgYbm5upu5OtyCTybB//35Mnz4dYrFxJxxZMnpu+rPWZ1ZQU4BJOydx2o48cASeUk+dztfnuVU3ViN6B3dC3x+z/kAvl176dbqbs7Tvtfr6emRkZCAkJAS2traddh+lUonKyko4OVnOBD9DjR07FvHx8cjIyFCXidOGnlkzfb5PS0pK4O7ujoqKCjg5ORl0P+t+2oQQYkEyKjI421KRFB52Hp1yLweJg8a1adlrQoglomCZEEIshMbKfc7BnVreiZ+KkVGZof1AQgjpxihYJoQQC9FVk/tauz5N8iOEWCKa4EcIIRaCH6yGOHXO5D4Vflk6SsMgRH/aSrAR80Ijy4QQYiG6akESFVqYhBBiDShYJoQQC9CgaMCd6jucNn5OsbHxr1/eUI7y+vJOvSchhHQ1CpYJIcQCZFdmgwW3Emhnjyz7OvhCxHCz+Wh0mRBiaShYJoQQC8DPF/a084S92L5T7ykWiOHv6N9mPwghpLujYJkQQiyARr6yc+eOKqtQ3jIhxNJRsEwIIRaAXwmDH8R2Fn7eMo0sE0IsDQXLhBBiATQWJOmqYJl3H/4qgoQQ0t1RsEwIIRaAP6Lb2ZUwVPiTCLMrs6FklV1yb0II6QoULBNCSDdXXl+O8oZyTpup0jAalY0aJewIIaQ7o2CZEEK6ufSKdM62SCCCr4Nvl9zbzdYNjmJHTtvtittdcm9CukJmZiYYhgHDMG0et3DhQjAMg9WrV3fofqtXrwbDMNi6davGvuTkZEyZMgU9evRQ9ykzM7ND9yPto+WuCSGkm+MHp8FOwRAJuubXO8MwCO0RipSiFHVbRkUGRvuP7pL7E2Itqqqq8Mgjj6CgoABjx45FQEAAGIaBg4ODqbtm8ShYJoSQbu52OTdYDnUO7dL7hzpzg2UaWSbEcMuWLcNDDz0EHx8fTvvZs2eRn5+PRx99FN9//72JemedKFgmhJBujl+BIrRH1wfLLfGDd0KI7tzd3eHu7q7RnpubCwAIDe3an29COcuEENLt8XOWu3xkmRecp1ekg2XZVo4mxHqMHTtWnVe8a9cujBgxAvb29nB1dcXDDz+sDoBb4ucsq3KmFy1aBABYu3atOl954cKFnHO///57xMbGwsnJCVKpFJGRkXjvvfdQX1+vcR9VjnVcXBwOHD
iAcePGqXOhy8vLsXXrVnUOdnp6OubPnw93d3c4OTlh2rRpuHr1KgBALpfj3XffRXh4OGxtbdGrVy98/vnnxn2QJkYjy4QQ0o3VympRUFPAaTNFGkZLVY1VKKkvgbud5ugY6Z5YpRKK8nKjXU+pVEJRVQW5XA6BoHPH7YQ9eoDp5Hu054svvsC6deswatQoTJ8+HUlJSfjpp59w/vx5pKSkwM7OrtVzHRwc8MQTT+DWrVtISEjAwIEDMWjQIABAbGys+rinn34aGzduhK2tLcaPHw+pVIq4uDi89tpr2Lt3Lw4fPgypVKpx/R9//BHffvsthg4dimnTpiE9PZ0zmTEjIwPDhw+Hl5cXJk6ciKtXr+Lvv//G+fPnkZqaimeeeQZxcXEYN24cQkNDcezYMSxbtgwSiQT/+Mc/jPcQTYiCZUII6cb4KRgCRtBlNZZVfB18YSu0Rb2iefTqdvltCpYtiKK8HDdHxhj9uneNfkVNYacSIHJ17YI7te7zzz/HiRMnEB0dDQCora3FpEmTcOrUKezYsQOLFy9u9Vx3d3ds3boVmzdvRkJCAmbPno01a9Zwjvntt9+wceNG+Pr6Ii4uDmFhYQCAiooKzJgxAydPnsSbb76Jjz/+WOP633zzDX766Sc8+OCDWu//3Xff4dVXX8W7774LhmHAsiwWL16MrVu3YsKECRAIBLh58yY8PDwAAEeOHMHEiRPxzjvvWEywTGkYhBDSjfEn0/k5+MFGaNOlfdAWoNMkP0KaLV++XB0oA4BUKsWKFSsAAMePH+/w9T/77DMAwH/+8x91oAwAzs7O+Pzzz8EwDL7++mut6Rj33ntvq4Ey0JQjrUr9AJoq4CxfvhwAcPXqVXz66afqQBkAJkyYgMGDByMrK8tiytpRsEwIId1Yejk3X7mnc0+T9IOfisHvFyHWbPLkyRpt4eHhAID8/PwOXVsmkyExMREAsGDBAo39kZGRiIyMRHV1NZKTkzX2z5o1q83rjx07FmKxmNOmmmQoFosxduxYjXNU+zv62swFBcuEENKN8UdwQ3qEmKQf/GCZnx5CSHfV3mIkKqpJrdqO9/f312hzdGxazKehoaEDvQNKSkrQ2NgId3d32Nvbaz0mODgYAJCXl6exLzAwsM3r+/n5abSpajt7e3tDKBS2ur+jr81cUM4yIYR0Yxpl47p4cp/6vryKGJSGYVmEPXog7FSC0a6nVCpRVVUFR0fHLpng1xEtJ8XV1tZqnSSn2gdAa8Da2a+xPW0F/La2tm2e21bfTf26ugoFy4QQ0k01KhqRXZXNaTNZsMy7b1FdESobK+EkcTJJf4hxMQKBUSfJKZVKCEUiiJyczD7gcnV1hZ2dHerq6nD79m30799f63G3bze9QdQ2ityZ3NzcIJFIUFxcjJqaGq3Buip3WNsoMWmfeX+HEkIIaVVWZRaUrJLTZqpgOdAxECKGO/5Ci5MQSyAUChET01QJ5M8//9R6TE5ODpKTkyEQCNTHdhWxWIwRI0YAAH766SeN/ZcvX0ZKSgocHBzUJeeIfihYJoSQboqf6uAp9YSDxMEkfRELxQhwCuC0Ud4ysRT//ve/AQDvv/8+kpKSOPsqKiqwePFiKJVKzJkzBwEBAdou0an+9a9/AWha0EQ1wg0AVVVVWLZsGViWxdNPP91uygXRzqyD5bq6Orz55pvqVWF8fX2xePFirQnqbQkODlavdqPt3/Xr1zvpFRBCSOfhB8umGlVu7f6Ut0wsxYwZM/Dyyy+jvLwcI0eOxMiRI7FgwQLMmDEDQUFBOHz4MPr3748vvvjCJP2bN28ennrqKeTm5qJ///6YMWMG5s+fj549eyI+Ph4jRozA2rVrTdI3S2C2Ocv19fUYP348EhMT4ePjg9mzZyMzMxNbtmzBvn37kJiYqPf66E888YTWdmdnZ2N0mRBCuhQ/zaFnD9OUjVMJdQ7FERxRb1P5OGJJPvjgA4wbNw5fffUVkpKScPbsWUilUkRERGDu3L
lYunRpq9UousLXX3+N2NhYfPXVV4iPj4dcLkfPnj3x/PPPY/ny5W2uEkjaZrbB8ttvv43ExERER0fj4MGD6jIk69atwwsvvIDFixcjLi5Or2uq1lknhBBLYHYjy1QRg1i4qVOnYurUqTof31acEhwcrC4319Lq1auxevVqjfaFCxdizpw5cHJqfdLsY489hscee0ynvm3durXNuGjhwoVYuHBhq/u19V3Xa3c3ZpmG0djYiA0bNgBoWiJSFSgDwIoVKxAZGYn4+HicP3/eVF0khBCTUigVyKzI5LSFOJumxrIKP1i/U30H9XLNFcMIIaQ7MctgOSEhARUVFejZsycGDx6ssX/evHkAgL1793Z11wghxCzcqb6DRmUjp83UI8vBTsGcbRYsMiszTdIXQggxFrNMw0hJSQEADBkyROt+VXtqaqpe1/3oo4+Qnp4OGxsb9OvXD/fffz9nPXNCCOku0iu4+cA9bHrA1dZ4dXANIRVL4efgh7zq5knY6eXpiHCNMGGvCCGkY8wyWM7Obiqy31phb1V7VlaWXtd9+eWXOdvLly/H//73PyxevLjdcxsaGjjLNlZWVgJoWpNdJpPp1Q9rpXpO9Lz0Q89Nf9bwzG6W3uRsBzsFQy6Xd+iaxnhuwY7BnGD5VuktyAIs9+tgad9rMpkMLMtCqVRCqVS2f4KBVPmuqnuR9tEza6ZUKsGyLGQymdbltlsyxs+mWQbL1dXVANDqkpKq2aZVVVU6XW/WrFkYN24coqKi4OHhgdu3b2Pz5s1Yv349lixZAjc3N8yePbvNa7z33ntYs2aNRvuxY8da7SfR7tChQ6buQrdEz01/lvzMTtSe4GyLKkTYv3+/Ua7doedWx908feM0gvOCO9Sf7sBSvtdEIhG8vb1RXV2NxsbG9k/oIF3/jpNm9Mya5rbV1dXh+PHj7Q4SqJYh7wizDJaN7bPPPuNs9+vXD5988gkiIiLw1FNP4ZVXXmk3WF65ciVWrFih3q6srERAQADGjRsHNze3Tum3pZHJZDh06BAmTZoEsVhs6u50G/Tc9GcNz+ynAz8BJc3bo/uPxvSI6R26pjGeW8OtBiScSVBv19rVYvr0jvXLnFna91p9fT1ycnLg4ODQqQtYsCyLqqoqODo6gmGYTruPJaFn1qy+vh52dnYYPXp0u9+nJSUlbe7XhVkGy6rqF629G6ipqQEAODo6dug+Tz75JFatWoW0tDRkZmYiODi41WNtbGxgY2Oj0S4Wiy3iF2RXomdmGHpu+rPUZ6ZklRpl2cJdw432Wjvy3MLdwjnbOVU5YAUsJEKJMbpmtizle02hUIBhGAgEAggEnVcDQJVGoLoXaR89s2YCgQAMw+j0c2eMn0uDnva2bdtQX9955YACAwMBALm5uVr3q9qDgoI6dB+BQICePZuK+Ofn53foWoQQ0lXuVN9BrZw7mBDmEmai3nD16tGLsy1n5bTsNSGkWzMoWF60aBF8fX3xr3/9S125wpgGDhwIALhw4YLW/ar2yMjIDt+rrKwMAEy66g4hhOjjVvktzraTxAkeduZR2cdB4gAfex9OG7+/hBDSnRgULC9ZsgRyuRyff/45hgwZghEjRmDTpk3q9IiOiomJgbOzM9LT05GcnKyxf+fOnQCAmTNndug+V65cQVpamnq5SkII6Q74wWevHr3MKoeRP7pMwTIhpDszKFjeuHEj8vPzsXHjRgwbNgxnzpzBU089BV9fXzzzzDM4d+5chzolkUiwbNkyAMDSpUs5Qfi6deuQmpqKMWPGICoqSt2+YcMGREREYOXKlZxr7d+/H0ePHtW4R2pqKh544AGwLIslS5ZAIrHsfDpCiOW4WcYtG2cuKRgq/P7cKqNgmRDSfRmcIW5vb48lS5YgMTERqampWLp0KUQiETZu3Ih77rkHgwcPxldffaWuR6yvVatW4Z577sGpU6cQFhaGBx98ECNGjMALL7wADw8PbN68mXN8cXEx0tLSNHKPz5w5gwkTJi
A4OBizZ8/Gww8/jHvuuQdRUVG4du0axo4di/fff9/Qx0AIIV2OP1Ib1sO8gmX+yPLN8putHEkIIebPKNMp+/fvj88++wx37tzB9u3bMXr0aKSkpGDp0qXw9fXFk08+ifPnz+t1TVtbWxw7dgxvvPEGpFIpdu3ahaysLCxcuBAXLlxAaKhuy7pOmTIFixcvhpOTExISErBz507cunULsbGx+Oabb3D48GHY2dkZ8rIJIaTLyZQyjQlzvVx6tXK0afBHlvOq81AjM06aHiGEdDWj1h6RyWSoqqpSF8xWra6yZcsWDB8+HPPmzUN5ebnO17Ozs8PatWtx69YtNDQ0ID8/H1u2bNG6st/q1avBsiy2bt3KaY+OjsamTZuQmpqK4uJiyGQylJSU4NixY1iyZEm7K78QQog5ya7MhkzJXZGKP5JraiHOIRAw3D8v6eXprRxNSPe1evVqMAyDrVu3QqlUwsXFBQzD4MaNG1qPnzVrFhiGQXh4uNb9jY2NsLOzA8MwuHv3bof71NLYsWPBMAwyMzMNuq6u4uLiwDAMFi5cyGnfunUrGIbB6tWrO/X+ncEowXJiYiKefPJJ+Pj44J///CdSU1MxZ84cHDx4EJWVlfjhhx8wYMAA/PHHH3juueeMcUtCCLFK/JQGTztPONs4m6g32tkIbRDoGMhpo0l+xNIJBALExMQAAE6ePKmxn2VZJCQ0Ldhz8+ZNrcHw2bNnUV9fj969e8PT07NzO0x0ZnCwXFZWhs8++wwDBgxATEwMtmzZAldXV6xZswbZ2dnYuXMnJk6cCBsbGzz88MM4d+4c+vbta7TlWAkhxBrxJ8uZWwqGCj8Vgz8pkRBLNGrUKADag+UrV66gtLRUXR5X2zEnTpzgXMcQy5Ytw7Vr13D//fcbfA3CZVCw/Oijj8LPzw/Lly/HtWvXMG3aNOzZswcZGRlYtWoVvL29Nc4RiUQYNmyYuq4xIYQQ/Zn75D4Vfr9okh+xBm0Fy6q2l19+ud1jOhIsu7u7IyIiAs7O5vWJU3dmULD8448/okePHli5ciVu376Nffv2YcaMGe0uv3j//ffjzTffNKijhBBCtNRYNtORZX6/qHwc6c727NmD6OhoSKVSuLm5Ye7cuVrzkocOHQpbW1vcvHkThYWFnH0nTpyAUCjErFmz0KtXL/UosopSqVSnafCD5b///hszZsxAr169YGdnh9DQUKxYsQIlJSUafWgtZ7ml7du3IyoqClKpFJ6ennjiiSeQl5encdzChQvBMAzi4uK0XodhGAQHB7d6H0shMuSkX3/9FbNnz4ZIpN/pM2fO7PBCIoQQYq3q5fXIrszmtJnryDJ/0mFJfQlK60vhautqoh6RjmCVLOprZO0fqCOlUon6ahnETGO7A20dZWsvBiMwfNGer776Cs8++ywYhsGoUaPg4+ODxMREDB8+XCOmkUgkuOeeexAfH4+TJ09i7ty56n0nTpzAoEGD4ODggJiYGPzwww+oqalRryB8+fJllJeXw8/PDyEhIerzXn31VXzwwQeQSCQYPHgw/P39kZqaiv/+97/Ys2cPEhIS4OXlpfPr+fjjj/HFF19g1KhRmD17NhITE/Hdd9/h6NGjOH36tNYiCtbOoGC5pqYGZ86cwciRI9s8LjExETdu3MDjjz9uUOcIIYQ0S69IBwtWvc2AQYhzSBtnmE6AYwAkAgkalY3qtvTydLh6U7DcHdXXyLD5Jc20ge5g8UexsHM0bOGxrKwsLF++HGKxGHv37sWUKVMANFX/WrRoEbZv365xzqhRozSC5ezsbOTk5GDOnDkAmlYq3rZtGxITEzFhwgQA2lMwfv31V3zwwQfo378/fvvtN3h6esLJyUldVWLt2rX497//jZ9++knn1/T1119j3759mD59Oue1/PDDD1i2bBl27dql/4OycAa9nVu4cCG+/fbbdo/btGkTFi1aZMgtCCGE8PBTGfwd/SEVS03Um7aJBCKE9uDWw79Rpr2cFiHmav
Pmzaivr8fDDz+sDpQBQCwWY/369ZBKNX/+VMFuyzQL1f+rqmWo/qvtmJbB8jvvvAMA2LFjB3r1av60RhUsDxo0CDt37kRxcbHOr2n+/PnqQJn/Wvbs2YOcnBydr2UtOvWzD6VSCYYx/KMPQgghzbrL5D4Vfv+ofBzpblQB7EMPPaSxz83NDZMnT9Zoj46OhlAoREpKCqqrqwE0jxrHxsYCAPr06QMXFxfOJD/+yPLdu3eRkpKCsLAw9O/fX+M+DMMgJiYGCoVCr4Xf2notLMtqnXho7QxKw9DV7du34eTk1Jm3IIQQq8GvKGGuk/tUaJIf6e7u3LkDAAgKCtK6X9vkNkdHRwwaNAjnz59HYmIiJk6ciBMnTiA0NBQ+Pj4AmgLdkSNHIi4uDnK5HLm5ucjNzYWLi4s6MFYtHnLz5s12Bx71GVlu77WoXjNppnOwvHbtWs52cnKyRpuKXC5HWloajh8/jkmTJnWsh4QQQgBoBpvmPrLMn+R3q/wWWJalTxy7IVt7MRZ/FGu06ymVSlRVVcHR0bFLJvh1tVGjRuH8+fM4ceIEhgwZgqtXr+Kxxx7jHBMTE4M///wTFy9exPXr19Vtqp8PpVIJAPD29saUKVPUqyKLxWKNn6HWAuDOpOqfNdA5WFaVIlH9oktOTkZycnKb53h6euLdd9/taB8JIcTqVTZWorCWW4rK3Ja55uMH89WyahTWFsLbXrMWPzFvjIAxeJKcNkqlEjJWDDtHSacHyx3h4+ODtLQ0ZGVloW/fvhr7s7KytJ43atQofPrppzh58iSGDh0KlmXVKRgqLVf7UwXLLfOVVVUp3N3d1ctpV1ZWwsnJqUPPLCsrC5GRka2+Fl9fX3WbRNL0NVelk7RkTbnNOgfLW7ZsAdC0XOPixYsRGxuLJ598UuuxEokEvr6+GDFiBGxsbIzTU0IIsWL8UWWRQIQgp64fTdKHt7037MX2qJHVqNtulN2gYJl0G6NGjUJcXBx++eUXTJs2jbOvtLQUBw8e1HqeKjBOSkrCsWPHOG0qw4YNg1gsxsmTJ3Ht2jX1/VT8/f0RERGBq1ev4saNG5wJfh3xyy+/aJS8U70WVR60iiptRFtN6UOHDhmlP92BzsHyE088of7/bdu2Ydq0aZw2QgghnYc/OS7YKRhiYdd/vKwPhmHQq0cvpBSlqNtuld/CaP/RJuwVIbpbtGgRPvzwQ/zwww9YsGABJk6cCKCp3Nry5ctRU1Oj9TxPT0/07t0baWlp2Lp1q3pVvZbs7OwwZMgQHD16FBUVFbCzs0NUVBTnmDfeeAMLFizA3LlzsW3bNoSGcivMlJSU4Pfff8c//vEPnV/Tzz//jEcffVRd3UMul6tfy8yZMxEYGKg+dsyYMQCAL7/8Ek888QTc3NwANKXiWtMicwZN8FO9SyKEENI1+GXXzD1fWSXMJYwTLFP5ONKdhISE4JNPPsGyZcswZcoUjB49Gt7e3khMTERZWRkWLFiAH374Qeu5o0aNQlpaGsrKyjBr1iytufoxMTFISkoCANxzzz3qtAeVRx55BFeuXMG7776LYcOGYcCAAQgLa/rZT09PR2pqKhwcHPQKlp966ilMmzYNo0ePho+PD5KSkpCRkQFfX19s2LCBc+y4ceMwZswYxMfHo2/fvoiJiUFxcTGSkpLw3HPP4eOPP9b5vt2Z+SYKEUIIUUsrTeNsh7uGm6gn+gl34faT/zoIMXdLly7FH3/8gWHDhiEpKQkHDhzAwIEDkZiY2GZqRMuUCn4KhkrLlAf+Etcq77zzDuLj4zFnzhzcvXsXu3fvxrFjx6BQKPDss89iz549er2eF198EZs3b0ZFRQV27dqFyspKPPbYY0hKSuKMKgNNnw7t3r0bzzzzDBiGwf79+1FaWor169fjo48+0uu+3RnDsizb3kHjx48HwzDYtm0b/P39MX78eN1vwDA4cuRIhzppjiorK+Hs7Izi4mL1xxKkbTKZDPv378f06dMhFpv3x8fmhJ
6b/iztmSlZJaJ/jEatvFbd9sWELzDKX/sfV0N1xnO7UHgBT/zdnLInYkRIWpAEidB4k8VMydK+1+rr65GRkYGQkBDY2tp22n2MNVnNmtAza6bP92lJSQnc3d1RUVFhcDljndIw4uLiwDAMamtr1du6ohJBhBDSMXlVeZxAGQB6u/Y2UW/0wx9ZlrNypJeno49bHxP1iBBC9KNTsJyRkQEA8PPz42wTQgjpfNfLrnO2XW1d4WHnYaLe6MdB4gB/B3/kVueq266XXqdgmRDSbegULPOLXZui+DUhhFgrjXxll3Djf2onqwPqqiFUNhj3umgaBW8ZLNMkP0JId9Kpy10TQgjpuLQybrDc28UIKRj5qcD1fUDWKaDgElBfDjGAGQDY6y8AXv2BoGgg4l7AdwjQgeC8t0tvHMlunrvCfz2EEGLODMoQLywsxPHjx1FYyF1NKj09HQ899BD69++P6dOnIzEx0SidJIQQa8YfWTY4X5llget/Al+PAb4eBcR/AGSeAOrLOYcxDZVA9ingxCfAN+OBr2KBy781nW8Afn+vl16HDnPLCSHELBgULL///vsYN24cKioq1G2VlZWIjY3Fr7/+iqtXr+Lvv//GhAkTcPPmTaN1lhBCrE1FQwXya/I5bQYFy3kXgK33Aj89AuQn63du4WVg52Lg24lAtv6DIPz+VjVWoaCmQO/rEEKIKRgULMfFxaFv374ID2+e5bx161YUFhbi4YcfRlpaGtatW4e6ujp88sknRussIYRYG35+r1ggRohziO4XUCqA+I+AbycAWQkd60zeOWDzFODwGkAh1/k0X3tfOIodOW2UikEI6S4MCpbz8vI0llz8888/IRKJ8OmnnyIsLAzPP/88Bg4ciPj4eKN0lBBCrBE/BaNXj14QC3Ss51tdBGyfCxx7G2CVmvud/IHoZcBDO4Bl5yB77hIO9/kA8vk/AjH/Bnq0Mpn75Dpg2wyg8o5O3WAYRmMRleul11s5mpgDSpMh5qyrvz8NCparqqoglUrV2wqFAqdPn0ZUVBTc3d3V7REREcjNzdV2CUIIITrgj8Dy6xa3qiwL2DQJuH1Mc59zIDB3E/B8KjDlHSBiOuAeBjj6oMbWB2zYZGDSWuC5ZGD+94BrqOY1sk83pWUU65Zqx5+USBUxzJNQKATQtNgKIeZK9f2p+n7tbAYFy76+vrh+vXlU4OTJk6iursbYsWM5x8nlco11zgkhhOjOoMl9RWnA5qlAGa8mPiMAxq4Elp0FBswDBO38oREIgL6zgH8mNQXP/BHtyrym++Snttslfr9p2WvzJBaLYWNjg4qKChpdJmaJZVlUVFTAxsamy1bNNKh0XHR0NHbs2IFPP/0UEyZMwKpVq8AwDGbOnMk57tq1a+qFTAghhOhHppQhvTyd0xbhGtH2SXevA1unA7Ul3HYHb2DeZiA4Rv+OiCRNaRlBscCvC4GK7OZ9tcXA1hnAE3sA30GtXoIfLOdU5aBGVgN7sb3+/SGdyt3dHXl5ecjNzYWzszPEYrHR63orlUo0Njaivr7e6pdu1pW1PzOWZSGTyVBRUYHq6uoujS8NCpZXrlyJ33//HS+88AKAphcwbtw4jBw5Un1MZmYmrl69iieffNI4PSWEECuTWZGJRmUjp63NNIzKfOCHeZqBsmc/4LE/AEevjnXIPwp4Or7pHnnnm9sbKoAfHgCWHAJcgrWe2qtHLwgZIRSsAgDAgsXNspsY5DmoY30iRufk5AQAKC4uRl5eXqfcg2VZ1NXVwc7OzvgL7FgoemZNbGxs4Ofnp/4+7QoGBcv9+vXDyZMnsX79ehQXFyMqKgovvfQS55gDBw5g4MCBuO+++4zRT0IIsTr8fGVve2842zhrP7i+simIrcjhtvsNBRb8CkhdjdMpqSvw+G5gx8NNNZpVau4C2+cBTx7Uei8boQ2CnYKRXtE8Up5WmkbBsplycnKCk5MTZDIZFAqF0a8vk8lw/PhxjB49uss+Su/u6Jk15S
ib4rUbvILfkCFDsG3btlb3P/3003j66acNvTwhhFi9G6XcSXARLq2kYCgVTekRhZe57X5Dgcd3ATaO2s4ynI1jUwD+wwPcgLnkZlMd5yf2AkLNP2i9XXtzg2UqH2f2xGJxpwQnQqEQcrkctra2Vhv46YuemelYX9ILIYR0E/zyavzya2rHPwLSj3DbXEOBR342fqCsIrYDHtwOePbltmefBg6v1noKTfIjhHRHBo8sq2RnZyM/Px8NDQ2tHjN69OiO3oYQQqwOf+SVX34NAHDrCBD3PrdN6g48+htg7655vDHZ9QAW7GwqUVfZIrf19AYg4J6mShot8Pt/s/wmFEoFhO1V5SCEEBMyOFjevHkz3nrrLWRnZ7d7bGfkOxFCiCUrritGaX0pp02jEkZFHvDbEgAtSnwxQuDBVmojdwZnP+ChH4BNkwFFi8mIu5cCXv0At57qJv7Icp28DjlVOQh2Du6avhJCiAEMCpa3bNmCJUuWAAD69++P8PBwODp20kd9hBBihfgpGFKRFP6O/s0NLAvsWQbUcQNqTFwNBI1El/IdDEz7ANi3vLmtoRLY9Syw6C91PWd3O3e42bqhpL65Wsf10usULBNCzJpBwfK6desgEomwc+dOzJo1q/0TCCGE6OVqyVXOdrhLOARMi2km57cA6Ue5J0XMAEb+qwt6p0XUIiA7EUj9ubktJwk4/TkQ85y6KcItAgl5Certq6VXMTVkalf2lBBC9GJQsHzz5k2MHj2aAmVCiFUov1uLtMQCFNyuQE15A4RiAdwDHBEywB3BA90hEBi/5ik/WO7r1mIiXWkGcGAV9wRHH2D2BsBU9VcZBrh3XVOAXJbZ3H70bSBsMuDZlELS17UvJ1i+VnKtU7rDKllkXS5BRkoRinKqIW9UQOokgVeIM3qP8IarDy2GQgjRjUHBsqurK9zdO3niCCGEmFhddSMSfr2FtDMFnLRgACjOqcb1U/lw8bHH2EfC4RvmYtR784NIdbDMssCefwGyGu4JszYAdsbtg95sHID7vgS2TIf6gSkagF3PAEuOAAIhN+hH05sClmWNushCYUYl4n68juKcak57WUEt8m6U48KBLPQc4olRD4bB3tnGaPclhFgmg0rHzZ49GwkJCZDJZMbuDyGEmIXCjEr8tPYM0pI0A+WWyvJrsGvdRVw4mGW0e5fXl+NOzR1OmzrITPmJW9sYAIY8AYRNNNr9OyRoJBC9lNt25yJwbjMAaATLlY2VyKs23ipxl+Nz8ftH5zUCZb70C3exY20S8m6UGe3ehBDLZFCw/O6778Le3h6LFi1CWRn9oiGEWJbsqyXYte4Caisb2z8YTYO9p39Px5l9GUa5/9VSbgqGrdAWIc4hQF0ZcJCXfuEcCEx5xyj3NZrxqwC3MG7bkbeAqkL42PtorEJ4rdQ4qRjJh7MRv+MGlMo23t200FAjx57PknH7YpFR7k8IsUwGpWG88MIL6Nu3L3bs2IE///wTUVFR8Pf3h0CgGXszDINNmzZ1uKOEENIV8tMr8NeXlyCXKTntElshIkb6wCvYCXVVMlw7nY+SXO7o5dl9GbB3lqDfKL8O9UFjcp9rOEQCEXBkLVBbzD14+kedt/CIocR2wIz/AttmNLc1VAAHV4GZ+w36uvbF6fzT6l1XS65iUtCkDt3y5tlCJOy8pdHu4i1Fnxhf2PeQoCirCtdO5aOhVq7er5SzOLDpMmYsHYiAPkZaEpwQYlEMCpa3bt2q/v+KigocPXq01WMpWCaEdBdVpfXY/2WqRqDsH+GCiQv7wr5Hc35r5Dh/nP87E0l7uKPJx3+6ATd/B3iHcEdP9cEPlvu49gHyzgPntnAPjJgB9DbTShIho4DIh4DUn5rbLv0CDHkMfdz6cILljk7yK8mrxtHvNK8xZEoQ7pkVAoGwaSAnfJg3hkwJwtHvriHzUnP5OqWcxd9fX8IDK4ehh5e0Q30hhFgeg4LlY8eOGbsfhBBiUgqFEn9vvIz6au5cjL
ChnpiwqC+EQu4nZ4yAwdDpIbB1kCD+x+aV9pQKFke2XsODrw+DSGLYynT84LGfa1/g79fASZ4WS4Gp7xl0/S4z+S0g7a+mUWWVA6+h75TXOYd1ZJKfQq7EoS1XNd7gRM/piSGTgzSOt3OUYNqzkYj/4TquJuSr2xvrFfjr60t44NWhBn/dCCGWyaBgecyYMcbuByGEmNT5/Zm4m1nJaQvs56Y1UG6p/2g/VNytRfLhHHVbeWEtkvZmIGZuL737UdFQgdzqXE5bn8q7QE4i98DRLwE9AvW+fpdy8AQmvAHsf7G5reAS+hamcw4rayhDYW0hvO299b7FhQNZGukwfUf5ag2UVQQCBmMWRKChTo70C835yqV3apC4+zZiHwhr9VxCiPUxaIIfIYRYkqLsKpz7i1vNwtnDDpOfbDtQVom+vye8Qpw4balHclBWUNPKGa3jT3aTCCTomfAF96AeQZoVJ8xV1CLAg7tMt//J/8FRzM2zvlJyRe9LV5XW4/zf3K+bm589Rs8Pb/dcgYDBhCf6wtWXW2855WgO7tws17svhBDL1aFguaSkBOvXr8eCBQswZcoUfPjhh+p9V65cwZ49e1BbW9vhThJCSGdhWRYnfrkBtkUFBYGAweQl/WAjFet0DYFQgPGP94FA1JxGoFSyOPV7ehtnacdPwQgXO0NcxitLN3E1IOom9YGFImAyt1oHU12AvkJubjA/T1sXibvSoWiRfsEIGIx/vA+EYt3+tIlthJjyj/4QiloczwLxO9KgUChbP5EQYlUMDpZ//fVXhIaGYsWKFdixYwcOHz6M69evq/fn5eXh/vvvx++//26UjhJCSGe4df4u8m9VcNqGTAuCZ5BTK2do5+pjj0ETuWkRmanFyL9Vrtd1NCb3lfJqEPsPB/rdr9c1TS5sItBzPKepT1EmZ1vfSX7FudW4caaQ09Z/tJ9BX7d7Zody2krv1ODSsdxWziCEWBuDguXTp0/jkUcegUgkwieffIIzZ86AZbl1LSdMmABnZ2cKlgkhZkveqMCp37nlxpzcbRE1tfV817ZETQmCnSN3NPrc/ky9rqGxzHVdFfeAKe+abknrjpj8NsA0/8npW8dNUVFN8tMV/7lK7EQYNiPYoK4NHO8P9wAHjes31MlbOYMQYk0MXpREIBDg0KFDeP755zF06FCNY4RCIYYMGYLLly93uJOEENIZkg9no7q0gdM2ck4viMSGVUOQ2IkQNTWY05Z9tRSFGZXaT+CpaqxCdlU2p61PQ4uFUSJmAAHDDOqbyXn1AwY+rN7kvC4AJfUlKKrTbXGQ0vwapF+8y2kbNDEAdg4Sg7omEAow5uHenLaGWjmSD2e3cgYhxJoYFCyfOnUK0dHRGDJkSJvHeXt7Iz8/v81jCCHEFOprZLhwkBsM+Yb1QOhgjw5dt+8oX9g5cYO25CO6BV3XS69ztkUsi7BGVSk7Bhj3Wof6ZnJjXgEETSPvgXI57JXcvGBd85ZTjuRwquhJ7ESIHB/Qoa55hzprfO1TDuegrlq3VRwJIZbLoGC5trYWHh7t/0Hp6FLYdXV1ePPNNxEeHg5bW1v4+vpi8eLFyMvLa//kNty8eRN2dnZgGAYTJ07s0LUIId3TpbhcyOoVzQ0MEPtAmEG1flsSS4QYNIEbuKVfKEJ1WX275/KDxbBGGdRhd7/7m0ZnuzOXICDqCQBNf3z4o8u6BMv11TKkJRVw2gaM9YONnUGVUDnumRkKtPjyyxoUuHCARpcJsXYGBct+fn64cqXtMj8sy+Ly5csICQkxqGP19fUYP3483nrrLVRXV2P27NkICAjAli1bMHjwYNy+fdug6wLAU089hYaGhvYPJIRYpMZ6OVKO5nDaekV5wiPQOMtG9431hUjS/OuVVbK4FN/+m3yNfOXG/w8mGQEwdqVR+mZyo14ERLYAgD6N3GBZl0l+V07mcSpgCIQMBoz1N0rXXH3t0Xs4t9bzpbhc1FTQ3wtCrJlBwfLUqVORlpaGn3
76qdVjvv32W+Tk5ODee+81qGNvv/02EhMTER0djRs3buDnn39GUlISPvnkExQVFWHx4sUGXXfTpk2Ii4vDP/7xD4POJ4R0f1dP3kFDDXfyFj/XuCNs7cXoPcKH03btVH675cg0gmXVyOuA+YBH+7WDuwUnH2B40+/fvnqOLLMsi2unuKl9vaI8Ye9svDJ6w2aEQCBoHl5WyJS4FEeVMQixZgYFy6+++iqcnZ3x+OOP45VXXkFiYtPKUjU1Nbh48SLefPNN/Otf/4KHhweWL1+u9/UbGxuxYcMGAMDnn38OB4fmWcorVqxAZGQk4uPjcf78eb2uW1hYiJdeegmTJk3Cww8/3P4JhBCLI5cpcPEQ96P14AFucPd3aOUMwwwY68fZrqtsRNalklaPr26sRlYlt55yn4ZGgBECY142at9MbuRzgMi2eeT8/92tu4u7tXdbOQkozKhExd06Tlv/McYZVVZx9rBDRDR3dPny8TzIGhWtnEEIsXQGBcv+/v74888/4e7ujo8++ggxMTFgGAY7d+7E0KFD8fbbb6NHjx7Ys2cPPD099b5+QkICKioq0LNnTwwePFhj/7x58wAAe/fu1eu6//73v1FXV4cvvvii/YMJIRbpRlIhaiu4QVrUtGCj38fN1wHeodyav1cT7rR6/JWSK2BbzFoTsSzCZY3AoIcBt55G759JOXgCQ55AkExzkt/l4tYrKF0/zR1Vdva003jGxjCQVy+7oUaOtMSCVo4mhFg6gxcliY6ORlpaGtatW4epU6ciIiIC4eHhGD9+PN5//32kpaXhnnvuMejaKSkpANBqtQ1Ve2pqqs7X3L9/P37++We89tpr6NWrl0H9IoR0byzL4lI89yN1v9494B3q3Cn36xPjy9nOvlLaanWFS8WXONsRjY2wgRAY/VKn9K0lpZJFWU0jSqobUCcHGuVdsHpdzHMQCsToz0vFaC1YlssUuHWeO+ocMcK7wxMytXH1sUdgPzdOW8qRHM4qj4QQ69Gh6cOOjo54/vnn8fzzzxupO02ys5s+IvX31/7xmqo9KytL636+mpoa/POf/0Tv3r3xyiuvGNSnhoYGzqTAysqmuqkymQwymay100gLqudEz0s/9Nz019ozK7hdieKcak7bgLF+nfZsgwe6QviTQD0hjVWyuHmuAH1ifDSOTS28yNnu39AIZd/7oHDwA4zYv6KqBpy4VYzLd6pwLb8SGcW1KKttRHMcKMKrZw/Dw0GCAFcpwjztMSzIBcOCXeDbw85o/YDUC8IB89E/ex+S7GzVzakFF7R+PdIvFKGhlptnHhrl3mlfu/5jfZB9pTltprywFukphQjqzw2i6efTMPTc9EfPzDDGeF4dr7XTCaqrm/6YSaVSrfvt7e0BAFVVVVr3861atQpZWVk4duwYJBLDita/9957WLNmjUb7sWPHWu0n0e7QoUOm7kK3RM9Nf/xnVppiC6B5hT2hnRKXshJxuROrg0ncbFFX0HzPM4euI6PiosZxyWWJnLJlAxoaEa8YjMr9+zvchyoZcOYug+QSAbJrdBuJLapuRFF1Iy5kl+Pnc02VPPykLKLclRjizsLFCHPq7GWD0L/hD07bpbvJ2PfnPggY7gefxefs0PJPlo2rHCcSj3a8E61gWUDsKIWsqnmBmridqfDIrtN6PP18Goaem/7omemntra2w9cwKFg+deoUjh07hmvXrqGsrAwMw8DV1RV9+/bFuHHjDE6/6Aznzp3DZ599hscffxxjx441+DorV67EihUr1NuVlZUICAjAuHHj4Obm1saZREUmk+HQoUOYNGkSxGJx+ycQAPTcDKHtmdVVNeKHQ2fQcjWLoZN7YuBE404Q47vtU4TDm5sXG2ksFWHcqImwc2x+4363Kg9le7npCP08oxA495kO3fvKnUp8czITB68WQqboeApBXi2DvGwh9uUAU/t5YUlsMAb4dSyFpfj3eKC+OaWullGg3z0hCHFvrildV9WI7QeSOOdFT+
+L8Hu8OnTv9qS5FSD+h5vq7YYSEWKHj4OTe/MIO/18Goaem/7omRmmpKT1idW60itYTk1NxeLFi3HxYtOoCMtyf/mqcseGDx+OTZs2oW/fvgZ1SlX9orV3AzU1NQCa0kDaIpfL8Y9//AM9evTAxx9/bFBfVGxsbGBjozmUIhaL6ZtWT/TMDEPPTX8tn1nqmTwo5c2/s4QiAfqP8u/0Zxo6yAsiyQ3IG/8/FYMFsi+Xo//o5moZ1y9/xznHUaFEyLiXITCwbzcLq7Du0A38dblzJqUpWWD/5ULsv1yIMeEeeG16H/T2NqxGtc/Y1+C5bx7uipr/HF2/tA3hk/+r3r55rQhsizRqkUSAsKHeEIs798PRiHt8kfhHBif94+aZIoyYrTnhkn4+DUPPTX/0zPRjjGel82+as2fPYvz48aipqYG9vT2mTZuGQYMGwd3dHSzLori4GBcvXsSBAweQlJSE6OhoxMXFaa1m0Z7AwKaZyLm52mtbqtqDgoLavE5ubi6Sk5Ph7e2NBx54gLOvvLwcAHD+/Hn1iHNcXJzefSWEmD9WyeLKcW4lirChnrB16Pw/OGKJEMGR7rh1rnly2q3zhc3BMsviUtpuoEWGWD/GFoLgWL3vVdMgx6eHb2BzQiYUbUxGc7ARYWRPNwzwc0ZfXyd4O9vC1V4CRqnA34eOYGj0KORXyXC7qBrnsspwNrMU5bXa8/7ibxThxM0iPDQ8EC9P6Y0eUj1T3bz6YYDYBUfY5rS6S5mHMUupAARNKRAZKcWcU4L6uUFi2/lZhCKJEOH3eOPSsea/RddP5WP4jBAIhAbPjyeEdDM6/bZRKBRYsGABampq8OSTT+KTTz6Bk5P2cj2VlZVYsWIFNm/ejEceeQRXr17Ve7bywIEDAQAXLlzQul/VHhkZqdP1CgoKUFCgfYSlvLwc8fHxevWPENK95N4oQ1Upd7np/kZa9U0XvaI8OcHynRvlqKloaFpM49ZhXFZUAmie5DbAPwbQ8/fmsbS7eO33S8iv0L6stkQkwIxIH8wZ7I/hIa6QiDSDPZlMBhcboLe3I/oHiAF44WkACiWLpIwS7Em+g32p+ahu4E60U7LAj0nZOHS1EO/dPwAT++qXHtE/eAKOZOxSb19GA5D2F9BnBmQNCuRcK+UcHzLQXa/rd0TfGF9OsFxT0YisK6UIiey6PhBCTEunt8a7d+/GrVu38OCDD+Kbb75pNVAGACcnJ3z77bd44IEHcOPGDb1rIQNATEwMnJ2dkZ6ejuTkZI39O3fuBADMnDmzzesEBweDZVmt/44dOwYAmDBhgrqNEGKZ0k5z3yy7+TvAK9j49XlbE9TPDWKb5oliLAtkJBcBABSnNuCyDXc0tn/4bJ2vXS9TYPWeK1i05azWQNnZTowXJ4fjzGsTsG7+IMSGuWsNlNsiFDAY2dMd78+NRMKr4/HK1Ah4OGqmpRVVNWDJd+fwwi8pqG2Ua7mSdgN6cVd6vS6RoOF008JUOddKOctbMwIGQQO6LlB193eAJ+975erJ1utlE0Isj06/Mffu3QuBQIB3331X5wu/9957AIBdu3bp3SmJRIJly5YBAJYuXarOUQaAdevWITU1FWPGjEFUVJS6fcOGDYiIiMDKlSv1vh8hxHI11suRflGzPm9XEv1/KkZLWZdLgLvXkJl7EjUC7q/iAR66fWqWXVKL+z5PwNZTmRr7JEIB/jW+F068Mg7Lxofpnx7RCmc7MZ4d2xPxL43FvyeEwVas+Wfktwu5uO/zBKQXVWu5gqa+7v1aFgKBnGGQVngeyDuPjJQizrG+Yc6wte/afM2+vFJ/WZdLUFPe0MrRhBBLo1OwfP78efTu3RshISE6Xzg0NBQRERF6L0mtsmrVKtxzzz04deoUwsLC8OCDD2LEiBF44YUX4OHhgc2bN3OOLy4uRlpaGvLz81u5IiHEGqVfuKueXAc0jUyGD+/aYBloWlK7pdy0MshPbcQl3sRhL6knPKQe7V7vVHoxZn
1+EtcLNEtoxvZyx4Hlo/HC5N5wsu2cwFIqEWH5pHAcfWEsxvXW7O+NwmrM+t9JHLpa2O61HCWOCHHi/n25ZCOB8tSXyOQtER4S2f6zMbawYV4QtfxkQMnixtn2XxchxDLoFCzn5+cjPDxc74uHh4fjzh3DPq6ytbXFsWPH8MYbb0AqlWLXrl3IysrCwoULceHCBYSGhhp0XUKIdbnOS8EI6u8GqZNxRln1EdDXlVNHWd6oRP65SxopGJEeA9u91o4z2Xhs0xmNSXc2IgHeuq8/vn9yOELc7Y3S7/b49rDD5oXD8NG8SDjYcKfB1DQq8PT357BNy8g3H380/bKNDQqSr6G+mvsauzJfWUViK0KvIdwg/cYZWv6aEGuhU7BcUVEBZ2f9a2k6OTmpV7ozhJ2dHdauXYtbt26hoaEB+fn52LJli9aV/VavXg2WZbF161adrj127FiwLIvDhw8b3D9CiHmrLK7DnZvlnLauTsFQsXOQaORJZ9X2xyV+vrJ7/1avwbIs/nfkJlb+fkmj2kWYpwP2/isWj40I6pQloNvCMAweGBqAvf+KRQSvhJySBf6z5wre3ncVyjYqdAxwH8DZviyRIKMuitPm5ufAqXHclfifRhTnVKM0v6aVowkhlkSnYFkul0Mg0L9MjkAggFyu+yQPQggxplvnuPmuNvYiBHfh5DC+wH7cVIzMhiG4wVtVlB80qiiVLNbsvYpPDt3Q2Dexjxf+WBqDcC/Dah0bS4i7Pf74ZwzmDPbT2PftyQy89odmkK/S34P7JiFTIkZm4yDu9U0wqqzi19sFUmfu14pGlwmxDlQokhBisdLPc4PlsKFeEGqZkNZVgvpzg+UKhT/sGprbGDDo66a5mBPLsnhj92WtE/n+ObYnNj4WpZECYSp2EiE+mT8QKyZppu79dDYHL/6aArlCqbEvvEc4JILmYNSu0RHl8mDOMfw3G11JIGAQNpRbEu/m2UKqpESIFdD5r8a2bdsgFAr1+vfdd9+1f2FCCOkEsioBygq4q4CGDevc5ZHb4xnoCDtH7oS7wPLm4Lhnj56wF3NzjVmWxZu7r+CHpGxOO8MAa2b1w8tTIyAQdG3aRXsYhsFzE8LwyQMDIeT17Y+LeXjx1xSNlAyxUIwItwj1tl9lGHe/rRCewaYdOQ8fzv3+qSyuR2GG5gRLQohl0TlYbq1ecXv/CCHEFGrzuSOt9j1s4BOq/9wLY2IEDAKCucFjQHkf9f/zUzBYlsXbf17D94lZnHaRgMGnDw7CEyODO62vxjA3yh9fLhgCCW+1u13Jd/DG7ssafyMi3Zsn+fmV9+bs8wsAhCZeNc8j0BEu3lJO262zd1s5mhBiKXT6zaNUKg3+p1AoOvs1EEIIB8uyqMvnjuD2GuoJxgxGYIMECZxtv4owCJRNv4r5k/u+PZGBTSczOG1CAYP/PTwYswdp5gWbo8n9vLHx8SjY8BZC+SEpG+//fZ3Tpn79LOBfwQ2W/ZmkTu2nLhiG0RhdzkgpBo0LEWLZKGeZEGJxSnJrIK/l/noLizJtCgYAoL4CAUWbOE1ipQ3cawIAAIM8B6nbdyfn4Z391zjHCgUMPntoMKYN4C6SYe7G9vbExseHQizkvln5Ov42vj+dqd5WvX7neg84Nrpwjg0o+wGoNv0obi9e3nJdlQyNpcJWjiaEWAIKlgkhFif9Andin5O7rcnzXQEAKT/DTlkIVxE3rcK3MgwOYgf0dO4JADh1qxgv/pqicfpH8yJxb2T3CpRVxoR74LOHBoM/uP+fPVdwLK0pCPa194WHnQf8KriTA6WCUrgIMoHz27qot63r4SmFm78Dp6220DwmVxJCOgcFy4QQi8KyLG7zguVeUV5dXntYA8sC55pGlf0kVzi7fCt7ItIjEkKBEGkFVXj6+/OQKbif7b86LQJzhmjWmO9Opg3wwQdzuYuPKFlg2Q8XcC2/EgzDYJDnII0UjABJChgGwPktgML05Uh7DfHkbNcViMC2UUOaEN
K9UbBMCLEoxTnVqCpt4LT1GurZytFdKPs0UNSUo+sruczZ5V0ZioFug1Be24h/fHcOVQ3cgPCJ6CA8PdoyVi19YGgAnpvArXRR06jAk1vP4m5lPSLdBsKvgrvf3ya16X8q84C0/V3V1Vb15K3mp2wQoDDD8AW4CCHmjYJlQohFuZ2smYLhzvvY3CTONucqu0uucnZJlLboJRuAf/+UjOxSbrm7qf288ebMfqYfGTei5RPDMHuQL6ftTkU9/vH9eYTI+sJGwa040cP2UvPG2W+7oottcvG2h6svt8Tf7eRiE/WGENLZKFgmhFiU9IvcYDl0sKfpA83qIuDqbvVmpl0dSu3yOYekJgkQf4Pb94EBPfDpQ4M0ahV3dwzD4IO5kRgaxJ3El5JTjoQT3FH1Ctsi3JK2WFY6Ix4ovd0V3WxTT14qRkZyMaViEGKhKFgmhFiMsoIalOXXcNpCB3m0cnQXSt4OKGXNm3b2uON0i3NI/g1uv90dbPD1o1GwFVtmpQVbsRAbHx+KIDfuKHL+Le4iH/mO6Ui259XHvmD6Ba/4qRg15Y0ozKRUDEIsEQXLhBCLwU/BsHMSwzvEyUS9+X9KJXBuC6cp2SMI+bxg2U8uAPP/A5MiAYMvFgyBt7NtV/XSJFztJfjq0SjYqpYgZwE/OffNQYFjBpLdeDWlL/4AKGQwJTdfB40FSjJTKRWDEEtkULC8Zs0a5ObmGrsvhBDSIfy80eBIN9MvRJJ+FChvLhXHAkgWyHHHKZ1zmA0YeCma+vrGjL4YHuLalb00mT4+Tnj7vqaVC52VDBxZ7tcr3+k2LimqwUnOqLkLpP3VdZ1sRchAd852BgXLhFgkg4PlkJAQzJw5E3v27IFSqTR2vwghRC/VZfW4y/sYnB/MmMQ57iIkud79UNJYiTpJFUrtCjj7/OVCzB3ij8ejg7qyhyY3L8ofDw8PgJ+C+yepTlSNCtu7qFXU41ZAFPek81u7roOtCI7kpmKU3qlBRVGdiXpDCOksBgXLb7/9NgIDA/Hnn3/i/vvvR0BAAN544w1kZmYauXuEEKKbjBTuqB4jYuEb5tzK0V2kIg+48TenKbnnSPX/FzhyR5fDRWK8fV9/009INIH/zOyHATbctJMCx9vA/z+K5KAh3BPSjwJl3MVduppXiBNsHbjLqlMqBiGWx6Bg+bXXXkN6ejoOHjyIBx54ACUlJXjnnXfQq1cvTJ06Fb/99hvkctMXjieEWI/MS9wgxc5TDoHQxNMykn8E2BafvEkckCJtznMtdOQGe6ECMewkljmhrz22YiH6iW04bQVOzVUvksUMYNvyzQ8LXPy+i3qnnUDAILA/N12GUjEIsTwd+ksyceJE/PTTT8jLy8PHH3+M3r174+DBg5g/fz78/f3x6quv4ubNm8bqKyGEaNVYL0duWhmnzdbTxG/YlUrNYK7/XCSXNNdYLnTI5OxuqJShuqy+CzpnfuprZKi6y01hyHdsESwXXwIiH+SedOF7k6/oF8wLlu/cLEd9jWknHxJCjMsowy5ubm5YsWIFrly5gpMnT+Lhhx/G3bt38dFHHyEiIgITJkzAH3/8YYxbEUKIhtzrZVDKm2vcMgIGtu4mDpYzj3Mm9gFAdeR83ChrHkAot7uLekbBOabgtnWWHytIr+Bsy6BEsX3zRPK86jwU9ZvNPam6ALh5oCu61yq/CBdA0Py9xypZZF8pMWGPCCHGZtTPKNPT07F3714cOXJE3ebv749jx45h3rx5GD58OHJycox5S0IIQRYvBcOnpxME4lYO7ioXeKPKnn1xtF4GFs1pGSwEKLHh5icXZnCDRmuRn17O2S61YaBgRZy2faWlgP8w7okmnugnthHC1o37hodSMQixLB0OlmUyGX766SdMmDAB4eHh+OCDDyCXy7FixQpcv34dWVlZSEhIwLRp03Du3DksW7bMGP0mhBAATSN5mZe5I3n8PNIuV1sKXNvLaVIOfgyfJhzkttX5YfBgb06btY4sF2
ZwX/fAwV5Q1Ady2r45ewTywY9zT7x1GCg37SCMrRf3U4zsyyVQyKlKFCGWwuBg+dq1a1ixYgV8fX2xYMECHDt2DNHR0fjuu++Qm5uLjz/+GGFhYQCA6Oho7Nu3D8OHD0d8fLzROk8IIUU5VaitaOS0mTxYvvQroGho3hZK8EvDSOQ3XOUcFtZjAO4Z6sNpK8qusrpAi1WyuJvNXblv8CAvDPEczGkrV6ZhU9lgQOLY8mSTT/Sz8+AGy431Cty5WW6azhBCjM6gYDk2Nhb9+/fHp59+CplMhmeffRapqak4efIkHn30UdjY2Gg9r1+/fqiqqtK6jxBCDJF5iTuq7Oxphx6e0laO7gIsq7Ecc03IFKw5kguhXTan/Zl7JsEzmLvCoEKuRHFudad305yUFdZCVs9NZfAKccI/oydx2gS2d/BJ/C1UhN3PvUDyj00TKk1EaMvCI8iR00apGIRYDoOC5VOnTmHw4MHYuHEj7ty5gw0bNqB///7tnrdkyRJs3rzZkFsSQohW/Hzl4AEmXojkzkWg8DKn6dPSaDSIssEImkcgGTAY6TcUtvZijWWTC25bV94yfzEZ+x42sHe2QZTXIAiZ5rxlhmGhkGTgrTtDuReoyAEyTPupZdAA7qcZmSnFYFm2laMJId2JQcHy2bNnce7cOSxZsgRSqe4jONHR0XjiiScMuSUhhGioqWjA3Szup1XBkSYOlnkpATV2vvj2TiBE0gxOe5hLGJxtmuoGe4VyF0/h5+9aOn6w7PX/o+22IltEegzg7BNKM7Az3w0lDuHciyT/0Kl9bE/wADfOdlVpPcrya03UG0KIMRkULP/555/Ys2dPu8ft3bsXa9euNeQWhBDSrizexD6JrRA+vUy4al9jLXBpJ6dpW30sWAgg5AXLUV7Nyzd7h3BTMaytIkYh7w2PZ3BzSkPL5wTg/58jg68ro7kXubYXqCvvpB62z8VHCgcXbgpi9lUqIUeIJTAoWF69ejV27drV7nF79uzBmjVrDLkFIYS0i7+0cGA/NwhNuWrf1d1AQ/MoqRIMttfFAlBAaJfJObRlEOjFC5Yri+tRX20dC1s05WjzguWg5uehESzb5QKMDL82RkOOFqXl5PXA5d86ta9tYRgGgf24o8v8N3OEkO6pU/+qKBQKCAQmXm6WEGKR5DIFcq5zV+3jfxTe5XgT+44rInEH7hDY5oMRNnD2tQwCXXzsIRRxf1cW5VjHZOiSvGrOgjIA4Nlistwgj0EQMM3PhmEUENplowxOOKgYwr3Yxe2d2tf2BPGC5Tu3ytFYb+LFcQghHdapkeyVK1fg4uLSmbcghFip/PQKyBtaVFBggMD+JgyWS9KB7FOcpp8VYwFAIwUj2CkY7nbNudVCoQBu/g6cY4qyrSNY5ucr9/CSwkbavKKMg8QBEa4RnGNUz/NXxRjuxe5cAO5e65yO6sA/wgUCQfMiM0o5i7wb5SbrDyHEOETtH9Jk8eLFnO2TJ09qtKnI5XKkpaXh3LlzuO+++zrUQUII0Sb3Wiln2zPICXYOEhP1Bk3ly1ooYR1xWNk0etxWvrKKZ6AjJ3DkT1y0VIW8YLllvrJKlFcUrpY016gWSjMBAMeVkShke8CLKW8++OJ2YMo7ndHVdknsRPDu6cypsZx9uQQhpp50SgjpEJ2D5a1bt6r/n2EY3Lp1C7du3WrznMjISHz00UcGd44QQlqTc42bghHQx4SfYimVQMpPnKbdihjIIAKghI1DFlpWAdYWLHsEcoPEomzrqIjBf1PQMl9ZJcorCt9fba4yIpZmow5yKCDC74pReFbUYrXE1J+BiasBoWnWOw/q78YJlrOulIBlWTAM0/pJhBCzpnOwfOzYMQAAy7IYP348pk6dildeeUXrsRKJBL6+vggKCjJOLwkhpIW66kaNnN6APiZctS/zOFCZy2naqRgNABDZFkHJ1HD26RIsVxbXo75GBlt70wR9XaGxXo6yfO6z8QrWDJaHeHJzk1mmETb2+W
ioCcCvijHcYLmmCLh5EIi4t1P63J7Afm44/Ue6eruqpB7lhbVw8bY3SX8IIR2nc7A8ZkxzbtgTTzyBUaNGcdoIIaSr5F4vA1rMCRNJBPAONWHJOF4KxjVlIK6ywQCA6H4VSG5RbtfX3he+Dr4al3D1tYdAxHAmuxXnVME/wsRLd3eiktxqtFy3gxEwcOflbgOAi60LevXohVvlzZ9mRvcrR9yZANxmfXFOGY6hghvNJ1zcbrJg2c3PHvY9bFBT3jyhM+tyCQXLhHRjBk3w27JlS6v5yoQQ0tn4+cq+YS4a1SS6TH0lcJVbd141quxqL4GbG3fEWduoMgAIRQK4+3EDxbsWPsmPv6y3i7cUIolQ67H852brmIUAVzsAWib63TgAVN81Xkf10FRCjvsGJ/sKlZAjpDujum6EkG6FZVnzyle+uhuQ16k3ZawQuxQxAIAXJ4cjtfgi5/DWgmUAcOelYhRberDMS6XxCNCc3Kcy1Iu7xHVKUTJWTmtaxe9PxT2oY1tM7mQVTbnLJsIvIZd3sxyylpVbCCHdik5pGKGhoWAYBocPH0ZISAhCQ0N1vgHDMEhPT2//QEII0UFFUR2qSus5bSbNV+alYMQpB6IEzujv54QRvVm8d72Is7+tYNkz0BFXW2xb+shyUQ53ZNk9QDMFQ2WIFzdvuVpWjSCfCkSHuuH0bWC/8h7MFZ5oPuDidiB6GWCCiXX+fVzBCBiwyqYcE6WcRV5amemXYieEGESnYDkzMxMAIJPJONuEENLV+CkYUicJXH1NlA9aelujtrIqBWP1zH44V3iYs8/N1g1BTq1PfOZP8qu4W4eGOjls7HSeXtJtKBRKlN7hTu7Tlq+s4in1RKBjILKrstVtZwvO4s2Zc3DvZyfwq2IMN1guug7kXQD8W39z0lls7ETwDnVC/q3mZcuzrpRQsExIN6VTGoZSqYRSqUR4eDhnW9d/hBBiLPwUDP8+LqYry8UrF1fKOuCocgjuH+yHocGuOFNwhrN/uM/wNvvq5usAgZC731JTMcoLaqGQc/8+uLeRhgE0Pb+WkgqS0MfHCQ8PD0SSMgJZSk/uCRe/h6kE8RbIyeG9ySOEdB+Us0wI6TaUCiVy0/j5yiZKwVAqwfJSMHYrYiCW2ODVaRFQskqcyecGyyN8RrR5SaFYoDFKbqnLXvPzlR1cbdotk3ePzz2c7fOF5yFTyLBiUjgcbSXqUX0V9vJvgKwOphDYlxssV9ytQ2WxafpCCOkYCpYJId3G3awqNNbJOW0BpiqtlnUSTEUOp+k3xSgsG98LXk62uFl2E2UN3MB+uDd3ZFQb/uhqSV51K0d2b0W8Shju/m2PKgOaz69OXofLJZfh5mCDf08Mx2+K0VCyzSPzTEMlcP1P43RYT+7+DrB14Ab/NLpMSPdEwTIhpNvIvc4NNlx8mmramoLyIndU+boyAOXOfbE4JgQAkJSfxNnv5+AHf0f/dq/LLx9XklfTypHdm2YljNbzlVVcbV0R7hLOaUvMTwQAPB4dBDuPICQo+3H2Ky5s72BPDcMIGAREcKu0ULBMSPekU7AsFAoN/icSWd7EFEKIaZhNybiGaiiu7OI07VSMxktTI2ArbqoTzM9X5qcQtMaNN8mt9E4NlArLmvvBsiyKNSphtD+yDGiOLqtSXcRCAd6Y0VcjFUOQEQdU5Bne2Q4I6Mv91CP3ehmUSraVowkh5kqnSDYgIIDWtSeEmFRjvRwFtys4babKV65L+R12iub8UzkrwA2vqXgtsmllPrlSjnOF5zjn3OOtW7DMH1lWyJUoL6wzXcWPTlBVWo+GWm46TVuVMFoa4TMC2681jxanFKWgTl4HO5Edxvb2xPbQaajK2QJHpunrw4BFzbkfYD/hZeO9AB3xvz8bauUoyqqCV4jmkt6EEPOlV+k4QggxlTs3y6FUNI/KCQQMfMN6mKQvRSe3ILDFdrxyIP45IwYCQdOgwpWSK6iRcdMn+JUcWmPrINZYLrkkr9qigm
X+qLKNVARHN1udzo3yioKQEULBNi3yIVPKcPHuRYz0HQkAWDF9EP78cgQeEh5Tn1N35nvYj3+py2suO7jYwsVbirKC5vXOc66VUrBMSDdDOcuEkG4hl5eC4RXqBIlt16d5FWZdR2DlBU7bDZ+ZGBHaXP2An6/cq0cvuNvpXmPXjTe6XGxhk/z4y1y7+zvo/Omlg8QB/dy5ecktn3dfXyfc7TmXe/2GbORfPm5gbzuGP7pMecuEdD8ULBNCuoUc3uQ+U6VgJO/7irNdztpj0v0LOW38knG6VMFoyd2fO4pckmtZwTK/wgc/T7s9/JQW/vOeO3suMlhvTlv64Y163cNY+HnLBbcr0Fgvb+VoQog50mlYJju7acUkPz8/CIVC9bauAgMD2z+IEEJaUVPeoLHamymC5St5Zehzdx/QYhD0usdkjPBpHlWul9fj4t2LnPN0ndynwg8eLa18nEaw7KdnsOxzD7659I16+2rpVVQ2VsJJ0pTe4OciRVzAfQjJbX5jE1l+BMm38zEo1KcDPdefb1gPCISMOoVIqWBx50Y5reZHSDeiU7AcHBwMgUCAq1evIjw8HMHBwTp/ZMYwDORyehdNCDEcf1RZYieCZ5Bu1ROMaffu3/AaU8Rpi5jyDGc7pSgFjcpG9baAEWCo91C97sMPHqvLGlBfI2t30Y7uQN6oQEURd3EOfYPlgR4DIRFI1M9ZySpxruAcxgeOVx8zZMazUH71NQRoClKdmDrE79mCgf9e2aUT1iW2IniHOuPOzXJ1W861UgqWCelGdAqWR48eDYZhIJVKOduEENIV+PnKfuE9IBB2bRZZ0u0S9Lqzh/Nbs0QaCrde3FFjfr5yX9e+6hFPXfXwkkIgYqCUN09oLMmthl9vE5XKM6KyglqgZfU0BnD10W/yoq3IFoM9ByOpoPlZnyk4wwmWnbyDketyD/zLEtVtg0v24+j1JzGhj5fB/TdEQB9XjWCZENJ96BQsx8XFtbndWerq6vDee+/hp59+QnZ2NlxdXTF16lS89dZb8PPz0+kacrkcb7/9Ns6ePYtr166hqKgIMpkMAQEBmDRpEl555RUEBQV18ishhBiKZVmT5yuzLIv//ZWMr4WJnHbHEY9rVFhoGcABulfBaEkoFMDVx55TNaI4zzKCZX4KhpO7HcQ2Qr2vM9xnOOdZ89+kAIDn6MXA7uavWazgMh7bfwJje8+FUNB1Az4BfV2RtOe2erusoBZVpfVwdNWtAgghxLTMdoJffX09xo8fj7feegvV1dWYPXs2AgICsGXLFgwePBi3b99u/yL/f501a9bg+PHj8PHxwdSpUzFlyhQ0Njbiyy+/RGRkJM6dO9f+hQghJlF6pwa1FY2ctq4Olo+l3YVn3kHYM83l3JQQQDL4Yc5xFQ0VuFx8mdOmb76yiuZKfpaRt1zCyz13M7AkHv+53iq/hcKaQk6bpN9MyETNz1HAsBhYegB7U+4YdE9DeQQ6wkbKHZui0WVCug+jBctlZWUoKysDyxpndaK3334biYmJiI6Oxo0bN/Dzzz8jKSkJn3zyCYqKirB48WKdrmNra4uTJ0+irKwMCQkJ+PXXX7F7927cvn0br776KiorK/HMM8+0fyFCiEnwgwpHV1s4e9p12f2VShYfHbiBucIT3B09xwOO3IoLSflJULLNq+3ZCG0wxHOIQffVmORnIRUxSnlBv6H1o/u59YOjmJu3furOKe5BEilEkdwycvOEx/HpoTTIunBVRIGAgT9v6etcCpYJ6TY6FCzv2bMHkydPhoODA9zd3eHu7g5HR0dMnjwZu3fvNvi6jY2N2LBhAwDg888/h4ND8x+NFStWIDIyEvHx8Th//ny71xKJRIiJidFYdlsoFOKtt96Cra0tzp8/j4qKilauQAgxpdzr3Hxl/z4uXTpnYm/qHVTmpyNGeIXTLhi8QONYfrAW5RUFW5FhH7XzJ72V5teAtYClkjVGlvWc3KciEogwwncEp00jWAbADOJ+nU
IFBXAtS8Fv53MNuq+hNOotXy+ziK8nIdbAoGCZZVksXrwY999/Pw4fPoza2lo4OzvD2dkZtbW1OHz4MObMmYOFCxcaNNKckJCAiooK9OzZE4MHD9bYP2/ePADA3r17Dem+GsMwEAqFYBgGEomkQ9cihBifQq5EXouJUUDXpmDIFEqsO3QDc3ijyqytM9B7OreNZZFwJ4HTFuMbY/C9+SOu8kYlKkvqDb6eOaivkXFWJgQMH1kGNJ/vqTunoFAquAcFDAfr1ovTNE8Yj8+O3ESDnHdsJ+J/39ZXyzQWZyGEmCeDguX169dj69at8PHxwZdffony8nKUlpaitLQUFRUV+Oqrr+Dj44Pvv/8e69ev1/v6KSkpAIAhQ7R/fKlqT01NNaT7AJr+sH3wwQeoqanBuHHjYGfXdR/rEkJ0U3C7AvKGFgENA42PszvTr+dykVVSo5GCwfSfC4i5I8YZFRkoqCngtMX4GR4sS50kGnmuZfk1rRzdPfBrZQuEDHp4SQ2+Hv/5VjZW4koJ9xMAMAyYQY9wmmYIE1FaUYEfk/RbM6AjnNztNNKHsq+WdNn9CSGGM2it2I0bN0IqleLEiRMICQnh7HN0dMRTTz2FSZMmYcCAAdi4cSOef/55va6vWvTE399f635Ve1ZWll7XfeWVV1BYWIjKykqkpqYiPT0dffr0wbffftvuuQ0NDWhoaB4RqaysBADIZDLIZDK9+mGtVM+Jnpd+rPm5ZV0p5my7+ztAZMO0+yyM8czqZQqsP3wDQ5k0BAu4E8fk/R8Ey7t2fE48Z9tL6oUAaUCH+uDiLUXB7Ur1dlFuJfz6OBt8vfZ09vdaUQ433a2Hlx2USgWU/NFgHblJ3BDiFIKMygx124mcE+jTow/3wL7zIDryFpgWNZenCM5iw1FHzBnkDanE8GXT9Xlmfr17oOJuc43p7KsliJygW2UnS2PNv9cMRc/MMMZ4Xgb9hsjIyMDkyZM1AuWWQkJCMGHCBBw8eFDv61dXN300parrzGdv3/SxXVVVlV7X/e2335Cenq7ejoyMxPbt29t8HSrvvfce1qxZo9F+7NixVvtJtDt06JCpu9AtWeNzu5soBdBcVqxeVIL9+/frfH5HntmxOwwKq4RYLjrOaa+y8cHR5AIghduPvdXctDB/uT/++usvg+8PAFUyGwDNKWKXz93EHdmlDl1TF531vVZ2hft66tgKvb6e2vg0+CADzcHy/qv74Z+jOdAS7dgPnlXNlUrmCY9jd00sXt92CJP8Op47rMszq6sRAWgeXb5zsxz79u6HQP/KeRbDGn+vdRQ9M/3U1tZ2+BoGBcseHh465fiKxWK4u5vPKkW3bt0CABQXF+P8+fN4/fXXERUVhW+++QZPPPFEm+euXLkSK1asUG9XVlYiICAA48aNg5ubWxtnEhWZTIZDhw5h0qRJEIu7/0pkXcVan1tDrQzf/c2tazx6epROaRgdfWa1jXKsWXcCtqjGvUJu/V7pyCWYPvJeTlu9vB5v/fYWp23+8PmYFDhJ73u3dFmah1M5zWUypUIXTJ+uOY/DWDr7e23PjRTUoHmkvP/QMAyeHNChazrfccapuOaJfXnKPIyaOAqOEm6lDOZKHbDrafV2jOAKfFGME0XeWPPoKDjZGfZ69XlmjXVybLt4GuqpPCyDQWHRXZpaZC6s9fdaR9AzM0xJScfTnQwKlu+//35s374dZWVlcHHR/kNeWlqKo0ePYsECzRnj7VFVv2jt3UBNTVPem6OjYcvduru7Y8qUKRgxYgQGDBiAZ599FuPHj0dAQOu/tG1sbGBjY6PRLhaL6ZtWT/TMDGNtzy37dhlazg8WigUIiHCDSKz7MJyhz2zHqWyU1sgwW3AWjkyLpZkZAYSDH4GQd82zd8+iQdGcpiVgBIjxj+nw18s9gLvyX3lBLURCEZhOXlCjM77XWJZFWT73d7pHgFOH7zPCbwRshDbq569gFThffB6TgnhvVPrNBv56CWhoCtYFDI
v7hSfxed192JaYgxWTe3eoH7o8M7FYDM9gJxRmNL9hyL9RiZABnh26d3dmbb/XjIGemX6M8awMmuD39ttvIzQ0FOPHj8fRo0c19h87dgyTJk1Cz5498e677+p9/cDAQABAbq720j6q9o6uvOfs7IyZM2eirq6OPtYgxMzk8Ja49u3lrFegbKjqBjm+jm9K15on5KZgIHQs4OSrcQ6/CkZ/9/5wtul4bjF/GWi5rPtWxKitaERDrZzTZuiCJC3ZimwR5RXFaUvIS9A8UGwH9J/DaZonjAfAYtPJDJRUN2ie0wk0S8hRvWVCzJ1OI8vjx4/XaJNIJDh//jwmTZoEV1dXdeCanZ2tHvIeMWIE7rvvPhw5ckSvTg0cOBAAcOHCBa37Ve2RkZF6XVcbVZpIUVFRh69FCDEe/mIk/hFdUzJu26lMlNXK4ItixAh4lRUGaf+kjF/ftyMl41qSOklgYy9CQ01zkFmaXwNnj+5Xvae0gFsJQyQRGG2555G+Izlfg4Q7CWBZVrMe96BHgfNb1ZshgkJEMTdwvrE3vopPx+v39jVKf9oS0McF5/ZnqreLc6pRV90IOwcqX0qIudIpWI6Li2t1H8uyKCkp0ZoTcvr0aYMWD4iJiYGzszPS09ORnJyMQYMGcfbv3LkTADBz5ky9r80XH980g71nz54dvhYh3QGrVKL+8mXI7nTtkr/6qKoBKou4wYNr2XVU/n1Np/MVcgUcUlNRLRRBKNJ9NLquUYGU3ZcR26jAVMFZVAtbpF6JpUCWBMj7m3NOeX053BNvoOXsjFhbASoLuMcZytlWhLs1zR8C5sddgNudzll9ztDnpov82wK0/JPjZCdH1cEDRrl2dDWLEddaPpM7uMV8By97L+6BLAuUhQBV+eqmJ5V/wk5Rj1u/piKnIR3OUv0+stX3mdkpAZFQDLmi+W/jzZ/iEOzfdSsK6kvk6QW7gZFghFY8E5FYNZ2C5YyMjPYPMiKJRIJly5bhnXfewdKlS3Hw4EF1BYx169YhNTUVY8aMQVRU80dvGzZswIYNG3D//ffjvffeU7f/+eefcHFxwciRIzn3qK2txTvvvIP4+Hh4e3tj6tSpXfPiCDGxgv/8B+W/7jR1N9qU5zMS6N08iiturELt6pWog+5VC3wBFPzwo973Xt6yH+CNZse/ovWcFfyGXeuRp/edtROFPwT4jlJv5x89D5cvvjPS1TUZ+tzacyfsQcBvtHpbnHYOebu3Ge36/K+BfNf7bXwNmr+uIbiD1/E9AKA6ETBkmRB9n5nzgGdR4tZfvX1rTxLEN4z/zI3JcdpU+P/3v6buBiEmoVOw3NHcYEOsWrUKhw8fxqlTpxAWFoZRo0YhKysLSUlJ8PDwwObNmznHFxcXIy0tDfn5+Zz2s2fPYs2aNfDz88OgQYPg7OyMgoICJCcno7S0FM7Ozvjll184S2oTYqkUlZUo3/mbqbvRrjIXbp1cl7I0dY1ca2Nfw/2dVmPvY6KedEyN1JuzbV9b0MqRls+l7DonWC5zjQALoOsWcddf1V9/Q/bqqxB7ebV/MCEWxqAJfl3B1tYWx44dwxtvvAGpVIpdu3YhKysLCxcuxIULFxAaGqrTdebMmYMVK1bA19cXZ8+exS+//IKzZ88iKCgIK1euxLVr1zBq1Kj2L0SIBVBUVgIGLEHflVgwKHUJ57S5ll03UW9Mz563KmCN1BusWYdV2tXaU7Cswv9+rrd1Q52dh4l6oztFWVn7BxFigQxftqiF8vJyVFVVgW3lj7CquoW+7OzssHbtWqxdu7bdY1evXo3Vq1drtEdGRuKTTz4x6P6EWBylZl6kJCQEEJjP++YKsSfkYu4nPV5OdZDY6z6vgGVZVFdXw8HBQad5E0oli8ySGihZFgxYhDAFEKDFs7L3AKSa9dTr5HXIr+bmfgc5BUEoMMqvVgCAi4Cbu60USqAIHwypoqKVMwyn73PTlYyxQaOEWwbP2U0MibPx5oooWSUyKzKBFp9AeN
l7w16speJGfSVQxf26ZbLekEMIBgyC3aUQ6vgzYcgzcwEgUdSgUdjct8qwWPSoSdXp/K7S2GIRLwBaf38QYg0M/o1eUFCAVatWYc+ePW0WfGYYBnK5vNX9hJAupOUNbcjuXRDosMhQVzn/dyawq3khjh5eUvT/aode15DJZNi/fz+mT5+uU43NTw6m4X9HmxYtmi04ifWSL5p3MgJg+XGtJeM+OPMBtl/brt7u79YfO2bo19f2sCyLUy+e4FTEcHh7PUIijb/gk77PTVf56RXAR+fV2wIhg/6/b4NQaNw3aR8cXIKk/OZFZO7vNQprY7QMtsjqgI97Aw3Nbzj+kI3FBsX9AIAlsSFYNUO3yhiGPrOMzVdw40zzMur1Ex5Gz6f1L7Xama4NiARaLBXc2oAYIZbOoN9U+fn5GDp0KDZv3gwbGxt4eHiAZVmMGDECnp6e6h+o6OhoSnEgxIywWkaGjDmCaAz8+soBnby6WVlNIzafbJ7ErFlbeZzWQJllWcTnxnPaRgeM1jiuoxiG0ai3XJZf08rR5onfX2dPqdEDZQAY4z+Gs3089ziUrJbRUK01l49DNSq9PSkLRVWdW3eZX285L60MSqV5BaMavxvMrH+EdBWDFyW5c+cO1q5di5ycHEybNg0MwyAhIQH5+fmIi4tDREQEGIbBX3/9Zew+E0IMpW1kyIxSMGSNCuSnl3Pa/Pt0bn3lb07cRk2jAgBaqa38iNbzMiozkFOVw2kb6z+2M7oIF29usFxeqH11U3PFr7Hs6i3tlPvwn39JfQmullzVfvDgRzmbwYJCDGXSAAD1MiU2Hk/XdpbR8Je4bqiVoyi7qlPvqTf+7wZtbzwIsQIG/ZX8+++/ERISglWrVmndP3r0aBw8eBAXL17EW2+91aEOEkKMSFuwbEYjy/m3yqGUN/eRETDw6915I8vltY3YdipTvX2/8CQETItnZOMMRNyr9dzjOdwRaE87T0S4RnRGN+HCCy7LCrpXsMxf5trFp+Mr92kT4BSAEOcQTht/9F/NLwpw504kbfmpwveJnTu67OBiq/F15S/EY3L83w2UhkGslEHBcl5eHmehEOH/FypvaGj+xeLn54dx48bhl19+6VgPCSHGo22CjhkFy/wUDK9gR9jYGW+yHN/mhEz1qDLAaqZg9J/T9JG9FtpSMDorpaWHFz9YrulW+aNlvJFlF5/OGVkGNFMx4nNaCZYZRuNTgxnCRNihaTnxLhld5n1qkmtmS1/zv5+1pXERYg0MCpadnLizmnv06AGgKYhuydbWVqONEGI6LD/nkGHMKmdZY4nrTkzBqKyXYUtCc67yEOYmQgS8cmatLG9d0VCBi3cvctr4QZox8UcgG2rlqK+WtXK0eZE1KFBVWs9p46eVGNNof27e+LXSayisKdR+cOSDTRM4/58DU4+pgrPq7e8Ts1Bc3Xmjy/x8/Pz0CsjUb97MgEYaRvd5g0aIMRkULAcGBiI7O1u93b9/U3H1/fv3q9tqa2uRkJAAH5/uWUCfEMukGSybi9rKRpTkctdPC4jovGB5W0ImquqbK0w8IOKNQLr9X3vnHR5Fuf3x72xP771XekLvCNKLiCBWFBAVG6I/vPeq1y56sXsVUWw0C3a4okgv0nsSakhvpPe2fX5/hJR3Zjd1k2zY83mePMmced93353szpw5833PiQICh5rse/TaURj4RqdGKVVihN+ITpknADh52EEiY/9XPUWKUZZfw37sOMDNp/MiywO9B8JJ4cTY/s7523RjZ38gYiJjukPW2LYuupwq7GUxAqLdwEka/69GPY/cpLJOe702QzIMggDQTmd54sSJSEhIQGFhIQDg1ltvhYODA/75z3/iueeew+rVq3HzzTcjPz8fM2bMsOiECYLoAMLHqFa0uC87kY0qy5VS+IQ7m2ndMao0enzdJKqsgga3yU+yjQbea/Zm4kDWAWZ7uO9w2MlMyzUsgUTCwdWbdTB7yiK/EkEmDGcPFWQKaae9nl
wix1j/sYxNqC9nEEgxRksuIgCFDdubjqV3WnRZYSeDTyj7Gc+6YkWFP4TnB5JhEDZKu66UCxYswLx583DpUt0qY3d3d3z++efgeR7vvPMOnn76aZw6dQp9+/bFm2++adEJEwTRfoSaQ+uSYLBOQkC0a6ekFwOAb49noKymUcYwVXIadsYmTh0nAWLvNtlXb9TjcM5hxtaZEox6xIv8ekb6OLFeufMkGPUIU/gdzz0OtV5tunGvWYDKhTHNlzX+f9U6I77sxOhyYB9WimFNi/zEmmWKLBO2SbuuRLGxsdi8eTPGj2+8QNxzzz24evUqPv30U7zxxhv4+eefcfbsWbi4uDQzEkEQXYrwWmclzjLP88juIr1yrdaArw6xzs8jzsfZRmZyKwNAXEEcKrQVjE2ok+0MRIv8ekhkWRgB70wJRj1j/cdC0kSLrDaocTLvpOnGchXQfz5jWmh3BE2/LJuOdZ52WZhvuTi7CjUV2k55rTZDmmWCANBOZ9kcwcHBePTRR/H888/j9ttvt2gFKIIgLIAwT6qVyDDK8mtQVco6I52lV/7+ZCaKqhqdET8Uo6/6LNvITG5lANiXtY/ZjnaLhp9j56/NEC6K61Ga5SYInf7OwFXlioFeAxnbvsx9phsDooWcHrprGCG92rBdqzN0WnTZJ8wZciUrS8lJtBIphkizTDIMwjax2JWytLQUpaWlPSqdEUHYHFaqWRZKMBxcFJ2SXkytM+Dzg2w6sBU+Z8GhdbmVeZ7H3oy9jG1i8ESTbS2NUIZRWVQLvc6KMieYgDfyKCuoZWzC99FZCP8v+7P2w2A0c7wCBgOevRjTM16nmO1vjmegpNryEV+pVAL/aFfGZjVSDAmljiMIoIPO8u+//46pU6fC0dERnp6e8PT0hJOTE6ZOnYr//e9/lpojQRCWQnAzax0iDLFzENTHvVP01D+dzkIBU2iCx2z+ANuo/1yzuZWvlFzBteprjG1y8GTLTtIMwogszwPlAkfU2qgsVcOgYx0sF+/ucZZL1CWidH8NmMi5PLT6ABy5Rp1zjdaArw93TnRZ+BQl63KJVQSeOE4ow+ieeRBEd9MuZ5nneSxZsgRz587Fnj17UFNTAxcXF7i4uKCmpgZ79uzBvHnzsHjxYqv4whMEUYdogY4VRJYNBiNyrrKR5c7QK2v1Rqw9wEaVFwflQ1WRxjY0k1sZAPZk7mG2AxwDEO0Wbaa1ZVGoZHBwVTI2a8+IUZ7POvMKlRT2zoouee0gpyD0cmOjxXsz95ppDVHOZYmuBv8OTWKabDyagbIay0eXhYv8qko11nEjRDIMggDQTmf5o48+woYNG+Dn54fPPvsMZWVlKCkpQUlJCcrLy7F27Vr4+fnhm2++wUcffWTpORME0V6sULNckFYBnZp9PB4oKNZgCX49m41r5WxGhMdcBYu+PCKBwGFmxxDqXicHT+7SjCI9rey1cBGiq499lx6vScGTmO29mXvNB3Cc/YAItv1cyUHGX6zS6LHuSLqFZwm4+znA3oW9ibAKKQaljiMIAO10lr/44gvY29vj0KFDeOSRR5iKfk5OTli6dCkOHToEOzs7fPHFFxabLEEQHUToKFhBNgyhU+AR4AAHF6WZ1u1DZzDi0wPJjG1siD18MrezDZvJrZxeno7kMnaMSSGTTLbtLMQZMaw7fVx3LO5rivD/k1udi0sll8x3EEgx7HKOYlEf9vOw/kgaymstWz2R4ziRFCPbCvIti1LH0ZNiwkZpl7OclpaGSZMmISwszGybsLAwTJo0CWlpaWbbEATRtVhjnmWhU9AZEoz/xV1DVgn7WPul8CRA0zQFHAfEmM6tDIgf4XvaeSLWK9aS02wRUWQ517ojy2UF3essR7lGIcgpiLEJF2gy9Jopyrm8zOM0c/9UqdZj49F0C86yDqEUIzuxFEZDN0dyhecHyrNM2Cjtcpa9vLygULSsO5PL5fD09GzPSxAE0RlYmWZZW6tHXhqbs1iYd7ajGIw81u
xnI8IDg1wRfW0L2zByEuASYHYcobN8c9DNTC7frkDobJYX1Fh1tK8sr3udZY7jRAswm9Uty1XAgDsYk2fSL5jVz4exfX04DZVqy0aXhZFlba0eBZmVFn2NNiPKs0wyDMI2adeZfu7cudi3bx9KS80/JiopKcG+fftw2223tXduBEFYHOuSYeRcLWUWHUpkHPwjXS36Gn8kXENaEStXeHaYFFzGUbbhoPvNjpFXnYfzRecZW1dlwWiKsOS1Vm1AbaVlnTZLodcaUFnKasS72lkGxFkxUstTkVreTFYLYY7tsgz8ow8rFSqv1WHTsQxLTREA4OCqFFU3zL7czVIMiXCBn/XemBFEZ9IuZ/mNN95AeHg4Jk6ciH37xIne9+/fjylTpiAiIgL/+c9/OjxJgiAshCjPcvc6y8L8yn7hLqICDR3BaOTxyT42qtw/wBkjy/9iG9p71D2CN4NwYZ+TwgnDfM0vBOwsHN1VkMjY/1l5gXVKMcoLa0X3ZkJnvyuI8YqBl50XY2u2QIn/YMCrN2MKzdqKaYLo8leHUlGt0VtsngAQJFjY2t2L/DhQnmWCAFrpLE+cOJH5mTNnDhQKBeLj4zFlyhR4eXlh6NChGDp0KLy9vTF58mTExcVBoVBQZJkgrAhh6jhRHtUuJvtK55a43nkpH0kFVYxt+YQwcPGb2Yax9wAy89IyoXM1PnA85NKur1AqkXBw8WRzQAuLflgLwkwdjm5Ki94ItRYJJxFFl5vVLZvIuYyLW/HUOLb8eWmNDt8et2x0WShByksth07TjYVnRNkwKLJM2Cay1jQ6cOCA2X08z6O4uBjFxcWifceOHbOKBUQEQVzHirJhVJWqRQ6VJfXKPA98dpBdYNzb1wmTZfFAVT7buBkJRqm6FKfzTzO27pBg1OPibc8cN2uNLHf34r6mTAqehB8Tf2zYvlB8AdeqrsHf0d90h5i7gD2vNmp0ddXoW3YAk/tEYc/lgoZmX/ydinuGmte5txX/aFdwEq7hptZo4HEtuQwh/Tws9hptQrSmgZxlwjZplbNMGS0I4gbBivIsCx8xK+1l8Ap2stj4F0o5XMljF0gtmxgJSdwzbMPAYYA3+9i9KbszdsPAN0b3VFIVRgeMttg824qrd8+ILIvSxnWDBKOeob5D4axwRoW2cTHprvRdWNx/sekOTr5A5GQgaVejLe57PDnpW8ZZLq7W4ofT2fAxMUR7UKhk8A1zRm5KeYMt63JJ9znLwntpkmEQNkqrnOWQkJDOngdBEF2ANaWOE+qVA3u5QWIhDTXP89iZzd4IRHg5YEYIB2zZyTZuJqoMADvT2fbjg8bDTma6HHZXICwXXV5opZHlbs6x3BS5RI7JIZPxW9JvDbYd6TvMO8tAnRSjqbOcfgixjmWY0MsLBxILG8xfHkrDv/pabq6BfdwZZ7k7F/kJZVqiCqAEYSN0f/kugiC6DiuRYfBGvlP1yn8nFSGrmn1vyyZGQpqwGWgSJYbcAeg/z+w4hTWFOJV3irFND51usXm2B1ORZWtLH8fzvFU5ywAwLXQas32x+CIyKzLNd+g1E1C5sra4zXhyYhRjKqzS4niB5b5HwkV+xTlVqKmwfIntViFKHWddnzOC6Co65Czn5+dj1apVmDlzJmJjYxEbG4uZM2firbfeQn5+fssDEATRtQgvdt0kwyi+ViVKeWYpvTLP81hzgE0NFuJhj9kD/IBz37KN+88FlOalH7sydoFvotO0l9ljbMBYi8yzvQgjy3qNofucKTOoq3XQ1LCZIrrbWR7uOxzuKvYzJnxqwCBTinIuI+57DAlyxrgotn7AnhwJNDrLLMTzDnOGXMUuhBTeWHYZwptpyrNM2CjtvlL++uuviI6OxosvvogdO3bg/PnzOH/+PHbs2IEXXngBvXr1wq+//mrJuRIE0VGsJHVc1iX20bKzpwouXpaRNhxNKca5rHLG9sTNkZBlHwdKUtjGgxY2O5bQmZoYPBEqmcoi82wvjq5KSO
XsqdvaFvmVC3TUEikHJ3fLljBvKzKJDFNCpjC2Hek7mu8kzIpRngmk7MfySWx0uVzH4Zdz1ywxTUilEgREC1LIdVfpawmljiMIoJ3O8unTp3HPPfeguroac+fOxZYtW3Du3DnExcVh69atmDdvHqqqqnDvvffi9OnTLQ9IEESXIHxcL8yj2lVkdaIE4+O9SezYbnaYOygAOPcN29AzGggabnacvOo8nCs4x9i6W4IBAJyEE91YWNsiv/JCdj7OnnaQSLtf9SeUYlwtvYrUsmYKlPgPAnxjWNuZ9RgW6o5R4eyiu8//ToNWbxlnMlAgxci+XNItUhtRaklSYRA2SrvOXqtWrYLBYMDPP/+MX375BXPmzEFsbCxiYmJw66234ueff8bPP/8MnU6Ht956y9JzJgiivVhBuWu9zoDcpDLGJiz1215OpBbjRBrriD82IQJyXSVwcSvbeND9zWq2hVFlJ4UTRvt3XxaMpggzS1hfZJmdj6WeGnSUwd6DRQVKmo0ucxwwZDFrS/wLqMwTRZdzy9X49Wy2ReYplCRVlWpEGvAuQfj9oMgyYaO060p5+PBhjB49GnPnzjXbZu7cuRgzZgwOHTrU7skRBGFhrCB1XF5KOfS6JvPgxJG09rJaUK3P11mJ+UMCgfO/APom0U6JrK4QSTPsSGOdqEnBk7qlEIkpXKw8fZwwsmwtzrJUIsXU0KmMbUf6juajtgPuAORNbk54A3DuW4wMd8fwUNapXbM/GTpDxx1KN197OLiwRXKyu0OKIVrgR84yYZu060pZXl6O4ODgFtsFBwejvLy8xXYEQXQRQhlGN2iWhSnjvIOdoHLouBN6NrMUh5OLGNvD48KglEnFEozo6YAjG2Fk5liZhQvFFxjbjNAZHZ6jpbD6yLLQWfa2DmcZEEtp0srTcLX0qvkOKmdxxpSzm8DxPJ6cFMmYs0trseVcTofnyHGcSJrUHaWvhaklrS3rCkF0Fe1yln19fXHu3LkW28XFxcHX17c9L0EQRCcgXqDTHc5y5+iVVwu0yk5yHncOCQDyzgPXBOerwYuaHUsowXBTumG4n3l9c1cjdD7LC2qtKgeucIGfi1f3ZsJoSqxXLPwc/Bhbiwv9hjzAbpdlAGkHMDbSEwODXJhda/YnQ2+B6LJQipGTWAqjBcZtEyIZhvV8xgiiK2mXszxt2jQkJibi3//+NwwGcbocnufx4osv4sqVK5g+vfsXxBAEcZ1u1izXVmlRmMVW1bNEyrjz2eXY36RQBABM8jdCJZcCp9ezjZ38gchJzY63PW07sz05ZDJkklbVcOoShJFlvc6I6nJNN82GRV2tg7qaTQtoLTIMoC5aKlzo91faX81HTQOGAD79WduZDeA4DssmhDPmjOIa/B7f8cwYQmmSVm1AQUalmdadBMkwCAJAO53ll156Ce7u7nj77bcRGRmJZ599Fp999hk+++wzPPfcc4iMjMSqVavg4eGBF1980dJzJgii3XSvs5x9pZSZgkwhgV+4i/kOrWT1Pjaq7GYvx2gfHtBWAQk/sY0H3w9I2Dy2TUksSURSKTvejDDrkWAAgL2LAjIl+x6sRbdcUcTOg5NwcPLo3nR7QqaHsUGcnKocUeYTBlML/a78CVQV4KYoTwQ5sN+rT/Ylw9DBKKyDixLu/g6MrculGEKZFskwCBulXVfKwMBA7Nu3D/369UNGRgbeffddLFu2DMuWLcM777yDtLQ09O/fH/v27UNgYKCl50wQRHsRyjC6WIWRdYm92AdEu4lyBreVy7kV2HWJLYL04JhQKKUAd/E3QNskGsdJgMHN51belrKN2fZ18MUQnyEdmqOl4Thx+jhr0S0LJRhO7kpIZd2fNq4pfd37IswljLFtS91mpvV1BtwBNC1zbtQDcd+B4zhMD2S/V6lF1fgjoePRZWGWmK5e5CdMLUl5lglbpd1nsAEDBiAhIQH79u3Da6+9hqVLl2Lp0qV47bXXsG/fPsTHx6N///4tD0QQRJ
ch1LWK8qh25mvzvCgyZgkJxieCDBgudnIsGBEEAJCc3cg2jpoKuJi/gdcb9fgz7U/Gdkv4LZB04XFqLabKXlsD5YWCtHHe1qNXrofjONwacStj25m2ExpDM1IWO1egnyAD1NlNAG9EPzcefXzZSpCr9yXD2MHocmAfVoqRl1oOrVpvpnUnIHzyRJplwkZplwhv3rx58PPzw5o1azBhwgRMmDDBwtMiCKJT6MZy16V5NagqZZ2RoL4dc5aT8iux/UIuY3twbBgclTK41qRCkhfPdhi6pNnxTuSeQFEtm1FjdvjsDs2xsxA6odYaWbYmvXJTZoXNwkdnP2rYrtRV4mDWQVFqOYYhi4H47xu3S1LBZRwGxwHLbg7HE5sbP2/JBVX460IeZsX4icdpJf5RrpBIuAan22jgcS2pDKEDPFvoaSFEmmVylgnbpF1Xyu3bt6O4uNjScyEIorMRLtBppiiHpRFKMBzdlHDz7VjU8ZP9ycz120klw6LRoQCAkKL9bGOXICBycrPj/Z7yO7Pd16Mvwl3DzbTuXqw3stwznGU/Rz8M8x3G2IQSHBFBwwGvPoxJcm4TAGByb2/0FkWXkzoUXVaoZPAJd2ZsXSrFEJ4eaIEfYaO0y1kOCwtDdXW1pedCEEQnI9QcCvOodiamJBgdef3UwipsE2QdeGB0KFzs5IC6AoGlx9kOgxc1u7CvWleNfZn7GJvwUb01IYwsVxRaR/q4MlGOZeuTYdQjfGpwOOcwStTNLKIzsdCPu/InFLoKSCQcnpzIVvW7klcp0tO3FaFUqSsX+QllWtbw+SKI7qBdzvI999yDgwcPIi8vz9LzIQiiM+kmGYZBZ0TOVTYi1lG98pr9KYyE0kEhxZKxdYu2JBd+gczYRPLBSYFB9zU73p6MPVAb1A3bUk4qKmBhTQjTxxn0RlSWqs207hq0aj1qK7SMzVojywAwJWQKlFJlw7ae14sqN4qIuROQNWb34Iw6BJUcBgDM6O+LKG9HpvnHe5M6VMxD+D0puVbddWkCSYZBEADa6Sw///zzGDduHMaPH48tW7ZAp9O13IkgiO5HGBnqoshyXmo59FpBies+7S9xnVlcg61xbKW0haND4WqvAHgeknOChX29ZgDOzWtHhY/gxwSMgYedR7vn2NnYOckhV7GRcqFeuKsRSjDAAc6e1pU2rimOCkdMDJrI2FqUYti7A33nMKbQ4gN1nzsJh2UT2ap+l3IrsPdyQbvn6B3iJPo/d5kUQ3h+IBkGYaO0y1nu1asXLl68iOTkZMyfPx92dnbw9/dHeHi46CciIsLScyYIor0IL3ZdVO46U/Do2CvICXaOinaP9+kBNo+tnVyKh65HlZF9GlzBRbZDCwv78qrzcDLvJGObHWGdC/vq4TjO6speC511RzclZHLz0hdrQPh/vlB8AWnlac13EkgxHDV54NL/BgDcEuOPcE82P/LH+9ofXZZIJQiIZm8ss7tKiiGh1HEEAbTTWU5PT0dmZiZ4ngfP8zAajcjLy0N6erroJy2thZMOQRBdhvCC3VWp44SL+zqSBSO7tAa/nMlmbPeNDIaH4/XH6afXsR3cQoHwm5sd84/UP8A3qZbiKHfEhMAJ7Z5jVyEse93di/xEaeOsqMy1OUb5j4KHin2CIFzoKSJ4FODVmzFJTn8NAJCaiC4nZJfj4FW2wmRbEOmWr5R2SNrRWkTnB1JhEDZKu66URqOxTT8EQVgJ3SDDqK0Ul7gO7oBe+bMDKdA3eR9KmQQP33Q9Y0VtKXDxN7bD4EXNarN5nsdvSWyfqaFToZJZr3ygHlFhEqEMoosRZcLwtl69cj0yiQwzw2cytv8l/w96YzP5jDkOGPYQa0raAZTX3cTdGuuPEA/2RuGjDmiXgwSSpeoyDUrzuuApgvD8QNdzwkaxvkz7BEF0HsKLXRcs8BOVuFZK4dvOEte55bX4+TQbVb5neDC8na47tue+A/SNi9x4ibzFhX2n808jqzKLsc2NnGumtX
UhdJaFpaa7mp6SY1nIbZG3MduFtYU4nHO4+U4xdwGKxsV8HG8ETq8HAMikEjxxMxtdPpdZhiPJ7Uu56upjDwdXJWPLvtIFUgzRAj9ylgnbpE1Xyu3bt2Pp0qWYMWMGbrvtNrz88ssksyCIHoVAhtEFmmWhXjkg2rXdJa4/P5gKraHxgq2QSvDo+OvrIoxG4NSXTHu+9yzA0bvZMX9N+pXZjnCJQKxXbLvm19U4e4qd5a54PG8OoWbatQfIMAAg2i0aMZ4xjE34uRChcgZi72ZtZzcC+rpMFXMHBSDQjf3/fLT3arv+PxzHiaLLWZc7f5Gf8PzQnZ8tguhOWn3FWrBgAWbPno2vv/4aO3fuxO+//44333wT/fr1w++/t6DvIgjCKhAv0OlcZ5nnedFipPamjCuoUGPzyUzGdtewIPi6XI8qJ+8BStOZ/cahDzc7ZrmmHLvTdzO2eVHzujT/dEcQOst6rRE1gtRtXYVOY0B1uSBtXA+QYdQzL2oes30o+xAKalrIYiGQYqC6ELhcl01DbiK6fCq9FMdT2xcRDuzNfm9yrpbCYOjsSK9QhkHOMmGbtMpZ/vrrr7F582ZIpVIsXrwYH3/8Md58802MHDkSarUaCxcuRHl5eWfPlSCIjiK82HWyDMNUievgdi7u++LvVGj0jc6BXMrh0QlNsu2c/IJpX2YXDD5weLNj/pn6J7TGRgdPJpFZfRaMpji6KiGRsQ5NRVH35Fo2JQEROvPWzPSw6bCTNc7XwBtaXujn3QfG4NGs7WTj043bBwfC34XVvn+8N6ld8xPeZOrUBhSkV5ppbSGE5wfSLBM2SquulBs3boREIsFff/2Fr7/+GsuWLcPzzz+PI0eOYNGiRaisrMRvv/3W8kAEQXQvXZw6zlSJa1eftj+aL6rS4LsTbFR5/pBABLhed26KU4BkNkKc5jWl2QWMPM+LHrVPCp4EN1X78z93NZyEg7OHQIpR2D3p44R6ZQcXBeRK604b1xQHuQNmhM1gbL8l/QZjCzpd49AHWUPWcSDvPABAIZPgMUF0+VhqMU6mtT26bO+sgEcAm5Ku03XLovMDRZYJ26RVzvL58+cxcuRITJo0SbTv3//+N3iex/nz5y0+udraWrz88suIjo6GSqWCv78/lixZgpycnJY7X6esrAzff/897rnnHoSFhUGhUMDJyQkjRozARx99RAVVCNtCqDnsZLmBpUpcf3UoDbU6Q8O2VMLhsfFNnJBTXzPteZUrctxGNjvmpeJLuFp6lbEJH8X3BEQZMbopslwmTBtnxWWuzSH8/2dVZuF03ulm+/DRM1ErF9xgnfqq4c87hwbC15mNLq/e177oslCKIbwZtTTC7yrlWSZslVY5yxUVFWaLi9TbKyoqLDcrAGq1GhMnTsTKlStRVVWFOXPmICgoCOvXr8egQYOQmpraqnHee+89LFiwAD/++CPc3Nwwb948DB8+HPHx8Xj66acxceJE1NR0byJ/gugqeGPX5VnWaw3ISRSUuG6HBKO0WotvjqUztrmDAhBcn5pLWw2c+5bZbxy4AAYJmz1AiDCq7O/gj5F+zTvY1oipRX7dgShtXA/JhNGUGM8YRLqykeAWF/pJ5cjwmMDaEn4CassAAEqZFI+OD2d2H0oqwpmMti/QE0ox8tIqoKnpxICP8PxAmmXCRmnVlZLneUilph+nSa5rmiydT/mNN97A8ePHMWrUKFy9ehU//vgjTpw4gffffx+FhYVYsqT5ilz1ODg44F//+hfS09Nx9uxZ/PDDD9i7dy/Onz+P4OBgHD58GG+88YZF504QVksXpo7LSSqDXtf4ehzXvsV9646koVrbGFWWcGAXTiX8BGiarpngYBz8QLNj1uhqsD1tO2ObGzUXki4q0mJJhOWkK7op17IobVwPWtxXD8dxoujynow9KNc0vyYnw3MCeIms0aCrAeI3N2zePTwYXk7szVt7osv+0a6Qyho/o7yR79ysGKLUceQsE7aJVV4ZtF
otPvnkEwDAmjVr4OjYmMtyxYoViImJwcGDB3HmzJkWx3r++efx9ttvIzg4mLFHRUXhrbfeAgBs3rzZVFeCuAHpOhlG5gU2p6xPmAtUDvI2jVFeq8OGI+mM7dZYf4TVlxPmeWZBFQAgelpd1b5m2J62HdW66oZtCScR5drtKYhlGN3jLAsj2j1pcV9Tbgm/BXJJ4+dUa9Ria/LWZvuo5W7ge81ijSe/bLg5VcmleOQmNrp8ILEQ8VllbZqbXCFFQLQrY8u4UNSmMdqCKLUk5VkmbJRWO8sbN26EVCo1+cNxnNn9Mpms5cEFHDlyBOXl5YiIiMCgQYNE++fPnw8A2LZtW5vHbkpsbF0u1WvXrnVoHILoKQg1h52ZZznjIussh/Rve1R5/ZE0VGoaK6lxHNhSwhlHgYKLbKfhzaeL43kePyb+yNjG+I+Br4Nvm+dnDQid0ppyLXRNIvFdgcFgRFUJq5XuiTIMAHBTuWFy8GTG9lPiTy0v9BsieNpZklKXzvA6C0aEwNNRwTRpT3Q5uD9bmjvzYolIXmU5hJpliiwTtkmrnWWe59v10x55Rnx8PABg8ODBJvfX2xMSEto8dlPqdc++vj3zIkkQbUZU7rpzHi6VFdSIHsuH9Pds0xjlNTp8fZgtejRzgB8ivZ0aDYJ0cXCPAMInNjtufGE8rpRcYWx3977bTGvrx1QEt6t1y1UlGtET+p4aWQaAu3rfxWxnVmbi2LVjzfbhg0cD3v1Y4/FPG/60U0jx8Dg2urzncgEu5LQt7WpIP9ZZrqnQoii7qk1jtBqSYRAEAKBVYV9L65FbIjOzLkVUYGCgyf319oyMjA69zkcffQQAmDNnTottNRoNNJrGfLH1Cxp1Oh1l1Ggl9ceJjlfbsORxMxj0zLaR5zvl/5GWwBZzsHOSw8VH2abX+uLvZFSq2ajy4zeFNo5RcQ2yy9uY2JdhyBIYDYZmj9n3l75ntgMdAzHCe0TP/VxKADtnOWorGudfmlcFZ6/mFziaor2ftZI8Nt+vwk4KibxzPltdwQC3AYh0jURyWXKD7fvL32O4tzhvd8Mx0+vBDVsK2Z9PNe5M3Q9dTgLg3QcAcNcQf6w9mILSJovyPtpzFZ/eO7DVc3Nwl8PZU8Xk005NKICrn6qZXu1DeOU36PUW+5/S9aDt0DFrH5Y4Xm3XSHQBVVV1d8n29qZTDzk41OkVKyvbn5B97dq12LNnD1xdXfHcc8+12H7VqlV47bXXRPb9+/ebnSdhmt27d7fciBBhiePmcfUqmsal8vLzcW77drPt20vRaTs0Pb1wzjX4a8dfre5frQO+OidF08fAA92NSD5zCPXuS9+cHxHFN8oN9BIFdua5Q9/k/QiPWZWxCrsqdjG2/vr+2PHXjlbPzRoxSOwBNC7CPv73WVzMav8Foq2ftapMOYBGZ42Xa/HXX63/f1sjfTV9kYxGZ/lQziF8t+07uElN5+HevXs3JEYHTJU5QalvvDbl/PYi4oMbczGP8eTwR2bj/2r35QJ8+fN2CFIoN4vBQQkUNUo6Eo6kIM9wofUDtBKf7Cy4NNlOT03FKQufL+h60HbomLUNS2Q8s0pnubM5dOgQnnrqKXAch3Xr1sHf37/FPs8//zxWrFjRsF1RUYGgoCDcfPPN8PDwaKYnUY9Op8Pu3bsxZcoUyOVtW+hly1jyuBUnp6B0776Gbb8AfwyaObOjU2TQaw3YuOc4msalRk0dgIjBXq0e44PdSdAYGiUYHAe8ee9YRHlfX+yrrYLs42VMH27Q/Zg6/Q4A5o/ZVxe+giGh0cFWSpV4dtazcFG6oCezvygRSacao/mBPuEYM9N0us/maO9n7cT/0lB2MbthOyDMG1Nn9m3z61sTE3QTsG/rPlTp6oI3PHgUBxVjwcAFTDvhMZM4XQEOv9ewP6TsOALu/xxwqJMhjVPrceiDv1Fe2/jU5LwhAA/PjG313DJDSrBjbaNWX1cmw6QJU6C0t+x5teDkKVScaswzHR
oagmEWOl/Q9aDt0DFrH8XFxS03agGrdJbrs1+Yuxuorq5bxe7k5GRyf3NcuHABc+bMgVarxccff4y5c+e2qp9SqYRSKX6sKZfL6UPbRuiYtQ9LHDeJQIMokUgt/r+4llgBgyBlXGh/r1a/Tkm1FpuOs9X6bo31R9+AJhG9sz8Bmqa53TlIRz8BqeA1mh4zvVGPX5PZnLkzwmbA07FtWmprxFVQAKSqWNOh/2tbP2vVpVpm29Xbocd/x13kLpgTOQffXf6uwbY1ZSuWDV4GpbSZa8GIpcDRjwBjXWSfM2ggj/8WGP9PAIC7XI4Hx4bjg92NBXF2XMpHWoka0T6tu6aF9PWEVC5p+J7xPJCbVImooT7tfr+mkAgW6EvAWfz/SteDtkPHrG1Y4lhZZeq4+jRv2dnZJvfX20NCQto0blpaGqZOnYrS0lK8+uqrePLJJzs2UYLoaXRBnmVhFgzf8LaljPvi71RRXuXlk6IaGxgNzMIpAECvGYBH85HUg1kHkV+Tz9h68sK+pjh7dW9hEuHruXhaXj/bHdzVi13oV6Ypw870nc13cvIBBsxnbae+BPSNNxSLRofCSdXoiPI88Mm+ZLQWmYkUcsJUjRZBVO2aUscRtolVOsv1Kd3Onj1rcn+9PSYmptVj5ubmYsqUKcjNzcVTTz2FV155peMTJYieBt/5qeMyBBft4H6tlykVVWmw8Wg6Y7ttYAAivBpzrSNxO1DKtsGoJ1oce3Mim089xjMG/Tz6mWnds3ARVfFTd2maL2Fu556cCaMpYS5hoqqOmy+3Ii//yMfY7ap84OJvDZsudnI8MCaMabIt4RqSC1qf1SJEkEIu45LlU8hxgptpnrJhEDaKVTrLY8aMgYuLC1JSUhAXFyfa/8svvwAAZs+e3arxSktLMW3aNKSkpOCBBx7Ahx9+aMnpEkSPQXyxs6yzXJZfI6ogJ7yoN8cXf6eiVtcYVZZKODzZNKoMAMcEUWW/WCBkTLPjJpYk4kTuCcZ2o0SVAXFk2aA3orpcY6a1ZdHU6qGpZrOs3CjOMiD+nFwovoC4grjmO/nFAiFjWduxNUzqtSVjQuGoZKPLn+5vfXRZeBNaW6FFYVb7F72bhMpdEwQAK3WWFQoFli2rW7zzxBNPNGiUAeCDDz5AQkICxo8fjyFDhjTYP/nkE/Tu3RvPP/88M1ZNTQ1mzZqF8+fP484778SXX34JrhOrlhGEVSO82FlYhiGUYNg7K+AZ5GimNUtBpRqbjqUztrmDAhqr9QFAzhkg8yjbcdSyFisRfnPpG2bbXeWOqaFTWzWvnoC9swIyOfu/7Cophuh1OMDJ/caQYQDA+MDx8HPwY2ybLm1quaMwupyXAGQ25mp2tVdg0WhWSrg1LgfpRdVoDa7e9qLCL5kXLSzFEH6vujiNLEFYC1a5wA8AXnzxRezZswdHjx5FVFQUxo0bh4yMDJw4cQJeXl5Yt24d076oqAiJiYnIzc1l7C+88AKOHTvWUE3wwQcfhCk2bNjQWW+FIKwHkWbZsjeOwot1cD/3Vt+cfn4wFeomCwOlEg5PNq3WB4ijyk7+QN/bmh23sKYQf6b9ydju7nW3yUVaPRWO4+DsZYeSa42OVnlhLfyjTKc5syRCZ9nRVQmp3CrjMO1CJpFhQZ8FeO90Y4aLvZl7kVWZhSCnIPMde80AXEOAsib1AI6tAUJGN2w+ODYc64+ko+a6Rt/IA2v2J+PdO1qXGSO4vwfO729c25NxoQRDZ4Y106ONiM4PFFkmbBOrPaOpVCrs378fL730Euzt7bF161ZkZGRg8eLFOHv2LMLDw1seBHUSDAAwGAz4/vvvsXHjRpM/BGEbCCv4Wc5Z1mkNyEksY2ytrdqXX6HGt8fZIkPzBwcixKNJVLk8G7i4he04/GFAxpYQFrL5ymbojY0yAYVEIarQdiMglD40LVrRmVQUsq9zI0kw6rk96nY4yBs/i0beiG8vfdt8J4kUGPEoa7vyJ1DUKLVwd1Dg/l
FsdPm3cznIKmldXlihxCk/rRzqassVrOAEMgyeIsuEjWK1zjIA2NnZ4fXXX0dycjI0Gg1yc3Oxfv16k5X9Xn31VfA8L4oQb9iwoVVluQnCFhAuABJeDDtCTmIpDPomKeMkHIL6tC6y+dmBFGia9JVJOCwTRpVPfA40KUICuT0wZHGz49bqa/HT1Z8Y2+yI2XBXubdqXj0J4SK/8sIukmEUCxf33TgSjHocFY64Pep2xrYleQvKNS2Uqh50H6B0bmLggaMfM00eHhcOVZNIvMHIY00rtcsBUa5MFJ/ngaxLJa3q2ypEMgy6VhK2iVU7ywRBWJhOTB2XeZG9SPuGO7eqSEJueS2+P8nmVb5jaBCC3JvkDlZXAGcET4AGLgDsm3d6t6VuEzk0C/subHFOPRFnL9ZJ7S7N8o0YWQaABX0WQMo1Vt6r1dfil6u/NN9J5QwMfYC1xW8GKvMaNj0dlbhvBBtd/vlMdqu0y3Up5NgbUmE2mg4hPD9QYImwUchZJghbQnixs5AKg+d5pJ8vYmytTRn36f4UaJtEleVSE1HlM+sBxunlxAuoBBh5I7678h1jGxcwDuGurZNw9TTEMoyucpZvfBkGAPg7+mNKyBTG9v3l76EztCB7GPk4IG0iFTJogeOfMU2W3hQOpYyNLn+45ypaQ0h/9oYx42IxjBaKAItSS1KeZcJGIWeZIGwIXpRn2TKngJJr1agsZp2m0AEtO8tZJTX44RQbVb5rWBACXJs4XDp13cKopvSe1WIRkkR9IrKqshjbwn43ZlQZgCgzQm2lDlq13kxry8AbeRMyjBvTWQbETyUKaguwK3NX852cfIFYQZrC0+sAdePNn7ezCotHhzJNfo+/hit5FWiJ0AHsugB1lQ75qS3IQ1qLQIbRlbm7CcKaIGeZIGwJ4cXOQprltAQ2quzkroJHQMsp4/67Jwk6Q+OcFFIJnrhZEFWO/76uqENTxq5odlye53FIfYix9XLrhRG+I1qcU0/FyUMlelLQ2dHl6nINjHr2M3UjapbrGeA1AIO9BzO2jZc3trzuZfRTYP45mgrg9HqmyaPjI0R5l9/f1XJ02dnTDu7+DoxN+JSn3QjPDyTDIGwUcpYJwpYQyTAso8NIFzjLoTGeLaaMS8qvxJZzbEn7BSOD4efSJDJp0ANHPmI7ht0EBA5Bc5wpOINMAxuxXthv4Q2dY10ml8LRlU2H19mL/ITOuEwugb1z89lJejrCpxPJZclI1Cc238kzEuhzC2s7/hmgbywc4+agwMPjWInQ7kv5OJdZ2uKcQmPY6HJavIWcZaEMg7JhEDYKOcsEYUsINYcWyLNcXa5Bfhr7uDgspuWUce/tSmQC3fYKqTiqfGmruLR1C1FlAFh3kc3D7ufghxlhM1rs19Nx8mCjukJpjKUpF6SNc/K0u6FvSADg5qCbEe7COrUH1Adaji6P+T92uyoPiP+BMS0ZGwo3waLY93a14IhD/H0rzatBWUHr0s81i1CGQZplwkYhZ5kgbAhhnlRLpI4Trr6Xq6Twj3Zttk9cVhl2XmSlFQ+NDYOnY5PIKM8Dh//LdvQfBIRPaHbsC0UXcDzvOGN7oP8DkEtazszR03H2ECzy62RnWahXdrmBJRj1SDgJHhrwEGPLNmTjVP6p5jsGDgFCx7G2ox8DxsZ0iE4qOR6fwN4wHkkuxtHk5iPFPqHOsHNiP9/Cpz3tQXR+IBUGYaOQs0wQtoTwYmeBKKDwkW9wXw9IZc2fWt7byUbLXO3leOgmQZaKpN1A/nnWNnZFi3P+MuFLZttD5YG5kXOb7XOj4CRwVis7WbNsK2njhEwPm44AxwDGJnyaYZIxT7Pbxcl1hUqacP+oEPg4s3Kad3clNhu55iScaKGfRXTLwgXAJMMgbBRylgnClrBwnmW91oDsy2x+5bDY5iUYR5OLcFgQKXt8QgScVYLI7+EP2G2PKKC3QPcpIKk0Cfuy9jG2hf0WQiW78SOeAOAskGF0emTZBqr3mU
IukWNJ/yWM7WT+SSQUJjTfMXIS4DOAtR3+gFlLoJJLsXxSFNPkXGYZ9l4uaHZooW75WpIFqvmJql2Ts0zYJuQsE4QtIUod17HIcvaVUuh1Tar2cUBIM/mVeZ7H24Kosq+zCgtHhbINM44BmcdY29inW3Tuvzr/FbPtJHfCXb1uvNLW5jAlw+jMCqW2UL3PHHMi58DLzouxCZ9qiOA4YMxTrO3aOSB5L2O6c2gQgpsW5cF1jX8zqduC+rgzT3R4I4/MSx0rUCJMLUnVbglbhZxlgrAhRBe7Dsow0gSPev0iXaFyNK8N3nUpH/FZZYxt+aQoqORStuHhD9lt5wBgwJ3NziWrIgs70ncwtrt73Q0HuYOZHjcewgV+eo0B6qoORhfNoNMaUFOuZWy2ElkGAKVUiUX9FjG2A9kHkFjSwoK8fnMBtzDWdvAtJrosl0qwYko00+RKXiW2JVwzO6xcKUWgoLx8ekezYgg1y5RnmbBRyFkmCFvCgnmWeSMvThk3wLwEw2DkRVrlUA973DE0kG2YGw8k7WRto58EZM2nJFubsBbGJpFzBRS4p9c9zfa50XB0U4qeFnSWFKOySDyuLTnLAHBH9B1wUbgwtrXxa5vvJJUBN/2DtWWfAlJY+dDsWH9E+7C5yj/cfRU6g3kphPD7l3GxBIZm2reI8GaaNMuEjULOMkHYEiLNcvsjywWZlaLIYnN65a3ncpBUUMXY/m9KNORSwWnowNvstp07MLj5yntp5Wn4I/UPxjZcORyuStdm+91oSKQSOLmzi8M6qzCJcFw7JznkSqmZ1jcm9nJ73NvrXsa2J3MPLhVfar5jzF2AawhrO/g2E12WSjg8M7UX0yS9uAY/nGTzhzdF6Cxra/XITSprfi7NISp3TZFlwjYhZ5kgbArLyTDS4guZbVcfe7j62Jtsq9Ub8eEethpZHz9nzI7xZxvmxgOJbHYAjH4SUDQvpfgs7jMmqmwns8M45bhmety4dFWuZVsqc90cd/e6G3Yc+97XxK0x0/o6Ujkw7hnWlnUCSDvImKb29UFskCtj+++eJFRpTJcxd3RTwjvEibGldkCKIdYsU2SZsE3IWSYIG4IXyDA6kmc59RzrLDdXiOSb4xnILmWdq39Oi4ZEGLkyFVUe/nCz80gqTRJrlaPvhoPEdrTKTemqXMsVRbaZCUOIk8IJY5VjGdvf2X8jvjC++Y6x9wAuwaztABtd5jgOz8/ozTQprtbii4MpZocVVfOLK+zAwjyhDIMiy4RtQs4yQdgSFkodV5JbjdI8tkJY+CAvk23La3VYvS+JsQ0NccPNvbzZhuaiyko2Uibk07hPwTeJmDvKHbGwT/OyjRsZcWS5c2QYwoi1MG2dLTFSORJuSnZx3ZpzLUSXZQpgnKAaZeZRIP0QO3a4Byb3Yb8rXx5KQ36F6Zsg4fewqlSDgvTK5udiDuH5gWQYhI1CzjJB2BKibBjtG0YYVXZwUcAn1Nlk208PJKOshs3I8PzM3uKyyAffYbdbEVW+XHwZezL3MLaFfRfCRelipseNjzDCK4wAWwqhDEPopNsSSk6JB/o+wNiO5R7DmfwzzXccuABwFixwFT5dAfDs9N6MfLhWZ8B/BbKmetz9HERyqNS45nM0m0OUWpJkGISNQs4yQdgQQs2hUJPYWlLjWGc5fJC3yZzN2aU1WH8knbHN6O+LISHubMPceOAKu0APo5e1GFUWakOdFc64r+99zU/+BkcUWS7pnFzL4siybcow6pkfNV+Ud/mTc580f+xlCmDc/7G2jMNA+mHGFOXjhLuGBTG2H09lISlfHDHmOE4UXU45204phuCGVijjIghbgZxlgrAlLJA6rqKoFoWZ7EXanATjg11XodU3OugyCYd/Te8tbnjgLXbbzg0YvrTZeZzNP4uD2eyCqAf6PwAnRfMO9o2O0Gk16IyoqdCaad0+tLV6aGrYRWa2HFkGAJVMhYcGPMTYTuefxpFrR5rvOOj+ujziTTnwlugp0P
9NjoZdk3zkRh54e8cVk0NGCL6P5YW1KLlW3cI7MIHw/EAyDMJGIWeZIGwJCxQlEUaVVQ5y+EeKZQ8XcsqxJS6HsS0YEYwwT8HCu6xTQOJ21taCVpnnebx/5n3G5q5yx7297zXTw3ZwcFFAIhPkWrawFKOyRDyeoyBlnS0yP3o+fB18GdsHZz6AwWgw30mmBMYKosvph4DU/YzJ21mFh28KZ2x7LhfgeKq4Sp9XsBMc3dj/R4pAOtUqhE+LKM8yYaOQs0wQtoQF8iyLsmDEekIiFZfFXfXXZcY3d1LKsHxSFAQNgb2vsTZ7jxajyrszdiOhMIGxLY1ZCnu56dR1tgQn4eDk3rmL/IQZNuxdFJAJqzDaIAqpAk8MfIKxJZUm4feU35vvaCq6vPd10c3t0pvC4enIFudZtf2ySGJhSooh/N62CqEMgzTLhI1CzjJB2BAd1SxXl2uQm1rO2ExJMA5cLcSRZDbi9eiECHg4CqKPqQdEq/8x7plmo8o6gw4fnf2IsQU7BePO6ObLYdsSwswUFo8sC3Ms27gEoymzw2cjyo29Kfwk7hPU6pu5YZGrgPHPsrZr54DL2xiTo1KGpyazZbDjs8vx5/lc0ZBCKUZxThXKCmpE7ZpDdH4gFQZho5CzTBC2hOhi17bIclp8ETOGXCVFUG92sZ7ByOOt7ayW0tdZhSVjwgRz4euiZ01xDgCGPtjsHH66+hMyK9kqZk8Nfgpyqbx1b8IGcBJkxOjsyLKTjS/ua4pUIsUzQ9iCIwU1Bfj20rfNdxy4APCIZG373gAEEo67hwUhXCBlemdHIrM2AAB8I1xh58R+J9ocXRZqlkmGQdgo5CwThC3RwTzLqefYFFShAzwhlbNj/HgqC4mCVfrPTI2GnULwmP7yNuDaWdY2/tm6KJsZKrWV+Dz+c8YW4xWDKSFTWvkObANRZNnChUmEmTBsfXGfkDEBYzDKbxRj+/rC1yhRl5jvJJUBN7/A2ooSgYQfGZNcKhEtks0sqcHGo+mMTSLhEDZQIMWIa6uzLNgmZ5mwUchZJghbQiTDaH1kWV2tQ05iGWMLF1yMK9Q6vL8rkbH19nXCvMGCXLJGQ13UrCkekXXRtWZYd2EdSjWljO2ZIc+IczbbOJ1dxU/kLLuTsyxkxdAV4Jp4m9W6aqyNX9t8p763Ab4DWNv+VYBew5im9fPBkBC2CMrHe5NQVMW2ixB8P/PTKkwuzjSHqNw16TAIG4WcZYKwIUS5VtvgZKbGFcLYJPWcVC5BcD9WgrF6bxKKq9k0ZS/d0hdSoVMev7kuataUm1+oi66ZIasyC5submJsE4MmYrDP4Fa/B1vByZN1XqtK1Mz/rqNQ9b6W6e3eG7MjZjO2nxJ/QlJpkpkeqHvSM+kV1laeCZxez5g4jsNLt/RlbJUaPd7fxRYqCejlBoUd+51KOduGAiUiGQY5y4RtQs4yQdgSHciznHQqn9kO6e8BharxQpxWVI0NgkfBU/r6YEykJzuQtkYcVfaNqYuqNcO7p96F1tjoiEs5KZ4a8lSr529LCCPLRgOP6jKNmdZtQ6vWQ13NVmQkGYZplg1cBqW0cVGrgTfg7ZNvN18gJHIyEDyatR18G6gtY0wDg1wxbxCbQePHU5m4dK2iYVsqkyA8lv3+JZ1ui7NMqeMIAiBnmSBsi3amjqup0CInkZU/RA31Ybbf/PMSdIZGJ0Au5fDCzD7iwY59AlQKVu9PeqVZ/fSRnCPYn8Xmnb2n9z0Idwk308O2sXOSQybQkltqkZ8wqgyQDMMcfo5+eKA/Wwb7RN4JUYl2Bo4DJguiy7UlwKH3RU3/Nb23qFDJ639cZJzxyGHs97QgvQLlha38LIjKXVNkmbBNyFkmCFuinTKMlLMFTFeZUoqQAR4N24eSCrHnMhuxWjImDKHCAiSV+cDh/7K2iIlA1GSzr60z6PDWSbbCn7vKHY8PfLxVc7dFOI4TRXstpVsWOsv2zgrIhIs3iQaW9F8CPwc/xvbuqX
ebTyUXPBLocytrO7EWKM1gTL4uKjw+IYKxHU8twc6LeQ3bgb3doHJgs2Ikn2GfEplDpFkmZ5mwUchZJggbQlS8oJXZMJJOsxfXsBhPyK87SHqDESv/uMTs93RUYNlEQRosADjwH0DXtOwuB0xZ2exrf3/le6RXpDO2pwc/bfNlrVvCWZA+zlK5lsVp4yiq3Bx2Mjv8Y+g/GFtudS7WX1hvpsd1Jr8KSJrojQ1acapFAA/fFI4AV/Z//eb2y1Dr6lLOSaUSRAxmF/olnWqlFINkGAQBgJxlgrAthBe7VmiWK0vUyE1mC5FENXm0+/3JTFzNr2L2/2NqLzipBHmP8y8BZ9kFehi0APDtb/a1i2qL8Fn8Z4ytv0d/zImc0+K8bR2hE1tZZCkZBjsOOcstMyVkCob7Dmds6y6sQ05VjpkeADwigGEPs7YLvwDZZxiTSi7F8zPZVHJZJbVYdyStYVsomSrOqULJtWq0iPD8QJFlwkYhZ5kgbAmRDKPlLsln2CiU0l6G4D51WTBKq7X4YDe7Ar+vnzPuGBokHmj3y2zqOrm9OK+sgHdPvYtqHXtRf37E85C0YWGirdJZ6eOEqccoE0bLcByHZ4c/CynXKFfRGDRYdWJV89KG8f8CVC6sbdcLou/xrAF+GB7KZqb5ZF8y8ivq/ld+Ua6wd2HLZCe1QoohSi1J5a4JG4WuOARhQ7Sn3HWyQIIRPtCroRDJu7sSUVbDZkZ4ZbaJVHFJe4Dk3axt9JOAs7/Z1z2ScwTb07YztjkRcxDjFdPinAlxxLeikxb4UfW+1hHtFo27et3F2A5mH2x+sZ+9O3DTP1lb5jHg0lbGxHEcXp7dl1FN1GgNePPPywDqCpREDvFm+iSfLmhZgyzULFPqOMJGIWeZIGyJNqaOK8uvQUEGW42v/pFufFYZNp9ky07PHOCLEeEejA16LbDjWdbm6AOMXm72dWv1tVh5nNUyOymc8PSQp5udL9GIsyDXcnWpBgZDxyODpFluP08MegIeKvb7serEKlRqK830ADB8KeAawtp2vgho2Scu/QNccOcQ9onO7/HXcDS5CIBYilGWX4OiLFY+JYayYRAEQM4yQdgWbcyGIVw1b+ckR0AvVxiMPF7ceoEZzl4hxYuz+kLEic+A4mTWNvElQOlo9nXXxq8V6TmfGfIMPO08zfQghAhlGDxf5zB3BK1aD3UV+ySBZBitx1nhjOeGP8fYCmsL8dHZj8x3kimBqYJFsBXZwOEPRU3/Nb0XXOzYtQIv/34RWr0RPmHOohR/woW7IoRPiGiBH2GjkLNMELZEG/Is8zyPq4JV8xGDvSGRSrD5ZCbO57CL/pZPioK/YFU+KnKBg++wtoAhzZa1TixJxMaLGxnbYO/BmBs112wfQozSQQaZkk3pZipHclswVSqZciy3jWmh0zA2YCxj+ynxJ8QVxJnv1OdWIOwm1nbkY6AkjTF5OCrxz2m9GFtyQRXWHUkDx3GIGsZKMZJO5zcrrRCnjiNnmbBNyFkmCBuiLanjCjMrUZrLPuqNGuqN4ioN3t3JlqqO9HbEkjFh4kH2vAJoBY96Z7xrtgCJwWjA68deh4E3NNhkEhleGfUKLeprIxzHiRxZU85uWxA623ZOcsqx3EY4jsOLI1+EnazxxpIHj9eOvQadQWeuEzDjHaDJAkEYNMBO8QLZe4YHIyaQXRT48d4kXCurRaRAilFVosG1pLLmJstukwqDsFHo6kMQtkQbZBhXjucx207uKvhFuOKtv66gvJa9qL8+px8UMsHpJPM4kPAjaxt0HxA4xOxrfnv5WyQUJTC2hwY8hHBXqtTXHjrbWabFfe0jwDEAj8eyRXWSy5Lx1fmvzHfy7gOMeIS1Jf4JJLMLBKUSDivn9Bct9nvjz0vwDHSEuz9bKOjKcUE1zaYIb1BJhkHYKOQsE4Qt0co8ywa9EUknWT1jr5G+OJtVip/PZDP2W2P9MTpCoCU26IHtglX8Sue6stZmSC1Pxepzqx
lbqHMoHhrwkNk+RPOIci13VIZRTGnjLMWCvgvQy42VTHyR8AUuF18232n8s4C94Lv217OAntWixwa54u5hwYxt+/k8HEoqQq+Rvow95WwhdBoDTEKaZYIAQM4yQdgWotRxpiPLGReKoa5mo8eRw7zx4taLjM1RKcMLs/qIBzj5OZDHRogx4XnA0VvcFnXyi5cOvwSNofGiz4HDK6NegVKqNPduiBZwcmePXUcjy5QJw3LIJXK8NuY1JveyntfjhSMvmJdj2LnWVfZrSnGy6cV+03rBzZ5d7PfK7xcROtiLiTrrNAakxhWafDlO8OSJJx0GYaOQs0wQNoQor6oZGUaiQILhF+GCX67m43JuBWN/enIUfJwFDlNZFrDvTdbm1QcYLqhG1oSNlzaK5BcL+izAUN+hZvsQLWP5yDKbq5kiyx2jn0c/0ZOTpNIkrE1Ya77TwAVAgOB7ceh9oCiJMbk5KPDsdLayX1pRNTaczUJQX7aAyZVjZqQYwrUFlGeZsFHIWSYIW6IVeZbVVTqkny9ibJ793UWV+nr5OGHR6FC2M8/XyS8EVfcw+7+AVFD++jrJpcn45NwnjC3EOQTLB5vPw0y0Did3VlNcWaruUGEJYWSaNMsd55GYRxDtFs3Yvj7/NS4UXTDdQSIBbvlQsNhPC/zxf6I1CXcODcKgYFfG9umBFDj1ZhcAZieWmn7qQJplggBAzjJB2BatSB139VQ+jIbGi65UJsFXGXnQ6Bv7chyw6vYBkEsFp5DL24Crf7G2IYuB4JEmp6Mz6OoeOxsbHztz4PDGmDeYbAFE+xBGfo16HjWV2naNpdMYUFvJygMobVzHkUvleHPsm5BxsgabgTfghcMvQK038yTALwYYxS4QRPohIH4zY5JIOPxn7gDImnzP9UYeH1/JgULVxNnmgasn2adJAEQ1SagoCWGrkLNMELZEK2QYiYLV8bJge/ydXsLYFo0KxeBgN7ajugL461+szcFLrLFswuq41bhUfIkdu98iDPQeaLYP0XrsnRWQyNj/cXulGCZzLJMMwyL0du+NpbFLGVtqeSreO/2e+U4Tngdc2Ip92PkCUF3MmPr4OeOR8Ww2mbM55TAGsjejV47ltZhassXy2ARxg0LOMkHYEC1dDEuuVYvKW28tZYuP+Luo8A9B4QMAwN7XgEqB9nH6W4Cdm7gtgGPXjmH9hfWMLcwlDE8MfKK5t0C0AU7CwdHNMrplUzmW5UrKsWwpHhrwEPq4s4tlf0z8Efsy95nuoHAAZgqc6doSYOe/RU2fnBiFME82ZdxPRaXMdll+DfLT2TUJJMMgiDrIWSYIW6KF1HHChT56BYeLRvax/crb+sNRKWNsSPsbOCXIERsxEeh/u8lplKpL8cJhtqCCXCLH2+PehkpG0UpLYqlcy8LFfSTBsCxyiRxv3/S2SH708tGXkV9tpix1r+lA3zmsLeEHIJGVQqnkUqyaN4CxpfB6qJXsU4crxwRSDFFREoosE7YJOcsEYUuIZBiNfxr0RlGBgrOcFnyTNrNj/TGpD1sFDJoq4H/LWJvMDpj1vkmZB8/zePnIyyisZdNVPT34afTxMJGGjugQlsqIIU4bR5pySxPmEoZnhz3L2Mo15Xjh8AswGM3kQp7+NqBkF+xh29NALRs5HhnugXuGN5FtcMBpjr0RvnoyD1q1vrEJ5VkmCADkLBOETcGL8iw3ngJS4wpFC7jOKxov0C52crx8S1/xoHtfA8oyWNvkVwB301X3Nl/ZjAPZBxjbGP8xuK/vfa14B0RbsVxkmQqSdAXzouZhSsgUxnYi7wTWX1xvuoOzHzD9P6ytKg/YIZZjPDejD7ycGnNvX1AYYGySO1mnNiD5dEFjB9IsEwQAcpYJwrYQpg1rcjG8eCiH2ZUlNaBE2tj+xVnshRYAkH4YOPkFawseBQwXlOW9TkJhAt49/S5jc1e5442xb0Bippog0TEs5SxTQZKugePqivH4OrCV9lafW42TuSdNdxq4AI
iczNrivweu7mRMLnZyvH5rv4btSgmPNBl7A82cB0iGQRAArNxZrq2txcsvv4zo6GioVCr4+/tjyZIlyMnJablzEw4ePIjXXnsNs2bNgpeXFziOQ2hoaOdMmiCsGdHFru5iWJZfg5zEMmZPvLIxqjwm0gPzhwSyXbXVwP8Ei/FkdsCcNeJiBqjTKT9z8BnojXrGvnLMSnjaeYraE5bBlAyjPRFCkWaZnOVOw0XpgrfGvcXcQBp5I/759z9RUFMg7sBxwOyP60rKN2XbUyI5xvT+vpjat1FKFadkv48FGZUozKxsHLcpJMMgbBSrdZbVajUmTpyIlStXoqqqCnPmzEFQUBDWr1+PQYMGITU1tdVjPfXUU3j11Vexfft2FBUVtdyBIG5UzORZFkaVazgeV+V1zrKjUoa3b48Rlb7FjueB0nTWNullwCNC9LIGowHPH3oeedXsAqIH+j+AmwJvavv7IFqNMLKs0xigqdGbaW0avdZEjmVyljuVIT5DRJlhStQl+MfBfzB5yRtwCQCmCeQYlbnAn88wN8kcx+GNuf0bSmGnyYyo4NjzwoXr5wNKHUcQdVits/zGG2/g+PHjGDVqFK5evYoff/wRJ06cwPvvv4/CwkIsWbKk1WNNnToVb7zxBnbu3ImLFy924qwJwroxlTpOrzPgsiALxgWFHobrvvHLt/RFoJs9O9DlP4CzG1lb0EhghGn5xRcJX+DItSOMbajPUCwfRFX6OhtHN6WouERbF/mZzLFM2TA6nYcGPCS6mTxXcA7/PfNf0x0G3SeWY1z4FUj4iTF5O6nwxm112TF4DkhQsosHr57Mh7ZWTzIMgriOVTrLWq0Wn3xSV/52zZo1cHR0bNi3YsUKxMTE4ODBgzhz5kyrxnvnnXfwwgsvYOrUqXB3d++UORNEj0CUOo5DytlCaKrZSGP89YV9E3t7446hAvlFRS7w+5OsTe4A3PYpIBHn3f07+298Fv8ZY/O088S749+FTCITtScsi1QmgaMrqzVvq25ZqFdWOcqhUNH/rrORcBL8Z+x/EOAYwNg3XdqEHWk7xB3q5RgqV9b+5zOip0CzYvwwO9YfAHBeoWcW+uk1Blw9lU95lgniOlbpLB85cgTl5eWIiIjAoEGDRPvnz58PANi2bVtXT40gejai1HESkQQjQ2ZAmZSHi50cb80bwMovjEbgf4/XFT9oyoy3TcovUspS8K+//wW+yYVYyknx7k3vkk65CxEt8mtrZJkyYXQbLkoXvD/hfcglcsb+4pEXcbHYxJNSlwBg9n9Zm7YS+G0pYGBvil+/tR+8nJSokgApcvFCP16Y+ZGcZcJGsUpnOT4+HgAwePBgk/vr7QkJCV02J4K4IRBc7MrLjchNZiv01UeVV97WH97OAqfo5OdAiqCiWJ9b6x7/CihTl+HJfU+iWlfN2J8e/DSG+g5t5xsg2oNjBzNiCJ1lkmB0Lf08+uH5Ec8zNo1Bg+X7lqOwptBEh7l1GTKaknUCOPwBY3JzUODt2+vkGHEK1pEuyqpCUQFrIxEGYatY5XO0zMxMAEBgYKDJ/fX2jIwMk/s7A41GA41G07BdUVFXFlSn00GnM7HYghBRf5zoeLUNSx43XuAsX7jAFiWo5ngkyQ2Y0c8H0/t4sq+ZGwfZ7pcZ+Svv6Av99PcAPXtR1Rl1WHFgBbIqsxj7jJAZuDf63k7/DNBnjcXBVcFslxfVmDw25o5beVENs23vqqBje52u+qzNCZ2Di4UX8Wvyrw22gpoCLN+3HF9O/hJKqSCt4+Q3IEs/Aq4svcHEH3gLhsCR4INHNdjGRbjjjiEB+Pl0DsokRrgaG2NoF+KqEdZ0TKPRYu+TvqNth45Z+7DE8bJKZ7mqqgoAYG9vb3K/g0NdjfvKysoum9OqVavw2muviez79+83O0/CNLt37+7uKfRILHHcInQ61KuKdTIHJCVp0fQB03mFHvYKHmPtcvDXX43yDJm+GhMSX4bcwDrXR30XoejAccbG8z
z+qP0Dp7SnGHuANADDy4bjr7/YUrydCX3W6qi6JgfQGA3OSSvA9u3pZtsLj1tBqh2aXi4y81JQuv2KhWfZs+mKz1oMH4Mz0jNIN6Q32C4UX8Bjvz2G2+1vF2WscfNeiLFlb0CCuptkjjdA98NCHOi1Elp5Y5q5oRJgj1KKeI0B49WN54O0q2r4K1yg1NY9feKNRmzfvt2i74m+o22HjlnbqKmpablRC1ils2yNPP/881ixYkXDdkVFBYKCgnDzzTfDw8OjG2fWc9DpdNi9ezemTJkCuVzecgcCgGWPW8rrKxsepeb4jwX4xgujATzOKfX48O7BmBDt1diJ5yH9ZREkWvZxr2HkMgyf9E/Ra3xz+RucOHeCsXnaeeLraV/D2967Q/NvLfRZY8m6VIK/mmQCkhiUmDlzgqidueP23fGT0KLxydqIMYMQGkuac6DrP2tj1WOxcOdC5FQ33szG6eIwPGg4Ho15VNSeP6QF/n6rYdtOV4pp1T/DcPdPzILcwJgSPPzVaYxSy6BoeH7EITtgPCLSfq/b4nnMnDnTIu+DvqNth45Z+yguLu7wGFbpLNdnvzB3N1BdXaeBdHJy6rI5KZVKKJVKkV0ul9OHto3QMWsfFjlu1xf4GTkpsgPYlFSJcgPuuikMU/r5s32OrQGuCqJJQSMgnfIqpFJ2Pn+l/YUPz33I2BQSBT6++WMEuLAr+rsC+qzV4ertyGxrqvWAUQK5Upy9BGCPm8FgRE2Zhtnv6u1Ix1VAV33WvOXeWD1pNe7bfh9q9I3XyC8ufIEA5wDMi5rHdpjwLyD7BJC6v8EkSTsIybGPgAnPNtjGRPng4clRuLgtHYO0ja5Bjv9YhGbsgNRY91RJJpOJc653APqOth06Zm3DEsfKKhf4BQcHAwCys7NN7q+3h4SEdNmcCOKG4LqznO89BFqlK7OrJFCFf03vxbbPOgnsfpm12bkD89cDAkf5VN4pvHD4BdFLrhyzEgO8BnR46kT7MbUgr7UZMarLNKIkKo7u4sAB0XVEuUXh7ZveFpWIf/3Y6ziUfYhtLJEC874EnPxY+4FVQMp+xvTkxEhow1lZoV7ugFzfEY0GyohB2CBW6SzHxsYCAM6ePWtyf709Jiamy+ZEEDcExrpsqlmBNzPmHLkRby4ZDKWsSaSxMg/4aSHAlKfm6i68gihxcmkyntr/lKiy2P8N+T/MDLfMY1ui/ciVUqgc2Zub1mbEqBK0kykkUDlQVKu7mRA0Af8e/m/GZuANeObgM7hYJEgp5+gFzF8HcE2fJPDArw8CpY0L5WVSCVYtHoIMJesQZwXeDL5emkGFSQgbxCqd5TFjxsDFxQUpKSmIi4sT7f/ll18AALNnz+7imRFEz4Y3GlHmEokqp2DG3ndCIMK9mjyq12uAH++rK5fblJv+AUSxFcKyK7PxyO5HUKllF9ze3etuPNDvAYvOn2g/olzLrXSWK0tYCYaTu8qij+GJ9nNX77vwYP8HGVutvhaP7XkMqWWpbOOQ0XXl6JtSUwz8sADQNqZ3DHC1w8jpoeyY9j4o9uhXt0GRZcIGsUpnWaFQYNmyZQCAJ554okGjDAAffPABEhISMH78eAwZMqTB/sknn6B37954/vnnReMRBFGHwcgjK2giY9OqJLjntuhGA88Df64AstlsFggbD0xgv1/51fl4aNdDKKgtYOw3B92M54Y/R06VFeHk0b7CJEKnmnIsWxfLBy/HrPBZjK1UU4qHdz+M7EqBlHH0cqCX4ElP/nlg6+NMxHju9AjoHFk9e1Zg3XnDSM4yYYNYpbMMAC+++CJGjBiBo0ePIioqCnfddRdGjhyJZ555Bl5eXli3bh3TvqioCImJicjNzRWN9dVXX2HkyJEYOXIkZs2qO6nk5uY22EaOHGlW8kEQNwq/x19DjdILRR6sfnjMjFBIpU1OBSe/BM59y3Z2DQHu2MCsni9Vl2Lp7qXIqWIrAMZ6xeLtm96G1ETpa6L7EDnLrY4ss+2EBU
6I7kXCSbBy9EqM8BvB2AtqCvDwrodRUNPkRlYiAeZ+DngK1iZc2soULOE4DhPnRDJNSt16ocIxCD+czLT0WyAIq8dqnWWVSoX9+/fjpZdegr29PbZu3YqMjAwsXrwYZ8+eRXh4eKvHys7OxokTJ3DixIkGp1ir1TbYTpw40VBkhCBuRJLyK/HcrwnICpoMNFkUJJUDA8c3Kf6TegDY8RzbWW4P3P09YO/eYKrUVuKR3Y8gtZx91BvtFo01k9bATmbXGW+D6ADtLXkt1CxTZNn6kEvl+OjmjxDjya7jya7KxsO7HkaJukl5epVz3fdZ6cIOsnclcKUx603fkb5Q2LEuQkbINKz68yLOZpZa/D0QhDVjtc4yANjZ2eH1119HcnIyNBoNcnNzsX79epOV/V599VXwPI8NGzaY3dfcz4QJEzr/DRFEN1Cl0ePRb89ApjYi32cYs6//ICcoVNfTROVfAn68H+AN7ABz1wK+/Rs2K7QVeGT3I7hccplpFuocis+nfA4X4UWYsArarVkWlbqmTBjWiIPcAZ9O/hTRbtGMPbU8FQ/teoh1mD0jry/4a+oCXF/wl1MXUJLJpRg4iq0hUOgZC2ct8Pi3Z1FUxWrZCeJGxqqdZYIgOobRyOMfP8UjpbAaw9VS8E2kERKDFv2HXq/iVZkHfH8noBE8Ybnpn0DfOQ2b5ZpyLN21FOeLzjPN/Bz88OXUL+FpR4UqrBWhDKO6XAODvnn9Kc/zqCxlnSKSYVgvLkoXfD7lc4Q4s2lVk0qT8ODOB1FUW9RojJoMTHqFHUBXA3x/V0OGjL7D3SBrkssZnARDdCrkVaix7Puz0BtIv0zYBuQsE8QNzEd7k7DjYh4cjMAALVuDyD/3MOwcZICmqs5RLs9iO/e5FZjQmJqqXFOOh3c9jIvFbFoqTztPfDX1K/g6+Hba+yA6jkg+wQNVpc1HBzU1eug17JMGkmFYN552nvhyypfwd2CLCyWXJYsd5jFPAbH3sgNUFwDf3QHUlkJhJ0dg9kFmd7ReARcDh+OpJXhnZ2JnvQ2CsCrIWSaIG5Q/E3Lx0d4kAMAwjQwyNGam4Ix6BGftrZNc/LIEyI1nOwcOB+Z9UbcgCEBxbTEe2vWQSHrhZeeFddPWIdiZTUVHWB9KexnkKnbRZUtSDKEEg+MABzeSYVg7fo5+WDd9HQIc2XzoqeWpWLJzCfKq8+oMHAfM/ggIY6t5oiixTpJl1CEwZz8khsabKgk4jNDU3Xh/8XcqfjljungYQdxIkLNMEDcgF3LK8czPcQAAByMwUMNGlX3zTkClKQO3byWQtJPt7BYG3LMZkNct0supysGiHYtwpeQK08zb3hvrp69HmEtYp70PwnJwHGdikV9ts32EzrSDq5LNnEJYLQGOAVg3Tewwp5WnYeFfC5Fenl5nkCmAO78BvPqwA6QfAvfHCih01Qi4dpjZ1V8rhYuh7ub7+d8ScCq9BARxI0NnPYK4wSis1GDpptNQ6+r0hCPVcsiZqLIBIZm76jYS/2Q727kD9/0KONRpj5NLk7Hwr4XIqMhgmvnY+2D9tPUibSRh3bQ117IobZwbSTB6Ev6O/tgwfQOCnIIYe251LhbtWIRLxZfqDHauwIKfAUeBlCplNwAgOGsPJAZtg1kKDqPVdTfgOgOPR745g6ySGhDEjQo5ywRxA1Gj1ePBjadwrbzOyXEycojRso/e/fKOwV59XbfYtGaIzK4uouwRAQBIKEzA4p2L2TytqItYrZ++nqQXPZC2ZsQQp40jCUZPw9fBF+umrRM9ASpRl2DJziU4mXuyzuAaBCz4CVA4NTa6fn5QaisQmPM307+vTgq369HlkmotHtx4CpVqttw9QdwokLNMEDcIeoMRT35/DgnZ5Q22UWqhVlmH0Iy/xJ0lMuCub4DgkQCA3Rm78eDOB1GuKWeaRbpGYtOMTaJIFdEzaKuzLCp17UGR5Z6Ir4MvNkzfgH71JauvU62rxiN7HsG2lG11Br9Y4N
4fAFn9/7mxql9w1m5IBdrlMepGedfV/Co8/t1ZaFvIsEIQPRFylgniBoDneby27RL2XmmMArsZOFEGjIBrR6DSlDVs11Wj5uoW80VNAc/zWHdhHVYcWAG1gXWkYr1isWH6Bnjbe3fiOyE6E5Jh2C7uKnd8Pe1rjPBlK/3pjXr8+/C/8cm5T8DzPBA6FrhjIyCRoWm1eoWuCoHZ+5m+vXUyeOsbGx1KKsJzvybUjUMQNxDkLBPEDcCXh1LxzXFWVzxZp2C+4DKpESGZgsV84IFbPgT63w6dUYdXj72KD898KBp/jP8YfDHlCyo40sMROstVpRoYjeYdG5EMgyLLPRoHuQPWTF6DycGTRfs+T/gczx56FhqDBug1HbhtLVidVp12WSHTN2xzAKbolE0D0PjtXA6llCNuOMhZJogezi9nsvGf7WymilBeilA1+/UeIN8CpVZQdGT8P4ChD6BcU47Hdj+G35J+E40/L2oeVk9aDXu5vcXnTnQtQhmG0cCjtkJrsq1eZ0CNYB/lWO75KKVKvDf+PdzX5z7Rvr/S/sJDO69X+4u5A5j+BrNfrq9FrOQHxuav4dCbZ59gfXYgBRuPplt87gTRXZCzTBA9mJ0X8/DsrwmskQcWKJ0Zk52kHINUYkcYwx7G1dKrWLB9AU7knWB2ceCwYsgKvDrqVcglcktPnegG7J0UkMjYaKE53bKpgiVUve/GQCqR4tnhz+KFES9AwrFuQFxhHO7+425cLLoIbshiUd8Biu1wlBQxtttljpBx7Ofq1W0X8Xv8NYvPnSC6A3KWCaKHciS5CE9+fw4GwWP05/oFQVvAOkDDHH6EnBPn1N2TtQcL/lwgSg2nkqrwwYQP8ED/B8AJLoJEz4WTcCLdsVlnWWBX2MmgtJOZbEv0TO7ufTfWTFoDB7kDY8+tzsX9f92Pram/i/pIocUIp+8Ym7ZEg5f6stlxeB5Y8WMc9lzKt/zECaKLIWeZIHogcVllWLrpNLQGduX5I2NCobrISi1cpTnoa78L4MVO7xsn/yNayOdp54n109djcohY10j0fITp38w5y0I7pY27MRkbMBabZmyCn4MfY9cZdXjj5Jsm+/RSHYSnLJVtH1eCFTdFMDa9kcfj35/F4SQ2Ek0QPQ1ylgmih5GQXYaFX59AtdbA2O8ZHoTxOiWqy1md6SinTZBybNt6hP5zf4/+2DxrM/p79rfonAnrQag7rioRyy0AE2njSIJxwxLtFo3vZ32Pwd6DGbvJpZ88B47jMdppA2PWVOsRUwYsHh3K2LV6Ix7edBqnqcof0YMhZ5kgehDns8tx31cnUKHWM/ZZA/zwj1ERiN+bxdiDFOcQpjwJcFJg+tui8ZpeDO+IvgMbZ2yEr4OvqB1x4yDUHbc2skx65RsbTztPfDXtK9zf9/4Gm4mHUeCnvwVI5AhSnkeE8iiz7+Kha3gsJgh3Dg1k7LU6AxavP4UzGeQwEz0TcpYJoodwIacCC746LnKUb4r2wgd3xuLId/EwGhrdXwl0GOf8FTiZEjXzv8K71cmiMXmubnX8yjEr8fKol6GQKjr9fRDdS2sLk4ir95GzfKMjl8jxr2H/wrvj34WdzA5GE87ye6VxqLxrIyB3wBjn9ZChyRMIHjj8bRz+c9sA3BLDyjqqNHos/PokTqaRw0z0PMhZJogeQGYVsHjjaZGjPC7KE1/cPwTpOw8jK5l1bgY6bIObfSUuzHkfd15dhx2p20Xj+jr645sZ3+C2yNs6c/qEFeHkJpRhmIksF5OzbKtMD52OzbM2i0pkA8ChrIOYf/5jnL31XTg5GjDE8Rdmf162Hpe37MOHdw3E5D5sAaNqrQGL1p3EsZTiTp0/QVgacpYJwso5kVaCTy5JUV7LOspjIz3x5f1DYDy6EYf+LGP2OUiKEOt3BJ+NX4r74z5ARkUGTOW0WD9jA/p49Om8yRNWh7CwiKZGD63gJow38qLUcSTDsC0iXCPw3S2bRXaOB65VX8MD597Bf8
csQp/AeLhI2RRxx/ZpoN7zGT65ZxAm9mYd5lqdAQ9sOInDyeQwEz0HcpYJworZezkfSzadhcbAurpjIj3w5b39odq+HIe3ZELDOzH7Q0N34r6IIHya/DP0fJ0jxJlYreOioop8toajmzirhXCRX22VDgY9m2mFIsu2h6PCERCkjqzfMvJGfJ3yG+4O8kZA5F6mjY63w9/bq6Dc+hDW3tkbU/r6MPvVOiOWfnsW54opLSXRMyBnmSCslP/F5eCRb85AK3BaxkZ64us5frD7dhbSjl9FkvomZr/B4xKe8j2NpIo0xm7KWRZeCIkbH5lCCjsntsiMULcsdJ4lEg72LqRnt0mEzrLgPJJelYWn3M9A6xPH2jXDcPVMMRQbpuLTGa6Y0Z9dOKwz8Nh4VYLvTrKLkgnCGiFnmSCsDJ7n8eXfqXjqhzjoBQVHpvfzxfpR+VB9fRNqslKxv+IJZr9WVoNvQ7+HkWcdbCknxb297ha/mIROAbZIS4VJqkqFmTCUkEjoxsomEZwj7ut1L2QStjgNDx7fBv8AtZzN8f53xVJU5uZD/uUErB6Qgtmx/oJ+HF7ddhkf7UkCz5tMVEcQVgFdKQnCijAYebzy+0W8uf2yaN8dsZ741P0HyH++H3xtGfZXPI5aIyujOBKyBbWKSsbWy60Xvpv1HRb2uR8iyFm2SYS6ZeEiP2FkWehcE7aDsILn3Mjb8NMtP2GA5wDGrpXV4mDYz6yNd8De8qfAa6og2/IwPrJfhyXDWUkGAHy45yqe/+08dIIiSwRhLdCVkiCshBqtHo98cxqbjmWI9t3jlYW3y56B5NQXAIDLtZORrhnOtElzS0Ci18mGbblEjuWDlmPzLZvRz6MfeKP4QkSxQttEmBGjslQYWaaCJMR1BM4yb+QR5RaFb2Z8g38M/QdU0sbPRppHAq56nmLa52gHIL5mNgBAcm4TXsp9Am+OFJ+LfjiVhUXrTqK8RtcJb4IgOgY5ywRhBeSU1eKOtcew53IBY5fAiO/6HMcbVS9Ckn8eAFCkC8aByoeYdjXyShyM+LHB+431isUvs3/BwzEPQy65rk819ZSTIss2iaOw5HVxC86yBznLNovwHHFd4iWVSLGo3yL8dutvGO7beON+OOxXVCpKmS5HKu9Hni4SAMAVXsGChMXY0vdvyME6xkdTijH3syNIL6ruhDdCEO2HrpQE0c2cSC3GrasP4+I1Vu8XLc3Daf/3MCbtY0j5uovKCYUz1lc9D55nnZ0DEZuhllfBVemKV0a9go3TNyLcNZx9Id7EI05ylm2Slkpei6r3mcigQdgIImeZvesOcg7CV1O/wn/G/gceKg9oZbXYH/kteDQ938jwXeVz+FvuUbdp1GNQ6locdHoFA+Q5zHiphdW47dMjOJRU2AlvhiDaB10pCaKb4Hkem46lY8FXJ1BcrW2wS2DEY6pd+Ev1b7iXxAEAkuRyLPP2wuaaJ2GvYVeVX/Q5jGz3y7i71934Y+4fmB89H1KJVPyCJmQYlA3DNhHmTK4q08DYZDEpRZaJekRnCFNyLo7D7IjZ+GPuH1jYdyEKXNMR57+PaaPSemBb1VNY6uOFS4q6p13+unT8T/4C/mH/J2RozPVdVqPDonUnsWZ/Mi38I6wCcpYJohuo1ujxzE/xePl/F5mMFzFcCv6yfwXPYgOkBjVS5TI86+WB+QG+KKmYjqiiocw4hQ5ZqB6Whh9v+REvjHwBLkrzeZNNXXSEi3cI20AYWeaNPGrK6xxkox7QVOubbU/YEILIMm8077w6Khzxz2H/xC+3/gIMK0SuUwqzP7R0ADQlc3BXgB9WeHsiUS6HxKjFMuN32G3/EoZxVxraGnng3Z2JeOSbM6hUk46Z6F7IWSaILuZybgVu/eQwfjvX+PjRBVV4Q/Y1tipfRi9jCpLlcvzTywO3Bfhhu6MD/Mv7YFT6bcw4OpkaQ+/3xYZZ69DbvXfLLyx0lkmCYbPYOckhlbH//3rdskEt/lxQNgwbRn
SeaDnSG+EagS+nf4GbFkdAq6hh9g3PmoWw4hjsdrDH/EA/POXticsKOcKMGfhZ+Trek6+FB8ob2u+6lI/Zqw8jIbvMAm+GINoHXS0JoovgeR7fn8jEbWuOIKWwbgELByPukB7APuUzuE+2F5cVcqzw9sTcQD/scHQAz3FwrfXGlKuLIBF8XWctGYRbBk5rfXRY+PiUoso2C8dx4kV+1zNiGGrZz4XKUQ650oSsh7ANhOcJU3Iuk904TO03EXMfGSGqZDIx+T54VAcAAPY52OPOAD886e2JOKUC86V/150PpbshhQEAkF5cg9s/O4ov/05l5EIE0VWQs0wQXUBhpQYPbzqDf285D831inzjJAn4U/EC3pZ/gQv2Ojzg6427A3yx28G+oZ+d1gkzLz8CpcGeGW/47DBEDfZr2yQoskw0wdwiP32tpNl2hG0hvBk3lYKyOUL7eWHM7VGMTW5UYublpXDUuDXYDjjY435/X9zn54NTDjxek6/HDsVzmCw5A4CHzsDjze2XsWj9SRRUqEEQXQldLQmik9lxIQ/T//s39lzOBwD049KxSb4KXyjfxiWXItwW4Idlvt44bcc6JQq9CrMuPwpnjSdjjxzijaEzQ9s8D6HWkPTKto3QCa7PgGFQc822I2wMUTaMtg8ROykIfUazN/cOOlfMuvQYVDoHxh6vUuL/fLwwO9APp13K8ZHyA/yoWImBXDIA4FBSEaZ8+De2nsuhxX9El0HOMkF0EiXVWqz4MQ6PfnsGxdVahHPX8KF8Df7r8AqOemVjUlAAXvP0QNr1leFNkRplmHn1EXjWBDJ2r2AnTFzUp32OrjB1HDnLNo0wI0aDsyyILAvlGoSNITxPmEpB2eIQHMbf0wt+kewCZDe1D25JfAwyg0LUJ0sux3883TExOAC7vPLxtuNKrJH/F9FcFsprdXj6xzgs/eYMCiopykx0PrKWmxAE0RZ4nseWczlY+ccllNboEMVl41HZFqic4vGzsyNetmtePuGj8MW8tKfAl7PSC2dPFW5ZFgu5op36UZJhEE1wEjjB9SWv9bUUWSaaIGmfZlmIVC7BzMdisOX9syi51lh0xLMyCEuzV+J/0Z8gR5Ml6lcjkeAnZyf85OyEWHUGnqx8DXxFX6zVzcPuS8Cp9BI8N7037hwaBIlwrgRhIchZJggLklZUjZe2XsDh5CL05tLxmMPPKHVJx8eO9iiRejbbN9I1Egt7LYJkVwiyM9kKWBKFETMf7w97Z3EEptUILnIkw7BtxJHlOs2yMBsGOcu2DccJUsd1QPqgcpBj9pOx+PWdM0wub+M1FZY6rIR8Vh6+SdqIyyWXTfaPVykRr1LCxT0XM6rfxbzyIPyvej6e+02Hn89k4825/dHb17nd8yMIc5CzTBAWoLxWh9V7k7DpWCqGS49godcunHeuwmqFAoCT2X4cOIwOGI37+tyH4e4jsOOLC8i6zDrKCjspXAdVw9nLrkNzFOVHJWfZphE6wdpaPTQ1OpFmWehUEzaGKBtGx3TCjm4qzF4+EFvePwN1VWM+79ykcvj94YtNj32L+Ipz+Pbyt/g7+28YTcg+yqVS/ODsBDiXIVT7GRZW2iEjdzJmf1yChaPD8eTESLjadyCwQBACyFkmiA6gNxix+VQW1uw5hij5rxgeeAkJdhziOQ6A+ZO1u9IVc6Nux+3RtyPIKQi1VVr8779xKMioZNopVFLMfHwATl86ZIHZkgyDaMRUCev8tEqAJxkG0QTRAr/2yTCa4u7ngFuejMFv752GUdc4fm5yObZ+cA6zlw/E6okjkFedh1+u/oLfrv6CQnWxybHSFXKke+gBjx0YVvsnki/1wvSzt+Ohm0fj/lEhUMoo7SHRcchZJoh2wPM8tiRcwaZDn0MnPQp1YA3iOA4trZkd4hmLu/ouwKTgSVBI65zpsoIa/LkmAWX5bPJ+hUqK2U8NhEegPXDJApMWag3JWbZpZHIp7JwVqK1oLLWel1LBtJHKJbBzEi
9AJWwHkVzLQhko3P0d4Dm8FuVxLkzFyOKcavz6zhnMeiIGvv6+WDZoGR6JfQQHsg7gx8vf40T+abNjnreTAnbJ4Pi3sOWCHX4/Nwx3j3oE9wyNIdkZ0SHIWSaIVsLzPNLK07DhzBYcS/sd+bIS8A0KC/MnYj+pA26Jnodbes1HuEs4sy/7Sgl2fHEBmhq2vLDKQY5bnoyFT6gzdDrLlHoV5kelSwfh5KZkneXUcna/u4qcDFung3mWm0PhbMTs5THY/ukF1JQ3fg4ri9X49Z0zmPpgP4QO8IRcIseUkCmYEjIFmRWZ+OPqr9iW+DOy9ZUmx+U5Dpn2agCHsOrSIayLd8WI4FuwaOhcRLlF0WeaaDPkLBNEM6j1apzMO4lDWQewL2U3CgxldTtaCLY58sBU9xjcMvhxDAkYBYmJRTIJ+7Jx9NdkUUUqR3clbl0+EG6+bP7RDiMMCFFk2eZx8lAx0p+8VDaybEqqQdgYIhmGZXMbu/s74PZ/DsHvH8WhvLC2wa5TG/DnpwkYMTscQ6aHgLue6SLYORiPD/0/PDbkacTlnsK2s2uwo+gcKjnz88qXl+H33G/x+7Zv4S5xwsTwyRgfPBHDfYfDXm5vth9B1EPOMkE0wWA04ErpFZzMPYkTucdxOu8UNMbWRXZVRiPGSV0wtdftmDD4cajkphfkqat12LfpMtLii0T7PAIcccuyGDi6dYJOVKg1JGfZ5hEt3hP4G6RXJkSp4zqhEIizpx3m/XMI/vw0AQXpTW7YeODE76m4llyGKQ/0hZ1T4zoQjuMwyH84BvkPx3N6Df6O+xK7Lv+Ag/pS1DRzbisxVuKX5C34JXkL5JwUQ3yGYoT/SAz3HY6+Hn0hk5BbRIihTwVh0/A8j+SyZJzMO4kTuSdwOu80KnWmH+2ZwsFoxFgNj6mB4zFu5ArYeUY12z7zYjH2f3uFSZtUT1isJyY/0BcKVSd9LYWPT+lRpM3j1MJNmZMHOcu2DofOk2E0xd5ZgbkrBmH/t1dw9WQ+sy/rUgl+WHkS4+/thfCBXqK+CpkSk4cuw+Shy6ApzcCR4+9jZ8ZeHFIAlVLzjrOON+B43gkczzsBAHCQ2WOo7zAM8x2GEX4jEO0WLXoqSNgm5CwTNoVar8bF4ouIK4hDfGE84gvjUaIuadMY3no9xtVoEKOIxIxxT8Ku90xA0vyKa02NDod/ScaVo7km9w+dGYrht4Q1PGrsDIT5UUm3R7QUOe6UJxxEz0IYpe1g6rjmkCmkmPxAX3gEOOL41hQmiF1TocVfa88jaqg3xt0dDTtH09mGlG4hmDjjY0w0GqFO2o2df/8XcepEHLFXIlfWvMtTra/BweyDOJh9EADgonTBQK+BiPWKRaxXLPp79ifZho1CzjJxw8LzPLKrsnGh6EKDc5xYkgg9r2+5cxMkPI+BGg3G1ajhq/aDb+QdiL3zAcidmi8yUj+HlLOFOPzTVVQ3WcBSj8pRjskP9EVIP482zaldCC9yJMOweVoqZU2RZaKzNctCOI7D4Gkh8Alzxq6vLzIL/wAg6XQBshNLMeb2SEQP9zUfYJBIoOo1DXN6TcMt1aVI2L0J1y7/gHxlNg7bq3BOpYS+hYBBuaaccZ6lnBTRbtGI8YrBQO+BGOA5AEFOQRR9tgHIWSZutsDD9wAAHw1JREFUCAxGAzIqM3C5+HLdT0ndT6W29ZKKpoRqdRiuVmOYWgOnah+kO05G4E33Y+zQwZC2Mvqbl1aOIz8nizIM1BPQyw2TF/fpuuidULNMkWWbpyVnWFgSm7BBhKcJC+RZbg0B0W6464Xh2LfpMjIusDmWayt12LPhMhL2Z2PM/Cj4R7k2O5bUwQ2DbnsKsbcux99nEyA/+A2W5O2D2v4aTqpUOGmnRIqi5SImBt7QcG35MfFHAICj3BG93Xujj0cf9HHvg74efRHqHAppC08biZ4FOctEj4LneRSri5FcloyUshQklyUjuTQZiaWJqNXXtjyAGfx1egxXqz
G8Vo3BtTqk6nphDz8MR6JnYP7NI7AgyLXVY5XkVuP09nQknco3uV+ukmLM7ZHoO9a/a6UQwohQJ0o+iJ6BykEOmVwCvc60A+ToSpFlW0dU7rqTNMumsHdWYNYTMbh6Ig+HfkoSpdgsyKjElvfPInygF4bdEgbPQMdmx5NIOEwYGgsMjcWlaxX49eApaC9tx9M4hWj5JcTbyXHiuvOcJW9dfvEqXRVO55/G6Sb5n1VSFaLdohHpFolI10hEuEYg0jUSXnZeJH/roZCzTFglRt6IgpoCZFRkILU8tcExTilLQZmmrENjS3ge0VodYjUaxKo1GKTRwF5nhyPG/thjGIzPXEZj1rg+eGJIILydW+8s5KdX4OyODKTGF4rTtF0nuJ87Jizo3S1ZBkR5lunRoc3DcRwc3VWigjgAYO+igFROnxGbRyTD6NqX5zgOvUb6IbCPOw5+n2gyi1BqXCFS4woRMsADQ6aFwC/StcVx+/o7o+89k1BSPQ4/nMrEOycTEV56DJOkZ/GE5Dz0smqcUykRr1QiTqVAokIBQysdXbVBjYSiBCQUJTB2J4UTolyjEOEagQjXCIS7hCPEOQS+Dr4k5bByyFkmug0jb0RhTSEyKzORUZGBzIrMhr+zKrOgMYgzRrQHV4MBMRotYtUaDNRo0F+jhdQoxRljNA4ZY7DWOADJ0jBM6+ePu4cFYWS4ByStjLoadEakxhfi4t85yLlaZrads6cKo+dFInxQN0YWhJFlinAQqJNamHKWKW0cAUB8nugiGYYQBxclZjw6ABnni3H0t2SU5ok/sxnni5Fxvhi+4S7of5M/IgZ7Q6ZoXg7h7qDA4xMi8dj4CJzOGIWfT2fhpYQcBNWkY6z6PMZJzmOF5AqMEj0uKhWIUyoRr1QgQaVEibRtUotKbSXOFpzF2YKzjF0hUSDIKQjBzsEIdgpGsHMwQpxDEOIcAm97b3KkrQBylolOQ2fUIb86H7nVubhWdQ3ZFdk4WXMSv+/9Hbk1ucirzoOulTmMW4u3Xo8+Wh36aLToo9Wir0YLH4MBWl6OOD4CJ429sMbYGyeNvaCT2GFclCeWxPhjSj8fOKta99iN53mUXKvGleN5uHIsF+oq8+9BYSfD0JmhiJkQ2P1ROpEMg07AhHmnmJxlAoBYrtWFMgwhHMchNMYTQf3ccenQNZzclgZ1tfj8m5dajrzUchz6KQm9Rvqi90g/eAY5Nhuo4DgOw0LdMSzUHa/e2g9/nc/D9vPDsDGpCJxOjaGSRAzXJWJY9RUskCRDxWlRKJXiskKOS0oFLikUuKxUIL+FjBum0Bq1SClPQUp5imifTCKDr70v/B394WPvg6raKuhSdAhyCYK/gz98HXwhl1JJ+s6GnGWizRh5I8o0ZSisKURhbWHD74KaAvbv2kIYTUUhTEt524TKaESYTo9IrQ6ROi0itTr01Wrhaah7vULeGfHG3thg7IWTxt64wIdBCzkUUglGRLjjlQF+mNbPF24OLS/qAK5rpXOqkHK2EMlnCkxG4pqisJNhwIQAxE4KMpviqKsRyzAoskyYKEzSgp2wLUSa5U7OhtEapFIJBkwIRPRwHyTsz0bCvmyTTrOmRo+EfXX7nT1ViBzig4jBXvAKdmr2/GevkOH2IYG4fUggKtQ67LmUj+3ng7Dmaiy0eiPk0KMfl45hkisYpk3E7TUpeIyrk4cUSyS4olQgSS5HsqLuJ1UuR207gxN6ox7ZVdnIrspusO0/sb/hbw4cvOy84G3vDS97L3jZecHL/vr29b+97LzgpnKjCHUHsGpnuba2FqtWrcIPP/yAzMxMuLu7Y/r06Vi5ciUCAgLaNFZpaSleffVVbN26FXl5efD19cXcuXPx6quvwtXVtXPeQA+B53nU6mtRoi5BmaYMJeoSlKpL6340pQ1/l6hL6pzj2kLojW1Lv9Ze7IxGBOv0iNDprjvGdb/99XrUPwAr4R1x3hiNzXw4zhvDkGAMRx7cUb+M29NRgTm9vD
GpjzfGRnnBUdm6j31tpRbZiaXIvFSCrIvFJlO/iebrrMDASUHof1MAFHZW9vUSpo4jZ5kARZaJFhCeJzoxz3JbUdrLMWxWGAZODsalw9cQtyfTZMEnAKgoUuPszgyc3ZkBO2cFgvu4I6ivO4L6uMPe2XxAw1klx7zBgZg3OBBVGj2OJBfhQGIBDiQ64svySHxpuAUA4I1SxEhSMUCSihhdKm6RpMGTq8vhbwRwTSZFSoMDrUCKXI4MuazZaoOtgQePgtoCFNQWAMXm28k4GTzsPOBt7w13lTvcVG5wU7rV/Va5wV3lDlela8Pf9jJ7Cqg0wcqu5o2o1WpMnDgRx48fh5+fH+bMmYP09HSsX78ef/zxB44fP47w8PBWjVVUVIRRo0YhOTkZ4eHhuO2223Dx4kV89NFH+Ouvv3Ds2DG4u7t38jvqfLQGLSq0FajQVqBSW4lKbSUqNNf/1lXW7dM07ivTlDU4w5bSB7cHO6MRQTo9gvV6BOt0CLn+d4hOB0+DsSFzkZ6XIJX3QzwfhJ+NgUjkA3GRD0M274mm+Y3s5FLcFOaO0REeGB3hgf7+Li1qkA0GI0pyqpGXWo78tArkpZajvLD12TUCol3Rd5w/wgd6QSa30pRBJMMgTGAugkxp4wgAJhb4dZ8MwxxypRSxk4LQf3wA0hOKcPFQDrIul5ptX1uhReKJPCSeyANQt6bEJ8wFvuHO8A13gUeAI6Qy8fnRUSnDtH6+mNbPFzzPIzG/EgcSC3E8tRin02XYo3HDHuOQ6615+KMY/SVpiOJy0MuQhWhtNkbVXIOCq7zeoi4SnXndcc6Uy5AhlyNTVvd3e6PRptDzeuTX5CO/pnWPdhUSBVxVrg1OtJPCCc4KZzgrnOGkcGrYbvq3s7JuWym98c4dVussv/HGGzh+/DhGjRqFXbt2wdGxLiXMBx98gGeeeQZLlizBgQMHWjXW008/jeTkZMybNw8//vgjZNc1RcuXL8fq1auxYsUKbNiwoZPeiWmMvBFqvRoagwZqvRo1+hpU66obf+tq6n6a2Ott1fq6/dW6atTqa1Glq0KltrJbHd7msDca4a/Xw09vuP5bD3+9oeG3l8HApPKs4lXI4H1xgvdFGu+Hq8ZAXOUDkcb7QQuxNsvdQYGBQa4YFOSKkREeiA10hcLEiQ4A9FoDKkvUKCuoRcm1KhTnVKPkWjVK86th1LctYuLopkTUUB/0GeMHN1+HNvXtFoQXOUodRwDwCHCARMLBKIgYerSQhouwDUTRRSuQYZhDKpMgYrA3IgZ7o7ywFpePXEPS6XxUFKmb7VdRpEZFkboh3adEwsHV1x7u/g7w8HeAu78jXL3t4eSpgvz6gkGO49Db1xm9fZ3x6PgI6A1GXM6txIm0YpxMK8HJ9BJcq/HENaMndmEYYKh7LRn0COXy0IvLRrQkC+FcLkL1eZiszocT15iTnwdQLJXgmkyGazIZcmXS679lyJFJkSuToboTAx5aoxYFNQUoqCloc1+FRAFnpTMc5A6wl9nDXm7f8LeD3AF2Mru6bbk9HGR1v+vbNbXZyeyglCqhkqm6XUJilc6yVqvFJ598AgBYs2ZNg6MMACtWrMDGjRtx8OBBnDlzBkOGDDE3DAAgNzcXmzdvhkKhwKefftrgKAPAu+++ix9++AHffvst3nnnHXh7e7d5rl+d/wpSBynUhjrHV6PXNPxd7ww3/VttUEOtV1t8YVt3wPE83IxGeOsN8DQY4G0wwOu68+tpaHSGXYxGUV77Yt4J13hvnOR9kc77IJ33RbrRB+m8H4rgDHEm/DqUMgl6+zphULBbnYMc7Ipg97ryo5oaPWortShKK0dNhQ61lVpUlWpQWVyLimI1KorVqK1oWUrRHE7uKoQP9kLkYG/4hDn3qMdUpFkmTGHnqMDo2yNx/PdU6DUGcBIeQ2aGwtnDrrunRlgDgvNEV+ZZ7gguXnYYeVsERswJR2FmJVLOFiD5bCEqWvHE0GisW8Rdcq0ayY
J99s4KOHuq4OxpB0c3JeydlbB3VsDeWYFAFwWihwThwbFh4Hkgvbga53PKcT67HAk55biYU45qLZDMByKZD8SfxpFNRubhgQqEcPkI5fIQKslDqCEfIbp8DOGK4MVVMPPgAVRIOORed6ALpdLGH1nd7wKpFCVSCfguPtdrjVoU1RahqFac5q+9yCVyqKQqKGXKOgf6+t8qqQpKqZL5WyW7brv+t76647JRq3SWjxw5gvLyckRERGDQoEGi/fPnz0dCQgK2bdvWorO8Y8cOGI1GjBs3Dj4+Psw+pVKJ2bNnY926ddi+fTsWL17c5rluvLIRUjsrfezeDmQ8D3eDAW4GI1yNxiZ/G+BuMMKriUPsYTCYiPPWRYavGT2Qy3sgjvdCHu+JXN4D+bwHCnh3FPJu0EIJCQ9IUPchlPGAjOfgBMCNB2Tg4GmnQICTCr6OCnjZKeAql0EFDjq1Hto0LTSXcnG4NgvaWj3U1ToYDZaPeMgUEvhHuSG4rzuC+7nD1acH67hEmmWSYRB1xE4KQv8JAaipVGPv/t0YPC24u6dEWAvC6KUVaZZbA8dx8A5xhneIM0bNjURZQQ2yLpUg81IJchJLodMY2jReTYUWNRVa5KVWmG0jkXBQ2MugtJNBaS9DjL0MQ+2coIhyRS14FKt1KFHrUFCjQV61FvnVGmh5HjrODQWcG66hFw5xdcFo4/UfGaeFF1cKH64Yvlwx/Lgi+HJF8NcWIkhSjMFcGVy4KtFc9ACKpVIUSaUouO5El0olKJXUOdKlUgnKGv6WQmel1zedUQedUYdKXdur8hpq2/Y/NoVVOsvx8fEAgMGDB5vcX29PSEgwub+tY61bt65VY5nikb+fgp2iPgJj6kN23WbyA9ho403YGk2c+X0mbDy461aubmSOA8cLm3NMT47dyYxV36IcQDk4JNfPl+Ouz5sDDwl4TgJw9b8bbyCUAEKv/7SJGgDFagBqlAEoa2v/dmCvL4OrPg+uurofJ30xJNk8sB8oRd1Pl8PzCC6vQNaGDR1alGcsE5TdJs0y0QSpVAKVg5zuoQgWgVyraO1alP70Y8fHtdB5rT04AOgDoDc4VErdUSb3bfiplrl1eHyjkYe6StdsWlE5gIDrP0BrMiapADgDCGmwlFz/ucAbAd4IjjeCg7FBKsOBR91VmgfHX/8NHlKehyd4eKHRVncx55v8XN/m+etbjV4Kb+Z3xyvWdKw/Z6Z/rbYWT+GxDo1tlc5yZmYmACAwMNDk/np7RkZGl42l0Wig0TRqgsvL6xyPSokrdBL7FufRrfBm/rbkuA33vz0DubYK9rX5cKjOg31NHhxrcmFfnQ+5vjElnB7d5ByboTjLsuNpdDoUFzezfLoHo9PpUFNTg+LiYshbWbaWoOPWHm70Y1ah00FjaBKZy8mp+7EQlj6vtQeH6z8BAHQyO9TY+6Da3g819r6ocvBBjb0vdAqnbp5lR+BgTtrINGmLvQehltRd1zuS9tAqneWqqrpHCfb2pp1QB4e6xVSVlS2H4y011qpVq/Daa6+J7C99d3eLcyAIqyQ5CfD07O5ZEARBEESnU1xcDBcXl3b1tUpn2Rp5/vnnsWLFiobtsrIyhISEIDMzs90H39aoqKhAUFAQsrKy4Ozs3N3T6THQcWs7dMzaBx23tkPHrH3QcWs7dMzaR3l5OYKDgzuUItgqneX67Bc1NaarpFVXVwMAnJxafixiqbGUSiWUSnHuQBcXF/rQthFnZ2c6Zu2AjlvboWPWPui4tR06Zu2DjlvboWPWPiQdWKdjlUs5goPrVmJnZ2eb3F9vDwkJMbm/s8YiCIIgCIIgbAurdJZjY2MBAGfPnjW5v94eExPTpWMRBEEQBEEQtoVVOstjxoyBi4sLUlJSEBcXJ9r/yy+/AABmz57d4ljTp0+HRCLBoUOHUFDAVqLRaDTYtm0bpFIpZs6c2aY5KpVKvPLKKyalGYRp6Ji1DzpubYeOWfug49Z26Ji1DzpubYeOWf
uwxHHj+I7k0uhEXnzxRbz55psYPXo0du3a1ZC1or7c9fjx45ly15988gk++eQTzJ07F6tWrWLGuu+++/Ddd9/h9ttvxw8//NBQxe+pp57Cxx9/jEWLFnV5uWuCIAiCIAjC+rHKBX5AnbO8Z88eHD16FFFRURg3bhwyMjJw4sQJeHl5Yd26dUz7oqIiJCYmIjc3VzTWf//7Xxw/fhy//vorevfujaFDh+LixYu4cOECoqKi8MEHH3TV2yIIgiAIgiB6EFYpwwAAlUqF/fv346WXXoK9vT22bt2KjIwMLF68GGfPnkV4eHirx/L09MTJkyfx5JNPQqvVYsuWLSgvL8fy5ctx8uTJDqUTIQiCIAiCIG5crFaGQRAEQRAEQRDdjdVGlgmCIAiCIAiiuyFnuZNYuXIlOI4Dx3H49ttvu3s6VklCQgKWLVuGkSNHwt/fH0qlEi4uLhg1ahRWr14NnU7X3VO0Oq5cuYK3334bN998Mzw9PSGXy+Hr64t58+bh0KFD3T09q6W6uhrffPMNnnzySYwYMQJKpRIcx+HVV1/t7ql1O7W1tXj55ZcRHR0NlUoFf39/LFmyBDk5Od09NavlzJkzeOuttzBv3jwEBgY2nOsJ09TU1GDr1q148MEH0atXL6hUKjg4OCA2Nhavv/46qqqqunuKVssHH3yAefPmISoqCi4uLlAqlQgJCcHChQtx/vz57p5ej6C4uBje3t7gOA6RkZHtG4QnLM6VK1d4pVLJcxzHA+C/+eab7p6SVbJ69WoeAB8SEsJPmjSJv/vuu/lJkybxKpWKB8CPHz+e12g03T1NqyIgIIAHwDs6OvKTJ0/m77zzTr5///48AJ7jOP7DDz/s7ilaJefOneMBiH5eeeWV7p5at1JbW8uPHDmSB8D7+fnxd955Jz98+HAeAO/l5cWnpKR09xStkjlz5pj8PBGm+fLLLxuOUZ8+ffg77riDnzZtGu/k5MQD4Hv37s3n5+d39zStEg8PD16lUvHDhw/n586dy8+dO5ePjo7mAfByuZzftm1bd0/R6lm0aFGDPxYREdGuMejbbWGMRiN/00038T4+Pg0nVHKWTZOSkmLyYpyXl9fgAK5evbobZma9TJo0id+0aRNfW1vL2NeuXcsD4KVSKX/x4sVump31kpyczD/44IP82rVr+TNnzvCvv/46Ocs8z7/wwgs8AH7UqFF8ZWVlg/39999vuGElxLz11lv8Sy+9xP/+++98bm4ur1QqyVluhg0bNvBLly7lL126xNivXbvGDxo0iAfA33PPPd00O+vm8OHDovM9z/P8mjVreAC8j48Pr9PpumFmPYM9e/bwAPilS5eSs2xNfPHFFzwA/ttvv+UXLVpEznI7+eabb3gA/Ny5c7t7Kj2GqVOn8gD4V199tbunYvWsWrXK5p1ljUbDu7i48AD4s2fPivbHxMTwAPjTp093w+x6FuQst5+jR4/yAHilUklPEttIREQED4CPj4/v7qlYJTU1NXxERATft29f/urVqx1ylkmzbEHy8vLwr3/9C5MmTcKCBQu6ezo9GrlcDgBQKBTdPJOeQ31p92vXrnXzTIiewJEjR1BeXo6IiAgMGjRItH/+/PkAgG3btnX11Agbov68pdFoUFxc3M2z6VnQdbJ5XnvtNaSmpmLt2rUNx6q9kLNsQZYvX47a2lp89tln3T2VHk1paSnef/99AMCsWbO6eTY9h9TUVACAr69vN8+E6AnEx8cDAAYPHmxyf709ISGhy+ZE2B715y25XE41D9rAN998g8TERERFRSEqKqq7p2N1JCQk4P3338cDDzyAcePGdXg8q63g19P4448/8PPPP+O1116jD24bSUpKwptvvgmj0Yj8/HwcPXoUVVVVePTRRylC30pSUlLwxx9/AABuvfXWbp4N0RPIzMwEAAQGBprcX2/PyMjosjkRtsdHH30EAJg+fTqUSmU3z8Z6effdd3Hx4kVUV1fj8uXLuHjxIvz9/bF582ZIpd
Lunp5VYTQa8dBDD8HV1RXvvPOORcYkZ9kCVFVV4fHHH0d0dDSeffbZ7p5OjyM/Px8bN25kbMuXL8fKlSshkdDDj5bQ6/VYvHgxNBoN7rrrLgwZMqS7p0T0AOrTddnb25vc7+DgAACorKzssjkRtsX27dvx9ddfQy6XY+XKld09Hatm586d2Lt3b8N2SEgINm3aROd7E6xevRqnTp3C+vXr4eHhYZExyVkGMHfuXFy+fLlNfTZt2oThw4cDAP79738jKysLe/futak7444et3rGjh0LnudhMBiQmZmJLVu24LXXXsNff/2FXbt2ITQ01IKz7l4sdcyasnz5chw+fBjh4eH49NNPOzpFq6QzjhtBEN3HlStXcN9994Hnebz77rsN2mXCNHv27AEAlJWV4fz583j99dcxfvx4vPHGG3jhhRe6eXbWQ2ZmJl588UWMHz8eixcvtti45CwDSEtLQ2JiYpv61NTUAABOnjyJNWvW4P7778fEiRM7Y3pWS0eOmymkUinCwsKwYsUKhIaG4vbbb8eTTz55Qy0wsvQxe/PNN/HZZ5/Bx8cHO3fuvGE1f5Y+bgTg6OgIwPxxqq6uBgA4OTl12ZwI2yAnJwfTp09HaWkpVqxYgaeeeqq7p9RjcHV1xbhx47B9+3aMGjUKL730EqZOnYphw4Z199SsgieeeAJarRZr16616LjkLAOIi4trd9/t27fDaDTi/PnzmDBhArPvypUrAOocmq+++grTp0/Hc88914GZWhcdOW4tMXfuXDg6OmLHjh3QarU3zGpfSx6ztWvX4sUXX4SLiwt27NjR/spEPYDO/KzZKsHBwQCA7Oxsk/vr7SEhIV02J+LGp6SkBFOnTkVGRgYeeOABvPfee909pR6JXC7HXXfdhTNnzmDbtm3kLF/njz/+gKurKx599FHGrlarAdTdqNX7aj/88EOrF8STs2whmruYX7lyBVeuXLmh5ASdDcdxcHd3R2ZmJkpLS+Hj49PdU7IqfvjhBzzxxBOwt7fHn3/+iYEDB3b3lIgeRv1j77Nnz5rcX2+PiYnpsjkRNzZVVVWYMWMGLl26hHnz5uHLL7+kMuEdwNPTEwBQWFjYzTOxLsrKynDw4EGT+9RqdcO+ege6NdDqqQ7y6quvgq8r7iL6WbRoEYC6FC88z2PDhg3dO9keRGpqKrKysuDs7NxwQiDq2L59OxYuXAiZTIYtW7ZgzJgx3T0logcyZswYuLi4ICUlxeTN/i+//AIAmD17dhfPjLgR0Wg0mDNnDk6ePIlp06ZRFgcLUO/0RUREdPNMrAdz/lhaWhqAumNVb2tLAJOcZaLbWL16NfLy8kT2xMRE3HvvveB5HgsXLqQTahOOHDmC+fPng+d5/Pjjj5g6dWp3T4nooSgUCixbtgxAnc6vXqMMAB988AESEhIwfvx4Wm1PdBiDwYB77rkH+/btw7hx4/Dbb7/dMNK6zuTIkSPYsWMHjEYjY9fpdFi9ejW++eYb2NnZ4a677uqmGdoOJMMguo33338fTz/9NGJjYxEZGQme55GRkYEzZ87AaDTipptuwqpVq7p7mlbFLbfcgtraWoSFhWHr1q3YunWrqM3YsWPx0EMPdf3krJy5c+ciNzcXQGOVw6+++go7duwAAPj5+WHLli3dNr/u4MUXX8SePXtw9OhRREVFYdy4ccjIyMCJEyfg5eWFdevWdfcUrZI///yTSXWm1WoBACNHjmywvfTSS1RU6TqffPJJw3fL09MTjz/+uMl27733Hj1JbEJSUhIeeOABeHp6YsiQIfDw8EBRURHOnz+P3NxcqFQqbNiwAUFBQd091RsecpaJbuPNN9/E9u3bcfr0aezcuRO1tbVwd3fHlClTcM899+D++++nPMsCysrKANRlh6h/rGQKcpbFnDt3TlRgIycnBzk5OQBscyGbSqXC/v37sWrVKnz//ffYunUr3N3dsXjxYqxcudJswRJbp7CwECdOnBDZm9pIR9pIaWlpw9/N3Z
C++uqr5Cw3Yfz48fj3v/+NgwcPIiEhAUVFRVAoFAgNDcX8+fOxfPnyG3phtzXB8TzPd/ckCIIgCIIgCMIaobAdQRAEQRAEQZiBnGWCIAiCIAiCMAM5ywRBEARBEARhBnKWCYIgCIIgCMIM5CwTBEEQBEEQhBnIWSYIgiAIgiAIM5CzTBAEQRAEQRBmIGeZIAiCIAiCIMxAzjJBEARBEARBmIGcZYIgCIIgCIIwAznLBEEQBEEQBGEGcpYJgiAIgiAIwgz/D8alAbgfTEpJAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAssAAAIHCAYAAABpIhEUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd1hUV/rA8e8MvSsgUhVQsGNBjdh7iyXRaHoscROTuNloel3Tu5tk0xNLerLRxBYTu0ZRsHdFRSkqFnqHYWZ+f/Bj4M4Mbejwfp4nz+6ce++5Z66I75x5z3tUer1ejxBCCCGEEMKEuqEHIIQQQgghRGMlwbIQQgghhBDlkGBZCCGEEEKIckiwLIQQQgghRDkkWBZCCCGEEKIcEiwLIYQQQghRDgmWhRBCCCGEKIcEy0IIIYQQQpRDgmUhhBBCCCHK0aiD5by8PF566SVCQ0Oxt7fH19eXuXPncvny5Sr3sWLFClQqVaX/ffvtt3X4ToQQQgghRFOkaqzbXefn5zNixAiioqLw8fFhyJAhxMXFsW/fPtq0aUNUVBTBwcGV9rN7926+/vprs8cyMjJYvXo1ALGxsVXqTwghhBBCtByNNlh+4YUXeP3114mIiGDTpk04OzsDsGTJEh5//HGGDRvGjh07anSPzz77jIcffphBgwaxe/fuWhi1EEIIIYRoThplsFxYWIiXlxcZGRkcOnSI3r17K4737NmTY8eOceDAAcLDwy2+z6BBg9izZw+ff/45Dz74YE2HLYQQQgghmplGmbMcGRlJRkYGHTp0MAmUAW677TYA1q1bZ/E9Ll68yJ49e7C1tWXmzJkW9yOEEEIIIZqvRhksHz16FIA+ffqYPV7SfuzYMYvv8f333wNw880307p1a4v7EUIIIYQQzZd1Qw/AnISEBAD8/f3NHi9pj4+Pt/geJcHyvffeW6XzCwoKKCgoMLzW6XSkpqbi4eGBSqWyeBxCCCGEEKJu6PV6srKy8PX1Ra22bI64UQbL2dnZADg6Opo97uTkBEBWVpZF/e/bt4+zZ8/i7u7OzTffXKVr3nzzTV5++WWL7ieEEEIIIRpOYmJiuZOwlWmUwXJdK5lVnjlzJra2tlW65tlnn2XRokWG1xkZGbRr184QdIvKaTQatm/fzogRI7CxsWno4TQZ8tyqT55Z1e27uo/Hdz1u0v7qgFcZHjC8/gfUxMjPmmXkuVWfPDPLpKamEhoaiouLi8V9NMpguaRMXG5urtnjOTk5ABa98aKiIn755Reg6ikYAHZ2dtjZ2Zm0u7u74+HhUe1xtEQajQZHR0c8PDzkL3o1yHOrPnlmVZedko2Vg5VJe5Z1lvxuqwL5WbOMPLfqk2dWMzVJmW2UC/zatWsHwKVLl8weL2lv3759tfvetGkT169fJzg4mIEDB1o+SCGEaAaSspPMtl/JuVLPIxFCiMapUQbLPXv2BODQoUNmj5e0h4WFVbvvkhSMe+65x8LRCSFE83E5+7LZ9ivZEiwLIQQ00mB50KBBuLm5ERsby5EjR0yOr1y5EoDJkydXq9/s7GzWrFkDSLAshBAASTkysyyEEBVplMGyra0tCxYsAOCRRx4x5ChD8XbXx44dY9iwYYrd+z7++GM6d+7Ms88+W26/v/32G7m5uQwYMICQkJC6ewNCCNFElDeznJSdRCPc4FUIIepdo1zgB/DCCy+wZcsW9uzZQ0hICEOGDCE+Pp7o6GjatGnDsmXLFOcnJycTExNDUpL5WRKofm1lIYRozjRaDTdyb5g9lq3JJrMwEzc7t3oelRBCNC6NcmYZwN7enu3bt/Piiy/i6OjI6tWriY+PZ/bs2Rw6dIjg4OBq9ZeUlMS2bduwsbHh9ttvr6NRCyFE03E19yp6yp89Li9FQwghWpJGO7MM4O
DgwCuvvMIrr7xS6bmLFy9m8eLF5R738fGhqKioFkcnhBBNW2WL+C5nX6aze+d6Go0QQjROjTpYFkIIUXcqC5bLKysnWg6NRoNWq62Tfq2trcnPz6+T/psjeWZgZWXVIDWmJVgWQogWqrKKF1IRo+XKzMwkOTmZgoKCOulfr9fj7e1NYmJijTaLaEnkmRWzs7PD09MTV1fXerunBMtCCNFCVTazLLWWW6bMzEwuX76Ms7Mznp6e2NjY1HpwptPpyM7OxtnZGbW60S6falRa+jPT6/VoNBoyMjK4fLm4ik99BcwSLAshRAtlHAy3d2lPfFZ8ucdFy5CcnIyzszP+/v51NoOp0+koLCzE3t6+RQZ+lpBnVryWzcXFhUuXLpGcnFxvwXLLfNpCCCFMql308eqjeC1pGC2PRqOhoKAANze3Fv1Vv2i8VCoVbm5uFBQUoNFo6uWeEiwLIUQLVKQr4mrOVUVbuFe44nVGQQY5mhxEy1GycKwhFlEJUVUlP5/1tdBRgmUhhGiBbuTeQKtX/kNjPLMMkorRUsmssmjM6vvnU4JlIYRogYy3uXawdqCtY1ucVc6KdtmYRAjR0kmwLIQQLZBxEOzj5INKpaKVupWiXWaWhRAtnQTLQgjRAhkHwb7OvgASLAshhBEJloUQogUyrnTh61ROsCwVMYQAIDc3l48++oixY8fi4+ODnZ0dLi4udO3aldmzZ7N27dpmtbPejh07UKlUzJ49u6GH0uCkzrIQQrRAxjPGPs4+gMwsC2FOZGQkM2bMICkpCXt7e/r164evry8FBQXExsbyzTff8M0339C1a1dOnjzZ0MMVtUyCZSGEaIGMg2A/Zz8AWqtbV3ieEC3NoUOHGDVqFAUFBTz55JO88MILJpthJCYmsmTJEj7//PMGGmXt69+/P6dPn8bNza2hh9LgJA1DCCFaGJ1eZ3aBH5jOLKfkp5BflF9fQxOiUdHpdNxzzz0UFBTw6quv8s4775jdNS4gIID//Oc/7N69uwFGWTccHR3p3LkzPj4+DT2UBifBshBCtDDJeclodMqdr0pmlo2DZZDycaLl2rBhA6dPn6Zdu3Y8++yzlZ4fHq7c2GfXrl0sWLCAsLAwWrdujYODA507d+aZZ54hPT3d5PoVK1agUqlYvHix2f4nTZqElZUVcXFxivYTJ05wzz33EBwcjL29PW3atKFXr1489thjJCUp//7u2bOHW265hfbt22NnZ4e3tzf9+/fnmWeeITs723BeeTnL6enp/Pe//2XcuHGGPjw8PBg/fjybN282O+7hw4ejUqmIi4tj9erVDBgwACcnJ9zd3bnzzju5dOmS+QfaSEgahhBCtDDGqRU2ahs8HDzQFmmxU9nhZutGRmGG4XhSdhJBbkH1PUzRiOh0etJyC2uxPx1ZuRo06gLU6rqdt2vtaItabdkmFn/++ScAM2bMwMrKqtrXP/nkkxw9epSwsDBGjRpFfn4+hw4d4u2332b9+vVERUXh7OxceUcVOHjwIIMHDyY/P5+wsDCmTp1Kbm4uFy5c4MMPP+SWW24xzA6vW7eOW265Bb1eT//+/Rk4cCDp6emcO3eOt99+m/nz51c6nqioKB599FECAwPp1KkTERERJCQksGnTJjZt2sTXX3/N3LlzzV776aefsmTJEoYMGcLEiROJjo7m559/5uDBgxw9ehQHB4caPYu6IsGyEEK0MObKxqlVarQUr+T3cfJRBMuXc5QbmIiWJy23kPDXtjT0MCxy8IXReDjbWXTt0aNHAejdu7dF1//73/9m4MCBirzfgoICHn30Ub788kuWLFnCSy+9ZFHfJT766CPy8/N57733ePzxxxXHzpw5o7j3e++9h06nY+XKlUyfPl1x7v79+/Hw8Kj0fp06dWLv3r0MGDBA0X748GFGjhzJwoULmTlzptmg+5NPPmHXrl1EREQAxRVGxowZw549e/jpp5/KDbIbmqRhCCFEC2NcDq4kX7m810nZkoYhWqaUlB
QAPD09zR6///77mT17tuK/snnLEyZMMFkgZ2dnxwcffIC1tTVr1qyp8Rhv3LgBwOjRo02OGeccV3Ruv379cHFxqfR+QUFBJoEyFH+geOSRR8jMzGT79u1mr124cKEhUIbivOhFixYB8Pfff1d674YiM8tCCNHCGAe/JRuSlDAOlo23xhZCFPvmm29MaisPHz6cwYMHG15fvnyZdevWcebMGTIzM9HpdADY2tpy7ty5Go8hPDycP//8k0ceeYTXXnuNwYMHY21tPrwLDw/n9OnT3Hvvvbz44ouEh4dblAaj1WrZunUre/bsISkpiYKCAgDD+ynvfY0dO9akLTQ0FMAkt7oxkWBZCCFaGOO0ipINScp7LQv8REtVkpaQnJxs9nhRUZHh/8+fP58vvvhCcXzJkiU888wzaDQa40trzZNPPsnu3bvZsWMHI0aMwNnZmYiICG6++WZmz56tmNl+4403OH78OOvWrWPdunW0bt2awYMHM2XKFO655x7s7e0rvd+lS5eYNGmSIUXFnKysLLPt/v7+Jm0ls9klAXdjJMGyEEK0MDKzLKqrtaMtB18w/ereUjqdjqzsbFycnetlgZ+levbsSWRkJIcPH+buu++u1rVRUVE8/vjjuLm58eGHHzJ8+HC8vb2xsyvOn/b19a32bGrJrHRZrq6ubNu2jcjISNatW8eOHTvYtm0bmzdv5s0332TXrl2EhIQAxSXuDhw4wLZt21i/fj07d+40BM7vvPMOe/furTRved68eRw9epTp06fz1FNP0alTJ1xcXFCr1Xz55Zc8+OCD6PV6s9fW9Z91XZFgWQghWhC9Xm8S/BoHy8avb+TeoFBbiK2V5UGHaNrUapXFi+TM0el02OgKcHW2a9QB1IQJE/j000/59ddfefvtt6tVEeP3338H4PXXX2fWrFmKY3l5eVy9etXkGlvb4r9jZUu4lXX5svkPriqVisGDBxvSP65fv85jjz3GTz/9xPPPP8///vc/w7nW1taMHTvWkBIRHx/P3Llz2bZtG2+//TbvvPNOue8pJyeHzZs307ZtW3755ReT53HhwoVyr23KGu9PqBBCiFqXnJdMgVb5dae/s/KrUeM0DD162clPtEgTJ06kS5cuJCQk8Oabb1br2rS0NMB86sGvv/5qdva1ZDHe2bNnTY6dPXu2yvWIvby8DLWaT5w4UeG57du35+mnn67SuRkZGeh0Onx8fEwCZY1GY/iA0NxIsCyEEC2I8ayyrdqWNo5tFG0uti642SlX8EsqhmiJ1Go13333HXZ2drz44os89dRTZGRkmJyXkpJCTEyMoq1k4drSpUsVOcunTp0yBKfG+vXrh6OjI3/++ScHDx40tCcnJ/PAAw+YTcP4/PPPuXjxokn7hg0bgOLUixL/+c9/zM5omzvXHC8vL9zc3Dhx4gSRkZGGdq1Wy9NPP202yG8OJFgWQogWJDErUfG6pMayMePZ5ktZjXuHLSHqSnh4OFu2bMHb25t3332Xtm3bMmzYMO68805uvfVW+vXrh4+PDzt27KBz58707dsXgDlz5uDt7c26devo1KkTt99+O2PGjKFXr14MGTKE9u3bm9zL2dmZJ554gqKiIgYPHsz48eOZMGECoaGhaLVa+vXrZ3LN559/TnBwMN26deO2227jjjvuoFevXixcuBB7e3tFHeeXX34ZPz8/+vTpw+23387MmTPp1KkTH374Ie7u7jzxxBMVPgtra2ueeuopioqKGDZsGGPHjuWOO+6gY8eOfP755zzyyCM1fNqNkwTLQgjRglzKVga9/i6mXxGbaze+ToiWZPDgwcTGxvLhhx8yePBgYmJiWLVqFVu2bCErK4uZM2fy+++/c/z4cbp37w4UV9LYv38/d911F4WFhaxdu5bLly/z6quv8tNPP5V7r8WLF/Puu+/i7+/Ptm3bOHHiBHPnzmXjxo2GnOayXn31VebOnYtKpWLr1q2sW7eOvLw85s2bx5EjRxg0aJDh3P/+97
/ccccd5Obm8ueff/LXX39hbW3NokWLOHbsmGEhYEWee+45vvnmG8LCwoiMjGTLli307NmTqKgowweF5kYW+AkhRAtyOUuZTuHn7Gf2PON2ScMQLZ2joyOPPvoojz76aJWv8ff354cffjB7LC4uzmy7SqXiiSeeMJnl1el0rF+/HldXV8WiyMmTJzN58uQqjefee+/l3nvvrdK5w4cPL7eqxX333cd9991n0h4WFsbs2bNN2nfs2FHufQIDA8u9T2MhM8tCCNGCGM8QB7iYz1E0mVmWNAwhRAslwbIQQrQgxkGvcW5yee0SLAshWioJloUQooUo1BZyPfe6os3PxXwahnGwnKXJIqPAtAqAEEI0dxIsCyFEC3El+wp6lLmB5eUsezt7m1TJkEV+QoiWSIJlIYRoIYyD3VZ2rXCxdTF7ro3axmTba0nFEEK0RBIsCyFEC1HVShjlHZeKGEKIlkiCZSGEaCGqWmO5vOMysyyEaIkkWBZCiBaiqpUwyjsuwbIQoiWSYFkIIVoI4zSK8iphGI5LGoYQQkiwLIQQLYFerycxK1HRVunMslEaxpXsK2h12lofmxBCNGYSLAshRAuQWZhJtiZb0VbdnOUifRHXcq/V+tiEEKIxk2BZCCFaAOPFfWqVGm8n7wqvaW3XGgdrB0WbpGIIIVoaCZaFEKIFMF6c5+Pkg43apsJrVCqVVMQQQrR4EiwLIUQLUN1KGOWdZ5z3LERzp1KpUKlUDT0MheHDh6NSqYiLi6uzewQGBja6991QJFgWQogWoLqVMAznSUUMIZqduLg4VCoVw4cPb+ihNAnWDT0AIYQQdc/imWXjNIxsScMQoqF9++235Obm4udXtQ+9lti6dSsajabO+m9KJFgWQogWoLq795UIcAlQ9iM5y0I0uHbt2tX5PTp06FDn92gqJA1DCCGaOa1OS1J2kqLNOL2iPMbnpeankqvJrbWxCdHcJCYm8uCDD9K+fXvs7Ozw8vJi2rRp7N+/v9xrfvvtNwYMGICjoyOenp7MmDGD8+fPs3jxYlQqFStWrFCcX17Ocnx8PA899BChoaE4Ojri7u5Ot27dePDBB4mJiQFg8eLFBAUFAbBz505DTrZKpWL27NmGvirKWU5MTOTRRx8lNDQUBwcH3N3d6du3Ly+//DKZmZnVf2iNnMwsCyFEM3ct9xpF+iJFW1Vnln2dfU3aLmdfJqR1SK2MTTQROh3kpdZqf6rcLLAqBHUdz9s5uNf9Pf7f8ePHGTlyJMnJyXTq1Ilp06aRkJDA77//zrp16/jxxx+ZMWOG4poPP/yQxx57DLVazdChQ/H29iY6Opr+/fszefLkKt87MTGRPn36kJqaSkhICBMnTkSr1RIfH89XX31FREQEnTp1olevXkyfPp1Vq1bRtm1bxo8fb+hj8ODBld5n165dTJkyhfT0dAIDA5k8eTJ5eXmcOXOGxYsXM3XqVHr16lXlcTcFEiwLIUQzZ5w64WjtSGu71lW61sHagTYObbiRd0PRnwTLLUxeKrxbe1/LqwG3WuutEk/GgpNnnd9Gr9dz9913k5yczFNPPcVbb71lmJldtWoVM2fOZO7cuQwePBgfHx8ALly4wFNPPYWtrS1//fUXI0aMAKCoqIgHHniA5cuXV/n+X3/9NampqSxYsID//ve/imMJCQmG/ONbbrmFXr16sWrVKjp37mwya12R1NRUpk+fTnp6Ou+++y6LFi1CXeaDyN69e/H1Nf2A3dRJGoYQQjRz5iphVKcklFTEEKJyO3bs4Pjx47Rr147XXntN8Xds+vTp3HLLLWRnZ7Ns2TJD+7JlyygsLOTee+81BMoA1tbWLFmyBGdn5yrf/8aN4g+0o0ePNjnWrl27WslB/vrrr7lx4wbjx4/niSeeUATKABEREXh5edX4Po2NBMtCCNHMGddGrmolDMP5UhFDiErt2rULgJkzZ2JjY7rhz7333qs4DyAyMhLAJDUDoFWrVowdO7bK9w8PDwfgue
eeY/369eTn51d98FW0ZcsWAB588MFa77sxa9TBcl5eHi+99BKhoaHY29vj6+vL3LlzuXzZslmNuLg45s+fT1BQEHZ2dnh6ehIREcG7775byyMXQojGwzi4rerivhLGwbJsTCKEqStXrgDFC+PMKWkvG8MkJRUvvA0ICDB3SbWqXsyePZuZM2dy6tQpJk+eTOvWrRk6dChvvPEGV69erXI/FUlMLP6739IqZTTanOX8/HxGjhxJVFQUPj4+TJ06lbi4OJYvX8769euJiooiODi4yv39+eef3HbbbeTl5dGnTx8GDBhASkoKx48f54svvuDJJ5+sw3cjhBANJzFTGdy2c61e2Snj8nESLLdADu7Fub+1RKfTkZWVhYuLi8lX+bXOwb1u+6+iut4Nz8rKil9++YVnnnmGNWvWsG3bNqKjo9m1axdvvfUWf/31FwMHDqzTMTRXjTZYfu2114iKiiIiIoJNmzYZ8naWLFnC448/zty5c9mxY0eV+jpz5gzTpk3DxcWFzZs3K35YdDodhw4dqou3IIQQjUJCVoLidTuX6gXLxudfyrqEVqfFSm1V47GJJkKtrt1Fcjodeq0tOLnWW6WKulaysC0+Pt7s8ZIyb2U3EvHx8SEmJobExES6du1qck3JTG519O7dm969e7N48WIyMzNZvHgx//nPf3jsscfYt29ftfsrKyAggDNnzhAbG0uPHj1q1FdT0ih/QgsLC/n4448B+OSTTxQJ7osWLSIsLIydO3dy8ODBKvW3aNEi8vPzWbFihcmnKrVaTd++fWtv8EII0YhkFGSQWaise1rdYNl4Zlmj03At91qNxyZEczJkyBAAfv31V7Rarcnx77//XnEewKBBg4DiahnGMjIy2LRpU43G5OrqyptvvolKpeLEiROGdltbW6C46kZ1lCwe/PLLL2s0rqamUQbLkZGRZGRk0KFDB3r37m1y/LbbbgNg3bp1lfaVmJjIxo0bCQ4OZuLEibU+ViGEaMwSMpWzytYqa3ycfarVh7u9O042Tsp+jWarhWjphg8fTo8ePYiLi+Oll15Cr9cbjv3+++/89ttvODs7M3fuXEP7nDlzsLW15dtvv+Xvv/82tGu1Wh5//HGysrKqfP/vvvtOERCX+PPPP9Hr9Yq8aE9PT2xsbIiNjTUb2Jdn3rx5eHp68ueff/LBBx8o3iNAVFQU169fr3J/TUWjTMM4evQoAH369DF7vKT92LFjlfa1Y8cOdDodAwcOpKioiN9++43IyEi0Wi3du3fn9ttvp3XrqtUbFUKIpsY4qPV19sVaXb1f/SqVinYu7Tiderq038wEBvgMqJUxCtEUDBhQ/s/7vHnzmDdvHj/88AMjRozgjTfe4Pfff6dXr14kJCQQGRmJtbU1S5cuNdRYhuKFcu+88w6PPfYYI0aMYNiwYbRt25Z9+/aRmprKPffcw/fff2+YCa7IqlWruO++++jQoQM9evTAwcGBixcvEh0djVqt5rXXXjOca2try/jx41m3bh09e/akT58+2NraMmjQIObMmVPuPdzd3fn111+ZMmUKCxcu5KOPPqJfv37k5eVx+vRpzp8/z+HDh5td+bhGGSwnJBT/cvf3N1/eqKS9vLygsk6dOgWAs7MzQ4YMISoqSnH8+eefZ+XKlYr6huYUFBRQUFBgeF2ynaNGozEU+hYVK3lO8ryqR55b9ckzKxWXHqd47e/sX+5zqei5+Tv7K4LluPQ4eb40v581jUaDXq9Hp9Oh0+nq7D4lM5Il92oKoqOjyz02btw4dDod3bp148CBA7z++uts3LiRlStX4ubmxtSpU3nmmWfo37+/yfv95z//ia+vL++99x5RUVHY29szfPhw3njjDd577z0AWrdurXhmJcr+OT322GP4+fmxZ88edu3aRU5ODr6+vsycOZNFixbRt29fxb2//PJLnnzySbZs2cKPP/6IVqtFo9Ewa9YsxfiMxzt06FAOHz7Mu+++y8aNG1m9ejXOzs4EBQ
Xx8ssvExQUVOd/pjqdDr1ej0ajwcqq4rUTtfF3U6U3nkNvBB544AG++uornn/+ecUnoRLnz58nJCSEkJAQzp49W2Ff8+fP54svvsDa2hpnZ2c+/fRTxo8fz40bN3j11Vf5/vvvcXNz4+TJk4qke2OLFy/m5ZdfNmn/8ccfcXR0rP6bFEKIerAyZyVHNEcMrwfYDmCS46Rq97MpbxN/F5R+TdzFugt3O99dG0MUjYi1tTXe3t4EBARUaTZT1B2tVsvgwYOJiYnh9OnTtG3btqGH1GgUFhaSmJjI1atXK827zs3N5a677iIjIwNXV1eL7tcoZ5ZrU8mnm6KiIr744gtmzpwJFH9K++6774iJiWH//v18+umnvP766+X28+yzz7Jo0SLD68zMTAICAhgxYgQeHh51+yaaCY1Gw+bNmxkzZozZgu3CPHlu1SfPrNT/Nv0PkktfD+4+mImdza/fqOi5FcYW8nd0abBc6FQo60Bofj9r+fn5JCYm4uzsjL29fZ3dR6/XG0rH1XVJtcYuNjYWDw8PWrVqZWgrKCjg+eef58yZM4waNYqQkBB5ZmXk5+fj4ODA0KFDK/05TUlJqfH9GmWwXFL9Ijc31+zxnJwcAFxcXKrcl7Ozs9kdcubMmcP+/fvZuXNnhf3Y2dlhZ2dn0m5jY9MsfkHWJ3lmlpHnVn3yzEw3JAlqHVTpMzH33IJaBZn0a2VthVrVKNeJ17vm8rOm1WpRqVSo1eo6rX9cMpFVcq+WbNWqVfz73/8mPDycgIAAMjMzOXr0KElJSXh6evLJJ5+gVqvlmZWhVqtRqVRV+ntXG38vG+XTLtmx5tIl81uqlrS3b9++0r5KzmnXrp3ZT2IlO+o0x9WbQoiWLaswi9T8VEWbcRm4qjLeyKRAW8D1XPm9KURNjRo1imnTppGUlMQff/zB9u3bcXBw4KGHHuLQoUN06tSpoYfY4jXKmeWePXsClLtZSEl7WFhYpX2VlJ5LS0szezw1tfgfkrK1nIUQojkw3mlPrVJXe6vrEm0c2mBvZU++Nl/Rv7eTd43GKERL169fP3766aeGHoaoQKOcWR40aBBubm7ExsZy5MgRk+MrV64EYPLkyZX2NXDgQDw8PLh69SoxMTEmx0vSL8zVcxZCiKbMuGycj5MPtlaWLdpSqVQEuCpnpY1rOAshRHPUKINlW1tbFixYAMAjjzxiyFGG4u2ujx07xrBhwwgPDze0f/zxx3Tu3Jlnn31W0Ze1tTWLFi1Cr9fzyCOPGEq+AWzZsoUVK1agUql48MEH6/hdCSFE/UrMVM4sV3fnPmPtXZSpb7IxiRCiJWiUaRgAL7zwAlu2bGHPnj2EhIQwZMgQ4uPjiY6Opk2bNixbtkxxfnJyMjExMSQlJZn09eSTT7J9+3a2bNlCaGgoAwYMIDk5maioKLRaLa+//jr9+/evr7cmhBD1wjiYNc47ri7jmWXjNA8hhGiOGuXMMoC9vT3bt2/nxRdfxNHRkdWrVxMfH8/s2bM5dOgQwcHBVe7LxsaGDRs28Pbbb+Pp6cnGjRs5fvw4w4YNY926dTz33HN1+E6EEKJhGKdJWLq4r4TxzLSkYQghWoJGO7MM4ODgwCuvvMIrr7xS6bmLFy9m8eLF5R63sbHhqaee4qmnnqrFEQohRONlPPNb0zQMk2A5KwG9Xt/ia74KIZq3RjuzLIQQwnK5mlxu5N1QtNU0DcP4+ryiPFLya17wXwghGjMJloUQohkynlVWocLfxb9GfXo5emGrVlbTkFQMIURzJ8GyEEI0Q8aL+9o6tcXOynQX0upQq9Qmec9SEUMI0dxJsCyEEM2Q8YxvTfOVS0itZSFESyPBshBCNEPGaRg1rYRRwjjolvJxQojmToJlIYRohmq7xrKhHzMVMYRozlQqVaOr+FKyoVpFVcBE7ZFgWQghmqH6SsNIzExEr9fXSt9CCNEYSbAshBDNTH5RPtdyryna6ioNI0uTRVpBWq30LYQQjZEEy0II0c
xcyrpk0lZbwbK3kzfWauV+VrLITwjRnEmwLIQQzYxxHnEbhzY42jjWSt/Wamv8nZX1miVvWYhi6enp/Pe//2XcuHG0b98eOzs7PDw8GD9+PJs3bzZ7zfDhw1GpVMTFxfH9998THh6Oo6MjXl5ezJo1i8uXL1f5/klJSbzzzjsMGzYMPz8/bG1t8fb2Ztq0aezfv7/c63Jycnj77bfp27cvrq6uODk50blzZx555BHOnj1rcn50dDQzZszAx8cHW1tb/P39mTdvHgkJzfN3QaPe7loIIUT1xWfGK17X1qxy2f7iMuPKvZ9ofnR6HekF6bXXn05HVkEWRflFqNV1O2/Xyq4ValX9zA1GRUXx6KOPEhgYSKdOnYiIiCAhIYFNmzaxadMmvv76a+bOnWv22vfee49PP/2UIUOGMHXqVKKiovj222/Ztm0be/fuxdfXt9L7r1mzhqeffppOnToRFhaGq6sr586d4/fff2f9+vWsX7+esWPHKq5JSkpizJgxnDx5ktatWzN8+HDs7Oy4cOECn3/+OSEhIYSGhhrO//TTT/nnP/8JQL9+/RgyZAgxMTEsXbqUtWvXsnPnTrp06VKDp9j4SLAshBDNTNlAFiDILahW+w90C2TX5V2G1xIsN3/pBekM+2VYQw/DIjtv34m7vXu93KtTp07s3buXAQMGKNoPHz7MyJEjWbhwITNnzsTZ2dnk2i+++IL169czceJEADQaDXPmzOGHH35gwYIF/Pbbb5Xef9CgQZw4cYJu3bop2jdu3MiUKVN4+OGHOXfunKK6x7333svJkyeZOXMmS5cuVYwtLi6OzMxMw+uSDwM+Pj6sWbOG8PBww7GlS5cyb9485syZQ1RUVKVjbUokDUMIIZqZuIw4xev2ru1rtf9A18AK7ydESxUUFGQSKAP07t2bRx55hMzMTLZv32722pkzZxoCZQAbGxs+/PBDHB0dWbt2LYmJldc079Gjh0mgDDBu3DhmzJhBbGwsJ06cMLTv27ePrVu34uXlxddff20SxAcGBhIWFmZ4/dZbb6HVavn8888VgTLA/fffz5QpU4iOjubw4cOVjrUpkZllIYRoZoxnlo2D25oy7i8+Mx6dXldvX3UL0ZhptVq2bt3Knj17SEpKoqCgAIBz584p/tfYHXfcYdLm4eHB2LFjWb16Nbt37+bmm2+u9P4FBQX89ddf7Nu3jxs3blBYWAjA8ePHDffv0aMHAFu2bAHgzjvvxMXFpcJ+dTodW7duxdHRkXHjxpk9Z8iQIaxdu5Z9+/bRu3fvSsfaVEiwLIQQzUhmYSap+amKtvZutTuzbDxTna/N53rudbydvGv1PkI0NZcuXWLSpEkcPXq03HOysrLMtrdvb/7vaWBgIFCcW1yZ48ePM2XKFOLi4qp0/5LZ6g4dOlTad3JyMtnZ2QDY2tpWem5zIsGyEEI0I/EZyvxhK5UVAc61u8DPy9ELR2tHcotyDW0XMy5KsNyMtbJrxc7bd9ZafzqdjqysLFxcXOplgV99mTdvHkePHmX69Ok89dRTdOrUyfAev/zySx588ME628RHr9czc+ZM4uLimD9/PvPnzyc4OBhnZ2dUKhXPPfccb775psX31+l0ADg7OzN9+vQKzzWXCtKUSbAshBDNiHEKhr+LPzZWNrV6D5VKRXvX9pxOPW1oi8+MJ8I3olbvIxoPtUpdq4vkdDod1oXWuNq71nmwXF9ycnLYvHkzbdu25ZdffsHKykpx/MKFCxVeHx8fr8gPLtsO4OPjU+H1Z86c4cyZM/Tt25fPPvvM5Li5+wcEFH+Qjo2NrbBvAE9PT+zt7VGr1SxfvrzRbQFel5rHT6gQQgjANFiu7cV9JUwW+RndV4iWJiMjA51Oh4+Pj0mgrNFo+P333yu8/n//+59JW2pqKps2bUKlUjFo0KAKr09LK95J09/f3+wxc3WeR48eDcBPP/1kSLEoj7W1NcOHDyczM5OtW7dWeG5zI8GyEEI0I8aVKWp7cZ
+hXzdlvxIsi5bOy8sLNzc3Tpw4QWRkpKFdq9Xy9NNPm93co6xffvmFjRs3Gl4XFRWxcOFCcnJymDRpEu3atavgaujYsSNqtZpt27YpFhHm5+czf/58UlNTTa7p378/I0aM4Pr16zzwwAPk5OQojsfFxRkWBgI8//zzqNVq5syZw44dO0z6y87OZtmyZeTl5VU41qZG0jCEEKIZMa55XFczy8b9Svk40dyZKwlXYt68ecybN4+nnnqK559/nmHDhjFy5Ejc3d2Jjo7m2rVrPPLII3zyySfl9vHAAw8wYcIEhg4dio+PD9HR0Vy8eBFfX18+/vjjSsfn5eXF/fffz1dffUXPnj0ZOXIkDg4O7Nq1C61Wy+zZs1mxYoXJdd999x2jRo3ip59+YuPGjQwePBg7OztiY2M5cuQI77//vqF6xuDBg/nkk09YsGABI0aMoHv37oSGhmJjY0NcXBxHjhyhoKCAadOm4eDgUPlDbSIkWBZCiGZCp9eZBMu1vSFJCeOZ5SvZVyjUFmJrVfEqeSGaqujo6HKPjR8/HoDnnnsOf39/PvjgAyIjI3FwcGDw4MG88sorHDp0qML+n3jiCfr27cuHH35IdHQ0Tk5O3Hvvvbzxxhv4+/sbFthV5LPPPqNz584sXbqUrVu34ubmxujRo3n99ddZvny52Wv8/PzYv38/H3zwAStXrmTz5s1YWVnh7+/Pww8/zKRJkxTnz58/nwEDBvDBBx+wY8cO1q9fj6OjI35+ftx9991MmzYNNze3SsfalKj0dbUss5nLzMzEzc2N5ORkPDw8Gno4TYJGo2HDhg1MnDgRG5vaXXDUnMlzq76W+syu5lxlzMoxiratM7bi5ehVpeur89yyC7OJ+Em5oO/3Kb/TsXXH6g26iWtuP2v5+flcvHiRoKAg7O3t6+w+Op2OzMxMXF2bzwI/Sw0fPpydO3dy8eJFQ5k4c+SZlarOz2lKSgqenp5kZGTg6upq0f1a9tMWQohm5GLGRcVrR2tH2ji0qZN7Ods6m/Qt214LIZojCZaFEKKZMNm5zy2wTss7GadiXMy8aP5EIYRowiRYFkKIZqK+FveV178s8hNCNEeywE8IIZoJ42A1yLVuFveVMC5LJ2kYQlSfuRJsonGRmWUhhGgm6mtDkhKyMYkQoiWQYFkIIZqBAm0BV7KvKNqMc4prm3H/6QXppOen1+k9hRCivkmwLIQQzUBCZgJ6lJVA63pm2dfZF2uVMptPZpeFEM2NBMtCCNEMGOcLezl44WTjVKf3tFHb4O/iX+E4hBCiqZNgWQghmgGTfGW3up1VLiF5y0KI5k6CZSGEaAaMK2EYB7F1xThvWWaWhRDNjQTLQgjRDJhsSFJfwbLRfYx3ERRCiKZOgmUhhGgGjGd067oSRgnjRYQJmQno9Lp6ubcQQtQHCZaFEKKJS89PJ70gXdHWUGkYhbpCkxJ2QgjRlEmwLIQQTVxsRqzitbXaGl9n33q5t4e9By42Loq2CxkX6uXeQtSHuLg4VCoVKpWqwvNmz56NSqVi8eLFNbrf4sWLUalUrFixwuTYkSNHGDduHK1atTKMKS4urkb3E5WT7a6FEKKJMw5OA10DsVbXz693lUpFcKtgjt44ami7mHGRof5D6+X+QrQUWVlZ3HXXXVy9epXhw4cTEBCASqXC2dm5oYfW7EmwLIQQTdyFdGWwHOwWXK/3D3ZTBssysyyE5RYsWMAdd9yBj4+Pon3//v0kJSVxzz338N133zXQ6FomCZaFEKKJM65AEdyq/oPlsoyDdyFE1Xl6euLp6WnSfunSJQCCg+v377eQnGUhhGjyjHOW631m2Sg4j82IRa/Xl3O2EC3H8OHDDXnFq1evZsCAATg5OeHu7s6dd95pCIDLMs5ZLsmZnjNnDgCvvPKKIV959uzZimu/++47Bg8ejKurK46OjoSFhfHmm2+Sn59vcp+SHOsdO3awceNGRowYYciFTk9PZ8WKFYYc7NjYWGbOnImnpy
eurq5MmDCBU6dOAVBUVMQbb7xBaGgo9vb2dOzYkU8++aR2H2QDk5llIYRownI1uVzNuapoa4g0jLKyCrNIyU/B08F0dkw0TXqdDm16eq31p9Pp0GZlUVRUhFpdt/N2Vq1aoarje1Tm008/ZcmSJQwZMoSJEycSHR3Nzz//zMGDBzl69CgODg7lXuvs7MysWbM4f/48kZGR9OzZk169egEwePBgw3kPPvggX375Jfb29owcORJHR0d27NjBc889x7p169iyZQuOjo4m/f/44498/fXX9O3blwkTJhAbG6tYzHjx4kX69+9P27ZtGT16NKdOneKvv/7i4MGDHDt2jPnz57Njxw5GjBhBcHAw27dvZ8GCBdja2vKPf/yj9h5iA5JgWQghmjDjFAy1Sl1vNZZL+Dr7Ym9lT762dPbqQvoFCZabEW16OucGDqr1fq/Xeo+mQvZEYu3uXg93Kt8nn3zCrl27iIiIACA3N5cxY8awZ88efvrpJ+bOnVvutZ6enqxYsYJly5YRGRnJ1KlTefnllxXnrFq1ii+//BJfX1927NhBSEgIABkZGUyaNIndu3fz0ksv8d5775n0/9VXX/Hzzz9z++23m73/t99+yzPPPMMbb7yBSqVCr9czd+5cVqxYwahRo1Cr1Zw7d442bdoAsHXrVkaPHs3rr7/ebIJlScMQQogmzHgxnZ+zH3ZWdvU6BnMBuizyE6LUwoULDYEygKOjI4sWLQLg77//rnH/H330EQD//ve/DYEygJubG5988gkqlYovvvjCbDrGzTffXG6gDMU50iWpH1BcAWfhwoUAnDp1ig8++MAQKAOMGjWK3r17Ex8f32zK2kmwLIQQTVhsujJfuYNbhwYZh3EqhvG4hGjJxo4da9IWGhoKQFJSUo361mg0REVFAXD33XebHA8LCyMsLIzs7GyOHDlicnzKlCkV9j98+HBsbGwUbSWLDG1sbBg+fLjJNSXHa/reGgsJloUQogkznsENahXUIOMwDpaN00OEaKoq24ykRMmiVnPn+/v7m7S5uBRv5lNQUFCD0UFKSgqFhYV4enri5ORk9pzAwEAALl++bHKsXbt2Ffbv5+dn0lZS29nb2xsrK6tyj9f0vTUWkrMshBBNmEnZuHpe3Ge4r1FFDEnDaF6sWrUiZE9krfWn0+nIysrCxcWlXhb41UTZRXG5ublmF8mVHAPMBqx1/R4rU1HAb29vX+G1FY29od9XfZFgWQghmqhCbSEJWQmKtgYLlo3ueyPvBpmFmbjaujbIeETtUqnVtbpITqfTYWVtjbWra6MPuNzd3XFwcCAvL48LFy7QvXt3s+dduFD8AdHcLHJd8vDwwNbWluTkZHJycswG6yW5w+ZmiUXlGvdPqBBCiHLFZ8aj0+sUbQ0VLLdzaYe1Sjn/IpuTiObAysqKQYOKK4H88ccfZs9JTEzkyJEjqNVqw7n1xcbGhgEDBgDw888/mxw/ceIER48exdnZ2VByTlSPBMtCCNFEGac6eDl64Wzr3CBjsbGyIcA1QNEmecuiufjXv/4FwFtvvUV0dLTiWEZGBnPnzkWn0zFt2jQCAgLMdVGn/vnPfwLFG5qUzHADZGVlsWDBAvR6PQ8++GClKRfCvEYdLOfl5fHSSy8ZdoXx9fVl7ty5ZhPUKxIYGGjY7cbcf2fOnKmjdyCEEHXHOFhuqFnl8u4vecuiuZg0aRJPPfUU6enpDBw4kIEDB3L33XczadIk2rdvz5YtW+jevTuffvppg4zvtttu44EHHuDSpUt0796dSZMmMXPmTDp06MDOnTsZMGAAr7zySoOMrTlotDnL+fn5jBw5kqioKHx8fJg6dSpxcXEsX76c9evXExUVVe390WfNmmW23c3NrTaGLIQQ9co4zaFDq4YpG1ci2C2YrWw1vJbycaI5efvttxkxYgSff/450dHR7N+/H0dHRzp37sz06dN55JFHyq1GUR+++OILBg8ezOeff87OnTspKiqiQ4cOPPbYYy
xcuLDCXQJFxRptsPzaa68RFRVFREQEmzZtMpQhWbJkCY8//jhz585lx44d1eqzZJ91IYRoDhrdzLJUxBDN3Pjx4xk/fnyVz68oTgkMDDSUmytr8eLFLF682KR99uzZTJs2DVfX8hfN3nvvvdx7771VGtuKFSsqjItmz57N7Nmzyz1ubuxV7bupaZRpGIWFhXz88cdA8RaRJYEywKJFiwgLC2Pnzp0cPHiwoYYohBANSqvTEpcRp2gLcmuYGssljIP1K9lXyC8y3TFMCCGakkYZLEdGRpKRkUGHDh3o3bu3yfHbbrsNgHXr1tX30IQQolG4kn2FQl2hoq2hZ5YDXQMVr/XoicuMa5CxCCFEbWmUaRhHjx4FoE+fPmaPl7QfO3asWv2+++67xMbGYmdnR7du3bj11lsV+5kLIURTEZuhzAduZdcKd/vaq4NrCUcbR/yc/bicXboIOzY9ls7unRtwVEIIUTONMlhOSCgusl9eYe+S9vj4+Gr1+9RTTyleL1y4kP/+97/MnTu30msLCgoU2zZmZmYCxXuyazSaao2jpSp5TvK8qkeeW/W1hGd2LvWc4nWgayBFRUU16rM2nlugS6AiWD6feh5NQPP9c2huP2sajQa9Xo9Op0On01V+gYVK8l1L7iUqJ8+slE6nQ6/Xo9FozG63XVZt/N1slMFydnY2QLlbSpasNs3KyqpSf1OmTGHEiBGEh4fTpk0bLly4wLJly/jwww+ZN28eHh4eTJ06tcI+3nzzTV5++WWT9u3bt5c7TmHe5s2bG3oITZI8t+przs9sV+4uxWvrDGs2bNhQK33X6LnlKV/uPbuXwMuBNRpPU9Bcftasra3x9vYmOzubwsLCyi+ooar+Oy5KyTMrXtuWl5fH33//XekkQck25DXRKIPl2vbRRx8pXnfr1o3333+fzp0788ADD/D0009XGiw/++yzLFq0yPA6MzOTgIAARowYgYeHR52Mu7nRaDRs3ryZMWPGYGNj09DDaTLkuVVfS3hmP2/8GVJKXw/tPpSJnSfWqM/aeG4F5wuI3BdpeJ3rkMvEiTUbV2PW3H7W8vPzSUxMxNnZuU43sNDr9WRlZeHi4oJKpaqz+zQn8sxK5efn4+DgwNChQyv9OU1JSanweFU0ymC5pPpFeZ8GcnJyAHBxcanRfe6//35eeOEFYmJiiIuLIzAwsNxz7ezssLOzM2m3sbFpFr8g65M8M8vIc6u+5vrMdHqdSVm2UPfQWnuvNXluoR6hiteJWYno1XpsrWxrY2iNVnP5WdNqtahUKtRqNWp13dUAKEkjKLmXqJw8s1JqtRqVSlWlv3e18ffSoqf9zTffkJ9fd+WA2rVrB8ClS5fMHi9pb9++fY3uo1ar6dChuIh/UlJSjfoSQoj6ciX7CrlFysmEkNYhDTQapY6tOipeF+mLZNtrIUSTZlGwPGfOHHx9ffnnP/9pqFxRm3r27AnAoUOHzB4vaQ8LC6vxvdLS0gAadNcdIYSojvPp5xWvXW1daePQOCr7ONs64+Pko2gzHq8QQjQlFgXL8+bNo6ioiE8++YQ+ffowYMAAli5dakiPqKlBgwbh5uZGbGwsR44cMTm+cuVKACZPnlyj+5w8eZKYmBjDdpVCCNEUGAefHVt1bFQ5jMazyxIsCyGaMouC5S+//JKkpCS+/PJL+vXrx759+3jggQfw9fVl/vz5HDhwoEaDsrW1ZcGCBQA88sgjiiB8yZIlHDt2jGHDhhEeHm5o//jjj+ncuTPPPvusoq8NGzawbds2k3scO3aMGTNmoNfrmTdvHra2zTufTgjRfJxLU5aNaywpGCWMx3M+TYJlIUTTZXGGuJOTE/PmzSMqKopjx47xyCOPYG1tzZdffslNN91E7969+fzzzw31iKvrhRde4KabbmLPnj2EhIRw++23M2DAAB5//HHatGnDsmXLFOcnJycTExNjknu8b98+Ro0aRWBgIFOnTuXOO+/kpptuIjw8nNOnTzN8+HDeeu
stSx+DEELUO+OZ2pBWjStYNp5ZPpd+rpwzhRCi8auV5ZTdu3fno48+4sqVK3z//fcMHTqUo0eP8sgjj+Dr68v999/PwYMHq9Wnvb0927dv58UXX8TR0ZHVq1cTHx/P7NmzOXToEMHBVdvWddy4ccydOxdXV1ciIyNZuXIl58+fZ/DgwXz11Vds2bIFBwcHS962EELUO41OY7JgrmPrjuWc3TCMZ5YvZ18mR1M7aXpCCFHfarX2iEajISsry1Awu2R3leXLl9O/f39uu+020tPTq9yfg4MDr7zyCufPn6egoICkpCSWL19udme/xYsXo9frWbFihaI9IiKCpUuXcuzYMZKTk9FoNKSkpLB9+3bmzZtX6c4vQgjRmCRkJqDRKXekMp7JbWhBbkGoVcp/XmLTY8s5W4ima/HixahUKlasWIFOp6N169aoVCrOnj1r9vwpU6agUqkIDQ01e7ywsBAHBwdUKhXXr1+v8ZjKGj58OCqViri4OIv6raodO3agUqmYPXu2on3FihWoVCoWL15cp/evC7USLEdFRXH//ffj4+PDww8/zLFjx5g2bRqbNm0iMzOTH374gR49evD777/z6KOP1sYthRCiRTJOafBy8MLNzq2BRmOenZUd7VzaKdpkkZ9o7tRqNYMGDQJg9+7dJsf1ej2RkcUb9pw7d85sMLx//37y8/Pp1KkTXl5edTtgUWUWB8tpaWl89NFH9OjRg0GDBrF8+XLc3d15+eWXSUhIYOXKlYwePRo7OzvuvPNODhw4QNeuXWttO1YhhGiJjBfLNbYUjBLGqRjGixKFaI6GDBkCmA+WT548SWpqqqE8rrlzdu3apejHEgsWLOD06dPceuutFvchlCwKlu+55x78/PxYuHAhp0+fZsKECaxdu5aLFy/ywgsv4O3tbXKNtbU1/fr1M9Q1FkIIUX2NfXFfCeNxySI/0RJUFCyXtD311FOVnlOTYNnT05POnTvj5ta4vnFqyiwKln/88UdatWrFs88+y4ULF1i/fj2TJk2qdPvFW2+9lZdeesmigQohhDBTY7mRziwbj0vKx4mmbO3atURERODo6IiHhwfTp083m5fct29f7O3tOXfuHNeuXVMc27VrF1ZWVkyZMoWOHTsaZpFL6HQ6Q5qGcbD8119/MWnSJDp27IiDgwPBwcEsWrSIlJQUkzGUl7Nc1vfff094eDiOjo54eXkxa9YsLl++bHLe7NmzUalU7Nixw2w/KpWKwMDAcu/TXFhbctGvv/7K1KlTsbau3uWTJ0+u8UYiQgjRUuUX5ZOQmaBoa6wzy8aLDlPyU0jNT8Xd3r2BRiRqQq/Tk5+jqfzEKtLpdORna7BRFVY60VZT9k42qNSWb9rz+eef89BDD6FSqRgyZAg+Pj5ERUXRv39/k5jG1taWm266iZ07d7J7926mT59uOLZr1y569eqFs7MzgwYN4ocffiAnJ8ewg/CJEydIT0/Hz8+PoKAgw3XPPPMMb7/9Nra2tvTu3Rt/f3+OHTvGf/7zH9auXUtkZCRt27at8vt57733+PTTTxkyZAhTp04lKiqKb7/9lm3btrF3716zRRRaOouC5ZycHPbt28fAgQMrPC8qKoqzZ89y3333WTQ4IYQQpWIzYtGjN7xWoSLILaiCKxpOgEsAtmpbCnWFhrbY9FjcvSVYboryczQse9I0baApmPvuYBxcLNt4LD4+noULF2JjY8O6desYN24cUFz9a86cOXz//fcm1wwZMsQkWE5ISCAxMZFp06YBxTsVf/PNN0RFRTFq1CjAfArGr7/+yttvv0337t1ZtWoVXl5euLq6GqpKvPLKK/zrX//i559/rvJ7+uKLL1i/fj0TJ05UvJcffviBBQsWsHr16uo/qGbOoo9zs2fP5uuvv670vKVLlzJnzhxLbiGEEMKIcSqDv4s/jjaODTSailmrrQlupayHfzbNfDktIRqrZcuWkZ+fz5133mkIlAFsbGz48MMPcXQ0/ftXEuyWTb
Mo+f8l1TJK/tfcOWWD5ddffx2An376iY4dS7+tKQmWe/XqxcqVK0lOTq7ye5o5c6YhUDZ+L2vXriUxMbHKfbUUdfrdh06nQ6Wy/KsPIYQQpZrK4r4SxuOT8nGiqSkJYO+44w6TYx4eHowdO9akPSIiAisrK44ePUp2djZQOms8ePBgALp06ULr1q0Vi/yMZ5avX7/O0aNHCQkJoXv37ib3UalUDBo0CK1WW62N3yp6L3q93uzCw5bOojSMqrpw4QKurq51eQshhGgxjCtKNNbFfSVkkZ9o6q5cuQJA+/btzR43t7jNxcWFXr16cfDgQaKiohg9ejS7du0iODgYHx8foDjQHThwIDt27KCoqIhLly5x6dIlWrdubQiMSzYPOXfuXKUTj9WZWa7svZS8Z1GqysHyK6+8onh95MgRk7YSRUVFxMTE8PfffzNmzJiajVAIIQRgGmw29pll40V+59PPo9fr5RvHJsjeyYa57w6utf50Oh1ZWVm4uLjUywK/+jZkyBAOHjzIrl276NOnD6dOneLee+9VnDNo0CD++OMPDh8+zJkzZwxtJX8/dDodAN7e3owbN86wK7KNjY3J36HyAuC6VDK+lqDKwXJJKZKSX3RHjhzhyJEjFV7j5eXFG2+8UdMxCiFEi5dZmMm1XGUpqsa2zbUx42A+W5PNtdxreDuZ1uIXjZtKrbJ4kZw5Op0Ojd4GBxfbOg+Wa8LHx4eYmBji4+Pp2rWryfH4+Hiz1w0ZMoQPPviA3bt307dvX/R6vSEFo0TZ3f5KguWy+colVSk8PT0N22lnZmbi6upao2cWHx9PWFhYue/F19fX0GZrW/xnXpJOUlZLym2ucrC8fPlyoHi7xrlz5zJ48GDuv/9+s+fa2tri6+vLgAEDsLOzq52RCiFEC2Y8q2yttqa9a/3PJlWHt5M3TjZO5GhyDG1n085KsCyajCFDhrBjxw7+97//MWHCBMWx1NRUNm3aZPa6ksA4Ojqa7du3K9pK9OvXDxsbG3bv3s3p06cN9yvh7+9P586dOXXqFGfPnlUs8KuJ//3vfyYl70reS0kedImStBFzNaU3b95cK+NpCqocLM+aNcvw/7/55hsmTJigaBNCCFF3jBfHBboGYmNV/18vV4dKpaJjq44cvXHU0HY+/TxD/Yc24KiEqLo5c+bwzjvv8MMPP3D33XczevRooLjc2sKFC8nJyTF7nZeXF506dSImJoYVK1YYdtUry8HBgT59+rBt2zYyMjJwcHAgPDxccc6LL77I3XffzfTp0/nmm28IDlZWmElJSeG3337jH//4R5Xf0y+//MI999xjqO5RVFRkeC+TJ0+mXbt2hnOHDRsGwGeffcasWbPw8PAAilNxW9ImcxYt8Cv5lCSEEKJ+GJdda+z5yiVCWocogmUpHyeakqCgIN5//30WLFjAuHHjGDp0KN7e3kRFRZGWlsbdd9/NDz/8YPbaIUOGEBMTQ1paGlOmTDGbqz9o0CCio6MBuOmmmwxpDyXuuusuTp48yRtvvEG/fv3o0aMHISHFf/djY2M5duwYzs7O1QqWH3jgASZMmMDQoUPx8fEhOjqaixcv4uvry8cff6w4d8SIEQwbNoydO3fStWtXBg0aRHJyMtHR0Tz66KO89957Vb5vU9Z4E4WEEEIYxKTGKF6Huoc20EiqJ7S1cpzG70OIxu6RRx7h999/p1+/fkRHR7Nx40Z69uxJVFRUhakRZVMqjFMwSpRNeTDe4rrE66+/zs6dO5k2bRrXr19nzZo1bN++Ha1Wy0MPPcTatWur9X6eeOIJli1bRkZGBqtXryYzM5N7772X6OhoxawyFH87tGbNGubPn49KpWLDhg2kpqby4Ycf8u6771brvk2ZSq/X6ys7aeTIkahUKr755hv8/f0ZOXJk1W+gUrF169YaDbIxyszMxM3NjeTkZMPXEqJiGo2GDRs2MHHiRGxsGvfXx42JPLfqa27PTKfXEfFjBLlFuYa2T0d9yhB/8/
+4Wqountuha4eY9Vdpyp61yprou6Oxtaq9xWINqbn9rOXn53Px4kWCgoKwt7evs/vU1mK1lkSeWanq/JympKTg6elJRkaGxeWMq5SGsWPHDlQqFbm5uYbXVSUlgoQQomYuZ11WBMoAndw7NdBoqsd4ZrlIX0RseixdPLo00IiEEKJ6qhQsX7x4EQA/Pz/FayGEEHXvTNoZxWt3e3faOLRpoNFUj7OtM/7O/lzKvmRoO5N6RoJlIUSTUaVg2bjYdUMUvxZCiJbKJF+5dWjtf2unyYO8bKx0BbXbL8Wz4GWDZVnkJ4RoSup0u2shhBA1F5OmDJY7ta6FFIykY3BmPcTvgavHIT8dG2ASoD/zOLTtDu0joPPN4NsHahCcd2rdia0JpWtXjN+PEEI0ZhZliF+7do2///6ba9eUu0nFxsZyxx130L17dyZOnEhUVFStDFIIIVoy45lli/OV9Xo48wd8MQy+GAI734a4XZCfrjhNVZAJCXtg1/vw1Uj4fDCcWFV8vQWMx3sm9QxVWFsuhBCNgkXB8ltvvcWIESPIyMgwtGVmZjJ48GB+/fVXTp06xV9//cWoUaM4d+5crQ1WCCFamoyCDJJykhRtFgXLlw/Bipvh57sg6Uj1rr12AlbOha9HQ0L1J0GMx5tVmMXVnKvV7kcIIRqCRcHyjh076Nq1K6GhpaucV6xYwbVr17jzzjuJiYlhyZIl5OXl8f7779faYIUQoqUxzu+1UdsQ5BZU9Q50Wtj5Lnw9CuIjazaYywdg2TjY8jJoi6p8ma+TLy42Loo2ScUQQjQVFgXLly9fNtly8Y8//sDa2poPPviAkJAQHnvsMXr27MnOnTtrZaBCCNESGadgdGzVERt1Fev5Zt+A76fD9tdArzM97uoPEQvgjp9gwQE0jx5nS5e3KZr5Iwz6F7QqZzH37iXwzSTIvFKlYahUKpNNVM6kninnbCGEaFwsCpazsrJwdHQ0vNZqtezdu5fw8HA8PT0N7Z07d+bSpUvmuhBCCFEFxjOwxnWLy5UWD0vHwIXtpsfc2sH0pfDYMRj3OnSeCJ4h4OJDjr0P+pCxMOYVePQIzPwO3INN+0jYW5yWkVy1VDvjRYlSEUMI0VRYFCz7+vpy5kzprMDu3bvJzs5m+PDhivOKiopM9jkXQghRdRYt7rsRA8vGQ5pRTXyVGoY/Cwv2Q4/bQG1VcT9qNXSdAg9HFwfPxjPamZeL75N0rNIhGY9btr0WQjQVFgXLERERHDt2jA8++IDjx4/zwgsvoFKpmDx5suK806dPGzYyEUIIUT0anYbY9FhFW2f3zhVfdP0MLJ8AWUYpEs7eMGs9DH8GbKq5jbG1bXFaxtyNxbPSZeUmw4pJcOVIhV0YB8uJWYnkaHKqNw4hhGgAFgXLzz77LHZ2djz++OP06tWLyMhIhg8fzsCBAw3nxMXFcerUKW666aZaG6wQQrQkcRlxFOoKFW0VpmFkJsEPt0FuirLdqxs8+DcEDqrZgPzD4cGd4BeubC/IgB9mQFpcuZd2bNURK1XpTLYePefSpFqSEKLxsyhY7tatG7t37+aee+5h/PjxvPDCC6xevVpxzsaNG+nZsye33HJLLQxTCCFaHuN8ZW8nb9zs3MyfnJ9ZHChnJCrb/frC7PXg0rZ2BuXoDvetgcAhyvac6/D9bZCbavYyOys7Al0DFW2SiiGEaAos3sGvT58+fPPNN+Uef/DBB3nwwQct7V4IIVq8s6nKRXCdW5eTgqHTwq+zi+shl+XXF+5bDXYu5q6ynJ0L3P1r8Wxy3K7S9pRzxXWcZ60DK9OKHZ3cOxGbUZpWIuXjhBBNgUUzy0IIIeqecXk14/JrBn+/C7FblW3uwXDXL7UfKJewcYDbvwevrsr2hL2wZbHZS2SRn2jKcnJyWLJkCSNGjKBt27bY2trSunVrIiIieOmll0hISGjoIVZox44dqFQqZs+e3dBDaXIsnlkukZCQQFJSEgUFBeWeM3To0JreRgghWh
zjmVfj8msAnN8KO95Stjl6wj2rwMnT9Pza5NAK7l5ZXKIu83Jp+96PIeCm4koaZRiP/1z6ObQ6LVaVVeUQooHt2bOH6dOnc/XqVRwdHRkwYABt27YlIyOD/fv3ExUVxTvvvMP69esZPXp0Qw9X1DKLg+Vly5bx6quvVumTlFartfQ2QgjRIiXnJZOar8z/NamEkXEZVs0D9KVtKiu4vZzayHXBzQ/u+AGWjgVtmcWIax6Btt3Ao4OhyXhmOa8oj8SsRALdAutnrEJY4MiRI4waNYr8/HyefvppXnzxRZycnAzHdTodq1ev5qmnnpK9JZopi4Ll5cuXM2/ePAC6d+9OaGgoLi519FWfEEK0QMYpGI7Wjvi7+Jc26PWwdgHkGS2oG70Y2g+kXvn2hglvw/qFpW0FmbD6IZjzp6Ges6eDJx72HqTkl1brOJN6RoJl0Wjp9Xruvfde8vPzWbx4Mf/+979NzlGr1UybNo1Ro0aRmJhophfR1FmUs7xkyRKsra1ZvXo1x44dY+XKlSxfvrzc/4QQQlTPqZRTitehrUNRq8r8yj64HGK3KS/qPAkG/rMeRmdG+BwIu13ZlhgNez9RNHX2UM6On0pVvk8hGpO//vqLEydO4O/vz/PPP1/huW5ubnTv3h2ApKQk3nnnHYYNG4afnx+2trZ4e3szbdo09u/fb/Z6lUpFYGCg2WMrVqzAysqKt956y+SYRqPh888/Z/DgwbRq1QoHBwc6duzInDlzOHjwoNn+UlNTeeihh/Dx8cHOzo7u3buzbNkyxTkHDhxApVIpygIbe+ONN1CpVGY/RDQnFs0snzt3jqFDhzJlypTKTxZCiCYu/XouMVFXuXohg5z0Aqxs1HgGuBDUw5PAnp6o1apav6dxsNzVo8xCutSLsPEF5QUuPjD1Y1DV/liqRKWCm5cUB8hl6y1vew1CxoJXcZDc1b0rkZcjDYdPp5yuk+HodXriT6Rw8egNbiRmU1SoxdHVlrZBbnQa4I27j1PlnQgDvU5HXnZWrfWn0+nIy8rCGj1qdd3WGnBwdkFl4T3++OMPAGbMmIG1ddVDpjVr1vD000/TqVMnwsLCcHV15dy5c/z++++sX7+e9evXM3bsWIvGVFZOTg4TJ07k77//xsnJyRAwx8XF8cMPP+Dm5kZ4uLIuenp6OhEREWRnZzNkyBCSk5P5+++/uf/++9HpdIbMgb59+9KnTx/27t3LyZMn6datm6IfvV7P0qVLUavV3H///TV+L42ZRcGyu7s7np51vHBECCEaWF52IZG/nidm31VFWjBAcmI2Z/Yk0drHieF3heIb0rpW720cRBqCZb0e1v4TjHe/m/IxONTuGKrNzhlu+QyWT8TwwLQFsHo+zNsKaitl0E/xhwK9Xo+qFoP8axcz2fHjGZITsxXtaVdzuXw2nUMb4+nQx4sht4fg5GZXa/dtzvKys/jsH3c39DAs8tBXP+DoWk598kocOXIEKC6XWx2DBg3ixIkTJgHmxo0bmTJlCg8//DDnzp2r8c/9v/71L/7++2+GDh3KypUradOmjeHYtWvXiIuLM7lmzZo13HHHHaxYsQI7u+Kf/9WrV3Prrbfy6quvGoJlgPnz5/PAAw/w1Vdf8cEHHyj62bp1KxcuXGDChAm0a2e0s2czY9FHralTpxIZGYlGo6nt8QghRKNw7WImP7+yj5ho00C5rLSkHFYvOcyhTfG1du/0/HSu5Ci3qzYEmUd/VtY2BugzC0IayQr89gMh4hFl25XDcKD4K17jYDmzMJPL2ZepLSd2XuK3dw+aBMrGYg9d56dXorl8Nq3W7i2an5SU4vz6skFoVfTo0cMkUAYYN24cM2bMIDY2lhMnTpi5suquXLliCHi//fZbkzG2bdvW7C7Krq6ufPzxx4ZAGeCWW26he/fuJCQkKALsu+66C1dXV7777juTqmdff/01AP/4xz9q9D6aAouC5TfeeAMnJyfmzJlDWpr8ohFCNC8Jp1JYveQQuZ
mFlZ9M8WTv3t9i2bf+Yq3c3ziP197KniC3IMhLg01G6Rdu7WDc67Vy31oz8gXwCFG2bX0Vsq7h4+Rjsgvh6dTaScU4siWBnT+dRaer4NNNGQU5Raz96AgXDt+olfsLUVZBQQFr1qzh+eef54EHHmD27NnMnj2b48ePA8UprTWxY8cOtFot48ePp3379lW+Ljw8HA8PD5P20NDiOu5JSUmGNicnJ+655x5SU1NZtWqVoT05OZnff/8db29vJk+eXIN30TRYlIbx+OOP07VrV3766Sf++OMPwsPD8ff3N5t3pFKpWLp0aY0HKoQQ9SEpNoM/PztOkUanaLe1t6LzQB/aBrqSl6Xh9N4kUi4pZy/3r7+Ik5st3Yb41WgMJov73EOxVlvD1lcgN1l58sR3627jEUvZOMCk/8A3k0rbCjJg0wuopn9FV/eu7E3aazh0KuUUY9qPqdEtz+2/RuTK8ybtrb0d6TLIF6dWttyIz+L0niQKcosMx3VFejYuPcGkR3oS0MW9RmMQzU9JUHnjRvU+UB0/fpwpU6aYTYMokZVVsxzwksobHTp0qORMJX9/f7PtJVXNjGeQ58+fz6effspXX33FXXfdBcC3335LYWEhc+bMqVYud1Nl0TtcsWKF4f9nZGSwbdu2cs+VYFkI0VRkpeaz4bNjJoGyf+fWjJ7dFadWpV9bho3w5+BfcUSvVc4m//3zWTz8nfEOsixHEkyD5S7uXeDyQThgVF2o8yToNN7i+9SpoCEQdgcc+7m07fj/oM+9dPHoogiWa7rIL+VyNtu+Ne2jz7j23DQlCLVV8UROaD9v+oxrz7ZvTxN3vLR8na5Iz19fHGfGs/1o1daxRmNprhycXXjoqx9qrT+dTkdWVhYuLi71ssDPUr169SIyMpJDhw5xzz33VOkavV7PzJkziYuLY/78+cyfP5/g4GCcnZ1RqVQ899xzvPnmm+j1VfsGBIqfV22p7vPu0aMHAwcOZMeOHZw7d46QkBCWLl2KSqVS5Dc3ZxYFy9u3b6/tcQghRIPSanX89eUJ8rOVazFC+noxak5XrKyU/8Co1Cr6TgzC3tmWnT+W7rSn0+rZuuI0tz/fD2tby3amMw4eu7l3hb+eQ5E8beMI49+0qP96M/ZViPmzeFa5xMbn6DpOWYKrJov8tEU6Ni8/ZfIBJ2JaB/qMNf1q2sHFlgkPhbHzhzOciiz9urkwX8ufXxxnxjN9Lf5za85UarXFi+TM0el0FKHC0dW1zoPlmrj55pv55JNP+PXXX3nnnXeqNIt65swZzpw5Q9++ffnss89Mjl+4cMHsdTY2NmRnm8+1N1e/OSAgAIDY2NhKx1RT8+fPZ8+ePXz99ddMmTKFU6dOMXr0aIKD62nzowZmUbA8bNiw2h6HEEI0qIMb4rgel6loa9fNw2ygXFb3oX5kXM/lyJbSf8zSr+USve4ig6Z3rPY4MgoyuJSt3AWsS+Z1SIxSnjj0SWjVyFegO3vBqBdhwxOlbVeP0/Wa8h/3tII0ruVew9vJu9q3OLQx3iQdpusQX7OBcgm1WsWwuztTkFdE7KHSr9dTr+QQteYCg2eElHutaFnGjx9Pt27dOHnyJK+//nqF9YQzMzNJTEwkI6P4w6G5dIe0tDQ2b95s9nofHx8SEhJISUkxySnesmWLyfnDhw/HysqKjRs3kpiYaAie68KMGTNYuHAhK1asMOxS2BIW9pVovB/nhBCintxIyOLAn8pqFm5tHBh7f8WBcomIWzvQNshV0XZsayJpV3PKuaJ8xovdbNW2dIj8VHlSq/amFScaq/A50Ea5EYn/7v/iYqP8avxkyslqd52Vms/Bv5R/bh5+TgydGVrptWq1ilGzuuLuq6y3fHRbIlfOpVd7LKJ5UqlUfP/999jb27N48WKeffZZcnKUf6/1ej1r166lb9++7N+/n44dO6JWq9m2bZtiEV9+fj7z588nNTXV+DZA6UTka6+9pmh/55132L17t8n5vr6+3Hfffe
Tn5zNr1ixD5Y4S169fJzo62qL3bcze3p5Zs2Zx/fp1fvzxR9q0acMtt9xSK303BTUKllNSUvjwww+5++67GTduHO+8847h2MmTJ1m7di25ubk1HqQQQtQVvV7Prv+dRV+mgoJarWLsvG7YOdpUqQ+1lZqR93VBbV2aRqDT6dnzW/W/HjVOwQi1ccMmzags3ejFYN1E6gNbWcNYZbUOVfZVulopc4ON87SrImp1LNoy6RcqtYqR93XByqZq/7TZ2Fkx7h/dsbIuc74edv4Ug1Zbezmiomnr1asXW7ZsoW3btrz11lt4eXkxevRo7r77biZNmoSPjw9Tp041zO56eXlx//33k5mZSc+ePZk0aRIzZswgMDCQbdu2MXv2bLP3efrpp3FwcOCDDz6gd+/e3HbbbXTq1InFixfz8MMPm73mww8/ZODAgWzfvp327dszceJE7rjjDiIiIggICOCnn36qtefw4IMPGlKlZs2aha2tba313dhZHCz/+uuvBAcHs2jRIn766Se2bNnCmTNnDMcvX77Mrbfeym+//VYrAxVCiLpw/uB1ks5nKNr6TGiPV3vXcq4wz93HiV6jlWkRcceSSTqfXq1+TBb3pRrVIPbvD91urVafDS5kNHQYqWjqciNO8bq6i/ySL2Vzdt81RVv3oX4W/bndNFWZd5l6JYfj2y+Vc4VoiQYNGsT58+d577336NevH8eOHeN///sfkZGRBAYG8u9//5tz584xatQoAD777DPef/99goKC2Lp1K7t27WL06NEcOHCg3DJv3bp1Y9u2bQwfPpyzZ8+yefNmOnTowN69e+nXr5/Za1xcXNi+fTsffvgh3bp1Y9euXaxdu5YbN25w9913c99999XaMwgNDTWklrSUhX0lLMpZ3rt3r6FQ9fvvv8/gwYPp37+/4pxRo0bh5ubGb7/9VuUVpEIIUZ+KCrXs+U1ZbszV057w8VWvWVpW+Lj2nI68Ql5W6SLBAxvimPxoryr3YbLNdZ5RealxbzTcltY1MfY1+Hww6ItnbLvm5YCLg+FwdRf5HdgQp3ht62BNv0mBFg2t50h/zu67qtjI5MCGOLoM8sXOofmXxRJV4+zszOOPP87jjz9e6blWVlYsWrSIRYsWmRxbvHgxixcvNnvdgAEDzBZR6NmzJ/fddx+ZmZkmx2xtbXn00Ud59NFHKxzT8OHDK6zAsWLFCkW1M2N79+4lMTGRYcOG0alTpwrv1dxYvCmJWq1m8+bNPPbYY/Tt29fkHCsrK/r06VPjHWqEEKKuHNmSQHaqsqbowGkdsbaxrBqCrYM14eMDFW0Jp1K5dtH0HzhzsgqzSMhKULR1KSizMUrnSRBgfoap0WvbDXreaXipeF9ASn4KN/KqVss2NSmH2MPXFW29Rgfg4GzZ18JqKzXD7lT+41+QW8SRLQnlXCFEy/P668XpVAsWLGjgkdQ/i4LlPXv2EBERUele6d7e3oqdYIQQorHIz9FwaJMyGPINaUVw7+pta2us6xBfHFyVQduRrVULus6knlG8ttbrCSksmaVWwYjnajS2BjfsaVAX54G3KyrCyah2bFXzlo9uTVRU0bN1sCZsZM0qAXgHu5n82R/dkkhedtV2cRSiOdqzZw/3338/N910E3/88Qd9+vRh2rRpDT2semdRsJybm1ulfdJruhV2Xl4eL730EqGhodjb2+Pr68vcuXO5fPly5RdX4Ny5czg4OKBSqRg9enSN+hJCNE3Hd1xCk68tbVDB4BkhFtX6LcvG1opeo5SBW+yhG2Sn5Vd6rXGwGFKowRB2d7u1eHa2KWvdHsJnAcX/+BjPLlclWM7P1hATfVXR1mO4X62kS9w0ORjK/PFrCrQc2iizy6LlOnv2LMuWLeP06dPcfPPN/Pbbb426LnZdsegd+/n5cfJkxWV+9Ho9J06cICgoyKKB5efnM3LkSF599VWys7OZOnUqAQEBLF++nN69e5db1LsqHnjgAZPtHIUQLUdhfhFHtymL/H
cM96JNu9rZNrrrYF+sbUt/vep1eo7vrPxDvkm+cuH/B5MqNQx/tlbG1uCGPAHW9gB0KVQGy1VZ5Hdy92VFBQy1lYoew81v31td7r5OdOqvrPV8fMclcjLk3wvRMs2ePRu9Xk9mZibr168vd3Fic2dRsDx+/HhiYmL4+eefyz3n66+/JjExkZtvvtmigb322mtERUURERHB2bNn+eWXX4iOjub999/nxo0bzJ0716J+ly5dyo4dO1pUMW0hhNKp3VcoyClStBnnGteEvZMNnQb4KNpO70mqtByZSbBcMvPaYya0qbx2cJPg6gP9i3//dq3mzLJer+f0HmVqX8dwL5zcaq+MXr9JQajVpdPLWo2O4zukMoYQLZlFwfIzzzyDm5sb9913H08//TRRUcU7S+Xk5HD48GFeeukl/vnPf9KmTRsWLlxY7f4LCwv5+OOPAfjkk09wdnY2HFu0aBFhYWHs3LmTgwcPVqvfa9eu8eSTTzJmzBjuvPPOyi8QQjQ7RRothzcrv1oP7OGBp79zOVdYpsdwP8XrvMxC4o+nlHM2ZBdmE5+prKfcpaAQVFYw7KlaHVuDG/goWNuXzpz/v+t517mee72ci+DaxUwyrucp2roPq51Z5RJubRzoHKGcXT7x92U0hdpyrhBCNHcWBcv+/v788ccfeHp68u677zJo0CBUKhUrV66kb9++vPbaa7Rq1Yq1a9fi5eVV7f4jIyPJyMigQ4cO9O7d2+T4bbfdBsC6deuq1e+//vUv8vLy+PTTTys/WQjRLJ2NvkZuhjJIC58QWOv38fB1xjtYWfP3VOSVcs8/mXISfZlVa9Z6PaGaQuh1J3h0qPXxNShnL+gzi/Ya00V+J5LLr6B0Zq9yVtnNy8HkGdeGnkb1sgtyioiJulrO2c1TRSXGhGho9f3zaXGWdkREBDExMSxZsoTx48fTuXNnQkNDGTlyJG+99RYxMTHcdNNNFvV99OhRgHKrbZS0Hzt2rMp9btiwgV9++YXnnnuOjh07WjQuIUTTptfrOb5T+ZW6X6dWeAe71cn9ugzyVbxOOJlabnWF48nHFa87FxZihxUMfbJOxlaWTqcnLaeQlOwC8oqgsKgedq8b9ChWahu6G6VilBcsF2m0nD+onHXuPMC7xgsyzXH3caJdNw9F29GtiYpdHpsrK6visokajaaSM4VoOCU/nyU/r3WtRsuHXVxceOyxx3jsscdqaTjFEhKKvyIt2SnGWEl7fHy82ePGcnJyePjhh+nUqRNPP/20RWMqKChQLAosKQyu0Wjkl0oVlTwneV7VI8+t+sp7ZlcvZCo2noDidIm6eraBPd2x+lltWJCm1+k5d+AqXQb5mJx77NphxevuBYXout6C1tkPanF8N7IK2HU+mRNXsjidlMnF5FzScgspjQOteWb/Fto42xLg7kiIlxP92remX2BrfFs5VNR19Ti2xarHTLonrCfawd7QfOzqIbN/HrGHblCQq8wzDw73rLM/u+7DfUg4WZo2k34tl9ij12jfXRlEN8e/nzY2NqSnp+Pk5FQnH0agdGZQr9ej08nW4lUhz6yYXq8nPT3dsN12ZX/3auPvZqPcmig7u/gfM0dHR7PHnZycAMjKyjJ73NgLL7xAfHw827dvt3gv8zfffJOXX37ZpH379u3ljlOYt3nz5oYeQpMkz636jJ9Z6lF7wMbw2spBx/H4KE7UYXUwWw978q6W3nPf5jNczDhsct6RtChF2bIeBYXs1PYmc8OGGo8hSwP7rqs4kqImIadqwc+N7EJuZBdyKCGdXw4UV/Lwc9QT7qmjj6ee1rWwps5J04vuBb8r2o5fP8L6P9ajVim/+Ew+4EDZf7Ls3IvYFbWt5oMoh14PNi6OaLJKZ652rDxGm4Q8s+c3p7+fdnZ2eHp6UlBQgJOTE9bWdRcqpKSUn8cvzGvJz6yoqIicnBwyMjJITk6u0sZ3ubm5Nb6vRX8D9uzZw/bt2zl9+jRpaWmoVCrc3d3p2rUrI0
aMsDj9oi4cOHCAjz76iPvuu4/hw4db3M+zzz6r2LYyMzOTgIAARowYgYeHRwVXihIajYbNmzczZswYbGxsKr9AAPLcLGHumeVlFfLD5n2U3c2i79gO9BxduwvEjF3wucGWZaWbjRSmWjNiyGgcXEo/uF/PukzaOmU6QjevcNpNn1+je5+8kslXu+PYdOoaGm3NUwgu56q4nGDF+kQY360t8wYH0sOvZiksyb/thPzSlLpclZZuNwUR5FlaUzovq5DvN0YrrouY2JXQm9rW6N6VifG4ys4fzhleF6RYM7j/CFw9S2fYm+vfz6ysLNLS0khPT6+T/vV6Pfn5+djb29fZ7HVzI8+smJ2dHcHBwfTs2bNK59fGh4tqBcvHjh1j7ty5HD5cPCtinGBd8ofXv39/li5dSteuXS0aVEn1i/I+DeTk5ADFaSAVKSoq4h//+AetWrXivffes2gsJezs7LCzM51KsbGxaVa/IOuDPDPLyHOrvrLP7Ni+y+iKSn9nWVmr6T7Ev86faXCvtljbnqWo8P9TMfSQcCKd7kNLq2WcOfGt4hoXrY6gEU+htnBs565lsWTzWf48UTeL0nR62HDiGhtOXGNYaBuem9iFTt6W1aj2Gf4cXutv43qZ2cszx78hdOx/DK/Pnb6Bvsy3zta2akL6emNjU7dfjna+yZeo3y8q0j/O7bvBgKmmCy6b299Pd3d33N3d0Wg0aLW1XwlEo9Hw999/M3To0Gb13OqSPLPiHOXqvvfaeFZV/k2zf/9+Ro4cSU5ODk5OTkyYMIFevXrh6emJXq8nOTmZw4cPs3HjRqKjo4mIiGDHjh1mq1lUpl274pXIly6Zr21Z0l5ZcexLly5x5MgRvL29mTFjhuJYyaflgwcPGmacd+zYUe2xCiEaP71Oz8m/lZUoQvp6Ye9c9//g2NhaERjmyfkDpYvTzh+8Vhos6/Ucj1kDZTLEuqnsUQcOrva9cgqK+GDLWZZFxqGtYDGas501Azt40MPPja6+rni72ePuZItKp+WvzVvpGzGEpCwNF25kcyA+jf1xqaTnms/723n2BrvO3eCO/u14alwnWjlWM9WtbTd62LRmq740re543Bam6LSgLk6BuHg0WXFJ+24e2NrXfRahta0VoTd5c3x76b9FZ/Yk0X9SEGqrlrGLWV19CLCysqKoqAh7e/sWG/hVlzyzhlOl3zZarZa7776bnJwc7r//ft5//31cXc2X68nMzGTRokUsW7aMu+66i1OnTlX764KSqfVDhw6ZPV7SHhYWVqX+rl69ytWr5mdY0tPT2blzZ7XGJ4RoWi6dTSMrVbnddPda2vWtKjqGeymC5Stn08nJKCjeTOP8Fk5oM4HSRW49/AdBNX9vbo+5znO/HScpw/y22rbWaiaF+TCttz/9g9yxtTYN9jQaDa3toJO3C90DbIC2PAhodXqiL6aw9sgV1h9LIrtAudBOp4cfoxPYfOoab97ag9Fdq5ce0T1wFFsvrja8PkEBxPwJXSahKdCSeDpVcX5QT89q9V8TXQf5KoLlnIxC4k+mEhRWf2MQQjSsKn00XrNmDefPn+f222/nq6++KjdQBnB1deXrr79mxowZnD17ttq1kAEGDRqEm5sbsbGxHDlyxOT4ypUrAZg8eXKF/QQGBqLX683+t337dgBGjRplaBNCNE8xe5Uflj38nWkbWPv1ecvTvpsHNnalC8X0erh45AYA2j0fc8JOORvbPXRqlfvO12hZvPYkc5bvNxsouznY8MTYUPY9N4olM3sxOMTTbKBcESu1ioEdPHlrehiRz4zk6fGdaeNimpZ2I6uAed8e4PH/HSW3sMhMT+b16Kjc6fWMrS0Fe4s3pko8narY3lqlVtG+R/0Fqp7+zngZ/ayc2l1+vWwhRPNTpd+Y69atQ61W88Ybb1S54zfffBOA1atXV3tQtra2LFiwAIBHHnnEkKMMsGTJEo4dO8awYcMIDw83tH/88cd07tyZZ599ttr3E0I0X4
X5RcQeNq3PW5+s/z8Vo6z4Eylw/TRxl3aTo1b+Ku7RpmrfmiWk5HLLJ5Gs2BNncszWSs0/R3Zk19MjWDAypPrpEeVwc7DhoeEd2PnkcP41KgR7G9N/RlYdusQtn0QSeyPbTA+munp2K1sIhCKViphrB+HyQS4evaE41zfEDXun+v0KuqtRqb/4EynkpBeUc7YQormpUrB88OBBOnXqRFBQUJU7Dg4OpnPnztXekrrECy+8wE033cSePXsICQnh9ttvZ8CAATz++OO0adOGZcuWKc5PTk4mJiaGpKSkcnoUQrREsYeuGxbXQfHMZGj/+g2WoXhL7bIuxaRRtOdLjhstHG7r6EUbxzaV9rcnNpkpn+zmzFXTEpqDO3qyceFQHh/bCVf7ugksHW2tWTgmlG2PD2dEJ9Pxnr2WzZT/7mbzqWuV9uVi60KQq/Lfl+N2tuj2fEac0RbhQWGVP5vaFtKvLdZlvxnQ6Tm7v/L3JYRoHqoULCclJREaGlrtzkNDQ7lyxbKvq+zt7dm+fTsvvvgijo6OrF69mvj4eGbPns2hQ4cIDg62qF8hRMtyxigFo313Dxxda2eWtToCuror6igXFepIOnDcJAUjrE3l5ZB+2pfAvUv3mSy6s7NW8+ot3fnu/v4EeTrVyrgr49vKgWWz+/HubWE42ymXweQUannwuwN8Y2bm25jxbPoJOzuuHjlNfrbyPdZnvnIJW3trOvZRBuln97Ws7a+FaMmqFCxnZGTg5lb9Wpqurq6Gne4s4eDgwCuvvML58+cpKCggKSmJ5cuXm93Zb/Hixej1elasWFGlvocPH45er2fLli0Wj08I0bhlJudx5Vy6oq2+UzBKODjbmuRJx+d257hxvrJn93L70Ov1/HfrOZ797bhJtYsQL2fW/XMw9w5oX+81WFUqFTP6BrDun4PpbFRCTqeHf689yWvrT6GroEJHD88eitcnbG25mBeuaPPwc1bUOK5Pxt9GJCdmk5qUU87ZQojmpErBclFREWp19cvkqNVqioqqvshDCCFq0/kDynxXOydrAutxcZixdt2UqRhxBX04a7SrqHHQWEKn0/PyulO8v/msybHRXdry+yODCG1rWa3j2hLk6cTvDw9iWm8/k2Nf777Ic7+bBvklurdRfkiIs7UhrrCXsv8GmFUu4depNY5uyj8rmV0WomVoGYUihRAtUuxBZbAc0rctVmYWpNWX9t2VwXKG1h+HgtI2FSq6ephu5qTX63lxzQmzC/keHt6BL+8NN0mBaCgOtla8P7Mni8aYpu79vD+RJ349SpFWZ3IstFUoturSYNSh0IX0okDFOcYfNuqTWq0ipK+yJN65/dekkpIQLUCV/9X45ptvsLKyqtZ/3377beUdCyFEHdBkqUm7qtwFNKRf3W6PXBmvdi44uCgX3LVLLw2OO7TqgJONMtdYr9fz0pqT/BCdoGhXqeDlKd14anxn1OrGtfWtSqXi0VEhvD+jJ1ZGY/v98GWe+PWoSUqGjZUNnT06G177ZYYoj9tb4RXYsDPnof2VPz+Zyflcu2i6wFII0bxUOVgur15xZf8JIURDyE1SzrQ6tbLDJ7j6ay9qk0qtIiBQGTwGpHcx/H/jFAy9Xs9rf5zmu6h4Rbu1WsUHt/di1sDAOhtrbZge7s9nd/fB1mi3u9VHrvDimhMm/0aEeZYu8vNL76Q45hcAVg28a16bdi609nZUtJ3ff72cs4UQzUWVfvPodDqL/6uLPeWFEKIier2evCTlDG7Hvl6oGsEMbHt1pOK1X0YIal3xr2LjxX1f77rI0t0XFW1WahX/vbM3U3uZ5gU3RmO7efPlfeHYGW2E8kN0Am/9dUbRZnj/evDPUAbL/qroOh1nVahUKpPZ5YtHk5F5ISGaN8lZFkI0OymXcijKVf56Cwlv2BQMAPIzCLixVNFko7PDMycAgF5evQzta45c5vUNpxXnWqlVfHRHbyb0UG6S0dgN7+TFl/f1xcZK+WHli50X+G5vnO
F1yft3y2+DS2FrxbkBaT9AdsPP4nY0ylvOy9JQmGpVztlCiOZAgmUhRLMTe0i5sM/V077B810BOPoLDrpruFsr0yp8M0NwtnGmg1sHAPacT+aJX4+aXP7ubWHcHNa0AuUSw0Lb8NEdvTGe3P/32pNsjykOgn2dfGnj0Aa/DOXiQEd1Kq3VcXDwm3oabflaeTni4e+saMu91jgWVwoh6oYEy0KIZkWv13PBKFjuGN623msPm9Dr4UDxrLKf7UnFId/MDoS1CcNKbUXM1Swe/O4gGq3yu/1nJnRmWh/TGvNNyYQePrw9Xbn5iE4PC344xOmkTFQqFb28epmkYATYHkWlAg4uB23DlyPt2MdL8TrvqjX6CmpICyGaNgmWhRDNSnJiNlmpBYq2jn29yjm7HiXshRvFObq+ticUh7wzg+np0Yv03EL+8e0BsgqUAeGsiPY8OLR57Fo6o28Aj45SVrrIKdRy/4r9XM/MJ8yjJ34ZyuP+dseK/0/mZYjZUF9DLVcHo938dAVqrl20fAMuIUTjJsGyEKJZuXDENAXD0+hr8waxvzRX2dP2lOKQrc6ejpoe/OvnIySkKsvdje/mzUuTuzX8zHgtWjg6hKm9fBVtVzLy+cd3BwnSdMVOq6w40cr+eOmL/V/XxxAr1NrbCXdfZYm/C0eSG2g0Qoi6JsGyEKJZiT2sDJaDe3s1fKCZfQNOrTG8jHPII9UhSXHKsWg1O88qx94zoBUf3NHLpFZxU6dSqXh7ehh92ysX8R1NTCdyl3JWPcP+Bucdy2wrfXEnpF6oj2FWqINRKsbFI8mSiiFEMyXBshCi2Ui7mkNaUo6iLbhXm3LOrkdHvgedpvSlgxNXXM8rTkk6qxy3p7MdX9wTjr1N86y0YG9jxZf39aW9h3IWOem8cpOPJJdYjjgZ1cc+1PAbXhmnYuSkF3ItTlIxhGiOJFgWQjQbxikYDq42eAe5NtBo/p9OBweWK5qOtGlPklGw7FekRvX/E5PWahWf3t0Hbzf7+hplg3B3suXze8KxL9mCXA9+RcoPB1ddLnLEw6im9OEfQKuhIXn4OptsUBJ3TFIxhGiOLAqWX375ZS5dulTbYxFCiBoxzhsNDPNo+I1IYrdBemmpOD1wRF3EFddYxWl2qGirLR7ri5O60j/IvT5H2WC6+Ljy2i3FOxe66VS46JV/XkmuFziuzUaRnJFzHWL+rL9BliOop6fi9UUJloVoliwOloOCgpg8eTJr165Fp9PV9riEEKJastPyuW70NbhxMNMgDig3Ibnk3Y2UwkzybLNIdbiqOOZfZMX0Pv7cF9G+PkfY4G4L9+fO/gH4aZX/JOVZZ5Nhf51cbT7nA8KVFx1cUX8DLEdgmDIVI/VKDhk38hpoNEKIumJRsPzaa6/Rrl07/vjjD2699VYCAgJ48cUXiYuLq+XhCSFE1Vw8qpzVU1nr8Q1xK+fsepJxGc7+pWg60mGg4f9fdVHOLoda2/DaLd0bfkFiA/j35G70sFOmnVx1uQD//yiOtO+jvCB2G6QpN3epb22DXLF3Vm6rLqkYQjQ/FgXLzz33HLGxsWzatIkZM2aQkpLC66+/TseOHRk/fjyrVq2iqKjhC8cLIVqOuOPKIMXBqwi1VQMvyzjyI+jLfPNm68xRx9I812suymAvWG2Dg23zXNBXGXsbK7rZ2CnarrqWVr04YqMC+7IffvRw+Lt6Gp15arWKdt2V6TKSiiFE81Ojf0lGjx7Nzz//zOXLl3nvvffo1KkTmzZtYubMmfj7+/PMM89w7ty52hqrEEKYVZhfxKWYNEWbvVcDf2DX6UyDue7TOZJSWmP5mnOc4nBBpobstPx6GFzjk5+jIeu6MoUhyaVMsJx8HMJuV1506LsG39Ev0ChYvnIunfychl18KISoXbUy7eLh4cGiRYs4efIku3fv5s477+T69eu8++67dO7cmVGjRvH777/Xxq2EEMLEpTNp6IpKa9yq1CrsPRs4WI77W7GwDyA7bC
Zn00onENIdrpOv0irOuXqhZZYfuxqboXitQUeyU+lC8svZl7nRbaryouyrcG5jfQyvXH6dW4O69GdPr9OTcDKlAUckhKhttfodZWxsLOvWrWPr1q2GNn9/f7Zv385tt91G//79SUxMrM1bCiEE8UYpGD4dXFHblHNyfTlkNKvs1ZVt+Rr0lKZl6FGTYqfMT752URk0thRJsemK16l2KrR6a0Xb+tRU8O+nvLCBF/rZ2Flh76H8wCOpGEI0LzUOljUaDT///DOjRo0iNDSUt99+m6KiIhYtWsSZM2eIj48nMjKSCRMmcODAARYsWFAb4xZCCKB4Ji/uhHImzziPtN7lpsLpdYomXe97+SByk7Itz4/evb0VbS11ZvnaReX77tm7Ldr8doq2r/Zvpaj3fcoLz2+B9IadhLFvq/wWI+FECtoiqRIlRHNhcbB8+vRpFi1ahK+vL3fffTfbt28nIiKCb7/9lkuXLvHee+8REhICQEREBOvXr6d///7s3Lmz1gYvhBA3ErPIzShUtDV4sHz8V9AWlL62suV/BQNJKjilOC2kVQ9u6uujaLuRkNXiAi29Ts/1BOXOfb17taWPV29FW7ouhqVpvcHWpezFDb7Qz6GNMlguzNdy5Vx6wwxGCFHrLAqWBw8eTPfu3fnggw/QaDQ89NBDHDt2jN27d3PPPfdgZ2dn9rpu3bqRlZVl9pgQQlgi7rhyVtnNy4FWXo7lnF0P9HqT7Zhzgsbx8tZLWDkkKNrn3zQGr0DlDoPaIh3Jl7LrfJiNSdq1XDT5ylSGtkGuPBwxRtGmtr/C+zvPkxFyq7KDIz8WL6hsIFb2etq0d1G0SSqGEM2HRcHynj176N27N19++SVXrlzh448/pnv37pVeN2/ePJYtW2bJLYUQwizjfOXAHg28EcmVw3DthKLpg9QICqwTUKlLZyBVqBjo1xd7JxuTbZOvXmhZecvGm8k4tbLDyc2O8La9sFKV5i2rVHq0thd59UpfZQcZiXCxYb+1bN9D+W1G3NFk9Hp9OWcLIZoSi4Ll/fv3c+DAAebNm4ejY9VncCIiIpg1a5YltxRCCBM5GQVcj1d+WxUY1sDBslFKQI6DL19faYe140VFe0jrENzsiusGtw1Wbp5inL/b3BkHy23/f7bd3tqesDY9FMesHC+yMsmDFOdQZSdHfqjTMVYmsIeH4nVWaj5pSbkNNBohRG2yKFj+448/WLt2baXnrVu3jldeecWSWwghRKXijRb22dpb4dOxAXftK8yF4ysVTd/kD0aPGiujYDm8ben2zd5BylSMllYR45rRBx6vwNKUhrLPCfj/56jii8wIZSen10Feeh2NsHKtfRxxbq1MQUw4JSXkhGgOLAqWFy9ezOrVqys9b+3atbz88suW3EIIISplvLVwu24eWDXkrn2n1kBB6SypDhXf5w0GtFg5xClOLRsEtjUKljOT88nPbhkbWxTnaBsFy+1Ln4dJsOxwCVQafi2MoIgypeWK8uHEqjoda0VUKhXtuilnl40/zAkhmqY6/VdFq9WiVjfwdrNCiGapSKMl8Yxy1z7jr8LrndHCvr+1YVzBE7V9EiqrAsWxskFgax8nrKyVvytvJLaMxdApl7MVG8oAeJVZLNerTS/UqtJno1JpsXJIIA1XNmn7KDs7/H2djrUy7Y2C5Svn0ynMb+DNcYQQNVankezJkydp3bp1Xd5CCNFCJcVmUFRQpoKCCtp1b8BgOSUWEvYomn7RDgcwScEIdA3E06E0t9rKSo2Hv7PinBsJLSNYNs5XbtXWETvH0h1lnG2d6ezeWXFOyfP8VTtM2dmVQ3D9dN0MtAr8O7dGrS7dZEZXpOfy2fQGG48QonZYV35Ksblz5ype796926StRFFRETExMRw4cIBbbrmlRgMUQghzLp1OVbz2au+Kg7NtA42G4vJlZaToXdiiK549rihfuYRXOxdF4Gi8cLG5umYULJfNVy4R3jacUymlNaqtHOMA+FsXxjV9K9
qq0ktPPvw9jHu9LoZaKVsHa7w7uClqLCecSCGooRedCiFqpMrB8ooVKwz/X6VScf78ec6fP1/hNWFhYbz77rsWD04IIcqTeFqZghHQpQG/xdLp4OjPiqY12kFosAZ02DnHU7YKsLlguU07ZZB4I6FlVMQw/lBQNl+5RHjbcL47VVplxMYxgTyK0GLNb9ohPGRdZrfEY7/A6MVg1TD7nbfv7qEIluNPpqDX61GpVOVfJIRo1KocLG/fvh0AvV7PyJEjGT9+PE8//bTZc21tbfH19aV9+/a1M0ohhCgjL7vQJKc3oEsD7toX9zdkXlI0rdQOBcDa/gY6VY7iWFWC5czkfPJzNNg7NUzQVx8K84tIS1I+m7aBpsFyHy9lbrJeVYidUxIFOQH8qh2mDJZzbsC5TdD55joZc2XadfNg7++xhtdZKfmkX8ultbdTg4xHCFFzVQ6Whw0rzQ2bNWsWQ4YMUbQJIUR9uXQmDcqsCbO2VeMd3IAl44xSME7r2nFKHwhARLcMjpQpt+vr5Iuvs69JF+6+TqitVYrFbsmJWfh3buCtu+tQyqVsyu7boVKr8DTK3QZobd+ajq06cj699NvMiG7p7NgXwAW9Lwd0ofRVny294PD3DRYse/g54dTKjpz00gWd8SdSJFgWogmzaIHf8uXLy81XFkKIumacr+wb0tqkmkS9yc+EU8q68yWzyu5Otnh4KGeczc0qA1hZq/H0UwaK15v5Ij/jbb1beztibWtl9lzj52bvEk+AuwNgZqHf2Y2Qfb32BloNxSXklB9wEk5KCTkhmjKp6yaEaFL0en3jylc+tQaK8gwvNXorVmsHAfDE2FCOJR9WnF5esAzgaZSKkdzcg2WjVJo2AaaL+0r0bavc4vrojSM8O6F4F78/tDeRpy+zuFOvLc5dbiDGJeQun0tHU7ZyixCiSalSGkZwcDAqlYotW7YQFBREcHBwlW+gUqmIjY2t/EQhhKiCjBt5ZKXmK9oaNF/ZKAVjh64nKbjR3c+VAZ30vHnmhuJ4RcGyVzsXTpV53dxnlm8kKmeWPQNMUzBK9GmrzFvO1mTT3ieDiGAP9l6ADbqbmG61q/SEw99DxAJogIV1/l3cUalV6HXFOSa6Ij2XY9Iafit2IYRFqhQsx8XFAaDRaBSvhRCivhmnYDi62uLu20D5oKkXTGorl6RgLJ7cjQPXtiiOedh70N61/IXPxov8Mq7nUZBXhJ1DlZeXNBlarY7UK8rFfebylUt4OXrRzqUdCVkJhrb9V/fz0uRp3PzRLn7VDlMGyzfOwOVD4F/+h5O6YudgjXewK0nnS7ctjz+ZIsGyEE1UldIwdDodOp2O0NBQxeuq/ieEELXFOAXDv0vrhivLZVQuLlXvzDZdH27t7UffQHf2Xd2nON7fp3+FY/XwdUZtpTzeXFMx0q/moi1S/vvgWUEaBhQ/v7Kir0bTxceVO/u3I1rXmXidl/KCw9/RUNobbZCTaPQhTwjRdEjOshCiydBpdVyKMc5XbqAUDJ0OvVEKxhrtIGxs7XhmQmd0eh37kpTB8gCfARV2aWWjNpklb67bXhvnKzu721VaJu8mn5sUrw9eO4hGq2HRmFBc7G0Ns/ol9CdWgSaPhtCuqzJYzrieR2Zyw4xFCFEzEiwLIZqM6/FZFOYVKdoCGqq0WvxuVBmJiqZV2iEsGNmRtq72nEs7R1qBMrDv762cGTXHeHY15XJ2OWc2bTeMKmF4+lc8qwymzy+vKI8TKSfwcLbjX6NDWaUdik5fOjOvKsiEM3/UzoCrydPfGXtnZfAvs8tCNE0SLAshmoxLZ5TBRmuf4pq2DUF3WDmrfEYXQLpbV+YOCgIgOilacdzP2Q9/F/9K+zUuH5dyOaecM5s200oY5ecrl3C3dye0daiiLSopCoD7Itrj0KY9kbpuiuPaQ9/XcKSWUalVBHRWVmmRYFmIpqlKwbKVlZXF/1lbN7+FKUKIhtFoSsYVZKM9uVrRtFI7lCfHd8bepr
hOsHG+snEKQXk8jBa5pV7JQadtXms/9Ho9ySaVMCqfWQbT2eWSVBcbKzUvTupqkoqhvrgDMi5bPtgaCOiq/Nbj0pk0dDp9OWcLIRqrKkWyAQEBsq+9EKJBFeYXcfVChqKtofKV847+hoO2NP+0SK/mbNvxPBdWvDNfka6IA9cOKK65ybtqwbLxzLK2SEf6tbxar/iRm5nBxcMHSDp/lozrV8m8fg2tVkteQQErD0XSpl0QviGd8OvcDTevtrV676zUfApylek0FVXCKGuAzwC+P106W3z0xlHyivJwsHZgeCcvvg+eQFbiclxUxX8+KvTkHPgBp1FP1d4bqCLjn8+C3CJuxGfRNsh0S28hRONVrdJxQgjRUK6cS0enLZ2VU6tV+Ia0apCx3Ni9nHZlXu/U9eThSYNQq4snFU6mnCRHo0yfMK7kUB57ZxuT7ZJTLmfXSrCs02qJidrN0U1/cDnmNIq9psu4kp7KlTOnOLqpON/Xr3M3eowcS2jEYGxsa572YjyrbOdojYuHfZWuDW8bjpXKCq2+eJMPjU7D4euHGeg7EIBFE3vxx2cDuMNqu+GavH3f4TTyyXqvuezc2p7W3o6kXS3d7zzxdKoEy0I0MZKzLIRoEi4ZpWC0DXbF1r7+07yuxZ+hXeYhRdtZn8kMCC6tfmCcr9yxVUc8HapeY9fDaHY5uYaL/PQ6Hce3bWLZwgfZ8NG7XD5zqtxA2ZzLZ07y16f/Ydmj/+D4tk3odDXbjc54m2tPf+cqf3vpbOtMN09lXnLZ593V15XrHaYr+y9IIOnE3xaOtmaMZ5clb1mIpkeCZSFEk5BotLivoVIwjqz/XPE6Xe/EmFtnK9qMS8ZVpQpGWZ7+ylnklEuWB8vJCXH89O+n2PTFR2Rcu2pxPwDZaals+uIjvnvqUZLOxVjcj3GFD+M87coYp7QYP+/pU6dzUe+taIvd8mW17lFbjPOWr17IoDC/qJyzhRCNUZWmZRISindM8vPzw8rKyvC6qtq1a1f5SUIIUY6c9AKT3d4aIlg+eTmNLtfXQ5lJ0DNtxjLAp3RWOb8on8PXDyuuq+rivhLGwaMl5eP0Oh371qxkz68/oNOanwm2trOjfY/etA3qgJtXW9TWNhzYF02HAH+uXzzPpdMnyc82rfOcnBjPTy89ycDb7qL/rTNQq62qNTaTYNmvmsGyz018dfwrw+tTqafILMzE1bY4vcGvtSM7Am4h6FLpB5uw9K0cuZBEr2Cfat2rpnxDWqG2UhlSiHRaPVfOpstufkI0IVUKlgMDA1Gr1Zw6dYrQ0FACAwOr/JWZSqWiqEg+RQshLGc8q2zrYI1X+6pVT6hNa9as4jnVDUVb53HzFa+P3jhKoa7Q8FqtUtPXu2+17mMcPGanFZCfo6l0044SBbm5/PXpEs7vjzJ73LtDCP2mTCe4T3+sbW0N7RqNhpgbqfSdOBEbGxu0RRpiD0RzdPMGEk4cU/Sh1+mI/N/3xJ84wpRFz+HgUrU83KJCLRk3lJtzVDdY7tmmJ7ZqW8Nz1ul1HLh6gJHtRhrO6TPpIXSff4Ga4iDVVZXHzrXL6fmvZ+t1wbqtvTXewW5cOZduaEs8nSrBshBNSJWC5aFDh6JSqXB0dFS8FkKI+mCcr+wX2gq1Vf1mkUVfSKHjlbWK35opjsF4dFTOGhvnK3d172qY8ayqVm0dUVur0BWV5hWnXMrGr1PlpfIyrl9j1Zv/Ju3KJdN+vX0YNWc+7Xv2qdLvcCtrG0IHDCZ0wGDijh7i7++XcSMhTnHOpVMn+OnFJ7j1mcW09vattM+0q7lQNl1aBe4+1Vu8aG9tT2+v3kRfLX3W+67uUwTLrt6BXGp9E/5ppR8YeqdsYNuZ+xnVpXare1QmoIu7SbAshGg6qhQs79ixo8LXdSUvL48333yTn3/+mYSEBNzd3Rk/fjyvvvoqfn5+VeqjqKiI1157jf3793P69Glu3LiBRqMhICCAMWPG8PTTT9O+ffs6fidCCE
vp9foGz1fW6/X8988jfGGlnKl1GXCfSYWFsgEcVL0KRllWVmrcfZwUVSOSL1ceLKdcSmDlay+QnaZ8XiqVmn5TpzNg+h0WV7MI7NmHdt17EvXbz0St+gW9vrT2c1rSFX584QlufeolfEM7VzxGoxQMV08HbOyql8YBxc+17LM2/pAC4DV0Lqwp/TMbrD7BvRt2MbzTdKzU9TfhE9DVnei1Fwyv067mkpWaj4t71SqACCEaVqNd4Jefn8/IkSN59dVXyc7OZurUqQQEBLB8+XJ69+7NhQsXKu/k//t5+eWX+fvvv/Hx8WH8+PGMGzeOwsJCPvvsM8LCwjhw4EDlHQkhGkTqlRxyMwoVbfUdLG+PuY7X5U04qUrLuelQY9v7TsV5GQUZnEg+oWirbr5yCdOd/CrOW74ae46fFz9jEijbu7gy/blXGHLnrBqXfVNbWTFwxt3c/vLbOLt7KI7lZ2Wy6o0XuXL2TIV9pBjlnntYWBLP+LmeTz/PtZxrijbbbpPRWJc+R7VKT8/Ujaw7esWie1qqTTsX7ByVc1MyuyxE01FrwXJaWhppaWnoq1GOqCKvvfYaUVFRREREcPbsWX755Reio6N5//33uXHjBnPnzq1SP/b29uzevZu0tDQiIyP59ddfWbNmDRcuXOCZZ54hMzOT+fPnV96REKJBGAcVLu72uHk51Nv9dTo97248y3SrXcoDHUaCi7LiQnRSNLoyM652Vnb08epj0X1NFvlVUBHjRkIcK19/gfysTEW7Z7tA7n3zA9qH9bJoDOXx69SFu15/nzaBwYr2wrw8Vr3xUoWVMlKNgn5L60d38+iGi40yb33PlT3Kk2wdsQ5TlpG7zepvPtgcg6Yed0VUq1X4G219fUmCZSGajBoFy2vXrmXs2LE4Ozvj6emJp6cnLi4ujB07ljVr1ljcb2FhIR9//DEAn3zyCc7Opf9oLFq0iLCwMHbu3MnBgwcr7cva2ppBgwaZbLttZWXFq6++ir29PQcPHiQjI6OcHoQQDenSGWW+sn+X1vW6ZmLdsStkJsUyyOqkol3d+26Tc42DtfC24dhbW/ZVu/Git9SkHPRmtkpOu3qFVa+/SEGOcsbWJ7Qzt//7LVzbeFl0/8q4uHtyx+K3COyp/DBQmJfLqjdeMsltLmEys1zNxX0lrNXWDPAdoGgzCZYBVS/ln1Ow+iruaUdZddA0p7sumdRbPpNm9s9TCNH4WBQs6/V65s6dy6233sqWLVvIzc3Fzc0NNzc3cnNz2bJlC9OmTWP27NkWzTRHRkaSkZFBhw4d6N27t8nx2267DYB169ZZMnwDlUqFlZUVKpUK2zIrwoUQjYO2SMflMgujoH5TMDRaHUs2n2Wa0ayy3t4NOk1Utun1RF6JVLQN8h1k8b2NZ1yLCnVkpuQr2rLTUln52ovkpCs/ULTr0YsZz7+GvbNlgWhV2To4MvWJFwjqFa5oL8jN4bc3/01WSrKiPT9Ho9iZECyfWQbT57vnyh60xhumBPRH79FR0XSb1U4+2nqOgqKaba5SHcY/t/nZGpPNWYQQjZNFwfKHH37IihUr8PHx4bPPPiM9PZ3U1FRSU1PJyMjg888/x8fHh++++44PP/yw2v0fPXoUgD59zH99WdJ+7Ngxs8erQq/X8/bbb5OTk8OIESNwcKi/r3WFEFVz9UIGRQVlAhoVJl9n16VfD1wiPiXHJAVD1X062ChnjC9mXORqjnLTj0F+lgfLjq62JnmuaUmls7KawgLWvPsqmTeUebp+nbtxy5MvYGNfP4vHrG1tmfL48yYBc3ZqCr+9+W8KckvHbFwrW22lolVbR4vvbfx8MwszOZmi/AYAlQpVr7sUTZOsokjNyODH6OrtGVATrp4OJulDCadS6u3+QgjLWbRX7JdffomjoyO7du0iKChIcczFxYUHHniAMWPG0KNHD7788ksee+yxavVfsumJv7+/2eMl7fHx8dXq9+mnn+batWtkZmZy7NgxYmNj6dKlC1
9//XWl1xYUFFBQUDojkplZnBuo0WjQaDTVGkdLVfKc5HlVT0t+bvEnlTOTnv7OWNupKn0WtfHM8jVaPtxylr6qGALVyoC0qPvt6I363pm4U/G6rWNbAhwDajSG1t6OXL1Qmod841Imfl3c0Ov1/PXJf7gae05xfpv2wUxa9CyorSy6r8XPTaViwqNP8vtbi0kqs8AvOTGetUveYsqTz6NWW3EjUZnu1qqtAzqd1uLtsz1sPQhyDeJi5kVD267EXXRp1UV5YtfbsN76KqoyNZfHqffz8TYXpvXyxtHW8m3Tq/PM/Dq1IuN6aY3phFMphI2qWmWn5qYl/16zlDwzy9TG87LoN8TFixcZO3asSaBcVlBQEKNGjWLTpk3V7j87u/irqZK6zsacnIq/tsvKMt1ZqiKrVq0iNjbW8DosLIzvv/++wvdR4s033+Tll182ad++fXu54xTmbd68uaGH0CS1xOd2PcoRKC0rlm+dwoYNG6p8fU2e2fYrKq5lWbHQ+m9Fe5adD9uOXIWjynGsy1amhfkX+fPnn39afH+ALI0dUJoiduLAOa5ojpN6/BCpx5VrNqydXHAKH8jWHTupKUufm32PfthcTUKTWRoUJxw/zA9vvYZHr36knVS+nzx9RrX+PM3xKfDhIqXB8oZTG/BPNJ1oiXDphldWaaWS26z+Zk3OYJ7/ZjNj/GqeO1yVZ5aXYw2Uzi5fOZfO+nUbqOYGiM1KS/y9VlPyzKonNze3xn1YFCy3adOmSjm+NjY2eHo2nl2Kzp8/D0BycjIHDx7k+eefJzw8nK+++opZs2ZVeO2zzz7LokWLDK8zMzMJCAhgxIgReHh4VHClKKHRaNi8eTNjxozBxqZqO5GJlvvcCnI1fPuXsq7x0InhVUrDqOkzyy0s4uUlu7Anm5utlPV7HQfOY+LAmxVt+UX5vLrqVUXbzP4zGdNuTLXvXdYJx8vsSSwtk+lo1ZqufnrW/nRIcZ6tgyMzXnodD7+AGt2vNn7WMgYN4n+LnyavTMCcduoIA0aP4bSNEzmUzpR37xtC77E1G7PbFTf27Chd2HdZd5kho4fgYquslKE6mQerHzS8HqQ+iS/J7Lrhzcv3DMHVwbL3W51nVphXxDeH92JYyqNX0Sskol5TixqLlvp7rSbkmVkmJaXm6U4WBcu33nor/8feeYdHUa1//Dvb03vvPUBI6L03KSKCFVHBhlfFcvXeq1x7u+r1qj8UuwIqigIKgvTee0hCQkjvvWw2ZbN9fn8Esjkzm80m2YSFPZ/nyQPzzjlnZ2d3Z955z/e87/r16yGXy+HhYfpHXl9fj4MHD2LJEv6K8a64lv2is6eBlqurvl1celbu1tvbG7fccgvGjBmDwYMH44knnsC0adMQEtL5RVsqlUIq5ecoFYvF9EvbTeg56xn2dt6K8+XouD5YKBYgJN4LIrHlYbienrMNJ4tR36LFAsE5uDAdSjMzAgiH3gchZ8xz1eeg1htlWgJGgPHB43v9eXmHkJX/6ssqsffLDeh4YhhGgFufexH+nDRuvaE33zXvoGDc9sK/semtf8OgN8or9n/9GaRu9wMwviefENden6MxQWMgFUrbz7+e1eNC7QXMDOM8qAxaAOz6J6Buc9YFDIuFwuP4vPV2/HC6BM/PiuvVcVhyzsRiMXzDXVFVYHxgqMhuRMTgvslYciNgb9c1a0DPWfewxrnq0QK/d955B5GRkZg2bRoOHjzI23/o0CHMnDkTUVFR+M9//tPt8UNDQwEApaWmU/tcs/e28p6bmxvmz5+P1tZWOq1BodgYJZwS14HRbt1ylHtKs1qHr4+0ybXuFJISDEROAVz5JZ25WTASvBPgJnXr9bF0LAPNsnq0NmyDqpmUn01csoy3uO56Exw/CFOXLidsWrUKLXXbwLK6dltPC5J0RCaSYbgf+f5PlJ3gNxQ7AAmLCNOdwiMAWHx/vAB1zWp+nz6An0KO5lumUGwdi5zladOmEX8LFiyARCJBam
oqZs6cCR8fH4wYMQIjRoyAr68vZsyYgZSUFEgkEtx+++3dPqikpCQAQHJyssn91+yJiYndHpvLNZlITU1Nr8eiUCjWg1uMJDi+f1LG/XCyEHKlFoGoxXgBJ7PCENMzZdz8vr1JGdcRR1cJpE5tE4C61pNg9WS2jeiRYzDi1oVWeS1rkzRrLgZNmUHYWH01dK1tjqxIIrBauedxgeOI7RPlJ0ynLR1yP7EZIajCcCYbLRo9vjqSx2/fB4QMIGdja0ua0dqs6aQ1hUKxBSySYRw+fLjTfSzLoq6uzqQm5NSpUz0qHjB+/Hi4ubkhLy8PKSkpGDJkCLF/8+bNAID58+d3e2wuR460LYaJiorq9VgUyo0AazBAlZ4ObXn/lvztDk0tQGMNuS7CU34FjbszLeqv1+nhnJaGZqEIQpHl0ehWjR6pf6ZjgkaP2YJzaBZ2kF6JHYEiCVC2m+jToGqA9+lsdFydMUEmQGMl2a6nuMlEqGwoh159jrC7urhhXMxgNO3ZY5XXAXp+3jpjVEgUytwuoEFhnCXQqy9AIA6Fp1sYmvZa59jHNrMYk9mxIl85cpkf4efkRzZkWUAeATRVtJseMeyAg16F3E1pKFHnwc2xe1O23T1nDgZAJBRDpzfeG3N+PYzw4P6rKNhdRL5+cEhKBCO045WIFLvGIme5oKCg60ZWRCKRYMWKFXj33Xfx1FNPYe/eve0ZMD7++GOkpaVh8uTJGD7cOPW2evVqrF69GgsXLsR7773Xbt+xYwc8PDwwbhwZeVAqlXj33Xdx5MgR+Pv7Y/bs2f3z5iiU60zl66+jYdPm630YZikLGAfEGaO4Yk0TlG+sRCssz1oQCKDy51+6/dp/73gc4ESzj7xoss/zXMPWVSjr9iubRhB7JzROZNSTMbAYfCEdtcdNz771hp6et85IkElwMiYIBoFxIlPbsgeCugSU/bnBaq/D/Qx0W9838xkYP9cIlONl/AQAaD4N9KRMSHfPmdvgJ1DnldC+nbvtDMTZ1jvnfYHLnNkI/uST630YFMp1wSJnubfa4J7wyiuvYP/+/Th58iRiYmIwceJEFBUV4cyZM/Dx8cGaNWuI9rW1tcjKykJFRQVhP3fuHN58800EBQVhyJAhcHNzQ2VlJVJSUlBfXw83Nzds3LiRKKlNodys6Bsb0bD59+t9GF0i9yDz5HrIs9pz5NobDUwWYCAXO8dW1sOt9caYundVaRBfUYfLQT5GI6tEgyAdLID+K1xuO3jIrxDOstwz3ubPRdOu3dC+9BLEfn5dN6ZQbjJ6tMCvP5DJZDh06BBeffVVODo6YuvWrSgqKsKyZcuQnJyMyEjLVn4vWrQIzz//PAIDA3Hu3Dls3LgR586dQ1hYGFauXInMzExMnDixj98NhWIb6BsbiUwKtggLBvUesYTNU36lk9Y3NxVuTmiQkY6yQBiIiBpFJz1sk7DaRoiEZLahZrESpR49y2h0o8P9PqtkXmh18Omkte2gl8u7bkSh3IT0vGxRBxoaGtDU1GR6QQWM2S26i4ODA9566y289dZbXbZ944038MYbb/DsiYmJ+Oijj3r0+hTKTYeBr4uUREQAAtt5blaIfaETkzM9fq6tkDhZvq6AZVk0NzfD2dnZonUTBgOLwroWGFgWDFhEMJUQoMO5cvIBHPn51Ft1rahoJrXfYa5hEAp6f2lVMUA6r96RBGKnuTDEtkCmt77D3N3zZilaRgqh8zzoGn8CWGPJ68wQXwR6+cLBCs9vBtaAQkUh0GEGws/JH05iExk3VI1AE/m5FbL+0EEIBgzCvR0htPA30ZNz5gFAom+BRmg8tsaYCXBvSbOof3+hyeMsejRx/aBQ7IEeX9ErKyvxyiuvYNu2bWYTPjMMA51O1+l+CoXSj5h4oI34cysEFhQZ6i8u7C4EthoLcbj7OSLhq+5pW7VaLXbu3Im5c+dalGPzo71Z+OxgW9GiBYLjWCX5wriTEQB/P2oyZdwHZz/A+sz17dsJXg
nYcKt1dLjbP34P2jNkCjSx4zQwQlc4v7MKEYnWL/jU3fNmKRV5CjAfXoDYaSa0zVvb7ToGyJk4Erf/6zWrOOcf7H0UZyqMRWQWRk/EW+NNBFu0rcD/4gC18YFji3YKVuvbMos8OiECr9w60KLX7Ok5K1iTgeyzxjLqqumLEfV491Ot9iWZgxOBDqWCOwuIUSg3Oz0KJ1VUVGDEiBFYs2YNpFIpfHx8wLIsxowZA19f3/Yf1NixY6nEgUKxIVgTkSFrRhCtATe/ckgfVzeTt2iw5rhxETM/t/JUk44yy7I4UkqWlp4UMskqx5Rz9iSyOY6yQBwLgaRNyy2vaDHVzWa5drxCcWT7e7hGfvI5XD7Kz9ffEyYHTya2j5YehYE1EQ01mXP5KK5FpdefKUJNU9/mXebmWy7LksNgsC1nlHdtsLHjo1D6ix4XJSkvL8dbb72FkpISzJkzBwzD4MSJE6ioqMDhw4cRHx8PhmGwa9cuax8zhULpKaYiQzYkwdBq9KjIayBswQP6Nr/yt8fy0aJpqzRnOrfyfSb7FTQWoKSphLBNCZ7S6+NRtTTjwJqvCJtQ7NgWVb7qvDRUma5uaqvUVxqde7HDFIg4pagP/fANmuW9L87BPf91qjpcrrtsuvFQMudyuKAKI5gsAIBKa8A3R/s27zK3xLVaqUNNcVMnra8T3GuDqQcPCsUO6NFdcvfu3YiIiMArr7xicv+kSZOwd+9eXLx4EW+//XavDpBCoVgRU86yDUWWK3IbYNB1KOUsYBAU13eR5QalBj+cLGzfXig8DgHT4RxJ3YD4eSb7Hi0hI9C+Dr6I94zv9TEdXb8GLRzHMW78XWAERgGzvPLGcpblFcbjZQQOiBlzD7Ff3dKCg5wHhJ4Q4hqCCLcIwsaN/rcTNBzwJheSdpxV+Ol030aXnT1k8PAnRencQjzXHe61gcowKHZKj5zlsrIyolCI8GqicrXaeGEJCgrC1KlTsXHjxt4dIYVCsR6mFujYkLPMlWD4hbtA6mCVdcgmWXOisD2qDLB8CUbCorYpexOYkmD0VtJSnJ6KSwf3EraIIcMRP34KYZNXttxQ+lF5JSkbiR41BvHjSclEztmTyLtwttevxZViHCnpxFlmGN6swa3C03CACkA/RZc5syalNlb6mvt9NiXjolDsgR45y66ursS2u7s7gDYnuiMymYxno1Ao1w+WqzlkGJvSLPNKXPehBKNRpcXaE0at8jAmBxECspx0Z+WtFWoFLlZfJGxcJ6276DQa7Pt2NWETyxww47Gn4BlAZnRQK3VQNWtxI6BV69FUryJsHv5OmPbQ43BwIe8lB9Z8Ca2KbNtdJgWTuvHM+kxUtVSZbpx4T9sCzqs4MyrMFhgrJf50ugi1zX0XXebq8SvyFNC2P7zZADwZxo3zgEahWJMeOcuhoaEoLi5u305IaEuuvnPnznabUqnEiRMnEBAQ0MtDpFAo1oPvLNsKykYN6krJ+mkh8X3nLP9wohBNKmOmnrtEnAikVwwQPMJk35PlJ6FnjU6NVCjF6IDRvTqec9t/R0MlWVRp4n1L4ertCxcvBwhE5Gd1o0gxGqqU5NeOATz8HOHg4orJDzxCtG2qrcHJzb2rZDfEdwhcOJroo2VHTTd2DQSiphGmu0TGtm3R5XxuL6sRFOsBRmD8XA06FhU5DX32et2GyjAoFAA9dJanTZuGtLQ01NTUAABuu+02ODk54Z///CdeeuklfPbZZ5g6dSqqqqowZ84cqx4whULpBdxpVBta3FeaRUaVxVIh/CJdO2ndO5rVOnzfIaosgxq3izkSgCH3dfowcbjkMLE9yn8UHESm5RqW0FBVibNbNhG2gNh4DJk5FwAgEDBw93Xk9LkxnOV6TuYOVy8ZRJI26d7ASdMQMiiR2H9hx1bUFBf2+PXEAjEmBE4gbFx9OQFHijFOkIEg1LRv/3iqsM+iyxIHEfzCye94yRUbKvzBvT5QGQbFTunRnXLJkiVYtGgRLl9uW2
Xs6emJr7/+GizL4r///S+ee+45nDt3DgMHDsS7775r1QOmUCg9h6s5tC0JBukkBMW6QyjsG2d+/ekiNCiNMoZZgvNwMHRw6hgBkHSvyb46gw7Hy44Ttt5IMFiWxaF1X0OnNZavZhgBZjzyJJgOzgp3MRhXB2yrcI/To4OkhGEYzHj0SQhFRl06azBg37ere6WP5abwO11xGipdJ/KOuHmAzI0w3Skyfr4qrQHf9mF0OXgAKcWwpUV+fM0yjSxT7JMe3YmSkpKwYcMGTJ5svEEsXrwY2dnZ+OKLL/DOO+9g06ZNSE5Ohpubm5mRKBRKv8K919mIs8yyLEr7Sa/cqtHju2Ok8/O462myUSe5lQEgpToFjZpGwsbVyXaHvPNnkJ98jrANmT0PvuGRhM3dj+Ms3yCRZW4E3IPzPjwDgzFywZ2ErSL7Ci4d2tfj15wQOAGCDlpklV6Fs5WdLB4Uy4AE8vUfdDiBjj+WH0/1nXaZm2+5rrQZykZNJ637GapZplAA9NBZ7ozQ0FD87W9/w8qVK3HHHXdYtQIUhUKxAtw8qTYiw2ioUqJZTjojfaVX/uVsMWqbjc5IAOowUJVMNuoktzIAHCwhC2jEesQiwLlnazO0ahUO/fANYXNy98D4u+/ntfXwJxf53VCa5Q5wnX4AGH373XD3J8/h8Q0/QNXczGtrCe4ydwzxGULYDhabKXzCWcjppS3HaGF2+3arVt9n0WW/CFeIpULCVpZlI1IMnmaZyjAo9onV7pRyuRxyufyGSmdEodgdNqpZ5kownNwk8AjgO1W9RaXV4+sjZDqw5/2SwcCy3Mosy+JA0QHCNi10msm2lnBmy0Y01lQTtsn3PwypoxOvLVeG0VTbCp3WhjInmIA1sGiobiVs3PcBACKJBNMffoKwtTY14uSmn3v82tzP5VDJIegNnZyvoGGAdxxhesGHjPb/dLoI9S3Wj/gKhQIExroTNpuRYgho6jgKBeils7xt2zbMmjULzs7O8Pb2hre3N1xcXDBr1iz8+eef1jpGCoViLTgPs7YhwuA7ByEDPPtET73xfAmqiUITLOazh8lGCQs7za18pf4KylvKCduM0Bk9Opb68lKc2/YHYQsZOBjxE6aYbM+NyLIsoOA4orZGk1wFvZZ0sNx8TT8EhScNQ/TIsYQtZc8O1BQVmGzfFVxnuV5Vz0v3146JnMsjWg7DmTHqnJUaPb4/3jfRZe4sSklmvU0EnhiGK8O4PsdBoVxveuQssyyLhx9+GAsXLsT+/fuhVCrh5uYGNzc3KJVK7N+/H4sWLcKyZcts4gdPoVDa4C3QsYHIsl5vQFk2GVnuC72yRmfAV4fJqPKykCrIGjnOWCe5lQFgf/F+YjvIOQixHrGdtO4clmVxcO3XMOiNqesEQiGmP/JEpw8JEpkITu5SwmbrGTEUVaQzL5EJ4egq6bT9lAcfhUhs3M+yBhxc+3WP7iMhLiGI8yCjxQeKD3TSGrycywKtEv8OzyGa/HCyCA1K60eXuYv8muVq23gQojIMCgVAD53lVatWYd26dQgICMCXX36JhoYG1NfXo76+HgqFAl999RUCAgLw008/YdWqVdY+ZgqF0lNsULNcXdAIrYqcHg/mFGuwBr8nl6JcQWZEeMKds+jLKxoIHtnpGFzd64zQGT2KgOcnn0VRGhnlHDZ3AbyCQ83242fEsG1nmbsI0d3P0ez5cvP1w8gFdxC20sx0ZJ00k/rNDNNDpxPbB4oPdO54uwYAUWT7hYIjhL/YrNZhzYnCHh2LOTwDnODoRj5E2IQUg6aOo1AA9NBZ/uabb+Do6Ihjx47h8ccfJyr6ubi4YPny5Th27BgcHBzwzTffmBmJQqH0K1xHwQayYXCdAq8gJzi5STtp3TO0egO+OJxL2CaEOcKveCfZ0Exu5UJFIXIbyDGmh0032dYcep0WR376nrA5e3ph7J2Lu+zLz4hh2+njLFncx2Xkgjvh6uNH2I6sXwONqvuRVu7nU9FSgc
v1lzvvwJFiOJSdxNIB5Pdh7YkCKFqtWz2RYRieFKPUBvIt81LH0Zliip3SI2e5oKAA06dPR0RERKdtIiIiMH36dBQU9ExvRqFQrI8t5lnmOgV9IcH4M6UcJfWks/VqZA6g7pgCjgESTedWBvhT+N4O3kjySer2sVzctR3yClL3POm+ZZDIui5qwossV9h2ZLmhuvvOslgixZQHycp+zfV1OLNlY7dfP8Y9BiEuIYSNu0CTIG4uL+fyCq/zxPNTk0qHH04WdvtYuoIrxSjNksOgv86RXO71geZZptgpPXKWfXx8IJF0rju7hlgshre3d09egkKh9AU2plnWtOpQWUDmLObmne0tegOLzw+REeEhIe6ILd9CNoyeDrgFdToO11meGjKVyOVrCUpFA079/ithC4iJ63RRHxeus6moVtp0tK+hsvvOMgBEjxyLsMShhO389i2QV5R16/UZhuEtwDSrWxbLgMF3ESbvnM2YN4iMdH9/vABNKutGl7mRZU2rDtXFTVZ9jW7Dy7NMZRgU+6RHd8qFCxfi4MGDkMs7nyaqr6/HwYMHcfvtt/f02CgUitWxLRlGWbacWHQoEDEIjHa36mv8lVaOglpSrvDiSCGYopNkw6EPdDpGZUslLtVeImw9yYJx4rf10LSSDuTUZcstjvBzS15rVHq0NlnXabMWOo0eTXJSI26ps8wwDKYuWw6B0Jh/2KDX4cj6td0+Dm5WjHxFPvIVZrJacHNsNxThHwNIqZCiVYsfTxV1+1jM4eQuJaobAkBp5nWWYgi4C/xs98GMQulLeuQsv/POO4iMjMS0adNw8CA/0fuhQ4cwc+ZMREVF4T//+U+vD5JCoVgJXp7l6+ssc/MrB0S68Qo09AaDgcXqg2RUOSHIFWMUu8iGjl5tU/CdwF3Y5yJxwUj/zhcCmqK6MB9pB/cQtoETpyIgOq6THnycPWUQiMjPTFFtm1IMRU0r79mM6+ybwysoBEPn3EbY8s6fRtGllG4dR6JPInwcfAib2QIlgcMAn3jCFF6yFbdwosvfHctHi1oHaxLCWdh6vRf5MaB5likUwEJnedq0acTfggULIJFIkJqaipkzZ8LHxwcjRozAiBEj4OvrixkzZiAlJQUSiYRGlikUG4KbOo6XR7WfKb3StyWu91yuQk41WQXumSkRYFI3kA2TFgOizqVlXOdqcvBkiIWWVyhlWbatUl+HyJxIKsWE+5ZaPAYACAQM3LxJbTO36IetwM3U4ewh7faD0Ng77oWDK6khPvzjdzB0VlzEBAJGwIsum9Utm8i5jIyteHYiWf5crtRi/WnrRpe5EqTKfAW06utYeIaXDYNGlin2iciSRocPH+50H8uyqKurQ11dHW/fqVOnbGIBEYVCuYoNZcNolqt4DpU19cosC3x5hFxgHO/vghmiVKC5imxsRoIhV8lxvuo8YeuuBCPn7EmUXk4nbKMX3AUXz+6v6XDzdSTOm61GlnuyuI+L1NEJE+55APu+Xd1uqy0uRPrBfUicMdvicaaHTsdvWb+1b6fXpaO8uRyBzoGmOyTeA+x/w6jR1bZgYMNhzBgQg/2ZxoqL3xzNx+IRnevcu0tgrDsYAdP+UGvQsyjPbUDYIC+rvUa34K1poM4yxT6xyFmmGS0olJsEG8qzzJ1iljqK4BPqYrXx0+UMrlSSC6RWTIuGIOUFsmHwSMCXnHbvyL6ifdCzxuieTCjDuKBxFh+HTqPBkZ/WEDZXH18Mn7/Q4jE64u57Y0SWeWnjuiHB6EjCtJlI2fMXaooL223Hf/sJceMmmiwLbooR/iPgKnFFo8a4mHRv4V4sS1hmuoOLPxA9A8jZa7Sl/IKnp68nnOW6Fg1+PV8KPxND9ASJTAT/CFdU5CnabSWZ9dfPWeY+S1MZBsVOsehOGRYW1qs/CoViG9hS6jiuXjk4zgMCK2moWZbFnlLy8hbl44Q5YQyQTeqGzUWVAWBPIdl+cshkOIi6TvN2jQs7tqKxhoxkT1ryEM
SSnuWS5paLVtTYaGS5BzmWTSEQCDFl6WOErbVRgdN//NZJDz5igRgzwsjZgN2Fu8134koxCo8hybkBU+JI/fO3xwqgsaJSgitFup6L/LgyLV4FUArFTrj+5bsoFEr/YSMyDNbA9qle+WhOLUpayPe2Ylo0hGkbgA5RYoidgIRFnY5To6zBucpzhG12uOXT/83yel5+4KD4QYgdM8HiMbiYiizbWvo4lmWt5iwDQGhCEqJGjCFsyTu3QV5Z3kkPPreE30JsZ9RloLixuPMOcXMBmTtpS9mAp6fFEKaaZg1OV1vvd8Rd5FdX1gxlo/VLbFsEL3WcbX3PKJT+olfOclVVFd577z3MnTsXSUlJSEpKwty5c/H++++jqqqq6wEoFEr/wr3ZXScZRl15My/lmbX0yizL4vPDZGqwMC9HzB8cAFxcTzZOWAhIO5d+7C3aC7aDTtNR5IgJQZY7usc3/AitukP6NIbB1KWP9Sqiz40s69T66+dMdYKqRQu1kswU0RtnGQAmP/AwBEKjctCg1+Ho+jVmepCM8h8FTxn5HePOGhCIpLycy0j5BcNDXDExhtSa7y8TQK21TnjZN8IVYhm5EJL7YNlvcL+nNM8yxU7p8Z3y999/R2xsLF555RXs3r0bly5dwqVLl7B79268/PLLiIuLw++//27NY6VQKL3FRlLHlVwmp5ZdvWVw87Fc2mCOk3l1uFiiIGxPTY2GqPQ0UJ9HNh76oNmxuM7UtNBpkIlkFh1HZV4OMo7sJ2wJU2bALzLaov6d4ewuhVBMXrptbZGfgqOjFggZuHj2roS5h38ghs0lU8nlnjuN4vRUi/qLBCLMDJtJ2LotxVAUA3mH8Mx0Mrqs0DLYfNHyKLc5hEIBgmI5KeSuV+lrAU0dR6EAPXSWz58/j8WLF6OlpQULFy7Eli1bcPHiRaSkpGDr1q1YtGgRmpubcd999+H8+fNdD0ihUPoF7nQ9N49qf1HShxKMTw/kkGN7OGDh0CDg4k9kQ+9YIGRUp+NUtlTiYvVFwmapBKMtVdy3hE3i4IAJ95p3zi2BETC8BwtbW+SnqCGPx9XbAQJh72cxxiy6h59K7odvLU4lx5ViZMuzkd9gpkBJ4FDAP5G0XViLkeGeGBtJLrr7+mgBNDrrOJPBHClGaWb9dZHa8FJLUhUGxU7p0dXrvffeg16vx6ZNm7B582YsWLAASUlJSExMxG233YZNmzZh06ZN0Gq1eP/99619zBQKpafYQLlrnVaPipwGwsYt9dtTzuTX4UwB6Yg/MSUKYm0TkLGVbDz0AbOabW5U2UXignGBlmXByDp5FOVZlwnb6IX3wMndo5Me3YObWcL2Isvk8Vhr1kDq6ITxd99P2GqKC5F+aJ9F/Yf5DuMVKDEbXWYYYPgy0pa1C2iq5EWXKxQq/J5catFxdAVXktQsV/M04P0C9/dBI8sUO6VHd8rjx49j3LhxWLiw89RHCxcuxPjx43Hs2LEeHxyFQrEyNpA6rjJPAZ22w3Ew/EhaT/mMU63P31WKO4cHA5c2A7oO0U6BqK0QiRl2F5BO1PTQ6RYVItGqVTj68zrC5ubnj2FzF3TZ11LcbDx9HDeybC1nGQAGT5sF79Bwwnb815+gVraY7tABoUCIWeGzCNvuwt3mo7aD7wLEHR5OWD1wcT3GRHpiVDjp1H5+KBdafe8dSg9/Rzi5kUVySq+HFIO3wI86yxT7pEd3SoVCgdDQ0C7bhYaGQqFQdNmOQqH0E1wZxnXQLHNTxvmGukDmZHk1vM5ILpbjeG4tYXtsYgSkIiFfghE7G3AmI4zEMTaVIL2OLCIyJ3yORcdxfvsWNNXVELbJ9z8Mkbj37/EaNh9Z5jrLvtZzlgVCIaY8+Chha21U8LKOdAZXSlOgKEC2PLvzDjJXfsaU5B/BsCyenk7qz0vlrdhyscyi4zAHwzA8adL1KH3NXYhqa1lXKJT+okfOsr+/Py5evNhlu5SUFPj7+/fkJSgUSh/AX6BzPZ
zlvtErf8bRKruIWdw9PAiovASUc65Xw8yXmeZKMDykHhgV0Lm++RpNdbU4u20zYQsZlIjokWO77NsduM6norrVpnLgchf4ufn0LhMGl7DBQxA1YjRhS975JxoqK7rsm+SThACnAMLW5UK/4Q+R2w1FQMFhTIj2xpAQUkP9+aFc6KwQXeZKMcqy5DBYYdxuwZNh2M53jELpT3rkLN9yyy3IysrCv//9b+j1/IUVLMvilVdewZUrVzB7tuU5SSkUSh9znTXLrc0a1JSQVfWskTLuUqkCh7LIaO70QANkYiFwfi3Z2CUQiJ5udrydBTuJ7RlhMyASdF3w9Ngv66BTq9u3GUbQ61RxpuBGlnVaA1oU6k5a9y+qFi1ULWRaQGvKMK4x+X4ylZxep8PRn9ea6dEGwzC8hX67CnaZj5oGDQf8EkjbhXVgGAYrpkQS5qI6Jbal9j4zBleapFHpUV3U1EnrPoLKMCgUAD10ll999VV4enrigw8+QHR0NF588UV8+eWX+PLLL/HSSy8hOjoa7733Hry8vPDKK69Y+5gpFEqPub7OcukVOXEIIokAAZFunXewkM8OklFlD0cxxvmxgKYZSONMzw97ABCQeWw7klWfhRw5Od6ciK4lGOXZV5B5/DBhGzx9FnzCIrrs210c3SQQScn3YCu65cZa8jgYAQMXL8vS7XUHj4AgDJ19K2HLOXsSJRlpXfadHUEGccqay3iZTwhMLfS7sgNorsakGG+EOJG/q9UHc6HvZRTWyU0Kz0CynHe/SzG4Mi0qw6DYKT26UwYHB+PgwYMYNGgQioqK8OGHH2LFihVYsWIF/vvf/6KgoAAJCQk4ePAggoODrX3MFAqlp3BlGP2swii5TN7sg2I9eDmDu0tmRSP2XiaLID0yPhxSIcBk/AFoOkTjGAEwzHz6tu1524ltfyd/DPcbbrYPazDg0A/fEDapoxPG32O+lHZPYRh++jhb0S1zJRgunlIIRX3zUDbmjnshc3ElbId+/K7LVHIDPQciwo18iNmev72T1lcZfBfQscy5QQek/AyGYTA7mPxd5de24K+03keXuVli+nuRHze1JM2zTLFXenwFGzx4MNLS0nDw4EG8+eabWL58OZYvX44333wTBw8eRGpqKhISEroeiEKh9BtcXSsvj2pfvjbL8iJj1pBgrOZkwHBzEGPJ6BAAgCD5B7JxzCzArfMHeJ1Bhx0FOwjbrZG3QtDFeco8fhiVueQisTF33AtH195HzTvDVNlrW0BRw0kb52tdvXJHZE7O/FRyhfnIOHzAbD+GYXBbFFngZE/BHqj1ZqQsDu7AIE4GqOQfAdaAQR4sBviTlSA/O5gLQy+jy8EDSClGZb4CGpWuk9Z9AHfmiWqWKXZKj+6UixYtwlNPPQUAmDJlCl599dV2Gcarr76KKVOmWPMYKRSKtbiO5a7llUo0y0lnJGRg75zlnKom7EwnF3U9MiECzlIR3JX5EFRyqruNeNjseGcqzqC2lcyoMT9yvtk+GlUrjv2yjrCZkghYG64TaquR5b7QK3ckcfot8A4JI2zHf/0RaqX58zEvYh6x3aRtwpGSI+ZfjCvFqM8HU3QcDAOsmEpql3Orm7ErvdL8eF0QGOMOQQcphEHPopyTo7xP4WmWqbNMsU96dKfcuXMn6urqrH0sFAqlr+Eu0LHywjNzcCUYzh5SePj3Luq4+lAucf92kYmwdFw4ACCs9hDZ2C0EiJ5hdrxteduI7YFeAxHpHtlJ6zbO/bkZzXLyvU1+4BEIRdZLFWcK240s96+zLBAKMZmTSk6paMCZLb+Z7RfgHICR/iMJG1eCwyNkFOAzgHz9iz8CAGbE+yKeF13O6VV0WSITwS+SlJn0qxSDe3mgC/wodkqPnOWIiAi0tHSdAJ5CodgWXM2htbM0mMOUBKM3r59f04ztnKwDD40Lh5uDGFA1Ilh+muwwbKnZhX0t2hYcLD5I2LhT9Vwaa6pxfvsWwhaWOBSRw0Z20sN6cC
PLjTW2kT6ugZdjue9kGNcITxyKyOFkar/knX9CXmleN8ydNThedhz1KjOL6Ews9GOu7IBE2wiBgMHT08iqflcqm3h6+u7ClSr15yI/rkzLFr5fFMr1oEfO8uLFi3HkyBFUVvZuiolCofQz10mGodcaUJZNRsR6q1f+/FAeIaF0kgjx8IS2RVuC9M0QGTpIPhghMPR+mGN/0X6o9Kr2bSEj5BWw4HLk57XQaTXGlxH0Tao4U3DTx+l1BjTJVZ207h80Kh1aGzWEra8jy9eYfP8jvFRyh3/8zmyfmWEzIRVK27d1rI5XuZFH4t2AyJjdgzFoEVJ/HAAwJ8EfMb7ORPNPD+T0qpgH93dSX97Sf2kCqQyDQgHQQ2d55cqVmDhxIiZPnowtW7ZAq9V23YlCoVx/uJGhfoosV+YroNNwSlxzFi91h+I6JbamkJXSHhwXDndHCcCyEFzkLOyLmwO4koUouHCn4McHjYeXg1en7Usz05F96hhhS5o5F17BXVc3tQYOLmKIZWSknKsX7m+4EgwwgKu39dPGmcIzMAjD5pIzAfkXzqIw5UKnfZwlzpgWMo2wdSnFcPQEBpKly8PrDrd97wQMVkwjq/pdrmjEgczqrt9AJ/iGufA+536TYnCvD1SGQbFTeuQsx8XFISMjA7m5ubjzzjvh4OCAwMBAREZG8v6ioqKsfcwUCqWncG92/VTuupgzdewT4gIHZ0mPx/viMJnH1kEsxKNXo8ooPQ+mOoPs0MXCvsqWSpytPEvY5kd1vrCvLVXct4RN5uSMcXfdZ8HRWweGYWyu7DXXWXf2kEIk7lz6Ym3GLLoHjm7uhO3QD99Cr+s8gwT3c06vS0eBosD8C3GkGM7qSjCFRwEAtyYGItKbzI/86cGeR5cFQgGCYskHy9L+kmIIaOo4CgXoobNcWFiI4uJisCwLlmVhMBhQWVmJwsJC3l9BQRcXHQqF0m9wb9j9lTqOu7ivN1kwSuVKbL5QStjuHxMKL+er0+nn15AdPMKByKlmx/wr/y+wHaqlOIudMSV4Sqft04/sR3VBHmEbe9d9cODk/O1ruGWvr/ciP17aOCuXue4KqaMTJiwm82jXl5ciZc+OTnoAYwPHwktGziBwF3ryCB0L+MQTJsH57wEAQhPR5bRSBY5kkxUmuwNPt3xF3itph6Xwrg9UhUGxU3p0pzQYDN36o1AoNsJ1kGG0NvFLXIf2Qq/85eE86Dq8D6lIgMcmXc1Y0SoHMv4gOwxbalabzbIs/sgh+8wKnwWZyLR8QK1U4viGHwmbZ1AIkmbO7ca7sA68wiRcGUQ/w8uE4ds/euWOJEyeAb9IcqHdqc2/QNmoMNleJBBhbiT52f2Z+yd0BjP5jBkGGElm4GBydgOKtoe425ICEeZFPiis6oV2OYQjWWppUENe2Q+zCNzrA72fU+yU/q11S6FQri/cm10/LPDjlbiWCuHfwxLXFYpWbDpPRpUXjwqFr8tVx/biz4DOuMiNFYi7XNh3vuo8SppKCNvC6IWdtAbObN0IpaKBsE158FEIRSLTHfoQrrPMLTXd3/R3jmVTMAIBpj20nLCplS04/uuPnfQAbo++ndiuaa3B8bLj5l8o8R5AYlzMx7AG4PxaAIBIKMBTU8no8sXiBpzI7VnKVXc/Rzi5Swlb6ZV+kGLwFvhRZ5lin3TrTrlz504sX74cc+bMwe23347XXnuNyiwolBsKjgyjHzTLXL1yUKx7j0tcf30kHxq98YYtEQrwt8lX10UYDMA5UkfMxs8DnH3Njvl7zu/EdpRbFJJ8kky2baiqRPKOrYQtYugIRAwxXw67r3D15jvL/TE93xlczbR7P8swrhEYOwADJkwhbJcO7kUVRzpzjViPWCR6JxI27veCh8wVSLqXtCX/AOjaMlUsHBqEYA/y81l1ILtHnw/DMLzocklm3y/y414frud3i0K5nlh8x1qyZAnmz5+P77//Hnv27MG2bdvw7rvvYtCgQdi2rQt9F4VCsQ
n4C3T61llmWZa3GKmnKeOqG1XYcLaYsN0zMgT+blejyrn7AXkhsd8w4jGzYyrUCuwr3EfYFsUs6jT12+EfycViAqEQUzgFMfoTrrOs0xig5KRu6y+0aj1aFJy0cddBhnGNiUuWQSztIKVhWRxa93WnDt+imEXE9rHSY6hWdpHFgiPFQEsNkNmWTUNsIrp8rlCO0/k9iwgHx5O/m7JsOfT6vo70cmUY1Fmm2CcWOcvff/89NmzYAKFQiGXLluHTTz/Fu+++izFjxkClUuHBBx+EQmFaD0ahUGwI7s2uj2UYpkpch/Zwcd83R/Oh1hmdA7GQwd+mdMi2c/Ybon2DQyjYYLJQBZcd+TugMRgdPJFA1GkWjPyL55B3/gxhG3LLrfAMDLb0LVgdZ3cpBCLSoWmsvT65lk1JQLjOfH/i4umN0QvvJmxlVy4j6+RRk+1nR8yGg8h4vHpW3/VCP98BMISOI21njbMbdwwLRqAbqX3/9ECOBUfPh/uQqVXpUV3Y1ElrK8G9PlDNMsVOsehO+cMPP0AgEGDXrl34/vvvsWLFCqxcuRInTpzA0qVL0dTUhD/++KPrgSgUyvWln1PHmSpx7e7X/an52mY1fj5DRpXvHB6MIPerzk1dHpBLRogLfGaaXcDIsixvqn166HR4yPj5n3UaDQ6tJZ1xRzd3jL1zcXfehtVhBAxcvThSjJrrkz6Oq1d2cpNALO2/tHGmGD7vdrj5+hG2Iz+vhVbFf6BwEjthTsQcwvZHzh8wdKHTNYx4hDSUnAYqLwEAJCIBnuBEl0/l1+FsQfejy46uEngFkSnp+ly3zLs+0MgyxT6xyFm+dOkSxowZg+nTp/P2/fvf/wbLsrh06ZLVD661tRWvvfYaYmNjIZPJEBgYiIcffhhlZWVdd75KQ0MDfvnlFyxevBgRERGQSCRwcXHB6NGjsWrVKlpQhWJfcKeg+zgbhrVKXH93rACtWn37tlDA4InJHZyQc98T7VmZO8o8xpgd83LdZWTLswkbdyr+Guf/2oKGqgrCNvG+ZZA5OZts35/wMmJcp8hyAzdtXD+Uue4KkUSCyQ+QzmxzXS3Obttssj338y9pKsH5yvNmX4ONnYtWMecB65yxcuDdI4Lh70pGlz872LPoMleKwX0YtTbc3yrNs0yxVyxylhsbGzstLnLN3tjYaL2jAqBSqTBt2jS8/fbbaG5uxoIFCxASEoK1a9di6NChyM/Pt2ic//3vf1iyZAl+++03eHh4YNGiRRg1ahRSU1Px3HPPYdq0aVAqr28ifwqlv2AN/ZdnWafRoyyLU+K6BxIMeYsGP50qJGwLhwYh9FpqLk0LcHE9sd8wZAn0AjJ7ABduVDnQKRBjAvgOdmNNNc5s2UjYAmLjMWjSNF7b64GpRX7XA17auOuQCcMU0SPHIjSBXLB5btvvaKis4LVN9E5EtDsZCe5yoZ9QjCKvKaQtbSPQ2gAAkIqE+NvkSGL3sZxaXCjq/gI9rhSjsqARamUfBny41weqWabYKRbdKVmWhVBoejpNcFXTZO18yu+88w5Onz6NsWPHIjs7G7/99hvOnDmDjz76CDU1NXj4YfMVua7h5OSEf/3rXygsLERycjJ+/fVXHDhwAJcuXUJoaCiOHz+Od955x6rHTqHYLP2YOq4spwE6rfH1GKZni/vWnChAi8YYVRYwIBdOpW0E1B3XTDAwDHvI7JhKrRI7C3YStoUxCyEw8fBw+KfvoNN00F0zDKY/9Dcw/ZB2zxK45aQbr1OuZV7auOu4uK8jDMNg6tLHiM9Lr9XioInFfgzD8KLL+4v2Q6E2vyanyHsKWEGH1IFaJZC6oX3z3lGh8HEhH956El0OjHWHUGR8H6yB7dusGLzUcdRZptgntnG156DRaLB69WoAwOeffw5nZ+NU5/PPP4/ExEQcOXIEFy5c6HKslStX4oMPPkBoaChhj4mJwfvvvw8A2LBhg6muFMpNSP/JMIrTyZyyfhFukDmJuzWGolWLdS
cKCdttSYGIuFZOmGWJBVUAgNhb2qr2mWFnwU60aFvatwWMgJdrFwAK0y4i58xJwpY0Yw78IqN5ba8XfBnG9XGWuRHt67m4j4t3aDiG3DKPsBVcPI/c86d5bW+NvBVigfF7qjFosDV3q9nxVWIPsHHk+Dj7bfvDqUwsxOOTyOjy4awapJY0WP4mAIglQgTFuhO2ovTabo3RHXipJWmeZYqdYrGz/MMPP0AoFJr8Yxim0/2iHiTqP3HiBBQKBaKiojB06FDe/jvvvBMAsH379m6P3ZGkpLapufLy8l6NQ6HcKHA1h32ZZ7kog3SWwxK6H1Vee6IATWpjqjaGAVlKuOgkUJ1BdhplPl0cy7L4Les3wjY+cDz8nfwJm16nxcG1XxM2mYsrxt/7QDfeQd/DdUqVCg20HSLx/YFeb0BzPamVthUZxjXG330/nNxJbfGhtd/wFvt5yDwwI3QGYduYtbHrhX7DObOd9Xlt6QyvsmR0GLydJUSTnkSXQxPI0tzFGfU8eZX14GqWaWSZYp9Y7CyzLNujv57IM1JTUwEAw4YNM7n/mj0tLa3bY3fkmu7Z39+/i5YUyk0Cr9x130wuNVQredPyYQne3RpDodTi++Nk0aO5gwMQ7etiNHDSxcEzCog0ryVOrUnFlforhO3e+Ht57S7s+BPycrJa4MTFD8LB2YXX9npiKoLb37rl5no1b4beliLLACB1dMJkTk7sproanPrjV17be+LvIbaLm4pxqvyU2fHZ0HGA7yDSePqL9v86SIR4bCIZXd6fWY30su6lXQ0bRDrLykYNakubuzWGxVAZBoUCALAo7GttPXJXFBe3pYgKDjadv/SavaioqFevs2rVKgDAggULumyrVquhVht1i9cWNGq1WppRw0KunSd6vrqHNc+bXq8jtg0s2yefR0EaWczBwUUMNz9pt17rm6O5aFKRUeUnJ4Ubx2gshyhzOxH70g9/GAa93uw5++XyL8R2sHMwRvuOJto21dXi9O+kE+UXGY34CVNt7/srABxcxWhtNB6XvLIZrj7mFziaoqfftfpKMt+vxEEIgbhvvlu9IWrkWAQPTEDp5fR224W/tiB23CR4BYW02wZ7DEa0ezRyG3Lbbb9k/oJRvvy83e3nTKcDM3I5RDueNe7MPwRtWRrgOwAAcM/wQHx1JA/yDovyVu3Pxhf3DbH4PTh5iuHqLSPyaeenVcM9QGamV8/g3vn1Op3VPlN6P+g+9Jz1DGucr+5rJPqB5ua2p2RHR9Oph5yc2vSKTU09T8j+1VdfYf/+/XB3d8dLL73UZfv33nsPb775Js9+6NChTo+TYpp9+/Z13YjCwxrnzSs7Gx3jUpVVVbi4c2en7XtK7XkHdLy8MK5K7Nq9y+L+LVrgu4tCdJwGHuJpQO6FY7jmvgws+w0xrFFuoBNIsKfSE7oO74d7zpoNzdjbuJewJegSsHvXbsJWeXw/tGpyel4cPQi7dpPtbAW9wBGAcRH26aPJyCjp+Q2iu9+15mIxAKOzxoo12LXL8s+7PxFExANXLrfriQ16Pf746D0ETp9HpEobqB6IXBid5WNlx/Dz9p/hIeTn4QbazpnA4IRZIhdIdcZ7U9kfryA11Ji+brw3g7+KjZ/VvsxqfLtpJzgplM2id5ICtUZJR9qJPFTq08306Bl+pSVw67BdmJ+Pc1a+XtD7Qfeh56x7WCPjmU06y33NsWPH8Oyzz4JhGKxZswaBgYFd9lm5ciWef/759u3GxkaEhIRg6tSp8PLyMtOTcg2tVot9+/Zh5syZEIu7t9DLnrHmeavLzYP8wMH27YCgQAydO7e3h0ig0+jxw/7T6BiXGjtrMKKG+Vg8xsf7cqDWGyUYDAO8e98ExPheXeyraYbo0xVEH2boA5g1+y4AnZ+z79K/gz7N6GBLhVK8OO9FuEmNLkFhygXkFpPyj0FTZmD6g8ssPv7+5lBtFnLOGaP5wX6RGD/XdLpPc/T0u3bmzwI0ZBglK0ERvpg1d2
C3X7+/OClgcX67MSVca3UFotydET9+crttinYKDm49iGZtW/CGBYu6kDosGbKEGIt7zgQuV4Dj/2vfH9ZwGkEPfA04tcmQJqp0OPbxUShajbMml/RBeGwumd7OHMVh9dj9lVGrr20QYfqUmZA6Wve6Wn32HBrPGfNMh4eHYaSVrhf0ftB96DnrGXV1dV036gKbdJavZb/o7GmgpaVtFbuLS/e1g+np6ViwYAE0Gg0+/fRTLFy40KJ+UqkUUil/WlMsFtMvbTeh56xnWOO8CTgaRIFAaPXPojyrEXpOyrjwBB+LX6e+RYMfT5PV+m5LCsTAoA4RveSNgLpjbncGwnFPQch5jY7nTGfQ4fdcMmfunIg58HY2aqm1KhUO/0DqoGXOLpi05CGb/s66cwqANNepe3W83f2utcg1xLa7r5NNn69xdy5G1qmjaKqtabcd/2UdYkaOaS804yZ2w4LoBfg58+f2NlvztmLFsBWQCs3cC0YvB06uAgxtkX1Gr4Y4dT0w+Z8AAE+xGI9MiMTH+4wFcXZfrkJBvQqxfpbd08IGekMoFrT/zlgWqMhpQswIvy56dg8BZ4G+AIzVP1d6P+g+9Jx1D2ucK5tMHXctzVtpaanJ/dfsYWFh3Rq3oKAAs2bNglwuxxtvvIGnn366dwdKodxo9EOeZW4WDP/I7qWM++ZoPi+v8jPTY4wNDHpi4RQAIG4O4GU+knqk5AiqlFWEjbuw79TvG9BYQ+qtJ93/EBxd3WDLuPpc38Ik3Ndz87a+ftaaiGUyTFv2OGFTKhpwcuPPhO2eOHKhX4O6AXsK95gf3MUPGHwnaTv3LaAzPlAsHRcOF5nREWVZYPXBXFiKyEQKOW6qRqvAq3ZNU8dR7BObdJavpXRLTk42uf+aPTEx0eIxKyoqMHPmTFRUVODZZ5/F66+/3vsDpVBuNNi+Tx1XxLlphw6yXKZU26zGDycLCdvtQ4IQ5dOhrHTWTkBOtsHYp7oce0MWmU890TsRg7yM2Qtqigpw/q8tRJvgAQlImDLTomO/nrjxqvip+jXNFze3s61lwjBF1IjRiBw2krCl7NmByjxjOrcItwheVccNmRbk5R/zBLndXAVk/NG+6eYgxkPjI4gm29PKkVtteVaLME4KuaLL1k8hxy28wy3iQqHYCzbpLI8fPx5ubm7Iy8tDSkoKb//mzZsBAPPnz7doPLlcjltuuQV5eXl46KGH8Mknn1jzcCmUGwb+zc66znJDlZJXQY57UzfHN0fz0ao1RpWFAgZPd4wqA8ApTlQ5IAkIG2923Kz6LJypOEPYOkaVWYMB+75dTeShFghFmPHoU8SiL1uFG1nW6wxoUag7aW1d1K06qFvILCs3grPMMAymPfQ4RGLjQjmWNWDvN59BrzO+H+7sQ3pdOlKqU8wPHpAEhE0gbac+J1KvPTw+HM5SMrr8xSHLo8vch9DWRg1qSnq+6N0ktNw1hQLARp1liUSCFSvaFu889dRT7RplAPj444+RlpaGyZMnY/jw4e321atXIz4+HitXriTGUiqVmDdvHi5duoS7774b33777Q1x86NQ+gTuzc7KMgyuBMPRVQLvEOdOWpNUN6nw46lCwrZwaJCxWh8AlF0AismKehi7ostKhD9d/onY9pR5Ylb4rPbttAO7UZGTRbQZdfud8AoOwY2Ao6sEIjH5WfaXFIP3Ogzg4mnbMoxruPn6Y/QiUmpRU5iPCzu2tm9PDp6MAKcAos2Pl3/senBudLkyDSg25mp2d5Rg6ThSSrg1pQyFtS2wBHdfR17hl+IMK0sxuL+rfk4jS6HYCjbpLAPAK6+8gtGjR+PkyZOIiYnBPffcgzFjxuCFF16Aj48P1qxZQ7Svra1FVlYWKioqCPvLL7+MU6dOtVcTfOSRR7Bs2TLeH4ViF/A0y9Z9cOTerEMHeVr8cPr1kXyoOiwMFAoYPD2NU1aaG1V2CQQG3m523BplDXYU7CBs98bd275Iq1lej2O//EDsd/cPwO
jb77bouG0BhmF40WVFzfVxlp3dpRCKbfbWwmPkbYvgHUI6rac2/QJ5RRkAQCQQYckAMgPGgeIDKGkqMT9w3BzAnbOu5tTnxOYjEyLhKDGmkTOwwOfdiS5zpRjp9Rb3tQje9YFGlin2ic1e0WQyGQ4dOoRXX30Vjo6O2Lp1K4qKirBs2TIkJycjMjKy60HQJsEAAL1ej19++QU//PCDyT8KxT7gVvCznrOs1ehRltVA2Cyt2lfVqML602SRoTuHBSPMq0NUWVEKZJCaYox6DBCRJYS5bLiyATqDcVpdIpAQFdoOrf0aaiUZzZvx6FMQScyPa2twpQ8di1b0JY015OvcCBKMjghFYsx6/Bnit6DTarDv28/bZUt3xNwBJ7Hxu2hgDVh/eb35gQVCYPTfSNuVHUCt0Rn2dJLggbGkQ/3HxTKU1FuWF5YrcaoqUEDVYr2CFQxHhsHSyDLFTrFZZxkAHBwc8NZbbyE3NxdqtRoVFRVYu3atycp+b7zxBliWxbp16wj7unXrLCrLTaHYA9wFQNybYW8oy5JDr+uQMk7AIGSA6QIOXL48nAd1h74iAYMV3Kjyma+BDkVIIHYEhi8zO26rrhUbszcStvlR8+Ep8wQAZJ8+juwzJ4j9AydORdjgIRYdty3BXeTXb5HlOu7ivhtDgtGRgJg4DJtNroEpyUhD+uG24g/OEmfcEXMHsX9L7hYo1F2Uqh56PyB17WBggZOfEk0emxgJWYdIvN7AWhxdDopxJ6L4LAuUXLZidJknw6D3Sop9YtPOMoVCsTJ9mDquOIO8SftHulpUJKFC0YpfzpJ5le8aEYIQzw65g1WNwAXODNCQJYCjp9mxt+dv5zk0Dw58EADQ2tSIA2u+IvbJXFwx+YFHcCPi6kM6qddLs3yjRZavMf7eB+DiTRbOOfLT92hpaJudXDJgCYSMUTLRqmvF5uzN5geVuQIjHiJtqRuApsr2TW9nKe4fTUaXN10otUi73JZCjnwg5Waj6RXc6wMNLFHsFOosUyj2BPdmZyUVBsuyKLxUS9gsTRn3xaE8aDpElcVCE1HlC2sBwull+AuoOBhYA36+QubNnRg0EZHubRKuQz98C6Wigdg/7aHH4ejmbtFx2xp8GUZ/Ocs3tgzjGhKZA2Y+SqYgVLe04ODarwEAgc6BmBlGphH8JfMXaPVdyB7GPAkIO0h69Brg9JdEk+WTIiEVkdHlT/ZnwxLCEsgHxqKMOhisFAHmpZakeZYpdgp1likUO4Ll5Vm2ziWgvrwFTXWk0xQ+uGtnuaReiV/PkVHle0aGIMi9g8OlVfEWRiF+XpdFSLJ0WShpJhdhPTioLaqcd+EsMo8dIvZFjRiN+HGTujxmW4WbGaG1SQuNStdJa+vAGlgTMowb01kGgIihI4iS1wAp1bk2K3GN6tZq7C3ea35QF38giUw/h/NrAJXx4c/XVYZl48KJJttSy3GlshFdET6YXBegataiKr8LeYilcGQY/Zm7m0KxJaizTKHYE9ybnZU0ywVpZFTZxVMGr6CuU8b93/4caPXGY5IIBXhqKieqnPpLW1GHjkx43uy4LMvimOoYYYvziMNo/9FQtTRj/7eriX1SRyfMeOTJGzqtpIuXjDdT0NfR5RaFGgYd+Z26ETXLHZm6bDlkLq6Ebf93X0DZqMBgn8EY5juM2PdD5g9dr3sZ9yyID0fdCJxfSzT52+QoXt7lj/Z2HV129XaAZ6ATYePO8vQY7vWByjAodgp1likUe4Inw7COc1jIcZbDE727dDxzqpqw5SJZ0n7JmFAEuHWITOp1wIlVZMeISUDwcJjjQvUFFOvJiPWDgx4EwzA4un4NmuWkvnrKg4/C2dPy4im2iEgshLO7lLD19SI/rjMuEgvg6HpjZRHh4ujqhmlLHyNsrY0K7P+uLTvGtdmJa+Q25CJLR+bo5uEdDQy4lbSd/hLQGQvHeDhJ8NhEMsvTvstVuFgs7/KYwxPJ6HJBqpWcZa4Mg2bDoNgp1F
mmUOwJrubQCnmWWxRqVBWQ08URiV2njPvf3iwi0O0oEfKjype38ktbdxFVBoA1GWQe9gCnAMyJmIPCtIu4dJCcNg9LHIpBU2Z0OeaNgIsXGdXlSmOsjYKTNs7F2+GGjs5fI37CFESPJMtc55w5iSsnj2JqyFREupFO7WHV4a6jy+P/Tm43VwKpvxKmhyeEw4OzKPZ/e7twxMH/vckrlWiotiz9nFm4MgyqWabYKdRZplDsCG6eVGukjuOuvhfLhAiMdTfbJ6WkAXsySGnFoxMi4O3cITLKssDx/yM7Bg4FIqeYHTu9Nh2nK08TtocSHoJeqcaer8gotVjmgFnLn74pHDwAcPXiLPLrY2eZq1d2u8ElGNdgGAYzHn2KJ8c4+P2XUDY04NHBjxL2Un0pzlWdMz9o8HAgfCJpO/kpYDCmQ3SRifHkFPKB8URuHU7mmo8U+4W7wsGFdLK5sz09gXd9oCoMip1CnWUKxZ7g3uys4CRyp3xDB3pBKDJ/afnfHjJa5u4oxqOTOIWGcvYBVZdI24Tnuzzmb9O+Jba9ZF5YGL0QB9Z8ieY68lgnLXkIrj6+Zse7kXDhOKtNfaxZvlnSxpnCyd0DMx55krCpWpqx79vVuCX8FgQ5BxH7uLMZJhn/HLldl9tWqKQDD4wNg58rKaf5cG+W2cg1I2B4C/2solvmLgCmMgyKnUKdZQrFnrBynmWdRo/STFL/G5FkXoJxMrcWxzmRsienRMFVxsnJfPxjctsrBojn6D455MhzcLDkIGF7cNCDKDxzBldOHCHsoQlJSJox2+x4NxquHBlGn0eWb/DqfV0RN3YC4saS0eD8C2eRc/wYHk54mLCfrTqLtJo08wNGTwf8BpO24x8TawlkYiGemR5DNLlY3IADmdVmh+bqlstzrFDNj1ftmjrLFPuEOssUij3BSx3Xu8hy6RU5dNoOVfsYIMxMfmWWZfEBJ6rs7yrDg2PDyYZFp4DiU6RtwnNdOvffXfqO2HYRu2Cez0zs//4Lwi51csLsJ/9utdR5toIpGUZfVii9Gar3dcX0R57g5d4+tO4bTHUbCx8HsogJd1aDB8MA458lbeUXgdwDhOnuESEI7ViUB1c1/mZSt4UM8CRmdFgDi+LLvStQwv190Gq3FHvl5rpTUCgUs/Budr2UYRRwpnoDot0hc+68at/ey1VILWkgbM9Mj4FMLCQbHv+E3HYNAgbfbfZYShpLsLtwN2G7J/YeHPvuG6hbyGpo0x95Ei5eXS9CvNHgLvDTqfVQNfcyutgJWo0eSoWGsN1skWUAcHBxxczHVhA2tbIFB75cjQcHkJkxDpceRlZ9FwvyBi0EPCJI25H3ieiyWCjA8zNjiSZXKpuwPa2802HFUiGCOeXlC3ubFYOrWaZ5lil2CnWWKRR7wop5llkDy08ZN7hzB1RvYHla5XAvR9w1IphsWJEK5OwhbeOeBkTmU5J9lfYVDB0i5xJIMKzMF0VpF4l2ceMmYQCn8MTNgrOHlDdb0FdSjKZa/rg3o7MMANEjx2DgpGmErexKBqKuCOEmcSPsX6WSJdR5CEXApH+QttJzQB4pH5qfFIhYPzJX+Sf7sqHVdy6F4P7+ijLqoTfTvku4D9NUs0yxU6izTKHYEzzNcs8jy9XFTbzIojm98taLZcipbiZsf58ZC7GQcxk6/AG57eAJDCMjeFwKFAX4K/8vwjZeMxQXNm0ibM6eXpj+iPky2TcyAqEALp7k4rC+KkzCHdfBRQyxVNhJ6xufaQ89DjdfP8J27o9NuMd5DmHbX7wfl+sumx8s8R7APYy0HfmAiC4LBQxemBVHNCmsU+LXs2T+8I5wnWVNqw4VOQ3mj8UcvHLXNLJMsU+os0yh2BXWk2EUpNYQ2+5+jnD3czTZVqMz4JP9ZDWyAQGumJ8YSDasSAWyyOwAGPc0ICErlHH5MuVLIqrszDgg6rwKei3pzN/yxHNwcHYxO9aNTn/lWr6ZylxbgtTRCXOf/geh42UNBk
h258JFR37vP0/5nNudRCgGJr5A2krOAAXkItRZA/2QFOJO2P5vfw6a1abLmDt7SOEbRn6/83shxeBrlmlkmWKfUGeZQrEjWI4Mozd5lvMvks6yuUIkP50uQqmcdK7+eUssBNzIlamo8iiymhqXHHkOT6t8Z+lQ6BoaCNvQOfMRnjjU7Fg3A/2Va7mx9ubOhGGKwNgBGHvHYsLWXFuDOZlk2sOjpUeRWpNqfrCkxYBbKGk7TEaXGYbByjnxRJO6Fg2+OZLX6bC8an4pNb1YmMeVYdDIMsU+oc4yhWJPWCl1XH1FC+SVZIWwyKE+JtsqWrX47GAOYRsR5oGpcZz8xp1FlaXmI8FfpHwBtkPEPK7GE0gtI9p4BYdi4n3LzI5zs8CPLPeNDIMbseamrbtZGb3wbgTFDyRsjiVNGFxNfv8/v9hFdFkkASZyqlEWnwQKjxGmMZFemDGA/K18e6wAVY2mH4K4v8NmuRrVhU3mj6UzuNcHKsOg2CnUWaZQ7AleNoyeDcONKju5SeAX7mqy7ReHc9GgJDMyrJwbz6+ad+S/5LYFUeXMukzsL97fvu2sFGFsGpkRQCSRYv7fX4JYIuV2vynhRni5EWBrwZVhcJ30mxWBUIi5K/4BqSMpDRqe5gqXFlH79qmKU7hQdcH8YEOWAK6cBa7c2RUAL86OJ+TDrVo9/o8ja7qGZ4ATTw6Vn2I+R3Nn8FJLUhkGxU6hzjKFYkdwNYc9zTOcn0I6y5FDfU3mbC6VK7H2RCFhm5Pgj+FhnmTDilTgCrlAD+NWdBlV7qgNFRiAaal+AEfPOe3hx+EVHMrtetPCiyzX902uZX5k+eaXYVzD1ccXMx4lq/tBo8PMlAAI9cbfweqLq82fe5EEmPh30lZ0HCg8Tphi/Fxwz8gQwvbbuRLkVPEjxgzD8KLLeck9lGJwHmi5Mi4KxV6gzjKFYk9YIXVcY20raorJm3RnEoyP92ZDozM66CIBg3/Njuc3PPw+ue3gAYxabvY4kquScaTUuCBqWJY7POUiok3cuElImDLT7Dg3G1ynVa81QNmo6aR1z9C06qBWkg8l9hJZvkb8+MkYNHk6YXNVCDA6wzizcb7qPE6UnzA/0NAH2vKId+Tw+7xZoL/PiIVDh3zkBhb4YPcVk0NGcX6PippW1Je3mGxrFu71gcowKHYKdZYpFHvCCkVJuFFlmZMYgdFuvHbpZQpsSSG1w0tGhyLCm5PZouQckLWTtHWhVWZZFh9d+Kh9O7jaAQkF5DGInV0x9aG/8eUeNzlObhIIRJxcy1aWYjTV88dz9rQPmUtHpj/8BDyDyYhvbKkLokuN3/GPL3wMvUHf+SAiKTCBE10uPAbkHyJMvq4yPDaJXEi4P7Map/P5Vfp8Ql3g7EF+Hnkc6ZRFcGeLaJ5lip1CnWUKxZ6wQp5lXhaMJG8IhNwUUyze25VJ+OYuUhGemR4DTkPgwJukzdGry6jyvqJ9SKtJaxu3RYSJKWSJbYFIBL8J0yFxsB9pwDUYAQMXz75d5MfNsOHoJoGIW4XRDhDLZJj79D/BiMgZjTHpnvBobKtkmSPPwba8beYHMhVdPvAW7+F2+aRIeDuTxXne25nJk1iYkmJwf7cWwZVhUM0yxU6hzjKFYkf0VrPcolCjIl9B2ExJMA5n1+BELhnx+tuUKHg5c6KP+Yd5q/8x8QWzUWWtXotVyasAACIdg6nJPpDqSEdt4n3LIPO8+cpZWwo3M4XVI8vcHMt2JsHoiGdQCHxHTSJsIoMAU5J9INa2OZurU1ajVWfmgUUsAya/SNrKLwKZ2wmTs1SEZ2eQZbBTSxXYcamCNyRXilFX1oyGaiWvnTl41weqwqDYKdRZplDsCd7NrnuR5YLUWmIMsUyIkHhysZ7ewOL9naSW0t9VhofHR3COhW2LnnXENQgY8YjZY9iYvRHFTcUAC4y75AXPJjLSFjt2IhJnzrXsDd2kuHAyYvR1ZNnFjhb3mcIlPAqDZ8wmbG5KMcZf8g
JYoFpZjfWX15sfZMgSwCuatB18B+BIOO4dGYJIjpTpv7uziLUBAOAf5Q4HFzFh63Z0matZpjIMip1CnWUKxZ7oZZ7l/ItkCqrwwd4QiskxfjtXgizOKv0XZsXCQcKZps/cDpQnk7bJL7ZF2TqhSdOEr1O/BgAMLHRBZAXpNHgFh+KWvz1jdzplLrzIspULk3AzYdjb4j5TTFzyMPwiSZlReKUTBue1pVT8Pv171KvqOx9AKAKmvkzaarOAtN8Ik1go4C2SLa5X4oeThYRNIGAQMYQjxUjprrPM2abOMsVOoc4yhWJP8GQYljuVqhYtyrIaCFsk52bcqNLio71ZhC3e3wWLhnFyyRr0bVGzjnhFt0XXzLAmfQ3kajn866QYcYXMpyxxcMSCf7wMicy+o5xA31fx4znLntRZFonFmP/3lyBzcibsw7LdEVzlgBZtC75K/cr8IANvB/wHk7ZD7wE6NWG6ZZAfhoeR3/9PD+SgtplsF8X5fVYVNJpcnNkZvHLXVIdBsVOos0yh2BG8XKvdiMDmp9TA0CH1nFAsQOggUoLx2YEc1LWQacpevXUghFynPHVDW9SsI1NfbouudUJJUwl+zPgRjq1CTL7oAwFLjjn36X/AIyCok972hYs36bw216uIz6632Gv1vq5w8/XD3Kf/QfyuGDCYlOoNtyYxNmZtRI48p/MBBAJg+uukTVEMnF9LmBiGwau3klUEm9Q6fLSXLFQSFOcBiQP5m8pL7kaBEp4MgzrLFPuEOssUij3RizzLOeeqiO2wBC9IZMYbcUFtC9ZxpoJnDvTD+GjOQjuNkh9V9k9si6qZ4cNzH8Kg0WL6BV84aEhJx9g7FyNq+CiL3oc9wI0sG/QsWhrUnbTuHhqVDqoWsiIjlWEYiRg6AhMXLyVsEp0A0y/4QKhm8cHZD8wXCImeAYSOI21HPgBaGwjTkBB3LBpKPhz+dq4Yl8sb27eFIgEik8jfX8757jjLNHUchQJQZ5lCsS96mDpO2ahBWZacsMWM8CO2391xGVq90QkQCxm8PHcAf7BTq4Emzur96a+b1U+fKDuBw8WHMCnFG16N5IK+iKEjMPaOxRa9D3vBwUUMEUdLbq1FftyoMkBlGFxG3nYHBkyYQthclWJMueiNs+VniBLtPBgGmMGJLrfWA8c+4jX91+x4XqGSt/7KIJzx6JHk77S6sBGKGgu/C7xy1zSyTLFPqLNModgTPZRh5CVXE11FUiHCBhtzGx/LqcH+TDJi9fD4CIRzC5A0VQHH/4+0RU0DYmZ0+tpavRbvn30fw6+4I7TakdjnERCEuSv+0eOy3TcrDMPwor3W0i1znWVHVwlE3MWbdg7DMJj5+NO8BX+BdQ4YecUDH5770HwqudAxwIDbSNuZrwB5EWHyd5PhySlRhO10fj32ZFS2bwfHe0DmRGbFyL1AzhJ1+j64mmXqLFPsFHqHoVDsCF7xAgudzJzz5M01ItEb4qsOkk5vwNt/XSb2eztLsGIaJw0WABz+D6DtWHaXAWa+bfa1f7nyC8TptbwKfTJnFyx88TXInJ076WnfuHLSx1kr1zI/bRyNKptCLJFiwT9fhpM7uRBvYKEr3DOasTZ9bSc9rzLjDUDQQW+s1/BTLQJ4bFIkgtzJz/rdnZlQadtSzgmFAkQNIxf65ZyzUIpBZRgUCgDqLFMo9gX3ZmeBZrmpXoWKXLIQSUyHqd1fzhYju6qZ2P+PWXFwkZHRLFRdBpJ/JG1DlwD+CZ2+dm1rLX7f/x3GZpALCQVCEW574d90QZ8ZuE5sU621ZBjkONRZ7hwXT2/c9sLLEHIq/I267IE9+35GWXNZJz0BeEUBIx8jbembgdILhEkmFmLlXDKVXEl9K9acKGjf5kqm6sqaUV/egi7hXh9oZJlip1BnmUKxJ3gyjK675F4go1BSRxFCB7Q5r/IWDT7eR67AHxjgirtGhPAH2vcambpO7MjPK8vh4z3vYMxZZ17mi5mPPYWQgYM76UUB+i
59HDf1GM2EYZ7A2HjMevwZwsaAwbhkN3y87XXz0obJ/wJk5IwK9r7M+x3PGxyAUeHkA+Xqg7moamz7rAJi3OHoRmr9cyyQYvBSS9Jy1xQ7hTrLFIod0ZNy17kcCUbkEJ/2QiQf7s1Cg5LMjPD6fBOp4nL2A7n7SNu4pwHXwE5f92D6Lki3XOGVsh654E4kTJ3Z5XHbO3zNct8s8LP36n2WMHDSNIy7m8whLjII4LmrDDuSN3fe0dETmPRP0lZ8Cri8lTAxDIPX5g8kVBNKjR7v7sgE0FagJHq4L9En93x11xpkrmaZpo6j2CnUWaZQ7Ilupo5rqFKiuoisxndtSje1pAEbzhYT++YO9sfoSC/CBp0G2P0iaXP2A8aR0baOyBuqcWzVajiryOnr0OHDMfHeB80eM6UNV06u5Ra5Gnp97yODVLPcM8Ysuhdxk6cQNplGiOTP16KmzowcY9RywD2MtO15BdCQMoqEIDfcPZyc0dmWWo6TubUA+FKMhiolaktI+RQfmg2DQgGos0yh2BfdzIbBXTXv4CJGUJw79AYWr2xNJ4ZzlAjxyryB4HHmS6Aul7RNexWQml6Yp1WrsObNv8O5kTw2SbA3bn9mJc18YSFcGQbLtjnMvUGj0kHVTM4kUBmGZTAMgznLn4NzbChhd2oRYN1bL0Cj6iTyL5ICsziLYBtLgeOf8Jr+a3Yc3BzItQKvbcuARmeAX4QrL8Ufd+EuD+4MEV3gR7FT6F2HQrEnupFnmWVZZHNWzUcN84VAKMCGs8W4VEYu+ntmegwCOavy0VgBHPkvaQsa3mlZa71Oh1//+ypQTo6tchXg4df+D2IZdcwsReokgkhKSlhM5UjuDqZKJdMcy5YjFImwbOWHUHuRDq2gshnr330ROo3GdMcBtwERk0jbiU+B+gLC5OUsxT9viSNsudXNWHOiAAzDIGYkKcXIOV9lVlrBTx1HnWWKfUKdZQrFjuhO6ria4ibIK8ip3pgRvqhrVuPDPWSp6mhfZzw8PoI/yP7XAQ1nqnfOhyYLkLAsiz1frUJ1eiZhb5HpsWDla3Byc+/0WCl8GIbhObKmnN3uwHW2HVzENMdyN5E6OuHule+gxUFP2OXZ+dj2f+9Br9PxOzEMMOe/ANPhXOvVwB7+AtnFo0KRGEwuCvz0QA7KG1oRzZFiNNerUZ7T0PnBcmeeqAqDYqdQZ5lCsSe6IcO4crqS2HbxlCEgyh3v77oCRSs5Ff/WgkGQiDiXk+LTQNpvpG3o/UDwcBOHxeLg2q+ReewQYVeL9fC4fzISIkd0epyUzulrZ5ku7usZ0SGDELh0DlQS0mEuuHAOu7/4BKwpuYPvAGD046QtaweQS1YDFAoYvL0ggbfY750dl+Ed7AzPQLJQ0JXTnGqaHeGuaaAyDIqdQp1lCsWesDDPsl5nQM5ZUs8YN8YfySVybLpQSthvSwrEuChv7gDATs4qfqlrW1lrDizL4shP3yFlz1+EXScwIGOyEMun/d3MG6KYg5drubcyjDqaNs5aLJ38BLKnO0AjIn+TV04cwYE1X5rOVDH5RcCR81vb9SKgI7XoSSHuuHckqY3eeakSx3JqETfGn7DnJddAqyad9naoZplCAUCdZQrFvuCljjMdWS5Kr4OqhYweR4/0xStbMwibs1SEl+cN4A9w9mugMo20TVkJOJOaSZZlceyXdbiw40/CbmBYHB5Wi7/PfwNSodTcO6KYwcWTPHe9jSzTTBjWQywQ41/z38bBkbXQCcjfZeq+XTiyfg3fYXZwb6vs15G6XNOL/W6Jg4cjqY1+fVsGwof5EFFnrVqP/JQak8fIcGaeWKrDoNgp1FmmUOwI3s23ExlGFkeCERDlhs3ZVcisaCTsz82IgZ8rx2FqKAEOvkvafAYAozjVyACc3PQzzm37nbAZwOLokFpMmXwHRvhT+UVvsH5kmczYQCPLvWOQ1yAsmPwgDg6vgZ4hf5sX/tqCIz99z//NDlkCBHF+F8c+AmpzCJ
OHkwQvziYr+xXUtmBdcglCBpIFTK6c6kSKwV1bQPMsU+wU6ixTKPaEBXmWVc1aFF6qJWzeCZ68Sn1xfi5YOi6c7MyybfILLaeU7vz/A4RklOv077/i9O+/kt3B4lhSLdg4HzwzrPM8zBTLcPEkNcVNclWvCktwI9NUs9x7Hk98HM6xoTgytAYGTuT2wo6tOPTDN6TDLBAAt37CWeynAf76O29Nwt0jQjA01J2wfXE4Dy7x5ALA0iy56VkHqlmmUABQZ5lCsS8sSB2Xfa4KBr3xpisUCfBdUSXUOmNfhgHeu2MwxELOJSRzO5C9i7QNXwaEjmnfZFkWJzaux4mN64lmLFgcT6xDYVAr3hn/DhxE1BHrLdzIr0HHQtnUSXqyLtCq9WhtIqU5NG1c7xELxXh3wrsoD9DieFIdT+pwcdd2HFjzFbnoLyARGPskOVDhMSB1A2ESCBj8Z+FgiDr8znUGFp9eKYNE1sHZZoHss+RsEgBeTRJalIRir1BnmUKxJyyQYWRxVseLQh1xtLCesC0dG45hoR5kR1UjsOtfpM3Jh9BYsiyLwz9+x4soA8DJwfXIC27B0kFLMcR3SJdvhdI1jq4SCETkZ9xTKYbJHMtUhmEV4j3jsTxpOfKDWnAsqY4XYU7duwP7v/+CdJinrATcyIp92PMy0FJHmAYEuOLxyZGELblMAUMw+TB65VRll6kluyyPTaHcpFBnmUKxI7q6GdaXt/DKW2+VkwVCAt1k+Aen8AEA4MCbQBNH+zj7fcChzak2GPTY981nSN75J6/rqUF1yAlpRoRbBJ4a8pSlb4fSBYyAgbOHdXTLpnIsi6U0x7K1eHTwoxjgOaDNYR5SCwNHw5y2fzf2fPUpDPqrmSskTsDc/5GDtNYDe/7NG/vpaTGI8CZTxm2slRPbDVVKVBWSaxKoDINCaYM6yxSKPdFF6jjuQh+dhEGGgZy2f/v2BDhLReQ4BUeBc9+RtqhpQMIdANoq8+387CNcOriXaMKCxcmEOmSFNUMsEOODiR9AJqLRSmtirVzL3MV9VIJhXcQCMT6Y9AEcRA4oCFTiiAmHOePIfmz7+D/Qaq6mioubDQxcQA6U9iuQRUqhZGIh3ls0mLDlsTqopOSsw5VTHCkGrygJjSxT7BPqLFMo9gRPhmH8r15n4BUoSGY0YDu0mZ8UiOkDyCpgUDcDf64gbSIHYN5HAMNAq1Fj28f/QdbJo0QTA8PiaFItskPbKvw9N+w5DPAykYaO0iuslRGDnzaOasqtTYRbBF4c+SIAoChAicNDa3gOc975M/j93degarlaGXP2B4CUXLCH7c8BrWTkeEykFxaP6iDbYIDzDPkgnH22EhqVsYIgL7UkjSxT7BTqLFModgTLy7NsvATkp9TwFnBd6lBhzM1BjNduHcgf9MCbQEMRaZvxOuAZCWWjApvefhn5F84Su/UCFoeH1qAgSAkAGB84HvcPvL8nb4nSBdaLLNOCJP3BophFmBk2EwBQ7N+Kg8NqeHmYy65kYOMbL6FZXg+4BgCz/0MO0lwJ7ObLMV6aMwA+Lsbc2+kSPaGP1qr0yD1fbexANcsUCgDqLFMo9gU3bViHm2HGsTJiV4lQj3qhsf0r88gbLQCg8Dhw9hvSFjoWGPU4Gior8Otr/0RF9hVit05owP7h1Sj2b5vW95R54p0J70DQSTVBSu+wlrNMC5L0DwzD4PWxr8Pfqa3SXqlfK/aNquZV+qspLsSvr/0T8sryttzL0TPIgVJ/AbL3ECY3BzHeum1Q+3aTgEUBZ1ziOkBlGBQKABt3lltbW/Haa68hNjYWMpkMgYGBePjhh1FWVtZ15w4cOXIEb775JubNmwcfHx8wDIPw8PC+OWgKxZbh3ezaboYNVUqUZTUQe1Klxqjy+Ggv3Dk8mOyqaQH+5CzGEzkACz5HRX4Ofnn1H5BXlBO7tSIWe0dWo8LH6Hi9Pf5teDtwSvhSrIYpGUZPIoQ8zTJ1lvsMN6kb3p/4fvsDZJ
WnGrvGVEIlJR1bRXUVfnnlHyi9kgHM/7StpHxHtj/Lk2PMTvDHrIFGKVWKVEfsry5qQk3x1UW+XGeZyjAodorNOssqlQrTpk3D22+/jebmZixYsAAhISFYu3Ythg4divz8fIvHevbZZ/HGG29g586dqK2t7boDhXKz0kmeZW5UWcmwyBa3OcvOUhE+uCORV/oWu1cC8kLSNv015BXWYuOb/0ZrI5lFQ+skxI6xFaj2VLfbHkp4CJOCJ/X8/VC6hBtZ1qr1UCt1nbQ2jU5jIscydZb7lOF+w4nMMHJXLf4aWwG1C3nbVjU1YvM7r+ByWjZwC0eO0VQB7HiBeEhmGAbvLExoL4VdIDKgkSGvC+lXrwc0dRyF0obNOsvvvPMOTp8+jbFjxyI7Oxu//fYbzpw5g48++gg1NTV4+OGHLR5r1qxZeOedd7Bnzx5kZGT04VFTKLaNqdRxOq0emZwsGOkSHfRXfePXbh2IYA9HcqDMv4DkH8ixg8fgXI0v/vzwXeg0amIf4+uCLaOL0OBidLhG+I3AM0Nplb6+xtlDyisu0d1FfiZzLNNsGH3Oo4MfJR4mmx112DKqCKyvM9FOr9Nh1+qPcCIbYKOmk4Ok/w6kbSRMvi4yvHN7W3YMlgHSOswiAUD22SpoWnVUhkGhXMUmnWWNRoPVq1cDAD7//HM4OxsvDM8//zwSExNx5MgRXLhwwaLx/vvf/+Lll1/GrFmz4Onp2SfHTKHcEPBSxzHIS66BuoWMNKZeXdg3Ld4Xd43gyC8aK4BtTxMmrdAZuxRjcfTndbxFhK4xYfg56TKUMuMN2dvBGx9O/hAiAScFHcXqCEUCOLuTWvPu6pa5emWZsxgSGf3s+hoBI8B/JvwHQc5B7TaV1ICfh1yGc3wYr/3pLb/hr8rB0Eo4BYN2vMCbBZqXGID5SYEAgEsSHbHQT6fWI/tcFc2zTKFcxSad5RMnTkChUCAqKgpDhw7l7b/zzjsBANu3b+/vQ6NQbmx4qeMEPAlGkUiPBiELNwcx3l80mJRfGAzAn0+2FT+4SpNWgo1105F59jzv5ULGjMSa+AvQiI03WSEjxIeTPqQ65X6Et8ivu5FlmgnjuuEmdcNHUz6CWCBut+lELL6LPoOwqeN57bPPX8Bv1VPQqO3wgKRpAv5YDujJh+K3bhsEHxcpmgVAnpi/0I/lFvikzjLFTrFJZzk1NRUAMGzYMJP7r9nT0tL67ZgolJsCzs1OoTCgIpfUFl+LKr99ewJ8XTlO0dmvgbyD7ZvlrS74uXQMKivIctgAMPyOu7Au9Aya9S2E/blhz2GE/4jevAtKN3HuZUYMrrNMJRj9yyCvQVg5eiVhUxnU+NJrP0Y/8ABPW1xVUYufisegsNndaCw5Axz/mGjn4STBB3e0yTFSJKQjXVvSjNpq0kZFGBR7xSbn0YqLiwEAwcHBJvdfsxcVFZnc3xeo1Wqo1UYdZmNjW1lQrVYLrVbbWTdKB66dJ3q+uoc1zxvLcZbT08miBC0MixyxHnMG+WH2AG/yNStSINr3Ghi0BahT5AE4Uh0JPSdaLZbJMP1vz+AjxTqUVJUQ++aEzcF9sff1+XeAftdInNwlxLaiVmny3HR23hS1SmLb0V1Cz+1V+uu7tiB8ATJqMvB77u/ttmplNT532IrXnn8R+z9fBU2r8XNSaQz4oyQB430KMcqrFAwDsIffhz54DNjQse3tJkZ54q7hQdh0vgwNAgPcDUbHOz2lBREdD8JgsNr7pL/R7kPPWc+wxvmySWe5ubmtMpGjo6PJ/U5ObTXum5qa+u2Y3nvvPbz55ps8+6FDhzo9Topp9u3bd70P4YbEGuctSquF8Or/tSIn5ORo0HGC6ZJEB0cJiwkOZdi1yyjPEOlaMCXrNYj1Gqj1QuytiEF2kw9vfLGzK/wnzcQnhT/inOYcsS9IGIRRDaOwa9cuXr++gn7X2mguFwMwRoPLCqqxc2dhp+2556063wEdbxfFlXmQ77wCipH++K
4lsom4ILyAQn1huy29Lh3/afwK86fOQeWx/dA2GWeKWDA4XhOBylYXzA7MhlSoh/bXB3E47m1oxMY0cyMEwH6pEKlqPSarjNeDgmwVAiVukGraxmQNBuzcudOq74n+RrsPPWfdQ6lUdt2oC2zSWbZFVq5cieeff759u7GxESEhIZg6dSq8vLyu45HdOGi1Wuzbtw8zZ86EWCzuugMFgHXPW95bb7dPpZYFTgBY441RDxYXpTp8cu8wTInt4AizLISbl0KgqUG1ygl/lcVDruE/IIYkJGHOihewqWQrzlw8Q+zzdvDG97d8D19H314dv6XQ7xpJyeV67OqQCUigl2Lu3Cm8dp2dt59Pn4UGxpm10eOHIjyJas6B/v+uTVBNwIN7HkRZi/FhNkWbglEJo/Dw7V9g39efIv8C+fvLbfbGTwVOmBd0BQGQ45aWTdDfuxEQCNvbBCfW47HvzmOsSgRJe/oUBqVBkxFVsK1ti2Uxd+5cq7wP+hvtPvSc9Yy6urpej2GTzvK17BedPQ20tLRpIF1cXPrtmKRSKaRSKc8uFovpl7ab0HPWM6xy3q5KJgyMEKVBZH7jLLEe90yKwMxBgWSfU5+DzdqJtAZ/HKqKhI4VgsuI+YswcfFS7Cnei08ufkLskwgk+HTqpwhyC+L162vod60Nd06qMXWLDjAIIJbyP0uAPG96vQHKBjIVoLuvMz2vHPrru+Yr9sVn0z/D/Tvvh1JnvEd+k/4NglyDsPAfL+Psn5tx4rf1RGYahdYBvxYmYbxvEUayRyA+tQqY8mL7/vExfnhsRgwythdiqMboGpQFTkB40W4IDW2SLZFIxM+53gvob7T70HPWPaxxrmxygV9oaCgAoLS01OT+a/awMH7qHAqFYoarznKV73BopO7ErvpgGf41O45sX3IWyl1vYVvpAOyrjOE5yjInZ9z+r1cx+f6HcaEmGS8ff5n3km+PfxuDfQZb9W1QuoepBXmWZsRoaVDzkqg4e/IDB5T+I8YjBh9M+oBXIv6tU2/hePkJjF54Nxb9+03InMmAkgECHKuOwO8lCWjZ/z8g7xCx/+lp0dBEkrNGOrETKvxHdxiEZsSg2B826SwnJSUBAJKTk03uv2ZPTEzst2OiUG4KDG3ZVEuCpxLmMrEB7z48DFJRB2e4qRIF3/wNP+QmIbeZP+XuHx2LBz74FFHDRyNXnotnDz0LrYFcSPH34X/H3EjrTNtSeo5YKoTMmYyuWJoRo5nTTiQRQOZEo1rXmykhU/DvUf8mbHpWjxeOvICM2gyEJw7FA++vQmDsAF7fohYP/Jg/FHnfPwvIjQvlRUIB3ls2HEWcstolwVPBXpNm0MIkFDvEJp3l8ePHw83NDXl5eUhJSeHt37x5MwBg/vz5/XxkFMqNDWswoMEtGs0uoYR94JRgRPoYp+q1LY3Y/8Yy/JEdCKVewh0Gw+bchnvf/ACuPr4obSrF4/seR5OGXHB7b9y9eGjQQ33zRijdhpdr2UJnuamelGC4eMqsOg1P6Tn3xN+DRxIeIWytulY8sf8J5Dfkw9XHF/e88T7GLLqHV41PqZdga14odr/xKNQNNe32IHcHjJkdTo7p6Ic6r0FtGzSyTLFDbNJZlkgkWLFiBQDgqaeeatcoA8DHH3+MtLQ0TJ48GcOHD2+3r169GvHx8Vi5ciVvPAqF0obewKIkZBph08gEWHx7bPt26eV0/PTsg0gt5S9pcHB1w4J/voqpy5ZDKBKjqqUKj+59FNWt1US7qSFT8dKol6hTZUO4ePWsMAnXqaY5lm2LZ4Y9g3mR8wibXC3HY/seQ2lTKQRCIcbf8wDufvVdOHvwK9hmVEqx7rlHUJhirIi7cHYUtM6k5KokuO26YaDOMsUOsUlnGQBeeeUVjB49GidPnkRMTAzuuecejBkzBi+88AJ8fHywZs0aon1tbS2ysrJQUVHBG+u7777DmDFjMGbMGMyb13ZRqaioaLeNGTOmU8kHhXKzsC21HEqpD2
q9SP3w+DnhEAoF0LQqcWDNl/jtzZcgb9Lx+kcmJWHph6sRPaJNvyhXybF833KUNZMVAJN8kvDBpA8gFJhePEa5PvCcZYsjy2Q7boETyvVFwAjw9ri3MTpgNGGvVlbjsb2PoVrZ9iAbMigRD/z3M0QO4Rf7am414Pf3Xse+b1dDrVSCYRhMWxBNtJF7xKHROQS/ni3uuzdDodgoNussy2QyHDp0CK+++iocHR2xdetWFBUVYdmyZUhOTkZkZKTFY5WWluLMmTM4c+ZMu1Os0WjabWfOnGkvMkKh3IzkVDXhpd/TUBIyA+iwKEgoBoZMDkZhygWs+8dTSNmzg9dXxBgw467bcfvKd+Dk7gEAaNI04fF9jyNfkU+0jfWIxefTP4eDyKFv3xCl2/S05DVXs0wjy7aHWCjGqqmrkOhNruMpbS7FY3sfQ72qrcKmo6sbbn/pTcxaci8kAj1vnLT9u7HuhSeQc/YkBo7xh8SBdBGKwm7BezsykFws77s3Q6HYIDbrLAOAg4MD3nrrLeTm5kKtVqOiogJr1641WdnvjTfeAMuyWLduXaf7zP1NmTKl798QhXIdaFbr8Lf1FyBSGVDlN5LYFzeQwYHvVuH3915HU20Nr2+ArBEPPHkPku58tF1S0ahpxOP7HkdmfSbRNtw1HF/P/BpuUre+ezOUHtNjzTKv1DXNhGGLOImd8MWMLxDrEUvY8xX5eHTvo+0OM8MwGHzb/Vj6/GMIdWrgjdNcX4dtH/0Hf/3fe4hPImeHaryT4KoBnlyfjNpmNa8vhXKzYtPOMoVC6R0GA4t/bExFXk0LRqmEYK9KI1jWAH3reaSf+hyXjx3i9RMxekzxzcO9D8yE56Sl7XaFWoHle5fjUu0lon2AUwC+nfUtvB1ooQpbhSvDaFGoodeZ15+yLIsmOekUURmG7eImdcPXM79GmCuZVjVHnoNH9jyC2tbadpvryEW486E7MN0/F2KGH2XOO38aFw/8HwzK08Z8zYwAw7UyVDaqsOKXZOj0VL9MsQ+os0yh3MSsOpCD3RmVcDIAg68WGjDoyqFp+hla1VFo1PzoYohjA5ZGJmP4hBEQTDPmTVaoFXhs72PIqMsg2ns7eOO7Wd/B38m/b98MpVfw5BMs0Cw3Hx1UK3XQqUlHisowbBtvB298O/NbBDqRxYVyG3J5DjMz4TkMmTIVD0YmI8yJL63QatTQqE9C07Qeem2bVjlWJ4GbnsHp/Hr8d09W374ZCsVGoM4yhXKTsiOtAqsO5AAARqpFEBpaoG3ZA03Tr2D1fMmFRKDDTP8c3BV6Ce6RicCibwBB2yWirrUOj+59lCe98HHwwZpb1iDUNZQ3HsW2kDqKIJaR0+pdSTG4EgyGAZw8qAzD1glwDsCa2WsQ5ExWzcxX5OPhPQ+jsqWyzcAwwPxVcI8bhTtC0jE38AochRreeKy+FtrmzdA0bwejb8JodduD9zdH87H5guniYRTKzQR1limUm5D0MgVe2JQCAHDTaZHYcB5qxRroNRkm2w9wrcLDUeeR6FEJxjMCWLwBELct0itrLsPS3Utxpf4K0cfX0RdrZ69FhFtEn74XinVgGMbEIr9Ws324zrSTuxRCIb1t3AgEOQdhzS18h7lAUYAHdz2IQkVhm0EkAe7+CYzvAAxwQ7DtTAAARZhJREFUq8GyqAsY7M7PKgUABm0O1I1rMUBxBl7atow5K/9Iw7nC+r58KxTKdYde9SiUm4yaJjWW/3geKo0e8U1XsLhsA1jVKQD8dHCeIiXuDk3D3KBsOIm0gIMncP/vgFOb9jhXnosHdz2IosYiop+fox/W3rKWp42k2DbdzbXMSxvnQSUYNxKBzoFYN3sdQlxCCHtFSwWW7l6Ky3WX2wwO7sCSTYCzPxyEOswKyMU9YanwlrTwB4UeBtUZ3F3+MwY3pkOv0+Hxny6gpF7Z5++HQrleUGeZQrmJUGp0eGTdWUjLM7G4fBNm1h6C2MC/4Qn1BsSX1+Je/1SEOC
najCKHtoiyVxQAIK0mDcv2LGvP03qNIOcgrJ29lkovbkC6mxGDnzaOSjBuNPyd/LHmljW8GaB6VT0e3vMwzlacbTO4hwBLNgISFwBAsGMjFgelYFBpDcQ6/gJAkUGJKXXHcH/pr/CqysAj686iSaXltaNQbgaos0yh3CRodXq8tGoz4s+uxa3Vu+GtqTPRikFggwpTrhQjskYBIcO2mQUi4J6fgNAxAIB9RfvwyJ5HoFAriN7R7tH4cc6PvEgV5cagu84yr9S1F40s34j4O/lj3ex1GHStZPVVWrQteHz/49iet73NEJAE3PcrIGr7nAUMi7C6Rky+UozgeiUAfkVON10jZtfsx9CL6/Diqk1Qa/mONYVyo0OdZQrlJqDkcjo+fu5ZhJ1fDz9Ntck2AlEYQpTxGFJUBunVSFFb6mSmbTFfzEywLIs16Wvw/OHnodKTjlSSTxLWzV4HX0ffPn43lL6CyjDsF0+ZJ76/5XuM9icr/ekMOvz7+L+x+uJqsCwLhE8A7voBEIhwrVq9RG9AYkkFwpsjIBCbXqPgq6lFVPIv+OjZZ1CQcqFtLArlJoE6yxTKDQrLsihOT8Omd17Bxjdfgqim0GQ7RuAFsfNCOLjdjtiio9xRgFs/ARLugNagxRun3sAnFz7hjTE+cDy+mfkNLThyg8N1lpvlahgMnTs1PBkGjSzf0DiJnfD5jM8xI3QGb9/XaV/jxWMvQq1XA3Gzgdu/AjeSHFN0HM7u8yFxvhuMMMDka0jrivDHe69jwyv/QH7yOeo0U24KRNf7ACgUSvdgDQbkXjiDs1s3oTI3u9N2jMAdIoexEIjjwDACJEp+h1TDKes++R/AiIegUCvwwuEXcKbyDG+cRTGL8MqYVyAWiK39Vij9DFeGYdCzaG3UwMmdr0XWafVQNpJpxGiO5RsfqVCK/03+H/53/n9Yn7me2LerYBcqmiuwatoqeCbeBTRUA3+sbt8v1rUiSfArzonvh0R0LwzaPOhaj4M18LNhVORmYcsHb8I3IgpjFt2D6BFjwAhofI5yY0KdZQrlBkGv0yH75FGc2/Y76kqLO23XKHSGr89ksKoYMEzbzclBoMBQ2R8ohDvZeORjyJZn4/nDz/MyXjBg8Pfhf8eyQcvaS11TbmwcXSQQiBgYdMZoX1O9yqSzbKpgCa3ed3MgFAjx4qgXEeYahvfOvgcDa6zEl1KTgnv/uhefTPkEA4YvA7Ca6DtYshOZutloNnhDKImGQBwJRpyNGvlRuOiaea9VXZCHbR/9B55BIRg+bwEGTJwKsYQuFKXcWNDHPArFxlE1N0F+ORU/vPAEdn/xSaeOcrPQEYe9JsJ/5r8AdVy7owwAI51+g5jh59TdX7IfS3Ys4TnKMqEMH0/5GA8lPEQd5ZsIRsDwdMedLfLjSjAkDiJIHWh85Wbi3vh78fn0z+EkdiLsFS0VeGDXA9iav43XRwgNRrv83L7NMAJAF4/wWS/hgPcUKESuJl+rvqwE+75ZjW+fehgnN/0MpaLBqu+FQulLqLNModgotcWF2PfNaqx55lHUpZxFc12tyXYKkQsOeU3Cj8FLMG7OrXC4QuY7dReWYaDjXoDlO73vnP0PbyGft4M31s5eixlhfF0j5caHm/6tM2eZa6dp425OJgRNwI9zfkSAE6lB1hq0eOfsuyb7xMmOwFuUT9h0aY2YPe9W/BS8GPu8p0EuNr2+obVRgVObN2Dtc8tRfeao2VkyCsVWoGECCsWGMOj1yL94Hhd3bUNxeqrZtrViT1xwH4ocp2iwjACLR4VgslaKiwpSZzrW5UcIGT10Jp6Nuf5zglcCPpn6Cfyd/Hv9Xii2CVd33FzPl1sAJtLGUQnGTUusRyx+mfcLXjj8ApKrk9vtJpfmsQwYhsU4l3XYJn+r3axu0SGxAVg6PhLrTgqQ5RyD6JY8jGy4AC+tnDeMXqtFY14Wfn7pWQTFD0LSrLmIGTUOIjFdG0GxPaizTKHYAI211bh0cB/SD+
1Fc72p/MhGqh0CcMZ1CAodwq7lfsO8wQH4x9gobHznLNE2RHIREdKzACMEZr8PbCUzXXS8Gd4VexdeGvUSJEKJVd4TxTbh6o4tjSxTvfLNjbeDN7675Tt8cuET/HT5JwAmJ6PAzn4fOLUSIdJLiJKeRJ56XPu+jGPleOLFEVBqdNh4vhQ5zjHIcYpGaGsJRjSlIUhZYvK1y65koOxKBhxc3TB46kwkzpgDN1+/PnmfFEpPoM4yhXKd0Ot0yE8+i0sH9qAgNRkwl2KJYeCXNAZrGoKRz3gTuybF+uDju5OwZ9UZGPTGMQTQYqLrd2BEUigXfYVVhadxO2dYlmlbHf/KmFdwezR3L+VmxNLCJPzqfdRZvtkRC8T418h/IdEnEa+deA0qA7+E9f/kKXjmnh/gsvkxjHddi6Ka4dDhqkSHBY6vT8F/XpoApUaPv9IqAIZBsWMoih1DEczK8TevMtSknIJBr+ON3dqowNk/N+Pstt8RkTQMg6bMRNSI0TTaTLnuUGeZQulHWJZFTVEBLh87hCvHD6OlgT892REHVzckTJ2FlFYxPiz1goIhbzATY7zxzQPDUbjnOEpyycpZQ5y2w8OxCem3foSXstegqbSQ5yz7OwfivTmrMMBrgBXeHeVGwMWDK8PoJLJcR51le2V2+GzEuMfg+YPPAcgl9h0rOYKjgly8d9uHGLbzZQxv3YwzzUva91eW6pC55SA+uWcaVFo99mcaiySVMh54t8kbX/3zTjCZx5G85y/oVfyFx2BZFKRcQEHKBcicnBE/YTIGTZ4Bv8houuCYcl2gzjKF0g801tbgyokjyDx2CLUlRV2294uMwZBb5iF+3CScLpTj8x/OQc2JxEyI9sa3DwyH4eQPOLbDC4BL+z4nQS2SAk7gy7HL8U3Kx9CxOniZeJ21c9bBwyuwl++OciPBLSyiVuqgUekgkRlvB6yB5aWOozIM+yLKPQo/37oBJf8eSdgZFihrKcdDF/+Lh8YvxWPnd+BKzlQo9MbryKmDaoS7fonVix/Hk79cxMErRoe5VavH439k48v7bkG4zAUxPh5IP7gHpZfTTR6HqqUZKXt2IGXPDngFh2LQ5OmIGzcJrt4+ffPGKRQTUGeZQukj1MoWZJ85gcyjh1CSmW5eZgFA4uCIAROmYPD0W+AXEQUAOJBZhSd+ToZGT0ZTxkd74dv7EiDb+Qz2HvaFmg0n9oeH78H9USHIyd3UbmNMvLybjFbkszecPUzkVK5XwzPQeDtobdZCrzMQbWhk2f5wlji3rYvocO26diUysAZ8n/cHDoSE4Hn2ABRZD7S30bIOOLqzGXMVj+Kruz/DU5sZ7Ltc1b5fpTVg+fpkLIkSYd78CRg0cSrqSouRum8XMo4cgKaVL/8AgLrSYhz9eS2O/rwWgXEDET9uImLHTICTu0efvH8K5RrUWaZQrEhrUyNyz59GzpmTKEpLManL4xIQG4/E6bMRN2YCxDKjQ/JnShle2JgKHacc8YRob3y3wB+y9fNQkC9CjupOYr/e6zKe9T8PQyPp7JhylkGnNO0OkUQIBxcxWpu07bamehU8A425drkZMgQCBo5udOGnXcJ1ljnXkcLmEjzrWYqH/AZDUjXEaFePRPaF44irnYUv7voJzwgY7EqvbN+v1bP4IVuA8LMlWDY+El7BoZj20OOYuHgpss+cQMbh/Si5fKnTwyrPuozyrMs4tO5bhAwajLhxExEzahwcXEzneaZQegN1limUXtLSIEfuuVPIPn0CJZcvgTUYuuzj4u2DAROmYODEqfAKDiX2sSyL744V4N2dmbx+swf547NhZRB/fzeULSwONa4i9mtESmwI/4WoyAUAQkaI++LuBPAzYQctP2uXOHvIeM5yR5rl3EwYUggE9MHKLhEIgA7XtPvj7sN/FZugMxgDASxYrA/9FffVR0KmNTqrRxuXI7DiWbh8OwWfzfsYfxfGYHtqeYd+DN7YnglFqx7PTG/TI4tlMgyaPB2DJk
+HoroSGUcOIuPIATTWGCPTHWFZA4rTU1GcnooD33+JkEGJiBoxGtEjxsDFy9tkHwqlu1BnmULpJizLor6sFPnJZ5F34SzKsi53KbEAAKmjE2LHjMeAiVMRHD8IjAlHVW9g8eb2DPx4iq9rvivJGx+4/grBpm/AssChxpVoNZAyihNhW9AqaSJscR5xeHP8m4hpdkYedZYpaNMt1xQbvyfcRX7cyDK36h/FfmAYhkgxuTD6dowIuAuvn3wdl2qNkV+NqBVHIjbhluxHjDbWCQcUz2KB4HWItjyGVUMfhM+opVhzlnR8P9mfjQpFK96+PQFiofGa5Obrj3F33Yexd9yL0isZuHz0IHLOnoS6pcXksRr0ehSlXURR2kUcXPMV/CKj2xznkWPhHRJGFwdSegx1likUC9BptSi9fAn5yeeQf/EcFFWVXXcCIBSJED5kBAZOmorIoSMhknQ+la3U6PDMhovE6vFrLPYpwbsN70GQ1XZzymydgUL1KKJNgUcasnyMeZbFAjGeSHoCyxKWQSwQQ91YwBuX3jrsE25GjCY5N7JMC5JQrsJxMFkDixiPGPw05yesz1yP1RdXt1cBLfBKQ7b3OcTWGhcFlmkGI1U5H0OctkFw8Ue86nMWUWNewcunyQf1X8+VoLheiS+XDIebI5kqjhEIEDJwMEIGDsaMR59EYepFZJ08itzzZ6A1lU3jKlX5uajKz8XJjT/DzdcPUcNHI2LIcAQNTIBYQitSUiyHOssUSic0VFWi+FIKClLOoygtBVq16RRbXEQSKSKGDEfM6HGIHDYKUkfHLvuUNbRi+Y/nkVHeSNgFMOCnAWcxpvBLCK5Om9dqQ3G46VGinVLchCNRv7V7v0k+SXhr3FuIdI80NjIV/KaRZbvEmVvyuq4LZ9mLOst2C/cacVXiJRQIsXTQUkwLmYY3Tr2Bs5VtD+rHI35HQGM0XDTGRXcnmh6Av+Qy/MW5YGquYEndMgwc+CjuvjwWWhgd45N5dVj45QmsWToS4d5OMIVQJEbU8FGIGj4KWrUKBRfPI+vkMeQnn4NOqzHZBwAU1VVI3rUNybu2QSSWIHjQYEQkDUP4kOHwCAiiUWeKWaizTKFcRdmoQElGGooupaD4UgoU1aY1cqYQyxwQOWwkYkePQ8SQEcRCva44k1+HJ39ORl0LeaGPFVbiV78f4VmQYmwrccVR+Uo4sqSzczhqA1TiZrhL3fHssGexMHohhAIh+UKsCS01dZbtkq5KXvOq95nIoEGxE3jOMvnUHeIagu9mfYe/8v/CR+c/Qp2qDoei12P+5afA4FpfEX5uegkj/V7EJG0dYNBhaP5XOOKyG4+rnsIlbVD7ePk1Lbj9ixP4bPFQTIwxnx5OLJUhdswExI6ZAE2rEoWpycg9fwb5yWc7lWoAgE6rQWHKBRSmXAB++BauPn4ITxqK0IQhCBk0GI6uNEsQhYQ6yxS7RdOqRHlWJorSU1F8KRXVhXnd6u/s6YXIYSMROWwkwgYPNSuxMAXLsvjpdBHe2n6ZyHghgAGPy/bjH8JfIbzqtOSIxVjl4Q5R+XLEqP2JcTL8jqPUMxP3xt6LFUNXwE3ayYXe1MJDGk2xS7g5k5sb1DB0+A7SyDLlGgw4k1ImriMMw2B+1HxMDZmKL1O/xC+ZvyAl8CCGls9obyPTeGF787NYH/oZnpM3YKBGi0BtIf4Uv4wvxHfh/5S3QHfVJWlQarF0zVm8MCsOT06JsijqK3FwbHec9Todyq5kIPf8aeSeO42m2hqzfRtrqpC2fzfS9u8GAHiHhCEkIREhgxIRMmAwZM7OXb4+5eaGOssUu0HZqEDZlQyUXclAaWYGqgvzLcpc0Q7DICA6FpHDRiFy2Ej4hEX0eOquRa3Dq1vT8cfFMsKeyOThQ4d1iDPkAXogXyzC1+5u2O3kiEGVUzC+dgTRvsapBC0jC/DbuN8Q7xlv9jVZE4sQ6dSjfcKNLLMGFkqFGlJnIQw6QN2iM9ueYkdwIs
usofPFzM4SZ/xz5D+xKGYR3jv1PioO5iGgKap9f7h8MM46L8A9wXsxs0WJx+UKxGk1WIGfMc/xOP7VuhTn2LbrmIEFPtyThdSSBnx0dxJcZJaXvBaKRAhNSEJoQhKmLl2OmqIC5F04g8KUZFTkZIE1NcvWgdqSItSWFOHiru0Aw8A3LBIhgxIQMigRgbEDaHo6O4Q6y5SbEpZl0VhThbIrl1F6JQNlmRmoLy/t9jiObu4ITUhCeNIwRAwdYZXpucyKRqz4JRl5NcZpQjc045+i33Cf6CAEBha5YjG+dnfFHidHsAyD4IZ4jC28nRhHK1JhxAP+eD1pjWVOL9dZphIMu8XBRQyhSEAUHmmqU0Hq7AS9iv+9oNkw7BjedaLrzD9R7lH4dvY32Bd6CBlfKyHRGNdtjCqZB7lDJfZ5pWGfkyOmtSjxtwYFBmiKsEn6FjbrJ+E97WLUoe1au/dyFeZ/dhyfLh6KxGD3bh8+wzDwDY+Eb3gkxt6xGKrmZhRdSkFhapsMo1leb34AlkV1YR6qC/NwYcefAACPwGAExsYjMHYAguIGwjMwyGR2I8rNA3WWKTcFrc1NqMrNRkVuNirz2v5tbVR0exyRVIqQAQkISxyK0MFDrJpuiGVZbDhbgje3Z0B91UlhYMCdwqN4SbQBXkwTMiQSfO/uin1OxpuLe6svZmYvhQDkxXjew0MRMyTA8gPgRtFpVNluYRgGzp5SKKqNmQSa5Cp4hzlB30p+L2TOYoilQu4QFHuBe52wcDaOYRjMGjQNsY/XYMfqNIA1jjMt935sla1CnVMZDjo54qCTI6a0KPGIohF3qo9ipuA8PtTdgw36adBDiMI6Je748iT+dUs8HpkQ0auc3zJnZ8SNnYC4sRPAsizqSopQkHIBxRlpKMvMsGght7y8FPLyUmQc3t82ppMzAq46zwExcfCLjIbMiUo3biaos0y54dBptagpykdFTjYqc7NQmZcNeUV51x1NIBAK4RcVg7DBQxCWMAQBsXEQiiyf7rOUmiY1Vv5xCfszjYsGJwrSsFK0AfGCIhxzkGGdmy/OO5ARPAeNC+ZmPg6pnsyoMWp+BGKGdcNRBmhkmULg4ikjnOVri/x0rQJeO4r9wg0WdEu6BiB8kA/G3xGDE5tz221igxRzM5djy+D/Q7NUDgA47OSIw06OSFKpsUzRiDeVa7FUuBcf6O7FfsMwaPXAuzszcTSnBh/dlQRf195/LxmGgXdoOLxDwzHytjug1+lQmZeDkow0lGSkoTwr02yGjWuoWppRcPE8Ci6eb7d5BATCLzIGfpHR8I+MgW9EJCQOXWdGotgm1Fmm2DSqlmbUFOajurAA1YV5qCnMR11ZCQx6fY/GE0mlCIyJR1D8IAQPGISA6LhuZa7oCbvTK/Hylkvt2S4GMYV4UbQBI0Xp2OHsiJdcA1Ag4TvoEp0M8zL/Blc1WYUqergvRswN7/ZxcLWGVK9s33Cd4GsZMPQqxmw7ip3By4bR/SGSpoegvrwFmScr2m1OWnfMu/wE/kxYBZXYKElLlUnxd5kPQrRaPKBQYFXzx0jXx+E97X1IYaNxLKcWMz85ijdvG4QFQwKteh0TikQIihuAoLgBGLPoHui0WlTmZKE4Iw2lly+hIjcbOo2664EAyCvKIa8ox5UTR9oMDAPPwGD4R8XA76rz7BMaDqmj6RR5FNuCOssUm4BlWTTV1qCmuADVBfmoLmz766zEqaXIXFwRFDcQwfEDETRgEHzDoyAU9c/Xvr5Fg3f+uty+iC+SKcfToi1IkJ7F767OeMk5CE1C09FdoUGEudmPw1sZTNh9Ql0wbemAnt0guItaqLNs13AzYrQ7y5zIMjcnM8XO4F4nulgcZ3oIBpMXx6GhWomKXKM8zkPlh1uznsDWAZ9CJyQjuCViMf7j7Yn/83THrc1V+KDxbeSqhmKV7g5kt4bgud9SsONSBd5dmABfl755oBOJxQgemIDggQkAAL1Oh5qiApRnZ6I8Kx
Nl2Zlorqu1bDCWRX1ZCerLSnD56MF2s6uPL3zCIuATGg6fsAh4h0bA3d8fAm7qT8p1hTrLlH7lmlOsqCxHXWkxakuLUVdajLrSErOVmCxBKBbDNyIKAdFx8I+ORUB0HNx8/fo9gsqyLLZcLMPbf12GXKlFDFOKv4m2QOaSik2uznjNwbx8wk/ij0UFz4JVkFN2rt4y3LoiCWJJDy+iVIZB6YALxwm+VvJa10ojy5QOcPXB3ZRhXEMoFmDuE4nY8lEy6suNkWTvphAsL30bf8auRpm6hNdPKRBgo6sLNrq6IElVhKeb3gTbOBBfaRdh32XgXGE9Xpodj7tHhPRKy2zRexCJ4B8VA/+oGAybcxsAoLG2pt15Ls++gtriAuh1ui5GMtJYU43GmmrknT/TbhNJpfAOCYNPaDi8gsPgFRQMz+AQyFzdrf2WKBZCnWVKn2Aw6NFYUwN5RdlVZ7gYNcVFqC4qQN6G76zyGp5BIQiIjoV/dBwComPhHRrWJ3rj7lBQ24JXt6bjeG4t4plCPOG0CXK3Qnzq7Ih6obfZvtHu0XgwbikEe8NQWiwn9gkkBsx9MgGOrt3L5UzAuclRGYZ9w48st00vc7NhUGfZvmEYTuo4EykoLUXmJMb8p5Pw+38vELm8DeUyLHd6G+J5lfgp5wdk1mea7J8qkyJVJoWbZwXmtHyIRYoQ/NlyJ176Q4tNF0rx7sIExPv3b1o3V28fuHr7IH7cJACAXqdFbXERqvJzUZmfg6q8XNSWFHZLOqhTq1GZm43K3GzCLpbJIHB0xt7iHHgHh8IzOAReQaFw9/OHQEgj0X0JdZYpPYZlWTTL69BwVZtVX1GGhspyyMvL0FBVCYPe8qdrcwiEIniHhLVpvMIi4RseAZ+wSIvKSPcXilYtPjuQgx9P5WOU8AQe9NmLS67N+EwiAeDSaT8GDMYFjcP9A+7HKM/R2P1NOkoySUdZ4iCE+9AWuPo49OoYeflRqbNs13CdYE2rDmqllqdZ5jrVFDuDlw2j584y0JaGcP4zQ7DlowtQNRvvERU5CgT85Y8fn1iP1MaLWJ+5HkdLj8JgQvahEArxq6sL4NqAcM2XeLDJAUUVMzD/03o8OC4ST0+LhrtjLwILvUAoEsMvMhp+kdFIxGwAgE6jQU1xAaryclGZl4OqglzUl5V2+x6pVakAlQpXjh/mvKYI7v6B8AwMhrt/ANz9A+DhHwh3/0A4e3jStHZWgDrLFLMY9Ho01dVAUV0FRU0VFFVVkFeWQ15RhoaKcovS7HQHqaMTfMIj4BsWCd+IKPiERcArOOS6R4w7Q6c3YMO5Eny+/xRixL9jVPBlpDkwSGUYAJ1frD2l7lgYcwfuiL0DIS4haG3W4M//S0F1URPRTiITYu6Tg3H+8jErHC2VYVCMmCphXVXQRKT4Amhk2e7hLfDrmQyjI54BTrj16UT88b/zMGiN41fkKrD144uY/8wQfDZtNCpbKrE5ezP+yN6MGlWdybEKJWIUeukAr90Y2boDuZfjMDv5Djw6dRweGBsGqej6R1xFEgkCouMQEB3XbtPrtKgvK0VNcSFqigpQU1SA2uJCtDTIzYxkGr1O1z6Dy3ttsQRufv5w9w/s4ES3/evs5UW10RZCnWU7hzUY0NxQD0V1FRqvOcQd/t9UV9vtVEGWIHFwgFdwKLyCw+AdEtr2/5BQOHt43RDyAJZlsSXtCn489jW0wpNQBSuRwjAAzDugw72TcM/AJZgeOh0SYZsz3VCtxI7P09BQpSTaSmRCzH92CLyCHYHLVjho7udInWW7RiQWwsFVgtZG48KqyrxGoo1QLICDi20+qFL6B971uBcyjI54BjrBe1QrFCluRMXIurIW/P7fC5j3VCL8A/2xYugKPJ70OA6XHMZvmb/gTNX5Tse85CAEHHLBsO9jS7oDtl0ciXvHPo7FIxJt7r4iFInbFvaFRQATp7bblYoG1BQVoqYoH7UlRai7uihQ09qzNT06raZTR1ogFL
XJSHx84OrjB1cfX7h6+8Lt6v+dPb2ovOMq1Fm+iWFZFmplC5rratFUX4emuho01bX921xfh8aaKjTWVHdrMUJ3kTo5wSMgCF7BofAIDEZeeSVmL7wDHn7+Nnfx6gqWZVGgKMC6C1twqmAbqkT1YNsVFp2/lwChE26NXYRb4+5EpFsksa/0Sj12f5MOtZL8DGROYtz6dBL8wl2h1Wqtc/xczbJVRqXcyLh4SElnOZ8s5OPiKbvhfqcUK9PLPMvmkLgaMP+ZROz8Ih1KhfF72FSnwu//vYBZjwxC+GBviAVizAybiZlhM1HcWIy/sn/H9qxNKNU1mRyXZRgUO6oAHMN7l49hTao7RofeiqUjFiLGI8amv9OObu4ISxyCsMQh7bZrksfqokIc37cHAe5ukFeUor6sFEpFQ49fy6DXoaGqAg1VFSb3MwIBXLy8253oNofaB65ebY60s6eXTckh+xLqLN+gsCwLVUszmuvr2pzhulo01V/9t6623UHubYYJSxBJpPAICISHfyA8AoPg7h8Ij4AgeAQEwsHFtf3CpNVqUblzJ1y8vG36YtURlU6Fs5VncazkMA7m7UO1vqFtRxfBNmcWmOWZiFuHPYnhQWMhMLFIJu1gKU7+ngsDRwPo7CnFbc8MgYe/lfNvcgNCNLJs97h4yQjpT2U+GVk2JdWg2Bk8GYZ1IsvX8Ax0wh3/HI5tq1KgqDHer7QqPXZ8kYbR8yMxfHYYmKuZLkJdQ/HkiL/jieHPIaXiHLYnf47dtRfRxHR+XFXiBmyrWI9t29fDU+CCaZEzMDl0Gkb5j4Kj2PadPYZh4OLpDZmLG9yLyzB17lyIxW03odbmJtSXlqCurKTN8a0sR0NlBRoqK3otk2QNhvZsHZ0hcXCAs0eb4+zi5d3mRHO2HV3dbnjdNHWWbQydVgtlgxwt7X/1Hf5v/FM2yPs0IsxF4uDQNjXj6w93P/+rznCbQ3wzLSDQG/S4Ir+CsxVncabiNM5XnoPaYFlkV2YwYKLQDbPi7sCUYU9CJja9IE/VosXBHzNRkMrPz+kV5IxbVyTC2aMPdKJcreFN8plReg5v8R7H36B6ZQovdZyVnWUAcPV2wKJ/DseOL9JQXdjhgY0FzmzLR3luA2Y+NBAOLsZ1IAzDYGjgKAwNHIWXdGocTfkWezN/xRGdHEoz17Z6QxM2527B5twtEDNCDPcbgdGBYzDKfxQGeg2ESHBjuUUOzi4Iih+IoPiBhJ1lWSgVDW3riyrbosfyymvOdHmPZR1cNK2tqG8tRX15aadtBEIhnDw82xxpd084unvAyc0dTu4ecOzwr6O7O8QS23xAv7G+FTcgLMtC3dICZaMCrY0KtDY1dvi/Ai0NDVAq5GiWtznAqpbm63KcQrEYrj5+cPP1g9u1f3392m0yZ5cbJhrcHViWRW5DLs5WnsWZijM4X3keTVrTU3umcDIYMEHNYlbwZEwc8zwcvGPMti/OqMOh9VeItEnXiEjyxoyHBkIi66OfJXf69Cb8PCndw6WLhzIXL+os2zsM+k6G0RFHVwkWPj8Uh9ZfQfZZshhVyeV6/Pr2WUy+Lw6RQ3x4fSUiKWaMWIEZI1ZALS/CidMfYU/RARyToNPCTwCgZfU4XXkGpyvbchw7iRwxwn8kRvqPxOiA0Yj1iOXNCt4oMAwDJ3cPOLl7IHhAArGPZVm0Nira1ifVVrf9W1ONxtrq9kiyNRfvG/R6NNXWoKm2psu2EgdHnhNN/usGBxc3OLq6Qixz6De/hDrLvSTv/GmUAG0OcJMCysZGqK7+e8057mlpZmvi4OoGFy/vq9Mibf+6ennD1dcfbr5+cHJzv2miw+ZQ6VTIqMtASnUKUmtSkVqTinpVfbfG8NXpMFGpRqIkGnMmPg2H+LlAFyuK1Uotjm/OxZWTprVhI+aGY9StEe1TjX0BNz/qzfjwQ+keXUWO+2SGg/L/7d17dJTVufjx7zv3TJKZZHIl5AK5cREJF6EgIlas0FZrg1
TraVXUrv48WtHFWetcKhxRyqLVwmmLVs7RWhRbsTdYRREtaqlCASFCIpBIEkjI/X6d+7z798ckgZAJ5EZmAvuz1rvmnf3ObB42wzvP7He/e48tF38vDHPquEvRGbTc9tBUYsZHcHBnSa9ObHubm/e2FJB1QzwLv5tNWETg2YaM0Wnc+vVfcauq4jz9N97/xy845ixiv9lI9WVWb+302tlXsY99Ff4lqq1GKzPiZpATl0NOXA7TYqeNiWEbl6Moir8n1xrFuKxJfY4LIXC0t/kXEOu6t6knma6rpb2pEWfHwDuVBsPtsON22Gmurrzsa7U6HWGRFsIs1t6PkRbMFithFgthkVacI5CDyWR5mP72v7/CpA/u3eJma1SvJDgyJpZIWwyRMXFExMQSEW1DZwjOnJPBJISgoqOCLxq+6EmOi5qK8IrBDV/RCMEMl4uFdieJznEkZn6HnHseQh956UVGumMoyavn0z98SWeru89xU4Se2x6aStp1MYOKaUgu/pK7Bn4cSZd2uaWsZc+ydKXHLF9MURRmLUkjYaKFD35zoteNfwCnj9RRUdTMgrszyZ6b2H8Hg0aDadIS7pq0hDs6m8n/2xtUndpOrbGCT80mPjcZ8V6mw6DV1doredYqWrKjs5keN50Z8TO4PvZ6UiJTxmzvc38URcFssWK2WElIzwz4Go/bRWdTE+1NDf57p7q288+b6GhuvGJXIsA/ZV5HcxMdzZfu8HKOwE3yMlkOUVqdDnNU9PnxPVFRhEfZei6rhEdHd12SiEYX5GQ9FPhUH2XtZZxqPOXfmvxbu3tov34nuD3MdTqZ43QR2ZnA2YjbSL75fm66YRbaAfb+1pxpZf8fi/vMMNBt/KRoblsxZfR67y4esyx7lq95l0uGL14SW7oGXXyaGIF5lgdifHY09z49l4/eOEXZF73nWHa0e9i79RT5H1ewYHkWSVlRl6xLGx7NzG8/Sc63VvKPvHz0+7bxcM1HOM1VHDaZOBxmpGQAHUo+4ev5bnm76G0AIvQRTLZNZkrMFKbYpjA1ZioTLBPQXuXzF+sNxp4FUPqjqj7sra29Eml7a4v/vquLHn0jNOvTlSKT5VGkMxr9lwa6xtuEWayEWaxEREUTHn1BIhxlwxgeLi+TByCEoNHZSHFLMSUtJRS3FFPcXExRcxEO79BvWEjyeJnrdDLX4WSWw0OpZxJ7xRz2Z3+d5V/9Ct9LiRpwXU3VnRzZfZbTn9UGPK43aVlwdyZTb0oa3X/ji3uEruCQD2lsMIXr0ek1eD2BE6CIKNmzfK3rs9z1FewpvJjZYuCbj0/ny0M1fPKH032m2Kwra2fHxjzSZ8Qx546JxCZHXLI+jUbhlhty4IYcTla18ed9n+E+uZun+Ixs/UmOh+k51JU8nxtgJ1SHp4MjtUc4csH8zyatiezobDKjM8mMyiQjKoPMqEziwuKuqe91jUZLRLSNiGgbZPR/P48QArfD3jV5QQudrc0993Nd+NjZ2oyjrTUoibVMlofJGp9ITFw8YZYLEuCe8TLWC8bNWNAb5RfPQKlCpc5eR1lbGaWtpT2JcUlLCS2ulmHVrRGCbLeHHJeLHKeLmS4XZk8Y+9Vp7PXN4mXrjXxz4RQen51MvGXg/2a1Z9vI21NG6fH6vtO0dUm9zsYt35sclFkG+syzfJVdOpQGT1EUImymPgviAJitBrR6+Rm55vUZhjG6f7yiKEyaN47kKTb2/b4o4CxCpcfqKT1WT9r1Mcxeksa4zKjL1js1ycLU+xbT1LmQ7Z+V8/zhItKb/8libR6Pawrw6jr53GTkuNHIMZOBIoMB3wATXafPSX5DPvkN+b3KIw2RZEVlkRGVQUZUBunWdNIsaSSGJ151QzkGQ1EUjOZwjOZwbEnJl3ytEAKPy4njgvvC7F2P55+39XruvMwwjYGQyfIw3bd+EzExozDe9CqkCpV6ez3l7eWUtZVR3lbes3+u/RwuX98ZI4YiyudjustNjtPFDJeLaS
43WlXLUTWbT9TpbFGvp1g7kSXXJfHdOSnMS49BM8BeV59HpfR4PSf+UUnlly39vs4Sa+LGZZmkzwxiz8LFPcvXUA+H1L9ImzFgsiynjZOAvueJURqGcbFwq5GvP3o9ZQWNHPhLMc01fT+zZQWNlBU0kphuZdrNSWTMikdnuPRwCFu4gcduyeRfF2VwpGw+fzxyjjX5laTYz3KTs4CFmgJWaQpRNV5OGA0cMxo5bjSQbzLSNMjV7drd7eTV5ZFXl9er3KAxkBKZQqolldTIVFItqaRZ0kizpBFvjr+mE+mLKYqCwRSGwRSGNT5hQO+pr6tj9Y6BvbY/MlmWrhiP6qG2s5bqzmqqOqqoaKvgsP0wf/3wr1Tbq6nprMEzwDmMByre62WK28MUl5spbjdTXW4SfD7cQs8xkcFhdRIvqZM5rE7CowljYVYsD09P4mvXJWAxDeyymxCCpqpOCg/WUPjPapwd/f8dDGE6bvjGBKbfkhz8Xro+wzDkCVjqPymWybIE9B2uNYrDMC6mKAoTpseScp2Nk59UcXjXGZydfc+/NaWt1JS28skfTjNpXiKT540jNiXikh0ViqIwZ4KNORNsrP3WdbxXUMPugjm8froBxePkBk0Rcz1FzOks5HuaYkyKm3qtllMGPSeNBk4aDJwyGqi9zIwbgbhVNyWtJZS0lvQ5ptPoSDQnkhSRRII5gQ5HB54SDynWFJLCk0gMT0SvlfctXcpILNktk2Vp0FSh0uJqod5eT72jvuexzl7Xe99RjxqoFyLwUN5BMakqEz1eMt0eMj1uMt0eprrdxPr8f169sHBcncxWdRKH1cl8ISbiRo9Bq+ErGTaeuX4cS65LJDp8YLOECCForOygJK+e4qN1AXviLmQI03H9LePJWZzS7xRHo63vMAzZsywFWJjkMuXStaXPmOUrPBvGQGi1Gq6/JZnsuQnkf1xB/kcVAZNml91L/kf+45ZYE5mzE8iYFUdc6qXXDTAbdNw9O5m7ZyfT5vSw92QtuwtSeOnLHNxeFT1erlPOMkdTyBx3EXfbS/hXxT88pFGjodBo4LReT7HBv5Xq9TiG2DnhVb1UdFRQ0XF+0Y+PD33cs6+gEBcWR7w5njhzHHFhccSZu5537ceFxRFtipY91MMQ0smyw+Fgw4YNbN++nfLycmw2G0uXLmXdunWMHz9+UHU1Nzezdu1adu7cSU1NDYmJieTm5rJ27VqioqKuzF9gjBBC4PA6aHI20eJqocnZRLOz2b+5mnv2m5xN/uTYUY9XHZ3VA8NUlVSPlwyPpysx9j8meb10/1ZsEhEUqNm8JdIpUCeSr6ZTg43u27hjIwzcNSmexVPiuSkrjgjjwD72jnY3FUXNlJ9s4tyJxoBTv/WJ12JgxuIUpt08HkNYiP33unjqOJksS8ieZekyLj5PXMF5lgfLaNYz55sTmXFbKic/reLY3vKACz4BtDU4yXu/jLz3ywizGEidYiNlqo2UKTbMlv47NCwmPctmJbNsVjIdLi/7ixv4e1Edfy+K4JXWTF7x3QFAPM1M15RyvaaU6Z5S7tCcIVbxj5VVgSqdlpKeBNpAiV5PmV53ydUGB0IgqHPUUeeog8b+X6dTdMSExRBvjsdmshFtiibaGO1/NEVjM9mIMkb17Jt1ZtmhcoEQ+zY/z+l0cuutt3Lw4EHGjRvHXXfdxdmzZ/ntb3/LO++8w8GDB0lPTx9QXQ0NDcyfP5/i4mLS09P59re/zYkTJ/jlL3/Je++9xz//+U9sNtsV/htdeW6fmzZ3G23uNtrd7bS722lzde172v3HXOePtbhaepLhkRofPBRhqkqKx0uq10uqx0Na136ax0OsT+2ZucgrNJSKcRwXKfxRTaZIJHNCTKRCxHLh/EZhei03T7RxY0YMN2bEMC3JetkxyD6fSlNlJzWlrdSeaaOmtJXW+oHPrjE+O4qpC5NInxGHTh+iUwbJYRhSAP31IMtp4yQgwA1+wRuG0R+9UUvO4hSmLR
rP2fwGTnxSyblTzf2+3tHmpuhQDUWHagD/PSUJE60kpltITLcSMz4Cra7v+THCqGPJdYksuS4RIQRFte38vaieg6WNHDmrY68rmr3q7K5XC5JoZJrmDFlKJZN858h2VzDfXoVBae96hb8nurwrcS7X6yjT6ynX+feH2hsdiFd4qbXXUmsf2KVdg8ZAlCmqJ4mONERiMViwGCxEGiJ7nl+4bzH6nxu1V9+5I2ST5Z/85CccPHiQ+fPn88EHHxAR4Z8SZtOmTfzbv/0bDz/8MH//+98HVNdTTz1FcXExy5Yt4+2330bXNaZo5cqVbN68mVWrVrF169Yr9DcJTBUqTq8Tl8+F0+vE7rXT6ek8/+ix+7cLyrvLOr3+452eThxeBx2eDtrd7UFNeC/FrKokeb2M8/q6Hr0keX09j3E+X6+pPDuEiTKRyCGRyBkxji/VZL4UyZwR43DTd2yWLdzAjJQoZqZEMS8jhpzkKAwBTnQAXreP9iYnLXUOmqo6aKzspKmqk+baTlTv4HpMIqKNZN2QwJQF44hODB/Ue4Pi4i85OXWcBMSMD0ejUVAv6jGMucw0XNK1oU/vYggMw+iPVqchY1Y8GbPiaa13cGp/FaeP1NLWcOmlm9sanLQ1OHum+9RoFKISzdiSwolJCseWFEFUvJnIWBP6rhsGFUVhcqKFyYkWHl2Ugdencqq6nUNnGjl8ponDZ5uossdSpcbyAXOgaxE5HV4mKDVMUirI1pwjXalmgreG25y1RCrn5+QXQKNWQ5VOR5VOR7VO2/Woo1KnpVqno/MKdni4VTd19jrq7HWDfq9BY8BitBCuD8esM2PWm3v2w/XhhOnC/M/1ZsJ1/sfu111YFqYLw6g1YtKZgj6EJCSTZbfbzYsvvgjASy+91JMoA6xatYrXX3+dffv2cfToUWbPnt1fNQBUV1fz1ltvYTAY+PWvf92TKAO88MILbN++nTfffJPnn3+e+Pj4Qcf6asGraMO1OH3+xNfldfXsdyfDF+47fU6cXueI39gWDIoQRKsq8V4fsT4f8T4fcV3Jb6zvfDJsVdU+89o3ikiqRDyHRSJnRQJnRSJn1QTOinE0YKHvTPh+Rp2GyYmRzEyN9ifIqVGk2vzLj7rsXhztbhrOtGJv8+Bod9PR7KK90UFbo5O2RieOtssPpbiUSJuJ9FlxZM6KJ2GiZUxdppJjlqVAwiIM3Hh3Jgf/WorX5UPRCGZ/YwKWmLBghyaFgovOE6M5z/JwWOPCmPftDL5yVzr15e2U5NVRnFdP2wCuGKqq/ybupqpOii86ZrYYsMSasMSGERFtxGwxYrYYMFsMJFsNZM9O4ZGbJiIEnG3spKCylYKKVvIrWzlR2UqnG4pFMsUimXfVeRfULIihjTSllglKDRM0NUzw1ZLmqWW20kCc0tYrDgG0aRSquxLoeq32/KbzP9ZptTRpNYhRPte7VTcNjgYaHH2n+RsqvUaPSWvCqDP6E+iufZPWhFFr7LVv0nWVde17O4c/bDQkk+X9+/fT2tpKRkYGM2fO7HN8+fLl5Ofns2vXrssmy3v27EFVVRYuXEhCQu+pQ4xGI3feeSevvfYau3fvZsWKFYOO9fXC19GGhehl9yHQCYHN5yPapxKlqhfs+7D5VOIuSIhjfL4A/bz+nuEqNYZqEcMxEUeNiKVaxFArYqgTNupFNG6MaARo8H8IdQJ0QiESiBagQyE2zMD4SBOJEQbiwgxE6XWYUPA4vbjPuHGdrOZTxzncDi/OTg+qb+R7PHQGDUlZ0aROtZF6nY2ohDE8jqvPmGU5DEPyy1mcwrRbxmNvd/Lhx39j1pLUYIckhYqLey9DaMzyQCiKQnyahfg0C/NzM2mps3PuZBPlJ5uoLGrG4/INqj57mxt7m5ua0rZ+X6PRKBjMOoxhOoxmHdPNOm4Ii8SQFYUDQaPTQ5PTQ53dRU2nm9pOF24h8CjR1CnRVDGJTxR/Z7TatekUN3FKMwlKI4lKI+OUBhKVBp
Lc9aRoGpmltGBVOvrE4gUatVoatFrqupLoZq2GZo0/kW7Wamjp2dfiCdHvN4/qwaN6aPcMflVen2Nw/8aBhGSyfPz4cQBmzZoV8Hh3eX5+fsDjg63rtddeG1Bdgfy/fzxJmKG7BybQh6yrLOAH8HyZCFB2vkjp/1iAMoHSVar4a1YUFHHxy5Ve71R6H+xVV/crWoFWFIq741WUrrgVBBqEogGl+/H8DwgjMKFrGxQ70OgEnLQALYN9/xCYvS1EeWuI8vi3SG8jmgoBH0Mz/m3UCUFqaxvntm4d1k15astFy27LMcvSBbRaDaZwvfwNJfV20XCthi1baP7D28Ovd4TOa0MRDkwBJqPQrrXRok/s2Tp10cOuX1UFzg7PJacV1QPjuzYYyIxJJsACpPWUNHVtXwgVhIoiVBTUnqEyCgL/t7RAEV2PCLRCEIsgjvNl/i9zccHW9VyIrmfnsxTRz+PwV6wZ3vuVft7vcDt4kn8dVt0hmSyXl5cDkJwceCWX7vKysrJRq8vlcuFynR8T3NrqTzzaNVF4NObLxhFUop/9kay35/fv2KB3d2B21BLeWYPZXkOEvRpzZy167/kp4bwEKTnuR+O5ka3P5fHQ2HiJ26fHMI/Hg91up7GxEf0Al62VZLsNxdXeZm0eDy7fBT1zlZX+bYSM9HltKMK7tvGARxeG3ZxAp3kcdnMiHeEJ2M2JeAyRQY5yOBT6G9rY6yWDKR9DnBr/9/pwpj0MyWS5o8N/KcFsDpyEhof7b6Zqb798d/xI1bVhwwaeffbZPuVrfvfdy8YgSSGp+DTExgY7CkmSJEm64hobG7FarUN6b0gmy6Hov/7rv1i1alXP85aWFtLS0igvLx9y419r2traSElJ4dy5c1gslmCHM2bIdhs82WZDI9tt8GSbDY1st8GTbTY0ra2tpKamDmuK4JBMlrtnv7DbA6+S1tnZCUBk5OUvi4xUXUajEaOx79yBVqtVfmgHyWKxyDYbAtlugyfbbGhkuw2ebLOhke02eLLNhkYzjPt0QvJWjtRU/53YFRUVAY93l6elpQU8fqXqkiRJkiRJkq4tIZks5+TkAJCXlxfweHf59OnTR7UuSZIkSZIk6doSksnyggULsFqtlJSUcOzYsT7H//SnPwFw5513XraupUuXotFo+OSTT6ir670SjcvlYteuXWi1Wr7xjW8MKkaj0cgzzzwTcGiGFJhss6GR7TZ4ss2GRrbb4Mk2GxrZboMn22xoRqLdFDGcuTSuoNWrV7N+/XpuvPFGPvjgg55ZK7qXu160aFGv5a5ffPFFXnzxRXJzc9mwYUOvur7//e/zu9/9jrvvvpvt27f3rOL35JNP8qtf/YoHH3xw1Je7liRJkiRJkkJfSN7gB/5kee/evRw4cICsrCwWLlxIWVkZhw4dIi4ujtdee63X6xsaGigqKqK6urpPXb/4xS84ePAgf/7zn5k8eTI33HADJ06c4IsvviArK4tNmzaN1l9LkiRJkiRJGkNCchgGgMlk4uOPP2bNmjWYzWZ27txJWVkZK1asIC8vj/T09AHXFRsby+HDh3niiSdwu93s2LGD1tZWVq5cyeHDh4c1nYgkSZIkSZJ09QrZYRiSJEmSJEmSFGwh27MsSZIkSZIkScEmk+UrZN26dSiKgqIovPnmm8EOJyTl5+fzox/9iHnz5pGUlITRaMRqtTJ//nw2b96Mx+MJdoghp7CwkJ/97Gd89atfJTY2Fr1eT2JiIsuWLeOTTz4Jdnghq7Ozk23btvHEE0/wla98BaPRiKIorF27NtihBZ3D4eC///u/yc7OxmQykZSUxMMPP0xlZWWwQwtZR48e5ac//SnLli0jOTm551wvBWa329m5cyePPPIIkyZNwmQyER4eTk5ODs899xwdHR3BDjFkbdq0iWXLlpGVlYXVasVoNJKWlsYDDzxAQUFBsMMbExobG4mPj0dRFDIzM4dWiZBGXGFhoTAajUJRFAGIbdu2BTukkLR582
YBiLS0NLF48WLx3e9+VyxevFiYTCYBiEWLFgmXyxXsMEPK+PHjBSAiIiLEbbfdJu655x4xbdo0AQhFUcT//M//BDvEkPT5558LoM/2zDPPBDu0oHI4HGLevHkCEOPGjRP33HOPmDt3rgBEXFycKCkpCXaIIemuu+4K+HmSAnvllVd62mjKlCniO9/5jliyZImIjIwUgJg8ebKora0NdpghKSYmRphMJjF37lyRm5srcnNzRXZ2tgCEXq8Xu3btCnaIIe/BBx/syccyMjKGVIf83z3CVFUVN998s0hISOg5ocpkObCSkpKAX8Y1NTU9CeDmzZuDEFnoWrx4sXjjjTeEw+HoVb5lyxYBCK1WK06cOBGk6EJXcXGxeOSRR8SWLVvE0aNHxXPPPSeTZSHE008/LQAxf/580d7e3lO+cePGnh+sUl8//elPxZo1a8Rf//pXUV1dLYxGo0yWL2Hr1q3ihz/8oTh58mSv8qqqKjFz5kwBiPvuuy9I0YW2Tz/9tM/5XgghXnrpJQGIhIQE4fF4ghDZ2LB3714BiB/+8IcyWQ4l//d//ycA8eabb4oHH3xQJstDtG3bNgGI3NzcYIcyZtx+++0CEGvXrg12KCFvw4YN13yy7HK5hNVqFYDIy8vrc3z69OkCEEeOHAlCdGOLTJaH7sCBAwIQRqNRXkkcpIyMDAGI48ePBzuUkGS320VGRoaYOnWq+PLLL4eVLMsxyyOopqaGf//3f2fx4sV873vfC3Y4Y5perwfAYDAEOZKxo3tp96qqqiBHIo0F+/fvp7W1lYyMDGbOnNnn+PLlywHYtWvXaIcmXUO6z1sul4vGxsYgRzO2yO/JS3v22WcpLS1ly5YtPW01VDJZHkErV67E4XDw8ssvBzuUMa25uZmNGzcC8M1vfjPI0YwdpaWlACQmJgY5EmksOH78OACzZs0KeLy7PD8/f9Rikq493ectvV4v1zwYhG3btlFUVERWVhZZWVnBDifk5Ofns3HjRh566CEWLlw47PpCdgW/seadd97hj3/8I88++6z84A7S6dOnWb9+PaqqUltby4EDB+jo6ODRRx+VPfQDVFJSwjvvvAPAt771rSBHI40F5eXlACQnJwc83l1eVlY2ajFJ155f/vKXACxduhSj0RjkaELXCy+8wIkTJ+js7OTUqVOcOHGCpKQk3nrrLbRabbDDCymqqvKDH/yAqKgonn/++RGpUybLI6Cjo4PHHnuM7Oxs/uM//iPY4Yw5tbW1vP76673KVq5cybp169Bo5MWPy/F6vaxYsQKXy8W9997L7Nmzgx2SNAZ0T9dlNpsDHg8PDwegvb191GKSri27d+/mN7/5DXq9nnXr1gU7nJD2/vvv8+GHH/Y8T0tL44033pDn+wA2b97MZ599xm9/+1tiYmJGpE6ZLAO5ubmcOnVqUO954403mDt3LgA//vGPOXfuHB9++OE19ct4uO3W7aabbkIIgc/no7y8nB07dvDss8/y3nvv8cEHHzBhwoQRjDq4RqrNLrRy5Uo+/fRT0tPT+fWvfz3cEEPSlWg3SZKCp7CwkO9///sIIXjhhRd6xi5Lge3duxeAlpYWCgoKeO6551i0aBE/+clPePrpp4McXegoLy9n9erVLFq0iBUrVoxYvTJZBs6cOUNRUdGg3mO32wE4fPgwL730Evfffz+33nrrlQgvZA2n3QLRarVMnDiRVatWMWHCBO6++26eeOKJq+oGo5Fus/Xr1/Pyyy+TkJDA+++/f9WO+RvpdpMgIiIC6L+dOjs7AYiMjBy1mKRrQ2VlJUuXLqW5uZlVq1bx5JNPBjukMSMqKoqFCxeye/du5s+fz5o1a7j99tuZM2dOsEMLCY8//jhut5stW7aMaL0yWQaOHTs25Pfu3r0bVVUpKCjglltu6XWssLAQ8Cc0r776KkuXLuU///M/hxFpaBlOu11Obm4uERER7NmzB7fbfdXc7TuSbbZlyxZWr16N1Wplz549Q1+ZaAy4kp
+1a1VqaioAFRUVAY93l6elpY1aTNLVr6mpidtvv52ysjIeeughfv7znwc7pDFJr9dz7733cvToUXbt2iWT5S7vvPMOUVFRPProo73KnU4n4P+h1p2rbd++fcA3xMtkeYRc6su8sLCQwsLCq2o4wZWmKAo2m43y8nKam5tJSEgIdkghZfv27Tz++OOYzWbeffddZsyYEeyQpDGm+7J3Xl5ewOPd5dOnTx+1mKSrW0dHB1//+tc5efIky5Yt45VXXpHLhA9DbGwsAPX19UGOJLS0tLSwb9++gMecTmfPse4EeiDk3VPDtHbtWoR/cZc+24MPPgj4p3gRQrB169bgBjuGlJaWcu7cOSwWS88JQfLbvXs3DzzwADqdjh07drBgwYJghySNQQsWLMBqtVJSUhLwx/6f/vQnAO68885Rjky6GrlcLu666y4OHz7MkiVL5CwOI6A76cvIyAhyJKGjv3zszJkzgL+tussG04Epk2UpaDZv3kxNTU2f8qKiIv7lX/4FIQQPPPCAPKFeYP/+/SxfvhwhBG+//Ta33357sEOSxiiDwcCPfvQjwD/Or3uMMsCmTZvIz89n0aJF8m57adh8Ph/33XcfH330EQsXLuQvf/nLVTO07krav38/e/bsQVXVXuUej4fNmzezbds2wsLCuPfee4MU4bVDDsOQgmbjxo089dRT5OTkkJmZiRCCsrIyjh49iqqq3HzzzWzYsCHYYYaUO+64A4fDwcSJE9m5cyc7d+7s85qbbrqJH/zgB6MfXIjLzc2luroaOL/K4auvvsqePXsAGDduHDt27AhafMGwevVq9u7dy4EDB8jKymLhwoWUlZVx6NAh4uLieO2114IdYkh69913e0115na7AZg3b15P2Zo1a+SiSl1efPHFnv9bsbGxPPbYYwFf9/Of/1xeSbzA6dOneeihh4iNjWX27NnExMTQ0NBAQUEB1dXVmEwmtm7dSkpKSrBDverJZFkKmvXr17N7926OHDnC+++/j8PhwGaz8bWvfY377ruP+++/X86zfJGWlhbAPztE92WlQGSy3Nfnn3/eZ4GNyspKKisrgWvzRjaTycTHH3/Mhg0b+P3vf8/OnTux2WysWLGCdevW9btgybWuvr6eQ4cO9Sm/sEyOIz2vubm5Z/9SP0jXrl0rk+ULLFq0iB//+Mfs27eP/Px8GhoaMBgMTJgwgeXLl7Ny5cqr+sbuUKIIIUSwg5AkSZIkSZKkUCS77SRJkiRJkiSpHzJZliRJkiRJkqR+yGRZkiRJkiRJkvohk2VJkiRJkiRJ6odMliVJkiRJkiSpHzJZliRJkiRJkqR+yGRZkiRJkiRJkvohk2VJkiRJkiRJ6odMliVJkiRJkiSpHzJZliRJkiRJkqR+yGRZkiRJkiRJkvohk2VJkiRJkiRJ6sf/B/Dle2dLRI1RAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "samplers = [\n", + " stats.norm(),\n", + " stats.logistic(scale=np.sqrt(3)/ np.pi),\n", + " stats.laplace(scale=1/np.sqrt(2)),\n", + " stats.uniform(loc=-np.sqrt(3), scale=np.sqrt(3)*2),\n", + " stats.dweibull(2.0, scale=1),\n", + " stats.cauchy(0, scale=1),\n", + "]\n", + "\n", + "\n", + "labels=[\"Gaussian\", \"Logistic\", \"Laplace\", \"Uniform\", \"dWeibull\", \"Cauchy\",]\n", + "\n", + "x = np.linspace(-4, 4, 500)\n", + "for l in labels:\n", + " plt.figure(figsize=(8, 6))\n", + " for sampler, name in zip(samplers, labels):\n", + " y = sampler.pdf(x)\n", + " plt.plot(x, y, label=name, linewidth=3)\n", + " if name == l:\n", + " break\n", + " \n", + " plt.legend()\n", + " plt.grid()\n", + " plt.ylim(0, .7)\n", + " plt.xlim(-4, 4)\n", + " \n", + " plt.ylabel(\"Probability Density\")\n", + " plt.savefig(f\"figures/pdf_{l}.png\")\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": "venv", + "display_name": "venv (3.10.12)", "language": "python", "name": "python3" }, diff --git a/scripts/matrix/bbob_matrix_adaptation.png b/scripts/matrix/bbob_matrix_adaptation.png new file mode 100644 index 0000000..323024a Binary files /dev/null and b/scripts/matrix/bbob_matrix_adaptation.png differ diff --git a/scripts/matrix/get_data.py b/scripts/matrix/get_data.py new file mode 100644 index 0000000..e0860e1 --- /dev/null +++ b/scripts/matrix/get_data.py @@ -0,0 +1,195 @@ +from time import perf_counter +import warnings +import numpy as np +import modcma.c_maes as modcma +import ioh +import pandas as pd + +from multiprocessing import Pool, Process +import cma as pycma + +from pprint import pprint + +np.random.seed(12) + +DIMS = 2, 3, 5, 10, 20, 40, #100 +FUNCTIONS = list(range(1, 25)) +N_REPEATS = 100 +BUDGET = 100_000 +ROOT = "data" + + +def ert(runs, target = 1e-8): + 
def ert(runs, target=1e-8):
    # NOTE(review): the original `def` line of this function lies before this
    # chunk; it is called below as ert(runs), so `target` presumably carries a
    # default — confirm against the full file.
    """Expected running time (ERT) over a set of runs.

    Parameters:
        runs: iterable of dicts with keys 'evals' (evaluations consumed) and
            'best_y' (best internal objective value reached).
        target: a run counts as a success when its best_y <= target.

    Returns:
        Sum of evaluations across all runs divided by the number of
        successful runs, or float('inf') when no run succeeded.
    """
    total_evals = 0
    n_suc = 0
    for row in runs:
        total_evals += row['evals']
        if row['best_y'] <= target:
            n_suc += 1

    if n_suc <= 0:
        return float("inf")
    return total_evals / n_suc


def run_modma(problem: ioh.ProblemType,
              x0: np.ndarray,
              logger_obj,
              module_name, option
              ):
    """Run modcma on `problem` with one module option overridden.

    Restart-criterion hits are forwarded to `logger_obj.update()` so they can
    be logged as run attributes by the ioh Analyzer.

    Returns:
        (elapsed_seconds, iterations, evaluations, n_eigen_updates)
    """
    modules = make_modules(module_name, option)

    settings = modcma.Settings(
        problem.meta_data.n_variables,
        x0=x0,
        modules=modules,
        lb=problem.bounds.lb,
        ub=problem.bounds.ub,
        verbose=True,
        sigma0=2.0,
        target=problem.optimum.y + 1e-8,
        budget=problem.meta_data.n_variables * BUDGET,
    )

    cma = modcma.ModularCMAES(settings)
    start = perf_counter()
    while not cma.break_conditions():
        if cma.p.criteria.any():
            logger_obj.update(cma.p.criteria.items)
        cma.step(problem)

    # A criterion can fire on the final step as well; record it once more.
    if cma.p.criteria.any():
        logger_obj.update(cma.p.criteria.items)

    stop = perf_counter()
    elapsed = stop - start
    return elapsed, cma.p.stats.t, problem.state.evaluations, cma.p.stats.n_updates


class RestartCollector:
    """Counts how many times each restart criterion was met during a run.

    One integer attribute is kept per criterion name; the names are
    discovered from a throw-away CMA instance so they match whatever the
    installed modcma build exposes.
    """

    def __init__(self, strategy=modcma.options.RestartStrategy.STOP):
        # Build a tiny (2-D) CMA instance solely to enumerate criterion names.
        modules = modcma.parameters.Modules()
        modules.restart_strategy = strategy
        settings = modcma.Settings(
            2,
            modules=modules,
        )
        cma = modcma.ModularCMAES(settings)
        self.names = [x.name for x in cma.p.criteria.items]
        self.reset()

    def update(self, items):
        """Increment the counter of every criterion in `items` that was met."""
        for item in items:
            if item.met:
                setattr(self, item.name, getattr(self, item.name) + 1)

    def reset(self):
        """Zero all criterion counters (call between runs)."""
        for item in self.names:
            setattr(self, item, 0)


def collect(name, module_name, option):
    """Benchmark one module setting over FUNCTIONS x DIMS with ioh logging.

    Parameters:
        name: label used for the ioh output folder and algorithm name.
        module_name: attribute of modcma.parameters.Modules to override.
        option: value assigned to that attribute.
    """
    logger = ioh.logger.Analyzer(
        folder_name=name,
        algorithm_name=name,
        root=ROOT
    )
    collector = RestartCollector()
    logger.add_run_attributes(collector, collector.names)
    for fid in FUNCTIONS:
        for d in DIMS:
            problem = ioh.get_problem(fid, 1, d)
            problem.attach_logger(logger)
            runs = []
            for i in range(N_REPEATS):
                # NOTE(review): for i == 0 this seed is always 21 regardless
                # of fid and d, so the first repeats of all problems share a
                # seed — verify this is intended.
                modcma.utils.set_seed(21 + fid * d * i)
                collector.reset()
                run_modma(problem, np.zeros(d), collector, module_name, option)
                runs.append(dict(evals=problem.state.evaluations,
                                 best_y=problem.state.current_best_internal.y))
                problem.reset()
            print(name, fid, d, "ert:", ert(runs))


def make_modules(module_name, option):
    """Modules with restarts disabled and one named attribute overridden."""
    modules = modcma.parameters.Modules()
    modules.restart_strategy = modcma.options.RestartStrategy.STOP
    setattr(modules, module_name, option)
    return modules


def collect_modcma():
    """Run collect() for every matrix-adaptation variant, 6 workers in parallel."""
    # Enum.__members__ is a read-only mappingproxy: deleting from it raises
    # TypeError, so copy it into a plain dict before pruning.
    options = dict(modcma.options.MatrixAdaptationType.__members__)
    del options['COVARIANCE_NO_EIGV']

    # collect() takes (name, module_name, option); starmapping the raw
    # 2-tuples from options.items() would raise TypeError, so expand each
    # entry with the attribute being swept.
    jobs = [(name, 'matrix_adaptation', option)
            for name, option in options.items()]
    with Pool(6) as p:
        p.starmap(collect, jobs)


def run_pycma(problem: ioh.ProblemType, x0: np.ndarray):
    """Run reference pycma on `problem` with settings mirroring run_modma.

    Returns:
        (elapsed_seconds, iterations, evaluations, n_eigen_decompositions)
    """
    options = pycma.CMAOptions()
    options['CMA_active'] = False
    options["verbose"] = -1
    options["CMA_diagonal"] = False
    options['conditioncov_alleviate'] = False
    options['ftarget'] = problem.optimum.y + 1e-8
    options['maxfevals'] = problem.meta_data.n_variables * BUDGET

    cma = pycma.CMAEvolutionStrategy(x0, 2.0, options=options)
    # Sanity check: both libraries must agree on the default population size.
    settings = modcma.Settings(problem.meta_data.n_variables)
    assert settings.lambda0 == cma.sp.popsize
    start = perf_counter()

    target = problem.optimum.y + 1e-8
    budget = problem.meta_data.n_variables * BUDGET

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        while problem.state.evaluations < budget:
            X, y = cma.ask_and_eval(problem)
            cma.tell(X, y)

            if problem.state.current_best.y <= target:
                break
            if cma.stop():
                break

    stop = perf_counter()
    elapsed = stop - start

    return elapsed, cma.countiter, problem.state.evaluations, cma.sm.count_eigen


def collect_pycma():
    """Benchmark pycma over FUNCTIONS x DIMS with ioh logging."""
    logger = ioh.logger.Analyzer(
        folder_name="pycma",
        algorithm_name="pycma",
        root=ROOT
    )
    for fid in FUNCTIONS:
        for d in DIMS:
            problem = ioh.get_problem(fid, 1, d)
            problem.attach_logger(logger)
            for i in range(N_REPEATS):
                # NOTE(review): same seeding caveat as in collect() — for
                # i == 0 the seed is always 21.
                np.random.seed(21 + fid * d * i)
                run_pycma(problem, np.zeros(d))
                print("pycma", fid, d,
                      problem.state.current_best_internal.y,
                      problem.state.evaluations)
                problem.reset()


if __name__ == "__main__":
    # The parallel collect_modcma / collect_pycma drivers are currently
    # disabled; only the default module set is instantiated.
    mods = modcma.parameters.Modules()
"iVBORw0KGgoAAAANSUhEUgAABQkAAAJOCAYAAAAUBIGvAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xd4VNXWwOHfmZpMegMSCCSUUBQUEAQBaVJEiiiKYAPlqldQruUq9npV7C0iflJVVKQoIAIKAtKRYiF0EggQSK+TTD3fHyFjQhJSmGSSsN7nmefemdPWkcnMmr3X3ltRVVVFCCGEEEIIIYQQQghxydJ4OgAhhBBCCCGEEEIIIYRnSSOhEEIIIYQQQgghhBCXOGkkFEIIIYQQQgghhBDiEieNhEIIIYQQQgghhBBCXOKkkVAIIYQQQgghhBBCiEucNBIKIYQQQgghhBBCCHGJk0ZCIYQQQgghhBBCCCEucdJIKIQQQgghhBBCCCHEJU7n6QBExZxOJ6dPn8bPzw9FUTwdjhBCCCHqGVVVycnJISIiAo1G+ojrG8kFhRBCCHExKpsLSiNhPXD69GkiIyM9HYYQQggh6rnExESaNWvm6TBEFUkuKIQQQgh3qCgXlEbCesDPzw8o/Mf09/f3cDSiIbDZbKxZs4bBgwej1+s9HY4Q1SbvZdFQ1PR7OTs7m8jISFdOIeoXyQWFu8n3p2go5L0sGoq6kgtKI2EdFhsbS2xsLA6HAwB/f39JDIVb2Gw2TCYT/v7+8mUq6jV5L4uGorbeyzJUtX6RXFDUFPn+FA2FvJdFQ1FXckGZlKYOmzx5MnFxcezcudPToQghhBBCiFomuaAQQgghapM0EgohhBBCCCGEEEIIcYmTRkIhhBBCCCGEEEIIIS5x0kgohBBCCCFEHRQbG0uHDh3o1q2bp0MRQgghxCVAGgmFEEIIIYSog2ROQiGEEELUJmkkFEIIIYQQQgghhBDiEieNhEIIIYQQQgghhBBCXOJ0ng5A1CxVVbFarTgcDk+HIuoQm82GXq/HbDaj1+s9HY6oJVqtFoPBgKIong5FCCFELZFcUJRFcsFLk16vl39vIcQFSSNhA+VwOEhKSiIjIwOr1erpcEQd1LhxY44cOeLpMEQtMxgMBAUFER4ejlar9XQ4QgghaojkgqIikgtemvz8/IiIiMDX19fToQgh6iBpJGyAHA4Hhw8fJj8/n5CQEAICAtDpdFI9JMQlTFVV7HY7WVlZpKSkkJubS5s2baShUAgh6rDY2FhiY2OrXAUouaAQ4nyqqlJQUMDZs2c5cuQI7du3x2g0ejosIUQdI42EDVBSUhL5+fnExMTg4+Pj6XCEEHVIQEAAISEhHDp0iCNHjhATEyM/GoUQoo6aPHkykydPJjs7m4CAgEofJ7mgEKIsPj4+BAQEEBcXx759+4iOjiYoKMjTYQkh6hBZuKQOi42NpUOHDnTr1q3Sx6iqSkZGBiEhIZIUCiHK5OPjQ3BwMOnp6ezcuRNVVT0dkhCXPHN2IsH6OMzZiZ4ORdRzkgsKIS5Ep9MRGhqKzWbj66+/liHnQtQRuYeP4PfLr+Qe9uzfpDQS1mGTJ08mLi6OnTt3VvoYq9WK1WqtUm+zEOLSExgYiMFgYOfOnZIcCuFhqqqSenIrXtosUk9ulYZ7cVEkFxRCVMTX1xedrnBQ4Zo1a0hLS/NwREJc2lRV5ezKVeiTznB25SqP5oLSSNjAFM1ZU/ShL4QQZSn6jFBVlaNHj3o4GiEubXlZx8nLTMCpasnLTCAv67inQxL1mOSCQoiKFM1J3ahRIzIzMzl+XL53hPCknIOHyNl/AFWvJ2f/AXIOHvJYLNJI2EB5Yo6x9bsSWbU1gfW7ZKiUEHVd0WeEyWTi+PHjOJ1OD0ckxKVJVVVSTmxBVe040aOq9nPPpZpQXByZb1YIUZ6izwdFUdDpdJw+fdrDEQlx6VJVlaSVP+G02XB6eeG02Uha+ZPHckFpJBRus373SVZ
vS2D97pOeDkUIUUkajQaHw4Hdbvd0KEJckvKyjpObGY9G5wUoaHRe5GbGSzWhAKo3P7UQQlSFVquloKDA02EIccnKOXiInLj9aH18QFHQ+viQE7ffY9WE0kgo3CYv30ZSah55+TZPhyKEqCSpNBHCc4qqCJ1OOxqNAQCNxoDTKdWEolB15qf2pEMnMvjfnO0cOpHh6VCEEEKIOu+fKkI7GqMRAI3RiNNm91g1oTQSCrdQVZXkdDP5FjvJ6eZ698Pm559/ZuLEicTExODv74/RaCQ8PJxBgwbx3nvvkZKS4tr3xRdfRFEUXnzxxVqJbcKECSiKwty5c2vlesWtX78eRVHo169frV9bCCEauqIqQq3Ou8TQL63OW6oJRb2jqirLfjvKn0dSWfbb0XqVC0oeWDbJA4UQomYVryIskQt6sJpQGgmFWxw8nkFuvg1FUcjNt3HweP3oQU5NTWXQoEEMHjyYuXPnYrPZ6N+/PzfffDPt27dny5YtPProo7Rs2ZLt27d7OlwhhBANRIkqQq2hxDaNVqoJRf1z8HgGcfHpeBt1xMWn14tcUPJAIYQQnlKiitCgL7HNk9WEsuyZuGiqqrJi8zGcqopWo+A897xti6A6PZQxKyuL3r17c/DgQdq1a8dnn31Gnz59SuxjsViYN28eL7zwAklJSR6KVAghRENzfhVh8QTw/GpC38AozwUqRCUU5YI2u5MQfyNp2ZY6nwtKHiiEEMKTilcROsz5OC0FKJrCOr7zqwn927WttbikklBctKKeY61WU/hm1mrqRQ/yQw89xMGDB4mKimLz5s2lEkMAo9HIfffdx969e2nfvr0HohRCCNHQXKiKsIhUE4r6pCgX9PHSoSgKPl51v5pQ8kAhhBCecv5chDpfX7S+vij5BUBh3uepakJpJBQXpXjPseZcR7FGAZvdyYrNx+rsD5tjx46xYMECAN59912Cg4MvuH/jxo1p27Z0631KSgqTJ08mMjISg8FAZGQkDz30EJmZmeWea/Xq1QwfPpxGjRphMBiIiIhg7Nix/P7779W6l127dnH77bfTvHlzjEYjwcHBDBkyhJUrV5a5f1JSElOnTiUmJgYvLy9MJhORkZEMHDiQt99+u9LXTUlJ4ZprrkFRFMaOHcvq1atRFIV27dqV++9eUFBASEgIiqIQFxdXrfsVQoj67p8qQi8c9oIyPzNlbkJRXxTPBb0MWgC8DNo6nQtKHih5oBBCeFJZcxFqDEacJhPg2bkJpZFQXJTze46BetGDvGLFChwOB4GBgYwcObJa50hMTKRLly4sXryY7t27M2jQIHJycvj4448ZPHgwNlvpVZ6fe+45hg4dysqVK4mJiWHMmDE0btyYhQsX0qNHD2bPnl2lGD744AO6d+/OggULCAkJYeTIkVx22WWsX7+eG264gZdffrnE/mfOnOGqq67iww8/xGKxMHToUEaOHEl0dDR79+7l1VdfrdR1Dx06RM+ePdm6dStPPPEE33zzDUOGDKFjx44cPHiQX375pczjvv76a9LT0+nfvz8dOnSo0r0KIURDULyK0Ol0YrdkY81Px+ko/Z0h1YQiNjaWDh060K1bN0+HUq76mAtKHih5oBBCeEpZKxqXxxPVhNJIKKqtrJ7jInW9B7mot7ZLly5otdoK9i7b7Nmzue666zh+/DiLFy/mxx9/5K+//qJp06bs3LmTRYsWldh/1apVvPrqq3h5ebF69Wo2bdrEggUL2LNnD59//jkOh4MHHniAffv2Ver6q1ev5pFHHiE4OJgNGzawZ88evvvuO3777Td+//13mjVrxgsvvMCGDRtcx3z22WecPn2a++67j4SEBL7//nu++eYbNm7cyJkzZ1i8eHGF1/3tt9/o2bMnCQkJfPrpp0yfPt31o+Dhhx8G4OOPPy7z2NjYWACmTJlSqXsUQoiGpqiKUKM14rDlAqA67RQNLSlOqgnF5MmTiYuLY+fOnZ4OpUz1NReUPFDyQCGE8JSiKkJ
Fq8GRl0tZOWART1QTysIll6j1uxJZv/tkhfs1a+THpFGXl3jt8x/+5mRyDrn5NuJPZaFoFJLSHBRYHaCq2BwKSWlmjHqtqwe5XVQwBVY7b8yrXJJ778jLiWzs53q+71gaKRlm+nWNrNqNliMlJQWARo0aVfsczZo1IzY2FmOx1v+iYSbTpk3jl19+Ydy4ca5tRUM4HnzwQQYNGlTiXPfeey/ff/89K1as4IMPPuCzzz6r8PovvPACqqry6aefcu2115bY1rFjR959911uvfVWPvroI/r27QvA2bNnARg6dGipicT1ej0DBw684DW//vprJk6ciF6vZ/ny5Vx//fUltt9+++1MmzaNFStWcPz4cVq0aOHatm3bNnbt2kVkZCSjRo2q8P6EEKKhKV5FqKpWONdwotV7n5ubML/UMRqtAZs9n5QTW/AJaFFnF4EQ9U9N5ILnc2cu6E6SB0oeKIQQnlBUReiwWnEUWFAAhzkfY1hYucdojEZseXkkrfwJv7YxNZ4LSiXhJarA6iAr11LhIzffWurY3HwrmTkFnErOxeFUUZ0qDocKqlrYBq4WPtdqlZI9yCqVumZWrgW7w1nimjb7uUbIOmTgwIGYTKZSrxdNbH3q1CnXa3a7nc2bNwMwYcKEMs937733AvDrr79WeO3U1FR27NiBt7c3I0aMKHOffv36AbBlyxbXa927dwdg2rRpLFmyhNzc3AqvVeS1117j9ttvJyQkhN9++61UYgjg7e3Nfffdh9PpZMaMGSW2FfUeP/DAA9XutRdCiPqsqIpQQYNaNLxY0aAz+JZ7jFQTippSE7ng+Q935oJ1jeSBkgcKIURV5Rw8RHbcflS7naKmPkWvgws0/NV2NaE0El6ivAxaAnyNFT58vUuvuujrbUCn02K1OdDpNOh0GrRaBRSl8I2uKGi1ClqNpuR8NAqVumaArxGdtuRbU6/TlhrGcjHCzrXUJycnV/sczZs3L/N1f39/oHBy5iJpaWmu59HR0WUe16pVK6BkUlme+Ph4VFUlPz8fo9GIoiilHkW940W95QB33nknt99+O4cOHeLmm28mMDCQTp068eCDD7Ju3bpyr7d582aeeeYZjEYjGzdu5Morryx33wcffBCdTsesWbNc95ySksJ3332H0WjkX//6V4X3J4QQDY2ritBhw27/5/tBb/RHUS6cjsnchKIm1EQuWOrhxlzQnSQPlDxQCCFqm6uK0GzGabMDoGg1GAICKzy2NucmlOHGl6h+XSOrPXT33pGX8c6CXZxNNxPib3SVu55OzXP1GkeE+gCFfwh5BXZWbD7GY+O78uK/elbrmpe1DAFCqnVsWbp27coXX3zB7t27cTgc1erR1Gg818budBb2rvv6+nLzzTdX+jiNRsOXX37J008/zY8//sjmzZvZvHkzM2bMYMaMGYwYMYKlS5eW+u9x2WWXodfr+f3333nooYdYvHgx3t7eZV6jWbNm3HTTTSxcuJBvv/2Wu+++m88//xyLxcKdd97pSsyFEOJSUlRFqKpOlHNzz2h0Xmh1F56wGkpXE/oGRtVwtOJSUBO5YFnclQu6k+SBkgcKIURtyzl4iOy/43Da7K7vTX1QEFTi++T8akL/dm1rLE5pJBRVVtYqduU5f3W7dlHBtRTlhQ0fPpxHH32UzMxMli1bxujRo2v0eiEhIRiNRiwWC8eOHaNTp06l9jl27BgATZs2rfB8kZGFSb2iKMyePbvKiWqHDh3o0KED//3vf1FVlXXr1jF+/HiWL1/O/PnzmThxYon9AwMDWbZsGcOHD+enn37i+uuvZ8WKFfj6lj1E7uGHH2bhwoXExsZyxx138OmnnwIyUbUQ4tJUVEVot+WjOs9NnaEoaHVerlWNC3uFnTgdtjK/WxVFg91hlbkJRZ1Q33NByQMlDxRCiNpUVEVoy852Pdd6e6NotDitVtdrOBw4rdayc0GNBnuBpcbnJpThxnVYbGwsHTp0oFu3bp4OxeVCq9iVpy6
ubteqVSvXZNKPPfYY6enpF9w/OTmZgwcPVvt6Op2O3r17AzB37twy95k9ezYA/fv3r/B8ERERdOrUiZycHFatWlXtuKAwwRw4cCDjx48HYO/evWXu5+/vz6pVqxg8eDAbNmzguuuuIyMjo8x9e/XqRdeuXdm5cyfPPvssJ06coFu3bq65cIQQ4lKiqg4s+emoqoPCFexUtDojqtOG02E597CiwYHTYS32WsmHVmvAask6dx4hPKMh5IKSB/5D8kAhhKh5qt2OOT6hcNG6c9+DGoMeZ0F+sUcBGpsNZ0HBea//89B6GbGmpqHa7TUWq1QS1mGTJ09m8uTJZGdnExAQ4OlwgKr1HBepiz3IAB999BHbt2/nyJEj9O7dm88++8yVwBWxWq18+eWXPPfcc8TGxtK2bfXLeh977DHWrl3LjBkzuOGGG0qsIDd37lyWLVuGXq9n6tSplTrfq6++ysiRI5k4cSKff/55qYmrVVVlx44dZGVlMXjwYADmz5/PZZddRteuXUvsm5OTw/r16wFKrER3PpPJxPLlyxk3bhxLliyhX79+rFmzhsaNG5fad+rUqdx111288cYbgPQeCyEuXRqNjuhOt5N2aicZZ//CFNCMJlH9SnyP2mw2Nm7cyLU9rkWv15d7Lq3OhEYj6ZvwnIaSC0oe+A/JA4UQomY58gvQmnzwCg8HoOnokfi1jSmxj91uZ+OGjVzZ91p0uvJzPZ2vH5oL5IoXS7JMUWnFe479TVV7U3oZtK75aNq2CKoTw6SCgoLYvHkzY8eOZf369fTp04fo6Gg6deqEyWTi7Nmz7Nixg9zcXPz9/YmIiLio611//fU8++yzvPrqqwwaNIhevXrRvHlzDhw4wO7du9FqtXz66adcdtlllTrfiBEj+OCDD3jssccYOXIkrVu3pm3btgQEBJCSksIff/xBcnIyTz75pCs5XLJkCXfffTcRERFceeWVBAUFkZGRwebNm8nKyuLyyy+vcEJpg8HAwoULmThxIl988QXXXnstv/zyi2voS5GxY8fy3//+l7NnzxIWFsbYsWOr9x9OCCEaAKN3EBGtBxMc3hmt3hv9eSsaa2027KoJL59GF2wkFMKTGlIuKHmg5IFCCFFbzq75GafNhsZgIPDKTjQeOKDUPjabDUdgAN5Nm3o0F5RGQlFp1ek5LlIXe5ABGjVqxK+//sqqVav4+uuv2bJlC2vXrsVisRASEkLPnj254YYbuPPOOwkOvviYX3nlFXr16uXqvd62bRuhoaHccsstPP7441UehvHwww8zYMAAPvroI3799VfWrl2LRqOhSZMmdO7cmRtuuKHEhNaPPfYY0dHRbNmyhd27d5Oenk5wcDAdOnRg/PjxTJw4ER8fnwqvq9VqmTdvHr6+vsyYMYM+ffrwyy+/0Lp1a9c+BoOBfv368e233zJp0iSMxoon5xdCiIbOy0cm7ReVFxsbS2xsLA5H3Rhi3tByQckDJQ8UQojaED7iBlAUMv/4k2Y33ejpcC5IUevCxCDigoqGG2dlZeHv73/Bfc1mM/v376d9+/aYTCa3xaCqKu8s2MXuA8kE+ZX9JZ+ckY/DqaLVKDQKKnvFs4wcC13aNeKx8V093oMsalZmZibNmjWjoKCA+Pj4Uj3MwrOKPiv+/vtv8vLymDRpEgaDwdNhVZnNZmPlypUMGzZMqq9EnWS35qEzVPyju6bfy1XJJUTdU19ywcqQXPDSIHlg3Vb0OZGQkEBCQgItWrRgzJgxng6rWiQXFPWJ3WxGV853c13JBaWSUFSK3aGSmlmAl1FHvrXs3mynU0VVVZxOyt3Hy6gjLasAu0NFr5PEsCF7/fXXycvLY9y4cZIYCiEuSXlZiSQe+J6Qpt0IjeiGoqncIg9C1EWVyQUrQ3LBS4PkgUIIUVp5DYR1iTQSikrR6zQ8eddV5Jpt5e4T+91ecsxW/EwGJt9yZbn7+ZkM6HWysHZDtGXLFmbPnk18fDzr1q3DZDLx6quvejo
sIYSodU6HjaRjv6CqTlJPbsdgDCAgrL2nwxKi2iqTC1aW5IINk+SBQgjxj9xjx9AHBGIM8fz0GlUhjYSi0oL8vAjy8yp3+9CeURRYHXgZtEQ29qvFyERdcejQIWbNmoW3tzc9evRg+vTptGzZ0tNhCSFErUtJ3ILNkg2AyS8C/9Dqr4oqRF1RUS4oLm2SBwohRCG72czxeV/itFoJHz6MkGt61pspNqSRULhNv64ylOBSN2HCBCZMmODpMIQQwqPyc5JIP7MXAEXREt7yOhRFqqaEEA2b5IFCCFHo1NIfsOXkApC9/yAh1/T0cESVJxmrEEIIIYSbOJ12Th/7xfU8LLIHBu8gD0YkhBBCCCFqS3bcfjJ27QFA6+1FszE31ZsqQpBGQiGEEEIIt0k9uQNrfjoAXj6NCA7v4uGIhBBCCCFEbbCb80n8brHrecTIERgCAzwYUdVJI6EQQgghhBsU5CWTdvp3ABRFQ3irQTLMWAghhBDiEpG0fAW2rMI5qf3bxRDc/SoPR1R1krkKIYQQQlwk1eng9NGfARWAkKbd8DKFejYoIYQQQghRK3IOHiJt+04AtEYDzW65uV4NMy4ijYRCCCGEEBfJbstDVZ0AGE0hhEZ083BEQgghhBCiNjgsFhIXLnI9jxg5HENQ5eekzs3NJe6vLWQn/UrcX1vIzc2tiTArRVY3FkIIIYS4SHqjP9Edx5F2age+Qa1QNFpPhyQagNjYWGJjY3E4HJ4ORQghhBDlSFr+I9aMTAD82rQiuMfVFR6jqip79uzh22+/Zfny5eRkpWCzWdHrv8IvIIwRI0YwduxYOnfuXKsViVJJKIQQQgjhBhqNjrDIa/D2bezpUEQDMXnyZOLi4ti5c6enQxFCCCFEOfwv64Ah0L9wmPGtt1TYqHfgwAFGjx7N6NGjmTdvHmZzLgadgo/JgEGnYDbnMm/ePNc+Bw4cqKU7kUpC4Ua/JezA4rBg1BrpE9Xd0+EIIYQQQgghhBBC1Cj/9u1o+9/HMJ88hTEk+IL77tixg/vvv5+kpCSCgoIIDQ3FWpCB067DqYJGAY1OR0hIGGazmW3btjFu3DhmzpxJ9+41384ilYTCbTad2MEvRzex6cQOT4cihBBC1DhrQSbH4xZjMad5OhQhhBBCCOFBWm9v/Nq0vuA+Bw4c4P777+fMmTNERETg4+OD6rThdFhAOdc8p2hwOiyoThs+Pj5ERERw5swZ7r///lqpKJRGQuE2eVYzZ3JTyLOaPR2KEEIIUaNU1UnSsV8wZ58k/q8F5GYmeDokITzuSFoCb236lCNpCZ4ORQghhKhRtpwcVFWt9P6qqjJt2jSSkpIIDw9HoylsjrNZc0FVgaIhygqoauHrgEajITw8nKSkJKZNm1ala1aHNBLWsEWLFjF69GiaN2+OyWTisssu45133sFms3k6NLdSVZWUvHQK7AWk5KXX+BvX3X7++WcmTpxITEwM/v7+GI1GwsPDGTRoEO+99x4pKSmufefOnYuiKCiKgsFgIDk5udzzWiwWQkJCXPu/+uqrZe73/fffM3LkSCIiIjAYDAQEBNC6dWuGDh3KK6+8wr59+yq8h5EjR7qu8/fff1f9P4IQQohKy0zehzn7FAA6vQ8mvwgPRySEZ6mqyspDv7Iv+RArD/1ar3JByQOFEEJUhdNq5chHnxD/f7OxZmZW6pg9e/awZ88egoKCXA2ETofVVUVYNI2houCqJnQ6rEBhQ2FQUBB79+5lz549NXBH/5BGwhr29ttvYzQaefPNN/nxxx8ZP348zz77LJMmTfJ0aG51OC2eXJsZBQ25NjOH0+I9HVKlpKamMmjQIAYPHszcuXOx2Wz079+fm2++mfbt27NlyxYeffRRWrZsyfbt20sdb7PZ+OKLL8o9/9KlS0lPTy93u8PhYPz48YwePZrly5cTHBzMsGHDGDZsGI0bN2bjxo08//zzzJkz54L3kZSUxMqVK13PZ82aVYm7F0IIUR02Sw7Jx39
zPW/SciAarcGDEQnheYfT4jmQegQvnZEDqUfqRS4oeaAQQojqSPppNZbUNLIPHOTkwkWVOubbb7/FYrFgMplcrxVVEZ6/0ImilKwmBDCZTBQUFLBw4UL33EQ5ZOGSGrZ8+XLCwsJcz/v374+qqjz33HO8+eabNG5c/1dAVFWVVYc3oKpONIoGVXWy6vAG2oRE1+pS3VWVlZVF7969OXjwIO3ateOzzz6jT58+JfaxWCzMmzePF154gaSkpBLbOnXqxP79+5kzZw6PPfZYmdeYPXs2AN26dStzZcJPP/2Ur7/+Gj8/P3744Qf69+9fYrvZbGbFihUVVp7OmzcPh8NB06ZNOXXqFF9++SXTp0/HYJAfrUII4U6qqnImfh1OZ+HnckBYB3wDW3g4KiE8qygXtDlsBHsHkp6fWedzQckDhRBCVEdewnFSNxZ2Fmt0OiJGjazwmNzcXJYvX47JZHJ9LxavIiwcaly8Al8pUU2o0RpQFAWTycSyZct49tln8fX1df/NIZWENa54A2GRrl27AnD69OnaDqdGFPUcaxQtiqKgUbT1ogf5oYce4uDBg0RFRbF58+ZSiSGA0WjkvvvuY+/evbRv377EtrCwMEaMGMG+ffvK7F0+ceIEa9eu5eqrr6ZDhw5lxvDNN98AMGXKlFKJIRT2Ftx6663cfvvtF7yXoiT0nXfeoWXLlqSmpvLDDz9c8BghhBBVl516wDX/oE5vonGL0t8dQlxqinJBH0Phjx8fg6nO54KSBwohhKgqp81G4jcLKZpRo8nQQXg1blThcSkpKeTn5+Pl5eV6rfhchBaLFYez5DQdZVUTenl5kZ+fT2pqqlvupyyXZCPhwYMH+eijj5gwYQIdO3ZEp9NdcJ6Q83333Xf069ePoKAgfHx8uOKKK3jzzTcrPc/gxo0bMRgMtGrV6mJuo04o3nOsOdcirlEUbA7buerCujkfzbFjx1iwYAEA7777LsHBF16mvHHjxrRt27bU6/fccw/wT3JW3Jw5c3A6na59ynL27FkAGjWq+IOlPBs2bODw4cOEhIQwevRoJk6cCMhQEyGEcDe7zczZhA2u502iB6DVeV3gCCEavuK5oPHcsHuj1lCnc0HJA4UQQlTH2TU/U5BcOE+tKbIZYX2vrdRx+fn5OJ3OMucitNvtJJ3N4NTpNKxWe7GjlDLnJnQ6nZjNNbdY7CXZSDhjxgwefvhh5s2bx99//43D4aj0sf/5z3+49dZb2bx5M927d2fo0KGcOHGCJ598kgEDBpCfn3/B4+Pi4vjggw+477778Pf3v9hb8bjze46BetGDvGLFChwOB4GBgYwcWXF5cHmGDh1KREQE33zzTYl/e1VVmTNnDiaTidtuu63c45s3bw4UToKdlZVVrRiKksDbb78dg8HAhAkT0Gg0/PzzzyQmJlbrnEIIIUo7E/8rDocFAP+QGPyC639nnxAXqz7mgpIHCiGEqCrziUSSfy3sLFa0GiJvuxVFq63Usd7e3q4GPvinilBVVZJTsnA4nNhsdjKySjb+nV9NWNTQWHxeQ3e7JBsJL7/8ch5//HG++uor9u/fz5133lmp477//ns++OADfH192b59O6tXr2bx4sUcPnyYjh07smnTJp577rlyj09NTeXGG2+kdevWvPHGG+66HY8pq+e4SF3vQf79998B6NKlC9pK/mGXRavVcvfdd5Odnc2iRf9MWLp27VqOHz/OmDFjLtgYPGXKFAD++OMPWrRowZ133smMGTPYvn07Vqu1wutnZWW5rlvUU92sWTMGDRqE0+lk7ty51b43IYQQ/yjISyEn/QgAWp0XjaP6ejgiITyvvuaCkgcKIYSoCqfNxolvFqKeGxLcZPB1eIc3qfTxYWFheHt7U1BQUKKKMD0jhwJL4YhUnU5LaLDfeUeWrCYsKCjA29ub0NBQd91aKZfkwiXnryxcVPJZkddeew2AadOm0aVLF9froaGhfPLJJ/Tp04ePP/6Y5557joCAgBL
H5uTkcP3112O1Wlm/fj0+Pj4XeRcX57eEHWw6saPC/SL8mnB35zElXpu3ZxGnc86QZzUTn3kSBYUzeSlY7FZUVcXuLHzupTW4epBjQltisVt5d8tnlYrvzitvppl/uOv5/pTDpOZl0Ceqe9VutBwpKYUlwhczvKPIxIkTef3115k9e7arwbmoV/dCQ0wAbrzxRmbNmsUTTzxBWloaX375JV9++SVQON/AsGHDmDZtGt26dSvz+K+//pr8/Hy6du3KFVdc4Xr93nvvZfXq1cyZM4dnn322zk4aLoQQ9YWXTxgtLruFpKM/E9asBzp9zfXgClEbaiIXPJ87c0F3kjxQCCFEVZz9ZR0FZwqniPBuGk6jAaXnkb0QX19fRowYwbx58zB5qaCq5JotZGUXVqErikLjsEC02tKf14qioDqdWC05mM35jB07tsYWLYFLtJKwOk6dOuValWz8+PGltvfu3ZvIyEgsFgsrV64ssc1isTBq1CgSEhJYvXo1ERERtRLzhVgcFrIKcip85FlLj3XPs5rJzM/mdM5ZnE4HqqricDpRz63Go1L4XKNoS/Qgq6iVumZWQQ4OZ8kh4DaHHcu5IV51TZs2bejTpw8bNmzg2LFjZGRk8P3339OqVSuuvbbiOQruueceTpw4wbfffssDDzzAVVddhcFgoKCggCVLltCzZ08+//zzMo8tev38JHTUqFGEhIQQHx/PunXrLv4mhRBCYPKLILrT7fiFxHg6FCEuWk3kguc/3JkL1lWSBwohRMOmqiq2c1NCKBoNzW8bW+lhxsWNHTsWg16LOS8Xm91JSlq2a1tIsB9eXvpyjiysJjTn5WIw6LjllluqcxuVdklWElbHnj17AAgODiY6OrrMfa666ioSExPZs2cP48aNA8DhcHDbbbexc+dO1q1bV+akx+ezWCxYLP80iGVnF755bDZbhYujVHbxFKPWSIDX+aWspfkYSldK+BhM6LU6LA4bWo0O7blKTLtTQUVFQUGr0aDVaDDq/pmPpkVgs0pdE0CrKflHp9fqMGqNlTq2MopWnU5OTnbL+e655x5+++035syZQ5MmTSgoKGDixImV7rktWr3u1ltvBSAvL4+ffvqJp59+msOHDzN58mSGDh1Ks2bNXMf88ccf7Nq1Cy8vr1IN1waDgdtvv50PP/yQ2bNnM3DgQLfcp2h4nE4nTqcTm81WLysNij7zKvvZJ4R72CvepYpq+r0sfyPifDWRC57PnbmgO0keKIQQorIURaH5bbcS0PEyrKnpeDetXtHXlVdeSbs24ezaewCdVnENXfb18SbA/8IjVFQVsnPy6XJlC6688spqXb+ypJGwkuLjCyddLppguCyRkZEl9gWYPHky33//Pa+88goOh4Nt27a5tnXo0KHMeUpef/11XnrppVKvr1mzpsIJKvV6PY0bN77wzQB9orpXe+juXVfezEfb5nI2N5Vg70BXApSUm4zD6USr0RDuWzh8Q1VV8qxmVh3ewEM9JvDUtVOqdc32YW0grFqHlqlr16588cUX7N69G4fDcVHz0QDccsstrsVwQkJC0Gg03H333dU+n4+PD2PGjKFnz57ExMRgNpv56aef+Ne//uXap2goi06nY/jw4aXOkZaWBsCSJUvIzMwkMDCw2vGIhuvMmTOkp6ezatUqdLr6+5Xw888/ezoE0QApOPDSZJDvDAFqpxG9pt7LNbkKnqifaiIXLIu7ckF3kjxQCCFEVQVcdtlFHW/OPsGUib154qWTpGbkEhRoIs9sJSzUn8I8s+z5e51OJymp2YSFBjDlnt6Ys0/gGxh1UbFcSP39RVjLcnJyAC44l2DRuPCiyj+AVatWAfDcc8+VWtTk119/pV+/fqXO89RTT/Hoo4+6nmdnZxMZGcngwYMrXBHZbDZz5MiRC9/MRSprFbvynL+6XUxoyxqNrbKGDx/Oo48+SmZmJsuWLWP06NEXdT4fHx9uvfVWZs2aRWJiYqne3upq2rQpHTp04Pf
ffyc1NdX1usVi4auvvgIgNzeXzZs3l3uOgoICvvrqKyZPnnzR8YiGp0mTJvj5+TF06FAMBkPFB9QxNpuNn3/+mUGDBqHXl1eiL0T1JCesJyv1BF4+PjSOHojBK7DGrlXT7+XiuYkQF6u+54KSBwohhKhNqqqScmILkRG+PD11CP/7YBWp6XkEB/q5thc1EhYt9qWqKgUFNrJzzYSF+PPCf2+medMAUk5swSegRY2NApNGwhqWkJBQ5WOMRiNGY+mhtXq9vsIfDjX9I7n4KnZ+hsotvmLUGlw9yG1CouvEkMZWrVoxbtw4vvrqKx577DH69u1LcHBwufsnJyeTkZFxweHikyZN4vvvvwco0dN7IaqqXvC/h8Ph4NSpUwAlks0lS5aQnp5OREQEJ06cKLcHfMaMGTz44IPMmjVLkkNRJo1Gg0ajqdTnS11W3+MXdU9eViLZaXEoioKtIB293lAr77Gaei/L30f9FBsbS2xsLA5H3ZmfryHkgpIHCiGEuBDV4SB+1lyCunYmsEvni/7eUlUHloJMtDojHds35bWnRhI75zcOHDlDTm4+Xl46DHo9iqKiqgpWm42CAjsGg5ZOHSL5z/2DiW5eOLTSaslCVR0oSs0058nCJZXk51fYwpuXl1fuPrm5uQAVVvvVZ1XpOS5yfg9yXfHRRx/RunVr4uPj6d27N5s2bSq1j9VqZfbs2XTu3Jn9+/df8Hw9evQgNTWV1NRUbrrppkrFMHz4cKZPn87p06dLbcvMzOTf//43SUlJ+Pv7c/3117u2FQ0xueOOOy44ROa2227DYDCwZ88e9u7dW6mYhBDiUud02Eg69ovreVjzXhi8AjwYkbhUTZ48mbi4ONfieXVBQ8kFJQ8UQghRnpQNG8k+cJDjX33D6WXLL/p8WVk5vPzeBrLoRturp9B36H38uGoDixcvYuLEifj5h+HAixyzigMv/PzDmHjPRBYvXsSPP/3K4NFP06brv2jT9V+07HQnGk3N1ftJJWElRUVFAZCYmFjuPkXbivZtaKrTc1ykrvUgAwQFBbF582bGjh3L+vXr6dOnD9HR0XTq1AmTycTZs2fZsWMHubm5+Pv718iq1KdOnWLatGk89dRTtGvXjrZt2+Ll5cWZM2fYuXMneXl5eHt7M3/+fEJDQwFKrFRX0Xw3QUFBDB8+nCVLljBr1iw++ugjt9+DEEI0NCmJW7BZCofnevtFENS4o4cjEqJuaEi5oOSBQgghylJwNpkzqwrniFY0CoFXXHFR53M6nTz77LMcOnyURx9/jpdeeolhw4YB0LNXY3r2uo4XXsolKSmJ1atXM2TIEMLDw13T2dU2qSSspM6dOwOFkwAXX5ikuN9//x2ALl26uOWasbGxdOjQgW7durnlfBerOj3HRepiDzJAo0aN+PXXX/npp5+466670Gq1rF27lkWLFhEXF0fPnj15//33iY+Pp3v36k3ufSGLFy9mxowZjBkzBo1Gw2+//cZ3333Hnj17aNeuHU888QT79+9n1KhRrmPmzJmDqqpcddVVdOjQocJr3HXXXQB89dVXFBQUuP0ehBCiIcnPSSL9zF4AFEVLRMvrUBRJl4SAhpcLSh4ohBCiONXpJPGbhTjtdgDCru2DT1SLizrnjE8+ZPv27UBh501Z7Tu+vr5ERUURGRlJVFSUxxoIQSoJK61Zs2Z069aNnTt3smDBAp555pkS2zdt2kRiYiJGo9HVKnyxJk+ezOTJk8nOziYgwLPDnIp6ji12C146I1aHrdQ+TtWJqqo4VWeZ2zWKBovdUmd6kIsbOnQoQ4cOrdS+EyZMYMKECVU6/9y5c5k7d26p11u1akWrVq144IEHKn2ul19+mZdffrnS+48aNco1+akQQojyOZ12ThcfZhzZA4N3kAcjEqLuqEwuWJG6mgtKHiiEEAIg9bfN5B0/AYAxLJQm1w+5qPNt3LieT2d8ACh4eQfyxhtvEBYW5oZIa450jVfB008/DcAbb7zB7t2
7Xa+npaXx4IMPAjBlyhSPN+jVBIfTQVp+OkadkQK7pcyHU1VRceJU1XL3MeqMpOdn4HDWnQm4hRBCCIDUkzuw5qcD4OXTiOBw94wMEKIhqEwuWJmH5IJCCCHqIktKKkkrfwJAUSBy7C1oLmLht9OnT/PUk4+gOh2oTjvjbr7GNUK1LrskKwl3797tatQDOHr0KAAzZ85kxYoVrteXLl1KeHi46/mNN97Iww8/zIcffkiPHj0YOHAgPj4+rF27lszMTHr16sUrr7xSezdSi3RaHY9c8y/yrOZy9/ns96/IseThZ/ThvqtuL3c/X4MJnfaSfOsJIYSoowryUkg7XThtiKJoCG81SIYZC1FMZXLBypJcUAghRF2iOp0kLvwOp61wmHFo7174toyu9vmsViuP/GcyWZlpAFzdNYYHH36xzlTQX8gl+e2cnZ3tGhNe3MmTJzl58qTrucViKbXPBx98QK9evYiNjWXLli3YbDZatWrFtGnTeOSRRzAYDDUauycFevkT6FX+ys3XteyDxWHBqDXS1L9JLUYmhBBCXByDVwBBjTuScfZPQpp2w8sU6umQhKhzKsoFhRBCiPoobctWco8WzpdrDAmmybDKTUFRntdf/x9xf+8BILxxEM+/+CJePvUjt7wkGwn79et3UXNz3Hrrrdx6661ujKhssbGxxMbG4nDUj+EYfaLcP6GzEEIIURs0WgNNovsTENoOL59Gng5HCCGEEELUAtXhIHn9RtfzZreOQWs0Vvt8P/zwA0sWfY2qOjHodTz75CSat7rGHaHWChlHU4dNnjyZuLg4du7c6elQhBBCiEuCt184ikbr6TCEEEIIIUQtULRa2kydQuAVHQm9pgd+bVpf1Pm0ajZ6XWGh1wP3XM81A+6uV1PYXJKVhEIIIYQQqurEbjWjN/p6OhQhhBBCCOEhej8/ou6+E/UiR3HabWZimpl54/nb2bH7CLfePhm9sX5N01F/mjOFEEIIIdwo48wfHPtjPuln/kBVnZ4ORwghhBBCeJCirf5oElVVSTr2Cw57AU3DQ7jrjnEEhLZ3Y3S1QxoJhRBCCHHJsRZkkZy4BafTxtmE9RTkpXg0ntzDR/D75VdyDx/xaBxCCCGEEA2dqqokr/sVe26uW8535MgRQMXoHQyAVu9Nk5YD68VqxueTRkIhhBBCXFKKenpVpx2AoMad8PZt7NF4zq5chT7pDGdXrrqoxdWEEEIIIcSFpe/4ndMrfuLAm++QtW/fRZ1rx44djB8/ntdee53AJt1p0WEMEa2GoNOb3BRt7ZJGwjosNjaWDh060K1bN0+HIoQQQjQYmcl/Y84+CYDe4Eej5r08Gk/OwUPk7D+AqteTs/8AOQcPeTQeIYQQQoiGypqZxellywGw5+Zd1KIiycnJPP300zidTpYuXcry5csx+TfFN7CFu8KtddJIWIfJ6sZCCCGEe9ksuSQf/831vEnLgWi0Bo/Fo6oqSSt/wmmz4fTywmmzkbTyJ6kmFEIIIYRwM1VVObloCY78AgCCunbGv0P15g202Ww8+eSTZGZkANCrVy9Gjx7ttlg9RRoJhdskr9/ImdVrSF6/0dOhCCGEEKWoqsqZ+LU4nTYAAsI6eLynN+fgIXLi9qP18QFFQevjQ07cfqkmFEIIIYRws4xdu8mO2w+A3s+XpqNHVftc7733Hn/+sYcCcyqhwSZefvklNJr638RW/+9A1BmpGzdyZvUvpG6URkIhhBB1T3bqAXIzEwDQ6U00btHHo/H8U0VoR2M0AqAxGnHa7FJN2EAcOXKEBx54gC5duqDX64mKivJ0SEIIIcQlyZadw+nvl7meNxtzEzpT9eYNXLVqFQsXfovVko1ep2HKPb1w5h93V6geJY2Ewm3seXkUJCVhz8vzdChCCCFECXabmbMJG1zPm0QPQKvz8mBEJasIi1a/U6SasEHZt28fK1asICoqissvv9zT4dS4nEOH2f/adHIOHfZ0KEIIIYSLqqqcXLwEuzk
fgKDOVxDQsXrfy8eOHePVV1/Fbs1Dddq5544BdGh/GQGNLnNnyB4jjYTCLVRVxZKcgqOgAEtySr2rfvj555+ZOHEiMTEx+Pv7YzQaCQ8PZ9CgQbz33nukpKR4OkS369evH4qisH79ek+HIoQQNc5myUHR6gHwD4nBL7iVR+MpUUVoMIDqdG2TasKGY8SIEZw8eZIlS5Zw9dVXezqcGqWqKqdX/EjWX39zesWP9eq9K3mgEEI0bFl//EnWX4WrGOt8fWg6+sZqnScvL4/HH38cszkHu81M/96XcV3fK4hoPQSNRufGiD1HGgnrsPq0unHOwUPYc3NRFAV7bm69qX5ITU1l0KBBDB48mLlz52Kz2ejfvz8333wz7du3Z8uWLTz66KO0bNmS7du3ezpcIYQQ1eTt25iWne4kOLwzjaP6ejqcYlWEJmwZGRScTkKTn489N0eqCRuQhjA3UWW53tPe3vXmvSt5oBBCNHx2cz4nFy91PW92043ofH2qda6PPvqIEyeOYyvIIioyjEl3XkdYZE+8fMLcFa7HXTqZSz1UX1Y3LqqGUJ0qaLWoTrVeVD9kZWXRu3dvfvnlF9q1a8fGjRuJj4/nhx9+YMGCBaxbt4709HRmzpyJr68vSUlJng7ZrebPn8/+/fvp3r27p0MRQohaodUZaNziWnT66s0/4y7Fqwgd+QU48vNRHQ4UiwXOfXVKNWHlHDx4kI8++ogJEybQsWNHdDodiqLw6quvVur47777jn79+hEUFISPjw9XXHEFb775JjabrYYjb1iKv6d1AQH14r0reaDkgUKIS4PW24uIUSPQmbwJ7HQ5gVdeUe1z3XfffXSIicDbS8djU0YSEBxJSERXN0breQ2jHlJ4VFHPsaLVAApoNa4eZP92bT0dXrkeeughDh48SFRUFJs3byY4OLjUPkajkfvuu49Ro0aRmZlZ+0HWoObNm3s6BCGEqHGqqrrm+6srir43URQc5+bxVbRanF5GdH5+hc/Pqyasy9+nnjRjxgw++OCDah37n//8hw8++ACdTseAAQPw9fVl3bp1PPnkkyxfvpw1a9bg7e3t5ogbpvPn16wP713JAyUPFEJcGhRFIfiqrvjFxKBoLi4nNChZPDF5AKfPXElEkzAiWg9GURpW7V3DuhtR64r3HFM0pEajqfM9yMeOHWPBggUAvPvuu2UmhsU1btyYtm1LJrnffPMNAwcOJDg4GKPRSIsWLbjnnns4dKjk8JrMzEy8vb3RarWcOnWq3GuMGTMGRVFK/Ng5fvw406dPZ8CAATRv3hyj0UhgYCC9e/dm5syZOJ3OUudJSEhAURSioqJwOBy8++67dO7cGV9f3xI/lMubiyYlJYUPP/yQYcOGER0djbe3N/7+/lx11VVMnz6dgoKCMuNXFMV1/sWLF9O7d2/8/f3x8fGhV69erFy5stx7t9vtzJ49m+uuu47Q0FCMRiPNmjXjuuuu46OPPirzmLVr13LTTTcRHh6OwWCgUaNGjB49mq1bt5Z7HSHEpSU3M4HEgz9gs2R7OhSXou9Ne35+iYW+9IGBqDp9iX2lmrBil19+OY8//jhfffUV+/fv584776zUcd9//z0ffPABvr6+bN++ndWrV7N48WIOHz5Mx44d2bRpE88991yJY+bOnev6rrvQY9GiRTVxq3VWfVylW/JAyQOFEJcevb8fOl/fah9vt+Vz+tjPaLUaIpuG0qhFHwxege4LsI6QRkJxUerryowrVqzA4XAQGBjIyJEjq3SsqqrcfffdjBs3jo0bN9K5c2duuukmvLy8mDNnDp07d2bVqlWu/QMDAxk9ejROp5MvvviizHOmpaWxfPlyDAYDd9xxh+v1L774gmnTppGQkEBMTAw33XQTV155JTt37uSBBx7glltuKTf5VlWVm266iaeeeoqQkBBGjhxJp06dKry/1atXM3XqVP78809atGjBjTfeSPfu3Tl48CDTpk1jwIABWCyWco9/4YUXuOWWWwA
YNmwYbdq0YcuWLQwfPpylS5eW2j8rK4v+/ftz7733snHjRi6//HJuvvlmYmJi+PPPP3n44YdLHfP4449z3XXX8cMPP9C8eXNuvPFGWrZsyQ8//ECfPn2YM2dOhfcphGjYHHYrScfWkpd5nGN/fIklP93TIQGF35tZf/6N02J1fW/q/f3Q+pSeG6c+fJ962qRJk3jrrbcYP3487dq1q/QcgK+99hoA06ZNo0uXLq7XQ0ND+eSTTwD4+OOPycrKcm0bPXo0+/fvr/AxZMgQN95h3Vcfc0HJA8sneaAQoqEwnyy/Y6ayHA4HsbGxZGVl4bDlodEYAPANjCKwUfVWR67rZLixqLbiPcd6P/8S2zRGI7a8PJJW/oRf25g6N9Tr999/B6BLly5otdoqHTtz5kzmz59PaGgoP//8M1deeSVQ+N/jpZde4qWXXmLcuHEcOnSIsLDCCUzvuecevv76a+bNm8e0adNKnfOrr77CarVy8803ExIS4np9yJAh3HjjjVx+eckPoNOnTzNs2DCWLFnCokWLXMlYcSdOnMDpdPLXX38RExNT6fvr2rUrW7dupUePHiVez8jI4LbbbmPNmjV8+OGH/Pe//y3z+A8//JCtW7eWWMXxxRdf5KWXXmLatGmMHj26xP733HMPmzZtonPnzixZsoSoqCjXNrvdzo8//lhi///7v//jnXfeoXXr1ixevLhEwrtx40aGDx/OAw88QO/evWnTpk2l71sI0bCkJG7Cbs0FwNu3CQavIA9HVHwOXycaLy9UqxWtjwmdvz/lFVvV9e/T+ujUqVOu+Z7Hjx9fanvv3r2JjIwkMTGRlStXMm7cOAACAgIICAio1VjruvqaC0oeWD7JA4UQDUHWvjjiZ80lqGtnmo4ehc5UvfmoY2NjmT9/Pj/99BNvvvkm7TqNJyVxKyERV9Wp7zV3kkbCS1Ty+o2kbtxY4X7eTZsSfe/EEq/Fz5pD/qlT2HPzyIuPB0XBecaCo+Bcr6INLGeS0Bi9SsxH47BYODj9rUrFFzVxAqbIZq7nWfvisKSk0qjftZW/yQtISUkBoFGjRlU+9u233wbg+eefdyWGUNhr/sILL7B06VL+/PNP/u///o+nn34agIEDB9KiRQsOHDjA1q1b6dmzZ4lzFvV4TpxY8r91eStbR0RE8OabbzJkyBC+++67MpNDKKyUqEpiCNC+ffsyXw8KCuKjjz6ibdu2fPfdd+Umhy+//HKJxBDgqaee4v333+fQoUMkJiYSGRkJwB9//MGSJUvw8vJi+fLlNG3atMRxOp2OUaNGuZ47nU5efPFFoHCYz/k94tdeey3PPfccTzzxBDNnznT9WwkhLi152SfJOPsXABqNniYtB9aJRK6o4krn64vWy4g9N/fcsBcF14ol56kv87vVJ3v27AEgODiY6OjoMve56qqrSExMZM+ePa5GwoamJnLB87kzF3QnyQPLJ3mgEKK+s5vzObloCQAZu/bg1zaG4KuqvrjIunXrmD9/PlD4vWG329FoDTSO6uvWeOsaaSS8RDktBVgzK56jSR8QWOo1e24elowsLMnJOB1OFK0WHE5ALfyNo4DqcKJo/5mb0K9tDKhqpa4JoDrsJZ/bbDgtZc+BUptOnjzJ0aNHAbj77rtLbVcUhYkTJ/LII4/w66+/upJDRVG4++67efnll5k7d26J5HDv3r3s3buX8PBwhg4dWuqcFouFNWvWsHPnTpKTk7FYLKiqSk5ODlC4smN5br755mrdp8PhYP369WzZsoWkpCTy8/NRVdU1pOVC1xwxYkSp14xGIy1btmTPnj2cOnXKlRwWDce54YYbSiWGZdmzZw+nT5+mVatWdO1a9gd9v379ANiyZUuF5xNCNDxOh42ko7+4noc1vwaDl+erv0pXXCnofP0qdWxdrsiqj+Lj44ELL9xQ9D1VtG91mc1m11xsx44dw2w2u+Ys7NatGy1atCjzOIvFUmJIZ3Z2Yf5ks9kqXHm5sisz10wuWJI
7c8G6QPJAyQMbmqJ/V4fDUW9XdS+Ku77GL9zv1Pc/YM0snC7Et20bfDt1rPL748SJE7zwwguuz72HH36Y9u3b1+j7rKbfy5U9rzQS1mGxsbHExsbicDjcfm6N0QtDoH+F++l8S8+RpPP1QavXoVotaHTawsQQwKYUFkNQmBQqGi0an396kH2ioyp1zcLjS741Fb0ejdGrUsdWRtHwj+Tk5CodVzThdEhICP7+Zd9Lq1atSuxbZOLEibzyyit8++23vP/++64VE4t6j++6665SQ162bdvG2LFjOXHiRLkxFf1wOF+jRo0wVaOs+vDhw4wePZp9+/ZV+ZpQ/o+uov9exSe8Pn78OADt2rWrVGzHjh0D4OjRoxX+SC6qEhBCXFpSTm7DZilMDL39wglqXPEcXDVNdTg4NnsO2X/HlZi3rbKkmtC9ihpXfMqYB7KI77mJzS/0fVcZycnJpaq8ip7PmTOHCRMmlHnc66+/zksvvVTq9TVr1lT43a7X62ncuHGFsdVILnged+aC7iR5YPkkD7y0nDhxglOnTpXo0Kivfv75Z0+HIOoA3dlkfLbtPPdES2KgP3E//VSlc1gsFj788EPOnDmDRrHTvUsr/Hx0tfY3UlPvZbPZXKn9pJGwDps8eTKTJ08mOzvb7XPgNOp3bbWH7kbdM4FD731Awdlk9MHBri/pgqTTrgpCr/AIoLB3qqj6IeaRqXR4/tlqXTPgsg7VOq48Xbt25YsvvmD37t04HI4qz0dTHVFRUfTv359169axdOlSxo8fj81mc62ud/4QE7PZzI033sjZs2eZOHEi//73v2ndujX+/v5otVoOHTpE27Zty52wuij5rKoxY8awb98+hg8fzhNPPEGHDh3w9/dHr9djtVoxnlu5sDyVnTS+OopW8WvSpEmFE8OHhobWWBxCiLopP+cM6UmFQ0kVRUt4y0EoimfXaFNVlcRFS0hZ/xvO/HyMjRrhtFrL3A+HA6fVWuaPX0WjwV5gkWrCeiYqKqpaq/s+9dRTPProo67n2dnZREZGMnjw4HIbp4qYzWaOHDlS4TVqIhcsi7tyQXeSPLB8kgdeWpo3b47T6SQyMpJhw4Z5Opxqsdls/PzzzwwaNAi9Xu/pcIQHOQoKOPLO+9jOtZ1E3HQjwT26V+kcRfPL5ufn4+/nQ6NgDY88MAQfUyotOt6BTl+9uQ0ro6bfy5Xt+JRGQlFlZa1iV566Wv0wfPhwHn30UTIzM1m2bFmpSZTLUzQUIi0tjezs7DIT9aJezrKGTUycOJF169YxZ84cxo8fz/Lly0lNTeWaa66hbduS/202btzI2bNn6dKlC7Nnzy51rsOHD1cq5qo4cOAAf/75J40aNWLp0qXodCU/Itx9zaLe5gMHDlRq/6LhKSEhIcydO9etsQgh6jen007SsZ8pmtsvrFkPjN6eX6zk7M9rSdu2A9VuB0XBUZCPRlc6/VJV0NhsOAsKKO+rVetlxJqahmq3o8gPoWrz8ysc5p2Xl1fuPrm5hYveVNQgV1OMRmOZjTF6vb7CHw618SO5vueCkgeWTfLAS4+iKIV/o1ptvW9gq8zno2jYzny/DHt2Doqi4BfTmka9r6lyp+qiRYtYvXp14d+FxsrjU27B5G0ksPFleJtqZ/qamnovV/ac0kgoquRCq9iVpy7OpdSqVSvGjRvHV199xWOPPUbfvn0JDg4ud//k5GQyMjJo27YtrVq14ujRo8ydO5eHH364xH6qqrqSlv79+5c6z80338yUKVNYt24diYmJ5U5UDZCeng6UP2zjyy+/rNS9VkXRNSMiIkolhjVxzaFDh/LUU0+xcuVKTp8+TURExAX379atG6GhocTFxbFv3z4uu+wyt8YjhKi/cjOOYckv/Azz8mlEcEQXD0cE6Tt2cmbVGhRFwatxY8KHD8O/Q9nD6ux2Oxs3bOTKvteW+flbROfrh0Z+BF2UotVTExMTy92naFvxlVZFoYaQC0oeWDbJA4UQ9VXOocO
kbd0OgNZooNktY6r8XbNv3z7Xgkd2ax4PTepP0/AQDF5BNGre2+0x11WeHYMj6p2q9BwXOb8Hua746KOPaN26NfHx8fTu3ZtNmzaV2sdqtTJ79mw6d+7M/v37AXj88ccBeOWVV/jjjz9c+6qqyquvvsrevXsJDAzkX//6V6nzeXt7c9ttt+F0Opk+fTqrVq3CZDIxduzYUvsWrS63du1a4uLiSmz77LPP+Pbbb6t/8+WIiYlBq9Xy119/sX79+hLbli9fznvvvefW61155ZWMGjWK/Px8Ro0aVWrOHbvdzrJly1zP9Xq9awLZ0aNHl/lv5nA4WLduHdu2bXNrrEKIus0/JIZmMSPQG/0Jb3mdx4cZ5xw8ROLCxa7nTW8cSZPB12Fq1qzMh3fTpjgCA/Bu2rTcfUzNmmEI9PwiLPVd586dgcJqsPIWJvn9998B6NLFs43NsbGxdOjQodxVbj2hoeSCkgeWJnmgEKI+yDl0mP2vTSfnUGF1s8NiIXHhItf28BuuxxhSfsdPedauXYvdbsfptDGkfzuu6d4WUIhoPQSN5tKpr7t07lRctOr0HBepaz3IAEFBQWzevJmxY8eyfv16+vTpQ3R0NJ06dcJkMnH27Fl27NhBbm4u/v7+rt7N+++/ny1btvDFF19w1VVX0bdvXxo1asTu3bs5ePAg3t7eLFiwwDUp9vkmTpzIzJkziY2NBWD8+PGuoU/Fde7cmVGjRvHDDz/QuXNn+vXrR3BwMHv37uXgwYM8/fTT/O9//3Prf5PQ0FCmTJnCBx98wMCBA+nTpw8REREcPHiQ3bt38+yzz/Lqq6+69Zpz5sxh2LBhbNu2jTZt2nDNNdcQERHBmTNn+Ouvv0hJSSkx386UKVM4ceIEb731Fn369OGyyy6jdevWeHt7c+bMGfbu3UtmZiYzZsygR48ebo1VCOF5eVknSDq2jvCWA/AJKFlh4xfcEt+gKI83EOafOk3C3Pmo5+bPCu19DWH9+3o0JvGPZs2a0a1bN3bu3MmCBQt45plnSmzftGkTiYmJGI1Gj8/RVZPzU1dHQ8oFJQ8sTfJAIURdp6oqp1f8SNZff6PxMhLzyFScVitejcKwpmfg2yqakGt6VnyiMjz00ENERDRh6cKZ3HFLHwBCm3XH27fixcAaEqkkFJVWnZ7jInWxBxkKV3779ddf+emnn1yryq1du5ZFixYRFxdHz549ef/994mPj6d798JJTxVFYf78+SxYsIDevXuza9cuFi1ahNlsZsKECezZs4frr7++3GteffXVJYZHlDXEpMh3333HW2+9Rdu2bdm0aRNr1qyhefPmrF69mkmTJrnvP0Qx7733HrNmzaJz587s2rWLlStXYjKZ+Oabb3jllVfcfr2goCA2bNjAjBkzuPrqq9m7dy+LFi3i0KFDXHnlla4kurg333yTzZs3c/vtt5Obm8uqVav48ccfOX36NP369ePzzz8vs1deCFG/qapK8vFN5GYcJfn4pjIn7Pd0A6E1PYNj/zcLh6VwcZKAjpfR9MaRHm8QESU9/fTTALzxxhvs3r3b9XpaWhoPPvggUNgYURca5uqShpYLSh5YmuSBQoi6zPU95O3t+j7R+/kR/a97iRx7C5G33oJSzQWUFEWhd9cwnnlkBDqdFi+fRoRG1J1K/tqiqNVZck3UqqLe46ysrEqtaLd//37at2+PyeS+lXdUVeXQex+QuXsv+sDAMvexpCS7Vjc2hjUqcx9bZiaBXa4k5pGp8oNJCA8q+qz4+++/ycvLY9KkSRgMBk+HVWU2m42VK1cybNgwmaz6EpCbmUD8n1+hqiqKotCs7Qg0GgP+oW3rxHeKo6CAwx98TMHZZAB8WjSn1b/vQ1OJv62afi9XJZeoT3bv3u1q1AM4evQoqampNGvWrMTCEUuXLiU8PLzEsVOnTuXDDz9Er9czcOBAfHx8WLt2LZmZmfTq1Yuff/652ivEult9yQUrQ3JBITyv6HMiISGBhIQEWrR
owZgxYzwdVrVILnjpKPoeyvrjL/TBwdjS0wm4ouNFfZ/Y7XbXHKx5mSc4cWApAIqiJbrT7bW6CF5dyQVluLGoFNVux5qahtbLiLMgv+ydnM7C5RmdznL3kZUZhRBCVIeqqqSc2ILTaUdvDMBmyeLkoR/R6rzJTjtIeKvB6PSebdDRGAz4tWtLwdlkjGGhRN87sVINhKL6srOz2b59e6nXT548ycmTJ13PLRZLqX0++OADevXqRWxsLFu2bMFms9GqVSumTZvGI488Ui87TmpSpXLBSpBcUAghRHWcX82u9TG5qgn927Wt+ATnOX36NA888ACPP/441157LebcJNe2Ri1612oDYV0ijYSiUjR6PW2feAx7bm65+xz95FNs2bno/X1p9eAD5e4nKzMKIYSoqrys4+RmxqPVeZ/rLVawFWSheGuxFmSi0Xr+e0XRaGg6agTG0FD82sag8/XxdEgNXr9+/cocdl5Zt956K7feeqsbI3Kv2NhYYmNjcTgcng6lUrlgZUkuKIQQoirOnxPXabVgTc9Ao9VWa65bq9XKE088wenTp3n00Ud59dVXGTp0KN6+TchOPUBQ4041eDd1mzQS1mF1KTEEMAQGYrjA8BKtlxeOAitaLy9MzZrVXmBCCCEatBJVhHofVKcDh70AFRW7NZcm0ePr1Kpzob2qN2F2XaXVamvlOoqisGTJEkaOHFkr16sP6trCJRXlgkIIIURNKFFFCFjSM8DpxGG3k/XHn1WuJnzzzTc5cOAAAJGRkfTu3RsA38AW+Aa2qIlbqDfqTkYtSqlriWFFQq+9FqelAI3Ry9OhCCGEaEBKVBECVks2hX3FCqCiqnaPxZa2fQdeTZrg06J5xTvXUzJ9tRBCCCE85fwqQlt2Nqq9MPcrmtalKtWEy5Yt4/vvvwfAaDTy5ptv4uvrW2Px1zfSSCjcplG/az0dghBCiAbm/CpCu70Ap6Nw5WBFo0PR6Eg5sQWfgBa1vghC5p9/cXLhIhSdjhZ33U5AsRVLqyI3N5fTp0+TmJhIfHw8ERERdS5ZVRSFDh06EBoaWiPn37BhQ42cVwghhBD1W/EqQtVmw56T49pmDAnB6XBUem7CgwcP8sYbb7ie3z9hEKH++a5F8YQ0EgohhBCiDiteRYjqxGbJKlwkCzB4BQKQmxlPXtZxfAOjai+uhAROfPk1qgqqzY75eGKVGglVVWXPnj18++23LF++HLPZTF5eHm+//TYmk4kRI0YwduxYOnfuXGeS1v/97381NhRYo9HUyHmFEEIIUX+VrCL0w5qc7NqmD/BH0evR6HTY8vIqrCbMzs7miSeewGot7Gy+YWgvunUM4kz8r+TnniGi1eBauae6TjIyIYQQQtRJxasINRo9VksWqtOBqjpBUdDqjGi0BpxOOyknttTasFhLSgrxs+biPDfUJahrZ5pcP6TSxx84cIDRo0czevRo5s2bR0FBAd7e3q5HQUEB8+bNc+1TNGeOuPTExsbSoUMHunXr5ulQhBBCiFpXvIrQkZOL03ZumLFej87PD+DcSsc+rmrCsjidTl588UVOnToFQLu2rbn5+pau7f4hMTV8J/WHNBIKIYQQok4qXkXocBSgOmwoigZFo0NvKJYY6rxd1YQ1zZaTw7HPZmHPMwPg16YVkWNvqXS1344dOxg3bhzbtm3DZDLRtGlTgoODMZlMGAwGTCYTwcHBNG3aFJPJxLZt2xg3bhw7duyoydsSddTkyZOJi4tj586dng5FCCGEqFXFqwgVjQZbdrZrmz44CPgn99IYjThtdpJW/lRmp/G8efPYuHEjAAEBAUz91wD059ZlC2x0ea2ORqnrZLixEEIIIeqc8+ciBBWnw4bTXoDBOwiN1uDaV6M1YLPn1/jchA6LhfjP52BJSwfAO7wJURPuQqOrXDp14MAB7r//fs6cOUNERMQFh9gqioKPjw/e3t4kJSVx//338/XXX9OuXTu33EtVLF26FIDu3bvX62sIIYQQov7
4p4rQhC0zw/W6zs8Pjd5QYt/zqwnPn5swJiYGf39/cnJy+O/U8fh65QKgNwbQuEWfmr+ZekQaCYUQQghR55RY0VhRAAWD0R+n3oRGqy+x7/nVhDXRG6w6HBz/YgHmxJMAGAL9if7XvWi9vSt3vKoybdo0kpKSKmwgLE6j0RAeHs7p06eZNm0aS5curfU5CkeNGtUgriGEEEKI+qF4FaHWy8s1zFjR6dD7+5d5jMZoLHduwl69evHll1+yZdMvtGiUee5VhYjWg0t0PAsZbiyEEEKIOqbEXITFEzdFKdVAWKSm5yY8tfQHsuP2A6D1MhL9r3sxBAZU+vg9e/awZ88egoKCqrxIh0ajISgoiL1797Jnz54qHSuEEEIIUd8Un4tQazDi1bgxGqMRQ3AQlNNZWtHchE2aNKJLWwUozBNDIrpi8ouoyduol6SRULjNX7tOsmvrcf7addLToQghhKjHiqoINRo9qM5KHVPTcxOaolqgaDQoWg1RE+/GOzy8Ssd/++23WCwWTCZT9a5vMlFQUMDChQurdbwQQgghRH1QvIpQYzQChRWExrAwNAbjBY8tPjfhrl27SnQcpyRuxZJfOGWM0RRKWLMeNXcT9Zg0Egq3+Wv3KXZvO8Ffu095OhQhhBD1lKuK0GHDZs3Dkp+Ow15QqWNrspow+KqutLzvXpqPG4tfm9ZVOjY3N5fly5djMpmqPVRYURRMJhPLli0jNze3WufwFIfDwZ9//snu3bvJy8vzdDhCCCGEqMOKVxFWNW8qqiZcs2YNk+6+m2effRaz2YzTYSUn7fC5fTQ0bT0ERaOtifDrPWkkFEIIIUSdUVRFqDodKKigOivdSFjT1YR+MW0I6tK5yselpKSQn5+Pl5dXmdsdDgdnz54lOzub9PT0cs/j5eVFfn4+qampVY6hJuTl5bFkyRKWLFnCiRMnytxn/vz5NGnShM6dO9OtWzcaNWrE008/XSNDwhui2NhYOnToQLdu3TwdihBCCFHjiqoIHfkFOPJycVoKcFqtVXqcysnh8927sGVlsXr1ajZu3IhGayC603gCQtsRFnkNRlOop2+1zpJGwjqsviWGBfk20lPzKMi3eTqUSomKikJRFBRFYerUqRfc96233nLtq6vkKpa1ISEhAUVRiIqK8nQoQghx0YqqCO02M06nHVVVUQGtzrtwZeNKPBRFg8NhvehqwvykJNJ/3+WW+8rPz8fpdJY5F6Hdbuf48eOcOXOGrKysCzYSajQanE4nZrPZLXFdrEWLFjFmzBjGjh1bZk//qlWrmDBhAunp6YX/lqpKfn4+06dP5z//+U/tB1wPTZ48mbi4OHbu3OnpUCrl1IkMvp2zk1MnMire2cMkDxRCiLpHtduxpqShOhzYc/OwJKcUNhYW5FfqkZeTzXu/rcemOlHtDoYPG8aQIUMA0Oq8iGg9hODwLp69yTqu7nzLiVImT57M5MmTyc7OJiCg8pOje4KqqmSm52O12MlMz0dV1VpfffFifPXVV7z11lsYDGWvbDR79my3Xi8hIYHo6GhatGhBQkKCW88thBD1lao6KDCnoDodrte0Om9Up42qNPdptQaslixU1YGiVD3VsWZmEf9/s7BmZmPLzKTRwAEX9Z3m7e3tauArzm63c+LECSwWC6qqYrfbL3ieoobG6s5r6G6//PILAN27dycyMrLU9ieffBIozBGuuOIKoqOjWbt2LTk5OcTGxnLPPfdwxRVX1GrMouaoqsr23xJIOJKG3qBj9PjAepMLSh4ohBB1g0avp8n1g0lcuBgAnY+J6En3oPUuezRGcaqq8tzrr5Pl74eXvx8xMW2Z9vTTpb6L6st3k6dII6Fwi5PHMynIt6EoCgX5Nk4ezyQyKsjTYVXKVVddxe+//84PP/zALbfcUmr7li1bOHDgAN26datzPflNmzZl//796PVlr/YphBD1jd7gh9NUWJEeENaB0KZXVes8Wp0JjabqaY4jP59jn32ONTMbgKy/4wj
rey3KRXzOhoWF4e3tTUFBgauBr6iC0Gq1AmA0GvH29sbf37/c8xQUFODt7U1oaN0YIhMXF4eiKFx77bWltu3du5e//voLRVGYMmUKH3zwAQCHDh2ia9eumM1mZs+e7Xpd1H8nj2eSGJ+OwagjMT693uSCkgcKIUTdYc/N5ezPa9Gc67Rpfvu4EnNB5+bmuqZx8fb2JiwsDF9fXwAWLFjAxh070BgM+Pr68sZbr5GW+CuNmvdBb/T1yP3URzLcWFw0VVXZuTkBVVXRaJQSz+uDe+65Byi/l3jWrFkl9qtL9Ho97dq1o1WrVp4ORQghLlry8U3YrDlotHq8/cKJaD0YL59G1XpUJxl02u3Ez5lPwZmzABhDgom+dwKai/wB7uvry4gRIzCbza6KweINhHq9ntatWxMUFERwcHCZ51BVFbPZzKhRo1zJsKcVzY3Ytm3bUtvWrFkDgE6n4/nnn3e9HhMTw5gxY1BVlc2bN9dOoKLGFeV+drsDH18Ddruj3uSCkgcKIUTdcWrpD9jzCqdVCbyiI4GdOqKqKrt37+bJJ5+ke/fuDBgwgOuvv54BAwbQvXt3nnzySRYsWMB7773nOs9LL72E1rqf7LRDHPvzS8zZJz11S/WONBKKi1bUc6zRalAUBY1W4+pBrg86duzIVVddxZo1azh1quTKzLm5uSxcuJBmzZoxePDgMo+Pi4vjhRdeoFevXjRt2hSDwUBISAjXXXcdCxcuLLX/hAkTiI6OBuD48eOuOW6KHkVefPFFFEXhxRdf5MSJE9x7771ERkai1+uZMGECUP5cNA899BCKotCnT58yh68988wzKIpCly5dKCio3IIAQghRk3IzE8g4+wcAiqKlaeuh1aoErC5VVUn89jtyjxwFCoe3tLzvXvR+fm45/9ixYzEajeTk5JRqIGzRokWFlUBmsxkvL68yK508paiRsKzqx02bNgHQo0cPQkJCSmzr3r07AMeOHavhCEVtKcoFvbz0KIqCl5e+3uSCkgdKHiiEqBuy9u0jY09hLqgzedP0phs5cOAAo0ePZvTo0cybN881qsLPz881SmPOnDnce++9HD58mIKCAiZOnMgV7UPJzYgHQNFoMHiX3QkrSpPhxuKiFO85VhRQVVAUXD3IzVrUj/lo7rnnHn7//Xfmzp3LM88843p94cKF5ObmMnXq1DInnAd49913mTVrFu3ataNjx44EBgZy4sQJfv31V9auXcu2bdt49913Xfv37t2b3NxcFi9ejI+PD2PGjLlgbIcPH6Zz584YDAZ69eqFqqoVDjV755132LZtG5s2beLZZ5/ljTfecG1btWoVr7/+Ov7+/ixcuLDc1TaFEKK2OB1WTh9d43reqEUfjKaQCxzhfmdWriJj1x4ANDod0fdOxBgW5rbzd+7cmc6dO7NlyxbXa8UbCC9UceV0OsnIyKBHjx507lz11ZVrStEci1lZWaW2bd261dVIcb6wc/9dc3NzazZAUSuK54LepsLGbr1BS0GBrd7kgpIHCiGEZzny8zm1eKnrecSNI9mzfz/3338/SUlJBAUFERoaWur7xGQykZ9fuCZCXl4eZ86coePlMZxN2ODaJzx6IDp93ZjPuT6QSkJxUc7vOQbqXQ8ywPjx4/H29mbu3LklXp89ezaKolxwiMmdd97J0aNH2b9/P6tWreKbb75hy5YtxMXF0axZM9577z127Njh2n/SpEm8/fbbAISGhjJ37twSj/MtWLCAYcOGkZCQwKJFi1i8eLHr+PIYDAYWLlxIYGAgb775Jj/99BMAJ0+e5M4770RVVT7//HNat259wfMIIURt0GgNNInqj1ZrxDcwiqDGnWr1+qlbtnJ27a9AYUdXizvH4xPVwq3XUBSFN954w1Vp5OPjU6kKQqfTSVJSEuHh4UyfPr1ONbYUVQgeP368xOt79+4lLS0NgJ49e5Y6Lj8/H6DcRSLEP2JjY+nQoQPdunXzdCjlagi5oOSBQgjhWadXrHTNB+3
fri1nfUzcf//9nDlzhoiICHx8fMrNgZo0aUJAQADe3t5otVr+/cB9HDlWOLw4IKwDfsEyJUNVSCOhqLbiPcd6g7bENr1BW6/mowkICOCmm27iyJEjbNhQ2Otw8OBBNm/eTN++fWnZsmW5x5a3vW3btjz33HMALFq0qNqxBQcH8/HHH2M0Gqt0XHR0NHPnzkVVVe68807i4+O57bbbSE1NZcqUKXVqyJoQQviHtCG60x2EtxpUqw1hWfv2cWrJ967nTUePIqDj5TVyrXbt2jFz5kwiIiLQaDRYrdZyvyOLesRPnz5NkyZNmDlzZplz/3lSp06dUFWV7777rsTr8+bNA0Cj0dC7d+9Sx504cQKA8PDwmg+ynps8eTJxcXF1bsGMIg0lF5Q8UAghPCcv4ThpW7cDoDUaaHrzaJ566ilXJ2l5ldxFtFotkZGRREdH07hREGeT03hvxgp0Bj8at+hbG7fQoMhw40vUX7tO8tfuUxXuF9LIlyGjLivx2uof9pGWnEt+vo2zp7JRNAoZaWZsVgeqqqI4Cp/r9ZoSq9vZrA6+m/d7peIbNLIDYY3/mQfq+LE0sjPy6di1WdVutAruuecevvrqK2bPnk3fvn1dE1hXZqLq3NxcfvrpJ/bs2UNqaqprrqmkpCSgMNGsruuuu46AgIBqHTtq1CgeffRR3n33XTp37kxWVhZXXXUV77zzTrXjEUKImuKJled0vn5oTSbsuXk06t+X0N693Hr+xMREZs6cyTPPPIO3tzfdu3fn66+/Ztq0aezdu5eMjAxMJhNGoxGr1YrZbMZisbjmIOzRowfTp0+vcw2EUPgds2rVKg4cOMC4ceO4++672bVrF7GxsSiKUu73V1GDV128p0tJTeSC53NnLljTJA8UQgjPMDWPpOmoESSt/InwG65n3/EE9uzZQ1BQUIUNhMXptAoWqxl/P2/2HzpJcm44bXQyaqGqpJHwEmW1OsjLtVa4n4+frdRrBfk2cnMsZKbn43SqaACniquXWFVVnA4VjZemxNyEqqpW6poATkfJHmeH3YnV6qjUsdXVv39/oqOjWbRoEe+//z7z58/H39+/wrlili9fzsSJE11Dq8qSnZ1d7bjOn4y6qqZPn86qVauIi4vDx8eHhQsXyhAvIYTHqU4HuZnH8Qsuv0KnNvi0aE6bh6eQtnUb4Tdc79ZznzhxggceeIDk5GRSUlL44IMP8PLyol27dixdupQ9e/awcOFCli1bhtlspqCgAI1Gg8lkYuzYsdxyyy107ty5Tg0xLm7ixIm8++67HD58mIULF7oWaVBVFa1W66qiKs5sNvPLL7+gKIprARPhGTWRC57PnblgTZM8UAghPEPRaAjr24eATpejDwjgraeewmKxlDv/qtPp5OzZs4SFhaHTFTVpqVgthZ+13l4GcvNsLF+5gV7Xuje3uxTIcONLlMGgxcfXUOHDy7v0XEle3nq0Og12mwOtToNWp0GjVUrMQ6PRKmg0JeejURSlUtf08TWg0Zb8QaTVaTCcN4zF3RRFYcKECZjNZu6++27OnDnDbbfdhre3d7nHnDp1irFjx5KWlsYTTzzBH3/8QVZWFg5HYVXl6tWrAS5qmM2Frl8Z27dv59ChQwDk5eXx119/XdT5hBDCHVJObuXkoeWcOvwTDrtnV9c0hoYQMeIGlCr0Vlfk+PHj3HfffSQnJwOQmZnpmosPcK0s+sYbb7Bjxw5+/vlnnn/+eX7++Wd27NjB66+/TpcuXepsAyEUznu2evVqOnfujKqqrofJZOLTTz/lmmuuKXXMN998g9lcWHE2YMCA2g5ZFFMTuWCphxtzwZomeaAQQniWISiIPLOZ5cuXYzKZys2Bkk6fIj09lWPHjrpyK4fdiuosXM1do9Xj6xfAsmXLZJG0apBKwktUx67Nqj10d/DIDixdsJfMdDN+/l6uP9701LzCCkKtQnCoD1CYFBWtbjd6/JWM/9fV1bpmi5a1s8rlhAkTeOmll1i+fDl
Q8RCT5cuXk5+fz+jRo5k+fXqp7YcPH66ROCsrNTWV2267DbvdzsSJE5k7dy4TJkxgz549tGjh3kn5hRCisvKyT5J2ehcAOelHCIm4Cq2udlbYdFqtpG3fSWivnm5tFCwuPj6e+++/n/T0dADatGnDJ598QlBQUJn7+/r6EhUVRWRkJFFRURUuZlKXREVFsWvXLnbt2sWRI0fw8fGhV69e5d6rl5cXL7zwAoqilNmIKGpPTeSCZXFXLlgbJA8UQoja4bTZKEg6g6l5ZInXU1JSyM/PL7eDJCUlhcysTFBVHA6baziyVmdEIQCbNQeD0R+HaiU/P5/U1FR8fWt/Opv6TBoJRZWVtYpdec5f3S4yquwfDXVF8+bNGTVqFBs3bqRNmzZcffWFE9miH4BlJVqqqrJgwYIyjysa5mG32y8y4vIVTVR98uRJ7rrrLmbPnk1ISAhvv/02Y8eO5bfffqtXP0SFEA2Dw17A6SOrXc/DInvi5RNWK9dWnU6Of/U1WX/tIy8+nubjxqJx8+fgsWPHeOCBB1zfDzExMXzyyScEBgZe8Lij6cfZYN1N2/TLaNe4/q042rVrV7p27VrhfuPHj6+FaERNa6i5oOSBQghRO5LX/srZn38h9NreNBk6BO25xZny8/NxOp1lzkWYnp5OSkoKnKvODgv2Ra/75ztIozNi1BkABY3GjtPpdI1eEJUnw41FlVxoFbvy1KfV7QCWLFlCamoqW7durXDf9u3bA4Wr1hVNTg3gcDh4/vnn2bJlS5nHhYWFYTAYOHPmjCvBdLfXX3+dVatW0aFDBz755BPXaz179mT79u088cQTNXJdIYQoj6qqnIlfh91aOPTD5N+M4PAutXbtU98vI+uvfQDk7D+A1c2fv0eOHClRQdiuXTs+/fTTChsIVVVl1dENnHWms+rohnrxXSkuXQ09F5Q8UAghalb+6dOc/WUdqgqpv23BWmxOV29vbzQaDU6ns8QxmZmZnD17FlUtXKcgJNgPH5MBm/X84cSFjYZFDY0mk6lG76UhkkZCUSVV6Tkucn4PckMyYsQIunbtysmTJ4mJiWH48OGMHTuWVq1aMX36dJ588skyj9Pr9YwcORKHw8GVV17J+PHjmTRpEpMmTXJLXBs3buT555/HZDLx3Xff4eNTOPxbp9PxzTffEBwczPvvv88PP/zglusJIURlZKceIDutcPidVmskotUQFKV2UpGU9RtI3VT4g13RaIiacBdejRu77fyHDh3i/vvvJyMjA8D1w9zf37/CYw+nxXMo9Sg6tBxKPcrhtHi3xSWEu0ku+A/JA4UQompUh4PEbxehnmsEbDSwH94REa7tYWFheHt7U1Dwz3zV2dnZJCUlFXYyqSpBgT4EBvgACk57AU5H6QWxCgoK8Pb2LnfxE1E+aSQUlVadnuMi9akHuSp0Oh3r16/n6aefpmnTpqxdu5b169fTuXNntm7dytChQ8s9dubMmdx///0oisKiRYuYNWsWs2bNuuiYUlJSGDduHA6Hg9jYWDp06FBie/PmzZk7dy6KojBx4kQSEhIu+ppCCFERa0EmZ+J/dT1v0nIgemPtzBGTsWcvp5evdD2PHDsGv7Yxbr3GF198QVZWFgCXX345sbGxlWogVFWVVYc3YHPa8cKAzWln1WGpJhSFir7Hu3Xr5ulQAMkFzyd5oBBCVE3Kxt8wJ54EwKtxIxpfN7DEdl9fX0aMGIHZbEZVVXJzczl16hQAquogwN+boEBfQEVVnaiqE0t+umsIcuF+KmazmVGjRsl8hNWgqA3lW7oBy87OJiAggKysrAp/cJjNZvbv30/79u3dXlqbmJDBki93o9NpMBhLT2dZ1sIlxVktdux2Jzfd0aVOz0cjxKWg6LPi77//Ji8vj0mTJrnmSKpPbDYbK1euZNiwYTK3Uh2mqk6O71tEfm7hcLyAsA5EtBpUK9fOPXqMYzP/D6e9cHhKk6GDaTL4Ordfx2q18uijj2I2m/noo49clTs
VOZR6jPe3zkKv0WHJK8Do44XNaec/Pe8lJrSl2+KrSi5xvqCgIDQaDV9++SXXX3+922Kq7WvUZ/UlF6yI5IJC1A1FnxMJCQkkJCTQokULxowZ4+mwqkVywfrDkpLCwbfew2m3oyjQ+qEH8YmKKrXf7t27GT16NHq9njNnzqCqKqqq4uujIyzEH0VRUJ2FDYQAKOBlCkOjLfwtk5eXR35+PkuWLKFLl9qZ1sYdavq9XNlcQhYuEZVS1HNstdjRG4zYbY5S+zidhX+8Tidlblc0ClaLnZ2bE2jWIrDSQ1SEEELUb+mnd7saCPXGAJpE9a2V6xacOUP87LmuBsKQHt1pPGhgBUdVj8Fg4J133sHhcFS6YcZVReiw4as3YaEAo9aA2ZbPqsMbaBMSXSe+K7OyslAUBZvNVq+vIS5OZXLBikguKIQQlybV6SRx4SKc5xZsCu3Tu8wGQoDOnTvTuXNntm3bRlBQEOnp6fj6GAgNNhU2EKrqPw2EgIKCzZqL0TsYp9NJRkYGPXr0oHPnzrVxaw2ODDeuw+rSEBOnQyU7Mx+DUYfN6sBaxkM910ioOtUyt9usDgxGHTlZ+TgdUsAqhBCXioBGHfANigYUmrYe4urprUm2rCyOfTYLR37hnDb+7drS7ObRbmuU2LdvH2fPni3xmtForFLl1uG0eA6kHsHHYHLFpSgKPgYTB1KPyNyEok6pTC5Y0UNyQSGEuDSlbdtO7tHCvMYQHEST64eUu6+iKLzxxhuEh4djt9tp0jiM0GCfwnmsVVyLlwAoGi0oGpwOC3ZbAUlJSYSHhzN9+nTpiKomqSSswyZPnszkyZNdZaGepNVpGHNXVwrM5ffwr/juD/LNNrxNeobfckW5+3mb9Gh10j4thBCXCp3eRLOYERTkJePt677FQiqi9TFBZhamZk1pcfcdKNqqzaFWnj///JMpU6YQHBzMZ599RqNGjap8juJVhH6GkkOTjVoDeVZznaomBFi3bh2ZmZmeDkN4SGVywcqSXFAIIS4d1sxMkpb/6HoeeesYtEZjmfsWrUrcrl0719ytJxPj8TPp8Pb2wokDzvUxKYoGRdGgqk7yzRaSU0/SLDKamTNn0rZt29q4tQZJGglFpfn6GfH1K/uPGaBrzxZYrQ4MBi2hjWWCUCGEEP9QFKVWGwj1AQG0nvxvTn2/jPBh15ebjFbVnj17mDp1KmazGbPZzGeffcazzz5b5fMUryLMtZqxOqyu4R3nVxO6c27Ci/HRRx95OgThYRXlgkIIIcT5VJsNY6NGmBNPEnJ1N/xi2pS5X1JSElOnTmXatGl06dKF7t278/nMt/nvYw9x4HAS2bmZGA06jAYdilYDOLFYCiiw2DDotXRsH8Hb775Nl27da/cGGxhpJBRu07FrM0+HIIQQoo4wZ5/C4B2ETu/ehROqQuvlRfPbbnXb+Xbv3s3UqVPJz88HoEePHvz3v/+t8nmKVxHqNTqyLTmoqKBCACoKSp2rJpR17oQQQghRHcawMNo8PJnULdsI6lr2QiJpaWk8+OCDJCYmMmXKFD788EO6du1KsNdZ3n35Vg4eTWfVLzvZsPUwFosdFQWNRoPRqOf66zozZMAVtG7hR4DxLKqqejxvqs+kkVAIIYQQbmWz5JB4cDkajZbwVoPwDYyqleum/76LgI6Xu61qsLidO3fyn//8B4vFAsA111zD22+/Xa1VwYuqCL10RjIKsopGzaBDi0LZcxN6sppwzpw5tXat+rQKoRBCCCEqR9FqCevTq8xt2dnZrgZCgCZNmtCyZUvyso6TmxmPTu9N25aBxEzqz6Tbe5GTr8Hm0OLtpScwwBcfU2He57BbyM2MJy/reK3lng2RNBIKIYQQwm1U1cnpo6txOiw4HZCVst+tiVrOocOcXLSEZmNuKjFcJW3bdhIXLsa76UZaTroHvRvn8t2+fTuPPPIIVqsVgN69e/Pmm29Wq4GweBWh2WHHea5Cz0vnhc5Zste7rlQT3n333R6
5rhBCCCHqL9XhqHA+aLPZzJQpUzh69ChQ2EA4Y8YMgoKCSPhrFU6nHb3eB4O3AZslG18/I8GhAVBGTqTRGrDZ80k5sQWfgBZSTVhNMmOwEEIIIdwmPWk35uxTAOgMvjSJ7u+2c6uqyukVP5L119+cXvGjawhs9v4DnFy0BID8U0lkxx1w2zW3bt1aooHw2muvrXYDIfxTRWhXHdjPrc6n12gJ8irdqCkrHQshhBCiPsr6ex8H33kP84kT5e5jsVj4z3/+Q1xcHAAhISHMmDGDxo0bu6oItTpvFEVBUTQYjAHojWU3EEJh3qTVebuqCUX1SCOhEEIIIdyiIC+ZlMStrudNWw9Bq/Ny2/lzDh4iJ24/Wm9vcuL2k3PwEObEkxyf/yWqs7DBMKxvH0J6Xu2W6x07dozHHnvM1UDYv39/pk+fXu0GwqIqwjyrGau9cIVYjaIQYgpCU07Ca9QasDlsrDq8QeYFFEIIIUSdZzfnc3LREgrOJHP4w1jMJ0+W2sdms/HEE0+we/duAPz9/fnkk0+IjIxEVVVSTmzB6bSj0RbLuRSlwupAjdaA02kn5cQWyZuqSRoJhRBCCHHRnA4bpw7/hKo6AQiJuAqTv/sWtFJVlaSVP+G02dEFBOC02Tm19AeO/d8sHJbCRrzAKzoSMeIGt10zOjqam2++GYCBAwfy+uuvo9frq32+oipCnUZb2CsOBHsHotOUP/uLVBMKIYQQoj5JWr4CW3YOAH5tY/Bu2rTEdqfTyXPPPcfmzZsBMJlMfPzxx7Rq1QqAvKzj5GQcQ3U64FxeWVlSTXjxZE5CIYQQQly0s8d/w1qQCYCXTxhhzXq49fyuKkIfn8IE0NuLzN17MISFofXywic6iubjb0PRuK//U1EUHn30UWJiYrj++uvR6aqfNhWfizDUFEyeLR9VVfHSVbzISl2Zm1DUvtjYWGJjY3E4HJ4ORQghhKhQzqHDpG3fCYDWaKDZLTeXylv++OMP1q1bB4DBYOD999+nQ4cOQGG+lHxiM3ZrHgAF5lT0Rn80F+hQPZ+iaLA7rDI3YTVJJaEQQgghLkpO+jEyk/8CQNFoiWh9PYrmwhNVV0XxKkKN0Qiqii0nF6fDgS0rC2NYKNH3TkBzEVV+RfLy8ko8VxSFESNGXFQDIfxTRehjMKEoCr4GE35Gn0odK9WEl67JkycTFxfHzp07PR2KEEIIcUEOi4XEhYtcz8NH3IAhMLDUfp07d+a1117D29ubt99+my5duri2qaqDvKxEQHU9VKf93IJ4lX9otQaslixUVTrZqkoqCYXbpCXtxumwotEaCAnvUvEBQggh6j2HvYCkYz+7njdu0Rejd5Bbr1GyihCsaWmoNhuKRoPTYqHRgP7oTKaLvs4vv/zCa6+9xocffsjll1/uhsgLOZwOlh/4GYvdgpfOiNVhK7WPqqo4VAdWh63MHm+NosFit0g1oRBCCCHqpDM/rcaangGAb6toQnqUP0f0ddddR9euXQkKKpkzmrNPotUa0JhCAYWmbYbi5RNWrXi0OlOVKhBFIfkvJtwmPWkPdmsuOoOvNBIKIcQlQqM10rjFtZyJ/xVTQDMCG7mvcQ1KVhHq/fwLO5U510Cm0aA1GEjdvJng7lddVMPZmjVrePbZZ3E6nUyZMoUvvviCyMhIt9zD2qOb+PPsAQxaPQV2S9k7qSo2HFjslnJX7TPqjKTnZ+BwOtBpJYUTQgghRN2Ql5BA6m+bANDodETeOqbEFDAJCQlERUWVOOb8BkKbJZfTR1ajaLQoaGnUog+BjS6r8dhFSTLcWLiNw5ZPgTkVhy3f06FUKCoq6txS6gqLFi0qd7/rrrsORVGYO3du7QVXBymVWEnqQg4fPswTTzxB9+7dCQsLQ6/XExAQQIcOHbjjjjtYsGAB+fl1633z4osvoigKL774YonX586di6IoTJgwwSNxCVHXKIpCQFh7ojvdTnjL69xe4Xb+XIT23FwUvQ6NXo9XaAg6f3/XSsf
VtWrVKlcDIRQuUtL0vEm2qysu+TDr4rcQ5hOCv5cfU7rfzTN9Hyr1eLL3gwwwXMWTvR8sc3vR45FrJkkDoaiz8rJOcGTPXPKyTng6lApJLlg1kgv+Q3JBIUpy2mwkfvMdRYsJN7l+MMawf6r/lixZwi233MKSJUvKPYeqOjl15Ccc9gIAfIOiCW7SuUbjFmWTRkLhFqqqYslPx2m3YMlPr1fLjT/zzDPY7fZauVa/fv1QFIX169fXyvU8zW6389hjj9GuXTveeustDh48yBVXXMEtt9zCgAEDMBgMLFiwgNtvv52oqCiOHDni6ZDrDUlQRV1j8ApAp7/4Ib/FlZqL8BwF0JpMaLy80RiNOG12klb+VK3vnpUrV/L888+7GghvuukmnnnmGTRuWAAlOS+Nb/9eDoBOo2VE2+vo0DiGpv5NSj0i/BoToPElwq9xmduLHgFe/hcdlxA1QVVVko9vIjfjKMnHN0kuWA7JBSUXdBfJBUVdkRefgCU1DQBTZDPCru3j2vbTTz/x+uuvo6oqr732Gn/++WeZ50hJ3Ep+zmkA9AY/IloNlqlVPES6ooVb5GUdx2HPR1EUHPZ88rKO4xsY5emwKmQymTh06BCff/45DzzwgKfDaXDuuOMOvv32W/z9/Xnvvfe46667Sk3+f/bsWWbOnMk777xDamoqrVu39lC0lTN69Gh69OhBQECAp0MRwmOcTjvm7JM1/jnvqiI0eYPqBEWLzs+vxD6KoqD18XFVE/q3a1vp8y9fvpyXX37Z1ZgxZswYnnjiCbc0EBbYCpi/d1Hh8GGgY+N29I1y74rPQtQleVnHyc2MR6M1kpsZL7mgACQXFOJS4BfThphHHubk4qU0u+UmFG3h4nUbNmzghRdecOVZd911Fx07dix1fG5mAmmnfz/3TCGizVC0Oq/aCl+cRyoJxUVTVZWUE1sorC/WwLnn9aEHeerUqQC8/PLLmM1mD0fTsMyaNYtvv/0Wg8HA2rVrueeee8pcHbRx48Y8//zz7Nu3jxYtWngg0qoJCAigXbt2hIeHezoUITwmJXELiQd+IOnYLzgd1hq5xj9VhDbsZjOWM2dxWsqez6861YQ//PBDiQbCW2+9lSeffNItDYRO1cm3f68gNS8dgCa+YYy57AbpERcNVlEu6HTa0Rl8cTrtkgsKyQWFuIR4N42g9UMP4n3u72LHjh1MmzbNNVLj5ptv5qGHHiozFzJnnXT9/0bNe2Hyi6idoEWZpJFQXLSinmM02sI/eo3W1YNc1w0bNoy+ffuSlJTEe++9V+njcnJy+L//+z9uuukm2rRpg4+PDz4+PnTs2JFnnnmGzMzMEvuvX78eRVHYsGEDAP3793fN7VJ8npui/fr161futcubE6b463PmzKFnz54EBASgKAoJCQkAHD9+nOnTpzNgwACaN2+O0WgkMDCQ3r17M3PmTNeH+MVSVZVXX30VgMmTJ3PVVVdVeEyzZs1KJVvF54I5ceIE9957L5GRkej1+hJDK5YsWcKkSZO4/PLLCQoKwsvLi+joaO655x4OHjxY7jXz8/N58cUXadOmDUajkfDwcO6++25OnCh/LqWKhnacPn2aRx99lPbt22MymfDz86Nbt258/PHHZQ5lmjBhgus9EB8fz5133kmTJk0wGo20atWKZ599Fst5DSNRUVFMnDgRgHnz5pV4L13ovSOEO+RlnSA9aQ8AWSn7sVqya+Q6RVWEaDQ48wtQnU6saamFFYXnOb+asCJLlizhlVdecTVgjBs3jv/+979ua8Rbe3Qz+1MOA+Ct9+KuK8dg1BkueMzhxEx+3lf4v0LUN0W5oFbnXfj3qPOWXFByQckFJRcUl5iiz58///yTRx99FJvNBhR+zj755JPl5lmNWvQmovUQ/ENiCJYFUD1OhhuLi1K857hwligVUFw9yD4BLep85cT06dPp0aMHb775Jg888AAhISEVHvPHH39w3333ERYWRtu2benatSsZGRns2rWL115
7jYULF7Jt2zbXuZo0acLdd9/NqlWrOHv2LEOGDKFJkyau87lzWMVDDz3EJ598wjXXXMMNN9zAsWPHXP8GX3zxBc899xzR0dHExMTQq1cvkpKS2Lp1K5s3b2bNmjUsWrToov/N/vzzT1cyeuedd17sLXH48GE6d+6MwWCgV69eqKpKaGioa/utt96K0WikQ4cODBgwALvdzt9//82cOXNYuHAha9as4ZprrilxTrPZzMCBA9m2bRs+Pj4MHjwYb29vVq9ezY8//sgNN9xQ5Tg3btzIjTfeSEZGBlFRUQwaNAiLxcKOHTt46KGHWL58OStWrECv15c6du/evUydOpWgoCD69u1Leno6mzdv5n//+x/79u1j6dKlrn3HjBnDtm3b2Lx5M61ataJ3796ube3ataty3EJUlsOWz+kja1zPGzXvjZcp9AJHVE9RFaGjwILDYnF9JumDgkApu39TYzRiy8sjaeVP+LWNueDnmPbcMBgoHAo3depUt31X7Us+xNpjhav7KYrCuI6jCDYFXvAYVVX5cXMCpzPhx80JtI8OrfPfnWVJSkpi7dq1xMXFkZ6ejs1mY9asWZ4OS9Sw4rmgXu8DgEZrwGbPl1xQckFAckHJBUVDZElJIWPPHzQa0A9NsQrhgwcP8vDDD1NQULgASd++fXnhhRcqHKkRENqOgFB579YFlWokHDBgQE3H4fLqq6+W+gCvz44cOcLbb7/Njh07+Ouvv2jatKnrC7MhKN5z7HTaQaVUD3Jdn4/m6quv5qabbmLJkiX873//4913363wmKioKH755Rf69+9f4gPPbDbz73//m/nz5/P8888TGxsLFH5Zz507l379+nH27FmmTZtWY7188+fPZ/PmzfToUXruqyFDhnDjjTdy+eWXl3j99OnTDBs2jCVLlrBo0SJuueWWi4ph165dABgMBjp16nRR5wJYsGABd9xxB59//jnGYosXFPnqq68YPnw4Pj4+rtdUVWXGjBlMnjyZ++67j7/++qtEwvvCCy+wbds22rVrx9q1a4mIKCxrN5vNjB8/nvnz51cpxjNnznDTTTeRmZnJJ598wv333+96b6SlpXHrrbeyZs0aXn/9dZ5//vlSx3/wwQc888wzvPTSS64GjL///psePXrw/fffs3XrVnr27AnA22+/zdy5c9m8eTO9e/e+5FdcFLVDVVWSjq3FbssDwCcgkqAmV9TItXIOHiJ7XxxOq9X1d6vz9UXrXf7CKFWZm3DUqFE4HA5OnTrFlClT3NaAkW8r4Lu/V7ieD23dj5jQlhUed/B4BvsT0jFoYX9COgePZ9AuKtgtMdWGtLQ0HnnkEb755hscDgdQ+H5RFKVUI+EDDzzA7NmziYyM5OjRo54IV7jZ+VWEILmg5IKSC0ouKBoy1enkxDffkRefQNYffxJ1z90YQ0KIj49n8uTJ5ObmAtC9e3def/31Ep2z/5zDgaIp/brwvEo1EhaVvdekomQyNTW1Rq9T2/bt28eKFSvo3r07qqqSkZHh6ZDcpqye4yL1rQf5tddeY9myZXzyySdMnTq1wvlQmjVrRrNmzUq9bjKZmDFjBgsWLOC7775zJYa16fHHHy8zKQTo1q1bma9HRETw5ptvMmTIEL777ruLTgyL/o6Dg4PL/FKwWCzcf//9pV7v3bs3kyZNKvV6cHAwH3/8cZlJIcDYsWNLvaYoCg8++CBffvklW7duZf/+/XTo0AEoHFoyc+ZMAN577z1XUgiF/4affvopq1evdvWAVcb7779PWloaU6ZM4d///neJbSEhIcyfP5/o6Gg+/vhjnnvuuVJ/E127duWVV14p8frll1/OnXfeyaeffsovv/ziSgyF8ISslH3kZBQ26mh1XoS3GoxSTlXfxSiqIrTn5KCe63jSGPToAyueIL4q1YQ33XSTO8MGCocWj+s0iq///IGY0JZcG3V1hceoqsqKzcew2Z146cFmd7Ji8zHatgiq89+dAPHx8fTt25dTp05Vav6
5f//733z22WckJCSwceNGrr322lqIUtQUyQUlFyyP5IKSC4qGK23rNvLiEwBwWq3ozy0ql5ub6xpS36lTJ9555x0MhtLTrdgsuRyP+45GkdfgH1r5BecauqPpx9lg3U3b9Mto19hzCzhVabhxfZh8uK4ZMWIEo0aNAgp7z1etWuXhiAqlJe12zSl1IV4+YUS2HVnitcSDyyjIS8Fuyyc/5xQoGpz5aYWT16sqqA4s+WloNPoSPchOh5Wjf3xRqfiaxQzH27ex63lOxjGsBZmE1NAcBW3btuWee+7hs88+47nnnqt0z+GWLVv47bffOHHiBGaz2fU3YjAYSElJISMjg6CgoBqJuTxjxoy54HaLxcKaNWvYuXMnycnJWCwWVFUlJycH4ILztriLzWZj3rx5ZW4rKzG87rrrKlxB7siRI6xatYojR46Qk5PjqmY5e/YsUHhfRYnh7t27ycnJITQ0lKFDh5Y6V5MmTRg8eDDLli2r9D39+OOPQNlJKkDTpk1p06YNcXFxHD58mJiYmBLbhw8fXuYPqPbt2wNw6tSpSscihLtZ8zM4m7DR9Ty85XXoDb41cq2cg4fI3L0X1amiaDQoGgVDSAiFU1pcWHnVhF9++WW5f+/u1ja0FQ/1mIifwadSjSIHj2cQF5+Oj5cOS4EVHy8dcfH1o5rQ4XAwcuRITp4snHB8/Pjx3H333Rw+fJgpU6aUecwVV1xBTEwMhw8fZvXq1dJI6EE1kQuez525YE2TXFByweIkFxSibNb0DJJWrHQ9b3brGDTnGgI7duzIzJkz+eijj3j99dfx9vYudbyqOjl9ZBU2SzanjqzC4bAQ1Pjiq43rO1VVWXV0A2ed6aw6uoG2jVp5rHOtSo2E33//PSNHjqx4x2pwx2qCdVFdvS+nw4rdmlvhfo4yfgQ6bPnYLDlY89NRVSeKCijqudWNKfxfpwNFaywxN6GqUqlrFp7CUfK5015jK2gWefHFF/nyyy/56quvePzxxy84NCI5OZmbb76ZTZs2XfCc2dnZtZ4YRkVFlbtt27ZtjB079oKTMWdnX/wiBEVzxGRkZOBwOEr1IPv6+pbodHj11Vd57rnnyj3fhe7J4XAwZcoUZs6cecGOjOL3VfSD9kLnjY6OLndbWY4dOwZAnz59Ktw3JSWlVGLYvHnzMvf19/cHqFJPthDupDodnDqyCqezcPLpwEaX4xfcqmaupaokLvwOe14eilaL6nSiDwhEdThRK/kdoGg02AssrmrCefPm8fHHH6PRaNBoNAwePLhGYi8uxFS5z/3iVYR+3gYsBWA0aMnLsdaLasL58+ezb98+FEXh/fff56GHHgKocIXY/v37c+jQIbZv314bYYpy1EgueB535oK1QXLBQpILFpJcUIiSVFXl5KIlOCyFOVlIz6vxa1Oy4q1t27Z8/PHH5Z4jJXEb5pzCBm+dwRf/4DY1F3A9cjgtnkOpR9Gh5VDqUQ6nxVdqypqacEkuXHLw4EHWrFnDrl272LVrF/v378fhcPDKK6/w7LPPVnh80dCBP/74A6vVSuvWrbn99tt55JFHypyEti7SaA3oKlEFotWXbv3X6r1RNDqcThuKRvfPcDPVwbmxYYUrHWu0aBSDqwfZ5BdRqWsCKErJRELR6NBoL7wy5MUKDw9n6tSpvP766zz11FOu3sCyTJo0iU2bNtGzZ09eeuklrrjiCoKCglz//hERESQlJbm9+rYyK86V1WMDhT/abrzxRs6ePcvEiRP597//TevWrfH390er1XLo0CHatm3rlpi7dCms+LRYLPz1119ceeWVF3W+8u4JCudv+fTTT2nSpAnvvvsu11xzDY0bN8bLywsorGz5+uuva7wSuujfZsyYMSXmwylLWROi19UOBSHSTv9OQV4yAAavQBq3qLnKL7vZTM7BQygaDagqWi8vUFWcBflVOo/Wy4g1NY1Zn3/Op+eGkzmdTtePQnf6++wBMguy6dW8W5Ub9IpXERafy62+VBM
uXrwYKBweWNRAWBlFc6EdOlTxStSi5tRILnged+aCtUFyQckFL4bkgqKhy9i1m+wDhZXGhkB/Avr/P3vnHV9Flf7hZ+b2e9MLkJDQewcRWUBEEAREEEGsKFZUQNa2sP5sq+6uupZ1FWwooChILxKaSFEpUkJvARKSkISQ3m6f+f1xyYWQThKSwHn85IMzc86cM/cmM++8532/7y0sWLCAe++9t0I2UF7WGdKTdl3YkmjceliJz5jrDVVVWRuzBafiwogep+JibcwWWgc3r5XF4uvSSfj555/zySefXFHfv/71r3zyySdotVoGDhyIj48Pv/76K9OmTWPVqlWsX7++zIdYXSE4rMcVp+5GtLmTuIPzcVjT0Rn8vb+4toI0UNwga7zVLlVV9erRNOt8P617PH5FY/oGXh0v+rRp0/jqq6+Iiopi69atJbbJz88nKioKWZaJiooiICCg2PGUlJQrGr9Qs6Ew3eNyzpw5c0XnBU+1tXPnztGjRw++/fbbYsdjYmKu+NyX07VrV5o2bcqZM2eYN29elQ3Dsli4cCEAX375ZYmRziVdV+PGjQHKLCJU2QJDkZGRxMTEMG3aNHr27FmpvgJBXcY/tCMFOWfJz0kkvNVQZE3NLYbpLBZaPPk4Z5evxBTemMj77kGSr8w4mrN4Cd9ecBACTJ48mQkTJlTTTD2k5J1n4aGfcbidnM05x9iOw9FUUIT70ihCP3PRz9So15Bvc9X5aMLo6GgkSaq0vmNhhFF6evH0VMHVoyZswZKoLlvwaiFswepB2ILCFhRcWzhzc0lafjH9PnjEcF78+9+Jjo7m9OnTTJ8+vUxHt9ORR9LJdd7tBk36YPYNL7X99URMeizH0k5i1pmwO2yYdUaOpZ2stWjCCi1XPPfcczz33HO0bFkz6UVXa4xCOnXqxEsvvcQPP/zA0aNHGT9+fIX6LV++nE8++QQfHx927tzJunXrWLJkCTExMXTu3Jnff/+9zDD5a4WSqtiVxuXV7eo6/v7+vPLKKwD87W9/K7FNdnY2brcbPz+/YkYheLSvSlupLDT8CgVdL6fQYDl9+jQOR/HUurJWtMsjIyMDKD2VYd68eVd87suRJMn7OX722WdER5eveXSlFF5XSQLjhw8fZt++fcX233DDDfj4+JCWlsb69euLHT937lyJ+8ti2LBhwEVDtaYp73dJIKgudAYfItvfRbOO466KPliDAbfQbtrLtHz6SSxNIjFHRFTqx9S4Md/9/DPfzruoe/bcc89Vu4OwwGnlu31LcLg9adgqKnIlCrlcGkWYb3ORmWv3Hrs8mrCuUujkK6l4Q1kUvkRUJCJKUDcRtqCwBctD2II1j7AFBVeTs0uX4yrwZHZYOnfkndmzvX/XGzdu9Gp/loSqKiTFrMXt8vT3CWhGUA3VGqhveKMI3U4MFzInDRo9TreTtTFbaqUuSIWs2f/+9798/PHHdOzYscYmcjXGKOSJJ57gP//5Dw888ADt2rWrcGj3v/71LwCmT5/uDaEHz4r4zJkzAc9DMDs7u/onXUe4tIpdRdN/ZY3eq0dTH4rfTJo0iSZNmrBz5062b99e7HjDhg0JDAwkKyuL778vKr69Y8cO/v73v5d67sIXqcOHD5d4vGnTprRu3ZqsrCzee++9Isc2b97M66+/XtnL8VIoerxx40aOHDlS5NhXX33FTz/9dMXnLoknn3ySsWPHYrfbufXWW5kzZ06JRkxubi4HDhy44nEKr2vGjBlFXjiTk5N5+OGHSxzTZDLx1FNPAfD888+TnJzsPWa1WnnmmWewWiuX3vjyyy8TEBDARx99xIcffliiYR8bG1ttBnjh79Ll36VAUBNIkozJt9FVG88U1gidn2+x/Xl5ecTGxnLkyBFiY2PJyyuqbaaqKp9//jmzZs3y7nvhhRd4+OGHq3V+iqqw4MBKMgo8Drxw34bc3WFYhSP+Lo0idLsVMnPsKPlOgvMlNA6PDptRr/FWOq6rz87CdLryNAg
vp/CeGxRUd1Op6wozZsygQ4cOpVakrQ2ELShswYoibEFhCwquDXKOHCVr/0EAJJORL/dHe++NZrOZzz77jLCwsFL7pyUW1SEMazmkVJmK643CKEKL3lxUekZv9kYTXm3EN1NBzp49y65dnvz5Bx54oNjxfv36ERkZid1uJyoqqtjxa4XKrBwXUt9WkA0GA2+99RZQ8ouPRqPxGmgPP/wwvXv35oEHHqBfv3706dOHESNGlLiSCTBmzBjAszJ955138vjjj/PEE0+wbds2b5t3330XSZJ4/fXX6d69O+PGjaNnz54MHDiwUppPl9O9e3dGjRpFbm4u3bt35/bbb+f++++nffv2PP30097V3upCkiR+/PFHpk6dSm5uLo8++ighISEMHjyYBx98kPvuu49+/foRGhrKokWLaNCgASNGjKj0OK+88gp6vZ6vv/6atm3bcu+99zJs2DBatmyJ3W5n9OjRJfZ766236NWrF0eOHKFNmzaMHDmScePG0aJFC7Zu3Vppp0JERAQrVqwgMDCQl156icjISAYNGsRDDz3EnXfeSatWrWjRokWZQr6VoXfv3oSHhxMdHU2PHj145JFHvAsgAkFVKcg5i9t1dQTSHVnZZO0v/eVQVVX27t3LtGnT6NWrFwMHDmTYsGEMHDiQXr16MW3aNPbu3YuiKHz66adFUuheeumlEp/ZVWX9ya2cSPcI1Fv0JsZ3G4O+EmnYhVGEGlkiM9cBKoQoYFEljHlOUNV6EU0YGRkJUOmX+99++w2A1q2FWHl5TJo0iSNHjnht0LqAsAWFLVhRhC0obEHBtYFP61Y0vG0gqgQ/pqWy+ULBJoPBwCeffOJ11JdEflY8aWcv0SFsNRSt0CEESo4iLKQ2owmFk7CCFIbSBgUFlVrpqlB7oqrh9Ha7nZycnCI/AE6ns0I/NcWVrBwXUt9WkMePH0/nzp1LPf7Xv/6V5cuX06dPH44fP86qVauw2+3MmDGDuXPnltrvjjvu4Ouvv6ZTp078+uuvfPvtt3zzzTdFxNvvvvtufv75Z/r27cuJEyeIiopCp9OxYMEC/vGPf1TpuhYtWsR//vMf2rZty++//8769etp0qQJ69at44knnqjSuUtCp9Px3//+lyNHjvDSSy/RqlUr9u7dy8KFC4mKiiItLY3Ro0czb948YmNjGTt2bKXHuOmmm9i9ezcjR44kPz+flStXcurUKaZMmcL27du9FeEux2KxsGnTJl577TUaNmzIunXr2Lp1K4MGDWL37t2VrmgH0L9/fw4fPsxrr71GREQEu3btYtGiRezbt4+GDRvyxhtv8PXXX1f6vCWh1+tZt24dI0eOJDExkXnz5vHNN99UOg1JURQURanw/aUu/kDF74/ip/yfgrx04o8u59T+eWRnxNXoWA6bjdg53xE753viFy3BXmAtcvzgwYPcddddjB49mrlz52K1WjEajfj4+GA0GrFarcydO5fRo0czdOhQZs+ejaqqqKrKCy+8wJgxY6p9znsTD7Lp9MVn2b0d7sRHa674NTscrPrtFAU2J7n5DkDFrKr4IKFKoHUoaOxuVFXFoJNxOt2s+u0UDoejynOvbgYMGICqqixYsKDEiJmSiI2NZeXKlUiSxK233lrtcxLULMIWvIiwBSuGsAXrvi0oEJSHrNPRaNjtrLGY2LhnNwBarZYPPviA7t27l91ZkrzFSRpE9sHs17imp1tvKCmKsJDajCaU1PrwlK5hJkyYwNy5c8usbvzpp5/y3HPP0a1bt1KdgFOnTuV///sfY8eOZdGiRYBn9bEwsvCrr75i37593tTkG2+8scRVxjfffLNEA+DHH3/EbDaXeS06nY6GDRvSvn37cttWlrysOGIP/IAk69BoDcWOl1S45FLcLjuq4qR5lwfxCWhWrXMTCASVo6CggKNHj7J+/XoyMjLo3bs3Wu11WctKUASVAN1J9HI+ADZ3IDmukqNhqgPjoSMYTnkMH9VsIveWfqgXNJZOnDj
B559/TkZGBj4+PhgMhhKjllRVxW63k5eXh8ViQa/XM27cOPr06VPt881R8tnhPIgbTzpbe21zmmlKT68piXM58OtRcLhAVQEVmiPho0q4JdCoYNWqpBlUkMDpBrcCt7aHhiW/51aIgoICHnjgAbKzs0t9Ya4s+/fv98qvPPXUU3z++ecArFixgtGjRyNJEm6329s+IyODIUOGsHfvXnQ6HSdPnvRGIwrKJicnB39//wp9f4X399qwBctD2IICQd2g8D4RFxdHXFwcTZs2vSIHbV3A6XQSFRXF8OHDvdXFBdXPzJkzvdkasizz7rvvMnDgwAr1dTnyyTy3n5CI3iLN+AKqqvLpjjkcPHcUnazF1+ADeDRv/f09BcFUVSXDmkXnhu2Z0ntClQvZVdSWqPIbYaFQrI+Pj1c89VIOHz7MK6+8wm+//YbL5aJ79+688sor3H777VUd+qpSWGGsrHL2Pj6eL7Yw8g8gNTWVe+65p0i7wu3Zs2eXKKT+97//nRdeeMG7nZOTQ2RkJEOGDKmQYXjy5MmyL+YKKFw5drsd6DQGFHfxiARVVTxvPKpS4nFJknG5HZyP34bFv2mdrdYoEFxPNGrUCF9fX4YOHVriPbyu43Q62bBhA4MHDxaGYTWQkbyb9LNxgD9avS9NOtx7RY6AipBz6DDxm38Hf38krYYWz0zEFOnRVzp27BhvvPEGVquV5s2bV0g7ODQ0lJSUFCwWCw899BDt2rWr1vkWOK18vut7fGwercRujToypn3FdQjB8yz95Kf96HRphAbqScu24ytJ+NoVVABVAVnGrEKwyYDLoPEYiDl2sglhwrCuV/zsvNQ2qS66du3KhAkTmD17Nl999RWxsbG89NJLRaqyFhQUEB8fz5o1a/joo49ISkpCkiSmTp0qHIT1jIrYguUhbEGBQCCoH7jtdpxZWRgbNmTRokVF5FzeeOONCjsIAbR6C6GR1b94W58pjCJUVJUcRz42t50gY0CRNpdHE16tSsdVchKuWbOGESNGIEkS27dvLyaqfOLECfr06UNeXp43reD3339n+PDhzJ49u9qFxOsizZo1q3RKhcFgwGAo/lKm0+nKfQmuqZdkVXXjsGej0ehR3PZSGilAoZOw5DYajR6HPRtVdSNJImpJIKhtZFlGluUK3V/qMvV9/nUBa945MpJ2XXhpl4hoPRSjyadGxrKnpZO0aInXQRAx6k78WnjSulRV5bXXXiMlJYXw8PAyHYR5eXneBTqNRkNYWBhJSUm89tprLFu2rFodEE5HLrIsI0kSEX5hjO00HF0ldAgBjsVlcPRMJhaTDq1WQ4NAEz6ZdiRVRZUlUAEJJEXFkO/CbdAgyTIWk46jZzI5nZRHu2ZXVuyjpv4+Pv/8c06fPs2WLVvYsGEDGzZsAPB+9r6+FwvQFNpDt99+O//+979rZD6CmqNCtmAFELagQCAQ1H2SV68hY8dOGg4exM19+zK/SRPi4+P529/+xh133FFmX6c9B63eVywElUKhFmGB04rT7UKSJJxuFy7VXaytQaMn31HA2pgttA5uflU+0yo9mX/++WdUVaVNmzYlVl174YUXvKvJsizj5+dHVlYWAJMnT2bIkCE0anT1qiVWhUIjNz8/v9Q2hVUWqyuNpy4hy1padHnIW7a8JOIOL8btLECjM9OsY+nh6hqtGVkWRqFAIBDUFRS3g6STa/F4qSCkcc8a04xRnE7i5n6P2+ZxMgR270pw34ury9HR0URHRxMYGFimgzA5OZmsrCxCQkIIDQ0FPLZGYGAg+/bt8wq5VxchliAm957AymMbGNp6QKUdhGeSs1n52ymcLgU/s6evzqmgdbhRJAkKjT5JQpFA63CjcSi4DRqMeg35Nhc//3Gatk0D65TRrdfr+eWXX3j11Vf55JNPsNkuFrwpTJUpxGAw8Nxzz/Gvf/0LjUZTG9MVVIGK2IIVRdiCAoFAUHfJj40j/Y9tqCqc++VX2vbowaxZs/j9998ZOXJ
kmX1djnxiDy3A5BNGeMvBaLTGqzTr+kNMeiwHU4/hdDu96dcBJn8MGj02ij5jayOasEpP5+joaCRJYtCgQcWOJSYmsmbNGiRJ4pZbbmHx4sUEBQUxf/58xo8fT35+PrNmzSpVA7Cu0axZMwASEhJKbVN4rLDttYbO4IvO4Fvq8dDI3ihuB7JGj9HS4CrOTCAQCARV4dyZrThsWQAYLQ0JaXxTjY11dvlKrGeTADCEhhAxbmwRp9dPP/2E3W4nJKS4tm0hhQ5CgLS0NPz8/LwR+GazmczMTBYuXFitTkIAs87EfZ3LNo5LIi45h49+3EtSWj7Bfhe0FVUVQ57zYhThpRRGE+Y5KdDLxSodX2k0YU2h0Wj497//zQsvvMBPP/3E1q1biYuLIysrCx8fHyIiIrjlllu47777aNxYCJbXZ8qzBQUCgUBQv1GcThJ+WkjhGl+jYUMxBAdhgHIdhKqqcPbkWtxOK3mZpzl35jfCWw6u+UnXI1RVZfnRdeTZ85EvOAh99RYsOlOpGahXO5qwSqqRqampAHTq1KnYsaioKO9Ffv755wQFeQza+++/n8GDB6OqKr/88ktVhr+qFFbtSU9PJza25Ooyu3d7Kv1U10vJjBkz6NChQ4lRmnWR4LAehEb0Jjisel/KBAKBQFBz5GacJCv1MACyrKNx66FIcs1EeWXs3kP69p2esXRamk0Yj+YSeY28vDxWrVqF2Vy8yht4DKtTp06RlpaGy+UCoHHjxkUkOiRJwmw2s3LlSm+E/xXP15qFSyme+lEZEs7l8sXS/ZzPtOJ0uimwuXA43agFTrR2N4rHX4jiVlBUcLs9+oSKBFq7p53D6UYjS9jsnmjCulpzLjQ0lMmTJ7Nw4UL+/PNPTpw4wd69e1m5ciUvvviicBAKBAKBQFDHObf+F/7cf4APt2xCE9aI0P79Ktw37eyfFOQkAqDVWWjQpG9NTbPesj/lCAfOHUWSJCRJwqQ14G8se/Htalc6rpKTMC0tDaDE1f6tW7cC0LFjR9q2bVvkWGHRkmPHjlVl+KtKRESE11n3448/Fjv++++/k5CQgMFgYPjw4dUy5qRJkzhy5Ai7du2qlvMJBAKBQHApLqeV5FMXF+waNrsF/WWiydWF4nKRsna9dztizGhMYUUrA58/fx6r1YrRWDw1xeVycebMGXJycnC5XLjdbho3blyixIfRaMRqtXrtlCsh31HAV7t/ZNbuH8mzly41UhZnz+fxxdID2OwuXG4FvU6DTqvBanehz3eiqipuFVBVNIqKpIKiqB5noepxiurznVjtLqwON0aDlvRsGy533XQSCgQCgUAgqL8UJJ5l608/8dGWTew9m8h/9+/FeomMSFnkZ8eTlrjzwpZE49ZD0erMNTfZeojT5WTWngW4FTcSEnqNliBTQIX6GjR6nG4na2O21PhicZXSjQsKCgCw24uLF2/fvh1JkhgwYECxY2EXXgoKU4XqC6+88gqjR4/m3XffZdiwYd6IwfT0dJ599lnAo7Xo7+9fm9MUCAQCgaBCaLRGQpv05dyZLfgENMM/tEONjSVrtbSa/Cxnvv8BQ2gIQb2KR8lbrVYURSmmRWiz2UhISPBGD4JH3640DWBZllEUxWunVBZFUfjxwHKyrNlkWbNZenQtD3cbU6lzpKTn88WSAxTYnEiSxE2dwrh7QCt0WpnUs9lsXnUUjUVGp/dEbeZm29A6nJh9jBhNHs1Cp8ON261w7x3tadDYY1v4mvXotFVa4602Bg4ciCRJvP322/TpU/Gqhbt27WLatGlIksTGjRtrcIYCgUAgEAgqgup2s/l/n/L+rxtxuN3o/P0IDg8vsaDq5bgc+Zw9uda7HRrZG7NfRE1Ot96hqipf7v6BDGsmkiSj1WgINlVcZ/pqahNWyUno7+9PRkYGKSkpRfbHx8cTGxuLJEn85S9/KdZPURSAWhPe3rt3r9epB3Dq1CkAvvzyS37++Wfv/mXLlnkdmgB33XUXzz33HP/
73//o3bs3gwYNwmKxsHHjRrKysujbty9vv/321bsQgUAgEAiqgCRJBDbsjNmvMVqtqcafy/oAf1o9OxH1gh1wOSaTyevgKyQ7O5vk5GTvqqnBYCAgIKDEaMNCCh2NZvOVrWBHxWziVMYZAHz0Fka1q5yeTmpmATMX7yfP6gCgaZgfT9/dBaNei6qq7PrlJKgqZrPO+5kHBZvJzs7G19fg3afTyuTm2Dhz9Dw9ujeuUwVLADZv3owkSZWO2MzIyPD2FQgEAoFAUPvs+nE+/1gwH6vTiazTcfPgwbzzzjvlFhq7VIcQwOLfhODwnldjyvUKl9vFqYw4FFVFI8v4631xqwpu90Wb15Nh4sbhdpZoI8mSjN1lr3Ftwio5Cdu2bcu2bdtYvXo1L774onf/okWLvP/fr1/xHPZCp2JZouQ1SU5ODjt37iy2PzExkcTERO92SRGSn3zyCX379mXGjBls27YNp9NJy5YtmT59Os8//zx6vb5G5y4QCAQCQXVjMF29QhiSRoNUisEZGhqKyWTCZrNhNptRVZWMjAyvg9BsNtO4cWO02rLNF5vNhslkuiI7Y2/SIX4/8yfgMcYe6joaf2PJEYslkZ5tZebi/eQWeByEEQ19eequzhj1njknnski7mQaGq1crnEnSRJGo46E2AwSz2QR2Syw0tcjEAgEAoFAUBan9u3n+ddfJ9duR5Lgxpv78cGHH1bIt3G5DmF4q9u9FXsFF5EkCV+jL1m2XLSyBpfqxuW6TPdaVXHixu6yQyk2okFrIMOaiVtxo9VUyZ1XKlU665AhQ9i2bRtbtmxh5syZPPbYY+zZs4f33nsPSZLo0qULkZGRxfrt27cPgFatWlVl+CtmwIABVcrjHjduHOPGjavGGZXMjBkzmDFjBm531UTTBQKBQCAoRFFcWHOTsfgXfz5XNwUJiZzfvIXGY+5GazaV297Hx4c777yTuXPnEhjoScGIiIggNjYWPz8/GjZsWK5jTVVVCgoKuPfee/Hx8anUfM/mpLD0yBrv9sh2g2kWWPHPKTvPzszFB8jO8ywyhof48MzdXTAbdd65/f7LCQryHciy5NEo1Jdtiun0Gmw2J7v+iCOiacA1EX3ndDoB0Ol0tTwTgUAgEAiub1JTU3nu5ZfIdnoWN9u178CnX39dZsZGIQU5iUKHsIJoNVpe6PMk+Y7SpXBcLhdbtmzhln63lLkg7qM315iDEKroJJw4cSIffvgheXl5TJkyhSlTpgAeI1iSJF544YVifVRVZd26dUiSVG1VgK9VJk2axKRJk8jJyRE6hwKBQCCoFlLP/E7muf0ENupKgyb9kOWaMTJcBQXEzf0eR0YmBfEJtHj6KQzB5Ucs3nPPPSxYsICCggIsFgs6nY4WLVqUGz1YSEFBAUajkXvuuadS881z5PP9viW4FI/u4Y2Nu3JTRPdKncNi0hHRwIeMHCsNgy08M+aigxDgwJ6znI5JR7rwX0GBE/9ynITXYjTh8ePHAQgMrP/XIhAIBAJBfSUzM5Nnn32Wc5mZGBs2JNzHl69/WlDhRVaDuQF+wa3JSY8ROoQlkGHNQkYiwOTx5QQY/QgoIzvF6XTiL/sQ7tuwVhdSq/Rm0LBhQxYsWMC9995LXl5ekWMPP/ww48ePL9Znw4YNpKamIkkSt9xyS1WGFwgEAoFAUAnysuLIPLcfgKxzhwhs0AWDufpTjVVVJWH+TzgyMgHQ+vig8y8/ZXf9+vXMmzePLl26sGvXLq9GYUUdhIqikJmZSe/eveneveIOPrfi5sf9y8my5QDQxL8xo9oNqXTUnlYj8/Dw9qzeZmRAj0h8zBfTdJLPZrNh1RFURUWWJXR6DX7+5a/SQ92IJszJySm14Fxqairx8fFl9ldVlfz8fPbu3csHH3yAJEl06tSpBmYqEAgEAoGgInz55ZfExcUB0DgiglmzZhEUGlrh/hqtnvBWw/ALbotPYPMammX9xOa0MSd6EQUOK490H0ukf3htT6nCVDl
8YNiwYRw/fpwff/yRkydPYrFYGDJkCEOGDCmx/Y4dO7jllluQJInbbrutqsML6hA7zmbgcCvoNTK9G189fSuBQCAQlI/LWUDSqfXe7QZNb64RByHA+U1byD58FACt2UTThx9ELsPRpygKn332Gd999x0AXbt2JSwsjOTkZMLCwopVOy7tHIXtC2VPKsr2hL2czvQ4uXwNFh7qOvqK0zg0GpmRN7cssi85MZsVC/Zht3qEqPUGLX4BxkpVtKvtaMKPP/6Yt956q9h+VVWZOHFipc5VmHFy//33V9f0BAKBQCAQVAJVVZn63HMkJSVx8uRJPv/8c0Ir4SAsRJIkfINalt/wOkJRFH44sJzUPE9ht2VH1jK59wTkcrQaYxKy2HAYWnfOokOLyn8X1UW15BiFhYUVKVxSFq+//jqvv/56dQwrqGPsTMog1+7C16AVTkKBQCCoQ6iqSvLpX7yV53wCmhHYsEuNjJV3+jTJURd1/Zo8eD/6MtJKc3Jy+L//+z+2b9/u3RcZGcnjjz/OpEmTSEpKIjAwELPZXKJTrVCDMDMzk7CwML788kvatm1bqTn3juxBWkEGuxL38VDXu/Ez+laoX4HNyfz1xxnVvyUhASVrLiYlZLF22SFysmwoiorBqMVs0eF2Fa3wrKoqigIup7vE65RkCYfdVavRhKXpOV+JzvNDDz3EY489VtUp1XkWL17MDz/8wJ49e0hLS6N58+Y89thjPPfcc0KTUSAQCAS1Ruau3aTv/JN//m0aubJEeHjFIt2seSlodRZ0horZStcbqqqy8vgGYtJjATDrTDzYdXS5DkJVVVn9RxxJWbD6jzjaNw+pNR3qmlM7FFx3FDjdpBbY0Mjli9MLBAKB4OqRlXqQvEyPsaLRmghrObhGDA9nbi5nvv8RVfE4jRreNhC/9u1KbX/69GlefPFFEhISAJBlmRdffJFx48YhSRLz589n+vTp7Nu3j8zMTMxmM0ajEVmWURQFm83m1SDs3bs37733XqUdhABaWcNd7W+nX5MbCbFUbJHLanfxxdIDJJzLJf5cLs+O6UrDoKJi3WfjM1m34ggul4LiVtDpNegNGpxOpdj5VFVFcYPDUbKTEEBv0JKbbUVxq2i0V9dw7NatG4888kiRfXPnzkWSJAYMGECTJk3K7C/LMj4+PjRv3pzbbrvtukk1/uCDD2jWrBnvv/8+DRs2ZNu2bbz66qscOHCAuXPn1vb0qp3YrHyiTqUwvGUjmgdYans6AoFAILgEt9tNXl4eZkni7IpVuK02Yv83g7bTXqpQf5ezgMTjq1AVN2GthuAb2KKGZ1z/+CN+NzsS9gKgkWTGdxtDsLn8DJDjZzI5GpeBXgNH4zI4fiaTds1qJ/BKOAnrMPWpurGqqqRb7dhcCulWuzeVqC7TrFkzzpw5A8CiRYsYO3Zsie1uu+02Nm7cyOzZs5kwYcJVnKFAIBBUHbs1g3Nntnq3w1reViOV51RFIf6H+TizPbp+Pq1a0mhoydIjAJs3b+b111+noMBT5S0gIID33nuPG264wdumXbt2LFu2jOjoaBYuXMjKlSuxWq0oioIsy5hMJu69917uueceunfvXuXnTkUdhDaHi6+WHSThXC4AilI8ks5uc7Jh1VHcLsWjv9ejMTf2bYZGU/JKssvlZMvWrdzS/wa02tIjzExmHRpt+enX1c2oUaMYNWpUkX2FTq6pU6cycuTIqz6n+sCqVauKpG/deuutqKrKa6+95nUcXiuoqsrGuFSOp+di0Mg83rVZnbYFhR0oEAiuJxRF4R//+AeHDh3i77fcis5qA8C/a+cKFZZTVYWkmLW4nB67LTNlHz4Bzev0ff5qc+z8SVaf2OjdHtNxOM0DI8vtp6oqP/9xGqdLwagDp0vh5z9O07ZpYK18vsJJWIepT9WNT2flU+B0I0sSBU43p7PyaRlYsapIdYH/+7//46677qqwOL5AIBDUB1TFTdLJtaiKZ7EpsGGXGlv1Pbf+F3J
PnARA5+dL04ceQCpBS1BRFGbNmsVXX33l3de2bVs++OADwsLCirWXJIkePXrQo0cPXn31VdLS0igoKMBsNhMSElLhCnyX4lLcLD0SRf+mN9HIt0Gl+jqcbmatOERccjYAZqOuxChCg1FH/8Gt+TXqGE1aBDNoeLsynXtOpxOjGYIb+NSbNNSHH34YSZLKjSK8nilJ36nQEZ6UlHRNOQlPZ+UTk5GPUaMhJiO/XtmCwg4UCATXMqqq8t577xEVFYXbamX6wUO8d8dIjH6+NB5VsUW+9LO7yc/xZH5odWbCWw0VDsJLSM5NZf7BFV4Jllub96FHeOcK9T1+JpMjsRlYjFrsNgcWo5YjsbUXTVihpei7776bu+++m127dtXYRK7GGIKaQVVVNp05j6J6fqEUFTadOX9FGkW1gdls5sSJE8yaNau2pyIQCATVStrZndjyzwOgNwbSoEm/GhlHVRRsqamARzuv6fgH0fmVrFWzYcOGIg7C22+/nW+++aZEB+Hl+Pj40KxZMzp06ECzZs2uyEEIsPr4RvYmHWLmn99xJDWmwv2cLoVvVh7iVGIWACaDjmfHdiUspOS0yhZtQrljbBcG3VG2g7C+MmfOHGbPnk23bt1qbQ7Hjx/n008/ZcKECXTu3BmtVoskSbzzzjsV6r9o0SIGDBhAYGAgFouFrl278v777+N0Omtszlu3bkWv19Oy5bUj9F5oC7oUBV+9Fpei1BtbUNiBAoHgWkZVVT799FOWLFmCqii4srK5v1sPtLJMxN13ofUpXxoiPyeR84k7LmxJhLcaWiNZKfWVHHsec6IXYXc5AOjcsB2DW91cob6XRhEa9BoADHqNN5qwNp6jFbJYly9fzooVK0hOTq6xiVyNMQQ1Q+HKsUb2RHxoZLwryPWBqVOnAvDWW295094EAoHgWsA/tCMmnzAkSaZx66HImpqJUJNkmabjH6TxXSMJGzEcn5alRysOHjyYW265BVmWmTp1Ku+88w5Go7FG5lUSu8/uZ3vCHgDcqoKPvmJGrsutMOfnw5yIzwTAoNfy9N2daRx60VGZm2Mr1i8swr/UFGNB1fn888957rnnmDt3LocOHaqURMtf//pXxo0bxx9//EGvXr0YOnQo8fHxTJs2jYEDB2K1Wqt9vkeOHOGTTz7hqaeews/Pr9rPX1sU2oJmrQZJkjBrNfXGFhR2oEAguJaZPXs23333HQCu7Gye6tWbnpFN8O/UAf+u5RexczkLSIpZA3icVSERN2HxLz+F9nri6PkYsm0euZ0I/zDGdRpRbqGSQi6NIiyMzJQkqUg04dVGWK2CKnHpynFhsLEE9WoFefjw4dxyyy0kJyfz8ccfV6rvggULGDRoEEFBQRgMBpo2bcpjjz3GiRMnSmzfrJlHnycuLo5NmzYxZMgQAgMDMZlM9OjRw3sDL43FixczdOhQQkND0ev1NG7cmIceeogjR45Uat4CgeD6QG/0p2nHsTTpMAajpXJptZVFkiRC+/ejwYBbymwnyzJvvfUWM2fOZPz48Vc1VSU+6yzLjq7zbo9ufztNAhqX28/tVvgu6ihHYtMB0Os0TBzdmSaNLjp54k6ls3DObo7sT6r+iQtKpVOnTrz00kv88MMPHD16lPHjx1eo3/Lly/nkk0/w8fFh586drFu3jiVLlhATE0Pnzp35/fffee2114r0mTNnDpIklfuzePHiEsdMS0vjrrvuolWrVrz77rtVvva6wqW2oP6CQ1yvkeuNLSjsQIFAcK2yYMECZs6cCYDbZmN8h07c3LwFGpORiDGjy7XBVFUh6eRFHUKLXyQhjW+s8XnXN26K6M49nUYQbA7kkW5j0VVwUb6IFuGFKMJCjLUYTVgp4Y3U1FTi4+Nrai6CesilK8cORUFVKbaCXB/0aN577z169+7N+++/z9NPP01wcHCZ7VVVZcKECXz33XdotVr69+9PgwYN2Lt3L7Nnz+ann35iyZIlDB06tMT+3377Le+88w49evR
g6NChxMXFsWPHDh555BEyMjL461//WqS9y+XiwQcfZOHChRgMBm644QYaN27MiRMn+OGHH1i6dClLly4tdTyBQHD9IkkyZt/wGjm322ZDU0YUoKqqzJ07l65du9K9e3fvfovFQs+ePWtkTqWRa89j3v6luC/oM/4l8gZ6Nu5aob77T6Zx8KQnbVun1fDkXZ1pHn5RKzg2Jo2NUcdQFZU/fj2Ff6CJxk3Kr2R3LVFQUMCKFSvYsWMHiYmJ5OTklBvVJ0kSGzduLLNNeTzxxBNFtuUSdDBL4l//+hcA06dPp0ePHt79ISEhzJw5k5tvvpnPPvuM1157zasLPXr0aHr37l3uuRs3Lu54zs3NZdiwYTgcDjZv3ozFcu1U/r08ihDqny0o7ECBQHCtsXLlSj744APPhqoyrk07bmvmyfQIHzkCXQVqHqQn7SY/+xIdwtZDkSoYIXe9cUN4Z7o26oBW1pTf+AKFUYRGnQZFBZ3TTQOrhMvkRjFoa02bsFJOwokTJ9bUPAQlUNerG1+6cuxzmci6XiNT4HKy6cx5WgRY6ryo6U033cTdd9/N0qVL+ec//8lHH31UZvsvv/yS7777jpCQEDZs2ODVY1JVlX/84x/84x//4P777+fEiRMlipa/++67rFy5khEjRnj3zZkzh0cffZQ333yTiRMnYjKZvMfeeOMNFi5cyE033cT8+fNp3ry599jixYu57777eOCBBzh9+jQBAQFV+zAEAkG9piA3CaM5BFmjr9Fxsg4cJHHREpo8cB9+7dsVO261WnnrrbfYsGEDQUFBfP/997VWpMGluPl+/1Jy7HkANAuMZETbQRXu371NKKkZzdi4K54nRnWiVUSA99jpE+f5NeoYhYu8rdo3IPyS49cDX3zxBa+88grZ2dkV7qOqaq3ZBmfPnvVqYD/wwAPFjvfr14/IyEgSEhKIiori/vvvB8Df3/+KCsnZ7XZGjRpFXFwcv//+O+HhNeO0rw2uFVtQ2IECgeBa4pdffimizXv/7bczyOYCwLdNK4J6lR8N6LBlcz5hh3db6BAWpcBpxawzFdlXGQdhYRShze7G6XKTV+CgOTI6t4Qrz0mBXoNRryHf5rrqlY4r7CSs66kC1yI1Wd14x9kMdiZllNuukcXIvR0iiuz76UgiKfk2CpwuEnKsSBKkWhUc7guRhCqkWu0YZLnICrLdrfDF3tMVmt+49o0J87n4R3ciI48Mq4PejWvOg/6vf/2LlStXMnPmTKZOnUrTpk1LbVu4KvP6668XEWyXJIk33niDZcuWceDAAb7++mteeeWVYv2nTJlSxDAEmDBhAu+99x7Hjh1j9+7d3HyzR+w0IyODjz/+GKPRyJIlS4pFKIwdO5aJEycyc+ZM5s2bx+TJk6/0IxAIBPUchy2bhKPL0eo9VedMPo1qZBz7+TQSFizEbbMTO+tbWj03GUvTixVuk5KSePHFF4mJ8RQFycjIYOfOnYwcWbEKetXNymPric86C4C/0ZeHuoxGUwlDTpIkhv6lGT3bNyQk4OKz6dTx82xac9FB2LpDA24Z3AZJrrvOkOrmnXfe4Y033qiQnVho3Na2TRkdHQ1AUFBQEWfLpfTs2ZOEhASio6O9TsIrwe12c99997Fr1y5+/fVX2rZte8Xnqm5qwha8nOq0BWsaYQcKBIJrhb1796Ionnvyfffdxwsvvkj+6VjOLl9BxD1jK+Rs0hv9iWgznKRTGwgK6y50CC/hdEY8c/ct4q72t9M9rNMVneP4mUwOnkzD7nQjATqnikZxo0qgdShoHApug6ZWogkr5CR85JFHanoeXpo0aVJ+I0GVcbgVcu2uctv56Yu3KXC6yLE5SbM6cKsqGlVCkfC+JKkqKArIGsmrR9MiwAKqWqExAdxK0RcIl+JxQtYkbdu25bHHHuOrr77itddeK1UXJjExkVOnTgEl/21IksSjjz7K888/z6ZNm0o0Du+8884Sz92+fXuOHTvG2bNnvfs2bdqE1Wp
l0KBBJaYwAQwYMICZM2eybds2YRwKBNcpHt2YdSiKE4ctm8xzB2vESag4ncR99z1umx2AgG5dMTe5aDj++eefTJ8+nZwcj4Cz2WzmnXfeoX///tU+l4qwMzGaPxP3AaCVtYzvOgYfQ9mpnqqqkpZlIzSwqIPiUgdhzNFUtqw77n32tenYkP63tb6uHITHjh3jjTfeAKBNmzZ8/vnn9OnTB5PJhCRJLFu2jNtuu43Y2FiioqL45JNPSE5O5tFHH+Xzzz9Hr6/ZaNfSiI2NBcq2OSMjI4u0vVImTZrE8uXLefvtt3G73ezYcTEqo0OHDqUWL7Hb7djtdu924d+T0+kst/JyRSsz14QteDnVaQvWNMIOFFyPqKqKqqq43e4arepekxTOu77Ovyb461//ikajIScnhylTpuByuTA0iaT5lElIklThz8ro25QmHe5DozOLz/cC6QWZfLdvMTannQUHVqJDS9uQlpU6h6qqzF9/jOx8BxoJQCIUCVkClwo6VcWQ5yBfZ8Cgk8m3Oln12ylahPtUKZqwot9hhZyEs2fPvuKJCOomeo2Mr6H8r9+sK97GrNOi1Ug4FQWtLCEX6s+oXNAkBFkGjSxhkC6uIEf4mSs0Jnj6XopWlr1i2DXJm2++ybx58/jhhx946aWX6NKleMWnQsMtODi4VOO+ZcuWRdpeTmkvJoXns9kuVsg8fdqz4r5x48Zybwrnz58v87hAILh2STv7J9a8ZAB0Bj8aNSu7gMiVcnbZCqxnPeMYG4QQMc6zIq2qKvPnz+e///2vd/W6SZMmfPTRRzRr1qxG5lIR3Irind/dHYYS4R9WZntVVVmx9TTbDiTxxKhOtClBWzDm6Dm2rDvhdRC269yIfgNbXVcOQvCkGauqitlsZv369SU+28xmMx07dqRjx448+eSTjBo1ijlz5pCfn8+CBQtqYdYefUCgTF1AHx+Phl6hc+5KWbt2LQCvvfZasUIomzZtYsCAASX2+/e//80//vGPYvvXr1+P2Vx2updOp6tQan9N2IKXU5224NVA2IGC6434+HjOnj1LQUEBUVFRtT2dKrFhw4bankKdomXLlqiq6n0OCaqOU3Wx3XmQfNUKQIgcQMzOY5ySjlfqPCdSYF+Mp+CrIoGvpGJWZNwAErhUFdnmwpbpwK4B1Q17jyYx56ckGpb82KkQBQUFFWpXKU1CwbVD78ZBV5y6O659Y77ZH0dagYMAg85rsJwrsHkiCGVoaPYI2auq6tWjebxrM6be2OqKxmwTdHUEr8PCwpg6dSr//ve/+fvf/87q1atrZJyKCqsD3pftVq1a0bdv3zLbtmtXXBdMIBBc+xTkJpGW+OeFLYnGrYbWiCZhxu49pO/wjCPrtDR95GE0BgN2u51//vOfRV4w+vbtyzvvvIOvr2+1z6My9GlyAw19QjidEU+P8M7lto/aFseWvR6R7lkrDvHqYzfhZ7n4WV7uIGzfJYy+t7a87hyEAFu2bEGSJO65554KZYIEBASwfPly2rRpw6JFi3jggQdqLQX9ahEXF3dF/f7+97/zwgsveLdzcnKIjIxkyJAhpTqmCikoKODkyZPljlETtmBJVJcteDUQdqDgeqNJkyYoikJkZCTDhw+v7elcEU6nkw0bNjB48GB0uopVlb3WOHXqFC6Xq4ikhTXxLNb4BAJ790Kq4D3HmpuEw56NX3C7Oq0jWxu4FTdz9y9Gm6nHHz0NLME8dcODGLWGSp0nJiGTn3bvQZKcaGQw6rVEqBIauxtFlnArCrJGRlZUgtGT72dABTJy7GQTwoRhXa/4u6nowqdwEgoqTUlV7EqjvlW3A5g2bRpfffUVUVFRbN26tdjxwlSP9PR0cnJySjTWC1d9S0sLqQyFKU9t27Zlzpw5VT6fQCC4tnC7HCSdXAd4vFYhETdh8i07Wu5KsCankLhoiXc7YuzdmMIaoaoqU6ZMYe/evd5jjz32GE8//XSlXoRrkpZBTWkZVLq+WCHrdpzhlz/
PeLdHD2hVxEEIEBhkQafX4rC76NA1jD63trxuDen4+HiAUiv+OhyOYvsCAwN55JFH+Oijj/j+++9rxUlY6LjOz88vtU1enqfITXkOuZrCYDBgMBR/8dDpdOW+BF+Nl+Rr2RYUdqDgekKSJCRJQqPR1HsHW0Xuj9ciCQkJTJ06Fbvdzv/+9z+6dOmC4nKRvGQp1qQUcg8eoukjD6ErZ9HW5SzgXOwGXM58HPnJNGoxCFkW7iLwLHatPLKB2KwEJEnCojfz6A334muq3PPs2JkMZi45hNXuRJYkTEYtjYx6dJk2FEnypGQCSB4JD61DQetUPdqEJh1Hz2RyOinvirUJK/r3UTesd0G94dIqdhVN/9VrZK8eTW2LlVcEf39/r37M3/72t2LHIyIivGkkJRlrqqp69996661Vns+gQYPQ6/Vs3ryZ1NTUKp9PIBBcW5yL24zT7lkZNPmEEdK4/Ip1lcVtt3Nm7ncoTo+WWPBNNxJ0Y0/A84IxZswYAIxGI++99x7PPvtsrTkIVVUlObfy98pfd8ezdvtF/bm7b23NXzoXd7aGNPRh+N2d6NYr8rp2EMLFtN3Lq7cWVmUtPH453bt3B2D37t01OLvSKUx/T0hIKLVN4bHaTJWvq1zrtqCwAwUCQX3h3LlzPPvss6Snp5OXl8eMGTNQVZXzmzZjTUoBwG2zoi1HpsKja70el9OzeOZ05CFJwlVUyG9n/mTX2f2AR9/64W5jCDIFVOocZ8/nMWv5QdKzbagqmIxaQvyMGPKdSKrqyT2+FAkkVcWQ5wRVxajX4HQp/PzH6Rp/jopvXlApKrNyXMjlK8j1gUmTJtGkSRN27tzJ9u3bix1/6aWXAHj77bfZv3+/d7+qqrzzzjvs27ePgIAAnnzyySrPpWHDhkyZMoX8/HzuvPNODh48WKyN3W5n5cqVHDt2rMrjCQSC+kNO+gmy044CIMs6wlvdXiNGXeLCxdhS0wAwNQ6j8d13FTl+++23M3XqVGbPns2gQYOqffzKsDMxmv9t/5ZfT2+rsBG1ZW8iq367WHF1VP+W3NztYgTQ5ecJbeTLjX2bXdcOQrio6Xd5xKC/vz9wMdLwclwuj7P53LlzNTi70il0Uqanp5damKTQgdmjR4+rNq+SmDFjBh06dODGG6vf+X+lXA+2oLADBQJBXScjI4NnnnmG5GSPTnTLli15//33sZ87x7kNGwGQZInIe8chaTRlnis9aQ/52Z5MCo3ORONWQ4WT8AJHUk+w5sQm7/bYjnfQNCCi0ucJC7bQpJEfNocLk0FLaIAJrVNB63CjSJLHR3ipvSlJKJKE1uFG4/Doa19a6bgmEd98HaauGYZXsnJcSH1aQQZPms9bb70FlCzwOXHiRMaPH09aWho9e/bktttu44EHHqB9+/a8/vrrmEwmfvzxx2LRFVfKu+++ywMPPMCff/5Jt27d6NGjB2PHjuW+++6jX79+BAcHM2rUqCvWPhIIBPUPlyOf5NO/ercbNb8VvdG/Rsby79oFjdGAxmgg/L572bh5c7E248ePp3Xr1jUyfkWJzUxg5bENqKisP7mF2MySnVSX8seBJJZvuajfdkff5gy44WK15sPRSfzx66l68ey62hRG2V3u7Gvbti2qqvLHH3+U2K/QqVJb1Y0jIiK8ttWPP/5Y7Pjvv/9OQkICBoOh1jW6Jk2axJEjR9i1a1etzqOQ68UWFHagQCCoTfLy8oiNjeXIkSPExsZ6JTAKycnJYfLkyd7FuMjISGbMmIGfry8JPy1GcbkBCB1wC+bIsh1aBTlnOZ9wcTGkcauhaPWlF/a6nrC7HCw+HIV6QdLntpb96BbW4YrOJUngcLrRaWUCfQ04nS50OQ5QPNVfJbeK7FZBvVh1XEUFRUWX48DhcKGRJWx2V41HEwonYR2mrhmGV7JyXEh9W0EGzwtv584li9xLksR3333Hjz/+SL9+/dizZw+LFy+moKCACRMmEB0dzbBhw6ptLlqtlh9++IG
oqCjuuusuUlNTWblyJevWrSMjI4M777yTH3/8kf79+1fbmAKBoG6j0ZkIieiFJMn4BbfBL6TmBOsDunSmzfNT8R15B1Nfe7VGBf2vlGxbDj/sX4aiekT++zXtRYtydAh3Hkpm8cYT3u0hvZtxW6+LfQ7uPcu2zac4eiCZbZuEo/ByunbtiqqqxSKbCp9FmzZtYs+ePUWOnT59mlmzZiFJEu3bt79qc72cwnTSd999t4ieZnp6Os8++ywAkydP9kZFCjxcT7agsAMFAsHVRFVV9u7dy7Rp0+jVqxcDBw5k2LBhDBw4kF69ejFt2jT27t1Lfn4+U6dO5cQJj/3SoEEDZs6cSUhICGm//UH+GY/j0BAaQqPbB5c5pstp5ezJtXh1rRv3wuJffiGy6wWDVs8j3cdi0ZvpFtaBQS36Vaq/zeHy/r/LrZKWbcPHrMfmdKNYXWidblRAVvB8BSrIHr+g5wcJtwTaC+2tDjdGg5b0bBsud83ZpJIqLN46T05ODv7+/mRnZ1eoot3Ro0dp37495nK0ByqDqqp8sz+OQ+dz8NeXLHiZZrOjKiDJEGIsucpPtsNJp1A/Hu8q0rQEgtqk8F5x6NAh8vPzeeKJJ2otqqcqOJ1OoqKiGD58+HUpVg1gyz+PzuCLRmus0XEOHTrEyy+/zPnz5wFP8YdVq1bh43N1ixCcTI9j2dG1jG4/lFbBzQBwuV18sWseiTmelJtWQc14rMe95eoi7j56jh/XHUNVVQb2bMKIfs29z6YDexLZufViKmr3m5pww1+a1Nizq6Z/lytjS1SUOXPm8Nhjj9GsWTNvoQbwVFls3749brcbHx8fJk6cSKtWrTh58iSzZs0iKysLSZL46KOPmDp1apXmsHfvXq9Tr3DstLQ0IiIiihSNWLZsGWFhRTUmp06dyv/+9z90Oh2DBg3CYrGwceNGsrKy6Nu3Lxs2bPDqK9Y29cUWrAjCFhQIap/C+0RcXBxxcXE0bdqUsWPH1va0rohrwRY8duwY06dPJzo6Grvdjtlsxmg0IssyiqJgs9koKCjAYDAgyzIWiwWj0UhQUBCzZs2iSZMm2NPTOf6fj1AcTgBaTX4anxYtSh1TVRUSjq8kP8uTZmz2a0yT9neLNOMSyLJm46O3oNVUvJDLn0dSWPXbaSaN7UqjYE9kZmaujbwCJ6qqsvXnYyScSsftVrzPQr1Bg1t14OPrw4UEZFRVxZrnIKxpIP1HeKpO+5r1BPhWrqoyVNyWEOVqBBXCrapkWJ0YNBpsbqXENqoKCh7vd2ltDBoNmTYnblVFKwxDgUAgqDJGS/Wks12KqijknojBr11bAFauXMm///1vnE6P4dmgQQM+/PDDq+4gVFWVqBObOJx6AoPGwJTeEwBYdnSt10EYaPLn/i6jKlQ4pWf7hmg1EvEpuUUchPt3J/LnbxcdhD16N+GGv5RfHfl6484770Sj0XDmzBm2bdtGnz59AI8u0iuvvMJbb71FXl4eH374YbG+PXr04JlnnqnyHHJycti5c2ex/YmJiSQmJnq37XZ7sTaffPIJffv2ZcaMGWzbtg2n00nLli2ZPn06zz//fL1cOKlJKmILVgRhCwoEAsFF/vzzTyZOnEhycjKBgYGEhIQUW0Axm80EBgYSGxtLbm4uOp2O9u3b89lnn9GkSRNUVSVx0RKvgzCkX58yHYQAGUl7vQ5Cjw7hMOEgxGNrXv75B5gql1Ww/WAyCzeeAFVlxuL9vPTgDfj7GAj0NRLoayQhNoOkuEwUt4rmgr1qNOuw+OjJznagvyxaX7JAekouks1NZLPAql9kOQgnoaBCaGWZp3s0J9/pLrXN9wfPkO9wY9FrGN+59Jcpi06DtpaqXgoEAkF9RnE7sRWcx+wbXqPjnN+8laSfo/C7oTuL4+NYuGSJ91j37t157733CAoKqtE5lERMeizH0k5i1Bo4lnaSmPRY0goy2ZPkSXfVyVrGdxuDRV/x6KlubRrQrU0D73b0znh2bzv
j3e7ZpyndbxKpNyURHBzMiRMncDgcNGjQoMixN998E4vFwttvv11ES0mSJMaNG8cXX3xRLU64AQMGVCkNfNy4cYwbN67K86gpZsyYwYwZM3C7S7e/rhYVsQUrirAFBQKBwBNBOHHiRFJSUggPDy9zgVOSJAIDA7HZbLhcLvLz81EUz4JN5p695J7w6CvrAwMIu6NsuQNFcZF57oB3u3FLoUNYyJqYTciSzJBW/ZGvwGn6+/6zLPk1xrvdvU0ofpaL9o6qqvyx6RQOuwtZ9jgCzT56zBZ9qfaMTq/BZnOy6484IpoG1HgUvnASCiqMn0GHn6H0EO7+TUJxuD1C1mE+NZv2JhAIBNcjqfG/kXnuIMHhPQmN6I0kl12t7krIO3Wa5Kg15NhsvPPeu5xWFWSDJ6Vh3LhxPP/887WSzqOqKmtjtuB0OwkyBZBhzWLx4Shy7RcdUGM73UG4b8NSz3E0NoOcfDs3dQor8fjeHfHs2X7RQXhj32Z06xVZYluBh+bNm5d67OWXX+a5555j+/btpKSkYLFY6NmzZ7G0X0HpTJo0iUmTJnlThGqb8mxBgUAgEFQMVVWZPn06ycnJ5ToICwkMDESWZWRZJisri+nTp7Ns2TL8O3Yg+C83kb59JxFj70ZjKDsVVZa1NOt8P0mn1mGyNMASIBZDAf5M3MfWOE92QoY1i/s7j6qUQ27z3kRWXFIMb8ANkYy8uUWRcySeyeJcUg4WXwO2AicWXz0mc9mLppIkYTTqSIjNIPFMVo1HEwonoaDa6N346keVCAQCwbVIfnY8yad/JazFQK+AdG7GaTLPeSLmMlKiCQjtgN5UvUaCMzeXM9//QHxGBv/Z/CvZGg06fz90Oh3Tpk3jrrvuqtbxKkNhFKFFb0aSJCx6M0fOnyDEHIRRa6B/s5vo2qj0inMn4jP5dtUhXG4Fl1ulb9eL0ZiqqrJ3Rzx7d1yshtzr5uZ07Vl2RUBB+RgMBgYMGFDb0xAIBAKBoE4RHR1NdHS01/FXUQoXjGRZZt++fURHR9OjRw8i7xlDaP9+GBuWvlh6KVqdici2I69o7tcipzLOsPzoOu92i8DK6VD/ujueVb9d1Ge+7cYmDO/bvMg5VFVl1x9xuFxufP2MmMw6NJqKffdXM5pQxPkLBAKBQFCHUFWV1DO/k5d5itQzv6OqKi5HPsmnf/G2adj0lmp3EKqKQvy8H3Hm5GLRG1CNRnT+foSEhPDll1/WqoPw0ihCg8az2mrQ6DFoDNhdDloFNWNo6wGl9j+ZmMWsFR4HIUBMQmaRlA6XU+HU8fPe7d79hYNQIBAIBAJBzfHTTz95i5SURlpaGjk5OSUeM5vN2Gw2Fi5c6N1XUQdhIZIkCx1CIC0/g3n7l6KoHjuxT5Oe9I7sUeH+63eeKeIgvL13syIOQqfDzcljqSSeySIhNgOjUYckSRV2EELxaMKaRPxGCAQCgUBQh8jPPkNeViyyxkBeVix5WWdIOr0Bt8sKgE9gcwIadKr2cVPWbSA35hQADRs15L/fzKJHjx58//33dOnSpdrHqwyXRxGCx1jyNXj0c3pFdCtVNyY2KZtZKw7hdHl01Dq1DGH8sPZFVmB1eg13jO2Mf6CJv9zSgs43CAehQCAQCASCmiEvL49Vq1ZhNptLjQjLyMjg/PnznD17lqysrOINVBWT0cjKlSuLaP+WRkFuEgnHVuByFlRx9tcWBQ4rs6MXYnXaAGgb0pIRbQdVuP+abbGs2Xax2N3wPs0Z+pdm3u/VZnWyevEBfo06xsbVR3HYXUiyhMvpLvFHUSj1mCRLOOwudv0RVyU95vKoULpxfHx8+Y2ukCZNRP67QCAQCATgiZg7H78NRXGhM/jjtGdzNmY1itvpWXHUmQhrcVu1pxgk7t5DQtQaTDo9kizRdPyD+LRswZe9e9e4OHJ5XBpF6HuZqLZBoyffUcDm2B10btiu2FzjU3L4ctlB7A4XAO2aBfHw8A4lrtxafAzc/WB3tLrq13k
UCK6UulS4RCAQCATVw/nz57FarZhMphKPp6Wlcf78xQyHwgIll+LMykLKyyPP4eB8aio+Pj6ljudyWjkbswaXI4/YAz/QtOM96I0BVb6O+o5LcfP9/qWkF2QC0MgnlAe63FWpgiUmw0WX2p03t2Rgz4ta1vm5dtYsO0Rmuscxm5qUg96gxeko+ZmuqiqKGxwOd6n2t96gJTfb6qmMrK0ZG71CTsJmzZrVyEuCJEm4XK5qP++1gjAMBQKB4PqiMIpQozUhSRKyRk9BdgJ6UxAarYHwlkPQ6ipeubciHI2OZtKDDxJm8eHFW24lYvgwfFq2AKh1ByEUjSK0uexk2XKw6E3IkoyP3oJFb/ZWOm4T0sLb7+z5vCIOwtZNAnnszo7otDKqqnLsYAqt2zco4hQUDsKS0Whq5nMRdmD51LXCJQKBQCCoOlarFUVRimkRKopCUlISubm53n0hISEEBRXV/nfbbLjyC5BUcOXnk5uRAS1aUBKqqpB8aj0uhyfaUG8MRGfwq+Yrqn+oqsqyI2uIzfQExPnoLUzofg8GbdlFRC5nwA2RuNwqep1M/+4XM1FysqxELTlEbo4nQtHiY2D4mM4YjaUX/3K5nGzZupVb+t+AVlt6O5NZh0Zbc0nBFS5cUpPhjIKSEYahQCAQXD8UiSLUWS5oERagouJy5BHc+EZ8AppV65i//vor055+hvzsbFKyslifkca0AbdU6xhV4dIoQq2sJdOahUNxYrc6MGj0+Ogt3mjCtTFbaB3s0X9JSc/n8yX7KbA5AWgZEcDjIzuh02pQVZUdW05zKDqJ2Jg0hozqgFYrnINlIWxAgUAgEAiqD5PJhCzLRSIEHQ4HiYmJ2O12777Q0FBCQkKKdlZVnJmZF/5XRWex4Hd5m0vISN5LXlYcABqtkfBWQ4UOIfBH/G72JHkKAmplLY90H0uA6cp8Lrf1Kpodm5GWT9SSQ1gLHAD4+hkZPqYTfgElR44W4nQ6MZohuIEPOl3pTsKapkJOwkceeaSm5yEQCAQCwXXN5VGETnsuqG4kJFTFhY9/9clzKIrCV199xaxZs1B9LGjsdpoFBPDQG68hVaLCXk1TGEWo03gchCqgkWT0Gj2WCxGVhZWOC6MJWwU159tVh8m3ehyEzcL8eWJUJww6j4Nw26ZTHNmfDEBSQhZJCdk0aR5U2hQEQP/+/cuMKs3Ozmbfvn3ebV9fX1q2bInFYiE/P5/Tp097hdclSaJbt274+YkoBoFAIBBcn4SGhmIymbDZbJjNZvLy8jh79qzXaSjLMo0bNy4xhdiRlY1yQWfZKcv4BgQUdyReoCA3idT4bd7t8Fa3ozOUnpZ8PdGpQRt2Jx0gJTeVcZ1GEOkfXm4fRVFZuvkk7ZoG0qllyZ95anIOa5cfxm7zZEoEBpsZdncnLD6Gap1/TVIhJ+Hs2bNreh4CgUAgEFy3XB5F6BGVVkGSABlJoyMtcSc+gS2qnAKcn5/Pa6+9xtatWwGQZJlR4x/ib5Mn49eoUdUvppoojCK0uew4XI4LnwUYNSbcNgM6vdHb9tJowim9m/Pg0HZ8ufQgIQFGnhrdGaNei6qo/LHpFEcPeByEkgQ3D24jHIQVYPPmzaUeO3jwoLfy9dixY3n55Ze58cYbi7XbtWsXH3zwAYsWLSI7O5u5c+fSqVP1F+ARCAQCgaCu4+Pjw5133sncuXM9GRApKd5jBoOBiIgI9Priaa+K3Y6rsEiJBA5ZYtSoUSU6E90XdAjBkw0QHF79GSn1mQCTP8/cOJ7jaafo0qh9ue0VRWXBhuPsOpLCjkPJPH5nJ9pfZkOejc9k/cqjuJweJ25oQx+Gju6E0VR7UYFXQoXTjQWC8jgevR2n045OZ6Bt97/U9nQEAoGg3nB5FKHLWQCKGyQZgykAVVXJy4olP/tMlQy8M2fO8OKLLxIXFwd4Vqqfe+45HnzwwTqhP3gpMemxHE49hsN90UFo0hpw2vRY7S6kPDuNgkqOJmzTqAWT7+l
KgK8Bk8HjIPz915McO5hyoT3ccnsbWrdvWGvXdzlx2QXs0gTTIbuA1iH1Q2IkIyODESNGkJiYyMcff8zUqVNLbXvjjTfy008/0a9fP6ZOncqdd97J3r17CQwMvIozFggEAoGgbnDvvfeyYMECjwb1hdRjX19fwsPDi2kVAqCqODIyvZsugwGTJHHPPfeU0FQh6RIdQrNvOKGRvWvsWuorBq2+Qg5Ct6Ly47pj7D12DvA4DK2OoprKbpfClvUxXgdheKQ/Q0Z2RKevf5I2dSenSFDvObFvO4f/3MKJfdtreyoCgUBQb7g0ilDWXLZqfKF4iazRoyguzsdvu2J9uN9//52HH36YuNhYHFlZ+FosfPrppzz00EN1zkGoqirLjq4lx55P4eUatQbMGl9sdjeyLGGzu7BdMNAURcWg0eN0O1kbswVVVQkP9cFs1KEqKlt/iSniIBwwtG2dchCqqsrGffsxH/2Vjfv21xsNwBkzZpCQkMDAgQPLdBBeypQpUxg4cCDx8fHMmDGjhmcoEAgEAkHdpHv37nTv3p28vDzCwsIIDQ0lIiLC6yBUbHZsySkoNo9GoTMnB+VCsS9JpyXHbqdbt25079692LkzkqMv0yEcdt3rENpdDn4+vhG7y1Gpfm63wvdRR7wOQo0s88gdHejRtkGRdhqtzO2jOqA3aGjaMpihd3Wqlw5CEE5CQTVitxaQnX4Ou7WgtqciEAgE9YZLowhV1V2ig0iSJDRakzea8EpYu3Yt+fn5OLKyCNNoea1nLzqHhVV1+jXCzsS9HEg5iiRJSJKEQasn2BRAdr4DVVXRyBKqqpKV58CtqJzLLCArz1EkmhBAVVS2bDjBicMew06S4NZh7WjVrkFZw191TmXmkX14O5bss2Qf3s6pzLzanlKFWLp0KZIkcffdd1eq39ixY1FVlSVLltTQzK4dZsyYQYcOHUpM4a6LnEs4zerv/8e5hNO1PRWBQCCok+zduxe73Y4kSbz77ruEhYWRm5tbrIKxIzsLt9WKIzsLxeHAdaHisQqkO52EhYXx3nvvFVvoVVUVhy3Luy10CEFRFeYfWM7vZ/7ki13fk2XLqVA/l1thzuoj7I85D4BGIzNhRAe6tg4tsX1wqA8j7+3GbXe0q9HqwzVN/Z25oE6hqio5WWk47TZystLqTRRETEwMkydPpkOHDlgsFoxGIxEREdx4441MnjxZvMBcxoQJE5AkiTlz5lSq35w5c5AkiQkTJtTIvASC+sqlUYSSrMVhzcRhzQBVKda2qtGEr776Ks1CQugRGMybQ4YRotUh6Yrr3dQ2qqry08GfUVQFCQmDRkeIKRC7043N7kKWPcZwYTRhSno+LpdCXoEDm40i0YQHo88ScyQV8DgIBw5vR8u2JRt2tYWqqvy6Nxp91llUjQ591ll+3RtdL56jhWnrpQmml0ZwcHCR/oLSmTRpEkeOHGHXrl21PZVyUVWV/ds2cPb0MfZv21AvfodB2IKVRdiCAsGVoaoq3333HU8//TT//Oc/UVWVdu3a8eWXX9KoUSOSkpLIz89HVVXcNhuKzQayjGKz4bbZUSUZq9NJutNBWHg4X375JW3bti02jiRJhLUYRHjLIYRE9BY6hEDUiV85lnYKgCxbDg63s9w+TpfC7FWHOXQqDQCtRubxOzt5C5aoqsrpE2moStFnXWCwGVlTv91s1aZJWFBQwIoVK9ixYweJiYnk5OTgdrvL7CNJEhs3bqyuKQhqkZT4U9itBUiSjN1aQEr8KcKatqrtaZXJ0qVLeeCBB7Db7QQHB9O3b19CQ0PJzMxk3759zJgxgwULFjBmzJjanqpAILhGuRhFaMTlyAVVQVUVkDVodUa4JDXk8mjC8ow+t9uNRnMxzUHNzOKFdh0xSTKSJBFxzxhMYXWnUEkhMemx2Fx2DFoDsiQTbA5EkiSy8gqjCD2fiSRJuFxuFEVFp5XRaGR8TDpc6sVowg5dm5IQm0ny2WwGDW9H89aVc2ZdDU5
l5pF5eAdGxY1La0LrspJ5eAenenSnVZBvbU+vTFwX0p5iY2Mr1a+wfWF/wbVBSvwpkuJi0OmNJMXFCFtQIBAILmC1Wnn77bdZv349AFFRUQwcOJABAwbQq1cv5s+fz/Tp09m3bx+ZmZlonU50bjeyRovicpGTnoZLq0Wn09Knd2/ee//9Eh2El+IfWr7W3vXAzsRofj/jWWiTJZkHu4ymgSW4zD5Ol5tvVh7m+JkMAHRaDU+M6kSbJh4dZVVR2bH1NIeik2jXuRH9BrWqc9I9VaFanIRffPEFr7zyCtnZ2RXuo6rqNfVBXs+oqsrBHRs936kse7cbNWlZZ7/jc+fO8cgjj2C323nxxRd55513MBqNRdrs2bOHxYsX19IMBQLBtU4RLUJJg+LyaM54ipUElagdI2v0OF1Wzsdvw+LftNR77M6dO3nvvff49NNPady4MW67nTNzv8Mse5yGwb17EdTzhhq7tiulsKKxw+0g0OSPqqq4FDd2px2rw46kAfVClT6XooCkogCqpCXQT4+CG1mSsbvsFyodT+D2UR1ITcklPDKgVq+tJC5GESaiag0gSahaA/qsRH7dG03LQTfX2ecoQPPmzTl06BBz5szhhRdeQKst36x0uVzeCKTmzZvX8AwFV4tC28/tcmLxCyQ/J1PYggKBQACcPXuWl156iZiYGO++p556iv79+3u327Vrx7Jly4iOjmbe51+wbNEi7IqCqjiQAAMS48aM4qGnn6Z79+4l3lcVtxNZU7+q6NY0MemxLD+6zrt9V/vbaRXcrNx+ial5nEzMAkCv0/DkXZ1pFREAeHSwf9sQw4kjHimbYwdTaNWuAWER9aPoXEWospPwnXfe4Y033qhQSkHhL3N9ST8QVIzClWP5kuiOur6C/PPPP5OXl0d4eDgffPBBiW1uuOEGbrih7r1ECwSCa4PCKEJZY/BWnwPQGXxLFZcuL5pQVVV++OEH/ve//6EoCi+++CLffvst55csw5bqSZcwNQ6j8ehRNXZdV0KePR/wFCdJt2Zg0BqKCEvnFNhRJQVZljDk+xKU0oTzDc5gNecgAZIGnKoTpwMktxaDzkCGNRO34kar09ZJByEUjSJUdSZQFNDokF32ehFNOGrUKA4dOsTx48d5+OGHmT17NgaDodT2DoeDxx57jGPHjiFJEnfdddfVm6ygRim0BQ1Gs0dH1GgWtqBAILju+fPPP5k+fTo5OR4NPLPZzNtvv80tt9xSrK0kSXTv3h1L+w6MHH4n+UYDNrcLo0aHxWYjrF0H2pTiILTmJpNwfCWNmt+KX3CbGr+u+kBqXho/7F/m9T3d3LQXvSK6Vahv83B/JtzRgfnrj/PEqE40D/c4AN0uhV/XHCPuZDrgkbK5+bbW15SDEKqoSXjs2DHeeOMNANq0acPGjRuxWq2A55d8+fLl5OXlcfDgQd577z3CLgikP/roo9hstnLTkQV1n0tXjgtfaiVJxu1yeqML6yLnznk8/6GhldemcrlczJo1iwEDBhAUFITBYKB58+Y888wzJCQkFGu/efNmJEliwIABFBQU8Morr9CqVSuMRiPh4eE8/vjjnD17ttTxMjMzeeONN+jWrRu+vr6YzWY6d+7MO++8Q0FB8SIxb775JpIk8eabbxIfH8/jjz9OZGQkOp2uiA7M0qVLeeKJJ+jUqROBgYEYjUaaN2/OY489xvHjx8v9HPbv38/dd99NaGgoJpOJLl268Mknn1zR33VSUhIvvPAC7du3x2w24+vry4033shnn30m0tEE1ySXRhG6XTavBqGsNaLRGsvsW5o2od1u54033uC///0viuI5X1hYGOe3bSczej8AGqOBZg+PR9bVnZXmAqeVWXsX8OXuHyhwWnm+z5P83y1TvD/3t3oY7Zm/4J/aj6C0WwiK7YZPdijBsd2QTvfGN+VmdPF/4d4W4xnsvIte6YN58caneb7PE2g11aaqUu2UFEUIFIsmrKvPUYAXX3yRBg08RWB++uk
n2rVrx3/+8x/27NlDdnY2TqeT7Oxs9uzZw3/+8x/atWvH/PnzAWjQoAEvvvhibU5fUE1cagtq9R4nsVZvELbgJQhbsHyELSi4llBVle+//57Jkyd7HYRNmjRh7ty5JToIC8k9foLcI0fxDQigoUZLpCoR7uuDb0AAuUeOknv8RLE+bpeNszFrvP/mZpyqseuqL+Q58pkdvRDbhSyd9qGtGdbm1kqdo1PLEF577Cavg9DpcLNuxWGvg1CWJQbd0Z62neqedE9VqZKT8IsvvkBVVUwmE+vXr+fWW28ttoJsNpvp2LEjL7/8MocOHaJfv37MmTOHhx9+uEoTvx6oDxXtLl85BoqtINdFmjRpAsChQ4cqpYuZm5vL4MGDefLJJ9mzZw9dunRh5MiRGAwGvvjiC7p37050dHSJfR0OB4MGDeKTTz6hbdu2jBw5EoBvv/2Wnj17FglBL+TIkSN07dqVt956i9TUVPr168dtt93G+fPnee211+jbt2+paf4xMTF0796dqKgobrrpJkaOHFlEXH7cuHHMnz8fk8nEwIEDuf3225FlmdmzZ3PDDTewbdu2Uj+HP//8k969exMdHc2gQYPo378/x48f569//Sv33XdfpV4Itm7dSqdOnfj444+x2WwMHjyYvn37curUKaZMmcIdd9yB01m+uKxAUJ8ojCKUkFHdFyLmJBmdofyosZIqHaekpPD4448TFRXlbff444/z9tS/kr3+F+++yPvGYQitO7p8NqeNb/f8REpuKufz01lwcCUBRj8a+zWisV8jwn0bsnNvLm6rBZMagPWcBqNDgxuwuLSE64Lw0wbiLjDzx89JpCXYcOSp7P/lPH76uhuBBxejCGXFjSJrcbtdqJKEogJaPbLi9kQT1uFKxwEBAaxatQp/f09qeHx8PNOnT6dXr14EBQVhNBoJCgqiV69eTJ8+nTNnzqCqapF+gvqPsAWFLShsQYHgIg6Hg1dffZVPPvnEu2h78803891335Ups6GqKslRa1CcLiSNjDMnG7fNhi3lHLJOi+J0kRy1psjflqqqJJ1aj9PhqXxs8g3DJ/D6lvJwuV18v28pmVbPfTHMtwH3dR6JXEqWDkCBzcmfR1KK7TcaPIvNdpuTqKUHORufBYBWK3P7XR3rpNZ1dVAlJ+GWLVuQJIl77rnH+6Ati4CAAJYvX05QUBCLFi1i5cqVVRn+mqeuV7QraeW4kLq+gnzXXXd5dLrcbgYPHsytt97KO++8Q1RUFOfPny+139NPP83mzZsZMWIEp06dYvPmzSxatIhjx47x8ccfk56ezr333lviCur27dtJS0vj6NGjrF69moULF3L69GnGjBlDSkpKMce51Wpl5MiRJCQk8OqrrxIXF8eaNWtYuXIlp0+f5v7772ffvn08//zzJc71xx9/ZPjw4cTFxbF48WKWLFlSJJ3mhx9+IDU1lV27drFkyRJWrFjByZMnmTFjBvn5+Tz11FOlfneff/45jz32GDExMSxYsIB169axd+9eQkNDWbx4MV999VVFvgZSUlK4++67ycrKYubMmZw6dYoVK1awdu1aYmJiGDhwIOvXr+ff//53hc4nENQHCqMI3S47LpcVVVU9BTl0JlTFjeJ2lvsjSTJut4Pz8dvYs2cP48eP59ixYwCYTCbef/99nnnmGXKPHEVxee5Hobf0I6BL59q89CI4XA5mRy8iMScZAF+Dhbs7DCvS5viZTI7EZmAxeoy0AJeKDLgAWZKwWN1IqkqYG/LTrdgcLjQamV43N0eS66YOGhSNIlTQoLFmo7XloioKiuKuV9GEN954I3v37mXw4MHe3+WyfoYMGcLevXvp2bNnbU+9XlDXF4yFLShsQWELCgRF0Wq1RRz3TzzxBB9++CE+Pj5l9iuMItSYTDjSM7ggwYzWYkHSaNFYLMWiCTNSosnL9BQD02iNNG41rFTJmusFBRUfvRkAP4MPE7rfg0GrL7V9ntXJjMX7mb/uGJv3JhY7XpD
v4OdFB0lN9jhi9QYtw8d0JqJpYM1cQB2gSnk48fHxAPTu3bvE4w6Ho9i+wMBAHnnkET766CO+//577wqa4OpyPHo7J/ZtL7ddQGgYN4+4v8i+336eT9b5ZOzWfM4nJyBJEjnpqTiddlBV3JKLnPRUtHp9ET0ap8PO2h9mVGh+fe+4j6AG4d7tpNgT5Gal07b7Xyp3oaXg4+PDxo0beeSRR9i5cyebN29m8+bN3uPdunVj4sSJPPnkk97qoEePHmX+/PmEh4fz448/4utbNErlr3/9Kxs2bCAqKoo1a9YwYsSIYuN+8MEHRRzqRqORmTNnsmbNGnbs2MG2bdvo06cPAHPnzuXUqVOMGDGCt99+u8h5zGYzX331FRs3buT777/nww8/JDCw6I0qKCiIzz77rFR9qHvvvbfYPkmSePbZZ5k3bx7bt2/n6NGjdOjQoVi7sLAwPvzwwyIi9R07duT1119nypQpfPjhh0ycOLHEcS/lv//9L+np6UyePJlnnnmmyLHg4GDvittnn33Ga6+9VmfFzwWCyqCqbhy2LE8V4wtpxpJGhwQobjsFBXYysvOx210YDFqC/C2YzcX/jmVZx8rVm/hx+X9xuz3nady4MR999BEtW7YEIGz4UPTBwWTt3Uv4iDuu2jWWh9PtZO6+xZzJ8hhjZp2JJ264nxBLkLeNqqr8/MdpnC4FP7MOrUPBgoRLAp1GBlVF63BjzrSjcam4VJXsfAej7+tW5w23S7UIFUmDpCqogMZlQylMN9fqkW31Q5uwWbNmrFu3joMHD7J06VL+/PNPkpKSyMvLw8fHh8aNG9OrVy9Gjx5N5851x1FdH5g0aRKTJk0iJyen2iMva8IWvJzqtAWrG2ELCltQIKgJZFnmX//6F08//TRPPvkkt95afprrxShCJ4rNjnohxV7SatH5+3nOazDgzM8nOWoNvm3bYMs7R+qZ373nCG85pEIZKdcCJ9PjWHZ0LaPbDy1WiESv0fFg19FsOPkbHRu2wd/oV+p5cgsczFy8n5R0jzb2xl3x9OrQELPxoizPtk2nyEjzHDeZdQy7uxPBoWU7fOs7VXIS5uZ6vKmXa3mYTCZsNpv3+OV0794dgN27d1dleEEVcDrtFOSX/P1cism3uEFqtxWQn5dDbmYaiuJGljWeyAfVU3NSUlUUxY0ka7wryI2aeF5YKzImgHLZ6qvb7fQ4IauRtm3bsmPHDv78809Wr17Nzp072bt3L+fPn2ffvn0888wzLFmyhNWrV6PX64mKikJVVYYNG1bMKCxkwIABREVFsW3btmKGYUBAQIlO8QYNGjB06FCWLl3K5s2bvYbh6tWrgZINOPAYtz179iQqKopdu3YxZMiQIsdvu+22cl8oTp48ydq1azl58iS5ubneVe9CnZ7jx4+XaBiOGzeuWAVAgEceeYQpU6YQExNDUlIS4eFlG/flXWPjxo1p3bo1R44cISYmhjZthBCvoP4jy1qad3mQtLM7yUw5iEZnJKLNnRw8fIIlS1awOmodVpsNxa0ga2RMRiN3DL+dMWNG0a1rZ+8L0vHjMXy3eIp3xfimm27i3//+N35+RY2h4JtuJKhXzzrzYuVS3Mzbv4xTGZ5UaaPWwBM33EdDn6K2xKVRhBJgyHMiqSoajQQXLkV2qehsbhSNhCRLJMoqeXUwYulSFEUpokUoafWelHNJQtXokDUXIgDqWaVjgM6dOwsnYD2iRmzBy6hOW7AmELagsAUFgqqiqirp6elFUvn9/PyYN2+et7BneeQeP0HOkaMobgX1QqCVJMsYQkJAulgctDCaMOvoIdJtuygMNwwOu+G6STNWVZWoE5s4nHoCg8bAlN4TitlHsiRze+vStR8BsvPsfL7kAOcyPA5APx8Dk8Z0LeIgBOg7sCUZafm43QrD7+6Mf6Cpei+oDlIlJ6HFYiEnJ6dYxKC/vz82m80baXg5heKzhQ8fwdVHpzNgtpS/0mAwmkvcp9VocDkdaDRaZNmzuuq
WXEiqCpKELGuQZRmd7qIeTUhYZIXGBJAvrNgWotHo0OlKr5hYFXr16kWvXr0Az00nOjqa//znPyxYsIBffvmFTz75hJdffpnTp08D8M033/DNN9+Uec6S0lSaNWtW6gteoT5FYuLFEOfC8caPH8/48eOvaLzScLvdTJ48mS+//LLMFKBCod3S5ns5vr6+BAcHk56eTmJiYrmGYeE13nzzzWW2A881CsNQcK2gN/oT3nIIQY26ceTIYR6a8BzR0dHY7XbMZjNmsw+yLKMoCjabjXk/LmLRkpV0796dd999l3bt2tG1RwOeemoiX3/9NePHj2fy5MloNBpURUG6zCitK84lRVFYcHAFx9M8GmUGrZ7HetxLuF9R0WdVVVm08QQ5+Q4ah1jQOBS0DjeKJHkLfMiK594lXbiF2QIN5Ntc/PzHado2Dawz11xIls3Bz7/9jsaWS+bp456Kxnqzxxlo9gckcLuLaubUo2hCQf2jJmzBy6lOW7AmEbagsAUFgivBZrPxz3/+kz179jBv3jyCgi5mRFTUQVgYRejOz0dxub33CH1IMJK2qLumMJowYd8SdM0DQJIw+YQRGlk92Xb1gZj0WI6lncSoNXAs7SQx6bGY9SbMOhNBpoAKnSMr187MJfs5n+kp/BTga+TZMV0JLcEBaDLrGT6mMxJg8a0Zf0Rdo0pOwmbNmnHgwIFizr62bduSkpLCH3/8UWK//fs9VRb1+tJzwwU1S9vuf7ni1N1+d9zHhoVfkZ2ZhsXv4otYdto572qyf0hDwHPTs9sKOLhjI4PHPcWdj75wRWOGN786BoEkSfTo0YP58+dTUFDAypUrWb58OS+//LJXeLZbt2507dq1zPPcdNNNVzT+pUZa4XhDhw6lYcOGZfZr2rRpsX0mU+mrHJ988glffPEFjRo14qOPPqJPnz40bNjQuyL8wAMPMH/+/CppCFWkb+E1jh07FovFUmbb4ODgK56LQFBXOXA4jokTXyA5OZnAwEBCQkKKvUCazWYCAwMpKChgx44d3H///Xz55Zf06tWLJ598khtuuMGr7+bMzeXUjC9oNHxondIfBFBUhYWHV3HonKdipk7W8ki3e2gS0LhY2817E4k+cR5USM+y0lSVkVQVVZZQFBWNonq1egAUjYyi12ABjsRmcPxMJu2aBRU7b23gVlS2HDnO/t/WIGWdw+1yYHA5Lqto7EmfLkY9jCYU1B9qwhYsieqyBa8WwhYUtqBAUBGSk5N56aWXvJXAp02bxpdffllh52AhucdPkL3vgKdgyYW++uAgZH1xh5QkSWja+uKQc5DtJvQWfxq3HoZUykLNtYaqqqyN2YLT7STIFECGNYuVxzZgddqQJImHu40p0a68lIwcGzMX7yc92wpAkJ+JZ8d2Idjfc788l5xDYJAZveGiq8znOnEOFlIlJ2HXrl3Zv38/Bw8eLLK/f//+bNmyhU2bNrFnzx5uuOEG77HTp08za9YsJEmiffv2VRleUEuUVMWuNC6vbhfWtNVVmmXVGTJkCCtXriQtLQ2AyMhIAPr27ctnn31W6fPFxcWVeywiIsK7LzIykmPHjvH4448zduzYSo9XFgsXLgTgyy+/LDHtpaTqepcSGxtb4v7c3FzS0z1l4S+9ltKIjIwkJiaGadOmCRF7wXWB2+VAc0E8+dixY0ycOJGUlBTCw8PLNColScLpdGI2m0lJSWHixInMnz+fdu3aef92VEUhft6P2FLPEzfneyLvuZvgv5SsGVxb6GRPCodG1jC+2xhaBBUvenYiPoNZKw55nIGyhN6tonVdjCJU3AqSChpAlUCVQeNS0DgUjHpNnYomPJmazvpfonDEH0XiQoEaRwGyqqIYK6hnU4+iCQ8dOsTixYu9moS5ubn4+voSHh7OTTfdxNixY+nYsWNtT1NQDQhbUNiCwhYUXM/s3r2b6dOnk5WVBXgc8vfff3+lHYSqqpK4ZBmuvDy4ED2t8/dDYzKj+qsoLUA+DVL2xfu
s5NSh2p04s7Np2uWe60aHEC5GEVr0nmePSWdiX8oRQsyBGLUGfjn9O4/1KFm6ACA928qMxfvJzLEBEOxvYtLYrgT6eRZHzpxKZ+PqY4Q28mHY6E5oddeH8/VyqlT6ZsCAAQD8+uuvRfY//PDDaLVaFEVh4MCB/O1vf+Orr77ib3/7Gz179iQvLw+A++67ryrDC2qBsqrYlUZdrG5XkXkUpssXGjjDhnmqbq5cuRKbzVbpMbOysli1alWx/efPn2ft2rXAxb+pS8crNOKqk4yMDKDkVefDhw+zb9++MvsvWrQIu724RuT3338PQKtWrWjcuOxVHKjZaxQI6hoOWzanor8lLXEnitvF9OnTSU5OJiwsrEyjUlVVUlJSSE5OJjU1FX9/f5KTk5k+fXqRe1nKuvXkxnjSeHV+vvh1qlvOGFmSGd1hKP2a9uLBLqNpE9KiWJtDp9L430/7KLA5kSUJo15DI1lzQcriYjv3hR9FI6FKEpKqejQLAYtR640mrC3ybA7mb9jAqu8+xRF/BE/YowpaPcgaFIPlYhRhedSDSscJCQkMHz6crl278vbbb3uLmMTFxXHw4EHWrVvHW2+9RZcuXRgxYgQJCQm1PWVBFRC2oLAFQdiCgusTVVWZP38+zz77rNdBGBkZydy5cxk4cGClz5d7/AT5sbFofCxIkoTWYkbr64uKitIECAKlCaiXpE/I2RJStIJ7bzZqqrOarqzuc2kUoUHjWXDPd+TjVlzk2HMJNgVyX+fSi+Kqqsqc1Ue8DsLQQDOT7+nmdRDGHE1lw6ojuN0KKWdzOLj3bM1fVB2lSk7CO++8E41Gw5kzZ9i2bZt3f8uWLXnllVdQVZW8vDw+/PBDnnnmGT788ENvOfAePXoUq2AlqPtUZuW4kMtXkOsCM2fO5JFHHinye1uIqqosXbrUu0Jc6Mzu3r07Y8aMISEhgbvvvrvE1eD8/Hx++OGHUvU2X3zxxSJaM3a7nUmTJpGfn0+vXr3o27ev99hTTz1F06ZNWbRoEdOmTSuxEFBKSgpff/11pa4d8Ebxzpgxw5vmAZ6w+YcfftirG1oaSUlJvPTSS15xa/BU/HvrrbcAeP755ys0j5dffpmAgAA++ugjPvzwwxIrosfGxjJv3rwKnU8gqKuoqkLyqQ243XbOJ+5g04YfiY6OJjAwsEwHocvlIj4+nszMzAvnUbFarQQGBrJv3z6io6MByDl6jHMbPAt2kizRdPyD6EoR1a9NZElmRNtBdGjQutixvcdT+XbVIdKzbSiKisGgIcSgRWd3o0gX3GwXXupVVNyS1/WGIoHW7kYtcKKRJWx2TzTh1XZGKKrK1oOH+eab/3Fuz69ILs8LtF6np1XPW3DofTy+Tm0l5Va0emTF7YkmzMyr9nlXhX379tGjRw/WrVuHqqrl/qxZs4YbbrjBKz0jqH8IW1DYgiBsQcH1h91u58033+TDDz/0/s306dOHuXPn0qJF8YXP8ijUIlSdLvTBIeiDg9EFBgISBABBgOvCvwFF+8qSASXO5ulfRxZeaprLowizbDnY3U4kScbucnBL896YdaVLLEiSxP1D2mI26mgYbGHyPd0IuJBGfGR/ElvWHfcqv7RsG0rXnuVHQl+rVCndODg4mBMnTuBwOGjQoEGRY2+++SYWi4W3337bGzkIni9n3LhxfPHFF0KTsJ5x6cqx0Vy5st9avcGrR9OoSctaTwFzOp189913fPfdd4SGhtK9e3dCQkLIysriyJEjXqPvoYce4vHHH/f2mz17NllZWaxZs4a2bdvStWtXmjdvjqqqxMXFsX//fhwOB0ePHi2mHfOXv/wFRVFo27YtAwcOxGw28/vvv5OUlESDBg347rvvirS3WCysXr2aESNG8P777/PVV1/RpUsXIiIiKCgo4MSJExw9epQGDRrw5JNPVur6X3nlFdauXcvXX3/Npk2b6NGjBzk5OWzZsoUWLVowevRoli1bVmr/p59+mlmzZrF69Wpuuuk
mMjMz2bRpEw6Hg9GjR1d4ASAiIoIVK1YwZswYXnrpJd5//306depEWFgY2dnZHD16lFOnTnHTTTfx0EMPVeoaBYK6RGbKAQpyPSuSOoMf6349iN1uL1IJ73JsNhsJCQneFzVJkmjUqBEBAQGoqkpmZiYLFy6kU4sWxP+4wNsvbPgwfFpW3litCbbG7aRVcDPCfcvW0tp+MImFG2NQFRWXW8Fo0GLSaTDkO0FVkRRQZBVVki46ClUVRb3wLFFBo6ro853kqCpGg5b0bBsut4pOe/WeN7/s2ceRXy5Gw8hAZOuO3DZ4BPP/PIA+6yyq1uCJByjBqC90epaoT6g1oM+sW9qEubm5jBgxwptaGBkZybPPPsttt91G69atsVgs5Ofnc/LkSX755RdmzpxJfHw8aWlpjBgxgiNHjpRaIVZQNxG2oLAFCxG2oOB64ty5c7z00kscPXrUu+/RRx/lmWeeqXSKcSG5x0+Qe+QoGosnilBzQUNURUWJxGNE2AAjKM1BilaRL6RVXFrpOPf4Cfzata3iFdZtLo0i9NVbyHMUkOfwFB2RkdBr9exIiObGxl3LfLaEh/gw6Z6u+Jn1+Jj1qKrKvj8T2L3tjLdN+y5h9L21JZJc+3ZWbVElJyGUXtkKPCtDzz33HNu3byclJQWLxULPnj0JCwur6rCCWuBKVo4LqWt6NI8//jjNmzdn48aN7Ny5kyNHjnDu3Dm0Wi3h4eHcf//9PPzwwwwdOrRIP19fX9avX89PP/3EvHnz2LNnD/v27cPPz4+wsDAefPBBRo4cScuWLYuNqdfrWb16Nf/4xz9YvHgxZ8+eJTAwkAkTJvDWW295dW4upWPHjhw4cIAvvviCZcuWceDAAbZv305ISAgRERG89NJLjB49utLXf9NNN7F7925effVVdu3axcqVK4mMjGTKlCm8+uqrTJkypdz+Tz31FG+88QYbNmwgLy+P1q1b8/jjjzNlypRK/X7079+fw4cP89lnn7F69Wp27dqF3W6nQYMGNGnShIceeogxY8ZU+hoFgrqCw5pJasLv3m3fBn/h55//g9lc+r00Ozub5ORkr0NMq9USERHhFaGXJAmz2cyKFSu4LygU8j2Gkn/H9oQO6F/DV1QxtsbtJOrEr5h0Rh7rcS+R/iVXuNy0J4GVWz2RRZIkMaJfC269IYLzSTlsXHYYp9sFkoQsS/j4G3E43KiqiiRJGI0XzRinw43brXDvHe1p0NgfX7MenbZKCROVpl/nThzd+Qtqbga+gaHcdvsomjZridPtpuDYn8iKE7dGD6VE6EgoqK6SIwJUZDSKk4Jjf+K6tS+6q1j5tTT++9//kpSUhCRJjB49mu+++w6zuWglXD8/P3r06EGPHj2YMmUKDz/8MEuWLCEpKYlPPvmEV199tZZmXz+YMWMGM2bMKBKtVZsIW1DYgpf2F7ag4Hph9+7dXgeh0WjkzTff5Lbbbruic6mqytkVq8g5eAi3zY7OaEK5JIJWDZQgUAOOC0XaXECIhNpJwb1fQbrwOJBkGZfNTnLUGnzbtqn1hZea5NIoQqvLTrbtYtX1QHMAGkn2Vjq+VM4mLctKkJ8R+RKHX3iIZ4FLVVX+/C2WA3suphV3uzGSnn2bXtOfZUWQ1OslPrUek5OTg7+/P9nZ2fj5+ZXZtqCggKNHj9K+fftihnpVUFWVDQu/Iv7EIUw+Jc8hLzMdRVWQJRmfwJKrj1nzcmjSphODxz113fzxbd68mVtvvZVbbrmFzZs31/Z0BALg4r3i0KFD5Ofn88QTT9TL6G6n00lUVBTDhw9Hp9PV9nSKoaoKZw4vxpqXDEBgw65YacrAgQMxmUzF7tMFBQWkpqYWSSnz9fUlIiICrVZbrG1eejpfjLiLcH9/9EGBtHlhKtpqvPdfKdsT9rDi6Hrv9sh2g+nTpLggvaKofL3iIMfiPNpYt94QyZ03e4y7uTO3kxiXiSxLSJKETq/BL8BY6rNDVVVyc2w0bx3K6Ae61fg
zRlVVEs6do0mjRkX27z52DEfWeXr36uuNLnC7XCyd9T45mellnRG7zY7BaKCIAONl+AWGcPcTL6PRVm6dtzK2REXp0aMH+/bto3Xr1hw8eLBC9xCHw0Hnzp2JiYmha9eu3pR5QdnUF1uwIghbcHNtT0cg8N4n4uLiiIuLo2nTptVeHOdqcbVswffee49t27bx4Ycf0qrVlS9ynNvwC0lR67AnJyPr9cj6y+Z8kxEaaMF6QY/ZIoFWAqcKB+wQW1SH0NCgAR3/8TpyHbSDqwNVVfl0xxwOnjuKUWsgy5bjVWj01VvwN/qiqioZ1iw6N2zPlN4TkCSJ+JQcvlh6kM4tg7l3cNsijkJVUflt40mOH0rx7ut1c/NaTzGu6d/litoSVY4kFFwfKG43eVkZaPUGnI7iIsUAiqqgqgoKlNpGqzeQl52J4nZX+gVHIBAI6hsZydFeB6He6E+DJn05djwGRVFKTE9JT0/HarV6U4x1Oh1Nm5a8oqk6HDitVqwuJ7JWQ7OHH6oTDsLdZ/cXcRAOaXVLiQ5CAFmWeHRER75afpA2TQIZ3MtT7Xjj6qOcPZOJJEkXoo+0+PgZynQoeCILdSTEZpB4JovIZoHVe2GXkHj+PGvWrsKaFMPoB58mMuKi8H/Pdu2AdkXaa7Ra7nhwEnZrQanndLqcbN26lf79+6PTlm4YGsyWOvP8PHXqFJIk8cgjj1R4kUGv1zNhwgT+7//+j9OnT9fwDAXVSUVswYogbEGBQFDXKclOe+GFF3jmmWeqtNCWuTea5DXrPbZNgwaEjxyBb7s23uMFBcmcPbsGWdIiB+pwOvNQFIcng8Io0WTsOMyWogWBtD6+16yDEIpGEboVt9dBaNGZ8Dd6JEskScKiN3ujCXWOYL5cdhC7w8WfR1JoGGxhYM+LkdpH9id7HYSSBH0HtqJ9F5HtWkiVnsyyLCPLMkuXLmXkyNIryVzOunXrGD58OJIklSuKK6gbaLRahj7wbJkvOJuWzcZakI/JbOHW0Y+W2q4uveAIBAJBTWEvyOB8wnbvdliLwcgaHSaTCVmWiwjFF9KgQQPy8vIwGo34+vqWmZKsqCqyLGPS6ggfdSfmJsXT1K42+1OOsOTwGu/2rc37MLBFnzL76HUanrm7CxqNjKKo/L4xhr07ElBVkGUwmXVYfCtWQVWn12CzOdn1RxwRTQOqPUrJZncQtXkj8fv/AMWT77Nh7XIefWwKUjmaRGZff8y+/qUedzqd6Ey+BIaG1cmo2LJo3bp4IZqyqEoEhqD2qIgtWFGELSgQCOoqqampvPzyyzz88MMMGjTIu1+n01Xp+Zx3OpaEBRd1ixvfNZIGAwd4t1VV5dzBrSCpaAxm3C4rqur02DKSjEajI9d+iuA2va6bKOzLtQglrYQKWJ0ObPlabBo3Rr1HfsWg0ZPvKGDx/g1kH+uAw+Gx01pGBNDnMgdguy6NSIjLIDE+i1uHtqVl29CrfWl1mio/na80W1lkOdc/ynvB6djrVpxOOzqdgcAGwhMvEAiuX1RVIenUOlTVY6AEhXXH7OdZ+Q0NDUWn0yDoj5cAAQAASURBVJGcnEyDBg2KFG4wGAw0bdq0QimCTlXFr2FD2o4cQXCfv9TMhVSCw6knWHBwJeqFNd5+TW9kSKui+ogut8Kq304zoEcEgX5G736NRsbldLMx6hgnj6bidLiQJAkfPwMmc8XT4GsymvDPg/vZuXkN7vxs7z6t3kibziVHSV4PNGnShCNHjpCVlVWpfoXtmzRpUv2TEtQo5dmCAoFAUJ+Jjo5m2rRpZGRk8Oabb9K0adNqWdiyn08jbvZcFJfHLgzu3YvQW28p0iY/+wx5WbFotCZUxYXLcbH4q97oue/mZcWSn30Gn4BmVZ5TfeDyisYAPnozeXlgtTuR8uw0CvLYzJIkoZMMHDx3gkA1GAMhtG4SyOMjO2HQFdVx1mhkbhvRnvPn8giLqDvPtNTEWNKO7yC1S3saN29TfocaQizhCaq
Ntt1r/yW1LjJgwADhFBcIrjNcjgIUxRMprzcGEhrpiaZLS0vj22+/JS8vj7y8PFRVLVbdtSIOQlVVKSgo4N5HH6XlqDur/wIqyfG0U/x4YLn3Xtcroht3tBlUZKXb6XIz++cjHI1N52hcBlPGdcP3ggNQVVTWLj9MUkIW+XkOFFXFz8+ATqfB5axcwQZJlnDYXdUWTXg2NZW1a1eQm3QxNVaSJMLbdmf44OFYLJYqnb8+M2LECA4fPszq1asrVVl19erVSJLEnXfW/u+uQHA1ELagQFC3UVWVxYsX88EHH3gLRQUGBlbL360rP5/Ts77FdaHInG+b1kSMGV3EPlFVlfPx21AUF1qtGactAy6MrdGZ0WgNqKqK02XlfPw2LP7XfnENVVX5Yf8yrE4bvvqLtpbN4cJmdyHLEja7C5vDhVGvxeZwkZ3rRpXd5Fti6OLfmsdHdkKn1WAtcOCwu/EPNHnPo9Vp6pSDUFVVDu74FVtOGgd3/Ep4s9a19h3XipOwoMDzB2I0GstpKRAIBAJB/UNn8KF55/tJS9iBb1Ar8vIKmDt3LgsWLMButxMQEEB6ejpOpxO73Y7BULF0WtXpQtJpKSgowGg0cs8999TwlZRPti2HefuW4r6Qfts9rBN3tb+9iGFjs7uYtfIQpxKzAMjMsZGSnu91EkqyRNtODUlKyEJVVcwWA0iSN1WksugNWnKzrShuFY32ygwsu9PJ2o1riTuwE1W5OA9zSDiDh46ieYSIgpsyZQpff/01q1atYuHChYwbN67cPosWLWLlypWEhISUWz1VIBAIBIKaxuFw8N5777FixQrvvl69evHvf/8bf/+qOZEUp5O4Od9hP58GgLFRA5o98hCSpmhk26VRhG5nntfukDQ6dHpPNV5JktBoTddNNOHSo2uIyYhFQsLmsmPSeXxHWXkejUatRsblVsjKc+BvgfRsG6ggKXpUcwb9+1rQaTXk5diIWnoIl0th5L1d8amghM3VJiX+FClnYpA0WlLOxJASf4qwprUjz1IrTsIdO3YAHu0lgUAgEAiuRWRZi09oD36cP5/vvvuO/Px877GgoCD0ej3Jyckl6tsoNjuOzEz0gYHIRo8xozid2M+lIhn0ZNps9P7LX+jevftVu57S8Df6MazNQFYeW0/nhu24p+MdyNJFfb58q5Mvlx0g4ZynYrNBr+WJUZ1oFRFQ5Dyt2zfEYXfjF2DEZKp6pW2TWYdGW7ZOYFkoKpyJPek11GWjmW59htCv540lFp25HgkPD2fJkiWMGTOGhx56iN27d/PCCy/Q6LKKzwApKSl8/PHHfPzxxwQHB7NkyRLCwoQ0iUAgEAhqj9TUVP72t79x6NAh777x48czefJkNJc58iqLqqokLlpC3qlYAHS+PrR44nE0JlOxdoVRhLKkxe20eg5IEnqDn6eyxgVkjf66iCb848wuVh/f6CnYIsk4FCcmjEWiCMFTAM9qc+JwuL0fk1lvQNbb+eX0b4Rpw1iz7DD5uZ5iW1vXn2D4mM61dVml4oki3Ijb7ULWGnC7XRzcsZFGTVrWyndcYSfhgQMH2LdvX4nHfv3113L1aFRVJT8/n7179zJv3jwkSeLGG2+szFyvO2bMmMGMGTO8Ic8CgUAgqNt4jBnPw/ynn35i1qxZZGZmeo/r9XrGjh3LhAkTSE1N5f777yc5OZmwsLAijidHdhZuqxWHLGE0NgRVxZGejltRSEtLp1Gjhrz33nt1xjjs0+QGQsyBtAhqWuQ6svPsfLH0ACnpHgep2ahj4ujONGnkR16uvdhqbsdu4Vd13mVh0uu46dY72L5yLmHtbuCO24biUweqR9clHnvsMQC6du3Kpk2b+PDDD/n444/p0KEDrVu3xmKxkJ+fz8mTJzl8+LC3WE+XLl2YPXs2s2fPLvG8kiTxzTffXLXrEAgEAsH1x/79+/nb3/5Geno64NGFfv3117n99tur5fySJOHTuhWZ0fuQZJnmjz+KPqi4VvKlUYS
yRofiNqC47Oj0vkiyttg5r/Vowt/P/Mniw1HYXHYkScbf6IOfwSPNUxhFqLlga8qShIKKoqpoJAmzUUeQvxG7S0NMfCILd/+J7PJ8hv6BJvoPrlyhtatFSvwpkuJi0BvNuKw29EYTSXG1F01YYSfhsmXLeOutt4rtV1WVTz/9tFKDFr5EPf3005Xqd70xadIkJk2aRE5OTpVDnQUCgUBQs9jyU0mJ20xY89swmIM4ePCg10EoyzKjRo3iiSeeoGHDhoAnmvDLL79k4sSJJCUlERgYiNlsRrHbUWw2kGUUmw23zYYrL5/8Aiu5dhsNAgKZ9f33tG3bttau1eV2odUUNSHahLQosp2ebeXzJQdIz/asiPua9TwzpithIRZOHT/PlnUn6HdbK9p0aHjV5l0adruN9RvX0ql9J5o3v2iM9WrbmojHX6RxcFAtzq7uMmfOHK+juvBft9vNoUOHikRlQFEH+ubNm8s9t3ASCgQCgaCmcDgcTJ8+3esgDAsL48MPP6RNm+otFhF0Y0/0gYG4bTbMTSKLHb80ilCnsyBJEnqDP4rWiawpuZLytRxNuDVuJ6uPbyTHnuvRpzb4YNQacbid2J0urA47kgZvkTwASaOiqqDXa/Hx0eB0O9HlWjAfCyNNk0UDSwjBoRaG3d2pUsXwrhbeKEKXE4PJAlYbWp0eh62g1qIJK5Uvo6pqkZ/S9pf307BhQ77++msGDhxY7RckEAgEAsHVxuVykBizFmtuMrEHf6Qg5ywTJ05Eq9UyZMgQFi9ezP/93/95HYSF9OrVi/nz59O7d2+sVitnz57lfHIyVqcTh6pidTo5n5hI0vlU7C4XXRpH8NOSxfTu06eWrhTO56fznz++4NC5Y6W2Sc0o4NOF+7wOwkA/I1PGdScsxMLBvWf5NeoYbrfC1vUnSE3OuVpTL4aqquyN3sU3X/yHUwd2snH9Sm+0G3gcX8JBWDYl2YYl2X6l7S+trUAgEAgENYVer+ftt99GlmVuvPFGvv/++2p3EBbi06ol/p06lnjs0ihCryNIkpC1+iJpxpdyeTThtcLm2O1EnfgVAJfixqwzopU12Fx2bC472QUFqJILSXZjsJpoHNsOg9WEJLtRJRdO1YHdZcedrsdwrDEaRYtLcdMgzIc7xnapkw5CuBhFaDCaiyy8GoxmbzTh1abCkYR33XUXzZo1K7Lv0UcfRZIkJk+eTI8ePcrsL8syPj4+NG/enM6dO1c5x18gEAgEgtpGVVV+//13PvrgLQb1bcqg/p3RmwIx+TQi0k/DqlWrCA0NLfMc7dq1Y9myZURHRzPv8y9YtmgRdkVBVRxIKuglGNm2PUPad2To314ksGuXq3R1xcmwZjFrz3yybbn8cGA5j3QbS7vQ4mkQ0SdSyc7z6L+EBpp5dkxX/C16dm49zYE9Z73tWndoSEhD32L9rwbJyWdZv3Y5mef+n73zjq+iyvv/e+b2kt4rCSX0jhQBRUAQbOCiiL2zu7q668/C7uq66q6rz7r7PLqLirrWtWEBAelFQYpSQg09CSSk99x+78z8/rjkkgsJSUhCAsz79cpLZ+acOWcu985853O+JT+wz1ZVztHjx+iRlt4hc7rQyMnJ6egpqKioqKhcwthsNgoKCsjLyyMnJ4fExESsVmuz+g4bNoy33nqLgQMHtpk24S4txZF/gojBg5psW+dFKEledJqWFXS92LwJ12ZvYuWRHwC/QHZLv+sZnHBKWM0+Uc1/Fu3FpBXw+SCyUsTi06DzRVMTreD2+vD6FCYPSafgcDWKxb/gmJQWzrU3DkCr65zaU30vQqM5+Hur1Rtwd5A3YbNFwoEDBzJw4MCgfffeey8AEyZM4IYbbmjbmamoqKioqHRitm/fzty5c9m5czseZyUVZQWMHdWH9G6TEES/MdKUQFiHIAgMHjwYS+8+3DD1euxGIy6vF6GqkhBBJCQkhOSbb+pQgbDaVcM72z6l2uUvQBJvjSE1PKnBtpNGdKHa5uF
YUQ2/vGkAZoOW71cc4siBkkCbwSNSGToq9bwbtm6ng1VrlpG9bztyPa81fXw6E6++jh5JnScvYmenS5cuHT2Fix41P7WKiopKMIqikJmZyRdffMHixYtxOBzY7XZeffVVzGYz119/PTNnzmTw4MEBG2P37t0sW7aMp556KsjuaMrRqSX47Hay330fd2kZ7tJS4q6eeFYbx159jNqqHBTZi8dVic4QikbbvMq7F1NuwgpnFeuyNwa2r+kxjnHpowLbiqLw2Y58JIcFNCDbfJgkkATQuQSMDiNavUiN3cWe9SXERfrbpfeI5qopPdFoOm+xuVO5CE34PG60+lP//qd7E57P3IStqm5cl3C6LX9cKhcun3zyCXa7HYvFwu23397R01FRUVFpF7KysnjjjTfYsmULoOB1+cNlw0PNaEwZGC3NEwZPp/bgIWqz9hMSHk640YCnrBzJZEaRZRSfl9B2CoNp1tzcNt7Z9hmVzmoAYi3R3D/0Vsw6U4PtBUFgxvgeeHwSGgRWLNzHieNVJ4/B6PHd6T3g/Fa2VWSZnZk/s2XDStwu56kD5jD6jZ7MVYMHoREv7JV4lYsPNT+1ioqKyikOHDjAnDlzyMzMxO12YzabMZlMyLKMyWTC5XLx4Ycf8vnnnzN48GBefvllsrKy+J//+R98Ph8xMTGBolttiez1kvv+R7hLywCo2rmbmCuvQGNoWPSr8yL0eexwcsHS66lBIKzRMOPTEQQRn+S54L0JI03h3D34Zj7I/JJJ3a/girQRQccPHqskK7sCSZZxumRSZBAAWQCNomCweZEiDehNWkrtPsI8PgYOSWLshB4IndiuUxSFPZtX43E5UWQJRVEIjQh+h+gob8JWiYR33313W81D5QLHZrPxzjvvUFpaSkxMDDfeeGOzXb1VVFRULgSys7N56623WLt2bWCf12MnKSGcW28azRVjLye935RzOreiKBQuXYbs9aELCcVnsyO5XAAIGg2iQU/h8hWE9O513o1Ah8fJu9s/p8xRAUCkOYIHht6KVW8JtNl7tAyLSUd64ikRQxQFZK/MsoX7KCuxAaDRiIyf2pO07tHn9RoAFi75muNZO06lutZoieozguuvmkC4uWGxU0VFpfnYbDZKS0txOp2YTCZiYmJUW1BFRaXN+Pnnn5k9ezaFhYVEREQQHR2NIAgoioLX68VsNmOxWIiIiMDhcLB582bGjx9PWFgYFovfZtm6dSv33HMPoth23mWKopA3/yts2f4UHLoQK10fvK9RgdDfR8JpKwLlVB5kjcaALHtaNLZGo8fjrkZRJAShVdJOh9I9Ko0nxswm3BgatF9RFBZtOEq13Q0KWBQBiwKIfptSlhW0HgmNR8Zo0FLu8uEI0zNmQvdOLRAC7N+2geysTBRZRjj5fXTaa4BTodEd5U3Ybt+kqqoqamtrCQkJITw8vL2GUelATnf1zs3NRZIkcnJyGD58eIOu3ioqKioXGoqi8OKLL7J48eKgogpxsZFcP3EoY0f2QqPRktR9ciDMuKXUeRFqLP7KdlqzGdntQnK6MERFoQC1WfupPXiI0F7nr6qx0+vi3R2fU2wrBSDcGMqDQ2cRajyVR3Db/mI+XXEAg17DwzMGkhzrP6YoCqsWZQUEQoNRy6Qb+xKfGHrmQC0gM2sfG9csYvSEGxjcp+FE4A2R2nsox7IyAQVtQnfGT5xKn6Tz682oonKxcbot6HQ6kWUZURQxmUyqLaiiotImHDhwgNmzZ1NUVERiYuJZRT5BEDAYDPh8PioqKqiqqqJr167ce++9PPbYY20qEAIUr1xF5fZMAESdlvT770UfEXHWPrLPjag1YDD7F02jk0cSFn1uESMarRlRvHAEQkVROFKRS4+o4PzPpwuEADsOlPDzvmIUGURBIEoGjQBKPQFQqOdNaDHpOFhm59DxKnqldc7CczWVZez8cSUHMzchST7Ek+8OOoMRkyWEWps9qH1HeBO22bfJbrfz3nvv8c0337B
161aczlOhPCaTicsuu4wZM2Zwzz33BJR8lQuXhly9RVFEURREUWzQ1btXr14dPe0g0tLSOHYsuCKUXq8nLi6OUaNG8cgjjzB27NgOmp2KikpnQRAEtFptQCCMiorivvvuYWBXB4rkF8BikkdhMEed0/lP9yIEf14bQadDK2oQjUb/KrndTuHSZYT0zDgvBoLb5+H9zPkU1BQBEGKw8sCwWUSYTnkLbtpdwJdrD4Oi4HL7+HlfUUAkFASBUVd147svd2M06bhmej8iosytmpMsy2z+YTlScS6bf1jOwF69GzT2ZUnCYavGGnbKQBzSPZ19/UcTF5/MhMH90bbxS8KlyEcffdTqc9x1111tMBOVjqCxsD9RFJFludPbgqodqKJyYaAoCnPmzKGwsLBJgRDA6XSSn5+Pz+dDp9Ph8/kICQnh8ccfb3P7qWLbdopWrAb8UcJd7rgNc2rKWfsoikzB0VUosg9Ro8Ma0ZXY1NGXxEKKoigsPbSWDcd+ZmK3sUzsNqbRtsUVdv41fyc+SUYjClgECBEEFAF/BWhJQVAUZJFT3oR6DXaXjyUbs+nZJaJTfaYup52sn3/gyN5teFxOvB4XoiCi1ekxW0PR6g1Bzgh1dIQ3YZuIhKtWreLuu++muLgY4IyLczgcrF+/nvXr1/PSSy/x4YcfMnHixLYYWqUDaMzVu7y8HEVR0Gg0REZGBly9t2zZwqxZs5g3bx7Dhw/v6OmfwejRo+ne3f9jq6qqYtu2bcyfP58vv/ySV199lccff7yDZ6iionI+qampwWg0otfrA/seeOABNmzYwKxZs7jlllvwOk6Qf2gJACZrPJGJ556bt/bgIWqy9iMajUHGjACg9T+mBUFAY7GcV2/CwtoSCmr8z3WL3sQDQ28l2nxKdFu77TiLN2QHti8fkMi0K4MNl9j4ECZP60tYuAlLSPOScZ+NHVn78JQcA60eT8kxdmTtY1i//kFtThw7wtpVi3H7fNz74O/QaE59hndOndqpDMYLnXvuuadVn6cgCKpIeIHSmC1YH7PZfEHYgqodqKLSucnMzCQzM5OIiIgmBcKqqiqKiooCeoROpyM+Pp7CwkIyMzPbtJaC7Wg2eV98GdhOuP46wvr3a7JfZdEu7NX+BQqtzkxC17MXOLlYUBSFJQfXsPH4VgBWH91An5juJIbGn9G2rMrJ3z7YSrXdjSgIaDUCSYIGUZKQ6wmEAIIEiPW8CY1asnIqOHisstN4E1aWFrFuwQd4PW4URcFprwUELGERGIzmJvNQnm9vwlYvoy9btozrrruO4uJiFEVBURQsFgsDBw5k9OjRDBw4EKvVGjhWWFjItddey/Lly9ti/irnmdNdvS0nQ+MaQhAELBYLiYmJFBUVMXv2bA4cOHCeZ9w0DzzwAB988AEffPABCxcu5MiRI9x1110oisJTTz3FoUOHOnqKKioq5wGHw8G7777L9ddfzzfffBN0LDY2lsWLF3PXXXdhNBoJiexGlz4zMJiiSOg2CUE4t8epoigUfrcUX00t3qoqFJ8PAG1ICNrQMLQhp8J6RYMB2eujcOmyBlca25q0iGTuGXwzYcYQ7h86izhrTGDO323MCRIIrxqWwozxPSg6UY0iB88tMSW8TQRCWZb5ecNKkCXQm0GW+HnDSmTZn8/HXlvN0oWfsOCL9ygvK8FWVcGGjT8EneNSMMLPN3X23bn+qVx4XGy2oGoHqqh0br744ouAt/LZqKqqorCwMPBsMZvNpKenExERgcvlYv78+W02J1dJCbnvf4gi+W2Q6MtHEnNl057HLnspJcd/DGwndJuEtpEicBcTiqKw6MCqgEAoIPCLvlMbFAgBIkIMeLwSigJ6nUhSiBGdV/YLhPIpgRBA0QrIghDkTej1ySzZmN1p7IywqFhMlpPpeGQJgNCIaAwmS7MK1ZzuTdjetMqTsLq6mjvvvBOv1wvANddcwx/+8AdGjw52l1UUhR9//JGXX36ZZcuW4fV6ueOOO8j
OziY0tHW5iVTOHy119a5DFEUSEhIoKChgzpw5LFiwoFO/qBmNRubOncvXX3+N3W7nm2++Yc6cOR09LRUVlXbC4/Hw1Vdf8f7771NZWQnAe++9xw033BBkkGq1wY9Mc2gS6QNub9X9rHrvXiq3bgskLXaXlWGMj+OkH2EQHeFN2D0qjSdH/xLtSW88WVZY+MMRNuw8EWhz7eh0JlyWys6f89i26Rj9hiQx8or0Nr/Pn/IiNIAogtaAp+QY2/fuRuOoYuvmdTjdpxJ+yyHRSOENG58qbcP777/fZBtJkigrK2PTpk0sXboUSZKYOXMmkydPPg8zVGlrLgVbULUDVVQ6DzabjcWLF2M2m5u8Z4SGhlJRUYHH4yEiIoK4uLhAH7PZzKJFi3jmmWfapKCS7HL5bREgtFdPkqbf2OT8FFmi4MgKlJPFSiLjB2EN79LquXR2ZEXm2/0r+Snfn7exTiAcljSg0T6H86pweSSsZh1RoUZMlW4EWUFQTmlqCqBoBBRBABQEufN4E9pqKrGGnspLKYoig0ZPIu9IFuXF+ThsNWj1LVtAP5/ehK3yJHzzzTepqKhAEASee+45li5dypgxY86YsCAIjB07lu+++47nn38egMrKSt58883WDK9ynmmJq/fpiKJIREQEO3fuJDMzs51m2HZYrVZ69vS/gOfm5nLllVciCAKfffZZo33+53/+B0EQuOWWW844tn37du6++27S09MxGo1ERkYycOBAnnzyyaB8ON9//z2CIDBu3DjcbjfPP/88GRkZGI1GUlNTefrpp3GdrHhaXV3NE088QdeuXTEajaSlpfHnP/8Z30lPpPqUlpby+uuvM3XqVNLT0zGZTISGhjJs2DBeeeWVwDlVVC4kbDYbOTk55OXlkZOTg81ma1F/SZJYuHAh06ZN45///GdAIBRFkSuvvDKwAHY2WvOA9tpsHPnXG0geb8Di0YWF0pBAWEd7ehPKikxWyeEz9tcJhJKs8MWqg0EC4U1X9WDCsFQ2rTvKtk3+e9neHScoyKtq27nV9yLU6Pw7NToEn4fNC/7DhnXLAwKhojNgHTCO2+7+FRMGNL+wiUrLufvuu5v8u++++3jqqadYuHAhe/bsoXfv3nz11VfodDruvvvujr4ElRZyqdiCqh2ootI5qKuYbjQam2wriiIpKSkkJiYSHx8fZKMZjUacTidlZWVtMi9zaio9HnuE8IH96XLX7QiapgvXCaKG6KTLEDV6DOZoYlJHt8lcOjOyIrNw/4pTAqEgcHO/684QCBVFoarWHfj/JRuzcXl8hFn0CE4vWrfPrwoCiuL/X0kUkE+2VwBZAK1bQnF40YgCLrfvvHsT2moq2bziK5Z+/C8qS4uCjiWk9SClR1+K83MwGJsWvU/nfHoTtkok/O677wAYOXIkzz33XLP6PPvss4waNcr/j79kSWuGVznPNNfVuzHMZnObu3q3JzU1NQAYDAYee+wxAP7973832FaW5YDo/cgjjwQd+/vf/87w4cP56KOP0Ov13HjjjYwZMwav18urr77KunXrzjifx+Nh8uTJ/POf/6R3795cffXV1NTU8D//8z/cfPPNVFRUMGLECD766COGDBnClVdeSXFxMc8//zy/+c1vzjjfihUreOyxx9i9ezddunRh2rRpDB8+nIMHDzJnzhzGjx+P2+1u1eelonI+UBSFHTt28PTTTzN8+HAmTZrE888/z6RJkxg+fDhPP/00O3bsOKtBIMsyK1eu5Oabb+Yvf/kLJSUlgWOTJk3iq6++4o9//CNhYWFB/ezVx6kozAysALcGT2UlB/72P3jKKxBEEUEUMcTEoDGd/f56ujdhWyErMt9kLeOjnV+x+uiGBj+/nBPV/Ly/ODCPWZN7cXm/BFZ/t5+sXYWBdsPHppOYEt5mc4MGvAhlCcFtQ5B84Hag+DyAAMl9GHvLL7lvyiTira0rkqLS9vTs2ZMVK1ZgMpl46KGHOHz4TFFapXNzKdmCqh2ootL
x1K+YXh9ZliksLDzje6vX68+w34BAQSWHw9FmczNERZF2951omiFg1hEa3ZOuA24nqcfUC6oi8blQZ1v+nL8T8NuOM/tdz5DE4LyNkqwwf80h/vHpdsqrnfgkhbIqF0aDFqfLi7nGg1jP9FYE8J4UCGXl1J+k+N8T9HYvTrcPo0FLebULn9T+IqHX42b3ptUs/2Quxw/vQ1EUdm1cGWRPK4rCni1r8HncCKKIz+tp4M+LIkv4vN4GjwuiiM/jZs+WNe0qfrbqm3no0CEEQeDWW29tUb9bb72VzZs3qzk+LiBa4urdGIIgtLmrd3uxe/dusrP9+bYGDRrEjTfeSJcuXdi0aROZmZkMHjw4qP3SpUvJzc1lwIABXHHFFYH9ixYt4qmnnsJoNPLhhx+esbqclZXV4Oe5efNmhg8fTnZ2NlFR/oqpx44dY/DgwSxZsoRx48aRkZHB559/HjDUt23bxqhRo3j77bf5/e9/T2pqauB8Q4cOZfPmzYwcOTJonMrKSm699VZWrlzJ66+/zpNPPtmKT01FpX1prJKmLMuYTKZmVdKUZZn77ruPvXv3Bu0fO3Ysv/rVr8jIyGhwbMnnoeDoKnweG7UVR0jueT0abfONwvq4ioo4Ou8d7LnH/BXhtVqMsTEIOn3TnfF7E7ZlpWNFUVh8YDXbTuwGYG32JgbE9SbWGh3UrntKODPG92DB90e4c0pveqWEs/SbPRSd8L9IC6LAlZN60KN3XKvmczpBXoQ6E4osocgSGp+n7gKQZZkuE29lysC+mHVNr+ardBxJSUncddddzJ07l3/961+8/vrrHT0llWZyKdmCqh2ootI5qF8xvQ6bzUZhYSE+nw+Xy0VaWlqT56kTGs91gQP8heasPbojtNCL+nR0hksj3dryw98HbEtBELi1/w0MjO8T1Mbjlfho6X72Zfs9POct2MOTdwzj6buGYXN4OXa4jO8XZYHGfw69QYPZ2niYrtcjIUkyM6/tTWxSGCFmPTptq8twNIosy2Rn7WDfT+twOU8J0AajmeRuvf1ujyfv8bIkYauqQKs34PU0siijKCiSD5/H1WiuQq3egK26ElmS0GjbR2hu1VmrqqoASEhIaFG/+Pj4oP4q559PPvmETz75pMl2vXr14p///GfA1dtkMpGXl9dgWILD4QjcgA8fPkxUVBSRkadyAMiyTElJCbIsc9111531Jv2Pf/yD3r17B7Y3bNjA8ePHuf3221t4pS2jurqajRs38uijjyLLMomJidxyyy1oNBoefvhhnnrqKebOncu7774b1K9uZfnhhx8O2l/nYfvXv/61wfCTPn36nLEP/DfB//znPwHDEKBLly7ceeedvP766+Tk5LB69eqgz3DYsGFMmTKFxYsX8/333wdVjKz/WdYnIiKCf/3rX/Ts2ZMvv/xSNQ5VOi2NVdJUFAWv14vZbMZisTRZSVMURQYMGBAQCYcMGcIjjzzCgAGN50UBKDn2Az7PyXBmQUTUNE/QOx17Ti7Z776Hp7IK2eVC1OkwxsUhtOAh35a5CRVFYdnhdWzO2x4496wBN54hENYxekAivbpEYBBFFs/fTWW53yDS6jRMvK43KWkRDfZrDad7EUp1YeBaAxrJi6QxISDTxSioAuEFwvDhw5k7d65axK6DaQ9b8HTa0hY8H6h2oGoHqnQuYmJiAgvBBoOB4uJiqqurA8c9Hk+z7kUulwuTyUR0dMP2TVNUbN3G8c/mEz6wP6m33Yqo0zWrn6IoOGrysYSlnNO4FzLDkwaxs3AfNo+DWQNupH9c8MK9zenl3W/3cKzQv9isEUWuGZmGTisSEWIk3Gpg6+oj6PUafF4ZS4gBs+Xs9rdOK1Jb4+LY/lKGDE5qt7x9iqJQdPwIuzauorqiNLBfFDVkDBxB72Fj0RuCnQk0Wi3X3PZr3M7GvVm9Pi/r16/niiuuQKdt/DtmMFvaTSCEVoqEERERlJaWUlBQ0KJ+hYX+sKTw8PDWDK/SCux2e1C
IXWPExfk9Quq7ekuS1GC+k7oVHlmW8fl8SJJ0RhtJkpBlmdLSUkymxis5nZ4LzO12Y7fbm5zvuXDvvfdy7733nrG/W7dufP3111gsFsBf/e7Pf/4zn376KX//+9+JiPC/DB85coSVK1cSHh7OHXfcEehfVFTEzp07EUWR+++/v0VzSk1NpV+/fmfs79GjB+BfEY6NjW30eEO/SUmS+P7779m0aROFhYU4nc6g6pIHDx5s0RxVVM4Xp1fSPFserLpKmiaTicLCQu6++24+/fTTIK+Pe++9l4MHD3LfffcxfPjwJg0IW1UuVaVZAIiijoSuE8+pmrGnqoqjb72N5PHiPWngGqIiUWQZxeNponcwgijic7lb7U24JvtH1uf+5D8nAjf3vS5gxNmcXnJOVNO/e7BBLfpkFn2zG7vNP2eTWcfkG/sSEx9CWyPLMj9/vxTR60I2BwuQstYABos/b4rbxs8bVjKkT98W50lTOf8YDH4vgBMnTjTRUqU9aQ9b8HTa0hZsL1Q7ULUDVTovVquV66+/nnfeeYeysrKge4rFYiEhIQGtVnvWcHlFUXA4HMycOfOcvJdtR46SN/8rAKp27SGsX18ihg5pVt+q4t0U5X5PWHQv4tKuQqM9t0XmC5FoSyQPDruNUnsFfWJ7BB0rr3Yyb8EeSiv9gplBr+W+6/uSkXrK1ss/VkVeTgUWqwFRI6JthkegIAgYjTrycirIP1bVLovXHpeTzSu+oigvO2h/ao++9B81IahgyemYQ8Iwh5wZDl+H1+tFZwohIiYBXTOF6PagVSJhRkYGJSUlfP755zz66KPN7vfFF18ABBICq5x/LBZLg8bF6dQZQPVdvTUazRlVPuFUrgdRFNFqtWgaSOCq0WgQBIGYmJizrh6f/qMwGAwBI62tGT16NN27dwf8eSxiY2MZOXIk11xzTdB1RkREcOeddzJv3jz+85//8MQTTwDwxhtvoCgK9957b9A1HT9+HPB72jaUG+Ns1A8RqU/dg62x4yEh/pf001fUDh8+zPTp09m3b1+jY9bl3lFR6UycayVNr9eLLMscOXKE+++/n+3btweEtIiICObNm9es80g+F4VHVwe2Y7uMRW9s2e+5Dn14OLETxlO4bAWiTofGaERuoThYH43RgKesHMXnQzgHQ+L7nM2sPvpjYHtan8mBPDHVNjdvfrObkgoHd03tzaCMU8+LjWuPBgTC0DAjU27qR2h44y/654rk87Fs8Rf4Ck7mrfM6wVD/OSCc+j6crHS8I2sfw/r1b/O5qLQtu3f7w4/0+kvnZakz0h624Om0pS3YXqh2oB/VDlTpjFRXV1NeXo7dbvdHUmg0iKJIXFxcwOGoqdxsDocDo9HIzTff3OLxXcUl5Lz/IYrkd4aJHnM54UMGN9HLj9tRTvGxDf7rKDtAaHSvi7qasST7BVyNeOq+H2OJIsYSFdTuRKmNed/sptbhtyVDzHpm3zSApBgrRSeqKTpRw8DLktm6MRefT8Jk1rVoMVyn1+Byedm6MZfkLuFt7k2o0xvwuJyB7aj4ZAaNnkR0wsXjLdoqkXDq1Kn8+OOP/PTTT/zlL3/hmWeeabLPX//6VzZv3owgCFx77bWtGV6lFdx+++0tCt2t7+qdktLwD+Dw4cP4fD60Wm1gJbM+oigSGxuL0WhkyZIlLVrJGTt2bLPbtpQHHniAe+65p1ltH330UebNm8ebb77J448/jsvl4v3330cQhDNCTFpDU0JISz1lZsyYwb59+7juuut46qmn6NOnD6Ghoeh0OjweT8CrQ0Wls9HSSpper5fS0tJAKIpGoyErK4sNGzYE5YlqLsW5P+Dz+r2YLWFdCI8907OjJcRdPQF9RDjm9DTkNqgmqbWGNDvkpT6bjm9j+eHvA9vX9ZzIiGS/0Vte7eTNr3dTXu03gBZtyKZv1+hATpfxU3rx7Rc7MZl0TJ7WF5O57YWewmOH2bx2CXnZBxEUGQQRwedG0ZtP5gJSCKoCrdG
Bz616E14AHD16lDfffBNBEM57KKlKMO1hCzZFa2zB9kK1A1U7UKVzsmXLFv70pz9RXl6OyWTCbrcTGhpKYmJisxYpwB+RUFlZyciRI8/IJdoUPpuNnHffQ3L67bXQXj1JuvH6ZolOsuzjxJHlKIpfOIuIG3hRC4Q+WeLzPd8iIDCr/42N3qMOHa/kvcX7cHv8nugxEWZmT+9PVJiJw/uLWb/qMLKkYLe5ycupwGhsmUAIbe9NKEsSYr0FL0EUGThmEj+v+ZYBIyeQ0qNvu4U1dxStEgl/9atf8eqrr1JZWclzzz3Hzz//zNNPP83o0WeW8964cSOvvPJKoCJyREQEv/rVr1oz/AXBkSNHePTRR1m/fj1Go5FbbrmFv//97+3mFdde1Ll6f/jhh0RERJzTD6G1rt6dgT59+jBx4kRWr17NsmXLKCgooKqqiilTptCtW7egtnWrvIWFhVRXV7d4FbmtOHDgALt37yY2NpYFCxac8VBVq0uqdGbqKmk2lUPG5/NRXl5OVVVV0IqyXq9HFEWWLFnSYpGwtiKb6rIDAIga/ckw4+bf+xRZxllQgDk5ObBPEAQiLxvWonm0NTsK9rDowKrA9jU9xjGmy2UAFFc4ePPrXVTb/GE7kaEmfj1jQFDSZ7NVz7Uz+mMy6dHp2zYHoKO2mh0blnPwwF5q7XZEyYsiiMh6E7LejEZo2DMJUVS9CTuAOk+ppvB4PJw4cYLVq1czd+5cqqqq/PkvZ81q5xmqtCWqLajagSoq5xOz2UxlZSWCIJCRkUF5eTkOh6PZInldBeSEhAReeeWVFt2zZK+XnPc+xF1eAYApMZ4ud9+B0JAN0gCleZtwO/zFOAymSGK7jGn22BcaPlni010LyCr130v0Gh0397vujHbl1U7eXrAH6WSKsrSEMB64sR9mo5Ztm46R+ZPfplAUhZ0/5+Fx+9DpDfi8Z6ataApBFPC4fa3yJlRkmWOH9rBny1pGTZ4R5CkYm5TG1NsfCRIPLyZaJRKGhYXx0UcfMW3aNCRJ4rvvvuO7777DbDbTrVs3LBYLdrud7OzsQD45RVHQarV8/PHHhIZe3JV9qqurGT9+PImJiXz55ZdUVFTw+OOPU1xczNdff93R02sxM2fO5PPPP8fhcJyTyNkaV+/OxGOPPcbq1av597//Hciv+cgjj5zRLj4+noEDB7Jr1y7ee+89fve7353vqQJQUeF/uDW26vbf//73fE9JRaVZNKeSpiRJVFdXU1RUFCQOajSaQML8yspKlixZwp/+9Kdmv5RKXieFOafCjOO6XInO0PwXWtnnI+/z+VTt2k3XB+8nJONM7+qOIjk0gRCDlVq3jfFdL2dc+igA8oprmbdgN3anPw9YXKSF2dP7U5BdQUhvHXrDqftHaFjbhhdLko9DO7ewc8v3VNgdeH0SeByAgM8UhqjVoWnKwFO9Cc87aWlpLTa8636nI0aM4Je//GV7TOuiYu7cucydO7fB3H4dgWoLqnagisr5YsCAAcyaNYv8/Hx+//vfk5OTw+zZsykoKCAiIqJR+7BuMaKyspKEhATmzZvXojRniiyT9/l87LnHANCFhZL+wP1omulxa686TkVhJgCCoCGx+zWIYvsVmehIfJKPT3YvZP9JgVAras+oYFxHVJiJq0eksnxzLn27RnPX1N6IAqxddpDsg6eKf/TsF0/OoVJ8Phmv59yffXqDltpqJ7KkoNG2zFYpLTjGzh9XUlHiz++auWE5E2fcH1TZ+mIVCKGVIiH4Q44XL17MPffcQ3FxMeBPhLxnz55Am/ovbnFxcXz44YdMmjSptUN3eubNm0dpaSnbtm0L5HwxmUz84he/YPv27QwdOrSDZ9gyBg8ezODBg9myZUsgL01zaY2rd2dj6tSpdO/ePVCVsVu3bkyZMqXBts899xw33XQTf/zjH0lNTeUXv/hF0PGsrKx2D7nKyMhAo9GwZ88evv/+e8aNGxc4tnjxYv73f/+33cZWUWkN9St
pNsaRI0dwu92BXKiiKBIZGUlUVFTgHmU0GnE6nZSVlTVbJCw78TOS1x9uaw1PJyym+b9Rye0m94OPqD3oN5hyP/yY3n/8PVpz2+ftOxdirdHMvux2dhcd4KqTAmFOQTXzFuwJhH8kxYTw0I19ydx8nEP7ijl2tJzJ0/qi0bS98KbIMqvmv0t+QT62k6vFsiAiCCI+UyganY6g0OLGUL0JO4SmckGdjlar5a677uKf//xnhyblvlB4+OGHefjhh6mpqekwT7T6qLagageqqLQHFRUVzJ8/n4ceeijovvLYY48himIgj+lnn33GnDlz2LlzJ5WVlZjNZgwGAx6PB4fDgdvtDixGjBw5kldeeaXFdRCKlq+kMnMXABqDnvT770Ef3rz7r8/rpODoisB2bOpojJaYFo1/oeCTfHy86xsOlh0FQCdquXvwzXSPSmu0z6QRXYgONzEoIxaPy8uyRVmUFNYCIAgw4oqu9BucyNBRXXA5Wl+8ymTWoWlGwZM6aqvK2b15DflH9wftN5jMeL1u9IbOYcu3N20iaU+ePJkjR47w3nvvsWDBAn7++WccjlOlnS0WC5dddhm/+MUvuOeeey64UNtzZenSpYwfPz4oKfQNN9yA1WplyZIlF5xIKAgCL7/8MrNmzQq4bjfHOGyNq3dnRBRFHnnkEX77298C8Otf/7rRa5o+fTp//etfeeaZZ5gxYwa9evVi4MCBOJ1Ojhw5QlZWFu+//367GofR0dE88sgjvPbaa0yYMIGxY8eSmJjIwYMH2bFjB8888wx/+ctf2m18FZVzpa6SpqIoVFVV4XQ6SUhICGojSRKSJKEoCrGxsURFRZ3hKVGXaL/+c6kpYlJGIcsSteWHSOg6odn3LZ/NRvY77+HIy/ePrdWSetvMTiMQ1hFtjmR818sBOHisgv8s2uf33gPSE8O459o+bFp1mLzcSgAK8qoozK8muUvbV4kTRJHq0ERsx44BAp74HihVRRhcNQiaZgqEdajehOeVu+++u1ntDAYDERER9O3bl6uvvjpQLVflwkO1BVU7UEWlLVEUhVWrVvHKK69QXV1NaGgot912W+D46SlGevXqxYIFC8jMzGT+/PksWrQIh8OBy+VCFEXMZjMzZ87k5ptvZvDgwS2+18heLzX7/QKRIAqk3nFbUNqYpq6lMHs1Pq/f3rSEdSEifmCLxr9Q8EpePt75DYfK/RV+dRod9wy+mW6Rp/Iuen0yx4tq6JYcHtgnCAJDe8VRUWZnxbf7sNX409todRrGT+lJl27+IifWEAPWkPOXK9XjcrJv63qO7PkZ+WQ4NEB4VCwDR08iPrXbWXpffLSZ36vFYuE3v/kNv/nNbwB/haza2lpCQkI6VVjxwYMHWblyJdu3b2f79u3s378fSZJ48cUXm1V45csvv2Tu3Lns2rULj8dD9+7duf322/nd7353xop4VlYWd911V9A+rVZLRkYG+/cHq9MXCr169WLevHnnxdW7MzN58mTAnyvjvvvuO2vbP/zhD4wfP57XX3+d9evX88033xASEkJKSgpPPfUU48ePb/f5/u///i8DBgzgjTfeYPv27ezcuZP+/fvz+eefM3PmTNU4VOlUeDwedu7cyZIlSygrKwtUTQeIiooKqogaFRVFVVUVkZGRjVbprOt/tiqap+PPQTiemJSRaHXN6+curyD77Xdxl/pz0GhMRro+cB+W9LRmj9seHCnPZWfRPm7qPeWMl3mXx8fHy/YHBMKeXSKZNSGDtYv3U1rkX9kVNQJXXdOrzQRCWZZRZBlNPTF30rir+aC4CHdyX7oYRYrXZYLW4PcObAmqN+F55f333+/oKah0AKotqNqBKiptQXl5OX/729/4/vvvA/s+/fRTZsyYEWTrnY4gCAwZMoQhQ4bwzDPPUFhYyIoVK5g8eTIJCQmtyncq6nR0f+TXHPvov4T06kVY34ZDZxuiujQLW6VfNNNojSR2uxpBuPgWKz2Sl492fsWR8lzAn4P
wnsG30DXyVNV1p9vHfxbtJbeghoem9ycj9ZQNWZhfzYpv9wVCiS1WPZOn9SUq5vznqVVkmcN7trLv5+/xuE8VFDSaLPQfOZ703oOCQowvFdotOD40NLRTiYN1vPnmm7z22mvn1Pe3v/0tr732GlqtlvHjx2O1Wlm7di1PP/00ixcvZuXKlUFhcZWVlYHS7PWJiIgI5Ae5EBk+fHiDrt6SJCHLMpIkUVFR0WpX7/YmNzf3nPvW5W+54447Gvw3Pp2RI0cycuTIJtuNGzfurKFb99xzz1kr8P35z3/mz3/+8xn7BUHgvvvua9SQbWm4mIpKW6IoCnl5eWzevJnNmzezbds2XC5X4J5SXyS02+1BhmNcXBxGo/GsYXgulwuTydRk8ZOGaK5A6CwoIPvt/+Ct8Qtr+vBQuj70AMb4+BaP2ZbkVubz4c6v8EpeXD43t/a/Ea14alXeqNdyz3V9mffNbnqnRzHt8nRWLNhLdaU/1Fpv0DDphr4kJLdNmGNpwXF2/PAdUSndGTbm6sD+CIuJqTfOItyg5euP30DweZENevCdQ6iJICL6vKo3oYpKO9KYLWg0GgPe2y6Xq1PbgqodeArVDlQ5nyiKwrJly3j11VepqakJ7J84cSJPPfXUWQXC07FaraSlpZGSkkJaWlqbpLHQGAyk339vi8Uha0Q6lvAu2KuOkdD1arT6iy960uPz8OHOrzha4c/XaNDquXfwTNIiTnlbVtW6eXvhHgrLbAD8d9l+nr1/BDqt3/4MCTOi1WrweiSi46xMuqEPFmsHVVgXBPKPZgUEQo1WS89Bo+g1ZDQ6/aVb9b1VIuELL7wAwK233kpGRkaz+x09epRPPvkEgD/96U+tmUKL6devH0888QSDBw9myJAhvPTSS3z88cdN9lu4cCGvvfYaVquVH374gSFDhgBQVlbG+PHj+fHHH3n22Wd59dVX2/sSOgUNuXrXvcz7y44bW+Xq3ZkpLCxk7ty5iKIYCDVRUVE5dyRJ4s477wwUuKpDo9EQHh5OVVUVUVFRhISEnDU/YUO0pJKmvfo4OkMYemPLBDHb0Wxy3vsAyek3MIyxMXSd/QD6iLYPzW0J+dWFvJ/5BV7JL7TJstxg4G735HAeu3UwOhmWfrUb58kcMGarninT+xEZ3Xoj1+WwsWvTarL376TG7eVQXj5deg4gJuZU2GnPqBDcHg9eezWKVocgec55PEWrw2uvxuvzYWjBy4aKikrzacgWrEsTIYoiJpPporQFVTtQReXcKS0t5aWXXmLDhg2BfREREcyZM4cJEyZ0yJx8NjuCThtUmORcvMe0OjMpPW/EUZOPJSyl6Q4XIJIi4/b57TODVs99Q2bSJfyUQFhUbmfegj1U1fptYrNRx/039AsIhOAPJZ58Yx/2ZhYwZmJ3dLqOKwAiCAKDRk9i9Vf/oUtGf/qPHI85pOPz/3Y0rRIJ//znP/s/2EGDWiQSHjlyJND3fIuEDzzwQNB2cz0MXnrpJQDmzJkTEAjBn+fjjTfeYOzYsfz73//m2WefDXi0REREUFVVdca5Kisr6dGj81S6PFdOd/W+7rrrKC0tJSYmhiVLlrTK1bszMmfOHE6cOMHq1aupqqril7/8ZbvmkFFRuZhQFIXDhw+zadMm7HY7Dz/8cOCYVqtl+PDhrFu3DvCHEF9++eWMGjUKg8HAXXfdhdlsbrFACM2vpOnz2DlxaCmKIhPbZSzhsf2a9UIrOZ1BAqGlSwrpD9yHtoNz7xbWlvCfHZ8HDLkeUencNmAaGlHDoeOV9EgJD74+p49lS/YHQj/CI81Mmd4Xa6ixVfNQZJmj+7azZ/Naqu12ajxeZAVkaxQb80qZFhOcm86g13PrvY9RVVvbqnEBwkNCVIFQRaWdOd0WLCsrw+FwYDabiY6OvqhsQdUOVFFpHd999x2vvvoqtfWe8ZMnT+bJJ59slkdueyB7veS89wGy10P6/fc1u0BJYwiCcNEKhAAmnZH7hs7kk10LmNz
9SlLDkwLHcgqqeffbvThc/sXmyFATs2/qT7hZj9cjodOfEgNj4kO4asr59Sx32GrYs2UtXTL6B+UYjIxLYuqdv8Ea2rGL+52Ji7MWdxtz4sQJtm7dChCUSLWOMWPGkJKSQl5eHkuXLmXWrFkA9O7d+4zcg5IkcejQIW644Yb2n/h5xGq18uCDD2K327FYLBeVUVjH559/zvHjx4mPj+e3v/0tL7/8ckdPSUWlU1NVVcVPP/3Epk2b2Lx5cyDNgtFo5MEHHwwKJ7nxxhvp378/o0aNonv37gEBS1GUdq+kqSgKhTlrkCR/8mRHTT4Rcc3LZacxmUi9bSa5739ESEYGXe6+I2gluiMosZfz7vbPcHr9wmV6RCp3DvoFGlHDdz9ms3rrca4e0YWpl6cH+hzKKg4IhHGJoUy+sQ8GY+tCdsqL8tn+w3eUFRdS7fbilmQUjR5P2mA0Kb3pmtxwKHZcVBRxUVGtGltFReX8Y7VaL0r7rw7VDlRRaR379u0LCISRkZH84Q9/CKq2fb5RZJnjn32BPdcfOnvsw4/o/ugjLfJ6dtlL0OlD0Og6V4G69sSsM/HA0FlBn9OeI2V8tDQLn+Qv+pEUE8JD0/sjSjKL5+/GGmpg0vV9EMS28Sgvzstm2/dLGDbuOuJSup61rdfj5mDmJg5kbkLy+agsKWDyrb8M8hZVBcJgOkQkrKsYc6HkCcrMzAT8N7P09PQG2wwbNoy8vDwyMzMDIuHUqVN5/vnnA951AIsXL8Zms3Httdc2Op7b7cbtdge263I1eL1evN6z52dq6nh7cvvtt3fY2OeD1uSuUVHprNSlCfB6va0OBZMkib179/LTTz+xefNmDhw40GCeI6fTyc6dO4OEuxEjRjBixAgAfD5fUPsXX3yRO++8k8LCQuLj44OeHXXnP30cWZYpKioiPj6ev/zlL2ecsz41ZQeorahLNG0iKml0i+6l5owMujxwH+a0LsiiiNyB9+FyRyXvZn6Oze0P3U4JTeT2ftNQfApfbjjIpt2FAKzckktGShhd4kMAuHxcOrYaF3qDlisn90DUnPvzxO1ysO+ndeTs34nD66PW468+7Y3rjrvLEHomxHB1WjQWnbZDn1mdjbrPor0+E/WzVlFpHaodqKLSOh555BF+/PFHBg0axBNPPNHh9QuKlq2gauduADQGPckzftEiW1jyucg7uBgUhcTuk7CEpTbd6QLD5XWx9PA6rukxDnM9IbT+57RxVwFfrzscsMUzUiO49/q+1FQ4WfntPpwOLxVldrZtPsZlo9NaPSdFUdi1aRUnsg+g0xu4+paHGi6cJcvkHtjFni1rcTpsgf1Oey01lWWERTVc8FClg0TCEydOABASEtIRw7eYnJwcAFJTG//hp6SkBLUFmD17Nv/617+48cYbefbZZ6msrOTxxx/nxhtvZNiwYY2e629/+xvPP//8GftXrlzZZHVOnU5HXFzcWduoqKio1FFUVERFRQXLly9Hq23dI6G8vDyQmuF09Ho9PXr0ICMjg549e1JYWEhhYWGzz3377bfz5ptvkpubi8ViwWAwBBkEdYspiqLgdrux2+1ERERw++23c/ToUY4ePdrgeUU8ROkPIAj+xasqbyQHCtY1PhFFQVtcgi++gfvswQPNvp72wKm4+cm7F6fiX2QKFSzEO0NYtWI1mccgr169rAEpsG/HBvbV6y/rwAWsXNnwZ9Vc7KV5VBcdxY0GCQGvKYyapEFgjaC7vQT94Tx+ONyqIS5qVq1a1S7ndTgc7XJeFRUVFRWV0ykqKuLQoUNcccUVgX1ms5n//ve/HS4OApT/tJXiNX57TxAFutx5O6akxGb3VxSFwuzV+Dx+8an8xDbMoSkXTe5VAKfXxX+2f05+TSEFNcXcP/RWTLrgNDSVtS4W/nAkIBAO7R3HrVf35PjRcr5ffgjppGdhaLiRjD5to1EUHT9KQe5hdHojBbmHKTp+lIQu3YPaFOdls3PjSqrKigP7REGk+4D
h9L3sCvTGS8fz81w47yJhfn4+b731FsAFk5evzi3acpYcU3XhFfUrNIWHh7N27VoeffRRZsyYEciL1VRxk9///vc8/vjjge2amhpSUlKYNGlSkzdVh8PBkSNHmrwmFRUVFYD4+HhCQkK45pprmlVNzuPxsHPnTjZv3kx8fDwzZ84MOr5o0SLy8vIA6N69e6Ci44ABA1pUre50pk6dyjXXXMMzzzzDzp07sdvtmEwmDAZDIP+V2+3G6XRiMBi44oor+Otf/3rWSpqKolBweDGOGv+CVUhkBsO6Xt1oe9nr48QX86k+cJiEjAyixow+5+tpDz7d8y36UiN6jMRaorh/8K3oRSOfrjxIjVROWJh/5ffmq7rhK3XQd1ACoeFtbyS5vV7+8+7rKC473tRBeBN7MSo+nHEpURi1HZecurPj9XpZtWoVV199dZtUZzyd+vaJioqKiopKe6AoCgsWLOD//u//UBSFL774gsTEU+JbZxAIaw8dJv/LrwPbSdNuILRPy/KLVpdmUVvhX1TVaAwkdJt0UQmEDq+T/2z/nBM1RQBUuqqocdeeIRJGhBi569o+vL94H1cNTWHq5Wns3pbPtk3HAm0SksOYeF1vjKbW2zaKorBnyxoknxdLaAT2mkr2bFlDfGo3BEGgprKMXRtXUZB7KKhfUteeDLz8akLC1XQ2zaHZIuFrr73Ga6+91uCxhx56qMnqXoqiYLfbKS8vB/wvKlOnTm3+TC9QMjIyWL58eYv6GAwGDA3ktNLpdE2+OLTHi4WKisrFiyiKiKLY6P1FURSOHz8eyCu4ffv2QDqEHj16cMcddwS1f+CBB5BlmVGjRgXSLLQV/fv3Z+HChUGVNB0OBy6XC1EUMZvN3Hrrrc2upFlZvAdnbT6CIKDVWUjsNh6NtuF7qORycfzDj6g9fBRBECheupzIgQPRR3aeHCY397+W97bbcPncPHTZ7ehFE+8v3sfBYxUIgoBGI3Lb1RkU7iuhIK+awrwabrh1ICbzuYu3HreTkvxckrudMq51Oh1DJ07nh0I74eFhXNc9gS5hZ/eCVzlFc57153peFRUVFRWV9qKgoIAXX3wxkMsf4I033uAvf/lLB84qGFdREbkffoxyMv1ZzBVjiG7hoq/HWUlx7g+B7fiuE9EZLp58rA6Pk3e2f0phbQkAVr2FB4bNIs7asF3fv1s0T94xjNhwExtWHebw/pLAsR59Yhk7sQcaTdukmavzIjQYzQiCgMFoDvImPF0gjIiJZ9CYycQmpbXJ+JcKzRYJq6qqyM3NRRCEoNxPiqJQUlJylp4N07t37yBvuc5MXVi03W5vtI3N5nc17gyrI3Bmfi4VFRWV+pztHuFwOAJ5BTdv3txoWPDRo0epqqoKqkh33XXXtfVUgzi9kmZhYSErVqxg8uTJJCQkNDtpvtddQ8mxDYHthK4T0GgbruTrrakl+53/4DxRAICo15F2952dSiCEk4mkh83C7XOjxci8b3aTU1ANgE6r4faJGRzZdoKKMv+zzG73UFnuOCeRUFEUjh3cza6Nq3A5HVx1y4PExiYEjl/eOwNDRBWDYsPQtZFhqKKi0jJUW1BFRaUx2vL+IMsyX331Ff/6179wOp2B/dOmTWvSkeh84q2tJfvd95Gc/sJuYX17k3hDy+xWRZY4cWQ5suzP8xse04fQqO5N9LpwsHnsvLv9c4rqCYQP1hMIax0edh8uY/TA4NDsCIuepd/soejEqaiFy0anMfCy5DbzsKzvRWg0++19rd6A2+UIeBMOuHwChccOYzJb6T9qAl0y+gcVKFFpHs0WCcPDw+nSpUvQvmPHjiEIAtHR0U3myhNFEavVSnp6OhMnTuS+++5rsk9nIS0tDSAQQtcQdcfq2nYUGo0/lOtsSfpVVFRU6u4RDd0rtm/fzpNPPtlgv5iYGEaNGsXll1/O8OHDO3RhxGq1kpaWRkpKCmlpac32lFIUhcKjqwMGXlhMH6wRDRelcpeVkz3vHdzl/oR
+WrOJ9Afuw5LWpcH25xO3z4OCglF7yvPcqDVg1Br4YMm+gEBo0Gu5fUIP9m48hq3G7wVqMGq5ZlpfYhNa/u9XXV7C9h++o7TgOB5JotrtY/mKxdx150OBNqIgcFlC5xJRVVQuFVRbUEVFpSkkSQJOFRQ9V/Lz83nhhRfYsWNHYF9cXBzPPPMMo0aNatW525qyHzfhqagEwJSUSOodt7VYQCrN/wmX3S+g6Y3hxKVd2ebz7ChsbjvvbP+MYlspACEGKw8Ou41Yiz9Et6zKybwFuymrciLLCmMHJwX67thyPCAQajQi467pSdeM6DadX7AXIXhcTgTxTG/CsdfOIiYpDa0aQXHONFskfOyxx3jssceC9tVVmHznnXe44YYb2nZmnYi6Cpzl5eXk5OQ0WOF427ZtAAwZMqTNxp07dy5z584N3MSbg16vR6/XU11dTVhYWJvNRUVF5eKiqqoKl8vFjz/+GJQrBmDo0KFotVp8Ph86nY7BgwcHhMGuXbte8DlXBEEgMnEIbpffUIzrckWD7Rz5J8h55z94a/2e4vrwMLrOfgBjJygO5ZG8fJD5JV7Jy31DZwZVnAO4fmxXcgtr8Ekyt4zpSua6bNwuv2AQEmpkyk39CItoWT5Cr8fNvp+/59Cun5BlmRqPD4dXwheVSnXCEI5VO9SwYhWVToBqC6qoqDSFzWZDlmW8Xu859Zdlmfnz5/Pvf/8bl8sV2H/TTTfx2GOPnTWXf0cRP/lqFJ+PqsxMuj5wL5oG0nudDUdNPuUFdaHUAondr0HUnHvKls6EzW3n7e2fUmIrAyDMGMKDQ28j2hIJQF5xLW8v3IPN4QFg7fY8Lusbh1Hvl5OGj0mjuLAGe62bSTf2JTa+bQvU1vci1On01FaW4/N6EDUaQiNigrwJE9IujLoXnZlWFS5JTU1FEIQLxiPwXElOTuayyy5j69atfPrpp/zxj38MOv7jjz+Sl5eHwWBo0zyLDz/8MA8//DA1NTXNNvIEQSAiIoLS0lKioqI65Q1aRUWlY7Hb7ZSVlbFlyxaOHz+OoigBzxPwV5/77W9/S3JyMkOHDsVkuvgqgFnD0+g64Ha87lo02jONxNrDR8h97wMkt98YMsbH0vWhB9GHd/wLt0/y8dHOr8ipPA7AJ7sW8MDQWUHibVSYiV/9YiAFxyrZti4byef3FIiKsXDNtH6Yrc03ahVFIe/wPnZuXInTXovLJ1Hj9uIzhODqMRwpMpnkUBNWvVqU5FKja9eugD/NwOuvv97Bs1GpQ7UFVVRUzobP56OsrCyQSktRFLTalskCTqeTjz76KCAQJiQk8OyzzzJ8+PA2n29bIYgiiddfS+yEq9C2UL+QJQ8FR1YGtmNSRmGydvyicVux/thPwQLhsNuINvsFwgO5Fby/ZB8er99xKS7Kwuzp/QMCIYBWp2HSDX1QZAVraMPpe1pD0fGjnMg+iCxL1FaVB/bLkoTX4z7Dm1CldbRKJMzNzW2jaXR+/vCHPzB9+nRefvllpkyZEvAYLC8v59e//jUAjzzySKdYsU1ISMBms3Ho0CEiIyMJDw9Hq9Ve8N4/Kioq546iKPh8PqqqqigrK+P48eOsWrUKQRCoqKgIeIbXceutt3bQTM8fGq2xwTyEiiSR/+VXAYHQkt6F9PvvbbFB2RqOlOeyYP9ypve+hu5RaYH9kizxye6FHCnPBcCgNTClx1WUVbkIDzGg0576d6w8UcP2H7KpSzuUlBrOxOt6ozc0/9HvdjrYvOIrivNzkBWFGrcXpwSelIF4kvuj1+m4Oi2GIfHhiOoz5pKjbpGhb9++HT0VldOoswUPHjxIVFSUaguqqKigKAoul4uioiLcbjelpf6wUq/XS1RUy6q+WiwWnnnmGR577DFmzJjBb37zm07pOCR7vYinhZ2eiz0niDqik0dQnPsDRkssUYlD22qKnYJruo+jwllNfnUBDw27nUhzOABbs4r4fNVBZNlvTKYnhnH/9X05sKuQjD5xhIS
dsqMt1pZ5ZjYXn9fDj999htNWgyCKgeeYRqPFZA1FZzCgKAR5E6rPutbRKpHwQmTHjh0BUQ/8ifcB5s2bx5IlSwL7FyxYQELCqSTs06ZN49FHH+X1119n5MiRTJgwAYvFwpo1a6iqqmL06NG8+OKL5+9CzoJGo6FHjx7k5eVRWFhIWVlZR09JRUWlk1BdXU1mZibbtm2jR48emM1mbr755kviYep2lKM3RTZ5rYJGQ/p993D4X29g7ZpOlztvR9Sfv3ASRVFYemgd+0oOYdAY+M3IexAEAVmR+WLvYvaXHgZAr9Fx35BbUFxW/u+bHaQnhnHvdX0CFeScDk9AIOzWM4YrJ2e0uLqczmDE5bDj9ErUeLx4w5NwdRuBYgyhW4SFqd3jCTOoOV8uVWJjYykuLiYyMrKjp6JyGnW24OHDhykoKFBtQRUVlQAOh4PS0lK8Xi+1tbXo9fozag+czrFjxzCZTMTGxgb2XX755Xz11VdN9m1vbIePELJ6HbYeGUT06R3YX3vwEHlfzKfL3Xdh6ZLaqjEEQSA8ti/m0CQEQYMgXFzFMERRZFb/G7B7HIQaQ1AUhTVb8/huY3agzYDuMcycmMGPqw5z7Gg5OYfLuGHmwBYtPrcERVHIO5LFz2sWUlrgr4Xh/xMxWUMwGM1w0qYXBFRvwjbkkhMJa2pq+Omnn87Yn5+fT35+fmDb7Xaf0ea1115j9OjRzJ07l02bNuH1eunWrRtz5szhd7/7Hfrz+BLZFBqNhrS0NBwOB+vXr8fn82GxWFrsSq5y8XLixAmSkpKabqjS7lRVVbF+/Xq0Wm2Lf6OSJOHxePD5fIwYMYLk5OTAMZvNxqZNmwDQarWEhIRgtVqJi4vjsssuw+Vy0atXr0vCC8jjrCRnz2eYw5JJSJ+IznD2KsjG+Hh6PPoIhugoBM35DaM9XJ7DgbIjGLUGDpQd4XB5Dt2j0vhq31J2F+0HQCtquWvQDHw2K+98uwu3x8e+7DJW/Xyca0alATBoeAp2mwetTmTEmHQEseVCsCiKRA8ex/7l3+DqNQYpMgWTXsuk9Fj6xYReEuKySuP07NmT4uLiIPtJJZhvvvmGf/7znxw4cIDa2lqSkpKYNm0azz77LBER7VvcR6PRkJGRwaZNm9i0aRMajQaLxXKG57jKpYtqC156eL1efD4fkiRhs9lQFIXhw4c3+j2QZZn//ve/vPXWWwwbNozXXnst6Nnf0QKhoigUL12OrrCI4qXLCe/dC0EQcBUVkfvhx0guN0fnvkX3Rx/GnNz677reGN76SXcCqpzV+GQpkHMQQCNqCDWGIMsKC74/wo+7TgSOjRmYxKRhKaz4Zi9lJf5c3VUVDgpPVNOla8u8UJvL3p/WsW/remory1AUBUEUMZgsmCwhDRacOb3SsWqjnjuXnGI0bty4VpV8v+WWW7jlllvacEaNcy6FS06nT58+mEwmDhw4QG5u7jknp1W5uJBlmdLSUoxGo/qy0AnYunUr+fn5zQ7TqDPwvF5voCqdLMv4fD4mTZoU1DYiIoLo6GjCwsICD0tRFImKiqJnz57069fvosw5WB9FkSk4ugpFkbBXHaOiKJO4LmNPHZdlyjdvIWrkiCBB0BgX29Dp2nmuCssP/4BX8hJpCqfCWcXyw9+TXJzAjoI9AGgEkTsH3YS3JpT3l+zB6/M/I7omhTFuyCmRWBAERl/VrdnioK26gswNK+g3fBwRsac86Yf06sU+zyzyaj30jQlhUtc4LLpLznxQaYAZM2bwww8/8PXXX/O73/2uo6fTKamoqGDcuHE8+eSThIWFsWfPHp5//nl27drFmjVr2n18URS5/PLLCQ8P59ChQxQWFrbKrlS5eFBtwUsbjUZDeno6PXr0oF+/fkG5qevIzs7m+eefZ9++fQBs2rSJlStXMnny5PM93UapPXiI2v0HUHQ6avcfoPbgIUyJiWS/+x6Sy+/0E9KzB6bEhCbOdCaKIuOoOYElLKWtp92hVDireGf
bp0iyxEPDbg8SCgHsLi97jp7yPr92dDoDu0Sy+Itd2G3+VDw6vYYJ1/YmJa39FrvSew9m9+a1eD0udHoj1rAINNrGo1cE4cxKxyrnhmrld2LOpXBJQ6Snp5Oeno7H48Hj8bRKJFW5OPB6vaxevZqJEyeiU8vDdyh2u523334bt9tNeHh4g23cbjd2ux2Hw4HD4WiwjSRJFBQU8O677zaZpF6j0VxSLwUVhTtw2goB0BvDiEkeGTgme70c+++nVO/ZhyMvn5SZHRt6XedFaNGbEQQBi95MZuE+siuPY9QaEASBWQOm4aoM5eOle5FOisQZyeEkyQLlxbUkpZ4y2JojEEo+L/t3bOTA9h+RJAm7vZbJNz8QWKUVBYHrMpIod3rJiDy7B6bKpcWDDz7IW2+9xebNm3n11Vd54oknOnpKnY4HHnggaHvcuHEYjUYeeughjh8/Tmpq60LgmoMoivTr14++ffvidrvVBWMVQLUFL3W0Wi1Go7FBm0eSJD766CPefvvtwP1CEARuu+02rrzyyvM91UZRFIXCpcuQvV5koxHZ66VwyXcgavBUVAFgTk4i9Y7bGvQ8a4qyEz9Tlv8T4bH9iOtyBaLmwv+dVDiqeHvbJ1S5agBYsH85Dw67LahNiFnP7OkDeOOrXVx/RVfiDDqWzN+N72QRvJBQI5On9SEiqu2KYkk+H7bqCsKiTi3QW0LDMZjM6PRGQiKimhXirXoTtg2qSHgJodfrO1VItErH4fV6MRgMhISEqIZhB1NXXc5kMjUq4B8/fhyn0wkQFI5cV13earUiiiKSJOFyuYiPjz8vc78QcDvKKc3bEthO6DYpYOT5HE5y3/8A29EcACq37yB6zOg2CUc5F+p7EeowUlhlJzxEj0/2UeOuxagxMLP/DTjKw/h8ZVbg+9IvNQJrrZfiSierFu/nupsHEB3bPDGvMPcwO9YvxVZTBYqCzStRWFBMWkERvZITA+2iTAaiTO2TkFrlwkWv17NkyRJuuukmnn76aTZv3sxvfvMbLr/8ctXeOAt1ORzPt1gnCAJGoxGjse0rT6pceKi2oEpDHD16lOeff56srKzAvi5duvDcc88xYMCADpzZmdQePERt1n40Fgu43WgsZiq37UAXGYnGaEQfHkb6/fegMbTcfnHUFlCW/zMAVSX7CI/td8FXMy5zVPDOtk+pdtUCEGOJYmb/GxpsmxBt4Q/3XsbhvcWsWnk4kOM6LiGUq2/ojcncNs94RVHIP7qfXZtWoUgSU+74DdqT96Oi40exVVf4w4ubmQNS9SZsG1SRUEVFRaUDcTqdyLKMIAjYbDZsNhtxcXFBK1914cVAQBS0WCxYLJZAO5fLRW1tbaOehpci/jDjlSiKP7QuMmEI5hC/8OWtqSH77XdxFhQBoDHoSbvnrg4TCCHYi7CqxoPT7UMQBSJDw6ly1TAqdQi1RWF88/2BQJ9BXSLRlDmpsfvDP7RaDc1ZNLXXVJH543JOZB8EwCvJVHt82ON740kdyNpiJ90SZHQtLHSicmnRtWtXwO/trCgKCxcuZOHChWg0GqKioppMZSAIQqCA3Lly8OBBVq5cyfbt29m+fTv79+9HkiRefPFFnnnmmSb7f/nll8ydO5ddu3bh8Xjo3r07t99+O7/73e/aVDiRJAmv18vevXt5/vnnmTp1Kt26dWuz86uoqKi0Bp/PxwcffMC7774bsDlFUeSOO+5g9uzZGM5BaGtPTnkR+tBaQ8DtRna5kTweqK5GFxpC+gP3ojuHaDzJ56HgyArAr4zFJI+48AVCewXvbD8lEMZao3lw6CxCDFYKy+z8kJnPzeN7BArcKYrCtg25HNhTFDhHt54xXDkpA422bWzDytJCMjesoLTgWGDfgcyN9BvuTw+3Z8saJJ8Xo7llUSyqN2HrUUVCFRUVlQ7C6XSSmZlJdXU1paWlgYeYxWIhJCQk0E6j0aDVajEYDI2+VMqyjCiKzc5reClQXrANl70EAL0xgpiUUQC
4S0s5Ou9dPBWVAGgtZro+eD/m1I7LORPsRWjA5XYjigIut48wyYRe1JF5/ChVWXoE/N+TYWnReAtqcHr8ImhYhIkp0/sREta4l5Ak+TiYuZmsbeuRfD4URaHW46PWHI2r90hkcziCAL2iQpolNqpc2uTm5gbuW3X/VRQFn89HcXFxk/3bwnB/8803ee21186p729/+1tee+01tFot48ePx2q1snbtWp5++mkWL17MypUr2yxna1RUFNXV1QBMmjSJ+fPnt8l5VVRUVNqCHTt28NZbbwW209PTee655+jXr18Hzqpx6nsRCoKA6HEjuT0IoojschEz7kpMiYlNn6gBinPX4XX7w3FNIYlEJV3WllM/75TYy3ln26fUuv0FR+KsMTw4dBZWg4Uj+VX8Z9FeXG4figK3Xp0RqCJc31twyMhUhoxMbZPnttNuY8+WNeTu30n9GKq45HRSuvUB/F6EBbmHMRjNLR5T9SZsPapIqKKionIeqampYcOGDaxbt45NmzbhdDpxu93IshwIJa6trQ0SCaOjo5EkqcGk0nW4XC5MJhPR0dHtfg0XAi57KWX5dZXsBRK7T0IUtTiO55H9zn/w2f0el/rIcLrNfhBDTEzHTZZTXoRmvYnymhoMjlBiSlIpjcmj2uAhPNRMsTufxIR0KgpNDE+LwpFXhSz5zavY+BAmT+uL0XR2z6efVy/k+GF/AnKPJFEta7Glj8IX0xUEgTirgeu7JxBvVcMRVZomNbVtXhhaQ79+/XjiiScYPHgwQ4YM4aWXXuLjjz9ust/ChQt57bXXsFqt/PDDDwwZMgTwp4AYP348P/74I88++yyvvvpqoM8HH3zAvffe2+S5v/zyS2bMmBG07/vvv8fhcLBnzx7+8pe/cP3117Nq1aqz3tdVVFRUzhfDhw9n6tSpLF++nLvvvpsHH3yw06aNqO9FqAsJRfH5EJwuEEUQBESdjpqsLOInX93iZ1R12UGqy/wRG6JGT2K3Sc0Ode2MFNtKeWfbZ9g8dgDiQ2J5cOgsLHozuw6V8vHy/UiSP9dgYZkNt1fCqPe/jwwdlYqt1kVSagQ9ere+mJ/k83Fo1xb2b9uA1+sJ7A8Jj2Tg6EkkpvkFyjovQp/HjU5vwFevbXMRRBGfx616E54jqkjYiWmL6sYqKiqdh5deeolvv/026Det0WgICwujoqKC0NBQQkNDsVqD3err8lc1hqIoOBwOZs6ceUbfSxFFlk6GGfuNnqjEoZis8dQePETuBx8huf3Ghikhnq4P3X9OoShtSZ0XoUfyYHc78cgekip6YLGFIQkSheYjKBYdPtmHNbmAfqYRlOdUBvLDpHaNZPzUXuh0TYsNGYNGcezQPmo8Pqqju+PuMhi0erSiwBWp0YxMikRUDSmVZpKbm9vRUzijMEhzCzK99NJLAMyZMycgEIJ/UeaNN95g7Nix/Pvf/+bZZ58NFI+bPn06I0eObPB89UlKOjNtwaBBgwC4/PLLGTRoECNHjmTBggVniIkqKioq54Pjx4+TkpISJJ488cQT3HrrrfTp06cDZ9Y0p3sRotUim0zg8aALsSIajNRm7af24CFCe/Vs9nm97hqKctYGtuPTr0Jv7FgbsTXYPPYggTAhJJYHh96GWW9iQ+YJvvnhCHXGZK+0SG6dkBEQCMHvkTducvM/v7NRXnyCLSu+8ufAPolOb6DvZVfSfcBlaDSnxpUlCVtVBVq9Aa/Hfc5javUGbNWVyJKERqvKXi1B/bQ6MW1V3VhFReX8U1xcTFxccP6S8PDwIIEwMjKScePGkZSUxJ/+9CfMZnOTlYkbwuFwYDQaufnmm1s97wsNR00ekbosHDX9CYvy50dTFAmjORq3owyDOYro5BEoikLx6jUBgdDaNZ20++5Ba26bUMLWcLg8h70lB3D5PPh8MiZ7OBZbJLIoYXGEYbRbqTa6CA81k7/Xjra2CKPWnxuoZ784xkzogdhAFWNZknA57ZitoYF9UXFJeLsNo0y
MQLb6xefUMBPXdk8gytQ5PQZUVNqaEydOsHXrVgBuu+22M46PGTOGlJQU8vLyWLp0KbNmzQIgLCysTeyxIUOGIAgCR44cafW5VFRUVFqCx+Ph3Xff5YMPPuDPf/4zU6dODRwLDQ3t9ALh6V6Egf16PYbwMES9HkUBr91O4dJlhPTMaJYXmaLInDiyAlny24mh0T0Ji+7VbtdxPrDqLYxMGczqoz+SFBrP/UNvxagxsnhDNmu3HQ+0G94nniFJ4Sz4eAeTb+xLQnLb6w4mSwguh1+sFASBrn2G0G/kVRhNZ773aLRarrnt17idrc+zbjBbVIHwHFA/MRUVFZU2QFEUsrOzWbt2LevWrePQoUMsXLiQ5OTkQJtx48axfPlyrrrqKsaPH0///v0RRRFFUViwYAFbtmzBZDI12xMG/LkIKysrGTlyJIMHD26PS+u0KIpCWf5mjJpqyvI3ExqZ7s9Lo9GT2H0yIZHd0BlCEUX/oy7t3rs58q83MMTG0OWO2xA7QTVHWZb5aOfX1LptCIjIskJUSSqCLOLTeND6DERVJHDCchjFosNtqqWmrBaj1sDgEakMHdVwuGfJiVx2/LAUQRS5+paHgr5TU8ZN5J2duegFgYnpMQyKC1e9B1UuKTIzMwH/Qk16enqDbYYNG0ZeXh6ZmZkBkbCt2LhxI4qiBAq/NITb7cbtPuVBUVPjz4/l9XrPe1VklYuTuu+R+n26dMjKyuLFF18kJycHgL///e8MGTKEqKioDp5Z86k9eIiaffsRTf60KIqioNSFVmh1gSgL0WymZt9+KvdlEdIzo8nzVhTuwFFzwn8afQhRSWMuit/GFSkjMGtM9IvtiSCJ/HdlFjsOlgSOXzU0mThB5IeVhwBYuWgfN8wcgDW0dYVqFFlGqGd76gwmMgaNorTgGAMun0hYlN+RorHPWGc0ozO2TZ71C+nfsb3vy809b5uJhLIs8/XXX7NixQqysrKoqKjA6/WeUbVu7969Ac+4vn37ttXwKioqKucdWZbZt28f69atY926deTl5QUdX7duHXfeeWdgu3fv3nz77bdniDqCIPDyyy8za9YsCgsLSUhIaJZQKMtyoP0rr7xyyeXbsFcfw1Z2GMXlxVZ2GHv1MazhaYHjIZHBiYq1ZjPdH/4lGrM5yHDpKFxeF/O2fUJ25TEEQUSWwFQbibk2Ekn0oQggabyY7KGnvAljZMqkbEZmpDPs8i5nntNhY9fGVeQe3H1yj8K+nT/Rf8ioQJtos4FpGQkkhZgINXS8UKqicr6pe0FOTU1ttE1KSkpQ23Nl8uTJTJgwgb59+2IwGMjMzOTvf/87AwYMYNq0aY32+9vf/sbzzz9/xv6VK1eqBapU2pRVq1Z19BRU2hmv18vKlSv5/vvvkWV/KhZRFBk5ciQbN24M5MTu9CgK1vU/oq+tBUlCMRmR9afErLrFlLq2GqeDzPfex3bFGJqqxibiIVTnQy/aqfTEsD9/TXtdRbsiKTKaBnIorpJO8HMOlNT7iPonQWHmfg5VnvpswnQKP2xYc87F6xRZxlGej7OikMjuQxHrhRErigJiOBt/2n5uJ7+EaK/7ssPRPO/MNrkjbNy4kbvuuisoN42iKA2+sH799de88MILhIaGUlhYiNGoJkdXUVFpPTabjdLSUpxOJyaTiZiYmHbLz7djxw5Wr17NunXrKC0tbbBN3759iTmtGMbZRLxevXoxb948Zs+eTUFBAREREZjNDVf0qstBWFlZSUJCAvPmzaNnz7bJGXKhoCgKJcc34XM5webDp3VScuxHLGFd/EmPJYniVWuIHjsabb0Qbm0nydlYVFvCxzu/YX/ZYf/zEgHFqyOqJA1BFpG1/pU+WVDQyGKQN6E9sph9mkzGKwMD3w9Zljm6dyt7tqwL5G+RZBmbIZz11QK9JBmd5pTR2Ds69MxJqai0kq1bt56xWLxmTfCLVllZGR6PB6PR2GS+1faitrYW4KzpHeqeH0EvnefA8OHD+e9
//xsQG9PS0vj1r3/N448/ftaiAL///e95/PHHA9s1NTWkpKQwadIkQkPV369K6/F6vaxatYqrr74aXSfwrFdpH/bs2cNf//pXcnNzA0XxMjIyeOaZZ8jIaNrDrjNRe/AQRxZ9hwT+IiVuDzqLFdFkoqamhtDQ0CC7WTIaMVbXMrh7j2Z5EyqKjMtWhCnk3KoidzT5NYV8smchM3pPpVtk8EKyxytxfOE+3EINWo3IjCu7UZhVQqnNRlgYIMDwMV3oMzDhnJwOFEWhMPcQe7asQXRWYjHp6RJjof/ICW10dZcG7X1fbq5N02qRcOXKlVx//fX4fD4URUGr1WK1Wqmqqmqw/UMPPcQLL7xATU0NS5cu5aabbmrtFFRUVC5RFEUhMzOTL774gsWLF+N0OpFlGVEUMZlMXH/99cycOZPBgwe3qZfdxx9/zIYNG4L2iaLIkCFDGD9+POPGjSM2tuVVwIYPH85nn33GnDlz2LlzJ5WVlZjNZoxGI6IoIssyLpcrkINw5MiRvPLKK5ecQAh+L8La0sMoTi8IAorbS1XxXoqPrScmYSTH//sp1fv2U3vwIF1/+RAaQ+vCJtoSt8/D29s+pcJZhcvnRiNqEH1mdDUhmO2hSBovOqoJlXNw+XrhEXWY63sThpo5UHaEw+U5ZER3pawoj+3ff0dVWfHJERRcaClJGoQ7NgMEgQ15ZYxPa31lOhWVhjhy5Aj33XcfGzduDOxrbLH4b3/7G//3f/9HTEwMJ06cuOir+7744ou8+OKLLe5nMBgwNHDf0ul0qqCj0qao36mLE7fbzVtvvcUnn3yCLMsIgoBWq+Whhx7irrvuunC8B0+iKAol3y3DV1MDgoggCAhaLRqTMeAlKAhC0HNHYzTidTgoXbmKiL59mvUuoI88M0rjQuB41Qk+3PUVLp+bT/Ys4KHLbicl7JTYqdPpeGj6AN79di/j+ieQteU4thq3/3uh0zB+Sk+6dDu3sPOqsmJ2/riC4nz/YpggCAj4CwlqtdpLLtKpLWiv+3Jzz9mqu0NVVRWzZs3C6/USEhLCP/7xD2677TZWrVrF9OnTG+yTkJDAyJEj2bJlC2vWrFFFwrOgVjdWUWmcAwcOMGfOHDIzM3G73ZjN5kA+vzox7cMPP+Tzzz9n8ODBvPzyy/Tq1fwExHa7nY0bN/Ljjz/y7LPPBt1Ur7rqKjZs2IBOp2PEiBGMHz+esWPHEhER0err6tWrFwsWLCAzM5P58+ezaNGiM8TPmTNncvPNN7e5+HmhcMqL0AFeBUQBdAKKLFF0dDUVa37Gs8/v4ek8UYAz/wTWbo3n/jrfGLR6pmaM542fP0RAIFQbQWmNm+TyBL8XocZDuHIcg1KBIGYj+YYia5Qgb0Kv5GV51hqqvHvI2b8zcG6fLGOLSqcsYSCK3l+UJdSgJTVMDU9UaR927NjB+PHjqa2tPZUX6iz86le/4n//938pLS1l5cqVTJky5TzMMpg6bxq73d5oG5vNBqB67amoqHQ45xItM3fuXD799NPAdp8+fXjuuefo1q1be0+3XajatZvK7TsAvxAoaDQYYqIRRE2jzx5BENBYLI1WOnbWFqI3RaLRdp6F5HPhWFU+7+34ArfPX3QlOSyRWEs0sqwEFbazmHRMH5HK2qUH8Xr8+oLFqmfytL5ExbQ80sbltLP3p3Vk79sR9G8Qk9iFwWMnExGT0MorU+koWiUSzp07l8rKSrRaLcuXL2fUqFFNdwIuv/xyNm/ezI4dO1oz/EWPWt1YRaVhfv75Z2bPnk1hYSERERFER0efIZaZzWYiIiJwOBxs2bKFWbNmMW/ePIYPH97oeSsrK1m/fj3r1q3jp59+CiR3nTJlStD9bdy4cRiNRkaPHn1O1YibQhAEhgwZwpAhQ3jmmWcoKyvD4XBgNpuJjo5utzDqC4UgL0JRRNADGgFFVpA8LlwnXIgIaAx60u67p1MJhHWEGqyIgkiINozyKjcmhxWLw+9FaMKGQalCUbT
ohHI0mnK8mtAzvAmzi44QXpiLQaMHRcFjDudE4hB8ISc9BgUYFh/OVWmxGDQdn4NR5eLD6XQybdo0ampq0Gq1PPXUU9x9993s2rWLW265pcE+3bt3Z9CgQezatYtVq1Z1iEiYlpYGcEYe2frUHatrq6KionI+aW20zL333suyZcuw2+3Mnj2bO+6444L13PY5HByd+xayz4eg0SBoRAwxMQiapqUM0WBosNKx113L8QPfotEaSOw+GfMFGmKcW5nH+5nzAwJht8gu3D1oBkfyalm2OZdfTh+AxeR3dHA5vUECYUyclUk39MVsbTz1RUNIko8ju7eyb+sPgRQ3AJbQcAaNvpqkrr0vSSeGi4lWiYRLly5FEAR+8YtfNFsgBAKhcdnZ2a0ZXkVF5RLkwIEDzJ49m6KiIhITE89a4EMQBCwWCyaTicLCQmbPns1nn30W5FFYXFwcKDySmZkZSOhcn40bNwbd40JDQ5k0aVLbXlgjWK3WS14UrM/pXoSCXgQD/rAGRUHxKZAiopUsdH3wAczJSR09ZQ6X53CsKp+J3cYC/nkuP/wDiqLgk0CSfURVxCPIIloELEIeiDJegxWdx4ZBcxSH0A+NbCCyMp4T1oNYfEZcOomqSAsxNSKVyQOoiOoOJ5NVR5n1XN89nuRQ1YNQpf145513yM/PRxAEvvjii0AUSVZW1ln7jR07lp07d7Jt27bzMc0zqKsEX15eTk5OToMVjuvmNmTIkPM6t9NRo0pUVC49ziVaJikpKeAlDRAREcFf//pXYmJiGq3ifiEgud0c+sf/4amoQBBFRI3GLxA2M1y6IW9CRZEpOLoCWXIjS24qi3ZdkCJhdsVxPsicj0fyOzV0j0zjrsEzyNxfxherD6EoCu9+u5df/WIAep0Go0nH2Ik9WLv0AGndo7jqmp5odS0Xjr0ed5BAqNPp6TV0DD0HjUJzgYWxqzRMq/4VDx3yl8qeMKFlCSnDw8MBqK6ubs3wKioqlxiKojBnzhwKCwubFAjrI4oiCQkJFBQUMGfOHBYsWIAgCDz11FOsXbu2wT6xsbGMGzeOq666qsNfElVOUd+LUNCKYBQAxS8QOmWQZIRIPUlXTe9wgVBWZL7P2cKqI+tRUIi1RDMgvjeSLFHurMCgNVDjdmHxWLHaIxBlEZ1QiU4sx6czIOtEfIoBvbcSvVCOTnRjtqdgdJqpNtgItRgoiwvBkXwlPp1fDBQFuDw5ijEpUWg7QQVnlYubumrtU6ZMaTTNTEP07t0b8Ocy7AiSk5O57LLL2Lp1K59++il//OMfg47/+OOP5OXlYTAYmDp1aofMsQ41qkRF5dKipdEymzdvZsKECSQnJ7Ns2bKgglBni565EJC9XnLe+4CaAwdRZBlRp0MXHo4iKygeT6CdoiggScgeT4MebIIo4nO5A96EFQU7cNScAECrtxKfftV5u6aWcKQ8lwX7lzO99zV0j0oLOna04hgfZH6J96RAmBHVlTsGTuf7bQUs25QTaBceYgj6TLr1jMFo0pGYHIYgnpu3n9Fkoe9lV7Jr40rSeg+i/8jxmCwhTXdUuWBolUhYVx2lpdXp6kL4LrSEqSoqKh1LZmYmmZmZRERENFsgrMPn8xEREcHOnTvJzMxkyJAhxMfHB7VJSUnhqquuYvz48fTp06fFY6i0L0FehJICVg2KoICi+LfdJ71AtQKVNXsJV/p3WLiDw+tk/p7FHCg7Gti3t/ggIXIi6Ylh/O7yB9mTU8h/Fu4lqUqH1iegoGDU5IAgI2n1oICk1aP1OomU9yErRkTZRHz+YIqrfMy6sR8D0hPYXerhh2NlJIQYua57PHEWY4dcs8qlx759+wC49tprW9Svzm5srMjd+eAPf/gD06dP5+WXX2bKlCmBxaDy8nJ+/etfA/DII4+owpyKisp5o6XRMoIgBAraVVZW8vTTT/POO++cxxm3L+7SUhzHjqOcDDPWWMwoPi+KzxvUTlFA9HqRXS4aM/s0RgOesnK
c1QWU5G0K7E/qPhmNtvPZTYqisPTQOvaVHMKgMfCbkfcEbNoj5bl8mPklXtkHQM/obtw+YDrf/pDDpt0FgXOM6h1P79gQdNrg71FSaniz5+F2Odi/bQO9h47FYDoVndJ9wGXEJqepeQcvUlql0kVGRlJSUkJ5eXmL+uXm5gIQHR3dmuFVVFQuMb744gvcbnez7x0Oh4Pa2lpqa2vxer10796dyspK5s+fz5AhQ7jqqqvYtm1bQBjs2rWrmkOjExPwInR7EayauhhjkBUUu4QgCv4E1k4ftaWHsVcfwxqedt7nWVBTxMe7vqHS6feWFxC4sstoio9E8vraTO65ri8Dukfz045D6OwmdJJf3NSJFeiEcnw6f6U+Ufah9TkRkBEUCQU9Bk0eBm8qOruZn3bUMqZXBpcnQYhey4DYMET1+6tyHqmsrARocTX35hQ4aS47duwIiHoAR4/6hfl58+axZMmSwP4FCxaQkHDqZWbatGk8+uijvP7664wcOZIJEyZgsVhYs2YNVVVVjB49+pyqEquoqKicCy2JlpFlmZKSksA9WKfT4fP5AmlzLpZFblNiIt1//Uuy332PhGunYEpqOCTY5/Ox/of1DLryirM6IYlmIydyvwP8z6CoxGGYQ5PbY+qt5nB5DgfKjmDUGjhQdoTD5TlkRPtzbDt9LnyKPwVF75ge3NL3ej5eepC9R8sC/cf3T6T6aAVb9pei02no1T++wXEaQ5Ykjuzdxr6fv8fjdiFJPoZeeWpBUKPRqgLhRUyrRMLu3btTUlLC5s2befDBB5vdb/ny5QiCwMCBA1szvIqKyjlgs9koKCggLy+PnJwcEhMTL4icdzabjcWLF2M2mxsV8hRFweFwUFNTQ21t7Rk5nGw2G2azmUWLFvHMM88wePDgoMpvKp2XIC9CnUBgqVgGuVZCQECoq0Dt9eFzOSg5vglLWJfzKvxuO7GLhftX4ju5umvWmRiXNIEfNjioqPFXXJ5/Mk9MVnY5sSedHxUUDOJREGRkrRat14FGciOAXwxFQRYEjvWYiL5KJKrURVZ2OQePVdIrLZJBceHn7RpVVOoICwujvLw8EFnSXPLz8wGIiopq9Rxqamr46aefGhyjbhwAt9t9RpvXXnuN0aNHM3fuXDZt2oTX66Vbt27MmTOH3/3ud+j1LUvmrqKionKuNDdaxm63U1hYGIjMA7BYLISFhVFWVsbOnTsvqjQ55tQU+vzpj4hnEf+8Xi9SeBimpCR0dbZgAxRmr8HjqgLAaIklJnlkW0+3TajLXe2VvESawqlwVrH88A/0iEpHEAT6x/Xi1v43sKf4ANd3n8rb32SRW+hfmNaIIhP6xFOYVYp0chF6b+YJMvrEIjaziF3hscPs/HElNZWnRMdjB/fQf8R49EZT21+wSqejVSLhpEmT2LhxI1999RUvvfTSGaF7DbFmzRo2bNiAIAhMnjy5NcOrqKg0k9MrpDkcDux2O6+++ipms7nJCmmdgdLSUpxOJ0bjmSEBhYWF2Gw2XC5XYN/pK4kWiwWdTocoijidTsrKyi4IcVTFj9+L8BCKw4OACGZABsElgiAiak8lXhZEEcXpPa/ehD7Jx8IDK9l2YldgX3JoAmkM59tlpciyf9XaqBGZelkKq7ceB6cPgxdkEbRCJVqhHEmrReepRaxXQEcWNUiiCQEZrWLDkRBPRKUHnD6WbMymZ5eITvu7Vbm4SUtLo7y8nO3bt3Pvvfc2u9+aNWsA6NOnT6vnMG7cuFZ5Jt5yyy2NVmLuDKiFS1RULg2aipaRZZni4uKgNA2CIBAXF0dERASKonDixIlAtMyFiKIoVO/ZS1j/fkF2zdkEwuZSW3GUqpK9AAiilqTu1yCInbPac50XoUXvd4yw6M1neBMOjO9D97DuvD5/JyUVDgAMOg3jusWQt7c4cK6E5DCuvr53swTCmopSdv64ksLjwfmC03oOYMCoCapAeAnRKl/khx56CLPZjN1uZ8aMGU0WItm8eTO
zZs0C/BWX7r777tYMf9Ezd+5c+vTpw2WXXdbRU1G5gDlw4ADTp09n+vTpfPjhh7hcLkwmU+CvrkJaXZsDBw509JSx2+0cOHCAVatW8d577/HCCy/w5JNPUlRUxIkTJ85oL8syPp8v6E8QBEJCQkhMTCQjI4PU1FSsVmugMpzD4eiAK1M5FxRFofDgGry2WhS35I8ScZ38A+q52/kRRfAqAW/CtgxtbIxFB1cFCYSD4gYg5A9gzeaSgECYZDbQHZGCfaWUVTiIVvzXJikKRo4gCh40khNR8QuEigA+nRGPMQxJZ0RQJCLL9qGtdINXJlqB8ionPqn9r09FpSEmTJiAoih88cUXzfYm3LlzJytWrEAQBCZOnNjOM7zwefjhh8nKymLr1q0dPRUVFZV2ojnRMqcLhGazmW7duhEREQH4BcO6aBmbzXY+pt2mKIpCwcJF5H7wMScWfItSb7G0tfi8DgqzVwe249OuRG+KaLPztyX1vQgVSUNhmR23x4fD62T54R+CbFqzUUdagj9vbohJx+UJ4eQdKA0cz+gTx5Sb+mEwNu5dCeBxOdmxfhnLP3szSCCMjk9m4s0PMOLq6ZisoW18pSqdmVbJ8nFxcbz00kv89re/ZfPmzfTs2ZMHHnggaLVz6dKlHD9+nGXLlvHdd98hyzKCIPB///d/WCyWVl/AxYxa0U6ltTRWIU1RFLxeL2azGYvFEqiQtmXLFmbNmsW8efPavSKaoihBhtCOHTv497//TX5+PhUVFWe0d7vdyLIcFF5Rh16vR6PRYDab0Wg0mEwmEhISGgzXqMvVYjabzzim0vmQJYmja9+jxn4Qxe0DQUD2SYjC2de4zrc34YSuo9lXcgiPz8Nl0WPYukXB5jgpmijQJ8SEUutGFgSqKhyM6RLBwQo3GouIViwHewU+QYuo+MOUZVGDT29BrlvlFgRkrYHQmkLCfcX4LFFIkszUqzLOSEitonK+ePDBB/nHP/5BRUUFd999N19++eVZ80FlZ2czY8YMFEXBYrFw3333ncfZqqioqHRO6qJlTCYTsixjs9kCC9t1WCwWqqqqEEWR2NjYgDhYH6PReEFGyyiKQtHSZZRu2AhA+abNRAwdgqVLapucX6M1EZ08kpJjG7BGpBEW07dNztse1PcirKrx4PS6cLvciBrYVZwV5E0oCAK3TMxAoyiIpQ6Kj1cFznPZ6DQGXpbcZKSJIsus+fq9oNBisyWEAaOvJrVHPzVS5RKl1b67jz76KCUlJfztb38L/BcIfKGuv/76QNs65fv555/njjvuaO3QKirtjs1mC3pwx8TEXDAP3ZZWSLNYLJhMJgoLC5k9ezafffYZvXr1OufxFUWhurqavLw88vLyyM/PD/z/8ePHefnll88QInfv3t3o+bRaLTqdDkEQkCQJjeZUiEBMTAwxMTHNmledJ6VaOKnz4yop5tCqt/GG1aDoFLApIAaHFjeKKJ7X3IRhxlDuGHgT+49WsWrdqWJeoRoNKaKIYvMExk/PiKaq3IHskzEatSiOg8jIePUW9B4bkkbvL15SDwGQRS0obnAfRGceg9ftY39mAb16x6pGnEqH0LVrV5544glefvllFi1axKBBg/jtb39LbW1toE1WVlZgsfi9997DbrcjCALPPfdcm+QkVFFRUbnQOXbsGLW1tVRXV+PxeFAUheTkZEJCQgJtrFYrISEhxMXFNZp370KNlilZvYbiNd8HtlNumdFmAiH433Mi4wdiCU1Gq2vcW7Ojqe9FqMOA02sDvQtZAVER8Pg8LDmwlt+NTg9cg63ahXKilspqf3iNRity1TU9Se/RvPccQRTJGDiCbd9/h0arpfeQMfQcfDnas+R2VLn4aX2AP/CXv/yFMWPG8Mc//pHMzMxG2/Xr14+XX36ZqVOntsWwKm1AR4tgHT1+Q5yev8/pdAa8z0wm0wWRv68lFdLqI4oiCQkJFBQUMGfOHBYsWNDia3zhhRc4cuQIeXl5QS+Kp5OXlxc
kEqakpAB+wS8lJeWMv+TkZJ5//nk+/PDDc67aVlfYZObMmR3+PVNpHEWWKd2wgROHlyJHyqBRQAFBr0EQmv/Yai9vQrvHwYojPzC1x1UY64l56REpRPSMZePWbTicHroYDZhcPhThZPVivYbR47vTtUc0H85djV5bhs+lQfQW49PpQRTxGEJRTvvJCXWRJYKAT6tH5y1GchWjN8RQW+1ElhQ02s55L1K5+PnrX/9KXl4en3zyCfv372f27NnAqcXi/v37B9rWLRbfd999PPHEE+d/sioqKiqdAFmWycrKYv369axfv559+/ZRW1uLKIoBG7e2tjZIJBRFkeTks1fivRCjZUq+X0/hspWB7eRfTCNyePuk2jKYO/fCVH0vwrKaWowuAzFFvSmNz8YX4sSkMbHt+AE2JexjdLd+AGi1Ir6TBUpMZj2Tb+xDTHxIo2PUVpWj1RkwWU69B3XtMwR7TRXdBwzHrIYVq9BGIiHANddcwzXXXMPevXtZv349ubm5VFVVYbVaSU5O5sorr2To0KFtNZxKK+hoEayjxz8bBw4cYM6cOWRmZuJ2uzGbzZhMpsDKXF3+vs8//5zBgwfz8ssvt8rbrr1oboW0hhBFkYiICHbu3ElmZiaDBw+mvLyc48ePB3kD5uXl0a1bN1544YWg/nv37iU7O/usY8TFxSGflmskOjqaDRs2YDI1nhR35syZfP755zgcjnNKV+BwODAajdx8880t7qtyfvBUVHLsi8+pNWSjhAF6f24+HDKCppFHlkLDGXZFESQfPre9zbwJ86oL+O+uBVS7arB7HNwx8Kagc4aHGLhxVBo7fsxF45YCVZjjk0IZN7kn1hA9R/Zuw6r9GVOEjNEcQkmBgEejRyOICAJIsoKC33tQEEA8eQ5FAUWvR6+RSOxSzsjJ12K26NGo4cYqHYggCHz88ceMGTOGF154gcLCwkbbxsTE8Oc//5lf/epX53GGFzZq4RIVlYuL9957j88//zwotY5Wqw28axgMBqxWK6GhLRdrLrRombKNmylYtCSwnXjDtUSPvrxNzi1LXpz2YiyhZxdWOwt1XoQeyYvD7cIju0ku6Y6lNhJZkMnXHKVGK4HWx/ubljIgIYMQsx5LiIFJN/Rh87psxl/bC2uIocHze9xO9v28niO7fya1Z39GTJwWOCaIIgMuV3MEq5yizUTCOvr160e/fv3a+rQqbURHi2AdPf7ZaCx/X33MZnOH5O9rKU1VSGsKWZYpLS3l9ttvJyoqKqhqcH18Pt8Z+5KTk8nNzSU+Pp7k5GRSU1ODvAGTkpIwGM58gAmCcFaBEGDw4MEMHjyYLVu2BL43LbmmyspKRo4cyeDBg5vdT+X8kvvFf6m1HIe6r4JGBLcPQSMG1SapjyAKjZbhEjQiilvCVVmIokgt8kSsj6Io/JSfyeKDq5Fk/8t6TmU+X/ywmxtG9sF8Mim02+Ula+MxNF45MLdho7owcFgyTnsNPyyaT3F+DgCS10VxfiE+vQUQEE+qgnUF6E6//wgoSAh4tQbKi3LwuYuxxHc/p+tRUWlrZs+ezb333svKlSsbXSyeMmXKBeXh0hlQ81OrqFy4lJWVERUVFfQ89/l8Z+TeHjhwIAkJCWzdupXU1NRzWtC80KJlKrZuI//rBYHt+GsmETvuynM6l6Mmj0hdFo6a/oRF+fP1lRz/kcri3UQmDCYm5XJEsc1ljzblcHkOe0sO4PK58flkTPZwzLZIZFHCbIvA6LDistjQCHpkQwUF9jx6mrsBEBMXwvUzBzT4vZFlmeysHezdsg63yx+GnntgFz36X0ZkXNJ5vUaVC4fO/WtRaVM6WgTr6PHPRkfn72tL6ldIUxQFn8+HJElIkoQsy0iSRHV1NU6nk9LSUjQaDampqUHJ5usqBOfm5mI0GoPy/9VR9xmdXoDkT3/6E2azGb1e3+bXJggCL7/8MrNmzaKwsLDR4iSnI8tyoP0rr7zSacPEL3XcjgqUgSIUgCBqMEX
Ho+xzU7vnENrQEKABTxpFwelwYDGbA157pyPV1CL0TES4ohm5DBvAI3lZkLWczMK9gX3Rxjjs2d34qbISt/0wd03tjSAIGIw6BgxNYvvm44RFmLjqmp5Ex1nJPbCLzA3L8XrcAPgkmVoJPIqIT9EgikJg/o1+PwUBEfDIGjweJ3u2rCE+tZv6fVbpNOj1eq677jquu+66jp6KioqKynlHURQOHjwYCCM+cOAAX375Jenp6YE2Y8eO5cMPP2TkyJFcccUVjBkzhqioKHbs2MH06dMviWiZ6j17yfviy8B23IRxxF094ZzOpSgKZfmbMWqqKcvfTGhkOraqHCqL/XnOK4t3ExHbv9NWMwb/NXy2+1tq3TYERGRZIaokFUEW8Wk9aL0GoioSyTccJrm8Bwgyyw+vJyO6a8AGbMgWLM7LJnPDcqorTlU81mg09BoymtDI5uVyV7k0UUXCS4SOFsE6evyz0ZH5+5qDy+WipqbmjL+6BMe1tbXU1NRwxRVXMHny5ECOR4PBwMGDBxs9Z524ZzQa8Xg8QSKhXq8PeHfGxsbSq1cvUlJSSE1NJTk5mZSUFBISEhqsYhkeHt7mn0F9evXqxbx585g9ezYFBQVERERgNjechLhuVbWyspKEhATmzZtHz54923V+Ki1DcjrRmEw4agvIP7AIWfRhiIrGEBpNSs8bObz8NTQeLUqZs8H+igI43cgOsTGNEA1afMVVKD4fQgsTMZfZK/jvrm8osp0ysBK1GRzbGYUs+fOrZWWXUVblIibC7/44aHgqGq2GPgMTkLxONi79ghM5db9FBY/WREl0OtqDG/FpDchAc+VLAZAF8Gj0FOQepuj4URK6qN6EKioqKioqHYHH42Hr1q2sX7+eDRs2UFJSEnR8/fr1QSJhr169WLt27RlRNZdStIwpKQl9ZATusgpixo4mfuqUc36Hslcfw16Vi6xosFflUlN2kOJj6wPH47pc2akFQvB7ERbZStGIWr8XYW0k5tpIZNGHAkgaLxZ7GGl5vdErejQagbydpRzOOFXpuD61VeXs2riqnu3pJ7VHXwZcfjWWENUrXeXstLlI6PF4qKqqajQ88XRSU9uucpFKw3S0CNbR4zdFU/n7KioqAtV0IyMjz5hj/fx9Q4YMaXAMr9dLdXV1g+Je/b+wsLAzkrk/+uij7Nixo8nriIqKYvLkyYEcjw0JeA1RVy24PiaTiS5duuB0Ovnf//1f+vTp06xznS+GDx/OZ599xpw5c9i5cyeVlZWYzWaMRmNQ6HrdqurIkSN55ZVXVIGwE+Gz2cn/ZgHu0lJ6PPYbyk78jCT5vews0Smk9JqGVmem51P/D5/N1vh5fD7W/7CeQVdecdbvvNYagthCgXBfySHm712C2+efl0bQElLbl5xjJ8MlFYVYRWR0v8SAQAggigIDhyWTfzSLbeu+C4R3+CSZ2qg0ypIGY9q7CkHyoIhmNHLd76959zdRUfAo4Pa5VG9CFRUVFRWVk5zPgogrVqxg1apVbNmypdH33p49exIRESxQCYLQaNqdSyVaRh8ZQfdHfk35lp+ImzjhnOesKAqlxzehKD5kdCiyj7yD3yJqDAiCgDUinfDYzp0GrS4XoU/2EaINpcJpJ6o4DUEWETRlhMnZ2OmGICUgukLw6tyIOhmntZLlh3+gR1R60Od3YMdG9mxZG5T7PTI2kcFjJxOdoOouKs2jTUTCQ4cO8a9//Yvly5eTk5MTqF7XFIIgNJjTTKVtacsiFo2JYJ15/KZoKn9feXk5Pp8PrVZLaGhoIGS3/l9lZSVPPvkkY8eOpaamhmeeeSZIUPzyyy/55z//2eRcEhMTzxAJm5uDqK6ScP0cjyEhIQiCgCiKaDSawF9hYSFerxej0UhGRsYZ56qrrtaZK6T16tWLBQsWkJmZyfz581m0aNEZRXBmzpzJzTff3KkrUV+KVO/bR/78r/HW+sW/4pWrSZp0DceyvkarNZGUcR0arT9cXR8ejv4s3ql
erxcpPAxTUhK6FoqAZyOnMo+Pd34d2DaJobhze1Bc65+X6JVJFTWYRIFDu4vo3TsuqJqc5POSuWElbpfD79Eq6ChJuwxfVCrIEqKrBkWjRyN7ERH8xVeaiaCAjIJX1GKrrkSWJDTNXBRQUWlPlixZwldffcXPP/9MQUEBNpsNq9VKYmIiI0aMYMaMGVx77bUdPU0VFZWLiI4qiLhmzRq+//77oH16vZ5hw4ZxxRVXMHbsWOLi4lp0zkspWkYXGkr8pKtbdQ579TFsVTmIWiPgAkHA46xEb4rEYIogoevETmn/+yQfa3M2Mjr1Mk7UFAUqGlfVSJhqojA7QpE0HsI4hpFKRDkfmxKHKGuQRYnC1Bys0RIHyo5wuDzYm9BotgYEQpPZSv9RE0jrOQChhe/gKpc2rX6rePvtt3n00Ufxer0AzRYIVZqmrSratbaIhdlsprKykvnz55+TSNfU+Gfz1GvJ+JIk4Xa7cblcgT+tVnuGt+qqVasoKSnB5XJRXV3NBx98gMfjoaCgAEVRkGUZRVEIDw8PEugUReHw4cMNju3z+di0aRMVFRVoNBrKy8uDrqWlQl99BgzwJ6INCQkhLCyMkJAQQkNDz9iuW6mMiYnBZDLhcrlITm64opckSdjt9rPmPLkQKqQJgsCQIUMYMmQIzzzzDGVlZTgcDsxmM9HR0RdE4uZLCcnp5MS3i6n4eVtgn9ZswpSYgEZrJLX3dESNvlMkl04LT6ZfXE/2FB/EKiVSnpWMoGhAUQjxKcQiYjhZYUQUBKqrnEEioUarY/iEG1jx9QdUhyZRkzYcRe/3Noy2muhz4/2sOHgMnSCg17Y8V6LHJ+FVFMYPzlAFQpUOZ/v27dxzzz1kZWUF9tXZg3Ue9AcPHuSjjz6iX79+vP/+++2y6HcxolY3VlFpnPYsiOj1esnMzGT9+vVs3bqVjz76KMgD8IorrmDt2rVEREQwduxYxo4dy4gRI1q9uH4xRsvYjx2nbP0GUm69pcVRHY1R50Uoyz60WjPgQPJ6UVDweWx06XszWl3nc3Qos1fw6e6FFNQWc6K6iFqPHa/kRYcBl8tNUkUCgiKiFSoxKNUoihadUI5WqMRHJD6dBxt2LJIJr+Rl+cF1Qd6EXTL6k7M/k+j4VHoNHY1O33C1YxWVs9GqN4sffviBX/7ylwiCgKIoWK1Whg0bRnx8fINu1Cotoy0q2tUvYtHYSkpFRQUOhwNJkoJElfrtZVnm448/ZtKkSYwfPz6o/9KlS3G73QGPNfB7ogmCgMvl4rPPPgP8Qlp9Tx9ZlrHb7RQXFwdEQkEQkGU5INTV/ddkMrFo0SKeeeYZrFYrn332GfPnzw8SBOuE6voMHjyYd955J2jfRx99xP79+wFwu91UVVUhiiIejyeoXUuSBtc9tH0+HxqN5gyxLyUlhSuvvDIg7oWGhgb91Rf8TufOO+9s9jwArFYr119/PR9++CEREREN/rtHRkai0Wga/V5daBXSwH/dF8pcL0VqDx0m7/P5eKqqUVBQEiE0MoMuM2aiCw0F6FTGnCAIzOh7LTp3NJs3SggICJJMnCQQptWgOXmvi461Mu6anlhDNLgcNozmU9/BmOR0nIOuo1oTAoKAVhQYkxLFiMRIPtxzDLsxnDC9Dvc5ztHu8bKpzEXvJKVTrpSrXBqsXLmS6dOnB3Ld1hEeHo7FYsFut1NVVRXYv2fPHsaMGcOiRYuYOHFiB8z4wkKtbqyi0jDtURCxpqaGjRs3sn79ejZt2oTdbg8c27p1K2PGjAlsX3nllbz33nv069evxZFSTXExRcs48k+Q/fa7SE4XPpuNtPvuQdMGOkGdF6FG61+A1QgeEAQEpYkicB3IjoK9LNy/HI/kf2fdW3KIWo8ds85Eda0Xo92K2R6KLPqIUE4gIiNhRIMLo/YolYIZsyMEo92KQ2MntdpNRd5ODvc85U0oiCLjbrxL9RxUaRWtEgn//ve/B/7/T3/6E3PmzMFoNLZ6UiptR/2
8HI1RXl6Ow+FAlmUcDkeDbeqEuwULFpwhEv7zn/8MegGoj9vtpri4GFEUA0JYHR6Ph/z8/EAeD6/XS1FRUYPnSU5Oxul0UlZWhtVqxW63k5eXd7ZLB8DpPLPYQf3Pon6+htM5/ZggCFit1kDIbv0QXkmScDqdvPrqqwwbNuyM4h0DBgzgH//4R5PzbStmzpzJ559/fklUSFPp3EhuN4XfLaPsx00AKIICPTXouoUixlgQzR3/zFAUhU1524i1RNMj6lRycaPWwC2XjcVWtI+DWcUkImIx+B+bggADhiYz9PIuVBTn/3/27jtMjqtM9P/3VFXnMDlpgkZZsoIlWZJly5ZzBEdwBAwm2WDWa2+4ePfCZVl4WPjd5S5mMSwZgQHjJBwk23LAQbbloGDlPJqcU+fu6qrz+6M1LY1mRhpJMxqF83kePTNdXeHUqKen+q33vC+rVv4VXyCXi67/dPbCTBOCj58zm99urGVCjpdrJpWS73GStm264iYuXSdhDf0edCQuXac7YWJJiXESXgwrp7/W1lZuv/327N/aBQsW8I//+I9cfvnlFBQUZNfr6uri5Zdf5r/+6794//33SSQS3HrrrWzbtu2op+MpiqKMZEPEurq6bDfiDRs2DPrZQNd16urq+i0LBALMmTNnZE/skHGf6rNlEi0t7P35L7HiB2o2jkTw6uAsQofDRzoVQexvAyc0B0LTaa97B1/O+JMiWJhMp3hm+0usa9qcXVbozSccS9IU7yQlNOLJNOXd1QgpcMpeHKILWzoAgY2BIToxRBfSzqe6zcbZ0Y5DF0hp8+IHzzPl6r870OlYBQiV43RcQcI1a9YghODWW2/l3/7t30ZoSMpIOviu00gYrIbk0QTajlVfpl5fENPv9xMMBnG73QP+eTye7Pfl5eUD9vWFL3yBW2+9FbfbTWdnJ1/96lfxeDx4vd5sHT4hxKB/VCorKwcdXyyWqTs2bdq0fh+MxsqZ1CFNOXnZpsmu//oxibZMZ2CpSYxFOejjPAjDIBnrIBZuIJA3sDPbiZJMp3hq60o2tmzD5/Rw36K7yfceuJmRNm0qhUZc03E5MtOCfQEXl1w9jeJSH5vXvMqO9e8ggZ7uTtavW8P8Bednty8PeLh7znjG+d3Z9xRD07h3/gSi5vFPH/Q5dAx1MaiMkR/+8If09PQghOBrX/saP/rRj4bMXr/tttu49dZbefDBB/nxj39Mb28v/+///T9+8IMfjMHIFUU5VY10Q8QHH3yQ2traAesHg0GWLFnC0qVLOe+888Y0KHcqzpZJtrez52e/IB3NfHbzTRhP9ec/NyLTjQ/OIhRCoBtuJFEEAqcnF2lbRHpqiPbW4s+tPu7jHY+mUAt/2vgMHbGu7LJ5ZbPRuibwTMtfQNeJ2Qk88SDeaBBLM/GzF6SNFA6ksJEIdGwCcidC09BSaVJODUtY4DIIxXqxbAtDV+VnlJFxXK+kvjvHqgj1yevguhyHYxgGmqYNqGHXN3UokUiQSqW4/vrrB2z7z//8z6RSKaSU/f7Ztk1rayv//u//jsPhGJDNaBgGxcXF/aYb9/2x75u63Pd9Op3u10Tj9ttv5/bbbz+mn8m5556b/T4SiZCTk0MikcDpdB7T/uDkq993JnVIU05emsNBzpzZJF55DeHWcV9WhvTamekgQqd8ytVjGiBsi3by6IanaYt2ANATi/J/l6/is0suY3p1pqao0CDUHc8GCCdNK2LJpZOJhdt5+fE/0tvVDlISNtOE3PnEUj7mSol20O9OeWBgJnfQ5SDoGrlGK4oyFlasWIEQgrlz5w4ZIDyYEIL/+q//4q233mL9+vU899xzKkioKMpROZaGiLZtE4lESCQSAxoiLl26lD/84Q8AVFVVsXTpUpYuXcrZZ5+Nrh99zWAFUl3d7PnZL7IN6ryVFUz84udHZJrxoVmEAEIzsKQLr9uHphlIoWOm42OaTSil5N36tazY+RqWnbkp7DKcLB13MWs/kDR3tJO
vnU/cjBHtNSmx3ehpDV3vxCV6kThAaAhAIBFYuOjFkj4kBkbKT4t7HB+/9irmTqlSAUJlRB3Xq6myspJdu3ZhqKLpJ62Dm1gMVUi3oKAgG6QbrCYeZDII/X4/F1544YDnrrnmmiGPH4lE+OlPfzpoEM4wDAoKCujq6kIIgWEYQ9bbiUajoxKEG079voN/PoM5Wev3nUkd0pSTV8mVl5MMd2JOiJG2Y4BA051UTrsOb3DwxjonwsaWbTy1dSXJdKYWaTIJ6cZp2PFC/vjSdv750wsI+pwYhs4l10xj5VObOe/iiUycWsi2tW+x9f03saVNyrLoTdlEK+dils8kZgs+au1lXmnumJ2bopwofdk3n/rUp4b9IUwIwac+9SnWr18/YPqeoignRiQSoampifr6empqahg3btxJdQ17OMNtyGiaJpFIhHA4nJ3xA5l6qQc3RLzmmmvIz89n6dKljB8//kScwmkt1dPLnp/9nFRPLwCeslIm3vNF9MOUvjoa0d5awj170XTXIX93BJqeufmayS70jGk24e6ufTy7/eXs4/JgKVVyIS+81IW1P3nHabkJRjRkPIUhBFJIvNRksghxgZRoJBCkgczrVyNFXJaRsKfSk/bxwcYES2cN/vldUY7Vcc1R6gsYbdy4cUQGo4y8viDYwX8cD5Wfn09RUdGgnYXhQADphhtuOOoLiOEc/0iO5/jDcdttt+FyuYasx3ikn8/JXL+vr0Pa4sWLicfjNDY2ZhvVpFIpYrEYXV1dNDY2Eo/HWbx4MY899tiQBZ0VZSjSsmhZ9TJtf3u933Iz1Ys5JbE/QJhpTjL+rE+OWYDQsi2e3/EKf9r4V5LpFLaUxMNO0jVzMOKFaGkbr66RSB0orVBQ5OeOLyykuFjw6pO/ZvN7r5O2LXoTKdo1P6E5H8OsmIWmZxqTzC4Ojsm5KcqJ1tekrqqq6qi26yvdoZrcKcqJI6Vk3bp1fP3rX2fRokVceeWVfPvb3+bKK69k0aJFfP3rX2fdunXHfL1+IhyuIWM0GqW7u5vW1lb27t3L7t27aWlpIRqN9junaDSK1+vl2WefJRKJMHXqVD7zmc+oAOEIMMNh9v7PL0h2ZqbWuosLmXjvlzCOs+NzHyklrfveJJ2Mkk5FkHLomXKa7sS207TXvTMmr+nJ+dXMLTsLgPkl87Bq5/D6mo5MgFBKSpwOqtIgoykMQLMlhujCQQcSJyAQWPsDhJDJJ9SQaCSpQEo3BZZk695OdtR2n/DzU05vxxUkfOCBB9B1nd/85jf09vaO1JiUEXakINiRHG8Q7EjHLygooLCwcMhafqMdhOur39fd3X3UNRT76vfNnTv3pK3f19ch7emnn+Zzn/scbrebeDxOIpEgHo/jdru5++67efrpp1m+fLnKIFSOWqKlhV0/foSWF1+meeWLxJuaAIiFm6jd8gTpVGa6idOdw/iZt+L2FY3JOEOJML/48E+srv0AgKRpEWvLR9SdjZH24oinKU3aTHU7KQj2b6iSjIdZ9Zef09XWSNy0aI+l6CmbRezsj2H78qgIevjS3AlcPL5I1QhUzhjV1dVApoHJ0WhrawNgwoQJR1hTUZSRsH37dm666SZuuukmli1bli2T0/cvkUiwbNmy7Drbt28f6yFnSSnp7u5m8+bNPPnkk7S1tZFKpQas19zcTEtLC11dXSSTyX7PORwO8vLyqKqqIjc3N3st3NHRcaJO44zQ8uKqbB1qV0E+E++9B8cQs9SORXfLBno7tgESpI2ZDA257qHZhKPt0ECkEIIbZ1zNxydcx7rVfmqbwpnlNkzzuMhPWoTCCZAgJFiaxCN2AxaSzOw1ibH/e4GNGws/kFnPFhKXKSGe5vm3957UwX3l1HNc84RnzpzJT37yE+69916uvvpqnnjiiQE17ZSxN9ZNLI50/KEy9Ebq+EdyJtTvO7RDWnNzMy+99BJXXXUVZWVlp8wUE+XkIm2b9jffomXlS9h9TY2kJFZbh2fcOMK
du7CszIW621dM5fQbMBwjczf5aNm2zS8+/FO2cHQklibdMgF3vBzNBn80TYHDwOsz6GqPsmV9E3MWHPh75gvmUjThLLZuWkvc6Scx5wLsQBEuQ+Oy6iLmluT2q0OoKGeCm2++mfXr1/PUU09x3333DXu7J598EiEEN9988yiO7vTwyCOP8Mgjj2BZx9/oSDl6kUiE9vZ24vE4Ho+HoqKiU+6a6f333+eee+6hubmZvLw8CgsLEUIgpcQ0TbxeLz6fj7y8PGKxGGvWrOGOO+7g5z//+QmfWbJ3714+/PBDGhsb+/3rSzSIx+OEw+FBfx+cTiemaWYfezwe/H4/gUBgQNbyoQ0RlZEx7vqPk+rsJNnWxsR7v4wzd/AyUsci1LWH+u3PIKWNQENoOobz8L+Lmu48IbUJexMhHtv0HEuqFjCr5ECyhdtwcd6kmXyQv5665l7ydIMSAVrCQgKmaYMAG9DowEkbNk4kfeMU2Hjoq0oIYOPEQSea7MKWBRRKQWdPnLQlcRjqOlQZGcddTPDLX/4yhYWF3HPPPUydOpXrrruORYsWUVBQMKxAy1133XW8Q1COYKyDYGN9/OE4k+r3+f1+qqurqayspLq6GscIdBlTzjzJjk7qH3ucyN6a7DJ3cRFVd96Gd//Uw+LxF2KmItjpJBXTPo6mH3tzoKHs6arljdQ6pnXNZHrJ5CHX0zSNq6dcxLL1TxMOCYzW2XjSuehJi/ykTZ7PiaFn3pdmzCljxpxSpJT93gMmLLyU97tSJMtngW5wVmGAKyaWEHCqurzKmem+++7jl7/8JW+88QY//OEP+cd//McjbvOjH/2IN954gwkTJvC1r33tBIzy1Hbfffdx3333EQqFhqzbrIwsKSXr16/nL3/5C8899xzxeBzbttE0DY/Hw3XXXcdtt93GvHnzTuobxJDJILznnntoaWk5YidgIQQ+nw+Px0NzczP33HMPf/7zn5k+ffpxj0NKSW9vL42NjTQ0NGS/Pvjgg/3qob/zzjv86Ec/GnI/feMfLJMwLy+PYDCI0+nE6XQetmZ+3//nUPXalWOju1xM+MLdmKEwroKhk0COVlfLBpp2v4yVTiAQaIYTpzsXIQ7/efJE1Cbc1r6LJzavIGbGaYm0UR4sJc9z4L1a1wSfuGgSzz69GUfKyt5Q9nidXHL5ZNa+XYsuuzBSH4JMoQsn9Ps9PeQcpQOkSaGrjrRRjmVLrr1kKg5DzWJRRs6IfLKJRCIEAgE6Ozt58sknefLJJ4e1nRBCBQlPkLEOgo318Yejr37fQw89xIYNG+ju7sbr9eJ2u7N3HBOJRHb68+LFi/nBD35wSgUIFeV4SSnpfHcNzc+twEpmLtKFgMKlF1J27dVoBwWdhdAYN/kqADRt5ANpUkpe3PMGrXYXL+55g2nFkw77gW1WyXSumXgFL7wSJm0KXJEUhUIjGHQjALfHwdIrp1Bc4mTNS49TMfksJsyYm92+qiCXsxdfwo6uMNdMLGVy/qmVTaIoIy03N5cVK1Zwww038L/+1//ivffe45//+Z9ZuHDhgHU/+OAD/vM//5Mnn3ySSZMm8cwzzxAMqvqdJ4OxzpYb6+MfbPv27Tz00EOsX7+eZDKJ1+vNzoLpuw5ctmwZjz32GPPmzeP73//+iATRRoOUkoceeojm5uYjBggPpmkaZWVlNDU18dBDD7F8+fKjCobG43FWrFiRzQJsaGigoaFh0Ky9W265hRkzZmQfl5eX93veMAzGjRtHeXk55eXlFBQU8N3vfnfQTMKhmi8Opm+69Ug3RDzT2KaJnUxh+H3ZZZrDMWIBQiltWmvfpKt5A+lUGIlEN7w43TnDfk2OVjZh2krz4u7XsyVsAHRh8PjrW7hm3iyqSg/8ffvorX24TDtzwQxMmFLI+ZdMZOVTG9BSmzCoBZkABJAADnNTXYjM8+lWHK4uzFQO29Y3MX1G8Ul/00I5dRz3p7Z7772XX/7yl9nHaj78yWusg2Bjffzh6Kv
ft379eh5//HGeffbZAXeQb7vtNm655ZZT4g6yooykdCRC7R//THjHruwyZ34eVXfchm9iNe31awgWTMbtK84+PxrBwT67OmvY0roLy9TZ0rqLXZ01TC2cCEBLuI3NbTu4fFL/juyXTl2I0VXL317YQb7XgXt/FmDVhHyWXjmF9oadvPinFSQTcXbv20NReTX+YG52+4vHF3Hx+CKcurpjqyiXXnopAMFgECklTz31FE899RTBYJBJkybh8/mIRqPs2bOHUOhA7ahgMHjYLEIhBK+++uqoj/9MNtbZcmN9/MEMNS33YF6v96SYljsc69evZ/369eTl5R1VqSHIBArz8vLYsGED69evZ/78+UgpCYVC/bIBGxsbWbBgAVdffXW/7b///e8P6ziNjY39goQzZ87km9/8ZjYoWFJSMmDsu3fvZtmyZQOy/YerLxnhtttuO+Wmjp9M7HSafcv+QKqzi0n3fgnHCGc621aKxt0vEumuwbZS2FYK3eHF5crJBtuGYzSyCTuiXfxp0zM0hVqyy8Z5qujcWcn2UIrO1u3806fOwenI1BZctHQCz/3lI1xuB0suncTEqUXU795Ge80KDBlBygSZ7sUOwAtHiqdIHWQKO7YNp2sJ4d44tiXR1XRjZYQc16e3J554gl/84hdA5hfw8ssv54ILLqC0tFR1rBsBo1GHZqyDYGN9/OE4tH5fR0cHsVgMr9dLYWGhuqBQzliay4XZfaCDWsF55zLu+o+jOQya9qwi1LGDnvbNVM+8Fac7d1THIqXkxV2vE02msFJOokaKF3e9zpSCCaxv3sLyrS9g2mk0282S6vm49l+oRSNJdr/fQGmOG00IdENj8dIJTJyaw/o3n6Vu12YSaZvepIlluHm/poFLzz5wLio4qCgHvP7669m/031f+6YVrl+/Prte3w3kvnU2bNgw5D6P9YO/MnxjnS031scfakwnw7TckfSXv/yFZDJ5zNlytm3T1dXFV77yFSZPnkxjYyORSGTQ9Q4OEno8HgoKCujs7ARA13XKysqygb+KigoqKiooLy8f0Bm9uLiYG2644bDjuu2223jssceIxWL4fL7DrjuY0W6IeCaQlkXdH/9MaGumwc3eX/2GqQ/+PWIEG7f1duwg0l2DlJJ0KorQDBwOH7adHrBu5m+MjW2Zg/79EEIjbaVGJJtwffNm/rrtJZLpzGwaTeiU2jPZu9aH2N9tORRO0tgeYcK4TOC0pCzIxVdPo7wqF0GKd196krpdW8jNFaSSDqIhG6crgMc//EBr2nRgpXs5/5oSKidPRVfTjZURJORxpP4tXbqU1atX4/V6WbFiBRdddNFIjk3Zr68OTW9v74hPzYlEImMaBBvr45+pTNNk5cqVXHvttaomoXJUYnV17Fv2KBWfvJngjOnYVoqGnSsP6hwnKJ98FcHC0c0A3tmxl//71i8wW70UtFTRWVqHoyTGosqz2dO1LzPWhEmk28Plpddx51UHPry991YNGz9soLDYzyXXTCMWauCD154lGokQSpokLJt0YTWJSYvx+bz83YJJqmOxMupG+315NK4ljjY7abiEEKpRxyFG6v/v0Gy5oyn9MhLZcmN9/MFIKbnppptYs2bNUU3LhUyArKmpicWLFx/1tNzRFIlEWLRoEYlEgvz8fGzbxrKs7Ne+f+FwGMMwsG17wFTfpqYmOjs70TSN6dOno+v6oMeaP39+Nmmkz+uvv47X66WiooKSkpIhtz0Wp+P/16lE2jZ1f/4L3WszN4I0w2Dil7+Af/KkkT2OlDTveZlQ125sK4llDt1kRkqy5QoO91/q9OQz5ZwvHdMsl2Q6xbPbV7G2aVN2mU8PYjVNo7cjsz9hS0qloLo4wC2fno/QDgxGSkndzk2sf+tFkol4dpmZSpCKx/Dl5B/V61FKSTTUTeXks7ji1i+r1/Jp4mS5FjyuTMJt27YhhOArX/mKChCeovx+/5gG5cb6+IqiDK1n4yY8ZaW4ioqyy7xVVUz
/l/+FZhikzTj1258hEW0FQAid8inXEsifOKrjymYRJlKUtk3FH81Ftls05qzn3foPKfDk0xtJYXUXE4xM54POZmZNKmDO5Mx5LDh/PD6fk8kz8tn0zsvs3baOaMoinEpjG06S0xaTLprAlHw/10wqUQFCRRmCbdtjPQTlKIx1ttxYH38oIz0tdzSkUinC4TDhcJhQKEQkEsl+37c8HA7zyU9+kunTp2drPAoh2LZt25D7tSwrG8AbN25cv0CD0+nMZnfatk1VVdWATMC+f4e6+OKLR/xn0OdUaIh4upJS0vDU8oMChDrVn//siAcIIfP/XDbxMgrLFyI0AysdH3Jd0zR58803Wbp46WEDK7rhPeYyOIl0gq3tB8rt5MpK2jaPQ9qZ154zZVMuNDwOnd7OGBvXNXL2gop++9i3/aNsgNDpclM9bQ4fvfsqLo/vqF+PQghcbi9N+3bRUreHsvFDN+9TlKN1XEHCvs5SgxWoVhRFUU5N6ViMxuXP0L12Pb7xVUz+u6/2m0KiGQZmMkTdtuWkEj2ZZbqTymnX4w0O/LAw0nZ11rCpZSfOnjy8sSC2lsYbC+KO5BDTejHjEXy9s/DFxuEOpxg/Pp+pVXnZ7XVdo7xS55XHf05PTze9SRPTllh55SQmn48vEOSqiSVML/CrDxGKopwWxqqJxcly/MM53mm5Xq+X7u5uHn/88cMGCaPRKN3d3dmAXiQSGRDkC4fDzJ49m9tuu63ftpdffvmgjT8OtWDBAqZPn54t5eN0HqYBwiFSqVS/clHBYBBd10kmkzz66KPMnj172PsabadCQ8TTjZSSpmeeo/Pd9wAQmmD8XZ8mOH1kfpbdrZtwunPw5RyYgi40Hacnc/3mcA3dmEY3TdLSi9tXPGozpHLcQW6Z+XEe3fBXHF1TaW3OTA0WtqQwDbmahsPIBNxdboNAsH/pNSEE51zycV76888oGz+FeRdezVvP/4l0KonD6SJtDuzYfSRC00inkmxa8yqlVYdv3qcoR+O4goRVVVVs3bqVRCIxUuNRFEVRxlBo23bqH38SszfTZCBaW0fvps3knj0nu04y1kHd9mdIpzK1iQyHl8oZN+H2jn6XwH5ZhO1TEVKQdiQwTDcFbVU0+D6ClAdfqAR/JEme14kjnCQWSuIuPPAnz+ML0BaOEYqnQDdITl6IWTqFc8ryuGR8EW5j5KZGKYqijLWxzpYb6+MPJRKJ8Nxzzw0ZYDqUbduYpjlg6m46nea3v/0tmqaRTCYB+OEPf9hv2x/84AesXLnyiMdIp9MDgoSBQGBYQcK+BkF9NR6FELjdbnRdR9M0dF3Pft831tzcXJxO54ApwU6nk3Q6jW3bR9U5+EQ5FRoink5aXniJ9jdXA5m+IVWfuoOcWTOPe79S2rTVvU1X8zo03Un1zNtweUemO/LxiJlxNARuhzu77KziKXx+9uf4n8e3ATaOpEUZGj6nnn3/qJ5cwAWXTcZMhuhorqewrDK7vT+Yx1V3fAV/MA8rnSbS04XhdGGmksc8TsPpItLbjW1Z6MboNQtUzizH9Uq6/vrr2bJlC2+++SZ33XXXSI1JURRFOcGsZJKmZ5/P3iEG0D1uym+6gZw5B7IHYqFG6nc8h21lLmic7lwqp9+I0z2yXe2G0j+LMICtmyDA1k284Xzc3aX4Uj6CkQTFOQEMXcMwNCKhJPmFBwqcu70+8uddQvdH75GYuoTCggI+NqmUiqD3hJyHoijKcIxUE7sTlS13oo9v2zbJZJJEIjHk17KyMqZMmZLdxrIsfvrTn5JMJmlubqapqQld14lEIkgpsW07+7W8vByPx5PdNhqN0tDQMGB8fVNyn3vuOVwuF4ZhDGjCM9xA28GdwPssWrSI3t5eAoEAgUCAYDCI3+8nGAxmlwUCAUpLSwEoKirC4/GQSCSYMGHCoMfpazLkdruHDJAmEgk8Hs8x/7+NtlOhIeLpoPWVV2l95bXs44pbbyFv3tzj3q9tmTTtfolw957
9j1NEevaOeZBwX3c9f970LONzy7lj9g39XjeTy4q4/Jww779VQ76uZxvjudwGSy6dTPWkPHZseIetH7yJy+Pn6ju/gsN5IKvQH8xkRuqGwdV3fpVk/MjB/yNxeX0qQKiMqON6Nd1///386le/4tFHH+VrX/sac+fOHaFhKYqiKCdKZM9e6v78F1JdBzoXB6dPpeLWW3Dm9g/+pRLd2QCh21dM5fQbMBwnJrB2aBahJnXS3l4Cop6wrMIRyqOyfga2YaK7TAxdo6wihwsvn0TjnrUkSufj9h6ogXrN4nP5tauYxaW5LB6Xj66pDw+Kopxc7rvvPu67775ssfFjMdxsuba2NtLpA51DD103mUzym9/8hmuvvZalS5dml4dCIX7xi18ghBj0XyqVYtmyZZimiW3b/TLWEolEtmNuX2Du4CBd31fDMPB6vTz77LN84xvfwO/3c//99/POO+8c8fzvvPNO/uEf/iH7WNM0li1bBmSaHcTj8Wxm3aEODc4eKQuyr05nOp3OBtj6zJw5k6uuuqpfUG+woN9gxeS/9a1vHfE8D+b3+7nuuutYtmwZeXl5xzxFPBaLcdttt53U9cOFEMyfP5/58+fzjW98QzVEHGFSSlKdXdnHFZ+4kYJzj7/UWDoVpX7HsySibfuXCEonXEJeydhNa7elzes17/LynreQUrKxJYTXKuLjc87D0DO/+6lkmvbNbZS4HGiHZA/GIx288uSv6OnI1OqORXrZseFdZi26eNDjeQM5eAMn5ia7ohyN4woSlpSUsHz5cm688UauuOIKfvKTn3DrrbequzSKoiinANs0aV75Ah1vrqavz73ucjLu+o+Tv/jcQd/Lc4tnkTZjxEKNVEz9GJo+/HpHx0NKydqmjf1qEaY1E5/WhC/WgvQaJO0CHLaBKWziVpyqs32cO7uENS89SlNzA1v37OUTt342e14uXeOe+RNVcFBRlNNaXxOLgwNWgwmHw9l644OxbZtYLMZHH33UL0gYiUR47LHHhtwumUzS3d2Npmn9GmVAJkjX3t5+xHNwOBzk5eURj8fp6OjA7/djDDNz5tCySEIIXC4XyWRyyKBf31TdQzmdTnJzcwdM302lUpimyc9+9jPOOussAoHAgHqA1157Lddee+2wxjwSbrvtNh577DFisRg+n+/IGxyib5ruLbfcMgqjGx2qIeLIE0JQccsnEA4Hzvw8Cpecf9z7TMY6qd/+DGYqDICmOSifei3+3Orj3vexCiXCPLb5OfZ21QJgS4mM5fDatjDOeC3Xnp/JyHW6DCZNK2LLhiZcboPzL5lE9aRctrz/Bjs2vIvcf0EthGDa3POYPm/JmJ2Tohyr4woSfv7znwdg9uzZ/O1vf+POO+/kgQceYMGCBRQUFBzxbpsQgl//+tfHMwRFURTlGMXqG2h/Y3X2sX/iBCrvuBVXQcFhtysYt5CCcQsQ4sR0/e1JhPjr1pdYXfc+sUR6fxahhvSH8CTbsTQHnmQbKV8YEclB2DoNlRvZ0VFL519MeuIJUpYktG8n9U0NVJUfqA+jAoSKopzuDp5+ORIODST2fSgeykh0wZZSZuvM9dXmmzBhAt3d3bjdblwu16Bf3W73oB2Rf/KTn+BwOEin09xyyy2kUiny8/OHDA72cTgclJWVDVje1dVFTk4O8+fPP2mCVPPmzWPevHmsWbMmW6NwuGzbpru7m8WLFzNv3rxRHKVyKhCaRvlNN4xIIlC0t46GnSuwrcz7iOH0Uzn9hhNS13ooOzr28Pjm54imMp2HU6aN1VGJ6KpER/DKe7WcM72EkvzMzJmFF1QjpWTeuVVEeppY9dhfCPceyLbMLShh4aXXkV8y+s38FGU0HFeQ8He/+132zaLva1tb27CK8vZRQUJFUZSx4Z84gaKlF9D5zhrKPnYNhRcu6dfFWEqbttq38PhLCRYeKPSdeb8f/eCaLW3eqfuQVbvfpDcZIZyM4Q4dnEXYiLAt0oYXIx3D5aklHp+BJuK
Mbw6TaO6lVfchhIbtzSEx5QJahI+qIx9aURTltNEXIDpSsK6qqqpfwO/Q4F88HieRSHD55Zf3W15UVMRvf/vb7PpSyn7/GhoauPfee3G5XAM6j/r9fiorMzduhBDZIF3f9N+Dl8ViMTRNw+vNfFD/u7/7u2P7gUC/wNfNN9/MsmXLjhggHMrJOi1XCMH3v/997rjjDpqbmykrKxtWoNC27ez6P/jBD9QMsTNQz8ZNuAoK8JSPyy4biddBT9tWmve+AmTeK9y+IiqmXY/DOTa/N2nb4qVdr/NW7fsASAmJmE66cRpOMx9hS3wxkwVzyynOO5CJ7XDoLFxSwcZ3VrFny9rsck3TmbloKdPnLUHTVQM85dR13BUuj3T38HDUHx1FUZQTJ9nejrOgoF8gsOzaqyk4bzHukuJ+60rbomnPKkKdOxFCQ3d48OWcuPBaU6iFp7e+SEOomVjCpD3WjbQl+W3VCFtDBEN4km3YuhOEwNaduJOtWF4nXrMN23Ji6xJL2tgVs/FNX8QnplYwPkc1JlEU5cxycBOLvgDbYA4N4B0qGo0SDAaZNGlSv+VOp5PZs4euIzZ16lRyc3NJJBIDrv0dDscRj9tntJponM7TcqdPn87Pf/5z7rnnHpqamsjLyxuyNmVfsLO7u5uysjJ+/vOfq07AZ6DeLVuo/f0f0d0uJn75i3irKo+80TDphou+AKE/bwLlk69B04f3+z/S4maCX699jIZQMwCptEWqOxe9fSpO6cRIpMkzJXleJ537ugn1JMg5KFCYSsap3bEx+7iwtIKFl15PML/ohJ+Looy04woS1tTUjNQ4FEVRlFEibZu2v71Oy4urGHf9dRRdeKA+iuZ0DggQ2laKhp0riPbWZbaXknQqekLGmkqneHnPW6yu+4CUadETThIzE9iaxNtZTjBUiKmnslmElpHpGGej4TIjBLQ9mM4ACIHp8NAwfjYXzpnNzdMnY4zQVDtFUZRTyVg3sRjr4x/J6T4td9GiRfz5z3/moYceYsOGDXR3d+P1enG5XKRSKWKxGMlkMhvsXLx4MT/4wQ9UgPAMFN6xk9pljyJtm3QsTvfadSMaJAzkT6Jk/FLMZIji8ReesLI1g3EbLoIuPxKIxtJYreNxxSoRNngjKQoMnUAgc41pGBrhUP8goT+Yx6zFl7DlvdeZfd5lTJ61oN9NeEU5lR1XkHD8+PEjNQ5FURRlFCTb26n701+I1mYCfs3PryA4fSquosHvdKbNGPXbn8l2mxNCp3zKtQTyJ476WLe37+Gv21+iM9pDKJIkEjfRLR+GbeAJFVDSNAVhaxi+RL8sQgANCwlodhpNpunJm0pXYAI9vENLbwRdDKxJpSiKcqYY62y5sT7+4ZwJ03KnT5/O8uXLWb9+PY8//jjPPvsssViMRCKRncJ92223ccsttzBv3ryT+lyU0RHZs5ea3/wOO53p6J03fy7jbrjuuPZpW6kBDe7yy06OYLoQgk/O+hj7Xn2UaH0J7nQQI5EmJ2mT73PhMDLvAVUT87ng0sl0Nu/CTHlxOF3ZfUydcy6Vk2fi9Q/sSK4op7Ljnm6sKIqinHykbdOx+h2aV6zENtMACE1QtPQCHLm5g26TSvRSv/2vpBI9AGi6i8rp1+MNjBt0/ZGUskye2rqSxq4uQtEUSIEvOhlPJAdnPIQz5kW3DSQ2bmcdIn4gixDA1pzYegrdSpF0BYglq8htihId72STaye7OmuYWjj6gU5FUZST0Vhny4318Y/kTJiWK4Rg/vz5zJ8/n2984xs0Nzfz0ksvcdVVV1FWVnZS1VNUTqxobR01v/pN9noxd84squ647bgy45LxLuq3P0N+2XzyS88eqaEOak9XLW+k1jGtaybTSyYPuk5LuI2YmWBi/oHSOV6Hh3+94gv8v9YP6dnXQ76mEQy6EULgdBmcf8lEyspdrP3bkzTX7WbKnEXMX3pNdnuhaSpAqJyWVE6soijKaSbV1c2e//kljX99NnvB5yo
qZPLXvkLZx65FG6T+UyLaTu2Wx7MBQsPpp3rmLSckQAjg1B1cP/0KzLSNI5VHQftiSloryIuAkXShW06QYOf04kk0Z6pLH/LhLe3wkXIGcCd6cCdb0aUgv2Mc0USCF3e9flw1dBVFUU5lfdlyZWVlNDc3D7vj8Ehly4318Yejb1ru4sWLicfjNDY20tXVlc24i8VidHV10djYSDweZ/HixTz22GMsWrRo1MY0Wvx+P9XV1VRWVlJdXa0ChGeweGMTe3/xK6xkpttwcMY0qj59J+I4Gm9EQw3s2/w4ZjJE6743iPTsG6HRDiSl5MU9b9Bqd/HinjcGXOtJKVlTv46fvLeMP238K13RUL/nezpiFIZNxnmc5PhdCCGompjPJz4zF8xaVv35ZzTX7QZg98b3CXV3jNq5KMrJQgUJFUVRTnLhnbvY9r0fEN6567DrSSnpfO99dvzfHxLZvSe7vPCC85n6D3+Pr7p60O1ioUZqtz5J2owB4HTnUj3zFlzeghE7h0O1RtrpTfS/UJtVPI0vz7+dcS1zKY+6Kfe7kNholo6QAsvZSY61Ht2Mo9kmmp06ZK8CKQyEbeHVGklrJt5oEGcol4buDizbGrXzURRFOdn1ZcuVlpbS1NRENBod8uaJlJJoNEpTUxOlpaUjki031scf7hiXL1/O008/zec+9zncbjfxeJxwOEw8HsftdnP33Xfz9NNPs3z58lMig1BRhpJoaWHP//wCK54AIDBlEtWfuwvNOPbJhr3t26jfthzbSgLg8hbg8oxss6GD7eqsYUvrLixTZ0vrLnZ1HuiZEDPj/HHjcv667SXSdpq2cC//58m/0NQRya6Tk+fB43Hgcug4XQYXXTWVxRcW8/6qP7PuzRcwzcy1pscXYMnHbieYN3rnoigni2G9A7z55pvZ75cuXTro8mN18P4URVGU/qSUND2/gt5Nm9HcLqY++PdDZlJ0rH6HxuXPZB8783KpvP1WAlMGn3rR7zj7A2huXwmV02/AcHiOsMWxSVtpXqt5m1d2v4MWK+DGqdexeFYZkMk06dpsU+VyommZc9TxY1rdeLXtGHozDjOCFBoIgW6lsLX+tW4ynY5deJLtxD0T0cM+CmrOJtdXgK4d+11xRVEOz7ZtnnrqKV566SW2bt1KV1cXpmmyZ8+efutt3ryZUChETk4OM2fOHKPRnrmGamLhdrvRNA3btrNZc6PRxGKsjz8ch07L7ejoIBaL4fV6KSwsVFl3ymnBisfZ8z+/JB3N3CD2VY+n+vOfG3S2yXBIKeloWENH4/vZZf7casqnXDOgLuFIkVLy4q7XiSZTWCknUSPFi7teZ0rBBOp6G/nzxmfoSYSwbUl3JInsKsMfncgfX9zOA7fPx2FoOJw6S6+Ywub1TZx/yQTqd37IqsfexD7oxvKkmecw5/zLcbrco3IeinKyGVaQ8OKLL0YIgRCCdDo9YPmxOnR/iqIoSn/hHTsJb92G7vEQ3rqN8I6dBKcP/mEpf+E5tL/xBqmuHvIXLaD8huvQPUcO9nmD5ZRPuZaets37L+aO7QLxSHZ37uOpLS+wr6OVcMxEyjBPrnmPuVM/jtuZ+XM0d2EldXu70HTBlOkBOt79EJ+jAdsQ6FYyM81Y00g7PNi6CwZJQrGFjmEl8bj2EeNsvJaDaJtN2pI4DFWMXVFG2ttvv81dd93Fvn37ssuklINeIz711FP8+7//O8FgkObmZtxu9aHrcB555BEeeeQRLGvkMqEHa2IRj8exbRtN0/B4PKPaxGKsj380/H6/CgoqpyXd46Hk8ktpePoZvJXlTPzS59FdriNvOAjbTtO85xVCnTuyy/JK5lBSfdGodjDe1VnDppadOHtyKWuporO0jk2unfx507Nsat2GlJJEyqK318LTMxNXogh3xCSv0kHasrPNScqr8vC447yz8vf0dLRm9x/IyWfBpddRXF49auegKCejYecSH246gDI6RuPCUFGUU4eUkuaVL2CbaRz5+Zh
dXTSvfIHAtKkIIZC23a+otO52U3XH7ViJODmHydDpe98++INXIH8i/rwJo/JhLJqK8fyOV3lr73p6I0lsWwICX3giTiuX1s4Y48syhZ9LxgVZsLiEaNcmare/gkeLYbr0/dOL06QdHiyHFxAIADFIMCKbTdiKUZSAUIDqfD+GrgKEijLSVq1axXXXXUc6nUZKiWEY+P1+enp6Bl3/y1/+Mv/+7/9OKBRi5cqV3HzzzSd2wKeY++67j/vuuy+bfTlSxjpbbqyPrygKFF6wBMPvxz9l8rBuKg/GMuM07HyeWLgpu6xk/FLySueOaoA/m0WYSFHaNhV/NBfZbtGYs55X9rxJoSefUNQk0esjGJ6NK+7AG0mS53NShsDt7D+7pL2pNhsgFEIwbe55zFx0McYxZlYqyqlsWEHCb33rWwADftH7liujY7QuDBVFOTVkswh9PoQQ6D5fNptQWmmanl3BpHu/hDMvL7uNf9LhO/hKadO670003UFx1ZJ+z430xZyUkvXNm/nLRy/S2tOLmc4UqneYuRS3z6TU9lBVEaSqJNBvu4pJQZa/s56YmSYN2JqBrWtoVhLL8PaFBw/L1gwMM4m0duJ2nUsilsK2JLrKJFSUEdPT08Mdd9yBaZoEAgF++MMfcuedd/Lyyy9z0003DbpNWVkZixcvZs2aNbz66qsqSHgSGOtsubE+vqKcKQ69uQyQO/f4Og837nohGyAUmk755GsI5E86rn0Ox4Eswjy8sSC2lsYbC+KO5BDTemhOhPGEJpMbmYAnnMZvpynI9aDrGu2tEXq74+Tme7P7mzrnXOp2bsa2LBZeeh35JeWjfg6KcrIadpDw7rvvRgjBhg0bmDt3bna5oiiKMvL6ZREGMll2mstFKhJhz0//B3QDIQT1jz3OxHu+NOCibzCZ6SAvE+rcCYDh8JJfNm9Uxt8R6+JP65/no8bdJJKZshJCGuR1zmB8qhSfrmE4Nbo7omzf1MyMsw90US4oKsEsrIa2OtKVZyHdObg3vQBON4YxsK6gbVlo+iDnL9y4U63MvSKXhbNnoxuqV5eijKRHHnmE7u5uDMPgxRdf5LzzzhvWdueffz7vvvsu69atG+URKoqiKADpWIy9P/8VxZddQu6c2SO23+LxF1K75QmEblA57QY8/pIR2/dQ+mURtk9FkxqmkcRIOyloq6LB1400PQS7q/GGU+R6nQS8mbqIldV5LF5aSbirhtz8A7NuhKZxwcdux+X2oh1HZ2dFOR0M+xPTsmXLWLZsGXV1daM5HkVRFIX+WYRWJEI61IvZ042dSBBvbMJOZrrGCYcT2zSPuD/bStGw/dlsgBAEmn5stWeGoyXcxpaWPdkAoSdazozWpUwzy8hxGhi6hrTj5Ppr2LPhKeyDyipoQnDJZR8nvfgWLlx6GTltWxHSQuiObH3c7L/s2TDwOd0BtsXWj97A41PTRRRlpK1cuRIhBJ/4xCeGHSAEsk0o9u7dO1pDUxRFUfazEgn2/uLXxOobqP39o/R8tHHE9u32FVEx/XomzLr9hAQIoX8tQm8siKZ1kG+vR9c68Ybz8XSVkduWRyCaoDTXS8DrzDYomX22wZvP/pp3X3ySjpb6fvv1+AIqQKgoHEWQUFEURTkxDs4i1Fwu0uEwqa5uzJ4epJRIKUmHw1Tc+kkmfOFzRyw0nTZj1G59imgoczEkNJ2KaR8nt/isUTuHmcXTWFB1Fg7bR3XbYs4OzaTYcOFy6Eg7gUvbid94l1h0N/tam1n/0Yf9tp9RXsLXzjsLV28LqbZaMFwwjGzJfjQNDBeptlrWbd0ygmenKArAzp2Zmw6XXXbZUW2Xm5sLQG9v70gPSVEURTmIlUxS86vfEqvLXAMaPh+ecWXHtC8pJb3t25F2/3r5vmAFDlfwuMc63DE8s20VoUSE/M4yNCnwsg8X3XjZh2brVNWfRaC3EN1p4jA0Ksbncd0t0+lqXM3qlX8hHg0jgQ1vvaT6KyjKIIbduERRFEU
5MQ7OIsRKY6dSSNsGkcmW01wuNIeBMzfniHUEU4le6rf/lVSiBwBdd1Ex/Xq8gXGH3e5oxFJxnt3wHpP801l4VimQGeelhRchIuXYwkY4BNJOolk1uPVGQNKTMklZEjSdjxrbmT/vQAMSIQROTfD+W6sQaRPb5YT0EBmTtoUcrM0xgNDQ0ibvv7WK+WfNRDvaQKOiKEMKhUIA5OfnH9V25v7sZ8NQl6GKoiijxTZN9v12GZG9NQAYPi8T7/0yrqKio96XtC2aa16lt30bsXAjpRMuPeGdx1PpFI9teo51zZtxR3LwRnPRtHacdGOj46QbXetEpkuwtTQxO8bERQEmFli8+cwvSSUT2X2VVU3mnIs/Nqbd0xXlZKWuzhRFUU4iB2cR6k4nidY2pLT3Pytw5Oai+7yYXd39Oh0PJhFtp377X0mbMQAMp5+q6Tfi8haM2Fhf3b6exz96gagZpTjRycyJ1+N1Z6b25nj9YEqQJlZiN269EYdbEDbTJOI2aDpm+TRS5bPIL8onZdm4Dqo5aKbTmNFepOFAWKkhx6FJiZBDd4GXhgMz2ouZTuNyOkfk3BVFyQQH29ra6OzsPKrt9u3bB0BhYeEojEpRFEWx02n2LXuU8M7dAOgeNxPv+SKestKj3peVTtCwcwWxUAMAPW2byS2aiSdw9Ps6FlJKNrft4Lntr7C7qwbLsslvq0LYAo9Wi8DGwoVOEo+oIUYhEkFLxXq2btlKR8oB+wvUOF1u5l14NeOnzVEBQkUZggoSKoqinET6ZRHaFvRNgxAC3eXE2N8B8uBOx8Hp0wbsJxZuon77M9j7g2tOTz5V02/E4QoMWPdY7Gtv439WL6c+Uptd1unYxofbzmXpvAoACkv8lI9L0LDrDfxejbht0xu3kELDHDeDVMVs8nNzuKy6iCl5/gEXay6nk9vv/nt6wuEhx2FZaT784AMWLFyIrg/9Jy03EFABQkUZYZMnT6atrY13332XL33pS8Pe7sUXX0QIwdlnH19XTUVRFGUgaVnU/fHPhLZuA0B3OZn4pc/jrag46n1lZqQ8QyrRDYAQOuMmX3nCAoRtkQ6e3f4yu7v2EUnGiSYTeMJ5+ML56KILl+zFFgYgsDFw0k3M0YpXpphSHyWu95IMFOEyXFRNmcm8C6/G7VXd1BXlcFSQUFEU5SRxaEdjIQSGmcbs6cl0Lz4oiKa5XJjR6JDZhA6nH013YlspPP5SKqddj+7wHPcYY8kUv3nzZd5reQ+b9P6BQ3FkEnNcM1gyp/805sWXzmN541t0JU1soWGWTSdVMRtvIMCllYXMK81FO8yd3JKCAkoKhs58NE2TXZu3MKVqPA6Hak6iKCfSlVdeydtvv82TTz7J9773PUpLj/yh8dVXX+Wtt95CCMFVV111AkapKIpy5pC2Td1jj9Pz0SYANMOg+vOfw1ddfdT7ioebqd/xHFY6DoBueKiY9vERLVkzlGQ6xat7V7O69gNS6TQ94SThdC8SQUHTJPS0gV+rR9NsLDI3gSU6Gmm81CLQ0VIGptukVya4+dq7KJ84fdTHrSinA1WcSVEU5SQR3rY9m0XYF/Rz5ATRnI5+AULI1Ow7OJvwUA5XkKrpNxIsmEbVjJuPO0Bo25KVazdx/+M/4t2Wt7MBQm8ilwXdFzEjMQWzN8Hat/p3zMstKCBVOpVk6TSiC27GnrqYC6ZU8tVzJnFOWd5hA4SKopzcvvzlL+P1eolGo3zyk588YiOSd999lzvuuAOAvLw8PvvZz56IYSqKopwxEs0t9O4PEApdo/ruuwhMmXzU+wl17qR261PZAKHTnUf1rNtOSIAQYF9PPX/bu4bOUJyWzhiJhERYGkW10wlGCnDQg0PrxJYOkH0hjb5swh7ioghhG/Q4g2yuNIgG1WwSRRmuo84k/MY3vsGPfvSjETm4EIJXX311RPalKIpyKkt297Dj/z2MFYvjChzcIU5g+AMgbRD97+scnE3onzoFgURoB2r6ubwFlE+
5+rjHZkubh197mg+b18P+BiG65WBqdDaFiSI0YWMndmEl9rD1Aw/nXDALfX9DAiEEV19zE3/cUs/ZJTlcVFlIwKUy/hTldFBSUsL3vvc9HnjgAd59912mTZvGF7/4RSzrQI3QlStXUldXxwsvvMCKFSuwbRshBD/60Y/w+XxjOHpFUZTTj6d8HBO+eDf7lv2BqttvJTjj6LLnpJR0Na2lrf7t7DJvsIKKqR9DN9wjPdwhTSucxOS8iaxu34InWo2eShDomoQnmoOQ4Nb3AjZSOECkEWQyCfuyCd200isXkkolCJubeXHX60wpmKDqECrKMBx1kHDLli0jcmAppfolVRRFAWINDex6+BHM7m4QAmmmEE5X9nkjMHgdwYOzCes+fBI96KJi6sf7BQpHgiY0cvIkWmumTGJZrJoJsck4ENjJPZiJ3eh6Gl/QSVwmeO/Ddzl/8YXZ7atzfdx3ziRy3So4qCinm/vvv5+2tjb+4z/+I/sVyF7jXXfdddl15f4aq9/+9rf59Kc/feIHqyiKcgYITJ3CjP/9EIbXe/QbS5tw997sw5zCGZRNvGzEry0PFjcTrG/ewuKKeWjagRvit825BmfTRHbvaUbaMYSpoUkdXevEoXUihYZGAkEaiYaFh361CbUw3lgBzp48NrXsZFdnDVMLJ47aeSjK6eKopxtLKUfkn6IoigK9mzaz679/SrypKXPzxDAy9QcPInMk1jyJzBn43incLuRMB93NG4h019C89/izs6Nxk+21Xf2WfWLmVVToZSzovoApscnoyTrMnlewE9vw+ASaz0FP2iaaX81WOzjgfV4FCBXl9PXd736XFStWMG/evMNe+82aNYvnn3+eb3zjG2M9ZEVRlFNKeOcutn3vB4R37hrwXLS2bsCyYwoQAkLTqZj2cZzuHIoqzqNs0hWjFiC0pc3apk3859s/588bVvDtx1aQSKWzz+spF2ZNAo9uI22JbjkBG7exFSGS2QAhZEKD2v7vJToCGy/70CTkd4wjmkjw4q7XVRxCUYbhqDMJv/vd77JkyZLRGIuiKMoZQ0pJ++tv0Pz8StLxBHYige5y4S4phoMuxiQSuwrIBzsN2iaJIJOhI3UJ00ALuLDiCaxkCl9O5TGPKW3ZvLW+geUb3gbLwXc/fT1BX6aGi2G6mNA8Czuxj3RiN9gJXG4H0uUibFmYeRNIVp6N9OYQcxiEUmly1LRiRTljXH311Vx99dVs3ryZN998k3379tHT04Pf76eiooKLLrqIc845Z6yHqSiKcsqRUtL0/Ap6N21Gc7uY+uDfZ7O12994i8ZnnqP0qsspufKKY5qpd+gMP8PhZcLsT6Hpo3cd1xRq4a/bVrGjrZbeSBIzbdNrbeT1tbO5+rxqAAI5bqbOLKbx3VbSegqPiOJ17MJF5/7iN31jFtg4sDHoK4vTV5tQ0zrwRgtwhnJp6O7Asi0MXfVuVZTDOerfkFmzZnHRRReNxlgURVHOCNKyaHhqOZ1r3kdKidnbC5qGMz8fO20BB+p5yTwBeTqkJeQJbJ+F6JZIBzBDB//+FU0bbZskeNHRd26TUrJpTwdPvPUR9azDdPWg2S6ef2c6d14xCwBfwEVJQQsNuzZjOHT0gIuYJUnlVpKqOhvbm4vb0FhSWcDCsjwMTfXFUpQz0axZs5g1a9ZYD0NRFOW0Ed6xM9PYzuPJNqwLTp9G57traHzmOQBaXnoF/+TJ+Ccd3XTaeKSVttq3MjUHD2pyN1oBwpgZZ9XuN3lz74d0hxOkzMw1rytRQn77ZFoLo/3WX3B+Ne9uXoenfQ8uEUaXsf3PCEDDxonEgYTsTfT9Z4AggY9GItY4CmrOJtdXgD6K06YV5XShwuiKoignUDoWo/b3jxLeuTu7zBEMYJsmdjIxcIMqd6ZhSVyCB6iykT1JmOMGjwQbMCVsSGK5Ish0GuEY/oVdfWuY5W/sZFP3OmLefYAECTmWl4rqVL91L/rYFTz5m60kZJpwzv7goC8
PTYNFpXlcUFmI16EuvhRFUUbKI488wiOPPNKvGYyiKGcOKSXNK1/ANtM48vMxu7poXvkCZjhMw5NPZ9crveryow4Qhrt207j7RaRt0bBzBZUzbkTTRic8YEubDxs38szWV2np6SWRzEwN1tM+CtrPoijhJ+hysLAyr992Lo8TGWvHTQSBhcBComHjxsZBNptQSmS/LEqBjRMHXWh047UKiLbZpC2Jw1B9ERTlcFSQUFEU5QSRUrLvt8uI7KkBQDMMKm+/Bd/ECaQjkQHrx2LNNDa+gCYMtDwHtm1iB1IY0/xkuwwbXkpLL8Z5SQ6GP4A2zABhTzjJyndqeHv3VsK+rVjezJ1Zn+2lonsC+VGT9NZOmHJgm5z8fLRZFxASfmx/PgBnFQW4ZHwReW7ncfxkFEVRlMHcd9993HfffYRCIXJycsZ6OIqinGDZLEKfL9uwrnfDRqJ79qK5Mt2Giy+5iJIrrxj2PqWUdLWsp632rQPLkEg7DaMQJEylUzzy3qNsba4lFjcBEFInt2sqJeFSchwGnkDmuGvf3cfk6YU4XfvL3eiC8aVz6NhbiyYkSAMhfOhCcPBtaduWaNohwT9pACZ5jlridjHV+X4MXQUIFeVIVJBQURTlBBFCUPaxa9jz01+guV1M+Pxn8VVXA+DMze23rpSS1k1vgpDoLi9CCGQa0vEQEhunJx+3t4DK6TfhcPkHHuwwdtV38z/PrKPHvY1EsAkAhzAo7S2npEvHmd4Bdozt63ex8OLzCOT4s+O/5sIL+fWGWqpyPFxWXUx5wHO4QymKcoZJpVL09PSQSAySGT2IqqqqUR6RoijKqalfFmEguH+ZjRmJYFsWrmIXRRcuoezj1w67FqGUNq373qC7dWN2WbBwGmUTLx+1LEKn4STXHSSeyGQPeiMVjOuaSJ5w4vM79o8rTV6gA03uo3aHwZQ5iwCwLYmZcoHzLDDXg3DBcOsuCgHSgbDacLt6ScRysC2JrjIJFeWwVJBQURTlBPJVVzP+s5/GXVqCq6BgyPWivbVEemrQDU/2wk/THWiagW2lcLiCjJ95C7rhPuoxpJ3ddOatxrSTaEJQlCiktNWDN9WIsGP7j6WBR7J591bOO2dRdtsyv4cvzB1Pqc99TMWxFUU5/ezcuZP//u//5sUXX6SmpmbY3SOFEKTT6SOvqCiKcgY6NIvQTiYwO7sQmoadSOCfNIHyG68f9vWYbaVo2LWSaE9tdllhxbkUlp87otd0trSRNuj6gfrUN828kl117cjt5RTbPnweR+YGuG3icTTh1OqwkyY2sH3d20ycOR9dN9ANjU98Zj6vL19PS52Ox5czcKxSEo5ECPi9AwKIUkri0V5Kqzq55KYb0Q1VM1tRjkQFCUfZ7t27+c///E/ef/99Nm3aRHl5Ofv27RvrYSmKcgJIKenZ8BG5Z89BHNTII2fmWUfcrr3uHWw7jcPhyy4XQuD05JOKd6LrLjTdNaxxdIcS5AUPBBPLc4rIz3GR6nRR1uwlEGtHl/G+g6B5DBL544hXns1HopBzpUQ76KKrzK+yBxVFyfjFL37B/fffj2lmppANN0CoKIqiDO3QLEI7mSTZ0Zl5UohMoDCZGnZWnZmMUL/jGZKxjv270CibeDk5RTNGdNx7Ouv5zXvPkG4v41u3XY/XnckU9Gk+pjXPJeEy0YRA2kk0uxaX3ohD2HBQ05G8ojLMZALdm5nJEuluoLOlBo/Xh8M5MHwhpURooDv0wYOd0kdnSw3h7gb8wckjer6Kcjo6qiChuvA7elu2bOH5559n0aJFSCnp7u4e6yEpinIC2KZJ/WOP073+IxJNTZR97Nphb9uXRagZbqS0sNIpkDYIDcPhxeHKIdq7j2hvLf7c6iH309Yd47m39rJ9Xxf/8rlF5O8PFAbdAc5ur6Z772YMOrKXZcJlYBaOI1Z5NlZOCQC9SZO2aJJS/9FnLCqKcnp74403uPfeezPZIFLi9/tZsGABpaWluFzDu4mhKIq
iDHRoFiG6ln2vNbwedJ+P8LYDnY4PJ52Ksm/zY6TNTOdgTXdRMe3j+IIVIzbeUCLCb959nnXNm7EsG41OXnzvLG6+KBOEdDh1pp5VzKa1e5DJvbiMJlxubX9QTyCEoGrKTKbPv4DcwpLsfqWUbFrzKulUEofTRdpMDTi2lCBti7RpDhozFZpGOpVk05pXKa2apGbCKMoRDDtIWFOTKbRfXFw8aoM5HV133XXccMMNANx77728+OKLYzwiRVFGmxkOs+83y4jW1gHQ9trr5J0zH3dp6RG37csitKwU0oyDEKTSEIoJgl5J0OFF052Y6Tjtde/gyxk/4GInljBZ9V4tqzc0kdA6ifr3sPytIF/42NzsOvkGRNhfM8yhYReNI1I1FysnM0YhYG5JDkuriggMctdWURTl//7f/5v9/v/8n//DQw89hNutbigoiqIcj8FqEQrDgau4iHQ4giM3FwmY0RjNK18gMG3qYQNfusOLL3c8ve1bcbiCVE6/AZcnf0TGalkWT6x7g1W73yKZTmaXu6NBuqLhTJbf/rHNnl/Kjvd+j8sLmp5pO6JpGtXTz2b6/CUEcgeW4bEti0hPF4bThZlKDngeyHQ2ttKkU4khMysNp4tIbze2ZaEb6rpWUQ5n2L8h48ePH81xnLY0TdU9UJQzSaKlhb2/+g2prh4AdJeTqk/fMawAIUCkp5bezh3YVgqBAAnptCCZdhJNagT3X2zphodIT02/bELLsnl7YxMvraklkowR9e0i7m5AFxB2bQfmZo9zyQ0f4w8Pb8LKKyBUPRc7pyx7YTU538dl1cUUeVUmkKIoQ1uzZg1CCG699Vb+7d/+bayHoyiKcloI79hJaMs2dI+nX/BPGA4ceXmZ7yGTTbj1yNmEQgjKJlyKbrgpGHcOhsM7IuN8a8dW/rxhBb2pAzPlHEkvE3tmUyBzWFhU1m/8/qCP2YsXsXvTh+iGwcSz5jN93vl4A0N3btcNg6vv/CrJeGzIdcy0yZtvvsnSpUtxGI4h13N5fSpAqCjDcMr/luzYsYNVq1axdu1a1q5dy7Zt27Asi+985zt84xvfOOL2TzzxBI888ggfffQRqVSKyZMn86lPfYoHH3wQh2PoNxlFUZRDhbbvoPb3j2IlMnc6nblBJnzh83jKxw1rezMVpXbL41jpBAINBNjoRFIGmiZIpiWJlIXbZfTLJvQGq9ha08Wzb+2lrTtK0tVKJG8HUktSltQpaUwhe+pJX5LG0DNv+/5ggNxr7mRPXMsGB0v9Li6vLqY613e4YSqKogAQj2dqmX7sYx8b45EoiqKcHqSUNP71GczeXjS3G93r4eB6fQfTXC7MaHRANqGUNql4Dy7vgWxBoemUjL9wRMZY39HJT15fTkN874GxpHUqemYwLlWK2+nATvew5uXlTJr2efxBf3a96fOW4HC6mXr2ubi9/sF2P4A3kHPYQKJpmjg8AfKKytTnd0UZAad8kPBnP/sZDz/88DFt+8ADD/Dwww9jGAaXXnopfr+f1157ja9//es899xzrFq1Co9HFehXFOXIOt5+l8blf0Xamdqt3spyJnz+czhyhr6oOVi0t566bctJxbsQ+2uz6A4vvRGNaDhET0sPuaW59DgNSp16Npsw1LWXR595jfX7DCwtTji4jZSjnaK0ZFyDhTMWRRM6RnsXvZ1hCorzsse8/KzJ7F1fQ9BlcMn4Is4qDPZrUKIoinI4lZWV7Nq1C0NlZiiKooyItr+9TveH6wCQqRRmby+OnNxB1xVCDMgmtC2Txt0vEAs1Uj3zVlzegVN4j9c7TWtoSmZKkWELikLjqY5PwOdwY4tOzPBOhNWB1++kYfdGps8/P7utL5jLnPMuG/ExKYoyck75ubCzZs3in/7pn/jjH//Itm3b+MxnPjOs7f7617/y8MMP4/f7ee+993jppZd46qmn2LVrF7Nnz2b16tV885vf7LfN7373O4QQR/z35JNPjsapKopyEpK2TeNfn6XhqeX
ZAGHO7JlM+uq9wwoQStuire4darc+RTLWgUSC0HG4c7HwEEuahNp6SfSmCLX1Ek+aJFIWAJruBCxy2ErMU0NX7tv4RCuzm5OM3xPDFUtjaA50oWG6/Wysbep37CKvi9vPquQr8ycyqyhHBQgVRTkqF16YyUrZuHHjGI9EURTl1CalpP3td9j3m2VI2850MNZ1DO/hpwZrLhe2maZ55QukkmFqtz5BpLsG20rRsPM5pG0d99jMtN3v8bXTLqLA5yMYKWJe2xJmpKbhoRcztBor8jYeZw95hV7cHgf1uzar5qeKcoo55W/9fvGLX+z3eLg1AL/3ve8B8NBDDzF//vzs8sLCQn76059y4YUX8pOf/IRvfvOb5Oz/oH/TTTexePHiI+67vLx8uMNXFOUUZ5smkT17so+LL7mIso9dgxjGe5GUkvrtzxAN1WNbKWwrha45cXryEEKjvTdGPBIjETLRNI1EyCQeidHjchyUTeimwNdMkbueqrY03pCFsDR0zYEmBGlXLvEp8zDLJrBFuLjQtjEOGtukPDW1WFGUY/PAAw+wbNkyfvOb3/D1r389e72kKIqiDJ9tmjQuf4a219/EiscRmobuduMqyAct0+BD5kjsiaDtBdF7UJ3CvmzCuu3see+34MwE5DTdSemESxH7tz8WiWSalR/s4J3te/jm7VeR48/UqvY7fCwILSUcTaPZrVjRdUirB7fHgTfXi6Zr+IK5TJ+/hAnT56puwopyijnlg4THorGxkQ8++ACAO++8c8DzF1xwAZWVldTX17Ny5UruuOMOAHJyctQFsKIo/eguFxO+cDe7f/wIJVddQcG5i4a9rRCCYOFUIr11pFNRhGbg9OQjNI1EMk08aRJuD2HZgpTTjzMVIdwewu3zHlSb0EXr3giV7SlicTe6MNB1DcsRJDxhLsnKSeiGjgB6EiY1PTGm5A+vBoyiKMrhzJw5k5/85Cfce++9XH311TzxxBNUVFSM9bAURVFOGWZvL/t+9wci+2oxe3uRUuLICeLMzaWvFqFEYlcB+WCnQdskM83t9hMlLuQkjUR3C+7i4gMdjI9xqrGZtnnro3qWf/QG3cYuhFNnxbsTuPOK2ZnjaYKqEg+b9z5P2grjcBr4c73ohk4wr5AZ51xA1ZRZ2Q7GiqKcWs7IIOH69esByM/PZ8KECYOus2DBAurr61m/fn02SHiiJJNJkskDLd5DoRCQKcpqmuYJHYtyeup7HanX07GR+zsM9xE+H5P+6UE0h+Oof6be3Kl4gzsxkyF0wwtCIG2bnkgym0VoGh7SmgNhuEmE4sSjUXrcDkocmaYjHrcHQyRxGQJLzyFUPptY9WScLgMNgZSSyqCbS6sKKfO7Tqv/d/VaVk4Xo/1aHq39fvnLX6awsJB77rmHqVOnct1117Fo0SIKCgqGNbvjrrvuGpVxKYqinOyi+2rZ97vfY4bC2MkkdiKBMzcXR25u/xVzgXwgvf9rLtCTeUqWSOQEEFLHiifQLQ/Vs27DcB79TBHblqzd3spT771Pk9iI5ch0FBY27ItvQ8pZ2evf+YunsuODFO6AB6fLIK+wlLMWXkj5hOnDmk2jKMrJ64wMEtbUZAqtVlVVDblOZWVlv3WPVSwWY+XKlQDs3buXWCyWrVm4cOFCxo8fP2Cb//iP/+Db3/72gOWrVq3Ce4S6FIpyNF5++eWxHsIpx9HUjGvPXiLnnQtHWazfEHGcWoiYVXLQUkmeYzduPY6VsIAYaQsSSZtIey+WLUgbLoSUpDUXlhkn0taLz++iszuJoUOw0EVXt4NI2WT25c1Hc2qIRIx4AjykmWBHyO1Ksn4frB/JH8ZJRL2WldPFaL2WY7HYqOwXIBKJEAgE6Ozs5Mknnxx2bWYhhAoSKopyxur+cC1mKIyUEjseR3O50L1e7FQqu44EKNdBCEhIcAvscgltFkzQoHL/TWsBsimJ3RxDX3J0nxellGyt6eLp1ZupMdeTdLZlnrAFhdESykPjmV9
Y2O8GuT8nwIKLltLV1shZCy6ktGqymlasKKeJMzJIGA6HAfD5hr7D4vdnpuP1ZfEdq7a2Nm655ZZ+y/oe//a3v+Vzn/vcgG3+5V/+hX/4h3/IPg6FQlRWVnLllVcSDAaPazyKApmMkpdffpkrrrgCh8Mx1sM5JUgp6Xj9DVp3ZuoPju/spuqznxl27cHe9k101L+DlBZlk2fgz81kMdt2mpoNjaQSfVk+klDMxErESYRM0oYHNIFmm2hmEtuWxHtMrEQM4fHj8zgAQXzmBbTJcvK8uQgEXofOhRX5nF18encsVq9l5XQx2q/l472eGcq9997LL3/5y+xjVaBeURRleMbdcB3xxibQNeL1DaQ6O7ET8f4rFeiQ74GUDTaQkpAvoNiEchfsb5pHrYnYlcYs7kKm04hh/h3pCiX4/Qub2drzETFvDdJpg4TcRD7jeirJTfVC6j12vi9ZcO4scgsPlN46+/zL1ZRiRTkNnZFBwhOpurr6qC+YXS4XLpdrwHKHw6E+BCsjSr2mhsdOp2l4ajld73+YvUvq8PkwdB3tCNmElhmnqeZlIt2ZrGQhBOH2zeQWTtm/LweT5t6Flc5cFG7e086jb26mSr6N3zRIuZwYiSi6nQkiSgRpS7Jlt0mLZw5fumEOE8tzKEla/HprN7quc155AeeW5+PSz5zpHuq1rJwuRuu1PBr7fOKJJ/jFL34BZN7bLr/8ci644AJKS0sHvY5RFEU5k0nb7ndzWXM4mPDFu9HdbsxwmHQk0n99KWlqfIlotB7D8CFEpoRMOh3Fd+UMfP7xdHd9REHhOQQvmAKA4Q+gHcX7fSjdxUbzJZLeKAB+M0BZZxn58Sh6egPIzPWny+2gduc6cgsvOTB+FSBUlNPSGRkkDAQCAESj0SHXiex/k1aZe4pyZktHo+z73e+J7DlQeqDs2qsovuzSI06riIYaaNr9EunUgYu+/NK5FFUt6betwxXA4Qpg2zY/e3YD6VQrHiuJpek4UxHEQTcapKZj4cKZSBPTWln5QS//OGUyJT7BTdN9VAS9BJxn5Fu7oign2H//938D4PV6WbFiBRdddNEYj0hRFOXkFKurp+6xv1D92c/gLjlQdsbYP7PNmZu7v1nJAZGefSRSrRguH7px0I0XHRKpVkrLL6dg4vyjalAST6bxuA5cJxb788jNMejpclPWWURhKIHT2gZYAJmmJAEXE8+aQ/W0mUd/4oqinHLOnDSTg1RXVwNQX18/5Dp9z/WtqyjKmSfZ3s6uHz+SDRBqhkH1XZ+i5PLLDhsglNKmvf5d6rY+lQ0Q6oabimnXUVJ9EZo2eBBvZ10PSTPNJH0Xukyj2SlEphoNUgjSDg9JVxDT5QUdyu3dbNnbwY7abgBmFAZVgFBRlBNm27ZtCCH4yle+ogKEw5BOp5kzZw5CCB577LGxHo6iKCdI14dr2f2Tn5FoaaPmN8tIx+JH3EZKSXvdO9h2Gk13Ylsp0mZmO013YttpOurfwenJH9YYwrEUT/9tN//2q3fp7D1wfK/Tw8LU2czc66asey9OqxGw0A2N3Hwf884/n+s+dz+Lr/wEOQXFx3T+iqKcWs7IT5Pz5s0DoLOzk5qamkE7HH/44YcAzJ8//4SO7WCPPPIIjzzyCJZljdkYFOVMFdm9h32/+332Qs4R8FP9+c/hGz90wyMAMxmicfeLxMPN2WXeYAXjJl+Fw+nvt65lSxLJND6PAyklz7+9lzLjHXJjUUBkilRLsHQnaacXeVBg0jJcBFIJnO69PP92IdPG56mC0YqinFCp/cX1Fy5cOMYjOTU8/PDDtLe3j/UwFEU5QaRl0fTcCtrfXJ1dZvh9SCt9xG2jvbVEemrQDTeWGcvedBZCQzdc6IaHSE8N0d5a/LnVQ+4nkUrz+toG/raunjBNRL17eeZtP5+/dl52nTwrTpedaVaiaQJ/0MtZCxYzY/75eAM5Q+1aUZTT1BmZSVhRUZG9oP3Tn/404PnVq1dTX1+Py+X
i2muvPdHDy7rvvvvYunUrH3zwwZiNQVHORLG6evb8/JfZAKG7tIQpf/93RwwQArTUvH5QgFBQVHk+VTNuGhAgbGgL86M/r2PZyq1IKdlR282WvR0UproRkAkQCo2Uy4/p8vULEALYmoGwbcqtXf2yCRVFUU6UqqrMe2IikRjjkZz8Ghoa+Pa3v80PfvCDsR6KoignQDoSYc/Pf9UvQFh4/mImfeUeHPtLXw0lm0VomZipKOlUBFtKUmmbVDLTqb4vm7C97p1B69+baZs31jXw3d+8x4oPttDmeZ/e4EdYRoRme3O/bZZ+7EoMhwt/0M/iy6/klq/8M+dcdI0KECrKGeqMzCQE+Nd//Vduuukmvv/973PNNddkMwY7Ozv56le/CsDXvvY1cnLUm6OinGk8FeXkzDyLno2bCU6fxvi7PoXudg9r29IJF7N3YxO64aJ88tV4AmX9njfTFi++W8vf1tYjpcTSEry/pZi3PmrEKfbitNLYwsByODOdjQ/NDpQSIUFqIpNNmIzj9KpsQkVRTrzrr7+eLVu28Oabb3LXXXeN9XBOag888ADXX389S5cuHeuhKIoyymINjez73TJSXT0ACF2j4uabKDjv3GFtH+mpJdS5E8tKIjK3jrEsSSxlgOXE45UIIQbNJrRtydrtrbzw7j46wxFi3r3E8mpBSEpiTkqb07hyW7CljS4yjUe8Ph83fO4L5JeU4XR5RvznoSjKqeWUDxKuW7cuG9QD2LNnDwA///nPef7557PLly9fTlnZgQ/rN954I/fffz8//vGPWbx4MZdddhk+n49XX32Vnp4elixZwne+850TdyKKopw0hKZRdefteKveoeiiCxGH6d4mpY0QB5KyHa4gldOvx+XJRzf6BxZ31/fwl1d20NETR5LGcmzC0BrZ15vPlr1dTLB3IRAk3QHQDkn0lvRrYCKkxDKc6Onk/mzCieyo7WZ69fBq0yiKohyv+++/n1/96lc8+uijfO1rX2Pu3LknfAw7duxg1apVrF27lrVr17Jt2zYsy+I73/kO3/jGN464/RNPPMEjjzzCRx99RCqVYvLkyXzqU5/iwQcfHLGO0C+++CKrVq1ix44dJJPJEdmnoignp+5166n/yxPYZmZKsSPgp/ruu/ANs869mYxQu+Vx0uk4Ag0E2Gg0d1p0NrWTW5ZHrt+F22Wg6U7MdJz2unfw5YynK5TgV89sprkzQtLVSiRvJ7aIUxzXKWuxcSZ60DUdvTNNV2sPRWUHGp6UVk0cjR+HoiinoFM+SBgKhXjvvfcGLG9oaKChoSH7eLCLsocffpglS5bwyCOP8M4772CaJpMmTeKhhx7iwQcfxOl0jurYFUU5OZihMGaoF29FRXaZ5nRSfOnFQ24jpaSndSPdrRsZP/M2dOPA+4U3MK7furGEyXOr97JmU2YacspowWd+QHFHHN108K7+KgYBAsk4luEaECAUUsKhM0kkcFA2ocO9R2UTKopyQpWUlLB8+XJuvPFGrrjiCn7yk59w6623ntD3oJ/97Gc8/PDDx7TtAw88wMMPP4xhGFx66aX4/X5ee+01vv71r/Pcc8+xatUqPJ7jy6pJJBJ87Wtf41vf+hZlZWXs27fvuPanKMrJK9neTt2fHkPamYs23/hKqj93F45hzkwLde6icecKUvEuBCKTLejw0BMWdLV0EO81EVoP3bkBypy+AdmEOYEqIlYvPTkbMI1OihKScW0WjlgsExzUMx/9DZeHnq7OfkFCRVGUPqd8kPDiiy8etA7DcN16663ceuutIziikaMalyjK6Is3NVHzq99ip9NM+fu/w1Vw5Ew8K52gec/LhLv3AtCy7zXGTbpq0A/GG3e389RruwlFk1haAltfR3lPE56EBrYDhKCgVZAndyMAy3AipERCZoLJYd7e+mUTyt109MwmbUkchgoSKooy+j7/+c8DMHv2bP72t79x55138sADD7BgwQIKCgrQDs2IPoQQgl//+tfHNYZZs2b
xT//0T8ybN4/58+fzve99jz/84Q9H3O6vf/0rDz/8MH6/nzfeeCNbdqajo4NLL72U1atX881vfpP//M//zG7zu9/9jrvvvvuI+37iiSf45Cc/CcD3vvc9nE4n999//zGeoaIopwpXURFl115D0/MryV+0gIpP3IQ2zIxkKW06m9aRjHchkQhh4HDnYFo63d3tJEMmmiZIhkx6usPkBdw4DK1fNmHlzArM4o/IqW9lXLOFM2ahoaHrDgRgOIOcteBCzrv8QgzHKR8GUBRllKh3h5PYfffdx3333UcoFFK1ERVlFIS2baf2949iJTMdOhufWs7EL3/hsNvEQg007n4p22UOQNfdcCCsl9XQFua3z21BIkk6a8iNb6Kww0Sz9Mz0EaeTnrI52N4gRdv27c8iFJm6g307OVK8b382YU4yztIFKRzGGdmPSlGUMfC73/0ue3Ok72tbWxsrV64c9j6ON0j4xS9+sd/jIwUm+3zve98D4KGHHsoGCAEKCwv56U9/yoUXXshPfvITvvnNb2avwW666SYWL158xH2Xl5cDUFtby//3//1//PGPfyQajQKZGTAAsViM3t5edX2nKKeZoksuwl1WSmD6tKPKqhZCI7dkJt0t69F1F05PHgJBW0+U3rYepA2GQydtWvS29dDq96JpGqUF3mw2YSLcwFl1aTobkwipoQsDTQg0I5cpc8/nwisvwOVRM+UURTk8FSRUFOWM1P7W2zQ982x2Soi3qpLK24fOKpbSpqPxfToa3qcvvU833JRNvIJA/uB1XCqKA0yf6mJTzQtUd3TjTGkgdaShkwiW0FN1Lu6CArwfPQMCpK6jWeZRnYcgs50wJRtXv8JFCy8d9odkRVGU43U8sznGqjRCY2MjH3zwAQB33nnngOcvuOACKisrqa+vZ+XKldxxxx0A5OTkHFVQr6amhmQymc0qPNgXvvAF7r//fiKRyCBbKopyKuj5aCNmKEzRhUuyy4QQBGdMP+K2tmWSNqM43bnA/jI2LRvRDTcOVw5CaCSSaXq6wyRDJrquZaYX6xrJkEkkHMEX9NITSVCY481mE44rnUlvYyuapiGMfKqnn8uF1ywhEBxeAz5FURQVJFQU5YwiLYvGZ56jY/U72WW5Z8+m6s7bh5wSYiZDNO1+iVi4KbvMGyxn3KSrcbj82WXd4QS5flf2g28qmSBV+zITW3oQtobUNCy3m55xZ0PFDAI+J5pt4YyEkJqBZpuHnV48JAG24UD2JEmlkrjdqjOdoiijr6amZqyHcEzWr18PQH5+PhMmTBh0nQULFlBfX8/69euzQcKjNXfuXP72t7/1W9bS0sIdd9zBN7/5Ta644ooht00mk/3qafdlIJqmiWke3c0kRRlM3+tIvZ6OnrRt2la9QvtrryM0gZGfh3/qlGFvn4i20lLzCkhJ1Vm3oekOor11hLtr0BweEAJp23SHE9ksQuEQmTkrmkCaEGrvJZWKoRUHQXrQDDfh7hrOXnQTtbv2kFM0gyVXLKCwJACc3v/P6rWsnC5G+7U83P2qIKGiKKeN8M5dNDz5NBWfvJnAIBdrViJB7e//SGj7juyykssvpfTqKxFDZN+Fu/bQvOdlLKvvw5qgqHIxBeMWZLsaW7bkjXX1vPDOPm65bCqLZpYCoGk6rh6NNBq20yCWU05swrl48nPRNI3phX4uKi/k0S1XsC9HotkS7RiycmwhsDXBhJCGw3Ad9faKoijHYvz48WM9hGPSF9ysqqoacp3Kysp+6x6L3NxcLr744n7L+hqXnHXWWVx44YVDbvsf//EffPvb3x6wfNWqVXi93mMek6Ic6uWXXx7rIZxShGniXbseo7U9u6xt+XLis2cNY2uJT2/FZ7Rkl9Q1/5ZIupw8x07cegRLOoEYaQt6u2Mkwya6LhD7C1X3XSXGO1OYEQvd1PA5NHSho4sU2z56ntyJU9GNOO+vfWsEz/zkp17LyulitF7LsVhsWOupIKGiKKcFKSVNz6+gd9N
mNLeLqQ/+fb+pbMnOLmp+/RsSLW0ACF2j8pZPkL9o4WH3ayZD2QChwxlg3JSr+3UvbmyP8NiqHTS0hbG0BE+sXseMCZcT8DoxHA4mXvpx1v7tKcLl89ErpuBzGowLuLl8QjFVQS9SSjwLJkJXGK9DP+bzj5gW7ikBNF01LVEURTmccDgMgM/nG3Idvz+TJd6XwXei/cu//Av/8A//kH0cCoWorKzkyiuvJBgMjsmYlNOLaZq8/PLLXHHFFTiG2VzjTJdobaV+2aMkEynIyUFogtKPX0v+kvOPWD4hFe+mpeYVkrE4kClb4PIWUzrhcgxXgJoNjaQSfVk+kt5IknB7GGyJ5tAAiZW2sdISW0qQYJs20a4E7ikFOB0G4CTo8XPu2VehaWfOx3z1WlZOF6P9Wh7uNc2Z8+5xClLdjRVl+MI7dhLeug3d4yG8dRvhHTsJTp+WfT5WV5cNEBpeD9V3fxb/pMFrCR4sr3Qu0d46hGZQNvEydCNT08VMW7y0ppbXPqzHljZxZx2B+G70YADEJdntl5wzm/fiOm6HQY7bwSXjizmrMIC2/2LSkpJQ2sLtMEgfx/m7HQZhy8KSEmOM6nwpiqIoh1ddXT2sOo4ulwuXa2BmuMPhUB+ClRGlXlPD07tpM3V/egwrmUIIgeHzMv4znxp05srBpLTpbtlIW91qpLT2BxMFhRWLKBy3EKFlbhBPmnsXVjoOwN7GXlY8u5KS3ma0tJtU2kbIvgCioK+rnWkJwsmZRPUlzFqQyYzWDS8O15lZdka9lpXTxWi9loe7TxUkPImp7saKMjxSSppXvoBtpnHk52N2ddG88gUC06Zm7+zmzZtLsrWN7vUbmPjFu3EVFQ26n0S0FY+/NLtMCEH51I8hhJ7d1+6GHh5/ZSft3TFMI0Ra20J1Ww9uM4EZtVhT/z5XTL0IAI9D55oZFSTSNovG5WEcMq3Z0DTunT+BqHn8NwN8Dn3A/hVFUZT+AoFMja6+jsOD6WsoorL2FOXMJm2b1pdfoeWlV7LLPOVlVH/us7gK8g+7rZkM07znZaKh+uwypzuXcZOvxuMv6beuwxXA4QogpWTF+/twJnajpUHIFCCR8sANYIkDKR0ILUnU2MP7jeex1Fs0Zs2gFEU5vaggoaIop7xsFqHPl+n85vMNmk1YctUVFF28FN09sMOblU7QvPcVwl17qJpxE76cA7Wq+qZsxBImz63ey5pNzdgiTcy9h+LuZgoivaCB5XKgCRNXkwOmHtj3/NK8w44/6HIQdKk7n4qinJzefPPN7PdLly4ddPmxOnh/J0p1dTUA9fX1Q67T91zfuoqinJmann2e9jdXZx/nzTubyttuQXM6D7udbZnUbP4zlhk/sG3p2RRXLkHT+1/z9UaSPPfWXi5dWEkokqKmfi2TEnGElICA/QHCTHDQiRQCNEA6CKZi7Klfy47ayUyvPnzQUlEUZThUkFBRlFNavyzCQCbjQ3M6SXR0UPuHPzLru9/O3lkVQgwaIIyFGmnc/SLpVCZzpGn3S0ya+7kBF3EvvruPNZuaSTra0M29TG7sxSHj2E4dqWtYzgDJqecRrTjyNGZFUZRTxcUXX4wQAiEE6XR6wPJjdej+TpR58+YB0NnZSU1NzaAdjj/88EMA5s+ff0LHdihVekZRxlbB4kV0vfc+tmlSdu01FF1y0bDe9zTdQUHZfNrq3sZw+hk36Yp+N6Ah0/hu9YZGXnh3H8lUms7eOCkzTaG1Fd3SsDHQSAE6tnQhhYbUJGTbl+jolkahtZXnVu9m2viFKptQUZTjpoKEiqKc0g7NIsS2SHV0Im2b8PYdtL32N0ouu3TQbaW06Wz8gPaG9+i74NJ1F6UTLh0QIAQ4b14Br+xcRXFzN8FUB9KhY+kOEIJk2Qzk5AWU5/o5q0hNT1MU5fQyVB294dTXO9lUVFSwcOFCPvjgA/70pz/xv//3/+73/OrVq6mvr8flcnHttdeO0SgzVOkZRRlb7tJSqj59B0I
3+s1OGYyUsl+QLr9sPtK2ySudk61p3aemqZcnX9tFU3sEiY2tb2Vzj4nR6WZyIg7SjS10kBoSA7SDexvvP54AIZ3kJOIqm1BRlBGjgoSKopyyDs0iNHt6SIfDSNsGTQPbpv2Ntyi+9JIBd1bNZISm3S8SCzdml3kD5YybfFW2JkxHT4KivAPFn7e9v52JdQ0IPYnldoAA25OLOf1CgiXjuGR8ETOLgtmmJIqiKKeDb33rWwAD3kf7lp+K/vVf/5WbbrqJ73//+1xzzTXZjMHOzk6++tWvAvC1r31NBeYU5QwipaTr/Q/Jmz8X7aAC/zkzZx52O9tK0Vr7Jrrhpbjq/OxyITQKKxb1WzcaN3l+9V7WbG4GwNRb8JkfUtgbJarnYGthdEtDkmloYgsdGPpmjFTZhIqijDAVJDyJqSkminJ4B7IIvViRCKYjinFZHun1PWi9GnpuLommpgG1CcNde2je8zKWldy/RFBYcS6F5QsRQqO9O87jr+6kqT3CQ59dSMCbqTvjL8wBt4WNDkLDrDobY9I8LqkqZtG4PBy6ahqiKMrp51vf+hZ33303Qgg2bNjA3Llzs8vH2rp167JBPYA9e/YA8POf/5znn38+u3z58uWUlZVlH994443cf//9/PjHP2bx4sVcdtll+Hw+Xn31VXp6eliyZAnf+c53TtyJKIoypqxkkvo//4WejZuJ1dVRecsnhrVdLNRI055VmMkQIPDnVeMNjBuwnm1L3tvSwvOr9xJLmFgihqZ9QHlvK66UjrCc5MgwaEmQbuQw43wqm1BRlJGmgoQnMTXFRFGG1pdFaCUS2NEYWBb60iBauRsjnYOxy4PQ9AGdjrua19Nae6DYvuH0Uz75arzBcixb8sbaOl54Zx8pO0XcVcsTrwX4/MfnAHD22TN4bd0MtEg76RkXMm/yBJZWFuJzqrdSRVFOb8uWLUMIwY033pgNEp4MQqEQ77333oDlDQ0NNDQ0ZB8nk8kB6zz88MMsWbKERx55hHfeeQfTNJk0aRIPPfQQDz74IM4jNCY4EdQNY0UZfcmOTmp+8zsSLa0AdK15j4LzFuOtKB9yG9tO01G/hs7mtdllmmZk61sf6i+v7OT9Lc1ILEznNgrCO8mJ2mhWJmNRSg2JE8NOZ25GH4VsNmFaZRMqinL81CdbRVFOSb2bN9P9wVps00RoGqLYiVbmRqYk2jg3oktH9AzsdOzPraa9/l1s2ySQN4mySZejG24a2yM8tmoHDW1hUnobzkQn43p6EWfXApkgoUvXuPyaj7O7J85lE0oo8rrG9oegKIpyhrv44ouPqy7irbfeyq233jqCIxpZ6oaxooyu8I6d1P7hj6RjmS7EutvF+E/fedgAYSLaTtOel0jGOrPLPIEyxk26Cqd78N/TRWeVsHrnOtzmOipa4+iWDlJHINCcZfSkc/HLtUjLOewswj7ZbMJknPauTaStBTgMFSRUFOXYqCChoiinHCklratewTZN2H+nVJ8ZQBg6xCV4BHYlaD0SzeXCjEaz2YROTx6lEy7FtlPkFs8mbdm8sHovr31YT5ootmwivyNBQG9EaGma1m0mvvAKPI5MwelzxhWyoFxdeCmKoiiKopyqpJS0/+0Nmle+gLQzNxrcxUVUf/6zuIuLh9jGprNpLR0Na5DSBjJ1B4sqzyO/bD5CaNl9x5NpvO4DdQ1Dvduo7F6DKyFBZjIFNd2HL+8cFly4kNff/W9Es8RGIDj6zGGJwLAEpWI7qvqNoijHQwUJFUU55YR37CS8bTuOvFyscBijOg8xzg0WoJP5mg8yHwiAbvbPJswpmg7A7oYeHn9lJ23dEdKiGW8oQVDrwGV0wv44YG4iiVMcmPahpm8oiqIoiqKcuuxUivq/PEH3+o+yy3JmzqDqU3egu92DbpM2YzTseJ54pDm7zOUtZNykK3H7irLLWrtiPPXaLpKmxd/fNg9Ny1w3ptstPAkNW9poQsfhncKccy9lwZKJCM3itVUJdBx
oIn1sJyUAHNihJKaZwuUa/DwURVGORAUJFUU56aVjcVpefIn8BfPxVFYe6Gicn4/h92NPE6CBTErSlo0hNYRTIOcASRCaC+vDaL/ahCnTYtmKrfTG2tHineRiEtBq0aQJAjQhEMXjOeuij6Hp6q1SURRFURTlZBfeuYuGJ5+m4pM3E5g6ZcDzZjjM3l/8injjgWBf6VWXU3LF5Qht6BQ8XXch7b4AnqBg3DkUVpyLpmWuEVOmxcvv1/G3D+tJ2xYpRydrNpdw/pzMtOVzlp7PR++/SzyaZMLMS7jwivnk5HkAkNKgrOxm6ve04fYY2RvVR0VCIp6msqwEp1OVw1EU5dipT76Kopy0pJR0r11H83MrMMMRYvtqKb36yv0djX0IIZC5QD5gQiMFbPDNYr69mXFaVyarMA2yEPTc/tmEmoACbwdmZwSf3o471QFkrss0t5fKhZdx9XnnEXA5hh6goiiKoiiKclKQUtL0/Ap6N21Gc7uY+uDfD5gBYni92WxB3eWk6s7byZk964j7FprOuMlX0bhrJaUTL+vXwXjzng6efn033aEEphHCdmwkNxynR1YDmSChpml87M67kdJgXGVuv33bliQZd+HyFCIBjrHMqssDqYQT25LoqiahoijHSAUJFUU5KSVaWmh4ajmRPTXZZfHWVhqe/msmizAQRCKxKwENrKRks2s6rXoRW4xplPIOmhAgQHwEEgdWKpLNJmzpiNLT00O+vRWxv2ukrmvkVE3n6mtvpDw/b4zOXFEURVEyVHdjRRm+8I6dmRvJHk+/G8MHE7pO9Wc/Q+2jf6L8xutxl5YOuq9Q5y6c7tx+U4ld3gImzPlUtvZgZ2+c5a/vYcveDmyRIu7ZQUlnE3nRGALYtPktrpw9D0PLlK0pqygc9Fi6ofHJu84hETOP+2fg8TrQDVWUUFGUY6eChCcxdWGonImsZJLWl1+iCHJ6AAB30UlEQVSl/fU3kbadXZ47ZxbBmWex9+e/PCiLUGayCNPQ7CwknJBU1b1AT9XZtAYKKTM7QYOEnqa7K4lHOND3XzSWTp2Cx20heiyEELj9QRZfdh0LZp89dievKIpykvvGN77Bj370oxHZlxCCV199dUT2dbpS3Y0VZXiklP3K0ZhdXTSvfAHfxAmkQ2FchQXZdQ2/n0n3fnnQ/VjpBC37XifUsQOXt4DqWbdnpxRDplGJmbZ5fW09L79fRyqdJuGsxxffy9T6ELq09q8nyG+S2QDhkfgDLvwBNU1YUZSxp4KEJzF1YaicSaSU9G7aTNNfnyXV05td7irIp/zmGwlMn8bO/3oYK5HE4fZgpVJQroMusA3YxhSCe7fi7W3GbnSwcdJZFMdXIzwCq8KmvaWQgngtLjNzETl12lQuu/Ja3l7ezMRJU7jyio/jcnvG8CegKIpy8tuyZcuI7EdKqRpBKYoyYrJZhPtvJOs+H6HNW9n+vR8gDIOpD9yP4fcddh/R3jqa9rxMOhUBIBnrJNy5i5yiGf3Wa+mMsvLdfaT0LqS+k6qWHjxWDACBQNMcVE1bzOU3Xjs6J6soijKKVJBQUZSTQvOKF2h77fXsY83QKb70EoovuwTN4cA2TVIdnehuF3YiDgU65HvAtGl1FdMTgpJQC7Zu4OltoSU6g0a9iOJUF83jJrDj/CqqN6YoDHWQ6uhEptPMryrmrHsexOtRwUFFUZThkPIYi2UpiqKMkn5ZhIFgdrnZ20t4VxJXcTH1f3mcCV+4e9Dtbcukre5tulsPdDvWdCel1ZcQLJw2YP1xRV6CZTXIHbXkJroR+4sIakIjt3Ail9/8ScrHl43wWSqKopwYKkioKMpJIXfuHNpffwNpSwLTplBx8424ig7UgdEcDib/49cwwz3oupemxpeIRutJWy42paYQaNqGsC0shwfdjBNs3sa6yfNwySSyrZnC1ncI5wuiN1zDOfMvR3NkGpKoAKGiKMrwffe732XJkiVjPQxFUZSs/lmEkI5EMHt6QAjsRAL
d5aT0mqsG3TYeaaVp94ukEj3ZZb5gJWWTrsDhCmDZknXbWjhnegmalsl+7mjuIm/zDmRf9qAQuNw5nHf59cw975zRPl1FUZRRpYKEiqKMCSuZRHcdqL3iraig5MorcJcUkzNndr9paFY6QVfLBrqa1+MNlJNXOodEqhXD5WVHt4eeMJT2tmDrjswFoe7EE24l1NmFv3MPjmQYBLhcGkGcOHPV9H1FUZRjMWvWLC666KKxHoaiKApwSBah309KhGCBA/GRA9mWAiFw5ufjLuuf2Sdti46mD+hoeJ++dsJC6BSPv4C8kjkIobGvOcQTr+6kqT1CPJlm6bwKAIrKCsjJLaK3sw7DMJg653wuvu7jOF3OE336iqIoI04FCRVFOaFSPb00PfscybY2pj749wj9QEHn0isv77eulU7S1bKerub12FYKgHD3XhKxDmw7jWm52GlXEWzejJAWtuZEomFrDpyJXspq3sZ0+tE0G5+WYurU8cxfevUJPV9FURRFOVaqiZ2iHF54x05CW7chNEGypRVtaQ5amQvSEj1horlcRHbuHNDpOBnv6hcgdPuKGTf5KlyefKJxk+dX72LN5mYsESNl7OOpD9pZNPMm3E4DTdO47KZbWLPqOS676WYKSgbvkKwoinIqUkFCRVFOCGlZtL/1Nq0vrcJKZgJ+HavfoeiiCwesmwkObtgfHEwe9IzAFyynt2M7muFmT7eTSNSiNNSCpTuxNQNhmThTMYSUCMtEs9P4fDpzZ5XidpskIo34c6tPzEkriqIoynFQTewUZWh9WYTpUBgpJVqJC63EiTRt9DI32ngvdIMZjdG88gUC06ZmZ6q4fUUUli+ko/EDCivOpXDcAiQaazY389xbe4kmEqT0OoI9YSoTzcQnRhCanT121cRKqu796liduqIoyqhRQUJFUUZdtGYfDU8+Tby5JbvM8HkxAv5+61npFN0tG+hqXod1SHAwt2gG+eMWsnvzs8QSKcIpyU572kFZhC6MZBR9f8ZhZiuJ5XDTMvFchGMPth2lve4dfDnjVVdNRVEURVGUU1hfLUIj4CcdjqCd5QNDQ0sK8AjsStB6QPf5CO/ZTmj7dnJmHOhUXFi+iED+ZNy+IhrbIzz56i5qmntIa+04kl1UhLrw0AqAv86iqaeZCYXjx+hsFUVRTgwVJFQUZdSkIxGanl9J1/sfZpcJAQXnLab02qsxvN5+6zfuep5ob/1BSwQ5RTMoLF9EYxc89fJayrXdpG1okd5sFqGtO9CEQMi+6VgSqRtYugNHMkJnzGaP7mRmnkWkp4Zob63KJlQURVEURTnFWMkkdjKFEfAfqEWYnwd5Gto4D1gCBGAC+SBzQRguZKVG/btPE5z+r9kbxULTcXkLeebNPbyxrgGTMCLdTGFPjICsR8MEwDB0yirKKDT8Q45LURTldKGChIqijDhp23S99z7NK14gHYtnl3sryqn45E14q6oG3S6/dN7+IGEmOFgwbgH72iRPP1fDnoZuziv/kLRPsIkp9Fhegi1bEdJCak5AIg0nmHEshwepO0FKNDNOsGUbO/0zmZTaiSFSKptQURRFURTlFCKlpGfdeppXrMRTXk7BkvOzHY0RAm1mAHSQKUnasjGkhnAI5EzABmHrJIxOOjd/QOHsRdn9CiFImEnS1j4CoRQ5Zh1OQgBomiCYE2Dxpdcwbd5iNE0bo7NXFEU5cVSQ8CSmilUrp6pkRwcNTy1H2pli0LrHTdk1V1Fw/nkITcO2UnS1fIQ3WI43MC67nS+3msKKcwnkT2NXk8Vjy/dS3xrG1EIYdgcdjkI2mjNwtNXhb12HYSaQhgM0gQCk7iCtG5l0RQAhkIYDb6iF1ugM6nQHUwJOUslepLQQQr0FKoqiDJeUcqyHoCjKGShWV0fj8meJ1tYBkOzuJVbfkMkiDAQhF8gHTEjZgpTuwaWlcOqAF4gDaMj6OB2tqymYtTB7o9hMWTRv2U3x/9/encfXUd33/3/Ncjdd7ZI
tS5Z3vOAFbIPBYAMGggOEhKWUZg/N0rSQbxJo+0hCQ9IEmqZJ+v2GtE5/oWkS2oQ0gQBpErOVJWAWY2NjMDaWd8uWrF26i3S3mfP7Q/a1hTfZli1Zej8fDz3QnTkzc+bqcP25nzlLexMFpgELg2VZRAoCzJgznwWXX0tBkeYDFZGRQ9+QhzBNVi1nqvDo0Yy69BKan3+BsvPmUfP+6wgUF+F7Gdr2rOudczCXoqC4lgkz/yR/nGVZjKpdyKPPb+GFtbvJOp0Y00zQG01u1AQaGrZS3PESGA833Y3t50i7BeC/q0dgn++xQVyvm8I9m3jeO5+pk8czeXIttq2PPxGR/tq+fTsAo0ePHuSaiMhIke3qonH5E7Sver3P9kh1NfGN+3sRgj8OsMFPG7p6oLWljbETIgRwepOBLlgbwNpj0ZzYQPPvVnDpB3oXzgsEHUY5TWD2YIBgyKVmbA0Xvuf91EycdvpvWkRkkOlbsoicFGMMXW+tp3jm2djugY+UqvdeRfGsmRROmdybHGxYTVvD63i5VL5Md2wP6Z4OQpGyPuccNx5Sb24knCrHq5xEJLWLkvqV2H4OAMvPYftZPDsIWO9KCr6bhW8HKYg34ozu4cFXd/KtGdMH8B0QERn+JkzQZP2DQaNKZCTys1la/vgizc88i5c+sCBduGo01R+4jr2PP4HxfexQqE8vwpwDHR0JErEMe/dA4fRCnBxgIJvyaE/kIOPTvvx/6FiygLLiMABXvP86HvvZFgoLg8y96HLOPv8S3EBgMG5dRGTQKUkoIics1dTE7t88RmLLVqrfdzVVV16R3+eEQhRMHHfY5CBYlFROp3D0fF7ZkKCixGPOWZVAb9Jx/cpmQqFJRAKNlDSuw/azWJZF0HVIZXIY28VzguTcaL/q6dsubi5DefPbNIy6kA3bWpk1ZdRAvhUiIiIDTqNKZKSJb6qj/qHfkGnvyG9zImHGXP1eKi9eSHzzloPmIjyoF6Fn6M56JGJZbMciEcvS0WYoDxkIW6TH+uxtqqa9dCxjEm+y8aXXuPiaSwGoHl/N+z74MUorqyguV3woIiObkoQicty8dJrm/32G5udfwHg+AE1P/S/lFywgUFSEMT7tjWv3JQd7+hxbXDmdaOV8Vr7TzQtP15FMZRhVEWTW5Aps28KyLFqLbaq3r8D20liWRSjgUloQYlemmI6qWmp2vohvhw7MPXgsVm9vwmi8kfDoBD9/YQ3fmrxUC5eIiIiIDCHG9/MJQsu2qLj4Isa89yrcaBRjDI3LH8dLpQmEI/jRHJQ5kDFkMhZNjT34viEQsMlkDE17U0Rrw9gZC6c6QnpMFCdVR3vIZtQryzFXX5KPBcdPmz2Yty0iMmQoSSgi/WaMIfb2BvY8+lsyHZ357cHyMmpvuoFAUdG+LRaJju19EoTFFdOJVMxjxdsJXn6yjlQmSybYQqpwJ10Zix2Ns5g8thSA6y+YyW82PU8wnaMkEuTs2fM569yL+drjbzGmZSWW7+EFQnAcOT7fcXGzGcpb3mbvqAtJZ3KEQxpKIiIiIjJUFJ89g+IZ0zG+R8317ydSXZ3fZ3I50l2tOHOK8IM+FFpg2ZiMT1ePS7wrh23bGGPj2D49yTRd6SJClk2sNY7tdGBbAVyypDMGk8thaVixiEgfShKKSL+k29rZ8+hviW3YmN9muw6jLl/CqCsuxQmG89sty6Ky9kJ2bXyE4oppuCVzWbE+wauPbyLreaSDzfQU7SAai1CZnEIwUo8f7qR3YhmYOqqYRZdfjdW0hXMXXk5RaQXJnhSRXCfRWCO+EwLLPr4bsCx8J0Q01kikrBPP9wAFhiIiIiKnm/E8Wle8THL7diZ84mN9RndM+MRHsYPBPtsyqU7aGl7HXVqJ45Xg+1l8P4NtB2k2KVp3t+H7Bjdgg2VhOTZe1qOpoRNDbw9Fx4Wi8gBVoycx/5JrsJUgFBE5hJKEIiNQYvMWiv73ORJTp1E28+xjlm9+9nn2PvEUfi6
X31Y0bSo1N15Hj7+Xbev/i5opSyksnZjfHy0Zx+RzP8FjK5p4bUMdnu+TDu2lu3AHBYkQVS2TcKI9FKXXEWhP0rx9N2dVHDj+knlzgbn51wXhEAvt3bRYHm44DNZRVys5POOSS6W5yN5NQTh0/MeLiIiIyEmJbXyHht/+jlRzCwBl69+mZM6B4b5O6ECMlupupW3PamJtdYDBciywA3ipBGDIECQR66I7kcZ2bKz9D5FN7wiY7mSaYNDFCdgEAhaTZ4zlovd9AcdxTuMdi4icOZQkFBlhjDE0LX+CQONempY/QenZM445N5+XSuUThIGSYmo+cC1+Feze/Xty2W4AWnevJFoyoc+5QpFS0tk9JAN76I5sI9wTpHrveJxIlqi1gUBXEgsL23XYu+ltOH/xEevgex52uptIpIBjLGd8ZBYEIgVYmR58z8Nx9REoIiIicjqkmptp+O3viG3c1Gd79+7dfZKEAD3xRlobVpHo2N5nu20HKCippWPvm2RyNm0dcTqbEhjf4ARsjAHP8/ByPsYYMJDLeUTLI5w9q4pAIEVPvL7Pg20RETlA35BFRpj4pjriTXUE3zuKeF0d8U11FM+YftRjqt5zBZ1vvEHRrBmE59XQ2vI6uV3dfcq4wSK272lj3JhyAu6BocBeZR253TuobhpH0DVE7K0EEvHe5KBt4zo24yZN49LLrjpqHRzX5eoP30a6p/uo5fojVBBVglBERETkNMh199D09NO0vvgyxvfz26MTJzD2hg9QMH5cn/J7tz9HR9ObfbY5bpjyMfMIl57NW6t/hd+dIp4OkI730JPvRWhhjMHL9V7DsiywDJZlUVgawTghfD9Jy66XD3mwLSIivfQtWWQEMcbQsHw5THFxqoMYDxqWL6do+rTewGrf/DDG9xh9+ZIDB7o2oz/+Htqb1hJv2NnnnIVlU4jbM3jsjQRbd6/nz66azsLZByaZLtk9lpr2DsLsJNDTmxy0LBvHsaiqncTll1/NmNoJ/ap/QVEJBUUlA/BOiIiIiMipZHyf9pWv0bj8CXLJAw95g6XFVF/3PkrnzT1soq6geFw+SegGC6mono+JnMVLbzbzzubnuLBqJ55vsD2Pzr3JfC9C6E0Muq5NLufjuDa2Y+FlfTqbEkRLI4wpj5Do3E6ya6d6E4qIHIaShCIjSHxTHYmmzVgXRTA5gzUmSOKVzcQ31eGEQ+x++BF6GvZiuy4lc+YQqqzAy/aw7c1fkMsm+5yrsGwK7f5UHno9QX3TbgwePZHd/M/rcS6YeR223Rv0dYayFGXfwQIsy8a2Lcqqa1my5GomTjprEN4FERERETnVunftov6hR/Kvbddl9JVLGLXkMpxQCON7dLXWEYyUEymsypcrKp9MYdkkisomkzDjWL62gbWb1pA1SRZXv0nAypLJOLQ3dJLo7MEN2PTmGnuno9mfHMwnIB2LVCJNT2eSdFEEh4x6E4qIHIGShEPYsmXLWLZsGZ7nDXZVZBjY34vQTHaxLMglDIFCCzPJYcu//QjLdvKBkp/LEd+0iVDlxTiBCKFoJbnO3iRhtGwyzemz+O2rCZraG/GtHKlIPd2RnTiOT6CkG98YbHrPdeXF5/Lg5lcId7dSUDGGxZctZeb0sxWUiYiIHINiQTmTRSdOpPTcOXSue4uyeedSfd21BMvK8P0cHXvfpK3xdbLpGNHSCYyfcUP+OMuyGTf9A/zm2c2sWLeWnJ0Ar5nCuEthVZbmxjQdzUlS3TmMMRjjH1iwJH+OA3Gm7VjkMj5dLUlKyoOUFReQSXdhjIdl6euwiMjB9Kk4hN1+++3cfvvtxGIxSko0xFJOzv5ehPZFEXIpHwN4KR97lEv6zRaClGFHQgSnVTBh6QcpnDQxf+yosQsxxmZPzxQeXpGgPdaEb2XpidTTU7ATxzGM6R5N4d4QbriJxkv2Mq60BoAJZYXMvexqyuwsF8w5F9u2D19BERER6UOxoJwpvHSajtWvU3HxRX0SdDXvv47
KSxZROHkyXi5DW8Nq2hrX4GV78mWSnTtJ97QTipT3OWdBSQ8Zq45oZ5hANkABe6lbH8OycuC72CYHWGRyAdJe8OgVNB6ZVsPOVDnX/8l1TJ40FtvWV2ERkXfTJ6PICHBwL0JsCz9ncPCxsmCFHOxZhXiNacJLxmFHA5iSXJ/jI0VjKB67lB/8dCU5k6GnYCfdkXqCQZ8xqUoK6yMEvWYCdGH1QPeuNtiXJAS4Zt7sd1dJRERERM5wxvfpWLOWxj88TrYrhhOJUDZ/Xn5/sLwMuyhMS/0rtO9dh++l+xwfLZ1AYeU8XnwrwbTxLuPHFAPg+4Zdq5spaw1TQCMRvwkLn0DIJpOzsf00xrLxCGOMs3+k8VFYOKQJxtt56Pk6/n76tAF+J0REhgclCUVGgL69CD0cs29lOQuMD86kAqjyMa4PlkXL7pUUlEzGcQ70+istClE+qYmNnesJhWFMtpSiHVFCqRZcazfYFmBh2RZvbVzL9HPmDM7NioiIiMgpl9y5i4bHfktyZ31+297Hn6R07rlYto3xPZp3raCjeT3G7/sAuqh8Km7xHF55J82r/7uLdDbDrKYyPvP++QDYtsWkqhDdu97AMj7BkENxURE+FrvbckTJ4BHA4PSzthY+LkE6yDbsYOP2NmZOrhyot0JEZNhQklBkmPM97129CHvzeXbQhrC974UFQZtMVyfFFRfwTvs4/vu/VvO3Hz2PgNsbfFmWxagqQ0eqiMJdRYTjrQSsBoxjA/smhy4oYtTshVy2aPHg3rSIiIiInBKZzi4a/7CcjtfX9tleMutsaj5wHdb+qWUsm55E00EJQouSUWeTDkzn2fUJ3qjbhkeWVKge327jtb0RPtg9m6KC3qHDF10+nx0bnqIgDOFIiEkz5/O7t5JEvTfA8jEEjqveBgesNGWZvfzqqVf4+89epzmyRUTeRUlCkWHIGENiy1ZaV7xEuqWFRNu23l6EGR8rBMmMRcOOHsaOD1NY5GJ8sByL7lc6eOLNDC2FCTw7xavrG7lkbm3+vDVNNXS8tZmA04Tv2hjs3uAqHCU6/XyWXryYyRXFg3jnIiIiInIq+NksLX98gab/fRY/k81vD48ZzdjrP0BwfCXBcFl+u2VZVI5dwO7Nf6Bk1GzaspN4ZE0HW3dvw7PT9ER2YkwHJW2jKExVYI9qxLNTQG+SsKgkyuXXvZ9krJPp8y7CDkR49LkfEaQDHxc43gTfgd6E3t5dZLIeoaC+DouIHEyfiiLDSO+k0WtofeklUnubMcaQbm7GuaS4txeh5+MUWjTtTBPvyrJ3j8XkyTZ+GpyQjV3tMmHdaradP5FUpJG6jiCXcCBJSDSEG4zhY2NZQKgAd8o8Llm4iLnVFdh6GisiIiIy5CU2b6Hof58jMXUaZTPPPmZ5L51m03f/L5n2jvw2tyDCmKuXEpk9nva9a0is28H4s28kWjI+XyZaOpExMz7Gvz26ieb27eScJN2FOzB+F2XtVRT1jKLA30PAb8NutulobaW09sAD56nnXJD/PZVKU5rb07twCSEs/BO4cxvL6j2P8T30dVhEpC99KooMA+mWFlpXvEz7qtV4qQMTQvvpNKbUYFUF8dI+xveJdxg6Yx4ZJ0Rnl0dnm0dRgYWXAac6QNWmvYzxIV1RTidb8c0SbKt32Mj5F8zhzfUvEUp34E88l/POu4hFE8cQcrRisYiIiMiZwBhD0/InCDTupWn5E5SePeOYw26dUIjCyZNob+/Asi3KL15I8UVn09m+npZ3VufLte5Z1SdJaFkWJUXFZJ0uuoo3YPwYZe1jKE5WUeDvJuC3Y1s2wWCAwqIwndvroHbyYesQcB3GjomQ6Co4yXeggNGlkfyUOiIicoCShCJnsNiGjbSueInYO3V9tpuAITC7DN/qwk0acCz8Hh+DoaEhS9Z3yASjWOkEzW05CiMueD5WyKFgRhETtzi0NUE43EBucZZgIATAmKIIs694H54T5MopYykOHd9cMCIiIiI
yuOKb6og31RF87yjidXXEN9VRPGN6nzK5RAKnoODA/IJA9fuuIdfTTfGls4mnttCw88k+xwSCRZjQeJ5dtZMrFkzIbzcYMgWbKWssoCQWJWJ2E/A7sS2bQDBItDBESVkp0+ddxJTZ5x+x3o7rcv0nPk+6p/uk34NQQRTH1VdhEZF30yejyBms+bnnSWzdnn9tFTqEzhuDX+7he1n81ix2cYhc2sdYPoluiCc9sk4BFhZZN0w82U2ix6WoAFI9hjbLIRXqJmhypNI276x9i3MuOBCwXTd7qoYVi4iIiJyBjDE0LF8OU1yc6iDGg4blyymaPg3LsvBzOVpXvEzTU09T8/7rqLjowt7jfI9keg/mPIeWtlf6nDMYLiUXnsWLm0O8+UI7xmxj0thSJtWUAOBlDVVbKknF1hPwY9i2TSAQoKAwRFllOTPmL2LyzPm4gWM/fC4oKqGgqGTg3xgREQGUJBQ5Y6T27iU0enSfJ7qVlywmsXU7bnUxoXmjyBb0YGywjE26rRMTtDB2b3BmMDS35cj5NsZ1sfAxtksuZ7O3NUs8GqCrs4c0LplIEN+3IBBge0cH5xxUDyUIRURERM5M8U11JJo2Y10UweQM1pggiVc2E99UB8aw57H/Id3SCkDj409QOvccnEiEVLKFxm1P9zlXqGAUXUzjqQ02Oxpj+FYnqfAeuiM7eXxNkNtqFgEQCDqcNamQLeuSuIEABdEg5VWjmHneYibOmKsefSIiQ4g+kUWGMON5dL21ntYVL5PYtp0pn/00RdOn5fcHJ1RQdPMsMlYHOSsFBlKZHMmuFGztIjgpjJfzId+LMEfWLcCy9q0HZxl8yyLelSbZY0i7BRgsjG3TUzIed9oCas+ZfsT6iYiIyKmzbNkyli1bhud5g10VGQb29yI0k10s2yKX8AkU2phJDpvv+1fscPjA3ISWoWTWTIzfuzhIpGgMBcW1dMd2E4rW0JiaxHOrc7R1JfCtDD0Fu+iO1BPwLaq6RlMwtaHPtZdcezkdDesoKYsy64LLGD91NrajOQFFRIYaJQlFhqBsLE7bqytpe+VVsl2x/PbWFS/lk4TtjWtp2vkC2JDzDMmeDF3dsKVtDBWvvc2EWRbYFuQ8wH9XL0KD5WVxMz1gDMZANmfwXZueinGkqiczPtnIgo2rmLv0wkF6F0REREa222+/ndtvv51YLEZJiYZYysnZ34vQvijSOxWNAS/lYY9yyVitBNNl2EUhgrMrCE+vpnbBzX0WNBk9fjEbtrXw8xVddKcSeHY33YU7SYX3EMShpmsMxS0e4Vw9ide68C/y84vfRaIR3v+Jv6CwuKzPqBgRERlalCQUGSKMMXTv3EXrSy/T+cY6jOf32R+uGk3R2TPyrwvLJtGw/Y+0d/XQ1eOwrXMs9bExeJ7HhOirODUhTCaHhSHebfXORXhQL0Jj9T69NZaFoTdYi9XOorQkyIXZ1ygr6MZOhTG5HFY/5ogRERERkaHp4F6E2BYm4+EYHyvTO6e1PacIP+YRWlCJHQmR8TtJdu6gsGxS/hyRwipKykJ05bbRXbSDdKiJsB1kbFc1xU0+4dxOXNI4toPT2UGsvY3SilH544tKKwbj1kVE5DgoSSgyBCS2bafht/9Dd/2ePtst26Jo1tlE508kPLaGksqDhhqHS6msOY8V25rY3FpO1krTU7ANu3Qv6WnF5AIeiYxHptujsbGbnAcm0NuLEMDYDp4TBAtyTphAroexreuYWG5jEcaKRoheNRVL88SIiIiInNHe3YsQTO/UM46FscCZWABJH2zofaJss2vPbiI95fkFSADcwjjJUasIWC6ju2ooasgRye7EJYdt29iWSzgSoHbyZIyvYfIiImcaffsXGQLsYKBPgtCNFlC+8HyCZ4+mK/YOrck3SL69gQYDNyw5kCgcM/ESItteotu8jlXYQUnYJeA4BFM+TQ0ZEu0ZepJZMmkfY9lY9MZ9ABYGLxjZt93gEcCLd5NNRokU5+jxIhR
63RjjYVn6qBARERE503g9PbS/voaWF1fkexH6WR8TcDFhF9fZl8izgKBFpqMLM3Y2L28tZdveLOPHbOWLH5yXH3ZcEx3D9NxZ+FvaCGd34lo5HNsGyyESCTB+6jTOuWgJo2om9BmqLCIiZwZ98xc5jYwxJLZsBd/vswBJQW0t0YkTMLkcFYsuhNogHU1v0Lx7E4meHD3pLMbAO01v0XHeeMqKwvljK2sylOSSWDmX8G6PSFOK7akMxjf4dgDb97FM79Bl4/n4tvPuSmEAYwVJexk2vmOzbVqWYPvZfHbqe7BtfUyIiIiInCmMMfTU19P68qt0vrGObCxOLpjEPbuUXNrHCkNP1qdhUyc14wsoKrIxvoVlQezFVl4sjtFWEiYVbmB9IsaOxin53oTGtyjaths/14Zt24BDOOIyccZMzr1oCZXV4wb35kVE5KTo27/IaeCl03SsXkPrSy+R2ttMeEwV0//2zj5PWCf8+UeJxTbTtOd14u90kezJkds3L2F7TzFbO8fRmLV4ffNu3jP/rPxxcwqn8NYbzxCOde8bIWKTtV28ANh+Dtv45NwIuUABAJY5QiUtyLlh3EwPVmsxjeE9/H7lXmZMqdWTYBEREZEhzkul6Hh9LW2vvkrPnkagN2GY7erCviiK2deL0DMWe/d0E+/KsndPD+EJBZDxcSM2wSkBJqx/mbpJ5ThBn+KCAHYkAfQmCUNhl7PnLmTjqscJhQNMmTWbuRdfQdno6kG8cxERGShKEg5hy5YtY9myZXie5vMYTPG6zex++BFqb76JomlTj+vYdEsLrS+9Qvtrq/BS6fz21N4mktu2UzhlMgBte1azc+srJLqTpNK5fLmW7jK2dNbQ7qYJjqknGIhhiiuAA0nC4kgZhfEsvuPg2RaeDZlAAbGSaoq6GihIZvYlCK38fISHZcB3XLDT1Lb4tE5royG5m5x3HgFXSUIRERGRoaa31+Bu2l5dSefaN/DSmQP7HIOp8rHKwthjI3hpH2P5xJIB4l05LNsi3pWjM2ZTGs7iZcCpDjJ2UxtTs4VkTBXRLT28OXotEy4amz/voqWXEg4lmbVgISXlowfjtkVE5BRRknAIu/3227n99tuJxWKUlJQc+wAZcMYYGn7/B7reWo8dDjHtji8cs1ed8X3i72yidcXLxN7ZdMj+6KSJjLpkEdGJE/Lbspk48USCdLY3Ibw3WUFdVyVeRYrs+DrCroeT8Snak2Htrme5YvLFOPuGDZdWFNFTPRnTuYt02QT86rNoCbk4bY2MbngLzw31TkxtjpIg3McCPDdEJJOgJllC7ew2XEcJQhEREZGhKLZhA9v/44E+20zIEJhZCjUumXgnjh8Fx8Lv8fEsh9aWHnzf4Lo2uZxPa0sPReOCOJ6PFXIwMyoo3hTECtbjWJB6qwEuOnD+UDjAxe/9wOm9UREROS2UJBQ5ivimOuIbNuJEIsQ3bCS+qY7iGdOPWN54Hpu+9/9INTX32W4HXMrOm0/loovJFYepq+9m3r6FRAAqas4juvMNtu4uYJdXgqmMkyqrwwUKW9NEGtIE4yksY+G6AVI9PUSjhfnzz1n0Hl7r6MZ2A5QEHeIdq6jc/GbvUGPbPfIQ40NuADzHxbHTjGn0iE/twvM9XEcfFSIiIiKDzc9ksIPB/OuiadNwowXkkt3YZS6BuaPxS3PYgQBeKoWfS0PUxkv29iKMpwL0JNM4toVlWTi2RU8yTTwVIeJCQ32WWFeaQCCLIYRlWXS17KQnGScSLRrEOxcRkdNB3/xFjsAYQ+Pyx8mkMsTsMMV+isblj1M0fdoRexNajkNkbE0+SRgsL6Ny0UWUXbCAnS2tvPj285DazjutEygpuo6zxpUCEAgVUzn7JranHyNLG4GuFBV7soQ6erByBtuysZwAvmORti1eX/0ml152cf66i8+eQGJbE3OrSphcGuW1l1pYmXuZXDCEhcXhqmuMOeJ9eIEQ4Uw3l4UvUoJQREREZBB56TSda9b
S9spKghXlTPzEx/L7LNeh9D3zSPp78EJpsC0s45LK5Eg3tWHlfKyAhZcxh/QiBLBsCy/rs2dXLB8v2rYhGMyRS7vMvGgJFyy5QglCEZERQt/+RY4gvqmO2IaNJHHpTns4AZfAvt6ERVPPouut9bSvXsPET3wUOxDIH1e5eBG5RJLKSy7GmjCFtRvqaHnylxQ7e/NlJpXu4dW3d+eThABjiqso3NmGuzeOnfaxsLAtGxNw8ByLTChKunwiXvVZ9NRO7lPXwqDLzTN654rxfZ/1K/4I5DBWENvPcVhHSBJagLEsIMf6FX/kgosX7lu9TkREREROl+763bS98mqfuQZ7GhrJxuMEioowvsf29f9NOtQKQC4H3d1pOpMWjXvCTKzrIHppEV7ah8P0Iuw9xsfzfby0TzDoYtsWlmUzZoxL5fY0s846m0hh8aC9ByIicnopSShyGPt7EWZTGbIlIUadGya+LkW2M8X2//gpTjRKLhYHoGPNG1RcuCB/bHj8eFJLb+IPGzbAW89TGemg2Dlw7pxxSUYqCI3eDczObw/YQcJ7gBywv9egG6SndBy5qik4lTU4rkPYsXCdIyftcpkcPekYnhvA9rIcaUZBYzhsD0MAA3hugJ50nFwmRzAcPHxBERERERkwB/ca7N6955D9kbE15GK9SULLdggXVNLR3kg8maG9O8T2rvE0xEcxf+MThM4NgA3kfHzsQ3oRwkFTVhvwPJ9RlRFGlTiESwIYk6Zh+fKjjqIREZHhRUlCkcM4uBdhdFaQcLWDnXHIPdNNctt2QlVVOOHwvrKb8klCYwxP/vFlYk2vUxmJQeTAOS03SEdBCe/kYnitOwhv30Zi9iUUBqMAOK5N6ZRzaN25ip6iajKjJkHVBILhEEEsaosjzK0q4ezKYkJHSRIGQgHcKz7IzpY2CgLO4QsZ6O7ppiBSwJGyiN1ZjymjKgmEAocvICIiIiIDws9m2fPY/9C5Zm2fFYoBnFCQ4vNn4ZxVRM2cq7HtA1/hyqvns62+gZd3l9DcXUIm2Eam6A1Kytpwq0swGQ/fN+ztNCRiPbiO3Sfh57o2uazBcnp7F4YKwrhOFi/l4YwJkt7UisnlsAKKB0VERgIlCUXeJd+LsCcNpS5e1FC3IUbNmBDO6CCmOU0u1kXZ+fMpu+giomdNyR/re2nGBl7HjcQAsG0LOxRkbzDMprYWQjtaKWxNYWUsMPDa+le5Yv6V+eOnX3AhmyvHEywqImhZFAQc5owuZm5VKaMKQv2qv2cMcTeCUzqa9JHvkowTwYlEOFKW0AESgSCeMbh6eiwiIiJyyliuS/euXX0ShAXjaim6YAa5sh4SsR2kuxpZ90yACxdcSmVp75PocHQUtbPez/a6RzFjNhCKeJSFXbJjRtOZSNHe6pHszJBO5fA9g3HpE/rZjkXQcTHGkMv6tLV1E5hWgOUHKYvaRK+aiuXqK6OIyEihT3yRd9nfizCV9SicHmFHS4aumIftZJkwu4j0CxaOE+DN4FhWPtfC+7NFXHxODQCOG6aq9lzi3S+TdV02pzI017cS3pumNOWDZwEWlu1gAg5tmztg/oFrL5hWy6uxNFXREHOrSplaXohrH1+CzrVt/nL+JJJZ74hlcrksL/zxBS6dfymue+Qnw9GAg6v5CEVEREQGTPfu3cQ2bKTqqvfke/VZlkXFRQtp/N0fKJk/l8g5tSRzu2jteJtkQ5buVI6c59OT3sDqjdO5+qKJ+fMVF7mUjG/BtW2CbYbI5hQduS7avd5Y0PN8jG/2TTxtDjvdjGVZOK5FdyJDNhkhXJKjx4tQ6HVjjIdl6WujiMhIoE97EXqHeJhcDjsczs9FmKmKQolL57YUWTdMZzzL6NEheqqi5JoT2Fv+l/NmjmH124F8khCgouY8fr/qBdp3NBOI54h6NhiwLBvjuvjBIImSWrKjJlM88aw+9XBsi9vOm0zwKMOJ+6M4FKD4KMOEs1mHQnKMiYYJaPiIiIiIyGHF6za
z++FHqL35JoqmTT3h8/TONfgGba++Snd971yDxTOmUzB+fL5M6XlzscaHaGtaR9OeF0mmcmT2PfRNewF2dI1je7KIwszbfZKE5ZFSJuwKk93TgON52JZNNxE83wM8bJMFwCeAl3N7578+HGOwTIZNGwLsmhTAdE/ks1Pf02d4s4iIDG/6xJcRLdXUTNurr9Kx6nUqLlpIdMrk/FyEkRlB9rSmyfoOmVAUKx2nqSXN+AURrFyI4nCapNtJuLiRnOfnFxMJBAvxdkUIdPiADY6DcR2SRdWkKiZhRk8gFA0Ttm1afY90ziPkHpg78GQThCIiIjI8LFu2jGXLluF5Rx4dIKeOMYaG3/+BrrfWY4dDTLvjC8e9gEf37j29KxQfZq7BtpWr8knCZNcutqz/HbF4nJ70gSxeIhNhW1cN27IebkUrpmI7weIyfONjWwdixuJ0lLgBy+6NKbM5i5hbCCZMSW43vuVicME/en0tAti5GOHdZ7Gtag+/X7mXGVNqtXCJiMgIoSShjDh+NkvXW+tpe+VVElu357e3rXyN2Dvv9K5oXBbGLoKuZkPWjWAB2UCIrng3mVpDBBtyPiRjNLf9kcau8xlXXps/16QZ5/H2a41kCspJlE/Cq5pEoLiYsGsD1r7hxCXMHlXSJ0EoIiIist/tt9/O7bffTiwWo6SkZLCrM+LEN9UR37ARJxIhvmEj8U11FM+YfszjvHSazrVv9K5QXL/7kP0FtWOpuOhCSufPy28LRsrJZdP5BGFbTwmb46V0FKTwR20nGoRgJkdkT5bQul28Neltzp0wJ3/87PkLePXJrRSUjGXijDn8bkcb7e56Zu1IguVh6N/c1gYHrDTlPW004tKQ3E3OO4+AqyShiMhIoCShjBjplhbaXl1J+2urySW7++yzXZdgRQVd697sXdF4RoA9LWmyxsY4Lrbl49sBsjmHhvoeSopc2hoz9PQ4pJ0Qz774FJ+4/pP58027YB7PdYNbVkkw7GJhEXJtZo8qZm5VCWOiYT2RFRERERmi9i9k52dzBMrLyba307j8cYqmTztmDLf74UfoeH1tn21OKEjpvLlUXLSQmJvjza31jG/pYUptb/IuECykauxsNuzdxuZMiFRJnFzxLiKeT7gxQ6Q5jduTxsLCtmzim5vgoCThnAVzGDd5AqNrKjDGsPrF/yDyxxKCbMHgHrbOxnCY+QktjHEJ0sHYlrMou6wN11HMKiIyUihJKMNepqOD+v/+NfHNWw/ZFxo9iuy0ObzljKb6leWEM9l39SIM9wZPxuB4GYzv09mepSfpk86E8I0NxqJzW0uf804cVcrUaZNoiKcYXxJhXlUpMyqKCGgosYiIiMiQt78XoR8M09SepDQYPmxvQi+dxnIc7INWAC5fcH4+SRgZW0PlxQsJz5zF21s2sH7d73C8NjKey8uxUqbUluaPq55yJVt2biSTbiXanCayN00gmQK/NzHoWAeSfXt3NPSpbzAUYHRNRW+dfI9Ec4qy7rZj9CI09FnqOL/VwbLSlHW3kWgO4fkerqOvjSIiI4E+7WXQJbt20bjtWaonX0G0ZPyxDzhObmEhPQ2N+de26xCYOoOdFeOpSyQJxPYyxn4Dx+zq24vQt8EGN53E8nonfMaAMRa5rA8hSBRWE6uYTMHoiRhj+jylfe/kKiKuQ3kkOOD3JCIiIiKnxoFehFmSFQGiMwIk38lR1JbN9yZMNTTS9sqrdLy+htqbb6LsvPn54wvPmsLoK5ZQNGc29bkwL9atwvrf+4m4PeyfZCbo5GhreptMdibBQO9Wx3YYtxPiO5rBs3pXHLZc7H0PmW23lFG105m14DymzZx4xPo7tsPU1nG0sxHLCmHbh1mkzoBvPGzLOVyeEHwImk7Oar0Ax9bUOCIiI4WShDKojDE071xBomMrzTuDTJzzoRMehms8j663N5BubqHqPVfkt9uBAOULzqd9w5vEJ42l0QXfa6fIrmdGCYAh2hODSpdkt+kzF6H
t5w4kCAEs8GwX37Jon3QB8WAtwWyWjJ+mPtbN+JJovujYosgJvisiIiIiMlj29yL0ghECZzmEqx1MzuDFwnSuXceGb/4D2a5Yvnzbqyv7JAnb42lWF06g5aUXqQrXU+DkDvrWZeghTFOogNYxO8n6aYIU5I8tccfQ7e/EtnuThJZTROnoszh73nxmzjuLUPgwCb938XI+iea3sMgBAfCPtPCN39uZ0Bxun41FmkTLW3i563ADShSKiIwEShLKoEp27STWshm/O0PMbCbZtZPC0onHdY5Me8e+uQZXkY3FsWyb8gsXECgqAmBv/VrqgtvInu1jqKcQ+gRqYTuLSadpNTmiU12a9vUiNK6LsS3IpTCWje8E8dwQvmUTyCQpaNlBatIYwl0Jpm5fT3BGFZTMGMB3R0REREROp4N7EXZXBqDQsGtDjJoxYexAHL89TbxuM6HRo3t7+oWChEaNwngeltObSGutf4lg50rGRQ9k3yzb0BMoYLvl09YZI7KjhfKOFC+UPc81C6/Nl1t4ySX8btsGomUTmDpnHrPPm0Fx6fE9eLYsQ0WlTbwzeuRCBlLpDOFQ+PA9CQGIUlRmY1mHzSKKiMgwpCShDBpjDM27XiaX6saPpzE2NO96mWjJhGP2JjSeR+ydd2h7+VXi72zCNwYKwIwBq9mjc+0bjLr0EgACrk0uG88/JDX42K5Pt2XR0pHA3tRFpgd812H0GENXbN+KxhaATSZUjLGdPjM7e26ISKyJCxIvULC+lZK2HrqedKmaOV0LkoiIiIicofr0Ipxssbs5TSzm4TgZJs4sxG/K4KdSBIoLqbjyPewuGUdntIBxzoGedmXFERoCFtmcIRCEdjfE5s4Y/q44odY0ZVkDvoWFTfPbO2DhgevXTqrmltv/hopRhSccUzquy/s+djvpnu4jlsnmsrzwwgtceumlBNwj904MFURxXH1lFBEZKfSJL4Mm2bWTeMtmvO4MvvEx3RniLUfvTZjp7KJ95UraXltJJhfDFAMzwBQBDviBIJ2Tz2Pu4kX5YwpLxhIOOexK5oi5hq5kD6YtS7AtjZWxcLJhsAzBYIaWdp+s72BcF2t/WvHgBKEx2BiM5YDvk25ppWp6gJ6XMmRa2zC5HFbg2MNARERERGRoOaQXYZFFvDmH7djE4jm6RxXg1hQSaM+yJ2nx83U5ykIrCUXHMuesyvx5ysfMJbTtJXZ2Jtm7s5tAS4ZQujcxCGBbDrZtY9tBAqnCPnWwLIvK0UUnfS8FRSUUFJUccX82myUQKaJsVDUBxa4iIrKPkoSn2MMPP8wvfvELXn/9dVpbW5k0aRKf/OQn+fznPz+i/0E+uBehyXj4NlgZj1yq+4i9CROdO9jx21+QyXVippMfGmFsm6wbImlcfMtmSyLBBd1ZSgp7V3ILRsrJVl3A1pVPEWlOEUz74FmADZaFF7TJhqMkSoopaNmB5wYO5AShN1lo9v13X+LQsiyybpjOWDel00OsnTuPG6+9EnsE/01FREREzkSZ9g7aV60i09GZ70XoTrbZ05LG98F1bXI5j6bWDDVnR+l8KUmBu4dLR72IG4EtHd3sbTuXMRW9w3vdYJQdu4vo2FRPyOtddMTC7k0MWgGcUA1jxk9n5nnnMnnq6MG8dRERkT6UJDzFvve97zFx4kS+853vUFVVxcsvv8xXv/pV3nzzTR544IHBrt6g6dOL0AKw8C2Dt683YaJjG+HAaNzCA0Mtmne+iBkLpqP3HDknQI8dJGM5pL0gbekQTZ6h3q7n6fVvcPPCCwGwLBur0aNwR8++1w4mYOMFwiRKxxMrGQ+FIaq2v4gxBt9xsfvM4GwO9Co8aJtvu+Q8m/a9ScbVtLH8zRbOnn3sodIiIiIiMrj8bJaut9bT/toqEpu34PuGXGcHxvf39SKEeHMOx7GxLHAcm3gshzcmROn7inHwCVgx/HAp00btIO03A5Py5x87ejZNG97Btmxs28UJjqG8aioz553LWTOrKYg
GB+/mRUREjkBJwlPsd7/7HaNGjcq/vvzyyzHGcPfdd+cThyNN316EPrE0NDVnqKkOUhKBXDbB5ld/jP2sx7TP/x8Kxo8DIFJUQ1tbIxk3TFu2kJbuEppzsNf00O50ghMjGnIpT+XwsjuBC/PXnHPeBax99RmMbUgWV9NTPgkzajzBSJBMZ5yynh2EYs14bvDIcze/i21B1gkT6+pmwpgWmlKN5DxDwFWSUERERGSoMcbQU7+b9tdW0bH2DbyeVH6fn073LoBXXPquXoS9s87YjiGXNTQ3p5k8OUqmC2KdcfbuTJDshob0c3z8pgNJwgWLFrBh5SoKSsYx/dxzmT67lrKKgsNVS0REZMhQkvAUOzhBuN95550HQENDw4hMEuZ7EfZkMGGLPU2GRCKH1xagpMLC+D5YWfwyi9aXX2H8viRh6ejZ/H5NB2+1txNzO8kG6sExhIMOVR6UtRgi7V1YOUNL61a45MA1K0aV0DNjCYloGcGiQiKuDViUhgNMtbeTWf0ycd8j50awMP1KFBoMxuntTdjZFOf9l3biOkoQioiIiAwlfiZD2ysraVv5Gqm9TYfsD5SXkWpqwwTD9JT37UXoOIBlsLCwHZuu9ix12QSppCGTDpAzLr5j0bxpG57v4di9C5gURMN88LbPUlpWgGUrPhQRkTODPdgVOFmbNm3iX/7lX7j11luZM2cOrutiWRb33ntvv45/6KGHWLJkCWVlZUSjUc4991y+853vkM1mT1mdX3jhBYLBIFOmTDll1xiqjDHs3fYcuUwSO2KTzBoSsRyeEyIeyxGLe2B6y2XH9QZt+0UKq8hUOrRFdkAkRmXQZnrM4exNPdS+HaOwuQfHd7ACQXLJGJl0qs+1L1l0PgVlJYwpjrB4XCWfnjuRv5o3ntrYWlJdKbJuCMvat0ZJP372l826IWKdPk2NOzHGOz1vpIiIiMgwkezaxZa1PyPZtevUXMCy2PvU030ShHYggDV9FtsXXM0fC6pJFcZJje2di7BpXy9Cy+qdjdrL+WRSHtm0Ry7XG7v62Hi+g2UMtl1KaXA69ru+WpVVRJUgFBGRM8oZ35Pw3/7t37jvvvtO6NgvfvGL3HfffbiuyxVXXEFhYSHPPvssX/rSl/jd737HU089RSQSGdD6btiwgfvuu4+/+Iu/oLi4eEDPfaLidZvZ/fAj1N58E0XTpp7Sa8VaN9HV+g7GeBhj2Nlg8H3wgmGsTDc7dsOs6gxWwCFd7tBUW0r1QcdfMnkGTZveoLA1TbAnhW1ssC38QBDPtvAch57iWnKjp9Dak6MmdODYuVWlzKgoYlTBgY2+7/Pmpji+Z2McB3LeIbMPHpPlkPFs3twU55Jrz/i8u4iIiMhpY4yheecKEh1bad4ZZOKcD53U/M6p5ma6d9VTfv55+W12IEDZefNpXfEyVu0Y2saMYjc+IaeTktBuzq3pxK0Kk+mxSAf9g+YitPByHrms3zvOxLIAg+8bIlHwvdEEOguYVFLMotv+TAlBERE5453xGY3Zs2fzN3/zN/ziF79g48aNfOxjH+vXcY899hj33XcfhYWFrFy5kieffJLf/OY3bN68mTlz5rBixQruvvvuPsf87Gc/w7KsY/48/PDDh71ma2srN9xwA2eddRbf/va3T/reB4Ixhobf/4Gut9bT8Ps/YMxxp8iOfG7fJ7lzF03/+wzp1jaMMbQ1rgHPBwOxuLevF2EQC4PnBEkksnSmLcj5BP0ce3Y+16dOpsmjsj5OOJ0DN0A25JIJOiSLq+iatJD4BbdgzbuC4NiJNPb07Q1aFHT7JAgBMpk0mZiH5wSwvSy2lzuBnyyeEyAT98hk0gP2/omIiIgMd8muncRaNuMnM8RaNpPs2nnc5/DSadpWvsbmHyzjnW9/j/pfPUQukczvj3XuYWdxjOZLRtE0pZtcdCdjovWUheME/Qyul8UzkArm2Lv3QC9C2Lfg3b6JaPYvYGLbNsGgxYyadmalt1GyfTWZndsH5P0
QEREZTGd8T8JPf/rTfV7bdv/ynt/61rcA+PKXv8z8+fPz2ysrK/nhD3/IJZdcwr/+679y9913U1JSAsCNN97IwoULj3nusWPHHrItHo9zzTXXkMlkeP7554lGo/2q56kW31RH55o3wLLoXPMG8U11FM+YfsLny7R3EN9UR9eWDcRjm/GiWay3wA4GiZwzjnjLZvzuHJ7x2b7bwveBgIMFGNvBeFDfblNc45HtyhEOt5Ds2klh6UQAzj53Bk89W4aViZMLFpGunIRffRZucQkBLGqLI8yoKGRGRRGl4WOvGuc6QdqmXkfCzeHkTnyosOc6FOUCuI5WqhMRERHpj4MXs/PjaYwNzbteJloy4Zi9CY0xJLdtp/21VXSuexM/k8U4BkrBJD061qxl1KWLewt7KVI99WDvf/BsMJaH7Xhkmjppb+yhM2dhXEMu6+O4Tv76lgWu62DbVj5x6PuGeCxL1aggPYUGuyVN4/LHKZo+7aR6QYqIiAy2Mz5JeCL27NnDqlWrAPjwhz98yP7Fixczbtw46uvrWb58OR/60IcAKCkpyScMj0c6neb6669nx44drFixgpqampO7gQFijKFx+eNkelLE7AjFfs9xBzheKkViy1bidZuJb6oj1d2CqQZTCYzuLWNVQGzTJuKl9b1BYMqnM2VIJrJ4TpD8KiEWeLZLT6yHTVnwcoZQoekTLAaDLoUzLqbJ2DjlVTiOzaTiCDMqiphRUURxKHB8b4JjUTSxmmwqc3zHvYsLFIWDoIVLREREZJ/nn3+eyy+//JDts2bNYv369YNQo6Elv5hddwYfg+nOEN/Xm3D/A+J3y3R20bF6NW2vvUY62Q5FYMaBKQIi4NsOie6zmDN/bv6YguJqwmGbzp4euoxFRypNujMLLRmCMQ+fCKFIBj+bxfcN7rvCOedd8Z1lgedBU0ua8VMC+B0BMq1tmFwOK3CcsaiIiMgQMiKThGvXrgWgvLycSZMmHbbM+eefT319PWvXrs0nCU+E53l88IMfZNWqVTz77LNMn37sXnrpdJp0+sCw1VgsBkA2mx3QBVXim+qIvb2RHdPG0lIaZFRnhvDbG+l4ewNF06f16xz1D/43nW+9DeVgqoF8DtXgY5G1HbZWBFl88Wy69j6B153Bw6e+3cEYwO7tRWh5WRwvi5XLYhmfeLeLF4oS67Zp3LGR8pptREvGA7D4wvmsbe5ienkh08qiFAYPNOMTeX/+Yu54uk+iF+F+0YCL8Tyy3tBfvGT/+3QqF+gROR3UlmW4ONVtWf+PDK4f//jHzJo1K/+6oKBgEGszNBzci9BkfHzbYGV8cqnuI/Ym7I7tYdujD5DJdWAmkf8mY4CcG6LbCpCzbHYBc32X/cvfuYEC0uXn8vrqP1LQmsFJ5bA8G7DJuTaO7eO4Pp4HjuvAMeYWtCwLx7GJxzxi0wppvvQSPnTje7GVIBQRkTPciEwSbt/eO2fI+PHjj1hm3LhxfcqeqNtvv53HHnuMe+65B8/zePXVV/P7Zs6cedjFS/7xH/+Rb3zjG4dsf+qppwYuqDSGwhdWYMUTNE+vxI210lxWydite1n7k5+SuHTxviXdDHayG7elFbe1le75c8FxALDwKLT2UjAni11gYbDwjEPGtklYFm/nDJt6fLpDKUrq/pdqJ4HJeMRSFt2JDBiDm0li+R694V3vhNA+NpbvkQtG6B41mWavBe+1h+nITmd/t8MSYO922Dsw78aI9fTTTw92FUQGhNqyDBenqi13d3efkvNK/8yaNatfU9aMJH16EVq9caBvGbzuDPGWOmKtmwh5pYSqqvLJwtY9r2FqfUxnb5LRswOk7ABpK4DBoiMbpMUz1FuNPP7m63x48aL89QIdBRTtTAFg0Tuc2LYcjDOaiqoEXk8z2YyP49pYHHtkiGWDl/Vpb0oSqKpnW6fH2WXHPExERGRIG5FJwng8DnDUeQELCwuBA734TtQTTzwBwN13333IQijPPfccS5YsOeSYr3z
lK9x5553517FYjHHjxrF06dIBWxE5vqmOrb97nHem12B3d+E7AezuLvZMr2HGni6mYeP3pEhs3kKmvSN/3MQZZ1M49SwAWupfommnTaorStIypGyfLuPzTspiW9rCw8b2DdMyUJDtxOQ8PEy+F6HlZQ96Qtz7X2PZvYuIGA/PDVNSEiTkZykO+ZxzwZx8b0I5OdlslqeffpqrrrqKgJ56yxlMbVmGi1Pdlk82nhEZSO/uRRhL9w7drakOUVJgkcsm2fLaT7CeMcy44wsUjKsFIFI4hkbqyDghYn6E1mwhLTlDk5+mmTie3UORa1HZ42FSO4EDScJzzzufN/64HAsfOzCKaOkkps6ew5u7t1HsPseept4VjbEs+rWM377ehMmuHGPH7OXF11YxY+J7NSehiIic0UZkkvB02rFjx3EfEwqFCIVCh2wPBAID8sXBGEPLU0+TTmdoLgriJJJ4gTBONkVLUZCJHR3seeg3hEaPzq/YbPaFS6kdOymbeTYAlTVzeeudp8kFsuzNWryTgLYElCQsJvZYRDMeQS9LMJhjeyrCOaOzxDO9vQg9J4hjDJafw1g2vu3iuUGM3dskjZ8lEtvLjFgrpRELLx2ivWElJRWTFXwNoIFqUyKDTW1ZhotT1ZaH6/8fmzZt4qmnnuL111/n9ddfZ+PGjXiexz333MNXv/rVYx7/0EMPsWzZMtatW0cmk+Gss87iIx/5CHfccceAvmfXX389ra2tVFZWcv311/Ptb3+b8vLyATv/maa3F2EdfioDBRbtTWmS8QwtASgpL8D4PpZtQZVF+6rV+SRhccU03kps483OVjqcTny7CcuyKAhYjE1ASVuOYLIHPIv2+HZ4z4FrFpcVMnnO1ZSPqmba7HGUV0bJ5nwaH3uCrvpE71yEAYt+pgh72eBlDV17E1SM20g2dxXBgDPA75aIiMjpMyKThEVFRQAkk8kjlkkkEgAD1nNvKIlvqiO2YSPbJ1XmexFaltX73+4uds8Yy4S6Rvx0GqcwAmdV0lOZoz5TyjlLr8qfJxgupSFdwY5tDTjdDqNzWcYYwPbBtjEOeI5L2rJJdxt8F3a3W/m5CL1ghKzV+8TWAixjsPEBMFZvb8Pd7RYlEy38VI5URyPGeFjWiGy2IiIi8i7/9m//xn333XdCx37xi1/kvvvuw3VdrrjiCgoLC3n22Wf50pe+xO9+9zueeuopIpHISdWvpKSEv/7rv2bJkiUUFhaycuVK/vEf/5FXXnmF1atXH/ah8HDneVn21C0nl01ihW2SsRzxWBbbsYjHssTjHsVFLsYypMe6hMaOzR8bKqggV15IW+Jtwq7NqIRDaXuOULwby7f2DSEOYDsWTixONpMmEDzwHr/vliv61MV1DOWhOHVdWRzXwj7GXITvZlkWjmuR6MpSc1YC1zmOBKOIiMgQNCKzLRMnTgSgvr7+iGX279tfdrjYv6JxOnWgF6EfCGIBxnaws1kay6LUhByy40PEzo7iOh14Joufbmb9jl2ce9aBxV7SsVoKk41gGSzX7fvs1bLIhorJhYrJBKPUvbKWWIHBd0MYYwE2ljGw76je3w4EZ54TItaZZeuLHjWJGIWzx2JdqqezIiIi0mv27Nn8zd/8DfPmzWP+/Pl861vf4r/+67+Oedxjjz3GfffdR2FhIX/84x+ZP38+AK2trVxxxRWsWLGCu+++m+9973v5Y372s5/x53/+58c890MPPcTNN98MwLx585g3b15+35IlS5g/fz5Lly7ll7/8Jbfeeutx3vGpEa/bzO6HH6H25psomjb1lF6ru2sXyc5dGN/HGENTYxrfNwQCNtmMoaE+Q6TGxw44ZEstthfkGHXQ8XOKx9DeYBFJJLH93kSdjYu1bwViy45SVDGJs2bPxbaP1RvUZv3mBGRsfCtIP6YiPJQB22RYvznOZdgncAIREZGhY0QmCfcHa21tbWzfvv2wKxyvXr0aIB80DoZly5axbNkyvAFcLfdIvQiBPr0
JG5aMp9BP0rLHkOox5FI5PGPTwkt9koTnzjqHl3b1vldeIEK2oAKvaBRO6WiqqsdSW1FM+69+iVO/nsbyEiCBj8OBKOzI0ZjBwSZNc6SI0qY2WrbvZnouhzVMh0yJiIjI8fn0pz/d57Vt9y9J861vfQuAL3/5y31ivcrKSn74wx9yySWX8K//+q/cfffdlJSUAHDjjTf2a/GRsQf1fDucq666ivLyclatWjUkkoTGGBp+/we63lqPHQ4x7Y4vDMjULl4qRXzLVrq2vUVqcyMTb/04wfJyWnavxHg+GIh15Yh15bAtG2NsbNsnmcwSSwcotXxCvk9bwwrM7IX5OpVSTFE8hW055HNydpjiislMmXUus+afTUlZ/xb6y2TSZDuzOCYA+BzPSOOD+SaA35klk0kTDp9c71MREZHBNCKThLW1tSxYsIBVq1bx4IMP8nd/93d99q9YsYL6+npCoRDXXnvtINWyd2Xk22+/nVgslg9QT8bRehFafg48D8vPYXtZmuqzNASjWJbBYGFMEAMU5vpGT+fOnMRzmy+joqqGSbVjGF9RyJhoiJLQviHMvs9fj03jBAoZm+jGc0K9qyb3h2XhOSEcunlu7jhyoytZ7KgnoYiIiJy4PXv2sGrVKgA+/OEPH7J/8eLFjBs3jvr6epYvX86HPvQhoHfo8EDEY/sNlTmW45vqiG/YiBOJEN+wkfimOopnTD/u8/iZDMntO0hs2UJ822aS2T34ow0EwW6CxJathKbHe1c0Tmbxch4Ne7K9cwG6vdk+y7bwcj7NbVmcSpdEZ4ZwcSPJrp0Ulk4E4OxzpvPi70vxst0UV0xi8qxzOWfBHIpK+pcYPFjADVFUcTXxzvhxH/tuxWXFBNyRN3xcRESGlxGZJAS46667uPHGG/n2t7/NNddck3+K3NbWxm233QbA5z73uQENBgfbUXsRZtPY/v4eixa2l8PzPXwrAJZNNlRMpqCCssLqPucsKAhx14fed8RrpnM52uwUM3qCQA/GPr4kn7Ed8NKU9wR5x+khncsRCQaP6xwiIiIi+61duxaA8vLyw44mATj//POpr69n7dq1+SThQHnyySdpb2/nggsuGNDznoj9D5DbvB52l1jUdvW+Lpo+rd9JzOZnnye2cSOJHTswER8zBkwVfQaLZCp80q0tdEW29K5onPaJpaA7mcWxrYOuZfX2MOxM05PMYjs2BSmP5l0vEy2Z0Du02LG58k8+QlVNFcWl0ZO6f8e1+dCnlpDqzp7UeQAiBQEcV8ONRUTkzHbGJwnXrFmTT+oBbN26FYAf/ehH/P73v89vf/TRR6muPpDguuGGG/j85z/PD37wAxYuXMiVV15JNBrlmWeeobOzk0WLFnHPPfecvhs5xY7WixD2JeP2JQmNZQEWvhsia01ldLSYBdcuYfr4csKh4xvqGwkG+UTNlby55WF8NwSWfcgA44P7Jh66r7cekVyCW6v/VAlCEREROSnbt28HYPz48UcsM27cuD5lT9RHP/pRJk2axHnnnUdRURErV67kn/7pn5g7dy4f/OAHj3hcOp0mnU7nX8diMQCy2SzZ7MkntPaLb6qj6+0N7BkVIh3w2BN0KHt7Ax1vb6Bo+rQ+ZY3nk2lvJzSqss/2rs2biMe2YWYChQAGY8CzbDKWRUOPxetjAtw+ayKxrQ/hdWfw8Gluy+H7Btux8DwfzzMY0/uDgZznE7AtUj0erfUbKa/ZRrSk9282ceq4/PtxskJhm1B4YHoADuTf5lTbX9czqc4ih6O2LMPFqW7L/T3vGZ8kjMVirFy58pDtu3fvZvfu3fnXBwda+913330sWrSIZcuW8fLLL5PNZpkyZQpf/vKXueOOOwgOo4TU0XoRAhgnSM52e5OFlo3lZXGz3Yzx6pixLcak984lHKo67uv6vk/dmpexyGEIYPu5Q8oc7Tm1RW+i0CLHpjUvseTKxf2eb0hERETk3eLx3qGl0eiRe6EVFhYCB5JzJ2rWrFk8+OC
D3HffffT09FBbW8unPvUpvv71rx81zvzHf/xHvvGNbxyy/amnnqKg4PiH1R6WMRS+sIKeTJJMKIJjW2RsQ3Omm/hPfkrikkXY8QSB1lbc1jbc1jYAuq5ZCvtisYjTQtGoXTjFOTzbJotDxrFI2rApC3XdhoQPkGPd2kepdhKYjEc8ZZFMZPE8H9/vGwlaloWhN1FYVhqiuszG8np487WH6chO58RWF5Ejefrppwe7CiIDQm1ZhotT1Za7u7v7Ve6MTxIuWbKk94njCbrlllu45ZZbBrBGA2egFi45Vi9CYF+wt7+Xn8mvdNxcFGTS9sxxDz3ZL5vO0p2NY9kutn9iGXEL8G2X7myCbDpLKKL5XkRERGTo+8pXvsJXvvKVEzruzjvvzL+OxWKMGzeOpUuXUlxcPCB1i2+qY8vvlrOzpgDwcVybXManZUwB0+v3Uv7CS/RJyEV7E6fzzjmXgvG9Pfna977B+nW7yLohPMenPQd1KZudaQiloCTuMDYF5V4ad3w7JmrwMDS3eYeN323LwnZ6hx97nk9P2sdyLEzaozjawzkXzMn3JpSTk81mefrpp7nqqqsIaFFAOYOpLctwcarbcn8ffJ7xScLhbKAWLjlWL8LD2b/Ssd3dxfZJlYROcCJrJxQge9Gf0JU4+QmhSwuLcI5zuLOIiIjIwYqKigBIJpNHLJNIJAAGLCF3vEKhEKHQoQ9FA4HAgHxxMMbQ8tTTtPkpMsEgjmuBsXBci3TYELOy2M0thEaP7u3Z5xrM2CAmNJZAKJSvQ0X1OSTWPUJ9IsvedhsTcyhIe5yby+AYC9uysW2LUKCHnoSFCbskMhbdyQyOY2Fhg9WbHLQcC7tPfNpbLpEpoMTx8dLdtDespKRi8pBZ9GU4GKg2JTLY1JZluDhVbbm/51SScJjL9yJM9tBSVIl9uF6ERzp2X2/ClqIgk7b0nFBvQte2uW3xOSSzJ9cbEiAacHA11FhEREROwsSJEwGor68/Ypn9+/aXHW7im+ro2rCBxtEhwMey9q0ubFmAz96xRRTtSJAdU0J8fBi/IEbOzrIt3czcmpr8eVw3xNaNxdDRSAX7koKWjW33/SJiWT65nI0JWLTsTmN8gxO0j7rQh+1YvT0b2zIUTwrjp3KkOhoxxsOy9BVGRETkVNC/sMOcyeXItLSxc8porH72Itxvf29Cq7uLnVNGU9LahsnlsI4zq10cClCsHoAiIiIyBMybNw+AtrY2tm/fftgVjlevXg3A/PnzT2vd3m2gpp452IEVjQ/qRciB1YUd1yJVAImLSnCrWmjPQa7JJ5v0CPV4vFm3hXkzpubPN6lmOnu6WvvEl5ZTSLRkNJVjxlIzYTz+K88SW7+GbTuKaY8AhMjljv3g1+DT3uZj7U5TnYhROHss1qXOgL0XIiIi0peShMOcHQhgbvkEDc/8jECsGz9wnIux7OtN2FAaZs6Vn8BWF24RERE5g9XW1rJgwQJWrVrFgw8+yN/93d/12b9ixQrq6+sJhUJce+21g1TLXgM19czBjtSLsHexEIO/b6Xh7Z0+bsID28YYi6znkM2F6GjbAxxIEk6dcTatu+opG11N9bjxTJg2meraynwvQT+bZcWv/pPuZJb24gjGT4KxMf0a12KD5dFuF1CSbKNl+26mn8ADaxEREekfJQmHOWMMj7/xPMFEJ74TgOOdw2Vfb0I30cnjbzzPnLlTNA+MiIiInNHuuusubrzxRr797W9zzTXX5HsMtrW1cdtttwHwuc99bsASc0PFEXsRWpBN5Q4qCJ5neuPAbIAcQWxsAm6UgnTfB86z5s1g1vwZR44PHYdH547Cbbap7ejG4GL6PX2MhWVcTLCH5+eOIzu6ksWOehKKiIicKkoSDmEDMcQklc5it+/E8nKYgIvl+8d/EsvCyuWw23eSSmeJhI+zN6KIiIjIKbBmzZp8Ug9g69atAPzoRz/i97//fX77o48+SnV1df71DTfcwOc//3l+8IMfsHDhQq6
88kqi0SjPPPMMnZ2dLFq0iHvuuef03chpcqRehBiwbAvj911xOJd1IT2W6inTGD9nNrUTqxg9pu9iLpZ99IfH6VyONjvF2XEb8PGPcz5BY9k4Jkd53GZjdQ/pXI5IULGoiIjIqaAk4RA2EENMsl4KO5PGOC6Wnzv2AUdgHBc7kybrpYigwExEREQGXywWY+XKlYds3717N7t3786/TqfTh5S57777WLRoEcuWLePll18mm80yZcoUvvzlL3PHHXcQHGaJqKPPRQi2bfeO7rWs3oSh8fFzWcbvfYcp6QKmXfInJzSaJBIMcuvY97D+nV+BHSJgH/9QYeNbRL1u/rzm/UoQioiInEJKEg5z0VAhhcWX0N0TP+lzFRWUEA0VDkCtRERERE7ekiVLMMYcu+AR3HLLLdxyyy0DWKOBNZALlxyxF+E+jtP3tWVZ+PjsHR2icsMG4pvqKJ4x/biv6/s+21e/jEUOywqAOf5RLZZlY0yabatf5rIrFvcmNEVERGTAKUk4zDmuzUc+vpRUd/akzxUpCOQnoRYRERGRU2ugFi45Vi/Cw+td6TiDoc1L07j8cYqmTzvu3oS5TI50TxdYAYw58VEtWAEyqS5ymRxBTX0jIiJySihJOAIUFoUoLAoNdjVEREREZBAcqxfhkfQmBH0aR4eoOMHehMFwkD/9yzuJd3Ydf8XfpbisRAlCERGRU0hJQhERERGRYWp/L8L2bA+ZYKifvQj3O9CbsD2bOuHehKOqRzGqetRx111EREROL40dHcKWLVvGzJkzWbBgwWBXRURERETOQCaXI9XSQkNVGDDHneDrLW9oqAqTbmnF5E5iyLCIiIgMaUoSDmG33347GzZsYNWqVYNdFRERERE5A9mBANsXzSMdNtiuBZbV25Gw3z8WtmuRDhu2LZqHHTj+1YlFRETkzKAkoYiIiIjIEDQQo0o8z2Prmyv2rSps4fkc9w9YYHy2vvnigKy0LCIiIkOTkoQiIiIiIkPQQIwqSaZ6KPTSOI6F8Q34/nH/GN/gOBaFfppkqmcA71BERESGEi1cIiIiIiIyTEUCQaZMrwGTPOlzGStKJKDVhUVERIYrJQlFRERERIapQDDI+VfeRk8iftLnKigqIhBUklBERGS4UpJQRERERGQYK62ooLSiYrCrISIiIkOc5iQUEREREREREREZ4ZQkHMIGYkU7ERERETkzKRYUERGR00lJwiFsIFa0ExEREZEzk2JBEREROZ2UJBQRERERERERERnhlCQUEREREREREREZ4ZQkFBERERERERERGeGUJBQRERERERERERnhlCQUEREREREREREZ4ZQkFBEREREZgpYtW8bMmTNZsGDBYFdFRERERgAlCUVEREREhqDbb7+dDRs2sGrVqsGuioiIiIwA7mBXQI7NGANALBYb5JrIcJHNZunu7iYWixEIBAa7OiInTG1ZhotT3Zb3xxD7Ywo5sygWlIGmfz9luFBbluFiqMSCShKeAeLxOADjxo0b5JqIiIjImSwej1NSUjLY1ZDjpFhQREREBsKxYkHL6JHykOf7Pg0NDRQVFWFZ1im5xoIFCwZ1KMupuv5AnfdEz3O8x/W3fH/KHa1MLBZj3Lhx1NfXU1xc3O/6nQnUlk/NOdSWTz+15YE/x6lqx/0tO5ht2RhDPB6npqYG29ZsM2caxYKDe1595pxZBrMtn8prqy2rLQ+Xa6stD+1YUD0JzwC2bVNbW3tKr+E4zqB+qJ6q6w/UeU/0PMd7XH/L96dcf8oUFxcPu39M1ZZPzTnUlk8/teWBP8epasf9LTvYbVk9CM9cigUH97z6zDmzDGZbPpXXVltWWx4u11ZbHtqxoB4lC9A7MfZwvP5AnfdEz3O8x/W3fH/KDfbfdLAM9n0P5bZ8MudQWz79Bvu+h2NbPlXtuL9lB/tvKnI0g90+9Zmjz5yBMpj3fSqvrbY88qgtD/w51JaPTcONRUagWCxGSUkJXV1dw+6Jm4wsassyXKg
ti8jppM8cGS7UlmW4GCptWT0JRUagUCjE17/+dUKh0GBXReSkqC3LcKG2LCKnkz5zZLhQW5bhYqi0ZfUkFBERERERERERGeHUk1BERERERERERGSEU5JQRERERERERERkhFOSUESOacuWLfzlX/4l8+fPJxAIMHHixMGuksgJefjhh7nxxhsZP348BQUFzJo1i3/+538mm80OdtVEjssjjzzC4sWLqaysJBQKMXnyZO688046OjoGu2oiMgwpFpThQrGgDAenMg50B6B+IjLMvf322/z+97/nggsuwBijL6Fyxvre977HxIkT+c53vkNVVRUvv/wyX/3qV3nzzTd54IEHBrt6Iv3W3t7OkiVL+Nu//VtKSkp46623+MY3vsG6det45plnBrt6IjLMKBaU4UKxoAwHpzIO1MIlInJMvu9j270dj//yL/+SJ554gh07dgxupUROQEtLC6NGjeqz7d577+Xuu+9m7969VFVVDVLNRE7ev//7v/MXf/EX7Ny5k/Hjxw92dURkGFEsKMOFYkEZrgYqDtRwYxE5pv1BociZ7t1BIcB5550HQENDw+mujsiAKi8vB9CQKREZcIoFZbhQLCjD1UDFgfq0FzlDbdq0iX/5l3/h1ltvZc6cObiui2VZ3Hvvvf06/qGHHmLJkiWUlZURjUY599xz+c53vqMvl3LaDXZbfuGFFwgGg0yZMuVkbkNkUNqy53mkUilWr17NN77xDa699lq1ZZERYrD//RQZKIPdlhULykAYNnGgEZEz0he+8AUDHPJzzz339PtY13XN0qVLzU033WRKS0sNYBYvXmy6u7uPeOxnP/tZM2HChAG8ExnpBqstG2PM22+/bSKRiPnc5z43ULcjI9hgtOWSkpL8dZYuXWoSicRA35aIDFGKBWW4UCwow8FwiQPVk1DkDDV79mz+5m/+hl/84hds3LiRj33sY/067rHHHuO+++6jsLCQlStX8uSTT/Kb3/yGzZs3M2fOHFasWMHdd999imsvcsBgteXW1lZuuOEGzjrrLL797W8P1O3ICDYYbfn555/npZde4v/7//4/NmzYwPvf/348zxvI2xKRIUqxoAwXigVlOBg2ceBJpxlFZEj4xCc+0a8nFQsWLDCAuffeew/Z9+KLLxrAhEIh09nZedjj9fRYTrXT0ZZjsZg5//zzzYQJE8yePXsGrO4iBztdn8v7vfrqqwYwDz300EnVW0TOTIoFZbhQLCjDwZkaB6onocgIsmfPHlatWgXAhz/84UP2L168mHHjxpFOp1m+fPnprp5Iv51MW06n01x//fXs2LGDJ598kpqamtNSZ5HDGcjP5fnz52NZFlu2bDkldRWRM59iQRkuFAvKcDAU40AlCUVGkLVr1wK9Kx9NmjTpsGXOP//8PmVFhqITbcue5/HBD36QVatWsXz5cqZPn37qKytyFAP5ufzSSy9hjGHy5MkDW0kRGTYUC8pwoVhQhoOhGAe6J3W0iJxRtm/fDsD48eOPWGbcuHF9ygJ0d3fnn1xs27aN7u5uHn74YQAWLFjAhAkTTlWVRQ7rRNvy7bffzmOPPcY999yD53m8+uqr+X0zZ86kuLj4FNVY5PBOtC2/973v5corr2TWrFmEQiHWrl3Ld7/7Xc455xxuuOGGU1pnETlzKRaU4UKxoAwHQzEOVJJQZASJx+MARKPRI5YpLCwEIBaL5bc1Nzfzp3/6p33K7X/905/+lFtvvXWAaypydCfalp944gkA7r777kMmAH7uuedYsmTJANdU5OhOtC1fcMEF/PznP88HjBMnTuS2227jzjvvJBgMnsIai8iZTLGgDBeKBWU4GIpxoJKEInJMEydOxBgz2NUQOWk7duwY7CqIDIh77rmHe+65Z7CrISIjhGJBGS4UC8pwcCrjQM1JKDKCFBUVAZBMJo9YJpFIAKirvQxpassyXKgti8jppM8cGS7UlmU4GIrtWElCkRFk4sSJANTX1x+xzP59+8uKDEVqyzJ
cqC2LyOmkzxwZLtSWZTgYiu1YSUKREWTevHkAtLW19Zn49GCrV68GepdQFxmq1JZluFBbFpHTSZ85MlyoLctwMBTbsZKEIiNIbW0tCxYsAODBBx88ZP+KFSuor68nFApx7bXXnu7qifSb2rIMF2rLInI66TNHhgu1ZRkOhmI7VpJQZIS56667APj2t7/NmjVr8tvb2tq47bbbAPjc5z5HSUnJoNRPpL/UlmW4UFsWkdNJnzkyXKgty3Aw1NqxZbRMlcgZac2aNfkPDYCtW7fS2tpKbW0tY8eOzW9/9NFHqa6u7nPsF77wBX7wgx8QCAS48soriUajPPPMM3R2drJo0SKefvppIpHIabsXGdnUlmW4UFsWkdNJnzkyXKgty3AwXNqxkoQiZ6jnn3+eyy+//Jjltm/ffthJTn/961+zbNky3njjDbLZLFOmTOGjH/0od9xxB8Fg8BTUWOTw1JZluFBbFpHTSZ85MlyoLctwMFzasZKEIiIiIiIiIiIiI5zmJBQRERERERERERnhlCQUEREREREREREZ4ZQkFBERERERERERGeGUJBQRERERERERERnhlCQUEREREREREREZ4ZQkFBERERERERERGeGUJBQRERERERERERnhlCQUEREREREREREZ4ZQkFBERERERERERGeGUJBQRERERERERERnhlCQUEREREREREREZ4ZQkFBERERERERERGeGUJBQRnn/+eSzLwrIs/v7v//6Q/Tt27Mjvv/XWW097/UYavd/9ew9G6vv0X//1X/n7vv/++we7OiIiMgwoFhxa9H4rFjwaxYJyKrmDXQEREZFTobOzk+9///sAzJ07lxtuuGFQ6zNQXn/99fzv8+fPH8SaiIiIiAxdigVFjp+ShCIiMix1dnbyjW98A4BPfOITwy4wDAQCzJkzZ5BrIyIiIjI0KRYUOX5KEorIMU2cOBFjzGBXY8TQ+90/I/F98n2fN954A4BZs2YRCoUGt0IiIjIijMR/cweT3u/+GYnvk2JBOdU0J6GIiMgZoq6ujkQiAWh4iYiIiMhIo1hQTjUlCUVERM4Qa9asyf+uwFBERERkZFEsKKeakoQiI8CTTz7JjTfeSHV1NeFwmPHjx3PTTTfx1FNP9ev4Y60cdrgV8bZs2cIXvvAFpk+fTjQaZcyYMSxduvSw13z55Zf58Ic/zJQpUwiHw1RVVfGnf/qnrFu3rt/3mEql+NGPfsR1113HuHHjCIfDlJSUMHv2bD7/+c9TV1d31OMPdw8NDQ3cddddzJo1i8LCQoqLi5k3bx7f/OY3icfjRz2f7/s8+OCD3HDDDUyYMIFIJEI4HGbs2LGce+65/Omf/ik//OEPaWtrO+TY412pbfPmzdx5552ce+65lJWVEQ6Hqa2t5f3vfz8/+9nP8DzvtN13KpXit7/9LZ///Oe5+OKLGTVqFIFAgKKiIqZOncrHPvYxnn766WPeU38c6X3av33SpEn5bQ888EC+7ME/zz//PLlcjpqaGizLorS0lO7u7mNeO5FIUFxcjGVZ1NbWHvU97q/u7m6+//3vc8kll1BZWUk0GuWcc87hO9/5Dj09PYAmqhYRkROjWFCx4Om6b8WCJ06xoAwJRkSGLc/zzKc//WkDHPHnC1/4gnnuuefyr7/+9a8fcp7t27fn93/iE584ZP+7j3/kkUdMYWHhEa957733GmOM8X3ffO1rXztiuUAgYP7nf/7nmPf5/PPPm7Fjxx71Ph3HMd/61reOeI5338OTTz5pysvLj3i+adOmmd27dx/2XK2trWbhwoVHrc/+n+9+97vH/X4f7J577jGu6x71GrNmzTJbtmw55fdtjDGTJk3q131ff/31Jh6PH/E8/XkPjlTm4O3H+nnuueeMMcbcfffd+W0/+clPjvqeG2PMj370o3z5r33ta8csfyzPPvusqampOWI9L7jgAtPe3m4uu+yyfHtOJpMnfV0RERneFAsqFlQsqFhQ5Hho4RKRYeyOO+7
gxz/+MQCO4/CRj3yEJUuWEAqFeOONN/iP//gP7rvvPurr6wfsmmvWrOGf/umfcByHz33uc1xwwQU4jsPzzz/PT3/6U3K5HF/96ldZtGgRa9as4Zvf/CYTJkzg1ltvZcaMGSSTSX7961/z1FNPkc1mufXWW9m0aROVlZWHvd7jjz/O9ddfTzabxbZtrr76at7znvcwduxYUqkUq1ev5j//8z/p6urirrvuAuArX/nKUe/hjTfe4Hvf+17++osXL6aoqIhNmzbxwx/+kL1791JXV8ef//mfH/Zp+Gc+8xleffVVAMaNG8cHP/hBpk6dSllZGclkks2bN/PKK6/w4osvntR7fffdd3PvvfcCYFkWf/Inf8LSpUvzdf3pT3/Kzp07efvtt/Pvd01NzSm7b+h9AlpaWsoVV1zBvHnzmDBhAgUFBcRiMd58801+9atf0djYyG9/+1s++clP8utf//qk3oPDGT16NI8++ijNzc189rOfBeDyyy/n85///CFlZ8+eDfT+zb71rW/heR73338/f/7nf37Ua/zoRz8Cev+/+vSnP31S9f3973/PTTfdRDabBeCqq67iuuuuY8yYMezcuZP777+f1157jS9+8YusXbsWgBkzZlBQUHBS1xURkeFPsaBiQcWCigVFjstgZylF5NRYsWKFsSzLACYajZoXX3zxkDINDQ1mxowZfZ5SnezTY8BMnDjRbNu27ZByDzzwQL7M7NmzTSgUMu973/tMd3f3IWU//vGP58t+5zvfOew9NjQ05J90jh492rzyyiuHLbd7924ze/bs/FO3jRs3HvMeampqzPr16w8p19jYaGpra/PlXn/99T77m5qajG3bBjAXX3yx6enpOWydjDGmubnZbNiw4ZDt/Xly+uqrr+avEw6HzeOPP35ImUQiYa6++ur8ua655ppTdt/7LV++3GQymSPeczKZNDfccEP+PIdrl/19D45V5niewhtjzPXXX58v/+abbx6x3OrVq/PlrrvuumOe92g2bNhgioqKDGBKS0vNk08+eUiZeDxuzjnnnD5/p4997GMndV0RERn+FAseoFhQsaBiQZH+0ZyEIsPUP//zP2OMAeCf/umfWLx48SFlqqur+dWvfoXjOAN67V/84hd95gDZ7+Mf/zhTp04FYP369ZSUlPDggw8SiUQOKXvvvfdiWRYATzzxxGGv893vfpf29nYAHn74YRYuXHjYcmPHjuWhhx7CcRw8z+O+++475j38/Oc/Z9asWYdsHzNmDH/3d3+Xf/3444/32b9t2zZ83wfgIx/5COFw+IjXGDVqFGefffYx63I43/nOd/LX+eY3v8nVV199SJloNMp///d/M2bMmHxdjzW3z4ne937XXHMNgUDgiOcvKCjggQceIBqNAr3zwwwVt912W/73+++//4jl9j85BvJPp0+EMYaPfOQjxONxbNvmscceY+nSpYeUKyws5Lvf/W6fbZqDRkREjkWx4AGKBRUL9odiQREtXCIyLKXTaf7whz8AUFJSctQu8Oecc85h/zE6UfPnz+fiiy8+4v5Fixblf//4xz9OcXHxYcuNGzeOCRMmALBhw4ZD9htj+M///E8ALrroIi655JKj1mvGjBlccMEFQO/k3Uczd+5cLr/88iPuv+qqq/K/r1+/vs++/QEP9J1YeCAd/PctLCzsE9C8W0lJSZ/9jzzyyBHLnsx9H4/i4mLmzJkDkB+KMxRcddVVnHXWWUBvgLx/guiDxeNxfvnLXwK9bfSaa6454es99NBD+SEjt912G5dddtkRy1522WXY9oF/ss8777wTvq6IiAx/igUPpViwl2LBI1MsKAKak1BkGFq3bh2ZTAboDcRCodBRy1955ZVHfBp4vI70BHe//U8ygXygdrSyO3bsoKOj45B9GzZsyK8GV1ZWxmOPPXbMuu1/Sr59+3ZSqdQRn+xedNFFRz1PbW1t/vd3123mzJmMHTuWPXv28JOf/ATP8/jMZz7DwoULB+wp/bp
160in00Dv3/fgYPRw3vve9/K1r30NOHogdjL3fbCOjg5+8Ytf8MQTT7B+/Xra2tpIJpP53gwH271791GveTpZlsVnP/tZ/vZv/5bOzk5+9atfHbKi4IMPPkgikQDg05/+9En9Tb/3ve8B4LouX/7yl49aNhQKMXr0aPbu3YtlWcydO/eErysiIsOfYsHDUyyoWPBoFAuKKEkoMiw1NDTkf9//NOxo+lOmvyoqKo66/+Agtb9l9wdBB9uxY0f+9+XLl7N8+fLjqCW0t7cfceLmI02M/e56AaRSqT77HMfh/vvv56abbiKdTvPAAw/wwAMPUFxczIUXXsiiRYt4z3vew8UXX5wfQnO8Ghsb879PmzbtmOUPLnPwse92Mve9329/+1s+9alP5YP2Y4nFYv0qd7p88pOf5O677yaVSnH//fcfEhjuH3riOA6f+tSnTvg6jY2NrF69GiA/ufqx7H+vpk6dSlFR0QlfW0REhj/FgsemWPBQigUVC4pouLHIMLT/6RbQr1WvjvX08Xgc3A1+IMu+W2dn5wkfC+Sfrh/OydQL4Nprr2X16tXcfPPNBINBoPcf9aeffpq///u/Z/HixUyZMoWf//znJ3T+eDye/70/f7vCwsLDHvtuJ3vfr7zyCjfffHM+KDznnHO48847+eEPf8gvf/lLHnnkER599FEeffTR/Fw3++fSGSrKy8u55ZZbgN77OXgozerVq1mzZg0A1113Xb+CuSN59tln80/TjzasZ79du3bR3d0NaA4aERE5NsWCx6ZY8FCKBRULiqgnocgwdHAgsP8fk6NJJpOnsjqnxMH3eOedd/LP//zPg1ibQ82ePZuHHnqIZDLJSy+9xKuvvsqLL77Iiy++SDqdZvv27XzsYx9j69atfP3rXz+ucx/85LA/f7uDvyicyqeOX/va18jlcgAsW7bsqPPj/MM//MMpq8fJ+qu/+qv8HEf3338/P/jBD/K/73cyk1RDb6C33/4J3I/mmWeeyf+uOWhERORYFAsOPsWCigWPRrGgDFXqSSgyDB38VGvLli3HLN+fMkPNwXOi1NfXD2JNji4ajbJ06VK+9rWv8fTTT9PS0sI999yT3/8P//AP7N2797jOWV1dnf998+bNxyxfV1eX//1Iw2pOVjab5fnnnwd6A5ejBYXQd4jQULNw4ULmzZsHwH/913/R09PTZ5LqCRMm8N73vvekrtHU1JT//XArOr7bT3/60/zvenosIiLHolhw6FAseHiKBRULytCkJKHIMHTOOefk5wx56aWXDjuPy8EOfjJ1ppg7dy4lJSUAPPfcc8e8x6GiqKiIr371q1x//fVAb0B1vKu6zZ07N//3XbFixTF7CBy8gt+FF154nDXun9bW1vyT42PNa7Rq1SpaW1tPST0OdvCQmcNNlH00f/VXfwX0DmX69a9/3WeS6s985jMnPRzHdQ905N+5c+dRy/7xj3/kxRdfzL9WYCgiIseiWHDoUiyoWBAUC8rQpSShyDAUCoW49tprAejq6uInP/nJEcuuX7+ep5566nRVbcA4jsNHPvIRoDco+b//9/8Oco2Oz6RJk/K/7w+o+isYDHLdddcBvcNHfvjDHx6xbCwW49/+7d/yr//kT/7kOGvaPwfPh3Os3gjHO6TmRB08DOl4h1F9+MMfzn/xuP/++/PDS1zX5ZOf/ORJ123ChAn5359++ukjluvs7OwznGXSpEmUlpae9PVFRGR4Uyw49CkWPPUUC4ocPyUJRYapv/7rv86vmPalL32JV1555ZAyTU1N/Nmf/Rme553u6g2Iu+66K/+P5Fe/+lW+//3vH3Xy42QyyY9//OP8UIFT4cknn+T//b//R0dHxxHLNDc385vf/Cb/+txzzz3u6/zt3/5t/gnm3Xff3ecJ8X7d3d18+MMfzq9id+2113LOOecc97X6o7i4OL9y3uuvv87DDz98SBnP87jjjjt4/PHHT0kd3q28vDwf3L3xxhvH9QQ5Go3y8Y9/HIC
XX345P0n1Bz7wgT5DfE7UlVdemf/9kUceyQ/POdiePXu49tpr2bRpU36b5qAREZH+Uix4KMWCigX7S7GgjFRauERkmFq0aBH/5//8H37wgx8Qj8e59NJL+ehHP8pll11GKBTijTfe4Mc//jHt7e3cdNNNPPLII4Nd5eM2duxYfv3rX/P+97+fdDrNHXfcwQ9/+ENuvPFGZs6cSWFhIfF4nO3bt7N69WqeffZZUqlUn3lgBlpjYyN33nknX/rSl1iyZAkLFy5k8uTJFBYW0tbWxptvvskvf/nLfOB4yy239Guy4ne78MILueuuu7j33ntJpVJcc8013HzzzSxdupSioiLq6ur4yU9+kp/vpaqqin//938fyFs9xBe/+MX8/DO33HILf/Znf8Zll11GWVkZW7Zs4Re/+AUbN25k9uzZhEIhXn/99VNaH+gNwB555BG2bt3KLbfcwk033URpaWn+S9MFF1xAeXn5YY/9y7/8S/7lX/6lz7aTnaR6v5kzZ/K+972PP/zhDxhjuPrqq/nUpz7F4sWLSaVSvPbaazz44IPEYjGqqqry89ZoeImIiPSXYkHFgooFFQuKHDcjIsOW53nmU5/6lAGO+POFL3zBPPfcc/nXX//61w85z/bt2/P7P/GJTxyy/1jHH+zrX/96vuxzzz131LKXXXZZvuzRrFq1ykyfPv2o97n/x3Ec8+///u8ndQ/GmHzZyy67rM/2n/3sZ/2qB2Buvvlmk0wmDzn3sd7vg33zm980ruse9TozZ840W7ZsOezxA3Xfxhjj+7755Cc/edS6zJkzx2zbtu2Yf9v+vAf9KbNu3TpTUFBwxPocTxucPHmy8X3/qOWPR0NDg5k2bdpR36/rrrvOfPOb38y/fuKJJwbs+iIiMvwpFlQsqFhQsaDI8dBwY5FhzLZtfvzjH/P444/zgQ98gNGjRxMMBqmtreXGG2/kiSee4Pvf//5gV/OknX/++WzYsIGHHnqIj370o0ydOpXi4mIcx6GkpITZs2fzoQ99iB/96Efs3r2bT3/606esLh//+Md57bXX+Na3vsX111/PtGnTKCwsxHEcioqKmDVrFp/61Kd4/vnneeihhygoKDip69199928/fbb3HHHHcyZM4eSkhKCwSA1NTW8733v46c//Snr1q1jypQpA3SHR2ZZFv/xH//Bb37zG9773vdSUVFBIBBgzJgxXHbZZfzrv/4rr732Wp85eE61c845h7Vr1/LZz36Ws88+m2g0mn9y3B9Lly7N//6Zz3zmuI49lurqal577TXuuusupk2bRigUorCwkClTpvCRj3yE5cuX87vf/Y7169fnj9HTYxEROR6KBRULKhZULChyPCxjjnOZHxERkRFi7ty5rFu3jkAgQH19PVVVVYNdJRERERE5TRQLykijnoQiIiKH8corr7Bu3ToAbrrpJgWFIiIiIiOIYkEZiZQkFBEReRdjDHfffXf+9Re/+MXBq4yIiIiInFaKBWWk0urGIiIiwFtvvcWePXvo6OjgF7/4Bc888wwA73vf+1i4cOEg105ERERETiXFgiKak1BERASAW2+9lQceeKDPttGjR7Nq1SrGjx8/SLUSERERkdNBsaCIhhuLiIj04TgOEydO5JOf/KSCQhEREZERRrGgjGTqSSgiIiIiIiIiIjLCqSehiIiIiIiIiIjICKckoYiIiIiIiIiIyAinJKGIiIiIiIiIiMgIpyShiIiIiIiIiIjICKckoYiIiIiIiIiIyAinJKGIiIiIiIiIiMgIpyShiIiIiIiIiIjICKckoYiIiIiIiIiIyAinJKGIiIiIiIiIiMgIpyShiIiIiIiIiIjICKckoYiIiIiIiIiIyAj3/wM/ISxiBwx2nwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "stats = pd.read_csv(\"time_stats.csv\")\n", + "stats_pycma = pd.read_csv(\"time_stats_pycma.csv\")\n", + "stats = pd.concat([stats, stats_pycma])\n", + "\n", + "\n", + "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 6))\n", + "\n", + "n = np.arange(2, 1000)\n", + "c = 0.01\n", + "\n", + "colors = [\n", + " \"#4C72B0\", # blue\n", + " \"#55A868\", # green\n", + " \"#C44E52\", # red\n", + " \"#8172B3\", # purple\n", + " \"#CCB974\", # yellow-brown\n", + " \"#64B5CD\", # cyan\n", + " \"#937860\", # brown-gray\n", + " \"#000000\"\n", + "]\n", + "\n", + "colormap = {}\n", + "for (method, group), color in zip(stats.groupby(\"method\", sort=True), colors):\n", + " colormap[method] = color\n", + " label = method.title().replace(\"_\", \" \") if method != \"CMSA\" else method\n", + "\n", + " time_gb = group.groupby(\"dim\")['time']\n", + " marker = \"^\"\n", + " if method == \"pycma\":\n", + " marker = 'o'\n", + " plot_dat = pd.merge(\n", + " time_gb.apply(lambda x: np.exp(np.mean(np.log(x)))).rename(\"mean\"), \n", + " time_gb.std().rename(\"std\"),\n", + " on=\"dim\"\n", + " ).merge(group.groupby(\"dim\")['n_updates'].mean().rename(\"n_updates\"), on=\"dim\")\n", + "\n", + " ax1.errorbar(\n", + " plot_dat.index, plot_dat['mean'], plot_dat['std'],\n", + " label=label, \n", + " marker=marker,\n", + " markersize=13,\n", + " linestyle='dashed',\n", + " alpha=.8,\n", + " linewidth=2,\n", + " color=color\n", + " )\n", + "\n", + " ax2.plot(\n", + " plot_dat.index, plot_dat['mean'] / plot_dat['n_updates'], \n", + " label=label, \n", + " marker=marker,\n", + " markersize=13,\n", + " linestyle='dashed',\n", + " alpha=.8,\n", + " linewidth=2,\n", + " color=color\n", + " )\n", + "\n", + "for ax in ax1, ax2:\n", + " ax.grid(which=\"major\")\n", + " ax.set_yscale(\"log\", base=10)\n", + " ax.set_xscale(\"log\", base=10)\n", + " ax.tick_params(axis='both', which='major', labelsize=16)\n", + " 
ax.legend(fontsize=16, ncol=1, fancybox=True, shadow=True) \n", + " ax.set_xlabel(r\"dimensionality $d$\", fontsize=22)\n", + "\n", + "ax1.set_ylabel(\"Time total [s]\", fontsize=22)\n", + "ax2.set_ylabel(\"Time per update [s]\", fontsize=22)\n", + "plt.tight_layout()\n", + "plt.savefig(\"time_matrix_adaptation.png\", dpi=500)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "43bac3ed", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from iohinspector import DataManager\n", + "\n", + "data_path = \"/home/jacob/code/ModularCMAES/data\"\n", + "data_folders = [os.path.join(data_path, folder) for folder in os.listdir(data_path)]\n", + "manager = DataManager()\n", + "for folder in data_folders:\n", + " if os.path.isdir(folder):\n", + " manager.add_folder(folder)\n", + "\n", + "\n", + "completed = manager.overview.group_by([\"algorithm_name\", \"function_id\", \"dimension\"]).len().filter(len=100)\n", + "completed_overview = manager.overview.join(\n", + " completed,\n", + " on=[\"algorithm_name\", \"function_id\", \"dimension\"],\n", + " how=\"inner\"\n", + ")\n", + "\n", + "def ert(runs, target = 1e-8):\n", + " total_evals = 0\n", + " n_suc = 0\n", + " for row in runs.iter_rows(named=True):\n", + " total_evals += row['evals']\n", + " if row['best_y'] <= target:\n", + " n_suc += 1\n", + "\n", + " if n_suc <= 0:\n", + " return float(\"inf\")\n", + " return total_evals / n_suc" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4b14ed26", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABLIAAATYCAYAAAALc+7FAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xl4VOXZx/HvmZmsE5IAYQkaNgEpsrlAQC1aFbFYrbXS2kqVSsUltSoqCAqKVaniLmMti4i21IUXrVqtoqIEJBAVFEUFBWSRnYRkskxmOe8fh5lsk2QSskzg97muXMycOcszwzxnztxzP/djmKZpIiIiIiIiIiIiEuVsLd0AERERERERERGRSCiQJSIiIiIiIiIirYICWSIiIiIiIiIi0iookCUiIiIiIiIiIq2CAlkiIiIiIiIiItIqKJAlIiIiIiIiIiKtggJZIiIiIiIiIiLSKiiQJSIiIiIiIiIirYICWc1owoQJpKenk5yczIABA3jjjTdaukkiIiIiIiIiIq2GYZqm2dKNOFZ888039OjRg7i4OHJzcznvvPPYvHkz7du3b+mmiYiIiIiIiIhEPWVkNaO+ffsSFxcHgGEYlJWVsXPnzhZulYiIiIiIiIhI66BAVg3cbjd33303F1xwAe3atcMwDJ577rmw63o8HiZPnkyXLl1ISEggMzOTpUuXhl33hhtuICEhgSFDhnDOOecwYMCAJnwWIiIiIiIiIiJHDwWyarB//37uvfdevv76awYNGlTruuPGjePRRx/liiuu4IknnsButzN69GhWrFhRbd2nn34at9vNe++9x/nnn49hGE31FEREREREREREjiqqkVUDj8dDXl4enTt35pNPPmHIkCEsWLCAcePGVVpvzZo1ZGZmMmvWLG677TYASktL6d+/Px07duTjjz+u8RgXXXQR119/PaNHj27KpyIiIiIiIiIiclRQRlYN4uLi6Ny5c53rLV68GLvdzoQJE0LL4uPjGT9+PKtWrWL79u01buvz+fjuu+8apb0iIiIiIiIiIkc7BbKO0Nq1a+nTpw/JycmVlg8dOhSAdevWAXDo0CEWLVqE2+3G5/PxyiuvsGzZMkaMGNHcTRYRERERERERaZUcLd2A1m7Xrl2kp6dXWx5c9uOPPwLWLIVz587lhhtuwDRNevXqxaJFixg8eHCN+/Z4PHg8ntD9QCDAvn37iI2NRSNC6ycmJoakpCTVJGslTNOksLCQLl26YLNFR7w9EAjw448/YrfbK/VLaRwJCQkkJCS0dDOE6Ox/YPXBYJazPgObXlJSErGxsS3djGNSNPbBQCDAzp07sdvtlJWVtXRzpJWJi4vD6XS2dDMi0hT9z+PxcP311/Pee++Rn59Pv379eOyxxxg+fHjE+wgEAmzbtg3DMPQZ2MoYhkFycjJ2u72lm9IqRNoHFcg6QiUlJcTFxVVbHh8fH3ocIDk5mWXLltVr3zNnzmTGjBmVlsXFxdG+fXsFZOrJ6/Wyd+/elm6G1NP27ds5/vjjW7oZgBWUzsjIoH379qH+LY2npKSEgwcPtnQzpIJo6n8AX3/9NaeddlpoJmFpWocOHcLtdrd0M45p0dQHd+7cSdeuXUlLSwt73StSG4/Hw/79+1u6GfXSmP3P5/PRvXt3VqxYwfHHH8/LL7/MRRddxNatW0lKSopoH1999RVDhw7VZ2ArZJomBw4c0A/h9VRXH1Qg6wglJCSEfVOWlpaGHm+oKVOmMHHixND9N954g8cffxyXy0WbNm2qrR/8tTojI6PRf8Fryn03xzE3b95MdnY2V111FR07djzitnm9XpYtW8bPfvYzYmJijnh/jS2a2xdJ2woLC+nRo0fY93lLiY2NpV27djz11FOccsopYdcJBAL88MMPdOvWrUn6YFPtu6WPaZoma9eu5csvv+T222+P6Fher5d3332X888/Pyrf49HaNqi7fQUFBWRkZERV/wNCswi7XK6wF/7qf4177Pfff59AIFB
tkhto/e/xlhRJ26KxD9psNtLS0nC5XDXO5q0+2HxaW9s2bNhAbm4uf/nLX0hMTGzR9rXEZ6DT6WT69Omh+5dffjkTJ07k22+/5dRTT41oH19++SWnnHIKs2fPDvsaqv81n/q2raysjFdffZUBAwZw7rnnNnn7jpXPQAWyjlB6ejo7d+6stnzXrl0AdOnSpcH7jouLq/SrV0xMDKmpqQwaNCjsr2F+vx+73U7v3r0bPXWxKffdHMfs0KEDubm5OJ1O2rdvf8Rt83q9JCYm0r59+6g7QUB0ty+StgWXR9MvTmVlZcTFxTFo0CBOPPHEsOv4/X4Mw2iyPthU+46GY/r9fr799lsSEhIi+rU/+D5KTk6O2vd4NLYNIm9fNPU/sPpgu3bt6N+/f42fgep/jWf79u1s3LixWg1QOHre4y2hPm2Lpj7o9XqJj4/XZ2CUaG1ti42N5fPPPycuLi7sOaU5RdoHZ86cybp161izZg15eXlhZ68HK9ts+vTpvPDCC+Tl5TFw4EDuu+8+Ro4cWeO+N23axMGDB+nVq1fE7S4rKyMtLY1+/frpM7CFNaRtOTk5OByOZnn/HyufgdEV3myFBg8ezMaNGykoKKi0fPXq1aHHRUREREREpHV48MEHQ5nAtRk3bhyPPvooV1xxBU888QR2u53Ro0ezYsWKsOuXlJQwduxYpkyZQkpKSlM0XeSYoEDWEbrsssvw+/3MmTMntMzj8bBgwQIyMzPJyMho8jY89dRTdOvWjbi4OB566KGItztw4AAdO3Zk69atjdqes88+m5tvvrlR91kfl19+OY888kiLHV+OPc3dByv2sar9rSX6X13HrPq4+qg0pmj7DGxMV155ZaUSA5FoyDlAfVKOxNHYB88+++x6973WoCHnh/pu8/vf/54FCxbUr2FRaOPGjfzwww/MmjWrxnXWrFnDiy++yMyZM5k1axYTJkzggw8+oFu3bkyaNKna+l6vlzFjxtCrV69KQw2PRHP2v3POOadFv+PVV0tcE+vztPloaGEtZs+eTX5+fmjmwTfeeIMdO3YAcOONN5KSkkJmZiZjxoxhypQp7N27l169erFw4UK2bt3K/Pnzm7yNn3/+ORMnTuQ///kPAwcOZN++fRFve//99/PLX/6S7t27h5bt27eP6dOn89///pc9e/bQtm1bBg0axJ133tkotaWaw1133cWIESP405/+pF86pMk1dh8cN24cCxcurLbu+eefzxNPPFHnPpcsWdLsacT1Pab6qDSWpux/DoeD448/njFjxnD33Xc3WpvPPvtsBg8ezOOPP17nuk8++SR9+/at1/4bcg5Qn5SGaurPwHbt2jFkyBBmzpzZKme2remccu+99zbaxDH1Oac05PxQ322mTp3KWWedxeTJk2nXrl29jhVNOnXqVOc6ixcvxm63M2HChNCy+Ph4xo8fz9SpU0O1fsGqq/SHP/wBwzBYuHBhowwdbor+l5+fz2uvvRZ2m8WLF0fNhEc1fWedPn06Z5xxRou1S5+nzUeBrFo8/PDD/PDDD6H7S5YsYcmSJQCMHTs29OZ8/vnnmTZtWqWx0W+++SYjRoxo8ja++eabDB06lNGjR+P3+yOeYai4uJj58+fzzjvvVFr+61//mrKyMhYuXEjPnj3Zs2cP77//PgcPHmzxQFZZWVlEU4H379+fE044gX/+859kZWU1Q8vkWNbYfRDgggsuqPZrpsPhiGjGn5a4aKzvMdVHpbE0Zf/zer18+umnXHXVVZimyfjx4xu7+bUqKysjNTW13gWHG3IOUJ+Uhmrqz8Ddu3dz11138ctf/pJ33323UdveXMKdUwzD4MEHH2y2NgSvoRtyfmjIZ3xGRgb/+te/uPHGG+t9vNZk7dq19OnTp1rdo6FDhwKwbt26UCDr2muvZdeuXbzzzjs4HHV/Bfd4PJUmFCspKQGs+kx+vx+A119/nSFDhjBq1CgCgQBut5tAIFDnvoP976233grtC6zJd0zTrLQ
MCO0zNTUVm81W7fGmEDxm1ecTfC9feumllJWV8eyzz4a+s37wwQfs27cv1L6ank9TtQ3gJz/5CSeccALPP/88N9xwQ6VtfD4fXq+3UdsSTvAYzXGs+oqkbZG2W4GsWkSaahkfH8+sWbNqTT1tCr169eL7778HrGJoY8eO5c4774xo27feeou4uDiGDRsWWpafn092djYffvghZ511FgDdunVj6NCh+P1+Nm3aBFi//PTv3x+AF154gZiYGK6//nruvffe0K8LgUCASZMmMW/ePGJjY7nuuuu45557QscKBAI8+OCDzJkzh927d9OnTx+mTZvGZZddFlrnnHPO4fjjjyctLY1//etfDBgwgPfff7/O7QAuuugiXnzxRV2QS5Nq7D4YFBcXR+fOnSst8/v9EQWyqv4yG0l/Xbx4MTNmzOC7774jMTGRk08+mf/85z/Ex8dTVlbGzTffzEsvvURBQQGnnXYajz32GEOGDAl7zKKiIq6//nqWLFlCmzZtuO2228K2U31UjlRz9L+MjAzOO+883n///VAg63//+x/33XcfX375JXa7neHDh/PEE09wwgknhPZRU5/Kysrio48+4qOPPgplWG7ZsoXu3buH+qrD4eCf//wn/fv3p7i4OLR/sGZ0ve6663jttddITk5m0qRJ/Oc//6nW56veHzhwIPHx8TV+JoP6pNRfc/TBzp07c8cdd/DTn/6UgwcPAtC9e3duvvnmSkOGBg8ezCWXXBJ6X0dynRkIBHj44YeZM2cO27dvp1OnTlx77bWh5xAIBJg1axavvvpqg69lqz6f4Dll6dKloUBWQ84pgwcPZtasWVx99dVhzynjxo2rdD4ZMGAAy5Ytq3Z+aKpzytlnn81LL7101Aeydu3aRXp6erXlwWXBUT0//PAD8+bNIz4+nrS0tNB6b7/9Nj/96U/D7nvmzJnMmDGj0rKLL76Y77//npiYGEaNGsW2bdsA68fOiy++mAcffDDUJ2sTDKa1b98+9P0OrNniioqKKi2r6IwzzqBv375MnTqVK6+8khNPPJHY2Fj+7//+j5iYGH7729/y5z//ObR+IBBg3rx5vPzyy+zfv5/u3btz/fXXM2rUqNA62dnZPPPMM2zatAmbzcbgwYOZOnUqXbt2DR0zWFT9jTfeoE+fPjz11FOsWLGChQsXcvzxx1NWVkbbtm359a9/DRBqf0lJCXl5eaH7ZWVlzJo1i7feegu3203//v254447GDBgAGAN5+/duzdgBQkdDgeXX345f/nLXyp9x63rOQ0bNoyFCxdWKvi/a9cutm3b1qwzMC5durTZjlVftbWtuLg4on0okNWKffzxxwwfPpzrr7+esWPHkpCQEJotsS7Z2dnVpntNSkoiKSmJ1157jWHDhtU6c9jChQsZP348a9as4ZNPPmHChAl07dqVa665JvT4xIkTWb16NatWrWLcuHGcccYZoQ49c+ZM/vnPf/LMM8/Qu3dvli9fztixY+nQoUMoiAbw2muvcf3117Ny5cp6bTd06FDuv/9+PB5PRDOgiTREY/fBplJbf921axe/+93veOihh/jVr35FYWEh2dnZmKYJWJmp77//PgsXLqRbt2489NBDjBo1iu+++y7sr7S33347H330Ef/5z3/o2LEjU6dO5bPPPqs28YX6qByp5uh/X375JR9//HHoghqgqKiIiRMnMnDgQNxuN9OnT+dXv/oV69atw2az1dqnnnjiCTZu3Ej//v259957AWtW3aCFCxeGPvP8fn+1GbImTpzIypUref311+nUqRPTp08P27+qquszGdQnpf6aow+63W7++c9/0qtXL1JTUyNuWyTXi1OmTGHu3Lk89thjnHnmmezatYtvvvkmtI/nn3+eK6+8ko8//pg1a9Y0+Fq2ouA5pVu3bqFlDTmnfPTRR5imyWOPPcamTZvCnlMqnk9q0lTnlIEDBzJnzpyj/nxSUlIS9vkFh98Fs6i6desWuq6K1JQpUyrVaVu
0aBFvv/02J5xwAnFxceTk5HDmmWdy7bXXcsUVV5CYmMiePXs44YQT6gyWPP300wwZMiQUtAlKTk4mEAhUWx4IBPj+++9JSEigbdu29O7dm4SEBF5//XVuvvlmcnJyyMnJ4eqrr+YXv/hF6H3wwAMP8PbbbzNnzhx69+5NdnY2N9xwAwMGDAj1kfXr1zNlyhQGDBiA2+3mnnvu4bbbbiM3N5ctW7aEjnPttdeG3ssnnHACSUlJfPrpp1x22WU1vscqthfglltu4YMPPghd0z788MNce+21fPvtt7Rr1y50rKuvvprVq1fz6aefct111zF48GD+9Kc/hZ7TW2+9xT333MNPf/pTVq5cWe05XXDBBcyZM4euXbuG2paenk7Hjh0ZPXp03f/5R8jr9bJ06VJGjhwZlbMW1tW2qpPo1USBrFbA5XLhcrk4dOgQ/fr1Cy1PSkpi69atnHnmmXTu3JlLLrmEZcuWcd555/F///d/te7zhx9+oEuXLpWWORwOnnvuOa655hqeeeYZTjnlFM466ywuv/xyTjrppErrZmRk8Nhjj2EYBieeeCLr16/nscceCwWyBg4cGKop0rt3b2bPns3777/PyJEj8Xg8PPDAA7z33nsMHz4cgJ49e7JixQr+8Y9/VPrw79atGw8++CB2u71e23Xp0oWysjJ2795d6WJBpDFV7INer5ff/va37Ny5k8TERKZNm8aYMWNq3DZcHwx68803SUpKqrTsjjvuqPYrb6Rq66+7du3C5/Nx6aWXhvpK8JepgoICXnzxRZ599ll+/vOfAzB37lyWLl3K/Pnzuf322ysdx+12M3/+fP75z39y7rnnAoR+LatKfVSOVMX+Fx8fz7nnnktRURF2u52bbrop9HkUTiT9z+fz4fF4sNlsPPnkk6HHg7/4Bj377LN06NCBDRs20L9//1r7FFjT0CcmJlbLugTr8zJYrLfqUIjCwkIWLlzIokWLQv1rwYIFNT6Pimr7TA5Sn5T6qnod6vf7KSkpoWfPnowZM4aHH364xm0j/QwsKioiPT2d//znPxFnMkRyvVhYWMgTTzzB7NmzueqqqwDry/GZZ54Z2s+AAQPIysqid+/e9O3bt8HXsuHOKbNnzw493pBzSr9+/di0aRMpKSk1nlMqnk/CacpzSseOHY+J80lCQkKl4X9BpaWloccbKi4urlKAJrgvu92O3W4nJSWFrVu3MmLECI477ji6d+9OXFxcKHizbNmyGve9bds2jjvuOOx2e6XlhmFgGEa15VXXsdvtGIbBwIEDQ1ljffv25emnn+bDDz/kggsuwOPx8Le//a1SH+nduzcff/wx8+bN45xzzgGodq28YMECOnTowDfffBN6/r179652Pgl+Z50zZ06l76wDBw4M+3yKiop45plneO655/jFL34BwLx58+jevTvPPfcct99+O4ZhkJGRweOPP45hGPTr14+vvvqKJ554gmuvvTb0nN555x3S0tLo1asXJ554YrXnlJGRQVlZGfv27Qu9/202Gw6Ho1kDSzExMVEXyAqqrW2RtlmBrFYgKyuLrKwsXn75ZebOnRta/sUXXwDlF8g33ngjo0aN4r333qtznyUlJWGL9f3617/mwgsvJDs7m5ycHN5++20eeugh5syZw+mnnx5ab9iwYZWKFA4fPpxHHnkkdOFd8SQCVhR67969AHz33XcUFxdXuoAGK93z5JNPrrSsYgCtPtsFT/aRpiaKNETFPlhQUMCjjz6K0+mkTZs2oZohTqcz7LY19UGAn/3sZ/z973+vtCwlJYUDBw40qJ219ddBgwZx7rnnMmDAAEaNGsX555/PZZddRtu2bfn+++/xer2V+n5MTAxDhw7l66+/rnac77//nrKyMjIzM0PL2rVrx4knnlhtXfVROVIV+198fDzLli1j586ddOnShUGDBnHppZfSvn37sNtG0v+Kiop47LHHcDgcXHrppaGhCZs2bWL69OmsXr2a/fv3h2pkbNu2jf7
9+9fap+pSW4bK5s2b8Xq9odorYJ0XwvWvqmr7TA5Sn5T6qnodCvCPf/yj0mdATSL9DMzLy+Ppp5/mF7/4BYsWLaqWKRJOJNeLX3/9NR6PJxTACaexrmXDnVMqBq8ack751a9+VefrUFfGW1OeU4IBiKP9fJKens7OnTurLQ9mJkYSFGyocP1v0aJFDB48uNZAFNTe/+qjMfpIbe//YH8P916u6TvrvHnzqmUzA6Fr2oqF4MNd09Z2zRx8ThdccAGmaYbWq/qc9HnaPBTIasXWrVtHr169Ql+Uzz777LAn03DS0tLIy8sL+1h8fDwjR45k5MiRTJs2jT/96U/MmDEjbEHOmlSNpBqGEToxBQuB/ve//+W4446rtF7V1NCKv2TUZ7tgHYWKQzZEGlvFPuh0OunYsSObNm2ic+fOpKWlcfDgwRoDWbX1QafTSa9evSot8/v9DQ5k1cZut7N06VI+/vhj3n33XZ566inuvPNOVq9e3ejHqkh9VI5U1c/AxMREwMrGCBZ4rUmk/e/ZZ59l0KBBPPvss6E6JhdddBHdunVj7ty5dOnShUAgQP/+/SkrKwNq71M9evSo9TnVdL44UrV9JgepT0p9Ve2DmzZtYvPmzVx++eVs2LCh1m3r8xk4b948UlJSeOWVVzj77LOx2WzV+nfF4sCRXC9GkinTWNey4c4p8+fPD9Xda+g5pa7AXlOdT6Duc8qhQ4eAo/98MnjwYJYtW0ZBQUGlgu/Ba6i6hmgeiar9rz5q63/10Rh9pKb3f8U+XdNzDPed9e677w4byGoMwef0+uuv4/V66d69eyhoWPE56fO0eTRftTFpdOvWrWPQoEEN2vbkk0+u8yIjqF+/fhQVFVVaVvVLbk5OTqgQXyT7i4uLY9u2bfTq1avSX3BmjyPd7ssvvwwVihdpKjX1wU8//RS/31/r+7k+ffBI1dVfDcPgjDPOYMaMGaxdu5bY2FheffVVTjjhBGJiYvj4449D23q9XnJzcysNcw4Krl/xeHl5eWzcuLHauuqjcqSq9r/8/HwuueQSunXrxu23317reyvS/mez2Zg6dSrTp0+ntLSUAwcO8O2333LXXXdx7rnn8pOf/CTsl4Ga+hRYQwsbMoNSz549iYmJITc3N7Ts0KFDYftXQ6hPSn1V7YOTJk3illtuiWjb+nwGGoaBzWYLDeHq0KFDpVpcBQUFbNmyJXQ/kuvFYI2f999/P6I2VNXQa9ngOeWuu+6ipKTkiM4pwREY0XhO2bRp0zFxPrnsssvw+/3MmTMntMzj8bBgwQIyMzNrfS8cqar9zzAMrrzySoYNG8a//vWvWrdtjmvQSPpIpO//+hyz6nfWoBNOOIHY2NhKNePCXdPWds0cfE7bt2+nW7duNfZ7fZ42D2VktWLr1q3j4osvbtC2o0aNYsqUKeTl5YWGOxw4cIAxY8Zw9dVXM3DgQNq0acMnn3zCQw89VO0427ZtY+LEiVx77bV89tlnPPXUUzzyyCMRHTs4k9ktt9xCIBDgzDPP5NChQ6xcuZLk5ORQrYIj2S47O5vzzz+/Qa+NSKTC9cH8/HzGjx9faRhwOOH6YJDH42H37t2VllVMc66v2vrr6tWref/99zn//PPp2LEjq1evZt++ffzkJz/B6XRy+eWXM3nyZNLS0ujatSsPPfQQxcXFoV+SK0pKSmL8+PHcfvvttG/fno4dO3LnnXeGrWuiPipHqmr/S01NDc28NWbMGC677DI6deoUdtva+l9VY8aM4fbbb2fRokXcf//9tG/fnjlz5pCens62bdu44447Kq1fW58Ca8a11atXs3XrVpKSkmjXrl1EtX/atGnDVVddxe233067du3o2LEjd999Nzab7YjOD0Hqk1JfFfvgf/7zH3r37k2PHj0iKvge6WdgXl4es2fPxu1287Of/QywZrV+7rnnuOiii0hNTWX69OmVfkiN5HoxPj6eyZMnM2nSJGJjYznjjDPYt28fX331VdjPt6oaei0
L5ecUl8vFxIkTG3xO6dmzJxD+nBKJpjynfPrpp9WGlLU2c+bMobS0NDTz4BtvvMGOHTsAq5xLSkoKmZmZjBkzhilTprB371569erFwoUL2bp1K/Pnz2/S9lX9DPzoo48oLi4mKSmJUaNGMWDAgGpD/4Jq63+HDh1i3bp1lZbVZ6KFoEj6SNu2bet8/4dT23fWX/7yl2G3cTqdXH/99aH3e03XtLVdMwef06233sptt93GpZdeitvtrtbv9XnaPBTIaqUCgQDr169n2rRpDdp+wIABnHLKKbz88stce+21gPUlNDMzk8ceeyw0jjgjI4NrrrmGyZMnh07eYE1PWlJSwtChQ0OFdSdMmBDx8f/617/SoUMHZs6cyebNm0lNTeWUU05h6tSpR7xdaWkpr732Gv/73//q+aqIRC5cH/R4PPz5z39m0qRJlepKhROuDwb973//qzad84knnshrr73WoLbW1l+Tk5NZvnw5jz/+OAUFBXTr1o1HHnmEn//85/j9fm699VZSUlL4wx/+QGFhIaeddhrvvPNOjV/+Z82ahdvt5qKLLqJNmzbceuutoSEGQeqjcqRq+wzs1KkTgwYNIjs7u8YJEmrrf1U5HA5uuOEGHnnkEe666y5efPFF/vKXv9C/f39OPPFEnnzySc4+++zQ+rX1KYDbbruNq666in79+lFSUsKWLVvo3r17RM/70Ucf5brrruMXv/gFycnJTJo0ie3btx9xrRP1Samvqn0wJyeHl19+mRdffBGPx4PX6yU5OZnp06eH3T7Sz8A2bdrQt29fXnrppVAtoClTprBlyxZ+8YtfkJKSwl//+tdKGVkQ2fXitGnTcDgcTJ8+nR9//JH09HSuu+66iF+Dhl7LOhwO/vznP/PQQw9x/fXXN+icMmvWLEaMGAGEP6dEqinOKaWlpbz//vu8/fbbDd5HNHjqqafYtm1b6P6SJUtYsmQJAGPHjiUlJQWwZrecNm0aL7zwAnl5eQwcOJA333wz9P/TFMJ9Bh533HFs2rSJ9PR0Ro8ezWeffVZjIKu2/vfhhx9Wq/N29dVXV5vgJxJ19RGbzVbn+z+c2r6z1tb//va3vxEIBGq9pq3rO+5f//pX2rdvz1NPPcX06dOrPSd9njYjU1qNl156yTzvvPPM0tLSsI/7fD5z4cKF5qWXXhrR/t58803zJz/5ien3++tc1+fzmV9//bXp8/nMs846y7zpppvq0/QGqXjM+nj66afNkSNHVlq2b98+8+677zZ/+OGHRmlbWVmZ+dprr5llZWWNsr/GFs3ti6Rthw4dMgHz0KFDzdiy2m3fvt1MT083v/rqq7CPBwIB87e//a2ZlZUV8Xu2oX2wPo6kvzb0mHUJ10e//PJL8+67767x/FZVa3+Pt6S62heN/c80TfNf//qX+fOf/zzse2T37t1mXl6e+fXXX5sHDhwwTzrpJPOLL76odX/N0f+ORCTHdLvdZkpKijlv3rwjOla4PvnOO++YTz31VNj1W/t7vCW11s/ALVu2mMcff7z5zTff1LhO8D07f/5889Zbb61zn0dDH2wpTdW2xjinzJ492zz99NMrtW3z5s3m3XffbR48eLAxmnlEWutn4HPPPWdefPHFYT8D3W536DMwPz/fPOWUU8w1a9bUuj/1v+oivWaurW3hPk9N0zTnzJljvv76643RzDodK5+Bysg6ipx//vmsXbuW0tJSjj/+eF555ZXQdKfhXHjhhWzatImdO3c26Rju5hYTE8NTTz3V0s2QY8zKlSt5+eWXOfHEE0Ozq7zwwguVZpOp6mjtg3VRH5XG9sMPPzBhwgRKS0uJjY3lxhtvrLXvQevsf2vXruWbb75h6NChHDp0iHvvvRegxqEUkVKflJbQGvvg0aYpzikxMTHcddddjdXEY57L5cLlcpGfn8+QIUPCrrNnzx5+9atf4fF4cDgcXHPNNTWuG6T+1zT0edp8FMg6irz77rts2rQp4qLrADfffHPTNqoF/Ol
Pf2rpJsgx6Mwzz8Tr9aoPRkB9VBrb0KFD+fTTT4+J/vfwww/z7bffEhsby6mnnkp2dvYRF5RVnzx2rFq1ijPOOIN77723yYINV1111VHdB482jX1OGT9+PJs2bWrEFh7bsrKyyMrKYuHChaGhjVX17NmTzz777Jj4DIx2+jxtPgpkSb19+OGHLd0EEYmQ+qvI0ePkk0/m008/belmSCsVCAS45ZZb6szUkGOHziki5XTN3LookNUKBFNKDx06FHbKexFpPqZptnQTjkp6XSUSep80H73WR585c+aQmZlZbQKOSOk9IdKy1AdbL/3fNT4FslqBYErpG2+8wbRp08jNzaVNmzbV1vP7/Wzbto3i4uKIU0oj1ZT7bo5jbt26FYDExMRGaJkci+Lj4ykpKeGTTz7B5/OFXUd9sOE+//xzHA4HMTExTXocab3i4+PZu3cvn3zyCUlJSdUeV/9rPKZp8u2334Z9naXpud1uAC699FI+++wz8vLyWLBgAePGjau2rsfjYfr06ZVmTLvvvvsYOXJkpfUOHDjA448/Tk5OToOGE8XHx1NcXMwnn3xCaWlp2HXUB5tPa2vbN998A0BCQkJLNq1VS0xMZPfu3Xz66ac4nc5qj6v/NZ/6ts3j8bB3715OOOGEZmjdsUOBrFYkMzOTr776irfeeovY2Nhqjwc7VdeuXZvsBNYU+26OY9rtds4555wjriMix67Y2Fjy8/P5/vvva5zaWn2w4WJjY7n00kux2WxNehxpvU4//XS++OIL/vvf/+ozsBkkJydz4YUXNvtxxQo6AWzcuJFBgwbVOtxl3LhxLF68mJtvvpnevXvz3HPPMXr0aJYtW8aZZ54ZWu/OO+/k5ptvJjU1tUFtSkxMJC8vj2+++abG+kfqg82ntbXN4XBw8cUXEx8f38Kta73OPPNMrrjiCt588019BrawhrSta9eunHHGGU3csmOLAlmtSHx8PD6fj9tvvz1sRpbX6+Xtt9/m5z//eaNnNTTlvpvjmDabTV+QpVHceuutYX8JA/XBI2G32zEMo0mPIa1TcHi93+/XZ2Azcjh0idhSOnfuDMCXX37Jxo0ba6xptWbNGl588UVmzZrFbbfdBsCVV15J//79mTRpEh9//DFgzUyXm5uLy+U64rbddttt+gyMAq2tbfqMP3JOpxOv16vPwChQ37YZhhF1wbijga5SWiG73R72AtM0zdBjjX0B2pT7jqZjitTFMIwa34/qgyKNLzi8vqCggJSUFH0GylEvLi4uovUWL16M3W5nwoQJoWXx8fGMHz+eqVOnsn37djIyMvjoo4/49ttvOe644wA4dOgQDoeD77//ngULFtSrbfoMjA5q27FLn4EtL5rbdizRKy8iIiIi0sqsXbuWPn36kJycXGn50KFDAVi3bh0ZGRlMmDCByy+/PPT4TTfdRI8ePbjjjjtq3LfH48Hj8YTuFxQUAFYmgtfrDbtNcHlNjx+Jptx3NB0zUmpbw9XVvmhtt4hUpkCWiIiIiEgrs2vXLtLT06stDy778ccfAau+VcXJbhISEkhKSqq1XtbMmTOZMWNGteXvvvtunRPnLF26NJLmN0hT7juajhkpta3hampfcXFxM7ekdhWH14tIOQWyRERERERamZKSkrDDEIMFtUtKSsJu99xzz9W57ylTpjBx4sTQ/YKCAjIyMjj//POrZYAFeb1eli5dysiRI5ukRk9T7Tuajhkpta3h6mpfMPswWlQdXi8iFgWyRERERERamYSEhErD/4JKS0tDjzdUXFxc2CBZTExMncGJSNZpqKbcdzQdM1JqW8PV1L5obrOIlNM0biIiIiIirUx6ejq7du2qtjy4rEuXLs3dJBERkWahQJaIiIiISCszePBgNm7cWG0o1OrVq0OPi4iIHI0UyBIRERGpRW6uwcyZQ8nNNVq6KSIhl112GX6/nzlz5oSWeTweFixYQGZmJhkZGS3YOhERkaajGlmtgGarEBERaRmmCbNn21i
/Po3Zs20MHw6G4lnSDGbNmsWBAwcAeOONN9ixYwcAN954IykpKWRmZjJmzBimTJnC3r176dWrFwsXLmTr1q3Mnz+/JZsuIiLSpBTIagU0W4VIy1EgWeTYlpMDK1caJCT4WLnSQU4ODB/e0q2SY8F9990Xur1kyRKWLFkCwNixY0PXg88//zzTpk3jhRdeIC8vj4EDB/Lmm28yYsSIFmmziIhIc9DQQhGRWmRlZbFhwwZyc3Nbuiki0sxME1wu8HggJcWDx2PdN82WbpkcCw4dOoRpmtX+unfvHlonPj6eWbNmsWvXLkpLS1mzZg2jRo1quUaLiIg0AwWyRERERMLIyYHsbEhJMTEM69/sbGu5iIiIiLQMBbJEREREqqiYjeV0WsucTpSVJSIiItLCFMgSERE5hmgGvsgEs7FSU8uLuxuGdV9ZWSIi0hxcLhf9+vVjyJAhLd0UkaiiQJaIiMgxouoMfMoqCq9iNlZsLBQWGvh8VjRLWVkiItJcVKtVJDwFskRERI4RlWfgM5RVVIOK2VhFRbBzJ+ze7SQ/31BWloiIiEgLUyBLRETkGKAZ+CJTtTaWx1P+WFyc9WIpK0tERESk5SiQJSIicgzQDHyRqVobq2IgKzbW+ldZWSIiIiItR4EsERGRo5xm4ItMuNcpGMiy2Uzs9vJ19fqJiIiItAwFskRERI5ymoEvMlVfJ7/f+gOIiQlUWlevn4iIiEjLUCBLRETkKFYxy8jng7Ky8seUVVSutmwsqB7IAr1+IiIiIi1BgSwREZGjWMUso5QUKCw0KC52AMoqqihc1lrFQJbDUT2QpddPREREpPkpkNUKuFwu+vXrx5AhQ1q6KSIi0opUzTIyDGjf3iQ+3hdaR1lF5a9TURHY7VBaav0VFUEgYP1B+fKKf3a7td6x/PqJiIiINCcFslqBrKwsNmzYQG5ubks3RUREWpFwWUYAtgqf/soqAq8Xtm+3gnpuNxQWWn8VA1k+n4HbbYQeC/653dZ2O3ZY+xERERGRpuVo6QaIiIhI4wtmGR06BElJta/rdEJ+vrW+y9UszYsqsbHw8suQl1d5+YMPwpdfWjMW/uEPqzjrrBHExMSE3Ue7dtZ+RERERKRpKZAlIiJyFMrJgffft4a//fADdOxoBVvCqZiVdawm/3bqZP1VtGCBFRA8cMDPypVu+vaFGuJYIiIiItJMNLRQRETkKGOa8PjjcPCgNYzQNOuu3xSslTV3brM0sdUwDKtIvoiISHNTrWSR8BTIEhEROcqsWgX/+58VhDEMSEyE9u1r3yaYlfXxx83SRBFpBfQlWqRlqVaySHgKZImIiBxFTBPuvNMqVB4s8N6uXeWZ9srKbLXOwCciAvoSLSIi0Uk1skRERI4iW7bAZ59ZQalAAJKTywNVFoPSUgdGxWkMK3A6qxc9Pxbdey/s3Qs9e8K117Z0a0REREQkSIEsERGRo4TPZwVguncHvx9+9jPIyqq8jtfrY/nyVYwYEX4GPrcbNIoIPvkEfvwRvvgC/vznlm6NiIiIiAQpkCUiInKUmDcPvvzSmlmvRw948EGrPlZFXi9s3lzzDHwFBc3T1mhWUmIFscDKyKoheU1EREREWoBqZImIiBwF9u+H55+3bttscN991YNYEpmtW8tv9+zZYs0QERERkTAUyBIRETkKpKVZGVldu8J110H//uHXM3JzGTpzJoaKN9doy5by2z16tFw7RERERKQ6DS0UERE5SvTrB//8J8TH17CCaWKbPZu09euxzZ4Nw4dr3FwYmzeX31ZGloiIiEh0UUaWiIjIUSQx0RpaGFZODsbKlfgSEjBWroScnGZtW2uhjCwRERGR6KVAloiISCv1448wf741Q2GdTBNcLvB48KSkgMdj3TfNJm9naxMMZMXFQefOLdsWEREREalMgSwREZFWyO+Hu+6Cv/8drrkG9u6tY4OcHMjOxkxJAcOw/s3OVlZWFWVlsGOHdbtHj1qy20RERESkRejyrBVwuVz
069ePIUOGtHRTREQkSjz7LHzxhXV7/35wOmtZuUI2VmhFp1NZWWH88AMEAtZt1ccSERERiT4KZLUCWVlZbNiwgVzNMCUiIlgBrLlzrds2G9x3Xx2BrMPZWKSmlhd3NwzrvrKyKunYEWbMgHHj4KyzWro1IiIiIlKVAlkiIiKtSFGRNaQwmDV0zTUwcGAtG1TMxjLNytlXLZCVtWrVKmw2G/fdd1+zHK++UlLgwgvhz3+Gc89t6daIiMixTCNzRMJTIEtERKQVefBBq8g7wKBBcPXVdWwQzMaKjYUdOzC2bcPw+azHmjkrKxAIcMstt+iCXEREJAIamSMSngJZIiIircQ778Bbb1m3nU7461/Bbq9lg2A2VkkJ5OVZy0pKsHs85es0Y1bWnDlzyMzM5Cc/+UmTHkdEREREjl4KZImIiLQCu3bBzJnl9++4A7p0qWOjYDZWWVn5WMQ2bfBVLKhVQ1bWAw88wAUXXEC7du0wDIPnnnsu7CE8Hg+TJ0+mS5cuJCQkkJmZydKlS6utd+DAAR5//HFmzJgR2RNuAT4frFgBO3eWv1wiIiIiEl0UyBIREWkFnn0W3G7r9gUXwM9/XscGwWys/HwrkAXgcGB27lx93TBZWQ8++CBff/01gwYNqvUw48aN49FHH+WKK67giSeewG63M3r0aFasWFFpvTvvvJObb76Z1NTUup9sC9m+HW6+GX75S7j77pZujYiIiIiE42jpBoiIiEjdbr8dEhLgww+tbKw65eTABx9YAarg+MPjjgs/FrFiVtbhOhwbN26kd+/efPLJJzXWtFqzZg0vvvgis2bN4rbbbgPgyiuvpH///kyaNImPP/4YgLVr15Kbm4vL5arns25eW7aU3+7ateXaISIiIiI1UyBLRESkFYiNhYkT4brrIDGxjpVNE554Ag4eBNvh5Ou0NGvDmupgOZ1W9tbcuQB06tSpzjYtXrwYu93OhAkTQsvi4+MZP348U6dOZfv27WRkZPDRRx/x7bffctxxxwFw6NAhHA4H33//PQsWLKjzOM1l8+by2z17tlw7RERERKRmCmSJiIi0InUGscDKxvrf/6zbhmGlcqWl1b5NMCvrcBZVJNauXUufPn1ITk6utHzo0KEArFu3joyMDCZMmMDll18eevymm26iR48e3BFRalnzqRjI6tGj5dohIiIiIjVTIEtERCRKPfss/Oxn9QyqmKZV4MnthpgYa1m7dtYQw8OP28rKoLTUCl5VZLdDUVHEh9q1axfp6enVlgeX/fjjjwAkJiaSWCECl5CQQFJSUq31sjweD54KsysWFBQA4PV68Xq91dYPLgv3WKQ2b7ZjmtbL0Lmzn+CuGmPf9dUSx4xUNLcNort9kbQtGtstIiISTRTIEhERiUJLl8LTT8P8+TB5Mlx8cYQber1QUgLJyVZQqk0bK4h1OChkAI7SUoyqQawgpxPy8iI6VElJCXFxcdWWx8fHhx4Pp6YZECuaOXNm2BkO33333UpBsarCzZgYiUAA1q37KT6fjY4di1m6NLfR9n0kWuKYkYrmtkF0t6+2thUXFzdjS0RERFofBbJERESizO7d8MAD1u2KtdojEhsLixdbwagtW6B790qZVz6vl1XLlzNixAhighlbFbndUENx96oSEhIqZU0FlZaWhh5vqClTpjBx4sTQ/YKCAjIyMjj//POrDWUEK4tl6dKljBw5MvzzqsP27eB0Wi/08OFtGD16dKPtuyFa4piRiua2QXS3L5K2BbMPRUREJDwFskRERKJIIADTp0NhoXX//POhQkwlMp06WX99+1Z/zOvFvXmz9Vi4L9L1+BKdnp7Ozp07qy3ftWsXAF26dIl4X1XFxcWFzfaKiYmpNThR1+M12bGjPN7XqxfExFSPHjZ030eiJY4ZqWhuG0R3+2prW7S2WUREJFrYWroBIiIiUm7hQvjsM+t2584wZUr1UlZhff115WrlzWDw4MFs3LixWgbJ6tWrQ4+3FpqxUERERKR
1UCBLREQkSmzYAM88Y9222eCvf7VKXNXJ7bYKaY0daw0rNM0mbWfQZZddht/vZ86cOaFlHo+HBQsWkJmZSUZGRrO0ozF4PNbkjqAZC0VERESimYYWioiIRIHiYrjzTvD7rfvjxsHJJ0e48YMPwuEZAnnrLfjVr+pZWKu6OXPmUFpaGpp58I033mDHjh0A3HjjjaSkpJCZmcmYMWOYMmUKe/fupVevXixcuJCtW7cyf/78Izp+c7v2WrjmGtizBzp0aOnWiEQHl8uFy+XCHzwxiYiIRAEFskRERKLAww9bBccBTjoJJkyIcMO33oK337ZuO51w331HHMQCeOqpp9i2bVvo/pIlS1iyZAkAY8eOJSUlBYDnn3+eadOm8cILL5CXl8fAgQN58803GTFixBG3obnZbJCe3tKtEIkeWVlZZGVlUVBQEOrzIiIiLU2BLBERkRa2ZQu8+aZ1OzHRikU5IvmE3rkT/va38vt33glHUGC9ovXr14edHbCq+Ph4Zs2axaxZsxrluCIiIiIitVGNrFbA5XLRr18/hkQ4HbqIiLQuPXrAP/5hFXe//XaIqLSUz2cFroqLrfsXXmhNcXiU0WegiIiIiFSkQFYrkJWVxYYNG8jNzW3ppoiISBM5+WR4+WX4xS8i3GDuXPjyS+v2ccdZxd6PQs3xGfj663DzzfDkk+XDO0VERFqafswRCU+BLBERkSiRmAiGEcGKn30Gzz5r3bbb4YEHrI2lQT7/HFasgOefh8LClm6NiIiIRQkNIuEpkCUiItICvv7aCpwEAvXc0OeDe+4B07TuX3edVR1eGmzLlvLb3bu3WDNEREREJAIKZImIiDSzkhKrvNWTT0JWFhw6VI+NHQ64915rer1TToGrrmqydh4LTBM2b7Zud+6sxDYRERGRaKdAlogcMyZMmEB6ejrJyckMGDCAN954o6WbJMeoxx6Dbdus20VFDQieDB4MixZZQwpt+ig/EgcOgNtt3e7Ro2XbIiIiIiJ109WviBwzJk6cyNatWykoKODZZ59l7NixHDhwoKWbJceYZctgyRLrdnw83HcfxMQ0YEdt2kBaWqO27VgUzMYCBbJEREREWgMFskTkmNG3b1/i4uIAMAyDsrIydu7c2cKtkmPJvn3w17+W37/9dujaNYINvV74z38aUFBL6lKxPlbPni3XDhERERGJjAJZIhK1HnjgAS644ALatWuHYRg899xzYdfzeDxMnjyZLl26kJCQQGZmJkuXLg277g033EBCQgJDhgzhnHPOYcCAAU34DETKBQJw991QUGDdP+ccuPjiCDf++9+tCNgNN8DevU3WxmjU1FOPV8zIUiBLREREJPopkCUiUevBBx/k66+/ZtCgQbWuN27cOB599FGuuOIKnnjiCex2O6NHj2bFihXV1n366adxu9289957nH/++RiG0VTNF6lk0SJYs8a63bEj3HUXRPT2W7PGmt4QYN06yMtrqiZGpaaeelwzFoqIiIi0LgpkiUjU2rhxIz/88AOzZs2qcZ01a9bw4osvMnPmTGbNmsWECRP44IMP6NatG5MmTQq7jd1u59xzz+W9997jrbfeaqrmi4R8+y3Mnm3dNgyYMQOSkyPYMD8fpk8vv//nP8OJJzZFE49ZwUBW+/YR/p+IiIiISItSIEtEolanTp3qXGfx4sXY7XYmTJgQWhYfH8/48eNZtWoV27dvr3Fbn8/Hd9991yhtFQnKzTWYOXMoubnl6VZz5oDPZ93+wx8golFypgn33gv791v3MzPh979v/AYfwwIBuPFGGDsWLrqopVsj0roZubkMnTkTo4myJ0VERIIcLd0AEZEjsXbtWvr06UNylVSKoUOHArBu3ToyMjI4dOgQ//3vf7n44ouJj4/n1VdfZdmyZcycOTPsfj0eDx6PJ3S/4HBhI6/Xi9frDbtNcHlNjx+Jptx3NB0zUtHaNtOEJ580WL8+jSefhNNO82IYVm2sJ56w8fXXBn/6k59Imm0sWYL
to4+sO6mp+O+6C/x+6+8I1PXaRdtr2pRstnrUKRORmpkmttmzSVu/Htvs2TB8eIRjp0VEROpPgSwRadV27dpFenp6teXBZT/++CNgzVI4d+5cbrjhBkzTpFevXixatIjBgweH3e/MmTOZMWNGteXvvvsuiYmJtbappkLzjaEp9x1Nx4xUtLXtm2/a8sEHp5GQYPDBByaPPfYJfftaNa0GDoS+fW0sXVr3zIOJu3dz6mOPYTucxrX+17/mYLDAViOp6bUrLi5u1OOIyDEgJwdj5Up8CQk4Vq6EnBwrmCUiItIEFMgSkVatpKSEuLi4asvj4+NDjwMkJyezbNmyiPc7ZcoUJk6cGLpfUFBARkYG559/frXsryCv18vSpUsZOXIkMTEx9XkadWrKfUfTMSMVjW0zTXjpJTs2G7RpU0BRUTKff34mt9zir19iQlkZ9quvBqfT2u9llzHsttsarZ11vXbB7EMRkYiYJrhc4PHgSUkhvqjIuj9smLKyRESkSSiQJSKtWkJCQqUhgEGlpaWhxxsiLi4ubIAsJiamzsBJJOs0VFPuO5qOGaloatuqVbByJaSkBPD7IRAwWL7c4NNPbfVLTHjhBfj+e+sLYM+eMHEi9iZ4jjW9dtHyejaHDRusIu8dO+r7tkiD5eRAdjZmSgr4/ZgpKRjZ2crKEhGRJqNi7yLSqqWnp7Nr165qy4PLunTp0txNkmNQhYQEnE4oLIzB7Ya9e+Guu6zHI/a738Gll0JsLDzwAIQJqErjmDgRLrwQfvnLlm6JSCtV9eQH1r8ej7W8Xic/ERGRyCiQJSKt2uDBg9m4cWO14VCrV68OPS7S1A4nJJCaan1/O3QoDsOwiolv2GA9HrGEBJg6FRYvhl69mqrJx7zCwvIJITt3btm2iLRaFU9+wbRGw7DuB7OyREREGpkCWSLSql122WX4/X7mzJkTWubxeFiwYAGZmZlkZGS0YOvkWFAxISE+Hn78sXyMWlpa+eP1TkxQNiEALpeLfv36MWTIkEbd75Yt5be7d2/UXYscG8JlYwUpK0tERJqQamSJSNSaM2cOpaWloZkH33jjDXbs2AHAjTfeSEpKCpmZmYwZM4YpU6awd+9eevXqxcKFC9m6dSvz589vyebLMSKYkJCSAjt2QFmZtTwuDjp1gqKi8sSEGsvFvPcenHgiKPBaTVZWFllZWRQUFJCSktJo+60YyOrZs9F2K3LsqJqNVTFgVTUrS7WyRESkESmQJSJR66mnnmLbtm2h+0uWLGHJkiUAjB07NvSl9vnnn2fatGm88MIL5OXlMXDgQN58801GjBjRIu2WY0fFhITiYjg8SSY2m8nxx5sYhoHTCfn5tUzitWkTTJsGMTEwebJVtEma3ObN5bcVyBKpp4onv7S08OvUefITERFpGA0tFJGotX79ekzTDPvXvcJYoPj4eGbNmsWuXbsoLS1lzZo1jBo1quUaLseMYEJCWZkVyAKrLlaHDiUEJ/+rtVyMxwN33gler7WDb79tzuYf0ypmZPXoUfu6Rm4uQ2fOxMjNbdpGibQW4WpjVaVaWSIi0kQUyBIREWmAYELCwYNQWmotMwzIyDCJiQlUWrfGcjGPP16eGtSnD/z5z83Sdil/2ZOSak4oAcA0sc2eTdr69dhmz1a9H5HaamNVpVpZIiLSBBTIEhGpRVMVmpbWr2JtLMMIBrGsSQerCpuYsHw5vPKKdTsuDu6/H2Jjm6v5x7TiYti927rdo0cdI55ycjBWrsSXkICxcqUyS0QiycYKUlaWiIg0AQWyRERqkZWVxYYNG8jVkCKpoGJCQqdOcPzx1iSDtSUnVEpM2LsPZswof/DWW+se3yaNZuvW8tu1vuwV/qM9KSnKLBGpTzZWkLKyRBpMP6iKhKdAViugE5iISHSpmpDQpg0kJ9e+TXligknOdc/BoUPWA2efDb/6VdM2WCrZv788+a3WQu+H/6PNw2l3ZkqKMkvk2JabG3k2VpCyskQaTD+oioS
nQFYroBOYiEj0eOcdmDoViorAbrfqY1X9KyuzhV1ut0PRQQ+uj06yEhM6dLBmLNRsXs1qxAhYsQJeew1Gj65hpXCZJ8oskWPd3Lm1nvxsZWXhT4p2u7Wd+o6IiDQCR0s3QEREpLX4+GO46y745huIjwe3O9xaBqWlDoxwwamAH2dZHjvohJcYYu+91yqyJc3OZrOGhNYoXB2gqpklw4c3Q0tFosiOHVZAN8zJzwAcpaXhz31gbbdjhzVLq+oBiojIEVAgS0REJAKffw63324lE/TsCeecAxMmVE+m8np9LF++ihEjRhATE1NlL3b4fC/t/v4gsb/+PWjIeHSqmI1VdUpDpxPy863Hhw1TNp0c1VwuFy6XC7/fby1YuBB8vrDr+rxeVi1fXsO577B27RTEEhGRI6ZAloiISB02boSbbrLiGgA//zk88ICV1VOV1wubN7vp2xfCfpfrOwhGzYm8UPIxrtoX6eZQMRvL7cY4eJBYsKakjItTVpYcM7KyssjKyqKgoICUlBTo2LHmgoBeL+7Nm6n55CciItI4VCNLRESkFtu2wZ//XD6SZtgw+OtfwwexIpaaqi96EWrsOpFbt1qTRLpcVpZdNVVrYxUXQ3ExsYWFGF6vtY5qZYmIiIi0GAWyREREarB3L2RlwcGD1v2BA2HWrNpjUEZuLkNnzsQIBl727IFXXlHAI0ps3AgffQQLFsBnn4VZoWptrOLi0ENmfLx1Q7OwiYiIiLQYBbJERETCOHTIysTatcu636sXPP64NbqsRqaJbfZs0tavxzZ7Nvj91qyEDz4IN99s1VaSFrV5c/ntnj2rPBhupsJOnaBDB7xOpzXzWpCyskRERERahAJZIiK1cLlc9OvXjyEqyn1MMU2YPLk86HHccTB7ds2lYUJycjBWrsSXkICxcqUVxAqm/Xz//RGOR5TGUDGQ1aNHlQfDzVSYmIjZvj2etm0rr6usLBEREZEWoStqEZFaNHZ9HmkdDANuuAHatLEmrXv66eqT11VTIZvHk5ICRUXwj39Yy202q7BWnZEwaWpbtlj/xsZaAcqQcNlYdVFWloiIiEiz06yFIiIiYQwcCHPmWDGoSgGPmhzO5jFTUqzp6UtLrSkMi4qsYYUnn9zUTZY6+HxW8X6Abt0qjxQMm41Vl6pZWZrBUERERKTJKZAlIiJCeUJNxRhG79712DiYzdO+PfFbt0IgYP15vfCnPzV2c6UBtm+3ypZBlWGFwf+/oiJISrKCkAAFBRAXB7Gx2MrKrOVVg1x2u7Wdy2VNaRlpEExEREREGkSBLBEROeaZJjz2mBWT+MtfGhCLqJDNYxQW4igutnYWE2MFt3Jzla0TBYLDCqFKIMvrtaJcTie43dYyvx/27QPAiIvDER+PUdMbw+mEHTus/cTGNk3jRURERARQIEtERIQFC2DRIut2URFMnVqPjStmYyUlWQGNoOOOg8JCZetEiRpnLIyNhZdfhry88mUrVljTVAKBX/2KVZ07M2LECGJiYsLvvF07BbFEREREmoECWSIickx75RWrmHvQSSfVcwcVaysdOFA+RjE5GVJSrMws1VCKCrXOWNipk/UX9N//Qnw8AObIkbjz8qBvXyvLTkRERERajGYtFBGRY9b//gcPPVR+/6ab4Je/rMcOqs50d9xxkJSEPzYWMz3dWkcz20WN886Dyy+HzEzIyKhj5S++CN006x3dFBEREZGmoowsERE5Jq1YAXffXR5b+uMf4Q9/qOdOwsx0Zx5/PCX5+cQEhxFqZruocc451l+dysrg22+t2927W9l1IiIiIhIVlJElIiLHnLVrYdKk8hnsLr0UbrihnjsxTXjySWsmO6ez8mNVa2EpK6vBXC4X/fr1Y8iQIc130G++AZ/Puj1wYPMdV0RERETqpECWiIgcU779Fm6+2Uq6ATj/fLjjjgbUYV+xwqqjVFxcHvSoSdWsLIlYVlYWGzZsIDc3t/kOWmFYIf37N99xRURERKROCmSJiMg
xwzTh/vutmQkBTj8dZswAW30/Df1+K4WrpMS6vX173dsoK6tF7d5duRZ/rSoGspSRJSIiIhJVFMgSEalFiwxrkiZjGPDww1bZo4EDrULv9Z6EzjStlK6NG60ZCW026Nw5soMrK6vFPP00jBoF555bR9zRNGH9eut2YiL07Nks7RMRERGRyKjYu4hILbKyssjKyqKgoICUlJSWbo40go4dYd48K/4UH9+AHTz7LCxaZGVixcRAhw7WzkpLrcdNE1tZmXW/6nhFu91KB3O5YNiwBoxnlIbassX61+223gM1KiuDAQOsrKyePa3/22AxNRERERFpcQpkiYjIUa242Io3Vcy8Sk1t4M5efdUKQpWVWUGpNm0gEIDCwtAqBuAoLcWoKUjldMKOHeD1QmxsAxsi9REIlAeyjjsO4uJqWTkuzkrVM01r6KiIiIiIRBUFskRE5KhVVga33GLFJh58EBISjmBnH3wAM2daGTo9e8LvfgeXXFJtNZ/Xy6rlyxkxYgQxNY1bbNdOQaxmtHt3ecJcjx4RbmQY1tBCEREREYkqCmSJiMhRye+3ZiP89FPr/p13wqOPNnBnn3xi7SAQsO7/8Y9w003hhwZ6vbg3b4a+fRtQgEuawtat5bdV8kpERESkdVOxdxEROeoEAvDXv8Ly5db9xEQYP/4Idrh8uTUUEODCC+Evf1F9q1Zk8+by27VmZPn9VhqfiIiIiEQtZWSJiMhRxTThscfgzTet+zEx1kyFJ510BDu95RZrXOLGjTBtmjW8UFqNiANZX30F115rZdP97ndw/vlN3jYRERERqR8FskRE5Kgyfz78+9/WbZsNHngAhg49wp0aBlx/vZWxY7cfcRuleQULvQN0717Lil98YWXerV8PF13U1M0SERERkQbQT8oiInLUePlleOaZ8vt33QU/+1kDdlRQAN99V325glitjmmWZ2Slp9dRv/2LL8pvDxzYpO0SERERkYZRIEtERFqd3FyDmTOHkptbXqfq7bfhoYfK15k4ES6+uAE7Ly21hhKOHw+ffXbkjZUWlZcHHo91u9ZhhaZpZWKBFe1SVXgRERGRqKRAloiItCqmCbNn21i/Po3Zs22YprXs9dfL1xk/Hn7/+wbs3OeDKVPg88+hqAimT1fx71auXTtYsQIWL7Zq9Ndozx7Yt8+63b+/6qDJUWvChAmkp6eTnJzMgAEDeOONN1q6SSIiIvWiqzQREWlVcnJg5UqDhAQfK1ca5ORYJaweewzOOAPGjIHrrmvAjoNTHWZnW/cTE60q8bGxjdp+aX4Oh1Ubq1evWlYKZmMBDBjQ1E0SaTETJ05k69atFBQU8OyzzzJ27FgOHDjQ0s0SERGJmIq9i4hIq2Ga4HJZQ8VSUjwUFcXjcsGwYRAfD488YiXSGEbd+6q24yefhP/+17ofEwOPPmrNXifHhoqBLNXHkqNY3wrnNcMwKCsrY+fOnbRv374FWyUi4bhcLlwuF36/v6WbIhJVlJElIiKtRk6OlTCVkmLi9xskJZlkZ1vLwcq8adCIsBdegH/+07pts8H998NppzVau6XhXC4X/fr1Y8iQIU17oIqF3pWRJWF89tlnXHzxxbRr147ExET69+/Pk08+2WTHc7vd3H333VxwwQW0a9cOwzB47rnnalzf4/EwefJkunTpQkJCApmZmSxdujTsujfccAMJCQkMGTKEc845hwF6z4tEpaysLDZs2EBubm5LN0UkqiiQJSJSi2b7Ei11qpiNFRsL+/Ylsm+fQUmJtdw0G7jj11+3srGCpkyBc85plDbLkTvSi/gZM+Af/4Dly2tZqawMvvnGut2tGyQnN+hYcvR69913GT58OHv37mXatGk88cQT/OIXv2DHjh1NdswDBw5w77338vXXXzNo0KA61x83bhyPPvooV1xxBU888QR2u53Ro0ezYsWKaus+/fTTuN1u3nvvPc4//3yMeqexioiItBwNLRQRqUVWVhZZWVkUFBSQkpLS0s05pgW
zsZKSYMcOA7/f+uJltxPKyho+vJ47/egjuO++8vs33AC/+lXjNVpaVF4eBOtYDx0KI0bUsOJ331mF/kHDCqWagoICrrzySi688EIWL16MLcK0z7y8PJYtW8all14a9vF///vfXHzxxTidzrCPd+7cmV27dtG5c2c++eSTWn9QWbNmDS+++CKzZs3itttuA+DKK6+kf//+TJo0iY8//rjaNna7nXPPPZfHH3+c3r17M3r06Iiel4iISEtTRlYroIwQETnWBbOxioqsyeWCEwnGxEBGhpWl1aCsrAMHyjf63e/gj39s1HZLy9qypfx2jx61rNivH7zzjlXcv4aggxy7Fi1axJ49e7j//vux2WwUFRURCATq3O7pp5/mN7/5Da+++mq1xxYsWMDvf/97Fi5cWOP2cXFxdO7cOaI2Ll68GLvdzoQJE0LL4uPjGT9+PKtWrWL79u01buvz+fjuu+8iOo6IiEg0UCCrFdDYaBE51uXkwLvvWoGs4PdHuz1A164mMTGQmkqlWlkRu/RSmDkTLr4YbrmlAVXiJZpt3lx+u2fPOlZu3x7OPlv1saSa9957j+TkZHbu3MmJJ55IUlISycnJXH/99ZSWlta43eTJk7nwwgv53e9+x/vvvx9avmrVKm644QbGjh3L9ddf3yhtXLt2LX369CG5yrDYoUOHArBu3ToADh06xKJFi3C73fh8Pl555RWWLVvGiBrTFUVERKKPAlkiIhLVvF7IyrKGiQXjTImJ0KlTMTEx1n2n8wiyss47D6ZPb2CVeIlmEWdkidRi06ZN+Hw+fvnLXzJq1Cj+7//+j6uvvppnnnmGP9aSxelwOHjppZcYNmwYl1xyCatXr+aDDz7g0UcfZdSoUSxYsKDRalPt2rWL9PT0asuDy3788UfAmqVw7ty5HH/88bRv356//e1vLFq0iMGDB4fdr0YFiIhINFKNLBERiVp5eTBuHHz1lVULyzCgXTvo2NHk0KHy9QyjclZWjbWyDhywdqbsg2OCAlnSGNxuN8XFxVx33XWhWQovvfRSysrK+Mc//sG9995L7969w24bHx/P66+/zs9+9jNGjx5NWVkZffr04d///jcOR+NdhpeUlBAXFxf2+MHHAZKTk1m2bFnE+1WdSBERiUb6+VlERKLW6tWwYoU1nNBuhy5doFOn8OvWmZXldsONN8Ktt8LixU3abokOwaGFqanQtm0NK61ZAw8+CG+9BQcPNlfTpBVJSEgA4He/+12l5b///e8Ba6hgbZKTk3n44Yc5ePAgbreb8ePHh/bZmG30eDzVlgeHPjb28URERFqSAlkiIhK1UlKsoFRsLHTvbt2vSdWsrErKymDiRNi40drhc89BcXHTNVxaXEEB7N9v3a61PtaKFfDKK9bw0q+/bpa2SevSpUsXADpViaJ37NgRsGYnrM3mzZu54oor6Nu3L926dePBBx9k165djdrG9PT0sPsMLgs+BxERkaOBAlkiIhI1KmZSBWcqjI2FXr0gkoSCsFlZfj9MmQKffWbdT021VkhMbOzmSxTZurX8dq3DCr/4ovy2Cr1LGKeeeioAO3furLQ8WHeqQ4cONW67a9cuRo4cSUxMDEuXLuWtt96itLSU0aNHc7ARMwAHDx7Mxo0bKSgoqLR89erVocdFRESOFgpkiYhIVCgqskb9vfuudT8nx8quSk0lVNS9LtWyskwT7r8fPvrIWiExEZ58Erp1a4JnINEkovpYZWXwzTfW7W7doMqMbyIAv/nNbwCYP39+peXz5s3D4XBw9tlnh90uLy+PUaNG4Xa7ee+99zj++OPp3bs3d999Nzt27ODCCy+kqKioUdp42WWX4ff7mTNnTmiZx+NhwYIFZGZmkpGR0SjHERERiQYq9i4iIi1u2zYriLVli1UXq2tXK2mqqAiSkqDqDPemCWVlNkpLy2cyDLLbre1cLhj2iQvj9detBxwOePhh6NeveZ6UtKhu3WDMGKtO1okn1rDSN9+Az2fdVja
W1ODkk0/m6quv5tlnn8Xn83HWWWfx4Ycf8sorrzBlypQah+09/fTTbN++nQ8//LBSMfiePXvy2muvMXr0aBYuXMgNN9xQ47Fnz55Nfn5+KPvrjTfeYMeOHQDceOONoQLsmZmZjBkzhilTprB371569erFwoUL2bp1a7UAnIiISGunQJaIiLSoVatg6lQoLLTux8VBfj5s324NFXS7w21lUFrqqHHqeqcTdny6B++GfxJrw4p23XcfDB3aRM9Cos3gwdZfrdavL789cGATtkZau2eeeYauXbuyYMECXn31Vbp168Zjjz3GzTffXOM2kydP5pJLLuGkk06q9tgZZ5xBTk4OA+oIoD788MP88MMPoftLlixhyZIlAIwdO7bSTILPP/8806ZN44UXXiAvL4+BAwfy5ptvMkKztIqIyFFGgSwREWkRpgn//Cc89ZQ1KyFYRbkffRSOPx5efhlqqqHs9fpYvnwVI0aMICbcuMOPPqLd0/cRazucbTN5Mpx3XtM8EWm9VB9LIhQTE8Pdd9/N3XffHfE2DocjbBAraGAEwdOtFYu91SE+Pp5Zs2Yxa9asiLcRERFpjRTIEhGRZufxWAlSb79dvuzss+Hee8trsHfqZP2F4/XC5s1u+vatoX5WcRtILYMi4Npr4bLLGvkZyFEhmJGVmAgnnNCybREREYlCRm4uQ2fOxOjQAU4/vaWbIwKo2LuIiDSzvXvhT3+qHMSaMAEeeqgRJxI85RSYOxfGj7cOJseU4mJreGqt9uyx3owA/fuDTZdEIiLSOoWCTbm5jbtj08Q2ezZp69djmz278vTSIi1IV20iItJsAgH4y1/g66+t+wkJMGuWFciqTxwhogu2Pn3g+uurV4OXo94HH1gjSUeOLJ8FsxoNKxQRkaNBUwabcnLI/cDNr70vk/uB+/CU0CItT4EsEZFauFwu+vXrx5AhQ1q6KUcFmw0mTbJmFuzSBRYsgJ/9rJ47CXfB9uOPsHChfikUwJqpEKwaa0lJNax0wglwzTWQmQmnntpsbRMREWlUOTkYK1fiS0jAWLmy8YJNpok528VT+Vfykf+nPJV/JeZsl661JCookCUiUousrCw2bNhAbmOnah/DTjnFysJ64QXo1asBO6h6wfbuu5CVZVWNnzmzvHK8HLMq1sfu0aOGlXr2tOqnuVyazVJERFon0wSXizXufowpWcQadz/rc60xgk05OeS8X0S2fzhJhvVvzvtFysqSqKBAloiINJm8PJg3r3psacQIqDBrfOQOX7Dh8eBJSYHSUrjuOti2zXr8s8/A7T7idkvrFszISkioecIAERGRVi8nB3N5Nk96rye7dBhPeq/HXJ595MGmw9lYrvwr8BBHB/t+PMThyr9CWVkSFTRroYiINImNG+HWW2HXLut655prGmGnOTmQnY2ZkgI+nzX9YX4+xMVZGTazZ0NyciMcSForjwd27rRu9+ihGu4iInKUOvzjXo67PytKTyPJKGJF6WnkuPszPJht7PVaH4wej/Xjn8cD6emVx93v2wcrV1Ze75tvyHljH9neTFKNPGwBP6m2gsNZWf9ieE4ODB/ecs9djnkKZImISKN77z245x7rWghgyRL4/e/B6TyCnVbIxqJ9e+K3bIGyMivdKz/fCmJ17twIrZdo4nK5cLlc+P3+iNb/4YfyH4prHFb4/ffWJADduyvSJSIirdPhbCyX7yk8ZixdAtv4MdAFV8FYhr08DuOTT8IXinz0USs1PuiHH+C++8rvmybmlq243E/iMWNIM/YBJk5/Pvm2DFz5VzBstgtj2DBNqCMtRldvIiLSaAIB+Pvf4Y47yoNYJ51k1WE/oiAWhLKxSE3F2LMHR0mJdQHlcFjBiD17jrj9En3qW6duy5byi+qePWtYac4c+M1v4JxzytO3REREWosK2VjZntNItRdiYJLKIbIDZ5ATGGplWoUbAujxVL4fF1f5flEROYUnkW2eSSqH8GMngB3DMEh1uCvXymqiIYa
5uQYzZw4lN1eBMglPGVkiItIoiopg2jRYvrx82YUXwp13QmzsEe48mI1VWgolJdbBgrp1szKyXC7Qr4PHvK1by///a8zIWr/e+jcQsIZYiIiItCYVs7ECsaTF5INh4DSLyScVl/FnhpWOx8jIgK5dIT7eCljFxVnTRlewjQyyz/w7B0oTOVicwP5l63nTP5jddMaOHwMD0zSJNXx0M3aRT5vyrKzXXoPUVPjtb2v59ah+TBNmz7axfn0as2fbGD5cl3ZSnQJZIiJyxLZts+phbdli3bfZ4Oab4Xe/a6SLj2A2ViAAxcXly7t0KU+bzz5c3FQ1G45pwfcg1BDI2rvX+gMrXVBDC0VEpDUJm40FHkc8pi2W+ICfdwMjuZsZtN/RkQMDxnIwz+DgDti/H574Nfykwu427knlsRVDrDtFbty7T2KXmY5hmBgVLuL82Imx+Um1F1pZWe8+zwZ7N1b7T6SP63X6DIjjxCtOo+eYU4mJa/hna04OrFxpkJDgY+VKR9Rd2hm5uQydOROjQwc4/fSWbk6rkrvwa56ZlEKHh77m9D8NPKJ9KZAlIiJHZMMGyMqCwkLrfnIyzJwJmZmNdICKtbGOOw62boVAgJK2bXEmJ2OANW5RWVkC/PCD9X8fG2u9Xar54ovy2wMGNE+jREREGkuYbCwTg++93TAMA9MEr+nAFRhP9y+2YRQVgbO8Vtb+/ZV3165d8JaJuWcv+/ztCGCQQBkxhh+74ccfAIfNGkbotJWQ72+Dq2AsqUYBn9n78llxX1gOLAfHjZvo+ZM4+pyVbgW3ToQ+faBNm7qfWsVLvpQUD0VF8dF1aWeafDLjbWbmTmXKjLcZ9r/oSxeL1kCbGTBx3VfAx3kn47pvPcOvNjFsDX/t9DOkSANo3LZIue7doUMH63bPnlY9rEYLYgUClWpj4XDA8cdj9uiBPz6+fD3DsB7PboQpp6VVW7jQz4svwsMP15BspUCWiIi0VuGysQwwDBM7AcC6JLIbAdxmIkWBhMNZyCYOB3TsCFXnTunVC+6/H/5+/Xqm2B/EafdwQtwO+iRso0fcTrrG7qZrzE4yYveE9p9qLyQ7cAbf0QtSUiGmvIaEr8zPxs+LefOpLTw6aRfXjvMwb171pxGc1bqi4CVfSlwJSXt2kRJXElWXduaqHGavOIUPzbOYveIUzFVR0rCgw4G263Kn8smMt5ushllD5Mz7khU7utPGcLNiR3dy5n15RPtTIEuknkwTZs84yIZPkpg942A0nR9EWkRiojUBzoUXwnPPQUZGI+14+XIYMwYeecT6aS5YLT4+3gpoVeV0Wuu5XFH1wS3NKzbWuiiv8UfIYH0sUCBLRERal9zcw9lY1+IJxOK0lYQeamNzk2ovJM2eT7pjH05bCT3ifuTluCv54G+5rFoFb70FZ59deZfJyTDqfJPTPniI1wpHUkZMpf2G47SV4CGONPaz9Ix7ePrtHtw80c7ok37ghLgd2DDBDEBeHuzexYknVt5+71646CJrzpVrr7WuI994w8ro93hMkgp34ygpIalwNx6PGR2XdqZJzox3yC49jSRbMdmlp5Ez450oaFi5aA20mQET18wCK4PQcRBPIBbXzALMQMNfOwWyJGpFa9ZTziqTlSsMkgOHWLnCIGdV9Jy8RBpTTX1w797qEwQefzzMmGEFtY5YWZl1RTNxInz1FbzzjpVtVVfqtrKypC5lZfDNN9btrl2t94uIiEhrMXdutWysoE6OfaTH7KdDTB7tYwro6DjIBl9v9pSmkPzckxjU8p0lJ4ec94vI9g+vtt9wQllZ/uF8k72Poaxh7F9P5N41P+elz/qw4s63eb7vTO5Kn89vLimjf/8KG5sm36616p0WFsKnn8KiRXD77dYl36G8AFsPtWWn2YW9BfGkVsnKKitrmdiRuSoH14pBeIx4Otj34zHica0YFDXBomgOtOXM+5Ls7d1IdRRi2CDVUUj29m5HlJWlQJZEpaqzVURB/wMOZ/P
O2I+n1KSDYf3rmrE/aton0lhq6oNffAFjx1qF3UtLm+DA27bB1VdbVzSmaU0dHQhAQkJk2ysrS2rzzTfg9Vq3lY0lIiKtjLny47DZWOE4bSVW5ovvWszltfzIZ5qYs1248n6Px4zFSTEEzAp/gSr/Wn9OivGYsbjyfo85u8J1V/fuxE69jX4f/Z1LZg5j0t970rVrhePl5pI4+UbOjMulg7M42ITDl3wmhs9HaSCOApI5FGiDs0pW1t/+ZhV/Hz0arrwSbrkF7rsPnnkGXnkFPvjAul7dt68RXvAKr1EwSJRqdx8O5LmjKlgUrYG2itlYTrsHAKfdc8RZWSr2LlEpWmeryFllkr0CUo1DYLeT6j9E9opUclaZDD89ejLHorXIn7Qe4frg3r1WyrfPBwcPwtNPW0lTjeZ//4MHHiifldDjsYocZWSA3R7ZPqpmZUXDiUOazYoVXdi926B3b/jpTyEmpsoKX1b45W/gkc2WIyIi0txyC060gim2AgzTpDzJ6nCAyQiANQ0OBpBqK7CCLYd+wvCaqqZ7veRsSLaysYxDGKafqslbhhngcAmu8mVY34my/cPJ2bCM4V6vNb4/KCnJKhFR1YsvclrM55zmuR48cHDg2SxJu4Y7nu1DcqIP012Cx7D24zACGEVuUpNLyM5OJCcHDhywrkUrTkIczsUXw/TplZfde681eqB9e+svLc36a98e2rYNU1vz4EH4/HPM3E9wfXgSHjOOtMBBTNPEaRSQb3bG9eFJDJtxL8bUKZWf/9691kRECQmV/5pituRQoG08qY6qgbb5DP9fE1fLLy62/kpLoaQEb2Ep7jwvRfleVvznAO//cApxRhkF/kSchhvDXjkra/iE+v+4qECWRJ3QbBVuL11Kt7LXPAGXK7bFZ6uomI2V5vAQMKxocv7hrKxh/+sQHZNWtILZNCS6hZsxJiuLw8VErXVOO81KnGoUJSUwaxa8/nr5sowMqxrp7t1WEKtq+pdpYisrs5ZXfX/b7VBUpBkMj0GrV6fzwQc2HA5YuTLMCj6fFejMz1dGloiItDpzfX/E43eQZndXDyyFCTY5TTf5/iRcvmsZtv0JjKrBJsCMicV1wiMUbY0hKc1H1YR70zQpLiom0ZmIUeWayg4U7XfgOuFRhsXEUucVl2lCeroV0CmxMsrafv4hH249B4e7A8c59mLYSgnExOHxGxg2B/hMnAW7yY/tgctlMHCgNQvi/v1WCS6zYjDPPPxns9O+fYVjLlmCN7+I1+ddAAE/+APV/rUFfKT260LaCSncccfh37u++QZuu413N3bn/bKxOCnE9JsY+DEMg1Qzn+yyoeQ8Oo/hk3yVX9v/+z+YP7/6axAba9V7DQa2BgyAu++uvM4LL1hPruJ6CQnl94P/du4MbdtWysZKs+8jEACnvYR8fwdcKwYy7L9vYQzob103Hw42ccopldv7xRfw8ccU5vvJyzcoKgxQ5DYpKoKiYgN3sUFRsY2ixA4U/fwyuneH3/728La33QZr1vD7zfextSydMjMGsGOa8Wwt6U0BbYgxyzBMg54xxTgAp8NDfmkbXDMLGPan+s9gqECWRB1rtgqTVO8+YkpLSLXvIzu7Czk5RosmV1TMxjLsNgj4Mey2qMvKqljkr80KP5mrcjBOV1aKRC40Y0yKiddrfd599RX06GH9uPbb31pp3OHqrdfbpk0wZQps3Vq+7Be/gJtvhl/+0hoq6HZX28wAHKWl1S6oQpxO2LHDGkZW5YJNjk5+P+zdm4jTaZW/Cvv+vPJK+MMfYPt2q7CbiIhIK/JxyihSE9pgJLaptNw0TYoK3SS1Sap0bWQAqcU2sj0XkjPpLIaHuSbyemH7vgScyeAuC/fhaVLiNwl44yFMqMqZDDv2OyK75DIMqxjW9ddb1d1feomcb9qS7T6ZVDMPo6QIAJtRRqxhYLcFwG63srIceWS/5SArYTGTjv8GUt34D7nJzzM5kG9jf34MB8racOCUkRzIHM3
JJ1c45iOPkFfkhAOn1di0AHAw3+Dgxgq/gbZpg+kuYlbRDewnjRjK2G12xsC0MuKAMmK5rPBZThlZSocTE3n22cPbHg7Uzdv/Sz4t+gl2I4DD8OEw/DjwW/8afhzbOuOIswJno0cf3va//4XvvmPRwVH4TTsOw4+94jaG39rXJRfhuHAUhfesILv0cqsGlacU07TjJh6H6eGd4p9y55gX6Jy2AXfAmsmyKBBP0XkDKLXF8sILh4+5YQPMm8ffd1/Jy3nn1fx/GBMLbmvQQyiQdXgmcR/2w0EsS5EvDjdO7PhC75wANiBgZY0dQVaWAlkSVSpmY6WVHsA0bDhLD5Dv7nDEWVl+v1VQsLjYOq8E/6162+Opkmni9bLomQKm/TWGPcVtiCMWs9SqGeQwAthNH6XF8dz42z3c4+rILy5uwdJzFdJKy4v8NUM6aT1o2GN0q5iNZcWCEjEPl0XYv9+qS3DJJY14wB9+KA9iJSRYQa3gp/jLL1u/RoXh83pZtXw5I0aMIKba+LHD2rVTEOsY8tZbBtu3J5GRYQVda2QYVC7WISIi0joUeWJJbhdXPWuKACX4cBCPUaUMtj0BigrA9e92DBtd/StBbGytl1x4vT6WL19V6zVXvS+5kpLgd7/D/M1vcf18P55t8aR59lhDGg3D+uJmPxyqsNugzI+z4Efy/Z1xLenMsO5PYxhWRlj7w399EoFE4MQkuHV0teO19+TznxNu5YAvhf2+VA74UzjgS+WA0Z4DRgf2m+3Y37YzB0zKs7m6dCEn5qd8ziDs+DEM67UNYGIEh3CasJ80Nq3fjz/FJBTsGzgQSkrY9N+fkru5h3Ux7T88m2Ogwt82Jyy2itiHAlmHg2D/2HcpRYFa6sQu7oz5ehGpW0+3srFsBQCUEscOswsm4CWWf5ReRfe9uzAcFd4bXzggzjpubCyhYFSSrbjm4xk2OJw5VVRUYfmAAWC30z02HkdRAGdCgMT4AO+viSPWW0aqrRC7LYDNMLHjD71GR5KVpUCWRJWK2VimCT57LLEBL6leKyvr448N9u0rH4IbHI5bNSBVXAyTJ1cugZKTAzfdVPWIpjXUxOsFrw98XkhJYdw4R/nw5TffZPW9JezYfxkGfnw4QuPGy0wb4MCPjfU72jF3nsEvLq6w+3nzuPmRDPaZ7enYzkeHNOjY2UaH42Lp0DWBjj2T6NgrmTbpSfVOpwynYlppZ/tudvs741oxiGHRkpVlmthmzyZt/Xpss2dbofwoCbCJJZiN5XDAtm0Gfr8Nu936gIuNhU6dGvmA550Hl15qpXzNnFk5wNCpU80H9Hpxb94MffuGKYQkxxrThKeftlFaamPvXqP2QJaIiEgrVUOiOmBQWuqoMVO9rkT1Oi652LzZ3SSXXDlrbGR/25HUjsUYPxpgs1vBnQoBE8DKyvJ5SbUdItt9MjlFAxietN6qN5WUZP21aWP9G+4iYPJk7IbBcW3acFxwvTZtrBemSgp3IFD+9cT8diOuLaMxDRsd7Pn4DTs+0443YLPaCARMg9JALIeKHKR4DwBp1sbnnQfnnYfvIFBW92tRqRkPPwyFhfjG9QIPlQNfgQrBsPg4inbks8Pbm7YxVm0sDAObadVKMwA7ftwkUUQiSW0Tyl9bu43YWOt7c2wsVoLB7Nn0/SKN0etScSbbcCY7cKY4cKbGkJTqwJlk4HRaL1tKSoX2/vGPADxUYdGqOet5bUUcx8XsJSnGE1ru9wewQpAcUVaWAlkSNUwTHnkECg76KSmKYwcnkGIWkO44EMrK+vvfY9m40eq3dcnPr3DH6yVhxYewd5B1NvZ5y4NXVasZxsfj8ThCk6SZHTuRU+gkgEEsfuyY2AhgEMCHgwA2bATw4uD7L4sxTWd5bGbzZjb++BP2+hL5dnvVFvqBQ8AhYts6uf2pbvzqV+WPFs9bxMotXeiYEUeHHkl0OCGZmE7trBNvuA+pli7yF4mcHHI/cHO/92Xu/OAfZKoYd1Spmo0
VrDkQH2+NwtqzpxHKTm3bVj0b5tZbrR0qe0oaKCcH1q41MIwARUUGHk+YlUyz5c+BIiIiR+DVV62vAlU1SdZUEyu/7jRJK9ttfSWLjbX+NQ8Ha4LsNvAbOOMD5Md1wNX/7wxbGMBITIjss/3ccyNuVyiZocJ3q7SYQyTZy2eJ9Pv92CtMROT2J1DidfAX23ww767UpgcfPPz102clmvl84f+Skys0ondvAGbOrn1b79eb+PtDhRwwulmzWBoGxMVh90FH8rAd/t560JdMT9tWHnu4LUk/PRmn0yp6X+n90LEjdOzIOcPgnIhfrfDKZyrsRFpsQa3rVs3KipQCWdKiTNMqkbN8uVUP74MPTAxfgDIzEQzDGkNrs2H4/aGsrPbtDesEYwbKg1FVglP2Nol4PGnlB7LbSfv3U5zpuZxEWykJsR4S40tJMDwk2DzWssP/Jl49lpiY8iFvOft7sdvv4wRjsxVNNmxggD8QwG6zEzANfKaNAm8CebvbkLMqMVQry/T6cNj82DAJ1FL+sIxYEhMrvzA7/v4GU76ZVmFhEW3tu+kQe4gObUrpmOqhQ3uTjhdlcsG13Yj7rEKRP2MvAbNikb8oyMo6PLXvU/lX8pH/pyTnlzB0tgsjyotxu1wuXC4Xfr+/pZvSpAIBWL3aysZKTbUukvLywDC8dOtmw2YzjmwywEAAFi6Ev/8d7rmnQu40EBfXeE9EjjkVA7B2u0kgYE29fdNNVU4tWVnWVd+AAdbtppg1SEREpAn16VMl4HFYU2ZNNZXgKIDU+BKMPHf5DNWHs4qoeultd2B4SkhtV0Z2biI5XzTt7+GVCqjb9tW6rtNWQr4R/juXw9HwurIjRtTWQJNVF/yLXb7xtI8pqHTN4zD8tLcfCt23G36+KuvNwQXzOfG3g5v8u1fOvC/J3t7NqtlVx6GqZmWddHm3iI6hQJZEJDfXYObMoXToYDRKWaMNG+DNN60A1u7d1heRrVsh4DeJCfjAMHAYfuIMj3Uys9utrKwDyaR6tjG58/MklhwoD0IllAejEmylxIw6F0Y+UH5Am42uXeFxx6Ply5KTrZkeOne2cmmD/w7uGeoZpgmupw08AQdpMd7KZyHTBJs1Cj2WAO0pZKcnodIMhsash3j9wQCBg/kc/D6PvZvd7NtWwt4dHvbt8rNvH+w9aGdfm2TS0yu8QCUl7C2qGNmy5PnbkFfSho0lQHC62Z3tGXVNhWwsewH5ZQkcMpOJwUcAk/8V/5RHL5rDiPNX0aGznQ7pDhI7ODGuurLyl7mSEuvTr1GqeFeRk0PO+0Vk+4eTZFj/5rz/L4ZHeVZWVlYWWVlZFBQUkFIph/bo8OOP8PTT1ttg82YrGJB2OAbcrZtJYaEHw7ACTU6nlelY76ysAwdg2jRYs8a6P3MmDBoExx3X6M9Hjj3BC2GbzXpP2u3w+edVAq5lZbB2rXWlv38/3Hhji7ZZRETkWBb8EaqoyCTJd4BSfyzEOCrNuugPBLAbFb6nGIDPhz3/AEWOBFwuo+kmp6440iU4ZK8WVjCmeUfCNFagrdHbVY9srFD7KmRluX4TWVaWAllSJ9OE2bNtrF+fxuzZtkYpa/T551ZRwaCiInC7TRIooa1xgDa2IuIpxW93AHawB7Oy9vN9XluSU/cyvM36mg9w4ED1ZbffbgVpggGrhFoK5x1WaaZCR+2/3huOGmYwtNmwpbUjLa0daZl1HtISE0PXv2Xxl+Vl7P3Rx969sP+gwd78WPYVJuD3BsDvAyAp2Ub8uorZWPnsph0lJFACoSJ/Dxy8lkVLyov8JTi8nLPVxowZFY77+ON8vHATcQk2OrQPkNbBIDEt0RoEHfxLTYUTToBTT43wyRDKxnLlX4GHODrbd7E7kI4r/wqGtYKsrKNRYSEsWAD//rf13d7ttpalppb/V1RNWDEM6p+VlZMD06fDwYPlO7niCqsfihyhitlYwaTJuDjrfqWA67ffWm90gP79W6y9IiIicnimxO1
WAMOdb4DRBgIVLzxNAqaJLWBQaaZEIwBuE2eahx074ptscur6BImCmjNYFM2BtvpkY1Vun5WVlbtwQ0TbKJAldcrJgZUflJHiO8TKD1LIyYmP6Avsjh1WxtVHH1mjOCoWXh8xwqqH5Qh4OLXDdr7YbeOgP5UM/xbrDW8GK+xViMja7Ti9h8g3jsNVeCXDTv8nRnqFbKpggKpTpyrV5w776U/r9bxNE1wz9uMpNUlzeIC6h6E47R7yS81KWVkNEhND10tO4cpLqj8UCFhZMXt3+dm72U2pPbbKicwgYDhCr6FV5M9nFfnzxZLksAJgJSRWenkBOHSIe3f9if2+1NCiRFspHRz5dHDkWX8xP5B2Cpz55KmVSx1ddJGV9RAMdlX8d+9ect46SLZvGKn2gsP1uwpbTVbW0cTrhcWLYe5cKDj8I4lpWu+psrK640sRZ2X5fNYwwoULy5elpcF998FpNU99LFIfwWyspCQrEAtWTbdqAdf1FX74qPhhJCIiIs0uNhZefskk75ppsGqV9f2twjVlwAR3YSFJbdpUqvmOiVW0dchw2s17iNjYJgjGVKo7XIBhmlVKKptWAw2roHqQAaTaC5olWBStgbZgNlZRoAtJ9hJKfVXHuZr4A3bsZpUAJWA3AhQFEpj7aGFEx1IgS2pl/dpt4jlUShfvTn48FIfLFcewYUa1fhkIwJdfWsGr5cutYUpBH31U4btDXh7HvfAPHo/P5+SiFXzxQ28u33c/7cz9lIdesDp+xZOG3YbhN0hNKCPbcTY51/ysSWMf9cnGCqoxK6sR2WxWscZ27ez0PSkF8+NV/KHiicywcVz8ATr69mHaYvHjoCxgZ5+3LUn2En5+QQz7jQ7sO2jn+OMr79uf0Z2DsZ3BFrDSG/wBigPx/FDWmR/KKkQ4PmlHx40VanabJhu3xXPX9r+Q5sgnzZFfHvyybydtz1c8XHgLHhyk2fMIcPhk6m+jrKxmYpqwbBk8+aQVZA6KjbW+6L/ySuVsrJpElJX1449w552Vgwenn27VxmrX7sifjAiVs7GCcfPCQj8JCfbqAVcFskRERKJKpy05dPriZUhLgITSSo8FTJMCzyGS41OwVb04TXPDFy/D5kuhUxMEY8q8uNaeThFOksxiSv3VQyaBgDXdV1V2fBTRFte60xlW5sWIa4p0segNtHmLvWw/6MRpK8HtDz/6yTRNDDP8cZ22EnbmVy+vE44CWVKrnBzIfs9Dqm8/2Gyk+vaT/V58KCurpMQqe/PRR7BiRfnooXIm+P18912Ft1piIrz+OmeWlWHawLVvDB5/DGmBQqu4icNeXuykaoHtYK0sdwdcrtgmGxcdzMYqKrWTZIfSQPWTULVx28Em2kyKSu1HnpUVYUNrSiu1GSZ2mw/wkWCzotz7vO0Y7Z7P8P/dHfaF819zHTelwr59VhmZfXtN9u3ysW9PgJIi/+Hglh9iY+nQocKGXi+70/qzeWt3Npf5KmfS+QO4PQ620AODAMW+JHo6tiorqxlt2GDN4vvFF5WXjx4N110HkyZZ2ViV/k9rUWtW1mefwcSJ5XND2+1WPaLf/17FtaVRhYrEploB2c6dTRISSkhJicEwqkxOEHzzJyRYQ6NFRESk5ZQXybLSqktLqz1uKyuzllf9zmK3W9sd8VTa4XmNWLafcDbOGHATZopIE0pLS4mPj6+aVASAE9jR7Wy8RixNMUlkNAfaYpNieXnlceRtC59V5fX5+CQ3l9OGDCGmhprMMe270GtY3cdSIEtqVDEbK80sImC34/QXkX+oNJSV9dhjsGRJlQ29ZRhFRQxM/I6zvO8x4mwb3R6fXP54XByccgp88gk5nX9F9g8jSI3Pw/A4ILaOaTbslWcwzMkxmiT24S0z2b5uP06ScOOsPmsGYAYCGDUMN3RSxI51brxlacTGNV0kq7GL/MXGWuWLyhmA9X9SXHw4uLXP+uvRo/KGxbffTdxfrewIzAD4rKCXuW0b+zxpBLARgx8
wMAyzvE3Kympy27dXDmKddpo1o9tPfmJlcweDAfUax55aQ1ZW9+5WH3e7rWLuM2dCv36N92REqJyNFZycoKpQwPWRUobt3mO9v086qXxWJBEREWkZoSJZzvIfPyswAEdpKUZNF6dOpzXEoAmKZMXGwsuvxZKXF/5xr9fL8uW5jBgxgpgapohs165pandB9AfaOvXvQKf+4X8d93q9bDa/pu/Pe9T42hUEa5/UQYGso0hjzywYysby7sdjT6AgkEh7R0GlrKwzz4Qli/1QXEy85xDDHZ8wwvyIM5I+p52jAGKBb5PBvB0qZi9NnYqZkorr2kSKfF6SyoopNRKqFPmrIevJcGAvdVN0yIfLFdMkWVmxhpeXe04hzx6+I5mUnyBqOnS77snEGi9Dk5wiaPYif4mJ1lDCSnWxKrjgAhg1yvos2r/fxr59NvZnf8eqGe8y2zaONkYRNsBmlE9Hoqys5jFyJPzrX1YG5U03wRlnHB65W/ePYZSV2er3Y1i7dlYdrNdegylTrB2LNLKK2Vg1ncbKA64mOW0HMDxpPQwY0JzNFBERkXBiY62Zv2qIFvm8XlYtX15rsKgpo0XBssvheL2webObvn2tecSaW7QH2pqLAllHicacWbCszCptc8stJvv2G9j8nTEDdkxM4mwBks08Kyvr/lLmdpjKZftP4qeJnzGk7VfE2nyVd5acDEOHWlV4KxZg79IFbxls327i9Obj9idaQwoDlav5hZ+twgSfH6c3nx070vB6jcbviLGxdHrtH3Sq4QxhnSBa7uQK0VnkzzCgTRvrr0d3E/OJ+1lUdglxNi/HxewNvScrjhgNm5Ul1UQSqC4rs64Jvv8e7r67fLnNZg0tbN++cjJKHT+GAQalpY4afw1zOk12fH4Q7z47sR1Tyx8YMsT6E2kCkWRjBTmdkP9jAJdvDMOc6zEUyBIREYkOdUSL3Js302LRoigXzYG25qJA1lEiJwdWrjRISPCxcqWj5iLMNcjNtWpdrV0LX31lBce3fG9i+E0Mo/ybb2EgkZRgVtbqdNa1L+GOjgvKd2S3W4V0hw2z/n7ykxrr4sTGwsuTPiXvmknQMQ6clQu71ThbBUBRMXg8tLv9QWJjm+gLczSfXKO4yF9ITg457xeR7R9Oqr3m6VfDZmWddFLTtKmVqitQbZqwdCnMnm3VWQe4+GI4+eTydTp2rL7fOn4Mw7d2HQf/9jfa3XEHjoo7A2us6T/+Qbuct4h94CR47DENC5VmkZsb+XBYw4BUo4Bs98nkFA1guAJZIiIiIq2eAllHgcozN3koKoqvtfad2119tM/zz1u1coL727fPJOAPEEMABz4SzGISbKW0cZSBzYbTV0S+24srZizD+s7GGH44cHXaadZP4BE2vNO/H6dT2RfQthNVB/EGMHFTSBJtsFUdwJdoQuEe+PcTMPqFY+4LdDQX+bMaaGLOduHKvwIPcaTZDtW6etWsLJ52NX6bWrHaAtWffQaPP24VdA8yDPj888qBrJrUGK81TQL3zsL/3ZvY37Bju/xf5f1swwa4d6pVmyAGa6aHNWsgM/MIn6lI3ebOjSwbK8jZLY38HQFcqXcyLLVtjcPBRUREmsrf//535s6dy/r167nzzju55557WrpJIq2aAllHgWCtkJS4EuL37MLRPpbs7ERycqzY0s6dVqZV8O/gQVi2rHKi1MknBwNZJk5/If5CB53Mg7Qz8ojFCzYr4cewxQNgOOxWVpb/dHImn9uwskZRXOQv2kV7kb9Is7GCqmZlnZSb2xStapVqClR36QJPPQUfflh5/cxMqw5Wnz5HeOCcHIyVK/ElJOBYudI60WRmwr//bR3Yd3gYsdMJ06YpiCXN5uOP6zk5QWwMqR0he1evemcri4iINIb09HTuueceFi1a1NJNETkqKJDVypV/yTVJK9sNxSU47Ac4ZCTwpz8ZHH88HDhQfbtNm+DEE8t3cl6PzWScupbB377ELV/8kY2+U+nEHgwMKxB
iGJiGUR4TsR/Oyirw4HLFM2yYUf+kqCgv8hfNorrIXzAbK+/3eMxY0siHQKUVwg57dFJMvpmEK+/3uObMbYKGtU6hQHWKid8PbdqY/Pe/Bp99ZhXgDzrhBLj55kb6kl4heuZJSSG+qMgaOti5sxVFCDrpJGtWwi5dGuGgIpEpKrLKLzbK5AQiUiuXy4XL5cLvDzN9s4hE7JJLLgHgrbfeatmGiBwlFMhq5UIzN8WX4D9YwvdmHwKFNvwxAb77zo7fX3kYYUwM9OtnZXeELF1K16lT6Qqscg8gu3AQqWYehs2wrv7tdrDZMKtcxISysg7PYNigL9DRXIcqykVtkT+vl5wNyVY2lnEIw/RXqd8FhhmoEtw6XL/LOES2fzi537zTbM2NZhWzsdq3h4ICMAwDtxv27IHu3aFDB7j+erjoohrL0dXf4ROLmZICfj9mfDzGG29At27lJ5Qrr4QbbgCHPkakeTV8coJjNolXpMGysrLIysqioKCAlIqT9ogc5R544AHWrVvHmjVryMvLY8GCBYwbN67aeh6Ph+nTp/PCCy+Ql5fHwIEDue+++xg5cmTzN1rkGKJvIK1Y1Wwsw/RhGFbRb5vPh9dm4+BBg3PPhVNOsYYPntS9iLhASeXiIsOHQ0yMVXdp/2+sLBpbCcSFH5YWEszKOlSKyxXXsKwsOeqYMbG4TniEoq0xJKX5KK36uGlSXFRMojOx2hdOO1C038HcHg/A+vnN1uZoFQpUp5ZnkLRpY+J0GhQVwciR1uyECQmNeNAq0TPbgQMY+/ZZQwn37YPjj4d776XGqRNFGlnVjJBXX61e5xHA6/WxfPmqypmoL71kvWdPPBHS0o7VJF4REamnBx98kK5duzJo0CA+rFrHoYJx48axePFibr75Znr37s1zzz3H6NGjWbZsGWeeeWbzNVjkGKNAVitWMRvLyHOD3U6SWUQAO4mUQIeO+GPiGXu5j+FGDix+yyqo8/OfWzVtgtq0gT/+kZwDfch+PJNUYyeGI6b2INZhjZKVJUcVrxe270vAmQzusnCnGJMSv0nAG0+4N5kzGXYeiG/ydkY704QHHrBG3lYtap2RYWVk7dgB8Y39UlWJngViYiAlxWpIWRncequCWNKsqmaE9OljDS2sqlom6r598MYs68FTT4V//KNZ2y0iIq3Xxo0b6d27N5988glDhoSfIX3NmjW8+OKLzJo1i9tuuw2AK6+8kv79+zNp0iQ+rliOQUQalQJZrVS12lgBE2JsdHLsw26zg9eLWeRhZyAd129yGNbljvJsqffeg0mTIC6ufH/XTMD1B5OifDdJpo9SI77a0C9/IIDdqDJ2yQC76aMoz6usLAHqLH0WPmuiipgY6NWrCRsZ5Q4ehMmT4d13rUysoqLKk4HGxVk1zrKzadzi1T6fFeQuLa0UPTM7d8ZISIDiYvjXv+CCC1RkSKLf+vXltwcObLl2iIhIq9OppvodFSxevBi73c6ECRNCy+Lj4xk/fjxTp05l+/btZGRkNGUzRY5ZCmS1UuGysTCwIlx+HwT8GIUFpDpsZJedRE7KAIYnrbeyLM4/H0pKKgWyvF7Y/lUBTn8hbqMNBKoW2zEJmCa2gEG1LBojgNN/iB0bAni9KRq2IUdcv6ugoOnaFs28XnjxRZgzBzZsgEDAeo3y8ioHssC6n5/fSMWr8/Lgtddg7lz49FMrSmYY1vkErOJbwTFZjR49E2kiX3xRfnvAgJZrh4iIHJXWrl1Lnz59SK6SJjx06FAA1q1bFwpk+Xw+fD4ffr8fn89HaWkpMTEx2O32Zm+3yNFAgaxWKGw2VqwNAgFsXu/hL6CACU5/Ifm2VFz2vzDskUKMM04PW5w5Nsbk5Z5TyNv6GbRPq/atOGCaFBcVkeh0Yqv6jdk04cB+2vU8hdgYFxGNSRSRSlasgEcfhW3brELWbrcVxOrcGdq2rb6+YVhx6SOKK23YYKXPvfuuVRNr61YrelZcHH79Ro2eiTQxBbJERKQ
J7dq1i/T09GrLg8t+/PHH0LL77ruPGTNmhO7ff//9NRaQB6uIvKfC7FwFh3/l9Xq9eL3eausHl4V77Eg15b6j6ZiRiua2QXS3L5K2RdpuBbKaicfj4frrr+e9994jPz+ffv368dhjjzG8Ad8+c3PDZGOBlTUR/GJpWPcNA1LT48l2n0xOLAyv6X/c66XT/q/olLwLvLuqPWwCJf4SErwJ4cNUycD+DZoOSqSetm61AljBMgqmaZX2iY2Fnj3Ls9ZMs/q2DYorlZXB++9bRbC//LJ8eVGRFT1r06bmdLpGiZ6JNAOvF775xrqdkRE+GiwiInIESkpKiKswwiUo/nAB05KSktCye+65h3vuuSfifc+cObNS4Cvo3XffJTExscbtli5dGvEx6qsp9x1Nx4xUNLcNort9tbWtuKYf1KtQIKuZ+Hw+unfvzooVKzj++ON5+eWXueiii9i6dStJ4aZfqsXcuWGysQ4zbXYMm3F4qKEBZV6c7j3kx/bA5TJq/rJbR2Ejn9fLquXLa61rpOmgROpn2zb47W/h8GRsABx3HOzeDR061Dz0MqjecaUvv4SJE60iXBUFz0EpKdaX/toiYsrKktZg40YraAvKxhIRkSaRkJBQKWsqqLS0NPR4Q02ZMoWJEyeG7hcUFJCRkcH5559fbSgjWFksS5cuZeTIkTV/V2ugptx3NB0zUtHcNoju9kXStoIIa8wokNVMnE4n06dPD92//PLLmThxIt9++y2nnnpqvfb18cdhsrEOM+22ysvsdowiN6nJJWRnJ9b+ZbeOwkbuzZuptbCRyDEuN9dg5syhdOhgRDSxX9eucMYZsHy51fVuugmef94KbFWtiUVRMc7du6yhwRWC3/WKK/XoYRVyD+rVy4qktW0LV11VXhurNsrKktZAwwpFRKSJpaens3PnzmrLd+2yRrd06dKlwfuOi4sLm+0VExNTa3CirsePRFPuO5qOGalobhtEd/tqa1ukba5a0VsOc7vd3H333VxwwQW0a9cOwzB47rnnwq7r8XiYPHkyXbp0ISEhgczMzDpT+TZt2sTBgwfp1YCp2YqKTOz5Byj1x1JqxFMaiK3wF1f5vhFPqT8We/4BiopMXK7wQ5RE5MiYJsyebWP9+jRmz7aF7Wffflu9/918M0yYAP/3f9aovuxsK05UOZ5kYuzbi6OkBGPfXqzBvpaqcSXACla99hosXFj5YE4n/PKXcN55Vmrnv/8Nl1wC8+ZZNbKqRc9q4HRa6+uEItFKMxaKiEgTGzx4MBs3bqyWQbJ69erQ4yLSNBTIqsH+/fu59957+frrrxk0aFCt644bN45HH32UK664gieeeAK73c7o0aNZsWJF2PVLSkoYO3YsU6ZMISUlpd5tczo8uN0GhUYbCgPOCn+JuE3r30rLjTa43QZOh4cdO6zSISLSuHJyYOVKg4QEHytXGuVBJayhglOnwhVXwLJllbfr2tUKZMXFWXGhoiIrqbK0tMLfwWJKC72UkEBpode6X+Fxu93azjWrGPPxJ2D0aLjvPitYVVhY+YATJ8Lf/gYnn2xFwUJToKZGPkwwbPRMJIoEM7ISEqzMQxERkUZ22WWX4ff7mTNnTmiZx+NhwYIFZGZmhmYsFJHGp6GFNUhPT2fXrl107tyZTz75hCFDhoRdb82aNbz44ovMmjWL2267DYArr7yS/v37M2nSJD4OVnA+zOv1MmbMGHr16lVpqGF9vHrKfSR98pk1FqnC986ACe7CQpLatMFW8fuoCezZA0OG027eQ8TGqqaNSGMqn0kUUlI8FBXF43LB4MHwwgtWYlSwhMJjj1nDCatmi3u9sH27lezkdlfaO+wvBn8iAZsNmz8Ae4ohLRHrBGBCWRnOsiJ2LN2Cd+u/ibX5rE1LS61g0+jR5burGKwKNryoyBquWHHY4eHHbWVl1vKqQa5Q9Ey1siTK+P1wwQVWVlZCQrUh+CIiInWZM2cOpaWloZkH33jjDXbs2AHAjTfeSEp
KCpmZmYwZM4YpU6awd+9eevXqxcKFC9m6dSvz589vyeaLHPUUyKpBXFwcnTt3rnO9xYsXY7fbmTBhQmhZfHw848ePZ+rUqWzfvj0UjQ8EAvzhD3/AMAwWLlyI0cAvfn2+epXkNCckVP7SGTBNCjyHSI5PwVZ132lu+OJl2HwpdFJNG5HGFExqSkkx8futf5cuNTjvvPIAFlhJTH/8Y/gyczXOt7B2Hdx6K4GOcbj9fpLsdmweD9xzv1W0/X//g10/QntoZy+wglgxMTBqFPzmN9CvX80Nrzl6BlhhMkdpac3nKqeTUJqnJnqQaGG3w5//3NKtEBGRVuypp55i27ZtoftLlixhyZIlAIwdOzY0qub5559n2rRpvPDCC+Tl5TFw4EDefPNNRowY0SLtFjlWKJB1hNauXUufPn2qzR4xdOhQANatWxcKZF177bXs2rWLd955B4fjCF76oiJITlb2hEgzcLlcuFwu/BWnFqygYjZW+/Zw4ICNvDyDggL45hvo3t2qz/7b38I111h1sGpSbb4F04T7HoHABgJtu1BQUEBycjK2H3+EGb+zDmgYEF9hB5dlWXWv2rat+8lptlIRERGRatavXx92dsCq4uPjmTVrFrNmzWqGVolIkAJZR2jXrl2kp6dXWx5cFkxH/eGHH5g3bx7x8fGkpaWF1nv77bf56U9/GnbfHo+n0pSuwUKCptOJGSZ7AtPEcTi4ZYYLVDmdmNu24S8urvcXT+/hwlreZiyw1RLHjFQ0tw2iu32RtC2a2p2VlUVWVhYFBQVha9pVLDG1Z4/BgQOJ2O1W7NjttiYKnDXL+rfewtWvCtan2r8f4uOtIYGnnWZlX511Vv2HUWm2UhERERERaUUUyDpCJSUlYadGjY+PDz0O0K1bN8x6zu41c+ZMZsyYUW35RzfdhDMhoQGthbKkJMree69B2wJ1zsbYFFrimJGK5rZBdLevtrYVFxc3Y0sarmI2Vloa5OeXPxYfb8WU4uOtrKwG7fyJJ+DQIWvHXbuWP+Z0WllUCQnw0ktwwglH9kREjhKG3w/btkHPnso8FhGRVq+ukQEixyoFso5QQkJCpaypoNLDmVEJDQw4AUyZMoWJEyeG7hcUFJCRkcEZf/xj2FRXr9fL0qVLGTlyZM3DgBqoKfcdTceMVDS3DaK7fZG0reo0xtGqasJUx44mBQUmHTpYo/6Kison9hten9J0u3dbMwu+9pp1327HyMuzomJgHaxtW6tG1t69CmSJHJa0cyf2++8vL0g3dmxLN0lERKTB6hoZIHKsUiDrCKWnp7Nz585qy3ft2gVAly5dGrzvuLi4sNleMTExtQYn6nr8SDTlvqPpmJGK5rZBdLevtrZFa5srqpqNBdbou/T0IlJTkzEMA6fTSqaKuDTdV1/Bv/4FS5fC5s3W7GvB18Lnq7xuvXcucvRL3rrVunHoUPWpQUVERETkqGBr6Qa0doMHD2bjxo3VMkhWr14delxEjj7hyldRVEzSnl1QZA2NDJazCmZlhRUIwAcfwNVXw1VXwbvvQmGhVWDL4bAyr3r2xDzuuMrbRbRzkWOHkZvLif/+t5UKCTBwYMs2SERERESahAJZR+iyyy7D7/czZ86c0DKPx8OCBQvIzMwMzVgoIkePitlYTmdoKca+vThKSjD27QWsmnhOp7Wey2VtV82HH8KkSfDFF+U7P3TIGkbYpw+kp9ecWVLnzkWOEaaJbfZsnHv3Yuzda/WZXr1aulUiIiIi0gQ0tLAWs2fPJj8/PzTz4BtvvMGOHTsAuPHGG0lJSSEzM5MxY8YwZcoU9u7dS69evVi4cCFbt25l/vz5Ldl8EWki4bOxisDtxjRsGG63dd+ZVC1xanhmAGwVfkM46yzo3Nmqi9WzJ5x6Kjz9NKSk1D1TYLWd16cQl8hRJCcHY/lyAoBRVGSN963vDJ4iIiIi0iookFWLhx/+f/buO7yp8osD+Pcm6UzblDJbKCCUKSCiLAcyFBAUFRcIIuK
ECiIiiDJEwaqAgFB+yhAQQUTEBSpDdqFQRRQBBcFCgUKBzrTNvr8/Tm9Gm9kmTdqez/P0IePm5m3Jm/ve9573nLk4d+6c+f6mTZuwadMmAMDw4cPNCfc+++wzTJs2DWvWrEFOTg46dOiAzZs3o0ePHn5pN2PMd+zlxgJESrpuEiHK5bRcMCsLuEEJoCRX1jU9kp/7C93u/QLCnA8sO5TLgVdfpQqEXboAI0aU3rlznCuL1XRSpywqsvS/S5foce4PjFWatDQBSUldULeugNtu83drGGOMVWe8tNCJ9PR0iKJo96dp06bm7UJDQzFnzhxkZmZCo9Hg8OHD6Nevn/8azhjzmbQ0x9FYkMsBAfSvWg2oC4GCfAjn0hGtvoB9pxsgdfM1ICPDdqe9etEk1KFDdnbuAufKYjWdFCKpUFB/kMupj3F/YKzSiCKweLEMx47VweLFMl7tzhhjzKd4IosxxjywbBnNW8nlgEYDaDQiNJk50BiDoUEoNKYQaBACjUEBzZkL0JzPgqbQALlgRKExDMm5wyCezyi7YymqxHbnNj8ync7u45DL6XWcK4vVNNYhkkYjPSaTWR7n/sBYpUhNBVJSBISFGZCSIvA8MmOMMZ/ipYVVQHJyMpKTk2GUBumMMb+5cIFW86nVJQ9otYBaAIRIwCRANBohGERADAZMAKCgE2uFAsqYYFxo0Qv6zjIEl96xXk9RJDY7txAAKDQaCI4itZRKapxeDwSX2Ttj1ZMUjaVSARcv0mPBwVTtk3PHMVYprOeTVSotCgtDebU7Y17C54GM2ccTWVVAYmIiEhMTkZ+fb87LxRjzj9WrAYOh5I4oApOmAQcPAvXrQ7x0EcbCIsjlcgiiSBs2igeSkoCbbgIEATExDuaZgoOBDRuAnBy772vQ63Fw71706NEDQY6SwDvcOWPVkPXZc1SU5Yw5LIxzxzFWiSzzySKMRvp33z6B55EZ8wI+D2TMPp7IYowxD9SrR+fMAICDqcCfG4A6YUCYBmKdPBiLMyCXKyCoVEBoKGA4ATQpBtq4cSJdvz792KPXQ332LNC6tetqhozVBNblQ0NCILZsiaJr1xARGUmRi1zRkzGfs55Prl0byM+neeS8PJ5HZowx5jucI4sxxsrDevSuVNJDUVHQqlQQmzcHYmPpRFqr5Vw9jHmbnf4HAKagICAkhO4oldz/GPMx6/lkacKKa5AwxhjzNZ7IYoyx8rA3egegj4yk6mkAj+YZ8xUH/c8G9z/GfMrBfDIAnkdmjDHmWzyRxRhjnnI2ei+NR/OMeRf3P8YCgrP5ZJ5HZowx5ks8kcUYY55yJxpEwqN5xrwrLY37H2N+5s58Ms8jM8YY8xWeyGKMMU94Eg0i4dE8Y96zbBn3P8b8jFf3MsYY8yeeyGKMMU94Eg0i4dE8Y95z4AD3P8b8iFf3MsYY8zeeyGKMMU8sWwYUFgJyOaDRlPmR6XR2H4dcTq/j0TxjFcP9jzG/4tX1jDHG/E3h7wYw15KTk5GcnAyDwQAAyM/Pt7udXq9HUVER8vPzERQU5NU2+HLfgfSe7grktgGB3T532iZ9xsUAOuGU2pKfng6EhQF2+qEIoLi4GKIowu7YPiwMSE8Hrl8HgoM9en/ug7a4beXnqn2B2P8Aqz7I/c/vArltQGC3r6ofA/Py8rFgAVBcDNSqBRiN1tuYYDLlw2gEBMFyrTw0FMjOBhYsANq2dT+Y0hr3QVvctvKr8sdAPg/0u0BuGxDY7fPmMVAQA62XMocuXLiA+Ph4fzeDsUqTkZGBRo0a+bsZALj/sZonkPofwH2Q1TyB1Ae5/7GaJlD6nxTQoNPpcObMGX83h7FK46oP8kRWFWIymXDp0iVERkZCsHM5Kz8/H/Hx8cjIyEBUVJRX39uX+w6k93RXILcNCOz2udM2URRRUFCAuLg4yGSBsQLaVf8DuA9WJm5b+blqXyD2P4CPgYEkkNsGBHb7+BhYPtwHbXHbyo+PgZ7j/mcrkNsGBHb
7vHkM5KWFVYhMJnPrykBUVJTPPrS+3Hcgvae7ArltQGC3z1XbVCpVJbbGNXf7H8B9sDJx28rPWfsCrf8BfAwMRIHcNiCw28fHwPLhPmiL21Z+fAz0HPc/W4HcNiCw2+eNY2DgTDMzxhhjjDHGGGOMMeYET2QxxhhjjDHGGGOMsSqBJ7KqkZCQEMyYMQMhISFVat+B9J7uCuS2AYHdvkBuW0VxH6w83LbyC/T2lRf3v8oTyG0DArt9gdy2iuI+WHm4beUX6O0rL+5/lSeQ2wYEdvu82TZO9s4YY4wxxhhjjDHGqgSOyGKMMcYYY4wxxhhjVQJPZDHGGGOMMcYYY4yxKoEnshhjjDHGGGOMMcZYlaDwdwOY+0wmEy5duoTIyEgIguDv5jDmM6IooqCgAHFxcZDJAmO+nfsfqykCsf8B3AdZzRGIfZD7H6spArH/AdwHWc3hbh/kiawq5Pz582jWrBkiIiIC6ouVBTZRFFFcXAy9Xu/vpngsIyMDjRo18nczAACXLl1CfHw8QkNDA7IKCCs/6YDJtU9sBVL/A4Bz586hefPmPIivgoxGI9Rqtb+bUeUEUh+UjoFhYWEIDg72d3MYA0DH78LCQhiNRq/vO5D6HwCkp6ejRYsWiIiI4GMgqzCTyYSCggJ/N8MpV32QJ7KqkBMnTqBt27aYPXs2IiMjyzxvMplw8eJFNGzY0OsTXb7cdyC9p7sCuW2AbfsEQcCRI0cQHByMp59+2t9Ng16vx7Zt29C3b18EBQXZ3SY/Px/x8fF2P+f+EhYWhsjISLz77rto27at3W24D1Yeb7ZNp9PhwIEDuOWWW9CrV68Kt82dz7g/uWpfIPY/ADh27BjatWuHWbNmISIioszz3P8qj6dtu379On7//XcMGTIETZs29Xn7ArkPVtVjYFBQEFQqFd599120bt3a7jbcBysPt40YDAYcPHgQrVu3xr333uvWa6rqMfDo0aNo3749Zs2ahfDw8DLPc/+rPIHcNsC99hUVFeHgwYPo06cPbr311kprmzePgTyRVYWo1WrExsaif//+diNCjEYjTp8+jRYtWkAul3v1vX2570B6T3cFctuAsu1TKBRIS0tDVFSUv5sGvV6P8PBwREVFuTzBCKQrTsXFxYiIiECvXr1w44032t2G+2Dl8XbbLl++DIPB4JU+4sln3B/cbV8g9T+AjoENGzZEv379+BjoZ562zWg04vjx4xBFsVKOQ4HcB6v6MbBPnz5o1aqV3W24D1YebptFdna2R8fvqnoMLCgoQOPGjXHPPffwMdDPArltgPvtO3v2LIxGY6WeH3rzGBh4U4iMMa8LtINxVcZ/S8b8h/sfY/7D/Y8Fopr0uaxJvytjrvBEVjWwaNEiNGnSBCEhIfjggw/cft3169dRr149pKen+65xlahnz54YP368v5vhd0OGDMGHH37o72ZUG8nJyWjbti169uzpcJua1AdL9zNX932td+/eLt+vdJuGDBmCefPm+bZhrNLUpP4n8efxrmfPnpgwYYLD57l/1TzcByv/vZ31weqEj9+ulbf/OVOevulsPOiP/uLOe1bnc8cRI0aYvyeqa7/hiawq7o8//sCECRPwv//9D+np6Rg7dqzbr509ezYeeOABu/kqDh48CLlcjoEDB5Z5zlGnr8wvA3vvtWnTJrzzzjtefZ+rV69i9OjRaNy4MUJCQtCgQQP069cPKSkpXnsPb//dpk6diqSkpIBP4FdVJCYm4sSJE9i9e7fd573dB935zPmqr40cORKCIJh/FAoF2rRpgwEDBri9D1/0Q2c2btzo8ftNnToVs2fPRl5eno9axSqLt/ufdR8ICgrCDTfcgEmTJkGj0fig9dUT96+ahftg4PHW37C8Y43evXvj3Xff9fh17uDvF1sV6X/OlO6bo0aNQps2baBQKGzGif3793drf5U9NizPe3rzu6e8fcdX4/vq2m84R1YVt3nzZnTp0gUDBgzwqCpQUVERVqxYga1bt9p
9fsWKFRg7dixWrFiBS5cuoX79+l5pr06n81m1m5iYGK/v8+GHH4ZOp8Pq1avRrFkzXLlyBb/88guys7NRr149r79fRUh/23bt2qF58+b4/vvv0alTJ383q9rzdh/092euf//+WLlyJQBaY3/27Fm0adPG7df7oh+6ej9P8xNIfeTzzz9HYmKij1rGKoMvjoFSH9Dr9fjtt9/w1FNPQRRFPPPMM95ufqXy5fHXGvevmoX7oPsqqw8C9v+GgiDg/fffr5T39xX+frFV3v7njKO+eeedd+KLL76wGXO5W8W7sseG5X1P7jdVC0dkVWEJCQmYOnUqDhw4AEEQMHLkSLdf++OPPyIkJATdunUr85xarcaXX36J0aNHY+DAgVi1apX5uVGjRmHPnj1YuHChedY6PT0dI0eOtPt4z5498dJLL2H8+PGoU6cO+vXrBwD4+eefcccddyA6Ohq1a9fGfffdhzNnzpjfx2QyYc6cOejXrx/Cw8PRuHFjzJ49GwCcvpc0i7106VLExcXBZDLZ/G4PPPAARo0aZfM+SUlJuOGGGxAWFoabbroJGzduBADk5uZi3759eP/999GrVy80adIEXbp0wZQpU3D//fe7tQ/p+Q8++AAJCQkICQlx63fRarUYN24c6tWrh9DQUNxxxx1IS0uz+V0c/W0BYODAgfjpp59cfxBYhXi7D7rzmXPUB119DgHLZ+all16CSqVCnTp1MG3aNIiiaN5GigKTfurWrYtatWq5/XvZCy139Z4bN25E+/btERYWhtq1a+Puu+9GYWEhALjsC6WXFhYWFmLEiBGIiIhAbGysw1Dq+++/H+vXr3f792KBx1fHQKkPxMfH48EHH8Tdd9+NX375xfy8O9/Pzj7T7vbV8ePHY86cOahbty4aNGiAt956y2Ybg8HgtF85Oka4ar+zY5Y9W7ZsgUqlwtq1a82Pcf+qGbgPBm4ftPc33L59u83rnLXD0fgUcD6GHzlyJPbu3Ys1a9aYo3ek17n6u/Px2zMV6X/OOOqbwcHBNuPDBg0auD0+LM/Y8Ouvv8agQYMQERHh8djQ3nu68/lyt9+MHz8et99+O5RKZZn3dtR3XJ37OnqdO99XpX83eylmqmO/4YmsKuzAgQNo1qwZ5syZg8zMTCxevNjt1+7btw+33HKL3ec2bNiA1q1bo1WrVhg+fDg+/fRT8xfL/Pnz0b17dzz33HPIzMxEZmYm4uPjsXDhQruPA8Dq1asRHByMlJQUfPzxxwCow02YMAG//vorfvnlF8hkMjz00EPmiacpU6bggw8+wOjRo3Hs2DGsW7fOHBXm7L0kjz76KK5fv45du3aZH8vOzsbPP/+MYcOGmR9LSkrCZ599ho8//hjHjx/HK6+8guHDh2PPnj2IiIhAREQEvv32W2i1Wod/S2f7kH6X9957D9OmTcOJEyfc+l0mTZqEr7/+GqtXr8aRI0eQkJCAfv36ITs72+a97f1tAaBz5874888/nbabVZy3+6A7nzlHfdDV51CyevVqKBQKHD58GAsXLsSHH36I5cuXe/7Le8DZe2ZmZmLo0KEYNWoUTp48id27d2Pw4MHm7xx3+4Lktddew549e/Ddd99h27Zt2L17N44cOVJmuy5duuDw4cPcR6owXx0Drf311184cOCATWUdV59JV59pd/vqZ599hvDwcBw4cAAffPAB3n77bZsBtTt92d4xwlX7nR2zSvviiy8wdOhQrF271ubYyv2rZuA+GLh90N7fsHQ0mLN2OBtrOxvDL1y4EN26dcOjjz6KCxcu2LzO1d+dj9+eqUj/c8bdvllRrsaGw4YNw+DBg/HXX39VeGwIuP/5kjjrN5s2bUJSUhLS0tLKvLejvuPq3NfR69z5vir9u+3ZswcnTpywaXe17DciqzK+/PJL8e677xY1Go0oiqJYWFgoymQy8eDBg6IoiuLp06fFLl26iG3atBHbtWsnqtVqh/t64IEHxFGjRtl97rbbbhMXLFggiqI
o6vV6sU6dOuKOHTvEkydPigaDQbzrrrvEl19+uczr7D1+1113iTfffLPL3+3q1asiAPHYsWNifn6+GBISIn7yySfm93T3vawfK/07fvLJJ2JcXJxoNBpFURRFjUYjhoeHiwcOHLDZzzPPPCMOHTpUFEVR3Lhxo1irVi0xNDRUvO2228QpU6aIf/zxh2gwGMSTJ0+KhYWFTvch/S7Lli1z+LuXbrdarRaDgoLEtWvXmh/T6XRiXFyc+MEHH9i8ztHf9siRIyIA8cyZM6IoiuKePXvEOXPmOGxDZdLpdOK3334r6nQ6h9vk5eWJAMS8vLxKbJlzGRkZYmxsrHj8+HHzY9Z98O+//xZvuukmsXXr1uJNN90khoaGit98843D/Tnqg64+c/b6oDufZVGkz0ybNm1Ek8lkfmzy5MlimzZtRFEUxaeeekqUy+WiUqk0/4SHh4vvvPOOzT6s39ud+87e87fffhMBiOnp6WX+Fs76wnvvvSeePHlS7NGjh/n9CgoKxODgYHHDhg3m7a9fvy6GhYWV+b74448/bN535cqV4saNG8u0oTzc+Yz7k6v2BWL/E0VRXLt2rXjvvfc6PAbOnTtXbN68udimTRtx7NixNp+50hz1P+s+EBISIgIQZTKZuGHDBvHkyZNiXl6ey+9nZ59pT/rq7bffbnMM7Ny5szh58mTz8876lbRN6WOEq+OLu8escePGiVOnThVVKpW4e/fuMtuU7l8Gg0GcMWOGePToUYf79aZA7oNV9Rj433//iY0aNRL//vtv82Ol++D7778vNm/eXLzxxhvFNWvWON0f90Hf9UFHf0PrY5w7Y01H4/3SrMfwoiiKPXr0EJ988kmb8burv3tFjt/r168XP//8c5fttP49q+IxcNWqVeKgQYMcHgMfeOABMSoqShw8eHCZ1/7www9iy5YtxYSEBKefLWk/pfvmiBEjyowPlUqlOHv2bPM2zsaD5R0b7tixo8x5oCfnSZ6MDz3pN2vWrDF/N7h6b0dK9xt7r3Pn+8re75aVlSWGhoaK48aNMz9Wut+IoiguXLhQ3LZtm9N2eps3j4GcI6sK+/PPPwEA7du3B0BLjsaOHYuhQ4ciLy/P6brl4uJihIaGlnn8n3/+weHDh/HNN98AABQKBR5//HGsXLkSU6dOLVc77c3qnz59GtOnT8ehQ4dw7do182z0+fPnUVRUBK1Wi969e8NgMJTrPQFg2LBheO6557BkyRKEhIRg7dq1GDJkCGQyCkT8999/UVRUhHvuucfmdTqdDjfffDMAylc0cOBA7Nu3D6mpqfjpp5/wwQcfYOnSpbjttttc7uPkyZPQarXo06eP2+0+c+YM9Ho9br/9dvNjQUFB6NKlC06ePGmzraMrJmFhYQBonTvzHes+qFQq8dtvv+H06dOIjY1F8+bNy3wurDnqg64+c/a481mWdOvWzaZ8c/fu3TFv3jwYjUYAQK9evfC///0PAOXISk9Pr3CuNWfvedNNN6FPnz5o3749+vXrh759++KRRx5BrVq1nPaFv//+Gw888IDN+5w5cwY6nQ5du3Y1PxYTE4NWrVqVaRP3karPuv9dvXoVS5YswaZNm9CmTRv06tULqamp6N69u93XOup/gKUPFBYWYv78+VAoFBg8eDBOnz7t1vezs8+0J321Q4cONvdjY2ORlZVlvu+sX0k5TEofI1y1391j1tdff42srCzs3bvX7tIw7l81g3UfPHbsGNavX4+vv/4azZs3x91334377rsP0dHRdl/LfdC3fdDe3/Dhhx92ux3OOBvDt2vXzu5rXP3d+fjtudLngWPHjkW/fv2wY8cOm+0MBgMmTJiAXbt2QaVS4ZZbbsFDDz2E2rVr292vo77ZpUsXrFy50iZHVkVyX7kaG/bu3RsPPPAA+vfvj379+rk1NnT02XX38+Vuv7ntttug0+ncem+JL/qNs9/thhtusHlNdew3PJFVhR09ehQJCQlQKpU4fvw4goKCcOuttwJ
w/cVSp04d5OTklHl8xYoVMBgMiIuLMz8miiJCQkLw8ssvl6udSqWyzGP3338/mjRpgmXLlplzWbVr1w46nc7c0Srq/vvvhyiK2LJlCzp37ox9+/Zh/vz55uelhIhbtmxBw4YNbV5rPQkYGhqKe+65B/fccw+mTZuGZ599FjNnzsTWrVtd7iM3N9crv4sj9v62AMzhrXXr1vXp+9d01n3Q2g8//IA+ffo4/P8BHPdBwPlnzh53P8vuUCqVSEhIAEATWaIo+jRJp1wux/bt23HgwAFs27YNixYtwptvvolDhw757D0B7iPVgXX/KyoqgsFggFarhV6vh16vd1ocwVn/s+4Dn376KW666SZ8+umnuPPOO91ql7PPtCd91XopFQAIglAm76Mrzr6D7HH3+NuxY0ekpaVh1apV6Nq1q83JCMD9q6aw7oMnT55Et27dEBISYs7j8vPPP2PIkCF2X8t90D5v9UF7f8MVK1Z4JWG+szG8I67+7s6WhJXG3y+k9Bi0Z8+euHjxYpntDh8+jBtvvNH8d7/33nuxbds2DB061O5+HfXN8PBwJCQkeFxgpzzkcjm2bt2KDRs24OTJk5U2Nqxq/cYT1bHfcI6sKuzo0aO46aabANAsb0REBEaPHo3OnTu7LHt78803l1k7azAY8Nlnn2HevHk4evSo+eePP/5AXFwctmzZAoCS/UnRG9YcPV7a9evX8c8//2Dq1Kno06cP2rRpY/OF2aJFC4SFhWHnzp0O9+HOe4WGhmLw4MFYu3YtvvjiC7Rq1comsqRt27YICQnB+fPnkZCQYPNTOueWtbZt25qTDbrah/S7WCcpdfW7NG/e3JxPQaLX65GWloa2bds6/Z0lx48fR4MGDVCnTh23tmeOJScno23btujZs2eZ56z7oLWvvvoKjz/+uNP92uuDjlh/5oCynxlPPsulBwGpqalo0aKFTwcmrt5TEATcfvvtmDlzJn7//XcEBwfjm2++cdoX7FVSbN68OYKCgmzeLycnB6dOnSqz7V9//YVGjRpxH6nCrPtf3bp1MWHCBPTu3Rvx8fG4++670bx5c4evdbf/yWQyvPHGG5g+fTo0Go3b38+OPtPlPe7YU56+7Kr97hyzpP2sWrUK33//vd1y79y/agbrPtiuXTvs2bMH+fn5yMnJwe7du+2eVEu4D/q2D1qT/oZTp05FcXGxW+0A7I+1XY3hpdeVnvBz9Xfn47fnHI1BS7t06ZLNJEjDhg290jcryp2xYadOnfDWW2+5PTZ0dJ7kyedL4qzfHDhwwOl7l+477vQbe69z5/vK0e8mFVmQVMd+wxFZVdjRo0cxaNAgADQJtX//fmzcuBFdu3bFwIED0blzZ4dLm/r164cpU6YgJyfHXHFi8+bNyMnJwTPPPAOVSmWz/UMPPYSvv/4a06ZNQ9OmTXHo0CGkp6cjIiICMTExkMlkdh+3p1atWqhduzaWLl2K2NhYnD9/Hq+//rr5+dDQUEyePBmvv/46Jk2ahMGDByM7OxvHjx83z4i7+17Dhg3Dfffdh+PHj2P48OE2z0VGRmLixIl45ZVXYDKZcMcddyAvLw8pKSmIiorCfffdh0cffRSjRo1Chw4dEBkZiV9//RUffPCB+e/uah9PPfUUJk+ejEmTJiE4OBi33347rl696vJ3GT16NF577TXExMSgcePG+OCDD1BUVOT2FYH9+/c7XIbGPJOYmIjExERcuHABXbp0sXnOug9K1Go1Dh48iC+//NLpfu31wevXr7v8zAH2PzOuPoeS8+fPY8KECXjhhRdw5MgRLFq0yKZyi1arxeXLlwFQRNbVq1dRq1Yth4lm3eHsPQ8dOoRffvkFffv2Rb169XDo0CFcvXoVbdq0gVKpdNgXRo0ahWvXrtm8T0REBJ555hm89tprqF27NurVq4c333zTvJzY2r59+9C3b99y/07M/6z7X05ODrZs2YIdO3agXbt2uO+++7B371706NHD7mvt9T9HHn30Ubz
22mtYt24dkpKSXH4/O/tMu3PMcJervmyPsz71zDPPmI+/zo5ZkhtuuAE7duxAnz59oFAosGDBAvNz3L9qBus+2LZtW7z00ksYOXIk6tWrh27dujmd0OE+6Ns+6OhvmJycjIkTJ7psB2B/rOFqDF/6dSqVCjExMW793fn47Rl7Y1BvcNQ3dTodLl++bNOvFQpFuSdGXI0Nt2/fjtatWyMkJAS//vqrW2NDR+dJnowPrTnqN5MnT8Zbb70FvV6PefPmlXlve33HVb9x9DpX/cbe7/bGG2+UidKsjv2GJ7KqKJPJhGPHjmHatGkAaHb9lltuQWxsLEJCQjBgwAAcPXrU4URW+/bt0alTJ2zYsAEvvPACAFpWePfdd5eZxAKAwYMHY+7cufjzzz8xceJEPPXUU2jbti2Ki4vx33//oWnTpnYft0cmk2H9+vUYN24c2rVrh1atWuGjjz6yiXiZNm0aZDIZFi1ahOnTpyM2NhYvvvii+Xl336t3796IiYnBP//8gyeeeKLM8++88w7q1q2LpKQknD17FtHR0ejUqRPeeOMNREREoGvXrpg/f755TXR8fDyee+45TJ48GRcuXHC5D+l3USgUmD59Oi5duuTW7/Lee+/BZDLhySefREFBAW699VZs3brVrTK3Go0G3333nU0VQ+Z9pfug5JdffsE999zjMPeHxF4fdPczZ+8z4+pzKBkxYgSKi4vRpUsXyOVyvPzyy3j++efNz//888+IjY21eU2rVq3w999/l/tv5ew9o6KisHfvXixYsAD5+flo0qQJ5s2bh3vvvRcAnPaF0hNZADBnzhyo1Wrcf//9iIyMxKuvvoq8vDybbTQaDb799lv8/PPP5f6dmH+V7n87duxAQkICoqOjERYWhoEDByI1NdXhRJa9/ueIQqHAmDFjMG/ePEydOtXl97Orz7S7fdUVV33ZEVftd3XMstaqVSvs3LkTPXv2hFwux7x587h/VTPJyclITk4us/zF3jHw+eefR69evdCiRQu88MILaNGihcP9ch/0XR909Dd86aWXzBXBlUqly3Y4Gu+7GsNPmDABQ4cORfv27W1e5+rvzsdv9zkag9oTFxdnE4F18eLFMhdmrTnqm/v27UOjRo1stq3I+NDV2FBKCVNYWOjR2NARdz5fpTnqN0ajEZMnT0ZRUZHd97bXd1z1G0evc+f7qvTv9sorr5gvSgPVuN94NQ09c+q5554TGzRoIEZGRort2rUTv//+e49eX7pqoTW9Xi927NhRTE1NFXU6nXjfffeJP/zwg9P9bd68WWzTpo25ip8z1hXTKos/3tNdgdy2JUuWiHfffbdN+7hqYcXZq1pYmsFgEHv27Om0WqG1yu6D7lYg8ud7uqsibVuyZIl4zz332DzGVQstArH/iWLZqoXWDh48KHbs2FE8evSoqNVqxQEDBojffvut0/3xMbD8nLXNXv/iqoUWVfUYaK9qYWmXLl0ST548KR4/flxs3769qNfrne6T+2D51dS22ft+qalVC0szGAzi6tWry1Qt1Ov1YkJCgnjhwgWxoKBAbNmypXjt2jWn71W6b3r7/9SdsWFN/Yx7Q+n22es3oshVC5kHJkyYgEWLFiEkJARpaWm4++67cfbsWYdVIzyhUCjwzjvv4Mknn0RwcDD69u2L++67z+lrBg4ciNOnT+PixYse5wVggSsoKAgLFy70dzNqpLy8PBw7dgz9+vVza3vug/4RFBSERYsW+bsZzE1SREheXp7DfCDdunVD//79MXjwYISGhqJPnz4ul1xw//MN7l8110MPPYRr166hVq1aWLlyJRQK56cZ3AeZp/j7xbG+ffvi999/h0ajQaNGjfDVV1+he/fuUCgUmDdvHnr16gWTyYRJkya5PPfkvlm9VNd+wxNZlah169bm24IgQKfT4eLFi16ZyAKoCkVCQoJHiZvHjx/vlfdmgePZZ5+F0WjE6dOn/d2UGkelUmH//v0IDg52+zXcByvfs88+6+8
mMA9IeerWrVuHzz//3OF2s2bNwlNPPcXHQD/j/lVzpaSk4PTp09wHmc/w94tj27Ztc9j/Bg0a5HE+Le6b1Ud17TdctdABtVqNGTNmoH///oiJiYEgCFi1apXdbbVaLSZPnoy4uDiEhYWha9eu2L59u91tx4wZg7CwMHTu3Bm9e/dG+/btffhbMMaYxe7du50mg60u78kYY4wxxgITjw2ZN/BElgPXrl3D22+/jZMnT7osbTpy5Eh8+OGHGDZsGBYuXAi5XI4BAwZg//79ZbZdsmQJ1Go1duzYgb59+5apKOCMIAgwGAwe/y6M6XQ6jz5rjDEWaPgYWHXp9XoA4ONQFSeKIvdBFnB4jMtYzcRLCx2IjY1FZmYmGjRogF9//RWdO3e2u93hw4exfv16zJkzBxMnTgRAVRjatWuHSZMm4cCBA2VeI5fL0adPHyxYsAAtWrTAgAED3GpTw4YNcebMGSxbtgyRkZFlnjeZTMjMzERsbKzLcqKe8uW+A+k93RXIbQNs2ycIAjIyMtCyZUt/N6tKkvLz6PV6GAwG/Pfff6hfv77dQZPRaERubi6ys7PdXlbhLl/uO5De010XL5qwc2cY9PocNGxYsT6o0+mQnZ3ttWXezDfi4uLwzz//YPny5YiIiCjzPB8DK4+nbcvLy4NCoUCDBg0qoXXMF6KionD9+nV8++23+PXXX+1uw32w8nDbiMFgQEZGBvr06ePT9wkEQUFByMvLw+XLl+2eB/IYtPIEctsA99pXVFSEgoICj9KhBBqeyHIgJCTErQHXxo0bIZfLbcrthoaG4plnnsEbb7yBjIwMh0nyDAYD/v33X7fb1K5dO2RkZJjLi9vbX15eHpo3b+4ywaanfLnvQHpPdwVy24Cy7evWrVuZEq/MPVJ+nvz8fKhUKqSkpCAtLc3utiaTCRcvXkTDhg19Moj31b4D6T3dIYpASoqIjAwRP/wg4PbbBVT0Yqy05JsFrptvvhnnz5+HSqXiY6Cfedq26OhodOjQAfXq1auE1jFfUCgU0Gg0TvNfcR+sPNw2IggC2rZti27duvn0fQLBLbfcgt9++w2ffPKJ3ckHHoNWnkBuG+B++1QqlcuVZ4EssL75qqDff/8dLVu2RFRUlM3jXbp0AQAcPXoU8fHxyMvLw5YtWzBo0CCEhobim2++wa5du5CUlOTxew4aNKjM+wEUuh8aGooBAwYgKCiofL+QA77cdyC9p7sCuW1A4LevKnv66acd/k31ej127dqFXr16+aQP+mrfgfSe7jhyBFi+XERRkRZqdQjGjBHQqVP59ycIAurUqQOlUum9RjKf4WOg/wVy25hv9e/f327/A7gPViZuW80TGxsLtVqNJ554wm5UMo9BK08gtw1wr30ymQx169a1e2GwquCJrAqSQmdLkx67dOkSADpRWrZsGcaMGQNRFJGQkIB169ahY8eODvet1Wqh1WrN9/Pz8wHQh1PKN2FNeszecxXly30H0nu6K5DbBgR2+9xpWyC2W1K/fn2ng/j69eujadOmPhlE+GrfgfSerogiMHUqYDSKiInJQ2GhCps2CXjoIVQ4KosxxhhjLJA1btzY4cUcHoNWjkBuGxD47fMWnsiqoOLiYoSEhJR5PDQ01Pw8QLkFdu3a5dG+k5KSMHPmzDKPb9u2DeHh4Q5f56hiojf4ct+B9J7uCuS2AYHdPmdtKyoqqsSWsKokNRXYtw9QqUQYjfTvvn0CUlOB7t393TrGGGOMMcaYr/FEVgWFhYXZRE1JNBqN+fnymjJlCiZMmGC+n5+fj/j4ePTt29fhTPz27dtxzz33+GQm3lf7DqT3dFcgtw0I7Pa50zYp+pAxa6IIJCcDWi1QuzaQnw8olUBeHj3erRtHZTHGGGOMMVbd8URWBcXGxuLixYtlHs/MzARAVZbKKyQkxG60V1BQkNPJCVfPV4Qv9x1I7+muQG4bENjtc9a2QG0z8y8pGis62jJhJQh0f98+cFQWY4wxxhhjNUD
gpdmvYjp27IhTp06ViSA5dOiQ+XnGGGMVYx2NBQBqtSX0Sqmkx5OTaTvGGGOMMcZY9cUTWRX0yCOPwGg0YunSpebHtFotVq5cia5duyI+Pt6PrWOMserBOhrr6lXgwgXg8uVwGI1lo7IYY4wxxhhj1RcvLXRi8eLFyM3NNVce/OGHH3DhwgUAwNixY6FSqdC1a1c8+uijmDJlCrKyspCQkIDVq1cjPT0dK1as8GfzGWOsWrCOxgoPB0pSEEIQALmcbiuVQG4u58pijDHGGGOsuuOJLCfmzp2Lc+fOme9v2rQJmzZtAgAMHz4cKpUKAPDZZ59h2rRpWLNmDXJyctChQwds3rwZPXr08Eu7GWPek5ycjOTkZBiNRn83pcayjsYKDwdiY4Hr14GwMB2kwxjnymKMMcYYY6xm4KWFTqSnp0MURbs/TZs2NW8XGhqKOXPmIDMzExqNBocPH0a/fv3813DGmNckJibixIkTSEtL83dTaiTraCyl0jJh1ayZiPBwg822nCuLMcYYY4yx6o8nshhjjAUse5UKHeFcWYwxxhhjjFV/PJHFGGMsIJWOxnIHR2UxxgJZWpqApKQuSEvjRH6M+UNV64PJyclo27YtOnfu7O+mMBZQeCKLMcZYQLKOxsrNBS5dsiR6d4SjshhjgUoUgcWLZTh2rA4WL5bxZDtjlawq9kFOccGYfTyRxRhjLOBI0ViFhVSZMCsLyMkBzpwBCgpoQkunk0GjQZkfuZxex1FZjLFAkpoKpKQICAszICVF4Ml2xioZ90HGqg+eyGKMMRZw9HogI4OWCl67Buh0gMkEBAXRZJVaLUCjUUCtFlBQAJsftZped+EC7YcxxvzNeqm0SqXlJdCMVTLug4xVLwp/N4AxxhgrLTgY2LCBorDefBP45x96fOpUoGNHQK83YO/eg+jRoweCgoLs7iMmhvbDGGP+Ji2VVqlEGI307759FBHSvbu/W8dY9cd9kDH/k3LU1a0r4LbbKrYvnshijDEWkOrXB65fB86dA0JDgWbNgMcfpzxYej1w9qwarVtTlBZjjAUq60iQ2rWB/HyKGs3Lo8e7dXNdlZUxVn7cBxnzv9I56rp3r1i/46WFVQBXq2CM1VTr1lluDx3KA03GWNVjXbhC+g7jwhSMVR7ug4z5n7dz1PFEVhXA1SoYYzVRVhawfTvdVqmAAQP82x7GGPOUdSSIKAKXL1tm45VKcJ4exnzMug8WFNC/Eu6DjFUOX+So44ksxhhjAemrrwCjkW4//DAQEuLf9jDGmKekSBClErh0CcjNBa5eDYPJxBEhjFUGqQ8KAvW/9HQBxcWUXYf7IGOVwzpHnSBIOeoq1u94IosxxljA0WiAr7+m2woF8Nhj/m0PY4x5SroCrdFQ9VWTiR6XyUTISkbgHBHCmO9IfVCtpnxY0mMymaWzcR9kzLeso7GUSnrMG/2OJ7IYY4wFnEuXgFq16HbfvkCdOv5tD2OMeUq6Ai2TAQYDPRYaCsTEaMzbcEQIY76Tmgrs2UOTyVJurLp1gZAQo3kb7oOM+ZZ1jjpp0sob/Y4nshhjzAkutuAfzZrR0sKPPgKeftrfrWGMMc9YX4GOi6OTZ4UCaNRILFO0giNCGPM+6z4opSZQKoHatct2Mu6DjPlG6WgsgwEwGOggWNF+xxNZjDHmBBdb8B+ZDLjtNuCGG/zdEsYY80zpKml16gDNm9NkVmkcEcKY90l9MCaGxhG1awMNGwIoLILyciZQWGTe1roP8nCPMe8pfSwMDqalvd7IE8kTWYwxxhhjjHmJKAKLF9vmAwFoct7eSTTAESGMeVPpKBBBAOrVA+RyEcLVLCiKiyFczQJQNlfWsmX+azdj1Ym93FgAHQu9kSeSJ7IYY4wFjEuXgFOn/N0Kxhgrv2++oZ/QUJRaRuj4JDpQI0J4eT2rirZtA/butUSBmBUWAmo1REFGGeALC81PSX3wwIHKbi1j1ZMUjRUcXPpYaFGRqCyeyGKMMRYwVq4
EnngCGD0auHjR361hjDHP5OUBL78M6HTAlSvA9euUaFqjATTZRdAU6FGMMGgK9HRfY3leLqfz6kCKCOHl9ayq0euBxETqf0ajVf/TiNBk5kBjDEaxLBwaYzDd14hl+iBjrGKkaKy8PKram5FB/dGe8kZl2VmpzxhjjFW+3Fxgyxa6feKEpWohq9mSk5ORnJwMo6MREGMBwmAAJk2i7zK5nJZOGI1AQQEAiMC1IsAYDpNMBpnRBFwpAuqEA7BcqlYqeRKfsYpYtIgmsQCK8jZXPdZqAbUACJEwiYBMAKAWgWtaICTU/HqlEsjJqfRmM1atpKYCu3ZRt5OVBEBqNEB4eNltS0dl3Xije+/BE1mMMcYCwtdfUxQDADz4oP2DHat5EhMTkZiYiPz8fKhUKn83hzGH5s0DfvuNqq5GRABJSZSXBwDw+1Hg1VdhqhcCtdGICLkcMq0WeG8ecPPNNvsJCgISEiq9+YxVeXv3AuvWUR8EgHfeAVq2BIV5TJoGHDwIU716UKvViIiIgCwrC+jcHfjgA/PaJ7Ua4JW0jJWfKFLV8evXLbmwatemSWJHEVdKJV0ESk6mH3fwRBZjjDG/0+mADRvotkwGPP64f9vDGGOe+Oor+gFoEv5//wM6dCh5UhSBWfMA0wmYasUhPz8fUVFRkF26BGyZBwxZY5NAJD+/8tvPWFWXmQm89RbdDgoCXn0VGDSo5MmDqcCfG4A6YTCFFSNfl4eoMBVkdQrp8dzBQPfuALj/MVZRqanAzz/TbUGgfJF16zp/TXnyRHKOLMYYY363YwdduQGAnj2BuDi/Nocxxtx2+DAwZ47l/ptvWk1iAWXrjwMVrzvOGDPT64HXX7dMQvXuDQwZUvKko9JpAJcLZczLRBGYPp2W1AsC3a9dm7qZlItOp5PZ5Icsb55InshijDHmV6IIrF1ruT9smP/awhhjnjh/nk6gTSa6P2IEcN99VhvwSTRjPvfRR8Dx43S7YUM6kTYHOdqbSJbwhDJjXnXpEvDrrzQpZTJZDnMFBfSjVgvQaBRQqwXzY5bnPMsTyUsLGWOM+dXvvwP//EO327YtFcnAGGMBymCg5UtSFMiddwIvvVRqo9In0dYTVqVPokuWNjHG3LdzJ/DFF3Q7KAh4/33KUQfAdiLZnPW9FOvkPN26VUaTGauWTCZg9mygcWMqdNKlC/Daa7bzx3q9AXv3HkSPHj0QFBRkdz/u5onkiSzGGGN+tW6d5fYTT5S9YMoYY4FIoQBGj6boj7g4YNYsS2JbAOU7ieYvQMbcduUKMHOm5f7EiUDr1lYbSBPJUVGO+1Z5S6Yxxmxs2kRL7YOC6Jg4fz5QukaPXg+cPatG69a0nT3u5qnjpYVVQHJyMtq2bYvOXEKDMVbNXLoE7NlDt+vWBe6+273XCWlp6JKUBMHdjJCMMeYDvXsDK1YACxaUXTnodEmThJc2MVZudesCTz1FE8h9+wKDB1s9KU0kZ2fTjJdG43hHvMyXsQrr14/6oSDQBLO9QtPeHL/zRFYVkJiYiBMnTiCNT9gYY9VMgwZ0AtilC1UqVLgTJyyKkC1ejDrHjkG2eDEPOhljftWqlZ0CFdbRWEVF9OMIn0QzVi4yGTBqFPDJJ8DUqaXmi1NTgW3bKATEYKCEdlIyu9LKUzKNMWYjMpKWFq5ZQ+P6Mrw8fueJLMZYpUpLE5CU1AVpabx8gtEg9PbbgSVL6KqqW1JTIaSkwBAWBiElhaMYGGOV5rffaPmES1I0VnAwlWQ9dw64etX+thyVxViF3HwzEB5u9YAoAklJtGxXWu9bv36ptb+lSBPK7pZMY4yVIQillvda8/L4nSeyGGOVRhSBxYtlOHasDhYvlvGFZ2bDrdQwVlEOWpWKoxgYY5Xm4kVKXPvuu8CcOZTM1i7pe0qjAXJyLI8HBzveOUdlMeaWc+eAo0ddbPTLL8COHTRxJU0U21vnZE3a7sAB7zSUsRrgyBEgK8uNDX0wfueJLMZYpUlNBVJ
SBISFGZCSIvCFZ+a5kigHUaUCBIH+5SgGxpiPFRYCr7xiSUKbkeFkYykaS6+3LGWKjHR+Is1LmxhzSaMBJk8Gnn8e+OwzBysFjUYqH6rTUb8KCqK+pdGYf2Q6nc19849cTp2dMebS1at0cWfIEKoe6pQPxu88kcUYqxTW6UJUKi1feK7hdu6kal/79jlOWVGG9YdIyqrMUQyMMR8zmYA33gDOnqX7TZtSVJZcbmdj6XsqJwcoLrZ8wVmdSLs8iealTYzZNWcO8O+/1K02b6a5qjKWLaPwSbmc+mNEBKBWAwUFQEEBBLUaCo0GgtVj5h+12k7VBsZYaSYTMGMGkJdHF3h++snJMNxH43d30uoyxliFSReoVSoRRiP9u28fRWV17+7v1jmWnJyM5ORkGB2uIWHl8fnnwJ9/UuDBJ58At9zixovsVQArnVsmkD9MjLEq6aOPgJQUuh0VRSXFIyIcbKzXA2fO0Bm2NDhXqWhSC4AA0Em0o7XUSiWdhDPGbGzZAnz3Hd0ODQXee4/+tXHwIJURbdaMIrOmTQNuuslmE4Nej4N796JHjx4ICgoq+0ZqNcCV4hlzau1a4PBhul2vnp1iC9Z8NH7niSzGmM9ZT8TXrk0z90olzeInJwPdurmZH8kPEhMTkZiYiPz8fKhc5Vdgbjl+nCaxABprdurkxousP0R16tg+p1RSQtdA/zAxxqqc77+niXeAAjw++ACIj3fyAoUCaNGCojsAoGdPWuZUwuVJNEBLoRISvNJ+xqqDs2cpd7vkjTdo/GAjOxt4800aLwQFAS+/TOWQS9ProT57ljJS2+uD0vphxphdf/9NQ26AhtwzZzpZOe/D8TtPZDHGfI4DaZi1desst594ws3jVukPkXUYMn+YGGM+cPQoLSGUTJoE3Hqrixd9+SVw4gSFitSvT2Ej1uFbrk6iAT6RZsxKURHlxdJo6P6DDwIDBtjZsFYtSp41fz6VQx45shJbyVjNUFxM88UGA91/8kkXAYzW4/dLlyBYHw8rOH7nHFmMMZ+ynogXBODKFcusBac3qnmysqiQEEDHrnvvdeNF9tbWl8YfJsaYF126RElspcH6Y48BDz/s4kVFRbb5rd56y8kaRMaYK6JIc8H//Uf3W7SgfmmXIFDW6WXLqO/J+DSXMW+bP58qhwJ0PWb0aCcbW4/ftVq6SHPpEoJzcy3bVGD8zj2cMeZT0kR8RASl/cjJAa5eDYPJVHYinlV/GzZYStY/8ggQEuLGi6yv5jjKVcYfJsaYFxUWWgKmunQBXn3VjReFhwMrVwI33ggMHcp5dhiroO++A378kW6HhwPvv+/GuKFDB0pmxxjzql27gE2b6HZoKDB7tuPAYgCW8btSSSUOSxitk9tVYPzOE1mMMZ+xnoiPirKMKwTBcqGMA2lqjuJiywFQoaCJLJesP0RGI5UrysykhMql8YeJMeYlLVoAn31GUaPvveegQqE9TZoAn34KjB3r0/YxVt0VFVGhBcnUqUDjxqU2OnXKUomBMeYzajUwa5bl/quv0uHOIevxe3a2ZVweE2M7kQWUe/zOE1mMMZ+xDqSRyYAGDYDYWCAmpti8DQfS1BxbtlhSv/TrVzbno13WH6Lr1+kAl5sLwd5EFn+YGGNeVKcO8M475QjukMuB4GCftImxmiI8HFi6lE6WH3kE6Nu31Ab5+cDEicD48bShyeSPZjJWI0RE0GRyVBTQqxflqnNKGr+bTFTFFwBCQiDWrVt223KO33kiizHmE47SGqlUYpm0BRxIU/2ZTMAXX1juP/GEGy+y/hCJIv0LAGFhEMPD7b+GP0yMsXI6ftzx6mWHtFpgxQrL9xNjzGsSEoA1a4AJE0o9YTIB06dTMjtRBPbvtyS0Y4z5RK9ewPr1NKHltFCTNH5Xq+kHoBc0bOj4heUYv/NEFmPMJ1JTge3b6Yqaq6p0HEhT/eXnA/Xq0e1OnYBWrdx4UeloLImzUC7+MDHGyuHYMeC554Bx4zwsGrh
4MfC//wHDhtEyJ8aYV4WH2wlwXLmSJq8AQKUCPviAoyAZqwT16lGXcyo1Fdizh0qNSieB9es7T3BnPX5PS3OrLTyRxRjzOlGkddTZ2cCVK0BBgevXcCBN9RYdTed6X3wBvPKKGy+QruYUFlJIcmEhXYFVKOhHo4FMp6ODZOkfuZy25w8TY8wNV65Qvg+dDjh0yDZ61Km0NMvGly65yHrLGHPl5EmqimYve4DZwYPAxx/TbUEA3n2XclcwxryquJiCEjwaSkvj92vXaNxuMlFm+LAw8zjd5fjduvqvE4ry/VqMMebYDz8AO3dakuMWFgKRkc5fU46JeFYFtWjh5oZ6PZCRYal0IuW+CA0FCgogAFBoNBCchShfuED74au0jDEHiotpcj07m+7fcgvwzDNuvLCgAHjrLcv9sWOBG27wRRMZqxEKCoDJk2lO+OhRmtCKiSm1UWYm8OabljPr0aOBrl0ru6mM1Qjz51ORpl27gClTXJ/LAaBx95kzdNtkoiTJ4eHmqAa3xu8XL7rVPp7IYox5VWEhjef1ero4HRJCE1QaDT0vioBOJ7OJNpV4OBHPqrPgYGDDBgqPeOMNeqxBAyphJJPBoNfj4N696NGjB4IcRUHExPAkFmPMIZMJmDHDsiKwYUNaoaRwZ3Q8dy6FcgHArbcCjz/us3YyVt2JIvD22zSJBdD4sMxJs05HM13S2t8ePYCRIyuzmYzVGLt2WSqN790LvPiimxNZwcHAt98C//1HkVn33ks5RUq4NX4PCqIEeS7wRBZjzGtMJrpQlp1Nk1KCQFUubJYWanUw5RRCrQsGQkLL7MODiXhWBeTkUALl225DmST/LtWvD+zeTVFYAM2Qtm1Lt/V6qM+eBVq35uU8jLFyWbqUoocBOvbMn+9G7g+AXrRli+WFb71Vji84xpjkiy/oxBmgqmjvvWfn0D53LnDiBN1u2BCYOZP7HWM+kJVFFXslEycCjRt7sIP69emna9eyUQvujN/dTFTJE1mMMa9ZsoRybzZrRpFYSUk01jATRZhemwrjL79AfmsfyObMsZsJ3s2JeFYFfP01pbJo3JgiH266yYMX//svXQYCKLvkgAE+aSNjrObZtg1Yvpxuy2R0vGrWzI0XZmdTTh7Ja69xfh7GKuDYMWDhQsv9t9+206WysoCtW+l2cDAwZ46b4SGMMU+YTHRtRppL6t0beOCBcu7MVbWvCuKJrCogOTkZycnJMHpcE5qxyvPjj8CqVXQ7JIRWgHXrVmqjg6kQj30FjbIQoce+gpD7MNC9e5l9eVQxigUsnY5WBwKUrqpuXQ93EB8PvP46sHo18MQTHHnFGPOKEyds01u9/DJFjbokVTLJzaX7vXoBAwf6oIWM1Qx5eXSYl05xnnoKuOMOOxvWqwesWQNMmgQ8+STQsmWltpOxmuLzz4HDh+l2vXrA1KluzkcdPgxs3EhLc9wKba44nsiqAhITE5GYmIj8/HyoKumDwZgnjh2jsb3k1VftTGJJVSy0WmhVKoRKVeW6dfP5jD3zj23bLAmUe/UC4uI83EFICPDII8CDD1qSvTPGWAWtWkUT7QAwaBDNk7tl925LlGhMDOXv4+MXY+ViMgHTp1tSzXXsCIwZ4+QFjRvTZBZf1GLMJ/7+m1bXAHRoe/ttWurrUl4edeZr1+ik8NNPgdhYn7YVAHhhMWOsws6dAwwGuj14MPDYY3Y2Sk0F9u2DqFIBgkD/7ttHj7NqRxSBdess94cNq8DOFApO2s4Y85pZsyj/bMeOFA3i9lxUjx50pq1Q0FXnWrV82UzGqrU1a4CUFLodHU0rdqVq1wBoICFVJ5TwJBZjPlFcTNdmpPO5ESOojolLokgJta5do/vNm1N+rErAEVmMsQq77z76zvrqK4r6LnNSYBWNhdq1ae2gUkkz+ByVVS0dOWKpBNa2LdC+vQcvFkX+PDDGfCY4mK40Fxd7OEculwOjRlG+Ps6LxVi5mUyWSSxBoMnlevVKbfTpp0B
GBs02h5YtDsQY856lS4Hz5+l2mzZUpdAt335L0coAzUhXYvETnshijHlF5870Y1dJNBaioy0TFIJA96WoLDu5sljVtXat5fawYR7OSy1aROUOR44EmjTxdtMYY9VcWpqApKQuqFtXMOe+0uttgzkEAQgPL+cb8CQWYxUik9ESpiVLaDK5bE7Vg1QpRhSp8MvKlRyNxZgPPfUUrbA5fBiYPdvN7nbuHDBvnuX+9OlAnTo+a2NpvLSQMVYu6elubmgdjaUoNXeuVNLjycllw8dZlZWRQfOTAF1h7dPHgxfn5FCG+B9+AIYPB4qKfNJGxlj1JIrA4sUyHDtWB4sXyyCKwM6dwNChlqvNHvnvP+D4ca+3k7GaTqEAxo0DXnih1BOZmbR0VxoX9u7Nk1iM+Vh0NM1Jff45paNzSa+nTPAaDd1/+GFafl+JeCKLMeaxbduARx+lqG+X809SNJbRCKSnQ1CrLc+Vjspi1cL69ZbPxWOPlZ2/dPli6aD4wAMVCJlgjNVEqalASoqAsDADUlIErF8PTJtGF19GjgQuXfJgZ9JA/emnKTpESh7CGCsXewXYbSK2dTpg8mRL+eoePajjMsZ8ThCApk3d3PiTT4CTJ+l2kybAK6/4qlkO8UQWY8wjUtlyUaSQcCnHgV1SNFZuLlBYSPcvXoRgPZLhqKxqpbiYgqkAKjo4eLAHL1argS+/pNsKBZXYZowxN1kHAKtUWhQXAxMnWubG77zTw0JKK1YA//xDCX127uTqqYxVQFoaXdyS8mfaNXcuDTQBoGFDYObMSsu3w1hNs3Onpbq4R377DVi9mm4rFLQW0Q957PibgTHmtqwsYMIE27Llt9/u5AWpqcDWrXRWIV1ya9AAonVZGo7KqlbCwoDly4H776dJLLfK9ko2bqTJLAAYOLDSqp4wxqoHKQBYpaKLIhoNFVIqLAQ6dKDVSm7n6/vrLwo7BijJ+8yZXD2VsXK6fp3637lzFGAlBXLY+OEHYNMmuh0cDMyZA0RGVmYzGasxTp4EpkwBhgwBDhzw8MW7dlmCD8aMAVq39nr73MHJ3hljbtFo6Mq2VF3VZdlyUaQlGbm5ltwG9etDVKmoWqE1pZK2kyoYsiqtZUtgxgwPA+w0GkuGeJmMsk4yxpibShfHTU8PhcFAQVQFBXRO7PY8lEZDSWulCKznnqMyTowxl0oXWzCZaBJLivy4+WagVatSL/rnHyApyXL/zTdpMMEY87riYupiRiP1yyNHYC6M4pZXX6VOvGsX5bP1E47IYoy5JIpUqlyK9o6NdeOkYNkyYP9+upItCHRmERNjf9tKisp6/vnnERsbi6ioKLRv3x4/SGvgmE94VKnw++8p0TsA3H23m5kmGWOMWBfHzc4WUFSkgCBYcvQ5Xc5U2qJFlszwN95IObIYYy7ZK7awdCnw66/0fN26wDvvlFotmJ8PvPaaJdz/4YcpKrua4TEoCxTz5lkOcW3b2im44Iog0NKLefP8uvSXJ7IYYy6tWEEJ3gHKvT1/PlCrlpMX/PEHTfWbTPQFFx1N5eucqYRcWRMmTEB6ejry8/Px6aefYvjw4bh+/bpP3qsmKvd/m15vWWsP8EkjY8wj1tFYJhNw9arlufh4uurs9qHl8GFLrr7gYLqKY70cnjHmUOliC8uX0xgSoOHgu+/auaYpipaLVzfeSNEe1RCPQVkg2LkT+PZbuh0WBsyaVYGioB5dsfY+nshijDm1cycVawLo+2rWLCAhwckLzpwBRo2iK2xyOSVJcie7rnVUVlqaN5peRuvWrRESElLydgJ0Oh0uXrzok/eqaY4fBx55BPj6awpZ9shPPwFXrtDtO+8EWrTwevsYY9WXFI0VEWFblbBuXToEuR3wW1BA1UwkL79M1ZgYYy7ZK7YwbZplhW5iIi0rLEOlAj76iHLtvP9+tc1Fx2NQ5m9ZWXQeJ3ntNTcXQBgMVJVwzx6fta08eCLLD7RaLUaNGoXGjRsjKioK3bp1w8GDB/3
dLMbK0OmogIzkpZeoErJT6emUzdNopPCt2rUp34jVj0ynK/MYNBqa+CospGWJAN599130798fMTExEAQBq1atsvuWWq0WkydPRlxcHMLCwtC1a1ds377d7rZjxoxBWFgYOnfujN69e6N9+/ae/2FYGWvX0n97UhLg4E/vWFSU5UjK0ViMMQ/YnjzT5BUAhIcbULs2hWC5HfD74Yc00geALl2ARx/1beMZq0asiy0IAvW5nBwa1t1xh4tCxDIZXQRt0KDS2usKj0FZdWIyUerH/Hy636cPrQ50y/Ll1LlffZVuBwieyPIDg8GApk2bYv/+/cjNzcX48eNx//33Qy1V62IsQAQHA598AjRtCgwYAIwY4caL7ryT1nKEhdHlcbWarnKX/AhqNRQaDYRSj6OggLZVKoGSK1Tvv/8+Tp48iZtuusnpW44cORIffvghhg0bhoULF0Iul2PAgAHYv39/mW2XLFkCtVqNHTt2oG/fvhD8HBZbHWRlATt20O3oaKBfPw930LMnVSxcsoRKizHGmJusc2NJ6Rjj44FatTTmbdxOwzh0KIUcR0RQxQo/5v5grCqxnlBWKoG8vGDo9ZZiC2+9Vao7Xbliyf4eoHgMyqqTNWssuerq1fOgiu/Ro5YKvjIZ0L27r5roMa5a6AdKpRLTp0833x8yZAgmTJiAf/75B7fccosfW8ZYWfHxwKpVNKnl1hdecDDw8880QLHzAoNej4N796JHjx4IcrQoOygISEjAqVOn0KJFC/z666/o3Lmz3U0PHz6M9evXY86cOZg4cSIAYMSIEWjXrh0mTZqEA3ZqysrlcvTp0wcLFixAixYtMGDAADd+MebIl19alg488ghQEjnvGZmMIiAYY8xN1ifPdepYHlcqRZfFce0ez1q2BD77jJbI16/vy6YzVq2UnlCWy8WSf6mfnjxpdf6r09GapmvXgPfeC9gLWDwGZdXFxYt0rRiwpImJinLjhWq17frgF1+kPHYBospdajpy5AgGDRqEmJgYhIeHo127dvjoo4989n5qtRozZsxwK7QU8Cy8VHL69GlkZ2cjwWniIcYqhyiWXXoREeEkZYFWC+zda/tY/fpUqrx1a7s/6kaNHD6H1q3Na0Pqu3EisXHjRsjlcjz//PPmx0JDQ/HMM8/g4MGDyMjIcPhag8GAf//91+V7MMeKi4FvvqHbCgWvxGGMVZ7UVEqxFxrq+kKL21FZwcF0/GKMuaV0NBYARETo0aSJiIYN6RzYZlnv3LlUBjsri4opSCfJAYbHoKyqSksTkJTUBWlpdGBs2JCiIpVKYORIoFMnN3f0/vtAZibdvvlmenEAqVIRWdu2bcP999+Pm2++GdOmTUNERATOnDmDCxcu+Ow9r127hrfffhuNGzfGTTfdhN27dzvdfuTIkdi4cSPGjx+PFi1aYNWqVRgwYAB27dqFO+64o8z2xcXFGD58OKZMmQKVSuWj34Ix961eDfz7L03Au4ysMRqBKVNoImvcODfXHnrX77//jpYtWyKq1KWFLiXRPUePHkV8fDzy8vKwZcsWDBo0CKGhofjmm2+wa9cuJCUl2d2vVquFVqs1388vWVSu1+uh1+vtvkZ63NHzFeHLfVfkPb/7TkBeHl0TuftuEVFRJrjVRFGEbPlymHr1clE9oPxt85dAbhvgun2B2m7GrIkiMGkS5fsoLqaJ9Oho56+xG5V14gQVmCh32SbGarbS0VjShFVYGKVKDQqyTCB3v/YDsGkTbRAcTGUMq/ASXl+NQQHPx6E1cQzqL4HcNlEEPvpIwLFjdfDRR8Ctt+ohCMDddwPt2lH0sjvNFn7+GbIff6Q7EREwTptG531GY4Xa587fzt2/a5WZyMrPz8eIESMwcOBAbNy4ETI3v/RycnKwa9cuDB482O7zX3zxBQYNGgSldAmhlNjYWGRmZqJBgwZOQ0sBz8NL9Xo9Hn30USQkJNgsNWTMX/bssVw1u3iRcq4rHH1LmEzAO+9YorGWL6f
kSJW8HCMzMxOxdqoiSo9dKilhJQgCli1bhjFjxkAURSQkJGDdunXo2LGj3f0mJSVh5syZZR7ftm0bwsPDnbbJVRRmRfhy356+p8kELFzYGXl59PeIi/sVP/5Y6NY+VWfOoOOSJcCHHyLjrrtwdtAgr7YtEARy2wDH7SsqKqrkljDmuWXLgMOHaemSu2lmSkdldW9yiZZKNGpEx7PmzX3aZn97/vnn8cMPP6CwsBBNmjTBu+++i/vdzvbLWFnW0VhhYfa3MU8gJ+Wh29UkmLvrm2/Sct4qzFdjUKD849CaMgYNBIHYtr//roWdO29FWJiAnTtFzJ//K1q3zvFoH6HZ2bhl3jwoNJRr8sT99+Pq778Dv//utXY6+9u5Ow6tMhNZ69atw5UrVzB79mzIZDIUFhYiLCzM5YTWkiVLMGPGDHz11Vd46KGHbJ5bsWIFnn32WSQnJ2PMmDF2Xx8SEoIGblbQcBZe+sYbbyAjIwPx8fEAAJPJhCeffBKCIGD16tWc7I/53enTwNSplitpd9zhZBJLFIEFC4DNm+l+UBAwb55fcooUFxebyxlbCw0NNT8PAFFRUdi1a5fb+50yZQomTJhgvp+fn4/4+Hj07du3zJU3iV6vx/bt23HPPfc4zv9VTr7cd3nf88ABATqdDCoV0KmTiOeeu8vtfctefhlCSRRqmwcfROv+/b3aNn8K5LYBrtsnXfVlLFBJxyujkQ4/UVG0vLBkzA1RBHQ6GTSaspNcUnHc5MUiuoXNgFBUBJw6Rcn+3nij8n+ZSjRhwgQsWrQIISEhSEtLw913342zZ8+idu3a/m4aq6KkaCyjEcjIoGgP63x1QMkEcpQR+3Zokdq4FbpHHAMefhgYONA/jfYiX41BAc/HoTVtDOpPgdo2UQS+/FIOmQwIDy9Afn4U/vjjDrzyitHtCz4wGiEfPZqW5YSEQOzfH53ffNNrbXTnb+fuOLTKTGTt2LEDUVFRuHjxIh588EGcOnUKSqUSTz75JObPn2/+wiht8uTJOHz4MIYOHYotW7agT58+AIBNmzbhhRdewPDhwzF69GivtNHd8FIAeOGFF5CZmYmtW7dC4XC2gLHKkZ0NTJhAyzMAoG9fqoLs0KpVwLp1dFsmA2bPBpxEK/pSWFiYTei1RFNyRhPm6BKhCyEhIXYHJ0FBQS4PWu5sU16+3Len7/nll5aTxOHDgaAguXs7PHGCQikEAYiLg2zAADq79GLbAkEgtw1w3D5ftZmjQZg3XL8OvPwyHa/kclqdFBxMldEsBGg0CocXCZVK4MKRK9CHHUOwDEBcHO20mmvdurX5tiAI0Ol0uHjxIk9ksXKRorHy8ih/uyBQ/4xUFEN5JZOuhkZEABChzL2IXH04kq8+im5dTBBefdXfzfcKX41BgfKPQ2vKGDQQBFrbDh4EUlIAlcqEy5dDodUCP/4oQ0qKDL16ubmTjAwgPd08Rscbb0Dug9/R2d/O3b9plZlBOX36NAwGAx544AE888wzSEpKwu7du7Fo0SLk5ubiiy++sPs6hUKBL7/8Ev3798eDDz6IHTt2QK1W44knnsC9996LlStXei0ayt3w0nPnzmH58uUIDQ1FHavLFj/99BPuvPNOr7SFMXdJxWOkXH5t21LVcYfdYtMmGrlI3nwT6N3b5+10JDY2FhcvXizzeGbJLxQXF1fZTaoRjEYKwAsKon89+upaudJye+TIck9isaqFo0FYRWk0dNElKwto1gxo2pRyRZc+19PrDdi796Dj6rjnzyPmjRcRDAMd7KQsuAHm3XffxdGjR3H48GHk5ORg5cqVGGkn2a5Wq8X06dOxZs0a5OTkoEOHDpg1axbuueeeMtuOGTMGK1euhEajwYABA9C+fftK+E1YdZSaCuzYQf1SOozHxooIzb4CsbgYwtUsIEIJXLsGoVCNaLkR+wo7IfWxrujusIJQ1cJjUBYorJf5AgK0WorMKi4GPv4Y6NnTzWX4zZoB69cDM2fS0vsAPDZ
KqsxEllqtRlFREV588UVzlcLBgwdDp9Phk08+wdtvv40WLVrYfW1oaCi+//579OrVCwMGDIBOp0PXrl2xYcMGr0ZDuRte2qRJE4ily8I5kZycjOTkZBgrmFyNsdJEEUhKAv74g+7Xq0crBB0mef/lF3qBZNw44IEHfN5OZzp27Ihdu3YhPz/fJhry0KFD5ueZ98nlNOH50kvApUse5Go9exaQwuvr1AHuu89nbWSBhaNBWEWYTPSdc/w43Y+PBz79FLD38dHrgbNn1Wjd2k4Od70eeGcKgCy6P2yYByWcKtf777/vVrEhTwoNLVmyBIsWLcLu3bvx119/cWoLVi6iCLz3HkX0S6dStWsDKkUhoFZDFGQQ1GogJwe4ehUAoJQVIzesIZLXB6PbQPdz2wUyHoOyQCEt8w0Lo4s9APWx2Fh6LjUV6N7dzZ3Vr0+zYgHeSatMmQgpNHPo0KE2jz/xxBMAgIMHDzp9fVRUFObOnYvs7Gyo1WosWLCgQuGejtroi/DSxMREnDhxAmlpaRVqH2OlrV0L/PAD3Q4JoUmsunUdbHz4MEVfSZOwI0b4pUphaY888giMRiOWLl1qfkyr1WLlypXo2rWreTlveSUnJ6Nt27ZOCz3UZLVrAx5d0F+1ynL7ySdpTRALSO+++y769++PmJgYCIKAVdb/d1a0Wi0mT56MuLg4hIWFoWvXrg6TeI4ZMwZhYWHo3LkzevfuzdEgzG3/+x9dSwGoEtqCBfYnsVxavhz45x+63awZ4CBHaiA4deoUzp07hzlz5jjcRio0lJSUhDlz5uD555/Hzp070aRJE0yaNMnua+RyOfr06YMdO3bgR6kqFWMe2LYN2L6dLmIJAhAZCdSrJ9IZtEmEKJcDJpGyvMfGAYIAoV49RNcNNhdbqA58PQZlzB1SNFZxMXDtmuXx2rXpR6u1FPNyW4BPYgFVKCIrLi4Ox48fR/1SyaTr1asHgKoTOnP27FkMGzYMrVu3RnFxMR5++GGkpKTYXQpYXhxeyqqSQ4eAhQst9996C2jTxskLoqJopJKTAwwaBIwd6+smYunSpdBoNOZluT/88AMuXLgAABg7dixUKhW6du2KRx99FFOmTEFWVhYSEhKwevVqpKenY8WKFRVuQ2JiIhITE5Gfnw9VSXJyVk4XLwI//0y3o6KAUgU4WGDhaBAWSNq3pyvNWi1FgiQklGMnx45ZljbL5bQuMYAn00uPee3xpNBQaQaDAf/++6/X2stqBq0WSEyk1BRBQfQTEwNosouAAj0ghMEoAnIBdF8VBMQ1B4KDIBdLii0kA926Bfa5ciCMQRlzR2oqVZ4vKrJMVgUHG1GnjgyCINhW67UXlbV9O1Whnzy5JK9d1VBlJrJuueUWbN++HRcvXkSrVq3Mj0tfLnUdhpHQRJKUGX/79u0oLi7GHXfcgb59+2LPnj2IiYnxShs5vJRVJW3bAl270pfa888DdlJp2GrdGlixAvjiC2DixEoZfSxatAjnz58339+0aRM2bdoEABg+fLh5Yumzzz7DtGnTbPKDbN68GT169PB5G2sanQ746SegXz+qEuaRzz6j9UEAMHQohVWwgHXq1Cm0aNECv/76q8OIRCkaZM6cOZg4cSIAYMSIEWjXrh0mTZqEAwcOlHmNFA2yYMECtGjRAgMGDPDp78Gqhx49KJjq1CngttvKsQONhtYmSt9Bzz9Px7Uqzt1CQ3l5ediyZQsGDRqE0NBQfPPNN9i1axeSrNMFWNFqtTarDKQqUnq9Hnq93u5rpMcdPV8Rvtx3IL2nu/zVNlEE3npLhitXZOa8WBERNDmFa0WAMRyQy2ESRcgEgRJqXimiVAJaABChVALnz4soKjL6ZR7Z1d9OepzHoKwqkArJX71qiZAMCQGio4shCLS2Xqmk4Ei7E8iXLwPvvksVU44epbF6rVp++E08V2Umsh577DG89957WLFiBXpbJZZevnw5FAoFevbsafd1OTk56NevH9RqNfbv349
GjRoBALZu3YqePXti4MCB2LFjB5ReSGT2yCOPYO7cuVi6dKl5QM/hpSxQRUZSRNaPP3pQAblxY5qtryTHjh2zW2K4tNDQUMyZM8fp8gvmHdu2Ae+8Q5+d11+nCpduMZksyW3Cw4HHH/dZG5l3cDQICzStWtFPucjlwN130/LmNm2o0EQ14G6hIUEQsGzZMowZMwaiKCIhIQHr1q1zeJE1KSkJM2fOLPP4tm3bEO7iIoSjpcXe4Mt9B9J7uquy25aTE4ItW25B7drBkMlMeOKJfxAbW4ios2fRbuVKmCKCzCEhxtBQyLVayPR6/PXI08hv1sy8n4gIHXbs0FVq20tz9LcrKioCwGNQVjXs3UvncgBNUAUFAfHxIk0uw/K43agskwmYPt1S9rd9e9qwiqgyE1k333wzRo0ahU8//RQGgwF33XUXdu/eja+++gpTpkxxuGxvyZIlyMjIwO7du22SwXfs2BGbN29G3759sXr1aoxxkiNh8eLFyM3NdRpaCoDDS1mVI5cD99/v4MmcHKpQ+PTTHmTyZtWZKALr1tHt/HygQQMPXiyT0VWefftoAb8bg0MW+HwVDQJ4HhHC0SCVpzLalpsL7Nsn4P77PUnqQRy277nnKBQ5OpoG8FJ0ViVy52/nyd/V3UJDUVFR2CUV2nDDlClTMGHCBPP9/Px8xMfHo2/fvg5P7vV6PbZv325eBeFNvtx3IL2nu/zZtp49gYkT5XjuORN6974FEEXIR66AEHyWrpJeugSj0QhZcB2gYT0Ily7hpsxdMCY9FRBrCV397aRjDWOBThSBRYuoholMRud1jRtbCjBYsxuV9dlnwJEjtEGDBsCUKQHRR91VZSayAODjjz9G48aNsXLlSnzzzTdo0qQJ5s+fj/Hjxzt8zeTJk/Hggw/ixhtvLPPcHXfcgdTUVJfJZufOnYtz586Z7zsKLQU4vJQFLlEE1qwBBgygCG+nioqoIuHJk8CZM1SCNcAGcazyHTlCy3oAWprqcZ5umQy46y6vt4v5j6+iQYDyR4RwNEjl8VXb9HoBn3xyE/77T4XvvruIBx7417yMyR3Rp06hy9dfI+3UKeS2bFl2gwsXgL/+8l6Dy8HZ306KCHGHrwoNhYSE2J0gCwoKcjlx4s425eXLfQfSe7rLH21r2hRYvx5QKEouch48CKSk0Djx0iVIU8+CKEKQyYDoaAgpKZD99psHZdN8z9HfLtD+r7l6PXMkNRVIS6Mqvrm5VGwwONh+UvcyUVmqE1RFRXry7bdpIroKqVITWUFBQZgxYwZmzJjh9msUCoXdSSxJhw4dXO4jPT3d7ffj8FIWCNLSBCQldUHduoI5l8j69cBHH9G/8+c7WZ6h0wGvvkqTWACtl87JAUoKK9Q0PICwWLvWcnvYsCp10Yb5iK+iQQDPI0I4GqTy+LJtogjMnClDdrYAlQq4fDkK3bu3dH0BxmoHwhdfQDx2DHWbNoX4+ONUfzxAuPO38yQihAsNMV8rKKBoDuvgfHPEh1QuraCA8tCVDAwM4eGQNWgAAXCRoIe5wkWHmD1S19NqKUDBnY+GuSt+ZEA3zVQI0rnN008DnTr5tL2+UKUmshhjrokisHixDMeO1cHixTJ0704z7/Pn0/NZWcB//zmYyDIagTffpOl9gJZ/LV5cYyexAB5ASDIy6CoOQB+HPn3cfGFREXDunIuSmKyq8lU0CFD+iBCOBqk8vmjbsmXA1q10rhsaSklsY2M9WN5+8CDEgwehCQtD6K5dEO69F5g0CXjyyYBaJu/sb+fJ35QLDTFfkioUxsZScH6ZIi+pqZQ8s6gI5rBJlQqa8HCY87g7TNDDGCuPfftoZfy+fdS13J0bNnfFnwqR2kCF7hGgJRZWeU6rksA5ojPGvCI1FUhJERAWZkBKioBvvqElz1IqkFGjgP797bxQFKlqhRQ1ERZGIVxWyTlZzbV+vSVU+bHH7K+/t+vrr+kE8qWXgLNnfdY+5h+xsbHmyA9rHA3CyuP
nn4FPPqHbggDMmuXhHLjVJWpdRARder58mY5ldipoVgePPPIIjEYjli5dan6MCw0xbzCZgLfeAk6cAH75hXJC2xBFqvqSk2OZJK5VC6K9CEilkmbFkpPtr3tijLll2zbglVeAZ54B1GqaP9Zoyv7odDK7j8s1ahSqRSRffRRiaBgdaN0e1AeWqtlqxphd1mGmKpUWBQWhePlliqARBErQ+eKLDl68eDHw3Xd0W6EA5swB2rWrrKazAFZQAHz/Pd0ODQUGD3bzhTod8PnndPvQIV5OUA1xNAjzlj/+oIgPybhxdMzySGoqsG8fRJUKIVeu0HeOWk2zYbff7s3mVoqlS5dCo9E4LTbEhYaYryxbBkip3MLDqVaCjdmzqc/J5dTXYmIoSY9bCXo4KosxTx06RBPKJhNw5QqNydVqe1sK0GgUEOyNu/MMUMqLcUFXH/pXJiG4cWNfN9tneCKLsWqkZAwPlUqE0UjzCHl5dCHs5pspj5/dlRWffQasXk23BQF45x3KY8BqNCnX2q+/CihJdYT77vOg4OD33wPXr9PtXr2AG27wSTuZ/zzyyCOYO3culi5diokTJwLgaBDmuQsXKDWjVKzvoYeA4cM93InVlRxBLodCo6EDnnQwrIIWLVqE8+fPm+87KjbEhYaYt23dShNZgCU60qr4O5UuTk6mM+qgIKB2bddpKDhXFmPlduIEMHEiYDDQoe2ll4ARI+x3I73egL17D6JHjx5ll6qLKuDng4i58CeCB0+qnMb7CE9kMVZNWEdj1a4NnD8fAp2Oxhh5ecC8eXRFrYxt22jZheT114F77qm0drPAZJ1r7cIFOerXp4Pl0KFu7sBgoAlSydNP+6SdzHc4GoRVhvx8YPx4Or8FgC5dgMmTy3GOK13JUSopGSRAO2nQgC5jV8EokGPHjtktbFAaFxpi3vTXX7bRkS+/DJSZEz1+nM6mg4Mp03Tduq53zFFZjJXLuXMUpSxdVO7ZkyaXHVXz1euBs2fVaN3aXtF5AWhzL4B7fdfgSsITWYxVE9IYPjoayM0VoFYHQS6nVYKCQF+CdtPV3Hor0LIlcOoUMHo08PDDld30gFZTqxZa51oDFOjTh9KmNWni5g62bQNKJkDQvTsne6+COBqEVYbcXLoAA1DQ5vvvlyNdh3Qlp7CQdiYlhYyOppPsixc5CqScauoxsKa6fBmYMMESxDhoEFUptmF95TQ+njpsSYEP6XmZTmdTxdBMLqd+yv2RMbdcvUrRV9LFnk6dKKWxo0msmoSTvTNWDViPKZRKy/IMAGjYkMb0DvNrxsQAS5dSVadRoyqtzVVFYmIiTpw4gTSpkmMNUDrXmsFAc1JvveXmDkwmYOVKy33+XFVJx44dgyiKdn+aNm1q3k6KBsnMzIRGo8Hhw4fRr18//zWcVSmNG9PK9jvvpAqFkZHl2ElqKiXzKSw0T2KZFAqIUoJI6ygQ5pGaeAysqYqKaBIrO5vud+pExYIEATQw+OUX6l96PZUyViopRKSgwOZHUKuh0GggqNVlnoNaTa+7cMF2sMoYKyM/nyaxpJo6LVrQCpvgYOevE9LS0CUpCUJaGk0ojxlDkcnVDEdkMVYNWEdjCQJQr54Ig0GLoKBwREfTxTKnkdwREVSKjjGUzbWmUonYt09wfyXA7t3Af//R7Y4dKUEbY+XEESHVX0wMMH9+OV9M66BpxC9Fd4SHoygyElFSUkjOzcOYSx9/TMH5ANCoEdX8CQoC9bGPPgLWrKEQralTgQ0bqFqhHQa9Hgf37rWfn0cSE+P6bJyxGkyrpYnlM2foflwcsGiRGxd7RBGyxYtR59gxyBYvBtq3Bw4fpp/XXgMef9znba8sPJHFWBUnisDChfSFV6eO5XGlUg+VSgQg2I7h485DWJIMTJtGE1iMWSmday0/n84B8/LcPAcURY7GYl6VmJiIxMRE5Ofnm5czsqrtxAla0e6Vit+pqcD+/XTmnZUFKJUQGzSgLy8J5+ZhzKUXXqA
0FH/8QdGRKhXomD5vHrB+PW30/ffAwIHALbdQhUJ79Hqoz56FgwQ9zEN8MadmMpkopQcA1KpF12usz/McSk2FkJICQ1gYFL/8Ahw5Qud7ISFA164+bXNl46WFjFVxa9YA331nyYVlj3kMv8eI1Cc+ovDwF16wxI8zVkKKxoqMBP79V0BeXjCMRg9W5vz+O3DyJN1u1YpPGBljNk6cAJ57jhLXWs81lYv1zHtUFCXZiouzfzBUKmk7h+vsGavZlEqKjPz0U6BpU9CZdFKSZRJLEIA336RJLFZpeHlvzRQWBnz4IVXx/egjWobvktUxURcRQVEMV6/S46++WtKxqw+eyGKsihJFGltMmEAF4nJyrHJtFhZBeTkTKCwyb68MNUB7NR/Jf/emMbwo8pUyZsP6nFCjoar1BQXByM0V3D8HvPlmOuJ27EjRWLyEhzFW4soV4JVX6Lvk8GHbwqYeycqiE+qdO23X1TsL8eJcWYy5JJMBzZqBJrFmzQJKCnxAJgNmzKCzasZYpQgKokOd2/WSSq5GiyoVQnNy6LinVlMIdDXsuzyRxVgVVFwMTJ9OybcLCqhyRXi4NIYXIVzNgqK4GMLVLAAiYDJCuJCBaORin/pmpCr7uLnQmtUkltxYtJRQEh0tun8OKAjAbbcBy5cDvXv7usmMsSqiqAgYPx64fp3ud+wIPP98OXZ06hTw1FPAzz9TpV2pyok7OCqLMbMLFyiRdFZWqSeMRpq0+v57ui+TAe+8A9x3X6W3kbGaZMsWO/3RXVZXowWdDnKtlvouQH26GuKJLMaqmIwM4OmngR9/pGhRkwmoWxdo0qRkIquwEFCrIQoymoVXq2m0UlwMpawYWoQguc4MiDG1/f2rVAnJyclo27YtOnfu7O+m+JR1NJbBYDnmhYfrzUEOHp8DcjQWYwz0fTJlCnD6NN1v1AiYO7ccuZ5TUoBnnqGDX2EhHdsiI93/ruGoLMYA0NDwlVeoG4wYAZw9W/KEwUDJ3H/6ie7L5bS8kCvRMuZTP/5I88ejRgHp6eXYgXQ1OjycjpEAHfNiY6liYTU85vFEFmNVyL59wJNPAv/+S2P4wkL6frLk2xRpKt8kQpTLAZNImTsLCwEAglyO6EZK7PstvDp+n/lETclNYF350roQUUSEpTy203NAna4ymskYq4I+/JDmoACad1q4kL5LPPLVV3TmXVxMM+l6PSURCQ6mtdClfmQ6nd3HIZfTMZGjslgNZTQCr79uKS4cGQnUq1fy5JIlwPbtdFuhAD74AOjTxy/tZKymOHAAmDmTbl++DOzZ4+EOpKvRxcXAtWuWY1utWlS5qZpGIvNEFmNVgMlEZZFfeYWuookiLdMID6cBiHmMnl0ETYEeGiEMGjEEGjGYHtcroBFDoanfGPLQEB7DMxvW0ViA5V86RzTZbOswKmvGDFondOgQf7AYY2Zffkk/AM0hzZlDEcRuM5mohNr779NtAOjVi5K6R0TQQbGgwOZHUKuh0Ggg2HkOajV9kV24QJNhzKmaEpVck8ybZ7kYpVJRgndzEevhwylJVnAwzUDfdZff2slYTfDXX8CkSZaVEA8/TFGSHpGuRltVozcFBUGsV69aRyJ7o/AxY8zH8vOpMqGkZ0+aL8jIoDE5EYFrRYAxHJDLIRqNECirO2CUATF1AX0ooLcdw3u8tINVO46isWJiLOeNErtV7M+dA3bsoAms9HRg82b+YDHGkJJCJ82SN98Ebr3Vgx1oNJQQcudOy2MjRlBin6tXbb+wrBj0ehzcuxc9evRAkKOiJjEx/D3lhsTERCQmJiI/Px8qlcrfzWEV9NVXwIYNdFuhoInlRo2sNoiJAf73PzqWc3VCxnzqv/+ogq9UrKtPH2DyZA8zc1hfjW7YkCazLl9GUUQEoqQdKZVUwTA5GejWrdqk/uCJLMaqgOhouhj94ov08+STtILQZgz/+1EqrVovBKbwcKjz8xFZXAwhP5/CtuYvpopyJXgMz4Cy1eulidG
gICAyUrRJ+i4pczxcvdoShTV8OH+wmFclJycjOTkZxmqarLQ627DBMhn+9NPAoEEevFitBhITgePH6b5MRuuhBg+m+/XrW6+rt6XXQ332LNC6NVfnZazEoUM0cSV54w2gU5tioNBkWzChdm36YYz5zJUrdE0mP5/u33or1VSQebpezvpqtCAAwcEQ4+NtqzbZvQpd9fFEFmMBSq+3HX936AD88INlbGEzhhdFYNY8wHQCiGkIE4B8bR6i6qkgK9IDOWeALfOAIWuqzSw88w5H0Vi1ajl+jc3xcMt1dN+yhZ6IjAQeecSXzWU1EEeEVF1z5gCzZtFE+ejRHr44PJyuLh8/Trffe48qojLGPJaeTpEe0sTyiBHAoLuLgLHj6IGPPqJ+xhjzufx8YOxYmswCgFatKHrZ4+vAWi2tS9RqgTp1nG9bDaOyOEdWFcD5CWoWvZ7G66++WnZZl8MLZKmplBkwNLTsF5NSWW3XRrOKkaKxCgspd40UVCWK9FHSaACdTuY8X/L0yxANJZEyQ4bwQJgxZhYcTAlsZ80qx1VmmQx46y2gb19gxQqexGKsnPLzgfHjLRHXPXoALz1VQBGPR4/Sz/TpfmwhYzWHTkf9UaoU2qgRsGiRbVCkW0wmquJ7+DBVG3WlGubK4omsKqCmVE1jtFzw+eeBjRupgsWyZW68SBSBadPoxdeu2a8e5zBDN6vJ9HrKs6ZU0gBXoaDJ0uhoKiagVgvQaBRQqwX7+ZLDjLhwRgu9qKDM8EOG+PtXYoz5kclkWSYhEQQPVvfl5treDw4G3n0XaNHCG81jrEaKiKD6CADQsiUwa1I+ZC+NAY4dowejooBnn/VfAxmrQRQK4Kab6HZMDJ2axcR4uBNRBGbPBn76iQ68xcXuVQ+vZueDvLSQsQDx22/AlClAdjbdDw4GYmNdvEijoWn9ffvobEEUaUKrYUPb7arp2ujKUJ3z8wQHUw4bB/mSodcbsHfvQccJkz//AjE/rEawzAAMfpzKHzHGaqzFiykv+4IFQNOmHrxQFIFPPqGrOCtXAvHxPmohYzWPTAa8/DKQkADckpCH8AmjgVOn6MnoaGDJEprhYgGpOo9DayKpP9atS3mxSp+yuSSKFMK1dq3lKnR8PBAS4vq11ex8kCeyGPMzUQQ+/5y+k6SlhLGxwAcfAG3aOHnhv//SzNfOnfTCoCC6qtaggf3tq+Ha6MpQ3fPzuMiXjLNn1fbzJRcUAHuXAkFF9OTw4T5vK2MsMKSlCUhK6oK6dQXzir9vvwU++4xuP/ssVdp1a6mETkcZbn/6ie6//DIdFHmZMmNeNbB7NlUMktY0xcQAH38MNGvm34Yxp6r7OLSmeuKJcr5w9Wr6uXoVMBrppFGhsJQ+BABRhEyno8dKn++Zc4NU/fNBXlrImB8VFdFc1MKFlkmsbt1oDO9wEksUKYxmxAgKC5dm4+PiaFpfLrf/umq4Npp5x3ff0apUj2zYQB9gALj/frq0xBir9kQRWLxYhmPH6mDxYhlEkVJ0JCVZtnnxRTcnsfLzqWyTNIklCFQwIizMJ21nrKY4cMBS8BMAnfQ+/7xlEqtuXcpfwZNYjPnchg3An396YUebNlHosyjSRaBateh2qfwfgloNhUYDQa0u8xzlBlECFy7QFesqjCOyGPOT9HTgtdeA//6zPDZqFJ0AOEyKm5dHmXP37qUvrqtXaeDfvDll53aFo7JYKadPUzCEQkFzo2PGuPnCTp2ALl2AX38FnnrKp21kjAWO1FQgJUVAWJgBKSkKbNpEEcXSqpcnnnCzeOmFC8C4ccD583Q/JISywkvJfBhj5XL6NPD665T/+a23gL63XKdJrIwM2qBBA4rEatTIr+1krCbYsoVW2YSE0L+3317OHW3bZrliJJNRjqy+fe1uatDrcXDvXsepQQCKyPS4TGJg4Yksxvxk3TrLJJZSSZMJPXo4eYEo0pXrkyfpfmEhhXE1aeLeJBZQ7dZGs4pbv57+NRicVMW
05+abKa/GxYvlWODPGKuKpEqnWi2gUmlRUBCKl1+m5cmCANx5J6VtdOnPP4EJEyzJ3WNigPnzgRtv9GHrGav+srOBV16xBEzv3g3c0zMKQpMmNJEVF0f56FwmYWWMVdT+/RR/ANBx8++/yzmRdfgwFfaSErQ/9RQwdqzj7fV6qM+ehf3cINUHLy1kzE8mTKDcms2bA2vWuJjEAugsITGRbkdFAY0b0zJCaV10qR/z2ujSP9Zro6tBxQpWfrm5lhU9SiWtEPQYT2IxVmOkptJ1EJWKjh1aLQUGFxbS8Wz2bCcRxZIdOyj0WJrEuuEGYNUqnsQKUMnJyWjbti06d+7s76YwF3Q64NVXgcuX6X7btsCMGYAQHEShIIMHA8uX8yQWY5Xgzz+ByZMtqWMee4xW3pRL8+aWZcCDB1NgA+OILMYqi8lkO8APDaXKTpGRHqQD6daNZuRvvZXWbyiVtNa5FAGgtdGOlg5ar42u4mGlrPw2bbJU633gAc6tzBhzzDoaq3ZtID09FHo9Hdvy8ymgyuV3yLlzwBtvWEb2nTvTCXZkpM/bz8qHE00HptIFF0SRIj+OHaPn69UT8eGHgqWQWXAw9T3GmM+dPUvRyVot3b/nHmDixApkdKldmyIpv/wSeOYZTg1TgieyGKsEJ04Ab78NzJljW1W8Xj0nL9q+nS59z5xp+4X1wAP074YNQE6O3ZfWlLXRrPwMBqp0D9DH6/HH3XiR0QhMnQrcey+tIeIDKasEXHo8MEjRWNHRQGGhgKIihTkoGKCBu6MKqGZNmlA01pIlFAL6xhvVetkDY75QuuBC9+7AihXA1q30fCg0mB/yFuroXwbA0VeMVabLlylgKj+f7nfpQqdyLqOVXYmKAp57rsLtq054IosxH/vmG7rgrNcDkyYBK1e6SGlVXAzMnUul5ACgXTuKRy2tfn3HZw01ZG00K7+dO4GsLLp9551urhDcvt3yM3gwX91llYIjQvzPOhqrTh0AEKFSaaFWhyM+ngbsbtcQefppICGBJ8MZK6fUVCBlpw4qQx5Sdqrw0UehWLOGnhO0GswKfhOtMvYAL56kyoROr5oyxrwlN5cmsaTxdZs2dErncdzAlSt0UJ082c0SwDUT58hizEd0OkrgPnu2pbppeLglAaddp04Bw4dbJrEAygzImJd98YXl9tChbrzAZKJZWEm/fl5vE2MsMB04YInGkuaeIiP1aNZMRFSUbQ0RG1eu0Ky5NUGgpJA8icWYx2hSWYQ2T4MG+osozNFh1iyRUp5qivGSYQF6CntoY5XK/WJAjDG3SUt709IsxzGTiQotpKfT/caNgYULy5G2IyeHciL/+CMwerQlnyQrgyeyGCsHe19g1jIzaQmz9XzUkCFU7Tgmxs4LRJHKxz31FOUQAShx1ltvUU4sxrzo+HFLHo2EBEq55tL+/cCZM3S7QwegUyeftY8xFjguXKBDU05O2QvD0lVmpZKitWxqiPz9N71wyhSquMQYq7DUVGDfDi2iDdcAmQxhxgLk54kozNbiPvWXGBFekjOgQwdawhsV5d8GM1bNlESO0wcAAQAASURBVF7aKx3zZDLg0Uepplbt2sDixQ7O+ZwpLATGjbPMhhUUUFoPZhdPZDHmIUdfYJJDhyio6uRJuh8SQpFZEyc6WOWXk0NT+HPnWkK3WrcG1q4F7ruPr1r7WXWs2LRpk+X20KFufMREkRJwSJ5+mj+XjFVzokjfFQMHAhkZFGWs0djfVhBKRWXt20e5PK5do0H4okVcJZexCrKOxlKKhRDlctTDNURCDdmVS5hS62M6NHfqRGfRERH+bjJj1U5qKpCSIiAszICUFMEmEnnAACp8sngxEBfn4Y61WjoflE4g69alq0O1a3ut7dUN58hizEO2X2AKpKYC3btTSOnq1cD//mcpyNSoESV4b9HCwc4OHwamT6fBvmT4cGDMGE7EHiCqY36e116jSvc//wz07+94OyEtDV2SkiBcvUphXAB9mO+
4o3Iayhjzi6wsYNYsICWFIrJMplLLIwqLoLycSZneS06WlUpaAZE86Ry6aV6FIJYcCDt0AD78kCe/Gasg62gsQSEHIEKQC6iju4RCMRJHituge69Q6m9ul8NmjLnLOl+kSqVFYWFomfyQt91Wjh0bDBS9fOQI3Vep6I08ng2rWTgiizEPlP4Cs15KceIE3ZYmse68E1izxskkFgB8/71lEismBvjoI6rXypNYzIdCQylX+9KlsJTmLk0UIVu8GHWOHYPs/fct0RQcjcVYtSWKVPlsyBDKi1VYCKjVFG3VvLl0bixCuJoFRXExhKtZAOi7QRBERJuuY9/hYKQW3Eg77NuX1tRHR/vnF2KsmpCisYpzdVAbwyDKZBBMJsBggBKF0BoVSBZHQ5y/gCexGPMRqXqvSiVCEGg4vG2bnfyQnjCZqLT93r10PzycopibNfNKm6sznshizAOlv8BUKtG8lKJdO1pJIQhUXXzePCAy0sUOX3+dZtu7d6ccWeWaxmfMB1JTIaSkwBgcDOH8eTqjjY8H7r7b3y1jjPlAbi5dEH7zTapCKIr0b0QEJa01lw4vmd0SBRnNchUW0kD8wgUo1VegNQUh+eqjEEc+TWFdfGGGsQo7eBDYtkUPtVaBHMTgulFlvsAkCEA0crEvrwNSf3d0dYpVVdUxxUVVZB3MoFQChYUK5OVRhphXXy3n6nlRpNQyP/5I94ODKaKybVuvtr264oksxtxU+gsMKJvg9rnnqLDbs89aDfqtSfVYJRERwPLlVNbC44yAjHlGihZ0yfrDbjLRz9WrlLjZ7gebMVaVnT0LPPYYsGOH5bEbb6SVg3XrWgdhinQcM4kQ5XLAJFJlwnPpQEEBnVDL1dgn74nUWxL5+4IxL8jOBp59RkROXkl/EoA8YyRMMgVllpbLoZRpoM3TIDlZ5HR01UxiYiJOnDiBtLQ0fzelRpOCGaKjgcJCATk5oRAE6oKnTpUzKmvLFmDDBrotkwFJSW5WYGIAT2Qx5jbrLzAAyM0NRm6uYJPgViajyKwyioqAGTNovcaVK7bP1avHg33mc6IIjBxJH8O//3axccmHXVSpYD5KFxVxwknGqqn4eKBOHbodFQW8+y7NXxcVUffXaEp+sougKdBDI4RBI4bQvwV6aAqN0JiCoUEo5HF1UWgKt61gyBgrl337KJflmX9FyEUDBEFAtLwANwRfgiCIVEUoOBiCQo5owzXs26Gt2DInxlgZ1td3tVrKHSmpU4cu+pTrmNe3L9C7N92eMQO46y6vtbkm4LNnxtxg/QUWEgKcPy9ArQ7GlSs0B1Wm7Li1EyeAYcNo1j0/H5g2zYPQGMa848gR+ihu2UIXfBwqFXqoqVMHYrNmtGb/k0/4zJSxaigoiFJ09OpFF4d79qRKhUolrR4sKAAKCkQUXClCgTEcBYiAWlSiABEoMClRIETRT3Q81EYllEoa6EuFeFnVxcua/KOoiCaUx48HzpwRYTKaEAQdGikuIzboGmRCqXGkXAalWMhRWYz5QGoqpbAyGGwX10RFAQ0alKra64ngYBqUJydTiWDmEa5ayJgbpGiskBDgv/+omrhEr7f9AuveveQJkwlYu5a+nAwGeiw8HHjoIY7AYpXuiy8st4cOdbKhdeihtJ4oLIzWF5X5kDPGqhqtFliyhMbMLVtaHk9IoCq7kg0bKPeH2e9HKRFIvRCYwsKgVqsRERkJWVER7fT994HO9cybx8RweqzqoDpW7g10f/5JBa0vXChJSZcvIlLMR2MhAwqjEZCHALKyRVcsUVmhSE0N5UM1Y14gipT3+OpVmBO8A0BkpA5xcaEABEvV3lIVDO0yGinUWSKXA127+vA3qL54IosxF0QRWLyYBvQGQ8mXk9GEYEMxGsXKEa4KhyiW+gLLvk4hotZT8zfeCMyeDTRq5K9fhdVQly4Be/bQ7Tp1gD59HGxoHY0lrTOSeHSUZsx7kpOTkZycDKP1FQRWLidO0Alyejpw+DCwerX
jyab69ekHAH03zJoHmE4AxYColSEvPBxRoSrIQgFcvAhsXQgMX8PfDYxV0M8/0ySWKALZ14wIN6pxA/6DgJK+JYoA7PQzuQxKQyFy8zRITg5Bt24Cd0fGKuiLL2g1gyhSHIJMBsTGAiaTDkAogJKCC9FuXO89fBj44ANgwQI+H/QCDgthzIUffwS++w7Q6aTxuYgIqNHMdBbh+ZcBiLZfYMv+pJAXaRJLECg50YoV/KXF/OLLLy0rAh99lJYR2WUdepidbftc6aM0Y5WEE91WnF4PfPwxHYrS0+mxc+doYsstqanArl0UHlJQAOTlQVFYSM/xdwNjXjV2LFUKbRBZiOD866gvZtL4UwDNPMsdn75xrizGvEcUgU2baJGNTEbj56ZNgcjIsmt3SxcAK+OvvyiqOT0deOYZusrMKoQnshhzIiWFvmu0WstqwPoqLeJN5yCTiZbS4yj5AsstQvKUCxCvl0wC1K5N32gvvUSZABmrZEVFNBEL0Ph38GAHG0rRWBoNhR9mZUE4d842n5vLozRjLNCcOUMTWMuXW7pz27a08r1jRzd2YDIBkybRugopKk66LC3h7wbGysVkoqqh1sLCgCXP/Ibo4ynQGOSQwwgNwqAJioQGoVRYwRQMjSnE6nbJjxAKuWhAYY6ec2UxVkGpqUBaGhAXB0RGAjfcQNd67XF6TefMGWDcOKC4mO7feKNVyDMrL57I8gOtVotRo0ahcePGiIqKQrdu3XDw4EF/N4uVYjAAb7xB5/RyOU0CNG0qIkZ7CRCtSo9nZcEclaUSsU99M1IL2wO33w6sXw906eLvX4XVYJs303wrQJWPatVysKEUjWUwOD5Z5cgLxqoMkwn47DNg+HDgn3/oMbkceOEF4NNPaUDuUkEBMGIELYeQyeg7IDQUYtOmMISFWbbj7wbGPJaVRee2I0ZQcQWz779HzFvjcKEwGkpRDbUQiQJFNArEkuIKJiUKTOFUcMEUbvUY/aiFSCiNebhwIp8LLjDmoexsOvRZZ9uIiaHqvtapreyxe03n0iUgMZEKfgHArbcC773nemfMJQ4R8QODwYCmTZti//79aNSoETZs2ID7778f6enpiIiI8HfzWAm5HFCp6GSgVi2gYUNArimkWQG5HIBI/0pRWcoIKOsqKY1QnenoNr8pBDvJOBmrLCYTzaVKHCZ5l47WBQUUkSUIgFwOMTbWHHFoxrmyGAt4Fy9SmsajRy2PNWsGzJwJtGnj5k5OnAAmTwYOHKAvk6AgGs3XK0noLl1ZlvB3A2Nu276dipVJ57YzZlAGCmHlp8CSJQgWRWwIGYEcYwilpShVJMgkiigqLES4UglZ6b4misD1a4hp1gnBQcmwm0+LMVbGiRPAxIlU/OTxx8vWPnKlTK6sFteA0aOBa9dog7ZtgQ8/5EooXsITWX6gVCoxffp08/0hQ4ZgwoQJ+Oeff3DLLbf4sWXMYLCsAExNBf74g2bgY2IAoCT6yiQCQbKSqBWTJSrrBiUEQUB0nBL7Mm5A6iEu7lYdVOVE06mpwPnzdPuWW4AWLZxsuGcPXUaSjtYNGthfDut2RkvGmL/k5lLlM4C67LBhwJgxbo6dRZGy2370Ee1IraZJrPh4WlshbVMafzcw5lJBAeV6/ukny2N161KkpCCADtQyGWAwoH6MHvWjigFDbpn9iACKjcUI04fZn6aKAnDtBCXI45Nmxlz68Udg1izKiXzlCgUiFxYCERF0jdeaKAI6ncx87deaXE6vS16gQzcxEcLFi/TEDTfQcTU8vHJ+oRqgyk5kzZ49G1OnTsWNN96Iv/76y2fvo1arMWfOHBw6dAiHDx9GTk4OVq5ciZEjR9rdXqvVYvr06VizZg1ycnLQoUMHzJo1C/fcc4/D9zh9+jSys7ORkJDgo9+CuWI0Av/7H/D778Ann9CXUHIyfRHVr1/yBVZUBBToASEMMAKiXqTBg1xOj2cXAeFKyxcYX5SuFqpy6XFRpKSU6eluRGNlZ9N
tQaCT1agox7luOPKCsYB2443A00/TyfJbbwGdOnnwYlEEDh6kE+CrV+mkOiHBvZNh/m5gzKHDh6k/ZmVZHuvbF3j9dTrkAgDuvJMiIbVaKjGck2N3Xwa9Hgf37kWPHj0Q5KiCS0wMT2Ix5oLRSPNLa9daHmvXDjh9mg5pUnoOWwI0GgUEB8c4ZbgJF/achT7uHIJloDKHycl0sYd5TZWcyLpw4QLeffddKJVKn7/XtWvX8Pbbb6Nx48a46aabsHv3bqfbjxw5Ehs3bsT48ePRokULrFq1CgMGDMCuXbtwxx13lNm+uLgYw4cPx5QpU6rcSXJ1kZVFubCkJRhLlgAvvkj5CixfYCJwrQgwhpdcKTMCYggN0o2gf68UAXXCAQhQKql0Ml8IY/50++0UFPHrr7Qk367UVGDbNjqSy+X0ExvrfMccecFYwBBFWv3XrZttyo1nnwWeeqocF39lMlqDOGAA7bxxY/cPZPzdwFgZOh2weDGwbp3lsYgIYMoUoF/3fKtZrBIPP2y57SghtF4P9dmzQOvWTkoRM8acyc+nfnjokOWxhx4CXnuN5pAdzCNDrzdg796DjieSt+1AzIo5CJYZaEI5OdmyLJ95TZWcyJo4cSK6desGo9GIa9KaUwdycnKwa9cuDHZQquuLL77AoEGDHE6KxcbGIjMzEw0aNMCvv/6Kzp07O3yvw4cPY/369ZgzZw4mTpwIABgxYgTatWuHSZMm4cCBAzbb6/V6PProo0hISLBZasgqz4EDwLRpQF4e3ZfLqdBgcDCwYYPVF9jvR6lkanAxoNFAVABGoxFyZTiEevVplKLVAu/NA26+GQBfCGOBQSZzUm9AFIF58+iDrlBQHpx69WgGVq8HRBEynQ7OY6c58oIxX0hLE5CU1AV16wq47Tb72+TmUs7YHTsol+zTT1ueCwpy8/zWZKJ1FNYT2LVqAU2aAP/+S98NdtZV8HcDY+6ZOJHGm5LOnSkyq/71E8Aj46ms6BNP+Kl1jNVMZ84AEyZQTkmADl2TJlnmkevXdzqPjLNn1Y7nkVvdAygzgM8/p1nsxo198jvUdFWuauHevXuxceNGLFiwwK3tlyxZgsceewzffPNNmedWrFiBJ554AqtXr3b4+pCQEDRo0MCt99q4cSPkcjmef/5582OhoaF45plncPDgQWRYlSQxmUx48sknIQgCVq9e7TA0kfmG0UjfK+PGWSaxGjSg8uTDhtH9+vXpQlfrViJab5yF1jkH0Vr7B1rLTqG17BSaR11Cqxt0aB15Ea1jstDadAKtt8yj7VvzxDurAvR64LffaLbLZKKawiYTJfEoKICgVkOh0UBQq82PmX/UatiEHjLGvEYUgcWLZTh2rA4WL5bZXeW7bx/w2GM0iQXQsvhLlzx8o5wcYPx4YNQo20vPej1w+bIlLLlU/+fvBsbcN3w4/RscTCfOyclA/X/2As8/T8v6588H9u/3byMZq0F27qT5Y2kSq1YtSjFjHQzpjJCWhi5JSRDS0hxsIADPPANs3Ai0bOmVNrOyqlREltFoxNixY/Hss8+iffv2br1m8uTJOHz4MIYOHYotW7agT58+AIBNmzbhhRdewPDhwzF69GivtO/3339Hy5YtEVUqRLhLSTjE0aNHER8fDwB44YUXkJmZia1bt0JhL6Ey85msLAoj/eMPy2M9etDVsdLR3QCoTvmPP9KZhVR+PDYWWlFEiDQBycspWAA5eJAiC10eO4ODgV27aFR96BBVUrGqnMo5OBjzj9RUICVFQFiYASkpCpvDSmEhBVJ+/71l+6goOq7FxXnwJkeOAG++SXmwAODtt+mEGrATlmyLvxsYc1+XLhTU37UrVQ/Fhg3A3Ll04QgAOnYEOnTwZxMZq1F27rQU3m3dmrqjm3ErFJG8eDHqHDsG2eLFloPzlStld1K7ttfazMqqUjMoH3/8Mc6dO4cd0uVHNygUCnz55Zfo378/HnzwQezYsQNqtRpPPPEE7r33XqxcudJ
r0VCZmZmItZNbRnrsUsml0nPnzmH58uUIDQ1FnTp1zNv99NNPuPPOO8u8vipXTQs09pYSjhtHEd12PwZbt1KcqcFgWacRHw8xONiyEwknuWUBwGgE3n0XyMykvFgffeTiXLJpU2DOHIqisJrEAsA5OBjzA6n+glYLqFRaFBaGmg8rR47QRZfMTMv2d9wBTJ0KWA0nnDOZ6ALN0qWWE+mYGGDIENvtXKyr4O8GxmyJIvDDD0BKCi35tR4GDh0K6m8LPqLlRpK+falT86Qvc4DPA71v6lTgv/+okODUqUBoqAcvTk2FkJICQ1gYFCkpdOXp77/puDpvnpN8HszbqsxE1vXr1zF9+nRMmzYNdevW9ei1oaGh+P7779GrVy8MGDAAOp0OXbt2xYYNG7waDVVcXIyQkBC77y89DwBNmjSB6KgamB1VuWpaoElNtV1KmJQEuAzuKyqiGa/ISLrcLZdz6XEWsPbssZzkBgd7MDYuPYnFGPOL1FQ6jKhUIoxG+nfvXgEvv2ybZyc8nKI8Bg3y4LrJ9et0NefwYctjnTtTzXG+csxYuWVnA7Nn0zEYAL76ipb+mmm1wPTpwC+/WB57+mlg9GiK9mfMAT4PrDiDgdI9SkJDaTm+Uulh3IHVlSatSoXQwkKqMlpYSDsaPx74+mvXRZOYV1SZb86pU6ciJiYGY8eOLdfro6KiMHfuXGRnZ0OtVmPBggUICwvzahvDwsKg1WrLPK4pSZLq7fdjnhs7FmjblpYSrlvnYhJLFIE1a+hbrn59ID7etiSUPUolDVaSk+1PdjHmY+vXW24PHepgo19+obKcjLGAYh2NJdWgUSrpAszatZbDSqdO1NcfeMCDQfjhw/SlIE1iyWRUojc5mSexmEPJyclo27at02JHNd3evRTQKE1iAUB6utUGOTk0YSVNYslkVC47MZEnsRjzsQMHKPfVhQu2j0dElGPxTMmVJlGlohcrFJSao7CQnn/xRZ7EqkRV4tvz9OnTWLp0KcaNG4dLly4hPT0d6enp0Gg00Ov1SE9PR3Z2ttN9nD17FsOGDUPr1q3RpEkTPPzww8i0js33AqnCYWnSY3EeJa9g3lC6qGVQECV5nzfPQT6svXstIVvSZfE6dQB3owBLR2UxVolOnaKlRwCtGOzWzc5G6ekUkTF0KCWhZIwFDOmwEx1tGWALAkUQFxdTgdwJE4CPP/YwH9ann9JJszRWqluXdvLss3wizZxKTEzEiRMnkOYoqXE1JlUOTUuzf7ZbVERRWBMmWLpWrVo0xpw0yWrDN98E/vyTboeHAwsWAA6qqTPGvEMUgdWrgZdfpqTur75KfbZCO7S60iTXaKjjm0yUa3LECPphlaZKjF4uXrwIk8mEcePG4YYbbjD/HDp0CKdOncINN9yAt99+2+HrMzMzcc899yAoKAjbt2/H9u3bUVhYiL59+7qcAPNEx44dcerUKeTn59s8fujQIfPzrHIYDMCiRcCDD1L1cGtRUXZm4E0mmuGaMIEGHEYjfVkVFlIUlkZT5sdcerz0j3XpcY7KYpXoiy8st4cMsfM5NxqBGTPobFijAc6dq9T2MVYeNSUixF40liQigh5r1ozmoD2ee6pTx3I8uu02Cknu1Mkr7WasOnJVOfTPPym/qnVR9DvvBL78ErjrrlI7mziROnHdulQe+7bbfN5+xmoyjYZO5xYtshz6Gjeu4E6trzQVFyPs2jUaaMvlNL6+9daKNpt5qErkyGrXrh2+sT5SlJg6dSoKCgqwcOFCNG/e3O5rc3Jy0K9fP6jVauzfvx+NGjUCAGzduhU9e/bEwIEDsWPHDihLjxrL4ZFHHsHcuXOxdOlSTJw4EQCg1WqxcuVKdO3a1VyxkPnWlSsUsS1VJZw8mZZkOEzkl5tLL5CWW6SmAj//TEuvpNLjpQgAlR53FJNqXXqcE3iySpCdTR9bgNK5DRxoZ6PPPgOOH6fbjRtThAZjAa6m5AeRxshRUXT12DrPuiDQ6r+jR8u
ZgvH+++nFTZsCw4dzFBZjLjiqHCqKFMy4cqWlVkJYGEV7OFzq26wZVV5xVkCBMeYVmZnUH0+dsjz2wgvAM89U4NBnfaVJJoOQlWV5Ljqa/l2yhL4kuNhXpakSE1l16tTBgw8+WObxBQsWAIDd5yRLlixBRkYGdu/ejRYtWpgf79ixIzZv3oy+ffti9erVGDNmjMN9LF68GLm5ueaqgz/88AMulCy0HTt2rHlg3bVrVzz66KOYMmUKsrKykJCQgNWrVyM9PR0rVqzw8Ldm5ZGSQrk0rasSPvwwYCcHPzl5EnjtNeDyZcsLxo8HBgygmXUuPc6qiK+/pnlTgCIRy6TkO32aMlsCdCSfOdPDMi2MMV+RxsgaDVBQQGNljUZArVqWAbHbhXENBkoK0qOH5TFBoCXFPMBmzCVnlUMFAcjKskxidehAh1PztWpRBLZvB3r1sq3o2aFDpf8ejNU0R47Qst7cXLofHg68/TbQs2cFdyxdaTIa6QtAolQCDRvSShwu9lXpPJrIunjxIho2bOirtvjE5MmT8eCDD+LGG28s89wdd9yB1NRUtHdRtm7u3Lk4Z7UEZ9OmTdi0aRMAYPjw4TZXiD/77DNMmzYNa9asQU5ODjp06IDNmzejh/WAknmdwQD873+0FloSG0tVCdu1c/Ci77+n+sg6Hd2PiQHefx+4+Wa6z6XHWRWh11vSXclkpSolSRvMmEEdBQCeesqNcp2MscqSmkopGouLaZwM0Pmw9XImtwrjZmZa8vF88AHQu7ftDhhjLtmrHLpvn2DudxMnUtT/fffR4dRcB8hoBObMoQPyfffRcZf7HWM+J4pUKXTePMsxND6e7jdr5oWdSzPbdeua03LoIiMR2qgRrc5x+0oT8yaPJrIaN26MBg0a4JZbbsGtt96Kzp0749Zbb0VddxNhe9nu3btdbqNQKOxOYkk6uHGFJN2m9IhzoaGhmDNnDubMmeP2a1jFXLkCTJliyaMJ0IXot95ykNBdpwPmzgVKJiMB0JWy9993P6k7YwFk717g+nW63bOnnYIpy5dbYqwTEoDnnqvM5jHGnBBFYOFCS3ESKeVGfLwIjcY2MY/TsfKePRQaIuXpTEqiXDwcecmY26zPWWvXBnJyBMjlgrkgdbdu1A/Xry8VdF9URGkq9u+n+5s3A4MGcS46xirBP//QtRtJt27Au+86OA/0lHVurPBwqr4SFASdwYBQ66osLq80MW/zaCIrLi4OFy9exObNm7Flyxbz4/Hx8TYTW7feemu1zmPBAsehQzSJJY3b5XKqTjF0qIPJ8KIiYMwY4K+/LI89+igleefIKlZF9epFJ8JffEFJ3m0cP07JPADqIDNn8pJXxgLInj10zmsyUReVy4EmTaibajS229odK+v1lH/HutpDXBxFHPMkFmMesT5nLSgQcPlyOBQKCtC37nc2h9Fr1ygtxd9/032FgvJc8CQWY5WidWvg+eeBpUupcOBLL3khFWRWFh1XDx+mme06dejxWrUgiqIlj42Eo7IqnUcTWRkZGcjMzMShQ4dw6NAhpKam4rfffsP58+eRkZFhk5C9efPm5omtzp07o1OnTggPD/f6L8CqL6nscd26gsMCLyEhllzsLpcSApQ4qEkTmsgKDqYlGHazYjNWdchkwO23048No5FCE6VkHs89B7RqVdnNY4w5UFxM3VKrpWspcjnVYQgJcVz01masHH8RwhtTgBMnLBv07k35sCIjK+V3YKy6sM5Vp9PRRVJRFGAyWcaaZc5Rz54Fxo2z5FqNiKCof65gxlilevZZmjv2Stfbv5+WBl+8CFy9Sit2XE1McVRWpfM42XtsbCwefPBBc4L1WbNmYcaMGWjUqBFatmyJ/Px8/Pnnn/j3339x5swZrF+/HgAgl8uhk3IRMeZC6bLHjopAdOxIhdf++MPJUkJrgkCh30VF9I3HJ/WsOpPLqZjBzJm0RmLkSH+3iDFWQqejAoLnz1sisRo3tgqiKiyC8nImRXd
ERJhfZx4rby9G6vHZ6C4vmcQKCqLo4kce4SvBjJVDaiqwcyflbbaeSI6KotVExcWlzlHT0ugYa31FdeFCLyTlYYw5C2j44QcKRh482PKYTOaFSSy9Hli0CFi3jr4Erl6lL4QmTdx7PUdlVaoKBd0tX74cM2bMMCdD3759Ow4dOoTs7GysWLECjRo1giiKiI+PRzAvZWEesC17TAk2AcqDJQWXSJ58kpL52Z3EKiqipVXWQkIoGSdPYrEqrnRfsKtLF+DLL2mZkaJKFKplrEb49ltg924KnJTLLbVFNBpAoxGhvZwDQ5EJ2ss50GjEksfpR67OReF1LZLP30cn3PHxwKpVtFSeB86MecxgoNWBV69ajq0y0Yg4XEKcqhByOZ2jSrmyxB820/olaRKrTRvqgzyJxViFlQ5okCaWDQYKeJw5k1IbHznixTfNyABGjaJJLIAmsPR6usLk7jrF0lFZzKcqNJG1YMECdOrUCa+88orN42FhYXj66afx119/4fbbb4coivj3338r1FBWc5Que6zV0uT4woX0/bJqle32MpmDcfv58xSBkphItxkrh+TkZLRt2xadO3f2d1PK+PhjYPRoSvbudFIrIoJy5jDGAsYDDwAqFc0vq1Q0Xi4oKPm5pkWBWkCBEEX/XtNanisA1EIklAoNLujqQ3/PAGDtWr44w1g5XboEPPggcPQoTSoLAhAeJqJZyEXU0l+DcDULgGh1jioidflflvJoPXpQcp7atf33SzBWjdgLaMjNpbnjksVeMBq9OFe0dSswbBhw8iTdVyhoOWFwMEU7W19JKvmR6XR2H4dcTpNgycmOcwQwr6jQ5fkzZ86YlxjaExkZia+++grNmjXD/Pnz8f7771fk7VgNUbrssVIp4vvvBRw5QufjH38M3HEH0LKlk53s3Us5QgoL6f5bbwErVvCVauaxxMREJCYmIj8/P6CKWGi1wNdfU67J338HtmyxGkMfOQLcfDN/3hkLYCEhwIEDlN7KZp5ZFIFJ0yAePIiCsDBEFhdD6NydSjKZ+7Qc+O0iYvRXEPzUTO7rjJWTyUQ1gA4epNtBQXT+Wie0EPgvDyZBBkGtpvGkMqJk5ZCA5NAJ6NY4DUK3rsDEiV7ILM0YA8oGNBQWhiIpiR7PzKRtFArg9ddpArpCiotplc7331sea9yYzhvHjqUwTCnq0ooAQKHRQHB07FUqgQsX6AoVr0rzmQpNZNWtWxcnpZlLBxo0aICePXtiy5YtPJHFXCpd9jgrS47cXCp7fPUqXbV++WWgRQsHOzCZgE8+oUkrSbNmlLCPB/qsGvnpJ0vBlL59rSaxUlPpklXXrlQ1SVqvxBjzK6ORIj/i4y2PNWhAPzYOpgJ/boCpdijyjUZEhcshS1kBXOoP9Olj2a71LZXSbsaqM5kM6NePjqkhIZQKJyxMBP7LAkwiRLmcxpZZWcANSgiCQFFZqcFIXfEZuvcJ5/ElY15UOqBBoQC2bQOaNqWAhpgYWl7YoYMX3mzNGttJrAEDaIYsPBzYsAHIybH7MoNej4N796JHjx4IclT1PiaGJ7F8rEITWf3798eKFSuwdetW9OvXz+F2YWFhSE9Pr8hbsRrCuuzx1asCrl0LMyfBLS6mwjBPPOHgxfn5wNSpdIlb0qcPTWJxxUxWjYiiZQk/AAwZUnKjoAB4+226fegQVV15+OFKbx9jzJbJRDk99u2jizVt2zrYsNTVHNn16xCuXaNokHHjgGPHOPKDsQoSRcvckygCKSkUQNGkCY03UVhIURhyOQCR+lxuLo0zo1SWfM6rlOjWh6IzGGMVVzqgISMjGIWFdCHo6lWgc2fKi1yvnpfe8KmngF9+oeqEr78O3Hef5bn69R1fDNbroT57FmjdmsI4mV9UaDQ0ZcoUhIWF4bHHHsN3331nd5v8/HwcsJ5YYMwB67LH2dn0I1GpqJL4Tz85WG586hSVf5I+azIZhW699x5PYrFqJy2NKn4DdEXKfFI8bx5dNQY
oIsu6nAtjzC9MJmDWLODHH2mu+eWXqQ6JXVZXc4T8fIRnZdHSBLkcOHMGcDDWYoy5ZjQCy5YBb75pGUtKXa5evZJ5K4h0HDWVTGCJImDQ0/1z5wHRxPmcGfMR64CGa9cEFBQEQxCobxqNwHPPVXASq/RJZEgIZY1fu9Z2EotVCRWayLrhhhuwdu1a6HQ6DB48GL1798aqVatw8uRJnDt3Dj///DP69++PrKwsdOvWzVttZtVUaipVcCoutqS2AugLKz4eqFXLwaBh2zZK6n7pEt2PjqYZsSef5HBvVi1JiS4BYOjQkht79gCbN9NtpZKWFfLnnzG/Mpnoeoq0ckEup8Bhu9dXrC9FFxRQMhBp0B0WRldzvv6ak8cyVg6XLgHPP0/ZJ7Ztoz4pdbnCQuqbGg2gyS6CpkAPjRAGjUEBnU4GjRgCDUKg0cmguabmfM6M+YD1IVCppKWFkthYIDSUaiqUu7/99RcNmksXAGvalMIxWZVT4VrsDzzwAHbt2oWRI0di9+7d2LNnj83zoigiJCQEs2fPruhbsWpM+vLS6+kCmNFI/8bEFCMmRglAsIRyJwPdulmdo0dE0AsBCk354AM7SUcYqx4uXKAJXYAmeXv1AnUM6+/YiRM5NxZjfiaKlMdj0ya6L5MBSUnAXXc5eEFqKhUq0WoBnc7yeHQ0HdMKCy1Xc7p393XzGas2tm4F3n3XcpFUWimo1wMZGdb5nEXgWhFgLMl7ZTIBCAEgALKSQec1LW1XMi7lfM6MeUdKiiUaSxCoT0VHaxEdHf5/9s47vqnq/ePvm3SntKWUUfYWWSL+WIIgQ0GcXwT1qwh83VInKohbUZGhqFAcKNMtojJcqCCzgAoKooIgUKDsrrRNmyb398chTdImbdqmTcfzfr3yanLvzblP0px7zv2cZxAZqfpomYZAux3ee0/dQNpsMHkyLFggnbYGUG4hC6B3797s3r2bjz76iGXLlvHLL79w4sQJ6tSpQ79+/XjiiSfo1q2bP04l1FAcrqR166rrSkoKNIrOxnDsMJiaQWRkEVfugovYhReqZbbjx2HiRLkwCTWSbds0pk7tSY8ehoLVqOuugyCjru6OHbG4/fuLe7QgBBhdh1mzVK5YUDfOzz8PgwYV84bp01VIk8FQsFJjqVuXiEaNVGUkr6s5glBxJCYmkpiYiM1mC7QppSYrS0UNffWVc1vjxqovOhJFu+Vz3r5DVSojB+x2dMBms2GMikJr2FDFBOfmwksvq8rASD5noeKpzn3QF/LzYdEileI1Nxfi4pz7IiOtmExO4bjUQ+CZM6oCoWuao9BQpYrFxvr3gwiVjl+ELACDwcCNN97IjV4zcQtCUex2FUHhcCWNi1MXphYtdPj3OHpODtrJExDp4pV1Op/ERCO9e2vOi9htt8mkXqix6DrMmWNg58449u830KyZcrH+z3+A1atVokqAqCiV/EP6glCDqG6TeF2H2bOdBRk0Tc2jL720mDd98YVKAul4g9GI3qQJ+Q5vY8d2j6s5glBxJCQkkJCQQEZGBtHR0YE2x2d+/12F8TqyTgBcdhlMmqQc+R0U5HNOSYEpN0H6XpW82aChAzmREYQ1D8agHYQwXSWFXvUy3LBExlqhUqiufdAX/vgDpkyBHTvg2DGnN5YnSj0Ebt0KTz4Jp087G/jf/+DOOx0J8YRqjpS+EQJGVhY8+KAKV163rtDF62zFGF0zKNU8KwvQ0dLOEJN5mPXfZrvnypLJhFCDSUqCjRs1QkPzC3LIDR8O0dZTKgGPg8mTVZkXQahBJCQksHv3brZt2xZoU3zizTdh8WLn6yefVP3VK7oOS5eqibXBoFaLW7XynEjLZFKrPpKYRxA8YrfDO++o9U2HiBURoW6Wp0xxF7HcWLtWFVQwGtWcMiICvWVL8lyFA8nyLgh+ITsbXnlF6Up796qKhHZ7yd6NPg2BNhvMnQsJCU4RKzZWvWn8eBGxahAiZAkB4cgRdfHasAG2bFGenya
TY6+zYoxuNKpKMcdPqBnJsWOYDNnkpuWQODVd5vFCjcc1+WXdurmYTHDOOaq+AWFhcPHF6sBLL4VLLgmgpYIggLuW/NhjcNVVJbwhKUkNhk2bqje3bOm9nLfcSAtCsWiaKmRtt6vXXbuqAimXXVbMm3QdVq1Sd9HBwSr+sEULNcYWRsRkQSgXmzbB9dcrr2W7XS3OWiyqsFdJ6V1LHAKPHVPpZubPd/bP3r3VRaBnT39/FCHA+C20UBB85ddf4ZFHID1dXbyyslQe28LeWEoxP1v+OD0NskPAaFAXsVgD63+LkugKocbjyB8XHa1js6m/O3ZoHDoETfpEquqEAwc6E34IghBQrrsOgoJU3o8RI7wclJGh8jq2beseW1+nTsknkFxZQi3EkSeyfn2NCy/0fpymqZDCP/+EK6+EW2/14IBx9Ki6sb3/frXTMdA2bqz6l6GYdX4J8RWEMpGaqrywHFH0oHTj2Fg1pNWt61s7xQ6Bhw6puGJQ/TghQVWxL65PC9UW+a8KlcqXXyqvzvR0JZRnZyuXb5PpbNlji44lJRWLLQQLYeTagrFYDVjsIVisRiyEYWnQHGNcLFlZmiyICTUat1LEZGM6loKJ7KKLwRddBDUsb4IgVGdGjFCClkcOHVIulQkJygvEtUyTL4hXllDLcM0TOWeOwW3el5UFu3e7Hx8VBZ9+qhwz3ESsvDx4910YNUq5g3z6qftAGxnp2w2veGUJQqnIy4ObbnIXsf7v/1TOuiNH/DgE9uypQn7i41WM8dixImLVYOQ/K1QKdruq4DRlilqlBnWtad5cLUCbzSrpe+apXDLNGplaHTLzw8jKDyeTOuqhm8iMaEimPRKzGbeyx4JQFXCsGG/b5h8PCcciscmkY07JICg7B8PJE8TUyZd7WEGoInz8Mfz4o48Hb92qJtaHDqncHRMmqDtxo9GxmuP2MOTledyO0ajeJzfSQi3AkScyPDyfjRu1grFv50648Ua47z44dcr9PUWiAh3xTG+8oUQoUELWpk0iJgtCBRMSAiNHqud16qhggrlzVXrIYoZA8vIMxQ+BM3PQ7YXGwDvvVEK1RCrUeCS0UKhwsrJUnpCNG53brr9ezd9PnXIpe6zrMPFJNamw29EtFmwGG0ajEc2R9Lb3hapE+dnJhpQ9FqoKhVeM+/Qpe8RPXp6aoD/4oEqAid2OlhdDMBE0zUjBlLOHtLDWJCaGulfvFAShQvAW1vTppzBjhlrwff75EioTOg52JO9p1UqtxuTmqtWcQmhAkMWC5q2Du67myEAo1FBcHaaio3PJygpjzhzYtQvmzXN2p+nT1aMIR4+qeKa1a53bDAa44Qa4/Xa4+241UY2MVHfIhU5eICYX7oeuYrKE+AqCG3a7yrnumu7x5puV08Lo0SodZF4eJCeroczDEAi5edhTszDnhUBo0Xx1JkM2h7//G+t7fxMy5gbnDqPRtzB9odpTKiFr6dKlDB8+nAhPlXQEwQuffuoUsQwG5UZ67bXqdUHZY4DNSfD7JxAXBidOoBvysOk2jA0bocUZ1VXu908gbYTkJBCqHO4rxkGlSp1hsaiQ/l9/VY9du1QBhH//BU3TMebngw42LYgQmwUNjRg9hfVrmpCUFCLdQRAqEG8i9eefw7Rp6hi7HQ4c8NKAzQYvvwyffOLcdtFFSvkym11Wc9zJt1rZvG4d/fv3J9hb8ndZzRFqOIXzRJpMOl9+qfHLL84KhF27qnRXbuTlwZIlKpQwL8+5/fzz1US0bdsS76RFTBaE0rNnjxreevdW6WQcBAe799OQEDUsehwCdR37I09g++EHjP83GMOMGU6xOCdHqdjrfiLWmEHI3Ezodb6qhCTUKkolZF133XWEh4czbNgwRowYwZVXXklUVFRF2SbUEG6+GX75Bf74Q036e/TwcJDrkltcnLraHT5MTmwspnr10EAS3ApVFk8rxqX5mW7dqjwUXdtzlCIONtoJsucSYbRQVz+NATvYwVTHQJo
tRLqDIFQwnkTqU6fgxRedx4wbp5w7ipCRAY8+qjq5gzFj4J571MqOyeS9TJPVinn/fujQwXsVQ0GowbiOrfXqwbFjQWRkaOTmqjGyTh247Tb1KJIL68Yb3dXlevXggQdg2DDngFnsnbSIyYLgwJdiC7m5Ki3V4sVq/ebvv2HIEGjf3nu7bg4NrmxOQt/5KRZTFmE7P0VLu1atDv/9N0yZrMLzHU5aw69SuWqEWkephKwnn3ySZcuW8fnnn/PFF18QHBzM4MGDufbaa7nqqquIi4urKDuFaozRCFOnqnlCs2YeDjh5Uk3yXXMUhIWht26NLSPDeZxUihGqKJ4qC65frxX8TDMzYccO2L5dibo33OBeCrxbN/XzdqS6CQ9Xk4CGDXXqZR0h2JYKBg3dakWtEetotnxi6rmfRxAE/+JJpH78caVPORg9WuVtLyImHzqkbpwPHVKvg4Lg8cdVKTVBEErEObbCsUP5pKUa0YLtGI1GcnKUHjx2rIc3hoTAhRcqIcsRRnjnnUo4LozXO2lETBYEfEud8csv8MILzuEOlLZks5XxhGcH3tzoaMIcIbwHD8JrrzmTI0dEqNw1w4aV+bMJ1ZtSJXt/9tln2blzJ3/99RdTpkyhc+fOfP3119x+++3Ex8czePBg5s6dS0pKSkXZK1Rx7HaYPVuVPXYlMtKLiLVpk5pg3Hff2dJsLpMMTy4mUilGqGK4VRY8+/MNC1Pi1T33qEXhQYNUvqvFi5Vn4rZt7m1ERSlvjilTYOVKaNlStdEwMovg7DQ1EXdUSdBQE+qsLExkSXcQhArEVaTWNKVFbdzojEK64QYVKuHRI3L3buesPiYG3nxTRCxB8BHXsfXkSZ30NB3Nbof8fGJidOrUgdWrz459eXlFK//ccYcafD/4QLk8exKxBEEoEW/FFkAt6jz/vNKJXdds7rgD3n8fzj23jCdcvx49Oho0Db1OHVi+HJ55xtnPO3RQJxARq1ZTpqqF7du357HHHuPnn3/m33//ZcaMGfTs2ZO1a9dyzz330KxZM/r27csrr7zCAa9JI4SaRlaWullftAgeeuhskmpv5OfDnDlKwDpyROUY0LSS46OkUoxQxXDc6MbEQFqaxvHjEfzzj0Z2tsp19euvRUWmwtWVQA36l12m8mKp9nS0kyeUOuwQsQCMQWqWYFf7Y2J06Q6CUAEUFqlzcoI4c0Z1yZMnVa7Hhx4qZtgaNkzFHLZpo1Tsbt0q0XpBqN64jq31TBaw29DQaawdpWndLOrWPTsVfPt3uO46+PBD9wYiI1X297ZtA2K/INQECnslOxZP7Xb44QcYNQq++MJ5fNeuSju+444yRtwWGngNViva8eMqL9bJk2r/jTfC/PlePCSE2kSZhCxXWrRowYQJE9i4cSNHjhxhzpw5DBgwgK1bt/Lwww/Tpk0b/u///o8XX3yRv/76yx82V3tyc3O55ZZbaN68OVFRUfTu3ZvNmzcH2qxycfQo/O9/zqTup06pqmseOX5cXeEWLnQmAwoK8u7aXRjxyhKqCIVvdO12sFrVZdVgcN7wtmunPDemT1cryK+/7kN7ZEGm+WxJprO/c4dLCKiYXbNZvLIEoYJwvZG22+HMmVA0TXU9mw369y8kYjnKp7kyfjwsWACNG1eW2YJQ7XEfW3Wis1NoZDhBi5DDRJMBJ05gCskj91QGiY8dRk8+rJI/Hz8eaNMFoUZR2CtZpc5Q0TeTJsHp0+q4iAj1+p13oHVrP5zwbKoZe1CQmlA7qoTedpvysJS8dAJ+ELJcadSoEePHj+eHH37g+PHjvPPOOwwbNoxdu3bxxBNP0KlTJ2bOnOnPU1ZL8vPzadmyJRs2bCAtLY0HHniAK6+8ErPH2qNVA0eSv23bii49b9+uctfu369eR0WpCcigQR4aWrcO/vtfVaINlMKuaUpVD/IxZZt4ZQlVhELjLeHhSkkKC1N5ZRs3VgLXfffBww+rPlG3ri/tnfXGOlutEHAO6I4uaDS
IV5YgVBCFRWqjEerVs6Bpqg+HhcEbb7iIxxkZKpb4q6/cGzIY1AxfEASf+egjl7E1OwvMZuoGmQnRrKozpmeg7d1LjO00683nk5TVRcUwuVYnFAShXHhKneHwJdiyRTkbg1rUWbpUeWcZyqMseDqhpqE3aaIqO0RFKY8JWbUVzuJXIcuV2NhYbrnlFlatWsWJEydYsmQJ//nPf7yXsK1FmEwmnnrqKZo3b47BYOCGG24gJCSEv//+O9CmeaRwkj/X68fy5XD33aqYIECLFsrRqkhlQqsVZs1SKrojS26jRk7Z3mgEi6XIw5CX53F7gTIvbihCgPA03kZEQOPGZlq21GnYUBXgzMvz7WfqaC8rC4x52VgyrViCIrEQikUPU8/1MCz2EOdDC8eSacWYly3dQRD8SGGRmqxs6qUdpmXDbJo0KbSWcvCgCiHculUlC/HqjiwIQnHY7SqX8z33qKmiyaTDiRNg18FgUDmyrFblEmm1YjLkkEsoiY2fR3/zLQk1EgQ/UtgrGZy+BBs3wtVXw0svwcsvQ4MGfjjhsmXqxjIiwt3dOSxM3WDGxooTg+BGhQlZrkRFRXHTTTexdOlSHnrooTK18ccffzBq1Chat25NREQEcXFx9O/fnxUrVvjZWnfMZjNPP/00w4YNIzY2Fk3TWLhwodfjc3NzmTRpEo0bNyY8PJxevXqxevXqYs+xd+9ezpw5Q9sqGsfvKcmfY7Lx3HPO9D29eikRq0gF1Lw85Qr6/vvObQMHqoMzM5UKYDar5y4PzWwmyGJB87APs1m97/Dhogk+BcED/g7pLXKjexbX1ajSOA9arZCcrCbu5uPZZNoiyNQiyQyqS2ZQDJm6CbNuItMeQabdpB5aJJm2CMzHszGZdOkOguAHdF2NbadPO0Rq5fkYlJNDWNpxQHdGuD97En3MWGeWW5NJ1GRBKAO5uaoA2RtvqCmexQLW9Gz1wmgAax5afj6gK89kuw2tTiQxLaNZv68JSVtkoVwQ/IWuq/u8jAyVLmbvXq0gdYZj/PvqKxg8uOT0xiWSkQEzZ6q0Mzk57mWBXZHUMkIhfIzlCjwHDx4kMzOTsWPH0rhxY7Kzs/nss8+46qqreOutt7jjjjsq5LynTp3iueeeo3nz5px33nmsXbu22OPHjRvH0qVLeeCBB2jXrh0LFy5k+PDhrFmzhn79+hU5Picnh9GjRzN58mSio6Mr5DOUB0+lxxMTlWi+Zo3zuOuuU0lvjUYPjYSEQJcuqlxbcLDKCD9qlLryffIJpKZ6PHe+1crmdevo378/wd7KHsfGSpy04BOuIb1Nmzblk08+4corr+TAgQNERkaWqi3XfhEXV/yxJpPyWExMhN69vQ/4ISHwyXt5pK7fpeIQG4SCyRmSZNfBnJlJZJ06GFzbyMpWhrz0MrGDz5fuIAjlwGZTeT5+/FH11dOnIS5chTbpmkEtrGRloZlMxBgzWb8mn6RWregTuVMlw3vlFYiPD/THEIRqRVqactj/7TeVV9JuhyZNdEJSj6vBD909B53BqMSs/HxMkQbS0kseYwVBKBm7XaWMmTvXmcTdcW935kwYcXFFF2n79CnjyWw2+PxzpV4fOXJWtDYqD4n8/KI3lX47sVBTqDZC1vDhwxk+fLjbtnvuuYcLLriAV155xauQlZqaypo1axgxYoTH/R9++CFXXXUVJi9leePj40lJSaFRo0b8/PPP9CgSM+dk69atfPTRR8yYMYOHH34YgDFjxtC5c2cmTpzIpk2b3I63Wq2MGjWKtm3b8tRTT3ltN5C4Jvmz2RxJ/jTuvlvtNxjUpP/aa0to6L771B3BmDHutVgbNvSe5N1qxbx/vyqx6k3IEgQfcYT0OrjhhhuYMGECf//9NxdccEGp2vLmjeUJn8ddq5WGMx6m4dq1YD8BsU3cGrfrOhm56USFRWNwPWmoriYAq16GG5bgTKIlCEJpOHZMeYR8+aWazAcHQ062DpkqtEk3GtWOEycgLBxTaip
p9gYknhxF78vqor3wvOTDEoRScugQ3H+/8kjOylKPxo0hNjTLeWNrMECeXeWMDA4+W3HBDmYzWnYWMTGRcm8r1Hoc+Yzr19e48MLSvffwYVi1Sj2OHIEDB5TO5Lj9Cg6G0FAruh6Epvm+SFuMsSom8Z9/nIW/dF2lnXGoZZ68rsp9YqEmUSmhhRWF0WikWbNmpDkSNHlg7ty5XHfddXz++edF9r377rvceOONLFq0yOv7Q0NDadSokU/2LF26FKPR6CaqhYWFceutt7J582aSk5MLttvtdm6++WY0TWPRokVVMndYcUn+du1S0YJz5ngQsZKTobDnWkgITJ3qLmIJQgm8+OKLPoX1VmZIr6d+URIlekPbbPD44/Ddd/D33+q1r9cEKX4gCOVmzRpVh2TTJue9c8OG0CzW5WZaQ4U4paXB6dOq6xkzWa/3I2nkTBGxBKGU/PabqnidnKzGxowMCA+HyEgdS0oqFlsIFi0Mix6CJSiS7KBIlRvSfna7LQRLSipGoy55IoVaTXH5jEviww/hmmtU4c+jR5WYbDYr8apuXWjZEtq00alTx1owNS3z1PPIEXjkEZVg+Z9/1LasLOWB1bw51K9f/PxX5ryCC9VOyMrKyuLUqVPs27ePWbNm8fXXXzN48GCvx0+aNInLL7+c//73v/zwww8F25ctW8add97J6NGjudvhXlROtm/fTvv27YmKinLb3rNnTwB27NhRsO3OO+8kJSWFTz/9lCBfq/VVMk5vLJWrANyvH+edB2c/mpPvvoObblI35fv2VbbJQg1j2rRp/Pnnn5x33nnFHjdu3DheeeUVbrrpJl577TWMRiPDhw9nw4YNHo8vT0jvtm2+e2M5KHbctdvhmWfghx+cMRVRUVL8QBAqgbw8mDZNzaszMlQXBFWHpF49HU46E00DkG9Tr/PzQdMwNY0lNySKxDdKd+MgCLWd775T97Lp6ep1q1Yqn3NUlI45OZVMs0amVseZE9Jjnsg6ZJo1zKdzJW2qUKvxlM/YE3Z70eKe3bs7nzscoUwmFRATH6/EZU+UKWXV00+756bp0EGVPwwPVzecviC5soSzVE0FpRgeeugh3nrrLQAMBgMjRoxgzpw5Xo8PCgri448/ZtiwYVxzzTV8//33mM1mbrzxRi677DIWLFjgN2+olJQU4j3kxnBsO3r0KKDyfb3zzjuEhYUR55Jg5+uvv+aiiy7yiy3lxW6HF16AM2fUNULP12mkZUBQECZTZFGvztxc5SK6bJmzkblz1TZBKCN79uyhXbt2xYb1VnZI77x5SjOKjHQKvA50HfLyDFgsRUUuV62poN/oOrz4Inz9tXputUK9euomOTPT7f0aqOIH3q5XrrN4SZQlCCXy778qlHDvXvXasSjcsuVZ56qsQt5YAMFBYLepR/0GaDHRxARJyg5BKA3Z2Sq3s+OGulcvJShnpWSQ+tgMWL0ajDlKUTYqEdljnkgdOH4cevSB6dOJrafJ8CfUOrzlM3aNvDt0CFauVKGDN90EN97ofH/79ippe6dOKqrv7rtVFUJDCe4uZUpZdd99cMstKsfxPfeoOe+NN5ZvdVgG3lpLtROyHnjgAUaOHMnRo0f55JNPsNls5BWWlgsRFhbG8uXLGThwIMOHDycvL49evXrxySef+NUbKicnh9DQUI/nd+wHaNGiBXopFOTExEQSExOx2Wz+MbQYTp1SVSgWLYING9S1wmhUq8/HbbFEHj9NcBsTMTGa8/oRfwAefdTpIgowfLjaJgjloKG3/GkuFBfS+9hjj5GcnEyzsyW5/RHSe/iws9BmUTQsliCv7bppTcG6Enod2TSDg2HxYrU65QEpfiAI/uWTT5wiVnCwmrifOgVBQWCx6JCSCraQs+IV2Ox2jEYDBIUoISs1G0yxGI1aUZFaEASvRESough33AHDhilBOejoISInPUDD3bsh5191F61ZIUyF7HrNExlnht8/gbQRcK7c0Aq1D2/5jH/4QXkar1w
Jv//uPH7lSnchS9OUkKzrcPPNvhUyclBsyqqdO9WA6ppWpmtX5SnRr5+6ENx8c7GrwwWRCD6tDgu1jWonZHXo0IEOZ2/0xowZw6WXXsqVV17Jli1bir0pjYqKYubMmQwaNAiAV199lXBvvpJlJDw8nNzc3CLbLWc7ZlnPl5CQQEJCAhkZGcWGQZU1yV9enroArlih8oPYbCrJnyPZrabbidLTiDKmEZydC1lZTq+sx47Q23ozmkWJdISGquzvV14pFxWhUvAlpNchZDlCer/99tsyi9iLFimvDU9YrfmsW7e5WLEpNvasiDVnDnz0kdpoMMCUKXDppd5PLMUPBMGv3Hcf/PyzGqqefRbuustFpM7NhUxAjwC7ulbYdR2DXVNv0Oxg1uFULoSGiUOkUGOpqMXUzp3hgw9UOKH26y8qvjc93Rnf26qVb3nnJPmzUItx9caqV08JV6Bx+rTK+9i8uXuXMBjUok1OTtGQwdIUMnLg0TnqxAk1x/3qKyViLVrk7t41dKj6m5enEuR5WR2WSAShJKqdkFWYkSNHcuedd7Jnzx7OOeccr8ft37+fm266iQ4dOpCTk8O1117Lxo0bPYYClpX4+HiOHDlSZHtKSgoAjRs39tu5ClM4yV+fPr5fhJYtUy7eDhzRFCYTxMXpRJ1JxmBNx2YwKN/uEyfQWoQTk3+G9ZsMJLVqq0qPt24NL72k/gpCJVFRIb25ubluwnSGmh1Qt66VqCjPSTisViv795tp08ZarNaU//Z8DC6J6+2PPYY+cGCxyT2sZ/dZKzEBSCDO6StiW9kpyb6qZrevN9IlLeZkZ7vfF4eHw2uvKXE5LEx5aKWmokKVxo0DLVl5Y9Wrhz0mxj2sqVBIE5omDpFCjcTXxVTw3gdPnFD9a/x49/vZli2B5ctViH1+vpqAWizQrJnvxRMkzEioxbiKTxYLpKSoKkQ2mxK1HM5ObdsqH4Nhw5TgVRiHIObVOSorB+Px01hswWiFKh0VOEe9bqP3HwvRFi5wNvDnn/D9954XakNCXAbeokgkglAS5Rayjh8/zg8//MCvv/7K8ePHSU1NpW7dujRs2JALLriAQYMG+RQeVFYc4XrpjmyRHkhJSeGSSy4hODiY1atXk5OTQ79+/bj00kv56aefiI2N9Yst3bp1Y82aNWRkZLh5h2zZsqVgf0XhnuQvyOtYnpqqLm6uLqOXXgqzZqnt9eurSUZ0tJpHaNlZkJWhrlLo6q/ZDP8ewJSb6yw9fmNrtImPqLsBQahEKiqkd+rUqTz77LNFtn/33XdElDDBLq5ioiEvj+6LF2M6e83ac+21pGiaWrnygZKqMVYEgTinr4htZcebfdnZ2ZVsSfH4ciNd3GKO3a48PxYsUAvDTZs63+e6vtSwXj4Nv3lfhfwe/RuCNDXm2Y9jD2tcNKzJNaRJbp6FWo63PrhnDzzwgBKz8vPVc0B1zDfeUB3T0YCuq3lkaKiEGQnCWbwt5rh6Y8XFqfs4m03DaFT3cjab0oPfew/OOaf4bmG1FuccpcOpbLAEw4lsiIvAmThS7TcZLRz+7h+sf88jxHA2bCEqSrk6F1OUjYYN1cOLURKJIBRHmYQsq9XKxx9/TGJiIlu3bgXweIPocAXs1asXCQkJXHfddd4V1RI4ceIEDRo0KGLH4sWLCQ8Pp2PHjh7fl5qaytChQzGbzWzYsIGmZ2ew3377LRdffDGXX34533//PaZC6nJZGDlyJDNnzuTtt98uSDqdm5vLggUL6NWrV0F4k78pKclffr4KGVy+XKn2o0bBWfMAJWbfd59S6/PzVdx0bCxomvK+wq5DsEHlBDEYwGoDu10tggWbWR80kKRLhtNHNCwhAFRUSO/kyZOZMGFCweuMjAyaNWvGpZdeWiSM0YHVamX16tUFwrlXLr4Y4333Yb/sMs7/73853wd7fG7bjwTinL4itpWdkuxzeB9WJ7wt5pw5o4qCOmo+PPYYvPuuhznx778rr5C9e1V5cLt
duWzFx0OdOp4rI0lIkyAU4KkPgso24dDGf/wRbr0V6kTYVGd0qSbOtdfCN98ooUrCjAShAG+LOYVDAY1GiIjIJzjYSEyMOub0aeXEUNLwVKxz1PYd6A89RG5INqHhEWgvvQznn525/vuvEqN3/0GsMUOJWAaDutm8804lZglCBVFqIWvJkiVMnjyZlJQUdF2nfv369OnTh06dOlGvXj2ioqJIT0/n9OnT7Nq1i82bN5OUlMSWLVt49NFHmTp1KqNHjy61oXfeeScZGRn079+fJk2acOzYMd5//33++usvXn75ZSIjIz2+b+7cuSQnJ7N27VratWtXsL1bt26sXLmSSy+9lEWLFjF+/Hiv554zZw5paWkFIUorVqzg8OHDANx7770FF5VevXoxatQoJk+ezIkTJ2jbti2LFi3iwIEDvPvuu6X+zL7iLcnf0qVqTP/6azWZd/D113D//e4T+Ztu8pDkz1PFJg312pID9RtgqluXtJOhMo8XAkZFhfSGhoZ69PQKDg4uUZwo8Zj69WHRIgxlmGz7cn5/E4hz+orYVna82VeVbfaEt8UcoxGeekolcHfwf/9XaJzKzFS5PJYtUw05xr26dZXrVnFlmySkSRAAz33w0UeVgOXQgDt3Vgne69QBMDq9MAwGeOghuP56pXJJmJEglEhhbywHsbEWoqND0DQNXVdp53y9R/PoHKXr8PzL6PbdpMeaiM7KQlv1MtywBD7+WHVqux0czgy9eqn+LGlmhEqgVEJWnz592Lp1K3Fxcdx3332MGzeO8847r8T37dixgwULFvDhhx8yduxY5s6dyybH8qiPXH/99bz77ru88cYbnD59mjp16nDBBRcwbdo0rrrqKq/vmzRpEtdccw2dOnUqsq9fv34kJSXRpUuXYs89c+ZMDh48WPB62bJlLFu2DIDRo0e7qeOLFy/mySefZMmSJaSmptK1a1dWrlxJ//79S/V5faVwkr+0NOVWeuKE8uZs2dL9whUXB5dfro4vPA9wV/Yd3lh2MAB5toISyBgN6nVONlqjhjKPFwJKIEN6fWbLFujSxT3nh0y2BcEveFrM+fprjV9+UU4aoO5vn3tOTeYLyMtT2XCPHVOvdV0NjlFRZ2PrfViZEa8sQSjSB3VdbWvVSuXbGThQ1TNxyz7xwANKtBo2DPr2VdskzEgQfMKXxOx+WWs5eyI9OhpsNvToaDRHg+ef71SqmzWDBx+Eiy6ScVCoNIpZaizK3r17mT59OocOHWLWrFk+iVigbiRfe+01kpOTeemll9izZ0+pDb3hhhtYvXo1x44dw2q1cubMGVavXl2siAUQFBTkUcRy0LVr12KrHQIcOHAAXdc9Plq2bOl2bFhYGDNmzCAlJQWLxcLWrVsZ6qjOUAG4Xshyc+Ho0UhOnFDXFbNZLS4HB8OQIfD667BqFdx7r5pYuOKa5M9o1LGkpGJJtWDJD8JiNWKxBZGbH4TFHqIeWjiWTCuWM9luqQlKkYJIEPzCyJEjsdlsvP322wXbKiOk1xPatm30nDoVbds258Y1a1Snu+cej+ESgiCUHdfFHBPZhB49yaljNjIyKBgLe/WCDz8sJGKBEpMvv1w9Dw+Hq65Sizf16pW9ZJMg1DJc+2CEnk3moXwy0+zY7aoA4Y03wrRpEGZJc3+jo1qvQ8QSBMEn3Ma9EjLjmEzquDLdoxU+kd3u3mD79qqD33ef8s7q319ELKFSKZVH1v79+73mhvGF0NBQHnnkEe68884ytyE48eRWajDoZ/+q/fXrw4oVFMRKe8NqheT9VkzkYv43WzWqm9wuSLodNHsQoIGmQ74NjqukfyaTJqkJBL/z9ttvY7FYig3rreiQXp9Lj+s6hjlziNu5E8OcOWrpa/NmmDxZDf6//w5Ll6pqaIIg+AXnYo5O9tEMjuQ0wp5nxxikYzZrDB8Ozz57NkIwP18NjK7eHLfcomIvxo1TySO9lmySRNOC4AlHH4yK0kk+aCfHGo5mzMcYZMB
g0OjTBwyffqz6x5tvgpectoIg+Ma2bV68sbKyMR1LgaCgAo+FcnllbdqkEttpGtqhQ0RYLCrs3rXBBx/03wcThFJSKiGrPCJWRbRT2ynsVqqbs4mzZaDH1COmQShWKxw6pCqfFnvh+v13Qj74gE9yfye1UThkZatkt0aUKhUVhT0yErPF4iw9Duq43Fw4m/RPUhMI/mb27NkcOnSo4LW3sN6KDOn1ufR4UhLaxo3kh4cTtHGjSn75zjvq5hlg+HAYM6bc9giCoHBbzAnPIjsnGxsxaHYboUE2gsOC+PffsxP933+HF15Q1ZPuuMPZSGgoPPqoCjP0XrJJEk0Lggdc+2BsaBan8+3oBGGw59OigZ30vDASE3bTW5uh+uGDDyr3SD9VCxeE2si8eUVzY4GOdvIEQTk5aCdPQKQJR4JjnyPgdV3dOG7dqh5Llqhs8cHBoGkYbDa01FQlZklIvVAFKJWQNWjQIIYNG8bEiRMryh7BR4p6Y6kLWP38NLR8C4S2IiRE8+068/PP8P33NNSgYagOKQcgKEcluq0TDFoGdj2dDHuh0uOhuhK8HEn/5EIm+JmdO3f6JHw7QnpnzJhRCVZ5wKVD5kZHE5aWpm6OHXl2Bg+Gp58uPnG0IAilwtUbSzt5AhNZxBrTyNeNNA7JITuuBevX6SSNf48+P892TtKHDoUWLdwbK7ZkkySaFgRPuPZB48kTNDXkkExjGmvHCTMHo+km1v8RS1KrLvSJ3AlXX11yiIAgCMWyaZMnbyxVqETXDGiO3DImH72y7HZ4/nm188QJtc1sVuOh0VhwIrvRiDEoSAqdCFWGUglZa9euLZITSggMRZL8ebiAaaZI9+tMbx22b4fGjaFRI2djV18Nb70F0dEqIfXHH6uYRC+VIAuQC5kgKFySYRpyciAnB2w2dWN72WVqgmA0BtpKQagxFPbGclTYjdNPE6RpkGXHZD5B2slgEj+Jo3dLXY2Vbdo4vSQLI4mmBcFnPPXBYKOBFobDGAHSszEF5ZBmb0TiqevpPX0E2lVXBtpsQaj2ZGWpmiTOCHgdUlLBFoLNYMBos6vXTZ1eWQUR8K9a6R37L9o57Z0NGgzw999OEUvXVYI7UPeGkZHoERFk5+QQVaeOalEKnQhVAHEPqIYUTfLnqDCooxuNYD/7Gl3l5LPYSXz4X/SRo1RIxSefuDdYr57yU125Eo4fV5P8krIHOihXFkFBqAG4dsigIMJPnVLb7Xb1eOklufEVBD9T2BsLuw4Gg5pLaxrk56MdSyFGS2e9+XyS8v8PJkyARYuUmCUIQrnw1gfRdci3gl1Hs+UTE5zF+uCBJNUXEUsQ/IEjAj4z8+zjVC6ZZo1MrQ5m3USmVke9PpVLZoadzNO5mFMyMFlOcfjr37E+9GjRRnv2VKH2vXqpIiihoarsaLNmKpSwsLexFDoRqgCl8sgSqgZFkvxlOVejQVd/zWY4fRotN5eYDCvrfw4jqVUd+kQCy5fDXXe5X5S6dlWJqUuq5VoY8coSajglJnt3zOYjI9GSk9HsdtUHTSa1XPbrr9IvBMGPePPGAtBsNrBb1UG6DZOeRVpQPRLbzaL3f8Nl0VgQ/IDXPmi3Yci3qrmhBtjtmOKjSMsKFccNQfATn3/uEjSj6zDxSdi8GXuDBpjNZiKDgjCcOAFhceoeLd9a8N5YYwYhx1Lh6FEVoePgf/+Du+9WC6833+ysUFgc4pUlBBgRsqoh7kn+nN5YBBtUOBP2s2UID0NoCCYN0uyRJJ4cRe+LgtFGXls0V49jViIVmwTBjWKTvbvO5uvVU0tjeXkQFgbNm0NKivQLQfAzzsUcF0+QEAPk25SQpdyyAB0tNISYxnVYv02TtRZB8BMevbGCNcjNcx5kMKoi12mpxNSPYf166YOC4A/at1ehhQBsToLfP4G4cOyGNGwn9xFkMKixMEWHsFbuqWLatIGew4rOSR0NlsapQZwZhAAjQlY1xC3Jn6s3lm7HYM1zXnjsNrD
Z0UKCiYnUWB98CUm3DPd8nbFapWKTIJQW12R1BgN6kybk2e2ENWmCZjTKAC8IFUDBYk4hbyyMRsg/O0YFBakFm1wLJrJIy40UTVkQ/IBXbyxNU94ceXnqdXAw2OxgNmOqL31QEPxO4cpfuk5B1zIY1D2Z2Qz//a/qeD16FF8xtGglsZIRrywhgJRayFq0aBGLFi0q9Yk0TSPfW4JVoVQ4k/w5k/sRHAQ66LqmLmKOdFXGCGjWAiMGso4Xc52Rik2CUDo8DfiaRl50NGGOG2sZ4AXB76jFnELeWAAa2I1BGI0Gp9dxng3t5Ali6pvEI0QQ/IBXj0gAoxE9KBgt+OzthdEgfVAQKooilb8gPzwcY0iI8sLSdZX3+NJLfet0HtorEfHKEgJIqYUsXRJ6B5wCp6ncXDBroNUBuwHQ0Q2hymvKkXAzV4fTeRAaVrLTlFRsEgTf8WXAlwFeEPxOVhZEBWdjybSCFn52/FPYCMKIAexnN2hBkGnFGJ1NVpZJNGVBKCfz5p3NQmEspg+6vJY+KAgVgBfvKUtsLCHR0epeUNfhyBHfFlMlxYxQDSm1kDVs2DAmTZpUEbYIPvL55xBpcib3o2FDtRKtgzkzk8g6dTCo9CCqCmGPPjB9OmiaOE0Jgj8ojfu1eGUJgl8xmXTMx7PBFgFBRrA7+pSOXdcx2B05sgBNh3wbHM/GFBfB4cOaRMALQjk4fFj6oCAEHH8vpkqKGaEaUmohq1GjRgwYMKAibBF8pH17iPrDmdyPcKWa23WdjNx0osKiMTguNHFmdVzaCPEGEYQy4LFqYWncr8UrSxD8yufP/E7kk09Dg1AwRRRsL7KY4yArW4nOL71M7ODzZY4tCOVg0SLI37YDHnpI+qAgBIKKWEyVFDNCNUSSvVdHxBtEECqNIlULJRmmIASU9t/NIcq+G2KbuPUlj4s5AKFnwytWvQw3LAGk/wlCWWlQXydq1csgfVAQAoMzUZ1/F1MlxYxQzTCUfIhQ5SjPBUwQhPJRmv7nQPqhIPgPt9K9PiD9TxD8h4yBghBYHKV7TSbfjjeZ1PGJiWoxVhBqCCJkVUfkAiYIgcOR6dZoVEkvCz0KkmEWfrgmw5R+KAhlR/qfIAQOGQMFIbDIYo4gABJaWK1wVIzM2LAB6tQBu91tv13XybDbwWZzd+kGdfxPP8EPP0DPnqU+t9VqJTs7m4yMDO+x0X4mEOf0lapsG1Rt+3yxLSMjA6haVVIL+t+BAxAeDmdtdDsGyMnJQdd1z4ET4eFw4ACcPl3qPALSB90R28pOSfZVxf4HLn1Q+l/Aqcq2QdW2T8bAA9IH/YDYVnaq/RhoNisnhawst/12XcdssUBQUNH7QIDMTHj1VejYsdQpLqT/uVOVbYOqbZ8/x0BNr4RempaWxowZM3jhhRcq+lQ1msOHD9OsWbNAmyEIlUZycjJNmzYNtBmA9D+h9lGV+h9IHxRqH1WpD0r/E2obVan/gfRBofZRUh+sUCErIyODl19+mddee43MzEz3ql9CqbHb7Rw9epQ6dep4LH+akZFBs2bNSE5OJioqyq/nrsi2q9I5faUq2wZV2z5fbNN1nczMTBo3bozBUDUioEvqfyB9sDIR28pOSfZVxf4HMgZWJaqybVC17ZMxsGxIH3RHbCs7MgaWHul/7lRl26Bq2+fPMbBMoYW//PILK1as4Pjx4zRs2JCrrrqK7t27F+y3WCy88sorzJw5k/T0dHRdp2PHjmU5leCCwWDwaWUgKiqqwn60Fdl2VTqnr1Rl26Bq21eSbdHR0ZVoTcn42v9A+mBlIraVneLsq2r9D2QMrIpUZdugatsnY2DZkD7ojthWdmQMLD3S/9ypyrZB1bbPH2NgqYWshx9+mFmzZrltmzJlCk8++STPPPMM27Zt4/rrr+fgwYPouk7z5s155plnGDNmTGlPJQiCIAiCIAiCIAiCIAgFlErIWrVqFa+88gqgVLR27dqRkZHB/v37mTJlCueccw53330
3GRkZxMbG8sQTTzB+/HhCSpnQURAEQRAEQRAEQRAEQRAKUyoha968eQDce++9TJ8+ndDQUAD+/PNPrr32WsaOHUt+fj4DBw7k448/Ji4uzv8WC14JDQ3l6aefLvi/VJe2q9I5faUq2wZV276qbFt5kT5YeYhtZaeq21dWpP9VHlXZNqja9lVl28qL9MHKQ2wrO1XdvrIi/a/yqMq2QdW2z5+2lSrZe7NmzQgODuaff/4pknjru+++Y9iwYURFRZGcnEydOnXKbZwgCIIgCIIgCIIgCIIgOChVKYaTJ09y/vnne8we37t3bwAuuugiEbEEQRAEQRAEQRAEQRAEv1MqISsvL89rBnlH1vn69euX3ypBEARBEARBEARBEARBKESphCxBEARBEARBEARBEARBCBSlSvYO8M8//7B48eIy7R8zZkxpTye4YLfbOXr0KHXq1EHTtECbIwgVhq7rZGZm0rhxY4+hzIFA+p9QW6iK/Q+kDwq1h6rYB6X/CbWFqtj/QPqgUHvwtQ+WKtm7wWAoc8fRNI38/PwyvVdQJCcn07x580CbIQiVRnJyMk2bNg20GQAcPnyYZs2aBdoMQag0qlL/AxkDhdpHVeqDMgYKtY2q1P9AxkCh9lFSHyyVR1bz5s1FAQ4g+/fvp0WLFkycOJHIyMgi++12O8ePH6dhw4Z+X0GoyLYr85xWq5V9+/YxaNAgevbsWa52vvvuOy699FKCg4P9Yps/qcr2+WJbRkYGzZo1q1KFI0wmE2FhYTz++ONeJxLSBysPT7ZZLBYOHDjAFVdcQefOnQNmW1Xuf1CyfVWt/yUmJpKYmIjZbKZVq1Y88sgjmEymIsdJ/6s8/GGb2Wzm8OHDXH/99bRq1cqv9lXlPlhdx8CwsDDCw8N5/PHHvQpa0gcrj+psm9VqZf/+/fTv358+ffpUun3VbQx0sHv3btq0acPDDz9MREREkf3S/yqPqmwblM0+XdfZt28f7du356qrrqow2/w5BpZKyDpw4EBpDhf8zIkTJ2jTpg233noroaGhRfbbbDb27t1Lu3btMBqNfj13RbZd2ed85513yMjIKChQUBasVisRERFERUVVuUkyVG37SmNbVRLOs7OzqVu3LldddRVdu3b1eIz0wcrDm22vvfYamZmZ5erf5aUq9z/w3b6q0v8SEhJISEjgww8/ZOHChdxyyy0yBgYYf9im6zovvPACWVlZfu+vVbkPVtcx0Gw2Exsby3/+8x86duzo8Rjpg5VHdbdtwYIF5Z6Ll5XqNgY6OH78OOeccw7/+9//ZAwMMFXZNii7fV999RX//vtvhfZLf46BVU9CFLyi63qVVH2rG8HBwZQiolYQAAp+M0FBpU4tKFQiQUFB0r9rKLquV8kJo1A2NE2T/lrN0DRN+qDgF2QuXnp0XZc5qFChVLcxWVSRGsDs2bNp0aIFoaGhTJ8+3S9tnj59mgYNGpTJC+/iiy/mgQce8Isd5eGGG27g5ZdfDrQZgiAIgiAINZaqPg/VdZ077riD2NhYNE1j+/btbq937NjhF5sFz8h8XBCEiqBUQtbixYvZtGmTx30ZGRlYLBaP+z788EMmTJhQeuuEEvntt9+YMGECb7zxBgcOHODee+/1S7svvPACV199NS1btgTglltu4dxzzyUoKIjg4GAaNmzIJZdcwvz587Hb7W7vXbZsGVOmTPGLHeXhiSee4IUXXiA9PT3Qpgg1nKo2iQ8klSlkT5gwgVdeeaVSziVUXapK/6sqizjF4YuNFfE55Ea25lId5qHffPMNCxcuZOXKlaSkpHD48GG314HMqejAU78bN24cmqahaRohISG0bduW5557rtoVz5L5eMVSVcbAyqQs41R1GKMrksKfvyaMy6USssaNG8c777zjcV/dunVJSEjwuO+7777jtddeK711QomsXLmSnj17Mnz4cOLj4wkPDy93m9nZ2bz77rvceuutbtsvuugiDh8+zIEDB/j6668ZOHAg999/P1d
ccYXboBobG1slEiR27tyZNm3a8N577wXaFKEGE4hJvKZp1KtXj2HDhvH777/75XyueBvsjx07xr333kvr1q0JDQ2lWbNmXHnllfzwww8Fx1SmkH3XXXcxdepUmRzXYiqr/508eZJnnnmGVq1aERoaSqNGjRg6dCgbN270y/n8getNb3BwMK1atWLixIlui4yBWmiSG9maS3WYh+7bt4/4+HguvPBCGjVqxMGDB91elzVcKy8vr2wfsBQMGzaMlJQU9u7dy0MPPcQzzzzDjBkzKvy8vuLLdyDz8YojUEKyp/GlvJRGaCrLWFaW9/gyrvpCWUW0ihTfasK47LfQQl3Xq1VMZU2gbdu2PPHEE2zatAlN0xg3bpxf2v3qq68IDQ2ld+/ebttDQkJo1KgRTZo0oXv37jz22GN8+eWXfP311yxcuLDguMKdbunSpXTp0oXw8HDq1avHkCFDyMrKAlRVhenTp9O2bVtCQ0Np3rw5L7zwAgBt2rRh0aJFbjZ069aNZ555xqe2Aa688ko++ugjv3wvguCJQEziU1JS+OGHHwgKCuKKK64o9/l84cCBA1xwwQX8+OOPzJgxg507d/LNN98wcOBAt0WMyhCyHRPn9u3by+S4llNZ/W/UqFH8+eefzJ8/nz179rB8+XIuvvhiTp8+Xe7zFUdpb5QdN7379+9n1qxZvPXWWzz99NMF+wO10CQ3stWXxMREOnbsyJAhQ4rsqw7z0HHjxnHvvfdy6NAhNE2jZcuWRV6Dmo9OnTqVVq1aER4eznnnncdnn33mdv6LL76Ye+65hwceeIC4uDiGDh3q9b1Lly51e999993HxIkTiY2NpVGjRm5z2XHjxvHTTz/x2muvFdw0OzxhHMJ5ixYtuPvuuxkyZAjLly8veG9ubi733XcfDRo0ICwsjH79+rFt2zZAXR9jYmKw2WwA7NixA03TePTRRwvef9tttzF69GifPkdx34HrfLxBgwb873//k/l4JRCIOai38aWyyMvLK9NYVtbxz9O46tp/qys1YVyWHFnVmE2bNtG6dWtmzJhBSkoKc+bM8Uu769ev54ILLvDp2EGDBnHeeeexbNkyj/tTUlL473//yy233MKff/7J2rVrGTFiRIHoOXnyZF566SWefPJJdu/ezQcffEDDhg19OndJbQP07NmTrVu3kpub61ObglAaAjWJb9SoEd26dePRRx8lOTmZkydPAr5PQn2dUAcFBXHuuedy4MABxo8fj6ZpbN26lWuvvZb27dvTqVMnJkyYQFJSklv7rkJ2ZmYmN910EyaTifj4eGbNmlXkmG+++YZ+/foRExNDvXr1uOKKK9i3b59bm54mzgCXX365TI5rKZXV/9LS0tiwYQMPPfQQAwcOpEWLFvTs2ZPJkycXKVFtt9u99i0o3W+9YcOG3H777W7b77nnHqKjo4mLi+PJJ58ssoDouOlt1qwZ11xzDUOGDGH16tVu7bv2vaysLMaMGUNkZCTx8fEewww89eFBgwbx4osvun3ukq49ciNbPUlISGD37t18//33RfZVh3noa6+9xnPPPUfTpk1JSUnhp59+cnvtEH2mTp3K4sWLefPNN/njjz948MEHGTNmDFu3bnVrb9GiRYSEhLBx40befPNNr+8dPXo0P/30k9v7TCYTW7ZsYfr06Tz33HMFffO1116jT58+3H777aSkpJCSkkKzZs08fp7w8HA3gfvRRx/ls88+Y9GiRfz666+0bduWoUOHcubMGS666CIyMzPZvn07AD/99BNxcXGsXbu24P0//fQTF198sc+fw9N3UHg+/sMPP3DJJZfIfLyCCdQc1Nv4AiWPcd4cEIoTcwcNGlRkDliWuaan18XNhx14GlddIxGKE5PBu1Bd0nfl7X2+jLdZWVlMmjSJ6Ohor2M7VP9xWYSsakxkZCQHDhygX79+NGrUiLFjx9KrVy+uu+66Isf+5z//oW7duowcObLEdg8ePEjjxo19tqNDhw5eY6hTUlL
Iz89nxIgRtGzZki5dujB+/HgiIyPJzMzktddeY/r06YwdO5Y2bdrQr18/brvtNp/OW1zbDho3bkxeXh7Hjh3z+fMIgq8EchJvNpt57733aNu2LfXq1QNKNwn1ZUJ9+PBh1q1bR506dfjmm29ISEjAZDIVsSUmJsarnRMmTGDjxo0sX76c1atXs379en799Ve3Y7KyspgwYQI///wzP/zwAwaDgf/85z9ueU883TwA9OjRQybHtZTK6n+RkZFERkbyww8/lPg7K65vQel+6+vWrXObVC9atIigoCC2bt3Ka6+9xiuvvOI13QPArl272LRpEyEhIV6PeeSRR/jpp5/48ssv+e6771i7dm2R/umpDztujB34cu2RG9maR2nmoaA8PVq0aMHDDz9cbLv+nIdGR0dTp04djEZjgWeT6+v69euTm5vLiy++yPz58xk6dCitW7dm3Lhx3HTTTXzyySdu7bVr147p06dzzjnncM4553h97+jRo3nrrbcK3te1a1eefvpp2rVrx5gxY/i///u/gpvh6OhoQkJCiIiIKFisKlwdUtd1vv/+e7799lsGDRpU8H2++eabzJgxg8suu4yOHTsyb948wsPDeffdd4mOjqZbt24FwtXatWt58MEH2b59O2azmSNHjvDPP/8wYMAAnz+Hp+/A03z8xhtvlPl4BRPIOai38aW4Ma44B4SSxFxvc0AHvsw1PVHSmO3tcwcHBxdsmzhxolcxGbwL1SXNB7y9z5fxdtKkSWzbto1ly5Z5Hduh+o/LUsOzGuPIjdOlSxcA7r33XoYOHepx1ez+++/nlltuKRKq54mcnBzCwsJ8tkPXdTRN87jvvPPOY/DgwXTp0oWhQ4dy6aWXMnLkSOrWrcuff/5Jbm4ugwcP9vlcvrbtwOFim52dXaZzCDWTO+64gxUrVpCVlUWLFi148cUXufLKK0vdTuFJ/DXXXMOaNWsYMmSIW0hCcnIyN998MydOnCAoKIgnn3ySUaNGeW3X2yR+7dq1REdHA2qyEB8fz8qVKzEYDAWT0O+//54+ffoA0Lp1azZs2MBbb73FgAEDCtpxTKhBTUjnzJlTsIJaeEKdmZnJvn370HWdDh06lOr7yczMZNGiRXzwwQcF/XzBggVFPtu1117r9nr+/PnUr1+f3bt3FyThdUycHThCJVwnxy1atCiVfUL1xtf+l5aWxpAhQ8jPzyc/P5/777+/wNPJE4X7X1BQEPPnz+f222/nk08+oXv37gwYMIAbbriBrl27ur23uL4Fpfut22w2DAbnemOzZs2YNWsWmqZxzjnnsHPnTmbNmuX2WVauXElkZCT5+fnk5uZiMBi83tyYzWbeffdd3nvvvYL+uWjRIpo2bVpwjC992Ndrj/TVmkdp5qGg8u4U9vLwhD/nob7wzz//kJ2dXdBPHeTl5XHuuee6bSt8g1/ce88///yC14WvFfHx8Zw4caJE2xx92mq1YrfbufHGGwsE7uTkZKxWK3379i04Pjg4mJ49e/Lnn38CMGDAANauXctDDz3E+vXrmTp1Kp988gkbNmzgzJkzNG7cmHbt2vHHH3/49Dk8fQeF5+NDhgyhW7dubsfIfNz/+DoGArRs2ZKoqCgMBgN169ZlzZo1XtstaQ5a3PhS3BiXl5dXIHg6xgDHtQNwm3uCc54HReeArvg61/RESWM2eB5XX3/9dUDNxd944w0WLlzIZZddBsC8efNYvXo17777Lo888kiRebUv31Xnzp09vs+X8dZsNjN//nymTZvG4MGDMRqNRcZ2B9V9XBYhqxqzY8cO2rZtW+AhcfHFF3PkyBGPx1588cVursTFERcXR2pqqs92/Pnnn7Rq1crjPqPRyOrVq9m0aRPfffcds2fP5vHHH2fLli0lxnEbDIYiYRNWq9Wnth32ONTw+vXr+/x5hJrPhAkTmD17NqGhoWzbto0hQ4awf//+As8mX/F1Eh8UFMSrr75Kt27dOHbsGBdccAHDhw/36N0E3if
xPXv2ZMGCBRiNRlJTU5k7dy6XXXYZW7duxWw2+zwJLe2Euqz5D/fv34/VaqVnz54F26KjoznnnHPcjtu7dy9PPfUUW7Zs4dSpUwWrUYcOHSq4ufe2OiiT49qLr/2vTp06rFu3joiICLKysujcuTMjRozw2t899b8RI0bQvn37glCkr7/+munTp/POO++4hXOU1LfK81vv3bu32816nz59ePnll7HZbAXeGwMHDuSNN94gKyuLWbNmERQUVGSy7GDfvn3k5eXRq1evgm2xsbFu/dOXPuzrjbz01ZpHaeahe/fu5a+//uLKK69k165dxbbrz3moL5jNZgBWrVpFkyZNCrbbbDaOHj3qdmzhcdvbe0GFJDlw9eAA0DStSLVFTzj6dEhICI0bNy5ITO96k18cF198MfPnz+e3334jODiYDh06FNwTpKamFgjNvn4OKPodFJ6PJyYmcuTIEbZs2ULbtm0BmY+Xh8TERBITE0lLS6NHjx4F20srJG/atMnNS84bJc1BLRaL1/GluDFu6NChJTogeKM4DzFf55qe8GU+7GlcHTFiBHv37mXfvn0lisne8GU+UBhfxlvH2O762QqP7Q6q+7gsoYXVmB07dnDeeef5vd3zzz+f3bt3+3Tsjz/+yM6dO71OlEEN1n379uXZZ59l+/bthISE8Pnnn9OuXTvCw8Pd4oxdiYuLK8j9A5CRkcG///7rU9sOdu3aRdOmTYmLi/Pp8wi1gw4dOhRMzDRNIy8vz+vkuzg8TeI9iVPx8fEFq5ONGjUiLi6uYFLnCW+T+IiICNq2bUvbtm3p0aMH77zzDllZWcybN89tErpjx46Cx+7du4vEzpd2Qt2uXTs0TeOvv/7yekx5uPLKKzlz5gzz5s1jy5YtbNmyBXBPdO1N9JPJce3F1/5nNBqJiIgA1GpmScVpvPW/0NBQLrnkEp588kk2bdrEuHHjiiS6Lalvlee37gsmk4m2bdty3nnnMX/+fLZs2cK7775b5vZ8wddrj/TVmkdp5qEPP/wwU6dO9elYf89DS6Jjx46EhoZy6NChgjHW8YiPjy/ze73lufJESEiIR3HK0aebN29epLpis2bNCsKtHFitVrZt20bHjh0BCvJkzZo1q0C0cghZa9euLciPVd7P4Tof//nnnwkODuaLL74o2C/z8bLjyFNXuP/4OgaWlpLmoMWNL8WNcQ7B8+uvv6Zjx47Mnj2bc845p8i9nSf88bk84ct82NO4On/+/HKf25f5QGFKM9f3heo+LpdayPrnn39YvHhxkUdx+/755x+/Gy6oC1hh111/MHToUP74448iFzGH6+GRI0f49ddfefHFF7n66qu54oorGDNmjMe2tmzZwosvvsjPP//MoUOHWLZsGSdPnuTcc88lLCyMSZMmMXHiRBYvXsy+fftISkoquCgOHDiQ5cuXs379enbu3MnYsWPdcgYU17aD9evXc+mll/r9O6ptbNu2jXvuuYdOnTphMplo3rw51113HXv27KnQ85rNZp5++mmGDRtGbGwsmqa5VSYqTG5uLpMmTaJx48aEh4fTq1cvr7Hu48ePJzw8nB49ejBo0CA392ZfKYuY/Msvv2Cz2YqdGPo6idc0DYPBQE5Ojt8m01B0Qh0bG8vQoUNJTEx0q0LkIC0tzWM7rVu3Jjg42C3pZXp6utvv5vTp0/z999888cQTDB48mHPPPbdUK/F//PGHTI5rKaXpf2lpaZx33nk0bdqURx55pNjfi6/9r2PHjh77gzfK+1t3THIdJCUl0a5duyK5dBwYDAYee+wxnnjiCXJycorsb9OmDcHBwW7tpqamuvVPX/qwr9ceuZGtefg6D/3yyy9p37497du396ldf85DfaFOnTo8/PDDPPjggyxatIh9+/bx66+/MmfOHDcxpjTvnT17tk/pPBy0bNmSLVu2cODAATfvjOKIiIjgrrvu4pFHHuGbb75h9+7d3H777WRnZxdUnKtbty5du3bl/fffLxCt+vf
vz6+//sqePXsKxK3yfI7C8/HPP/+cM2fOuKUjkPm4/ynNGKhpGgMGDKBHjx68//77xR7ryxjoaXzxZYwrzgHBm5hbEr6MU/7C8bmfeuopLBYLbdq0KVFMhqKfzdf5QOH3+TLeOsZ2h8ceFB3bHVT3cbnUoYUbN250+2c50DTN677yxq4LRbHb7ezcuZMnn3zS72136dKF7t2788knn3DnnXcWbF+/fj1NmzYlKCiIunXrct555/H6668zduxYtzwerkRFRbFu3TpeffVVMjIyaNGiBS+//HJBHPGTTz5JUFAQTz31FEePHiU+Pp677roLUJVYdu7cydVXX010dDRTpkxxU+1LattisfDFF1/wzTff+P07qm1MmzaNjRs3MmrUKLp27cqxY8eYM2cO3bt3JykpyasLbHk5ffo0zz33HM2bN+e8884rMTx23LhxLF26lAceeIB27dqxcOFChg8fzpo1a+jXr5/bsXPnzmX27NmsXbuWXbt2lekatWPHjiJVy4rjzJkzjBkzhnnz5hV73NChQ5k8eTKpqaluLteOSbwjtHDOnDmYzWauvPJKt0mo3W6nX79+pKens3HjRqKiohg7dqzPdrpOqFNTU7Hb7SQmJtK3b1969uzJc889R9euXcnPz2f16tW88cYbHl2o69Spw9ixY3nkkUeIjY2lQYMGPP300xgMhoLvu27dutSrV4+3336b+Ph4Dh065FYavCQ2bNggk+MajiOsIj093W3SXpr+FxMTw2+//cbx48cZMWIEI0eO9Foht3D/O336NCNHjuSyyy7jkksuISYmhp9//pnp06dz9dVX+/w5yvtbP3ToEBMmTODOO+8suMH0VonIwahRo3jkkUdITEwskmA7MjKSW2+9lUceeYR69erRoEEDHn/8cbfx3Jc+7Ou1R25kaxalmYcmJSXx0Ucf8emnn2I2m7FarURFRfHUU095PN6f81BfmTJlCvXr12fq1Kns37+fmJgYzj//fEaPHl2m93bv3p3HHnvM5/M//PDDjB07lo4dO5KTk+OTlwrAiy++iK7r3HzzzWRmZvJ///d/fPvtt25zhwEDBrBjx44CISs2NpaOHTty/Phxt3Cjsn4OT/PxSZMmyXy8ginNGLhhwwaaNGlCSkoKQ4YMoUuXLkXC6hx4m4MWpvD4UtIYt2XLFn744QcuvfRSGjRowJYtW9wcEFznnpGRkQU5YUvCl3HKnzg+9wcffMDUqVO5++67C87dvHlzpk+f7iYme/pssbGxPs0HPL2vpPE2MjKSW265hRkzZtCpUyfi4+OLjO0Oqv24rJeCFi1a6C1btizzQygfH3/8sT5kyBDdYrF43J+fn68vWrRIHzFihMf9a9as0a+99lqfzrVy5Ur93HPP1W02W0Hbf/75p56fn18248tAec85d+5c/ZJLLimyfeHChfrSpUvLZVteXp7+xRdf6Hl5eeVqp6Lwt30bN27Uc3Nz3bbt2bNHDw0N1W+66Sav7ztz5oz+2WefebXtgw8+0M1mc5H3paen64B+4sQJPSUlRdd1Xd+2bZsO6AsWLPB4ri1btuiAPmPGjIJtOTk5eps2bfQ+ffoU+/muuOIKfdWqVcUek5ycrMfHx+t//PGHruu6brPZ9IiICH3lypUFxxTXBy0Wi37RRRfpixcvLvY8Dnr27Km/+eabBa/HjBmjAwWPOnXq6D169HD7Ldvtdv3VV1/VzznnHD04OFivX7++PnToUP2nn34qOGbAgAH6/fff73auq6++Wh87dmzB67///lvv3bu3Hh4ergP6P//8o+u6rh89elRPSEjQW7RooYeEhOhNmjTRr7rqKn3NmjVe28/IyNBvvPFGPSIiQm/UqJH+yiuv6D179tQfffTRgmNWr16tn3vuuXpoaKjetWtXfe3atTqgf/75515tzs/P13fs2KFHR0frmzdvLtg+Z84c/euvv/bpO64oqvv1wdH/0tPTK9my4nn//ff1yy67TLdYLKXuf67
cfffd+qefflrsMa79z2Kx6BMnTtQ7duyoR0dH6xEREfo555yjP/HEE3p2dnbBe3zpW6X5rbuOgQMGDNDHjx+v33XXXXpUVJRet25d/bHHHtPtdntB22PHjtWvvvrqIp9l6tSpev369XWz2VzExszMTH306NF6RESE3rBhQ3369Ok+9eEePXrot99+e8H4XNK1Jycnp0hfddi2YcOGYv8XZaEq90FfbKuKffDff//VmzZtqv/1119ej/GlDy5YsEB/6KGHSjxfTZiHViTVyTZP8/ElS5boH3/8cSDMq7Zj4MKFC/Wrrrqq3GPgww8/7HUu7cDTHHTw4MFFfm+u44uuFz/G7d69Wx86dKhev359PTQ0VG/fvr0+e/bsgrY8zT3//PNPvX///kXG1rLMNQu/x5cx29u4+sILL+ixsbF6enq6npOTo9977716XFycHhoaqvft21ffunWr2/GFP9u///5b4nzA2/t8meunpaXpV155ZbFju6dx+dtvv3X7n1QE/hwDSyVkCYGlJCFr0KBBet26dfXw8HC9SZMm+qZNmwr2DR48WI+Li/O4zxuzZs3SDx06pOt69ZxAzJs3z+OES4Qs/9G9e3e9e/fuXvc///zzutFo1JctW1bEtrfeeksH9MTExCLv83QBK0nIeuSRR3Sj0Vjkovfiiy/qQMFv2RPDhg3TX3vtNa/7db2okOUJb5MIu92u33DDDfrTTz9d7DlcqamTeLPZrEdHR+vvvPNOudrJz8/Xn3rqKX3IkCFu20XIKpnqOol3FbI84a3/HTt2TM/IyNB1XU3uOnXqpP/+++/Fnquq9T9PE+5A4ejDU6ZM8fn78LawJEKWZ6piH/RFyCpuHurAVyFL16v/PLQiqU62eZqPi5BVelyFLE94GwPNZnPBGJiZmal37969iNBSmKo2BpYWf801vVGV+5+u+2afp3G5uglZUrWwBvHdd9+xd+9ejzkzvFWwKI4HHnjAT5YFhttuuy3QJtRodF3n+PHjdOrUyesxkyZNYuvWrfz3v/9l1apVBWVxN2/ezMyZMxk9ejR33323X+zZvn077du3Jyoqym27o4rJjh07aNasGenp6axatYqrrrqKsLAwPv/8c9asWeNzEtriuPTSS9m+fTsWi4WmTZvy6aef0qdPHzZu3MjHH39M165dC/JtLFmypNi8XJdffjl79+7lyJEjpc5xVZXYvn07f/31Fz179iQ9PZ3nnnsOoFQhWd4ICgritddeK3c7Qs3AW/87ePAgd9xxR0GS93vvvbfEnHg1pf/5A2992HE994Xg4GBmz55dUSYKFYgjtLe4BMQOipuHOnCt8lkS1X0eKihkPl45eBsDjx8/zn/+8x9AVbu8/fbb3aofeqK6jYEVOdesqdSEcVmELEEQysT777/PkSNHCgYLTwQFBfHxxx8zbNgwrrnmGr7//nvS0tJ45ZVXGDp0KAsWLPBb/HpKSorH6kKObY4S2pqmMW/ePMaPH4+u67Rt25YPPvjAa8Jaf0zi+/Xr51Pi1sLUlEn8zJkz+fvvvwkJCeGCCy5g/fr1fkksOWrUKNq1a+cHC4WagLf+17NnT3bs2FHq9mpK//MHhfvw2rVrPZZn94bcyFZfEhISSEhI4MCBA1x00UWBNkcQBC94GwNbt27Nb7/9Vur2qtsYWFFzzZpKTRiXRcgSah16MWXXBd/466+/SEhIoE+fPiUmEQ8LC2P58uUMHDiQ4cOHk5eXR/v27fnwww+LlJIuDzk5OYSGhno8v2M/qKSka9as8bldxyT+8OHDBd5dgu+cf/75/PLLL4E2QxCqNSUVuqhIPPVhm83G3r17A2SRECgcXo2CUF7kdyT4E5lr1k5KdRd5//3389RTT1GvXr0yn/DkyZNMmTKF119/vcxt1Faio6M5fPgw33zzDZGRkUX222w2jhw5wqFDh7y6dJeVimy7Ms+Zl5fHoUOH6Nu3r1/aq40cO3aMyy+/nOjoaJYuXerT/yYqKoq
ZM2cyaNAgAG699VbCw8P9ald4eDi5ublFtlssloL95cFgMKDrOpmZmeVqR6g4bDYbOTk5lXaNEioXg8GAxWIpk3ejUPXIzc0lLy+vQvqrtm0bPadORatfHy680O/t10ZMJhMZGRn8+OOPHDlyxOMxMg+tPKqzbVarlQMHDtCrV68AWFd9MRqNWCwWEQGFCsNsNpe7AmxlUiohKzExkQULFpCQkMAtt9xSqpCOv//+m3feeYe33nqLnJwcEbLKQM+ePfnrr7/4+++/PXqe2Gw29uzZQ/v27StkAlFRbVfmOTVNo3PnzgwYMMAv7dU20tPTueyyy0hLS2P9+vU0btzYp/ft37+fm266iQ4dOpCTk8O0adO49tprad68ud9si4+P9zi5TklJAfDZVm+YTCZOnjzJ8uXL+emnnzweY7fbC0Ic/T0QVGTbVemcvuLJtry8PHRdp3379gG2TqgI2rdvz65du5g2bRoRERFF9kv/qzz8YVtubi7BwcG0bt3av8bpOoY5c4jbuRPDnDnQpw9UQAn22kZoaCiZmZnk5uaya9cuj8fIPLTyqM62aZpGx44dGThwYACsq760b9+e3377jWnTpnlcnJUxsPKoyrZB2ezTdZ2cnBwuvvjiijXOj5RKyNq2bRv33nsv06ZNY/r06fTp04fBgwfTp08fzj33XOrVq0dkZCRms5nTp0+ze/duNm/ezOrVq9m6dSu6rtO3b99qn1gsUDgGg7vuuqtIQmtQKxxfffUVw4cPJzg42K/nrsi2q9I5Be9YLBauvPJK9uzZw/fff0/Hjh19el9KSgqXXHIJwcHBrF69moyMDC688EKGDx/OunXriI2N9Yt93bp1Y82aNWRkZLj1jy1bthTsLw+apmGz2RgyZIjX32N+fj6bN2+md+/efg2brOi2q9I5fcWTbQaDgebNm9O0adMAWydUBG3btuXkyZNceOGFHoUs6X+Vhz9sMxqNtGnThgYNGvjXuKQktI0byQ8PJ2jjRkhKUmKW4BduvfVWj3NQkHloZSK21T46dOjA8ePH6dOnj4yBAaYq2wZlt69u3bo+399VBUr1zZ9//vls2LCBpUuXMmvWLDZt2sTmzZuLfY/D/fHCCy/kwQcf5Nprry27tYIgBASbzcb111/P5s2b+fLLL+nj401BamoqQ4cOxWw2s2HDBpo2bYrVauXpp5/m2Wef5fLLL+f777/HZDKV28aRI0cyc+ZM3n77bR5++GFArfgvWLCAXr16+a3qygUXXFDsJD49PZ2+fftWyCS+otquSuf0lapsm1Cx9O7d2+tijvS/yqHK2qbrkJgIubnkRkcTlpWlXvfuLV5ZgiDUCGQMDDxV2Tao+vb5izJJiCNHjmTkyJHs2LGDL774gh9//JHt27eTlZVVcIzJZKJ79+4MHDiQa665ptzeEIIgBI6HHnqI5cuXc+WVV3LmzBnee+89t/2jR4/2+L65c+eSnJzM2rVr3UKRW7duzRdffMHw4cNZtGgR48eP93ruOXPmkJaWVlB1cMWKFRw+fBiAe++9l+joaAB69erFqFGjmDx5MidOnKBt27YsWrSIAwcO8O6775br8wuCIAjVgKQkWL8ePToabDb06Gi09evFK0sQBEEQahjl8oXr1q0b3bp145lnngEgOzub9PR0YmJi/J7IWRCEwOEoXb9ixQpWrFhRZL83IWvSpElcc801dOrUqci+vn37kpSURJcuXYo998yZMzl48GDB62XLlrFs2bKC8zqELIDFixfz5JNPsmTJElJTU+natSsrV66kf//+JX5GQRAEoRrj4o1FvXqQkQEmE6Sni1eWIAjVlsTERBITE7HZbIE2RRCqFH4N6oyIiPAYsysIQvWmrKXfg4KCPIpYDrp27VpiGwcOHPD5fGFhYcyYMYMZM2b4/B5BEAShBnDWG4uYGKdgpWnqtXhlCYJQTUlISCAhIYGMjAy3xduazrZtGlOn9qR+fU2KzwoeqXpp9gVBEARBEAT
BV1y9sfLy0FJT0fLz1T6TSW1PTFTHCYJQq3EIJNu2iYdmVUXXYc4cAzt3xjFnjkEu3YJHRMgSBEEQBEEQqi+u3linT8Px45iOHVN3Q4W9sgRBqLWIQFI9SEqCjRs1wsPz2bhRk0u34BERsgRBEARBEITqias3VlAQnPXEsoWGOkMMxSurzCQmJtKxY0d69OgRaFMEodyIQFL1cb2kR0fnyqVb8IoIWYIgCIIgCEL1xNUby2wu2JzvWnRIvLLKTEJCArt372bbtm2BNkUQyoUIJNUDxyU9OlpH09RfuXQLnhAhSxAEQRAEQah+uN6ZmkyQlVWwKz8szP1Y8coShFqNCCRVn8KXdJBLt+AdEbIEQRAEQaiySGiT4BVXbyy7HbKz1faQEPSgQoW5xStLEGotIpBUD3wpPisIDkTIEgRBEAShyiKhTYJHivHGIjLS83vkzlUQaiUikFR9PImNDuTSLXhChCxBEARBEAShelH4ztQlP1aRuyAHcucqCLUOEUiqB4XTHR4+rJGZGYLVKpduwTN+E7J2797NO++8w9SpU1m+fHnBdrvdTl5enr9OIwiCIAiCINRmHHemWVlgNILFAunpKrwQ0A0GDHl5anvhh9Go3id3roJQK/DkjeVABJKqQWGxsU4diIvTMRh0x2VdREehCOUWspKTkxkyZAhdunThzjvv5IknnuCLL74o2D9v3jzCw8P54YcfynsqQRAEQRAEobZjtUJysrqzMZvhzBnIz1dCVlAQWlYWQRYLmtkMmZnuD7NZve/wYdWOIAg1luK8sRyIQBJ4PImNYWFgMlkJDVWvRXQUChNU8iHeOXPmDAMGDODAgQN07tyZ/v37M3fuXLdjrrvuOu655x6WL1/O4MGDy2WsIAiCIAiCUMsJCYFPPoHUVPU6Lw/++AN+/RU6dSL/ggvYvG4d/fv3Jzg42HMbsbGqHUEQaiwOgcRkgpMnVbc3Gt2PKSyQdOoUEFNrLa5iY1xc8ceaTJCWpo7v3buoh51QuyiXkDVt2jQOHDjAww8/zLRp09A0rYiQVbduXbp06cKGDRvKZaggCIIgCIIgANCwoXo46NoV/vtf9dxqxbx/P3ToAN6ELEEQajSuAonVqqKPz5yBpk2Lqh+uAkliYuXbWpspLvSzMIVFxz59KsNCoapSrtDCL7/8kpYtW/LSSy+hFfPLa926NUePHi3PqQRBEARBEARBEAShRBwCSZ06kJGhtmkahIcXjR90FUikQG7l4Sn0MzMTTp2CrCzNY6inhIIKDsolZB08eJDu3btjMBTfTEhICGfOnCnPqQRBEAShWrBtm8bUqT3Ztk183gVBEAShsnEVSHJynIJHbCx4u211CCTz5lWenbUdT95Y6ekqDDQ5GfLzi/6zJFeW4KBcQlZYWBiZmZklHnfo0CGio6PLcypBEARBqPLoOsyZY2DnzjjmzDHIaqEgVDRLlsCXX6olfEEQBJwCSVSUEkZACSB163p/j0Mg2bSpUkys9XgqPmuxqHocjkqFui7FZwXvlCtHVocOHfj111/JysrC5KUUxKlTp/jtt9/o1atXeU4lCIJQq9G2baPn1Klo9evDhRcG2hzBC0lJsHGjRnh4Phs3BkkOB0GoSKxW5T6Rna3uUL/91ru7hSAItQJXgSQ/Xz1AiVr5+eqykZdnwGIpmpPJIZAIFU/h4rOgBCxHMdmgILBYgrymL3ItPit1O2on5RKyRo4cySOPPMKECRN44403PIYYPvLII2RnZ3P99deX51SCIAi1F13HMGcOcTt3YpgzRykjUqqlyuEayhAdnUtWVphU1hGEimTHDiVigbouiojldxITE0lMTMRmswXaFEHwCYdAEhEBJ044PXaCg1X+JdBKFEgcBVGrAjW1DxYuPgtqMXDmTPX8mmvsxMdvLrb6rBSfrd2US8hKSEhg0aJFvPPOO/zyyy+MGDECgH379vHKK6/w6aefsnXrVrp168a4ceP8Ya8gCELtIykJbeNG8sP
DCdq4UUq1VFEcoQzR0To2m/q7fr0m/y5BqChcK2L36xc4O2owCQkJJCQkkJGRIWlChGqBQyB5/32YP19tu+giuP9+9dxqzWfdOu8CidkMPXpUosElUJP7YOHis99+C2Fh6vmgQTqZmWYpPit4pVxCVlhYGN9++y2jRo1i06ZNbN++HYANGzawYcMGdF2nR48efPHFF16VVEEQBKEYXNx8cqOjCXMkBRA3nyqFqzdWvXqqQpLJpHJzyL9LECoIh5BlMKhOJgiCgBqHf/zRKYo89BC0baueW62wf793gcRR4VCofHbtcj7v1EmXZO5CsZRLyAKIj49nw4YNfPvtt6xatYr9+/djt9tp1qwZl112GVdffbVX101BEGofjopu9etrkurJF866+ejR0WCzoUdHozlKtYibT5XBtfJOZqbGmTOhBAe7V9aRf5cg+JHDh+HgQfW8a1eVAEcQBAE4dAjy8tTzfv2cIpZQdcnPh9271fPGjVXYoCAUR7mFLAdDhw5l6NCh/mpOEIQaSOGKbpLqqQTEzadKYrer/Bt//KEmXbfe6vw3xcWpf092djCHDmnUrasmZ/LvEgQ/s3Gj87mEFQqC4ELr1rBiBaxcCR06BNoawRf++UfNowC6dAmsLUL1wG9CliAIQklIRbdS4nDzMZnQjh3DqGkQHS1uPlSeZ5+uw7Fj8OefTuFq9273qkZ16ji9sTQNQkKctaBTU1XU048/1up/lyD4H1chq2/fwNkhCEKVJDQUrr020FYIvrJzp/O5CFmCL5RbyMrPz+fTTz/lhx9+4OjRo1gsFo/HaZrGDz/8UN7TCYJQTZGKbqXE9QvLyYGsLMJtNlVi3mSCtLRa6+ZTGZ59ug4TJ8Jvv8GZM8UfN3++0xsLVE6OqKg8srPD0XWw2eDkSZVodsMGqbAjCOUmJwd+/lk9b9BA4oYEQRCqOW3awIgRKk9W166BtkaoDpRLyDp58iSXXnopv//+O7quF3us5MkShNqNVHQrJa5Jl5KTCzZrx49Dixa12ivLX559WVnK02r3bjAa4aabnPs0TXlieRKxGjSAc8+FTp2USDVlitMby/HeqKg8GjQIIyVFw2JR7f/2G1xxhdIf27Ur00cXBAFg2zb3BDgyxxQEAZUbKyQEGjUKtCVCaeneXT0cWK2Bs0WoHpRLyJo4cSK//fYbbdu25e6776Zdu3bUqVPHX7YJglBDkFRPpaTwF+ZKdrb6AuvUqZVeWWX17MvLg7//VqKVI0Tw4EHVHkB8vLuQBdCxIxw5ogSrjh3V33PPhfr1nbbcfLNq27HNldBQaNkSTp+GEyfUpOyXX2D0aLjrLhg7VoUdCoJQSjp0gAceUOGFAwYE2hpBEKoIr7wCmzfDZZfBhAlSA0IQajLlErJWrlxJw4YNSUpKIlZKCwiC4AVX5yJXr5Va7FRUPK5fmKclqWPHIDKyVn6BpfXs+/VXePlllUTUZvPebkqKymdVt65z24MPwqOPehfIPP2uC6NpKuQwMlKtFJvNSsDds0dELEEoMw0aKEV49OhAWyIIQhVh714Vvg8q8jgiIrD2CIJQsZRrGp2Tk0Pfvn1FxPKRO+64g/j4eKKioujSpQsrVqwItEmCUOG4etAYDHDihPOO32RS2xMTnZ4xtR7XL8xkUl9aXJz7sqLNplx8atkXWPirAfU3OxsefxxmzlShgq6EhSlPrMIiVnCw8rIaNQqeego+/ljl0S/8Xm8ClSdbiiMsTKXxCQ1VDnUTJ/r2mQVBEARBKJnFi53Pb74ZgqSkWbUhOVkFGQhCaShXF2/Xrh05OTn+sqXGM2HCBGbPnk1oaCjbtm1jyJAh7N+/n3qFQ4cEoQbhUniP5GSw2yE0NIyoKKXR1EKnouIp7OYTHAz166PrOllhYUSdPKlUlLy8WufW5vTGgjNnNFJTwzl+XMNqVRFGR48681c5cIhHTZs6QwQ7dlTby5N03RdvrMIYDNC4sRLe/vrL/d/1889qX+PGZbd
JEARBEGojR4/Ct9+q5zExcPXVATVHKCUzZsCmTdCsGSxaJCGhgm+UyyPr1ltvZe3atRw+fNhf9tRoOnToQGhoKKCS3+fl5XHkyJEAWyUIFYer18rp00rEArDZxCvLIyW4+ehGIzRsCE2aQPPmamMt+QIdX01ODpw6paoA5uYasduVQGS3q22u5ZtBiVVr1iiPq6eegpEjlZBVHhHLYUtWlkribrEUfeTlGTxuNxqVkOX67zpzBiZNghtugGXLavS/EYDNmzdjMBh4/vnnA22KUB35/HPV0R0DiiAItZ733nNeEm64QXlBC9UDu11VKgQ1r5J024KvlEvIuueee7jiiisYNGgQ3377LfYaNKkwm808/fTTDBs2jNjYWDRNY+HChR6Pzc3NZdKkSTRu3Jjw8HB69erF6tWrPR47fvx4wsPD6dGjB4MGDaJLly4V+CkEIbC4eq00aaIEh7AwiIvL8Zorq1bjg5uPHhPjvlRVS77ApCT48Uc1ybFYnNuDgtTXERennNcuvbToe8sjWnnCalXehSaTynmVmen+MJs1LJYgzGbNwz71vsOHnenP3nxT5c3KzoYXX4T771eRozURu93Ogw8+SI8ePQJtilAdyciAqVPhf/+D224LtDWCIFQBzpyBL75Qz8PD4brrAmqOUEqSk9WlHaBz51pTu0jwA+WOHn7rrbcYMGAAw4cPJygoiPj4eAweMthqmsa+ffvKe7pK49SpUzz33HM0b96c8847j7Vr13o9dty4cSxdupQHHniAdu3asXDhQoYPH86aNWvo16+f27Fz585l9uzZrF27ll27dqFJbxVqKK7ORXFxamBq2RKMRh2z2f1Yk8m9AF+txNXNJzLSqdZYrRAUxNacLjx/bBxP2BfSO2KX+3uNRvW+GlrBUNdhyhTlcRUcrD5eUBDUq5dN/fqRaJqGrqsKg4sWwZAhFfsVhITAJ5+o5PCesFrzWbduM/379yc4ONjjMbGxToHt/vvVZ/z8c/V60ya4/np45BFVeakm/TvffvttevXqRXp6eqBNEaojSUlOt4vOnQNriyAIVYKPPlLZFgBGjJCwtOqGqye9+HcIpaFcQlZycjIXXXQRycnJ6LqO1Wrl0KFDHo+tboJNfHw8KSkpNGrUiJ9//tnr6vHWrVv56KOPmDFjBg8//DAAY8aMoXPnzkycOJFNmzYVeY/RaGTw4MG8+uqrtGvXjuHDh1foZxGEQLBxY1HnotBQz2FThZ2KOnWqTEurCIXdfBwcO4auwxz9Kdbb+jDneA696m12iht5eeoRGel08/G3C1KASUqCP/5QFYjy89XfJk10zGanF3Blpwtr2FA9PGG1wv79Zjp0UMJbSZhMKln9wIHw3HMqdDIzU4VCrlkDkycr4auyefHFF9mxYwdbt24lNTWVBQsWMG7cuCLH5ebm8tRTT7FkyRJSU1Pp2rUrzz//PJdcconbcadPn+bVV18lKSmJBx54oHI+hFCzcJQkAyi0UCgIQu0jKws+/VQ9DwqCm24KrD1C6dnlsjYr6xNCaShXaOGkSZM4dOgQffv2ZdmyZezcuZN///3X42P//v3+srlSCA0NpVGjRiUet3TpUoxGI3fccUfBtrCwMG699VY2b95McnKy1/fm5+fzzz//+MVeQahK5OfD7bcrjxVfKrpBrUn15B2Hm8+33zof8+dDu3YkNR3J+rDBhEYbWB99OUnTflL7//MfqFcP4uPhtdfU+2uYiOXq2deypfLua95cOaEVpqr8hrRt2+g5dSratm2let+FF6p/4WWXObetWaPCJH780c9G+sC0adP4888/Oe+884o9bty4cbzyyivcdNNNvPbaaxiNRoYPH84GV9EBePzxx3nggQeIiYmpQKuFGovdrtwVQanZ558fWHsEQQg4K1aohR+A4cNVwReheuHwyNK0WrqQLZSZcnlkff/997Ro0YLVq1cXJDGvbWzfvp327dsTVciPtWfPngDs2LGDZs2akZ6ezqp
Vq7jqqqsICwvj888/Z82aNUydOjUQZgtChaHrcNddsH+/GpSOHwcfNGE3j5pS3v/XHAq7+Zw8iR4aRmLKTeRq4dSJtZKVFUziqpb0vgG0vn2dHgoffQRXXhkYuyuA3bvVvWpKitOz72wBR8A3z76AFHHUdQxz5hC3cyeGOXOUEaXwSI6KUmGUAweqfFlpaerx/PPQo0flJkHds2cP7dq184tX8vbt29m2bRuJiYmVZr9Qw9i921mfvVcv39wdBUGo0YwYoeYKixfDmDGBtkYoLTk5sHevet6mjfpfCoKvlEvIysnJYeDAgbVWxAJISUkhPj6+yHbHtqNHjwIqtHLevHmMHz8eXddp27YtH3zwAd26dfPadm5uLrm5uQWvM85mwrNarVgdWYJdcGzztK+8VGTbVemcvlKVbYPA2vf66waWLTNgsynnoOBgyMlxqg66rpOXZyAnR0fT3AtEGAyQlaXx1ls1p3BEuTh8mKSsLqw3n090fRs2DaKjddav15RQc+21sHw5/Pkn/POPKs13442BtrrcrFoFL7ygNL2QEGeeNV8onG+t0qPak5LQNm4kPzycoI0by6yoDRoE3bopMWvtWnj00cqv5NPQW+ykC8V5JT/22GMkJyfTrFkzfvrpJ/7++2+aNGkCQHp6OkFBQezbt48FCxZU2GcQahCuHn59+wbOjlpGYmIiiYmJ2Gy2QJsiCEUICYGrrlLreNUsi42Amr5K2kOhrJRLyOrYsSNnzpzxly3VkpycHI9CXtjZuq85OTkAREVFsWbNmlK1PXXqVJ599tki27/77jsiipGsvVVM9AcV2XZVOqevVGXboPLtW7OmGStWtCYnx4SmGQgJsZKVZScrq/CRQVgsuZ6awGCAXbu8ZNGuZeiHkkk8OYpcezD16hjJsNoxmVSFOyXUGNAefRTGjVMuSm++qTKdV1PfeptNRUh+8IF6/eefqnpfgwa+T1AD6pXlEgeZGx1NWDkT8MfGwowZ8OuvcMEF7vsyM1V4ZaBXL331Sr7jjju44YYbCvbff//9tGrVikcffbRS7RWqMRs3Op+LkFVpJCQkkJCQQEZGBtHR0YE2RxA8IiJW9cQ1P5YkehdKS7mErHvvvZdbbrmFXbt20bmWyqjh4eFuXlMOLGcrjoWHh5e57cmTJzNhwoSC1xkZGTRr1oxLL720yE0DKO+b1atXc8kll3itlFVWKrLtqnROX6nKtkFg7Fu+XGP9egMxMcor5pZb7AwebATckxnl5+ezadMmLrzwQoKCPF+CgoOjOPfcire5qpP0c5DyxjKYybDEkK/nexBqOinf+s8+g+xsmDVLlaevZqSlqaTmjrBSXYewMCVsGo3OIo4O9KwcjMdPY7EFoxVKxBawIo5JSbB+PXp0NNhs6NHRaOVU1DStqIgFMG0a/P47PP205/2Vha9eyREREW4LMOHh4URGRhabL0u8kquu52+l23bqFMbdu9Xzdu2wxcSoqgpeqO7fXVW0WxCqEvn5Krm7UL1JS1NzNptNhCyh9JTrEjB69Gh2797NoEGDmDJlCpdddhnNmzf3l23Vgvj4eI4cOVJke0pKCgCNGzcuc9uhoaEevb2Cg4OLFSdK2l8eKrLtqnROX6nKtkHl2bdmjbqxdggGDz4I//uf51oSViscOmSmc+cgr7ZlZFTd77Sy0HVITOpOrj2YPC2YjBNGbDYTQUEa0dGFwucSElQ28NRUWL0arr5a7agm7NkDDz8MZzUPgoLUb2juXPWRXIs4KnQ4lQ2WYDiRDXERgLtaZTJVchFH16z09epBRgaF3Of8pqj9+CN88416fuedcMMNcM89SvirbHz1Si7MwoULS2xbvJIDd05fqSzbGm3dyjnp6QAcjI7mwFdf+fS+6vrdZWdnV6IlglC90HW47TZo0gTGjoX27QNtkVBW7rtPzWP++ksV9BGE0lAuIcvoUjZq/PjxxR6raRr5+fnlOV2VpFu3bqxZs4aMjAw3L6ktW7YU7Be
EmszPP8Pjjztj3G+8UUW6CeUjaUM+60+cQ4wxkxS7M1dRSopKjhkd7eqVFaVmA46b/mnTVL6salDB8LvvlNkOx5vYWJg+XeWHuvhiJWQVYfsO9IceIjckm9DwCLSXXvZYwSw2thK/grPeWMTEOAWrCopz7NABundXIYeg8vxv2gTPPANdu/rlFD4jXskVR1X2/K1025o2xRAUhLZpEx3vuIOOJfzQq/t35/A+FAShKNu2qZC0Xbvg4EFYskRCC6szoaFQQnFkQfBIuYQsvRT1zUtzbHVi5MiRzJw5k7fffrugYlNubi4LFiygV69eNGvWLMAWCkLFYberHD55eer15ZfDAw/IhKK86Dokzsol1x5MbFA6+br7pTotTYXaBQe7OPtcfjl8+SXs2AHJyaqEz223BcR+X5k7F+bPd77u2BFmznSm+CpcxBFQX87zL6Pbd5MeayI6Kwtt1ctwQwXOZK1WOHMGTp9Wf8+cgVOn3J8fOFAkK712+rTyysrN9atXVuPGKh3axx/D7Nmq/x06pP7dN9+sVjeLE/C2bdOYOrUn9etrXHhh+WwRr+SKpyp7/laabRdcoB52OwZQMcc+UF2/u6pqsyBUBVwdeseOlTmnINRWyiVk2e01u7LYnDlzSEtLK8jxsWLFCg4fPgyo/GDR0dH06tWLUaNGMXnyZE6cOEHbtm1ZtGgRBw4c4N133w2k+YJQ4RgM6kY6IQGaNoUnn/T5/kIohqQkWL85iBhjJnkEF8zSgoPt6LoRXVceWSEhrs4+BlXa7sYbVWn6oUMD/ClKxjW11RVXwGOP+eBB5a88VFarcvdyiFOnT0NkpCoX6MpNN8HffxffltmsxKy4OPW/0nUMeXmqzZMnITwcfvrJr15ZBgP8979w4YUqT9auXUpYXrRIFXd79lnluVUYXYc5cwzs3BnHnDkG+vQp302AeCULlYoMMIJQq9m9G7ZuVc+bNoXBgwNrjyAIgUPS5BXDzJkzOXjwYMHrZcuWsWzZMkDlB3NUb1m8eDFPPvkkS5YsITU1la5du7Jy5Ur69+8fELsFoTJp0ADeeUfl55HEm+WnINWSHkLcOc1IP2OD0yqM22SyUq9eEJmZGrGx6li3XFlt26qSf23aVIslyjFjYN8+5Yl1/fU+mFxSHqr/+z8lTtWv797YmjUqd5iraOUpdOe884oKWSUpa7quxCqr1U2ZC3FN7JWdrc53330qwVWdOiV8UN9p0QLefVc54L31lkqAu2+fWqVesqRo7pCkJNi4USM8PJ+NG4PKra1VhldyYmIiiYmJ2Gy2crclCIIgVF8WLXI+HzNGtO3qzGOPqQW4Ll3Uwpz8L4XSIredxXDgwAGfjgsLC2PGjBnMmDGjYg0ShCpAZqYSrVwjH6Qit/9wplrS0IKDyCXobB5znaAgO2FhysEH3FMwvfYa3HILRLVtG0Dri+foURUW50DTlOeQz5qb48uJjkY7c4awjAy09HQlbC1dqhK2RUYqscg1h9LBgyoZV0mcOVN02znnKAPr1VNJt+rVc39+4IDKtB4R4fZBLHXrEmq3K9HMZlMztN9/V0LZI4+oSpN+Un6NRvjf/6BfP+WdtWePEjbbtXM/zlUHjI7OJSsrrNiIx7fffhuLxRJwr+SEhAQSEhLIyMgoWEASahHr16ukeX4UgAVBqH4cOqSGd1BD8BVXBNaeyqImLubk5ytH9dxc+PNP5fwuCKVFhCxBEHwmOxvuvVdpBNOmOQUVwT+4Cg2OVEt16igNJDcXQkKKhnObTMohaMoUNcGbObOQF87ZpNsBKWl3Frtdee29+64KRe3Z07nPZxHL8eVkZSkPLKuVIJtNqTi6rsSikyfVF3L6tLuQFRvrfB4Wpl7Hxam/rs+LJORChWsWZ9P06SpJVf367vs0DT02Fi0mxplPKy8P9u9Xneejj5QANnCg37zn2rVTq9ULF8I11xRtdvNmhw6oY7Opv+vXa169smbPns2hQ4cKXotXslDpHD2
qSpgaDDBqlBKBBUGolSxerIZdUMJHNahn4xdq4mLOP/84i/x06RJYW4TqS6mELMeEtkmTJhiNRrcJri80b968VMcLglB1sFph4kSViwfgqadUonfBf3gqfBcRoR66rpOeXrRohqYpfSQvT3nijBun3LWvuAJYt04pW5dcohTIAJCVpXKnrVunXj/6KHz6qXJmKhXr18NXXyk11aViLqBuckND1ayoffui/ukXX6yqGtarp9RXf4VdevqHFcZoVAJZbCwcPqzyaWVlqaXliRPh9dcpd8Z1F4KD4fbbi27fuFGFG+bkeI7K9OSVtXPnTo/VAQsjXslChbFhg/prt7sVUhAEoXZx4gSsXKmem0xw7bWBtUcoHzt3Op+LkCWUlVIJWS1btsRgMLB7927at29Py5Yt0Xy8IdA0jfz8/DIZKQhCYLHbVchSUpJ6HRnp+WZZKDuuDkeRkTqWw6eUKhEaCmHh6Drk5RmwWIoKDo0bq+g5h0PSM8/A75uzePiHJwjJz4b33lMlJVu3rtTPdOgQPPQQ/Puvem0wKKHN1UHKJ3RdJYDKyXGKVOHhZEdEEFmvHprDK+vIESUcFV40iYpy99DyB57c54ojOBhatlRfSn6+en+nTn5L/l4cGRnq/5CcrL6+tDQNg8E9NNWPeegFwX84hCyAvn0DZ4cgCAHlww/V0AnKOTMyMrD2COXDVcjq3DlwdgjVm1IJWc2bN0fTtIKywI7XgiDUXHRdeV45UgyFhMCsWUWTSAvlw2pVQoPJBOZ0G6TmA/kQaoe64YCGxRLk9ZrbsKEzyk7TYNm3Jv4Kfofp1ntpxGl46SUlBlXSNXvjRnj8ceWABCpE8sUXyyiWJCWpG9rGjdWybFwcemws9vR0p7BV2aqML95YhXHk2srJgVtvhf79i773889VuGFMjN9MPXFCOYPZ7eo3cvw41KunvjeTqVDBABnShaqCxaLy3oGqKlI46ZsgCLWGTp3UWtzhwyoxuFC9cUR3hITI/YRQdkolZBVOfu5rMnShfNTEJH9C9WHePBUKBkozmDZNRWkJ/iUkBD75RBXdY+ef8OwznMiri3XIZTRKuBa7PZ916zbTv3//gsWEwsTGwpYtMHWqCjXcndeWm47OYGrcy/T89Vf4+msYPrxCP4euqzxNiYnOXBatW8PLL0OpCtj99Zf6wbVr5+75FB2ttutFwywrTZVxd59z5iFz2W/Iy8Oj+5zRqN63aRPcfbf7vl9/hRdegFdfVdnb//tf5ZFXTk6eVF9ZTIyKzIyJceZbqw5eWTIG1lK2bVMXMlCVDERlFYRay5AhqlbK/v1lSE0gVCnS05VzOkCHDu7FowShNJSr0OW6devYs2dPicft3buXdY4EKUKpSUhIYPfu3Wzbti3Qpgi1jI8/hrffdr5+5hm46KKAmVPjadhQDeodjHvpEHaQpOyuTPp2ELfdprxpmjY1q/1eHg0awJVXqmTfjRsDmoH0eq1JODSR5Wn9lUCSmVlh9ufkwOTJMGeOU2caNEjZ47OIZbUqz7ExY+CJJ5TC4ur5VFx95sKqTEXh5j5nVt+py0MzmwmyWNA87MNsVu87fFi148pbb6m/WVnqSxwxAlatUv/8MuLQ3PLyVMRl8+bQoIG7CGgyKZ3QVXysSsgYWEvZuNH5XMIKBaHWYzBAFS7MLPjIH384n0tYoVAeylW18OKLL2bcuHHMnz+/2OOmT5/O/PnzZTVVEKoR337rnsx9woQKd+YRHBw+DMD+3CZQJwSrVQlTvjrBtm+v0mI99RRs2BCJqV4Y50f8rSrnvfGGSjJeARw75rz31DS46y7lWFSc9uTG3r0qGZtjgWTfPqWM+ZqHCirHK8vNfa4o+VYrm9etK9Z7jtjYoiWXnn8e3nwTVqxQ4tXx4+r7eP99eOAB93KPPlI4AtJkKipWFdb/OnUq9WkEwb/oujM/VnAw9OgRWHsEQRAEvyD5sQR/US6PLEEQaia6Dl9+6Xx9yy1w442Bs8d
f3HHHHcTHxxMVFUWXLl1YsWJFoE3yjKuQFRJMbGzpUyZFRcErr6jotWdfq0uz6Ay149NP4c8//WvvWVq1gmefVdF2L7+s0kD5JGLZbDB/Ptx8s1PEMhpVLMHhw6XPQ1UZXlkF7nOeH+amTYvdT4MGRdusX1+VePzwQ/dKhnv2wPjxcN99qma1j7jmozeZij+2qntlCbWM/fuVMg7Qvbsq3SoIQq3CZlOpEnbv9m+72rZt9Jw6FU28fAOCCFmCv6gUISs1NZWwsLDKOJUgCH5A01RC9/79VXRT4VQ+1ZUJEyZw4MABMjIymD9/PqNHj+b06dOBNqsoycmk5tfhjD0agoNp06ZszRgMSkwa8J9YZ5lJXcfy/EzW/FD2cDUHuq4mmq4MGgTLl6vfjk/s36/ctubOdZYkatNGxSMeOaLC7IxGlW+q0KMgD1XhhyMPVXVVZdq0gddfV9/JOec4t2/apBTlvXt9aqY0+ehd9T+Z2wsBxzWssF+/wNkhCELA+O47+OwzlWlg7lw/NarrGObMIW7nTgyueRCESuO++1RgwIgREB8faGuE6kypQwsPObKzncVsNhfZ5iA/P58//viD7777jjZlvRMTBCEghIaq0EJNqzk5djt06FDwXNM08vLyOHLkCPWqUuZQXYfkZPbntjybAVOjdWs/tHvjjbByJfq+/Uxd15dVv+UxcnQYEyYUjXBzsG2bxtSpPalfX3NzEALlvfPCCxAWBo895r4vKsoHe+x2Ff/4xhvOXFEGA4wd6xTdXPNQFUIDlYfK24/TNQ+Vtw9Y1enZE5YsgW++UbP4Y8eUd4oPSUJcvbFKG5U5b175zBaEclOvHnTsqFwxJD+WINQ6HIVjHPTu7aeGk5LQNm4kPzycoI0bq26VkxpM+/ZSqVDwD6UWslq2bOl24/DZZ5/x2WefFfseXdcZPXp06a0TBKHSSElR4lVsrHOb0Rg4ewBefPFFduzYwdatW0lNTWXBggWMGzeuyHG5ubk89dRTLFmyhNTUVLp27crzzz/PJZdcUuTY8ePHs2DBAiwWC8OHD6dLly6V8ElKwenTYLGwL7dpgQDjl3WAoCB49FG2v7yGVfmjITiEpUtVlOH06SpSzhVdhzlzDOzcGcecOQb69HEKmseOwcMPq8KCoCLlRowopT0HDiilxeHS1bKlikt0TdBUEXmoqhsGg0pON2QIfPSRErdcxTtdh7VrlQucS4ctjTeWA4dX1qZN/vwA5UeqFtZCLr9cPVJToW7dQFsjCEIls2mTM5K+Sxc/Vct2WeHJjY4mzOG5XZFVjgVBqDBKLWQ1b968QMg6dOgQERERxHlZ7g0JCaFp06Zce+213F1TYpMEoQZy5oxKwaNpakyvKq6+06ZNo3nz5px33nmsXbvW63Hjxo1j6dKlPPDAA7Rr146FCxcyfPhw1qxZQ79CYSlz585l9uzZrF27ll27dnn36AkUyckA7M9rUiDC+MUjC6B7d7q/352nlsNLL6lKdn/8ATfdBC++6J5LPCkJNm7UCA/PZ+PGoIJFy19/hUmTnPpSeHjp83cB6kPdeqty/xk9WmWGDw11P6Zhw6IKmwOrFfP+/bWndnNIiIqvKMymTfDII9CihfLX798fHY3ERBVdGRmpoi1d0bNyMB4/jcUWjFYoeZYjKrMqkZCQQEJCAhkZGURHRwfaHKEyERFLEGolCxc6n48b5yed6ewKjx4dDTYbenQ0miOfpnhlCUK1o9RC1gGXslkGg4FRo0aVWLVQEISqi9kM995boJ8wZYofcxGUkz179tCuXTt+/vlnenipWrV161Y++ugjZsyYwcMPPwzAmDFj6Ny5MxMnTmSTB/cSo9HI4MGDefXVV2nXrh3Dq1I5xtBQGDSI/Z91BE0JO34Tss5y1VXKrXviRDh6VIWT3XOP0pIcE0ZHWFp0dC5ZWWEkJsKhQyqBvMMxpkkTldTdp3LYR4+qBOdBLsPOLbeo/DcdO/r3A9YW7HaVSwvg4EF
46CHo3h3r+AdITu7oJSpTh1PZYAmGE9kQF4EK1HRiMnl1hBMEQRCECuW332D7dvW8VSu46CI/NOoab1+vHmRkqMEuPV28siqRL7+EZs3g3HPVQqgglIdSC1muLFiwgLY+3cEIglAVycuDCRPg77/V6wYN4KmnAmuTKw29eeO4sHTpUoxGI3fccUfBtrCwMG699VYee+wxkpOTadasmcf35ufn808pqsBVCh07ok+bzj8/AxmqkF1UlDONlL/o0AHeezubJ8ccZOPpDtjtGnPnwq5dKpJt/XqIjtax2dTflSs1fv5ZefgA9OqlqgmVmA/LboelS5Xg4pr/CpSoJSJW2TEY4Ikn4NVX1cwf4NdfCbltDJ/0HUHqlNuKVkfcvgP9oYfIDckmNDwC7aWXi8RsmM3gRTcWhIrFbldhx61ayU2lINRSXHNjjR3rY/XjknDE20dHO68thasci1dWhZKTo3Kr2u1qMfWDDwJtkVDdKZeQNXbsWH/ZIQhCJWOzqSTdv/6qXkdHq0WpRo0Ca1dp2b59O+3btyeqkKLS82yc3I4dO2jWrBnp6emsWrWKq666irCwMD7//HPWrFnD1KlTPbabm5tLbm5uweuMjAwArFYrVi+qkmO7t/2+kpEBdrsRXYeWLXWsVrvf2nagbdtG5JQpvHzqJO+c9xrv7OxTkG7pk0808vIgNlYnLU3jzBmNrCydEycgIkJn9Gid8ePtGI0lCGxHj2J48UW0n39Wr+fNw3bhhX7J8unv78OfVKptHTrAG2+g/fQThsTEAtfK+hs+o/6W5egjR2L/3/+U4qjrGJ+bAfbdZMSaiMrKghUzsF270E00yMioet+pUEv480915xofr6qZljr5niAI1Zl9+2DdOvW8QQMYOtQPjTq8sSwWSE9Hy8tz5pR0VDkRr6wK588/lYgF7ulQBaGslEvIEgSheqLralXEkXYqPFw5zLRqFVCzykRKSgrxHpJ6ObYdPXoUUFUK582bx/jx49F1nbZt2/LBBx/QrVs3j+1OnTqVZ599tsj27777joiIiGJtWr16dSk/RVEeeAAyMkLIyzPy1Vc5fm0boE5yMuf/8w+arjNy6x1k/WcGi5b34MyZME6fDqNePQuZmTbS0sLIybGhaZCZqdGt227atNnPt98W07iuE5+UROsVKwhyEQOPXngh+//4A5sfveD89X1UBJVtmzZmDPGbN9Pyu+8IdiS6evNNrIsXs+2RR4g8epT/+/57bCEhoGlkBQVh/P57fp41i1SXip7Z2dmVarcgFLBxo/qbkiI3lIJQC3H1xho92k8pMB3eWHXrqtDCkycJCQ93emeJV1alsGuX83lVq7MkVE9EyBKEWsjs2bB8uXoeFKTyHFXX1ZGcnBxCCycJR4UXOvYDREVFsWbNGp/bnTx5MhMmTCh4nZGRQbNmzbj00kuLeH85sFqtrF69mksuucR7Jb3i0HX18OBHX+62PWBIT0dbtgyAB3LXcO3yS7jsMiN5eRoNGpgAHbvdjN0eiq6rfOMWSycuu6yD93vM48eVF9aWLRAWph6NGmF//HHa9ehBO79YXjHfh78IqG1XXgmPP45hyRK0Dz6AvDz0iy7ikuuvxzhuHJrBgL1+fTIzM4moXx9DSgr9fvsN24MPFggHDu9DQah0NmxwPu/bN3B2CIJQ6eTnq8hiUE7E11zjh0Zdc2PFxUFkJLrJhDUrizDHMYW9soQKYedO5/POnQNnh1BzECGrGiClxwV/snixeoC6b33hBfdqddWN8PBwtxBAB5azpdrCy5hNMjQ01KNAFhwcXKI44csxHklLUwmqmjSBIUPgzjv917Yn7r1XueWlpsKaNaS0+4vs7C7ExoLBoKHrdgwGnWbNdIKDDeTkqEqGv/xiKLpoqeuwciXMnKnK3jmUrmuugQcfxFCoOp6/8Ov34WcCZltMjPrfXn89vPkmjBmD8ZdflLdLTAyawQB2O5rBgBYTg7ZxI4ZffilYia5q36eMgbW
E06dh9271vH37ovndhIAg/U+oLIKClEdWUpKalpTg/O4bDm+smBjnvCQ0FN21nG9hr6zqurJbhdF1p5BlMkHLlgE1R6gh+CN9nlDBJCQksHv3brZt2xZoU4Rqjq7DkSPO15Mnw+DBgbPHH8THx5OSklJku2Nb48aNK9ukspOcrDLw//tv5ZSNi4pSMYycXbSccprcXJ3CmlNoqEonYTKpRc3ERHW8GytWwLPPKhELVJb6119XycgrSMQSSsBRvaFFC+eKtMmk8oQ4JvTF/lOrBjIG1hJcK8z26xc4OwQ3pP8JlYmmqTUVvxSTdnhjZWeXPA+pBmNhdebECTh1Sj3v1MlPCfyFWo/8jAShBrJtm8bUqT3Zts09/kvT4NFH4ZZbYPz4mpFHt1u3buzZs6dIONSWLVsK9lcbkpPRdZiQ/CAv77yESkmxNHw4nH8+SVldWH/iHGK0NGfYYFY2pmMpkKVyJhVetHRj2DBo3Vo9v/xy+PhjuPDCSvgAQom4rkg78Fa1SRAChWtYoQhZgiCUl6Qk+O47yMyEkyeLF6hcx0IRbf2Oa1ih5McS/IUIWYJQw9B1mDPHwM6dccyZYygybmuaErFuuSUw9vmbkSNHYrPZePvttwu25ebmsmDBAnr16kWzZs0CaF0pOXyYk/l1WWc+nw9/ac+KFZVwTk1Dn/QoiaeuI9cejCnjmPIKQ0c7eYKgnBy0kycA9UMqWLScbXf/bYWEwHPPwSuvKM8sL3nEhErGNT+IyaQuAGFh7sfISrQQaPLznUJqVJQkUBGEWkR2Nvz+u58b1XWVADYtTbn/nD7t9Bj3hmMsnDfPz8YIrone5fIu+AvJkSUINYykJJXHKDw8n40bg/j4Yzj/fDjnnEBbVnrefvttLBZLQeXBFStWcPjwYQDuvfdeoqOj6dWrF6NGjWLy5MmcOHGCtm3bsmjRIg4cOMC7774bSPNLT3Iy+3KbqufBIbRpUzmnTTrZhvX2+sQYT6Ohw/FjUDcWzGZ0zYBmNqsJoClSLVoGZ7H+czNJn+XTZ2QTZ0MdOqiHUHXwlB+kMJIfRAg0v/3mvMm88EKJOxGEWsTnn8OsWdC9Ozz0kJ/mq5s2wbffqmuJpqkKhZGRxb/HMRa6hjkLfkESvQsVgQhZglCDcHW+iI7OJSMjjAcfhHbt4NVX1SShOjF79mwOHTpU8HrZsmUsO1tlb/To0URHRwOwePFinnzySZYsWUJqaipdu3Zl5cqV9O/fPyB2l5nDh9mfe1YYCgmuFCGr4DcTEklc/nHQDRBhgpMnwK6jG41gt6sEB83D4NgxTOkZpOU3IHHiP/S+phFakLHiDRVKT+FqTcXhWrUpMbFSzBOEAiSsUBBqJXl58N576vmvv4Jfao3ousqjkZOjGnQIVI4E77qOIS/PPV+kA6OxZM8todS0awcZGcr5tm7dQFsj1BTKJWS53mAWR0hICLGxsYSEhJTndIIglIDD+SI6WicvTyM7Ww0aJ07AO++o+1NvThlVkZ07dxLlQ4haWFgYM2bMYMaMGZVgVQWSnMy+3F4QFAyaoSDlVEXidNgxoNVroiZ9ublw1KwmdOjqb0Ym7NkL6GpOaMxk/emOJK3Lo8+gslWGFCoYX7yxHEh+ECGQaJoSU3NyKFoSVRCEmso336j0VQADBuCfec++fSpW0bEQFxPjJk5pQJDFguZtXDSZKqfgTi3i0UfV37y8wNoh1CzKJWS1bNnS+0WgEAaDgU6dOnHLLbdwzz33YBC3cUHwK67OFzExcORIOLquxvCcHJg6tXqJWLWOzExIS1OhhWdF/1atKvaUjt9MVpbyuLdoEZCvQ8oxsIVAUBA2ux2j3Q42wI6yzWDE2CiOLHMkifM1eg+U31aVozTeWA4cXlmSH0SobO67TyVv3LtXhQAJglDjsdth0SLn63Hj/NCozQbPPw8tW6rnw4bBbbe5HZJvtbJ53Tr69+9PsCcXMLMZevTwgzFCYcSnRfAn5RKymjdvjqZ
pHDx4sGBbdHQ0mqaRlpbmdtyxY8f4/fffefDBB/n6669ZtWqViFmC4Eec3lhw5IhGfr4BoxFCQ9VkYfduWeiu0hw+jK7Dv3mNISqYxo0hvIIdnaxWSE5W+oXZfHZjbi6YNdDqKPHKZju7Q1dCliECYmNBN2IyweHDqh2ZnFQxSuON5aCK5gdJTEwkMTERW8FvUaiRBAXBuecG2gpBECqJn34Cxy1k9+5+qma3aJHKLB4cDG3aKFGrcIETqxXz/v0qp6cnIatQFWxBEKom5RKy/v33X2688UYsFgtPPvkkN910U0HOmoyMDN5//32mTJlC7969WbJkCZs3b+b222/nu+++45133uGOO+7wy4cQhNqOw/nCYlGChCMNgNEILVoot+3EROjdWzxnSkul3UQnJ3Msvx7Z9jAIqZxE7yEh8MknLh70ug4Tn4TNmyEuDv3IEWy5uRiNRuV9GxwM/fvD9OnKNx+laYmIVcUo4mpnKbK/OuUHSUhIICEhgYyMjII5hiAIglB90XVYuND52i/eWHv2gKOCtcGgKigXFrGESic7GyIiAm2FUBMpl5D1+uuv88UXX7B9+3Y6FKpUFRUVxd13383AgQPp1q0bc+bM4cEHH+Szzz6jW7duvP/++yJkCYKfSEqCtWvV/aeuq22aptOsmU5oqOZWkEy8skpHpd1E9+7N/ntmwbR4CA+vtIqFDRuqBwCbk+D3TyAuHE7/jW7PxmawYawThda4MVjS1f60EfJDqsp4dLVzIvlBhCpDfr56yM2mINQqfvkF/vhDPW/f3k9TCocX1t9/K2XMLy5eQnlIT4dLLlGRnsOH+0mwFISzlEvIevfddxkwYEAREcuVDh06cPHFFzN//nwefPBBOnfuTPfu3fnDcfUSBKFc6Dq8/rryujIalYNFUBDExOQQFqZKDbsWJBOvrCpKVBT7QjtCjHpZGYne3SicUyk0FE6dwqLrRMTHK9FDfkjVgyKudu5IfhChypCUBBMnwv/9H4wZo/4KglDjKeyN5ZfpRKtWquHPPoNrr/VDg0J52bVLpTfZvx/OnAm0NUJNo1xC1r59++jig9odGxvL+vXrC163atWK3377rTynFgThLElJKqVNgwZqkAgNhWbNdLKy7AXHuBYkE6+sqku3bvC//6mCO8WsD1QMhXMqBQejN2pEfnq68xj5IVUf3FztCiH5QYSqwoYNqozVpk0wYkSgrREEoRLYv19NIQCaNIHBg/3YeHAw3HCDHxsUysPOnc7nnTsHzg6hZlIuISsiIoJtJZTp1nWdn3/+mQiX4NicnBzq1KlTnlMLgoC7E02TJio5eGSk55Utcaap+nTtqh6VTmkq3MkPSRAEf6DrsHGjeh4UBD17BtYeQRAqhVat4M03lfPUwIEqmqDMWCwqH5Yk66yS7NrlfC6RnoK/KVfZwP79+7Nv3z4mTZrkMRGy3W5n8uTJ/PPPPwwYMKBg+/79+2nSpEl5Ti0ItRpdh61bizrRREWp8dwThZ1phCqExQJffAE//wynT1f++UtT4U5+SIIg+IN//4WUFPW8e3fJBiwItQRNU1HEc+b4wRFz1iwYOxb27vWLbYL/sNudQla9etCoUWDtEWoe5fLIeu655/jmm2+YOXMmn376KaNGjaJly5ZomsaBAwf49NNPOXDgAOHh4TzzzDMAHDhwgN27d3PPPff4w/5agZQeF1yxWmHKFFi1CurU8c2JxoE401RRkpNViWiAK66As9fLSqE03lgO5IckCEJ52bDB+bxfv8DZIQhCwCjX9GHTJpUPC+DOO2HlShHEqxAHDzrrzXTuLFNFwf+US8jq1KkTX331FTfddBMHDhxg5syZbvt1XSc+Pp733nuPzmcDYyMjI1mzZg3t27cvz6lrFVJ6XHBgNqu8uFu3qgqFu3ZB8+a+Dw6S4qiKkpzM6fxosu1hNGnSrHyusqWlNN5YDuSHJAhCeREhSxBqFfn5zqJE5SYjQ63qOhg/vtqIWJs3b6Z
v374899xzPPHEE4E2p8KQsEKhoimXkAUwYMAA/vnnH5YuXcpPP/3E4cOHAWjSpAn9+/dn1KhRhIeHFxwfFxfnFmYoCIJvnDgB99+vvKd1XUWghYWptAAWi/uxug55eQYslqITBqNRiWDiTFOFOHyY5Wn9STw5itBX45neEfr2rYTzOryxsrJUcjUPPyRDXh7yQxIEwa+YzbBjh3rerJlakREEoUYzd67q9uPGKe3aWyoMn5g+XZXrBjUHqSZVCu12Ow8++CA9akFlYNdE7yJkCRVBuYUsgLCwMEaPHs3o0aP90ZwgCIXYvx/uuUeJWaByYbVoocZwh9uuOxoWSxCaF3HBZILDh1WYouTHrAIkJ7MvtykAuVoYDRpU0nmtVhXWaDJ5/CFpQJDF4vV3JD+k/2fvzMObqrY+/J6kc0vLPBcKFGQGJwqIDCKDKIgCyiTicOEDBJGLIPcyiSgKyCAUFVBArhMiKMNFBC9joRRUFAEFKWUsFOg8p835/tgkTdqkTZukSdv9Pk+eJmdcSc8+e5+11/otSWkg0+vLIZGRQkAFZDSWRFIBSEmBzZshPR2mTxdZgNWqlfBge/fCDz+I95UqwezZZWYybfXq1YSFhZFkWhG6nGKIyNJooEUL19oiKZ84xJElkUicx88/wz//mednqFtXCGR6e0NCguV9dLocDh48SteuXfH09LS4TdWq0vfgNly5QnTW/QBovL1o2LCUzuvlBZs2Wb2QcnQ6jh48WOh1JC8kibOR6fXlEJlWKJFUKAxOLID+/e1wYt25AwsW5H2eNg1nzf698847nDx5kqioKBISEli3bh2jR48usF1WVhazZ89m48aNJCQk0LZtW+bPn0+vXr3ymX6HZcuWERkZyeTJk51is7uQnS3qeQA0aVJmsj4lZQyHObKuXbvGtWvXyMyfmmJC165dHXU6iaRC8OOPMGeOCHgBaNkSli0TvgOAWrUs76fTQXR0Ks2bgzX/g8Q2SiMaRH/5KjHZdUCrpUEjben6hWrVKvRCSo2ORl5IEonEody4If76+sK997rWFolE4lSysuDLL8V7jQaee66EB1JVURjHEM30yCPQt69DbLTEe++9R4MGDWjXrh379++3ut3o0aPZvHkzkydPpmnTpqxfv55+/fqxb98+upg46v/9738zefJkKleu7DSb3QUvL/jpJzh7Vvz/JRJnYLcj6/vvv+eNN97g3LlzhW6nKAo5OTn2nk4iqTCkp8P77+c5sR56SExCyVmN0sXp0SDZ2Vy9CtmqJ3h50bix408hkUgkbsVHH8H16yJvXkZ0SiTlmu3bIT5evO/ZU8jilYht20SRGRAzujNmODWl8Ny5czRt2pQTJ05Y1bSKioriq6++YtGiRUydOhWAUaNG0bp1a6ZNm8aRI0cA+PXXXzl+/Djh4eFOs9fd8POD++93tRWS8oxdjqxdu3YxaNAg9Ho9QUFBNG7cmMDAQEfZJpFUaPz8YOlSUVG4b1944w2hry0pZ1y7xt+ZQh8LTy+aNHGtORKJRFIq1K0rXhKJpNySmwsbN+Z9fv55Ow6WkCBCuvR6mDkTqlSx277CqGUtWt2EzZs3o9VqGTNmjHGZj48PL730Ev/617+4cuUKwcHBHDhwgL/++ot69eoBkJSUhIeHBxcuXGDdunVO+w4SSXnGLkfW22+/jV6vZ+7cubzxxht4yVk1icShtGwJn38uZq/KiI5lkSjHj9NhwQKUGjWgc2dXm+N6rlwhOlsMbPCSjiyJRCKRSCTlg59+gmvXxPuOHYVSQYkZPRoefBAiIsBN5Gp+/fVXmjVrViCQo0OHDgCcPHmS4OBgxowZw9ChQ43rX331VRo1asQbb7xRqvZKJOUJuxxZJ0+epH379syePdtR9lQIsrKyGDduHHv37iUxMZGWLVuydOlSOnXq5GrTJC4kKQm+/hpeftm8JHG5qkquqmhWrqT6qVNoVq6ETp3Kj4eupOTmEu3dUvwOMrVQIpGUZ1R
V3vMlknLO8eMKCxZ0oHp1hfXr85Zb0EkvPq1aiZebEBsbS506dQosNyy7fv06AH5+fviZaIP4+voSEBBQqF5WVlYWWSYCU8nJyQDodDp0Bt0REwzLLK2zl+IcOy4OVq3S0KqVSocOaokLGDnz+9iLO9sG7m2fLbbZarddjiytVktzu1zrFZOcnBxCQkI4fPgw9evXZ9OmTfTv35+YmBgCAgJcbZ7EBVy/DhMnwqVLwqE1dWo5HetHRqJERJDj64tHRIQowV7RHbg9enChPVBJxUOrli/HpUQikZiyY4fIM+rSBZ56yg6xHIlE4o6oKqxcqeHUqeq8+aaGO3fEeLZVqxLqJen15rO7bkZGRgbe3t4Flvv4+BjXW2K9qYfPCgsWLODNN98ssPzHH380c4rlZ8+ePUUeu6TYcuzffqvB11+3BODRRy/x2GMxTj+nq3Bn28C97SvMtnRDidMisMuR1bZtW65evWrPISok/v7+ZlFsQ4cOZcqUKfz111/cL1XxKhxnz8Krr+YJYe7ZAy+9lFeZsNygqhAeDllZZAUF4ZOWJj537FhOvXa2odfD7dsACg1DFDwcVktWIpFI3IyICCHwHh0N3bpJR5ZEUs6IjISICAVf3xz++MODhx+GCxdENFaxh3o5OTBmjHB8P/+8WwrF+vr6mkVNGcjMzDSuLykzZsxgypQpxs/JyckEBwfTu3dvi5rUOp2OPXv20KtXLzwdXGm6OMc+f15DUJD4Zw8d2oqHHmrp9HOWNu5sG7i3fbbYZog+LAq7HpkmT57MM888w4kTJ3jggQfsOZTN/PLLL8ydO5fDhw+TmZlJ48aNGTNmDJMmTXLK+VJTU1m0aBHHjh0jKiqKhIQE1q1bx2gr8bFZWVnMnj2bjRs3kpCQQNu2bZk/fz69evWyeo7z588THx9PaGioU76DxH05cgSmTwfDhE3DhrBiRTl0YoEY3Rw6hBoUBLm5qEFBKIcOVfioLI0G9u6FmzfzKkpLJBJJuSMnB44eFe8DA6F1a9faI5FIHIrJfCVBQVmkpfng6SlkMxo1KsEBP/0Ufv9dvGJiYN48R5tsN3Xq1OGaQQTMhNjYWADq2lHQwtvb22K0l6enZ6HOiaLW24Mtxz5zJs9p2b69BntNceb3sRd3tg3c277CbLPVZrtiNQcNGsSsWbPo06cPq1at4vLly/Ycrkh+/PFHOnXqRFxcHLNmzWL58uU88cQTTo0Ku337NvPmzePs2bO0a9euyO1Hjx7NkiVLGDFiBMuXL0er1dKvXz8OHz5scfuMjAxGjhzJjBkzCAoKcrT5Ejfm++9h8uQ8J1a7drBuXTkt4mQ6uvH3F8v8/cXn8HCxvgKjKFC7Ntxzj6stkUgkEifx+++Qlibed+rkltEVEomk5NydryQoSEVRxN9Dh4RmUrGzA8+cgbVrxXuNBkyE0t2J9u3bc+7cuQIRJMeOHTOur0jodPDnn+J9/fpQiASYRGI3dmtkGZg4cSITJ060uq2iKOTk5JT4XMnJyYwaNYrHH3+czZs3o7HxjpiQkMC+fft4+umnLa7/8ssvGTBgAP6Gh+t81KlTh9jYWGrXrs2JEyd48MEHrZ4rKiqKr776ikWLFjF16lQARo0aRevWrZk2bRpHjhwx216n0zFkyBBCQ0OlYH4FQlVhzRpYvTpv2SOPwPz5UG4Lf+aNbiA7WyxTFNHDVeSorGvXYNo0kV7Towf06eNqiyQSicQ5mE7odeniOjskEonDMZ2vrFYNkpPFfGVSUglUJLKyYM4cob0AQm+jZcnS05zN4MGDWbx4MatXrzY++2VlZbFu3TrCwsIIrmDp0+fP5w3z27RxrS2S8o9djiy1GFEUxdnWEl988QU3b97k7bffRqPRkJaWhq+vb5EOrVWrVjFnzhy++eYbnnrqKbN1n3zyCS+//DLh4eGMHz/e4v7e3t7Url3bJhs3b96MVqtlzJgxxmU+Pj689NJL/Otf/+LKlSvGG5per+e5555
DURQ2bNiAUoE1gioSOTnwzjuwbVvesmHD4LXX3FrL0j4Mo5vMTEhLQ8nKQqlWTazz94fExIqrlXXpEvz1l3gFB0tHlkQiKb8YHFmKUjEnLiSScoxhvrJyZbhzRyEnx4OgoBLOV65aBRcvivfNmwtHlgtYvXo1mZmZxsqD27dvN2YBTZw4kaCgIMLCwhgyZAgzZswgLi6O0NBQNmzYQExMDJ988olL7HYlf/yR9146siTOxi5Hlt7gKS8F9u7dS2BgINeuXWPgwIGcO3cOf39/nnvuOZYuXWqsDpGf6dOnExUVxbBhw9i5cyc9e/YEYMuWLYwdO5aRI0cybtw4h9j466+/0qxZswICfB06dADg5MmTRkfW2LFjiY2NZffu3XhIdecKQ3Y2nDuX93nKFBg+3HX2lAqRkbBvn0gpuevQ9r1zR0zZVfSorKtXeTv2Rfw1GdwTdz+PudoeicQNCQ8PJzw8nNzcXFebIikp168LgXcQTzcy30QiKTeYRmN5eIgCNrm5PmRlKTRsWMz5yl9+gS++EO+9vIQulouek1asWGEmm7Nlyxa2bNkCwMiRI42SMJ999hmzZs0y00fesWMHXbt2dYgdZakPNHVkSRlEibMpMzEg58+fJycnhyeffJI+ffrw7bff8uKLL/LRRx/xwgsvWN3Pw8ODr7/+mo4dOzJw4ECOHTvGTz/9xPDhw3nsscdYt26dw6KhYmNjqVOnToHlhmUGj/6lS5dYu3YtUVFRVK9enYCAAAICAjh06JBD7JC4L35+sHw5hITAggUVwImlqiI8/NatvBBxDw8yq1TJ26YCa2VlX7zG94nd+E/8Y2w81szV5kgkbsmECRM4c+YMx48fd7UpkpISEZH3/qGHXGeHRCJxOIZoLI0GbtzIWx4QUHC+slDS02Hu3Lyx4Pjx0Lixk6wumlOnTqGqqsVXSEiIcTsfHx8WLVpEbGwsmZmZREVF0ceBEfZlqQ88dUr89fKCpk1da4uk/FNmQoFSU1NJT0/n//7v//jggw8AePrpp8nOzubjjz9m3rx5NLXSYnx8fNi2bRs9evSgX79+ZGdnExYWxqZNmxwaDZWRkWGxuoQhWizjrqp3w4YNi5VqWZY88ZKCqKr5DFS1avDVVy6bYCpdtm6FAwfE6EZRwMcHtX599AbBX3D7qCxntr/LZ9PQIy6Oxi0L3jskEomkXGDqyJL6WBJJucEQjZWYKIS+DePdSpWyqVbNB1BsV5FYskREbwLce28FmO0tXyQmwpUr4n3z5thdrVAiKYpiRWRdvnyZy5cvGx/oDJ9tfdmDr68vAMOGDTNbPvzuTe6ooaSzFQIDA1m8eDHx8fGkpqaybNky4zEdha+vL1lZWQWWZ2ZmGteXhLLkiZeY89tvMG4cpKaaL68QTixVhS1bwNtbOLICA0UomqUv78ZRWc5sf9F/341SUzQ0buXn8ONLJBKJy8nNFRULAapXh2Yy+lQiKS8cPQr//a8YwhkcVNWrQ1BQtnEbm6Ky0tPh5Enx3s9PRGaVW/HY8olGI6qx9+wp5yskpUOxHqdDQkLQaDScOXOGZs2aERISYnNanr1VC+vWrcvp06epVauW2fKaNWsCojphYURHRzNixAiaN29ORkYGgwYNIiIiwmIqYEmpU6cO165dK7A8NjYWEN9BUnH43/9g5kyhizVtmkgprFCzE4ZY87p1RVqhQTvOkqMq/yinVatSNdUl6PVcuHZX28/LkyahFUzoXiKRVAy0WvGke+KEKGVW0Yp6uBFjxoxh+/btpKWl0bBhQ9555x369+/varMkZZTcXJgwAVJS8sa3tWpBlSoqSUnm2xYZleXnB59/LjZo3Bjq1SuNryBxIIGBMHKkq62QVCSK5chq0KABiqLgefduZfhcGtx///3s2bOHa9eucc899xiXG3SnatSoYXXf2NhYevXqhaenJ3v27CEjI4MuXbrQu3dvDhw4QNWqVR1iY/v27dm3bx/Jyclmgu/Hjh0zrpd
UDL7+GhYvNvfZ6HQVwJH1559w+TL06pWn/Fm9um0PLqajnPBwp5vqcm7e5ELGXUe6lxdNmrjWHIlEInEaPj5yit4NmDJlCitWrMDb25vjx4/z6KOPEh0dTTVDJWGJxEZyc+Ef/4AzZ4SvWlGgTh0xJ2nLfKVFFQlvb1EFSSKRSGygWI6smJiYQj87k2eeeYZ3332XTz75hEceecS4fO3atXh4eNC9e3eL+yUkJNCnTx9SU1M5fPgw9evXB2D37t10796dxx9/nL179+Lv72+3jYMHD2bx4sWsXr2aqVOnApCVlcW6desICwszViyUlH2OH1dYsKADNWoodO6ct1yvh5Ur4bPP8pY9/riIzCr3Tqy9e4Wwu14v1D4NdZhtdXabjnIqQhrtlStEZ4kZR28/LTJgUyKRSCTOpHnz5sb3iqKQnZ3NtWvXpCNLUmxSUmDPHjHk8/QUwfd3i/hZxWatLIlEIrGBMqPUc++99/Liiy/y6aefkpOTQ7du3di/fz/ffPMNM2bMsJq2t2rVKq5cucL+/fvNxODbt2/Pjh076N27Nxs2bGD8+PFWz71y5UoSExON0V/bt2/n6tWrAEycONFYfjUsLIwhQ4YwY8YM4uLiCA0NZcOGDcTExPDJJ5846qeQuBhVhZUrNZw6VZ2VKzV06iQ64+xskdL/44952774otDIKtedtV4Pa9fC6tXis6qKkoxpaaJkzV2NOCOqiiY7WyzP/8NotWK/NWtKx3YXknXxOlezxT2pUXCOlIKQSCQSiRnvvPMOJ0+eJCoqioSEBNatW8fo0aMLbJeVlcXs2bPZuHEjCQkJtG3blvnz59OrV68C244fP55169aRmZlJv379aNOmTSl8E0l54+xZ8dfHRzixKlUqep8CUVlN4uCtt2DqVGjY0Kn2SpxLQoIQem/eXFQslEhKgzLjyAL46KOPaNCgAevWrWPr1q00bNiQpUuXMnnyZKv7TJ8+nYEDB9LKguZOly5diIyMLLITX7x4MZcuXTJ+3rJlC1u2bAFg5MiRRkcWwGeffcasWbPMBhM7duyga9euxfy2EnclMhIiIhR8fXOIiPAgMhJatxb98M8/i200GnjjDXj6adfa6nQyM4X3bu/evGWPPSYqFaanF1S5BxTAIzPTelqyvz9Y0JpzFFlZWYwbN469e/eSmJhIy5YtWbp0KZ1KuVpiTLX70dfyg2wdjVuXqVuxRCKp4FiLSi7At9+KCNsuXaB7dzG5IbGZ9957jwYNGtCuXTv2799vdbvRo0ezefNmJk+eTNOmTVm/fj39+vVj3759dMmX0rlq1SpWrFjB/v37+eOPP0pNIkRSfjBUKszIgOBgMeY1nbNUVcjO1hQ6XxkertKxynyUo0dFdcJFiyj8ZlJxKQvV6w8dgnnzRD2nf/8bpPSepDRw2NPTtWvXuHbtmrFCnyXsdeZ4enoyZ84c5syZY/M+Hh4eFp1YBtq2bVvkMYqTQunj48OiRYtYtGiRzftIyg6GzjsrC4KCskhL82HpUrH84kWxjbe3CEgq977LuDihZfDnn+KzosCkSULpMS5OTM9YIEen4+jBg3Tt2tWot1cAT08IDXWK2Tk5OYSEhBhTjTdt2kT//v2JiYkhoBQfsi5kB8Ndeb4mHUrttBKJRGIX1qKSLfLjj2KGZ+9eUcVWOrKKxblz52jatCknTpzgwQcftLhNVFQUX331FYsWLTLKWowaNYrWrVszbdo0jhw5UmAfrVZLz549WbZsGU2bNqVfv35O/R6Ssk9ysgi8f+UV4bi6ckXMO6alWdpaITPTw6qT1N8frv56G513FF4aRDhX69bONL9MM2HCBCZMmEBycrJZ8IQ78ccf4m9ODlIqQ1Jq2O3I+v7773njjTc4d+5codvZW7VQInEHDIX4goJUcnPF32PHFLp1E+urVIFlyypA0b0//oB//hPu3BGf/fzg7bfh4YfF51q1xMsSOh2p0dEi/tiaIys52fE238Xf35/Zs2c
bPw8dOpQpU6bw119/cf/99zvtvPlp1AhGjYK//64A14tEIik3WIpKthjQmpoKJ0+K9/Xri9ANSbHIX6nbEps3b0ar1TJmzBjjMh8fH1566SX+9a9/ceXKFasarTk5Ofz9998Os1dSPomPF9UJz58X9Xzefx82bbI6X4lOl8PBg0etT1jeuEHV6f/AK/fuc+Hs2XmVrSVlEoMjS6OBFi1ca4uk4mCXI2vXrl0MGjQIvV5PUFAQjRs3NqvWJ5GUJ0yjsapVE74Wf39IShIzEP37C02s8jxW/+WXX5j7yiscjooiU6+nsbc3Y5o0YdLXX+OMsnupqam8//77HDt2rEiNECieToiB8+fPEx8fT6iTIsCs0aKF7OwlEknZwlJUslXh5mPHRGkzEKmFMoXNKfz66680a9aswPi7QwcR6nvy5EmCg4NJSkpi586dDBgwAB8fH7Zu3cq+fftYsGCBK8yWlBHi4uD//k84sEAE4d+8KXzThcxXEh2danm+Uq+HRf+G3Fjx+emnZUphGSc9XUzKgkim8PNzrT2SioNdjqy3334bvV7P3LlzeeONN/CS6m6ScowhGqtyZdFJQ55wZUQETJxYvp1YP/74I/379+fe5s2ZVb8+AarKhSpVuNq1q1OcWAB37txh3rx5NmmEQPF0QgAyMjIYOXIkM2bMKN1w7fR0uHBBjASLU9lRIpFIXIilqORDhxTLUVmHD+e9t3D/lTiG2NhY6tSpU2C5YZmhUJGiKKxZs4bx48ejqiqhoaF88cUXtG/f3uJxs7KyyMrKMn5OvhsprdPp0BkGQfkwLLe23h6ceWx3OqetlIZtV6/CxIlaYu/6nGrVghUrcqlVK28cXFzblI0b0RgiNevUIXfChMIP5gSK+u3c8f/tzpw9K/yTIDNEJaWLXY6skydP0r59e7M0HYmkPGKYhc7MFDPRqakK1aoJ50NFKCecnJzMqFGjePzxx9m8eTMag+7JtGnW0wOBhIQE9u3bx9NWVO+//PJLBgwYgL+/v8X1tWvXJjY2ltq1axeqEQLF1wnR6XQMGTKE0NDQ0r+H/fknGNJARo0S2mISiUTixhQWlVyg/9PrxQwPiLJm993nMrvLOxkZGXh7exdY7uPjY1wPEBgYyL59+2w+7oIFC3jzzTcLLP/xxx/xKyLkYs+ePTafp7g489judE5bcZZtN2/68fHHbUlKEtdWtWoZDBnyG3/8kWVMIyuubf6xsdy3bBmanBxUReG3kSNJKmKC0plY++3S09NL2ZKyjen1IB1ZktLELkeWVqulefPmjrJFInFbIiPhp5/Mi/DFx/tSvbqFcsKlW/zO+dy6xRfffMPNmzd5++230Wg0pD38ML69e6PRaArdddWqVcyZM4dvvvmGp556ymzdunXrGDt2LOHh4YwfP97i/t7e3janKxdHJ0Sv1/Pcc8+hKAobNmwo9apNKediSdVVo7bHHZTatUv13BKJRFISTKOSDbdMq/3fn38KYR2ADh1kPXYn4uvraxY5ZcBQfMnX17dEx50xYwZTpkwxfk5OTiY4OJjevXtb7Zd1Oh179uyhV69e1ou5lBBnHtudzmkrzrTtr7/ggw+0AAQFCU3PFSv8qV69Z8lt0+nQvvii8H4D6ogRPPTKKw6121aK+u2SnajTWh45dSrvfZs2rrNDUvGwy5HVtm1brl696ihbJBK3RKeDV1+F27dF8JGiCDHDwMBsDE2o3EZlnTwJU6ey9/ZtAgMDuXbtGgMHDuTcuXP4+/vz3HPPsXTpUuPMb36mT59OVFQUw4YNY+fOnfTsKQZBR48eZfHixYwcOZJx48Y5xFRbdUIAxo4dS2xsLLt378bDw2HFW23mwH6VuX8vxU+TyRuXfZD1oiQSiTtjGo1Vvbr5Oov9n0wrLDXq1KnDtWvXCiyPvZsPVreEJcS8vb0tRnp5enoW6TixZZuS4sxju9M5bcXRtv3+uwgST00Vbbl5c1i5EipXLnziskjbjh8XkgqKAo0
bwyuvoHXxb2rtt3PX/7U7oqp5jqyAAGjY0LX2SCoWxb8rmTB58mQiIiI4ceKEo+yRSNyKy5dhwAD47TfQakX/6+cHjRqp+PrmVeHMPytdLti2TSh8JiZy/tw5crKyePLJJ+nTpw/ffvstL774Ih999BEvvPCC1UN4eHjw9ddf07FjRwYOHMixY8f43//+x5IlS+jTpw/r1q1zWDSUrTohly5dYu3atURFRVG9enUCAgIICAjg0KFDDrHDFqL/FmIC6XofKjepVmrnlUgkkpJgKRrLgMX+z5BWCPDQQ6VkZcWkffv2nDt3rkAUybFjx4zrJRJb+c9/8rIP2rWDjz4S7bs4KMeP02HBApTjx/MWdu4MH38MDRrAvHkySrMYhIeH07Jly0LlNVzFzZt5BcxbtRIT/RJJaWHX5TZo0CBmzZpFnz59WLVqFZcNJS0kkjKOqgo/zvDhEBUl5D40GqhZU8w2WJqs8fcXs9Xh4WL/MoteD0uXioFGjnDWpXp6kp6VxahRo/jggw94+umn+eCDDxg7dixfffUV58+ft3o4Hx8ftm3bRrNmzejXrx+DBw+mWbNmfPnllw6NhrJVJ6Rhw4aoqkpGRgapqanG18MPP2zxuM4YQPx9xWCnQmiYdGRJJBL3xTQay4qcoXn/F58AZ86IFU2bWi9tJnEIgwcPJjc3l9WrVxuXZWVlsW7dOsLCwoyRyBKJLcybJyTtwsJEJFZAQDEPoKpoVq6k+qlTaFauNB8Q33cfbN4swrwkNjNhwgTOnDnDcVPHoJtw65aoWwQyrVBS+titkWVg4sSJTJw40eq2iqKQk5Njdb1E4k68/z589ZWYlUpNFVq1DRuKv9YoF1pZqanwr3+BqTD6M8/g+9lncPo0w4YNM9t8+PDhfPzxxxw9epSmTZtaPWxgYCCLFy/mkUceAeCll14qsW6HNZylEzJhwgQmTJhAcnKyYyobqirRtysB4O+bS406pZ/aKJGUJcLDwwkPDyc3N9fVplRIIiNh/35RbDUry3I/aNb//VmZTl99JaKybNQ4lFhm9erVZGZmGiOKt2/fbpT0mDhxIkFBQYSFhTFkyBBmzJhBXFwcoaGhbNiwgZiYGD755BNXmi8pg/j4wLJl4OFRwqCpyEiUiAhyfH3xiIgoOCCWITvlijZt4LvvICGhjE/iS8okdj1BqcW4YouzrUTiah55RDiybt0SHXloqEgtLIoyrZV15QpMmQIXL4rPWq2oSjhoEHX37uX06dPUyjezXrNmTUBUJyyM6OhoRowYQfPmzcnIyOC9995j0KBBNGjQwGHmO0snxNGkX0vgRmZlAJrUTClb14hE4gIc7kyW2IyqwpIlQiNSo4FLl0RmkCVnlrH/W6XQcWMTlCZNSt3e8saKFSvMsh22bNnCli1bABg5cqSxPXz22WfMmjWLjRs3kpCQQNu2bdmxYwddu3a12wbpSC7f/PgjtG8vMg4MFFGU0jom4ZtZQUH4JCbC66/DgQO2DaIlZZYqVVxtgaQiYpdbXK/XF+slkZQV7rsP+vYVM1L16tne/5ZZrawTJ+D55/OcWIGBYjAyaBAA999/P0ABR5FhlrhGjRpWDx0bG2usDLNnzx7++9//kpmZSb9+/Yg3VLVyAGVFJyT62C3j+8bBMkpVIpG4L/v2wX//K94rikirtxalUWb7Pzfm1KlTqKpq8RUSEmLczsfHh0WLFhEbG0tmZiZRUVH06dPHITa4c1qTxD6+/loE4f/f/+UVGbWLu2J6alCQcGplZgqR90GDICXFASeQSCSSPGR8p6TCc+mSSCU09bWqKsTEQG6ucGJlZhZ8ZWdrLC7XaiEtrQxpZamqEEIwOIAaNYLPPoMHHjBu8swzzwAUSFNYu3YtHh4edO/e3eKhExIS6NOnD6mpqezdu5f69evTtGlT5syZw9WrV3n88cdJS0tzyNcoKzoh0b/lDeaaNJW3YIlE4p5kZYkH3OxsEY3l4QHBwYVP7JQbrUiJpJz
z6aewaJF4f/lynsO6xOQT0/NOTBTL9Ho4fdqOMC+JRCKxjBRnKQPIsG7noKoir/v994UDqlYtGDlSrNPpRKadv39e9RZzFDIzPaxW3PP3h6tXxXHcvjCLosDChfDcc0KA8+23C6h73nvvvbz44ot8+umn5OTk0K1bN/bv388333zDjBkzrKbtrVq1iitXrrB//34zDa3GjRvz3Xff0a9fPzZs2MD48eOtmrdy5UoSExML1QgByoxOyN9/571v0kYO7CQSifuh18NLL4mJHq1WOLEaNLBc6MQUJTGeyigc+p8/kUc86PSQdNZLJO6EqsKqVbBuXd6yl1+GESPsPLBJaVMlLQ3P9HRx8/D0hIwMUTmpTIrHSqyxeTNs2QKtW8OwYWIeXCIpTRzmyLp27RrXrl0zCitbwhG5+hURqQ/ieJKSYP58kTZhYMcOcSPWaoXzadMmIV5oCZ0uh4MHj9K1a1c8rYzsq1YtA04sAzVrium5OnWsCnF+9NFHNGjQgHXr1rF161YaNmzI0qVLmTx5stXDTp8+nYEDB9KqVasC6x566CEiIyNpU0SZk8WLF3Pp0iXjZ2saIeBcnRBHEV31AWiuh2wdjfvK1EKJROJeqCq8+y7s2iWikr28RBdhyBQybGOISjabz7mdijY7mzSdQvhyXzp29pY6gBKJm6DXi8nbr7/OWzZpEowaZeeBTaOxAgJEiJeBunXFjHCZFI+VFMZvv8G5c+I1cKCrrZFUROx2ZH3//fe88cYbnDt3rtDtZNVCibsQFQVz5gghdwNPPw2vvWaeMlGrlvWq4TodREen0rx50TPUrkA5fpwOCxag1KgBnTubr4yJEamEb75pXku9Xr1Cj+np6cmcOXOYM2eOzXZ4eHhYdGIZaNu2bZHHiImJsfl8Bp2QRYZ4eTfk+nVA0RBYw5tqDbxdbY5EIpGYsWaNmGnPzhZ9YmCg6PN0OtOtLEQl5+ZApifgib9PDlfjvMtGVLJEUgHQ6+Gtt2D79rxl06fDkCEOOLghGsvHRzixDFodAQFCOM/Do4yX9JZY4tQp8dfLCwopXC6ROA27HFm7du1i0KBB6PV6goKCaNy4MYGy1LLETcnOhg8/hI0b85YFBcHs2dCtm+vscjiqimblSqqfOoVm5UoxaDA8bBw9CjNmiNmxmTPF1JwshVyqbN4snFm3b8uJSYlE4l789ResXi26hcaN4ZVX4OGHC25nMSp590+w5q5G4bNDqTp2iHRiSSRugE4nxrp79ojPGo34/MQTDji4IRorJUVEZN0l18sLTd26KFDGS3pLLJGQICRUALed1JeUf+xyZL399tvo9Xrmzp3LG2+8gZccsUjclIsXhd/mr7/yloWFwdy5UEjBvbJJZCRKRAQ5vr54RESIGbCOHeGrr2Dp0ryZsthYkWMpa+aWKhoN1K8vXhKJROJO3HOPeMCdPx/eeEOk21vCYlTyxz+Az9008CFtoGapmCyRSIpg1648J5aHh5BC7dnTQQePjISffgKDJhZAQAAZAQF4GiZK85c0lVFZxcIdtZJPn857X4RCiETiNOxyZJ08eZL27dsze/ZsR9kjkTiFr77Kc2J5eopZ5mHDymEwkolOQVZQED5pabBiBezdC99/n7ddt24ixlxWkSldoqNh/XpR+qtTJ6GQKZFIJG7EgAHiwaRYwr1ZWSJvH6B6deERk5QL3PEhWlI8+veHP/4QWrCLFxdUnCgxhjGnqoo0wowMCApCrVNHTJSaIqOySow7aiUb0gpBOrIkrsMuR5ZWq6V58+aOskUicRqvvirG2IaZqGbNXG2Rk7irU6AGBUFuLmqlSijbt8Mvv+RVInzxRVFTvdx58coA58/n1bj29paOLIlE4nIyM4W0jSnFrj718895aUWdO8uH1HKEOz5ES4qHoogIy2efhSZNHHhgk0qF+PkJR1XVqsKxZckIGZVVbjB1ZMmhrMRV2PUk27ZtW64aEmQlEjcif7VBPz/44AOhj1VunVi
mVWP8/dHodChxceIp5dYtEYo2fz6MHy+dWC5i1cZKrIh7lv8mPYS+rswtlEgkruXSJVFtatcuOw90+HDe+y5d7DyYRCKxh4QEOHvWfJlG40AnVm6uGFeajDnRaIQTqzD8/cX2higuiUM5flxhwYIOHD/u3IkEvT4vtbB6deuFsSQSZ2PX0+zkyZOJiIjgxIkTjrJHIrGL7GwhA/XUU3erw5kQHFxw1rlcYTozptPhGxcHOTlCsyA9XeRT9u3raisrNN8drcWGO4+z9OZwNA2DXW2ORCKpwMTFwYQJovDErFkiA71EqGqeI8vDQwhQSiQSp2LNaREXB//4B4wbZ64L6zCys2HaNBg0CA4cEGNOWyMw80dlSRyGqsLKlRpOnarOypUap/oJY2IgLU28b9NGBuBKXIddjqxBgwYxa9Ys+vTpw6pVq7h8+bKj7JJIik10NIweDZ9/nleUr8JIOuSLxsLLi1yD187PDypVElPucgbMZSQlQXySEEJt7H1Nqr1LJBKXkZws5jZu3BCfmzUTsjUl4tKlvJmje+8VfZBEInEa1pwW16/Dyy8LR0NqKsyb5+BhX2qquHHs3y9Ccm7dEpOlmZkFXprsbIvL0WqFF0RGZTmUyEiIiFDw9c0hIkJxqp/wjz/y3su0QokrsVsjy8DEiROZOHGi1W0VRSEnJ8ee00kkFlFV2LxZRGJlZ4tlnp7w6KMVaJbANBpLUUBVyaxaFW+dTpRlTEuTugQu5sIFjBdok6rx4OvrWoMkEkmFJDMTJk8Wkz8A9eqJmiAGGcViU7MmvPMOREQIR5ZEInEq5k4LDyIjoU4doRwRFye2qVcPFi1y4Dg4Ph4mThRhXqoqIv6rVBHOrXwogEdmJoq1k/v7w9WrovyprHhvN6Zz2UFBWaSl+ThVU/+RR8Rt/9Qp+UghcS12ObLUYnjSi7OtRGIr8fGi+N6hQ3nLGjcWgu5Nm7rOrlIlK0sMLlJSRLK6AUVBrVFDDCRktRiXc+FMFuQKZ36TYJ2LrZFIJBWRnBwh+vz77+Jz1aqiW6hWzY6D+vlB797iJZFInIolp8WCBUJBIjFRbNOoEaxaJeYxHUJsrMhDNmTeVKkC27ZZFUfK0ek4evAgXbt2xdPT0/Ixq1aVTiwHYZjLDgpSyc0Vfw8dUpw2dx0QIB4lShzFK5E4CLscWXq93lF2SCQFMOT/16ihWCwVfOQIzJ0rnFkGnnlGVCj09i41M13L6dPCiXXqlNAmyc0Vf/Mjq8WUGEeVHo8+mWx837iptpAtJRKJxPHo9SLVyCBn5ecnIrFklrNEUnbI77Tw8VH58UeFkBDhYLjnHli5UviaHEJ0tEgnNIR61awpPGmFlTbV6UiNjobmzUWKhMRpmDo2q1UTaeP+/kLOQs5dS8o7snSZxC0pSrRwzRqYNCnPiVW1KixfLvQnK4QTKztbjFRGj4bffhNPKAAZGdb3kdViSsSECRM4c+YMx48ft+s4F85mG983aS3TCiUSSenywQfw3/+K956eIh3/nntca5PE/QkPD6dly5Y8+OCDrjalwpNfDjUrS8utW4qxiGCbNvDRRw50Yv3xhxDdMjixGjaETz8t3IklKVXyK4vo9cJrJTX1JRUB6ciSuCVFiRa2apX3vnNn+OoreOih0rXRZZw5AyNHwvr1Ip0wNVXoLTVuLETdrSGrxbgMVYULF8XttrpHIoHNarvYIolEUpG4dg2++Ua812hE+v3999t/XGX7dvjpJ4s6OZLygaMmcyT2Y+q0yM2F27fFpJhWK+Y3R40qfBhYLK5cEaUPk+9Gk7doAWvXQm05fnEX8js2AeLjfbh4UUFVnTN3ffSowtat8PffeXPoEomrsCu1sLhVChs0aGDP6SQVBFtECzt3Fh12rVoinbBChM1mZ4tBxPr1ovdQVVE33ccHmjQRTydFIbWyXEJCAiTpK0FllcaVb8nZTIlEUqrUqwcffihE3l95RYj12oNy/Dgd3nkHjaqK/iggAPb
utZzaLpFI7MZ0bGyQQw0KyiIlxY/AQDEE/OQT6N7dQUO7+vXhiSeEB/yBB+D992VFUhdhTeIifzRWaqpCZqYWrVZo6Xt6wr59jlUU2bpVMeoSb94MISGOOa5EUhLsGnGEhIRYr0iRD1m1UGIr+fP/AwJUfvhB4ehRzLSyJk1ynY2lzqVLIm/ywoW8ZdWqidrp1avb5sQCqZXlIi5cQDzoBQTQZFhdCHW1RRKJpKLRti1s3QpBQXYeSFXRrFxJ9d9/R/H1FY75++6TTiyJxIlYKE5NQICOoCAxvHB4cWpFgddfhwYNYNAgKczuQiZMmMCECRNITk4m6O4N3JJjU6tV8fLKJTdX6LBmZ4vrYswYMc9gRZvfZlQVTp8Wz/0BAeLSkEhciV2phQ0aNLD4ql+/PlqtFlVVUVWVBg0aEBwc7CibyzxZWVm8+OKLNGjQgMDAQDp27MjRo0ddbZZbkD9MNjNTy82bCklJotJShZV2qlQJ7twR77VaGDtW9FxZWeJzZmaBlyY72+JytFrRs0mtrFKjWjUYPhzCwoSGhUQikTgbg6yNKXY7sQAiI1EiIlANfUlaWgXK7ZdISp+0NFE08M6dgkFRAQEqhuLUdqWSqSpcv26+TKOBYcOkE8sNye/YBJGgUbNmBvXqiWgsRRFD/nPnoFcvEZWbnl7ycyYkeBsfRVq3tn0OXSJxFnZdgjExMVy8eLHA69KlS6Snp7Nt2zYaNmxIjx49uHjxoqNsLvPk5OQQEhLC4cOHSUxMZPLkyfTv359UqTFhEo0FcXEKt2/7oteLG/GJE3nVliocVasKT16zZrBxIzz/vBA98fcX2iQpKWYvJTUVj8xMFAvrSE0V+129Cjqdq79ZhaBxY5gyRQwwZYV6iUTibP7+G559Vgi8O3S+wmS2SRXKwkJl2lJpYYlEYjdHjohxw+nTkJMjnFqWsEsGVa8X1R+GDoU//7TXZImTsaSNZUqlSipNmogILE9P8e+9dk2okwwcKNINS8KlS4HG93JSVuIOOC0O3MPDgyeeeILg4GA6dOhAx44dGTNmjLNOV6bw9/dn9uzZxs9Dhw5lypQp/PXXX9zvCPXVMoqoVCh0JePjzUUEAwNFJ/3xx9ClSzmXdtLphLPqqafMS888+ij06CG8egCbNgnxJQvk6HQcPXiQrl274mmt9HHVqnKWrbTIzhYjUD8/V1sikbiMMWPGsH37dtLS0mjYsCHvvPMO/fv3d7VZ5Y7r14UOVkoKfPaZCN4dPtxBB78726RWqoQ2MTEvIjgmBurUcdBJJBJJYqKQpfrvf0Xz0uvFkK0wlZYSyaDm5MC8eXklTSdNgi1bRO6YxC05frxgNFZ+FEUM84OChBMrKUk4QRWl5EPRy5fzHFmtW5fsGBKJI3F6UGC7du144IEH+Oijjxx63LfffhtFUWjt5JaUmprKnDlz6Nu3L1WrVkVRFNavX291+6ysLKZPn07dunXx9fUlLCyMPXv2FHqO8+fPEx8fT2hoxRbO+eor+P57yMgwd2LVqiXysKtUqQAF986dE9FWq1bBe+8VXG9wYoH4YZo3t/pKrV+/0PXUrFl636uic/QodO0qplW/+87V1kgkLmHKlCnExMSQnJzMp59+ysiRI7ljyFOQOIT4eOHEun1bfG7VSszAOwSTMADF0ElrNKJfkqnqEolDUFXYtQsGDxZ/09JEIH1AgKjrU7my9X2LHZWVlSV0sAxOLI1G3ECkE8utWbPGejRWfrRaCA4WDi2NRuinhYWZb2OLhPXx4wo7djQ2RgRKR5bEHSiV7NZ69epx7tw5hx3v6tWrvPPOO/iXQvWM27dvM2/ePM6ePUu7du2K3H706NEsWbKEESNGsHz5crRaLf369eOwlZy4jIwMRo4cyYwZM4wCfhUNVYV33xV9Z1ZWXs51pUpQu3YaVaqIwbHd+f/uTE4OrF4Nzz0nnFkgYn+jo11rl8RusrIg7o84cc3Gx4O3t6tNkkhcQvPmzfG+e/0
rikJ2djbXrl1zsVXlh/R0ePVVMBSUDgmB5csdGAhqKspieJoxTPuX+1kmicT5xMaKNjxrloisUlUReO/nJyQKbAmit3msnJIiBt6GEnSenmICdcAAR3wViRM5cqTwaKz8GG7TaWkwZIj5OlUV+mvz5okscUuoKnzwgYY7d3yIi1OMjjGJxNU43ZGlqiq///679fSmEjB16lQ6duzIAw88UOS2CQkJbNmyxer6L7/8kjRrCedAnTp1iI2N5dKlSyxatKjQc0VFRfHVV1+xYMECFi1axJgxY/jf//5Hw4YNmTZtWoHtdTodQ4YMITQ01CzVsKKhKHDliphx0mrFc36DBlCvnoqHh2q2XYnz/92Zc+dg1CjhyDKU1g0NFTkhjRu71jaJ3fz6K/Rb0IUe5z7mq/jeYmpMIikDvPPOOzZFIxcnEnn8+PH4+vry4IMP8sgjj9BGCm04hOxsmDoVzp4Vn2vWFKn6hUVvFAtTUZbkZPEQDGLmqVq1cjzLJJGUDpcvwzPPCCeFgRYthPOqVq3iOS2KHCvfuSOKBv36q/js5wcrVgj5Conbk5Zmtc4T2dmaQus8rV5tfps+cAB+/hm2bROKJqtXi8wYUyIj4eBBBUVRSUszVz2RSFyJUx1Zt2/fZty4cZw/f56OHTs65JgHDx5k8+bNLFu2zKbtV61axTPPPMPWrVsLrPvkk08YPnw4GzZssLq/t7c3tWvXtulcmzdvRqvVmmmB+fj48NJLL3H06FGuXLliXK7X63nuuedQFIUNGzaglGvRp4KY3kRVVaRBKIqQ2Gjc2Hq4bLmKysrJEfHBplFYGg28/LLQyLrnHtfaJ3EI0dFAdjapel/8NJlQv76rTZJIbOK9996zKRq5OJHIq1atIjU1lb1799K7d+8K1/c5A71eRHBERYnPgYHCiWXj0MU2TKOxTKNKq1Urx7NMEoDw8HBatmzJgw8+6GpTyjXBwWC41daoAYa584yMkjstLI6Vr1+Hl17KG3dWriwEaG0IDpC4B1bqPJGaqpCZ6UFqqmJznSdD2iqIa2f1auHQ2rZN9C2GOYz0dNBqVfR6OH++HDyDScoFdom9Ny4kWiQlJYX4+HhUVcXLy4s333zTnlMBkJuby8SJE3n55ZdtnsWdPn06UVFRDBs2jJ07d9KzZ08AtmzZwtixYxk5ciTjxo2z2zaAX3/9lWbNmhEYGGi2vEOHDgCcPHmS4LvRGGPHjiU2Npbdu3fj4eE0zX234/ZtUUWpefM88dnISPEKCSk6LT//eLlTJ2db7CT+/hvmzIG//spb1qQJzJ0rpuAk5YYLFxDhEkCTKvEyHltSZjh37hxNmzblxIkTVh9iDZHIixYtYurUqQCMGjWK1q1bM23aNI6YhhfcRavV0rNnT5YtW0bTpk3p16+fU79HeUZVRTbQTz+Jz97esGyZg4N5TaOxqlcXHXVGBpkaDX7VqqFACVWmJWWBCRMmMGHCBJKTkyusBIYzyM01lz1VFPjXv+A//4Hx40Uk1ty5eU6LgginhbXJAFOnhTElMTNTTJbGxYnPtWuLNtuwoQO/mcTZbN1q+XlJp8vh4MGjhRZ6yl/n6YknRBGtNWtg82ZxXd6+LVINv/xS1Jk6dEhMkEAOOp2WCxfK+DOYpNxglwclJiam0PVeXl507dqV+fPnG5059vDRRx9x6dIl9u7da/M+Hh4efP311/Tt25eBAweyd+9eUlNTGT58OI899hjr1q1z2IxwbGwsdSxU7TEsu379OgCXLl1i7dq1+Pj4UL16deN2u3bt4uGHH3aILe5GTg58/bWY9ElPh/37he51tWrm42NbKBfj5XPn8pxYGg2MHi0GF7KKoNPJyspi3Lhx7N27l8TERFq2bMnSpUvp5KQeOfpvPeiEkmajUG0ZvWAlFZFatWoVuU1hkcj/+te/uHLlinECJz85OTn8/fffDrO3IpKYCBER4r1WK6I42rZ14Anu3BFp7vlKZKn16pGTlJS3XbmZZZJ
InM/vv8P8+TB5MnTunLe8bl0wVSIppDh1iZwW+PiIlMK33oJGjcRAWhb+KXM0a2ZwLJmj00F0dCrNmwvJM1upXFlo/j/7rNBVPHBALD93Dn78Ufg/GzVS8fLKJDDQi+vXlbL9DCYpN9jlyLp48aLVdV5eXtSoUcNh0UZ37txh9uzZzJo1ixo1ahRrXx8fH7Zt20aPHj3o168f2dnZhIWFsWnTJodGQ2VkZBiFbPOf37AeoGHDhqjFiMkMDw8nPDycXIN+UhkjKkoMrk0vFw8PkXJ1/nzRJWTzUy7Gy489JqbQr1wRU24tW7raogpDTk4OISEhHD58mPr167Np0yb69+9PTEwMAQ6u1KOqcOHPbECljudt/BoV7RiQSMoStkYiJyUlsXPnTgYMGICPjw9bt25l3759LFiwwOqxs7KyyMrKMn5OTk4GhL6kzjQ34i6GZZbW2Yszj23POQMCxATR5MlanntOz4MPqjjKROXoUTRvvoly8iQAarVqxnwS1eSvsciwnx9KYiLqihXk3n+/S59wXPH/shVbbHNHuyX2k54u0n6/+UY0pXfeEc4qawUZatUSL0uU1GnBk0+KHTp3dqCInqQ80KABvP8+/PILLFkCJ06IaEBFyasmXy6ewSTlBru8OA1LMRR15syZVK1alYkTJ5Zo/8DAQBYvXswjjzwCwLJly/D19XWkifj6+poNug1kZmYa15eEshrWfeMGLF2al/IA4gb41FMibDooSMhDpaWJwfjdn8mIqubl/+cfD5vm/7v9jEBuLhw+DN265S1TFJFa6OMjo7BKGX9/f7PiCkOHDmXKlCn89ddf3H///Q49182bkJ4korGaeF+V+liScoetkciKorBmzRrGjx+PqqqEhobyxRdf0L59e6vHXrBggUVZgh9//BG/QkrxWROadwTOPLY95xw6VEFVVf77X/vPp+h0NNq1i+ADB9BmZuKXnEyulxfpdx2JpqTkW6b18EC7dy8nli4loXlz+42xE1f8v2ylMNvS09NL0RJJaRARIRxXN2/mLataVURVOqyyqCWuWhh7yHRuSSHcdx9s2CDSCi9eFM9rvr5GlYzykRkjKReUCXGm8+fPs3r1apYtW2YcFINwEOl0OmJiYggMDKRq1apWjxEdHc2IESNo3rw5GRkZDBo0iIiICIsD8JJSp04di6XEY2NjAahbt67DzuXOZGcLrfJPPxUpgwbatBEh0wYJqOxsEZDk0Px/dyM6WkRcnTkjpje6ds1bZykuuIzw9ttvM3PmTFq1asUff/zhtPOkpqby/vvvc+zYMaKiokhISGDdunWMHj3a4vZZWVnMnj2bjRs3kpCQQNu2bZk/fz69evWyeo7z588THx9PaGiow+03CL0DNPa+BsGuf7CTSByJrZHIgYGB7Nu3r1jHnjFjBlOmTDF+Tk5OJjg4mN69exeIAAMRxbJnzx569erl0ErJzj52cc957pzICnKKGZcvo501S6S+BwaixMeDlxdKaCiBJhHsqqqSkpxMpcBA8z46MBDl+nW6/PYbua+95rInHFf8v2zFFtuSLTgNJWWThARYvBh2785b5u0tJnSHDRPqEk5j0yZx8rfegj59nHgiSXkjKkqMYUNChBPLFBmVJXEXSsWRlZiYyKJFi3j77bdLtP+1a9fQ6/VMmjSJSZMmFVjfqFEjXn31VauVDGNjY40Dhj179pCRkUGXLl3o3bs3Bw4cKNQBVhzat2/Pvn37SE5ONhtkHzt2zLi+rHL8uMKCBR2oUUMxy+e3xH/+Ax9+mPe5alWYOBEef9y8w/byckL+fymhHD9OhwULUGrUwOIPkpsrvHkff5xXHuTdd8XUhdt63Wzj6tWrvPPOO/hbKy3pQO7cucO8efNo0KAB7dq1Y//+/YVuP3r0aDZv3szkyZNp2rQp69evp1+/fuzbt48uXboU2D4jI4ORI0cyY8YMp0Q7mgm9e1+FYOsONYmkLOKsSGQQVYMtOck8PT0LdU4Utd4
enHns/OT1u1507iyGa2fOwLhxorrZwoUFHzBKjKrCzp1COT4jQzypZGWJ8OfgYJR839mQTqgoChpTZ9XdJxwlIgLNzz+7/AmnNP9fxaUw29zVZontqCrs2iVStUzl5MLChKh7vXpOPvmaNaIEHcDs2aKgkBMm7CTOp7QlZvLX91CUglUKZVSWxB1wqiMrOTmZ999/n+XLl5OSklJiR1br1q3ZunVrgeUzZ84kJSWF5cuX06RJE4v7JiQk0KdPH1JTU42aOAC7d++me/fuPP744+zdu9chD+WDBw9m8eLFrF692li9KSsri3Xr1hEWFmZV8NbdUVVYuVLDqVPVWblSQ6dOhd+whg4VDqr4ePF+zBjr1Qidkv/vbFQVzcqVVD91Cs3KlRT4QaKj4c034fTpvGUhISIyq4w7sQCmTp1Kx44dyc3N5fbt24Vum5CQwL59+3j66actrv/yyy8ZMGCA1fZXu3ZtYmNjqV27dqGV06D41dN0Oh1DhgwhNDTULNXQkURHAzVrQOXKNH7tWTmIlJQ7ZCSyc7DU7165ApMmCZ2do0dF1POECQ44WVoaLFgAP/yQt6xBA1Gl5cYN4cyykPuvyc6m7Of+SyTO4eOPYe3avM+BgTBlipjUdUSTsDqhqteLKKxNm/KWPf+8cGRJyiSlLTETGVm0frGMypK4AyVyZP38889s376dmzdvUqtWLQYMGMB9991nXJ+ZmcmSJUtYvHgxSUlJqKpKSzvErKtXr87AgQMLLDdEYFlaZ2DVqlVcuXKF/fv307RpU+Py9u3bs2PHDnr37s2GDRsYP3681WOsXLmSxMREY1rj9u3buXr1KgATJ0403lTCwsIYMmQIM2bMIC4ujtDQUDZs2EBMTAyffPJJMb+1+xAZCRERCr6+OUREeJjdsNLTxQzxAw/kbe/nJ8q2VqtWTvvNyEiUiAhyfH3xiIjIu4PjX4S1AAEAAElEQVTr9SIc7aOP8hLJNRoYORL+7//KhRPr4MGDbN68mV9//dUmvbpVq1YxZ84cvvnmG5566imzdevWrWPs2LGEh4dbbX/e3t4WU4gsUZzqaXq9nueeew5FUdiwYYPDKpfm59IlQKNF8dXSaERn8HHKaSQSl1GeI5FdSf5+d9cuEemcmCjW33uvKHTrEObMEaWEDTz5pPCYDRhgNfdfATwyM63fO8tE7r9E4jwGDBBDwsxMUaV76lSRReAQrE2o5uSI9myaxzhlCgwf7qATS8o7+aOxjKSl438jVlTruhudIKOyJK6m2I6sqVOnsnTpUrNlb731FrNmzWLu3LkcP36cZ599lkuXLqGqKg0aNGDu3LmMGjXKYUYXh+nTpzNw4EBatWpVYF2XLl2IjIykTZs2hR5j8eLFXLp0yfh5y5YtbNmyBYCRI0eaecc/++wzZs2aZabRs2PHDrqaaiOVIUxvaEFBWaSl+RAeLkKj9+6FZctEyPS330Lt2nn73S1YVf4w+UGygoLwMcw6164torBM9aIaNhRRWEVcX2WF3NxcJk6cyMsvv1xkmzEwffp0oqKiGDZsGDt37qRnz54AHD16lMWLFzNy5EjGjRvnEPtsrZ4GMHbsWGJjY9m9e7dDK5fmZ80a8Sx37ZrQ9ZdIyhulEYlc1iv3Fpf8/W5Kig/jxuWleISGCslFC1mXJWPCBBHi5ekJ//43GDQFC8n9z9HpOHrwYKGp/y7L/ZdIXEB2tvnlXreu0IWtXNlcHtUhWJpQvfdecUJD9LlGI5xajz/u4JNLyjOWo7FUlFtxeGRkoNyKgwB/QJFRWRKXU6wnuJ07d7JkyRJACLc2bdqU5ORkoqOjeeutt7jnnnsYN24cycnJVK1alZkzZzJ+/Hi8nDSQKUovB8DDw8OiE8tA27ZtizxGTEyMzTb5+PiwaNEiFi1aZPM+7ozhhhYUpJKbK/7u26fw9NPiAd3ABx+Iaizlnrs/iBoUBLm5qEFBKIcOCcGD8+fFNooCI0YIIROHPWm4no8++oh
Lly6xd+9em/fx8PDg66+/pm/fvgwcOJC9e/eSmJjIkiVL6NOnD+vWrXNYNJSt1dMuXbrE2rVr8fHxobrJdNOuXbt4+OGHHWKLAa1W+DNLscCrROIwVq9eTWZmZqHRyKURiVxWK/eWFNN+NydHOLSSkoQeVrNmsHIlVKrkwBM2aiRSC0NDxdO3gSJy/1Ojo3G/3H+JpHTJzhZpvj/+KCKwTCsQDhjghBNamlBdtkyc+NQpsY2Xl9BlLaOT6BLXYD0aKw1SU1EVDUpqqvjsL6OyJK6nWI6sNWvWAGIAu3DhQqMI69mzZxk0aBDPP/88OTk59OjRg6+//trsIVFS9jC9oVWrJm5UGRkKcXFikB0SIm5YnTuLzLlyT/4fJDlZ3MGTkkT6hJ+fuPPPnQs2OEjLEnfu3GH27NnMmjWLGjVqFGtfHx8ftm3bRo8ePejXrx/Z2dk0a9aML7/80qHRULZWT2vYsCFqftXKQrArGiQ6Gg4fFqWvW7eGmjWLfwyJxEWsWLGCy5cvGz9bi0Yub5HIrsS0m6laFS5e9EGnE5nrSUmwYkW+B4ziEhEBn38uHnxNJxnl/0oiKUBRhY5++00UBDTMd3/4Ifzzn042ytKE6n//K9TjAwLEuHTpUjCRfJFIbMFaNBZxcaBXUbVa0RnFxUEjGZUlcT3FKvr6888/ExISwtKlS80eGFu0aMGyZcvIyckhMDCQ7777TjqxygGmN7SUFIUbN/xJTBRRJqmpwm+zZAksXy50Ycs9hh8kMBAlPV0sM9zBjx8XqvZfflnunFggCitUrVrVJl0sSwQGBrJ48WLi4+NJTU3lpZdesquamSWcVT1twoQJnDlzhuPHjxd/519+EeGK06aJB0iJpAxx6tQpVFW1+AoJCTFuZ4hEjo2NJTMzk6ioKPrIUu8lwrTfvXFDITPTA0URQU8aDdwNjis+2dmifNqrr4q66h984EizJeWY8PBwWrZsWWixlfJI/oILpvNfaWmiwOdLL+U5sbRaESlZjHmykhll8HQbiuT4+4vo/zt3oEoVUalQOrEkxcRwaaWl5dX3yMyEzPh0MlN0ZCq+ZKre4m+KTiy/u41pfQ+nXv8SST6K5ci6desW9957LxpNwd06duwIwMMPP0wlh8a8S1yBaV+Zni4Gz3q9cM9rtULvJzgYHn64goSRqqp4CEhIED/GtWtiVgLEICIrS1R8Kod6IOfPn2f16tVMmjSJ69evExMTQ0xMDJmZmeh0OmJiYoiPjy/0GNHR0YwYMYLmzZvTsGFD3nvvPWNVM0dRp04di8d0VfW0r76CVV9VYXdSR7L1HqLBSCQSiRXyP6Ma+lZFEZNFubklfFC4dAlGjxYTLQauXxcHlEiKwK7JnDKMecEFhchIsfzQIRgyBL75Jm/bVq1EoOOYMU4eE1sKmVEUEabp4QHjx8M99zjRAEl5RacTlXEN9T1SUiAlRSXlZjopuX6kEECq6k8KAeLzzXSxPkVsb1rfQyIpLYqV15OdnW1Vn8IgsFzctCOJe2LoKwMC4ObNvOWVKgnJjKwsEWBSIcJIL1+G+fNh+3YxYNBqQVXxTEsTs1/lPK722rVr6PV6Jk2axKRJkwqsb9SoEa+++qqximh+YmNj6dWrF56enuzZs4fk5GQ6d+5Mv379OHjwIFUdVMbH3aqnbd8Ofx1qhiZ1HIfveUk6siSSElJRxN7zP6PWqqWSnKynbl2toUhU8boZVRU3ooULxbQ5iMmWyZPFk3iFmIWSSIqPpUJHS5YIGbk9e/K28/ERtRKefVZETJaaUR4e5o5og1DRt9/CwIGybUuKjZeXhfoev54UubI1vdH7+ZGakkJApUpo0tPFdfju+6LIwF1kfQ9JaeO8cl2SMkt+sT9vbzGhW7lyFvXq+aIoCh4eFUDc79w5WLdOlGeMjhYRWJ6eRsdVjqm+UzlWO2zdujVbt24tsHzmzJmkpKSwfPlymjRpYnHfhIQE+vTpQ2pqKocPH6Z+/frodDr
mzJnDm2++yeOPP87evXvxN4TI20FpVE+zFb0eLl4EdNnU97qJl48GpJNfIikRFUHs/dYtUfjWVGRXo4FatdKpVCkQUIrXzaSmigosP/6Yt6xRI7GsaVMnfhOJpOxjqdDRjh0KDRtidCp37Cjq/JRasHdkJPzvfyJNIiUFJSsrrxxyOZ9QlZQOZvU9VBXmvw/6M1C1HnogOSuJQJ8gND6IzJSd78PQjeXqmUdStii2I+vvv//ms88+K9H6UaNGFfd0EheQf1bY1xeaNFFJS9MBQmeoXPeZv/8Oa9fmlTBOTRUvT0/hjKhSBVWrRU1KytunHP8g1atXZ+DAgQWWGyKwLK0zsGrVKq5cucL+/ftpavLw1LhxY7777jv69evHhg0bGD9+vNVjrFy5ksTExEIrpwGlUj3NVq5dg+xsFbKzaRJwVYiwOn26ViKRlEX27YPXX4c//hB1IUyfCfK/t6mbOXUK/v1vc0Gtp5+GKVPyHnwlEolFrNX18fYWDuc6dUR7feyxUnx+j4sTeYu3buVNqCYkoJgWkCnHE6oSF2D6MJiTI6IADZTjZx5J2aLYjqyIiAgirIgWK4pidb2iKNKRVQZISxO61PlLr1oqLldu+8zjx/OcWKoqSkX5+EDjxiKt0LA8P+X2Byk506dPZ+DAgbRq1arAuoceeojIyEjatGlT6DEWL17MpUuXjJ+tVU4D96meFh2tiI5fVWnsfU2mFUokkgKkp8PixfD990IwOjdX6JJUrmx9H5u6maNH85xYlSrBzJnQs6fjv4BEUg6JjBTO5fw6dbVrCz/SG29AqdWyyM4W2nZLlogsAa1WGOPjg1qzJqqpIJF0Lkgchak3V6OBq1dRatTIewYC+cwjcQuK5chq0KABirxQyy3JyTBsmChoZF561TLlos/U68WN2rSi3ZAhsGEDBAWJm/PHH4spONMbuCXKxQ9iO/v37y9yGw8PD4tOLANtbajwGGMoCWQDhuppixYtsnkfZ3DxImIACjTxvgr1LadeSiSSislvv8GsWcLflJYmgn4rVSo6Tcmmbuall+DYMbHx/PniCVwikRRKejrs3g1Tp4oAqGrVzJ3Khuf2jRuhd+9SeG6PiBBFhi5dEiraer2YVK1ZU+izGiZaTZHOBYkjMERj+fsLoWRVhbg4NNWq5W1TwZ55JO5JsRxZxXmglJQt7twRxU4iI0VfmZZWMJLUEmW2z8zOhp074bPPoFs3IX5rIDAQ1qwREVgvvCC2tVXfqMz+IBJHcuGCYu7ICu7uWoMkEolboNOJ7mX9etHXqirEx4Ofn5CwsqXLKNDNxF4394BptSKCIyCg6AkYiaQCo6oiE/e774SI+61bwmek1Qrnsqmeeqk+t8+ZI8aokOfprlxZ5B4X1qalc0FiL6bRWKmpeRkoVaqgz6/kLp95JC5GirZIuH5dTOD+9pu4Z3l5iQF1UU4sKNhnuj3p6aJG8pNPwttvi1qz335bcFbrnnvgxImCZY6Losz9IBJnEB0tHFla9DTwuiFTCyUSOwgPD6dly5Y8+OCDrjbFLi5eFHMjn34qnFgg/E9eXkJgt0A3k5aO/41YSEs3W5zXzahETvxc6F+dOWO+b1CQdGJJJFZISBBDwWeegRdfhG3bxPDw1i3RNn19LbdJf3/xfB8ebllhwmEYItlVVcwqBwZCgwa2telSM1JSLjFEYymKWbVb1VSPzYB85pG4GOnIquBcvAgvvyz8ObduiT6vXj3xNzPT/JWdrSmwLDNT9KtpaW7eZyYnw+rV8MQTsHSp+LIGWrcu6MgyzEikpYkvaOGLa7KzLS4vGz+IxFnk5sLly4CHJw1qZOBZNVDMokokkhIxYcIEzpw5w/Hjx11tSonJzYXXXoM//xSftVoRBR0YmNdtmHclKlk3EshJ15N1I4HMTNW8m8nNJu1GCuFfVkHV5YjyaRkZrv2SEkkZ4Pp1IdS+dOldGYC76PXiFRwMTZqI7L38NVqc8tyuqsLxZMqgQdC
5M4wcKcI4q1WTE6oS52N49klPF89NBurWtX79ScepxIUUW+xdUn44exZeeUX4cFTVUOpb3I/y96mgkJnpYVUjzd9fhGPrdGJ22W2IixPTblu2FBzkd+8upsctaTjpdMK75+8vwtTyoQAemZnWNePc9geROJvbt33R6UCpWpUmj1aFd/e42iSJROJitFqYPh0mThQRz2+9JbLXN2yw0s1kZUGqgqoEoqQqcDsLvH0AVfRlSUn4A1eza6Hz9MNr5EhZkVBSrjl+XGHBgg7UqKHQubPt+2Vnmw/D6tQRbe/cOfH53nthwAD44guIjRUOrMJwaDbVuXOwcKG4Kfz733nLtVpYvhyee05MjAYE5EXHGFDVvAnV/EaYTqjKlC+JrRiisUwfAqtVEyGK1pxUMp1V4kKkI6uC8ssvQhYq/W7GQsuWojCKtfuUTpfDwYNH6dq1K56enha3qVrVzXw2er3ImYyNzVum0UDfvjB6tBjJWMPLCzZtEvHnFsjR6Th68GChv4f7/SCS0sDDQ2XwYJVLl8AGLXuJRFJOyf8A3akTvPcedOkC3t5imcVuRlVh2izUo0dJ8fWlUkYGyoOdhG7Oxx/D0SNwN8ujauMqeC35FEJDS+U7SSSuQFVh5UoNp05VZ+VKDZ06Fe6byc6GgweF9lVCAvznP+YVCEeMgAsXhMpEw4ai0OeRI6VY6CgpCVatgq1bxVj1t9/gqafEYNyAnFCVlCaGaKw7d/LE4by8RBWSzEzpOJW4JdKRVQHR60XJb4MT6957RYh1QID1fXQ6iI5OpXlzsOa3cTTK8eN0WLBAlHwtzvSbAY1GVCD84ANxM37ySTG7VVRZKAO1aomXJXQ6UqOjKdUfRFImqFYtk+ee0+PpKfVpJJKKSGoqLFoknlWXLjUf0/fsab6txW7maCT8vgl9NR+Sc3MJ9NOi+flzePWImCk3BF4NGiTyFWUklqScExkJEREKvr45RER4WHUgRUcL59XOneaKEX/+CS1a5H1+/PG896ba1tWr22ZPiaOycnNFhsCHH5qnbgUHi4G2KXJCVXKX8PBwwsPDyTWtPuBodDr46y/z69DEiSodpxJ3RDqyKiAajSho9NJLQgdg4UI3HAerKpqVK6l+6hSalSspcvrtt99Ejsbrr4u4cQODBolZgmefFR26RCKRSCRO4tdfYfbsvEDgrVuFFrvNmD5VV6sGyckoOp3QdczKgpAQIaw1axY88ogzvoJE4laYNomgoCzS0nzMHEjp6aLi4HffiQqE+alXz6ovCMjLprKnro9NUVm//CJmkQ05jSDKlb78MgwbZnlSVE6oShA6kRMmTCA5OZmgoCDnnMTLS8zAvPmm6G8GD4ahQ42rpeNU4o5IR5YLyMrKYty4cezdu5fExERatmzJ0qVL6VSKecW1a8Mnn4hxslv2f5GRKBER5Pj64hERYXmkoKoiHnz9ejFAAPHFpk3L28bfH8aNKzWzJRIAZdcuUVwgOFjosHXo4GqTJBKJHRSlz6PTwUcfwWef5aXo+/mJV7Gw8FSt+vqieHiImfG6dWHNGtHXSSSlQKlEgxSCoUkEBank5oq/hw4pHD0qlu/cmZdhYMDLS/h5n3wS7r+/oGi7AdO6PlZkqIyFjkqcTRUXB8uWwY8/mi/v10+I5tWoYetPIZE4l+7dxXj1yy9h1CjzB0TpOJW4IdKR5QJycnIICQnh8OHD1K9fn02bNtG/f39iYmIIKCy/rwgKG2j/8AP06JGnywFuPA42mX7LCgrCJ/9IQa+H//0P1q0TYbCmHDsmQrdl2XGJC8jNhZQUT0iIhhs3xGvECFebJZGUaVz9IF2UPk90NMycaR5o0b49zJtneya78USWcpx8fESHHR8vorGsRWhIJE6gVKJBrGAhQBF//zyJqXr1zJ1YTZvCwIGiKmFgYNHHL0KGCocUOoqMNHdiNW8usgfatSvaQImktPHzEyk7EkkZQDqyXIC/vz+zZ882fh46dChTpkzhr7/
+4v777y/RMa0NtA2DgPXr4aGHRFSz2zvS706/qUFBkJuLGhSEcugQHD4s4sPXr4fLl833CQkRAu59+0onlsRlXLoEc+d2Zl1OU4aqifyjxnciKksikZQYVz5Ig3V9Hr0evv4aVqwQ4tIAHh7wf/8nJrOtRYFYZft2EV6SnS06b9OH52rVxEzU4cOyMpSkwhAZKUTbvbwgNlbBz888rW/mTLFN374i+qpFi+LpTBchQ+WYQkdPPAGbN8P16zBhgjC02DcHicRJ6HTiuUlek5IySLEcWZfzOw+KSYMGDUq87+nTp5k7dy4///wzN27cwM/Pj5YtW/L666/Tv39/u+wqjNTUVBYtWsSxY8eIiooiISGBdevWMXr0aIvbZ2VlMXv2bDZu3EhCQgJt27Zl/vz59OrVy+o5zp8/T3x8PKF2VB2yNNAOCxMVkr79VmwTESFe3buX+DTOx9r0W3y8yNWuXdt8lNKypXBgde8ub8ISlxMdLa7NpBQt2kp6cU2aarZJJJIyhTV9nnvvhX/+UwQBG2jUCObPh3vuKeZJzp8XEcYffyzCQjw9xZN1fl3HEitMSySlQ1EpuLaSliYcWP/8pwhs9vAQl7uieFC5cl5T2L8fdu0qQQqvCUXIUBWv0NGlS8LZbBqJrdGIG0PlyraFiUkkpcnKlaIawpw5xQwhlkhcT7EcWSEhIdarFRSBoijk5OSUaF+AS5cukZKSwvPPP0/dunVJT0/n22+/ZcCAAXz88ceMGTOmxMcujNu3bzNv3jwaNGhAu3bt2L9/f6Hbjx49ms2bNzN58mSaNm3K+vXr6devH/v27aNLly4Fts/IyGDkyJHMmDGjxDPNlgbaK1bAjh2we3fedtOnu7kTCyyrbiqKGNBfvizKwAYEwAMP5GkPycG8pBg4U6Puf/9TiI31w1NNonH1a2J0LIUvJZIyizV9nl9/hSpV8rYbOlTI3Zim7xfJyZMiwvjwYeHASk0VM+PWoopLpDAtkZQORaXgFkVSEhw4IJQjjh0TvtyLF4UfyHCcjAzx2GJoCocPi1o/zmoKNlfPTkuDtWvhiy+ExkDr1uapg3ZM5EskTuPXX8U1q6oijHj7dvD1dbVVEonNFMuR1aBBA4uOrEuXLhnfG5wxSXfr3iqKYlckloF+/frRr18/s2WvvPIK999/P0uWLLHqyEpISGDfvn08baVs0JdffsmAAQPw9/e3uL5OnTrExsZSu3ZtTpw4wYMPPmjVxqioKL766isWLVrE1KlTARg1ahStW7dm2rRpHDlyxGx7nU7HkCFDCA0NNUs1LC6WBto7dijUrSt8PhqNKELx2GMlPkXpoKrw/vtiNKPTiSk3AwEB4glBUeDTT6FtW9fZKSnTOEujTlVhxw6FzAwP4jRBNPK6CsENHWi5RCIpTQrT5wkPFxo9166JVMKwsGIc9MgREYF18mTeslu3xPtatcTEjVabpxpviozKkrgp1lJwbWHhQpF9p9eLz4YmodcLibjAQDGPmZOTCYjJIac3BVuqZ+v1QoR2+XK4cydv+YYNojy4ROKuZGSIh0NDP/P889KJJSlzFCsXKyYmhosXLxpfFy5c4IEHHqB69eosXbqU+Ph4EhISjK9ly5ZRvXp1HnjgAS5cuOBw47VaLcHBwSQmJlrdZtWqVTzzzDNs3bq1wLpPPvmE4cOHs2HDBqv7e3t7U9tGVfTNmzej1WrNnGo+Pj689NJLHD16lCtXrhiX6/V6nnvuORRFYcOGDSWOdDMdaPv7i8937ihkZIhBgKen0MVyWyeWqoq0irVrhZHbt4uyMUlJeaIjIAYPtWsLVc+0NNfZKynzGDTqGjRogEajYejQoXh5efFX/sIBxSQyEi5dUlAUlVS9H1eza0H9+g6yWiKRlDamAcJpaQoZGVqzoKjTp8W8is1OLBC5/q++mufEAvGkrtEIrccaNQrXecwflSWRuAH5MwOyssRnS77YGzcKLq9WLc+JBSKQWa8XYu733CMy9P3
9zXdyelMwqZ6tGKpnm3L2LLz8MsyenefE8vISQtlvv+0EgyQSB7JihahUACJ6UBYmkpRB7BIVWrp0Kdu3b+d///sfr776KpUrVzauCwoKYtKkSfz0009s27aN999/315bAUhLS+P27dtcuHCBpUuXsmvXLnr27Gl1++nTp/P4448zbNgwfvrpJ+PyLVu2MHbsWEaOHMm4ceMcYtuvv/5Ks2bNCMyXA9+hQwcATpoMXMeOHUtsbCzffPMNHh4l19w3HWjr9RAX50dGRl5Z4H/8A7p2LfHhjRjDq48ft/9gubnw889iturJJ2HYMPjwQzh+XHyJu1pXSn6Hlb8/hY6OJE7j9OnTDBkyhMaNG+Pn50f16tXp2rUr27dvd+p5U1NTmTNnDn379qVq1aooisL69eutbp+VlcX06dOpW7cuvr6+hIWFsWfPnkLP4QiNOlUVYwKdDjw0uWjQ8+Htwaj1pdC7RFJaGPR5jh+3PzQjLg5mzIDbt8VY/+pVSEjwISfHvCsqNr175wn6NGoEc+dCw4ai07Y1IlT2hRI3wzQzQFEMKbh5vp+YGOH0HTlSaJ9HR5vv36OHmPcZNUoEKzZuLILwq1Ur/LxOawr5qmebnSQhQTiqRo2C33/P26d7d/jmGxg3Tka2SNybqChR5QBEQ5szR+oMS8okdlUtXL9+Pd26daN169ZWt2ndujXdu3dnw4YNvP766/acDoB//vOffPzxxwBoNBqefvppVq5caXV7Dw8Pvv76a/r27cvAgQPZu3cvqampDB8+nMcee4x169aVOBoqP7GxsdSxIOxsWHb9+nVApGKuXbsWHx8fqpuU2N61axcPP/ywzefLX6n75k0FnU6DVisisby9hdbACy/YGXJtS3i1rej10L+/eEowJS1N6IP4+oq0ioAAVG9vEZllQOqDuAxXadTduXPH7TXqQFyOBw6IZ1ElV8Vfk8Gh1HuJTIlFXqUSifOxV58nJ0c8kx45Il6//CL0eRTlboBUrh50OcTf0FMr2LvorigtTURf5eaKTthAYKCIyKpWTcwyHTsmqrCY6kIWhewLJW5EYTV6Xn1V+GkvXjTf53//gyZN8j43bgxbt4pL++hRoX1lS5NwWlOwVj37++9FGmFKSt62DRvC66+L/EaJxN1JS4N58/I+T5woNdwkZRa7HFkXLlygrQ1aRdWqVePQoUP2nMrI5MmTGTx4MNevX2fTpk3k5uaSbZqCZgEfHx+2bdtGjx496NevH9nZ2YSFhbFp0ya7oqHyk5GRgbcFpVcfHx/jeoCGDRuiFmPqKDw8nPDwcHJzc82W59dFr1lTJTk5F0XR0qCBiA5xSOduEl7tYQivtuWA8fHw999CkN2ARgPNm+c5srRauO8++OMPsX1wcN7IReqDuA2u0qirXbu222vUGQbxGRni8lZzhCMrS+9F+I9N6TheXqYSiT1Y6wNNsUefJytLZLYnJ4vPpvo8np6g1ar461MIUG8TqNMAjfD3Vyx3RfHx8NVXYrbbMDkzaJB5tbJBg/JOFB4uHiwCAkRavSmqiiY7WyzPfxMxhF3LvlDiYkzHollZkJTkxe3bCpmZQog9Odk82LB5c6EUkR9FKThBawsOHxYWJo63eTOEhgqRbD8/GDsWnnnGxpKGEokbsGyZyO8FuP9+cf1KJGUUu+II/f39iYqKKtQpo6oqx48ft/qgWlyaN2/Oo48+yqhRo9ixYwepqan079+/SMdQYGAgixcvJj4+ntTUVJYtW4avg0N/fX19ycrKKrA88+7gtKTnmzBhAmfOnOG4SVpffm0sEJ139eoZNGyo4u3toJDrwsKrLXHpEnz2Gbz4IvTpA1OnmmtdgVj+6KOiHPGePSI8+/JlEYlV3Ok3icuwV6Nu3bp1ZV6jDvIG8V5edwfiWi0+NStRuZqWQ7/6y8tUIrETS32gKbbq8+TkiKx2E5UBQEQvh4TkfU5Ly3uGbdQImtVNo57+MpU0qShpqZCWVrArun5dKFY
/8YTIoUpNFQfLzBRRV5bQ6eDKFdFZp6aKKA+Tl5KaikdmJoqFdaSmiv2uXhXHkUhcQP6x6J07CikpXuh0YmJHrxdO4bZt4bXXYNs2+M9/RGC+JSwVri4Khw8L82t25D/Jo4+KL7B1q9AVkk4sSVnhyBFx3YJwxMqUQkkZx65wpO7du7NlyxZef/113nvvPbT5BEpzc3N54403uHDhAoMMM5AOZvDgwYwdO5Zz585xzz33WN0uOjqaESNG0Lx5czIyMhg0aBAREREWUwFLSp06dbh27VqB5bGxsQDUrVvXYeey1tlrNHl9qkNCrq2FVxsOqNeLaKoDB8QrJsZ8//R08eRgevI+fcQL3GT6TWIraWlpZGRkkJSUxLZt29i1axfPPvus1e2nT59OVFQUw4YNY+fOnUY9u6NHj7J48eJS16gLDhaaVQaNut27d9sVlWl6+RofmhUF76r+eHpqSLwmL1OJxNlYqtx76JBCZKRIWTKkCx47JrqkmjXhkUfM22Tv3kJQunNnWL9edDH16oGiqHAxDvQqqlZrEKOERv4iKutODuFjTtPR5x8oqolatYcHPP64qARlLW3Dy0tEbiUkWFydo9Nx9OBBunbtiqe1h+WqVcVxJBIXkH8sWqmSaDsgorA8PcW49B//KHoMakOAItnZGucGKObmwoIFIvoqMxMlLQ1t5coQFJQ39ty+HTZulJ26pOzx22957ydPBgc+l0okrsAuR9a8efP44YcfWLp0KZs3b+aZZ56hUaNGgKhwuGnTJi5fvoy/vz9vvvmmQwzOjyFdL8lUSykfsbGx9OrVC09PT/bs2UNGRgZdunShd+/eHDhwgKpVqzrElvbt27Nv3z6Sk5PNHqaP3Z2Nbd++vUPOUxzfj10+n8LCqz/4QExrHzpkXnLYlMaNhfhlcCGC146YfpP6IKWGIzTqEhMTWbJkCX369CnTGnVgfvmKCEiV+PhMPD395GUqkZQC+buppCRQFIWEBBg6VPSR+W8xcXEi671p07xlQ4eK19GjQgfX2CUZ9Bu1WkAVf1PvRmUlp1A5JZNDyTWJbNSKTgGnRCrh00+LSI2aNYv+ArVqiZcldDpSo6NFLpaM+pC4GZbGogEBKlWqZFKrlh8eHgqqCtdsnNDJH6BYgKxs9AlppGZ7gbdPgdWmAYo2+3ZzckT1wV9/FZOuBw/CmTPC0LuRjt6JiaItazSyU5eUbcaNExUK9+yBp55ytTUSid3Y5chq0aIFu3btYvjw4Vy+fLlAZUJVValXrx6ff/45LVu2tMvQuLg4auYbFOp0Oj777DN8fX2tHj8hIYE+ffqQmprK4cOHqV+/PgC7d++me/fuPP744+zdu9chqY+DBw9m8eLFrF692qjRk5WVxbp16wgLCzNGg9jL8eO2+37sepi25GQyHPDIETFiuOtIBEQn364ddOsmXkV9X6kPUuZwlEZds2bN+PLLL8u0Rl3+QbyiiBQlP78c4zYyeFAicS6m3ZROB9ev+6PRiMCKGzeEX8mgz1O5sugDO3cW0Vf5Kfhgrgqvl14FTw3oc0U/p8sVywMD8dckkKirRHjicDq+dhFl6LMiekMiKedYGiJqNODvn4MhQaM4Y9BCAxRVFf3rM8n96Se0D/REs2iRxQ61WAGKn30Gq1fnjT1VVchcGMTx7n6hXG9vPAxjBtmpS8o6nTuLl0RSDrD7KbJLly6cP3+eb7/9lv3793P16lUA6tWrR7du3Rg8eLDxQdIexo4dS3JyMl27dqVevXrcuHGDzz//nD///JP333+fACtlq1etWsWVK1fYv38/TU2mX9u3b8+OHTvo3bs3GzZsYPz48VbPvXLlShITE40RHdu3bzd+z4kTJxqrnYWFhTFkyBBmzJhBXFwcoaGhbNiwgZiYGD755BO7fwMDa9aUQiaeYUSfng46HUpiYt7TgOGA6eniyb1TJ+G4evhhqFLF9i9SxPSbAkIfxJrBJZp+k9hD8+bNad68OSCE1Hv37k3
//v05duxYoZFVBo26Rx55BICXXnqpTGnUTZgwgeTkZLPKhhaDCbOz8cjMAF8f8PJCUTRyAlcicRKWIkJMH6h1OvH6xz+gSxdo0aJwOZACbdo0GkufK06okBeVVb06io83lSt7csijJ5FtNXSSPixJBcBZmQFWAxSPRqKe+oZM/zR8Tn2DkjjItg41IwNOnRJlSJ97Lk9UFoTD2XQCNS1NjGsrVRJjWT8/VC8vspKS8DbcOGSotUQikbgNDgmH8Pb2Zvjw4QwfPtwRh7PIs88+yyeffMKHH37InTt3qFSpEvfffz/vvfceAwYMsLrf9OnTGThwIK1atSqwrkuXLkRGRtKmTZtCz7148WIuXbpk/Lxlyxa2bNkCwMiRI80ebj/77DNmzZrFxo0bSUhIoG3btuzYsYOuXbsW9ytb5cgRJ2fi6fWwdq0oM6zTYZhaUwwOScMB4+KETkD37iX7IlIfpMxTUo269957j0GDBtHAgSV/S1OjzuogPiUF39u3URISoF59EbEhJ3AlEqeQ3/GkqiIi0tPTg0qVxDZZWUJo2sIQwIwCAcIZerhyC3QG7U8FvaJBo/EUzqycHLiZBPUbo1UV0m7KNi6pOBRHFcJu30++okM+hUXjp6YKHaCffxbpgmfOiPBMEDcC00iUe+8VKYP33Sfeb9xoKo6Xd+78yE5dUpaYPx9at4Ynn5TXqqTc4bi8HiczdOhQhg4dWuz9PDw8LDqxDLRt27bIY8TkFzAvBB8fHxYtWsSiRYts3qe4pKWJSt4OF8LMyID//he++AL27xcnMAmv1ppWRjJ05GvXimiskt4cpT5ImaYkGnXJycl07tyZfv36cfDgwTKnUQfWB/GKaZrlXQernMCVSByPRWdyWjr1Mq/gUT0YJSAAVRXSjjbr81xW8ffSkXo9Q0RmZGuAgLwd9SrkeorPih5SVbidBd4+MkBYUmEoNBorLR3/G7Gi2IFJpoRdvp+iig4dPAgnToioq3Pn8ioN5ueXX8wdWcHBsHOnMOboUeH4KhXPnETiHJTjx+mwYAFKjRriWt+zB777Trx+/hneesvVJkokDsVhjqwzZ85w5MgRbt26RatWrYxRUnq9npycHLzkyM5hOFwI8+ZNERm1dasY9aem5qVTeHlB1aqoQUHkpKTk7SM78gqFIzXqdDodc+bM4c033yyTGnWFDuLTM9DodIACXnnOVzmBK5E4loLOZBXlVhweGRkot+IgwB9FUWzrpq5fx2vLFjb5HCOhajqoCIXq3FTxQK7Vovr7k67R4FutGhoFsc3Nm/BgJ1i4EBRFBghLKgTWdVoLtkERvmjHkNFSNQdD0SFDh/rRR8KBZYmQEBFxdd998MAD5utMo65k9WxJWUdV0axcSfVTp9CsXAn33APvvpu3vksX19kmkTgJux1ZV65c4YUXXmDfvn3GZc8//7zRkbVmzRrGjx/Pjz/+SM+ePe09nQThbyogCWaPEObMmWIm6u5xuHVLOLHqi9Qo4/L8yI68wuBojbrGjRvz3Xff0a9fvzKnUVfYIJ70NBS9XqQymIjxSL+vROI4LEdjCT0rVdGg3K0qiH+Abd3UjRuwfj21gFo+iImcjIsQFAA1aoC/P3rSSE5KItAnBY3hINVT4fdNkPi0bNSSckv+gidr1lip0ZOeDik69PiiSdFBfDr45U1SFbtGT3a2mGTdvRtUFSU6Gj+dTnSmph3qffcJR5aiQGhonuPq3nvFoLcoZPVsSXkgMhIlIoIcX188IiJg4kTh8AV45BHo3du19kkkTsAuR1Z8fDzdunUjJiaG1q1b07VrV1atWmW2zTPPPMMrr7zCtm3bpCPLQTRrludfMmKrEGZODqha88762WeFI8vTUwiJ3LghBu9WnBNGZEdeYXCGRt1DDz1UJjXqrA7i09Lv6uloIVexfxAvkUiAgg/SBZ3JedUFVa1WpBbFxUGjfFFZPybTKekHqF0bTO8J7dsLrZzbtyEsDP74Q2rlSCR3yV/
w5OpVS5kBKtxOh1w/9BoNmlw93EyH6n4YorKgkMwAvR6io+H06bzX+fNw4YLIFPAUKb2a3FwxjjVte7NnQ4cOoh0XGBwXgayeLSkP5NeRi48XDuCQEOHMnTFDXp+Scoldjqz33nuPmJgYpk6dynvvvYeiKAUcWVWqVKFNmzYcPnzYLkMlhWCLEGZiImzZAt98I3KkTUOse/SASZPg8cfh1VdFNImtqV5yIF8hkBp1eVgdxN9KA+4qTKMUbxAvkUiskv9BukDlXtPqgqh5VQXT0sDXF/+cVBJvK4QPj6BjyEKUB+43d2RpNCIFIzgY/vwThg6VWjkSiRU2bBC+JDN+PQn//Cf6mt6k5uYSoNWiycqCd98XkVEmVK2i4oUOMOkEz52DkSPNj2kqc6EooCjkenkJZ5anZ17bu3PHvD0XB1k9W1IeMNGRU7KzRQeZmyv6wEWLildRXiIpQ9jlyPr+++8JCQnh3XfftX6TR6QRRURE2HMqSWEUJoRZqxZ8+aUQtDQIUX/xhbkjS6uFUaOE2KUMr5ZICsXqIH7iRNScZHIBbWAgipeX5UG81NGRSOzCvHJvXjQWnhrQ303rzdZBTAxotSiqSuVcXw6lticyrQ2dfvlFRF+Z6uG0bSu1ciQSG6hZM1/gk6rC/PdBfwZ9lbrGgiua69dh5/vQa5moHnj6dN7fkSPhxRfzjhEaKjpG04IpGRnCYVWzJvj5oXp5kZGcjKe3t1jviLYnq2dLyjr5dOR8bt4Uy/V6ofHYo4dr7ZNInIhdjqxLly7x+OOPozHRgrGEl5cX8fHx9pxKYo38QpjJyaJzv30bnn8eKlXKV1ZNETe23Ny7s9f5jiPDqyWSQrE6iM/5A9Ujm1y9Hm3l+kKnZ+f7MHSjbBMSiQMxq9x7V5cHxRf0CnqdgkbVg+oJuYCnFrQatEouaXo/wiu9QccvtCiWHFVSK0ciKT6m7UZV0WZlocTHC6fUt9+KioL5pSrOnDH/7OEhnFuVKgmJi6QkGD1aaLUa9s2f3uuotierZ0vKMibtT0lKQpuZmVesKzlZ9kuSco1djiwfHx9STCvZWeHy5ctmOjYSB5JvAOGZmopy+7YY4aemQqNGYhDg5wcDBwo9rHr1Ch5HhldLJCXD0AY9PcX1D6heXijy4VYicQp53VSeLg8eWtADet3dNCTEg2+uBvwDRYqhpydX/YPQNTRLahLIyRyJpPjki2JUYmPxjY8XbUJVRfjyrVui0RraRdWqlrWsDEVfVBWee862yEgZESmpAOTXiTSSP4o4OjpvXf365tU9ZduQlEPscmQ1b96cX375hbS0NPytaCrdvn2b3377jbCwMHtOJbFE/htYRgbeiYliAKHRiIfq1FQhhPnkk4XrXsnwaomk+Ji2QU9P0e5yc0U70GrlAFsicQLGyr13dXmo6Q3+fuhVyLl0CU+9HiUgQER5ALyfl+JrtZuSkzkSSfHJH8Xo65u3TlFEv5idLTSsnngCWrYUxRYK6w+LExkpIyIlFYD8OpFG8rUVtWFDci5fRuvjk5eRI9uGpBxjlyNr8ODBvP7660yZMoUPP/zQYorh66+/Tnp6Os8++6w9p5JYwsIAItfLC61BrN3XVyxv1Mg28XYZXi2RFA/TNhgQgKqqpCUkUMkgTisH2BKJw2nWDAIr5enyUFVUF9SrKinV71CpalU0mnThaL52zbYUXzmZI5EUDwuacqqfH7qAALSVK4sxqJeXaIO3bsEjjxTtmJI6dRKJbVhqK1otmVWr4hUUJMoMybYhKefY5ciaMGECGzZsYO3atfz88888/fTTAFy4cIElS5bwzTffEBUVRfv27Rk9erQj7JUYsNLZZ1WujGelSii+vnmDeHkDk0gcj5U2qJo69OUgQiJxDlaiNlQPj7zPxXUmy8kcicR2LLVBLy+yKlfGOygoL3qxOG1Q6tRJJLZhS1uRbUNSzilcpb0IfHx82L17N506deKXX35h1qxZABw+fJjXX3+dY8e
O8cADD7Bjxw7rM5iSkmHlBqb38gIfH/Eh/w1MIpE4jpIMIiQSif2YOpGLijb29xfbhYcXFIuWSCQlwxlt0FSnTqsVenT5XkaduvwvU5062c4l5R3ZB0okgJ0RWQB16tTh8OHD7N69m507dxIdHY1eryc4OJjHHnuMJ5980rqmhKRkFCf0WkaESCSOR7ZBicR1HD8uNXQkElfiDB0rqVMnkdiG7AMlEsBOR9bBgwepXbs2zZo1o0+fPvTp08fidufPnyc2NpauXbvaczqJAXkDk0hcixSjlUhcx5o1UkNHInEVztKxkjp1EoltyD5QIgHsdGR1796dF154gU8++aTQ7RYuXMinn35asGyopGTIG5hE4jqkGK1E4lqOHJEaOhKJqyjOZKoBW9ug1KmTSIpG9oESCWCnRhaAKvNtSx97b2ASiaTkOGIQL5FISo7U0JFIXMeaNbINSiSuRLY/iQRwgEaWLSQkJOBjECCXlBiD0zA5NVVEeKSlma3XqyqpmZng4YHG0gN2SgosWwYtWxY7IkSn05Genk5ycnKpCfe74py24s62gXvbZ4ttycnJgHs5yo3tb9Uq0ZZ8fWUbdCHStpJTlH3u1v7Cw8MJDw8nJycHgGRfX7hroykqkJGRgaqqWGxdvr4QEwN37hQ7/Ui2P3Pc2TZwb/vKfB8YEyPakmyDLkXaVnLKWh9owNgGZftzOe5sG7i3fY7sAxW1mK308uXLxvchISEMHjyYxYsXW9w2JyeH06dPM2LECEJCQvj999+LcypJPq5evUpwcLCrzZBISo0rV65Qv359V5sByPYnqXi4U/sD2QYlFQ93aoOy/UkqGu7U/kC2QUnFo6g2WGxHlkajMVYMUVXVpoqEqqry7rvvMm3atOKcSpIPvV7P9evXqVSpksXfPTk5meDgYK5cuUJgYKBDz+3MY7vTOW3FnW0D97bPFttUVSUlJYW6deui0didAe0Qimp/INtgaSJtKzlF2eeO7Q9kH+hOuLNt4N72yT6wZMg2aI60reTIPrD4yPZnjjvbBu5tnyP7wGKnFjZo0MDYeC5fvoyfnx/VrQgee3l5Ub9+fQYNGsS4ceOKeypJPjQajU0zA4GBgU67aJ15bHc6p624s23g3vYVZVtQUFApWlM0trY/kG2wNJG2lZzC7HO39geyD3RH3Nk2cG/7ZB9YMmQbNEfaVnJkH1h8ZPszx51tA/e2zxF9YLEdWTExMcb3Go2GIUOG8Omnnxb3MBKJRCKRSCQSiUQikUgkEkmxsCtect26dbz00kuOskUikUicSvfu3fHx8SEgIICAgAAee+wxV5skkUgkEolEIpFIJJJiYFfVwueff95RdkgcgLe3N3PmzMHb27tMHdudzmkr7mwbuLd9rrZt7dq1jBw50inHlm2w9JC2lRx3t6+kyPZXerizbeDe9rmzbfYi22DpIW0rOe5uX0mR7a/0cGfbwL3tc6RtxRZ7N+XLL7/k3//+N6tWraJv374Wt/nhhx8YP348CxcuZPDgwSU2VCKRSOyle/fuvPzyy05zZEkkEolEIpFIJBKJxLnYlVr45ZdfkpiYyCOPPGJ1mx49epCQkMDnn39uz6kkEkkFIzU1lTlz5tC3b1+qVq2KoiisX7/e4rZZWVlMnz6dunXr4uvrS1hYGHv27LG47WuvvUaNGjXo1asXv//+uxO/gUQiKYyjR4+i0WiYP3++q02RSCQSiUQikZQh7HJk/f7777Rt2xYvLy+r23h7e9OuXTt+++03e04lkUgqGLdv32bevHmcPXuWdu3aFbrt6NGjWbJkCSNGjGD58uVotVr69evH4cOHzbZbuHAhFy9e5PLly/Tq1YvHHnuMlJQUZ34NiURiAb1ez2uvvcaDDz7oalMkEolEIpFIJGUMuxxZN27coF69ekVuV69ePW7cuGHPqSQSSQWjTp06xMbGcunSJRYtWmR1u6ioKL766isWLFjAokWLGDNmDP/73/9o2LAh06ZNM9u2Q4c
OBAQE4Ovry7Rp06hUqRKRkZHO/ioSiSQfq1evJiwsjBYtWrjaFIlEIpFIJBJJGcMusXc/Pz/u3LlT5HZ37twpNGpLYht6vZ7r169TqVIlFEVxtTkSidNQVZWUlBTq1q1b5LabN29Gq9UyZswY4zIfHx9eeukl/vWvf3HlyhWCg4Mt7qvRaLBVJlC2P4mrOXnyJO+++y5Hjx4lKyuLkJAQRo8ezf/93/859DyG9hcYGMj777/PsWPHiIqKIiEhgXXr1jF69GiL+2VlZTF79mw2btxIQkICbdu2Zf78+fTq1ctsuzt37rBs2TIiIyOZPHmyzXbJNiipKJj2gRqNXXPODkO2P0lFwR3bH8g2KKk42NoG7XJktWrVioiICOLj46latarFbeLj4zl8+DCtW7e251QS4Pr161YfyCWS8siVK1eoX79+odv8+uuvNGvWjMDAQLPlHTp0AMTDf3BwMImJiRw/fpyuXbuiKArh4eHEx8cTFhZmky2y/UncjbNnzzJ9+nSmT5/ulOMfOXKEefPm0aBBA9q1a8f+/fsL3X706NFs3ryZyZMn07RpU9avX0+/fv3Yt28fXbp0MW7373//m8mTJ1O5cuVi2SPboKSiYUsfWFrI9iepaLhT+wPZBiUVj6LaoF2OrEGDBhEREcHIkSPZvHkzfn5+ZuszMjJ47rnnyMjIkBULHcDt27epXr06Y8aMwd/fv8B6vV7PzZs3qVWrlsNnEJx5bHc6p604yjZVVUlLS+O+++6jd+/eDrNPp9Px448/0rt3bzw9PR12XEdgi23JyckEBwdTqVKlIo8XGxtLnTp1Ciw3LLt+/brxvDNmzOCvv/7C09OT9u3b89///pegoCCLx83KyiIrK8v4Wa/Xo9Vqee2116zuo6oqcXFx1KxZ0+GzZc48tjud01YcaVtGRgY+Pj4899xzNl1zRaHT6di3bx89evRwSPtLSUkhLCyMBx98kHXr1tl8z0lMTOTw4cM88cQTFu1LTEzk8ccfL9CfpKSk0KhRI5o0aUJsbCy1a9fmxIkThepZGVJ8Fy1axNSpUwEYNWoUrVu3Ztq0aRw5cgQQjufjx48THh5enJ8AgJs3b1KzZk3+8Y9/FBhvgOwDSxN3tM1wvx45ciRVq1atMH1gaeHv74+HhwevvfaaVSe0bIOlh7St5OS3Lzs7m9zcXJ599lkaNGjglu0P4Nq1a9SqVYuXX35Z9oEuJDs7G51OR61atRgxYoTb9TFQcZ4D7XJkjR07ljVr1rB7926aNWvG8OHDad68OQB//vknX375JdevX+eee+5h/Pjx9pxKAvz999+0aNGCmTNn4uvrW2B9bm4u58+fp2nTpmi1Woee25nHdqdz2oojbdu1axdnzpwpEFFkDzqdDj8/PwIDA93yBmarbbY4KDIyMvD29i6w3MfHx7geoEaNGpw4ccJmOxcsWMCbb75ptqx27dp06NCBNm3a2HwcifuTkJDAJ598wo4dOxw22+nn58exY8cccqwffviBuLg4+vbty5EjR8jMzMTLy6vIgd0333zDl19+ybRp0+jYsaPZuoiICMLDwxkzZgz9+vUzW5eeng6INmTrfcnWFN8DBw7w119/GfU1k5KS8PDw4MKFC6xbt67Qc5w/f55WrVrx73//W/aBLsYdbcvOzmbhwoXExcUREhJSYfrA0iIlJYWaNWsydOhQ7rvvPovbyDZYekjbSk5++3Jzc1m6dCmxsbFmGUTu1P5A9IHt2rVj5syZxjGuKbL9lQ65ubksXryYxMREt+xjoOI8B9rlyPL19WX37t089dRT/Pzzz7z//vtm61VV5d5772Xr1q0WPceS4qHX6/H29nYrr7TEfgICAtDr9a42o8zi6+trFjllIDMz07i+JMyYMYMpU6YYP1+7do2ePXvSvHlzmjZtanEfvV7PhQsXaNKkiVNmw5x1bHc6p6040rasrCzq1q3LQw895JA0eJ1Ox549e+jVq5dDBhAbNmwgMDCQ0NB
QJk2axPnz5/H392fEiBEsXrzY4oAWoHfv3iQlJbF06VK+//57HnnkEUA4nT788EOGDh3KihUrCgwUkpOTi22jrSm+Y8aMYejQocb1r776Ko0aNeKNN94o8hx6vR4fHx+3uxYl7oGXlxfe3t6yP3USer0ejUZjMSNAIinLaLVafH19yc3NdbUphZKbm4uPj4/bOdgqGobrxTBRLnEddo8G69evT1RUFN999x1jx46lb9++9O3blzFjxrB161ZOnDhBgwYNHGGrxAorVqygYcOGeHt7s3DhQocc886dO9SsWZOYmBiHHM8a3bt3L5bYb1ln6NChBRy+EvswVDfMj2GZLYLxlvD29iYwMND4MoS3arXaAq9Vq1bRuHFjfH19WbhwIRqNxuJ2xXklJiZSp04drly5glarNT68l/TYPXv25J///Gex9rF0To1Gw7hx46hRowYeHh6cOnXK4jJ7v39JbLP1NWLECJYtW1bgeJ6eng57AQ471oULF8jJyWHQoEH07duXb7/9lhdffJHVq1czZswYq/v5+vryzTff0LFjRwYPHswvv/zCwYMHeeGFF7jvvvv49NNP8fLysmp/cbA1xdfPz4/atWsbX76+vgQEBBRbL8uUstAHOqqvU1WVMWPGULVqVRRF4eTJkxaXlXdkX+peVKQ26EzKgo22MmrUKLPJwNKkqN9x+PDhRUYAlyVk+7NMSftGU1tlX+PeOGRaU1EUBgwYwKpVq9i5cyc7d+7kww8/5Mknn5ReYyfz22+/MWXKFD788ENiYmKYOHGiQ4779ttv8+STTxISEsJHH31E5cqVycnJMa5PTU3F09OT7t27m+23f/9+FEXhwoULNp1ny5YtvPXWWw6xuTDcZXAwc+ZM3n77bZKSklxtSrmhffv2nDt3rkAUiSGtq3379k49f2m0QYBbt24xd+5cGjVqhLe3N7Vr16ZPnz5EREQ45Hy28sMPP7B+/Xp27NhhDMO3tMwZHDx4kP79+1O3bl08PDzYu3evxe3Cw8MJCQnBx8eHsLAwoqKizNaXtXaYmppKeno6o0aN4oMPPuDpp5/mgw8+YOzYsXz11VecP3/e6r4+Pj5s27aNZs2a0a9fPwYOHEiHDh14/fXX8fCwKyjbDFtTfPOzfv16Zs6cWeixw8PDadmyJa+//nqBdbL9lb32Z8s25a0Nl2dKqw2CaIevvfYaoaGh+Pj4UKtWLR566CE+/PBDY0q0NUprvOlMRo8ejaIoKIqCj48Pjz76KNOnTzdGoEts41//+hcff/xxubh/VLQ+sDg4om+UfY17I+Pzyzg7duygQ4cO9OvXjzp16pQ4jcqU9PR0PvnkE1566SUAevToQWpqKn/88Ydxm0OHDlG7dm2OHTtm1oHu27ePBg0a0KRJE5vOVbVqVbvFFLOzs+3a39HHKYzWrVvTpEkT/vOf/zj9XBWFwYMHk5uby+rVq43LsrKyWLduHWFhYU6v8FIabRBgyJAhnD17lk8//ZRz586xbds2unfvzp07d+w+X3G4cOECderUoXPnztSuXduob5R/WXGxpf2lpaXRrl27QoXCv/76a6ZMmcKcOXP45ZdfaNeuHX369CEuLs64TVlrh4ZratiwYWbLhw8fDsDRo0cL3T8wMJDFixcTHx9Pamoqixcvtuh0stdGZ6T4AkyYMIEzZ86waNGiAutk+yt77a+obcpjGy7PlFYbjI6O5umnn2bPnj288847/Prrrxw9epRp06axY8cOq45VENe3I8ab7kDfvn2JjY3l/PnzvPHGG6xZs4Y5c+a42iyHUBrjcBD3j+DgYD7//PNSOZ8zqWh9YHFwRN8o+xr3xmGOrDNnzrB27VoWLFjAtm3bjMv1en2p3ZgqGqGhocycOZMjR46gKAqjR492yHH/+9//4u3tbRQHvueee6hTpw7Hjx83brN//36efPJJGjVqRGRkpNnyHj16GD/r9XoWLFhAo0aN8PX1pV27dmzevNm4Pn+klOn2AQE
BDBw4kG+//dbMvu7du/PKK68wefJkqlevTp8+fdi8eTNt2rTB19eXatWq8eijj5KWlgaIGawDBw6wfPly40xWTEyMxeOA8OB36dKFypUrU61aNZ544gmzCLPNmzfTvn172rdvT82aNc3OVZgdBvr3789XX31Vov9NRWP16tXMnz+fTz/9FIDt27czf/585s+fb5wdCQsLY8iQIcyYMYNp06axevVqHnnkEWJiYhwSYm2IBskffQil1wYN1ef++c9/0qNHDxo2bEiHDh2YMWMGAwYMMO6n1+tZuHAhoaGheHt706BBA95++22z9dOmTaNq1arUrl2buXPnmp03/7U/YMAALl++bFw/evRoJk6cyOXLl1EUhZCQEIvLimr3YLkdQ+Ft6LHHHmP+/Pk89dRTVn+7JUuW8I9//IMXXniBli1b8tFHH+Hn52e8hgyUpXZoSI+tVauW2fKaNWsCQqy+MKKjoxkxYgTNmzenYcOGDB06lPj4eIfa6KwU38KQ7a9str+itimPbbi8UlptEOCVV17Bw8ODY8eO8cwzz9CiRQsaN27Mk08+yc6dO+nfv79xW0vXd2HjTUvtZMeOHVSrVs2om3Ty5EkURTHT83v55ZcZOXIkUPTY0WDXpEmTCr0PmLJz506CgoLMHC6GaJjg4GAeffRRevbsyZ49e8z2y8rKYtKkSdSsWRMfHx+6dOliNoaHwtu6LWN3W75HTk4Or7zyCkFBQVSvXp1Zs2ahqmqh/ydb7C/qXmvL79i9e3e+/vprq/uUBcpTH7hjxw4qV65s1t48PDzM0vpM25vheNauU0t9Y1H7WEP2Ne6L3Y6sK1eu8Oijj9KmTRvGjh3LzJkz+e6774zr16xZg6+vLz/99JO9p5Lk48iRIzRu3JhFixYRGxvLypUrHXLcQ4cOcf/995st6969u1kFrn379tG9e3e6devGvn37AJE6cuzYMTNH1oIFC/jss8/46KOPOH36NK+99hojR47kwIEDFs9tuv3vv//O888/z6hRowpsv2HDBry8vIiIiGDu3LkMGzaMF198kbNnz7J//36efvppY2e5fPlyOnXqxD/+8Q9iY2OJjY01RumYHuejjz4CxKzzlClTOHHiBD/99BMajYannnoKvV5PbGwsw4YNY/To0ezcuZOffvrJeC7DOmt2GOjQoQNRUVEWoxck5qxYsYJZs2bx4YcfAiI1YNasWcyaNcvs4f2zzz5j8uTJbNy4kUmTJqHT6dixYwddu3a12wZDNMj+/fsLrCutNhgQEEBAQAA//fRTodfNjBkzePfdd5k1axZnzpzhiy++MHN+bNiwAX9/f44dO8bChQuZN2+e2QDY0rU/ceJEo3jy8uXLmTdvHvXr1yc2Npbjx49bXGZru8/f/mxtQ9bIzs7m559/5tFHHzUu02g0PProowWilspSOzRcC9euXTNbbtCdqlGjhtV9Y2NjjaLze/bsYc+ePaSlpTF37lyHOrNckeIr21/Za39FbVNe23B5pbTa4J07d9izZw/Dhw+3KjafX8rE0vjOlKLaycMPP0xKSgpnz54F4MCBA1SvXt1sLHDgwAHjJFdhY8f8dhV2HzDwxRdfMGzYMD7//HNGjBhh8TufO3eOo0eP4uXlZbZ82rRpfPvtt2zYsIFffvmF0NBQ+vTpY7znF9XWbbmH2PI9PvvsMzw8PIiKimL58uUsWbKEtWvXFvl/Ksr+ou61tvyObdu25fjx42X6/lGe+kBDe/v111+BvPZmmlZu2t6g8OvUUt9Y1D7WkH2NG6PawZ07d9RGjRqpiqKobdq0USdMmKAqiqK+8MILxm3i4+NVDw8PddKkSfacSqKq6tdff60++uijamZmpqqqqpqWlqZqNBr16NGjqqqq6pNPPqkGBgaqTz/9dIF9o6Oj1e7du6stWrRQW7duraamplo9z5NPPqm++OKLZss+/vhj1c/PT83MzFSTk5NVDw8PNS4uTv3iiy/Url27qqqqqj/99JM
KqJcuXVJVVVUzMzNVPz8/9ciRI2bHeumll9Rhw4apqqqq3bp1U1999VWL2+fk5Khnz55VX3zxReP2hn3uvfde4+eff/5ZBdSYmBir38n0PNaOY41bt26pgHrq1CnjuS5cuKCePXtWzcnJKZYdqqqqv/32m9l2Bw8eVBcuXFikHcUhOztb/e6779Ts7GyHHtcR2GJbUlKSCqhJSUmlaFnhXLlyRa1Tp456+vRp4zJb2+Cff/6ptmvXzvjy8fFRt27davVcltrgpk2b1KCgINXHx0ft3LmzOmPGDPW3334zrk9OTla9vb3VNWvWWDxmt27d1C5dupgte/DBB9Xp06dbtePGjRsqoJ48edK4bOnSpWrDhg3NtjNdZku7N9iTv/3Z2oZUVVUBdcWKFWZt8Nq1aypQ4Nyvv/662qFDB7Nlpu0wMzNTnTNnjnrq1Kkiz2sLjm5/v/zyiwqow4cPN1s+bNgw1cPDQ7127ZrF/eLj49U2bdqoNWvWVM+dO2dcHhUVpfr5+alhYWEW+wJL7e/48eMqoK5bt87iuSIjI1VAXbRokXFZZmamGhoaqoaFhRXn61rl888/Vx977LES9YFLlixRW7ZsqbZo0UKdOHGiqtfrrZ5Htj/ntb+itilpGzawcOFC9eDBg7IPdAIXL15U69evr/7555/GZcVpg4sWLVJbtmyptmrVSt24cWOh58rfBg33lw8++MDsmqtWrZrq7++v+vv7q9OmTTMut3R9FzbeNJC/ndx3333q66+/rubk5KgDBw5U3377bdXLy0tNSUlRr169qgJm91ZTTMeOpjYUdh8w2Lhy5Uo1KChI3b9/v9m2zz//vKrValV/f3/V29tbBVSNRqNu3rzZuE1qaqrq6empfv7558Zl2dnZat26dY1jzcLauq1j98K+R05Ojvrggw+qLVq0MLvXTp8+XW3RooXZ75H//1SU/UXda235HXNyctTvvvvO7DdYuXKlumvXLlVV3bP9qaqqrl+/Xh0wYIDFPvDy5ctq165d1SZNmqht2rRRN23aZLbv9u3b1WbNmqmhoaGF/naq6ro+8L777jOOIQYOHKi+9dZbqqenp5qYmFigvdlynebvL4vTP5o+M1rqa5YvX67OmTPHLfsYVa04z4F2Kb2+9957xMTEMHXqVN577z0URWHVqlVm21SpUoU2bdpw+PBhe04lscDvv/8OQJs2bQCYOHEiffr0sagTMHr0aObPn8/DDz9MfHx8ofooGRkZBcq5d+vWjfT0dI4fP05ycjLNmjWjRo0adOvWjRdeeIHMzEz2799P48aNjVUq//77b9LT0+nVq5fZsbKzs7n33nsLnNfS9qqqotPpCmxvOlPQrl07evbsSZs2bejTpw+9e/dm8ODBVKlSxep3tHQcA+fPn2f27NkcO3aM27dvG2fTLl++TJ8+fejZsyft27enc+fOPPXUUzzzzDNUqVLFZjsM+etFCZNK3B9b2+A999xjrJaSmppKSEhIgXZhiqU2+PTTT9OsWTPjzNKuXbtYuHAha9euZfTo0Zw9e5asrCx69uxp9bht27Y1+1ynTh0z3ZnCrv127doV8WsIitPu87c/e9pycSlL7fDee+/lxRdf5NNPPyUnJ4du3bqxf/9+vvnmG2bMmGE1bW/VqlVcuXKF/fv307RpU+Py9u3bM3PmTN566y02bNjA+PHjrZ575cqVJCYmGqO/tm/fztWrVwFxvQcFBQHmKb5xcXGEhoayYcMGYmJi+OSTTxz1U5hha/u7desWK1eu5PTp03h6etK1a1ciIyPp1KmTxePK9uf89ucIylIbLsuEh4cTHh5uUSbE1jZ46tQpvvjiC37++WdUVaVHjx488cQTVquVWmqDloiKikKv1zNixIgC0RKWxncGbG0nDz/8MFFRUaiqyqFDh1iwYAGbNm3i8OHDxMfHU7duXeO9tbD2ayoyXdR9YPPmzcTFxREREcGDDz5YwPYePXrw4YcfkpyczFtvvUXVqlU
ZNGiQcf2FCxfQ6XQ89NBDxmWenp506NDBGF1WWFu39bcp6nuA6BdMI+U6derE+++/T25uLlqtFij4fyrKflvutbb8jobnoLJ8/zBtf8nJySxZsgR/f38qVapk1M3y9/cnJyeHKVOmsG/fPoKCgrj//vt56qmnqFatmsXjuqoPNIxt/vnPf3Lo0CHmz5/P559/zuHDh0lKSjJrb8V9xizpPiD7GnfGLkfW999/T0hICO+++26h1QkbN27s1lUNyionT54kNDTUGGbdvXv3AqkngHHw/vDDDwNCYL0wqlevXkBzJTQ0lNq1a7N//36SkpLo1q0bIHRPgoODOXLkCPv27eORRx4x7pOamgqI3PR69eqZHc+SIy3/9rm5ucTExBASEoKfn5/Ztqah5Vqtlj179nDkyBF+/PFHVqxYwb///W+OHTtGo0aNCv2ulkLU+/fvT8OGDVmzZg1169ZFr9fTunVrsrOzjec6dOgQX3/9NeHh4cyaNct4LlvsMIRGF5YOJCkb2NoGTdm2bRs9e/a0mh4BltsgiHbTq1cv+vbty6xZs3j55ZeZM2cOo0ePtkng09PT0+yzoihmaQ/5r32dTke7du3Q6XRFHttAcdp9/t/AnrYM4nfTarXcvHnTbPnNmzepXbu22bKy1g4/+ugjGjRowLp169i6dSsNGzZk6dKlhVZjnT59OgMHDqRVq1YF1rVs2ZJDhw5x3333FXrexYsXc+nSJePnLVu2sGXLFgBGjhxpdGSBSCOZNWsWGzduJCEhgbZt2zosxdcSxWl/OTk5RuF5nU5n1BezhGx/zmt/RW1TnttwWWXChAlMmDCBmJgY4zjSgK1t8OzZs3Tq1Mn4cNyuXTt++OEHhg4davGc+dtgaGioUd/UlMaNGwOWi0kU1sfa2k66d+/Op59+ym+//YanpyfNmzene/fu7N+/n4SEBONYGAofO5pS1H3g3nvv5ZdffuHTTz/lgQceKPB85e/vT2hoKLm5ubz99ts8++yzBYS5i6Kwtm7rb1PU97CVwv5PlrBVzLyo39Ggs1qW7x+m7c/f35+aNWty/vx54700Pj4ef39/oqKiaNWqlfH/+dhjj/Hjjz8WKCBjwFV9oKX29uCDD3LgwAGzZ08o/jNmSfcB2de4M3ZpZF26dIn77rsPjabww3h5eTlcWFYibmC2zNKeP3+egIAA+vfvz3333cc777xT6Pb33nsvZ86cKbC8Q4cOHDhwgP3795vlKHft2pVdu3YRFRVlpo/VsmVLvL29uXz5MqGhoWYvS5XkLG3fsGFDq9uboigKDz30EG+++Sa//vorXl5ebN261bjey8vLKCBYGHfu3OGvv/5i5syZ9OzZkxYtWhS4mRvONXHiRE6cOGF2rqLsAPjjjz+oX78+1atXL9IeiXtjaxs0ZdOmTTz77LOFbmOtDeanZcuWRnHWpk2b2qVHaMu1bwvFbff5saUNWcPLy4v777/f7DfQ6/X89NNPBaJvylo79PT0ZM6cOcTExJCdnc358+cLdWIBeHh4WHRiGWjbtm2hk1AAMTExqKpq8WUQTzXg4+Nj1OrIzMwkKirKKN7rDGxtfzVq1GDq1Kk0aNCAunXr8uijjxZaWVe2P+e1v6K2Kc9tuDxiaxts3bo1+/fvJzExkYSEBPbv31/opE/+NmgQI//8888LFNApCba2ky5dupCWlsby5cuND9EGR5bpWNhR7RegSZMm7Nu3j++//56JEycWuq1Go+GNN95g5syZZGRkGPc3aE4Z0Ol0HD9+nJYtWxqXWWvr9t5DTDHVNwKIjIykadOmxmgsa9+/MPttvdcW9TueP3++zN8/rLW/n3/+mdzcXOP/6/r162aOm3r16hWr/VnDkX0g5OlkLV261NjerD17luQ6Lem1Lfsa98WuiCwfHx9SUlKK3O7y5ctms7YSx3Dy5EmzahHWyMnJ4dChQ5w8eZKaNWvSt29fHnzwQaupTX369GHGjBkkJCSYpRSEhYUxf/5
8dDqdmVe8W7duvPLKK2RnZ5s5sipVqsTUqVN57bXX0Ov1dOnShaSkJCIiIggMDOT55583O2/+7Tt16sTp06fZvXs3lStXLrC9gWPHjvHTTz/Ru3dvatasybFjx7h16xYtWrQwbhMSEsKxY8eIiYkhICDAalRalSpVqFatGqtXr6ZOnTpcvnzZrEKN4Vw9e/YkLS2N3377zXguW+wAIaLYu3dvi+eXlC1sbYMGkpOTOXLkSJHVT/K3wTt37jB48GAee+wxevXqReXKlTlx4gQLFy7kySefBMT9ePr06UybNg0vLy8eeughbt26xenTp22aqbV07U+fPt3m72aguO3elKLaUGpqKn///bdx+6tXr3Ly5Elq1KhhTGmeMmUKzz//PA888AAdOnRg2bJlpKWl8cILL5idS7bDso+t7S8hIYEdO3YQExODr68vjz32GAcPHrQaKSbbn3PbX1HbyDZcdrC1DbZs2ZJJkybxyCOPEBQURMeOHQt1Zlgah65cuZKHHnqIsLAw5s79f/buOzyqMm38+HdKyiQhgQCSQihKlyYKAfRlbVhQ0eUVy6KI64rvGl0BXRAVCxbEsIpKdJfVpbj6Q5dFV1gLqEgvUUA6oRgghRZC+kwmM+f3x8mUJDPJJJma3J/rmisn55w55w7kyZlzn/t5nhcZOHAgWq2WzMxMDh48WG9Xwto8bSft2rWjd+/efPLJJ/aBtEeNGsVdd91V47NwQ58dG6tXr172SZX0ej3z5893u++dd97JjBkzyMjI4KmnniI6Opo//vGP/PnPfyY+Pp4uXbrwxhtvUF5ebv9bVF9bb87fkNpOnDjBtGnTeOSRR9ixYwfvvvtujVnoXGko/sb8ra3v3/Hnn3+ud4iHUOCq/V24cIGHHnqIv//9700+biCugaC2o4EDB/Lxxx/b29sVV1zBtGnT6tx7NuX3tKm/23KtCV7NSmT16dOHHTt2UFZW5rY09Ny5c/zyyy+kpqY251SiFqvVyp49e5g1a1aD+yYnJ3PFFVfYs81jxoxh165dbv+ADxgwgCFDhvDZZ5/xyCOP2NenpqZSUVFBnz59asxC8Zvf/IaSkhJ69+5NYmJijWO9/PLLdOzYkTlz5nDs2DHatm3LkCFDeOaZZ1yeu/b+bdq04YorruDZZ591+/PFxsayfv165s+fT3FxMV27duUvf/kLN998s32fp556igceeIB+/fpRUVHBr7/+6vJYWq2WZcuW8ac//Yn+/fvTu3dv3nnnHftTAOdzFRUV0a1bN/u5Dhw40GAcRqORL774gm+++cbtzyOCi7vxQRrTBm3+85//cMMNNzQ49kftNhgTE8OwYcNYsmQJr732GmazmZSUFB5++OEabWnWrFno9Xqef/558vLySExM5P/+7/88is3V7/5bb73V4DgUrjS23ds01JZ/+umnGsnyuXPnMnfuXB544AEWL14MwN13383Zs2d5/vnnOXXqFIMHD+abb76p8TdL2mHoa0z7++677+jRo4f9AcYtt9zC1q1b3SaypP35tv01tI+04dDQ2GvgI488Yv9M+Yc//KHGuH21ufoceskll/Dvf/+bzz77jJkzZ5KTk0NERAT9+vXjqaeeqnesP1c8bSdXXHEFBw4csH8OjI+Pp1+/fpw+fZrevXsDDX92bIrevXvzww8/cPXVV6PT6dwmgPR6PY899hhvvPEGf/zjH4mOjub111/HarVy//33U1JSwhVXXMG3335rTwo21Nab+jektvvvv5+KigqGDRuGTqfjiSeeYPLkyQ2+r6H4G/O31tW/o9Fo5Pvvv+frr79u1M8TTFy1P5PJxGOPPcb06dMZOXKkfX1SUlKNCqzc3FyGDRvm9tiBuAba/OY3v2HXrl32ttO2bds67c2mKb+njX2PXGuCXHNGnZ83b56i0WiUyZMnKxaLRVEUpc6shZMmTVK0Wq2ycOHC5pxKKHVnLaytqqpKWbJkSZ3ZYsxmszJ48GDl/PnzisViUW6
99VZl5cqV9Z5r1apVSt++fe3/r7YZBJ1ni/G1QJzTU02N7b333lNGjx5dY53MWlhXMM4Y42rWwtrctUGbW2+9Vfnyyy89Op+0wfo1J7ba7TDYZy30tobiC8b2pyh1Zy2szV3727JlizJ48GCloqJCqaqqUsaMGaN88cUX9Z5L2l/9Ah2bq2uposishb7katbC2uq7Bp4+fVpRFHUW3wEDBihms7ne80kbrJ/E1nQLFixQRo4cWSO+UJy10JnValXuvvtuJS0trc6/u9lsVnr06KHk5OQoJSUlSq9evZRz587Vey5pfyp31xqZtbDpgmbWwrS0NJYsWcIHH3zAzz//zLhx4wB1xok333yTf/3rX2zfvp3BgwczadKkZqbcRENuuOEGdu7cidFopHPnzvzrX/9ixIgR6PV6XnvtNUaNGoWiKNxwww3ceuut9R7rlltu4fDhw+Tm5ja6T7xwLywsjHfffTfQYQgfcdcGQR1YdPv27fz73//26FjSBn1H2mHL5K79DR8+nDFjxnDZZZeh1Wq57rrrGuwOJe0vuEkbDk71XQNvv/12ioqKiI6OZtGiRej19d+CSBsUvhIWFsZzzz0X6DA8ZusZcOHCBZczMAJs2rSJzz77jN69e9u72X700UcMGDAAvV7PX/7yF6655hqsVivTp093O2OhjbQ/lVxrgluzx8j69ttvGT9+PJs3b2bnzp0AbNy4kY0bN6IoCkOHDuWLL76oM1OB8L7Vq1dz+PBhlwMp3nzzzTW6uHmioYGEReP94Q9/CHQIwofqa4NxcXF1ZuJqiLRB35B22DLV1/5effVVXn311UYdT9pf8JI2HJzqa4Nbtmxp9PGkDQpfeOihhzh8+HCgw/CYbebQJUuW2GcNru2qq67CbDa7bX9jx45t1JiuIO0P5FoT7JqVyAJITExk48aNfPvtt/z3v//l2LFjWK1WUlJSuPnmm7n99tsbnBVJCCGEEEIIIYQQQoiGNDuRZXPjjTf6dJptIVoqRVECHYJoBPn/EkKI4CR/n4UQLZn8jRPCwWuJLOE7tr7RxcXFdOrUiT179hAbG1tnP4vFQnZ2Noqi1DutcVP48tjBdE5PeSs2RVHIyspqcBY7EXgREREYjUb27NmDXq93WWkqbdB/vBnbqVOnADAYDN4ITfhIZGQk586dY+/evbRp06bOdml//hOMsZWWllJeXi7XUx+JiIigoqKC3bt3u+1pIW3QfyS2pqsdX0VFBYWFhUH/GSAyMpKzZ8+yb98+YmJi6myX9ucftt+X8PDwQIfS6nklkXXu3Dk++OADfvzxR3JyclAUhc6dO3PNNdfw+9//nosuusgbp2m1bH2j8/Pz6dq1KytWrHDZeCwWCydPniQlJcUnf8B8dexgOqenvBlbWFgY99xzj5ciE95mSyRbLBYKCwv55ZdfOHjwoMt9pQ36j7djGzhwIN27d/dCZMJXUlNT+eWXX/j3v/8t18AAC9bYunXrxqBBgwIdRosUHR1NQUEBO3bsYN++fS73kTboPxJb07mKLzExkWHDhgU4svqNHDmSBx54gOXLl8s1MMA6derU4ID5wveancj6z3/+w4MPPkhRUVGNcscDBw7w3Xff8frrr7No0SJ++9vfNvdUrV50dDRms5k//elPLjPxZrOZb7/9lhtvvNHrg+v78tjBdE5PeTM2g8EQdH+ghYMtkVxcXExcXBxPPfUUERERLveVNug/3oxNr9dLFUcQc04mV1ZWyjUwCARjbDqdjsjISDQaDWazOdDhtFhyDQwOElvT1Y5Pq9ViMBiCfkznuLg4TCYTjz/+uMuqZGl//qHVatHr9Xz99deBDqXVa1Yia9u2bYwfP56qqiquuOIKJk6caH+inZ2dzdKlS8nMzOTuu+9mw4YNpKameiXo1i4qKsrth3iDwUBMTIxP/oD56tjubN5cxfz5v6FLlzaMHBlcvWAD8e8hgkNYWJjL9gctrw0G8+95MMcmvKt2Mrm1XAOD+Xc
8mGMTviXXwOAgsTVdsMfXkOjoaLkGBpg8LAkOzcoOzJ49G4vFQnp6Ok8++WSd7Wlpabz11ls8+eSTvPzyy6xatao5pxOtiKLAggVa9uzpwIIFWkaMgCB/UCJEi5OZqWHOnGF07Khh5MhARyOEEEIIIYQQoG3Omzdv3kz//v1dJrFspk6dyoABA9i0aVNzTiVama1bYdMmDQZDFZs2adi6NdARCdG61E4my0Q5QgghhBBCiGDQrESW2WxmwIABDe7Xv39/KcETHlMUyMgAkwni4kyYTOr3ciMthP9IMlkIIYQQQggRjJqVyOrTpw8nT55scL/c3Fx69+7dnFOJVmTrVtiwAeIiKog5nU9cRAUbNiA30kL4iSSThRBCCCGEEMGqWYmsRx55hI0bN7Ju3Tq3+6xbt44NGzbwyCOPNOdUopVw3EArxJScQl9RQUzJKUwmRW6khfATezI5TkGjUb9KMlkIIYQQQggRDJqVyHr44YeZMmUKt9xyC0899RS7d++mpKSEkpISdu/ezZ///GduueUWpkyZwuTJk70Vs2jBbDfQbSMr0JSVomi0aMpKaRspVVlC+INzNVZ0tLouOhqpyhJCCBEwtslHMjNl5h8hhBDNnLVQp9PZl9966y3eeustl/vNnz+f+fPn11in0WioqqpqzulFC+NcjdWh8hRYFRSdDqxWootPcSG8OxkZGoYPlxkMhf9kZGSQkZGBxWIJdCh+YU8mt3W0M41G/d6WTB4xIpARCiGEaE1kJmshhBC1NasiS1GUJr+sVqu3fgbRQjhXY5WVWMm2duGMpQNGrUGqskTApKWlsX//fjIzMwMdis85V2NZrVBR4ajAkqosIYQQgSCTjwghhKitWYksq9XarJcQNs7VWNHFpyi3GqhQIim0xmHWhINVXS9jZQnhO87VWPn5cPy4hlOn1P6FtauyhBBCCF+TyUeEEEK40qxElhDeUntsrAqNAarLxg1aE+h0UpUlhA853yyEhakVWQBhYY4ulVKVJYQQwp9k8hEhhBCuNCuRVVlZ6fG+J06caM6pRAtWuxpLsSpUKJEAhGmq0GssoNNKVZYQPuRcjWUyOdaHhzuqZ6UqSwghhL/I5CNCCCHcaVYia+TIkfz6668N7vfll18yZMiQ5pxKtGB1ZirUhdFOW4wBI1GaCseOUpUlhE/UvlkwGh3bwsNrDnIvNxHCW7Zs2YJWq+WVV14JdChCiCDkyeQjQgghWqdmJbJ27NjB5ZdfzooVK1xut1gsPPnkk/z2t7/lwoULzTmVaKFqV2NhVdDqoJPmNN34lSRrjqOPk1RlCeETtW8WKpzyx84VWSA3EcI7rFYrU6dOZejQoYEORQgRhFxVY9nIAxUh/CszU8OcOcPIzJTpQkXwaFYia+bMmRQVFTF+/HimTJlCVVWVfdvJkyf5n//5H+bPn0+7du344osvmhuraIFqV2Oh06kbrE6fTJznWJaqLCG8ytXNgq0iS68HrbbuXYLcRIjmWrhwIampqfTt2zfQoQghgpDzA5Zjx+DoIStFxy1QVi4PVITwI0WBBQu07NnTgQULtPK5TwSNZiWyXn31Vf773/8SHx/Pu+++y1VXXcXx48dZtWoVl112GVu3bmX48OHs3LmTW2+91VsxixbCdgNdVqagu1CA0RKOUROJ0RqO0RqGUYnASKT61RquvjSRGC3h6C4UUFYmVVlCNFftaqzKSkcRZGSk6/fITURgvfrqq2g0Gvr37+/T85SWlvLCCy9w0003ER8fj0ajYfHixW73N5lMzJgxg6SkJAwGA6mpqaxZs6bOfgUFBcyfP5+XXnrJh9ELIbwhIyODfv36+bV6svYDlg4dFOKUQgyWMjRnzwCKPFARwk+2boVNmzQYDFVs2qSRz30iaDR71sKbbrqJnTt3MmLECLZv386AAQO4/fbbKSws5KmnnmL9+vWkpKR4I1bRwpjNcPIkROtNlJZqKNG0odgSzYWqGEpoo740sZRYoyixRjt
emjaUlmqI1pvIyVGPI4RoPEcyWS2GNBrVJFbnztCxI0RFQWWlFqOROi+dTn2f3ET4V05ODq+99hrRtfva+EBBQQGzZ8/mwIEDDBo0qMH9J02axJtvvsmECRN4++230el0jBkzho0bN9bY79lnn2XKlCm0bdvWR5ELIbwlLS2N/fv3k5mZ6bdz1n7AEqcvo2NVPh2056G0FMrK5IGKEH7gnFSOizNJ8lgEFb03DtK5c2f++c9/ctlll1FUVIRGo+HBBx/kjTfe8MbhRQsVHg6ffapQ+PAs2LIFOnVie/mlvJF7H+04wwT9pwxrs4PIhAS0zl2yFeD0aRg6gvgP3iA8XPprC9/JyMggIyMDi8XS8M4hxp5MjlbvDWpTFDAa9Wg0rttYdDT2ZHJ4uI+DFQA89dRTDB8+HIvFwrlz5+rdt7CwkLVr1zJu3DiX25ctW8a4cePcJsUSEhLIz88nISGBn376qd6KjO3bt7Ns2TLS09N56qmnAJg4cSL9+/dn+vTpbN68GYCdO3eSmZlJRkaGJz9uo9jG8OjYUcPIkV4/vBDCD5xvnDt0AFDgzBmwKig6nfq05cwZ6B5NdLSGCxfU/YcPrzkShRCi+WxJ5bg4BYtF/bphg1qVNWJEoKMTrZ1XElnffvst999/P8XFxfTr14+srCwWLVqE1WolIyMDg8HgjdOIFqjTr1vptPsz6GAAg5E1JcOJpIIKTRS9tEfoEXWCmEgj2tqfTjqUwu7P4Ng46CR/SYXvpKWlkZaWRnFxMXFxcYEOx6vCw+Gzz6Cw0PV2s7mK9eu3MGrUKMLCwlzuEx8vSSx/Wb9+PcuXL2fnzp08/vjjDe7/3nvv8cILL/Cvf/2L3/72tzW2rVmzhoyMDEpKSnj00Uddvj8iIoLY2FiPYlu+fDk6nY7Jkyfb10VGRvLQQw/xzDPPcPLkSVJSUli3bh2HDh0iOTkZgKKiIvR6PUePHmXRokUencuV2mN4jBghN7VChKI6MxWWlalPWnQ6QFG/2qqyomNqVGXJjbVoiQL1QNU5qdy+PRQXqw8wi4okeSyCQ7O6FlqtVp555hluueUWzp07x9SpU/nll19Yu3YtSUlJLFmyhNTUVA4dOuSteEVL4qJf057SbmCxgqIwgD3qftKvSQif6dQJ+vRx/+rcubTe7RddFOifoHWwWCw8/vjj/OEPf2DAgAEevWfGjBnccsst3HvvvXz//ff29Z9//jnvv/8+v/vd7/jjH//olfh27txJr1696iS+hg0bBsCuXbsAmDx5MkeOHGHXrl3s2rWLsWPHkpaWxltvvdWs88sYHkKEvrqTjyhUnjpPpVWPoq2eDEirzmDNGRkrS7QOgejeCy6SysgYqSK4NCuRdc011zB37lxiY2P54osvmDdvHjqdjiuvvJJdu3Zxww03sHfvXq644go++ugjb8UsWopa/ZqqisvZV9YdFCuJnKKDrhC9yYSmtBRKSmq+Sktr9msSQjRbXh688w589x2cPRvoaISzv/71rxw/fpyXX37Z4/fo9Xo+/fRThg8fzh133MG2bdv4/vvvmThxIkOGDOGDDz5w2220sfLz80lMTKyz3rYuLy8PgKioKBISEuwvg8FATExMs8bLkjE8hGgZXFVjnS2O4KilOwdN3ahUwkBDzaosubEWwuucr6sREY4xVEFmrhbBo1ldCzds2MAVV1zBZ599Rrdu3Wpsa9++PV9//TWvvfYaL7zwAg8++CD3339/c04nWppa/ZqOHAvH9Kd2kJvHgJg8rHc/yJa+fevt1iT9moTwnp07YelSdfmxx2DChMDGI1QFBQU8//zzzJo1i44dOzbqvZGRkXz55Zdcc801jBkzhsrKSoYNG8Zjjz2GXu+V0QUAqKioICIiwuX5bdtdqW8WRJuGulXIGB5ChD7nIv2YGDAaFcgvxGiNwQqggMWqxWjVq8msqirIL4TO0eh0GnuRvnR3EqL5nJPKpaVw6pQGiyUGrVZD27ZIl14RFJp
VkfXYY4+xadOmOkksZ8888wzff/89CQkJzTmVaKmc+jXtLr1YTUpFRTGw7QmUUaMo7dy5/n5P0q9JCK85cMCx3K9f4OIQNT333HPEx8d7NC6WK7GxscybN4/z589TWlrKvHnzXCadmsNgMGAymeqsNxqN9u1NVV+3irpdkeRpsRChqPbkIyXnTJSUajBqDFirb1cqLJGOmaw1bSgp1VByziRF+kJ4Ue3ramWlY5utrkCusyIYNOtx7DvvvOPRfqNGjbKPjyGEO3v2AFHR0L07Az98HuUSEziN6yKE8K39+x3LffoELg7hcPjwYRYuXMj8+fPt3fNATRCZzWays7OJjY0lPj7e7TGOHTvGhAkT6NOnDxUVFdxzzz3MmjXLq3EmJiaSm5tbZ31+fj4ASUlJXj2fjSdjeMjTYiGCX40ifUWB6bMwbf6ZCWV/g8pKemoO85LhFfRdu6ozWTvNYM0bb4BGI0X6QnhB7euqc3I4LEwBNHKdFUGhWRVZjdFBnUNXCLd271a/hodDz35haqdsIYRfWCxgm5cjORk8nKxO+Fhubi5Wq5U//elPdO/e3f7atm0bWVlZdO/endmzZ7t9f35+PqNHjyYsLIw1a9awZs0aysrKePHFFzl//rzX4hw8eDBZWVkUFxfXWL9t2zb7dm+r/dTYYgGjUYfRKE+LhQhF9iL9C1vps/sz2sRpidSYiNSY6K89QM+IX+kTmU2fyOP0MRynT4dz9Nn9mbq/FOkL0WyuqpxdVWSBXGdF4DUqkbV06VI2b97scltxcbG9C0Ft/+///T+mTZvW+OhEq3H+PNge5vftW/MPpRDC9379Vf1AAtKtMJj079+fzz//vM7r0ksvpUuXLnz++ec89NBDLt9bWFjIjTfeSGlpKd999x2dO3emZ8+erFq1inPnznH77bdTVlbmlTjvvPNOLBYLCxcutK8zmUwsWrSI1NRUUlJSvHIeZ85PjSsr4fBhDefOGTh/vu7TYiFEiHC6k84N62q/Q07S5GOtPa6f3EkL4VWuqpxtiSy93lpjX7nOikBrVCJr0qRJfPDBBy63tWvXjrS0NJfbVq9ezdtvv9346FqoLVu2oNVqeeWVVwIdStA4cQIiI9UPIQMHBjgYIVoh5/Gx+vYNXByipg4dOnDHHXfUeXXo0IE2bdpwxx13MGDAAJfvfe+99zh58iSrV6+mZ8+e9vWDBw/mueeeY/fu3SxZsqTe8y9YsIBXXnmFf/zjHwCsXLmSV155hVdeeYWioiL7fqmpqYwfP56ZM2cyffp0Fi5cyLXXXkt2djZvvPGGF/4laqr91Ni5O5EtISv3uEKEIKc76TzzRfUnsuROWgivcVWNVVXluH7q9XUvpHKdFYHktSmLFEVBkd/gBlmtVqZOncrQoUMDHUpQGTwY1k1azOGlW4g50AWO3gtdugQ6LCFaDUlktTwzZszgjjvu4NJLL62zrV+/fmzYsIEhQ4bUe4x58+Zx/Phx+/crVqxgxYoVANx3333ExcXZty1dupRZs2bx0UcfUVhYyMCBA1m1ahWjRo3y0k/k4OqpcUQElJerH6oVRcbKEiLkON9Jd+hAbpkjkdVZm4fiaqbV6Gi4cEGmLBSimVxdV53Hx6pdkQVynRWB5b25t4VHFi5cSGpqao0n2UKlO7SfPuU74KcdoJ0Q6HCEaFVkoPfQ8uOPPza4j16vd5nEshk4cCCaBm76srOzPY4pMjKS9PR00tPTPX5PU9S613U6v5rIAjAawWCQe1whQkqtO+ncyo7qWBN6PYnJWqxWF7ctcictRLPZrqtlZRATo15DQZ1B1Gq17aNgNNa9jup06vvkOiv8zW+DvYea0tJSXnjhBW666Sbi4+PRaDQsXrzY5b4mk4kZM2aQlJSEwWAgNTWVNWvW1NmvoKCA+fPn89JLL/k4+hC1b5/6NSoKunYNbCxCVMvIyKBfv34tuoqyqgqystTlLl2gTZvAxiNEfTIz6z41BjWRZWPrXig9j4Q
IEc530jodGI2cM7UBq4KeKi4KK0RrNqt32LVfznfS0jtEiEYzm+HkSfXhT2kplJSoL1siy2oFi0VLaanGvs15n+hoyMmpWcElhK9JRZYb586dY/bs2XTp0oVBgwbV+/R70qRJLF++nClTptCzZ08WL17MmDFjWLt2LVdddZV9v2effZYpU6bQtm1b3/8AoaagAM6cUZf79gWtVp2CSogAS0tLIy0tjeLi4hrdqFoSkwnuukutypIcsgh2f/973WosgIgIxw1sRYWawAKpyhIiJNS+kwYWx/6JQmscZ63t0ZWVoDca3VeROt9JOw+aJ4RoUHg4fPYZFBbWXJ+VBXv2QF6elaSknxg7NpUwNzNyxcdL0xP+JYksNxITE8nPzychIYGffvrJbTXG9u3bWbZsGenp6Tz11FMATJw4kf79+zN9+nT7LI87d+4kMzOTjIwMv/0MoeKrr2DN4goGnBvLmLhNJNTTFUYI4X3R0TBlSqCjEMIzmzfXrcaCmhVZzpMoS88jIUKAiztpDRBf/TKbzWxZv55Ro0a5vZGWO2khmq5TJ/XlrE8fGDsWzGYrX31VQJ8+MrO8CB6SyHIjIiKChISEBvdbvnw5Op2OyZMn29dFRkby0EMP8cwzz3Dy5ElSUlJYt24dhw4dIjk5GYCioiL0ej1Hjx5l0aJFPvs5QsHWrbBhWzgbzt3JsOh9JPTrF+iQhBBCBKmyMoiNrZmsArVHkVZrxWrVUVFRc7uM4SFECKh9J717N3z/PaSkwOWXU9q5M3InLYQQApqQyDpy5AhLly5t1LYjR440PrIQsXPnTnr16kVsbGyN9cOGDQNg165dpKSkMHnyZO655x779ieeeILu3bvz9NNP+zXeYLRnD2A0Eq4x0zsyGySRJYQQwg2nnke1aACNfWDawkJwnuRMeh4JEWJ27YKPPwZAM3t2YGMRQggRVBqdyNq0aRObNm2qs16j0bjdpihKgzMjhar8/HwSExPrrLety8vLAyAqKoqoqCj7doPBQExMTL3jZZlMJky2EWuB4uJiQC2vNrsYTc+2ztW25vLVsQsL4cQJHZqKCnpHZqNrG4O5Qwdw+hl98fM0VzDHBsEdnyexBWPcLVVVFZw9CwkJUqUiQsPnn6uzKtVmNlcxd24Wu3b1JSVFwyOP1J2BU3oeCREafvgBdi5LIun8jVzfZhttk5OhEbOoCiGap7QUTp2CpCQpghTBqVGJrC5durTYhFRTVVRUEBERUWd9ZPVgHRUVFS7f524GRGdz5sxxOcPh6tWrayTFanM1Y6K3ePvY+/a1p6SgD9GVlfQM30u2wcCer7/26Tm9KZhjg+COr77YysvL/RhJ63bkCNx3nzqG0MSJ6kuIYNarl9q1sDazGW677Vf++tdexMTIpMxChLKtW2HFTz2hLIlBhizadu4siSwh/Oinn6B6+GcefVRD+/aBjUeI2hqVyMqWC0gdBoOhRtWUjbF6cA6DwdDkY8+cOZNp06bZvy8uLiYlJYUbbrihTldGUKtY1qxZw+jRo90PhNlEvjr28eNa2oSVotHpGNruJF3HjCFlzBifntMbgjk2CO74PInNVn0ofG//fvXrhQs1u2EJEYoiIy24eLYkhAgxOTlAZSUAyfEVrrPXQgifyc11LHfsiL3bvhDBQm5bmikxMZFc55ZeLT8/H4CkpKQmHzsiIsJltVdYWFi9yYmGtjeHt4+9fz9oYttARHcGTb0F3f/0Rlfr+L78eZormGOD4I6vvtiCNeaW6MABx7IMTyeEECIY5OVYwVxFlNZIbLd4qgIdkBCtTE6OYzkpSanxvRDBQGrvm2nw4MFkZWXVqSDZtm2bfbtwzWKBffsAjZZOXQ1c9NBtap8RIYTf2CqytFppfkIIIQLPaoX8k2ZAoXPYGTQpnQMdkhCtjnOdRmdpgiIISSKrme68804sFgsLFy60rzOZTCxatIjU1FRSUlICGF1wO3oUbEOIDRgQ2FiEaI0qK9V2CNCtG9Q
z9J4QIWPXLnjmGRg3DtauDXQ0QojGOn0aLBXqpC9J4WflLlqIALBVYBkM0K5dYGMRwhXpWliPBQsWcOHCBfvMgytXriSnulU//vjjxMXFkZqayvjx45k5cyZnzpyhR48eLFmyhOzsbD788EOvxJGRkUFGRgYWi8UrxwsWu3c7lgcODFwcQrRWR46osxYC9O0b2FhE67NlyxauvPJKZs+ezXPPPee14164oGH1anX5wAG45hqvHVoI4Qd5eYBZHR8rKewcdL4ksAEJ0cpYrdXtEDWPLHO9iWAkiax6zJs3j+PHj9u/X7FiBStWrADgvvvuIy4uDoClS5cya9YsPvroIwoLCxk4cCCrVq1i1KhRXokjLS2NtLQ0iouL7edsCa66Cp7/3RF2byzminA9lPeQkhAh/MjWrRBkfCzhX1arlalTpzJ06FCvH7t3b8W+fOiQ1w8vhPCx3FzsA713DjsNnX8T2ICEaGVOn3Y86ExODmwsQrgjiax6eDpLY2RkJOnp6aSnp/s2oBYmIQHGVq1g7InP4HWgy3swbFigwxKi1XAe6F0qsoQ/LVy4kNTUVIqKirx+7IQEdYKz4mJJZAkRinJzgYhIiGlDUrcwkGE6hPAr54HdpWevCFYyRpYIrH37HMtyJy2EX9kSWTLQe3Dbt28f48eP5+KLLyYqKooOHTowatQoVq5c6dPzlpaW8sILL3DTTTcRHx+PRqNh8eLFbvc3mUzMmDGDpKQkDAYDqamprFmzps5+BQUFzJ8/n5deeskncWs00Lu3unzuHBQU+OQ0QggfycsD2raFlBSSP3wZOnYMdEhCtCrOiSypyBLBShJZInDMZsjKUpe7dIE2bQIbjxCtSFUV5OeryxdfDJGRgY1HuHf8+HFKSkp44IEHePvtt5k1axYAY8eOrTHRiLcVFBQwe/ZsDhw4wKBBgxrcf9KkSbz55ptMmDCBt99+G51Ox5gxY9i4cWON/Z599lmmTJlC27ZtfRS5I5EFUpUlRKgZOBCuvVZtx0lJgY5GiNbn7FnHslRkiWAlXQtFQGzfDlVHculvDCdWZ4ZLLw10SEK41FInW9Dr4fvvITsbSktd76PJzGTYnDloOnaEkSP9Gp9wGDNmDGPGjKmx7rHHHuPyyy/nzTffZPLkyS7fV1hYyNq1axk3bpzL7cuWLWPcuHFER0e73J6QkEB+fj4JCQn89NNP9Y5ntX37dpYtW0Z6ejpPPfUUABMnTqR///5Mnz6dzZs3A7Bz504yMzPJyMho8Odujj59HMsHD8qvrxChZPx49SWECIzJk+Hee9XKrC5dAh2NEK5JRZYIiEWL4E+z4rg2633OVcXJSNMiaKWlpbF//34yMzMDHYrXabVqNZbLWUMVBe2CBXTYswftggWgKC52EoGi0+lISUnhwoULbvd57733uOuuu/j888/rbFuzZg0TJ05kyZIlbt8fERFBQkKCR/EsX74cnU5XI6kWGRnJQw89xJYtWzh58iQA69at49ChQyQnJ5OQkMCnn37K3LlzefDBBz06j6ekIkuIEFZVJdccIQKsTRt11Bc3z7qECDhJZIWAjIwM+vXr55PZnQLBaq0eGstYwUX6Qjroi6QiS4hgs3Urmk2bqDIY0GzaBFu3BjqiVq+srIxz585x9OhR3nrrLb7++muuu+46t/vPmDGDW265hXvvvZfvv//evv7zzz/n/fff53e/+x1//OMfvRLbzp076dWrF7GxsTXWD6uewGPXrl0ATJ48mSNHjrBr1y527drF2LFjSUtL46233vJKHDZduzq6yx486NVDCyF8bdUq+J//gbvvhk2bAh2NEEKIICRdC0NAWloaaWlpFBcXExcXF+hwmu3oUSgvByoqGGA4IiNNCxFsFAUyMsBkwhQXR2RZmfr98OHqSNoiIJ588kn+9re/AaDVahk3bhwLFixwu79er+fTTz/lpptu4o477uC7776jtLSUiRMnMmTIED744AM0Xvr/zM/PJzExsc5
627q8vDwAoqKiiIqKsm83GAzExMR4fbwsrRZ69oQ9e9QZ0EpLISbGq6cQQviA2Qy6EzlojUb1A6Ncc4QQQrggiSzhd3v2oJZlmSoZGHcYevSQkaaF8KOKCnj6abVkfNgwGDKk1g5bt8KGDShxcWCxoMTFodmwQV0/YkRAYhYwZcoU7rzzTvLy8vjss8+wWCxUVlbW+57IyEi+/PJLrrnmGsaMGUNlZSXDhg3jscceQ6/33keAiooKIiIiXJ7ftt2V+mZBtGnqOHXjx8ONN6rdDF2EJoQIQp9/Dm/NvY3E8n78udNHjEhJCXRIQrQqx4/DJ5+osxUOG1ZzzEkhgol0LRR+t3s3YDQCilqRJeNjCeFXWVlqb40PPoCvvqq10akayz4wQnS0+n1GhoxbEkB9+vTh+uuvZ+LEiaxatYrS0lJuu+02lAb+T2JjY5k3bx7nz5+ntLSUefPmuUw6NYfBYMBkMtVZbzQa7dubqqnj1I0ZA/fcA5ddBmFhTT69EMKPcnLAbLRwojKBcJ0FXFR6CiF859Ah+Pe/4Z13YNu2QEcjhHuSyBJ+t2cPoNOij4+lz6U6GDAg0CEJ0ars3+9YrpNHrq7Gom1bR5cOjUb93laVJYLCnXfeSWZmJllZWfXud+zYMSZMmECfPn3o2rUr99xzD+fPn/dqLImJieTn59dZb1uXlJTk1fMJIVqmvFwFqitNk5NRp9gVQvhNTo5juXPnwMUhREMkkSX8qrhYLVklIpI+13Um/F8fw+23BzosIVqVAwccy337Om1wrsYymdAUF6OpqlK3SVVW0LF11ysqKnK7T35+PqNHjyYsLIw1a9awZs0aysrKePHFF72azBo8eDBZWVkUFxfXWL+t+nHu4MGDvXYuIUTLlZttBqsVvcbCRT1iG36DEMKrnBNZycmBi0OIhkgiS/jVnj2O5YEDAxeHEK2ZLZGl18MllzhtcK7GOnsW8vIwnD2rbpOqrIA5c+ZMnXVms5mlS5diMBjo56Z7dmFhITfeeCOlpaV89913dO7cmZ49e7Jq1SrOnTvH7bffTllZmVdivPPOO7FYLCxcuNC+zmQysWjRIlJTU0kJ0Dg35eWwcycsW6bmYYUQwUtRqhNZQGLYObQpchcthL9nr8/NdSxLRZYIZlKvGwKaOtBtMNq927EsPQqF8L/ycsjOVpd79oTw8OoNztVY7dtDQgJUVFBVWem4UERHw4ULMoOhnz3yyCMUFxczatQokpOTOXXqFB9//DEHDx7kL3/5CzFupuN77733OHnyJD/++CM9e/a0rx88eDDPPfccL7/8MkuWLOHRRx91e+4FCxZw4cIF+6yDK1euJKf6ce3jjz9un0k3NTWV8ePHM3PmTM6cOUOPHj1YsmQJ2dnZfPjhh976p2i0N96AVavU5YEDZUhGIbZs2cKVV17J7Nmzee655wIdTg1FRVBerFYBJ4WdBRnoXQi/z15vq8iKi5PZfkVwk0RWCPD3HzBfat8e+lxSSdYRHQMG6AIdjhCtzqFDjp6BNboVOldjabXQti1KXByVRUXY5xStXZUlMxj6xd13382HH37I+++/T0FBAW3atOHyyy9n7ty5jB071u37ZsyYwR133MGll15aZ1u/fv3YsGEDQ+pMWVnTvHnzOH78uP37FStWsGLFCgDuu+++GtekpUuXMmvWLD766CMKCwsZOHAgq1atYtSoUY39kb2md29HIuvgQUlkidbNarUydepUv1V2NFZeHmCuHh8r7Cx0rvu3SwjhO5WVYCsCl26FIthJIkv41V13wV2l/6Ti8IdE/t9F8MLz6pRSQgi/cDk+lnM1VocO9R9AqrL87p577uGee+5p9Pv0er3LJJbNwIED0TTw/5dtK9/zQGRkJOnp6aSnp3v8Hl/r3duxfPBg4OIQIhgsXLiQ1NTUesfVC6ScHBwDvYefgc43BjYgIVqZ/HzHw07pViiCnYyRJfxv/34MWhOanJPQrl2goxGiVXE5Y6GrmQr
dkbGyRAhxTmQdOhS4OIRojNLSUgDGjRtHfHw8Go2GxYsXu9zXZDIxY8YMkpKSMBgMpKamsmbNmjr7FRQUMH/+fF566SVfht4seXlAx4ugSxeS7rtWuhYK4WcyY6EIJZLIEv5nu5OOjoYuXQIbixCtjK35hYfDxRdTsxorOlr9vrQU6huTT2YwFCEiOtpxL3z4cP2/1kIEi4KCAgCysrIYNGhQvftOmjSJN998kwkTJvD222+j0+kYM2YMGzdurLHfs88+y5QpU2jbtq2vwm623FwgLAyiY0i+9zcQERHokIRoVWSgdxFKpGuh8JuKCjCUnXN0vu7bVx2LRwjhNw89BPv2gdGo3i+wpVY1Vnk5nDwJgCY+3vWNhIyVJUJI797qr3RlJRw/Xp3AFSKIJSQkALB3716ysrLcjmm1fft2li1bRnp6Ok899RQAEydOpH///kyfPp3NmzcDsHPnTjIzM8nIyPDPD9BEkyfDDTeolVndugU6GiFan86d4cYb1cosaYMi2EkiS/jN/fdDZb6GIXmTeSFxIRoZdVcIv7vlFvUFOKqxysrUqWmMRiguBqvVvr+2slJdX7vLoU6nvk/GyhJBrndv+O47dfngQUlkieAX4WEl0vLly9HpdEyePNm+LjIykoceeohnnnmGkydPkpKSwrp16zh06BDJ1aM3FxUVodfrOXr0KIsWLfLJz9AUHTuqLyFEYIwcqb6ECAWSyBJ+UVwM2dnAWQvtTEnqPa8ksoQILLNZLVWJjla7EwKUlDgSWRYL+qoq9wOCR0erj+3MZrWvohBBqE8fx/KhQzBmTOBiEcKbdu7cSa9evYiNja2xftiwYQDs2rWLlJQUJk+eXGPCiCeeeILu3bvz9NNP+zXeBh07Bnv3qv2Be/ZUH7AIIYQQLkgiKwRkZGSQkZGBJYQH99i7t3qhwsgAwxF1WRJZQgRWeDh89hkUFjrW/fGPcPYshIdTtWgRWzZuZNSoUYSFhbk+Rny8JLFEUJOZC0VLlZ+fT2JiYp31tnV5eXkAREVFERUVZd9uMBiIiYmpd7wsk8mEyWSyf19cXAyA2WzGbDa7fI9tvbvtDdH8+CPa6u6P1pdeQrnRMWthc4/dFIE4p6cktqZrKL5gjVsIUZMkskJAWloaaWlpFBcXExcXF+hwmmTPHgAFjBUMbHtYHV/HxYcvIYTvbN8Ol1wC7ds7rezUSX2BmtAqKYHISLjsMujXj9LsbLWkxV0iS4ggFx+v/opHRkLXroGORgjvqaiocNkNMTIy0r7dFXczIDqbM2eOyxkOV69eXSMp5oqrWRMbcu5cJOVLi7j0dBcuDjvOsV9/peSrr7xy7OYKxDk9JbE1nbv4ysvL/RxJcLBY1BEn9JIdECFCflWFX+zejdr9yGJRK7L69ZMxdYTwo+JiePRRdXnUKHjzTRc72aY0BKmYFC3KF19ILla0PAaDoUbVlI3RaLRvb6qZM2cybdo0+/fFxcWkpKRwww031OnKaGM2m1mzZg2jR492X8XrxsqVGt7f3wfKb+SpTkv533vvBaeHt805dlMF4pyektiarqH4bNWHrc2ePeqECwkJMGEC3H13oCMSon6SyBI+Z7VWdy00VdJBX0SCvgAuvTTQYQnRqjh3qXJbDLlvn2NZ2qhoQYLwXkqIZktMTCQ3N7fO+vz8fACSkpKafOyIiAiX1V5hYWENJic82ae206cBcyUAnePKCGvf3uUDz6Ycu7kCcU5PSWxN5y6+YI7Zl3Jy1Hu2vDy1OkuIYKcNdACi5fv1V3VyM2JiGPD7K9B8+IHTtGlCCH84cMCx7LbYyj6YHdC/v0/jEUII0TyDBw8mKyurTgXJtm3b7NtDRd5JC5irAEjuFiZV+0L4WU6OY7lz58DFIYSnJJElfE4dH0s14LIwGDxY/kKKkJGRkUG/fv0YOnRooENpFudeg337uthBURwVWTKGnWjB5EmzaCnuvPNOLBYLCxcutK8zmUwsWrSI1NRUUlJSAhh
d4+QeqQAUAJJ61D8GlxDC+5yLO+U2TYQC6VoofG73bsfywIGBi0OIpmgJky2AoyIrMhK6dXOxQ0kJJCVBaanarVCehosWJj0dfvpJrRBetSrQ0QjRsPT0dAoKCgBYuXIlOdUlE48//jhxcXGkpqYyfvx4Zs6cyZkzZ+jRowdLliwhOzubDz/8MJChN1rucXWmuPb6IiK6yYMUIfzNuSIrOTlwcQjhKUlkCZ+zjc2j07mpBBFC+NSFC+qYB6BOQKh1VYsbGwsffQSVlVBU5M/whPCLo0fVF8D58+pshkIEs1deecW+vGLFClasWAHAfffdZ3+wsnTpUmbNmsVHH31EYWEhAwcOZNWqVYwaNSogMTeF0QgF59RqrOSws1IOIkQA2BJZHTuCi+HxhAg6ksgSPrd4MWR9nMnJb/cT8e8wuO46dS50IYRfeDQ+lk14uPopRogWpndvtSIL1AcsI0cGNh4hGlJUVOR2hkCbyMhI0tPTSU9P90kMGRkZZGRkYPFhn9y8PNSHKEBy2BlIGeKzcwkh6iovh8JCdVnyyCJUyBhZwufCw6H/2bXcfORdePNNOHEi0CEJ0ao4J7KkKlK0Vn36OJadZ/EUQriXlpbG/v37yczM9Nk58vJQS4XDwkmOOCd30kL4mXQrFKFIElkhoEUMNm0bRBpq3k0IIXyuURVZQrRQvXs7lg8dClwcQoiacnOBjhdBjx4kvfVnqQoWws9kxkIRiiSRFQL88TTMp8xmyMpSl7t0gTZtAhuPEK2MokBYGERFgctJrLKyYOxYmDkTNm70e3xC+EO3bo5xPySRJUTwCAtTPx6GhUFyF51MNiKEn8mMhSIUyRhZwmesVpg1C3rHnOHy4hQuNRxTZ0MTQvjVvHlqPjk/381A7/v2qX078vKgf3+46iq/xyiEr2m10LMn7N2rPn0uLYWYmEBHJYQYN059Wa2BjkSI1un222HAAPXaOHhwoKMRwjNSkSV8Jjsbvv0W3vnAwOKCW9WV0q9JiICwPfF2ybnrrySbRQvm3L3QVigshAgOWq2bhy1CCJ+KjVUTWLfeCgkJgY5GCM/I5UL4zJ491QtGIwMM1XOey02yEMHHlsjSamve6QvRwjj/esuA70IEiW+/hYkT4Zln1JJJIYQQogGSyBI+s3t39UJFBQMMR9Sb5F69AhqTEKKWigo4Wp1o7tEDDIbAxiOEDznPNSLjZAkRJI4cgf37YfVqKC4OdDRCCCFCgIyRJXxmzx7AakVXWUHfyF/Vm+TIyECHJUSr8uCD6gRQQ4bAPfe42OHgQcfAJFIxKVq4Hj3giSfUyiyZQFeIhmVkZJCRkYHFYvHJ8bOy4KX3RpJc0IabYjdzrYw0LYRfFRbCmjXqIO89esBFFwU6IiE8IxVZwidKS+HXXwGjkV4RJ4jUVsr4WEL42blzakL5hx/gxx/d7LR/v2NZ2qjwoy1btqDVannllVf8ds7wcLj/fhg2TB0TRAhRP1/PnH38OBw6FccPJVdwoioREhN9ch4hWjpNZibD5sxB08i2evgwvPEG/OlPsGyZj4ITwgckkSV8Yu9eUBQgLIwBNyTCjTfC8OGBDkuIVuXAAcey2xyVDPQuAsBqtTJ16lSGDh0a6FCEEAGUl6tApRmA5E4WdWYSIUTjKAraBQvosGcP2gULqm/CPJOT41iWgkgRSqRrofAJ+/hYYWEMfOAyuOmygMYjRGvknMjq29fNTraBdSMi4JJLfB6TEAALFy4kNTWVoqKiQIcihAig3GMmsKrdFpO7ym2JEE2ydSuaTZuoMhjQb9oEW7fCiBEevdU5kZWc7KP4hPABqcgSPuE86cyAAYGLQ4jWrMFeg4WFkJenLvftCzqdX+ISjZOZmcljjz3GpZdeSnR0NF26dOGuu+4iKyvLp+ctLS3lhRde4KabbiI+Ph6NRsPixYvd7m8ymZgxYwZJSUkYDAZSU1NZs2ZNnf0KCgqYP38+L730kg+jd89iUa9Ry5fDN98EJAQ
hRLXcrDL7cnKv6ABGIkSIUhTIyACTCVNcHJhM6vceVmXl5jqWpSJLhBJ59CG8zmqtHugdiI+HpKTAxiNEa6Qojoqs2Fg37bBNG1iyRO1eGBfn1/iE5+bOncumTZsYP348AwcO5NSpUyxYsIAhQ4awdetW+vfv75PzFhQUMHv2bLp06cKgQYP40e1Aa6pJkyaxfPlypkyZQs+ePVm8eDFjxoxh7dq1XHXVVfb9nn32WaZMmULbtm19EndDjEaYNEld7t8fbropIGEIIYDcE1UARGmNxPaQUaaFaLStW2HDBpS4OLBYUOLi0GzY4HFVlq0iS6uFTp18HKsQXiSJrBDg6xljvM1igcceg92bSoi2lqAxxctshUL42dmzUFCgLvftCxqNi530enVcLBkbK6hNmzaNTz75hPDwcPu6u+++mwEDBvD666/zz3/+0+X7CgsLWbt2LePGjXO5fdmyZYwbN47oaNdVEAkJCeTn55OQkMBPP/1U73hW27dvZ9myZaSnp/PUU08BMHHiRPr378/06dPZvHkzADt37iQzM5OMjAyPfnZfiI5Wnzrn5KiD3Fqt6gd4IYR/Wa1w6pR6cUoOO4smRcpBhGgUp2os2reH4mL1IldUpK4fPtzNB0DH222JrMRE9WOhEKFCPrqFAF/PGONtYWHwv/8LL/X/F9M3joVRo2D9+kCHJUSr4tH4WCIkjBw5skYSC6Bnz55ceumlHHD+j67lvffe46677uLzzz+vs23NmjVMnDiRJUuWuH1/REQECQkJHsW4fPlydDodkydPtq+LjIzkoYceYsuWLZw8eRKAdevWcejQIZKTk0lISODTTz9l7ty5PPjggx6dx1t691a/mkyQne3XUwshqp05A1VGtSIrOfyM9GsSorGqq7Fo29aRsNJo1O9tVVn1KC6GsurevTI+lgg1ksgSvmMboMdqhS5dAhuLEK1Mg+NjiZCmKAqnT5+mQ4cObveZMWMGt9xyC/feey/ff/+9ff3nn3/O+++/z+9+9zv++Mc/eiWenTt30qtXL2JjY2usHzZsGAC7du0CYPLkyRw5coRdu3axa9cuxo4dS1paGm+99ZZX4vBUnz6O5UOH/HpqIUS13FzUG+5OnUi6PEkSWUI0hnM1VnQ0WK3oy8rQlJSo33swVpbMWChCmSSyhO/Y7qSjoyWRJYSfNViRdfYsLF0KO3ZAebnf4hLe8fHHH5Obm8vdd9/tdh+9Xs+nn37K8OHDueOOO9i2bRvff/89EydOZMiQIXzwwQdo6uly0Bj5+fkkJibWWW9bl1c9qUBUVBQJCQn2l8FgICYmxu/jZdkqsgAOHvTrqYUQ1XJzgagoiG9P8sTrwGAIdEhChA7naiyLBc2RI0QWFsK5cx5XZUkiS4Qy6QkrvKqiQh3o/dKLzhJ95oy6sm9fGYBECD977DG4+mo4ehRc9g7buRPeeUddnjxZfYmQcPDgQdLS0hgxYgQPPPBAvftGRkby5Zdfcs011zBmzBgqKysZNmwYjz32GHovDoZRUVFBRESEy/PbtrtS3yyINr4YJ1IqsoTwjC/HaR04EJ58Up08V2a4FqIRnKuxOnRQE1cREWA2q+uMRrWQ4MKFesfK0umgZ081oSWJLBFqJJElvGrPHnj0UdCURvB/xtt5qMN/pF+TEAHQq5f6cmvfPseyj2a9E9536tQpbrnlFuLi4uzjUjUkNjaWefPmce211wIwb9488vPzvRqXwWDAZDLVWW80Gu3bmyotLY20tDSKi4uJq2d2TU1mJsPmzEHTsSOMHFnvMePj1c/+586piSxFqXc8XCFaLU/bX1N066a+hBCN5GpsrLg4KC1VlwsL1dHbnauyXMxgeP316ktR6u2BKERQkjIZ4VW7d6tflQojiWHn1G9kRjQhgo9zIkuSzSGhqKiIm2++mQsXLvDNN9+QlJTk0fuOHTvGhAkT6NOnD127duWee+7h/PnzXo0tMTHRZXLMts7TWJtMUdAuWECHPXv
QLljg0SdyW1VWSQl4Oa8nhPDEmTPqMBTFxYGORIjQUXtsLNvq2FgUW1KruFgdo9jDsbI0Guk8I0KP/MoKr7IlsqgwMtBwWF2Wm2QhgovF4hhEKzlZfWIngprRaOS2224jKyuLVatW0c/Dv6v5+fmMHj2asLAw1qxZw5o1aygrK+PFF1/0ajJr8ODBZGVlUVzrhnTbtm327T61dSuaTZuoMhjQbNrU4ExNIONkCRFw33wDEyfCtdfC6tWBjkaI0JCZWbcaC0CrpSoqSl22WtWnNI2YwVCIUCOJLOE1Vivs3Qug0K7qDMlhZ6BdOzcD9AghfGX1ati+vZ6H3EePqk/oQComQ4DFYuHuu+9my5Yt/Otf/2KEi+4BrhQWFnLjjTdSWlrKd999R+fOnenZsyerVq3i3Llz3H777ZTZ5t1upjvvvBOLxcLChQvt60wmE4sWLSI1NZWUlBSvnMclp6fTprg4j54+g1qRlZSk3kN7uceUEKIBZjPs+cnE+apYtan6umpTiJbi73+vU41lY3Zed+GC+tXDqiwhQo2MkSW85uTJ6hvnSjMDwg6pDwn69ZOBR4TwI0WB119X22J8PHz7rYsmaJtRFGR8rBDw5JNP8uWXX3Lbbbdx/vx5/vnPf9bYft9997l833vvvcfJkyf58ccf6dmzp3394MGDee6553j55ZdZsmQJjz76qNtzL1iwgAsXLthnHVy5ciU51dMcPf744/Yxc1JTUxk/fjwzZ87kzJkz9OjRgyVLlpCdnc2HH37YrJ+/QdVjhShxcWCxoMTFoalnTBCbq6+Ga67xbWhCCNdycuDBZTdA2ZXcGreRF2WkaSE8s3lz3WqsatbwcAgPh8pKdUbqykr1exdjZR0/Dn/+szrI+/XXw5gx/v0xhGguSWQJr7F3K7RUMaBzobos1R5C+FV+vqMSq29fN3lkGR8rpOzatQtQk0grV66ss91dImvGjBnccccdXOri73C/fv3YsGEDQ4YMqffc8+bN4/jx4/bvV6xYwYoVK+zndR78eenSpcyaNYuPPvqIwsJCBg4cyKpVqxg1alSDP2OTOY8V0r69+ssfHQ1FRfXO1ATyjEWIQMrNRb3JBjrFlEpZpBCeKiuD2Fh1ZkJnioK2shKiohzbzp5Vr406nfo+p+viiRNw7Jj6cnrWJUTIkESW8Jo9e6oXDFEM/Ntj0HOilLAK4We2oa+gnhyVLZGl1dYcKEgEpR9//LFJ79Pr9S6TWDYDBw5E00A2Jzs72+PzRUZGkp6eTnp6usfvaTZXMzfVHhPEw66YQgj/yTtRBeYqAJKTkMyyEJ6KjnbMTuhEA+iNRoiIUMd7AbV7YXi44305OWq/3vBwNZlcTQoiRSiSRJbwGltFllZbfQNtiA1oPEK0Rs69Bvv2dbFDRQUcOaIu9+gBBoNf4hLC65yrsTp0qLktOlr9AN9AVZZNVZVaHGIbJ1cI4Vu5B0sA9WFncrewwAYjRCj5/HOIiamzuspsZsv69YwaNYqwjz+GNm3UPvTt2zt2io+3J7aqRwkAJJElQpMksoRXlJWp40eDWp4q98YiWG3ZsoUrr7yS2bNn89xzzwU6HK9zrshym8gaM0atypLxsUQoc67GUhQ0RUVEFhaisVjUD+4eVGXt2QNz56rXr8mT4cEH/foTCNFq5R4pty8n9ap7Uy6EcKNXL7VrYW1mM6XHjqkzmbz+eoOHca7ISk72YnxC+IkksoRXnD0LF1+s9rMeMCDQ0QjhmtVqZerUqQwdOjTQofiEojgqsjp0gI4dXewUHw8vvqgu20rPhQg1tauxLBY4dQq9xaKWBbdv71FVVnQ0HDyoLh865N8fQYhQkJGRQUZGBhaLxavHzTuhdivUYaVT33ivHlsI0TBbRVZ4eN2iZiFCgTbQAYiWoVs3+PRTWPv0tzy87Q8wezZkZQU6LCFqWLhwIampqfR1WaoU+nJyHMMmeDSGu1YuASJE1R4bS693jANSUaEmaWuPleVC166
Ot0kiS4i60tLS2L9/P5mZmV47pqJAbr4OgMSwc2i7pnjt2EKIhlmtjoqszp3l46AITfJrGwIyMjLo169fSFSRxBzZRfuTu+DLL9VZo4RogtLqbMy4ceOIj49Ho9GwePFil/uaTCZmzJhBUlISBoOB1NRU1qxZU2e/goIC5s+fz0svveTL0AOqwW6FQrQEztVY0dGO9c4DXFVUqF+jo9X9MjJcTj6i0zlmazp5Uu0mL4TwreJiKKtQKySTws/KAD1C+MqJE/Duu/D739eowj93zj5pqHQrFCFLElkhwBdPw3zGNhsayJ20aLKCggIAsrKyGDRoUL37Tpo0iTfffJMJEybw9ttvo9PpGDNmDBs3bqyx37PPPsuUKVNo27atr8IOuAZnLKyslDt1EfoyM+vOVAg1E1nl1ePveFCV5Txxp1RlCeF7ublAUjL06UPy/ddKvyYhfCU9HZYsUWfk+ukn+2qZsVC0BJLIEs1mf8htNsPhw+py164uZ9QQwhMJCQkA7N27l/T0dLf7bd++nWXLljFnzhzS09OZPHkyP/zwA127dmX69On2/Xbu3ElmZiYPP/ywz2MPpHbt1IkItVo3eeTMTHUGm7vugtWr/R2eEN7x97/XrcYCFFeJLGiwKqtPH8eyJLKE8L1z56pz0BotyQPaS78mIXzljjscy198YV90nrFQKrJEqJLB3kWzffUVfPghDEi4wL0lSfSJPA6XXhrosEQIi4iI8Gi/5cuXo9PpmDx5sn1dZGQkDz30EM888wwnT54kJSWFdevWcejQIZKrr9ZFRUXo9XqOHj3KokWLfPIzBMLEierLZAKX/4T79qk38seO+T02Ibxm8+a61VgAej1WnQ4dqF0LFUXdp3ZVVq0ZDCWRJYR/jRoFmzbBqVN18tFCCG8aNUq9/l24AGvXqsO+xMUxeDDMnKlWZjXQ8UGIoCWJLNFsv/yidsE+8YuO28Krn4h7NNK0EM2zc+dOevXqRWytaYiHDRsGwK5du0hJSWHy5Mncc8899u1PPPEE3bt35+mnn3Z7bJPJhMlksn9fXFwMgNlsxmw2u3yPbb277c3RmGNrtWqBZJ31u3ejqa5IsfTu7XqnJp7T3yS2pmsovmCN266sTJ163GisuV5RsIaFqZlcUD+4Gwzqsk6nvs/FDIa2KkarVRJZQvhLeDh06RLoKITwvy1btnDllVcye/ZsnnvuOd+eLCwMbrkFPv5Y/cz31Vdw772kpECKzLEgQpwkskSz7d2rftWaKugXV13pIYks4Qf5+fkkJibWWW9bl5eXB0BUVBRRTt2ODAYDMTEx9Y6XNWfOHJcDw69evbrGsVxxNdi8tzT52IrCyPXrCSsvxxwdzeadO2HXLt+e0w8ktqZzF1+5c7e8YBQd7Zie04kG1EStbUDbCxegqqrm+3Jy1A/ztqkKURe7d4ejR9VixcrKGpuFEN62eTOsWaMOzjN6tGS0RKthtVqZOnWqfyfwuv12NZEFavfCe+6pW9EsRAiSRJZolvJyOHJEXe6h+5UorUl98u08eq4QPlJRUeGyG2JkZKR9uyvuZkB0NnPmTKZNm2b/vri4mJSUFG644YY6FWA2ZrOZNWvWMHr0aMLCwjz4CTzX0LGrqkBf31/03Fx0YWEQF4cyciRjbrml2ecMJImt6RqKz1Z9GLQ+/9zlGIxVZjOZK1cy8uOP0Wg00L8/vPhizZ3i411mqfr0URNZFgv8+qtcwoTwqV27YOVKdblvX0lkiVZj4cKFpKamUuTPmd0vvhgGDlQHfD96FPbvlyFgRIsgiSzRLPv2VT/8tloZYPlFXXnJJW4G6BHCuwwGQ43ufzbG6i5HBlu3oiaIiIhwmSQLCwtrMDnhyT5N5e7YaWnqALp9+8JLL7lIamVlOZ7ADRigJrWaec5gILE1nbv4gjlmAHr1UrsW1mY2U3DsGMq4cdCzJwwdWnMArHrcdRfceKOawGrf3svxCiHsrFZ46ZPeJJ0dR9/IXxklU6a
JIPXaa6+xa9cutm/fTmFhIYsWLWLSpEl19jOZTDz//PN89NFHFBYWMnDgQF555RVGjx5dY7+CggLmz5/P1q1bmTJlin9+CJs77lATWYBp+Ur2Gi+lc2fo2FHmWhChSxJZoln27KleMBoZaMhSlyXLL/wkMTGRXOc5hKvl5+cDkJSU5O+QAsJqhYMH1QpJs9lNZda+fY5laaOiBbM+/3yjErUgTUIIfzl9Gv57oDsYE/lNm52McjE8gBDBYO7cuXTp0oVBgwbx448/ut1v0qRJLF++nClTptCzZ08WL17MmDFjWLt2LVdddZV9v2effZYpU6bUO6yFz1x/PcybB+XlHF+5m0f+YwGtjttvh1mz/B+OEN4gOVjRLPZEVmQEA/7yoFoWcv31AY1JtB6DBw8mKyurTleobdu22be3BsePq0ksUCuyXHJOZMkYdkIIIQIgL1eBSnVCieT2RnUwaiGCUFZWFsePHyc9Pd3tPtu3b2fZsmXMmTOH9PR0Jk+ezA8//EDXrl2ZPn26fb+dO3eSmZnJww8/7I/Q64qKghtuACDHkgimSgBayfNe0UJJIks0maI4Ellx7XSk3DYYHnwQUlMDGpdoPe68804sFgsLFy60rzOZTCxatIjU1FRSWsmULAcOOJZdJrIsFsdOSUnQrp1f4hJCCCGc5R4uB6sFgOTkAAcjRD06derU4D7Lly9Hp9MxefJk+7rIyEgeeughtmzZwsmTJwFYt24dhw4dIjk5mYSEBD799FPmzp3Lgw8+6LP467j3Xnj9dXIen2uf0Vd69opQJl0LRZOdPKlOCgUwYIBMgCG8Lz09nYKCAgBWrlxJTk4OAI8//jhxcXGkpqYyfvx4Zs6cyZkzZ+jRowdLliwhOzubDz/80CsxZGRkkJGRgcVi8crxfGH/fseyy2KrEyfUqdhAHQBbiJauqkpN3u7cCRMmqJOQNCAnB37+GQ4dUt8iN9lCeP8amLvfMch1UjeZHlSEtp07d9KrV686kwANGzYMgF27dpGSksLkyZO555577NufeOIJunfvztNPP+2/YC+5BC65hJzXHKskkSVCmSSyRJPt3etYHjgwcHGIluuVV16xL69YsYIVK1YAcN999xEXFwfA0qVLmTVrVo1BNletWsWoUaO8EkNaWhppaWkUFxfbzxlsnBNZLiuyuneHdevUG/tmDIAvRMiYPRu++kpdvvxyjwbBWr0a3ntPXR4wQBJZQoD3r4F5R8vty8m9opt9PCECKT8/n0QX47zZ1uXl5QEQFRVFVFSUfbvBYCAmJqbe8bJMJlONCY1sw2iYzWbMZnOd/W3rXG1zduKEFkVRqw8uushCA7s36tjeFIhzeiqYY4Pgjs+T2DyNWxJZosmuu07tpbT7u9MML9sFB7pAjx4y3oHwmqKiojpPuWqLjIwkPT293jEMWjKLRZ2QENQbb7f/XFFR6g29EK3BoEGORNbPP3uUyHKe4PDQIbj5Zh/FJkQrlnvCUdmV1D8+gJEI0XwVFRUuZ7iOjIy0b3dl8eLFDR57zpw5vPTSS3XWr169ukZSrLY1a9bUe9wdO4ZRVGTAoDexed1GCPM8HdDQsX0hEOf0VDDHBsEdX32xlZeXu93mTBJZoskiImDwYBi847/qY+wlqE/Bx4wJdGhCBERmpoY5c4bRsaOGkSP9c87sbDAa1WUZw12Iapdd5ljesQMmTmzwLb17O5YPHfJBTEIIck/pAAvt9UVEXiwjTYvQZjAYalRN2RirP5gZmlEFP3PmTKZNm2b/vri4mJSUFG644QaXD3nNZjNr1qxh9OjRhLkpKqiqgleeNdO2tIBe5n3cEtsG5ZprGozFk2N7WyDO6algjg2COz5PYqs9iZc7ksgSzec8G5rMYS5aKUWBBQu07NnTgQULtIwY4Z9x4xrsVihEa9S9uzqpQWGhOk6W1Qra+ue3ad8eOnSAc+fURJaiyNiPQniTyQQFmg4QV05ym/MyQI8IeYmJieTm5tZZn5+
fD0BSM6YFjIiIcFntFRYWVm9yor7tp0+DYjFBSQmd25xBv+on+2yGnmjo3L4QiHN6Kphjg+COr77YPI1ZZi0UzWe7k46OhlYyS5wQtW3dCps2aTAYqti0ScPWrf45b4MzFh44AM8/D599po5mLURroNE4qrLKyhz9bxtgq8oqLobq+xAhhJfk5QFxbSEpieS7rlK7vAsRwgYPHkxWVladCpJt27bZtweTnBzU+7WwMDqHn1Y/vJ4+HeiwhGgSSWSJJlm3Dv7f/4O9689Tdea8urJfvwafeAvREikKZGSoT5vj4kyYTOr3iuL7cz/2GHzwATz5pJtE1o4d6lhBb7wBmZm+D0iIYOE8JtyOHR69RboXCuE7YWHw29/CsGHSFV60DHfeeScWi4WFCxfa15lMJhYtWkRqaiopQfaA//x50Ok0ENeWzmFn1GrllSsDHZYQTSJdC0WTfPEFbNgAlITx7/COdI04JZ9KRIvkydTjW7eq7SEuTsFiUb9u2KBWZY0Y4dv4oqKqx6ob7GYH6forWqshQxzLP/8Mv/tdg2+pPeC7B0OHCCE81LkzPPtsoKMQwjMLFy7EaDTaZx5cuXIlOdWV7Y8//jhxcXGkpqYyfvx4Zs6cyZkzZ+jRowdLliwhOzubDz/8MJDhuzRmDNx4I5zeoxDz++qHm19+Cb//vRQjiJAjiSzRaIoCe/aoy7EU0SX8lPqN3CSLFqihqcedq7Hat1e7JEVHQ1GRun748ACPs7N3r/o1IgIuuSSAgQjhZ5dcok7jWVzs8ThZUpElhA+Vl6ttsHpGNyGC2bvvvsuJEyfs369YsYIVK1YAcN9999k/Ey5dupRZs2bx0UcfUVhYyMCBA1m1ahWjRo0KSNwN0ekgafBFcNVA2LxZ7fP7009qqaQQIURSr6LRcnLgwgV1uX/EEcdNulRkiVbIVo3Vtq0jYaXRqN9v2IDfxspyqbCwelAS1H6HOl0AgxHCz7RaxzhZxcVw9GiDb0lKgpgYdfngQR/GJkRr9O9/w1VXwc03Q/UYQkIEqz179qAoistXt27d7PtFRkaSnp5Ofn4+RqOR7du3c+ONNwYucE/dfrtj+YsvAhaGEE0liawQkJGRQb9+/Rg6dGigQwEc1VigMLDyJ3UxPh46dQpUSEIEhHM1VnQ0WCxw/nwEVVXq974eK2vlSvjkE9i1S51SuQ7nKQ2lYlK0RpdfDgkJan8KDxK5Gg2MHAn/8z/qZ3yr1Q8xCtFKmLKrZ1A4e1a9SAohAmfUKPWpK8DatWpXAiFCiHQtDAENdW3yN3siq9LMQE31N/36yTzlotVxrsaqrIQTJzSYTGHk5Wno2rVmVZYvxsr6178cuap160Bf+y+6jI8lWru77/ZobCxnr73mo1iEaMUUBW74628JL/kNQ6IOMrdz50CHJERI8GSsVk8UFcGcOepYdUOGwMiRYXDLLfDxx2A2qxMD3Xuvl6IWwvekIks02u7d6lcNCpfekAwXXSTdCkWrU7say7nYo7zc8cDZV1VZZjMcPqwud+3q5uG2JLJEayfdaYUICsXFUFYGhZY2lGjjIAgezAoRCtLS0ti/fz+ZzZx5+sQJ+O47WLwY1q+vXmnrXti3r/SsESFHKrJEo1RUOG6eL+kXQfTb1Y+um/mUQIhQU3tsLL0ekpIUfv1V3X7uHBgMvqvKOnpUTWaBmzyyojgSWXFx6uA/QgghRADkHq8Cs9oHvvNFlVLFL4Sf5eY6lpOTqxcuvhhWrIAuXQISkxDNIRVZolEOHnSMGTJggNMGeeotWihXY9TVrsayiYqCuDiT/fu8PAgP901VlvPwV337utghP98xK8Oll8pNgxBlZXD6tMe7W63qE2whRPPl7jkPqBfBpGS5Hgnhbzk5juUaPXsliSVClCSyRKPs2eP48FEjkSVEC+WqpNvVTIU2bdqY7bOeWSxqMisuzvszGDrPqOayIisyEqZMgdGj4corvXdiIULNmTNw//1wzTXw9tseveX
ll9UB38eNU/NfQrRW3ppwKO9gsX05qVt4c8MSQjSS20SWECFKElmiUS6+GG64ARI7WRnY1xzocITwO3fVWM4SExXCwtTligr1RtjbVVm2iiyNBnr1crFDfDzcd586sufdd3vnpEKEovh4yM5WS6x27PCoEep0apsFyMrybXhCBDNvjc+Te6TCvty5b5vmhiWEaCSXXQudKQrs3Qu//OK3mIRoDklkiUa56iqF116DlY+sotsDv4EHHoDt2wMdlhB+k5npvhrLRqdTn3bZthcWql0MvVWVVVkJR46oy927q10ahRBu6PUwaJC6fO5czcfSbvTp41g+dMhHcQnRiuSecIylmtQ/PoCRCNE62S598fEuPjdeuAD33AOTJsE77/g5MiGaRhJZomn271fvpvftA638GonW4+9/VyusdDowGuu+Kiu1GI3qvu3aqUUgVqta3VFW5p2qrCNHoEodM9f1+FhCiJqGDHEs79jR4O69ezuWnbvxCiGaJu+0OpZqlNZIXJ/EAEcjROtiMqmzaYObaiznWUR/+QX7zEVCBDHJQIimce7XJHfSohXJyVG7FJaWQklJzVdpqQajUU9pqYaSEjXHGxEBMTHqkFXR0er7zc3sles80LvL8bHOn4c9e9RksxACLr/csfzzzw3u3qOH4xmNVGQJ0TxWK+S36wfdu5N8RSKaizoGOiQhWhXnboUux8fSaOD22x3ff/mlz2MSorn0gQ5AhIbMTA0vvTSc2FgN11xZ6Rg0pGtX9wMFCdECLVniqIaqzWyuYv36LYwaNYow2yBZtcTHq90MmyMlBW66SU1oucwjr1sHr76qlo09/zzcckvzTihEqOvXT214lZUeVWRFRKjddo8ehWPHJCcsRHOcOQNVig4iDSQNMchjdCH8zLlHvcuKLIAxY+Ddd9WnratWwaOPgpvPskIEA0lkiQYpCixYoGX37o7ceaeGMVeWssisoNfgphxEiJbroosgNtb1NrMZjh0rpU+f+q/9lZXNS2alpqovt/btU79aLPV8YhGiFQkLg4ED4aef4NQpdTrRpKR639K7t5rIsljUr0KIpmnfHj7+WK0KcXf9FEK4lpGRQUZGBhaLpeGd3ejUCe66S01oue1I07atOrvv6tXq4K4bNsC11zb5nEL4mjwTEQ3auhU2btQACqWlkHfMhF5T/cf00ksDGpsQoWb9erjjDsdg7T5hS2RptTUH+xGiNWvkOFnOA75nZbmZ2UEI0aCwMPVSdO21cMUVgY5GiNDijZlDe/eG6dPVcdx/85t6dnTuXvjFF00+nxD+IIksUS9FUQenVge3VrBa4dw5xTFYtVRkiRYuIyODfv36MXTo0GYfa+1amDZN7WYxYwaUl3shwNoqKhzlIz16gMHgg5MIEYKcx8mSRJYQ/rN7N/zzn/Djj2qlhxAiOA0dConVkzFs2QKnTwc2HiHqIYksUa+tW9XK0vBwdRxAnQ7yLxjYWjZA/UaqPUQL540nYTZXXgm9eqnLx4/DK680fgbD8+exz4ro0sGD6si6IBWTQjjr318tDdFooKiowd1tbRVqDpQrhGikjRth/nx46qmas5UIIYKLVgtjx6rLigIrVwY2HiHqIYks4ZatGstkUscIAdBqQLFYyTg7HuWSHs0ftVqIViQ8HN54wzE/wurV8K9/Ne4YCxZoGTUK7rlHHeanDlu3QpBElhDOIiLg/ffh++/hL39pcPeYGFiwAP77X3jrLasfAhSiZfrqxyi+Kx7GgYpu6mwlQgi/sVrhwoVGPDgdO1Z94APq7IVWuf6J4CSDvQu3bNVYbds6bph1WivtdRfYUHoZW2PPMiKgEQoRejp3hhdeUMcqAHjzTbVQxNNeugcOaLBa4ddfoUMHFztIIksI9wYPbtTuw4erXzdv1jBnzjA6dtQwcqT3wxKiJXtn4xDOFQ0jXl/Malu3JSGEX5w9q05eHR2t5qiefLKBN3TqBFdfrT59veMOR1JLiCAjFVnCJedqrLA
wR0WWIVpLdO8UTDHxZOTe0ehuUUIIdcDb3/1OXa6qUsfLKi5u+H0mk5bjx9XlSy5xUxBpS2RFRMDFF3slXiFaM9vMvXv2dGDBAq1c94RoBJMJzpVEANA5rqT+KX2FEF6Xk6N+LStrRE7qjTfg1VfVMbM0GjSZmQybMweNF4bZEMJbJJElXHKuxjp/3rE+Kgo0YXradoxgw+44tm4NWIhChLQ//QkGDlSX8/PVKq2Gqrfz8mLs+7is4CosdJRP9u2rjmMnhGiWrVth0yYNBkMVmzZp5LonRCPkHSoBq/o0NKljVYCjEaL1cR7jsXNnD9/knPFSFLQLFtBhzx60CxY0fnBXIXxEElmiDudqLJ1O7VcNoNEotG2r/vGKjla3Z2TI3zMhmkKvhzlzIC5O/X7DBvjoo/rfc/JkG/uyy0TWmTOQnKwuS7dCIVzbvBlmz1a7TBQU1LurxQLPPKM+0FEURa57QjRS3i9n7ctJydJFSQh/s1VkQSMSWc62bkWzaRNVBgOaTZuQpzkiWEgiS9ThXI2l10NsrLo+NrbSXuCh0ajbN2yQv2dCNFWnTurMhRqN2s66d69//5wcRyKrb18XO/TuDf/5D3z3Hdx/v3eDFaKl+PlndQDbnBzYsaPeXbdvV69xZjNUVIQRF6fIdU+IRsg96Og33/limSBICH9zTmTZnnV6rKJCfZpTUIApLk6qGERQkUSWqMG5Gis6Wh3KIDkZunVTiAsvRXPqNBRdgCqzVGUJ4QUjRsCzz8I//wmjRtW/b05ODKAmmC+5pJ4d27Z1MxK8EILLL3cs79zpdjfb9VCrVV9Wq4bwcLnuCdEYuUeM9uWk3m3q2VMI4Qu2RJZGA42ea+H++9Uq5qoqtGYzSlycVDGIoCGJLFGDczWWc/foyEjQm0xwoXoMnrJyqcoSwkvuuAOSkurfp7wczpyJAqBnTzcDvQshGjZokJqZArU6yw3b9bBNG8f10GTSyHVPiEbIy3EM/pjUPz6AkQgRmjIyMujXrx9Dhw5t0vttY2R16tTIz46KAufOqQO4arWElZXJ2DIiqEgiS9jVrsaqoaycyMJCsFR/IDFEAvL3TLR8zf0A0RSKAvv311y3YoWGvLxoysrcdCsUQngmOhr69FGXjx51DATpxPl6GBcHMTFq9/rwcEWue0I0Qq75Iog0oNNp6DS4seUgQoi0tDT2799PZhNmDCwthaIidbnR3Qq3boVjx9TuORoN+vJy9aInT3NEkJBElrCzPX2Oi4Nff3X+bK+gOXsGbZUZqszqk+zqlL5UZYmWrjkfIJqivBxmzYKJE9VqblA/NyxapMVo1HPmjMZ+D17DDz/AhAnw2mtw8KBfYhUiZA0Z4lh20b3QuTq5TRvo3FkhNraSiAi57gnhKUWBXEMP6N6dhGv6omsTFeiQhGhVmjRjITie5lRWqhc8QNHroapKqhhE0JBElgAcf6/KyqC4WB3bLzdXfRnPl2MsrsRIJEZLGEaNAaNRg9EIRqM6s2FZmfw9E8IbvvlGfYGa0Dp9Wr1ZPn5cQ9u2JsLD1a6+dfzyCxw6BCtWNDgTmxCBtGXLFrRaLa+88krggnBOZNUa8L3e6uRq8jletBbNqUqurISLL4b4+CZUgwghmq3JMxY6P83p0AGlWzfKbX0T5WmOCBL6QAcggoPZDCdPqjfIZx0zJaPTKZScLgdrNFDdrdAUDiUK4BhEKzpa/WNpNsvYPUI0xx13wMaNsH69Wg4+Y4Z6M2AyQdu2JsrKIvnkE7j55prj2LFvn2O5Xz9/hy2ER6xWK1OnTvVrV12XLrtMbUCKUmecLHdjRTqr/Tl+xAifRyxEQKSlpZGWlkZxcTFxcXGNem9EBCxapC5bLD4ITghRr//5H/jsM7UwoWtXD9/k/DSnQwfHtdJkcuwTHa123cnIgOHD3V8shfAhSWQJQE0+ffaZWgFi687029/ChH674MknUSJNWEpK0AGamDbw+gL1RsBJfLwksYRoLq0WXnw
R7rtPnVdh61Y4cwY6dlSwWCAuTmHDBk3Nm2eLBQ4cUJeTkqBdu0CFL0S9Fi5cSGpqKkW2QTsCpU0bddaErCw4fBhKSqBNmzqf3+sjn+OF8JxOF+gIhGh9wsPVqsiLL27Em+RpjggR0rVQ2J0+rfawiIxU74WfnqHQ579/oY91P320WepLf4Q+usPq+t4Kffpgf110UaB/AiFahthYeP110OvVCsmSErBa1Q8TLrs0HT3qeFLWv39gghYtzmuvvcZNN91EfHw8Go2GxYsXu9zPZDIxY8YMkpKSMBgMpKamsmbNmjr7FRQUMH/+fF566SUfR+6hyy9XvyoK7NoFePb53UZ6VwjRgGXL4PbbIS2tZtWwECI4edK33kb62IsAk0SWH02ePJnExERiY2MZMGAAK1euDHRIdooCb73l+P6RRyB6j9MneqNR3aDXq6VX8sldCJ/q1w9uu02dcUanU6uzLlwIx2JxcfPsfINw6aWBCFe0QHPnzuXAgQMMGjSo3v0mTZrEm2++yYQJE3j77bfR6XSMGTOGjRs31tjv2WefZcqUKbStHjg24K6/Hh59FD78EIYPb9Tndxv5HC9EPbKz1T5N27aB1RroaIQQDZGnOSKESCLLj6ZNm0Z2djbFxcX84x//4L777qMgSAZlXrMG9uxRly++GO643ekTfViY4wOIwSCf3IXwA0VRewuGhandDQFKS9VEVp0mKIks4QNZWVkcP36c9PR0t/ts376dZcuWMWfOHNLT05k8eTI//PADXbt2Zfr06fb9du7cSWZmJg8//LA/QvfMoEHw+9+rX8PCGvX53UY+xwvh3ov/HsAjx59hdt4fqLyoMSNNCyGay2yGv/8dvv5a7UHfIHmaI0KMJLL8qE+fPkRERACg0WiorKwk13le1ACprIR333V8P2UK6DKdPtFHREByMopOpyay5JO7ED5nu6lOTlabIIBWqxAR4aIJ2hJZWi307h2okEUL06lTpwb3Wb58OTqdjsmTJ9vXRUZG8tBDD7FlyxZOnjwJwLp16zh06BDJyckkJCTw6aefMnfuXB588EGfxd8YzjP36nTYZ+V1flVWal2ul5l7hXBtd048P5f34buyEYR1bBvocIRoVU6dgr/9TR3/+MMPPXiDPM0RIUYSWW6UlpbywgsveHV8EIBHH30Ug8HA0KFDufbaaxkwYIAPfwrPLFsG+fnqcmoqjBheNyOvtGlDWUICim0QacnAC+Ezzg/FYmLUmWY6doQOHSrs+9ib4DtVKEeOqisvuURNNgvhJzt37qRXr17ExsbWWD9s2DAAdlWPPTV58mSOHDnCrl272LVrF2PHjiUtLY23nPu0B5Bt5t7oaLU7b0lJzVdpqQajUU9pqcbFtpoz9wohwFpZRV5JDABJbcvRaGU2BCH8KSfHsdy5oYJID57maCsrXT/lkac5IkBk1kI3zp07x+zZs+nSpQuDBg3ixx9/dLvvpEmTWL58OVOmTKFnz54sXryYMWPGsHbtWq666qoa+7733nu8++67/Pjjj+zduxdNEExzdMst6gf4lSth6lTQbHOTkddoHH2cZLYKIXym9kMxvR7at1coKnKMMWJvguusbI29lBExe6RbofC7/Px8EhMT66y3rcvLywMgKiqKqKgo+3aDwUBMTEy942WZTCZMTtN9FxcXA2A2mzG7yBjZ1rna5pbVCllZaHfu5JOHOnH+smtd7lZVVcXmzVsYOXIker3rj07x8Wq79FYyq0k/j58Ec2wQ3PF5Elswxt1YZ/eepkpRpypMuqgqwNEI0fo4J7KSkxvYufbTnFo0gN5odH/f6vw0R6awF34iiSw3EhMTyc/PJyEhgZ9++omhQ4e63M82Pkh6ejpPPfUUABMnTqR///5Mnz6dzZs313mPTqfjuuuuY/78+fTs2ZMxY8b49GdpSPv28Oyz6gDvHdor8GJ1KUitJ+x1yNzjQnidczVWhw7176s2QT0ZXd9g+J3/QdNfElnCvyoqKux
d5p1FRkbat7virsLZ2Zw5c1zOcLh69eoaSbHa3FVEuxJWXMzI6nNUdevGsTij2307d4YTJ1a73X7smMenbZTG/Dz+FsyxQXDHV19s5eXlfozEN3J3F2C7zWiwGkQI4XXOo9c02AbDw+Gzz6Cw0OXmKrOZLevXM2rUKMLCwlwfIz5ekljCrySR5UZERAQJCQkN7lff+CDPPPMMJ0+eJCUlxeV7q6qqOHLkiNdibq4OHYAt1aUgMTHw66/qnXJCgloSUptUZYlWICMjg4yMDCwWi1/O1/gJY7RsONiRrf3/IE1Q+J3BYKhRNWVjrJ7p1tCMrq4zZ85k2rRp9u+Li4tJSUnhhhtuqNOVEdQqljVr1jB69Gj3H7Rd0P3733DyJLHFxSRdc43L7rlNPXZzBOKcngrm2CC44/MkNlv1YSjLPVAMxAOQdHFkYIMRohVqVEUWQKdO6ssVs5nSY8egTx91FiIhgoAksprJk/FBUlJSKCoq4r///S9jx44lMjKSzz//nLVr1zJnzpxAhA1AeTnUeKjtXApiNKrfl5bC+fNw0UWuDyJVWaKFS0tLIy0tjeLiYuLi4nx6rsZUY9lIExSBlJiY6HLSkvzqgReTkpKafOyIiAiX1V5hYWH1Jica2l7H0KHqJ36rFe3Bg1B9/fbKsb0gEOf0VDDHBsEdX32xBWvMjZF71FHdmNynTQAjESK0NfWBqi2RpderNQlCtDSSyGomT8cH0Wg0/P3vf+fRRx9FURR69OjBJ598wuDBg90e25fjg2RmanjuOS0PPWRl3DgFvR40W7ei27BBLQstKFB31OlQOnRAqR68T1EUrLUPFheHZsMGLBs3ogwf3uC5PRXqY1wEUjDH11rGB2kKb0wYI1VZwp8GDx7M2rVrKS4urvFAZ9u2bfbtQW/IEPj8c3V5x456E1lCCM/k5Tg+LSYPbB/ASIQIbU15oKoojq6FSUmOIY6FaEkkkdVMno4PEhsby9q1axt1bF+ND2K1wltvXU5eXgwvvABHjuxn8KAzDJk/n4sKC9GYzWirs/6m6Ggs1UktLVB27pzLY0ZcuMCZWbPYMWWK10tCQnWMi2AQzPG19PFBGst5wpiYGLUosvb2ykotRqNTE6tUqyd1EZGUlYWTkaGRqizhV3feeSfz5s1j4cKF9nEiTSYTixYtIjU11W3X+qBy+eWO5R07AheHEC1ITvIwKAAqK0nsL4ksIfypsBBsQ1R61K1QiBAkiaxmCsXxQVau1FBWpiUuDnr3hqefHoq2qhLdm2+iCQtT+zVpNBAWRlR1/IqiYDQaiYyMdD1jRbt2JFutJIwe7bWB/kJ9jItACub4Wsv4II3VwIQxgAajUV+z/ZVWQmk5UE5027bk5BhkwhjhNQsXLsRoNNori1euXElOdV+Fxx9/nLi4OFJTUxk/fjwzZ87kzJkz9OjRgyVLlpCdnc2HH34YyPA9d9FF6if93FzYuxcqK6URCdFMeWVtIU4d/9kQHehohGhdnMfHkskWREsliaxmCrXxQcrLYeFCR8XGU09BRIQWIsLgnXfgwQfVO2qNBv7yF+jSBfBstgpNfDzaaO9/WgnVMS6CQTDH19LHB2msBiaMwWyuYv36LTXb4KvvwM7qCpL33ie+j0Huv4XXvPvuu5w4ccL+/YoVK1ixYgUA9913n72Lw9KlS5k1axYfffQRhYWFDBw4kFWrVjFq1KiAxN0kl1+uJrIqK9Vk1pAhgY5IiJD2pz+pTUoqhIUIjMsvVxNaoVAYLURTSCKrmUJtfJCPPgJb78Crr671WX3pUtDp1NeECXDDDY5tMluFED7XwIQxHDtW6miCigL5ayHygjpI1v90BLlhEF60Z88el9W/tUVGRpKenk56erofovKRIUPgyy/V5Z9/lkSWEM00ZkygIxCi9Ro4EP72N3W5ephjIVocGfqtme68804sFgsLFy60rwvW8UHOnFFzVaDmqv70J6eN69erL1CnS5s82e/xCSEaIT9fna4
Q4NJL5bG3EM3hnLjauTNwcQjREmRnw5YtajlII2daE0J4l3w8FC2VVGTVY8GCBVy4cCHg44M0ddrV2t57Tx3+CmD8eHuvQVV1BRkA06apA/UIIYLX3r2O5UsvDVwcQrQESUnQv7/61Yuz7wrRGmm/+QaWLFG/mT8frroqoPEIIYRoeSSRVY958+Zx/Phx+/eBGh+kKdOu1nbwIPz3v+pymzbw8MO1dvjzn2HkSPj+exg9unkBCyF8b98+x3L//oGLQwgf89bDnAYtXuzb4wvRSmTvK0NrSiIp7CwRQdQzQQghRMshiax6ZGdne7RfKIwP8sEHjj7Sf/gDuMyHXXml+hJCBD/nRFa/foGLQwgf88bDHCGE/7y/fgA/Hh8LwH+1ibgZ+lEI4QNGI9x6qzoZ75VXwkMPBToiIXxDElmtxPPPqz0mtmxRuxUKIUKYxQIHDqjLycnqYO9CCCFEEMgtiARAF66jY7JMpSuEP+XmQkGB+urcOdDRCOE7kshqJWJj1aGvzGanSQe3bIGyMrjuOhkJUIhQcuyYY8A7GR9LCO8yGmH3bnXucp0u0NEIEVJ0ZeXklat3zwltjWhlWikh/Covz3FPJ4ks0ZLJ5aWVsSexjEZ47TV4+ml47DEoLw9oXEKIRlAUGDUK4uMlkSWEN/31r3D11fDoo+rgkkKIRlFyiymzGgBIvqgqwNEI0fpUz0sGSCJLtGxSkdWClZZCZaV6r1vHP/4B+fnqsqKAweDX2IQQzdCrF7z5ptp2rdZARyNEy9GxI1RV33z//LMkioVopOJsR/IqOUWelwvhb7m5UpElWge5woSAjIwM+vXrx9ChQxv1vg8/hDvuUHNWtl5IAJw4AR99pC7r9TBjhnQtFCIUaTTS9UkIbxoyxLG8Y0fg4hAiRJXkKvbl5IsjAhiJEK1Tbq5jOTk5cHEI4WuSyAoBaWlp7N+/n8zMzHr3y8zUMGfOMDIzNeTmwrJlao/BDz6AwsLqnRQF3nhDHSwL4P77oWtX3/4AQoSwpiaShRAhqFs3aNdOXd65UyoehWikC6ccD1eS+sQGMBIhWobGfg7NyVGLEyIj3fTKEaKFkERWC6EosGCBlj17OrBggZZ33nHkqiZMgISE6h3XroWtW9XlTp3g978PSLxChApPE8l+YzSqsxYKIbxPo4HLLlOXy8rg8OHAxiNEiDld3s5e5Z88sH2AoxEi9DXmc6jV6hg5JjlZOtyIlk0SWS3E1q2waZMGg6GKtWs1/Oc/6vr4eJg0qXqnigr4y18cb3rySRkbS4gQo1mxQh3o/fe/V2dWE0J41+WXO5Z//jlwcQgRBBpbDZKVMgyld2/o2ZPkHvIZUwh/KiqKsA/zKONjiZZOElktgKJARoY6DlZcnIkLF+DsWXX9//0fREdX7/iPf8Dp0+ryiBFwzTWBClkI0USa/fvVxr57t1o3LoTwLhknSwi7xlYll5SEAxAVG0ZcnC8jE0LUVlDgSB7L+FiipZNZC1uArVthwwaIi1MoKdGjKOqMhW3bwu23V++Une0Y4D0sDP78Z6k3FSIEafbtUxciIuCSSwIbjBB+kJGRQUZGBhZ/dam95BKIjYXiYjWRJeNkCeGxadN+5sorO1FUpJWPmUL4WUJCGa++auXUKS39+wc6GiF8SyqyQpxzNVZUlFpSqtWqn7v1etDa/odjYuDaa9XliROhS5eAxSyEaJqw0lLH4Ad9+8qMhaJV8Ps4dVqtY5ys4mI4dsw/5xWiBdBo1AepPXoEOhIhWp+YGDPXXacwaRJccUWgoxHCtySRFQLqG5/AVo3Vti0UFmqwWDRoNGre6uBBx7judOgAr70Gf/0rPPigX+MXQnhHm5MnHd9cemngAhGipbONk3XxxVBUFNhYhAgRmq+/5tJ//APt/Plw4kSgwxFCCNGCSSIrBLh7Gu1cjRUdXfOzdnKyuj4jQ93P7oorZFwdIUJUG+cbA0lkCeE7t94Ka9bAZ5/VHPxdCOGWZs8eOuz
bh2bZMrhwIdDhCCGEaMEkkRXCnKuxNBro1k0hNraSdu3UXFXbtrBhg+KoyhJChLRY50SWDH4ghO/ExkK7doGOQoiQ8s2WON4p/AOfnL+JgqiUQIcjRKuzd297srKgvDzQkQjhe5LIClG1q7FAHdYjNraSTp3UEqxovQnTqUIynslFsSr1HE0I4Q2azEyGzZmDxhdj+SiKoyKrbVtITPT+OYQQQogm2vpLNP+5cAPzT/+OIk3bQIcjRKtSXAyLFvVn4kQdTz4Z6GiE8D1JZIWo2tVYAJSVE30qH8rKAQXN6VO01RSxYbOWrW+sD2C0QrQCioJ2wQI67NmDdsGCWn16m0/z1VfEHT0KZWVqt0KZDkoI//HXjIlChCqzmdwzYWisVrBY5FmLEH6Wk+NYTk4OXBxC+Is+0AGIxnOuxurQwb4Wzdkz6Csq0Jw9A9b2UF5GtBYuWOPI+OUqhity7yuEz2zdimbTJqoMBvSbNqnZ5hEjvHNsRUH77rtojUY0Z85Av37eOa4Qwr3iYliwAHbsQBsVxbCsLDQdO8LIkYGOTIjg89VXHKjswREuoXfVEQy/ePEaKIRo0Nq1GvLzo9DroXPnQEcjhO9JRVYIysx0VY1VBqWlKBotlJRCXh5QPQ1ypwg2bNbJWFlC+IpTdtkUF+dmpoVm2LoVzZEj6rHDwpwz2EIIX4mKgq+/hl9/Rfvddz6rthQi5CkKxgUfkEVPSmjDOaU9ygIvXgOFEPVSFFi2TIvRqOfMGY1UZIlWQRJZIejvf685NhYocOYMWBUUnQ4sVWA0qZti2hDdIdrr99VCCCfVfX2VuDjQaNSvGzbgleyxc5KsXTs1O/3559KYhfA1vR4GDVIfFBUUUBURgcZWbSmEcNi6la+2taeUGHRYOEcHtn5fJm1FCD/ZuhUOHtSg0SiUlcG5c4GOSAjfk0RWCMjIyKBfv34MHToUgM2bXVdjodMBinqDa7WAVYGETmpVVlvv3VcLIZy4mnkhOtp7VVm+TJIJIep32WVw9ixYrVjCw71fbSlEqFMUlAUZ/K38Pqxo0VOFgoaMCxOkKksIP7B9DK2sBJ1OwWqFL76QpidaPklkhYC0tDT2799PZvVMaGVlas7KaASjUcGYX4jREo6RSCrNOoxKJEYlAqMmEqMlDKNR3b+sTD5/C+F1zjMvGI1EFhSgOX1a3fbtt/CXv8CmTbB3rzoSZ2mp543Q10kyIUT9wsLsD4r0lZWSSBaitq1b2fp9GTusg9FRhQaI1ZWxwTJCqrKE8APbx1CNRn2FhanrpOmJlk4Gew9B0dHq52pAvaEt1YCmDVgAJcxRqlWphXMmiIi0vy8nB8xmCA8PSOhCtCy1Zl7QXLiAvqJCfSymKGpjmzMHunWrOdNCfDysXl3zWKtWwfHjakIsLk79mp0NP/4IYWFozpxBb7VCTEzNEksZTFcI31AU+OYb9atOh86WUC4qUtv98OEyg4po3aqrsTIuTMBEJFoUQEOMtpwL1lgyLkxg+IIMNNJWhPAJ54+htmebkZGO553S9ERLJomsEPT55+q9LIoC02fBli3QqRPKyZNYjEZ0Oh2ahAQoKYGhI+CNN+x/xeLjJYklhNc4V2NpNGCxOLZpNGopZGmpWg4ZE+PY5rxs88MPsH6943tFURNZxcXq4zWNhkiLRU1yRUfDhQvyKUW0ChkZGWRkZGBxbl/+sHWrWk0ZFQVmMxqLBaqqJJEshE11NdYGywj02iosSgSKohCuraKtpqS6KutjRkhbEcInbB9Do6PV2z5Q7/PkMiVaA0lkhaBevSA2FtiyFXZ/Bh0MYDBi7W7GdDKHyPBwNO11EFGqbr8wTv6KCeFttaqxAJT4eMo0GtpERaGxWtWb3rNn1eTTuHFqJUdREVx0Ud3jXbhQ83vnse+cE1WRkdQZ+E7at2jB0tLSSEtLo7i4mLi4OP+c1Ll9x8ZCQQEAGqMR2rSRRLIQNaqxImirLaVcqaLSqiNMU0WYpooLljZ
SlSWEj9QefUKnU5+nhofL807ROkgiK1S5uIlGr8cUH09EXBwakL9iQvhS7WosUAdj1+kcySZQP1mcOgWjRtWfcJo1S016XbgAhYXw5puOTyNWK1itGMPDidJWD20o7VsI33Fu3+HhEBlJWVUVbdq0QSOJZCFqVGO11ZUQo6sgnmIsFgsajQ6AtjqpyhLCV5wvU1FR0LOnwoULpbRpE4tGo5HLlGjxZLD3UOXqJro2ma5QCN9wNQi7O54Ozt69OwwbBjfcAF26qMmv5GTo3Bm6dEHp2pWqqCjH/tK+hfCN2u07PBylTRsUrdNHJpl0QbRmtaqxorUVLneL1lZgIkJmMBSiGWrPXg/uP4baRrUAuUyJlk8SWaHIFzfRQgjPeZJItmlswknatxCBJQ+KhKhfrWqsepuJvSpLZjAUoilqz14PcpkSAiSRFZoyM313Ey2EqKHOk7DGJJpsGpNw8mWSTAhRP0kkC1E/D6uxbKQqSwjvksuUECpJZIWiv//ddzfRQoga6jwJa0wi2cbThJOvk2RCiPpJIlmI+mVmelSNZSNVWUJ4l9QzCKGSRFYIqFMRsnmzb26ihRAN+/vf1RkFdTowGuu8tJWVLtej06nvqy/h1JibaBtp30J4hySShWiQsvDvZBT+DpMSTjTlYFWcXtZaX9VXNOWYlHAyCn8nVVlCNJPUMwihkkRWCKhTEeKrm2ghRMNyctRPBaWlUFJS46UpLUVvNKJxsY3SUvV9OTlgNtc9ru0mWtq3EIEhiWQhGpR5sI1ajaUpQqNYwOr8sqJRrNWJLMd6jWKhraZIrcraH+v6GiiE8IjUMwih0gc6ANEEtpvoWjSg3kS7+8vmfBMdHu7bGIVoqZYsgaoql5uqzGa2rF/PqFGjCAsLc/3++HjX7c9shpMnpX0LEQjOieSYGDVBXGu7PZFcuw06J5KHD/f87kKIEPT37q9SdrI9MR2qqNVKUBSF8rJyoqKj6lyrdEDZOT0Zl7zJ8LBwpJUI0TRlZRAb6/IyRWWlVi5TotWQRFYIUaorLYo/+kj9oF2L2Wxm06ZNXHnlle5votu1c1RxNILZbKa8vJzi4mL3x/ayQJzTU8EcGwR3fJ7EVlxcDDh+54OBvf1FRqqfIFwwm82ciY+nOCmp/n/36p+vjg8/hAsX3B7bV+3bnVD/PQqUYI4NGo4vGNsfOLVBN+2nWf/ulZXw669gMLhsnwpQUVGBoiiub8ANBsjOhoICryWSg/n3KJhjg+COL9SvgdmnzBhiyil2eZlRqDCbqTJVgouWYoipJPsUFBRUNrqZyOfQmiS2pgv1a6DBUOzmY6Riv065bH/NuExJ+6spmGOD4I7Pm9dASWSFkJKSEgBSrrkmwJEI4R8lJSXExcUFOgzAqf2lpAQ4EiH8I5jaH4RAG8zJgY4dAx2FaEGCqQ3a2t/27c1rf9JMRKgIpvYHjjaYl9f0NijtT4SShtqgRgm2dLNwy2q1kpeXR5s2bVx2LyouLiYlJYWTJ08S66ZipKl8eexgOqengjk2CO74PIlNURRKSkpISkpCqw2Oofwaan8gbdCfJLamayi+YGx/INfAYBLMsUFwxyfXwKaRNliTxNZ0cg1sPGl/NQVzbBDc8XnzGigVWSFEq9XSuXPnBveLjY312S+tL48dTOf0VDDHBsEdX0OxBdNTMPC8/YG0QX+S2JquvviCrf2BXAODUTDHBsEdn1wDm0baYE0SW9PJNbDxpP3VFMyxQXDH541rYPCkmYUQQgghhBBCCCGEqIcksoQQQgghhBBCCCFESJBEVgsSERHBCy+8QEREREgdO5jO6algjg2CO75gjq25pA36j8TWdMEeX1NJ+/OfYI4Ngju+YI6tuaQN+o/E1nTBHl9TSfvzn2CODYI7Pm/GJoO9CyGEEEIIIYQQQoiQIBVZQgghhBBCCCGEECIkSCJLCCGEEEIIIYQQQoQESWQJIYQQQgghhBBCiJCgD3QAwnNWq5UjR45gMpmQoc1
ar6ioKC666KJAh+FTiqJQUlJCUlISWm1w5NutViu5ubmUl5djMpkCHY7wAY1GQ3x8PG3atAl0KAEVjO0P1DZ4+PBhaX8BFhYWRkJCAjqdLtChtFjB2AZt18CysjIqKysDHY4QLsXGxhIfH9+sYwRj+xNC1CWJrBCyZ88errzySjp16oRGowl0OCJAysrKOHXqVKDD8IuTJ0/SuXPnQIcBQF5eHl26dCE5OZnIyMhAhyN8QFEUzp49S0lJSaBDCQrB1P4AfvnlF0aNGsVFF10k18AAMpvNnDx5Uh6o+UEwtcHc3Fy6dOlC586dg3ImLCEAioqKOHfunFeOFUztTwhRlySyAmDLli1ceeWVzJ49m+eee87j9x09epQBAwawZMkSYmNj62y3WCxkZ2fTrVs3rz8p9eWxg+mcngpUbIqisHnzZnbu3Mmf//xntzdzZrOZ1atXc8MNNxAWFua3+DzhSWzFxcWkpKQEVWWMXq+nY8eOZGRkkJqa6nIfaYP+44vYzGYzy5YtIykpidtuu61ZxwnW9gcNxxeM7Q/g8OHDDBo0iEWLFrmMTdqff/z666/85z//oXPnztx7770h+TseSKF6DdRoNHTq1Im//vWvXH755S73kTboPxJbXYqi8M0333D27Fn+7//+z+1+oXoNFELUJIksP7NarUydOpWhQ4c2+r1VVVXExMTQtWtXl0/DLBYLJSUlPin59+Wxg+mcngpkbD169GDPnj3ExMS4PbfZbCYqKorY2Nig/BDvaWzBVHVRVVWFXq+nZ8+eJCQkuNxH2qD/+Cq2xMRE9Hq9y4cFngrm9geexxdM7Q/UNhgbG0uXLl3kGhhAERERfP3114SHh4f873gghPI1MCwsjB49esg1MAhIbK5dfPHFnD9/vt5reKheA4UQNUkiy88WLlxIamoqRUVFgQ5FCCGEEEIIIYQQIqTICHZulJaW8sILL3DTTTcRHx+PRqNh8eLFLvc1mUzMmDGDpKQkDAYDqamprFmzps5+BQUFzJ8/n5deesmrsb777rv2Kq033njDK8csKCjgoosuIjs72yvHc3b11VczZcoUt9/74hyh6p577uEvf/lLoMMQDZA22PA5gpG0r5Yh1NpfKJE2IjwhbdBz3ro2Xnvttbz22mvND8hHJk6cyLRp0xr1nsb+28jfJyFaN0lkuXHu3Dlmz57NgQMHGDRoUL37Tpo0iTfffJMJEybw9ttvo9PpGDNmDBs3bqyx37PPPsuUKVNo27at1+L85ZdfmDZtGu+//z7Z2dk8/vjjXjnuq6++yu233063bt3s686ePcvUqVPp0aMHkZGRdOrUiSuvvJL333+f8vJyr5w3FKxfv57bb7+dUaNGodfr+eKLL1zul5GRQbdu3YiMjCQ1NZXt27fXOc5tt91GUlISGo3G5XGee+45Xn31VangC2LSBv1vzpw5DB8+nMsvv5zExETuuOMODh06VGMfaV+tg7/a3+9//3see+yxGvssX76cyMjIRt1IhUKC15m0kZbl6quvJjIykpiYGGJiYrj55pubfcxAtsGWaNKkSWg0Gvurffv23HTTTezevbvGfsuXL+eJJ56o856wsDC6d+/O9OnTMRqNXo2tMX+/3nnnnUY/uF+xYgUvv/yyx/vL3ychWjdJZLmRmJhIfn4+x48fJz093e1+27dvZ9myZcyZM4f09HQmT57MDz/8QNeuXZk+fbp9v507d5KZmcnDDz/s1ThXrVrFsGHDGDNmDImJiRgMhmYfs7y8nA8//JCHHnrIvu7YsWOMGzeONWvW8Nprr7Fz5062bNnC9OnTWbVqFd99912zzxsKKisrKSsrY+DAgcyaNcvtfp9++inTpk3jhRdeYMeOHQwaNIgbb7yRM2fO2PcpKytj0KBBZGRkuD1O//79ueSSS/jnP//p1Z9DeI+0Qf+qrKxk3bp1/PGPf2TZsmV88803mM1mbrjhBsrKyuz7SftqHfzV/mr74IMPmDBhAu+//z5
PPvlks8/pb5WVlR7tJ22k5fnggw8oLS2ltLSUr7/+utnHC1QbbMluuukm8vPzyc/P5/vvv0ev13PrrbfW2Cc+Pp7o6Og67zl27BhvvfUWf/vb33jhhRf8Hbr9b0vbtm0bPVh6fHx8o94jf5+EaN0kkeVGRESE28EsnS1fvhydTsfkyZPt6yIjI3nooYfYsmULJ0+eBGDdunUcOnSI5ORkEhIS+PTTT5k7dy4PPvhgk2Ps0aMHzz33HJs3b0aj0TBp0qQmH8vZV199RUREBMOHD7eve+yxx9Dr9Wzbto277rqLvn37cvHFF3P77bfz3//+t8YMX9988w1XXXUVbdu2pX379tx6660cPXrU4/Pb3t++fXuGDx/O2LFja7y/pKSECRMmEB0dTWJiIm+99ZbLp0RWq5Xp06cTHx9PQkICL774Yp3tc+bMoXv37hgMBgYNGsTy5ctr7HP11Vfz2GOPMWXKFDp06MCNN97IzTffzMsvv8zo0aPd/gxvvvkmDz/8MA8++CD9+vXjr3/9K1FRUfzjH/+w73PzzTfzyiuv8Nvf/rbef4/bbruNZcuWNfCvJgJB2mBg2uA333zDAw88QM+ePRk0aBCLFy/mxIkT/Pzzz/b3SftqOTIyMujXrx9//vOfa6z3Z/tz9sYbb/D444+zbNmyGtfwhtrdpEmTWLduHW+//ba9eiI7O5vly5czYMAADAYD7du35/rrr7cnZa+77jrefvvtGucfPHhwjbbUrVs35s+fX+8+rtqRJzGDtBHhXqDaoCsNtYOrr76aP/3pT/Vek2q3h7Fjx3LixIka+1itVt544w169OhBREQEXbp04dVXX7Vva+iaVtt///tf4uLi+Pjjj+3rbPcgCQkJDB48mKeffpqTJ09y9uxZ+z61uxba3pOSksIdd9zB9ddfX2eYE0/au7u/R+7+frn721K7a6Ennxucv/fk/wvk75MQrZkksppp586d9OrVq87sGMOGDQNg165dAEyePJkjR46wa9cudu3axdixY0lLS+Ott95ye2yTyURxcbH9Zes6ZLFYsFgsbNiwgYsvvpi5c+eSk5PDO++8A6gXUts+TXmtX7+eIUOG2L8/c+YMa9as4Xe/+x0Gg8Hle5zPWVJSwpQpU9i2bRurV69Go9Hw29/+FrPZjMViQVEUFEWx71/7e9v7t2zZwqJFi+q8f+rUqWzatInPP/+cb775hvXr17Njx446x1yyZAkGg4HNmzfz+uuvM3v2bL755hv7Pq+++ipLly4lIyOD3bt388QTT3Dffffxww8/1DmOXq9n/fr1ZGRk2H9em9r/3hUVFfz8889ce+21NY5z3XXXsXnzZpf/fvX9v11++eVs376d8vLyGuvNZnO9L6DBfQL18iQ2X9myZQtarZZXXnml2cfavHkzF198Menp6eTn57NgwQIvRAgbNmyoMb15QUGBvQ06P4F15jy7TllZGdOmTeOnn37i+++/R6vV8tvf/rbG7219bO/ftm0bixYtqvP+adOmsWnTJr788kvWrFnDhg0b2LFjR53jLFmyhOjoaLZt28Ybb7zB7Nmza3ywnjNnDkuXLuWvf/0r+/btY+rUqdx3332sW7euznHCw8PZtGkTf/3rX+ucx9atID4+3qOfz9mwYcPYvn07JpOp0e8V/pGWlsb+/fvrVEf7q/05e/rpp3n55ZdZtWpVnSRpQ+3u7bffZsSIETz88MP2aouwsDDuvfdefv/733PgwAF+/PFHxo0bh6IoXvlZnLlqR578rZA2EjivvfaaV8dqBZg6dSodO3Zk9OjRdbqrNVYg2mBzNHRNctUeHn/88RrtYebMmbz++uvMmjWL/fv388knn9CpUyfA82uazSeffMK9997Lxx9/zIQJE1zuU1payj//+U969OhB+/btPfo59+7dy+bNmwkPD6+xvqH2np+f7/bvkau/XykpKfZ/1/qu0eD55wZnDf1/gfx9EqI1k1kLmyk/P5/ExMQ6623r8vL
yAIiKiiIqKsq+3WAwEBMTU+94WXPmzKnTv/z666/n6NGjhIWFUVFRQXZ2Nl26dCErK4sZM2Zw/vx5dDodf/zjH7npppvs73vsscfIzMxk+PDhdZ7u1rZ//37i4uI4fPgwoI5/oCgK3bp1q/HkZsSIEfYS4nvvvZennnoKgAEDBgCgKArR0dE888wzjBw5kq+//ppevXpRUVFBYWGh/fi1v7e9H6Bv3748++yz9vcnJyezdOlS0tPT6dKlCwDPPPMM3377bZ1j9uzZk3vvvReA4cOH079/f/7973/TrVs3KisrmTNnDh9++CEXX3wxFouFkSNHcuutt/Lmm2+SlJRkP05KSgp/+MMf7DHZzmGTl5dXY92ZM2ewWCyYTKYa68PCwjh+/Hid97s7jk1VVRWVlZVs2bKF5ORkTpw4wYkTJ/jqq68anNbY3QfZYFBfbL4a78lqtTJ16lSGDh3qlePFxMSQnZ3NVVddhdls5u677yY3N5eoqChmzZrF+PHjATh58iT3338/Z86cQa/X19jmyvHjx+2/gwBHjhyxt0FnHTp0sI+BkZaWxty5cwH43//93xr7/eMf/6Bjx47s37+f/v37N/hz2d5vS8J+8MEHJCQksH//frp27cqSJUv45JNPuO666wBYtGhRjXhtBg4caO/a0LNnTxYsWMD333/P6NGjMZlMvPbaa3z33XeMGDECUKfN3rhxI3/729/4zW9+Yz9Oz5493Q4gbLVamTJlCldeeaVHP1ttSUlJVFZWcurUKbp27dro94vA8bT9Xbhwgeuvv56qqiqqqqp44okn6u3mX7v92axfv57vv/+e77//nmuvvbbO9obaXVxcHOHh4URFRdkrvnfs2EFVVRXjxo2z//7ZroG2hxze4qodefK3QtpI4MydO5cuXbowaNAgfvzxR7f7TZo0ieXLlzNlyhR69uzJ4sWLGTNmDGvXruWqq66y7/fGG2/Qr18/dDod7777LjfffDMHDx5sdBcwG0/boE15eTl9+/Zl/PjxzJs3z+1x3bXB5qrvmgR124PztW/QoEGUlJTw9ttvs2DBAh544AEALrnkEq666qpGXdNArTR99tlnWblyZZ1tq1atIiYmBlCTT4mJiaxatQqt1n39ge09VVVVmEwmtFptncRiQ+09Pz/f7d8joM7fL5vaf1tq/+0qKSnx+HODs4b+v0D+PgnRmkkiq5kqKiqIiIiosz4yMtK+3RV3T9WczZw5s0ZZ7ooVK/j444+55JJLiIiIYOvWrQCMGTOG4uJiMjIyaNOmDdHR0QwfPpzf//739uqNmTNnUlJSwkcffUTPnj3rPa9Wq6VTp072/c6fP2/fdskll9gvpNu3b8dqtXL//fcTHR1t3//w4cO8+OKLbN++nXPnztmf9Gg0Gnr27InBYKBdu3b2/Wt/7/x+5zGlbKXMZrOZsWPH2hNZAH369KlzzEsvvbTGz9qtWzeqqqro2bMn+/bto6Kios7NTGVlJYMHD65xnIEDB9b5N7NarfakXlJSUo3ttn/zlJSUGuvbtWtHRESE23//2sepzfZ/Ul5eTpcuXRgzZozbRJbZbGbNmjWMHj2asLAwt8cMBE9iKy4u9sm5Fy5cSGpqqtcGBrU9zR4wYADFxcW8+eabREdH06ZNG/uYIdHR0ej1eubPn8/gwYM5deoUl19+uX2bKxUVFfa/IfWxtcEJEybUeBp5+PBhnn/+ebZt21ajDZ44ccKjZI/z+53b4IkTJ+zVgLaqU4C4uDh69+5d5zgDBw6s8X1iYqL9eEeOHKG8vLxOF93Kykouu+yyGuvqezL/+OOPs3fv3jqTa3jKNp5Laxosv6XwtP21adOG9evXE/X/2bvv8Ciq9YHj39lN35CEEEroJVSlKRBURKWp2BHsYvtdLLFdrmK5io0rKoqNeO39oiJ2xAIIgoFAUEA0CgLSO6mbZJPN7vz+ONmSZNO3Ju/nefbJ7Mzs7EmyZ3f2nfe8JyaGoqIijj/+eCZ
NmlRjdkNN/a9v377OGY1HjBjh/KLp0Jh+N3jwYMaOHcvAgQM588wzmTBhApMnT6Z169ZN+dN45Kkf1afN0kcCZ+vWrfTu3Zv169fXeAHGUat1zpw5zguKU6dO5fjjj2fGjBmsXr3aua/7+/aMGTN48803yczMrLVUQm3q2wcd/vOf/9RruGB9PwMbqrbPJKi9PwwePJg//viD0tJSZzDGXUM+0xYuXMjhw4fJyMjw+H8944wz+O9//wtAbm4uL730EmeffTbr1q2rMVjjeExRURHPPvssYWFh1QJXdfX3xr4f1ZU9t2PHjnqfN7ir6/8F8v4kREsmgawmio6O9pjO6siSaErRy8jIyEpBMkdGl9FoxGg0snnzZlJSUoiLiyMuLo7k5GT++usvOnbsSFJSEvn5+c4hj2PHjmXFihVomlZnFk/btm3Jy8tz7te3b1/nWHiDweBc7wi6xMTEVDruhRdeSLdu3Xjttdfo2LEjdrud448/HpvNhtFodAakHPtXve94/Msvv4zVaqVLly4MHjzY+Xj3v4G7qseMiIiotI/BYEDXdYxGozPA+PXXX9OpU6dqf3f348TGxtb6N3P/m4AKOBmNRo4ePVpp/ZEjR0hOTq7xWFWP4+AIunTo0MH5exuNRsLDw+v8X4aHhwddIMuhtrY5+tSkSZP45ZdfyM3N5a233vJYf6O0tJSZM2fy3nvvkZuby6BBg5g1a1a1k8ljx47x3HPPkZmZ6bVZwzZu3EhKSgomkwmTyUS7du3466+/6NChA0lJSeTk5DjrQTiyNKtu8yQpKYnc3Fzn/ZSUFGcfdNezZ0+g+vvMeeed57EP1rfAs+PxVftgfR/vUPX/q2ma88TZbDYDNfdBdzX9nR577DFWrlzJypUr6dy5c4Pa5uAI1Ldt27ZRjxeBU9/+ZzQanZ+fpaWlzuHsNana/xzat2/PF198wbhx4zjrrLP45ptvKmWyNKbfGY1GlixZwurVq/n+++958cUX+fe//83atWvp2rWr83PLXdWh1/XZBzz3o/q0WfpI4DiGrNWmtlqt999/P3v27HEOAavK02unIerbB0EFUf7880/OO+88fvvtt1qPW1MfrE19+kFtn0lQvT9YrVYGDx7sPE5t5/QN+UwbOnQov/zyC2+++SbDhg2rVBoAVF9NSUlx3n/99deJj4/ntddeq7Esgvtj3nzzTQYPHlytYH5d/b2296MePXrU+LvX9BndVHX9v0Den4RoyaRGVhM5ZjesyrHOF6nRDhs3bmTw4MHV1v/888/YbLYaT1zqMnToULKzs533HcUe//e//1WaFcyTY8eOsWXLFh544AHGjh1L//79G3QyUvXxvXr1qvT4nj17Eh4eTlZWlnNdfn4+W7dubcBvCAMGDCAyMpLdu3eTkpJS6dbYv5tDREQEJ554IsuWLXOus9vtLFu2zJlu3hC//fYbnTt3JikpqUntCiXHjh0D1NVoT69xd9deey1z587lyiuv5Pnnn8doNDJx4sRqGTr//ve/ufPOO2sdzttQjemD9emf0gdr74O6rnP77bezdOlSlixZUusJdl1aYv9qLhrS//Ly8hg8eDCdO3fm7rvvrvX/XbX/uevWrRs//vgjBw8e5KyzzqKwsBCof7+LiIioNuxG0zROOeUUHnnkETZs2EBERASfffYZoOq+uZ9jFBQU8Pfff1d6fNu2bevcx5P6tln6SHCrb63WvLw8lixZQmlpKWVlZTz77LPk5OSQmprq8bhV67Q6Xuvu9Tw3bNjAoEGDKtVLBVi/fj02m42OHTs6t/3rX/9i1qxZ2O32Omu5Dh48mOzs7ErrHEGqmh6blJTEvn37nPdzc3P5+++/nftXrcdadd3hw4fZsmUL9913H6effjp9+vRxBkkcx+jZsyfR0dEsWbKk2vP37duXyMhIdu7cSY8ePSrd3P8Ouq7Ts2dPli5dyhdffMGtt95aY5vc/64Gg4GioqJKtVUdbfNUe/bee+/lgQc
ewGw21/j7HT16tNrf1G63M3LkSGbOnMn69euJiIjgk08+wWazER4eTnl5eb3aCzjXd+vWjfDwcNauXevcJycnh61bt9ZYM7eu/5fj9uuvv9K5c2dat27t1TqyQojgJxlZTTRkyBCWL19OQUFBpZOItWvXOrf7iqNovLu8vDxuuOEGXnvttUYf98wzz+S+++4jNzfXmU48b948TjnlFFJTU3n44YcZNGgQBoOBrKws/vzzT2dacevWrWnTpg2vvvoqycnJ7N69m3vvvbfez+3++Hbt2pGZmUl6erpze6tWrbjmmmu4++67SUxMpF27djz00EMYDIZqV7Rq06pVK+666y7++c9/YrfbGTVqFPn5+WRkZBAXF+esfeCJ2Wxmy5Ytzpls/v77bzZu3EhiYqJzuOP06dO55pprGDZsGCNGjOC5556jqKio0gxXZrOZbdu2Oe97Og6ooqcTJkyo9+/WHDjqL/z2229s3bq1yUMqNmzYQFZWVqXXkjc0tA/m5OQwderUOvun9MHa+2BaWhrz58/nhRdeoFWrVhw8eBBQQxUcV8ylfzV/Del/CQkJbNq0iUOHDjFp0iQmT55cY7aLp/7nrkuXLqxYsYIzzjjDOYtmfftd9+7dWbt2LTt37iQ2Npbt27ezbNkyJkyYQLt27Vi7di1Hjhyhf//+AKSmpvK///2PCy64gISEBGbOnFktE3fMmDG8/fbbnHfeeTXu40l92yx9JLjVt1ar1WrlvvvuY8uWLYSHhzNkyBAWL15MfHy8x+N6qtPauXNndu3a5Xyvz8zMZMyYMZXqe+bl5XHVVVfx2GOPOdcvW7aMtm3bomkahw4dIi8vr8Z6oaDKRfz++++sX7/e2b7CwkIKCwtZtGhRpX0TEhJITk5myJAhvPPOOwwZMoS4uDheeOEFNE0jJyeHv/76q1o9VlD1pwwGA3/99Rd2u52EhATmzp1LaWkpBw4cYO7cuQAcPHjQ+bgbbriBGTNmkJOTwwknnEBOTg7btm1j8uTJXHvttdx5550cOHCAE044gcLCQjZs2EBsbCwXXngh4KoLq2kab7zxBlOnTqWwsJD7778fUIHo3Nxc5/lLQUEB//vf/zCbzQwdOrRSLViA7du3U1BQQFFRUaXfbfDgwei6zqOPPsr1119f6+/nqNG6adMmMjMzOeWUU0hMTOTXX3/l8OHDxMXF8ddff9G6dWtWrlzJihUriImJIT4+3uPf1f214Fh/wQUXMH36dIqLi0lMTHTW73Lfx/1Ydf2/HBYvXsyIESOc6/bu3cu2bdtYvHhxja8vh5pqtcowRSFChC7qlJWVpQP6W2+9VW1bZmamDuhz5sxxrrNYLHpKSoqemprq1XZ89NFH+rhx43SLxaLbbDY9JiZGX7RokXN7UVGRfuKJJ+pvv/22x8cvX75cv/jii+v1XCNGjNBffvll5/3y8nL9xx9/1NPS0vQePXro4eHhemxsrD5ixAh9zpw5elFRkXPfJUuW6P3799cjIyP1QYMG6StWrNAB/bPPPtN1XddPO+00/Y477nDuX/W+++P79u2rL1u2rNLjCwoK9CuuuEKPiYnRO3TooM+dO1cfMWKEfu+999Z4TF3X9QsuuEC/5pprnPftdrv+3HPP6X379tXDw8P1tm3b6meeeab+448/1nqc5cuX60C1m/uxdV3XX3zxRb1r1656RESEPmLECD0zM7PBxykpKdHj4+P1NWvWONdt3LhRf+ihh/Ty8nK9JmVlZfrnn3+ul5WV1bhPoNSnbfn5+Tqg5+fn19r/7r77bt1oNOr5+fmV1j/++OM6oO/evVvXdV1/9tlndZPJpLdv315v3769HhUVpcfGxurXXnttvdu9Z88ePTk5Wf/99991Xdcb3ActFot+6qmn6u+++269nk/6YM3H8dRvqr5GGtu/3nvvPf3DDz+s4b9SP8Hc/3S97va5979g8r///U8/++yzG/0Z6HDzzTfrH3/8ca37VO1/U6dO1ceOHVv
pfXfv3r1679699ZEjR+r5+fl19jtd1/UtW7boI0eO1KOjo3VAz87O1s8880y9bdu2emRkpN6nTx/9xRdf1HVd9fmsrCz9kksu0ePi4vQuXbrob7/9tj548GD9oYcech4zPz9fv/TSS2vdx1M/0vW63ys89ZGcnBz9wQcf1F955ZWQfY0HUkM/A3W99vPQnj176meffXa19du3b9cB/dlnn21UOy0Wi56fn++8/frrr3rnzp317Oxsvby8XC8rK9NjYmL0L774Qi8vL9fLy8v1wsJC/cQTT9TffPNN57ry8nJ9xowZeufOnfVu3brpbdq00ePi4pznMTXdhg8frr/00kvO+1dffbXH9/Trr79eLy8v13Nycir1lTfffFMfPHiw/uCDD+rl5eX66NGj9dtvv73Sc5x//vn61KlTnfe//fbbSv1h6dKlOqB//PHHzn3Kysr0Rx99VO/WrZseHh6ud+3aVZ81a5ZeXl6uW61Wfe7cuZU+0yZMmKD/8MMPzsdXbcfmzZv1du3a6XfeeadeXl6uT506tdLv16pVK33YsGH6ggULKrV99OjR+tVXX62XlZXpU6dO1c8///xqf8P//Oc/etu2bfX8/HyPv5/js/2TTz5xtmXChAmV3o+ef/555/Gys7P11NRU5/vXtm3bPP5dy8rK9OHDh+u33Xabc11ubq5++eWXO88bnn76aX348OH6jBkzPP5t6vP/MpvNenx8vP7TTz851y1btkx/8skn9bKyshpvRUVF+ueff64XFRV53H706NGg/AwUQlQmgaxavPjii/pjjz2m33zzzTqgT5o0SX/sscf0xx57TM/Ly3PuN2XKFD0sLEy/++679VdeeUU/+eST9bCwsEpfxrzBPZBVld1u1y+99FI9LS2txgBHQwJZixYt0vv376/bbDZd19UJ9R9//FFr8MTb6vucjg+y119/3U8t89/f46WXXtLHjx9faZ0EslzGjRun9+/fv9p6x8nnl19+qeu6+oJ74MAB5+2SSy7R77nnHj03N7fe7a4ayKqqtj5ot9v1yy67rNIXy7pIH6ydN/4envqXBLJCI5BVVW397+DBg3pBQYGu67qel5enH3fccfqvv/5a63OFUv/zJU99RAJZTePtQNZxxx2njxkzptr633//XQcqBWSb4u+//9Y7d+6s//nnnx631+c8VNd1/a233tL/9a9/1fl80gdrF+pt88Z5g6f3pxUrVuhPP/10rY8L1c9AIURlMrSwFk8//TS7du1y3v/000/59NNPAbjqqquc6c7vvvsuDz74YKVi04sWLWL06NF+a2tGRgYLFiygb9++ziFG7733nnPa3HHjxrFp0yaKioro3LkzH3/8ca31ms455xz++usv9u3b1+SaUd62YcMG/vzzT0aMGEF+fj6PPvoooNKWm5vw8HBefPHFQDcjaNV3SEVMTIyz2DOogq2xsbG11ssqLS2tNJGDoz5I1foUDj/99JOzD55wwglomsbbb7/NwIED+emnn/joo48YNGgQn3/+OYBzW03OOuss5xDWLl26OGtOVC106ks1PeeGDRvYsmULw4cPJz8/31l89txzz/X4t/Fn2xrCaDTy3HPPVas3Ul5e3qQaGe71N4JRXe0L1nbXprbPwF27djFt2jRnfZXbbrut1r4Hwf0Z6E/yGRT8kpOT2bdvX7X1/qjV6q6u89CGkj7YvPji3F3en4Ro2SSQVYuqM4TVJCoqijlz5jBnzhyftCM9PZ309HTy8/MZMGCAx31GjRqF1Wrlr7/+onfv3tXqYyxdurTBz+utmd184emnn2bLli3OwuqrVq1qloVo/+///i/QTQhqJSUl1WYDApzTdjtqSFT19ttv13lsT/VBHHVkPM222L59e48Fov/6669at9Vm4sSJWCyWSvtt3769zrZ7W9Xn3L17N48//jg7d+4kPDycAQMG8O6775Kbm9vgmaa83baGcFxscP/7HjhwgKNHjxIbG9vkttVUfyNYhEp
9EPfPwJomf6jtM3DEiBHOgtcNEcyfgf4in0HBL5C1Wt3VdR7q4Gn24ZpIH2xevH3uLu9PQrRsEsgKAWlpaaSlpbFgwYImFXFvLoYOHcrPP/8c6GaIIBAdHV0pa8rBYrE4tzfWfffdx/Tp05339+3bx9ixY+natSu9e/f2+Bi73c727dvp1asXBoN3J4X15bEb+py9e/cOeAakr/4eycnJdO7cmYkTJzb6GFarlSVLljB+/HiPQc9Aq6t9BQUFAWhVzRyfgfPnz+f9998PdHOECCqTJ0/m6aef5tVXX3VOelJaWspbb71FamqqZDOJoCDn7kIIb5NAlhAhRq+Yglr4dkhFZGRkpWwvx5d7g8FQ54xg9dmnsXx57GB6zvrydts0TSMsLMwrAajw8PCgDGQ51NS+YG1zWFgYZrOZ3NzcSpknDjabjZKSEoqLi73+evXlsYPpOetj7969AH4LqLdkr776KhaLxTlM/quvvnL+/W+77Tbi4+NJTU1lypQp3HfffRw+fJiUlBTeeecddu7cyRtvvOHV9sj5hwh28hoVouWQQFYIadWqFfv372fp0qU1nsTv3buXgwcP+uQk3lfHDqbnrK9AtU3XdX7//XdMJlPQ/U0CwZ9DKmJiYjCbzaxatYpjx445px93J33Qf3zRNqvVyq5du5z1XURwGTRoEJs2beLFF1/0GGyz2+3s37+fjh07+iQj0lfHDqbnrK+uXbvSunXrQDej2XvxxRfZvXu3834garU6hvZarVbMZjM//vgjhw8f9rivfAb6j7StOl3X2bBhg8fvSEKI5kcCWSEkNTWVP/74g02bNnk8ibfZbGzbto2UlBSfnED46tjB9Jz1Fci2xcTEcPnll/v1OYOVP4dUhIWFUVhYSGFhoTNQVpX0Qf/xRds0TaNXr16MGTPGK8cT3tWxY0cKCgqYNGkSJpOp2nar1crKlSsZPXq017PKfHnsYHrO+ggPDyc5OTnoa8A1B5s3b67Xl3Jf1mp1DO0tKCggPj6evLw8MjMzPe4rn4H+I23zLD4+nkmTJvn1OYUQgSGBrBASFhaGruvceuutHk9srFYrixcvZuLEiT45iffVsYPpOesrmNvWnMyZM4djx44BgR9S4XDjjTfW+MVC+qD/BHPbhG/17t27xs/AHTt20K9fP5/0P18dO5ies75CcWZL4R033XSTfAYGAWmbEKKlk0CWECJozZo1y7kciCEV4BpWYbPZvHI8IYQQQgghhBCNJ4EsIUTQys/Pr3NYhS+HVED1YRVCCCGEEEIIIQInuKqHCo/S09MZMGAAw4cPD3RThBAiKGRlacyePYKsrOpF94UQQgghhBDNlwSyQkBaWhrZ2dlkZWUFuilCCBFwug7z5hnYvDmJefMMyGzbQgghhBBCtBwSyBJCCBFSMjMhI0MjOrqcjAyNGibQEkL4iJaVxYjZs9HkAluzJ6MChBBCBCMJZAkhhAgZug7p6VBaCvHxpZSWqvuSlSWEn+g6hnnzSNq8GcO8edL5mjkZFSCEECIYSSBLiEaQ+jxCBEZmJqxaBfHxOpqmfq5ahWRlNWOSERJkMjPRMjIoj45Gy8iQzieEEEIIv5NAlhANJPV5Whb5Eh083LOxTCa1zmRCsrKaOckICSJunbA0Pl46nxBCCCECQgJZQjSQ1OdpWeRLdPBwZGMlJIBWkQypaeq+ZGUJ4QcVnVCPjwdNUz+l8wkhhBDCzySQJUQDSH0eIQLDve9pGuTlaVitKpolWVlC+IGkRAohhBAiSEggS4gGkPo8QgSGezZWfj4cPAiHDpkoKZGsLNE8BV0tRkmJFEIIIUSQkECWEPUkF6OFCIyqfa+42LUtKkr9lL4ompOgq8Xo6QPQQTqfEEIIIfxMAlkhQIpNBwf3i9E2GxQXh2G3y8VoIXytat+zWtX6iAibJIaIZinoajF6ysZykM7XrMk5qBBCiGAkgawQIMWmA6/qxeijRzVycqLYt0+Ti9FC+FBt2ViRkbZ
K+0pfFM1B0NVidG9QTIznfaTzNVtyDiqEECIYSSBLiHqoejE6PFytLy5GsrKE8KGqfa+2QJYkhojmIOhqMToaZDLB9u1gNlffRzqfEEIIIfxIAllC1MFTaRCb2/fnoiK5GN2cybCKwPHU90pKXNsjImzVHiN9UYSyoKvF6GiQxQJHj0J5OezZg+YpmCWdTwghhBB+IoEsIeqwZg18+606R3eUBnGvdVtUJBejmzMZVhE4VbOx7Hb1fRogMhIMHj7BpC+KUBZ0EwM6GlRWpoJYAFFR6FULvoN0PiGEEEL4jQSyhKhFeTnccgvk57suRgPExLiuNjsuTMvFaCG8x5EIUlQERqMKYBUWxqMBNgAAipdJREFUqm12O4SFQVmZAYuFajejUT1O+qIIJUE3MaCjQTk56slBda7OnasXfA9oQ4UQQgjR0kggS4gaWCxw1VXwxx/q3L28XH05BnUO76jPU16uLlbLxWghvMdqhT171Pdis1kFscrKICkJEhNVnTqLJQyzWaOwkEo3s1k9bu9e1wyHQgS7oJsYMDMTvv9edSJNU7cuXVxFIj2RD0IhhBBC+EFYoBsgRDDKzYU774QlS1T2R0QEdOoEcXGufaKiyjGbIwAV4IqIUF+e8/LUxeiRI2u+aC2EqF1EBCxYoPqiJ1ZrOStXrmH06NGE1/DFOjFRHUeIYOeejZWU5Hkfv36+6DrMmqU6YFiY+iBs21Y9qcUCuo6hrEwtV22Ie0qkfBAKIYQQwgckkCVEFfv2wW23QXa2yuwID4euXasP9YiKsjmHFZrN0Lp19YvRJ53k9+YL0Wy0b69unlitsGOHmX79ak8QESIU1JaN5eDXz5d9+2D1alWIzm6HmBjVgIrxvRoQZrGg1TbE0JESKdFkIYQQQniZBLKEcPPHH3DHHXDsGBw5oi5Kd+rkqtHjoOuu8h92uwpklZSo83y5GC2EEN6Tnp5Oeno6Nlv1WSqbg/pkYzn4JSvLbocHHlBXcGw2OO44ePBBlZlVodxqZc3KlbVmREpKZPPQ3PufEEKI0CSBrBAgJxH+sXo13HOPCkjpuroQ3a6dqstTVlZ1bw2LJYywMBXgstvVCAzH+bxcjBbCuz77DH74AQYPhvPOU9+RRcuQlpZGWloaBQUFxMfHB7o5Xlc1G0vX4cABiIzUsNkqlzL1S1aWwQDTpsH996vx9K+8op7UndWKeccOJCWy+Wvu/U8IIURokkBWCJCTCP/4808VxAI44QRVn8cxS2FVjvo8rVufxt69YQwaBN26Vb46LhejhfCezExYs0bdRo+WQJZoHjxlY5WWqplyASIjw2nbtvJj/JKVdfLJ8M476kOwahBLCCGEECLAJJAlRIXrroODB9UXhFmzag9COerzTJyoy8XoZk4yIgNP12HTJrUcEwMpKWrEkxChzlNtLPdh7OHh9mqP8VutrG7dfHRgIYQQQoimMdS9ixAtg6apoYVPPCGZVMIlLS2N7OxssrKyAt2UFmv/fjh6VC0PGqRGPgkR6hzZWEVFrjqMFouquWi3qxu41rvf3GsxOuo1Nsnu3SoN2SsHE0IIIYTwLcnIEi2SxQIPPQQXXwwjRrjWG42Ba5MQwjNHNhbAkCEBa4YQXmW1wp49aqigYwZcUAEqRxDLZjNgNnseO+i1WoxmM0yfDjt3qhlP7rtPruYIIYQQIqhJIEu0OHl58M9/wubNqt7O669Dnz6NP57dDr//DmvXqi8Wl1/utaYKIYCNG13LgwcHrBlCeFVEhEqCys2tvP7//k99TplMOldfvZrTTqt5ZsAm12J0zFC4c6e6//vvqi6WBLKEEEIIEcQkkCValH374Lbb1CgKh4KCph3TYlFfPGw2NVu5BLKE8C5HIMtggOOPD2hThPCq9u3VzSEvT32mREXBoEE6XbqYfTsx4H//Cz/9pJbj4mDuXFWITgghhBAiiEmlEdFi/PGHKujuCGIlJalsrGHDmnbcmBgYOFAt796tpk0XQnhHQQH
s2KGW+/WD6OjAtkcIX9q2zbXcq5ePn+z77+Gtt9SywaAKRHbu7OMnFc1ZVpbG7NkjyMryxVSaQgghhIsEskSLsGYNTJsGOTnqfvfu6vy9KUMK3aWmupbXrfPOMYUQ8OuvrmWpjyWau8qBLB8WXt+yBR55xHX/n/+sXDBSiAbSdZg3z8DmzUnMm2eQeQOEEEL4lASyRLO3aBHceSeUlKj7gwfDm29CcrL3nsM9kLV2rfeOK0RL514fSwJZornzSyArJ0cVdy8tVffPOw8uu8w3zyVajMxMyMjQiI4uJyNDIzMz0C0SQgjRnEmNLNGsvfsuvPCC6/6YMfDYYxAZ6d3nOe44Vei9qEhlZNntaqSGEKJpJk9WGZQbN0ogSzR/w4ZBWZkKaPXqpWY19CqrFe65Bw4dUvcHDlSzFGoyFEx4lp6eTnp6OjabrcZ9dB3S01VsND6+lKKiKNLTYeRIeWkJIYTwDfmqLZq1bt1cAaVLLlElQLwdxAIwGl21tvLyYOtW7z+HEC1Rhw5w7rlqYrXExEC3RgjfOussePRRmD/fRzXXjx2Dw4fVctu2MGeOzFAoapWWlkZ2djZZWVk17pOZCatWQXy8jqapn6tWIVlZQgghfEYCWaJZO+00uPdeuP12uPtu32ZJyfBCIYQQQa1DB5WqPGoUPP20mvVEiCZwz8YymdQ6k0ndT09HamUJIYTwCQlkhYD09HQGDBjA8OHDA92UoGexVF83aRJMner79PaRI13LEshqPqT/CSGalfh4eO45NSZeiCZyZGMlJLjOszRN3ZesLCGEEL4igawQUJ+0bgH798MVV8AHHwTm+bt0URe7QdXzcdTRFaFN+l/gLFoEK1dCfn6gWyKE7+XkeL4Y45UDW60+OLBo6TxlYzlIVpYQQghfkmLvoln48081fDAnB+bOhfbtVWF3f9I0mDgRjhxRwwylwKkQjWe3w7PPqiBWQgIsWSJ9SjRvTz+tXuedO8Mrr0Dr1l44aEkJ3Hqriio8+aQUmhNeVTUbyz1gVTUr66STAtVKIYQQzZEEskTIW7NGTcJUXKzud+sG/foFpi233BKY5xWiudm1y5WJNWiQBLFE87dtmwoEHDig4k1NzmLRdXjkEdfsIw88AC+91OR2CgGVs7FqKrVmMqkJcGQGQyGEEN4mQwtFSPv6a7jzTlcQa/BgePNN6NgxoM0SQjTRxo2u5SFDAtUKIfzDalXBW4AePSDMG5cZ33oLli5VyzExasYTIbzEU22sqqRWlhBCCF+RQJYISbquAlYPPQQ2m1o3Zoy62BwXF9i2CSGabtMm17IEslq2ljDhws6drs+ylBQvHHDlSlf2labBrFkqQiaEF9RWG6sqqZUlhBDCFySQJYJWVpbG7NkjyMqqfKnPblelPtxHSFxyCTzxBERG+rmRNSgshB9+gN9/D3RLhAhNjoysiIjADRUWwaElTLiwbZtruVevJh5sxw41jNDh5pth9OgmHlQIl/pkYzlIVpYQQghfkECWCEq6DvPmGdi8OYl58wyVruI99RQsXOi6f/vtasSEIUhezb//DmPHwowZldsphKifY8dg7161PGCACmYJ0Zy5B7KalJFVUADTp7vG20+YANdd16S2CeGuIdlYDpKVJYQQwtuC5Ku/EJVlZkJGhkZ0dDkZGVqlq3iXXAKtWqkaIo89BlOnBlcB0d69XfVN1q6VkzYhGsp9WOHgwYFrhxD+4pVAls0G993nigL37QszZwbXB6QIeVlZ9c/GcpCsLCGEEN4mgSwRdNyv9sXHl1a7itezJzzzDLzwApx9dmDb6klEBJxwglo+fFjVPhFC1J8Uehctzfbt6qfJBO3bN/Ign3yirp4AtG6tPiijorzSPiEcXnsNiorAaASLpfqtrMzgcb3RqB4nWVlCCCG8wRvz4gjhVY7aC/HxOjYbmEw6q1aprKyTTlL7OAJFwSo11XXVce1aqbErREO4B7IkI0s0d2YzHDyolnv1akIC1UUXqYj
Y55+rMfgdOniriUI47d2rAq5ms6etGhZLGFoNL2KTST3eapUh40IIIZpGAlkiqLhnY7VpA4cPG8nLUydE6ekwcmRojJJITXUtr1sHl10WuLYIEUp0HYYNU8vl5TILqWj+du1yLTepPlZ4uBpaeNllcvVEeE16ejrp6enYKqbVfOcd9d7sidVazsqVaxg9ejTh4eEe90lMlCCWEEKIppNAlggq7jPhFBRoHD0ajdGoSn989x2VsrKCWUqKOlnLyYH169VJX5j0NiHqpGlqAgdQM5QK0dwddxwsX64mG4yN9cIBJYglvCgtLY20tDQKCgqIj4+nXbuaLzBYrbBjh5l+/VRcVQghhPAVqZElgoZ7NlZYGBw44NoWH6/qK4RKbQWDAUaMUMvFxfDbb4Ftj2i89PR0BgwYwPDhwwPdlBYnWGYiFcLXWrVSw2h79WrAg8rK4P77K1eKF0IIIYRoAeRrggga7tlYhYWu9fHx0KVL6M144z680FF/V4SetLQ0srOzycrKCnRThBBC0XV4/HH4/nu47jrIyAh0i4QQQggh/EYCWSIouGdjmUxQUODa1ratSsEymag2g2Ewk0CWEA1jt7uKXgshavHBB7BokVq226Ft28C2RwghhBDCjySQFQJawtAm92yssjJ1A4iIsDlrS2laaGVltWunhokMGKCCWqEQfBMikLZtg3PPhXPOUd/ThWjujh2Dxx5Tr/etW+v5oLVr4bnnXPcffhj69PFB64QQQgghgpOUnw4BVQttNjfu2VhJSXD0qGtbTEw54KoYajJBXl7ozGD4/vtS8FSI+tq4Uf08dCigzRDCb7ZsgS++UMtTp9YjHrVnj5qZ0DETwvXXw/jxPm2jEEIIIUSwkYwsEXDu2ViaVrk+VnR05TmeQy0rS4JYQtSfI5AFMGRIoFohhP+412mvWuhdy8pixOzZaI76fEVFMH26a+z96NFw003+aagQQgghRBCRQJYIqKq1scrK1DJAdDQYjdXH44VarSwhRP1s2qR+RkfLSCnRMrgHslJS3DboOoZ580javBnDvHlgs8GDD8Lff6vtPXuqMYkytacQQgghWiA5AxIBVTUbKyICuneHxES1zpNQy8oCFXDbtatyEXshhMvBg64hhQMHgtEY2PYI4Q/bt6ufBgP06OG2ITMTLSOD8uhotIwMuPdeWLlSbWvVCp55Rl3VEUIIIYRogSSQJQLGkY1VVKS+tFos6qZpEB8PkbZijPuPYckpcW5z3IxG9bhQyMpauVIVsL74Yvjhh0C3RojgJMMKRUtjs7kSrLp2VRdygEqpyqXx8SoFec0a9eFoMMDs2dClS8DaLYQQQggRaFLsXQSM1arq1ppMYDZX3arD0WKwhMPhYkiKASpXdjeZYO9edRznF4Ag1Lq1K9Nk7Vq48MKANkeIoOQYVggweHDg2iGEv+zZ45qht1J9rIpUZT0+Hmw29Ph4tD17YOZMlY48cmQgmiuEEEIIETQkkCUCJiICFiyA3FwPGzdsRP/XvyiNKCYyOgbtiWdg6NBquyUmBncQC2DAAIiNVcG6devUZFNS1kSIyhwZWQaDGlooRHPnsT6We+HINm3UeHSTCfLzYflyeO+9gLRVCCGEECKYSCBLBFT79uoGMGsW9O4NZ5yu0+7rZ9Dt2eQnmogvKkL7+hm47D01tCLEGI0wfLj6DpKfr6Zb798/0K0SIngUFrq+1PfpAzExgW2PEP7gMZDlXjiypEStq1oY8qST/NxSIYQQQojgInkhIijs3Quffw5z5sBd1x51DavQNPUzlCq7e5Ca6lpeuzZw7RAiGO3eDZGRalmGFYqWologyz0bC9B27ya8sFDtINP1CiGEEEI4SSBLBIVlyxxLOuPyP1En7I4ZmZrBCbwEsoSo2XHHwYoV8O67cMklgW6NEP5x8skwYYLK0O3UCY/ZWJH5+Whmc2hO1yuEEEII4SMytFAEBWcgq6iIMQfegoQEtOJiws1mVWAqxIdVdO4MHTv
C/v2qFpDFAlFRgW6VEMEjLEzVkxOipZg0Sd2AytlYSUlw7JhzPz0qSk11YjJBXp7ab+TIkBxqL4QQIriVl5dT5piJRAgPIiIiCAsLfBgp8C0QLd7+/ZCdDaDT17KJzrZdoLWG3buJtNnQwsOhXbuQPoHXNJWV9dlnapbFDRtCMh4nhBDCF9yzsTRNXe0AdKNRRXlBamWJgEhPTyc9PR2bzRbopgghfEjXdXbv3s3Ro0cD3RQRApKSkujatStaAL+TSyBLBNwPP1QsFBUxtuBzaJsAR444t+vx8aqThPgJvCOQBWp4YQj+CkIIIbytajaWxaKmtwVsEREY3feVrCzhZ2lpaaSlpVFQUEB8fHygmyOE8BFHEKtTp07ExsZikCnWhQd2ux2z2cy+ffvQdZ3u3bsHrC0SyBIBp4YV6nD4MOMMFVEtx2xN4KoC7TiBv+8+eOghOP30kDqJHz5cNTc62vkdRYgW79NPVXx6yBCYOBHatg10i4TwvaNHIS4OIiKono3l9vlnc3z+OTSDizpCCCGCS3l5uTOI1aFDh0A3RwS52NhYAPbt24fFYqFfv34BaYcEskRAHToEmzcDRUX0tmyma5eiStlYlsREYhx3NE2d+Wdmws03q0q5d9wRMtOcxcfD++9Dr16ukSIi+MmwCt9as0Z9J1+1CkaNkkCWaBlmzoT166FbN513jK8R48jGgsqBrIiI6g+WrCwhhBBe5KiJ5QhQCFEXx2vlp59+QtM0+vbt6/c2SM6gCCj3bKyxxh/VEIuK2iBERlIeE1P5AVYr2Gwq2LVpE9xwA0yfDjt2+LvpjdK3rwSxQk1aWhrZ2dlkZWUFuinNjq6rbgzQqhX06BHY9ojglJ6ezoABAxg+fHigm+I127apzNy8XfnErFnmysYCKC5WPzUNe3h49QfLDIZCCCF8wKvDCdetg4suUj9Fs+N4rVitVtavXx+YNgTkWYWo8MMPQFERmM2Ma7tJjbdwcFyddtemjZr+z2JRjwNYuRIuuwweewwOH/ZLu4UQTbdnD+TkqOVBg0DKMQhPmlswOTfX8brXSclbrz7LjEb1uWY2q1pZdjuEh2OwWtX6qjejUT0uPV1FhIUQQohgoevwwguwfLn6KZ9TzVZUVBQFBQVYrVa/P7d8bRAB9eQTOve1fYPzw7+lu3GPKxsrKgq9VSvPD0pKUukbHTq4gl12O3zxBVx4Ibz4IhQW+qX9TWG1yvu6aNk2bnQtDxkSqFYI4V/btlUs6Dopti1qqKDZrD638vLU55ndDppGmMWC5tjmfjOb1eP27lUfJkIIIUSwcNR+bNUqoNnDgwcPRtM0Vq1a1eDHPvzww6xevbraek3TePrpp73RvGbBYDCg6zp6AL7UyiAnEVBt/srk4l1zubhzNBx2y8aqrVCOY1jF33/DO+/Azp3w9tvqxL6sTK2LiVHDDoPQihWqwPUvv8B778lwKtFyOYYVQsiUuhOiyZyBLM1ArwevhJFjXRtLS9UOW7ZQ3rcvaw4eZPTo0YR7GmIIkJhYUTFeCCGECALuM/F26gT79gWkpuPvv//Or7/+CsD8+fM59dRTG/T4Rx55hNjYWE4++eRK69esWUO3bt281k7ReBLICgHNtti0442uqEhN5afr6ip0VJQqJGWxYCgrU1laVd/4HMMqXn9dRYMuugjeegs++ghiY+HyywPzO9XD3r3gCPCvXSuBLNFyOTKywsLguOMC2hQh/MYZyAJShiVAv4TKOziiulYr5sWLoV8/qCmQJYQQQgSTqjPxBmim3f/9738YDAZOO+00Pv74Y1544YWaLwo1wMiRI73QOuENMrQwBDS3+iBOVqsqkmMyqVma4uLU1H7R0VBYiGY2139YRXw83HmnSnX6z39URpa7jz+GjIygGMuXmupaXrs2cO0QIpByc2HXLrU8YABERga2PUL4y/btruWePQPXDiGEEMKr3LOxTCa1zmRS9/1Y01HXdT744APGjBnD9OnTOXbsGN9
++22lff744w8mTZpEYmIiMTExDB48mA8++ABQwwcB7r77bjRNQ9M0VqxY4dzmGFr48MMPk5iYWK0+1G+//YamaXz33XfOdV9//TWpqalER0fTtm1bbr75Zooc9Z5Fo0hGlgiIb7+FzZsjGPvApwzpdMRjkedyq5U1K1c2bFhFcrK6uTt8GJ59Vg07PPFEuP32gKZ/pKSoZufkwM8/Q3m5zGQoWp6KbG9AhhWKlsNudwWyOnWqfs1FCCGECFlVs7EgIFlZq1evZufOncycOZMzzzyTNm3aMH/+fM477zwA/vrrL0466SS6dOnCCy+8QIcOHfjtt9/YvXs3oIYPnnTSSdx2221cccUVAAwYMKDa81x++eU88sgjfPfdd5x77rnO9R988AHt2rVj3LhxACxcuJBLL72U6667jkceeYQDBw5w7733kpuby4cffujrP0ezJV+fRUB8/jmsXw8ffdSGDz5oQ+/eHnayWjHv2NH0YRVffaWCWKAiR9dcA2PHQloadO3a+OM2kqaprKxvvlGzrG/eDEOH+r0ZQgSUFHoXLdGBAyoBGdRFjUrWrIHdu9UUnh4/FIUQQgg/+9//1K0uffvCkSMq+8oxGdeeiom8dF2tnzxZfcZVLRlz5ZXq5lBcDJ99VnldA8yfP5+oqCgmTZpEeHg4kydP5r333sNsNhMbG8vDDz9MREQEGRkZxMXFATiDTuAaPti1a9dahxL27duXoUOH8sEHH1QKZH344YdMmTIFo9GIruvcddddXHrppbz++uvOfZKTk5k4cSIPPvggx0l9jUaRoYXC73JyVKFzgK6mY6Tk/+zbJ7z+enjyycpBq2XL1JvpE0/AsWO+fX4PZHihaOkmT4YHHoBzz1XnNEK0BDt3uparBbK++grmzIGrr648/lAIIYQIlKIiNbqlrtvWrdWzsWw2NfTEZlPrjh5Vk3VVfWzVIXa6Xn1dPZWXl/Pxxx8zceJE4uPjAbjiiisoLi7ms88+A2DZsmVMnjzZGcRqissvv5wvv/ySkoqrVOvWrWPHjh1cXlGveevWrezatYtLLrmE8vJy5+20007DYDCwfv36JrehpZJAlvC7FSvU8Ap0O2NzFqDddCPcfLOK2PuCpqkMrAUL4N571bg+UI1YuBAuuABefrnRb5iNMWKEa1kCWaIl6tQJLrwQHn4YWrcOdGuE8I9TToGlS+GVV+Ccc6psdEzjGR3tIcolhBBCBIDJBO3a1X5r2xZ27KhcGwvU5FxhYermKAVTWKj2d3+8+2NAfXeruq6evv/+e44cOcJ5551HXl4eeXl5DBw4kOTkZObPnw/AsWPH6NixY6OOX9Vll11GUVERX331FaCGFXbr1s052+HRo0cBuOiiiwgPD3feYmJisNls7NmzxyvtaIlkaKHwu6VLKxby8hkbXXEnPFzNVuhLYWEqDWTiRJUi+957KnXVYlGzH8bFQcU46LpoWVmMmD0brW1bqDIta320a6eK/O7YAb//rt7TW7Vq8GGEEEKEmIQEVa6xksOH4dAhtXzccerk3273d9OEEEKIyqoO+/NkzRq47LLK2VgAXbpU3s9sVuPrZ86svVZWTEyThhUCXHfddVx33XWVth05coTDhw/Tpk0b9u/f36jjV9WlSxdOOeUUPvzwQyZPnsyCBQu4+uqrnQXjEysSKObNm0eq+5CcCt4KqLVEkpEl/CovT9XGQrfTyfwnfSMrpi278Ub/NSImBv7xD1Wo65JL1BeG9u1VkKs+dB3DvHkkbd6MYd68Rs/A4Xgvs9sr/iZCCCFaJvfZD2SsrRBCiFDhaabCmvh4BsPi4mK++OILLrzwQpYvX17p9sEHH1BeXs5HH33EuHHjWLhwIYWFhTUeKzw8HEs9RwtdfvnlLF68mEWLFrF//37nsEKAfv360blzZ3bs2MGwYcOq3SSQ1XiSkSX8yjmsMDePsZE/qaD96NGBmUUwMRFmzFBZWIcOVZ79EGD+fDW8w30cIEBmJlpGBuX
R0YRlZDR6Bo7UVKiY5ZU//oAzzmjk7yFEiPn6azWccNAgiI0NdGuECAISyBJCCBGKPM1UWBMfz2D4xRdfYDabuf322zn99NOrbX/qqaeYP38+7777LosWLWLUqFHMmDGD5ORksrOzKS4uZsaMGQD079+fL774glNPPRWTyUTfvn1pVcPwmSlTpnDHHXdw8803M2DAAAa7TcetaRpz587liiuuoKioiHPOOQeTycSuXbv4+uuvefzxx+nTp49X/w4thWRkCb9atgzQ7XD0KONarVMrb7opoG2ic+fq4zz27IHnn4dbboFbb4UtW9R6t6sOpfHxTbqqcMIJcP/98OWX6mmEaAnsdnj6abj9dpUE6YMLckIEpd274fHHVbnGv/+ustE9kDVwoF/bJYQQQjRKQ7KxHHyYlTV//ny6du3qMYgFcM0115CZmYnBYGD16tV0796dW265hfPOO4833niDbt26OfdNT0/Hbrdz9tlnM3z4cH7+uebJydq2bcvYsWOrZWM5TJkyhcWLF/Pnn39y+eWXc/755/PMM8/QvXt32rdv3+Tfu6WSjCzhNwUFsG4dkJNLsuEg/aP+hjFjIBij0J9/rmbYAHXFIDMTzjpLZWetWoUeHw82G3p8PFojryrExMCkSd5vumiemlqXLVj8/beqCQfq+3pdF++EaC5++w0+/VQt33Yb9OhRsaGsDP78Uy137QoVsywJIYQQQa0h2VgOPszKchRcr8kdd9zBHXfc4bz/xRdf1LjvqFGjPAav9BqCb99++22tzz1+/HjGjx9f6z6iYSQjS/jNqlVgs9rg2FHGtspCM2iBz8aqSVoazJoF7uOWv/kGrr8ecnJchel9PNZbCMBrddmCwcaNruUhQwLVCiH8b9s213KlSQn/+ENNTw7gNhxBCCGECFqObKyiIlVv2GKp/81oVI+T70+iCSQjS/jNWWdB2zVfs+yt3ZwZtwYmTFBT9wUjg0E1eMwYdQn99ddh716VSqJpaDt2EBEdrWY69OFYbyEAr9VlCwYSyBItVY2BLBlWKIQQItRYraoUi8mkZiNsKJNJfbeyWqvXKRaiHiSQJfzGaIQR1gxGJC9TgaJpzwS6SXWLiFDTyZ57rgpq6bpzWvSIggK0/fuhUyc1HWN6Oowc2eCxUr/8Aj/9pIadvPyy+tMI4VSlLluU4wpWI15rwWDTJvUzMhL69g1sW4Twp+3b1c/YWGjXzm1Djx4wfrzqHJKRJYQQIhRERKiij7m5jT9GYqIEsUSjSSBL+NeTT8LPP8Pvv4NbQb2gt3mzmtmwWzcoKYHcXGyRkRjat0dr4ljvDz6A5cvV8p9/woAB3m++CGEV9Qe8UZct0A4fhv371fLxx0OYfAKJFqKwUH2EgMrGqhSDHjVK3UCGWAghhAgd7durmxABILkfwv9OPBGmTg10K+rPfUaOuDjo0AG9Z09K2rZ1fRNvQq2s1FTX8tq1Xmy3CH2eZoMJ4bpsMqxQtFSObCyoMqywqhDMshRCCCGE8DcJZAmfKyqCO++EL75QI/BCjqcZOaqmwVbNymoACWSJGrm/9kpLVeCqCa+1QHMMKwQZQSValhrrYwkhhBBCiAaTQJbwuVXfmPnp20Iee0zn1VcD3ZoG8pQR44nNpmadakSmTOfOrskRN21Sk3kIUem1V1SEtnMnMYcPq20hmpXlyMjSNBg0KKBNEcKv3ANZvXq5bdi/X/VlIYJUeno6AwYMYPjw4YFuihBCCOEkgSzhc0tf2qpmtdi5k3F99wS6OQ3jKRurqqIi2LEDDhyAqKgGZ8pomqrbDWrijl9+aXqzRTPg/trLyQHAYLWqGm0hmJWl63DCCdCnjyryHhsb6BYJ4T81BrLuuQdOOw2uu05dEBEiyKSlpZGdnU1WVlagmyKEEEI4SSBL+FTxnmOsXh8OQGL5EYakRga4RQ1Q32yssjKVjQUq4GCxNDhTRoYXBq+AXI12f+1VqYiuFRSohRDLytI0+Ne/YP58eOe
dQLdGCP86/XQ18e0JJ6hSi4AKSm/Zoj4/iovVjLhCCCFEM7BuHVx0kfophC9IIEv41E+PLafMpk7Ox5xSiqFDuzoeEUTqk40F0Lq1K9Bls6m0qgZmygwf7noKCWQFl4BcjXZ/7UVEuMaegpr+DEIyK8tBvq+Lluaqq+Cpp6g8vP6PP8BuV8tSNE4IIUQzoevwwgtqVvYXXgiJ660iBEkgS/jO4cMs/bqi4JNmYOydAwPbnoZwZMQUFalv3RZLtZuhrMx1PzFRPc5uV1fZc3MblCkTFwf9+6vlbdvg2DEf/V4i+FXNBDQYID7eNRbPkb0BIZeVJYRw4z77wcAQ+nwUQgghauG4HtuqVeCut3755ZdMmDCBxMREIiIi6NGjBzfeeCNbt271f2Nq0b17d2699dZANyMkSSBL+EzJK++Ska9OzhM6xXDCmITANqghrFZV18tkArNZZcG43TSzmTCLBc2xraREBRrsdnUrK1PzrVut9X5KR50skKysFq2mTMBWrVzLjuGFIZKVZbPB0aOBboUQQebXX13LkpElhBCiGXC/HtuuXWCut957771ccMEFxMfH89prr7F06VJmzpxJdnY2l156qf8aUg+fffYZd911V6CbEZLC6t5FiEY4cIDV83dSqoeDwcCYS9qG1nCiiAhYsEBlVnlQbrWyZuVKRo8eTXh4uGvDvHmwYoVa7tWrWn2j2px+ukq0SU2FE09sfNNFCHP/9E9KqrzJPZBVWAgdOqhlkwny8tTjRo6sfRhsgGzZAlOnQqdOcPXVMHlyoFskhP8cOaJGoFf6ONB1VyArPh66dAlI24QQQghvqno91v1660kn+f75Fy9ezJNPPsmDDz7Io48+6lw/evRorrvuOhYtWuT7RtRDSUkJ0dHRDB06NNBNCVmSkSV84403WJZ3glpOTGTsudGBbU9jtG8P/frVeDN37lx9/ZNPQs+eavbCP/+E996r99MNGAB33QWnngoxMT78vUTwqvrpX1gI+flqOKHBQHlUlNrPfXhhCGRlOUZQ7dvnKgkkREtxxx0wahRceaXb63/PHtW3QQ0rDMIAtBBCCNEQnubJ8ncVjGeeeYb27dvz4IMPetx+7rnnAmCxWJg+fTodO3YkKiqKIUOG8Nlnnzn3e/vttwkLC+PQoUOVHp+Tk0NERASvvPIKAGvWrOH888+nY8eOmEwmhgwZwntVvv+tWLECTdP4+uuvmTx5MnFxcUyZMgWoPrSwIcdbsmQJV1xxBa1ataJbt2489dRT1X7fNWvWMGHCBOLi4mjVqhWpqaksWbLEub20tJT777+fbt26ERkZSf/+/Zk/f36df+dgIIEs4X1791L2+WJ+Mg8Bg5G4boktJ8PIZIJHHnF9Kfnvf2H37sC2SYQGT3XZDh+GvXtVSpPZjC0iQmULtm6t9nfUaDMa1eOCtFbWxo2u5SFDAtUKIfzPZoOdO1XsuaxMlbwDKg8rHDQoEE0TQgghvMpTdQx/Xm8tLy8nIyODsWPHVh4x48GVV17JK6+8wowZM/j8888ZMGAAF198MV9++SUAF110EWFhYXz88ceVHvfJJ58AOANRu3bt4pRTTuH111/nq6++4uKLL+aGG27gHQ9TdE+bNo1evXrVOpywIce76aab6NOnD5999hnnnXce99xzD99++61ze0ZGBqeffjqlpaW8/vrrfPLJJ1xwwQXsdvtueskll/DKK6/wr3/9i0WLFnHWWWdx1VVX8c0339T69wsGMrRQeN/KlURQxoc97mf5iBnYRvdtyAi70Dd0KFxzDXz4IfzrXzJkRNRP1bpsuu7KujIa0SwWNFAzA4Brm4PJpIJeVqsKdgUJXXdlZJlMkJIS2PYI4U+7d6sAFlR57UsgSwghRAj43//UrS59+6qh9O7VMfbsUddbdV2tnzxZfeRVTUK+8kp1cyguhs8+q7yuPo4dO0ZpaSldu3atdb9ff/2VTz/9lJdffpkbb7wRgLPOOoudO3fyyCOPcP7
55xMfH8/EiRP54IMPKmVMffDBB84i8gCXXXaZc5uu64wePZq9e/fyyiuvcM0111R63vPPP58nn3yy1rY15HgXX3wxDz/8MABjx47l66+/ZuHChZx11lkAzJgxg5SUFH744QeMFTV+JkyY4Hz88uXL+fLLL/nuu++c68ePH8+BAwd46KGHOPvss2tta6C1pPCC8JcrroDBg+n43ntc+WAqmALdoAC48Ua48ELo3LnBDz18GNatU+W5rr7a+00TQapqXbYNG+A//1HL48dTfv31nuuyuUtMDKogFsD+/a5C74MGuWWkCNECbNvmWq4UyDpyRP00GNS4ciGEECIIFRWp7yZ10TT47bfK2Vg2m8pIdmw/ehT+/ts1Ebf7c7jT9errGkKrY7j+qlWrAFdWlcOll17KP//5T4qKijCZTFx++eVceuml7N69m65du3LgwAF+/PFH3n33XedjcnNzeeihh/jiiy/Yt28fNpsNgDZt2lR73nPOOafOtjfkeO5BKU3T6N+/P3v37gWguLiYzMxMZs+e7QxiVfX999+TmJjImDFjKHf8o1DBrJtuugmbzVbjY4OBBLKEbxx3HDzxRKBbETjh4Y0KYum6Cl4dOwbR0XDZZepQooVo317dAL77TtVaAzjnHFWXbccOVYsthF4U7sMKZWI20dK4B7J69XLb8OyzKmi9fbsURRRCCBG0TCY1+2BtdB127Kg+V5HR6JroxGhU2wsLoUePyllZpipJD5pWfV19tGnThqioqEpD5zzJzc0lPDzcmVXl0L59e3RdJy8vD5PJxLnnnovJZOLDDz9kxowZLFiwgKioKC688ELnY6699lpWr17NzJkzOe6444iLi+O///0vH330UbXnbe84x69FQ46XkJBQ6X5ERAR5eXnO39Fut9OxY8can+vo0aPk5OTUeIH8wIEDdG7E91l/kUBWCEhPTyc9Pd0ZkRUhKjNTXXl3DA3zQNPUrIWLF0NJCWzeDCec4Mc2iuCxbp1rediwytt0HX7/Xc2QefPNBPOUoI5hhSD1sUTLU2NGFqhad1X7thBCCBFEqg7782TNGnXx3T0bC6pXVzGb1febmTNrn8EwJqbhwwoBwsLCOOWUU1i2bBnl5eWE1VDbJjExEavVSm5uLq1bt3auP3ToEJqmOQNE0dHRXHjhhc5A1ocffsh5552HqSLKZrFYWLRoEXPnzuW2225zHsdew8xGdWWKNfR4tUlISMBgMLB///4a90lMTKRt27YsXrzY4/Z2dUUwA0wGeYSAtLQ0srOzycrKCnRTardxI9jtzJsHL7wA2dlBWXfa/ywWNZvhrbfC44/X+UdJTXUtr13r47aJ4JSfD1u3quU+fSAhgawsjdmzR5CVpcHs2XDttfD22/Dzz4FsaZ0cGVkGg0rUFKIl2b5d/YyOhlouigohhBAhydNMhTXxxwyG06dP5+DBg/zHUZ6jisWLFzNq1CiAaoXcP/74Y4YOHeoMVAFcfvnlbNiwge+++47MzEwuv/xy57bS0lLsdjsRbmU9CgsLnQXjG8qbxzOZTJx00km8++67NSbDjBs3jiNHjhAREcGwYcOq3SKCrFxJVZKRJbxj61b4v/+jrEdfPt72EkXGeD75BJYsCbqSPf5XWKiGiQEsXQqjR8PEiTXuPmKEa3ntWpVwI1qY9etdn/AjRqDrMG+egc2bk5g3z8BJ16eiffqp2r5kSeUXTRApKFCp5qBGREZHB7Y9QvhTSQns26eWe/aU+nBCCCGaH08zFdak6gyGtWVlNdbEiROZMWMGDz/8MNnZ2Vx22WUkJSXx999/8+abb5Kfn8+GDRuYNGkS06dPp6SkhL59+/L++++zevVqvvjii0rHGz9+PG3atOH6668nISGhUgH0+Ph4hg8fzhNPPEHbtm0JCwvjiSeeID4+nsP1KSxWhbeP98QTTzBmzBjGjRvHLbfcQuvWrfnll19ISkri+uuvZ/z48Zx33nmcddZZzJgxg0GDBlFUVMT
vv//Otm3beP311xv8nP4kp1XCO15+GYB1m6MoyldR39NOkyAWAG3bwv33u+4/+SQcOFDr7j17quXsbBUMEC2M+7DCESPIzISMDI3o6HIyMjQyw0a56mctW+aqpBlk/v7b9R4gwwpFS7Njhyse7RxWaLfDDTeo7NwffghY24QQQoimakg2loM/srKefPJJPv/8c3Jycrj++usZO3YsDz30EP369XNmYb3//vv84x//4IknnuCCCy5g8+bNLFy4kPPOO6/SscLDw5k8eTL79+/n4osvrpalNH/+fFJSUrjmmmu4/fbbmTx5MlOnTm102715vFGjRrFixQo0TePaa69l0qRJfPbZZ3Tr1s25z8KFC7npppt46aWXOPvss7nhhhv4/vvvOe200xr9O/iLZGSJpsvOhpUrAVhmOwNaJwAwblwA2xRsxo1TWViLF6tpOB56SAX/arhEP3Kk+hJkt6vknDFj/NxeEViOYcRGI/rgIaTfqD704+NLKSqKIv31SEaeOhptyfcq0pmV5ZvLWk00eLAq4/XnnxAfH+jWiGCxZs0aTjnlFB599FEeeOCBQDfHZ3budC07A1k7d6rCcZs2QU6OvLkLIYQIWQ3JxnLwR1YWwAUXXMAFF1xQ4/bo6GieffZZnn322TqP9fLLL/NyRdJGVSkpKSxbtqza+ocffti5fPrpp6PXELXb6X6y0MTjff7559XWnXzyyfxQy4WziIgIZs6cycyZM2vcJ1hJRpZouoqObdWNrIicAJqBmBgVjBFuZsyA5GS1/Msv8P77Ne4qdbJaMLsdzj0Xhg+HE08k89cYVq2C+HgdTVM/V62CzE4Xux6zZEng2luHiAgYNAjcLv6IFsxut/PPf/6T4cOHB7opPnfOOfD99/DSS3D66RUrf/3VtcOgQYFolhBCCNFkjmysoiI155DFUv+b0age58usLNH8SSBLNM2vv8Lq1QCsjx5NYaSac/XUU2VYYTWxsfDoo65LFi+95CroXcUJJ7imq5VAVgtjMMD118N//4s+L71ayrYzJTtjCHp0jFq5YgVYrQFrshD19eqrr5Kamkr//v0D3RS/SExUJewc1zAkkCWEEKI5sFphzx51Xmo2q5LA9b2Zzepxe/fK6atoPBlaKJrmv/91Li7tdgNsUbFRGVZYg6FDYepUeOcdVdfogQdUZlaVqF90tPqO88sv6k1+/36Z8aolylyrOVO2HROOOFOyVxvJPONKTvrtNTW8cN06OOWUQDZXNDOPP/44GzduZN26deTm5vLWW29x7bXXVtuvtLSUmTNn8t5775Gbm8ugQYOYNWsW48ePr7TfsWPHeO6558jMzOTOO+/0zy8RbByBrLAwaCHBPCGEEM1PRAQsWAC5uY0/RmKiJD6IxpNAlmi8n3921vIp79iV5fv7ACoIc/LJgWxYkLvpJlizRmVj7dihhmbefnu13UaPVt91UlNddb1Fy+FeQFPTYM8ejfDwKOLj1VWsvDxI338hI/XXVJLfkiVBFcj69FP1Mh8yBM4+W52siNDy5JNP0rVrVwYPHsyKFStq3O/aa69l4cKF3HnnnfTu3Zu3336biRMnsnz5cucU1wD//ve/ufPOO0lISPB944NRQYGrcFbfvhAZGdDmCCGEEE3Rvr26CREIMrRQNI6uV8rG+uWMf1FQqF5Oo0bJ+XmtwsNh1ix1CWLIEJg82eNuV12lRh9ec40EAVqMnBzYsgXs9koFNHNyVIA4OlqlZTmzsra0I9M+Qj12xQooKwtUy6v56SdYvhyefVZm3gxVW7duZdeuXcyZM6fGfdatW8eHH37I7NmzmTNnDtOmTeOHH36gW7duzJgxw7nfhg0byMrK4h//+Ic/mh5wf/4JTz2lArr79lWs3LzZtcPgwQFplxBCCCFEcyAZWaJxtm1Tsy4BdO/OijLXlBMyrLAeevaE11+Hfv1qnLlQtEBLlsCcOehx8aSHzae0tD2xsWpYYUkJlJcbnbuqrCyNdO1WRp6
3EG38OFU9MwjY7bBxo1qOj5dC76GqfT0usy5cuBCj0ci0adOc66Kiorjhhhu4//772bNnD126dOHHH39ky5YtdOrUCYD8/HzCwsLYvn07b731ls9+h0D55Rc15ALgwQehUydcn5kAAwcGpF1CCCFETex2e6CbIEKE47VS02yM/iCBLNE4vXvDBx/Aq6/C+PHceZqBkSeppBAZVlhPAwYEugUi2KxbB0Dm/q6sKkwkIUHN6uIQFWVzLjuzso4NIHPCTJ9NX9wYu3a5srAGD67/lMwi9GzYsIE+ffoQFxdXaf2IESpTcOPGjXTp0oVp06Zx2WWXObffcccd9OjRg3vvvdev7fWXbdtcyykpFQvuGVlS6F0IIUSQiKgoVGU2m4mNjQ1wa0QoMJvNAJQFcDSIBLJE46WkqLETQASqptPo0YFtUkgrLlbffqp8wbHb1TCV8nL57tOs2Wywfr2qjZV3JaWEkWSCY8dcu0RFlVd6iLNWVjqMHBk8ASNHNhao0bOi+Tpw4ADJzin5XBzr9u/fD0BMTAwxMTHO7dHR0cTGxtZaL6u0tJTS0lLn/YKK6KjVasXqYZojxzpP25qqocf+6y8juq76ZJcuNqwWG8bfflPD8tu1w5aYWOdUTb78fZoqmNsGwd2++rQtGNsthGi+wsLCSEpKYl/FWPjY2FgMMmJEeGC32zGbzezbt4+8vDysVisGgyEgrxcJZIl60bKyGDF7NlrbtpJy5Qu//qrGn+TlwYcfOudqz8uDSZNUdsuJJ8IrrwS0lcKX/vgDiorILBrIqpITSUjW0HUV3wRVWk3XNQ4d0igqUqNTnVlZqyAzk6DJypJAVstRUlJCpIeiiFEVM1SUlJR4fNzbb79d57Fnz57NI488Um39999/XykoVtWSJUvqPHZj1efYdjv8/PMoysqMtGlTwvLl68BmI+H884nfuRPdaGT34sVefc5ACea2QXC3r7a2FTve+IUQwk+6du1KaWmpM5glRG3y8vI4ePAghYWFdO/enbAw/4eVJJAl6qbrGObNI2nzZgxz5sDHH6vp9IT3fPKJqyLwQw+pmQwNBhISoFUrFcjatEnVSYqODmhLha+sW6eysY5ModQQTZIJCgtdm2NjoaAgwlnPvbAQ4uLcsrKeszLy2PdouTlw9dUB+RUcHKWAIiJUGTjRfEVHR1fKmnKwWCzO7Y113333MX36dOf9goICunTpwoQJE6oNZQSVxbJkyRLGjx9PeHh4o5/Xk4Yce+9eiI42Eh0Np5zSiokTJ1bb53gvP6e/BXPbILjbV5+2FcgMGUIIP9M0jd69e7N+/XrWr1+P0WgkIiICLVjS/UVQ0HWdsrIyrFYrhYWFtG7dmpMDlOQi0QhRt8xMtIwMyqOjCfv+exg7Fu65B846i02bDbz3nirwfuqp6ku1aIS77oKff4aDB1WV4Pffh6lTAUhNVTNflZfDhg2SENdsZWWpbCzzUBI6G9G0yvWxVMkCK2VlKvslN1cFslRWls6qL3PI3LSQk9pug0suCdjUoUePqi/yoMrAVZRdEM1UcnKyx6u3Bw4cAKBjx46NPnZkZKTHbK/w8PBagxN1bW+K+hx71y7XMN8+fSA8vGmTMPjy92mqYG4bBHf7amtbsLZZCNG8aZrGsGHDMBgMbN68meLi4oAW8xbBy2Aw0LVrV0aNGkXXrl0D0gYJZIna6boqwFNaii08HPLz4bff4KOP4Oyz+f57VeB9xQp47DE4++xANzhEtWoFjz4KN96o/uYvvaSKHvXp4wxkgRo+JoGsZqi0FH3jJtKPPECRHkNsVBgWi+pudrv6Umww6BgMOkajKq1jNqtMvYgIMBo1irRWpB+ZwkjTTLSMDBgzJiC/ivvEbDKssPkbMmQIy5cvp6CgoFKW1Nq1a53bWxr3Qu+9egWuHUIIIURDaZrGiSeeyAknnIDVapVAlvDIYDAE/KKLBLJE7TIzYdUq9Ph4IvbtA6NRfYMeNQq7rvHDD2q38HA
p9N5kJ5yghoS9+65Kv3rgAXj/fYYPj0DTVHyr4ruhaG42bcJaamdPWXtMMTpms0Z5uasWdGQkFBVpWCxhRESAYyTXkSMqKwvAFG9kr7k9Vj2MiCVLgiKQNXhwQJog/Gjy5Mk8/fTTvPrqq9x1112AKtL+1ltvkZqaSpcuXQLcQv+rNmNhfj4sXapm6+jVC6SArhBCiCCnaZpzNkMhgpEEskTN3LKxtLAwDOXlrhPwH39k8/D/48gRNX7ipJNkWKFX3HSTCh5u3Qo7dsC8ecRNn07//pCdDdu3q6FbSUmBbqjwqqwsIgzlLOh5H7m3PginnsqmTfD663DgANxwA4wbV87KlWsYNmw0t9wSTmkpREXBq69CTAxgCyPxpseJKClX1d8DVFBt8mTo2lUFtCSQFdpeffVVLBaLc+bBr776ir0V40Zvu+024uPjSU1NZcqUKdx3330cPnyYlJQU3nnnHXbu3Mkbb7wRyOYHzPbt6mdEhOoL/LQRZs9WK6+5Bm67LVBNE0IIIYRoFiSQJWpWkY1FQoKq3QRqjFPbtrBqFcsSdwHdAVU2S3hBRIQao3n11VBWBvPnw6hRjBw5guxstcu6deChdrAIZeHhkJhI+5wc2k8aAG1UkfRLL1X1puLiVExqxw4zJ5yggkWffKIeuq2iJBYY4ewT4dNdYLFARoYqXudnXbuq2+TJfn9q4WUvvvgiu3fvdt7/9NNP+bRinPNVV11FfHw8AO+++y4PPvgg7733Hrm5uQwaNIhFixYxuoWm6Y4fD1u2qGtBRiNqVlqH4+tT5l0IIYQQQtRG8tuFZ27ZWNjtrjFOMTHQpg12SxlLP8kDdMLCZFihV/XqBbfe6rr/8MOkDnRNxZ2ZGYA2Cd+aNg2++04VQ2vTptKmzp1dwwcdpkxxLX/8sequQOXA1fff+6atosXYvHkzuq57vHXv3t25X1RUFHPmzOHAgQNYLBbWrVvHmWee6bV2pKenM2DAAIYPH+61Y/rStGnwzDMwd27FCvdA1qBBAWmTEEIIIURzIoEs4Zl7NlZ+vmt9mzagafweM5zDRw1QVERqqqpVLrzosstgxAiVoXXVVQwcHkVUlNq0bp1b4EI0H5pWMQ6pbikpMHSoWv77b1i/vmLDiSdCYqJa/uknKC72+HghQklaWhrZ2dlkZWUFuikNV14Ov/+uljt2rBaoFsLf1qxZg8FgYNasWYFuihBCCNFoEsgS1blnY0VEqOLugG40olcUwlpWPhrsOhw+zNgxElXxOoMBHn4Y3nsPrriCiCgDJ5ygYhQjRqjyR6Lhpk2bRnJyMnFxcQwcOJCvvvoq0E3yqD7xJ0dWVvv2zi6qxjE5iryXlalglh8tXqwCrRI/E6LC1q2qL4JkY4mAs9vt/POf/wyZ7EYhhBCiJlIjS1RXQzaW1WTCiIpzLSscAUYjRnM+p5uygBGBam3z1a6dulWYNUtlvmlaANsU4qZPn86LL75IZGQkWVlZjBs3jh07dtAmkFkSpaVqWsIKdjtMmgStW6uY1D/+4flhY8aooUunnFJRh8dhwgRYuFAtL1mi7vuB3Q5PPKGCWO3bw9df++VphQgqhw+ryTicExPKsEIRRF599VVSU1PJd8+0F0IIIUKQZGSJytyzsUwmFTlJSACDAWtFNta20i4csCaB0cAI4y/Evf2CjHXzg7g40P7IVhED0Sj9+vUjsiJopGkaZWVl7Nu3L3ANOnIETjtNRasqssP++kvNTPnXXzgL/HviqE1XKYgFMGQIjByp6qzdeaevWl7Ntm2uTCypZy1aqmuvhVGj4OabK1ZIIEs0grkizXbSpEkkJiaiaRpvv/22x31LS0u555576NixI9HR0aSmprJkyZJq+x07doznnnuORx55xJdNF0IIIfxCAlmiMvdsLE2DqChITkbv3Ru94htz76g9LOx5D7e0XcjkpBVqf6lA7ltlZfD882rq9vnzA90av3n88cc566yzvHYiD3DLLbcQHR3N8OH
DGTNmDAMHDvThb1CHrCxVQ2fDBtizB1CTDTqcfHIjjmkwwLx56ht1p05eaWZ9bNzoWh4yxG9PK0TQKChQGVllZapbA65AVlQU9O4dsLaJ0HLs2DEAtm7dyuDBg2vd99prr2Xu3LlceeWVPP/88xiNRiZOnMhPVYaW//vf/+bOO+8kISHBV80WQggh/EYCWcKlajaWuyrj2bpHHuD6pC85rW222j89XbKyfOnPP+H9953/o6JN28jLC3SjfO/JJ5/kjz/+8NqJPMBLL72E2Wxm6dKlTJgwAS2QYzXdi1ePUMNz3QNZp5xS/0Pt3QtbtnipXY0ggSzR0m3f7lpOSUFFtQ4eVCuOO85D+qQQnnXo0AGA3377jTlz5tS437p16/jwww+ZPXs2c+bMYdq0afzwww9069aNGTNmOPfbsGEDWVlZ/KOmsepCCCFEiJFAlnCpmo1VH5qm9pesLN8aNAiuvJJtls7c8Ne9jBkD898tr/txIW7r1q3s2rXLKyfy7oxGI2PHjmXp0qUsXrzYV82vna6ryuigJlUYNIiCAti8Wa3q3l1NclaXwkK4/Xa46CKo5c/kU7ruCmRFR0OfPoFph2ie0tPTGTBgQNAXqN62zbXcqxdgscDpp6tZOmRYoWiASLe6ibVZuHAhRqORadOmOddFRUVxww03sGbNGvZUZPr++OOPbNmyhU6dOtGhQwc++ugjnnzySa677jqftF8IIYTwNSn2LhRHNlZREcTGqiI9drtaNhhA1zGUlakT86pBLqNRPS49XdXmkWrkvnHLLST8+Dub/u4NlJH53lZuuX1AoFvlU+3bt69zn9pO5O+//3727NlDly5dPD62vLycbe7fPv1pzx44dEgtDxkCERGs/dFVAq2+wwpjY1XShyOY9NdfFSOYdF19s16yRGWDnHaaD34J5eBBlXwCMHCgJJ4I70pLSyMtLY2CggLi4+MD3Zwaub+VpKQAXbvC00+rvmi1BqxdovnasGEDffr0IS4urtL6ERUZvhs3bqRLly5MmzaNyy67zLn9jjvuoEePHtx77701Hru0tJTS0lLn/YKCAgCsVivWGl7PjvU1bW8KXx47mJ6zvqRtjVdX+4K13UKIyiSQJRSrVX2xNpnAbFZFqG029e20bVuyrEOZlXMTscesnBG1hnFRP9EzbI/r8SaTGttktarsEuF9EREkzbmHXit3st3SkT+yoWDFL8SdfkKgWxZQ9T2Rz8/P5+uvv+b8888nKiqKzz77jOXLlzN79uxANNuVjQWQmgrA6tWuVfUdVqhpMHkyPPWUuv/xx3D//cAff8DUqa6D+TCQJcMKhag8tLBXL7cNmiafi8InDhw4QHJycrX1jnX79+8HICYmhpiYGOf26OhoYmNja62XNXv2bI+F4b///vtKx/KkphqV3uDLYwfTc9aXtK3xampfsWPmGiFEUJNAllAiImDBAsjNVfWYHnhArR8wAP2RR3n+7vYsW2oiKszA1g4T+Pl4C688cqjyMRIT5WTd11JSSD0rn+2fgw6sv+djxiztrWaXbKHqeyKvaRqvvfYat9xyC7quk5KSwvz58xlSQ+TF11ejDWvWoFXUlbMNGYK91MpPPxnRdVUX+vjjbZWSOGo79oQJ8OKLRoqL4euv4aabbLTq1Qtj27YqGL1mDbZjx9TUlw1Q39/nl18M6LrKxDzuODtWq+/r5QXzFd9gbhvI1WhfcCRAArRv36LfkoUflZSUeByGGBUV5dzuSU0Tp7i77777mD59uvN+QUEBXbp0YcKECdUuHDlYrVaWLFnC+PHjCQ8Pr8dvUH++PHYwPWd9Sdsar672Oc73hBDBTQJZwqV9e3X76CP1TRrg2mvJzOtHxmYdQ5iVIouRIlsYYy+Ohn6tA9veFmrkzUOZv2QPFBWRubczY558EmbNCnSzAqa+J/JxcXEsX7683sf16dVou52TlywhvLgYa3Q0q3fsYO9PR/n77xMB6Nz5KEuX/t6gY/fsmUJGRif
y8+Hxx7dx6qn76JWcTOe//gJgy9y5HKzIUmuoun6fb789kfz8WAwGnX37Mli82Nao52mMYL7iG8xtA7ka7U2HD6tkZqgYVlhSAmFhEIRf4kTzER0dXemCi4PFYnFub6zIyEiPn63h4eF1Bifqs09j+fLYwfSc9SVta7ya2hfMbRZCuEggS1RmNquaOgCxsehnjCH9H2piQl1X9XuOHIEzzghsM1uyoScaCOuSTPnWv1lbdDx8+xaceiqceWagmxYQvjqR9+nV6D//xBgeDvHx6KefzsRzz+WbbzSSkgxYrXD55bFMnNitQcfu1w+uuEIVp9q+/QQef3wwWteuGDdtAmBYfj72iRMb9Deoz+9jt8NvvxnYsEEjMhIuusg/r8NgvuIbzG0DuRrtC9XqY33yCbz0EgwYAHfeCccfH6imiWYsOTmZffv2VVt/4MABADrWZ8YQIYQQIgRJIEtU9v33qqA7wNlnk7kxilWrICZGJy/PgNEIZWXqpL1t28A2taWKjoZBJ0bwS2EH9u2zsa/7KXTq16/SPlpWFiNmz0Zr27b+VcNDlK9O5H16NXrHDtekCKmpGMPDOf98GDcO1q+H444z1JjIUdOx+/aFYcPg559VubuNGw2MGD4EkpNVNfasLIzFxdCIYtl1/T7//rf6abVCeLh/J8MN5iu+wdw2kKvR3vT3367lXr2AFb+qD8uNG10ZzkJ42ZAhQ1i+fDkFBQWVLrCsXbvWuV0IIYRojvz7jUMEv88/dy7qF1xIerrKxiovV1+6DQY1I1l6usrQEoExciQQFw+dOrP20rnQzS17R9cxzJtH0ubNGObNa/b/qCFDhrB169ZqWSRBfSJ/wQUqaPz445WKsMfEwOjR0KZN4w57ySWu5QULUMGy8ePVCpsNGjC0sjEk/iFaqiuvhMWL4YUXIHWEDr/+qjaYTNCzZ2AbJ5qtyZMnY7PZePXVV53rSktLeeutt0hNTa1xxl4hhBAi1EkgS7hs3QrZ2Wq5Xz8yc/uyahUkJEBhoVqtaSoTa9UqyMwMWEtbvIpJ7iAujswsY+WNmZloGRmUR0ejZWQ0+39UyJ7IJyaqKu3t23vtkKed5sqUXLlSJWI5A1ngGjYshPAqTYN27VQCbBvrQTh6VG04/nh1BUiIRpgzZw5vvvkmAF999RWzZs1i1qxZ5OfnA5CamsqUKVO47777mDFjBq+++ipjxoxh586dPOWYylYIIYRohmRooXCpIRsrPFyNkAA1rC0uDvbtU1lZI0e6RkgJ/+nfX/0fCgpcQUZADQt9+mkoLaU0Pp6ooqKQ/ke9+uqrWCwW58yDX331FXv37gXgtttuIz4+vtKJ/OHDh0lJSeGdd95h586dvPHGG4Fsfr3punf+PWFhMGkSrF2rsrPatAHa94eOHWH/fsjKUjOTtvbORA02m3oNeulwQniUnp5Oeno6Npv/JhFoEkc2FsDgwYFrhwh5s9wmcvn000/59NNPAbjqqquIrxgm/u677/Lggw/y3nvvkZuby6BBg1i0aBGjR4/2ShtCrv8JIYRoESSQJRSbzZWtERlJZuuzWbUKIiOhotQQoMrraJrK0nJkZZ10UkBa3KIZDDC3YkShM4jw559wyy2wdi16585gs6HHx6OF8D/qxRdfZPfu3c77gTiR9/VJvN0Ol10GvXurSRTGjWva8W64Af7xD/c1FcML33lHPdny5Sra5QXZ2XDddep1OHWqGjEphLelpaWRlpZGQUGBs88HNfdA1sCBgWuHCHn5+fk1Ti7iEBUVxZw5c5gzZ45P2hBy/U8IIUSLIIEsoRiN8OGH8PXX6IVm0t82UVqqEjlKSqCoCEwmKwkJBkDDZIK8vJBO9gl5lUo/6To89hhs2gRlZWhFRarAsMkE+fkh+4/avHlznSfx4NsTea+fxM+cqVIbU1PhjDPYulVjxw5V/72kpOmBLI+jmM48U0Wkx42DU05p2hO4qZgQkV27oLzca4cVIqT8/DP8+KOarXD
kSGjnCGRpmsxWKIQQQgjhAxLIEi5t2sDUqWSugVVPqawrgwE6d1ZBK4OhFFCzuElWVpDRNJUOs3ChCkrm5mJMTJR/VLApLobvvlMZkOvXw5gxrF7t2uyzCSb79FGF5b1s40bXcjDW1BfCH9auhfnz1fIzs0tpt3WrutOjB7RqFbiGCSGEEEI0U1KB1I+mTZtGcnIycXFxDBw4kK+++irQTarGbofnnlO1sUwmtc5ggNatq898ZzKp/WQGwyCg6+hffKkyfSpScqJyclTARP5RweOXX9T/BGDECAB++sm12ZuBLF2Hdevg7rvVPA7epuuujKxWrdR3diFaom3bXMsptq2uPi71sYQQQgghfEICWX40ffp0du7cSUFBAW+++SZXXXUVx44dC3SznMENXYc77oCvvlJfTOsahVY12Uf43x9/qESb80fnsnpZCXTo4IxAajYbWkVhdPlHBYl161zLw4dTUAC//abudu+uhvJ6yzffqJJpy5fDggXeO67Dnj2qbjzAoEEyMZtouRyBrOhoSM7Ndm2Q+lhCCCGEED4hXz38qF+/fkRGOobmaZSVlbFv377ANqq8HK64Avvc55j1r1zmzwerVc0cXp/kHUn2CawDB+DTT3X2Z+extuh49Q/p1ElNXweq6NL+/fKPChZZWeqnpsGwYWRmqixI8GrpKgBOP92VVfnNN2p2QSwWWLYM7r0Xjhxp0vFlWKEQarRwxaSq9OoFhssvVR3uqad8OFZYCCGEEKJlk0BWDcxmMw899BBnnXUWiYmJaJrG22+/7XHf0tJS7rnnHjp27Eh0dDSpqaksccwAWMUtt9xCdHQ0w4cPZ8yYMQwM9BXbjAxsW7czc04r/ve2FbNZlVhKSqpfXXDJygqsYcPAUFwEZjOZWkUx97AwNWuh4x9YUKAik/KPCqycHPjrL7Xcrx/ExVWqj+XtQFZMDJx7rlouLYVFi1AzF95zDyxdCj/80KTjuweyZASVaKl27HAtp6RULLRtC2PGqA9SIUJceno6AwYMYPjw4YFuihBCCOEkgawaHD16lEcffZQ//viDwXV8S7v22muZO3cuV155Jc8//zxGo5GJEyfyk3vxmwovvfQSZrOZpUuXMmHCBLQAzyJX9slX3LvvVr7JP4kj5a2x29XotMhIlbzhfisrM1RbZ7GowFdRkST7BEJcK53+lg1g19lh68YRa4LaEBVFSWKia8ejR9XYL8nKajCvncSvX+9aHj4cux1nICs62jdZTZMnu5Y//hjsY9ymRKwh2F5fjkBWWBgcd1yTDiVEyHKvj9WrV+DaIYSvpKWlkZ2dTZYjo1gIIYQIAhLIqkFycjIHDhxg165dzJkzp8b91q1bx4cffsjs2bOZM2cO06ZN44cffqBbt27MmDHD42OMRiNjx45l6dKlLF682Fe/Qp1K9xzmro+Gs7xwGHpYOFbCaNNGxTgKCyvfzGYNiyUMs1nzsE0NYdq7Vw1LFH6UmUlq3ncqmghkFbsiCrboaGjXTt1p316l6EhWVoN57STevT7WiBFs3aqStACGD4eIiKYd3pMePdSxQdW0Wnu0F/TsqVZs3AiHDzfquDk5sHu3Wh4wQAW+hWiJKhV6T6l5PyGEEEII4T1hgW5AsIqMjKRDhw517rdw4UKMRiPTpk1zrouKiuKGG27g/vvvZ8+ePXTp0sXjY8vLy9nmfhbsR8XF8M+rc/jZrIY2RrdtxYcvas7vuFVZreWsXLmG0aNHEx4e7nGfxETffBkXNdB1SE9npHaIN41XAJBZdDwT4zNcuyQmosXGuiINJhPk5amsrJEj6zd+VHiHI5AVHg5DhpD1sWuTt4cVupsyxVWa6+OP4aRx4+DVV9WKpUvhiisafMwdO1Ts1GaTYYXC99LT00lPT8fmmA0wiFQKZK3/EJbvVp3itNMgKipwDRNCCCGEaMYkkNVEGzZsoE+fPsTFxVVaP2LECAA2btxIly5dyM/P5+uvv+b8888nKiqKzz77jOX
LlzN79my/t9ligbRbdDZvNAJ2YgwWnv+vkaETan6M1Qo7dpjp1099DxdBIDMTVq1iYFIroveXUmKPZF3RcdVHDbqny1QtanbSSf5sccu1f7+rIvSgQRAVxVVXQWoqZGTAqaf67qlPO00l5h0+rP7t+6ecSUeaFsgaNgx+/BGys6FNGy83WIgq0tLSSEtLo6CggPj4+EA3p5Lt29XPxERovfILVQdv4UJYsSKg7RJCCCGEaM4kkNVEBw4cIDk5udp6x7r9FV9eNU3jtdde45ZbbkHXdVJSUpg/fz5DaimMU1paSmlpqfN+QUEBAFarFauHMXyOdZ62uTMYoK9pH5utVuIMRTx37nf0P+OeWh9X32N7UyCes74C3jZdx/jii2hFRRhNJoZG/Mbq4qEcLYtjW0ESPcN3YygrQy8pwV4l60qzWODwYfTnn8d24ol+z8qqz98uGP/nTdK2Lbz8ssrK6tYNUH/2Pn3UzZeMRrj4Yvjvf1US36c/d+PWXr3UN/Bff4WDB1VhvAaKioITTvBBg4UIEeXlcMEFKisrLqoMllWkZ6WkqKHcQgghhBDCJySQ1UQlJSVEeigQE1UxpKCkpASAuLg4li9f3qBjz549m0ceeaTa+u+//56YWk6Sa5ox0d2FB99Hjx7MhJjlFPY/rd61uupzbG8LxHPWV6DaplmtnLxpEzEGAxw9ylBrBj+Vq2GiPx3qTqeoXwgDSi2WSo8zlpYSVlwMQOkPP7D0iy/QAzQetLa/XXFFG5uN8HCVxjRsWECe/qKL4LXX1BfvrVuBCRNUZAtg2TK48sqAtEuIUBYWBrfeWnFn3UZYWpEOK+NthRBCCCF8SgJZTRQdHV0pa8rBUhFAiI6ObvSx77vvPqZPn+68X1BQQJcuXZgwYUK1oYygsliWLFnC+PHjq9WxsttVJhYA+fkYZ8/mnm47ICEB21131TlesLZj+0ognrO+gqJtI0dCbi4AJ+8NZ95dXQH4ddhjXHbnjaxevZqTTz6ZsDC3br5jB9rDD0NZGVFGI+ccOYL9H//wa7Pr87dzZB8K70hMhLvvhn79KmYY3DXOFcj6/nsJZAnRVJs2uZYHDgxcO4QQQgghWgAJZDVRcnIy+/btq7b+wIEDAHTs2LHRx46MjPSY7RUeHu4xAJCVpTF79gjato3g5JNd/9o9e9SX2Jkz1Qxj7N0LcXFq6rHzzsPQgCEQNT23LwXiOesroG3r3FndgD7HQ9oBGDIEBg6MARIw795N2PHHV27fwIGqRtZdd6lxZm++ibF7d5g40e/Nr+1vF6z/b2+w29Wf//jjYdQo3w8tdLj4Yrc73bqpJ966FX7/HQ4dUjNb1sPChfDzz+q1NmECtG7tk+YKEVo2b3YtS0aWaEaCebIFIYQQLZeh7l1EbYYMGcLWrVurZZCsXbvWud0fdB3mzTOweXMS8+YZnAW/d+yAf/xD1fC49daKwrRDhsDixTBnDkye7Jf2Cd/SNLj+elWzKDzcFdTMyvJQ/+q00+DOO133H30UfvnFb20NNenp6QwYMIDhw4c37gA//QQLFsDOnaDrbNkCK1fCSy+pslkBc9ll6s1hwYJ6B7FAtX3JEvX2UVjow/YJEeQOH1aBaex2VW8OVPpjEy5gCRFs0tLSyM7OJssx/a0QQggRBCSQ1USTJ0/GZrPxqmM6e1SR9rfeeovU1FS6dOnil3ZkZkJGhkZ0dDkZGRqZmbBlC0ybBkePqn3atlXJOIAq7nHGGc6MHtF81BTUrOSKK1wpOuXlKkVo926/tjNUNPkk/pNP4KmnVND4779Zvdq16eSTvdPGhtJ12HvC+XDjjdCzZ70fV/X7up/e3oQIOna7qj03ejTcc3MBmM1qw6BBfp9AQwghhBCipZGhhbWYN28eeXl5zpkHv/rqK/bu3QvAbbfdRnx8PKmpqUyZMoX77ruPw4cPk5KSwjvvvMPOnTt54403/NJOXYf0dCgthfj4UoqKopg1S51XFxWpffr3h3n
zIMhmLhc+UDmoGUZmJpx0UpWdNA1mzID9+2HNGigogDvugLfflheJN9lsahweqMhPjx5kzHJtDkQg65tv4L33VILYt9+qUcb1tWOH6/v64MHyfV20XHv3qs9cAPvhI64NgwYFpkFCCCGEEC2IBLJq8fTTT7Nr1y7n/U8//ZRPP/0UgKuuuor4ii/87777Lg8++CDvvfceubm5DBo0iEWLFjF69Gi/tDMzE1atgvh4HZsNoqJ0li3T6N4dYmPVF87nn4fYMAscKVSpWaJZ2rNHxaeOHoV27UopKYkiPV3Vha8WdDAa4Ykn4IYb1NjTPXvgnntUEXCJUHjH77+DYwbG4cMpKNT47Td1t0ePwIxAys6umLkQ+PJLuOqq+j9240bXsp9GTQsRlLZvdy33Kt/quiOF3oUQQgghfE6GFtZi586d6Lru8da9e3fnflFRUcyZM4cDBw5gsVhYt24dZ555ptfaUVuNHvdsLJMJLBYjR45o2Gxw5AgMH64ysWJjge++g3POgenT1bhD0ew8/TSsW6cSgcrKwoiP11m1SgU7PTKZ4LnnoE0btXzddRLE8qZ161zLI0aQmVlRU4fADSucMsW1vHAh2HftgTffhH//u87HSiBLCGXbNtdyysQ+qkjhiBEVM6oIIYQQQghfkkBWCKitRo8jGyshAcxmjaNHowGVbGO1wuWXQ3R0xc6ff66+Ra9cqSIdolnRdfjzT/UvNhhUUNNkUkHO9HQ818oC6NBBBbPeegtSU/3Z5ObPPZA1fDgZGa67p5zi/+YAdO3q+jfv3QuZN76lKs9/912dddI2bVI/IyOhb18fN1SIIFYpkHV2b7jlFtWPPMw0LIQQQgghvEsCWSGsajaWe2wqIQFiYuDVVysCGNu3u6YH79NHFc0SzUpmJvzxh6rjr2kqkKVp6rVQa1YWqNdDA4p+i3qwWFx9rlMn7B06smaNuhsdHdiMpksucS0v0N1StJYurfExhw7BgQNq+fjj1etMCH9o8syhPuAIZEVEyKQHQgghhBD+JoGsEOaejaUCFjoJCaXExanJCCsFML74wvXACy+U4WPNjCOoWVamgpoANpuBY8e0+mVleTrgu+/Chg0+a3Ozt3GjSosEGDGCLVsgJ0fdHT5cfQEOlFNPVYl4ABmHerG/LEnd+f77Gh/jyMYCGVYo/KvJM4d6WVmZKikIqtad0RjY9gghhBBCtDQSyApRVbOxHGJjrXTsqKIVzgDGizb0rxapHSIi4KyzAtBi4UvuQc2EBNf6I0fU8LG4uHpkZTmUl8OsWfDCC3DXXXUONxM1qFIfa/Vq191ADSt0MBjg4ovVsh4WwcLoq9Wdbdvg7789Psa9Ptbgwb5tnxDB7O+/XbXuUiL3qClAHSuEaGaCMSNSCCGEkEBWiKqajeWJc1jZsjIyD3RTK8eOVVEN0WxUDWomJEBSkmu72ayGhRUVNSAr69Ah9TM/H+68U/1soRp9Eu8eyBo2jIsvhsceg7PPDlyhd3cXXADh4Wr5c/NYSu0Vd2oYXjh5soprjhsHgwb5qZFCBCFXfSydXhnvqs5x+eWBbJIQPhNsGZFCCCEESCArJNWUjeWJyQSlhWWkH5miAhgXXeSXNgr/8RTUTErSSUoqcQ55KS+HkpJ6ZmWFhcETT7hqZu3eDXffrcbTtECNOonXdTj/fDjtNDjhBGjdmoQEFcR67DFITvZZc+stMVEFpQAKwtrwfcFIdWfJEo/79+wJl12mXhqxsX5qpBBByJm0aCklRduult1mMhZCCCGEEL4lgawQlJVVdzaWg1ZeRoI9h1XmoWTGjoOhQ/3SRuEftQU1o6Js9OihO2et7Ny5AbWyYmPh+edVtAPgl1/UcMN6F9lq4TRNVVR/5hk140KQmjJF1feZcE44KYMrXkA7dqibEMKjtDT48kuYe+lajo+uCGTJeFshhBBCCL+RQFYIqDq06bXX6peNBUBeHiZDCaX2cNKt/0BHirw3J3UNMQ0Lg27d1Kx
arVrVcwZDh+RkePZZ13TyixfD6697sfUi0AYOhG++gccfh/5TjndtqCErSwih3ms7doTR1mXEGYvUyoEDA9soIYQQQogWRAJZIaDq0KbVq+uXjQVAZBRaTDQJRjOr9nSvXwBDhIT6DjHVNNdQMPcZDB96SE1mWWuS1XHHqbFwDq+8At9+65X2txR2O8ydCytWQHFxoFtTmaa5ku4YM8b1pvLjj5X2+/ZbjQ0b1GtHCFFh82b1MyIC+vYNbFuEEEIIIVqQsEA3QDRcUZGq126xVF6v61BWZsBicQtyRcRBhziMVitFx4ykp8PIkfUMgomgVp+C/1U5JgD47jto0wa+/ho2bIB77sE5BLGaMWPg9tvVLIYAjzwCHTrAkCFN/yWaobCiIvVHHTIEIiLYsgXmz1e3M86AOXMC3cIatGsH11yjimGddppztc2m8dRTBkpL1fDUzz8PXBOFCBo5ObBnj1ru108Fs4QQQgghhF9IICsEmUxqJrrqNCyWMDSPUY1wTCbYuxesVjnnDnWObKyiIpVtVa+gZgWjUWUG2e3qtbRoEWRnw1NP1VKv+OqrVdH3zz9XQSxnGo+oqk12NsZnnlGRwfvuI+PAOc5tJ50UwIbVoawMlva8FaMRzqzI8NOysmjz8HwsZWPQYmM57rjAtlGIQPvpJ43Nm6FXyX5OKo8jMaxA6mMJIYQQQviZBLJC0GefeZ41zGotZ+XKNYwePZrw8HCPj01MlCBWc2C1qmSAxgU1ISkJ4uMhKkoNF9uxQ8WqHngAzjzT0+E0uPdelQp4zTXqwcKj1n/9pRYsFujYkdWfuLadfHJg2lSXoiI1oWlOjqr9M348GDQdw7x57NoSjRZ7GGJNDB4sqZyiZfvpJ40vvgAOt+WN2PYqkCX1sYQQQggh/EoCWSGoTx8VT6jKaoUdO8z06wfh6zJUtGvQIBlH2AxFRMCCBZCb63l7fYOaxcVqWOH27VBSAv/+txoVN326h4BnWJgaYihqpK1bR7fvvlNZa0lJ5Hc5nt9+U9t69lSrg5HJpEr8rFkD+/dDRgacGpaJlpHBRsNdKlpaVMSQIR4i6EK0INu3V3yelpTQq81etTxoUOAaJIQQQgjRAkkgqznSdVWIZ+9e9e353XdV6o1oVtq3VzdPKgU1PcexnN55B554Qg0xBFi4EH77DZ58Ejp1qqMRZWWq0NaFF0rAVNcxPPkkEYWFaAYDjB9P5s/h2O1qc7BmYzlMmaICWeg6Hz9/gFO33oiem8cmbTDYdUw5e0jp1Q9k5lPRQum6yl4F6BBfQmzrcIhtrVJchRBCCCGE38ishc2IlpXFiNmz0d5/XwWxQJ1gSxBL1CIqCh5+GGbOdGVh/fmnmqCwVrm5cPPN8J//wFtv+bqZwS8zE23NGnRNU2P1WrcmI8O1+ZRTAte0+hg1CpKTAbuNNV8fY++fZr4uG8/PpYMp0mIZVLwWwzqZ9lS0XHl5kRQVqeWUc/vDsmXy3ieavfT0dAYMGMDw4cMD3RQhhBDCSQJZzYWu6tkkbd6MYe5cdekYVKaMEPVw/vnw9tvQtauqk3TXXXU8YPNm2LRJLb/0Enz/va+bGBD1Ool3VN8vLkY3GsFux75hE2vWqH4YExP8kzwaDDB5MmA0ouvwcflFPFN2G4X2WA7b2zBY26R+R8d7ixAtzIEDJudyr16oLNQ2bQLXICH8IC0tjezsbLKysgLdFCGEEMJJAlnNRaaqZ1MeFYX2118qIyQuDk4/PdAtEyGkTx947z144YXqddgcQ+ScRo+G225z3X/4Yfj1V1830e/qdRKfmQmrVqkgj6ZBRAR//lxE7l6VvjF8eN1DPIPBBRdARKkZbDbe169kgz4EAzbM9hgMpmj1O2ZKVpbwr/pmhDizkn30hds9kJWS4pOnEEIIIYQQ9SCBrBBQ50m8IxuktBR7WJiKOBw5AuecI1MUigYzmaB798rrjhyByy/3EMOYOlVFP0DVy5o+3TWstaVwy8Zyio0
lwzwYDh8G9KAfVuiQEK8zXluCrsNOulFMDOGUY9c1lpSORreUSlaW8Lt6BZPds5LnzfPJa/TgQUcgS5dAlhBCCCFEAEkgKwTUeRJfkQ2ix8cTXlwMRqOaZaxrV/82VDRLNpuazXD7dpWA9corbtlZmgb33QcjRqj7eXlwxx1QUBCo5vqfIxsrLMxV8D42ltFJ2Vynv0WfxGNBX+jdKTOTS468RJGhFWZaYcROhMFKtMHC6qLBZEadLllZIjg5spKjo9EyMnzyGj1wwATl5Ri3/EH3p26BL7/0+nMIIYQQQoi6SSAr1LllY2E0YrBaVbEbgwG++EIyJ0STlZaqGk+gXk6vvQa33go5ORU7hIWpKQ579FD3d+2Cu+9WUyc2d+79LyrKOX5Qj4mhb+vDpEW8xvx2d9KhfQj0w4rfZYD9N8q0SOxoGDQ7ncIOkBKxl1J7OOmFUyUrSwQft35YGh+v+qOXX6Nr1mhs2NCOotwyuoXvI3zDOti922vHF0IIIYQQ9SeBrFDnyAZJSEDLz1frHAVoJXNCeEFMDMydq4JXhop3jHXr4IorYMOGip1atYLnn4fWrdX9n3+Gxx9v/sEOt/5HmzbovXpRlJyshvRqmlofKv2w4nfJjDqdIns0icZ8ekftIUKzYjDoJBgLWVU0VLKyRPCpeO2ujRjFVQf/y9qIUV59jeo6zJtnoKzMiF5SxrDobLVh0CCvHF8IIYQQQjSMBLJCmXs2SHS0aziXpkHbtj65Ki1aJoMBrr0WXn4ZkpLUuqNH4cYb4d13K4YaduwIzzzjqst26JCqm9Vcufc/k6sItG40uvYxmUKjH1b8LrqllPTCqWjodI44TLhW7tzFZCiRrCwRfNxeuy+ar2OVZSQvmq/z6ms0MxPWrtWIiyvDUmLn1FYb1YaBA5t8bCGEEEII0XASyApl7tkgRiN6586Ux8Q474dUNogICSecAPPnq1n4QAWwXngB/vWvijjqoEHwyCMwaZLaEBlZ7Ri+nlnMb9z7n6M2FmDXNT7OHcfusvahk5Xllo21qmgoCcZC918JqPhVJCtLBBu31+5PRUNphZmfzN57jbrHq+PjSigt1Ug/MgW9S1dXBqoQQgghhPArCWSFKk/ZIDExWBIT0du3V/dDJRtEhJTERPWS+r//c8VvVq2C9esrdhg/Hu6/X9XOqsoPM4v5RQ3ZWABbrb2Yc+gaJm2fw+MHrgv+flglG6vUHo7JUOJxV8nKEkGl2ms3gnb6QUqtBtIPTkLPL1BDnpvwGnXEq+PjdcLKrSQYC1hlHkpm67O9+IsIIYQQQoiGkEBWqKohG6SSUMkGESHHYICbblJJVwkJMGUKjBlTywOOHYP9+/0ys5hf1NL/1lpOdC73i9oZ/P2wHtlYDpKVJYJK1deuIR8NnQQtn1XWkWQWD4LPP1dvVtnZDQ5oucerIyPBWFrmCuZuO1NiuEIIIYQQASKBrFBUSzZINcGeDSJC2kknqaGG//xn9W3O8ljbt8M118Btt8Fzz7HOPIApJfNZZx4Qmq/LWvrfuqLjeDrnFopsUQCcErtJbQjWfujIaDEXkZ53JaW2MEwUg12vuNmr/NQxUUypLYz0vCvRzUXB9zuJlsFjJmEpujEMk1ZMqR5Buv0m9HIbLFwIV1+NfsWVlM1fSPGhwmqTqpaUqFjXpk2QlQWrV8N//wtLlqjk0u1bdXYfac1BaxsVzN2eLDFcIYQQQogA8TD2RwS9rKy6s7EcqmaDnHSSP1ooWpB27aqvW7wYXn8dnpit02f2Y3DwIJjN6Hv38YLhf6yyjOQF4828v/I2tFB7XdbQ/3Qd5h66iv22DiRQyJjoLNqH56iNwdoPrVbYs4fM8FNZlT+UBC0fTbeBW1xK0+1gd7sPKuOlaCiZSady0t696jiOIv9C+IN7NlZeRSahAXbau2DXwrBqRj6xXcifWiYxucVYi1th/TMCvgQM+3n8gx5MONf1mt2+XU1o4aDrsHOnqv0XHq6jlZe
j6xrY7JgiS8mzhZGeDiNH1v0xLIQQQgghvEsyskJAeno6AwYMYLijwvZrr9UvG8shWLNBRLO0Ywc8/jjs3g3XXqfxxehn0OMT4MgRMi1D+KloKLFaET9ZhpFpPj70XpevvQZFRWpCBYvFecvM7cuPhSdiwIbZFkMnw4FK2zEa1eOC6feNiED/aAHpI96mtHUHTH06Qe/ezpuekkJRckf0lJRK6019OlHaugPpI95G/2iBBLGEf9VS161MD6dMD8eOgXLC+NvQiyItFqvdbTbRWBNWrfJrNoLKM6wWFYHZrLqtCuba1EP1QrTwMBISNBlZK4QQQggRIBLICgFpaWlkZ2eT5ZjlbfXq+mVjOQR7jR7RrERFQY8earmsDB57qQ2PxD1DcUE56frNlNrDacchNfSn/Eb0lcH9uqwWSN67VwWHzWYoLITCQvSCQtIPTabIHkUY5djR2FLUBb2g0LkPZrN6nCODKUhk/t2eVZviSWgThhYdDVHutyhsERHqn+q2XouOJqFNGKs2xZO5w0NKnhC+VJEV6amum1GzY8BOmGYjQiujxB5JQpSFvmHbOX54DCccb2XEqEgSE92OV1ZG69uu4pK4b7jy1F1ce42d+Hj1sk/uoNMh7CgdDIfpHLafVsYisNsxmXS5PiRahGqfgUIIIUQQkKGFoaioCOLiVJaHO13HUFam1lcNcrlng8hYCOFDHTvCG2/As8/Cxx8D6Cxa2YpV+pf8aU8hQcvHYLeTEF7AqlKVlXVSEL8u09LSSEtLo6CggPj4eHjnHSgvr7RP5oYoVv2rI7rdiG6zE24wkJ14CplP/shJQ6v008TEoMlgcpT7KiqC2FiPbymUlRnkLUUEl9deU9lYVpWNlRSe69zUI3w3RqPKvtJ12GdtR3JkLu+F34TWeRy89171F+uyZbQt3MEMHoRVsObnsczZ/iDJ7aKIDbNA2SEIM2BDB8LBYkErLiIhITboRgsL4W3VPgOFEEKIICCBrFDkyAapQgPCLBa0mr5RumeDBMkXadE8RUTAPffAkCEw655CigvNbNQHU0gMMZRgDw/DZLSQZ40jvfxGRoZSrax27VQguYKuQ/osKLaCrulg0ImNM1BqN5L+dXdGXha8QZ6KElk1vaUAGhZLWI3vKfKWIgJi9erKtbHqM8tmh9M5qaaok8Wispbz8lR//v00SgssJBXvAd0GNjtEhKnhhQYDWG1w+DCm7iby8jQJ5gohhBBC+JkEskLRZ5+p9Ikqyq1W1qxcyejRowkPD/f82CDKBhHN35kTdPr+dyY3ZE/CrPfDoOnspxPxtjw6GnPVl8wQyMqqTUXNacLCXE1v1QrCw4OvtntVERGwYAHk5nrebrWWs3LlmlrfU+QtRfibXlREuk3NspkUVuw2GUHFbJuaHXVpB0wUk2eLJT3vSkaGLUXz9D5z0UVw7rnw449k/ncjq36rCJDZbVBWqvYpK60omIX6aTZLVpYQQgghRIBIICsU9elTKSPEyWrFvGMH9OunvkULEWiZmXT75TPaRY0jvKgcu66BpurYoIHJUEKetVXoZWVVcAzNKy1VXdJuV7OcmUw64eEaeXnBP/SufXt188RqhR07zPKWIoJKVtjJrCrw8iyb4eHoY8eR/vY4SuNtJEUeg0MF6tiaBpqGpuuq0xsNUCZZWUIIIYQQgSKBLCGEb1REeTLNx7O69AQ6hx+inDAKbTG0MeQARtfQn6pZWSHCkY2VkKCSJFu10snLMxMeHldtjoUQis8JEdReO/G/lK7vQFI7qyPxCgBd1ykqNBPbKrbScFiTDnmHw0kf8TYjXytFqyGF0NmfE41oWgwc1lQEV7eD3a4CWQYjGDXJyhJCCCGECCCZtVAI4RuZmegrV5FefiOl9ghMhhISjIV0CT9UKWvBZCih1B4aMxi6c8/GMplc6yv9biZkZjMhvGz1b96fZdN94gOjUcdyIBeLLQKLMQZLWCyW8FYUh8Vi0aKw2CPUT1sElgO5GI2
6c+ID6edCCCGEEL4ngSwhhPe5ZWOtKh1WuSBzlaE3VbOyQuXboHs2Vq3FphNcWVlCiIZLT09nwIABDB8+HHAEm1SN9qo3xyybVW/us2x6enupNPHBsVIKzRqFWisK7SZ1002Y9VgK7TGudVorCs0a5mOllSY+EEIIIYQQviVDC4UQ3ufMxnqRUnsESeF5te5etVYWWVn+aWcjuWdjJSXVvq/JREjUyhIiWKWlpZGWlkZBQQHx8fE+mWXTOfFBjg4zHoQ1a1TxuIpD2XUwFxYS26oVBsfhdeDQIRh+Ejz1FIltNJn4QAghhBDCDySQJYTwLkc2Vn5/VlmGkWAoqCiS7Nyh2sxiGpBgKGCVZRiZ+f057rXXAtT4+qlPNpaD1MoSwrtqmLi3ybNstm8P7Xdkwq8LICkaoi3ObXZdp6A0n7ioeAzunT7JrPbPmwT9pXMLIYQQQviDBLKEEN5ltaLv3kN6+Z2U2sJIMporzSIG1WcWAzDpZvJssaSX30j63mf8194Gakg2loNkZQnhPbVM3Nu0WTalcwshhBBChAQJZAkhvCsigsx7PmPVtFgSInW0mFaVNtc0s5gGJBQbWFV6Dll3DoXVPfzc8PrJyqp/NpaDZGUJEQIakmrpIJ1bCCGEEMLvJJAVAtLT00lPT8dmswW6KULUSdch/YNEikohNgEsVbdjp4RywohCqzLfhDEaigrgtYWJfmtvQ732mioaHRuriki703VXsemq34Pdi01L4oYQQcZ92sIaOrehrAzp3EIIIYQQgSeBrBDgKHSbn59PQkICBQUFHvezWq0UFxdTUFBQY32QxvLlsYPpOesrmNsGgW1fWRn8/TdER4Pnl6pOSUkJuq5TbQpD1ON27VIP1INo9kJHW3buLGjS77ZzJxw7VnOdnppIH6xM2tZ4dbXP8RkTTP0PXO3xyWdgHW9cOjj7tscwVVM6dw2C+XUUzG2D4G5ffdoWjH2wrv4Hch7qT9K2xgvVz0AhRGUSyAohhYWFAHTp0iXALRHCt/buVT8LCwuJj48PbGMqOPrfunVN639790Lbtt5okRC+FUz9D0LgM1A6t/CyYOqDQd//hPCyYOp/QojqNF3CzSHDbrezf/9+WrVq5XF68YKCArp06cKePXuI81QJtwl8eexges76Cua2QXC3rz5t03WdwsJCOnbsiMFg8LiPv9XV/0D6oD9J2xqvrvYFY/8D+QwMJsHcNgju9slnYONIH6xM2tZ4ofoZKISoTDKyQojBYKBz58517hcXF+ezDw5fHjuYnrO+grltENztq6ttwXYVrL79D6QP+pO0rfFqa1+w9T+Qz8BgFMxtg+Bun3wGNo70wcqkbY0Xap+BQojKJMwshBBCCCGEEEIIIUKCBLKEEEIIIYQQQgghREiQQFYzEhkZyUMPPURkZGRIHTuYnrO+grltENztC+a2NZX0Qf+RtjVesLevsaT/+U8wtw2Cu33B3Lamkj7oP9K2xgv29gkh6keKvQshhBBCCCGEEEKIkCAZWUIIIYQQQgghhBAiJEggSwghhBBCCCGEEEKEBAlkCSGEEEIIIYQQQoiQIIEsIYQQQgghhBBCCBESJJAV4rKysrj11ls57rjjMJlMdO3alUsuuYStW7c2+di///47U6ZMoWfPnsTExJCUlMTo0aP56quvvNByz1asWIGmaR5vmZmZPnteT8xmMw899BBnnXUWiYmJaJrG22+/7XHfP/74g7POOovY2FgSExO5+uqrOXLkiM/a1pD/u7/bVtV//vMfNE3j+OOPr7Zt9erVjBo1ipiYGDp06MDtt9+O2Wz2W9u8Qfqgb0j/857m3Ael//lOsPZB6X/BRfqgbwRr/wPpg0KI4BAW6AaIpnnyySfJyMhgypQpDBo0iIMHDzJv3jxOOOEEMjMzPb5p19euXbsoLCzkmmuuoWPHjhQXF/PJJ59w/vnn88orrzBt2jQv/iaV3X777QwfPrzSupSUFJ89nydHjx7l0UcfpWv
XrgwePJgVK1Z43G/v3r2MHj2a+Ph4Hn/8ccxmM08//TSbN29m3bp1REREeL1t9f2/B6Jt7vbu3cvjjz+OyWSqtm3jxo2MHTuW/v37M3fuXPbu3cvTTz/NX3/9xTfffOPTdnmT9EHfkP7nHc29D0r/851g7YPS/4KL9EHfCNb+B9IHhRBBQhchLSMjQy8tLa20buvWrXpkZKR+5ZVXev35ysvL9cGDB+t9+/b1+rF1XdeXL1+uA/rHH3/sk+M3hMVi0Q8cOKDruq5nZWXpgP7WW29V2+/mm2/Wo6Oj9V27djnXLVmyRAf0V155xSdtq+//PRBtc3fppZfqY8aM0U877TT9uOOOq7Tt7LPP1pOTk/X8/Hznutdee00H9O+++87nbfMW6YO+If3PO5p7H5T+5zvB2gel/wUX6YO+Eaz9T9elDwohgoMMLQxxJ598crUrGr179+a4447jjz/+8PrzGY1GunTpQl5entePXVVhYSHl5eU+f56aREZG0qFDhzr3++STTzj33HPp2rWrc924cePo06cPCxYs8Enb6vt/D0TbHFauXMnChQt57rnnqm0rKChgyZIlXHXVVcTFxTnXT506ldjYWJ+3zZukD/qG9L+mawl9UPqf7wRrH5T+F1ykD/pGsPY/kD4ohAgOEshqhnRd59ChQyQlJXnleEVFRRw9epTt27fz7LPP8s033zB27FivHLsm1113HXFxcURFRXHGGWewfv16nz5fY+3bt4/Dhw8zbNiwattGjBjBhg0b/NaWqv/3QLbNZrNx22238X//938MHDiw2vbNmzdTXl5erW0REREMGTLEr383X5A+6B/S/2rWkvug9D//CfTr3EH6X3CRPugfgX6du5M+KITwN6mR1Qz973//Y9++fTz66KNeOd6//vUvXnnlFQAMBgOTJk1i3rx5Xjl2VREREVx88cVMnDiRpKQksrOzefrppzn11FNZvXo1Q4cO9cnzNtaBAwcASE5OrrYtOTmZnJwcSktLiYyM9Hlbqv7fA9m2l19+mV27drF06VKP2+tq26pVq7zeJn+SPugf0v9q1pL7oPQ//wn069xB+l9wkT7oH4F+nbuTPiiE8DcJZDUzf/75J2lpaZx00klcc801XjnmnXfeyeTJk9m/fz8LFizAZrNRVlbmlWNXdfLJJ3PyySc7759//vlMnjyZQYMGcd999/Htt9/65Hkbq6SkBMDjB3FUVJRzH1+fRHj6vweqbceOHWPmzJk8+OCDtG3b1uM+dbXNsT0USR/0H+l/nrXkPij9z7+CoQ9K/wsu0gf9Jxj6H0gfFEIEhgwtbEYOHjzIOeecQ3x8PAsXLsRoNHrluP369WPcuHFMnTqVRYsWYTabOe+889B13SvHr0tKSgoXXHABy5cvx2az+eU56ys6OhqA0tLSatssFkulfXylpv97oNr2wAMPkJiYyG233VbjPnW1zdd/M1+RPuhf0v88a6l98P/bu/+gqqv8j+PPiyA/RAXTDBAE0ZxCFy3BBCRt/IWEumq7NjEJrfkjVi1SZ3O3ZafdndlSdmLXzcLKbGxap1xyJkBHS90UEUxYc9tVZgWUEH8UApsi/jjfP/xyN7wXBPl54fWYuX9wzudzzvvzubyHmTfncz7Kv47X2Tmo/OtalIMdq7PzD5SDItJ5VMjqJqqqqoiJieHSpUvs3LkTX1/fdptr/vz55Ofnc/LkyXab43b+/v7U1dXx/fffd9iczVG/JLl+ifIPnT17lgEDBrTrf8Ka+t47I7aioiLS09NZsWIF5eXllJSUUFJSQm1tLdeuXaOkpITvvvvujrG15+9ve1EOdjzln62emoPKv87RmTmo/OtalIMdT38DbfXkHBTpaVTI6gZqa2uJi4vj5MmTfPrppzz44IPtOl/9ctuqqqp2neeHTp06hZubG56enh02Z3P4+fkxaNAgu5uA5uXlMWbMmHab+07fe2fE9s0333Dz5k1WrFhBUFCQ9XP48GFOnjxJUFA
Qr7zyCqNGjcLZ2dkmtrq6OgoLC9v1vrUH5WDnUP7Z6ok5qPzrPJ31e67861qUg51DfwNt9dQcFOmRjDi069evm1mzZhlnZ2eTmZnZpmOfO3fOpq2urs489NBDxt3d3dTU1LTpfMYYc/78eZu2wsJC4+LiYmbNmtXm8zVXfn6+AczmzZtt+pYuXWrc3d3N6dOnrW179uwxgNm4cWO7xNPc772jY7tw4YLJyMiw+YSEhJiAgACTkZFhjh07ZowxZsaMGcbHx8dUV1dbz3/77bcNYLKzs9s8tvaiHGx/yr/m62k5qPzrGF0pB5V/XYtysP11pfwzRjkoIl2DxZgOesBc2sXzzz9PWloacXFx/OQnP7Hpj4+Pv+uxf/zjH1NdXU10dDR+fn5UVFTwwQcf8O9//5vU1FSSk5NbE7pdjz32GO7u7kRERHDvvffy9ddfk56ejouLC4cOHeKBBx5o8zmbsmHDBi5dukR5eTkbN25k7ty51jfWLF++nP79+3PmzBnGjh2Ll5cXK1eu5L///S/r1q1jyJAh5Ofnt8vS6eZ+750Rmz2TJk3i4sWLHD9+3Np29OhRIiIiePDBB1m8eDFlZWWkpqYSHR3Nrl27OiSutqAcbD/Kv7bTXXNQ+de+umIOKv+6FuVg++mK+QfKQRHpIjq7kiat8+ijjxqg0U9rfPjhh2bKlClm8ODBxtnZ2Xh7e5spU6aYHTt2tFH0ttLS0kx4eLgZMGCAcXZ2Nj4+PiY+Pt4UFRW125xNGTp0aKP3tri42Hrc8ePHzbRp04yHh4fx8vIyTz31lKmoqGi3uFryvXd0bI3FGxISYtP+xRdfmIiICOPm5mYGDRpkkpKSGvxnzBEoB9uP8q9tY+6OOaj8a19dMQeVf12LcrD9dMX8M0Y5KCJdg1ZkiYiIiIiIiIiIQ9Bm7yIiIiIiIiIi4hBUyBIREREREREREYegQpaIiIiIiIiIiDgEFbJERERERERERMQhqJAlIiIiIiIiIiIOQYUsERERERERERFxCCpkiYiIiIiIiIiIQ1AhS0REREREREREHIIKWSIiIiIiIiIi4hBUyBIREREREREREYegQpaIiIg4jMDAQCwWi/Xj5ORE3759GTJkCJMnT2bVqlXk5eU1a4ySkpKOCboLc+R70VTsjnhd/v7+WCwWzpw509mhiIiIdGnOnR2AiIiISEtFRkYyfPhwAK5cucLFixcpKChg3759pKam8uijj/Luu+8ybNiwTo5UupLAwEBKS0spLi4mMDCws8OxKi8vp6ysjMGDB+Pv79/Z4YiIiHRpKmSJiIiIw1m0aBEJCQkN2owxZGdn8/zzz7N//34iIiI4dOgQQUFBDY777LPPuHbtGn5+fh0YcdfUXe+Fo13X4cOHARg3blwnRyIiItL1qZAlIiIi3YLFYmHmzJlEREQQHh5OUVERixYt4rPPPmtwXHBwcCdF2PV013vhaNelQpaIiEjzaY8sERER6Va8vLx4/fXXAfj888/58ssvG/Q3tn9S/b5bAFu3biU8PBxPT08GDRrEk08+yenTp4FbK782bNjAmDFj6NOnDwMHDiQhIYHz58/bjefKlSukpqbyyCOP4OXlhZubGyNHjmTNmjV8++23ds/5YSzbt28nKiqKfv360adPHyIjI8nKyrJ7XlFREc888wxBQUG4urri6enJ0KFDiY2NZfPmzTbHN7WXVFlZGcuXL2fEiBG4ubnRv39/IiMjeeutt7hx40abxp2Xl8eaNWsIDw/nvvvuo3fv3gwePJi4uDj27Nlj95ym3H5d7733HhaLhdLSUgCCgoIa7LX2zjvv0KtXL7y9vbl8+XKj44aEhGCxWBq9jqacOnWKxYsX4+/vj5ubGyEhIWzatAn4XyErLCysxeOKiIj0NCpkiYiISLcTExPDgAEDANi9e3eLzn3ppZdITEykb9++xMTE4OHhwV//+leioqKorKxkwYIFrF69Gh8fH6ZPn06vXr3YsmULU6dOpa6
ursFY5eXljB8/nlWrVlFUVERYWBgzZ87k6tWrrFu3jnHjxlmLK/akpKTwxBNPADBz5kxGjBhBTk4Ojz/+OBkZGQ2OPX78OOPGjWPz5s24urry+OOPM3PmTPz8/Pj73/9OWlpas+9Bfn4+oaGhbNiwgbq6OubMmUNERARHjx5l6dKlxMbG2lzr3cYNsHbtWlJTU6mtreXhhx9mzpw5DBkyhE8//ZSpU6e2KHZ7hg8fzsKFC+nTpw8A8+bNY+HChdZPZGQksbGxXLp0iQ8++MDuGHv37uXrr78mODiYmJiYFs2/Y8cORo8ezaZNm/D29mb27Nm4ubmxePFifvvb33LkyBFAK7JERESaxYiIiIg4iKFDhxrAbN68+Y7HTpkyxQAmPj7e7hjFxcUN2gEDmHvuuccUFhZa2y9fvmyioqIMYEaPHm2Cg4NNSUmJtf/ChQtm+PDhBjBbt261tt+8edNERkYawPzsZz8z1dXV1r5r166ZF1980QBm8uTJNrHXx+Ll5WVyc3Mb9KWkpBjA3H///Q3aExMTDWB+97vf2Yx3+fJls3//fpt2e/eitrbW2r506VJTV1dn7fvPf/5jAgMDDWDWrl3bJnEbY0xWVpYpLy+3ac/JyTH9+vUzLi4upqys7I6x36mvqXN2795tABMaGmrTZ4wx8+bNM4BJTU2129+YQ4cOGVdXV+Pp6WkyMzMb9K1fv95YLBYDGH9//xaNKyIi0lNpRZaIiIh0SwMHDgRo9PG9xrzyyiuEhoZaf3Z3dyc5ORmAr776ij/96U8MHTq0wTzLli0DaLAf165duzh48CBjxozhzTffpG/fvtY+Z2dnXnvtNUaNGsXevXs5fvx4o7GMHz++QdtLL71E//79OXnyJGfOnLG2nzt3Dri1Aup27u7uREdHN+v6P/roI0pLS/H19eX111/HxcXF2jds2DDWr18PwJ///Gdqa2tbHTfcWkHn4+NjM86ECRNISkri2rVr7Nixo1nx360pU6YQEhLCP/7xDw4cONCgr6ysjB07duDh4cEzzzzT7DFv3LhBQkICV69eZcuWLTbfTXJyMkOGDAG0GktERKS5VMgSERGRbunmzZsA1j2bmsteIWjEiBHArQLUtGnTGu0vLy+3tmVmZgK3HmNzdrZ9v46Tk5O1uJSTk2M3lri4OJs2V1dXhg0bBsA333xjbQ8PDwdg2bJl7Nq1q9Ei053s27cPgAULFuDq6mrTP3fuXLy9vampqbHZf+xu4q737bff8v7777NmzRqeffZZEhISSEhIYP/+/QCcOHHirq6nJVasWAHAhg0bGrS/9dZbXL9+naeeegovL69mj7dt2zZOnDhBdHQ0c+fOtem3WCzcf//9gPbHEhERaS69tVBERES6pYsXLwJY98pqroCAAJs2T09PAHx8fOwWpepXW/2weHTq1CkAXn75ZV5++eUm57xw4UKzYwHo16+fzXyrV6/mwIED7NmzhxkzZuDi4kJoaCjR0dEsWLCg2YWS+iJTUFCQ3X6LxUJQUBCVlZV2C1ItjRtg06ZNvPDCC3z//feNxlVdXX3H2FsrPj6eX/ziF/ztb3/j7Nmz+Pj4UFdXZ92U/ec//3mLxtu+fTsACxcubPSY+uvSiiwREZHmUSFLREREuh1jDAUFBQCMHj26Rec6OTW+YL2pvtvVrwiLiooiODi4yWNDQkJaPZ+Hhwe7d+8mPz+fnTt3kpOTQ05ODkeOHOGPf/wjzz33HH/5y1+aPV5rtCTuL7/8kiVLltCrVy9effVV4uLiCAgIwMPDA4vFQnp6OkuWLMEY044R3+Lh4cGzzz7La6+9Rnp6OikpKWzfvp1z584xceJEfvSjH7VovPoVa7c/Zlnv8uXLHDt2DFAhS0REpLlUyBIREZFuJysri8rKSgC7jwJ2BH9/fwBmz57NqlWrOmzesLAw6+qr69ev88knn/D000/zxhtvMH/+fCZPntzk+X5+fsD/VpTZU1xc3ODY1vjoo48wxrB
8+XLWrFlj019UVNTqOVoiKSmJ1NRU0tPTWbt2rfUxw5auxgI4f/48cKtAZs+2bdu4evUqwcHBeHt7333QIiIiPYj2yBIREZFupaqqihdeeAGAqVOnMmbMmE6JIyYmBvhfoaYzODs7M3/+fKZPnw5AYWHhHc+ZNGkScKvIYm+frYyMDCorK+nbty8PP/xwq2P87rvvABpsoF+vtrbW+nheW+jduzdwq8DXmICAAObMmUN5eTm//vWvycnJwdfX1+4eV3dS/8jpv/71L5u+6upqUlJSAO2PJSIi0hIqZImIiEi3YIwhOzub8PBwioqK8PHxse5t1Blmz55NWFgYeXl5JCYm2t0Hq7KykjfffLPJwkpzvfHGG3Y3RK+oqODIkSOA/WLR7Z544gkCAgIoLy8nOTm5QWzFxcW8+OKLACxfvhw3N7dWx/3AAw8AsGXLFmpqaqzttbW1PPfcc9bVX22h/g2B//znP5s8buXKlQD84Q9/AGDJkiV290a7k6ioKODWWxyrqqqs7RUVFcTGxlrf3qjHCkVERJpPjxaKiIiIw3n77betb9e7evUqFy9e5OjRo9bVPZMmTeLdd99tVuGmvTg5OfHJJ58QGxvLli1b+PjjjwkNDSUgIIC6ujpOnTrFV199xY0bN0hISLirQskPpaenk5SURFBQEKNGjaJfv35cuHCBL774gitXrvDYY48xa9asO47j6urKxx9/zIwZM9i4cSNZWVk88sgj1NTU8Pnnn1NbW8v06dOtq4laKzExkbS0NAoKCggKCmLixIn06tXLGvfKlStJS0trk7nmzZvH3r17iY+PZ9q0adbH+VavXs3IkSOtx02cOJGxY8dSUFCAi4sLixcvvqv5fvWrX5GZmcnhw4cZOXIkERERXLlyhX379jFw4EACAgI4ffq0VmSJiIi0gApZIiIi4nAOHjzIwYMHAejTpw/9+/dn9OjRjBs3jp/+9KddpjDg6+tLbm4u7733Htu2bePYsWPk5eUxYMAAfH19Wbp0KbNmzWqTlU2///3vyczMJDc3l9zcXKqqqrj33nsZP348iYmJPPnkk80uloWFhVFYWMirr75KdnY2GRkZuLq6MnbsWJ5++mkWLVrU6sJbPS8vL44cOUJKSgq7du0iOzube+65h2nTppGSksKBAwfaZB6AZcuWUVNTw9atW8nKyrI+OhkfH9+gkAW39lYrKChg/vz53HfffXc139ixY9m7dy+//OUvyc3NZefOnQQGBpKUlERycjIjRozAycmJhx56qNXXJiIi0lNYTGdt2iAiIiIi0gXduHGD4OBgSktLycnJYcKECZ0dkoiIiPw/7ZElIiIiIvID6enplJaWMmHCBBWxREREuhg9WigiIiIiPd6JEydYt24dFRUV7Ny5EycnJ9avX9/ZYYmIiMhtVMgSERERkR7v7NmzvPPOO/Tu3ZuQkBB+85vfEBER0dlhiYiIyG20R5aIiIiIiIiIiDgE7ZElIiIiIiIiIiIOQYUsERERERERERFxCCpkiYiIiIiIiIiIQ1AhS0REREREREREHIIKWSIiIiIiIiIi4hBUyBIREREREREREYegQpaIiIiIiIiIiDgEFbJERERERERERMQhqJAlIiIiIiIiIiIOQYUsERERERERERFxCCpkiYiIiIiIiIiIQ/g/iVLIl0CLrA8AAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "DIMS = np.array([2, 3, 5, 10, 20, 40, 100])\n", + "FUNCTIONS = np.arange(1, 25)\n", + "\n", + "colormap = {\n", + " \"active\": \"red\",\n", + " \"COVARIANCE\": \"blue\"\n", + "}\n", + "\n", + "f, axes = plt.subplots(5, 5, figsize=(14, 15), sharex='col', sharey=False)\n", + "axes = axes.ravel()\n", + "for (fid, ax) in zip(FUNCTIONS, axes):\n", + " f_data = completed_overview.filter(function_id = fid)\n", + "\n", + " for method, color in colormap.items():\n", + " m_data = f_data.filter(algorithm_name = method)\n", + " erts = np.array([ert(m_data.filter(dimension=d)) / d for d in DIMS]) \n", + " label = method.title().replace(\"_\", \" \") if method != \"CMSA\" else method\n", + " marker = \"^\"\n", + " if method == \"pycma\":\n", + " marker = 'o'\n", + " mask = np.isfinite(erts)\n", + " ax.plot(\n", + " DIMS[mask], \n", + " erts[mask], \n", + " label=label, \n", + " marker=marker,\n", + " markersize=12,\n", + " linestyle='dashed',\n", + " alpha=.8,\n", + " linewidth=2,\n", + " color=color\n", + " )\n", + "\n", + " ax.grid(which=\"both\", axis='x')\n", + " ax.grid(which=\"major\", axis='y')\n", + " ax.set_yscale(\"log\", base=10)\n", + " ax.set_xscale(\"log\", base=2)\n", + " ax.set_xticks(DIMS[:-1], DIMS[:-1])\n", + " ax.tick_params(axis='both', which='both', labelsize=12)\n", + " \n", + " ax.text(0.01, 0.99,f\"$f_{{{fid}}}$ (\" + f_data['function_name'][0] + \")\",\n", + " transform=ax.transAxes,\n", + " bbox=dict(boxstyle='round,pad=0.1', facecolor='white', alpha=0.5),\n", + " ha='left', va='top', fontsize=10)\n", + "\n", + " if fid == 11:\n", + " ax.set_ylabel(\"Expected Running time (ERT) / $d$\", fontsize=16)\n", + " \n", + " ylim = ax.get_ylim()\n", + " ax.set_ylim(ylim[0], 1.5 * ylim[1])\n", + " if fid == 1:\n", + " ax.set_ylim(10**1.9, 10**3)\n", + "\n", + " if fid == 5:\n", + " ax.set_ylim(7, 110)\n", + "\n", + " if fid == 23 :\n", + " ax.set_xlabel(f\"Dimensionality 
$d$\", fontsize=16)\n", + "\n", + " if fid == 24:\n", + " ax.set_ylim(10**3.5, 10**5.1)\n", + "\n", + "axes[24].axis('off')\n", + "handles, labels = axes[0].get_legend_handles_labels()\n", + "axes[24].legend(handles, labels, loc='center', fancybox=True, shadow=True, fontsize=11)\n", + "plt.subplots_adjust(hspace=.05, wspace=0.2)\n", + "# plt.savefig(\"bbob_matrix_adaptation.png\", dpi=500)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "611bca37", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABE0AAAQyCAYAAABd6wE1AAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjMsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvZiW1igAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3Xd4U2X7wPFvRpPullJKyyxb9l4OligqggNFWbJ+DEUcqCgqAuorDkBUcCAyBARUEAQUZIMgy4Eoq5S2bGjpnlnP74++yduQFNrSkrbcn+vKpTnnOc+5T0menNx5hkYppRBCCCGEEEIIIYQQTrSeDkAIIYQQQgghhBCiNJKkiRBCCCGEEEIIIYQbkjQRQgghhBBCCCGEcEOSJkIIIYQQQgghhBBuSNJECCGEEEIIIYQQwg1JmgghhBBCCCGEEEK4IUkTIYQQQgghhBBCCDckaSKEEEIIIYQQQgjhht7TAQhRkqxWK2az2dNhCCGEEEIIIW4yOp0OLy8vT4chrpMkTUS5pJTiwoULpKSkoJTydDhCCCGEEEKIm5DRaCQ0NJTAwEBPhyKKSJImolxKSUkhOTmZSpUq4efnh0aj8XRIQgghhBBCiJuEUgqz2UxKSgpnz54FkMRJGSVJE1HuKKW4dOkSgYGBhIaGejocIYQQQgghxE3Ix8eHgIAAzpw5Q0JCgiRNyiiZCFaUO1arFavVKo2SEEIIIYQQwqM0Gg1BQUHk5OTIXItllCRNRLljsVgA0OulI5UQQgghhBDCs+yTwVqtVg9HIopCkiai3JJ5TIQQQgghhBCeJt9LyjZJmgghhBBCCCGEEEK4IUkTIYQQQgghhBBCCDckaSKEYMiQIWg0GmJjY0uk/m3btqHRaJg8eXKJ1H+lLl26SDdIUaZoNBq6dOni6TCEKBciIyOJjIz0dBjFzl07UdKf30KUVZMnT0aj0bBt27YCHyOfxSI/kjQRohz7/fffGT58OPXq1cPPzw8fHx/q1KnDoEGD2Lhxo6fDE6LQ5DUthHuxsbFoNBo0Gg09evRwW2bPnj1oNBqGDBlyXecqr0mJv/76i9GjR9OoUSMCAwMxGAyEh4dz1113MX36dOLj4z0dYqkhXy5Lp4yMDN555x1atWqFv78/RqORatWqcccddzBhwgSio6M9HaIQZZIkTYS4DsdPJfGf+Xs5firJ06E4sdlsjBs3jjZt2vD1119Tu3ZtRo8ezbPPPkvr1q1Zt24dd999N2+99ZanQxWlTNrxKI688x5px6M8HYqT8v6aPnLkCF9//bWnwxAFlJFyihN/LiAj5ZSnQ3Hrl19+YcuWLZ4Oo8yw2Wy8+OKLtGzZkrlz5xIeHs6wYcN46aWX6NWrF+fPn+fFF1+kVq1anD171tPhOpk6dSpHjhyhatWqng7lpnLx9EnWLfqYi6dPejoUh7S0NG699VZee+010tLSGDhwIC++
+CI9e/YkPT2dd999l61bt3o6TCHKJFmTVYgiUkrx485o/j6RgNGg44X+rUvNkJDXX3+dDz/8kBYtWvD9999Tp04dp/1ZWVnMmjWLy5cveyhCURoppTi3dh0ph/5B622k/vPPymv6Brnllls8HYIoIKUUl+J+JT0pmktxBiKb9is17xPI7QVy6tQpXn75Zfbt21eqYiutXnvtNaZPn06rVq1Yvnw5devWdSnzxx9/8PLLL5OVleWBCPMXERFBRESEp8O4qSilOLh7I2dPHsXLYOSuviNLxfts5syZ/P333/zf//0fc+bMcYkpJiaGnJwcD0UnRNkmPU2EKKJjcUkcjknEx6jncEwix+JKR2+TEydO8P7771OxYkXWr1/v8uUSwMfHh5deeokpU6Y4bVdK8fHHH3PLLbdgNBqpWbMmU6ZMwWazudRhsViYMWMGzZs3x8fHh6CgILp27cqaNWsKFe+lS5d4/vnnqVu3LkajkdDQUPr06cM///zjUjYqKoqhQ4dSq1YtjEYjISEhNG/enOeeew6l1DXPtXz5coxGI82bN2fRokVoNBqeeuopt2Wjo6PRarX5dnMvj9KOHSft8BF0Pj6kHT5C2rHjng4JKPprOiEhgeeee87xegkLC6Nv374ur63hw4ej0WjYsWOH2/PPmDEDjUbDl19+6dg2b948HnjgASIjI/H29iYkJIQePXq4/RUv75w+u3fv5u677yY4ONjphtZdV/fjx48zfvx4WrVqRcWKFfH29qZ+/fq88sorpKenu5zHPpeP2Wxm8uTJREZGYjQaqV+/Pp9++qnba1NKMX/+fO644w6Cg4Px9fWlXr16jBo1ilOnnHtRpKWlMWnSJBo3boyPjw/BwcH06NGDX3/91W3d5VVGShzpyTFodUbSk2PISInzdEhOGjRowKBBgzhw4ADffvttgY75/fffefrpp2nSpAlBQUH4+PjQtGlT3n33Xcxms6OcfQhQXFwccXFxjuFAeeesWrBgARqNhgULFricJ7/5reyv/7Nnz/LEE08QHh6OVqt1zEWwdetWhg0bRoMGDfD398ff3582bdowZ86covyJnBw/fpwPPviASpUqsX79ercJE4BWrVqxceNGp2FJ9r/HkCFDOHLkCA899BAVK1Z0mmPkhx9+oF+/ftStWxdfX1+CgoK44447WLFiRb4xzZ07lyZNmuDt7U316tUZP3482dnZbstebU6THTt20KtXL0JDQzEajdSrV4/XX3+dzMxMp3J5/10OHDjAXXfdRUBAAEFBQTz00ENOddvLAmzfvt3pNeDu37w8unAqmnOxUXgZvDkXG8WFU6VjyMtvv/0GwJgxY9wmcWrVquWSoC/MPZh9WF5ycjKjRo0iPDwcb29vWrZsydKlS93GpJRi3rx53HbbbQQGBuLr60ubNm2YN2+eS9m8c5AsWLCAVq1a4evr6/hsTElJ4b333qNz585UqVIFg8FAlSpVeOKJJ6457Oirr76iadOmeHt7U7VqVZ5//nnS0tKuekxeJpOJGTNm0KpVK/z8/AgICOCOO+7gxx9/LHAdomyTpIkQRaCUYu2uk5gtNoL9DZgtNtbuOlmgL+4lbcGCBVitVkaNGkXlypWvWtZoNDo9f+mll3jrrbfo2LEjo0ePBnI/xCZOnOhUTinFI488wgsvvEB2djZjxoyhf//+HDx4kN69e/Phhx8WKNbo6Ghat27NzJkzqVOnDmPHjuW+++5j/fr1dOjQgb179zrKnjt3jnbt2rFkyRJatGjB888/z4ABA4iIiODTTz/FarVe9VyffPIJ/fr1o0OHDuzYsYOBAwdSp04dvvnmG5cbSMi9aVVKMWLEiAJdS1mnlOL8Tz9jM1vQBwVhM1s4/9PPZfY1HR8fT4cOHfjoo4+IjIxk3LhxdOvWjZUrV9K+fXunL/qDBg0CYPHixW7rXLRoEUajkUcffdSxbcyYMVy8eJHu3bvz/PPPc//99/Pbb7/RvXt3Vq9e7bae
3bt3OxIbI0eO5LHHHrvqtaxcuZKvvvqK2rVrM3jwYEaPHk1ISAjvvfced911l9OX2bz69evHvHnz6NGjB8OHDycxMZExY8Y4JX0gd0hC3759GTZsGDExMfTr14+xY8fSqlUrvv32W/744w9H2cTERDp27Mibb75JhQoVGD16NH369OH333+na9eurFq16qrXUl4opYg/tRubzYLe4I/NZiH+1O5S8T7J680338RoNPL666/n+zrJ68svv+SHH36gadOmjBo1iuHDh6OUYsKECTz++OOOcsHBwUyaNImgoCCCgoKYNGmS43G981tcvnyZjh078vfff/P4448zcuRIAgMDAXjvvffYsWMHbdu25emnn2bgwIEkJCQwatQoXnjhhes678KFCx3tS6VKla5ZXq937aR94sQJOnToQHx8PEOGDGHw4MEYDAYAJkyYwL///svtt9/Os88+y6OPPsqxY8d45JFH+OSTT1zqeuuttxgxYgQJCQmMGDGCRx99lOXLlzu1PwXx2Wef0aVLF3bt2kXPnj155plnqFatGv/5z3+46667MJlMLsfs37+fTp06YTAYGDVqFG3atGHVqlV0797dkbSJjIxk0qRJANSsWdPpNdCiRYtCxVgWKaU4tGczVosZH/9ArBYzh/ZsLhVtQMWKFYHcRGBBFOYezM5kMtG9e3e2b9/OoEGDGDZsGKdPn6Z///4ur2elFAMGDGD48OHEx8fTv39//u///o+MjAyGDx/Oiy++6DauDz74gKeeeooGDRrwzDPPcNtttwG5w1jfeOMNfHx8eOihh3juuedo06YN33zzDe3atSMuzn0Ce8aMGTzzzDO0bduW5557joiICGbOnMndd99doPYxJyeHHj168MILL6CUYvjw4QwcOJC4uDgeeOABZs2adc06RDmghChnsrKy1OHDh1VWVpbb/dkmS74Pk9laoLIHo+LV0Dc3qFFTN6rnZmxVo6ZuVEPe3KAORsXnU6/Fqd6cfOotDl26dFGA2rRpU4GPGTx4sAJUrVq11Llz5xzb4+PjVXBwsAoICFA5OTmO7QsXLlSA6ty5s9P2uLg4FRoaqvR6vYqOjnZs37p1qwLUpEmTnM576623Kp1Op9avX++0/dixYyogIEA1bdrUse3jjz9WgJo5c6ZL/JcvX3Z63rlzZ5W3eXv11VcVoB566CGn18V7772nALVgwQKn481ms4qIiFBhYWHKZDK5/ZuVJpbs7Hwf1iviz69c0t9/q33DRqoDT45Vfz7/kjrw5Fi1b9hIlfT33wWq15qT47ZccSjKa3ro0KEKUBMmTHDavm7dOgWounXrKqs19/1us9lUjRo1VIUKFVT2FTEfOnRIAeqRRx5x2n7y5EmXc547d05VqVJF1atXz2m7/fUPqHnz5rmN1/5+yuvMmTNO7y+7KVOmKEAtXrzYabv9dd++fXuVkpLi2H706FGl1+tVgwYNnMp/8sknClB33nmnyszMdNqXmZnp9L7q37+/AtSXX37pVO7ixYuqevXqqlKlSvm2uaWJ1WLK/2E1X7Ns6uUT6uD2t9XfO99Vh3+bqQ7tfE/9vf1tlXr5xHXVa7VcfzsTExOjANWjRw+llFIvvviiAtQnn3ziKPPbb78pQA0ePNjp2Li4OGWxOH8G2Ww2NWzYMAWoX3/91WlfzZo1Vc2aNd3GMX/+fAWo+fPnu+zL77PA/v4YOnSoSxxKuX+/mc1mdddddymdTqfi4uIKHN+VunbtqgC1efPmApXPy/43B9Qbb7zhtkzez0K7tLQ01bRpUxUUFKQyMjIc26OiopRer1dVq1ZVFy9edGxPSUlRDRo0cNtO2D+/Y2JiHNv+/fdfpdfrVfPmzVVCQoJT+alTpypATZs2zbEtbxu1bNkyp/KDBg1SgFq6dKnTdnexlHZmU06+D4vZXKCyp08cVvOnjlOLpr2iln0yWS2a9oqaP3WcOn3icD71mq6o1+S2XHFYvXq1AlRAQIB64YUX1IYN
G1z+/fMqzD2YUrnvK0B16tTJ6bPp9OnTKjQ0VBmNRnXmzBnH9jlz5jje13nvpXJyclSvXr0UoA4cOODYPmnSJAUoPz8/9ffff7vEm5yc7HK/p5RSW7ZsUVqtVv3f//2f03Z7fQaDQR08eNCx3WazOT7T8r4PlHL/urbfQ06cOFHZbDbH9tTUVNWmTRtlMBjU2bNnXeK60rW+n4jSTeY0ETedV2btzHdfw1oVGflgU8fziZ/vxmxx7sGglOJiYhYms5Xqlf0B8DbouJiYydQF+6gc4uPSLbJ65QDG9W/teP7u1/tJSnXtavvh812KcEXOLly4AEC1atUKfezEiROdxkaHhobywAMPsHDhQo4dO0bTprl/m4ULFwLw/vvvO35NA6hRowbPP/88r732GkuWLHHpoZLXn3/+ye7duxk2bJjLEJj69eszYsQIZsyYwT///EOTJk0c+3x8fFzqCgkJcXsO+6+HX331FSNGjOCzzz5Dp9M59g8dOpSJEycyd+5cBg8e7Ni+bt06zp8/z0svvYSXl1e+11BaHJqQ/985sGEDao8Y7nj+76Q3sZmcf1lRSpFz6RI2kwmf/75utEYjORcvcvTdaRjDwlxe077Vq1H/+Wccz4++Pw1TYrLL+VvMeL8ol+SksK9pk8nE0qVLqVixIq+//rrTvvvuu4+77rqLjRs3smvXLu644w40Gg0DBgxg6tSprFu3jocffthRftGiRQAMHDjQqZ5atWq5nDciIoI+ffrwySefEBcXR82aNZ32t2rViqFDhxboGoB8J3Z8+umnmTRpEps2bWLAgAEu+6dOner4hR5yh2vcdtttbN++nbS0NAICAgD49NNP0el0fPbZZy7vKx8fH8e2hIQEli9fTrdu3fi///s/p3JhYWG89NJLPPPMM2zatIn777+/wNfnCcf2ux+mBOAfHEn1Wx5wPD/++xyUzeJ4rpTClJWI1ZqDTueNXu+NVmfAbMni5N+L8TIGue0S7+0XRq2m/RzPTx5chNnk2i28YYdni3pZbr366qvMnTuXt956iyFDhuDv759v2Ro1arhs02g0jBkzhnnz5rFp0ybHr70lxWAw8P777zu10Xbu3m96vZ7Ro0ezceNGtm7d6tSGF4a9falSpYrLvm3btrksV9qlSxeXXjXh4eG89tprbuuvXbu2yzZ/f3+GDBnCCy+8wP79++ncuTMA33zzDRaLhXHjxhEWFuYoHxgYyOuvv+7oFXctX3zxBRaLhU8++cTR+8Bu/PjxzJgxg6VLl7r00unUqZNLD7hhw4axaNEi9u/f79TrqCxa8cXUfPdF1KxLp17/a09XfzUNi8X1szItKQGL2USFsNzXi95gJDUpnp+XzCagQqhLGxASVoW7+v6vx+r6b2aTkZbicv7Hnp5UpGvKq3fv3kyfPp1JkyYxffp0pk+fDkCdOnW45557ePbZZ6lXrx5Q9HswgHfeecfp3q9atWo8++yzTJw4kWXLljleV7NmzcLPz4/Zs2c73UsZDAb+85//sGbNGpYuXUrr1q2d6h85cqTjfjOvoKAgt9fdtWtXGjduzKZNm9zuf+KJJ2jWrJnjuUaj4Z133mH58uUsWLDgqr3VbDYbn332GXXq1GHKlClO/74BAQG88cYb9O7dm5UrV/L000/nW48o+yRpIkQhZZusZJssGLx0jsZTo9Gg1WrJNlnINlnxMZbNt9aVH1zwvy+qycnJjm1//vknvr6+tGvXzqV8165dgdylG69mz549AFy8eNFlfDvA0aNHHf9t0qQJvXr1YsKECYwZM4bNmzdzzz330LlzZ7c3pHZ9+vRh9erVvPbaa7z99tsu+ytVqsTDDz/MsmXLOHr0qGOs79y5cwFcviCWV7acHGzZ2WgNBqfXtEarxZadjS0nB523t4ejLLijR4+SnZ1N165d8fX1ddnftWtXNm7cyF9//cUdd9wB5A7RmTp1KosWLXIkTWw2G9988w0VK1bkvvvuc6rj5MmTTJ06
lS1btnD27FmXyfXOnTvnkjRp27Ztoa5D/Xe+kQULFvDPP/+QkpLiNL/QuXPn3B53rfdxQEAA6enpHDlyhLp16zpuovOzf/9+rFYrOTk5bt+rUVG5Ky0dPXq01CdNrofNasJmNaFB4/Q+0el9MGUnodP7oNMbr1HLjVOhQgVeeeUVXnnlFaZNm+b2387OZDIxa9YsR1uYnp7uNNwgv9dacapVqxahoaFu96WlpTFt2jRWrVpFdHQ0GRkZTvtLKr5t27a5zP0FuCRNmjdv7vQlMq9Lly7x7rvv8vPPPxMXF+cykWze2A8ePAjgaJfycrctP/bP1w0bNrB582aX/V5eXo7P2LwKeg9ws7KYcjCbstHrr/ys1GE2ZWMx5eBl9Oxn5bhx4xgxYgTr169n9+7dHDhwgL179zJ79my++uorli9fTu/evQt9D2an1+vp2LGjS3n76/PPP/8EIDMzk0OHDlGlShXee+89l/L2YTHuXofu7i3ttm3bxsyZM9m7dy8JCQlYLP9LbOf3HnT33qlZsybVq1fn33//xWQy5XvssWPHSEpKokqVKm7bAvsy5O6uQ5QvZfObnRDX4d2n87/x0F7xC8Fbo291eq6U4uPlf5GeZaZioPMHY9VKfiSmZlO7ajDPPNbCKRutveLHx1eeaEtJjX4NDw/n6NGjnD17lgYNGhTq2Ly/TtvZx2/nnTMkNTWV6tWru63D3lMlNTX1qudKTEwEcnt1rFu3Lt9y9pvjyMhI9uzZw+TJk/npp58cExzecsstvPnmm27He+/YsQNvb2+XL7x5jRo1imXLljF37lymTZvGuXPn+Pnnn+ncuTP169e/6jWUFk2n5r/MrkbrPHVV4ylvOD1XShH9yadY0tMxXNFjx7tqFUyJSfjXiqTO2KecJy69ot5bxr9YYmO6C/uatr/28pv/xN1rtGHDhrRu3ZqffvqJpKQkKlSowLZt2zhz5gxPPfWU069kJ06coF27dqSmptK1a1d69epFYGCgY+LK7du3u12h4FrzsVzpmWeeYdasWVSvXp3evXsTERHhmLNlypQp+a6CUJD3cUpK7i+dBVmm1P5e3bVrF7t27cq33JVfZEujBm3dT/wMwBXtf/3WIx3/r5Qi7t/vsFqy0BsCndt3nQGd3hvfgKrUbPyoa2+TK57Xbl6w3gLFwf4amj59er6TXgM88sgjrFmzhvr16/PYY48RFhaGl5cXycnJfPTRRzdkxY383h8mk4kuXbrwxx9/0LJlSwYNGkTFihXR6/XExsaycOHC64qvcuXKHDlyhHPnzrlMkjl58mTHF8ply5bRr18/NzXkH3tiYiJt27bl1KlT3HbbbXTv3p3g4GB0Oh1//fUXq1evdord/r7M28vkWufI77wA//nPfwp8DBT8HqCs6jNqQr77NBrnz7QHhjvPt6GUYsuKeeRkZ+IXEOy0r0JoZTLSUqhUpQbd+gxzmeQ7r3v6j4ESuwPMFRAQwKOPPuq4L0pJSeHVV1/l008/Zfjw4Zw9e7bQ92B2oaGhaLWuU2LaX5/213BSUhJKKc6ePes22ZBf/XnrutJ3333HY489hr+/Pz169CAyMhJfX1/HJMT5zWmSX32VK1cmNjaWtLQ0lx5Zdva/07///su///5bqOsQ5YskTcRNx+jl2vW3oGWPxiZyNC4Rfx8vtFdkQnRaDf4+XhyNSyT2XCq3RLofMgJgKEQMhXXbbbexbds2Nm/eTLdu3UrkHIGBgVy6dMntPntXZ3c3X1fWAbkTtBa0S2OTJk34/vvvMZvN/P777/z88898/PHHPPbYY1SpUsWl+/jmzZvp3r0799xzD+vXr+fWW291qbNLly7ccsstfP3117zzzjvMnz8fq9VapiaA1RkL/uv2lWVTjx4j7dgx9P7+LokQjVaH3t+ftGPHyIyNI/CW/BMW2nx+pSkOhX1N219bFy9edLs/v9fooEGDeO655/j2228ZNWqUY2jOlV3iP/zwQ5KSkli0aJHL
sJ3Ro0ezfft2t+ctzJKUly5dYvbs2TRr1ozffvvNqcfMhQsXrnoTWhD2bs5nz569Zln73+mFF15g2rRp13VeT9PqCj7cLm/Z9ORYMlJi0el9XL4w2HubZKTEkpV+Dv/gyGKL4Xr5+PgwZcoUhg8fzpQpU9wO79i/fz9r1qyhR48erFu3zml4zJ49e/joo48KdU773yfvL8B29i9U7uT3/li9ejV//PEHw4cPd/QCtFu2bJljuGhR3XrrrWzbto2tW7cW+TMzv9i/+uorTp06xVtvveUyVPDdd991mTTa/r68dOmSS0+1/Nozd+zv2dTUVMdwPAF6r4J/Tl1Z9nzcCc6fisbo7ev2s9Lo7cv5U9EknD9NRE33KzDl1nvjh/wGBQUxa9Ys1q1bR1xcHIcOHSrSPRjkDte02Wwu7aD99Wl/Ddvrb926NQcOHChUvPm9nyZPnoy3tze///67Sw/JZcuW5Vtffu+dixcvotForvoesV9Hnz59+P77768VuijHZPUcIQpI5Vkxx9vgPunhbdB5fCWdIUOGoNPpmDNnjqPbYH6K+utcy5YtyczMZN++fS777OO/rzWLfvv27YH/LZFXGF5eXnTo0IEpU6bw8ccf5/7brF3rNs4tW7ZgMBi455578v2VfOTIkcTHx7Nq1SrmzZtHhQoV6NOnT6HjKmtUnhVztPkkXrRGo8dX0insa/qWW27B29ub/fv3u10ZKb/XaL9+/dDr9SxevJisrCxWrlxJ3bp16dChg1M5+9KGDzzwgNN2pdRVe2IUxsmTuW1I9+7dXYYY7dyZ/7xMBeXv70+jRo2IiYlxDK/JT9u2bdFoNEV6r5YHKs+KOVqd+y9dWp2h1K6kM3jwYBo3bsyXX37JiRMnXPbbX889e/Z0mU8kv9eaTqfLt+dBhQoVAPcJOXvX/cLI7/12tfgKY/DgwWi1WubMmUNCQsJ115dXYWNv3rx5vvsKc632z1f7EIySoNVqy0Xvk4JQeVbM0Rvcf1bqDcZStZLOlTQaDX5+fo7nRb0Hs1gsbo+xvz5btmwJ5PZ2adiwIUeOHCm2oV3R0dE0bNjQJWFy/vx5Tp48me9x7t47cXFxnD59msaNG+c7NAdye6EGBgZy4MCBAq20I8ovSZoIUUDH4pI4HJOIn7c+3yy4RqPBz1vP4ZhEjsUl3eAIc9WtW5fx48eTkJDAvffeS0xMjEuZ7OxsZsyYcdUx7ldjn3BvwoQJTh8ip0+fZsaMGej1ercTVObVrl072rdvz9KlS1m+fLnLfpvN5vSL/e+//+52yI/9FwTvfObcaN68OVu2bMFoNHLPPfc4LTWb93q8vb15/vnnOXnyJIMGDcq3vvIk7dhx0g4fQefnd9XXtM7Pj7TDR0g7VrBlDItbYV/TBoOBfv36kZCQwNSpzhP/rV+/ng0bNlC3bl2XnklhYWHcfffd7Nq1i5kzZ5KamurSkwRw/AJ85Wvp3Xff5Z9//rney3U6x+7du53mMTlz5gwTJuTfxbwwxowZg9Vq5amnnnKZZyE7O9vRLTk8PJy+ffuye/duPvjgA7dfCPbu3es2QVUeZKTEkZ4cg07vOsm3nb23SXpyDBkp7ruIe4pOp+Odd97BbDa7bfPzez3/+++/Lu8fu5CQEBISEhzL0ObVunVrNBoNy5Ytc9ofFRVV6F4rV4tv+/btLstoF0X9+vUZP348ly5d4t5773WbWIKizemRX+zffPMNP/30k0v5/v37o9PpmDFjhlNvztTUVLfzcuXnqaeeQq/XM3bsWE6dOuWyPzk5uUgJrLxCQkI4c+bMddVRVlw4Fc252KjcXiZXaQOM3r6ci43iwqnoGxxhri+++IL9+/e73bdq1SqOHDlCcHAwTZo0KfQ9WF6vvvqq05LVZ86c4aOPPsJoNDpNFvzMM8+QmZnJiBEj3A5fiYmJITY2tsDXV7NmTU6cOOHUcyQ7O5snn3zyqgmNr7/+mr///tvxXCnFq6++itVqZciQIVc9
p16v58knnyQuLo4XX3zR7Xn++eeffHtfi/JDhucIUQD2XibZORZ8DEZM5vx/XdFpNWTnWFi76yQNalYoVJf84vL222+TnZ3Nhx9+SIMGDejWrRtNmjTBy8uLmJgYNm3axOXLlwt1E5bXoEGDWLlyJatXr6ZZs2bcf//9ZGRksHz5chITE5k+ffpVJ2i1W7p0KV27duXxxx9n5syZtGrVCh8fH06dOsVvv/1GfHy846Z70aJFfPHFF3Tq1Ik6deoQGBjI4cOH+emnnwgJCbnqqiTNmjVjy5Yt3Hnnndx777389NNPThODhYSE8OijjzqGY5SloTlFZe9lYs3OwcvbB1ueG6ArabRaLNk5nP/pZwIa1C8Tr+n33nuP7du38/bbb7N7927at29PbGws3333Hb6+vsyfP9/tuOxBgwbx008/MWlS7koG7pImo0ePZv78+fTp04e+fftSsWJF9uzZwx9//EHPnj2vOj68oOwr8axYsYI2bdpw5513cvHiRdauXcudd97p+AX7ejz55JNs376db7/9lnr16tG7d28CAwM5deoUGzZs4KuvvuLBBx8EclfaOXbsGOPHj2fRokV07NiR4OBgTp8+zYEDB4iKiuL8+fNuJ94ty+y9TKxWE146IzZr/jfmGo0Wi9VE/Knd+AXV9Mj7JD+9e/fm9ttvd5s0bteuHe3atePbb7/l/PnzdOjQgVOnTvHjjz/Ss2dPt13Su3XrxoEDB7j33nu54447MBgMdOrUiU6dOlGlShX69evHN998Q+vWrbnnnnu4dOkSP/zwA/fccw8rVqwoVOy9evUiMjKS999/37GSx7Fjx1i7di0PPfRQsXSZ/89//oPJZGLGjBnccsstdOrUiebNm+Pr68ulS5f4+++/2bdvH/7+/tfsRZnXoEGDeO+99xg7dixbt26lZs2aHDx4kM2bN/Pwww+zcuVKp/J169bljTfeYNKkSTRr1oy+ffui1+tZsWIFzZo149ixYwU6b5MmTfj000958sknadCgAffddx916tQhLS2NkydPsn37doYMGcLnn39emD+Tk27duvHtt9/y4IMP0rJlS3Q6Hb1793ZapaQ8sPcysZhy8DIYsZiv8VlpyuHQns2E16hzw9uAn3/+mdGjRzt+FKhSpQoZGRn8+eef7Ny5E61Wy6effuqYG6sw92B2ERERZGRk0KxZM3r16kVGRgbffvstly9f5uOPP3aaJ2vUqFHs2bOHhQsXsmvXLrp3706VKlW4ePEiR48eZe/evXzzzTdERkYW6PrGjh3L2LFjadmyJY888ggWi4WNGzeilKJ58+aOiZSv1KNHDzp27Mjjjz9OpUqV2Lx5MwcOHKBDhw6MHTv2muedMmUKf/zxBx9//DHr1q2jU6dOhIWFcfbsWQ4dOsTBgwf57bff3M5FJMoPSZoIUQAWqyIhORtvo54s07W7o3ob9VxOycZiVXjpb/yNs1arZcaMGfTv35/PPvuMHTt2sGPHDmw2GxEREfTo0YOhQ4fSvXv3ItWv0Wj4/vvv+eijj1i4cCGffPIJBoOBVq1aMW7cOHr37l2gemrVqsWff/7JjBkzWLVqFfPnz0en0xEREUGnTp145JFHHGX79etHdnY2u3btYt++feTk5FCtWjWefPJJXnrpJbdLZubVtGlTl8RJp06dHPsHDx7MokWL6NChg8vyeuWRslgwJVxG523Elp11zfI6byOmhMsoiwWNB8ZkF/Y1XalSJfbu3ctbb73F6tWr2blzJ0FBQTz44INMmjQp33/jBx54gMDAQFJTU+nYsSN16tRxKdOyZUt++eUXXn/9dVauXIlOp+PWW29l165d/Pjjj8WSNAFYsGABkZGRrFixgk8++YQaNWowbtw4Xn755WL5omjvDXD33Xczd+5cvv76a5RSVK1alb59+zqtpBESEsLu3buZNWsWy5cvZ8mSJdhsNsLDw2nevDkTJ07Md+WTskwpK6acFHQ6AzbrtYcz6nQGTDkp
KGVFoyldt1jvvfee22WDdToda9eu5ZVXXmH9+vXs37+fevXqMW3aNO699163r7WJEyeSlJTE2rVr2blzJ1arlUmTJjna1Llz5xIaGsry5cuZPXs2DRo0YM6cOVSpUqXQSRN/f3+2bNnCSy+9xI4dO9i2bRuNGzdmyZIlVK5cuVjeC1qtlunTpzNw4EA+//xzduzYwf79+8nJySEkJITGjRvzwQcf8MQTTxTqi1G1atXYvn0748ePZ9OmTVgsFlq1asUvv/zC6dOnXZImAG+88QZVqlThww8/5IsvviAsLIzHH3+cN998s1BJyREjRtCiRQtmzJjBjh07WLNmDUFBQdSoUYPnn3++yEs029l7DW3ZsoU1a9Zgs9moVq1auUua2KxW0pMT0RuMmE3XbgP0BiPpKUnYrFZ0+hvbBtjf4xs3bmTHjh2cP38eyJ3we/DgwYwdO9apXS/MPZidwWBg48aNvPLKKyxatIjk5GRuueUWPvnkE5eJku0TtN533318+eWXrF27lvT0dMLCwhxtTGHuQ8eMGYOXlxeffPIJX375JcHBwfTs2ZOpU6e6XQzAzn5fOnPmTE6cOEFISAjPPvssb7311lWH5tgZjUZ+/vlnvvrqK77++mtWrFhBTk4OlStXplGjRowePdrtEsmifNGo0jjwTojrkJ2dTUxMDLVq1SrWIRZJadmkZxZ8PGOAr4HggNKz/KS4umnTpvHSSy/x1VdfMWzYME+Hc0OYkpOxpKcXuLzePwBDcFAJRiRE6WPOScNquXZi0U6n98XL6F+CEQkhbqTMtBRysgo+/NDo64ev/9Unwy+L7D1CCjOkRvxPSX0/ETdG6foZRIhSrEKANxUCpJErj7Kzs5k1axYVKlRwGo9b3hmCgzEEB3s6DCFKNS9jAF5GWYFEiJuVb0AQvgHyg4EQNzNJmgghblq//vor27dvZ8OGDcTFxTF16tRyNyeDEEIIIYQQougkaSKEuGlt2rSJKVOmEBoayvPPP8+LL77o6ZCEEEIIIYQQpYjMaSLKHRkzKIQQQgghhCgt5PtJ2ea63qIQQgghhBBCCCGEkKSJEEIIIYQQQgghhDuSNBFCCCGEEEIIIYRwQ5ImQgghhBBCCCGEEG5I0kQIIYQQQgghhBDCDUmaCCGEEEIIIYQQQrghSRMhhBBCCCGEEEIINyRpIoQQQgghhBBCCOGGJE2EEEIIIYQQQggh3JCkiRBCCFGMunTpgkaj8XQYQgghhBCiGEjSRIhy7Pfff2f48OHUq1cPPz8/fHx8qFOnDoMGDWLjxo2OcpMnT0aj0aDRaHjxxRfzre/ll192lJs8ebLL/nXr1tGzZ0/CwsLw8vIiNDSUJk2aMGzYMFavXn3VWN988000Gg1eXl5cuHChyNcsyp/Y2FjH6y48PByLxeK23JEjRxzlIiMji3y+IUOGoNFoiI2NLXIdQnhC3vdKjx493JbZs2cPGo2GIUOG3NjghBAlKu/73/4wGAxUr16d/v378/fff3s6RCHKLL2nAxCiLDtxOZYfjqznoYb3ULdipKfDcbDZbLz44ot8+OGH6PV6unXrRu/evfHy8uLkyZOsW7eOxYsX8+abbzJx4kTHcXq9nsWLF/Puu++i1zs3DxaLha+//hq9Xu/2S+uUKVOYPHkyvr6+3H///URGRmKxWPj3339Zvnw5x48f54EHHnAbr1KK+fPno9FosFgsLFy4kJdffrl4/yiiQM6eSuLXzSe4/c66VK1RwdPhONHr9Vy8eJGffvqJ3r17u+z/6quv0Go9/1vA119/TWZmpqfDECUoJjmDn6IvcF+dcGoF+3k6HLd++eUXtmzZQrdu3TwdihDlhslkYtOmTWzbto3k5GSCg4Pp0qUL3bt3x2AweDo8AOrUqcPAgQMBSE9PZ8+ePSxdupSVK1eyefNmbrvtNg9HKETZI0kTIYpIKcVPx7fy76XjGHVGxnYYUmq65L/++ut8+OGHtGjRgu+//546deo47c/K
ymLWrFlcvnzZafu9997LmjVrWLt2LQ8++KDTvp9++okLFy7Qu3dvfvzxR6d9sbGxvPnmm1SvXp09e/ZQpUoVl/Pt3bs333g3b95MbGwsI0eOZNmyZcybN0+SJh6glGLvzlhiT1zGy6Dnof7BpeY1DXDrrbdy8OBB5s2b55I0sVgsLF68mO7du7N9+3YPRZirRo0aHj2/KFlKKTbHXuLY5TSMOi3Dm0eWqvcJQGRkJKdOneLll19m3759pS4+IcqiHTt28MorrxAXF4fVakWj0aCUYsmSJdSsWZN3332XTp06eTpM6tat69Ib+PXXX+c///kPr732Gtu2bfNIXEKUZZ7/SU6IMirqcgxHE07grTdyNOEEUZdjPB0SACdOnOD999+nYsWKrF+/3iVhAuDj48NLL73ElClTnLY//PDDBAcHM2/ePJdj5s2bR4UKFXjooYdc9u3btw+bzcbDDz/skjCxn69Lly75xvzVV18BMHLkSB599FGOHz/Ozp07r3WpopidiUvmdEwiBqOe0zGJnIlL9nRITnx8fHj88cdZt24dly5dctq3du1aLl68yLBhw1yOO3fuHJMmTaJDhw6EhYVhNBqJjIzkqaeecqknMjKShQsXAlCrVi1HF+e8r1/787Nnz/LEE08QHh6OVqt13IheOadJTk4OLVq0QK/Xs2vXLqfzXW2fKJ1OJmcQlZiBt05HVGIGJ5MzPB2SiwYNGjBo0CAOHDjAt99+W6Bj4uLiGD58OFWrVsVgMFCtWjWGDx/OqVOnXMraX+Nms5nJkycTGRmJ0Wikfv36fPrpp27rV0oxb948brvtNgIDA/H19aVNmzZuP2+EKG127NjBiBEjiImJoWLFilSrVo2qVatSrVo1KlasSExMDCNGjGDHjh2eDtWtsWPHArB//34GDhyIRqNh3759bsu+8cYbaDQali5d6rT94MGDDBgwgGrVqmE0GomIiOCee+5hzZo1jjILFixAo9GwYMEC1qxZQ/v27fH19aVq1apMnDgRm80GwMKFC2nevDk+Pj7UqFGDDz74wCWOwnx2C1HSJGkiRBEopVgftR2z1UyQMQCz1cz6qO0opTwdGgsWLMBqtTJq1CgqV6581bJGo9Hpube3N/369ePnn3/m4sWLju0XL15k3bp19OvXD29vb5d6KlasCEBUVFSh401MTOSHH36gUaNGtG7dmieeeAL4XyJF3BhKKfbvisViseLnb8BisbJ/V2ypeE3nNWzYMCwWC4sWLXLaPm/ePEJCQlx6SEHuze706dOpXLky/fr1Y+zYsdSpU4fPPvuMjh07kpKS4ij73HPP0bx5cwCeffZZJk2axKRJk1zmf7h8+TIdO3bk77//5vHHH2fkyJEEBga6jdloNLJ06VIMBgMDBgxwOt/48eM5ePAgEydOlC7TZYBSiq1x8VhsNgIMeiw2G1vj4kvd+wRy54kyGo28/vrrmM3mq5Y9fvw4bdu2Zd68ebRu3ZoXXniBli1bMm/ePNq0acPx48fdHtevXz/mzZtHjx49GD58OImJiYwZM4Yvv/zSqZxSigEDBjB8+HDi4+Pp378///d//0dGRgbDhw+/6lxaQniayWTilVdeITU1lSpVqrgMwzEYDFSpUoXU1FQmTJiAyWTyUKTXptFoGDVqFABz58512W+1Wpk/fz4VK1bk4YcfdmxfsWIF7dq147vvvqN9+/a88MIL9OzZk7Nnz7q9X/vhhx/o27cvtWvXZvTo0fj7+/P222/zxhtv8MEHH/DMM8/QvHlzRo4cic1mY/z48Xz99ddOdRTms1uIEqeEKGeysrLU4cOHVVZWltv9OeacfB9mi7lAZf+5cFQ9+eME9ey6SerlDe+oZ9dNUqNXT1D/XDjqtrzJYnKu12JyW644dOnSRQFq06ZNBT5m0qRJClBLly5VBw4cUIB6//33Hfvff/99Bajff/9dLV26VAFq0qRJjv1paWmqRo0aClA9e/ZUixYtUseOHVM2m+2a
5/74448VoKZOnaqUUspms6nIyEjl6+urUlJSCn7hNzGTyZLvw2K2Fqjsyah49eGbG9WsqVvUnBk71KypW9SHb25UJ6Pi3ZY3my1O9Zrzqbc4xMTEKED16NFDKaVUkyZNVOPGjR37z58/r/R6vRo7dqxSSimj0ahq1qzp2H/x4kWVlpbmUu/ChQsVoN5++22n7YMHD1aAiomJcRsPoAA1dOhQZbG4XmPnzp2Vu4/Xzz//XAHqscceU0optW7dOgWo22+/3W09ovjlWKz5PsxW6zXLHklIVeM3/61e23pIvbXziHp96z9q/OZD6khCar71mixXvAfzKVccrnyvvPjiiwpQn3zyiaPMb7/9pgA1ePBgx7auXbsqQH3xxRdO9c2ePVsBqlu3bk7b7a/x9u3bO7XTR48eVXq9XjVo0MCp/Jw5cxzvGZPpf5+HOTk5qlevXgpQBw4cuO7rF+JqMjMz833k5OTkW3blypUqIiJCNW7cWLVs2VK1atVKtW7d2vFo2bKlatmypWrcuLGKiIhQK1eudBybnZ3tVG9WVpbb8xeHK9//eb3xxhsKUF27dlVKKdWoUSMVEBCg0tPTncqtXbtWAeq5555zbLtw4YLy8/NTfn5+6o8//nCp+/Tp047/nz9/vgKUl5eX2rdvn2N7amqqCgsLU76+vio8PFxFR0c79p06dUoZDAbVtGlTp3oL+9ld2l3r+4ko3WROE3HTeWPL9Hz3NQitw9BWfR3P39r+MWar8y90SiniMy9jspqpGhAOgFFn4FJ6AtN3z6GSb0WX8ePVAiN4usMQx/MZu78kOcs1Q/7u3ROKcklO7CvPVKtWrUjHt27dmmbNmjF//nxeeuklAObPn0/z5s1p1aqV218c/f39WbVqFYMGDWLdunWsW7cOgKCgIO644w6GDRvmdlgP/G/yTvukZRqNhoEDB/L222+zbNkyRo4cWaTruJksmLU7333Va1XgngebOJ4v/nwPFovNqYxSiuTELCxmK6GV/QHwMuhITszkuwW/Exzi4/KarlTZnwf7t3Q8/+7r30lPzXE5/4jn7yjSNV3NsGHDGDduHHv37qV9+/YsXLgQi8XidmgOQFhYmNvtgwYNYuzYsWzatInXXnutUDEYDAbef/99dDpdgY8ZNWoUGzZsYPny5bRo0YIZM2YQHBzMkiVLClWPKLr3f3PfYwKgbogfjzeq7nj+4b4ozNb/9SBRSpGQZSLbasVHp8Vbr8eg05JpMfPlnzEEG73czh0SEeDN8OaRjuef/3mSlGzXybRfv/2WIl5V/l599VXmzp3LW2+9xZAhQ/D393cpc+rUKbZu3UqjRo0YMWKE077Ro0fzySefsGXLFk6fPk316tWd9k+dOtWph1WDBg247bbb2L59O2lpaQQEBAAwa9Ys/Pz8mD17Nl5eXo7yBoOB//znP6xZs4alS5fSunXr4rx8IZzccUf+n0e33XYbH330keP5XXfdRXZ2NgBnzpzh8uXLjp4Nvr6+1KxZ01H2xIkTWK1WILdXypNPPum4B2vUqJFTD4pHH32U8+fPu5z/wIED13Flzk6cOOGY0yQjI4O9e/eyc+dOvL29+c9//gPkfh49++yzLFu2jOHDhzuOtfc+ydsWLFy4kIyMDN544w1atvzf576du/vNgQMH0rZtW8fzgIAA7r//fsecdbVr13bsq169Orfffjvbt2/HYrE4FiIoic9uIYpKhucIUUg5VhPZlhz0Gr3jBlmj0aDVasm25JBjLb3dMgtq2LBhHDlyhN9++43ffvuNI0eO5PuF1K5ly5YcOnSIXbt28fbbb/PQQw9hMBhYu3YtDz/8MAMHDnTpwn7gwAEOHjxI165dnT50ZYjOjWU2WTGbLOj0WqfXtEarxWyyYDZZPRyhs4EDB+Ll5eWYC2H+/Pm0bNmSFi1a5HvMypUr6dGjB5UqVUKv1zves6mpqZw7d67QMdSqVYvQ0NBCHzd37lyqVq3KhAkTiI+P54svvpCJ
Y8sIk9VGjtWGFo3T+8RXryPTYsVktV2jhhuvQoUKvPLKK1y6dIlp06a5LfPXX38B0LlzZ5ekj1ardUxsaS+Xl7skh70tT05OBiAzM5NDhw4RHBzMe++9x+TJk50ey5YtA+Do0aNFuUQhSpw9IVJS5YtbdHQ0U6ZMYcqUKXz00UfExMTQv39/9u3bR8eOHYHc+ywfHx+noXQXL15k7dq13HrrrTRq1Mix3T73yd13313gGNx9HkdERFx1n9VqdRoaDsX/2S1EUUlPE3HTebPbC/nu02qc84gTOz/j9FwpxWf7F5FhyqSCT5DTvir+lUnKTiEyuBpPth3kdPN55Y3ouFtHQAmNgQ8PD+fo0aOcPXuWBg0aFKmOgQMHMn78eMeXUvtcDNei0Wi49dZbufXWW4Hcv9fq1at54oknWLJkCX369HHqcWJPitiTJHb16tWjQ4cO7Nmzh3///ZfGjRsX6TpuFkOevjXffdorXnsDR3dweq6UYs3yv8nOMuMf6DzHTcVKvqSn5hBeNYhejzW74jXtfJ5Hn2jNjZrVoVKlSvTq1Ytly5bx6KOPcuzYMT755JN8y0+fPp0XX3yRSpUqcffdd1OtWjV8fHwAmDlzJjk5rj1kruVa8wXlJyQkhE6dOrF06VKqVauWbw8sUTLGd6yf7z7tFa/p59vVc/y/UoqFh06RYbESbNQ7vRcMOi0+eh3Vg3wZ3LSGS3t/Zd+T0S1r37D3CsAzzzzDrFmzmD59Ok899ZTL/tTUVCD/17T9i469XF7u5vGx/0ps/+KYlJSEUoqzZ8+6TD6eV0ZG6ZtQV5QvV5tg/srefhs3bnT8/2uvvcaSJUuoWrUq4HpPV7duXcf/nz17lgEDBjh6dGi1zveV3333XYnPgdSjRw/Wr19/1TLBwcH07duXhQsX8s8//9CkSRMWLFiAxWJx6XFm72Fjv/6CuFrbcLV9eedfKonPbiGKSnqaiJuOQW/I96HX6a9aNi7lLMcvx+Bn8HVJsGi1WvwMvhy/HENcylmn47x0Xs716rzcnr842CeT3Lx5c5HrqFixIg888ADLly9n+fLlPPjgg47JXgtDo9Hw4IMP8vzzzwOwZcsWx76srCzHzOyDBw92rFJif+zZsweQ3iYF4eWly/eh02uvWvbiuTTOxCXh7ePlcnOn1Wrx9vHiTFwSF8+lOR2n1zvfYOrzOX9JGT58OKmpqQwZMgRvb+98k3oWi4W33nqLiIgI/vnnH5YsWeL4tXvSpElFnrCvqEu4rlixgqVLl1KxYkXOnDkjXYtvMINOm+9Df8XrP+++M2lZRCdl4KfXodVo0eCcFPfT64hOyuBMWpZLvV66K96D+Zy/pPj4+DBlyhTS09PdJi3sX2Cu/IXXzj7kM7+Jjq/Fflzr1q1RSuX72Lp1a5HqF6KgfHx88n1cOblr3n3du3dHr9djsVjQarVue2RptVrMZjN6vZ7u3bs7jnU34b6783vC6NGjARy9Tb766isCAwPp27evU7ng4GAgNyF0o5TUZ7cQRSVJEyEKSOVZMceoc5/gMOoMHl9JZ8iQIeh0OubMmUN8fPxVy14tSz9s2DDS0tJIS0u75tCca3E3jv77778nJSWFFi1aMHz4cLcPb29vFi1aJB+OJUTlWTHHy+A+weFl0JXKlXR69OhB1apVOXv2LA8++CAVKlRwWy4hIYGUlBQ6duzoMj76wIEDZGVluRxj/8WxuLtYnzlzhhEjRlCpUiX+/PNPOnTowLRp064rwSlKnsqzYk5+yQ2DTluqV9IZPHgwjRs35ssvv+TEiRNO++xd5Xfs2OESu1LKsYTq1Ya/XU1AQAANGzbkyJEjjiE7QpQl3bt3p2bNmsTH5//+VkqRkJBAZGQk3bt3v8ERFk2HDh1o1qwZixcv5pdffiEqKooBAwbg6+vrVK5du3YA/PLLLzcstqJ8dgtRkiRpIkQBRV2O4WjCCfwMvvn+yqzRaPAz+HI04QRRl2NucIS56taty/jx
40lISODee+8lJsY1juzsbGbMmOGYKMydu+++m1WrVrFq1Sruuuuuq55z3759fP31145J0/KKj493TCx2++23O7bbe5DMmDGDuXPnun089NBDJCQk8OOPPxbk0kUhnYlL5nRMIt7e7iewhNzXtLe3F6djEjkTl3xjA7wKnU7HqlWr+OGHH5g6dWq+5cLCwvDx8eGPP/4gMzPTsT0pKYmxY8e6PSYkJASA06dPF1u8NpuNgQMHkpSUxPz586levTpLliwhICCAJ554goSEhGI7lyheJ5MziErMwFevu+r7xFevIyoxg5PJpW+YiU6n45133sFsNru0+zVq1KBr1678+++/jiGZdnPmzOHIkSN069bNZRLYwnjmmWfIzMxkxIgRbofhxMTEEBsbW+T6hShJBoOBd999l8DAQM6dO+fyQ05OTg7nzp0jMDCQqVOnuvRaKc1GjRpFYmIiQ4cOBXAZmgO5SVd/f3+mT5/udm6jkuiBUpTPbiFKksxpIkQB2HuZ5Fhy8NYbMV2xok5eWo2WHEsO66O2U69irSJ3478eb7/9NtnZ2Xz44Yc0aNCAbt260aRJE7y8vIiJiWHTpk1cvnyZt99+O986tFotDzzwQIHOd+7cOQYPHszTTz9Np06duOWWW9Dr9cTFxbF27VrS09Pp2bMnjz76KJA7s/uOHTuIjIykS5cu+dY7dOhQli5dyldffcUjjzxSqL+BuDp7LxNTjgUvgxGLOf9eFRqtBlOOhf27YqlWM9gjr2l32rRpQ5s2ba5aRqvV8tRTTzF9+nSaN29Or169SE1N5eeff6ZmzZpUqVLF5Zhu3boxbdo0Ro4cSZ8+ffDz86NmzZoMGjSoyLG+8847bN++naeffpqePXsCULt2bWbPns2gQYMYNmyYJAdLIXsvkxyrFW+dF+arTPaq1WjIsVrYGhdP7WC/UvM+sevduze33347v/76q8u+zz77jNtvv50RI0awZs0aGjVqxL///suPP/5IpUqV+Oyzz67r3KNGjWLPnj0sXLiQXbt20b17d6pUqcLFixc5evQoe/fu5ZtvviEyMvK6ziNESenUqRNffvklEyZMIDY2FqvVikajQSmFTqejdu3aTJ061TFxcllhn8Pu3LlztG7d2u3qOGFhYXz99dc8/vjjtGvXjt69e9OgQQMSEhLYu3cvkZGRrFq1qljjKspntxAlSZImQhSA1WblclYiRr2RbMu1J54y6o0kZiVhtVld5km5EbRaLTNmzKB///589tln7Nixgx07dmCz2YiIiKBHjx4MHTq02LqQ3nnnnSxevJgNGzbwxx9/sGvXLtLT06lQoQLt27enf//+DB482DFnxrx581BKOeYyuVq91atX55dffnG73KUoOptVkZqchcGoL9DqOAajnrSULGxWhU5fur4MXsvUqVMJCQlhwYIFfPrpp1SuXJl+/foxefJkmjRp4lL+3nvv5f333+fLL79k+vTpmM1mOnfuXOSkyZ49e5gyZQpNmjThgw8+cNo3cOBA1q9fz5IlS5g9ezZjxowp0jlEybAqRWKWGaNOR3YBVscx6nQkZZuxKoW+lCVNAN577z3HvFd5NWjQgAMHDjBlyhTWr1/PunXrqFSpEkOHDmXSpElOy6sWhUajYcGCBdx33318+eWXjmR6WFgY9erVY9q0aWVmSIO4eXXq1ImtW7eyadMmtm3bRnJyMsHBwXTp0oXu3buXqR4mdoGBgTz00EMsXrzYbS8Tu4ceeoi9e/cydepUtm/fzo8//khoaCgtWrS46nHXo7Cf3UKUJI0qjYNvhbgO2dnZxMTEUKtWLby9vYut3uTsVDJMmdcu+F/+Bl+CvIs2cZ4QN0J6Wg7Zmfn3mrqSj68XfgHGaxcUohxJzTGTcZWeWFfy89IRaPS6dkEhhCgFmjZtSkxMjGOIkSgZJfX9RNwY0tNEiAIK9g4kWJIgohzxDzDiL0kQIa4q0OglSRAhRLn0888/888//zBy5EhJmAhx
FZI0EUIIIYQQQoibxGeffcbp06eZO3cu3t7evPLKK54OSYhSTZImQgghhBBCCHGTeO+99zhz5gwNGjRg3rx51KpVy9MhCVGqSdJECCGEEEIIIW4SssS3EIWj9XQAQgghhBBCCCGEEKWRJE2EEEIIIYQQQggh3JCkiSi3ZDVtIYQQQgghhKfJ95KyTZImotzR63On6rFYLB6ORAghhBBCCHGzM5vNAOh0Og9HIopCkiai3NHpdOh0OlJTUz0dihBCCCGEEOImppQiJSUFo9GIl5eXp8MRRSCr54hyR6PREBYWxvnz5zEajfj5+aHRaDwdlhBCCCGEEOImoZTCbDaTkpJCeno6VatW9XRIoog0SgZYOXz44YfMnTuXuLg49Ho9rVq1YurUqbRv397ToYlCUkpx4cIFUlJSZAyhEEIIIYQQwiOMRiOhoaEEBgZ6OhRRRJI0yWPlypX4+flRt25dcnJymDlzJt9++y3R0dFUrFjR0+GJIrBarY4xhEIIIYQQQghxo+h0OhmSUw5I0uQqUlNTCQoKYtu2bXTu3NnT4QghhBBCCCGEEOIGKlMTwS5evJhRo0bRpk0bjEYjGo2GBQsWXPWY/fv3c9999xEcHIyfnx8dOnTg22+/vea5TCYTc+bMoUKFCjRt2rSYrkAIIYQQQgghhBBlRZmaCPb1118nLi6O0NBQIiIiiIuLu2r5rVu30qNHD7y9vXn88ccJCAhgxYoVPPbYY5w+fZoXXnjB5ZidO3dy7733kpWVRXh4OBs3biQkJKSkLkkIIYQQQgghhBClVJkanrNp0ybq1atHzZo1effdd5kwYQLz589nyJAhLmUtFgu33HILZ86cYc+ePbRo0QKAlJQU2rVrR2xsLMePH6dmzZpOx2VlZXH27FkuX77Ml19+ydatW9m7dy+hoaE34AqFEEIIIYQQQghRWpSp4Tndu3d3SXLkZ8uWLURHR9O/f39HwgQgKCiIV199FZPJxMKFC12O8/HxoW7durRv3565c+ei1WqZP39+cV2CEEIIIYQQQgghyogylTQpjG3btgFw9913u+zr0aMHANu3b79mPUopcnJyijU2IYQQQgghhBBClH5lak6TwoiKigKgXr16LvvCw8Px9/d3lLF7+eWX6d27N9WqVSMxMZFPP/2UM2fO0KdPH7fnyMnJcUqo2Gw2EhMTqVixIkoprFZrMV6R8BSdTodWW27zi0I4KKVIS0ujSpUqHnnNP/nkk3z++ed88sknPP300wU+zmazce7cOQICAoDc4Zni5qTX69FoNJ4OQ4hC8XTbW1TS9opr0Wq16HQ6T4chhFuFaXvLbdIkJSUFyB2O405gYKCjjN25c+d4/PHHuXTpEiEhIbRt25adO3fSsGFDt3VMnTqVKVOmuGzXaDQEBwej15fbP+9NxWazkZKSIjcD4qZx+vRpqlWrdkPPuXbtWn777TeqVKlS6GPPnTtH9erVAahQoYK0vTcppRRms9nls12IssITbW9RzJ49m9mzZ2MymYiOjgZw3PdK0lLkZbPZSE9Pl177olQrSNsrd5Z5LFq0qFDlJ0yYwLhx4xzPU1JSqFGjBi+//DLdu3d3ZN6vRSnFmTNnqFatWqn5sCmNMRVGccZvMpnYunUrQUFBDBgwoJgiLFlms5mtW7fStWtXvLy8PB3ODSfXX7TrT0tLo1atWgVuu4rLxYsXefLJJ/npp5/o1atXoY+3xzt+/HjuvPNOQkJCCvS+t1qtnD59murVq5eaX8JKY0yF4cn4lVKcP3+e3377jbvvvptWrVrd0PND7nvvl19+4e67775p2x65/sJff2pqKtWrV7/hbW9RjRkzhjFjxpCSkkJwcDAvvvginTt3Jjw8XNpeDymt8VutVvbu3UtycjIjR47Ex8enRM4jbY9cf0m3veU2aWLvYZLfL06pqalUqFDhus5hNBoxGo1O2/z8/Ljtttu48847C1yP1WolKCiIevXqlZqGrjTGVBjFHb+3tzfr
1q0jODi4TPw9zGYzvr6+VKxY8aZtPOX6C3/99rJ5b3oXL17Mzp07+f333zl06BAmkynfVcvs9u/fz6RJk9i9ezdms5mmTZsybtw4+vbt67b80KFDeeaZZ2jatGmBY81Lo9Gg1+tp0aKF23ms8mO1WgkMDCxV7VxpjKkwSkP88fHxJCQkEBgYeMPPbX/vBQYG3tRtj1x/0a6/rP1IZY+3fv363H///QU+rjS0E1cqjTEVRmmOv0aNGnz++efk5ORQuXLlEjmHtD1y/SXd9pbbpIl9LpOoqChat27ttO/ChQukp6fTrl27Yj+vVqvFYDAUe73Cs7y9vR3z1JS2DyMhStLrr79OXFwcoaGhREREEBcXd9XyW7dupUePHnh7e/P4448TEBDAihUreOyxxzh9+jQvvPCCU/lZs2aRkZHhsr2wdDod3t7e11WHKB+MRqMMpxTiBrryB0Qh8rJ/Nku7LMqysjPbVCF17twZgF9++cVl34YNG5zKCCGEcG/u3LnExsYSHx/P6NGjr1rWYrEwYsQItFotO3bsYM6cOUyfPp2DBw9Sv359Xn31Vaeky9GjR3nrrbdYuHBhmZr8UAghhBBC3DzK7V3qnXfeSe3atfnmm2/466+/HNtTUlJ45513MBgMPPHEEzc8LpvNxgcffEDz5s3x9fVFo9FQs2bNYqn7zTffRKvVcujQIbf7t27dSp8+fahatSoGg4EKFSrQoEEDHn30UWbNmnXdk+fFxsai0Wjo0qXLddXjKefPn8fHx4ennnrK06EIUWp07969wG3Uli1biI6Opn///rRo0cKxPSgoiFdffRWTycTChQsd2/fs2UN8fDx169ZFr9ej1+uJi4vj2WefdTq+OHiq7dVoNNd8XDnUqWHDhtSpU8dtXZGRkdfcVloVJdbJkyej0WhYsGCBY5u01UKUHaWl7dVqtQQFBdGhQwdmzpyJ2WwulhhutOJu84urPmmXRXlXpobnzJ07l19//RXA0UDOnTuXbdu2AXD77bfzf//3f0DusoNz586lR48edOrUyambeFxcHNOmTfPIjeakSZN4++23qVy5Mr169cLHx4cGDRpcd70XL17kgw8+4JFHHnE7L8Cbb77JpEmTgNwb8vbt2+Pl5cWxY8dYuXIl33//PW3atKFDhw7XHUtZFRERwciRI/n000957rnnqF+/vqdDEqJMsbfF7uYV6dGjBwDbt293bHvwwQdp06aNS7khQ4YwdOjQfM9z5XLvqampQO7NeX5Lvb/xxhu88847VK5cmfvvv9+p7bXZbAW4OvfsbW+fPn1o1KhRvue/WpL+1ltvdRyXN5b86nK3vbQscW+P/2p/08LEmrc++3FhYWGMGDGCzz77jLFjx7q01TabzbGSzo1mP2dZ/UJ2veT6i3b95fnv5an7XrvBgwcDue1ObGwsu3fvZu/evaxdu5b169ff0NXWYmNjqVWrFp07d3Z8XpYXcg8tyrsylTT59ddfnX6lBNi1axe7du1yPLcnTQC6du3Kr7/+yqRJk1i+fLljQsL33nuPxx577IbFbZeVlcXMmTMJCgri0KFDVKpUCchtyKOioq6r7nfeeYf09HQmTJjgsu/3339n8uTJeHl58e233/Lggw867b9w4QKLFy8mODj4umIoD8aPH8+sWbOYOHEiy5cv93Q4QpQp9nbMPqdUXuHh4fj7+zu1dcHBwS7tjpeXFxEREdStWzff87hb7t1oNHL+/Hm3bWl2djYzZ84kICCAlStXEhIS4rTfvmRmUdjb3n79+l21HXfXNud15bFms9ltfVduX7duHXq9/ro/Q4qbu79pUWJNTEwEcr8g5T2uT58+zJ49m3HjxvHhhx86HXPhwgUuXrzo0ZVINm7c6LFzlwZy/YW7/szMzBKKxLM8dd+bV95eagB79+6lS5cubN68mWXLljFw4MDriuNGO3LkSLFO9Fmc9ck9tCjPylTSZMGCBS6N37W0a9eOn3/+uWQCKqTdu3eTnp7O
wIEDHR8cxSEzM5OFCxfSpEkTWrZs6bJ/5cqVKKXo27evS8IEcr/MvPjii8UWT1lWtWpVunbtyg8//MDFixdLbJZvIcoj+xA/++plVwoMDLzuYYDgutx7amoqdevWJSIiwm3CZvPmzWRmZjJgwADat2/v2G6z2YiOjqZOnTpFmlMlMzOTH3/8kSZNmvDAAw9ctay7uNyx96zw8vJye8yV2wta741ytb9pUWK1J7gqV67sct32Lz6BgYFObXV4eDgRERHcd999RbyKojObzWzcuJG77rrrpl3BQK6/8Ndv7y1X3njqvvdq2rdvz5AhQ/j888/ZsGFDmUua3HLLLaW2PrmHFuVZuZ3TpDTZtm0bGo2G7t27A7lLeNrHWL722mvXXf93331HSkoK/fr1c7s/Pj4eoFAfWLGxsTRs2JBu3bqRmprKs88+S/Xq1fH29qZhw4Z8+OGHV+1+nZWVxSuvvELNmjUxGo3UrVuX9957D6WU2/KnT5/m6aefpk6dOnh7exMSEsL999/P7t273cZmnzslNTWVcePGUatWLby8vHjuueeKXK9d//79MZvNhU7QCSGuX2xsLE8//fRVyxiNRgIDA50ekLt6mU6nczx27tyJXq93DA1asmSJY+6UN954w/Gl/srjCvpYuXKlo+3Nr4xdQevMm2goSF16vZ46deo4bTt9+jR6vZ4777yTjIwMxo0bR2RkJH5+fjRp0oSPP/4YjUbjUv+RI0cYPHgw9erVw8/Pj/DwcFq3bs0LL7zApUuXXMrv27ePhx9+mPDwcHx9falTpw7PPPMMly5dcvs3dRer/bFu3Tpuv/12AgICCAsLo2/fvkRHR1/132jAgAGYzWYWLVrk8jfU6/V4eXl55AF47Nyl4SHXX7TrL088fd97LY0bNwbg0qVLLvvWrVvHsGHDaNiwIYGBgfj5+dG8eXPeeecdp2Ghef3zzz8MHDiQ2rVr4+3tTaVKlWjRogXPPfcc58+fB3LnZ6pVqxaQO0zV3bxWBbm/vdocJL/88gu33norvr6+hIaG8uijj3LixAm3c0PZXVlf3hgKey8Pcg8tyq8y1dOkrNLr9QwePJht27YRFxfH/fffT8WKFQG49957r7v+tWvXAuQ7AWv16tUBWLFiBRMmTCAsLKzAdefk5NCtWzeio6Pp1q0bJpOJzZs3M27cOA4ePOi2UTSZTNx9990cPnyYLl26kJGRwfbt23nllVdIS0vj7bffdir/22+/0bNnT5KSkmjQoAE9e/YkPj6eDRs2sH79epYsWeJ2OFVWVhadO3cmLi6Ozp0706pVKypUqODY/+effzJmzJhC12v/O65bt46XX365wH8rIW529h4m+fUmSU1NdXqPljRPt72eVpj2+/fff+f2228nOzubZs2a8cADD5CZmcnJkyf56KOPePDBB4mIiHCUX7x4MUOGDMFqtXLbbbdRvXp1/vjjDz7//HO+//57tm3b5vhici2ff/45Tz75JBqNhjvuuIOIiAj27NlDu3bt6NWrV77HSVsthGfMnj2b2bNn5zs/UWlve9PS0gDc3g+PHDmSrKwsmjRpQrNmzUhJSWHfvn289tprbN68mV9++cUpiV3QtrNFixb06dOHFStWULlyZe655x5HHbfffrtTDNe6v3Xn448/Zty4cWi1Wjp16kR4eDh79+69Zjuan8Ley9tJuyzKLSWKTUpKigoICFAbNmxwu79ly5YKUJcvX3babrFY1JEjR5TFYinSeStXrqz0er3KzMx0uz86Olr5+PgoQAUEBKjBgwerL7/8Uv3xxx/5nvPEiRMKUIBq1qyZio+Pd9pXpUoVBagffvjBsT0mJsZxTOfOnVVKSopj3/79+5VOp1O+vr4qLS3NsT0lJUVFREQonU6nFi9e7BTD/v37VYUKFZS/v7+6dOmS2/N07NhRJSUlucSfmJioKlWqVKh68woNDVVGo1FlZWUppZQ6dOiQmjRpksrJyXFbvrQxmUxq1apVymQy
eToUj5DrL9r1p6SkKMDpvZvX1KlTFaDmz5/vdv+ECRMUoJYuXeqy7/z58wpQ3bp1K1RMBZGSkqKMRqNauXKl2/2eanuVUo62qqAsFosCVM2aNd3WdeV2d9vytpEFbb+feOIJBahp06a5nPfIkSPq3LlzjuenTp1SPj4+SqfTqdWrVzu2W61W9cwzzyhAtWnTpkDxx8bGKm9vb+Xl5aXWr1/v2G4ymdSAAQMc15Hfa+7KtloppRYvXuz2NXgjSNsj118SbW9pZY974cKFbveX1ra3U6dOCnC6P7THtHLlSpd6U1NT1f333+/2WgvTdtrb5s6dO7uNqyD3t+7a0ejoaGUwGJSXl5fauHGjY7vZbFZDhw69ajt6ZX1FuZe/0pXtclJSkpo0aZKKjo7O95jrJW2PXH9Jt70yPKcYzJ49m0aNGtG2bdt8y5jNZg4fPkz16tWdJiE8cOAAQ4YMoUePHuj1el5//fVCnfvSpUtcvHiR6tWr4+Pj47ZM7dq1WbNmDdWrVyctLY2FCxcyYsQIWrVqRWhoKE899ZSj+6A706ZNIzQ01PG8Tp06TJw4EYBZs2a5lNdqtXzxxReOLvMAbdq04d577yUzM5MDBw44ts+bN4/z58/z3HPPMWDAAKd62rRpw8SJE0lPT2fx4sVuY/v444/dTmA7f/584uPjeeaZZ4pUb4MGDcjJyeHIkSNu9wshXHXu3BnI7SJ8pQ0bNjiVuVHya3u//fZbevXqxR133EFISAidOnVyrM5WEAVpe/O62pLDq1atKsqlFUhB22/7ME57d/q8brnlFqdeJnPnziUrK4u+ffvSu3dvx3atVsvUqVMJCwvjwIEDTpO052fevHlkZ2fTr18/xzAqyB3e8NFHH+Hr63vV46WtFqJ0yq/tXbhwIe3bt6ddu3YEBgbSqlUrli1bVuB6C9v22tnnW3ryySfZsWMHDzzwgNvexr1793apNyAgwDHh9OrVq532FabtLIz87m/dmTdvHiaTid69e9O1a1fHdr1ez4wZM/D39y/0+QtzL38laZdFeSRJk2IwZswYDh8+zP79+/Mtc+TIEXJycmjRooXT9l27drF3715atWqV7+SJV2Mfj3mtbnt33nknJ06cYOXKlYwePZpWrVqh1+tJTk7ms88+o0WLFhw7dszluJCQEO666y6X7fZxpLt373aZ26RmzZpul5OzLz+WN0Fj/3L18MMPu437jjvuAGDfvn0u+yIiIlyWK7Wzz1z/0EMPFbpe+N/kg/YPQyHEtd15553Url2bb775hr/++suxPSUlhXfeeQeDwXDVpXdLQn5t78yZMwkNDWXixIksW7aMqlWrcuedd3Lw4MEC1VvQttdu8ODB+T5q1KhRqGsqqMK0361btwZyP8+2bduGxWLJt96dO3cCuCSkIXe+GXvyw17uauxlHn/8cZd9FStWdLt8dV7SVgtROuXX9iYlJdG7d2/ee+89Vq5cya233kq/fv0KnDwubNtrT07rdDrq1q3L559/zogRI/jhhx/yXW44KiqKjz76iLFjxzJs2DCGDBnCW2+95diXV2HazoK62v2tO/YEdd4hP3bBwcHXbEfdKcy9/JWkXRblkcxpcoPYv0Bc+eExduxYnn76aaKiotw2dtdinzugIEsrGgwGHnroIUciITk5mWXLlvHqq69y6dIlnn76aZdl8mrWrOm2rqCgIIKDg0lOTiYpKckxVhWgWrVqbo+xx5h3Iq3Y2FgAbrvttqvGnpCQ4LLtal804uLiAOjUqVOh6wUcmfXk5OSrHi9EeTd37lxHD4xDhw45tm3btg3IHYttX+pdr9czd+5cevToQadOnXj88ccJCAhgxYoVxMXFMW3atHwnsCsp+bW9a9asITg4mKioKOrVq0ePHj1o2rQps2fPZs6cOdestzBtL7gue3kjFKb9fumll/j111/Ztm0bXbt2xd/fn44dO9KzZ0+GDBnilNQ/d+4cQL7/llWrVgXg7Nmz14zRXld+
sV7r9SJttRClU35t73PPPedYctje9v71118sWbLE7QqPVyps2zt48GAgd+n5gwcPcvToUb788ktuvfVWxwSsdkopXnzxRT766KN8Jzu1z4diV5i2s6AKm0i3JzDCw8OLpT4o3L38laRdFuWRJE1uEPuvl1d+eGi12nwn0ioIe2N8ZSNeEMHBwYwePZoqVarwwAMPsHXrVjIzM6/ZHfpaCrN0p/1XzkceeQQ/P798y7lbEs3b2/ua9fbp0+eq3RLzW2rN/qFc0K6RQpRXv/76KwsXLnTatmvXLqehF/akCUDXrl359ddfmTRpEsuXL8dsNtO0aVPee+89t12hr8e1JiOE/NveihUrOh2n1Wpp0qQJMTExBTr39bS9pVFgYCBbtmxh165drFmzhm3btrFlyxY2btzI1KlT2blzZ4GXDNZoNCUc7f9IWy1E6ZRf2+tOxYoVMZvNBaq3sG3vlQnrDz74gPHjxzNmzBi6du3qlLD96aefmDlzJtWrV+fDDz+kY8eOVKpUCS8vL0wmE0aj0SWZUpxtp93V7m9vlMLcy19J2mVRHknS5AaxZ9ybN29erPXaZ/5OTEwsch3dunUDwGq1kpyc7JQ0OXXqlNtjUlNTSU5OxsfH57oaxWrVqnHs2DFeeeUVRxfH4lC1alWOHTvG+PHjadeuXaGPT0pKAgq3TLMQ5dGCBQsK3UuiXbt2/PzzzyUTUB5jxoxhzJgxpKam5rsqWEHbXqvVyv79+53m1bia4mh7S1ph22+NRsPtt9/uWMnh0qVLPPfccyxdupTXXnuNb7/9FoAqVapw7Ngx4uLi3K6QY+9hYu9xcjURERGOuho1auSy395rMD/SVgtROl2r7bVYLKSmpvLLL7+wceNGVqxYUaB6r7ftfemll9i0aRO//PILU6ZMYd68eY59mzZtAuCzzz6jZ8+eTsedPHky3zoL2naWFHs7euHCBbf7T58+XaLnv5K0y6I8kjlNbpCDBw8SGBhI7dq1i7XesLAwwsPDOX36NJmZmW7L5NfF0O7EiRNA7vCdvBMGAly+fJnNmze7HGOftKtjx45OS68Vln28/Q8//FDkOtyxT8hV1AkWjx49itFopGHDhsUYlRDiRito2ztr1ixOnTrFU089VaB6C9L2etr1tt9hYWFMnjwZgH/++cex3T4n1NKlS12OMZlMjkl/7eWuxl7G3ZeKxMREt5MK5yVttRCl09Xa3gsXLtC0aVNCQkIYMGAAM2fOLPBSxMXR9r777rsALFq0yCkxm5qaCrgfmlKYxEd+bafBYAAolnlP8rIPcXfXXqakpFyzHS1u0i6L8kiSJjfA6dOnuXz5Ms2aNSuRbst33HEHVquVP//80+3+iRMn8tJLLxEdHe2y7+zZs4waNQrInTHc3qDn9eKLL3L58mXH85iYGN58800g95fe6zFq1CjCwsJ4//33mTNnjsukshaLhQ0bNjh96BTEyJEjqVixItOmTSt0vdHR0Vy+fJl27dqVii6SQoiiKWjbu3fvXl555RVef/11mjZtWuD6r9X2lgYFbb8///xzt0OTfvrpJwCqV6/u2DZ8+HB8fHxYtmwZ69atc2y32Wy89tprXLx4kdatW19zriqAoUOHYjQaWbJkieNXXshdeeP5558nIyMj32OlrRaidLpW2xsaGsp3333Hxo0beeGFF3j66acL3NMErr/tbdmyJQ8++CAWi4X333/fsd0+h9KcOXOcfnDcuXMnH3zwgdu6CtN2hoaG4uXlRXR09HUNzb/S0KFDMRgMrF69mh07dji2W61WXnjhhRs6jFTaZVFeyfCcGyC/ybCKS8+ePfnuu+/Ytm2b25vU9PR0PvroI6ZNm0b9+vVp1KgR3t7enDlzhr1792I2m6lbty4zZ850ObZ9+/aO/d26dcNsNrN582YyMzMZOHBgvqveFFRwcDCrV6+mV69ejBo1irfffpsmTZpQoUIFLly4wB9//EFycjI//PADTZo0KVS9s2bNYuzY
sYWu1z7B5ZVdM4UQZUtB2t6zZ88yYMAAevXqxaRJkwpV/7Xa3ryunHAwrxo1ajgSGcWpQ4cOmEymArXfn3/+OU8++SSNGjWiYcOG6PV6jh49ysGDB/H29uaNN95wiveLL75gyJAh9OrVi9tuu43q1avzxx9/cOzYMUJDQ13mwclPrVq1mD59Ok8//bRjAuHw8HD27NlDUlISAwYMYMmSJW6PlbZaiNLpWm2vXq+nSZMm1KtXj+7du5OYmMiECRPo06dPgeovTNubn8mTJ7N69WrmzZvHxIkTqVSpEgMHDmT16tV8+umnbNu2jWbNmnH27Fl+/fVXXnjhBaZNm+ZST2HaToPBwD333MOaNWto3rw5rVq1wmAwcNtttzF06NAiXQfkLiX/7rvvMm7cOLp3707nzp2pXLky+/btIzExkYEDB7J48WK3P4wWN2mXRXklPU1ugMJMhlUUffv2JSgoiG+++cbt/tdff51FixYxcOBAjEYjO3fu5Pvvv+fw4cO0a9eO999/n7/++svt+HOj0ciWLVvo378/e/bsYcOGDVSvXp1p06YV22oQHTp04NChQ4wfP57AwEC2b9/OqlWriIuLo3PnzixYsMAx3KYwWrRowV9//VXoer/55hu8vLyu+iVHCFH6XavtTU5OZtSoUdSsWZOFCxcWuifgtdrevBYuXJjv48cffyzUeQuqMO33W2+9xbBhw9BoNGzevJk1a9aQlZXF//3f//HXX3+5fDEZNGgQO3fu5P777+fIkSN8//33ZGVlMXr0aL7//vt8J9l2Z8yYMfzwww+0bduWvXv3smHDBpo3b86ePXuoW7duvsdJWy1E6VTY+94WLVpcdc6QKxWm7c1P8+bNeeihh8jOzmbGjBlAbhJ379699OrVi4SEBH788UfS09P54osv8u1pUti2c+7cuQwaNIjLly/zzTff8NVXX7F9+/YiX4fdM888w0cffUTr1q0d7X2LFi3Yu3evo8dH3pUuS4q0y6LcUqLYpKSkqICAALVhw4ZCHWexWNSRI0dUzZo11WuvvVakcz/33HMKUAcOHCjS8Vc6ceKEAlSnTp2Kpb4bzf43tVgshTru9OnTSqvVqr59+zptP3TokJo0aZLKyckpzjBLjMlkUqtWrVImk8nToXiEXH/Rrj8lJUUBKiUlpYQiKxkpKSnKaDSqlStXFviYnJwc1aVLF1W1alV19uzZIp+7uNveorZdecXExChAde7cuVhiKoziiL8g8murlVJq8eLFaunSpSV6/vxI2yPXf7O1vYBauHBhoY67sp3o37+/qlOnTqHqKI1tryflF7/FYlGNGjVSGo1GnT9/vkRjyK9dTkpKUpMmTVLR0dEldm5pe+T6S7rtlZ4mHhYfH8+KFSvYsGEDmZmZHD16lO+//77QK09MmDABf39/pk6dWkKR3hw++OADtFptiXSVF0KUHk899RQ7duzgySefJCYmhj179rBnz55Cj5GXttczpK0Womzq2rUrs2fPZvfu3axdu5YRI0bwzTff8PLLLxeqHml7nUVHRzsmsrXLyclh/PjxHD58mDvvvJPw8PASjUHaZVGeyZwmHvbvv//y2GOPOZ6vWLGCFStWULNmTWJjYwtcT1hYGC+99BKTJ0/m0KFDhZrMUOQ6f/48c+bMYcSIETRo0MDT4QghStCmTZuw2Wy8/vrrTtul7S39pK0WwrNmz57N7NmzizSZafPmzZk9ezanTp3C39+fRo0asWbNGu6///5C1SNtr7Pvv/+eyZMn07p1a2rUqEFqaioHDx7k/PnzhIaGMmvWrBI9v7TLoryTpImHdenSBYvFQlRUFPXq1buu5XvfeOMNpwmnROFERESQlZXl6TCEEAV0PTfusbGxWK1WaXvLIGmrhfCsMWPGMGbMGFJTUwkKCirUsTNnzmT69OnS9hazbt26sWvXLg4fPsyhQ4ewWCxUrVqVJ598kgkTJjit4lMSpF0W5Z0kTYRbkZGRHDlyhHr16nk6
FCGEcCvvjXtYWJinwyk1IiMjnZbLFEIIUb61bduW6dOnX3ciSgjhnsxpUgxmz55No0aNaNu2LYDcrJZD8m8qROmmlJL3qQCkvRbiRpP3nLgaeX2I8kCSJsVgzJgxHD58mP3795OdnU1MTAxms9nTYYliopTi2LFjGI1GvLy8PB2OEMINk8lEXFycdA++yaWkpHD+/HkCAgI8HYoQN43z58+7TEIqhN2xY8cApF0WZZoMzylmZrOZc+fOMW3aNHx8fAp0jM1m4/z580RERKDVlo48VmmMqTCKM36TyURWVhYPPvggGo2mmCIUQhS3tLQ0PvzwQ3x9fQtUvjS2c6UxpsLwdPzp6en4+/tz22233fBzC3GzUkrx8ccf4+/vX6Dynm4n3CmNMRVGaY3farWSlpZGu3btCA0N9XQ4QhSZJE1KwMCBA7l48SImk6lA5S0WC+np6TRu3Bi9vnT8k5TGmAqjOOPX6/XUqFGDWrVqFVN0QoiSMHDgQM6fP092dnaBypfGdq40xlQYno7f19eXRo0aFXpySiFE0Q0YMICzZ8+SmZlZoPKebifcKY0xFUZpjV+r1RIaGkqTJk3kh0dRppWed1U5Eh4eTv369Qtc3mw2k52dTbdu3UrN8I/SGFNhlPX4hRCFFxoaSu3atQtcvjS2E6UxpsIo6/ELIQovODiYGjVqFLh8aWwnSmNMhVHW4xeitCs9/beEEEIIIYQQQgghShFJmgghhBBCCCGEEEK4IUkTIYQQQgghhBBCCDckaSKEEKJMmj17No0aNaJt27aeDkUIIYQQQpRTkjQRQghRJo0ZM4bDhw+zf/9+T4cihBBCCCHKKUmaCCGEEEIIIYQQQrghSRMhhBBCCCGuU3RiHNtNfxCdGOfpUEqUDI0UQtxsJGkihBBCCCHEdVBKsT56OxdtiayP3o5SytMhlRgZGln6xKZksl9XkdiUTE+HIkS5JEkTIYQQQgghrkPU5RiOJ0SjR8fxhGiiLsd4OiRxk1BKsfVUApc13mw9lVCuE3ZCeIokTYQQQgghhCgipRTro7ZjtlnwxoDZZmF9VPnubSJKj5PJGUQnZ6JXNqKTMzmZnOHpkIQodyRpIoQQQgghRBFFXY7haMIJfL180Gg0+Hr5cDThhPQ2ESVOKcXWuHjMNoUBG2Zb7nNJ2AlRvCRpUgxkQiwhhBBCiJuPo5eJ1UxKdhpmLBh1BsxWs/Q2ESXuZHIGUYkZ+Oq1aABfvZaoxAzpbSJEMZOkSTGQCbGEEEIIIW4+9l4mWo0Wi7KQrUwA+Bl8pbeJKFH2XiYWmw2DNvcrnUGrxWKzSW8TIYqZJE2EEEIIIYQopLy9TIK9A6ngHYSvxhuNRiO9TUSJs/cy0Wk0JOaYMWs0ucPD9DrpbSJEMZOkiRBCCCGEEIVk72XiZ/B1zGWi+++ttUajkd4mosTk7WViUTbMVhs2NAAYdNLbRIjiJkkTIYQQZZLMJyWE8BSlFGuObiItJx0vrd5tGeltIkrK/+Yy0RFk9CLAqMdL2QCkt4kQJUCSJkIIIcokmU9KCOEpRxJO8NeFf7EqG8nZqW7LSG8TURKc5jLRadFptPjp9U5f6qS3iRDFS5ImQgghhBBCFJBSigV/fIfZZkH732E5JqsZk9WMVVkd/2+ymtFqtORYcqS3iSg2eXuZaDQat2Wkt4kQxct9f0IhhBBCCCGEiz/P/8OF9EtoNRp89N6YbRbMNgsohRkrOZYcyPNl1qg3kpiVhNVmRa+TW29RdPZeJjlWK946PYlZJvQ6DUatBitgttociRStRkOO1cLWuHhqB/vlm2ARQlybtNxCCCGEEEIUwMX0eFYd2UCYXyhtqjbjjprtHPssFgvbt2+n8+2d0eudb7H9Db6SMBHXzaoUiVlmjDodmRYbmRYrGgtg0GPVaMmx2YA8CTudjqRsM1al0EvSRIgik9ZbCCGEEEKIa8ixmFh0cCUmq5kGobXp2/h+tNr/jXQ3m80Eaf2pElAZLy8v
D0Yqyiu9VsvoVrXIMFv580Iye88mUi3Ih3siQ9mxfQedWnVCr3d+7fl56dBrZUYGIa6HJE2EEEIIIYS4hrScdGw2G0HeAfRr+oBTwkSIGyXQ6EWg0YuNWTl46bS0qBxMuJ83/lgI9/OWhJ0QJUCSJkIIIYQQQlxDqF8IT3cYQmp2Gv5GP0+HI25imWYrp1OzAKhXwd/D0QhR/knSRAghhBBCiHxYrBbHfCS+Xj74evl4OCJxs4tOSkcpCPMzEuzthdls9nRIQpRr0q9QCCGEEEIIN9Jy0pm2ew57Tv8hSwb/1+zZs2nUqBFt27b1dCg3rajEdADqhUgvEyFuBEmaCCGEEEIIcQWbzcY3f68mOSuF3ad/x2qzejqkUmHMmDEcPnyY/fv3ezqUm5bZptBoZGiOEDeKDM8RQgghhBDiCutPbCMm6RRGvYFBzR+WJYNFqfFYo2pkma0Y9fL7txA3grT+QgghhBBC5PHPxaPsiN0LwKONe1LJr6KHIxLCmY+XztMhCHHTkPSkEEKIMknG1QshSkJCRiLf/bsOgDtqtqNJ5Vs8HJEQuZRSZJplmJgQN5okTYQQQpRJMq5eCFHcTFYziw6uJMdiolaFGtxbr6unQxLCISHLxIf7oljyzylseSYmjk6MY7vpD6IT4zwYnRDllyRNioH82imEEEIIUfbptTpahDciyDuAfs0eQKuVW2VRekQl5i41rNVo0Go0QG7vk/XR27loS2R99HZZ5UmIEiCfBMVAfu0UQgghhCj7tBotXWvfyrhbRxJolJVJROly/L9LDdfNs9Rw1OUYjidEo0fH8YRooi7HeCo8IcotSZoIIYQQQoib2qWMy5gsJsdzo97gwWiEcJVptnA2LQv431LDSinWR23HbLPgjQGzzcL6KOltIkRxk6SJEEIIIYS4aaWbMvjq96XM3vc1SVkpng5HCLdOJGWgFIT5GQn29gJye5kcTTiBr5cPGo0GXy8fjiackN4mQhQzSZoIIYQQQoibkk3ZWPb3j6Rkp2FVVny8vD0dkhBuRf13aE69EOdeJtmWHEwWEzYURp0Bs9UsvU2EKGaSNBFCCCGEEDeljSd2ciIxFi+dF4OaP4y33ujpkIRwYbEpopMyAKj/36SJvZeJUoo0cwY5yoRGo8HP4Cu9TYQoZpI0EUIIIYQQN50j8VFsjdkNQJ9G91LZv5KHIxIiP4q7a4fRNCyQCH9vRy8Tk9WE2WoGwKDJHbIjvU2EKH6SNBFCCCGEEDeVxMxkvv1nLQAdq7emRURjD0ckRP70Wi0tKgfzQP0qaDUap7lMAox++Oi90f33a530NhGi+EnSRAghhBBC3FR+OLKeLHM2NYKq0rPBnZ4OR4gCc6yYYzXjo/cmyDuQEJ9gpzLS20SI4iVJEyGEEEIIcVN5pPF9NKpUjwHNH0Sv1Xk6HCHylZhlYu/ZRBKzcpfEtvcy8TP4otFo3B4jvU2EKF6SNBFCCCGEEDeVIO9Anmj5CEHegZ4ORYirOpKQxsaYS2w4edHRyyTHkkOmOZv0nExMVjMmqxmrsjr+32Q1o9VoybHkSG8TIYqB3tMBCCGEEEIIUdLOpl4gKSuZJpVv8XQoQhRYVFLuUsP1Q/yx2qxczkpEr9OTlpO7PdAYgBYNZqzkWHIgT+8To95IYlYSVpsVvU6+9glRVPLuEUIIIYQQ5VqmOYvFB1eSlJVC3yb306pKU0+HJMQ1ZZgtnEnLAqBeiD96nZ7nbx3Bj0c28teFf6kbEskDDe/GYrGwfft2Ot/eGb3e+eudv8FXEiZCXCd5BwkhRDmTmXqaEK/DZKY2JahibU+HI4QQHmVTNpYfWkNSVgohPsE0rFTP0yEJUSAnEjNAQWU/I4HG3CWFffTeRCfFYtB5cXfdTlQNDMdsNhOk9adKQGW8vLw8HLUQ5Y/MaSKEEOWIUoqEM7/hrUsh4cxvMo5ZCHHT23pyN8cSotFr9Qxs8TA+Xt6eDkmIAolKSgNye5nY/XXhX3Is
JkJ9Q6gdUsNToQlxU5GkiRBClCMZKXFkJMdiUzoykmPJSInzdEglZvbs2TRq1Ii2bdt6OhQhRCl1POEkm6J/BeDBhj2oElDZwxEJUTAWm42TSZlA7nwmkPvDyG+n/wCgQ/WWaDXyVU6IG0HeaUIIUU4opYg/tRulLNjwQinLf5+Xz94mY8aM4fDhw+zfv9/ToQghSqGkrBSWHfoRhaJdtRa0qdrM0yEJUWDxmSZsSuFn0BHun9s76lTKWS6kXUKv1cu8PELcQDKniRBClBMZKXGkJ8eg1RmBHLR6b9KTY8hIicM/ONLT4QkhxA11JD6KTHMWVQPD6d3gLk+HU27Mnj2b2bNnY7VaPR1KuRbh783z7euRlGVC+98VcezDciIrVMPXy8fDEQpx85CkiRBClANKKS6d2o3FnIWy2dCgR6s1YLFkE39qN35BNdHkWYZQCCHKu1trtMHf4Ee1oAhZPaQYjRkzhjFjxpCamkpQUJCnwynXjDqto5cJQP3Q2rxw20hMVrMHoxLi5iPDc4QQohxITThGSvy/KJsFNKDBikajQaf3cfQ2EUKIm02z8IaE+AR7OgwhCsV2lWG1Go0Go95wA6MRQkjSRAghyrjUxGhi/12OzWYBjRYvQwA2cpcc1OoM2Gzle24TIYSwO592iXm/Lyc1J93ToQhRZL+dSeTLP2M4dCkFyF02+6/zhzFLDxMhPEKSJsVAVnAQQnhKSvwRTh1egdWchVarx+hbEZ2XD5A7FEd6mwghbhZZ5mwWHVzJ8csn+fn4Vk+HI0SRHU9M42JGDmabLfd5QgzLDq1m5m9fYVM2D0cnxM1HkibFQFZwEEJ4il9wbWzWHDRaHQafULRa13H70ttECFHeKaX47p+1JGYmEewdSK8G3T0dkhBFkmGycDY9G4C6FXKXGt5z+ncAGlaqK8sMC+EB8q4TQogyRClFelKMI/mRnXEeNBq8jEFote6bdOltIoQo77bH7uFwfBQ6rY6BzR/G1yAri4iyKSopHRSE+xsJNHqRmJXMsYSTAHSo1srD0Qlxc5KkiRBClBFWSzZno9Zx+tiPJF/8G6UU8ad2Y7Oa0Wi02KxmxwNsTs81Gi1Wq0l6mwghyp3oxDg2nNgOQO9b7qJaUISHIxKi6KISc+fjqR8SAMC+M3+hUNQNiSTUL8SToQlx05L114QQogzITD3D2RMbsJjSAQ1K2VDKiiknBZ3OgM2a4yirFGixYrOayLvKsE5nwJSTglJWNBpp/oUQZV9KdipL/16NUopWVZrSrmoLT4ckRJFZbDZOJmcAUC/ED4vVwv6zBwHoUF16mQjhKXLXLIQQpZhSNhLO7CXh7H5AYfAOpkrde/DxrwxA7WYDsVqynI4xm83s2LGDTh064eXl5bRPp/d1O++JEEKURWarBV8vHwKMfjzYsAeavJliIcqY2JRMzFZFgFFPuJ83By8cJsOUSZB3AI0q1fN0eELctOTOWQghSilzTipno9aTlX4egKBKDQmP7IJWZ3CU8TIG4GUMcDpOZzZjUb54+4W5JE2EEKI8CfULYUz7wWRZsjHopL0TZZuvXkeTsEACDHo0Gg1nUi8A0LZqi3znLRNClDxJmgghRCllNqWTlX4Brc5ARK1uBIY28HRIQghRKmSas/D1yp3s1ag3YNQbrnGEEKVflQAfHgz43yTG9ze4k/bVWuDj5e3BqIQQkjQRQohSRCnl6F7uG1CFKnXuwiegCgbvIA9HJoQQpcOl9AQ+3fc1XWvdSqfI9jIkR5RrlfwqejoEIW560s9LCCFKieyMS8Qc+oaczMuObUGVGkrCRAgh/ivHYmLRwZVkW3I4fvkkClkNTJQPp1MzuZCejVKKHIuJ1Jx0T4ckhPgv6WkihBAeppSNxAt/EX9qF0rZuHhqJzVuedDTYQkhRKmilGLF4Z+Iz7hMkHcA/Zo+gFYjv/+J8mFjzCXOpWXTu34EGTknWHN0E3fUbMe99bt6OjQhbnqSNBFCCA+ymDM5
d+IXMlLiAAioUJuI2t09HJUQQpQ+u07t5+8LR9BptPRv9iD+Rj9PhyREscgwWTiXng1AZJAvc3//A5uyEejt7+HIhBAgSRMhhPCY9ORYzkX/gtWchUajo3JkJ4LDmsr4fCGEuEJs0ml+Or4VgJ4NulMzuJqHIxKi+EQlpYOCiABvLmde4FJ6Al46L1pFNPV0aEIIJGkihBAekZ4cx+mjqwEw+lakat17MfrKZG9CCHGlTHMWS/5ehU3ZaB7eiI7VW3k6JCGKVVRi7vwl9Sr489vpXwFoGdFYVs0RopSQpIkQQniAX1B1fAOqYPQNJazmHWi10hwLIYQ7PnpvutbqyP6zB3m40b3SG0+UKxabjZPJGQBU89ex4dgxADpUa+nJsIQQechduhBC3ABKKdISo/CvUButVo9Go6V6w4ckWSKEENeg0Wi4tUYbOlRrhVYrE7+K8iU2OROzVRFg1BOTdASrslEjqCpVAsM9HZoQ4r/kk0cIIUqY1ZLNuRM/czbqZ+JP7XJsl4TJ9Zk9ezaNGjWibdu2ng5FCFEC4pLPkG3OdjyXhIkoj6L/28ukXgU//rrwDwAdqksvEyFKE7ljF0KIEpSZdo5zUesxm9IADXqDrPZQXMaMGcOYMWNITU0lKCjI0+EIIYpRQkYi8/5Yjr/BjxFt+hPsHejpkIQoEXdGhlE/xB8/Lz1dag7iz/P/0KxyQ0+HJYTIQ5ImQghRApSykXB2Pwln9gIKL2MgVevdi4+/dLcVQoirMVlMLDq4khyLiQj/MAIk2SzKMb1WQ63g/73Gb6shvSeFKG0kaSKEEMXMnJPOuRM/k5l2DoCg0FuoHNkVnd7g4ciEEKJ0U0rxw5ENXEyPx9/gR//mD6HT6jwdlhAlymazyfAzIUoxSZoIIUSxs5GdmYBW60V4ra4EVZJutkIIURB7z/zJn+f/QaPR0L/ZgwQa/T0dkhAlZuXRs/gb9GRkH+Zc2il61O1M3YqRng5LCHEFSZoIIUQxUMqGRpP7K1HuUJz7MHgHYfAO9mxgQghRRpxOOceaoxsBuLdeV2qH1PBwREKUnDSThcMJaSgUFtO/5FjSMFnNng5LCOGG9AMTQojrlJ0Rz8m/l5CeHOvY5h9cUxImQghRQEop1hzbhFXZaFK5AXfUbOfpkIQoUScS0wHw1uWQY0kjyDuQWyrV8XBUQgh3pKeJEEIUkVKKpAt/cenUryhl49KpXfgF1USj0Xg6NCGEKFM0Gg1PtOjDhqjt9Gxwp7Sjotw7/t+kSbb5NADtq7VEq5Hfs4UojSRpIoQQRWAxZ3I+eqOjd4l/hVpE1L5LbvSFEKKI/A1+9Gl8n6fDEKLEma02YlIyMNvMZGadxKDV0rZqM0+HJYTIhyRNhBCikDKST3EuegMWcyYajY6wmndQoXIzSZgIIUQhHY0/QYY5i9ZVmno6FCFumNiUTCxWhdmajpZMGlduSIBMeixEqSVJEyGEKITsjHhOHf0BAKNPCFXq3Yu3b6iHoxJCiLInMTOZ5f+sIcucjV6ro3l4I0+HJMQNcTwxHZuykWU6jVEDHaq18nRIQoirkKRJMZg9ezazZ8/GarV6OhQhRAnz9qtEUKWGaLV6wmp2QquVZlQIIQrLbDWz+OBKsszZVA+qQuOwBp4OSYgbxs9LR6DRiw5VGpOW7U+tCtU9HZIQ4irkbr8YjBkzhjFjxpCamkpQUJCnwxFCFCOlFKkJR/ELroneyxeAiNrdHcsLCyGEKLzVR3/hXNpF/Aw+DGj+EHqtztMhiQKSHwuvX5ealehcIxQFaDWNPR2OEOIa5K5fCCHyYbXkcO7Ees5F/8K56F9QygYgCRMhhLgO+88e5MDZv9FoNPRr+iDB3oGeDkkUwpgxYzh8+DD79+/3dChlmkajQStzoQlRJkhPEyGEcCMr7TxnT6zHnJMKaPANqOLpkIQQosw7k3qe1Ud+AeCuOp2oWzHSswEJcYNdSM9m3+lf8TN40756S/wNfp4OSQhx
DZI0EUKIPJSycfncAeJP7wEUXsZAqtTtIUkTIYQoBrFJZ7DYLDSsVI8utTp4Ohwhbqi0HDNf/BnNxXRFgHYXDcPqSdJEiDJAkiZCCPFfFnMmZ6N+IjP1LACBFesTXqsbOr3Rw5EJIUT5cHvNtoT5VaRaUARaGeoobjJRSelkmDLRkk1khSpUCajs6ZCEEAUgSRMhhPgvjUaPOScNrdaLyrW6EBTaEI2MNxZCiOtmUzZHkqR+aG0PRyOEZxy/nE6GOQsvTRIdqssyw0KUFZI0EULc1Gw2CxqNDo1Gg05voFr9+9Fq9Rh8Kng6NCGEKBeiLsew4cR2+jd7kBCfYE+HI4RHmK02DidcxmqzUtE7k6Zht3g6JCFEAUm/SCHETSsnM4HYQ0tJuvi3Y5u3XyVJmAghRDFJzkph6d+rOZNynt2nDng6HCE8JiYlk9ScTLSaHDpWuwW9rvh+u446nczGf3P/K4QofpI0EULcdJRSJF44SMyhZeRkJZJ4/g9sNounwxJCiHLFYrWw+O8fyDRnUTUwnHvqdvF0SEJ4zMELl8i25OClSaZ99RbFVq9SinW7YjmXDOt2xaKUKra6hRC5JGkihLipWMxZnDm+houx21DKin9wJJFNHkOrldGKQghxPaIT49hu+oPoxDgA1h7fzJmU8/h4eTOg+UPF+su6EMXp3OlkYv7VcK6EemoopYhNzcHP4EP9kIBiHaZ2LC6JI7GJGHRwJDaRY3FJxVa3ECKXfHoJIcqdzNTThHgdJjO1KUEV/zfhYEbKac6d2IDFnIFGoyWsxu1UCG8hk70KIcR1UkqxPno7F22JrI/eToYliz2n/0CDhseb9pa5TESppZTiwK5TpCdrOLDrFDVqhZbIfUG/RjWJSsrg9mpti61OpRRrd53EbLHh7QVmi421u07SoGYFubcRohhJ0kQIUa4opUg48xveuhQSzvxGYEgtNBoNZlM6p4+uQikbBu9gqta7D2+/Sp4OVwghyoWoyzEcT4hGj44j8cc5k3IOvU7PnXVuo0FoHU+HJ0S+zsQlczo2Ca0OTscmcSYumeqRxTu3mUajoVqgL9UCfYu13mNxSRyOScTPW09Otgk/bz2HY3J7m9wSGVKs5xLiZibDc4QQ5UpGShwZybHYlI6M5FgyUnK7iXsZ/Amt1p7gsMbUatpfEiZCCFFMlFKsj9qO2WbBGwMWm40sSw71QmrRrfZtng5PiHwppdi/KxZLjhljdhqWHDP7S2BekC0nd3E29UKx1unoZWK2YTDoADAadI7eJjK3iRDFR5ImQohyQylF/KndKGXBhhc2azYXYrY6bhwqVmlLRO3uaHVeHo5UCCHKj6jLMRxNOIGvlw8ajQZ/gy8KG+2qtUCrkVtNUXqdiUvmdEwiOlMmWrMZnSmT0zGJnIlLLrZzHIs/w6rjZ5i5ZzWZ5qziq/e/vUzQKOKTsrHYcnu05O1tIoQoHvJJJoQoNzJS4khPjkGrM6LDhNVqJu1yFOlJJwFkfK8QQhQzRy8Tq9mRIDHqDJitFrbG/Ca/dotSy97LxJxtgpwslEYDOVmYs03F2ttkU+wxTLYwfIwN8fXyKZY67b1MckxWsrItmC1WbP8N11t6mwhR7CRpIoQoF5RSXDq1G6slB3NOOhqNFdCARkv8mb1y4yCEECUg6nIMh+OPY7ZZiM9MJFuZcn/tNvhyNOEEUZdjPB2iEG7l7WVi1hgw67xRimLtbZJpzuL45TQAWodHXHd9dvZeJjabAjQYDXr+O0JHepsIUQIkaSKEKBcun/+dlPh/sVlNgA3QYPSpgJcxkIzkGMfcJkIIIYqH1WZl8cGVpGSnYbZa0PC/Hn25vU3MrI/aLklrUepc2cvEgAUNNtDpirW3yf4zhzDZAvDSedGhavVii92+Yk7FIG98vPUEBxidykhvEyGKlyRNhBBlXkbqWc4c/RGbzQIaLXovPyzKG63OgFZnwGaz/HeuE7lxEEKI4pCYmcz0XXM4kRiLRqPB
W28gzK8iRnLnjLoZe5v8b7n7054ORVxD3l4mGqVAo9DbzOg0Co1SxdLbRCnF9rgoFFrCfH0J8zNe+6ACyLtijpdeS2iQN1465+HH0ttEiOIlSRMhRJmklM3x/zarGaWs6PTeePuGojf4Abk3EBqNBp3eh3TpbVLuzJ49m0aNGtG2bVtPhyLETeWv84eZsftLjiacQCmo4B1IJb+K6LV6p3I3U2+TK5e7L+/XW5Zd2ctEo9OC1YrGZkXZVO7zYuhtEp0YR0K2Hq1GQ6vwysUyr5q9l0lWtgWdVoPJbHU8LDacnuu0GrJzLNLbRIhiIEkTIUSZopSNpAsHifl7CVZLTu6N6undaHUGDN4V0Gh1LsdIb5PyacyYMRw+fJj9+/d7OhQhbioVfALJMGVisVkJ9a1AgNHfbbmbqbdJfsvdi9Inby8TgEyNLyaNAcV/h5dptMXS2yTDlAmaUHy9fGhYKbhYYrdYFefjM8g2WUlIySYrx0qWyUq2yYrFCtmm3Of2h7dRz+WUbCxWufcR4nror11ECCE8TylFRnIsF0/9iikrEYDki4cw+oWSnhyDTu+T7684V/Y28Q+OvIGRCyFE2WZTNs6nXaJqYDgANYKqEuZXkZScNLx0XpisZiC3nbYqKyar2dEeazVaciw5rI/aTr2KtcrlKmZXLnevVG6S3i+oZrm83rLM3sskJzMHXU4OFq0RKxqUxgutxgya//7wotOjcnIwZ+awf1cs1WoGF/rfslZIPRqEGknLMVMj0LdY4tfrNFQLCyDbZKFOtWAevbMeoMFisbBjxw46dWqHXu/89S7A14CXXn4nF+J6SNJECFHqZWfEc+nUTjJScseJ6/TeVKrWgaBKjYn791v+n737jpPrKg///7lt+sz2vtqmVa9WtyRL7p1qAzbFQBJToi+QkGYIIT9CaAmQhGASwMSmY8AUY8u9SFaxLcuSrK7VNml73+kzt5zfH7M70lqSLVllV6vzfr32tTu3zbmzM3Pvfe5znmPbaQzNjXPciTs4OMeduCuKimWn5YmsJEnSGeiLDfCbvY/SHu7iM5f/GUX+AmzHJmmn8Ohuklbq2MJCYGKTslJw3HesW3czkBjEdmx0bfKdeh4b7t4DJFF1jwzST1COLQgPJVCtFLaiY41mmCCwVReC0SFoNITioFopwkMJHFug6Wd23hByG6xbPJVo2kJXz805x45DvRzpDuPzGHz4ltkU5GSGMDZNk1wfVBYHMAzjnDyXJEnHTL4jlyRJk4YQDl3NzzHUsxcQKIpKfullFFQsQdM9OI5FOjWMprlw7NRx64GKjWOnjz9vR9NcpFPDCGGjKPLrT5Ik6VQc4bCpdRtPHt6I5Vi4dRe9sYFM7RJN569X3p3pfnAcy7LYsGEDa1evPeFud8Dlm5QBEyEE7Y0b2bBlH9t3HGWwf4i8glwWXzYFl38j0xfJIP1EoukqN1yew8FdD5F0BdnnmgWWyay+rWAo+PPzs/8vx0wh4iYz7/wztDPI1LAdm4N9TcwsnIqqqgRc5+Z9n0xb/HFDIwDXLqvKBkwkSTr/Jt/RS5KkSUNRVIRjAYJQwTSKpqzC5cnJzldVnbr5H8S2EmPWM00zk6a6Ys0Jd1w03Yeqyq8+SZKkU+mJ9fPbPY9yZLgdgPqCGm6bfTN53mPfv7meELme0Jj1TNMkRw1QHiy5ZO52P/Hog9xzzxfo6B7CNm0UQACPPruT+372Al//hpsbb3nfeDdTGiGEIPz8U3jj/Qx4ytCETb4yRKHPJp5O4U9rx4JcCpjxIYafe4qShbNPO/i1v7eBn+78IxWhUj614q5zFjR7fGsr4ViKghwvVy+pOifblCTp9MgrB0mSJgwhHMJ9B/EGy3B5cgEomrKK3JJ5+ILlJ13HcAcx3MEx0zTTxBI+PP7iS+bEXZIk6VzY1PoyjzdsGMkucXPL9KtZWrFAZkucxDNPr+cTf/kpYrEkeTl+NMBRFFQhsIH2zkH+ct1fc3+glLVr
1453cyVAWBbpvn5Uj5tu8hCOQ1GyE4RAsWycZHJshqrHTbqvH2FZKKd5PrH16KukRRGdiVlsONLHldVFZ93ujr4oG3e0AXDbVdNkjRJJusBk0ESSpAkhHm6ju/UFkrEegvlTqZx+KwCGO4BxipEZJEmSpHMrbZtYjsX0wjrePfumE7JJpIz+rn387V+vIxpNUFQQQBEKmQFKFBwFdAWKCkP09oX5h7//LBtf2IrL5RrvZl/yVMNgxt//De2NPahPHyVkqKx89xrAYeOGjSxcu+aErmV6IIh6mgGTnlg/jQOtWMzEY/gxtHMT3IjGTYI+FzVlIWbV5p+TbUqSdPpk0ESSpHGVTgzSc2QzkcFMP11VNfD6SxFCyDubkiRJ55njOETSUXJGgiNra1ZQ7C9gTvEM+R18Eo5j0dP6An/43S9p7+wjL8eHqmrYpg3K6AWyguM4aJpCbo6XluZmnnrqKW655ZZxbbuU4crNJVipUzfbwmptIrp7NzlLF2Pn5uCtqDirDNWXju5ACBVDK0FTNKbnn5ubPtOr8vjch5di2s452Z4kSWdGBk0kSRoXtpWkr+0lBrtfQwgHUMgrmUth5Qp049wMzSdJkiSdWmekh9/sfRTbsfnU8o+gazqaqjG3ZOZ4N21CSicGaWtYTyrex8vbD+E4ApfbhbBsTMuhbyhMQW4Qw9AQioKwbVwuF6YV56knHpZBkwmksCTAtbfMZPcXfkVPU5LAnNlnvc20lWZ7x2tYIgef4SfXY1DoPXfZRR63juecbU2SpDMhgyaSJI2Lwe7dDHTtBMCfW01J1RW4fQXj2yhJkqRLgO3YPN+8lWebNmMLB6/hoSfWR3modLybNmHZVoqWPQ9i2ylUzU00YZOp+iqwLJvO3mEcx2FgKEpJUQ6gIISDIgQK0NXRIDMoJ5hkZxdOMonmduEpL4fdr53V9nZ17ydppdD1ety6i2n5gbP+fz++tYXCXC+LZxbL944kjSMZNJEk6YIQQuBYSTQjM0RefulC4pF28ksvI5BbPc6tkyRJujR0RLr5zZ5H6Iz0ADCraBrvmn0jIVk76g1pupuCiiVEh1oorbuOUOAhEJlsElXXCAU8JFNmpr6JIgAQZOYLIODV5HD3E8ThAz0UlQRJNzUD4KupRjkHtUdaBo8iBLiNchSUs+6ac7Q7wpMvtSKEoCjXS3WZrC8kSeNFfnNLknTeJWM9dLdsxLZT1M67E0VRUTWDqpnvHO+mSZIkXRIcx+HZ5s0827QFZyS75B0zr2dB6ekPpXqpScb7UFCyWZD5ZYvIL1uEoqgsX/0uHn7sVeKDNo4rgKF7MHSFVFKQGXQYQEFNR3HpXt5++yfkcPcTQCppsuHJQzi24PLAEQACU+vOybbfM/dWpubP55HGKG5dZUrorXc1dhzBb5/NZCddNqNYBkwkaZzJb29Jks4bMxWl9+hmhvsOAKAoGql4Hx5/8Ti3TJIk6RKjQNPAERzhMKd4Ou+cdQNBmV1yUkIIhnr20N2yAcOTQ+3cO1A1A2Wk0OvDf/wj3/+P/yYdi5NUDAJaKBt4UjgWNHEExBJpqgtyueHGW8drd6TjNDf04diCvEIfyuFMAXp/Xe052359QRlX28NYjkBX33ow8qW9XRzpCuN26bz9iqnnrH2SJL01MmgiSdI559hp+ju209+5HeHYAIQKZ1A8ZSWGW94tkSRJuhAs28JB4NIMVEXl9jm3cDTcwfySWTK75BRsK0Vn0zNEBhoAMNxBhLABg3g8zr9+8Ys89PNfolgmM/NLORxLEokN4vMGMTQtEy9RwLJNYokoHref985eQaqpGfcsWWB3vB0+0AtAbYUXc0cUVdfxTZmCfRbbTFnpbPZWyG2wekrhWbUxljB5ZFMTADeuqCY36D6r7UmSdPZk0OQcuPfee7n33nux7bP5ypWkycFMhWnZ8yCWGQfAGyyjpHoN3oAsMChJknShtA138uu9jzA1r5p3zLoegHxfLvm+3PFt2ASW
iHbT3rAeMxUGFIqrVpFfdhmKotLQ0MBf3f1xGvbuBUdwa+1U6i57HzuHwjy56WcMRfqIOzYj1WFRVZWCUBHXrf4g+fl5HPj1wyz9ohzGeTxFw0k624ZRFKjIV+gL+PEUF6MaBrZpvuXtvtS2g6caX+CaulVcWXv5Wbfzkc1NxJMmpQV+rlhYcdbbkyTp7MmgyTmwbt061q1bRzgcJicnZ7ybI0njSncFMdwhFFWnuOoKgvlT5UmiJEnSBWLZFk83bWJDy4sIIYinE1xfvwavIQcrPRUhBANdO+g9shkhHAx3iIr6m/AGSxFC8NsHH+TLn/tH4sNhAh4fV635CDNDQfo9ZUzzFlP/rntoOLqXxvYDJFIxvG4/UytmMm3KHBTdQ1jRaOnYx2LTRHOduyFopTMzmmVSWpFD6cJZlCz4InYicVbbdITDi207MG2TqOlhb2+YqXl+PLr2lrbXP5zgpT1dANx+9TS0c1CgVpKksyeDJpIknZVUYoD+9m2U1FyFprtQFIWKabegGR5Z9E6SJOkCOjLUzm/3rqcn1gfAgtLZvH3mdTJg8qYE0cEmhHAI5tdTVncNmp55zVKpND/4yjeID4epK6lmxbWfoqhiCh19MYQjcHDA8FM+fQ3l09fgIFDJ3CiIjWxdRWVf0TLeqWi8tUtp6WwJITh8IDNi1LRZmbpqiqKg+956sVaAw/0tDMQH8ehuhlJ5/P5gB1fXFLGysuAtba8gx8v/e89CGo4OMrUy96zaJknSuSOvaCRJekssM0Ff24sMdu8GBLorQHHVKgAMWVxQkiTpgjFtk6caX+CF1pcRQhBw+XnnrBuYWzJjvJs2oQkhUBQFRVEpr7+R6GAzucVzs9mRpuXw/T/sY+E1f0blay8y4/I7yMsPUVwawuXSiIRTr9sgJFNJPG4PvC7BMjfPiyazLsdNPJomFk2haSrVdfnZ//3ZevHoqwDML53HwaHM+2HaWQ41XFeRQ12FzFyXpIlEBk0kSTojjmMx2LWLvvaXcew0AIG8OnKLZo9zyyRJki5NSSvFtvZdmeFJy+bythnX4nN5x7tZE5YQDn3t27CtBKU1VwJguALklcxDCMEvf/ITetva+PTnP09xyE1/znQWXDsbn8dg3uIKlq2qIR43ScbH1sGwLJMNGzeyds0SdN0YM8/rM9B02dVivPiDbj74sRX09URJ7N9D48OPkL98GWU33/iWtzmUGGZ/32EAyoKz2DcQJc9rUOg98y5Y0XialGlTkCM/t5I0EcmgiSRJpy3c30DPkU0jRfLA7SukpHoN/pwp49wySZKkS4vl2OhqprNH0B3gttk3oSoas4unjXPLJjYrHaP98OPEw20A5BTOwhsoASASifCPf/1Znlm/HoRg5RVXYh5RKQu48fkM1l4/nZr6zMgogaCbwOtGNTFNE48PCooDGMbYoIk0/jRdpaQ8xNHNzZiRKM5ZFH8FeLl9J0II6vKr6U1mPovT8gJvKYPl4Rea2HGwh/dcO51ls2XhfEmaaGTQRJKk0xYdbMJMhdENP0VTVpJTNBNFkXfOJEmSLqTmwaP8du+j3DL9amYXTwdgbokczvbNRIda6Dj8JLaVQFUNSmuvygZM9rz2Gp/+87tpa25GU1T+/JqrmVNfi+YyaTnczzW3ziQkswAuSmbaRjfUbDAj1tQMQGBq3VvepiMctnfsBmB5xUKeORIF3lrXnKb2Ybbt6wJFoSTv7GqsSJJ0fsigiSRJpzQ67KLhDgJQNGUVhieXgrJFqJq8iyZJknQhpa00jx/ewNYj2xEInm3awqyiaXKEsjchHJveo1vp79wOZLIkK6bdjNubhxCCn953H//2L/9KKp4gzxfg1mveywe/fg/e0lwWlgjmL66UXWsuYluea6S7M8yKNXWUFRkkuzMFYf21NW95m6qi8pfL7uLVjj3k+qqIm224dZWq0JkFPWxH8NtnGwBYMbeU6rLQW26TJEnnjwyaSJJ0AttK09+xjYHOHQTy
aqmcfguQKfBaVLl8nFsnSZJ06WkaOMJv9z7KQGIIgCUV87ll+jUyYHIa2g49QnSoBYC8kvkUV1+RHd3tb+7+OI8+/DCO7TCzfCrLbvwr6mfVUViQuVmgqAqaKl/ji5Vl2TQf7sNM27jcGrHmFgA8pSXofv9ZbTvHE+KqupW81DEAwNQ8/xm/V17Y2U5nXxSfx+DWVW8980WSpPNLBk0kScoSwmGoZy+9bVuxzQQAtpXEcSw5fLAkSdI4SFlpHm94nq1HM1kSOZ4gt82+memF8gLrdOUWzyUe6aSs7lpCBfXZ6R19UbSwgiLgqiU3M/3yO8jJ8WPoKsmEScCQAwRf7FobBzDTNoGQm5KyEB3bRrrm1NW+5W06wkE9rmvy8vJ8ZhUEMR1xRtsZjqZ4fGsLALeursPvlRm8kjRRyasgSZKATF/vntYXSCUyd0xcnlyKq64gkFcr72RKkiSNk9ahtmzAZFnlQm6efjUe3f0ma13aHMcinRjE4y8CIJg/lfpQBZruwXEcurq6KCsr45dPHMRZcjvvLV5E8ZTpuF0602YVs+qaegwZMJkUDh/IdMWpn1mMoipEG5sA8J9FPZNHDj5Db6yf66ZeQVVuBQAh95kHPP64sZFU2qK6LMTyObL4qyRNZDJoIkkSw7376Wh8EgBNc1M4ZQV5xfNQVHnSKEmSdKEJIbLB6umFdaytWcG0glrqC2rGt2EXgXRikLaG9VjpKLXzPoDhzhTm1HQP/f393POJT9JwYD+/e/55brisgvUtQ+TUzsIwNFZdPZXpc0rkjYJJIpkwOdoyCED9zCKEEASnT0NRVfy1by3TJGWl2d6xm5SVYk3N8jGf1TMhhKA4z4eha9x21TRU2QVMkiY0GTSRpEvU8Qf6YH49RtuLBPOnUlixDE33jHPrJEmSLk0N/c2sP/QsH7nsPeR4MkUhb5p+1Ti36uIw3LufrubncBwTTfdgpsPZoMm2LVv4q7s/Tn9vDx5d56U//AE7tJQ8n4ucPC/X3DKTgqIzH/lEmriaDvUiHEFhcYC8gkz9kvK33TJmmXQ6zdNPP80zzzzD7t272bhxI9dccw3XXnstLpfrhG3u7NpLykpR4Mtjan41v97fjiMEV1UXURo4/XMnRVG48fIa1lxWgc8ju+VI0kQngyaSNAnFw0fJN/YRD88jp2BsCqrjWAx07iA21ErV7HejKCqqZlC34EOybokkSdI4SZpJHj30LNvadwHwTNNm3j37pnFu1cXBsdN0NT/PcN9+AHyhCsrrb8RwBXAch3u/8e987zvfwUqbFAeC/NPff44bPvpRYjGTV188woo1tbjc8vg32Rze3wtA/azik87fuHEj99xzD62trViWRTqdZteuXfzqV7+iurqar3/966xZsya7vBCCF4++CsCKykWYtqBxKIrjwPV1J3+ONyMDJpJ0cZDjp0nSJCOEoK9tKx5tmL62rQghRqY7DPcdoGnnT+g9uoV4pJ3IQFN2PRkwkSRJGh8H+xr5j633ZQMml09ZzC3TrxnnVl0ckrFemnf/aiRgolBYuYKqWe/GcAXo6+3lw29/B//9rW9jpU3m18zifev+gzV//mEUVSUQdLPmummXfMDkk5/8JIqi8N3vfne8m3JOLV5ZxfTZJUydnqltEz9yBDuVAjIBk7vvvpvm5mYKCgqorKwc87u5uZm7776bjRs3Zrd3ZLidzkgPuqqzuGIeTUMxHAfyvQYF3tOrMzQYTvLfv95Ja2f43O+wJEnnzaV9lJCkSSg23EpsqAVHaMSGWogNt6JqLrpbNpKMdQOguwIUT1lFMF+OviBJkjReEmaSRw89wyvtrwGQ78vj9tk3U5dfNc4tu3gMdu8mnRxEN/xUTLsRX6gyO+9Ln/h/vLj1JVyaxtrL38mM5bfjNVz0doSprisYx1ZPHI888ghbt26lvLx8vJtyzlVU5VFRlQeAY5o0/Pf3QMDUv/8b7rnnHsLhMOXl5SiKkr3BBOByuSgvL6ej
o4PPfe5zPPfcc7hcLl48ugOABaWz8BleDg1k6qVMyz/9bl1/2NBIU/sQf9rUxLrbF8j6OZJ0kZBBE0maRIQQ9B7ZghAWDgaOY9Ky9zeoqoGiKKiqQUHFEvLLFsnMEkmSpHH2QutLvNL+GgoKK6uWcEP9Glz6iXUUpFMrqb4CRVEorFyObviy059++Qj+ue9k1oHDzLvqz6ionoOha1TV5lNaHhrHFk8c3d3dfPKTn2T9+vW87W1vG+/mnFfx1iMI28GVG2LDtpdpbW2lqKjolEELRVEoLCykpaWFp59+mjXXruW17kz3r8unLMYRgsbBGADT84On1Yb9zQO8drgXRVF491X1MmAiSRcR2T1HkiaR2HAr0aFmVN0DKGi6FzM5hGOb5BbPZerCD1NYsUwGTCRJkiaAK2suZ3phHR9f+gHeNvNaGTA5DYloF51NzyCEA4CqGZTWXoVu+Oju7OQ7X/kKQgh0XcUMlHDlh/6d6rp5uAyNZatruP7ts3FfBHUkfvazn/Hxj3+cJUuW4Ha7URSFBx544A3X2bZtGzfffDO5ubn4/X5WrFjBr3/961Mu/9GPfpRPf/rTzJs37xy3fnz19UTZuqGJvp5odlq0aWSo4dpaNmzYgG3bYwq9xuNx0un0mO243W5s2+b555/Hrbl416wbWFw+j8qcMtojCeKmjUdXqQx637RNpuXw0HMNAKy5rILyQll0WJIuJvLKSZImCSEEXc0bsMwEhieTjqpqLhRVx+MvpLT2anlXQ5IkaRzt62lgR+ce7pz/DlRFxaW7+LNF7xvvZl0UhHAY6NxBz5HNgMDtKyS/dEF2/nPrH+Pzf/VXhMNh8vPzWXHVbUxVdQxdwed3cfXNMymrzBm/HThDX/jCF2htbaWwsJCysjJaW1vfcPnnnnuOG264AY/Hwx133EEwGOShhx7ife97H0ePHuVv/uZvxiz/3e9+l1gsdsL0yeDQ3m727uwgEUtz9c0zAYg1NQPgr6tj6IUNJ5wPdXd3E4/Hcbvd5OQce58oisLQ0BCGZrCkYgFLKjLvuUMDmYDM1Dw/2mkMF/zsK0fpH04Q8ru5cUXNudhNSZIuIJlpIkkXOSEcokOtNO/+OeG+/QjHwrGSQOZgrxt+EpEOYsNvfMIlSZIknZ3GgVY2pF+lcWDs9208neBXux/mJzt/y+7uA7zasXucWnhxsswEbQf/RM+RTYAgmD+NnMIZmXmWxdf+7u/5xEc+Sv/AIDWFhSyaMRNFUTBUhYqqXN79wcsuqoAJwH333UdLSwu9vb184hOfeMNlLcvi7rvvRlVVNm7cyA9+8AO+9a1vsWvXLqZPn87nP//5MUGXAwcO8OUvf5kf//jHqOrkuhRwHEHTobGj5gjbJtaS2f/A1Fpyc3PH1DABmDJlCnl5efj9/jHThRDk5uae8Dwlfg+1uT5mnEbXnP7hBE+/nHn+d6ydiucSLzwsSRejyfVNKUmXENtKMdC5k6ZdP+XI/t8T7juEQKBqblTtWOqxqrlwHGuk1ol4gy1KkiRJb5UQgscbN9DtDPB444bs9+2e7oN8e8sP2dm5F0VRWFOznAWls8e5tRePeLiN5td+TnSoBUXRKK29moppN6HpHtpbW3nv1ddy//0P4NgOi+rn8Y//+wtmX30VU2cUccM75nDTu+bi9V183Z6uvfZaqqurT2vZZ599lsbGRt7//vezcOHC7PScnBw+//nPk06n+fGPf5yd/uKLL9Lb20t9fT26rqPrOq2trXzmM58Zs/7FqL11kETcxOM1qKzKBSDe1o6TNtH9PtzFxVx55ZVomjamO46u6/j9fjRNy05LpVJomoZ3ag6bj2wjaaWy8+YWhfjA3CpmF715fZwtr3Vi2Q7TpuRx2chIPpIkXVxkqFOSLkKOY9G48wHskYwSyJycuzy56IZv5GQ9AWSyTTTdS3SomdhwK4HcmnFpsyRJ0mTW0N/Mob5GdDQO9TWyq2sf+3obeK0rUzyyOFDIe+bc
wpScyTdKyfky2P0aXc3PAwKXJ4+KaTfh8WcuOp97+E/87ac/QzgSxWsYrL3y/cxf9Q4WXjYju35VXf74NPwCe/755wG4/vrrT5h3ww03ALBhw4bstHe+850sWbLkhOU+8pGP8NGPfvT8NfQCOHwgk2UydUYRqpa5NxxrHKlnUleLoqrZgFRzc3N29JzXE0LQ19fHlOoqEpWCPx14mukFdXj00xta+Hi3rKqlOM9LTXmO7CYtSRcpGTSRpIuAEA7xcDv+nCkAqKpOIK+OZLSL3OL5DPXsxkyF0fSTFyNTNRemlaD3yBb8OdXyoC1JknQOCSF4vGEDpmPhwYXpWPzfqw/i1T2oqsqVNZdzTd0qdE2edp0Jb6AURVEIFc6ktOaqbBalaTns3rqPSCRKRV4hV97295QU1YIl2PVKG5evrRvnll9YDQ2ZAqPTpk07YV5paSmBQCC7DEBubu4JXU4Mw6CsrIz6+vpTPk8qlSKVOpZtEQ6HATBNE9M0T7u9o8ueyTqntd20TXNDL0IIaurzstv3zZpBqabiystMUxSFf/3Xf+WTn/wk7e3tmKZJMBhE0zSEEKRSKfr7+wmFQty27g4G9AR1uVXkukKYpknjUIwSn5uA6/Q/z4tmFJ6Xfc7u+3l6TS8Wcv/l/h//+0zXOx3y6C1JE5hlxhnq2ctg92tY6Si18+7E48/00S2tWYuiGsSGW4kNt6Lp3jccOk9mm0iSJJ0fDf3NHOg7jM/wkkon8RkeklaKYn8hH1n0HipDZePdxIuGmYpiuDMji3j8xdTN/yAub6a4eTqdxuVy8fDGRpqLlnHrTR+lZsF1eDQXiqIwf3EFS1fVjGPrx8fw8DDAmAKmxwuFQtllzsbXvvY1vvSlL50w/cknn8Tn851kjTf21FNPnXWbjhcegP5+FcMNL29/gRNOiYYGobkp+/DDH/4w3//+9+nr6yMcDhMIBOjv70fTNIqKivjAhz5Iu6ef9LCJiKVY37keC4UXtSIECkvtXjw4p2xPdxgK/KBrp1zknDvXr+nFRu6/3P8zEY/HT3tZGTSRpAkoEe1msGsn4f5D2WEVNd1DOhnOBk1UzYUQgt4jW7DtNIbmxrEzEdNM9xwHxzazgRRFUbHstMw2kSRJOodGs0xSVgrNyHQHcGsu4maCkDtARbB0nFt4cRCOTe/RrQx07aR6znvwBkoAsgGTP/30p/zb17/O//32t1y/YioNh3rJXXwrGuBy66y9YTo1UwvGcQ8ubi0tLW+6zOc+9zk++9nPZh+Hw2GmTJnC9ddfTyj05rU9RpmmyVNPPcV1112HYZy74Z8P7unm1dRRZs4t4bLlU950+Ztvvhnbtvnxj39MSUkJfr+fOXPmcNVVV3HNNddwYLCRX+99hGJ3gI+s/ACqorK/P8K+hm7yvQbvXrD4lNvuHUrwrV/sIJAw+Mx7FxD0n9+6OufrNb1YyP2X+/9W9n80W+50yKCJJE0gZipMe8N6EtHu7DSPv5j80oUEC6ahqmM/skLYpFPDaJoLx04dNx1UbBw7PeZOi6a5SKeGEcJGUeTHX5Ik6Ww19Dezt+cgKTtNyjbxYKAoCn6XjwN9jTT0NzO98NLqLnKm0slhOg4/TiLaBUBsuDUbNEkmEvx/f7mOPzz6KKqi8IOvfJVPfvE/KEjYABSWBLj2llkEczzj1v7xNpphcqpsknA4TF5e3lk/j9vtxu0+saaHYRhv6ULtra53KnMvq2T2ggps28EwMukdkUMNpAcHCU6bhit/7GsghGDbtm3k5uby5S9/mUgkws0335xt07a9u1AUhRVTFuF2Zfa7OZxEURRmFIRO2XYhBH/c2IzjCMqLAuTl+C7Yjapz/ZpebOT+y/0/k/0/k2XlVZMkjTPHsbLBEN3wY6YiKIpKqGA6eaUL8AZOfZdSVXXq5n8Q20qMmW6aJhs3bmTNijUnfCFouu+E4IskSZJ05oQQPLRvPeFUFFVRcesGmnMs2ySWjvN4wwamFdTK
7L5TCPcfprPpaRw7haq5KKu7jlBBpq7G4d27+dRdH6Hx6FGEgLddfS1f+f7/ohgecvO9lE/JZcWaOjT90h4McrSWSUNDA4sXj81+6OrqIhqNsmzZsvFo2gWnqgqqeqw/TP/WFxnatZuym2+g5Nprxizb0tJCR0cHhmGwZMkSnnvuuey8rkgPLUNtKIrCsooFADhC0DAYBWBafuCUbdjV0MuhI4PomsptV02Tn31JmgTklZMkjQMhBPFIO4NdO0nGepm68MMoioqialRMuxmXNw/dOL3+wYY7iOEOjpmmmSaW8OHxF1/SEWdJkqTz6fnmrezuPoiiKHh0N/neXCJmJt33WLbJYZltchKOY9HT+gKD3a8B4A2UUF5/Ey5PJmviN//7v/zrl79CPJnC73Jz1S1/wce++Clcfj8A77zzMgzXBSwWMYGtXbuWr33tazz55JPccccdY+Y98cQT2WUms+7OMMUlQRT1WIBCCEGsqRnIjJzzeps2bQJg8eLFeL1jC+krisLckhmoikrIkznHagsnSJoOHkNlSujkhfeTaYs/bGgE4OqlVRTmnnw5SZIuLjJoIkkXkGOnGe47yGDXLlKJ/uz0RKQDX6gSAF+oYryaJ0mSJJ2mQ31N/GzX7xHCwWd4KfTnjY7+niWzTU4t3HcgGzApKFtM0ZTLUUYyBH7zX9/lC1/+FxwBVcUVXPfBfyLPW0iiNw5VmWGEZcDkmGuuuYa6ujp+8Ytf8OlPf5qFCxcCme46X/3qV3G5XNx1113j28jzaGggzsO/2kUg5Oa9H16SzTxK9/VhRqKouo5vyok1TkaDJqtXrz5hXkmgiA8ueDeOOFbodTTLZGpuAPUUn+UnX2xlOJoiP+Tl2qVvXldFkqSLgwyaSNIFYKYiDHS+ylDvPhw7DYCi6uQUziK/dD5uX+E4t1CSxs9//Md/cN9999Ha2oqu6yxatIivfe1rLF++fLybJkkn1R3t5fvbfk7CSuDWPRT681FQEK+Lmshsk1PLKZpNPNxOqHDGmBHdWjvDNGgzqS4oobBqNle+61MoqczrOjyYQAhxSQSf7rvvvuxF/e7du7PTnn/+eSBzof8Xf/EXAOi6zn333ccNN9zAmjVruOOOOwgGgzz00EO0trbyzW9+k5qamvHYjQvi8IEeAPIL/GO6akVHskx81VNQX5d16zgOgUAAl8t10qDJKFU5tr2GgUzQZPopuuZ09cfYsKMNgHdfVY9xIYfNkSTpvJJBE0m6ACwzxkDXTgBcnhzyShaQUzQLTb90C9dJ0qjq6mq+/e1vU19fTyqV4j//8z+54YYbaGxspKBAjoYhTTxFvgI0VUVBIc8bwrQtINMdwBY26eNGLlMVlZSVuuSzTRw7TV/7KxRWLEXVDBRFpbz+BiDzuq3/1a+47rbbiCZMek2dNZ/8D8oNH3bKRtNUVl09lRlzL52RiDZt2sSPf/zjMdM2b97M5s2bs49HgyYAV111FZs2beKf//mfefDBBzFNk3nz5vGNb3yD973vfRes3ReaEILD+3sBqJ9VPGZerDEzvLC/9sSuOaqq8q1vfYtUKoXb7cY0j40++HzzVuaWzKDQlz9mnbvmVdM4GGVqnv+kbfG6dRZMK8K0HObUyWOXJE0mMmgiSeeYbSUZ6t2HsE0KKzN3yr2BUvJLF+LPrcafU4WiXNpF6yTpeO9+97vHPP7mN7/JD3/4Q/bs2TPp++FLF5fRLAdHOHgMDzmeECkrffwCmNikrBTHD13m1t0MJAaxHRtdm7ynXvHwUfKNfcTD88gpOJZVk4z10t7wGOnkILYZo2zqddl5kcFBvvDnf8FTmzdz57ZtfOFb3+LK+mJ6Dvdjp21y8rxcc8tMCopOXXhzMnrggQd44IEHzmidZcuW8dhjj52fBh3n3nvv5d5778W27fP+XG+muzNCJJzEcGlU140NcoxmmgSmnjrDa3Q0oMaBVjakXyV4tIgnGzfwbNNmvrD207j0Y0MF+wyNecU5
p9xWTsDNXTfPxrKdUy4jSdLFafIeuSXpAkvGehnsfo3hvv0Ix0ZRdfJKF2SzSUpq5MWfdHH62c9+xgsvvMD27dvZvXs36XSa+++/n4985COnXGfbtm388z//M1u2bMne8fzsZz/Le9/73jd8rnQ6zQ9+8APy8vKYN2/eOd4TSXrrtnfsZl/PId4//53oms5nV95NLB0fs4xlWWzYsIG1q9ei62NPsQIu36QOmAgh6Gvbikcbpq9tK6H8zN39oZ7ddLdsRAgb3fCTUzQru85rL7zAZ+7+OO19fRi6Rkg3GBqI09s0AEDd9EKuuHYaLvfkfd0uRuvWrWPdunWEw+HscMfj5fD+TNecmvoCdONYdxgzHMEcHERRFXw11WPWcRyHnp4eSkszmUtCCB5v3EC3M8CfDj6FSzNYUDp7TMDkjTiOQD2uAK2uyRtjkjTZyKOQJJ0F4dhEBhsZ7NpFPNKRne72FZJXMh9FDu0rTQJf+MIXaG1tpbCwkLKyMlpbW99w+eeee44bbrgBj8czpm/9+973Po4ePcrf/M3fnLDOCy+8wE033UQikaC0tJSnnnqK/Pz8k2xdki68l9p28Pt9jwOZ4MmyyoXkekLkekJjljNNkxw1QHmw5JIbuSw23EpsqAVHaMSGWogMNBDubyAycBiAQG4NZVOvQzd8OI7D/f/6Ff7je/9DyrQIev3c+cl7+Mw/fhyAy9dmMgNmLyi7ZLszSW/Oth2aDmW65kx7XdccIxRkzpf/P5IdnWgj2SSjDhw4wF133cX8+fP50Y9+REN/M4f6GtHR6I71UuwvZMWURdnlk5bNr/e3UZ8XYEVF/glFYP+4sZFIPM3br5hKbnDsc0mSNDnIKzpJOgv9ndvpPbp15JFCqKCevJIFeIPl8kRPmjTuu+8+pk2bRnV1NV//+tf53Oc+d8plLcvi7rvvRlVVNm7cmB3F4Ytf/CLLli3j85//PLfffjvV1WPv/C1ZsoSdO3fS39/PD3/4Q9773vfy0ksvUVgoiyRL42vzkW386cDTAKysWszSigXj3KKJRwhB75EtCGHhYODYaVr2PIjuCqIoKsXVq8kvXYiiqAx1dfP3H/4oz29/BSGgtrKeW+7+Eu+97Vjh5zkLy8dxb6SLRceRIVJJC5/fRXll7gnzdZ+PQP3UE6aPFtgdrZn1eMMGTMdCIfNethyb8mBJdvnGwRhHhhPETJuVlWNrlbT3RnlhZztCCJbPKZVBE0mapGT+mCSdJiEE8UgHiWh3dlpO4Wx0V4DCimXUX/ZnVEy7GV+oQgZMpEnl2muvPSHIcSrPPvssjY2NvP/9788GTABycnL4/Oc/TzqdPqG4IYDX66W+vp7ly5dz3333oaoq999//7naBUl6SzY0v5gNmKypWc7bZlwnv99PIjbcSnSoGVX3AAqa7sEy46iaQc3c91JQtihby2vb89t4cecuQGHFFe/gw3/7HXymhxeeOISZHv8aGdLFo7Imj3fcuZBVV09FUU//c3n8UMMN/c0c6DuMz/BiYqMoCkkrSUN/c3b50aGGXz9qjuMIfvtMA0IIFkwrYka1zI6UpMlKZppI0ptwHItw30EGu3eRjPXiz6mmatY7ATDcAeov+6gs7CpJI0aHw7z++utPmHfDDZmRMjZs2PCm2xFCkEqlzmnbJOl0CSF4tmkzTzW+AMDVdau4buoVMmByEqNZJo5tolgqajiMcBWiai5c7hAe/7E79q8d7mV9Z4C1196Bf8p05tYvJN6XQFFgxtxSdF0eS6XTpygKxaVBIDhmuhWN0vLAT/FPraX0xhvGfG4HBgbYt28fACtXruSXDY9g2ia6ouHgoCs6Ckp2tCsBHB4JmkzLGxs02bavi5bOYVyGxjvX1p/XfZUkaXzJoIkknUI6OcxQ926GevZg25mLN0XR0F1+hHCygRIZMJGkYxoaGgCYNm3aCfNKS0sJBALZZUb9wz/8A29/+9uprKxkYGCA733ve7S1tXHbbbed9DlS
qdSYgEo4HAYy9SRGh408HaPLnsk659tEbNOZuNjbP2ooGeb55q0IIbi2bjVXVl+OZVlvut5k2f8zER1spq9zL1tfaeCVV1oJD8UI5fhZsXI2yxepKHs287W/+wp3fvpTLL/qGsoKfBTd+gGcnjjxcAqf38WVN06jpDyEZVtwESebvNX//6X0frkQok3NRJuaseJxym66ccy8LVu2ADBz5kwGlQgH+g7jd/mwHBsFBb/Lh0d3c6DvMA39zbiNUpKmg8dQqQx5s9uJJ03+tCkzpPENK2pktxxJmuRk0ESSTqKndRP9nduzjw1XkLzS+eQUzUE3vG+wpiRd2oaHhwFOOaJCKBTKLjOqo6ODO+64g56eHvLz81m6dCkvvPACs2bNOuk2vva1r/GlL33phOlPPvkkPp/vjNv81FNPnfE659tEbNOZuNjbD1DrlBAWMeL7Blm/b/0ZrTsZ9v90GEqEIwef4Hv3P0tn9zCOLUbqQsBjL+wjL8eLS/lfzGGLnZ/6NH/7ta8TGnLR15+58+8LCrwFgu07u2HnuO7KOXWm//94PP7mC0ljPPWnfbg9Bpctm0IwxzNmXqz51EMNj3bNWblyZaaWiW0SdPlxa2AqKYIuP6qiEkvHebxhA7NLbwYyWSbHF4B9dHMzsYRJSYGftZdVnK/dlCRpgpBBE0kCbCuNoiioWma0A5c3DwB/aAp5pQsI5NXKjBJJOk9++tOfntHyn/vc5/jsZz+bfRwOh5kyZQrXX389oVDoDdYcyzRNnnrqKa677roJM9LJRGzTmbiY2+8Ih+FkhDzvWx9C9WLe/zNhW0n62rbw1BPr+fp31hONpcgLetE1FQEoAgbCcZqP9KOqCjPKy/ne/f/H/OUreOLh/XTawyxcWsnCpZVnVItionur///RbDnp9MQiKVob+xECLls25cT5jZmgib+udsx0y7LYujVTvH/K3Foe69uE3+VDURSEECgoqIqKomQyTg70HSZJD2CM6ZqTTFvsa84Mi337VdPQ5BDDkjTpyaCJNCnFw0fJN/YRD88jp+DEOw2jUvEBBrt3Mdy7n6KqVeSXZkZFCBXOwBsow+2TRb0k6UyMZpi8PptkVDgcJi8v76yew+1243afmAptGMZbulB9q+udTxOxTWfiYmu/Ixz+uO8p9vUc4mNLP0BpoOistnex7f+ZiAw00dn0NPF4mP/8n0zApLgwhLBsBAq2A/2DYRJJE01VUDQV26sxb/lyXG4X194yi4G+GBVVZ/c9MJGd6f//Ynuv3Hvvvdx7773Y9vj0pTp8sBchoLQidEKWiZ1MkujoAMBfOzZoIoTgn/7pn3j55Zdp1DowbZOAy0fKSuPSxv4P3JqLWDrJYLydfH8ddXn+7DyPS+cf7lrCvqYB6qfknp+dlCRpQpGhUWnSEULQ17YVjzZMX1umT/rY+Q6RgUaO7PsdTa/9lMHu13Ack9hwa3YZVdVlwESS3oLRWiavr1sC0NXVRTQaPWm9E0kaL47j8Js9j/JK+y4SVpLuaO94N2lCUzUd20qw7dUWOruHyc8LgJM5ztqOoKNniHjSRFGgIC9ARUkePb39PPLHXwDg9bkmdcDkUrBu3Tr27dvHtm3bxuX5D+/vAaB+ZvEJ82LNLQhH4C7Mx5U7NmvMMAyuvfZabv/YHRwaaMI/EjDpjQ/QFx8Ys2wm28TDcPwF3jZVw6NrY+Z7XDqLTvL8kiRNTjJoIk06seFWYkMtOEIjNtQyJhjS37Gdxh0P0HboEWLho4BCMK+OqlnvonL628av0ZI0SaxduxbI1Bd5vSeeeGLMMpI03mzH5pe7/8iOzj2oisqd897BgtLZ492sCcVxLBKRzuxjf04VFdNuYduuFhzHwdA1hHAQioKqaXg9BoahU1qUSzDgRdc1HNtm/Z9+e8JNDEk6UwN9MQb6YqiaQt30whPmx5pGu+acPMtYCMHjDRtIWSlURSWciiKEQFM0bGGTts3sj6qopKwUTx7eiBCCaDzN
y3u7cBz5PpakS43sniNNKqNDHwph4WAghEXvkS34c6pRFIVEpBMzHUHTPeQWzyWvZB6G+/RrIEiS9MauueYa6urq+MUvfsGnP/1pFi5cCGS663z1q1/F5XJx1113jW8jJQmwbIufv/YH9vc2oCkqH1jwLmYXTx/vZk0oseGjdDU/i5WOUbfgQxjuzNCugbwahgaHAOgfiBD0udEMHQVBQZ4fAccVzRQoikokOowQNooiTz2lt240y2RKTT5uz8m6NQk0r4fA6+qZdHd38/DDD7Ny1Ur6EwO4dTdxM04qOzqiQhqblJUCRUGgAQZu3c1AYhDbsfnTpmZe3ttJc8cw77tuxnneU0mSJhJ55JImldhwK9GhZlTNg0IUxxZEBhuJDbcSyK2hoGIJgbw6QoXTUVX59pek03HfffdlRxzYvXt3dtrzzz8PwOrVq/mLv/gLAHRd57777uOGG25gzZo13HHHHQSDQR566CFaW1v55je/SU1NzXjshiRlmbbJT3f9jkN9TeiqzocWvpsZhVPHu1kThmXG6Wl9geG+AwBohhczNZwNmpiRGOGuNOHhBKqikIrZBHOKIVMGFoECCqiOhdtJoiouptQtkcdd6awIR3D4QCZoMm3WybvGlN1yM6U33YhwnDHTN27cyPe//322bdvGv33nm8TScTY0b+WVjt3U5U3hbdOvY8OGDaxdvRZd1zk8mOSZ1gg1OS5um1HG0e4YL+/NZFwtm1N6fndUkqQJRx69pElDCEFP6yYsMzGSamnhOCpYqWy2iTdQijcgD3aSdCY2bdrEj3/84zHTNm/ezObNm7OPR4MmAFdddRWbNm3in//5n3nwwQcxTZN58+bxjW98g/e9733nrF3jXYxQungJIUhbaQzN4MMLb6e+oGa8mzQhCOEw3LuPntZN2CN34PNK5lE0ZSWa7sExTZ6//wG++V//xe6jR3GEQFdUXK4AtjPa4/vYaDiKUDBTaYQQXH31deOwR9JkYtsO0+eUcrR5gKraU9edU1QVRR1bgWA08L969WpyPSECho+GgRZcmsE1U1dTHiwhRw1QHizBMAxe7uzApRpU5+QRcAX5wbPbAVg2p4za8rc+wpYkSRcnGTSRJgXbStPV/AxDvXtBCFBUQMFwBVBUjehQczbbRJKkM/PAAw/wwAMPnNE6y5Yt47HHHjs/DRqxbt061q1bRzgczo7aI0mnw6W7+Mii99IXG6Ayp2y8mzMhCOFwZP/viYfbAHD7CimrvQZvsBQhBPuefJJvf/krvHDgAI4jCHkD4DjEbAfd5UchU+chM3Br5i6/jUYkbVHu97G88sShYSXpTOiGxpKV1SxZWX3S+XYqhXaSkdWSyWS2aO2qVasA2N1zgISZJMcTYkbhVGzrWPDddgSNg1EApucH2LyrnY7eKD6PwdtW156wfUmSJj9ZCFa66Anh0LTrp/Qe3YoQDoqqY7hDWMKD7vJn7o451kitE1m8S5Ik6VKUMJNsa9+VfezR3TJgchxFUfEGSlBUneKq1dTOvQNvMJOZ6TiCH3/v/9iwbz+2UJg67wo+9/YP8NHV78LlDhKODWJZFooQKMJBEQLLMgnHBnG5/fz5nPk0//FxeQyWzquW+3/Mvi9/lcihsaO3vfLKK6TTaUpLS6kbKRB7sK8RgOWVC1GVsZdDbZEEScvBZ2gEFJX1W1oAuGVVLQGf6/zviCRJE47MNJEuSraVRtUMFEVBUVQ8gWIig40YRhDDHRi533WsuJeme2W2iSRJ0iUqlo7zo+2/oiPSTcpKs7p66Xg3aUKIDrWiGz48/iIACiuWk1cyH8MdItHbRzgRp6Sqiu//fjfKwndT2zXIwnf+GbesWMSO320mOG0hbwvO5OlNDzAU6SXmOCgoCASqqpIfKuLGVe9nimuY3iMdOKaJ5pIXndKZ6+4Ik4inmVKTj6afeM9X2DaxllactIkeDI6Zd3zXHGWkQPF7576NJeULKA0UnbCtQwMRAKbm+Xl0czOp
tMWUkiAr5sogqyRdqmTQ5ByQ/eovHMtMMNC5g8GunVTOfDv+UCVCCMxUGE13o7sDoCiZLjrHUTUXppUYM5KOJEmSNPlFUzF+uP2XdEd78bt81OefPLX/UmKlY3S3biTcfwhvoITqOe9FUVRUzUCkLZ78zn/zne9/Hz0/n99teJ4ls0vo7Ivxd//2PWLtEQ6+2smwtxDHcphSNY8Pvu+rNB3ZRevRPSRSMbxuP9VT5lJXtQBDc7FTtdHLfFyn6mjjvfPSRWnXK220NvazcNkUlq6qOWF+vK09EzDxefGUHCsSK4TIBk1Gu+YAqIp6ylpGDQPHuuZ4gwG6B+LcfvU0VFWeO0rSpUoGTc4B2a/+/LPMOAMdrzLY/RqOYwIQ7juIP1RJbLiV2FALmu49ZTBEZptIkiRdeoaTYX64/Zf0xQYIugPcvfhOigOF492scSOEw1D3HnqObsax04CCN1CGEA44gr2PrOc//+0bvHCoAUWBUDpN4+HDzKqsYrgyj5btHQAYLp2lK6rZv6sLTVcxXEHKy69h1YqriUaiBIIBlJGCsGbaxrYdbr5xPsZJMgQk6c2kkiZHmwcAmDrjxMwQgFhjEwD+utoxRWB7e3uJRCK4XC6WLl2KZVsA6NrJL4H6E2kGEiaqCnW5fty6xmfvXCRvtknSJU4GTaQJzUrH6O/MBEuEkznQefxFFFYsJ5BXixCC3iNbsO00hubGsTMBlUy/aQfHNrMHOkVRsey0zDaRJEm6BAwmhvnh9l8yEB8kxxPi7iV3Uug79Ygbk10y1ktX87Mkol0AePzFlNVdg9tXRPeOnfzPv3yZ3734IknTwlE1Zl52JT/6yX9RWlJMf2+U1sN9AEyfXcLilVU8+fB+HNvB6zOy4+UIkRk7RxWZpE8Al0sjFrHYv6ODmbOK5bF3ErjQGdZNh/pwHEFBkZ/8Qv9Jl4k2NQPgH6lZMqq4uJhnnnmGpqYmPB4P29p3sf7Qs1xVu5I1NctP2E7IpXP7rAp6o0nceiYvSr5nJUmSQRNpwhJCcOTAH0jFMydqHn8xhZXLCeTWZg9gjmORTg2jaS6ckeERM+uCio1jpzn+WKdpLtKpYYSwURT59pckSZqM0laaH7zycwYTw+R5c7h7yfvJ9+aOd7PGTTzSQeve3wICVTUoqlpJXsl8FEVlz+//yMf+6q/oi8VwUCiqmsmK29fxvhtXUjrSzaGgKMDyNXWUVoQoKgliWw7hoQQut46ZPnbhLITAsSGdtsdcaLrcOpHhBI4t0HR5AXqxu9AZ1of39wBQP7P4pPOF4xBrzgRNAnUnjm6j6zrTp08H4KWjO0iYyVMWJTY0lVKXi5+uf43YwhjXLatC02SGlCRd6uRVozShmKkImuFFVXUURSG/7DKGundTWLn8pNkhqqpTN/+D2FZi7HZMk40bN7JmxRoMwxgzT9N9qKp860uSJE1WLt3FFdXL2HJkO3cvuZMcT2i8mzSuvIFSPP4iDHeQkuorM0MEKwpDkRQ7qcRjePHkBph340e44523YvfG2bOplZm1BeTm+wCYt6giuz1NV7n9rsUk4+aY57Eskw0bN7J2zWJ0feyx1+szTlrAU5LeSGQ4SVdHGEU5ddecZFcXdiKJ5nbhrSjPTnccZ2TAgMy5Y9twJ23hTjRVY3HFvDHbeOFQN+tjBQQPdXOkeZh40uRg6yDXL5c1kCRJkkETaYJIJ4fp73iF4d59lNSsJa9kPgA5hTPJKZz1hqmRhjuI4R5bKV0zTSzhw+MvPiFoIknS5CCLcEtvZGXVEpZULMClXXrHADMVpb9jG8XVV4zchFCpnn0b2LD393/gJ7/5Df/8wP30D1tsb4kw/YP/wocX1FOquTiyO3NXXzc0Bvri2aDJ6wWCbgJB99jnNU08PigoDshjr3ROHD6QeT+WT8nF/7r32yjVcFF0xSqEECjasVLDzzzzDN/97ne57bbbuOuuu3ipbQcA80pmEHAd6+bjOA6PHOzCCnj4zaFO
9LY4hqJw+zWy+KskSRkyaCKNq3RymP72lxnq3Q8jAwUnIp3ZoImiyLtSkiSdnCzCLR2vI9LN+oPP8v4F78RneAEuuYCJEA6DXa/Re3QLjmOiam6Kq1YiHIeOrdv4wde/wR+2v4LlOJT8+7/zt1/6Ejcsq0aPlnHkUB9HrAiKAtNml7B0ZQ2+gBweWBpfA30xAKaeomsOgLuokIp3veOE6Zs2baK9vZ3BwUGSZpKdXfsAWF65aMxyzx/oIqwIQME0VKyQzlVVJVQUBc7djkiSdFGTQRNpXKQTg/R1bGO49wCjwRJ/aAqFlcvwhSrHt3GSJEnSRaVtuJMfvforEmaSxxue592zbxrvJl1wiWg3Xc3Pkoxl7sx7A6WECqYRPnyYX33j33ngySfpi8WwUVm+cgW33nEHwhFEGwcY7I8DUFoRYsXaOopKgm/0VJJ0wVxzyywWrYgRCHrOaD3HcdiyZQsAq1evZkfXXkzbpDhQSE1u5ZjlHjnYCQrZgsa2rnDDiqpztQuSJE0CMmgijYvu1o1Eh1oA8OdUZ4IlwfI3XkmSJEmSXqdlsI37dzxIykpTlVvBzdOuGu8mXVC2laa3bQuDXa8BAlVzUVy1moC/hqe//V/c+8tf0tDbh+0IPHmlzL3hw/z9Zz7EzOmZO/fTZpewf1cny66opXZagRwpRJpw8gpOPmIOgBmJkOrpxVc1BfW4LmH79u1jcHAQv9/PggUL+O62HwOwovKyMe/x5w90EcZBEQJHUcARKD6Dl5r7uHq2PC+VJClDBk2kCyIV70czvOhGpm90YcWykd/L8QZLx7NpkiRJ0kWqcaCVH+/4DWnbpC6vig9f9h7c+qXVpaS7dQPDvZluB6GC6ZRUr0F3+TFTaR587CkOdveCJ8D01e/inbe9n0qXmwLXsYvLuQvLmbuwXBZplSYU4QhM08blfuNLlfC+/Rx98LcE6qdS/5cfz07fvHkzACtWrEDXdT4w/5283LaTy8rmZpc5Psskm2fiCBQFHjnYyZUzS1FV+bmQJEkGTaTzLBnrpa99G5GBBvJLF1JSsxYAb7CMKTNP7H8qSZIkSafjUF8TP9n5EJZjMa2glg8tvO2Sq2ECUFS5nGSsh+LKlQztPspQToz8Qh//9ZvdFF1xF5XB51nzjrtYUVNJZ+MA7VaceDhFZfUiFEWRwRJpQmo/OsSTf9zH9DklrL6m/pTLxRqbAPDXjB3lZtOmTUCmaw5Akb+AW2ZcM2aZ47NMMDQUIVAdEAjCisPzB7pktokkSYAMmkjnSTLWS1/bS0QGG7PTbCuZqWwuU38lSZKks2A7Nn848CSWYzGzqJ4Pzn8Xujb5T2mEYzPQtRMzFaa0NtMNSXcFyRXz+Nlnv8xPNzzPossv579/8XNm1eYzFJnBX69dSaQtTNvBPgDKKnNYsbZOHoulCa1hfw+27fBmb9NoUzMAgal12Wl9fX3s378fgJUrV550vTFZJqODDjgiE0ABhCqzTSRJOmbyn2FIF1Qi2k1f+0tEB5uz00IF0ymoWIrHVziOLZMkSZImC03V+LPL3svG1pd4+8zr0VXtzVe6yCUinXQ2P0sqngl+5BTNRgxZPPs//8P3f/97DvX2oekah9uOEolEWFCdR6JpkO6RYEkw5GH5mlpq6mXdEmliM02b1sZ+AOrfYNSc9OAg6YFBFFXBd1ymiW3b3H777fT19XEw1kxz20bW1CxnSs6xrJHRLBNsARogFDBtUFQURUFxZLaJJEnHyKCJdE6F+w6MBEwUQgXTKaxYittXMN7NkiRJkiaBSCpK0J0ZBrTQn39JjJJjW0l6j25hsHs3AJrmJr9wMXt/9gf+9/4H2NTchO0IhCfE4pvv5P++8wU8Hjd9XX0M9sUwXBqXLZvC3MsqZFcc6aLQ2tiPmbYJ5ngoLjv1SE6xkSwTb0UFmtudnV5SUsI999yDEIJvbf4BffEB6gtqskGT47NMHMtBWKBrAhwyARRAETLbRJKkY2TQRDqp
ePgo+cY+4uF55BTUnXq5SAeqauDxFwFQUL4E205RUL4UtzfvQjVXkiRJmuRead/FH/c/yYcW3sb0wlMflyYLIQSR/kN0t27EMjNDAucUzqK4ejXr/+Ub/NN9PyRhWqQ1F1WXXckVb7uLWy6fhdudKYRbU1/AsitqmTarGJ//0iqOK51f9957L/feey+2bZ+X7R8+kBk2u35m8RtmRY12zfHX1Z50fuNAK33xAdy6i4Wlc7LTR7NMhC0QI9MUhezfmccy20SSpGNk2FQ6gRCCvrateLRh+tq2IoQ4YZl4uI3WfQ/Ruvc39BzdnJ2uu/yUT71eBkwkSTrv7r33XmbPns3SpUvHuynSefbi0Vf57d71mI7Fwb6m8W7OBSEck67WDVhmHJc7lynT3k55/fVEkxpD09aA4SNYs4Dr7v46n/nU55jlyeHwK22kUxaQuehbsKRSBkykc27dunXs27ePbdu2nfNtJ+Jp2loGAaifWfSGy8ZOUs/kyJEj7Ny5E9u2ealtBwCXlc3NjqqVzTJRFYSmgKagaQqCTA8dB3AQOKMhFFXhkYOdOI5zbndUkqSLisw0kU4QG24lNtSCIzRiQy3EhlsJ5NYghCAebqOv7SXikXYAFEXFcAURwkFRZAxOkqQLZ926daxbt45wOExOTs54N0c6Tza1vswjB58BYFXVUm593QgYk4lwjtVUUDUXpTVXMtxykB2/2Mj/tD3Bv/38ZxxsHWBHv8aKu7/J5bNq8cctBhoHAAjmeIhGUrg9l94oQtLk0HSwDyGgqCRAbr7vDZetuvN9RJuax2SaPPTQQ/z85z/nlrffinm5F4BllQuz81OWQ8JxEAqougqaCraDEICiIl6f2eIIEo4gZTl4XfI8V5IuVTJoIo0hhKD3yBaEsHAwEMKi98gWFEWjt20riUgnkAmW5BbPoaB8CYY7NM6tliRJkiaj55u38njD8wCsrVnBjdOunLRFTOPhdjqbn6WwYik5hTNJ9vRw4Gfr+eGDD7K5pRlFU1n9+9/z9ne9m8aWQXyJPHqPhhmCTN2S5VXMXVgu65ZIF7Wa+gIs2yEQdL/psr6qKfiqpoyZtnlzJvs5WJtHn4hTlVtBebAkO9/r0vnLxbV896XDOI5gdn6QK2uLsSyLV155hSVLlqDrYy+PigIevC55ySRJlzL5DSCNERtuJTrUjKp7gCSq7iE61IwnUEoi0omiaCPBkqUYI8X4JEmaWBoHWtmQfpUZA3OYWVI/3s2RpDMmhOCZpk083bgJgGunruaautWTMmBimwl6jmxiqHcfAL2tL9Lz9C5+8qP7eXjvbhJpE9Pw8IEPf5DVa9eSSpqkmgeJWZnhWGfMLWXJymq8PtkNR7r4+YNuFiypfEvrtre309LSgqqqxIsz9VZWVF52wnLP7+sCQ8NvaHxsZT1+l4FpmnTssVhYlY9hyEwtSZLGkkETKWs0y8RxLBRUFGxU1YVlJUlEOykoX0Je6QIMlwyWTHTyovnSJYTg8cYNdDsDPN64gRnFUyflhaY0uQkE/fEhAG6cdiVX1l4+vg06D4QQhPsO0N26EdtKAuBRSlj/zT/yyxdfojcaI63o5NYu5LLrP8RdH7+VwsJMvbCpM4uIDCdZsbaOgiJ5TJYuPV1PPImroICcuXPQPB4ANm3KBFnnL5jP1TNX81rXAeaVzByzniMEYZ+Klla4srYYv0sGSCRJenMyaCJlDXa/xnDfARzHAlJoioMCaLqX2FALxVWrZMDkIiAvmi9tDf3NHOprREfjUF8jDf3Nl8RII9Lkoioq75lzCwtKZzGzaPIFftOJQTqbnyUebgPA7c2ntO4aFMvHIzv/lc5oAiO/kkVXv5/rr7iSPBtKgp7s+quuqkfVFPndLk0aQgg2PtVA+ZQc6qYVvWE3MzuRoPupZxCOYPYX//GEoMmaK9awunoZq6uXnbDuaz3DCF2lrjTETTPLzs/OSJI06ciOr5c4x04z2L2bptd+yZF9D2HbKRAC
RVFxhI4AVM2F41gjtU5OHElHmlhOdtEsXRqEEDzesAHTsfDgwnQsHm/YID+30kXBEQ6vtO/CEZlRKlRVnZQBEwDLjBEPtyFMm+FGm5Kp70R1F/Nff2pgyvUfY+o1H+Kuv/0v7lx7DWp3nOG+OK9sbc2ur+mqDJhIk0pvV4RDe7vZ9Ewjzpscs2ItrQhH4C7Ix5WbKQKeSCTYvn07AKtXrz7pepbjsPFIX2aZKYW4de0c7oEkSZOZDJpc4rqan6Or+Vni4aM4dhpNc+Py5jFo1PCkcznddghFUdB0L9GhZmLDrW++UWncyIvmS1tDfzMH+g7jM7woioLP8HKg77AMnEkTniMcHtq7nt/uXc/v9j023s15y+Lho+Qb+4iHj54wz0xHs3/rth+rSfCLrz7OJ/7uW/zvv/07HpdOab6PoumL+PN3f4DSBAx2RlAUmDmvlJVXyowxafJq2N8DQM3UAgzjjYMZo0MN+48baviVV14hnU7jyw8w4I5i2uaYdWxH8MM/7KZS1anO8bGkLPfc7oAkSZOaDJpMAC3DcbZpBbQMx8/r81hmnP6O7aQSA9lpOUWzMNw56IYPzfDi8uajam52pCvopJAdqXKEuPiyTS7UazrR7O87zN6eg6iKiqnYGJrB/t4GedE8yVmOzWBiOBMws03cmguBwK25MG1TBs6kCc1xHB7c/Se2d+xGURTq82vGu0lviRCCrn1P4hH9dO17MvuZs8w4HYefoHHnj0mGe2l/ZD333f0JPvmP3+E3r+zCcrtp6OjEcRwWVeYxQ9UIt4exbUH5lBze9YHLuOLaabLQqzRpObZD06FMBkj9zKI3XT7a1ASAv/bYUMOrVq3iR//3I2a8bSG/3fso+3obxqzz0p5ODrUOsv3ldm6bVoauyksgSZJOn6xpMs6EEDx3pI9+xcNzR/qoLwid05RbIRxiw0cY6tlDZKAJEFjpKCU1awHwhaZQUnMlLbt/jqZn7k53mEHarRwMLNqtHDqtIOVGZEy2SSC35py18Vw736/pROEIh0gqSo4nM+SzEILvbP0/wqkIqqLiCIdUMo0jHL656fssq1zIXyy5M7t+OBnB5/KhqzI99WIRTcfojQ3QG+vP/MQzfw8khjBUg774AH6XL/t+VxQFv8uXzTaRtU2kicZybH61+4/s6c4Ee++c/44TCjdeDNLpNA//4ef86cH7GQ7HyQm9wNs6DNasWsRQ50vYVhIrFufRr36BH/9pM4d6e0mjoRdM4da7PsnXPvdnKIqCk7ZJJS1COR6Wr6mlemrBpDx+SdLx2lqHSCZMvD6Diuq8N1zWMU3iRzKZXIHjMk1UVUUr8VAwowS/y8ecounZecm0xfqtLQDcsKIan0cWf5Uk6czIoMk4axqK0TgURxcOjUNxmoZiTM07+2Kr6eQww737GOrdh3VcSrDHX4zHXzJm2b6jW7DtNIbmxrZMdiZLsYWChyRJ4WVnspQSZQBFUbHsNL1HtuDPqZ6wJ3Ln6zUdT0IIhpNh2sKdHBnuoC3cRXu4C11V+cLaz6AoCg39zSTMJJqq4dbcpNMpVFXDdCyiZozOaM+Ybd6/49d0RXvJ8+ZS5MunyJ9PoS/zU+TPzwZjpAvLdmwGEkP0xvqJpGMsP264xAde/Q1t4c4T1hFC0BnrBiDo8gOgkPl8ujUXsXScxxs2MK2gdsJ+bqVLj2Vb/Oy133Og9zCaqvGB+e9idvG08W7WGdu4cSP33HMPjQ37sEwTBRAKPLbhr6koK+Czf/k2Vq1cxhPfX8/31z+LhYLpyWX65e9g5dpbuGFZXfZzOW9RBYZLY+bc0jcshClJk0nDgcz5ydQZRajqGx+j4q1HELaDEQriKsgfM++lth0ALCmfj64du8R58pUj9OVq5Ad9rJgni79KknTmZNBkHAkheK61F9MRuHAwnczjulz/WV3YCOHQsvdBbDMBgKa5CRXNJLdoDoa3EMtxiKUtdE3FUBzSqWEc1UtH2kOPHaLVzEPF
wURHEQ6tZh4Nai71Rjea5iKZHGYwkcSlGxiqgq6qaG9ykLtQztdrOp4eb3ieV9pfI5qOnTDPUXWi6RgBl5/HGzbg0gzyvZnU1mFzmJxADo4QDCQG0RUNIQSKoiCEIJKKIYRgID7IQHyQg32N2e0WBwr57Mq7s49fPPoqbt1Fka+AQl8eHsNzQlukt2ZfTwMtQ0czGSTxfgbiQ8cKYSoqS8rno41kAxUHComZcYp8BRT5Cyjy51PkLyCcjPDD7b/EpRnZ/+8omW0iTVS/eO0PHOg9jK7q3LXwtovyvblx40buvvtuhocGCfld6JobAEVVMC2bto5+vvTNh/mv774N/8JbST79MqVzVrH0yttZVl1BqjdO+74enEWVqKqCpqvMWVg+znslSReOmbZpbewHoH5m8Zsun+jsAjJZJqPndb/73e/YtvMVusujFNeVjbnZMBxN8fThbkRAJ7csiK7JYKQkSWdOBk3GUdNQjIaBGF5dJQG4NYUD/VFe6higLODFdBwsR1Dqd5PryfRlHkik2dMbxhqZZzqCZCpKPDaA4S/lstI8ZhQEySmcxdHBITbGqkDxYnWC2d6PEP3Z57+6poiVlQXUzf8gbcNh/rS/nwHTJoVAVUA4Doqq4gjYYyznxkXFKIrCsGXwvVfHFoRVFDBUFUNTWFSay9qqzIV73LT5/cF29JF5hqqiq8d+VwS9TMvPZIHYjuDQQCQ7b3QdXVUxVAW3ruF+k4Pd6Gvq01WSgFdXaRiITehsk5SVpj3cydHhTtrCmZ9Pr/gzvCOBCduxiaZjqIpKaaCIypwypuSUUxkqo8RfiKqqHOpr4kDf4WzXjOMvmjVVJegOcDTcmb1oVhSFf1z7KSKpKH3xgZFuHgP0xfrpiw9S7C/Mri+E4LGG50hZ6ey0gMtPkT8TQKnOrWRJxfwL94JdRBzHyWSNxPuz3Wr644P8xZI7UZXMe3ln115e69o/Zj2XZlDoL6DIl0/KTuNTvQDcPufm7HqjhBD894sPYNpmNsvk9SZrtsm9997Lvffei23b490U6S1YVrmQ5qGjfHDBu5maXz3ezTlj6XSae+65h3A4TEGuB2GnEY4AFQSCVNpCBWLxFH/79/dw3V1f58ZPfJtFNRUY4TSJ7kwg3OtzkUqasmaJdEmKx9IUlwZJxE0KS978PK3oilXkLpyPkz52TrJ+/Xo2vPgCc961mNVLV5Hvy83O++OWZlJ+DZeh8a65FaiT5PgnSdKFJYMm42Q0I8JyHDRFIa7oJJMmNvDLvW0Uel3ZC5ubppawuCxzMjWcMtl4pA+Bg22lsK0EYqRCuCsxRHVu5qKpuGoVVn6K+K5WMJ2TtsEeubA23EFCQTd+d4yeZBy3pqCpCum0jUvXsBzBQFrQYfqZmhcgEk9haAqmfezCXAhI2w5pGyzn2PS0bdM8dOpirIvLcrNBk6Rt89CBjlMuO684xDumZ+7AWY7Df247PCYIoynQOBQjmjbxj1Red6kqCcvikcOdfHBOFSG3gTEB7jI0DrSyvWM3beFOeqP9CMYW6WwPd1FfUAPA8srLmFcyk7JgMYZ2Yj/c7Ig5Z3jRrCgKIU+QkCdI3esuWI4PuliOxYLS2ZmgSryfSCpGNJ35aR48QjQdzwZNhBB896UHCLr8FI5kQRT68iny5RN0By7YxXrjQCsb0q8yY2AOM0vO/5ClCTOJR3dn929D84ts79hNf3wAW5z4+RtKhLMndTML6/EbPor8BRSPZI+E3MGTvlavD5jAsRFzjq9l8nqTNdtk3bp1rFu3jnA4TE5Ozng3RzpDM4vq+YfVn7xoM9eefvppWltbycvxI+woQggECsmUzdBQlFTaAgUqcvMZ7u/C7tnHqhkrSPTGMYFQrocVa+qoqsufNIFMSTodx0aZmkdOQR23vmc+lmmf9ufACAazf4fDYXbt2kXcTFA5u5rlU45lmQyGk7zUNQgBnRlFQeon6M0zSZImPhk0GSfHMiI0bMdEURwURUVX
FEzHwWNo5HoMDFXFNxIAEEJgmAPUu/qw4r2omGi6QDcEgUAheYU51ORmDgiKolLoc/PRBdXZLjTHZ3hoqjIm2p7vMcj1GHiiKrnuzIX5cCpBzkiGy1DKzHZzKfK5+YfLZyCEwBYC0xbZrBjLcfAcN+69V9d4x/QyTGfsMqad+T0l5BvzukwJeUcyaJyxv20H/bguQKYtSJoOSY5dkKYsm4FEpj+5LUAhc7Ho1VX290X5z5cP49Y1PIZKyGUQdOmE3DpTQj7mF+eM2Y5LU8/6JNYRDr2x/mwGycopiykOZDI4emP9vNqxO7tsjieYzR6pDGUySUYV+vNP2PbxzsdF8/HbMTSDd8++Kfs4aSbpiw/SG++nLzZIge9Y0bZYOk57OJM6y3HdfQDcuotFZfN4x6zrs9M6wl3k+/Lw6O43bM+ZEELweOMGup0BHm/cwIziqefkgsQRDkOJMD2xfvri/fTEjmWPRNMx7lmzjtyROjBJK0VPLDMSgKHq2QDSaLca33EXiYvK57KofO5b39eGDaSsFB7dTXokgJr5bNqkbTO776qikrJSky7bRLp4xM0Ev9nzKLfMuJpCX+Z77WINmAA8/dR60qkY+J1MZknSZDiWJpZIAwJVUcgNeXHrDsOmyeHtL7GwfBGGS2PRiirmLCxHmwBBfEk6U2eT5SeEoK9tKx5tmL62rYTyM8cj/U2GGT6VrVu3YjkWZVXlVFRUMKvw2I0SYagUVIVIpCzePqtCHvckSXrLZNBkHByfZRIwdNJmjFwS6JoXlyePoZRFnsfgzxfUjPmCT0a7iTT+jgUAHnB5csktnktO0Sx0w3fC87g1lYqg97TadHwQ52Q1EXy6dkI3F0VR0BUFXQUvJz/YuXWNecWndwfYb+h8eP6pU7Sd49rk1lU+vqg2E4AZCar88VAn4bSFz1DRVZVEMrOsoaooQNSycGlqJthipuiJpYBMZsxo0MRyBP/+UgMuVc0GVYIunaDbIOTSKfG7qQyd+FoDxNMJDg+00DbS1aYj0jWmS0tZsDgbNKnPr+HqulUjAZIygu63dvfjVBfNlsjBds8hYbegK8PAubto9hgeKnPKqMw5sZiaR3dz95L30xvrz3T7iQ3QFx9gIDFEykqPyZSImwm+8+L9AATdgWwB2tFitOXBYnK9Z5490NDfzKG+RnQ0DvU1nnFmRdJK0RcboCfWz6yi+mw3qScPb+T55q2nXK8/PpgNmlxWPpfavCkU+QvI8QRPmiFyLtiOTX9iALfuJmmljs0QAhOblJXK9J0b4dbdDCQGsR17TJE8STrfoukYP9r+KzojPQynInxq+Ucu2guYdHKY3qNbONq0HSFsLMuiqyeCadqIkQLMAb+HvKAXXVcQjommChQ1zaz5ZSy+vEp2xZEuameT5RcbbiU21IIjNCIDTQx0N1JQenoZoX2bNjO8Zy8FK5aTu3ABAJs2bUJTNO562/v52Mq7UY8bSnjDkT5cusasohBVOSc/d5MkSTod8qx5HBwfoHAcE8dOAwqOncZxTHy6TsNAlL1th6l0p8ktngOAJ1CCx1+M21dAbvFcvIGyc3LSOTaIc/Jh2FyaStwyx7Wo6vGZMaqiUOQ7lp3QOBilO5Yi5NJx65mCp4mReZqqUuh1kXYcbp9VQYHXTSRtEklbhFPWmO3ETAtGuhr1J9L0J44FPSDTRagy5COaitE61MHjLSZFPj9Bl07KivDS0RdRSaMoaVQEHt2gIlRGZaiU8uCxUYsK/flcX7/mrF+Tk100C8BWqhB6CUnHQROvMPrKne+LZl3TmZpffUJ9AmtkRJjjhzeOpKIEXH6i6RiRVJRIKkrz4JHs/MunLM5mpaSsNE83vpDJ2PAVUOjPJ+g68X2Y7arkWHhwYTrWGwaJemL9HOproi8+MJI50k8kdWy0qY8teX+261KhLx9d1Sn05VHoL6DYn0+hL9OlptCfPyZbpnikq835pms6f73ybmLpsV3gLMtiw4YNrF29Fl0f
+38OuHwyYCJdUJFUlB9u/yU90T4CLj/vnXvrRRkwscw4PY0v0NvyMkMpi2DQCyJzjHEcAYpCwOcm6Pfico18xoQNQmCZKeYurGX1Nee/u6AkTVRCCHqPbEEICweDVDLFqxsfYdqiD1JTX/im60cOHiJy6DDBmZlhyR3HYcuWLQCsXr06exx2HEHHUJx9fWEArqouOk97JEnSpUKeOV9gJ2SZJKMIMv2gBQIrHUFRPSTTNk8cbOeWUDOhwhmoqo6iKNTMfR/KOb5r/fosk5M5VbbJRHCyoE/KThERcTy2B4/uGQn62Gxu6+fPF9RQ7D95d5Act8E/XD6dcNoiksoEVvoTCdojQ3RHIzT2t/L1jb9nKBnGES6EfgUJM/P/cISDrUxD0wwMzWBuUYg759ShKiqWI/jDoXb29Xcfl8FiZDNZdPWt/U9PdtF8NJzmNwcGSCZTeDy1vGfmYqaEjt3VHI+LZl3VTggilASK+MKVnyZhJumPD450exkYyVDppzR47CSnN9bPC60vj1nfrbuyWSmXlc1hZlF9tquSz/CSSifx6m729hzk0UPP4NJc9MUHuLJmBeWhUgBaBo/yyMGnT2jvaKFbOPZ5WFg2h0Xlc89b1shblesJZTNcRpmmSY4aoDxYgnGKQKgknQ+vryc0nAzzw1d+SV98gJA7wN1L3j/y2bp4CCGIt7Zy4NmHeGzXszz7YgOmpfLJz3ySPz22DdO0KcoPohvGCcOlOqjYaRNVEaxYNnuc9kCSJobYcCvRoWaEMEjFTTRNw+8bIBQcBt44aCIch2hTMwCBuloA9uzZQ3d/D3mhXBYsWJBd9pUD3fz2mQYuX1JJaVWI0sDF2w1QkqSJQQZNLrCTZZkoqCjYIMC2kiiKhVvR6LByCAfmZgq9qpl/1bkOmIwGHFK2jUczMG0nO90GTNs5riaCQsq2JtwQvicGfQTDqSgmFsOpaLZA5+kEfSzbImWlKPD6KfC6GE6G+cXOB04o1KqgUOLPoa7AYUZhOeG0RThlEk3nZP+uCOZkL7CjaZMDfdGTPSWQKYh709TMhbzlCF5s7x8JrhjZ7kGnGjno+ItmIQSPNbUgUPEgEKjs6bNZXlEyYf5fr+d9g+4+ozy6m9XVS0cCK4MMjnT3aQ930R7uYkpO2ZiuSlE7hi1sSMRxhMPv9j1Gka8ARVGYVlCbDZqUh0qYWzKDQl9+JmPEl6k94j1JnYXjs2QkSTrR6+sJFQULuG/7LxlMDJPrCXH3kvePqYE00dnpFL2vbGbLb9azfstmXj7aiqU7gE5OYTEh+igrCtLRPUx+3smLN4NgKJKgojSHmUWx7LDvknSpGc0ycRyLeFxHU02EUDF0wXD3S+QV173hZyPZ1YWdSKK5XXgrMnXfhoaHcHJU7DKdrlgvFaFS0qbN+s3NmJZNjqFxecXFFaSVJGlikkGTC2hsgEInmYphi8wBQqCgCMhcjqu43AGitspr6RBL9fMXIbeFYCBh4tY0kvbxo3wIbEUl5Tgcf7fdrWkMJk1sIdAnwInfyYI+KTtN0kqjoJG00kTTKdya64Sgj0DQE+unbbgjW6y1K9LDrOJpfHDBuwEIuYP4XB4MzZWpPxLKXNxXhErPqHipW9O4YWpJNnslnLIIp00iKQvLEWOK50bTJs+39p24DT1TZ2V+cQ4rKzMnAbYjaBqKZQMsHZHERTfk8uko9Odz64xrs48t26I/MZgpSBvrpy6/OptlYmguUnYaBwdN0dAUHdO2qMuvYlbRNKqOK7JbGSrL/q8lSTo7r68n9LNdv2MwMUy+L4+PLb7zLdUoGg9CCCIDh/nj/36V+/+wma6jYYQAS3fhza9l2rIb+P7XPs7Rjd9m3QdW8ZX/fZb+oSi5QR/GccUsTdNmKBIn4HPzl+9fhTnYieNYaCcZBU2SJrtjWSYuzHQaj9sEBRRVYbBnD2L3r8gpnIbHX4LHX4Kmj637ExvJMvHVVKNomc+Zf2oeb//HOwjqfsqCxQA8
v/0og4k0+UEPVyysvLA7KUnSpCWDJhfQ8QGKhGli2QLInDwJRjM6RoIojnJBAhS6qvKJRbXEzLEV0C3LZOOGjaxZtAZdH3uC5ze0t9yd5Fw7MegjiKSTgMFolnQ4nSTo0oDR1zTND7f/irbh9mzh1OP1xwezfyuKwt+t/uRZj+7iNTSWlp14h1UIQdIaOyStoigsKMkZCa5kgiwpyxn5SZM6bvlI2uTBfW3ZbQ0k0yQtB10By9ZQ0za2qvBcay81OT7CaeusugNNFLqmUxIooiSQ6cIjhOC/9zyAaZvkeXLw6m4i0Qh5gVxURR0pRGtyRfUyeZdXks6Dk9UTcoTD3OIZvH3mdYQ8wTffyDgSQhBpbCTlc4gN7CAR7SJQFKCzP4ziD1I0bTW1C68kv7iK6nw/4CL9kkVtX5Dbrvxz/rDhD/QN9CKcZLaYuqqq5OWU8d7VN1DbGicds1HeBqeomy5Jk9bYLBMDROYcRVMFQnEQik24bz/JWDeKopBfupCSmrUAOLZJMt5LpCkzIt9o1xyAl47uAGBlzRJURSUST/PEng6SFV6mVBRg6Bf3uY4kSROHDJpcQKMBimjaov3Qo8SGW9Fdmbv/kUiE4Mi481Y6ij+nmorptxC4ABe4IbdByD02MGKaGgEsSv2eCV0T4fVBn5bBo/xk56P4Yj5cRwuJlncR8w5R4a3jzxbfAWSCPj/ZuZW0beLWXZQHS5mSkxnqtzKnjDzP2Luh53I43NdTFAXv64bZy3EbvG3a2K4qKdshmjYJpzJBj1GmIygJuImkLAYTaRKWg4IgbQmEqhBLmuSHPDQMxNjdG+aRhsxwwB4jk7USHBl6OeDSqcnxUZvrB46NVKReJAGG44ddVlUVl2Kgo6Eq6hkPtyxJ0pkb/Qx6dTfpdBqf4aGhv4Vbpl8zoQMmdipF03PP89uf/ogn9+1g9sIa/t/HbyVpQjiwlLk3VFBSsxCPy0t9rg930sIaTIENL067mSari7xcLx98z3Kajuyi+egekskoHk+A2ilzqatagKYZPEqC2mmlLNPlaZd06Tk+yySdthBCIZXKdJ1WVQiFFFAsPP4iHCuFx3+scH4y1k3r3t+S8HfCAkEid4DBrtdoGQjT0teC7nKztCJTz+TxrS3EAxouQ6Oy+Myya+Pho+Qb+4iH55FTIM8TJEkaSx69L7CQ20BNtNMfO4jXZaDpSYQQaEqMHC1T7NXGRsQOErKXEHDXjHeTJ6y0beLSjGzQ58mGjfzxwJMMxYcob56Pb9CLYwUYqmukcSBGvuf9uEfSPd8+4zrcuosif8GEK+x5Mm5Nxe11U+AdG8Ap8rm5e2EtQgju29lC1LJwC4XhWGbsICEAW2AJwQtH+tAUsAUjwy6n6Y2NHR1oNGgylDT5n1ebCIwEVAJGpnhtwJUpXFse8FDsnxiF1bJ3uG2ToCvTfkvkYHvmYomjGEoYt+Yilo6f9XDLkiSdSAjBnw4+TSydqSFkoBHSQsTNxIT9zCW6e3jmJz/h97//PS+3NGN7Mxl86d0tBPJnk5uzkJ+8speZs2qpCbhxwmkYSmIBLrfOQF+czoSK2wjgmAJHd1MzdTk1U5fjOE522FNHCBQBbiNAd1LFsgWGPrFeC0k6n07MMhGM3JdBVTNZWYmkht/voKAw9bKPjlnftpJouhfV5cIJpIlbHSRaOvn3b/+a1/Yd4faPv5fAdX56BuJsbuzCKfRQGPKyesrp1zIRQtDXthWPNkxf21ZC+RPvO0uSpPElgyYX2PEHD8Pwn3QZVXNhWgl6j2zBn1N9SX9xW47NYGKI3thAdmSVvpG/Y2aCL1/9t9mT04aBJgaTQ3hjufjCeTiqhS+ST2G6nLQnQvPgEWYWZYZ7rMqtGM/dOueahmIcHozhFQ57N27kyM5tpGPDuPw5VC1cypw1a2iLJLl7YQ0VQS+RtEUkbREdGXo5kraYEvJmtxdNWwgBkZRFJGWd8HyrpxRkgyZDyTQ/23OUgEsnNBpk
OW6EoHyva0x2zLl2fJZJJi0ekk4VaEUkHQVd2SOzTSTpPBBC0DR4hMcOPcf2jtcABVVRSAtrwn7m7ESCH3z2b3nwqSfoiWSKcwtVY0p5JSuvWslnPvMP5BVUEI2kWBTykRhO4gxnhnMvKPIze0EZU2cWYxgadyF47De70fwqhiuTMSgQRCNRAkEfykh3WzNtY9sON181XXYXkC45r88yURQFXVewbRtFVcCBdNrG53MRHWomNtxKILcmu34wv55A3lSsdIxEtItkrIfBgSPs3n+UdNpi+dzLAWjpCqMWQq4nzjzPAOH2Hkx/CZ5ACS5P7hsOpBAbbiU21IIjNGJDLSe0QZIkSQZNLrDRg4eme99weF9N95704HGhvH7YyPPJEQ6RVDQbGFlWuTCb/fHrPX/ita79p1x3KBkm35eLEIJwMopu68Q2xXl2z89Jx4dw+XKp6qrHe6PgycMvMKNw6oQPQglHYDsOtiWwbQfbdlBVBX/gWJZJ+5FBLMvBsQWWZfOn7gEat7/I1v/7b4a6OxCOTaY+juDQ5ifY/lA5K//8MzyX6+cvLqvBa7hPOewyQGXIy18tqycyElSJjhSvjY4EWI5fdzhlMZQ0GUqeWB8GMgGWK6sztUeGkiaPHO7Mdgk61kVII+AyRrqjnf7/5/gRczy6m7RtYotcTPJAmJhKHknbj6YMoSoqKSs1Ye98S9LF5MWjr7LpyDZ6o/30xvsRQuDR3QRcPtLxTAbbRMnwsk0TRdMyAXaXTnesnR4nijvgp3rWKnLnXUt+cSVRVcPtzxST9PoMVFugaiq10wqZs6CMkvJQdh+EEBzY0YljOXi9RrZcuhCZb15VwOjuulwasYjF/h0dzJxVLL97pEvG6I1C206TiBuoWCgjIwGqwkFRQNFA2A6JuILXZ570hqGiKBjuAIa7nlBBPZsP9tKXcsjNzeW61W8HwFXgwR/yoNsR6kUDg10OoxXqVNXA4y+mtO4a3N68k7ZRCAsHAyEsedNSkqQTyKDJBXT8wcPQ3DgjRUiFEICDY5vZL2hFUbHs9Lh8cb9+2MgZxec20NA0cISG/uZjmSPxQczjCrLOKJxK3sgoC4W+fFyaQaEvn0J/fua3L58ifz4Fvjx8RiY7oqG/mZe2vMTL928k0hkBx0ERIBR4bf9GgpuCDHy0hxunrT3lHU/HGQlSWCM/Tua3y6XjD2aCBLbl0NY6ErAYmW/bYuS3Q26Bj9r6wuyyLzzdgO1k5jsjAZDRYEhZZQ4r1tZlX/Mff29rZjlHnNC2KTV53PiuudnHTz68H2ukjkvEo/LC4F6ev/crpOMxPMFctOP6zduWxWBnO099+0uYKYvEq/MoVnW8PoPC4gCrrj4WFDvSPICmKXi8Bl6vixKvm7LAsQyUkykNePjw/KpMxkrKImpa2b8jaZNcz7GaOMMpk5ah+Cm3dUVVAWurMgGWcMrkhaN9BF0GAZeWrb8SdOl4DQ1VUbAdm/7EAG7dTdJKIQBbqUKgoSZsHK9Ggio00Y0CuHU3A4lBbMdG1+TXnySdrtcPlXt0uIO+2AAODo4QFHjzCLj9CCEwyQRNxjvb5OievTx47708+uyzfP6//oOli6vpaN7Msptmok/xE6pZQ3NkOh4UqjwuApqGdyQrTtNUrrppBrl5PnwB1wnbdmxBeCiBy61jpo8VUhdC4NiZO+fHv14ut05kOIFjCzTZPUe6RAhhk04NI4SO46TQdRgZKhIFB1UdOd9RBI6TymSjpIYRwkZRjqvfFo5ghI7VRmre3UhZsJhrrrsW3XBhOQ4bj/Th9gS5sqKM6kAFyVg3iVg3yVgPjmMSj7SjHVejrr/9FWLhIyiqQXigAVV1ASlU3TOuNy0lSZqY5FXDBTR68NA0F46dOm46qNg4dprjYxOadvKDx/n2+mEjz+Rk17RN+uOD9MUHspkjvfEBPjD/neR4QgAc6m/i+eatY9ZTFZV8by6F/nxs59gJ6NW1K7lu6hVvGLQRQvDd
X/+A57/7KHbEwefJwVBUUFQEYDkWkY4wG7/9FN/smMa1i29m2qxiLlteBUAskuKXP3o528f29WbOK+WKa6cBYFk2Tz6875RtmTqjKBs0sWyHA3u7M/13yfyfRfZvgeE59j/tG0oyHEnh2M4JyyoK5EaT2KkUmttNc8cwffEUSiKOEIK2kI+tP/kuqXgMf17hyGt1bGc0w8CXV0hssI+tP7mX6nv+G7szgaYomK8buWfjkw0k4mPrnLg9Oh6vQVFpkKtunJGdfvhAT6aQrc8g4DMo9Hnw5BuZdNtTKPC6eMf0smyXoNEsltGMluO78QwmTXZ0DZ90O6oCV9UUcXlFAX+98m56Y1EODiSJmzZb22Ok4ybCUTAsDU+wlttnLGJKKHPhE3D5Jn3ApGU4zjatgNnDcaYVXhzDvL4V9957L/feey+2bb/5wtJbkrLS7Ojcw0ttO3jP3FspD2YKNK6qXsqUnHJePPoqQ4kwfpfvpOtf6GwT2zR5+pe/4jc/+Qkv7tmTKWqtw68e+CaVuW8nGktjufLx1CxkOFbGDF1DsRzUpE0Sm872Ycqn5AJkf5+MpqvcftdikvGxGXaWZbJh40bWrll8wshzXp+BJrvnSJcQVdWpnfcBHv/Dq7S3DuLzu7KjS0WjUQKBQPZxPJamojqPG1ctRlWPHaPTQ0Ps+5ev4ikuZMbf/Q2KprFp0yZUReWma27k0JFBhiyLpOUQdBssr6rA0FRyimYCIIRDKjFAKt6Pbhz7noqFjxAdOkI6MYBtp4AkKiqqmoNlJWW2iSRJY0zuK4cJRlV16uZ/ENtKjJlumiYbN25kzYo1J4xUo+m+MQeP8+1kw0a+/mTXEQ5DiTBBtx9Dy7T35badPNu0maFk+KTb7Y0NZIMmdXlVxM0Exf6CbOZInjcHTT1xHMZTXdwKRzA8lKCnK8JrBw/zu2/8FitiE/QVZNKihcAZSZjWVYOAP59obID1f/gZtYWLKCo7dseirS/KcDSNYKQ4mcj0S8/kjULiuJoee5v76U2ks4EP4QiEsMFxQDjUJN2k+gdwF+Sz/WA3rbE4oVg/mrBRhIXmOKjCRnMs/IafvhcTFK5YxpHuMEesGDMOb0SxTRQrhWZbYKcRto120EUTVzPtQx8gljBpivcz6+VfYwF7DuYy3HEEl9ePmUqAAN3tRh15PYVwMqnzwRyiPR0c3fU8C2MCU/fhtnLpeT5K8ZVraO0M09Adxm2mRvr+aiiqghZRUVUF+7jzBtNy2PTsYdJJ63UptODxGhSXhbj+7bOz0/e/1glk5hX5DKr8PjyFOi63ftz7SowJXAVdOmuqCk8IrMRMG0dkiuMC5HpCDKd0dnS10h2O0fjKFtp3vUI6GsEVCFKxYAl5xlV8/oq5KIrCYDLNww2deDQVj67h1TO/3bqGR1fJ97gInMcaLOebEILnjvTRr3h47kgf9QWhSXvSt27dOtatW0c4HCYnZ/IGh8ZDR6SbF4++yq6ufaSsTCD15badvHPWDQCUB0uIpmIc6m/O1hM6mQuVbZKORvnuF/+Zhx99lJ6hoZEnh/pZFVxz7VzWrp6LqrkprlzCloM2vgTkqJnK2JqhMW1WMbMXlFFQdPojbgSCbgLBsd0cTdPE44OC4sCEHnlOki6U7i6b5sM2up5L2swcW4UQxBMOhit43DmARfNhm+5Oiyk1x9aPNTUDoLo9KJrG3oZ9HDlyBF3XWbR4Kf/9230MhBO878aZVFaEMLSxgUlFUfH4CvH4CsdML666AsP9Gt0tz6Oq+siNKnVCdJGXJGniuXivDC5ShjuI4R47/KJmmljCh8dfPO4nWaNFNX2Gl2Q6gaHp7Orex493/hYVhb74IP0j3Rs+tuQD1OVnsjUURckGTLyGZ0w3mkJfPqXBouxzTC+se0snzmbaZue2o/R2RejtjpBOZe4uv/DqEwwPDOD35aKgoAgHoUDajGPbViYIoigoCvT1HeGhx/6V4c7LWTLzrwlWV9HWF+XF
hhdINr6C4tgI28z8HvnZ84TBTPc/UXXtNcTTNi/teozkq48hRCYt3XEE9sjfzxsqJf/0T8z/8IcwdI0dux6je8vvcMgEWEZT2R0hUBS475++SOGKZeSHPPQc2cIjjzyQ6RDP2AsQVVX4zuzpTAMqigMU6m38z47NoCh0R8KY8Rh2IslohkluThGGkTmZTyZjRKKDgILjmGz+0bfpLyzCp7vI8XnwKLdz3ZVriCZM2s0BKjf9BJ/hwmMYuNx+VF8uijePHLuI7qfDlFx7NW09EQ72RPAm4whFR1E1FCUTXNFUBee4u6nxpMmGZw9jpW00RUFVMz+j+1VSHuLW98zPDG+swK5X2hCOwOszqPYaeP0+PEWZ7kK6kckeiqYtXMedGLk1lXjDazz0za8S7u7EcWwUFASCxg2Ps+vXP6Lu377BnbfcyHDS5LXuk2ewAFxVXcSqkar73bEkv9h7FI+uZYMsHn30t0Zdrp/qnMydK9N26E+ks/NdmjouQzY3DcVoHIqjC4fGoThNQzGm5p3Z0IvSpclybHZ17ePFo69ydLgjO73IX8DyystYVH6si+DJ6gmNTreFTfq47qbnq57Q8V2GRCrNc088Qc/QEAGvl8WLVuKbdyN1tYJZRR2U1a6gsGIp/b1p/KldoEIox8PsheVMn12M2yMDHJJ0rgkh2La5hXTKwnC5s92KhRA4DljmsW5siqqQTlls29xCZXVudnq0sQkAf10tcTPBF+7/V3pi/dxwxbXsODzEQDhBKODmsvoi3MaJN99Oxe0rJBntRlF1XO4chBCkwplzAzkggyRJryeDJlLW8UO3BgwfESJEkwkc4bCx5UWKfAXZA4eu6kTTsey6Mwun8omlH6LIn4/POHWR29NhmjZ93VF6uyPousrsBeVAJh1696vtxGJJevq76envpHe4i62v/o5EMoKZTlGQW8Jo55RkKkYqnRyzbds2ady1i1hzMx+76SqC1VVUFAVRkm0cOrgtu9zxrVdVhXBXNwCzavKpLVV5Yrjv2AKKktlfRUExFayR0XwWzyzhhpVV/N/2TECBkYCBNrqsouCfMR2A2vIcVi+qYu+W4sxymoam62gjvw2Xi4q33wpAXtDDojnV7F6+FE3TSO3YQfRoCl03UEQmQ0YVoNiZioTCHu2CIxAoWIZG2O9h2HboSMbQptYCUD8llxXVSb79o50I20EIZyTrJsNr6Hw+9VHee+3VpEyb9ngb9hP/i1c38BoufIYb3RNE84VIdBdzOL+D+ne9k+6BOK3DCbyJCELRcNAQqKjKSFDhuG5KkXia5549jLBs1OMCLKPBlqKSIO/6wGWE3JkLnG2bW3Acwd592/nRV+4hFo1karocl6Fk2xbDnR389bq/pDxwPwuWX85VNUUkLXvkx8n8tjO/A65jJ10J0yaWzvycjEtVs0GT/kSa+3a2HPe+IBtocesqi0tzWVSaKUAXN21e7RrMBl88uopHy/z2jmS9nElB3FFCCJ5r7cV0BC4cTCfzuC7XL0/6pDelAI83PEckFUNTVOaUzGBF5SJq86ac8P55fT2hLCEwsUlZqWOVUDm39YQ629v51ff+h6eee45fPf4YoVAId0E+d97xPlpjHVgVVzNgFeE4gv4+my53PWsrV6BpKkWlbuYuqqCyKpfK6rw37E4oSdLZORe1f2LNmUyTwNQ6dnTuoWz2FFarHt5x+bt54uUj2B6Vmy+vOaOACZxqYIbRuoL/P3v3HV9FlTd+/DMztyf3plcSQu+9d0RBbNi7KOjKrsqu69oeyyqW/a3u2h9l3VUe14KFde0NAaWjUiyg9BYChJB+026ZmfP7I+SakEIuCaRw3vvKy83MmZnvudzMPfO9p8jeJpIk1SSTJlLIzznb2Jy7PdTVWkXFqlmAyvGmI9IHMTCpD/GuGDwOd2iFGwC3PRK3/fi+zS7IK+NwdmXvkdxDJRTklSEE+P0VBIwieg+4DEVR+H7bYV7/5N9s37oa06wc4qKZ
QUqL8hGGiYmOMHRQNUDBZrGHlnxUAKGoBAIVpCUkcN7wkTjjKnsT9OoUy03XX8qGBCcWmw2L1YrFZkOz2Sp/t9tIGz8WgLgoJ/nUzZ0AAQAASURBVL+ddR1nnjYaq82G1Vl5jKZpaJqGxWKhb9++QGWyZfp11zHtggtC+6t+VFXFYrHUGFJw46xZ3DhrFqqqHvMB96KLLuKiiy4C4Kbf3cqb898gKiKmsm9F1bFHsh0uZyQuRwSmEBSXFXLGGWfxp9v/gNfrpaSkhAETJwJgt2p07dqRCedPw+v14i0uxltURGlJCcI0EYZBwuBBla9ZRizThkTw8FuHMA0DYZqhuVsQld8YRXqg20UXoioKxYE9fDX/MVxWKw6L9UhPFjsWpxtvfCzfWc5m5G+uJ6+ogn0lXrT8A0dmyncjFGvlv6AAw/5ro6jA6+PrpTvR/QH+9cZDlJaUVM7pUiPlJdC0yjldSgpzmXXTH/nn028S6XbhdliPzNfiwBVtIyG5Zg8wgFS3kxsHdcJ/JKFSUT3Johukuh2hsoYQRNos+AwD3ajMNlWWrUxaVQR/nT/G6w+yLDOv1vWqjE6L5YxOiaGy7287iLNaLxe7VpVcUUmOcJAcWRnHzsIytheUougm5WXgspjsKCiTvU2kWnTTYPPh7fx8eBtX9j8fVVHRVI1JnccQMIIMSx1ApD2i3uMtmoU/jZlFWaDmxM66rrN8+XImjpuIxVKzidGU+YQMw2D5l1/yzsvz+GbtWgxdR1Hgk3f/y1Uzp/P1ii+J6acRr3bAW7GfHQfiidQV7FYb/vzKibk1rfLeOnpi61j+WJLau6bO/aOXluI7dBgAV6cMvvvpbTyJ0Vw78XJydkdR4jiMEhdBgSO85GfVwgymqWO11n2fk71NJEmqTiZNJIQQfH9wE/9a9yYlFaXk7Sxl/7osfAdNHKkqacPT8fSK50BxDhf3Pvu4PziEEJSVBvAWVdSYYO+j/3zH9u07yMvPJifvILn52RQU5lBSXoTLaWXGTWcQFxdHrMeB5rTi9BXjVCDF46FDVAI/2+38knOIqIgYVFVFHHlodtojcTqofIhHYCoqXgRjz76Qx/71vzViO/2sszj9rLMaVY8+/frRp1+/YxcEEhISSEhIOHZBQNPC+5YEKl9Tp7UTiqKgG3oDDyQKpqmjqRZSE4cycuTIOv8dJ02axKRJk2psMwyD0tJSvF4v8fG/jgkeMHgwdzz6CF6vl+LiYrwFhRQX5FNUWEhJsZeMMaMByEjxcPqAaFaqJuWBCoSvFHFkhSAhBMoehcF9uzIScNotOF25vPvPh6ia4MSuWXBabdhsDmKjokgvv5zz7rqD4lI/md48fln5Drl5WVgj3RjBwJFhQiqKWpXUEygo2N3RHDx0gFdff5/ePSrrX9mTBRISI5nx21FA5Xwt776+ASNo4HTZcLms2J1WHEcmxI1x2+nZLzn0OpSV+LFYVVIjHNw2onIlIt00ayRXfLpJjPPXFThsmsqgpCh8hkFF0MRnGPh1kwq98r+Oau+F8qDBfm/NeZCqG50WS3KkAyEEi/fkUOwPYuoCTVWoKAngjFFlbxMppKCiiLX7f2T9gY2h3oKDkvvSJ7FysusxHYc1+lzRDg/RR+aqqhIMBolSI0l1JzXLcNPCwkLemPsPPnj3PxzOOXxktTnok5bGBRdeyKQJPdn5w6vEKF4KMAhUeCjJ7US8agEbxCVE0GdgKpomJ2CVpJbQlLl/So/MZ+JITmR/sJDDZXnYNCsZrq68u2kTeoqdhEg73WPD+1Kg7l4mNcneJpIkVSeTJqe4gyU5fLRlEdvydrH7px389NY3VOSVQUABU4VtJr+sXk9kYhQF1zW8ZO/RAn6d3JwSDmeXcHB/Adu37mb/gX0UFOXw3Iv34onykF9cwcdff8jabxZiUrlEsFAq+wpoQifKYmfrRx8z9obrSU9yM/epezj0jwg8QsGZEEdEl67MXbqd7W8+TWV/EvXo6UAq
OykIBUMPoKAS5+5Wa/nMtsrQTWIjMohxx1PgzcUTEfNrT4tq1RMIynylxHoSiY3IwNBNLI3syqppGlFRUbUm2uzZsyc9e/as56iappx1Fr379Qv1binMy6MoL5/ivDyKCvLpd9aZACTHRTCqTzyLoz2UV/gQpoFpmpQGfRD0UVBWRElBPlA5TKlbJ4O3/rEEv8+HHvQfdVWFiJg4bK5IqpYk8leUsmrtu5SV5WJ3eLA63djtHkxbGn6/H7vdTn5xBVt25qFULf2s8OtQIUUhISkylDTx+XVenfcdgQodTVWw2SsTKy5X5U9sXARjJnUNRXRgXyFCgMNhZWJSDA6HFYu1Zs+iyjlvfq1FlN3KJb06HBlCZNRKxiS4Khuju4vK2JFbzL61qzjw03qCxcXYXW46Dx+JMnKc7G1yCjOFydbcXXy3/we25+3mSJ8w3PZIhncYSAdPUgtHWL+CH37k5blzMUwTt93OaUOG0mvC+eR7oujaJZvi/PWVBc0IDh/qTLkvAUVV6dIznr6DUklKcbeLe70ktSYna+Wy0NCcLl1Ytf97dqzZTO+U7nxcsAO/W8Nht9At3k3XmPp7xR2tqpeJYQSwanbMavMxgYlZbT4mRVHRjYDsbSJJkkyanKoqgj4W7VzBt/u/xzRNdny/lXUvLcOoCOJyu7EEnJUPmgrotgpKcopY9sJnvBDVgedufqzWB4dpmCiqEtr+2suf8sXni8grOER+wSEKi3MxDBOBQNNUtmy+hJGjh2HRVIyoRFxxSSS4o+kSFUmPCBudLZAe5SHa5cCamVW5RK9FpVe3DNJm34ItNgZbTAyGbtIh82ui3fEUeg8fM2kQE5VIckLfGuNl2zIT6FK8iZt69+L59bmUl+TgsTuxaFro3y9oGJT4K3BbbdzUuycdizdicuFJjbOupEt9pl1wAdMuuABd1ykpKcFbXExBTg5FOTkUHs5lyNChAES77Ywd1BlnXCKlhw6iWW2Vw4jMI/OxIECtSqIJzGAQQw+Sk7OXX0oW1JivZaWqEnXwKn737NMEdZMccZifF76C3R6Bzen59cfhobuWTl5eT+Lj4ykq9ZOdV4Zq1L1edVxCZChpUloe4I03vocjE99VJWE0i4rdrhGfEMll04egKpVT2G764QCmblYOIXJYiXZasTsc2KMqf6++dKkQgn+9/xkLnn6M4uwDlcOljtix+kuiUjqg3n4vf7vhctnoOwUd9Obw+o//Df3eLa4To9IG0zuhe52rlrWUgwcP8t6bb7E/ax9/+9/K3oCdxo7h0mHDSOzaC8egM9lUZGWjaZIWyCFQXoQWFU9C2iiszm5s2/0zQwYl0bt/Cq5I2zGuJknS8TpZK5d5evVCBHWsPbuw8dCX/PDZWnboG5l120BEVAeiI+2cnpEQ1ueaEAYBfzGaZsM0/NW2g4qBaQSqT8eEptkI+IsRwkBR5GOTJJ2q5F//KSinNJeX1r8VGoue4krktfmr0CuCuOM8qD4bCAWhmChCxWLaiYxTKMn38p/n5/ObaddiL7Hz049b2bJ5Bzt37GJf1j6ee+GvDB7SD8MUbN65ndVrF1KZt6/8sdqdeOJS6NKlMzGxlfNHREXauWP2TCK/dKEezq45IVi0h4iuXYns1q1ySd8jQxYiu/7a00XVFCzWUmb17sM/1x8+kjRwYdG0I9OeKkeSBuV4rDZm9eqDZi1B1drHg6MFkz5RQbpmJBNtH8eLG9aSXVqKKcyqnAmqopLujuTmoSMYnJyMPcrAggm0noelulgsFmJiYoiJiSGjU6c6ywwdNYqe40+n+LMPiIhNDOXJBAJhmkfeT0dW8LBasdjsJHbqymkpCZQUeykqLaHE56PE78fjdAKQnuTmkgmJrHplW+g6lXO1VCZGfvpKITXve2564XmsFhVHSjkfPP8wDocLq8ON1eHGYnejOdz06NaRPbs60LlrV0orghT5/KiGQEOtnLS3Wq4l8GueA29ZgAX/3YhmciS5QijJ
oqoKUVEObrptPACGYXLfX1/l5X89QqC8FEdkNJrFWhU1hq5TnH2AeQ/dw+DkKK46t3HD0KS2yRQmuwv2kV9RyMi0wQB08CTTLa4Tqe4kRnQYRHxEbAtH+Std11mxYgX/+ferrFm1EqOiAsVqZfadd9KxY0d+2lNM5GV/JrvkMPYKL5ZyD3GKii2QiGlE0m3QmahaZYLkqhuGy4ldJakdcffsgbtnD1ZlriNv3WH00gCeuASShgykqLiCbjGRdDwyGXtjqaqFLgOmY+g1h70Gg0FWrFjBhFETag0Z0iwuVFU+MknSqUzeAU5B8a5YIm0RRFhdTOs5mSdf+19KDhfh9ESAoYKuIhRxZM16E1VXwVCxWGzk7zzMlKFTcR2Z9LVy1EPlw9kPGzYxeEg/FKDAkkDnIafjiUvBE5tMbHwy/WPtZJjFxJUdxv/6mxgP3o9mtzNxSBoHD/SgcH0pkd26hn5s8fHH/PYgqJvEbVtHzxgnlmFjeH3jeg6XVSYNqpbRURWV5IhIrhswjBExTvK2ryOoT8MW5kzrrZFqtdL33rvQS0sZAFwVCPD1qlUsX72abdu20bNnTyaOHcvp48Zhs1U+WFgi3agtvLR1c7GoKldOPIMNH32AWlLe4CSTihHEbnFwzx/v5nezrgKOLI9aUYFRVvbrBLpA/759ufe3v6WooICigkKKiooo9nopLi3B6/OTGFv50BkX5WR4VxdvFR2mrllHdn8HPcr3ctM//4EnwkbvfgqP3XkLTrsDpysSh8uNzenG4vCQnpbMD2sdDB4xggq/Trmq4ysvwmmPwG5xoJoKigmKKXBUGx+eX1TK6689g7+slIjo2n8zmsWKKzqO8qJ8Hrjvfi6ePAm73X50qFIbVxYoZ8PBTXy3/wfyywuxaVYGJvfBYbGjKAo3Dr3qhF4/EAiwZMkSvvrqKzZt2sSKFSs444wzmDx5cujeU112djbvvfsu781/k9yDBzCDOgD9klM497TTiPdUfnu9a99+4viegSm5mIaVXWUjsGiVPa2sEekoyq/3MpkwkaT2aXT6EJYWL8Zjj6Tf4CFs9/pQUJiU0bg5445mtbux2mtO/q4Fg+jChSMisVnmY5IkqX2RSZNTQHmwgtWZ65nUeTQWzYKmaswcfFnlajdCsGntjwghUC0qolyhoqIUv78cXQ/idsdgt7lQApWrvhiGQXl5KU5HFFHRiUREJREZm0RqRifOPf90oPIb8TNOG8GUIT3oUJaNu+gQZK/DzAuEYjKA8r2ZuI8suZt81lRSzjs37KEDFkx6RIM/EMkkTyTj0pJZl32A7w8dpKi8gmiXkyHJqQxP6YD1SE+V2CilTfS0aCxbdDS26GgAXMDFXbow7aqr+PzzzznnnHPa9Ye/EAJLII0YdwJFxYdwu6otIVrV1QYQpqCi3EtMVDJaIC00p42iKFhcLiyumt9UpXbsyLWPPlL7eqaJUVFRY/jLqJEj+ccjj1CQm0dhfl5loqW4mKKiYopLS0hPSwPA5bCS7AE14MMf8OEvKapx7t3fw0BLBYNHjCAxxsV5k2P4w7V/AAUUVSMiIhJXhBtXZBSJifEsX1jBxLOmsuCzLyguyMZic2DogSOT4GrV/pYqe1y5oqLJOZDF/I8+5TeXX9IcL790AuwqyGR54Ht6FvSlV1K3BssKIcgsOsB3+39gU85WdLMy8WC32BiS0h/d0MFy4hNkK1as4J577iEzM5Og349RXsGP33/PO++8Q0ZGBo8//jgTJkyoccyyN9/mxaeeQJiCKIeDSX36MvmcC8jrMJgxE/thj7CQs3c56eI7fG4/qqJQUhFLfJyNXgM706NPInZH+723SdKprnTXbhRNxZmWhmaxsHvjDkpKTOIz+hJtt5Ia+evKcZIkSSeaTJq0Y6Yw2XBwEwt3LKUsUIFF1ZjUZQwAMc5fx6B2tCTzs2nBn+ujoryw5jl0A2FT0EwbdosFu6OCtO6DOeuaP6McSUJ4IuxkJEcSEdDR
y8qwRERw5ZSe5K5czYEP1lD1eGlxOYno2oXIrl2J7NYFR/KvK5Acb8+H6j0tqgzlSJfv5SuYMHFCrWUv21NPi1OdoZvkHijk7DFX8v5XL+EtLyLC6anscXIkZ6AbQcoqSnDYXJw95kpyDxSGNRFudYqqYomoOeFcVGwsp/3ut3WWF6aJGfx1qcUzTj+dT197jbxDhyg8kmQpzC+gqKiQouJiunfpDBwZimMGcZgGFcEgAigpL6MkNweAPcComEgmTD2Tz77+GlMPUlFWSqh7FRARl4TtyJAjAEWxYBoG736+iBsuu1jObdIKCSFYuGs5OWYBC3ctp2di1wb/nVbvW8+n25aEfk/1JDMqbTADk/tgt5ycOT1WrFjBrFmz8Hq9JCQkYBYWohsmFqcTNSaGPXv2cP3113Peeedx2vjxXHTppQBMnjKZL955m0mDBtNn8vlsEIl8cqgcNasM54avKYs/iGkEsNtUigpj0JyDGDCmD2kZMbJHiSSdArI/X0jZnr2kX3EpWs8efLv2B3wBnTKzA9f37YhikStiSZJ08sikSTu1vzibD7d+yf7ibAASI+PJiE6rVW7Xzr0s+mIJZaVlaEfGa1osNhw2F3abHU3RKle1MQUWTUO1WEnrmMHpIzJItwWIKTmM2L+DsnW72bmsnPTLLyFu1EgA3D264enTOzTcxpmaUm0Z2OZTvadFlWAwiBEdhbNDh3bd0+JUZwIdC76nn+UQqcPG8NaPq8kty8cnRCh9oCoKqRFurh40hiGWQ3gLNmBy7kmJT1FVtGpDYSx2O13Omkp9609VDXUDmDB+PEs+/piKoiIKcnIoOJxLQX7lqkOFRUX079sPQwgKCwtRUNBs9srVhgyjcliaplGVOQqdVVEoLCzCEAKLTJq0Ojvy97A9bxcWNLbn7WJH/p4aq5Ud8B4CKucoAeiX1JPFu1bQP6k3o9IGkxaVclLjDQQC3HPPPXi9XlJTUzH9foIVPkxFQS+vQNc0gsEg+zIzefnFf7J97TouvOQSFEUhskcvfvPQc6w4pLIx24caLMXp00l1lpFk2YFp2LG74kntPprOSjLRseHNWyBJUttlBoNUZGVhCoPX81dR8u4yikp9RCekcenUIUQ4ZbtOkqSTSyZN2pnyQAULdy5j3f6fEAjsFhtTuo5ndPpQysvK+fDDj8g+eJibb5kFQEZGOvExqZSWeHE63DgckVhUBVVUTaMqUIWJoWgY/gCRDhvXdIun61dvESwppajatTW7DaP815kdHElJdLnx+pP7AkinlKqJcP1+k6lpsZyecs4xhmeZpLXiiXCr9ypQLRZihgwmBkht4JiRXdP55WsVT2Qciqhc/lEgUIKAXjUkTmAqGkrQYGTXNCwnIHkpNY0QgoU7lhM0dRzYCJo6C3csJyM6jU05W/g26wf2e7PpEd+FG4ZcAUC0w8OfJ96KVWuZB4glS5aQmZlJQkLl6hXB4mICuk5xIEhF0I/pLQaO/KUJQe+oKEzTRFVVnv3PjxzO92Px6yRThqZHEumyo2lO/EGdnl374YnviaLI96oknWrK92Vh6gZlNsjRfPyw4SdMoNPAoQzv03qXSJckqf2SSZN25sOtX7Lx0BYABqX05bS0UWxYs57fPfIHvlmzhvIyH3aHg5nXT8fpdPLLngKmXfcIC16+l+LCQ6iKiiqqBtRUfj+tHFm7vqy8mNSUNPoZgmBJKarVQkTnzkR260Jkt6640tNDQ3Yk6WSQw7MgvUN/FKFgBAPYVLUy2akoR+Z0qVyhR0Ggm34UoZLeoX9LhyzVYUf+Hrbm7cRldeIP+LBpVn7I3sQDXz2BeiRxoKkaEVYXpjBD21oqYQKwbNkygsEgVqsVw+fDqPBRFAhSEgyiCtAU8DgcRLnd5FX4UPr0QdM0hBCkW604gwV077ifyIgy9hwYTnrnJPoOSiUpZZwcPiZJp7DSXbsBwX6PQDcF3QZdRMqQ64mJj+Db7EImdIxv6RAlSTrFyKRJ
O1A1qSXAmV0nUFBeRIo3hq/mfsX/W/kQpaUVCLMyARIXm0L/fiPxFpfjdDpJjHWh2CKYMu46Pln0PGVl+UQ43Fg0S2j5Vt3QKfWVYrNHcMa4GTjPGE6nvum4OqajWuRbSGpZp/LwLNM0KS9JItqTQHHxISyuGJRfpzUJ/dcUUFHhJToqhfKSpNC3/VLrEOplYgSJtLooEsXovnJMYZJXXkDPuK6MSh/C0A79ibRFHPuEJ9DBgwfZsGEDGzZs4J133qGwsJD4+HjUkhJM08Ric2HDT4TNTrSio9gdlNqjqCj1sffAYQACFYVkRP1Ex4j9qKqK02nn3AvSSEjr1aJ1kySpdSjbvRuf7udwjJ3yctCUFDwpblLiIxiWEt3S4UmSdAqST7xtWFmgnIU7lqEqKud2Ox1FUYiPiGX2yBnc/z9/4/PPvgQgOjqBXj2Gk9F7OIY7gUGdI4j0lQBxJMe5GBjvZl9KDy457Qa+/OYdCkvyME3zyOAcgaqqxHkSOG3CDJITerG9OJK+nTvJbwIlqYUFAiZmEKaOvZqPlvyz5kS4R1SfCHfq2Kswg5XHORxtP2kyd+5c5s6di2EYLR1Kk1T1MomwuVAUBU1RMRSBQ7OhqRYu6H0mvRIaXknnhMa3Ywfz58/n+++/J/vgQQy/H8Pno7ygEGEYVBQV4wgECCpWHC4Hnhgrfr+VoN+HFggglACKKbBoVrJ3L6Ho8GaiPTqG4SQhbSCJHUdhtUW2WP0kSWo9hGFQnrmPskA5BbHRBHLjsMQ4SHTbGdUhDpdVPrpIknTyyTtPK1C6YyfuJUsp7d6DmD69j1neFCZr9//I59u+ZvemnWR+u5eHtj7A/ffdz+VXXIiiKFx48TR27jhIj55DiLDYiKzIxZO/DU/Wt7i3m+zdkkjfOQ9gGoJSrx+BoHN6P2alzGHH/p/Zs38zFf5yHHYXXdL60j29H1js+ExBTk4ppiHQLDJpIkktyeGwcO05KWQeKqLv+In837pVHCopwG8KEAIUBU1VyPBE85vh4xjgKaLTuSk4HO3j1j979mxmz56N1+slKirq2Ae0QoZh8MZP71ER9OE+0ovEioXYiBg0VaOgoohFO1fSM77hlXSagxAi1JOkU6dODBgwAICS/AI+WrAAo8IHwQBdY+Po3bETgQ7pvL5hHVY9iGma6JoLu03HYjEQQsGnO1EQmHoQi6Yxuq+VosO/ABCT2J2EjmOxO2NPaJ0kSWpbKg4cIOArp1TV8cc42DF/IV7la8654WZGpsreaJIktYz20XJuowKBAIsXL+atvz3L3txyOv30C1f/z21MmTIFm63u5SL3FmTx/Acvs/6r9ez7fg+B0gCaYkFF4YuPF3H5FRcC8N2+AOekxBGX9R0AVotKhNOCK8aKxWrBEunG8PtRbTZK4+wU7i9jfO5aDEWju8cCfbpjCIGmKEAArXAtmjBYmTyC0rgEVE0mTCSppQkhML5bRqS/gIkdkxjT4UK+y9rH+gP7KSgrJTYikmEd0hiZ3hGrphEsKkD/dhli9EDZU6wV2Fu4nzd+eo9dBZmoilY5DQ1UroakaiiKQoTNxda8nbVW0mkOQggOHDgQGm6zfv16Dh8+DEJwwbnnhZIm3Tt04JwO6fROTKJ7QgKehAScPbpT4Enmy9tu4tC+PbidbhSrgnYkYWKxGKhWK0ZQoby8mIyOGVx0yZUII5fEjuNwuRua3liSpFOVIzWVggvHsmPneuJFLHt27cIETuvZCbtFzpsnSVLLkEmTFrJixQruuece9uzaRXlJBaai8XOmwdc33EDnrl15/PHHmTBhQqh8ebCC/274jId/M4eK4goUU0FFweP00LNjX4akpDM20Y7h96PZ7fTpHMfe9dG4zSKiOnYgvlsnXOlpONPScCQnhSbCDOom+aUBBhT9QoS/iDKrC808skypKVDUIw9WQhARLGdA4S9sLeuMbgissqeJJLUooesE8vLRHHZMXwUaMCY5mdFJyVRUVOB0
OivnOAkGMIOgOewE8vIRuo7Sjud6ae1KA2Us3FG5yllueT5CQLTTg6ooNZadBrBrtiNDMZfTPa5zsyW7ysrKuOyyyyqTJFR2iTcqfCjBAJ0i3MTkF4TKRqZ1YNZ1Myh3x3LAmcz6EoW9B72Y+wKc23M0bx3MIt/nI9ZVmfIRQkFVBVZLGcXFfjwq3H36RNK7j0c5kgySJEmqi2qx0HvQWHI8Fjau3IMhBGk9enFaz4yWDk2SpFOYTJq0gBUrVjBr1iy8Xi8uxYLmTgjtsys+9uzZw6xZs7jvvvtw2JxcfuVlIGBPeRbRllhUtYCeGb0Z2qETfT0OHL4CFFGEU4/An5uHK60D4wd1YHzvG7E47A1O1mq1qPxhTDRZGwpR4qOItjuAym8gS0pKcbsjQg1c02/FHShkyuhorJa2Px+CJLV1qtVKz7vvqLF6EPy6etCgU2D1oLbEFCbrDvzEwh3LqAj68BsBTCFIiIjFZXXWeUxTepsIIcjKygr1JLFarcyZMweAiIgINF3HLCmlS1QUPaJi6JOUTLf4eBwWK47kpNAk40s3ZLGoJAP9UAAtmI8WNHEGTexGEE/6aG4YVcobW9dTUp6Dt9Q88plROR9WanIsl3ceTdcyH6U7duHp1bM5XkpJktqxrH2Cjati2frNx9g1lbMmnYZVk+1OSZJajkyanGSBQIB77rkHr9dLUmwsRbmlCAUUTAQqvqBAEGTP7t38dtbvSInrwMQhA0nu2ZOL+5xD+tWRuHdmYfMXUeqwU+CKpiSxM+5OGXQ5vT+utMr1621WDazHXmVBCEHpsq9QggGsES4wjdB2TRhophFKmmhWK8GyMkqWfYUY3E9+WyhJrcCpvHpQW3PAe4gPNi8EIDkygQrdR4m/FItqocJXwb4fd5O1cQ+lhSVExrhJH9CZjoO6oFkt+HV/o3qb7Nu3j/Xr14cSJXl5eaF9ToeDP//5z2hHlob/08hRRPTqh91iQVEVLKlplMansc2WwGlnDg5d59C2PGwHSnAoCg6bht1uxeHWMPNy0fQyOgwazYvXd2PLzq1s2LiPkjI/kS47Q/p3pHOHgezfEIsv/xuyP/8Cd88e8rNDkqQ6+Q4f5tCylXy3U8dnjWXPlk3EOm1cdfbklg5NkqRTnEyanGRLliwhMzOThIQEfAXF6IoF0wji85fjC1YcWQVCIISCaerEuOPZ+9N2knv2JNXZkW1KGrZO8ZQ5ooiJjmBYnySG9kokMcZ1XPEc3b0/tF2AGgxi+nxUb9/K7v2SJEmNZwoTVan8hjQ9KpXR6UOJc8UwosNA/t+K57Fb7Oz+cQffvr6UksPFCNMMHbtl6UbciVGMum4Sqf06UlBRiGEaodWRhBBkZ2eTmvrr/CCPP/44a9euDf2umSbdE5PoHhlJd2dE5RBOV+XnRc+Jk8jetocsVxKbKyLJLwyg7TbRgiUkdc0nKbYy8Z6W4qHwgBePx0FyBw9JKR4SEl3sffYJ9pf6iBwbR2xMgDHDOjN6WFf0oEYwoKGqJqpWwaEkB3klCbj258jPDkmS6nVw0w9sX/Q5UfYk/NFdsWkmCQkJ9Owpe6hJktSyZNLkJFu2bBmGYaCZJr4ACAX8QT9lviPd6xWwW5047TbKA+XE9Yxj5CXnABAX5aRbjzTcETaG906ic2oUqtq0b+xk935JkqTmJ4Tgh+xfWLJrJbOGXU2Ms3J1nwt6nxkq86cxs/hq6Vfc/coCzJIgHZPTsAiBP68Ae3wsuqKQn1/Aple+4dr/vZgzRp/Ogf0HQpO2btiwgfz8fBYtWkRsbOUqNMMHDaYs+xC9YmLogkrnSDe2I/dwRQH/gYPYunfjx5+zeecbHS2YihY0QZThoLKXot1hxWP9dcLFIcPTGTw0Dau1jJKCHZQUfE988kUs6DOVHCWf02xbMIWCz29HN7QjCzcpGELg0AKkds7n29Jx7O8dx4gGhotKknRq2/HjWip0H9sGd6Zw604EgrFjx8reaZIktTjZ
ejnJioqKUBSFiqISdMWCgsBhcxAI+nHYnNhsDlRFRRNllOpWDudrlPkNIl2VDdgbL2j+YTGye78kSVLzySnN5cMti9hTuA+AFXu/q5EsqeJSHTz16BOUl5aR3iENRVEozc7FjxO1uILIlARcHVxkZmbyx1l/oFevXhQVFdU4h81mY+f27YwYNQqA83r3YfDWHUf2CnSLnZyYjmQqCaQM7MrA7t0AUAIGtvIgFlXF4bASGWkjPSOGtPRoElPcJKZ4AAj4ivF5t+PN346//NehPsV5OzlcDAkxPlxOH+V+O4ahHblqFYWAbiE2xktCjI88L3IScUmS6hTUg3h37sawOylP6YWjwqSfbzgTJ05s6dAkSZJk0uRki46OxgwG8Rm2I3OZVE6WF+2OCZURAMKGRfjo061Djd4kMtsuSZLUOgX0AF/tXs3KzLWYwsSqWji9y1jGdxpZZ/nqwzUVRcHw+SjzG5TpOoah4fT50BwOIiMjOXjwIE6nk4SEBPr16UOflFS6O5yklpfTsdqCO4HkjmQ7U8m2JZGtR+DTLSjFlfsKd3uZdqRcRqdYxo/rTHrHGJJS3UTHuH5dLQ3wleexf+sSfGU51SJWiIjuSFRcTyKiOzMgbhORWjYWi8AMWFA1BQGYpomqqihHjrFY/PTIyKbM6IFFLlcvSVIdVm5YiaXCz57eo/BERzPkzLOYccdvZbtXkqRWQSZNTrKJEyfy73++hM8GFk3UWUZB4BdgV+DaK6bhcsieHpIkSa3Z5sPb+WjrYop9XgB6J3RnWq/JxDqj6z2marimqqoUFhaSfyiH8kCwcqcrioqiEiKTHURFRVGQn8/gTp2Zc8656AcOIsp8iDIfJlCyYydxo0eRvb+Il//9M6bZA45MUaUqCg6nhbjECAYPSw9dOzrWxZRze4d+14PlGL5y7K54ACzWCHxlhwEFl6cDnrgeeGK7oR1Z5cfQTVTjEDFxxQR1C4Kq5ekJ/VcoAApB3UJMdDHluYcwDYEme5pIknSUbT+uIcrpIadjL2JsVk7PSJAJE0mSWg2ZNDnJRqSl44mI47C3AE9EVJ0fCEKYlFWUkOSJY0Raeh1nkSRJklqT/d5sin1eYpxRTOs5hT6J3Rss7/f7+fHHH/F6vZSUlCBME0M3AQWrxYqqWKgIgNPnw2KzYTNN9EM55GUVUqDHUWCLpUCNxnBZueuqynmvYuMjcdotGArEJ7vp2jWOvr2TSEx21zn/laH7KCnYiTdvO2Xe/TjdKXTqexkAFquTtB7n4oxMxmKrvRKbqikMGlxMWaGCZnHBkaSJEIKy8nIiXI5qn282DL2MQYOKUWVPE0mSjnKoNBd77iEOdB+II9JNcPfPOHrEAse3yIEkSVJzk0mTk0gIwfbP1zBl7LW899VLlJQV4XK6QyshAOiGTnlFCQ6bi8ljr2X752uIH9BfZtslSZJaEd3QKQ2UEX1kgtdJncdg02yM6TgMm1Z370AhROhebhgGv/zyC8FgEJvNhlXRcLjcOKxONFVFoGAcmf/KdLoJYKHAkcFKdSC604p5ZEUew1TJLwmSGGvF7rDw+zsmEBFprzduQw9QWrgLb/52yor3IcSvq/UIU8c0dVS18jPJHdu13vMIYSCMEixWO6DX2KepBoqi11h5zWK1I4wShDBQFNn0kCTpV99l/YBhushL7oxFgU+f/zuf/e/jfPzxxzVWB5MkSWopsuVyEpnBINvy7XRM78clk67ni2/+Q1FJHqZZ+e0iR+Y3ifEkcPboy+mY3o9tBSWMCgbRbLaWDl+SJEkCtuft5qOti3BY7MweOQNVUbFqVk7rPLpW2UAgwJo1a1i0aBGHDh3ilVdeAcDlcjHtvPN45+23iXNFoBtWDNUKQlTOdyUEJgoVAQgEShGqhZROQ/HbHGguKympHrp3j2dwvxTiYpyh6zWUMAE4uOtLSgt3h363u+Iqh97E9cDmiG70a6CqFroMmI6hV9TYHgwGWbFiBRNGTag1ibhm
cYUSMpIktV1z585l7ty5GIbR5HNtzSzA8NvYNno8Di0Jy95tKMIkPT1dJkwkSWo1ZOvlZFIt+JI74M8tJrHbIK7t0o/dezeyN2szPl8ZDkcEndL70KXTAFTVgl8R+BPTQDYyJUmSWlyxz8sn277i55ytALjtkRSUFxEfEVujnGEYrF27lkWLFrF06VJKqy3pvnfvXjp16gTAHaPH8s0nn5JVVIwjMhEFhV+7Z5goCHQ0fOUFJCR3ZPotVzCkfxrJcRHH7H1omjplRfvw5m8nIX00NkdljxhPbDcCFQWhRIndFXfcr4fV7sZqd9fYpgWD6MKFIyJRrrwmSe3U7NmzmT17Nl6vl6ioqOM+T1A3WLB4O0UlcMXZwyiyKixb+iEA48aNa6ZoJUmSmk4+jZ9EqqaQ33svu2L2oJo2VBRsPe30YDCGaaKpld2t97MNE4GpBrAkd5ZjwCVJklqQYRqs3reeJbtWEjCCKIrC2I7DmNx1PA5LzZ4dH3/8Mf/7v/9bY2ng+JgYxvXqw7DYGJIjIgEIBg3yIzI4c/RVvPr129WGa1qpmk1V14OU+UqJsNp5Yc5dnDOpV4NxCtOgzJuFN38HJQU7MY0AAI6IeOJShwHgie+JJ76XHPIpSVKL2ZFVxOJfoNiyi6ISH9FuB0O7JaKpCs+s/RaQSRNJkloXmTSp5rHHHuO9995j27ZtuFwuJk6cyN///vfQt4JNpRs6e7zZBGx+VDXw6w4hMIVAVap/ywimKdjrzUY3dKwW+Y2dJEnSyeb1l/J/G94hpzQXgI7RHbiw91RS3UkIIdi8eTNxcXEkJSUBEBUVRVFRER6Hk9HduzMsKpoMRau8v5eUU7J5C46J41m2ZAerNpQSmzac8yfHsnj1axR5cwEdzSIwdAUFjVhPAmeNvZqo/WU15kSpTg+Wk5v1LSUFOzB0X2i7xRqBJ647EVEZoW3KkblQJEmSWoIQgs9W7+VgESz8fgueWJPLDmaS9covlPbrTX5+Pk6nk8GDB7d0qJIkSSEyaVLN8uXL+cMf/sDw4cPx+/3cddddnH322WzatAmLpTleKo3YwjGYJd6am4XA5/fjsNtrJE0AYj1RgNYM15YkSZKq21WQyfLA9/Qs6EuvpG51lom0ubBpVlxWJ+f0mMSQ1P7s2b2Hf7zxHosWLWL//v3ccMMN3HLLLQAMTO/InwYMppcnKtR7UFc0Dni64EhNpHe/vgAMHtqBdSu3Yzd8dOnQg5suvJed+3+mOPgduvBiUTxEWUfSLa0fWCP5ZX8u/bZuI6p3L4QQGMHy0Ko2qmqhOG8zwjTQLA48cd3xxPXA6U6VSRJJklqVbZmFbNlbAIA/Yh96cidWK0mM3rOLXwKVSd+RI0dik3P5SZLUisikSTULFy6s8fvLL79Mly5d2Lx5MwMGDGjy+a0WlfuvHU9pebDGdl3XKyfOmzChVnLG7bJhtchGryRJUnMJBAIsXryYp956nn2Hsvjxw++44+o/MGXKFCxWC98f3MSApN7YLDZUReXK/udTcCiPlV+u5Ikv/8quXbsAEIaBxTAo2vXrxKruDqn0j08AVaUwqTvbfLFkl2gYFSByrJwWFQ1AXEIk41z7+aYsDs0MYNE0Rg3qTp8hNhRFIITC5u8z8BZq6CJAoSWGX75YRFdXLiX5O1AtdroMuAYAVbORlDEBqz2KiKh0mSiRJKlVEkLw6erd+N1luHvbCAYjsSiReD1OIuMK+O77DYAcmiNJUuvTppIm8+fPZ+XKlWzYsIFNmzYRCAT497//zcyZM+s9Zt26dcyZM4c1a9YQDAbp378/t99+O5dffvkxr1dcXAxAbGzsMUo2XozbQYzbUWNbMBgk2gVpiZFy4jxJkqQTaMWKFdxzzz3s2rOLsvJSLEGTQ9a93PD1D6R3TGfczCk4unrIKy/krO6nARBlc3PZjZfg9XoxAwGUYJABiUmMiE9gUGoasakdQuffn+fjp25nsueAHz3n15UlVLuFjC5x
BHUDq0WtXE2twIGhWLCaQQTQoVMeqirw+yzYHTodOuXh9yeRklBKTGI5urOC/APrUVQVVfehB8pCvU1ikpqe2JckSTqRtmUW8suefCw9rVgccVjsHrRyH10P7CW+a0ee/e0NfPPNNwwfPrylQ5UkSaqhTSVN/vznP5OZmUl8fDwpKSlkZmY2WH7p0qVMnToVh8PBlVdeidvt5r333uOKK64gKyuLO+64o95jDcPgzjvv5JxzziEtLa25qyJJkiSdZCtWrGDWrFl4vV40l5UE04lNNQnYVEodsHXHNnb9ZQ+DzhxOdsQOpj4/EUVRsFgsjEzvSObWrYzo3JFhaelE2u0oCrg6dsTTtw/CNEFR+HjBT+QeLgdA0RQSOkQxfGQ6Qwd1wKL92gNEKBqHI1LwlwcJWKzERHnxxFUQ0K2YikZAV4hNLCUi2o9hVA7RDCg2ImO7E53Qi8iYTnL5XkmS2oyqXia6pwybIx4QKKoTi15BwvatuKaMwuPxMHXq1JYOVZIkqZY21eKaN28e3bt3JyMjg8cff5x777233rK6rjNr1ixUVWXFihUMGjQIgAcffJARI0Zw3333cemll5KRkVHrWCEEN910E/v27WP16tUnqjqSJEnSSRIIBLjnnnvwer3EJsZRWJyLVTcRCmhBg2BZEMMw8B0uZu1/VlLQsSM///wz/fv3B+B3U86kKC4R1WbF3bMHtm492KZHs/iXAs7s2J2kI/OXDBicyrp1++k7MIUJ4zoTEVH3uHyhKJTER1BYVAEIenXJQbUYlJsWDAVUNKxKEIs9yJ7ceA4Ux+FTU5h44WlyyKYkSW1OqJdJLysoKlA5h59NDxAo93HIGkPz9euWJElqXm0qaTJ58uRGl/3666/ZtWsX119/fShhApUrG9x3333MnDmT1157jQcffLDGcUIIbrnlFpYsWcKKFStISEhorvAlSZKkZjR37lzmzp2LYRjHLLtkyRIyMzNJSEigyO/FXmEQ0A2KAn78QR1TARQFCwpGIMBAVySx6q/JicQJ44kcOJA9ZhQrfjhI9pJCNH8hAKu/zWRAj0QAJkzsysRJdU8qW53VovI/N4ygpCyAr+AHSg4WgClwOVXKfILIyEgwbQihM3HCZKwR6XKOK0mS2qRQL5OoMmz2eIQQqKqKYhr4XG72Jyfz1B3/w/VXnMk111yDw+E49kklSZJOojaVNAnHsmXLADjzzDNr7avq+rd8+fIa24UQzJ49m88++4zly5eTnp5+wuOUJEmSjs/s2bOZPXs2Xq+XqKioBssuW7YMwzAwVYHuq8Chm5SjUq6bqCjYFAWXzYrb4aSwogKbx0OUwwlAUDd5Z10hu7YcRi3fh2IKNMBm0Ujs4GHiyGpL+qq1lwSuixACq56NkbuW8txfQOgoqoamKWiqwGbVACdBfzFK6Y+kde5d53LDkiRJrd3RvUwUjiR/DR+6RePbKBdbf/mBea9kc/3117dssJIkSXVot0mTHTt2ANC9e/da+5KTk4mMjAyVqTJ79mzefvttPvnkE5xOJ4cOHQIqJ4Kta+kzv9+P3+8P/e71Vi4lHAwGCQaDtcrXp6psOMecaK0xpnC09fibStZf1r/6f8M9rj0qKipCURS8vhLsFQYIUG2RuIQVl0XDbfowNQU90o5V0/B3zCCyaxcAfGUBDvxwEE03sWgqnlgnAwalMmxkOp4oZ9ixlBTuJm//d/jKDmPofkwjgGZxYrN7KrutV1ROQq4oCprFSWnRHsqKM4mM7tScL4kkSdIJV6uXiakDBgILqAYGsOvgAYRmEpHQXSaHJUlqldpt0qRq5Zv6vn30eDyhMlVefPFFAMaPH19j+9KlSznttNNqneOxxx7j4YcfrrV90aJFuFyusGNevHhx2MecaK0xpnC09fibStZf1j8c5eXlJyiSlhcdHU3Q0FF8Jg7dRFctmKqVSGflimW6YaAJnQqhU1IRYMfeMnwBHYfNgjvKQZeucdgdVoaPTCe9Uyxq
I3uU1KWsKBNf2WFQNFTNhmZxYHVEoygKQogaZVXNRlCvIHffGiKiMuQDhSRJbUqtuUxEoHKHCB6Z1kTn8JZfUKwKalQXtmUW0quTnN1EkqTWpd0mTY7H0Y3VY7n33nu5/fbbQ797vV7S09M588wz8Xg8jT5PMBhk8eLFTJkypdUsOdwaYwpHW4+/qWT9Zf2Pp/5VveXao4kTJ/Kvf7+MsxQQCrrmQKCgYCJQCagOrGYAylRUodAlqS97s4rp1TUOgKtmDDuuRIkQJt68bdhd8TgiKufIiksdiqrZsLvi2bf5v2hWV73JENnbRJKktqqql0kgshSHPaGyl8mRprYiBIpQKC0qwJu9H0WzEN27I5+u3k3PjBiZIJYkqVVpt0mTqh4mR/cmqeL1eomJiWnSNex2O3a7vdZ2q9V6XA9qx3vcidQaYwpHW4+/qWT9Zf3DqX97fq0yBnfF43FSdrAQiysSU7GgIKhawcFUbPhVjfKyQqJjEjnvotPokBAROj7chIkwDYrztpB3YD1BfzGRMV1I7zkNAKvdQ0L6GPZuehvDCGDV7JhG5dCoyuS9iWkEQw8NiqKiGwHZ20SSpDZFNwSHiyqwptkqe5mYgdA+e8DEGTDYv3EjIIjp1BVXt2hyD1egGwKrRd7nJElqPdrtNPxVc5kcPW8JwKFDhygtLa1zvhNJkiSpfRFC8PXeNVw7pD8RVgv5Pj/BIyvuCCqXvtSNIN6yIiItKteM7kZexk4i3bWT4sdimjqFhzay68fXyN79FUF/MZrFgTMyuUZvRiEMAv5iNM2Gafir/QRQMTCNQI3tmmYj4C9GiGOvFCRJktQaWC0qE86IxeqMRlV0rBYNq0XDAUT4dVTg0I7NKIpCSp++WJ3RTDg9Vq4SJklSq9Nue5pMnDiRxx57jEWLFnHllVfW2Pfll1+GykiSJEntm2Ea6Hv2MdEWjXXsZF796RcKvXmUCwNQEQhURSU2KoGZA/syxmZj8559GCMNLFrjPyaLc7dwOGsNeqAUAM3qJC5lKDFJ/VG1mpOJq6qFLgOmY+gVNbYHg0FWrFjBhFETavX80SwuVLXdfmxLktTOmKbJsv3ZCFyoioE40rPP6hegaAgFNFcEmt1Oct/+CDSW7s/m7B69UVWZOJEkqfVot62vM844gy5duvDWW29x6623MmjQIKByuM5f//pXbDYb1113XcsGKUmSJJ1wmqpx5sEICk0b8Z0m8ptul7N37wZ2H9hChb8cp91Flw696dRpKDF6Ia6ib5hyMAJN1cK6jqH70QOlWGyRxKUOJTqxX4NJDqvdjdXurhlrMIguXDgiEtv1cClJktq/gKFTEVRQMBCi8n6qGQKwELRW3huHXzOLgaaOarGgYFARVAgYOg619qqVkiRJLaVNJU3mzZvHqlWrANi0aVNo27JlywAYN24cN954IwAWi4V58+YxdepUJkyYwJVXXonb7ea9994jMzOTJ598kk6dOrVENSRJkqSTSOg6osBLnjWRMlscDmHQt9MA+nYagBBQNUWIoQjKbHHkWROJLvQidB2lnsSFofspzPkJuzMWd2w3AKKT+qFqFjzxvWSPEEmSTnkOq417xg4mv7yscoOAwgUfULF5B2YggGLRcPXrRcwVF1VNL0W8KwKHVSZMJElqXcJq1R04cIAOHTqcqFiOadWqVbz22ms1tq1evZrVq1eHfq9KmgBMmjSJVatWMWfOHBYsWEAwGKR///787W9/44orrjhpcUuSJEktR7VaUa+8gfXzf8RqaiiV05ggANMwUTW1sr0uQJiwPmkcPa4chFpHwsTQfRRk/0DBoZ8wDT92ZyyRMV1QFBVVtRCd2O8k106SJKn1SvfEku6pXELYu3Ub23/+BWd5OYEKH8UWDffPv9DlrKl4evVs4UglSZLqF1bSpGPHjiQnJzN06FCGDRvG8OHDGTZsGAkJCScqvhpeffVVXn311bCOGTFiBF988cWJCUiSJElq9YQQfPptNoqw
Ymogqq0+Y6gKmnJk7LwCiiJQTJVPv8mmT/9OoZVq9GA5Bdk/UHjoJ0yzcqUbmyOGuNRhJ70+kiRJbY0QguzPv8AMBjF1g+zych7+YT1pERE816sn7p495MpgkiS1WmElTVJTUzlw4ACffvopn332WWh7enp6jSTKsGHDQkv+SpIkSVJLCuomZQdL0ABDAEdWsYnxeOneaR879nak0OsBKnufaEDZwRKCuonNqlGYs4mczBUIUwfA7oonvsNw3LHdUBQ5WaEkSdKxlGzbTsGmn1mbl8uG/fvZlH2QnLJS4lxOin7ZTMm27bK3iSRJrVZYSZOsrCyys7P57rvv+O677/j222/ZsGED+/btIysriw8++CBUtmvXrqEkyvDhwxkyZAgul6vZKyBJkiRJDdEUhY5xERSr1VeqEfTqnE1cTBGaYmXrrnhCg+qB6Bgn2pFvPa12D8LUcUQkEt9hBJExnWWyRJIkqZGEEHz8/Av8/bOPOVRejiEEZX4/umny06FD/O6zj7nb5eSaF/5X9jaRJKlVCnumupSUFC688EIuvPBCAP7yl78wZ84c0tLS6NGjB16vl40bN7Jz50527drFO++8A4CmaQQCgWYNXpIkSZKORbOoXHn9cHzlwdA2f1kWuZnf4fNpdOhQzqAx6dgj0tGDJZQV/IjDpaBZKhMjEVEdyehzCU53B9mglyRJCtPC+W9y//w3KAsGiXG50FSV8kAAq6YR54rgYGkZ989/g7hRIzn72uktHa4kSVItTfqqbN68ecyZM4cnn3ySzMxMFi9ezHfffUdBQQH/93//R1paGkII0tPTsdnkTNiSJElSy4h024lPiiQ+KZK4xAgCpT+gKCZCsaIoJhXF3+EvWU3h/ncJlG+ntHAjplGZ6FcUBZcnTSZMJEmSwuT3+7nvgQcoCwRIiIzEqmlUBIMIAVZNw2m1khAZSVkgwH0PPoDf72/pkCVJkmppUtLk2WefZciQIfzpT3+qsd3pdHL99dfz888/M3bsWIQQ7Ny5s0mBtmZz586lT58+DB8+vKVDkSRJko6hrDiT0qI9qBYHIBCmgTdvK4WHfgIEEVEdSe95Pqomk/2SJElN8cmrr7E/9zAxLlco8VwerExIu46sUKYoCjEuFwcOH+aTV1+r91ySJEktpUlJk127dtGtW7d697vdbt59911yc3N55plnmnKpVm327Nls3ryZdevWtXQokiRJUgOEEOTuW4Np6gjDwKL4MIwggsrJYTP6XE7H3hfh8nRo4UglSZLaNiEEC995B9M0sWpa5STcQlARCAACp8Ua2mbVNAzTZOE77yCOTNYtSZLUWjQpaZKQkMCWLVsaLJOcnMxpp51WY7UdSZIkSWoJVb1MNIsTzWIHwGJxYLNHIcwgpim7hkuSJDUHoesUFRZV9jARJggTYZrEOV24bTYcmhbajjBRFIWiwiKErrd06JIkSTU0KWly1llnsWnTJr788ssGyzmdTvbu3duUS0mSJElSk1TvZaJqNhRVQxdOrI4oNKsL09TJ3bdGfsspSZLUDFSrlYwJ49AiInCkpOBIScGZmkJsRkeik5JwpKaEtjtSUtAiIsiYMB71yLAdSZKk1qJJSZN7770Xp9PJ5ZdfzkcffVRnGa/Xy5o1a5pyGUmSJElqsuq9TH6d1LXyv4qioFmclBbtoaw4s+WClCRJakfOOOssLDYbOqDabJU/VitC01Ct1tC2oBBYbDbOOGtqS4csSZJUS5OSJp07d+bNN98kEAhw8cUXc/rpp/Pqq6+yZcsWMjMzWbhwIWeddRaHDx9m1KhRzRWzJEmSJIXl6F4mdVE1m+xtIkmS1IwmT55MRkYGubm59d5XhRDk5eXRqVMnJk+efJIjlCRJOjZLU09wwQUXsHTpUmbOnMmyZctYvnx5jf1CCOx2O//v//2/pl5KkiRJko5L3b1Majq6t0lkdKeTG6QkSVI7Y7PZePzxx5k1axYHDx4kISEBa7XhN36/n7y8PDwe
D4899hg2m1y1TJKk1qdJPU2qjBo1is2bNzN//nwuuugiOnbsiMPhICEhgYsvvphvvvmGkSNHNselJEmSJCksVb1MDCOAoqiYRjD0A2aN3xVFxTACsreJJElSPebOnUufPn0YPnx4o8pPmDCBl19+mS5dupCfn8/+/ftD/y0oKKBLly68/PLLTJgw4QRHLkmSdHya3NOkiqqqXH311Vx99dXNdUpJkiRJajIhDAL+YjTNhmn4q20HFQPTCFC984mm2Qj4ixHCQFGa7WNSkiSpXZg9ezazZ8/G6/USFRXVqGMmTJjA0qVLWbJkCV999RWbNm2if//+nHHGGUyePFn2MJEkqVWTrUFJkiSpXVNVC10GTMfQK2psDwaDrFixggmjJtToLg6gWVyoqvyIlCRJai42m41zzjmHKVOm8Pnnn3POOefUuvdKkiS1RrJFKEmSJLV7Vrsbq91dY5sWDKILF46IRNlwlyRJkiRJkurULHOaSJIkSZIkSZIkSZIktTcyaSJJkiRJkiRJkiRJklQHmTSRJEmSJEmSJEmSJEmqg5zTpBnMnTuXuXPnous6AF6vN6zjg8Eg5eXleL3eVjOuvjXGFI62Hn9TyfrL+h9P/avuXW1tqd2qeOW9t+W19fibStZf1l/ee4+tNb5PWmNM4Wjr8TeVrL+s/4m+9yoijDv0f//7X8455xxcLlejgzmV7N+/n/T09JYOQ5IkqUmysrJIS0tr6TCOqSphHQgE2LVrV0uHI0mS1CRt5d5bRbZ7JUlqDxpz7w0raaKqKk6nk7POOouLL76YadOm4fF4mhxoe2GaJgcPHsTtdqMoSqOP83q9pKenk5WV1Wpez9YYUzjaevxNJesv63889RdCUFJSQmpqKqradkZvyntv69HW428qWX9Zf3nvPbbW+D5pjTGFo63H31Sy/rL+J/reG9bwnAceeID333+fDz74gA8//BCr1coZZ5zBJZdcwvnnn098fHw4p2t3VFVt0jcEHo+n1b3RW2NM4Wjr8TeVrL+sf7j1j4qKOkHRnDjy3tv6tPX4m0rWX9Zf3nuPrTW+T1pjTOFo6/E3lay/rP+JuveGlc5++OGH2bRpE1u3buXRRx+lX79+fPHFF8yaNYuUlBTOOOMM/vGPf5CdnR1WsJIkSZIkSZIkSZIkSa3NcfUB7NGjB/fddx/r169nz549PPHEE4wYMYJly5bx+9//nvT0dMaOHcvTTz/N3r17mzlkSZIkSZIkSZIkSZKkE6/JAyczMjK4/fbbWb16NQcOHOCFF15g4sSJrF27ljvvvJOuXbsybNgw/vrXv7J169bmiLndsdvtzJkzB7vd3tKhhLTGmMLR1uNvKll/Wf9Tuf6N1Rpfp9YYUzjaevxNJesv638q17+xWuPr1BpjCkdbj7+pZP1l/U90/cOaCDYcBQUFfPjhh7z33nt89dVXBAIBFEXhb3/7G3feeeeJuKQkSZIkSZIkSZIkSVKzOWFJk+q8Xi+ffPIJH3zwAaNHj+aOO+440ZeUJEmSJEmSJEmSJElqkpOSNDlVVF96DSqXMZIkqWkURQlrKUPp+LWHZS9B3nultqst/d1JzUfeeyWpecm2o9QYJ2zJYalhBw8eJD09HUVRiIqKQtO0lg5Jkto8wzAoKipq6TBOKVlZWU1aRvJkq7r3AkRHR8t7r9QmCSEoLS0lEAi0dChSC2nL916Px4PVam3hiCSpUiAQoKSkpKXDkNqIxtx7ZdKkGVVl2v/nf/6HyZMnh34/FsMw2L9/P2lpaa2msd8aYwpHW4+/qdpT/YuKili5ciVDhgzhjDPOaNQxwWCQRYsWceaZZ56Sjbjjrb/X6yU9Pb3R967Woireu+++m9NOO424uLhGHdca/05aY0zhaOvxN1VT6q/rOt9++y0+n4/f/e53bfLeJe+9p+a9984772T8+PEkJyc36rjWeJ9ojTGFo63H31RH1z8rK4sNGzYwbdo0+vbt29LhnXDy3nvi770yadKM
FEXB5XIxZsyYRj/cQeUfekxMDN27d281N7rWGFM42nr8TdXe6l9RUcGhQ4fweDyNKh8MBnG5XKfsN19NrX9b69KqKAoWi4VBgwZx9tlnN/q41vh30hpjCkdbj7+pmlr/Dh068O9//xtd1xud/GtN5L331Lv3AnTv3p3zzz+/0ce1xvtEa4wpHG09/qY6uv7Dhw8nNzeX3NzcRrcd2zJ57z3x9962M3CyjdA07ZRd7kmSThSHw0EwGGzpMKRWTNM0HA5HS4chSU1S9R7Wdb2FI5GkxpP3Xqm1URQFh8Mh76VSs5FJk5PMNE2eeOIJBg4ciMvlQlEUMjIymuXcjzzyCKqqsmnTpgbLrV27NjRB0iOPPNIs127LFEWhU6dOLXb9pUuXcskll9ChQwdsNhsxMTH07NmTyy67jBdeeIHi4uIWi+1kyc7Oxul0csstt7R0KJIkSZIkNZOWavdWtXOrflRVJSoqilGjRvHss8+26y9i9u7di6IonHbaaS0dSljCjVu2HaWTSSZNTrI5c+Zw9913k5OTw7Rp05gxYwazZ89u8nlzcnJ44oknuPTSS+nfv3+DZd94443Q/3/zzTfrLXfgwAEsFku9N6+2cFNu7TE+8sgjnH766bz//vtERUVx3nnnceaZZ+J0Onn//ff5wx/+wJYtW05qTC3xmqWkpPDb3/6Wl19+me3bt5+060qnjpZMWLfVxOjRDx11/cycObPGMRaLpc7hqfUlp1s6aR2OcGN96KGHUBSFV199tcZ22dCXTiUt3e6dMWMGM2bM4JprrmHAgAGsW7eOP/3pT5x99tmyF0IbJ9uO0skk5zQ5iSoqKnj22WeJiopi06ZNJCQkAJXj8Hbs2NGkc//1r3+ltLSUe++9t8FywWCQd955B4Dk5GS2b9/Od999x8iRI5t0/bZsy5YtLTL+b8OGDTz00ENYrVb+85//cOGFF9bYf+jQIebPn090dPRJj60l3H333bzwwgs88MADLFiwoKXDkdqZOXPm8Je//IWkpCSmTZuG0+mkZ8+eTT7vsRrujzzyCHPmzAGgd+/ejBw5EqvVyrZt23j//ff573//y7Bhwxg1ahRQmbTs3bs3EyZMYPny5U2OrznMmDGj3n3jxo07iZG0D1UN/X/84x/cdttt9OjRo6VDkqQTojW0e49OWn733XecdtppfPXVV7zzzjtMnz69SXFILauq7ThnzhzZe146oWTS5CRas2YNpaWlTJ8+PfTB0RzKy8t57bXX6NevH4MHD26w7MKFC8nLy2Ps2LFMnTqVBx98kDfeeOOUTpr06tWrRa77/vvvI4Tg8ssvr5Uwgcqk1p133nnyA2shHTp0YNKkSXzwwQfk5OSQlJTU0iFJ7URLNdzbS2L06IeO5tRSSevj0ZyxyiSx1JrcfPPN/POf/+T555/n97//fbOdtzW0e482cuRIZs6cyT//+U++/PJLmTRp46rajh9++CG33nor3bt3b+mQpHaqycNzcnJyeOutt7jzzju59tprOe+887j22mu58847efvtt8nJyWmOONu0ZcuWoSgKkydPBmD+/Pmhrs33339/k8//7rvvUlxczFVXXXXMsvPnzwdg+vTpoQ+KBQsW1Brb+fDDD4fiXb58ea3u2A899BCdO3eudz/UHObh9Xq5/fbb6dy5M1arldtuuy10rc8++4wbbriB3r174/F4iIiIYODAgfz1r3/F7/fXW5esrCxuvfVWevTogdPpJDY2lmHDhvHwww/j9Xp54YUX6NatW4MxQu0u199//z2KojSYSHr++edRFIXbb7+9Vky///3v6dq1Kw6Hg9jYWM477zzWrFlT6xy5ubkAx9WQCOc61Y9p6PUCGvXvWuWbb77hggsuICEhAbvdTqdOnbjllls4ePBgrWs39r1w9dVXEwwGT+hDmnTqqWq4T5s27aQ23BubGG2pxG1r0KtXL7p27drSYTRKc8Z6dJJYklrKp59+yjfffENqamqznbM1tXvr
UrUE7eHDh+vcH077BuDnn39m+vTpdOnSBYfDQUJCAoMGDeK2224jOzu7Vvlw2nDV208VFRXcc889ZGRkYLfb6datG3/7298QQtRbV6/Xyx//+EfS09NxOBz07t2bZ555BtM0671OQ+2043l9GtP+PJZly5YRFRWF2+3m66+/rrGvqu34wQcfNOpcknQ8jitpEgwGmT9/PqNHjyY1NZVrr72Wp59+mjfffJPPP/+cN998k6effprp06eTmprKmDFjePPNN9v1pEsNsVgszJgxIzR+/rzzzguNsQxnecz6fPrppwDHnIOiuLiYjz/+GJvNxuWXX07nzp0ZM2YMeXl5LFy4sEbZQYMGceaZZwKQlJQUinfGjBmMGzeOQYMGcckll9S7v7qKigomTpzIq6++yqBBgzj//POJiYkJ7f/Nb37De++9R2xsLGeffTbjx48nKyuL+++/n3POOQfDMGrVZeXKlQwYMIDnn3+eYDDItGnTGDt2LMXFxTz00EPs3r2bXr16cfHFFzcqxuqGDBlCr169WLt2Lbt27aqzTNVcMNW/ofjmm28YOHAgc+fOxWq1cu6559KvXz++/PJLJkyYUOvbxPT0dADee++9ej+46xLudRr7egGN/nedP38+48eP5+OPP6Znz55cfPHF2O12XnzxRYYMGcLWrVvrjP1Y74Wq9/Bnn33W6NdDkurT0g33cBOjDz30UCjRu2LFigaTlsfb6G5sA/pkqWuekOONN9wHl3Ab/vXNafLxxx8zevRoXC4XiYmJ3HrrrY0aXy+TxFJLy8nJ4eabb+aNN95o1h5fraXdW5+SkhIAEhMTa+178803w2rfbNiwgeHDh/Pmm2/idru54IILGDVqFMFgkOeee45t27bVKH88bTiAQCDAmWeeycsvv8ywYcOYNGkSBw4c4J577uGBBx6o8xi/38/pp5/O66+/zogRI5gyZQqZmZncfvvt3HDDDXUec6x2Wrjtv8a2Pxvy0UcfcfbZZ2Oz2fj66685/fTTa+yveh+0liGtUjslwvT666+LDh06CFVVhaIoIjExUVxwwQXivvvuE0899ZR4+eWXxZNPPinuvfdeMW3aNBEfHy8URRGqqoq0tDTxxhtvhHvJNqO4uFi43W7x5Zdf1rl/8ODBAhD5+fk1tuu6LrZs2SJ0XT+u6yYlJQmLxSLKy8sbLDdv3jwBiAsuuCC07R//+IcAxGWXXVYrpiVLlghATJw4sc7z7dmzp1H7ATF69GhRWFhYZ7kPP/ywVuxer1ecd955AhCvvfZajX35+fkiISFBAOKJJ54QhmHU2L9mzRpx8OBBsWXLFrFz584GYxRCCEBkZGTU2Pboo48KQDzyyCO1yleds1evXqFtxcXFIiUlRWiaJubPn1+j/Lp160RMTIyIjIwUhw8fDm3ftWuXcDqdAhBut1vMmDFDvPzyy+L777+v973Q2OtkZ2eH3lONeb1ycnJCvx/r33Xfvn3C6XQKTdPERx99FNpuGIa47bbbBCCGDRtW45jGvheEECI+Pl7Y7XZRUVER2rZw4ULx/PPP13vM0QKBgPjwww9FIBBo9DHtyfHWv7i4WACiuLj4BEV2YhQXFwu73S7ef//9GttXrlwpZsyYITIyMgQgzjvvPDFjxgwxY8YMsXLlyibfey+99FIBiNWrV9e5/5FHHhGASE9Pr/E3Vp8PPvhAXHzxxQIQSUlJoVir7g1V1qxZI2JiYgQgevbsKS6++GIxfvx4YbFYhKZp4p133qlx3qq/v1GjRomhQ4eK6OhocfHFF4vzzjsvdA+aMWNGrXiq/mbDAYjU1NRar2ld99n6th9PvOvXrxcOh0MAYsCAAeLyyy8X5513nujTp48AxNKlS2uUf+ONN4SmaQIQY8eOFVdeeaXo0aNH6LXfsmVLo2J98cUXBSAURRETJkwQl19+uUhNTRVRUVFi+vTpAhD//ve/63ytdu3aJQAxfvz40LZDhw6JOXPmiP3799d5TGsn773N
d+994403xG9/+1sxdOhQYbPZGnwvVVm7dq04++yzRVRUlHC5XGLkyJFiwYIF9ZY/++yzxd///nchhBAZGRlhfc5Wj/vodlqVlmz3NnT/mjBhggBqtKN0XRdff/112O2b6667TgDiySefrHWdLVu2iIMHD4Z+P562YvX208SJE2u8R9atWyc0TRMul0sUFRWFXtPqxwwYMEDk5uaGjtm5c6dITU0VgPjggw/qvE597bRw23/N0f7897//LTRNE+np6XXel6vEx8cLm80mSktLaxz73nvv1XtMeyLvvSe+3RtWa2jUqFFCVVWRmJgobrvtNvHjjz826rgffvhB3HrrrSIhIUGoqipGjx4dzmXbjIaSJoFAQNjtdpGenl5rX0VFhbj11ltF586dhc1mExkZGeKvf/1ro66Zk5MjANG5c+djlp04caIAxLvvvhvalpeXJ6xWq3A4HKKoqCi0vbmTJuvWrWtUfarbsWOHAMTFF19cY/vf/vY3AYizzjqr3mOrPpCPN2mye/fuWomRKg8//LAAxKOPPhra9swzzwhA3HHHHXVe4+mnnxaAePrpp2tsX7JkiUhPTw+9TlU/0dHR4uabb67xYRvOdZ588snQh2djXq/qjvXv+uCDDwpAXHXVVbX2+Xy+0IfxqlWrap2zMe+FsWPHCkB8//33oW0yaRIemTSpqaUa7seTGK26Z02YMKHO/U1tdDe2AS1EyydNwok3nAeX40n81hXr3r17hcPhEFarVSxcuFAIUfme2rhxo7j66qtDdWjoQffoJLFMmrRtzXnvrUr2xsfHh/5/Q++lr7/+WlitVuF2u8WsWbPE7bffHjqurr+L559/XkyYMCH0INvcSZOG2r2vvPKK6NGjh3C5XCIjI0PcddddoqysrFHXbGy79+j7l2EYYufOneKmm24KfYEYDAZD+3VdF7fcckvY7Zuzzz5bAI16JjqetmLV/VBVVbF169Zax1R9wbhkyZI6kyaLFi2qdUxVsveMM86odZ2G2mnhtv+a2v586qmnhKIoomfPniIzM7PBY8eMGVMrdpk0OXWcjHZvWMNzduzYwd///nf27dvHM888w8CBAxt13KBBg3juuefIysri8ccfPyWXhdqyZQt+v59BgwbV2jdjxgz+85//cM899/Dll18yZ84cFEVp1HmrhnVU7zpXl3379rFixQqio6OZNm1aaHtcXBznnHMOPp+Pd999t/EVCkNKSgrDhg1rsMyOHTt47rnn+MMf/sANN9zAzJkzefTRR0P7qluyZAkAv/vd705IvEBo6NLWrVv5/vvva+yrGppzzTXXhLYtWrQIIDQc6Gjjx48HYO3atTW2n3HGGezcuZP333+fm266iSFDhmCxWCgqKuLFF19k0KBBNbp2NvY669atC21r7tdr5cqVQM36V7Hb7Vx22WU1ylXXmPdCbGws8OvQBklqimAwyObNm0lPTw+9t6CyO6/FYqF3795YLJYaQ2HqGspxtMOHD5OTk0N6ejpOp7POMl26dOGTTz4hPT2dkpISXnvtNWbNmsWQIUOIj4/nlltuadS1qnvllVfIzs7mtttuq/U3OGzYMB544AFKS0tD81cd7cknnyQ+Pj70e9euXUNdu1944YU6j2loyeEPP/wwrPjD1dh4q+4XVUOxquvVqxcpKSmh3+fNm0dFRQWXX345559/fmi7qqo8/vjjpKamsn79elavXt1gbK+88go+n4+rrrqKqVOnhrZbrVaeeeYZXC7XMevXs2dP/H7/SV9aXmr95s2bx969e8nNzeWmm25qsKyu68yaNQtVVVmxYgUvvfQSTz31FD/99BM9evTgvvvuIzMzM1R+69atPProo7z22muoapOnN6xTfe3e999/nxtuuIHx48fz4Ycfcvfdd/Ovf/2LP/3pT406b2PbvVWq7lWaptGtWzf++c9/MmvWLD744AMslprrYWzYsAEIr30zdOhQAGbPns2yZcsaXMb4eNuKABkZGXWu+Fa1
+tahQ4dq7YuNjWXKlCm1tlcNKV2zZk2toY4NtdPCbf81pf15//33c8cddzB48GBWrlxJx44dGywv247SiRbW6jm7d+/G4/Ec98Xsdjt33XXXCX3Yba1+/PFHgFofHp999hkffPABH374IVOnTkXTtLDGaBYXFwPgdrsbLPfmm28ihODSSy/FbrfX2Dd9+nQ++ugj5s+fz4033tjoazdWQzc6IQR33nknzzzzTL0TWVWNPa2SlZUFcMInD7zmmmtYs2YNb775JkOGDAFg/fr1bN++nTFjxoQmTIXKMfgAY8eObfCceXl5tbbZbDYuuugiLrroIgCKiop45513uO+++zh8+DC///3vWbx48XFfp7lfr6rx/nWN7a++/cCBA7X2HetDDwjdY4qKio4rPkmqrr6G+z/+8Q8KCwvJysoiPT0dTdP4/e9/TzAYrPGAXZ/GNtyrEqOfffYZixYtYu3atWzcuDGUGH3vvfdYsWJFo5c/bkqju6EG9M033xxqQB/9ENXQksON+Zs+XuHEO3ToUL744gtmz57NX/7yF8aNG1frgahKYxr+zz33HCtXrmzwXlt1niuvvLLWvri4OM4888xjJpVkQ1+qT10JwPp8/fXX7Nq1i+uvv77GvS4qKor77ruPmTNn8tprr/Hggw8C8O2335KbmxuaQwkqVxP74x//yLx580Jt1qaor927YMECxo4dy5133kn37t2ZMmUKhw8f5vnnn+df//rXMc/b2HZvlar7l8/n46effmLr1q28/PLLjBkzptZcUVX39XDaN3fddRerVq1i2bJlTJo0icjISEaPHs25557LzJkziYqKCpVtSlsxLS2tzrJVr0NdCydUzSlztKioKKKjoykqKqKwsJC4uLjQvobu6eG2/463/bl69WqWL19OcnIyS5cubdSzZ1WZqveHJDW3sJImTUmYnIjztCU//fQTUPvD49VXX2XSpEl06dLluM5bdTM+OrFwtDfeeAOonBjx6ElQA4EAUDnxYGZmZr032ePlcDjq3bdgwQKefvpp0tPTeeaZZxg9ejQJCQlYrVYCgQB2u73BWcFPpCuuuILbbruNd955hyeeeAJVVevsZQKEMvWXXnopERER9Z6zMatkREdHc9NNN5GamsoFF1zA0qVLKS8vx+VyNfo6Vd88tISGekk19F6oUvWB19qXYZVa3ty5c5k7d26dk0VXqa/h3qdPHwzDIC4uju7du+P1etm4cSMPPfRQo64dTsM9nMTosTSl0X08DWg4sUsONySceMN5cGlK4re6qvPUF2d9569OJoml5rBs2TKA0AT+1VX1gqo+SeaFF15YqzfB1KlTmTlzJtdff32zxFRfu1fX9VrPAVFRUY2ejLqx7d4qR9+/nnjiCe6++25mz57NpEmTwmrz1tW+8Xg8fP3116xevZpPPvmEZcuW8fXXX7N48WIee+wxVq5cGVoGtyltxRPVI+hojWmn1aexveSPpU+fPgBs3LiROXPm8MwzzxzzmKrP5Or3eklqTmElTU4//XTOOuss7r777hMVT7tV1XA/ekjT2rVrmTZtGg8//DCffPIJiqJw/vnn88ILLzSq62HVzN8FBQX1ltmwYUOo6+/OnTvZuXNnneWEELz55pvcd999jalSs6haHuzFF1/k3HPPrbGvvhm109PT2bp1K7t27aJ///4nLLa4uDimTp3Kp59+yrJly5g4cSLvvPMOVquVK664okbZtLQ0tm3bxj333BPqqtlUVbODG4ZBUVERLper0dcxDCM0rKm5X6/U1FS2bdtGZmZmaNm+6qoe6jp06HBc5y8sLASObylm6dQye/ZsZs+ejdfrrXMVBKi/4X60Dz74gGAwWOtvuz7hNtyraygxeizNmaBtT8J5cDmW5mr4N4ZMEkvNoerzvq73eHJyMpGRkTWGOkdHR9d6z1mtVlJSUmr0Pjma3++v0aOharlY0zRrJa9/+OEHAPr161dj37XXXssVV1zBokWLSExMZMeO
HTz//PP87ne/azABXqUqqVtQUNCo8keXuf3221m8eDGLFy/moYceYt68eaE6JCYmsmfPntAKjEerapempKTUOu/o0aMZPXo0UNlj5fbbbw8lx9955x2gsl20bds27rrrrmO2FavOX/VfIUSd9a36TKj6grH6v8W+ffvqPMbr9VJUVITT6cTtdmMYxjGvU1Xvbdu2Nfr1SUtLY+vWrezYsSOUCGlMnaOjo1mwYAFTpkzh2WefBSqHajakqu0YFxcXOo9pmui6fkqs3lpVx1OhrnU53vqHUz6spMmyZcsa9c2JVNtPP/2Ex+Op1aPk0KFDvPbaa/Ts2ZMFCxZQWFjIHXfcwfXXX9+oseKJiYkkJyeTlZVVb6O7amz7nXfeyRNPPFHneZYvX85pp53G/PnzQ0mTquXn6hufabPZGtzfGFU3ubq6Hf7nP/+p85jJkyezePFiXnrpJS688MIGz9/UGK+55ho+/fRT3nrrLQzD4NChQ5x33nm1vomdMmUKX331FR988EGjkyZCiAYb51XJLZvNFhrTfzzXCef1qroe1P+ajR8/nqVLl/L2229zzjnn1NgXCARCc+NUDRMI19atW7Hb7fTu3fu4jpek6upLWB9twYIFDB06tNHdiBuTsD6WuhKjx9KUBO2+ffvq3F69Ad2aHt7DjVdRFMaNGxfqTXn48GFuu+023n77be6///7QZ0pzJX6rHiAyMzPrfCCoPodEfWSSWGoOx/qW3ePxNMuwhccee4yHH3641vacnJxa88/98MMPREZG1vgSByoTug8++CB33nknf/zjHwGYNm0aM2bMqHWO+sTHx5OVlcXGjRvrnVOqSl3nvOmmm1i8eDHz589n+vTpob/1oUOH8t133/Hyyy/XSkAFAoHQcsCdOnU6ZqzXXXcd77zzDj/88EOo7KBBg/j666955ZVXGt3rvqrHW0VFRZ3XrPoMqhpatGvXrtAx+fn5vP7666FkTpWqe+HAgQNDiY5jXQcqe4AsW7as0a/PwIEDWbJkCc8++2yjkibVYygqKuKf//wnM2fO5Nlnn8Xr9XLXXXfVe+zmzZux2WzY7fZQ/NnZ2ZSUlNSalqA9a2yv1fYq3PqXl5c3vnA4M8wqiiKuv/76sGalPZXUt3rOvn37BCDGjRtX6xiLxSIiIiLEmjVrQqsNvPvuuwIQ27dvb9R1L7vsslqzeVfRdV0kJSUJQGzYsKHecxiGITp06CAAsX79eqHruvjpp5+E1WoVycnJda704Pf7G9x/rFVYhBBi9uzZAhC33HKLME0ztH3FihUiMjKyzpUV8vPzRXx8vADEM888U+M4IYT45ptvQksOl5eXNxijEPWv6iCEEOXl5cLtdovo6Ghx5ZVXCkC8/fbbtcoVFhaKxMREYbVaxb/+9a9ay6oFg0GxcOFCsWnTptC2+++/X9x5551i586dtc63f/9+MWLECAGISy+9NOzr/PjjjzWWHD7W61V9ybdj/btmZmYKp9MpLBaL+PTTT0PbDcMQd9xxhwDE0KFDaxzTmPeCEL+uHFJ9CU4h5Oo54ZKr5/wqLi5OeDyeWu97IX5dPefQoUPCYrGElt5srOTkZKFpWr2rPtR1zep++OEHAQibzSb8fr8Q4tfPizFjxtR5zOOPPy4Acf/99zc6zuqrIixZsqTW/n/9618CEKeffnqN7VXHhINmXj0nnHjrsm3bNgGI3r17h7ZVrQAxffr0WuX9fn/os/Doz9SjY33ggQcEIGbOnBnaVvWeOnz4sIiIiDjmiidxcXFy9Zx25ETdex977LEG30tTpkwRgNixY0ed+1NTU4XH4wkrprr4fD5RXFwc+snKygrFpet66Kfqb3js2LE1tuu6Lr788kvhdrvF7NmzxZIlS8S//vUvkZCQIG6//fZaZev7qVruffny5fWWqbqH1Lf/ggsuEIC4+eabha7rIhAIiK+++irUvvnoo49CZQOBgPjTn/4Uat9UP8/cuXPFjh07ap3/qaeeEoCY
MmVKaFteXl6oDffiiy+KQCBQ4xifzyc+++wz8eOPP4a2VV9Rra56VN2H5s2bJ7Zs2SICgUDoGEAMGjRI5OTkhMrv2LEjdI979913G30dXdfF7t27w3p9Dh8+HGp/PvXUUyIYDNY436pVq8TBgwcbjOHAgQOid+/eAhB33313nXFV3eeHDh1a4zX9v//7P7FgwQIRCATa/U9ZWZn48MMPRVlZWYvH0pbqn5eX1+h2r0yaNKP6kiYff/yxAMTvf//7WsckJCSIkSNH1lj2Mjc3VwA1lkJsyKuvvioA8Ze//KXWvs8//1wAokePHsc8z+233y4A8cc//jHU8Ktayqxv377i2muvFb/5zW/EK6+8Ejpm2rRp9e5vzIPytm3bQg3LPn36iCuvvFKMHz9eKIoi7rzzznob2kuXLhVutzu07Nzll18upk2bJrp16xZK/FS9pg3FKETDSRMhfl3KkiPLhta3vOg333wT+nBIT08XZ599trj66qvF6aefLqKjowVHLZH5xz/+MXTeHj16iAsvvFBceeWVYty4ccJqtQpAdOvWrVbjuTHXee+992q8p471ev3www81rnGs1+z1118XqqoKRVHEuHHjxFVXXSV69uwpAJGUlCS2bNlS43yNTZrMmzdPAOLxxx+vsV0mTcJzvPVvb0mThhLWQvz6gPuPf/xDAGLv3r1hXbehhLUQx5cYPVai93gStNWTEIMGDRJ5eXmhfbt37w41oI9emrE1JE0aG++LL74odu/eXev8VUt8nnnmmaFtx5P4rSvW3bt3C7vdLqxWq1i8eLEQ4tclh6+99tpQHep70K0rSSyTJm3bibr3HitpUpVEWL9+fZ37IyMj61z6t6nqW3K4oXbvwIEDxQ033FCjjfLWW28JTdNqLA3ekIbavVWOdf/68ccfhaIowuFwiOzs7NDnwauvvhpW+2bgwIGhNuwll1wirrjiitA2h8NR6/Mh3LbisdpPc+bMEYD4v//7v1pLDo8aNUoMGTJEREdHi4svvlhMmzZNuFyuOpPGjW2nhdv+C6f9WV8M2dnZoWvU9YVBVduxKvFWRS45fOo4Ge1emTRpRvUlTR599NFQFvhoEyZMECNGjKgzafLJJ5806rrl5eUiKipK9OnTp9a+q666SgBizpw5xzzPunXrBCASExOFz+cTW7ZsEQcPHhTXXntt6BtVQMyYMSN0TE5OTr37G3sD3rJli5g2bZpITEwULpdLDB48WLz00ktCiIYTGrt37xY33XST6NSpk7DZbCI2NlYMHTpUPPLII6KwsDD0mjYU47GuIYQQX375ZejD97rrrmuwLtnZ2eLuu+8Wffv2FS6XS7hcLtG1a1dxwQUXiFdffVWUlJSEyubm5oo33nhDTJ8+XfTv31/ExcUJi8UiYmNjxdixY8Xf//53UVpaelzXKSoqqvGeOtbr5fV6a5z/WK+ZEEKsXr1aTJs2TcTFxQmr1So6duwobr755job+419L5x++unCarWKQ4cO1dgukybhkUmTSg013IX4NWkyadIkMWrUqLCve6yG+/EkRnVdF5MmTWowaXm8je5wGtBC/PrQMWPGjHp/HnjggVrHNFfSJJx4w31wCbfhX1+sL7zwggCEqqritNNOE1dccYXo0KGDiIqKEtdcc02DD7p1JYll0qRta6mkyb333iug7p6w2dnZAhrXMytc9SVNGmr3Op1O8eyzz9Zoo/zyyy8CEGvWrGnUdRtq91ZpTNL34osvFoC46667Qp8Huq6H1b75+OOPxQ033CD69u0roqOjhcvlEj169BA33nij2Lp1a53XDaet2JSkycSJE0VRUZG45ZZbRGpqqrDZbKJnz57iySefrHWPbmw7TYjw2n9CNL792VAMBw8eFD169BCAePDBB2vsq2o7rly5UiZN5L03rONk0qSF1Jc0acjjjz8uXC5XjeE5CxYsEIqiiD179jT6PLfddluD3zKEq/qHR1vU1uNvqrZY/6ysLKGqqrj88str
7ZNJk/DIpEmlhhruQlT+naxcuVJomiaeeeaZsK97rIb78SRGq7osT58+vcGk5fE2uhvbgBbi14eOhn4GDhxY65jmSpqEE+/xPLiE2/Cvrw4ffPCBGDlypHA6nSImJkacccYZ4ueffw49zNT3oFtXklgmTdq2lkqaLFy4UAB1ttGrkrsPP/xwWDE1Rn1Jk4b06NFDzJw5s0Yb5Y033hCAOHDgQKPPI9u9NbX1+I9XVdvxsssuq1V/mTQ5dbTKpImqqsf1o2laWJVoi44naVJYWChSU1PF4MGDxYcffij+/e9/i4SEBHHttdeGde2cnBwRGRkpLrnkknDDrlNbv/m29fibqi3W/9ZbbxUWi6XOBxyZNAmPTJo0jq7r4s9//rNQFOW4H1LbQsM9nG8Qm6o54j+Z8Ta3xta/viSxTJq0bS2VNAkGg6JLly7CbrfXGO5QVFQkevToIWw2W1hfxIUbdzhJkyeffFJomiZ+97vfiUWLFokXX3xRxMXFifPPPz+sa8t2b01tPf7jVdV2/OWXX2TSRN57T2i7N6zVc6js5xbuIVIDoqOjWbx4MTfeeCNXXXUVLpeLyy+//JhLax0tMTGRu+66i4ceeohNmzad0KV4Jam5ZWdn89JLLzFr1ix69uzZ0uFIp5AvvviCsWPHHvcS2ffeey/z5s3jscce47///W8zRye1V0888QSqqvLII4+0dChSKzVv3jxWrVoFwKZNm0Lbli1bBsC4ceO48cYbAbBYLMybN4+pU6cyYcIErrzyStxuN++99x6ZmZk8+eSTrWb1y9tuuw2AF198kddff52kpCSuvvpqHn300bDOI9u90tFtx8auviRJxyPspMlZZ53F//zP/5yIWE5ZPXv25JVXXqF79+5omnbc53nwwQd58MEHmzEySTo5UlJSqKioaOkwpFPMwYMH+f7773nuueeO+xyy4S6FSyaJpcZYtWoVr732Wo1tq1evZvXq1aHfq5ImAJMmTWLVqlXMmTOHBQsWEAwG6d+/P3/729+44oormjW2uXPnMnfuXAzDCPtYTdO47bbbOPfcc2W7V2qS6m3H43kvSlI4wk6aJCcnM3HixBMRiyRJkiSdNKmpqWzevJnu3bs36Tyy4S6FQyaJpcZ49dVXefXVV8M6ZsSIEXzxxRcnJqBqZs+ezezZs/F6vURFRZ3w60mSJLW0sJMmkiRJkiS1HZ06dWpTQ2vbWrySJEmSJLVvaksH0N6Ypomu6y0dhiS1K7quN6kLr3RqkA/aUlvX1t/D5d4sYq2bKfdmtXQo0kkk271SaxQMBmXbUWo2sqdJMysvL2fDhg2kp6fjdrsbdYxhGBw4cACr1dpq/rhbY0zhaOvxN1V7qn9RURE//vgj/fr1a+lQpFbM7/ezb98+fD4fDoejpcORTjGGYbB79252797N4cOHyczMpEuXLnTp0iWse3DVRIaRkZEnKtQTRghB3v5vcGjF5O3/Bk9sZxRFaemwpJNg9+7dfPPNNyQnJzfq37w1tlFaY0zhaOvxN1X1+quqSlZWFgcOHGDgwIEtHZrUTsikSTMTQhATE8N7773X6GNM0yQrK4v09HRUtXV0/mmNMYWjrcffVO2t/l26dOGcc85p6TCkVq64uJhnnnmGiIiIRjXcTdMkOzublJSUVvN30hpjCkdbj/945ObmsmnTJsrKyhBCYBgCTVNQFIWIiAj69+9PQkLCMc+j6zrFxcWMGTMGj8dzEiJvXmXFmZQV7cUUGmVFeykrziQyulNLhyWdBH369OHLL79sdPnW2EZpjTGFo63H31R11X/kyJEMHz68hSOT2ouwkiamaR7XRYqKinjiiSf4f//v/x3X8W3Ntddei6ZpBAKBRpUPBoMsXryYKVOmYLVaT3B0jdMaYwpHW4+/qdpT/e12Oy6XS35jKR3T9OnTOXDgAH6/v1HldV2nqKiIHj16YLG0ju8QWmNM4Wjr8Ydr48aNvPLKK5SXlxMVFYWvwiDgN7DZNRxO
jeLiYtatW8ef/vQnBgwY0OC5VFUlMTGRfv36tbn7nRCC3H1rEELHxIoQOrn71hARldHm6iKF77zzzsNqtTZ6guPW2EZpjTGFo63H31RH19/lcslep1KzOqEtGq/Xy1NPPcVzzz1HSUnJKZM0URQlrK61wWAQt9tNbGxsq7nRtcaYwtHW42+qU73+0qkpISGBrl27Nrp8MBgkEAgwefLkVvN30hpjCkdbjz8cgUCAOXPmYBgGnTt3Jhg00QNlqA6Bqiq43RHExsZy8OBB3n//ff7whz9gs9laOuwToqw4k9KiPagWB+BDtTgoLdoje5ucQpxOJ06ns1FlW2MbpTXGFI62Hn9Tner1l06840qabNiwgU8++YScnBySkpI4//zzGTJkSGi/z+fj6aef5sknn6S4uBghBH369Gm2oCVJkiRJklrSkiVLyMzMJCEhAUVRKC/1gwBFAQSUl/qJjnURHx/P3r17WbJkSbscZljVy8Q0dSwWF+BDVW3ouk/2NpEkSZLahbAHvd15552MGDGCRx99lJdeeolHH32U4cOH89BDDwGwbt06+vTpwwMPPEBRURHp6em88sorbNy4sbljlyRJkiRJahHLli3DMAxsNhuBQOWwHEWtTA4oqkLAbxAIGNjtdgzDYNmyZS0b8AlS1ctEszhDyRFFUdAszlBvE6l9mTt3Ln369JHzRUiSdMoIK2ny2Wef8fTTTyOEwO12M2TIELp164aqqjz66KO8/fbbTJkyhb179xITE8PTTz/N9u3bmTlz5ik5KZEkSZIkSe1TUVFRKElQXupHCEFVhwpFqeyBUV7qP/K7QlFRUQtFeuJU72WiajWHHqmaDdPUj8x10raXUpZqmj17Nps3b2bdunUtHYokSdJJEVYm4+WXXwbgD3/4Azk5Oaxbt45t27axceNGevbsyYwZM/B6vUyaNImtW7dy2223tdvxu5IkSZIknbqio6MRQhAIGPj9+pGESdUwFKVGbxMhBNHR0S0X7Anyay8TB0cPwJG9TSRJkqT2IqykyYYNG+jUqRPPPPMMdrs9tL137948++yz6LqOx+Phww8/JD4+vtmDlSRJkiRJag1OO+00NE2juKgUv7+C3PyDlJWVhPYrioIQguLCUjRN47TTTmu5YE+A6r1MhAB/RT5GsObqKbK3iSRJknSi7SrIZHnge3YVnLgEfVhJk9zcXAYPHlznUJtRo0YBMH78eNxud/NEJ0mSJEmS1ApNnjyZlOQOFBbmU1pWhGHo6EYwtL+q40lhUT4pyWlMnjy5xWI9EarPZWIaFQjTQAi9RhnZ20SSJEk6kYQQLNy1nByzgIW7lp+wBH1YSZNAIEBUVFSd+zweD1C55KMkSZIkSVJ7ZrVaueCc32LRrPgDlT0sIl2e0H5dD+Itycdhj+CCc2a1q2Uwq3qZGEYAEBjBI3O6qFbAxDSCoR9FUTGMgOxtIkmSJDW7Hfl72J63Cwsa2/N2sSN/zwm5jpydtRnIWcQlSZIk6dSyP7MIj70z6ak9sGgWVFXFW1pAsTePwqLDlJZ7iY9L4ZpL7sBj78z+zKKWDrnZCGEQ8BejaTb0QBkgUFQNYRqoGJhGANPwh340zUbAX4wQRkuHLkmSJLUTQggW7lhO0NRxYCNo6izccWJ6m1jCPWDnzp28/vrrx7X/uuuuC/dybcLs2bOZPXs2Xq+33p44kiRJUsNuvvlm/vnPf/L888/z+9//vqXDkaR6CSFYt3ovBw7uJb8wh/jYjkwYfSF5BfvwlhTiccfQo+sgenUbimaxUlbiZ93qvaRlRIdW3GnLVNVClwHTCfq9ZG7+L6YRJLnLGdiciaxYsYIJoybU6lmjWVyoatjNTkmSJEmq0478PWzN24nL6sQf8OGyOtiat5Md+XvoEd+lWa8V9qfX6tWrWb16dZ37FEWpd7+iKO02aSJJkiQ1zaeffso333xDampqS4ciScdkGgJvUQXfb1qCEILunYcwYvCZKApUVFTgdDpR
FAXTBDNgYLNbKCmuwDQEmqXtJ00ArHY3pUV7AXBExBObPBBdN9CFC0dEYrsajiRJkiS1LqFeJkYQBQWBwK7ZKA9WsHDHcrrHdW7WLynCSpp07NixXXxDIkmSJLUeOTk53HzzzXz++edMmzatpcORpGPSLCrjp3bg8ed+xmJRueLyq7ns6lHoepDlK1YwccJQLJaaSQOny4pmaT+jooUQFB76CYCYpIEoigrI4Tengrlz5zJ37lwMQ/57S5LUMqp6maiKSrHfixACD1FE2FwnpLdJWEmTvXv3NtuFJUmSpNZv/vz5rFy5kg0bNrBp0yYCgQD//ve/mTlzZr3HrFu3jjlz5rBmzRqCwSD9+/fn9ttv5/LLL6+z/PXXX8+tt95K//79T1AtJKn5denakdfnv8LCz5ZxyVWnE58USTAYxOGCuMTIU6KnRVLn0yjK2URUQu+WDkU6ieSwdEmSWlJVLxOf7iOgB0EBKxZURcGu2SgLlDd7bxM5uFSSJEmq15///GcyMzOJj48nJSWFzMyGlw1dunQpU6dOxeFwcOWVV+J2u3nvvfe44ooryMrK4o477qhR/oUXXqCsrKzWdklq7RRFYcSI4YwYcWpOAq8oChGeNCI8aS0diiRJknQKqeplEmmLoIxyNFVDNSv3KYpyQnqbtJ9+opIkSVKzmzdvHnv37iU3N5ebbrqpwbK6rjNr1ixUVWXFihW89NJLPPXUU/z000/06NGD++67r0bSZevWrTz66KO89tprqKr8OJLaDj2oI0y5fK4kSZIknUzV5zJxWhzER8QS7fDUKGPXbASNYLOupBNWK/X1119nzZo1de7zer34fL4697399tvcfvvt4UcnSZIktajJkyeTkZHRqLJff/01u3bt4uqrr2bQoEGh7VFRUdx3330EAgFee+210PZvv/2W3NxcunXrhsViwWKxkJmZyR//+Mcax0ttx97ictZpcewtLm/pUE6YkpISpkw5m+un38mm7xvuedVe5R9YT87e5QR8xS0diiRJknQK2XRoKxtzNhNhc6EoClX/q+7o3ibNIaykycyZM5k3b16d+2JiYpg9e3ad+xYtWsRzzz0XfnSSJElSm7Fs2TIAzjzzzFr7pk6dCsDy5ctD2y688EI2btzIjz/+GPpJTU3lnnvu4b///e9JiVlqPkIIlu7LI19xsHRfXrN9u9Pa/Pe//2X//oP8suUHSrzBlg7npDNNnfzsDRQc+hFfWW5LhyNJkiSdIgJ6gJc2vEVpoJyKoI+AEQz9GMKo8buqqPh1f7P1Nmm2OU2EEO22gSRJkiQd244dOwDo3r17rX3JyclERkaGygBER0cTHR1do5zVaiUlJYVu3brVex2/34/f7w/97vV6AQgGgwSDjX+IrSobzjEnWmuMqbF2F5Wxs7AMizDZWVjG9rxiukRHtHRYzcrv9/P6a28Q8BuMmnQWvfol1fi3asv/fo3lzduKHqzAYo3E4U4/5erfkOOt/6n6ekmSJDWWKUze3vQRXn8JmqKCAj79SFtQCIIY+HU/VJv41W6xU1BRiGEaWLSmpT3kRLCSJElSsyguruyqX99qCh6PJ1SmKR577DEefvjhWtsXLVqEy+UK+3yLFy9uckzNrTXG1BAB/KjFUqbYcWBS5vPx9nebGGQU0Dzz1rcOq1evZs/ufUQ4Y+nRozsrV39dZ7m29u/XeIIY63asagWluostBxbWWar91r9xwq1/eXn7Hc4mSZLUVEIIPt22hC25O0lxJ3FR77NIj0oJ7dd1neXLlzNx3EQslprpjUibq8kJE5BJE0mSJKkVaczS9vfee2+NebK8Xi/p6emceeaZeDyeBo6sKRgMsnjxYqZMmdJqlodtjTE1xu6iMr7ZlEW0ouAvKyXa5aJCOOndv3+76W1iGAYvvfQSYGHcqHM4/5JxpKZH1yjTVv/9GstXeoisrXtRFCeDBlyJxeqssb+91/9Yjrf+Vb3lJEmSpNpWZq5lzb4NAFw94AIG
JvepsT8YDBKlRpLqTjphnz0yaSJJkiQ1i6oeJvX1JvF6vcTExDT5Ona7HbvdXmu71Wo9rg/L4z3uRGqNMdVHCMHKA4XopiDCpuEH7JqKL6Cz8kAhPeKjUJS239/kq6++Yu+e/bicbiaOP5OOnePrrVdb+vcLx+H8X1AUhaj4njhd9Sco22v9Gyvc+p/Kr5UkSVJDfjq0mc+3V/bqPKfH6bUSJieLXONRkiRJahZVc5lUn7ekyqFDhygtLa1zvhOpbdtdVMaOgjJcFi2URFAUBZdFY0dBGbuLylo4wuYx/435+CqCDB98BoNHdGoXiaBw6MFySvIr/7Zjkge2cDSSJEnSqcDrKwVgTMdhjM8Y0WJxyKSJJElSO1PuzSLWuplyb9ZJve7EiROByrlFjvbll1/WKCO1D0IIlmbmEjRNbFrNJoVNU9FNk6WZue1iovi/P/F3pl97DZdeeindeye1dDgtIiZ5EBHRGTgjT836S5Xmzp1Lnz59GD58eEuHIklSOze+0wh+O+wazut5Rot+WRH28JydO3fy+uuvh7Vv586d4UcmSZIkhU0IQd7+b3BoxeTt/wZPbOeT9iFzxhln0KVLF9566y1uvfVWBg0aBFQO1/nrX/+KzWbjuuuua7brzZ07l7lz52IYRrOdUwrP7qIytuSV4NcNAlYNm/pr4uTo3iZdYyJbMNKmS01N5eFH7m/pMFqMxeoiKWN8S4chtQKzZ89m9uzZeL3eeif+liRJOl5efyk2zYrDUjkUu0tsxxaO6DiSJqtXr2b16tW1tiuKUu8+IcQp141VkiSpJZQVZ1JWtBdTaJQV7aWsOJPI6E7Hfb558+axatUqADZt2hTatmzZMgDGjRvHjTfeCIDFYmHevHlMnTqVCRMmcOWVV+J2u3nvvffIzMzkySefpFOn44/laLLh3rKEEHy5O4eyoIEKVOgmNlvt3iblepClmbl0iY5ok20BXddrzcYvSZIkSVLz8+l+Xvl+AQDXD76MKEfjJ/g/kcJqBXTs2LFNNngkSZJOBUIIcvetQQgdEytC6OTuW0NEVMZx37tXrVrFa6+9VmPb0QnyqqQJwKRJk1i1ahVz5sxhwYIFBINB+vfvz9/+9jeuuOKK46uY1CrtKCjll7wSFMBmUYmyWyrXHq6mPfQ2ufvuu8k56OWSi2Yw5ezhuD2Olg7ppDu8bw0RUem4PGmyHShJkiSdELppMP/H9zlUcphIWwSGMFs6pJCwkiaNWQpSkiRJahllxZmUFu1BtTgAH6rFQWnRnib1Nnn11Vd59dVXwzpmxIgRfPHFF8d1PaltEELw9ub96IaJpipEWjV0QyCEwACChhl6uFYVBb+ht8neJjt37mT58hUU5JXRv8dZDB9RfsolTXxlueQfXEf+wfV0G3IDVlvbS3xJkiRJrZsQgvd++ZydBXuxaVZmDrmMWGd0S4cVIieClSRJageEEBzetwZD92MaQQBU1YZp6kd6n7T9iTil1uP7Q0XklvtRFAWnRSNogs8w8ZsmhqLiN018xq8/dk2j0BfEaGPvw9dffx2/L0ivbkPo0qUTaRlNXzK7rSnM+QkAd2w3mTCRJEmSTohFO1fwQ/bPKIrCNQMvIs2T0tIh1SAH6UqSJLUD3vztFOduRpg6mDoKVhRFQbM4m9zbRJKqO1zmY/GewyQ47QxNiWZoyq+JBF0PsmL5CiYMmYDFYq1xXIRVw6K2ne9qDh48yMKFC6koDzJ6+Nn0GZiCoradXjLNwdB9FOdtBSBWLjMsSZIknQDf7f+BpXvWAHBJn7PpGd+1hSOqTSZNJEmS2jhv/g72/rwA0wyiKBoWWwR+/UhvE81GUK9o8twmrZFcPadlxDpt9E3wUBbUOadbMmq191QwqBGJTnKEA6vV2sBZWr/58+fj9wXp2KEXHdO70KPPqbfMbtHhX/4/e/cdH0d9Jn78MzPbtavei1VsuTcMtqk2YGPTAgSDcQhJHBJylzjkEpJcgORSLr+E5H78
ktwlzt0FEiD0YkLHBXDFxhhwr7Js9V53pW2zM9/fH2sLC9tgW2VX0vf9enE5aWdnnpHW2tlnnu/zIEwDuysdpyc31uFIkiRJw0zY0Fl7JJowmT/6Ui7Ii88E/Vnd8vmXf/kXWltb+3TA5uZmvvOd7/RpH5IkSVL0LnDd4VVU7f8Hhu5HVS3YnalYrAlA9IPsJ6tNhpNly5axb98+tm3bFutQRhSLqvK50hxuGZ/XK2EynLS1tfHSSy8R8Ie5eNY1lE7IxGYfWfeZhDBpb9wFRKtMhlPCVZIkSYoPNs3KP8/6EleNvox5JZfGOpzTOqukyfLlyykuLua+++6jrKzsrA508OBBfvjDHzJ69Gj++7//+6yeK0mSJPUmhEnFnmfpaN5PJNyNolqwOdNRtZPv7qua7G0i9V1lpx/zhNfPUFpqc7ZWrFhBMBAiPaWAwvxxTJo+8qosujoq0ENeNM1OYtq4WIcjSZIkDSPmCZNxkh2JzBt9aVwn58/qtsm2bdu4++67+e1vf8t//Md/cNFFFzFv3jwuuugiJkyYQFpaGm63m66uLlpbW9m3bx9btmxhzZo1vP/++wghuOSSS/jjH/84UOcjSZI0IiiKSmrODJqqNqGqGqolAfU0H2JlbxOprw63d/HMvhpGJydw64R8LMO8t8eXvvQlHPYEQl12igszSE51xTqkwSfA5kjGnVJyymSsJEmSJJ2LrnA3D3/wNPNHX8rkrPGxDueMnFXS5LzzzmPTpk288MIL/P73v2fz5s1s2bLlU59z/K7mxRdfzPe+9z0WLVp07tFKkiSNYH5vDaDgSswDICljEu2Nuwh0NaApas/UnOjfXRPT0Huy9oqiEjHCw7K3iTSwOoJhXjpUBwKS7NZhnzABcDgcfOnLt8c6jJjypJbgTilGCNkzSJIkSeofYUPn79tX0NDVzBuH1jI+fQwWLf6Xv55ThLfccgu33HILO3bs4KWXXuKdd95h+/btdHd392yTkJDAjBkzuOKKK7jpppuYPn16f8UsSZI0okSX1rxLW8MOrDYPxVPvQLPYAPNY+bwN0wj1bC8EqBiYRpgTcyOaZiMc6kQIA0WJ/zcoKfZ0w+T5A7UEdZNcj4MFJZmxDmlAGYaBoiinrdoaaRRFkX8rJEmSpH5hCpNndr1MVWctTquDpTMWD4mECfRxes706dOZPn06P//5zwHw+/10dnaSnJyM0+nsj/gkSZJGtEBXA3WHVxMOtgOQkDyqJxGiqhZKpt6BEQn0eo6u62zYsIE5F845aYKJZnGhqkPjDeqzyOk5A0sIwcojjTR2hXBZNW4Znzes+5gAvPLKKzz11FPMuehGFt16PTn5SSOuKss0wnhby0hMHzds/lZI/Uv+7ZUk6WwJIXjlwBr2NZdhUS18ZfotZCakxTqsM9av74YulwuXawSu+5UkSepnwjRoqd1KS+0HgMBidZFTMh93SnGv7ax2D1a7p9f3NF0nIlw4EjKH/NjXT7Ns2TKWLVuG1+slKSkp1uEMO9sbO9nZ2ImiwOfH5ZJoH76vJQDTNPn73//OkfIKkuyHcFv3cvtds7E7RlbioLN5Pw0V6+ho2kvR5MWxDkeKQ/JvryRJZ2t9xXu8V/0RCgpLpnyOopSCWId0VkbWlYAkSdIQYESCVO5bQcjfAkBi2liyi69AszhiHJk0UgQjBm9XNAFweWEGxckJMY5o4L399ttUV1ejYGP6lMsYMyFzxCVMhBC0HRsznJg+NsbRSJIkScPBkbYqVpatA+D68fOGTPPXE42sqwFJkqQhQNXs2BxJRMJdZBdfQWKa/PAiDS6HReNLk0exo7GDi/NSYx3OgBNC8Oijj2Iagqnj52C3OUbkmGG/t5pwoA1VtZKUPjHW4UiSJEnDQFFKPpcWzkRB4ZJRM2MdzjmRSRNJkqQ4EA60o1kcaFYniqKQXTwPhInFNvzv8EvxKdvt4Gp3dqzDGBRbt27l4MGDGIbC+dOvJLcgmdT0kfdvr61h
JwBJGROONZuWJEmSpL5RFZXrx83vmao7FA3vjm6SJElxTgiTtoYdHNn9JA0V63q+b7E6ZcJEGnRba9uo8wU+e8Nh5pFHHkEImDT2YlxON5Om58Q6pEEXDnbS1X4UgJSsaTGORpIkSRrKOoJeXjmwhogR6fneUG6sLitNJEmSYkQPeakrX4PfWwOAoQcwDR1VG94NN6X4dKjNx5qjTWiqwj/PKCbFMTIqDQ4dOsSHH35IRDc5f+o83Il2CkuGTkf//tLRuBsQJCQWYHcN/yVZkiRJ0sAI6EH+9tGzNHW1EDEj3DzxmliH1GcyaSJJkjTIhBB0Nu+jsWI9pqmjqBYyR11KStYUFEUWAEqDry0Q5uVD9QDMyE4eMQkTgNLSUpYvX867Gz4iPyOPcZOzUdShezfsXOnhLgBSsmWViSRJknRuIkaEx3esoKmrBY/dzRXFF8c6pH4hkyaSJEmDKKL7qS9fQ1dHBQBOdw65o6/C5kyJbWBD0PLly1m+fDmGYcQ6lCFNN0xeOFBLKGKSn+hkXlFmrEMaVIqiMHv2bGbPno0wBeYQXnPdF3mlV5ORPxurQ46QlSRJks6eKUye3/s6R9qrsFtsfHXGYlKcw+M9Rd7SlCRJGkSKohL0t6AoKpmjLqFw0i0yYXKOli1bxr59+9i2bVusQxmyhBC8Ud5AU3eIBJvGonG5WEZQlYWu672+VlQFTRu5l0Y2Z4qsdpMkSZLOycqydexs2IeqqNwx7WZyPVmxDqnf9Fulyb59+9i8eTPNzc1MmjSJG264AQDTNIlEIthsI6fUV5Ik6URGJIiq2VEUBc3iIK/0GjTNht2VHuvQpBHuw4YOdjd5URS4eVweHvvI6afT2NjIF7/4Ra699nNcM/9WSidmYbFosQ5r0IWDnSiKhtXujnUokiRJ0hC1uepDNlRsBeCWSddSmlYc44j6V59vJ1RXVzN//nymTJnCP/3TP/GTn/yEl156qefxhx56CKfTydtvv93XQ0mSJA05vrYjlO/8O53N+3q+5/LkyoSJFHNCCCo6uwGYV5RJYZIrxhENrieffJKOjg42b9zGprfLefv1A7EOKSaaqzdzePvfaG/YFetQJEmSpCEq252Ow2JnwZi5zMidMqjH9nurSbXuw++tHrBj9Clp0tbWxty5c3nnnXeYNGkS3/zmN0+av7x48WJUVeWVV17pU6CSJElDiREJUVe+mppDr2LoATqa9gzp+fTS8KMoCjePy2PR+Fxm546sJWIdHR28+OKLIGDyuCsAKJ0wsnq5AETC3XhbywCB05Md63AkSZKkIaoktZDvXfx1rii+aFCPK4SgpWYLDq2TlpotA3at3aekyW9/+1sqKir4wQ9+wM6dO/nTn/500jYpKSlMmTKFTZs29eVQkiRJQ0Z3ZxVHdj1BZ/N+AFJzZjBq4qIhPZ9eGj5MIXouKlRFYUJ64oh7bT733HMEg0HycovIzSwlwW2jaPTIGzPc3hQdM+x05+BIGHlJI0mSJOnctXS30djV3PN1kmPwrye6Oyvp7qjAFBrdHRV0d1YOyHH61NPk5ZdfpqioiN/85jef+gMqKSnh3Xff7cuhJEmS4p5p6DRVbaK9MVrmbrUnkjt6Aa7EvBhHJkkfW1vZTFc4wrWjs7GOwKanfr+fZ555BoAZU+ajKAoTpuagjrCfhTANOhp3A5AqxwxLkiRJZ6Er1M3fPnoWvx7gzhm3MSp58K91hRA0V21GiAgmVoSI0Fy1mYSkwn5P3vTpCqGyspIZM2agqp++G5vNRltbW18OJUmSFPdC/paehElK1lRKpn5RJkykuLK/xceWmjZ2N3mp6PTHOpyYeOmll/B6vWRm5JCdNgFVUxg/ZeQtTfG1HSai+7FYXXhSx8Q6HEmSJGmICEfCPLr9edoCHbisTlKdyTGJo7uzkq6Oo6iaHVBQLQ66Oo4OSLVJnypNHA4HPp/vM7erqqoiKWl4zGiWJEk6kRCiJ5vt9OSQOeoS7K4M3MmF
MY5MknprDYR4tawegNl5KZSmjrxpKaZp9lSZXHjBAlRVZfS4DJyukTfhr61xJwDJWVNQ1JE3NUg6d8uXL2f58uUYhhHrUCRJGmSmafLUrpeo8dbjsjq5c8ZtuO0Jgx7H8SoTwwhjGhEUVFTVRiQSHJBqkz5VmowfP56PPvqI7u7u027T0tLCzp07mTp1al8OJUmSFHeC3U1U7HmGUKC953tpuRfIhMkgWb58ORMnTmTmzJmxDiXuhQyT5/fXEjZMRiU5ubJwZPavUFWV//7v/+aOO+5gxrTLAJg0PTfGUQ0+PdxFsLsZUEjJHNwpB9LQt2zZMvbt28e2bdtiHYokSYNICMFLB1ZxoKUci2rhK+fdQnpCakxi6e6sxNtWhmmEAYGqRFAAzeIckGqTPiVNbrnlFlpbW7nnnnswTfOU2/zwhz/E7/dz22239eVQkiRJcUOYBs01Wzm6+xmC3U00VW2MdUgjkrxwPzNCCF4/XE+LP4zbZuHmcXlo6shq/HqivLw8vvvd73L9LdNZcudMMrI8sQ5p0FltbkpnfI2CcZ/DYhv8O4SSJEnS0LP26Bber9mBgsIXpt5IYXJ+TOIQQlBz8HUiuh8EqJoNQ9hBUVA1G6YZOdbrpP8m6fQpabJs2TImT57Mww8/zKxZs/j1r38NQHl5Ob/73e+46KKL+Pvf/8706dNZunRpf8QrSZIUUyF/KxV7n6Ol5j1A4EkdTU7J/FiHJUmn9X5dO/uafagKLBqfi9vWp5W5Q5au6yd9z5Pk6NdjdJUdxvPWWrrKDvfrfgeCZnHgTimOdRiSJEnSEGCaJofbKgD43PirmJQ5NmaxdHVUEuxuQEHBYnNhsycB0ZtBiqIMSLVJn3uarFq1iltvvZXNmzezfft2ADZt2sSmTZsQQjBz5kxeeuklrFZrvwQsSZIUC0KYtNVvp7l6C0IYqJqd7OLLSUwbN+LGtUpDS2aCHZdV49KCNAoSXbEOJyaEEHzjG98gIyODr3z5GxQVFpDgsff7MRrfWIm1voHGN1aSPGF8XP5tON78VZIkSZLOlKqq3HneYvY2H2Ja9sSYxSGEoKV6M4pqwWpNwGJx8Ml6ElWzoUcC/drbpM+3m3Jycti0aROrVq3i9ddf58iRI5imSUFBAddccw033nhjXF40SJIknY3O5n00VW0CICG5kJyS+VhtI6+RpjT0FCcn8E8zinFZRm6zzw8++IDdu3djs9mYNvZ6trzdwJXXjKdkbHq/HcN38BC+/QcQViu+/QfwHTxE4vhx/bb//iCEScXuZ9CsLvJKr8bmSI51SJIkSVIc6w77SbBFE+0WzRKzhIke8tJa9yEJSYV0dRxFszjRLMdufnxiGc4nq03cyUV9Pn6/1eguXLiQhQsX9tfuJEmS4kpS+gQ6Ww6SlD6OpIxJMhksxTXDFPjCOsmO6FSYBOvIXJJz3KOPPgrA/HnX4PcpqCpk5yX22/6FENS/8SamrmM6HJi6Tv0bb+IZNzau/lZ0tR9FD/swTR2LTPpKkiRJn6Ij0Mny9//OtOyJXDv2ClSlT509zlmgq5Gag6+gh7tpb9iJYYSxanZMI7rsNtq7xMQ09J73XEVRiRjhfqs2ic2ZS5IkxTk91EXD0bUIMzpSUVE1Rk24meTMyXH1IUiSTuWdyiYe2lHBoTZfrEOJuf3797N161ZUVWXqhLkAlIzNwJXQf2OGfQcP4du3Hy0hARQFLSEB3779+A4e6rdj9If2hmNjhjMnoaojO5EmSZIknZ5fD/C37c/hC3VR1noU3YjEJA5f22Eq9z1PRPdjd6WiqBqaZsM0Qif8F0bFwDTCvb6vaTbCoU6E6Pt49D6/Y0YiEZ5//nnefvtt6urqCAaDp9xOURTefvvtvh5OkiRpQAkh8LYcoKFiHaYRRrPYySi4GEAmS6QhYV+zl6210THYZv81jh+yjleZzJt3Fd4WDTCZND2n3/Z/vMrECOtotmipsGq3E/H746raJORvo9tbDSik
ZE2NdTiSJElSnIoYER7f8SJNXS0kOTx8dcZi7Jb+u9FwJoQQtNV/SFPVu0B0aXzemGsxjRBGJNBrW13X2bBhA3MunHNSH1XN4uqXmwR92kNzczMLFixg165dnznSJx4uGCRJkj5NRPfTcORtfO1HAHAkZJGYPj7GUUnSmWv2h3j1cD0AF+enMj5t5I3TPVFVVRXvvPMOABeev5CmapOMLDeZOf23NOd4lYkwTfSWFlSrNbqe+oRqk3jobdLeGK0y8aQUY7X33/lLkiRJw4cpTJ7b+xpH26uwW+x89bzFJDsG9z1DmAYNFevoaNoDQErWVLKK5qIoKprFhtXe+9pG03UiwoUjIXPAhs/0KWnyr//6r+zcuZMxY8bwzW9+k9LSUjyekX2BJknS0ORtPUzD0bcxIkEURSU9fzZpuRegxGj9piSdrVDE4IX9teiGoDjZxeWFGbEOKeaeeeYZhBBcesmldLfbgTCTzsvtt/0frzKJBIKY4TCKoiCONdxV7Xb07u64qDYxImE6m/cDkJI9LWZxSJIkSfFtZdk6djXsR1NUvjTtZrI9mYMegx7y4m05CEBW4RxSsqfHvACjT0mT1157jaysLN577z1SU1P7KyZJkqRB1Vr7AU3V0fI/uyud3NELcCTID5zS0CGE4NXD9bQGwnjsFm4al4sqKzz59re/TX5+PqPySinbFcbpslJS2n//tn0HD+Hduw9xLGFicbsJHXssnqpNvK0HMU0dmzMVV2JBzOKQJEmS4ledr5GNFe8DcMvk6xiTVhSTOGzOFPLGXoswTTypJTGJ4ZP6lDQJBAJcffXVMmEiSdKQ5kkrpaVuGylZ00jPnyUbJA4Ry5cvZ/ny5RhG3xt8DXV7mr0caOlCUxVuGZ834qflHOdyubj99tsBmDlbp6MtgGbpn+qxnioTXxcCUC0WLImJ4PX2bBMv1SbJmZOwWF2AEvO7dZIkSVJ8yvVkcfu0m2gPdHJezuRBPbbfVweAyxOtBu2PMcH9qU9XVaWlpQQCgc/eUJIkKY6YRpiujgoS08YCYHMkMea8r6JZHDGOTDoby5YtY9myZXi9XpKSkmIdTkxNSE+kritIutNGnscZ63BiTtd1LBZLrwSB3WElK7f/1jr7Dh7Cu3svwjRRVBVbaip8IiERL9UmiqLiSR0dk2NLkiRJ8U0I0fN+OSVr8Hv5eVsOUle+BlWzUjR5CTZH/F3T9el2y9e+9jXWrVtHTU1Nf8UjSZLUZ35vNanWffi91ad4rIYju56ktuxNujs/flwmTKShzKIqLCzJ4vyclFiHEhcef/xxbr/9djZv3ozPe+qpfn1xvMrE1HXs6WlYPG4AzHAYDAMzHO75T1FVjGCI+jfe/Mym+QNBCHPQjylJkiQNDU3drfzlg6foCHo/e+N+JoSgpeZ9ag+vRAgDpyfnWFVk/OlT0uTb3/42119/PVdeeSWrVq3CNOUbsyRJsRX9A7wFh9ZJS82Wng8pphmhsWI9lftWoIe8WG0e2eRVGtIipuD9ujbMGHwQj2fBYJCnn36asrIyWlvaef7RD3npqe2Egnq/HUNEIoRbWtEcdkQkgqIomMEAZjCIquuYweCxr6P/aQ474ZZWRCTSbzGciXCwk8PbH6GlZmtMEjaSJElS/PKFunjko2c52l7FqwfWDOqxTTNCfflqmmu2AJCacx75Y69H1QZm+k1f9XnR8//+7/8yd+5crr32WiwWCzk5OajqyR9EFEWhvLy8r4eTJEn6VN2dlXR3VGAKje6OCro7K9EsDuoOryIc7AAgOXMyWYWXoWqDO3NekvrTW0cb+aC+g4pOP4sn5Mc6nLjxyiuv0N7eTm5uLnlZk2kor8I0BTZ7//V58R0qo/BLX+ypMDkuEomwYf0Gps+dg8XS+3gWtwd1gEYhnk574y4i4S78vnrSZS8TSZIk6ZhQJMwj25+nPdBJmiuFz0+8etCObegBag69jt9XCyhkF19OStbUQTv+uejTFUR1dTWX
XXYZ1dXVCCHQdZ2qqqpTbisbj0mSNNCEEDRXbUaICCZWhIhQc/BVTNNEUcBidZFTMh93SnGsQ5WkPtnd1MkH9R0ATM9Kjmks8SQSifD4448DcMcdX+Lg7kYAJk3P7bfrkHBHJ1VPPYMZCjP6W/+Eu+Tjvye6rmMkJ+HMy8M6yAmSTzINnc6mvQCkZMf3xagkSZI0eAzT4Mld/6DO20CCzcWdM27DbUsYtOO31G3D76tF1WzklV6LO7lw0I59rvqUNPnRj35EVVUVl156Kffccw+lpaW43e7PfqIkSdIA6O6spKvjKKrFAQRRLQ5C/lY0q4uUrKlkF18ue5dIQ15jd5DXyxsAuLQgjbGp8n33uNWrV1NfX09qairTJl/ChlXl2B0WRo/vnzHDQghqnnsBIxDEVZBPQuGoftnvQPC2HsQwQljtiXE3hUCSJEmKDSEE/9i/kkMtR7CqFpaedytprsHth5ZRcDGRcBfpebOwu9IH9djnqk9Jk7feeovCwkLWrFmD3W7vr5gkSZLO2vEqE9OMoKnRZTeqasNUgthdaeSOWSgr3qQhLxgxeOFALRFDUJKSwJxRQ+NiYzCYpsmjjz4KwO23387hfS0AjJ+SjcWi9csx2ra+j/fAQVSLhVG334ai9c9++5sQgraGnQCkZE2V/ZskSZIkADZVbuOD2l0oisIXpt5EQVLuoBzX763B6clFUVRU1UJe6bWDctz+0qd30UAgwKxZs2TCRJKkmOvurMTXXo5p6OihTiA6Pk2zOAl2NdDdWRnrECWpT0whePlQPe0BnSSHhZvG5qLKRGCPbdu2ceTIERISErjy8muoq+5EUWDi1Jx+2X+4rZ26l18FIPvahTiysvplvwMh4Ksj5G9BUTWSMyfFOhxpmFm+fDkTJ05k5syZsQ5FkqSzNC17AjmeTG6asJCJmaUDfjwhBM3VW6jct4Lm6vcG/HgDpU+VJhMnTqStra2/YpEkSTonpmlQffBV9HAXCiqKqqIQnealajb0SIDmqs0kJBXKahNpyGoNhKno7MaiKiwal4fLGp9VDrEya9Ys/vM//5Pm5mbqq/0AFI5Ow53Y9yV5wjSpfvY5jFCYhOJCMuZc1ud9DqTjVSZJ6ePlkkSp3y1btoxly5bh9XpJSkqKdTiSJJ2FRIeHZbO+gkXrv+bopxOdkLMGb+uhY98RCCGG5LV4n35ad999N3feeSd79uxh8uTJ/RWTJEnSGQv5W6nc/yIBby0KCprVgcXqJqh3AfRUm3R1HKW7s1Ku7ZeGrAyXnTunFdHiD5HrccY6nLijKAqXXHIJAKYpyMzx4PH0T8Kgc9dufGXlqDYro5bchnKKKYHxJD3vAjTNSkrWtFiHIkmSJMVYrbeBFn8b07InAgxKwiSi+6k5+BqBrnpAIadk3pCufOzTT+yOO+5g3759XHnllfzyl7/kmmuuYdSo+G2KJknS8CGEoLXuA5qrtxDytyAAmz0Ji9WJ+MS2stpEGi4yXHYyXHJJ7CeFw2Fsto9HiKuqQvGY/uv3kjR1Cnk3fg7VZsWeEf99ZBwJmeSMvirWYUiSJEkx1hbo4JGPnqMr3I2CwtTsCQN+zJC/jeqDL6OHvKianfyx15GQVDDgxx1IfUqaaCc0QPvWt771qdsqikIkEunL4SRJknooikI40IYRCSKEic2RjMV67O67ECdtK6tNpKEoYpqsOFDHhXmpFCa5Yh1OXDp06BDf/OY3+cIXvsCdX70TAFXr30oQRVXJmBvfS3IkSZIk6UR+PcDfPnqWrnA32Z5MxqYVD/gxTUOncv8LGHoAqz2JgvE3YncO7nSegdCnqwohxBn/Z5pmf8UsSdIIJYSJEQn2fJ1ZOAerPRFF1VBVC6ah9/wHZq+vFUXFMMI0V21GiE/WokhSfKjo9LNNS6OiM9qTY9WRJsraunjxYC26Id9HT+Wxxx6js7OTI0eOUF3ZztN/3cbuj2r7Zd++ssMYoVC/7GsweFsPUVe+mmB3c6xD
kSRJkmIoYkR4bPsLtHS3keRI5Kvn3YrDOvA9rlTNStaoy3B6cimafNuwSJhAHytNZCJEkqTBEvK3UX9kNZrFSf64G1AUBVWzAgJNs2EaH3+wEQJUDEwjzIkrcTTNRjjUiRAGijLw6zmlgbV8+XKWL1+OYRixDqVfCCFYW9VCq+JgbVULXRGT7Q0doMANpTlY+7l6YjioqalhzZo1ACxdupS92+vwd4fxd/U90RFsbOToQ3/DmpzEmG9/C2uip8/7HGht9dsJdDVgsyfjSMiIdTiSJElSDJjC5Jk9r1DZUYPDYufOGYtJciQO2PGEMImE/VjtbgCSMiaQmD5uWI27l58aJEmKa0KYtNV/RHP1ewhhRPuThDqxOZJRVQslU+/AiAR6PUfXdTZs2MCcC+dgtVp7PaZZXKiq/NM3HAy3CQ5HOrop7/BjESaH2rqp6wphUVXmjkpndIo71uHFpccffxzTNLn44ovJTM9nfdWH0THD03L7tF9hGFQ99SxmJII9PR2LJ/5//oGuRgJdDSiKSnKWbM4vSZI0UpS3VbI+/BHj2iYxPmsMbxx6hz2NB9FUjS9PX0SWe+CS6KahU1e+imBXE0WTb8NiSwAYVgkT6OPynOHm97//PZMmTcLtdpOcnMyVV17J1q1bYx2WJI1YoUAblXufo6nqXYQwSEgupGTql7A5knu2sdo9OBIyT/ovIlyn/P7xLLgkxRMhBGsrm9FNgRWTbt2gPagzJiWBS/LTYh1eXGptbeXVV18FjlWZ7KgDoKA4FU9S30qQm9auw19dg+Z0kL/4liHRPLr92JjhxLSxWKyy/40kSdJIIIRgZfl6Gs02VpavxzRNNCXad3Tx5OspSS0csGNHwt1U7nsBX1s5Eb2bYHfTgB0r1uTt1hMUFhbyu9/9jjFjxhAKhfjDH/7AwoULKS8vJy1NXrRK0mA5VXVJVuEckjImDokPL5J0to50dFPW1o3TotKhaIBAN02mZiahytf8KT399NOEw2GmTp3KpIlTePqv2wCYfF7fqkwCtXU0rIou+cn7/I3YkuO/iimi+/G2HgKQY4YlSZJGkLLWoxxqKceCxqGWcg63VXDN2CuYljORXE/WgB036G+h5sAr6GEfmsVB/tjrcSXmDdjxYu2skiZVVVUA5OXloWlaz9dnKt7HEd988829vn7wwQd56KGH2LNnD3Pnzo1RVJI08gjToL1xT091SU7xfFkhIg1bx6tMIqaJy6KhEJ34ZNdUNte2MiHdI5OFnxAOh1mxYgUQrTIp29+EHjZITnWRW5B8zvs1IxGqnnkWYZgkTZ5Iyvkz+inigdXRtBchDBwJmTg92bEOR5IkSRoEQghWlq1HNyNYsaCbEVaWrac0rXhAEyZdHRXUHnoD09SxOZIpGH9jryrw4eiskiZFRUWoqsq+ffsYO3YsRUVFZ3wh1x8jh5944gk2btzIhx9+yO7duwmHwzzyyCMsXbr0tM/Ztm0bP/vZz9i8eTO6rjNlyhTuueceFi9e/KnHCofD/OUvfyElJYUpU6b0KW5Jkj6bENHG0oqiompWckdfRTjYIatLpGHveJWJy6KhKioOYZDgcmGaUNbWzZGObtnT5BNsNhuPPfYYr732GpdccgkrHt8OwKTpOX36e9H0zjoCtfVYElzk37poSPztEcKkvXEXAKnZ02MbjCRJkjRoylqPcqDlMDbNRjsdWA0r+5vLKGs9ytj0kgE5pq/tCDWHXgMErsQ88sdej2YZ+Kk8sXZWSZNRo0ahKEpPY8XjXw+Wn/zkJ1RWVpKenk5OTg6VlZWfuv3atWtZuHAhDoeDJUuW4PF4WLFiBbfddhvV1dV8//vfP+k5Gzdu5JprriEQCJCdnc2aNWtITU0dqFOSJIlo75L68jUkpo0lNec8AFyJecO6zE+SoHeVifuEpsUWRQUN/BGdtZXNlCQnDIkP8INp1KhRfOtb3wJg3nXj2bezntIJfbuzljprJv6KSlJnXYDVE//TciBa
mZeSNQVf62E8aaWxDkeSJEkaBEII3jy0lu6wH8M0EAhURSVyQrXJQFw3uBJzsTlTcLqzyCmeh6Jq/X6MeHRWSZOKiopP/XqgPfzww5SWllJYWMhvfvMb7rvvvtNuG4lEuOuuu1BVlQ0bNjB9+nQAfvrTnzJr1izuv/9+brnlFgoLezfHueCCC9ixYwetra089NBDLF68mK1bt5Kenj6QpyZJI1K0d8l2mqu3IISBHvKSnDVFTreRRowTq0xCpol2wgWOoii4LJqsNvmEcDiMzWbr9b20DDeXze97wsCWnETxXXcOqQSVqllJz5tFWu7MIRW3JEmSdO7eOfIu2+p2YgqBqihY0Eh3paCbEQ60HO7XahPTjPRcm2sWB0WTbkXV7CPqPadP03M2bNjAoUOHPnO7srIyNmzY0JdDATB//vyTkhyn884771BeXs7tt9/ekzABSEpK4v777yccDvPYY4+d9Dyn08mYMWOYPXs2Dz/8MKqq8sgjj/Q5dkmSeotOxnmepqpN0d4lSYUUTV4iEybSiHFilYlVU+gI6rT4QxgnbGPTVCKmydrKZoQQMYs1XhiGwZIlS/jxj39Ma2trv+032PRxx/+hehE4VOOWJEmSzlwwEuKJHS/y5K5/YJgGFkUjxZFEguJEVVTsmg3d0FlZtr5frhv0UBcVe56lrX5Hz/c0i2PEvef06dPJ5ZdfztKlS/nb3/72qdv9x3/8B3/7298wDONTt+tP69atA2DBggUnPbZw4UIA1q9f/5n7EUIQCoVO+VgoFOr1mNfrBUDXdXRdP+NYj297Ns8ZaPEY09kY6vH3VTyfvxAmHY07aa3dGp2Mo1pJL7iUxPQJoCj9EnM8n/9gONfzH6k/r1jpVWViCIQATVU4sdBVVpv09tZbb1FVVYXX6yUhIYH3Nx0l4NeZdkE+yannNma3u6KCw3/6b1IuOJ+CWxehaEOn1LitfgcWmxtPagmK0qf7YJIkSdIQYNOsVHXWEoyEcFldpLmSUVDoJAxErxsSbK5+qTYJdjdRfeAVIno3rXXbSM6ciKrZPvuJw9CwvaVbVlYGQGnpyeW62dnZuN3unm2O+9GPfsQNN9xAfn4+bW1t/PnPf6ampoZFixad8hgPPPAAv/jFL076/urVq3G5zv7ibc2aNWf9nIEWjzGdjaEef1/F4/lrSpA02wEAwqYHr57JwYYKoKLfjxWP5z+Yzvb8/X7/AEUifdLxKpOQYeDQrHjDOkIIrKqGAeiG2XMXR1UUQkZkxPc2EULw6KOPAvCFL3wBTbWyb2c9etigpDT9nJImZjhM1dPPIUwBpjmkEiZGJEhT9bsIM0LhxEW4EvNjHZIkSZI0AMrbKilIysWmWVFQ0FQLTouDdFcKiqKcVFFi12x0h/196m3iaztC7eE3EWYEuzOV/HE3xG3CpKvsMJ631tJVOpaUiRMG5BiDkjRpb2/H4RjcrrqdnZ1AdDnOqSQmJvZsc1xdXR1LliyhqamJ1NRUZs6cycaNG5kw4dQ//Pvuu4977rmn52uv10tBQQELFiwgMTHxjGPVdZ01a9Zw1VVX9TTZjbV4jOlsDPX4+yrez7+94SNUzUFi+oQB+QAY7+c/0M71/I9Xy0kDzxCCtoCOXdMIGAahyPHpUWAoKiHTBD7+t2HXNNqDOoYQWEZo0mTLli2UlZXhcrm49dZbKdvfiB42SEpxkl+Yck77rH9jJaHmFmzJieTedGM/RzywOpv3H7uYTcPpkU2zJUmShpvOoJfXD73Drob9XFlyMQvGzKWs9SiVHTV47O7TXkP3pdpECEF7ww4aK6OtNRKSCsgrvQ7NYu+Xc+pvQggOv/AKSm0Dh194hQv+bfyAfLY466RJVVVVr6+7urpO+t5xkUiEvXv3snr1akaPHn1uEQ6ixx9//Ky2t9vt2O0nv4CsVus5fVA71+cNpHiM6WwM9fj7Kh7OPxxop/7o22QVzsWRkAFAZsHsQTl2PJx/LJ3t+Y/kn9Vgs6gq
/zyjmG7dYFdTB5ur28hMsHPDmEw2rN/AnBlzsFh6/z4SrBoWdeQuwTheZXLzzTfj8XjYuyNaLTpxWg6KevYXSF3lR2jesAmA/FtvweJy9lusAy06ZngnACnZ00Zs9ZEkSdJwFDEN3q3cxttHNhE2dBRFQTciCCFYWbaeUCSEw2InbESXVQshMITRsy2AqqiEIqGzrjZprFjXM8Y+OXMy2UWXx/WEHN+Bg3j3HyCMFXP/AXwHDpI4YXy/H+eskyZFRUW9fugrVqxgxYoVn/ocIQR33HHH2UfXB8crTD5ZTXKc1+slJeXc7kxJkvTZhDBpa9hBc9VmhDBorFhH4aRbYx2WJMWNRLuVRLuVNw4HsWoqs/JSyU5w4CZCdoJDJrFOsGvXLj766CMsFgu33347ddUddLT5sdo0xk48+zHDRihE1dPPApB24awBucAaSN2dVYSDnaiajaT0cbEOR5IkSeonh1sreOXAGpq6WwAYlZzHTRMWkuvJImJEaA20YbfYCUZO6LkpBDoGoUgoWrJ6jN1ipy3QHm0Yq53Zx36bIxmAzMLLSM0+L66T8kIIDj7/CmZYJ2BxYgkHOPj8K1zwb+P6Pe6zTpqMGjWqJ4iqqipcLtdpx/HabDby8/NZtGgR3/zmN/sW6Vk63sukrKyM888/v9djDQ0NdHV1MWvWrEGNSZJGinCgnbryNQS66oFoaV9OyfwYRyVJ8ac7HKE1EEZVYFK6B5ATck7lmWeeAeC6664jMzOTVS/vBaB0QiY2+9mvNK5/9XXCbe3YUlPIvfFz/RrrYGhviFaZJGeM3KZ8kiRJw82mym28dvAtABJsLq4pvYIZuZNRjzX6tmgWvnfxXXSHe/egi0QirF+/nrmXzsVi6f2e6La5zjhhApCSPR1XYn5PdXg8a921mzVvr2FLcwctOEgnyEUtjRTfeDXp06b267HO+kqjoqKi5/9XVZVbb731M6fnxMLcuXN54IEHWL16NUuWLOn12KpVq3q2kSSp/3yyukRVrWQWXkZy5uS4zlRLUqwk2Cx8b9YY6rqCuKwWOcHoNH784x8zfvx4Lr/8cnydQaqPtgEwaXruOe3PM2Ecnbv3UHDbrWinWGYbz8LBDro6KoDo0hxJkiRpeJiUOZY15RuYkTOFq8Zchst68rLRZEciyY7evTN1XSdJdZPryTrrKtWAr4Gm6nfJH3s9msWOoihDImGyfv167v7Sl6lrbiasWBCKhiIMthyN8MznPscfH/97v37W71Mj2EceeYQxY8b0Vyz9at68eZSUlPDUU0/xne98h+nTpwPR5Tq//vWvsdlsfPnLX45tkJI0zHhbD9FUuRGAhMQCckbPx2o/86bIkjQSWTWVwqRzG5c7UiQkJPS8Z4eCOudfVEhnR+CcxwwnTZqE5ydjUYfgEigjEsLpzkazOHrKqCVJkqSh50hbJYfbKlkwZg4AKc4kfnTpt3DZBqfHlre1jLrDqxDCoLl6M9nFVwzKcftqw4YNfH3pUtqam3DbXTjsSRxvnq+GvNQ0N/H1pUv562OPMWfOnH45Zp+SJl/5ylf6JYgz9fDDD7NpU7Rp2+7du3u+t27dOgAuvfRSvv71rwNgsVh4+OGHWbhwIXPmzGHJkiV4PB5WrFhBZWUlDz74IEVFRYMavyQNd4lpY/G2HMSdUiKrSyTpM+iGiUVV5L+TTxEOh7Farb1+RnaHlfNmjzqn/RmBAJozejE6FBMmAE53FkWTb8M0ZFWSJEnSUOQN+nj90DvsbNgHwJjUIkpSo+9rg5EwEULQWvcBzdWbAXAnF5E56pIBP25/CIfD3HvvvbQ3NZNmcxCyJgAKCBOhqGBzkYZBe0sL9957L+vWrcNm6/sy1kEZOdxfNm3axGOPPdbre++++y7vvvtuz9fHkyYAV1xxBZs2beJnP/sZzz77LLquM2XKFH77299y2223DVrckjRchQPttNS+T3bxlaiaFUVRyR93g/wQKElnYOWRRmp8Aa4qymRM
qjvW4cSl//zP/2Tnzp3cc889zJgxo0/76ty7l+qnnyPv5ptImXFeP0UYO6o2NJM+kiRJI5VhGmyu+pC3jmwkFAmjoDC74DyyPYO3HEaYBvVH36GzOZqwScmeRlbhHBRlaEzne/Ollynfs5ckzYKpWTFUC4YZQVNUFASmYsHQbCQKQUV5OW+99RbXXnttn487pJImjz76aM/IwTM1a9Ys3nzzzYEJSJJGKCFM2ht20lT9LsI00KwusgovA5AJE0k6A7phsr/FR9gwsWpD40JlsLW3t/PSSy8RCoWIRKKjFje+dZiCohQKR6ehnsWY4Uh3NzXPrSDiDxCorR2SSRMhBJ3Ne/GkjkGzOGIdjiRJknQWjrRV8dKBVTR1RafiFCTlcuOEBeQn5gxaDEYkSM2h1/F7awCFrKK5pA6h3ljCNHn5j39CD4exOBwENAf+oJ/uQCduZyJORwICiGh2HKZOqNvP2rVrR17SRJKk2AsHO6KTcXx1QLR3SUpW/3aolqQzsXz5cpYvX45hGLEO5awdausibJgkOSwUJA7O2uWh5plnniEUCjFx4kRmzpxJfU0nB/c0UH6gidvvmo3dceaXMLUvvoTu68KRlUn21QsHMOqB4/fVUn/kbZqqNlM642soqhbrkCRJkqQzEDEiPL37ZXyhLlxWJ9eMvYLzc6f0TMUZLMI0CAc7UFUreaXX4E4pHtTj91VzZ5CjfgMFCKDR2tWBHgkBCiE9iNPuQlHAVCxEVCvC8NNcWdkvx5ZJE0mSzsgnq0vkZBwp1pYtW8ayZcvwer0kJSXFOpyzsru5E4DJGUmo8t/PSfx+P8899xwAS5cuRVEU9u6IJmrHTMg8q4RJx85dtG/fiaIqjPrCbUO2l0l7ww4APKljZMJEkiQpzpmmiaJE+5ZZNAvXj5vHkbYqFpbOPeVUnMFgsSVQMO4GgCExIUeYJq2bt2Bxu0mePo2X1x/GpeuETYOubh8CBUVRSXAm4XQkoAgTIUAo0WoTIcCoa0II0efPKjJpIknSGWmu3kJr3QdAtLoku2QeNsfQ+qAqSfGgOxyhvL0bgKmZcrrUqbz44ov4fD4KCwu5/PLL6fIGqSxvBc5uzLDu81HzwosAZF55Ba5RBQMS70DTQz58bUcASM2WlX2SJEnx7Gh7NS/vX8WcoguZkTsZgGnZE5mWPXHQY+lsOQAoJKWPA4ZGsgQg1NxM9bMv0HXkKJYEF+4xY7j5smIq0jN4V7MiIgY2qx1PQkq0n4kwAVAQAIRNgaJqjElIwtR1tD42g5VJk34wlEvEJelMpWRNo7PlAOl5M0nOnCKrSyTpHO1p8SIE5HgcpDntsQ4n7oTDYZ588kkgOqVPVVX27apHCMgtSCY1PeGM9iOEoOaFF4l0+3HmZpO1YP5Ahj2g2ht3AwJXYj52V3qsw5FGOHndK0mn5gt18cahtWyv3wPAuootTM+ZOODLcPzealKt+/B7p5CUVgJE3wNbarfSUrMVRVFxuNKGxPuHME2aN2yk4Y1VdHUFCKMx9oYrWP/eFooLp8PCf8J98DB6ez0edzqKqmACguNDh6Pn3h3oIiklH33eXZiqhb7WZ8qkST8YyiXiknQ64WAHvrZy0nLPB8BqdzNm+lJZFi5JfbSnyQvA1AxZZXIqa9eupbm5mczMTK6++moiEYMDuxsAmDT9LBrmmSbWpCRUi8aoLyxBtQzNSx7TjNDRtBuIJq8lKdbkda8k9WaaJltqPmT14Y2EIiEUFGbmT+PqMZcPeMJECEFLzRYcWictNVtITC1GCIP6I2/hbTkIQGr2edicqQMaR38INjRQ9czzdFVW0e4NUmNLY70tjcr/8x9UHjnK4hv+hatvmEf7grt44dU/4A90kJCQhMVixTRNVFUlEtHx+ztxOhO4YcFdfPH6mVgtff8dDM0rCEmSBowQJu2Nu2iqehdhRrA7U3saRcmEiST1jRCCywsz2NPcyUSZNDmlq666CofDQTgcxmazcXBPA6Fg
BHeincKStDPej6Jp5N98E5lXzMWWkjKAEQ8sX2sZRiSIxebGk1oS63AkSZKkE9R01vPCvjdo8DUBkJ+Yw40TFlCQdOZLSfuiu7OS7o4KTKHR3VGBt/UQ7Y27jg1sUMguvoKUrCmDEktfhDs6OfS7/8LvD9LcbbAzYyKr9++geu/zCN0gISERRRNUHGymOH8yX7zlB7z85kO0dTRgmmZP3xJVVUlLzeHGa+4iN2M8+7fXMX5CZmx7mlRVVZ3RdjabjdTUVGx9XEskSdLACgc7qC9/C7+vFgBXYv6QyExL0lChKAqjUxIYnXJmS0xGIlVVmTt3bs/XrgQbmdkeikqjZbifRQgBQqCo0TtLQzlhAhAOdQIKKVlTUQZ50oIkSZL06SJmhAZfE06rg6tLL2dm3rRBm4ojhKC5ajNCRDCxYpphKvc+j8XmRrPYyR97HQlJowYllr4SCW5qU4to7G7kbaeLbW8+QdDbCiZMm3QJd975z1y5YAornvwIm91CUf5kln31QQ4d+YiyIzvp6urA7U6mtGQaY0tmYLFEm777OgOYhkCzxDBpUlRUdMZZG1VVmTRpEnfeeSff/va3UVX5xi9J8eKT1SWqaiVz1KUkZ02WF+mSJA0KIQThcBi7vXefl4LiVAqKUxGmOKP9tL2/jfYPP6LgtsXY04Z+0jcj/0KSMyejqrI4WJIkKdZM06TO10h+UnS5aFFKAbdMupYJGaUk2FyDGkt3ZyVdHUdRLQ4gCChEwl3YnCkUTVqM3XXm1ZmDzdR1mt5ZS8oFF2BLTeHPL+yk2jqaD2t2ULlzLcIQJCelsehzd/KVr99E4ejoudzy5fMJ+vUT9nQpkYjO+g0bmDtnTk+y5Diny4rWD8tz+rSHUaNGMWrUKIQQPf8lJiaSlJTU63sFBQVomsauXbv43ve+x3XXXYdpmn0OXpKk/lF3eCWNFesRZgRXYj7FU79ISra8qylJ/anG6+eto000dQdjHUpcev/997n++ut56qmnTvn4mVSZhNvaqXvpFboOH6Fz9+7+DjFmrDY3msUR6zAkSZJGtMqOGv649RH+d9sTtAU6er5/Qd60QU+YHK8yMc0IqhpdzWG1eVBUC1Z7YlxXivurqjj0+/+kYdVb1Dz/AgBXnl9AUlICX7l1HkmJDmbPmMcfHvxf7vvF0p6ECYDbYyc9y93rv7RMNw4XpGW6T3oswdM/Dff79Ino6NGjXHjhhWRlZfGnP/2J9vZ22tvbaWtro6Ojg+XLl5Odnc2FF16Iz+dj3bp1jBkzhtWrV/Pwww/3ywlIktR3iWnjUFUr2UWXM2rC5+UoYUkaANsbO3mvto3369tjHUpcevTRR2lvb6eurg6Abl+IXR/WEArqn/HMKGGaVD/3PEYoTEJRIRlzLhvIcAecoQcIBdpiHYYkSdKI1xXq5oW9r/Pf7z9Ova8Ji2ahubs1pjEdrzL5eGYMKKqK1Z6Iv7OK7s7K2AV3GqauU/fq65T913J8NfWELTYYP45NG7eSatW47ysz+eoXb+YfL63goUcf5IqrJ2O1xUc/xT7Vev7Xf/0XL730Etu3b2f8+PG9HktMTOSb3/wmV1xxBdOnT+dPf/oT3/ve91ixYgXTp0/nySef5Bvf+Eafgpck6dyEg53owU4SkqPrHD2poxl93lIs1sHNkkvSSKEbJgdafQBMyZBJyU/au3cv27ZtQ9M07rjjDgD2765n+9ZqairauXbRZzexa93yHr5Dh1GtFkZ9YXFPT5Ohqr1xN801W0jNnk5W0dzPfoIkSZLUr0xhsrVmO6sPbyCgR6tEL8ibxtWlc3HbYtebTAhBY8UG9HBXdNbuCVTNhh4J0Fy1mYSkwj43QO0v3UcrqHrmOULNzfj8YQ7acni7Pcyhe+4l2B3h7n/6NV/557nYrBolJfHX9LxPSZO//vWvzJ0796SEyYnGjx/P5Zdfzt/+9je+973vMXnyZGbMmMHevXv7cmhJkj7Fqea1w/HeJbtpqtqE
qmiUTPsSlmN/9GXCRJIGzqG2LkIRkySHhYJEZ6zDiTuPPvooANdccw3Z2dkYEbNnzPD4Kdmf+fxQSyt1r74OQM7112HPyBiwWAeDMA3aj40ZdiRkxjgaSZKkkccUJv+z7QmqOqLDEXI9Wdw4YQGFyfkxjgza6rfT2bL/WNNzDU2zE+1pEm04r1mcdHUcpbuzEndyUUxjBfDu28/Rvz5CJGLSGFJZ7ypl1XtraKk6gKYo5GQV4kwAwziz3mWx0KekSXl5OVOmfPbdn9TUVDZu3NjzdXFxMTt37uzLoSVJOo1TzWtXFIVwsJP6I2vwe6N//O2eLIQwYhytJI0Mu5s7AZickYQaJ3d94sXRo0dZu3YtAF/+8pcBOFLWTMCvk+C2UTT60xvZCdOk+pnnMMM67jGjSb/kogGPeaD52suJhLvQrE48aaWxDkeSJGnEURWV0tRimrtbWTBmDrPzzxu0qTifpr1xN9UHX0YIE1W1YXMmoygax5MmEH/VJgljRtPtSGSv38Yr3nZ2rvxfRCSM3Wpl/tyb+d4P/plxk3JiHuen6VPSxOVysW3btk/dRgjBBx98gMv18V3sQCCAx+Ppy6ElSTqNT85r7+qoQA95aa56F9PUUVQLmaMuJSVrimz0KkmDoDscoby9G4CpmYkxjib+PP744wDMnTuXkpIShBDs3R7tazJhag6q9ul/pyJd3RiBAJrdRsFttw75ZTkA7Q27AEiRU3MkSZIGhSlM3q/ZQV5iNgVJuQBcXnwhFxXMwG2P3VKc44Rp0FCxnta6DzAjITTNjs2ZgqKoCNG7QiPW1SZGMEjLpnfJvOJyTBQeefMQez0zWL3mj3TWlqOpCkUF41j2re9x4y2X4nBaP3Ofsdand+I5c+bw0ksv8aMf/Yhf//rXaFrvRi2maXL//fdz+PBhbr755p7vHzlyhLy8vL4cWpKkU/jkvHZhRqjY8xyaxY6iKLg8eeSMno/NkRzrUCVpxNjb4kUIyPE4SHP2Txf34aKzs5M333wTgKVLlwLQ1OCjubELTVPPaGmONdFD6XfvJlhfPyxGDAe7m/H7agGF5MypsQ5HkiRp2KvurOOl/auo9TaQm5jNt2d/BVVRsWpWrFp8fKCP6AG8rYeIhLtQVA2rzYMwDQTGsaSJiWnoPdUaiqISMcKDXm3i3X+AmudfINzhBUUha96VeBKsaC43M8+byI5gC1deegvf+9evU1AUvyORP6lPSZN///d/Z+XKlTz44IM8//zz3HrrrRQVFaEoChUVFTz//PNUVFTgdDr5+c9/DkBFRQX79u3j29/+dn/EL0nSCT45r121OjDCXaiahezi+bK6ZJgLh8O89dZbPPbCq+yqqOGpl1fylVs+x/z587HZbLEOb8QSAlxWjakZssrkk5KSknjyySdZt25dz3LfvTuiVSYl49Jxus7sdatarbhGjRqwOAdTe2O0ysSTOgar3R3jaCRJkoav7rCfVYfXs61mJwKBw2LngtzPbj0RC1a7m7zSa6jatwIjEsA0wz2PCQEqBqYR5sTciKbZCIc6EcJAUQa2ajHi91P3ymu0vf8BQggsKSlUh8McfGcnl08pYc70fNz28+js9JGdnYXFEh9Tcc5Un356kyZN4o033uCLX/wiFRUVPPjgg70eF0KQk5PDE088weTJkwFwu92sXbuWsWPH9uXQcWX58uUsX74cw5D9IaTY6ZnXbuhoavRutqraMFULdmcaKVlT43qtoNQ3GzZs4N5776WyspKO7iBCUaj6SLBp1SsUFhbym9/8hjlz5sQ6zBFpdl4qF+SkYIr4bXAWSyUlJT2d8oUQqIqCoipMmp77qc9rWrsOEYmQeeUVKNrQuvg6HWEadHdEx0SmZk+LcTSSJEnDkylMttXuZGXZup6pODNyp3Bt6RVxsRTnuPbG3VisCXhSo++R7uRCSs+/CyMS6LWdruts2LCBORfOwWrtXRmjWVwDvsyzc89eap5fge7rIqQb7HTkseZQ
DTv//gMKcsbz3bv/jetvjX4OGaotOvr8E5w7dy6HDx/mhRdeYP369dTU1ACQl5fHnDlzuPXWW3E6P54UkJ6ezty5w2t03rJly1i2bBler5ekJDlKUoqNro4KOlsPYhphBAJQetY0Bny1cdNBW+p/GzZs4K677sLr9eL0JONyflzRYFejjTbvuusuHnroIZk4iRFNVdCQScsThUIh7Pbey5UUReHyq8cxe07xp1aZBOrrqX9jJcIwcWRnkzRl8kCHOygUVaNk+pfpaj+K0/PpSSNJkiTp3OxrOsQ/9q0EINuTyU3jF1CUUhDjqD5mmhEaK9bT0bQHVbVSMu0OrPbotZ3V7sFq75140HSdiHDhSMg8KWky0BrXvEX9m6sRCNoUF8/pCax9ewXd7c1YLApOl4v8okSOfTQZsvol7eRwOLjjjju44447+mN3kiSdpUBXIxW7n8aIBFFQOfGvUrx10Jb6Vzgc5t5778Xr9ZKbm0tNhx8UUEwQKnQbkJ+bS11dHffddx9r166VS3UGiSkENd4A+YlOOTHnEwKBADfddBMXXnghP/jBD0668/RpCRNhGFQ99SzCMEmaNIHEyZMGOtxBpaoWEuXEHEmSpH4lhOi5Bp6YOZax6SWMSy/hovzzUeOogbge7qL20OsEuhoASMubicUWv9UZSVOnUPXmW2y3ZfPE9l2U7dqIpiikJKew9Evf4p/vXoLbM/T7ucXPK0SSpLNmmhGaqjZRvuPv6CEviqJhcyRhsydxPHHyyQ7a0vDy1ltvUVlZSUZGBl3BCKYCigAQKAJMBbqCEdLT06moqOCtt96KdcgjRrU3wN93V/HwjoqTOtuPdC+99BKtra3s3LmThIRoKXRzg4+ONv9nPrfxrbcJ1NZhcTnJv/WWYZMINvQAQpixDkOSJGnIKW+rZH34I8rbTr7ONYXJ1prt/Nd7fyMUifYBURWVr563mEtGzYyrhInfV0fF7qcJdDWganYKxt9Iet7MuHqf070+2j74EADDMFl3JMDDtvH85h9Pc3jXRqyaysWzruSZp5/l+/d9eVgkTKCfKk0Aamtrqa2tJRgMnnYbWRYuSf0nHOyg+sBLhAIdRMI+UDXsjlRUzXLSBzRZbTJ8rVu3DsMwsNlsNB4baxsO+rHZndHOYIpCRzBMQUoChmGwbt06rr322hhHPTLsbuoEIMftkP/mTqDrOk888QQAX/7yl3suWLesO0JjvZe5C8cydmLWKZ/rr66hcc07AOQt+jzWxPi9+3a2ag+vQg91klMyH1einDAoSZJ0JoQQrCxfT6PZxsry9YzLHN3znlvTWc9L+1dR460HYGvNduYUzQaIu/fl9sZdNFasRwgTuzON/HHXx9W0SyEE7R9+RN1Lr2AEg9gzMrDm5vHRwSZsabmkpnlQEhx842v/wle+fgM2+8D2URlsfT6bl19+mXvvvZdDhw596naKohCJRPp6OEmSjrHaPCiKFv1P1bBZ3Kjaqf9Jx3peuzRwOjo6UBSFzkCYsB4i0N6KoYchNQObw9VTbeILRMfQdXR0xDrkEUE3TPa3+gCYmil7XZ1o1apVNDY2kp6ezvXXXw9Ac6OPxnovqqqQX5hyyueZuk7V088gTJPkaVNIOW/6IEY9sEKB9p5KQItNTsyRJEk6U2WtRznUUo4FjUMt5ZS1HiU/MYeVh9f1TMWxW+xcNfoyLi44P9bhnlbI34oQJp7UUnJHz0fV4mcpdbijk5rnX8C7/yAgcOTmsnbLNpLSg9x+1VjafGF+eNvfSE1Nw+2On0a6/alPSZM333yTRYsWYZomSUlJlJSUkJgoRypK0kDp6qggIbEARY0mSvJKr6O27A1CgRYURcU0dIC4mtcuDSyL001AjxAJRQh6OzD0MIqiIMxjZf4nVJsoQpCcnBzTeEeKsrYuQhGTRLuFgkTnZz9hhDBNk8ceewyA22+/vae/Ts+Y4bEZuBJOfaHor64m3NKG1eMmf9HNgxPwIGlv3AmAO6UY
m0Mm2SRJks6EEIKVZevRzQgObOhmhCd3/gNVUQlEoqsfzsuZzLVjr8AT5yPcswrn4HRnk5g+Pm6u0YUQtL33PnWvvoYRDGEqCludBby2bit7d/0v8+fcyrK772LG7FGxDnXA9Slp8qtf/QrTNPn5z3/OvffeK5sLStIA0cNdNB5dh6+9nIyCi0nPmwmA1ZGIHvahaTZMI9SzfTzMa5cG3rPbjnDEkY1ughrRcSanEvJ24vIk9xrBqgjQIzqqgMsvvzx2AY8gu5u9AEzJTBpRTWCPrysf1zaJ8VljTnp8w4YNHD16FLfbzaJFiwAI+MMcOdgCwKTpOafdt7ukhLHf/y6614tlGN3JMo0wnc37AUjJkmOGJUmSzlRZ61EOtBzGZXUSCgdxWR1UdNSQ4kyiMDmfG8cvoCQ1Pj/Q+721tDfuInfMQhRFRVE1kjImxDqsXiofe5yOXXsAgT8pgz+WdfLuxr8QCQdx2Gx4kuwUl6bHOsxB0adPTTt27GD69On89Kc/7a94JEk6gRAmHU17aarahGmEASWaETlGVS2UTL0j7ua1SwPDMAUhw6S7K8iDf1jBqy/+jYTxY/Fk5OBtriMhKZ2E5LSTnidMk6C3g+TMXK688soYRD6ydOsRytu7AJiSMXKqLz9tXflxzz77LACLFy/uaQB7YHcDhmGSkeUmM+fTf16OrEwcWZkDcwIx0tm8H9MIY3Mkk5AUPyMvJUmS4llPlYmh47a6CBHErtmwahY89gTunr0Uy2mWrceSEIKOxl00VKwHBI6EDNJyL4h1WKfkGTeW9j37eM+Zx19eXUlj7SEURaGoYAy/+MXPmH/17LipihlofXolaZrG+PHj+ysWSZJOEA60U3/kbfy+WgAcCVnkjJ6Pw9U7oxtv89ql/hcyTHY0dPBebRstR2pY96f/pbz8AwCCO33MWPw1Nv/td/g7mnF4ktEsH//ODV0n2NWBzenmgjuW8W55G/Mm5cbqVE7p97//PQ8//DCVlZVYLBZmzJjBAw88wOzZs2Md2jkpa+vCFJDjcZDuGh5d48/EqdaVj00v6bXNb3/7W1544QVuuukmAEzDZP+uaIO+Seed+nXZtHYdCcVFJBQVDWT4MSGEoK1xFwAp2dNQlPiZ4iBJkhTPjleZGMKgI+RFJdrDz21LoNXfwZH2qpPeg2LNNCM0HF1LZ/M+ABLTSuOqwjDU3Eyku7vn/bYuYzR/8Cfx9nP/g6mHsdvsfGHxV/jxz+4mwe2IbbCDrE9Jk6lTp1JTU9NfsUiSdExny0Hqy9dEl9GoFjILLpYX1CNQdzjC+/XtfNjQTmtnN++/8Qr7Vr2E0tGFqirMn34BGfOvp7O4lMstVt57+r/xNTcgTAOUaFWSomp4MnO58AvfJHPKDF7bV8MVE7LjasReYWEhv/vd7xgzZgyhUIg//OEPLFy4kPLyctLSTq6ciXfTMpNId9qImCNnzPCp1pWvLFtPaVpxr7tQiYmJ3HnnnT1fd7QHiERMnC4rJaUZJ+23q/wI9a+9AYrC+B/9AHvGydsMZQFfHeFAG6pqJSl9YqzDkSRJGhKEELxZthZfqAtTCHQl+t4DYNdsdIf9p3wPiiU91EXNoVcJdjcBCpmjLiY15/y4iE+YJs0bNtLwxios7gTG/ev38ekKj72xHzVlFKpFYWzxFH77m39n5kWTYx1uTPQpafLd736XxYsX88EHH3DBBfFZViRJQ5EjIVp+npBUSHbxFbIx4AjTGgjzXm0bOxs76OoO01x5lM2P/4nmqmosAqaOLuKfJpaS7/HwZHouqmGSM2UGN4z/MzW7t1G39yPC3T5sCR5yJ80gf8pMNKsVYQjChklQN3DZ4ydpcvPNvZt6Pvjggzz00EPs2bOHuXPnxiiqc6coCvmJrliHMahOta78QMvhnmqTYDCI3W4/6eIwNT2B2++aRUdbAM3S+zVphEJUPf0sQkDarAuGXcIEwOnJZdTERejBDjSL7AsnSZJ0Jva3HOaj
uj0YwkRTVFKdyYS6o41fFUUhwebq9R4Ua35fHTWHXsPQA2ianbzSa0lIjo9eK8GGBqqffZ7uymoAlNRUnn/6TUaPPY9rLy7C253H929/nonjx6Fq8XPtONj6lDRZtGgR//Zv/8bChQv55S9/yfXXX8+oUfHxApCkocQ0dLo7q/CkjgbA7kyheMoXsDlT4yIDLQ2u3fUdbDrciN8fxhUyKexysM8I4Ur28NVpk7kwNR1VUbDn5jK6JkCnrx0TgUCQRxFMLDy2JwUEsKsJBQUVhZRUD/YTmsSeiSeeeIKNGzfy4Ycfsnv3bsLhMI888ghLly497XO2bdvGz372MzZv3oyu60yZMoV77rmHxYsXf+qxwuEwf/nLX0hJSWHKlClnFWc8EEKMuH+zx6tMgpEQdvvHd/r8eqDnTt8vfvELGhoa+OEPf8jEib0rKiwWjfTMk6ca1L/6OuG2dmwpyeTe+LlBOZfBpigKCYn5kJgf61AkSZKGBD2i85dtTxI2wmiKRporBbtmI0SwZ5t4qzbRNDvCiGB3pZM/9vq4uBkqDIOmtetoXP0WZsRAtdtYZ83iqb+/SGNDDXd+8cf883dvJCPL89k7GwH63NPkuLvvvpu77777tNsqikIkEunL4SRpWOrqqKTh6DvoIS9Fk27D6ckGwO4aessSpLNnCsHh9i7smkaGxcLuD2v46L1y9ldvZ1L2eUzJTeb8q6czI3Q9yTV1uGw2LO4Ecq69Gs+M6dT86gEiVfX4bQAKSs9+TdRjy7nEsf+bGNHITJmAqs07qxh/8pOfUFlZSXp6Ojk5OVRWVn7q9mvXrmXhwoU4HA6WLFmCx+NhxYoV3HbbbVRXV/P973//pOds3LiRa665hkAgQHZ2NmvWrCE1NfWs4ow1Uwj+trOCXLeTuYXpJFjjrwHdQChrPcr+5jJ0Q6fF34Yda687fet3vsvbb7+NaZq9puy1t3aTnOJCUU++mPUdPETL5vcAKFiyGM0x/NZOC2HKJZeSJElnQTd0/mvrIzR1t6AqKukJqTgsdoTovRw2HqpNTryJYnelUTDhJhyuDFQt9r0GjWCQw8v/h0BtHQDB3AL+z7o9fPThcyhAclISxWM9pGXE95jmwdSnK7pPvkD7a1tJGgkMPUBj5UY6W6KjJi02N6aQicWRImKa7Gn2sqW2jVZ/mFSLhrq5no92bWPdu8/h62onedEyvvWNO8nL8JDSdim1/3iF9MsuJuuqqzgaaOCRLX+ltPEodjOAO6j0JEkATNPs1bfEFCaaZkVtb0JEIihn0SD44YcfprS0lMLCQn7zm99w3333nf68IhHuuusuVFVlw4YNTJ8+HYCf/vSnzJo1i/vvv59bbrmFwsLCXs+74IIL2LFjB62trTz00EMsXryYrVu3kp4+dEbZVXsDNHSFaAvoXFU8vCa8nE60ymQdXeFuTCGwahqaGX3dHb/T9+B//w7TNLn00ksZMyY6hjgY0PnHkztwJ9r53OKpOF0fJ1OMQIDqZ58DIP3Si/GUnjy6eKgzzQhHdj6BJ6WYjIKLUDW5NEeSJOmz1Hob2Nd0CFMI0p0pqIpK2NARQmAIg7Ch9yQqVEUlFAnFpNpED/moLXuTzFGX4ErMA8DliZ8m/JrDgT09jXBbO6tJ5S9/eQqfrwWAWbPm8sc//or8UVkxjjK+9ClpYppmf8UxpC1fvpzly5djGEasQ5GGACEEvtZDNFSs7xkVnJI9jcyCi+WF8wgQjBh81NDB+3Xt+ELRN3erptBcUcVrz/wXNTX7EapCQUYiiy5IIS8jWhaZduFs3KVj0JNcPH9wNTsbop3Xu64pxdfegsvqwH6sJ4IQ4PP58Hg8HL9GCEXC6GaEpZfcinqWE5Xmz59/xtu+8847lJeX89WvfrUnYQKQlJTE/fffz9KlS3nsscdOGlXvdDoZM2YMY8aMYfbs2ZSWlvLII4/wwx/+8KxijaXdzZ0ATEj3YB0h637LWo+yt+lQT2VTiiOZQJcf
iN7pUwKCD9a/T7ojpddyroN7omOGLRYVh7P367Ht/Q8Id3ixp6eSc901g3k6g8bbchA91ImvrZzMwstiHY4kSdKQkJ+YQ6ozGU3VMDEJRkLRB4RAxyAUCcEJyRG7xU5boB3DNAZt/HC3t4baQ29gRAI0HH2H4qlfjIuqQn9VNdaUZKye6HWlY941fO/Nn7N1/VMAJCam8uP77+eOL98Q8+VM8Whk1A4PsGXLlrFs2TK8Xi9JSbFfoybFt/ry1XS2HADA7kwlu2ReXGWfpYHzYX07b1c04w/pBPw6StBg7thU/vjAcrZvegPTNEhwaCyZNonP5ReQVFOLqeuoVitCVdgeqmbVu9HeEYqicGH+DI60V1FtdiAcLrqPHUcIQZeioXu0E974LHQGvaxu/JBxxVMG7A1x3bp1ACxYsOCkxxYuXAjA+vXrP3M/QghCoVC/xjaQdMNkf4sPgCkZiTGOZnAc72ViCpPMhDQipoFNsxI4YZsjG/ZjRCJ4RqUwbVp0rKJpCvbtPDZmeHruSa/F9DmXotptOLKy0OzDb2SzEIL2hp0ApGRPjYuLaUmSpHgVjITw6wFSnclYNAs/vvw7dIf9vbaJRCKsX7+euZfOxWLp/fHWbXMNSsIk+rd9B42VGwGBw5VB/rjrB+VvfFfZYTxvraWrdCwpEyf0eszUdRpWrqZ5/QaSp02l8EtfpL6lm9//Yz9hexpCU7n80oX86U+/IC09ZcBjHapk0kSSBpkrMQ9v6yHS8maRnnsBinp2TTmloeXENa16V5iWtm4Un066N0Jit8FTK/7Evq3vowqDOaNHsbR0NDmeRDS7jbQLZ/fsx68HWH14A8FIiPzEHG6auJDshAx+uf4/sVvsH99tiR40ZndcysrKACgtLT3psezsbNxud882x/3oRz/ihhtuID8/n7a2Nv785z9TU1PDokWLTnmMUCjUK6Hi9XoB0HUdXdfPONbj257Nc05nf6uPoG7gsVvIcVnPeZ/9GdNAK2s9yoHmMlxWJzbNhk2DYCiCNwB2RwQlEuHgpr2oikr2xYXsbyyjNK2YyvJWfN4gdoeFUaNTTnmuiefPAIbGz+FEZ/L7C3TVE+huQlE0ElLGDblz/DRD6fU7EM71/Efqz0uSPktAD/K3j57FG+rin2Z+kVRnMsmORJIdvW9O6LpOkuom15OF9SyrafuDaUZoOPJ2z03RxPRx5BTPG5T+JUIIGt9YibW+gcY3VpI8YXzPdWf30QqqnnmOUHN06U19Z4C3lr/KlQsuoig3kYLrbuLHy27i4lnnDXicQ91ZXTVXVVUBkJeXh6ZpPV+fKTlZRxqJQv42jEigZ01jUsYkXIn52BzJsQ1MGjBCCKq8ATbXtDIqyUUJFna8X011ZTvZdgWzLcTESVnMuqmIKTO/RsORg3yhZAznZWahKAop559HznXXonhcPW+4blsC14+bTygS5sKC83r6l3zv4rvi6o5LZ2d0icrpqu4SExN7tjmurq6OJUuW0NTURGpqKjNnzmTjxo1MmDDhlPt44IEH+MUvfnHS91evXo3LdfajftesWXPWz/mkvWoynYqdRNHNyjf39Xl//RHTQBJCsCb8Pp3Chws7/mN1Tt1h0E1o7vTSsG0/QX8AT2YSrqJkHt34DBdbp1J7WMXvU0h1CFavXtmzT1tFFeG8HIjBBW9/+7TfX6KlAofWScBI5dDqdwYxqsET76/fgXa25+/3+z97I0kaYfx6gL9++Ay13gacVgdBPQjOWEd1MiMSpGrfiwT9zYBCVuFlpGRPH7QlLr6Dh/DtP4CwWvHtP4Dv4CESiotoeGMlLZveRQjQ3G5WdNt49o//g81qx6r+hq8svRCn3YJ6imbs0snO6sq5qKgIVVXZt28fY8eOpaio6IxfEHJ6jjTSCNOgpe4DWmvfR7O6GD3tS6iaDUVRZMJkmDKF4ECrjy21bdT7oqPvajv87H2/CQXw+ny89darWJLTyZ35RdIz3dg7s/n1rIuw
ahqu/DzyPn8jrqJC9jYd5NVdb3Hj+IVMzIxWbZyfe/II3ni843K2Hn/88bPa/r777uOee+7p+drr9VJQUMCCBQtITDzzpTG6rrNmzRquuuqqPv2c/LrBno+OkiRg8bQppDvPvTdRf8U00MpaK3hx0zqEEKg2C5qiohsmhh5GsQgMRaFk7mTc6YlY7FaSXB4cCQnMmnAZrx3ZS3Iy3LxkBm5PdPmNd/ceqtZtwub1Meb73z3rvjvx4rN+f5FwN0d3VQBJTJ5wM46E4dUweKi8fgfKuZ7/8Wo5SZKiusN+Hv7waep9TSTYnHz9/NvJ8cTn30tVs2NzpqCHfeSVXktCUsGgHTsUCvHM//t/rHl/O3Wmg1w1yLxfP8CFxSXg6wKgJX80/75iFYeP7EZVFEYXj+aCy3JIcI68v9F9cVZJk1GjRkWbFh57Izj+tSRJvfl9dTQceZtQoA0AhysD04zIRq/DlG6Y7Gzq5L3aNjqCYSIRE4fNwrSsJGbnprKu2s8b76xmzbrnCQZ8OJ1OCr//VQA8pWPIumg2CUWFpM6aSXvIy/Pbn+dASzkA71Zt60maDAXHK0w+WU1ynNfrJSWlb2tm7XY79lP0urBaref0Qe1cn3ecBYWLC9Jp8YfISUw45/30Z0wDya8HePnQajLd6UzMLGXhmLkIIXjktb0cqGxDEzqGYqWwKJWf/8uknusEt83F4e3tKIpC0Zg0UlKjowwjXV3Uv/QKiqKQev4M7OdQLRRvTvf762g8gKKA052LJzkvBpENjnh+/Q6Gsz3/kfyzkqRP6gp189CHT9PY1YzblsBdF3yBLHdGrMPqRQiBEAaqakFRFHJK5mNEAljtg9fTbMOGDfzwX/6FI4cOERYWTEVljzBZd2QfeckpfGfB1RxwFfDsH/6IrodQVQvX3rCEP/3hPmx2+XnkbJ1V0qSiouJTv5akkc40wjRVbaa9MdrkT7M4yS6aiydtrEwwDmOrjzaxvaGdUChCuEsnuT3M12+aRpLbxiPPr+OPf/h/NNWVowClqYn884xpzCr9+AJg1JLFGKbB+sqtvF2+Cd2MoCkqc4sv5Irii2N3YufgeC+TsrIyzj///F6PNTQ00NXVxaxZs2IR2oBJsFq4ojC+LugGihCCf+xbSWfQS7Y7gy9O/Tx2i41dhxrYsOo9mit24e9qx+FKoqlkOjfOnMzUsdk9z59xoYec/CTsDkvP/mpeeJFIVzfOnGyyrpoXq1MbFEnpEzANXTb/liRJOgVvqIuHPniK5u5WEu1u7rrgdjIS0mIdVi+moVN/5G2EiJBXeh2KoqBq1kHpX3Lchg0buOuuu2ivr8dtc6HbEhGAAljDXmq8nXz72WdxOpOw25xk543h//7uV8ybM2PQYhxuZCNYSeonhh7g6O6n0cPRCRpJ6RPIKrwMzRqHCzClPmkPhlEVhSS7FdMwSfNF6G72k9QaIr/LwG7VaKhs5Af/82dWvv4KQghS7BpfnDiOq4tLsGoa7R9+ROblcwGo6qjlhX1v0NQVbdRVkjKKmyZeTWacXSiciblz5/LAAw+wevVqlixZ0uuxVatW9WwjDU0f1u1id+MBVEVlyZQbsFtsrF+/nq/cdTdN9bUoCCKRMBE9xMEPV3HDe8/x2EN/7PmdK4pCbkFyz/46tu+gY9ceFFVl1O1LhuyynDNlcySRJUcMS5IknZJF0bCoGkmORO664Auku1JjHVIv4WAnNYdeI+RvARSC3U043VmDG0M4zL333ktnWzupVhsBLfo5QxEmQlExrS5SMPGGuunqamPxP/8bv/+PH+CwD+/314EmkyaS1E80qxOnJxu6FHKK55GQLBsfDzd1vgDv1baxv9XHlPRExgRh57YaunwhigGHw8Kk2aOYfF4uNbVV7Nm2FrtiMq+kkDvGjyXVlYAtNYW8m24gcdLEnv12636aulpIsDm5buw8zsuZPGQrk+bNm0dJSQlPPfUU3/nOd5g+fToQXa7z61//GpvNxpe/
/OV+Odby5ctZvnw5hmH0y/7Oxb5mLxZNZXRyAtowb6bW0t3GKweiDS4XjJlDflIOGzZsYOlXv05TcyuOhCSsVhsdLbWoqobF5qCpvoalX/06j/7tYS6+5FKs1o+nhemdndS++BIAWQvm4cyT1ReSJEkjmcvm5GvnLyFs6KQ6k2MdTi/dnVXUHnoDwwihWZzkj7120BMmAG+99RaVlZUkKgoGKoZiASGIGBEUzYqhWBCqhcwENyGLxucWTpMJk37Qb0mT2tpaamtrCQaDp91mzpw5/XU4SYo5IQTeloMkJBVgsUX7GGQXX4miaINaoicNLCEE5R3dbKlpo7Lz4wkHvqDOlvU1mIaJ02Vl4vRc9tbX80FDJzPsoygpKWHpZZeR1tzKhMwsVJuVrPlXkjF3DorFQqu/nfSE6B2UCRml3DRhIVOzJ+CKw8qkhx9+mE2bNgGwe/funu+tW7cOgEsvvZSvf/3rAFgsFh5++GEWLlzInDlzWLJkCR6PhxUrVlBZWcmDDz5IUVFRv8S1bNkyli1bhtfrPe20noFkCsE7lc10BHVuHpfLxIzBW8scC5uqthE2dEpSRjGnaHbP3a7Wtnbs7lQsFg09FMAwIqiKSoI7mYgJrW3t3HPPD/nW0v/LebOKmHFhIQD1r79JxB/AVZBH1rwrY3x2AyvY3URLzVZSsqcPapNASZKkeNfm7+BoR3VPs3u3rX96g/UXIQRt9R/RVPUuIHAkZJI/9nqsdk9M4nn7zZWEvF48FisBixtTgD/gxR/swm53kZiQiq458Ch+wsJg3bp1XHvttTGJdTjpc9Lk5Zdf5t577+XQoUOfup2cniMNJ3rIS/3Rd+juqMSTWkr+2OgfI83iiHFkUn/a3+JjY3ULTd0hTCGIhCOcX5DGhXmpZCU42B5UsNo0WsI+vv+rf2f/zne5+os/5vIZ+YwrTOXzcy+necMmUmZMJ+f667AlJ9HY1cxLO1ZT72vkB5f8E2579OLgwoL4XWe6adMmHnvssV7fe/fdd3n33Xd7vj6eNAG44oor2LRpEz/72c949tln0XWdKVOm8Nvf/pbbbrtt0OIeaDXeAB1BHZumUnqsqelwdsO4q0h2JHJeziRUReWtt96ivPwomt0DKEQiJoHuaANgu8uNoqpoikDYPRw9WsHO3VuZPP3jhEHuDdcjTJPMeVeiaNppjjo8tDXsxNd+BEW1yKSJJEnSMS3dbTz04VN0Bn1oisr0nEmxDukkjZXraW+I9ipMyphAdvGVqGpsFmu0f7Sdig0bIGIQsTrp0sN0B1owTTO6gTBBmBiKBQMNEfbT0dERk1iHmz79xt98800WLVqEaZokJSVRUlJyVuMeJWmoEcKkvWEnzdVbME0dRdFwJKQjhImiqLEOT+pnLYEQDb4AeiCCoylAWrvORRMLyUyIJseyS1L4tweWs/Llp9HDQWzCoNRex9hR0ekwWQuuImnqFNwlxYQNnZVl69hQsRVTmFg1KzXeesZnjInlKZ6RRx99lEcfffSsnjNr1izefPPNgQkoTuxujiYIJqR7sGrD/9+/qqpcXnxRz9dr167FHwxjdbkRAoxIiIgeQgEcrui1gKooaJqFoK5zsHwHE6d9p+f5FrebwjtuH+zTGHSGHsDbchCAlOxpMY5GkiQpPjR1t/LQB0/hC3WRkZBGSWphrEM6pcS0sXQ27SNz1CUkZ02N6fJpW2oqblUjZArq/H4iRrQgQdM03M4k7DYHCBBASLVj6l6chhmzeIeTPiVNfvWrX2GaJj//+c+59957sdnk+CJp+Ar5W6g/8haBrkYAXJ48skvmYXf2bXyqFB984Qjb6toYleRiTIqbbl8Is7wTW4WXzE4dzYSUNBeRiIkeMfnzY6/y5z/9gY6WOlRhMjU9mW9MmcR5JyzRsLicuEuKOdhSzkv7V9EeOPYhO6OUG8ZfRYpz8JeUSP0jYprsa4k2fZ4yjJfl+PUAmyq3cWXxxVi03pcMlTWNGKbAIgAE
IX8HADaHG1X9uHLEggICIiKIO9GO71AZnrFDZ4x2X3U070UIA4crA6c7J9bhSJIkxVxjVzMPffA0XeFustwZ3HX+F3oqb+NBRA9gObZc2uXJZfR5S7FYXYMeh6nr+CurcI8ZHY2lcBS2BDeBSBhFsaCqKglODy67C1BQhOh5bsgUKIrKJFVFCDFke+XFiz4lTXbs2MH06dP56U9/2l/xSFJc6mo/SvXBVwGBqtnIHHUpyZmTZHXJMNDiD/FebRu7m70YpqCivZuGjxo4uLcB0xCkAelZbqbPKqCoJA1FVfg//+dX/M9fnyASMci0aSydNIkrRo9G01ScOTmISATFasUUJs/sfoVdDfsBSHIkcuP4q5iYOTa2Jy312aG2LkIRk0S7hVFJg38hNRiEELy47032NB6kqbuFO6bd3OuxJh+YZvQCzYiEMSJhFEXF7kpEiOPDDwWqEAggYnPSuvk9alb8g9SZ5zPqC8NnqdbpRKsTdwHRKhN50SrFk9///vc8/PDDVFZWYrFYmDFjBg888ACzZ8+OdWjSMFbna+SvHz5NdzhAjieTr5//BRJs8fE+Gu1f8iEtNe9TOOlWHAkZADFJmHSVH6H6uRfQ2zso/pfvoHiSWb+jmjm5Y3nSsg1DCNISM9DUUyxvNU26Az4ykzI5PyW957pUOnd9Sppomsb48eP7KxZJiluuxDwstgQcCZlkF12B1T78+xcMZRWdfrZpaUzs9FOafupqjmqvn801bZS1dfV8Lz/RyaycZLZ/eBDTEGTnJjJ9VgH5RSm0dgaJmAKrqjB6dAmpdoU5haNYMmkibrsDz9gx5N10A47s7J79qYqK25aAoihcVjiLeSWXYrfIirz+EsvpObubvABMzkhEHaYfhD+o28WexoNoSu9lOQARQ5CaNxlFeR3TCGO1OdBSsjGNCCgq5rHqE1UIjIiOqqrkF4yj9tXXAEbMpJyu9qPoYR+aZicxfVysw5GkXgoLC/nd737HmDFjCIVC/OEPf2DhwoWUl5eTljb0Rt5L8c8X6uKhD54ioAfJS8zma+cviZsG+KahU39kDd7WMgB8bWU9SZPBZAQC1L32Bq1btqKbJq9UNrHtjrv55j/9mHEXFvDu1Ju4RYzi9bf/B1+wC5fTg3bCAIqIoRMI+LDZXVxxxTdI+dLtqDJh0md9SppMnTqVmpqa/opFkuKGEQnT2bz32J1BFVWzUTzlC2gWp7xTGOeEEKytaqFVcbC2qoUxaYkn/c5eP9zA9oYOACIRg+SQ4POziilMiZaGJlwxGpvdQk5+EmHd4P/9ZQU7ytpYungh82cVclXpWBIuvoRRySnYUlPIveF6kqZExwTXdNZjt9jISIhecC4YM4cL8qaS6xn8sXTDXaym55hC0KVH1xFPyRyeS6x6jxeeS35i72UlVovK1QsXsHnlY3R3NpKbk4KiuBEIunw+3J4EFMDbHqDd10ZhYSE/HJ+IqKvDPbqY9EsvicFZDb7jzQOTMyfHrHGgJJ3OzTff3OvrBx98kIceeog9e/Ywd+7cGEUlDWceu5uLCy7gUOsR7pxxG05rfAxQCAc7qDn0GiF/K4qiklU0l+TMKYMeR8eu3dS++BLhTi/v1TXx190HaexsQ1NVPvzoPa68ZhzFKSlYC8/juoV3s3bDY3R4mxDmx31LFFUlOSmLK+Z8hbzcyWzYXEfx5AL5+aWP+vQO/t3vfpfFixfzwQcfcMEFF/RXTJIUU762IzRUrCUS7kJRLaRkRf9oxqI071ydSaXFcHWko5uDLV2gmxxs6eJIRzeFSS6EoKdZZ1GSiw9rWkn0RbBU+LBHBKGCDDiWNCkcnYYQgrc27ean//5ryvd/hCc5gzkXz0LMHEXaedMZP2UKSZMnknH5XFSrlaAeZNXhDbxX/RFFKQV844LbURQFh8UuEybDjKoofG1aES3+EOkue6zD6XcR0+Dp3S+jGzqjUwu5rGjWSdsIIcjO8DBr3m3sePtvtDQ3kpGRgc1q
RVPBZlEJh8MEwx0kpyTx71/7KlRXo9ltFCxZjKIO/6WNQgjcqaOJ6N0kZw3+xbc09D3xxBNs3LiRDz/8kN27dxMOh3nkkUdYunTpaZ+zbds2fvazn7F58+aeyWX33HMPixcv/tRjhcNh/vKXv5CSksKUKfL1Kg2c+aMv5Yrii07qkxUrXR0V1JWtxDBCWKwu8sZeh8szuNWQQgiqnnqG9g+3U9nRwf/sPMjuujpMwOFw89WvfI1/vfdONE2jscmHARTnT2HUbQ9wpHIHFdW7CQa7cDjcFBVMoaRwOppmxRCCmrpOjIiJxTq8p9QNtD69WhctWsS//du/sXDhQn75y19y/fXXM2rUqP6KTZIGVUT301ixrqcsz2pPwjYEm7yeSaXFcCWEYG1lM8FwBCVkErREeG5fDXaLysX5aczOTaW+ppOK96tIr+nEaoKiQPG4DDKyPl5yVV3fzr2/+B1rV/0DI6LjEjo35Kdy+7zi6M9S0yj9l2+jHGuutbNhH68dfAtfqBuAZEciuhnBpslyyOFsOCZMAFYfXk+ttwGn1cHiydejnqJ3k6IofO7SEt54qpEylxNcTlpbW4lEIoTDYXw+HxaLhdGjR/OLf/1XMt/dignkfO467COk7F9RFFKzp5ES42kL0tD1k5/8hMrKStLT08nJyaGysvJTt1+7di0LFy7E4XCwZMkSPB4PK1as4LbbbqO6uprvf//7Jz1n48aNXHPNNQQCAbKzs1mzZg2pqakDdUrSCHS0vZr1R7dw+9SbsFlsKIoSNwmTbm8N1QdeBsDpziZv7HVYbYO/BF9RFERyEv/9/jZWHqlBF6CqFqZMvZyvfPMuFl0zDbtVQ4+YdKU6ae95W7bgmXoRU6ZcSDAYwuGwg6LQfMK+U1OcCPke1Gd97mly3N13383dd9992m0VRSESifTlcHErluvqpb4TQtDZvI+myo0YRghQSMudQXr+hUOynPpIRzflHX4swqS8w8+Rjm5GpwzPHiymEL36Sexu7mRvkxehG5g2FVMIKjv9ZLjs7Gvy0rSxhsb6aC8Km6pQOimT6TMLSEqJrqcVQvC/j73I/33w/9HV2YrFjHBhZip3nTed/OQUQvv2477oQiBa/tjib+Pl/aspaz0KQLorlZsmLGRMWtHg/iCkQRPQDVRVwT5MRwz7wwG21UaXlCyaeC1JjtNPBtq0aRMfbNtKUlISTz75JAcPHuTtt99m9+7dTJkyhXnz5jFv3jyq/vdhuiMRPGNLSTv272ckkQkT6Vw9/PDDlJaWUlhYyG9+8xvuu+++024biUS46667UFWVDRs2MH36dAB++tOfMmvWLO6//35uueUWCgt7j3W94IIL2LFjB62trTz00EMsXryYrVu3kp6ePpCnJo0Q5W2VPLr9eXRD552jm7m69PJYh9SLy5NLQlIBVnsSWUVzB/W6P9TcjBnWceblYpqC6uxJbPGbhIVCcdFUrlu8lH/66pWkJ39c6W61qPzozll0+fVe+4pEImzYsIE5c2ZjsfQ+B4/LhtUyPK9ZBlOfXhnihLFG/bntUBOrdfVS/2isWEd7Y3S6gcOVQc7o+TgSMmMc1bk5XmmhmwIbJroZ/bokOWFQLtwNUxAxTSKmQDejx48cW2eZ4/640dehNh++UKTncd0UGMeeoykKC0o+Xs6ysryBWl8Q/YT9RkxBxBSoCvzoonE95/7cvlq6dQNhUY/N7QAhINFh5ctTC1l5eA+apjJuchZTL8jHk9h7Le2uXbv4n//8Ff4OHwUOC1+bej4XFhZidSeQc+3VpM7+eJlCZUcND33wNBEzgkW1cEXxRcwtujBu7p5IA2NrXRtb69qYOyqdC/OGX8WEy+bkOxfeyd6mg0zOOnXj0pVbKijN9/C73/0OgNtvv53Ro0czevRorrrqKt544w3SPRPp7tLxdepkX72AupdfpWDJrSMmgeBrO4yqGCSmjUOVFWfSOZo/f/4Zb/vOO+9QXl7OV7/61Z6ECUBS
UhL3338/S5cu5bHHHjtp4qXT6WTMmDGMGTOG2bNnU1payiOPPMIPf/jD/joNaYQ61HKEv+9YQcSMMDathHkl8dHLKhzsxGJLQFUtKIpK/rgbBjVZIgyDprXraFj1Ftu9nUy847uMmpDHS5sqmDZ/KUk2lW9+9VomjT51E9oUj4MUT+/rV13XSXZBfqYbq2z6OiD69AoxT2g6I0lDVVLGBDqb95OeP4vUnBlDeozwkY5uytq6cVlUgoDDonKoratXtUlDV5CQYZ6Q3Pg4ceHQ1F6NLddVNtMZ0nu2OZ6s0A0Tt83CFyYV9Gz70PajNHaHThmXx27hX2aO6fl6c00bNd7AKbe1W9ReSZPWQJj6ruAptzUUembPH+nopiMYBtNEFaCYAjUiUBSTRl+ASq+fS64cg82ukeD+eFlFY1s3h6s7uWRaLlMmT+bi0aNJTO3k85Mm4bDZSLv4IrKvXoDF1bunTX5iDumuFNz2BG6asJB0lyxnHmyDXeUnhGBPsxfdELhtw/eiJMWZxKWFJ/cxATha18mq9yr4zw/X0FBZRWZGOl//+td7Hq+r7uDoHoV6Rx2aqjJhSjbpY0sZ+4PvjYiEid9bTap1H00VtQihI0yTlOypsQ5LGgHWrVsHwIIFC056bOHChQCsX7/+M/cjhCAUOvV7uSSdqQPN5Tyx80UiZoTxGWO4Y+rn4+KmUldHBbVlK0lMKyWnZB7AoCZM/FXVVD/3PHt37+WvH+1ib2sbc9oT+O493+aqWYU47KO5bFou2jCtZh3KYv/qlaRBFuxuJtjdTHLmRCC6hnHMjDvRLPHRwftcCSF4u6KZQCRCtxBEFAv+QBgUpVe1ybP7a/CFTr1ULt1l65U0OdDqo8UfPuW2gUjvD6qq2vsDkUVTsKgKVlXFbev9p2ZUopMEq4ZFVbGqx7bT1J7tTzR3VDqzc00sqoJFVXttazl2zOMVNkI3sfqNaJWJEKAAKARCBmsrm/natKKeD26hcITf/vEJnnziMa689fsUZLkpyPLw/asW4jt4CE/paHJvuhFnTnSEcFeomw2VW1kweg4WzYKmatx1we24rHKiUqwMdpVftS9AR1DHpqmMSx1eS9621+/BZXUyLn30abcRQvD6u0cJdHdSsXM1CXaFb3/727iOJRSFEGxZuZeuVkHI3sWoomTyC6N9oUbCvxEhBC01W3BoHQS7AzgSskjKGB/rsKQRoqws2o+ttLT0pMeys7Nxu9092xz3ox/9iBtuuIH8/Hza2tr485//TE1NDYsWLTrlMUKhUK+EitcbXe6q6zq6rp/yOadyfNuzec5Ai8eYzkY8xb+/uYyn97yCKUwmZpSyeOLnEKZANwcuts86fyEE7Q0f0lq7FYBAVzOhoH/QKgHNUJjG1Ws4vOYtntqxk7ePVBNWLNjtDhwuB4nJdqaMjlavmqaBaZ7dzaB4+v3Hwrme/9lsL5Mm0rB0/G6f3zuFpLQSAEwzQkvNVlrrPkRRFJyebOzOaHXAUE+YhCIGb5Q3sr2xAwQcz18oioKiKJS1dfdUm6Q4rFiPJSeOJyKsWvR/k+293zxm5aYSjBhYtY+TG8cTF45PZMG/MDEfBQWrpqAdO+7pXFl05suf8hM/fWqRYZi8u6eOndVt6F0B6nZuo3bfh4S6fDg8HvInnU/u1FkcaNY40tFNSXICr739Pj//919Tc/QAmmnQcWgdLsc8FEUh7/M3EGxo7BkhbAqTbbU7WVm2joAexGFxcGXJxQAk2IbORCWp73Y3RT8gjE/39ExiGg6au1t5cd9KdEPn6+d/4bQ9ecqqOyiv6WD35pdxWEwmTpzEdddd1/N4TUU7NUfbQSiEwwba4Z20f+Qg9YLzB+lMYqu7s5LujgoATCOMMzEXVbPFNihpxOjs7AQ4bQI5MTGxZ5vj6urqWLJkCU1NTaSmpjJz5kw2btzIhAkTTrmPBx54gF/84hcnfX/16tU9ydOzsWbNmrN+zkCLx5jORqzjjwiD9fpH
hIVOtppGYsDC6upVg3b8U52/gkGipQq7Fn39B4w0fE12dh8dnJ+VEghgW7eRNXv28NrhSrqFiqFYKCk+n0suv46LZiax9+BW9h7s+7Fi/fuPtbM9f7/ff8bb9lvSpLa2ltraWoLBU5fRA8yZM6e/DidJp/Xx3b5OWmq2kJhaTMBXS/2RtwkHOwBwp4xG04bP5It/HKxjS20bphDYjlV2hLtDJCa4UIDOUKSn2uTLUwo/c3/HzchOPuNtXdbY5GAjusGb++uoLN/O+48tx9dSf2xefbSryaGNq/Bk5HDhV7/Di5rJlkf/zuZ1K8E0SERn8bhS7lxwPunJ0Z4rjsxMHJnRpE69r4l/7F9JVUctALmeLEplk9cRKWKa7G+JJk2mZJy+OepQEzEiPL0rOl54TGoRJamnnoAnhOC1TUcAOH/aBHZ0HeYHP/gB6rHKMCEEW97cjR4xEWggBJ1ha3Q81QgghKC5ajOmGQYEAtCDnT3LByUpHj3++ONntf19993HPffc0/O11+uloKCABQsWkJh45n8XdV1nzZo1XHXVVXHTfyEeYzob8RT/Bb5ZbKvbyefGzj/l9LWBcLrzDwc7qD/8BuEgKEoKGaPmkJQxaVBiOk4IwT1/f4LXyo6iKzYyskq4+LJbuPWO+Vxyfn6/vEfE0+8/Fs71/I9Xy52JPn/Kefnll7n33ns5dOjQp243nKfnSPHl+N0+U2h0dxylat8K/L7oh16LNYHs4ivwpJ6+BH0o8OsGqgIOS3SCVV6iE73aJMlmxWO3gAAdUIlWfLgsWq9qk6EqFNQpP9hMU4OPyxdGm1TWBEKUVe5iw59/je7vxpmYfKzcUgAKpqHjbapjzf+9n00paai6iU0PcWlOFndecD4ZbjcWUyBME+XYB8BQJMzbRzaxsfJ9hBDYLTauGj2HiwvO7/mQKI0sZW3dBCMmHruFwqThU2G0unwDdb5GXFbnaccLA+wpb6W60YfNqvGTf/sXFPObuN0f/y2pqWinqqINIaJ5Es0I06Em05VSyEjo9tPdWUlXx1GiyVoFVbUR8NXS3VmJO7koxtFJI8HxCpNPVpMc5/V6SUlJ6dMx7HY7dvvJN5ysVus5fVA71+cNpHiM6WzEKn5/OIDLFr35VJiaT2Fq/uAe/1iFuR6YgssVrTAXpkHl4dfQQ16sNjf5Y6/D6ckZ8FiEEHTu3EXC2FKsLhcrt1QgzluMraqDi6dew/U3X8/nPzcRp73/f09D/fXbV2d7/mezbZ+SJm+++SaLFi3CNE2SkpIoKSk5q0yzJPW343f7hIhgYiES7qK9cRc2ZyopWVPIHHUpmmXoVph0hyO8V9fGB/XtzMpJ5YqiDIQQHGz14bRoeGwWFJRj9zk/ZtNU/BF9UCfp9BdhCupqOji4p5GKw60YRrQB9ZQZeaSmJ7DmcC2bHv0jur8bV2o6Cr3PTbVYcaWm01lXTai7izETpvKjsaOZnpeHMy+HvJtuxD26pNdzXj6wio/q9gAwOWscnxs3/1NHr0rD3+7m6AeRyRmJvcZcD2WHWyvYUBFd333LpGtJdHhOuZ1pCl7fHB2rPee8fDwuG/DxspPjVSYR3cRQNBQEVjNIRHXy3so9FHxzzpD6m3O2Pq4yiWBGoj2gLNYETDNEc9VmEpIKh/X5S/HheC+TsrIyzj+/95K4hoYGurq6mDXr1A2eJakvPqjdyasH3+bOGYspTB7cZAmcusJcURQUVSO76HJa6z4kr/QaLLaEAY8l3NbOR3/9G395/gXMzCLuuueXJKQ6saWP4ge//DM3zR1DZurwufEykvQpafKrX/0K0zT5+c9/zr333ovNJtfuSrF1/G6fanEAQVSrEzMSInPUJaTnzYx1eOfMG9J5r7aNjxo7iBjRhEi1z48Q4oSJOdppL8yHYrVJty/Ewb0NHNrbhM/78bK/1PQExk3Kwu2xYwjBu2+vxddYhyMxGUVRON7/1TRNhBComgVFUXCnZ9Hd3sKkK67lPLcg
/5qFpM6e1VNdcqIriy+hprOBa8deyfiMoV2VJPWPBcVZ5LmdjE+P/387Z6I77OfZPa8CMDv/PCZmjj3ttgKYPT6V++//v3z+gu8Cxb0eP15lYjFDaEIholiwaCqYYaoq2qipaKegePjWmxx/39EsDhCCSDiEZrGjmCpdHUf/P3v3HSfHVSV8/1ehc/fknJPyKGdZ0bItS86ZBWMw2CQBS9yFXfZ9YNldYBcwPDxiF0xeorGNwdmWrGDlnNNocs49nbsr3PePlkaSLVmSZ2SNpPryMba6u6putWYqnDr3HCvbxPKeWLRoEd/61rd47bXXeN/73nfWe6+++urgZyyW4bStZQ9/OfwKAIe6jl+RoMlbM8z7OvaSmT8VAG96OZ60ssseuBamSdPqN/jp977LX/YfImyqSK29VL95kEc+sZRP3T+ZUcVDy/SyXFlDCprs3buXKVOmvK3nu8VyJQghaDvxGoaRwKa6gRg2mw8dCPbWkFkw46p72uePaWxu6WVf1wCGmQyWFPicLCjOoio9GTFf29hN3DBwKja0k1kYQggMQDPMwX2WJYm4oV812SbdnUF2bWkCwGZXqBqbw5gJuWTlegfHLoTAv3ELsmbgNlWMqIHQdeLxEOFoEFWxkZqaDbKCQ7aRQCG8ex/jX/wNNk/y+zNMg01NOwnEQ9w+Jtl+LsuTwefmffQ9m4trGfnSnDZuKM680sMYNvs7jxCMh8jxZHHbyZ/781FkiaO7XkPz1/Hd//o28+Y+i6omLx9OZ5kY2E0NSYAiaUg2G6owiWkGW14+QNE1mG0iTIO+jn34uw9hmjo2mwdJUonEAUlCVuxoetTKNrG8J5YuXUpFRQW///3v+exnP8uUKVOA5HSd//iP/8But/PII49c2UFarimbm3bxt6OvAXBDyUyWj1ryno/h7AxzBT0RouXY3/CmleNwpQGXv3tbuLWN//361/nla6vpjOroko38gjE8cPeHufuheWTneMmxjv9XvSEFTRRFYexYq52e5cozTZ3mo38j1F+HJCuDr0uyjKK6rtqnfZtbetnd4QegJNXF/OIsylPdgycA3TTpi2o4FIXYyYBJksCQZOKDBVGTHIpCf0zDEAJ1BB3Ae7tDHD/UidfnYOL05FOKkvIMyqoyKavKpLwqC9WmvG050xAE+/uRgEQsSiQaIpEIJ9sNA0IyQTdAkUDIyJJEVNORHcl5t43+Fv5y+BU6Qt1ISEwrqKbAlwtgBUyuAqtWrWLVqlUYxqW15rPA3OLpuG0usj2Z2C/QcrGtrW2wYOTnP//5wYAJJLNMGur7UU0NWVbANBEn23dJkoxqXJvZJpFAKx31a4kE29ATQVS7L3lclhVOHXMlSbqqzz+WK+9nP/sZGzduBODAgQODr61btw6A+fPn89hjjwGgqio/+9nPWLZsGQsXLuR973sfPp+PZ555hsbGRr773e9SVlZ2JXbDcg3a2LidF46tAWBh2WyWj1pyRQLDp+u/6z4DAAEAAElEQVRJKahSBIGEoccI++txuKZe9u0feOVVvvy5L1DTN0BCsuFNyWfmrHuYvWgRH/i7KaSluC77GCzvjSEFTSZNmkRLS8twjcVieVdi4W5aa14m2HcCgUBRzm4ffDU97euNxgGJTFdyqtu8ogz6YwnmF2eds/ikKst8Ylo5Ye3sm0Zd19iwfgMLpy1EVc++IfLYFNQRUMw0HtOpO97NsYMddHeGAPD4HFRPLUSSJWRF5uY7xr/jOhRVJrvARXRHmEgsgiJMJE51EHLgtdnBCJMQNgxvKk6XytSZVcRFnFcOr2N7y14A3DYXK0YvId978a2QLVfeypUrWblyJYFA4LxtNoeqN5rg9fpOJuWkMj7r2qprMznvnX+/NN3gf549wI5XnySRSDBz5kwWL148+L4Qgg0v7CNuyMiKE4U4smLCGQFcVRbXVLaJrkXoatzIQM+RZEafHgU4b2vhq+n8Yxl5Nm7cyK9//euzXtu0aRObNm0a
/POpoAnAkiVL2LhxI//n//wf/vSnP6FpGhMnTuQ73/kODz300LCNywpYX9/W1W/hlZp1ACwpn8ctVVfm2C6EoLNhA9oZD8tkxY4sKwx0HyE9b8plHZcQgkh6KR3BEMKewowpy5k2+xbuuX8K1eNyL9t2LVfGkIImn/vc53jwwQfZuXMnM2bMGK4xWSwXRQiT/o59dDVtRNeimKaGzZ6CzeEdPHjC1fG0ryscZ2NLD4d7gozJ8PLAuGS2RZrTzgeqz90C9JQUh42Ut1Tg1jQFLzp5HueIq6Ld0TrAkQMd1B/vGSzqKssSJRUZjK3Ou+Dy/mCcbQfbmDo2l5x0NxOrx/OXvzyDjIRPVfHabDgUFSQQUrIkrsNM4I+EkGSF8umVfG/TTwknkr3ZZxROYvmoJXjsVmEuy9sd6BrgRF8YIbjqgya6ofNyzTqWlM/F67hwQbyN+9rYsnUb69a8QWG2jy9+8YtnXYA21vbS0BgEYSJLAvlkx6ozXSvZJkKY+DsP0tW8GdOIA+BJKWKgJ4Rid71jPamRfv6xjFy/+tWv+NWvfnVJy8yaNYuXX3758gzopPciYG0ZmUxh0uBPPjC/qXI+SyvmX7FgcHvdGga6DwPJc40pVBzOdExTu2zHXH9XF3/+n5/ygS9+ma6BGH/c3MHU+75Edk45i2eNYtnSUSjKlX8waRl+Qwqa3HffffzLv/wLy5Yt45vf/Ca33347JSXvfINnsQwHQ0/QWvMi4YGmwQCJojrPWxl7pD7t6wjFeLO5h2O9obNeN0yBIo+MMQ63miNdnDjSBUB6ppsx1XlUjc3G5T5/IWlNN9l1uIX//cNzrF3zEsVVU1n5yce4c0ElH7v7Hn76vSeIJOJkOhwgy5jJ2TgASEJCGAaJYD+pxeV054bREjo5nizuHreMigzrmGU5NyEEB7sDAEzMuboDJgCvnFjPpqYd1PTV87m571yzJxbXeW1rPbvW/olUj50HHrifqqqqwfeFELz81G4ME2QEdqFhnFyfiQBOr1uWTeKaeVVnm/S27aK7eTMATnc2uWVL6GxYi2lqKKoT09AATp6PTExDG9xPSZLRjcSIO/9YLBbLpZIlmYcn3cOh7uMXzFa8nIQQDHQdRCCQFQc2RwrxYPiy1ZMyDIPf/uCH/PeqVfSGExxpUln55Y8yqSqbrBm3cfPsEpz2Id1WW0a4Idc0OeUzn/kMn/nMZ877WUmS0HV9KJuzWAbJigoIJFkhLWscPS1bUdSr52lfWzDKhuYeTvSFTw4QxmX6uKEokzyv850XvkoYhklTXR/HDnUwbU4pOXnJdqansknGVOeRfUZR13Np7Q7x19e28cwzz3J8/2a0eBhV1+iP9VGS+3kA+lMKeWz2Un6+9XW6E3F8Dheny58IEoZBMB7Dq9qYP/MOZubMx5ciWFA2G1V+e50Ui+WU5mAUf0zDrsiMyTh3O96rxfGeOjY2bgfg1qrFF6zZs35PCzWHdhLytzO6PJ+Pf/zjZ71fc6ST/t4okgDV1DEkabDdtylJCHHm77WMInR6mzsxdPOc9YlGuvTciQx0HyY9bwrpuRMRwiQRH0BR7IOZJwBCgIyBaSQ489CmKHYS8QGEMJAk68LaYrFcPYQQHOk+wbjsKiRJQlXUKxIwMbQohpHA7kwlPNCIpoWx2bzYHD7EGZ8b7mv+N1ev5tv/9E8cb2ghjp3M9EKcTh/1NT18+PbxViD8OjGkM/eZUyCG87MWy7mcepInKzYkSSa/8hYMLU577avJjjmK46p52tcciHKiL4wkwYTsFG4oyiTb7bhi4xlO/b1hjh7s5MSRLmLR5N+Hx+sYDJpk5/nIzrvwDehrr73OP/7rj2hrPIZi6jiMBCUuBzeNHcMtkyYxqTwdIQQvbqqjJL+Sz08L8OShA3RFAhhxEwkpWeNGksnxevnY+MmkqTrHD6p88QNXXycly3vvQFcyy2RsphfbVZxuG0qE+fOhFwCYUzyN8Tmj3vHz4ajG2l0t
FFVN5f/7+r9RkuM+KwV/oD/KptUncGoBskLNFGotKK5ksTshIBaL4XQ6eeuvmNeVgowJjOygiRCCgZ4jhP2NFFTdevIC3EnF5A8inQw2SZJMxaSHB2uanKJpGhs2bGDhnIVvmxqpqG5k2QqYWCyWq4cQghePr2Fj4w4WlM66YLe1yzMGk4HuI3Q1bcTuTKdk/H3Jjjmmjs2RCpKUPPmcYTiyTerq6vj2V77Khg1vEjNVbI505k1exsy5y7njnkmMGptjXUteR4Z09jZN88IfsliGQSzcRWvNK7hTCsmvSB6wbXYviuoc8U/7hBA0DCTrZ5SnJacPTctLoz+WYGZBxmDR16uZYZgcP9TJsUOddHcEB193e+yMHp/L6AnvXBDLNAXHGvs5VN/LvYurkGWJta+/Rrj5EGlajOn5+dwyZjST8gtxZmaQMXM6wjQxDIHRWE9BtJOc3GL+Nb+IvT11HOroIpxI4LHbmJCXw+TcQtyaCzXSSXdTPboxHZtqnegs56ebJkd6Tk3NuXrn7AshePrQSwTjYXI8WawYfeMFl1mzs4l4Qqcwx8fj71+M/Japgls31BHt85OhRJmcFaLi8S+guJIZcrqus2H9BqYvWnhWlx0A1etDHmE1lt4qFumho34t0WAbACmZo/FlVAIMBkxOsTl82BxnB4AVTUMXbpyenBFXT8pisVguhRCCvx19nS3NuwDIcKe952OIR3poP+OYbBpxgr01hPz1lzXDXAjB5x9+hIM1deiKi4ljb2DStNsYO62K++6biMdz9V+7Wy6N9cjDMqIJYdLXvofu5s0IYWIaCQwtimJLPtWUZXXEPu0TQlDrD7OxuZeWQJRsj53Hp5QjSxI2RebWygsXPb1ayJLEnu3NhINxJFmitCKDMRPyKCpLf9sN15m6+6Ns2d/CM397hb3b1jBt0QNMGZ1NVVEaN5eWoY6qYklVFVmpqaROmkjGrJl4KyuQTnb/kYVghdpMyAaGx4aUCDK7II/ZBXlwRlFKp6KS6vNgBgLcprSgKlbAxPLOavrCxHQTn0M9Z+eqq8XWlt0c7T6BIiu8b9KdF2wvHIpqvLL+ACg2VsybeM7f35nj3Pg3NFImtVH2yAfwjTpd60TTNIy0VFyFhVdV0MA0EnS3bKWvfS/JqZ8q2UWzr/hUTovFYrkSTGHy3JFX2d6yFwmJe8bfyqyiKe/d9o0E3S3b6Gvfw5nH5PTcKTQeeuqyZJhrmoYQArvdzvo9rRRPXoE28Bzj532UojFjuOeByZSWpl/mPbeMVFbQZARoGIiwQ8lk/ECEUVlX7xPN4abFQ7TVvkokkKzS7UuvJL9i6WDA5JSR9rRPCMHxvhAbW3ppD8YAUGWJslQPhimQr/Ib9nAoTs3hLprr+7jt/onIiowkS0ydVYyWMBg1Pucdi7rGNYN9Nd2s3nSQ1a++SO3BjSSC/TiMBKI0jzTvvQDMe+B+ik3ImDmDtCmTBtP/AQKxIHX9zYzPqIQBPza3k1gsjGzoyJwMlQgGb/iEkcCUItjcTsRAP0LXka6iGzrLe8+uSBSluChOcSFfpem3pmmyrXkPAMtHLabAd+EWiB6nSqDmBfYfPMwHF38PKuaevU5No+vZpxkjukmfPpXU6gmXZezvFSEEwb4aOhs2oGvJGlO+jEpySxdic1z9xX8tFovlUpnC5NnDL7OzdT8SEvdX38b0gonv2fYTMT+Nh59BTySbJPjSK8ktSx6TTVMf9gxzIQQbNmzg+9/+Noumz+b+j3yWvoEottHzuHX2Im6YXMa0yQVI12iDBsvFGVLQpKmp6ZI+b3XWeTshBGubeuiVnKxt6qEqM8WaHwcEek/QUbcaw4gjySp5ZYtIzZ4w4r+ben+Y1+u76AonD+Q2RWJ6XjpzCjPwXsVVtQ3DpKm+j+MHO2lu6BucOtrU0E9ZZSYA4yblX3A9pmnyl+df4z9/+Ava6/ejanGcZoJcu40bx4zmnlkTyEpLBkecubmM+uxKAAZi
Aeraa6nra6K+v5meSB8An5z1Qcb8wxfRgiF+uuO31Pub8Tm82GUb4VAYn883uPyorEoennwPNt/InyJguTirVq1i1apVGIYx7OuuTPdSme7FvIrrccmyzCdnPcL2lr3MK5lxUcu8+eabHD20F4/dRnFx8eDr/r4InW0BygqdyA4HNp+XwrvvvFxDf+8Ik+6WbehaGJsjlbzyxVZ2icVyAZfz2Gu58gYDJpLEQ9V3MCX/vQ2O2xwpqDYXkiSTW7YIX3rF4HvDnWF+/Phxvv/d77Jx9RvEwjEaarpJyVjEXY/MpDjXx/Sxue+YMW25fgzpLq6srOyib2Kt7jnnVucPU+uPoAqTWn+EOn+YynTvlR7WFWXocTrq12AYcZyeHAqrbsXuujrS4QxT0BWOY1dkZuanM6swHY/t6g2WhIJxDu5ppebw6aKuAHkFKYyuzqOwJO0dlx8Ixdl5pBOApTNL0DSNn3z3mwTqm0k1ElTn5XPTqNHMKC0la9JEMubMPmv5o90n+Nux1fRF+s96XZIk8n25aIaOPTONBr2PGlsQe142huogIgQBRUNKdZ489sjsMzpY4kowOs3K5rpWrFy5kpUrVxIIBM4qVDqcrtYsk1Mcqp0FZbMu6rPBcJTvf//7ADz88MMUFRUBoGsGq184Qn9vhPiCciZ+diXx3l5Uz7lbvI90yfRtGUlWkGSFvPIlRAItZBbMsAq1WiwX4b049lqunMqMUva0H+Kh6juYlDfusm9PmAb9nQdIy61GllUkSaZw1G2oNjfyOaaUDkeGeV9fHz/+8Y955o9/ItTdhy45mTruJhbMXk5mlhebLDNz/LUzjd4ydEO6OigpKTln0MQ0Tdrb2weDJKWlpUPZzIj3biPuQgjWNnajmQI7JpqZ/HNFmmfEZ1RcTorqIL/iJqKhDrKL5iCN0Lawhik40D2AYQqm5yeDOpXpHm6tzGVCVgquq7Ct5lvFYxoHdrUC4HLbGT0+h9ETcknLOH+NB90wOVTXy9YDbax7czNNx3ezaMUjLJxahN1u5+bCYvrsTm6sGkXluLFkzJwB4ytpjPewo7+Gid0qY7OThRddNid9kX4kSaLQl0dFRgnl6cWUpRXjsiULTwoheKVmPZqh4bOf+ybOodgJJyK8UrOeUZnl1/Xvl+XCDvcEKEv14L5Kf4eP99TRE+ljbvH0i/5Z7+gN89HP/QfHj9ZSUVrIo48+OvjeprW19PdGcLntVI3LQVIUnDk5l2v4l1Wwv47OhvWk504ksyCZfeNJKcKTUnSFR2axWCwjw9T8airSS0h1Xv4piuGBZjrq15KI9WPoUbKLk1NC7c7LF4xbu3Yt/9/XvkZXUyvxmEFF6XTmTV5GSn4pN903lWprKo7lHIYUNGloaDjve7qu88orr/CZz3yGJUuW8Itf/GIomxrR3m3Evc4fpqYvjFuViQEuVaamL3zdZZsIYdLbtguHKx1fRrKgoC+jcrBjwUijmyb7OgfY3NrLQEzHqcpUZ6fgUBUkSWJG/tWRFXMmIQSdbQGOHexEUWXmL03+PWRme6meVkhBUSrFZenI79B2taM3zNaD7by5s4aDuzZQu389sd42nEKn/IHlQDJD5GOf/xz9TQ30j85hnz1CXd9+/Ls3Dq5HlU8HTQpT8vnw1AcpSy/CqZ67JXNNbz1He07gsbvfsYq6x+7maM8JanrrGZ1Vcc7PWSy90QTPHm1DVSQ+P7MKh3p1BU5C8TBPHXyBUCKMLMnMKZ52Ucs99coe9m1+AVU2+exnP4PbnQyMHj/cyfFDneiBASblSTjtV+eFpBYP0NGwnlB/HQD+rsNk5E97W0cci8Viud7ohs5LNW+wuGwuKc5kBsflDpjoWoTOxg0Eeo4BoNhc2F0Zl3Wbp9idaXSdqCclvZwFC24jp2AMVdPLuf3uiThd1vRty7ldtjxUVVW5/fbbKS4uZtasWcyZM4ePfexjl2tzV51TWSa6aeJWFYKShFeWiOrGdZVtosUD
tJ14jUiwFUVx4PIVor6l0OtIoRkmezv9bG7tIxhPZlF57ApzCzNGXAp/W7Of+kMSbRP9lFZkn/dzkXCCmiNdHDvYwUB/cn6oosrMml+G3ZE8PMxddOEAgxCCPz63hj/88Snajm5H1aLYTY0cm42FFZVMz1CwqcmbE/u0an4eWgc9DYPLy5JMUUo+FRkljM063YlDlZXBAMr5tvtKzXriehyn6iBxRhV1QxgkzqiiLksycT1uZZtY3tHB7gEASlPcV13ARAjBnw+9SCgRJtebfdGF+5o6Avz+Nz9DS8S4Yf5Mli9PBjn7e8NsXHMCM5GgMHAcc0cHoSmVpIwbezl3Y1gJ06C3fTc9rdsQpgFIZOZPI6tolhUwsVgs1z3d0Pnffc9yrKeWhv4WPj3nw8iX8dgohIm/8wBdzZsxjQQA6bmTyC6ei6I6L3l9iUSC1atXs2bNGg4cOMCGDRtYunQpN910E3Z7sinB4cOH2bVrFx/84Ac52tjH01sHeGj5I+S7c5HKq7nr/bPIL7SmmVne2WWfvDt58mRmzJjB//zP/1hBkzOczjJRSJiCOAo9UQ1VljjaG6S2P0xVxrWdbRLoOUZ7/RuYRgJZtpFTuvBdHTDfC7X9If5W0044kZyC5XOozCvMYEpuGrZ3yL64EoQQ7NzURMgvsXNTEyXlWW8LELQ2+Tm4p5Xm+tNFXVWbQsXoLMZU52Gzn/9m0TQFJ1r8bDvUwewJeYwuSWf/zp38/FtfJB4M4RUmFZmZLKgoo3psJZ0VPg6mD3DqeXeK00eOJwuXzUlFegkVGSWUpBbiUC+9571hGvRG+3CoDmL66SrqCIGGQVyPc2YZdYfqoC/aj2EaqIpVu8ByNiEEB7oCAEzMufo6p2xp3s2xnlpUWeXvJt6J7QLthU95cWMdsqLgdTv4l3/+CrIsoyUMVr9wFEMz8AXbKBYdpE+felUFTKKhTtpOvEoilqyJ5PYVkle+BIc78wqPzGKxWK68hKHxm71Pc6K3AZussmL0jZc1YALQ1bSJvvbdADg9OeSV34jLe+HObueyYcMGvvKVr9DY2Eg8oRGJaezes5c//vGPlJaW8o//+I/s2rWLl156CS0YRu93s+L+m8nN9FDy4MMsm11KRrrXmopjuSjvyV1DYWEhL7300nuxqavCmVkmXpuNmGEgIRBCkNAFUQRP7q3nnjEFTM5JuyZqY5zJ0BN0NqxloOcoAC5vLgVVt2J3pl3Zgb2DNKediGaQ6lSZV5jJ5NxUVHlkBUtOaWn009zQj6xAc0M/LY1+isvSEUIMBk+62gM01SU70OTmpzCmOpeK0dnvGCzpHYiy43An2w61c+L4UYL9nZjmnYwuSWdMeQUlNpXc0kJmVpRgn1hIZ1kaG7NcIEl4jD5MYQ6ejD8376PDcmJWFZXPz3uccCJy1uu6rrN+/XoWzV+Eqp59mPPa3VbAxHJOLcEo/piGXZEZk+G78AIjSEeom5eOvwHA8tGLyfNdXM2REy1+jjf7mbvsER5d9k9Uj01mdrU29TPQH0GJBqiKHMHu81J4z12XbfyXg6zY0OIDKDYXuSULSckaY2WYWSwWC5DQE/xq79PU9TViV2x8eOqDVGRc/i6n6bmTCPQcJbNwFum5E991xt+GDRt4/PHHCQQCZGdn4++L4JR0ZLtKSpqTQ4cO8b73vY/cnDykuMG4sul01A6wdUMdn3/fVBxXcUdLy5Vx2X9ihBDs37//oioZXy/OzDKRJAmnouARBi6Xm6huENEMBuI6L9Z0sK6xh5XTK/A5ro3vz9AT1B/4HVo8AEhkFc4kq3DWiCr2GtUMdrT3E9Z0llcmK2dnuuw8XF1Ckc+FMoIj0kIIdmxqwNBNVBvousmaF4+QnuGmelohlWOSU3VGj88lEdcZPSGP9MzzF3U1TcHuY11sP9TB4dp2Go9so373aoJd9XicKnP/KVks0pmVyaOfeogmR5SWQh+Sw0FxagGzMkooTy+hJLXgrCDJcD7J
SHOmkPaWubeappEqeynw5VrHHstF238yy2RspnfEZZC9E83Q+MP+v6KbOqOzKphXfHHthYUQvLSpHoA51fmDAROAsqosFs/Npu2ptdjRKXrgPlT3+Y8VI4EQJpFAK57UZKtkhyuDwlG34U4pGLFZjBaLxfJei+sJfrnnKRr6m3Godh6d+hBl6cNfDFsIQaD3OPFwNzml84FkgdfKqY8OqVNZIpHgK1/5CoFAgIKCAuIJA5HQEZJCJBSkf6ALXddIJBL0dvfy2F3/SEleBXmlmcxeUGYFTCzvymX9qenp6eFrX/saNTU13HLLLZdzU1eNt2aZnMkuyzgcCj67SW9UQzMFuR7HWQGTY71BCn0uvFfpL7yi2vGmlRLqb6CgahnulMIrPaRBEU1nW2s/O9r7SRgmkgSzCjLIdCWnjZSmjuwbBjiZZVLfh2qTiYdBmDqtjf2Eg3HsDnUwaOLxOZi98MK1SiQJ/vD8m2xb9xdaD25GiodRDAO3IjMpIwNF6wGSaZUV991PWtRPxckgiZXNYbma6KbJkd5TU3OurrnNDf4WusO9eO0eHpxw+0VnU9S39PDHX/2IibOXc/OsuWe9Z2oa+roXSBEh0qdPJbV6wuUY+rCJBNvoqF9LPNJLWfVDg+nevgyr6LPFYrGc6bkjr9LQ34xTdfCRaQ9Rkjb81+LxaD+d9WsJB5oB8GZU4PYVAAy5tfvq1atpbGwkOzsbSZII9IcQkoIQJqFIAFOYKLJMqjcTw9AIayEW3jOdafNHWVNxLO/akH5qKyrOfzESDAbp6+tDCIHdbucb3/jGUDZ1zXhrlsm5KJJMql0lYQrmFJ6uJB3RDJ491oYQglEZXqbmpVGR5hlxRUjfKhEbQJIUbI5kjZackgVkF88bMU/+Qgmdra197OroRzOSBT5yPA7mF2WS7rx6shRMw+T15w8TCsYRwsQQJookJbOZXDYW3Fz1jssHIwl2Helkb003n7xvMg6bwje/+VWe+Z9foCYMVAmyPR7mVZZSNXsc3tmT8OafngIwraD6cu+ixXLZtIdiJHQTn0O9KgKkZxqVWc4nZ32QuJ7A6zh32+1zeflvTxHp2Efd1h5SvfeTiOtseL2GmTeUoQ50ofn92HxeCu++8zKOfmh0LUJX0yYGug8DoCgO9ET4Co/KYrFYRq5bRy2iO9zL3eOWUZSaP6zrNk2d3tbt9LbtQggTSVLIKpyF0zN8berXrVuHYRjY7Xb8A0HMhA6SgoyEx50Kpo7T6UOWJIJRP57iBNMXjh627VuuT5et5TCA3W5n4cKF/Nu//RuzZs0ayqauCaeyTOKGgVOxoRnm4OsGye4spzt9SMQNnU0tvYzN9CFJEmFNJ9/rpCUQ5VhviGO9IVIcKlNy05icm0rqCJzCM9B9lI6GtTg9OZSMuwdJkpEVG3B5x9owEGGHksn4gQijss7/1LjeH+ZPh1vQzWSwJM/rYEFxFqMyvCM+GPVWrc0DdLYFQICJia5oSHaTTHcqoWCc/t4oXt/ZgSrDFByu62btoeMc7qilp78GV7iYfZMKmDU+j+klJfzBEEwqKmDejIlMWn4zpXMXUJJXjjqCplRZLENVnOLm72dV0R9NXHW/+wDFqQWX9PnW1lZ++9vfoioSX/vqFwF4c3UN9TU9+Psi3PfwNEZ/6QtoA35Uz8UHYt4rQpj4uw7R3bQJw0gWgU7LHk92yfwR24HNYrlWrFq1ilWrVmEYxpUeiuUimaaJfLIWX6ozhZWzPzTsNZ5C/gY66teenIIPnrRS8sqWYHcOb/Zmf38/uq7T2NjIwECQVE86DnvyuO+yu0ECCZP0nBT0nijhcHBYt2+5Pg0paFJfX3/e9+x2O9nZ2W8rwng9M4SgL6rhUBRiJwMmSQJDkombJnBGpw9FoT+mYQiBKklkux18eFIpXeE4ezv97O8eIBDX2dDUw4bmHu4dXcD47JHR8cHQ43TUryXQm+y/LkwD00i8J9klQgjWNvXQKzlZ29RD
VWbKWScGU4jBm6ICnwtVkcj1JIMllelXR6vnaCTB8UOd1Bzp4vYHJuFwquzY1ICiyNhcgj4tjCkEhpAwZC+6nqx1UlSahhCCY53NvLp/P0c66ggYXXQfqqHvzUP0NXZSPXkK1f/8EQAW3v0Qv5bsTLhpOZ7CkTOVymKB4b9w99hUPLar45xlCpMXjq1hVuHkiy76eophCr75H/+JpmnMnj2bhQsXcmR/O3XHe5BkiQU3J1OYHZkZODIzLrzCK6D52N8I+xsBcLizyCtfMpj6bbFYLq+VK1eycuVKAoEAqalX13TG61EoHuYXu//EwrI5TMkfDzDs17qmodFW+xqGFkW1e8ktXYQvo3JYtyOEYOvWrWzesgW/348sq0giOb3WjozEqXsr6eQDRAkhBGlpacM2Bsv1a0hXh6WlpcM1juuCKst8Ylo5Ye3sC3xd19iwfgMLpy1EVc/OwPDYlLd1acnxOLilIpcby7I52htiT4eflmD0rJTy9lAMuyIP1uN4L0UCrbSdeBUtEQQksopmk1U4811XyL5Udf4wtf4IqjCp9Ueo84epTPfSF02wqaWXnkicD08qRZIkHIrM41PKSbGrIz5YIkxBS5OfowfaaazrQ5zMjjlxpIvUDDfN9X04nBI7jmygrnYveiiM6vVQUTmF6WMX0lzfR0ujHy0lwM/3/pYTR9vwv3mI9t01GOEYMhJuWSFbM3Ce7KLjS8tk1oceu5K7bbGc13BduOumeZlz34bf5qZdbG7ayZ72g/zjgk/hVB0XvexvnnqFv/ztFXweB1/84hfp6QqxZX0dAGMz47iDnZA/MgLw5+NLryAaaCO7eC7peZPfs/OLxWKxXE2C8RBP7voDXaEeXq55gwk5oy66Hf2FCJF82CtJErJiI7d0EbFwJ9lFc5CV4bv/EEKwfv2bfP//ruLAvv0E+/tAgEN14vGko5zKfB68jBcISaGvux9FUVi8ePGwjcVy/bo6HqldQ1IcNlLeMo1G0xS86OR5nJfU6UOVZaqzU6jOTiGc0PGcURz29fpOmgaSgZSpeamMzfRd9ha5wjToad1OT+sOQGBzpCSLvb6HT/9OTYHSTIEdE80UvFLbSYFvgMM9QUQyzkBrMEpRSjLINBKnNZ0pHtM4tLedY4c6CAXig6/n5PsYW51H+agsXnj6AMdq9/LyG7+gp68dTANJSAhJcODQZtZveJplCz9K8aYMbntwPA3/s5qDWw6iCJBlmWxvKrdMm8Z9Dz/M6JuWDqZwWizXgz8caUNVZG6tyCXHMzJqLb2T9mAXL9esBeDmyoWXFDCJxTW+973vArB02e0UFpTwl9/twTQEBdk2vHteoHa3YMwXP4ercGRkbgghCPQcRbG58KaVAZCWU40vvRLVPvKmDlksFstIMBAL8OTOP9AT6SPV6ePx6e8ftoDJqeLbmQUzSM0aA0Bq1pjB/x4ukZjG3zbU8u2vfoP+5mMoioPZ45aw/8R2/KF+ZElGQgACBMnAiQCBIBQaoHLsaG666aZhHZPl+vSeBE38fj//9V//xb//+7+/F5u7Lp0ZMNFNgV2RQYLGgQiNAxGctk4mZqcyNTeNHM+FL7AvtibImQSCQN8JQJCaNY7cssUo6nub6XK60K5MmGTL3APdATpCMRyqQlWGh/lFmYMBk6uBaQp2b2tCmAK7Q2XUuBzGTswjIyt5s9Dc0M+6tet45oUfEImFcLt82DldbySBTl9fB395+Yc4HSozbyhjSf5YjshHmVJczN2338GtjzyMt6xsxGfbWCzDqWEgwjYlC9kfwWVTcF0FU3M0Q+MPB/6KYRqMza5ibvG0S1r++z/+X7o7WnB7fPzbv3yJDa8fJxiI4fXZKWnehCEE6dOmjJiASTzSS0f9WiLBVmx2HxWTP4is2JAk2QqYWCwWy0m1fY2sT+xmTN8ExuZW4Y8O8NNdf6Av0k+aM4XHZ7yfTHf6kLdjaFG6mjfh7zoEQG/rdlIyRw/b9aMQgkA4zs5t
m5gxeTLu1HQO1fcxcdZyHKpJ2dT3kZKaQVnxKJ569ecEw714XClndWzUTY1wNIjT7mbhzLsu6YG0xXI+l/UKMRAI8L3vfY8f/vCHBINBK2jyHlFlifeNL2YgrrGvc4C9nX4CcZ0dbf3saOtnRn4at1bmnXf5C9UEeetnITk3UpZVCqtuJRHtI2WYI83n2m7CMInoBhEt+Y9NlgbbOdtkmaikIhsGAkCCj0wuocA3soMlwYEYRw92EBiIsXTFWABcbjvTZhfjS3VSPioLVT0dEBFCsHl9DX999UkisTAeTxqKAHEy4K5pMSLxCIlEFNM0eOH1n7Fg0Xzu//RKltxzD2NvXIJsnUws16HTxzkHzoTOuCwfvquglftLx9fSFerBa/dw//gVl3ShGtcMQvZKJt9wF0vnjifVl0Ik1IQsS0zy9aAd7kh2y7nnrsu4BxfHNBL0tGynt303IJBkhbTcidY0HIvFYnkLIQSv1K6n0+zjldr1ZPsy+dmuP9AfHSDdlcrjM95PhittiNswGeg+QlfTRgw9BkBq9nhySuYPS8CkdyDKzsMdPP2nZ9j92h8JDXSzaOZyHvrY57hncRW+FeNIa7qBLikTLc1H4sdB7ln6cV7d9Dv8wW7MwbqQAlmWyUjJYdkNH6BYzedIfS/jK7KGPEbL9e1dXSHu2rWL559/ns7OTnJzc7nzzjuZNu30065YLMb3v/99vvvd7zIwMIAQgvHjxw/boC0XJ9VhY2FJFvOLM6nzh9nb6ed4b4hC3+nOAhHNwB9LkO91Dh70zlcT5K0MPUZH/Rs4PTlkFswAwOnJxunJvuSx6qYgqumDQZCoZhDWDTyqMljcVgjBL/Y1EtJ0IpqBcbKmxylpTpXmQAy3qmBTJGQELpuKXZFJGIKobp5r01ecYZg01vZy9EAHrU3+wddnzC0lNT35dzVtzrnrB5mGYOu2DfgHunC7fMjImKZOLB4hFg9jmMn6OZIkocoq/QNdbNu2gfs/8AVyq60WwZbr16njHEDcMMl2X/wUlyvlSHcNW5p3AfBg9e2X1F4YYOPeVqIJwYKb7+OLH56Jqsjc/uAkGnfXEPzT8wAUPXDfFe2WI4Qg2FdLZ+N69EQISNYvyS1bhM0xsuusWCwWy5VQ01vP8Z5aVBSO99TySs06+qMDZLjT+diM95PmHNqxMx7ppb1+DdFgOwAOV2ay+HbK0BoEhCIJ9hzvZteBFva99BT7t79Kb8CPIak47B6iCYWGmm4WLK3C7lCheC5ZQvD//uuv6IqHyoJRfOKef6Km+SB1rUeIxiO4HG4qCscxqrgaWbWjSwprnt7IuC/fZWVTW4bkkoMmX/rSl3jiiSfOeu2b3/wm//Iv/8LXv/51duzYwUMPPURjYyNCCEpKSvj617/OI488MmyDtlwaWZKoSvdSle4lnNBxqKef1O3r8rOmvptcj4MpeWlUZ/neVhNkbWM3FWlnd5WJBFpoPfEqeiJEsL+e1OzxqLZkFocQgpiezAKJajphzSB6RkZIqkNlZkHG4Gef2H6CiHbu7hclqa7BoIkkSQQSGuHE6c/aFAmXquBWFVpDMXTTxHsyc8IlDNJO1ivxx7Vz7seVFBiIcmRfO8cPdxGLaoOvF5akMXZiHt6U89/EReMaL+zdwYn6OO2BE6h2EM4Eke5uYppGMs8EVEnCY7PjcDmJ+eyIkIawt6Oo1tNay/VrsP37YCBV0DAQQQgxYo4P57Kr7SAA80tnMjqr4pKWbW3vYvX2BgCWzSlBVZLHAMk0SLzxPMJMTstJrZ4wrGO+VLFQB601LwJgc6SQW7YIX/ql7avFYrFcL4QQvFKzHs3UcWJHM3V6I/3cXLmQGYUTSR1iwATA0KNEg+3Iso2sotlk5E1BkpULL/gONu5r5dm1Jwhu+jN7d6ymKxLBlFSczlTmVi9k4bxbmLGwmnHTSpIBk5MSmoHe0o8p+1BMHVmxMaZsKmPKpr59I0Jgygp6Sz8JzcBxFWSTWkau
S/rpefHFF/n+978PQEpKCqNGjSIQCFBXV8c3v/lNxowZwyc/+UkCgQAZGRl87Wtf41Of+hR2+3vfwcVybp63HDBimokqS3SG47xa28mLNe30xRI4ZYEkR1FkJzV9YWr7w3jtKuFEgvb2Q3T31BMXPjQlH1tqFR1tYRaXnp768sSOGszzJHaUpLoGgyaSJA22/5UkcNuUZBDElvwnx312Ucb7xxaiynLyfVXBdvLCv7Y/xE/3NOBWFSQp2WLs1K2PJEm4VYWavvB5s2auhM62IPt3tQLg9toZPT6XsdV5+FLPXYhSCEFTR5DX9h1ka+dm4lIvjm6FhrZOEqaGW0hIpgkIHIqCx27HZbchJxdGMw0ipkY4YvWrt1zfTtU+Qkom8zoUhdr+kXV8OJf3T7qLHa1lTM+feEnLCSH4h3/8CnuPNHHrfZ+gvyabbX0xZs4rxb9vP7HOris6LefMYJXLl09K5mjszjQyC2ciy9ZFrsVisZxPTW89R3tO4FAcaFICt83J0Z5abh21+F0HTIQQaPEB7M40ANwpReSWLcaXXoHN4bvk9RmGydHGfjJSnGSqOorTQWG2FyEEnf2t9EVCOJxpzJq6lLvuvJcZiyZQWp6BrLz9AZ9sGNhsdjTDhLcUtT3Xgw8VE5vNjmwYWP1PLENxST89Tz75JACf+cxn+M///E8cjuST8CNHjnDffffxoQ99CF3XWbJkCX/605/IyrLmj410S8qymVOYwf7uAXa393O0N0TcMEnoJnbspOhh4rKPdU3ddAbDxGIBhKkBhSiqC1X1IgVMElJkcJ3JIIWKZppnBUDcqoLbppLlPjuI9uFJJTgUBYcqDwZQzqf4HAVcTz01PjPL5K3sikxEv3LZJn09YY4e6CA90824SfkAVIzKoqm+j6ox2RSXZyDL5x/Tm3tbWb+/lvr4XhyxBhy76ujcfZya7n4mLl3Evt06rriK2+kgRXJiO7PuCQJZgCtmEDB1TCuGabmOne6wZSYvugCvTSFmmCMuG+2tZElmdtE5nqZdwNq1azl2eD8pNhvzqwpoqu+jtcnP6PE5pE+fhqQoKA7HFZmWE/I30N28meIxdw0Wdi2ounXE/h1YLBbLSJHMMllHRItimAaSgBTFR0SL8krNekZlll/ysTQRG6CzYR3hQAuVkz84OC0yI2/yJY+toT3ArqNd7D/Shru9gQl6G7sP76dg9m0sue8WvvrhWQSXf5tXnvkL1TPvYMrMisFp6edjczn4wOdvJtQXOOt1QzfYuWMHM2bORFHPzoLxZqZic438KbiWke2Sgia7du2irKyMJ5544qyWpOPGjeMHP/gBt956KykpKTz33HP4fJceibRcGS6bwuyCDDKddmr9YRCgGSYmMsKM4VBNavpC+IxuUqUETpsgK6OYNF8WbpuKy6aQ7jw7WPGZGZUo7xAEOFOac2h38ac75ijnPTlciWwTLWFQd7ybowc66OpIZnekprsYOzEPSZJQVHmw2OtbnRktj+sJ1h98jeChNzG3H2X7iUYCsQSqLKMqCn2JPhQBJAxUVUEMfgeneq8lWw8T11GARL59xE9DsFgulzM7bDkVmUBEHwzYjrRsNIC2YCe7Wg9w66hF76pVZCKR4Ac/+AEAd9/5AL0tyemNcxdXkJ6ZDFKkT50yXMO9aFo8QGfDBoL9tQD0tG4nr3wJgHVsslgslouwoXEbO1r3YQgTWZI4VenPY3dztOcENb31Fz2V0zR1+tp309OyHSEMJEkmEmwn9RJrSXX0htl1tIvdRzox2pvJ721gXE8zG9u7+N/awwSjISb0m+RWTuTO900mJ30MK//5K5e0jfSiHNKLcs56TdM09jUfpXBihdUtx3JZXFLQpLu7m9tuu+2sgMkpc+bMAWDBggVWwOQqJIRgXVM3QoBPimJICQwhJbMUTmabZHjTWJFST2HVMmyOd76puNiAyXCMe21jN3HDwKnYBp8cCyEwSAZ/Tl2Ay5JE3NAv+9Pkns4Q
Rw60U3usG+1k/RVJliityGDsxPzTsYxz6A/G2H6og+2HOln5wGQyUpys/vmP2fmjn3C4sxsAVVHJys7mtttv5/6PfIT/2P47Gt/cQ78/RLrb+ZZVJ0+hJoJAIkFemhe5OB3d0LGp1knFcn05V1ZaQhhISNgV6Ypmo51yZtvIiqxS/njgb3SFepAkuH3MTZe8vp/+7JecqGuiuDCP3JTpaAmoHJNNrt6BHkpB9b632SXCNOhr30N36zaEqQMSGflTyS6a/Z6Ow2KxvHurVq1i1apVGMa569FZLq/+6AAvHX+DNXUb0U0dVVbxObwYUR1ZknEodsKJyEVnm4QHmumof4NEzA8kp+PklS/B4cq4pHEJIXjyuYP0+0PMPvIaRkJnfUc3G47vJRwLI8kyqRnZ3HDjfBbcXPVud99iuSIuKWiSSCRITU0953spKclIZHb2pXdOsVx5p56+OmQToSeS02SEgYSMMBM4VJPGqA19zC0XDJi8lwwh6ItqOJRkev1pAkOSiQ+2IEtyKAr9MQ1DCNTLdFO0f1cLtceSAY6UNCdjJ+YzenwOLve5M2p0w+RQbS9bD7VztKGXzIF2Buw2dh7J45bZpUydPp+6vu+gOJ3MnDaDhx59lJtXLMdut6PpJmOeHU31mKn8cPc2+iNxfA4nNkVJ9h2WJDTDIBiP47bZ+fiYqWgtY4ChFfCyWK5Gb81KO9UyHUZG7aO3to0s7S2kK9SDz+FlSfm8S15fV1cX/++/n6R/IMqi+TehJWRS011MG+2k8X9+ierxMPpLn8c2jA86IoFmMmyHiQQmkpp59hPO8EAzHQ3rSET7AHD5CsgrX4LTbU3ltViuJitXrmTlypUEAoHz3hdYLo+6viZ+sftPhBJhYnoct81FhjsdGYmB6ACQPJ9dTLaJEIL2utUMdB8GQLG5yC1ZSErWmAsGWiIxjX013Rys6+VDN1cRa2wkZdxYZk3IpbnTQ/3+fn67YROBRBxJlsnJL+CDH/wQj3/8A7g97zwFx2IZiayKOJaznr7azDCmON3rXAgTJGkw22RdUw+V6d4Rkz6tyjKfmFZO+C3dd3RdY8P6DSycthD1LRkVHpuCeo5sqUslhKCzPcjRA+1MnlE0mOo+blI+SDC2Oo/8otTzflfhqMbr25vYebgdubeLnJ46Mo5vYWfjUdrsgm98IdlaNH/qVL79/1Yxfto0ysrKzt5/ReIOpYtQQT4pqUtZtW0L7cEgZsIcnIIjSzJFqamsnD2XSR4fPqULVRkZf38Wy3vlaqh9dGbbyINdx2jwN+NUHTxYfTse+9vrOV3Id/7rCfr9QXJzyhlfPh1FkblxWRUdv/kZwhR4R1UNa8BECEFPyxacygA9LVtIyTj7CWeov45EtA9FdZFbuoCUrLEj5lxisVgsV4OS1AI8djeBeBCX6iTLnfG2hwDARWWbSJKEajs5TTN3EtnFc1HUczcjANB0g0N1vew62sWRul58gU7yexvYta4XTXKhL7yHBTePwz3Hzk+bJiLt2kaxL4/HP/ZRPvDwA9a0GctV7ZKDJidOnOA3v/nNu3rfajs8Mh1tb+Fody+KGcc0oydfPXnwlaSzsk1G4pz/FIeNFMfZB2JNU/Cik+dxDvtBOhbVqDnSxbGDHfT3JgvgOhwqcxdXApBflEp+0bmfvJxZS8QMDND66mqKGw9wsPYAL7fW4Y9FEJKE5lTZdmgHN05fiKQorLj33nOvT9dhwI/N7WS628lPlt/G9rZWdra34Y9ESHO7mZFfwKyCwmT2CSAG+hG6jmSdvCzXkZFa++iUM9tG2rERSkQwhcFNFfMZlVl+yetLJBLsOnACSZK4/Z4P4/U4mLe4gsSuzZetW054oJGwvwFTKIT9DYT8Dbi8uYPt6LOK5iJJCpmFM97xwtxisVgsSXV9Texo3ccD1bchSzKqonLrqEU8ufMP+Bznf4h5vmyTaKgTSZJxepIzA7KKZuHLqMTlzT3vGLr6Iqze0cT+Ez1I
QT/5vQ3M7m3AY8aJefLYRQmb6w5SHF1Pen46U2eX8IGPfIS80lJWrFiBqlrP6C1Xv0v+Kd60aRObNm0653uSJJ33fUmSrKDJCGFoUWTVgSTJCCFYXddKXJdwiQQ6MiAhSTImAvnkNA4hDBQ9QlzyXPE5/1eCEIL2lgGOHuig/kQPppEMKimqTOWYbCrH5rzjso0dQbYebKerL8JnHpwCpsnq/+//Y/3mLezvbMUQJrpNRs1JYe7yxXzhsc8xdeyFK5XLNhtj/uGL6KHQ4GuTgUd1PZlps2jh205WqteHbAVMLNeAi51XPxJrH73VqbaRbpuLvrgfSQLN0KnIKH1X6+sZSDBl2UpKJtfzqY/dRrrbjujpoHbdegCKHrhvWLvlCCHobtqMEDomNkwjQcOBP+DLGEXphPtPFr+2k1M6f9i2abFYLNeqvqifl46v5WDnUQDK04uZVTQFIQRvNuwgrsdxqg4ShgacPJ8Jg4ShnXE+k4nrcV6pWU9Faj49LVvp79yP05NDWfVDSJKMLKtvC5gIIYhrBk578vrRMAU7DneQOdDO9Lo3wZVKIKWYw4aHXXV72H30z8S1GKO1dr5Y8mEAPB4Pd95553v0bVksl98lBU1KSkquqxvla4UQJvFID6H+5JO/aKiD0vH34U4pxBCCMG7schTNtCEhwalMCGEic2oai4xuCOw2cdlrgoxEpilY8+JRYtHkySkz28PYiXlUjc3B7jj3r1EoqrHzSCdbD7Si19eSOdBOXdFUWrtDFOX40Ivy2dXTSsIlkz2hjLnLFvKZhz7B2LxLK45lT0vDnpZ21muapmGkpeIqLLTSIS3XrIudVz8Sax+daTDLxNBwKnYMDGRk7Iqd1bUbGZdddcnn3hc31SObghsXzKIox4epaRz/058RpiB92hRSqycM6z6EBxoJ+euRFQcyQQzDwDTiRIKtaDE/dlf6sG7PYrFYrkUxPc7aus1sbNqRbCMsScwqnML4nFEAGKZBb7QPh+ogpscHl8vCYK5HcNCI0nNG3TqHakcJt1O77zeYegwAuzMdYepIytm19rr6I8nON0c7Kc7xcM9oJ0LXyR83llvnljMqbwIDPznKG4kqdtZsZ9ehNzHMBE6XyoTJE/jYxx4nK3fkZKJbLMPpkoImDQ0Nl2kYV7eRWEXcNBKE/E2E/PWE/Q3oWuSs96PhTtwphaiyzCdnjOX4ob8RHmhEtXsH50YGgyF83tN/1hMhPKmljJ5w57DUBBmphCloaeynobaX+TdWIckSiiIzYUo+4VCCsRPzyM49fx2A5s4ga7Y30nighpzueip7GzjR3sDrbY1Ur+ijMDvZVvPer36VlxKt5Ewq4KEb7mVK/nhk6dr9Xi2WK+VK1j66GKeyTDx2NzbFhldy43K7kCTpkttGhsNhnvjRT+gNVeEVNmaUJ4usdq9bf9mm5ZzKMjGMBKahIUsGICPJMg53JjZn2rBuz2KxWK41pjDZ2bqf105sIJQIA1CVUcbtY5aS5zudzawqKp+f9zjhxOnreiEE/TUvEu2tYVxmFemjbkOSJIx4gFDbdsxwF6Yew+5MI698CZ7UksFlB0Jx9hzvZvfRTpo7g7hiQfJ7G/BtaqQ2RcLILsQW8bJsTjLr8amJY/jNt54gGovi8tgYO24ijz32GEuXLj1nd1WL5VphTTIbBiOhirgQAmHqyErywj8e7ae15sXB92XZhie1GE9aGd608rM64CixNpzhY7jsNhQ1Nrg+RQqRqpye/29gIMLHUGJt4Cx773buPRIOxjl2qINjBzsJBZPR+/JRWRSVJp+QTptz/jT5U7VKtECQnjfewLV6E6V9Hexqb2R7WyMhLYHkdlK3/nk+H/0y6e40FFXlR1/7HnbVjl2xskEslsvpva59dLHOzDLx2ZPTZWQknKoD4JLaRgL8/Oc/5/e/+RVuRwEP3P1FHCevYTNvuIF4bx+pE8YP67QcSGaZBPvrME09uU/IOE5mlkQGmggPNOJN
KxvWbVosFsu1REJiV9sBQokwWe4MbhtzI2Ozzp1lmOZMIc2ZMvjnkL+BrkA70YSKGmgnTcRRFBeNda8iCRNVsZNVOIuMgmnI8ulbv6ffqGHz/jZkLU5ufzPT+xrIS/TjdNqJ+LLYr+YRjGZhX19HYUkamdlesvLysDkEEyZO4rHHHmPx4sVWsMRyXbCCJiPAO7VofCemoREONCcL7vU34EktJr/yZgCcnmxcvnxcnly86eW4fAVnHShPOfMJoU1xYJ4xNxJMzDPmRkqSjG4k6G7ajCe1dERP1Wpr9lN/SKJtop/SivO3wTYNk6b6Po4e7KCloZ9TxccdTpWqsTmkpL5TFXGTQ3U9bD3YQXGuj9tuKCfe3c2+5/7E84cOc6SnE9XlQvZ5sKdmkD2liKp541hTv4n7J9wGgNcxvDcvFovl6lLTW8+BrqNE9RhxI4HjjHTpi20beUpTUxO//d/fEQtr3L70dmbNLmHUuORcddXtouR9Dw77+E+dQ3QtAkIgKSqGriCf3A9Nj14V5wyLxWJ5r/VE+vDa3DhtTiRJ4o6xN9HQ38Kc4mmosnLhFZA8Bnc1bSYWjxOM27CpcbqaNlNW/RBOTzay6iSvbAmS6uNQXR+jStIGa5Wk+xwIIZjdf4j8/kZkp5tOTxX17mIMp5eIEWfr7teoGlUGTANg8eLF/OhHP2LOnDnWMd1yXbGCJlfYhVo0vlUi5id0sitBZKAFIU6nm4cDLYMZD5IkUzbhwhfIQhgk4gMoih3TiJ/xOsgYmEaCM4ejKHYS8QGEMJCkkfnjI4Rg56YmQn6JnZuaKCnPOu932t0Z4vXnjwz+Ob8olTHVeVSMykJRzx057+gNs3V/Cye27CO9/QSG3c32sbNZPrcMT3kZRzGpNeK4CvIpnVCBb3I2OeMLUVSFqswybiiZeVn222KxXF2EEDxz6CVC8TCyJBPRomcFTeDi2kae8sQTT9DfF6KsZDyzZs5lwU1VRJpbcBUVXraL21O1TGyOFEw9hmr3EjuZWp4s/upKThO1sk0sFosFgKgW4426TWxu2skNpTNZMfpGAIpS8ilKyb+kdYUHGvH31BKPCbKUENGYA39PLZFAM4Wj76ahI8pf3uxgX81BonGND8zJobCvkfQZ05hdnc+U0dnY2vM59PRr7DUrUdxugpEAO3e/wKHjm1FV6A8fw5vyGQBkWWbu3LnD/p1YLCPdyLzrvY68tUXjWy8shTCRzqhz0XzseRLRvsE/2+w+vOnleNPKcKcUXfKFsSyrVEx6GEOPnvW6pmls2LCBhXMWvi1tXVHd58xaGSlaGv00N/QjK9Dc0E9Lo5/isnQMw6ThRC/xmMb4yQUA5OT7yC9KJTvPx9jqPFLTXedd784jHexYvw+OHSK3v4mqeJQjvR3s6GjhM3fMQpYlQOLx//h31Kd+DaNd6CdLn+R4srhtzI2MzqywIvMWiwWAzc27ONh1DEmScNkcpLtSB7u9n3Kx2SZbtmzhhedfI54wWbb0fdx8x3gSba3U/N9V+EaPpuzRR4a9a9apLBPT1LE5PKiq863DR1bsVraJxWKxAKZpsr11L6/XbiCcSF53d4f7Bh94XqpTWSaJRBSPXUNG4EwkiMYkdu98lTfqJzIQSqDqCXL6m5gy0ET8eIBOl51IzMA9ewH5RamIlNHM+koVu594lW17n2P/oU3IisDukJg8eTIf+9jHsNvtFx6QxXING7l3vteBt7ZoFEKnu2kzdmfG4NO7aKiDqqkfGQxS+NIriNrceNPK8KaVYXdlDPki1ObwYXOcXdhU0TR04cbpybmquq8IIdixqQFDN1FtYOgmm984QUllBieOdBOLatgdCqPH56LakvVabn9g0nnXBcmblt4tW/E//TJFHV30RcOs62hie3szEUVCdbt5Y83LLFk8D4DKykrm3beYDQ3b8Njd3Fy5gFmFU6w5nxaLZVB3qJdf7/4zhjBxqg58Di+aoV+wbeS5sk10XeffvvltImGN
CeMWUTp5LD6vyvGfPoUwBYrbdVnajPe27STQV4Oiuk6PSZwdNrGyTSwWiwWO99TxwvE1dIV6AMj2ZHL7mKWMyap81+sM9dfT33UESWhICHRThYRO1DRRjRayBmTKWnopiXbisUnYHQp+KYu2jNH01Tjw9R/j7z4yE0mWeP6F5/nN09/CMAxkBaZNm8bjjz/OjBkzrGC3xYIVNLmiTrdodCIRRggZf9dBwoEWlJNFAAGigTY8aclK19nF86yD1ztoafTTXN+H3akSDsUxhcGJo130dIWwO1TcXjtjJuRiGAL1PPcQoUiCnfua2HqslwdvHktFYSrRrm6O1x1m9YkajgcHsHk9KJnp5GVkcNddd3HjrUvpDveS7ckEYEn5PGyyjQVlswYLOlosFguAPxbgyV2/J2bEsckqLtVJXE8k3xQCDYO4HufMuZEO1UFftB/DNFCVs0/dzzzzDHWNDdhcHqbMv4M7l4+l87XXL1u3HIBEbIDWmpfQE2Ek2YZkyCeHf3XXw7JYLJbhtrFxBy8cWw2Ay+bk5sqFzC6agnKRdUvORYuHaDj0J0wjjiRA01RimhMFDXsigeSUmJ1Vg9Im0D0u+lLL6XIVE5OdSIqCDHi9dqIRDbfXTnV1NYZhMGvWLB577DGmTZs2THtvsVwbrKDJFXJmWjMSKFIcQ5cBc7C1ry+9HG96GQ531uBy1sXm+Z3KMtF1A9mU0BIgy+bgg8+b7xhHSUXmyWk0ZzNNwdG6Hvav3U7kwH6y+lvQK+ez/XAaFYWpZM6Zxd+++1/02BWcWZnMmTOHe++9lznz5rC5dRe/b3iBwt5cPj7z4ZOp9k5urlrwHn8DFovlarCpcQeBeIhxWVXcP+E23PbT0wJ1XWf9+vUsmr8IVT37FO21u98WMAFYvHgJ+b9/HU9GCTctnwRd7XStXQ9A0QP3DXu3HNPQaD72N0wjgayogBisiXU118OyWCwXZ9WqVaxatQrDMC78YQuTcseypm4j0/InsrTyBty2808FvxjRUAcNB/9EPOrHNMAMC+LCgYTAlFQcIk48AXKujdjksRzqLoSTXRrtdgVfhsbWXS/THU3hTu8UAKqqqnj66acpKysb4t5aLNcm68rlCjmVZaKoLpBkQEK1uQbrl2QXz7bSmC9SNJLgxNFudM2gub4Pp9OGFosh6Tp2jwOn24mumyiq8raASULTWfvKTto3bSelow6PFsdhGhzq7eBI5+/4zBeTT2hdubk8/rm/p7Ozk7vvvpu8/Dx2tx3giW0/JxgPAWAiiOqxIZ8MLRbLtW35qCVIksS84umkuc5uU69pGqmylwJf7kVNjexoHaCuV2Py4ofxuuzMn5RH0//7fwhTkD5tCqnVE4Z17EII2mpfJxHtw5VSSGHVcmz200GZq7kelsViuTgrV65k5cqVBAIBUlNTL7zAdcQwDba17KU92Ml9E1YAkOL08Y8LPjVsmccJw0Yg0AOGINqXYPuBNnYdaSEcjuHxupg2roiZEwqJO92kjTWhTyU714s3PcHq9c+x5lerEUKgqiqf+MQnyMjIALACJhbLO7CuXq6As4rn2TwgBLpw4nEke65r8QErjfkCTFPQXN/HsUOdNNX3YRomumai6wYut0qkcwCXFscMO3BkeAgG4uzY1EBRaRqQzNjRQ2Fqf7QK80gDmYagLxpmR08bu/u7iEkg2+1s27KRW265BYD77rsPgBO9Dfxo6y9pD3YBkO5KZcXoG6nOGWP9fVkslnPSDR1ZlpElGVmWB7slDMWxI62sf7mW9mgCXApLZ5UwsO6Nyzotp7dtB8G+GiRJpmTMXbhTCs96/2qth2WxWCxDdaynlheOraE73AvA9IJJlKUXAQw5YCJMA0lWOFDbw5qNW5mRYbB/dyP//bs3ae8JYpoCSUpm+615cz/5WT4++shSFs5zMHZKDi+++hJr1qwZrNe3cOFCHn/88cGAicVieWeXFDRpamoa0sZKSkqGtPy14swsE0mSTnYb
ODX32yqa9076e8McP9RJzZEuohFt8HWP1057SwCbHXbtXs+xw9uIxsO4HB7GjJ/N+LFzqK/p5k+/WEu96uXLD89A8bjRheB4oJsNPd3U9vegupzgsJOTlcVdd93F5MmTz9r+sZ5afrn7KSB5Aryx4gbmFU8/Z8q8xWKxQPLJ42/3PYvH7ua+8SuGpSh0YCDKxx//FJLkoHrufWTmlHDDpAIS6Tr+/QcpuOO2YZ+WE+yro7t5CwC5ZYvfFjCxWCyW61FnqJsXj7/B8Z46ADx2FzdXLqQktWBY1h+P9NJy/EX2dhSz5aDGsvQtHDpYy3d+8hqhqEZqigebero+iqYbtHUH+P6PX+REQyvrt/wShysZHLnxxhv56Ec/ypgxY4ZlbBbL9eKS7vTKysre9ZN0SZLQdf1dLXsteVuWyTlYLRrPb+3Lx+jtDgPgdNkYNS6HUeNzWP9aDXXNB3h17S/p6WnDNA2SgSjBnuNbSEvJ5pYbPoCtO4+esnIO1/cyqSob5cZF/O8zT4EkYXO7mDt3Lvfeey/z588frCdgChP55LSpUZnlFPhyKU0r4qbK+Xjs7iv0TVgslquBKUyeOvgCR3tqUWWVBaWzyPPlDGmdhmHyg+/8mobGYzicDu6blM/MKWXYVBlbeRljvvwFZHX4A7nRYBsA6bmTSM+dOOzrt1gslqtJTI/zas16trbsRgiBIsncUDqTJeXzcNmcw7KNgZ5jtNetRpg6+Y4YqJU47HFW/epNQlGNrAzvyfuE053LbKpEVoaX3r4Qb2w4itPl5qablvLYY49TVVU1LOOyWK43l3RVVVJScs4b+MbGxsH/PjW3cWBgAEgGS6wMk9PemmVyLla2CQhT0Nbip+ZIF/MWV2J3JH9Ux03Kp6m+jzHVeZSUpSMrMs0N/axbu45nXvgB0VgYj8NzVuaHbuj0D3Tyl9X/Teukm5nk7KC64iYAxs+Zw9KbbqKsrIy7776b/Pz8s5bb1LSTvR2HWDnrQ6iKiizJrJz9oSFVPLdYLNcHIQR/O/o6+zoOI0syD0++d8gBE4CNbxzj6b/+BkmWWPnpj/HphxeiRyKD71+OgAlATul8XL786/KcZLFYLG+lSDKHu2sQQjAhZzTLRy8hyz08012EaVB/7A2CPQeQ41FS8kZTMWU5pdWCb39jHU2tIVzuDCJRFYRAQqBpCULREEKYpKdm41Ql2tsC3P2RT/Ctb/2L9RDWYhmCS7qyamhoOOvPpmny0EMPEQ6H+ed//mc+9KEPkZaWBiSDJr/+9a/593//d2bMmMGf/vSn4RrzVetUlolhJLApDkxDG3zdatGYFByIcfxwJ8cPdxIKJLsx5BWkMnZiHpAMmoybdDqwIYRg8/oa/vrqk8RiEXyuNOQzou0ASMnv0x/sYc2WP3OitZJ//NpnSE9PB+A73/nOWR8XQnCg8ygv16ylP5oM/u1qP8DsoqkAVsDEYrFclNdObGBr824kJB6aeAdjsyuHvM76Ez38/Ge/IhDoo6KqmI99/KNEmpqo/e+fkrvsFrIXLRjW84UQJsBgkXJfxtD3wWKxWK5GQghq+xqpyChBlmRsio37xi9HkRUqM0qHbTvxWJA9254m2tuAU4vgaAPVSEUvtZGTbmP/vn1gGMiSHVM3SSRiRONhEnp8cB2aZqAqboQRoG7vTnRDYFOvj3sJi+VyGNLjqCeeeILnn3+enTt3Ul1dfdZ7qampfPazn+XGG29k+vTpfO973+PLX/7ykAZ7tRPCIBEfQFHsg+0Zk69f3y0add2gvqaH44c6aWseGHzd7lCoHJNDTr7vvMsOBOO8seZV+v2duJy+kzViJIQQxBNRovEw2smTiISMaRqUFI7l5H3A2zT5W3nh+Bqa/K0ApDi83FK1iGkF1edewGKxWM5hQ8M21tZvBuDu8cuYnDd+yOvUNIMXn93O5u0vgSozfemDBIIafX94CiOeINrSMuwB9q6mjcTC3RSNWoFidQazWCzXqNq+RtYn
djOmbwJjc98+haUj2MULx9Zwoq+Be8cvZ1bRFABGZ1UM6zhaOro5vvVn2OP9OOI60YM6AbmMAwZkBo6x4r5qsuJdGMiEIv3E4xHMU8FtwOlw43H6UBQVAQhJITvWjapYAROLZSiGdCf+q1/9ikWLFr0tYHKm6upqFi9ezK9//evrPmgiyyoVkx7G0KNnvX69t2iMR3XWv3ocIUCSoKA4jdETcimrykRV357VEUvoHDjSTs3azXB4H527XkUyEjhFApumIySZUCJKMJpMV5cBp2rD7fKgqRITp5aSkZl+1jp10+Cpg8+zv+MIADbFxqKy2SwsnY1dtV/278BisVw7+qJ+Xq1ZB8CtoxYPZqkNlc2mcKLlDSTFJDWnAs1dRfOLr6Bcpm45A91H6GvfA0Ak2GZlmVgslmuSEIJXatfTafbxSu16xuRUDgagQ4kwr594k+2te5N1S2SFiBa9wBovnaabrF57kNYXXqYyrx0yZPwn0unNnE7U5kOSZQL+KJFAFJvkwBCCeCwEJDOgnXY3Locb5Yzp6RICU5JRJTtC15GsjmYWy7s2pLvx2tpaJk2adMHPZWZm8uabbw5lU9cMm8OHzXF25sT11KIxEkpQc7SLUCDGDTcmI/ken4Mx1Xl4vHZGjc/Fl3L+4lmvvriDpjfeJLu7Hp+RLCwciocxTB0zHkay2ZAAj81GKC7jsdnx2B2oUvLk0WeYhCPBt61XlRU0Q0dCYnrhRG6pXEiK8/wZLhaLxXI+Ga40Hpl6P43+VhaXzx229YZCIWrrjyFUmSmLHmSsO4F6cCcCKHrgvmHtlhMNddBetwaArMKZVsDEYrFcs2p66zneU4uKwvGeWmp666lIL2Fz8y7W1G0ifjJjuTp3LCtGLSHDnTas2x8IBnnyL/tJ37Ge3L5mAtFUQuZ0wrlZCEmmpf0Yrd37mDV3Ep7UWVTMuxv18EFU1Ybb5cNuT143m6aJdEZ3Nl3XkGWFinl3I12mWlcWy/ViSL9BHo+H7du3I4Q4b0qwEIIdO3bgGebWh5arh2GYNNX3cfxQJ831fYMZJZNnFuP1JfvWL7hp1NuWE0LQ1BkkN8ON067S9MencK/bTF4wjiwLaowYBxNhDoX8xBJxAkLgtCezQhRJIt+bcnK608mq4qaJHo+TmpqKaZpsb93L+OxRg8GR28cs5eaqBRT4ct+jb8ZisVxLzuy0NSarkjFZwxNoaKrvw5fiID3Ty09/8Vs+/83fkJlVyPT2bQhTkD5tCqnVE4ZlWwBaIkTLsecRwsCbXk5W0ZxhW7fFYrGMJEIIXqlZj2bqOLGjmTqv1Kwny53O/s5k9nGBL5fbx9xERcbwNrYQhkE00EF342rGZyTYUzmVopwMGuyj6A/3c3Dv89Q17yauBZEkiY7uej76kY9SUTaNzPQ8evvbsdmcJB8LipP/BunkNPVQZIDM9Hwqy6dhGgLFqmlisbxrQwqaLF68mGeffZYvf/nLfOc730FRzp5KYRgGX/nKV6itreW+++4b0kAtVx9/X4QjBzo4caSLWFQbfD03P4XRE3JwOM7949fdH2XX0U4O7z5OW1TioRUTmTk+D3dxMX2JMG/GA+xqbyEYT0b+bYoNRZZxnJy/Odh4TTIRyJwsrUvCNFGFSZrHzQ+2/JyucA8tgXbun3AbAJnu9HOOx2KxjEyrVq1i1apVGIZxpYdCQ38zzx5+hUem3EeWZ3i6JwAM9Ed546WjmKbgjgcmsW5/B/nlE5kdqcEW9A/7tBzT1Gk9/gK6FsHhyqCg8tbBIrAWi8VyranpredozwncNhfxRAy3zcnRnhP83cS7qPc3s6xqIdMKJg4GxIeDEIKD63bA4dcwy3RsGWmU53i5YcEstmyReepbP6K1/QRurwNZlkhLS2PFihXccccd2Owq7/vIHHJKv8cXvvRZgsEAmRlZ2Ow2QsEgXp8XLaHR29dDTm4W3//u91i6dA6Kah3HLZahGFLQ5F//9V955ZVXeOKJJ3j6
6ad58MEHKS8vB5Kddp566imamprweDx84xvfGJYBX4vamv3UH5Jom+intCL7Sg9n2HS0Bji4O1lQ1e2xM2pcDqMn5JKW4X7bZ0ORBHuOd7PrYBvxo4co6KllVLAHqWIWwcgYANJnTOPlX/6cnQ21AGRkZLBixQqUHcf4n1eepj0SBlMkoyaSSTJ0YoKQEUIQSMTJd7vJ7a7nRMiB2+6mMCXvPfo2LBbLcFu5ciUrV64kEAgMtru/EtqCnfxqz5+J6XHeqN/Eg9V3vPt1nXE+KCzJYM2LRwgGg3T2HUWTJ7HjcAcAE8fko22rHfZpOV2NG4iGOpEVB0Vj7kCxajpZLJZr1GCWiaEhhCAmEqQoKUS0KHvaD/EP8z+JTRneaS29tY1s+/kfcHnbcFfYMSJ2ojEH81f8HQ6Xhz17djMQaiElzc3cuXO56667mD9/Pnb76WOx1+fgzrtvJS3jZ3z1q1+loaEBXddJJBKEwkFUVaWqqpJvfetbLFy4cFjHb7Fcr4Z0JBg3bhwvv/wy73//+2lqauJ73/veWe8LISgsLOR3v/sd48cPvXPAtUgIwc5NTYT8Ejs3NVFSnjUi2gtfSiBHmIKWJj/HD3dSUJQ62BK4YnQWrc1+Ro3NoagsHVk+936Foxrf+dFr5HWeoLS3HpuRwKZK1EZ6OX7oBT7498kbEMXh4N4HHyQlI4M77riDuXPnYmgGr+z9Mh+eOJ0f7tpCXyxMqsOBOjinU6AbGgOJOG6bjQ9NnkRKXHBD4QxuGrMQl+389VMsFovlQnrCffx81x+J6XHK0ou5e9yt73pdbz0fNNb209sdZuuulzlS+yYHmw5RPvv9TKjIYvxdi0ksvQF7+vBmyKXlTiI80Exu2WLszrRhXbfFYrGMJKeyTISAYCKEiYmJwGN3c7TnBPX9TcPWHSfh97P7f/9C/4EdeKfY8SuClzfUsG5rC8uXfpqS8WFGjfNw3333kZ+fz2233UZ29jtffy9cuJC1a9eyevVq1qxZw4EDB5g4cSJLly7lpptuOivQYrFYhmbI4dP58+dTU1PDM888w7p162hpaQGgsLCQRYsWcf/99+N0Wjem59PS6Ke5oR9ZgeaGfloa/RSXXdlpIhcbyBnoj3L8cCc1hzsJhxLJ1/oig0ETu0Nl6YqxZy1jmIKa5n7ausPcOKMYYRh0/PpXLDy6D1MIBrQIW/s62drdScg0kRSFl156iUceeQSAW265hVtuuWVwfaqqMu/f/pmJPX6Kd+3gP//v96htrkM/o6WzrMikFWey9KEbyZ69kOnjFlNZ+fZ2chaLxXIp/LEAT+76A+FEhAJfLh+ecj925d0X8z7zfFBf00tHa4BQpIfDJ95EUST+/pMPoztKmFCZBTDsARMApzuLikkPI8lv71xmsVgs14pklsk6golwsoCqBA7JjiLJKIqdcCLCKzXrGZVZPuSHmW17DnHwx08SDEaozR1g6/P1HK5pI6E5kWQ7UVFH1dhkgGT06NGMHj36otdtt9tZsWIFN998My+99BIrVqy45ptKWCxXwrDknDkcDt7//vfz/ve/fzhWd90QQrBjUwOGbqLawNBNdmxqoKg07Ypmm1wokFNzpJOjBzroaA0MvuZwqlSOyWbMhLdPdxFC0NwZZPexLnYd7SI+EEC3u5g5Phef207MMNjT3cyW3m4aAgEUlxMkiYysLJYvX878+fPfcbzZRblkF+VSMWUM1ctn8I8/+T90HGwmMhDGneohd0IRi29cwkNT7qA0rWj4viiLxXLdCsXD/GzXHxiIBchyZ/CRaQ/hHELm2pnnA0WBWFTDNAV7j7+MokjMnz+f2eVlNP/5WdKK7ge8w7YvidgAeiKEO6UQwAqYWCyWa97RnhPsajuAYRookkyqMwU9knwAKEnSYLZJTW/9kLJNjjb28YvVTbTv2cXWzh4SIoHDrmGYHmbMnMnDjzzETTctHRFZ5haL5fys/lNXUEujn+b6PuxOlWgkjs2h0Fjb
S31ND6UVmciK9J4fRM8ZyNlYf1Yg59QTUEmCotJ0Rk/IpbQi821FpvoCMXYe6WTnkU66+0Jk+9uo6qklK9RJ/P6PYhgCgJxbb+Yvv/4Fmmli93pYsGABd9xxBzfccAPqJbRIM0yDZ4+9Qmp1DjmTi3DoKikpKfTHBnA6nJSkFg7fF2WxWK5rLxxfQ0+4j1RnCo9Nfx9ex9Dqipw6HzicKvFAHEmC+saDHDqyG4fLwWdXfpqmPzxFrLOL7vVvUvqBvxuW/TCNBC3H/kYi5qdw1HJ8GVYWnsVieWcjqQj3u5HQE/xkx+9IGAkUSSHDlYZTdTBAYvAzjiFkmwSP19C1YycV738f+ek2cjJivKwbCJtEmiuX226/jcc/8QHKy8suw95ZLJbLYdiCJocPH2bz5s10d3czYcIE7rzzTiDZM1zXdWte3VucCk7ouoFdUYjHQEvEECY8/ZvdpGW4kCQJRZGZu7hicMpLT2eIN1fXoKgyqiqjqDKKIg/+uXxUFkWlyayQaCRB7bHuMz6noKgSqiqjqgpenwPPyZa/pinQEgYdrQODF+6RaBwBHD3YwbGDnYydmMwiqZ5aQHaul9HjcweXP5ejDX28se4QBT11jO2tI1XSCCQibGhpIPjH73Pvnb8DILuykkcefZTU1FSWL19O+iWmnJvC5HDXcf5y+BWO9pwAJAxhYCIP69MCi8ViOeXOMTcT0+PcNvpG0lxDK0IrhGDz+hoOHNlCU9shBgb6cDl9NDQfRlEFo6bfzObfrmZMfzvu9FQK775zWPZBCJO2E68Sj/ah2tw4vVZhbIvFcmEjpQj3u/XqiQ10hXuQJZksTwZO1YEQ4qzPvJvrx3BrK8//8P/ywutrqPX7+VrCTVV1D8vGBhn3r5/D6c5h3ry5uNznv3a2WCwj05CDJs3NzTz66KOsXbt28LUPfehDg0GTJ598kk996lO89tprLF26dKibu2aceqrodNoQCCQJZFnGFAItoaMlDOwOFcNIzrM8JRbT6OkKnXe9aemuwaBJwB9jy7q68352+twSps0pBZLtgZ/+zS78fVEScR0k0A1QFQ0EbFxTw5jqXCRJoqA4jYLitMH1JDSDg3W97DrayYTyTOZNKiDR10/W1pdZenwvSCb7ulvY2NxEfTiE6vEgBQeoq6ujoiJ5EvrkJz95yd+hEIIj3TW8XvsmbYFOuiO9CAFpTh9eu5tgIAgM7WmBxWKxnCKEGDx+uO0uPjz1gWFZ7+9+8xz//M//jD/QBcLEFKAbCRKJGHabkzm6k7S6/ciZboruv3fYuuX0tGwj2F+HJMkUjb4Dm334pvxYLBbLSCSEoL6/CUVSSHWkIEsyiZPdcwxhkDC0weO8LMnE9fgFrx9PHDrE77/3fV5e8wY94QiaZEdyKjR1vUJ2fxkZ2TksnbIYpyfnvdxVi8UyjIYUNOnr62PRokU0NDRQXV3NwoUL+fGPf3zWZx588EE+/elP87e//c0Kmpx0ZpaJy50s1uRwQWpq8kI4OBAjtzCFFfdWYxgCh+P0X1NWjpdld0/A0E103cQ0kv8+9eecgpTBz9odKhVjstE1422fM3QTp+t0oShDN9ESBlpCR5IkDNM81bAXn9dOKBg/q7aJaQpOtPjZeaST/Sd6iCd0JNMkHNWYN6kAxePmyI4dPH9wH7u7uxBOB4rLhSM9nfnz53P77bdTXFw8pO/xSPcJfrP3meR4MAHI9mTgtrnOemJgZZtYLJahMoXJUwdfoDStkLnF04dlnVrC4Bc/eZp/+fqXiMbDeD2p2O0ODMPANHX8Az0IAa+/+CQz59/AxGX3kTqxeli2Heg9QU/rdgDyKpbi8llZJhaL5doViIdwKMlCr73RfnwOLyYm4WiEln31tB1oJDwQxpPqoWBiKUWTy1FsCg7VQV+0H8M0UN/Sfnjfnj18+6tfZf++fei6SRwbqjubG2aNZsWSEior80jPrqB84h0oqtUUw2K5mg0paPKd73yHhoYGvvSl
L/Gd73wHSZLeFjRJT09n4sSJbNy4cUgDvZacmWUiSdLbbvCdLhutjX56uyNv66TjdNkoKc+4qO2kZ7rf1r3mfDJzPOQVpRKNaqgOhf7eIE4tSgwXisOFFtPZsamBwpJUXtzUwM4jnQTCcRCCjEAnkwIN5DsMRn/wM0CyPbA0awZ7dmxFzkinoqKCO++8k+XLl5OZmXmR39TZhBAE4kFSncnA0NjsSopS8qnKKONIdw3+aADXeU5KVraJxWJ5t4QQ/PXIa+xtP8SBjiOMyawkw5327tdnCo4d6mTrhhq+891/JRoPk+LNRD2jLpTN5iArswA9oREM9/OzPbv42G9+Ngx7A7FwN221rwKQkTeFtOzxw7Jei8ViGYn6on5+tvMPpLtSeXTqg3x+3uOEExG2btrCt/7tP2hpasYwDDRNw2az0b6lgc6SOr769X9izg1z8drdqIqKEIJIJILnZLaf2+Nh7/6DJHSFgqLJjJ0wmxU3ecnN1vH6HORXzCOrcBaSJF9ghBaLZaQbUtDkr3/9K2VlZXz7299+x5vQiooKNm3aNJRNXTPOlWUSTxgEouB0GTgdKja7QiymvaeddFqbBmhp6Mdmg137N9FQs4NELIjd6aN01ExmVM+hub6P1qYBOvrCRP0DjAo0MTrUhBr1s7u9hafqarkp3can/+EfAFj+4Q9RGwqybNkyxo0b9673QwhBTW89r9e+SSAe5Ms3fAJVUZElmU/NfoQTvQ387djreOzu827DyjaxWCzv1qsn1rOtZQ8SEg9OvGNIAZPWJj/bNtTR2x1m/+Et9PZ34nGnotoUJEDXNTAFQpIwTJAlCY/LR1soxNrNm7ntttuGvD8DPUcQpo4npZic0gVDXp/FYrGMVJ2hbn6+648E4smp7SEtQpozhf3b9/JPn/sKgUCAnOwcbDYbAwMDpKamomkarU0t/NPnvsKTTz7JmDFjePp3f+b5559ndGEh3/7BD5BtNo53qUxZ/HeUZ43C4SumekyQ8pJmXJ50Ckfdijet7MruvMViGTZDCpo0NjZy2223IcvvHEG12+309fUNZVPXjDOzTAxD58jxnew+uINQOIDXk8K06pmMGz0Dp9NGc33f29r9Xg6nAjnHavfy8hu/oKe3A0wdCQmBYM+xTWzcnM/ypR9h6+tOZiaOMaHpEHV9nfypro6drc0YDgc2j4c1W7aw8uS8f5vNxuc///khjau2r5HXajfQ5G8FwCartATaKUtPTu2RkHilZj1xPY5TdZAwtMFl3+3cVIvFYjllff1W1tVvAeDu8cuYnDe0rIzG2l56u8PYHSrd/uOYpondZiMeDxOOBEnEY7icHjyuFISiImQZu+IkEgvy/N9eHZagSU7JAmyOVFKzxlhPQC0WyzWrJdDOL3b9iYgWJcebxWPT3keK00cikeArX0kGTAoKCpLT0mMxlEAQ0+nE7nSSn59PQ0MDDz30EAUFBWAYJPwDdO7Zy+75t1F+00IqCtPIm3U7U8fnMTbby/hJ+fS2biUtZwJ259VXINdisZzfkIImTqeTYDB4wc81NTVdldW1h9up4EQirtPQcpS/vvwkvf0dJ1u2SYDgwKG1ZKbncdfyxynIHjss2SaabhKKJAhGEgQjWvLf4QQTq7LIy/TQ0ujn9dff4M/PP0EsHsHj9GKXFcTJsEnCNOjpa+eZF36ALD7LzvYtbK7Zh1/XUDwe1JwcqioquOOOO1ixYsWwBCPq+hp5rfZNGvqbAVBllTnFU1lUNgef43SxQsM06I324VAdxPT46RUIgYZBXI9zZiXdd5qbarFYLGfa1rKHl2uSRc6Xj1rC7KKpl7yOeEwjETfwpSanDk6bU4KsSEyeUcQvfv9vaFqc7t5WTNMgOVFTYBg6QpggBLIsI8kSAonampazitFeqlPLSpJERt7kd7UOi8ViuRrU9zfzqz1/Jq7HKUrJ5yPTHsJtdwGwevVqGhsbyc7OHjyeRvuDxHAi9QeJ2UL09vai6zqh
UAgHMCMnlxmT55BWPI/tR0061DpuvXsUK2+JUzGmHFlJdgnNKZl3pXbZYrFcRkO6axw7diy7d+8mHA4Pzu97q56eHvbt28fs2bOHsqlrgmkIAv4oze2H+eNz3ycWD+NypaDIZxRkNTV6+tr43dPf5X13f4GcfB+mIVDUsy+SNd0gEE4QOhUEORkQmViZRX5W8u/iwIkefv/aUWJx/ZzjSfHYyc1ws3l9DS+t/jmxeASfOx0FwakqKwJQJRmvO51wtJ8X1vySaWMmEfZ6ycrMYNmyZdxxxx2MHz9+2DI3OkPd/HTn74FksGRW0RQWl88lxfH2zg6qog7OTT2TruusX7+eRfMXoapn/5ifmptqsVgs59Me7OK5w8m6H4vL57KofM4lLW8aJof3tbN7WxNZOV6W31s9WLNqzsIKvvGNf2Xfga1oehxFUZElBZfDjcvuRlFUQJwMcoAQICGwKa5zng8uhr/rICF/IwWVtyArtgsvYLFYLFep4z11/GbvM+imTnl6CR+aej9O9XSb33Xr1mEYBnZ7MtChR6NENYEu2YlqGkg6pmliUxQU02RqTgF3LHqUdiWHTrub/LRUsrI16g/8AT0RpKNeUFC17ErtrsVieQ8M6c7x/vvv58tf/jJf+MIX+O///u9zTtP58pe/TCQS4aGHHhrKpq4Jiipz5/smsupXn0dSNAqKCukdiCMkEMJEkmSEbCcnP5/+vm5Wb/0tix9cRqc/QkFWMmBw4EQPv3v1KPHEuQMhqR77YNDEZpMHAyaKLON12/C57XjdNlLcdjJTkxfgW7dtwD/QicflQ0Yk/ycEuqERi0eIa1HSfNm4XCn0D3SRmlvGv3/xwyxdeuPgCWeo+qMDpLuS2Ui53myqc8fgs3tZXD5nsPDr+aQ5U0h7y2c0TSNV9lLgy8Vms24QLBbLpcn35bBs1CL6owMsq1p00csJIWiq62Pbm/UM9EcBiIQT9HT3k5WdPhhc9njcZGalk9CiZGdl47S7MPr6ECKGrJmD61NSspBVBc108djHH0RRL306TSTYRkf9WoQwGUgpJj1v0iWvw2KxWK4WXocHm6JSlVHKBybfg+0tgWK/3z/YiGFgYIDO1jaczjTsdhuGpOIyTfLT0rHpBh2hMO1mGo2OYnTFhjvLzeJbHJihLegJA5sjhYz8S89CtFgsV5chBU1WrlzJr3/9a372s5+xa9cu7r33XgBqa2v5/ve/z5///Ge2b9/OlClT+PCHPzwc4x2RVq1axapVq05Os3lnW7e9SVtbC7m5OfSHdExARpDQDcBACEGPPwGSnYaGWn744x/j/PzHKVg0EYDgQC+Hdq3DNDSEaWBTBHZFoMrJf9rHANX5yY3FeokcfwYZE8PQ6NU04uEI0cAAiWgU9dOfpuqBBxD2djBi+PsDSAiSU4VOkwAR86P4slFtEmnZGsuX3zos312Tv5XXa9+kvr+ZL8//+GCA5AOT7rFqjlgslitqcfncS5oO09sdYtuGelqb/AC43DbSchPs2vcCT9zzGj/60Y+YNm0aAB/84AdZsWIFH3nkEerr67GHIyjmGecQCQQSmt9PRE0WVL/9zuWXvA9aPEjL8RcQwsSXUUla7vC0LLZYLJaRqsCXyydnfpBMdzqKrLzt/ZSUFKLRKLW1tSTicQzdxIyHcdicCEkiocukZ7gI+sNoyMjudBIeF/MXlzGpuImB7mSdK29aGQVVy6x2whbLdWDINU1effVVHnjgATZv3syePXsA2LhxIxs3bkQIwcyZM3nuueeu6af9K1euZOXKlQQCgQvWbjmVEmgiE43ryDKY0RCxgR5MSUacDFjIsoRhaOzb8Efqb5sNJ4MmRqyX5t3PIisS8jku5IPzJwz+d7i9lW3rViM0DVPTMBMawkw+wZQk6O3pSX4uHBisX5KUDJxIkoTbZsNjc+CUJRJ6hLBhEo5cuI7NhbQMtPNa7QaO99Ql91eSqe9vZkr+hJPjswImFovl8jq6ayPx1/7C
0dwUJs5ZQluwk9W1G3mo+g4cajKL7mKPRa1N/bz87EGEAENohBN1rN+5jqPHjiJ0HVPXefEXvyT7RB35K5aTm5tLbm4uX7r3fr7wnW/TF4+QandgO5mxaUoymmEQCPlJycrhW9/61iVn9pmmTsvxFzC0KA53FgWVt1iFXy0WyzVpU9MOCn15g40CcrxZb/uMpmm88MILbNiwgUgkgqIoyAI8rlRcTvfgg0NDUgn0hwjIdiRFZdL8BXxm5RQiHWsY6O4CILtoLpmFM6xjqsVynRhyYYf8/Hw2btzIq6++yosvvkhdXR2maVJcXMzy5cu56667rBvgM5xKCRwIJRACFFnCZiQII1AQmLKCABRFRpYEGWkpVBZnDy5fkJfLjTcuxmazYbfbUVUVVQikWAzCEcaPGzf4WduxGh4ZNQabrKAqMjZZwaaquLMy8eblMvnWZLaIKbswJIVCb0oymiLJydqp4lQN1WQKo93UMTQdQ3r3EfWWQDurazdytPsEkLwhmV4wkRsrbiDDlfau12uxWCyXwjRN6v76LFmdIer++iy5Eyby811/JJyI8ErNOu4ad8slrS+/MBWXR2Lj9hc4sHctgZ5uTF1DNkxmF5dw08QxjLY56d26nfRpU4lnF7DlQDtNRgmfnDqX3+7fQUc0gikGD7zIkkS+28v7Zy5m/vz5lzQeIQTtdauJhbtQVCdFo28fLFRosVgs1wohBKtrN7KmbiMO1cEX5j123mndX/3qV1m3bh2maeJ2uTA1jVR3BqbqQEJgcioAItBNmXikn/KqKn74H59AkXQCTSEU1UlB1a1400rfu520WCxX3JCCJhs2bCAvL4/Ro0ezbNkyli07dxGkmpoa2tvbWbhw4VA2d01IS0tD0w20k1kmqqHhkcHrSwVJIuxMISGrCAEk/Nx///3ceuvpqTBFaWl87f0fINbWTrStjWhbO3roZBFUm5OxxSWDny2oruZOw8BVUIAzPx9XYT7O3FzkM7J+hBDgq8AuDEwhJYvSnhHjEmf8v27o2IUJvop31cEhpsX4yY7foZ1sAzw1v5qlFTeQ6b68LZUtFovlrY5sX4ezsQtNlXA2dvGb5/4v4VwXBb5cllWd/1xlxGLEOjup2ddCzYl+pqX2kOjqouTvHuL+D87m6Ze+RygUIF2SWTp+IosqK0n3erFnZeHMzcGZm4stNYWfvHCYlq4gnpjJDZnZjFlyG/t6uznQ1UYoHsfrcDAxp4BpWTk4JMGh9buYuGTmRe9fX/tuAj3HAInC0bdZ7S8tFss1xxQmLx57g01NOwBYVDaHFIdv8P1EIoEQAocjWQT2zjvvZM/WrSyvGo1ZNonvv/ES/lgEt0vBYZdxOGLE4zYSCYNANEi6y86PfvDdk8s7KBpzJ6rNhc3xzrX2LBbLtWdIQZPFixfz6KOP8vOf//wdP/ef//mf/OIXv7iomh/XukWLFvGTJ3+JJGs47HYcWhREMhVbFiYOLYruTEFLxDATBpPTM9CCQWy+5Emgf+cuOl5dfdY6JVnCkZWFq7AABqfYQNYNc8m6Ye47jkc3BBW+HPJdbjoiIdKdKtLboybJYlmJBPkeLxUpOeiGwHYRHRz6In4y3GkAOG1O5hVPJxAPsbTiBrI8GRf+wiwWi2WYmaZJ7V+fw2UKIk4FV9Qge3cD4t65fGT6QzhUB1owiGyzoTiTmXUDBw7S+pfn6PHr1FJEW9TgwIldrGrZwX/dciOxzk4yy0r5+7//e/SBANUeD678PJw5OSTcKew40sW8Sfk47cnT7rxJGruPymQffx0VE5vDw4zCUmYUlmAYJooikyxsYqLEw9Q++zwTFk0/Z8H1c3H58lFsLrIKZ+NJKbpcX6XFYrFcEaZp8uyRl9nZuh+AO8fezLySGUAyWPLXv/6VX/7yl9x///08+uijACxYsIBff/1f2fnSPhrSc7jj5gpe3fQH/IEuElqCcFRgmmAaNjJSs1n5dwsYnaINbtPlzX3vd9RisYwI
Q56eI4S48Icsg4pHTcfhyybU34FbTkU1NJCS9UQkwKbH8Yb76I+GyfOkUNLUTrS5Bdv45LQbd0kJ3sryk5kjBbjy83Hm5SK/yy42qiJxp7Od9KnT+M7ObQzEo6S5nNhkBdM0kWUZzTQYiMXwORx8eso0Fjo6UJV3Dph0hrpZXbuRA51H+cTMD1KWnrxov3XUYmu6lsViuaKObF+Ho6kT3a6AMDFkKGyNMmZLH+17fkW8qws9EqXk/Q+RMWM6AFFDYW9/Oru7/Ow98Ty1LYexKwKnS6WpehxTJyYLrC5evBhInhsbO4Js3NXKvuNH0A0Th13hhkkFAMypzsfb00prTwua6oDzzYuXZDTVgbOj8ZKyTdy+AionP4KsOC78YYvFYrmK6IbOHw/+jYOdx5AkiQcm3Ma0gokkEgmee+45fvWrX9HV2Uk8FOE3P3wSZ6CYeXdMo6wyk5w5s0mrG8A4EqWssJrH7v0aXb1b6IvtJBSO4fU4yUudwpJFuXg90Fy7gZyJC1FUa3qjxXI9G3LQ5GL09/fjdFqVpYUQvLajhUmLP8zOl39EZKAbh01FVVTkkwVaddOgPxHFZXdw77SFtPsKmeo4fdGbMm4sKePGDt+YdB0G/MwqK+WfHXZW7dxOeyiIYQpOFYRVZIlCn4+VM2YxLT8fMdCP0HWkcxT37Qr1sPr/Z+++46uq78ePv87d2YOEECAEwt4oQ/ZwgHvgAC0KWP050lrr6NdVtdq6qrXD1FZphUrFUdyDoWyQJYIgK4QkJIyQQXKz7jyf3x8h14QkkOQmuRnv5+NBbc58n5Pczzn3fd7n8zm8gd0n9qNOdy+bfuqIL2kiCRMhRCDpus7+D5eyP+sYO/JycZY5CTWbOT8mlvN0sCYkop3u18ljt6N0xYbV+1j8n/fZ/v0q8k/lYA0yExUXzogRw7nxxhu56KKLMJ1OXDvdXnbsP8nGXcc4mvtTp9ndO4cRHvLTTbdSirQPPyPU68ZpNKN5f3qaadAVGj8NO6w0DZPXfc5qE6/HicdVjDW4ogNEGdFBCNEebTiyjT05BzAajNw89Br6RfXivffe461/v0X2kaM4Sx1YTSGMGjidoX1GUZBj58jhfHr27sTi1RlkZzvoVH4KpzUOq6Zz2SXxRMVMw+kwERTiQukajnKdckcQR/aE0n/wYSKa8N5bCNH2NDhpcuTIkWo/l5SU1JhWyePx8OOPP7JixQp69+7duAjbEY9XkVfooEe/EcR5b+X75QvIKS1BV05O97uKUdPoEhLK+Zf8nBNDLuT76GCuS+zZbDEZzGb6/+ZBPCUlDAJucrlYtWEDazdu5MCBA/Tv358pEyZw4cSJvpEbTKFh1fpFAcgtzeebwxvYdXwf6vQ7PUPiBnBx74l0CY09c7dCCBEQb//zr7z41nvklJbiVRq6ZsKoPHydnUVc6n4euOf/ccvdv8TaORaD2YxSikMHj7B81buYLUa6dOvE1VdfyQ033EC/fv2qbdvh8vDsv7ZQ5qhIgJiMBs7r35kJw7vSIy6sWtLY7XSjFRfiMZoxVkmYQEVCRVPVE8weoxlDcRFupxtrUM3qEaV0jh1aRqk9m259LiMsOqmpTpkQQrQqE3uMJtt+nAu6n0ffTr144YUXWLLoP5wqdBJqC2PisMsZ0nsUQVYz0Z2DGTh+IP1GVHTcOqhnFIavMsixJGFQLqKiSomILsXjNmC1ejBoOppJo7zEyo+7E9B1C3s+XsP4Af3lwZ8QHViDkyY9e/as1mgsXbqUpUuXnnUdpRRz5sxpeHTtjNlk4P9uG0VxqYv8f/9AsbqU74qL2XE0i4KSUqJDQzi/WwIjw8IIGxJDp9vHEh5ixWxq3uHMLJGRWCIjAQgGZiYlcdXNN/Pll19y+eWXn3O4aF3pvPX9BxSUnQJgcOd+XNR7Il3D5N1PIUTrsWbNGh574neUlzkIt1nRzeF4NQtG5cLgtnO8tIwnU14nzWOlW/c4kpPvQdM0rr1pMj8e
up7RY4Zx5ZVXEhISAoDXq5N5opikbhWdrNosJnp1jeBEfinjh3XlgsFdCAmqvf20BlkZ//vHKc4rrDbd4/Wyfft2Ro0ahclorDYvPCaq1oQJQG7Wt5QUZqBpRkyWED/PlBBCNJ9jWYWk/6hxbGghiUn1e7BW7nZUDAWvNLIzCog7mkReoaLvFJg1axYrlyxh3KhpDOt9HgldQjB178JuPZjU/HK6BgURFFzx4G9E72h+sMXh1U2YvS669czHYFCYTF4MhoqHfl799EvzHvAaTezNtTDW7cbYyFfhhRBtX4OTJj169PAlTY4cOUJwcDAxMTXHQgewWCx0796d66+/nnvuuce/SNuJqDAbxqOZnDycSkhkJNO6dGFqnz4UFRURERGBpml4HQ68h1OJLDxOeJf+AYkzrSCTta4d9C8YzIC4PjXmF5QXEmENw2gwYtAMTOs1jr0nU7mk90S6hncJQMRCCFE3l8vF/fclU17mIDrIhjJa8GhmQKFrZjSDCYPmJqeohFdeeYHevftx882ziI6OJqpTMC+/8gfftopKnHy7+zjf7jmOvdTFE/PH0CkiCICbp/cnyGrCYDj3E8nY7nHEdq+eXHa73ezLTqXXsL7nTFj74sk7QP6x7QDE975YOisUQrRaSim2bzxCSaHG9o1H6NEr5pwVHCcLC3hr1WeEnIpk9/odrN/wOYldkrjxhl8yZkJPevXqxdJ3lnB0z2EOBndl45EynLkeoByT0UCZw+PblslsJjckHmeZm5BOGuGdynF5zJiMXkyaF4ezIjES3qkcS2eN4kIzJ4PjUZqxjuiEEB1Bg5MmGRkZvv9vMBi48cYb+fe//92UMbVrSimOf/kVutuDOaz2IcsMVivu0lKOf/kVYf37tXg5oFKKZWlrydELWJa2lv6de/tiKCgvZNXhTew4tpvrBl3K6G7DARjVdZjv/wshREM8//zzLF26lAMHDhAcHMyUKVN46aWX6NmzZ5PtY8WKFWSlZxJhtaIZDLg0Kwpwucood5bhcjsA0DQDuu5lyKDzMVap9FBKcSi7kI27jvHDoTxfJ+ihwRZyT5X7kiZ1VZY0F0fpSY4fXglAp/iRRMTIe/dCiOaVkpJCSkpKo0bFzM4sJCvjFAYjZGWcIjuzkISeUXUuv3LFbtZv2M2eH7aye9d6nOUlaAryOMiAqCIAdF3xzn43qcdCATsAsVHBjB/WlTGD4gi2/dQuK02jOCYEryOPbj2OgVHh8BrBa0J5dDTNAChCjF7iknLYeyiG6KhglLyaI0SH5ldHsG+99RZ9+tSsQhB1Kz5wkOK9+zCGhNSZDNE0DWNICMV791F84CDhA1q22iQ1P52DeWmYMHIwL43U/HQ6h3RiVfomth/9AV1VdFB4pPCoL1Ei73kKIRpr7dq1/PKXv2T06NE4nU4efvhhLrvsMnbv3o3J1DT9lX/47tvoHg8miwVdM6FrZhyOEorLCgEN0LCYrYRarDicxYQGlRMRUfHazfG8UhZ+8SMnC8p820vqFsmE4V0Z1icGk7F5X6Gsi8ddRtaBz1C6l5DIRGJ7jA9IHEKIjiU5OZnk5GTsdruvnawPpRTbNmbgcbqxOorxGE1s25hB98RIdF1x4mgRRw4XcN4FPbAFmTl87DBvLXyJbRu/x+koxoxOQmgws8eM4vJZN5MwdSLG06+wB1lMaJrGkN4xTBzelT7dI2ut+DMZ4d4rTRRkp+J1F2E0hRNutKCUoqSkhNDQivtz5XXRP6ScMeN6EB3bu9lflRdCtG5+3Y3OnTu3qeLoECqrTLwOJ2ZbELrL5ZuO14vucvmSD5rBgMfhbPFqE6UUy1LX4tY92LDg9LpZ8N0STJoR/XQHr3069eSS3pNIjOzeIjEJIdq3ZcuWVfv5zTffJCkpib179zJs2DC/t6/rOtl79mBAA03DfbrKxGoNotRRgtUSTJAtBLOmsOkluJyQvWePb9j1qHArRSUurBYTIwdUdOzaNSbU77j8VXBs
Bx5XCRZbJN36XHr6CakQQrROh1NPsnz5V6Tu30p5aSG2kEiS0kbh8bgpK/HgdlVUrsR2CcPSxc1DrzzKDytXoaGRFB7OzZdezgXX3MSO8gj+nlHA/ykLnU9v+4qJvbh2am+iwuoeNcxZforjacspKz4B3lI0wGyxYDAYUUphNIDFbKxImphsuJ1FaCU7ieg1sPlPjhCiVfPrDmvJkiUkJSXVuOGtatmyZSQlJfG///3Pn121C8rjwZWXj9FmRXeUV/nnwOB2ozsc1aYbbVZcefkVwwK3kNT8dPbnHSLYHFTRv4rykm0/TpnHQe/oRO4ePYc7Rt4sCRMhOpDFixdz1113MWrUKKxWK5qmsXDhwrOus23bNi6//HIiIyMJCQlh7NixvP/++/XaX1FRRcl1dHS0v6ED4HG5CNUM6Ch0jOiaCQ2FAY1OkV0ICw7DZDCgNCM6FTfPQTq4nU6gooPXO64ZwtN3jOXGi/q1ioQJQGyP8XTqOoru/a+S4YWFEK3a2rVrufzK6bz70Sts+3ENezJ2sf3HNXzw2as8/Pg89vy4HW95IcZT+ynSTrHguyX0nDyQrkldeej/3clv3/yQ/NFzWPSjh92H89F1xYHMU77td44KrjNhopROwfGdpO/+L+UlOYBC0wxYbBEYDLX3VaJpGkZTECWF6ZQWZTbHKRFCtCF+VZosWbKEwsJCLrzwwjqXmTZtGqdOneK///0vN9xwgz+7a/OqDu9blcfjYd3adYyYMrlGKXptw/s2F6UUXxxchdvrJtQcjBMHkdZw8soL6BzSiTtG3iyv4QjRAT3xxBNkZmYSExNDfHw8mZlnv4FcvXo1M2bMwGazMXv2bMLCwli6dCmzZs0iKyuLBx98sM51vV4vDz30EJdffjnduzdNctZis3HjQw+x8YGHcZlCQTeiUJzulqRilITTig02PBY3ljEzKSpXdK7oqoQ+3SObJJampGkGOveYEOgwhBDirNatW8ft8+8gNzefEFsYJoMR/fRzW4/upuDUcf616AniQoLoGxPLiGv74tY99Inrw3nP/JfdB0+xf1cBAFaLidED45gwvCtdOp17pDCPu5yjqV9QZj8KQHBYdzzuElyOQjTNiH56yPeKfqp0dK/7p6pvzYDH6yL3yCZCIhLlHliIDsyvpMkPP/zAsGHDsJxlCC6r1crw4cPZtWuXP7tqN6oO71vJ7XbjjYwgqFu3eo+W0NQ8upeP9y1j+9FdBJmCfBcGq8lCTHA02fYTpOan0y8mKSDxCSECZ8GCBfTt25fExEReeOEFHn300TqX9Xg83HnnnRgMBtatW8eIESMAePLJJxkzZgyPPfYYN9xwA4mJiTXWVUpx9913c+TIETZu3Nikx3D9jTfzyp9SOHjwEOFhndD1Wm5+laK8rIRuCT15/Y//R0xUcJPG0BTK7NnY81OJS5yMVscTUiGEaC1cLhePPPIIBQWnCA+JRlM6FalqHaV7cTtLcbrKKfO6cbod9OydRLfoXszv0YPuYd34w7+34/Z46RoTyoThXTl/QGdslvp/fTEaLRWJEIOJuB6TCI8dyKEdC05Pd/qWUwoMeNG9LqrmRoxGCy5nEUp50bSm6WNLCNH2+PXpP3HiBBMmnPspV7du3di6das/uxLNRFc6u3P2s+zgGg7kp6ErHY+q/jqQ1Wih1FXGstS19O3USzLtQnQwF198cb2XXbVqFWlpacyfP9+XMAGIiIjgscceY968eSxatIgnn3yy2npKKe69916+/vpr1q1bR2xsbFOFD4DZbOaay/8ff8v8HUX2fIKCwjEaf0pSe71uysvtBNlC+NkNyXSKPPcTzJbmdtrJPvgFXo8DkzmEmO5jAh2SEEKc1ddff83htHSCrOFoKECh6zrljlLKnKW+wQXMZisGi5kek2bTObEHcafvNWdO60N0uI2e8eH1vv/0uEoxmmxoBiOawUi3PpeCpmGxRQKQNGwOXk95tXXcbjfr1q1j8tjJNR5gGk3B
GAySMBGiI/OrBQgODiY/P/+cy+Xn55+1GkUExqH8DL5KXc1R+wkcHidOj4twaxhRQRG+4TSh4r3OEEsw+/MOSbWJEOKs1qxZA8D06dNrzJsxYwZQ8W57VUopkpOT+eKLL1i7di0JCQlNHld2ZiHh1l7cdO2v+firNymyn0Tpum++ZjAQERHHtZfdSbi11zmHwWxputdN1oHP8Hoc2EJiiY4/L9AhCSHEOa1evRqHw0VYcCjoXkDD5XZS4igGwGQ0EWILw2wLoagkj++2byTt6FzfK5EjB8TVe19KKez5B8hJX0NUl2HEJlSMKGYJqt6Wm61hmK1h1aYZ3W48KhhbSOeAVX0LIVovv5ImgwcPZuPGjRQUFNTZYV9BQQEbNmxgyJAh/uxKNLENmdv4/MDXAFiMZoyagVJzEJG28FqXl2oTIUR9pKamAtC3b98a87p06UJoaKhvmUrJycksWbKEzz77jKCgIE6cOAFUdARbW8Ld6XTidP5UVm2324GKJ4Vut7vG8koptq4/jMvpoXPngcy56Tkys3aRmbWb8vISgoJCSUwYSmLCcIKDbLicHrauP0xc16Et3tZVxl/1OJRSnDi8HEdpLkZTEHG9LsWrg1eveayBVlv8HYkcvxx/1f82dL326Gj2SXQdOP1ajlJgtdqwum3YzEFYLUFogK4UGga6hhpI6lr/YYwredxlnEhfRXFBGgClRUeI6T5WRhUTQjQJv5Im119/PRs3bmTOnDn873//Izi4+vvf5eXl3HrrrZSXl3f4TmBbA6WU7wvAsLgBfJ22nvO7DqVHRFfe2P4OIZbgOr8gSLWJEKI+Kke+iYio/aY3PDzct0yl119/HYBJkyZVm7569WqmTp1aYxvPP/88v/vd72pMX7FiRY3rEICuQ8ZhA043uNxgNpro03MkfXqOrLGsy+nBYIaM9GN88UU2hgDdb69cudL3/4ONJwg1nUChUeiKZf+x9YEJqgGqxt8RyfHL8TdEWVlZM0USWEopyoqpqOpTqqLT7dO3mRGhPz1sVVQM56kpMOpWGpqrLi5I43j6N3jd5YBGTPcLiOk6ShImQogm41fS5K677uLNN99k+fLl9OvXj1tuuYUBAwYAsH//fpYsWcKxY8fo378/9957b5MELBquzFXOqvRN2J3F3DLsWgDCbWE8MjkZq9HC3zYvxOlxYjNZcVXpRdyrvLiq9CJu0Aw4PU6pNhFCNKmqrwPWx6OPPsoDDzzg+9lut5OQkMD06dMJD6+9Wq5ksoM3PtzD4SOniAixoGkaSilKS0sJCQnx/VxU6qJ/jyhuv34IoXUMX9mc7AXp7P1uKYNGXk94dC9KCtM5figdiKBz4lQiYge3eEwN4Xa7WblyJZdcckmHLHGX45fjb8zxV1bLtTe6V9Grx1AM2me4vV5MRhMVKZIz7x813F43BoOBXj2GonsVRtO57zG9Hgc5GWspytsPgDUomq59ZmAL6dzkxyKE6Nj8SpoEBQWxfPlyrrvuOr777jteeeWVavOVUpx33nl89NFHtT79E83L7XWz8ch21qR/i8NTUco+tdc4uoZVvB9qM1nxeD3klxdgNVl9ywCgFG68OD1Oqqb8rSYrBeWn8OqVFz8hhPhJZYXJmdUklex2O1FR/vUVYrVasVqtNaabzeY6v6jkFBdzIKcYS4gFrKaK7giVwl0OymL0tXMW4EBOMTl2N1HRYbVuq7kopSg8sQ2bsYjCE9uI7twXs9mC0WQlImYAMV1HtGg8/jjb76IjkOOX42/I8bfXc2U0Gbh/7sWsWPonjtvzCAoOrrWKRKmKapuE8Ah+Pe9ijKb6VYh4PU7f6zid4kcSkzBWOmwVQjQLv1uW7t27s3XrVj777DOWLVtGZmYmAD169ODSSy/l6quvloqEFqYrnR3HdrMybT1Fpzva6hLWmcv6TiU+tHr23WQ08evxd1Lqql4a6vF4WLt2LVMmTsFkqv5nEmoJloSJEKJWlX2ZpKamMnJk9ddf
Tpw4QUlJCWPGtOyoL0opPt94GIfTQ5DFisvt9U336OBye33XKaNBw+H08PnGw/RPjGrR61dpUSalhRnoykhpYQalRZmERvak19CbMVtaNoEjhBD+Ukrh2LiG5KFDeW7rZorKSgizWjEbf0qKuL1eip0ugs0W7h06lPINa1Cjh9fZ9ird6xtu3WKLID7pIkzWMILDurbIMQkhOqYm+earaRpXX301V199dVNsTvghr6yA/+xcysmSPAAibeFM7zOZEfGDMdTxbmekLbxGB7But5sIQyhdw+La7RMQIUTTmzJlCs8//zwrVqxg9uzZ1eYtX77ct0xL8ngVeYUObFYT5S7vTzOUwuMFh8tbraLOZjWRX+TA41WY61Ei3hSUUuQe2YRSHnRM6Lqb3CObCIlI9A2TKYQQbYnyeHDl5tM/tiv3jRrJwh9+IKesFF1XFW2uUhgMGl1CQpg3bBj9Y7viystHeTxotdx7ltmPcvzwSrr0uoiQiIpR1sJj+rf0YQkhOiApF2hnIq3hOD0ugsw2pvUaz/iEkVIVIoRoMRdddBFJSUm888473HfffYwYMQKoeF3nueeew2KxcNtttzXJvlJSUkhJScHr9Z51ObPJwP/dNoqSsuojVHg8HtatW8fkyWNqVNSFBVsw17NEvCmUFmVSUpiOwWTDgB3dq2MvOOSrNhFCiLbGYDZjvPnnfPLxctyTQpl47WCO7c/maGo2zlIH1hAb3fp2p+uA7qSbIDtvCHdfNwPDGQkTXfeQm/UtBcd3AJCXvcWXNBFCiJbQZN+m9+7dy6ZNm8jNzWXw4MG+qhNd1/F4PLUOGyn8l1dawObsHVze70IMmgGT0cStw2cSHRxJsDko0OEJIdqBBQsWsGHDBgB2797tm7ZmzRoAJk6cyB133AGAyWRiwYIFzJgxg8mTJzN79mzCwsJYunQpmZmZvPzyy/Ts2bNJ4kpOTiY5ORm73V7naD2VosJsRJ3Rsavb7SYyGLp3Dg1oRZ3X4+L44W/wehygGTFoHpQyoHSPr9pEXnMVQrQ1Sim+2J1LTlwuhBoxeIMIHzuQ8LED8Oo6RoMB0ChGoRvLwZjLFz/kMnDIT21eeUkOx9JW4CovACAidhBxiZMDeFRCiI7I76RJVlYW8+fPZ/Xq1b5pc+fO9SVN3nzzTe69915WrFjBRRdd5O/uxGklzlK+PryBrdk70ZVOl9BYRnUbDkD3iPgARyeEaE82bNjAokWLqk3buHEjGzdu9P1cmTQBmDZtGhs2bOCpp57ivffew+12M3ToUF588UVmzZrVYnG3Rkrp1YbBPLLvI+z5h3CV51dM0CqqZsyWEAxGCyWF6VJtIoRokzxexdGSbAgpAK8ZpUBRMVqaUqAr4PTPeM0QUsCx0mw83pGYDDp5R7eSd3QboDCZg+mSdBFhUUmBOhwhRAfmV9KkoKCAKVOmkJGRwZAhQ5g8eTJ///vfqy1z00038Ytf/IJPP/1UkiZNwOlxsT5zC+sytviGB+4f05vuEdIBlhCieSxcuJCFCxc2aJ0xY8bw1VdfNU9AbYTSvTjK8nCUnsRRmoOj5CQeTzl9zrv9p8oRzYDHVdFht8Fkw2Aw4/I4MZlDQNNwe8ql2kQI0SaZjBrdB+VTeFIjzPLTKJoKKCstJTjEVmXwYQvFrhK6J+ZjMmqUnEon7+hWAMI79aNLz6kYpYJaCBEgfiVNXnzxRTIyMnjooYd48cUX0TStRtIkKiqKoUOH+kq7RePoSmdL9vd8k7aRElcpUFFRcnnfaSRFJwY4OiGEaHvK7FlEm/dSZh9KRKeme3pZcPx7ivL24yzLQym9xnyPqwSztWI0nLCoJApzdmOyhGI0WVFKoco9oGlomobRFCTVJkKINsmreyl2FxFsseHF45seg5fJoYo9ykkeRt/0YIuNYrcdr+4lNCqJyM6DCQlPkM5ehRAB51fS5JNPPqFnz5688MILZ30ClpSUVK2MW1SXVpDJWtcO+hcMZkBcn1qX
0dDYdXwvJa5SOgVHMaPPFIbGDZAnj0II0QhKKfKyv8VmLCIv+1vCo3vVuz1VSsdVfory0hwcJTk4Sk+SMOBajCYrAG5XMY7SkwAYjVZsoXHYQuIICu2MLSQOkyXUF0Nhzm6U8mIw1t7vl8FokWoTIUSbZDKa+PX4Oyl1lfmmKaU4lfoF5fmpDOzUh6i+V6BpGl6nndIT3xPXa5pvAIP4pIsDFboQQlTjV9IkMzOTK664AoPh7CMMWCwWCgoK/NlVu6WUYlnaWnL0ApalraV/596+m+LDBUfoEhZLsDkITdO4vN+FHC0+wZhuIzAajOfYshBCtG/1HT2nNqVFmZQWZqArI6WFGees5CgrPkZxfirlpSdxlJ5E6Z5q8x1luYSEdwcgImYAQaFdsIXEYbaG15noqBwxx2gKqnMZqTYRQrRlkbZwIm3hvp9LCjMoKM1BKROqNIcI3YHbaefkkfWge3Hk7ILQSwIYsRBC1ORX0sRms1FcXHzO5Y4cOXLOkQ06qtT8dA7mpWHCyMG8NFLz0wm3hrIsdQ3789KY3PMCLu93IQA9IrvRI7JbgCMWQojWoSGj51SllCL3yCaU8qBjRqmKUWqCw3vgcdlxlORQXnqSqM5DsARFAeAszaXgxE7fNgwGM7aQzthCO2ML6Yw1KNo3zxZSMa0+MXi9LsxGK/rpPqqUUoCO7nX7EimaZsDjdUm1iRCiTTuz7dV1Fxl73sdosqJpGsHh3YntfkGgwxRCiBr8SpoMGDCAHTt2UFpaSkhISK3L5OXlsWvXLi64QBrBMymlWJa6FrfuwYYFp9fNG9v/i0kzgVbxhFFXKtBhCiFEu1JZ4WEwWtEoRSkjhbk/Ur7lr9USEtagKF/SJDi8O1FdhhMUUvGqjSUostooOA2llBeXswij0YLudVaZDga86F4XVXMjRqMFl7MIpbxomt8D3wkhRIv7qe21oVGC7vWge5wYQmLp0utiouKG+tWuCiFEc/HrzuuGG27g4Ycf5oEHHuD111+v9TWdhx9+mLKysg4/zGRtUvPT2Z93iCCTFbuzBLfHw7HicjqHxDCq23Au7TOFmJDoc29ICCFEvVQ+6dR1DwajCaPmwusxADqu8gKswTHYQjoTFBqHpUr1iDW4E116Tm2yOAwGE0nD5uD1lFeb7na7WbduHZPHTsZsNlebZzQFYzBIwkQI0fZUbXvRwKi5UMqApmlYgzoRFTdMquiEEK2WX3dfycnJLFq0iAULFvDdd98xc+ZMANLS0vjTn/7EBx98wNatWxkxYgTz5s1rinjbDV+VideNBw0nLgwYMGgG4kJj+dmwa+XiIYQQTaxaPyIGEwoDJnPFUJiaBt37XUlYdNONpHM2ZmuYbxSdSka3G48KxhbSuUbSRAgh2qoz216XoxizNQyDwUh58VHps0kI0ar53afJ8uXLufHGG9m0aRPff/89ABs2bGDDhg0opRg9ejQff/yx3PydobLKJMQSjEkzUuYsp1NwFJqmkVV0jNT8dPrFtMyNuxBCdARVn3SazRWvlHqVzZe4cDuLyMveTGhU/UfSEUIIcXa1tb0eZSPkdMLa7XFIn01CiFbN7zrf+Ph4NmzYwPLly/niiy84fPgwuq6TkJDAZZddxjXXXCMN4BmqVpmEWSouHqFaELbTw1WWuspYlrqWvp3kxl0IIerS0NFzzhytRlXpM0pGqRFCiOZRe9tb2dG1tL1CiNavyV6OnjFjBjNmzGiqzbVrVatMartxD7EEsz/vkFSbCCHEWTRk9JzannSeyWC04PaUyxNPIYRoItL2CiHaA+miuoVVrTKxGi21LmM1WnB73SxLXVstoSKEEKJxznzSWZszn3gKIYTwj7S9Qoj2oEkqTfLy8liwYAFr1qwhOzsbpRTdu3dn2rRp3H777XTu3LkpdtMunFllUhupNhFCiKZT+aTT63VhNlrRvW7fdNDRvW5fe6xpBjxelzzxFEKIOtT31Uhpe4UQ7YXfSZNPPvmE+fPn
U1RUVK0qYt++fXz99de88MILvPXWW1x33XX+7qrNq6wycXqc2ExWXFUuHl7lxVXl4mHQDDg9TunbRAgh/KSUF5ezCKPRgu51VpkOBrzoXhdVm1ij0YLLWYRSXjRNhvgVQoiq6vtqpLS9Qoj2wq8WacuWLdx44414PB5GjRrFbbfdRq9evQDIyMjgP//5D9u2bWPWrFmsX7+eCy64oEmCbqu8upf88gKsJisOz08XD5TCjRenx0nVq4fVZKWg/BRe3YvJKBcPIYRoDIPBRNKwOXg95dWmu91u1q1bx+Sxk2uM8GY0BWMwSLsrhBCNJW2vEKK98KtVeuaZZ/B6vfzxj3/kwQcfrDE/OTmZV199lQcffJBnn32Wzz//3J/dtXkmo4lfj7+TUldZtekej4e1a9cyZeIUTKbqv5JQS7AkTIQQwk9ma5hvaOFKRrcbjwrGFtK5xo27EEII/0nbK4RoD/z6Nr5p0yaGDBlSa8Kk0q9//WsWLlzIxo0b/dlVuxFpCyfSFl5tmtvtJsIQStewOLl4CCGEEEIIIYQQrYRfo+e43W6GDh16zuWGDBmC2+32Z1dCCCFENSkpKQwaNIjRo0cHOhQhhBBCCNFO+ZU0GTBgAFlZWedc7ujRo/Tv39+fXQkhhBDVJCcns3fvXrZt2xboUIQQQgghRDvl1+s5d911F3fffXdFfxxTptS6zNq1a1m/fj2vv/66P7tq1SqHXvN4PADY7fYGre92uykrK8Nut7ea13NaY0wN0dbj95ccvxx/Y46/su2qOhJaW1AZr7S9gdfW4/eXHL8cv7S959Ya/05aY0wN0dbj95ccvxx/c7e9mvKzhX7wwQf55z//yd13311t9Jz09HTefvttXn/9de666y5eeeUVf3bTJmRnZ5OQkBDoMIQQwi9ZWVl079490GHUm7S9Qoj2QNpeIYRoefVpe/1KmhiNxsauiqZpvsqM9kLXdY4dO0ZYWBha1YHnz8Fut5OQkEBWVhbh4eHnXqEFtMaYGqKtx+8vOX45/sYcv1KK4uJiunbtisHg19ubLUra3tajrcfvLzl+OX5pe8+tNf6dtMaYGqKtx+8vOX45/uZue/16PcefIpW2VoJYHwaDwa8nBOHh4a3uD701xtQQbT1+f8nxy/E39PgjIiKaKZrmI21v69PW4/eXHL8cv7S959Ya/05aY0wN0dbj95ccvxx/c7W9fiVNdF33Z3UhhBBCCCGEEEKIVqvt1AAKIYQQQgghhBBCtCC/kiYul6veyx45csSfXbWoe+65B03TeO2111pkf1arlaeeegqr1doi+6uP1hhTQ7T1+P0lxy/H35GPv75a43lqjTE1RFuP319y/HL8Hfn466s1nqfWGFNDtPX4/SXHL8ff3MfvV0ewo0aN4oMPPvCNmFOXTz/9lNtvv528vLzG7qrFfP755zzxxBPk5uby6KOP8otf/CLQIQkhhBBCCCGEECIA/Ko02bFjByNHjuTDDz+sdb7X6+XBBx/kuuuuo7Cw0J9dtYicnBzuuece3n777Q45xrUQQgghhBBCCCF+4ldHsI8++igvvPACN954I7/85S95+eWXMZkqNpmVlcWsWbPYsmULUVFRLFy40K9AFy9ezPr16/nuu+/YvXs3LpeLt956i3nz5tW5zrZt23jqqafYtGkTbreboUOH8sADD3DTTTfVuvz8+fO57777GDp0aKNibOzQa0II0Rp0tGEvhRCiNZC2VwghWl6LDTn8hz/8gUmTJnHrrbfyt7/9jc2bN/Pee++xe/du5s2bR0FBAePGjePdd98lISHBn13xxBNPkJmZSUxMDPHx8WRmZp51+dWrVzNjxgxsNhuzZ88mLCyMpUuXMmvWLLKysnjwwQerLf/aa69RWlpaY3pDHDt2zHecERERGI3GRm9LCFF/ZWVlOByOQIfRbmRlZfk1jGRLk7ZXtDRd1ykuLsbr9QY6FNGOtOW2NywsTKq0hWhmDoeDsrKyQIfR7tSn7fUr
aQJw6aWX8v333zN79mw2bdrE0KFDKS0tBeChhx7i+eefb5Ib2AULFtC3b18SExN54YUXePTRR+tc1uPxcOedd2IwGFi3bh0jRowA4Mknn2TMmDE89thj3HDDDSQmJgKwf/9+nn32WbZs2eJXhj8sLAyARx55hGnTptV7nGiv10t2djbdu3dvNTf7rTGmhmjr8furIx2/ruvs2bOH7Oxs5s+fT6dOnXC73axYsYLp06d3yJu4xh6/3W4nISHB15a1FZXxPvzww0yZMoXo6Oh6PfVsjZ+T1hhTQ7T1+OvL4XCwbt064uPjmTVrlm+6tD1y/B2x7X3ooYcYP348Xbp0kbY3QNp6/P7qCMevlCI1NZWDBw8ya9Ys3/dYkLa3Jdpev5MmAN27d2fx4sWcd955FBUVoWka8+fP56WXXmqKzQNw8cUX13vZVatWkZaWxvz5830JE6h4AvnYY48xb948Fi1axJNPPgnA5s2byc3NpU+fPr5lvV4vv/rVr1iwYAE7d+6s1341TSM4OJjx48czffr0esfr9XpJTU2lb9++reaD3hpjaoi2Hr+/Otrxjxw5kueff56ioiJ69eqF2+0mODiY8PDwDnvx8Of421qZtaZpmEwmRowYwRVXXFHv9Vrj56Q1xtQQbT3+hrDZbKxcubLaqwnS9sjxd7S2F6Bv375cd9119V6vNbYTrTGmhmjr8furoxz/BRdcwMsvv0xBQUG17iSk7W3+trdJkibLly/n1ltvxW63M2jQIA4ePMhbb72FruukpKQQFBTUFLuptzVr1gDUmriYMWMGAGvXrvVNu/baaxk1alSN5ebNm8f8+fMbtG+j0dhhh3sSIlCsVitGoxG32x3oUESAGI3GFr/WiI4tKCgIr9eLruvt+iZdiHOx2WyBDkGIDsFgMGCz2eR+NwD8Sprous4TTzzBSy+9hK7rPPDAA7z44ots3ryZ2bNns2jRIrZv384HH3xA//79myrmc0pNTQUqMt9n6tKlC6Ghob5lACIjI4mMjKy2nNlsJj4+vlr1yZmcTidOp9P3s91uByrOS0Pec9Z1vdp/W4PWGFNDtPX4/dURj1/XdTweD26323cx6agXlcYef0c9X0IIIeovJSWFlJSUet3r6rrOK6+8wuLFi0lNTaW8vJxu3brx9ddft0CkFZ555hmefvppdu3aVe3p/JlPl00mExEREcTHxzNy5EiuuuoqrrnmGt8gF2fSNI3ExEQyMjJqzPvrX//KP//5Tw4dOoTL5WLKlCm+h7pnm9eenXm+NU0jPDycoUOHMnfuXH7+85+3uWqr5nD8+HGSkpKYP38+f//73wMdjjjNr6TJtGnT2LBhAxERESxcuJCrr74agAkTJrBz507mzJnD8uXLGTVqFH//+9+59dZbmyTocykqKgIqXsepTXh4uG8Zfzz//PP87ne/qzYtLCyMY8eOVUvKVKXrOm+99RafffYZmZmZOBwO4uLiWLNmDWlpaX7HVB+VF7uPP/6Yfv36+aYPHDjwnOtee+21PP/889XW6dq1K99880215Ro6vbk09pw2Js7XXnuNlJQUnnvuuWplqmeeV03TCAkJISkpicsvv5xbbrml2UrpWupvqqnVdi5PnjzJ9OnTue6663jqqadqrHPixAk2btzIyZMnfdNWrlzZYjG3Rg09/vbcuVhbunGHipv32NhYxo0bx4MPPsj48eNbLM7GKi0t5fXXX+eDDz4gIyODU6dOERISwoABA7jkkku444476NGjR7V1zvalI9DkxlWI2iUnJ5OcnIzdbq/zXrvSU089xe9//3vi4uK46qqrCAoKatEHqTk5Ofzxj3/khhtuqHN0zLlz56KUorCw0NdvxH/+8x8WLVpEnz59+O9//8uYMWPqvc8PP/yQX/3qV0RFRXH11Vf72sFzzWvLMjIy6NWrV70SQHPnzgUqXqlJS0tj48aNbNiwgW+++YYlS5a0QLStW3x8PP/v//0//v73v3P//fdX+64m
AsevpMn69esZNWoU77//Pj179qw2r1OnTnz11Vc899xzPPXUU8yfP7/FkiZNoT43cI8++igPPPCA7+fK15O6du1aa5ULVHRG+/LLL9d68ejdu3ezDzWXk5PDwoULuf766+t89/+2227zDcF05jByEyZMqHFsZrO51uNt6PSmpOs6aWlpfp3ThsYZHR0NQFxcXK3r3XbbbUDFRSIjI4Nvv/2WH374ga1bt/Lll1/W+SSjITIyMujTpw+TJ0/mn//8Z5P8TVXd5qpVq/yOsT5qO5d9+/bl//2//8frr7/Ok08+WeMi0qVLFyZMmMCoUaNwu92sXLmSSy65pMO+29mY46+slmuP2tKNu91uR9M0fvjhBz788EM++ugjFi9ezC233NJi8Vaq743wpk2buP766zlx4gRBQUGMHTuWLl26UFRUxLZt29i8eTMvvfQSn3/+eYP6KAskuXEVwj/l5eX8+c9/JiIigt27dxMbGwv81P9ES3juuecoKSk56wASCxcurNEnRlpaGo899hjvv/8+06ZNY+PGjdX6SQTYt29frdfYjz/+GID//e9/XHjhhfWe11EsXLiw2s8rV67k8ssv59133+VnP/sZV155ZWACa0V+85vf8Nprr/Hb3/6W9957L9DhCPxMmvziF7/glVdeOetN+WOPPcbEiRNb9GavMutdVzWJ3W4nKirK7/1YrdZa+y8xGAy1vt9cXl7OX//61zovHnWt15RefPFFSkpKeOyxx+rc16JFixrcoVJdy5w5vfIC01Lvfzf2nDYmzsrkRF37XLRoUbWft2zZwtSpU1m1ahUffPABc+bMaXCcZzpzv03xN1W5vqZpLfp7q/xv1X3+3//9HykpKTz99NM1LiIGgwGTyVStPTKbzR0yaVKpocffXs9VW71x1zSNxx57jBdffJH77ruPG2+8sVX+jnbu3MlFF12Ew+Hg4YcfZvbs2QwfPtz32dV1nY8//pjf/OY3ZGdnV1u3ri8drYXcuArReJs2baKkpIQ5c+b42t2WVFZWxqJFixgyZAjnnXdeg9bt3bs37733HmFhYfzrX//i9ttvZ8eOHdWWqatCpLKdS0pKatC8juqSSy7h1ltv5a233uLjjz+WpAnQrVs3pk2bxkcffUROTg5xcXGBDqnD8+sR9F//+td63exMnjy53iPQNIXKJ9O13QyfOHGCkpKSZq90qE3lxeOqq65qcxePpjJgwAB69+4dkH03REvEecEFFzBv3jygojNlcW5nXkSEqI+22vYaDAaeeeYZTCYT+fn5/Pjjj80YZeMopbj11ltxOBw8/fTTPP/88wQHB1dbxmAwMHPmTL777rsana639muCtDlCNNyaNWvQNM1XVbZ48WI0TUPTNB5//PEWi+ODDz6gqKiIm2++udHbeOWVVwgJCeH7779nw4YN1eZpmlat0v7pp59G0zRWr14NQK9evXzHPXXq1DrnVa3iy8rK4he/+AW9e/fGZrMRHR3NlVdeyaZNm2rElpGR4du23W7nhRdeoE+fPpjNZu6///5qyzZ2u+Xl5TzyyCMkJiZitVrp06cPL774Ikqpasfdq1cvoGKgjcrj0jTNd597LpXXxqysrBrzvv32W6655hpiY2OxWq307NmTe++9l2PHjvmWcTgcDB8+vNbrybXXXoumaUycOLHGvFGjRmEwGMjNza0xr77n7MzfwwMPPECvXr1q/B727NnDnDlzSEpKwmazERsby4gRI7j//vs5fvx4jf3fcsstuN3uGpU5IjCa912QKmJiYlpqV0yZMgWAFStW1JhX+eW0cpmW0J4uHv468wJzZkPzq1/9ioSEBGw2GwMHDuTVV1+tszPThjY+9Wl064qzqk8//ZRx48YRHBxMp06duP766zl48GCjzsfgwYMBqvXD0di4q1601q1bx8CBAzGZTDUuWl988QW33347AwcOJDw8nJCQEIYPH85zzz1XrWPjM7d5rgthQy7IlRpzLuUiIuqrPbS9FovF
Vz3p8XhqzA/0jfCyZcvYs2cP3bt3P+c5jYiIYMiQIdWmna2t/fDDDxk7dizBwcHExMRw4403cujQId8XkzPbgIa0bWeej7Pd6EqbI0TDmEwm5s6dS2JiIgBXXnklc+fOZe7cuVx22WUtFsfnn38OwNSpUxu9jYiICF/MlQmPuowYMYK5c+f6qgKuv/5633Ffeumldc7r0qULUHG/N3z4cFJSUjCbzVxxxRUMGTKE5cuXM3ny5Dqr3crLy7nwwgv5+OOPGT58OFdffXW1ivrGbtflcjF9+nTefPNNRo0axbRp0zh69CiPPPIIv/3tb6sd9/XXXw9UvFZdeVxz586tNVFRm+LiYoAaFfyLFy9m0qRJfPrpp/Tv35+ZM2ditVp5/fXXOf/889m/fz9QMYLTsGHDyMzMrNbFgq7rrFu3DoBt27ZV67utqKiI77//nkGDBtV4oNKYc1ZeXs6UKVNYuHAhI0aMqPZ7+O677xg9ejT//e9/CQsL45prrmHs2LG43W7+8pe/cODAgRrbq/y7/eKLL+p1DkUzUw2waNEitXHjxlrnFRUVqfLy8lrnvfPOO+rXv/51Q3Z1Vs8//7wC1FtvvVXrfLfbrZKSkpTValXff/+9b3phYaHq16+fslgsKj09vcniqVRUVKTCwsLU8uXLq01fv369mjt3rkpMTFSAuvLKK9XcuXPV3Llz1fr165XH41H79u1THo+nyWOq6oYbblBAnb9DQFX+SdQ3JkAlJiY2enp6eroC1NixY9XIkSNVZGSkmjlzprryyitVUFCQAtTcuXNrbGf79u3KZrMpQA0bNkzddNNN6sorr1SDBg1SgPr666+rxf/2228ro9GoADVhwgQ1e/Zs1a9fPwWouLg4tW/fvnrF//rrrytAaZqmJk+erGbNmqUSExNVRESEmjNnTq1/l1XP65mee+45Bag5c+bUOr8hcX/00Ufq+uuv98279tpr1W233abmzp2r3nzzTd9ycXFxKjw8XI0fP17ddNNNasaMGSoqKkoB6sILL6z2Oz9zm5V/t2duc9OmTb5t9O/fX82cOVNNmjRJmUwmZTQa1bvvvtsk51IppdLS0hSgJk2aVG36M888o7Zu3aqUUsrlcqmPP/5YuVyuWs9re9fY4y8qKlKAKioqaqbImkdRUZGyWq3qww8/rDa9PbS9hw8fVoAym83q1KlT1dZr6Oeusr0dN26cmjhxooqOjlYzZ85UM2bM8LWnjz/+uG/5+nz+k5OTFeC7xjf0nNbV1v75z39WgDIYDGrq1Klq9uzZqlevXioqKkrddttttbYPDWnbqp6PMWPGqBEjRqioqCh17bXXqpkzZ6qnn37at1xdbc7OnTvVU089VW270vbI8Xe0thdQixYtqnX+eeedpwCVn59fbXpLtb1xcXHKZDKpsrKyWufX977397//vQLUzTffXGP92tqvKVOmKKDW7xp1zSsqKlLx8fHKaDSqxYsXV5u3bds2FRUVpUJDQ9XJkyd90yvbsMr76C1bttSI39/tTpkypdrf5bZt25TRaFTBwcGquLi4xjpTpkypccyV6rof1nVdjRs3rsY16MiRIyooKEgZjUb1ySef+KZ7vV51//33K0CNGjVKKVXx+7v33ntrXBt27NihADV48GAFqJUrV/rmffrppwpQycnJfp2zqudr3LhxNa7VSinfdevll1+uMW/fvn3q2LFjtZ6zmJgYZbVaq33H/stf/lLtOJSStrcl2t4GJU00TVPz58+vdZ7BYFC33357rfPmzZunDAZDQ3ZVw5tvvum7WTv//PN9XyJr+wKnlFKrVq1SZrNZhYWFqTvvvFM98MADvhvn2v5gm0JdSZNK7eXiceY6TZE0qUx+5Obm+uYdOnRIde3aVQHqo48+qradczU+WVlZvvgb0uieLf6MjAxls9mU2WxWy5Yt8013uVzqZz/7me84GpI0mTx5sgJqNMpKNexiUanyfE6ePLnO39/HH39c42/AbrerK6+8
stabn3NdCBtzQW7suaxU20VEkiY/6Yg37rUlTSq1xba3sLBQrV+/Xo0aNUoB6r777qu2Tmu5EZ4wYYIC1Ntvv10tfn+SJmlpacpisSiLxaJWrVrlm+52u9X8+fPrbB8a27ad7Ua3Um1tjiRNapLj73htb11JE5fLpaxWq0pISKg2fdu2bWrOnDmqR48eNb4k1+aHH35QRqNRdevWrUGx5eTkKED16tWrzmXqe9/7j3/8QwHq0ksvrbF+UyVNXn31VQWoBx98sNZY//SnPylA/elPf/JNq9qGbd68udb4/dmuwWBQ+/fvr7FOZZu6evXqGus0JGni8XjUwYMH1bx58xSgrFarOnTokG/+k08+WWuySimlHA6H7zvChg0blMfjUQsXLqzxsLXy+N57770af28PPPCAAtT777/v1zmr+nvYtm1bretcdtllClA7d+6s8/zUpvIau2PHDt80SZrU1BJtb5O9nqMqEjBNtbkaNmzYwKJFi1i0aJGvI6aNGzf6pp35nmHlcMgTJkzgvffe4/XXXycuLo53332XBx98sNnirIvb7Wbv3r0kJCT4RgYB2L59O/PmzWPGjBmYTCaeeOKJWtffuXMnkyZNIigoiF69evHaa681aP8nT54kJyeHhIQEgoKCzrqspmmYTKZqr3dU/qvs9bs5vPzyy9Ve4+rdu7ev/O/M461897C2URgGDBhAfHy87+cFCxZQXl7OTTfd5BsWGyres3/hhRfo2rUr27dvZ+PGjWeN79///jcOh4Obb76ZGTNm+KabzWb+8pe/1HiPvy6VI/vcc889rFu3jmuuuYZZs2bVWK6p4j7TNddcU+NvICwsjFdffRWATz75pEHb+/e//83x48e5//77+dnPflZt3qhRo/jtb39LSUkJixcvrraOP+eyf//+OJ1O9u3b16BYRcfjT9v7/vvvc8UVVxAfH09ERASTJ0+uca05l8a2vZGRkUyaNIkDBw7wt7/9jT//+c/Vlm3M566SwWDgn//8J+Hh4dXWueyyyygrK2P79u31Pr78/HyAJu0r5t///jcul4tbb72VadOm+aabTCb+9Kc/ERoaWut6/rRtf/3rX4mMjKxzvrQ5QjTMvn37cDqdNUac2bhxI1u2bOH8888/53DFAPfffz+dOnVq8P4rX3tuioEfKr/f1DY8fFOp7FJg5syZtc6fNGkSAFu3bq0xLz4+vkZ/UU2x3cTExFpHmascSay2V+Hro/I7hclkol+/fixcuJCwsDCWLFlSrU+S9evXA9S4xkHFazw33nhjteWGDx+O1Wqt1kfMmjVrCAsL4/rrrycxMbHGPKj5+lZjz9nZfg8jR44EKobrXrNmTa2v29am8r6ltj5XRMvyf4zTFrJw4cIGv088ZswYvvrqq+YJqIqUlBRSUlLwer11LlOfi0ddo/3k5uZyySWXMGbMGD7//HN27NjB/fffT0RERL2HcW7IxaPqsJfh4eHVLhI9evSo1/4aKjo6mksuuaTG9Jtvvpl77rmHTZs2oeu6b1SVkSNH8tVXX5GcnMzvf/97Jk6cWOeQvfVpdP/yl7+wfv16JkyYUGeMlduZPXt2jXmdOnVi+vTpZ00q1XaxvfPOO/nnP/9Z67ymirs2qampfPnllxw6dIjS0lJ0XffdFDR0NJHGXFz8PZdyERH15U/b++c//5m+ffuSkpJCaGgob731FhdddBFbt25l+PDh9dp/Y9teq9XKkSNH2LJlC8888wy9e/eu1hdAa7wRbiqVieDKG+KqIiMjmT59Oh9++GGt6zambTvbjW4laXOEaJjKASDObHt/+ctf8otf/ILU1FQuvfTSs27j448/5vDhw9x+++28/fbbDdp/ZbseFhbWoPVqk5eXB1At8d7UKvvhONf9XGUsVZ3t3tyf7Xbv3r3WZSvPaW19RdXH3LlzgYoEfnh4OEOHDmXmzJk1rpOVfffV1e9V5fSjR48CFf2ajB49mg0bNpCRkUGPHj1Yv349kyZN
wmg0MnXqVJYsWUJZWRlut5udO3fW2p9JY8/Z2X4PDz/8MBs2bGDNmjVMmzaN0NBQxo0bxxVXXMG8efPqTCBWPtwoLCw8ayyi+bWZpElrlpycTHJyMna7vc4Gxp+Lxz/+8Q80TeODDz4gODiYiy66iPT0dJ599tl6J00acvGobbz65lbZWdiZIiIiiIyMpLCwkFOnTvmeNpyr8an6JLKhjW5dKrdTV6x1bb9S5UXC4XCwa9cu9u/fz5tvvsn48eNr7V28qeKuSinFQw89xKuvvlpnZVhlZ1z11ZiLi7/nUi4ior78aXs/++yzak84L774YoYOHUpKSgpvvPFGvfbvb9v7/fffM2XKFK6++mr27NnjS3a0lhvhyvPTlMmEyqRNQkJCrfNruzH1p22rz8MAaXOEaJhdu3YBNdteg8Fw1oeMlVwuFw899BAvvPBCoyq8Kr+ENvSepjbff/89AIMGDfJ7W3WpHPTghhtuICQkpM7lahvm2GazNct2Kx9UNrWm6lS7tgeOU6ZM8X0/GD58OKdOnfJVkkydOpVFixaxadMmysvL0XW91oFBGnvOzvZ7CA8PZ9WqVWzcuJHPPvuMNWvWsGrVKlauXMnzzz/P+vXrax3ZtfIe4myVkKJlSNKkhfhz8Vi+fDmXX355tVcWbrzxRl5//XUOHz5cr7Hem/Li0Rqcq/GpWn53Ls1ZblnVmReJP/7xj/zmN78hOTmZadOm1ZlAqEtj4n7vvff405/+REJCAq+++irjxo0jNjYWs9mMy+XCarU2+DU7fy7IjSUXEVFf/rS9Z5aEGwwGhgwZQnp6er3372/be95553HXXXfx8ssv8/rrr/te02ktN8IjRoxg48aN7Nixgzlz5jTZdhvKn7btbDe6laTNEaJhKhPW9a3KO9Of//xnYmNjmTVrFk8//XSD1+/cuTMABQUFjdp/paKiIt/Im1VfF2xq3bt358CBAzzyyCO+Vzla83ZbQteuXTlw4ACZmZm+0Sarqnx40K1bN9+0yZMn84c//IE1a9Zw6tQpgGpJE6h4Lae8vLzatKqa65xVDntcOaLQyZMnuf/++1myZAmPP/4477//fo11Ko+hKV+BFY0jSZMW4s/F4+DBg1x55ZXVplXeCB84cKBeSZOmung0lyNHjtQ63W63U1hYSFBQUI2b1bM1Pr/97W959tlngcY1urWJj4/3bae2pw2ZmZnnOsxqHn74Yb7++mtWrFjB7373O/79739Xm99UcVf10UcfAfD6669zxRVXVJt3+PDhBsVfqTEXF3/PpVxERH35e+NeldfrZdu2bdX64TmXpmh7K4f9rfp6SWu5Eb7iiitISUnhgw8+4KWXXmqSJHRl+5CVlVVr+5CVlVVjWnO0bVVJmyPET+rzWvquXbsIDw+v1z3qmXJycvjDH/7AsmXLGh1j586d6dKlC1lZWZSVldW737kzPfjgg5SWljJ69GjGjRvX6HjO5ZJLLuGbb77ho48+atI2vbm2eyaLxQJQ77466mPSpEmsXr2aJUuWcPnll1eb53K5+OCDD3zLVRo/fjwWi8WXNAkPD+f8888HKqqYK/s1qUya1FZp0lLnrHPnzjz99NMsWbKEPXv21LrM/v37sVqtDBw4sNniEPXTPHVXogZ/Lh6nTp2qkTCofO+v8kbuXM68eLQ2+fn5fPPNNzWmv/vuuwCMGzfunK8JVTY+AD/++KNvemVjumTJkhrr1NXo1qZyfm2Z4IKCAl8fAw3xwgsvAPD222/XSBQ0Ju5zXbQq/15qK8+v7bjqs83Kvmgqv7TUh7/nUi4ior78aXvP9Nprr3HkyBHuvffeeq/TFG1v5Zf+qq8dNuZz1xjn+vxfeumlDB48mOzsbP7whz+cdVt2u71a21yXyleOli5dWmNeUVFRre1DY9q2hpA2R4ifJCcns3fvXrZt21br/KysLPLz8xk2bFijEqmPPfYYl156qd9JikmTJuH1en2v1zTE4cOHmTVrFv/6178I
CQnhX//6l1+xnMtdd91F586deemll3jjjTd81YSVPB4Py5cvr/PLdUtv90wxMTGYzWbS0tLq9fpVffz85z8nKCiId999ly+++MI3Xdd1HnvsMY4ePcrIkSOrvaYaFBTEmDFjyMzMZMWKFb7+TCpNnTqVrVu3snPnTgYMGEBcXFyN/TbHOfvHP/5Ra5Xql19+CdT+OmpaWhr5+fmMGTOmXhWRonk1OGly6NAh/vOf/9T4d7Z5hw4davLA2xJ/Lx5NxZ+LR0t46KGHfCMxAKSnp/PMM88AFRfoqs7V+FS9cW5Mo1ub+fPnY7Va+e9//8vXX3/tm+52u/n1r39NaWlpA462wnnnnce1116Lx+PhpZdeqjavMXFXXrQOHz5c60WrsqPHN954o1qp+vr16/njH/9Ya4znuhA25uLiz7mUi4ior6Zse7ds2cIjjzzCE088wdChQxu0rj9t7/fff+/rP6Xqk7bWciOsaRqLFy/GZrPx9NNP89hjj9VIDiml+PTTTxk1alSdX7Kqmj9/PhaLhf/85z+sW7fON93r9fLggw/W+qpTY9q2+pI2R4iGqasvqfrYs2cPixcv5pFHHqGwsJDCwkIcDgdKKQoLC3G5XPXeVmXV2ble2Z43bx633347//d//8fMmTMZNGgQffr04f3336dv376sWbOmwe1+Q0VGRvLJJ58QERHBXXfdRc+ePbn88sv52c9+xkUXXURsbCyXXnppg79TNdd2z2SxWLj00ks5ceIEw4cP57bbbuOOO+7grbfeavQ2e/TowT//+U90Xeeqq65i0qRJ3HLLLQwaNIhXXnmFuLi4WkeIq6wecTgcNV6/mTp1Km63G13Xa301B5rnnP3jH/8gKSmJwYMHc8MNNzB79mxGjBjBr3/9a2w2G08++WSNdSr/bs+snhSB0eDXczZu3FjrEKeaptU5TykV0GRBoPlz8YCKqpIzR3eo7IyuIUOpXXHFFXzwwQesWbPmrAmCefPmnXX0nMpERlMaO3YsLpeLPn36cOGFF+J2u/nmm28oKytjzpw5NUaI+Mc//sE999zDoEGDfEMj79+/n127dmGz2aoNH1rZ6M6bN4+rrrqKCRMmkJCQwI4dOzhw4ECdje6ZevXqxSuvvMIvfvELZsyYweTJk+nSpQubN2/m1KlT/OxnP+O///1vg4/96aef5pNPPuHf//43v/3tb+nSpUuj4668aH322Wdce+21jB07FqvVyoQJE5g/fz733XcfCxcu5O9//ztr1qxh2LBhHD16lA0bNvDggw/y8ssv14iv6jaHDx/O+eefj8Vi8W2z8uJy1VVXcdddd/H73/+eIUOGEBUVxYkTJ9ixYweFhYV89NFHDBkyxO9zKRcRUV/+tr2VMjIyuOaaa7jqqqt46qmnGrx+Y9pem83GkSNH2Lx5s++GsWrH34353DXGuT7/UHF+v/76a66//npeeukl/va3vzFu3Di6dOlCUVER27dvJycnB5vNVmfnrlX17t2bl156ifvvv59p06YxZcoU4uLi2Lp1KwUFBcyZM4fFixf7qmCARrVt9SVtjhANU1dfUvVx6NAhXC6X75WKqqKionj99de5++6767Wtm266iV/96le88847PP7443Uut2jRIqBiWPPw8HC6du3KbbfdxjXXXMPVV1/dIgMiQMW98O7du3n11Vf54osvWLt2LVDxyuKUKVO47rrruPjii1vNds+0YMECHnroIVauXMk777yD1+vF4/H4rhWNceutt9K7d29eeOEFNm3axJYtW4iPj+eee+7h8ccfr/UV9alTp/oqH2tLmtT2/8/U1Ofs2Wef5eOPP2bLli188803uFwuunfvzh133MFDDz1U64h277zzDmazudbBIkQAqAZITExUPXv2bPS/9q6oqEiFhYWp5cuXV5v+7LPPKkAtWLCg1vU8Ho/at2+fSkxMVI8//niN+RMmTFDz5s2rNm316tUKUGlpafWOr6ysTEVERKhBgwbVOh84
57/hw4fXWCcxMbHWbdVnenp6ugLUlClTVGFhobr33ntV165dlcViUf3791cvv/yy8ng8Nbbz6aefqttvv10NHjxYRUZGquDgYNWvXz91xx13qP379/vOadV1N27cqK666irVqVMnZTabVY8ePdQ999yjsrOz6x2/Ukp99NFH6oILLlBBQUEqKipKXXPNNWrfvn3qqaeeUoB66623aj2vZzNz5kwFqIcffrjGvIbErZRSOTk5as6cOSomJkYZjUYFqLlz5/rm79u3T1111VWqc+fOKjg4WJ133nnqjTfeOOtx5+TkqFtvvVV16dKl1m0qpdTx48fVb37zGzV48GAVHBysgoODVe/evdU111yjFi5cqIqLi/0+l0opdeGFFyqz2axOnDhRbfozzzyjtm7dqpRSyuVyqY8//li5XK5az1F719jjLyoqUoAqKipqpsiaR1FRkbJarerDDz+sNt3ftlcppU6dOqUGDRqkLrjgAlVWVtao+BrT9hoMBhUdHa2mTp2q/vWvfymv11vrug353FVtb2tT1+euPp9/pZQqLi5WL730kho9erSKjY1VJpNJRUZGqgsuuEA99dRTKisrq9Zjr6ut/d///qfGjBnjax9mzpypDhw4oO644w4FqGXLllVbvqFt27nOR6W62pydO3eqp556qtp1RtoeOf6O1vYCatGiRQ1a72xtb25urlq9enW1f3PnzlUxMTFq9erV6ujRow3a1/33368AtX379nrFVNs9Z1vQ1uP3V3s7/qysLGUwGNRNN91UY95f/vIXtXLlymrTpO1t/ra3QUkTcXZ1JU3O5Vw37s8884zq3LlztRv25ORk1bdv3wbH2NouHvW9aW2o9tZ4NlR7Pf6zXUQkafKTjnjjXlvS5FzO1fY6nU41bdo01bNnzxpfmBuqtbW9zaW54/d4PGrQoEFK0zR1/PjxZtlHVWdrcyRpUpMcf8drexuaNDl58qR677331J///GcVGxurrr/+evXBBx+oL7/8ss51nnrqKdWtW7dGxZiTk6NCQ0PV9ddff9blpO1t29rb8d93333KZDKp/fv315gnSZOaWqLtlY5gm0BKSgqDBg1i9OjRDV43NzeXpUuXsnz5csrKyti/fz//+9//+Oqrr3zL3H333ei6zk033cQ333zDyy+/zD//+U9++9vfNnh/jz76KKGhoTz//PMNXleIQPvjH/+IwWBollfERMdSn7b33nvvZe3atfz2t78lPT2dzZs3s3nz5kb1TSJtb8OkpaX5XkOt5HQ6+c1vfsPevXu56KKLfK8yNidpc4RoWj/++COzZs3i/vvv97XDN954I/fcc0+z7K9z5848/PDDfPjhh+zevbtZ9iFEUzp+/DhvvPEGd955Z62v7YjAkCGHm0BycjLJycnY7fZae+4/m8qLR6WlS5eydOlSEhMTfUPKxsbGsnLlSn7xi19wxRVXEBcXx5/+9Kdq77fXV+XF4+mnn2b37t3N3rGVEE1FLiKiKdWn7f3666/RdZ2f//zn1datukx9SdvbMB988AFPPfUUI0eOJCEhAbvdzq5duzh+/DgxMTG89tprzR6DtDlCNL2pU6fi8XhITU2lb9++9eov5Omnn/aNjtgYTz75ZK0dbQrRGsXHx/uGRBathyRNAqy+F48RI0awYcOGJtmnXDxEWyQXEdGU6tP2NjQxci7S9tbfRRddxK5du9i8eTM//PADHo+Hbt26cc899/Doo4/Wq0NZf0mbI4QQQgiQpIkIsJ49e1YbHlIIIYQYPXo0S5YsCXQYQgghhBBInybNQJIAQrQsVdGpdaDDEAEmfwOiJcnfmxAVPB5PoEMQosNwu90tNgy2+IlUmjQxh8NBZmYmHo8Hk0lOrxAtIT09HV3XCQsLC3QoIkCcTidZWVk4nU6sVmugwxHtnFKKtLQ0goOD5eZVdHjp6els2bKF+Pj4ei3v9Xo5duwYVqu11Xx+WmNMDdHW4/dXRzh+pRQHDx6kuLi4RTpC
F9XJt/om5na7yc7O5tVXXyUkJKRe6+i6zvHjx4mPj8dgaB3FP60xpoZo6/H7qyMdv67rnDp1iqSkJHr37h3ocEQAnTp1ij//+c+Ehoaiado5l2+Nn5PWGFNDtPX468vhcFBcXMzVV18d6FCECLj+/ftXG3nsXHRdJysri4SEhFbTTrTGmBqircfvr450/BdeeCEDBw4MdBitSlpBJmtdO+hfMJgBcX2aZR+SNGkGs2fP5sSJE7hcrnot7/F4yM3NpWfPnq2mOqU1xtQQbT1+f3Wk49c0jYiICEaOHInZbA50OCKAfvazn3H06FEcDke9lm+Nn5PWGFNDtPX468tsNpOYmEifPs1zcyZEW3LVVVdhMpkoKyur1/Jut5uvv/6aiy++uNVct1tjTA3R1uP3V0c5/rCwMGw2W6DDaFWUUixLW0uOXsCytLX079y7Xg/OGqr93tEEUPfu3Rk0aFC9l3e73Xi9XmbMmNFqPuitMaaGaOvx+6ujH7/omOLi4ujbt2+9l2+Nn5PWGFNDtPX4hRANp2kaISEh9a6wdrvdREREEBsb22raidYYU0O09fj91dGPvyNLzU/nYF4aJowczEsjNT+dfjFJTb6f9l2/JIQQQgghhGgyKSkpDBo0iNGjRwc6FCFEB6aUYlnqWty6BxsW3LqHZalrm6WjdkmaNAG5eAghhBBCiI4gOTmZvXv3sm3btkCHIoTowFLz09mfd4hgcxCaphFsDmJ/3iFS89ObfF+SNGkCcvEQQgghhOjYKjsjTCvIDHQoQgjRrvmqTLxuLMaKV7KsRgtur7tZqk0kaSKEEEIIIYQfzuyMsDnKw4UQQlTwVZlYgjhZmk+5cqKAEEtws1SbSNJECCGEEEL4rSNXWtTWGaEQQoimV7XKxO3x4FVePHjRtOarNpGkiRBCCCGE8EtHrrRoyc4IhRCio6val0mJuxQAq2ZBQ6sYzasZqk0kaSKEEO1MW33ae88996BpGq+99lqgQxFCNFBHrrRoyc4IhRCiI6tWZeJ1oyuFyWDCgsm3THNUm5jOvYgQQoi24synvf0790bTtECHdU6ff/453377LV27dg10KEKIBqqr0qJvp16tpv1RSqErHY/uxa278epe3LoHj+7FpBmJCYn2Lbv3ZCpOr/OnZbyVy3oItYQwvsdI37If7V3GuozN2J3FWAxmDGiEGy2Uuctb3TkQQoi2rjJJHWS2UeiwAxBmCcXtcfqWObPapF9Mkt/7laSJEEK0I7U97W2Ki0VzysnJ4Z577uHLL7/kqquuCnQ4QogGqlpp4XQ5CDbbatys6rqOW/ecTkS48ehePLoHs9FMdFBkxTJKZ+/Jg755lf91eyv+f0xIFOfFD/Htd8kPn/iSGR6vp9p6CRHx3DjkSt+yv1v9Ko4qN9VV9Yzszt1jbvX9/NG+ryh2lta6bHxY52pJk53HfySnNA/QcOluLJib5YZdCCE6usoEvdPjxGMw4dV1TAYjJoMRh/Li8rp9SWqDZsDpcTZZ8lqSJkII0U40x9PexYsXs379er777jt2796Ny+XirbfeYt68eXWus23bNp566ik2bdqE2+1m6NChPPDAA9x00021Lj9//nzuu+8+hg4d2qgYhRAtTymF3VnMyZJ8Fu/6kBJXKSbNhEs5wWPAq7wsS11LQng8z6z9S50l0sO6DOSWYdee3igs3vVRnfscENunWtLkx5MH8eieWpcNtQRX+9loMJzxs/H0zbYJm8labV5iZAIOtwOT0YTJYPItZzaYiLSFVzsHBs2AxWghzBKChoajrByoKA8vdZVJtYkQQjQRr+4lv7wAq8lCkaMEhY7ZaMPpceLGi9PjhCptrdVkpaD8FF7di8noX9pDkiZCCNFO1Odpb0M98cQTZGZmEhMTQ3x8PJmZZ+8nZfXq1cyYMQObzcbs2bMJCwtj6dKlzJo1i6ysLB588MFqy7/22muUlpbWmC6EaB08Xg955afQgLjQWABKnKW8tOF1XF43
Do+Tk6crLXRNx4uOju6rtEgvzK6RMDFqhtMJCSMWo8U33WAw0DMqAZN2OqFh/ClhYTKY6HJ6/5Wu7H8Rmqb5Ehtmg8m3bLAlqNqyvx53JwaDAbPBhNFgxKDV3a3fnOHX1evcpOank1OaR6QtHJvJilIKFxXVLFJtIoQQTctkNPHr8XdS6iqjzFXOjycPMrLbUHSvztq1a5kycQomU/X0Rqgl2O+ECUjSRAgh2oWqHWMZT/fxbW2C9+oXLFhA3759SUxM5IUXXuDRRx+tc1mPx8Odd1Z8MVm3bh0jRowA4Mknn2TMmDE89thj3HDDDSQmJgKwf/9+nn32WbZs2YLBIP2SCxFIutI5UniUk6X55J7+d7KsgFPlhSilGBI3wJdMqJqQcHpcGDQjoeZgzEYT5WXlhFlCMBlMlLrK+DptPY9OSsZsMvuSG2dLWNw9ek69Yx6bcH69lw21htR72fqo2uaGWWrftlSbCCFE04q0hfsq/vrG9ALA7XYTYQila1gcZrO5WfYrd6lCCNEOVFaZGDQDp5xFlCpHkwy7dvHFF/uSHOeyatUq0tLSuOWWW3wJE4CIiAgee+wxXC4XixYt8k3fvHkzubm59OnTB5PJhMlkIjMzk1/96lfV1hdCNA1d18krK2B/7iHWZWxha/bOavMXfPcuH+79ivWZW9mfl0ZB2SmUUlhNVkxVEpsGzcD94+/gtvNuQNM0YoKjiAwKJ9gchImKypCf2p80ckrzCDYHYTGaz5owaUsq29wQS3CdyZDmGvpSCCE6qlPlRQHZr1SaNIGUlBRSUlLwer2BDkUI0QFVfeLp9LoAMGlGoGWfdK5ZswaA6dOn15g3Y8YMANauXeubdu211zJq1Kgay82bN4/58+fXuR+n04nT+VOHjnZ7Re/pbrcbt9td73grl23IOs2tNcbUEG09fn+1xuNflb6REyV55JXlk19eiFf/6V4lPqwz58UN9v3cM7I7KIgJiSY2uOJfTHAnQk8nBqoeV6gxmBWp63B73YSag1FK+V7DqfyvxWCm1FXGlwdX0zO8e7uptFBK8eXB1Tg8TqxGC06Pyzfdq7w4PS7fsWpoODzOs56D1vT3IoQQrVWxs4Q/bXyDxMjuzBl+HTazrcX2LUmTJpCcnExycjJ2u52IiIhAhyOE6GCqPvGMNIRT6ipDd1R8MWrJ9+pTU1MB6Nu3b415Xbp0ITQ01LcMQGRkJJGRkdWWM5vNxMfH06dPnzr38/zzz/O73/2uxvQVK1YQHBxcyxpnt3Llygav09xaY0wN0dbjb6w8vZAfPYfJW15IjCGyWfellMKJixJVTqkq9/3XiIGR5oG+5da5vqdUlft+NmAgRAsiRLOhlzj58ssvffNiqfz8OMjjGHkcq3P/ufopdrr2YMSA3WWvNq8ykQmgKy87s/bwn5x3iDVE+XnUrYNX6RxypaErL6dchTXmF5bUfBJ6KDuNz09+gbGWSpuysrLmCFMIIdqV1enf4tY9uLxurGd04N3cJGkihBBt2Jnv1WuaRpg1lCLHTzftLVVtUlRUsc+6ksfh4eG+Zfzx6KOP8sADD/h+ttvtJCQkMH36dMLDw8+yZnVut5uVK1dyySWXNNs7sA3VGmNqiLYevz+UUqRsXUTO0QISYrtz65jLmuSz5tE9FDtLiQr66XP1zu5POFSQget0ZVlVRpOJyyb9tO9OR+Px6N7TlSOdiLCF+f2KjFKKv29/G0OOkXBrWLXppSUlhISG+vYfBBQ5iymJ83LbqKY5J63BZMdkSt3l1aa53W42bdrE+PHja/z9h5iDibCFUZuqSSYhhBA1FTnsbMn+HoDpfSa3+LVEkiZCCNGGpeans/PEHqxGa73fq2/NozhkZGSccxmr1YrVWvMJg9lsbtQX9cau15xaY0wN0dbjb4yDeYdJLUjHhJHUgnQy7NkN+qyVuco5WZrHydJ88soKfB2yFpQXEmSy8eS0+33LKnTcuhuDwUCn4Cg6B3ciNqTiX+eQ
TpjMJl9iZGLPMU19qHi8Hk45CrGZrL5XAisCU7jxViRzqrRHNpOVQkcRBqOhSUYxaA1izJ2IOWOa2+3mR0MoidHdG/T339E+K0II0VCrDm/Cq3vpFdWD3tH162uvKbWPK5cQQnRASimW/PAJZW4HTo8bs7FiqM3K9+pdXrcvkWLQDDg9zmatNqmsMKmrmsRutxMV1T7K84WoylfxpXuwYcGte2r9rOlK51R50elkSBHje4z0zVuy+5M6Owv1Kh2H2+F7f/vSftO4vN+FRAdHYTIYm/fgalF12MeqPB5Psw/7KIQQomMpKC9k+9FdAFzSe1JAKhbl6iWEEG3U0eITHD51BIOmYTGZ8ehePLrX97TX6XFWe9prNVkpKD+FV/c2y5eXyr5MUlNTGTlyZLV5J06coKSkhDFjmv6ptxCBVtmvULA5CKfLQbDZxv68Q6w+vAmP8nCytIDc0xUkVTtiPS9+MEGnEyGdQ2LILSuoUjUS7aseqXz1rlKX0NgWP8YzVR32sVJLDPsohBCiY1l1eBNepdMnuidJ0T0CEoMkTYQQog1yuB28+8OnxIZ0IiE8npmDL/OV4wfqae+UKVN4/vnnWbFiBbNnz642b/ny5b5lhGhPqvYr5PV6AIXVaKHMXc6yQ2vQ0KolPEwGEzEh0XQO6YTL6/IlTa7sfxFXDbg4QEchhBBCtD4er4eMwiwALukzKWBxSNJECCHaGF3pvL/nc/LKCogJjuKOUTcTYvlp1JhAPe296KKLSEpK4p133uG+++5jxIgRQMXrOs899xwWi4XbbrutxeIRoiVUVpkYDEZKXaXYsPj6ETpVXsTobsMZ1Lmvr2ok0hZea0es7aWDVCGEEKKpmIwmfj3uDlLz00mM7B64OAK2ZyGEEI2yJn0ze3NTMRlMzBk+s1rCpKktWLCADRs2ALB7927ftDVr1gAwceJE7rjjDgBMJhMLFixgxowZTJ48mdmzZxMWFsbSpUvJzMzk5ZdfpmfPnk0WW0pKCikpKXi93nMvLEQzqFplUtkhqhcdOD1qFWU4PC4m9BgtSREhhBCiEYwGIwNi+wQ0BkmaCCFEG1Pmruh88ZqB0+keEd+s+9qwYQOLFi2qNm3jxo1s3LjR93Nl0gRg2rRpbNiwgaeeeor33nsPt9vN0KFDefHFF5k1a1aTxpacnExycjJ2u73OYY6FaE6+KhNNQ1cKo2bEQkV1V1satUqIhpCEtRCiJRzKz6BnVEJAOjw/kyRNhBCijbmy/8UMixtIj8huzb6vhQsXsnDhwgatM2bMGL766qvmCUiIVqJalYmnosokzBqKp+ynIXitRgulrrJmHbVKiJYmCWshRHM7UZLLv757l6igCH417udYTZaAxlPzpVohhBCtjsfrqTbqRkskTIQQdausMtEAHYXJYCT4dKeulc6sNhFCCCHEuX2dth6Fomt4l4AnTECSJkII0eoppfh4/3IWfPcuJc7SQIcjRIdXWWXi8Dgo8zhQShFstuH2evAqLy6v2/fPoBlwepwsS12LUirQoQshhBCt2jH7CfbkHEBD45LeEwMdDiCv5wghRKu39ehOth/9AU3TOFGSSx9rSKBDEqJD8+pe8ssLMBvMuLzu06/daDg9Ttx4cXqcUOVVHKvJSkH5Kby6t9mG/BZCCCHagxVp6wEYHj+QuNDYAEdTQa7cTUA6xBJCNJesomN8un8lADP6TKFPp56BDUgIUTEE4vg7KXWV4fF6KHWVEREUjsfjYe3atUyZOAWTqfotVqglWBImQgghxFkcKTzK/txDaJrGxUmTAh2Oj1y9m4B0iCWEaA4lrlIW7/oQr+5lcOd+TOk5NtAhCSFOi7SFE2kLrzbN7XYTYQila1gcZrM5QJEJIYQQbdPXaRsAOC9+CDEh0QGO5ifSp4kQQrRCuq6z5IdPKHIUExMczY1DrpSRN86QkpLCoEGDGD16dKBDER3MqfIith3dha7rgQ5FCCGEaBfcXjc6OgbNwMVJraMv
k0pSaSKEEK3QyrT1pBVkYjGauXXETGwma6BDanWkyk8EyjeHN7D96A9kFmZzw+ArAh2OEEII0eaZjWbuGHkz+WWniA6ODHQ41UiliRBCtEIj4gcRExLNDYOvaDWdYAkh4GRpPt8d2w3ABd3PC3A0QgghRPvSKTgq0CHUIEkTIYRoheJCY7l/3B0M6zIw0KEIIapYeWgdSikGxfYlIaJroMMRQggh2jSlFBsyt1HiLA10KHWSpIkQQrQSTo+LzMJs388mgzGA0QghznTUfoLdOfvR0JjeZ3KgwxFCCCHavNT8dD4/8DV/2vQmLq870OHUSpImQgjRCiil+ODHz/nHtsVszd4Z6HCEELVYfmgtAMPjB9ElrHOAoxFCCCHaNqUUK9LWAXB+1yFYjK1z5DlJmgghRCuwLmMLe3IOYECji/RhIkSrk34qi4N5hyt69e/dunr1F0IIIdqi/XmHyC46jtloZmrPcYEOp04yeo4QQgTYofwMlh1aA8BVAy6hR2S3wAYkhKjBarKQFNWDmJBoYoKjAx2OEEII0aYppVh5aD0A4xLOJ9QaEuCI6iZJEyGECKBCh50luz9BKcX5XYfIaBxCtFJdw+K4c9QteHVvoEMRQggh2rwfTx7gWHEOVpOFKT3HBjqcs5LXc4QQIkA8Xg+Ld31IqauM+LDOXDfwUjRNC3RYbUZKSgqDBg1i9OjRgQ5FdBCapmEyyvMmIYQQwh+60lmZtgGA8QmjCLEEBziis5OkiRBCBMjOEz+SXXScILONW0dcj7mVdn7VWiUnJ7N37162bdsW6FBEO/bjyYN8eXAVZa7yQIcihBBCtAse3Uvv6B6EWkKY1HNMoMM5J3lcIoQQATKy6zAcHhexIdFEB0UGOhwhxBl0XWdZ6hpyS/OxGM1c3HtSoEMSQggh2jyL0czVA6ZzWd9pbeKhoSRNhBAiQDRNY2KivFoiRGu14/geckvzCTLbmNhDPqtCCCFEU2oLCROQ13OEEKJFlbnK+XjfchweZ6BDEUKchcfr4evDFe9bT+05DpvZFuCIhBBCiLbNq3v5349fkl10PNChNIgkTYQQooXoSmfJ7k/YnLWDJT98EuhwhBBnsfXoLgrLiwizhjKux8hAhyOEEEK0eTuO72H70V0s/P4DPF5PoMOpN0maNAEZwUEIUR9fp60nNT8ds8HEpX2nBjocIUQdXB4Xqw5vBOCipAlY2kj5sBBCCNFaeXQv35y+tk7pObZNjUYnSZMmICM4CCHOZe/JVFYd3gTAzMGXER/WOcARCSHqsinrO0pcpUQFRTCq2/BAhyOEEEK0edt9FZwhjE04L9DhNEjbSe8IIUQblVdawHt7PgNgfI+RnBc/JMARCSHO5rz4wZwqL6JXVAImgzHQ4QghhBBtmsfrYXV6xcPDab3Gt5kOYCtJ0kQIIZqRy+Pi7V0f4vQ4SYzszuX9Lgp0SEKIc4iwhXPdoEsDHYYQQgjRLmw5+j1FjmIibGGM6TYi0OE0mLyeI4QQzajQYcfhcRBmDeFnw6+Tp9ZNSPqTEk1NV3qgQxBCCCHaFZfXzZr0bwG4MGlCm+rLpFLbi1gIIdqQzqEx/PKC+RQ67YRbQwMdTruSnJxMcnIydrudiIiIQIcj2oEvD64iv6yQS/tOIS40NtDhCCGEEG2eQTNwYdIEdh7fy8iuwwIdTqNIpYkQQjQDr+71/f9Qawjdw+MDGI0Q4lwKy4v4NmsH+3JTsTtLAh2OEC3mnnvuQdM0XnvttUCHIoRoh0wGI+MSRnLPmFvbbMW1JE2EEKKJ2Z0lvLLxDb4/vifQoQgh6umbwxvx6l6SonrQJ7pnoMMRokV8/vnnfPvtt3Tt2jXQoQghRKslSRMhhGhCHt3Lf3d9REF5IWvSN+OpUnEihGid8koL2H7sBwBm9J2CpmkBjki0RWX2LKLNeymzZwU6lHrJycnhnnvu4e2338ZsblsjWQghWj+H20HKlkV8f3xPm+8zTJImQgjRhL48uIrM
wmysJiu3Dp/ZZssQhehIVqStQynFgNg+JEZ2D3Q4og1SSpGX/S02YxF52d+ilPJre4sXL+auu+5i1KhRWK1WNE1j4cKFZ11n27ZtXH755URGRhISEsLYsWN5//3361x+/vz53HfffQwdOtSvWIUQojYbjmwjq+gYqw9/G+hQ/CYdwQohRBPZefxHNh3ZDsCsIVcRExId4IiEEOdyrDiHH07sA2B6n8kBjka0VaVFmZQWZqArI6WFGZQWZRIa2bPR23viiSfIzMwkJiaG+Ph4MjMzz7r86tWrmTFjBjabjdmzZxMWFsbSpUuZNWsWWVlZPPjgg9WWf+211ygtLa0xXQghmkKZu5z1mdsAuLj3RAxa267VaNvRCyFEK3G8+CRLf/wSgAuTxjOoc98ARySEqI+Np2/qhnUZSNewuABHI9oipRS5RzahlAcdM0p5Tv/c+GqTBQsWkJGRQW5uLnffffdZl/V4PNx5550YDAbWrVvHG2+8wSuvvMKuXbvo168fjz32WLWky/79+3n22WdZtGgRBoN8FRBCNL31GVtxepx0CevMkLj+gQ7Hb9JSCiGEnxxuB2/v+hC37qFfpyQu7j0p0CEJIerpuoGXcmX/i5neW6pMROOUFmVSUpiOwWQDNAwmGyWF6ZQWnb065GwuvvhiEhMT67XsqlWrSEtL45ZbbmHEiBG+6RERETz22GO4XC4WLVrkm75582Zyc3Pp06cPJpMJk8lEZmYmv/rVr6qtL4QQjVHqKmPjkYoHEpf0ntTmq0xAXs8RQgi/WUwWhsUNYNeJvcweenW7uDgI0VGYjCYmJo4OdBiiDfF6HDjLC3CVF+AozaMobz+67sFkCgYcGAwWPB4HuUc2ERKR2OwdC69ZswaA6dOn15g3Y8YMANauXeubdu211zJq1Kgay82bN4/58+c3X6BCiA5hbcZmXF43XcO7MCi2fVReS9JECCH8ZNAMXNp3KtN6jcdqsgQ6HCFEPdgdxYRaQyTJKc5J97o4mbUJV1kBzvJ8PO4y3zyvx4nbacdsDfclRzRNw2gK8lWb+NO3SX2kpqYC0LdvzS8nXbp0ITQ01LcMQGRkJJGRkdWWM5vNxMfH06dPnzr343Q6cTqdvp/tdjsAbrcbt9td73grl23IOs2tNcbUEG09fn/J8bee4y9zl7MxcztKKS5MHIfH42n2fTb2+BuyvCRNhBCikY4V59A5uBMmY0VTKgkTIdoGpRT/2bkUl9fN7KFX0TW8S6BDEgGilMLrKa+oHCnLx1legLO8AGtQJ7r0mgqAZjBRmLMHpX4aQt5kCcVii6K06AiaZsBgrN7+G4wW3J7yFqk2KSoqAipex6lNeHi4bxl/PP/88/zud7+rMX3FihUEBwc3eHsrV670O6am1hpjaoi2Hr+/5PgDf/xKKZJUHDl6AYe27CdNO9Bi+27o8ZeVlZ17odMkaSKEEI1QUFbIm9vfoVNQFPPOu5FQa0igQxJC1NOPJw+SbT+OxWgm3BoW6HBEC1BKoXvdGE8nt5VSHNn3Ic6yPLweR43ldc9PFRWaZiA2YSxGUxDWoE5YgqIwmqyUFGZgz9uHyRKKpmnVOn5t6WoTf2VkZJxzmUcffZQHHnjA97PdbichIYHp06cTHh5e73253W5WrlzJJZdcgtlsbky4Ta41xtQQbT1+f8nxy/E35vgrq+XqQ5ImQgjRQC6vm7d3LaXc7UAL1rCZrIEOqUNKSUkhJSUFr9d77oWFOE1XOivS1gEwMXG0JDzbGaUUHlexr2KkonrkFM7yfKxBUfQcMhuoSGp4XCW+hInZGoE1OBprUDTWoE5YgztV226nrqNq7Cf3yCZ03YPZXPvfUEtVm1RWmNRVTWK324mKivJ7P1arFau15vXObDY36otaY9drTq0xpoZo6/H7S44/sMfv8Xp81deB0NDjb8iykjQRQogGUErx8b5lHC8+SYglmDnDrwvoBaIjS05OJjk5GbvdXmdZuhBn2nl8LydL8ggy
25iUeEGgw2lXyuxZRJv3UmYfSkSnpGbdl1I6bmcxHncpwWFdfdPTf/gvzvL8WtdxlReilPIlL+KTLsJgtGIJisJgaFg7XjlijtEUVGcypKWqTSr7MklNTWXkyJHV5p04cYKSkhLGjBnTLPsWQgiAU+VF/G3zW4xNOI+L28mIOVXJnb4QQjTAluzv2XFsD5qmccuwa4mw1b8kWQgRWB7dy9dp6wGY0nMsQWZbgCNqP5RS5GV/i81YRF72t4RH92qyygqXowhnWZ5vxBpneUXfI0r3YjTZ6DfqLt+yZmsYLscpLLZILEGdqlSPRGO2RVaLKTi8e6Piqawy8XpdmI1WdK/bNx10dK+7SqewBjxeV7NWm0yZMoXnn3+eFStWMHv27Grzli9f7ltGCCGay6rDGylzl5NZeLTdJUxAkiZCCFFvmYXZfLa/opOpy/pOo3d0YoAjEkI0xPajuygoLyTMGsL4hJHnXkHUW2lRJqWFGejKSGlhRoMrK5TuxeUswllWgNtZRKeuP/1+Thz+hlJ7Vo11NM2AyRKC7nX5OmKN7z0do9GCZjD6fUx1xqoqYjUaLeheZ5XpYMCL7nVRNTdiNFpwOYtQyoumNf2t90UXXURSUhLvvPMO9913HyNGjAAqXtd57rnnsFgs3HbbbU22P3k1UghRVX7ZKbYf+wGA6X0mBzia5iFJEyGEqAdd6Xy4dxlepTMkbgCTEqXUWYi25vCpIwBM6zUBi4x21WQqKy+U8qBjRinPOSsrSu3ZlNmzcZYV4CrPx+UoRCndNz+y82CMpopKIFtoZ7weB5aqfY4ERWO2RaCd8UTTZA5qvgM9zWAwkTRsDl5PebXpbrebdevWMXns5BrvyhtNwQ16BWjBggVs2LABgN27d/umrVmzBoCJEydyxx13AGAymViwYAEzZsxg8uTJzJ49m7CwMJYuXUpmZiYvv/wyPXv2bOTR1iSvRgohqvrm8EaUUvSLSSIxsnEVfK2dJE2agGTchWj/DJqBuSNuYNmhNVw/6PJmHT5SCNE8bh56DaO6DiMpqkegQ2lXKvv3MJhsgAODyUZxYTqnTuzCaLadTowU0LXPDAzGimSCPe8ghSd3V9uOwWDGEhSFNagTSv/pnqpzj4nQyn5lZmsY5jNGXjK63XhUMLaQzn53xrhhwwYWLVpUbdrGjRvZuHGj7+fKpAnAtGnT2LBhA0899RTvvfcebreboUOH8uKLLzJr1iy/YhFCiLrklubz/fE9AEzv3T6rTECSJk1CMu5CdAzRwZHcMuzaQIchhGgkTdPoF9O8HZR2NFVHkdEwYNScuJ1ulO7myL4PsQRF+5LMLscpbCGdAQiJSADlxRLcydfnSMXQve3vXfjGWLhwIQsXLmzQOmPGjOGrr75qnoCEEKIWX6dtQCnFwNi+dI+ID3Q4zUaSJkIIcRZ7cg5gMZrli5YQbdih/Ay6hscR3AKvbnQ0VUeR0b0uNLygQEND192YreGERiZiDYrGaA72rRfeqS/hnfoGMHIhhBD+KHGW8uPJgwBc0mdSgKNpXpI0EUKIOuSU5PL+ns9wez38fORs+nTqGeiQhBANVOoq4+1dS9HQSL5gLrEhnQIdUruhlOLk6SoTszkEg8FMebmDIFs4msGEx1WMyWSjS68L5ZVGIYRoZ0KtITww4U4O5KXRNSwu0OE0K6mBFEKIWjg8Tt7e+SEur5te0T2kDwQh2qg16d/i9LiIDo4iJjg60OG0G0opTqSvpij3R4wmW0VSRNPQMWMwWjAYjBhNQZQUplNalBnocEUTSklJYdCgQYwePTrQoQghAiw6KJJxHWA0OkmaCCHEGZRSfLDnc/LKCoiwhXHL0GswGKS5FKKtKXLY+TZrBwAz+kyWaocmontdHE39ipyMNei6B6XrtS5nMFrQdc/pkXVUC0cpmktycjJ79+5l27ZtgQ5FCBEgBeWFgQ6hRcm3ACGEOMPajM38ePIgRoOROcNnEmoNCXRIQohG+ObwRjy6
h55RCfTrJP0SNQVHaS7pu5dQeHIPuteFyRxS5zC/mqZJtYkQQrQz2UXH+eP6f/DODx+jq9qT5u2N9GkihBBVHMrPYPmhtQBcPeASEiK6BjgiIURj5JUVsP3oDwDM6DNFqkz8pJSi8OQecjLWousevB4HBqMZkzkYXff4lgEd3ev2nW9NM+Dxusg9somQiET5PQghRBu3Im0dCoXJYMTQQUY8k6SJEEJUcTD/MEopRnUbxphuIwIdjhCikb5O24CudPrFJNErKiHQ4bR5edmbyTu6FYDQyETK7EdxO4vQvU7fMkqBAW/FKDpVciNGowWXswilvGia3HoKIURblVmYzcG8w2iaxkVJEwMdTouRK5cQQlRxeb8L6R4ez8DYPvJEtJVLSUkhJSUFr9cb6FBEK6MrHYvRjEEzMKPPlECH0y6Exwyg4MQuYrqNIjr+fDyuUrye8mrLuN1u1q1bx+SxkzGbzdXmGU3BGAxy2ymEEG3ZykPrARjVdRidgqMCHE3LkauXEEJQ8SWrssRwWJeBAY5G1EdycjLJycnY7XYiIiICHY5oRQyagZmDLuOS3pMIs4YGOpw2SSmFsywPW0gsANagKPqcNw+jyQaA2RqG2RpWbR2j241HBWML6VwjaSKEEKJtO1xwhEMFGRg1AxcmTQh0OC2qY7yEJIQQZ7E1eycLd3xAmav83AsLIdoMSZg0ju51cTxtBem7l1Bqz/ZNr0yYCCGE6FiUUqxIWwfA6O4jiArqWA+rJGkihOjQsouO8+n+lRzMP8yO43sCHY4Qwk9fp60npyQ30GG0Wc6yPNJ3v0tR3n4AXGUFAY5ItDYpKSkMGjSI0aNHBzoUIUQLsTuLySstwGQwMa3XuECH0+IkaSKE6LBKXWUs3vUhHt3DwNi+jO8xMtAhCSH8cCg/g6/TNvC3zQspc0vlWENUjI7zI+l73sXlOIXJEkrioOuJ6jIs0KGJViY5OZm9e/eybdu2QIcihGghEbZwfjPxbuaddyMRtvBAh9PipE8TIUSHpCudJT98QqHDTkxwNLOGXNlhhk0Toj1SSvmGCx/TfTjB5qAAR9R26F4XJ9JX+6pLQiIT6dp7OiZzcIAjE0II0VpYTBb6dOoZ6DACQr4hCCE6pBWH1nGoIAOz0cycETOxmeVdfSHasn25h8gqOobZaGZar/GBDqdNKS44fDphohGbMJ6E/ldLwkQIIQRKKQ7kpaErPdChBJRUmgghOpw9OQdYk/4tADcMvpwuobEBjkgI4Q9d6aw4XWUyocco6QC2gcJj+uMozSEsug/B4d0CHY4QQohWYm9uKm/vXEpiZHfuHj0HTdMCHVJASNJECNHhRAdHEhUUweDO/RjeZVCgwxFC+OmHE/s5UZKLzWRlcs8LAh1Oq6d73eRlb6FTt1EYTTY0TSOu55RAhyWEEKIV0ZXOykMVI+YkRfXosAkTkKSJEKID6hoWxy/HzsdqtAQ6FCGEn7y6l5Wnh0Gc3HOs9GVyDs6yfI6mfomzvACXs4ju/a4IdEhCCCFaoT05BzhRkovVZGVSzzGBDiegpE8TIUSHoJQitzTf93OwOQijwRjAiIQQTUEBF3Q/j86hMUzoMSrQ4bRqhbl7Sd/zLs7yAkzmYKK7DA90SEIIIVohXel8nbYBgEmJozv8AwmpNBFCdAgbMrexLHU11wycwZjuIwIdjhCiiZgMRib3vICJiaNlBKw66F43JzJWU5S7D4CQiB507TNDOnsVjZKSkkJKSgperzfQoQghmsmuE3s5WZpHkNnGxB6jAx1OwEnSRAjR7h0uyOTL1FUopfDocpMnRHskCZPauRyFZB/4DGd5ARWj44ylU9dRaHK+RCMlJyeTnJyM3W4nIiIi0OEIIZqYrut8k7YRgEmJF8gIk8jrOUKIdiitIJO1rh2kFWRS5LDzzg+foJTivPghjEs4P9DhCSGagNPj4o1t/+XHkwdRSgU6nFbLaLTi9bowmYPpMWgmMd3GSMJECCFEnYpd
JdhMVkIsQfLa62lSaSKEaFeUUixLW0uOXsBXh9ZgMZopcZXSJawz1w26tEP3/C1Ee7IhcyuHTx3B7ixhYEwf+WxXoeseNM2IpmkYzUEk9L8akyVEXscRQghxThG2cJIvmMup8iKsJhk0AaTSRAjRzqTmp3MwLw0TRnbl7CW1IJ0gs41bh8/EYjQHOjwhRBMoc5WzLnMrAJf0mYTBILczlZxlBWTsXkJR7l7fNFtIrCRMhBBC1JumaUQHRwY6jFZD7jKEEO2GUoplqWtx6x5MGHF6nNidxdw0+Eo6BUcFOjwhRBNZk/EtTo+TLmGdGRo3INDhtBpFuftI37MEZ3kBeUe3oqQPJyGEEPXk8XpYl7EFh8cZ6FBaHUmaCCHajdT8dPbnHSLYHIRZMxFqCQGQoYWFaEfsjmK+PfIdADP6TJYOYKkYHedY2kqOpa1A6R5CwhPoOWQWmrR9Qggh6mnr0V18eXAV/9i2WPoKO4PcaQgh2gVflYnXjdVY8f5llC0Co2ZkWepaafyFaCdWpW/CrXvoEdmNATF9Ah1OwDnLC8jY857vdZyY7mNJGHitvI4jhBCi3txeN6vTNwEwtvt50k/YGaQjWCFEu7An5wA7ju/GW6UcXdM0QizB7M87RGp+Ov1ikgIYoRDCX4UOO1uzdwJwaZ8pHf6mzuMuJ2P3u+i6G6M5iG59LiMkIiHQYQkhhGhjtmR/T7GzhEhbOKO6DQ90OK2OVJoIIdq8fScP8dqWhTg9TpRSOL0u3zyr0YLb65Zqk3YoJSWFQYMGMXr06ECHIlpIhDWMW0fMZHyPkSRFJwY6nIAzmYOIjj+PkPAEkob+TBImokVI2ytE++LyuFiTvhmAC5MmYGpjr3aWpB4i7OvVlKQearZ9SNJECNFmlbsdfLDnc/65fTHFrhJMBhOxoZ2wmay+Zc6sNukIyuxZRJv3UmbPCnQozSo5OZm9e/eybdu2QIciWoimaQyM7cvVA6YHOpSAcZYX4HIU+n6O6X5Bxes4p/twEqK5SdsrRPuyKes7SlylRAdFMrLr0ECH0yBKKXK+XIb5+AlyvlzWbA9IJWnSBCTjLkTL2597iFc3vcn2oz9gdxZjMpiID+vs68+kqo5UbaKUIi/7W2zGIvKyv233xys6DrfXHegQAq4odz8Zu9/l6MEv0XUPAJpmQJPOcIUQQjSCw+NkbUZFlcnFvSe2ucETig8cpHjffpTZTPG+/RQfONgs+5GrbBOQjLsQLUspxaYj32F3lmAz2TAZTEQHRdY5ikZHqjYpLcqktDADXRkpLcygtCgz0CEJ4bfsouM8t+411p4uH+5odN3D8cPfcCxtObruxmCyoCSJJIQQwk8er4f+Mb3pHBrDiPjBgQ6nQZRSHP/yK3S3G91mQ3e7Of7lV83ywFA6ghVCtBm60jFoBjRNY+bgy9iUuZ2D+elk249h0Ay4Tn+JUErhVV5cXrevo0iDZsDpcbIsdS19O/Vqlx1IKqXIPbIJpTzomFHKQ+6RTYREJLbL4xUdx4pD6yh3O8gpzQ10KC3OWX6Ko6lf4CzLByCm2xhiul8g1SVCCCH8FmoNYfbQq3F73XU+fGytig8cpHjvPowhIeB0YgwJoXjvPooPHCR8QP8m3VfbOjNCiA6pzF3Oe7s/48O9y3zTIm3hTO8zmVOOQqwmKw6P0/fP6XHixouzyjSHx4nVZKWg/FS1EXbak9KiTEoK0zEYbYCGwWSjpDBdqk1Em3a44AgH8w9j1Axc3HtSoMNpUUV5+8nYvQRnWT5GcxA9Bl5HbMI4SZgIIYRoUmajOdAhNMhPVSanX1X1ejBYrehuT7NUm0iliRCiVdt7MpWP9n1FsbMUTdOY2nMsMSHRAJiMJn49/k5KXWXV1vF4PKxdu5YpE6dgMlVv5kItwZiM7a/pU0pxNG0dazftZeuOTArz7UR2imDM+T2whKyj3/lSbSLaHqUUyw+tBWB09xFEB0UGNqAWpJROwfGd6Lqb4PBudOtz
mXT2KoQQokmUucr5MnUVU3uO891XtyVVq0wMViuUlKChmq3apP19cxBCtAtlrnI+PbCSncd/BCA2pBM3Dr6iRsMeaQsn0hZebZrb7SbCEErXsDjM5raVOW+s5V++zyP/9wTHcgrxuL1omoZSimWrd7Fg8XpeeNHKpVfMCnSYQjTIgbzDZBZmYzKYuLDX+ECH06I0zUC3vpdhzztAp26jpLpECCFEk1mbsZntR3/gePFJfnHBvDb1YM1XZeJyYw6r+A6gzGbQDBisVtylpRz/8ivC+vdrsuOSpIkQotX58eRBPtq7jBJXRXXJpMQxTO89uV1WiDSFr1d8xt33/ILSUgeR4cEYDAZAAxS6rnP0+CnuTf41b4V2YcqUKYEOV4h60ZXO8kNrABjfYyThtrDABtQC7HkHcDkKiel+AQAWWwQx3ccEOCohhBDtSYmzlE1Z3wFwUdLENpUwgYoqk8IdO9Hdbsy6F6qM+KNpWrNUm8hjCyFEq+L0uPhw71eUuEqJDenEPaNv5fJ+F0rCpBZKKU5m7+ShB39JSUk5MZ1CMfoSJgAaRoOB2JhwiouL+b/fPIDL5QpkyELU21H7CU6U5GI1WZjac1ygw2lWFaPjrOLooWXkZm+mzH400CEJIYRop9ZkbMbtddM9PJ6BsX0CHU6DKKXI+M9iPGVlKF1HdzhqLNMcfZvItxAhRKtiNVm4buAMsoqOc0nvSZIsqYPX4+JE+io++/RDjh3PJyoiGA0DOnq15dTp/4mMCCIjPZ2VK1dyxRVXBCRmIRoiIaIrvx5/JydLcgm2BAU6nGbjKj9FduqXOMvyAIjpNpqgsPgARyWEEKI9sjuK2Zy1A4BL+kxqc1UmJ5Yto+TAQTSDAXN4GMaQ0BqJkeaoNpFKEyFEQJW6yljywyfsOrHXN21I3AAu6zdNEiZ1KC/JIX33O9jzD7D1u4PousJitaB7dVAapWXOKktr6F4di8WC2+Ni5fJPAxa3EA3VOaQTQ+IGBDqMZmPPO0D67iU4y/IwmoJIGHAtsQnjpf8SIYQQzWJ1+rd4dA89IrvRr1NSoMNpkOJDaRz573soXccYHIQxKAjd5cJRWk5JmRdHaTm6y4XucqEZDHgdziarNpFvJEKIgNmTs5+P962gxFVKWkEmg2P7SaLkHMqKj3Fk71KU0jGZQyl16KCB0hVKKXLy7DicbnRdJyy0YuhhqJivASeOpaKUanNPFkTH4fF6KHAU0TmkU6BDaVY5mespOF7xtC84rBtd+16K2RIa4KiEOLeUlBRSUlLwer2BDkUI0QCF5UVsPboTgOm9J7epe8Hyo8dI/9db6G43RqsVg8WCs7SULceO8u2RbIocDiJsNsb16M4FXbthNhox2qy48vJRHg+anwNDyLcTIUSLK3GV8un+lfxwYh8AnUNjuGnwlZIwqYegkDhsIXGYLMHE9ZxKWMi7oDR03Ytm0DCZDBjcYDBqVFwLK7LrSveigNAgI0p50TQ516J12nL0ez4/8A2TEy/gsn7TAh1Os7EFxwDQqetoYhPGSnWJaDOSk5NJTk7GbrcTERER6HCEEPUUZA7ioqQJHLWfoE+nnoEOp96UUmT/70N0p4vo0aPofsN1bNy+nSdfeIHDmUcoLXOhqHhMuDovh6T8XJ555BEmjh2LKTQMQxOMpCl3zUKIFrU7Zz8f71tOqasMTdOY2nMcFyVNkITJWZQVHyMoJA7NYEQzGEkYcC0GoxlN07j82jv45MsdFBcZMJht2CxWLCYdgzJQVlq5BQ3d48BosHH1DXdjMMi5Fq2T0+Ni9eFvUUoRHRwZ6HCanNfjwGiyARAROxBbSCzW08kTIYQQojlZTRYuTJoQ6DAaTNM0EufeyvHPPqfb9TPZtH0bv3z8cex2O8agCGxm0FAoNIwmyDx6lF8+/jhvvvkmkydPbpIY5LGGEKLFHCvO4b+7PqLUVUZcaCzJF8xlRt8pkjCpg1I6uVmbyPzxA05mbfRNN5osABz4YQ//
eelV3A4X9rJyvB4NpUwYNBO614CuV/zzeqGozEGkOYTpM6QTWNF6bTyynRJXKdHBUYzqOizQ4TQZXfdwIn01h3ctxuMu802XhIkQQghRu6p9kVgiI0i89WfoJiOPPPIIdrud6JjOuL0aBkPFa0YGg4bbqxEd0xm73c6jjz7aZKNGStJECNFiuobFMS5hJBcmTeCXY+fTPVxGiKiL22knc+//yDu6DQDd60apipFxyhxuHn/gOW6YcSn7Dx8mzBaB2WSluLQAr7f6xcHjdWMvPYXNEswVo2dycNMPLX4sQtRHmbucdRmbAZjeexJGgzHAETVcmT2LaPNeyuxZvmkuRyGZP/5/9u47Tqr6XPz455Sp23svbAEWWHoHKdLEKCo2YtSoEXPv5SY30RRN8tObW2Jyr1GTG1KURI2xJTH2QlE6iEhd+rLsLrtsL9PLqb8/Blaxowu7C+f9evHSnTln5ntmdme+5znP93n+SnfrXjQ1SNBT34cjtFgsFsuFpiPYxW+3/ZnDHTV9PZTPzVBVah/9I927dp92+5o1a6ivrycjIwNfUMU0TQzDxAREQcA0wRdUSU9Pp66ujjVr1vTKeKzLuxaL5awJRIO8duRt5pfNIMUVW/e8aOi8AVV4qi/4u47SXLMGXY8iSnZyBl1MYnqsXVr1sRbuuW0pu6q2I2BSlJLJ5JnL6PA0sXLLs3T7OzAMg9jKThNRFElLzGDB1CVk5Q2n6sV1jJg1HlG0YuaW/mVD3TYiWpTs+AxGZlf09XDOmGmadDRuxSl56WjcSmLqIPxd1TQfewtDV5BkJ7llC4hPLu7roVosFovlArLm2CaOe0+wtWEnQ9JL+3o4n8k0DI4/9Qy+Q0cI1h8nYchgZLcbgHXr1qHrOgYi4aiGYRiE/V3YXQnILheiCOGoRlK8HV3XWbduHZdeeumXHpMVNLFYLL3ONE32th7kpYOrCKlhgmqI28ZeD2AFTD6FYWi01W+kuzWWDeKMyyKvfCF2ZxKmafLXZ9/k4Z/cRYvXgya7mTl+DuMKxxJ2ZJLkTuCOq37M0YZ9HDtxkHA0hMvhpiSvgrKCEciSjCba6FJTUCIKTrezj4/WYnmfPxpg8/H3AJhfNhNxABZFDXrrCXrqMEyJoKeW4wdf6Mk4cSXkkle2EJvD6o5jsVgslnOnNdDOnuYDAMwrvaiPR/PZTNOk8fkX8OzdhyhLFN9yc0/ABMDj8SAIAh5/FE3TCXlbMXQNdAWXIw9RFNAME29AiW3n8fTKuKygicVi6VWBaJAXD61kX+thALITMrmkbGYfj2pg0JQg3vZYR6G0nHFkFExBECXCUY2//3UTf/h//0ZzMIwjLp1rF97O7AUX03CsA8MbAeKQgOHp2QwbPRdFiWK3O/hgjEoCXCnp2Oz2vjg8i+UTtQY6kEWJ7PgMKjLK+no4Z8w0TdqPb8E0NQxs6FqE7pbd2F2ppOdNiP0tD8BAkMVisVgGtjU1mzAxGZ45mLzE7L4ezmdqeXMVnVu3IYgChTfeQEL56XOC5ORkVE0nGtEQBBHZ7kKPhkiMT0c0TAxR6Mk2QdNJTk7ulXFZQROLxdIrPpxdIgoiF5dMZdagqcgDsDZBX7A7k8gpnY8oyael8D+76jBVLTpTp3+ddw5u4cpFdzD7kjGMnlBAMKgQCamnPY6mqazfsIGZMyYhy6e3WXO5bUiydfJm6V/K0or5wfR/7umqNdAEvfUEPLWIshOIINvj0RQ/GfmTySiY3NfDs1gsFssFqNnfRlXrIWBgZJm0b9xM6+q3AMi/+iqSR1Z+ZJuZM2fy+0cfQxRVJMlGQkIKgjMBUZTABMGM1TaJKgqmHtu+N1hBE4vF0it2Ne/jr/teBSAnIZNrR1xGbkJWH4+qf9M1hZa6t0lOH0ZcciEAiWnvR9R3rV9Pk8/H+OLxNL7XSMqIKYyYdjEXXzqU/KIUAOITHMQnOE57XFVVcbohLTMeWy/0prdYzgWXzYnLNvCWjZmmSdvx
zehaBJsjBYggSg5EKUqg+xjp+ZMGZCDIYrFYLAPb6pqNAIzMriA7IbOPR/PpgnV1NL34EgA5ly4gbcpHLzh0+yK8s2M/qqohaD7ik9IRdRBECZNYNT9BNzEkUMJe4lOyKSgf1yvjs4ImFoulV4zMqmBD/buMyBzC7EFTBmTni3MpHGjlRPXrqFEfId8JSkd/HVGMfSSHIgoP3XMvzz33JLaEBG75xv1kJ6eTkRXPnMsqSEgceCeWFsvH6Qh20R7qZGh62YANLHjaD+BtP4Bp6Bh6BIjVbpJkFwFPLUFvvVX81WKxWCznVLO/jQNtRxAEgbkl0/t6OJ/JXVRExqwZmJpO5pyLP3L/gWNtfOOfv0ftwXexOeIwtChBbwfxzkQk6f0LhLqmEAz4sDvjGDnrFlZtb6SyPOtLzzGsoInFYvlCfNEAm+u3s6BsJqIoIksy35p0ixUs+QymadDVvIu245sBE5s9gdzyS3oCJvv3HeX7X7+F6rojyJLApOJiFl07iRN1YSZdNMhaWmM5r6yq2cDeloPMLJ7MwsGz+3o4Z8zXeZTjB/6OYWgIgowgSEBsuZwo2VG1MO3HtxCXVDRgg0IWi8ViGXiy4tP52qiraPa3kRmf3tfD+UyCIJB7+WWYpvmR78stO4/yzX/5Nm0natBNgQkLbsfhimfPyj/i87RhmgbAybbDIsnJWYxa8A2yB42k0xtB001sshU0sVgs55Bpmuxq3s8rh1cTViO4bE5mDZoCYAVMPoOmhmiqWUXQUw9AQmopOSVzkWQnpmny+O+e4v/uvxdvKIQkO1l680185+f/gShJFA3q48FbLL2sydfC3paDCAiMyRne18M5I6ah09awmfaGd9DVMKJow+FKBUEErGwTi8VisfQt6h89qwAA3JhJREFUURCpzBpKZdbQvh7KJ4q0ttK+bgN5i69EPLmc/MMBkxdXbuOuu75LwNtJcnIS//fwL6kYMZq2E17W5A7j8NGdVB/bTTjiJzk5hZKikZQWjWbuVSPJzEsiwW3H1gsXHK2gicVi+dx8ET8vHFzJwfZqAHITsxk6APq99weaEqS26mk0NYQgSGQVzyQ5cwSCIBCOKHz/1n9l1eoX0ZCJTy3k6kXL+Oq/XYkoWYEoy/lp5dENAIzKGdbv11p/kBr1caL6DUL+ZjQlgCDKOFxpCKKIaZqnbWtlm1gsFovlXNN0DVnq36f5isfDsT+sQPF4Ee128q664iPbPPLUa/zHvT9CVSIUFBTw1788SknJIEzTZPuao8iSzLhR0ygvHgeCRlp6EgB+X4T6g+2MHZPXa9+7/fvVtFgs/UIsu2QfrxxeQ1iNIAkic0ovYmbxJCu75HOSbG7ikgqJBNvIK1+Iwx1LlWzuCPLU8+9C/TEUwc6QIdO5/Cu3Ujokj5Q092c8qsUyMNV1N3C4owZREJlb2v/XWn+QqgQIB1oBE0GUkOV4BPHjr2JZ2SYWi8ViOZeOdR3n6b0vcnHJNKYW9k4R1N6mBYM9ARNnZgZZ8+Z+ZJvV2+rZdsiPIIiMHDWavz75B1JSkgForOumobYLp9OG3SGTnOoiEPADse9dp9NGQ20XjfUeCopTemXMVtDEYrF8ppVH17OudisAeYnZXDviMrLjM/p4VP2fGvUhiDZkmwtBEMgeNBsQEE8WrIpGo9Q1e2nym+TM/Geuqqhl2IhJjJ1cyNhJhQiidVX60yxfvpzly5ej63pfD8VyBkzT5M2j6wEYnzeSdHdqH4/ozLgTcskpmUvHie0oEQ+CIGLosTomsUwTA0NXe65uCYKIpitWtonFYrFYzirTNFlds5GAEqQ10N7Xw/lYejRK7Yo/EWltw56cSMk3b0eOj/vIdm6XjcTUbH56/0PceMVU7HY7AAF/hBef3Y2i6Ljcsfm0JJ1+4cJml4hEVLZvriO/KLlXvnetioIWi+UzjckZjkO2s6BsJssmft0KmHwOvs6jHNv7NM3HVvek7IuSHVGyoUQi/Mcd3+Sr
V15FpkOg2BRJT89g9LhpzL9iGOOmFFkBk89h2bJlHDhwgO3bt/f1UCxnoLqzlrruBmRRZk7JtL4ezmdSowGOH3yRaKiz57bE9MHoWghJsmPo0Q/8UxDRMXTltNslyY4S9WKaVoDPMvAtX76cYcOGMWHChL4eisViAWq66lmv7GRD/TZqu48jiRKzS6b29bA+wtA06p94kmB9A7LbRckdt2NPeT8TJBgMctddd/Huu+8ybWQu3/3qWG67dlZPwKS7M8hzj+3A1x1G1/RPDIZ8ONukN1iZJhaL5SO8ER81XccZmzsCgKz4DO6+aBkum9Xq9rMYhkZb/Qa6W6sA0JRQ7KRJjr122zdt54ffXEpLRwuCJLJm5RpccjFpGXHMvayCxGRXXw7fYjnrZFEiMz6dwWklJDkT+3o4nyrgqaPp6Ep0LULzsShFw69DEAREUaZk5I3oWvi07VVVZcOGDcyYPAObzXbafZLs7umSZbEMZMuWLWPZsmX4fD6SkpL6ejgWywXNNE3erFlPq9HFi4dWEmd3Myl/NMn98Pv1xPMv4Dt0BNFuY9Dtt+LMzu6573jDCW74+h2EPC0cOHCAl156icLs94+hqcHDqpf309kWwATiExxoauxChGmaGAZo6vuBFEEUUKJar2WbWN/eFoulh2ma7Giq4tXDa4jqCplxaeQn5QBYAZPPIRrq4sTR13uuSKfljCOjYArCybovv/6PX/KH5Q+iaCrxTif/9R//zqW33cihqhbKKzKRbVZ9GMv5ryS1iO9M+Qaa0X+zLkzToL3hHTqbYllMDnc6uWULTpt02RwJ2BwJp+0nqSqa6cYZl/mRoInFYrFYLL2turOWIx01iIh0hj04ZAezBvW/LBOA1IkT8O4/QOFXryeuuLjn9vd27uKmW/+Frq4uUtPSeeCBB3qySwCqD7ayYVU1um4gSQLx8Q503exZnm2aJoYOinJ69ondIeP3hjF0E8lqOWyxWHqDN+Lj+QNvcKTjGAD5STnYZftn7GWB2Ie1t30/LXXrMA0dyeYit3R+T9FHb7eXb11/M1t3bMVAIDt3KL//46+pnDgagIqROX03eIulD4iCiF3qnyuENSV4sjvOCQCSM0eQVTzTyhKxWCwWS79imiZvVq9HNTQMDMDEJkkk2D9aI6Q/iBtUTMWP70ZyOHpue+mV1/m3u+4hHI6QllXInx5dzvDhg4HY8e1+t4H3ttQDUDokg/FTi1GV0y+6aJrK+g0bmDljHLJ8+gULl9uGZLUctlgsX1Ysu2Qvrxx+i6gWRRZl5pVexEVFExE/oSOE5XSmodFxYjumoROXVEBu6QLkk19YLc1d3HnlVWw/ehBdlBk37nLmzF1Ct7d/fqFZLGfL3paDeCI+phSMxSb1zywMJdxN3YG/oathRNFGdskcktKH9PWwLBaLxWL5iOrOWg51HMUm2TAIIgoi3kiA6s5aBqeX9PXwAOh6bweunBxcebkAPQET0zT5v+W/539++WtUzaB48Bj+/OjDDCl5f8mOaUJzoxeAkePzmTit+GNr/qmqitMNaZnxZy3L0wqaWCwXuKf3vkhV6yEACpJyuXb4V8iMT+/jUQ0somQjr2whIV8jqbljEYRYsOlog4cVLx9gbOUMqju9zJhzK0OHjiW3IIkJ04r6eNQWy7mj6RqvV6/FE/YiCiLTi/pnAUmbMwmHMxXdFiGv/Cs4XL3TqtBisVgslt7Uk2Wiq6Q4k1AFBYfTSVSP8mb1esrTBvV5tzbP3ioanv0rot3O4Du/gyM9ree+tu4Q/1j1HqpmMGbqQv70m/8kN+P0Ja+iKDD3sgrqj3VSXpF1rod/GitoYrFc4EpTizjYfpT5ZTOYXjQBUbCySz6LaRp0Ne9ElBykZFUC4ErIxpUQi457OztZv24dc7+yCJckopfO4payWdgddkaNz2f8tGJEqzuO5QLy7ok9eMJeEhzxTMwf3dfDOY2mhmKdrUQZQRDJG3wpomjraQ1u+fyqGzys
3g/llR6GlVhd1iwWi+VseadxJ3tbDxBvj0MQBCRE4uwuZF3iUMfRPs82CRyt4fhfnsE0TJJHj8Keltpzn2maPLvqCCMvWkJZxTj+58e3kZoYq50YCijUHG5jxNg8BEHA7pD7PGACVtDEYrngeMJeAkqop8DrxPzRDE4vIdWV3LcDGyA0NUTT0VUEvfUIgkRcUiF25/vdAza8tpJ//953aQ+FQHJTZCShOO3Y7RIzFwxmUJmVxWO5sCiawtvHNgMwp2Qa9n4UjAj5GjlR/QYJaeVkF88CQLa5+3ZQA5Rpmryyqhp7p8Arq6qp+GZ6n1/ltFgslvONqqusqdnE3/a/RlgNk/ihguQOyU5QCfVptkmo8QS1f3wMQ9NIqhxO/tVXIQgCx44d4+9//zvf+973+NolQ3l+bTU3LJtJgvv9lsJvvrCfgD+KKIkMH517zsf+SaygicVyHjrVr31I13CGZpUBsQnt9hN7eO3IW7hsLr475XYcsh1REK2AyecU9B7nxNE30dUwgiCRVTwTmyPWDk3Xdf77337IU88+iSBCTmoqWckJBNol4uMdzLt8GEkpVjthy4VnS8MOAkqQFFcS4/NG9fVwgFi2WGfTe7Q3vAOYhLwNGLpqZZd8CYfquuis6yYBgc66bg7VdVExKO2zd7RYLBbL51LdWcsLB96kyd9KRIvglJ1IH8oQFwSBOLu7z7JNou0d1D76R/SoQnzpIIpuvAFBkti2bRt3fe/7RMIhMjIyuPXWW/nmVSN79mtq8LD6lQMoUZ2kFBcFxf1reawVNLFYzjMf7Nf+Zs16hmSW4on4eP7A6xztrAMgKz6DsBbBYXXH+VxMQ6e98R06m94DwOFKJa98IQ53LGukqf44/3Ld19h/9DAmMLJ0BCteeJrU7GwGV4aIT3Bgs1vthC0XnrAaYX3dOwDMK52BLPb934GmhmmqWUnQE6vGn5Q+lOxBF1sBky/BNE1eXnUYh2ZiCuDQYj8PvWOKlW1isVgsX1IgGuTVI2+xu3k/pmkSViM4ZAeZ7jQEQcA0zdO276tsE9Xn59gjK1D9AVx5OQy67RZEm43nn3+e+376X3R4wkyfMoGrrrrqtP2OHmpj/cojGIZJVk4i868YhtPVv76TraCJxXKeOdWvXUbicEcNLx5cye6W/UQ1BVmUuaR8JlMLx1u1Sz4n0zQ4fvAfhPxNwMn2o0Uzek6w3njm7/zkB9/HFwoiSjIXzf46d/74W6Rmx+qbpKRZqf6WC9fG+m2E1QiZ8emMzhnW18Mh5G/iRPUbaEoAQZDIHjSb5MzhfT2sAe9UlokL0IhNLq1sE4vFYvnyNEPn/7Y9jjfiQ0CgPH0Q753YS5Ij4RODIX2VbSI67NjTYp/5JUtvR3A4ePDBB3nkj0/Q7YswaNhkrrj5eyQnJwOxgPue7Y1s31wHwKDydGZdMhhZ7vsLLB9mBU0slvPIB/u1O7DhVwK8cng1Ge40ilMKuHb4V0iPS/3sB7L0EASRuKRCIqEOckrmkJg2uOe+PUdaeefJv+MLBUlJzuLSq75HXl4Je7efYMjwbCTJCkxZLmyjsofRFuxkdPbwPg/UGrpC4+FX0LUIdmcyeYO/gtNt1Rj6MkzTxOOP8sxL+7ArJpoABmCI4FCtbBOLxWL5smRRYlrheHY37+eqikt44eBKFF3BZXOi6CoQ+yzWTR1FV3s+b0VBJKqd2046ksPBoNtvRQsEUWWJ7333u7yxai3eQJRR065g6dLbuXp2ec/2ne1B3ttSB0Dl2DwmXTToY1sK9wdW0MRiOY+c6tfutrmIKhFsoo2IpjA+byRXD7+0z09aBgrD0NDVUE+9krS8CSRlDMfmiD95v0GHN8ITrx8ifsp1XBKXQ9mYS3C73GTnJjLnKxVWwMRiIbYU8MZRi/t6GACIkp3sQXPwdx0lp+RiRMlanngmIoqG0/7+tPGRf+ylrr6bkDeM06/iBE5liBs6iIKVbWKxWCxnStM11tZupTS1kJLU
IgCmF05geuEEDNOgM9yFQ3YQ0aLv72SaqOhEtSh8IDjikB10hbvRDR1ZOjun/aau463aR9KokQiCgCjL2JOTOHboECvf2kggYjD9sjv4xk2LWTil+LTgTXpmPFNmlmJiMmJM3lkZX2+xgiYWy3kilmWyDlVXibe5iRIh1ZWMJ+Kj2d+OQP+M3PY30VAnJ6rfAEGgeMT1PW1ITwVM3njySX732OP88blnmTM2n7q9EvlTYlXBR4zNY9L0YkQrYGK5wH14fXVfCftbMAyVuKQCABLTykhMK+vjUfVvoYhKS2eIls4gzR1BmjoCtLUFsQkC9y6b1rNd54E25IBCnGYQW6woIGIiGga6KGCIUk+2yes2iYohmYwflkVmirVk0WKxWD5OTVc9Lxx4k45QF3taU/nOlNuRRQlRjM0rRUS+O3UpQSV02n6aprF+/XpmTp+JLJ9+eh9vd5+9gIlp0vC35+l69z0yZzeSe/lXADAMk13HBSpn3owrPonbrp/HrLH5AISCCoZuEH+yxfDwMf2nQ86nsYImFst54kjHMXY170cU6IniyqJEgiOuX/Rr7+9M08Tbvp+WunWYho5kc6FGPD3FXiOhEPfcegcr161BEAR+/9CvKCtZhF01ke0yM+aVUzoko4+PwmLpW6c6d7XuDpAal8K80ukkORPP+ThM06S7ZTdtxzchSnYGVX6tJ/BpiYkoGp3eCHkZ778uj75QxaHqdiTNQNSMk/81EQwTVRROyzYpL0mjuclLa0sAEDBFcKohJF1Fk+yE5ThEoKOmCwew9WgXG9dUk5abyMSx+YypyOxpM2mxWCwXsqAS4rUjb7OzqQqABEcc80tnfKQzDkCyM5HkD32vqqpKkhhPbkIWNtu5K6Da8vqbdL37HoIoEFdcxJo1a8jLy2Po0KEIAhSUj+H6uYOZNCIHgO7OEG++uA+bTeLy60bicPavYq+fxgqaWCznAcMw+NOu54hoEURBIqopPff1h37t/Z2uRWmpfQtfZzUAcUmF5JYtQLbFroge3rGLZTffSn1LE4IA13/lK/zgJz9i3552ag63M+/yClLS4vryECyWPneqc1eL0YnnxE4y3GlMyBt5zoMmuhahuWYN/u4aANyJBRf0UhxVM2jrDtHcEYxlj3QGaekI0e0JIekmP7tzFjY5NjEPHfcQ1xVBkkRssohNlrG5ROw2iYzMeMQPJBBdee1IfvmnbQgtoKJRf2wHJ2p3Eo4GcTniyBs0lqLicTgNGdEm4ZBFImENX00Xq2u7ecMpkTcolUXzBlOYfe4DaxaLxdLXTNNkV/M+Xj38FiE1jIDApIIxXFI2E6fN2dfD+1Tt6zfS+tZaAPKuvoq/v7uN3/3ud6Snp/PUU09x3ZzBTBqezaDcJACaG72sevkASlQjKcWFEtWtoInFYjl3TNPksV3P0eBtQhBEUl1JOGQ7EcJA3/dr7+/C/hZOHH0DNeoDBDILp5KaMxbhZHT/zw88zP8+8L9EVBWn3cFXb/0OP/jP7yJLImMnFTJyXL7VTthi4f3OXQBhNUp2QiZFyfnndAzhQCsnql9HjfoQBJHMohmkZI28IILFum7Q7gnT3BFkZHkG0slies+uOsTu/S2I6vuZI5JmEG+YiKJAR1eQnMwEAEYNy6ZOlsjISiA1I460k/+SU9xI8ulXPKsbPXTWddPWuI+3Nj6B19uKaeiAAJjsOLKVpKQs5lz0dTKLK5l/6TAi3WH272vB54sihDTa9rfRUJLWEzQJRVQcdrln7BaLxXI+q+6s5a/7XgViNcAWD7vknH9vfhFd7+3gxEuvAJA2bw6/fuN1Xn31NfwhhSVfnUdKSgqiKPQETI4eamP9qiMYev9tKfxZrKCJxTKAGabBSwdXsfn4e5imSZormTi7u9/0a+/vTNOkrWEzatSHzZFIXtlCXAnZPfc9/K938dtn/wJAflYRi278CcXFhQgnX15BFKyAiaVXnVreMqRrOEOzBk7tjVOduyK6goGB
gICiK5imeU4+b0zTpLt1L231GzBNI/b3XH4prviss/7cAIqisGbNGv7+4uts23WEl15bzTVXXsrcuXOx23s/y8Xjj9LQ6u/JHGnuCNLeHUbXdCTNIPu2SeScXHajt4dI6I7GMkckEZtDwhYnYbOJpKS6cX6gBtNFs0uZObfsM98z04zVKmmpreK1Vb9BUYLEO+KRJRmTWNhE0zU83lZeWfUbvjL/X9m0J4Uf3DGF2fMHc6LBS9WeJo4eaWfM6PeL/z319700NnoZMiyLaRPyKcxOtL6vLBbLeas8bRDDMgdTmJTLRUUTkcT+P6f0HTxEw7N/A8A2bjT3/e05du3eQ4cnwqgZ11Ey9oqeGiymabL3vUbe3VQHQHFZGrMXDumXLYU/ixU0+YCHHnqIFStWUF9fjyzLjB07lvvvv59Jkyb19dAslo8wTIMXD65kQ902IlqURGci8Y6PXyJiZZt8PEEQyC2dR0fjNjKLZiDJDiB2xfa1LbWo9kKy4hMoqbyI6XNvQzAFAr4oXk+Y1HRrOY6ld51a3tJqdPFmzXqGZJYOmBPGU527DDMWMHHKTuo9J87Z540gCESCrZimQUJKKTmlc5Hkc5PavGHDBu6++27q6+vx+4OIqsrfD7/LytdeoKioiJ///OfMmDHjjB/3VDvf5s7YspoJw7J7aoBsrWpi9eba0zJHXJqBaIBNFuloC/QETSaMygG/0pM5kpoe+29Kehw22+kT11PZJIaioPoDaIEAWsCPPTkFV16sWJ/i8VD7l2eRDoqs2/AYSjRAojsZ8QO/qyYCkmQj0Z2ML+Rh7YYnKCgbgaoZ2G0SBcUpFBSnYBpmT3tJwzBpruvGDCgc2lLPgW3Hcaa6qByZw9TxBWRYBWQtFssAV9vdwOqjG7hx9GLcNheCIHDTqMUD5rseQPX5AJNAQR7/+9yz1Dc04g3BtEXLGDJ8DFNH5vRsu3fHiZ6AyYixeUzuxy2FP4sVNPmAoqIiHnzwQcrKyohGozz88MMsWLCAmpoa0tKsdnmW/qUl0M57J/bii/qxiTbibK5+16+9Pwp6jhMONJOeHwuG2hyJ5JTOA2K1YdavWcO02RdT0+ilOX8kN9+5HIw4BBMysxOYe1kFcQmOvjwEy3nq1PIWGYkjHTXnLOBgmAZRTSGqRdEMnfS41J779rUewhPxEdGiRE5uE9UVIloUmyhz0+ire7JMfFH/ySw3gRRnIn4leNY/bz6YyZJdPBt3Qh5JGcPO2efbhg0bWLp0KT6fj4TEFGQFTIeMgIYjPona2lqWLl3Ko48++pmBk7buEAdqu2g9VXekM0RU0cA0ETWD1DgHoytimTOmN0qqV0WWRew2EdklY5NFZEkkPtFBSvz7n1FDK3OoGJGNEYmgBfyxYEhLK6qahq0w1lVI6e6m/s9Pofr96MEgelQ5bWwZM6aTl7cIAEGSCB6rpaZJx+NrJ86VhCCKxBLwTn/dBSDOGY/P14a3djvh2gpspcUIUixY88HJsygKfO3aUbz3XgN1NZ2EwhpKe4gdb9Wwff0xcgel8i/fsC5iWSyWgSekhnnjyFq2n9gDwNvHNnPZkLkAA24+njZpIo70dO7/4wrqjjcSNt3MvnYZJSUl/PPVI0lLcvVsWz40k4N7mhkxJpcRY/t3S+HPYgVNPmDx4sWn/fzAAw/w6KOPsm/fPmbOnNlHo7JYPl5uQhZLKq/giV1/I6xF+k2/9v4g5Gsg1XaAkK+SpLTYSadp6LQ3bqWzaQcAroQc4pIKe/bpamnlnttuY/PevXz/P/+Dr152LatfPUjEEwFg2KgcJs8o+ci6foulN5wKPKiGhhM7qqGdUcChxd9GSA0T0WIBDeVkYCOqKdgkGxeXTO3Z9qk9L9AcaOsJlJwKtkKsKv/dM5b1/Ly+bhsN3qaPfU7nycysU1kmsiijGRoOZGyS7axmt5mmiadtH0FvPXnllyIIIqJkIzlzeK8+z6dRFIW7774bn89Hbm4uHW1eTCR0
0YZkmERDCrm5uTQ1NXHPPfewdu1aVF3oyRxp6QwxaXg2BVmxeiL1zT5efvsIkhYLkkiqQbxuYieWPRLsDvc896jhWbQc6SAp0YbLruKyqbiEMCl5KcQX5aGqXg5sr6H5hRfItjnQgkFMw+RgaytBVUHTdeKGDSV54gRUVSXi99P+9tvMLH1/SdjK6iN06SqGJGPzduHcuxtVVVEUBTHkQ0xScDglUhwm7Z42FMMA00QAnDY7DpsNBwKiIKCYUTyHV1H7+zCS00HC4HISKoaSNHwYcvz73XtKh2RQOiQDVdE5eqSd995roKGum0hEQ1KMnu1UTee9qhbGjcjGbht4ad4Wi+XCYJome1oO8urhNQSUIAAT8kZxccm0z9izf1E8HkTZhhwfy7KOLy3ha9/4Fpv2dTB10uWUFObwzcUjSYyzo2tGz1zZHW/nmpvHIp/lz+mPm/f3tgF11vSXv/yFjRs3smPHDqqqqlAUhccee4xbbrnlE/fZvn079913H1u2bEFVVSorK7nzzju57rrrPvW5FEXhkUceISUlhcrKyl4+EovlizFMA3800NONYmT2UP7f7H/rN/3a+wPTNOlo3IpT8tLRuJXE1EGoUR9NR98kHGgBIDmzElfC+33ht77+Jnf967fp9Htx2CT0rm6q3jlOxBPrIjF9bhmDh52b2giWC9OpwIPb5iKqRHDbnOxq3sfybU8QZ3cT1aJE9FiQI6JFSXUl863Jt/bs/5e9L9AR7PrYx052Jp4WNOmOeD92W0mUED+0nro8rZhUVzIO2Y5TduCUHThkOw4p9v89wR5dJcOdCgj4fT7g7NVSMnSF5mNv4+s8DIC/s5rE9CG98thnYs2aNdTX15ORkUFU0SEcIqyDbkTBNLEpIYKJcdhdiew/WM2UudfhSszCMDQMXcNUNYpy4snPimPQoEFMm3YVmT4VWYQXXv014UgAw9AwMRAkgVdW2pAlg6yowk/mzGOMoiAA33rxeTqDscm4nBCPPTkZANMwSAuE+J/LFvWM+bGd22ny+xFEEWnfXuQ3Xu+5LyM+gdu//S/IcfHIiQk8uHQpBw4ciN3ZfAL27O7ZNiUlhUGDBsVa3CthdENH1bTY85qg6BrE4s3YJQmHy4We6EKOc6MFQ3j27sOzdx+2pbeRWDEUAD0SQbTZECQJm12iYkQ2FSOyUaIaB/e3npbht6OqmTee28sbL+4jtziVyZMLqBySZRWQPUeWL1/O8uXL0XW9r4disfRbXSEPLx5cyZHOYwBkxqVz1bBLGJRS0McjOzNaIEDN7x7BNA1qh5Rz6eLFKJrBM6trGTv7Bopzklh65QjcThuerhBvvrif8VOLKBuaCXDWAyYfN+8/G9k7A+rM6Sc/+Qn19fWkp6eTk5NDfX39p26/du1aFixYgNPpZMmSJSQkJPD8889z/fXX09DQwF133fWRfTZu3MjChQsJh8NkZ2ezevVqUlNTP+bRLZZzyzAM/rr/FWq7G/jm+BtJdScD/atfe38Q9NYT9NRhmBJBTx1txzfhaduHoSuIkp2ckrkkppUDoGsaD/3gHh598s8YhklyXCL3P/Qr5l79FfzeCEF/lIvmlZOWEf8Zz2qxfHEfDDzE29xEieCQ7ER1hZ3NVWS40z4yAQjJ4dN+TnelICKeDGjYcdocOCQ7DtlBvP30+juLhsxDM3ScNgdOydGzz8cFU+eXfXqW5ZGOYxzqOEqc3Y0kSqcVoT4btZQioQ5OHHkNJeLhVLerhJN/z+faunXr0HUdQRBoajxBOBBAN9/PhnDIduzeAKopoikKR3dvJN6dHFvAIoggCnQ32qlxy9SLMhPq2nGbQ3ETwd9+jHAkgIiBPd6FIyWZqKoQiRq4QxGMkwETUZaw2e2IqoogitgdDlwuFzabDVmSyCsrZ8hd30GOj0OOj2e8U6alpQVZlnv+2Ww2ZFkmLS2NuOLinvFfeumlTJgwoWebU9vZbDZcLhcbNmxAj0QwHQ5S3XEYJ997
RdNQDZ2IpqHqOgJgaBr548Yx/Kf38oNvfxspEKDU6SYzzs2pb6/WNW/TufUdEgaXk1gxlIShQ7ElJmB3yIz6UFq3tzOMLIloUZ2mw+3843A7L7pkisvTmTalmLLilAGX8j6QLFu2jGXLluHz+UhKSurr4Vgs/dK6uq0c6TyGLMrMHjSFmcWTB9xFSz0a5dijf8LT1Mzv3nuX/X8L0REI8PWvf52vLRzKpt1N3HRpBQ6bRMuJWEvhaERj97sNlJSnI0pnPzv7w/P+oLee+OTiXn+eAfXOrVixgvLy8p7Cavfcc88nbqtpGkuXLkUURTZs2MDo0aMBuPfee5k4cSI/+tGPuOaaaygqKjptv/Hjx7N79246Ozt59NFHue6669i2bRvp6eln89Aslk9lGAbP7nuZvS0HEQWRlkB7T9DE8j7TNGk/vgXT1DCwoWshmmtWY3el4k7IJbdsAXZnbILXWl/Pt264iV2HDgEwpGQ4d/y//2XuovEAJCQ5ueKro62Jt+WsO5Vl4rI5e066BUEgwR6HZmhMLRxHaWoRDjkWCDmV8fFBt4z99OzJDypM7p11xR8M9iTYP74wcm9lm5imibf9AC11azENHdkeT175QtwfyBg717q7u/H7/XR7vKDpmIKAKEjYbHYEBGTJhj0cBNmNaEJ6Ygbjhk5HEiVsLjdxackUl2ZQOigF/0uv4DAjTGcPNpeD/3fZPASXE1dCAkmlpaSPH9sTCDHa2skdVIwcH4/odPLGf/47kiQhSdJnvr4/+9nPPvfxLVmy5FPvjzY187hhoJng+EBGo10UkU7WLNFNg4iiEFZVJpWVE45EWP/uuxiGwRrgD4sXk5OTw5gxY8huaaXEFMgOR/DsqQLAnZ9HwtAhJA4biruwEOFkR4YFc8u5aEoh725vYO/uZrraAhhhjWN7Wzi2t4XrbxrL8OHZn/tYLRaLpTcYpoEoxD6nLimbRUiNcEnZzNNqhQ0UhqZR99gTNBw6zC83b6BVkrC5XOTkxAq9Di1KZWhR7LhqDrezfuURdN0gMzuB+VcMOycBkw/P+01To/34FuKSinp9/j6ggiZz58793Nu+/fbb1NTUcOutt/YETACSkpL40Y9+xC233MITTzzBvffee9p+LpeLsrIyysrKmDRpEuXl5Tz22GN8//vf763DsFjOiG7oPFP1MvtaDyEJIl8bdRXDMvvmymp/F/TWE/DUIspOIIIoOdHVIAmppeSXX4pwcumBYZi89tAj7Dl8GFmSmD3/q0yfs4TWmjDHj3VRWBL7ErACJpaz7YOBB0zoCnuQTZEkYllkXWEP9Z4mFg2d3+9+H08Fe+Ls7k8cW29lm7Q3bKWzaTsAcUlF5JbNR7ad+24qfr+fuLh4Dtd309Choao6GDp2mxOnMwG73YUACKYJgoBmqLH1KpgMLyzl9vmzSE6ykzusmLzpExAEAdMwCF90EXJCPHJcHKLNxqcuCv5ANgiAw3HuC1ObpskQr48cdxxNoSAZ7rgP/A6YJ48ZRBOCqkquO44hXh+SJPHAAw+wc+dOdu3axcGDB2lubqa5uRmAWRMncte8OfgPHSZ4vIGDVVUUNDTSte1dht33k57nNxQFd5yDWbPKmDWrDK83zJat9Rzc10LErzC4PKNn22dfqMJhk5gxbRBpKe8XKLRYLJbeElEjvHl0Pd6In5tHX40gCLjtLm4cdVVfD+0LMQ2DhmeeY/fWd3hw43rC8XGIrkRGzb+D0ROmv7+daVK14wTbNtYCJ1sKXzLkrC/JOaVn3i85gTCi7CTgqT0r2SYDKmhyJtatWwfA/PnzP3LfggULAFi/fv1nPo5pmkSj0c/czmI5GzRD55m9L7K/7QiSKHHjqKuoyLACJh/HNE3ajm9G1xVsshuIINvjMU0NLeqPpcOf3O5Pr+znaM40Lp94jJxZV5AeX0TQE8XhlBGl/nViajm/fTDw4I34yexQGFcV4PikOLzZ8f22VfipYE9Ui+KUHWe9c1dC
aildLTtJz5tIWu54BOHcFWQ2TZOqqir+/ve/s2bNGn7z0/9g39qDjAhF2SLbcNpTsJ3W7t082UTGRBdtmGoAV4Kbf/rJMuZfeulHHl8QRdwF+efqcHqFqWng8fKvEyfz35s20B4MkOJ0IosSgmliGgaaodMdiRBns/OvEyeDx4tNFJkxY0ZPJ6FQKERVVVVPEGX6/PnkLFxAzsIFVO/bx33XXYfTNBlRVs5FTz7JmDFjGDpkCNU//19siYmxLJSKoSQWFrDwkqEsvGQo4ZCCzR6bsIciKvt3nkBQDXZtriMpI45hlTnMmFZEnNvqgmaxWL4c0zTZ13aYlw+txh8NANDoa6Ygqe+yIL8s0zQ58eLLvPnSy/xu62aElGTi0gsYNvM23Ilp7Kvp4OLxhZimyZa1NRzYEwt6Dx+dy5SZJeespfCpLBNdUzDNCAIComhH0yJnJdvkvA2aVFdXA1Be/tETzOzsbOLj43u2OeWHP/whixYtIj8/n66uLn7729/S2NjI1Vdf/bHPEY1GTwuo+E4Wv1NVFVVVP3afj3Nq2zPZ52zrj2M6EwN9/BALmDy77+WTXSkkvjriCsqSiz/XMZ0Px3+mult2035iL9t21vLenmY8HV6S05OYPKGMSWNEQtvf4n/++//4wc9+RmVpCkfqO5m69B466zxEIyppmXHMXjiYhETngH/dvuj7P9CPe6D5YJaJQ7Kj6iqVhwLktkaR9raxOyvurBVT/bJ0Q6cz3IVDdpy1zl1KxIPdmQyAKz6LsjG3ndPsklAoxKuvvsbjTz7DwUNHSE10Em1r48X/eYCCYYvJHLyAlL3v0eVrR7a7EU8GSoh1XQbAME2CkQhpSTnMmTPnnI39bBNtNob84C5KAwGK33mHe3/+c443NqLpOqqmY7OJyHY7ZSUl/MfddzN98mTk+ATED9XYcrvdTJo0iUmTPtpKuM3rJTE9nVAoxO7mJnb/3/8BYBNF8qMKVwyvZERDI62r30KOc/d05EkcOgSwxx7EhGGV2Rw73EE0qOBtC7L1raO8s7aGtOwEJk0qZNKkwo88t8VisXyW7rCXlw6u5FBHDQDp7lSuGnbJgA6YAOihEDXb3mX55o2IKclkl4+hdNIN2BwuLr+ohIvHxz4zBUHAbpcRBJg0o4TKc9xS+P0sEzuaGowVJhcEJNl1VrJNztugidfrBfjEAlWJiYk925zS1NTEkiVLaGtrIzU1lQkTJrBx40YqKio+9jHuv/9+fvrTn37k9lWrVuF2n/nEbvXq1We8z9nWH8d0Jgby+FVT44B6iKAZZqxtCDXbDlHDoTN6jIF8/J+fQZzUTM2hzSz/0zqaW33ouoEIGMAbb+0mId6JbCxHCJt86xu3s+z7P6RAEanZHQt0JqWZiHHdbNzU2KdH0tvO9P0PhUKfvZGl13wwyySsRcjqUMhtU1FlgbTmACmtQbr7abaJLMl8d+rSs9K5yzA0WmvX4u04RPGI63HGnarAf24CJn6/n/954CFefP4F/F1dhEUnsigzbvJslgwuJKO1jZasVLRQApdPvpzn1j2DP9hFnCvhtOPTdJVgOIDTEcfi4dM51hSgYtD5k91gT07GnpzM/GuuYdaiRaxZs4a33nqLqqoqKisrmTNnDnPnzsVut3+hx582bRpr167lyJEj7Nq1qycbxev1ckwUSJk5g2SnE/+Rag7V17N90yYqMjOZdvViyq+7BgCXQ2LJNSMRRJEjNR1s2VrP8aOdaBGNjiYfB/a39ARNDMPE0I1zllZusVgGJsMw2HR8O6trNqLqKpIgMmvQVGYPmjLgCr1+HDkujqk/vpvvpCaz8nAb2cMXIkky184ZzJTKnNO2HT+1iMLSVLJyEj/h0XpfyHcCNeqnq3knhqFhsyciCALRkAKAKNlRtXCvZ5sM/He2Fz355JNntP0999zDnXfe2fOzz+ejoKCA+fPnk5j4+X95VFVl9erVzJs3r990OumPYzoTA338p8xV5tIW7Dzj
9mTny/F/lrC/ida6t9m0ZQ/3/+pNgiGF5EQ38snPR9M0ae8OcfxEF5IoML60hEf+8HsMWyad9dW4U0Umzyxm8PDzq53wF33/T2XLWc6+Dy5vcch2fEEfzjcP8/uj3bQQRzYhysOdRL8+Cclu+9LLW86Gs9G5Kxru5kT1a0RDnQCE/c09QZOzyTRNDBP2bT/EoTWbePmPjxOMREhJzGTC8DmMrJzBZdeOp3JwMqLdzhBDoPq3rzI0s4Qr5/4Tqzb9BY+/HcMwiKWZmIiiSGpiBvOn30hxahrrXtrI0H+7ot+8f73Jbrdz6aWXMm/ePF5//XUuvfTSXvnukSSJiooKKioquOGGGzAMg7q6Onbt2sXMBQuIj4/H1HXW/Pd/8+bWTbx29DC/ObSfIS+9wNixYxmclk7q/gPkjx5NRsVQbrxqKILTRdXBVrZta2DatEE9z7X+nXo2vXmY3KIUJk4sYNiwLKRzUMjQYrEMLLqps61xF6quUpxSwOKKS8iMH/gNQzoaG4kA+fn5GK44lPJLyLV3IUkiNy2sYFR5Bp6uEDu3HWfGvHJkWUIQhXMWMAkHWmlv2ELQezyW1aoEkGQXgigi2dxALFv6bGWbnLdBk1MZJh/OJjnF5/ORkpLypZ7D4XB8bAG2U635ztQX3e9s6o9jOhMDbfyqrlLdWcuwzMEAJNuSSI774u38Btrxf16madBat57u1r1EFZWHf/8GwZBCRnoSpqZhmqBoBh1dPlRVRxYFJFnCL+kUDB2Kw+HA71UoHJRKRnZCXx/OWXOm7//5+LvSX31weUv1zkPs/OPbRFt8qNgwBYn9ps6G2kO49+xj3O1zyB1ReMbLWwYab8dhWo69hWGoSDYXeWWXEJd0dpdONDY28tennmLTypX8eOY8jh7z4HNkMmX8NTicCRQXDcOVkkxucTrpKW7kuJO1S1Qd95GDtDoGUZqTwj9ddQ/VDfs5duIg4WgIl8NNSV4F5QXDEWQHQUSyjhxE1S7HbmUyfGGiKFJSUkJJyfsZV4IkMWnuXPzArl27aGho4PDhwxw+fBjV40X1+3mg20Pujl0IooCck0N25QhumjcEV35az+Ps2dOEqujUV3dQX92B0ylTWJrGlMmFDCpNR/wc6/S3vdfItq0CaZmNTJ8y6DO3t1gsA0NUU7BJMqIgYpNsLB62kK6wh3G5lT3dcgayfWvX8e1vfhNXXi5Pv/giDlccEUXDbpO47fLhDClKpaXJx6qX9hONaDidNqbOLj0nY4uEOmhv2Eqg+xhwqra6iWnqiNLHZzKejWyT83Pmxfu1TKqrqxk3btxp97W0tBAIBJg4cWJfDM1i+ViKrvLnXX/naFcdVw27hEn5Y/p6SP2WIIiYJ1uz7t7fRXOLh5TkuNgnqWmiaCbN7V5Mw0ASBdJSk7DbJVpa2nn1pae5+rpbGTel6DOexWI5e04tb3lr7Vsse+RPRDv9JDri0O1JxIpigBz1Emj1UbViMzf9ZjFzp845LwMmhqHRWrcBT9vJNrMJeeSVL0T+hDbGX5au66xft54VTzzNe9vfJd4GkdY2tmUUEs6ZDzY7I/KHk5qdzOAR2ZRXZJH0oa4rkqmjx2Vj6DbsRhRsDoaXjGV4yVhMw+hpjXuKItrQ7dlIpg5YQZPeNnXqVKZOnQpAR0cHu3btii3p2bGDthMnGHn5ZQSPVBNubuHhvz7HkeW/YUhmJnPuuJ3Jc+ZQWFjI0pvHsXVHI7t3N+FtDRCJaBzZ30r1gVbccXa+ccdk0jPjP3EMhmGw/q2jyKrA+reOMnVSEaI48E+mLJYL3YG2I7x0aBUziicxrXACAKWpRZRyfswjN7zyCt/7128RiETIjYvD4/FQlJTEHVdW0uEJU5idyLEjHax78zC6bpCRncCYSWeWAf9FKBEv7Q1b8HUeOXmLQFL6UFyJeTQeejGWZfIpnft6O9vk/Jt9nTRz5kzuv/9+Vq1axZIlS067b+XKlT3bWCz9gaIpPL777xzr
qscu2ciMG/hpfr1NU8OYho7NEZu0ZhZOJyG1nIf/8C8YhoHNJmOoGggCNpuIwy6DCRmpcUiSBIKArhusfON5Fl97y3mZIm8ZWNyik1/+5/9ihjQyHS4iUixIIJgmpiBi2uJIw6CzuZX//pe7mL9lC5zDdcPnirf9YE/AJC13AhkFk89Kd5yW2loef/j/eOHV12gNhohIsQnX7MvmMDcrk1kLF7K11kZSWjzlFZnk5CV9chcAUUYsKMXZHQZOP5GORiI4nM7TbnMCYkoGiOfttKvfSE9PZ968ecybNw+ASCSC8+T7oXg8NFx+OV2GzpbGBnb+9rfw29+SmppKicNJRXoGN33tBsz5Q3i3XmFfVQvhrjCGbpKS9n5Nnfd2NJCXlUh2XmwtvaIo/Or/nub1l14iGvbjcCVgd53g3751wxeu6WKxWPqWN+LjpUOrOdAWO2l/78RephSMOy8yS0756+OP818//gmapjGkeBBL7/8DRUWxYJDbaaMgS6ZqRyPbNtZimlBUmsbFC89NS2HT0HoCJolp5aTnT8buTKGu6plYp0zJgfGBzn1gYHygc58giGi60mvZJuftt/ecOXMoKSnh6aef5tvf/jajR48GYst1fvazn2G327n55pv7dpAWC7GUv8d3/Y3a7uM4ZDu3jrme4pSB1X7ybDJNE3/nEVrq1uOMy6Bg6JUnI8gO3Im5dHd7ME3o6PCRnOBEkCQEIDMt4fR06pNVtT0eD6apIwjn7cefZYBYs2YN9fX1JMkypmqiCzKnskxOtas1JBvJdmjsaGf99ne5bNEiAAI1x7AlJ+FIS/vExx8okjOHE/KfICl9aK9WugdQfT66du2hftO77N2xg0c2rEUXZOzOJCqGTmf8mNl8557LSUuJnRBfPsn8XBMrSRa59rZJREKnd5zSNJX1GzYwc8ZEZPn05W4utw1JPn8m2wOF8wMBLHtyMv9YuZJ9+/axY8cOdu3axb59++jq6uLEiSaOJyczMyX2N1WaEE9XJEz6kOHkjpzcU9/E44/w0j/2IQNJSS40Gnjur7/h8JEaTF1DMMEUoOqnG3nh+Uf4+c9/3tNi2WKx9H+GafBOw05WHl1PVFMQBZEZxZO4uGTaeRMwMQyDB3/+cx5f/lsMTWPasBHkXHU3q/d0kZbZzKQROZiGydb1x9i/uwmAYaNymDqr9Ky1FFaVACFvA0kZsQYsDncaWUUzcSfm4YzLODluDSXqRZLsGPr7nftME0R0DF35YOM+JMmOEvX2yrx/QJ01rFixgk2bNgFQVVXVc9u6desAmD59OrfffjsAsiyzYsUKFixYwIwZM1iyZAkJCQk8//zz1NfX88ADD1BcXNwXh2Gx9IhqCo/tfI46TyMO2c5tY6+nKNkKmJyiRgO01L1NoLsWAE0JoGvhni4aakShqzmKzxdGEgX0iIk7IZ1TPT9NBBBANDRsgoYkOykoGY9oXe219APr1q1Di0aRBIGQ6MYEopEgoWiQlKRYcWJFcuEWTEwBNmzaxGWLFmGaJg3P/Y1oRyfu/DySRlaSPKoSR0ZG3x7Q52QYGl3Nu0jNHo0o2RAEkbyyS3r1OXw+H4//+Cd0Hj7CRWWjqVdT8OfOo3ywQV7eYIaPmEJBUQZDRmST4H4/E+BMrkTFJziITzi9rpmqqjjdkJYZb9UI6qfcbjcTJ07sWaKtKAoHDx7k3Y0bsfsDJGVmEThaQ6Dbw4N/exbNMHC63Uy6ZAFjxowhMb0YXTZBgV173uXVN3+NEg3hdsa6JwlmLPCpmjqHjxxl6dKlPProo1bgxGIZAFoD7fxt32s0+poBKEzOY3HFJWQnnP2C5OfS737zGx7/7e8wNI3FEyYjzV2GT5fJSY9naHEqAMFAlKOH2gCYNGMQlWPzzkqWtqaG6GzaQXfLHkzTwBWfjd0VqzuamjP6tG1FUaZk5I3oWvi021VVZcOGDcyYPOMj372S7O6Vef+AOnPYtGkTTzzxxGm3bd68mc2bN/f8
fCpoAjB79mw2bdrEfffdx3PPPYeqqlRWVvKLX/yC66+//pyN22L5OJqu8cedz3LccwKn7OC2sddTmHxue5z3V6Zp4mnbR9vxTSejxiJpeRNIz52AIEqYpsmqJ//C/9z/cw6daMQ0QUbEZo9HN05dBXj/g10wBDQljGmTmTNnft8clMXyIc3tLSiREJozAV2UwTQIhH1IkoxhGoiChCbYaA4r6IZKU1NsEmdEIthTU1C6ugg1niDUeILm19/ElZtN8qiRJI0aiTOzf07wlIiXE9WvEwm2oUY85JTO65XH1UIhvFX7aLTb+cPjz7B101oMnwcxFGLUpCtojWRji3dz3Ve/RcWIbMqHZZKeGW8t07Ngt9sZNWoUo0aN6rnNUFXq3tvB1IY69lRVERIEtm/fzvbt22NdG9ramT1pGpt37CaqhEiIS6Xn+rMAgmkgizKSlITP5+Oee+5h7dq11lIdi6WfM0yTE/4WnLKDS8pnMTF/9HmTXfJB8waV8JI7jsvGjKNjwo34TBtFOYnccWUlbmcs6BCf6GT+FcMJ+qOUDun9izK6FqGreSddzbsxjFjWpis+B8PQPnU/myMBm+P0Jg6SqqKZbpxxmWftgsWACpo8/vjjPP7442e0z8SJE3njjTfOzoBOWr58OcuXL0fX9bP6PJbziyzJDE4bRHuwk2+MXUJ+Us5n73QB0NQQJ6rfIORrBMAZl0Vu6Vwc7lidl+rt2/nvH9zN1n37MAyT1LgkQCSkKoiyCwET8+T0VcAETHRBwq8oZLnjufjii/voyCyW0ymhbtANFDGWrRBVQpgYmCjYZBNdB1VXCCkRdF3j5RdfY+7sK7niyoVcufhShqek4N23H8+evQSqjxJuaiHc1ILS7aHgumuAU+t8zyyD4mzxd9XQVLMaQ48iSQ4SUsu+1OPp4TDe/Qdo2fYub6xcxZsHD3EoqKAhIdlEpk0YyeLLLmfmV64kYesJyodnUVCcYrWRtXwm0WajZMpkfv/CPzBNk2NHj7KnqoqdO3fy7vr1NCoKnroafO0NpDviiBgq3f5ObDYHTrsLu2SL1SYyTNzxSdTV1bFmzRouvfTSvj40i8XyIR3BLtLjYtkVOQmZXD9iESWphSQ6Prnw80DU3t5OxsmM1PLLvsJ/doV5vdmOX3IxuDCF2xaNIBJQaGoPkluQDEB2bu/XUTN0la6WXXQ27exZYuOMyyCjYGqvdbo5GwZU0KS/WrZsGcuWLcPn8/W0OrZYPo85JdOZmDeaROf52/b2TImSHU0JIogSmQXTSMke1VMUMurx8b/f+i6bq48gCAKVI6cza95iuta+wu+2r8MX7CbOlXBahxFNVwmGA7gd8dw2ZDjVW6uonD2hrw7P8jEeeughVqxYQX19PbIsM3bsWO6//34mTZrU10M7a3RDp9wwWCNIRA0BWTQIRYIIgklSoguHXSUcEpAEEZcjnnDEj122c+DQfqp/dYQnnnyU0tJSvnHbNxl9xRIK4wR8Bw7i3buX5NHvXzEPHW+g4ZlnSRo1kuSRI3Hm5pzVCUnI10Cq7QAhXyVJabG2sKah09awma7mXUDsSlJe+SXYHGc+GTMUBe/+A3h27cZ38BDBUJSn9x7i1UP70EQbgmRjcMk4pl+0gJ/+103IJwMk8xZZ382WL0YQBErLyyktL2fx4sXoikLNO9v4+S9+gXH4IIbkRFWiaLqKpqvYRBlTsscWiJomkbCBruusW7fOCppYLP2ILxrglUOr2dd2mG9Pvo2ck0twRucM6+OR9b63336be++9l+9/73tcceWVtHvCPNediekwGVWewdcuqaCrLcDKl/ZjGCaXXzeStIyzEzQyTb0nYOJwpZKeP4WE1NJ+Gyw5xQqaWCznUEgNs/roRhaWz8Iu2xEEwQqYANFwF3ZnMoIgIooyeeULESUbdmcyuq4TDgfQTBt/er2aIWMWcFwRGH/1P3PFpTPY9sQq3EMXcXnyCFZvehKPvx3DMIgtzzERRZHU
xAwumXYD5bYuav7xCsNnjrNaQfYjRUVFPPjgg5SVlRGNRnn44YdZsGABNTU1pJ0HhU4/Tl1HPaNSUkhKzKTd78Fld6EZKpIICXFORNFAlg1QBQzTID89nx/OuoTXzRQC/hr8Hceoqamh6bifvdu3kZjgJDE5SlxGEfML32+D6N27l0hbB5HVb9O6+m0c6Wkkj6okadRIXHm9uz7ZNE06GrfilLx0NG4lMXUQmhLgRPXrhAMtAKTmjCWzYCqC+MUq7wfb2njuv35GssuFkVBBi5xNwohK3K1ehldMY8rk2UydPpShw7N7AiYWS2+S7HYGz7gI+S9PYjpcaKINhw3EuBRUTcFms5/6+kEwTQzdQFUNPB5PXw/dYrEQK/T6buNu3qheR1SLIggC9Z7GnqDJ+cQ0TZ544gl+85vfoHq9vPzb33HZpZeSmeLmsumDaO8Oc+2cwdTXdLL2jZMthbPiccf13lJC09Dxd9eQkFp+spGDk6yi6QiCTGL64LPSLe9ssIImFss5ElLCrNjxDE3+VgJKkK+Nuqqvh9TnTEOno2k7nSe2k1EwlbTccQA9VbLffeVVfv7Tn5I/biwP/fZ3KJqBZ/gs7rr5JhwBhd1rjxEyHWiSg0H5w/mnK+/maMN+apoOEYkGcTriKM0dSlnBcEx7Ak3RNvL9R1CjKg6X49OGZjmHFi9efNrPDzzwAI8++ij79u07b1vD72o/SNX0McwMDOXVN36LJ9CFIBjExzkRBAkEE1OK4vUp2B1uLr74DubdcwNX5adjGCZum87mzZsR9EJqjtXjCyhsfel5Dh7aiNsVx7hxk1l4yTyuvGIWrrw8PHv24j94mGhHJ61vraP1rXXYU1Mo+5d/wp6a0ivHFPTWE/TUYZgSQU8dQW89Dlc6SsSDKDnILZ1HQmrp53osQ1XxHzmCZ9ceBFlGnD6N5Y88ybbNa2ivqWFCZSUzJk3G1xwmI97O//7iEUaPyyM3P/msVfa3WD4oKSkJJarhlExEQUS0u3E43D0NsEyEnmyTaFS1MpEtln6gxd/GPw68yXHvCQDyE3NYPHwhuQlZfTyyL0ZRFNasWcNbb71FVVUVGzZsYM6cOcydOxdBELj//vt5+eWX0fx+Ls7J5WsVIwhUV5M0fDgXjy/ENE327Wpi24ZjmCYUlqRy8aVDsfVCS2HTNPB2HKKjcRtq1EfBEBvxKYMASM4c8aUf/1yzgiYWyzkQVEKs2PEMzf424u1xzC2d3tdD6nNhfwvNx1YTDXcBEAm2YZqxdp/NR47wi+//gDe3bkUUBBr9ftrb27hxwRCajnVzcNcJlKgOgkBhZQkN9R5kScSWnsCY/AJGs4BQMIQ7zo1wsiCsqhpEjARKL7vECpicob/85S9s3LiRHTt2UFVVhaIoPPbYY9xyyy2fuM/27du577772LJlS08R7jvvvJPrrrvuU59LURQeeeQRUlJSqKys7OUj6R8UXaWq5RB6SwbFOUlcOvUGnl3zGwzdQFF12rt8YIIkCWSkZ3HxjJsZlDeEDVua+PodBQiCgGmaLFy4kNauEF2SyKHD7eByYrO7CYQCrN+4hg2b3uLh36YyZ/ZFzJo1i6lfuQKzsRZf1T58Bw5iqiq25PdP5Dx7q7AlJuAuLEQ4w0ws0zRpP74F09QwkDFNjfbjWyiu/Cr5gy9Dtsdjd376SaOp6/iPVOPZvQfvvv2ooTA7Gxt55Ugt73X+P0zdIC7JSenQIUy+/DJmzRlBJKxROjgDm/3LT/AsljORmT0cTAFdUxEl6YO1x2NOZpvomoKIGNveYrH0mbePbWZNzSYM08Ah21lQNpPJBWMHbKHXDRs2cPfdd1NfX4+maSiKwp49e3j22WfJz88nMzOT5uZmjHCYG8oGMyF3EPszKxleNgQA0zB5Z8Mx9u3q3ZbCpmng76ymvfEdlIgHANnm/swCr/2dFTSxWM6yQDTIih3P0BJoJ8ERx9JxN5AZn97Xw+ozhq7S3rCFrpbdAEg2
F9nFs0hILSfq9/PIff/OY3/9K8GogmnC5IlTuP/3vyY7O5uNa6o5VBVL80/LiGPyzBLe2VALiDjcjp45q2maIElIsq1n+YFog6A/ypFqH8MnmP1+7WR/8pOf/IT6+nrS09PJycmhvr7+U7dfu3YtCxYswOl0ntbu/frrr6ehoYG77rrrI/ts3LiRhQsXEg6Hyc7OZvXq1aSmpp6tQ+pTB9qOEFVVpKAdSddITs1j9PBSTDNE2aAsQuEw8XEOxo8sYExlKZpuEAhVU1XjRtcMZJvEsb1/QY14EUSJ8RkSEzIlrp08hm7/cN7dfYLX3u6goXYPSjTCqpWvsHP7Osw77sAwRNJyEnGVjCcl0Ul36x5Sc8ZgGgYnnn8BRfIjJ7mJKy4hoawcV24eomxDECREUcbhfn+5lKGrIAgIgkTQW0/AUwu6gKwGMW1xBDy1BL31xCcXf+Zr0vb2WtrWrkcLhgCTTkXmgS07qDpRjyEIIAjk5Qzm0isX8/9+9HVk2Zq+WPqOYRiE/JmkJGbg8baQ4E45PWZyMtvEMCEU8ZOclE3In4lhGNbSUIvlLKjpqme9spMhXcMZmvXxRcbtkh3DNBieOZhFQ+eR5Oz9IqfnyoYNG1i6dCk+n4+MjAxsNhterzeWAXcyeAJQVlTE98aOp8CdyoHEEo7GDWJso4fK0nQO7WvpCZhMvGgQI8d9uSW7pmkS6K6lvXEr0VAHAJLkIC1vPClZoxCls9PV5lyxZh0Wy1kUiAZ5ZMfTtAU6SHDEc8f4G8iIOz9rNHweYX8LJ46+gRr1AZCUPpSsohlINhd7X32N7971PRo7OzFMyEzPZcyiO7jp61eRUxTrLDRiTC7Hj3UxbkohQ4ZnYxgmPk8Yu0NGVd7vXmWaJoYOiqKf9gVgd8j4vWEM3USSraDJ57VixQrKy8spKiri5z//Offcc88nbqtpGkuXLkUURTZs2MDo0aMBuPfee5k4cSI/+tGPuOaaaygqKjptv/Hjx7N79246Ozt59NFHue6669i2bRvp6edfgDHVlczw7CHsSe2g8tghcgoLuGrxlXgCUbLTZcDEMEUEDEQBPBGBoCgTTHNhnvx9Ng0N09QxP9C1TQYyEuCqeYNZ+s/3cqC2k0TJxwvP/A+SGSXe1YKumwS7G/jWXX8mPS2RcaOHMmfBd5k4fgjxg8vp0najuEMoyj66D+xDOCwiuVzIbjeyO4EhE/655/kaj7xC0NuAaZookW50LQqajiCa6EoIQRJpP77lI9XwTcMgWFeHKy8PyRHL+jJN2FdbS2ZuCbuMwQTcbhIH2ZA7OxheMYXLF13FlVdMISs30Qp4WvqcohiIYZWFU5fwj7cewRfyEOdK/Jgi5H6cdjcLpy5BDKsoioHTaQVNLJbeZJomb9asp9Xo4s2a9QzJjBUVDUSD+KJ+chOzAZhaOI6s+HTK0wb18Yi/HEVRuPvuu/H5fOTm5vZkn57icDjIysqipaUF3eMly57Isbh8agvGcPPCCipLY/OqIcOzOF7bRdnQzF5qKWzS3rCZaLgLUbKTmjOW1OwxSPL50WrdCppYLGeJaZr8Ze8LtAU6SHImsHTcDT0tzS5UomRDUwLI9nhySub0XIHWdIOWpiCBYBiHw03ljGu47ObbyHfYMTrDPfunpMWx5BsTelqGSqLANTePIxJST3seTVNZv2EDM2eMQ5ZPj2y73DYk2Zq0nom5c+d+7m3ffvttampquPXWW3sCJhBb//+jH/2IW265hSeeeIJ77733tP1cLhdlZWWUlZUxadIkysvLeeyxx/j+97/fW4fRbxQm53HT6KtYmNVBTeJ62qnGJptkZaVgF8Oomo47LjapMTQ/eWnFVBRexaVxDmwnf3eLR1wfC5wYOqapY5z8r2noCIKIO97BlMpcIJeCf/shmhpi39E2ams7aKg5RrfPoNvXzZFjW/nzczvJys3ja19bxOghZeQnGKjd3ag+D6ahowWDSOkO5ORYJX3TMMA0MY1Y
wMbQFQwtCubJLC8dzKCO4JB6sk3ikooI1R/Hs2cP3j17UTw+Cm+4HkrKeORPz7Jjy0pqjlTz3Xlz0eszIKoxY/Zc7vrB1xk3bpD1N2vpVxwOiUuSa/G6OqiYOZtH31lPq7+LqGnEskwEkASRwoQklk6eyQhXB8nJdTgc1jIyi6W3VXfWcqSjBhmJIx01HO6owRcN8Eb1Wlw2F9+d8g1skg1REAd8wARgzZo11NfXk5GR0XMRwYhGEX0+DKcTyekkIz0dWzhMuz/Aqs4A9rGTuf2KSvLT4jAME1EUECWR+YuGfakLESF/E864TERRRhBEMgqmEQ40kZYzDsnm6q1D7hesoInFcpYIgsCiIXP52/7X+Nqoq0h3X5gBk2ioC8fJY3e408gfcjnuhFx8Le2sePxhbv3Wt3h+7VG2+dOYedkdpE2YzvShBdTua+NQSEEQYPioHBKTYx++0oc6YsQnOIhPOL1GiaqqON2QlhmPzTaw0wEHmnXr1gEwf/78j9y3YMECANavX/+Zj2OaJtFotFfH1t+k56TztLeZ4fkeFN1BVkIcmG6iPi92m4QgCOiiGzPSSKqzi/iE4p59ZZv7cz9PYnps/fKMHLhoukl7d5hRM67glZdXsXv7FlqOH6SlvZlnnn6KZ4DrrrmJtIQZlJSmkueKYG86QubwKbgLCwAIVB+l/i9PkziiguwRF9Ol7CQS8rNl0yHe3V2HJ6iR7JaZOHoQU6YNpn7r32CHgtrlBWLnlPu8Cv/54wd57+AeFE0hIyOehKREomqUK68bSV5BMnG9WL3fYulNpqbh9HciOFSmO+KZtPAS3m06wXvNTXhCIZLdbsbn5DIxNw+bJAEqDn8HpqYhWN9JFkuvMU2TN6vXoxoaTuxEdYXl7z6BW3YhCAIpziT8SpBUV3JfD7XXrFu3Dl3Xsdtj35GaptF0vIFIJEKubCM+NwfdBMWZhBoI8U5E4+lrR+MWBF54aheDytOZdnEsG+eLBkzCgVbaG7cS9NSTWXQRaTljAUhILSEhtaTXjrU/sYImvWD58uUsX74c/QNp0pYLl2EaPUWlchOz+dbkWwdskakvQ1NDtNatw9d5lOIR1+OKj1UmdziyeOq/fs7vHn8cn6qQmJPD7HmXc6ium4tu+ireeg8H3m0EIDHZyeSZJSQkOfvyUCxnoLq6GoDy8vKP3JednU18fHzPNqf88Ic/ZNGiReTn59PV1cVvf/tbGhsbufrqqz/2OaLR6GkBFZ8vttxLVVVUVf3YfT7OqW3PZJ8vwzRNNje8x5C0EjLi0ti/fz+PPvIgifEiv/j3W2PbAPB+qq0g2tC1MK31m7G7c3tlaUpKgo1LZ1Rw6YwKwtFlVB1pYvv2d2mt28vmzZvJSiujttZLfYOHlpbDHKvbztSqThYtmsvQ4fl0V+1DDQTpfOc9Og69xw57I7/580aaWj3ohtmTKrxy80Fy/rqJb319BuP0AhTJTX1CCb984S80ttT1jCc1JZslN97AP33jehIS3m/Bfq7el950rn+n+psL6fhLv/tttGCw5+fhwE2qxpYtW5g6dSqy7fQpthwfjw7oH/PaXAivl8VyNlR31nKo4ygumxNf1I+qaYS1CM4EB1cMnc/UgvHnXR0hj8fT8z3b1dVFe1sb2sk5UcAfwBWJYNrsaIKMbnNQkuOEkMprr8daCre3+tFU4wsVUI+GOmhveAd/d83JWwR0Nfyp+5wvrKBJL1i2bBnLli3D5/NZLeUucJ6wlyd2/50rhi6gOCUf4IILmJimia/jEK1169H1KCAQCbTgjMtk69+f5xf//TMOnziBCRTl5pCTlkaCU+aivGRqTgZLbHaJMZMKGTE610rLH2C83lg2wSd9FiYmJvZsc0pTUxNLliyhra2N1NRUJkyYwMaNG6moqPjYx7j//vv56U9/+pHbV61ahdv9+TMwTlm9evUZ7/NFeI0AW9S9iIhcVp/KU5veQtPCDC0vRxajeL0qp1pwnAoEAQjo
RJoPcuT4cyjG2SlcNyjbzaDsyYwfP56oqhJwCeghgYPV2zlas539h7bzp8d/RV5uOWMmD2NWeT6pXh9HA3v5xSNvEQxGSU50Idnezw7RVZWmVi//8X9v8p3br6Gb6Zh+EcmWgCCIFJeOZNr0KUyaUIIsCWzcuPGsHFtfOFe/U/3VBX38yUlsPLD/jHYJhUJnaTAWy/nrVJaJoisEFQ2FWKcqUZDIis9gWuGE87IGVlJSEpFIhJqaGlRVxVBV7KJEktOFWxSJdHYRl5dLRooLLSzjEJ289erBWEvhQSdbCp9hwEQJd9PeuA1f5+H3x5E+lPT8Sdidyb18hP2TFTSxWHpJV9jDo+89TXfYy0uHVl6QGSZq1Edz7dsEPbHuKg53Orml82ivaeW+Wy9j9Y730HQTm83O8MmX8cvf3U9hTgrhkEJDbaz18ODhWUyYVozbSs2/YDz55JNntP0999zDnXfe2fOzz+ejoKCA+fPnk5j4+YMKqqqyevVq5s2bd06Wcb1W/TZJDUmMchfhWreVPfv2IsgCl18yHrukgQCyPZFgKEx8fFxPu2wATQkwOEekoGLhOZkEXgt4A1HeGB/Hyy/lsnfHO/i6Wjl+4hCdq2t57x0ng8uK2Lt7G8GwSnpKPAjiqaYhmEBUM9B1g5Z2P3987nW+cfMs/EYWX73tm1w8awhDSvPO+nGca+f6d6q/sY7/ix3/B4OkFovl8zmVZRJvjyOgBNE0jTR3CqIgcqz7ONWdtQxOP7+WioRCIXbu3EkwGESSJGyyTLLDicPuQhOdGHoYIRJBD0cwRAHTgHj7IEwThlZmM+3iMsQv0FK4rWEz/q5YdklCahkZ+ZNP66Z3IbCCJhZLL+gKe3hk+1N4Ij5S3Sl8fcy1F1zAxNO2n9a69RiGiiBIpOdPIi1nLOGmFr5/89fZ2diIYULZkPGMWvzPfGVaJQXZyQC43HYumjuYhEQHGdkJn/5Eln7tVIbJh7NJTvH5fKSkpHyp53A4HDgcjo/cbrPZvtCJ2hfd70zohk5V2yEEQWBYKzx1tB7FhPKCTIrzEjD0KAIimBoiOqau8sEeppJsR1N8yLKIKJ6br+70FBs3XT2Hm66eg6LqbHhnD8+/+CZtx6uor62m+vB+Wtu9JCa4MUWRYFhBFkUiURV/MIKuGwiYSJLAiZZuXK4d/Nt3foksnf+fjefid6o/s47/zI7/Qn6tLJYv4nB7Da9Xv42qqyTY45CdMpIi4JJjy7mDSog3q9dTnjbovMo2cbvdVFRUsH37dkRRJMvlxoxEiYoOdNGGgolDCxLu7qY9opKclEnF4PFMnF7MyPH5n/u10JQgCEJP/bT0/MmYhk5GwRSccZln8xD7LStoYrF8SZ2hbh5572m8ER/p7lSWjv/qgO79/kWZho5hqLgScsgeNAebI5moZrKmJsyYiqk0KO8wbOE3uHTBxcjeKIffaaA4K5HCkliR2JLB519r2QvRqVom1dXVjBs37rT7WlpaCAQCTJw4sS+G1qcOdxwjqISIt7lh71FWHj5KSHGRU7IId1IBasRDWt5E4pJL2bBhAzMmz/jIiZQku89ZwOTD7DaJuReNZe5FsWJvzc0nWLb0Go7VNWK3nVxb3e3H+EDbQ0kSiY9zkhjnoMsbZufufYiCAZz/QROLxWKx9L6QGubVw2vYcnwHnoiPZGesDb2E2JOdKQgCcXY3hzqODvhsE4/Hw2OPPcbNN99MWloss+PHP/4xCxYs4Dv//M90dHQS73SjizICJrpoI2yKBLu7SErP4ppF/8y8y0dQNvTzBTo0NUxn03t0t+whKaOCnJI5ADjd6RQMveKsHedAYM1cLJYvoSPYxSPvPYU34iMjLo07JnztggmYmIaOEvH0/JycNYLcskvwn0jhmwuv5uFf/hJdN9l2oJXW8Vdx83/8kevmzKbzQBstjV4kScTvi/TdAVjOipkzZwKx+iIftnLlytO2uZDsbK4CYFw0jY37D9IdieJwJvGV
RXPR1SCS7CQjfyLOuEw0040zLvMj/2yO+D4+ivfl5OSRkjkEURcw/CaBgB1ZcoEpI4tOEt2ppCbm4JDTEMISoi6APb/Pgj4Wi+WjHnroIYYPH058fDzJyclcfPHFbNu2ra+HZbF8rKrWQzy4+VF2nKjCF/VjmAYO6eOXcjskO6qu8mb1+p7C6gOJoig8+eSTXHnllTz11FP84Q9/6LkvMyubrIxyvlU6lBy3G6+q4Ql04fV34gl04ldVst3x3D3vEu772e2fK2Cia1HaG7ZSs+sxupp3Ypo6Srgb0zTO5mEOKNbsxWL5EtbXvYM34iczPp2l475KQj86qTmbIsE2mmvWoGsRSkbdiCjZ6TxWx6/vuY8XNmxANw0Otrez9J/+icWzyuhs9NJytJO6SGzddnFZGpNmDCIx6fzq4W6BOXPmUFJSwtNPP823v/1tRo8eDcSW6/zsZz/Dbrdz88039+0gz7GQGuZg+1EA8mu9PN8RxkBgyLDpjCoJEvVBYtpgJNmJMYC6aMQLMoaqE3U6QZBJjDu5vtkEBDBNMEyBqOnAUH3EceaV+i0Wy9lTVFTEgw8+SFlZGdFolIcffpgFCxZQU1PTc1XbYulrvmiAlw+tYl9rrAip2+ZCFmUSHQmfuNxkoGabmKbJ6tWr+c1vfkNTUxMQy+Cdc/HFBOvqiCsuRlF11r61h/GpGfxk7hLejCZS27iPSDSI3ZlEad5Q5roi5IQ9aMdroWLoJz6foat0teymq2nHyeYN4HRnkFEwhbjk4vNqadOXZQVNLJYv4Yqh87FLdmYPmkK8I66vh3PWGYZGR+M2Opt2ACaS5MDf0cALv/oTf3j6abqCIXTDZPy48fzsNw+TmJhI1/pG6o92ApCS5mbKrBLyCr9cTQvLubVixQo2bdoEQFVVVc9t69atA2D69OncfvvtAMiyzIoVK1iwYAEzZsxgyZIlJCQk8Pzzz1NfX88DDzxAcXFxXxxGn2nxt2OXbKQRj360njGjriB+6CIKR+WjBGLBlOSsEX08yjNjmibDRREEkagBsmQimABmLGZiAgggmEQNEwSR4aKIaZrWJMxi6ScWL1582s8PPPAAjz76KPv27bsgMwIt/U9tdwN/3v13wmoEURCZWTyZQ+1HOe49gSiIKHrsQoNpmuimjqKrPd8xoiAS1aIDprbJ3r17efDBB9m3bx8AGRkZ/NM3v8nkrByOvbKa6oiPoT/8Hs6MDEZ3H0YTJdrSKhnsSGdI8Wh00YFBrBxad6SFzLZNHP7by4z/f0M+8dg7m3fQ0RjLLrO7UsnIn0JCagnCBVaX8fOwgiYWyxnyRfwkOOIRBAFZkrl86Ny+HtI5EfKdoPnYmp4lOQmp5TTv8fG9byzmSEsLum6SmJxBxbybuear11AyuBSAwcOyaG70Mm5KERUjc75Q1W5L39q0aRNPPPHEabdt3ryZzZs39/x8KmgCMHv2bDZt2sR9993Hc889h6qqVFZW8otf/ILrr7++18a1fPlyli9fjq7rvfaYZ0NJaiE/nvEtWo8e5FiyQaArjoTUBObPTMP0H8LhSsUVn9PXwzwjpqYxLjmdpMRM2v3dJLmT4OSk7P2/cBMMk2DYT0ZiJuNS0jE1DcEqemmxfC5/+ctf2LhxIzt27KCqqgpFUXjssce45ZZbPnGf7du3c99997Fly5aez94777yT66677lOfS1EUHnnkEVJSUqisrOzlI7FYvpis+HRkUSIvMZurh19KpjuNLQ3v4ZAdRLTo+xuaJio6US3a810E4JAddIW70Q0dWerfp72rV69m3759uFwubvza15hfPpSGN9eyo2UVumGSmZNCtK0dpbML+UQtbc4c/I4MTEHCFGLHJps6oqkSsKfS5cxGOngQ/6HDJJ7MNjENHU0LY7PHMuNTs0cT6D5GavYYEtOHWMGST9G/f3ssln6mxd/GozueYUzOcL4yeE6/j1r3BtM0aK1bT3frXgBkm5vsQbNJSC2j5vXfU9va
jiDaqZy2iHFf+RpjspMZlOLu2b+wJJUlt03A7rA+bgaqxx9/nMcff/yM9pk4cSJvvPHG2RnQScuWLWPZsmX4fL6erj39lSzJJOQUUZU6hFBAwZHqYviwcSjhQehaeMB9log2G6EFNzLjeAKvrv4N3rAPtysBSXo/IKLpKuGwH4fDzYyZSwktuALRCphYLJ/bT37yE+rr60lPTycnJ4f6+vpP3X7t2rUsWLAAp9N5Wpbf9ddfT0NDA3fddddH9tm4cSMLFy4kHA6TnZ3N6tWrSU1NPVuHZLF8KsM0ONh+lGEZ5QiCgNvm4o7xXyPNlYIoxk7ovzt1KUEldNp+mqaxfv16Zk6fiSyfPt+Mt7v7ZcDE5/MRCATIzc0FYOnSpWiKwqUVw+na8C4HN+9FN0xU2U5nSSXF1y8ksSKX9/7zf+mSUqlNm4gu2hBMg+QkH2UlDRyvTsfniUOVnHQklpLe1sThv73MuJ+U4+88QnvjO9js8RQOuyZWQFd2UjziqwNuDtIX+t9v0AA0UK52Wr6cJn8rf9zxDEElzLHuBhRdxSF/fAGq84kgiLHWY4Ddns/hWp3ycWW8sbWOtUoxMxbcjFQxhdkjywk1+TlR3Un7cS8lg9NxOG0IgmAFTCwXLG/E17Pu+u9//zsr/vQ7Ro2/hJuvuh1BEHC4B2bdAMMw2PBOE4X5lVy64Fus3fAEHl8bpvF+0ThBFElOymL2jK9TmDuCDVubmDZ7aM/E12KxfLoVK1ZQXl5OUVERP//5z7nnnns+cVtN01i6dCmiKLJhw4aeelL33nsvEydO5Ec/+hHXXHMNRUVFp+03fvx4du/eTWdnJ48++ijXXXcd27ZtIz3d6mhnObfagp38ff9rHPec4PrKyxmTE1u2mhF3+vdksjOR5A81XVBVlSQxntyErH7fwltRlNh8YMUKBg8ezO9+9zsEQcDljmPE5Ks49sTvEdUois1J9+CRVF4+hysq85EkEV1RaD/ehM+eiS7ZEU0dmxGhpLCBlDQfhmpyaLcT2VAI2FPpdmRBpJlju59EVbwAGIaKpgR7istbAZPPxzqT6QUD6Wqn5Ytp8rXw6I5nCKsR8hNzuG3c9ed1wERXwwBItlih1oy8qWx96T1+9+hy2hWF9IJiirPLUG1OJlx5AwlhjY4jsbolCYlOJs0cZAVKLBc8wzT43btPIooSix2V/O2vf0UUTK69YgozJ+b19fC+FEUxUMIqpghF+SO46fqfUVu/h/rjVYSjAVyOeIoKKxlUNApJsmECSlhFUQycTitoYrF8HnPnfv7lv2+//TY1NTXceuutPQETgKSkJH70ox9xyy238MQTT3Dvvfeetp/L5aKsrIyysjImTZpEeXk5jz32GN///vd76zAslk+lGzrr67bx1rFN6IaOQ7af1r7+fGGaJm+//Ta//vWvOXHiBADdXV3UrXmb4rkXI4kiVQ1+5JwRpCQ4GH3FHK6qyEFVNHa/20ByqpvC0jTeqbgEvSWMzRAxBIhL1UhMC6NqMonpEcRMEa/HTXqyH9vYbITkKErUgyw7ScsdT0r2KESpfweW+iPrrMZi+QyNvmb+uONZwmqEgqRcbht7PS6bs6+HdVaYpom/q5qWunXEJRaQW7qAva+8yv/87GfsrK3FBDIy0gn6fFSOiGdeSQZNtV10mSDLIqMnFjByXD6SbJ0UWSx13Q14Ij4SFIE1j/+K43v2kjusgtmzZnB05x+JSyoke9AcpAEYgHU6Ze7416l0dX+wbfhUdF3jve3vMX7CeKQPpUOnpbhxOq1ph8VyNpwqzD1//vyP3LdgwQIA1q9f/5mPY5om0Wj0M7ezWHpDk6+Fv+1/jWZ/GwCD00tYXHEJya7z6yJ0VVUVDz30EHv3xpa6pyansHjCZMp9ETxvvIk/L5fEYRVcOaMUddoghg9KIxpReW9zHQf2NqMqOkkpLkrK01mycDRv/K0KSRKx2UVKC48hyyaRiA2nU6WivIP2LhcFOXVgxgKn
GQWTSM0egyQ7+vaFGMCs2YvF8ikavc2s2PEMES1KYXIet429Hud5+oGjKUFaat/G330MgPbjR1j+g0d4ees7qJqOIcoMmzCf3z/2IDmZqXS2B2iq7cI0oWxoJhOnFxOXcH6+NhbLF7GzOVYBf0yHnf87dAyf6WTuiBlE/LXoWoRIsG1AX+3Jy0kiL+f0ia2qqtQfg+FDM/t9irTFcj6prq4GYu1JPyw7O5v4+PiebU754Q9/yKJFi8jPz6erq4vf/va3NDY2cvXVV3/sc0Sj0dMCKj6fD4j93atn0C791LZnss/Z1h/HdCYG4vi3Nuzg9aNrMU0Tl83JV8ovZlTWMARBOOPj6M/H/8477/Cd73wHAIfNxoLKsYwzJMzjHYQwsedlo+k6qqpSmpdAMBBly9pqDu9vQ9diS15T0t2MGp+Hoigc3HECQzNwOWXiXd0kxnWhaTIIoGkyiXFdtLUXEo3G09UdT1doBMOnjsUwBYx++Pr0hi/6/p/J9lbQxGL5FJ3hbqK6QnFyPreOPT+X5Jimibf9AK31GzB0BRBwC3n84Nv3Ud3WjmZCbvEIhixcysWTRpGUmABAWkY8ky4aRGZOIlm5iZ/+JBbLBUbRFKpaD4FpEn73KAfb2zFFB0lZY/G0xYIpyVmV1lpii8XSK7zeWL2CT1omnpiY2LPNKU1NTSxZsoS2tjZSU1OZMGECGzdupKKi4mMf4/777+enP/3pR25ftWoVbrf7Y/b4dKtXrz7jfc62/jimMzGQxt9lePGoHrLFNIbJBTTtrKeJTy92/Fn6y/Gbptnz/a7rOk6HgyJHHAvT80jwa2ho+B3xtOaXk1qRQ9exGjhWQ3ebQMcJgVOrk5xxJqlZJrbEbg5Wn2D/Yag7JqLp4PVGKcg5iiBoSBLYbCbhsBPJrpOSVMPOvUMAEbuzg9dee4MLoZzYmb7/oVDoszc6yQqaWCyfYlT2MJyyg+LkgvMyYKIqAZqPriLoawDAGZdJTslcjp4wmDBoBI3R/VTMv4WJk2eQL8t4jnYSnVKE2xm7glw5Lr8vh2+x9Fv7248Q1RSKugTe2L0fDYni4tEsmFNIJLgOQRBJSv/4ExOLxWI5F5588skz2v6ee+7hzjvv7PnZ5/NRUFDA/PnzSUz8/BdPVFVl9erVzJs3r99kpPXHMZ2JgTD+qKZwwt9CSUphz21z/G3kJGR+6cfuL8evqirPP/88K1eu5A9/+AN2ux1dN2hWC0hd/TzxYQ9+dwrGmElMuWw6RTlJpwVYmhu9vPnCAbLzEhk1IY+c/KSPXFwJzowSCauEPPvwNHsxDRMEAcMwSUlzAA7c8VFGTinB4c7H6bYRF39+Z4J/0ff/VLbc52EFTSyWD6n3NJLiTCLRGcuoGJJe2scjOntE0UY03EVnSxdP/3krc25YyqzCeP70+rsw8xt8ZaGDUZlJeE/46TYjiKJAW7OflLS4vh66xdKvO5ftbIplk5Q3RFne0ISJwPAxcyhOa8fXAQmpZcgnCy1bLBbLl3Uqw+TD2SSn+Hw+UlJSvtRzOBwOHI6PnnzZbLYvdKL6Rfc7m/rjmM5Efx3/kY5j/OPgmwSVEN+dcjup7mQAClN7tyh6Xx2/aZqsW7eOX//61zQ0NGBqGs/8139x67//OzanA6fLRXXhGIbmJTLz8unkZSbQ0RZg3ZvVJKe6mTCtGICC4jQW3ziW9Mz4T3wuh70TX+s7+Fp3YpoagiAi2VyEIjpxjti8Qo16McJVZJWNuKAyWs/0/T+Tba2gicXyAce6jvP4rr+S6EjgnybcSLxjYAYHQr4GUm0HCPkqSUorOe0+JeLB5ohFrv3Hm3j+N2/z17c2EomqHGx8gMsXXcHMsXl4mwOYXWE8jbEobGFJKpNnlJCUYp3oWfqH/tq5zBfxc7SrDntIJbElxI1fuYu97U3MmjeBQNc7ACRnjujjUVoslvPJqVom1dXVjBs37rT7WlpaCAQCTJw4sS+GZrmAhdQwrx1+ix1N
VQCkuJIIKMGeoMn5YP/+/Tz88MPs2rULU9OwKRrzswsZGYzSuXUbmbNmcPlFJTC9hMxUN61NPt58cR8Ntd0A2OweRk8swGaTEAThUwMm0XA3dfueQ9eiGLqCJDuxOxJBkCASC5gKgoAkuwh4agl664lPLj4XL8N5zwqaWCwnHe2s44ldf0M1NJJdiQN2OY5pmnQ0bsUpeelo3Epi6iAEQcA0dDqa3qPzxLukZU1m/RNv8tsn/0yzz4dmwIjKEfzHQ7/EZrMht4WINvkBSEpxMWVmCQWDUvv4yCyWgSHeEcfScTdwfMsGOowGcKZQMrSIqaNMQm0qdmcS7kRraZvFYuk9M2fO5P7772fVqlUsWbLktPtWrlzZs43Fcq7saz3ES4dW4Y8GERCYUjiOBWUzB+z8+sMUReGnP/0pK1euxFBVdH+QGVn5zBxShkO24U3MIa44thQpI9lFU4OH194+SlPDqeAGlAzJYPSEWMDkk6jRADZHLJDicKUQl1yMv+sokuzE5kyOzfE/1KJZlOyoWpj241uISyq6oLJNzhYraGKxEAuYPL7rb2iGxuD0Em4atRjbAOxqoSgKr7z4FK88vwJPt5/klI1cfrXC3Llz6WxYTzTUQcOxE/z0nifYs78RTTewuZMZPecGvnL1NVSOjl39LhiUSkdbgLGTixg+KgdRugCqR1ksvUQUREpSC8m+eDF/PLiVSHuQ9IIk8goq8DpBkl3WBMZisfSqOXPmUFJSwtNPP823v/1tRo8eDcSW6/zsZz/Dbrdz88039+0gLRcE0zR5bt/L7G4+AEBGXBrXDL+UouTz62KB3W7H5/URautgTGoGl4wZQ7LTRXdKLo5Zs5iwYGJPV8m9O07w7sZaAERRoHxYJqPGF3xq9nY40EpH4zsEvQ2UjrkFmz0WOEnJGom3bR+Szf2Jcwkr26T3WUETywXvSMcx/rz7eTRDY2h6KTeOWowsDbw/jQ0bNnD33XdTc/QQmhpFMMEU4PW3v0Nedip3/svlTJownMgx2FXVgCbKlE9cyJDZSxibl8GMiuyexxo+Opfyikxc7vPjaoDF0he++93vcqy+nWGVV7DooonINjdpueM+e0eLxWIBVqxYwaZNmwCoqqrquW3dunUATJ8+ndtvvx0AWZZZsWIFCxYsYMaMGSxZsoSEhASef/556uvreeCBByguLu6VcfXnelKWvicIAqmuFARBYFbxFOaUTBuQ8+oP0zSNF154gTlz5pCamoppmiQPvoSvXSQxkihdaQXIcy7mqnljiXfaUBStZ9+Swens2nac8opMRo3PJz7R+YnPcypYEvDUnbxFIORtICmjIpZN3rAVXVewSQ4MPdYyN5ZpYmDoak8gRRBENF2xsk16ycD/DbZYvoSjnXU9AZOKjHK+NvLKAfnBvmHDBpYuXYrX201SvA1JsIEJSAKqplPf0Ma//+8/uPunF9ERN4tx0wM4h89gbHkZCWGdaGuQdzfWUViciiiJSJJoBUwsli9gTc1GQmqYDK+LHTt2IIoi99x1CZmZX747gMViubBs2rSJJ5544rTbNm/ezObNm3t+PhU0AZg9ezabNm3ivvvu47nnnkNVVSorK/nFL37B9ddf32vj6q/1pCx9pzvsRTU0MuPSALh40FQqs4b2SmecvmaaJhs2bOBXv/oVtdXVvPvMs/zXo4/gSEtj4pgKdmsijqEZXHvxKJx2mZrDbex+t4HkVDfzLh8GQEKik68tnYTN/snLcD4uWJKUPpT0vAnYXSknx6KjRL1Ikh1Dj35gjCCiY+gKH4yNSJIdJerFNHUEYeCd3/Qn1qvXC6yI+8CVEZdKkjOB7PgMvjrySmTxkz/M+itFUbj77rvx+Xykp8ZhqGEMHRAElKhKpyeIomjIAYVfPfALLr7xfoZdchNFNhvhrghRIC7BwbgpRQiiFYW2WL4o3dDZ2rAD1eOj7X9XEu3o4NJrryUzI5XGIy+RlDaExPQhCIK13M1isXy2xx9/
nMcff/yM9pk4cSJvvPHG2RmQxfIhhmnwTsMuVh5dR5orhX+ddAuiKCJL8nkRMDlw4AAPPfQQ27duJdLtxWVAWr5C29vrKLj2auZOLGTepEJsosiRA63s2d6I3xcBIBxSiUZUHM7Ycv9PC5joWoT6A3/DjE3gPxIsOUUUZUpG3oiuhU+7XVVVNmzYwIzJMz7SEUaS3Yiidcr/ZVmvYC+wIu4DV5IzkX+ecBMumxNpAAZMANasWUN9fT2pKYkYmh/TMNEM6PYGCYYiCIAgCrjdDrram3C0HSTTNoZwSEeSREZNyGfU+HzkTylCZbFYPtvhjmMElTD5Rzz8rfYEEVNgztxF+DqrCXrqUcIeEtOH9PUwLRaLxWL50tqDnTy//3XqPI0A2CUbITU8YDtPflBLSwvLly/nlX/8g3C3F1HVmFVczsWlQ3GOGEPm7FkA2ESBQ1Ut7N3RSDCgAOB02agcm8ewUTnYHZ98qh0Nd+M4GRSRZCcpWaPQ1fDHBks+yOZIwOZIOO02SVXRTDfOuMx+2XL6fGAFTSwXnH2th9EMndE5sZS5gf7hvnbt26hqBFM3iUYVfP4IgZBCrJC2QLzbTmqKG7vLRksgyt4d7zA4dwyDytOZNGMQCZ+yrtJisXx+O5urwDQ5sW4/QR2SUwuoa3UyuG0fACmZI6wsE4vFYrEMaIZhsKF+G2tqNqEZGg7ZzoKyWUwuGMP/b+++4+Oo78T/v2Zme9Oq92ZZlnu3KTbGgDEECAk9hZqEkIRLcmmEy11CcvdNL7/kLqRfIKQ3Qi4UEwwugI0N2BjjKtuybFmWrL7S9p35/P5YeW1h2bjIkizez8fDD/Ds7Ox71pqPZt/7+bzf+hj5HffQw4/wp/99GCseZ05JBe+om4ZWO4uyd17B/PNqsfU3SNi5tZW1q/YA4PE5mDGnjInTik74RWR6Gc46+robqJxyEx5/CQAFFQul7sgoJkkT8bayuXU7v3/97ygU2e7AOV3JWymLnvbt7Nv9MpaZQFk2Wtp6MU0L0HA6bGQHfTgdBjoWViqGYYDTbXL1jdMoKQ+O9CkIcUZG09LISDLKtrZdBA/08vftDVjoTJp8MRedl020aw2gkVUweaTDFEIIIU5bXzzMLzf+ieZQCwATcsdx3eQryXaf2zPtTdOku6uLnNxcNE3DzD2PkpKnuKG4mMCMBUx61xXMnlNDMp6ipzNCbn66k82EyYXUbzvEhMmFTJhciGE7ftLo6GRJmka0ryWTNJGEyegmSRPxtrGpZSt/2Px/KKWYVTyV8qySkQ7ptCilCB3awZplv+bF17bidevpClBo+L1OkimF3+vC6bQf9RwNZVmYqQR1kyskYSLGhNG0NPL1lm2YlklqbQMtfTGcrgCT515EtmM/XYA/Zxw2u2dEYxRCiKEwmhLWYnh5HG5suoHb7uKauiXMLp56Tn/YV0rx/KpVfOPL/0kgFuVnv/oVgboJXH7BBDy2r7JwdgnTp1USiyZ55YUGtr7egj/g5IZbZ6PpGja7wbveM/OErzFYsiQrr4680vknXIYjRhdJmogxaXdnI6sSG6jrnMLEwvG8dnALf3zjHyilmF0yjRunXHVOTiEMNR3g0Z/+iL+98A/q9x1C03RuuOlqdP0VkokUWQEvcOwvL6XpJOIJNM3i/HnybbcQQ21D8xs4IknOdxZyYOFtdKNx4YIqetqfAyBYMG2EIxRCiKExmhLW4uzb132AIn8BDsOOrum8Z9q12HUbfqdvpEM7I9u3buW/7v8CL69bj0qlyHK72P7EE8yvm8C8yUXMn1JEuDfO2pW72fFGa/9MbtB1jWgkicf31l0mlbI4sPMJkoleJFlybpOkiRhzlFIs272KVquTZbtXEU5F+cvWJ1BKMbd0BtdPvvKcSpgoy2LTM0/y51/8mmfWvURfPI6yg24zuPTCRVw4dzyPPRbgQEsPudl2Bkv4K6Xo7o1SWhRkSo2BUuqc/mZAiNHEUhZV2WW4X9lOl15K
ZUUl+cFsZtYmaG+MY3cG8GaVj3SYQgghxEmLpxI8vWsVa/e9ysLKeVxddxkAOe7gyAZ2hloOHOBrn/93nn7maayUiU3XWVAzmSU33Mqcj74fgHBvnI3r97Fz6yGUpQAoKPYza34F5dXZJ7yHjoUP4XTnoukGmqaTVzafSOiAJEvOcZI0EWNOfUcDO9t3Y8Nga9tOGrubcNoczC+bybsnXXFOJUwSsR6e/cVX+cz/9whEwUopvL5sKqZdTNn51/D1Tyyh8eUfce/7FvDVnzxLR3cvQb8H+1EFqJJJk+7eCF6Pi4+9bwFWX5v0axdiCOmazlUTLiVWMp8Hv/4siQRU1+Xj8QTwBErxZlVIAVghhBDnjPqOBh7d+hRd0R4Aoqn4qP7Cra9+F/7lK+irnUD25EnH3W/5qvV84v03k4xGAJheWsWSd93G0g/eQk3VkRbJ3V1RdrzRCkBJeZCZ88spKc864flH+1ppP7COvq4GisddTrC/jlmwYCrBgqlDcZpiBMmnJjGmKKVYVr+KpJXChQNLKeyGjfPKZvGuSUvPiYTJnk2b2HOwiakTvXS3bqZ4QoDy4mwcthyyJ15LTu083E47U0uyUJpBZG2SkuZi3nXZx3j8uT/S0dmGZcVA00ApdF0nJ6uIaxbfTElzmPCLSbRF0l5YiKHU29vLbbfdjsNbx7jKhSy5tAZvVgBvVgUq3cpKCCGEGNUiyShP7nyOVw68DkDQncX1k65kQt64EY5scIlEgmeeeYY/fP1bdDU1kb1xI+/5t/u4/PLLcTjSy2fMeBxN19Htdna22dB9eVR44lxz3e3c8PEPUlGWQ1trL7t3tFFTlw9AWWWQqbNLGVebR2FJ4IQxHJ0sSdNIxLrO5mmLESBJEzGm1Hc0sL19Fx67m3gihsfuoi8RYXJ+7ahOmJipFE//5rf88ZFf8cqurbiDHn7+3x/FQmNnRxbn3fAVeikgz++ixu8i1h4h0thDrDvMod0tNBoTKM4u4J5rq9m1fwu7D2wnGg/jdnqpKZ3I+PIpWHY/jbFD6HvqsZJJDMdbr8UUQpxYS+8hQvE+1i17kaam/dTUOPjsfV8iL3ik6Oto/WZOCCGEOGxvVxO/ff1ReuNhNDQuqJjNFeMX47SNzvvF1atXc//999OwezfxnlB6Y0szyz/wAapravh/DzzA/nWv8czjf+Pb3/omxYsv5h0X1qB/+b+5dvFkSoqCtBzoYdnf3mD/3i4cThvlVdk4nDY0TeOCi0+cKBosWSI1S8YuSZqIMUMpxd+2LaM33ofDsGPHwGk4iCSjPL1rNRPyxo26Dy8HGxv5/X//kL8//g/aurvBDdihpjKPuOmlevIV/P71A1TkeZhqM+g9FKa3txcAf8BFOKZYV3MFWnsShQJ7LiV1iyipW4SlFHr/+UYAFMR95bTl17BUtyFzTYQ4c883ruf1na/w3P/7LSls3HzzzeQEHHQ0v0pW/iTpmCOEEOKckO0OkDBT5HlzuHHy1VRll410SMe1evVq7r77bkKhEF40/G4PlqajKwsL2Lb5Da699lqCTjdep5M//frXfHLxxVSXZHHPLRdwYF83j//5dQ42pZcfaRpUjMshlbJwOE8uhkP7XiASakKSJW8PkjQRY8LeriYe3foUm1q2AhpxM4HCjqZpeB0etrfvor6jYdRML1RK8cgX/oNvP/wQlpWuxu12OJk+YxaXXzuNa959G/6c8YR7E1yYF6KjtY+e/ucWlQSYOruUqppc0GBycT6N4XZcHkemb45CEQlH8Hg9aP1bFRCLJKgqysNmjK7kkRDnokQqwRuHdhBesZXWrgh+n58rll5Jb8cuDu17ge5Dmxk3445Rl6wVQogzIS2HxwalFI3dTVRlpwuVZ7kCfGjOeyj2FWAzRu9HxEQiwf33308oFKIwJ4fowVaSup2k4cRIROgLhYgkkpjKosNSXHn1jdz5nW8D0N7axwvP7aKtJf0FpK5rTJhSyIx5ZQSy3Cd83Vj4
EDaHH5s9vV9+2Xl0H/KRWzofpyRLxrzRe0UI8RaUUtR3NLCiYS17Ohtpi3SglMLn8OB3+oj0hgFwGg7CiQjL6ldRm1s9Yh9gOjs6iESjlJWlM/fjSr1YLovq/GJKxl+Cd8ZSNIeTnaYdm7caTdNxexxEeuNousa42jymzS4lv8ifOaaZsoiFE7hcdrAUhysnKKVAgTLVgA7ELpedWCSBZSoMm3yQE+JMvHFoJ/FknO2rtxDHwYTKBax9tYUJ+W8AkJU3SRImQogxR1oOn/u6oz08um0ZO9v3cNfsm6nLqwGgPKtkhCN7a8uXL6exsZH8/HziHZ0opUgaDnpjEWLRECgTTQOv14/L7eL6264nKzud1LA7DNpbezEMnYnTipgxtwyv/8RTS2LhQ7Q1vURfVwO5JXMoqFgIgCdQhicwemfjiKElSRNxzmro2s8vN/wRgKSVwlKKPE8OXodnQOHFkZxtopTi5Rdf5Pc//jErVj/Pxddczde/9UV2b1sBVSZf/o/3YQuU8nzTbEpzvFR5XOgJE7cjfWkaNp1L3lFHMNsz6KBu2HRuvH0OsUhywPZUKsmq1au5eNEcbDb7gMfcHjuGbfTWdxHiZI30t50bDm5GbW5ix8Eu0NxMmLKYaZPddO05AGhk5U8ZkbiEEEKIwVjKYn3TazxVv4J4KoGhG5kOOeeKFc89RzIex+zuxopGSel2lG4nlerBApyGnVyXk+yyMva3HuJ3v/k7V111FQBZ2W4WX1lHaUUQt+fEtVrSyZJ19HXt6d+iYabiZ/fkxKglSZMhMNI37m8XKcvkUF8bJYEiAKqzy6nOrqDEX8jO9j30xvvw2AefWjfcs016e3v52yOP8KdHfk1DY2M6iaPDvl0b2LXpDySSFuGUTpd9OjZrMheVOuls7qWrMz0Y72/soqI6B4DSihNP+fP5nfjelFBJJpO4PJBb4MNutx/nmUKc20by286eWIjdHY3sfWojKexUlk1i3NTxaNFdAPiyq7A7fcMakxBCCHE87eFO/rr1KRq69gFQESzlxilXU+DNHeHI3poyTUL1u9iy/HlW/ulRQt099PX0UOT2knL5UIDPHSDl9OKx2dGtBJ0dEZIJk30NzXR3RgjmpGuMjZ9YcMLXGixZEsibQF7pfJzunLN7omLUkqTJEJBpimdXwkzyyoFNrNq7joSZ4PMXfQyXzYmmaXx47vuo72jgqfoVeB2e4yZDhmu2iTJNfviV/+Th3/yaaDjdA95hGMydWcfl75rCtOk1aJpOTsFEGnqC5Oo2uprDdJBOlhSXZTFtdinllbI2UojRbOPBLdgjCRYX1NI9rpTKCRdy0cJKetr+AUCwYOoIRyiEEEKkrWvayD+2LydlpXAYdq6sXcz55bNHdWdJgFQkwsu//C3PPfUUL+/Zzfa2VlrDIVKWiU3X6bMUDs2GhsJm2DFsDkw0LM1AsxQoi3G1ZXh9J1ndFehqfSOTMAn0F3iVZImQpIkYtWLJGGv3b+CFfS8TTqQTED6Hl0N97VQESzP7LatfRTwVx2VzkjDTy1SUUpjKJGEmM4kUXdOJp+JDPtskEolgGAZOpxMrlSLxxhai4QilWUFmzbgAY9ISHIUB8ko3488ZT0HFhXR32ehu2ASk65XU1OUzbVYpeYXyzbQQ54J9Pc0UNXTjyZ3OJYVlJArzqS4IcXB3DJvDhy9YNdIhCiGEeJvY3dnIqsQG6jqnMLFw/DGPe+0eUlaK8blVXD/5HeS4g8Mf5EmwkkkSHR24iooIR5P823cf46n//iJYFkrTSNkcFJZVkdy/h6DNge4OYpIu36eOSgApTUOL9+HQLG665RrsjuP3jIyF29B0W6aYa17pXJSVJLd0niRLRIYkTcSoE0lEWd24jrX7NxDvXzuY7c7i4qrzmVsyfUBFb9My6Yh24rQ5iR29zlApkpjp5x+VHHHanHRGuzAt84wrg9fX1/P7X/yCJx9/nM/9539yww03oGx25lx1HbbJVTir
6tjRW0sSKMsJYA9UUlo7DV3XKHApyquyyS3wMXlG8SllwIUQI++2GdezI17EP5qaSGpOZs4pI5UIoWkGwYIpaKP82zshhBBjg1KKZbtX0Wp1smz3KuoKajAtk0ORDkr8hQBMLazjQ3PeS01O5agrUG4lk/Tu3MnW5StZ/s9/EvB5+MTvf4vXbaeotBrLm01hYSGXX3MN77vlXWRHwlx8+Ts42NeHVzPQUG86ogKl6ItHKPF6OK+sfNDXjYXbaG96id6uPfizx1FW904A7M4AJeOvOMtnLc41kjQRo04kFWXV3pdQSlHgy+OS6guYUTgZXT/2Q4jNsPGpC+/OzEQ5LJVKsWrVKi5eeDE228Afc5/Dc9oJk0QiwT+XLeP3P/8Fm17biJVIz2x5ftkyrr/u3Tz17D8hp4lJ+UEUbQTypuNNuWlr6mFPp8H555k4nDY0TePK62T6vhDnqnXr1vHHP/2DWKKWvOwsLl1Yjc/jILtg6oBErRBCCHE21Xc0sLN9NzYMdrbv5vnG9bzS/Dp98TCfvvBufE4vAONzq0Y20KNYySShbdt549kVLHtyGesbGmjo6gCgLC+Xezo7ceXl8YF3Teeuq1dTVJBHW2sfDTvbWPHENi656C7+vOJhQuEuvC4/dsMAzUJTiqSZIhzrw+P08sEp02j4+zJyp0/LJIuOTpYcpul2lLLkCw9xXJI0ESPuULiDPZ2NnF8+G4A8Tw6XjVtIib+Aifnj33K9ZdAVIOgKDNiWTCbJ0n2U+AuHpBCqUorvfeMbPPr7P9Dd1oayLAxdZ15FJddcdQ0X33o5uzc9Qrmrk454CrstQG/veMIH4oRJAFBQ5CcaSeJwymUnxLnKsixSyuR3v/sda9as4Yabcrnq3e/E11+F3zhOMWohhBgrpAHC6KGUYln9KpJWCid2+pIRHt74J/I9ufidPjqiXZmkyWiRSJp85xOfY/k/n2R/dzcAlqaRsrsor5nEzbe/B1swCICetNi9o5cV/2gg3BtHKUVfTFFVMpl3LfkYz7zwCN29bUQwsdkUqZSGhkFOIJ8rF7yPckcPbfuasZJJkske2pvW0du1OxNLIHcCeaXn4fTIMhxxYvLpTYyYptBBVjasZUvrTgDG51SR500PWktqFo5kaED6w9Hh2S2x5oNs+Mtf6WptJc/rZdHEqZTPW0pLTimFU1rp6FgHgGG4CYeqaD2UB+johsb4ugKmzi4hN1/qlQhxrtvevptfPv6/LPvHYwSCudz2/vdSWpJLPNKO05M30uEJIcRZJw0QRo/6jga2t+/CptsI0YuyFCkrRVV2GXfMvAmPY2QT+WY8Ts+Wrbz2z2eY8+53kTWxjtUbD7B81yEae0KYdhfltdO45KqruO0976KmshRNIzMrZNvrB9m5pRUAm92grCqbF6J9pLrjlFVM59ZbvsaefZuIxZ7HsrrR9SAu10WMq5iB3XDwmm5iK/ZwuW4jEmrKJEwkWSJOlSRNxLBSStHQtZ+VDWvZ2XFkWtzk/FrUMWsSR0ZzczOP/uUv/N+jj/Kr3/+e4uJiXMVF3LRwEQsiSVLjF9JgL2C7puHUkmhmL7ruIbdkLg7fJNa9/Bout51J04uZPKMYj/fEfeCFEOeODQc3s+fva7DCSSZVZlNWVkaoYycH6p/Cn1ND2YRrRjpEIYQQbwOHZ5n0xEIAWFjYNTs2wyBpmrjtrhGJy4zF6HljC+uffIpnnlvBSw0NdEbD/MAwWDqxjll1+UxddC0LLr2Y9998DROqimlu6qGhvp2Xnl7Hle+eQn6RH4DxE/OxLEV1bR5llUFsNoPScdk89efNGIaO3RFgwvjZjK8CLAt0nV17Z9MXycamh0BZLLpyJnabTrBwGvFoJzlFsyRZIk6ZJE3EsOmIdPGnNx6nsbsJSGeRZxZN4eLq8yny5Y9obJZl8cILL/Cn3/yGF1asINkXBuDxf/yDuz/8Yf65fj8vj38nmhalyNuBK2JjZkUOnoRFMmGjZtb5
2Pqn5b/j+qkUlgSw2Y5fqVsIce6JJKO8vm8L216pJ6658Fe8g737utH73gCQKvtCCCHOCqUUbZFOtrftYm93E7fOuI5dHXvZ3r6rv3tkChsGBb58klaS7e27qO9oYELeuGGLMRmNsuzr32T5ypWs27uXjnC63qCl6di8fnrycgHIzXLzvftvonl/OlGy/p/riMdSmeM07unMJE1KK7Iprcge8D5s33gQK2XhdtvRUBTl7UXXLGJJOy5XktLCXcRiXvz+TsJhJ9s2VjNxUgG6bqN43GXD9n6IsUWSJmLY+BxeDoXbMXSDuSXTubjqfHI8wSF9jUQiwfLly3n22WfZvHkzq1ev5rLLLmPJkiU4HMfO+Ojr6+P3v/sdf/ntb2lubMSKxVAKphYVceXcedx81VUAJBMharJ3URlsw2PXqewqomVXJyFA02zMj+n4+kunHD24CyHGjtdbtnFg2QZiSY1gIJ+8qinkZVvsb94PQFb+lBGOUAghxFiRMlPs6drH9vZdbG/bTWe0O/PY/u7mdC0TM0mOO4imaYR6QuiahtNwEE5EWFa/itrc6rPWLScViRA72IKvZhxKKR5+dBXf/MlP0UwTS9PR3AFKJsxiwZIrueX6K5g5sQyAro4If//DayQTR2riuD12KmtyGTchj+LS4y/5skxFqDuKw2kjmTDx+7rweTtJJm1omkLDIsvfhsPRh2XaSCR9hKO9WKbCsEmRdnH6JGkizoqUZbLx4BvsaN/N+6dfh6ZpOG0O3jvtXRT5Cwg4h76+x+rVq7n//vtpbGwkGY9jRqK8tmEDf/jDH6isrOQb3/gGixYtGvCcyN5Gfvr1bxCJRPA7nVw8cTIXXnAxrcUzWXDtQtxFXlobn6fS2EBefoxUAro6sujstDAMnfGTCpg2uwSfX1oGCzHWvdL0Ojue24SJg6l1Cxk3uYRI1zYAvMFKHC5Z2y+EEOLMbWjezN+2PU3STGa2GbrBuOwKJubX0BXrYXv7LrwOD4ZuoNSRJe6apuF1eM7KbJNUX5i2jRtZ9ff/Y+WaNWR7vXz1ycfR7Xb6tHxc+ZXkFlVx/mVXcOO1lzO5uoCWph5SKStzjKxsd3ppjc+genwe1bV5FJUE0PS3TmoYNp0bb59DLJJEKUX7vr8R69VQSmGacXRNAzSygl7yq96Dw5WD22PHsElXHHFmJGkihlQilWD9gU0837iOnlgvADva9zAxvwbgrE0TXL16NXfffTehUIj8/Hysri5SpoXN7UbPzqahoYEPfvCD3HLLLcRjMb717W+jaRqBgnxumDQZv9dH7QWX86peyirlQrcsCrashrYWLDNBKp6kp8tLW2cVGHlMm1vMpOnFuD1Sr0SIkTKcHRzaw50cbNrDnMLxrE11M67uQi6/ZBzdTX8ASLcaFkIIIU6BpSz2dTezvX0XE3LHMS6nAoAcdzZJM0nA6aMur4ZJ+eOpyanCaXOglOJ/XnqYpJnE7xi8M85QzjZJ9vZy6NUNrHjs76xYu5ZX9+8nkkigFOTnZhPv7MJdWMCS8yo57/ePMrkym7bmXhrq2/njc42YpoXX52DCpAI0XUPXNd793pn4/M6TSpS8mc/vxOuzE+7ZRyq2H5vdRTLRB4DN4UY3HKCSBLIsfEFpwiCGhiRNxJCIJKO8tH8DLzS+TCQZBcDv9LGo8jyqs8vP6msnEgnuv/9+QqEQJSUlWPE4yWgs3b4sGgNnek3l3r17+d63v8244hJuv+MOpk2bRqfuYeLN97Kuy8FuDFAQ8DiYWRmk2rsTy0zg9ORRUnM+B1b2Me/iIsbX5UvGWohRYDg7OLzavJnSA32UT72KqhmFOKqK8NsOEUpGMexufMHqs/r6QgghxoZIMsrO9j1sb9/FjvY9RJOxzPbDSZOKYAmfOP8uiv2FxyQ8DnfM8To8x02GDOVsk//69Gd57PF/EE+msBSYuoEzkE9x3Tw++KH348xPd47zWdC8p4u/rtyDZR6Z+RIIuqgen4dp
Wtj0dL0/f9apF6m1zAShjnq627bg8hQQ7W3GslLYHQGUUsRTCbzO9L1AMt5D2741eLMqz9ryJPH2IkkTMajdnY2sSmygrnMKEwvHn3DftnAHP1z3MPFUAoAcd5DF1Rcwu3gqNuPs/4gtX76cxsZG8vPz0TSNZE8PKdOkN5kinIiT6ukGTcNQCsuyuLC4hMriYpRSPPLENlq73IBiammE4mA10UNR2rd1YCuq5KLLqsjKr0PTdK656ayfihBilJpXOgMzsI0tho+U3cXiBVX0dr4OQDB/CpouhZ+FEEIcXywV56ENf2Jfz4EBy2ncdhcTcscxMe/I/bau6ZQEio45xuGOOfFUvL8AbDKz3VQmCTOZSRLomk48FT/p2SaJ7h6aXlrH8sce47oPfZD8uXOo39/Fq23Ql1J4gkUUTZzP5PMu4cpLL2BqVS5lxQH0/tkiLQd62NfQCUAwx0N1bR7Vtbnk5HlPO3GhlCLad5CeQ1sIddRjWenzjfYeJJXow7C50XQdm8ML0XQxWU3TMGxu+robCPc04gtWndZrC3E0SZqIYyilWLZ7Fa1WJ8t2r6KuoOaYwS5hJnEY6cqneZ4cst1BlFJcUn0h0wsnouvDNxNj5cqVmKaJw+HAjMUwozFiKZPuWHqGiQ74HE4CHg+d8QSe2bMI5KWz4hfPLqOxYRsVrnqseAcH9/XS3VuCYegE88fjDY5D02RWiRBvdyuXPceuTg8hZwC7z8X8mSXoWjH+nPG4fcfe2AohhHj7SqQS7OpspDfRx3lls4D0kpneRBilFEW+fCbmj2diXg0VWaUnfd9sWiYd0U6cNiexVPzIA0qRxCSeisNR9+xOm5POaBemZQ76RWaiu5u9z7/IP//+GM+/8gpbWlowLUVufh43zJ1DUa6X0llLqZq9mCWL5jKlIptET5y9uzr458sHeMf10yitCAJQO6kQp8tOdW0u2bmDLxs6FV2tm+k8uJFErCuzzeEKEsibRKh9O4lYN3b74K+jGw6SqajMNhFDRpIm4hj1HQ3sbN+NDYOd7bsHTOtr7Wtj1d6XqO9o4HMLPoLD5kDTND4w+xZ8Dg/6MCcYDh06xKuvvkooFKK1tZUs0u2DHXYX9pSF19AI2Aw0h5OQK4t4vI2G1vTgG+1rxdP9LKU0kooqLMvA4TKYO62SSdOLcbntw3ouQojRKRKJ8IMf/IC+cJiPfPJL1E2owzDSY10gd8IIRyeEEMNrOOtJnUs6Il3pJTdte9jTtY+UlcJpczCnZDo23UDTNG6ecg1Bl5+g+/SWlNoMG5+68G7CifQXg4lEgudXPs/z//ckra++RtmcmVx07VVctPiiTNdIn8NzTMKkq62Nhz97Hy9s2MC2Q61YlsJSipRmUFhaTfYF5wPg9zj47G2LiXVF2burg9WvtXDUJBkOHQxlkiZ5hT7yCk+/hohS6WKxh7+sTMZ6SMS60HQbgdxagvlTcPtLCPc00rLnmfQskxMsT5LZJmIoSdJEDHB42l/SSuHCQdJKsax+FW6bi5V717Ll0M7MvtvbdzO9aBLAWemGczyNjY2sWLGCFStW8MamTezft49EPE5PRwdejxcLDdPpoCjLRjKmY8Wi6KkUeio9ZTHL76Zp55P0dtaDSmFZGgmzioq6hVw0sVzqlQghMhKpBF/4//6d9rYWJtRN5oPvvwZNS9/cySw0IcTb0XDWkxoup7Is/c3W7n+VF/e9Qnu4c8D2bHcWE/PGkzAT2HQ3AFXZZWcca9AVIOgKDOgaGesJQSrF+sZ9PPnsymO6RsY7Ogg17CV/7hwA9h5s46fPPIOVSJDUbOQWVlI4+QLKplzIhXMnc/GV6fv7ro4wq/6+dcDrFxYHqK7NpWp83mnVJnmzeLSLnkNb6GnfRnHNUnzByvR5Fk7F4Q7iz5mAYUsngJRStO1bg2kmsBtOrKOWJ4GFddTyJE3TSZkJmW0ihoQkTYbAWMq4Hy4u5bG7iSdi2A2DV5o3saerEZct3VZ3
SsEELqm+kLKs4mGP7+Mf/zhrnn8eMxolFYliJRLU+APEIhFyHA6UZWFqduxOhc1mgVMjlbChW0nsqRgO3WB2bSCdMAHySqaSXzmV4ooSGUyFEMfY3LqD1X9+gnhbN5ctqUDXdXo7d9PauJqc4tnkFM0Y6RCFEEKcgZNZln5Yb7yPHe27mVJQh9ueThhEkzHaw53omk5VsCyz7Cbfm3vW7i2P7hqZ4/eTisax7A50TcPm9dLQ0MCHPvABPnPLe2nftYsXNm2iLDvIT557FsPtZt2uJHm155FdUkPJpPOZPXU8E0uzcKcUNkPH6K9TEszx4M9y4fM7qR6fR9X4XLx+5xnHf3RR12jvwcz2UMfOTNLE4QricAUHPE8pk0S8B8NwYJnxo7aDjollJo5enYRhOEjEe1DKRNPkY684ffLTMwTGSsY9M8vETOKze4gSoTcWxVImoXgvF5TPZnH1BRT68s96LKZpsmHDBl544QU+8YlPYBgGyrLw7m8i2XqIyUVFzJswiTnl5WRPqOO2//kBzc0HsLncWE47DlsKpcCwmSSdDqIpBz3hEPl5pRQWLMGXlSS/cgEuT95ZPxchxLnr0Wceo/tgNzhyULmLSCRMug5tJhkPkYr3jnR4QgghztCJlqVbyuJAqIUd7bvZ3rabplD6A77DcGRmW88smkKeJ4cJudW47Gc+8+KtvLlrZKSlFRODuN2NMxUh1dEJZorGtjY+9a1vUhEMotDotEziPT143G7mTCwk+pEvMLk0G59SHGzsZueafelzc9qYMa8Mw9DRNI2bbp8zZLOwLTNJ696VA4q6goYvWEWwYPJbdqLTdRvjpt+KmYoO2J5MJlm9ejWLzl+E3T5web1h86Dr8pFXnBn5CRIZ9R0NbGurz7Qw09Hx2t2YysJu2JhVPPWsJkxisRgvvfQSK1as4Pnnn6enswszEWfhwoXMmzcPTde5+eLFXFtRSXB8HW25FWw2c9jdHuNdk+bz8KF/0B6LE/TaAQ2lNHRdYdhjdPfFcNs9fGTJNVz3/iW43I6zdh5CiLGhJxbi+T88jqXsTK6aRY89C6ww4e5GAIIFU0Y4QiGEEGfieMvSc9xBnmtYw872PfQlwgOeUxooGlDDL8cTJMcTHLaYj+4aaUaiWNEYCcOLpdk4FIuRSEQyM1wSlokeLGHigndz15034ilKFy53x01K44p9rx7IHFfXNUorglTX5sFRdUvONGFimUn0/uYRmm4j0teCZSVxuIJk5U8mK38SdsfJL/O3O/3Ynf4B24xkkpTy4PIWHJM0EWIoSNJEABBORPjFq3+gOxaixF+Y2Z7dX6iqM9p90i3LTtXOnTv5+c9/zpo1a4hFIpiRKGY0ggeN+WXlePorivf0xQnNWMwbOfPY3ZmAQwBxcnpbucDnwDN3If9bv42eaDuhPqs/ToWu65QWBrm1bDoLzU4SjQ24JtYN6TkIIcaeZ15byf7X92JqbqZMvIjpc8vpadsCgCdQhsOdPcIRCiGEOBOHl6W7DCcxLYbH7mF7+y4au5vY0LwZAKfNQW1uNRPzapiQVzOsdfwOU0qR7AnR09DAr7//A3q7ukiGeokl4uR6Ali6DVAYNickojhtdjRvNvF4hOxxU7nlxpuYPakic7xYNEVPVxTD0CmtDDJuQh4V1bk4XUPz0VApi76uvenlN6EDjJ/9QXTDjqZpFFZehK7bcftlabw4d0jS5G3OtExeatrA/21/hqZQM5qmEUvF8Tk8mX00TcPrSP8SOXrK4uk6dOgQyWSS0tJSAOKhXp75xz8wIxGybXbmllcwr6yCuoJ8AjU1lBala6fU7+/m75u6DgfFuJIsJpUEiC+vZ1dwDoW+Dv7ng3PYsWcbr76+j95wHJ/HyexplVSXTCe2OkQ8fJCDTz6Fv26CDNRCiONSSrFhy8uU+QuwnEX4Ciu4dGElB7e/AEB2wdQRjlAIIcSZMC2TP7/xOL3xPiyl0JVGthEkkozy4r5XuWL8xZRn
lVCVXY5NN4YtLqUUKpVC758x8fJzz/Gb//c1dh1sZn93N/u6u4nEYyQNA6UUEaVjAzSlcLt8+BwuXGYU0+uly4zjjPQRqu8kObEQCtMzNCZMLiA710NFdQ52x9Cd29FFXVPJSGZ7uGc//pz05wfpZCPORZI0eZtSSrG9fRdP7HyOtr4O2iIdaGjke3Jx2Z39VaiPcBoOwonIac822bt3LytWrGDlypVs2bKFq6++mq985SsAlALXV45jWnEx1Tk52MsraM+t5EUrh8lTKhhfWgLA1HG51JZnM6EkgNeCln3dNKxvIhzykbQ5qZkeJje3gwtzqrlgTg3JhEEqaaAbFroRY3dFEdH6Q3Q3taJSKTSZvieEOI4DoRZKQmFuuvwjdCsfRXWFqFgzqUQfhs2FL6dmpEMUQghxGvoSYV49sJln97zIrs4GQEPPdFw58kXhlbUXMz636qzGkp5B0kPf3ka2vfIKWza+xrbt27nuunez6F/uBaCps5t/bnkDpRSWbmBzONFTKbwOFw7DALsHrX89ja7pWIaTmG6DuImZsrDbPdgdBuG+ROZ1A0E3gaB7yM4jFj5Ey96VA4q6GjY3WfmTCOZPxunJHbLXEmIkSNLkbUgpxW82PZppH6xpOpqmk+/NzXTIebPTmW2ydevWTGvgvXv3okwLMxrBikZp2XKkfVnW1Cnc8I6rac8u43krhz3dJnQApEjUt3HZvPR0Qh0ojls0rGsCwGGP4HQqOnJyaO8KMdGX/oURSzhJpmwoBZquYSqFy0gQnGCyLPEOqicVM98mP/pCiONLmEly2j0c0P1oTj+XXTKe7kPPA5CVP1mKygkhxDlo+e7nWdGwlpSZoi3SgVIQcHrxOjxEetO1S870i8KT0XHoEI/911fZum0bu1sOsrezk8RRXTjLy0pZ1P//rxxwUTn/nfhL68gurqa1aScr/vBtfIbCsgcwdR1QoIEiHaul6VjJOA6bwXvffy233XH+kBVzhfRnCctMZloB64azP2Gi4QtWEiyYgi9YjTaMM3SEOJvkru9tSNM0SvyFbG/fzYXlc9jZ0UBbuB1d04nGoux7bQ/7X2+gr6sXX7af8unVVMwch2G3EU/Fj/tLRCk1YNuXv/xldu/ahRmJQjzGpGA2c2snMrusjNLa2sxzfvrEDurVFOgEMEHTqCnNoq4ki5LAkSrkDqcN07Rwu3qoKG/D7+nAn1vNL54vIN/oIJjVRzjmwjTTA/SRuTIaiZSNnOwQJbkWrTGdlKmw22R5jhDiWEopNj+/kZ6yS4nuCeEsyKKmIptI71wMm4ugLM0RQohzQiwZQ9cNHP2FSP1OH6ZlkuXy0xHtItcRpHVzE9vW7sDW0E6qOo/CC+oomlZ+RsvSlVIku7vpa9zH7tdeY/OGDZSVlLL0/vsAONDSwvf++hcs0+yfQWLDcHoJ5paRXTqeiz/+scyxJtVVo7vuoizPS47DjjG1hi1P/pJD3W34HDb0w3e8CtBU5vVjkS7K8/N4763XDVnCJJWM0NO2je62rThcQcrr3gmAw5VFSc1SPFnlp1TUVYhzhSRN3gZSlsmafa9QFihmXE561sZFVecxs3gKWU4/6w+8htPmZM9r9bz0yAp6D/WgLCvz/G0rXsdfkMX5t19CydQKOqNdmJaJzbAN6Hjz8ssv8+ijj+JyuVBKMc/nx2dzMGdyDbNKSvE4HBjFJfQWVVN1TTp/rmkaXpd9QKLE17/0Zu/6JppsOnUTCzBsGn1dDUydvB0zcQhd1wE7TpeDWxdX07hpPYahsJQd3ZYuAGuZFrqhAxqgYxgxplYcpHrmpdiHMNsuhBhbNmzYwH/913/h9vh53we/wZRJ6W4DHn8JHn/JCEcnhBDirTT1HOSlpo1satnK1RMu5fzy2UC6PXCJr5C/bXuali1NbPzti4RauzFiSQwTzMYmzJe2ECgMMuv9C1iWd/KzTZRSbPz9H9m8fh1bt25j18GDNHR1EkumW+teOW0al3/+
c2iaRv0hO96SiQTzyvGV15FdPA5/Vj6aruNx2amqnZw5bpUjfb/bvS9EN2DGYiw972b+tOJhesNdeN0+bEb6I52GImUmCUf78Dh93Fk7mfieBpyTJp72e3l0Ude+rgYOfy2ZSvRhmQl0Iz3bJCt/0mm/hhCjnSRNxjClFFvb6nly53N0RLoo9hfw8fPvQtd0HIadXE+688OnLrybZ1c8y32//CNWb5KKojJsShFv78SZl0NK0+jo6GTzL9dy239fz/yp83h62dOsWLGCtWvXEo/HUZaFFYuxfv16Fi1alO7rvvgSuoO5GEXFdORV8KrKZU8IaIdxloOi/jgXTy9hQpabg3u72Lu+KRO/pkFhSYCOgzvo63yZRKwbAMOwkZU/iZzi2Thc2Wz60zME/d2Ylh2tf1ri0RVZ+lepYlp2svzd7HnjdSZNLpRCsEKc4x588EEefPBBzKOmNA+FH//vj7Esi6vesZSP3nHekB5bCCHE2ZFIJdjUuo11+zfSFDpSW2N3Z2MmaeK0OYimYjy38jle/PEzJCNxfF43AWz9MzUg5LHR09LFiz9+Bptu48raizOzTZRSJDq7iDQ1sXfT64S7ulj4yY8D0NXVxR33fx4zHu/fFyzdQHN4COaWMP6KqzIx+XxuFrz/izgdNsoKfJTkeAjYDOwpC5W0yM06Um8k1BWluzMKQCDoIlW/i/lZblJLPszTL/yW7t42LMtKB9/fNTInkM+VC95HtaOTHX/+P+Z+se607nu7Wl6n/cC6AUVd3b5CsvKnEMityyRMhBjrJGkyRjX3tvL4jmfZ09kIgN/pZUHF3EH39eguvvtf3ybSF6a8tCzdQae1FT2ZQvWG8RYW4in10NzczBc/8+94POnOOsqyMKNRcu0OZuXkMmdSOXNq0sURe/ri7CmZypZkWTpR0pN+LU3TqCkLEk+kMq/fdbCXN15u6n8cSsrTPeKrxufi9jjoPvQGnbFudMNBduF0copmYnN4AUglTVzGNmyGSSJpAOnjago0XaEpdThrgrLAbjdxG9swU5dhs8s6SyHOZffeey/33nsvoVCIrKysITnmnv0NLH/iCWzRJBeNqwQg3L2P3q5dBAum4fLmD8nrCCHEuepsJaxPl1KKJ3Y+y8sHXieeSicsDN1gWuFEzi+bRWWwbMC+j299ljW/epZEJI43x4enLwkKLA10BZ6khZXjI9zZx9pfPcuz1dOIpkrZ/OoGtm/fnp5B0tlBbzzOjJJSLvjwhzDcblp7wJldjKHZ8BdU4S2rI6doHIHcInTd4KJ3Ts0kLiaXBgmcV0W8N0FbS4iOptZ0Ob9+5y2qxu1JJyRmzCtjyswSCor9OGyw7GO/pcFRS1VJAR+97n527d/C7gPbicbDuJ1eakonMr58Cpbdz4HYIRz76rGSSQzHWyc4LDMBmp6p26VQpJIRKeoq3vYkaTLG9MXDPL1rFa8ceB2FwqbbuKhyPourL8BpG3ywXL58OY2NjeTn56NpGmYshhmNEbcUPV3d+O12Ajk55OXlcejQIWK9vUwuKmZmbi5zS8upzM5G0zRcJcWo/ux606E+/rG5GziSKJlYnIXHUhxs7CLRHYd0J2HGTcijeX834ybkUVbhItz9Bk6PgduT3iGQNxHLTJGVPzlTcOow3VCUlNpIxDy4BjyiiMXiuFxOMlkTAGwEcm3oxsDuQEIIAfA/v/wRWkxRVDiVeDI9BnW2bqKvaw+aZuDyXjzCEQohxMg6GwnrU2VZVv9S7fR9Zkekm3gqTo4nm/NKZzKndBq+/i/YjmZaJq+8uI5wWy+ugBtbysKWtFCHv2ADjLiJzWXgCrjpa+vle/f9f/xP/Mh9oyI9gwS7G72sIv2NHxCOJVl4+9fQdB1d1yjJ81Fe6Kc424NX16irCGaOsXdHG9s3twyILRB0UVAcoLDYnzk3gNKK7Mz/J1MW62quQGtPolBgz6WkbhEldYvSbZP7Y4n0Bxr3ldOWX8NS3cbxvipUShHt
O0hP21ZC7TspqLyI7MJpAGTl1WF3eKWoq3jbk6TJGLOrs5GXD2wCYHrRJN5RewnZ7hP/Qlu5ciWpVIpkMkkoFKKvo4NoLE5KKTQUybZ2Ajk5OJ1OlGkyNyePL1y0GAAjv4CO/EreULlUT65m4rhqACZUZDO5OpdxBX48ZjpRsmf9/sxrNtS3UzupAEi3PbvsHcV0NL/K/m07AIXDlUVW3kS0/mx3TvHMQWPXdRu1s2/HTEUHbE8mk6xevZpFFyzC/qbWwobNI50vhBDHiMfjLPvLY1jKztQJC9jUYbAo3te/hhuCBdNGOEIhhHh764h0sa5pIxua3+Bj591OjjsIwGXjFnBhxRxqcirRtePXrbMZNvLavHg0J4X2AImuHmKJFDFLEVMaqWQMAyh3u7EV5tMcbsbU3Gh2i6ycYnwF1fjKJpBTVE1WbgnXXToRw5X+2m58WZDrL51AttOOljDpaO3j0IEQ2ze3AlBWHKCgyA+kZ1WHuqMUFAcoKPZTWBzA5bYfL+yj4teYXJxPY7gdl8eR+VpQoYiEI3i8ngFL1WORBFVFediMY5fmHF3UNRHtzGwP9+zPJE0Mmwt/zvi3jEuIsU4+OZ7jlFJ0x0KZxMiMokk0dO1jVvFUqrLLBn1OIpGgo6OD4uL+b1E7O+nq6qKvrw9lWahkCoVCQ8Nt2HCRLjpluFwYNhuW2033xLls1vLY06dDb/q4ofo2rl6QTproQG5fkj0NRxIlmgalFUGqa/OpGp+LUopI7wE6m1+lr3tvZj9PoJTckrkMnCFyfHanH7vTP2CbkUySUh5c3oJjkiZCCDGYP/39z0TbQ3h9JZRWTGfBonH0tG0FFG5/CU5PzkiHKIQQ57y++l34l6+gr3YC2ZPfunioZVlsa9/FS/s3UN/RkNm+sfkNLqtZCEBZVvGA5yjLItHVRaylhXjrIWItrRQuvRxnXi6xviixSIS9vb2k+pcYKU1HoaMBlrIw4wmcSRNd03Hmj2fJLZ/F1j/bOT/bQ1mBj4qiwIDZIx3NIfas2UcyceyypaxsN/HYkaXpNXX51NSd+nJPy1TEwglcLjtYKlPDz+vporZyF82t4wlHjsxMcbnsxCIJLFNh9HeNVMriQP2T9Hbu4XAVQE23EcipJVgwBbcUPBfiGJI0OYc1hQ7y+I5naQ938rmFH8Fpc6BpGtdNvjKzj2VZ7N27l61bt7Jlyxa2bNnCzp07qamp4be//S0AOTk56LqOgYZD03E4HRiGExxenFYcm0qR7OnBcLkIx5Ls9VTw93B6loimaYwvC1JXkkWB98jSGcOmY7MZ/YmSbMZNyKOyJndAFr21cTWdBzf2/03Dn1NDbskc3L4ihBBiuK1/7SV0y8708fPRAj7mTi+mYdM/AciWNsNCCHHGlFLseuyvaAcPsOuxvzJ30r8ft0BpLBnjhX0v8/KBTfTE0t/QaWjU5lVzftksJuYNnAER3ruX9hfXEmtppW3fPna3ttLQ2UlDZwf7urv404Ra8vNyCQaDWJrWXzwVbLoN3e7GZnNgt9lxqyT03/tqGlRXlvHuxRMpK/DhM3RCnVFaD4Y49HoL5V4n5KVb7Hq8DpIJE7vDIL/QT0Gx/5RmkZwMw6Zz4+1ziEWSA97Ttsa/0tfVTdX4PvIrrxjwnro9diwzgmFLx6lpOsoyATWgqOubl8ALIY6QpMkISiQSLF++nGeffZbNmzezevVqLrvsMpYsWYLjBMWaQrFelu1axcbmN1Ao7LqN/T3NjM+tGrDfAw88wIoVK4hEjlS8VqkUVjJJy+7dmKaJYRgsXryYh3/yE7LsDgzLQimI2X2kdAcJTUMzI2jRGNFQCJvNRvn4WdSWZzOxJAv34aU36/bToGtMqMvH6Ur/Yrjo8vG4PY7MLwrLSmGmYhi29DRGf/Y4ulpeJ1gwub8TTnBo32AhhDhJlrKYVZJF/rWfBmc2dXOqiPbuJ5noRTec+HNrRzpEIYQ4pyUSCf7vlw/x59/+jlAs
RmDbFm7KyefaD9w16H2vpuk83/gy8VQcr83NPO84pugFODrDxLa+Qn3rExRf9Q4C/e10Vzz7HH/58U9o6OykLdyHpmlodhu6zY5mt9Eci5IPnH/BQn70o5+S4zRw2ewk7T5M3YamFErTsJSBlQoT6wuj2+0sveAiYo09rF3fdMwsktbmUGbGSEGRn+tvnUVOrhdNP3sdGn1+Jz6/M/P3vu69pGIHUBikYgdwuzrwBauwzAShjnra9m0h2tvC+Fl3YncGAMgvv5CCioVS1FWIkyRJkxGyevVq7r//fhobG0nG45iRKK9t2MAf/vAHKisr+cY3vsGiRYsGPCdhJnl+7zpW7n2JpJkkHo6RG/ET7HPzoyf/m/379/PHP/4xUzwq2tNDb3s7dqA6O4dKj5dxgSzG5eZRlJOdKRa1ZMkSKkpL2btvHzl2BynDianbAYWl2zEtG5qZoKOlhXETJvGp97ybtgNhdq9709Kb8iCxaCqTNMnOTRfgMpNRulpfp7NlE1n5EymsTJ+X219K7ewPYtiPtFUTQoiRcDDUir4zhdudS9zp5/LFNXS3LAcgK3+i1EESQogzcPi+t/6NNzATCZSuoVmK5+77HN975Fd86b8ewD0ui72d+7h12rsxHA6cNgdL3RMxlq/DHepBmc00xuPs7epkT0cHDZ0d/GtZKTP6kyYt8RgbQz3obidOv5e8onKyCytwBkqoGjeBifPmAXD1VVdQ6vPT1t2Bw+HG0m1oHOm2aGo2UhiEYmHKs4I4VSXN+7sBBswiKeyvR3KYYdPJzfcN6/uqlKJt3xqUSmFhR1kpDu55Fm+gnN7OXVjW4RkpGpFQM1n56aSJdIIT4tTIXeAIWL16NXfffTehUIj8/Hysri5SpoXN7UbPzqahoYG7776bn//855nESSwV5/trfsHr619j78bdRJpDWN1JHEb/LI5kEpVM0tTUREVFBQDvyC9kwXkLKMvKwuhPpOg2A1dxEe7S0nT7MaeTvS1hFl9+G3946Ju0x2O43B40yGTcYxjEYjH8Dgf33PxBdmw6BICma5SWBwddegOQiPXQeXAj3W1bUFZ6HWe4Zx9KWWiajqZpkjARQoy4zs5OktE43aWLSRzqJVhTSDDgIhnKI9p7UJbmCCHEGTh839vT2Ylf1zG8HpQGmgLTtNj+xmbuuuU93HPZhczNymXHux1MvurdABTY8vnDiy/T0NnJ3q5ODsVi6HYbmt2ObrPR7HQwo/91yibP49Kb7gZ3IUl7HnbHkXvMpMOGzZ6ezRJv2Mu/zJzN19e8QEc8jlt3YTOO3MOmzCS9sRgBm4MPT5zMxEoPRRMrKSj2n/VZJKcq3NNIX3cDus0FRLCsJL0d9cQjHRg2Z7qxQv4UsvInYXcMb0JHiLFEkibDLJFIcP/99xMKhSgpKcGKx0lGo1gamNEo9qwsSkpK2L9/P/fccw/ve9/7+PjHP04wGKQ6u4KXmlbRvfEADqVDIkm210uV18e4giJqcvPI9Rxprzb5vPmEfH7cZaW4y0pxFBfTqtzsau5jfHkQw5me2mczNGZHeymePZ+fbN1KazSKFQ+joaFQ6JpOkS+bj02dynl6jANV2YyrzaNqfG5mVsnRYuE2OppfIdRRz+ECUy5PPrklc/Dn1qKdoKq5EEIMtz/96U/86le/YtZ5V1JSspiLL0kvxckvO5+80vkyZgkhxGk6+r43227HNM10W18zfX9ooJFn2GgLx3hk+VpSs2bh2bGXyVeln7+3s5NH9+9LJ0qyAriyApSWllI7oY684iqmzJ6dea09HQ7ImwuAHcgJuBlflkVNWZDKIh+GrqGUovmJp7Bq3s07vTN4au2f6Q61YSlzwH1vdlYRd0ydxWS/jeKml5hw03nHrb0yEsxUnHCoiZY9z2JZKWw2DxBFKZVu5qAbVEy6Hk+gbFTFLcS5SpImw2z58uU0NjaSn5+Ppmkke3pImiYRM0XKtEjt3UtS00imknR2dfLLX/yCSy6+
mIWLFvHOuiVkV+zihdIQNbm5jMvNw9+f+DCcDtylpRjmkcrc+ZdeSmLafHbu76J+fzd7Xt5LIplei9kbKWF8WRCArM5m3CZkjb+UO6bdyfYDO9i7fzOxeBiX00t12VRmFxRS072B8BubWXDNFQQmHr9Ya0/bVkIdOwHwZpWTWzIXT6BcBm0hxKhwdD2pTZs2sXPnTpxOJzdeexGXLbkU46hvESVhIoQQp+/wfW+O30+qowOlgQUkTPPIH8siCfSFevnDngaq/UdmREybMYOl11xN9bhafLnlKGcBB7tNDrb30Qpk5x25H51Zm09uwMW40izyfU5iPXFamntoWL+ffUa6gKpKpeg50II9pVNVNo0P3DidvXtfYW/TFmLxPtxOL+NKJ1JZOZfcVCfRjvV0N7WiUim0EezGaJlJIr3NREL7Cfc0EQsfwkzFSMa6sbuC/ffYGg5XEIXCSsVQypR7byGGiCRNhtnKlSsxTROHw4EZi2FGo0RSKTqj0SM76ToaCkNBtpkk2N9PzONwM3fhQoo7uvD0zx5xl6b/68zLQ9OP3NxHYkm++tB6IrHkgNf3OmxUZHvIMdL7KqU49M9/0uipI6Y7UDYvdVWzmVg1C12ZGCqFUoq4ZtAXycUfPsjBJ5/CXzcBTdNQyiLUUY/DFcTtKwQgp3gWqVSU3OLZuLwFZ/cNFUKIU3B0PalUKkU4HCYWjaLrGv92/318/wf/w/w5E0klwniDlZI0EUKIM3D4vleFw+lZELpG0jQ5dFSTAgBN19F1nZy8PKrHjctsb+rSCE6+gR2dEYgA9GQeK8zx0tMXx+9JL7vJ1XV6Iym2rWrgtaPa+0K69l4insLusPHSlHewZ2sL2Vq6MUFV3QIq6xZgWRa6frjtMLThZacvj+pJRcy3jdxHpgP1T9HbuQulrMw2pRSWmQBNQ9ePJHN0I/1eJFMx2vatwZtVKYkTIYaAJE2GWXd3d2bwSvb0YFkWDkPHoes4DQOnbuCwGRh2Gx2RCFV5BZTn5GSenz1nNtnz5maO0dUb4/V93ezcsAO7oXPL5XUAeFx2XA4DK5ai2O8kYDPQ4ybxSBJaw3Sl0pkYlUoROtCCPxol5S7FtFLYzSjBYB8V49vYtyufULeHhGan01dFMH6I7qZWzHiUUNdOOg9uIJnoxZddTXndtQDYnQFKx1+JEEKMJsfUk9IUPXt2Yeg6HqeH/Y1N3H333Xz1Sx9kSo2T3JK5FFQsGOmwhRDinNXd3Y0yTcxkEtX/2d0wHGiGA4cOTkPv/2OjS2n486qYOH3+gGMc6kwnWIrzfNSUZlGZ78NraPR1xSjJOzIrpeVAiH17OtOvYdMpKPJTWBKgqCRAQXEAh9NGMmVxMKrjdPiwkgpTI51RASxNQ88kytNfXjrtPlpjOilTYbedveSDskyi4VYiPU1Ewy2UTbgmk7TXNAOlLOwOP56sMryBcpRS7N/+N+zOLDRdRymVOZamaRg2N33dDYR7GvEFq85a3EK8XUjSZAg8+OCDPPjgg5im+Zb7BoNBlFKZWSaWBrrNSTArC4cZxSAFaBiBAHbdoObyy8madqQIYThhsWt/Bzv3dbOrqZu2rv5MvVI4bQY3XjYhM7V8vG7Qi47WmyTJkRkngSwXuQU+lKXQbP0Z920tZJMumJW025gwvplgXpgYTprfKAal0WYvZ28wyNyZKYKbH8Ey4wAYNjduXxFKKclmCyFGpTfXk9I0jebWJlQsCRi43UEcXg+hUBdf/eaPefjBfyGQO2GkwxZCiHNaVlYWqVgMZbejdA0FmLqLnGAAQyWwW2E0FLoFqXiMnqjOjsauTDJkQkWQ915Si0tBT0eElv0hNm5qyRy/ujaPvIL0vuMn5ZOT56GwNIvcfC+GcexMQbtN5/ZLa3nqz5sxvDp2hwGAQtHX24fP70Hrb6OTTJiYpsVVl0zAbhvaWYdKWcTCbenlNqEmoqHmozrdQDzSnpmtnVc6j7yy+ekEiZauy7J38++x
rBR2u3fQ4+uGg2QqKrNNhBgikjQZAvfeey/33nsvoVCIrKysE+67ePFifvvb3xLu6ECzLJQOKd2FqTlIaOCw+jAsSEVj2Ox2Fl40sO3ww49vYXdTN1gKI2XhTCqyHAYuBU4FylLQnzQpLg6QiKYoKPJRUBwgv8hPQZF/QJebTMbdfiTjnhMMkRMMkUwZ5ARD+INhOrsDVJccpLysBd2pYaZ8ON1BcornkJU/SdpxCiFGtTfXk4ql4oQ7e1EYuBweNMPAH3DhSfppPniIDZsPMeMiackohBBn4rya8TxkmiQMA5tuoLBhaTZAYWk2FDY0UiSsFHYU18ybRlXRkTa+Ddva2PR8wzHHzcp2U1QawDCOJANKK7Iprcg+YTxKKbZvPIiVsnC77Ye7DKP6Ow7rKjPxBIfDINybYtvGZiZOKjijxMPhpTWHZ4+0H1hPe9O6AfsYNheeQBneQBm2ozrdONwDz+lwxxzD5j5uTDLbRIihJZ90h9mSJUsoKyhg944dBF1OLM0x4JeHpdmBBF3d3WQXV7Fyl4fLokl8/YmOHDS6e5M40XA6DJweA71/wNR0jXBvnKzs9IyRBZeO5+Kl+glbox2bcdepqajHblPEYnZcriRTalrZva8Yl8OD04BgTillEy7CnzNO1vsLIc4JR9eTAugNdZFIWoCG2+0H3cTpMIinUpiWYuPWNu4a2ZCFEOKcppSi9NBBCj1uDoYjZOsuUjYnCg0NC4VOSndimBbdCYvcQAFZViUqfGTGRV6BD13XyC/0U1jip7A0i8JiP+7+OianyjIVoe5oeqlO4sgM8XSNEEgkBhZPdTht9PZEsUyFcQrLc5RSJGJdRHrSM0kioSaKx12OPyddr8XjL0U3HHgCpXgD5XgCZTg9uW95X62Uom3fGkwzgd1wYpnJzHawsMxkJn5N00mZCZltIsQQkKTJMLPb7Xz8woV8YfduOqIx3B43mn7kl0fMMohGY3jsLt4543I8XUl27G5nztRiAOrKgoSbQgD4Ak4K+meP5BcHyCvwYrMZR17LYQwaw9HenHH3e7oI+DpRlobLGUdZBgFfFwFPN72RIrbtdFAYrWHGghoZfIUQ54yj60nFUnEIJ8kJFhFPJbAZDgwzQiwRhv62k9G4JISFEGIwJ7ss3Uom6Tl0gLtmTuf761+lM5rA6QWboQAd0IhbGpFYErfTxxUXvhfThEMHuhk3IT3Tr7g0wB33XjDg/vZMGLZ0F51YZGCjhEiokYbt/0f1xGvxBCoHPOb22DFOYnmOmYrR27krnSTp2U8qObDYbSS0/0jSJFDKhLn3nPKXj0qZJOI9GIYjs0w+vR10TCwzwdG354bhIBHv6e+kIx/7hDhdcvUMs94dO6mJRviXC+bxi9e20hKOYcUiaIACdM0gO1jO1RfeSHnpJJwunRzXkeU0NXX55Bb4KCjy4/GeXpb9aIcz7h6PRcDbRGVZPXZbHKVAoaEshVIGeTkNdO6ZDuTQ2xM75Yy7EEKMpMP1pAD6ertRygmajtOenpmHshGP9OC0G6DrZGfnnOBoQgjx9nWyy9KVofPqFePomuNn9rRsNvxlEz3dXSgrPcsPFLpmEAwWsuDKKwhMCxArbWXOwqrMMXRDZ6hT2D6/E5/feSROpehpeQ2Po4dk5DVyx08+qS8Gk/E+LCuJs3/5jJmKcXDPs5nHNc3A7S9OzyTJKsPtLTzqsdM7K123MW76rZip6IDtyWSS1atXs+j8Rdjf1BrZsHlkGb0QZ0iuoGGklOLgk08Rj4SZWJjPR979OTa1tNHQtJlIIoHL6aWqfBq15ZPINtsJRvZSRpT8wiN1TYI5HoI5niGLSTc0LlqY7vtumXFSyTgoHU23kUiC25cFysTtjTD9/FKc3vKTzrgLIcRocbieVCjSi+pLYGleOLKaHUuzoaUSxJWF3e5i8eLFIxitEEKc+2yGjY8t+Rg/fPRl8oM93Hnzrexu3ERj02ZikTAut5fK8mnUVM7gYEDh
zc/iX66Zf8yH/rMt3NNIuHsvljIId+89bg2QVDIyYLlNItaNP6eGsgnXAGB3ZuHPqcHpzsWTVY7bV3RWkhV2px+70z9gm5FMklIeXN6CYX//hHg7kKTJMFKpFN0HDxA3FHHySLhKmFxZyNTKSSR1N7oySc830Sju3U7AbMfbHcLjGpopiUop4pF2Ir3N5BTNANKFopwuJ/GIBspC123YXUF03Ua8pwe7ww7YScZ7SPRtpKR6oizLEUKMCqfSuWzJkiVUVlayZdsbBGxZKP3w/L4jyyOjYRu9qR4mT5rGkiVLznr8Qggx1rW2mjTsMSmIu7HpFrU186kdfx6maWIYBiiFbiny4zoNe0xaWlMMZ83SwzVClEphYUep1IAaIEopDu17nnB3I/Fo55uerWGZqSN/07RMAkUIMbZI0mQYaTYbr15Rw/bGGJ7G6ThjdizdAs2GIpFucaZAtxy8WjWTSOXrTKysYb7t9P+ZlLKI9rXQ27mb3s5dJOP99VCClThcQQDyKxbgyx7P/u2PYncGMAy79HsXQox6p9K5zOFw8LH7P85n77yX7kgfXrcfm3EkIW2aCULRMF6nnY/d//FMwVghhBCnRynF4y/ugWgKZxKs9G0uKIWCzL2mpYEzqYAUj7+4h7rK7GH7gu5wJxrd5gKiaBj0dOzI3O9qmka0tyWTMHF68vAGyvAEyvEESjFszhO/gBBiTJCkyTAyLZNWPUxE5RNIBEnZUliH79mVlpkprpspbIkgEZVPqx7BtExsxqn9U8XCbXS1vk5v127M5JF1j5pm4A1WoKwjmXGHK5vm+mXS710IMWYppYjFm3nPRbfyt5efpjt0CEsdva5eJyergOvmXUEs3oxSSsY5IYQ4AylT0d4VJU+lx+Cj5wQqBdbh7+cUGEqRpzQ6uqOkTIX9LNfNSyUjRPtaObDzcZLJCBoaNi1JMqkDFocan8/c7+aWzkVZFp5AKbbDdbCEEG8rkjQZRjbDxjVlN/Kn5a+hm3q68Gv/L4zMNEUABZoJ2XtmcM2imSeVMLHMBEpZGDYXAMl4D92H3gDSCQ9fdjX+7PH4gpXoxsC1jtLvXQgx1u1s34O15gBFVRdyd/kc9uzfxJ6m7UTjEYqK4bzZJeS4L6C7Kx9rzXp2Lt5DXX7NSIcthBDnLLtN5/ZLa3nqz5sxvHqmq6NC0dfbh8/vSc+yBpIJE9O0uOqSCdiHsG6eskzisS7i4Tb8ubWZGiNt+16k4+BGEv0zSFR/YVbdsKFpOn3djZn7XX/2uCGLRwhxbpKkyTBSSvHcigM4E0a605pKz1MMBkLUVjZS31hJdyiQ3llTOBMGz604wNzaikGTGalklL6uPfR27SbcvY/ckjnkl18AgDerkuzC6fiyx+ENlKHpg9dFkX7vQoixTinF8ysfIxGrxHLqOAyYVDWD+TNqqao9hNudxLI0tm5yYGk6iVglz698jAk3flrGOSGEOE1KKbZvPIiVsnC77ZnS2x53NzVlOzjUUUckmu4843AYhHtTbNvYzMRJBac19pqpOLFIG/FwW/9/24lHO1DKAqDak4fLm25l7PTkY5kJ0A1s/d1lesMRvP2dcJLxHrnfFUJkSNJkGCVTFuHmXgzATC/mBBTjyprIz+7GNA1efWMioKEAAwg395JMWTjs6aRHMt5Lb9duejt3EwkdoH91KJBeknOYbtgpqr7kLWOSfu9CiLEuZaYwXjpEn2MWukqipRfVU17VQU5uGNPU6Gz3Eel1omtJ+hyFeF/aSOq6FHabdCEQQojTYZmKUHcUh9NGMnF4cY6iqrSB7KxuUA3sbPBzeH26w2mjtyeKZSqMEyzPUcoiGeshFmnDEyjDZk93lexqfZ22/WuO2V83HDg9eSjryAIhhzuIpmk4nFkYNmf/l4UxQGZXCyGOJZ96h5GhaVTkeunRj9QYCfi6KcgOYZoGBdkhKotihPqCmceD2W6M/qyFUhYNr/8W86jkhtOThz9nfKbF2amSfu9C
iLFOMy3CiRosw47dTAEGgewwWTkRAGw2RTjkAWXDUIqkYSecqEEzLfktKYQQp8mw6dx4+xxikWRmWzy8n7bGtcRiBsUlYaZfUIrTW5553O2xYxy1PMcyk5lZI7FIG/FIO7FIe6Y2X2ntVQRyawFwefOxOwO4PHk4vfnp/3rS246eLXJ4lrXU8hNCnCy5HRxGhk3nPXfNy/zyUErRvu9vxHoNInEDjxMWXhQlt/wKUol2Yn17UakWdOPI8hhf9jiS8R78OePxZY/D4Tpxx4iTIf3ehRBjWUoZtPtKUQmTuGEHFLXjmtFt6Q4OmgbOYIo+x5FvPDvcpaSUIb8khRDiDPj8Tnz+dIcZpRR7N29E0yyUZkfTLBJ9GympnghAKtFLLHyARDQbR/8ymb7uvRyof/KY42qagdOTi6YdSbD4glWMn3XXW8YktfyEEKdK7geH2dG/PPq695KK7cfmdKMnoxg2O/G+ejr2PQLqyBTCeKQNl7cAgOKaJQN+QQghhDgxl8vG3Z9YSGdXeuq1FW8m1bkJLBumaaEbDsrK4lRNr0B3lgCQm+3B5ZJfkUIIMVQy7X0NFxoRQKenfTu7Nz6MZcYzM6nzyy8kr3QekJ49YrN7MjNHXJ58nJ78/uU1p34/LLX8hBCnQ+4IR8jRUwM1dGxajGQiDljEw4dweQvTHW9yanC4gpnnScJECCFOXWlxFqXFWf3fdD5LyFCYykTTwOHyY5lRAsY2qibNlBtjIYQYYkff9+o2B4YWI9Xf3revuwGHOwdN03F6cjEMR+Z5DleQ2jl3D2EcUstPCHHq5OofIUdPDUxT6LoNTXegaTqltVfiz5F2l0IIMZSOHnttdh99vV0Yhh1NQ6ZhCyHEWXL02Juui6dh2BzQ33S4qGoxwcKpZ71mntTyE0KcDhkBRsAxBaiUwlTOdJszTSMZ76G9aR2+7HHyjacQQgyRwYr/WdhB06TonxBCnCWDjb0p5cbbX5cvGe+hp20r2UUzhiUeqeUnhDhVstZjBBxTgErTUBigaccUnhJCCDE0TlT8T8ZeIYQ4O2TsFUKc6yRpMswGrOk8as3m0XTDgWWlaNu3pr8wlRBCiDMhY68QQgw/GXuFEGOBJE2G2em0ORNCCHFmZOwVQojhJ2OvEGIskKTJMDq6zZmm6VhmMvPncJuzw380Tcfsb3MmWXchhDh9MvYKIcTwk7FXCDFWSCHYYSRtzoQQYvjJ2CuEEMNPxl4hxFghI9IwkjZnQggx/GTsFUKI4SdjrxBirJBRaZhJmzMhhBh+MvYKIcTwk7FXCDEWSE0TIYQQQgghxEl58MEHmTx5MvPmzRvpUIQQYlhI0kQIIYQQQghxUu699162bt3Kyy+/PNKhCCHEsJCkiRBCCCGEEEIIIcQgpKbJEDrcIi0UCp3S85LJJJFIhFAoNGrWdo7GmE7FuR7/mZLzl/M/nfM/PHada+0eZewdPc71+M+UnL+cv4y9b200/pyMxphOxbke/5mS85fzP9tjryRNhlBvby8A5eXlIxyJEEKcvt7eXrKyskY6jJMmY68QYiyQsVcIIYbfyYy9mjrX0tqjmGVZNDc34/f70Y5uPP8WQqEQ5eXl7N+/n0AgcBYjPHmjMaZTca7Hf6bk/OX8T+f8lVL09vZSUlKCrp87qzdl7B09zvX4z5Scv5y/jL1vbTT+nIzGmE7FuR7/mZLzl/M/22OvzDQZQrquU1ZWdtrPDwQCo+4HfTTGdCrO9fjPlJy/nP+pnv+59C3nYTL2jj7nevxnSs5fzl/G3rc2Gn9ORmNMp+Jcj/9MyfnL+Z+tsffcSWcLIYQQQgghhBBCDCNJmgghhBBCCCGEEEIMQpImo4DT6eSBBx7A6XSOdCgZozGmU3Gux3+m5Pzl/N/O53+yRuP7NBpjOhXnevxnSs5fzv/tfP4nazS+T6MxplNxrsd/puT85fzP9vlLIVghhBBCCCGEEEKIQchMEyGEEEIIIYQQQohBSNJECCGEEEII
IYQQYhCSNBFCCCGEEEIIIYQYhCRNhBBCCCGEEEIIIQYhSZMRcODAAb7//e+zdOlSKioqcDgcFBUVccMNN7Bu3boRiysWi/HpT3+aRYsWUVJSgsvloqioiAULFvDQQw+RTCZHLLaTUVVVhaZpg/5ZvHjxSIc3ZH7zm99wzz33MHfuXJxOJ5qm8fDDDx93/1AoxKc//WkqKytxOp1UVVXxuc99jr6+vuELeoiczrUzls7/eL75zW9mftZfeumlYx5/O7wHJ0PG3rNDxt7BjaXrTsbewcnYe3Jk7D07ZOwd3Fi67mTsHdxIjL3SPWcE3H///Xzzm9+kpqaGxYsXk5+fT319PY899hhKKX73u99xyy23DHtc7e3tlJeXM3/+fCZMmEB+fj5dXV089dRTNDY2snTpUp566il0fXTm2qqqquju7uZf//VfB33szjvvHPaYzoaqqioaGxvJy8vD6/XS2NjIQw89NOj5hcNhFi5cyGuvvcbSpUuZNWsWGzdu5J///Cfz5s1j9erVuFyu4T+J03Sq185YO//BvPHGG8ydOxebzUY4HGbt2rWcf/75mcffDu/ByZKx9+yQsffOY/Yda9edjL3HkrH35MnYe3bI2HvnMfuOtetOxt5jjdjYq8Sw++tf/6pWrlx5zPbVq1cru92usrOzVSwWG/a4TNNU8Xj8mO3JZFItXrxYAerxxx8f9rhOVmVlpaqsrBzpMM66Z555Ru3du1cppdTXv/51BaiHHnpo0H2/9KUvKUB9/vOfH7D985//vALU1772tbMd7pA61WtnrJ3/myUSCTV79mx13nnnqVtvvVUBau3atQP2GevvwamQsffskLH3WGPtupOxdyAZe0+NjL1nh4y9xxpr152MvQON5NgrSZNRZunSpQpQL7/88kiHMsAPfvADBajvf//7Ix3Kcb1dfnkc7US/PCzLUiUlJcrn86m+vr4Bj/X19Smfz6fGjRs3TJGefW++dt4O5//AAw8op9OptmzZou64445jfnm8Hd6DoSJj7+mTsXegt9t1J2OvjL1nQsbe0ydj70Bvt+tOxt7hHXtH53yztzG73Q6AzWYb4UiOsCyLZcuWATB16tQRjubE4vE4Dz/8MF/72tf44Q9/OKJrZUdafX09zc3NLFiwAK/XO+Axr9fLggUL2LNnD/v37x+hCIfWm6+dsX7+GzZs4Ktf/SoPPPAAkydPHnSfsf4eDCUZe8+MjL1HvN2uOxl7jzXW34OhJGPvmZGx94i323UnY++xzuZ7MHpGKMG+fftYvnw5xcXFTJs2bcTiSCQSfO1rX0MpRUdHB88++yzbt2/nrrvu4rLLLhuxuE5GS0sLd91114Bt8+bN4/e//z01NTUjFNXIqK+vB6C2tnbQx2tra3n66aepr6+nvLx8OEMbcoNdO2P5/OPxOLfffjszZ87kvvvuO+5+Y/k9GEoy9p45GXuPeDtddzL2Dm4svwdDScbeMydj7xFvp+tOxt7Bnc33QJImo0QymeS2224jHo/zzW9+E8MwRiyWRCLBV77ylczfNU3js5/9LF//+tdHLKaTcdddd3HRRRcxdepUfD4fO3fu5Hvf+x6//vWvueyyy9i8eTN+v3+kwxw2PT09AGRlZQ36eCAQGLDfuep4185YPv8vfelL1NfX8+qrr55wrBjL78FQkbH3zMnYO9Db5bqTsVfG3jMhY++Zk7F3oLfLdSdj78iMvbI8ZxSwLIs777yT1atXc/fdd3PbbbeNaDw+nw+lFKZpsn//fh588EF+8YtfsHjxYkKh0IjGdiIPPPAAl156KQUFBXg8HmbOnMkjjzzCbbfdRmNjIz//+c9HOkQxxEbbtTMc1q5dy3e+8x3+4z/+Y9RPGx7tRtvPj4y94lwx2q6d4SBj79AZbT8/MvaKc8Vou3aGw2gZeyVpMsIsy+IDH/gAv/vd77j11lv5yU9+MtIhZei6TllZGR/96Ef52c9+xosvvshXv/rVkQ7rlN1zzz0AvPji
iyMcyfA6nGU9Xjb18I3A8bKxo91bXTtj8fxTqRR33HEH06dP5/7773/L/cfiezBUZOw9+2TsHZvXnYy9MvaeCRl7zz4Ze8fmdSdj78iOvbI8ZwRZlsVdd93FI488wnvf+14efvjhUdsLfunSpQCsXLlyZAM5DXl5eUC6b/fbyeH1fIfX973ZW637G81O5toZi+ff19eXidvhcAy6zwUXXADA3/72t0yhrLH0HgwFGXuHh4y9Y++6k7FXxt4zIWPv8JCxd+xddzL2jvzYK0mTEXL0D/8tt9zCr3/96xFdz/lWmpubgSOVms8lhyuJV1VVjWwgw6y2tpaSkhJefPFFwuHwgCrS4XCYF198kerq6nOuGNTJXjtj8fydTicf/OAHB31s9erV1NfXc+2115Kfn09VVdWYfA/OlIy9w0fG3rF13cnYK2PvmZCxd/jI2Du2rjsZe0fJ2HuabZLFGTBNM9Nb+qabblLJZHKkQ1JKKbVlyxYVDoeP2R4Oh9WVV16pAPXVr351BCJ7a9u2bRs09m3btqmioiIFqFWrVo1AZGfXifrVK6XUl770JQWoz3/+8wO2f/7zn1eA+trXvjYMUQ6dU712xtr5n8hg/eqVenu9B29Fxt6hJ2PvQ4M+PtauOxl7j0/G3rcmY+/Qk7H3oUEfH2vXnYy9xzfcY6+mlFKnnmoRZ+LLX/4yX/nKV/D5fHzyk58ctDf9u9/9bmbOnDnscX3ve99j4cKFVFVVEQgEOHDgAE899RQdHR1cdNFFPP3007jd7mGN62Qcjn3RokVUVlbi9XrZuXMnTz75JMlkkn/7t3/ja1/72kiHOSR+8Ytf8MILLwCwefNmNmzYwIIFCxg/fjwACxcu5EMf+hCQzqouWLCATZs2sXTpUmbPns2GDRv45z//ybx581i1atWo/Pc8nlO9dsba+Z/InXfeya9+9SvWrl3L+eefn9n+dnoP3oqMvUNPxl4Zew+TsVfG3uORsXfoydgrY+9hMvYO09h7mskdcQYOZ8ZO9Od4GdSz6eWXX1Z33323mjJligoGg8pms6nc3Fx1ySWXqJ/+9Kej5puBwaxcuVLdfPPNqra2VgUCAWWz2VRRUZF617vepZ5++umRDm9IvdXPzx133DFg/+7ubvWv//qvqry8XNntdlVRUaE+85nPqFAoNDIncAZO59oZS+d/IsfLuCv19nkP3oqMvUNPxl4Ze2XslbH3rcjYO/Rk7JWxV8be4R17ZaaJEEIIIYQQQgghxCBGZ8lqIYQQQgghhBBCiBEmSRMhhBBCCCGEEEKIQUjSRAghhBBCCCGEEGIQkjQRQgghhBBCCCGEGIQkTYQQQgghhBBCCCEGIUkTIYQQQgghhBBCiEFI0kQIIYQQQgghhBBiEJI0EUIIIYQQQgghhBiEJE2EEEIIIYQQQgghBiFJEyGEEEIIIYQQQohBSNJECCGEOAdVVVWhaVrmj67r+P1+ysrKuOSSS/jsZz/L+vXrT+oYe/fuHZ6gR7Fz+b04Uezn4nmVl5ejaRr79+8f6VCEEEIIbCMdgBBCCCFO34IFCxg/fjwA0WiU9vZ2Nm7cyMqVK/nud7/LxRdfzC9/+UvGjRs3wpGK0aSqqorGxkYaGhqoqqoa6XAympubaWpqorCwkPLy8pEORwghhJCkiRBCCHEu+9CHPsSdd945YJtSiqeeeop//dd/ZdWqVVx44YWsXbuW6urqAfs9++yzJJNJSktLhzHi0Wmsvhfn2nmtW7cOgLlz545wJEIIIUSaJE2EEEKIMUbTNK666iouvPBC5s+fT319PR/60Id49tlnB+xXU1MzQhGOPmP1vTjXzkuSJkIIIUYbqWkihBBCjFHBYJDvf//7ADz33HO8+uqrAx4/Xr2Lw3VSAH7zm98wf/58fD4f+fn5vPe972Xfvn1AekbLD3/4Q2bOnInX6yUvL48777yTQ4cODRpPNBrlu9/9Lueffz7BYBCXy0VdXR333XcfHR0dgz7n6Fj+
+te/snDhQgKBAF6vlwULFvDkk08O+rz6+no+8IEPUF1djdPpxOfzUVlZydVXX81DDz10zP4nqv3R1NTExz/+cWpra3G5XGRlZbFgwQJ++tOfYprmkMa9fv167rvvPubPn09RUREOh4PCwkLe+c53snz58kGfcyJvPq+HH34YTdNobGwEoLq6ekBtnP/93//FMAyys7OJRCLHPe6UKVPQNO2453Eie/bs4cMf/jDl5eW4XC6mTJnCz3/+c+BI0mTevHmnfFwhhBDirFBCCCGEOOdUVlYqQD300EMn3M+yLJWTk6MA9fWvf33QYzQ0NAzYDihA3X///cpms6lLL71U3XjjjaqiokIBqry8XHV2dqqbb75ZuVwudeWVV6rrrrtOFRQUKEBNnz5dxePxAcc8cOCAmjZtmgJUTk6OWrJkibruuusyMVRVVam9e/ceE//hWL70pS8pTdPUggUL1C233KJmzJihAKVpmnr00UcHPGfz5s0qEAgoQNXV1anrr79e3XTTTeqCCy5QPp9PzZgx47jv55vfi/Xr12fev4qKCnXLLbeoK6+8UrlcLgWoK6644phzPd24lVLqsssuU7quq2nTpqmrrrpK3XTTTWr27NmZ433/+98/6dgHe+z5559Xd9xxh/J6vQpQN9xwg7rjjjsyf7Zt26be+c53KkD97Gc/O+Z4Sin13HPPKUDV1NQoy7IG3ed4HnvsMeXxeBSgpk2bpm6++ebM+f3nf/6n8vl8ClAtLS2ndFwhhBDibJGkiRBCCHEOOtmkiVJKLVmyRAHq1ltvHfQYx0ua5Obmqtdeey2zPRKJqIULF2Y+8NbU1AxIdLS1tanx48crQP3mN7/JbLcsSy1YsEAB6oMf/KAKhUKZx5LJpPrMZz6jAHXJJZccE/vhWILBoHrppZcGPPbAAw8oQE2YMGHA9rvuuksB6v/9v/93zPEikYhatWrVMdsHey9isVhm+0c+8hGVSCQyj+3evVtVVVUpQH3hC18YkriVUurJJ59Uzc3Nx2xfs2aNCgQCym63q6ampreM/a0eO9FznnnmGQUMmlxSSqkbbrhBAeq73/3uoI8fz9q1a5XT6VQ+n0898cQTAx77zne+ozRNyyTlhBBCiNFClucIIYQQY1xeXh7AcZfAHM9//ud/MmPGjMzf3W43n/70pwHYvHkz//3f/01lZeWA1/noRz8KMKB+ytNPP82LL77IzJkz+clPfoLf7888ZrPZ+Na3vsXUqVNZsWIFb7zxxnFjOe+88wZs+7d/+zeysrLYuXPngPa0ra2tAFx11VXHHMftdrNo0aKTOv8///nPNDY2UlJSwve//33sdnvmsXHjxvGd73wHgP/5n/8hFoudcdwA73jHOyguLj7mOBdccAH33nsvyWSSv//97ycV/+lasmQJU6ZMYdOmTbzwwgsDHmtqauLvf/87Ho+HD3zgAyd9TNM0ufPOO4nH4/zqV7865t/m05/+NGVlZYDUMxFCCDG6SNJECCGEGOMsywLI1Ng4WYMlHWpra4F0smPp0qXHfby5uTmz7YknngDghhtuwGY7tga9ruuZRMaaNWsGjeWd73znMducTmemlfKBAwcy2+fPnw/ARz/6UZ5++unjJjTeysqVKwF4z3veg9PpPObx66+/nuzsbHp7e4+pF3M6cR/W0dHBI488wn333cfdd9/NnXfeyZ133smqVasA2LFjx2mdz6n4xCc+AcAPf/jDAdt/+tOfkkqleP/7308wGDzp4/3xj39kx44dLFq0iOuvv/6YxzVNY8KECYDUMxFCCDG6SPccIYQQYoxrb28HICcn55SeV1FRccw2n88HQHFx8aAJkMOzSI5OVOzZsweAL37xi3zxi1884Wu2tbWddCwAgUDgmNf73Oc+xwsvvMDy5cu58sorsdvtzJgxg0WLFvGe97znpD+UH05ovLlV82GaplFdXU1XV9egyY9TjRvg5z//OZ/61KcIh8PHjSsUCr1l7Gfq1ltv5f77
7+fRRx/l4MGDFBcXk0gkMgVb/+Vf/uWUjvfXv/4VgDvuuOO4+xw+L5lpIoQQYjSRpIkQQggxhiml2LhxIwDTpk07pefq+vEnpJ7osTc7PNNl4cKFb9kCd8qUKWf8eh6Ph2eeeYaXX36ZZcuWsWbNGtasWcMrr7zC9773PT72sY/x4IMPnvTxzsSpxP3qq69yzz33YBgG3/zmN3nnO99JRUUFHo8HTdP42c9+xj333INS6ixGnObxeLj77rv51re+xc9+9jMeeOAB/vrXv9La2spFF13E9OnTT+l4h2fivHmp0mGRSITXX38dkKSJEEKI0UWSJkIIIcQY9uSTT9LV1QUw6HKa4VBeXg7Au971Lj772c8O2+vOmzcvM6sklUrx2GOPcfvtt/OjH/2IG2+8kUsuueSEzy8tLQWOzJQZTENDw4B9z8Sf//xnlFJ8/OMf57777jvm8fr6+jN+jVNx77338t3vfpef/exnfOELX8gs1TnVWSZApg21x+MZ9PE//vGPxONxampqyM7OPv2ghRBCiCEmNU2EEEKIMaqnp4dPfepTAFx++eXMnDlzROJ4xzveARxJCowEm83GjTfeyBVXXAHAa6+99pbPWbx4MZD+QD9YXZS//e1vdHV14ff7mTNnzhnH2NnZCTCguO5hsVgss8RlKDgcDiCdTDqeiooK3v3ud9Pc3MyXvvQl1qxZQ0lJyaA1Sd7K4WVb27ZtO+axUCjEAw88AEg9EyGEEKOPJE2EEEKIMUYpxVNPPcX8+fOpr6+nuLg4U4tiJLzrXe9i3rx5rF+/nrvuumvQuiVdXV385Cc/OeGH+JP1ox/9aNBiqS0tLbzyyivA4ImJN7vpppuoqKigubmZT3/60wNia2ho4DOf+QwAH//4x3G5XGcc96RJkwD41a9+RW9vb2Z7LBbjYx/7WGZWy1A43Klmy5YtJ9zvk5/8JADf+MY3ALjnnnsGrWXzVhYuXAikuwn19PRktre0tHD11VdnugjJ0hwhhBCjjSzPEUIIIc5hv/jFLzJdXuLxOO3t7WzYsCEza2Hx4sX88pe/PKkkwdmi6zqPPfYYV199Nb/61a/4y1/+wowZM6ioqCCRSLBnzx42b96caUt7Oh/Kj/azn/2Me++9l+rqaqZOnUogEKCtrY3nn3+eaDTKpZdeyrXXXvuWx3E6nfzlL3/hyiuv5Mc//jFPPvkk559/Pr29vTz33HPEYjGuuOKKzCyJM3XXXXfxgx/8gI0bN1JdXc1FF12EYRiZuD/5yU/ygx/8YEhe64YbbmDFihXceuutLF26NLMk5nOf+xx1dXWZ/S666CJmzZrFxo0bsdvtfPjDHz6t1/uP//gPnnjiCdatW0ddXR0XXngh0WiUlStXkpeXR0VFBfv27ZOZJkIIIUYdSZoIIYQQ57AXX3yRF198EQCv10tWVhbTpk1j7ty53HLLLaPmQ2hJSQkvvfQSDz/8MH/84x95/fXXWb9+PTk5OZSUlPCRj3yEa6+9dkhmbHz1q1/liSee4KWXXuKll16ip6eHgoICzjvvPO666y7e+973nnRiZt68ebz22mt885vf5KmnnuJvf/sbTqeTWbNmcfvtt/OhD33ojJM8hwWDQV555RUeeOABnn76aZ566ilyc3NZunQpDzzwAC+88MKQvA6k2zH39vbym9/8hieffDKz/OjWW28dkDSBdC2cjRs3cuONN1JUVHRarzdr1ixWrFjBv//7v/PSSy+xbNkyqqqquPfee/n0pz9NbW0tuq4ze/bsMz43IYQQYihpaqQWFwshhBBCiFHNNE1qampobGxkzZo1XHDBBSMdkhBCCDGspKaJEEIIIYQY1M9+9jMaGxu54IILJGEihBDibUmW5wghhBBCiIwdO3bw7W9/m5aWFpYtW4au63znO98Z6bCEEEKIESFJEyGEEEIIkXHw4EH+93//F4fDwZQpU/jyl7/MhRdeONJhCSGEECNCapoIIYQQQgghhBBCDEJqmggh
hBBCCCGEEEIMQpImQgghhBBCCCGEEIOQpIkQQgghhBBCCCHEICRpIoQQQgghhBBCCDEISZoIIYQQQgghhBBCDEKSJkIIIYQQQgghhBCDkKSJEEIIIYQQQgghxCAkaSKEEEIIIYQQQggxCEmaCCGEEEIIIYQQQgxCkiZCCCGEEEIIIYQQg5CkiRBCCCGEEEIIIcQg/n9hKA0Q67p+qwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "\n", + "\n", + "\n", + "DIMS = np.array([2, 3, 5, 10, 20, 40, 100])\n", + "FUNCTIONS = np.arange(1, 25)\n", + "\n", + "FUNCTIONS = [1, 2, 3, 6, 7, 8, 10, 12, 14]\n", + "# colormap['COVARIANCE-2'] = 'blue'\n", + "\n", + "\n", + "f, axes = plt.subplots(3, 3, figsize=(13, 12), sharex='col', sharey=False)\n", + "\n", + "for (fid, ax) in zip(FUNCTIONS, axes.ravel()):\n", + " f_data = completed_overview.filter(function_id = fid)\n", + "\n", + " for method, color in colormap.items():\n", + " m_data = f_data.filter(algorithm_name = method)\n", + " erts = np.array([ert(m_data.filter(dimension=d)) / d for d in DIMS]) \n", + " label = method.title().replace(\"_\", \" \") if method != \"CMSA\" else method\n", + " marker = \"^\"\n", + " if method == \"pycma\":\n", + " marker = 'o'\n", + " mask = np.isfinite(erts)\n", + " ax.plot(\n", + " DIMS[mask], \n", + " erts[mask], \n", + " label=label, \n", + " marker=marker,\n", + " markersize=8,\n", + " linestyle='dashed',\n", + " alpha=.8,\n", + " linewidth=1.5,\n", + " color=color\n", + " )\n", + "\n", + " ax.grid(which=\"both\", axis='x')\n", + " ax.grid(which=\"major\", axis='y')\n", + " ax.set_yscale(\"log\", base=10)\n", + " ax.set_xscale(\"log\", base=2)\n", + " ax.set_xticks(DIMS[:-1], DIMS[:-1])\n", + " ax.tick_params(axis='both', which='both', labelsize=14)\n", + " \n", + " ax.text(0.01, 0.99,f\"$f_{{{fid}}}$ (\" + f_data['function_name'][0] + \")\",\n", + " transform=ax.transAxes,\n", + " bbox=dict(boxstyle='round,pad=0.1', facecolor='white', alpha=0.5),\n", + " ha='left', va='top', fontsize=15)\n", + "\n", + " if fid == 6:\n", + " ax.set_ylabel(\"Expected Running time (ERT) / $d$\", fontsize=16)\n", + " \n", + " ylim = ax.get_ylim()\n", + " ax.set_ylim(ylim[0], 1.5 * ylim[1])\n", + " if fid == 1:\n", + " ax.set_ylim(10**1.9, 10**3)\n", + "\n", + " if fid == 1:\n", + " ax.legend(ncol=4, fontsize=14, bbox_to_anchor=(3, 1.27))\n", + "\n", + " 
if fid == 12:\n", + " ax.set_xlabel(f\"Dimensionality $d$\", fontsize=16)\n", + "\n", + "plt.subplots_adjust(hspace=.03, wspace=0.15)\n", + "plt.savefig(\"selected_benchmarks.png\", dpi=500)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d454113", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "shape: (28_700, 12)
data_idalgorithm_namealgorithm_infosuitefunction_namefunction_iddimensioninstancerun_idevalsbest_ylen
u64strstrstrstru16u16u16u32u64f64u32
36034"COVARIANCE""algorithm_info""unknown_suite""RosenbrockRotated"92115884.8342e-9100
36035"COVARIANCE""algorithm_info""unknown_suite""RosenbrockRotated"92124629.7663e-9100
36036"COVARIANCE""algorithm_info""unknown_suite""RosenbrockRotated"92135288.7072e-9100
36037"COVARIANCE""algorithm_info""unknown_suite""RosenbrockRotated"92146664.7348e-9100
36038"COVARIANCE""algorithm_info""unknown_suite""RosenbrockRotated"92155223.7224e-9100
4796"MATRIX""algorithm_info""unknown_suite""Discus"1140196450309.5771e-9100
4797"MATRIX""algorithm_info""unknown_suite""Discus"114019710846348.159048100
4798"MATRIX""algorithm_info""unknown_suite""Discus"114019822306304.009028100
4799"MATRIX""algorithm_info""unknown_suite""Discus"11401996661395.965106100
4800"MATRIX""algorithm_info""unknown_suite""Discus"114011006226410.90027100
" + ], + "text/plain": [ + "shape: (28_700, 12)\n", + "┌─────────┬────────────────┬───────────────┬───────────────┬───┬────────┬───────┬────────────┬─────┐\n", + "│ data_id ┆ algorithm_name ┆ algorithm_inf ┆ suite ┆ … ┆ run_id ┆ evals ┆ best_y ┆ len │\n", + "│ --- ┆ --- ┆ o ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │\n", + "│ u64 ┆ str ┆ --- ┆ str ┆ ┆ u32 ┆ u64 ┆ f64 ┆ u32 │\n", + "│ ┆ ┆ str ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "╞═════════╪════════════════╪═══════════════╪═══════════════╪═══╪════════╪═══════╪════════════╪═════╡\n", + "│ 36034 ┆ COVARIANCE ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 1 ┆ 588 ┆ 4.8342e-9 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 36035 ┆ COVARIANCE ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 2 ┆ 462 ┆ 9.7663e-9 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 36036 ┆ COVARIANCE ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 3 ┆ 528 ┆ 8.7072e-9 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 36037 ┆ COVARIANCE ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 4 ┆ 666 ┆ 4.7348e-9 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 36038 ┆ COVARIANCE ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 5 ┆ 522 ┆ 3.7224e-9 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ … ┆ … ┆ … ┆ … ┆ … ┆ … ┆ … ┆ … ┆ … │\n", + "│ 4796 ┆ MATRIX ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 96 ┆ 45030 ┆ 9.5771e-9 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 4797 ┆ MATRIX ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 97 ┆ 10846 ┆ 348.159048 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 4798 ┆ MATRIX ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 98 ┆ 22306 ┆ 304.009028 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 4799 ┆ MATRIX ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 99 ┆ 6661 ┆ 395.965106 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "│ 4800 ┆ MATRIX ┆ algorithm_inf ┆ unknown_suite ┆ … ┆ 100 ┆ 6226 ┆ 410.90027 ┆ 100 │\n", + "│ ┆ ┆ o ┆ ┆ ┆ ┆ ┆ ┆ │\n", + "└─────────┴────────────────┴───────────────┴───────────────┴───┴────────┴───────┴────────────┴─────┘" + ] + }, + "execution_count": 117, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + 
"completed_overview" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/scripts/matrix/selected_benchmarks.png b/scripts/matrix/selected_benchmarks.png new file mode 100644 index 0000000..43ae4d1 Binary files /dev/null and b/scripts/matrix/selected_benchmarks.png differ diff --git a/scripts/matrix/test_bbob5d.py b/scripts/matrix/test_bbob5d.py new file mode 100644 index 0000000..9a2a963 --- /dev/null +++ b/scripts/matrix/test_bbob5d.py @@ -0,0 +1,120 @@ +from time import perf_counter +import ioh +import modcma.c_maes as modcma +import iohinspector as ins +import matplotlib.colors as mcolors +import numpy as np + + +def timeit(f): + def inner(*args, **kwargs): + start = perf_counter() + result = f(*args, **kwargs) + stop = perf_counter() + elapsed = stop - start + return elapsed + return inner + + +@timeit +def run_modma(f: ioh.ProblemType, dim: int, n_evaluations): + modules = modcma.parameters.Modules() + # modules.restart_strategy = modcma.options.RestartStrategy.IPOP + # modules.active = True + settings = modcma.Settings( + dim, + budget=n_evaluations, + target=f.optimum.y + 1e-8, + lb=f.bounds.lb, + ub=f.bounds.ub, + sigma0=2.0, + modules=modules, + verbose=False + ) + cma = modcma.ModularCMAES(settings) + cma.run(f) + return cma + +def fix_legend_labels(ax, n_split, algs, groupby_word = None, reorder=False): + colors = dict(zip(algs, mcolors.TABLEAU_COLORS)) + lines = ax.get_lines()[::] + if reorder: + lines = lines[::2] + lines[1::2] + + for line, line_label in zip(lines[:len(lines)//2], lines[len(lines)//2:]): + if (lab:=line_label.get_label()) in colors: + for l in (line, 
line_label): + l.set_color(colors[lab]) + l.set_linewidth(3) + if groupby_word is not None and groupby_word in lab: + l.set_linestyle('dashed') + else: + l.set_linestyle('solid') + + handles, labels = ax.get_legend_handles_labels() + labels = [l[n_split:] for l in labels[:]] + idx = np.argsort(labels) + ax.legend(np.array(handles)[idx], np.array(labels)[idx], fancybox=True, shadow=True, fontsize=13) + return handles, labels + +def place_legend_below(ax, handles, labels, show = True, legend_nrow = 1, start_legend = 3, loc_y = -.11): + box = ax.get_position() + ax.set_position([box.x0, box.y0 + box.height * 0.1, + box.width, box.height * 0.9]) + + ax.legend().remove() + if show: + ax.legend(np.array(handles), np.array(labels), loc='upper center', + fontsize=13, bbox_to_anchor=(start_legend, loc_y), fancybox=True, shadow=True, ncol=np.ceil(len(labels) / legend_nrow), + ) + + +if __name__ == "__main__": + # modcma.utils.set_seed(43) + # modcma.constants.calc_eigv = True + # name = f"CMA-ES eig={modcma.constants.calc_eigv}" + + # logger = ioh.logger.Analyzer( + # folder_name=name, + # algorithm_name=name, + # root="data" + # ) + + # dim = 5 + # n_rep = 5 + # n_instances = 5 + + # budget = 50_000 * dim + # for i in range(1, 25): + # for ii in range(1, n_instances + 1): + # problem = ioh.get_problem(i, ii, dim) + # problem.attach_logger(logger) + # for r in range(n_rep): + # run_modma(problem, dim, budget) + # print(problem.state.evaluations, problem.state.current_best_internal.y) + # problem.reset() + + import os + manager = ins.DataManager() + algs = [] + for folder in os.listdir("data"): + algs.append(folder) + manager.add_folder(f"data/{folder}") + + + + import matplotlib.pyplot as plt + + f, axes = plt.subplots(5, 5, figsize=(25, 13), sharex=True, sharey=True) + + x_values = ins.get_sequence(1, 50_000 * 5, 50, True, True) + for fid, ax in enumerate(axes.ravel(), 1): + if fid > 24: + break + dt = manager.select(function_ids=[fid]).load(True, True, x_values=x_values) + 
ins.plot.single_function_fixedbudget(data=dt, ax=ax) + h,l = fix_legend_labels(ax, 1, algs, None) + place_legend_below(ax, h, l, fid == 24, 1) + + plt.tight_layout() + plt.show() \ No newline at end of file diff --git a/scripts/matrix/test_timing.py b/scripts/matrix/test_timing.py new file mode 100644 index 0000000..59e6519 --- /dev/null +++ b/scripts/matrix/test_timing.py @@ -0,0 +1,129 @@ +import sys +import inspect +import warnings + +from time import perf_counter +from pprint import pprint +from dataclasses import dataclass + + +import numpy as np +from modcma import ModularCMAES +import modcma.c_maes as modcma +import cma as pycma +import ioh +from fcmaes import optimizer, retry + +np.random.seed(12) + +def timeit(f): + def inner(*args, **kwargs): + start = perf_counter() + result = f(*args, **kwargs) + stop = perf_counter() + elapsed = stop - start + return elapsed + return inner + + +# @timeit +# def run_modcmapy(f: ioh.ProblemType, dim: int, n_evaluations, x0: np.ndarray): +# cma = ModularCMAES(f, dim, budget=n_evaluations, x0=x0) +# cma.run() +# assert f.state.evaluations >= n_evaluations + + +# @timeit +# def run_fcmaes(f: ioh.ProblemType, dim: int, n_evaluations, x0: np.ndarray): + +# lamb = 4 + np.floor(3 * np.log(dim)).astype(int) +# bounds = np.array([f.bounds.lb, f.bounds.ub]) +# res = optimizer.cmaescpp.minimize( +# f, x0=x0, max_evaluations=n_evaluations, +# stop_hist=0, accuracy=1e-10, stop_fitness=-700, +# popsize=lamb, workers=1, delayed_update=False +# ) + + +# # ret = retry.minimize(f, bounds.T, optimizer=optimizer.Cma_cpp(n_evaluations)) +# assert f.state.evaluations >= n_evaluations +# print(f.state.current_best_internal.y) + + +# @timeit +# def run_modma(f: ioh.ProblemType, dim: int, n_evaluations, x0: np.ndarray): +# modcma.constants.calc_eigv = False +# modules = modcma.parameters.Modules() +# # modules.sample_transformation = modcma.options.SCALED_UNIFORM +# modules.matrix_adaptation = modcma.options.COVARIANCE +# settings = 
modcma.Settings(dim, +# budget=n_evaluations, +# x0=x0, +# modules=modules, +# lb=f.bounds.lb, +# ub=f.bounds.ub, +# verbose=True +# ) + +# cma = modcma.ModularCMAES(settings) + + +# maxp = 1/(10 * dim * (cma.p.weights.c1 +cma.p.weights.cmu)) +# # print(dim, max(1, maxp), maxp) +# # breakpoint() + +# while cma.step(f): +# pass +# # cma.run(f) +# print(cma.p.stats.t, cma.p.stats.n_updates, f.state.current_best_internal.y) +# assert f.state.evaluations >= n_evaluations +# return cma + + +@timeit +def run_pycma(f: ioh.ProblemType, dim: int, n_evaluations: int, x0: np.ndarray): + options = pycma.CMAOptions() + # options['CMA_active'] = False + # options['maxfevals'] = n_evaluations + options["verbose"] = -1 + options["CMA_diagonal"] = False + # pprint(options) + + cma = pycma.CMAEvolutionStrategy(x0, 2.0, options=options) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + while f.state.evaluations < n_evaluations: + X, y = cma.ask_and_eval(f) + cma.tell(X, y) + breakpoint() + # cma.disp() + assert f.state.evaluations >= n_evaluations + + +if __name__ == "__main__": + n_iters = 2 + n_evals = 2_000 + fid = 12 + dimensions = [5] + names, functions = zip( + *[ + (name, obj) + for name, obj in inspect.getmembers(sys.modules[__name__]) + if name.startswith("run") + ] + ) + data = {name: dict.fromkeys(dimensions) for name in names} + + for d in dimensions: + x0 = np.random.uniform(size=d) + for name, function in zip(names, functions): + data[name][d] = np.array( + [ + function(ioh.get_problem(fid, 1, d), d, n_evals * d, x0) + for _ in range(n_iters) + ] + ) + + print(f"fid: {fid} ({d}D) budget: {d * n_evals}") + for name in names: + print(name, data[name][d].mean(), data[name][d].std()) diff --git a/scripts/matrix/test_timing_modules.py b/scripts/matrix/test_timing_modules.py new file mode 100644 index 0000000..92e6cce --- /dev/null +++ b/scripts/matrix/test_timing_modules.py @@ -0,0 +1,101 @@ +from time import perf_counter +import warnings + +import numpy 
as np +import modcma.c_maes as modcma +import ioh +import pandas as pd +import matplotlib.pyplot as plt +import cma as pycma + +from pprint import pprint + +np.random.seed(12) + +def run_modma(problem: ioh.ProblemType, x0: np.ndarray, matrix_adaptation = modcma.options.COVARIANCE, max_generations=1000): + modules = modcma.parameters.Modules() + modules.matrix_adaptation = matrix_adaptation + settings = modcma.Settings( + problem.meta_data.n_variables, + x0=x0, + modules=modules, + lb=problem.bounds.lb, + ub=problem.bounds.ub, + verbose=True, + max_generations=max_generations + ) + + cma = modcma.ModularCMAES(settings) + + start = perf_counter() + cma.run(problem) + stop = perf_counter() + elapsed = stop - start + assert cma.p.stats.t == max_generations + return elapsed, cma.p.stats.t, problem.state.evaluations, cma.p.stats.n_updates + + +def run_pycma(problem: ioh.ProblemType, x0: np.ndarray, max_generations=1000): + options = pycma.CMAOptions() + options['CMA_active'] = False + # options['maxfevals'] = n_evaluations + options['conditioncov_alleviate'] = False + options["verbose"] = 10 + options["CMA_diagonal"] = False + pprint(options) + + cma = pycma.CMAEvolutionStrategy(x0, 2.0, options=options) + settings = modcma.Settings(problem.meta_data.n_variables) + assert settings.lambda0 == cma.sp.popsize + np.random.seed(1) + start = perf_counter() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + for i in range(max_generations): + X, y = cma.ask_and_eval(problem) + cma.tell(X, y) + stop = perf_counter() + elapsed = stop - start + + return elapsed, cma.countiter, problem.state.evaluations, cma.sm.count_eigen + +def collect(): + fid = 2 + dims = 2, 3, 5, 10, 20, 40, 100, 200, 500, 1000 + + n_repeats = 15 + options = modcma.options.MatrixAdaptationType.__members__ + del options['COVARIANCE_NO_EIGV'] + + pprint(options) + + stats = [] + for d in dims: + for name, option in options.items(): + for _ in range(n_repeats): + problem = ioh.get_problem(fid, 
1, d) + time, n_gen, n_evals, n_updates = run_modma(problem, np.zeros(d), option) + stats.append((name, d, time, n_gen, n_evals, n_updates)) + print(stats[-1]) + + stats = pd.DataFrame(stats, columns=["method", "dim", "time", "n_gen", "n_evals", "n_updates"]) + stats.to_csv("time_stats.csv") + print(stats) + + +if __name__ == "__main__": + fid = 2 + dims = 2, 3, 5, 10, 20, 40, 100, 200, 500, 1000 + n_repeats = 15 + + stats = [] + for d in dims: + for _ in range(n_repeats): + problem = ioh.get_problem(fid, 1, d) + time, n_gen, n_evals, n_updates = run_pycma(problem, np.zeros(d)) + stats.append(("pycma", d, time, n_gen, n_evals, n_updates)) + print(stats[-1]) + stats = pd.DataFrame(stats, columns=["method", "dim", "time", "n_gen", "n_evals", "n_updates"]) + stats.to_csv("time_stats_pycma.csv") + print(stats) + \ No newline at end of file diff --git a/scripts/matrix/time_matrix_adaptation.png b/scripts/matrix/time_matrix_adaptation.png new file mode 100644 index 0000000..b9ef8e2 Binary files /dev/null and b/scripts/matrix/time_matrix_adaptation.png differ diff --git a/scripts/matrix/time_stats.csv b/scripts/matrix/time_stats.csv new file mode 100644 index 0000000..5a077ee --- /dev/null +++ b/scripts/matrix/time_stats.csv @@ -0,0 +1,1051 @@ +,method,dim,time,n_gen,n_evals,n_updates +0,COVARIANCE,2,0.007711483049206436,1000,6000,999 +1,COVARIANCE,2,0.00787417299579829,1000,6000,999 +2,COVARIANCE,2,0.013588677044026554,1000,6000,999 +3,COVARIANCE,2,0.010022761998698115,1000,6000,999 +4,COVARIANCE,2,0.007961716037243605,1000,6000,999 +5,COVARIANCE,2,0.008022750029340386,1000,6000,999 +6,COVARIANCE,2,0.007618403993546963,1000,6000,999 +7,COVARIANCE,2,0.008018087013624609,1000,6000,999 +8,COVARIANCE,2,0.009280618978664279,1000,6000,999 +9,COVARIANCE,2,0.009731311001814902,1000,6000,999 +10,COVARIANCE,2,0.009726746007800102,1000,6000,999 +11,COVARIANCE,2,0.008087928988970816,1000,6000,999 +12,COVARIANCE,2,0.012237735092639923,1000,6000,999 
+13,COVARIANCE,2,0.01036987395491451,1000,6000,999 +14,COVARIANCE,2,0.008932383032515645,1000,6000,999 +15,NONE,2,0.010580296977423131,1000,6000,0 +16,NONE,2,0.007887015002779663,1000,6000,0 +17,NONE,2,0.008834864012897015,1000,6000,0 +18,NONE,2,0.010412635980173945,1000,6000,0 +19,NONE,2,0.008776562986895442,1000,6000,0 +20,NONE,2,0.010758780990727246,1000,6000,0 +21,NONE,2,0.007803539978340268,1000,6000,0 +22,NONE,2,0.007634338922798634,1000,6000,0 +23,NONE,2,0.009719033958390355,1000,6000,0 +24,NONE,2,0.007997791981324553,1000,6000,0 +25,NONE,2,0.006933605065569282,1000,6000,0 +26,NONE,2,0.007629985921084881,1000,6000,0 +27,NONE,2,0.006967267952859402,1000,6000,0 +28,NONE,2,0.006994739058427513,1000,6000,0 +29,NONE,2,0.007267759996466339,1000,6000,0 +30,MATRIX,2,0.0075609220657497644,1000,6000,1000 +31,MATRIX,2,0.007714873994700611,1000,6000,1000 +32,MATRIX,2,0.010331543046049774,1000,6000,1000 +33,MATRIX,2,0.008045815979130566,1000,6000,1000 +34,MATRIX,2,0.008376204990781844,1000,6000,1000 +35,MATRIX,2,0.006995436968281865,1000,6000,1000 +36,MATRIX,2,0.007368327002041042,1000,6000,1000 +37,MATRIX,2,0.007643015938811004,1000,6000,1000 +38,MATRIX,2,0.009196370025165379,1000,6000,1000 +39,MATRIX,2,0.006918389932252467,1000,6000,1000 +40,MATRIX,2,0.00840214907657355,1000,6000,1000 +41,MATRIX,2,0.007531062001362443,1000,6000,1000 +42,MATRIX,2,0.011182026006281376,1000,6000,1000 +43,MATRIX,2,0.009086668957024813,1000,6000,1000 +44,MATRIX,2,0.008180306991562247,1000,6000,1000 +45,SEPERABLE,2,0.0072456750785931945,1000,6000,1000 +46,SEPERABLE,2,0.008176972041837871,1000,6000,1000 +47,SEPERABLE,2,0.007220792002044618,1000,6000,1000 +48,SEPERABLE,2,0.007796501973643899,1000,6000,1000 +49,SEPERABLE,2,0.0073612709529697895,1000,6000,1000 +50,SEPERABLE,2,0.007403416093438864,1000,6000,1000 +51,SEPERABLE,2,0.00818293308839202,1000,6000,1000 +52,SEPERABLE,2,0.007085719029419124,1000,6000,1000 +53,SEPERABLE,2,0.011805569985881448,1000,6000,1000 
+54,SEPERABLE,2,0.010588491102680564,1000,6000,1000 +55,SEPERABLE,2,0.007113746949471533,1000,6000,1000 +56,SEPERABLE,2,0.009632104076445103,1000,6000,1000 +57,SEPERABLE,2,0.010887198965065181,1000,6000,1000 +58,SEPERABLE,2,0.007644123979844153,1000,6000,1000 +59,SEPERABLE,2,0.0088468809844926,1000,6000,1000 +60,CHOLESKY,2,0.00727443490177393,1000,6000,1000 +61,CHOLESKY,2,0.010283801006153226,1000,6000,1000 +62,CHOLESKY,2,0.0074679270619526505,1000,6000,1000 +63,CHOLESKY,2,0.00752234598621726,1000,6000,1000 +64,CHOLESKY,2,0.008975239004939795,1000,6000,1000 +65,CHOLESKY,2,0.01246929494664073,1000,6000,1000 +66,CHOLESKY,2,0.009200508939102292,1000,6000,1000 +67,CHOLESKY,2,0.009669609018601477,1000,6000,1000 +68,CHOLESKY,2,0.009414260042831302,1000,6000,1000 +69,CHOLESKY,2,0.008480035001412034,1000,6000,1000 +70,CHOLESKY,2,0.008125439984723926,1000,6000,1000 +71,CHOLESKY,2,0.007196316961199045,1000,6000,1000 +72,CHOLESKY,2,0.007346509024500847,1000,6000,1000 +73,CHOLESKY,2,0.00735829200129956,1000,6000,1000 +74,CHOLESKY,2,0.00821810495108366,1000,6000,1000 +75,CMSA,2,0.008155530085787177,1000,6000,1000 +76,CMSA,2,0.009733907994814217,1000,6000,1000 +77,CMSA,2,0.007828367059119046,1000,6000,1000 +78,CMSA,2,0.00928627303801477,1000,6000,1000 +79,CMSA,2,0.008640426094643772,1000,6000,1000 +80,CMSA,2,0.009348203078843653,1000,6000,1000 +81,CMSA,2,0.010446653002873063,1000,6000,1000 +82,CMSA,2,0.007562728016637266,1000,6000,1000 +83,CMSA,2,0.008062280947342515,1000,6000,1000 +84,CMSA,2,0.007502082968130708,1000,6000,1000 +85,CMSA,2,0.009941514930687845,1000,6000,1000 +86,CMSA,2,0.007420271052978933,1000,6000,1000 +87,CMSA,2,0.007882133009843528,1000,6000,1000 +88,CMSA,2,0.007252500974573195,1000,6000,1000 +89,CMSA,2,0.007672390900552273,1000,6000,1000 +90,NATURAL_GRADIENT,2,0.008121661026962101,1000,6000,1000 +91,NATURAL_GRADIENT,2,0.008391448995098472,1000,6000,1000 +92,NATURAL_GRADIENT,2,0.008983712992630899,1000,6000,1000 
+93,NATURAL_GRADIENT,2,0.00892684399150312,1000,6000,1000 +94,NATURAL_GRADIENT,2,0.007959551992826164,1000,6000,1000 +95,NATURAL_GRADIENT,2,0.007619902025908232,1000,6000,1000 +96,NATURAL_GRADIENT,2,0.007938891998492181,1000,6000,1000 +97,NATURAL_GRADIENT,2,0.0077221960527822375,1000,6000,1000 +98,NATURAL_GRADIENT,2,0.007947947015054524,1000,6000,1000 +99,NATURAL_GRADIENT,2,0.007548750028945506,1000,6000,1000 +100,NATURAL_GRADIENT,2,0.009637824026867747,1000,6000,1000 +101,NATURAL_GRADIENT,2,0.01032421796116978,1000,6000,1000 +102,NATURAL_GRADIENT,2,0.007606999948620796,1000,6000,1000 +103,NATURAL_GRADIENT,2,0.00865573703777045,1000,6000,1000 +104,NATURAL_GRADIENT,2,0.00781076296698302,1000,6000,1000 +105,COVARIANCE,3,0.010046639945358038,1000,7000,999 +106,COVARIANCE,3,0.011035675997845829,1000,7000,999 +107,COVARIANCE,3,0.010219599935226142,1000,7000,999 +108,COVARIANCE,3,0.010837925015948713,1000,7000,999 +109,COVARIANCE,3,0.010280134971253574,1000,7000,999 +110,COVARIANCE,3,0.01126328599639237,1000,7000,999 +111,COVARIANCE,3,0.011321696918457747,1000,7000,999 +112,COVARIANCE,3,0.010646464070305228,1000,7000,999 +113,COVARIANCE,3,0.010173275019042194,1000,7000,999 +114,COVARIANCE,3,0.013086330029182136,1000,7000,999 +115,COVARIANCE,3,0.012327173026278615,1000,7000,999 +116,COVARIANCE,3,0.013249940937384963,1000,7000,999 +117,COVARIANCE,3,0.010314155020751059,1000,7000,999 +118,COVARIANCE,3,0.010522597935050726,1000,7000,999 +119,COVARIANCE,3,0.012646899907849729,1000,7000,999 +120,NONE,3,0.009607258951291442,1000,7000,0 +121,NONE,3,0.009807296912185848,1000,7000,0 +122,NONE,3,0.009652474895119667,1000,7000,0 +123,NONE,3,0.009476409992203116,1000,7000,0 +124,NONE,3,0.009506572037935257,1000,7000,0 +125,NONE,3,0.008545586955733597,1000,7000,0 +126,NONE,3,0.009733253973536193,1000,7000,0 +127,NONE,3,0.008835495915263891,1000,7000,0 +128,NONE,3,0.010168058914132416,1000,7000,0 +129,NONE,3,0.009375900961458683,1000,7000,0 +130,NONE,3,0.0086107871029526,1000,7000,0 
+131,NONE,3,0.009014472016133368,1000,7000,0 +132,NONE,3,0.008834633976221085,1000,7000,0 +133,NONE,3,0.012071282020770013,1000,7000,0 +134,NONE,3,0.010382476029917598,1000,7000,0 +135,MATRIX,3,0.008779859053902328,1000,7000,1000 +136,MATRIX,3,0.009545984910801053,1000,7000,1000 +137,MATRIX,3,0.009312444948591292,1000,7000,1000 +138,MATRIX,3,0.00931821996346116,1000,7000,1000 +139,MATRIX,3,0.009419593960046768,1000,7000,1000 +140,MATRIX,3,0.009502683999016881,1000,7000,1000 +141,MATRIX,3,0.01403449010103941,1000,7000,1000 +142,MATRIX,3,0.0128990039229393,1000,7000,1000 +143,MATRIX,3,0.009880561963655055,1000,7000,1000 +144,MATRIX,3,0.009139794972725213,1000,7000,1000 +145,MATRIX,3,0.008999750949442387,1000,7000,1000 +146,MATRIX,3,0.008996311924420297,1000,7000,1000 +147,MATRIX,3,0.009328008978627622,1000,7000,1000 +148,MATRIX,3,0.009267273941077292,1000,7000,1000 +149,MATRIX,3,0.009884173981845379,1000,7000,1000 +150,SEPERABLE,3,0.008579405024647713,1000,7000,1000 +151,SEPERABLE,3,0.009980604983866215,1000,7000,1000 +152,SEPERABLE,3,0.011705138953402638,1000,7000,1000 +153,SEPERABLE,3,0.009154070052318275,1000,7000,1000 +154,SEPERABLE,3,0.009997481014579535,1000,7000,1000 +155,SEPERABLE,3,0.009055254980921745,1000,7000,1000 +156,SEPERABLE,3,0.010453882976435125,1000,7000,1000 +157,SEPERABLE,3,0.00936168001499027,1000,7000,1000 +158,SEPERABLE,3,0.008752838941290975,1000,7000,1000 +159,SEPERABLE,3,0.009137884946539998,1000,7000,1000 +160,SEPERABLE,3,0.008925743051804602,1000,7000,1000 +161,SEPERABLE,3,0.009549979004077613,1000,7000,1000 +162,SEPERABLE,3,0.010041241999715567,1000,7000,1000 +163,SEPERABLE,3,0.009685758035629988,1000,7000,1000 +164,SEPERABLE,3,0.013026652974076569,1000,7000,1000 +165,CHOLESKY,3,0.01264843507669866,1000,7000,1000 +166,CHOLESKY,3,0.009933545952662826,1000,7000,1000 +167,CHOLESKY,3,0.010719186975620687,1000,7000,1000 +168,CHOLESKY,3,0.00945397699251771,1000,7000,1000 +169,CHOLESKY,3,0.009866437991149724,1000,7000,1000 
+170,CHOLESKY,3,0.00989635894075036,1000,7000,1000 +171,CHOLESKY,3,0.009768080897629261,1000,7000,1000 +172,CHOLESKY,3,0.012657254002988338,1000,7000,1000 +173,CHOLESKY,3,0.011553079937584698,1000,7000,1000 +174,CHOLESKY,3,0.009100523893721402,1000,7000,1000 +175,CHOLESKY,3,0.011938906041905284,1000,7000,1000 +176,CHOLESKY,3,0.010714069940149784,1000,7000,1000 +177,CHOLESKY,3,0.009884115075692534,1000,7000,1000 +178,CHOLESKY,3,0.011362285003997386,1000,7000,1000 +179,CHOLESKY,3,0.009531333926133811,1000,7000,1000 +180,CMSA,3,0.00925858598202467,1000,7000,1000 +181,CMSA,3,0.010860243928618729,1000,7000,1000 +182,CMSA,3,0.009655373054556549,1000,7000,1000 +183,CMSA,3,0.009173939004540443,1000,7000,1000 +184,CMSA,3,0.012301420094445348,1000,7000,1000 +185,CMSA,3,0.010564399999566376,1000,7000,1000 +186,CMSA,3,0.01014744199346751,1000,7000,1000 +187,CMSA,3,0.01130485103931278,1000,7000,1000 +188,CMSA,3,0.011262962012551725,1000,7000,1000 +189,CMSA,3,0.010586802032776177,1000,7000,1000 +190,CMSA,3,0.010575794032774866,1000,7000,1000 +191,CMSA,3,0.010831770021468401,1000,7000,1000 +192,CMSA,3,0.011182085028849542,1000,7000,1000 +193,CMSA,3,0.010396020021289587,1000,7000,1000 +194,CMSA,3,0.009606118081137538,1000,7000,1000 +195,NATURAL_GRADIENT,3,0.01098856795579195,1000,7000,1000 +196,NATURAL_GRADIENT,3,0.01330451702233404,1000,7000,1000 +197,NATURAL_GRADIENT,3,0.012580309994518757,1000,7000,1000 +198,NATURAL_GRADIENT,3,0.01008232100866735,1000,7000,1000 +199,NATURAL_GRADIENT,3,0.010378943057730794,1000,7000,1000 +200,NATURAL_GRADIENT,3,0.01072270201984793,1000,7000,1000 +201,NATURAL_GRADIENT,3,0.009555344004184008,1000,7000,1000 +202,NATURAL_GRADIENT,3,0.010915928054600954,1000,7000,1000 +203,NATURAL_GRADIENT,3,0.013145414995960891,1000,7000,1000 +204,NATURAL_GRADIENT,3,0.011716301087290049,1000,7000,1000 +205,NATURAL_GRADIENT,3,0.012564397999085486,1000,7000,1000 +206,NATURAL_GRADIENT,3,0.010176997049711645,1000,7000,1000 
+207,NATURAL_GRADIENT,3,0.015080782934091985,1000,7000,1000 +208,NATURAL_GRADIENT,3,0.011131394072435796,1000,7000,1000 +209,NATURAL_GRADIENT,3,0.011149314930662513,1000,7000,1000 +210,COVARIANCE,5,0.015520391054451466,1000,8000,999 +211,COVARIANCE,5,0.015948273008689284,1000,8000,999 +212,COVARIANCE,5,0.01825975498650223,1000,8000,999 +213,COVARIANCE,5,0.017051726928912103,1000,8000,999 +214,COVARIANCE,5,0.014974416000768542,1000,8000,999 +215,COVARIANCE,5,0.01724672899581492,1000,8000,999 +216,COVARIANCE,5,0.015692647895775735,1000,8000,999 +217,COVARIANCE,5,0.01526833395473659,1000,8000,999 +218,COVARIANCE,5,0.016084213042631745,1000,8000,999 +219,COVARIANCE,5,0.01938209601212293,1000,8000,999 +220,COVARIANCE,5,0.015014115022495389,1000,8000,999 +221,COVARIANCE,5,0.016379193984903395,1000,8000,999 +222,COVARIANCE,5,0.015464963973499835,1000,8000,999 +223,COVARIANCE,5,0.015433348016813397,1000,8000,999 +224,COVARIANCE,5,0.015527136973105371,1000,8000,999 +225,NONE,5,0.013214766047894955,1000,8000,0 +226,NONE,5,0.015009810915216804,1000,8000,0 +227,NONE,5,0.013874726020731032,1000,8000,0 +228,NONE,5,0.011615141993388534,1000,8000,0 +229,NONE,5,0.012898415909148753,1000,8000,0 +230,NONE,5,0.012579652015119791,1000,8000,0 +231,NONE,5,0.012674393015913665,1000,8000,0 +232,NONE,5,0.012113644042983651,1000,8000,0 +233,NONE,5,0.011918097035959363,1000,8000,0 +234,NONE,5,0.012093445984646678,1000,8000,0 +235,NONE,5,0.012156452052295208,1000,8000,0 +236,NONE,5,0.014123225002549589,1000,8000,0 +237,NONE,5,0.012426751898601651,1000,8000,0 +238,NONE,5,0.01409814995713532,1000,8000,0 +239,NONE,5,0.013103228993713856,1000,8000,0 +240,MATRIX,5,0.013291540089994669,1000,8000,1000 +241,MATRIX,5,0.013809764990583062,1000,8000,1000 +242,MATRIX,5,0.012993307085707784,1000,8000,1000 +243,MATRIX,5,0.013661809032782912,1000,8000,1000 +244,MATRIX,5,0.013389872969128191,1000,8000,1000 +245,MATRIX,5,0.012983935070224106,1000,8000,1000 +246,MATRIX,5,0.012694712961092591,1000,8000,1000 
+247,MATRIX,5,0.012891435995697975,1000,8000,1000 +248,MATRIX,5,0.013133725035004318,1000,8000,1000 +249,MATRIX,5,0.013477892032824457,1000,8000,1000 +250,MATRIX,5,0.013313684961758554,1000,8000,1000 +251,MATRIX,5,0.014374586986377835,1000,8000,1000 +252,MATRIX,5,0.013057979056611657,1000,8000,1000 +253,MATRIX,5,0.013095406000502408,1000,8000,1000 +254,MATRIX,5,0.01631439500488341,1000,8000,1000 +255,SEPERABLE,5,0.01526596606709063,1000,8000,1000 +256,SEPERABLE,5,0.015072238049469888,1000,8000,1000 +257,SEPERABLE,5,0.015444700024090707,1000,8000,1000 +258,SEPERABLE,5,0.013122479082085192,1000,8000,1000 +259,SEPERABLE,5,0.012719676014967263,1000,8000,1000 +260,SEPERABLE,5,0.011602474958635867,1000,8000,1000 +261,SEPERABLE,5,0.011718580964952707,1000,8000,1000 +262,SEPERABLE,5,0.012862669886089861,1000,8000,1000 +263,SEPERABLE,5,0.012612455990165472,1000,8000,1000 +264,SEPERABLE,5,0.012309227022342384,1000,8000,1000 +265,SEPERABLE,5,0.012458633980713785,1000,8000,1000 +266,SEPERABLE,5,0.012890069978311658,1000,8000,1000 +267,SEPERABLE,5,0.015189129975624382,1000,8000,1000 +268,SEPERABLE,5,0.016673500067554414,1000,8000,1000 +269,SEPERABLE,5,0.01749087404459715,1000,8000,1000 +270,CHOLESKY,5,0.01591278100386262,1000,8000,1000 +271,CHOLESKY,5,0.013935594004578888,1000,8000,1000 +272,CHOLESKY,5,0.015243762987665832,1000,8000,1000 +273,CHOLESKY,5,0.014468909008428454,1000,8000,1000 +274,CHOLESKY,5,0.01765754201915115,1000,8000,1000 +275,CHOLESKY,5,0.020126167102716863,1000,8000,1000 +276,CHOLESKY,5,0.0136478659696877,1000,8000,1000 +277,CHOLESKY,5,0.013343982980586588,1000,8000,1000 +278,CHOLESKY,5,0.013175180065445602,1000,8000,1000 +279,CHOLESKY,5,0.014289708924479783,1000,8000,1000 +280,CHOLESKY,5,0.013196449028328061,1000,8000,1000 +281,CHOLESKY,5,0.015757610090076923,1000,8000,1000 +282,CHOLESKY,5,0.01886829803697765,1000,8000,1000 +283,CHOLESKY,5,0.0129279870307073,1000,8000,1000 +284,CHOLESKY,5,0.013196915038861334,1000,8000,1000 
+285,CMSA,5,0.013583144987933338,1000,8000,1000 +286,CMSA,5,0.012786183971911669,1000,8000,1000 +287,CMSA,5,0.012807710096240044,1000,8000,1000 +288,CMSA,5,0.012978086015209556,1000,8000,1000 +289,CMSA,5,0.013025904074311256,1000,8000,1000 +290,CMSA,5,0.016044765012338758,1000,8000,1000 +291,CMSA,5,0.017614355077967048,1000,8000,1000 +292,CMSA,5,0.01355104404501617,1000,8000,1000 +293,CMSA,5,0.012836926034651697,1000,8000,1000 +294,CMSA,5,0.01458206307142973,1000,8000,1000 +295,CMSA,5,0.013406955054961145,1000,8000,1000 +296,CMSA,5,0.012871264945715666,1000,8000,1000 +297,CMSA,5,0.012683688895776868,1000,8000,1000 +298,CMSA,5,0.013586043962277472,1000,8000,1000 +299,CMSA,5,0.013987243990413845,1000,8000,1000 +300,NATURAL_GRADIENT,5,0.0142219930421561,1000,8000,1000 +301,NATURAL_GRADIENT,5,0.013994620996527374,1000,8000,1000 +302,NATURAL_GRADIENT,5,0.013727606041356921,1000,8000,1000 +303,NATURAL_GRADIENT,5,0.013687250087969005,1000,8000,1000 +304,NATURAL_GRADIENT,5,0.013794710976071656,1000,8000,1000 +305,NATURAL_GRADIENT,5,0.014601532951928675,1000,8000,1000 +306,NATURAL_GRADIENT,5,0.013539204024709761,1000,8000,1000 +307,NATURAL_GRADIENT,5,0.015167184988968074,1000,8000,1000 +308,NATURAL_GRADIENT,5,0.015508531010709703,1000,8000,1000 +309,NATURAL_GRADIENT,5,0.014381531975232065,1000,8000,1000 +310,NATURAL_GRADIENT,5,0.013700257055461407,1000,8000,1000 +311,NATURAL_GRADIENT,5,0.015230489079840481,1000,8000,1000 +312,NATURAL_GRADIENT,5,0.013924597995355725,1000,8000,1000 +313,NATURAL_GRADIENT,5,0.014006470912136137,1000,8000,1000 +314,NATURAL_GRADIENT,5,0.015192433027550578,1000,8000,1000 +315,COVARIANCE,10,0.02646021405234933,1000,10000,999 +316,COVARIANCE,10,0.026506139081902802,1000,10000,999 +317,COVARIANCE,10,0.030570200993679464,1000,10000,999 +318,COVARIANCE,10,0.026031696004793048,1000,10000,999 +319,COVARIANCE,10,0.026587684988044202,1000,10000,999 +320,COVARIANCE,10,0.02581032703164965,1000,10000,999 +321,COVARIANCE,10,0.025931526091881096,1000,10000,999 
+322,COVARIANCE,10,0.026071295025758445,1000,10000,999 +323,COVARIANCE,10,0.026979148969985545,1000,10000,999 +324,COVARIANCE,10,0.026608676998876035,1000,10000,999 +325,COVARIANCE,10,0.027280688984319568,1000,10000,999 +326,COVARIANCE,10,0.032865333021618426,1000,10000,999 +327,COVARIANCE,10,0.033210864989086986,1000,10000,999 +328,COVARIANCE,10,0.026148759061470628,1000,10000,999 +329,COVARIANCE,10,0.02644397597759962,1000,10000,999 +330,NONE,10,0.026142479036934674,1000,10000,0 +331,NONE,10,0.023312255972996354,1000,10000,0 +332,NONE,10,0.02153112494852394,1000,10000,0 +333,NONE,10,0.019915197044610977,1000,10000,0 +334,NONE,10,0.020787537097930908,1000,10000,0 +335,NONE,10,0.020500303944572806,1000,10000,0 +336,NONE,10,0.020010669948533177,1000,10000,0 +337,NONE,10,0.020207438967190683,1000,10000,0 +338,NONE,10,0.020409882999956608,1000,10000,0 +339,NONE,10,0.020347998943179846,1000,10000,0 +340,NONE,10,0.02004936500452459,1000,10000,0 +341,NONE,10,0.020324033917859197,1000,10000,0 +342,NONE,10,0.020778672071173787,1000,10000,0 +343,NONE,10,0.020338958012871444,1000,10000,0 +344,NONE,10,0.02035613590851426,1000,10000,0 +345,MATRIX,10,0.021689401008188725,1000,10000,1000 +346,MATRIX,10,0.023073321906849742,1000,10000,1000 +347,MATRIX,10,0.02183206705376506,1000,10000,1000 +348,MATRIX,10,0.02124449994880706,1000,10000,1000 +349,MATRIX,10,0.021016963990405202,1000,10000,1000 +350,MATRIX,10,0.021939269034191966,1000,10000,1000 +351,MATRIX,10,0.02105342596769333,1000,10000,1000 +352,MATRIX,10,0.021470330888405442,1000,10000,1000 +353,MATRIX,10,0.02149055793415755,1000,10000,1000 +354,MATRIX,10,0.022337739006616175,1000,10000,1000 +355,MATRIX,10,0.0218155849725008,1000,10000,1000 +356,MATRIX,10,0.02140188799239695,1000,10000,1000 +357,MATRIX,10,0.021726403036154807,1000,10000,1000 +358,MATRIX,10,0.021874411962926388,1000,10000,1000 +359,MATRIX,10,0.021406320040114224,1000,10000,1000 +360,SEPERABLE,10,0.02098678902257234,1000,10000,1000 
+361,SEPERABLE,10,0.021709173917770386,1000,10000,1000 +362,SEPERABLE,10,0.020785389002412558,1000,10000,1000 +363,SEPERABLE,10,0.022332094027660787,1000,10000,1000 +364,SEPERABLE,10,0.02077528601512313,1000,10000,1000 +365,SEPERABLE,10,0.022091167978942394,1000,10000,1000 +366,SEPERABLE,10,0.022214886965230107,1000,10000,1000 +367,SEPERABLE,10,0.021911683026701212,1000,10000,1000 +368,SEPERABLE,10,0.020935315056703985,1000,10000,1000 +369,SEPERABLE,10,0.021145976963452995,1000,10000,1000 +370,SEPERABLE,10,0.022041255957446992,1000,10000,1000 +371,SEPERABLE,10,0.02210756402928382,1000,10000,1000 +372,SEPERABLE,10,0.02128604508470744,1000,10000,1000 +373,SEPERABLE,10,0.02190954692196101,1000,10000,1000 +374,SEPERABLE,10,0.0227263419656083,1000,10000,1000 +375,CHOLESKY,10,0.023270328994840384,1000,10000,1000 +376,CHOLESKY,10,0.028422462986782193,1000,10000,1000 +377,CHOLESKY,10,0.02374368906021118,1000,10000,1000 +378,CHOLESKY,10,0.024753985926508904,1000,10000,1000 +379,CHOLESKY,10,0.024000969948247075,1000,10000,1000 +380,CHOLESKY,10,0.02398382592946291,1000,10000,1000 +381,CHOLESKY,10,0.024152382044121623,1000,10000,1000 +382,CHOLESKY,10,0.023522427072748542,1000,10000,1000 +383,CHOLESKY,10,0.023312310106121004,1000,10000,1000 +384,CHOLESKY,10,0.025083123007789254,1000,10000,1000 +385,CHOLESKY,10,0.023060551029630005,1000,10000,1000 +386,CHOLESKY,10,0.025752169080078602,1000,10000,1000 +387,CHOLESKY,10,0.02398148097563535,1000,10000,1000 +388,CHOLESKY,10,0.023427941952832043,1000,10000,1000 +389,CHOLESKY,10,0.022713371086865664,1000,10000,1000 +390,CMSA,10,0.02360872400458902,1000,10000,1000 +391,CMSA,10,0.02305560407694429,1000,10000,1000 +392,CMSA,10,0.022069173981435597,1000,10000,1000 +393,CMSA,10,0.02198529802262783,1000,10000,1000 +394,CMSA,10,0.02299428207334131,1000,10000,1000 +395,CMSA,10,0.022192512056790292,1000,10000,1000 +396,CMSA,10,0.023659919039346278,1000,10000,1000 +397,CMSA,10,0.021884319023229182,1000,10000,1000 
+398,CMSA,10,0.022995195002295077,1000,10000,1000 +399,CMSA,10,0.022320396965369582,1000,10000,1000 +400,CMSA,10,0.022681621951051056,1000,10000,1000 +401,CMSA,10,0.024453732999973,1000,10000,1000 +402,CMSA,10,0.024230816983617842,1000,10000,1000 +403,CMSA,10,0.022984078037552536,1000,10000,1000 +404,CMSA,10,0.025092235067859292,1000,10000,1000 +405,NATURAL_GRADIENT,10,0.026609597960487008,1000,10000,1000 +406,NATURAL_GRADIENT,10,0.024411317077465355,1000,10000,1000 +407,NATURAL_GRADIENT,10,0.025311942910775542,1000,10000,1000 +408,NATURAL_GRADIENT,10,0.02531970595009625,1000,10000,1000 +409,NATURAL_GRADIENT,10,0.023980543948709965,1000,10000,1000 +410,NATURAL_GRADIENT,10,0.02638119296170771,1000,10000,1000 +411,NATURAL_GRADIENT,10,0.024598201038315892,1000,10000,1000 +412,NATURAL_GRADIENT,10,0.02702373603824526,1000,10000,1000 +413,NATURAL_GRADIENT,10,0.03637300501577556,1000,10000,1000 +414,NATURAL_GRADIENT,10,0.026139874942600727,1000,10000,1000 +415,NATURAL_GRADIENT,10,0.026732825092040002,1000,10000,1000 +416,NATURAL_GRADIENT,10,0.025479499017819762,1000,10000,1000 +417,NATURAL_GRADIENT,10,0.024942960939370096,1000,10000,1000 +418,NATURAL_GRADIENT,10,0.024945261073298752,1000,10000,1000 +419,NATURAL_GRADIENT,10,0.024341779062524438,1000,10000,1000 +420,COVARIANCE,20,0.05663040897343308,1000,12000,999 +421,COVARIANCE,20,0.05804439401254058,1000,12000,999 +422,COVARIANCE,20,0.05684079404454678,1000,12000,999 +423,COVARIANCE,20,0.05779927491676062,1000,12000,999 +424,COVARIANCE,20,0.05688915599603206,1000,12000,999 +425,COVARIANCE,20,0.05635134701151401,1000,12000,999 +426,COVARIANCE,20,0.06009372603148222,1000,12000,999 +427,COVARIANCE,20,0.0575709919212386,1000,12000,999 +428,COVARIANCE,20,0.05634799099061638,1000,12000,999 +429,COVARIANCE,20,0.06416844693012536,1000,12000,999 +430,COVARIANCE,20,0.06445557496044785,1000,12000,999 +431,COVARIANCE,20,0.058972291997633874,1000,12000,999 +432,COVARIANCE,20,0.06199114699847996,1000,12000,999 
+433,COVARIANCE,20,0.060354843037202954,1000,12000,999 +434,COVARIANCE,20,0.05763062590267509,1000,12000,999 +435,NONE,20,0.041186148999258876,1000,12000,0 +436,NONE,20,0.04323339695110917,1000,12000,0 +437,NONE,20,0.04448738298378885,1000,12000,0 +438,NONE,20,0.04221319290809333,1000,12000,0 +439,NONE,20,0.046186724095605314,1000,12000,0 +440,NONE,20,0.04411709192208946,1000,12000,0 +441,NONE,20,0.048592316918075085,1000,12000,0 +442,NONE,20,0.04211354104336351,1000,12000,0 +443,NONE,20,0.0421360390027985,1000,12000,0 +444,NONE,20,0.04283862700685859,1000,12000,0 +445,NONE,20,0.040271615027450025,1000,12000,0 +446,NONE,20,0.040806045988574624,1000,12000,0 +447,NONE,20,0.039539886987768114,1000,12000,0 +448,NONE,20,0.04028276400640607,1000,12000,0 +449,NONE,20,0.039995725033804774,1000,12000,0 +450,MATRIX,20,0.042692004004493356,1000,12000,1000 +451,MATRIX,20,0.04034159204456955,1000,12000,1000 +452,MATRIX,20,0.040040911990217865,1000,12000,1000 +453,MATRIX,20,0.04211899801157415,1000,12000,1000 +454,MATRIX,20,0.04132400895468891,1000,12000,1000 +455,MATRIX,20,0.04105830693151802,1000,12000,1000 +456,MATRIX,20,0.043576746014878154,1000,12000,1000 +457,MATRIX,20,0.04218339896760881,1000,12000,1000 +458,MATRIX,20,0.04485260497312993,1000,12000,1000 +459,MATRIX,20,0.044892095029354095,1000,12000,1000 +460,MATRIX,20,0.040366081055253744,1000,12000,1000 +461,MATRIX,20,0.05335828801617026,1000,12000,1000 +462,MATRIX,20,0.04526419995818287,1000,12000,1000 +463,MATRIX,20,0.04195874999277294,1000,12000,1000 +464,MATRIX,20,0.04063175094779581,1000,12000,1000 +465,SEPERABLE,20,0.04028905008453876,1000,12000,1000 +466,SEPERABLE,20,0.03960494208149612,1000,12000,1000 +467,SEPERABLE,20,0.04044364904984832,1000,12000,1000 +468,SEPERABLE,20,0.04073040804360062,1000,12000,1000 +469,SEPERABLE,20,0.03874405298847705,1000,12000,1000 +470,SEPERABLE,20,0.04203873302321881,1000,12000,1000 +471,SEPERABLE,20,0.0410099501023069,1000,12000,1000 
+472,SEPERABLE,20,0.03954295499715954,1000,12000,1000 +473,SEPERABLE,20,0.03965203301049769,1000,12000,1000 +474,SEPERABLE,20,0.040845189010724425,1000,12000,1000 +475,SEPERABLE,20,0.040395742980763316,1000,12000,1000 +476,SEPERABLE,20,0.04824292904231697,1000,12000,1000 +477,SEPERABLE,20,0.04523904505185783,1000,12000,1000 +478,SEPERABLE,20,0.03993581107351929,1000,12000,1000 +479,SEPERABLE,20,0.041130085941404104,1000,12000,1000 +480,CHOLESKY,20,0.04239601991139352,1000,12000,1000 +481,CHOLESKY,20,0.043892061919905245,1000,12000,1000 +482,CHOLESKY,20,0.04393823491409421,1000,12000,1000 +483,CHOLESKY,20,0.04203148907981813,1000,12000,1000 +484,CHOLESKY,20,0.043527727015316486,1000,12000,1000 +485,CHOLESKY,20,0.04888434195891023,1000,12000,1000 +486,CHOLESKY,20,0.04411291505675763,1000,12000,1000 +487,CHOLESKY,20,0.044060900923796,1000,12000,1000 +488,CHOLESKY,20,0.045655932975932956,1000,12000,1000 +489,CHOLESKY,20,0.045517346006818116,1000,12000,1000 +490,CHOLESKY,20,0.04509356897324324,1000,12000,1000 +491,CHOLESKY,20,0.04199582198634744,1000,12000,1000 +492,CHOLESKY,20,0.04464197193738073,1000,12000,1000 +493,CHOLESKY,20,0.04228949104435742,1000,12000,1000 +494,CHOLESKY,20,0.04241088090930134,1000,12000,1000 +495,CMSA,20,0.042818896006792784,1000,12000,1000 +496,CMSA,20,0.0414567650295794,1000,12000,1000 +497,CMSA,20,0.0419222810305655,1000,12000,1000 +498,CMSA,20,0.042064632987603545,1000,12000,1000 +499,CMSA,20,0.042399453930556774,1000,12000,1000 +500,CMSA,20,0.041201850050128996,1000,12000,1000 +501,CMSA,20,0.04237480799201876,1000,12000,1000 +502,CMSA,20,0.042923461995087564,1000,12000,1000 +503,CMSA,20,0.04206694196909666,1000,12000,1000 +504,CMSA,20,0.04177639493718743,1000,12000,1000 +505,CMSA,20,0.04314523201901466,1000,12000,1000 +506,CMSA,20,0.043620768934488297,1000,12000,1000 +507,CMSA,20,0.046250030980445445,1000,12000,1000 +508,CMSA,20,0.041995160980150104,1000,12000,1000 +509,CMSA,20,0.04235911904834211,1000,12000,1000 
+510,NATURAL_GRADIENT,20,0.047634089016355574,1000,12000,1000 +511,NATURAL_GRADIENT,20,0.04758962104097009,1000,12000,1000 +512,NATURAL_GRADIENT,20,0.04883428895846009,1000,12000,1000 +513,NATURAL_GRADIENT,20,0.04804474499542266,1000,12000,1000 +514,NATURAL_GRADIENT,20,0.05021867493633181,1000,12000,1000 +515,NATURAL_GRADIENT,20,0.050932217040099204,1000,12000,1000 +516,NATURAL_GRADIENT,20,0.04771746799815446,1000,12000,1000 +517,NATURAL_GRADIENT,20,0.04774267098400742,1000,12000,1000 +518,NATURAL_GRADIENT,20,0.04860394599381834,1000,12000,1000 +519,NATURAL_GRADIENT,20,0.04786378296557814,1000,12000,1000 +520,NATURAL_GRADIENT,20,0.049401047988794744,1000,12000,1000 +521,NATURAL_GRADIENT,20,0.04789384198375046,1000,12000,1000 +522,NATURAL_GRADIENT,20,0.04774281906429678,1000,12000,1000 +523,NATURAL_GRADIENT,20,0.05020238598808646,1000,12000,1000 +524,NATURAL_GRADIENT,20,0.052489073015749454,1000,12000,1000 +525,COVARIANCE,40,0.1675972209777683,1000,15000,999 +526,COVARIANCE,40,0.16043446096591651,1000,15000,999 +527,COVARIANCE,40,0.15638411999680102,1000,15000,999 +528,COVARIANCE,40,0.16312111401930451,1000,15000,999 +529,COVARIANCE,40,0.18178791902028024,1000,15000,999 +530,COVARIANCE,40,0.17434375593438745,1000,15000,999 +531,COVARIANCE,40,0.21389896993059665,1000,15000,999 +532,COVARIANCE,40,0.16287107206881046,1000,15000,999 +533,COVARIANCE,40,0.17143149895127863,1000,15000,999 +534,COVARIANCE,40,0.17483992595225573,1000,15000,999 +535,COVARIANCE,40,0.16592577996198088,1000,15000,999 +536,COVARIANCE,40,0.15827252704184502,1000,15000,999 +537,COVARIANCE,40,0.16114346694666892,1000,15000,999 +538,COVARIANCE,40,0.1765812720404938,1000,15000,999 +539,COVARIANCE,40,0.1769464509561658,1000,15000,999 +540,NONE,40,0.08697210904210806,1000,15000,0 +541,NONE,40,0.09199973999056965,1000,15000,0 +542,NONE,40,0.09088940802030265,1000,15000,0 +543,NONE,40,0.08321637404151261,1000,15000,0 +544,NONE,40,0.089786836062558,1000,15000,0 +545,NONE,40,0.08508844405878335,1000,15000,0 
+546,NONE,40,0.09177579206880182,1000,15000,0 +547,NONE,40,0.08549892401788384,1000,15000,0 +548,NONE,40,0.08642277703620493,1000,15000,0 +549,NONE,40,0.0811714290175587,1000,15000,0 +550,NONE,40,0.08188776392489672,1000,15000,0 +551,NONE,40,0.08215750497765839,1000,15000,0 +552,NONE,40,0.08106619596946985,1000,15000,0 +553,NONE,40,0.08134444698225707,1000,15000,0 +554,NONE,40,0.08120331901591271,1000,15000,0 +555,MATRIX,40,0.0869275500299409,1000,15000,1000 +556,MATRIX,40,0.08813001995440573,1000,15000,1000 +557,MATRIX,40,0.09130360104609281,1000,15000,1000 +558,MATRIX,40,0.08940214605536312,1000,15000,1000 +559,MATRIX,40,0.09071016090456396,1000,15000,1000 +560,MATRIX,40,0.09033105999697,1000,15000,1000 +561,MATRIX,40,0.08567406295333058,1000,15000,1000 +562,MATRIX,40,0.09444298199377954,1000,15000,1000 +563,MATRIX,40,0.09091241005808115,1000,15000,1000 +564,MATRIX,40,0.08982594998087734,1000,15000,1000 +565,MATRIX,40,0.08940630091819912,1000,15000,1000 +566,MATRIX,40,0.08877934806514531,1000,15000,1000 +567,MATRIX,40,0.09051082201767713,1000,15000,1000 +568,MATRIX,40,0.08908571989741176,1000,15000,1000 +569,MATRIX,40,0.09159162000287324,1000,15000,1000 +570,SEPERABLE,40,0.096538195037283,1000,15000,1000 +571,SEPERABLE,40,0.09407578292302787,1000,15000,1000 +572,SEPERABLE,40,0.0868315560510382,1000,15000,1000 +573,SEPERABLE,40,0.09575606102589518,1000,15000,1000 +574,SEPERABLE,40,0.10772285598795861,1000,15000,1000 +575,SEPERABLE,40,0.09179948293603957,1000,15000,1000 +576,SEPERABLE,40,0.0885527819627896,1000,15000,1000 +577,SEPERABLE,40,0.09804408194031566,1000,15000,1000 +578,SEPERABLE,40,0.09730524697806686,1000,15000,1000 +579,SEPERABLE,40,0.08641196903772652,1000,15000,1000 +580,SEPERABLE,40,0.09571024996694177,1000,15000,1000 +581,SEPERABLE,40,0.09272320696618408,1000,15000,1000 +582,SEPERABLE,40,0.08769402699545026,1000,15000,1000 +583,SEPERABLE,40,0.08529081102460623,1000,15000,1000 +584,SEPERABLE,40,0.089672771980986,1000,15000,1000 
+585,CHOLESKY,40,0.09605615399777889,1000,15000,1000 +586,CHOLESKY,40,0.10219482600223273,1000,15000,1000 +587,CHOLESKY,40,0.10053233697544783,1000,15000,1000 +588,CHOLESKY,40,0.09804993390571326,1000,15000,1000 +589,CHOLESKY,40,0.11235227901488543,1000,15000,1000 +590,CHOLESKY,40,0.11050073604565114,1000,15000,1000 +591,CHOLESKY,40,0.11271644302178174,1000,15000,1000 +592,CHOLESKY,40,0.09965297195594758,1000,15000,1000 +593,CHOLESKY,40,0.09669331600889564,1000,15000,1000 +594,CHOLESKY,40,0.09179757395759225,1000,15000,1000 +595,CHOLESKY,40,0.09156222990714014,1000,15000,1000 +596,CHOLESKY,40,0.09298123803455383,1000,15000,1000 +597,CHOLESKY,40,0.09213207999709994,1000,15000,1000 +598,CHOLESKY,40,0.09316910896450281,1000,15000,1000 +599,CHOLESKY,40,0.08980873902328312,1000,15000,1000 +600,CMSA,40,0.09058202698361129,1000,15000,1000 +601,CMSA,40,0.09369755093939602,1000,15000,1000 +602,CMSA,40,0.09097614407073706,1000,15000,1000 +603,CMSA,40,0.08986321999691427,1000,15000,1000 +604,CMSA,40,0.09060414496343583,1000,15000,1000 +605,CMSA,40,0.08845468703657389,1000,15000,1000 +606,CMSA,40,0.09004924492910504,1000,15000,1000 +607,CMSA,40,0.09348341403529048,1000,15000,1000 +608,CMSA,40,0.09461610694415867,1000,15000,1000 +609,CMSA,40,0.09238660894334316,1000,15000,1000 +610,CMSA,40,0.09321941796224564,1000,15000,1000 +611,CMSA,40,0.09343193203676492,1000,15000,1000 +612,CMSA,40,0.10359540197532624,1000,15000,1000 +613,CMSA,40,0.09359396004583687,1000,15000,1000 +614,CMSA,40,0.09644168405793607,1000,15000,1000 +615,NATURAL_GRADIENT,40,0.12956285709515214,1000,15000,1000 +616,NATURAL_GRADIENT,40,0.12723773496691138,1000,15000,1000 +617,NATURAL_GRADIENT,40,0.12435409100726247,1000,15000,1000 +618,NATURAL_GRADIENT,40,0.11891290300991386,1000,15000,1000 +619,NATURAL_GRADIENT,40,0.12032928399275988,1000,15000,1000 +620,NATURAL_GRADIENT,40,0.11909963504876941,1000,15000,1000 +621,NATURAL_GRADIENT,40,0.11749967292416841,1000,15000,1000 
+622,NATURAL_GRADIENT,40,0.11757511796895415,1000,15000,1000 +623,NATURAL_GRADIENT,40,0.11835824395529926,1000,15000,1000 +624,NATURAL_GRADIENT,40,0.11811929009854794,1000,15000,1000 +625,NATURAL_GRADIENT,40,0.12283511995337903,1000,15000,1000 +626,NATURAL_GRADIENT,40,0.12387334601953626,1000,15000,1000 +627,NATURAL_GRADIENT,40,0.12001913995482028,1000,15000,1000 +628,NATURAL_GRADIENT,40,0.12347487302031368,1000,15000,1000 +629,NATURAL_GRADIENT,40,0.12266464601270854,1000,15000,1000 +630,COVARIANCE,100,0.5211217780597508,1000,17000,499 +631,COVARIANCE,100,0.5314679120201617,1000,17000,499 +632,COVARIANCE,100,0.5446918380912393,1000,17000,499 +633,COVARIANCE,100,0.528357608942315,1000,17000,499 +634,COVARIANCE,100,0.5175769400084391,1000,17000,499 +635,COVARIANCE,100,0.5315654329024255,1000,17000,499 +636,COVARIANCE,100,0.5266426029847935,1000,17000,499 +637,COVARIANCE,100,0.5226456340169534,1000,17000,499 +638,COVARIANCE,100,0.524569017929025,1000,17000,499 +639,COVARIANCE,100,0.5283362639602274,1000,17000,499 +640,COVARIANCE,100,0.522508871043101,1000,17000,499 +641,COVARIANCE,100,0.527296525076963,1000,17000,499 +642,COVARIANCE,100,0.5208452979568392,1000,17000,499 +643,COVARIANCE,100,0.5243254129309207,1000,17000,499 +644,COVARIANCE,100,0.522555008996278,1000,17000,499 +645,NONE,100,0.2109276979463175,1000,17000,0 +646,NONE,100,0.21247282193508,1000,17000,0 +647,NONE,100,0.21186058898456395,1000,17000,0 +648,NONE,100,0.2114941308973357,1000,17000,0 +649,NONE,100,0.21010668494272977,1000,17000,0 +650,NONE,100,0.21113439498003572,1000,17000,0 +651,NONE,100,0.21051188500132412,1000,17000,0 +652,NONE,100,0.213297835085541,1000,17000,0 +653,NONE,100,0.20862715307157487,1000,17000,0 +654,NONE,100,0.2110031289048493,1000,17000,0 +655,NONE,100,0.21053398901131004,1000,17000,0 +656,NONE,100,0.2099848910002038,1000,17000,0 +657,NONE,100,0.22124319104477763,1000,17000,0 +658,NONE,100,0.2145822710590437,1000,17000,0 +659,NONE,100,0.2110465590376407,1000,17000,0 
+660,MATRIX,100,0.23704137606546283,1000,17000,1000 +661,MATRIX,100,0.23524919501505792,1000,17000,1000 +662,MATRIX,100,0.2329338409472257,1000,17000,1000 +663,MATRIX,100,0.23686197504866868,1000,17000,1000 +664,MATRIX,100,0.23883276991546154,1000,17000,1000 +665,MATRIX,100,0.2362937149591744,1000,17000,1000 +666,MATRIX,100,0.23778917896561325,1000,17000,1000 +667,MATRIX,100,0.2368498359574005,1000,17000,1000 +668,MATRIX,100,0.2352685829391703,1000,17000,1000 +669,MATRIX,100,0.2460408869665116,1000,17000,1000 +670,MATRIX,100,0.2381373440148309,1000,17000,1000 +671,MATRIX,100,0.23527923598885536,1000,17000,1000 +672,MATRIX,100,0.23550567706115544,1000,17000,1000 +673,MATRIX,100,0.24027198203839362,1000,17000,1000 +674,MATRIX,100,0.23817343695554882,1000,17000,1000 +675,SEPERABLE,100,0.2158213339280337,1000,17000,1000 +676,SEPERABLE,100,0.21031490399036556,1000,17000,1000 +677,SEPERABLE,100,0.21054256800562143,1000,17000,1000 +678,SEPERABLE,100,0.2233401209814474,1000,17000,1000 +679,SEPERABLE,100,0.21396525099407881,1000,17000,1000 +680,SEPERABLE,100,0.2118780539603904,1000,17000,1000 +681,SEPERABLE,100,0.2181756099453196,1000,17000,1000 +682,SEPERABLE,100,0.2237374719697982,1000,17000,1000 +683,SEPERABLE,100,0.2114433899987489,1000,17000,1000 +684,SEPERABLE,100,0.2128131529316306,1000,17000,1000 +685,SEPERABLE,100,0.2128929119789973,1000,17000,1000 +686,SEPERABLE,100,0.216176851070486,1000,17000,1000 +687,SEPERABLE,100,0.21026831807103008,1000,17000,1000 +688,SEPERABLE,100,0.21981401194352657,1000,17000,1000 +689,SEPERABLE,100,0.20964805700350553,1000,17000,1000 +690,CHOLESKY,100,0.24717775802128017,1000,17000,1000 +691,CHOLESKY,100,0.24491507094353437,1000,17000,1000 +692,CHOLESKY,100,0.2505362560041249,1000,17000,1000 +693,CHOLESKY,100,0.24727875995449722,1000,17000,1000 +694,CHOLESKY,100,0.252864358946681,1000,17000,1000 +695,CHOLESKY,100,0.25051062600687146,1000,17000,1000 +696,CHOLESKY,100,0.2518366069998592,1000,17000,1000 
+697,CHOLESKY,100,0.24684920499566942,1000,17000,1000 +698,CHOLESKY,100,0.25841602496802807,1000,17000,1000 +699,CHOLESKY,100,0.24653435696382076,1000,17000,1000 +700,CHOLESKY,100,0.2528309669578448,1000,17000,1000 +701,CHOLESKY,100,0.2489594160579145,1000,17000,1000 +702,CHOLESKY,100,0.2522715840023011,1000,17000,1000 +703,CHOLESKY,100,0.2496099309064448,1000,17000,1000 +704,CHOLESKY,100,0.2503205679822713,1000,17000,1000 +705,CMSA,100,0.263193121063523,1000,17000,1000 +706,CMSA,100,0.2612808699486777,1000,17000,1000 +707,CMSA,100,0.26326917891856283,1000,17000,1000 +708,CMSA,100,0.2617123970994726,1000,17000,1000 +709,CMSA,100,0.26589683000929654,1000,17000,1000 +710,CMSA,100,0.26546415605116636,1000,17000,1000 +711,CMSA,100,0.2668964349431917,1000,17000,1000 +712,CMSA,100,0.2622099219588563,1000,17000,1000 +713,CMSA,100,0.26335593895055354,1000,17000,1000 +714,CMSA,100,0.2604640210047364,1000,17000,1000 +715,CMSA,100,0.27353097195737064,1000,17000,1000 +716,CMSA,100,0.2617066280217841,1000,17000,1000 +717,CMSA,100,0.2606976419920102,1000,17000,1000 +718,CMSA,100,0.2571802199818194,1000,17000,1000 +719,CMSA,100,0.2643855910282582,1000,17000,1000 +720,NATURAL_GRADIENT,100,0.5453195869922638,1000,17000,1000 +721,NATURAL_GRADIENT,100,0.5430001160129905,1000,17000,1000 +722,NATURAL_GRADIENT,100,0.5564697699155658,1000,17000,1000 +723,NATURAL_GRADIENT,100,0.5524597280891612,1000,17000,1000 +724,NATURAL_GRADIENT,100,0.5426087530795485,1000,17000,1000 +725,NATURAL_GRADIENT,100,0.5383712169714272,1000,17000,1000 +726,NATURAL_GRADIENT,100,0.5553588750772178,1000,17000,1000 +727,NATURAL_GRADIENT,100,0.5388020679820329,1000,17000,1000 +728,NATURAL_GRADIENT,100,0.5380138710606843,1000,17000,1000 +729,NATURAL_GRADIENT,100,0.5457986389519647,1000,17000,1000 +730,NATURAL_GRADIENT,100,0.5440328880213201,1000,17000,1000 +731,NATURAL_GRADIENT,100,0.5527954500867054,1000,17000,1000 +732,NATURAL_GRADIENT,100,0.5431316379690543,1000,17000,1000 
+733,NATURAL_GRADIENT,100,0.546943218098022,1000,17000,1000 +734,NATURAL_GRADIENT,100,0.5557161860633641,1000,17000,1000 +735,COVARIANCE,200,1.6276547430315986,1000,19000,333 +736,COVARIANCE,200,1.6344481399282813,1000,19000,333 +737,COVARIANCE,200,1.63722063601017,1000,19000,333 +738,COVARIANCE,200,1.6553048150381073,1000,19000,333 +739,COVARIANCE,200,1.6293435350526124,1000,19000,333 +740,COVARIANCE,200,1.6443297719815746,1000,19000,333 +741,COVARIANCE,200,1.623908229987137,1000,19000,333 +742,COVARIANCE,200,1.6348585389787331,1000,19000,333 +743,COVARIANCE,200,1.63553986500483,1000,19000,333 +744,COVARIANCE,200,1.621558653889224,1000,19000,333 +745,COVARIANCE,200,1.636688922997564,1000,19000,333 +746,COVARIANCE,200,1.6443772970233113,1000,19000,333 +747,COVARIANCE,200,1.6384419089881703,1000,19000,333 +748,COVARIANCE,200,1.6987084581051022,1000,19000,333 +749,COVARIANCE,200,1.6260202279081568,1000,19000,333 +750,NONE,200,0.4692869740538299,1000,19000,0 +751,NONE,200,0.4566295159747824,1000,19000,0 +752,NONE,200,0.4682580929948017,1000,19000,0 +753,NONE,200,0.46691234107129276,1000,19000,0 +754,NONE,200,0.4653782460372895,1000,19000,0 +755,NONE,200,0.4646468689898029,1000,19000,0 +756,NONE,200,0.46321476297453046,1000,19000,0 +757,NONE,200,0.4834733019815758,1000,19000,0 +758,NONE,200,0.4723127739271149,1000,19000,0 +759,NONE,200,0.46473444905132055,1000,19000,0 +760,NONE,200,0.46309744007885456,1000,19000,0 +761,NONE,200,0.4582522918935865,1000,19000,0 +762,NONE,200,0.46414424607064575,1000,19000,0 +763,NONE,200,0.4647767199203372,1000,19000,0 +764,NONE,200,0.46498104301281273,1000,19000,0 +765,MATRIX,200,0.553402567980811,1000,19000,1000 +766,MATRIX,200,0.5638177460059524,1000,19000,1000 +767,MATRIX,200,0.5673174570547417,1000,19000,1000 +768,MATRIX,200,0.5543926510727033,1000,19000,1000 +769,MATRIX,200,0.5660771699622273,1000,19000,1000 +770,MATRIX,200,0.5810745960334316,1000,19000,1000 +771,MATRIX,200,0.5607948990073055,1000,19000,1000 
+772,MATRIX,200,0.5529277870664373,1000,19000,1000 +773,MATRIX,200,0.5622194370953366,1000,19000,1000 +774,MATRIX,200,0.5811033649370074,1000,19000,1000 +775,MATRIX,200,0.5848764290567487,1000,19000,1000 +776,MATRIX,200,0.5641269140178338,1000,19000,1000 +777,MATRIX,200,0.5894517180277035,1000,19000,1000 +778,MATRIX,200,0.6198483309708536,1000,19000,1000 +779,MATRIX,200,0.6178910969756544,1000,19000,1000 +780,SEPERABLE,200,0.5133765629725531,1000,19000,1000 +781,SEPERABLE,200,0.5419201460899785,1000,19000,1000 +782,SEPERABLE,200,0.530735783977434,1000,19000,1000 +783,SEPERABLE,200,0.5259685689816251,1000,19000,1000 +784,SEPERABLE,200,0.4948198290076107,1000,19000,1000 +785,SEPERABLE,200,0.5006042210152373,1000,19000,1000 +786,SEPERABLE,200,0.503253011032939,1000,19000,1000 +787,SEPERABLE,200,0.48653370304964483,1000,19000,1000 +788,SEPERABLE,200,0.5016892519779503,1000,19000,1000 +789,SEPERABLE,200,0.49153020698577166,1000,19000,1000 +790,SEPERABLE,200,0.483528058975935,1000,19000,1000 +791,SEPERABLE,200,0.5047821799525991,1000,19000,1000 +792,SEPERABLE,200,0.492081810021773,1000,19000,1000 +793,SEPERABLE,200,0.49905854591634125,1000,19000,1000 +794,SEPERABLE,200,0.508737109019421,1000,19000,1000 +795,CHOLESKY,200,0.6297146680299193,1000,19000,1000 +796,CHOLESKY,200,0.6349607160082087,1000,19000,1000 +797,CHOLESKY,200,0.6208933800226077,1000,19000,1000 +798,CHOLESKY,200,0.6183558009797707,1000,19000,1000 +799,CHOLESKY,200,0.6260549139697105,1000,19000,1000 +800,CHOLESKY,200,0.6709181079640985,1000,19000,1000 +801,CHOLESKY,200,0.617852235911414,1000,19000,1000 +802,CHOLESKY,200,0.7612487559672445,1000,19000,1000 +803,CHOLESKY,200,0.5997590960469097,1000,19000,1000 +804,CHOLESKY,200,0.606690707965754,1000,19000,1000 +805,CHOLESKY,200,0.6092035699402913,1000,19000,1000 +806,CHOLESKY,200,0.59141086100135,1000,19000,1000 +807,CHOLESKY,200,0.6070447789970785,1000,19000,1000 +808,CHOLESKY,200,0.5920320670120418,1000,19000,1000 
+809,CHOLESKY,200,0.5944536890601739,1000,19000,1000 +810,CMSA,200,0.6936379040125757,1000,19000,1000 +811,CMSA,200,0.7023264149902388,1000,19000,1000 +812,CMSA,200,0.7197850820375606,1000,19000,1000 +813,CMSA,200,0.7444680549670011,1000,19000,1000 +814,CMSA,200,0.6864154509967193,1000,19000,1000 +815,CMSA,200,0.6865132909733802,1000,19000,1000 +816,CMSA,200,0.6944583510048687,1000,19000,1000 +817,CMSA,200,0.6867337230360135,1000,19000,1000 +818,CMSA,200,0.6870940949302167,1000,19000,1000 +819,CMSA,200,0.6885201369877905,1000,19000,1000 +820,CMSA,200,0.6869315609801561,1000,19000,1000 +821,CMSA,200,0.689428644021973,1000,19000,1000 +822,CMSA,200,0.6869403399759904,1000,19000,1000 +823,CMSA,200,0.6905757440254092,1000,19000,1000 +824,CMSA,200,0.6857497419696301,1000,19000,1000 +825,NATURAL_GRADIENT,200,3.223176314961165,1000,19000,1000 +826,NATURAL_GRADIENT,200,3.2235711570829153,1000,19000,1000 +827,NATURAL_GRADIENT,200,3.316529590054415,1000,19000,1000 +828,NATURAL_GRADIENT,200,3.638314520008862,1000,19000,1000 +829,NATURAL_GRADIENT,200,3.562994733103551,1000,19000,1000 +830,NATURAL_GRADIENT,200,3.797183094895445,1000,19000,1000 +831,NATURAL_GRADIENT,200,3.6099800880765542,1000,19000,1000 +832,NATURAL_GRADIENT,200,3.4373397750314325,1000,19000,1000 +833,NATURAL_GRADIENT,200,3.477735339081846,1000,19000,1000 +834,NATURAL_GRADIENT,200,3.5212426621001214,1000,19000,1000 +835,NATURAL_GRADIENT,200,3.3026527389883995,1000,19000,1000 +836,NATURAL_GRADIENT,200,3.6445402359822765,1000,19000,1000 +837,NATURAL_GRADIENT,200,3.4501612439053133,1000,19000,1000 +838,NATURAL_GRADIENT,200,3.63628202106338,1000,19000,1000 +839,NATURAL_GRADIENT,200,3.7623002079781145,1000,19000,1000 +840,COVARIANCE,500,14.078986536012962,1000,22000,199 +841,COVARIANCE,500,14.534440309042111,1000,22000,199 +842,COVARIANCE,500,14.254128084052354,1000,22000,199 +843,COVARIANCE,500,14.71426975808572,1000,22000,199 +844,COVARIANCE,500,14.134189796051942,1000,22000,199 
+845,COVARIANCE,500,14.452294233022258,1000,22000,199 +846,COVARIANCE,500,14.392810052959248,1000,22000,199 +847,COVARIANCE,500,14.725059842923656,1000,22000,199 +848,COVARIANCE,500,14.527420224971138,1000,22000,199 +849,COVARIANCE,500,14.20702896406874,1000,22000,199 +850,COVARIANCE,500,14.444894767948426,1000,22000,199 +851,COVARIANCE,500,14.207684737979434,1000,22000,199 +852,COVARIANCE,500,14.352411855012178,1000,22000,199 +853,COVARIANCE,500,14.379097484052181,1000,22000,199 +854,COVARIANCE,500,14.395583431934938,1000,22000,199 +855,NONE,500,1.303843789966777,1000,22000,0 +856,NONE,500,1.303575879894197,1000,22000,0 +857,NONE,500,1.3555413220310584,1000,22000,0 +858,NONE,500,1.3403931179782376,1000,22000,0 +859,NONE,500,1.387032444938086,1000,22000,0 +860,NONE,500,1.4275234770029783,1000,22000,0 +861,NONE,500,1.4356084479950368,1000,22000,0 +862,NONE,500,1.3811395799275488,1000,22000,0 +863,NONE,500,1.3333965230267495,1000,22000,0 +864,NONE,500,1.3404661540407687,1000,22000,0 +865,NONE,500,1.3595061210216954,1000,22000,0 +866,NONE,500,1.3644251270452514,1000,22000,0 +867,NONE,500,1.387764903018251,1000,22000,0 +868,NONE,500,1.300863430951722,1000,22000,0 +869,NONE,500,1.3577587399631739,1000,22000,0 +870,MATRIX,500,2.731360023957677,1000,22000,1000 +871,MATRIX,500,2.563822065014392,1000,22000,1000 +872,MATRIX,500,2.6090825819410384,1000,22000,1000 +873,MATRIX,500,2.796115812030621,1000,22000,1000 +874,MATRIX,500,2.416418357985094,1000,22000,1000 +875,MATRIX,500,2.5366605130257085,1000,22000,1000 +876,MATRIX,500,2.5299156439723447,1000,22000,1000 +877,MATRIX,500,2.5075457129860297,1000,22000,1000 +878,MATRIX,500,2.548185770981945,1000,22000,1000 +879,MATRIX,500,2.6298770499415696,1000,22000,1000 +880,MATRIX,500,2.401932706939988,1000,22000,1000 +881,MATRIX,500,2.436237054062076,1000,22000,1000 +882,MATRIX,500,2.4190751609858125,1000,22000,1000 +883,MATRIX,500,2.4375183060765266,1000,22000,1000 +884,MATRIX,500,2.423400452011265,1000,22000,1000 
+885,SEPERABLE,500,1.3044281820766628,1000,21999,1000 +886,SEPERABLE,500,1.4445769659942016,1000,22000,1000 +887,SEPERABLE,500,1.418783736997284,1000,22000,1000 +888,SEPERABLE,500,1.5200424740323797,1000,22000,1000 +889,SEPERABLE,500,1.3735109709668905,1000,22000,1000 +890,SEPERABLE,500,1.3748439530609176,1000,22000,1000 +891,SEPERABLE,500,1.3827809250215068,1000,22000,1000 +892,SEPERABLE,500,1.3842455859994516,1000,22000,1000 +893,SEPERABLE,500,1.434626773931086,1000,22000,1000 +894,SEPERABLE,500,1.395911280065775,1000,22000,1000 +895,SEPERABLE,500,1.3463214469375089,1000,22000,1000 +896,SEPERABLE,500,1.3323092070640996,1000,22000,1000 +897,SEPERABLE,500,1.2955966120352969,1000,22000,1000 +898,SEPERABLE,500,1.3127076670061797,1000,22000,1000 +899,SEPERABLE,500,1.3200665860204026,1000,22000,1000 +900,CHOLESKY,500,2.91848057101015,1000,22000,1000 +901,CHOLESKY,500,2.9723374990280718,1000,22000,1000 +902,CHOLESKY,500,2.9197278500068933,1000,22000,1000 +903,CHOLESKY,500,2.950242604012601,1000,22000,1000 +904,CHOLESKY,500,2.9818693900015205,1000,22000,1000 +905,CHOLESKY,500,2.9399515700060874,1000,22000,1000 +906,CHOLESKY,500,2.9501226210268214,1000,22000,1000 +907,CHOLESKY,500,2.945257848012261,1000,22000,1000 +908,CHOLESKY,500,2.926275947014801,1000,22000,1000 +909,CHOLESKY,500,3.0684031769633293,1000,22000,1000 +910,CHOLESKY,500,3.1267793219303712,1000,22000,1000 +911,CHOLESKY,500,3.0500639299862087,1000,22000,1000 +912,CHOLESKY,500,2.9512416240759194,1000,22000,1000 +913,CHOLESKY,500,3.005105872056447,1000,22000,1000 +914,CHOLESKY,500,2.9517421280033886,1000,22000,1000 +915,CMSA,500,3.8422902560560033,1000,22000,1000 +916,CMSA,500,3.8552295829867944,1000,22000,1000 +917,CMSA,500,3.848688303027302,1000,22000,1000 +918,CMSA,500,3.8993544159457088,1000,22000,1000 +919,CMSA,500,4.034882644074969,1000,22000,1000 +920,CMSA,500,3.9169793620239943,1000,22000,1000 +921,CMSA,500,3.830499325064011,1000,22000,1000 +922,CMSA,500,3.8527399640297517,1000,22000,1000 
+923,CMSA,500,3.9175846149446443,1000,22000,1000 +924,CMSA,500,3.9189656289527193,1000,22000,1000 +925,CMSA,500,3.8670819589169696,1000,22000,1000 +926,CMSA,500,3.8485843129456043,1000,22000,1000 +927,CMSA,500,3.8821012030821294,1000,22000,1000 +928,CMSA,500,3.868274135980755,1000,22000,1000 +929,CMSA,500,4.044376590987667,1000,22000,1000 +930,NATURAL_GRADIENT,500,36.326034875935875,1000,22000,1000 +931,NATURAL_GRADIENT,500,36.249864930985495,1000,22000,1000 +932,NATURAL_GRADIENT,500,36.03903782297857,1000,22000,1000 +933,NATURAL_GRADIENT,500,36.26341410295572,1000,22000,1000 +934,NATURAL_GRADIENT,500,36.12294864805881,1000,22000,1000 +935,NATURAL_GRADIENT,500,36.02733282500412,1000,22000,1000 +936,NATURAL_GRADIENT,500,35.307877535931766,1000,22000,1000 +937,NATURAL_GRADIENT,500,35.51227333792485,1000,22000,1000 +938,NATURAL_GRADIENT,500,35.31757023499813,1000,22000,1000 +939,NATURAL_GRADIENT,500,35.475808109971695,1000,22000,1000 +940,NATURAL_GRADIENT,500,35.78349867509678,1000,22000,1000 +941,NATURAL_GRADIENT,500,36.542923270957544,1000,22000,1000 +942,NATURAL_GRADIENT,500,36.924086757935584,1000,22000,1000 +943,NATURAL_GRADIENT,500,36.7024204400368,1000,22000,1000 +944,NATURAL_GRADIENT,500,37.7687555559678,1000,22000,1000 +945,COVARIANCE,1000,58.41599029803183,1000,24000,111 +946,COVARIANCE,1000,59.43219360790681,1000,24000,111 +947,COVARIANCE,1000,59.10424721299205,1000,24000,111 +948,COVARIANCE,1000,60.209893194027245,1000,24000,111 +949,COVARIANCE,1000,59.25614745600615,1000,24000,111 +950,COVARIANCE,1000,59.34579649101943,1000,24000,111 +951,COVARIANCE,1000,60.38034182798583,1000,24000,111 +952,COVARIANCE,1000,58.56632693496067,1000,24000,111 +953,COVARIANCE,1000,60.18430048401933,1000,24000,111 +954,COVARIANCE,1000,58.86595774209127,1000,24000,111 +955,COVARIANCE,1000,59.60145337798167,1000,24000,111 +956,COVARIANCE,1000,60.68191452592146,1000,24000,111 +957,COVARIANCE,1000,60.172513255965896,1000,24000,111 
+958,COVARIANCE,1000,59.78943317499943,1000,24000,111 +959,COVARIANCE,1000,59.253135920036584,1000,24000,111 +960,NONE,1000,2.970417827949859,1000,24000,0 +961,NONE,1000,2.868519638082944,1000,24000,0 +962,NONE,1000,2.8360858260421082,1000,24000,0 +963,NONE,1000,2.8325120210647583,1000,24000,0 +964,NONE,1000,3.029673565994017,1000,24000,0 +965,NONE,1000,2.926325927954167,1000,24000,0 +966,NONE,1000,2.8882733390200883,1000,24000,0 +967,NONE,1000,2.896614274010062,1000,24000,0 +968,NONE,1000,2.882313578040339,1000,24000,0 +969,NONE,1000,2.932769438950345,1000,24000,0 +970,NONE,1000,2.8500659170094877,1000,24000,0 +971,NONE,1000,2.9075663780095056,1000,24000,0 +972,NONE,1000,3.0318679090123624,1000,24000,0 +973,NONE,1000,2.9588805930688977,1000,24000,0 +974,NONE,1000,2.9434033109573647,1000,24000,0 +975,MATRIX,1000,8.722839269903488,1000,24000,1000 +976,MATRIX,1000,8.956942931981757,1000,24000,1000 +977,MATRIX,1000,9.040498326998204,1000,24000,1000 +978,MATRIX,1000,8.78873322403524,1000,24000,1000 +979,MATRIX,1000,9.053516569081694,1000,24000,1000 +980,MATRIX,1000,8.608920320053585,1000,24000,1000 +981,MATRIX,1000,8.444880965980701,1000,24000,1000 +982,MATRIX,1000,8.355265622958541,1000,24000,1000 +983,MATRIX,1000,8.63593386602588,1000,24000,1000 +984,MATRIX,1000,8.697479702997953,1000,24000,1000 +985,MATRIX,1000,8.6327155160252,1000,24000,1000 +986,MATRIX,1000,8.713702499051578,1000,24000,1000 +987,MATRIX,1000,8.604626915999688,1000,24000,1000 +988,MATRIX,1000,8.746140907984227,1000,24000,1000 +989,MATRIX,1000,8.58431101392489,1000,24000,1000 +990,SEPERABLE,1000,2.86790823796764,1000,24000,1000 +991,SEPERABLE,1000,2.8567130259471014,1000,24000,1000 +992,SEPERABLE,1000,2.873720269999467,1000,24000,1000 +993,SEPERABLE,1000,2.806458811974153,1000,24000,1000 +994,SEPERABLE,1000,3.05638324492611,1000,24000,1000 +995,SEPERABLE,1000,2.9519467919599265,1000,24000,1000 +996,SEPERABLE,1000,2.947914649033919,1000,24000,1000 +997,SEPERABLE,1000,2.905447002965957,1000,24000,1000 
+998,SEPERABLE,1000,2.850440012058243,1000,24000,1000 +999,SEPERABLE,1000,2.868557351990603,1000,24000,1000 +1000,SEPERABLE,1000,2.83484008803498,1000,24000,1000 +1001,SEPERABLE,1000,2.8840317799476907,1000,24000,1000 +1002,SEPERABLE,1000,2.8891381700523198,1000,24000,1000 +1003,SEPERABLE,1000,2.8496195019688457,1000,24000,1000 +1004,SEPERABLE,1000,2.8594833789393306,1000,24000,1000 +1005,CHOLESKY,1000,10.847379466984421,1000,24000,1000 +1006,CHOLESKY,1000,10.738492990029044,1000,24000,1000 +1007,CHOLESKY,1000,10.828010249999352,1000,24000,1000 +1008,CHOLESKY,1000,10.908579673036002,1000,24000,1000 +1009,CHOLESKY,1000,10.820064888917841,1000,24000,1000 +1010,CHOLESKY,1000,10.743897247943096,1000,24000,1000 +1011,CHOLESKY,1000,10.737442129990086,1000,24000,1000 +1012,CHOLESKY,1000,10.78281937702559,1000,24000,1000 +1013,CHOLESKY,1000,10.807967379922047,1000,24000,1000 +1014,CHOLESKY,1000,10.611018877010792,1000,24000,1000 +1015,CHOLESKY,1000,10.75385661306791,1000,24000,1000 +1016,CHOLESKY,1000,10.823747992049903,1000,24000,1000 +1017,CHOLESKY,1000,10.966242205933668,1000,24000,1000 +1018,CHOLESKY,1000,10.957469076034613,1000,24000,1000 +1019,CHOLESKY,1000,10.882842833991162,1000,24000,1000 +1020,CMSA,1000,18.48167527699843,1000,24000,1000 +1021,CMSA,1000,18.591479412047192,1000,24000,1000 +1022,CMSA,1000,18.40754616900813,1000,24000,1000 +1023,CMSA,1000,18.75137915101368,1000,24000,1000 +1024,CMSA,1000,18.366487544961274,1000,24000,1000 +1025,CMSA,1000,18.459502613986842,1000,24000,1000 +1026,CMSA,1000,18.6481174119981,1000,24000,1000 +1027,CMSA,1000,18.72227796795778,1000,24000,1000 +1028,CMSA,1000,18.478381866938435,1000,24000,1000 +1029,CMSA,1000,18.422618427081034,1000,24000,1000 +1030,CMSA,1000,18.526153668994084,1000,24000,1000 +1031,CMSA,1000,18.63221833796706,1000,24000,1000 +1032,CMSA,1000,18.398435153067112,1000,24000,1000 +1033,CMSA,1000,18.71902596007567,1000,24000,1000 +1034,CMSA,1000,18.42646432307083,1000,24000,1000 
+1035,NATURAL_GRADIENT,1000,245.23001990607008,1000,23999,1000 +1036,NATURAL_GRADIENT,1000,243.94041941408068,1000,24000,1000 +1037,NATURAL_GRADIENT,1000,244.42857773799915,1000,24000,1000 +1038,NATURAL_GRADIENT,1000,243.97754139103927,1000,24000,1000 +1039,NATURAL_GRADIENT,1000,245.36159782588948,1000,24000,1000 +1040,NATURAL_GRADIENT,1000,245.4515311169671,1000,24000,1000 +1041,NATURAL_GRADIENT,1000,245.86246696009766,1000,24000,1000 +1042,NATURAL_GRADIENT,1000,245.33539306302555,1000,24000,1000 +1043,NATURAL_GRADIENT,1000,246.77985070098657,1000,24000,1000 +1044,NATURAL_GRADIENT,1000,245.47880328900646,1000,24000,1000 +1045,NATURAL_GRADIENT,1000,246.41918917105068,1000,24000,1000 +1046,NATURAL_GRADIENT,1000,245.33992688904982,1000,24000,1000 +1047,NATURAL_GRADIENT,1000,246.63648926699534,1000,24000,1000 +1048,NATURAL_GRADIENT,1000,244.97171986103058,1000,24000,1000 +1049,NATURAL_GRADIENT,1000,246.18604666204192,1000,24000,1000 diff --git a/scripts/matrix/time_stats_pycma.csv b/scripts/matrix/time_stats_pycma.csv new file mode 100644 index 0000000..aaf0fb4 --- /dev/null +++ b/scripts/matrix/time_stats_pycma.csv @@ -0,0 +1,151 @@ +,method,dim,time,n_gen,n_evals,n_updates +0,pycma,2,0.37522817799981567,1000,6000,999 +1,pycma,2,0.35026270899834344,1000,6000,999 +2,pycma,2,0.38061815299806767,1000,6000,999 +3,pycma,2,0.371050321002258,1000,6000,999 +4,pycma,2,0.3834923089998483,1000,6000,999 +5,pycma,2,0.38832244299919694,1000,6000,999 +6,pycma,2,0.3609898909999174,1000,6000,999 +7,pycma,2,0.3540546139993239,1000,6000,999 +8,pycma,2,0.35726390200215974,1000,6000,999 +9,pycma,2,0.37336513600166654,1000,6000,999 +10,pycma,2,0.3485748130005959,1000,6000,999 +11,pycma,2,0.3616545689983468,1000,6000,999 +12,pycma,2,0.3680542699985381,1000,6000,999 +13,pycma,2,0.4078554309999163,1000,6000,999 +14,pycma,2,0.39954378999755136,1000,6000,999 +15,pycma,3,0.4218292980003753,1000,7000,999 +16,pycma,3,0.3625786029988376,1000,7000,999 +17,pycma,3,0.38367819099948974,1000,7000,999 
+18,pycma,3,0.3894981869998446,1000,7000,999 +19,pycma,3,0.4074977750024118,1000,7000,999 +20,pycma,3,0.3648669880021771,1000,7000,999 +21,pycma,3,0.3805558249987371,1000,7000,999 +22,pycma,3,0.3384024529987073,1000,7000,999 +23,pycma,3,0.3313572570004908,1000,7000,999 +24,pycma,3,0.3242036880001251,1000,7000,999 +25,pycma,3,0.3904495400020096,1000,7000,999 +26,pycma,3,0.41853813499983517,1000,7000,999 +27,pycma,3,0.3413846920011565,1000,7000,999 +28,pycma,3,0.35553036000055727,1000,7000,999 +29,pycma,3,0.36115081800016924,1000,7000,999 +30,pycma,5,0.38601215000016964,1000,8000,999 +31,pycma,5,0.37565997799902107,1000,8000,999 +32,pycma,5,0.48298051999881864,1000,8000,999 +33,pycma,5,0.3780222640016291,1000,8000,999 +34,pycma,5,0.3550963449997653,1000,8000,999 +35,pycma,5,0.4183996469982958,1000,8000,999 +36,pycma,5,0.38270922800074914,1000,8000,999 +37,pycma,5,0.3744809140007419,1000,8000,999 +38,pycma,5,0.36361514200325473,1000,8000,999 +39,pycma,5,0.3987166539991449,1000,8000,999 +40,pycma,5,0.3683125730021857,1000,8000,999 +41,pycma,5,0.3756415770003514,1000,8000,999 +42,pycma,5,0.403191812998557,1000,8000,999 +43,pycma,5,0.35686238199923537,1000,8000,999 +44,pycma,5,0.35758409399932134,1000,8000,999 +45,pycma,10,0.39849778799907654,1000,10000,999 +46,pycma,10,0.4263918309989094,1000,10000,999 +47,pycma,10,0.4185187240000232,1000,10000,999 +48,pycma,10,0.3943962539997301,1000,10000,999 +49,pycma,10,0.4023051649965055,1000,10000,999 +50,pycma,10,0.414724254002067,1000,10000,999 +51,pycma,10,0.419106314999226,1000,10000,999 +52,pycma,10,0.44850920799945015,1000,10000,999 +53,pycma,10,0.4095437670002866,1000,10000,999 +54,pycma,10,0.4093704320002871,1000,10000,999 +55,pycma,10,0.41309320099753677,1000,10000,999 +56,pycma,10,0.42540818300039973,1000,10000,999 +57,pycma,10,0.4208158609981183,1000,10000,999 +58,pycma,10,0.47719614199741045,1000,10000,999 +59,pycma,10,0.3971106390017667,1000,10000,999 +60,pycma,20,0.5163924760017835,1000,12000,999 
+61,pycma,20,0.4847462239995366,1000,12000,999 +62,pycma,20,0.492077203001827,1000,12000,999 +63,pycma,20,0.4922437660025025,1000,12000,999 +64,pycma,20,0.48513822200038703,1000,12000,999 +65,pycma,20,0.5000314070020977,1000,12000,999 +66,pycma,20,0.519929524998588,1000,12000,999 +67,pycma,20,0.5026444150025782,1000,12000,999 +68,pycma,20,0.49061895799968624,1000,12000,999 +69,pycma,20,0.4971232220013917,1000,12000,999 +70,pycma,20,0.48960660999728134,1000,12000,999 +71,pycma,20,0.4893950049990963,1000,12000,999 +72,pycma,20,0.5024925540019467,1000,12000,999 +73,pycma,20,0.4991692269977648,1000,12000,999 +74,pycma,20,0.5198710129989195,1000,12000,999 +75,pycma,40,0.7619934060021478,1000,15000,999 +76,pycma,40,0.8076867339987075,1000,15000,999 +77,pycma,40,0.725300336998771,1000,15000,999 +78,pycma,40,0.7365077829999791,1000,15000,999 +79,pycma,40,0.7172894770010316,1000,15000,999 +80,pycma,40,0.7249836160008272,1000,15000,999 +81,pycma,40,0.7261079090021667,1000,15000,999 +82,pycma,40,0.7452911920008773,1000,15000,999 +83,pycma,40,0.7764294869994046,1000,15000,999 +84,pycma,40,0.7558684180003183,1000,15000,999 +85,pycma,40,0.779672420998395,1000,15000,999 +86,pycma,40,0.7185493069991935,1000,15000,999 +87,pycma,40,0.7282580280007096,1000,15000,999 +88,pycma,40,0.725094862998958,1000,15000,999 +89,pycma,40,0.7324797670007683,1000,15000,999 +90,pycma,100,1.1860735909976938,1000,17000,499 +91,pycma,100,1.179873047000001,1000,17000,499 +92,pycma,100,1.1579261769984441,1000,17000,499 +93,pycma,100,1.1868787919993338,1000,17000,499 +94,pycma,100,1.1689511779986788,1000,17000,499 +95,pycma,100,1.1685953640007938,1000,17000,499 +96,pycma,100,1.1752932720009994,1000,17000,499 +97,pycma,100,1.1767060749989469,1000,17000,499 +98,pycma,100,1.263973029999761,1000,17000,499 +99,pycma,100,1.1710044499996002,1000,17000,499 +100,pycma,100,1.1632260660007887,1000,17000,499 +101,pycma,100,1.162450912001077,1000,17000,499 +102,pycma,100,1.2229755409971403,1000,17000,499 
+103,pycma,100,1.1903688200000033,1000,17000,499 +104,pycma,100,1.1521793680003611,1000,17000,499 +105,pycma,200,2.293771057000413,1000,19000,333 +106,pycma,200,2.225037386000622,1000,19000,333 +107,pycma,200,2.2743380059982883,1000,19000,333 +108,pycma,200,2.3483467389996804,1000,19000,333 +109,pycma,200,2.406930986999214,1000,19000,333 +110,pycma,200,2.2418446960000438,1000,19000,333 +111,pycma,200,2.3124730449999333,1000,19000,333 +112,pycma,200,2.239005701998394,1000,19000,333 +113,pycma,200,2.2397954230000323,1000,19000,333 +114,pycma,200,2.29090806000022,1000,19000,333 +115,pycma,200,2.2225995649969263,1000,19000,333 +116,pycma,200,2.279442361003021,1000,19000,333 +117,pycma,200,2.2536674520015367,1000,19000,333 +118,pycma,200,2.274126060998242,1000,19000,333 +119,pycma,200,2.2505753169971285,1000,19000,333 +120,pycma,500,8.518034722001175,1000,22000,199 +121,pycma,500,8.57804089199999,1000,22000,199 +122,pycma,500,8.334146625998983,1000,22000,199 +123,pycma,500,8.673984130000463,1000,22000,199 +124,pycma,500,8.340618348000135,1000,22000,199 +125,pycma,500,8.30776469799821,1000,22000,199 +126,pycma,500,8.358887310998398,1000,22000,199 +127,pycma,500,8.40083719500035,1000,22000,199 +128,pycma,500,8.47234620999734,1000,22000,199 +129,pycma,500,8.808449586998904,1000,22000,199 +130,pycma,500,9.255677911998646,1000,22000,199 +131,pycma,500,8.432874996000464,1000,22000,199 +132,pycma,500,9.10525231200154,1000,22000,199 +133,pycma,500,9.15324209100072,1000,22000,199 +134,pycma,500,9.024118483001075,1000,22000,199 +135,pycma,1000,29.205384237000544,1000,24000,124 +136,pycma,1000,30.037788082001498,1000,24000,124 +137,pycma,1000,30.567731778999587,1000,24000,124 +138,pycma,1000,30.19753730500088,1000,24000,124 +139,pycma,1000,30.272570720997464,1000,24000,124 +140,pycma,1000,31.294434662999265,1000,24000,124 +141,pycma,1000,28.802552930003003,1000,24000,124 +142,pycma,1000,28.48719723999966,1000,24000,124 +143,pycma,1000,29.00229881999985,1000,24000,124 
+144,pycma,1000,28.366859042002034,1000,24000,124 +145,pycma,1000,29.541915236000932,1000,24000,124 +146,pycma,1000,29.24515235400031,1000,24000,124 +147,pycma,1000,31.158946973999264,1000,24000,124 +148,pycma,1000,30.647990902998572,1000,24000,124 +149,pycma,1000,29.235625232999155,1000,24000,124 diff --git a/scripts/matrix/time_stats_pycma_old.csv b/scripts/matrix/time_stats_pycma_old.csv new file mode 100644 index 0000000..877290f --- /dev/null +++ b/scripts/matrix/time_stats_pycma_old.csv @@ -0,0 +1,151 @@ +,method,dim,time,n_gen,n_evals,n_updates +0,pycma,2,0.44070486900091055,1000,6000,139 +1,pycma,2,0.3535105579994706,1000,6000,92 +2,pycma,2,0.34650415199939744,1000,6000,999 +3,pycma,2,0.34052088399948843,1000,6000,265 +4,pycma,2,0.33877943699917523,1000,6000,999 +5,pycma,2,0.3604653269994742,1000,6000,330 +6,pycma,2,0.4270241310005076,1000,6000,522 +7,pycma,2,0.3909456579985999,1000,6000,514 +8,pycma,2,0.3604796039999201,1000,6000,999 +9,pycma,2,0.3676343170009204,1000,6000,717 +10,pycma,2,0.3209550939991459,1000,6000,999 +11,pycma,2,0.3492205910006305,1000,6000,582 +12,pycma,2,0.32644210000034946,1000,6000,1000 +13,pycma,2,0.3512573500011058,1000,6000,1000 +14,pycma,2,0.3695678639996913,1000,6000,1000 +15,pycma,3,0.31428850600059377,1000,7000,999 +16,pycma,3,0.3255313700010447,1000,7000,169 +17,pycma,3,0.3277062390006904,1000,7000,1000 +18,pycma,3,0.32783064100112824,1000,7000,147 +19,pycma,3,0.35225674199864443,1000,7000,1000 +20,pycma,3,0.32809918400016613,1000,7000,29 +21,pycma,3,0.35889233999841963,1000,7000,999 +22,pycma,3,0.41217241900085355,1000,7000,123 +23,pycma,3,0.3551948260010249,1000,7000,284 +24,pycma,3,0.347183125999436,1000,7000,247 +25,pycma,3,0.33361990800040076,1000,7000,34 +26,pycma,3,0.3264031080016139,1000,7000,999 +27,pycma,3,0.314500765000048,1000,7000,999 +28,pycma,3,0.3529576929995528,1000,7000,999 +29,pycma,3,0.3224463629994716,1000,7000,999 +30,pycma,5,0.3470342950004124,1000,8000,999 +31,pycma,5,0.3550727570000163,1000,8000,999 
+32,pycma,5,0.35636311800044496,1000,8000,999 +33,pycma,5,0.35452685999916866,1000,8000,999 +34,pycma,5,0.36638483600108884,1000,8000,999 +35,pycma,5,0.3533895119999215,1000,8000,999 +36,pycma,5,0.3549639169996226,1000,8000,999 +37,pycma,5,0.3673053849997814,1000,8000,999 +38,pycma,5,0.3537444939993293,1000,8000,999 +39,pycma,5,0.3605149659997551,1000,8000,999 +40,pycma,5,0.344889403999332,1000,8000,999 +41,pycma,5,0.35642473500047345,1000,8000,999 +42,pycma,5,0.3694338360000984,1000,8000,999 +43,pycma,5,0.35963648500001,1000,8000,999 +44,pycma,5,0.3586162999999942,1000,8000,999 +45,pycma,10,0.4479505669987702,1000,10000,999 +46,pycma,10,0.38854656599869486,1000,10000,999 +47,pycma,10,0.4760984330005158,1000,10000,999 +48,pycma,10,0.3963108750012907,1000,10000,999 +49,pycma,10,0.43280168199999025,1000,10000,999 +50,pycma,10,0.4157153799988009,1000,10000,999 +51,pycma,10,0.4148283270005777,1000,10000,999 +52,pycma,10,0.4192642899997736,1000,10000,999 +53,pycma,10,0.4272819960006018,1000,10000,999 +54,pycma,10,0.4289818209999794,1000,10000,999 +55,pycma,10,0.45167281899921363,1000,10000,999 +56,pycma,10,0.4171337070001755,1000,10000,999 +57,pycma,10,0.42829337600051076,1000,10000,999 +58,pycma,10,0.4119511320004676,1000,10000,999 +59,pycma,10,0.43349506800041127,1000,10000,999 +60,pycma,20,0.5119299860016326,1000,12000,999 +61,pycma,20,0.5452679429999989,1000,12000,999 +62,pycma,20,0.5192972789991472,1000,12000,999 +63,pycma,20,0.5362323129993456,1000,12000,999 +64,pycma,20,0.5232196719989588,1000,12000,999 +65,pycma,20,0.5090271150002081,1000,12000,999 +66,pycma,20,0.5173033760001999,1000,12000,999 +67,pycma,20,0.5096404310006619,1000,12000,999 +68,pycma,20,0.5402285910004139,1000,12000,999 +69,pycma,20,0.517333449999569,1000,12000,999 +70,pycma,20,0.5133502469998348,1000,12000,999 +71,pycma,20,0.5270525460000499,1000,12000,999 +72,pycma,20,0.5576650260009046,1000,12000,999 +73,pycma,20,0.4994467739998072,1000,12000,999 +74,pycma,20,0.557016916000066,1000,12000,999 
+75,pycma,40,0.7336929040011455,1000,15000,999 +76,pycma,40,0.7317130789997464,1000,15000,999 +77,pycma,40,0.7297067190011148,1000,15000,999 +78,pycma,40,0.7342380509999202,1000,15000,999 +79,pycma,40,0.7314286870005162,1000,15000,999 +80,pycma,40,0.7292081820014573,1000,15000,999 +81,pycma,40,0.7485082120001607,1000,15000,999 +82,pycma,40,0.7461409130009997,1000,15000,999 +83,pycma,40,0.728692406000846,1000,15000,999 +84,pycma,40,0.7272400559995731,1000,15000,999 +85,pycma,40,0.746452909999789,1000,15000,999 +86,pycma,40,0.7587970450003922,1000,15000,999 +87,pycma,40,0.7340016260004631,1000,15000,999 +88,pycma,40,0.8196933950002858,1000,15000,999 +89,pycma,40,0.7440260549992672,1000,15000,999 +90,pycma,100,1.2280191730005754,1000,17000,499 +91,pycma,100,1.2124191270013398,1000,17000,499 +92,pycma,100,1.260799264000525,1000,17000,499 +93,pycma,100,1.1939598099997966,1000,17000,499 +94,pycma,100,1.22324354400007,1000,17000,499 +95,pycma,100,1.2449042730004294,1000,17000,499 +96,pycma,100,1.265080699000464,1000,17000,499 +97,pycma,100,1.312179398999433,1000,17000,499 +98,pycma,100,1.2147428449989093,1000,17000,499 +99,pycma,100,1.226590145000955,1000,17000,499 +100,pycma,100,1.252473305001331,1000,17000,499 +101,pycma,100,1.305377808999765,1000,17000,499 +102,pycma,100,1.2888086089988064,1000,17000,499 +103,pycma,100,1.240838170000643,1000,17000,499 +104,pycma,100,1.2462555649999558,1000,17000,499 +105,pycma,200,2.3404667830000108,1000,19000,333 +106,pycma,200,2.351190655999744,1000,19000,333 +107,pycma,200,2.3616532220003137,1000,19000,333 +108,pycma,200,2.422398778999195,1000,19000,333 +109,pycma,200,2.282302249001077,1000,19000,333 +110,pycma,200,2.2805919560014445,1000,19000,333 +111,pycma,200,2.264775355000893,1000,19000,333 +112,pycma,200,2.300991444999454,1000,19000,333 +113,pycma,200,2.296304252999107,1000,19000,333 +114,pycma,200,2.2572025499994197,1000,19000,333 +115,pycma,200,2.458438073999787,1000,19000,333 +116,pycma,200,2.3183620370000426,1000,19000,333 
+117,pycma,200,2.3126392400008626,1000,19000,333 +118,pycma,200,2.32500651600094,1000,19000,333 +119,pycma,200,2.299387005999961,1000,19000,333 +120,pycma,500,8.660937268999987,1000,22000,199 +121,pycma,500,8.705488830999457,1000,22000,199 +122,pycma,500,8.590589580000596,1000,22000,199 +123,pycma,500,8.539211526000145,1000,22000,199 +124,pycma,500,8.439178089000052,1000,22000,199 +125,pycma,500,8.313168066999424,1000,22000,199 +126,pycma,500,8.488405004000015,1000,22000,199 +127,pycma,500,8.64408503300001,1000,22000,199 +128,pycma,500,8.730133470999135,1000,22000,199 +129,pycma,500,8.466917553998428,1000,22000,199 +130,pycma,500,8.477031213998998,1000,22000,199 +131,pycma,500,8.247212429998399,1000,22000,199 +132,pycma,500,8.646479375998751,1000,22000,199 +133,pycma,500,8.708869588999733,1000,22000,199 +134,pycma,500,8.54599751799833,1000,22000,199 +135,pycma,1000,29.4967762590004,1000,24000,124 +136,pycma,1000,29.012167872000646,1000,24000,124 +137,pycma,1000,27.93344243899992,1000,24000,124 +138,pycma,1000,28.30922819099942,1000,24000,124 +139,pycma,1000,27.979058864999388,1000,24000,124 +140,pycma,1000,28.11712050999995,1000,24000,124 +141,pycma,1000,27.912950415000523,1000,24000,124 +142,pycma,1000,28.5883970699997,1000,24000,124 +143,pycma,1000,28.60895138699925,1000,24000,124 +144,pycma,1000,28.46847972299838,1000,24000,124 +145,pycma,1000,27.724415839998983,1000,24000,124 +146,pycma,1000,28.42927620499904,1000,24000,124 +147,pycma,1000,27.965448310000284,1000,24000,124 +148,pycma,1000,28.395208088999425,1000,24000,124 +149,pycma,1000,28.403830919998654,1000,24000,124 diff --git a/scripts/repelling/repelling.py b/scripts/repelling/repelling.py index d5c53ef..aa264ee 100644 --- a/scripts/repelling/repelling.py +++ b/scripts/repelling/repelling.py @@ -13,6 +13,7 @@ import pandas as pd import modcma.c_maes as c_cmaes from modcma.c_maes.cmaescpp.parameters import Solution +from scipy.spatial.distance import mahalanobis base_dir = 
os.path.realpath(os.path.dirname(__file__)) @@ -148,13 +149,11 @@ def plot( ax.scatter(X[0, :], X[1, :], color=main_color, alpha=0.5) for t, tabu_point in enumerate(cma.p.repelling.archive, 1): - if c_cmaes.constants.repelling_current_cov: - Ct = C - else: - Ct = tabu_point.C + Ct = C theta_t = np.degrees(np.arctan2(Ct[1, 0], Ct[0, 0])) - + + # print(theta_t, np.degrees(np.arctan2(tabu_point.C[1, 0], tabu_point.C[0, 0]))) current = Ellipse( tabu_point.solution.x, @@ -416,7 +415,71 @@ def calc_taboo_potential(fid=3, instance=6, dim=2, n_trials=1): problem.reset() -def interactive(fid=21, instance=6, dim=2, rep=True, coverage=5, save_frames = True): +class CloseToTaboo(c_cmaes.restart.Criterion): + + def __init__(self): + super().__init__("CloseToTaboo") + + def update(self, par: c_cmaes.Parameters): + self.met = False + if len(par.repelling.archive) != 0: + d_sigma = par.mutation.sigma / par.settings.sigma0 + somewhat_converged = d_sigma < 1e-1 + + if somewhat_converged: + for p in par.repelling.archive: + distance = mahalanobis(par.adaptation.m, p.solution.x, par.repelling.C_inv) + threshold = 1 / np.sqrt(par.settings.dim) + if distance < threshold: + print("close to taboo", distance, threshold) + self.met = True + + + def on_update(self, par: c_cmaes.Parameters): + self.met = False + +class TooMuchRepelling(c_cmaes.restart.Criterion): + + def __init__(self): + super().__init__("TooMuchRepelling") + self.decay = 0 + self.alpha = 0.9 + + def update(self, par: c_cmaes.Parameters): + self.met = False + self.decay = (1 - self.alpha) * self.decay + (self.alpha * par.repelling.attempts) + if self.decay > (2 * par.lamb): + self.met = True + + def on_update(self, par: c_cmaes.Parameters): + self.met = False + self.decay = 0 + +class ConvergingToBadBasin(c_cmaes.restart.Criterion): + + def __init__(self): + super().__init__("ConvergingToBadBasin") + + def update(self, par: c_cmaes.Parameters): + self.met = False + if len(par.repelling.archive) != 0: + d_sigma = 
par.mutation.sigma / par.settings.sigma0 + somewhat_converged = d_sigma < 1e-1 + + if somewhat_converged: + function_values = np.array([p.solution.y for p in par.repelling.archive]) + threshold_value = np.median(function_values) + if threshold_value < par.pop.f.min(): + print("Bad basin", threshold_value, "vs", par.pop.f.min()) + self.met = True + + def on_update(self, par: c_cmaes.Parameters): + self.met = False + + + + +def interactive(fid=21, instance=6, dim=2, rep=True, coverage=5, save_frames = False): lb = -5 ub = 5 @@ -434,11 +497,11 @@ def interactive(fid=21, instance=6, dim=2, rep=True, coverage=5, save_frames = T # c_cmaes.constants.sigma_threshold = 0.25 # c_cmaes.constants.tol_min_sigma = 0.01 - c_cmaes.constants.repelling_current_cov = True modules = c_cmaes.parameters.Modules() - modules.restart_strategy = c_cmaes.options.RESTART - modules.bound_correction = c_cmaes.options.SATURATE + modules.restart_strategy = c_cmaes.options.BIPOP + # modules.bound_correction = c_cmaes.options.SATURATE modules.elitist = True + modules.active = True modules.repelling_restart = rep modules.center_placement = c_cmaes.options.UNIFORM settings = c_cmaes.parameters.Settings( @@ -447,36 +510,56 @@ def interactive(fid=21, instance=6, dim=2, rep=True, coverage=5, save_frames = T sigma0=2.0, budget=10_000 * dim, target=problem.optimum.y + 1e-8, + lb=np.ones(dim) * -5, + ub=np.ones(dim) * 5, ) parameters = c_cmaes.Parameters(settings) parameters.repelling.coverage = coverage cma = c_cmaes.ModularCMAES(parameters) + + # c1 = CloseToTaboo() + # c2 = TooMuchRepelling() + # c3 = ConvergingToBadBasin() + # cma.p.criteria.items = cma.p.criteria.items + [c1, c2, c3] archive_size = 0 while not cma.break_conditions(): + # if any(x.met for x in cma.p.criteria.items): + # breakpoint() + + # print("before start", cma.p.criteria.items) + # print(cma.p.repelling.archive) + # print(cma.p.stats.solutions) + # cma.p.start(problem) + # print() + # print("after start", cma.p.criteria.items) + # 
print(cma.p.repelling.archive) + # print(cma.p.stats.solutions) cma.mutate(problem) + # print() + # print("after mutate", cma.p.criteria.items) + # print(cma.p.repelling.archive) + # print(cma.p.stats.solutions) + if dim == 2: plot(cma, X, Y, Z, lb, ub, problem) - if save_frames: - plt.savefig(os.path.join( - base_dir, - f"figures/interactive/f{fid}i{instance}r{rep}{cma.p.stats.t:03d}.png" - )) - - if len(cma.p.repelling.archive) != archive_size: - archive_size = len(cma.p.repelling.archive) - for p in cma.p.repelling.archive: - print(f"({p.radius:.2e}, {p.criticality: .2e})", end=", ") - print() + + # if len(cma.p.repelling.archive) != archive_size: + # archive_size = len(cma.p.repelling.archive) + # for p in cma.p.repelling.archive: + # print(f"({p.radius:.2e}, {p.criticality: .2e})", end=", ") + # print() # breakpoint() # time.sleep(1) cma.select() cma.recombine() - cma.adapt(problem) + cma.adapt() + + print(problem.optimum) - print(cma.p.stats.solutions) + print(len(cma.p.stats.solutions)) # breakpoint() final_target = problem.state.current_best.y - problem.optimum.y print("final target: ", final_target, "used budget: ", problem.state.evaluations) diff --git a/scripts/tuning/test.py b/scripts/tuning/test.py index 7913366..630b3ff 100644 --- a/scripts/tuning/test.py +++ b/scripts/tuning/test.py @@ -27,7 +27,7 @@ def get_matrix(key: str) -> modcma.options.MatrixAdaptationType: if key == "matrix": return modcma.options.MatrixAdaptationType.MATRIX if key == "seperable": - return modcma.options.MatrixAdaptationType.SEPERABLE + return modcma.options.MatrixAdaptationType.SEPARABLE if key == "covariance": return modcma.options.MatrixAdaptationType.COVARIANCE if key == "none": diff --git a/setup.py b/setup.py index 6a931f6..81f13f6 100644 --- a/setup.py +++ b/setup.py @@ -13,26 +13,36 @@ __version__ = "1.0.13" -ext = Pybind11Extension( - "modcma.c_maes.cmaescpp", - [x for x in glob("src/*cpp") if "main" not in x], - include_dirs=["include", "external"], - cxx_std=17, -) 
if platform.system() in ("Linux", "Darwin"): os.environ["CC"] = "g++" os.environ["CXX"] = "g++" - flags = ["-O3", "-fno-math-errno", ] #"-fopenmp" + c_flags = [ + "-O3", + "-fno-math-errno", + "-funroll-loops", + "-ftree-vectorize", + ] + l_flags = [ + "-flto", + ] if platform.system() == "Darwin": - flags.append("-mmacosx-version-min=10.15") + c_flags.append("-mmacosx-version-min=10.15") else: - flags.append("-march=native") - - ext._add_cflags(flags) - ext._add_ldflags(flags) + c_flags.extend([ + "-march=native", + "-mtune=native", + ]) else: - ext._add_cflags(["/O2"]) + c_flags = ["/O2"] +ext = Pybind11Extension( + "modcma.c_maes.cmaescpp", + [x for x in glob("src/*cpp") if "main" not in x], + include_dirs=["include", "external"], + cxx_std=17, + extra_link_args=l_flags, + extra_compile_args=c_flags +) setuptools.setup( name="modcma", diff --git a/src/bounds.cpp b/src/bounds.cpp index 449ab5d..10e43ed 100644 --- a/src/bounds.cpp +++ b/src/bounds.cpp @@ -33,25 +33,31 @@ namespace bounds void BoundCorrection::correct(const Eigen::Index i, parameters::Parameters& p) { + if (!has_bounds) + return; + const auto oob = is_out_of_bounds(p.pop.X.col(i)); if (oob.any()) { n_out_of_bounds++; - p.pop.X.col(i) = correct_x(p.pop.X.col(i), oob); + if (p.settings.modules.bound_correction == parameters::CorrectionMethod::NONE) + return; + + p.pop.X.col(i) = correct_x(p.pop.X.col(i), oob, p.mutation->sigma); p.pop.Y.col(i) = p.adaptation->invert_x(p.pop.X.col(i), p.pop.s(i)); p.pop.Z.col(i) = p.adaptation->invert_y(p.pop.Y.col(i)); } } - Vector COTN::correct_x(const Vector& xi, const Mask& oob) + Vector COTN::correct_x(const Vector& xi, const Mask& oob, const Float sigma) { const Vector y = delta_out_of_bounds(xi, oob); return (oob).select( - lb.array() + db.array() * ((y.array() > 0).cast() - sampler().array().abs()).abs(), y); + lb.array() + db.array() * ((y.array() > 0).cast() - (sigma * sampler().array().abs())).abs(), y); } - Vector Mirror::correct_x(const Vector& xi, const 
Mask& oob) + Vector Mirror::correct_x(const Vector& xi, const Mask& oob, const Float sigma) { const Vector y = delta_out_of_bounds(xi, oob); return (oob).select( @@ -61,19 +67,19 @@ namespace bounds } - Vector UniformResample::correct_x(const Vector& xi, const Mask& oob) + Vector UniformResample::correct_x(const Vector& xi, const Mask& oob, const Float sigma) { return (oob).select(lb + sampler().cwiseProduct(db), xi); } - Vector Saturate::correct_x(const Vector& xi, const Mask& oob) + Vector Saturate::correct_x(const Vector& xi, const Mask& oob, const Float sigma) { const Vector y = delta_out_of_bounds(xi, oob); return (oob).select( lb.array() + db.array() * (y.array() > 0).cast(), y); } - Vector Toroidal::correct_x(const Vector& xi, const Mask& oob) + Vector Toroidal::correct_x(const Vector& xi, const Mask& oob, const Float sigma) { const Vector y = delta_out_of_bounds(xi, oob); return (oob).select( diff --git a/src/c_maes.cpp b/src/c_maes.cpp index 8244ead..2f26b95 100644 --- a/src/c_maes.cpp +++ b/src/c_maes.cpp @@ -3,8 +3,10 @@ void ModularCMAES::recombine() const { p->adaptation->m_old = p->adaptation->m; - p->adaptation->m = p->adaptation->m_old + ((p->pop.X.leftCols(p->mu).colwise() - p->adaptation->m_old) * p->weights. 
- positive); + p->adaptation->m = p->adaptation->m_old + ( + (p->pop.X.leftCols(p->mu).colwise() - p->adaptation->m_old) + * p->weights.positive + ); } void ModularCMAES::mutate(FunctionType &objective) const @@ -49,6 +51,6 @@ bool ModularCMAES::break_conditions() const const auto budget_used_up = p->stats.evaluations >= p->settings.budget; const auto exceed_gens = p->settings.max_generations and p->stats.t >= p->settings.max_generations; const auto restart_strategy_criteria = p->settings.modules.restart_strategy == parameters::RestartStrategyType::STOP - and p->criteria.any; + and p->criteria.any(); return exceed_gens or target_reached or budget_used_up or restart_strategy_criteria; } diff --git a/src/common.cpp b/src/common.cpp index 09478e8..0d12936 100644 --- a/src/common.cpp +++ b/src/common.cpp @@ -14,7 +14,8 @@ namespace constants size_t cache_max_doubles = 2'000'000; size_t cache_min_samples = 128; bool cache_samples = false; - bool clip_sigma = false; + bool clip_sigma = false; + bool use_box_muller = false; } namespace utils @@ -168,4 +169,38 @@ namespace functions res += pow(1.0e6, static_cast(i) / (static_cast(x.size()) - 1)) * x(i) * x(i); return res; } + + Float rosenbrock(const Vector& x) { + Float sum = 0.0; + for (auto i = 0; i < x.size() - 1; ++i) { + Float xi = x[i]; + Float xi1 = x[i + 1]; + Float term1 = 100.0 * std::pow(xi1 - xi * xi, 2); + Float term2 = std::pow(1.0 - xi, 2); + sum += term1 + term2; + } + return sum; + } + + Matrix random_rotation_matrix(int n, int seed) { + std::mt19937 gen(seed); + std::normal_distribution<> d(0, 1); + + Matrix A(n, n); + for (int i = 0; i < n; ++i) + for (int j = 0; j < n; ++j) + A(i, j) = d(gen); + + Eigen::HouseholderQR qr(A); + Matrix Q = qr.householderQ(); + + if (Q.determinant() < 0) { + Q.col(0) *= -1; + } + + return Q; + } + + + } diff --git a/src/es.cpp b/src/es.cpp index 3b7e563..bb35d11 100644 --- a/src/es.cpp +++ b/src/es.cpp @@ -15,7 +15,7 @@ namespace es const auto mask = 
corrector->is_out_of_bounds(x1); if (mask.any()) - x1 = corrector->correct_x(x1, mask); + x1 = corrector->correct_x(x1, mask, sigma); } while (rejection_sampling && n_rej++ < 5*d && bounds::any_out_of_bounds(x1, corrector->lb, corrector->ub) ); return x1; @@ -51,7 +51,7 @@ namespace es const auto mask = corrector->is_out_of_bounds(x); if (mask.any()) - x = corrector->correct_x(x, mask); + x = corrector->correct_x(x, mask, si.mean()); } while (rejection_sampling && n_rej++ < 5*d && bounds::any_out_of_bounds(x, corrector->lb, corrector->ub)); return x; diff --git a/src/interface.cpp b/src/interface.cpp index 9f138a3..5222575 100644 --- a/src/interface.cpp +++ b/src/interface.cpp @@ -15,1152 +15,1164 @@ namespace py = pybind11; template Float random_double() { - static RNG gen; - return gen(rng::GENERATOR); + static RNG gen; + return gen(rng::GENERATOR); } -void define_options(py::module &main) +void define_options(py::module& main) { - auto m = main.def_submodule("options"); - using namespace parameters; - py::enum_(m, "RecombinationWeights") - .value("DEFAULT", parameters::RecombinationWeights::DEFAULT) - .value("EQUAL", parameters::RecombinationWeights::EQUAL) - .value("HALF_POWER_LAMBDA", parameters::RecombinationWeights::HALF_POWER_LAMBDA) - .export_values(); - - py::enum_(m, "BaseSampler") - .value("UNIFORM", BaseSampler::UNIFORM) - .value("SOBOL", BaseSampler::SOBOL) - .value("HALTON", BaseSampler::HALTON) - .export_values(); - - py::enum_(m, "SampleTranformerType") - .value("NONE", SampleTranformerType::NONE) - .value("GAUSSIAN", SampleTranformerType::GAUSSIAN) - .value("SCALED_UNIFORM", SampleTranformerType::SCALED_UNIFORM) - .value("LAPLACE", SampleTranformerType::LAPLACE) - .value("LOGISTIC", SampleTranformerType::LOGISTIC) - .value("CAUCHY", SampleTranformerType::CAUCHY) - .value("DOUBLE_WEIBULL", SampleTranformerType::DOUBLE_WEIBULL) - .export_values(); - - py::enum_(m, "Mirror") - .value("NONE", Mirror::NONE) - .value("MIRRORED", Mirror::MIRRORED) - 
.value("PAIRWISE", Mirror::PAIRWISE) - .export_values(); - - py::enum_(m, "StepSizeAdaptation") - .value("CSA", StepSizeAdaptation::CSA) - .value("TPA", StepSizeAdaptation::TPA) - .value("MSR", StepSizeAdaptation::MSR) - .value("XNES", StepSizeAdaptation::XNES) - .value("MXNES", StepSizeAdaptation::MXNES) - .value("LPXNES", StepSizeAdaptation::LPXNES) - .value("PSR", StepSizeAdaptation::PSR) - .value("SR", StepSizeAdaptation::PSR) - .export_values(); - - py::enum_(m, "CorrectionMethod") - .value("NONE", CorrectionMethod::NONE) - .value("MIRROR", CorrectionMethod::MIRROR) - .value("COTN", CorrectionMethod::COTN) - .value("UNIFORM_RESAMPLE", CorrectionMethod::UNIFORM_RESAMPLE) - .value("SATURATE", CorrectionMethod::SATURATE) - .value("TOROIDAL", CorrectionMethod::TOROIDAL) - .value("RESAMPLE", CorrectionMethod::RESAMPLE) - .export_values(); - - py::enum_(m, "RestartStrategy") - .value("NONE", RestartStrategyType::NONE) - .value("STOP", RestartStrategyType::STOP) - .value("RESTART", RestartStrategyType::RESTART) - .value("IPOP", RestartStrategyType::IPOP) - .value("BIPOP", RestartStrategyType::BIPOP) - .export_values(); - - py::enum_(m, "MatrixAdaptationType") - .value("COVARIANCE", MatrixAdaptationType::COVARIANCE) - .value("NONE", MatrixAdaptationType::NONE) - .value("MATRIX", MatrixAdaptationType::MATRIX) - .value("SEPERABLE", MatrixAdaptationType::SEPERABLE) - .value("ONEPLUSONE", MatrixAdaptationType::ONEPLUSONE) - .export_values(); - - py::enum_(m, "CenterPlacement") - .value("X0", CenterPlacement::X0) - .value("ZERO", CenterPlacement::ZERO) - .value("UNIFORM", CenterPlacement::UNIFORM) - .export_values(); + auto m = main.def_submodule("options"); + using namespace parameters; + py::enum_(m, "RecombinationWeights") + .value("DEFAULT", parameters::RecombinationWeights::DEFAULT) + .value("EQUAL", parameters::RecombinationWeights::EQUAL) + .value("EXPONENTIAL", parameters::RecombinationWeights::EXPONENTIAL) + .export_values(); + + py::enum_(m, "BaseSampler") + 
.value("UNIFORM", BaseSampler::UNIFORM) + .value("SOBOL", BaseSampler::SOBOL) + .value("HALTON", BaseSampler::HALTON) + .export_values(); + + py::enum_(m, "SampleTranformerType") + .value("NONE", SampleTranformerType::NONE) + .value("GAUSSIAN", SampleTranformerType::GAUSSIAN) + .value("SCALED_UNIFORM", SampleTranformerType::SCALED_UNIFORM) + .value("LAPLACE", SampleTranformerType::LAPLACE) + .value("LOGISTIC", SampleTranformerType::LOGISTIC) + .value("CAUCHY", SampleTranformerType::CAUCHY) + .value("DOUBLE_WEIBULL", SampleTranformerType::DOUBLE_WEIBULL) + .export_values(); + + py::enum_(m, "Mirror") + .value("NONE", Mirror::NONE) + .value("MIRRORED", Mirror::MIRRORED) + .value("PAIRWISE", Mirror::PAIRWISE) + .export_values(); + + py::enum_(m, "StepSizeAdaptation") + .value("CSA", StepSizeAdaptation::CSA) + .value("TPA", StepSizeAdaptation::TPA) + .value("MSR", StepSizeAdaptation::MSR) + .value("XNES", StepSizeAdaptation::XNES) + .value("MXNES", StepSizeAdaptation::MXNES) + .value("LPXNES", StepSizeAdaptation::LPXNES) + .value("PSR", StepSizeAdaptation::PSR) + .value("SR", StepSizeAdaptation::SR) + .value("SA", StepSizeAdaptation::SA) + .export_values(); + + py::enum_(m, "CorrectionMethod") + .value("NONE", CorrectionMethod::NONE) + .value("MIRROR", CorrectionMethod::MIRROR) + .value("COTN", CorrectionMethod::COTN) + .value("UNIFORM_RESAMPLE", CorrectionMethod::UNIFORM_RESAMPLE) + .value("SATURATE", CorrectionMethod::SATURATE) + .value("TOROIDAL", CorrectionMethod::TOROIDAL) + .value("RESAMPLE", CorrectionMethod::RESAMPLE) + .export_values(); + + py::enum_(m, "RestartStrategy") + .value("NONE", RestartStrategyType::NONE) + .value("STOP", RestartStrategyType::STOP) + .value("RESTART", RestartStrategyType::RESTART) + .value("IPOP", RestartStrategyType::IPOP) + .value("BIPOP", RestartStrategyType::BIPOP) + .export_values(); + + py::enum_(m, "MatrixAdaptationType") + .value("COVARIANCE", MatrixAdaptationType::COVARIANCE) + .value("NONE", MatrixAdaptationType::NONE) + 
.value("MATRIX", MatrixAdaptationType::MATRIX) + .value("SEPARABLE", MatrixAdaptationType::SEPARABLE) + .value("CHOLESKY", MatrixAdaptationType::CHOLESKY) + .value("CMSA", MatrixAdaptationType::CMSA) + .value("COVARIANCE_NO_EIGV", MatrixAdaptationType::COVARIANCE_NO_EIGV) + .value("NATURAL_GRADIENT", MatrixAdaptationType::NATURAL_GRADIENT) + .export_values(); + + py::enum_(m, "CenterPlacement") + .value("X0", CenterPlacement::X0) + .value("ZERO", CenterPlacement::ZERO) + .value("UNIFORM", CenterPlacement::UNIFORM) + .export_values(); } struct PySampler : sampling::Sampler { - std::function func; + std::function func; - PySampler(size_t d, std::function f) : Sampler::Sampler(d), func(f) {} + PySampler(size_t d, std::function f) : Sampler::Sampler(d), func(f) {} - Vector operator()() override - { - Vector res(d); - for (size_t j = 0; j < d; ++j) - res(j) = func(); - return res; - }; + Vector operator()() override + { + Vector res(d); + for (size_t j = 0; j < d; ++j) + res(j) = func(); + return res; + }; }; -void define_samplers(py::module &main) +void define_samplers(py::module& main) { - using namespace sampling; - - auto m = main.def_submodule("sampling"); - - py::class_>(m, "Sampler") - .def_readonly("d", &Sampler::d) - .def("reset", &Sampler::reset) - .def("expected_length", &Sampler::expected_length); - - py::class_>(m, "PySampler") - .def(py::init>(), py::arg("d"), py::arg("function")) - .def("__call__", &PySampler::operator()); - - py::class_>(m, "Gaussian") - .def(py::init(), py::arg("d")) - .def("__call__", &Gaussian::operator()); - - py::class_>(m, "Uniform") - .def(py::init(), py::arg("d")) - .def("__call__", &Uniform::operator()); - - py::class_>(m, "Sobol") - .def(py::init(), py::arg("d")) - .def_readonly("cache", &Sobol::cache) - .def("__call__", &Sobol::operator()); - - py::class_>(m, "Halton") - .def(py::init(), py::arg("d"), py::arg("scrambled") = true) - .def("__call__", &Halton::operator()); - - py::class_>(m, "Mirrored") - .def(py::init>(), 
py::arg("sampler")) - .def("__call__", &Mirrored::operator()); - - py::class_>(m, "CachedSampler") - .def(py::init>(), py::arg("sampler")) - .def(py::init, bool>(), py::arg("cache"), py::arg("transform_ppf") = false) - .def("__call__", &CachedSampler::operator()) - .def_readonly("index", &CachedSampler::index) - .def_readonly("n_samples", &CachedSampler::n_samples) - .def_readonly("cache", &CachedSampler::cache); - - py::class_>(m, "Orthogonal") - .def(py::init, size_t>(), - py::arg("sampler"), py::arg("n_samples")) - .def("__call__", &Orthogonal::operator()); - - py::class_>(m, "SampleTransformer") - .def("raw", &SampleTransformer::raw); - - py::class_>(m, "IdentityTransformer") - .def(py::init>(), py::arg("sampler")) - .def("transform", &IdentityTransformer::transform) - .def("__call__", &IdentityTransformer::operator()) - .def("expected_length", &IdentityTransformer::expected_length); - - py::class_>(m, "GaussianTransformer") - .def(py::init>(), py::arg("sampler")) - .def("transform", &GaussianTransformer::transform) - .def("__call__", &GaussianTransformer::operator()) - .def("expected_length", &GaussianTransformer::expected_length); - - py::class_>(m, "UniformScaler") - .def(py::init>(), py::arg("sampler")) - .def("transform", &UniformScaler::transform) - .def("__call__", &UniformScaler::operator()) - .def("expected_length", &UniformScaler::expected_length); - - py::class_>(m, "LaplaceTransformer") - .def(py::init>(), py::arg("sampler")) - .def("transform", &LaplaceTransformer::transform) - .def("__call__", &LaplaceTransformer::operator()) - .def("expected_length", &LaplaceTransformer::expected_length); - - py::class_>(m, "LogisticTransformer") - .def(py::init>(), py::arg("sampler")) - .def("transform", &LogisticTransformer::transform) - .def("__call__", &LogisticTransformer::operator()) - .def("expected_length", &LogisticTransformer::expected_length); - - py::class_>(m, "CauchyTransformer") - .def(py::init>(), py::arg("sampler")) - .def("transform", 
&CauchyTransformer::transform) - .def("__call__", &CauchyTransformer::operator()) - .def("expected_length", &CauchyTransformer::expected_length); - - py::class_>(m, "DoubleWeibullTransformer") - .def(py::init>(), py::arg("sampler")) - .def("transform", &DoubleWeibullTransformer::transform) - .def("__call__", &DoubleWeibullTransformer::operator()) - .def("expected_length", &DoubleWeibullTransformer::expected_length); + using namespace sampling; + + auto m = main.def_submodule("sampling"); + + py::class_>(m, "Sampler") + .def_readonly("d", &Sampler::d) + .def("reset", &Sampler::reset) + .def("expected_length", &Sampler::expected_length); + + py::class_>(m, "PySampler") + .def(py::init>(), py::arg("d"), py::arg("function")) + .def("__call__", &PySampler::operator()); + + py::class_>(m, "Gaussian") + .def(py::init(), py::arg("d")) + .def("__call__", &Gaussian::operator()); + + py::class_>(m, "Uniform") + .def(py::init(), py::arg("d")) + .def("__call__", &Uniform::operator()); + + py::class_>(m, "Sobol") + .def(py::init(), py::arg("d")) + .def_readonly("cache", &Sobol::cache) + .def("__call__", &Sobol::operator()); + + py::class_>(m, "Halton") + .def(py::init(), py::arg("d"), py::arg("scrambled") = true) + .def("__call__", &Halton::operator()); + + py::class_>(m, "Mirrored") + .def(py::init>(), py::arg("sampler")) + .def("__call__", &Mirrored::operator()); + + py::class_>(m, "CachedSampler") + .def(py::init>(), py::arg("sampler")) + .def(py::init, bool>(), py::arg("cache"), py::arg("transform_ppf") = false) + .def("__call__", &CachedSampler::operator()) + .def_readonly("index", &CachedSampler::index) + .def_readonly("n_samples", &CachedSampler::n_samples) + .def_readonly("cache", &CachedSampler::cache); + + py::class_>(m, "Orthogonal") + .def(py::init, size_t>(), + py::arg("sampler"), py::arg("n_samples")) + .def("__call__", &Orthogonal::operator()); + + py::class_>(m, "SampleTransformer") + .def("raw", &SampleTransformer::raw); + + py::class_>(m, "IdentityTransformer") 
+ .def(py::init>(), py::arg("sampler")) + .def("transform", &IdentityTransformer::transform) + .def("__call__", &IdentityTransformer::operator()) + .def("expected_length", &IdentityTransformer::expected_length); + + py::class_>(m, "GaussianTransformer") + .def(py::init>(), py::arg("sampler")) + .def("transform", &GaussianTransformer::transform) + .def("__call__", &GaussianTransformer::operator()) + .def("expected_length", &GaussianTransformer::expected_length); + + py::class_>(m, "UniformScaler") + .def(py::init>(), py::arg("sampler")) + .def("transform", &UniformScaler::transform) + .def("__call__", &UniformScaler::operator()) + .def("expected_length", &UniformScaler::expected_length); + + py::class_>(m, "LaplaceTransformer") + .def(py::init>(), py::arg("sampler")) + .def("transform", &LaplaceTransformer::transform) + .def("__call__", &LaplaceTransformer::operator()) + .def("expected_length", &LaplaceTransformer::expected_length); + + py::class_>(m, "LogisticTransformer") + .def(py::init>(), py::arg("sampler")) + .def("transform", &LogisticTransformer::transform) + .def("__call__", &LogisticTransformer::operator()) + .def("expected_length", &LogisticTransformer::expected_length); + + py::class_>(m, "CauchyTransformer") + .def(py::init>(), py::arg("sampler")) + .def("transform", &CauchyTransformer::transform) + .def("__call__", &CauchyTransformer::operator()) + .def("expected_length", &CauchyTransformer::expected_length); + + py::class_>(m, "DoubleWeibullTransformer") + .def(py::init>(), py::arg("sampler")) + .def("transform", &DoubleWeibullTransformer::transform) + .def("__call__", &DoubleWeibullTransformer::operator()) + .def("expected_length", &DoubleWeibullTransformer::expected_length); } -void define_utils(py::module &main) +void define_utils(py::module& main) { - auto m = main.def_submodule("utils"); - m.def("cdf", &cdf, py::arg("x")); - m.def("ppf", &ppf, py::arg("x")); - m.def("i8_sobol", &i8_sobol, py::arg("dim_num"), py::arg("seed"), py::arg("quasi")); - 
m.def("compute_ert", &utils::compute_ert, py::arg("running_times"), py::arg("budget")); - m.def("set_seed", &rng::set_seed, py::arg("seed"), "Set the random seed"); - m.def("random_uniform", &random_double>, "Generate a uniform random number in [0, 1]"); - m.def("random_normal", &random_double>, "Generate a standard normal random number"); - - py::class_(m, "Shuffler") - .def(py::init(), py::arg("start"), py::arg("stop")) - .def(py::init(), py::arg("stop")) - .def("next", &rng::Shuffler::next) - .def_readwrite("start", &rng::Shuffler::start) - .def_readwrite("stop", &rng::Shuffler::stop) - .def_readwrite("n", &rng::Shuffler::n) - .def_readwrite("seed", &rng::Shuffler::seed) - .def_readwrite("offset", &rng::Shuffler::offset) - .def_readwrite("multiplier", &rng::Shuffler::multiplier) - .def_readwrite("modulus", &rng::Shuffler::modulus) - .def_readwrite("found", &rng::Shuffler::found); - - py::class_(m, "CachedShuffleSequence") - .def(py::init(), py::arg("dim")) - .def("fill", &rng::CachedShuffleSequence::fill) - .def("get_index", &rng::CachedShuffleSequence::get_index, py::arg("index")) - .def("next", &rng::CachedShuffleSequence::next); + auto m = main.def_submodule("utils"); + m.def("cdf", &cdf, py::arg("x")); + m.def("ppf", &ppf, py::arg("x")); + m.def("i8_sobol", &i8_sobol, py::arg("dim_num"), py::arg("seed"), py::arg("quasi")); + m.def("compute_ert", &utils::compute_ert, py::arg("running_times"), py::arg("budget")); + m.def("set_seed", &rng::set_seed, py::arg("seed"), "Set the random seed"); + m.def("random_uniform", &random_double>, "Generate a uniform random number in [0, 1]"); + m.def("random_normal", &random_double>, "Generate a standard normal random number"); + + py::class_(m, "Shuffler") + .def(py::init(), py::arg("start"), py::arg("stop")) + .def(py::init(), py::arg("stop")) + .def("next", &rng::Shuffler::next) + .def_readwrite("start", &rng::Shuffler::start) + .def_readwrite("stop", &rng::Shuffler::stop) + .def_readwrite("n", &rng::Shuffler::n) + 
.def_readwrite("seed", &rng::Shuffler::seed) + .def_readwrite("offset", &rng::Shuffler::offset) + .def_readwrite("multiplier", &rng::Shuffler::multiplier) + .def_readwrite("modulus", &rng::Shuffler::modulus) + .def_readwrite("found", &rng::Shuffler::found); + + py::class_(m, "CachedShuffleSequence") + .def(py::init(), py::arg("dim")) + .def("fill", &rng::CachedShuffleSequence::fill) + .def("get_index", &rng::CachedShuffleSequence::get_index, py::arg("index")) + .def("next", &rng::CachedShuffleSequence::next); } -void define_selection(py::module &main) +void define_selection(py::module& main) { - auto m = main.def_submodule("selection"); - using namespace selection; - py::class_>(m, "Elitsm") - .def(py::init<>()) - .def("__call__", &Elitsm::operator(), py::arg("parameters")); - - py::class_>(m, "NoElitsm") - .def(py::init<>()) - .def("__call__", &NoElitsm::operator(), py::arg("parameters")); - - py::class_>(m, "Pairwise") - .def(py::init<>()) - .def("__call__", &Pairwise::operator(), py::arg("parameters")); - - py::class_>(m, "NoPairwise") - .def(py::init<>()) - .def("__call__", &NoPairwise::operator(), py::arg("parameters")); - - py::class_>(m, "Strategy") - .def(py::init(), py::arg("modules")) - .def("select", &Strategy::select, py::arg("parameters")) - .def_readwrite("pairwise", &Strategy::pairwise) - .def_readwrite("elitsm", &Strategy::elitsm); + auto m = main.def_submodule("selection"); + using namespace selection; + py::class_>(m, "Elitsm") + .def(py::init<>()) + .def("__call__", &Elitsm::operator(), py::arg("parameters")); + + py::class_>(m, "NoElitsm") + .def(py::init<>()) + .def("__call__", &NoElitsm::operator(), py::arg("parameters")); + + py::class_>(m, "Pairwise") + .def(py::init<>()) + .def("__call__", &Pairwise::operator(), py::arg("parameters")); + + py::class_>(m, "NoPairwise") + .def(py::init<>()) + .def("__call__", &NoPairwise::operator(), py::arg("parameters")); + + py::class_>(m, "Strategy") + .def(py::init(), py::arg("modules")) + .def("select", 
&Strategy::select, py::arg("parameters")) + .def_readwrite("pairwise", &Strategy::pairwise) + .def_readwrite("elitsm", &Strategy::elitsm); } -void define_center_placement(py::module &main) +void define_center_placement(py::module& main) { - auto m = main.def_submodule("center"); - using namespace center; - py::class_>(m, "Placement") - .def("__call__", &Placement::operator(), py::arg("parameters")); + auto m = main.def_submodule("center"); + using namespace center; + py::class_>(m, "Placement") + .def("__call__", &Placement::operator(), py::arg("parameters")); - py::class_>(m, "X0") - .def(py::init<>()); + py::class_>(m, "X0") + .def(py::init<>()); - py::class_>(m, "Uniform") - .def(py::init<>()); + py::class_>(m, "Uniform") + .def(py::init<>()); - py::class_>(m, "Zero") - .def(py::init<>()); + py::class_>(m, "Zero") + .def(py::init<>()); } -void define_repelling(py::module &main) +void define_repelling(py::module& main) { - using namespace repelling; - auto m = main.def_submodule("repelling"); - - py::class_(m, "TabooPoint") - .def(py::init(), py::arg("solution"), py::arg("radius")) - .def("rejects", &TabooPoint::rejects, py::arg("xi"), py::arg("p"), py::arg("attempts")) - .def("shares_basin", &TabooPoint::shares_basin, py::arg("objective"), py::arg("xi"), py::arg("p")) - .def("calculate_criticality", &TabooPoint::calculate_criticality, py::arg("p")) - .def_readwrite("radius", &TabooPoint::radius) - .def_readwrite("n_rep", &TabooPoint::n_rep) - .def_readwrite("solution", &TabooPoint::solution) - .def_readwrite("shrinkage", &TabooPoint::shrinkage) - .def_readwrite("criticality", &TabooPoint::criticality); - - py::class_>(m, "Repelling") - .def(py::init<>()) - .def("is_rejected", &Repelling::is_rejected, py::arg("xi"), py::arg("p")) - .def("update_archive", &Repelling::update_archive, py::arg("objective"), py::arg("p")) - .def("prepare_sampling", &Repelling::prepare_sampling, py::arg("p")) - .def_readwrite("archive", &Repelling::archive) - .def_readwrite("coverage", 
&Repelling::coverage) - .def_readwrite("attempts", &Repelling::attempts); - - py::class_>(m, "NoRepelling") - .def(py::init<>()); - - m.def("euclidian", &distance::euclidian, py::arg("u"), py::arg("v")); - m.def("manhattan", &distance::manhattan, py::arg("u"), py::arg("v")); - m.def("mahanolobis", &distance::mahanolobis, py::arg("u"), py::arg("v"), py::arg("C_inv")); - m.def("hill_valley_test", &distance::hill_valley_test, - py::arg("u"), py::arg("v"), py::arg("f"), py::arg("n_evals")); + using namespace repelling; + auto m = main.def_submodule("repelling"); + + py::class_(m, "TabooPoint") + .def(py::init(), py::arg("solution"), py::arg("radius")) + .def("rejects", &TabooPoint::rejects, py::arg("xi"), py::arg("p"), py::arg("attempts")) + .def("shares_basin", &TabooPoint::shares_basin, py::arg("objective"), py::arg("xi"), py::arg("p")) + .def("calculate_criticality", &TabooPoint::calculate_criticality, py::arg("p")) + .def_readwrite("radius", &TabooPoint::radius) + .def_readwrite("n_rep", &TabooPoint::n_rep) + .def_readwrite("solution", &TabooPoint::solution) + .def_readwrite("shrinkage", &TabooPoint::shrinkage) + .def_readwrite("criticality", &TabooPoint::criticality) + .def("__repr__", [] (TabooPoint& tb) { + return ""; + }); + + py::class_>(m, "Repelling") + .def(py::init<>()) + .def("is_rejected", &Repelling::is_rejected, py::arg("xi"), py::arg("p")) + .def("update_archive", &Repelling::update_archive, py::arg("objective"), py::arg("p")) + .def("prepare_sampling", &Repelling::prepare_sampling, py::arg("p")) + .def_readwrite("archive", &Repelling::archive) + .def_readwrite("coverage", &Repelling::coverage) + .def_readwrite("attempts", &Repelling::attempts) + ; + + py::class_>(m, "NoRepelling") + .def(py::init<>()); + + m.def("euclidian", &distance::euclidian, py::arg("u"), py::arg("v")); + m.def("manhattan", &distance::manhattan, py::arg("u"), py::arg("v")); + m.def("mahanolobis", &distance::mahanolobis, py::arg("u"), py::arg("v"), py::arg("C_inv")); + 
m.def("hill_valley_test", &distance::hill_valley_test, + py::arg("u"), py::arg("v"), py::arg("f"), py::arg("n_evals")); } -void define_matrix_adaptation(py::module &main) +void define_matrix_adaptation(py::module& main) { - using namespace matrix_adaptation; - auto m = main.def_submodule("matrix_adaptation"); - py::class_>(m, "Adaptation") - .def_readwrite("m", &Adaptation::m) - .def_readwrite("m_old", &Adaptation::m_old) - .def_readwrite("dm", &Adaptation::dm) - .def_readwrite("ps", &Adaptation::ps) - .def_readwrite("dd", &Adaptation::dd) - .def_readwrite("expected_length_z", &Adaptation::expected_length_z) - .def_readwrite("inv_C", &CovarianceAdaptation::inv_C) - .def("adapt_evolution_paths", &Adaptation::adapt_evolution_paths, - py::arg("pop"), - py::arg("weights"), - py::arg("mutation"), - py::arg("stats"), - py::arg("mu"), - py::arg("lamb")) - .def("adapt_matrix", &Adaptation::adapt_matrix, - py::arg("weights"), - py::arg("modules"), - py::arg("population"), - py::arg("mu"), - py::arg("settings"), - py::arg("stats")) - .def("restart", &Adaptation::restart, py::arg("settings")) - .def("compute_y", &Adaptation::compute_y, py::arg("zi")) - .def("invert_x", &Adaptation::invert_x, py::arg("xi"), py::arg("sigma")) - .def("invert_y", &Adaptation::invert_y, py::arg("yi")) - .def("__repr__", [](Adaptation &dyn) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_>(m, "CovarianceAdaptation") - .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) - .def_readwrite("pc", &CovarianceAdaptation::pc) - .def_readwrite("d", &CovarianceAdaptation::d) - .def_readwrite("B", &CovarianceAdaptation::B) - .def_readwrite("C", &CovarianceAdaptation::C) - .def_readwrite("inv_root_C", &CovarianceAdaptation::inv_root_C) - .def_readwrite("hs", &CovarianceAdaptation::hs) - .def("adapt_covariance_matrix", &CovarianceAdaptation::adapt_covariance_matrix, - py::arg("weights"), - py::arg("modules"), - 
py::arg("population"), - py::arg("mu")) - .def("perform_eigendecomposition", &CovarianceAdaptation::perform_eigendecomposition, py::arg("stats")) - .def("__repr__", [](CovarianceAdaptation &dyn) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_>(m, "SeperableAdaptation") - .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) - .def("__repr__", [](SeperableAdaptation &dyn) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_>(m, "OnePlusOneAdaptation") - .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) - .def("__repr__", [](SeperableAdaptation &dyn) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_>(m, "MatrixAdaptation") - .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) - .def_readwrite("M", &MatrixAdaptation::M) - .def_readwrite("M_inv", &MatrixAdaptation::M_inv) - .def("__repr__", [](MatrixAdaptation &dyn) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_>(m, "NoAdaptation") - .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) - .def("__repr__", [](None &dyn) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); + using namespace matrix_adaptation; + auto m = main.def_submodule("matrix_adaptation"); + py::class_>(m, "Adaptation") + .def_readwrite("m", &Adaptation::m) + .def_readwrite("m_old", &Adaptation::m_old) + .def_readwrite("dm", &Adaptation::dm) + .def_readwrite("ps", &Adaptation::ps) + .def_readwrite("dz", &Adaptation::dz) + .def_readwrite("dd", &Adaptation::dd) + .def_readwrite("expected_length_z", &Adaptation::expected_length_z) + .def("adapt_evolution_paths", &Adaptation::adapt_evolution_paths, + py::arg("pop"), + py::arg("weights"), + py::arg("stats"), + py::arg("settings"), + 
py::arg("mu"), + py::arg("lamb")) + .def("adapt_evolution_paths_innner", &Adaptation::adapt_evolution_paths_inner, + py::arg("pop"), + py::arg("weights"), + py::arg("stats"), + py::arg("settings"), + py::arg("mu"), + py::arg("lamb")) + .def("adapt_matrix", &Adaptation::adapt_matrix, + py::arg("weights"), + py::arg("modules"), + py::arg("population"), + py::arg("mu"), + py::arg("settings"), + py::arg("stats")) + .def("restart", &Adaptation::restart, py::arg("settings"), py::arg("sigma")) + .def("distance", &Adaptation::distance, py::arg("u"), py::arg("v")) + .def("distance_from_center", &Adaptation::distance_from_center, py::arg("x")) + .def("compute_y", &Adaptation::compute_y, py::arg("zi")) + .def("invert_x", &Adaptation::invert_x, py::arg("xi"), py::arg("sigma")) + .def("invert_y", &Adaptation::invert_y, py::arg("yi")) + .def("__repr__", [] (Adaptation& dyn) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + + py::class_>(m, "NoAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) + .def("__repr__", [] (None& dyn) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + + py::class_>(m, "CovarianceAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) + .def_readwrite("pc", &CovarianceAdaptation::pc) + .def_readwrite("d", &CovarianceAdaptation::d) + .def_readwrite("B", &CovarianceAdaptation::B) + .def_readwrite("C", &CovarianceAdaptation::C) + .def_readwrite("A", &CovarianceAdaptation::A) + .def_readwrite("inv_root_C", &CovarianceAdaptation::inv_root_C) + .def_readwrite("hs", &CovarianceAdaptation::hs) + .def("adapt_covariance_matrix", &CovarianceAdaptation::adapt_covariance_matrix, + py::arg("weights"), + py::arg("modules"), + py::arg("population"), + py::arg("mu")) + .def("perform_eigendecomposition", &CovarianceAdaptation::perform_eigendecomposition, py::arg("stats")) + .def("adapt_ps", 
&CovarianceAdaptation::adapt_ps, py::arg("weights")) + .def("__repr__", [] (CovarianceAdaptation& dyn) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + py::class_>(m, "SeparableAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) + .def_readwrite("c", &SeparableAdaptation::c) + .def_readwrite("pc", &SeparableAdaptation::pc) + .def_readwrite("d", &SeparableAdaptation::d) + .def("__repr__", [] (SeparableAdaptation& dyn) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + py::class_>(m, "MatrixAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) + .def_readwrite("M", &MatrixAdaptation::M) + .def_readwrite("M_inv", &MatrixAdaptation::M_inv) + .def("__repr__", [] (MatrixAdaptation& dyn) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + py::class_>(m, "CholeskyAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) + .def_readwrite("A", &CholeskyAdaptation::A) + .def_readwrite("pc", &CholeskyAdaptation::pc); + + py::class_>(m, "SelfAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z")) + .def_readwrite("A", &SelfAdaptation::A) + .def_readwrite("C", &SelfAdaptation::C); + + py::class_>(m, "CovarianceNoEigvAdaptation") + ; + + py::class_>(m, "NaturalGradientAdaptation") + .def(py::init(), py::arg("dimension"), py::arg("x0"), py::arg("expected_length_z"), py::arg("sigma")) + .def_readwrite("A", &NaturalGradientAdaptation::A) + .def_readwrite("A_inv", &NaturalGradientAdaptation::A_inv) + .def_readwrite("G", &NaturalGradientAdaptation::G) + .def_readwrite("sigma_g", &NaturalGradientAdaptation::sigma_g) + .def("compute_gradients", &NaturalGradientAdaptation::compute_gradients, py::arg("pop"), + py::arg("weights"), + py::arg("stats"), + py::arg("settings"), + py::arg("mu"), + 
py::arg("lamb") + ) + ; } -void define_parameters(py::module &main) +void define_parameters(py::module& main) { - auto m = main.def_submodule("parameters"); - using namespace parameters; - - py::class_(m, "Modules") - .def(py::init<>()) - .def_readwrite("elitist", &Modules::elitist) - .def_readwrite("active", &Modules::active) - .def_readwrite("orthogonal", &Modules::orthogonal) - .def_readwrite("sequential_selection", &Modules::sequential_selection) - .def_readwrite("threshold_convergence", &Modules::threshold_convergence) - .def_readwrite("sample_sigma", &Modules::sample_sigma) - .def_readwrite("weights", &Modules::weights) - .def_readwrite("sampler", &Modules::sampler) - .def_readwrite("mirrored", &Modules::mirrored) - .def_readwrite("ssa", &Modules::ssa) - .def_readwrite("bound_correction", &Modules::bound_correction) - .def_readwrite("restart_strategy", &Modules::restart_strategy) - .def_readwrite("repelling_restart", &Modules::repelling_restart) - .def_readwrite("matrix_adaptation", &Modules::matrix_adaptation) - .def_readwrite("center_placement", &Modules::center_placement) - .def_readwrite("sample_transformation", &Modules::sample_transformation) - .def("__repr__", [](Modules &mod) - { return to_string(mod); }); - - py::class_(m, "Solution") - .def(py::init<>()) - .def_readwrite("x", &Solution::x) - .def_readwrite("y", &Solution::y) - .def_readwrite("t", &Solution::t) - .def_readwrite("e", &Solution::e) - .def("__repr__", &Solution::repr); - - py::class_(m, "Stats") - .def(py::init<>()) - .def_readwrite("t", &Stats::t) - .def_readwrite("evaluations", &Stats::evaluations) - .def_readwrite("current_avg", &Stats::current_avg) - .def_readwrite("solutions", &Stats::solutions) - .def_readwrite("centers", &Stats::centers) - .def_readwrite("current_best", &Stats::current_best) - .def_readwrite("global_best", &Stats::global_best) - .def_readwrite("has_improved", &Stats::has_improved) - .def_readwrite("success_ratio", &Stats::success_ratio) - .def("__repr__", 
[](Stats &stats) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_(m, "Weights") - .def( - py::init(), - py::arg("dimension"), - py::arg("mu0"), - py::arg("lambda0"), - py::arg("modules")) - .def_readwrite("mueff", &Weights::mueff) - .def_readwrite("mueff_neg", &Weights::mueff_neg) - .def_readwrite("c1", &Weights::c1) - .def_readwrite("cmu", &Weights::cmu) - .def_readwrite("cc", &Weights::cc) - .def_readwrite("weights", &Weights::weights) - .def_readwrite("positive", &Weights::positive) - .def_readwrite("negative", &Weights::negative) - .def("__repr__", [](Weights &weights) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - py::class_>(m, "Settings") - .def(py::init, std::optional, size_to, size_to, std::optional, - std::optional, std::optional, std::optional, - std::optional, std::optional, - std::optional, std::optional, std::optional, - std::optional, bool>(), - py::arg("dim"), - py::arg("modules") = std::nullopt, - py::arg("target") = std::nullopt, - py::arg("max_generations") = std::nullopt, - py::arg("budget") = std::nullopt, - py::arg("sigma0") = std::nullopt, - py::arg("lambda0") = std::nullopt, - py::arg("mu0") = std::nullopt, - py::arg("x0") = std::nullopt, - py::arg("lb") = std::nullopt, - py::arg("ub") = std::nullopt, - py::arg("cs") = std::nullopt, - py::arg("cc") = std::nullopt, - py::arg("cmu") = std::nullopt, - py::arg("c1") = std::nullopt, - py::arg("verbose") = false) - .def_readonly("dim", &Settings::dim) - .def_readonly("modules", &Settings::modules) - .def_readwrite("target", &Settings::target) - .def_readwrite("max_generations", &Settings::max_generations) - .def_readwrite("budget", &Settings::budget) - .def_readwrite("sigma0", &Settings::sigma0) - .def_readwrite("lambda0", &Settings::lambda0) - .def_readwrite("mu0", &Settings::mu0) - .def_readwrite("x0", &Settings::x0) - .def_readwrite("lb", &Settings::lb) - .def_readwrite("ub", &Settings::ub) - 
.def_readwrite("cs", &Settings::cs) - .def_readwrite("cc", &Settings::cc) - .def_readwrite("cmu", &Settings::cmu) - .def_readwrite("c1", &Settings::c1) - .def_readwrite("verbose", &Settings::verbose) - .def_readonly("volume", &Settings::volume) - .def("__repr__", [](Settings &settings) - { - std::stringstream ss; - ss << std::boolalpha; - ss << ""; - return ss.str(); }); - - ; - - using AdaptationType = std::variant< - std::shared_ptr, - std::shared_ptr, - std::shared_ptr, - std::shared_ptr, - std::shared_ptr>; - - py::class_>(main, "Parameters") - .def(py::init(), py::arg("dimension")) - .def(py::init(), py::arg("settings")) - .def("adapt", &Parameters::adapt) - .def("start", &Parameters::start, py::arg("objective")) - .def("perform_restart", &Parameters::perform_restart, py::arg("objective"), - py::arg("sigma") = std::nullopt) - .def_readwrite("settings", &Parameters::settings) - .def_readwrite("mu", &Parameters::mu) - .def_readwrite("lamb", &Parameters::lambda) - .def_property( - "adaptation", - [](Parameters &self) -> AdaptationType - { - switch (self.settings.modules.matrix_adaptation) - { - case MatrixAdaptationType::MATRIX: - return std::dynamic_pointer_cast(self.adaptation); - case MatrixAdaptationType::NONE: - return std::dynamic_pointer_cast(self.adaptation); - case MatrixAdaptationType::SEPERABLE: - return std::dynamic_pointer_cast(self.adaptation); - case MatrixAdaptationType::ONEPLUSONE: - return std::dynamic_pointer_cast(self.adaptation); - default: - case MatrixAdaptationType::COVARIANCE: - return std::dynamic_pointer_cast(self.adaptation); - } - }, - [](Parameters &self, std::shared_ptr adaptation) - { - self.adaptation = adaptation; - }) - .def_readwrite("criteria", &Parameters::criteria) - .def_readwrite("stats", &Parameters::stats) - .def_readwrite("weights", &Parameters::weights) - .def_readwrite("pop", &Parameters::pop) - .def_readwrite("old_pop", &Parameters::old_pop) - .def_readwrite("sampler", &Parameters::sampler) - 
.def_readwrite("mutation", &Parameters::mutation) - .def_readwrite("selection", &Parameters::selection) - .def_readwrite("restart_strategy", &Parameters::restart_strategy) - .def_readwrite("repelling", &Parameters::repelling) - .def_readwrite("bounds", &Parameters::bounds) - .def_readwrite("center_placement", &Parameters::center_placement); + auto m = main.def_submodule("parameters"); + using namespace parameters; + + py::class_(m, "Modules") + .def(py::init<>()) + .def_readwrite("elitist", &Modules::elitist) + .def_readwrite("active", &Modules::active) + .def_readwrite("orthogonal", &Modules::orthogonal) + .def_readwrite("sequential_selection", &Modules::sequential_selection) + .def_readwrite("threshold_convergence", &Modules::threshold_convergence) + .def_readwrite("sample_sigma", &Modules::sample_sigma) + .def_readwrite("weights", &Modules::weights) + .def_readwrite("sampler", &Modules::sampler) + .def_readwrite("mirrored", &Modules::mirrored) + .def_readwrite("ssa", &Modules::ssa) + .def_readwrite("bound_correction", &Modules::bound_correction) + .def_readwrite("restart_strategy", &Modules::restart_strategy) + .def_readwrite("repelling_restart", &Modules::repelling_restart) + .def_readwrite("matrix_adaptation", &Modules::matrix_adaptation) + .def_readwrite("center_placement", &Modules::center_placement) + .def_readwrite("sample_transformation", &Modules::sample_transformation) + .def("__repr__", [] (Modules& mod) + { return to_string(mod); }); + + py::class_(m, "Solution") + .def(py::init<>()) + .def_readwrite("x", &Solution::x) + .def_readwrite("y", &Solution::y) + .def_readwrite("t", &Solution::t) + .def_readwrite("e", &Solution::e) + .def("__repr__", &Solution::repr); + + py::class_(m, "Stats") + .def(py::init<>()) + .def_readwrite("t", &Stats::t) + .def_readwrite("evaluations", &Stats::evaluations) + .def_readwrite("current_avg", &Stats::current_avg) + .def_readwrite("solutions", &Stats::solutions) + .def_readwrite("centers", &Stats::centers) + 
.def_readwrite("current_best", &Stats::current_best) + .def_readwrite("global_best", &Stats::global_best) + .def_readwrite("has_improved", &Stats::has_improved) + .def_readwrite("success_ratio", &Stats::success_ratio) + .def_readwrite("last_update", &Stats::last_update) + .def_readwrite("n_updates", &Stats::n_updates) + .def("__repr__", [] (Stats& stats) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + py::class_(m, "Weights") + .def( + py::init(), + py::arg("dimension"), + py::arg("mu0"), + py::arg("lambda0"), + py::arg("modules"), + py::arg("expected_length_z") + ) + .def_readwrite("mueff", &Weights::mueff) + .def_readwrite("mueff_neg", &Weights::mueff_neg) + .def_readwrite("c1", &Weights::c1) + .def_readwrite("cmu", &Weights::cmu) + .def_readwrite("cc", &Weights::cc) + .def_readwrite("cs", &Weights::cs) + .def_readwrite("damps", &Weights::damps) + .def_readwrite("sqrt_cc_mueff", &Weights::sqrt_cc_mueff) + .def_readwrite("sqrt_cs_mueff", &Weights::sqrt_cs_mueff) + .def_readwrite("lazy_update_interval", &Weights::lazy_update_interval) + .def_readwrite("expected_length_z", &Weights::expected_length_z) + .def_readwrite("expected_length_ps", &Weights::expected_length_ps) + .def_readwrite("beta", &Weights::beta) + .def_readwrite("weights", &Weights::weights) + .def_readwrite("positive", &Weights::positive) + .def_readwrite("negative", &Weights::negative) + .def("__repr__", [] (Weights& weights) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + py::class_>(m, "Settings") + .def(py::init, std::optional, size_to, size_to, std::optional, + std::optional, std::optional, std::optional, + std::optional, std::optional, + std::optional, std::optional, std::optional, + std::optional, std::optional, std::optional, + bool, bool>(), + py::arg("dim"), + py::arg("modules") = std::nullopt, + py::arg("target") = std::nullopt, + py::arg("max_generations") = std::nullopt, + py::arg("budget") = 
std::nullopt, + py::arg("sigma0") = std::nullopt, + py::arg("lambda0") = std::nullopt, + py::arg("mu0") = std::nullopt, + py::arg("x0") = std::nullopt, + py::arg("lb") = std::nullopt, + py::arg("ub") = std::nullopt, + py::arg("cs") = std::nullopt, + py::arg("cc") = std::nullopt, + py::arg("cmu") = std::nullopt, + py::arg("c1") = std::nullopt, + py::arg("damps") = std::nullopt, + py::arg("acov") = std::nullopt, + py::arg("verbose") = false, + py::arg("always_compute_eigv") = false + + ) + .def_readonly("dim", &Settings::dim) + .def_readonly("modules", &Settings::modules) + .def_readwrite("target", &Settings::target) + .def_readwrite("max_generations", &Settings::max_generations) + .def_readwrite("budget", &Settings::budget) + .def_readwrite("sigma0", &Settings::sigma0) + .def_readwrite("lambda0", &Settings::lambda0) + .def_readwrite("mu0", &Settings::mu0) + .def_readwrite("x0", &Settings::x0) + .def_readwrite("lb", &Settings::lb) + .def_readwrite("ub", &Settings::ub) + .def_readwrite("cs", &Settings::cs) + .def_readwrite("cc", &Settings::cc) + .def_readwrite("cmu", &Settings::cmu) + .def_readwrite("c1", &Settings::c1) + .def_readwrite("damps", &Settings::damps) + .def_readwrite("acov", &Settings::acov) + .def_readwrite("verbose", &Settings::verbose) + .def_readonly("volume", &Settings::volume) + .def_readonly("one_plus_one", &Settings::one_plus_one) + .def("__repr__", [] (Settings& settings) + { + std::stringstream ss; + ss << std::boolalpha; + ss << ""; + return ss.str(); }); + + ; + + using AdaptationType = std::variant< + std::shared_ptr, + std::shared_ptr, + std::shared_ptr, + std::shared_ptr, + std::shared_ptr, + std::shared_ptr, + std::shared_ptr, + std::shared_ptr + >; + + py::class_>(main, "Parameters") + .def(py::init(), py::arg("dimension")) + .def(py::init(), py::arg("settings")) + .def("adapt", &Parameters::adapt) + .def("start", &Parameters::start, py::arg("objective")) + .def("perform_restart", &Parameters::perform_restart, py::arg("objective"), + 
py::arg("sigma") = std::nullopt) + .def_readwrite("settings", &Parameters::settings) + .def_readwrite("mu", &Parameters::mu) + .def_readwrite("lamb", &Parameters::lambda) + .def_property( + "adaptation", + [] (Parameters& self) -> AdaptationType + { + switch (self.settings.modules.matrix_adaptation) + { + case MatrixAdaptationType::MATRIX: + return std::dynamic_pointer_cast(self.adaptation); + case MatrixAdaptationType::NONE: + return std::dynamic_pointer_cast(self.adaptation); + case MatrixAdaptationType::SEPARABLE: + return std::dynamic_pointer_cast(self.adaptation); + case MatrixAdaptationType::CHOLESKY: + return std::dynamic_pointer_cast(self.adaptation); + case MatrixAdaptationType::CMSA: + return std::dynamic_pointer_cast(self.adaptation); + case MatrixAdaptationType::COVARIANCE_NO_EIGV: + return std::dynamic_pointer_cast(self.adaptation); + case MatrixAdaptationType::NATURAL_GRADIENT: + return std::dynamic_pointer_cast(self.adaptation); + default: + case MatrixAdaptationType::COVARIANCE: + return std::dynamic_pointer_cast(self.adaptation); + } + }, + [] (Parameters& self, std::shared_ptr adaptation) + { + self.adaptation = adaptation; + }) + .def_readwrite("criteria", &Parameters::criteria) + .def_readwrite("stats", &Parameters::stats) + .def_readwrite("weights", &Parameters::weights) + .def_readwrite("pop", &Parameters::pop) + .def_readwrite("old_pop", &Parameters::old_pop) + .def_readwrite("sampler", &Parameters::sampler) + .def_readwrite("mutation", &Parameters::mutation) + .def_readwrite("selection", &Parameters::selection) + .def_readwrite("restart_strategy", &Parameters::restart_strategy) + .def_readwrite("repelling", &Parameters::repelling) + .def_readwrite("bounds", &Parameters::bounds) + .def_readwrite("center_placement", &Parameters::center_placement); } -void define_bounds(py::module &main) +void define_bounds(py::module& main) { - auto m = main.def_submodule("bounds"); - using namespace bounds; - - py::class_>(m, "BoundCorrection") - 
.def_readwrite("lb", &BoundCorrection::lb) - .def_readwrite("ub", &BoundCorrection::ub) - .def_readwrite("db", &BoundCorrection::db) - .def_readwrite("diameter", &BoundCorrection::diameter) - .def_readonly("n_out_of_bounds", &BoundCorrection::n_out_of_bounds) - .def("correct", &BoundCorrection::correct, - py::arg("population"), py::arg("m")); - - py::class_>(m, "Resample") - .def(py::init(), py::arg("lb"), py::arg("ub")); - - py::class_>(m, "NoCorrection") - .def(py::init(), py::arg("lb"), py::arg("ub")); - - py::class_>(m, "COTN") - .def(py::init(), py::arg("lb"), py::arg("ub")) - .def_readonly("sampler", &COTN::sampler); - - py::class_>(m, "Mirror") - .def(py::init(), py::arg("lb"), py::arg("ub")); - - py::class_>(m, "UniformResample") - .def(py::init(), py::arg("lb"), py::arg("ub")); - - py::class_>(m, "Saturate") - .def(py::init(), py::arg("lb"), py::arg("ub")); - - py::class_>(m, "Toroidal") - .def(py::init(), py::arg("lb"), py::arg("ub")); + auto m = main.def_submodule("bounds"); + using namespace bounds; + + py::class_>(m, "BoundCorrection") + .def_readwrite("lb", &BoundCorrection::lb) + .def_readwrite("ub", &BoundCorrection::ub) + .def_readwrite("db", &BoundCorrection::db) + .def_readwrite("diameter", &BoundCorrection::diameter) + .def_readwrite("has_bounds", &BoundCorrection::has_bounds) + .def_readonly("n_out_of_bounds", &BoundCorrection::n_out_of_bounds) + .def("correct", &BoundCorrection::correct, + py::arg("population"), py::arg("m")) + .def("delta_out_of_bounds", &BoundCorrection::delta_out_of_bounds, py::arg("xi"), py::arg("oob")) + .def("is_out_of_bounds", &BoundCorrection::is_out_of_bounds, py::arg("xi")) + ; + + py::class_>(m, "Resample") + .def(py::init(), py::arg("lb"), py::arg("ub")); + + py::class_>(m, "NoCorrection") + .def(py::init(), py::arg("lb"), py::arg("ub")); + + py::class_>(m, "COTN") + .def(py::init(), py::arg("lb"), py::arg("ub")) + .def_readonly("sampler", &COTN::sampler); + + py::class_>(m, "Mirror") + .def(py::init(), 
py::arg("lb"), py::arg("ub")); + + py::class_>(m, "UniformResample") + .def(py::init(), py::arg("lb"), py::arg("ub")); + + py::class_>(m, "Saturate") + .def(py::init(), py::arg("lb"), py::arg("ub")); + + py::class_>(m, "Toroidal") + .def(py::init(), py::arg("lb"), py::arg("ub")); } -void define_mutation(py::module &main) +void define_mutation(py::module& main) { - auto m = main.def_submodule("mutation"); - using namespace mutation; - - py::class_>(m, "ThresholdConvergence") - .def(py::init<>()) - .def_readwrite("init_threshold", &ThresholdConvergence::init_threshold) - .def_readwrite("decay_factor", &ThresholdConvergence::decay_factor) - .def("scale", &ThresholdConvergence::scale, py::arg("population"), py::arg("diameter"), py::arg("budget"), py::arg("evaluations")); - - py::class_>(m, "NoThresholdConvergence") - .def(py::init<>()); - - py::class_>(m, "SequentialSelection") - .def(py::init(), - py::arg("mirror"), - py::arg("mu"), - py::arg("seq_cuttoff_factor") = 1.0) - .def("break_conditions", &SequentialSelection::break_conditions, - py::arg("i"), - py::arg("f"), - py::arg("fopt"), - py::arg("mirror")); - - py::class_>(m, "NoSequentialSelection") - .def(py::init(), - py::arg("mirror"), - py::arg("mu"), - py::arg("seq_cuttoff_factor") = 1.0); - - py::class_>(m, "SigmaSampler") - .def(py::init(), py::arg("dimension")) - .def_readwrite("beta", &SigmaSampler::beta) - .def("sample", &SigmaSampler::sample, py::arg("sigma"), py::arg("population")); - - py::class_>(m, "NoSigmaSampler") - .def(py::init(), py::arg("dimension")); - - py::class_>(m, "Strategy") - .def("adapt", &Strategy::adapt, py::arg("weights"), - py::arg("dynamic"), - py::arg("population"), - py::arg("old_population"), - py::arg("stats"), - py::arg("lamb")) - .def_readwrite("threshold_convergence", &Strategy::tc) - .def_readwrite("sequential_selection", &Strategy::sq) - .def_readwrite("sigma_sampler", &Strategy::ss) - .def_readwrite("cs", &Strategy::cs) - .def_readwrite("sigma", &Strategy::sigma) - 
.def_readwrite("s", &Strategy::s); - - py::class_>(m, "CSA") - .def( - py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")) - .def_readwrite("damps", &CSA::damps) - .def_readwrite("expected_length_z", &CSA::expected_length_z) - .def( - "mutate", &CSA::mutate, py::arg("objective"), - py::arg("n_offspring"), - py::arg("parameters")); - - py::class_>(m, "TPA") - .def(py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")) - .def_readwrite("a_tpa", &TPA::a_tpa) - .def_readwrite("b_tpa", &TPA::b_tpa) - .def_readwrite("rank_tpa", &TPA::rank_tpa); - - py::class_>(m, "MSR") - .def(py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")); - - py::class_>(m, "PSR") - .def(py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")) - .def_readwrite("success_ratio", &PSR::success_ratio); - - py::class_>(m, "XNES") - .def(py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")); - - py::class_>(m, "MXNES") - .def(py::init, std::shared_ptr, std::shared_ptr, 
Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")); - - py::class_>(m, "LPXNES") - .def(py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")); - - py::class_>(m, "SR") - .def(py::init, std::shared_ptr, std::shared_ptr, Float, Float, Float, Float>(), - py::arg("threshold_convergence"), - py::arg("sequential_selection"), - py::arg("sigma_sampler"), - py::arg("cs"), - py::arg("damps"), - py::arg("sigma0"), - py::arg("expected_length_z")) - // .def_staticreadwrite("tgt_success_ratio", &SR::tgt_success_ratio) - ; + auto m = main.def_submodule("mutation"); + using namespace mutation; + + py::class_>(m, "ThresholdConvergence") + .def(py::init<>()) + .def_readwrite("init_threshold", &ThresholdConvergence::init_threshold) + .def_readwrite("decay_factor", &ThresholdConvergence::decay_factor) + .def("scale", &ThresholdConvergence::scale, py::arg("population"), py::arg("diameter"), py::arg("budget"), py::arg("evaluations")); + + py::class_>(m, "NoThresholdConvergence") + .def(py::init<>()); + + py::class_>(m, "SequentialSelection") + .def(py::init(), + py::arg("mirror"), + py::arg("mu"), + py::arg("seq_cuttoff_factor") = 1.0) + .def("break_conditions", &SequentialSelection::break_conditions, + py::arg("i"), + py::arg("f"), + py::arg("fopt"), + py::arg("mirror")); + + py::class_>(m, "NoSequentialSelection") + .def(py::init(), + py::arg("mirror"), + py::arg("mu"), + py::arg("seq_cuttoff_factor") = 1.0); + + py::class_>(m, "SigmaSampler") + .def(py::init(), py::arg("dimension")) + .def("sample", &SigmaSampler::sample, py::arg("sigma"), py::arg("population"), py::arg("tau")); + + py::class_>(m, 
"NoSigmaSampler") + .def(py::init(), py::arg("dimension")); + + py::class_>(m, "Strategy") + .def( + py::init< + std::shared_ptr, + std::shared_ptr, + std::shared_ptr, + Float + >(), + py::arg("threshold_convergence"), + py::arg("sequential_selection"), + py::arg("sigma_sampler"), + py::arg("sigma0")) + .def("adapt", &Strategy::adapt, py::arg("weights"), + py::arg("dynamic"), + py::arg("population"), + py::arg("old_population"), + py::arg("stats"), + py::arg("lamb")) + .def( + "mutate", &CSA::mutate, py::arg("objective"), + py::arg("n_offspring"), + py::arg("parameters")) + .def_readwrite("threshold_convergence", &Strategy::tc) + .def_readwrite("sequential_selection", &Strategy::sq) + .def_readwrite("sigma_sampler", &Strategy::ss) + .def_readwrite("sigma", &Strategy::sigma) + .def_readwrite("s", &Strategy::s) + ; + + py::class_>(m, "CSA"); + py::class_>(m, "TPA") + .def_readwrite("a_tpa", &TPA::a_tpa) + .def_readwrite("b_tpa", &TPA::b_tpa) + .def_readwrite("rank_tpa", &TPA::rank_tpa); + + py::class_>(m, "MSR"); + py::class_>(m, "PSR") + .def_readwrite("success_ratio", &PSR::success_ratio); + + py::class_>(m, "XNES"); + py::class_>(m, "MXNES"); + py::class_>(m, "LPXNES"); + py::class_>(m, "SR"); + py::class_>(m, "SA"); + + } -void define_population(py::module &main) +void define_population(py::module& main) { - py::class_(main, "Population") - .def(py::init(), py::arg("dimension"), py::arg("n")) - .def(py::init(), py::arg("X"), py::arg("Z"), py::arg("Y"), py::arg("f"), py::arg("s")) - .def("sort", &Population::sort) - .def("resize_cols", &Population::resize_cols, py::arg("size")) - .def("keep_only", &Population::keep_only, py::arg("idx")) - .def_property_readonly("n_finite", &Population::n_finite) - .def("__add__", &Population::operator+=, py::arg("other")) - .def_readwrite("X", &Population::X) - .def_readwrite("Z", &Population::Z) - .def_readwrite("Y", &Population::Y) - .def_readwrite("f", &Population::f) - .def_readwrite("s", &Population::s) - .def_readwrite("d", 
&Population::d) - .def_readwrite("n", &Population::n); + py::class_(main, "Population") + .def(py::init(), py::arg("dimension"), py::arg("n")) + .def(py::init(), py::arg("X"), py::arg("Z"), py::arg("Y"), py::arg("f"), py::arg("s")) + .def("sort", &Population::sort) + .def("resize_cols", &Population::resize_cols, py::arg("size")) + .def("keep_only", &Population::keep_only, py::arg("idx")) + .def_property_readonly("n_finite", &Population::n_finite) + .def("__add__", &Population::operator+=, py::arg("other")) + .def_readwrite("X", &Population::X) + .def_readwrite("Z", &Population::Z) + .def_readwrite("Y", &Population::Y) + .def_readwrite("f", &Population::f) + .def_readwrite("s", &Population::s) + .def_readwrite("d", &Population::d) + .def_readwrite("n", &Population::n) + .def_readwrite("t", &Population::t); } class constants_w -{ -}; +{}; -void define_constants(py::module &m) +void define_constants(py::module& m) { - py::class_(m, "constants") - - .def_property_static( - "cache_max_doubles", - [](py::object) - { return constants::cache_max_doubles; }, - [](py::object, size_t a) - { constants::cache_max_doubles = a; }) - .def_property_static( - "cache_min_samples", - [](py::object) - { return constants::cache_min_samples; }, - [](py::object, size_t a) - { constants::cache_min_samples = a; }) - .def_property_static( - "cache_samples", - [](py::object) - { return constants::cache_samples; }, - [](py::object, bool a) - { constants::cache_samples = a; }) - .def_property_static( - "clip_sigma", - [](py::object) - { return constants::clip_sigma; }, - [](py::object, bool a) - { constants::clip_sigma = a; }) - ; + py::class_(m, "constants") + .def_property_static( + "cache_max_doubles", + [] (py::object) + { return constants::cache_max_doubles; }, + [] (py::object, size_t a) + { constants::cache_max_doubles = a; }) + .def_property_static( + "cache_min_samples", + [] (py::object) + { return constants::cache_min_samples; }, + [] (py::object, size_t a) + { 
constants::cache_min_samples = a; }) + .def_property_static( + "cache_samples", + [] (py::object) + { return constants::cache_samples; }, + [] (py::object, bool a) + { constants::cache_samples = a; }) + .def_property_static( + "clip_sigma", + [] (py::object) + { return constants::clip_sigma; }, + [] (py::object, bool a) + { constants::clip_sigma = a; }) + .def_property_static( + "use_box_muller", + [] (py::object) + { return constants::use_box_muller; }, + [] (py::object, bool a) + { constants::use_box_muller = a; }) + ; } -struct PyCriterion: restart::Criterion +struct PyCriterion : restart::Criterion { - PyCriterion(const std::string& name): restart::Criterion(name) {} + PyCriterion(const std::string& name) : restart::Criterion(name) {} - void update(const parameters::Parameters &p) override - { - PYBIND11_OVERRIDE_PURE(void, restart::Criterion, update, p); - } + void update(const parameters::Parameters& p) override + { + PYBIND11_OVERRIDE_PURE(void, restart::Criterion, update, p); + } - void on_reset(const parameters::Parameters &p) override - { - PYBIND11_OVERRIDE(void, restart::Criterion, on_reset, p); - } + void on_reset(const parameters::Parameters& p) override + { + PYBIND11_OVERRIDE(void, restart::Criterion, on_reset, p); + } }; -void define_restart_criteria(py::module &main) +void define_restart_criteria(py::module& main) { - auto m = main.def_submodule("restart"); - using namespace restart; - - py::class_>(m, "Criterion") - .def(py::init(), py::arg("name")) - .def("on_reset", &Criterion::on_reset, py::arg("parameters")) - .def("update", &Criterion::update, py::arg("parameters")) - .def("reset", &Criterion::reset, py::arg("parameters")) - .def_readwrite("met", &Criterion::met) - .def_readwrite("name", &Criterion::name) - .def_readwrite("last_restart", &Criterion::last_restart) - .def("__repr__", [](Criterion &self) - { return "<" + self.name + " met: " + std::to_string(self.met) + ">"; }); - ; - - py::class_>(m, "ExceededMaxIter") - .def(py::init<>()) - 
.def_readwrite("max_iter", &ExceededMaxIter::max_iter); - - py::class_>(m, "NoImprovement") - .def(py::init<>()) - .def_readwrite("n_bin", &NoImprovement::n_bin) - .def_readwrite("best_fitnesses", &NoImprovement::best_fitnesses); - - py::class_>(m, "MaxSigma") - .def(py::init<>()) - .def_readwrite_static("tolerance", &MaxSigma::tolerance); - - py::class_>(m, "MinSigma") - .def(py::init<>()) - .def_readwrite_static("tolerance", &MinSigma::tolerance); - - py::class_>(m, "UnableToAdapt") - .def(py::init<>()); - - py::class_>(m, "FlatFitness") - .def(py::init<>()) - .def_readwrite("max_flat_fitness", &FlatFitness::max_flat_fitness) - .def_readwrite("flat_fitness_index", &FlatFitness::flat_fitness_index) - .def_readwrite("flat_fitnesses", &FlatFitness::flat_fitnesses); - - py::class_>(m, "TolX") - .def(py::init<>()) - .def_readwrite("tolx_vector", &TolX::tolx_vector) - .def_readwrite_static("tolerance", &TolX::tolerance) - ; - - py::class_>(m, "MaxDSigma") - .def(py::init<>()) - .def_readwrite_static("tolerance", &MaxDSigma::tolerance); - - py::class_>(m, "MinDSigma") - .def(py::init<>()) - .def_readwrite_static("tolerance", &MinDSigma::tolerance); - - py::class_>(m, "ConditionC") - .def(py::init<>()) - .def_readwrite_static("tolerance", &ConditionC::tolerance); - - py::class_>(m, "NoEffectAxis") - .def(py::init<>()) - .def_readwrite_static("tolerance", &NoEffectAxis::tolerance) - ; - - py::class_>(m, "NoEffectCoord") - .def(py::init<>()) - .def_readwrite_static("tolerance", &NoEffectCoord::tolerance); - - py::class_>(m, "Stagnation") - .def(py::init<>()) - .def_readwrite("n_stagnation", &Stagnation::n_stagnation) - .def_readwrite("median_fitnesses", &Stagnation::median_fitnesses) - .def_readwrite("best_fitnesses", &Stagnation::best_fitnesses) - .def_readwrite_static("tolerance", &Stagnation::tolerance); - - py::class_(m, "Criteria") - .def_readwrite("items", &Criteria::items) - .def("reset", &Criteria::reset, py::arg("parameters")) - .def("update", &Criteria::update, 
py::arg("parameters")) - .def_readonly("any", &Criteria::any); + auto m = main.def_submodule("restart"); + using namespace restart; + + py::class_>(m, "Criterion") + .def(py::init(), py::arg("name")) + .def("on_reset", &Criterion::on_reset, py::arg("parameters")) + .def("update", &Criterion::update, py::arg("parameters")) + .def("reset", &Criterion::reset, py::arg("parameters")) + .def_readwrite("met", &Criterion::met) + .def_readwrite("name", &Criterion::name) + .def_readwrite("last_restart", &Criterion::last_restart) + .def("__repr__", [] (Criterion& self) + { return "<" + self.name + " met: " + std::to_string(self.met) + ">"; }); + ; + + py::class_>(m, "ExceededMaxIter") + .def(py::init<>()) + .def_readwrite("max_iter", &ExceededMaxIter::max_iter); + + py::class_>(m, "NoImprovement") + .def(py::init<>()) + .def_readwrite("n_bin", &NoImprovement::n_bin) + .def_readwrite("best_fitnesses", &NoImprovement::best_fitnesses); + + py::class_>(m, "MaxSigma") + .def(py::init<>()) + .def_readwrite_static("tolerance", &MaxSigma::tolerance); + + py::class_>(m, "MinSigma") + .def(py::init<>()) + .def_readwrite_static("tolerance", &MinSigma::tolerance); + + py::class_>(m, "UnableToAdapt") + .def(py::init<>()); + + py::class_>(m, "FlatFitness") + .def(py::init<>()) + .def_readwrite("max_flat_fitness", &FlatFitness::max_flat_fitness) + .def_readwrite("flat_fitness_index", &FlatFitness::flat_fitness_index) + .def_readwrite("flat_fitnesses", &FlatFitness::flat_fitnesses); + + py::class_>(m, "TolX") + .def(py::init<>()) + .def_readwrite("tolx_vector", &TolX::tolx_vector) + .def_readwrite_static("tolerance", &TolX::tolerance) + ; + + py::class_>(m, "MaxDSigma") + .def(py::init<>()) + .def_readwrite_static("tolerance", &MaxDSigma::tolerance); + + py::class_>(m, "MinDSigma") + .def(py::init<>()) + .def_readwrite_static("tolerance", &MinDSigma::tolerance); + + py::class_>(m, "ConditionC") + .def(py::init<>()) + .def_readwrite_static("tolerance", &ConditionC::tolerance); + + 
py::class_>(m, "NoEffectAxis") + .def(py::init<>()) + .def_readwrite_static("tolerance", &NoEffectAxis::tolerance) + ; + + py::class_>(m, "NoEffectCoord") + .def(py::init<>()) + .def_readwrite_static("tolerance", &NoEffectCoord::tolerance); + + py::class_>(m, "Stagnation") + .def(py::init<>()) + .def_readwrite("n_stagnation", &Stagnation::n_stagnation) + .def_readwrite("median_fitnesses", &Stagnation::median_fitnesses) + .def_readwrite("best_fitnesses", &Stagnation::best_fitnesses) + .def_readwrite_static("tolerance", &Stagnation::tolerance); + + py::class_(m, "Criteria") + .def_readwrite("items", &Criteria::items) + .def("reset", &Criteria::reset, py::arg("parameters")) + .def("update", &Criteria::update, py::arg("parameters")) + .def("reason", &Criteria::reason) + .def("any", &Criteria::any); + + py::class_>(m, "TooMuchRepelling") + .def(py::init<>()) + .def_readwrite_static("tolerance", &TooMuchRepelling::tolerance); } -void define_restart_strategy(py::module &main) +void define_restart_strategy(py::module& main) { - auto m = main.def_submodule("restart"); - using namespace restart; - - py::class_>(m, "Strategy") - // .def("evaluate", &Strategy::evaluate, py::arg("objective"), py::arg("parameters")) - // .def_readwrite("criteria", &Strategy::criteria) - .def("update", &Strategy::update, py::arg("parameters")); - ; - - py::class_>(m, "IPOP") - // .def(py::init(), py::arg("sigma"), py::arg("dimension"), py::arg("lamb")) - .def_readwrite("ipop_factor", &IPOP::ipop_factor); - - py::class_>(m, "BIPOP") - // .def(py::init(), py::arg("sigma"), py::arg("dimension"), py::arg("lamb"), py::arg("mu"), py::arg("budget")) - .def("large", &BIPOP::large) - .def_readwrite("mu_factor", &BIPOP::mu_factor) - .def_readwrite("lambda_init", &BIPOP::lambda_init) - .def_readwrite("budget", &BIPOP::budget) - .def_readwrite("lambda_large", &BIPOP::lambda_large) - .def_readwrite("lambda_small", &BIPOP::lambda_small) - .def_readwrite("budget_small", &BIPOP::budget_small) - 
.def_readwrite("budget_large", &BIPOP::budget_large) - .def_readonly("used_budget", &BIPOP::used_budget); + auto m = main.def_submodule("restart"); + using namespace restart; + + py::class_>(m, "Strategy") + // .def("evaluate", &Strategy::evaluate, py::arg("objective"), py::arg("parameters")) + // .def_readwrite("criteria", &Strategy::criteria) + .def("update", &Strategy::update, py::arg("parameters")); + ; + + py::class_>(m, "IPOP") + // .def(py::init(), py::arg("sigma"), py::arg("dimension"), py::arg("lamb")) + .def_readwrite("ipop_factor", &IPOP::ipop_factor); + + py::class_>(m, "BIPOP") + // .def(py::init(), py::arg("sigma"), py::arg("dimension"), py::arg("lamb"), py::arg("mu"), py::arg("budget")) + .def("large", &BIPOP::large) + .def_readwrite("mu_factor", &BIPOP::mu_factor) + .def_readwrite("lambda_init", &BIPOP::lambda_init) + .def_readwrite("budget", &BIPOP::budget) + .def_readwrite("lambda_large", &BIPOP::lambda_large) + .def_readwrite("lambda_small", &BIPOP::lambda_small) + .def_readwrite("budget_small", &BIPOP::budget_small) + .def_readwrite("budget_large", &BIPOP::budget_large) + .def_readonly("used_budget", &BIPOP::used_budget); } -void define_cmaes(py::module &m) +void define_cmaes(py::module& m) { - py::class_(m, "ModularCMAES") - .def(py::init>(), py::arg("parameters")) - .def(py::init(), py::arg("dimension")) - .def(py::init(), py::arg("settings")) - .def("recombine", &ModularCMAES::recombine) - .def("mutate", &ModularCMAES::mutate, py::arg("objective")) - .def("select", &ModularCMAES::select) - .def("adapt", &ModularCMAES::adapt) - .def("step", &ModularCMAES::step, py::arg("objective")) - .def("__call__", &ModularCMAES::operator(), py::arg("objective")) - .def("run", &ModularCMAES::operator(), py::arg("objective")) - .def("break_conditions", &ModularCMAES::break_conditions) - .def_readonly("p", &ModularCMAES::p); + py::class_(m, "ModularCMAES") + .def(py::init>(), py::arg("parameters")) + .def(py::init(), py::arg("dimension")) + .def(py::init(), 
py::arg("settings")) + .def("recombine", &ModularCMAES::recombine) + .def("mutate", &ModularCMAES::mutate, py::arg("objective")) + .def("select", &ModularCMAES::select) + .def("adapt", &ModularCMAES::adapt) + .def("step", &ModularCMAES::step, py::arg("objective")) + .def("__call__", &ModularCMAES::operator(), py::arg("objective")) + .def("run", &ModularCMAES::operator(), py::arg("objective")) + .def("break_conditions", &ModularCMAES::break_conditions) + .def_readonly("p", &ModularCMAES::p); } -void define_es(py::module &main) +void define_es(py::module& main) { - auto m = main.def_submodule("es"); - parameters::Modules default_modules; - using namespace es; - py::class_>(m, "OnePlusOneES") - .def( - py::init< - size_t, - Vector, - Float, - Float, - size_t, - Float, - parameters::Modules>(), - py::arg("d"), - py::arg("x0"), - py::arg("f0"), - py::arg("sigma0") = 1.0, - py::arg("budget") = 10'000, - py::arg("target") = 1e-8, - py::arg("modules") = default_modules) - .def("__call__", &OnePlusOneES::operator()) - .def("step", &OnePlusOneES::step) - .def("sample", &OnePlusOneES::sample) - .def_readwrite("d", &OnePlusOneES::d) - .def_readwrite("sigma", &OnePlusOneES::sigma) - .def_readwrite("decay", &OnePlusOneES::decay) - .def_readwrite("x", &OnePlusOneES::x) - .def_readwrite("f", &OnePlusOneES::f) - .def_readwrite("t", &OnePlusOneES::t) - .def_readwrite("budget", &OnePlusOneES::budget) - .def_readwrite("target", &OnePlusOneES::target) - .def_readwrite("sampler", &OnePlusOneES::sampler) - .def_readwrite("rejection_sampling", &OnePlusOneES::rejection_sampling) - .def_readwrite("corrector", &OnePlusOneES::corrector); - - py::class_>(m, "MuCommaLambdaES") - .def( - py::init< - size_t, - Vector, - Float, - size_t, - Float, - parameters::Modules>(), - py::arg("d"), - py::arg("x0"), - py::arg("sigma0") = 1.0, - py::arg("budget") = 10'000, - py::arg("target") = 1e-8, - py::arg("modules") = default_modules) - .def("__call__", &MuCommaLambdaES::operator()) - .def("step", 
&MuCommaLambdaES::step) - .def("sample", &MuCommaLambdaES::sample) - .def_readwrite("d", &MuCommaLambdaES::d) - .def_readwrite("lamb", &MuCommaLambdaES::lambda) - .def_readwrite("mu", &MuCommaLambdaES::mu) - - .def_readwrite("sigma", &MuCommaLambdaES::sigma) - .def_readwrite("m", &MuCommaLambdaES::m) - - .def_readwrite("X", &MuCommaLambdaES::X) - .def_readwrite("S", &MuCommaLambdaES::S) - .def_readwrite("f", &MuCommaLambdaES::f) - - .def_readwrite("tau", &MuCommaLambdaES::tau) - .def_readwrite("tau_i", &MuCommaLambdaES::tau_i) - .def_readwrite("mu_inv", &MuCommaLambdaES::mu_inv) - - .def_readwrite("f_min", &MuCommaLambdaES::f_min) - .def_readwrite("x_min", &MuCommaLambdaES::x_min) - .def_readwrite("t", &MuCommaLambdaES::t) - .def_readwrite("e", &MuCommaLambdaES::e) - .def_readwrite("budget", &MuCommaLambdaES::budget) - .def_readwrite("target", &MuCommaLambdaES::target) - .def_readwrite("sampler", &MuCommaLambdaES::sampler) - .def_readwrite("sigma_sampler", &MuCommaLambdaES::sigma_sampler) - .def_readwrite("rejection_sampling", &MuCommaLambdaES::rejection_sampling) - .def_readwrite("corrector", &MuCommaLambdaES::corrector); + auto m = main.def_submodule("es"); + parameters::Modules default_modules; + using namespace es; + py::class_>(m, "OnePlusOneES") + .def( + py::init< + size_t, + Vector, + Float, + Float, + size_t, + Float, + parameters::Modules>(), + py::arg("d"), + py::arg("x0"), + py::arg("f0"), + py::arg("sigma0") = 1.0, + py::arg("budget") = 10'000, + py::arg("target") = 1e-8, + py::arg("modules") = default_modules) + .def("__call__", &OnePlusOneES::operator()) + .def("step", &OnePlusOneES::step) + .def("sample", &OnePlusOneES::sample) + .def_readwrite("d", &OnePlusOneES::d) + .def_readwrite("sigma", &OnePlusOneES::sigma) + .def_readwrite("decay", &OnePlusOneES::decay) + .def_readwrite("x", &OnePlusOneES::x) + .def_readwrite("f", &OnePlusOneES::f) + .def_readwrite("t", &OnePlusOneES::t) + .def_readwrite("budget", &OnePlusOneES::budget) + 
.def_readwrite("target", &OnePlusOneES::target) + .def_readwrite("sampler", &OnePlusOneES::sampler) + .def_readwrite("rejection_sampling", &OnePlusOneES::rejection_sampling) + .def_readwrite("corrector", &OnePlusOneES::corrector); + + py::class_>(m, "MuCommaLambdaES") + .def( + py::init< + size_t, + Vector, + Float, + size_t, + Float, + parameters::Modules>(), + py::arg("d"), + py::arg("x0"), + py::arg("sigma0") = 1.0, + py::arg("budget") = 10'000, + py::arg("target") = 1e-8, + py::arg("modules") = default_modules) + .def("__call__", &MuCommaLambdaES::operator()) + .def("step", &MuCommaLambdaES::step) + .def("sample", &MuCommaLambdaES::sample) + .def_readwrite("d", &MuCommaLambdaES::d) + .def_readwrite("lamb", &MuCommaLambdaES::lambda) + .def_readwrite("mu", &MuCommaLambdaES::mu) + + .def_readwrite("sigma", &MuCommaLambdaES::sigma) + .def_readwrite("m", &MuCommaLambdaES::m) + + .def_readwrite("X", &MuCommaLambdaES::X) + .def_readwrite("S", &MuCommaLambdaES::S) + .def_readwrite("f", &MuCommaLambdaES::f) + + .def_readwrite("tau", &MuCommaLambdaES::tau) + .def_readwrite("tau_i", &MuCommaLambdaES::tau_i) + .def_readwrite("mu_inv", &MuCommaLambdaES::mu_inv) + + .def_readwrite("f_min", &MuCommaLambdaES::f_min) + .def_readwrite("x_min", &MuCommaLambdaES::x_min) + .def_readwrite("t", &MuCommaLambdaES::t) + .def_readwrite("e", &MuCommaLambdaES::e) + .def_readwrite("budget", &MuCommaLambdaES::budget) + .def_readwrite("target", &MuCommaLambdaES::target) + .def_readwrite("sampler", &MuCommaLambdaES::sampler) + .def_readwrite("sigma_sampler", &MuCommaLambdaES::sigma_sampler) + .def_readwrite("rejection_sampling", &MuCommaLambdaES::rejection_sampling) + .def_readwrite("corrector", &MuCommaLambdaES::corrector); } PYBIND11_MODULE(cmaescpp, m) { - define_constants(m); - define_options(m); - define_utils(m); - define_population(m); - define_samplers(m); - define_mutation(m); - define_restart_criteria(m); - define_restart_strategy(m); - define_matrix_adaptation(m); - 
define_center_placement(m); - define_repelling(m); - define_parameters(m); - define_bounds(m); - define_selection(m); - define_cmaes(m); - define_es(m); + define_constants(m); + define_options(m); + define_utils(m); + define_population(m); + define_samplers(m); + define_mutation(m); + define_restart_criteria(m); + define_restart_strategy(m); + define_matrix_adaptation(m); + define_center_placement(m); + define_repelling(m); + define_parameters(m); + define_bounds(m); + define_selection(m); + define_cmaes(m); + define_es(m); } diff --git a/src/main.cpp b/src/main.cpp index 1cf686b..f08bc86 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1,5 +1,7 @@ #include "c_maes.hpp" +#include "to_string.hpp" #include +#include using namespace std::placeholders; using std::chrono::high_resolution_clock; @@ -7,17 +9,30 @@ using std::chrono::duration_cast; using std::chrono::duration; using std::chrono::milliseconds; +static int dim = 2; +static bool rotated = true; +static functions::ObjectiveFunction fun_t = functions::ObjectiveFunction::SPHERE; +static size_t budget = dim * 10000; -struct Function +struct Ellipse { - size_t evals = 0; + size_t evals; + Matrix R; + FunctionType function; + + Ellipse(const int dim, const bool rotated, const functions::ObjectiveFunction ft) : + evals(0), + R{ rotated ? 
functions::random_rotation_matrix(dim, 1) : Matrix::Identity(dim, dim) }, + function(functions::get(ft)) + { + } Float operator()(const Vector& x) { evals++; - const auto x_shift = (x.array() - 1.).matrix(); - return functions::ellipse(x_shift); + const auto x_shift = R * (x.array() - 1.).matrix(); + return function(x_shift); } }; @@ -32,38 +47,129 @@ void call(Callable& o) struct Timer { - std::chrono::time_point t1; + std::chrono::time_point t1; Timer() : t1(high_resolution_clock::now()) {} ~Timer() { const auto t2 = high_resolution_clock::now(); const auto ms_int = duration_cast(t2 - t1); - std::cout << "Time elapsed: " << static_cast(ms_int.count()) / 1000.0 << "s\n"; + std::cout << "Time elapsed: " << std::defaultfloat << std::setprecision(5) << + static_cast(ms_int.count()) / 1000.0 << "s\n\n"; } }; +struct Run { + int budget_used; + double fval; + bool solved; +}; -int main() + +Run run_modcma(parameters::MatrixAdaptationType mat_t, parameters::StepSizeAdaptation ssa) { - rng::set_seed(42); - const size_t dim = 10; + //rng::set_seed(412); parameters::Modules m; - parameters::Settings settings(dim, m, 1e-8, std::nullopt, 1000 * dim, 2.0, 1); + m.matrix_adaptation = mat_t; + m.ssa = ssa; + m.active = true; + //m.sampler = parameters::BaseSampler::HALTON; + m.restart_strategy = parameters::RestartStrategyType::STOP; + //m.sample_transformation = parameters::SampleTranformerType::CAUCHY; + //m.elitist = false; + //m.sequential_selection = true; + //m.threshold_convergence = true; + //m.weights = parameters::RecombinationWeights::EQUAL; + //m.repelling_restart = true; + + parameters::Settings settings( + dim, + m, + -std::numeric_limits::infinity(), + std::nullopt, + budget, + 2.0//, + //500 + //1, + //1 + ); + settings.verbose = true; auto p = std::make_shared(settings); - auto cma = ModularCMAES(p); - FunctionType f = Function(); - + Timer t; + FunctionType f = Ellipse(dim, rotated, fun_t); while (cma.step(f)) { - //std::cout << cma.p->stats << std::endl; - 
//std::cout << cma.p->mutation->sigma << std::endl; - //auto sr = std::dynamic_pointer_cast(cma.p->mutation); - //std::cout << "p_succ: " << sr->success_ratio << ", " << sr->max_success_ratio << std::endl; + + /* std::cout << "evals: " << cma.p->stats.evaluations << "/" << budget << ": "; + std::cout << "iters: " << cma.p->stats.t << ": "; + std::cout << "sigma: " << cma.p->mutation->sigma << ": "; + std::cout << "best_y: " << cma.p->stats.global_best.y; + std::cout << " n_resamples: " << cma.p->repelling->attempts; + std::cout << std::endl;*/ + + if (cma.p->stats.global_best.y < 1e-9) + break; } - std::cout << cma.p->stats << std::endl; + std::cout << "modcmaes: " << parameters::to_string(settings.modules.matrix_adaptation) << std::defaultfloat; + std::cout << " - " << parameters::to_string(settings.modules.ssa); + if (m.active) + std::cout << " ACTIVE"; + + if (m.elitist) + std::cout << " ELITIST"; + + std::cout << "\nfunction: " << functions::to_string(fun_t) << " " << dim << "D"; + if (rotated) + std::cout << " (rotated)"; + const Float budget_used = static_cast(cma.p->stats.evaluations) / static_cast(budget) * 100; + std::cout << "\nevals: " << cma.p->stats.evaluations << "/" << budget; + std::cout << " ~ (" << std::defaultfloat << std::setprecision(3) << budget_used << "%)" << std::endl; + std::cout << "iters: " << cma.p->stats.t << std::endl; + std::cout << "updates: " << cma.p->stats.n_updates << "\n" << std::scientific << std::setprecision(3); + std::cout << "sigma: " << cma.p->mutation->sigma << std::endl; + std::cout << "best_y: " << cma.p->stats.global_best.y << std::endl; + std::cout << "solved: " << std::boolalpha << (cma.p->stats.global_best.y < 1e-8) << std::endl; + return { + (int)cma.p->stats.evaluations, + cma.p->stats.global_best.y, + cma.p->stats.global_best.y < 1e-8 + }; +} + +void ert_exp(parameters::MatrixAdaptationType mat_t, parameters::StepSizeAdaptation ssa, int n_runs) +{ + double rt = 0; + int n_succ = 0; + for (int i = 0; i < n_runs; 
i++) { + auto run_dat = run_modcma(mat_t, ssa); + rt += run_dat.budget_used; + n_succ += run_dat.solved; + } + std::cout << "ERT:"; + if (n_succ == 0) + { + std::cout << "inf"; + } + else { + std::cout << std::defaultfloat << rt / n_succ; + std::cout << " SR: " << n_succ << "/" << n_runs; + } + std::cout << std::endl; + } + +int main() +{ + auto ssa = parameters::StepSizeAdaptation::CSA; + ert_exp(parameters::MatrixAdaptationType::COVARIANCE, ssa, 100); + //run_modcma(parameters::MatrixAdaptationType::SEPARABLE, ssa);/* + //run_modcma(parameters::MatrixAdaptationType::MATRIX, ssa); + //run_modcma(parameters::MatrixAdaptationType::CHOLESKY, ssa); + //run_modcma(parameters::MatrixAdaptationType::COVARIANCE, ssa); + //run_modcma(parameters::MatrixAdaptationType::COVARIANCE_NO_EIGV, ssa); + //run_modcma(parameters::MatrixAdaptationType::NATURAL_GRADIENT, ssa);*/ +} \ No newline at end of file diff --git a/src/matrix_adaptation.cpp b/src/matrix_adaptation.cpp index ef90444..b94412d 100644 --- a/src/matrix_adaptation.cpp +++ b/src/matrix_adaptation.cpp @@ -4,54 +4,114 @@ namespace matrix_adaptation { using namespace parameters; + Vector Adaptation::invert_x(const Vector& xi, const Float sigma) { return (xi - m) / sigma; } - void CovarianceAdaptation::adapt_evolution_paths(const Population& pop, const Weights& w, - const std::shared_ptr& mutation, - const Stats& stats, const size_t mu, const size_t lambda) + static void one_plus_one_path_update( + Vector& path, + const Population& pop, + const parameters::Stats& stats, + const Float c, + const Float sqrt_c_mueff, + const Vector& v + ) { - dm = (m - m_old) / mutation->sigma; - ps = (1.0 - mutation->cs) * ps + (sqrt(mutation->cs * (2.0 - mutation->cs) * w.mueff) * inv_root_C * dm); + constexpr static Float max_success_ratio = 0.44; + if (!stats.has_improved) + return; - const Float actual_ps_length = ps.norm() / sqrt( - 1.0 - pow(1.0 - mutation->cs, 2.0 * (stats.evaluations / lambda))); - const Float expected_ps_length = 
(1.4 + (2.0 / (dd + 1.0))) * expected_length_z; + path = (1.0 - c) * path; + if (stats.success_ratio < max_success_ratio) + path += sqrt_c_mueff * v; + } - hs = actual_ps_length < expected_ps_length; - pc = (1.0 - w.cc) * pc + (hs * sqrt(w.cc * (2.0 - w.cc) * w.mueff)) * dm; + void Adaptation::adapt_evolution_paths(const Population& pop, const Weights& w, + const Stats& stats, const parameters::Settings& settings, const size_t lambda, const size_t mu) + { + const auto sigma = pop.s.mean(); + dm = (m - m_old) / sigma; + dz = pop.Z.leftCols(mu) * w.positive.head(mu); + adapt_evolution_paths_inner(pop, w, stats, settings, mu, lambda); } - void CovarianceAdaptation::adapt_covariance_matrix(const Weights& w, const Modules& m, const Population& pop, - const size_t mu) + void Adaptation::adapt_ps(const parameters::Weights& w) { - const auto rank_one = w.c1 * pc * pc.transpose(); - const auto dhs = (1 - hs) * w.cc * (2.0 - w.cc); - const auto old_c = (1 - (w.c1 * dhs) - w.c1 - (w.cmu * w.positive.sum())) * C; + ps = (1.0 - w.cs) * ps + (w.sqrt_cs_mueff * dz); + } - Matrix rank_mu; - if (m.active) + void None::adapt_evolution_paths_inner(const Population& pop, const Weights& w, + const Stats& stats, const parameters::Settings& settings, const size_t mu, const size_t lambda) + { + if (!settings.one_plus_one) + adapt_ps(w); + } + + Vector None::compute_y(const Vector& zi) + { + return zi; + } + + + Vector None::invert_y(const Vector& yi) + { + return yi; + } + + void CovarianceAdaptation::adapt_ps(const Weights& w) + { + ps = (1.0 - w.cs) * ps + (w.sqrt_cs_mueff * inv_root_C * dm); + } + + void CovarianceAdaptation::adapt_evolution_paths_inner(const Population& pop, const Weights& w, + const Stats& stats, const parameters::Settings& settings, const size_t mu, const size_t lambda) + { + if (settings.one_plus_one) { - auto weights = w.weights.topRows(pop.Y.cols()); - rank_mu = w.cmu * ((pop.Y.array().rowwise() * weights.array().transpose()).matrix() * pop.Y.transpose()); + 
one_plus_one_path_update(pc, pop, stats, w.cc, w.sqrt_cc_mueff, pop.Y.col(0)); + return; } - else - { - rank_mu = w.cmu * ((pop.Y.leftCols(mu).array().rowwise() * w.positive.array().transpose()).matrix() * pop.Y. - leftCols(mu).transpose()); + + adapt_ps(w); + const Float actual_ps_length = ps.norm() / sqrt( + 1.0 - pow(1.0 - w.cs, 2.0 * (stats.evaluations / lambda))); - } - C = old_c + rank_one + rank_mu; + hs = actual_ps_length < w.expected_length_ps; + pc = (1.0 - w.cc) * pc + (hs * w.sqrt_cc_mueff) * dm; + } + + void CovarianceAdaptation::adapt_covariance_matrix( + const Weights& w, + const Modules& m, + const Population& pop, + const size_t mu + ) + { + const auto dhs = (1.0 - hs) * w.cc * (2.0 - w.cc); + const auto& rank_one = w.c1 * pc * pc.transpose(); + + const auto& weights = m.active ? w.weights.topRows(pop.Y.cols()) : w.positive; + const auto& popY = m.active ? pop.Y : pop.Y.leftCols(mu); + + const Float decay = (1 - (w.c1 * dhs) - w.c1 - (w.cmu * weights.sum())); + const auto& old_c = decay * C; - C = C.triangularView().toDenseMatrix() + - C.triangularView().toDenseMatrix().transpose(); + Vector rank_mu_w = weights.eval(); + for (size_t i = mu; i < weights.size() - mu; i++) + rank_mu_w(i) *= dd / (inv_root_C * popY.col(i)).squaredNorm(); + + const auto& rank_mu = w.cmu * (popY * rank_mu_w.asDiagonal() * popY.transpose()); + C = old_c + rank_one + rank_mu; + + C = 0.5 * (C + C.transpose().eval()); } bool CovarianceAdaptation::perform_eigendecomposition(const Settings& settings) { const Eigen::SelfAdjointEigenSolver eigen_solver(C); + if (eigen_solver.info() != Eigen::Success) { if (settings.verbose) @@ -72,167 +132,406 @@ namespace matrix_adaptation } return false; } - inv_C = ((B * d.cwiseInverse().asDiagonal()) * B.transpose()); - d = d.cwiseSqrt(); - inv_root_C = (B * d.cwiseInverse().asDiagonal()) * B.transpose(); + + d.noalias() = d.cwiseSqrt().eval(); + inv_root_C.noalias() = B * d.cwiseInverse().asDiagonal() * B.transpose(); + A.noalias() = B * 
d.asDiagonal(); return true; } - bool CovarianceAdaptation::adapt_matrix(const Weights& w, const Modules& m, const Population& pop, const size_t mu, - const Settings& settings, const parameters::Stats& stats) + bool CovarianceAdaptation::adapt_matrix_inner(const Weights& w, const Modules& m, const Population& pop, const size_t mu, + const Settings& settings, parameters::Stats& stats) { - adapt_covariance_matrix(w, m, pop, mu); - return perform_eigendecomposition(settings); + + if (static_cast(stats.t) >= static_cast(stats.last_update) + w.lazy_update_interval) + { + stats.last_update = stats.t; + stats.n_updates++; + adapt_covariance_matrix(w, m, pop, mu); + auto succ = perform_eigendecomposition(settings); + if (!succ) + { + std::cout << "t: " << stats.t << ". "; + std::cout << "Eigendecomposition failed! C:\n"; + std::cout << C << std::endl << std::endl; + } + return succ; + } + return true; + } - void CovarianceAdaptation::restart(const Settings& settings) + void CovarianceAdaptation::restart(const Settings& settings, const Float sigma) { + Adaptation::restart(settings, sigma); B = Matrix::Identity(settings.dim, settings.dim); C = Matrix::Identity(settings.dim, settings.dim); + A = Matrix::Identity(settings.dim, settings.dim); inv_root_C = Matrix::Identity(settings.dim, settings.dim); - inv_C = Matrix::Identity(settings.dim, settings.dim); d.setOnes(); - m = settings.x0.value_or(Vector::Zero(settings.dim)); - m_old.setZero(); - dm.setZero(); pc.setZero(); - ps.setZero(); } Vector CovarianceAdaptation::compute_y(const Vector& zi) { - return B * (d.asDiagonal() * zi); + return A * zi; } Vector CovarianceAdaptation::invert_y(const Vector& yi) { - return d.cwiseInverse().asDiagonal() * (B.transpose() * yi); + return (B.transpose() * yi).cwiseQuotient(d); + } + + + void SeparableAdaptation::adapt_evolution_paths_inner(const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t 
lambda) + { + if (settings.one_plus_one) + { + one_plus_one_path_update(pc, pop, stats, w.cc, w.sqrt_cc_mueff, pop.Y.col(0)); + return; + } + + adapt_ps(w); + + const Float actual_ps_length = ps.norm() / sqrt( + 1.0 - pow(1.0 - w.cs, 2.0 * (stats.evaluations / lambda))); + + hs = actual_ps_length < w.expected_length_ps; + pc = (1.0 - w.cc) * pc + (hs * w.sqrt_cc_mueff) * dm; + } + + bool SeparableAdaptation::adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) + { + + stats.last_update = stats.t; + stats.n_updates++; + + const auto dhs = (1 - hs) * w.cc * (2.0 - w.cc); + + const auto& weights = m.active ? w.weights.topRows(pop.Y.cols()) : w.positive; + const auto& popY = m.active ? pop.Y : pop.Y.leftCols(mu); + const auto decay_c = (1 - (w.c1 * dhs) - w.c1 - (w.cmu * weights.sum())); + + for (size_t j = 0; j < settings.dim; j++) + { + const auto rank_mu = (popY.row(j).array().pow(2) * weights.transpose().array()).sum(); + c(j) = (decay_c * c(j)) + (w.c1 * pow(pc(j), 2)) + (w.cmu * rank_mu); + c(j) = std::max(c(j), Float{ 1e-12 }); + d(j) = std::sqrt(c(j)); + } + + return true; + } + + void SeparableAdaptation::restart(const parameters::Settings& settings, const Float sigma) + { + Adaptation::restart(settings, sigma); + c.setOnes(); + d.setOnes(); + pc.setZero(); + } + + Vector SeparableAdaptation::compute_y(const Vector& zi) + { + return d.array() * zi.array(); } - bool SeperableAdaptation::perform_eigendecomposition(const Settings& settings) + Vector SeparableAdaptation::invert_y(const Vector& yi) { - d = C.diagonal().cwiseSqrt(); - return d.minCoeff() > 0.0; + return yi.array() / d.array(); } - void OnePlusOneAdaptation::adapt_evolution_paths(const Population& pop, const parameters::Weights& w, - const std::shared_ptr& mutation, const parameters::Stats& stats, - size_t mu, size_t lambda) + void 
MatrixAdaptation::adapt_evolution_paths_inner(const Population& pop, const Weights& w, + const Stats& stats, const parameters::Settings& settings, const size_t mu, const size_t lambda) { - dm = (m - m_old) / mutation->sigma; - if (!stats.has_improved) + if (settings.one_plus_one && !stats.has_improved) return; + adapt_ps(w); + } - if (stats.success_ratio < max_success_ratio) - pc = ((1.0 - w.cc) * pc) + (std::sqrt(w.cc * (2.0 - w.cc)) * pop.Y.col(0)); + bool MatrixAdaptation::adapt_matrix_inner(const Weights& w, const Modules& m, const Population& pop, const size_t mu, + const Settings& settings, parameters::Stats& stats) + { + + stats.last_update = stats.t; + stats.n_updates++; + + const auto& weights = m.active ? w.weights.topRows(pop.Z.cols()) : w.positive; + const auto& popZ = m.active ? pop.Z : pop.Z.leftCols(mu); + const auto& popY = m.active ? pop.Y : pop.Y.leftCols(mu); + + // Normal MA-ES -> O(n^3) + // + // constexpr Float epsilon = 1e-10; + // const auto& Z = popZ * weights.asDiagonal() * popZ.transpose(); + // ZwI.noalias() = (w.cmu / 2.0) * (Z - I); + // ssI.noalias() = (w.c1 / 2.0) * (ps * ps.transpose() - I); + // M = M * (I + ssI + ZwI); + // M_inv = (I - ssI - ZwI + epsilon * I) * M_inv; + + // Fast MA-ES -> O(n^2) + const Float tau_1 = w.c1 / 2.0; + const Float tau_m = w.cmu / 2.0; + const Float decay_m = (1.0 - tau_1 - tau_m); + + M = (decay_m * M) + + (tau_1 * (M * ps) * ps.transpose()) + + (popY * (tau_m * weights).asDiagonal() * popZ.transpose()); + + + if (settings.modules.elitist && !settings.one_plus_one) + M_inv = (decay_m * M_inv) + + (tau_1 * ps * (ps.transpose() * M_inv)) + + ((popY * (tau_m * weights).asDiagonal()) * (popZ.transpose() * M_inv)); else - pc = (1.0 - w.cc) * pc; + outdated_M_inv = true; // Rely on moore penrose pseudo-inv (only when needed) + return true; } - bool OnePlusOneAdaptation::adapt_matrix(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, - const parameters::Settings& 
settings, const parameters::Stats& stats) + void MatrixAdaptation::restart(const Settings& settings, const Float sigma) { - if (!stats.has_improved) + Adaptation::restart(settings, sigma); + M = Matrix::Identity(settings.dim, settings.dim); + M_inv = Matrix::Identity(settings.dim, settings.dim); + outdated_M_inv = false; + } + + Vector MatrixAdaptation::compute_y(const Vector& zi) + { + return M * zi; + } + + Vector MatrixAdaptation::invert_y(const Vector& yi) + { + if (outdated_M_inv) { + M_inv = M.completeOrthogonalDecomposition().pseudoInverse(); + outdated_M_inv = false; + } + return M_inv * yi; + } + + + void CholeskyAdaptation::adapt_evolution_paths_inner(const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) + { + if (settings.one_plus_one) { - return true; + one_plus_one_path_update(pc, pop, stats, w.cc, w.sqrt_cc_mueff, pop.Y.col(0)); + return; } - return CovarianceAdaptation::adapt_matrix(w, m, pop, mu, settings, stats); + + adapt_ps(w); + pc = (1.0 - w.cc) * pc + (w.sqrt_cc_mueff * dm); } + bool CholeskyAdaptation::adapt_matrix_inner(const parameters::Weights & w, const parameters::Modules & m, const Population & pop, size_t mu, + const parameters::Settings& settings, parameters::Stats& stats) + { + + stats.last_update = stats.t; + stats.n_updates++; + + A *= std::sqrt(1 - w.c1 - w.cmu); + + Eigen::internal::llt_rank_update_lower(A, pc, w.c1); + for (size_t i = 0; i < mu; i++) + Eigen::internal::llt_rank_update_lower(A, pop.Y.col(i), w.cmu * w.positive(i)); + + if (m.active) + for (size_t i = 0; i < pop.Y.cols() - mu; i++) + Eigen::internal::llt_rank_update_lower(A, pop.Y.col(mu + i), w.cmu * w.negative(i)); + return true; + } - void MatrixAdaptation::adapt_evolution_paths(const Population& pop, const Weights& w, - const std::shared_ptr& mutation, - const Stats& stats, const size_t mu, const size_t lambda) + void CholeskyAdaptation::restart(const 
parameters::Settings& settings, const Float sigma) { - dm = (m - m_old) / mutation->sigma; + Adaptation::restart(settings, sigma); + A = Matrix::Identity(settings.dim, settings.dim); + pc.setZero(); + } - const auto dz = (pop.Z.leftCols(mu).array().rowwise() * w.positive.array().transpose()).rowwise().sum(). - matrix(); - ps = (1.0 - mutation->cs) * ps + (sqrt(mutation->cs * (2.0 - mutation->cs) * w.mueff) * dz); + Vector CholeskyAdaptation::compute_y(const Vector& zi) + { + return A * zi; } - bool MatrixAdaptation::adapt_matrix(const Weights& w, const Modules& m, const Population& pop, const size_t mu, - const Settings& settings, const parameters::Stats& stats) + Vector CholeskyAdaptation::invert_y(const Vector& yi) { - const auto old_m = (1. - (0.5 * w.c1) - (0.5 * w.cmu)) * M; - const auto scaled_ps = (0.5 * w.c1) * (M * ps) * ps.transpose(); + return A.triangularView().solve(yi); + } - const auto old_m_inv = (1. + (0.5 * w.c1) + (0.5 * w.cmu)) * M_inv; - const auto scaled_inv_ps = (0.5 * w.c1) * ps * (ps.transpose() * M); + void SelfAdaptation::adapt_evolution_paths_inner(const Population& pop, const parameters::Weights& w, const parameters::Stats& stats, const parameters::Settings& settings, size_t mu, size_t lambda) + { + + if (!settings.one_plus_one) + adapt_ps(w); + } - Matrix new_m, new_m_inv; - if (m.active) - { - // TODO: Check if we can do this like this - const auto scaled_weights = ((0.5 * w.cmu) * w.weights.topRows(pop.Y.cols())).array().transpose(); - const auto scaled_y = (pop.Y.array().rowwise() * scaled_weights).matrix(); - new_m = scaled_y * pop.Z.transpose(); - new_m_inv = scaled_y * (pop.Z.transpose() * M_inv); - } - else + bool SelfAdaptation::adapt_matrix_inner(const parameters::Weights& w, const parameters::Modules& m, const Population& pop, size_t mu, const parameters::Settings& settings, parameters::Stats& stats) + { + stats.last_update = stats.t; + stats.n_updates++; + + const Float tc = 1.0 + (dd * (dd + 1)) / (2.0 * w.mueff); + const 
Float tc_inv = 1.0 / tc; + + const auto& weights = m.active ? w.weights.topRows(pop.Y.cols()) : w.positive; + const auto& popY = m.active ? pop.Y : pop.Y.leftCols(mu); + const auto& Y = popY * weights.asDiagonal() * popY.transpose(); + + C = (1.0 - tc_inv) * C + (tc_inv * Y); + C = 0.5 * (C + C.transpose().eval()); + + const Eigen::LLT chol(C); + if (chol.info() != Eigen::Success) { - const auto scaled_weights = ((0.5 * w.cmu) * w.positive).array().transpose(); - const auto scaled_y = (pop.Y.leftCols(mu).array().rowwise() * scaled_weights).matrix(); - new_m = scaled_y * pop.Z.leftCols(mu).transpose(); - new_m_inv = scaled_y * (pop.Z.leftCols(mu).transpose() * M_inv); + if (settings.verbose) + std::cout << "t: " << stats.t << "Cholesky solver failed, we need to restart reason:" + << chol.info() << '\n'; + return false; } + A = chol.matrixL(); - M = old_m + scaled_ps + new_m; - M_inv = old_m_inv - scaled_inv_ps - new_m_inv; return true; } - void MatrixAdaptation::restart(const Settings& settings) + void SelfAdaptation::restart(const parameters::Settings& settings, const Float sigma) { - ps.setOnes(); - m = settings.x0.value_or(Vector::Zero(settings.dim)); - m_old.setZero(); - dm.setZero(); - M = Matrix::Identity(settings.dim, settings.dim); - M_inv = Matrix::Identity(settings.dim, settings.dim); + Adaptation::restart(settings, sigma); + A = Matrix::Identity(settings.dim, settings.dim); + C = Matrix::Identity(settings.dim, settings.dim); } - Vector MatrixAdaptation::compute_y(const Vector& zi) + Vector SelfAdaptation::compute_y(const Vector& zi) { - return M * zi; + return A * zi; } - Vector MatrixAdaptation::invert_y(const Vector& yi) + Vector SelfAdaptation::invert_y(const Vector& yi) { - return M_inv * yi; + return A.triangularView().solve(yi); } + void CovarianceNoEigvAdaptation::adapt_ps(const Weights& w) + { + Adaptation::adapt_ps(w); + } - void None::adapt_evolution_paths(const Population& pop, const Weights& w, - const std::shared_ptr& mutation, const - Stats& 
stats, const size_t mu, const size_t lambda) + bool CovarianceNoEigvAdaptation::perform_eigendecomposition(const parameters::Settings& settings) { - dm = (m - m_old) / mutation->sigma; + const Eigen::LLT chol(C); + if (chol.info() != Eigen::Success) + { + if (settings.verbose) + { + std::cout << "Cholesky solver failed, we need to restart reason:" + << chol.info() << '\n'; + } + return false; + } - const auto dz = (pop.Z.leftCols(mu).array().rowwise() * w.positive.array().transpose()).rowwise().sum(). - matrix(); - ps = (1.0 - mutation->cs) * ps + (sqrt(mutation->cs * (2.0 - mutation->cs) * w.mueff) * dz); + A = chol.matrixL(); + return true; } - void None::restart(const Settings& settings) + Vector CovarianceNoEigvAdaptation::invert_y(const Vector& yi) { - ps.setZero(); - m = settings.x0.value_or(Vector::Zero(settings.dim)); - m_old.setZero(); - dm.setZero(); + return A.triangularView().solve(yi); } - Vector None::compute_y(const Vector& zi) + void NaturalGradientAdaptation::adapt_evolution_paths_inner( + const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, + const parameters::Settings& settings, + size_t mu, + size_t lambda + ) { - return zi; + if (!settings.one_plus_one) + { + adapt_ps(w); + compute_gradients(pop, w, stats, settings, mu, lambda); + return; + } + + if (stats.has_improved) + { + const auto& z = pop.Z.col(0); + G.noalias() = (z * z.transpose() - Matrix::Identity(settings.dim, settings.dim)); + } } + void NaturalGradientAdaptation::compute_gradients( + const Population& pop, + const parameters::Weights& w, + const parameters::Stats& stats, + const parameters::Settings& settings, + size_t mu, + size_t lambda + ) + { + const size_t dim = pop.Z.rows(); - Vector None::invert_y(const Vector& yi) + const auto& weights = settings.modules.active ? 
w.weights.topRows(pop.Z.cols()) : w.positive; + + G.setZero(); + for (int i = 0; i < weights.rows(); ++i) + { + const auto& z = pop.Z.col(i); + G.noalias() += weights(i) * (z * z.transpose() - Matrix::Identity(dim, dim)); + } + + // Remove isotropic (sigma-related) component: make G trace-free + sigma_g = (G.trace() / dd); + + if (!settings.one_plus_one) + G.diagonal().array() -= sigma_g; + + // Ensure symmetry for numerical stability + G = 0.5 * (G + G.transpose().eval()); + } + + bool NaturalGradientAdaptation::adapt_matrix_inner( + const parameters::Weights& w, const parameters::Modules& m, + const Population& pop, size_t mu, const parameters::Settings& settings, parameters::Stats& stats) { - return yi; + + stats.last_update = stats.t; + stats.n_updates++; + + A *= (w.cc * G).exp(); + outdated_A_inv = true; + + return true; + } + + void NaturalGradientAdaptation::restart(const parameters::Settings& settings, const Float sigma) + { + Adaptation::restart(settings, sigma); + A = Matrix::Identity(settings.dim, settings.dim) / sigma; + A_inv = Matrix::Identity(settings.dim, settings.dim); + G = Matrix::Zero(settings.dim, settings.dim); + outdated_A_inv = false; + sigma_g = 0.; + } + + Vector NaturalGradientAdaptation::compute_y(const Vector& zi) + { + return A * zi; + } + + Vector NaturalGradientAdaptation::invert_y(const Vector& yi) + { + if (outdated_A_inv) + A_inv = A.completeOrthogonalDecomposition().pseudoInverse(); + return A_inv * yi; } } diff --git a/src/mutation.cpp b/src/mutation.cpp index 693d60a..78cb0e8 100644 --- a/src/mutation.cpp +++ b/src/mutation.cpp @@ -5,8 +5,8 @@ namespace mutation { - Vector ThresholdConvergence::scale(const Vector &zi, const Float diameter, const size_t budget, - const size_t evaluations) + Vector ThresholdConvergence::scale(const Vector& zi, const Float diameter, const size_t budget, + const size_t evaluations) { const Float t = init_threshold * diameter * pow(static_cast(budget - evaluations) / static_cast(budget), 
decay_factor); @@ -15,46 +15,60 @@ namespace mutation return zi; } - bool SequentialSelection::break_conditions(const size_t i, const Float f, Float fopt, const parameters::Mirror &m) + bool SequentialSelection::break_conditions(const size_t i, const Float f, Float fopt, const parameters::Mirror& m) { return (f < fopt) and (i >= seq_cutoff) and (m != parameters::Mirror::PAIRWISE or i % 2 == 0); } - void CSA::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) - - { - sigma *= std::exp((cs / damps) * ((adaptation->ps.norm() / expected_length_z) - 1)); - } - - void CSA::mutate(FunctionType &objective, const size_t n_offspring, parameters::Parameters &p) + void Strategy::mutate(FunctionType& objective, const size_t n_offspring, parameters::Parameters& p) { - ss->sample(sigma, p.pop); + ss->sample(sigma, p.pop, p.weights.beta); p.bounds->n_out_of_bounds = 0; p.repelling->prepare_sampling(p); + for (Eigen::Index i = 0; i < static_cast(n_offspring); ++i) { size_t n_rej = 0; do { - p.pop.Z.col(i) = p.mutation->tc->scale((*p.sampler)(), p.bounds->diameter, p.settings.budget, p.stats.evaluations); - p.pop.Y.col(i) = p.adaptation->compute_y(p.pop.Z.col(i)); - p.pop.X.col(i) = p.pop.Y.col(i) * p.pop.s(i) + p.adaptation->m; + p.pop.t(i) = p.stats.t; + const auto& zi = (*p.sampler)(); + const auto& zi_scaled = p.mutation->tc->scale( + zi, p.bounds->diameter, p.settings.budget, p.stats.evaluations + ); + p.pop.Z.col(i).noalias() = zi_scaled; + p.pop.Y.col(i).noalias() = p.adaptation->compute_y(p.pop.Z.col(i)); + p.pop.X.col(i).noalias() = p.pop.Y.col(i) * p.pop.s(i) + p.adaptation->m; p.bounds->correct(i, p); } while ( - (p.settings.modules.bound_correction == parameters::CorrectionMethod::RESAMPLE && n_rej++ < 5*p.settings.dim && p.bounds->is_out_of_bounds(p.pop.X.col(i)).any()) || p.repelling->is_rejected(p.pop.X.col(i), p)); - + (p.settings.modules.bound_correction 
== parameters::CorrectionMethod::RESAMPLE && n_rej++ < 5 * p.settings.dim && p.bounds->is_out_of_bounds(p.pop.X.col(i)).any()) || p.repelling->is_rejected(p.pop.X.col(i), p)); + p.pop.f(i) = objective(p.pop.X.col(i)); p.stats.evaluations++; if (sq->break_conditions(i, p.pop.f(i), p.stats.global_best.y, p.settings.modules.mirrored)) + { + // TODO: We should renormalize the weights break; + } + } } - void TPA::mutate(FunctionType &objective, const size_t n_offspring_, parameters::Parameters &p) + void CSA::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) + + { + Float l = (w.cs / w.damps) * ((adaptation->ps.norm() / w.expected_length_z) - 1); + // Clamping as seen in pycma + l = std::min(Float{1.0}, std::max(l, Float{-1.0})); + sigma *= std::exp(l); + } + + + void TPA::mutate(FunctionType& objective, const size_t n_offspring_, parameters::Parameters& p) { - CSA::mutate(objective, n_offspring_, p); + Strategy::mutate(objective, n_offspring_, p); const auto f_pos = objective(p.adaptation->m + (p.mutation->sigma * p.adaptation->dm)); const auto f_neg = objective(p.adaptation->m + (p.mutation->sigma * -p.adaptation->dm)); @@ -62,25 +76,25 @@ namespace mutation this->rank_tpa = f_neg < f_pos ? -a_tpa : a_tpa + b_tpa; } - void TPA::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) + void TPA::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) { - s = ((1.0 - cs) * s) + (cs * this->rank_tpa); + s = ((1.0 - w.cs) * s) + (w.cs * this->rank_tpa); sigma *= std::exp(s); } //! 
Assumes the vector to be arready sorted - Float median(const Vector &x) + Float median(const Vector& x) { if (x.size() % 2 == 0) return (x(x.size() / 2) + x(x.size() / 2 - 1)) / 2.0; return x(x.size() / 2); } - void MSR::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lamb) + void MSR::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lamb) { const auto n = std::min(pop.n_finite(), old_pop.n_finite()); if (n != 0) @@ -88,18 +102,18 @@ namespace mutation const Float lambda = static_cast(lamb); const Float k = (pop.f.array() < median(old_pop.f)).cast().sum(); const auto z = (2.0 / lambda) * (k - ((lambda + 1.0) / 2.0)); - s = ((1.0 - cs) * s) + (cs * z); + s = ((1.0 - w.cs) * s) + (w.cs * z); sigma *= std::exp(s / (2.0 - (2.0 / adaptation->dd))); } } //! Returns the indices of the elements of query in database - Vector searchsorted(const Vector &query, const Vector &database) + Vector searchsorted(const Vector& query, const Vector& database) { Vector res(query.size()); auto i = 0; - for (const auto &xi : query) + for (const auto& xi : query) { auto it = std::find(std::begin(database), std::end(database), xi); res(i++) = static_cast(std::distance(std::begin(database), it)); @@ -107,9 +121,9 @@ namespace mutation return res; } - void PSR::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) + void PSR::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) { const auto n = std::min(pop.n_finite(), old_pop.n_finite()); if (n != 0) @@ -129,100 +143,102 @@ namespace mutation } const auto z = delta_r / std::pow(n, 2) - success_ratio; - s = 
(1.0 - cs) * s + (cs * z); + s = (1.0 - w.cs) * s + (w.cs * z); sigma *= std::exp(s / (2.0 - (2.0 / adaptation->dd))); } } - void XNES::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) + void XNES::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) { - // const Float z = ((std::dynamic_pointer_cast(adaptation)->inv_root_C * .Y).colwise().norm().array().pow(2.) - adaptation->dd).matrix() * w.clipped(); - const Float z = ((pop.Z).colwise().norm().array().pow(2.) - adaptation->dd).matrix() * w.clipped(); - sigma *= std::exp((cs / std::sqrt(adaptation->dd)) * z); + if (const auto dynamic = std::dynamic_pointer_cast(adaptation)) + { + sigma *= std::exp(w.cs / 2.0 * dynamic->sigma_g); + return; + } + + const Float z = ((pop.Z).colwise().squaredNorm().array() - adaptation->dd).matrix() * w.clipped(); + sigma *= std::exp((w.cs / std::sqrt(adaptation->dd)) * z); } - void MXNES::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) + void MXNES::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) { - const auto n = std::min(pop.n_finite(), old_pop.n_finite()); - if (n != 0) - { - // const auto z = (w.mueff * std::pow((dynamic.inv_root_C * dynamic.dm).norm(), 2)) - dynamic.dd; - const auto mu = pop.n - lambda; - const auto dz = (pop.Z.leftCols(mu).array().rowwise() * w.positive.array().transpose()).rowwise().sum().matrix(); - const auto z = (w.mueff * std::pow(dz.norm(), 2)) - adaptation->dd; - sigma *= std::exp((cs / adaptation->dd) * z); - } + const Float delta = (w.mueff * adaptation->dz.squaredNorm() - adaptation->dd); + + 
sigma *= std::exp((w.cs / adaptation->dd) * delta); } - void LPXNES::adapt(const parameters::Weights &w, std::shared_ptr adaptation, - Population &pop, - const Population &old_pop, const parameters::Stats &stats, const size_t lambda) + void LPXNES::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) { - const auto z = std::exp(cs * pop.s.array().log().matrix().dot(w.clipped())); - sigma = std::pow(sigma, 1.0 - cs) * z; + const Float rel_log = (pop.s.array() / sigma).log().matrix().dot(w.clipped()); + sigma *= std::exp(w.cs * rel_log); } void SR::adapt(const parameters::Weights& w, std::shared_ptr adaptation, Population& pop, const Population& old_pop, const parameters::Stats& stats, const size_t lambda) { - sigma *= std::exp((1 / damps) * ((stats.success_ratio - tgt_success_ratio) / (1.0 - tgt_success_ratio))); + sigma *= std::exp((1 / w.damps) * ((stats.success_ratio - tgt_success_ratio) / (1.0 - tgt_success_ratio))); } - std::shared_ptr get(const parameters::Modules &m, const size_t mu, const Float mueff, - const Float d, const Float sigma, const std::optional cs0, - const Float expected_z) + void SA::mutate(FunctionType& objective, const size_t n_offspring, parameters::Parameters& p) + { + Strategy::mutate(objective, n_offspring, p); + mean_sigma = std::exp(p.pop.s.array().log().mean()); + } + + void SA::adapt(const parameters::Weights& w, std::shared_ptr adaptation, + Population& pop, + const Population& old_pop, const parameters::Stats& stats, const size_t lambda) + { + const auto& sigma_l = pop.s.topRows(w.positive.rows()); + sigma = std::exp((w.positive.array() * sigma_l.array().log()).sum()) / mean_sigma; + } + + + std::shared_ptr get(const parameters::Modules& m, const size_t mu, const Float d, const Float sigma) { using namespace parameters; auto tc = m.threshold_convergence - ? std::make_shared() - : std::make_shared(); + ? 
std::make_shared() + : std::make_shared(); auto sq = m.sequential_selection - ? std::make_shared(m.mirrored, mu) - : std::make_shared(m.mirrored, mu); - - auto ss = (m.sample_sigma or m.ssa == StepSizeAdaptation::LPXNES) - ? std::make_shared(d) - : std::make_shared(d); + ? std::make_shared(m.mirrored, mu) + : std::make_shared(m.mirrored, mu); - Float cs = cs0.value_or(0.3); - Float damps = 0.0; + auto ss = (m.sample_sigma or m.ssa == StepSizeAdaptation::LPXNES or m.ssa == StepSizeAdaptation::SA) + ? std::make_shared(d) + : std::make_shared(d); switch (m.ssa) { case StepSizeAdaptation::TPA: - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); case StepSizeAdaptation::MSR: - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); case StepSizeAdaptation::XNES: - cs = cs0.value_or(mueff / (2.0 * std::log(std::max(Float{2.}, d)) * sqrt(d))); - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); case StepSizeAdaptation::MXNES: - cs = cs0.value_or(1.); - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); case StepSizeAdaptation::LPXNES: - cs = cs0.value_or(9.0 * mueff / (10.0 * sqrt(d))); - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); case StepSizeAdaptation::PSR: - cs = cs0.value_or(.9); - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); case StepSizeAdaptation::SR: - cs = cs0.value_or(1.0 / 12.0); - damps = 1.0 + (d / 2.0); - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); + case StepSizeAdaptation::SA: + return std::make_shared(tc, sq, ss, sigma); default: case StepSizeAdaptation::CSA: - cs = cs0.value_or((mueff + 2.0) / (d + mueff + 
5.0)); - const Float rhs = std::sqrt((mueff - Float(1.0)) / (d + 1)) - 1; - damps = 1.0 + (2.0 * std::max(Float(0.0), rhs) + cs); - return std::make_shared(tc, sq, ss, cs, damps, sigma, expected_z); + return std::make_shared(tc, sq, ss, sigma); } } } diff --git a/src/parameters.cpp b/src/parameters.cpp index 832308a..7f66eb7 100644 --- a/src/parameters.cpp +++ b/src/parameters.cpp @@ -2,34 +2,35 @@ namespace parameters { - Parameters::Parameters(const Settings &settings) : successfull_adaptation(true), - lambda(settings.lambda0), - mu(settings.mu0), - settings(settings), - stats{}, - weights(settings.dim, settings.mu0, settings.lambda0, settings), - pop(settings.dim, settings.lambda0), - old_pop(settings.dim, settings.lambda0), - criteria(restart::Criteria::get(settings.modules)), - sampler(sampling::get(settings.dim, settings.modules, settings.lambda0)), - adaptation(matrix_adaptation::get(settings.modules, settings.dim, - settings.x0.value_or(Vector::Zero(settings.dim)), - sampler->expected_length())), - mutation(mutation::get(settings.modules, - settings.mu0, weights.mueff, - static_cast(settings.dim), - settings.sigma0, - settings.cs, - sampler->expected_length())), - selection(std::make_shared(settings.modules)), - restart_strategy(restart::strategy::get( - settings.modules, - static_cast(settings.lambda0), - static_cast(settings.mu0), - settings.budget)), - bounds(bounds::get(settings.modules.bound_correction, settings.lb, settings.ub)), - repelling(repelling::get(settings.modules)), - center_placement(center::get(settings.modules.center_placement)) + Parameters::Parameters(const Settings& settings) : successfull_adaptation(true), + lambda(settings.lambda0), + mu(settings.mu0), + settings(settings), + stats{}, + sampler(sampling::get(settings.dim, settings.modules, settings.lambda0)), + weights(settings.dim, settings.mu0, settings.lambda0, settings, sampler->expected_length()), + pop(settings.dim, settings.lambda0), + old_pop(settings.dim, settings.lambda0), 
+ criteria(restart::Criteria::get(settings.modules)), + adaptation(matrix_adaptation::get(settings.modules, settings.dim, + settings.x0.value_or(Vector::Zero(settings.dim)), + sampler->expected_length(), + settings.sigma0 + )), + mutation(mutation::get(settings.modules, + settings.mu0, + static_cast(settings.dim), + settings.sigma0 + )), + selection(std::make_shared(settings.modules)), + restart_strategy(restart::strategy::get( + settings.modules, + static_cast(settings.lambda0), + static_cast(settings.mu0), + settings.budget)), + bounds(bounds::get(settings.modules.bound_correction, settings.lb, settings.ub)), + repelling(repelling::get(settings.modules)), + center_placement(center::get(settings.modules.center_placement)) { criteria.reset(*this); } @@ -38,8 +39,16 @@ namespace parameters { } - void Parameters::perform_restart(FunctionType &objective, const std::optional &sigma) + void Parameters::perform_restart(FunctionType& objective, const std::optional& sigma) { + if (settings.verbose) + { + std::cout << "performing restart. 
t(" << stats.t <<", " << stats.evaluations + << ") reason: " << criteria.reason() << std::endl; + std::cout << "setting parameters (mu, lambda, sigma): (" << + sigma.value_or(settings.sigma0) << ", " + << mu << ", " << lambda << ")\n"; + } stats.solutions.push_back(stats.current_best); stats.evaluations++; stats.centers.emplace_back(adaptation->m, objective(adaptation->m), stats.t - 1, stats.evaluations); @@ -47,17 +56,16 @@ namespace parameters stats.has_improved = false; repelling->update_archive(objective, *this); - weights = Weights(settings.dim, mu, lambda, settings); sampler->reset(settings.modules, lambda); + weights = Weights(settings.dim, mu, lambda, settings, sampler->expected_length()); pop = Population(settings.dim, lambda); old_pop = Population(settings.dim, lambda); - mutation = mutation::get(settings.modules, mu, weights.mueff, - static_cast(settings.dim), - sigma.value_or(settings.sigma0), - settings.cs, sampler->expected_length()); - adaptation->restart(settings); + mutation = mutation::get(settings.modules, mu, + static_cast(settings.dim), + sigma.value_or(settings.sigma0)); + adaptation->restart(settings, sigma.value_or(settings.sigma0)); (*center_placement)(*this); criteria.reset(*this); stats.current_best = {}; @@ -65,7 +73,7 @@ namespace parameters void Parameters::adapt() { - adaptation->adapt_evolution_paths(pop, weights, mutation, stats, mu, lambda); + adaptation->adapt_evolution_paths(pop, weights, stats, settings, lambda, mu); mutation->adapt(weights, adaptation, pop, old_pop, stats, lambda); if (constants::clip_sigma) @@ -77,10 +85,10 @@ namespace parameters stats.t++; } - void Parameters::start(FunctionType &objective) + void Parameters::start(FunctionType& objective) { old_pop = pop; - if (criteria.any) + if (criteria.any()) { const auto sig = restart_strategy->update(*this); perform_restart(objective, sig); @@ -88,12 +96,12 @@ namespace parameters } } -std::ostream &operator<<(std::ostream &os, const parameters::Stats &s) 
+std::ostream& operator<<(std::ostream& os, const parameters::Stats& s) { return os - << "Stats" - << " t=" << s.t - << " e=" << s.evaluations - << " best=" << s.global_best - << " improved=" << std::boolalpha << s.has_improved; + << "Stats" + << " t=" << s.t + << " e=" << s.evaluations + << " best=" << s.global_best + << " improved=" << std::boolalpha << s.has_improved; } diff --git a/src/population.cpp b/src/population.cpp index 0eae0d0..6475a62 100644 --- a/src/population.cpp +++ b/src/population.cpp @@ -8,6 +8,7 @@ void Population::sort() Y = Y(Eigen::all, idx).eval(); f = f(idx).eval(); s = s(idx).eval(); + t = t(idx).eval(); } Population& Population::operator+=(const Population& other) @@ -17,6 +18,7 @@ Population& Population::operator+=(const Population& other) utils::hstack(Z, other.Z); utils::concat(f, other.f); utils::concat(s, other.s); + utils::concat(t, other.t); n += other.n; return *this; } @@ -29,6 +31,7 @@ void Population::resize_cols(const size_t size) Z.conservativeResize(d, n); f.conservativeResize(n); s.conservativeResize(n); + t.conservativeResize(n); } @@ -39,6 +42,7 @@ void Population::keep_only(const std::vector& idx) Y = Y(Eigen::all, idx).eval(); f = f(idx).eval(); s = s(idx).eval(); + t = t(idx).eval(); n = idx.size(); } diff --git a/src/repelling.cpp b/src/repelling.cpp index 9da3ec2..8ef48ac 100644 --- a/src/repelling.cpp +++ b/src/repelling.cpp @@ -66,8 +66,8 @@ namespace repelling bool TabooPoint::rejects(const Vector &xi, const parameters::Parameters &p, const int attempts) const { const Float rejection_radius = std::pow(shrinkage, attempts) * radius; - const Float delta_xi = distance::mahanolobis(xi, solution.x, p.adaptation->inv_C) / p.mutation->sigma; - + const Float delta_xi = p.adaptation->distance(xi, solution.x) / p.mutation->sigma; + if (delta_xi < rejection_radius) return true; @@ -81,7 +81,8 @@ namespace repelling void TabooPoint::calculate_criticality(const parameters::Parameters &p) { - const Float delta_m = 
distance::mahanolobis(p.adaptation->m, solution.x, p.adaptation->inv_C) / p.mutation->sigma; + const Float delta_m = p.adaptation->distance_from_center(solution.x) / p.mutation->sigma; + const auto u = delta_m + radius; const auto l = delta_m - radius; criticality = cdf(u) - cdf(l); @@ -95,29 +96,6 @@ namespace repelling std::sort(archive.begin(), archive.end(), [](const TabooPoint &a, const TabooPoint &b) { return a.criticality > b.criticality; }); - - //! If it is not intialized - /* - if (C.cols() != p.settings.dim) - { - C = Matrix::Identity(p.settings.dim, p.settings.dim); - C_inv = Matrix::Identity(p.settings.dim, p.settings.dim); - } - - if (!(p.settings.modules.matrix_adaptation == parameters::MatrixAdaptationType::NONE || - p.settings.modules.matrix_adaptation == parameters::MatrixAdaptationType::MATRIX)) - { - using namespace matrix_adaptation; - const auto dynamic = std::dynamic_pointer_cast(p.adaptation); - - const Float d_sigma = p.mutation->sigma / p.settings.sigma0; - if (d_sigma > constants::sigma_threshold) - { - C = dynamic->C / dynamic->C.maxCoeff(); - C_inv = dynamic->inv_C / dynamic->inv_C.maxCoeff(); - } - } - */ } void Repelling::update_archive(FunctionType &objective, parameters::Parameters &p) @@ -141,13 +119,13 @@ namespace repelling } if (accept_candidate) - archive.emplace_back(candidate_point, 1.0);// , C, C_inv); - + archive.emplace_back(candidate_point, 1.0); + const Float volume_per_n = p.settings.volume / (p.settings.sigma0 * coverage * p.stats.solutions.size()); const Float n = p.adaptation->dd; const Float gamma_f = std::pow(std::tgamma(n / 2.0 + 1.0), 1.0 / n) / std::sqrt(M_PI); for (auto &point : archive) - point.radius = std::pow(volume_per_n * point.n_rep, 1.0 / n) * gamma_f; + point.radius = (std::pow(volume_per_n * point.n_rep, 1.0 / n) * gamma_f) / std::sqrt(n); } bool Repelling::is_rejected(const Vector &xi, parameters::Parameters &p) @@ -168,7 +146,6 @@ namespace repelling } } } - return false; } } diff --git 
a/src/restart_criteria.cpp b/src/restart_criteria.cpp index b86fb89..a0fc1ed 100644 --- a/src/restart_criteria.cpp +++ b/src/restart_criteria.cpp @@ -21,6 +21,9 @@ namespace // TODO: this is duplicate code Float median(const Vector &x) { + if (x.size() == 1) + return x(0); + if (x.size() % 2 == 0) return (x(x.size() / 2) + x(x.size() / 2 - 1)) / 2.0; return x(x.size() / 2); @@ -28,6 +31,9 @@ namespace Float median(const std::vector &v, const size_t from, const size_t to) { + if (v.size() == 1) + return v[0]; + const size_t n = to - from; if (n % 2 == 0) return (v[from + (n / 2)] + v[from + (n / 2) - 1]) / 2.0; @@ -46,7 +52,9 @@ namespace restart void ExceededMaxIter::on_reset(const parameters::Parameters &p) { - max_iter = static_cast(100 + 50 * std::pow((static_cast(p.settings.dim) + 3), 2.0) / std::sqrt(static_cast(p.lambda))); + max_iter = static_cast( + 100 + 50 * std::pow((static_cast(p.settings.dim) + 3), 2.0) / std::sqrt(static_cast(p.lambda)) + ); } void ExceededMaxIter::update(const parameters::Parameters &p) @@ -60,12 +68,16 @@ namespace restart n_bin = 10 + static_cast(std::ceil(30 * static_cast(p.settings.dim) / static_cast(p.lambda))); } - void NoImprovement::update(const parameters::Parameters &p) + void NoImprovement::update(const parameters::Parameters& p) { const size_t time_since_restart = p.stats.t - last_restart; best_fitnesses.push_back(p.pop.f(0)); - const auto recent_improvement = ptp_tail(best_fitnesses, n_bin); - met = time_since_restart > n_bin and recent_improvement == 0; + met = false; + if (time_since_restart > n_bin) + { + const auto recent_improvement = ptp_tail(best_fitnesses, n_bin); + met = recent_improvement == 0; + } } void MaxSigma::update(const parameters::Parameters &p) @@ -87,8 +99,12 @@ namespace restart { const size_t time_since_restart = p.stats.t - last_restart; flat_fitnesses(p.stats.t % p.settings.dim) = p.pop.f(0) == p.pop.f(flat_fitness_index); - const size_t n_flat_fitness = static_cast(flat_fitnesses.sum()); - met = 
time_since_restart > static_cast(flat_fitnesses.size()) and n_flat_fitness > max_flat_fitness; + met = false; + if (time_since_restart > static_cast(flat_fitnesses.size())) + { + const size_t n_flat_fitness = static_cast(flat_fitnesses.sum()); + met = n_flat_fitness > max_flat_fitness; + } } void FlatFitness::on_reset(const parameters::Parameters &p) @@ -105,7 +121,7 @@ namespace restart { const Float d_sigma = p.mutation->sigma / p.settings.sigma0; const Float tolx_condition = tolerance * p.settings.sigma0; - tolx_vector.head(p.settings.dim) = dynamic->C.diagonal() * d_sigma; + tolx_vector.head(p.settings.dim) = dynamic->C.diagonal().cwiseSqrt() * d_sigma; tolx_vector.tail(p.settings.dim) = dynamic->pc * d_sigma; met = (tolx_vector.array() < tolx_condition).all(); } @@ -154,7 +170,7 @@ namespace restart { const Eigen::Index t = p.stats.t % p.settings.dim; const auto effect_axis = 0.1 * p.mutation->sigma * std::sqrt(dynamic->d(t)) * dynamic->B.col(t); - met = (effect_axis.array() < tolerance).all(); + met = (effect_axis.array().abs() < tolerance).all(); } } @@ -163,7 +179,7 @@ namespace restart if (const auto dynamic = std::dynamic_pointer_cast(p.adaptation)) { const auto effect_coord = 0.2 * p.mutation->sigma * dynamic->C.diagonal().cwiseSqrt(); - met = (effect_coord.array() < tolerance).all(); + met = (effect_coord.array().abs() < tolerance).any(); } } @@ -174,21 +190,33 @@ namespace restart median_fitnesses.push_back(median(p.pop.f)); best_fitnesses.push_back(p.pop.f(0)); - const bool best_better = median(best_fitnesses, pt, time_since_restart) >= median(best_fitnesses, 0, pt); - const bool median_better = median(median_fitnesses, pt, time_since_restart) >= median(median_fitnesses, 0, pt); - - met = time_since_restart > n_stagnation and (best_better and median_better); + met = false; + if (time_since_restart > n_stagnation) + { + const bool best_better = median(best_fitnesses, pt, time_since_restart) >= median(best_fitnesses, 0, pt); + const bool median_better = 
median(median_fitnesses, pt, time_since_restart) >= median(median_fitnesses, 0, pt); + met = best_better and median_better; + } } void Stagnation::on_reset(const parameters::Parameters &p) { const auto d = static_cast(p.settings.dim); const auto lambda = static_cast(p.lambda); - n_stagnation = (static_cast(std::min(static_cast(120 + (30 * d / lambda)), 20000))); + n_stagnation = static_cast( + 100 + 100 * std::pow(p.settings.dim, 1.5) / static_cast(p.lambda) + ); + median_fitnesses = {}; best_fitnesses = {}; } + void TooMuchRepelling::update(const parameters::Parameters& p) + { + const Float average_repelling = static_cast(p.repelling->attempts) / static_cast(p.lambda); + met = average_repelling >= tolerance; + } + Criteria Criteria::get(const parameters::Modules modules) { vCriteria criteria{ @@ -205,8 +233,8 @@ namespace restart criteria.push_back(std::make_shared()); criteria.push_back(std::make_shared()); - if (modules.matrix_adaptation == parameters::MatrixAdaptationType::COVARIANCE || - modules.matrix_adaptation == parameters::MatrixAdaptationType::SEPERABLE) + //! 
TODO: make these compatible with other MA + if (modules.matrix_adaptation == parameters::MatrixAdaptationType::COVARIANCE) { criteria.push_back(std::make_shared()); criteria.push_back(std::make_shared()); @@ -214,6 +242,11 @@ namespace restart criteria.push_back(std::make_shared()); } } + + if (modules.repelling_restart) + { + criteria.push_back(std::make_shared()); + } return Criteria(criteria); } } \ No newline at end of file diff --git a/src/restart_strategy.cpp b/src/restart_strategy.cpp index 9f6c1b8..2f762d2 100644 --- a/src/restart_strategy.cpp +++ b/src/restart_strategy.cpp @@ -8,7 +8,7 @@ namespace restart { Float Strategy::update(parameters::Parameters &p) - { + { return p.settings.sigma0; } @@ -43,7 +43,7 @@ namespace restart lambda_large *= 2; } else - { + { budget_small -= last_used_budget; } diff --git a/src/selection.cpp b/src/selection.cpp index 7071cb9..c27f912 100644 --- a/src/selection.cpp +++ b/src/selection.cpp @@ -23,7 +23,6 @@ namespace selection p.stats.current_avg = p.pop.f.array().mean(); p.stats.update_best(p.pop.X(Eigen::all, 0), p.pop.f(0)); - } void Pairwise::operator()(parameters::Parameters& p) const @@ -41,10 +40,14 @@ namespace selection if (p.stats.t != 0) { p.old_pop.resize_cols(k); - for (Eigen::Index i = 0; i < static_cast(p.old_pop.n); i++) + + if (!p.settings.one_plus_one) { - p.old_pop.Y.col(i) = p.adaptation->invert_x(p.old_pop.X.col(i), p.old_pop.s(i)); - p.old_pop.Z.col(i) = p.adaptation->invert_y(p.old_pop.Y.col(i)); + for (Eigen::Index i = 0; i < static_cast(p.old_pop.n); i++) + { + p.old_pop.Y.col(i).noalias() = p.adaptation->invert_x(p.old_pop.X.col(i), p.old_pop.s(i)); + p.old_pop.Z.col(i).noalias() = p.adaptation->invert_y(p.old_pop.Y.col(i)); + } } p.pop += p.old_pop; } diff --git a/src/weights.cpp b/src/weights.cpp index e6accd5..53a5422 100644 --- a/src/weights.cpp +++ b/src/weights.cpp @@ -1,59 +1,175 @@ #include "weights.hpp" + + +#include +#include +#include +#include + +using Eigen::VectorXd; +using std::log; 
+using std::sqrt; +using std::max; +using std::min; + namespace parameters { - Weights::Weights(const size_t dim, const size_t mu, const size_t lambda, const Settings& settings) - : weights(lambda), positive(mu), negative(lambda - mu) + static Float get_default_cs(const Settings& settings, const Float mueff, const Float d) { - const Float d = static_cast(dim); - switch (settings.modules.weights) + // TODO: check whether the value of cs needs to be increased when CMA is NONE + + if (settings.modules.matrix_adaptation == MatrixAdaptationType::NATURAL_GRADIENT) + return (9.0 + 3.0 + std::log(d)) / (5.0 * d * std::sqrt(d)); + + switch (settings.modules.ssa) { - case RecombinationWeights::EQUAL: - weights_equal(mu); - break; - case RecombinationWeights::HALF_POWER_LAMBDA: - weights_half_power_lambda(mu, lambda); - break; - case RecombinationWeights::DEFAULT: - weights_default(lambda); - break; + case StepSizeAdaptation::XNES: + //return mueff / (2.0 * std::log(std::max(Float{ 2. }, d)) * sqrt(d)); + return 0.01; + case StepSizeAdaptation::MXNES: + return 1.0; + case StepSizeAdaptation::LPXNES: + return (9.0 * mueff) / (10.0 * sqrt(d)); + case StepSizeAdaptation::PSR: + return 0.9; + case StepSizeAdaptation::SR: + return 2.0 / (d + 2.0); + case StepSizeAdaptation::CSA: + //return (mueff + 2.0) / (d + mueff + 5.0); + return (mueff + 2) / (d + (mueff + 3.0)); + default: + return 0.3; } + } - mueff = std::pow(positive.sum(), 2) / positive.dot(positive); - mueff_neg = std::pow(negative.sum(), 2) / negative.dot(negative); - positive /= positive.sum(); + + static Float get_default_damps(const Settings& settings, const Float mueff, const Float d, const Float cs) + { + switch (settings.modules.ssa) + { + case StepSizeAdaptation::SR: + return 1.0 + (d / 2.0); + case StepSizeAdaptation::CSA: + { + const Float rhs = std::sqrt((mueff - Float(1.0)) / (d + 1)) - 1; + return 1.0 + (2.0 * std::max(Float(0.0), rhs) + cs); + } + default: + return 1.0; + } + } - c1 = 
settings.c1.value_or(2.0 / (pow(d + 1.3, 2) + mueff)); - Float cmu_default = std::min( - 1.0 - c1, 2.0 * ((mueff - 2.0 + (1.0 / mueff)) / (pow(d + 2.0, 2) + (2.0 * mueff / 2)))); + static Float get_default_c1(const Settings& settings, const Float d, const Float mueff, const Float acov) + { + if (settings.one_plus_one) + return 2.0 / (pow(d, 2) + 6.0); + + return acov / (pow(d + 1.3, 2) + mueff); + } + + static Float get_default_cc(const Settings& settings, const Float d, const Float mueff, const Float cs) + { + if (settings.modules.matrix_adaptation == MatrixAdaptationType::NATURAL_GRADIENT) + return !settings.one_plus_one ? 0.5 * cs : (1.0 / (4.0 * pow(d, 1.5))); + + + if (settings.one_plus_one) + return 2.0 / (d + 2.0); + + return (4.0 + (mueff / d)) / (d + 4.0 + (2.0 * mueff / d)); + } - if (settings.modules.matrix_adaptation == MatrixAdaptationType::SEPERABLE) + static Float get_default_cmu( + const Settings& settings, + const Float d, + const Float mueff, + const Float c1, + const Float acov + ) + { + Float cmu_default = std::min(1.0 - c1, + acov * + (mueff - 2.0 + (1.0 / mueff)) + / (pow(d + 2.0, 2) + (acov * mueff / 2))); + + //Float cmu_default = std::min(1.0 - c1, + // acov * + // (0.25 + mueff + 1.0 / mueff - 2.0) / + // (pow(d + 2., 2.0) + acov * mueff / 2.0)); + + if (settings.modules.matrix_adaptation == MatrixAdaptationType::SEPARABLE) cmu_default *= ((d + 2.0) / 3.0); if (settings.lambda0 == 1) { cmu_default = 2 / (pow(d, 2) + 6.0); } - cmu = settings.cmu.value_or(cmu_default); - cc = settings.cmu.value_or( - (4.0 + (mueff / d)) / (d + 4.0 + (2.0 * mueff / d)) - ); + return cmu_default; + } + + Weights::Weights( + const size_t dim, + const size_t mu, + const size_t lambda, + const Settings& settings, + const Float expected_length_z + ) + : weights(lambda), positive(mu), negative(lambda - mu), expected_length_z(expected_length_z) + { + const Float d = static_cast(dim); + switch (settings.modules.weights) + { + case RecombinationWeights::EQUAL: + 
weights_equal(mu); + break; + case RecombinationWeights::EXPONENTIAL: + weights_exponential(mu, lambda); + break; + case RecombinationWeights::DEFAULT: + weights_default(mu, lambda); + break; + } + positive /= positive.sum(); + mueff = 1.0 / positive.dot(positive); + mueff_neg = std::pow(negative.sum(), 2) / negative.dot(negative); + acov = settings.acov.value_or(2.0); + c1 = settings.c1.value_or(get_default_c1(settings, d, mueff, acov)); + cmu = settings.cmu.value_or(get_default_cmu(settings, d, mueff, c1, acov)); + cs = settings.cs.value_or(get_default_cs(settings, mueff, d)); + cc = settings.cc.value_or(get_default_cc(settings, d, mueff, cs)); + + damps = settings.damps.value_or(get_default_damps(settings, mueff, d, cs)); + + sqrt_cs_mueff = std::sqrt(cs * (2.0 - cs) * mueff); + sqrt_cc_mueff = std::sqrt(cc * (2.0 - cc) * mueff); + + //const Float amu_neg = 1.0 + (c1 / cmu); const Float amu_neg = 1.0 + (c1 / static_cast(mu)); const Float amueff_neg = 1.0 + ((2.0 * mueff_neg) / (mueff + 2.0)); const Float aposdef_neg = (1.0 - c1 - cmu) / (d * cmu); - const Float neg_scaler = std::min(amu_neg, std::min(amueff_neg, aposdef_neg)); - negative *= (neg_scaler / negative.cwiseAbs().sum()); - weights << positive, negative; + weights << positive, negative; + + + lazy_update_interval = 1.0 / (c1 + cmu + 1e-23) / d / 10.0; + expected_length_ps = (1.4 + (2.0 / (d + 1.0))) * expected_length_z; + + beta = 1.0 / std::sqrt(2.0 * mueff); + if (settings.modules.ssa == StepSizeAdaptation::LPXNES) + beta = std::log(2.0) / (std::sqrt(d) * std::log(d)); } - void Weights::weights_default(const size_t lambda) + + void Weights::weights_default(const size_t mu, const size_t lambda) { - const Float base = std::log((static_cast(lambda) + 1.) / 2.0); + const Float ratio = static_cast(lambda) / static_cast(mu); + const Float base = std::log((static_cast(lambda) + 1.) 
/ std::floor(ratio)); + for (auto i = 0; i < positive.size(); ++i) positive(i) = base - std::log(static_cast(i + 1)); @@ -68,7 +184,7 @@ namespace parameters negative.setConstant(-wi); } - void Weights::weights_half_power_lambda(const size_t mu, const size_t lambda) + void Weights::weights_exponential(const size_t mu, const size_t lambda) { const Float dmu = static_cast(mu); const Float base = (1.0 / pow(2.0, dmu)) / dmu; diff --git a/tests/expected.json b/tests/expected.json index 07d72a8..b61ed6b 100644 --- a/tests/expected.json +++ b/tests/expected.json @@ -1 +1 @@ -{"active_True": [0.5435765665062582, 5437.747040771441, 10.01660437953625, 2.2719832345541144, 0.0, 199.2782474326513, 5.408240310984194, 3.5024014085354476, 16.10070347007742, 33915.32642536684, 5594.267386966566, 2683.7038475009585, 28.144026717256793, 0.385064441869341, 21.044273488406503, 15.751386747089091, 1.3972411708519288, 22.906212773882444, 0.45596232156333016, 2.1902024625011554, 4.5959310419633415, 8.40924249765171, 16.44976652202346, 10.449338566719732], "elitist_True": [0.6699833162594517, 1227.9721054463246, 16.869910137817563, 19.572392428411913, 0.0, 117.99827705243017, 2.4036623604374214, 7.623121487433538, 3.1615579688959676, 14355.480702885849, 29507.974482338646, 2683.7038475009585, 28.411660356917054, 0.6299432344321185, 7.265400966462051, 14.434537776482069, 1.4118850095554683, 23.630235905877907, 0.45596232156333016, 2.1620882643582515, 1.990963814929167, 7.845446138574839, 7.628611542891237, 14.194368416234731], "orthogonal_True": [0.2553427181462442, 118072.3053998135, 2.009699293124982, 10.626106984940423, 2.8786810407197256, 72413.21225551268, 2.5001933979731294, 12.550406947379917, 0.3352504442463413, 25908.565659852648, 1237306.2027346084, 1702.7335324051267, 70.43902585372231, 0.6976581726574436, 12.520744182648356, 12.812916264391731, 58.44704396496184, 66.62435462123153, 2.6677373415628356, 2.15875878922896, 2.737873259042217, 2.0770051329721895, 7.955480122219778, 
14.581514536704194], "sequential_True": [19.5719406285756, 7210188.745246258, 13.68179355336543, 23.313513761722596, 0.0, 17730.504224411925, 0.6009155901093554, 184.20999101158142, 384.33208310135416, 23103333.8120513, 32971525.026216425, 2683.7038475009585, 214.39433012072388, 57.622398804713555, 25.180917948707858, 20.162513485514452, 1.977554466397431, 52.204673425080216, 0.45596232156333016, 259.10398673490283, 2.0601778166852416, 11.496305678387733, 8.260780485904256, 43.60153110381541], "threshold_convergence_True": [0.7347847130472621, 3621.5783803371164, 9.440988022081847, 17.58836091588514, 0.0, 214.79259349995988, 2.5001933979731294, 24.54379070614484, 11.6043233309905, 419398.0095371448, 3430.404715228194, 926.4455952079579, 28.411660356917054, 0.6893117820736763, 32.31733995435944, 15.751386747089091, 1.4118850095554683, 12.429946026580991, 0.45596232156333016, 3.055334914776176, 0.892465554399863, 1.9930860112728006, 6.08527369530883, 7.7464244673661105], "step_size_adaptation_csa": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "step_size_adaptation_tpa": [0.03767014607534145, 2175.9911995416633, 13.567807397656388, 11.874870433498376, 0.0, 524.5555243872985, 1.3858714981987188, 7.124026768321718, 9.334989121212406, 266.0481801806989, 252465.61185230376, 691.4718777227441, 38.68678171583889, 0.28663649612106273, 11.326830236653748, 15.751386747089091, 3.855740332598697, 32.94246424988947, 0.45596232156333016, 1.9737590640108762, 2.590406047723298, 0.0011090400311043107, 9.77176743348311, 3.9402019732301254], "step_size_adaptation_msr": 
[0.2128771251222575, 295.5488424459397, 30.87244163004304, 2.4385399363203035, 0.0, 172.463698480376, 2.06863388735908, 7.124026768321718, 9.334989121212406, 1927.7199333450003, 252293.3746024698, 2683.7038475009585, 38.68678171583889, 0.1609115280864648, 23.765329153563307, 15.751386747089091, 3.4535519370597645, 31.366999377372224, 0.45596232156333016, 2.101822038572848, 0.13402589068051024, 1.7724620177602137, 5.498985829998998, 10.501507178292282], "step_size_adaptation_xnes": [3.685803014042314, 53903.6720731987, 5.42438751362041, 8.476988158726298, 0.0, 45.74801255881861, 0.6009155901093554, 3.8843531249961387, 255.5973743918299, 216076.36001404436, 13572.63279995516, 6.0995893250056366, 364.92895618522067, 4.2640726765416295, 3.5220968308719924, 15.751386747089091, 4.013689588890098, 20.109588711161173, 0.45596232156333016, 1.9622096635746784, 4.6098062526230805, 0.0766092844756383, 7.973047124038917, 6.572006292776479], "step_size_adaptation_m-xnes": [0.2128771251222575, 2175.9911995416633, 20.436941805668347, 11.874870433498376, 0.0, 23.67188356114161, 2.06863388735908, 7.124026768321718, 9.334989121212406, 1927.7199333450003, 256392.77602054173, 1447.1339175020742, 38.68678171583889, 0.507065623652203, 23.765329153563307, 15.751386747089091, 2.8882839166643373, 31.366999377372224, 0.45596232156333016, 2.3635090154834093, 0.468273715165139, 0.43657925301521344, 7.819215103671837, 13.559901346383807], "step_size_adaptation_lp-xnes": [1.7332064445719135, 4419.683662806733, 17.098151479760066, 14.562651567781344, 4.766267911300538, 69.25192076187827, 0.6009155901093554, 4.86338515091338, 4.659487885926412, 4387.270026268735, 105506.4994294811, 2512.760076108818, 26.12703076068069, 0.7712613615349452, 5.842531481068854, 8.687032329133258, 7.279161607717867, 61.20592146441307, 3.7441941916707453, 2.4622889158859524, 2.8965324473324405, 12.262312434816211, 17.732168443379564, 3.496022765487944], "step_size_adaptation_psr": [0.2128771251222575, 1088.50577919315, 
16.68845567069546, 5.528864747424452, 0.0, 149.5278041999801, 2.06863388735908, 7.124026768321718, 9.334989121212406, 1927.7199333450003, 207576.525365986, 2683.7038475009585, 38.68678171583889, 0.3653061851521225, 23.765329153563307, 15.751386747089091, 3.855740332598697, 31.366999377372224, 0.45596232156333016, 3.8792443115816027, 0.34411638396871275, 8.044092332563332, 3.6171907727392005, 3.6556955151225727], "mirrored_mirrored": [0.7554264630844969, 30.995928352373664, 10.727147924627282, 28.355058498656746, 0.0, 46.996785715812074, 5.408240310984194, 19.205395254336526, 10.73932209031642, 306.75329191214263, 47432.055266400115, 2683.7038475009585, 199.99567611652017, 1.976899305627097, 8.448239046275624, 10.619873785014807, 2.0468365502110726, 3.1760484231994948, 0.45596232156333016, 3.141781349728834, 2.5738103658736144, 2.7415047738254796, 11.184397856402033, 13.905359243714296], "mirrored_mirrored pairwise": [0.0337383526199124, 981.7390996048255, 10.727147924627282, 10.296884748017064, 0.0, 46.996785715812074, 0.6009155901093554, 19.205395254336526, 10.73932209031642, 191683.40442372047, 157018.28884094278, 2683.7038475009585, 199.99567611652017, 1.976899305627097, 29.385496516377007, 17.063577075490553, 6.7589928438885085, 32.397142264452846, 0.45596232156333016, 4.628179175342729, 1.2275961378528064, 2.7415047738254796, 1.3838182113654058, 15.305137607196425], "base_sampler_gaussian": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "base_sampler_sobol": [0.5892875981090875, 48001.18013114975, 13.917655789452, 29.56067588781827, 0.0, 
0.7172460642807635, 2.5001933979731294, 3.0140086883578876, 7.518727278298435, 467.72756126032994, 2035.209239015712, 1565.8411907388265, 22.404521901426385, 0.21692960177602713, 4.700091505389068, 4.289849820201781, 3.229779796294666, 6.012336634890072, 2.4853063838721, 2.0779235694755376, 1.001291938509303, 0.4849187167015082, 7.94398818014778, 4.500853718658098], "base_sampler_halton": [3.019801975442402, 3317.5551433642418, 40.36750518704198, 23.680814968882338, 0.0, 6.521598610651012, 5.408240310984194, 44.229528720973704, 127.2630285764255, 2115.079873154195, 231.44587184398802, 7536.921161863595, 101.44795694610097, 2.5953504727943497, 33.75364911983205, 3.0324069782469003, 3.419019722668727, 9.804308274920382, 0.2678491799153715, 4.243267425382138, 1.4276966103833042, 0.8717851151576597, 8.873726771455061, 4.536831812468478], "weights_option_default": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "weights_option_equal": [0.6983733129352273, 53903.6720731987, 16.490693161011563, 19.983425781178234, 0.0, 708.6726443211569, 5.408240310984194, 1.3914507176564086, 9.829561857450923, 175618.5411670374, 297974.97039696446, 2683.7038475009585, 84.70468494483676, 0.26559027142197483, 11.09507570411469, 12.546429428411361, 4.1243318983523505, 8.982949573228526, 0.3552769868031991, 5.125690097532708, 2.0431895662294046, 1.9396308505454334, 6.357120407300892, 16.044865034720715], "weights_option_1/2^lambda": [0.29537552250716487, 14961.853448878112, 24.071007496708887, 15.363617700667014, 0.0, 120.02761077294967, 2.06863388735908, 12.504251100709112, 
10.055671647360192, 658.2473391082618, 158260.00841298996, 2683.7038475009585, 41.97075757134694, 0.34731396273566734, 18.686167007123053, 12.985714196324263, 4.07446399965624, 14.549555170428903, 0.45596232156333016, 2.5857528075054406, 3.0145388205852814, 1.8648570790235144, 16.61377903494147, 4.655668386999551], "local_restart_restart": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "local_restart_IPOP": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "local_restart_BIPOP": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "bound_correction_saturate": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 23.49437556531035, 2.06863388735908, 0.025008654234556386, 48.30190325684181, 7345.1695658282915, 39778.92602634714, 
99.69985510848147, 81.24353869555809, 0.21972787749128833, 23.765329153563307, 12.874535394781748, 0.9011977159517945, 11.737632986991393, 0.6914037267515631, 1.3074796822154253, 0.8684924540993583, 0.4972890426678059, 7.630280738996362, 13.415576504676817], "bound_correction_unif_resample": [0.0783516688829975, 833.1172241710985, 19.627044667914628, 9.628926397266323, 5.160304518362756, 7.318815196747931, 5.408240310984194, 1.7357869403053507, 1.0497014040510553, 41678.76392402864, 168961.6744824512, 240.6715351855804, 3.8354665436620783, 0.6842126813587444, 13.958153917706909, 3.6781775698555803, 2.4855552649703982, 31.962561311838936, 0.10223176862010597, 3.162540017099194, 2.0966511126841403, 0.29434737191762333, 5.120439721244383, 12.236723816747919], "bound_correction_COTN": [0.1676093847024217, 4351.211417663173, 6.047980325222354, 21.91257374886981, 3.1244128771674005, 12.501498772381812, 5.408240310984194, 5.838321652506101, 0.14735284098644813, 190.06470116101278, 401.0202318645261, 2683.7038475009585, 123.96634855859544, 0.7422687242316163, 4.997713381178812, 9.253215577323598, 3.996596680370049, 0.6749752706461339, 1.5938722070154228, 2.732266723801698, 2.2807913444204497, 0.0009542510237096514, 5.825774943796979, 11.291346927128016], "bound_correction_toroidal": [1.4444217300032238, 668.4603295428241, 13.575313361038358, 20.61592906071919, 2.022308824107819, 24.33187479432261, 2.06863388735908, 13.504258712369403, 55.92343205593169, 52508.86982339249, 7782.459127523121, 1.103207913159003, 142.56590632579122, 1.851593522186194, 23.765329153563307, 13.681357344351518, 3.1605136092852923, 12.298476154812617, 0.15035979308805913, 1.3074796822154253, 4.6098062526230805, 2.216804022881296, 4.916798493704158, 7.427104342940737], "bound_correction_mirror": [1.1660440655024689, 1651.4439597423875, 8.374450285848463, 16.891738458104978, 5.8032753382871505, 23.566989484378954, 4.51871992360822, 7.7492720281054694, 1.0486600710191158, 25873.673262665758, 
900.4477757873159, 1133.397332721584, 70.13442419670166, 1.1138637242611469, 17.042022539322556, 3.953426355571292, 1.2140101666428544, 4.101698783919491, 0.21716792040892763, 3.0087533050106425, 1.9277425466961398, 1.8126859185350808, 10.029522452684994, 14.769096401085445]} \ No newline at end of file +{"active_True": [0.5435765665062582, 5437.747040771441, 10.01660437953625, 2.2719832345541144, 0.0, 199.2782474326513, 5.408240310984194, 3.5024014085354476, 16.10070347007742, 33915.32642536684, 5594.267386966566, 2683.7038475009585, 28.144026717256793, 0.385064441869341, 21.044273488406503, 15.751386747089091, 1.3972411708519288, 22.906212773882444, 0.45596232156333016, 2.1902024625011554, 4.5959310419633415, 8.40924249765171, 16.44976652202346, 10.449338566719732], "elitist_True": [0.6699833162594517, 1227.9721054463246, 16.869910137817563, 19.572392428411913, 0.0, 117.99827705243017, 2.4036623604374214, 7.623121487433538, 3.1615579688959676, 14355.480702885849, 29507.974482338646, 2683.7038475009585, 28.411660356917054, 0.6299432344321185, 7.265400966462051, 14.434537776482069, 1.4118850095554683, 23.630235905877907, 0.45596232156333016, 2.1620882643582515, 1.990963814929167, 7.845446138574839, 7.628611542891237, 14.194368416234731], "orthogonal_True": [0.2553427181462442, 118072.3053998135, 2.009699293124982, 10.626106984940423, 2.8786810407197256, 72413.21225551268, 2.5001933979731294, 12.550406947379917, 0.3352504442463413, 25908.565659852648, 1237306.2027346084, 1702.7335324051267, 70.43902585372231, 0.6976581726574436, 12.520744182648356, 12.812916264391731, 58.44704396496184, 66.62435462123153, 2.6677373415628356, 2.15875878922896, 2.737873259042217, 2.0770051329721895, 7.955480122219778, 14.581514536704194], "sequential_True": [19.5719406285756, 7210188.745246258, 13.68179355336543, 23.313513761722596, 0.0, 17730.504224411925, 0.6009155901093554, 184.20999101158142, 384.33208310135416, 23103333.8120513, 32971525.026216425, 2683.7038475009585, 
214.39433012072388, 57.622398804713555, 25.180917948707858, 20.162513485514452, 1.977554466397431, 52.204673425080216, 0.45596232156333016, 259.10398673490283, 2.0601778166852416, 11.496305678387733, 8.260780485904256, 43.60153110381541], "threshold_convergence_True": [0.7347847130472621, 3621.5783803371164, 9.440988022081847, 17.58836091588514, 0.0, 214.79259349995988, 2.5001933979731294, 24.54379070614484, 11.6043233309905, 419398.0095371448, 3430.404715228194, 926.4455952079579, 28.411660356917054, 0.6893117820736763, 32.31733995435944, 15.751386747089091, 1.4118850095554683, 12.429946026580991, 0.45596232156333016, 3.055334914776176, 0.892465554399863, 1.9930860112728006, 6.08527369530883, 7.7464244673661105], "step_size_adaptation_csa": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "step_size_adaptation_tpa": [0.03767014607534145, 2175.9911995416633, 13.567807397656388, 11.874870433498376, 0.0, 524.5555243872985, 1.3858714981987188, 7.124026768321718, 9.334989121212406, 266.0481801806989, 252465.61185230376, 691.4718777227441, 38.68678171583889, 0.28663649612106273, 11.326830236653748, 15.751386747089091, 3.855740332598697, 32.94246424988947, 0.45596232156333016, 1.9737590640108762, 2.590406047723298, 0.0011090400311043107, 9.77176743348311, 3.9402019732301254], "step_size_adaptation_msr": [0.2128771251222575, 295.5488424459397, 30.87244163004304, 2.4385399363203035, 0.0, 172.463698480376, 2.06863388735908, 7.124026768321718, 9.334989121212406, 1927.7199333450003, 252293.3746024698, 2683.7038475009585, 38.68678171583889, 0.1609115280864648, 
23.765329153563307, 15.751386747089091, 3.4535519370597645, 31.366999377372224, 0.45596232156333016, 2.101822038572848, 0.13402589068051024, 1.7724620177602137, 5.498985829998998, 10.501507178292282], "step_size_adaptation_xnes": [3.685803014042314, 53903.6720731987, 5.42438751362041, 8.476988158726298, 0.0, 45.74801255881861, 0.6009155901093554, 3.8843531249961387, 255.5973743918299, 216076.36001404436, 13572.63279995516, 6.0995893250056366, 364.92895618522067, 4.2640726765416295, 3.5220968308719924, 15.751386747089091, 4.013689588890098, 20.109588711161173, 0.45596232156333016, 1.9622096635746784, 4.6098062526230805, 0.0766092844756383, 7.973047124038917, 6.572006292776479], "step_size_adaptation_m-xnes": [0.2128771251222575, 2175.9911995416633, 20.436941805668347, 11.874870433498376, 0.0, 23.67188356114161, 2.06863388735908, 7.124026768321718, 9.334989121212406, 1927.7199333450003, 256392.77602054173, 1447.1339175020742, 38.68678171583889, 0.507065623652203, 23.765329153563307, 15.751386747089091, 2.8882839166643373, 31.366999377372224, 0.45596232156333016, 2.3635090154834093, 0.468273715165139, 0.43657925301521344, 7.819215103671837, 13.559901346383807], "step_size_adaptation_lp-xnes": [1.7332064445719135, 4419.683662806733, 17.098151479760066, 14.562651567781344, 4.766267911300538, 69.25192076187827, 0.6009155901093554, 4.86338515091338, 4.659487885926412, 4387.270026268735, 105506.4994294811, 2512.760076108818, 26.12703076068069, 0.7712613615349452, 5.842531481068854, 8.687032329133258, 7.279161607717867, 61.20592146441307, 3.7441941916707453, 2.4622889158859524, 2.8965324473324405, 12.262312434816211, 17.732168443379564, 3.496022765487944], "step_size_adaptation_psr": [0.2128771251222575, 1088.50577919315, 16.68845567069546, 5.528864747424452, 0.0, 149.5278041999801, 2.06863388735908, 7.124026768321718, 9.334989121212406, 1927.7199333450003, 207576.525365986, 2683.7038475009585, 38.68678171583889, 0.3653061851521225, 23.765329153563307, 15.751386747089091, 
3.855740332598697, 31.366999377372224, 0.45596232156333016, 3.8792443115816027, 0.34411638396871275, 8.044092332563332, 3.6171907727392005, 3.6556955151225727], "mirrored_mirrored": [0.7554264630844969, 30.995928352373664, 10.727147924627282, 28.355058498656746, 0.0, 46.996785715812074, 5.408240310984194, 19.205395254336526, 10.73932209031642, 306.75329191214263, 47432.055266400115, 2683.7038475009585, 199.99567611652017, 1.976899305627097, 8.448239046275624, 10.619873785014807, 2.0468365502110726, 3.1760484231994948, 0.45596232156333016, 3.141781349728834, 2.5738103658736144, 2.7415047738254796, 11.184397856402033, 13.905359243714296], "mirrored_mirrored pairwise": [0.0337383526199124, 981.7390996048255, 10.727147924627282, 10.296884748017064, 0.0, 46.996785715812074, 0.6009155901093554, 19.205395254336526, 10.73932209031642, 191683.40442372047, 157018.28884094278, 2683.7038475009585, 199.99567611652017, 1.976899305627097, 29.385496516377007, 17.063577075490553, 6.7589928438885085, 32.397142264452846, 0.45596232156333016, 4.628179175342729, 1.2275961378528064, 2.7415047738254796, 1.3838182113654058, 15.305137607196425], "base_sampler_gaussian": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "base_sampler_sobol": [0.5892875981090875, 48001.18013114975, 13.917655789452, 29.56067588781827, 0.0, 0.7172460642807635, 2.5001933979731294, 3.0140086883578876, 7.518727278298435, 467.72756126032994, 2035.209239015712, 1565.8411907388265, 22.404521901426385, 0.21692960177602713, 4.700091505389068, 4.289849820201781, 3.229779796294666, 6.012336634890072, 2.4853063838721, 
2.0779235694755376, 1.001291938509303, 0.4849187167015082, 7.94398818014778, 4.500853718658098], "base_sampler_halton": [0.47696182587839053, 11522.133898186243, 26.153393134456852, 23.431183135694514, 0.0, 2.5336961901681896, 1.6834980525567496, 0.44279172929670246, 9.34883139664984, 2369.655693913885, 964.3252925498961, 78195.25039369489, 66.54872706381528, 0.4599805819432943, 53.087027897551124, 0.45802758622619133, 1.8556466626738177, 11.93084133248119, 0.4608776922036153, 4.293696040644598, 3.2559281887179288, 0.11404958969503612, 10.258905441776156, 14.299641692864355], "weights_option_default": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "weights_option_equal": [0.6983733129352273, 53903.6720731987, 16.490693161011563, 19.983425781178234, 0.0, 708.6726443211569, 5.408240310984194, 1.3914507176564086, 9.829561857450923, 175618.5411670374, 297974.97039696446, 2683.7038475009585, 84.70468494483676, 0.26559027142197483, 11.09507570411469, 12.546429428411361, 4.1243318983523505, 8.982949573228526, 0.3552769868031991, 5.125690097532708, 2.0431895662294046, 1.9396308505454334, 6.357120407300892, 16.044865034720715], "weights_option_1/2^lambda": [0.29537552250716487, 14961.853448878112, 24.071007496708887, 15.363617700667014, 0.0, 120.02761077294967, 2.06863388735908, 12.504251100709112, 10.055671647360192, 658.2473391082618, 158260.00841298996, 2683.7038475009585, 41.97075757134694, 0.34731396273566734, 18.686167007123053, 12.985714196324263, 4.07446399965624, 14.549555170428903, 0.45596232156333016, 2.5857528075054406, 3.0145388205852814, 
1.8648570790235144, 16.61377903494147, 4.655668386999551], "local_restart_restart": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "local_restart_IPOP": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "local_restart_BIPOP": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], "local_restart_STOP": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 191.552220667987, 2.06863388735908, 7.623121487433538, 2.4098569399412817, 8829.953859147105, 75722.58537541109, 2683.7038475009585, 28.411660356917054, 0.46048487751008516, 23.765329153563307, 20.162513485514452, 1.4118850095554683, 22.517169808626498, 0.45596232156333016, 1.3074796822154253, 2.011681868741659, 10.719130303012587, 17.153972345559545, 15.819483090191303], 
"bound_correction_saturate": [0.6699833162594517, 4858.983093703776, 13.575313361038358, 20.61592906071919, 0.0, 23.49437556531035, 2.06863388735908, 0.025008654234556386, 48.30190325684181, 7345.1695658282915, 39778.92602634714, 99.69985510848147, 81.24353869555809, 0.21972787749128833, 23.765329153563307, 12.874535394781748, 0.9011977159517945, 11.737632986991393, 0.6914037267515631, 1.3074796822154253, 0.8684924540993583, 0.4972890426678059, 7.630280738996362, 13.415576504676817], "bound_correction_unif_resample": [0.0783516688829975, 833.1172241710985, 19.627044667914628, 9.628926397266323, 5.160304518362756, 7.318815196747931, 5.408240310984194, 1.7357869403053507, 1.0497014040510553, 41678.76392402864, 168961.6744824512, 240.6715351855804, 3.8354665436620783, 0.6842126813587444, 13.958153917706909, 3.6781775698555803, 2.4855552649703982, 31.962561311838936, 0.10223176862010597, 3.162540017099194, 2.0966511126841403, 0.29434737191762333, 5.120439721244383, 12.236723816747919], "bound_correction_COTN": [0.1676093847024217, 4351.211417663173, 6.047980325222354, 21.91257374886981, 3.1244128771674005, 12.501498772381812, 5.408240310984194, 5.838321652506101, 0.14735284098644813, 190.06470116101278, 401.0202318645261, 2683.7038475009585, 123.96634855859544, 0.7422687242316163, 4.997713381178812, 9.253215577323598, 3.996596680370049, 0.6749752706461339, 1.5938722070154228, 2.732266723801698, 2.2807913444204497, 0.0009542510237096514, 5.825774943796979, 11.291346927128016], "bound_correction_toroidal": [1.4444217300032238, 668.4603295428241, 13.575313361038358, 20.61592906071919, 2.022308824107819, 24.33187479432261, 2.06863388735908, 13.504258712369403, 55.92343205593169, 52508.86982339249, 7782.459127523121, 1.103207913159003, 142.56590632579122, 1.851593522186194, 23.765329153563307, 13.681357344351518, 3.1605136092852923, 12.298476154812617, 0.15035979308805913, 1.3074796822154253, 4.6098062526230805, 2.216804022881296, 4.916798493704158, 7.427104342940737], 
"bound_correction_mirror": [1.1660440655024689, 1651.4439597423875, 8.374450285848463, 16.891738458104978, 5.8032753382871505, 23.566989484378954, 4.51871992360822, 7.7492720281054694, 1.0486600710191158, 25873.673262665758, 900.4477757873159, 1133.397332721584, 70.13442419670166, 1.1138637242611469, 17.042022539322556, 3.953426355571292, 1.2140101666428544, 4.101698783919491, 0.21716792040892763, 3.0087533050106425, 1.9277425466961398, 1.8126859185350808, 10.029522452684994, 14.769096401085445]} \ No newline at end of file diff --git a/tests/test_c_adaptation.py b/tests/test_c_adaptation.py index b39cac4..78f4ba9 100644 --- a/tests/test_c_adaptation.py +++ b/tests/test_c_adaptation.py @@ -23,7 +23,7 @@ def test_matrix_adaptation(self): M = cma.p.adaptation.M.copy() z = np.sum(cma.p.weights.positive * cma.p.pop.Z[:, :cma.p.mu], axis=1, keepdims=True) - ps = ((1.0 - cma.p.mutation.cs) * cma.p.adaptation.ps + (np.sqrt(cma.p.mutation.cs * (2.0 - cma.p.mutation.cs) * cma.p.weights.mueff) * z.ravel())).reshape(-1, 1) + ps = ((1.0 - cma.p.weights.cs) * cma.p.adaptation.ps + (np.sqrt(cma.p.weights.cs * (2.0 - cma.p.weights.cs) * cma.p.weights.mueff) * z.ravel())).reshape(-1, 1) old_M = ((1 - 0.5 * cma.p.weights.c1 - 0.5 * cma.p.weights.cmu) * M) scaled_ps = ((0.5 * cma.p.weights.c1) * M.dot(ps).dot(ps.T)) new_M = ((0.5 * cma.p.weights.cmu * cma.p.weights.positive) * cma.p.pop.Y[:, :cma.p.mu]).dot(cma.p.pop.Z[:, :cma.p.mu].T) diff --git a/tests/test_c_bounds.py b/tests/test_c_bounds.py index d7ff399..1af8b1b 100644 --- a/tests/test_c_bounds.py +++ b/tests/test_c_bounds.py @@ -2,7 +2,7 @@ import unittest import numpy as np -from modcma.c_maes import bounds, Population, Parameters, parameters +from modcma.c_maes import bounds, Population, Parameters, parameters, options @@ -16,8 +16,14 @@ class TestBounds(unittest.TestCase): bounds.Toroidal, bounds.UniformResample, ) - __do_nothing = (bounds.NoCorrection, ) + __bound_fixers_options = ( + options.CorrectionMethod.COTN, + 
options.CorrectionMethod.MIRROR, + options.CorrectionMethod.SATURATE, + options.CorrectionMethod.TOROIDAL, + options.CorrectionMethod.UNIFORM_RESAMPLE, + ) def setUp(self): self.lb, self.ub = np.zeros(2), np.ones(2) * 2 self.par = Parameters(parameters.Settings(2, lambda0=2, lb=self.lb, ub=self.ub)) @@ -30,12 +36,14 @@ def setUp(self): self.par.pop.X = self.par.adaptation.m + (self.par.pop.s * self.par.pop.Y) def test_bound_fixers(self): - for boundcntrl in self.__bound_fixers: + for boundcntrl, option in zip(self.__bound_fixers, self.__bound_fixers_options): + self.par.settings.modules.bound_correction = option method = boundcntrl(self.lb, self.ub) method.correct(1, self.par) self.assertEqual(method.n_out_of_bounds, 0) method.correct(0, self.par) self.assertEqual(method.n_out_of_bounds, 1) + self.assertTrue(np.all(self.par.pop.X <= 2)) self.assertTrue(np.all(np.isclose(self.par.pop.Y.ravel()[1:], 0.9))) self.assertTrue(np.all(np.isclose(self.par.pop.X.ravel()[1:], 1.9))) diff --git a/tests/test_c_mutation.py b/tests/test_c_mutation.py index 560d98e..44f4d9a 100644 --- a/tests/test_c_mutation.py +++ b/tests/test_c_mutation.py @@ -22,9 +22,9 @@ def test_sigma_sampler(self): ss = mutation.SigmaSampler(2) noss = mutation.NoSigmaSampler(2) - ss.sample(2.0, self.pop) + ss.sample(2.0, self.pop, 1) self.assertFalse(np.all(self.pop.s == 2.0)) - noss.sample(2.0, self.pop) + noss.sample(2.0, self.pop, 1) self.assertTrue(np.all(self.pop.s == 2.0)) def test_threshold_convergence(self): @@ -59,7 +59,8 @@ def get_cma(self, ssa, adapt_sigma=True): cma.select() cma.recombine() cma.p.adaptation.adapt_evolution_paths( - cma.p.pop, cma.p.weights, cma.p.mutation, cma.p.stats, cma.p.mu, cma.p.lamb + cma.p.pop, cma.p.weights, cma.p.stats, + cma.p.settings, cma.p.mu, cma.p.lamb ) cma.p.mutation.adapt( cma.p.weights, cma.p.adaptation, cma.p.pop, cma.p.old_pop, cma.p.stats, cma.p.lamb @@ -73,14 +74,14 @@ def test_adapt_csa(self): cma.p.mutation.sigma, cma.p.settings.sigma0 * np.exp( - 
(cma.p.mutation.cs / cma.p.mutation.damps) + (cma.p.weights.cs / cma.p.weights.damps) * ((np.linalg.norm(cma.p.adaptation.ps) / cma.p.sampler.expected_length()) - 1) ), ) def test_adapt_tpa(self): cma = self.get_cma(options.TPA) - s = ((1 - cma.p.mutation.cs) * 0) + (cma.p.mutation.cs * cma.p.mutation.a_tpa) + s = ((1 - cma.p.weights.cs) * 0) + (cma.p.weights.cs * cma.p.mutation.a_tpa) self.assertAlmostEqual(cma.p.mutation.sigma, cma.p.settings.sigma0 * np.exp(s)) def test_adapt_msr(self): @@ -106,7 +107,7 @@ def test_adapt_xnes(self): self.assertTrue(np.isclose( cma.p.mutation.sigma, cma.p.settings.sigma0 - * np.exp((cma.p.mutation.cs / np.sqrt(cma.p.settings.dim)) * (w * z).sum()), + * np.exp((cma.p.weights.cs / np.sqrt(cma.p.settings.dim)) * (w * z).sum()), )) @@ -115,8 +116,8 @@ def test_adapt_lpxnes(self): w = cma.p.weights.weights.clip(0)[: cma.p.pop.n] - z = np.exp(cma.p.mutation.cs * (w @ np.log(cma.p.pop.s))) - sigma = np.power(cma.p.settings.sigma0, 1 - cma.p.mutation.cs) * z + z = np.exp(cma.p.weights.cs * (w @ np.log(cma.p.pop.s))) + sigma = np.power(cma.p.settings.sigma0, 1 - cma.p.weights.cs) * z self.assertTrue(np.isclose(cma.p.mutation.sigma, sigma)) if __name__ == "__main__": diff --git a/tests/test_modularcmaes.py b/tests/test_modularcmaes.py index a3d0cd2..aeb44ba 100644 --- a/tests/test_modularcmaes.py +++ b/tests/test_modularcmaes.py @@ -127,7 +127,7 @@ def test_n_generations(self): self.assertTrue(any(c.break_conditions)) c = modularcmaes.ModularCMAES(sum, 5) - self.assertEqual(2, len(c.break_conditions)) + self.assertEqual(3, len(c.break_conditions)) def testtpa_mutation(self): """Test tpa mutation.""" diff --git a/tests/test_utils.py b/tests/test_utils.py index 053bd6d..53a58b3 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -93,13 +93,13 @@ def test_ert(self): """Test ert method.""" evals = [5000, 45000, 1000, 100, 10] budget = 10000 - ert, ert_sd, n_succ = utils.ert(evals, budget) - self.assertEqual(n_succ, 4) - 
self.assertAlmostEqual(ert, 12777.5) + n_succ = 3 + ert, ert_sd, n_succ = utils.ert(evals, n_succ) + self.assertAlmostEqual(ert, 17036.666666666668) self.assertAlmostEqual(ert_sd, 17484.642861665) for evals in ([50000], [], [int(1e10)]): - ert, ert_sd, n_succ = utils.ert(evals, budget) + ert, ert_sd, n_succ = utils.ert(evals, 0) self.assertEqual(ert, float("inf")) self.assertEqual(np.isnan(ert_sd), True) self.assertEqual(n_succ, 0)