Un-ignore ceres

Branch: main
wangchongwu committed 4 weeks ago
parent 23f17ab0a7 · commit 75fed1cb6e

Binary files not shown (7 files).

@@ -0,0 +1,48 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef CERES_PUBLIC_VERSION_H_
#define CERES_PUBLIC_VERSION_H_
#define CERES_VERSION_MAJOR 1
#define CERES_VERSION_MINOR 14
#define CERES_VERSION_REVISION 0
// Classic CPP stringification; the extra level of indirection allows the
// preprocessor to expand the macro before being converted to a string.
#define CERES_TO_STRING_HELPER(x) #x
#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)
// The Ceres version as a string; for example "1.9.0".
#define CERES_VERSION_STRING CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
CERES_TO_STRING(CERES_VERSION_MINOR) "." \
CERES_TO_STRING(CERES_VERSION_REVISION)
#endif // CERES_PUBLIC_VERSION_H_
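The comment above describes the classic two-step stringification idiom: the # operator stringifies its argument without expanding it, so a helper macro is needed to force expansion first. A minimal standalone sketch (STR, STR_DIRECT, and MAJOR are illustrative names, not part of Ceres):

#include <cstdio>

#define STR_DIRECT(x) #x        // stringifies the argument as written
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)    // expands x first, then stringifies
#define MAJOR 1

int main() {
  std::printf("%s vs %s\n", STR_DIRECT(MAJOR), STR(MAJOR));  // prints: MAJOR vs 1
  return 0;
}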

Binary files not shown (10 files).

@@ -1,159 +0,0 @@
//#define EIGEN_POWER_USE_PREFETCH // Use prefetching in gemm routines
#ifdef EIGEN_POWER_USE_PREFETCH
#define EIGEN_POWER_PREFETCH(p) prefetch(p)
#else
#define EIGEN_POWER_PREFETCH(p)
#endif
namespace Eigen {
namespace internal {
template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accRows, const Index accCols>
EIGEN_ALWAYS_INLINE void gemm_extra_row(
const DataMapper& res,
const Scalar* lhs_base,
const Scalar* rhs_base,
Index depth,
Index strideA,
Index offsetA,
Index row,
Index col,
Index rows,
Index cols,
Index remaining_rows,
const Packet& pAlpha,
const Packet& pMask);
template<typename Scalar, typename Packet, typename DataMapper, typename Index, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
EIGEN_STRONG_INLINE void gemm_extra_cols(
const DataMapper& res,
const Scalar* blockA,
const Scalar* blockB,
Index depth,
Index strideA,
Index offsetA,
Index strideB,
Index offsetB,
Index col,
Index rows,
Index cols,
Index remaining_rows,
const Packet& pAlpha,
const Packet& pMask);
template<typename Packet>
EIGEN_ALWAYS_INLINE Packet bmask(const int remaining_rows);
template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accRows, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
EIGEN_ALWAYS_INLINE void gemm_complex_extra_row(
const DataMapper& res,
const Scalar* lhs_base,
const Scalar* rhs_base,
Index depth,
Index strideA,
Index offsetA,
Index strideB,
Index row,
Index col,
Index rows,
Index cols,
Index remaining_rows,
const Packet& pAlphaReal,
const Packet& pAlphaImag,
const Packet& pMask);
template<typename Scalar, typename Packet, typename Packetc, typename DataMapper, typename Index, const Index accCols, bool ConjugateLhs, bool ConjugateRhs, bool LhsIsReal, bool RhsIsReal>
EIGEN_STRONG_INLINE void gemm_complex_extra_cols(
const DataMapper& res,
const Scalar* blockA,
const Scalar* blockB,
Index depth,
Index strideA,
Index offsetA,
Index strideB,
Index offsetB,
Index col,
Index rows,
Index cols,
Index remaining_rows,
const Packet& pAlphaReal,
const Packet& pAlphaImag,
const Packet& pMask);
template<typename Scalar, typename Packet>
EIGEN_ALWAYS_INLINE Packet ploadLhs(const Scalar* lhs);
template<typename DataMapper, typename Packet, typename Index, const Index accCols, int StorageOrder, bool Complex, int N>
EIGEN_ALWAYS_INLINE void bload(PacketBlock<Packet,N>& acc, const DataMapper& res, Index row, Index col);
template<typename Packet, int N>
EIGEN_ALWAYS_INLINE void bscale(PacketBlock<Packet,N>& acc, PacketBlock<Packet,N>& accZ, const Packet& pAlpha);
template<typename Packet, int N>
EIGEN_ALWAYS_INLINE void bscalec(PacketBlock<Packet,N>& aReal, PacketBlock<Packet,N>& aImag, const Packet& bReal, const Packet& bImag, PacketBlock<Packet,N>& cReal, PacketBlock<Packet,N>& cImag);
// Grab two decoupled real/imaginary PacketBlocks and return two coupled (real/imaginary pairs) PacketBlocks.
template<typename Packet, typename Packetc, int N>
EIGEN_ALWAYS_INLINE void bcouple_common(PacketBlock<Packet,N>& taccReal, PacketBlock<Packet,N>& taccImag, PacketBlock<Packetc, N>& acc1, PacketBlock<Packetc, N>& acc2)
{
acc1.packet[0].v = vec_mergeh(taccReal.packet[0], taccImag.packet[0]);
if (N > 1) {
acc1.packet[1].v = vec_mergeh(taccReal.packet[1], taccImag.packet[1]);
}
if (N > 2) {
acc1.packet[2].v = vec_mergeh(taccReal.packet[2], taccImag.packet[2]);
}
if (N > 3) {
acc1.packet[3].v = vec_mergeh(taccReal.packet[3], taccImag.packet[3]);
}
acc2.packet[0].v = vec_mergel(taccReal.packet[0], taccImag.packet[0]);
if (N > 1) {
acc2.packet[1].v = vec_mergel(taccReal.packet[1], taccImag.packet[1]);
}
if (N > 2) {
acc2.packet[2].v = vec_mergel(taccReal.packet[2], taccImag.packet[2]);
}
if (N > 3) {
acc2.packet[3].v = vec_mergel(taccReal.packet[3], taccImag.packet[3]);
}
}
template<typename Packet, typename Packetc, int N>
EIGEN_ALWAYS_INLINE void bcouple(PacketBlock<Packet,N>& taccReal, PacketBlock<Packet,N>& taccImag, PacketBlock<Packetc,N*2>& tRes, PacketBlock<Packetc, N>& acc1, PacketBlock<Packetc, N>& acc2)
{
bcouple_common<Packet, Packetc, N>(taccReal, taccImag, acc1, acc2);
acc1.packet[0] = padd<Packetc>(tRes.packet[0], acc1.packet[0]);
if (N > 1) {
acc1.packet[1] = padd<Packetc>(tRes.packet[1], acc1.packet[1]);
}
if (N > 2) {
acc1.packet[2] = padd<Packetc>(tRes.packet[2], acc1.packet[2]);
}
if (N > 3) {
acc1.packet[3] = padd<Packetc>(tRes.packet[3], acc1.packet[3]);
}
acc2.packet[0] = padd<Packetc>(tRes.packet[0+N], acc2.packet[0]);
if (N > 1) {
acc2.packet[1] = padd<Packetc>(tRes.packet[1+N], acc2.packet[1]);
}
if (N > 2) {
acc2.packet[2] = padd<Packetc>(tRes.packet[2+N], acc2.packet[2]);
}
if (N > 3) {
acc2.packet[3] = padd<Packetc>(tRes.packet[3+N], acc2.packet[3]);
}
}
// This is necessary because ploadRhs for double returns a pair of vectors when MMA is enabled.
template<typename Scalar, typename Packet>
EIGEN_ALWAYS_INLINE Packet ploadRhs(const Scalar* rhs)
{
return ploadu<Packet>(rhs);
}
} // end namespace internal
} // end namespace Eigen
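Conceptually, bcouple_common in the deleted header above interleaves a block of real parts with a block of imaginary parts into (real, imag) pairs; vec_mergeh/vec_mergel merge the high and low halves of two vectors lane by lane. A scalar analogue of that interleave (plain C++, hypothetical helper name, no VSX intrinsics):

#include <array>
#include <complex>

// Scalar sketch of the coupling step: separate real/imaginary lanes are
// merged into complex (real, imag) pairs.
std::array<std::complex<float>, 4> couple(const std::array<float, 4>& re,
                                          const std::array<float, 4>& im) {
  std::array<std::complex<float>, 4> out;
  for (std::size_t i = 0; i < 4; ++i) out[i] = std::complex<float>(re[i], im[i]);
  return out;
}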

@@ -51,7 +51,7 @@
#define CERES_NO_SUITESPARSE
// If defined, Ceres was compiled without CUDA.
#define CERES_NO_CUDA
// #define CERES_NO_CUDA
// If defined, Ceres was compiled without Apple's Accelerate framework solvers.
#define CERES_NO_ACCELERATE_SPARSE

@@ -33,7 +33,6 @@
# define CERES_DEPRECATED_NO_EXPORT CERES_NO_EXPORT CERES_DEPRECATED
#endif
/* NOLINTNEXTLINE(readability-avoid-unconditional-preprocessor-if) */
#if 0 /* DEFINE_NO_DEPRECATED */
# ifndef CERES_NO_DEPRECATED
# define CERES_NO_DEPRECATED

@@ -0,0 +1,49 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: mierle@gmail.com (Keir Mierle)
#ifndef CERES_PUBLIC_VERSION_H_
#define CERES_PUBLIC_VERSION_H_
#define CERES_VERSION_MAJOR 2
#define CERES_VERSION_MINOR 2
#define CERES_VERSION_REVISION 0
// Classic CPP stringification; the extra level of indirection allows the
// preprocessor to expand the macro before being converted to a string.
#define CERES_TO_STRING_HELPER(x) #x
#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)
// The Ceres version as a string; for example "1.9.0".
#define CERES_VERSION_STRING \
CERES_TO_STRING(CERES_VERSION_MAJOR) \
"." CERES_TO_STRING(CERES_VERSION_MINOR) "." CERES_TO_STRING( \
CERES_VERSION_REVISION)
#endif // CERES_PUBLIC_VERSION_H_
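A typical consumer-side use of these macros (a hedged sketch, not taken from this repository) is a compile-time version gate plus a runtime printout:

#include <ceres/version.h>
#include <cstdio>

#if CERES_VERSION_MAJOR < 2
#error "This project expects Ceres 2.x or newer"
#endif

int main() {
  std::printf("Built against Ceres %s\n", CERES_VERSION_STRING);
  return 0;
}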

@@ -22,7 +22,7 @@ extern "C" {
* This module provides an interface to the Cholmod library which is part of the <a href="http://www.suitesparse.com">suitesparse</a> package.
* It provides the following two main factorization classes:
* - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization.
* - class CholmodDecomposition: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
* - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial).
*
* For the sake of completeness, this module also proposes the following two classes:
* - class CholmodSimplicialLLT

@@ -83,8 +83,8 @@
#include <cmath>
#include <cassert>
#include <functional>
#include <sstream>
#ifndef EIGEN_NO_IO
#include <sstream>
#include <iosfwd>
#endif
#include <cstring>
@@ -109,8 +109,7 @@
#endif
// required for __cpuid, needs to be included after cmath
// also required for _BitScanReverse on Windows on ARM
#if EIGEN_COMP_MSVC && (EIGEN_ARCH_i386_OR_x86_64 || EIGEN_ARCH_ARM64) && !EIGEN_OS_WINCE
#if EIGEN_COMP_MSVC && EIGEN_ARCH_i386_OR_x86_64 && !EIGEN_OS_WINCE
#include <intrin.h>
#endif
@@ -347,7 +346,7 @@ using std::ptrdiff_t;
#include "src/Core/CoreIterators.h"
#include "src/Core/ConditionEstimator.h"
#if defined(EIGEN_VECTORIZE_VSX)
#if defined(EIGEN_VECTORIZE_ALTIVEC) || defined(EIGEN_VECTORIZE_VSX)
#include "src/Core/arch/AltiVec/MatrixProduct.h"
#elif defined EIGEN_VECTORIZE_NEON
#include "src/Core/arch/NEON/GeneralBlockPanelKernel.h"

@@ -25,6 +25,8 @@
#include "src/Core/util/DisableStupidWarnings.h"
#include "src/SparseLU/SparseLU_gemm_kernel.h"
#include "src/SparseLU/SparseLU_Structs.h"
#include "src/SparseLU/SparseLU_SupernodalMatrix.h"
#include "src/SparseLU/SparseLUImpl.h"

@@ -172,8 +172,7 @@ seqN(FirstType first, SizeType size) {
return ArithmeticSequence<typename internal::cleanup_index_type<FirstType>::type,typename internal::cleanup_index_type<SizeType>::type>(first,size);
}
#if EIGEN_HAS_CXX11
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and with positive (or negative) increment \a incr
*
@@ -184,15 +183,8 @@ seqN(FirstType first, SizeType size) {
*
* \sa seqN(FirstType,SizeType,IncrType), seq(FirstType,LastType)
*/
template<typename FirstType,typename LastType>
auto seq(FirstType f, LastType l) -> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),
( typename internal::cleanup_index_type<LastType>::type(l)
- typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>())))
{
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),
(typename internal::cleanup_index_type<LastType>::type(l)
-typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>()));
}
template<typename FirstType,typename LastType, typename IncrType>
auto seq(FirstType f, LastType l, IncrType incr);
/** \returns an ArithmeticSequence starting at \a f, up (or down) to \a l, and unit increment
*
@@ -203,6 +195,22 @@ auto seq(FirstType f, LastType l) -> decltype(seqN(typename internal::cleanup_in
*
* \sa seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType)
*/
template<typename FirstType,typename LastType>
auto seq(FirstType f, LastType l);
#else // EIGEN_PARSED_BY_DOXYGEN
#if EIGEN_HAS_CXX11
template<typename FirstType,typename LastType>
auto seq(FirstType f, LastType l) -> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),
( typename internal::cleanup_index_type<LastType>::type(l)
- typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>())))
{
return seqN(typename internal::cleanup_index_type<FirstType>::type(f),
(typename internal::cleanup_index_type<LastType>::type(l)
-typename internal::cleanup_index_type<FirstType>::type(f)+fix<1>()));
}
template<typename FirstType,typename LastType, typename IncrType>
auto seq(FirstType f, LastType l, IncrType incr)
-> decltype(seqN(typename internal::cleanup_index_type<FirstType>::type(f),
@@ -309,27 +317,13 @@ seq(const symbolic::BaseExpr<FirstTypeDerived> &f, const symbolic::BaseExpr<Last
}
#endif // EIGEN_HAS_CXX11
#if EIGEN_HAS_CXX11
/** \cpp11
* \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment.
*
* \anchor indexing_lastN
*
* It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode
*
* \sa lastN(SizeType,IncrType), seqN(FirstType,SizeType), seq(FirstType,LastType) */
template<typename SizeType>
auto lastN(SizeType size)
-> decltype(seqN(Eigen::last+fix<1>()-size, size))
{
return seqN(Eigen::last+fix<1>()-size, size);
}
#endif // EIGEN_PARSED_BY_DOXYGEN
#if EIGEN_HAS_CXX11 || defined(EIGEN_PARSED_BY_DOXYGEN)
/** \cpp11
* \returns a symbolic ArithmeticSequence representing the last \a size elements with increment \a incr.
*
* \anchor indexing_lastN_with_incr
*
* It is a shortcut for: \code seqN(last-(size-fix<1>)*incr, size, incr) \endcode
*
* \sa lastN(SizeType), seqN(FirstType,SizeType), seq(FirstType,LastType,IncrType) */
@@ -339,6 +333,19 @@ auto lastN(SizeType size, IncrType incr)
{
return seqN(Eigen::last-(size-fix<1>())*incr, size, incr);
}
/** \cpp11
* \returns a symbolic ArithmeticSequence representing the last \a size elements with a unit increment.
*
* It is a shortcut for: \code seq(last+fix<1>-size, last) \endcode
*
* \sa lastN(SizeType,IncrType), seqN(FirstType,SizeType), seq(FirstType,LastType) */
template<typename SizeType>
auto lastN(SizeType size)
-> decltype(seqN(Eigen::last+fix<1>()-size, size))
{
return seqN(Eigen::last+fix<1>()-size, size);
}
#endif
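For orientation, a small usage sketch of seq and lastN through the public Eigen 3.4 API (standard calls, not part of this diff):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(10, 0, 9);
  std::cout << v(Eigen::seq(2, 5)).transpose() << "\n";  // 2 3 4 5
  std::cout << v(Eigen::lastN(3)).transpose() << "\n";   // 7 8 9
  return 0;
}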
namespace internal {

@@ -163,15 +163,7 @@ class Array
#endif
#if EIGEN_HAS_CXX11
/** \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients. \cpp11
*
* \only_for_vectors
*
* This constructor is for 1D arrays or vectors with more than 4 coefficients.
* There exist C++98 analogue constructors for fixed-size arrays/vectors having 1, 2, 3, or 4 coefficients.
*
* \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
* constructor must match the fixed number of rows (resp. columns) of \c *this.
/** \copydoc PlainObjectBase(const Scalar& a0, const Scalar& a1, const Scalar& a2, const Scalar& a3, const ArgTypes&... args)
*
* Example: \include Array_variadic_ctor_cxx11.cpp
* Output: \verbinclude Array_variadic_ctor_cxx11.out

@@ -260,19 +260,19 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
template<int LoadMode>
EIGEN_DEVICE_FUNC inline PacketScalar packet(Index rowId, Index colId) const
inline PacketScalar packet(Index rowId, Index colId) const
{
return m_xpr.template packet<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value());
}
template<int LoadMode>
EIGEN_DEVICE_FUNC inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
inline void writePacket(Index rowId, Index colId, const PacketScalar& val)
{
m_xpr.template writePacket<Unaligned>(rowId + m_startRow.value(), colId + m_startCol.value(), val);
}
template<int LoadMode>
EIGEN_DEVICE_FUNC inline PacketScalar packet(Index index) const
inline PacketScalar packet(Index index) const
{
return m_xpr.template packet<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -280,7 +280,7 @@ template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel, bool H
}
template<int LoadMode>
EIGEN_DEVICE_FUNC inline void writePacket(Index index, const PacketScalar& val)
inline void writePacket(Index index, const PacketScalar& val)
{
m_xpr.template writePacket<Unaligned>
(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
@@ -334,17 +334,6 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
enum {
XprTypeIsRowMajor = (int(traits<XprType>::Flags)&RowMajorBit) != 0
};
/** \internal Returns base+offset (unless base is null, in which case returns null).
* Adding an offset to nullptr is undefined behavior, so we must avoid it.
*/
template <typename Scalar>
EIGEN_DEVICE_FUNC EIGEN_CONSTEXPR EIGEN_ALWAYS_INLINE
static Scalar* add_to_nullable_pointer(Scalar* base, Index offset)
{
return base != NULL ? base+offset : NULL;
}
public:
typedef MapBase<BlockType> Base;
@@ -355,9 +344,8 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
*/
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
BlockImpl_dense(XprType& xpr, Index i)
: Base((BlockRows == 0 || BlockCols == 0) ? NULL : add_to_nullable_pointer(xpr.data(),
i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor))
|| ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride())),
: Base(xpr.data() + i * ( ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && (!XprTypeIsRowMajor))
|| ((BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) && ( XprTypeIsRowMajor)) ? xpr.innerStride() : xpr.outerStride()),
BlockRows==1 ? 1 : xpr.rows(),
BlockCols==1 ? 1 : xpr.cols()),
m_xpr(xpr),
@@ -371,8 +359,7 @@ class BlockImpl_dense<XprType,BlockRows,BlockCols, InnerPanel,true>
*/
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
BlockImpl_dense(XprType& xpr, Index startRow, Index startCol)
: Base((BlockRows == 0 || BlockCols == 0) ? NULL : add_to_nullable_pointer(xpr.data(),
xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol))),
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)),
m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)
{
init();
@@ -384,9 +371,7 @@
BlockImpl_dense(XprType& xpr,
Index startRow, Index startCol,
Index blockRows, Index blockCols)
: Base((blockRows == 0 || blockCols == 0) ? NULL : add_to_nullable_pointer(xpr.data(),
xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol)),
blockRows, blockCols),
: Base(xpr.data()+xpr.innerStride()*(XprTypeIsRowMajor?startCol:startRow) + xpr.outerStride()*(XprTypeIsRowMajor?startRow:startCol), blockRows, blockCols),
m_xpr(xpr), m_startRow(startRow), m_startCol(startCol)
{
init();
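The add_to_nullable_pointer helper that disappears in these hunks exists because forming base + offset is undefined behavior in C++ when base is null and the offset is nonzero, even if the result is never dereferenced. A standalone sketch of the guarded pattern (offset_or_null is a hypothetical name):

#include <cstddef>

// Guarded pointer arithmetic: never form nullptr + nonzero offset, which is UB.
template <typename T>
T* offset_or_null(T* base, std::ptrdiff_t n) {
  return base != nullptr ? base + n : nullptr;
}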

@@ -14,56 +14,54 @@ namespace Eigen {
namespace internal {
template<typename Derived, int UnrollCount, int InnerSize>
template<typename Derived, int UnrollCount, int Rows>
struct all_unroller
{
enum {
IsRowMajor = (int(Derived::Flags) & int(RowMajor)),
i = (UnrollCount-1) / InnerSize,
j = (UnrollCount-1) % InnerSize
col = (UnrollCount-1) / Rows,
row = (UnrollCount-1) % Rows
};
EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat)
{
return all_unroller<Derived, UnrollCount-1, InnerSize>::run(mat) && mat.coeff(IsRowMajor ? i : j, IsRowMajor ? j : i);
return all_unroller<Derived, UnrollCount-1, Rows>::run(mat) && mat.coeff(row, col);
}
};
template<typename Derived, int InnerSize>
struct all_unroller<Derived, 0, InnerSize>
template<typename Derived, int Rows>
struct all_unroller<Derived, 0, Rows>
{
EIGEN_DEVICE_FUNC static inline bool run(const Derived &/*mat*/) { return true; }
};
template<typename Derived, int InnerSize>
struct all_unroller<Derived, Dynamic, InnerSize>
template<typename Derived, int Rows>
struct all_unroller<Derived, Dynamic, Rows>
{
EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; }
};
template<typename Derived, int UnrollCount, int InnerSize>
template<typename Derived, int UnrollCount, int Rows>
struct any_unroller
{
enum {
IsRowMajor = (int(Derived::Flags) & int(RowMajor)),
i = (UnrollCount-1) / InnerSize,
j = (UnrollCount-1) % InnerSize
col = (UnrollCount-1) / Rows,
row = (UnrollCount-1) % Rows
};
EIGEN_DEVICE_FUNC static inline bool run(const Derived &mat)
{
return any_unroller<Derived, UnrollCount-1, InnerSize>::run(mat) || mat.coeff(IsRowMajor ? i : j, IsRowMajor ? j : i);
return any_unroller<Derived, UnrollCount-1, Rows>::run(mat) || mat.coeff(row, col);
}
};
template<typename Derived, int InnerSize>
struct any_unroller<Derived, 0, InnerSize>
template<typename Derived, int Rows>
struct any_unroller<Derived, 0, Rows>
{
EIGEN_DEVICE_FUNC static inline bool run(const Derived & /*mat*/) { return false; }
};
template<typename Derived, int InnerSize>
struct any_unroller<Derived, Dynamic, InnerSize>
template<typename Derived, int Rows>
struct any_unroller<Derived, Dynamic, Rows>
{
EIGEN_DEVICE_FUNC static inline bool run(const Derived &) { return false; }
};
@@ -87,12 +85,12 @@ EIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::all() const
};
Evaluator evaluator(derived());
if(unroll)
return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic, InnerSizeAtCompileTime>::run(evaluator);
return internal::all_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic, internal::traits<Derived>::RowsAtCompileTime>::run(evaluator);
else
{
for(Index i = 0; i < derived().outerSize(); ++i)
for(Index j = 0; j < derived().innerSize(); ++j)
if (!evaluator.coeff(IsRowMajor ? i : j, IsRowMajor ? j : i)) return false;
for(Index j = 0; j < cols(); ++j)
for(Index i = 0; i < rows(); ++i)
if (!evaluator.coeff(i, j)) return false;
return true;
}
}
@@ -111,12 +109,12 @@ EIGEN_DEVICE_FUNC inline bool DenseBase<Derived>::any() const
};
Evaluator evaluator(derived());
if(unroll)
return internal::any_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic, InnerSizeAtCompileTime>::run(evaluator);
return internal::any_unroller<Evaluator, unroll ? int(SizeAtCompileTime) : Dynamic, internal::traits<Derived>::RowsAtCompileTime>::run(evaluator);
else
{
for(Index i = 0; i < derived().outerSize(); ++i)
for(Index j = 0; j < derived().innerSize(); ++j)
if (evaluator.coeff(IsRowMajor ? i : j, IsRowMajor ? j : i)) return true;
for(Index j = 0; j < cols(); ++j)
for(Index i = 0; i < rows(); ++i)
if (evaluator.coeff(i, j)) return true;
return false;
}
}
@@ -158,7 +156,7 @@ inline bool DenseBase<Derived>::allFinite() const
return !((derived()-derived()).hasNaN());
#endif
}
} // end namespace Eigen
#endif // EIGEN_ALLANDANY_H
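A quick usage sketch of the reductions implemented above, through the public array API (standard Eigen calls):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
  std::cout << (m.array() >= 0).all() << "\n";  // 1: every coefficient is non-negative
  std::cout << (m.array() > 0).any() << "\n";   // 1: the diagonal entries are positive
  return 0;
}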

@@ -292,7 +292,7 @@ DenseBase<Derived>::LinSpaced(Index size, const Scalar& low, const Scalar& high)
}
/**
* \copydoc DenseBase::LinSpaced(Index, const DenseBase::Scalar&, const DenseBase::Scalar&)
* \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&)
* Special version for fixed size types which does not require the size parameter.
*/
template<typename Derived>

@@ -324,9 +324,9 @@ template<typename Derived> class DenseBase
typedef Transpose<Derived> TransposeReturnType;
EIGEN_DEVICE_FUNC
TransposeReturnType transpose();
typedef Transpose<const Derived> ConstTransposeReturnType;
typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;
EIGEN_DEVICE_FUNC
const ConstTransposeReturnType transpose() const;
ConstTransposeReturnType transpose() const;
EIGEN_DEVICE_FUNC
void transposeInPlace();

@@ -191,8 +191,7 @@ MatrixBase<Derived>::diagonal()
/** This is the const version of diagonal(). */
template<typename Derived>
EIGEN_DEVICE_FUNC inline
const typename MatrixBase<Derived>::ConstDiagonalReturnType
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalReturnType
MatrixBase<Derived>::diagonal() const
{
return ConstDiagonalReturnType(derived());
@@ -210,18 +209,18 @@ MatrixBase<Derived>::diagonal() const
*
* \sa MatrixBase::diagonal(), class Diagonal */
template<typename Derived>
EIGEN_DEVICE_FUNC inline Diagonal<Derived, DynamicIndex>
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::DiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index)
{
return Diagonal<Derived, DynamicIndex>(derived(), index);
return DiagonalDynamicIndexReturnType(derived(), index);
}
/** This is the const version of diagonal(Index). */
template<typename Derived>
EIGEN_DEVICE_FUNC inline const Diagonal<const Derived, DynamicIndex>
EIGEN_DEVICE_FUNC inline typename MatrixBase<Derived>::ConstDiagonalDynamicIndexReturnType
MatrixBase<Derived>::diagonal(Index index) const
{
return Diagonal<const Derived, DynamicIndex>(derived(), index);
return ConstDiagonalDynamicIndexReturnType(derived(), index);
}
/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this
@@ -238,20 +237,20 @@ MatrixBase<Derived>::diagonal(Index index) const
template<typename Derived>
template<int Index_>
EIGEN_DEVICE_FUNC
inline Diagonal<Derived, Index_>
inline typename MatrixBase<Derived>::template DiagonalIndexReturnType<Index_>::Type
MatrixBase<Derived>::diagonal()
{
return Diagonal<Derived, Index_>(derived());
return typename DiagonalIndexReturnType<Index_>::Type(derived());
}
/** This is the const version of diagonal<int>(). */
template<typename Derived>
template<int Index_>
EIGEN_DEVICE_FUNC
inline const Diagonal<const Derived, Index_>
inline typename MatrixBase<Derived>::template ConstDiagonalIndexReturnType<Index_>::Type
MatrixBase<Derived>::diagonal() const
{
return Diagonal<const Derived, Index_>(derived());
return typename ConstDiagonalIndexReturnType<Index_>::Type(derived());
}
} // end namespace Eigen
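The overloads above back the public diagonal API; a short usage sketch (standard Eigen calls):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3i m;
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;
  std::cout << m.diagonal().transpose() << "\n";      // 1 5 9
  std::cout << m.diagonal(1).transpose() << "\n";     // 2 6 (runtime super-diagonal)
  std::cout << m.diagonal<-1>().transpose() << "\n";  // 4 8 (compile-time sub-diagonal)
  return 0;
}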

@@ -18,9 +18,14 @@ namespace internal {
// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE
// looking at the static assertions. Thus this is a trick to get better compile errors.
template<typename T, typename U,
bool NeedToTranspose = T::IsVectorAtCompileTime && U::IsVectorAtCompileTime &&
((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1) ||
(int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))>
// the NeedToTranspose condition here is taken straight from Assign.h
bool NeedToTranspose = T::IsVectorAtCompileTime
&& U::IsVectorAtCompileTime
&& ((int(T::RowsAtCompileTime) == 1 && int(U::ColsAtCompileTime) == 1)
| // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
// revert to || as soon as not needed anymore.
(int(T::ColsAtCompileTime) == 1 && int(U::RowsAtCompileTime) == 1))
>
struct dot_nocheck
{
typedef scalar_conj_product_op<typename traits<T>::Scalar,typename traits<U>::Scalar> conj_prod;

@@ -160,7 +160,7 @@ struct eigen_packet_wrapper
{
EIGEN_ALWAYS_INLINE operator T&() { return m_val; }
EIGEN_ALWAYS_INLINE operator const T&() const { return m_val; }
EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {};
EIGEN_ALWAYS_INLINE eigen_packet_wrapper() {}
EIGEN_ALWAYS_INLINE eigen_packet_wrapper(const T &v) : m_val(v) {}
EIGEN_ALWAYS_INLINE eigen_packet_wrapper& operator=(const T &v) {
m_val = v;
@@ -258,7 +258,7 @@ struct ptrue_impl {
// uses a comparison to zero, so this should still work in most cases. We don't
// have another option, since the scalar type requires initialization.
template<typename T>
struct ptrue_impl<T,
typename internal::enable_if<is_scalar<T>::value && NumTraits<T>::RequireInitialization>::type > {
static EIGEN_DEVICE_FUNC inline T run(const T& /*a*/){
return T(1);
@@ -356,16 +356,16 @@ struct bytewise_bitwise_helper {
EIGEN_DEVICE_FUNC static inline T bitwise_and(const T& a, const T& b) {
return binary(a, b, bit_and<unsigned char>());
}
EIGEN_DEVICE_FUNC static inline T bitwise_or(const T& a, const T& b) {
return binary(a, b, bit_or<unsigned char>());
}
EIGEN_DEVICE_FUNC static inline T bitwise_xor(const T& a, const T& b) {
return binary(a, b, bit_xor<unsigned char>());
}
EIGEN_DEVICE_FUNC static inline T bitwise_not(const T& a) {
return unary(a,bit_not<unsigned char>());
}
private:
template<typename Op>
EIGEN_DEVICE_FUNC static inline T unary(const T& a, Op op) {
@@ -440,7 +440,7 @@ struct pselect_impl {
// For scalars, use ternary select.
template<typename Packet>
struct pselect_impl<Packet,
typename internal::enable_if<is_scalar<Packet>::value>::type > {
static EIGEN_DEVICE_FUNC inline Packet run(const Packet& mask, const Packet& a, const Packet& b) {
return numext::equal_strict(mask, Packet(0)) ? b : a;
@@ -807,7 +807,7 @@ Packet plog10(const Packet& a) { EIGEN_USING_STD(log10); return log10(a); }
template<typename Packet> EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS
Packet plog2(const Packet& a) {
typedef typename internal::unpacket_traits<Packet>::type Scalar;
return pmul(pset1<Packet>(Scalar(EIGEN_LOG2E)), plog(a));
}
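The plog2 line above relies on the identity log2(x) = log2(e) * ln(x); a quick scalar sanity check (plain C++, with log2(e) written out as a literal in place of EIGEN_LOG2E):

#include <cassert>
#include <cmath>

int main() {
  const double kLog2E = 1.4426950408889634;  // log2(e), the value behind EIGEN_LOG2E
  const double x = 42.0;
  assert(std::abs(std::log2(x) - kLog2E * std::log(x)) < 1e-12);
  return 0;
}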
/** \internal \returns the square-root of \a a (coeff-wise) */
@@ -881,7 +881,7 @@ predux(const Packet& a)
template <typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(
const Packet& a) {
typedef typename unpacket_traits<Packet>::type Scalar;
return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmul<Scalar>)));
}
@@ -889,14 +889,14 @@ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_mul(
template <typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(
const Packet &a) {
typedef typename unpacket_traits<Packet>::type Scalar;
return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<PropagateFast, Scalar>)));
}
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(
const Packet& a) {
typedef typename unpacket_traits<Packet>::type Scalar;
return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmin<NaNPropagation, Scalar>)));
}
@@ -904,14 +904,14 @@ EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_min(
template <typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(
const Packet &a) {
typedef typename unpacket_traits<Packet>::type Scalar;
return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<PropagateFast, Scalar>)));
}
template <int NaNPropagation, typename Packet>
EIGEN_DEVICE_FUNC inline typename unpacket_traits<Packet>::type predux_max(
const Packet& a) {
typedef typename unpacket_traits<Packet>::type Scalar;
return predux_helper(a, EIGEN_BINARY_OP_NAN_PROPAGATION(Scalar, (pmax<NaNPropagation, Scalar>)));
}

@@ -122,10 +122,10 @@ public:
{}
/** \returns number of rows */
Index rows() const { return internal::index_list_size(m_rowIndices); }
Index rows() const { return internal::size(m_rowIndices); }
/** \returns number of columns */
Index cols() const { return internal::index_list_size(m_colIndices); }
Index cols() const { return internal::size(m_colIndices); }
/** \returns the nested expression */
const typename internal::remove_all<XprType>::type&
@@ -189,16 +189,12 @@ struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
CoeffReturnType coeff(Index row, Index col) const
{
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows()
&& m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
return m_argImpl.coeff(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Scalar& coeffRef(Index row, Index col)
{
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows()
&& m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
return m_argImpl.coeffRef(m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
}
@@ -208,8 +204,6 @@ struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
EIGEN_STATIC_ASSERT_LVALUE(XprType)
Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows()
&& m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
}
@@ -218,8 +212,6 @@ struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
{
Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows()
&& m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
return m_argImpl.coeffRef( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
}
@@ -228,8 +220,6 @@ struct unary_evaluator<IndexedView<ArgType, RowIndices, ColIndices>, IndexBased>
{
Index row = XprType::RowsAtCompileTime == 1 ? 0 : index;
Index col = XprType::RowsAtCompileTime == 1 ? index : 0;
eigen_assert(m_xpr.rowIndices()[row] >= 0 && m_xpr.rowIndices()[row] < m_xpr.nestedExpression().rows()
&& m_xpr.colIndices()[col] >= 0 && m_xpr.colIndices()[col] < m_xpr.nestedExpression().cols());
return m_argImpl.coeff( m_xpr.rowIndices()[row], m_xpr.colIndices()[col]);
}
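For context, this evaluator backs Eigen 3.4's indexed views, where the row and column index lists can be arbitrary containers (standard public API):

#include <Eigen/Dense>
#include <iostream>
#include <vector>

int main() {
  Eigen::Matrix4i A;
  A.setRandom();
  std::vector<int> rows{0, 2}, cols{1, 3};
  std::cout << A(rows, cols) << "\n";  // 2x2 view of rows {0,2} and columns {1,3}
  return 0;
}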

@@ -588,8 +588,12 @@ struct arg_default_impl<Scalar, true> {
EIGEN_DEVICE_FUNC
static inline RealScalar run(const Scalar& x)
{
// There is no official ::arg on device in CUDA/HIP, so we always need to use std::arg.
#if defined(EIGEN_HIP_DEVICE_COMPILE)
// HIP does not seem to have a native device side implementation for the math routine "arg"
using std::arg;
#else
EIGEN_USING_STD(arg);
#endif
return static_cast<RealScalar>(arg(x));
}
};
@@ -877,159 +881,13 @@ struct meta_floor_log2<n, lower, upper, meta_floor_log2_bogus>
// no value, error at compile time
};
template <typename BitsType, typename EnableIf = void>
struct count_bits_impl {
static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
EIGEN_STATIC_ASSERT(
is_integral<BitsType>::value && !NumTraits<BitsType>::IsSigned,
THIS_TYPE_IS_NOT_SUPPORTED);
int n = CHAR_BIT * sizeof(BitsType);
int shift = n / 2;
while (bits > 0 && shift > 0) {
BitsType y = bits >> shift;
if (y > 0) {
n -= shift;
bits = y;
}
shift /= 2;
}
if (shift == 0) {
--n;
}
return n;
}
static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
EIGEN_STATIC_ASSERT(
is_integral<BitsType>::value && !NumTraits<BitsType>::IsSigned,
THIS_TYPE_IS_NOT_SUPPORTED);
int n = CHAR_BIT * sizeof(BitsType);
int shift = n / 2;
while (bits > 0 && shift > 0) {
BitsType y = bits << shift;
if (y > 0) {
n -= shift;
bits = y;
}
shift /= 2;
}
if (shift == 0) {
--n;
}
return n;
}
};
// Count leading zeros.
template <typename BitsType>
EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
return count_bits_impl<BitsType>::clz(bits);
}
// Count trailing zeros.
template <typename BitsType>
EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
return count_bits_impl<BitsType>::ctz(bits);
}
#if EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
template <typename BitsType>
struct count_bits_impl<BitsType, typename enable_if<sizeof(BitsType) <= sizeof(unsigned int)>::type> {
static const int kNumBits = static_cast<int>(sizeof(BitsType) * CHAR_BIT);
static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
static const int kLeadingBitsOffset = (sizeof(unsigned int) - sizeof(BitsType)) * CHAR_BIT;
return bits == 0 ? kNumBits : __builtin_clz(static_cast<unsigned int>(bits)) - kLeadingBitsOffset;
}
static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
return bits == 0 ? kNumBits : __builtin_ctz(static_cast<unsigned int>(bits));
}
};
template <typename BitsType>
struct count_bits_impl<
BitsType, typename enable_if<sizeof(unsigned int) < sizeof(BitsType) && sizeof(BitsType) <= sizeof(unsigned long)>::type> {
static const int kNumBits = static_cast<int>(sizeof(BitsType) * CHAR_BIT);
static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
static const int kLeadingBitsOffset = (sizeof(unsigned long) - sizeof(BitsType)) * CHAR_BIT;
return bits == 0 ? kNumBits : __builtin_clzl(static_cast<unsigned long>(bits)) - kLeadingBitsOffset;
}
static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
return bits == 0 ? kNumBits : __builtin_ctzl(static_cast<unsigned long>(bits));
}
};
template <typename BitsType>
struct count_bits_impl<BitsType, typename enable_if<sizeof(unsigned long) < sizeof(BitsType) &&
sizeof(BitsType) <= sizeof(unsigned long long)>::type> {
static const int kNumBits = static_cast<int>(sizeof(BitsType) * CHAR_BIT);
static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
static const int kLeadingBitsOffset = (sizeof(unsigned long long) - sizeof(BitsType)) * CHAR_BIT;
return bits == 0 ? kNumBits : __builtin_clzll(static_cast<unsigned long long>(bits)) - kLeadingBitsOffset;
}
static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
return bits == 0 ? kNumBits : __builtin_ctzll(static_cast<unsigned long long>(bits));
}
};
#elif EIGEN_COMP_MSVC
template <typename BitsType>
struct count_bits_impl<BitsType, typename enable_if<sizeof(BitsType) <= sizeof(unsigned long)>::type> {
static const int kNumBits = static_cast<int>(sizeof(BitsType) * CHAR_BIT);
static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
unsigned long out;
_BitScanReverse(&out, static_cast<unsigned long>(bits));
return bits == 0 ? kNumBits : (kNumBits - 1) - static_cast<int>(out);
}
static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
unsigned long out;
_BitScanForward(&out, static_cast<unsigned long>(bits));
return bits == 0 ? kNumBits : static_cast<int>(out);
}
};
#ifdef _WIN64
template <typename BitsType>
struct count_bits_impl<
BitsType, typename enable_if<sizeof(unsigned long) < sizeof(BitsType) && sizeof(BitsType) <= sizeof(__int64)>::type> {
static const int kNumBits = static_cast<int>(sizeof(BitsType) * CHAR_BIT);
static EIGEN_DEVICE_FUNC inline int clz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
unsigned long out;
_BitScanReverse64(&out, static_cast<unsigned __int64>(bits));
return bits == 0 ? kNumBits : (kNumBits - 1) - static_cast<int>(out);
}
static EIGEN_DEVICE_FUNC inline int ctz(BitsType bits) {
EIGEN_STATIC_ASSERT(is_integral<BitsType>::value, THIS_TYPE_IS_NOT_SUPPORTED);
unsigned long out;
_BitScanForward64(&out, static_cast<unsigned __int64>(bits));
return bits == 0 ? kNumBits : static_cast<int>(out);
}
};
#endif // _WIN64
#endif // EIGEN_COMP_GNUC || EIGEN_COMP_CLANG
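As a sanity check on the generic halving-shift fallback above, a standalone 32-bit version (plain C++; the test values are illustrative):

#include <cassert>
#include <cstdint>

// Portable count-leading-zeros via halving shifts, mirroring the generic
// count_bits_impl::clz fallback above.
int clz32(std::uint32_t bits) {
  int n = 32;
  int shift = 16;
  while (bits > 0 && shift > 0) {
    std::uint32_t y = bits >> shift;
    if (y > 0) {  // a set bit lies above position `shift`: narrow into that half
      n -= shift;
      bits = y;
    }
    shift /= 2;
  }
  if (shift == 0) --n;  // account for the final surviving set bit
  return n;
}

int main() {
  assert(clz32(0u) == 32);
  assert(clz32(1u) == 31);
  assert(clz32(0x80000000u) == 0);
  return 0;
}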
template <typename Scalar>
struct random_default_impl<Scalar, false, true> {
static inline Scalar run(const Scalar& x, const Scalar& y) {
if (y <= x) return x;
template<typename Scalar>
struct random_default_impl<Scalar, false, true>
{
static inline Scalar run(const Scalar& x, const Scalar& y)
{
if (y <= x)
return x;
// ScalarU is the unsigned counterpart of Scalar, possibly Scalar itself.
typedef typename make_unsigned<Scalar>::type ScalarU;
// ScalarX is the widest of ScalarU and unsigned int.
@@ -1174,15 +1032,11 @@ template<typename T> EIGEN_DEVICE_FUNC bool isinf_msvc_helper(T x)
}
//MSVC defines a _isnan builtin function, but for double only
#ifndef EIGEN_GPU_COMPILE_PHASE
EIGEN_DEVICE_FUNC inline bool isnan_impl(const long double& x) { return _isnan(x)!=0; }
#endif
EIGEN_DEVICE_FUNC inline bool isnan_impl(const double& x) { return _isnan(x)!=0; }
EIGEN_DEVICE_FUNC inline bool isnan_impl(const float& x) { return _isnan(x)!=0; }
#ifndef EIGEN_GPU_COMPILE_PHASE
EIGEN_DEVICE_FUNC inline bool isinf_impl(const long double& x) { return isinf_msvc_helper(x); }
#endif
EIGEN_DEVICE_FUNC inline bool isinf_impl(const double& x) { return isinf_msvc_helper(x); }
EIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x) { return isinf_msvc_helper(x); }
@@ -1196,16 +1050,12 @@ EIGEN_DEVICE_FUNC inline bool isinf_impl(const float& x) { return isinf_ms
#define EIGEN_TMP_NOOPT_ATTRIB EIGEN_DEVICE_FUNC inline __attribute__((noinline,optimize("no-finite-math-only")))
#endif
#ifndef EIGEN_GPU_COMPILE_PHASE
template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const long double& x) { return __builtin_isnan(x); }
#endif
template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const double& x) { return __builtin_isnan(x); }
template<> EIGEN_TMP_NOOPT_ATTRIB bool isnan_impl(const float& x) { return __builtin_isnan(x); }
template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const double& x) { return __builtin_isinf(x); }
template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const float& x) { return __builtin_isinf(x); }
#ifndef EIGEN_GPU_COMPILE_PHASE
template<> EIGEN_TMP_NOOPT_ATTRIB bool isinf_impl(const long double& x) { return __builtin_isinf(x); }
#endif
#undef EIGEN_TMP_NOOPT_ATTRIB
@@ -1262,8 +1112,6 @@ EIGEN_ALWAYS_INLINE double mini(const double& x, const double& y)
{
return fmin(x, y);
}
#ifndef EIGEN_GPU_COMPILE_PHASE
template<>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y)
@@ -1275,7 +1123,6 @@ EIGEN_ALWAYS_INLINE long double mini(const long double& x, const long double& y)
return fminl(x, y);
#endif
}
#endif
template<typename T>
EIGEN_DEVICE_FUNC
@@ -1295,7 +1142,6 @@ EIGEN_ALWAYS_INLINE double maxi(const double& x, const double& y)
{
return fmax(x, y);
}
#ifndef EIGEN_GPU_COMPILE_PHASE
template<>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y)
@@ -1308,7 +1154,6 @@ EIGEN_ALWAYS_INLINE long double maxi(const long double& x, const long double& y)
#endif
}
#endif
#endif
#if defined(SYCL_DEVICE_ONLY)
@@ -1465,8 +1310,8 @@ EIGEN_ALWAYS_INLINE double absdiff(const double& x, const double& y)
return fabs(x - y);
}
#if !defined(EIGEN_GPUCC)
// HIP and CUDA do not support long double.
#ifndef EIGEN_GPU_COMPILE_PHASE
template<>
EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE long double absdiff(const long double& x, const long double& y) {

@@ -225,6 +225,8 @@ class Matrix
return Base::_set(other);
}
/* Here, doxygen failed to copy the brief information when using \copydoc */
/**
* \brief Copies the generic expression \a other into *this.
* \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
@@ -282,15 +284,7 @@ class Matrix
#endif
#if EIGEN_HAS_CXX11
/** \brief Construct a row or column vector with fixed size from an arbitrary number of coefficients. \cpp11
*
* \only_for_vectors
*
* This constructor is for 1D arrays or vectors with more than 4 coefficients.
* There exist C++98 analogue constructors for fixed-size arrays/vectors having 1, 2, 3, or 4 coefficients.
*
* \warning To construct a column (resp. row) vector of fixed length, the number of values passed to this
* constructor must match the fixed number of rows (resp. columns) of \c *this.
/** \copydoc PlainObjectBase(const Scalar&, const Scalar&, const Scalar&, const Scalar&, const ArgTypes&... args)
*
* Example: \include Matrix_variadic_ctor_cxx11.cpp
* Output: \verbinclude Matrix_variadic_ctor_cxx11.out
@@ -303,8 +297,6 @@ class Matrix
: Base(a0, a1, a2, a3, args...) {}
/** \brief Constructs a Matrix and initializes it from the coefficients given as initializer-lists grouped by row. \cpp11
*
* \anchor matrix_constructor_initializer_list
*
* In the general case, the constructor takes a list of rows, each row being represented as a list of coefficients:
*
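The constructor described here takes one inner list per row; a short usage sketch (standard Eigen 3.4 / C++11 API):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix<double, 2, 3> m{{1.0, 2.0, 3.0},
                                {4.0, 5.0, 6.0}};  // one inner list per row
  std::cout << m << "\n";
  return 0;
}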
@@ -488,21 +480,16 @@ class Matrix
#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \
/** \ingroup matrixtypedefs */ \
/** \brief \noop */ \
typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix; \
/** \ingroup matrixtypedefs */ \
/** \brief \noop */ \
typedef Matrix<Type, Size, 1> Vector##SizeSuffix##TypeSuffix; \
/** \ingroup matrixtypedefs */ \
/** \brief \noop */ \
typedef Matrix<Type, 1, Size> RowVector##SizeSuffix##TypeSuffix;
#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \
/** \ingroup matrixtypedefs */ \
/** \brief \noop */ \
typedef Matrix<Type, Size, Dynamic> Matrix##Size##X##TypeSuffix; \
/** \ingroup matrixtypedefs */ \
/** \brief \noop */ \
typedef Matrix<Type, Dynamic, Size> Matrix##X##Size##TypeSuffix;
#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \

Some files were not shown because too many files have changed in this diff.
