Diffstat (limited to 'include/pybind11/eigen/matrix.h')
-rw-r--r-- include/pybind11/eigen/matrix.h 714
1 file changed, 714 insertions, 0 deletions
diff --git a/include/pybind11/eigen/matrix.h b/include/pybind11/eigen/matrix.h
new file mode 100644
index 00000000..8d4342f8
--- /dev/null
+++ b/include/pybind11/eigen/matrix.h
@@ -0,0 +1,714 @@
+/*
+ pybind11/eigen/matrix.h: Transparent conversion for dense and sparse Eigen matrices
+
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
+
+ All rights reserved. Use of this source code is governed by a
+ BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "../numpy.h"
+#include "common.h"
+
+/* HINT: To suppress warnings originating from the Eigen headers, use -isystem.
+ See also:
+ https://stackoverflow.com/questions/2579576/i-dir-vs-isystem-dir
+ https://stackoverflow.com/questions/1741816/isystem-for-ms-visual-studio-c-compiler
+*/
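+/* A minimal sketch of the idea (the Eigen include path below is an illustrative assumption):
+     g++ -isystem /usr/include/eigen3 my_module.cpp ...
+   With CMake, the equivalent is to add the Eigen include directory as a SYSTEM include on the
+   consuming target.
+*/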
+PYBIND11_WARNING_PUSH
+PYBIND11_WARNING_DISABLE_MSVC(5054) // https://github.com/pybind/pybind11/pull/3741
+// C5054: operator '&': deprecated between enumerations of different types
+#if defined(__MINGW32__)
+PYBIND11_WARNING_DISABLE_GCC("-Wmaybe-uninitialized")
+#endif
+
+#include <Eigen/Core>
+#include <Eigen/SparseCore>
+
+PYBIND11_WARNING_POP
+
+// Eigen prior to 3.2.7 doesn't have proper move constructors--but worse, some classes get implicit
+// move constructors that break things. We could detect this and explicitly copy, but an extra copy
+// of matrices seems highly undesirable.
+static_assert(EIGEN_VERSION_AT_LEAST(3, 2, 7),
+ "Eigen matrix support in pybind11 requires Eigen >= 3.2.7");
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+PYBIND11_WARNING_DISABLE_MSVC(4127)
+
+// Provide a convenience alias for easier pass-by-ref usage with fully dynamic strides:
+using EigenDStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
+template <typename MatrixType>
+using EigenDRef = Eigen::Ref<MatrixType, 0, EigenDStride>;
+template <typename MatrixType>
+using EigenDMap = Eigen::Map<MatrixType, 0, EigenDStride>;
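+
+// Illustrative sketch (not part of this header; `scale_inplace` is a hypothetical binding):
+// because EigenDRef uses fully dynamic strides, it can wrap both C- and Fortran-ordered numpy
+// arrays without copying, and writes through the reference are visible to the caller:
+//
+//     mod.def("scale_inplace",
+//             [](pybind11::EigenDRef<Eigen::MatrixXd> mat, double a) { mat *= a; });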
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+#if EIGEN_VERSION_AT_LEAST(3, 3, 0)
+using EigenIndex = Eigen::Index;
+template <typename Scalar, int Flags, typename StorageIndex>
+using EigenMapSparseMatrix = Eigen::Map<Eigen::SparseMatrix<Scalar, Flags, StorageIndex>>;
+#else
+using EigenIndex = EIGEN_DEFAULT_DENSE_INDEX_TYPE;
+template <typename Scalar, int Flags, typename StorageIndex>
+using EigenMapSparseMatrix = Eigen::MappedSparseMatrix<Scalar, Flags, StorageIndex>;
+#endif
+
+// Matches Eigen::Map, Eigen::Ref, blocks, etc:
+template <typename T>
+using is_eigen_dense_map = all_of<is_template_base_of<Eigen::DenseBase, T>,
+ std::is_base_of<Eigen::MapBase<T, Eigen::ReadOnlyAccessors>, T>>;
+template <typename T>
+using is_eigen_mutable_map = std::is_base_of<Eigen::MapBase<T, Eigen::WriteAccessors>, T>;
+template <typename T>
+using is_eigen_dense_plain
+ = all_of<negation<is_eigen_dense_map<T>>, is_template_base_of<Eigen::PlainObjectBase, T>>;
+template <typename T>
+using is_eigen_sparse = is_template_base_of<Eigen::SparseMatrixBase, T>;
+// Test for objects inheriting from EigenBase<Derived> that aren't captured by the above. This
+// basically covers anything that can be assigned to a dense matrix but that doesn't have a
+// typical matrix data layout that can be copied from its .data(). For example, DiagonalMatrix and
+// SelfAdjointView fall into this category.
+template <typename T>
+using is_eigen_other
+ = all_of<is_template_base_of<Eigen::EigenBase, T>,
+ negation<any_of<is_eigen_dense_map<T>, is_eigen_dense_plain<T>, is_eigen_sparse<T>>>>;
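+
+// For illustration, roughly how some common types classify under these traits:
+//   Eigen::MatrixXd, Eigen::Vector3f    -> is_eigen_dense_plain
+//   Eigen::Map<...>, Eigen::Ref<...>    -> is_eigen_dense_map
+//   Eigen::SparseMatrix<double>         -> is_eigen_sparse
+//   Eigen::DiagonalMatrix<double, 3>    -> is_eigen_other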
+
+// Captures numpy/eigen conformability status (returned by EigenProps::conformable()):
+template <bool EigenRowMajor>
+struct EigenConformable {
+ bool conformable = false;
+ EigenIndex rows = 0, cols = 0;
+ EigenDStride stride{0, 0}; // Only valid if negativestrides is false!
+ bool negativestrides = false; // If true, do not use stride!
+
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ EigenConformable(bool fits = false) : conformable{fits} {}
+ // Matrix type:
+ EigenConformable(EigenIndex r, EigenIndex c, EigenIndex rstride, EigenIndex cstride)
+ : conformable{true}, rows{r}, cols{c},
+ // TODO: when Eigen bug #747 is fixed, remove the tests for non-negativity.
+ // http://eigen.tuxfamily.org/bz/show_bug.cgi?id=747
+ stride{EigenRowMajor ? (rstride > 0 ? rstride : 0)
+ : (cstride > 0 ? cstride : 0) /* outer stride */,
+ EigenRowMajor ? (cstride > 0 ? cstride : 0)
+ : (rstride > 0 ? rstride : 0) /* inner stride */},
+ negativestrides{rstride < 0 || cstride < 0} {}
+ // Vector type:
+ EigenConformable(EigenIndex r, EigenIndex c, EigenIndex stride)
+ : EigenConformable(r, c, r == 1 ? c * stride : stride, c == 1 ? r : r * stride) {}
+
+ template <typename props>
+ bool stride_compatible() const {
+ // To have compatible strides, we need (on both dimensions) one of fully dynamic strides,
+ // matching strides, or a dimension size of 1 (in which case the stride value is
+ // irrelevant). Alternatively, if any dimension size is 0, the strides are not relevant
+ // (and numpy ≥ 1.23 sets the strides to 0 in that case, so we need to check explicitly).
+ if (negativestrides) {
+ return false;
+ }
+ if (rows == 0 || cols == 0) {
+ return true;
+ }
+ return (props::inner_stride == Eigen::Dynamic || props::inner_stride == stride.inner()
+ || (EigenRowMajor ? cols : rows) == 1)
+ && (props::outer_stride == Eigen::Dynamic || props::outer_stride == stride.outer()
+ || (EigenRowMajor ? rows : cols) == 1);
+ }
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator bool() const { return conformable; }
+};
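+
+// Worked example (illustrative): a C-contiguous float64 array of shape (3, 2) has byte strides
+// (16, 8), i.e. element strides rstride = 2, cstride = 1; the matrix constructor above records
+// rows = 3, cols = 2 and, for a column-major Eigen target, stride{outer = 1, inner = 2}.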
+
+template <typename Type>
+struct eigen_extract_stride {
+ using type = Type;
+};
+template <typename PlainObjectType, int MapOptions, typename StrideType>
+struct eigen_extract_stride<Eigen::Map<PlainObjectType, MapOptions, StrideType>> {
+ using type = StrideType;
+};
+template <typename PlainObjectType, int Options, typename StrideType>
+struct eigen_extract_stride<Eigen::Ref<PlainObjectType, Options, StrideType>> {
+ using type = StrideType;
+};
+
+// Helper struct for extracting information from an Eigen type
+template <typename Type_>
+struct EigenProps {
+ using Type = Type_;
+ using Scalar = typename Type::Scalar;
+ using StrideType = typename eigen_extract_stride<Type>::type;
+ static constexpr EigenIndex rows = Type::RowsAtCompileTime, cols = Type::ColsAtCompileTime,
+ size = Type::SizeAtCompileTime;
+ static constexpr bool row_major = Type::IsRowMajor,
+ vector
+ = Type::IsVectorAtCompileTime, // At least one dimension has fixed size 1
+ fixed_rows = rows != Eigen::Dynamic, fixed_cols = cols != Eigen::Dynamic,
+ fixed = size != Eigen::Dynamic, // Fully-fixed size
+ dynamic = !fixed_rows && !fixed_cols; // Fully-dynamic size
+
+ template <EigenIndex i, EigenIndex ifzero>
+ using if_zero = std::integral_constant<EigenIndex, i == 0 ? ifzero : i>;
+ static constexpr EigenIndex inner_stride
+ = if_zero<StrideType::InnerStrideAtCompileTime, 1>::value,
+ outer_stride = if_zero < StrideType::OuterStrideAtCompileTime,
+ vector ? size
+ : row_major ? cols
+ : rows > ::value;
+ static constexpr bool dynamic_stride
+ = inner_stride == Eigen::Dynamic && outer_stride == Eigen::Dynamic;
+ static constexpr bool requires_row_major
+ = !dynamic_stride && !vector && (row_major ? inner_stride : outer_stride) == 1;
+ static constexpr bool requires_col_major
+ = !dynamic_stride && !vector && (row_major ? outer_stride : inner_stride) == 1;
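+
+ // For illustration (values stated for a standard Eigen build, as an assumption): a plain
+ // Eigen::MatrixXd yields inner_stride == 1, outer_stride == Eigen::Dynamic and therefore
+ // requires_col_major == true, while EigenDRef<Eigen::MatrixXd> (fully dynamic strides) yields
+ // dynamic_stride == true and requires neither storage order.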
+
+ // Takes an input array and determines whether we can make it fit into the Eigen type. If
+ // the array is a vector, we attempt to fit it into either an Eigen 1xN or Nx1 vector
+ // (preferring the latter if it will fit in either, i.e. for a fully dynamic matrix type).
+ static EigenConformable<row_major> conformable(const array &a) {
+ const auto dims = a.ndim();
+ if (dims < 1 || dims > 2) {
+ return false;
+ }
+
+ if (dims == 2) { // Matrix type: require exact match (or dynamic)
+
+ EigenIndex np_rows = a.shape(0), np_cols = a.shape(1),
+ np_rstride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar)),
+ np_cstride = a.strides(1) / static_cast<ssize_t>(sizeof(Scalar));
+ if ((fixed_rows && np_rows != rows) || (fixed_cols && np_cols != cols)) {
+ return false;
+ }
+
+ return {np_rows, np_cols, np_rstride, np_cstride};
+ }
+
+ // Otherwise we're storing an n-vector. Only one of the strides will be used, but
+ // whichever is used, we want the (single) numpy stride value.
+ const EigenIndex n = a.shape(0),
+ stride = a.strides(0) / static_cast<ssize_t>(sizeof(Scalar));
+
+ if (vector) { // Eigen type is a compile-time vector
+ if (fixed && size != n) {
+ return false; // Vector size mismatch
+ }
+ return {rows == 1 ? 1 : n, cols == 1 ? 1 : n, stride};
+ }
+ if (fixed) {
+ // The type has a fixed size, but is not a vector: abort
+ return false;
+ }
+ if (fixed_cols) {
+ // Since this isn't a vector, cols must be != 1. We allow this only if it exactly
+ // equals the number of elements (rows is Dynamic, and so 1 row is allowed).
+ if (cols != n) {
+ return false;
+ }
+ return {1, n, stride};
+ } // Otherwise it's either fully dynamic, or column dynamic; both become a column vector
+ if (fixed_rows && rows != n) {
+ return false;
+ }
+ return {n, 1, stride};
+ }
+
+ static constexpr bool show_writeable
+ = is_eigen_dense_map<Type>::value && is_eigen_mutable_map<Type>::value;
+ static constexpr bool show_order = is_eigen_dense_map<Type>::value;
+ static constexpr bool show_c_contiguous = show_order && requires_row_major;
+ static constexpr bool show_f_contiguous
+ = !show_c_contiguous && show_order && requires_col_major;
+
+ static constexpr auto descriptor
+ = const_name("numpy.ndarray[") + npy_format_descriptor<Scalar>::name + const_name("[")
+ + const_name<fixed_rows>(const_name<(size_t) rows>(), const_name("m")) + const_name(", ")
+ + const_name<fixed_cols>(const_name<(size_t) cols>(), const_name("n")) + const_name("]")
+ +
+ // For a reference type (e.g. Ref<MatrixXd>) we have other constraints that might need to
+ // be satisfied: writeable=True (for a mutable reference), and, depending on the map's
+ // stride options, possibly f_contiguous or c_contiguous. We include them in the
+ // descriptor output to provide some hint as to why a TypeError is occurring (otherwise
+ // it can be confusing to see that a function accepts a 'numpy.ndarray[float64[3,2]]' and
+ // an error message that you *gave* a numpy.ndarray of the right type and dimensions).
+ const_name<show_writeable>(", flags.writeable", "")
+ + const_name<show_c_contiguous>(", flags.c_contiguous", "")
+ + const_name<show_f_contiguous>(", flags.f_contiguous", "") + const_name("]");
+};
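+
+// For illustration, the descriptor above renders into docstring signatures roughly like
+// "numpy.ndarray[numpy.float64[m, n]]" for an Eigen::MatrixXd argument, or
+// "numpy.ndarray[numpy.float32[m, n], flags.writeable, flags.f_contiguous]" for a mutable
+// Eigen::Ref<Eigen::MatrixXf> (the exact rendering depends on the scalar's numpy format name).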
+
+// Casts an Eigen type to numpy array. If given a base, the numpy array references the src data,
+// otherwise it'll make a copy. writeable lets you turn off the writeable flag for the array.
+template <typename props>
+handle
+eigen_array_cast(typename props::Type const &src, handle base = handle(), bool writeable = true) {
+ constexpr ssize_t elem_size = sizeof(typename props::Scalar);
+ array a;
+ if (props::vector) {
+ a = array({src.size()}, {elem_size * src.innerStride()}, src.data(), base);
+ } else {
+ a = array({src.rows(), src.cols()},
+ {elem_size * src.rowStride(), elem_size * src.colStride()},
+ src.data(),
+ base);
+ }
+
+ if (!writeable) {
+ array_proxy(a.ptr())->flags &= ~detail::npy_api::NPY_ARRAY_WRITEABLE_;
+ }
+
+ return a.release();
+}
+
+// Takes an lvalue ref to some Eigen type and a (python) base object, creating a numpy array that
+// references the Eigen object's data with `base` as the python-registered base class (if omitted,
+// the base will be set to None, and lifetime management is up to the caller). The numpy array is
+// non-writeable if the given type is const.
+template <typename props, typename Type>
+handle eigen_ref_array(Type &src, handle parent = none()) {
+ // none here is to get past array's should-we-copy detection, which currently always
+ // copies when there is no base. Setting the base to None should be harmless.
+ return eigen_array_cast<props>(src, parent, !std::is_const<Type>::value);
+}
+
+// Takes a pointer to some dense, plain Eigen type, builds a capsule around it, then returns a
+// numpy array that references the encapsulated data with a python-side reference to the capsule to
+// tie its destruction to that of any dependent python objects. Const-ness is determined by
+// whether or not the Type of the pointer given is const.
+template <typename props, typename Type, typename = enable_if_t<is_eigen_dense_plain<Type>::value>>
+handle eigen_encapsulate(Type *src) {
+ capsule base(src, [](void *o) { delete static_cast<Type *>(o); });
+ return eigen_ref_array<props>(*src, base);
+}
+
+// Type caster for regular, dense matrix types (e.g. MatrixXd), but not maps/refs/etc. of dense
+// types.
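+//
+// Illustrative usage sketch (module and function names are hypothetical): with this caster,
+// plain dense types are simply passed and returned by value; loading copies the numpy data into
+// a freshly allocated Eigen object, and the return conversion copies, moves, or encapsulates it
+// depending on the return_value_policy:
+//
+//     PYBIND11_MODULE(example, mod) {
+//         mod.def("twice", [](const Eigen::MatrixXd &a) -> Eigen::MatrixXd { return 2.0 * a; });
+//     }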
+template <typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_dense_plain<Type>::value>> {
+ using Scalar = typename Type::Scalar;
+ static_assert(!std::is_pointer<Scalar>::value,
+ PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
+ using props = EigenProps<Type>;
+
+ bool load(handle src, bool convert) {
+ // If we're in no-convert mode, only load if given an array of the correct type
+ if (!convert && !isinstance<array_t<Scalar>>(src)) {
+ return false;
+ }
+
+ // Coerce into an array, but don't do type conversion yet; the copy below handles it.
+ auto buf = array::ensure(src);
+
+ if (!buf) {
+ return false;
+ }
+
+ auto dims = buf.ndim();
+ if (dims < 1 || dims > 2) {
+ return false;
+ }
+
+ auto fits = props::conformable(buf);
+ if (!fits) {
+ return false;
+ }
+
+ // Allocate the new type, then build a numpy reference into it
+ value = Type(fits.rows, fits.cols);
+ auto ref = reinterpret_steal<array>(eigen_ref_array<props>(value));
+ if (dims == 1) {
+ ref = ref.squeeze();
+ } else if (ref.ndim() == 1) {
+ buf = buf.squeeze();
+ }
+
+ int result = detail::npy_api::get().PyArray_CopyInto_(ref.ptr(), buf.ptr());
+
+ if (result < 0) { // Copy failed!
+ PyErr_Clear();
+ return false;
+ }
+
+ return true;
+ }
+
+private:
+ // Cast implementation
+ template <typename CType>
+ static handle cast_impl(CType *src, return_value_policy policy, handle parent) {
+ switch (policy) {
+ case return_value_policy::take_ownership:
+ case return_value_policy::automatic:
+ return eigen_encapsulate<props>(src);
+ case return_value_policy::move:
+ return eigen_encapsulate<props>(new CType(std::move(*src)));
+ case return_value_policy::copy:
+ return eigen_array_cast<props>(*src);
+ case return_value_policy::reference:
+ case return_value_policy::automatic_reference:
+ return eigen_ref_array<props>(*src);
+ case return_value_policy::reference_internal:
+ return eigen_ref_array<props>(*src, parent);
+ default:
+ throw cast_error("unhandled return_value_policy: should not happen!");
+ };
+ }
+
+public:
+ // Normal returned non-reference, non-const value:
+ static handle cast(Type &&src, return_value_policy /* policy */, handle parent) {
+ return cast_impl(&src, return_value_policy::move, parent);
+ }
+ // If you return a non-reference const, we mark the numpy array readonly:
+ static handle cast(const Type &&src, return_value_policy /* policy */, handle parent) {
+ return cast_impl(&src, return_value_policy::move, parent);
+ }
+ // lvalue reference return; default (automatic) becomes copy
+ static handle cast(Type &src, return_value_policy policy, handle parent) {
+ if (policy == return_value_policy::automatic
+ || policy == return_value_policy::automatic_reference) {
+ policy = return_value_policy::copy;
+ }
+ return cast_impl(&src, policy, parent);
+ }
+ // const lvalue reference return; default (automatic) becomes copy
+ static handle cast(const Type &src, return_value_policy policy, handle parent) {
+ if (policy == return_value_policy::automatic
+ || policy == return_value_policy::automatic_reference) {
+ policy = return_value_policy::copy;
+ }
+ return cast(&src, policy, parent);
+ }
+ // non-const pointer return
+ static handle cast(Type *src, return_value_policy policy, handle parent) {
+ return cast_impl(src, policy, parent);
+ }
+ // const pointer return
+ static handle cast(const Type *src, return_value_policy policy, handle parent) {
+ return cast_impl(src, policy, parent);
+ }
+
+ static constexpr auto name = props::descriptor;
+
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Type *() { return &value; }
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Type &() { return value; }
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Type &&() && { return std::move(value); }
+ template <typename T>
+ using cast_op_type = movable_cast_op_type<T>;
+
+private:
+ Type value;
+};
+
+// Base class for casting reference/map/block/etc. objects back to python.
+template <typename MapType>
+struct eigen_map_caster {
+ static_assert(!std::is_pointer<typename MapType::Scalar>::value,
+ PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
+
+private:
+ using props = EigenProps<MapType>;
+
+public:
+ // Directly referencing a ref/map's data is a bit dangerous (whatever the map/ref points to has
+ // to stay around), but we'll allow it under the assumption that you know what you're doing
+ // (and have an appropriate keep_alive in place). We return a numpy array pointing directly at
+ // the ref's data (the numpy array ends up read-only if the ref was to a const matrix type).
+ // Note that this means you need to ensure the referenced object isn't destroyed while the
+ // array is alive (e.g. with an appropriate keep_alive, or by referencing a statically
+ // allocated matrix).
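+ //
+ // Illustrative sketch (`Holder`, its members, and the binding are hypothetical; assumes
+ // `namespace py = pybind11`): returning a Map into a member with
+ // return_value_policy::reference_internal ties the array's lifetime to the owning object:
+ //
+ //     py::class_<Holder>(mod, "Holder")
+ //         .def("view", [](Holder &h) { return Eigen::Map<Eigen::VectorXd>(h.data, h.n); },
+ //              py::return_value_policy::reference_internal);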
+ static handle cast(const MapType &src, return_value_policy policy, handle parent) {
+ switch (policy) {
+ case return_value_policy::copy:
+ return eigen_array_cast<props>(src);
+ case return_value_policy::reference_internal:
+ return eigen_array_cast<props>(src, parent, is_eigen_mutable_map<MapType>::value);
+ case return_value_policy::reference:
+ case return_value_policy::automatic:
+ case return_value_policy::automatic_reference:
+ return eigen_array_cast<props>(src, none(), is_eigen_mutable_map<MapType>::value);
+ default:
+ // move, take_ownership don't make any sense for a ref/map:
+ pybind11_fail("Invalid return_value_policy for Eigen Map/Ref/Block type");
+ }
+ }
+
+ static constexpr auto name = props::descriptor;
+
+ // Explicitly delete these: we do not support python -> C++ conversion for these types (i.e.
+ // they can be return types but not bound arguments). We still declare them (explicitly
+ // deleted) so that you end up here if you try anyway.
+ bool load(handle, bool) = delete;
+ operator MapType() = delete;
+ template <typename>
+ using cast_op_type = MapType;
+};
+
+// We can return any map-like object (but can only load Refs, specialized next):
+template <typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_dense_map<Type>::value>> : eigen_map_caster<Type> {};
+
+// Loader for Ref<...> arguments. See the documentation for info on how to make this work without
+// copying (it requires some extra effort in many cases).
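+//
+// Illustrative sketch (binding names hypothetical; assumes `namespace py = pybind11`): a mutable
+// Ref argument writes through to the caller's numpy array and is never satisfied by a copy
+// (loading fails instead if the dtype, writeability, or strides are incompatible); for a const
+// Ref, `py::arg().noconvert()` can be used to forbid the silent converting copy that would
+// otherwise be made:
+//
+//     mod.def("scale_inplace", [](Eigen::Ref<Eigen::VectorXd> v, double a) { v *= a; });
+//     mod.def("norm", [](Eigen::Ref<const Eigen::VectorXd> v) { return v.norm(); },
+//             py::arg("v").noconvert());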
+template <typename PlainObjectType, typename StrideType>
+struct type_caster<
+ Eigen::Ref<PlainObjectType, 0, StrideType>,
+ enable_if_t<is_eigen_dense_map<Eigen::Ref<PlainObjectType, 0, StrideType>>::value>>
+ : public eigen_map_caster<Eigen::Ref<PlainObjectType, 0, StrideType>> {
+private:
+ using Type = Eigen::Ref<PlainObjectType, 0, StrideType>;
+ using props = EigenProps<Type>;
+ using Scalar = typename props::Scalar;
+ static_assert(!std::is_pointer<Scalar>::value,
+ PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
+ using MapType = Eigen::Map<PlainObjectType, 0, StrideType>;
+ using Array
+ = array_t<Scalar,
+ array::forcecast
+ | ((props::row_major ? props::inner_stride : props::outer_stride) == 1
+ ? array::c_style
+ : (props::row_major ? props::outer_stride : props::inner_stride) == 1
+ ? array::f_style
+ : 0)>;
+ static constexpr bool need_writeable = is_eigen_mutable_map<Type>::value;
+ // Delay construction (these have no default constructor)
+ std::unique_ptr<MapType> map;
+ std::unique_ptr<Type> ref;
+ // Our array. When possible, this is just a numpy array pointing to the source data, but
+ // sometimes we can't avoid copying (e.g. input is not a numpy array at all, has an
+ // incompatible layout, or is an array of a type that needs to be converted). Using a numpy
+ // temporary (rather than an Eigen temporary) saves an extra copy when we need both type
+ // conversion and storage order conversion. (Note that we refuse to use this temporary copy
+ // when loading an argument for a Ref<M> with M non-const, i.e. a read-write reference).
+ Array copy_or_ref;
+
+public:
+ bool load(handle src, bool convert) {
+ // First check whether what we have is already an array of the right type. If not, we
+ // can't avoid a copy (because the copy is also going to do type conversion).
+ bool need_copy = !isinstance<Array>(src);
+
+ EigenConformable<props::row_major> fits;
+ if (!need_copy) {
+ // We don't need a converting copy, but we also need to check whether the strides are
+ // compatible with the Ref's stride requirements
+ auto aref = reinterpret_borrow<Array>(src);
+
+ if (aref && (!need_writeable || aref.writeable())) {
+ fits = props::conformable(aref);
+ if (!fits) {
+ return false; // Incompatible dimensions
+ }
+ if (!fits.template stride_compatible<props>()) {
+ need_copy = true;
+ } else {
+ copy_or_ref = std::move(aref);
+ }
+ } else {
+ need_copy = true;
+ }
+ }
+
+ if (need_copy) {
+ // We need to copy: if we need a mutable reference, or we're not supposed to convert
+ // (either because we're in the no-convert overload pass, or because we're explicitly
+ // instructed not to copy, via `py::arg().noconvert()`), we have to fail loading.
+ if (!convert || need_writeable) {
+ return false;
+ }
+
+ Array copy = Array::ensure(src);
+ if (!copy) {
+ return false;
+ }
+ fits = props::conformable(copy);
+ if (!fits || !fits.template stride_compatible<props>()) {
+ return false;
+ }
+ copy_or_ref = std::move(copy);
+ loader_life_support::add_patient(copy_or_ref);
+ }
+
+ ref.reset();
+ map.reset(new MapType(data(copy_or_ref),
+ fits.rows,
+ fits.cols,
+ make_stride(fits.stride.outer(), fits.stride.inner())));
+ ref.reset(new Type(*map));
+
+ return true;
+ }
+
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Type *() { return ref.get(); }
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ operator Type &() { return *ref; }
+ template <typename _T>
+ using cast_op_type = pybind11::detail::cast_op_type<_T>;
+
+private:
+ template <typename T = Type, enable_if_t<is_eigen_mutable_map<T>::value, int> = 0>
+ Scalar *data(Array &a) {
+ return a.mutable_data();
+ }
+
+ template <typename T = Type, enable_if_t<!is_eigen_mutable_map<T>::value, int> = 0>
+ const Scalar *data(Array &a) {
+ return a.data();
+ }
+
+ // Attempt to figure out a constructor of `Stride` that will work.
+ // If both strides are fixed, use a default constructor:
+ template <typename S>
+ using stride_ctor_default = bool_constant<S::InnerStrideAtCompileTime != Eigen::Dynamic
+ && S::OuterStrideAtCompileTime != Eigen::Dynamic
+ && std::is_default_constructible<S>::value>;
+ // Otherwise, if there is a two-index constructor, assume it is (outer,inner) like
+ // Eigen::Stride, and use it:
+ template <typename S>
+ using stride_ctor_dual
+ = bool_constant<!stride_ctor_default<S>::value
+ && std::is_constructible<S, EigenIndex, EigenIndex>::value>;
+ // Otherwise, if there is a one-index constructor, and just one of the strides is dynamic, use
+ // it (passing whichever stride is dynamic).
+ template <typename S>
+ using stride_ctor_outer
+ = bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
+ && S::OuterStrideAtCompileTime == Eigen::Dynamic
+ && S::InnerStrideAtCompileTime != Eigen::Dynamic
+ && std::is_constructible<S, EigenIndex>::value>;
+ template <typename S>
+ using stride_ctor_inner
+ = bool_constant<!any_of<stride_ctor_default<S>, stride_ctor_dual<S>>::value
+ && S::InnerStrideAtCompileTime == Eigen::Dynamic
+ && S::OuterStrideAtCompileTime != Eigen::Dynamic
+ && std::is_constructible<S, EigenIndex>::value>;
+
+ template <typename S = StrideType, enable_if_t<stride_ctor_default<S>::value, int> = 0>
+ static S make_stride(EigenIndex, EigenIndex) {
+ return S();
+ }
+ template <typename S = StrideType, enable_if_t<stride_ctor_dual<S>::value, int> = 0>
+ static S make_stride(EigenIndex outer, EigenIndex inner) {
+ return S(outer, inner);
+ }
+ template <typename S = StrideType, enable_if_t<stride_ctor_outer<S>::value, int> = 0>
+ static S make_stride(EigenIndex outer, EigenIndex) {
+ return S(outer);
+ }
+ template <typename S = StrideType, enable_if_t<stride_ctor_inner<S>::value, int> = 0>
+ static S make_stride(EigenIndex, EigenIndex inner) {
+ return S(inner);
+ }
+};
+
+// type_caster for special matrix types (e.g. DiagonalMatrix), which are EigenBase, but not
+// EigenDense (i.e. they don't have a data(), at least not with the usual matrix layout).
+// load() is not supported, but we can cast them into the python domain by first copying to a
+// regular Eigen::Matrix, then casting that.
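+//
+// Illustrative sketch (binding name hypothetical): the special type is densified on the way
+// out, so Python receives a regular numpy.ndarray:
+//
+//     mod.def("diag3", [] { return Eigen::DiagonalMatrix<double, 3>(1.0, 2.0, 3.0); });
+//     // In Python, diag3() yields a 3x3 array with 1, 2, 3 on the diagonal.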
+template <typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_other<Type>::value>> {
+ static_assert(!std::is_pointer<typename Type::Scalar>::value,
+ PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
+
+protected:
+ using Matrix
+ = Eigen::Matrix<typename Type::Scalar, Type::RowsAtCompileTime, Type::ColsAtCompileTime>;
+ using props = EigenProps<Matrix>;
+
+public:
+ static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
+ handle h = eigen_encapsulate<props>(new Matrix(src));
+ return h;
+ }
+ static handle cast(const Type *src, return_value_policy policy, handle parent) {
+ return cast(*src, policy, parent);
+ }
+
+ static constexpr auto name = props::descriptor;
+
+ // Explicitly delete these: we do not support python -> C++ conversion for these types (i.e.
+ // they can be return types but not bound arguments). We still declare them (explicitly
+ // deleted) so that you end up here if you try anyway.
+ bool load(handle, bool) = delete;
+ operator Type() = delete;
+ template <typename>
+ using cast_op_type = Type;
+};
+
+template <typename Type>
+struct type_caster<Type, enable_if_t<is_eigen_sparse<Type>::value>> {
+ using Scalar = typename Type::Scalar;
+ static_assert(!std::is_pointer<Scalar>::value,
+ PYBIND11_EIGEN_MESSAGE_POINTER_TYPES_ARE_NOT_SUPPORTED);
+ using StorageIndex = remove_reference_t<decltype(*std::declval<Type>().outerIndexPtr())>;
+ using Index = typename Type::Index;
+ static constexpr bool rowMajor = Type::IsRowMajor;
+
+ bool load(handle src, bool) {
+ if (!src) {
+ return false;
+ }
+
+ auto obj = reinterpret_borrow<object>(src);
+ object sparse_module = module_::import("scipy.sparse");
+ object matrix_type = sparse_module.attr(rowMajor ? "csr_matrix" : "csc_matrix");
+
+ if (!type::handle_of(obj).is(matrix_type)) {
+ try {
+ obj = matrix_type(obj);
+ } catch (const error_already_set &) {
+ return false;
+ }
+ }
+
+ auto values = array_t<Scalar>((object) obj.attr("data"));
+ auto innerIndices = array_t<StorageIndex>((object) obj.attr("indices"));
+ auto outerIndices = array_t<StorageIndex>((object) obj.attr("indptr"));
+ auto shape = pybind11::tuple((pybind11::object) obj.attr("shape"));
+ auto nnz = obj.attr("nnz").cast<Index>();
+
+ if (!values || !innerIndices || !outerIndices) {
+ return false;
+ }
+
+ value = EigenMapSparseMatrix<Scalar,
+ Type::Flags &(Eigen::RowMajor | Eigen::ColMajor),
+ StorageIndex>(shape[0].cast<Index>(),
+ shape[1].cast<Index>(),
+ std::move(nnz),
+ outerIndices.mutable_data(),
+ innerIndices.mutable_data(),
+ values.mutable_data());
+
+ return true;
+ }
+
+ static handle cast(const Type &src, return_value_policy /* policy */, handle /* parent */) {
+ const_cast<Type &>(src).makeCompressed();
+
+ object matrix_type
+ = module_::import("scipy.sparse").attr(rowMajor ? "csr_matrix" : "csc_matrix");
+
+ array data(src.nonZeros(), src.valuePtr());
+ array outerIndices((rowMajor ? src.rows() : src.cols()) + 1, src.outerIndexPtr());
+ array innerIndices(src.nonZeros(), src.innerIndexPtr());
+
+ return matrix_type(pybind11::make_tuple(
+ std::move(data), std::move(innerIndices), std::move(outerIndices)),
+ pybind11::make_tuple(src.rows(), src.cols()))
+ .release();
+ }
+
+ PYBIND11_TYPE_CASTER(Type,
+ const_name<(Type::IsRowMajor) != 0>("scipy.sparse.csr_matrix[",
+ "scipy.sparse.csc_matrix[")
+ + npy_format_descriptor<Scalar>::name + const_name("]"));
+};
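+
+// Illustrative sketch (binding name hypothetical): sparse values round-trip through scipy.sparse;
+// a column-major Eigen::SparseMatrix arrives in Python as a csc_matrix, a row-major one as a
+// csr_matrix, and incoming objects in other scipy sparse formats are converted via scipy first:
+//
+//     mod.def("sparse_identity", [](int n) {
+//         Eigen::SparseMatrix<double> s(n, n);
+//         s.setIdentity();
+//         return s;  // -> scipy.sparse.csc_matrix
+//     });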
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)