6 #ifndef XGBOOST_LINALG_H_
7 #define XGBOOST_LINALG_H_
9 #include <dmlc/endian.h>
23 #include <type_traits>
33 #if defined(__CUDA__) || defined(__NVCC__)
34 #define LINALG_HD __host__ __device__
46 return (std::is_floating_point<T>::value
48 : (std::is_integral<T>::value ? (std::is_signed<T>::value ?
'i' :
'u') :
'\0'));
52 template <
size_t dim,
typename S,
typename Head,
size_t D>
53 constexpr
size_t Offset(S (&strides)[D],
size_t n, Head head) {
54 static_assert(dim < D);
55 return n + head * strides[dim];
58 template <
size_t dim,
typename S,
size_t D,
typename Head,
typename... Tail>
59 constexpr std::enable_if_t<
sizeof...(Tail) != 0,
size_t>
Offset(S (&strides)[D],
size_t n,
60 Head head, Tail &&...rest) {
61 static_assert(dim < D);
62 return Offset<dim + 1>(strides, n + (head * strides[dim]), std::forward<Tail>(rest)...);
65 template <
int32_t D,
bool f_array = false>
66 constexpr
void CalcStride(
size_t const (&shape)[D],
size_t (&stride)[D]) {
69 for (int32_t s = 1; s < D; ++s) {
70 stride[s] = shape[s - 1] * stride[s - 1];
74 for (int32_t s = D - 2; s >= 0; --s) {
75 stride[s] = shape[s + 1] * stride[s + 1];
88 [[nodiscard]] constexpr
size_t Size()
const {
return end -
beg; }
96 return std::is_same<T, IntTag>::value ? 0 : 1;
99 template <
typename T,
typename... S>
100 constexpr std::enable_if_t<
sizeof...(S) != 0, int32_t>
CalcSliceDim() {
107 for (
auto d : shape) {
113 template <
typename S>
116 template <
typename S>
117 using IndexToTag = std::conditional_t<std::is_integral<RemoveCRType<S>>::value,
IntTag, S>;
119 template <
int32_t n,
typename Fn>
121 #if defined __CUDA_ARCH__
124 for (int32_t i = 0; i < n; ++i) {
129 template <
typename T>
132 for (; v != 0; v &= v - 1) c++;
137 #if defined(__CUDA_ARCH__)
139 #elif defined(__GNUC__) || defined(__clang__)
140 return __builtin_popcount(v);
141 #elif defined(_MSC_VER)
149 #if defined(__CUDA_ARCH__)
151 #elif defined(__GNUC__) || defined(__clang__)
152 return __builtin_popcountll(v);
153 #elif defined(_MSC_VER) && defined(_M_X64)
154 return __popcnt64(v);
160 template <std::
size_t D,
typename Head>
162 static_assert(std::is_integral<std::remove_reference_t<Head>>::value,
"Invalid index type.");
169 template <std::size_t D,
typename Head,
typename... Rest>
171 static_assert(
sizeof...(Rest) < D,
"Index overflow.");
172 static_assert(std::is_integral<std::remove_reference_t<Head>>::value,
"Invalid index type.");
173 arr[D -
sizeof...(Rest) - 1] = head;
174 IndexToArr(arr, std::forward<Rest>(index)...);
177 template <
class T, std::size_t N, std::size_t... Idx>
178 constexpr
auto ArrToTuple(T (&arr)[N], std::index_sequence<Idx...>) {
179 return std::make_tuple(arr[Idx]...);
185 template <
class T, std::
size_t N>
187 return ArrToTuple(arr, std::make_index_sequence<N>{});
193 template <
typename I,
int32_t D>
196 static_assert(std::is_signed<decltype(D)>::value,
197 "Don't change the type without changing the for loop.");
198 for (int32_t dim = D; --dim > 0;) {
199 auto s =
static_cast<std::remove_const_t<std::remove_reference_t<I>
>>(shape[dim]);
202 index[dim] = idx - t * s;
205 index[dim] = idx & (s - 1);
213 template <
size_t dim,
typename I,
int32_t D>
215 static_assert(dim < D);
219 template <
size_t dim, int32_t D,
typename... S,
typename I,
220 std::enable_if_t<
sizeof...(S) != 0> * =
nullptr>
222 static_assert(dim < D);
224 ReshapeImpl<dim + 1>(out_shape, std::forward<S>(rest)...);
227 template <
typename Fn,
typename Tup,
size_t... I>
229 return f(std::get<I>(t)...);
238 template <
typename Fn,
typename Tup>
240 constexpr
auto kSize = std::tuple_size<Tup>::value;
241 return Apply(std::forward<Fn>(f), std::forward<Tup>(t), std::make_index_sequence<kSize>{});
251 template <
class B1,
class... Bn>
253 : std::conditional_t<static_cast<bool>(B1::value), Conjunction<Bn...>, B1> {};
255 template <
typename... Index>
258 template <
typename... Index>
269 template <
typename I>
292 template <
typename T,
int32_t kDim>
316 template <
size_t old_dim,
size_t new_dim,
int32_t D,
typename I>
317 LINALG_HD size_t MakeSliceDim(
size_t new_shape[D],
size_t new_stride[D],
318 detail::RangeTag<I> &&range)
const {
319 static_assert(new_dim < D);
320 static_assert(old_dim < kDim);
321 new_stride[new_dim] = stride_[old_dim];
322 new_shape[new_dim] = range.Size();
323 assert(
static_cast<decltype(shape_[old_dim])
>(range.end) <= shape_[old_dim]);
325 auto offset = stride_[old_dim] * range.beg;
331 template <
size_t old_dim,
size_t new_dim, int32_t D,
typename I,
typename... S>
332 LINALG_HD size_t MakeSliceDim(
size_t new_shape[D],
size_t new_stride[D],
333 detail::RangeTag<I> &&range, S &&...slices)
const {
334 static_assert(new_dim < D);
335 static_assert(old_dim < kDim);
336 new_stride[new_dim] = stride_[old_dim];
337 new_shape[new_dim] = range.Size();
338 assert(
static_cast<decltype(shape_[old_dim])
>(range.end) <= shape_[old_dim]);
340 auto offset = stride_[old_dim] * range.beg;
341 return MakeSliceDim<old_dim + 1, new_dim + 1, D>(new_shape, new_stride,
342 std::forward<S>(slices)...) +
346 template <
size_t old_dim,
size_t new_dim,
int32_t D>
347 LINALG_HD size_t MakeSliceDim(
size_t new_shape[D],
size_t new_stride[D], detail::AllTag)
const {
348 static_assert(new_dim < D);
349 static_assert(old_dim < kDim);
350 new_stride[new_dim] = stride_[old_dim];
351 new_shape[new_dim] = shape_[old_dim];
357 template <
size_t old_dim,
size_t new_dim, int32_t D,
typename... S>
358 LINALG_HD size_t MakeSliceDim(
size_t new_shape[D],
size_t new_stride[D], detail::AllTag,
359 S &&...slices)
const {
360 static_assert(new_dim < D);
361 static_assert(old_dim < kDim);
362 new_stride[new_dim] = stride_[old_dim];
363 new_shape[new_dim] = shape_[old_dim];
364 return MakeSliceDim<old_dim + 1, new_dim + 1, D>(new_shape, new_stride,
365 std::forward<S>(slices)...);
368 template <
size_t old_dim,
size_t new_dim,
int32_t D,
typename Index>
369 LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED
size_t new_shape[D],
370 DMLC_ATTRIBUTE_UNUSED
size_t new_stride[D], Index i)
const {
371 static_assert(old_dim < kDim);
372 return stride_[old_dim] * i;
377 template <
size_t old_dim,
size_t new_dim, int32_t D,
typename Index,
typename... S>
378 LINALG_HD std::enable_if_t<std::is_integral<Index>::value,
size_t> MakeSliceDim(
379 size_t new_shape[D],
size_t new_stride[D], Index i, S &&...slices)
const {
380 static_assert(old_dim < kDim);
381 auto offset = stride_[old_dim] * i;
383 MakeSliceDim<old_dim + 1, new_dim, D>(new_shape, new_stride, std::forward<S>(slices)...);
403 template <
typename I,
int32_t D>
407 template <
typename I,
int32_t D>
409 : data_{data}, ptr_{data_.data()}, device_{device} {
410 static_assert(D > 0 && D <= kDim,
"Invalid shape.");
412 detail::UnrollLoop<D>([&](
auto i) { shape_[i] = shape[i]; });
413 for (
auto i = D; i < kDim; ++i) {
423 detail::CalcStride<kDim, true>(shape_, stride_);
438 template <
typename I, std::
int32_t D>
441 : data_{data}, ptr_{data_.data()}, device_{device} {
442 static_assert(D == kDim,
"Invalid shape & stride.");
443 detail::UnrollLoop<D>([&](
auto i) {
444 shape_[i] = shape[i];
445 stride_[i] = stride[i];
452 std::enable_if_t<common::detail::IsAllowedElementTypeConversion<U, T>::value> * =
nullptr>
454 : data_{that.
Values()}, ptr_{data_.data()}, size_{that.
Size()}, device_{that.
DeviceIdx()} {
455 detail::UnrollLoop<kDim>([&](
auto i) {
456 stride_[i] = that.
Stride(i);
457 shape_[i] = that.
Shape(i);
476 static_assert(
sizeof...(index) <= kDim,
"Invalid index.");
477 size_t offset = detail::Offset<0ul>(stride_, 0ul, std::forward<Index>(index)...);
478 assert(offset < data_.
size() &&
"Out of bound access.");
486 static_assert(
sizeof...(index) <= kDim,
"Invalid index.");
487 size_t offset = detail::Offset<0ul>(stride_, 0ul, std::forward<Index>(index)...);
488 assert(offset < data_.
size() &&
"Out of bound access.");
505 template <
typename... S>
507 static_assert(
sizeof...(slices) <= kDim,
"Invalid slice.");
508 int32_t constexpr kNewDim{detail::CalcSliceDim<detail::IndexToTag<S>...>()};
509 size_t new_shape[kNewDim];
510 size_t new_stride[kNewDim];
511 auto offset = MakeSliceDim<0, 0, kNewDim>(new_shape, new_stride, std::forward<S>(slices)...);
545 static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
555 static_assert(std::is_same<decltype(stride), decltype(stride_)>::value);
557 detail::CalcStride<kDim, true>(shape_, stride);
573 template <
typename Container,
typename... S,
574 std::enable_if_t<!common::detail::IsSpan<Container>::value &&
575 !std::is_pointer_v<Container>> * =
nullptr>
577 using T = std::conditional_t<std::is_const_v<Container>,
578 std::add_const_t<typename Container::value_type>,
579 typename Container::value_type>;
580 std::size_t in_shape[
sizeof...(S)];
585 template <
typename T,
typename... S>
587 std::size_t in_shape[
sizeof...(S)];
589 return TensorView<T,
sizeof...(S)>{data, in_shape, device};
592 template <
typename T,
typename... S>
597 template <
typename T,
typename... S>
603 template <
typename T,
typename... S>
614 if (idx > std::numeric_limits<uint32_t>::max()) {
615 return detail::UnravelImpl<uint64_t, D>(
static_cast<uint64_t
>(idx), shape);
617 return detail::UnravelImpl<uint32_t, D>(
static_cast<uint32_t
>(idx), shape);
626 template <
typename... S>
628 std::size_t s[
sizeof...(S)];
638 template <
typename T>
648 template <
typename T>
649 auto MakeVec(T *ptr,
size_t s, int32_t device = -1) {
653 template <
typename T>
659 template <
typename T>
670 template <
typename T>
679 template <
typename T,
int32_t D>
682 array_interface[
"data"] = std::vector<Json>(2);
683 array_interface[
"data"][0] =
Integer{
reinterpret_cast<int64_t
>(t.
Values().data())};
684 array_interface[
"data"][1] =
Boolean{
true};
687 array_interface[
"stream"] =
Null{};
689 std::vector<Json> shape(t.
Shape().size());
690 std::vector<Json> stride(t.
Stride().size());
691 for (
size_t i = 0; i < t.
Shape().size(); ++i) {
695 array_interface[
"shape"] =
Array{shape};
696 array_interface[
"strides"] =
Array{stride};
697 array_interface[
"version"] = 3;
699 char constexpr kT = detail::ArrayInterfaceHandler::TypeChar<T>();
700 static_assert(kT !=
'\0');
701 if (DMLC_LITTLE_ENDIAN) {
702 array_interface[
"typestr"] =
String{
"<" + (kT + std::to_string(
sizeof(T)))};
704 array_interface[
"typestr"] =
String{
">" + (kT + std::to_string(
sizeof(T)))};
706 return array_interface;
712 template <
typename T,
int32_t D>
716 res[
"data"][1] =
Boolean{
false};
723 template <
typename T,
int32_t D>
730 template <
typename T,
int32_t D>
741 template <
typename T,
int32_t kDim = 5>
752 template <
typename I, std::
int32_t D>
753 void Initialize(I
const (&shape)[D], std::int32_t device) {
754 static_assert(D <= kDim,
"Invalid shape.");
755 std::copy(shape, shape + D, shape_);
756 for (
auto i = D; i < kDim; ++i) {
775 template <
typename I,
int32_t D>
777 :
Tensor{common::Span<I const, D>{shape}, device, order} {}
779 template <
typename I,
size_t D>
783 std::copy(shape.
data(), shape.
data() + D, shape_);
784 for (
auto i = D; i < kDim; ++i) {
799 template <
typename It,
typename I,
int32_t D>
800 explicit Tensor(It begin, It end, I
const (&shape)[D], std::int32_t device,
Order order =
kC)
803 h_vec.insert(h_vec.begin(), begin, end);
805 this->Initialize(shape, device);
808 template <
typename I,
int32_t D>
809 explicit Tensor(std::initializer_list<T> data, I
const (&shape)[D], std::int32_t device,
815 this->Initialize(shape, device);
821 template <
typename... Index>
823 return this->HostView()(std::forward<Index>(idx)...);
829 template <
typename... Index>
831 return this->HostView()(std::forward<Index>(idx)...);
841 return {span, shape_, device, order_};
844 return {span, shape_, device, order_};
851 return {span, shape_, device, order_};
854 return {span, shape_, device, order_};
861 [[nodiscard]]
size_t Size()
const {
return data_.
Size(); }
863 auto Shape(
size_t i)
const {
return shape_[i]; }
874 template <
typename Fn>
878 <<
"Inconsistent size after modification.";
888 static_assert(
sizeof...(S) <= kDim,
"Invalid shape.");
889 detail::ReshapeImpl<0>(shape_, std::forward<S>(s)...);
890 auto constexpr kEnd =
sizeof...(S);
891 static_assert(kEnd <= kDim,
"Invalid shape.");
892 std::fill(shape_ + kEnd, shape_ + kDim, 1);
904 static_assert(D <= kDim,
"Invalid shape.");
905 std::copy(shape.
data(), shape.
data() + D, this->shape_);
906 std::fill(shape_ + D, shape_ + kDim, 1);
918 template <
typename... S>
920 return this->HostView().Slice(std::forward<S>(slices)...);
925 template <
typename... S>
927 return this->HostView().Slice(std::forward<S>(slices)...);
937 template <
typename T>
940 template <
typename T>
946 template <
typename T,
typename... Index>
948 Tensor<T,
sizeof...(Index)> t;
957 template <
typename T,
typename... Index>
959 Tensor<T,
sizeof...(Index)> t;
962 t.Data()->Fill(std::move(v));
969 template <
typename T,
typename... Index>
971 return Constant(ctx,
static_cast<T
>(0), index...);
975 template <
typename T,
int32_t D>
981 for (
size_t i = 1; i < D; ++i) {
983 shape[i] = r.
Shape(i);
985 CHECK_EQ(shape[i], r.
Shape(i));
994 #if defined(LINALG_HD)
Defines configuration macros and basic types for xgboost.
Definition: host_device_vector.h:87
const T * ConstDevicePointer() const
void Extend(const HostDeviceVector< T > &other)
common::Span< T const > ConstHostSpan() const
Definition: host_device_vector.h:115
std::vector< T > & HostVector()
void Resize(size_t new_size, T v=T())
common::Span< const T > ConstDeviceSpan() const
T * HostPointer()
Definition: host_device_vector.h:112
void SetDevice(int device) const
common::Span< T > DeviceSpan()
common::Span< T > HostSpan()
Definition: host_device_vector.h:113
const T * ConstHostPointer() const
Definition: host_device_vector.h:116
Represents a JSON boolean value (true or false).
Definition: json.h:312
Data structure representing JSON format.
Definition: json.h:357
static void Dump(Json json, std::string *out, std::ios::openmode mode=std::ios::out)
Encode the JSON object. Optional parameter mode for choosing between text and binary (ubjson) output.
span class implementation, based on ISO C++20 std::span<T>. The interface should be the same.
Definition: span.h:424
constexpr XGBOOST_DEVICE pointer data() const __span_noexcept
Definition: span.h:549
XGBOOST_DEVICE auto subspan() const -> Span< element_type, detail::ExtentValue< Extent, Offset, Count >::value >
Definition: span.h:596
constexpr XGBOOST_DEVICE index_type size() const __span_noexcept
Definition: span.h:554
constexpr XGBOOST_DEVICE bool empty() const __span_noexcept
Definition: span.h:561
A tensor view with static type and dimension. It implements indexing and slicing.
Definition: linalg.h:293
LINALG_HD auto DeviceIdx() const
Obtain the CUDA device ordinal.
Definition: linalg.h:567
LINALG_HD std::size_t Size() const
Number of items in the tensor.
Definition: linalg.h:533
size_t[kDim] ShapeT
Definition: linalg.h:295
LINALG_HD bool CContiguous() const
Whether it's a c-contiguous array.
Definition: linalg.h:543
LINALG_HD auto Stride(size_t i) const
Definition: linalg.h:528
LINALG_HD auto Shape() const
Definition: linalg.h:519
ShapeT StrideT
Definition: linalg.h:296
constexpr static size_t kDimension
Definition: linalg.h:389
LINALG_HD auto Stride() const
Definition: linalg.h:524
LINALG_HD auto Slice(S &&...slices) const
Slice the tensor. The returned tensor has inferred dim and shape. Scalar result is not supported.
Definition: linalg.h:506
LINALG_HD auto Values() const -> decltype(data_) const &
Obtain a reference to the raw data.
Definition: linalg.h:563
LINALG_HD TensorView(common::Span< T > data, I const (&shape)[D], std::int32_t device, Order order)
Definition: linalg.h:408
LINALG_HD bool Contiguous() const
Whether this is a contiguous array; both C-contiguous and F-contiguous arrays return true.
Definition: linalg.h:537
LINALG_HD T const & operator()(Index &&...index) const
Index the tensor to obtain a scalar value.
Definition: linalg.h:485
LINALG_HD TensorView(TensorView< U, kDim > const &that)
Definition: linalg.h:453
LINALG_HD T & operator()(Index &&...index)
Index the tensor to obtain a scalar value.
Definition: linalg.h:475
constexpr static size_t kValueSize
Definition: linalg.h:388
LINALG_HD TensorView(common::Span< T > data, I const (&shape)[D], std::int32_t device)
Create a tensor with data and shape.
Definition: linalg.h:404
LINALG_HD bool FContiguous() const
Whether it's a f-contiguous array.
Definition: linalg.h:553
LINALG_HD auto Shape(size_t i) const
Definition: linalg.h:523
LINALG_HD TensorView(common::Span< T > data, I const (&shape)[D], I const (&stride)[D], std::int32_t device)
Create a tensor with data, shape and strides. Don't use this constructor if stride can be calculated ...
Definition: linalg.h:439
A tensor storage. To use it for other functionality like slicing one needs to obtain a view first....
Definition: linalg.h:742
auto Slice(S &&...slices)
Get a host view on the slice.
Definition: linalg.h:926
TensorView< T const, kDim > View(int32_t device) const
Definition: linalg.h:847
TensorView< T, kDim > View(int32_t device)
Get a TensorView for this tensor.
Definition: linalg.h:837
auto Slice(S &&...slices) const
Get a host view on the slice.
Definition: linalg.h:919
size_t[kDim] ShapeT
Definition: linalg.h:744
void SetDevice(int32_t device) const
Set device ordinal for this tensor.
Definition: linalg.h:933
HostDeviceVector< T > const * Data() const
Definition: linalg.h:866
void Reshape(size_t(&shape)[D])
Definition: linalg.h:912
auto HostView()
Definition: linalg.h:859
auto Shape(size_t i) const
Definition: linalg.h:863
Tensor(common::Span< I const, D > shape, std::int32_t device, Order order=kC)
Definition: linalg.h:780
HostDeviceVector< T > * Data()
Definition: linalg.h:865
T & operator()(Index &&...idx)
Index operator. Not thread safe, should not be used in performance critical region....
Definition: linalg.h:822
Tensor(I const (&shape)[D], std::int32_t device, Order order=kC)
Create a tensor with shape and device ordinal. The storage is initialized automatically.
Definition: linalg.h:776
auto Shape() const
Definition: linalg.h:862
Tensor(std::initializer_list< T > data, I const (&shape)[D], std::int32_t device, Order order=kC)
Definition: linalg.h:809
void ModifyInplace(Fn &&fn)
Visitor function for modification that changes shape and data.
Definition: linalg.h:875
void Reshape(common::Span< size_t const, D > shape)
Reshape the tensor.
Definition: linalg.h:903
auto HostView() const
Definition: linalg.h:858
T const & operator()(Index &&...idx) const
Index operator. Not thread safe, should not be used in performance critical region....
Definition: linalg.h:830
size_t Size() const
Definition: linalg.h:861
void Reshape(S &&...s)
Reshape the tensor.
Definition: linalg.h:887
int32_t DeviceIdx() const
Definition: linalg.h:934
ShapeT StrideT
Definition: linalg.h:745
Tensor(It begin, It end, I const (&shape)[D], std::int32_t device, Order order=kC)
Definition: linalg.h:800
A device-and-host vector abstraction layer.
#define LINALG_HD
Definition: linalg.h:36
Definition: intrusive_ptr.h:207
LINALG_HD auto UnravelImpl(I idx, common::Span< size_t const, D > shape)
Definition: linalg.h:194
void ReshapeImpl(size_t(&out_shape)[D], I s)
Definition: linalg.h:214
LINALG_HD int Popc(uint32_t v)
Definition: linalg.h:136
std::remove_const_t< std::remove_reference_t< S > > RemoveCRType
Definition: linalg.h:114
constexpr int32_t CalcSliceDim()
Calculate the dimension of sliced tensor.
Definition: linalg.h:95
constexpr LINALG_HD auto UnrollLoop(Fn fn)
Definition: linalg.h:120
std::conditional_t< std::is_integral< RemoveCRType< S > >::value, IntTag, S > IndexToTag
Definition: linalg.h:117
constexpr size_t Offset(S(&strides)[D], size_t n, Head head)
Definition: linalg.h:53
decltype(auto) constexpr LINALG_HD Apply(Fn &&f, Tup &&t, std::index_sequence< I... >)
Definition: linalg.h:228
LINALG_HD void IndexToArr(std::size_t(&arr)[D], Head head)
Definition: linalg.h:161
constexpr void CalcStride(size_t const (&shape)[D], size_t(&stride)[D])
Definition: linalg.h:66
constexpr auto ArrToTuple(T(&arr)[N], std::index_sequence< Idx... >)
Definition: linalg.h:178
int32_t NativePopc(T v)
Definition: linalg.h:130
std::enable_if_t< IsAllIntegral< Index... >::value > EnableIfIntegral
Definition: linalg.h:259
constexpr size_t CalcSize(size_t(&shape)[D])
Definition: linalg.h:105
constexpr detail::RangeTag< I > Range(I beg, I end)
Specify a range of elements in the axis for slicing.
Definition: linalg.h:270
auto MakeVec(T *ptr, size_t s, int32_t device=-1)
Create a vector view from contiguous memory.
Definition: linalg.h:649
auto MakeTensorView(Context const *ctx, Container &data, S &&...shape)
Constructor for automatic type deduction.
Definition: linalg.h:576
auto ArrayInterfaceStr(TensorView< T const, D > const &t)
Return string representation of array interface.
Definition: linalg.h:724
LINALG_HD auto UnravelIndex(size_t idx, common::Span< size_t const, D > shape)
Turns a linear index into a multi-dimensional index. Similar to numpy.unravel_index.
Definition: linalg.h:613
void Stack(Tensor< T, D > *l, Tensor< T, D > const &r)
Definition: linalg.h:976
auto Constant(Context const *ctx, T v, Index &&...index)
Create an array with value v.
Definition: linalg.h:958
auto Zeros(Context const *ctx, Index &&...index)
Like np.zeros, return a new array of given shape and type, filled with zeros.
Definition: linalg.h:970
auto Empty(Context const *ctx, Index &&...index)
Create an array without initialization.
Definition: linalg.h:947
constexpr detail::AllTag All()
Specify all elements in the axis for slicing.
Definition: linalg.h:265
Json ArrayInterface(TensorView< T const, D > const &t)
Array Interface defined by numpy.
Definition: linalg.h:680
Order
Definition: linalg.h:274
@ kC
Definition: linalg.h:275
@ kF
Definition: linalg.h:276
JsonInteger Integer
Definition: json.h:593
#define SPAN_CHECK(cond)
Definition: span.h:119
Runtime context for XGBoost. Contains information like threads and device.
Definition: context.h:84
bool IsCPU() const
Is XGBoost running on CPU?
Definition: context.h:133
std::int32_t gpu_id
Definition: context.h:107
static constexpr char TypeChar()
Definition: linalg.h:45
constexpr size_t Size() const
Definition: linalg.h:88
I end
Definition: linalg.h:87
I beg
Definition: linalg.h:86