#ifndef XGBOOST_LINALG_H_
#define XGBOOST_LINALG_H_

#include <dmlc/endian.h>

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#if defined(__CUDA__) || defined(__NVCC__)
#define LINALG_HD __host__ __device__
#else
#define LINALG_HD
#endif  // defined(__CUDA__) || defined(__NVCC__)
struct ArrayInterfaceHandler {
  /**
   * \brief Type character for the numpy array interface typestr.
   */
  template <typename T>
  static constexpr char TypeChar() {
    return (std::is_floating_point_v<T>
                ? 'f'
                : (std::is_integral_v<T> ? (std::is_signed_v<T> ? 'i' : 'u') : '\0'));
  }
};
template <size_t dim, typename S, typename Head, size_t D>
constexpr size_t Offset(S (&strides)[D], size_t n, Head head) {
  static_assert(dim < D);
  return n + head * strides[dim];
}
template <size_t dim, typename S, size_t D, typename Head, typename... Tail>
constexpr std::enable_if_t<sizeof...(Tail) != 0, size_t> Offset(S (&strides)[D], size_t n,
                                                                Head head, Tail &&...rest) {
  static_assert(dim < D);
  return Offset<dim + 1>(strides, n + (head * strides[dim]), std::forward<Tail>(rest)...);
}
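
// Example (illustrative): for a row-major 2x3 tensor the strides are {3, 1}, so
// Offset<0>(strides, 0, 1, 2) accumulates 1 * 3 + 2 * 1 == 5, the linear offset
// of element (1, 2).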
template <int32_t D, bool f_array = false>
constexpr void CalcStride(size_t const (&shape)[D], size_t (&stride)[D]) {
  if (f_array) {
    stride[0] = 1;
    for (int32_t s = 1; s < D; ++s) {
      stride[s] = shape[s - 1] * stride[s - 1];
    }
  } else {
    stride[D - 1] = 1;
    for (int32_t s = D - 2; s >= 0; --s) {
      stride[s] = shape[s + 1] * stride[s + 1];
    }
  }
}
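
// Example (illustrative): for shape {2, 3, 4} CalcStride produces {12, 4, 1} in the
// default C (row-major) order, and {1, 2, 6} when f_array is true (column-major).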
struct AllTag {};

struct IntTag {};

template <typename I>
struct RangeTag {
  I beg;
  I end;
  [[nodiscard]] constexpr size_t Size() const { return end - beg; }
};
/**
 * \brief Calculate the dimension of sliced tensor.
 */
template <typename T>
constexpr int32_t CalcSliceDim() {
  return std::is_same_v<T, IntTag> ? 0 : 1;
}
template <typename T, typename... S>
constexpr std::enable_if_t<sizeof...(S) != 0, int32_t> CalcSliceDim() {
  return CalcSliceDim<T>() + CalcSliceDim<S...>();
}
template <int32_t D>
constexpr size_t CalcSize(size_t (&shape)[D]) {
  size_t size = 1;
  for (auto d : shape) {
    size *= d;
  }
  return size;
}
template <typename S>
using RemoveCRType = std::remove_const_t<std::remove_reference_t<S>>;

template <typename S>
using IndexToTag = std::conditional_t<std::is_integral_v<RemoveCRType<S>>, IntTag, S>;
template <int32_t n, typename Fn>
LINALG_HD constexpr auto UnrollLoop(Fn fn) {
#if defined __CUDA_ARCH__
#pragma unroll n
#endif  // defined __CUDA_ARCH__
  for (int32_t i = 0; i < n; ++i) {
    fn(i);
  }
}
template <typename T>
int32_t NativePopc(T v) {
  int c = 0;
  // Brian Kernighan's bit count: clear the lowest set bit until none remain.
  for (; v != 0; v &= v - 1) c++;
  return c;
}
LINALG_HD int Popc(uint32_t v) {
#if defined(__CUDA_ARCH__)
  return __popc(v);
#elif defined(__GNUC__) || defined(__clang__)
  return __builtin_popcount(v);
#elif defined(_MSC_VER)
  return __popcnt(v);
#else
  return NativePopc(v);
#endif  // compiler
}
LINALG_HD int Popc(uint64_t v) {
#if defined(__CUDA_ARCH__)
  return __popcll(v);
#elif defined(__GNUC__) || defined(__clang__)
  return __builtin_popcountll(v);
#elif defined(_MSC_VER) && defined(_M_X64)
  return __popcnt64(v);
#else
  return NativePopc(v);
#endif  // compiler
}
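
// Example (illustrative): Popc(0b1011u) == 3. The NativePopc fallback clears the
// lowest set bit each iteration (v &= v - 1), so it loops once per set bit.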
template <std::size_t D, typename Head>
LINALG_HD void IndexToArr(std::size_t (&arr)[D], Head head) {
  static_assert(std::is_integral_v<std::remove_reference_t<Head>>, "Invalid index type.");
  arr[D - 1] = head;
}
/**
 * \brief Convert index from parameter pack to C-style array.
 */
template <std::size_t D, typename Head, typename... Rest>
LINALG_HD void IndexToArr(std::size_t (&arr)[D], Head head, Rest &&...index) {
  static_assert(sizeof...(Rest) < D, "Index overflow.");
  static_assert(std::is_integral_v<std::remove_reference_t<Head>>, "Invalid index type.");
  arr[D - sizeof...(Rest) - 1] = head;
  IndexToArr(arr, std::forward<Rest>(index)...);
}
template <class T, std::size_t N, std::size_t... Idx>
constexpr auto ArrToTuple(T (&arr)[N], std::index_sequence<Idx...>) {
  return std::make_tuple(arr[Idx]...);
}
/**
 * \brief Convert C-style array to std::tuple.
 */
template <class T, std::size_t N>
constexpr auto ArrToTuple(T (&arr)[N]) {
  return ArrToTuple(arr, std::make_index_sequence<N>{});
}
// Convert a linear index back into a multi-dimensional index. Division is avoided
// for power-of-two extents, where `idx & (s - 1)` extracts the coordinate and a
// shift by popcount(s - 1) replaces the division.
template <typename I, int32_t D>
LINALG_HD auto UnravelImpl(I idx, common::Span<size_t const, D> shape) {
  std::size_t index[D]{0};
  static_assert(std::is_signed_v<decltype(D)>,
                "Don't change the type without changing the for loop.");
  auto const sptr = shape.data();
  for (int32_t dim = D; --dim > 0;) {
    auto s = static_cast<std::remove_const_t<std::remove_reference_t<I>>>(sptr[dim]);
    if (s & (s - 1)) {
      auto t = idx / s;
      index[dim] = idx - t * s;
      idx = t;
    } else {  // exp of 2
      index[dim] = idx & (s - 1);
      idx >>= Popc(s - 1);
    }
  }
  index[0] = idx;
  return ArrToTuple(index);
}
template <size_t dim, typename I, int32_t D>
void ReshapeImpl(size_t (&out_shape)[D], I s) {
  static_assert(dim < D);
  out_shape[dim] = s;
}
template <size_t dim, int32_t D, typename... S, typename I,
          std::enable_if_t<sizeof...(S) != 0> * = nullptr>
void ReshapeImpl(size_t (&out_shape)[D], I &&s, S &&...rest) {
  static_assert(dim < D);
  out_shape[dim] = s;
  ReshapeImpl<dim + 1>(out_shape, std::forward<S>(rest)...);
}
template <typename Fn, typename Tup, size_t... I>
LINALG_HD decltype(auto) constexpr Apply(Fn &&f, Tup &&t, std::index_sequence<I...>) {
  return f(std::get<I>(t)...);
}
/**
 * \brief Apply function to a tuple of arguments.
 */
template <typename Fn, typename Tup>
LINALG_HD decltype(auto) constexpr Apply(Fn &&f, Tup &&t) {
  constexpr auto kSize = std::tuple_size<Tup>::value;
  return Apply(std::forward<Fn>(f), std::forward<Tup>(t), std::make_index_sequence<kSize>{});
}
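
// Example (illustrative): Apply unpacks a tuple into an argument list:
//   Apply([](int i, int j) { return i + j; }, std::make_tuple(1, 2)) == 3
// This pairs with UnravelIndex below, which returns an index tuple.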
/*
 * C++17 conjunction.
 */
template <class...>
struct Conjunction : std::true_type {};
template <class B1>
struct Conjunction<B1> : B1 {};
template <class B1, class... Bn>
struct Conjunction<B1, Bn...>
    : std::conditional_t<static_cast<bool>(B1::value), Conjunction<Bn...>, B1> {};
template <typename... Index>
using IsAllIntegral = Conjunction<std::is_integral<std::remove_reference_t<Index>>...>;

template <typename... Index>
using EnableIfIntegral = std::enable_if_t<IsAllIntegral<Index...>::value>;
/**
 * \brief Specify all elements in the axis for slicing.
 */
constexpr detail::AllTag All() { return {}; }
/**
 * \brief Specify a range of elements in the axis for slicing.
 */
template <typename I>
constexpr detail::RangeTag<I> Range(I beg, I end) { return {beg, end}; }

enum Order : std::uint8_t {
  kC,  // Row-major (C style).
  kF,  // Column-major (Fortran style).
};
/**
 * \brief A tensor view with static type and dimension. It implements indexing and
 *        slicing.
 */
template <typename T, int32_t kDim>
class TensorView {
 public:
  using ShapeT = std::size_t[kDim];
  using StrideT = ShapeT;
  using element_type = T;
  using value_type = std::remove_cv_t<T>;

 private:
  StrideT stride_{1};
  ShapeT shape_{0};
  common::Span<T> data_;
  T *ptr_{nullptr};
  std::size_t size_{1};
  DeviceOrd device_;
  template <size_t old_dim, size_t new_dim, int32_t D, typename I>
  LINALG_HD size_t MakeSliceDim(std::size_t new_shape[D], std::size_t new_stride[D],
                                detail::RangeTag<I> &&range) const {
    static_assert(new_dim < D);
    static_assert(old_dim < kDim);
    new_stride[new_dim] = stride_[old_dim];
    new_shape[new_dim] = range.Size();
    assert(static_cast<decltype(shape_[old_dim])>(range.end) <= shape_[old_dim]);

    auto offset = stride_[old_dim] * range.beg;
    return offset;
  }
  template <size_t old_dim, size_t new_dim, int32_t D, typename I, typename... S>
  LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D],
                                detail::RangeTag<I> &&range, S &&...slices) const {
    static_assert(new_dim < D);
    static_assert(old_dim < kDim);
    new_stride[new_dim] = stride_[old_dim];
    new_shape[new_dim] = range.Size();
    assert(static_cast<decltype(shape_[old_dim])>(range.end) <= shape_[old_dim]);

    auto offset = stride_[old_dim] * range.beg;
    return MakeSliceDim<old_dim + 1, new_dim + 1, D>(new_shape, new_stride,
                                                     std::forward<S>(slices)...) +
           offset;
  }
  template <size_t old_dim, size_t new_dim, int32_t D>
  LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D],
                                detail::AllTag) const {
    static_assert(new_dim < D);
    static_assert(old_dim < kDim);
    new_stride[new_dim] = stride_[old_dim];
    new_shape[new_dim] = shape_[old_dim];
    return 0;
  }
  template <size_t old_dim, size_t new_dim, int32_t D, typename... S>
  LINALG_HD size_t MakeSliceDim(size_t new_shape[D], size_t new_stride[D], detail::AllTag,
                                S &&...slices) const {
    static_assert(new_dim < D);
    static_assert(old_dim < kDim);
    new_stride[new_dim] = stride_[old_dim];
    new_shape[new_dim] = shape_[old_dim];
    return MakeSliceDim<old_dim + 1, new_dim + 1, D>(new_shape, new_stride,
                                                     std::forward<S>(slices)...);
  }
  template <size_t old_dim, size_t new_dim, int32_t D, typename Index>
  LINALG_HD size_t MakeSliceDim(DMLC_ATTRIBUTE_UNUSED size_t new_shape[D],
                                DMLC_ATTRIBUTE_UNUSED size_t new_stride[D], Index i) const {
    static_assert(old_dim < kDim);
    return stride_[old_dim] * i;
  }
  template <size_t old_dim, size_t new_dim, int32_t D, typename Index, typename... S>
  LINALG_HD std::enable_if_t<std::is_integral_v<Index>, size_t> MakeSliceDim(
      size_t new_shape[D], size_t new_stride[D], Index i, S &&...slices) const {
    static_assert(old_dim < kDim);
    auto offset = stride_[old_dim] * i;
    auto res =
        MakeSliceDim<old_dim + 1, new_dim, D>(new_shape, new_stride, std::forward<S>(slices)...);
    return res + offset;
  }
 public:
  /**
   * \brief Create a tensor with data and shape.
   */
  template <typename I, int32_t D>
  LINALG_HD TensorView(common::Span<T> data, I const (&shape)[D], DeviceOrd device)
      : TensorView{data, shape, device, kC} {}
  template <typename I, int32_t D>
  LINALG_HD TensorView(common::Span<T> data, I const (&shape)[D], DeviceOrd device, Order order)
      : data_{data}, ptr_{data_.data()}, device_{device} {
    static_assert(D > 0 && D <= kDim, "Invalid shape.");
    // shape
    detail::UnrollLoop<D>([&](auto i) { shape_[i] = shape[i]; });
    for (auto i = D; i < kDim; ++i) {
      shape_[i] = 1;
    }
    // stride
    switch (order) {
      case kC: {
        detail::CalcStride(shape_, stride_);
        break;
      }
      case kF: {
        detail::CalcStride<kDim, true>(shape_, stride_);
        break;
      }
      default: {
        SPAN_CHECK(false);
      }
    }
    // size
    size_ = detail::CalcSize(shape_);
  }
  /**
   * \brief Create a tensor with data, shape and strides. Don't use this constructor if
   *        stride can be calculated from shape.
   */
  template <typename I, int32_t D>
  LINALG_HD TensorView(common::Span<T> data, I const (&shape)[D], I const (&stride)[D],
                       DeviceOrd device)
      : data_{data}, ptr_{data_.data()}, device_{device} {
    static_assert(D == kDim, "Invalid shape & stride.");
    detail::UnrollLoop<D>([&](auto i) {
      shape_[i] = shape[i];
      stride_[i] = stride[i];
    });
    size_ = detail::CalcSize(shape_);
  }
  template <typename U,
            std::enable_if_t<common::detail::IsAllowedElementTypeConversion<U, T>::value> * =
                nullptr>
  LINALG_HD TensorView(TensorView<U, kDim> const &that)  // NOLINT
      : data_{that.Values()}, ptr_{data_.data()}, size_{that.Size()}, device_{that.Device()} {
    detail::UnrollLoop<kDim>([&](auto i) {
      stride_[i] = that.Stride(i);
      shape_[i] = that.Shape(i);
    });
  }
  /**
   * \brief Index the tensor to obtain a scalar value.
   */
  template <typename... Index, detail::EnableIfIntegral<Index...> * = nullptr>
  LINALG_HD T &operator()(Index &&...index) {
    static_assert(sizeof...(index) <= kDim, "Invalid index.");
    size_t offset = detail::Offset<0ul>(stride_, 0ul, std::forward<Index>(index)...);
    assert(offset < data_.size() && "Out of bound access.");
    return ptr_[offset];
  }
  /**
   * \brief Index the tensor to obtain a scalar value.
   */
  template <typename... Index, detail::EnableIfIntegral<Index...> * = nullptr>
  LINALG_HD T const &operator()(Index &&...index) const {
    static_assert(sizeof...(index) <= kDim, "Invalid index.");
    size_t offset = detail::Offset<0ul>(stride_, 0ul, std::forward<Index>(index)...);
    assert(offset < data_.size() && "Out of bound access.");
    return ptr_[offset];
  }
  /**
   * \brief Slice the tensor. The returned tensor has inferred dim and shape. Scalar
   *        result is not supported.
   */
  template <typename... S>
  LINALG_HD auto Slice(S &&...slices) const {
    static_assert(sizeof...(slices) <= kDim, "Invalid slice.");
    int32_t constexpr kNewDim{detail::CalcSliceDim<detail::IndexToTag<S>...>()};
    size_t new_shape[kNewDim];
    size_t new_stride[kNewDim];
    auto offset = MakeSliceDim<0, 0, kNewDim>(new_shape, new_stride, std::forward<S>(slices)...);
    // ret is a different type due to changed dimension, so we can not access its
    // private fields.
    TensorView<T, kNewDim> ret{data_.subspan(data_.empty() ? 0 : offset), new_shape, new_stride,
                               device_};
    return ret;
  }
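
  // Example (illustrative): slicing a 2-dim view t.
  //   t.Slice(1, All());            // 1-dim view of the second row
  //   t.Slice(All(), Range(0, 2));  // 2-dim view of the first two columns
  // An integer argument removes a dimension; All and Range keep it.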
  LINALG_HD auto Shape() const { return common::Span<std::size_t const, kDim>{shape_}; }
  LINALG_HD auto Shape(size_t i) const { return shape_[i]; }
  LINALG_HD auto Stride() const { return common::Span<std::size_t const, kDim>{stride_}; }
  LINALG_HD auto Stride(size_t i) const { return stride_[i]; }

  /**
   * \brief Number of items in the tensor.
   */
  [[nodiscard]] LINALG_HD std::size_t Size() const { return size_; }
  [[nodiscard]] bool Empty() const { return Size() == 0; }
  /**
   * \brief Whether it's a c-contiguous array.
   */
  [[nodiscard]] LINALG_HD bool CContiguous() const {
    StrideT stride;
    static_assert(std::is_same_v<decltype(stride), decltype(stride_)>);
    // It's contiguous if the stride can be calculated from the shape.
    detail::CalcStride(shape_, stride);
    return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};
  }
  /**
   * \brief Whether it's a f-contiguous array.
   */
  [[nodiscard]] LINALG_HD bool FContiguous() const {
    StrideT stride;
    static_assert(std::is_same_v<decltype(stride), decltype(stride_)>);
    detail::CalcStride<kDim, true>(shape_, stride);
    return common::Span<size_t const, kDim>{stride_} == common::Span<size_t const, kDim>{stride};
  }
  /**
   * \brief Obtain a reference to the raw data.
   */
  LINALG_HD auto Values() const -> decltype(data_) const & { return data_; }
  /**
   * \brief Obtain the device ordinal.
   */
  LINALG_HD auto Device() const { return device_; }
};
/**
 * \brief Constructor for automatic type deduction.
 */
template <typename Container, typename... S,
          std::enable_if_t<!common::detail::IsSpan<Container>::value &&
                           !std::is_pointer_v<Container>> * = nullptr>
auto MakeTensorView(Context const *ctx, Container &data, S &&...shape) {
  using T = std::conditional_t<std::is_const_v<Container>,
                               std::add_const_t<typename Container::value_type>,
                               typename Container::value_type>;
  std::size_t in_shape[sizeof...(S)];
  detail::IndexToArr(in_shape, std::forward<S>(shape)...);
  return TensorView<T, sizeof...(S)>{data, in_shape, ctx->Device()};
}

template <typename T, typename... S>
auto MakeTensorView(DeviceOrd device, common::Span<T> data, S &&...shape) {
  std::size_t in_shape[sizeof...(S)];
  detail::IndexToArr(in_shape, std::forward<S>(shape)...);
  return TensorView<T, sizeof...(S)>{data, in_shape, device};
}
template <typename T, typename... S>
auto MakeTensorView(Context const *ctx, common::Span<T> data, S &&...shape) {
  std::size_t in_shape[sizeof...(S)];
  detail::IndexToArr(in_shape, std::forward<S>(shape)...);
  return TensorView<T, sizeof...(S)>{data, in_shape, ctx->Device()};
}

// Overloads taking a HostDeviceVector pick the host or device span depending on
// where the context says the data should live.
template <typename T, typename... S>
auto MakeTensorView(Context const *ctx, HostDeviceVector<T> *data, S &&...shape) {
  auto span = ctx->IsCPU() ? data->HostSpan() : data->DeviceSpan();
  return MakeTensorView(ctx->Device(), span, std::forward<S>(shape)...);
}

template <typename T, typename... S>
auto MakeTensorView(Context const *ctx, HostDeviceVector<T> const *data, S &&...shape) {
  auto span = ctx->IsCPU() ? data->ConstHostSpan() : data->ConstDeviceSpan();
  return MakeTensorView(ctx->Device(), span, std::forward<S>(shape)...);
}
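
// Example (illustrative): view a std::vector as a 2x3 matrix on the host.
//   std::vector<float> storage(6);
//   auto m = MakeTensorView(&ctx, storage, 2, 3);  // TensorView<float, 2>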
/**
 * \brief Turns linear index into multi-dimension index. Similar to numpy unravel.
 */
template <int32_t D>
LINALG_HD auto UnravelIndex(size_t idx, common::Span<size_t const, D> shape) {
  if (idx > std::numeric_limits<uint32_t>::max()) {
    return detail::UnravelImpl<uint64_t, D>(static_cast<uint64_t>(idx), shape);
  } else {
    return detail::UnravelImpl<uint32_t, D>(static_cast<uint32_t>(idx), shape);
  }
}
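
// Example (illustrative): with C-order shape {2, 3, 4}, linear index 7 unravels to
// the tuple (0, 1, 3), since 7 == 0 * 12 + 1 * 4 + 3 * 1.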
template <typename... S>
LINALG_HD auto UnravelIndex(std::size_t idx, S... shape) {
  std::size_t s[sizeof...(S)];
  detail::IndexToArr(s, shape...);
  return UnravelIndex(idx, common::Span<std::size_t const, sizeof...(S)>(s));
}
/**
 * \brief A view over a vector, specialization of Tensor.
 */
template <typename T>
using VectorView = TensorView<T, 1>;

/**
 * \brief Create a vector view from contiguous memory.
 */
template <typename T>
auto MakeVec(T *ptr, size_t s, DeviceOrd device = DeviceOrd::CPU()) {
  return TensorView<T, 1>{{ptr, s}, {s}, device};
}

template <typename T>
auto MakeVec(HostDeviceVector<T> *data) {
  return MakeVec(data->Device().IsCPU() ? data->HostPointer() : data->DevicePointer(),
                 data->Size(), data->Device());
}

template <typename T>
auto MakeVec(HostDeviceVector<T> const *data) {
  return MakeVec(data->Device().IsCPU() ? data->ConstHostPointer() : data->ConstDevicePointer(),
                 data->Size(), data->Device());
}

/**
 * \brief A view over a matrix, specialization of Tensor.
 */
template <typename T>
using MatrixView = TensorView<T, 2>;
/**
 * \brief Array interface defined by numpy.
 */
template <typename T, int32_t D>
Json ArrayInterface(TensorView<T const, D> const &t) {
  Json array_interface{Object{}};
  array_interface["data"] = std::vector<Json>(2);
  array_interface["data"][0] = Integer{reinterpret_cast<int64_t>(t.Values().data())};
  array_interface["data"][1] = Boolean{true};
  if (t.Device().IsCUDA()) {
    // Use the legacy default stream.
    array_interface["stream"] = Integer{2};
  }
  std::vector<Json> shape(t.Shape().size());
  std::vector<Json> stride(t.Stride().size());
  for (size_t i = 0; i < t.Shape().size(); ++i) {
    shape[i] = Integer{static_cast<int64_t>(t.Shape(i))};
    stride[i] = Integer{static_cast<int64_t>(t.Stride(i) * sizeof(T))};
  }
  array_interface["shape"] = Array{shape};
  array_interface["strides"] = Array{stride};
  array_interface["version"] = 3;

  char constexpr kT = detail::ArrayInterfaceHandler::TypeChar<T>();
  static_assert(kT != '\0');
  if (DMLC_LITTLE_ENDIAN) {
    array_interface["typestr"] = String{"<" + (kT + std::to_string(sizeof(T)))};
  } else {
    array_interface["typestr"] = String{">" + (kT + std::to_string(sizeof(T)))};
  }
  return array_interface;
}
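
// Example (illustrative): a contiguous 2x3 float32 tensor on a little-endian CPU
// produces roughly:
//   {"data": [<address>, true], "shape": [2, 3], "strides": [12, 4],
//    "typestr": "<f4", "version": 3}
// Note the strides are reported in bytes, per the numpy array interface spec.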
/**
 * \brief Same as the const overload, but the view is mutable, so the read-only flag
 *        in the array interface is set to false.
 */
template <typename T, int32_t D>
Json ArrayInterface(TensorView<T, D> const &t) {
  auto res = ArrayInterface(TensorView<T const, D>{t});
  res["data"][1] = Boolean{false};
  return res;
}
/**
 * \brief Return string representation of array interface.
 */
template <typename T, int32_t D>
auto ArrayInterfaceStr(TensorView<T const, D> const &t) {
  std::string str;
  Json::Dump(ArrayInterface(t), &str);
  return str;
}

template <typename T, int32_t D>
auto ArrayInterfaceStr(TensorView<T, D> const &t) {
  std::string str;
  Json::Dump(ArrayInterface(t), &str);
  return str;
}

template <typename T>
auto Make1dInterface(T const *vec, std::size_t len) {
  return ArrayInterfaceStr(MakeVec(vec, len));
}
/**
 * \brief A tensor storage. To use it for other functionality like slicing one needs
 *        to obtain a view first. This way we can use it on both host and device.
 */
template <typename T, int32_t kDim = 5>
class Tensor {
 public:
  using ShapeT = std::size_t[kDim];
  using StrideT = ShapeT;

 private:
  HostDeviceVector<T> data_;
  ShapeT shape_{0};
  Order order_{kC};
  template <typename I, std::int32_t D>
  void Initialize(I const (&shape)[D], DeviceOrd device) {
    static_assert(D <= kDim, "Invalid shape.");
    std::copy(shape, shape + D, shape_);
    for (auto i = D; i < kDim; ++i) {
      shape_[i] = 1;
    }
    if (device.IsCUDA()) {
      data_.SetDevice(device);
      data_.ConstDevicePointer();  // Pull to device.
    }
    CHECK_EQ(data_.Size(), detail::CalcSize(shape_));
  }
 public:
  Tensor() = default;

  /**
   * \brief Create a tensor with shape and device ordinal. The storage is initialized
   *        automatically.
   */
  template <typename I, int32_t D>
  explicit Tensor(I const (&shape)[D], DeviceOrd device, Order order = kC)
      : Tensor{common::Span<I const, D>{shape}, device, order} {}
  template <typename I, size_t D>
  explicit Tensor(common::Span<I const, D> shape, DeviceOrd device, Order order = kC)
      : order_{order} {
    // No device unroll as this is a host-only function.
    std::copy(shape.data(), shape.data() + D, shape_);
    for (auto i = D; i < kDim; ++i) {
      shape_[i] = 1;
    }
    auto size = detail::CalcSize(shape_);
    if (device.IsCUDA()) {
      data_.SetDevice(device);
    }
    data_.Resize(size);
  }
  template <typename It, typename I, int32_t D>
  explicit Tensor(It begin, It end, I const (&shape)[D], DeviceOrd device, Order order = kC)
      : order_{order} {
    auto &h_vec = data_.HostVector();
    h_vec.insert(h_vec.begin(), begin, end);
    // shape
    this->Initialize(shape, device);
  }
  template <typename I, int32_t D>
  explicit Tensor(std::initializer_list<T> data, I const (&shape)[D], DeviceOrd device,
                  Order order = kC)
      : order_{order} {
    auto &h_vec = data_.HostVector();
    h_vec = data;
    // shape
    this->Initialize(shape, device);
  }
  /**
   * \brief Index operator. Not thread safe, should not be used in performance-critical
   *        regions. For more efficient indexing, consider getting a view first.
   */
  template <typename... Index>
  T &operator()(Index &&...idx) {
    return this->HostView()(std::forward<Index>(idx)...);
  }
  /**
   * \brief Index operator. Not thread safe, should not be used in performance-critical
   *        regions. For more efficient indexing, consider getting a view first.
   */
  template <typename... Index>
  T const &operator()(Index &&...idx) const {
    return this->HostView()(std::forward<Index>(idx)...);
  }
  /**
   * \brief Get a TensorView for this tensor.
   */
  auto View(DeviceOrd device) {
    if (device.IsCPU()) {
      auto span = data_.HostSpan();
      return TensorView<T, kDim>{span, shape_, device, order_};
    } else {
      data_.SetDevice(device);
      auto span = data_.DeviceSpan();
      return TensorView<T, kDim>{span, shape_, device, order_};
    }
  }
  auto View(DeviceOrd device) const {
    if (device.IsCPU()) {
      auto span = data_.ConstHostSpan();
      return TensorView<T const, kDim>{span, shape_, device, order_};
    } else {
      data_.SetDevice(device);
      auto span = data_.ConstDeviceSpan();
      return TensorView<T const, kDim>{span, shape_, device, order_};
    }
  }

  auto HostView() { return this->View(DeviceOrd::CPU()); }
  auto HostView() const { return this->View(DeviceOrd::CPU()); }
  [[nodiscard]] std::size_t Size() const { return data_.Size(); }
  [[nodiscard]] bool Empty() const { return Size() == 0; }

  auto Shape() const { return common::Span<size_t const, kDim>{shape_}; }
  auto Shape(size_t i) const { return shape_[i]; }

  HostDeviceVector<T> *Data() { return &data_; }
  HostDeviceVector<T> const *Data() const { return &data_; }
  /**
   * \brief Visitor function for modification that changes shape and data.
   */
  template <typename Fn>
  void ModifyInplace(Fn &&fn) {
    fn(this->Data(), common::Span<size_t, kDim>{this->shape_});
    CHECK_EQ(this->Data()->Size(), detail::CalcSize(this->shape_))
        << "Inconsistent size after modification.";
  }
  /**
   * \brief Reshape the tensor.
   *
   * If the total size is changed, then data in this tensor is no longer valid.
   */
  template <typename... S, detail::EnableIfIntegral<S...> * = nullptr>
  void Reshape(S &&...s) {
    static_assert(sizeof...(S) <= kDim, "Invalid shape.");
    detail::ReshapeImpl<0>(shape_, std::forward<S>(s)...);
    auto constexpr kEnd = sizeof...(S);
    static_assert(kEnd <= kDim, "Invalid shape.");
    std::fill(shape_ + kEnd, shape_ + kDim, 1);
    auto n = detail::CalcSize(shape_);
    data_.Resize(n);
  }
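
  // Example (illustrative): with kDim == 3, t.Reshape(4, 3) yields shape {4, 3, 1};
  // trailing dimensions are padded with 1 and the storage is resized to 12 elements.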
  /**
   * \brief Reshape the tensor.
   *
   * If the total size is changed, then data in this tensor is no longer valid.
   */
  template <size_t D>
  void Reshape(common::Span<size_t const, D> shape) {
    static_assert(D <= kDim, "Invalid shape.");
    std::copy(shape.data(), shape.data() + D, this->shape_);
    std::fill(shape_ + D, shape_ + kDim, 1);
    auto n = detail::CalcSize(shape_);
    data_.Resize(n);
  }

  template <size_t D>
  void Reshape(size_t (&shape)[D]) {
    this->Reshape(common::Span<size_t const, D>{shape});
  }
  /**
   * \brief Get a host view on the slice.
   */
  template <typename... S>
  auto Slice(S &&...slices) const {
    return this->HostView().Slice(std::forward<S>(slices)...);
  }
  /**
   * \brief Get a host view on the slice.
   */
  template <typename... S>
  auto Slice(S &&...slices) {
    return this->HostView().Slice(std::forward<S>(slices)...);
  }

  /**
   * \brief Set device ordinal for this tensor.
   */
  void SetDevice(DeviceOrd device) const { data_.SetDevice(device); }
  [[nodiscard]] DeviceOrd Device() const { return data_.Device(); }
};
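
// Example (illustrative): a typical round trip between storage and views.
//   Tensor<float, 2> t{{2, 3}, DeviceOrd::CPU()};  // allocate a 2x3 matrix
//   auto h = t.HostView();                         // TensorView<float, 2>
//   h(0, 1) = 3.14f;
//   auto row = t.Slice(0, All());                  // 1-dim view of row 0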
template <typename T>
using Vector = Tensor<T, 1>;

template <typename T>
using Matrix = Tensor<T, 2>;
/**
 * \brief Create an array without initialization.
 */
template <typename T, typename... Index>
auto Empty(Context const *ctx, Index &&...index) {
  Tensor<T, sizeof...(Index)> t;
  t.SetDevice(ctx->Device());
  t.Reshape(index...);
  return t;
}
/**
 * \brief Create an array with value v.
 */
template <typename T, typename... Index>
auto Constant(Context const *ctx, T v, Index &&...index) {
  Tensor<T, sizeof...(Index)> t;
  t.SetDevice(ctx->Device());
  t.Reshape(index...);
  t.Data()->Fill(std::move(v));
  return t;
}
/**
 * \brief Like np.zeros, return a new array of given shape and type, filled with zeros.
 */
template <typename T, typename... Index>
auto Zeros(Context const *ctx, Index &&...index) {
  return Constant(ctx, static_cast<T>(0), index...);
}
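
// Example (illustrative):
//   auto z = Zeros<float>(&ctx, 2, 3);   // 2x3 matrix filled with 0
//   auto c = Constant(&ctx, 1.0f, 4ul);  // length-4 vector of 1s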
// Only stacking along the first axis is supported for now.
template <typename T, int32_t D>
void Stack(Tensor<T, D> *l, Tensor<T, D> const &r) {
  if (r.Device().IsCUDA()) {
    l->SetDevice(r.Device());
  }
  l->ModifyInplace([&](HostDeviceVector<T> *data, common::Span<size_t, D> shape) {
    // All dimensions but the first must match; missing (zero) extents are adopted
    // from r.
    for (size_t i = 1; i < D; ++i) {
      if (shape[i] == 0) {
        shape[i] = r.Shape(i);
      } else {
        CHECK_EQ(shape[i], r.Shape(i));
      }
    }
    data->SetDevice(r.Device());
    data->Extend(*r.Data());
    shape[0] = l->Shape(0) + r.Shape(0);
  });
}
#if defined(LINALG_HD)
#undef LINALG_HD
#endif  // LINALG_HD

#endif  // XGBOOST_LINALG_H_