xgboost
hist_util.h
Go to the documentation of this file.
1 
7 #ifndef XGBOOST_COMMON_HIST_UTIL_H_
8 #define XGBOOST_COMMON_HIST_UTIL_H_
9 
10 #include <xgboost/data.h>
12 #include <limits>
13 #include <vector>
14 #include <algorithm>
15 #include <memory>
16 #include <utility>
17 #include <map>
18 
19 #include "row_set.h"
20 #include "threading_utils.h"
21 #include "../tree/param.h"
22 #include "./quantile.h"
23 #include "./timer.h"
24 #include "../include/rabit/rabit.h"
25 
26 namespace xgboost {
27 namespace common {
28 
29 /*
30  * \brief A thin wrapper around dynamically allocated C-style array.
31  * Make sure to call resize() before use.
32  */
/*
 * \brief A thin wrapper around a dynamically allocated C-style array.
 * Make sure to call resize() before use.
 *
 * NOTE: elements are relocated with std::memcpy, so T must be trivially
 * copyable; non-trivial types would be moved without running their
 * copy/move constructors.
 */
template<typename T>
struct SimpleArray {
  ~SimpleArray() {
    std::free(ptr_);
    ptr_ = nullptr;
  }

  /*
   * \brief Resize the array to n elements, preserving existing contents.
   *
   * BUGFIX: copy only the overlapping prefix of the old buffer.  The old
   * code copied n_ elements unconditionally, which overflowed the new
   * allocation whenever the array was shrunk (n < n_).
   */
  void resize(size_t n) {
    T* ptr = static_cast<T*>(std::malloc(n * sizeof(T)));
    CHECK(ptr) << "Failed to allocate memory";
    if (ptr_) {
      std::memcpy(ptr, ptr_, std::min(n, n_) * sizeof(T));
      std::free(ptr_);
    }
    ptr_ = ptr;
    n_ = n;
  }

  T& operator[](size_t idx) {
    return ptr_[idx];
  }

  // NOTE(review): returns a mutable reference from a const object; kept as-is
  // for interface compatibility, but callers should not rely on mutating
  // through a const SimpleArray.
  T& operator[](size_t idx) const {
    return ptr_[idx];
  }

  size_t size() const {
    return n_;
  }

  // Last element; undefined if the array is empty.
  T back() const {
    return ptr_[n_ - 1];
  }

  T* data() {
    return ptr_;
  }

  const T* data() const {
    return ptr_;
  }

  T* begin() {
    return ptr_;
  }

  const T* begin() const {
    return ptr_;
  }

  T* end() {
    return ptr_ + n_;
  }

  const T* end() const {
    return ptr_ + n_;
  }

 private:
  T* ptr_ = nullptr;   // owned buffer, freed in the destructor
  size_t n_ = 0;       // number of elements currently allocated
};
96 
102 
103 // A CSC matrix representing histogram cuts, used in CPU quantile hist.
105  // Using friends to avoid creating a virtual class, since HistogramCuts is used as value
106  // object in many places.
107  friend class SparseCuts;
108  friend class DenseCuts;
109  friend class CutsBuilder;
110 
111  protected:
112  using BinIdx = uint32_t;
114 
115  std::vector<bst_float> cut_values_;
116  std::vector<uint32_t> cut_ptrs_;
117  std::vector<float> min_vals_; // storing minimum value in a sketch set.
118 
119  public:
120  HistogramCuts();
121  HistogramCuts(HistogramCuts const& that) = delete;
122  HistogramCuts(HistogramCuts&& that) noexcept(true) {
123  *this = std::forward<HistogramCuts&&>(that);
124  }
125  HistogramCuts& operator=(HistogramCuts const& that) = delete;
126  HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) {
127  monitor_ = std::move(that.monitor_);
128  cut_ptrs_ = std::move(that.cut_ptrs_);
129  cut_values_ = std::move(that.cut_values_);
130  min_vals_ = std::move(that.min_vals_);
131  return *this;
132  }
133 
134  /* \brief Build histogram cuts. */
135  void Build(DMatrix* dmat, uint32_t const max_num_bins);
136  /* \brief How many bins a feature has. */
137  uint32_t FeatureBins(uint32_t feature) const {
138  return cut_ptrs_.at(feature+1) - cut_ptrs_[feature];
139  }
140 
141  // Getters. Cuts should be of no use after building histogram indices, but currently
142  // it's deeply linked with quantile_hist, gpu sketcher and gpu_hist. So we preserve
143  // these for now.
144  std::vector<uint32_t> const& Ptrs() const { return cut_ptrs_; }
145  std::vector<float> const& Values() const { return cut_values_; }
146  std::vector<float> const& MinValues() const { return min_vals_; }
147 
148  size_t TotalBins() const { return cut_ptrs_.back(); }
149 
150  BinIdx SearchBin(float value, uint32_t column_id) {
151  auto beg = cut_ptrs_.at(column_id);
152  auto end = cut_ptrs_.at(column_id + 1);
153  auto it = std::upper_bound(cut_values_.cbegin() + beg, cut_values_.cbegin() + end, value);
154  if (it == cut_values_.cend()) {
155  it = cut_values_.cend() - 1;
156  }
157  BinIdx idx = it - cut_values_.cbegin();
158  return idx;
159  }
160 
161  BinIdx SearchBin(Entry const& e) {
162  return SearchBin(e.fvalue, e.index);
163  }
164 };
165 
166 /* \brief An interface for building quantile cuts.
167  *
168  * `DenseCuts' always assumes there are `max_bins` for each feature, which makes it not
169  * suitable for sparse dataset. On the other hand `SparseCuts' uses `GetColumnBatches',
170  * which doubles the memory usage, hence can not be applied to dense dataset.
171  */
172 class CutsBuilder {
173  public:
175 
176  protected:
178  /* \brief return whether group for ranking is used. */
179  static bool UseGroup(DMatrix* dmat);
180 
181  public:
182  explicit CutsBuilder(HistogramCuts* p_cuts) : p_cuts_{p_cuts} {}
183  virtual ~CutsBuilder() = default;
184 
185  static uint32_t SearchGroupIndFromRow(
186  std::vector<bst_uint> const& group_ptr, size_t const base_rowid) {
187  using KIt = std::vector<bst_uint>::const_iterator;
188  KIt res = std::lower_bound(group_ptr.cbegin(), group_ptr.cend() - 1, base_rowid);
189  // Cannot use CHECK_NE because it will try to print the iterator.
190  bool const found = res != group_ptr.cend() - 1;
191  if (!found) {
192  LOG(FATAL) << "Row " << base_rowid << " does not lie in any group!";
193  }
194  uint32_t group_ind = std::distance(group_ptr.cbegin(), res);
195  return group_ind;
196  }
197 
198  void AddCutPoint(WXQSketch::SummaryContainer const& summary) {
199  if (summary.size > 1 && summary.size <= 16) {
200  /* specialized code categorial / ordinal data -- use midpoints */
201  for (size_t i = 1; i < summary.size; ++i) {
202  bst_float cpt = (summary.data[i].value + summary.data[i - 1].value) / 2.0f;
203  if (i == 1 || cpt > p_cuts_->cut_values_.back()) {
204  p_cuts_->cut_values_.push_back(cpt);
205  }
206  }
207  } else {
208  for (size_t i = 2; i < summary.size; ++i) {
209  bst_float cpt = summary.data[i - 1].value;
210  if (i == 2 || cpt > p_cuts_->cut_values_.back()) {
211  p_cuts_->cut_values_.push_back(cpt);
212  }
213  }
214  }
215  }
216 
217  /* \brief Build histogram indices. */
218  virtual void Build(DMatrix* dmat, uint32_t const max_num_bins) = 0;
219 };
220 
222 class SparseCuts : public CutsBuilder {
223  /* \brief Distrbute columns to each thread according to number of entries. */
224  static std::vector<size_t> LoadBalance(SparsePage const& page, size_t const nthreads);
225  Monitor monitor_;
226 
227  public:
228  explicit SparseCuts(HistogramCuts* container) :
229  CutsBuilder(container) {
230  monitor_.Init(__FUNCTION__);
231  }
232 
233  /* \brief Concatonate the built cuts in each thread. */
234  void Concat(std::vector<std::unique_ptr<SparseCuts>> const& cuts, uint32_t n_cols);
235  /* \brief Build histogram indices in single thread. */
236  void SingleThreadBuild(SparsePage const& page, MetaInfo const& info,
237  uint32_t max_num_bins,
238  bool const use_group_ind,
239  uint32_t beg, uint32_t end, uint32_t thread_id);
240  void Build(DMatrix* dmat, uint32_t const max_num_bins) override;
241 };
242 
244 class DenseCuts : public CutsBuilder {
245  protected:
247 
248  public:
249  explicit DenseCuts(HistogramCuts* container) :
250  CutsBuilder(container) {
251  monitor_.Init(__FUNCTION__);
252  }
253  void Init(std::vector<WXQSketch>* sketchs, uint32_t max_num_bins);
254  void Build(DMatrix* p_fmat, uint32_t max_num_bins) override;
255 };
256 
257 // FIXME(trivialfis): Merge this into generic cut builder.
262 size_t DeviceSketch(int device,
263  int max_bin,
264  int gpu_batch_nrows,
265  DMatrix* dmat,
266  HistogramCuts* hmat);
267 
275  std::vector<size_t> row_ptr;
277  std::vector<uint32_t> index;
279  std::vector<size_t> hit_count;
282  // Create a global histogram matrix, given cut
283  void Init(DMatrix* p_fmat, int max_num_bins);
284  // get i-th row
285  inline GHistIndexRow operator[](size_t i) const {
286  return {&index[0] + row_ptr[i],
287  static_cast<GHistIndexRow::index_type>(
288  row_ptr[i + 1] - row_ptr[i])};
289  }
290  inline void GetFeatureCounts(size_t* counts) const {
291  auto nfeature = cut.Ptrs().size() - 1;
292  for (unsigned fid = 0; fid < nfeature; ++fid) {
293  auto ibegin = cut.Ptrs()[fid];
294  auto iend = cut.Ptrs()[fid + 1];
295  for (auto i = ibegin; i < iend; ++i) {
296  counts[fid] += hit_count[i];
297  }
298  }
299  }
300 
301  private:
302  std::vector<size_t> hit_count_tloc_;
303 };
304 
306  const size_t* row_ptr;
307  const uint32_t* index;
308 
309  inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index)
310  : row_ptr(row_ptr), index(index) {}
311 
312  // get i-th row
313  inline GHistIndexRow operator[](size_t i) const {
314  return {&index[0] + row_ptr[i], row_ptr[i + 1] - row_ptr[i]};
315  }
316 };
317 
318 class ColumnMatrix;
319 
321  public:
322  void Init(const GHistIndexMatrix& gmat,
323  const ColumnMatrix& colmat,
324  const tree::TrainParam& param);
325 
326  inline GHistIndexBlock operator[](size_t i) const {
327  return {blocks_[i].row_ptr_begin, blocks_[i].index_begin};
328  }
329 
330  inline size_t GetNumBlock() const {
331  return blocks_.size();
332  }
333 
334  private:
335  std::vector<size_t> row_ptr_;
336  std::vector<uint32_t> index_;
337  const HistogramCuts* cut_;
338  struct Block {
339  const size_t* row_ptr_begin;
340  const size_t* row_ptr_end;
341  const uint32_t* index_begin;
342  const uint32_t* index_end;
343  };
344  std::vector<Block> blocks_;
345 };
346 
354 
358 void InitilizeHistByZeroes(GHistRow hist, size_t begin, size_t end);
359 
363 void IncrementHist(GHistRow dst, const GHistRow add, size_t begin, size_t end);
364 
368 void CopyHist(GHistRow dst, const GHistRow src, size_t begin, size_t end);
369 
373 void SubtractionHist(GHistRow dst, const GHistRow src1, const GHistRow src2,
374  size_t begin, size_t end);
375 
380  public:
381  // access histogram for i-th node
383  constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
384  CHECK_NE(row_ptr_[nid], kMax);
385  tree::GradStats* ptr =
386  const_cast<tree::GradStats*>(dmlc::BeginPtr(data_) + row_ptr_[nid]);
387  return {ptr, nbins_};
388  }
389 
390  // have we computed a histogram for i-th node?
391  bool RowExists(bst_uint nid) const {
392  const uint32_t k_max = std::numeric_limits<uint32_t>::max();
393  return (nid < row_ptr_.size() && row_ptr_[nid] != k_max);
394  }
395 
396  // initialize histogram collection
397  void Init(uint32_t nbins) {
398  if (nbins_ != nbins) {
399  nbins_ = nbins;
400  // quite expensive operation, so let's do this only once
401  data_.clear();
402  }
403  row_ptr_.clear();
404  n_nodes_added_ = 0;
405  }
406 
407  // create an empty histogram for i-th node
408  void AddHistRow(bst_uint nid) {
409  constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
410  if (nid >= row_ptr_.size()) {
411  row_ptr_.resize(nid + 1, kMax);
412  }
413  CHECK_EQ(row_ptr_[nid], kMax);
414 
415  if (data_.size() < nbins_ * (nid + 1)) {
416  data_.resize(nbins_ * (nid + 1));
417  }
418 
419  row_ptr_[nid] = nbins_ * n_nodes_added_;
420  n_nodes_added_++;
421  }
422 
423  private:
425  uint32_t nbins_ = 0;
427  uint32_t n_nodes_added_ = 0;
428 
429  std::vector<tree::GradStats> data_;
430 
432  std::vector<size_t> row_ptr_;
433 };
434 
441  public:
442  void Init(size_t nbins) {
443  if (nbins != nbins_) {
444  hist_buffer_.Init(nbins);
445  nbins_ = nbins;
446  }
447  }
448 
449  // Add new elements if needed, mark all hists as unused
450  // targeted_hists - already allocated hists which should contain final results after Reduce() call
451  void Reset(size_t nthreads, size_t nodes, const BlockedSpace2d& space,
452  const std::vector<GHistRow>& targeted_hists) {
453  hist_buffer_.Init(nbins_);
454  tid_nid_to_hist_.clear();
455  hist_memory_.clear();
456  threads_to_nids_map_.clear();
457 
458  targeted_hists_ = targeted_hists;
459 
460  CHECK_EQ(nodes, targeted_hists.size());
461 
462  nodes_ = nodes;
463  nthreads_ = nthreads;
464 
465  MatchThreadsToNodes(space);
466  AllocateAdditionalHistograms();
467  MatchNodeNidPairToHist();
468 
469  hist_was_used_.resize(nthreads * nodes_);
470  std::fill(hist_was_used_.begin(), hist_was_used_.end(), static_cast<int>(false));
471  }
472 
473  // Get specified hist, initialize hist by zeros if it wasn't used before
474  GHistRow GetInitializedHist(size_t tid, size_t nid) {
475  CHECK_LT(nid, nodes_);
476  CHECK_LT(tid, nthreads_);
477 
478  size_t idx = tid_nid_to_hist_.at({tid, nid});
479  GHistRow hist = hist_memory_[idx];
480 
481  if (!hist_was_used_[tid * nodes_ + nid]) {
482  InitilizeHistByZeroes(hist, 0, hist.size());
483  hist_was_used_[tid * nodes_ + nid] = static_cast<int>(true);
484  }
485 
486  return hist;
487  }
488 
489  // Reduce following bins (begin, end] for nid-node in dst across threads
490  void ReduceHist(size_t nid, size_t begin, size_t end) {
491  CHECK_GT(end, begin);
492  CHECK_LT(nid, nodes_);
493 
494  GHistRow dst = targeted_hists_[nid];
495 
496  bool is_updated = false;
497  for (size_t tid = 0; tid < nthreads_; ++tid) {
498  if (hist_was_used_[tid * nodes_ + nid]) {
499  is_updated = true;
500  const size_t idx = tid_nid_to_hist_.at({tid, nid});
501  GHistRow src = hist_memory_[idx];
502 
503  if (dst.data() != src.data()) {
504  IncrementHist(dst, src, begin, end);
505  }
506  }
507  }
508  if (!is_updated) {
509  // In distributed mode - some tree nodes can be empty on local machines,
510  // So we need just set local hist by zeros in this case
511  InitilizeHistByZeroes(dst, begin, end);
512  }
513  }
514 
515  protected:
516  void MatchThreadsToNodes(const BlockedSpace2d& space) {
517  const size_t space_size = space.Size();
518  const size_t chunck_size = space_size / nthreads_ + !!(space_size % nthreads_);
519 
520  threads_to_nids_map_.resize(nthreads_ * nodes_, false);
521 
522  for (size_t tid = 0; tid < nthreads_; ++tid) {
523  size_t begin = chunck_size * tid;
524  size_t end = std::min(begin + chunck_size, space_size);
525 
526  if (begin < space_size) {
527  size_t nid_begin = space.GetFirstDimension(begin);
528  size_t nid_end = space.GetFirstDimension(end-1);
529 
530  for (size_t nid = nid_begin; nid <= nid_end; ++nid) {
531  // true - means thread 'tid' will work to compute partial hist for node 'nid'
532  threads_to_nids_map_[tid * nodes_ + nid] = true;
533  }
534  }
535  }
536  }
537 
539  size_t hist_allocated_additionally = 0;
540 
541  for (size_t nid = 0; nid < nodes_; ++nid) {
542  int nthreads_for_nid = 0;
543 
544  for (size_t tid = 0; tid < nthreads_; ++tid) {
545  if (threads_to_nids_map_[tid * nodes_ + nid]) {
546  nthreads_for_nid++;
547  }
548  }
549 
550  // In distributed mode - some tree nodes can be empty on local machines,
551  // set nthreads_for_nid to 0 in this case.
552  // In another case - allocate additional (nthreads_for_nid - 1) histograms,
553  // because one is already allocated externally (will store final result for the node).
554  hist_allocated_additionally += std::max<int>(0, nthreads_for_nid - 1);
555  }
556 
557  for (size_t i = 0; i < hist_allocated_additionally; ++i) {
558  hist_buffer_.AddHistRow(i);
559  }
560  }
561 
563  size_t hist_total = 0;
564  size_t hist_allocated_additionally = 0;
565 
566  for (size_t nid = 0; nid < nodes_; ++nid) {
567  bool first_hist = true;
568  for (size_t tid = 0; tid < nthreads_; ++tid) {
569  if (threads_to_nids_map_[tid * nodes_ + nid]) {
570  if (first_hist) {
571  hist_memory_.push_back(targeted_hists_[nid]);
572  first_hist = false;
573  } else {
574  hist_memory_.push_back(hist_buffer_[hist_allocated_additionally]);
575  hist_allocated_additionally++;
576  }
577  // map pair {tid, nid} to index of allocated histogram from hist_memory_
578  tid_nid_to_hist_[{tid, nid}] = hist_total++;
579  CHECK_EQ(hist_total, hist_memory_.size());
580  }
581  }
582  }
583  }
584 
586  size_t nbins_ = 0;
588  size_t nthreads_ = 0;
590  size_t nodes_ = 0;
598  std::vector<int> hist_was_used_;
599 
601  std::vector<bool> threads_to_nids_map_;
603  std::vector<GHistRow> targeted_hists_;
605  std::vector<GHistRow> hist_memory_;
607  std::map<std::pair<size_t, size_t>, size_t> tid_nid_to_hist_;
608 };
609 
614  public:
615  // initialize builder
616  inline void Init(size_t nthread, uint32_t nbins) {
617  nthread_ = nthread;
618  nbins_ = nbins;
619  }
620 
621  // construct a histogram via histogram aggregation
622  void BuildHist(const std::vector<GradientPair>& gpair,
623  const RowSetCollection::Elem row_indices,
624  const GHistIndexMatrix& gmat,
625  GHistRow hist);
626  // same, with feature grouping
627  void BuildBlockHist(const std::vector<GradientPair>& gpair,
628  const RowSetCollection::Elem row_indices,
629  const GHistIndexBlockMatrix& gmatb,
630  GHistRow hist);
631  // construct a histogram via subtraction trick
632  void SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow parent);
633 
634  uint32_t GetNumBins() {
635  return nbins_;
636  }
637 
638  private:
640  size_t nthread_;
642  uint32_t nbins_;
643 };
644 
645 
646 } // namespace common
647 } // namespace xgboost
648 #endif // XGBOOST_COMMON_HIST_UTIL_H_
void InitilizeHistByZeroes(GHistRow hist, size_t begin, size_t end)
fill a histogram by zeros
void Init(uint32_t nbins)
Definition: hist_util.h:397
void ReduceHist(size_t nid, size_t begin, size_t end)
Definition: hist_util.h:490
DenseCuts(HistogramCuts *container)
Definition: hist_util.h:249
std::vector< float > min_vals_
Definition: hist_util.h:117
void AddCutPoint(WXQSketch::SummaryContainer const &summary)
Definition: hist_util.h:198
float bst_float
float type, used for storing statistics
Definition: base.h:111
std::vector< uint32_t > cut_ptrs_
Definition: hist_util.h:116
XGBOOST_DEVICE constexpr index_type size() const __span_noexcept
Definition: span.h:521
T * end()
Definition: hist_util.h:84
T back() const
Definition: hist_util.h:63
HistCollection hist_buffer_
Buffer for additional histograms for Parallel processing.
Definition: hist_util.h:592
size_t GetNumBlock() const
Definition: hist_util.h:330
void IncrementHist(GHistRow dst, const GHistRow add, size_t begin, size_t end)
Increment hist as dst += add in range [begin, end)
uint32_t FeatureBins(uint32_t feature) const
Definition: hist_util.h:137
Definition: hist_util.h:305
static uint32_t SearchGroupIndFromRow(std::vector< bst_uint > const &group_ptr, size_t const base_rowid)
Definition: hist_util.h:185
void CopyHist(GHistRow dst, const GHistRow src, size_t begin, size_t end)
Copy hist from src to dst in range [begin, end)
void AddHistRow(bst_uint nid)
Definition: hist_util.h:408
std::size_t index_type
Definition: span.h:394
Meta information about dataset, always sit in memory.
Definition: data.h:39
std::vector< bool > threads_to_nids_map_
Buffer for additional histograms for Parallel processing.
Definition: hist_util.h:601
std::vector< uint32_t > const & Ptrs() const
Definition: hist_util.h:144
util to compute quantiles
The input data structure of xgboost.
BinIdx SearchBin(Entry const &e)
Definition: hist_util.h:161
Definition: hist_util.h:34
HistogramCuts cut
The corresponding cuts.
Definition: hist_util.h:281
void MatchNodeNidPairToHist()
Definition: hist_util.h:562
Internal data structured used by XGBoost during training. There are two ways to create a customized D...
Definition: data.h:428
size_t GetFirstDimension(size_t i) const
Definition: threading_utils.h:87
Cut configuration for dense dataset.
Definition: hist_util.h:244
std::vector< uint32_t > index
The index data.
Definition: hist_util.h:277
In-memory storage unit of sparse batch, stored in CSR format.
Definition: data.h:188
const T * begin() const
Definition: hist_util.h:80
HistogramCuts & operator=(HistogramCuts &&that) noexcept(true)
Definition: hist_util.h:126
Definition: hist_util.h:172
std::vector< size_t > hit_count
hit count of each index
Definition: hist_util.h:279
~SimpleArray()
Definition: hist_util.h:35
Quantile sketch using WXQSummary.
Definition: quantile.h:839
span class implementation, based on ISO++20 span<T>. The interface should be the same.
Definition: span.h:115
T * data()
Definition: hist_util.h:67
builder for histograms of gradient statistics
Definition: hist_util.h:613
GHistIndexRow operator[](size_t i) const
Definition: hist_util.h:285
std::vector< float > const & MinValues() const
Definition: hist_util.h:146
std::vector< bst_float > cut_values_
Definition: hist_util.h:115
GHistIndexBlock operator[](size_t i) const
Definition: hist_util.h:326
XGBOOST_DEVICE constexpr pointer data() const __span_noexcept
Definition: span.h:516
Quick Utility to compute subset of rows.
GHistRow GetInitializedHist(size_t tid, size_t nid)
Definition: hist_util.h:474
void Init(std::string label)
Definition: timer.h:82
size_t TotalBins() const
Definition: hist_util.h:148
HistogramCuts(HistogramCuts &&that) noexcept(true)
Definition: hist_util.h:122
size_t size() const
Definition: hist_util.h:59
Cut configuration for sparse dataset.
Definition: hist_util.h:222
const size_t * row_ptr
Definition: hist_util.h:306
void resize(size_t n)
Definition: hist_util.h:40
const T * end() const
Definition: hist_util.h:88
size_t Size() const
Definition: threading_utils.h:82
histogram of gradient statistics for multiple nodes
Definition: hist_util.h:379
void Reset(size_t nthreads, size_t nodes, const BlockedSpace2d &space, const std::vector< GHistRow > &targeted_hists)
Definition: hist_util.h:451
void MatchThreadsToNodes(const BlockedSpace2d &space)
Definition: hist_util.h:516
Definition: hist_util.h:320
HistogramCuts * p_cuts_
Definition: hist_util.h:177
std::vector< int > hist_was_used_
Marks which hists were used, it means that they should be merged. Contains only {true or false} value...
Definition: hist_util.h:598
GHistRow operator[](bst_uint nid) const
Definition: hist_util.h:382
T * begin()
Definition: hist_util.h:76
common::Monitor monitor_
Definition: hist_util.h:113
size_t DeviceSketch(int device, int max_bin, int gpu_batch_nrows, DMatrix *dmat, HistogramCuts *hmat)
Builds the cut matrix on the GPU.
GHistIndexRow operator[](size_t i) const
Definition: hist_util.h:313
a collection of columns, with support for construction from GHistIndexMatrix.
Definition: column_matrix.h:64
const T * data() const
Definition: hist_util.h:71
SparseCuts(HistogramCuts *container)
Definition: hist_util.h:228
uint32_t BinIdx
Definition: hist_util.h:112
std::vector< GHistRow > targeted_hists_
Contains histograms for final results.
Definition: hist_util.h:603
namespace of xgboost
Definition: base.h:102
data structure to store an instance set, a subset of rows (instances) associated with a particular no...
Definition: row_set.h:23
Definition: threading_utils.h:52
Timing utility used to measure total method execution time over the lifetime of the containing object...
Definition: timer.h:47
std::vector< GHistRow > hist_memory_
Allocated memory for histograms used for construction.
Definition: hist_util.h:605
CutsBuilder(HistogramCuts *p_cuts)
Definition: hist_util.h:182
void Init(size_t nthread, uint32_t nbins)
Definition: hist_util.h:616
std::vector< size_t > row_ptr
row pointer to rows by element position
Definition: hist_util.h:275
Stores temporary histograms to compute them in parallel Supports processing multiple tree-nodes for n...
Definition: hist_util.h:440
void AllocateAdditionalHistograms()
Definition: hist_util.h:538
Element from a sparse vector.
Definition: data.h:142
T & operator[](size_t idx)
Definition: hist_util.h:51
uint32_t bst_uint
unsigned integer type used for feature index.
Definition: base.h:105
Monitor monitor_
Definition: hist_util.h:246
void Init(size_t nbins)
Definition: hist_util.h:442
void SubtractionHist(GHistRow dst, const GHistRow src1, const GHistRow src2, size_t begin, size_t end)
Compute Subtraction: dst = src1 - src2 in range [begin, end)
uint32_t GetNumBins()
Definition: hist_util.h:634
preprocessed global index matrix, in CSR format Transform floating values to integer index in histogr...
Definition: hist_util.h:273
std::vector< float > const & Values() const
Definition: hist_util.h:145
bst_feature_t index
feature index
Definition: data.h:144
bst_float fvalue
feature value
Definition: data.h:146
const uint32_t * index
Definition: hist_util.h:307
void GetFeatureCounts(size_t *counts) const
Definition: hist_util.h:290
std::map< std::pair< size_t, size_t >, size_t > tid_nid_to_hist_
map pair {tid, nid} to index of allocated histogram from hist_memory_
Definition: hist_util.h:607
bool RowExists(bst_uint nid) const
Definition: hist_util.h:391
GHistIndexBlock(const size_t *row_ptr, const uint32_t *index)
Definition: hist_util.h:309
Definition: hist_util.h:104
T & operator[](size_t idx) const
Definition: hist_util.h:55
BinIdx SearchBin(float value, uint32_t column_id)
Definition: hist_util.h:150