xgboost
math.h
#ifndef XGBOOST_COMMON_MATH_H_
#define XGBOOST_COMMON_MATH_H_

#include <xgboost/base.h>

#include <algorithm>
#include <cmath>
#include <limits>
#include <type_traits>  // std::conditional, std::enable_if, std::is_same used below
#include <utility>
#include <vector>

namespace xgboost {
namespace common {
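/*!
 * \brief Calculate the sigmoid of the input.
 *
 * Minimal usage sketch (illustrative only):
 * \code
 *   float p = Sigmoid(0.0f);  // p == 0.5f
 * \endcode
 */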
XGBOOST_DEVICE inline float Sigmoid(float x) {
  return 1.0f / (1.0f + expf(-x));
}

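/*! \brief Square of the input value. */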
template <typename T>
XGBOOST_DEVICE inline static T Sqr(T a) { return a * a; }

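/*!
 * \brief Equality test for both integer and floating point.
 *
 * Floating point operands are compared with an absolute tolerance of 1e-6;
 * integer operands are compared exactly.
 */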
template <typename T, typename U>
XGBOOST_DEVICE constexpr bool CloseTo(T a, U b) {
  using Casted =
    typename std::conditional<
      std::is_floating_point<T>::value || std::is_floating_point<U>::value,
      double,
      typename std::conditional<
        std::is_signed<T>::value || std::is_signed<U>::value,
        int64_t,
        uint64_t>::type>::type;
  return std::is_floating_point<Casted>::value ?
      std::abs(static_cast<Casted>(a) - static_cast<Casted>(b)) < 1e-6 : a == b;
}

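/*!
 * \brief Do in-place softmax transformation on the range [start, end).
 *
 * Minimal usage sketch (illustrative only):
 * \code
 *   std::vector<bst_float> scores = {1.0f, 2.0f, 3.0f};
 *   Softmax(scores.begin(), scores.end());  // entries are now positive and sum to 1
 * \endcode
 */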
template <typename Iterator>
XGBOOST_DEVICE inline void Softmax(Iterator start, Iterator end) {
  static_assert(std::is_same<bst_float,
                typename std::remove_reference<
                  decltype(std::declval<Iterator>().operator*())>::type
                >::value,
                "Values should be of type bst_float");
  bst_float wmax = *start;
  for (Iterator i = start+1; i != end; ++i) {
    wmax = fmaxf(*i, wmax);
  }
  double wsum = 0.0f;
  for (Iterator i = start; i != end; ++i) {
    *i = expf(*i - wmax);
    wsum += *i;
  }
  for (Iterator i = start; i != end; ++i) {
    *i /= static_cast<float>(wsum);
  }
}

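/*! \brief Find the maximum iterator within the iterators. */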
template<typename Iterator>
XGBOOST_DEVICE inline Iterator FindMaxIndex(Iterator begin, Iterator end) {
  Iterator maxit = begin;
  for (Iterator it = begin; it != end; ++it) {
    if (*it > *maxit) maxit = it;
  }
  return maxit;
}

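/*!
 * \brief Perform numerically safe logsum.
 * \return log(exp(x) + exp(y)), computed without overflow for large inputs.
 */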
inline float LogSum(float x, float y) {
  if (x < y) {
    return y + std::log(std::exp(x - y) + 1.0f);
  } else {
    return x + std::log(std::exp(y - x) + 1.0f);
  }
}

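/*!
 * \brief Perform numerically safe logsum over the range [begin, end).
 * \return log(sum_i exp(x_i))
 */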
template<typename Iterator>
inline float LogSum(Iterator begin, Iterator end) {
  float mx = *begin;
  for (Iterator it = begin; it != end; ++it) {
    mx = std::max(mx, *it);
  }
  float sum = 0.0f;
  for (Iterator it = begin; it != end; ++it) {
    sum += std::exp(*it - mx);
  }
  return mx + std::log(sum);
}

// comparator functions for sorting pairs in descending order
inline static bool CmpFirst(const std::pair<float, unsigned> &a,
                            const std::pair<float, unsigned> &b) {
  return a.first > b.first;
}
inline static bool CmpSecond(const std::pair<float, unsigned> &a,
                             const std::pair<float, unsigned> &b) {
  return a.second > b.second;
}

// Redefined here to workaround a VC bug that doesn't support overloading for integer
// types.
template <typename T>
XGBOOST_DEVICE typename std::enable_if<
    std::numeric_limits<T>::is_integer, bool>::type
CheckNAN(T) {
  return false;
}

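// Floating point CheckNAN: under strict R mode on CPU only a declaration is
// provided here; otherwise isnan is used on device and std::isnan on host.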
#if XGBOOST_STRICT_R_MODE && !defined(__CUDA_ARCH__)

bool CheckNAN(double v);

#else

XGBOOST_DEVICE bool inline CheckNAN(float x) {
#if defined(__CUDA_ARCH__)
  return isnan(x);
#else
  return std::isnan(x);
#endif  // defined(__CUDA_ARCH__)
}

XGBOOST_DEVICE bool inline CheckNAN(double x) {
#if defined(__CUDA_ARCH__)
  return isnan(x);
#else
  return std::isnan(x);
#endif  // defined(__CUDA_ARCH__)
}

#endif  // XGBOOST_STRICT_R_MODE && !defined(__CUDA_ARCH__)
// GPU version is not uploaded in CRAN anyway.
// Specialize only when using R with CPU.
#if XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)
double LogGamma(double v);

#else  // Not R or R with GPU.

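/*! \brief Log-gamma function; a thin wrapper around lgamma(). */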
template<typename T>
XGBOOST_DEVICE inline T LogGamma(T v) {
#ifdef _MSC_VER

#if _MSC_VER >= 1800
  return lgamma(v);
#else
#pragma message("Warning: lgamma function was not available until VS2013"\
                ", poisson regression will be disabled")
  utils::Error("lgamma function was not available until VS2013");
  return static_cast<T>(1.0);
#endif  // _MSC_VER >= 1800

#else
  return lgamma(v);
#endif  // _MSC_VER
}

#endif  // XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)

}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_MATH_H_