math.h
#ifndef XGBOOST_COMMON_MATH_H_
#define XGBOOST_COMMON_MATH_H_

#include <xgboost/base.h>

#include <algorithm>
#include <cmath>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>

namespace xgboost {
namespace common {
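/*! \brief Calculate the sigmoid of the input. */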
XGBOOST_DEVICE inline float Sigmoid(float x) {
  return 1.0f / (1.0f + expf(-x));
}

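/*! \brief Equality test for both integer and floating point. */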
template <typename T, typename U>
XGBOOST_DEVICE constexpr bool CloseTo(T a, U b) {
  using Casted =
    typename std::conditional<
      std::is_floating_point<T>::value || std::is_floating_point<U>::value,
      double,
      typename std::conditional<
        std::is_signed<T>::value || std::is_signed<U>::value,
        int64_t,
        uint64_t>::type>::type;
  return std::is_floating_point<Casted>::value ?
    std::abs(static_cast<Casted>(a) - static_cast<Casted>(b)) < 1e-6 : a == b;
}

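/*! \brief Do inplace softmax transformation on start to end. */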
template <typename Iterator>
XGBOOST_DEVICE inline void Softmax(Iterator start, Iterator end) {
  static_assert(std::is_same<bst_float,
                typename std::remove_reference<
                  decltype(std::declval<Iterator>().operator*())>::type
                >::value,
                "Values should be of type bst_float");
  bst_float wmax = *start;
  for (Iterator i = start + 1; i != end; ++i) {
    wmax = fmaxf(*i, wmax);
  }
  double wsum = 0.0f;
  for (Iterator i = start; i != end; ++i) {
    *i = expf(*i - wmax);
    wsum += *i;
  }
  for (Iterator i = start; i != end; ++i) {
    *i /= static_cast<float>(wsum);
  }
}

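/*! \brief Find the maximum iterator within the iterators. */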
template <typename Iterator>
XGBOOST_DEVICE inline Iterator FindMaxIndex(Iterator begin, Iterator end) {
  Iterator maxit = begin;
  for (Iterator it = begin; it != end; ++it) {
    if (*it > *maxit) maxit = it;
  }
  return maxit;
}

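/*! \brief Perform numerically safe logsum, i.e. log(exp(x) + exp(y)). */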
inline float LogSum(float x, float y) {
  if (x < y) {
    return y + std::log(std::exp(x - y) + 1.0f);
  } else {
    return x + std::log(std::exp(y - x) + 1.0f);
  }
}

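/*! \brief Perform numerically safe logsum over the range [begin, end). */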
template <typename Iterator>
inline float LogSum(Iterator begin, Iterator end) {
  float mx = *begin;
  for (Iterator it = begin; it != end; ++it) {
    mx = std::max(mx, *it);
  }
  float sum = 0.0f;
  for (Iterator it = begin; it != end; ++it) {
    sum += std::exp(*it - mx);
  }
  return mx + std::log(sum);
}

// Comparator functions for sorting pairs in descending order.
inline static bool CmpFirst(const std::pair<float, unsigned> &a,
                            const std::pair<float, unsigned> &b) {
  return a.first > b.first;
}
inline static bool CmpSecond(const std::pair<float, unsigned> &a,
                             const std::pair<float, unsigned> &b) {
  return a.second > b.second;
}

// Redefined here to work around a VC bug that doesn't support overloading for
// integer types.
template <typename T>
XGBOOST_DEVICE typename std::enable_if<
    std::numeric_limits<T>::is_integer, bool>::type
CheckNAN(T) {
  return false;
}

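// NaN checks: under XGBOOST_STRICT_R_MODE on the host, CheckNAN(double) is only
// declared here and defined elsewhere; otherwise inline overloads dispatch to
// isnan on device and std::isnan on host.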
#if XGBOOST_STRICT_R_MODE && !defined(__CUDA_ARCH__)

bool CheckNAN(double v);

#else

XGBOOST_DEVICE bool inline CheckNAN(float x) {
#if defined(__CUDA_ARCH__)
  return isnan(x);
#else
  return std::isnan(x);
#endif  // defined(__CUDA_ARCH__)
}

XGBOOST_DEVICE bool inline CheckNAN(double x) {
#if defined(__CUDA_ARCH__)
  return isnan(x);
#else
  return std::isnan(x);
#endif  // defined(__CUDA_ARCH__)
}

#endif  // XGBOOST_STRICT_R_MODE && !defined(__CUDA_ARCH__)
// GPU version is not uploaded in CRAN anyway.
// Specialize only when using R with CPU.
#if XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)
double LogGamma(double v);

#else  // Not R or R with GPU.

template <typename T>
XGBOOST_DEVICE inline T LogGamma(T v) {
#ifdef _MSC_VER

#if _MSC_VER >= 1800
  return lgamma(v);
#else
#pragma message("Warning: lgamma function was not available until VS2013"\
                ", poisson regression will be disabled")
  utils::Error("lgamma function was not available until VS2013");
  return static_cast<T>(1.0);
#endif  // _MSC_VER >= 1800

#else
  return lgamma(v);
#endif  // _MSC_VER
}

#endif  // XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)

}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_MATH_H_
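Example usage (a minimal sketch, not part of the header: it assumes the xgboost include paths are configured so that <xgboost/base.h> resolves, that the relative include of "math.h" below points at this file, and that the code is compiled for the host, where the XGBOOST_DEVICE functions act as ordinary inline functions):

#include <iostream>
#include <vector>

#include "math.h"  // this header; the include path is an assumption

int main() {
  using namespace xgboost::common;

  // Sigmoid maps any real input into (0, 1).
  std::cout << Sigmoid(0.0f) << "\n";  // 0.5

  // Softmax normalizes the scores in place so they sum to 1.
  std::vector<xgboost::bst_float> scores = {1.0f, 2.0f, 3.0f};
  Softmax(scores.begin(), scores.end());

  // FindMaxIndex returns an iterator to the largest element.
  auto max_it = FindMaxIndex(scores.begin(), scores.end());
  std::cout << "argmax = " << (max_it - scores.begin()) << "\n";  // 2

  // LogSum evaluates log(exp(x) + exp(y)) without overflowing.
  std::cout << LogSum(1000.0f, 1000.0f) << "\n";  // ~1000.69
  return 0;
}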