#ifndef XGBOOST_COMMON_MATH_H_
#define XGBOOST_COMMON_MATH_H_

#include <xgboost/base.h>  // bst_float, XGBOOST_DEVICE

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <utility>

namespace xgboost {
namespace common {
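/*!
 * \brief Calculate the sigmoid of the input.
 * \param x input value
 * \return 1 / (1 + exp(-x)), computed with clamping to avoid overflow
 */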
XGBOOST_DEVICE inline float Sigmoid(float x) {
  float constexpr kEps = 1e-16;  // avoid division by zero
  x = std::min(-x, 88.7f);       // clamp to avoid exp overflow
  auto denom = expf(x) + 1.0f + kEps;
  auto y = 1.0f / denom;
  return y;
}
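/*!
 * \brief Equality test for mixed integer / floating point operands; floating
 *        point comparisons use an absolute tolerance of 1e-6.
 */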
template <typename T, typename U>
XGBOOST_DEVICE constexpr bool CloseTo(T a, U b) {
  using Casted =
      typename std::conditional<
          std::is_floating_point<T>::value || std::is_floating_point<U>::value,
          double,
          typename std::conditional<
              std::is_signed<T>::value || std::is_signed<U>::value,
              int64_t,
              uint64_t>::type>::type;
  return std::is_floating_point<Casted>::value ?
      std::abs(static_cast<Casted>(a) - static_cast<Casted>(b)) < 1e-6 : a == b;
}
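/*!
 * \brief In-place softmax transformation over [start, end).
 *
 * Illustrative usage (names are examples only):
 *   std::vector<bst_float> preds = {1.f, 2.f, 3.f};
 *   Softmax(preds.begin(), preds.end());  // entries now sum to 1
 */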
template <typename Iterator>
XGBOOST_DEVICE inline void Softmax(Iterator start, Iterator end) {
  static_assert(std::is_same<bst_float,
                typename std::remove_reference<
                  decltype(std::declval<Iterator>().operator*())>::type
                >::value,
                "Values should be of type bst_float");
  bst_float wmax = *start;
  for (Iterator i = start + 1; i != end; ++i) {
    wmax = fmaxf(*i, wmax);
  }
  double wsum = 0.0f;
  for (Iterator i = start; i != end; ++i) {
    *i = expf(*i - wmax);  // shift by the maximum for numerical stability
    wsum += *i;
  }
  for (Iterator i = start; i != end; ++i) {
    *i /= static_cast<float>(wsum);
  }
}
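/*!
 * \brief Find the iterator pointing to the maximum value in [begin, end).
 */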
template <typename Iterator>
XGBOOST_DEVICE inline Iterator FindMaxIndex(Iterator begin, Iterator end) {
  Iterator maxit = begin;
  for (Iterator it = begin; it != end; ++it) {
    if (*it > *maxit) maxit = it;
  }
  return maxit;
}
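/*!
 * \brief Numerically stable log-sum: log(exp(x) + exp(y)).
 */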
inline float LogSum(float x, float y) {
  if (x < y) {
    return y + std::log(std::exp(x - y) + 1.0f);
  } else {
    return x + std::log(std::exp(y - x) + 1.0f);
  }
}
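/*!
 * \brief Numerically stable log-sum over [begin, end): log(sum_i exp(x_i)),
 *        computed by shifting with the maximum element.
 */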
template <typename Iterator>
inline float LogSum(Iterator begin, Iterator end) {
  float mx = *begin;
  for (Iterator it = begin; it != end; ++it) {
    mx = std::max(mx, *it);
  }
  float sum = 0.0f;
  for (Iterator it = begin; it != end; ++it) {
    sum += std::exp(*it - mx);
  }
  return mx + std::log(sum);
}
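// Comparators for sorting pairs in descending order by first / second element.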
inline static bool CmpFirst(const std::pair<float, unsigned> &a,
                            const std::pair<float, unsigned> &b) {
  return a.first > b.first;
}
inline static bool CmpSecond(const std::pair<float, unsigned> &a,
                             const std::pair<float, unsigned> &b) {
  return a.second > b.second;
}
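// CheckNAN: the integer overload always returns false (integers cannot be NaN);
// it is provided so that the templated callers compile for integer types.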
template <typename T>
XGBOOST_DEVICE typename std::enable_if<
    std::numeric_limits<T>::is_integer, bool>::type
CheckNAN(T) {
  return false;
}
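// Floating point CheckNAN overloads.  Under XGBOOST_STRICT_R_MODE the CPU build is
// expected to supply the definition elsewhere (the R front-end); otherwise the inline
// definitions below use the device isnan() on CUDA and std::isnan on the host.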
#if XGBOOST_STRICT_R_MODE && !defined(__CUDA_ARCH__)
bool CheckNAN(double v);
#else
XGBOOST_DEVICE inline bool CheckNAN(float x) {
#if defined(__CUDA_ARCH__)
  return isnan(x);
#else
  return std::isnan(x);
#endif  // defined(__CUDA_ARCH__)
}
XGBOOST_DEVICE inline bool CheckNAN(double x) {
#if defined(__CUDA_ARCH__)
  return isnan(x);
#else
  return std::isnan(x);
#endif  // defined(__CUDA_ARCH__)
}
#endif  // XGBOOST_STRICT_R_MODE && !defined(__CUDA_ARCH__)
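// LogGamma: log of the gamma function, needed by Poisson regression (see the MSVC
// warning below).  Strict R CPU builds only declare it here.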
#if XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)
double LogGamma(double v);
#else  // Not R or R with GPU.
template <typename T>
XGBOOST_DEVICE inline T LogGamma(T v) {
#ifdef _MSC_VER
#if _MSC_VER >= 1800
  return lgamma(v);
#else
#pragma message("Warning: lgamma function was not available until VS2013"\
                ", poisson regression will be disabled")
  utils::Error("lgamma function was not available until VS2013");
  return static_cast<T>(1.0);
#endif  // _MSC_VER >= 1800
#else
  return lgamma(v);
#endif  // _MSC_VER
}
#endif  // XGBOOST_STRICT_R_MODE && !defined(XGBOOST_USE_CUDA)
}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_MATH_H_