We read files that can contain all kinds of data types, which we normalize before further processing.
The code works with MSVC, GCC, and Clang on my PC. However, GCC does strange things on our CI.
The code, reduced to the bare essentials:
#include <cstdint>
#include <iostream>
#include <limits>
#include <type_traits>

template <typename SourceType, typename TargetType>
TargetType convert(SourceType value)
{
    if constexpr (std::is_integral_v<SourceType> && std::is_floating_point_v<TargetType>)
    {
        // double only has a 53-bit mantissa; if the source type has more digits,
        // fall back to long double (where available) so all values can be represented accurately.
        using DoubleType = std::conditional_t<(std::numeric_limits<SourceType>::digits > std::numeric_limits<double>::digits),
                                              long double,
                                              double>;
        // Conversion from integer to double/float: fit all values into the range [0, 1].
        DoubleType result = 0.0;
        if constexpr (std::is_signed_v<SourceType>)
        {
            // The solution looks more complex than necessary because the behavior differs
            // between compilers: MSVC may rely on implementation-defined behavior for signed
            // overflows, while Clang/GCC apply wraparound semantics when converting negative
            // values to unsigned types.
            using Unsigned = std::make_unsigned_t<SourceType>;
            constexpr auto min = std::numeric_limits<SourceType>::min();
            constexpr auto max = std::numeric_limits<Unsigned>::max();
            const Unsigned shifted = static_cast<Unsigned>(static_cast<std::int64_t>(value) - static_cast<std::int64_t>(min));
            result = static_cast<DoubleType>(shifted) / static_cast<DoubleType>(max);
            std::cout << "min: " << min
                      << " shifted: " << shifted
                      << " shifted casted: " << static_cast<DoubleType>(shifted)
                      << " range: " << max
                      << " range casted: " << static_cast<DoubleType>(max)
                      << " digits: " << std::numeric_limits<DoubleType>::digits
                      << " size: " << sizeof(DoubleType)
                      << " result: " << result << '\n';
        }
        else
        {
            constexpr auto range = std::numeric_limits<SourceType>::max();
            result = static_cast<DoubleType>(value) / static_cast<DoubleType>(range);
        }
        // Avoid a redundant-cast warning when TargetType is already DoubleType.
        if constexpr (std::is_same_v<TargetType, DoubleType>)
        {
            return result;
        }
        return static_cast<TargetType>(result);
    }
}
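To illustrate the intended mapping, here is a quick sanity check (assuming the convert above is in scope; the debug line in the signed branch prints as well):

int main()
{
    // std::int8_t spans [-128, 127]; shifting by -min maps it onto [0, 255],
    // and dividing by 255 yields [0, 1].
    std::cout << convert<std::int8_t, double>(-128) << '\n'; // 0
    std::cout << convert<std::int8_t, double>(0) << '\n';    // 128/255 ≈ 0.501961
    std::cout << convert<std::int8_t, double>(127) << '\n';  // 1
}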
Test code:
#include <gtest/gtest.h>

const auto checkValueRangeBounds = []<typename T>(std::type_identity<T>) {
    EXPECT_EQ((convert<T, float>(std::numeric_limits<T>::lowest())), 0.0F);
    EXPECT_EQ((convert<T, float>(std::numeric_limits<T>::max())), 1.0F);
    EXPECT_EQ((convert<T, double>(std::numeric_limits<T>::lowest())), 0.0);
    EXPECT_EQ((convert<T, double>(std::numeric_limits<T>::max())), 1.0);
};

TEST(convert, bounds)
{
    checkValueRangeBounds(std::type_identity<std::int8_t>{});
    checkValueRangeBounds(std::type_identity<std::int16_t>{});
    checkValueRangeBounds(std::type_identity<std::int32_t>{});
    checkValueRangeBounds(std::type_identity<std::int64_t>{});
    checkValueRangeBounds(std::type_identity<std::uint8_t>{});
    checkValueRangeBounds(std::type_identity<std::uint16_t>{});
    checkValueRangeBounds(std::type_identity<std::uint32_t>{});
    checkValueRangeBounds(std::type_identity<std::uint64_t>{});
}
This works for all cases except std::int64_t on GCC.
The output for this case:
min: -9223372036854775808 shifted: 0 shifted casted: 0 range: 18446744073709551615 range casted: 1.84467e+19 digits: 64 size: 16 result: 0
min: -9223372036854775808 shifted: 18446744073709551615 shifted casted: -1 range: 18446744073709551615 range casted: 1.84467e+19 digits: 64 size: 16 result: -5.42101e-20
It seems GCC can correctly convert UINT64_MAX to long double in the constexpr case ("range casted"), but not at runtime ("shifted casted"), as the expected result is 1.0 and not -5.42101e-20 (nearly 0.0).
Since I can't reproduce it locally with my GCC 11: does anyone have an idea what causes GCC to behave differently on our CI (GCC 10 & 14)?
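In case it helps to narrow things down, this is a minimal sketch of what I would try on the CI, isolating the suspect runtime conversion (the volatile keeps it from being constant-folded):

#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    volatile std::uint64_t value = std::numeric_limits<std::uint64_t>::max();
    const long double atRuntime = static_cast<long double>(value); // converted at runtime
    constexpr long double atCompileTime = static_cast<long double>(std::numeric_limits<std::uint64_t>::max()); // constant-folded
    std::cout << atRuntime << ' ' << atCompileTime << '\n'; // both should print 1.84467e+19
}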
Edit: as pointed out in the comments, if TargetType were the first template parameter, SourceType could be deduced at the call site. It was also suggested to compute the shift in halves to avoid the overflow in the subtraction:

Unsigned shifted = static_cast<Unsigned>(static_cast<std::int64_t>(value / 2) - static_cast<std::int64_t>(min / 2));
shifted *= 2;
if (value > 0 && (value & 1)) shifted += 1;
if (value < 0 && (value & 1)) shifted -= 1;
if (min & 1) shifted -= 1;
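For reference, a sketch of the reordered signature, so that only the target type has to be spelled out:

template <typename TargetType, typename SourceType>
TargetType convert(SourceType value);

// Call site: SourceType is deduced from the argument.
const float f = convert<float>(std::int32_t{42});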