The implementation doesn't look too exotic, no:
Code:
template<class time_type> class microsec_clock {
  // ....
  static time_type local_time()
  {
    return create_time(&c_time::localtime);
  }

  static time_type create_time(time_converter converter)
  {
#ifdef BOOST_HAS_GETTIMEOFDAY
    timeval tv;
    gettimeofday(&tv, 0);
    std::time_t t = tv.tv_sec;
    boost::uint32_t sub_sec = tv.tv_usec;
#elif defined(BOOST_HAS_FTIME)
    winapi::file_time ft;
    winapi::get_system_time_as_file_time(ft);
    boost::uint64_t micros = winapi::file_time_to_microseconds(ft);
    std::time_t t = static_cast<std::time_t>(micros / 1000000UL);
    boost::uint32_t sub_sec = static_cast<boost::uint32_t>(micros % 1000000UL);
#else
#error Internal Boost.DateTime error: BOOST_DATE_TIME_HAS_HIGH_PRECISION_CLOCK is defined, however neither gettimeofday nor FILETIME support is detected.
#endif
    std::tm curr;
    std::tm* curr_ptr = converter(&t, &curr);
    date_type d(curr_ptr->tm_year + 1900,
                curr_ptr->tm_mon + 1,
                curr_ptr->tm_mday);
    // res_adjust() scales microseconds up to the configured
    // sub-second resolution of the time type.
    int adjust = static_cast<int>(resolution_traits_type::res_adjust() / 1000000);
    time_duration_type td(curr_ptr->tm_hour,
                          curr_ptr->tm_min,
                          curr_ptr->tm_sec,
                          sub_sec * adjust);
    return time_type(d, td);
  }
  // ....
};
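For reference, the public entry point is just this template instantiated for ptime, so calling it is a one-liner. A minimal usage sketch (mine, not from the Boost sources):

Code:
#include <boost/date_time/posix_time/posix_time.hpp>
#include <iostream>

int main()
{
    // boost::posix_time::microsec_clock is microsec_clock<ptime>
    boost::posix_time::ptime now =
        boost::posix_time::microsec_clock::local_time();
    std::cout << boost::posix_time::to_simple_string(now) << std::endl;
    return 0;
}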
However, while gettimeofday is also what the unix version of SDL_GetTicks() uses, the win32 SDL timer just wraps QueryPerformanceCounter (unlike Boost's get_system_time_as_file_time), which might explain where the difference comes from.
Code:
Uint32 SDL_GetTicks (void) // unix
{
#if HAVE_CLOCK_GETTIME
	Uint32 ticks;
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	/* 'start' is captured once by SDL_StartTicks() at init time */
	ticks = (now.tv_sec - start.tv_sec) * 1000 + (now.tv_nsec - start.tv_nsec) / 1000000;
	return (ticks);
#else
	Uint32 ticks;
	struct timeval now;
	gettimeofday(&now, NULL);
	ticks = (now.tv_sec - start.tv_sec) * 1000 + (now.tv_usec - start.tv_usec) / 1000;
	return (ticks);
#endif
}
Uint32 SDL_GetTicks(void) // win32
{
	DWORD now, ticks;
#ifndef USE_GETTICKCOUNT
	LARGE_INTEGER hires_now;
#endif

#ifdef USE_GETTICKCOUNT
	now = GetTickCount();
#else
	/* the hires_* globals are initialized in SDL_StartTicks() */
	if (hires_timer_available) {
		QueryPerformanceCounter(&hires_now);
		hires_now.QuadPart -= hires_start_ticks.QuadPart;
		hires_now.QuadPart *= 1000;
		hires_now.QuadPart /= hires_ticks_per_second.QuadPart;
		return (DWORD)hires_now.QuadPart;
	} else {
		now = timeGetTime();
	}
#endif

	/* handle wraparound of the 32-bit millisecond counter */
	if (now < start) {
		ticks = (TIME_WRAP_VALUE - start) + now;
	} else {
		ticks = (now - start);
	}
	return (ticks);
}
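As an aside, the granularity gap between those two win32 sources is easy to see for yourself. Here's a quick sketch (my own, not from either codebase) that spins until GetSystemTimeAsFileTime() ticks over and times the wait with QueryPerformanceCounter; the wait is bounded by the timer interrupt period (~15.6 ms by default), while QPC resolves much finer than that:

Code:
#include <windows.h>
#include <stdio.h>

int main(void)
{
    FILETIME ft, prev;
    LARGE_INTEGER freq, t0, t1;
    QueryPerformanceFrequency(&freq);

    GetSystemTimeAsFileTime(&prev);
    QueryPerformanceCounter(&t0);
    // Spin until the FILETIME value advances once.
    do {
        GetSystemTimeAsFileTime(&ft);
    } while (ft.dwLowDateTime == prev.dwLowDateTime &&
             ft.dwHighDateTime == prev.dwHighDateTime);
    QueryPerformanceCounter(&t1);

    printf("FILETIME stepped after %.3f ms of QPC time\n",
           (t1.QuadPart - t0.QuadPart) * 1000.0 / freq.QuadPart);
    return 0;
}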
In any case, I also support moving back to SDL_GetTicks(): it's the better timer on unix (clock_gettime doesn't suffer from the skewing issues gettimeofday has, and it should be available on all modern distributions), and QueryPerformanceCounter already has a mostly proven track record on win32.
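And if older unix systems are a concern, probing for CLOCK_MONOTONIC at startup is cheap. A sketch of what that check could look like (hypothetical helper, not actual SDL code):

Code:
#include <time.h>
#include <unistd.h>

// Returns nonzero if CLOCK_MONOTONIC actually works on this system;
// a caller could fall back to gettimeofday() otherwise.
int have_monotonic_clock(void)
{
#if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
    struct timespec ts;
    return clock_gettime(CLOCK_MONOTONIC, &ts) == 0;
#else
    return 0;
#endif
}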