remarkable-linux/include/linux/time.h

#ifndef _LINUX_TIME_H
#define _LINUX_TIME_H

# include <linux/cache.h>
# include <linux/seqlock.h>
# include <linux/math64.h>
# include <linux/time64.h>

extern struct timezone sys_tz;

int get_timespec64(struct timespec64 *ts,
		const struct timespec __user *uts);
int put_timespec64(const struct timespec64 *ts,
		struct timespec __user *uts);
int get_itimerspec64(struct itimerspec64 *it,
		const struct itimerspec __user *uit);
int put_itimerspec64(const struct itimerspec64 *it,
		struct itimerspec __user *uit);
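
/*
 * Example (illustrative sketch, not part of this header): a syscall that
 * receives a "struct timespec __user *" would typically pull it into a
 * timespec64 with get_timespec64() and validate it, e.g.:
 *
 *	struct timespec64 ts;
 *
 *	if (get_timespec64(&ts, uts))
 *		return -EFAULT;
 *	if (!timespec64_valid(&ts))
 *		return -EINVAL;
 *
 * The "uts" pointer and the error choices here are hypothetical; they only
 * show the intended calling pattern.
 */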
#define TIME_T_MAX	(time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)

static inline int timespec_equal(const struct timespec *a,
				 const struct timespec *b)
{
	return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}

/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 */
static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}

static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
{
	if (lhs->tv_sec < rhs->tv_sec)
		return -1;
	if (lhs->tv_sec > rhs->tv_sec)
		return 1;
	return lhs->tv_usec - rhs->tv_usec;
}
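
/*
 * Worked example (illustrative): with a = { .tv_sec = 5, .tv_nsec = 100 }
 * and b = { .tv_sec = 5, .tv_nsec = 300 }, timespec_compare(&a, &b)
 * returns 100 - 300 = -200, i.e. negative, so a < b. As with memcmp(),
 * only the sign of the result is meaningful.
 */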
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
			const unsigned int day, const unsigned int hour,
			const unsigned int min, const unsigned int sec);

/**
 * Deprecated. Use mktime64().
 */
static inline unsigned long mktime(const unsigned int year,
			const unsigned int mon, const unsigned int day,
			const unsigned int hour, const unsigned int min,
			const unsigned int sec)
{
	return mktime64(year, mon, day, hour, min, sec);
}
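
/*
 * Example (illustrative): mktime64() takes calendar fields with a 1-based
 * month and day, so mktime64(1970, 1, 1, 0, 0, 0) is 0 and
 * mktime64(2000, 1, 1, 0, 0, 0) is 946684800, both counted in seconds
 * since the Unix epoch.
 */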
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);

/*
 * timespec_add_safe assumes both values are positive and checks for
 * overflow. It will return TIME_T_MAX if the result would be smaller
 * than either of the arguments.
 */
extern struct timespec timespec_add_safe(const struct timespec lhs,
					 const struct timespec rhs);

static inline struct timespec timespec_add(struct timespec lhs,
						struct timespec rhs)
{
	struct timespec ts_delta;

	set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);
	return ts_delta;
}

/*
 * sub = lhs - rhs, in normalized form
 */
static inline struct timespec timespec_sub(struct timespec lhs,
						struct timespec rhs)
{
	struct timespec ts_delta;

	set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
				lhs.tv_nsec - rhs.tv_nsec);
	return ts_delta;
}
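
/*
 * Worked example (illustrative): adding { .tv_sec = 0, .tv_nsec = 800000000 }
 * and { .tv_sec = 0, .tv_nsec = 600000000 } gives a raw sum of 1400000000 ns;
 * set_normalized_timespec() folds the excess into tv_sec, so timespec_add()
 * returns { .tv_sec = 1, .tv_nsec = 400000000 }.
 */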

/*
 * Returns true if the timespec is normalized, false if denormalized:
 */
static inline bool timespec_valid(const struct timespec *ts)
{
	/* Dates before 1970 are bogus */
	if (ts->tv_sec < 0)
		return false;
	/* Can't have more nanoseconds than a second */
	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return false;
	return true;
}

static inline bool timespec_valid_strict(const struct timespec *ts)
{
	if (!timespec_valid(ts))
		return false;
	/* Disallow values that could overflow ktime_t */
	if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
		return false;
	return true;
}

static inline bool timeval_valid(const struct timeval *tv)
{
	/* Dates before 1970 are bogus */
	if (tv->tv_sec < 0)
		return false;
	/* Can't have more microseconds than a second */
	if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
		return false;
	return true;
}

extern struct timespec timespec_trunc(struct timespec t, unsigned gran);

/*
 * Validates if a timespec/timeval used to inject a time offset is valid.
 * Offsets can be positive or negative. The value of the timeval/timespec
 * is the sum of its fields, but *NOTE*: the field tv_usec/tv_nsec must
 * always be non-negative.
 */
static inline bool timeval_inject_offset_valid(const struct timeval *tv)
{
	/* We don't check the tv_sec as it can be positive or negative */

	/* Can't have more microseconds than a second */
	if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
		return false;
	return true;
}

static inline bool timespec_inject_offset_valid(const struct timespec *ts)
{
	/* We don't check the tv_sec as it can be positive or negative */

	/* Can't have more nanoseconds than a second */
	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
		return false;
	return true;
}
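
/*
 * Worked example (illustrative): because the value is the sum of the fields
 * and tv_nsec must stay non-negative, an offset of -0.5 s is expressed as
 * { .tv_sec = -1, .tv_nsec = 500000000 } (i.e. -1 s + 0.5 s), which
 * timespec_inject_offset_valid() accepts, while
 * { .tv_sec = 0, .tv_nsec = -500000000 } is rejected.
 */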

/* Some architectures do not supply their own clocksource.
 * This is mainly the case in architectures that get their
 * inter-tick times by reading the counter on their interval
 * timer. Since these timers wrap every tick, they're not really
 * useful as clocksources. Wrapping them to act like one is possible
 * but not very efficient. So we provide a callout these arches
 * can implement for use with the jiffies clocksource to provide
 * finer-than-tick granularity.
 */
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
extern u32 (*arch_gettimeoffset)(void);
#endif

struct itimerval;
extern int do_setitimer(int which, struct itimerval *value,
			struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);

/*
 * Similar to the struct tm in userspace <time.h>, but it needs to be here so
 * that the kernel source is self contained.
 */
struct tm {
	/*
	 * the number of seconds after the minute, normally in the range
	 * 0 to 59, but can be up to 60 to allow for leap seconds
	 */
	int tm_sec;
	/* the number of minutes after the hour, in the range 0 to 59 */
	int tm_min;
	/* the number of hours past midnight, in the range 0 to 23 */
	int tm_hour;
	/* the day of the month, in the range 1 to 31 */
	int tm_mday;
	/* the number of months since January, in the range 0 to 11 */
	int tm_mon;
	/* the number of years since 1900 */
	long tm_year;
	/* the number of days since Sunday, in the range 0 to 6 */
	int tm_wday;
	/* the number of days since January 1, in the range 0 to 365 */
	int tm_yday;
};

void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);

/**
 * time_to_tm - converts the calendar time to local broken-down time
 *
 * @totalsecs:	the number of seconds elapsed since 00:00:00 on January 1,
 *		1970, Coordinated Universal Time (UTC).
 * @offset:	offset seconds adding to totalsecs.
 * @result:	pointer to struct tm variable to receive broken-down time
 */
static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
{
	time64_to_tm(totalsecs, offset, result);
}
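
/*
 * Worked example (illustrative): time64_to_tm(0, 0, &tm) describes
 * 1970-01-01 00:00:00 UTC, i.e. tm.tm_year == 70 (years since 1900),
 * tm.tm_mon == 0 (January), tm.tm_mday == 1 and tm.tm_wday == 4 (Thursday).
 */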

/**
 * timespec_to_ns - Convert timespec to nanoseconds
 * @ts:		pointer to the timespec variable to be converted
 *
 * Returns the scalar nanosecond representation of the timespec
 * parameter.
 */
static inline s64 timespec_to_ns(const struct timespec *ts)
{
	return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}

/**
 * timeval_to_ns - Convert timeval to nanoseconds
 * @tv:		pointer to the timeval variable to be converted
 *
 * Returns the scalar nanosecond representation of the timeval
 * parameter.
 */
static inline s64 timeval_to_ns(const struct timeval *tv)
{
	return ((s64) tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}
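
/*
 * Worked example (illustrative): for tv = { .tv_sec = 1, .tv_usec = 500000 },
 * timeval_to_ns(&tv) is 1 * 1000000000 + 500000 * 1000 = 1500000000 ns,
 * i.e. 1.5 seconds.
 */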

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
extern struct timespec ns_to_timespec(const s64 nsec);

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
extern struct timeval ns_to_timeval(const s64 nsec);

/**
 * timespec_add_ns - Adds nanoseconds to a timespec
 * @a:		pointer to timespec to be incremented
 * @ns:		unsigned nanoseconds value to be added
 *
 * This must always be inlined because it's used from the x86-64 vdso,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
{
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}
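
/*
 * Worked example (illustrative): starting from a = { .tv_sec = 2,
 * .tv_nsec = 700000000 }, timespec_add_ns(&a, 1500000000) divides the
 * combined 2200000000 ns by NSEC_PER_SEC, adding 2 to tv_sec and leaving
 * tv_nsec = 200000000, so a becomes { 4, 200000000 }.
 */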

static inline bool itimerspec64_valid(const struct itimerspec64 *its)
{
	if (!timespec64_valid(&(its->it_interval)) ||
	    !timespec64_valid(&(its->it_value)))
		return false;

	return true;
}

#endif