
staging/lustre: use 64-bit time for pl_recalc

The ldlm pool calculates elapsed time by comparing the previous and
current get_seconds() values, which is unsafe on 32-bit machines
after 2038.

This changes the code to use time64_t and ktime_get_real_seconds(),
keeping 'real' rather than 'monotonic' time because the timestamps
appear in debug prints.
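
For illustration only (not part of the patch): a minimal userspace sketch of
the rollover being avoided, using int32_t as a stand-in for a 32-bit time_t
and int64_t for time64_t, with timestamps hand-picked around 2038-01-19.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t last_recalc = 2147483640;	/* still fits in 32 bits */
	int64_t now         = 2147483700;	/* past the 32-bit limit */

	/*
	 * Truncating 'now' to 32 bits (as a 32-bit time_t would) wraps it
	 * negative on typical two's-complement systems, so the computed
	 * interval comes out hugely negative instead of 60 seconds.
	 */
	int64_t interval_32 = (int64_t)(int32_t)now - (int32_t)last_recalc;

	/* The same arithmetic done in 64 bits keeps working after 2038. */
	int64_t interval_64 = now - last_recalc;

	printf("32-bit interval: %lld\n", (long long)interval_32);
	printf("64-bit interval: %lld\n", (long long)interval_64);
	return 0;
}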

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Arnd Bergmann 2015-09-27 16:45:17 -04:00 committed by Greg Kroah-Hartman
parent 1f8c37a4e2
commit 8f83409cf2
2 changed files with 17 additions and 17 deletions


@@ -256,9 +256,9 @@ struct ldlm_pool {
 	 * server_slv * lock_volume_factor. */
 	atomic_t pl_lock_volume_factor;
 	/** Time when last SLV from server was obtained. */
-	time_t pl_recalc_time;
+	time64_t pl_recalc_time;
 	/** Recalculation period for pool. */
-	time_t pl_recalc_period;
+	time64_t pl_recalc_period;
 	/** Recalculation and shrink operations. */
 	const struct ldlm_pool_ops *pl_ops;
 	/** Number of planned locks for next period. */


@@ -330,14 +330,14 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
  */
 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
 		return 0;
 
 	spin_lock(&pl->pl_lock);
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period) {
 		spin_unlock(&pl->pl_lock);
 		return 0;
@@ -358,7 +358,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 	 */
 	ldlm_pool_recalc_grant_plan(pl);
 
-	pl->pl_recalc_time = get_seconds();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
 			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
@@ -467,10 +467,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 	int ret;
 
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
 		return 0;
@@ -478,7 +478,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	/*
 	 * Check if we need to recalc lists now.
 	 */
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period) {
 		spin_unlock(&pl->pl_lock);
 		return 0;
@@ -513,7 +513,7 @@ out:
 	 * Time of LRU resizing might be longer than period,
 	 * so update after LRU resizing rather than before it.
 	 */
-	pl->pl_recalc_time = get_seconds();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
 			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
@@ -571,10 +571,10 @@ static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
  */
 int ldlm_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	u32 recalc_interval_sec;
 	int count;
 
-	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec <= 0)
 		goto recalc;
@@ -599,14 +599,14 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
 		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
 				    count);
 	}
-	recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
+	recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
 			      pl->pl_recalc_period;
 	if (recalc_interval_sec <= 0) {
 		/* Prevent too frequent recalculation. */
-		CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
-		       "too short period(%ld)",
+		CDEBUG(D_DLMTRACE,
+		       "Negative interval(%d), too short period(%lld)",
 		       recalc_interval_sec,
-		       pl->pl_recalc_period);
+		       (s64)pl->pl_recalc_period);
 		recalc_interval_sec = 1;
 	}
@@ -893,7 +893,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 	spin_lock_init(&pl->pl_lock);
 	atomic_set(&pl->pl_granted, 0);
-	pl->pl_recalc_time = get_seconds();
+	pl->pl_recalc_time = ktime_get_seconds();
 	atomic_set(&pl->pl_lock_volume_factor, 1);
 	atomic_set(&pl->pl_grant_rate, 0);