
blk-stat: delete useless code

Fix two issues:
- The per-cpu stat flush is unnecessary: nobody uses the per-cpu stats
  except to sum them into the global stat, so the mean can be computed
  there instead (see the sketch below). The flush just wastes CPU time.
- Some fields are signed (s32/s64) for no reason; they only hold
  non-negative sample counts and nanosecond values, so make them unsigned.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Shaohua Li 2017-10-06 17:55:59 -07:00 committed by Jens Axboe
parent 53cfdc10a9
commit eca8b53a67
2 changed files with 9 additions and 41 deletions
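To illustrate the first point, here is a minimal user-space sketch (assumed names: rq_stat, stat_add() and stat_sum() are simplified stand-ins for struct blk_rq_stat, __blk_stat_add() and blk_stat_sum(); this is not the kernel code). Each per-CPU stat only accumulates a raw sum ("batch") and a sample count; the weighted mean is computed once, when the per-CPU stats are folded into the global one.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct blk_rq_stat: every field unsigned. */
struct rq_stat {
	uint64_t mean;		/* only meaningful after stat_sum() */
	uint64_t min;
	uint64_t max;
	uint32_t nr_samples;
	uint64_t batch;		/* running sum of values, never flushed */
};

static void stat_init(struct rq_stat *s)
{
	s->min = UINT64_MAX;	/* analogous to the kernel's -1ULL */
	s->max = s->mean = s->batch = 0;
	s->nr_samples = 0;
}

/* Per-CPU add path: plain accumulation, no batch threshold, no flush. */
static void stat_add(struct rq_stat *s, uint64_t value)
{
	if (value < s->min)
		s->min = value;
	if (value > s->max)
		s->max = value;
	s->batch += value;
	s->nr_samples++;
}

/* Fold a per-CPU stat into the global one; src->mean is never read. */
static void stat_sum(struct rq_stat *dst, const struct rq_stat *src)
{
	if (!src->nr_samples)
		return;
	if (src->min < dst->min)
		dst->min = src->min;
	if (src->max > dst->max)
		dst->max = src->max;
	/* weighted mean: (old mean * old count + new raw sum) / total count */
	dst->mean = (src->batch + dst->mean * dst->nr_samples) /
		    (dst->nr_samples + src->nr_samples);
	dst->nr_samples += src->nr_samples;
}

int main(void)
{
	struct rq_stat global, cpu0, cpu1;

	stat_init(&global);
	stat_init(&cpu0);
	stat_init(&cpu1);

	stat_add(&cpu0, 100);	/* e.g. request latencies in nanoseconds */
	stat_add(&cpu0, 300);
	stat_add(&cpu1, 200);

	stat_sum(&global, &cpu0);
	stat_sum(&global, &cpu1);

	/* prints mean=200 min=100 max=300 n=3 */
	printf("mean=%llu min=%llu max=%llu n=%u\n",
	       (unsigned long long)global.mean,
	       (unsigned long long)global.min,
	       (unsigned long long)global.max,
	       (unsigned)global.nr_samples);
	return 0;
}

Since batch carries the full per-CPU sum, deferring the mean calculation to sum time loses nothing; the periodic per-CPU flush in the old code only added work on the request path.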

block/blk-stat.c

@@ -11,8 +11,6 @@
 #include "blk-mq.h"
 #include "blk.h"

-#define BLK_RQ_STAT_BATCH 64
-
 struct blk_queue_stats {
 	struct list_head callbacks;
 	spinlock_t lock;
@@ -23,45 +21,21 @@ static void blk_stat_init(struct blk_rq_stat *stat)
 {
 	stat->min = -1ULL;
 	stat->max = stat->nr_samples = stat->mean = 0;
-	stat->batch = stat->nr_batch = 0;
-}
-
-static void blk_stat_flush_batch(struct blk_rq_stat *stat)
-{
-	const s32 nr_batch = READ_ONCE(stat->nr_batch);
-	const s32 nr_samples = READ_ONCE(stat->nr_samples);
-
-	if (!nr_batch)
-		return;
-	if (!nr_samples)
-		stat->mean = div64_s64(stat->batch, nr_batch);
-	else {
-		stat->mean = div64_s64((stat->mean * nr_samples) +
-					stat->batch,
-					nr_batch + nr_samples);
-	}
-
-	stat->nr_samples += nr_batch;
-	stat->nr_batch = stat->batch = 0;
+	stat->batch = 0;
 }

+/* src is a per-cpu stat, mean isn't initialized */
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
-	blk_stat_flush_batch(src);
-
 	if (!src->nr_samples)
 		return;

 	dst->min = min(dst->min, src->min);
 	dst->max = max(dst->max, src->max);

-	if (!dst->nr_samples)
-		dst->mean = src->mean;
-	else {
-		dst->mean = div64_s64((src->mean * src->nr_samples) +
-					(dst->mean * dst->nr_samples),
-					dst->nr_samples + src->nr_samples);
-	}
+	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
+				dst->nr_samples + src->nr_samples);

 	dst->nr_samples += src->nr_samples;
 }
@@ -69,13 +43,8 @@ static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
 {
 	stat->min = min(stat->min, value);
 	stat->max = max(stat->max, value);
-	if (stat->batch + value < stat->batch ||
-	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
-		blk_stat_flush_batch(stat);
-
 	stat->batch += value;
-	stat->nr_batch++;
 	stat->nr_samples++;
 }

 void blk_stat_add(struct request *rq)
@@ -84,7 +53,7 @@ void blk_stat_add(struct request *rq)
 	struct blk_stat_callback *cb;
 	struct blk_rq_stat *stat;
 	int bucket;
-	s64 now, value;
+	u64 now, value;

 	now = __blk_stat_time(ktime_to_ns(ktime_get()));
 	if (now < blk_stat_time(&rq->issue_stat))

include/linux/blk_types.h

@@ -329,11 +329,10 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
 }

 struct blk_rq_stat {
-	s64 mean;
+	u64 mean;
 	u64 min;
 	u64 max;
-	s32 nr_samples;
-	s32 nr_batch;
+	u32 nr_samples;
 	u64 batch;
 };
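A side note on the type changes, with a hypothetical user-space stand-in (div_u64_like() and merged_mean() below are illustrations, not kernel code): the kernel's div_u64() in include/linux/math64.h divides a u64 by a u32, so once mean is u64 and nr_samples is u32 the merge in blk_stat_sum() can use it directly instead of the signed div64_s64().

#include <stdint.h>

/* Rough user-space equivalent of the kernel's div_u64(): u64 / u32. */
static inline uint64_t div_u64_like(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

/* The merge expression from the new blk_stat_sum(), with unsigned types. */
static uint64_t merged_mean(uint64_t dst_mean, uint32_t dst_n,
			    uint64_t src_batch, uint32_t src_n)
{
	return div_u64_like(src_batch + dst_mean * dst_n, dst_n + src_n);
}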