
for-4.19/post-20180822

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlt9on8QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpj1xEADKBmJlV9aVyxc5w6XggqAGeHqI4afFrl+v
 9fW6WUQMAaBUrr7PMIEJQ0Zm4B7KxgBaEWNtuuj4ULkjpgYm2AuGUuTJSyKz41rS
 Ma+KNyCA2Zmq4SvwGFbcdCuCbUqnoxTycscAgCjuDvIYLW0+nFGNc47ibmu9lZIV
 33Ef5LrxuCjhC2zyNxEdWpUxDCjoYzock85LW+wYyIYLU9uKdoExS+YmT8U+ebA/
 AkXBcxPztNDxwkcsIwgGVoTjwxiowqGz3uueWfyEmYgaCPiNOsxkoNQAtjX4ykQE
 hnqnHWyzJkRwbYo7Vd/bRAZXvszKGYE1YcJmu5QrNf0dK5MSq2o5OYJAEJWbucPj
 m0R2u7O9qbS2JEnxGrm5+oYJwBzwNY5/Lajr15WkljTqobKnqcvn/Hdgz/XdGtek
 0S1QHkkBsF7e+cax8sePWK+O3ilY7pl9CzyZKB/tJngl8A45Jv8xVojg0v3O7oS+
 zZib0rwWg/bwR/uN6uPCDcEsQusqL5YovB7m6NRVshwz6cV1zVNp2q+iOulk7KuC
 MprW4Du9CJf8HA19XtyJIG1XLstnuz+Exy+i5BiimUJ5InoEFDuj/6OZa6Qaczbo
 SrDDvpGtSf4h7czKpE5kV4uZiTOrjuI30TrI+4csdZ7HQIlboxNL72seNTLJs55F
 nbLjRM8L6g==
 =FS7e
 -----END PGP SIGNATURE-----

Merge tag 'for-4.19/post-20180822' of git://git.kernel.dk/linux-block

Pull more block updates from Jens Axboe:

 - Set of bcache fixes and changes (Coly)

 - The flush warn fix (me)

 - Small series of BFQ fixes (Paolo)

 - wbt hang fix (Ming)

 - blktrace fix (Steven)

 - blk-mq hardware queue count update fix (Jianchao)

 - Various little fixes

* tag 'for-4.19/post-20180822' of git://git.kernel.dk/linux-block: (31 commits)
  block/DAC960.c: make some arrays static const, shrinks object size
  blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter
  blk-mq: init hctx sched after update ctx and hctx mapping
  block: remove duplicate initialization
  tracing/blktrace: Fix to allow setting same value
  pktcdvd: fix setting of 'ret' error return for a few cases
  block: change return type to bool
  block, bfq: return nbytes and not zero from struct cftype .write() method
  block, bfq: improve code of bfq_bfqq_charge_time
  block, bfq: reduce write overcharge
  block, bfq: always update the budget of an entity when needed
  block, bfq: readd missing reset of parent-entity service
  blk-wbt: fix IO hang in wbt_wait()
  block: don't warn for flush on read-only device
  bcache: add the missing comments for smp_mb()/smp_wmb()
  bcache: remove unnecessary space before ioctl function pointer arguments
  bcache: add missing SPDX header
  bcache: move open brace at end of function definitions to next line
  bcache: add static const prefix to char * array declarations
  bcache: fix code comments style
  ...
Linus Torvalds 2018-08-22 13:38:05 -07:00
commit 5bed49adfe
43 changed files with 865 additions and 642 deletions

block/bfq-cgroup.c

@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
if (ret)
return ret;
return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
return ret ?: nbytes;
}
#ifdef CONFIG_DEBUG_BLK_CGROUP
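
Why that matters: a kernfs/cgroup .write() handler must return the number of bytes consumed on success, or a negative errno; returning 0 (as the old code did via bfq_io_set_weight_legacy()'s success value) makes user space see a short write and retry. A minimal sketch of the contract, with a hypothetical demo_apply() helper:

static ssize_t demo_write(struct kernfs_open_file *of, char *buf,
			  size_t nbytes, loff_t off)
{
	int ret = demo_apply(buf);	/* hypothetical helper: 0 or -errno */

	/* GNU C "?:": keep ret when non-zero (an errno), otherwise
	 * report that all nbytes were consumed. */
	return ret ?: nbytes;
}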

block/bfq-iosched.c

@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194;
static const int bfq_default_max_budget = 16 * 1024;
/*
* Async to sync throughput distribution is controlled as follows:
* when an async request is served, the entity is charged the number
* of sectors of the request, multiplied by the factor below
* When a sync request is dispatched, the queue that contains that
* request, and all the ancestor entities of that queue, are charged
* with the number of sectors of the request. In contrast, if the
* request is async, then the queue and its ancestor entities are
* charged with the number of sectors of the request, multiplied by
* the factor below. This throttles the bandwidth for async I/O,
* w.r.t. sync I/O, and it is done to counter the tendency of async
* writes to steal I/O throughput from reads.
*
* The current value of this parameter is the result of tuning with
* several hardware and software configurations. We tried to find the
* lowest value for which writes do not cause noticeable problems for
* reads. In fact, the lower this parameter, the more stable the I/O
* control, in the following respect. The lower this parameter is, the
* less the bandwidth enjoyed by a group decreases
* - when the group does writes, w.r.t. when it does reads;
* - when other groups do reads, w.r.t. when they do writes.
*/
static const int bfq_async_charge_factor = 10;
static const int bfq_async_charge_factor = 3;
/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;
@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
return blk_rq_sectors(rq);
/*
* If there are no weight-raised queues, then amplify service
* by just the async charge factor; otherwise amplify service
* by twice the async charge factor, to further reduce latency
* for weight-raised queues.
*/
if (bfqq->bfqd->wr_busy_queues == 0)
return blk_rq_sectors(rq) * bfq_async_charge_factor;
return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
return blk_rq_sectors(rq) * bfq_async_charge_factor;
}
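
A worked example of the new charging rule (illustrative numbers): a 512-sector request from a sync or weight-raised queue is charged exactly 512 sectors; the same request from an async, non-weight-raised queue is now charged 512 * 3 = 1536 sectors, where the old code charged 512 * 10 = 5120, or 512 * 2 * 10 = 10240 when weight-raised queues were busy. The smaller, unconditional factor is the "reduce write overcharge" from the shortlog.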
/**
@ -3298,6 +3303,27 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
*/
} else
entity->service = 0;
/*
* Reset the received-service counter for every parent entity.
* Differently from what happens with bfqq->entity.service,
* the resetting of this counter never needs to be postponed
* for parent entities. In fact, in case bfqq may have a
* chance to go on being served using the last, partially
* consumed budget, bfqq->entity.service needs to be kept,
* because if bfqq then actually goes on being served using
* the same budget, the last value of bfqq->entity.service is
* needed to properly decrement bfqq->entity.budget by the
* portion already consumed. In contrast, it is not necessary
* to keep entity->service for parent entities too, because
* the bubble up of the new value of bfqq->entity.budget will
* make sure that the budgets of parent entities are correct,
* even in case bfqq and thus parent entities go on receiving
* service with the same budget.
*/
entity = entity->parent;
for_each_entity(entity)
entity->service = 0;
}
/*

block/bfq-wf2q.c

@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
if (!change_without_lookup) /* lookup needed */
next_in_service = bfq_lookup_next_entity(sd, expiration);
if (next_in_service)
parent_sched_may_change = !sd->next_in_service ||
if (next_in_service) {
bool new_budget_triggers_change =
bfq_update_parent_budget(next_in_service);
parent_sched_may_change = !sd->next_in_service ||
new_budget_triggers_change;
}
sd->next_in_service = next_in_service;
if (!next_in_service)
@ -877,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
unsigned long time_ms)
{
struct bfq_entity *entity = &bfqq->entity;
int tot_serv_to_charge = entity->service;
unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
if (time_ms > 0 && time_ms < timeout_ms)
tot_serv_to_charge =
(bfqd->bfq_max_budget * time_ms) / timeout_ms;
if (tot_serv_to_charge < entity->service)
tot_serv_to_charge = entity->service;
unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
unsigned long bounded_time_ms = min(time_ms, timeout_ms);
int serv_to_charge_for_time =
(bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
/* Increase budget to avoid inconsistencies */
if (tot_serv_to_charge > entity->budget)
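
With illustrative numbers (assuming HZ=1000, so timeout_ms = jiffies_to_msecs(HZ/8) = 125): a queue that consumed time_ms = 25 with entity->service = 1000 is charged max(16384 * min(25, 125) / 125, 1000) = max(3276, 1000) = 3276 sectors for bfq_max_budget = 16384, i.e. the fraction of the maximum budget proportional to the time consumed, never less than the service actually received.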

block/blk-core.c

@ -1036,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
@ -2162,7 +2161,9 @@ static inline bool should_fail_request(struct hd_struct *part,
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
if (part->policy && op_is_write(bio_op(bio))) {
const int op = bio_op(bio);
if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
char b[BDEVNAME_SIZE];
WARN_ONCE(1,

block/blk-mq-sched.c

@ -462,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
blk_mq_sched_free_tags(set, hctx, i);
}
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct elevator_queue *e = q->elevator;
int ret;
if (!e)
return 0;
ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
if (ret)
return ret;
if (e->type->ops.mq.init_hctx) {
ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
if (ret) {
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
return ret;
}
}
blk_mq_debugfs_register_sched_hctx(q, hctx);
return 0;
}
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct elevator_queue *e = q->elevator;
if (!e)
return;
blk_mq_debugfs_unregister_sched_hctx(hctx);
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
e->type->ops.mq.exit_hctx(hctx, hctx_idx);
hctx->sched_data = NULL;
}
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
struct blk_mq_hw_ctx *hctx;

block/blk-mq-sched.h

@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx);
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{

block/blk-mq-tag.c

@ -320,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
struct blk_mq_hw_ctx *hctx;
int i;
/*
* __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
* queue_hw_ctx after freezing the queue, so we can use q_usage_counter
* to avoid racing with it. __blk_mq_update_nr_hw_queues will use
* synchronize_rcu to ensure all of the users have left the critical
* section below and see the zeroed q_usage_counter.
*/
rcu_read_lock();
if (percpu_ref_is_zero(&q->q_usage_counter)) {
rcu_read_unlock();
return;
}
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
@ -335,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
rcu_read_unlock();
}
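
These reader-side lines pair with the updater in __blk_mq_update_nr_hw_queues() further down in this merge; a condensed sketch of the handshake (fragments of both sides, not a complete function):

/* reader: blk_mq_queue_tag_busy_iter() */
rcu_read_lock();
if (percpu_ref_is_zero(&q->q_usage_counter)) {
	rcu_read_unlock();		/* frozen: hctx may be replaced */
	return;
}
/* ... iterate queue_hw_ctx/tags safely ... */
rcu_read_unlock();

/* updater: __blk_mq_update_nr_hw_queues() */
blk_mq_freeze_queue(q);			/* q_usage_counter drops to zero */
synchronize_rcu();			/* waits out readers already inside */
/* ... now safe to reallocate queue_hw_ctx and remap queues ... */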
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,

block/blk-mq.c

@ -2145,8 +2145,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@ -2214,12 +2212,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
goto exit_hctx;
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
goto sched_exit_hctx;
goto exit_hctx;
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
goto free_fq;
@ -2233,8 +2228,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_fq:
kfree(hctx->fq);
sched_exit_hctx:
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@ -2896,10 +2889,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret;
}
/*
* request_queue and elevator_type pair.
* It is just used by __blk_mq_update_nr_hw_queues to cache
* the elevator_type associated with a request_queue.
*/
struct blk_mq_qe_pair {
struct list_head node;
struct request_queue *q;
struct elevator_type *type;
};
/*
* Cache the elevator_type in qe pair list and switch the
* io scheduler to 'none'
*/
static bool blk_mq_elv_switch_none(struct list_head *head,
struct request_queue *q)
{
struct blk_mq_qe_pair *qe;
if (!q->elevator)
return true;
qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
if (!qe)
return false;
INIT_LIST_HEAD(&qe->node);
qe->q = q;
qe->type = q->elevator->type;
list_add(&qe->node, head);
mutex_lock(&q->sysfs_lock);
/*
* After elevator_switch_mq, the previous elevator_queue will be
* released by elevator_release. The reference to the io scheduler
* module taken by elevator_get will also be put, so we need to take
* a reference to the io scheduler module here to prevent it from
* being removed.
*/
__module_get(qe->type->elevator_owner);
elevator_switch_mq(q, NULL);
mutex_unlock(&q->sysfs_lock);
return true;
}
static void blk_mq_elv_switch_back(struct list_head *head,
struct request_queue *q)
{
struct blk_mq_qe_pair *qe;
struct elevator_type *t = NULL;
list_for_each_entry(qe, head, node)
if (qe->q == q) {
t = qe->type;
break;
}
if (!t)
return;
list_del(&qe->node);
kfree(qe);
mutex_lock(&q->sysfs_lock);
elevator_switch_mq(q, t);
mutex_unlock(&q->sysfs_lock);
}
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues)
{
struct request_queue *q;
LIST_HEAD(head);
lockdep_assert_held(&set->tag_list_lock);
@ -2910,6 +2974,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q);
/*
* Sync with blk_mq_queue_tag_busy_iter.
*/
synchronize_rcu();
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
* updating the new sw to hw queue mappings.
*/
list_for_each_entry(q, &set->tag_list, tag_set_list)
if (!blk_mq_elv_switch_none(&head, q))
goto switch_back;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
@ -2918,6 +2994,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_queue_reinit(q);
}
switch_back:
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_elv_switch_back(&head, q);
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
}
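
For context, drivers reach this path through the exported wrapper, which takes tag_list_lock and calls the function above; a hypothetical caller (dev and new_nr are assumed names):

/* e.g. after a controller reset reports a different queue count */
blk_mq_update_nr_hw_queues(&dev->tag_set, new_nr);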

block/blk-wbt.c

@ -576,12 +576,8 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
struct rq_wb *rwb = RQWB(rqos);
enum wbt_flags flags;
if (!rwb_enabled(rwb))
return;
flags = bio_to_wbt_flags(rwb, bio);
if (!wbt_should_throttle(rwb, bio)) {
if (!(flags & WBT_TRACKED)) {
if (flags & WBT_READ)
wb_timestamp(rwb, &rwb->last_issue);
return;

block/blk.h

@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);
@ -297,7 +299,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
* b) the queue had IO stats enabled when this request was started, and
* c) it's a file system request
*/
static inline int blk_do_io_stat(struct request *rq)
static inline bool blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) &&

block/elevator.c

@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e)
}
EXPORT_SYMBOL_GPL(elv_unregister);
static int elevator_switch_mq(struct request_queue *q,
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e)
{
int ret;
lockdep_assert_held(&q->sysfs_lock);
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
elv_unregister_queue(q);
@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q,
blk_add_trace_msg(q, "elv switch: none");
out:
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
lockdep_assert_held(&q->sysfs_lock);
if (q->mq_ops)
return elevator_switch_mq(q, new_e);
if (q->mq_ops) {
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
err = elevator_switch_mq(q, new_e);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return err;
}
/*
* Turn on BYPASS and drain all requests w/ elevator private data.

drivers/block/DAC960.c

@ -2428,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
{
DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
unsigned char *ReadCacheStatus[] = { "Read Cache Disabled",
"Read Cache Enabled",
"Read Ahead Enabled",
"Intelligent Read Ahead Enabled",
"-", "-", "-", "-" };
unsigned char *WriteCacheStatus[] = { "Write Cache Disabled",
"Logical Device Read Only",
"Write Cache Enabled",
"Intelligent Write Cache Enabled",
"-", "-", "-", "-" };
static const unsigned char *ReadCacheStatus[] = {
"Read Cache Disabled",
"Read Cache Enabled",
"Read Ahead Enabled",
"Intelligent Read Ahead Enabled",
"-", "-", "-", "-"
};
static const unsigned char *WriteCacheStatus[] = {
"Write Cache Disabled",
"Logical Device Read Only",
"Write Cache Enabled",
"Intelligent Write Cache Enabled",
"-", "-", "-", "-"
};
unsigned char *GeometryTranslation;
if (LogicalDeviceInfo == NULL) continue;
switch (LogicalDeviceInfo->DriveGeometry)
@ -4339,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
{
DAC960_Controller_T *Controller = Command->Controller;
unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
"NOT READY", "MEDIUM ERROR",
"HARDWARE ERROR", "ILLEGAL REQUEST",
"UNIT ATTENTION", "DATA PROTECT",
"BLANK CHECK", "VENDOR-SPECIFIC",
"COPY ABORTED", "ABORTED COMMAND",
"EQUAL", "VOLUME OVERFLOW",
"MISCOMPARE", "RESERVED" };
static const unsigned char *SenseErrors[] = {
"NO SENSE", "RECOVERED ERROR",
"NOT READY", "MEDIUM ERROR",
"HARDWARE ERROR", "ILLEGAL REQUEST",
"UNIT ATTENTION", "DATA PROTECT",
"BLANK CHECK", "VENDOR-SPECIFIC",
"COPY ABORTED", "ABORTED COMMAND",
"EQUAL", "VOLUME OVERFLOW",
"MISCOMPARE", "RESERVED"
};
unsigned char *CommandName = "UNKNOWN";
switch (Command->CommandType)
{
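
Why moving these tables to static const shrinks the object size, in a minimal illustration (not from this driver):

void demo(void)
{
	/* automatic: compiler emits stores to rebuild this on every call */
	const char *per_call[] = { "NO SENSE", "RECOVERED ERROR" };
	/* static const: emitted once into .rodata, only referenced here */
	static const char *shared[] = { "NO SENSE", "RECOVERED ERROR" };

	(void)per_call;
	(void)shared;
}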

drivers/block/pktcdvd.c

@ -2740,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
ret = -ENOMEM;
disk = alloc_disk(1);
if (!disk)
goto out_mem;

drivers/md/bcache/Kconfig

@ -2,7 +2,7 @@
config BCACHE
tristate "Block device as cache"
select CRC64
---help---
help
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
@ -11,7 +11,7 @@ config BCACHE
config BCACHE_DEBUG
bool "Bcache debugging"
depends on BCACHE
---help---
help
Don't select this option unless you're a developer
Enables extra debugging tools, allows expensive runtime checks to be
@ -21,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG
bool "Debug closures"
depends on BCACHE
select DEBUG_FS
---help---
help
Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous
operations that get stuck.

drivers/md/bcache/alloc.c

@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
unsigned i;
unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
unsigned int i;
int r;
atomic_sub(sectors, &c->rescale);
@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
#define bucket_prio(b) \
({ \
unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
\
(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
})
@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
while (!fifo_full(&ca->free_inc)) {
size_t n;
get_random_bytes(&n, sizeof(n));
n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
@ -301,7 +302,7 @@ do { \
static int bch_allocator_push(struct cache *ca, long bucket)
{
unsigned i;
unsigned int i;
/* Prios/gens are actually the most important reserve */
if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@ -385,7 +386,7 @@ out:
/* Allocation */
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
DEFINE_WAIT(w);
struct bucket *b;
@ -421,7 +422,7 @@ out:
if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
unsigned j;
unsigned int j;
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
unsigned i;
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
__bch_bucket_free(PTR_CACHE(c, k, i),
PTR_BUCKET(c, k, i));
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait)
{
int i;
@ -510,10 +511,11 @@ err:
return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait)
{
int ret;
mutex_lock(&c->bucket_lock);
ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
mutex_unlock(&c->bucket_lock);
@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct open_bucket {
struct list_head list;
unsigned last_write_point;
unsigned sectors_free;
unsigned int last_write_point;
unsigned int sectors_free;
BKEY_PADDED(key);
};
@ -556,7 +558,7 @@ struct open_bucket {
*/
static struct open_bucket *pick_data_bucket(struct cache_set *c,
const struct bkey *search,
unsigned write_point,
unsigned int write_point,
struct bkey *alloc)
{
struct open_bucket *ret, *ret_task = NULL;
@ -595,12 +597,16 @@ found:
*
* If s->writeback is true, will not fail.
*/
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
unsigned write_point, unsigned write_prio, bool wait)
bool bch_alloc_sectors(struct cache_set *c,
struct bkey *k,
unsigned int sectors,
unsigned int write_point,
unsigned int write_prio,
bool wait)
{
struct open_bucket *b;
BKEY_PADDED(key) alloc;
unsigned i;
unsigned int i;
/*
* We might have to allocate a new bucket, which we can't do with a
@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
spin_lock(&c->data_bucket_lock);
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
unsigned watermark = write_prio
unsigned int watermark = write_prio
? RESERVE_MOVINGGC
: RESERVE_NONE;
@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
if (!b)
return -ENOMEM;

drivers/md/bcache/bcache.h

@ -252,7 +252,7 @@ struct bcache_device {
struct kobject kobj;
struct cache_set *c;
unsigned id;
unsigned int id;
#define BCACHEDEVNAME_SIZE 12
char name[BCACHEDEVNAME_SIZE];
@ -264,18 +264,19 @@ struct bcache_device {
#define BCACHE_DEV_UNLINK_DONE 2
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
unsigned nr_stripes;
unsigned stripe_size;
unsigned int nr_stripes;
unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
struct bio_set bio_split;
unsigned data_csum:1;
unsigned int data_csum:1;
int (*cache_miss)(struct btree *, struct search *,
struct bio *, unsigned);
int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
int (*cache_miss)(struct btree *b, struct search *s,
struct bio *bio, unsigned int sectors);
int (*ioctl)(struct bcache_device *d, fmode_t mode,
unsigned int cmd, unsigned long arg);
};
struct io {
@ -284,7 +285,7 @@ struct io {
struct list_head lru;
unsigned long jiffies;
unsigned sequential;
unsigned int sequential;
sector_t last;
};
@ -358,18 +359,18 @@ struct cached_dev {
struct cache_accounting accounting;
/* The rest of this all shows up in sysfs */
unsigned sequential_cutoff;
unsigned readahead;
unsigned int sequential_cutoff;
unsigned int readahead;
unsigned io_disable:1;
unsigned verify:1;
unsigned bypass_torture_test:1;
unsigned int io_disable:1;
unsigned int verify:1;
unsigned int bypass_torture_test:1;
unsigned partial_stripes_expensive:1;
unsigned writeback_metadata:1;
unsigned writeback_running:1;
unsigned int partial_stripes_expensive:1;
unsigned int writeback_metadata:1;
unsigned int writeback_running:1;
unsigned char writeback_percent;
unsigned writeback_delay;
unsigned int writeback_delay;
uint64_t writeback_rate_target;
int64_t writeback_rate_proportional;
@ -377,16 +378,16 @@ struct cached_dev {
int64_t writeback_rate_integral_scaled;
int32_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
unsigned writeback_rate_i_term_inverse;
unsigned writeback_rate_p_term_inverse;
unsigned writeback_rate_minimum;
unsigned int writeback_rate_update_seconds;
unsigned int writeback_rate_i_term_inverse;
unsigned int writeback_rate_p_term_inverse;
unsigned int writeback_rate_minimum;
enum stop_on_failure stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
atomic_t io_errors;
unsigned error_limit;
unsigned offline_seconds;
unsigned int error_limit;
unsigned int offline_seconds;
char backing_dev_name[BDEVNAME_SIZE];
};
@ -447,7 +448,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
unsigned invalidate_needs_gc;
unsigned int invalidate_needs_gc;
bool discard; /* Get rid of? */
@ -472,7 +473,7 @@ struct gc_stat {
size_t nkeys;
uint64_t data; /* sectors */
unsigned in_use; /* percent */
unsigned int in_use; /* percent */
};
/*
@ -518,7 +519,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
unsigned devices_max_used;
unsigned int devices_max_used;
atomic_t attached_dev_nr;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
@ -548,7 +549,7 @@ struct cache_set {
* Default number of pages for a new btree node - may be less than a
* full bucket
*/
unsigned btree_pages;
unsigned int btree_pages;
/*
* Lists of struct btrees; lru is the list for structs that have memory
@ -571,7 +572,7 @@ struct cache_set {
struct list_head btree_cache_freed;
/* Number of elements in btree_cache + btree_cache_freeable lists */
unsigned btree_cache_used;
unsigned int btree_cache_used;
/*
* If we need to allocate memory for a new btree node and that
@ -613,8 +614,8 @@ struct cache_set {
uint16_t min_prio;
/*
* max(gen - last_gc) for all buckets. When it gets too big we have to gc
* to keep gens from wrapping around.
* max(gen - last_gc) for all buckets. When it gets too big we have to
* gc to keep gens from wrapping around.
*/
uint8_t need_gc;
struct gc_stat gc_stats;
@ -649,7 +650,7 @@ struct cache_set {
struct mutex verify_lock;
#endif
unsigned nr_uuids;
unsigned int nr_uuids;
struct uuid_entry *uuids;
BKEY_PADDED(uuid_bucket);
struct closure uuid_write;
@ -670,12 +671,12 @@ struct cache_set {
struct journal journal;
#define CONGESTED_MAX 1024
unsigned congested_last_us;
unsigned int congested_last_us;
atomic_t congested;
/* The rest of this all shows up in sysfs */
unsigned congested_read_threshold_us;
unsigned congested_write_threshold_us;
unsigned int congested_read_threshold_us;
unsigned int congested_write_threshold_us;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
@ -694,16 +695,16 @@ struct cache_set {
ON_ERROR_PANIC,
} on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
unsigned error_limit;
unsigned error_decay;
unsigned int error_limit;
unsigned int error_decay;
unsigned short journal_delay_ms;
bool expensive_debug_checks;
unsigned verify:1;
unsigned key_merging_disabled:1;
unsigned gc_always_rewrite:1;
unsigned shrinker_disabled:1;
unsigned copy_gc_enabled:1;
unsigned int verify:1;
unsigned int key_merging_disabled:1;
unsigned int gc_always_rewrite:1;
unsigned int shrinker_disabled:1;
unsigned int copy_gc_enabled:1;
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@ -712,7 +713,7 @@ struct cache_set {
};
struct bbio {
unsigned submit_time_us;
unsigned int submit_time_us;
union {
struct bkey key;
uint64_t _pad[3];
@ -729,10 +730,10 @@ struct bbio {
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
#define btree_default_blocks(c) \
((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
@ -761,21 +762,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
static inline struct cache *PTR_CACHE(struct cache_set *c,
const struct bkey *k,
unsigned ptr)
unsigned int ptr)
{
return c->cache[PTR_DEV(k, ptr)];
}
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
const struct bkey *k,
unsigned ptr)
unsigned int ptr)
{
return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
const struct bkey *k,
unsigned ptr)
unsigned int ptr)
{
return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
@ -783,17 +784,18 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
static inline uint8_t gen_after(uint8_t a, uint8_t b)
{
uint8_t r = a - b;
return r > 128U ? 0 : r;
}
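
gen_after() compares 8-bit generations that may wrap: with a = 5 and b = 250, r = (uint8_t)(5 - 250) = 11, so a is reported as 11 generations newer than b despite being numerically smaller; with a = 250 and b = 5, r = 245 > 128 and the result is clamped to 0 (a is not after b).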
static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
unsigned i)
unsigned int i)
{
return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}
static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
unsigned i)
unsigned int i)
{
return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}
@ -879,16 +881,16 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
#define BUCKET_GC_GEN_MAX 96U
#define kobj_attribute_write(n, fn) \
static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
#define kobj_attribute_rw(n, show, store) \
static struct kobj_attribute ksysfs_##n = \
__ATTR(n, S_IWUSR|S_IRUSR, show, store)
__ATTR(n, 0600, show, store)
static inline void wake_up_allocators(struct cache_set *c)
{
struct cache *ca;
unsigned i;
unsigned int i;
for_each_cache(ca, c, i)
wake_up_process(ca->alloc_thread);
@ -924,40 +926,43 @@ static inline void wait_for_kthread_stop(void)
/* Forward declarations */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
const char *);
void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
void bch_count_io_errors(struct cache *ca, blk_status_t error,
int is_read, const char *m);
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
blk_status_t error, const char *m);
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
blk_status_t error, const char *m);
void bch_bbio_free(struct bio *bio, struct cache_set *c);
struct bio *bch_bbio_alloc(struct cache_set *c);
void __bch_submit_bbio(struct bio *, struct cache_set *);
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
struct bkey *k, unsigned int ptr);
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
void bch_rescale_priorities(struct cache_set *c, int sectors);
bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);
void __bch_bucket_free(struct cache *ca, struct bucket *b);
void bch_bucket_free(struct cache_set *c, struct bkey *k);
long bch_bucket_alloc(struct cache *, unsigned, bool);
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
struct bkey *, int, bool);
int bch_bucket_alloc_set(struct cache_set *, unsigned,
struct bkey *, int, bool);
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
unsigned, unsigned, bool);
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait);
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait);
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
unsigned int sectors, unsigned int write_point,
unsigned int write_prio, bool wait);
bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
bool bch_cache_set_error(struct cache_set *, const char *, ...);
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
void bch_prio_write(struct cache *);
void bch_write_bdev_super(struct cached_dev *, struct closure *);
void bch_prio_write(struct cache *ca);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
extern struct mutex bch_register_lock;
@ -969,30 +974,31 @@ extern struct kobj_type bch_cache_set_ktype;
extern struct kobj_type bch_cache_set_internal_ktype;
extern struct kobj_type bch_cache_ktype;
void bch_cached_dev_release(struct kobject *);
void bch_flash_dev_release(struct kobject *);
void bch_cache_set_release(struct kobject *);
void bch_cache_release(struct kobject *);
void bch_cached_dev_release(struct kobject *kobj);
void bch_flash_dev_release(struct kobject *kobj);
void bch_cache_set_release(struct kobject *kobj);
void bch_cache_release(struct kobject *kobj);
int bch_uuid_write(struct cache_set *);
void bcache_write_super(struct cache_set *);
int bch_uuid_write(struct cache_set *c);
void bcache_write_super(struct cache_set *c);
int bch_flash_dev_create(struct cache_set *c, uint64_t size);
int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bcache_device_stop(struct bcache_device *);
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
uint8_t *set_uuid);
void bch_cached_dev_detach(struct cached_dev *dc);
void bch_cached_dev_run(struct cached_dev *dc);
void bcache_device_stop(struct bcache_device *d);
void bch_cache_set_unregister(struct cache_set *);
void bch_cache_set_stop(struct cache_set *);
void bch_cache_set_unregister(struct cache_set *c);
void bch_cache_set_stop(struct cache_set *c);
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
void bch_moving_init_cache_set(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);
void bch_open_buckets_free(struct cache_set *);
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
void bch_btree_cache_free(struct cache_set *c);
int bch_btree_cache_alloc(struct cache_set *c);
void bch_moving_init_cache_set(struct cache_set *c);
int bch_open_buckets_alloc(struct cache_set *c);
void bch_open_buckets_free(struct cache_set *c);
int bch_cache_allocator_start(struct cache *ca);

drivers/md/bcache/bset.c

@ -18,31 +18,31 @@
#ifdef CONFIG_BCACHE_DEBUG
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
{
struct bkey *k, *next;
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
printk(KERN_ERR "block %u key %u/%u: ", set,
(unsigned) ((u64 *) k - i->d), i->keys);
pr_err("block %u key %u/%u: ", set,
(unsigned int) ((u64 *) k - i->d), i->keys);
if (b->ops->key_dump)
b->ops->key_dump(b, k);
else
printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
if (next < bset_bkey_last(i) &&
bkey_cmp(k, b->ops->is_extents ?
&START_KEY(next) : next) > 0)
printk(KERN_ERR "Key skipped backwards\n");
pr_err("Key skipped backwards\n");
}
}
void bch_dump_bucket(struct btree_keys *b)
{
unsigned i;
unsigned int i;
console_lock();
for (i = 0; i <= b->nsets; i++)
@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned ret = 0;
unsigned int ret = 0;
struct btree_iter iter;
struct bkey *k;
@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
/* Keylists */
int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
{
size_t oldsize = bch_keylist_nkeys(l);
size_t newsize = oldsize + u64s;
@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
/* Key/pointer manipulation */
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
unsigned i)
unsigned int i)
{
BUG_ON(i > KEY_PTRS(src));
@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
unsigned i, len = 0;
unsigned int i, len = 0;
if (bkey_cmp(where, &START_KEY(k)) <= 0)
return false;
@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
unsigned len = 0;
unsigned int len = 0;
if (bkey_cmp(where, k) >= 0)
return false;
@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
struct bkey_float {
unsigned exponent:BKEY_EXPONENT_BITS;
unsigned m:BKEY_MID_BITS;
unsigned mantissa:BKEY_MANTISSA_BITS;
unsigned int exponent:BKEY_EXPONENT_BITS;
unsigned int m:BKEY_MID_BITS;
unsigned int mantissa:BKEY_MANTISSA_BITS;
} __packed;
/*
@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b)
}
EXPORT_SYMBOL(bch_btree_keys_free);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
int bch_btree_keys_alloc(struct btree_keys *b,
unsigned int page_order,
gfp_t gfp)
{
struct bset_tree *t = b->set;
@ -345,7 +347,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks)
{
unsigned i;
unsigned int i;
b->ops = ops;
b->expensive_debug_checks = expensive_debug_checks;
@ -370,7 +372,7 @@ EXPORT_SYMBOL(bch_btree_keys_init);
* Return the array index next to j when doing an in-order traversal
* of a binary tree stored in a linear array
*/
static unsigned inorder_next(unsigned j, unsigned size)
static unsigned int inorder_next(unsigned int j, unsigned int size)
{
if (j * 2 + 1 < size) {
j = j * 2 + 1;
@ -387,7 +389,7 @@ static unsigned inorder_next(unsigned j, unsigned size)
* Return the array index previous to j when doing an in-order traversal
* of a binary tree stored in a linear array
*/
static unsigned inorder_prev(unsigned j, unsigned size)
static unsigned int inorder_prev(unsigned int j, unsigned int size)
{
if (j * 2 < size) {
j = j * 2;
@ -400,7 +402,8 @@ static unsigned inorder_prev(unsigned j, unsigned size)
return j;
}
/* I have no idea why this code works... and I'm the one who wrote it
/*
* I have no idea why this code works... and I'm the one who wrote it
*
* However, I do know what it does:
* Given a binary tree constructed in an array (i.e. how you normally implement
@ -413,10 +416,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
* extra is a function of size:
* extra = (size - rounddown_pow_of_two(size - 1)) << 1;
*/
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
static unsigned int __to_inorder(unsigned int j,
unsigned int size,
unsigned int extra)
{
unsigned b = fls(j);
unsigned shift = fls(size - 1) - b;
unsigned int b = fls(j);
unsigned int shift = fls(size - 1) - b;
j ^= 1U << (b - 1);
j <<= 1;
@ -433,14 +438,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
* Return the cacheline index in bset_tree->data, where j is an index
* into the linear array which stores the auxiliary binary tree
*/
static unsigned to_inorder(unsigned j, struct bset_tree *t)
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
{
return __to_inorder(j, t->size, t->extra);
}
static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
static unsigned int __inorder_to_tree(unsigned int j,
unsigned int size,
unsigned int extra)
{
unsigned shift;
unsigned int shift;
if (j > extra)
j += j - extra;
@ -457,7 +464,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
* Return an index into the linear array which stores the auxiliary
* binary tree; j is the cacheline index of t->data.
*/
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
{
return __inorder_to_tree(j, t->size, t->extra);
}
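
A worked example of the indexing these helpers implement, derived by hand from the upstream code (the descending branch of inorder_next() is truncated in this view): for size = 7, i.e. nodes 1..6 of a 1-indexed array-packed tree where node j has children 2j and 2j+1, the in-order sequence is 4, 2, 5, 1, 6, 3. Starting from j = rounddown_pow_of_two(size - 1) = 4 (the leftmost leaf), successive inorder_next(j, 7) calls return 2, 5, 1, 6, 3 and finally 0, and inorder_prev() walks the same sequence backwards.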
@ -468,14 +475,15 @@ void inorder_test(void)
unsigned long done = 0;
ktime_t start = ktime_get();
for (unsigned size = 2;
for (unsigned int size = 2;
size < 65536000;
size++) {
unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
unsigned i = 1, j = rounddown_pow_of_two(size - 1);
unsigned int extra =
(size - rounddown_pow_of_two(size - 1)) << 1;
unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
if (!(size % 4096))
printk(KERN_NOTICE "loop %u, %llu per us\n", size,
pr_notice("loop %u, %llu per us\n", size,
done / ktime_us_delta(ktime_get(), start));
while (1) {
@ -518,30 +526,31 @@ void inorder_test(void)
* of the previous key so we can walk backwards to it from t->tree[j]'s key.
*/
static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
unsigned offset)
static struct bkey *cacheline_to_bkey(struct bset_tree *t,
unsigned int cacheline,
unsigned int offset)
{
return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}
static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}
static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
unsigned cacheline,
static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
unsigned int cacheline,
struct bkey *k)
{
return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
{
return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
{
return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
@ -550,7 +559,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
* For the write set - the one we're currently inserting keys into - we don't
* maintain a full search tree, we just keep a simple lookup table in t->prev.
*/
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
{
return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
@ -576,14 +585,15 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
* See make_bfloat() to check when the most significant bit of
* f->exponent is set or not.
*/
static inline unsigned bfloat_mantissa(const struct bkey *k,
static inline unsigned int bfloat_mantissa(const struct bkey *k,
struct bkey_float *f)
{
const uint64_t *p = &k->low - (f->exponent >> 6);
return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
static void make_bfloat(struct bset_tree *t, unsigned j)
static void make_bfloat(struct bset_tree *t, unsigned int j)
{
struct bkey_float *f = &t->tree[j];
struct bkey *m = tree_to_bkey(t, j);
@ -631,7 +641,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
if (t != b->set) {
unsigned j = roundup(t[-1].size,
unsigned int j = roundup(t[-1].size,
64 / sizeof(struct bkey_float));
t->tree = t[-1].tree + j;
@ -686,13 +696,13 @@ void bch_bset_build_written_tree(struct btree_keys *b)
{
struct bset_tree *t = bset_tree_last(b);
struct bkey *prev = NULL, *k = t->data->start;
unsigned j, cacheline = 1;
unsigned int j, cacheline = 1;
b->last_set_unwritten = 0;
bset_alloc_tree(b, t);
t->size = min_t(unsigned,
t->size = min_t(unsigned int,
bkey_to_cacheline(t, bset_bkey_last(t->data)),
b->set->tree + btree_keys_cachelines(b) - t->tree);
@ -732,7 +742,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
struct bset_tree *t;
unsigned inorder, j = 1;
unsigned int inorder, j = 1;
for (t = b->set; t <= bset_tree_last(b); t++)
if (k < bset_bkey_last(t->data))
@ -779,14 +789,15 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
struct bset_tree *t,
struct bkey *k)
{
unsigned shift = bkey_u64s(k);
unsigned j = bkey_to_cacheline(t, k);
unsigned int shift = bkey_u64s(k);
unsigned int j = bkey_to_cacheline(t, k);
/* We're getting called from btree_split() or btree_gc, just bail out */
if (!t->size)
return;
/* k is the key we just inserted; we need to find the entry in the
/*
* k is the key we just inserted; we need to find the entry in the
* lookup table for the first key that is strictly greater than k:
* it's either k's cacheline or the next one
*/
@ -794,7 +805,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
table_to_bkey(t, j) <= k)
j++;
/* Adjust all the lookup table entries, and find a new key for any that
/*
* Adjust all the lookup table entries, and find a new key for any that
* have gotten too big
*/
for (; j < t->size; j++) {
@ -819,7 +831,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
k != bset_bkey_last(t->data);
k = bkey_next(k))
if (t->size == bkey_to_cacheline(t, k)) {
t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
t->prev[t->size] =
bkey_to_cacheline_offset(t, t->size, k);
t->size++;
}
}
@ -867,10 +880,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
}
EXPORT_SYMBOL(bch_bset_insert);
unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key)
{
unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
@ -922,10 +935,10 @@ struct bset_search_iter {
static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
const struct bkey *search)
{
unsigned li = 0, ri = t->size;
unsigned int li = 0, ri = t->size;
while (li + 1 != ri) {
unsigned m = (li + ri) >> 1;
unsigned int m = (li + ri) >> 1;
if (bkey_cmp(table_to_bkey(t, m), search) > 0)
ri = m;
@ -944,7 +957,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
{
struct bkey *l, *r;
struct bkey_float *f;
unsigned inorder, j, n = 1;
unsigned int inorder, j, n = 1;
do {
/*
@ -958,7 +971,8 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
* p = 0;
* but a branch instruction is avoided.
*/
unsigned p = n << 4;
unsigned int p = n << 4;
p &= ((int) (p - t->size)) >> 31;
prefetch(&t->tree[p]);
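
The masking line above is a branchless bounds clamp: if p >= t->size, then (int)(p - t->size) is non-negative, the arithmetic right shift by 31 yields 0, and p becomes 0 (a safe in-bounds index); if p < t->size, the difference is negative, the shift yields all ones, and p is left unchanged. Either way the prefetch address stays inside t->tree[] without a conditional branch.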
@ -978,7 +992,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
* to work - that's done in make_bfloat()
*/
if (likely(f->exponent != 127))
n = j * 2 + (((unsigned)
n = j * 2 + (((unsigned int)
(f->mantissa -
bfloat_mantissa(search, f))) >> 31);
else
@ -1109,6 +1123,7 @@ static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
struct bset_tree *start)
{
struct bkey *ret = NULL;
iter->size = ARRAY_SIZE(iter->data);
iter->used = 0;
@ -1184,7 +1199,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state)
mempool_exit(&state->pool);
}
int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
int bch_bset_sort_state_init(struct bset_sort_state *state,
unsigned int page_order)
{
spin_lock_init(&state->time.lock);
@ -1237,7 +1253,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
}
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
unsigned start, unsigned order, bool fixup,
unsigned int start, unsigned int order, bool fixup,
struct bset_sort_state *state)
{
uint64_t start_time;
@ -1288,7 +1304,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
bch_time_stats_update(&state->time, start_time);
}
void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
@ -1298,7 +1314,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned i;
unsigned int i;
for (i = start; i <= b->nsets; i++)
keys += b->set[i].data->keys;
@ -1323,8 +1339,8 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
struct btree_iter iter;
bch_btree_iter_init(b, &iter, NULL);
btree_mergesort(b, new->set->data, &iter, false, true);
@ -1338,7 +1354,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
unsigned crit = SORT_CRIT;
unsigned int crit = SORT_CRIT;
int i;
/* Don't sort if nothing to do */
@ -1367,7 +1383,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
unsigned i;
unsigned int i;
for (i = 0; i <= b->nsets; i++) {
struct bset_tree *t = &b->set[i];

drivers/md/bcache/bset.h

@ -163,10 +163,10 @@ struct bset_tree {
*/
/* size of the binary tree and prev array */
unsigned size;
unsigned int size;
/* function of size - precalculated for to_inorder() */
unsigned extra;
unsigned int extra;
/* copy of the last key in the set */
struct bkey end;
@ -187,18 +187,25 @@ struct bset_tree {
};
struct btree_keys_ops {
bool (*sort_cmp)(struct btree_iter_set,
struct btree_iter_set);
struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
bool (*insert_fixup)(struct btree_keys *, struct bkey *,
struct btree_iter *, struct bkey *);
bool (*key_invalid)(struct btree_keys *,
const struct bkey *);
bool (*key_bad)(struct btree_keys *, const struct bkey *);
bool (*key_merge)(struct btree_keys *,
struct bkey *, struct bkey *);
void (*key_to_text)(char *, size_t, const struct bkey *);
void (*key_dump)(struct btree_keys *, const struct bkey *);
bool (*sort_cmp)(struct btree_iter_set l,
struct btree_iter_set r);
struct bkey *(*sort_fixup)(struct btree_iter *iter,
struct bkey *tmp);
bool (*insert_fixup)(struct btree_keys *b,
struct bkey *insert,
struct btree_iter *iter,
struct bkey *replace_key);
bool (*key_invalid)(struct btree_keys *bk,
const struct bkey *k);
bool (*key_bad)(struct btree_keys *bk,
const struct bkey *k);
bool (*key_merge)(struct btree_keys *bk,
struct bkey *l, struct bkey *r);
void (*key_to_text)(char *buf,
size_t size,
const struct bkey *k);
void (*key_dump)(struct btree_keys *keys,
const struct bkey *k);
/*
* Only used for deciding whether to use START_KEY(k) or just the key
@ -211,7 +218,7 @@ struct btree_keys {
const struct btree_keys_ops *ops;
uint8_t page_order;
uint8_t nsets;
unsigned last_set_unwritten:1;
unsigned int last_set_unwritten:1;
bool *expensive_debug_checks;
/*
@ -239,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}
static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
static inline unsigned int bset_byte_offset(struct btree_keys *b,
struct bset *i)
{
return ((size_t) i) - ((size_t) b->set->data);
}
static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
static inline unsigned int bset_sector_offset(struct btree_keys *b,
struct bset *i)
{
return bset_byte_offset(b, i) >> 9;
}
@ -273,25 +282,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
}
static inline struct bset *bset_next_set(struct btree_keys *b,
unsigned block_bytes)
unsigned int block_bytes)
{
struct bset *i = bset_tree_last(b)->data;
return ((void *) i) + roundup(set_bytes(i), block_bytes);
}
void bch_btree_keys_free(struct btree_keys *);
int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
bool *);
void bch_btree_keys_free(struct btree_keys *b);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
gfp_t gfp);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks);
void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
void bch_bset_build_written_tree(struct btree_keys *);
void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
struct bkey *);
void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
void bch_bset_build_written_tree(struct btree_keys *b);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
struct bkey *insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key);
enum {
BTREE_INSERT_STATUS_NO_INSERT = 0,
@ -313,18 +324,21 @@ struct btree_iter {
} data[MAX_BSETS];
};
typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
struct btree_keys *, ptr_filter_fn);
struct bkey *bch_btree_iter_next(struct btree_iter *iter);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
struct btree_keys *b,
ptr_filter_fn fn);
void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
struct bkey *);
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
struct bkey *bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
const struct bkey *);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);
/*
* Returns the first key that is strictly greater than search
@ -349,21 +363,23 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
struct bset_sort_state {
mempool_t pool;
unsigned page_order;
unsigned crit_factor;
unsigned int page_order;
unsigned int crit_factor;
struct time_stats time;
};
void bch_bset_sort_state_free(struct bset_sort_state *);
int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
struct bset_sort_state *);
void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
struct bset_sort_state *);
void bch_btree_sort_partial(struct btree_keys *, unsigned,
struct bset_sort_state *);
void bch_bset_sort_state_free(struct bset_sort_state *state);
int bch_bset_sort_state_init(struct bset_sort_state *state,
unsigned int page_order);
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
struct btree_iter *iter,
struct bset_sort_state *state);
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state);
static inline void bch_btree_sort(struct btree_keys *b,
struct bset_sort_state *state)
@ -377,13 +393,13 @@ struct bset_stats {
size_t floats, failed;
};
void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
/* Bkey utility code */
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
return bkey_idx(i->start, idx);
}
@ -401,10 +417,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
unsigned);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
unsigned int i);
bool __bch_cut_front(const struct bkey *where, struct bkey *k);
bool __bch_cut_back(const struct bkey *where, struct bkey *k);
static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
@ -522,18 +538,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
int __bch_keylist_realloc(struct keylist *, unsigned);
struct bkey *bch_keylist_pop(struct keylist *l);
void bch_keylist_pop_front(struct keylist *l);
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
/* Debug stuff */
#ifdef CONFIG_BCACHE_DEBUG
int __bch_count_data(struct btree_keys *);
void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
void bch_dump_bucket(struct btree_keys *);
int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
const char *fmt,
...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);
#else
@ -541,7 +559,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
#endif


@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)
void bkey_put(struct cache_set *c, struct bkey *k)
{
unsigned i;
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i))
@ -287,6 +287,7 @@ err:
static void btree_node_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
}
@ -435,7 +436,10 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
/* No problem for multipage bvec since the bio is just allocated */
/*
* No problem for multipage bvec since the bio is
* just allocated
*/
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@ -479,7 +483,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
void bch_btree_node_write(struct btree *b, struct closure *parent)
{
unsigned nsets = b->keys.nsets;
unsigned int nsets = b->keys.nsets;
lockdep_assert_held(&b->lock);
@ -581,7 +585,7 @@ static void mca_bucket_free(struct btree *b)
list_move(&b->list, &b->c->btree_cache_freeable);
}
static unsigned btree_order(struct bkey *k)
static unsigned int btree_order(struct bkey *k)
{
return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}
@ -589,7 +593,7 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
if (!bch_btree_keys_alloc(&b->keys,
max_t(unsigned,
max_t(unsigned int,
ilog2(b->c->btree_pages),
btree_order(k)),
gfp)) {
@ -604,6 +608,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
struct bkey *k, gfp_t gfp)
{
struct btree *b = kzalloc(sizeof(struct btree), gfp);
if (!b)
return NULL;
@ -620,7 +625,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b;
}
static int mca_reap(struct btree *b, unsigned min_order, bool flush)
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
struct closure cl;
@ -746,6 +751,7 @@ void bch_btree_cache_free(struct cache_set *c)
{
struct btree *b;
struct closure cl;
closure_init_stack(&cl);
if (c->shrink.list.next)
@ -786,7 +792,7 @@ void bch_btree_cache_free(struct cache_set *c)
int bch_btree_cache_alloc(struct cache_set *c)
{
unsigned i;
unsigned int i;
for (i = 0; i < mca_reserve(c); i++)
if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@ -1124,6 +1130,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
struct btree_op *op)
{
struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
if (!IS_ERR_OR_NULL(n)) {
mutex_lock(&n->write_lock);
bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
@ -1136,7 +1143,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
unsigned i;
unsigned int i;
mutex_lock(&b->c->bucket_lock);
@ -1157,7 +1164,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
struct cache_set *c = b->c;
struct cache *ca;
unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
mutex_lock(&c->bucket_lock);
@ -1181,7 +1188,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
struct bkey *k)
{
uint8_t stale = 0;
unsigned i;
unsigned int i;
struct bucket *g;
/*
@ -1219,7 +1226,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
SET_GC_SECTORS_USED(g, min_t(unsigned int,
GC_SECTORS_USED(g) + KEY_SIZE(k),
MAX_GC_SECTORS_USED));
@ -1233,7 +1240,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
unsigned i;
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
@ -1259,7 +1266,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
unsigned keys = 0, good_keys = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter iter;
struct bset_tree *t;
@ -1302,16 +1309,18 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
struct gc_merge_info {
struct btree *b;
unsigned keys;
unsigned int keys;
};
static int bch_btree_insert_node(struct btree *, struct btree_op *,
struct keylist *, atomic_t *, struct bkey *);
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
struct keylist *insert_keys,
atomic_t *journal_ref,
struct bkey *replace_key);
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
struct gc_stat *gc, struct gc_merge_info *r)
{
unsigned i, nodes = 0, keys = 0, blocks;
unsigned int i, nodes = 0, keys = 0, blocks;
struct btree *new_nodes[GC_MERGE_NODES];
struct keylist keylist;
struct closure cl;
@ -1511,11 +1520,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return -EINTR;
}
static unsigned btree_gc_count_keys(struct btree *b)
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
struct btree_iter iter;
unsigned ret = 0;
unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
@ -1678,7 +1687,7 @@ static void btree_gc_start(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
unsigned i;
unsigned int i;
if (!c->gc_mark_valid)
return;
@ -1704,7 +1713,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
{
struct bucket *b;
struct cache *ca;
unsigned i;
unsigned int i;
mutex_lock(&c->bucket_lock);
@ -1722,7 +1731,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
unsigned j;
unsigned int j;
if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
continue;
@ -1814,7 +1823,7 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c)
{
struct cache *ca;
unsigned i;
unsigned int i;
for_each_cache(ca, c, i)
if (ca->invalidate_needs_gc)
@ -1905,7 +1914,7 @@ void bch_initial_gc_finish(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
unsigned i;
unsigned int i;
bch_btree_gc_finish(c);
@ -1945,7 +1954,7 @@ void bch_initial_gc_finish(struct cache_set *c)
static bool btree_insert_key(struct btree *b, struct bkey *k,
struct bkey *replace_key)
{
unsigned status;
unsigned int status;
BUG_ON(bkey_cmp(k, &b->key) > 0);
@ -2044,7 +2053,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
if (split) {
unsigned keys = 0;
unsigned int keys = 0;
trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
@ -2222,10 +2231,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
rw_lock(true, b, b->level);
if (b->key.ptr[0] != btree_ptr ||
b->seq != seq + 1) {
b->seq != seq + 1) {
op->lock = b->level;
goto out;
}
}
}
SET_KEY_PTRS(check_key, 1);
@ -2300,7 +2309,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
void bch_btree_set_root(struct btree *b)
{
unsigned i;
unsigned int i;
struct closure cl;
closure_init_stack(&cl);
@ -2412,7 +2421,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
struct refill {
struct btree_op op;
unsigned nr_found;
unsigned int nr_found;
struct keybuf *buf;
struct bkey *end;
keybuf_pred_fn *pred;
@ -2488,6 +2497,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
if (!RB_EMPTY_ROOT(&buf->keys)) {
struct keybuf_key *w;
w = RB_FIRST(&buf->keys, struct keybuf_key, node);
buf->start = START_KEY(&w->key);
@ -2519,6 +2529,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
{
bool ret = false;
struct keybuf_key *p, *w, s;
s.key = *start;
if (bkey_cmp(end, &buf->start) <= 0 ||
@ -2545,6 +2556,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
struct keybuf_key *w;
spin_lock(&buf->lock);
w = RB_FIRST(&buf->keys, struct keybuf_key, node);


@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
return bset_tree_last(&b->keys)->data;
}
static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}
@ -213,7 +213,7 @@ struct btree_op {
/* Btree level at which we start taking write locks */
short lock;
unsigned insert_collision:1;
unsigned int insert_collision:1;
};
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b)
(w ? up_write : up_read)(&b->lock);
}
void bch_btree_node_read_done(struct btree *);
void __bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_read_done(struct btree *b);
void __bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_set_root(struct btree *);
struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
int, bool, struct btree *);
struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
struct bkey *, int, bool, struct btree *);
void bch_btree_set_root(struct btree *b);
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int level, bool wait,
struct btree *parent);
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write,
struct btree *parent);
int bch_btree_insert_check_key(struct btree *, struct btree_op *,
struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *,
atomic_t *, struct bkey *);
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
struct bkey *check_key);
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
atomic_t *journal_ref, struct bkey *replace_key);
int bch_gc_thread_start(struct cache_set *);
void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
int bch_gc_thread_start(struct cache_set *c);
void bch_initial_gc_finish(struct cache_set *c);
void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
static inline void wake_up_gc(struct cache_set *c)
{
@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c)
#define MAP_END_KEY 1
typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
struct bkey *, btree_map_nodes_fn *, int);
typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn, int flags);
static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn)
@ -290,21 +292,23 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}
typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
struct bkey *);
int bch_btree_map_keys(struct btree_op *, struct cache_set *,
struct bkey *, btree_map_keys_fn *, int);
typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
struct bkey *k);
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_keys_fn *fn, int flags);
typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *,
struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
struct bkey *, keybuf_pred_fn *);
void bch_keybuf_init(struct keybuf *buf);
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
struct bkey *end, keybuf_pred_fn *pred);
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
struct bkey *end);
void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
struct keybuf *buf,
struct bkey *end,
keybuf_pred_fn *pred);
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Asynchronous refcounty things
*
@ -162,12 +163,13 @@ static struct dentry *closure_debug;
static int debug_seq_show(struct seq_file *f, void *data)
{
struct closure *cl;
spin_lock_irq(&closure_list_lock);
list_for_each_entry(cl, &closure_list, all) {
int r = atomic_read(&cl->remaining);
seq_printf(f, "%p: %pF -> %pf p %p r %i ",
seq_printf(f, "%p: %pS -> %pS p %p r %i ",
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
@ -177,7 +179,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
seq_printf(f, "\n");


@ -159,7 +159,7 @@ struct closure {
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
unsigned magic;
unsigned int magic;
struct list_head all;
unsigned long ip;
unsigned long waiting_on;
@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl)
}
/**
* closure_wake_up - wake up all closures on a wait list.
* closure_wake_up - wake up all closures on a wait list,
* with memory barrier
*/
static inline void closure_wake_up(struct closure_waitlist *list)
{
/* Memory barrier for the wait list */
smp_mb();
__closure_wake_up(list);
}
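
A minimal userspace sketch of the pairing this barrier documents, using C11 atomics (hypothetical names, not bcache code): the waker publishes the condition before scanning the wait list, the waiter publishes its list entry before re-checking the condition, so at least one side is guaranteed to observe the other.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool cond_done;		/* "work finished" flag */
static atomic_bool on_waitlist;		/* waiter has registered itself */

static void waker(void)			/* closure_wake_up() analogue */
{
	atomic_store_explicit(&cond_done, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() above */
	if (atomic_load_explicit(&on_waitlist, memory_order_relaxed)) {
		/* scan the wait list and wake the entry */
	}
}

static void waiter(void)
{
	atomic_store_explicit(&on_waitlist, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* paired full barrier */
	if (!atomic_load_explicit(&cond_done, memory_order_relaxed)) {
		/* safe to sleep: the waker must then see on_waitlist */
	}
}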


@ -67,34 +67,35 @@ void bch_btree_verify(struct btree *b)
if (inmemory->keys != sorted->keys ||
memcmp(inmemory->start,
sorted->start,
(void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
(void *) bset_bkey_last(inmemory) -
(void *) inmemory->start)) {
struct bset *i;
unsigned j;
unsigned int j;
console_lock();
printk(KERN_ERR "*** in memory:\n");
pr_err("*** in memory:\n");
bch_dump_bset(&b->keys, inmemory, 0);
printk(KERN_ERR "*** read back in:\n");
pr_err("*** read back in:\n");
bch_dump_bset(&v->keys, sorted, 0);
for_each_written_bset(b, ondisk, i) {
unsigned block = ((void *) i - (void *) ondisk) /
unsigned int block = ((void *) i - (void *) ondisk) /
block_bytes(b->c);
printk(KERN_ERR "*** on disk block %u:\n", block);
pr_err("*** on disk block %u:\n", block);
bch_dump_bset(&b->keys, i, block);
}
printk(KERN_ERR "*** block %zu not written\n",
pr_err("*** block %zu not written\n",
((void *) i - (void *) ondisk) / block_bytes(b->c));
for (j = 0; j < inmemory->keys; j++)
if (inmemory->d[j] != sorted->d[j])
break;
printk(KERN_ERR "b->written %u\n", b->written);
pr_err("b->written %u\n", b->written);
console_unlock();
panic("verify failed at %u\n", j);
@ -176,9 +177,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
while (size) {
struct keybuf_key *w;
unsigned bytes = min(i->bytes, size);
unsigned int bytes = min(i->bytes, size);
int err = copy_to_user(buf, i->buf, bytes);
if (err)
return err;
@ -237,8 +238,8 @@ void bch_debug_init_cache_set(struct cache_set *c)
{
if (!IS_ERR_OR_NULL(bcache_debug)) {
char name[50];
snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
&cache_set_debug_ops);
}


@ -8,8 +8,8 @@ struct cache_set;
#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_verify(struct btree *);
void bch_data_verify(struct cached_dev *, struct bio *);
void bch_btree_verify(struct btree *b);
void bch_data_verify(struct cached_dev *dc, struct bio *bio);
#define expensive_debug_checks(c) ((c)->expensive_debug_checks)
#define key_merging_disabled(c) ((c)->key_merging_disabled)
@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
#endif
#ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *);
void bch_debug_init_cache_set(struct cache_set *c);
#else
static inline void bch_debug_init_cache_set(struct cache_set *c) {}
#endif


@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
unsigned i;
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
unsigned i;
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
unsigned i = 0;
unsigned int i = 0;
char *out = buf, *end = buf + size;
#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
@ -126,22 +126,22 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
struct btree *b = container_of(keys, struct btree, keys);
unsigned j;
unsigned int j;
char buf[80];
bch_extent_to_text(buf, sizeof(buf), k);
printk(" %s", buf);
pr_err(" %s", buf);
for (j = 0; j < KEY_PTRS(k); j++) {
size_t n = PTR_BUCKET_NR(b->c, k, j);
printk(" bucket %zu", n);
pr_err(" bucket %zu", n);
if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
printk(" prio %i",
pr_err(" prio %i",
PTR_BUCKET(b->c, k, j)->prio);
}
printk(" %s\n", bch_ptr_status(b->c, k));
pr_err(" %s\n", bch_ptr_status(b->c, k));
}
/* Btree ptrs */
@ -166,12 +166,13 @@ bad:
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
return __bch_btree_ptr_invalid(b->c, k);
}
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
unsigned i;
unsigned int i;
char buf[80];
struct bucket *g;
@ -204,7 +205,7 @@ err:
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
unsigned i;
unsigned int i;
if (!bkey_cmp(k, &ZERO_KEY) ||
!KEY_PTRS(k) ||
@ -327,13 +328,14 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
struct cache_set *c = container_of(b, struct btree, keys)->c;
uint64_t old_offset;
unsigned old_size, sectors_found = 0;
unsigned int old_size, sectors_found = 0;
BUG_ON(!KEY_OFFSET(insert));
BUG_ON(!KEY_SIZE(insert));
while (1) {
struct bkey *k = bch_btree_iter_next(iter);
if (!k)
break;
@ -363,7 +365,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
* k might have been split since we inserted/found the
* key we're replacing
*/
unsigned i;
unsigned int i;
uint64_t offset = KEY_START(k) -
KEY_START(replace_key);
@ -498,11 +500,12 @@ bad:
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
return __bch_extent_invalid(b->c, k);
}
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
unsigned ptr)
unsigned int ptr)
{
struct bucket *g = PTR_BUCKET(b->c, k, ptr);
char buf[80];
@ -534,7 +537,7 @@ err:
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
unsigned i, stale;
unsigned int i, stale;
if (!KEY_PTRS(k) ||
bch_extent_invalid(bk, k))
@ -574,10 +577,12 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
~((uint64_t)1 << 63);
}
static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
static bool bch_extent_merge(struct btree_keys *bk,
struct bkey *l,
struct bkey *r)
{
struct btree *b = container_of(bk, struct btree, keys);
unsigned i;
unsigned int i;
if (key_merging_disabled(b->c))
return false;


@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops;
struct bkey;
struct cache_set;
void bch_extent_to_text(char *, size_t, const struct bkey *);
bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *);
bool __bch_extent_invalid(struct cache_set *, const struct bkey *);
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
#endif /* _BCACHE_EXTENTS_H */


@ -17,6 +17,7 @@
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
struct bbio *b = container_of(bio, struct bbio, bio);
mempool_free(b, &c->bio_meta);
}
@ -42,9 +43,10 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
struct bkey *k, unsigned ptr)
struct bkey *k, unsigned int ptr)
{
struct bbio *b = container_of(bio, struct bbio, bio);
bch_bkey_copy_single_ptr(&b->key, k, ptr);
__bch_submit_bbio(bio, c);
}
@ -52,7 +54,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
unsigned errors;
unsigned int errors;
WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@ -75,16 +77,16 @@ void bch_count_io_errors(struct cache *ca,
*/
if (ca->set->error_decay) {
unsigned count = atomic_inc_return(&ca->io_count);
unsigned int count = atomic_inc_return(&ca->io_count);
while (count > ca->set->error_decay) {
unsigned errors;
unsigned old = count;
unsigned new = count - ca->set->error_decay;
unsigned int errors;
unsigned int old = count;
unsigned int new = count - ca->set->error_decay;
/*
* First we subtract refresh from count; each time we
* succesfully do so, we rescale the errors once:
* successfully do so, we rescale the errors once:
*/
count = atomic_cmpxchg(&ca->io_count, old, new);
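
The loop this hunk touches is the standard lock-free decay pattern: retry the subtraction until no other CPU has raced the counter. A hypothetical standalone rendering with C11 atomics:

#include <stdatomic.h>

/* Subtract `decay` from *io_count once, retrying the CAS on contention
 * so concurrent increments are never lost (illustrative only). */
static unsigned int decay_once(atomic_uint *io_count, unsigned int decay)
{
	unsigned int count = atomic_load(io_count);

	while (count > decay) {
		unsigned int newval = count - decay;

		/* On failure, `count` is refreshed with the live value. */
		if (atomic_compare_exchange_weak(io_count, &count, newval))
			return newval;	/* one rescale step performed */
	}
	return count;
}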
@ -104,7 +106,7 @@ void bch_count_io_errors(struct cache *ca,
}
if (error) {
unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
&ca->io_errors);
errors >>= IO_ERROR_SHIFT;
@ -126,18 +128,18 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
struct cache *ca = PTR_CACHE(c, &b->key, 0);
int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio))
unsigned int threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
: c->congested_read_threshold_us;
if (threshold) {
unsigned t = local_clock_us();
unsigned int t = local_clock_us();
int us = t - b->submit_time_us;
int congested = atomic_read(&c->congested);
if (us > (int) threshold) {
int ms = us / 1024;
c->congested_last_us = t;
ms = min(ms, CONGESTED_MAX + congested);


@ -28,11 +28,12 @@
static void journal_read_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
unsigned bucket_index)
unsigned int bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
@ -40,7 +41,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
struct closure cl;
unsigned len, left, offset = 0;
unsigned int len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
@ -50,7 +51,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
@ -154,12 +155,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
})
struct cache *ca;
unsigned iter;
unsigned int iter;
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
unsigned i, l, r, m;
unsigned int i, l, r, m;
uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
@ -192,7 +193,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
l < ca->sb.njournal_buckets;
l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
l + 1))
if (read_bucket(l))
goto bsearch;
@ -304,7 +306,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
k < bset_bkey_last(&i->j);
k = bkey_next(k))
if (!__bch_extent_invalid(c, k)) {
unsigned j;
unsigned int j;
for (j = 0; j < KEY_PTRS(k); j++)
if (ptr_available(c, k, j))
@ -492,7 +494,7 @@ static void journal_reclaim(struct cache_set *c)
struct bkey *k = &c->journal.key;
struct cache *ca;
uint64_t last_seq;
unsigned iter, n = 0;
unsigned int iter, n = 0;
atomic_t p __maybe_unused;
atomic_long_inc(&c->reclaim);
@ -526,7 +528,7 @@ static void journal_reclaim(struct cache_set *c)
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
/* No space available on this device */
if (next == ja->discard_idx)
@ -580,7 +582,7 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *);
static void journal_write(struct closure *cl);
static void journal_write_done(struct closure *cl)
{
@ -609,11 +611,12 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
c->sb.block_size;
struct bio *bio;
struct bio_list list;
bio_list_init(&list);
if (!w->need_write) {
@ -705,7 +708,7 @@ static void journal_try_write(struct cache_set *c)
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
unsigned nkeys)
unsigned int nkeys)
__acquires(&c->journal.lock)
{
size_t sectors;


@ -110,7 +110,7 @@ struct journal {
struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
unsigned blocks_free;
unsigned int blocks_free;
uint64_t seq;
DECLARE_FIFO(atomic_t, pin);
@ -131,13 +131,13 @@ struct journal_device {
uint64_t seq[SB_JOURNAL_BUCKETS];
/* Journal bucket we're currently writing to */
unsigned cur_idx;
unsigned int cur_idx;
/* Last journal bucket that still contains an open journal entry */
unsigned last_idx;
unsigned int last_idx;
/* Next journal bucket to be discarded */
unsigned discard_idx;
unsigned int discard_idx;
#define DISCARD_READY 0
#define DISCARD_IN_FLIGHT 1
@ -167,14 +167,16 @@ struct cache_set;
struct btree_op;
struct keylist;
atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
void bch_journal_next(struct journal *);
void bch_journal_mark(struct cache_set *, struct list_head *);
void bch_journal_meta(struct cache_set *, struct closure *);
int bch_journal_read(struct cache_set *, struct list_head *);
int bch_journal_replay(struct cache_set *, struct list_head *);
atomic_t *bch_journal(struct cache_set *c,
struct keylist *keys,
struct closure *parent);
void bch_journal_next(struct journal *j);
void bch_journal_mark(struct cache_set *c, struct list_head *list);
void bch_journal_meta(struct cache_set *c, struct closure *cl);
int bch_journal_read(struct cache_set *c, struct list_head *list);
int bch_journal_replay(struct cache_set *c, struct list_head *list);
void bch_journal_free(struct cache_set *);
int bch_journal_alloc(struct cache_set *);
void bch_journal_free(struct cache_set *c);
int bch_journal_alloc(struct cache_set *c);
#endif /* _BCACHE_JOURNAL_H */


@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
struct cache_set *c = container_of(buf, struct cache_set,
moving_gc_keys);
unsigned i;
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
@ -38,6 +38,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io);
}
@ -186,9 +187,10 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}
static unsigned bucket_heap_top(struct cache *ca)
static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
@ -196,7 +198,7 @@ void bch_moving_gc(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
unsigned i;
unsigned int i;
if (!c->copy_gc_enabled)
return;
@ -204,9 +206,9 @@ void bch_moving_gc(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i) {
unsigned sectors_to_move = 0;
unsigned reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
unsigned int sectors_to_move = 0;
unsigned int reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0;


@ -25,9 +25,9 @@
struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
static void bch_data_insert_start(struct closure *cl);
static unsigned cache_mode(struct cached_dev *dc)
static unsigned int cache_mode(struct cached_dev *dc)
{
return BDEV_CACHE_MODE(&dc->sb);
}
@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
csum = bch_crc64_update(csum, d, bv.bv_len);
kunmap(bv.bv_page);
}
@ -98,7 +99,7 @@ static void bch_data_insert_keys(struct closure *cl)
closure_return(cl);
}
static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
struct cache_set *c)
{
size_t oldsize = bch_keylist_nkeys(l);
@ -125,7 +126,7 @@ static void bch_data_invalidate(struct closure *cl)
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
unsigned sectors = min(bio_sectors(bio),
unsigned int sectors = min(bio_sectors(bio),
1U << (KEY_SIZE_BITS - 1));
if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
@ -135,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys,
&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
&KEY(op->inode,
bio->bi_iter.bi_sector,
sectors));
}
op->insert_data_done = true;
@ -151,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl)
/*
* Our data write just errored, which means we've got a bunch of keys to
* insert that point to data that wasn't succesfully written.
* insert that point to data that wasn't successfully written.
*
* We don't have to insert those keys but we still have to invalidate
* that region of the cache - so, if we just strip off all the pointers
@ -211,7 +214,7 @@ static void bch_data_insert_start(struct closure *cl)
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do {
unsigned i;
unsigned int i;
struct bkey *k;
struct bio_set *split = &op->c->bio_split;
@ -328,7 +331,7 @@ void bch_data_insert(struct closure *cl)
/* Congested? */
unsigned bch_get_congested(struct cache_set *c)
unsigned int bch_get_congested(struct cache_set *c)
{
int i;
long rand;
@ -372,8 +375,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
unsigned mode = cache_mode(dc);
unsigned sectors, congested = bch_get_congested(c);
unsigned int mode = cache_mode(dc);
unsigned int sectors, congested = bch_get_congested(c);
struct task_struct *task = current;
struct io *i;
@ -469,11 +472,11 @@ struct search {
struct bio *cache_miss;
struct bcache_device *d;
unsigned insert_bio_sectors;
unsigned recoverable:1;
unsigned write:1;
unsigned read_dirty_data:1;
unsigned cache_missed:1;
unsigned int insert_bio_sectors;
unsigned int recoverable:1;
unsigned int write:1;
unsigned int read_dirty_data:1;
unsigned int cache_missed:1;
unsigned long start_time;
@ -514,20 +517,20 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct search *s = container_of(op, struct search, op);
struct bio *n, *bio = &s->bio.bio;
struct bkey *bio_key;
unsigned ptr;
unsigned int ptr;
if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode
unsigned int bio_sectors = bio_sectors(bio);
unsigned int sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX,
KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX;
int ret = s->d->cache_miss(b, s, bio, sectors);
if (ret != MAP_CONTINUE)
return ret;
@ -623,6 +626,7 @@ static void request_endio(struct bio *bio)
if (bio->bi_status) {
struct search *s = container_of(cl, struct search, cl);
s->iop.status = bio->bi_status;
/* Only cache read errors are recoverable */
s->recoverable = false;
@ -813,7 +817,8 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) {
bio_reset(s->iop.bio);
s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL);
@ -856,10 +861,10 @@ static void cached_dev_read_done_bh(struct closure *cl)
}
static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
struct bio *bio, unsigned int sectors)
{
int ret = MAP_CONTINUE;
unsigned reada = 0;
unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio;
@ -1212,6 +1217,7 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
@ -1226,7 +1232,7 @@ static int cached_dev_congested(void *data, int bits)
return 1;
if (cached_dev_get(dc)) {
unsigned i;
unsigned int i;
struct cache *ca;
for_each_cache(ca, d->c, i) {
@ -1253,9 +1259,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
/* Flash backed devices */
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
struct bio *bio, unsigned int sectors)
{
unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
swap(bio->bi_iter.bi_size, bytes);
zero_fill_bio(bio);
@ -1338,7 +1344,7 @@ static int flash_dev_congested(void *data, int bits)
struct bcache_device *d = data;
struct request_queue *q;
struct cache *ca;
unsigned i;
unsigned int i;
int ret = 0;
for_each_cache(ca, d->c, i) {
@ -1361,8 +1367,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
void bch_request_exit(void)
{
if (bch_search_cache)
kmem_cache_destroy(bch_search_cache);
kmem_cache_destroy(bch_search_cache);
}
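
(The dropped guard is safe to lose: kmem_cache_destroy() already returns early when handed a NULL cache pointer, so the NULL check was redundant.)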
int __init bch_request_init(void)


@ -8,7 +8,7 @@ struct data_insert_op {
struct bio *bio;
struct workqueue_struct *wq;
unsigned inode;
unsigned int inode;
uint16_t write_point;
uint16_t write_prio;
blk_status_t status;
@ -17,15 +17,15 @@ struct data_insert_op {
uint16_t flags;
struct {
unsigned bypass:1;
unsigned writeback:1;
unsigned flush_journal:1;
unsigned csum:1;
unsigned int bypass:1;
unsigned int writeback:1;
unsigned int flush_journal:1;
unsigned int csum:1;
unsigned replace:1;
unsigned replace_collision:1;
unsigned int replace:1;
unsigned int replace_collision:1;
unsigned insert_data_done:1;
unsigned int insert_data_done:1;
};
};
@ -33,7 +33,7 @@ struct data_insert_op {
BKEY_PADDED(replace_key);
};
unsigned bch_get_congested(struct cache_set *);
unsigned int bch_get_congested(struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);


@ -33,11 +33,11 @@
* stored left shifted by 16, and scaled back in the sysfs show() function.
*/
static const unsigned DAY_RESCALE = 288;
static const unsigned HOUR_RESCALE = 12;
static const unsigned FIVE_MINUTE_RESCALE = 1;
static const unsigned accounting_delay = (HZ * 300) / 22;
static const unsigned accounting_weight = 32;
static const unsigned int DAY_RESCALE = 288;
static const unsigned int HOUR_RESCALE = 12;
static const unsigned int FIVE_MINUTE_RESCALE = 1;
static const unsigned int accounting_delay = (HZ * 300) / 22;
static const unsigned int accounting_weight = 32;
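
A quick check on these constants: accounting_delay is (HZ * 300) / 22 jiffies, i.e. 300/22 ≈ 13.6 seconds of wall time per timer tick regardless of HZ, giving 22 rescale ticks per five-minute window.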
/* sysfs reading/writing */
@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t)
struct cache_accounting *acc = from_timer(acc, t, timer);
#define move_stat(name) do { \
unsigned t = atomic_xchg(&acc->collector.name, 0); \
unsigned int t = atomic_xchg(&acc->collector.name, 0); \
t <<= 16; \
acc->five_minute.name += t; \
acc->hour.name += t; \
@ -200,6 +200,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&c->accounting.collector, hit, bypass);
}
@ -207,6 +208,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads);
atomic_inc(&c->accounting.collector.cache_readaheads);
}
@ -214,6 +216,7 @@ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions);
atomic_inc(&c->accounting.collector.cache_miss_collisions);
}


@ -23,7 +23,7 @@ struct cache_stats {
unsigned long cache_miss_collisions;
unsigned long sectors_bypassed;
unsigned rescale;
unsigned int rescale;
};
struct cache_accounting {
@ -53,10 +53,13 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc);
void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
bool, bool);
void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass);
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
void bch_mark_cache_miss_collision(struct cache_set *c,
struct bcache_device *d);
void bch_mark_sectors_bypassed(struct cache_set *c,
struct cached_dev *dc,
int sectors);
#endif /* _BCACHE_STATS_H_ */


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* bcache setup/teardown code, and some metadata io - read a superblock and
* figure out what to do with it.
@ -61,7 +62,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
const char *err;
struct cache_sb *s;
struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
unsigned i;
unsigned int i;
if (!bh)
return "IO error";
@ -149,7 +150,8 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
goto err;
err = "Invalid superblock: device too small";
if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
if (get_capacity(bdev->bd_disk) <
sb->bucket_size * sb->nbuckets)
goto err;
err = "Bad UUID";
@ -202,7 +204,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i;
unsigned int i;
bio->bi_iter.bi_sector = SB_SECTOR;
bio->bi_iter.bi_size = SB_SIZE;
@ -282,7 +284,7 @@ void bcache_write_super(struct cache_set *c)
{
struct closure *cl = &c->sb_write;
struct cache *ca;
unsigned i;
unsigned int i;
down(&c->sb_write_mutex);
closure_init(cl, &c->cl);
@ -334,7 +336,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
{
struct closure *cl = &c->uuid_write;
struct uuid_entry *u;
unsigned i;
unsigned int i;
char buf[80];
BUG_ON(!parent);
@ -415,8 +417,8 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
closure_init_stack(&cl);
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
@ -456,6 +458,7 @@ static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
return uuid_find(c, zero_uuid);
}
@ -463,8 +466,8 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
* Bucket priorities/gens:
*
* For each bucket, we store on disk its
* 8 bit gen
* 16 bit priority
* 8 bit gen
* 16 bit priority
*
* See alloc.c for an explanation of the gen. The priority is used to implement
* lru (and in the future other) cache replacement policies; for most purposes
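
As a sketch of the on-disk record the comment describes (the authoritative layout is bcache's struct bucket_disk; this is only a mock-up):

#include <stdint.h>

struct bucket_disk_sketch {
	uint16_t prio;	/* 16 bit priority, LRU replacement */
	uint8_t  gen;	/* 8 bit generation */
} __attribute__((packed));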
@ -587,7 +590,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
struct prio_set *p = ca->disk_buckets;
struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
struct bucket *b;
unsigned bucket_nr = 0;
unsigned int bucket_nr = 0;
for (b = ca->buckets;
b < ca->buckets + ca->sb.nbuckets;
@ -599,7 +602,8 @@ static void prio_read(struct cache *ca, uint64_t bucket)
prio_io(ca, bucket, REQ_OP_READ, 0);
if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
if (p->csum !=
bch_crc64(&p->magic, bucket_bytes(ca) - 8))
pr_warn("bad csum reading priorities");
if (p->magic != pset_magic(&ca->sb))
@ -619,6 +623,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
static int open_dev(struct block_device *b, fmode_t mode)
{
struct bcache_device *d = b->bd_disk->private_data;
if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
return -ENXIO;
@ -629,6 +634,7 @@ static int open_dev(struct block_device *b, fmode_t mode)
static void release_dev(struct gendisk *b, fmode_t mode)
{
struct bcache_device *d = b->private_data;
closure_put(&d->cl);
}
@ -662,7 +668,7 @@ static void bcache_device_unlink(struct bcache_device *d)
lockdep_assert_held(&bch_register_lock);
if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
unsigned i;
unsigned int i;
struct cache *ca;
sysfs_remove_link(&d->c->kobj, d->name);
@ -676,7 +682,7 @@ static void bcache_device_unlink(struct bcache_device *d)
static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
const char *name)
{
unsigned i;
unsigned int i;
struct cache *ca;
for_each_cache(ca, d->c, i)
@ -715,7 +721,7 @@ static void bcache_device_detach(struct bcache_device *d)
}
static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
unsigned id)
unsigned int id)
{
d->id = id;
d->c = c;
@ -762,7 +768,7 @@ static void bcache_device_free(struct bcache_device *d)
closure_debug_destroy(&d->cl);
}
static int bcache_device_init(struct bcache_device *d, unsigned block_size,
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
sector_t sectors)
{
struct request_queue *q;
@ -778,7 +784,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->nr_stripes || d->nr_stripes > max_stripes) {
pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
(unsigned)d->nr_stripes);
(unsigned int)d->nr_stripes);
return -ENOMEM;
}
@ -919,6 +925,7 @@ void bch_cached_dev_run(struct cached_dev *dc)
if (!d->c &&
BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
struct closure cl;
closure_init_stack(&cl);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
@ -928,8 +935,10 @@ void bch_cached_dev_run(struct cached_dev *dc)
add_disk(d->disk);
bd_link_disk_holder(dc->bdev, dc->disk.disk);
/* won't show up in the uevent file, use udevadm monitor -e instead
* only class / kset properties are persistent */
/*
* won't show up in the uevent file, use udevadm monitor -e instead
* only class / kset properties are persistent
*/
kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
kfree(env[1]);
kfree(env[2]);
@ -976,6 +985,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
struct closure cl;
closure_init_stack(&cl);
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
@ -1097,12 +1107,14 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
}
}
/* Deadlocks since we're called via sysfs...
sysfs_remove_file(&dc->kobj, &sysfs_attach);
/*
* Deadlocks since we're called via sysfs...
* sysfs_remove_file(&dc->kobj, &sysfs_attach);
*/
if (bch_is_zero(u->uuid, 16)) {
struct closure cl;
closure_init_stack(&cl);
memcpy(u->uuid, dc->sb.uuid, 16);
@ -1124,11 +1136,11 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
list_move(&dc->list, &c->cached_devs);
calc_cached_dev_sectors(c);
smp_wmb();
/*
* dc->c must be set before dc->count != 0 - paired with the mb in
* cached_dev_get()
*/
smp_wmb();
refcount_set(&dc->count, 1);
/* Block writeback thread, but spawn it */
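
The contract in the comment above is the classic publish pattern; a hypothetical userspace rendering with C11 atomics (the kernel-side pairing is the mb in cached_dev_get() that the comment cites):

#include <stdatomic.h>
#include <stddef.h>

struct cache_set;			/* opaque for this sketch */

struct dev_sketch {
	_Atomic(struct cache_set *) c;
	atomic_int count;
};

static void attach(struct dev_sketch *dc, struct cache_set *set)
{
	atomic_store_explicit(&dc->c, set, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&dc->count, 1, memory_order_relaxed);
}

static struct cache_set *get(struct dev_sketch *dc)
{
	if (!atomic_load_explicit(&dc->count, memory_order_relaxed))
		return NULL;
	atomic_thread_fence(memory_order_acquire);	/* reader's mb */
	return atomic_load_explicit(&dc->c, memory_order_relaxed);
}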
@ -1212,7 +1224,7 @@ static void cached_dev_flush(struct closure *cl)
continue_at(cl, cached_dev_free, system_wq);
}
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{
int ret;
struct io *io;
@ -1320,6 +1332,7 @@ void bch_flash_dev_release(struct kobject *kobj)
static void flash_dev_free(struct closure *cl)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
atomic_long_sub(bcache_dev_sectors_dirty(d),
&d->c->flash_dev_dirty_sectors);
@ -1459,17 +1472,18 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
pr_info("CACHE_SET_IO_DISABLE already set");
/* XXX: we can be called from atomic context
acquire_console_sem();
*/
/*
* XXX: we can be called from atomic context
* acquire_console_sem();
*/
printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
pr_err("bcache: error on %pU: ", c->sb.set_uuid);
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
printk(", disabling caching\n");
pr_err(", disabling caching\n");
if (c->on_error == ON_ERROR_PANIC)
panic("panic forced after error\n");
@ -1481,6 +1495,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
void bch_cache_set_release(struct kobject *kobj)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
kfree(c);
module_put(THIS_MODULE);
}
@ -1489,7 +1504,7 @@ static void cache_set_free(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, cl);
struct cache *ca;
unsigned i;
unsigned int i;
if (!IS_ERR_OR_NULL(c->debug))
debugfs_remove(c->debug);
@ -1532,7 +1547,7 @@ static void cache_set_flush(struct closure *cl)
struct cache_set *c = container_of(cl, struct cache_set, caching);
struct cache *ca;
struct btree *b;
unsigned i;
unsigned int i;
bch_cache_accounting_destroy(&c->accounting);
@ -1671,6 +1686,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
int iter_size;
struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
if (!c)
return NULL;
@ -1731,8 +1747,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
mempool_init_kmalloc_pool(&c->bio_meta, 2,
sizeof(struct bbio) + sizeof(struct bio_vec) *
bucket_pages(c)) ||
sizeof(struct bbio) + sizeof(struct bio_vec) *
bucket_pages(c)) ||
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
@ -1762,7 +1778,7 @@ static void run_cache_set(struct cache_set *c)
struct cached_dev *dc, *t;
struct cache *ca;
struct closure cl;
unsigned i;
unsigned int i;
closure_init_stack(&cl);
@ -1804,7 +1820,9 @@ static void run_cache_set(struct cache_set *c)
goto err;
err = "error reading btree root";
c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
c->root = bch_btree_node_get(c, NULL, k,
j->btree_level,
true, NULL);
if (IS_ERR_OR_NULL(c->root))
goto err;
@ -1853,7 +1871,7 @@ static void run_cache_set(struct cache_set *c)
pr_notice("invalidating existing data");
for_each_cache(ca, c, i) {
unsigned j;
unsigned int j;
ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2, SB_JOURNAL_BUCKETS);
@ -1998,7 +2016,7 @@ err:
void bch_cache_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
unsigned i;
unsigned int i;
if (ca->set) {
BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
@ -2098,7 +2116,9 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
goto err;
}
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
if (kobject_add(&ca->kobj,
&part_to_dev(bdev->bd_part)->kobj,
"bcache")) {
err = "error calling kobject_add";
ret = -ENOMEM;
goto out;
@ -2127,13 +2147,14 @@ err:
/* Global interfaces/init */
static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
const char *, size_t);
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size);
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
static bool bch_is_open_backing(struct block_device *bdev) {
static bool bch_is_open_backing(struct block_device *bdev)
{
struct cache_set *c, *tc;
struct cached_dev *dc, *t;
@ -2147,10 +2168,11 @@ static bool bch_is_open_backing(struct block_device *bdev) {
return false;
}
static bool bch_is_open_cache(struct block_device *bdev) {
static bool bch_is_open_cache(struct block_device *bdev)
{
struct cache_set *c, *tc;
struct cache *ca;
unsigned i;
unsigned int i;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
for_each_cache(ca, c, i)
@ -2159,7 +2181,8 @@ static bool bch_is_open_cache(struct block_device *bdev) {
return false;
}
static bool bch_is_open(struct block_device *bdev) {
static bool bch_is_open(struct block_device *bdev)
{
return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
@ -2216,6 +2239,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
err = "failed to register device";
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
if (!dc)
goto err_close;
@ -2224,6 +2248,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
mutex_unlock(&bch_register_lock);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
goto err_close;


@ -130,8 +130,10 @@ rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
size_t selected)
static ssize_t bch_snprint_string_list(char *buf,
size_t size,
const char * const list[],
size_t selected)
{
char *out = buf;
size_t i;
@ -148,7 +150,7 @@ SHOW(__bch_cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
const char *states[] = { "no cache", "clean", "dirty", "inconsistent" };
char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
int wb = dc->writeback_running;
#define var(stat) (dc->stat)
@ -307,7 +309,7 @@ STORE(__cached_dev)
if (v < 0)
return v;
if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) {
if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
SET_BDEV_CACHE_MODE(&dc->sb, v);
bch_write_bdev_super(dc, NULL);
}
@ -341,8 +343,9 @@ STORE(__cached_dev)
add_uevent_var(env, "DRIVER=bcache");
add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
add_uevent_var(env, "CACHED_LABEL=%s", buf);
kobject_uevent_env(
&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
KOBJ_CHANGE,
env->envp);
kfree(env);
}
@ -459,6 +462,7 @@ STORE(__bch_flash_dev)
if (attr == &sysfs_size) {
uint64_t v;
strtoi_h_or_return(buf, v);
u->sectors = v >> 9;
@ -533,9 +537,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
op.stats.floats, op.stats.failed);
}
static unsigned bch_root_usage(struct cache_set *c)
static unsigned int bch_root_usage(struct cache_set *c)
{
unsigned bytes = 0;
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
struct btree_iter iter;
@ -570,9 +574,9 @@ static size_t bch_cache_size(struct cache_set *c)
return ret;
}
static unsigned bch_cache_max_chain(struct cache_set *c)
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
unsigned ret = 0;
unsigned int ret = 0;
struct hlist_head *h;
mutex_lock(&c->bucket_lock);
@ -580,7 +584,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
for (h = c->bucket_hash;
h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
h++) {
unsigned i = 0;
unsigned int i = 0;
struct hlist_node *p;
hlist_for_each(p, h)
@ -593,13 +597,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
return ret;
}
static unsigned bch_btree_used(struct cache_set *c)
static unsigned int bch_btree_used(struct cache_set *c)
{
return div64_u64(c->gc_stats.key_bytes * 100,
(c->gc_stats.nodes ?: 1) * btree_bytes(c));
}
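
The "x ?: y" form is the GNU elvis extension: (c->gc_stats.nodes ?: 1) yields the node count unless it is zero, in which case 1, so the percentage can't divide by zero before the first GC pass has populated the stats.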
static unsigned bch_average_key_size(struct cache_set *c)
static unsigned int bch_average_key_size(struct cache_set *c)
{
return c->gc_stats.nkeys
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
@ -703,6 +707,7 @@ STORE(__bch_cache_set)
if (attr == &sysfs_flash_vol_create) {
int r;
uint64_t v;
strtoi_h_or_return(buf, v);
r = bch_flash_dev_create(c, v);
@ -736,6 +741,7 @@ STORE(__bch_cache_set)
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
c->shrink.scan_objects(&c->shrink, &sc);
@ -789,12 +795,14 @@ STORE_LOCKED(bch_cache_set)
SHOW(bch_cache_set_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
return bch_cache_set_show(&c->kobj, attr, buf);
}
STORE(bch_cache_set_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
return bch_cache_set_store(&c->kobj, attr, buf, size);
}
@ -996,7 +1004,7 @@ STORE(__bch_cache)
if (v < 0)
return v;
if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) {
if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
mutex_lock(&ca->set->bucket_lock);
SET_CACHE_REPLACEMENT(&ca->sb, v);
mutex_unlock(&ca->set->bucket_lock);

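The SHOW()/STORE() pairs used throughout this file come from sysfs.h macros that expand to kobject show/store handlers; each handler recovers its containing object with container_of(), as visible in the hunks above. Roughly what SHOW(__bch_cached_dev) expands to (a simplified sketch, assuming the usual shape of these macros):

static ssize_t __bch_cached_dev_show(struct kobject *kobj,
				     struct attribute *attr, char *buf)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* ...match attr against the known attributes, format into buf... */
	return 0;
}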
View File

@ -44,9 +44,9 @@ STORE(fn) \
static struct attribute sysfs_##_name = \
{ .name = #_name, .mode = _mode }
#define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
#define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
#define write_attribute(n) __sysfs_attribute(n, 0200)
#define read_attribute(n) __sysfs_attribute(n, 0444)
#define rw_attribute(n) __sysfs_attribute(n, 0644)
#define sysfs_printf(file, fmt, ...) \
do { \

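The symbolic mode macros replaced above and the bare octal values checkpatch now prefers are equivalent:

/* S_IWUSR           == 0200  (owner may write)
 * S_IRUGO           == 0444  (user, group and other may read)
 * S_IRUGO | S_IWUSR == 0644  (owner read/write, everyone else read)
 */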
View File

@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* random utility code, for bcache but in theory not specific to bcache
*
@ -133,6 +134,7 @@ bool bch_is_zero(const char *p, size_t n)
int bch_parse_uuid(const char *s, char *uuid)
{
size_t i, j, x;
memset(uuid, 0, 16);
for (i = 0, j = 0;

View File

@ -289,10 +289,10 @@ do { \
#define ANYSINT_MAX(t) \
((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
int bch_strtoint_h(const char *, int *);
int bch_strtouint_h(const char *, unsigned int *);
int bch_strtoll_h(const char *, long long *);
int bch_strtoull_h(const char *, unsigned long long *);
int bch_strtoint_h(const char *cp, int *res);
int bch_strtouint_h(const char *cp, unsigned int *res);
int bch_strtoll_h(const char *cp, long long *res);
int bch_strtoull_h(const char *cp, unsigned long long *res);
static inline int bch_strtol_h(const char *cp, long *res)
{
@ -348,7 +348,7 @@ static inline int bch_strtoul_h(const char *cp, long *res)
snprintf(buf, size, \
__builtin_types_compatible_p(typeof(var), int) \
? "%i\n" : \
__builtin_types_compatible_p(typeof(var), unsigned) \
__builtin_types_compatible_p(typeof(var), unsigned int) \
? "%u\n" : \
__builtin_types_compatible_p(typeof(var), long) \
? "%li\n" : \
@ -380,7 +380,7 @@ struct time_stats {
void bch_time_stats_update(struct time_stats *stats, uint64_t time);
static inline unsigned local_clock_us(void)
static inline unsigned int local_clock_us(void)
{
return local_clock() >> 10;
}
@ -403,7 +403,8 @@ do { \
__print_time_stat(stats, name, \
average_duration, duration_units); \
sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\
div_u64((stats)->max_duration, \
NSEC_PER_ ## duration_units)); \
\
sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
? div_s64(local_clock() - (stats)->last, \
@ -560,9 +561,10 @@ static inline uint64_t bch_crc64_update(uint64_t crc,
}
/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
static inline unsigned int fract_exp_two(unsigned int x,
unsigned int fract_bits)
{
unsigned fract = x & ~(~0 << fract_bits);
unsigned int fract = x & ~(~0 << fract_bits);
x >>= fract_bits;
x = 1 << x;
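
fract_exp_two() treats x as a fixed-point exponent: the bits above fract_bits select a power of two and the low fract_bits add a linear correction toward the next power. The hunk is cut off before the interpolation step, so the tail of this stand-alone sketch is an assumption based on the function's comment:

/* Sketch: approximate 2^(x / 2^fract_bits) by linear interpolation
 * between neighboring powers of two.
 */
static unsigned int fract_exp_two_sketch(unsigned int x,
					 unsigned int fract_bits)
{
	unsigned int fract = x & ~(~0U << fract_bits);	/* fractional bits */

	x >>= fract_bits;	/* integer part of the exponent */
	x = 1U << x;		/* 2^integer_part */
	return x + ((x * fract) >> fract_bits);	/* assumed interpolation tail */
}

For example, with fract_bits = 8, x = 640 encodes 2.5 and yields 6, the midpoint of 2^2 = 4 and 2^3 = 8.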

View File

@ -215,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work)
smp_mb();
}
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
static unsigned int writeback_delay(struct cached_dev *dc,
unsigned int sectors)
{
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
@ -249,6 +250,7 @@ static void dirty_init(struct keybuf_key *w)
static void dirty_io_destructor(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
kfree(io);
}
@ -263,7 +265,7 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) {
int ret;
unsigned i;
unsigned int i;
struct keylist keys;
bch_keylist_init(&keys);
@ -377,7 +379,7 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
unsigned delay = 0;
unsigned int delay = 0;
struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
size_t size;
int nk, i;
@ -442,7 +444,8 @@ static void read_dirty(struct cached_dev *dc)
io = kzalloc(sizeof(struct dirty_io) +
sizeof(struct bio_vec) *
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
DIV_ROUND_UP(KEY_SIZE(&w->key),
PAGE_SECTORS),
GFP_KERNEL);
if (!io)
goto err;
@ -465,7 +468,8 @@ static void read_dirty(struct cached_dev *dc)
down(&dc->in_flight);
/* We've acquired a semaphore for the maximum
/*
* We've acquired a semaphore for the maximum
* simultaneous number of writebacks; from here
* everything happens asynchronously.
*/
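
The rewrapped comment documents the throttling scheme here: dc->in_flight is a counting semaphore, so down() blocks once the maximum number of writeback I/Os are outstanding and each completion releases a slot with up(). A minimal sketch of the pattern (the name and the limit of 64 are illustrative, not taken from this diff):

#include <linux/semaphore.h>

static struct semaphore in_flight;

static void throttle_init(void)
{
	sema_init(&in_flight, 64);	/* allow up to 64 concurrent I/Os */
}

static void submit_writeback(void)
{
	down(&in_flight);	/* sleeps while all slots are taken */
	/* ...submit async I/O; its completion calls writeback_done()... */
}

static void writeback_done(void)
{
	up(&in_flight);		/* free a slot for the next submitter */
}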
@ -498,11 +502,11 @@ err:
/* Scan for dirty data */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors)
{
struct bcache_device *d = c->devices[inode];
unsigned stripe_offset, stripe, sectors_dirty;
unsigned int stripe_offset, stripe, sectors_dirty;
if (!d)
return;
@ -514,7 +518,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
stripe_offset = offset & (d->stripe_size - 1);
while (nr_sectors) {
int s = min_t(unsigned, abs(nr_sectors),
int s = min_t(unsigned int, abs(nr_sectors),
d->stripe_size - stripe_offset);
if (nr_sectors < 0)
@ -538,7 +542,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
struct cached_dev *dc = container_of(buf,
struct cached_dev,
writeback_keys);
BUG_ON(KEY_INODE(k) != dc->disk.id);
@ -548,7 +554,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
static void refill_full_stripes(struct cached_dev *dc)
{
struct keybuf *buf = &dc->writeback_keys;
unsigned start_stripe, stripe, next_stripe;
unsigned int start_stripe, stripe, next_stripe;
bool wrapped = false;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
@ -688,7 +694,7 @@ static int bch_writeback_thread(void *arg)
read_dirty(dc);
if (searched_full_index) {
unsigned delay = dc->writeback_delay * HZ;
unsigned int delay = dc->writeback_delay * HZ;
while (delay &&
!kthread_should_stop() &&
@ -712,7 +718,7 @@ static int bch_writeback_thread(void *arg)
struct sectors_dirty_init {
struct btree_op op;
unsigned inode;
unsigned int inode;
size_t count;
struct bkey start;
};

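The bcache_dev_sectors_dirty_add() hunks above show how a sector range is spread across fixed, power-of-two-sized stripes, clamping each step at a stripe boundary; a negative nr_sectors subtracts dirty sectors. A compact userspace sketch of that splitting logic (the per-stripe atomic counters of the kernel version are simplified to a plain int array):

#include <stdint.h>
#include <stdlib.h>

static void sectors_dirty_add_sketch(int *stripe_dirty,
				     uint64_t stripe_size, /* power of 2 */
				     uint64_t offset, int nr_sectors)
{
	uint64_t stripe = offset / stripe_size;
	int stripe_offset = (int)(offset & (stripe_size - 1));

	while (nr_sectors) {
		int s = (int)(stripe_size - stripe_offset);

		if (abs(nr_sectors) < s)
			s = abs(nr_sectors);
		if (nr_sectors < 0)
			s = -s;	/* clearing dirty state */

		stripe_dirty[stripe] += s;	/* atomic_add() in the kernel */
		nr_sectors -= s;
		stripe_offset = 0;	/* later stripes start at offset 0 */
		stripe++;
	}
}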
View File

@ -28,7 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
static inline unsigned offset_to_stripe(struct bcache_device *d,
static inline unsigned int offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
do_div(offset, d->stripe_size);
@ -37,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d,
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset,
unsigned nr_sectors)
unsigned int nr_sectors)
{
unsigned stripe = offset_to_stripe(&dc->disk, offset);
unsigned int stripe = offset_to_stripe(&dc->disk, offset);
while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
@ -54,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
}
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned cache_mode, bool would_skip)
unsigned int cache_mode, bool would_skip)
{
unsigned in_use = dc->disk.c->gc_stats.in_use;
unsigned int in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK ||
test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
@ -96,10 +96,11 @@ static inline void bch_writeback_add(struct cached_dev *dc)
}
}
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors);
void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);
#endif
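
One subtlety in offset_to_stripe() above: do_div() is a kernel macro that divides its 64-bit first argument in place (the quotient is left in offset) and evaluates to the remainder, which is why the function can simply return offset afterwards. The equivalent arithmetic in plain C, as a sketch:

#include <stdint.h>

static unsigned int offset_to_stripe_sketch(uint64_t offset,
					    uint32_t stripe_size)
{
	/* do_div(offset, stripe_size) leaves this quotient in offset */
	return (unsigned int)(offset / stripe_size);
}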

View File

@ -30,10 +30,10 @@ struct bkey {
BITMASK(name, struct bkey, field, offset, size)
#define PTR_FIELD(name, offset, size) \
static inline __u64 name(const struct bkey *k, unsigned i) \
static inline __u64 name(const struct bkey *k, unsigned int i) \
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
\
static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
{ \
k->ptr[i] &= ~(~(~0ULL << size) << offset); \
k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
static inline struct bkey *bkey_next(const struct bkey *k)
{
__u64 *d = (void *) k;
return (struct bkey *) (d + bkey_u64s(k));
}
static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
{
__u64 *d = (void *) k;
return (struct bkey *) (d + nr_keys);
}
/* Enough for a key with 6 pointers */
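
The PTR_FIELD accessors above rely on ~(~0ULL << size) producing a mask of the size low bits. A small stand-alone example of the extract and insert arithmetic the generated functions perform (values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long ptr = 0xAABBCCDDULL;
	unsigned int offset = 8, size = 16;
	unsigned long long mask = ~(~0ULL << size);	/* 0xFFFF */

	/* extract: shift the field down, then mask off higher bits */
	printf("0x%llx\n", (ptr >> offset) & mask);	/* 0xbbcc */

	/* insert: clear the field, then OR in the new (masked) value */
	ptr &= ~(mask << offset);
	ptr |= (0x1234ULL & mask) << offset;
	printf("0x%llx\n", ptr);			/* 0xaa1234dd */
	return 0;
}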

View File

@ -1829,6 +1829,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
if (attr == &dev_attr_enable) {
if (!!value == !!q->blk_trace) {
ret = 0;
goto out_unlock_bdev;
}
if (value)
ret = blk_trace_setup_queue(q, bdev);
else