You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge tag 'for-4.19/post-20180822' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe: - Set of bcache fixes and changes (Coly) - The flush warn fix (me) - Small series of BFQ fixes (Paolo) - wbt hang fix (Ming) - blktrace fix (Steven) - blk-mq hardware queue count update fix (Jianchao) - Various little fixes * tag 'for-4.19/post-20180822' of git://git.kernel.dk/linux-block: (31 commits) block/DAC960.c: make some arrays static const, shrinks object size blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter blk-mq: init hctx sched after update ctx and hctx mapping block: remove duplicate initialization tracing/blktrace: Fix to allow setting same value pktcdvd: fix setting of 'ret' error return for a few cases block: change return type to bool block, bfq: return nbytes and not zero from struct cftype .write() method block, bfq: improve code of bfq_bfqq_charge_time block, bfq: reduce write overcharge block, bfq: always update the budget of an entity when needed block, bfq: readd missing reset of parent-entity service blk-wbt: fix IO hang in wbt_wait() block: don't warn for flush on read-only device bcache: add the missing comments for smp_mb()/smp_wmb() bcache: remove unnecessary space before ioctl function pointer arguments bcache: add missing SPDX header bcache: move open brace at end of function definitions to next line bcache: add static const prefix to char * array declarations bcache: fix code comments style ...
This commit is contained in:
+2
-1
@@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
|
||||
ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
|
||||
return ret ?: nbytes;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_BLK_CGROUP
|
||||
|
||||
+40
-14
@@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194;
|
||||
static const int bfq_default_max_budget = 16 * 1024;
|
||||
|
||||
/*
|
||||
* Async to sync throughput distribution is controlled as follows:
|
||||
* when an async request is served, the entity is charged the number
|
||||
* of sectors of the request, multiplied by the factor below
|
||||
* When a sync request is dispatched, the queue that contains that
|
||||
* request, and all the ancestor entities of that queue, are charged
|
||||
* with the number of sectors of the request. In constrast, if the
|
||||
* request is async, then the queue and its ancestor entities are
|
||||
* charged with the number of sectors of the request, multiplied by
|
||||
* the factor below. This throttles the bandwidth for async I/O,
|
||||
* w.r.t. to sync I/O, and it is done to counter the tendency of async
|
||||
* writes to steal I/O throughput to reads.
|
||||
*
|
||||
* The current value of this parameter is the result of a tuning with
|
||||
* several hardware and software configurations. We tried to find the
|
||||
* lowest value for which writes do not cause noticeable problems to
|
||||
* reads. In fact, the lower this parameter, the stabler I/O control,
|
||||
* in the following respect. The lower this parameter is, the less
|
||||
* the bandwidth enjoyed by a group decreases
|
||||
* - when the group does writes, w.r.t. to when it does reads;
|
||||
* - when other groups do reads, w.r.t. to when they do writes.
|
||||
*/
|
||||
static const int bfq_async_charge_factor = 10;
|
||||
static const int bfq_async_charge_factor = 3;
|
||||
|
||||
/* Default timeout values, in jiffies, approximating CFQ defaults. */
|
||||
const int bfq_timeout = HZ / 8;
|
||||
@@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
|
||||
if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
|
||||
return blk_rq_sectors(rq);
|
||||
|
||||
/*
|
||||
* If there are no weight-raised queues, then amplify service
|
||||
* by just the async charge factor; otherwise amplify service
|
||||
* by twice the async charge factor, to further reduce latency
|
||||
* for weight-raised queues.
|
||||
*/
|
||||
if (bfqq->bfqd->wr_busy_queues == 0)
|
||||
return blk_rq_sectors(rq) * bfq_async_charge_factor;
|
||||
|
||||
return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
|
||||
return blk_rq_sectors(rq) * bfq_async_charge_factor;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -3298,6 +3303,27 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
|
||||
*/
|
||||
} else
|
||||
entity->service = 0;
|
||||
|
||||
/*
|
||||
* Reset the received-service counter for every parent entity.
|
||||
* Differently from what happens with bfqq->entity.service,
|
||||
* the resetting of this counter never needs to be postponed
|
||||
* for parent entities. In fact, in case bfqq may have a
|
||||
* chance to go on being served using the last, partially
|
||||
* consumed budget, bfqq->entity.service needs to be kept,
|
||||
* because if bfqq then actually goes on being served using
|
||||
* the same budget, the last value of bfqq->entity.service is
|
||||
* needed to properly decrement bfqq->entity.budget by the
|
||||
* portion already consumed. In contrast, it is not necessary
|
||||
* to keep entity->service for parent entities too, because
|
||||
* the bubble up of the new value of bfqq->entity.budget will
|
||||
* make sure that the budgets of parent entities are correct,
|
||||
* even in case bfqq and thus parent entities go on receiving
|
||||
* service with the same budget.
|
||||
*/
|
||||
entity = entity->parent;
|
||||
for_each_entity(entity)
|
||||
entity->service = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
+11
-11
@@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
|
||||
if (!change_without_lookup) /* lookup needed */
|
||||
next_in_service = bfq_lookup_next_entity(sd, expiration);
|
||||
|
||||
if (next_in_service)
|
||||
parent_sched_may_change = !sd->next_in_service ||
|
||||
if (next_in_service) {
|
||||
bool new_budget_triggers_change =
|
||||
bfq_update_parent_budget(next_in_service);
|
||||
|
||||
parent_sched_may_change = !sd->next_in_service ||
|
||||
new_budget_triggers_change;
|
||||
}
|
||||
|
||||
sd->next_in_service = next_in_service;
|
||||
|
||||
if (!next_in_service)
|
||||
@@ -877,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
|
||||
unsigned long time_ms)
|
||||
{
|
||||
struct bfq_entity *entity = &bfqq->entity;
|
||||
int tot_serv_to_charge = entity->service;
|
||||
unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout);
|
||||
|
||||
if (time_ms > 0 && time_ms < timeout_ms)
|
||||
tot_serv_to_charge =
|
||||
(bfqd->bfq_max_budget * time_ms) / timeout_ms;
|
||||
|
||||
if (tot_serv_to_charge < entity->service)
|
||||
tot_serv_to_charge = entity->service;
|
||||
unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
|
||||
unsigned long bounded_time_ms = min(time_ms, timeout_ms);
|
||||
int serv_to_charge_for_time =
|
||||
(bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
|
||||
int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
|
||||
|
||||
/* Increase budget to avoid inconsistencies */
|
||||
if (tot_serv_to_charge > entity->budget)
|
||||
|
||||
+3
-2
@@ -1036,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
|
||||
laptop_mode_timer_fn, 0);
|
||||
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
|
||||
INIT_WORK(&q->timeout_work, NULL);
|
||||
INIT_LIST_HEAD(&q->queue_head);
|
||||
INIT_LIST_HEAD(&q->timeout_list);
|
||||
INIT_LIST_HEAD(&q->icq_list);
|
||||
#ifdef CONFIG_BLK_CGROUP
|
||||
@@ -2162,7 +2161,9 @@ static inline bool should_fail_request(struct hd_struct *part,
|
||||
|
||||
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
|
||||
{
|
||||
if (part->policy && op_is_write(bio_op(bio))) {
|
||||
const int op = bio_op(bio);
|
||||
|
||||
if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
|
||||
char b[BDEVNAME_SIZE];
|
||||
|
||||
WARN_ONCE(1,
|
||||
|
||||
@@ -462,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
|
||||
blk_mq_sched_free_tags(set, hctx, i);
|
||||
}
|
||||
|
||||
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
||||
unsigned int hctx_idx)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
int ret;
|
||||
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (e->type->ops.mq.init_hctx) {
|
||||
ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
|
||||
if (ret) {
|
||||
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
blk_mq_debugfs_register_sched_hctx(q, hctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
||||
unsigned int hctx_idx)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
|
||||
if (!e)
|
||||
return;
|
||||
|
||||
blk_mq_debugfs_unregister_sched_hctx(hctx);
|
||||
|
||||
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
|
||||
e->type->ops.mq.exit_hctx(hctx, hctx_idx);
|
||||
hctx->sched_data = NULL;
|
||||
}
|
||||
|
||||
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
|
||||
}
|
||||
|
||||
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
|
||||
@@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
|
||||
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
|
||||
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
|
||||
|
||||
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
||||
unsigned int hctx_idx);
|
||||
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
|
||||
unsigned int hctx_idx);
|
||||
|
||||
static inline bool
|
||||
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
|
||||
{
|
||||
|
||||
+13
-1
@@ -320,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
|
||||
* queue_hw_ctx after freeze the queue. So we could use q_usage_counter
|
||||
* to avoid race with it. __blk_mq_update_nr_hw_queues will users
|
||||
* synchronize_rcu to ensure all of the users go out of the critical
|
||||
* section below and see zeroed q_usage_counter.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
if (percpu_ref_is_zero(&q->q_usage_counter)) {
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
||||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
struct blk_mq_tags *tags = hctx->tags;
|
||||
@@ -335,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
|
||||
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
|
||||
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
|
||||
|
||||
+88
-8
@@ -2145,8 +2145,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
|
||||
if (set->ops->exit_request)
|
||||
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
|
||||
|
||||
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
|
||||
|
||||
if (set->ops->exit_hctx)
|
||||
set->ops->exit_hctx(hctx, hctx_idx);
|
||||
|
||||
@@ -2214,12 +2212,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
|
||||
goto free_bitmap;
|
||||
|
||||
if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
|
||||
goto exit_hctx;
|
||||
|
||||
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
|
||||
if (!hctx->fq)
|
||||
goto sched_exit_hctx;
|
||||
goto exit_hctx;
|
||||
|
||||
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
|
||||
goto free_fq;
|
||||
@@ -2233,8 +2228,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
||||
|
||||
free_fq:
|
||||
kfree(hctx->fq);
|
||||
sched_exit_hctx:
|
||||
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
|
||||
exit_hctx:
|
||||
if (set->ops->exit_hctx)
|
||||
set->ops->exit_hctx(hctx, hctx_idx);
|
||||
@@ -2896,10 +2889,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* request_queue and elevator_type pair.
|
||||
* It is just used by __blk_mq_update_nr_hw_queues to cache
|
||||
* the elevator_type associated with a request_queue.
|
||||
*/
|
||||
struct blk_mq_qe_pair {
|
||||
struct list_head node;
|
||||
struct request_queue *q;
|
||||
struct elevator_type *type;
|
||||
};
|
||||
|
||||
/*
|
||||
* Cache the elevator_type in qe pair list and switch the
|
||||
* io scheduler to 'none'
|
||||
*/
|
||||
static bool blk_mq_elv_switch_none(struct list_head *head,
|
||||
struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_qe_pair *qe;
|
||||
|
||||
if (!q->elevator)
|
||||
return true;
|
||||
|
||||
qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
|
||||
if (!qe)
|
||||
return false;
|
||||
|
||||
INIT_LIST_HEAD(&qe->node);
|
||||
qe->q = q;
|
||||
qe->type = q->elevator->type;
|
||||
list_add(&qe->node, head);
|
||||
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
/*
|
||||
* After elevator_switch_mq, the previous elevator_queue will be
|
||||
* released by elevator_release. The reference of the io scheduler
|
||||
* module get by elevator_get will also be put. So we need to get
|
||||
* a reference of the io scheduler module here to prevent it to be
|
||||
* removed.
|
||||
*/
|
||||
__module_get(qe->type->elevator_owner);
|
||||
elevator_switch_mq(q, NULL);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void blk_mq_elv_switch_back(struct list_head *head,
|
||||
struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_qe_pair *qe;
|
||||
struct elevator_type *t = NULL;
|
||||
|
||||
list_for_each_entry(qe, head, node)
|
||||
if (qe->q == q) {
|
||||
t = qe->type;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!t)
|
||||
return;
|
||||
|
||||
list_del(&qe->node);
|
||||
kfree(qe);
|
||||
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
elevator_switch_mq(q, t);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
}
|
||||
|
||||
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
int nr_hw_queues)
|
||||
{
|
||||
struct request_queue *q;
|
||||
LIST_HEAD(head);
|
||||
|
||||
lockdep_assert_held(&set->tag_list_lock);
|
||||
|
||||
@@ -2910,6 +2974,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
blk_mq_freeze_queue(q);
|
||||
/*
|
||||
* Sync with blk_mq_queue_tag_busy_iter.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
/*
|
||||
* Switch IO scheduler to 'none', cleaning up the data associated
|
||||
* with the previous scheduler. We will switch back once we are done
|
||||
* updating the new sw to hw queue mappings.
|
||||
*/
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
if (!blk_mq_elv_switch_none(&head, q))
|
||||
goto switch_back;
|
||||
|
||||
set->nr_hw_queues = nr_hw_queues;
|
||||
blk_mq_update_queue_map(set);
|
||||
@@ -2918,6 +2994,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
|
||||
blk_mq_queue_reinit(q);
|
||||
}
|
||||
|
||||
switch_back:
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
blk_mq_elv_switch_back(&head, q);
|
||||
|
||||
list_for_each_entry(q, &set->tag_list, tag_set_list)
|
||||
blk_mq_unfreeze_queue(q);
|
||||
}
|
||||
|
||||
+1
-5
@@ -576,12 +576,8 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
|
||||
struct rq_wb *rwb = RQWB(rqos);
|
||||
enum wbt_flags flags;
|
||||
|
||||
if (!rwb_enabled(rwb))
|
||||
return;
|
||||
|
||||
flags = bio_to_wbt_flags(rwb, bio);
|
||||
|
||||
if (!wbt_should_throttle(rwb, bio)) {
|
||||
if (!(flags & WBT_TRACKED)) {
|
||||
if (flags & WBT_READ)
|
||||
wb_timestamp(rwb, &rwb->last_issue);
|
||||
return;
|
||||
|
||||
+3
-1
@@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
|
||||
|
||||
int elevator_init(struct request_queue *);
|
||||
int elevator_init_mq(struct request_queue *q);
|
||||
int elevator_switch_mq(struct request_queue *q,
|
||||
struct elevator_type *new_e);
|
||||
void elevator_exit(struct request_queue *, struct elevator_queue *);
|
||||
int elv_register_queue(struct request_queue *q);
|
||||
void elv_unregister_queue(struct request_queue *q);
|
||||
@@ -297,7 +299,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
|
||||
* b) the queue had IO stats enabled when this request was started, and
|
||||
* c) it's a file system request
|
||||
*/
|
||||
static inline int blk_do_io_stat(struct request *rq)
|
||||
static inline bool blk_do_io_stat(struct request *rq)
|
||||
{
|
||||
return rq->rq_disk &&
|
||||
(rq->rq_flags & RQF_IO_STAT) &&
|
||||
|
||||
+12
-8
@@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(elv_unregister);
|
||||
|
||||
static int elevator_switch_mq(struct request_queue *q,
|
||||
int elevator_switch_mq(struct request_queue *q,
|
||||
struct elevator_type *new_e)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&q->sysfs_lock);
|
||||
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue(q);
|
||||
|
||||
if (q->elevator) {
|
||||
if (q->elevator->registered)
|
||||
elv_unregister_queue(q);
|
||||
@@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q,
|
||||
blk_add_trace_msg(q, "elv switch: none");
|
||||
|
||||
out:
|
||||
blk_mq_unquiesce_queue(q);
|
||||
blk_mq_unfreeze_queue(q);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
|
||||
|
||||
lockdep_assert_held(&q->sysfs_lock);
|
||||
|
||||
if (q->mq_ops)
|
||||
return elevator_switch_mq(q, new_e);
|
||||
if (q->mq_ops) {
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue(q);
|
||||
|
||||
err = elevator_switch_mq(q, new_e);
|
||||
|
||||
blk_mq_unquiesce_queue(q);
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Turn on BYPASS and drain all requests w/ elevator private data.
|
||||
|
||||
+24
-18
@@ -2428,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
|
||||
{
|
||||
DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
|
||||
Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
|
||||
unsigned char *ReadCacheStatus[] = { "Read Cache Disabled",
|
||||
"Read Cache Enabled",
|
||||
"Read Ahead Enabled",
|
||||
"Intelligent Read Ahead Enabled",
|
||||
"-", "-", "-", "-" };
|
||||
unsigned char *WriteCacheStatus[] = { "Write Cache Disabled",
|
||||
"Logical Device Read Only",
|
||||
"Write Cache Enabled",
|
||||
"Intelligent Write Cache Enabled",
|
||||
"-", "-", "-", "-" };
|
||||
static const unsigned char *ReadCacheStatus[] = {
|
||||
"Read Cache Disabled",
|
||||
"Read Cache Enabled",
|
||||
"Read Ahead Enabled",
|
||||
"Intelligent Read Ahead Enabled",
|
||||
"-", "-", "-", "-"
|
||||
};
|
||||
static const unsigned char *WriteCacheStatus[] = {
|
||||
"Write Cache Disabled",
|
||||
"Logical Device Read Only",
|
||||
"Write Cache Enabled",
|
||||
"Intelligent Write Cache Enabled",
|
||||
"-", "-", "-", "-"
|
||||
};
|
||||
unsigned char *GeometryTranslation;
|
||||
if (LogicalDeviceInfo == NULL) continue;
|
||||
switch (LogicalDeviceInfo->DriveGeometry)
|
||||
@@ -4339,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
|
||||
static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
|
||||
{
|
||||
DAC960_Controller_T *Controller = Command->Controller;
|
||||
unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
|
||||
"NOT READY", "MEDIUM ERROR",
|
||||
"HARDWARE ERROR", "ILLEGAL REQUEST",
|
||||
"UNIT ATTENTION", "DATA PROTECT",
|
||||
"BLANK CHECK", "VENDOR-SPECIFIC",
|
||||
"COPY ABORTED", "ABORTED COMMAND",
|
||||
"EQUAL", "VOLUME OVERFLOW",
|
||||
"MISCOMPARE", "RESERVED" };
|
||||
static const unsigned char *SenseErrors[] = {
|
||||
"NO SENSE", "RECOVERED ERROR",
|
||||
"NOT READY", "MEDIUM ERROR",
|
||||
"HARDWARE ERROR", "ILLEGAL REQUEST",
|
||||
"UNIT ATTENTION", "DATA PROTECT",
|
||||
"BLANK CHECK", "VENDOR-SPECIFIC",
|
||||
"COPY ABORTED", "ABORTED COMMAND",
|
||||
"EQUAL", "VOLUME OVERFLOW",
|
||||
"MISCOMPARE", "RESERVED"
|
||||
};
|
||||
unsigned char *CommandName = "UNKNOWN";
|
||||
switch (Command->CommandType)
|
||||
{
|
||||
|
||||
@@ -2740,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
|
||||
pd->write_congestion_on = write_congestion_on;
|
||||
pd->write_congestion_off = write_congestion_off;
|
||||
|
||||
ret = -ENOMEM;
|
||||
disk = alloc_disk(1);
|
||||
if (!disk)
|
||||
goto out_mem;
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
config BCACHE
|
||||
tristate "Block device as cache"
|
||||
select CRC64
|
||||
---help---
|
||||
help
|
||||
Allows a block device to be used as cache for other devices; uses
|
||||
a btree for indexing and the layout is optimized for SSDs.
|
||||
|
||||
@@ -11,7 +11,7 @@ config BCACHE
|
||||
config BCACHE_DEBUG
|
||||
bool "Bcache debugging"
|
||||
depends on BCACHE
|
||||
---help---
|
||||
help
|
||||
Don't select this option unless you're a developer
|
||||
|
||||
Enables extra debugging tools, allows expensive runtime checks to be
|
||||
@@ -21,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG
|
||||
bool "Debug closures"
|
||||
depends on BCACHE
|
||||
select DEBUG_FS
|
||||
---help---
|
||||
help
|
||||
Keeps all active closures in a linked list and provides a debugfs
|
||||
interface to list them, which makes it possible to see asynchronous
|
||||
operations that get stuck.
|
||||
|
||||
+23
-16
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
|
||||
{
|
||||
struct cache *ca;
|
||||
struct bucket *b;
|
||||
unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
|
||||
unsigned i;
|
||||
unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
|
||||
unsigned int i;
|
||||
int r;
|
||||
|
||||
atomic_sub(sectors, &c->rescale);
|
||||
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
|
||||
|
||||
#define bucket_prio(b) \
|
||||
({ \
|
||||
unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
|
||||
unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
|
||||
\
|
||||
(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
|
||||
})
|
||||
@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
|
||||
|
||||
while (!fifo_full(&ca->free_inc)) {
|
||||
size_t n;
|
||||
|
||||
get_random_bytes(&n, sizeof(n));
|
||||
|
||||
n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
|
||||
@@ -301,7 +302,7 @@ do { \
|
||||
|
||||
static int bch_allocator_push(struct cache *ca, long bucket)
|
||||
{
|
||||
unsigned i;
|
||||
unsigned int i;
|
||||
|
||||
/* Prios/gens are actually the most important reserve */
|
||||
if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
|
||||
@@ -385,7 +386,7 @@ out:
|
||||
|
||||
/* Allocation */
|
||||
|
||||
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
|
||||
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
|
||||
{
|
||||
DEFINE_WAIT(w);
|
||||
struct bucket *b;
|
||||
@@ -421,7 +422,7 @@ out:
|
||||
if (expensive_debug_checks(ca->set)) {
|
||||
size_t iter;
|
||||
long i;
|
||||
unsigned j;
|
||||
unsigned int j;
|
||||
|
||||
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
|
||||
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
|
||||
@@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
|
||||
|
||||
void bch_bucket_free(struct cache_set *c, struct bkey *k)
|
||||
{
|
||||
unsigned i;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < KEY_PTRS(k); i++)
|
||||
__bch_bucket_free(PTR_CACHE(c, k, i),
|
||||
PTR_BUCKET(c, k, i));
|
||||
}
|
||||
|
||||
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
|
||||
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
|
||||
struct bkey *k, int n, bool wait)
|
||||
{
|
||||
int i;
|
||||
@@ -510,10 +511,11 @@ err:
|
||||
return -1;
|
||||
}
|
||||
|
||||
int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
|
||||
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
|
||||
struct bkey *k, int n, bool wait)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&c->bucket_lock);
|
||||
ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
|
||||
mutex_unlock(&c->bucket_lock);
|
||||
@@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
|
||||
|
||||
struct open_bucket {
|
||||
struct list_head list;
|
||||
unsigned last_write_point;
|
||||
unsigned sectors_free;
|
||||
unsigned int last_write_point;
|
||||
unsigned int sectors_free;
|
||||
BKEY_PADDED(key);
|
||||
};
|
||||
|
||||
@@ -556,7 +558,7 @@ struct open_bucket {
|
||||
*/
|
||||
static struct open_bucket *pick_data_bucket(struct cache_set *c,
|
||||
const struct bkey *search,
|
||||
unsigned write_point,
|
||||
unsigned int write_point,
|
||||
struct bkey *alloc)
|
||||
{
|
||||
struct open_bucket *ret, *ret_task = NULL;
|
||||
@@ -595,12 +597,16 @@ found:
|
||||
*
|
||||
* If s->writeback is true, will not fail.
|
||||
*/
|
||||
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
|
||||
unsigned write_point, unsigned write_prio, bool wait)
|
||||
bool bch_alloc_sectors(struct cache_set *c,
|
||||
struct bkey *k,
|
||||
unsigned int sectors,
|
||||
unsigned int write_point,
|
||||
unsigned int write_prio,
|
||||
bool wait)
|
||||
{
|
||||
struct open_bucket *b;
|
||||
BKEY_PADDED(key) alloc;
|
||||
unsigned i;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* We might have to allocate a new bucket, which we can't do with a
|
||||
@@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
|
||||
spin_lock(&c->data_bucket_lock);
|
||||
|
||||
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
|
||||
unsigned watermark = write_prio
|
||||
unsigned int watermark = write_prio
|
||||
? RESERVE_MOVINGGC
|
||||
: RESERVE_NONE;
|
||||
|
||||
@@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
|
||||
|
||||
for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
|
||||
struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
|
||||
|
||||
if (!b)
|
||||
return -ENOMEM;
|
||||
|
||||
|
||||
+101
-95
@@ -252,7 +252,7 @@ struct bcache_device {
|
||||
struct kobject kobj;
|
||||
|
||||
struct cache_set *c;
|
||||
unsigned id;
|
||||
unsigned int id;
|
||||
#define BCACHEDEVNAME_SIZE 12
|
||||
char name[BCACHEDEVNAME_SIZE];
|
||||
|
||||
@@ -264,18 +264,19 @@ struct bcache_device {
|
||||
#define BCACHE_DEV_UNLINK_DONE 2
|
||||
#define BCACHE_DEV_WB_RUNNING 3
|
||||
#define BCACHE_DEV_RATE_DW_RUNNING 4
|
||||
unsigned nr_stripes;
|
||||
unsigned stripe_size;
|
||||
unsigned int nr_stripes;
|
||||
unsigned int stripe_size;
|
||||
atomic_t *stripe_sectors_dirty;
|
||||
unsigned long *full_dirty_stripes;
|
||||
|
||||
struct bio_set bio_split;
|
||||
|
||||
unsigned data_csum:1;
|
||||
unsigned int data_csum:1;
|
||||
|
||||
int (*cache_miss)(struct btree *, struct search *,
|
||||
struct bio *, unsigned);
|
||||
int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
|
||||
int (*cache_miss)(struct btree *b, struct search *s,
|
||||
struct bio *bio, unsigned int sectors);
|
||||
int (*ioctl)(struct bcache_device *d, fmode_t mode,
|
||||
unsigned int cmd, unsigned long arg);
|
||||
};
|
||||
|
||||
struct io {
|
||||
@@ -284,7 +285,7 @@ struct io {
|
||||
struct list_head lru;
|
||||
|
||||
unsigned long jiffies;
|
||||
unsigned sequential;
|
||||
unsigned int sequential;
|
||||
sector_t last;
|
||||
};
|
||||
|
||||
@@ -358,18 +359,18 @@ struct cached_dev {
|
||||
struct cache_accounting accounting;
|
||||
|
||||
/* The rest of this all shows up in sysfs */
|
||||
unsigned sequential_cutoff;
|
||||
unsigned readahead;
|
||||
unsigned int sequential_cutoff;
|
||||
unsigned int readahead;
|
||||
|
||||
unsigned io_disable:1;
|
||||
unsigned verify:1;
|
||||
unsigned bypass_torture_test:1;
|
||||
unsigned int io_disable:1;
|
||||
unsigned int verify:1;
|
||||
unsigned int bypass_torture_test:1;
|
||||
|
||||
unsigned partial_stripes_expensive:1;
|
||||
unsigned writeback_metadata:1;
|
||||
unsigned writeback_running:1;
|
||||
unsigned int partial_stripes_expensive:1;
|
||||
unsigned int writeback_metadata:1;
|
||||
unsigned int writeback_running:1;
|
||||
unsigned char writeback_percent;
|
||||
unsigned writeback_delay;
|
||||
unsigned int writeback_delay;
|
||||
|
||||
uint64_t writeback_rate_target;
|
||||
int64_t writeback_rate_proportional;
|
||||
@@ -377,16 +378,16 @@ struct cached_dev {
|
||||
int64_t writeback_rate_integral_scaled;
|
||||
int32_t writeback_rate_change;
|
||||
|
||||
unsigned writeback_rate_update_seconds;
|
||||
unsigned writeback_rate_i_term_inverse;
|
||||
unsigned writeback_rate_p_term_inverse;
|
||||
unsigned writeback_rate_minimum;
|
||||
unsigned int writeback_rate_update_seconds;
|
||||
unsigned int writeback_rate_i_term_inverse;
|
||||
unsigned int writeback_rate_p_term_inverse;
|
||||
unsigned int writeback_rate_minimum;
|
||||
|
||||
enum stop_on_failure stop_when_cache_set_failed;
|
||||
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
|
||||
atomic_t io_errors;
|
||||
unsigned error_limit;
|
||||
unsigned offline_seconds;
|
||||
unsigned int error_limit;
|
||||
unsigned int offline_seconds;
|
||||
|
||||
char backing_dev_name[BDEVNAME_SIZE];
|
||||
};
|
||||
@@ -447,7 +448,7 @@ struct cache {
|
||||
* until a gc finishes - otherwise we could pointlessly burn a ton of
|
||||
* cpu
|
||||
*/
|
||||
unsigned invalidate_needs_gc;
|
||||
unsigned int invalidate_needs_gc;
|
||||
|
||||
bool discard; /* Get rid of? */
|
||||
|
||||
@@ -472,7 +473,7 @@ struct gc_stat {
|
||||
|
||||
size_t nkeys;
|
||||
uint64_t data; /* sectors */
|
||||
unsigned in_use; /* percent */
|
||||
unsigned int in_use; /* percent */
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -518,7 +519,7 @@ struct cache_set {
|
||||
int caches_loaded;
|
||||
|
||||
struct bcache_device **devices;
|
||||
unsigned devices_max_used;
|
||||
unsigned int devices_max_used;
|
||||
atomic_t attached_dev_nr;
|
||||
struct list_head cached_devs;
|
||||
uint64_t cached_dev_sectors;
|
||||
@@ -548,7 +549,7 @@ struct cache_set {
|
||||
* Default number of pages for a new btree node - may be less than a
|
||||
* full bucket
|
||||
*/
|
||||
unsigned btree_pages;
|
||||
unsigned int btree_pages;
|
||||
|
||||
/*
|
||||
* Lists of struct btrees; lru is the list for structs that have memory
|
||||
@@ -571,7 +572,7 @@ struct cache_set {
|
||||
struct list_head btree_cache_freed;
|
||||
|
||||
/* Number of elements in btree_cache + btree_cache_freeable lists */
|
||||
unsigned btree_cache_used;
|
||||
unsigned int btree_cache_used;
|
||||
|
||||
/*
|
||||
* If we need to allocate memory for a new btree node and that
|
||||
@@ -613,8 +614,8 @@ struct cache_set {
|
||||
uint16_t min_prio;
|
||||
|
||||
/*
|
||||
* max(gen - last_gc) for all buckets. When it gets too big we have to gc
|
||||
* to keep gens from wrapping around.
|
||||
* max(gen - last_gc) for all buckets. When it gets too big we have to
|
||||
* gc to keep gens from wrapping around.
|
||||
*/
|
||||
uint8_t need_gc;
|
||||
struct gc_stat gc_stats;
|
||||
@@ -649,7 +650,7 @@ struct cache_set {
|
||||
struct mutex verify_lock;
|
||||
#endif
|
||||
|
||||
unsigned nr_uuids;
|
||||
unsigned int nr_uuids;
|
||||
struct uuid_entry *uuids;
|
||||
BKEY_PADDED(uuid_bucket);
|
||||
struct closure uuid_write;
|
||||
@@ -670,12 +671,12 @@ struct cache_set {
|
||||
struct journal journal;
|
||||
|
||||
#define CONGESTED_MAX 1024
|
||||
unsigned congested_last_us;
|
||||
unsigned int congested_last_us;
|
||||
atomic_t congested;
|
||||
|
||||
/* The rest of this all shows up in sysfs */
|
||||
unsigned congested_read_threshold_us;
|
||||
unsigned congested_write_threshold_us;
|
||||
unsigned int congested_read_threshold_us;
|
||||
unsigned int congested_write_threshold_us;
|
||||
|
||||
struct time_stats btree_gc_time;
|
||||
struct time_stats btree_split_time;
|
||||
@@ -694,16 +695,16 @@ struct cache_set {
|
||||
ON_ERROR_PANIC,
|
||||
} on_error;
|
||||
#define DEFAULT_IO_ERROR_LIMIT 8
|
||||
unsigned error_limit;
|
||||
unsigned error_decay;
|
||||
unsigned int error_limit;
|
||||
unsigned int error_decay;
|
||||
|
||||
unsigned short journal_delay_ms;
|
||||
bool expensive_debug_checks;
|
||||
unsigned verify:1;
|
||||
unsigned key_merging_disabled:1;
|
||||
unsigned gc_always_rewrite:1;
|
||||
unsigned shrinker_disabled:1;
|
||||
unsigned copy_gc_enabled:1;
|
||||
unsigned int verify:1;
|
||||
unsigned int key_merging_disabled:1;
|
||||
unsigned int gc_always_rewrite:1;
|
||||
unsigned int shrinker_disabled:1;
|
||||
unsigned int copy_gc_enabled:1;
|
||||
|
||||
#define BUCKET_HASH_BITS 12
|
||||
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
|
||||
@@ -712,7 +713,7 @@ struct cache_set {
|
||||
};
|
||||
|
||||
struct bbio {
|
||||
unsigned submit_time_us;
|
||||
unsigned int submit_time_us;
|
||||
union {
|
||||
struct bkey key;
|
||||
uint64_t _pad[3];
|
||||
@@ -729,10 +730,10 @@ struct bbio {
|
||||
|
||||
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
|
||||
#define btree_blocks(b) \
|
||||
((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
|
||||
((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
|
||||
|
||||
#define btree_default_blocks(c) \
|
||||
((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
|
||||
((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
|
||||
|
||||
#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
|
||||
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
|
||||
@@ -761,21 +762,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
|
||||
|
||||
static inline struct cache *PTR_CACHE(struct cache_set *c,
|
||||
const struct bkey *k,
|
||||
unsigned ptr)
|
||||
unsigned int ptr)
|
||||
{
|
||||
return c->cache[PTR_DEV(k, ptr)];
|
||||
}
|
||||
|
||||
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
|
||||
const struct bkey *k,
|
||||
unsigned ptr)
|
||||
unsigned int ptr)
|
||||
{
|
||||
return sector_to_bucket(c, PTR_OFFSET(k, ptr));
|
||||
}
|
||||
|
||||
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
|
||||
const struct bkey *k,
|
||||
unsigned ptr)
|
||||
unsigned int ptr)
|
||||
{
|
||||
return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
|
||||
}
|
||||
@@ -783,17 +784,18 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c,
|
||||
static inline uint8_t gen_after(uint8_t a, uint8_t b)
|
||||
{
|
||||
uint8_t r = a - b;
|
||||
|
||||
return r > 128U ? 0 : r;
|
||||
}
|
||||
|
||||
static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
|
||||
unsigned i)
|
||||
unsigned int i)
|
||||
{
|
||||
return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
|
||||
}
|
||||
|
||||
static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
|
||||
unsigned i)
|
||||
unsigned int i)
|
||||
{
|
||||
return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
|
||||
}
|
||||
@@ -879,16 +881,16 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
|
||||
#define BUCKET_GC_GEN_MAX 96U
|
||||
|
||||
#define kobj_attribute_write(n, fn) \
|
||||
static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn)
|
||||
static struct kobj_attribute ksysfs_##n = __ATTR(n, 0200, NULL, fn)
|
||||
|
||||
#define kobj_attribute_rw(n, show, store) \
|
||||
static struct kobj_attribute ksysfs_##n = \
|
||||
__ATTR(n, S_IWUSR|S_IRUSR, show, store)
|
||||
__ATTR(n, 0600, show, store)
|
||||
|
||||
static inline void wake_up_allocators(struct cache_set *c)
|
||||
{
|
||||
struct cache *ca;
|
||||
unsigned i;
|
||||
unsigned int i;
|
||||
|
||||
for_each_cache(ca, c, i)
|
||||
wake_up_process(ca->alloc_thread);
|
||||
@@ -924,40 +926,43 @@ static inline void wait_for_kthread_stop(void)
|
||||
/* Forward declarations */
|
||||
|
||||
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
|
||||
void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
|
||||
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
|
||||
blk_status_t, const char *);
|
||||
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
|
||||
const char *);
|
||||
void bch_bbio_free(struct bio *, struct cache_set *);
|
||||
struct bio *bch_bbio_alloc(struct cache_set *);
|
||||
void bch_count_io_errors(struct cache *ca, blk_status_t error,
|
||||
int is_read, const char *m);
|
||||
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
|
||||
blk_status_t error, const char *m);
|
||||
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
|
||||
blk_status_t error, const char *m);
|
||||
void bch_bbio_free(struct bio *bio, struct cache_set *c);
|
||||
struct bio *bch_bbio_alloc(struct cache_set *c);
|
||||
|
||||
void __bch_submit_bbio(struct bio *, struct cache_set *);
|
||||
void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
|
||||
void __bch_submit_bbio(struct bio *bio, struct cache_set *c);
|
||||
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
|
||||
struct bkey *k, unsigned int ptr);
|
||||
|
||||
uint8_t bch_inc_gen(struct cache *, struct bucket *);
|
||||
void bch_rescale_priorities(struct cache_set *, int);
|
||||
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
|
||||
void bch_rescale_priorities(struct cache_set *c, int sectors);
|
||||
|
||||
bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
|
||||
void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
|
||||
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
|
||||
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
|
||||
|
||||
void __bch_bucket_free(struct cache *, struct bucket *);
|
||||
void bch_bucket_free(struct cache_set *, struct bkey *);
|
||||
void __bch_bucket_free(struct cache *ca, struct bucket *b);
|
||||
void bch_bucket_free(struct cache_set *c, struct bkey *k);
|
||||
|
||||
long bch_bucket_alloc(struct cache *, unsigned, bool);
|
||||
int __bch_bucket_alloc_set(struct cache_set *, unsigned,
|
||||
struct bkey *, int, bool);
|
||||
int bch_bucket_alloc_set(struct cache_set *, unsigned,
|
||||
struct bkey *, int, bool);
|
||||
bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
|
||||
unsigned, unsigned, bool);
|
||||
long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
|
||||
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
|
||||
struct bkey *k, int n, bool wait);
|
||||
int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
|
||||
struct bkey *k, int n, bool wait);
|
||||
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
|
||||
unsigned int sectors, unsigned int write_point,
|
||||
unsigned int write_prio, bool wait);
|
||||
bool bch_cached_dev_error(struct cached_dev *dc);
|
||||
|
||||
__printf(2, 3)
|
||||
bool bch_cache_set_error(struct cache_set *, const char *, ...);
|
||||
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...);
|
||||
|
||||
void bch_prio_write(struct cache *);
|
||||
void bch_write_bdev_super(struct cached_dev *, struct closure *);
|
||||
void bch_prio_write(struct cache *ca);
|
||||
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
|
||||
|
||||
extern struct workqueue_struct *bcache_wq;
|
||||
extern struct mutex bch_register_lock;
|
||||
@@ -969,30 +974,31 @@ extern struct kobj_type bch_cache_set_ktype;
|
||||
extern struct kobj_type bch_cache_set_internal_ktype;
|
||||
extern struct kobj_type bch_cache_ktype;
|
||||
|
||||
void bch_cached_dev_release(struct kobject *);
|
||||
void bch_flash_dev_release(struct kobject *);
|
||||
void bch_cache_set_release(struct kobject *);
|
||||
void bch_cache_release(struct kobject *);
|
||||
void bch_cached_dev_release(struct kobject *kobj);
|
||||
void bch_flash_dev_release(struct kobject *kobj);
|
||||
void bch_cache_set_release(struct kobject *kobj);
|
||||
void bch_cache_release(struct kobject *kobj);
|
||||
|
||||
int bch_uuid_write(struct cache_set *);
|
||||
void bcache_write_super(struct cache_set *);
|
||||
int bch_uuid_write(struct cache_set *c);
|
||||
void bcache_write_super(struct cache_set *c);
|
||||
|
||||
int bch_flash_dev_create(struct cache_set *c, uint64_t size);
|
||||
|
||||
int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
|
||||
void bch_cached_dev_detach(struct cached_dev *);
|
||||
void bch_cached_dev_run(struct cached_dev *);
|
||||
void bcache_device_stop(struct bcache_device *);
|
||||
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
|
||||
uint8_t *set_uuid);
|
||||
void bch_cached_dev_detach(struct cached_dev *dc);
|
||||
void bch_cached_dev_run(struct cached_dev *dc);
|
||||
void bcache_device_stop(struct bcache_device *d);
|
||||
|
||||
void bch_cache_set_unregister(struct cache_set *);
|
||||
void bch_cache_set_stop(struct cache_set *);
|
||||
void bch_cache_set_unregister(struct cache_set *c);
|
||||
void bch_cache_set_stop(struct cache_set *c);
|
||||
|
||||
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
|
||||
void bch_btree_cache_free(struct cache_set *);
|
||||
int bch_btree_cache_alloc(struct cache_set *);
|
||||
void bch_moving_init_cache_set(struct cache_set *);
|
||||
int bch_open_buckets_alloc(struct cache_set *);
|
||||
void bch_open_buckets_free(struct cache_set *);
|
||||
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb);
|
||||
void bch_btree_cache_free(struct cache_set *c);
|
||||
int bch_btree_cache_alloc(struct cache_set *c);
|
||||
void bch_moving_init_cache_set(struct cache_set *c);
|
||||
int bch_open_buckets_alloc(struct cache_set *c);
|
||||
void bch_open_buckets_free(struct cache_set *c);
|
||||
|
||||
int bch_cache_allocator_start(struct cache *ca);
|
||||
|
||||
|
||||
+79
-63
@@ -18,31 +18,31 @@
|
||||
|
||||
#ifdef CONFIG_BCACHE_DEBUG
|
||||
|
||||
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
|
||||
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set)
|
||||
{
|
||||
struct bkey *k, *next;
|
||||
|
||||
for (k = i->start; k < bset_bkey_last(i); k = next) {
|
||||
next = bkey_next(k);
|
||||
|
||||
printk(KERN_ERR "block %u key %u/%u: ", set,
|
||||
(unsigned) ((u64 *) k - i->d), i->keys);
|
||||
pr_err("block %u key %u/%u: ", set,
|
||||
(unsigned int) ((u64 *) k - i->d), i->keys);
|
||||
|
||||
if (b->ops->key_dump)
|
||||
b->ops->key_dump(b, k);
|
||||
else
|
||||
printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
|
||||
pr_err("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));
|
||||
|
||||
if (next < bset_bkey_last(i) &&
|
||||
bkey_cmp(k, b->ops->is_extents ?
|
||||
&START_KEY(next) : next) > 0)
|
||||
printk(KERN_ERR "Key skipped backwards\n");
|
||||
pr_err("Key skipped backwards\n");
|
||||
}
|
||||
}
|
||||
|
||||
void bch_dump_bucket(struct btree_keys *b)
|
||||
{
|
||||
unsigned i;
|
||||
unsigned int i;
|
||||
|
||||
console_lock();
|
||||
for (i = 0; i <= b->nsets; i++)
|
||||
@@ -53,7 +53,7 @@ void bch_dump_bucket(struct btree_keys *b)
|
||||
|
||||
int __bch_count_data(struct btree_keys *b)
|
||||
{
|
||||
unsigned ret = 0;
|
||||
unsigned int ret = 0;
|
||||
struct btree_iter iter;
|
||||
struct bkey *k;
|
||||
|
||||
@@ -128,7 +128,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
|
||||
|
||||
/* Keylists */
|
||||
|
||||
int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
|
||||
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s)
|
||||
{
|
||||
size_t oldsize = bch_keylist_nkeys(l);
|
||||
size_t newsize = oldsize + u64s;
|
||||
@@ -180,7 +180,7 @@ void bch_keylist_pop_front(struct keylist *l)
|
||||
/* Key/pointer manipulation */
|
||||
|
||||
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
|
||||
unsigned i)
|
||||
unsigned int i)
|
||||
{
|
||||
BUG_ON(i > KEY_PTRS(src));
|
||||
|
||||
@@ -194,7 +194,7 @@ void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
|
||||
|
||||
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
|
||||
{
|
||||
unsigned i, len = 0;
|
||||
unsigned int i, len = 0;
|
||||
|
||||
if (bkey_cmp(where, &START_KEY(k)) <= 0)
|
||||
return false;
|
||||
@@ -214,7 +214,7 @@ bool __bch_cut_front(const struct bkey *where, struct bkey *k)
|
||||
|
||||
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
|
||||
{
|
||||
unsigned len = 0;
|
||||
unsigned int len = 0;
|
||||
|
||||
if (bkey_cmp(where, k) >= 0)
|
||||
return false;
|
||||
@@ -240,9 +240,9 @@ bool __bch_cut_back(const struct bkey *where, struct bkey *k)
|
||||
#define BKEY_MANTISSA_MASK ((1 << BKEY_MANTISSA_BITS) - 1)
|
||||
|
||||
struct bkey_float {
|
||||
unsigned exponent:BKEY_EXPONENT_BITS;
|
||||
unsigned m:BKEY_MID_BITS;
|
||||
unsigned mantissa:BKEY_MANTISSA_BITS;
|
||||
unsigned int exponent:BKEY_EXPONENT_BITS;
|
||||
unsigned int m:BKEY_MID_BITS;
|
||||
unsigned int mantissa:BKEY_MANTISSA_BITS;
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
@@ -311,7 +311,9 @@ void bch_btree_keys_free(struct btree_keys *b)
|
||||
}
|
||||
EXPORT_SYMBOL(bch_btree_keys_free);
|
||||
|
||||
int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
|
||||
int bch_btree_keys_alloc(struct btree_keys *b,
|
||||
unsigned int page_order,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct bset_tree *t = b->set;
|
||||
|
||||
@@ -345,7 +347,7 @@ EXPORT_SYMBOL(bch_btree_keys_alloc);
|
||||
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
|
||||
bool *expensive_debug_checks)
|
||||
{
|
||||
unsigned i;
|
||||
unsigned int i;
|
||||
|
||||
b->ops = ops;
|
||||
b->expensive_debug_checks = expensive_debug_checks;
|
||||
@@ -370,7 +372,7 @@ EXPORT_SYMBOL(bch_btree_keys_init);
|
||||
* return array index next to j when does in-order traverse
|
||||
* of a binary tree which is stored in a linear array
|
||||
*/
|
||||
static unsigned inorder_next(unsigned j, unsigned size)
|
||||
static unsigned int inorder_next(unsigned int j, unsigned int size)
|
||||
{
|
||||
if (j * 2 + 1 < size) {
|
||||
j = j * 2 + 1;
|
||||
@@ -387,7 +389,7 @@ static unsigned inorder_next(unsigned j, unsigned size)
|
||||
* return array index previous to j when does in-order traverse
|
||||
* of a binary tree which is stored in a linear array
|
||||
*/
|
||||
static unsigned inorder_prev(unsigned j, unsigned size)
|
||||
static unsigned int inorder_prev(unsigned int j, unsigned int size)
|
||||
{
|
||||
if (j * 2 < size) {
|
||||
j = j * 2;
|
||||
@@ -400,7 +402,8 @@ static unsigned inorder_prev(unsigned j, unsigned size)
|
||||
return j;
|
||||
}
|
||||
|
||||
/* I have no idea why this code works... and I'm the one who wrote it
|
||||
/*
|
||||
* I have no idea why this code works... and I'm the one who wrote it
|
||||
*
|
||||
* However, I do know what it does:
|
||||
* Given a binary tree constructed in an array (i.e. how you normally implement
|
||||
@@ -413,10 +416,12 @@ static unsigned inorder_prev(unsigned j, unsigned size)
|
||||
* extra is a function of size:
|
||||
* extra = (size - rounddown_pow_of_two(size - 1)) << 1;
|
||||
*/
|
||||
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
|
||||
static unsigned int __to_inorder(unsigned int j,
|
||||
unsigned int size,
|
||||
unsigned int extra)
|
||||
{
|
||||
unsigned b = fls(j);
|
||||
unsigned shift = fls(size - 1) - b;
|
||||
unsigned int b = fls(j);
|
||||
unsigned int shift = fls(size - 1) - b;
|
||||
|
||||
j ^= 1U << (b - 1);
|
||||
j <<= 1;
|
||||
@@ -433,14 +438,16 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
|
||||
* Return the cacheline index in bset_tree->data, where j is index
|
||||
* from a linear array which stores the auxiliar binary tree
|
||||
*/
|
||||
static unsigned to_inorder(unsigned j, struct bset_tree *t)
|
||||
static unsigned int to_inorder(unsigned int j, struct bset_tree *t)
|
||||
{
|
||||
return __to_inorder(j, t->size, t->extra);
|
||||
}
|
||||
|
||||
static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
|
||||
static unsigned int __inorder_to_tree(unsigned int j,
|
||||
unsigned int size,
|
||||
unsigned int extra)
|
||||
{
|
||||
unsigned shift;
|
||||
unsigned int shift;
|
||||
|
||||
if (j > extra)
|
||||
j += j - extra;
|
||||
@@ -457,7 +464,7 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
|
||||
* Return an index from a linear array which stores the auxiliar binary
|
||||
* tree, j is the cacheline index of t->data.
|
||||
*/
|
||||
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
|
||||
static unsigned int inorder_to_tree(unsigned int j, struct bset_tree *t)
|
||||
{
|
||||
return __inorder_to_tree(j, t->size, t->extra);
|
||||
}
|
||||
@@ -468,14 +475,15 @@ void inorder_test(void)
|
||||
unsigned long done = 0;
|
||||
ktime_t start = ktime_get();
|
||||
|
||||
for (unsigned size = 2;
|
||||
for (unsigned int size = 2;
|
||||
size < 65536000;
|
||||
size++) {
|
||||
unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
|
||||
unsigned i = 1, j = rounddown_pow_of_two(size - 1);
|
||||
unsigned int extra =
|
||||
(size - rounddown_pow_of_two(size - 1)) << 1;
|
||||
unsigned int i = 1, j = rounddown_pow_of_two(size - 1);
|
||||
|
||||
if (!(size % 4096))
|
||||
printk(KERN_NOTICE "loop %u, %llu per us\n", size,
|
||||
pr_notice("loop %u, %llu per us\n", size,
|
||||
done / ktime_us_delta(ktime_get(), start));
|
||||
|
||||
while (1) {
|
||||
@@ -518,30 +526,31 @@ void inorder_test(void)
|
||||
* of the previous key so we can walk backwards to it from t->tree[j]'s key.
|
||||
*/
|
||||
|
||||
static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
|
||||
unsigned offset)
|
||||
static struct bkey *cacheline_to_bkey(struct bset_tree *t,
|
||||
unsigned int cacheline,
|
||||
unsigned int offset)
|
||||
{
|
||||
return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
|
||||
}
|
||||
|
||||
static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
|
||||
static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
|
||||
{
|
||||
return ((void *) k - (void *) t->data) / BSET_CACHELINE;
|
||||
}
|
||||
|
||||
static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
|
||||
unsigned cacheline,
|
||||
static unsigned int bkey_to_cacheline_offset(struct bset_tree *t,
|
||||
unsigned int cacheline,
|
||||
struct bkey *k)
|
||||
{
|
||||
return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
|
||||
}
|
||||
|
||||
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
|
||||
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned int j)
|
||||
{
|
||||
return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
|
||||
}
|
||||
|
||||
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
|
||||
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned int j)
|
||||
{
|
||||
return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
|
||||
}
|
||||
@@ -550,7 +559,7 @@ static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
|
||||
* For the write set - the one we're currently inserting keys into - we don't
|
||||
* maintain a full search tree, we just keep a simple lookup table in t->prev.
|
||||
*/
|
||||
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
|
||||
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned int cacheline)
|
||||
{
|
||||
return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
|
||||
}
|
||||
@@ -576,14 +585,15 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
|
||||
* See make_bfloat() to check when most significant bit of f->exponent
|
||||
* is set or not.
|
||||
*/
|
||||
static inline unsigned bfloat_mantissa(const struct bkey *k,
|
||||
static inline unsigned int bfloat_mantissa(const struct bkey *k,
|
||||
struct bkey_float *f)
|
||||
{
|
||||
const uint64_t *p = &k->low - (f->exponent >> 6);
|
||||
|
||||
return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
|
||||
}
|
||||
|
||||
static void make_bfloat(struct bset_tree *t, unsigned j)
|
||||
static void make_bfloat(struct bset_tree *t, unsigned int j)
|
||||
{
|
||||
struct bkey_float *f = &t->tree[j];
|
||||
struct bkey *m = tree_to_bkey(t, j);
|
||||
@@ -631,7 +641,7 @@ static void make_bfloat(struct bset_tree *t, unsigned j)
|
||||
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
	if (t != b->set) {
		unsigned j = roundup(t[-1].size,
		unsigned int j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
@@ -686,13 +696,13 @@ void bch_bset_build_written_tree(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);
	struct bkey *prev = NULL, *k = t->data->start;
	unsigned j, cacheline = 1;
	unsigned int j, cacheline = 1;

	b->last_set_unwritten = 0;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
	t->size = min_t(unsigned int,
			bkey_to_cacheline(t, bset_bkey_last(t->data)),
			b->set->tree + btree_keys_cachelines(b) - t->tree);

@@ -732,7 +742,7 @@ EXPORT_SYMBOL(bch_bset_build_written_tree);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;
	unsigned int inorder, j = 1;

	for (t = b->set; t <= bset_tree_last(b); t++)
		if (k < bset_bkey_last(t->data))
@@ -779,14 +789,15 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
				      struct bset_tree *t,
				      struct bkey *k)
{
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);
	unsigned int shift = bkey_u64s(k);
	unsigned int j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/* k is the key we just inserted; we need to find the entry in the
	/*
	 * k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
@@ -794,7 +805,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
	    table_to_bkey(t, j) <= k)
		j++;

	/* Adjust all the lookup table entries, and find a new key for any that
	/*
	 * Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
@@ -819,7 +831,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
	     k != bset_bkey_last(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(t, t->size, k);
			t->prev[t->size] =
				bkey_to_cacheline_offset(t, t->size, k);
			t->size++;
		}
}
@@ -867,10 +880,10 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
}
EXPORT_SYMBOL(bch_bset_insert);

unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
			      struct bkey *replace_key)
{
	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
	unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
	struct bset *i = bset_tree_last(b)->data;
	struct bkey *m, *prev = NULL;
	struct btree_iter iter;
@@ -922,10 +935,10 @@ struct bset_search_iter {
static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
						      const struct bkey *search)
{
	unsigned li = 0, ri = t->size;
	unsigned int li = 0, ri = t->size;

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;
		unsigned int m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
@@ -944,7 +957,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;
	unsigned int inorder, j, n = 1;

	do {
		/*
@@ -958,7 +971,8 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
		 * p = 0;
		 * but a branch instruction is avoided.
		 */
		unsigned p = n << 4;
		unsigned int p = n << 4;

		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);
@@ -978,7 +992,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
		 * to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
			n = j * 2 + (((unsigned int)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
@@ -1109,6 +1123,7 @@ static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
					  struct bset_tree *start)
{
	struct bkey *ret = NULL;

	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

@@ -1184,7 +1199,8 @@ void bch_bset_sort_state_free(struct bset_sort_state *state)
	mempool_exit(&state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order)
{
	spin_lock_init(&state->time.lock);

@@ -1237,7 +1253,7 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
}

static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup,
			 unsigned int start, unsigned int order, bool fixup,
			 struct bset_sort_state *state)
{
	uint64_t start_time;
@@ -1288,7 +1304,7 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
	bch_time_stats_update(&state->time, start_time);
}

void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state)
{
	size_t order = b->page_order, keys = 0;
@@ -1298,7 +1314,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
	__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

	if (start) {
		unsigned i;
		unsigned int i;

		for (i = start; i <= b->nsets; i++)
			keys += b->set[i].data->keys;
@@ -1323,8 +1339,8 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->set->data, &iter, false, true);
@@ -1338,7 +1354,7 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,

void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
	unsigned crit = SORT_CRIT;
	unsigned int crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
@@ -1367,7 +1383,7 @@ EXPORT_SYMBOL(bch_btree_sort_lazy);

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->set[i];

+81
-63
@@ -163,10 +163,10 @@ struct bset_tree {
	 */

	/* size of the binary tree and prev array */
	unsigned size;
	unsigned int size;

	/* function of size - precalculated for to_inorder() */
	unsigned extra;
	unsigned int extra;

	/* copy of the last key in the set */
	struct bkey end;
@@ -187,18 +187,25 @@ struct bset_tree {
};

struct btree_keys_ops {
	bool (*sort_cmp)(struct btree_iter_set,
			 struct btree_iter_set);
	struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *);
	bool (*insert_fixup)(struct btree_keys *, struct bkey *,
			     struct btree_iter *, struct bkey *);
	bool (*key_invalid)(struct btree_keys *,
			    const struct bkey *);
	bool (*key_bad)(struct btree_keys *, const struct bkey *);
	bool (*key_merge)(struct btree_keys *,
			  struct bkey *, struct bkey *);
	void (*key_to_text)(char *, size_t, const struct bkey *);
	void (*key_dump)(struct btree_keys *, const struct bkey *);
	bool (*sort_cmp)(struct btree_iter_set l,
			 struct btree_iter_set r);
	struct bkey *(*sort_fixup)(struct btree_iter *iter,
				   struct bkey *tmp);
	bool (*insert_fixup)(struct btree_keys *b,
			     struct bkey *insert,
			     struct btree_iter *iter,
			     struct bkey *replace_key);
	bool (*key_invalid)(struct btree_keys *bk,
			    const struct bkey *k);
	bool (*key_bad)(struct btree_keys *bk,
			const struct bkey *k);
	bool (*key_merge)(struct btree_keys *bk,
			  struct bkey *l, struct bkey *r);
	void (*key_to_text)(char *buf,
			    size_t size,
			    const struct bkey *k);
	void (*key_dump)(struct btree_keys *keys,
			 const struct bkey *k);

	/*
	 * Only used for deciding whether to use START_KEY(k) or just the key
@@ -211,7 +218,7 @@ struct btree_keys {
	const struct btree_keys_ops *ops;
	uint8_t page_order;
	uint8_t nsets;
	unsigned last_set_unwritten:1;
	unsigned int last_set_unwritten:1;
	bool *expensive_debug_checks;

	/*
@@ -239,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}

static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
static inline unsigned int bset_byte_offset(struct btree_keys *b,
					    struct bset *i)
{
	return ((size_t) i) - ((size_t) b->set->data);
}

static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
static inline unsigned int bset_sector_offset(struct btree_keys *b,
					      struct bset *i)
{
	return bset_byte_offset(b, i) >> 9;
}
@@ -273,25 +282,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
}

static inline struct bset *bset_next_set(struct btree_keys *b,
					 unsigned block_bytes)
					 unsigned int block_bytes)
{
	struct bset *i = bset_tree_last(b)->data;

	return ((void *) i) + roundup(set_bytes(i), block_bytes);
}

void bch_btree_keys_free(struct btree_keys *);
int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
			 bool *);
void bch_btree_keys_free(struct btree_keys *b);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
			 gfp_t gfp);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks);

void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
void bch_bset_build_written_tree(struct btree_keys *);
void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
			      struct bkey *);
void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
void bch_bset_build_written_tree(struct btree_keys *b);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
				  struct bkey *replace_key);

enum {
	BTREE_INSERT_STATUS_NO_INSERT = 0,
@@ -313,18 +324,21 @@ struct btree_iter {
	} data[MAX_BSETS];
};

typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *);
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);

struct bkey *bch_btree_iter_next(struct btree_iter *);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
					struct btree_keys *, ptr_filter_fn);
struct bkey *bch_btree_iter_next(struct btree_iter *iter);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b,
					ptr_filter_fn fn);

void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
				 struct bkey *);
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end);
struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search);

struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
			       const struct bkey *);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search);

/*
 * Returns the first key that is strictly greater than search
@@ -349,21 +363,23 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
struct bset_sort_state {
	mempool_t pool;

	unsigned page_order;
	unsigned crit_factor;
	unsigned int page_order;
	unsigned int crit_factor;

	struct time_stats time;
};

void bch_bset_sort_state_free(struct bset_sort_state *);
int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
			 struct bset_sort_state *);
void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
				    struct bset_sort_state *);
void bch_btree_sort_partial(struct btree_keys *, unsigned,
			    struct bset_sort_state *);
void bch_bset_sort_state_free(struct bset_sort_state *state);
int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order);
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state);
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state);

static inline void bch_btree_sort(struct btree_keys *b,
				  struct bset_sort_state *state)
@@ -377,13 +393,13 @@ struct bset_stats {
	size_t floats, failed;
};

void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);

/* Bkey utility code */

#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d, (i)->keys)

static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
	return bkey_idx(i->start, idx);
}
@@ -401,10 +417,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}

void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
			      unsigned);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned int i);
bool __bch_cut_front(const struct bkey *where, struct bkey *k);
bool __bch_cut_back(const struct bkey *where, struct bkey *k);

static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
@@ -522,18 +538,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
	return bch_keylist_nkeys(l) * sizeof(uint64_t);
}

struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
int __bch_keylist_realloc(struct keylist *, unsigned);
struct bkey *bch_keylist_pop(struct keylist *l);
void bch_keylist_pop_front(struct keylist *l);
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);

/* Debug stuff */

#ifdef CONFIG_BCACHE_DEBUG

int __bch_count_data(struct btree_keys *);
void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
void bch_dump_bucket(struct btree_keys *);
int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
				     const char *fmt,
				     ...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);

#else

@@ -541,7 +559,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
	__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);

#endif

+42
-30
@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
@@ -287,6 +287,7 @@ err:
static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

@@ -435,7 +436,10 @@ static void do_btree_node_write(struct btree *b)

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/* No problem for multipage bvec since the bio is just allocated */
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

@@ -479,7 +483,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

@@ -581,7 +585,7 @@ static void mca_bucket_free(struct btree *b)
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}
@@ -589,7 +593,7 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
@@ -604,6 +608,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
					  struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

@@ -620,7 +625,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
	return b;
}

static int mca_reap(struct btree *b, unsigned min_order, bool flush)
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

@@ -746,6 +751,7 @@ void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink.list.next)
@@ -786,7 +792,7 @@ void bch_btree_cache_free(struct cache_set *c)

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@@ -1124,6 +1130,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
@@ -1136,7 +1143,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

@@ -1157,7 +1164,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

@@ -1181,7 +1188,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned i;
	unsigned int i;
	struct bucket *g;

	/*
@@ -1219,7 +1226,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned,
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

@@ -1233,7 +1240,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,

void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
@@ -1259,7 +1266,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned keys = 0, good_keys = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;
@@ -1302,16 +1309,18 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)

struct gc_merge_info {
	struct btree *b;
	unsigned keys;
	unsigned int keys;
};

static int bch_btree_insert_node(struct btree *, struct btree_op *,
				 struct keylist *, atomic_t *, struct bkey *);
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key);

static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	unsigned int i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
@@ -1511,11 +1520,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
	return -EINTR;
}

static unsigned btree_gc_count_keys(struct btree *b)
static unsigned int btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;
	unsigned int ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);
@@ -1678,7 +1687,7 @@ static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;
	unsigned int i;

	if (!c->gc_mark_valid)
		return;
@@ -1704,7 +1713,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned i;
	unsigned int i;

	mutex_lock(&c->bucket_lock);

@@ -1722,7 +1731,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;
		unsigned int j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
@@ -1814,7 +1823,7 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;
	unsigned int i;

	for_each_cache(ca, c, i)
		if (ca->invalidate_needs_gc)
@@ -1905,7 +1914,7 @@ void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;
	unsigned int i;

	bch_btree_gc_finish(c);

@@ -1945,7 +1954,7 @@ void bch_initial_gc_finish(struct cache_set *c)
static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned status;
	unsigned int status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

@@ -2044,7 +2053,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
		block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;
		unsigned int keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

@@ -2222,10 +2231,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}
	}

	SET_KEY_PTRS(check_key, 1);
@@ -2300,7 +2309,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,

void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);
@@ -2412,7 +2421,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,

struct refill {
	struct btree_op op;
	unsigned nr_found;
	unsigned int nr_found;
	struct keybuf *buf;
	struct bkey *end;
	keybuf_pred_fn *pred;
@@ -2488,6 +2497,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start = START_KEY(&w->key);

@@ -2519,6 +2529,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
@@ -2545,6 +2556,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

+40
-36
@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
	return bset_tree_last(&b->keys)->data;
}

static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
	return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}
@@ -213,7 +213,7 @@ struct btree_op {
	/* Btree level at which we start taking write locks */
	short lock;

	unsigned insert_collision:1;
	unsigned int insert_collision:1;
};

static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b)
	(w ? up_write : up_read)(&b->lock);
}

void bch_btree_node_read_done(struct btree *);
void __bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_write(struct btree *, struct closure *);
void bch_btree_node_read_done(struct btree *b);
void __bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_node_write(struct btree *b, struct closure *parent);

void bch_btree_set_root(struct btree *);
struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *,
				     int, bool, struct btree *);
struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *,
				 struct bkey *, int, bool, struct btree *);
void bch_btree_set_root(struct btree *b);
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent);
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent);

int bch_btree_insert_check_key(struct btree *, struct btree_op *,
			       struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *,
		     atomic_t *, struct bkey *);
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key);
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key);

int bch_gc_thread_start(struct cache_set *);
void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *);
int bch_btree_check(struct cache_set *);
void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
int bch_gc_thread_start(struct cache_set *c);
void bch_initial_gc_finish(struct cache_set *c);
void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);

static inline void wake_up_gc(struct cache_set *c)
{
@@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c)

#define MAP_END_KEY	1

typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
			  struct bkey *, btree_map_nodes_fn *, int);
typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags);

static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
				      struct bkey *from, btree_map_nodes_fn *fn)
@@ -290,21 +292,23 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
	return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
}

typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
				struct bkey *);
int bch_btree_map_keys(struct btree_op *, struct cache_set *,
		       struct bkey *, btree_map_keys_fn *, int);
typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
				struct bkey *k);
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags);

typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *,
		       struct bkey *, keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
					  struct bkey *, keybuf_pred_fn *);
void bch_keybuf_init(struct keybuf *buf);
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred);
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end);
void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred);
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif

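Taken together, the bcache hunks above are mechanical cleanups with no behavioural change: bare "unsigned" is spelled out as "unsigned int", function prototypes gain parameter names, over-long lines are wrapped, and one-line block comments are reflowed into the usual kernel multi-line form. A minimal before/after sketch of the type-spelling part of the pattern, on a made-up helper rather than code taken from this diff:

/* Before: checkpatch.pl warns "Prefer 'unsigned int' to bare use of 'unsigned'". */
static unsigned count_bits(unsigned x)
{
	unsigned n = 0;	/* running population count */

	while (x) {
		x &= x - 1;	/* clear the lowest set bit */
		n++;
	}
	return n;
}

/*
 * After: the integer type is spelled out everywhere; the generated
 * code is identical, only the source style changes.
 */
static unsigned int count_bits(unsigned int x)
{
	unsigned int n = 0;	/* running population count */

	while (x) {
		x &= x - 1;	/* clear the lowest set bit */
		n++;
	}
	return n;
}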
Some files were not shown because too many files have changed in this diff.