You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
cd1604fab4
Currently both blk-throttle and cfq-iosched implement their own
blkio_group creation code in throtl_get_tg() and cfq_get_cfqg(). This
patch factors out the common code into blkg_lookup_create(), which
returns ERR_PTR value so that transitional failures due to queue
bypass can be distinguished from other failures.
* New blkio_policy_ops methods blkio_alloc_group_fn() and
blkio_link_group_fn added. Both are transitional and will be
removed once the blkg management code is fully moved into
blk-cgroup.c.
* blkio_alloc_group_fn() allocates policy-specific blkg which is
usually a larger data structure with blkg as the first entry and
initializes it. Note that initialization of blkg proper, including
percpu stats, is responsibility of blk-cgroup proper.
Note that default config (weight, bps...) initialization is done
from this method; otherwise, we end up violating locking order
between blkcg and q locks via blkcg_get_CONF() functions.
* blkio_link_group_fn() is called under queue_lock and responsible for
linking the blkg to the queue. blkcg side is handled by blk-cgroup
proper.
* The common blkg creation function is named blkg_lookup_create() and
blkiocg_lookup_group() is renamed to blkg_lookup() for consistency.
Also, throtl / cfq related functions are similarly [re]named for
consistency.
This simplifies blkcg policy implementations and enables further
cleanup.
-v2: Vivek noticed that blkg_lookup_create() incorrectly tested
blk_queue_dead() instead of blk_queue_bypass(), leading to a user of
the function creating a new blkg on a bypassing queue.
This is a bug introduced while relocating bypass patches before
this one. Fixed.
-v3: ERR_PTR patch folded into this one. @for_root added to
blkg_lookup_create() to allow creating root group on a bypassed
queue during elevator switch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
109 lines
3.4 KiB
C
109 lines
3.4 KiB
C
#ifndef _CFQ_H
|
|
#define _CFQ_H
|
|
#include "blk-cgroup.h"
|
|
|
|
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
|
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
|
|
struct blkio_group *curr_blkg, bool direction, bool sync)
|
|
{
|
|
blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
|
|
unsigned long dequeue)
|
|
{
|
|
blkiocg_update_dequeue_stats(blkg, dequeue);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
|
|
unsigned long time, unsigned long unaccounted_time)
|
|
{
|
|
blkiocg_update_timeslice_used(blkg, time, unaccounted_time);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
|
|
{
|
|
blkiocg_set_start_empty_time(blkg);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
|
|
bool direction, bool sync)
|
|
{
|
|
blkiocg_update_io_remove_stats(blkg, direction, sync);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
|
|
bool direction, bool sync)
|
|
{
|
|
blkiocg_update_io_merged_stats(blkg, direction, sync);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
|
|
{
|
|
blkiocg_update_idle_time_stats(blkg);
|
|
}
|
|
|
|
static inline void
|
|
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
|
|
{
|
|
blkiocg_update_avg_queue_size_stats(blkg);
|
|
}
|
|
|
|
static inline void
|
|
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
|
|
{
|
|
blkiocg_update_set_idle_time_stats(blkg);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
|
|
uint64_t bytes, bool direction, bool sync)
|
|
{
|
|
blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
|
|
}
|
|
|
|
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
|
|
{
|
|
blkiocg_update_completion_stats(blkg, start_time, io_start_time,
|
|
direction, sync);
|
|
}
|
|
|
|
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
|
|
{
|
|
return blkiocg_del_blkio_group(blkg);
|
|
}
|
|
|
|
#else /* CFQ_GROUP_IOSCHED */
|
|
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
|
|
struct blkio_group *curr_blkg, bool direction, bool sync) {}
|
|
|
|
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
|
|
unsigned long dequeue) {}
|
|
|
|
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
|
|
unsigned long time, unsigned long unaccounted_time) {}
|
|
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
|
|
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
|
|
bool direction, bool sync) {}
|
|
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
|
|
bool direction, bool sync) {}
|
|
/* No-op stub when CONFIG_CFQ_GROUP_IOSCHED is disabled. */
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
static inline void
|
|
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
|
|
|
|
static inline void
|
|
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
|
|
|
|
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
|
|
uint64_t bytes, bool direction, bool sync) {}
|
|
/* No-op stub when CONFIG_CFQ_GROUP_IOSCHED is disabled. */
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
			uint64_t start_time, uint64_t io_start_time,
			bool direction, bool sync) {}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
#endif /* CFQ_GROUP_IOSCHED */
|
|
#endif
|