mirror of
https://github.com/torvalds/linux.git
synced 2024-11-17 01:22:07 +00:00
9f13ef678e
blkgs are chained from both blkcgs and request_queues and thus subjected to two locks - blkcg->lock and q->queue_lock. As both blkcg and q can go away anytime, locking during removal is tricky. It's currently solved by wrapping removal inside RCU, which makes the synchronization complex. There are three locks to worry about - the outer RCU, q lock and blkcg lock, and it leads to nasty subtle complications like conditional synchronize_rcu() on queue exit paths. For all other paths, blkcg lock is naturally nested inside q lock and the only exception is blkcg removal path, which is a very cold path and can be implemented as clumsy but conceptually-simple reverse double lock dancing. This patch updates blkg removal path such that blkgs are removed while holding both q and blkcg locks, which is trivial for request queue exit path - blkg_destroy_all(). The blkcg removal path, blkiocg_pre_destroy(), implements reverse double lock dancing essentially identical to ioc_release_fn(). This simplifies blkg locking - no half-dead blkgs to worry about. Now unnecessary RCU annotations will be removed by the next patch. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
119 lines
4.0 KiB
C
119 lines
4.0 KiB
C
#ifndef _CFQ_H
|
|
#define _CFQ_H
|
|
#include "blk-cgroup.h"
|
|
|
|
#ifdef CONFIG_CFQ_GROUP_IOSCHED
|
|
/* CFQ-side wrapper: forwards request-add accounting to blkiocg_update_io_add_stats(). */
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg,
			bool direction, bool sync)
{
	blkiocg_update_io_add_stats(blkg, pol, curr_blkg, direction, sync);
}
|
|
|
|
/* CFQ-side wrapper: forwards dequeue accounting to blkiocg_update_dequeue_stats(). */
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue)
{
	blkiocg_update_dequeue_stats(blkg, pol, dequeue);
}
|
|
|
|
/*
 * CFQ-side wrapper: forwards timeslice usage (@time plus @unaccounted_time)
 * to blkiocg_update_timeslice_used().
 */
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time)
{
	blkiocg_update_timeslice_used(blkg, pol, time, unaccounted_time);
}
|
|
|
|
/* CFQ-side wrapper: forwards to blkiocg_set_start_empty_time(). */
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol)
{
	blkiocg_set_start_empty_time(blkg, pol);
}
|
|
|
|
/* CFQ-side wrapper: forwards request-removal accounting to blkiocg_update_io_remove_stats(). */
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync)
{
	blkiocg_update_io_remove_stats(blkg, pol, direction, sync);
}
|
|
|
|
/* CFQ-side wrapper: forwards merge accounting to blkiocg_update_io_merged_stats(). */
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync)
{
	blkiocg_update_io_merged_stats(blkg, pol, direction, sync);
}
|
|
|
|
/* CFQ-side wrapper: forwards to blkiocg_update_idle_time_stats(). */
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol)
{
	blkiocg_update_idle_time_stats(blkg, pol);
}
|
|
|
|
/* CFQ-side wrapper: forwards to blkiocg_update_avg_queue_size_stats(). */
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol)
{
	blkiocg_update_avg_queue_size_stats(blkg, pol);
}
|
|
|
|
/* CFQ-side wrapper: forwards to blkiocg_update_set_idle_time_stats(). */
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol)
{
	blkiocg_update_set_idle_time_stats(blkg, pol);
}
|
|
|
|
/*
 * CFQ-side wrapper: forwards dispatch accounting (@bytes dispatched) to
 * blkiocg_update_dispatch_stats().
 */
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync)
{
	blkiocg_update_dispatch_stats(blkg, pol, bytes, direction, sync);
}
|
|
|
|
/*
 * CFQ-side wrapper: forwards completion accounting (both timestamps) to
 * blkiocg_update_completion_stats().
 */
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync)
{
	blkiocg_update_completion_stats(blkg, pol, start_time, io_start_time,
					direction, sync);
}
|
|
|
|
#else /* CFQ_GROUP_IOSCHED */
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol,
			struct blkio_group *curr_blkg, bool direction,
			bool sync) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long dequeue) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
			struct blkio_policy_type *pol, unsigned long time,
			unsigned long unaccounted_time) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, bool direction,
			bool sync) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
|
|
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol) { }
|
|
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync) { }
|
|
/* No-op stub: group stat accounting compiled out (!CONFIG_CFQ_GROUP_IOSCHED). */
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t start_time,
			uint64_t io_start_time, bool direction, bool sync) { }
|
|
|
|
#endif /* CFQ_GROUP_IOSCHED */
|
|
#endif
|