/* Throttling is performed over a 100ms slice, after which the slice is renewed */
static unsigned long throtl_slice = HZ/10; /* 100 ms */
-static struct blkio_policy_type blkio_policy_throtl;
+static struct blkcg_policy blkcg_policy_throtl;
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
};
struct throtl_grp {
+ /* must be the first member */
+ struct blkg_policy_data pd;
+
/* active throtl group service_tree member */
struct rb_node rb_node;
static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);
-static inline struct throtl_grp *blkg_to_tg(struct blkio_group *blkg)
+static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
+{
+ return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
+}
+
+static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
{
- return blkg_to_pdata(blkg, &blkio_policy_throtl);
+ return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
}
-static inline struct blkio_group *tg_to_blkg(struct throtl_grp *tg)
+static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
- return pdata_to_blkg(tg);
+ return pd_to_blkg(&tg->pd);
}
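/*
 * Illustration (assumption, the allocation side is not part of this patch):
 * the blkcg core is expected to allocate ->pd_size bytes per (blkg, policy)
 * pair and use the start of that area as the struct blkg_policy_data, which
 * is why @pd must be the first member of throtl_grp.  The helpers above are
 * then plain container_of() round trips, roughly:
 *
 *	struct throtl_grp *tg = blkg_to_tg(blkg);
 *	struct blkcg_gq *back = tg_to_blkg(tg);	(back == blkg)
 */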
static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
THROTL_TG_FNS(on_rr);
-#define throtl_log_tg(td, tg, fmt, args...) \
- blk_add_trace_msg((td)->queue, "throtl %s " fmt, \
- blkg_path(tg_to_blkg(tg)), ##args); \
+#define throtl_log_tg(td, tg, fmt, args...) do { \
+ char __pbuf[128]; \
+ \
+ blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf)); \
+ blk_add_trace_msg((td)->queue, "throtl %s " fmt, __pbuf, ##args); \
+} while (0)
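/*
 * Illustration: blkg_path() now copies the cgroup path into a caller-provided
 * buffer instead of returning a pointer, hence the on-stack __pbuf and the
 * do { } while (0) wrapper that keeps the macro usable as a single statement.
 * A hypothetical call site:
 *
 *	if (throttled)
 *		throtl_log_tg(td, tg, "bio throttled");
 */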
#define throtl_log(td, fmt, args...) \
blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
goto alloc_stats;
}
-static void throtl_init_blkio_group(struct blkio_group *blkg)
+static void throtl_pd_init(struct blkcg_gq *blkg)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
RB_CLEAR_NODE(&tg->rb_node);
bio_list_init(&tg->bio_lists[0]);
* but percpu allocator can't be called from IO path. Queue tg on
* tg_stats_alloc_list and allocate from work item.
*/
- spin_lock(&tg_stats_alloc_lock);
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
- spin_unlock(&tg_stats_alloc_lock);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
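/*
 * Note (assumption, the reason is not visible in the hunk): pd_init/pd_exit
 * can run with the queue_lock held and interrupts disabled, so
 * tg_stats_alloc_lock is taken with the _irqsave/_irqrestore variants to
 * avoid re-enabling interrupts (and tripping lockdep) from these paths.
 */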
-static void throtl_exit_blkio_group(struct blkio_group *blkg)
+static void throtl_pd_exit(struct blkcg_gq *blkg)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
+ unsigned long flags;
- spin_lock(&tg_stats_alloc_lock);
+ spin_lock_irqsave(&tg_stats_alloc_lock, flags);
list_del_init(&tg->stats_alloc_node);
- spin_unlock(&tg_stats_alloc_lock);
+ spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
free_percpu(tg->stats_cpu);
}
-static void throtl_reset_group_stats(struct blkio_group *blkg)
+static void throtl_pd_reset_stats(struct blkcg_gq *blkg)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
int cpu;
}
}
-static struct
-throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
+static struct throtl_grp *throtl_lookup_tg(struct throtl_data *td,
+ struct blkcg *blkcg)
{
/*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
*/
- if (blkcg == &blkio_root_cgroup)
+ if (blkcg == &blkcg_root)
return td_root_tg(td);
return blkg_to_tg(blkg_lookup(blkcg, td->queue));
}
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
- struct blkio_cgroup *blkcg)
+ struct blkcg *blkcg)
{
struct request_queue *q = td->queue;
struct throtl_grp *tg = NULL;
/*
- * This is the common case when there are no blkio cgroups.
- * Avoid lookup in this case
+ * This is the common case when there are no blkcgs. Avoid lookup
+ * in this case
*/
- if (blkcg == &blkio_root_cgroup) {
+ if (blkcg == &blkcg_root) {
tg = td_root_tg(td);
} else {
- struct blkio_group *blkg;
+ struct blkcg_gq *blkg;
blkg = blkg_lookup_create(blkcg, q);
return 0;
}
-static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
+static void throtl_update_dispatch_stats(struct blkcg_gq *blkg, u64 bytes,
int rw)
{
struct throtl_grp *tg = blkg_to_tg(blkg);
static void throtl_process_limit_change(struct throtl_data *td)
{
struct request_queue *q = td->queue;
- struct blkio_group *blkg, *n;
+ struct blkcg_gq *blkg, *n;
if (!td->limits_changed)
return;
}
}
-static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, void *pdata, int off)
+static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
+ struct blkg_policy_data *pd, int off)
{
- struct throtl_grp *tg = pdata;
+ struct throtl_grp *tg = pd_to_tg(pd);
struct blkg_rwstat rwstat = { }, tmp;
int i, cpu;
rwstat.cnt[i] += tmp.cnt[i];
}
- return __blkg_prfill_rwstat(sf, pdata, &rwstat);
+ return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
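/*
 * Sketch (assumption about the elided body): the summing loop above walks
 * every possible CPU and folds that CPU's counters into @rwstat before
 * printing, roughly:
 *
 *	for_each_possible_cpu(cpu) {
 *		struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 *
 *		tmp = blkg_rwstat_read((void *)sc + off);
 *		...
 *	}
 */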
static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *sf)
{
- struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
- blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkio_policy_throtl,
+ blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
cft->private, true);
return 0;
}
-static u64 tg_prfill_conf_u64(struct seq_file *sf, void *pdata, int off)
+static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- u64 v = *(u64 *)(pdata + off);
+ struct throtl_grp *tg = pd_to_tg(pd);
+ u64 v = *(u64 *)((void *)tg + off);
if (v == -1)
return 0;
- return __blkg_prfill_u64(sf, pdata, v);
+ return __blkg_prfill_u64(sf, pd, v);
}
-static u64 tg_prfill_conf_uint(struct seq_file *sf, void *pdata, int off)
+static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
+ int off)
{
- unsigned int v = *(unsigned int *)(pdata + off);
+ struct throtl_grp *tg = pd_to_tg(pd);
+ unsigned int v = *(unsigned int *)((void *)tg + off);
if (v == -1)
return 0;
- return __blkg_prfill_u64(sf, pdata, v);
+ return __blkg_prfill_u64(sf, pd, v);
}
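/*
 * Illustration (assumption, throtl_files is not shown in this excerpt):
 * cft->private is expected to hold an offset into struct throtl_grp, e.g.
 *
 *	.private = offsetof(struct throtl_grp, bps[READ]),
 *
 * which is why the prfill helpers now apply @off to the throtl_grp recovered
 * via pd_to_tg() instead of to a raw pdata pointer.
 */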
static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_u64,
- &blkio_policy_throtl, cft->private, false);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+ &blkcg_policy_throtl, cft->private, false);
return 0;
}
static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp), tg_prfill_conf_uint,
- &blkio_policy_throtl, cft->private, false);
+ blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+ &blkcg_policy_throtl, cft->private, false);
return 0;
}
static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
bool is_u64)
{
- struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+ struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
struct blkg_conf_ctx ctx;
struct throtl_grp *tg;
struct throtl_data *td;
int ret;
- ret = blkg_conf_prep(blkcg, &blkio_policy_throtl, buf, &ctx);
+ ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
if (ret)
return ret;
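/*
 * Sketch (assumption): blkg_conf_prep() parses the "MAJ:MIN VAL" string,
 * resolves the blkg for that device and returns it in @ctx; the caller then
 * picks up the group and value and must release the context, roughly:
 *
 *	tg = blkg_to_tg(ctx.blkg);
 *	...
 *	blkg_conf_finish(&ctx);
 */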
cancel_delayed_work_sync(&td->throtl_work);
}
-static struct blkio_policy_type blkio_policy_throtl = {
- .ops = {
- .blkio_init_group_fn = throtl_init_blkio_group,
- .blkio_exit_group_fn = throtl_exit_blkio_group,
- .blkio_reset_group_stats_fn = throtl_reset_group_stats,
- },
- .pdata_size = sizeof(struct throtl_grp),
- .cftypes = throtl_files,
+static struct blkcg_policy blkcg_policy_throtl = {
+ .pd_size = sizeof(struct throtl_grp),
+ .cftypes = throtl_files,
+
+ .pd_init_fn = throtl_pd_init,
+ .pd_exit_fn = throtl_pd_exit,
+ .pd_reset_stats_fn = throtl_pd_reset_stats,
};
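/*
 * Illustration: the policy is registered once at module init (see
 * blkcg_policy_register() in throtl_init() below) and then activated per
 * request_queue, which is what allocates the ->pd_size per-blkg areas
 * backing each throtl_grp:
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
 */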
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
struct throtl_data *td = q->td;
struct throtl_grp *tg;
bool rw = bio_data_dir(bio), update_disptime = true;
- struct blkio_cgroup *blkcg;
+ struct blkcg *blkcg;
bool throttled = false;
if (bio->bi_rw & REQ_THROTTLED) {
* just update the dispatch stats in lockless manner and return.
*/
rcu_read_lock();
- blkcg = bio_blkio_cgroup(bio);
+ blkcg = bio_blkcg(bio);
tg = throtl_lookup_tg(td, blkcg);
if (tg) {
if (tg_no_rule_group(tg, rw)) {
struct bio_list bl;
struct bio *bio;
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
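/*
 * Note (assumption): queue_lockdep_assert_held() boils down to
 * lockdep_assert_held() on q->queue_lock, i.e. a lockdep-only check that
 * compiles away without CONFIG_LOCKDEP, replacing the runtime
 * WARN_ON_ONCE() removed above.
 */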
bio_list_init(&bl);
td->queue = q;
/* activate policy */
- ret = blkcg_activate_policy(q, &blkio_policy_throtl);
+ ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
if (ret)
kfree(td);
return ret;
{
BUG_ON(!q->td);
throtl_shutdown_wq(q);
- blkcg_deactivate_policy(q, &blkio_policy_throtl);
+ blkcg_deactivate_policy(q, &blkcg_policy_throtl);
kfree(q->td);
}
if (!kthrotld_workqueue)
panic("Failed to create kthrotld\n");
- return blkio_policy_register(&blkio_policy_throtl);
+ return blkcg_policy_register(&blkcg_policy_throtl);
}
module_init(throtl_init);