Diffstat (limited to 'include/linux/blk-cgroup.h')
-rw-r--r--  include/linux/blk-cgroup.h  | 17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index bed9e43f9426..46139c0f510f 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -22,6 +22,7 @@
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>
+#include <linux/blkdev.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
@@ -768,6 +769,21 @@ static inline void blkcg_use_delay(struct blkcg_gq *blkg)
atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

+/**
+ * blk_cgroup_mergeable - Determine whether to allow or disallow merges
+ * @rq: request to merge into
+ * @bio: bio to merge
+ *
+ * @bio and @rq should belong to the same cgroup and their issue_as_root should
+ * match. The latter is necessary as we don't want to throttle e.g. a metadata
+ * update because it happens to be next to a regular IO.
+ */
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
+{
+ return rq->bio->bi_blkg == bio->bi_blkg &&
+ bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
+}
+
static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
int old = atomic_read(&blkg->use_delay);
@@ -868,6 +884,7 @@ static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
+static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
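
For illustration only, below is a minimal userspace sketch of the cgroup-boundary merge check added above. It is not kernel code: the mock_* struct layouts, the issue_as_root flag standing in for bio_issue_as_root_blkg(), and main() are assumptions made so the logic can be compiled and run on its own. In the kernel the helper is presumably consulted from the request-merge paths (e.g. in block/blk-merge.c), so that bios from different cgroups, or a root-issued metadata update sitting next to a regular IO, never end up sharing one request.

/*
 * Illustrative sketch only -- not kernel code.  Models the merge check
 * added in this patch with mock "request"/"bio" structs so the
 * cgroup-boundary logic can be exercised in userspace.  All mock_* names,
 * the issue_as_root flag, and main() are assumptions for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_blkcg_gq { int id; };            /* stands in for struct blkcg_gq */

struct mock_bio {
	struct mock_blkcg_gq *bi_blkg;       /* owning cgroup of this IO */
	bool issue_as_root;                  /* e.g. metadata issued as root */
};

struct mock_request {
	struct mock_bio *bio;                /* first bio already in the request */
};

/* Mirrors blk_cgroup_mergeable(): merge only within one cgroup, and only
 * when both IOs agree on whether they are issued on behalf of root. */
static bool mock_blk_cgroup_mergeable(struct mock_request *rq, struct mock_bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
	       rq->bio->issue_as_root == bio->issue_as_root;
}

int main(void)
{
	struct mock_blkcg_gq cg_a = { .id = 1 }, cg_b = { .id = 2 };
	struct mock_bio in_flight = { .bi_blkg = &cg_a, .issue_as_root = false };
	struct mock_request rq = { .bio = &in_flight };

	struct mock_bio same_cg  = { .bi_blkg = &cg_a, .issue_as_root = false };
	struct mock_bio other_cg = { .bi_blkg = &cg_b, .issue_as_root = false };
	struct mock_bio root_md  = { .bi_blkg = &cg_a, .issue_as_root = true };

	printf("same cgroup, same mode : %d\n", mock_blk_cgroup_mergeable(&rq, &same_cg));  /* 1 */
	printf("different cgroup       : %d\n", mock_blk_cgroup_mergeable(&rq, &other_cg)); /* 0 */
	printf("root-issued metadata   : %d\n", mock_blk_cgroup_mergeable(&rq, &root_md));  /* 0 */
	return 0;
}

Running the sketch prints 1 only for the same-cgroup, same-mode case; the other two merges are rejected, mirroring the intent described in the kernel-doc comment above.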