-rw-r--r--   block/blk-cgroup.c   | 63
-rw-r--r--   block/blk-cgroup.h   | 12
-rw-r--r--   block/blk-throttle.c | 31
-rw-r--r--   block/cfq-iosched.c  | 37
4 files changed, 48 insertions(+), 95 deletions(-)
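In short, the hunks below drop the shared per-cpu printing helpers from blk-cgroup.c, keep a private per-cpu rwstat printer inside blk-throttle.c (which still accounts outside the queue lock), and let cfq update sectors/serviced/service_bytes directly in struct blkio_group_stats without per-cpu counters or irq fiddling; blkiocg_reset_stats now also clears the new service_bytes/serviced fields. For readers without the tree at hand, the blkg_rwstat helper used throughout is roughly as sketched here. This is an illustrative approximation inferred from the calls in the diff, not the exact definitions in block/blk-cgroup.h (which may differ, e.g. by carrying sequence counters for 32-bit safety):

#include <linux/types.h>
#include <linux/blk_types.h>	/* REQ_WRITE, REQ_SYNC */

/* Sketch of the rwstat counter the diff manipulates: one u64 per
 * read/write and sync/async bucket, summed across CPUs when printed. */
enum {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_NR,
};

struct blkg_rwstat {
	uint64_t cnt[BLKG_RWSTAT_NR];
};

/* blkg_rwstat_add(rwstat, rw, val): bump the buckets selected by the
 * request flags in @rw, as both cfq and blk-throttle do below. */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, int rw,
				   uint64_t val)
{
	rwstat->cnt[(rw & REQ_WRITE) ? BLKG_RWSTAT_WRITE
				     : BLKG_RWSTAT_READ] += val;
	rwstat->cnt[(rw & REQ_SYNC) ? BLKG_RWSTAT_SYNC
				    : BLKG_RWSTAT_ASYNC] += val;
}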
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 821a0a393e85..19ee29f1b7c5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -390,7 +390,6 @@ static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
		blkg_rwstat_reset(&sc->service_bytes);
		blkg_rwstat_reset(&sc->serviced);
-		blkg_stat_reset(&sc->sectors);
	}
}
@@ -417,6 +416,8 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
			struct blkio_group_stats *stats = &pd->stats;
			/* queued stats shouldn't be cleared */
+			blkg_rwstat_reset(&stats->service_bytes);
+			blkg_rwstat_reset(&stats->serviced);
			blkg_rwstat_reset(&stats->merged);
			blkg_rwstat_reset(&stats->service_time);
			blkg_rwstat_reset(&stats->wait_time);
@@ -577,66 +578,6 @@ int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
}
EXPORT_SYMBOL_GPL(blkcg_print_rwstat);
-static u64 blkg_prfill_cpu_stat(struct seq_file *sf,
-				struct blkg_policy_data *pd, int off)
-{
-	u64 v = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct blkio_group_stats_cpu *sc =
-			per_cpu_ptr(pd->stats_cpu, cpu);
-
-		v += blkg_stat_read((void *)sc + off);
-	}
-
-	return __blkg_prfill_u64(sf, pd, v);
-}
-
-static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
-				  struct blkg_policy_data *pd, int off)
-{
-	struct blkg_rwstat rwstat = { }, tmp;
-	int i, cpu;
-
-	for_each_possible_cpu(cpu) {
-		struct blkio_group_stats_cpu *sc =
-			per_cpu_ptr(pd->stats_cpu, cpu);
-
-		tmp = blkg_rwstat_read((void *)sc + off);
-		for (i = 0; i < BLKG_RWSTAT_NR; i++)
-			rwstat.cnt[i] += tmp.cnt[i];
-	}
-
-	return __blkg_prfill_rwstat(sf, pd, &rwstat);
-}
-
-/* print per-cpu blkg_stat specified by BLKCG_STAT_PRIV() */
-int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
-			 struct seq_file *sf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_stat,
-			  BLKCG_STAT_POL(cft->private),
-			  BLKCG_STAT_OFF(cft->private), false);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(blkcg_print_cpu_stat);
-
-/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
-int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			   struct seq_file *sf)
-{
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
-
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
-			  BLKCG_STAT_POL(cft->private),
-			  BLKCG_STAT_OFF(cft->private), true);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(blkcg_print_cpu_rwstat);
-
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 0b0a176ee007..c82de47ae69f 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -65,6 +65,10 @@ struct blkg_rwstat {
};
struct blkio_group_stats {
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
@@ -73,6 +77,8 @@ struct blkio_group_stats {
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
+	/* total sectors transferred */
+	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
@@ -104,8 +110,6 @@ struct blkio_group_stats_cpu {
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
-	/* total sectors transferred */
-	struct blkg_stat		sectors;
};
struct blkio_group_conf {
@@ -183,10 +187,6 @@ int blkcg_print_stat(struct cgroup *cgrp, struct cftype *cft,
		     struct seq_file *sf);
int blkcg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
		       struct seq_file *sf);
-int blkcg_print_cpu_stat(struct cgroup *cgrp, struct cftype *cft,
-			 struct seq_file *sf);
-int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
-			   struct seq_file *sf);
struct blkg_conf_ctx {
	struct gendisk			*disk;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5d647edc02a1..cb259bc46f43 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -582,7 +582,6 @@ static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
	stats_cpu = this_cpu_ptr(pd->stats_cpu);
-	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
@@ -843,6 +842,36 @@ static void throtl_update_blkio_group_common(struct throtl_data *td,
	throtl_schedule_delayed_work(td, 0);
}
+static u64 blkg_prfill_cpu_rwstat(struct seq_file *sf,
+				  struct blkg_policy_data *pd, int off)
+{
+	struct blkg_rwstat rwstat = { }, tmp;
+	int i, cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct blkio_group_stats_cpu *sc =
+			per_cpu_ptr(pd->stats_cpu, cpu);
+
+		tmp = blkg_rwstat_read((void *)sc + off);
+		for (i = 0; i < BLKG_RWSTAT_NR; i++)
+			rwstat.cnt[i] += tmp.cnt[i];
+	}
+
+	return __blkg_prfill_rwstat(sf, pd, &rwstat);
+}
+
+/* print per-cpu blkg_rwstat specified by BLKCG_STAT_PRIV() */
+static int blkcg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
+				  struct seq_file *sf)
+{
+	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
+
+	blkcg_print_blkgs(sf, blkcg, blkg_prfill_cpu_rwstat,
+			  BLKCG_STAT_POL(cft->private),
+			  BLKCG_STAT_OFF(cft->private), true);
+	return 0;
+}
+
static u64 blkg_prfill_conf_u64(struct seq_file *sf,
				struct blkg_policy_data *pd, int off)
{
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 49913804e8dd..effd89489506 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -624,29 +624,12 @@ static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
			struct blkio_policy_type *pol, uint64_t bytes,
			bool direction, bool sync)
{
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	struct blkio_group_stats_cpu *stats_cpu;
-	unsigned long flags;
-	/* If per cpu stats are not allocated yet, don't do any accounting. */
-	if (pd->stats_cpu == NULL)
-		return;
-
-	/*
-	 * Disabling interrupts to provide mutual exclusion between two
-	 * writes on same cpu. It probably is not needed for 64bit. Not
-	 * optimizing that case yet.
-	 */
-	local_irq_save(flags);
-
-	stats_cpu = this_cpu_ptr(pd->stats_cpu);
-
-	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
-	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
-	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
-
-	local_irq_restore(flags);
+	blkg_stat_add(&stats->sectors, bytes >> 9);
+	blkg_rwstat_add(&stats->serviced, rw, 1);
+	blkg_rwstat_add(&stats->service_bytes, rw, bytes);
}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
@@ -1520,20 +1503,20 @@ static struct cftype cfq_blkcg_files[] = {
	{
		.name = "sectors",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, sectors)),
-		.read_seq_string = blkcg_print_cpu_stat,
+				offsetof(struct blkio_group_stats, sectors)),
+		.read_seq_string = blkcg_print_stat,
	},
	{
		.name = "io_service_bytes",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, service_bytes)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
+				offsetof(struct blkio_group_stats, service_bytes)),
+		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_serviced",
		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats_cpu, serviced)),
-		.read_seq_string = blkcg_print_cpu_rwstat,
+				offsetof(struct blkio_group_stats, serviced)),
+		.read_seq_string = blkcg_print_rwstat,
	},
	{
		.name = "io_service_time",