author    Kent Overstreet <koverstreet@google.com>  2013-04-26 15:39:55 -0700
committer Kent Overstreet <koverstreet@google.com>  2013-06-26 17:09:15 -0700
commit    c37511b863f36c1cc6e18440717fd4cc0e881b8a
tree      64d82c648bd092f38c35c4b808411bc1cdb3a9f0 /drivers/md/bcache/journal.c
parent    5794351146199b9ac67a5ab1beab82be8bfd7b5d
bcache: Fix/revamp tracepoints
The tracepoints were reworked to be more sensible, and a null pointer
deref in one of the tracepoints was fixed.

Converted some of the pr_debug()s to tracepoints - this is partly a
performance optimization; it used to be that without DEBUG or
CONFIG_DYNAMIC_DEBUG enabled, pr_debug() was an empty macro, but at some
point it was changed to an empty inline function.

Some of the pr_debug() statements had rather expensive function calls as
part of the arguments, so this code was getting run unnecessarily even on
non-debug kernels - in some fast paths, too.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
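The macro-vs-inline-function distinction above is the whole performance
point: an empty macro discards its argument expressions at preprocessing
time, while an empty inline function must still evaluate arguments that
have side effects. A minimal userspace sketch of the difference - names
like pr_debug_macro and expensive_format are illustrative, not the
kernel's:

#include <stdio.h>

/* Old behavior: pr_debug() as an empty macro - the argument
 * expression vanishes at preprocessing time and never runs. */
#define pr_debug_macro(fmt, ...) do { } while (0)

/* Later behavior: pr_debug() as an empty inline function - the
 * arguments may have side effects, so they are still evaluated
 * even though the function body does nothing. */
static inline void pr_debug_inline(const char *fmt, const char *arg)
{
	(void)fmt;
	(void)arg;
}

/* Stands in for an expensive call like pkey(k) in the old code. */
static const char *expensive_format(int key)
{
	printf("expensive_format(%d) ran\n", key);
	return "formatted key";
}

int main(void)
{
	pr_debug_macro("%s", expensive_format(1));  /* never evaluated */
	pr_debug_inline("%s", expensive_format(2)); /* runs on every call */
	return 0;
}

Tracepoints sidestep the cost a third way: a disabled event's call site
reduces to a static branch that is patched out, so the arguments are not
evaluated either.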
Diffstat (limited to 'drivers/md/bcache/journal.c')
-rw-r--r--  drivers/md/bcache/journal.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 970d819d4350..5ca22149b749 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -9,6 +9,8 @@
 #include "debug.h"
 #include "request.h"
 
+#include <trace/events/bcache.h>
+
 /*
  * Journal replay/recovery:
  *
@@ -300,7 +302,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		for (k = i->j.start;
 		     k < end(&i->j);
 		     k = bkey_next(k)) {
-			pr_debug("%s", pkey(k));
+			trace_bcache_journal_replay_key(k);
+
 			bkey_copy(op->keys.top, k);
 			bch_keylist_push(&op->keys);
@@ -712,7 +715,8 @@ void bch_journal(struct closure *cl)
 	spin_lock(&c->journal.lock);
 
 	if (journal_full(&c->journal)) {
-		/* XXX: tracepoint */
+		trace_bcache_journal_full(c);
+
 		closure_wait(&c->journal.wait, cl);
 
 		journal_reclaim(c);
@@ -728,13 +732,15 @@ void bch_journal(struct closure *cl)
 	if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
 	    b > c->journal.blocks_free) {
-		/* XXX: If we were inserting so many keys that they won't fit in
+		trace_bcache_journal_entry_full(c);
+
+		/*
+		 * XXX: If we were inserting so many keys that they won't fit in
 		 * an _empty_ journal write, we'll deadlock. For now, handle
 		 * this in bch_keylist_realloc() - but something to think about.
 		 */
 		BUG_ON(!w->data->keys);
 
-		/* XXX: tracepoint */
 		BUG_ON(!closure_wait(&w->wait, cl));
 
 		closure_flush(&c->journal.io);
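
The trace_bcache_journal_full(), trace_bcache_journal_entry_full(), and
trace_bcache_journal_replay_key() calls used above are generated from
event declarations in the trace/events/bcache.h header this commit
includes. A simplified sketch of what such declarations look like - the
class name and logged fields here are illustrative, not necessarily the
commit's exact definitions:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

/* One event class shared by events that only log which cache set was
 * involved; each DEFINE_EVENT stamps out one trace_*() call site. */
DECLARE_EVENT_CLASS(bcache_cache_set,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c),

	TP_STRUCT__entry(
		__array(char, uuid, 16)
	),

	TP_fast_assign(
		memcpy(__entry->uuid, c->sb.set_uuid, 16);
	),

	TP_printk("%pU", __entry->uuid)
);

/* Generates trace_bcache_journal_full(c). */
DEFINE_EVENT(bcache_cache_set, bcache_journal_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

/* Generates trace_bcache_journal_entry_full(c). */
DEFINE_EVENT(bcache_cache_set, bcache_journal_entry_full,
	TP_PROTO(struct cache_set *c),
	TP_ARGS(c)
);

/* Generates trace_bcache_journal_replay_key(k); logging the key's
 * inode:offset is an assumed field choice for this sketch. */
TRACE_EVENT(bcache_journal_replay_key,
	TP_PROTO(struct bkey *k),
	TP_ARGS(k),

	TP_STRUCT__entry(
		__field(__u64, inode)
		__field(__u64, offset)
	),

	TP_fast_assign(
		__entry->inode	= KEY_INODE(k);
		__entry->offset	= KEY_OFFSET(k);
	),

	TP_printk("%llu:%llu", __entry->inode, __entry->offset)
);

#endif /* _TRACE_BCACHE_H */

#include <trace/define_trace.h>

Grouping the two journal-full events under one DECLARE_EVENT_CLASS keeps
the generated code small: the class body is emitted once and each
DEFINE_EVENT only adds a thin wrapper.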