Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/asoc.h        45
-rw-r--r--  include/trace/events/btrfs.h        6
-rw-r--r--  include/trace/events/ext4.h         6
-rw-r--r--  include/trace/events/sock.h        68
-rw-r--r--  include/trace/events/udp.h         32
-rw-r--r--  include/trace/events/vmscan.h      77
-rw-r--r--  include/trace/events/writeback.h  183
-rw-r--r--  include/trace/events/xen.h        504
8 files changed, 899 insertions, 22 deletions
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index ae973d2e27a1..603f5a0f0365 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -9,6 +9,7 @@
struct snd_soc_jack;
struct snd_soc_codec;
+struct snd_soc_platform;
struct snd_soc_card;
struct snd_soc_dapm_widget;
@@ -59,6 +60,50 @@ DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
);
+DECLARE_EVENT_CLASS(snd_soc_preg,
+
+ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(platform, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, platform->name )
+ __field( int, id )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, platform->name);
+ __entry->id = platform->id;
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("platform=%s.%d reg=%x val=%x", __get_str(name),
+ (int)__entry->id, (unsigned int)__entry->reg,
+ (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_preg, snd_soc_preg_write,
+
+ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(platform, reg, val)
+
+);
+
+DEFINE_EVENT(snd_soc_preg, snd_soc_preg_read,
+
+ TP_PROTO(struct snd_soc_platform *platform, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(platform, reg, val)
+
+);
+
DECLARE_EVENT_CLASS(snd_soc_card,
TP_PROTO(struct snd_soc_card *card, int val),
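For orientation, a minimal, hypothetical sketch (not part of the patch) of how a caller might emit the new snd_soc_preg_* events; only the tracepoint signatures above are taken from the diff, everything else here is assumed:

#include <sound/soc.h>            /* assumed to declare struct snd_soc_platform */
#include <trace/events/asoc.h>

/* Hypothetical helper; the register access itself is elided. */
static unsigned int example_platform_read(struct snd_soc_platform *platform,
					  unsigned int reg)
{
	unsigned int val = 0;

	/* ... perform the actual platform register read into val ... */
	trace_snd_soc_preg_read(platform, reg, val);
	return val;
}
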
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4114129f0794..b31702ac15be 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -284,7 +284,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( long, pages_skipped )
__field( loff_t, range_start )
__field( loff_t, range_end )
- __field( char, nonblocking )
__field( char, for_kupdate )
__field( char, for_reclaim )
__field( char, range_cyclic )
@@ -299,7 +298,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->pages_skipped = wbc->pages_skipped;
__entry->range_start = wbc->range_start;
__entry->range_end = wbc->range_end;
- __entry->nonblocking = wbc->nonblocking;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
@@ -310,13 +308,13 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_printk("root = %llu(%s), ino = %lu, page_index = %lu, "
"nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
- "range_end = %llu, nonblocking = %d, for_kupdate = %d, "
+ "range_end = %llu, for_kupdate = %d, "
"for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
__entry->range_start, __entry->range_end,
- __entry->nonblocking, __entry->for_kupdate,
+ __entry->for_kupdate,
__entry->for_reclaim, __entry->range_cyclic,
(unsigned long)__entry->writeback_index)
);
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 5ce2b2f5f524..6363193a3418 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -380,7 +380,6 @@ TRACE_EVENT(ext4_da_writepages_result,
__field( int, pages_written )
__field( long, pages_skipped )
__field( int, sync_mode )
- __field( char, more_io )
__field( pgoff_t, writeback_index )
),
@@ -391,16 +390,15 @@ TRACE_EVENT(ext4_da_writepages_result,
__entry->pages_written = pages_written;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
- __entry->more_io = wbc->more_io;
__entry->writeback_index = inode->i_mapping->writeback_index;
),
TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
- " more_io %d sync_mode %d writeback_index %lu",
+ "sync_mode %d writeback_index %lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino, __entry->ret,
__entry->pages_written, __entry->pages_skipped,
- __entry->more_io, __entry->sync_mode,
+ __entry->sync_mode,
(unsigned long) __entry->writeback_index)
);
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
new file mode 100644
index 000000000000..779abb91df81
--- /dev/null
+++ b/include/trace/events/sock.h
@@ -0,0 +1,68 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sock
+
+#if !defined(_TRACE_SOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SOCK_H
+
+#include <net/sock.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sock_rcvqueue_full,
+
+ TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+ TP_ARGS(sk, skb),
+
+ TP_STRUCT__entry(
+ __field(int, rmem_alloc)
+ __field(unsigned int, truesize)
+ __field(int, sk_rcvbuf)
+ ),
+
+ TP_fast_assign(
+ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ __entry->truesize = skb->truesize;
+ __entry->sk_rcvbuf = sk->sk_rcvbuf;
+ ),
+
+ TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
+ __entry->rmem_alloc, __entry->truesize, __entry->sk_rcvbuf)
+);
+
+TRACE_EVENT(sock_exceed_buf_limit,
+
+ TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
+
+ TP_ARGS(sk, prot, allocated),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(long *, sysctl_mem)
+ __field(long, allocated)
+ __field(int, sysctl_rmem)
+ __field(int, rmem_alloc)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, prot->name, 32);
+ __entry->sysctl_mem = prot->sysctl_mem;
+ __entry->allocated = allocated;
+ __entry->sysctl_rmem = prot->sysctl_rmem[0];
+ __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ ),
+
+ TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
+ "sysctl_rmem=%d rmem_alloc=%d",
+ __entry->name,
+ __entry->sysctl_mem[0],
+ __entry->sysctl_mem[1],
+ __entry->sysctl_mem[2],
+ __entry->allocated,
+ __entry->sysctl_rmem,
+ __entry->rmem_alloc)
+);
+
+#endif /* _TRACE_SOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
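As a usage illustration, a minimal, hypothetical receive-path check (not part of the patch) showing where trace_sock_rcvqueue_full() could fire; only the tracepoint signature comes from the header above:

#include <net/sock.h>
#include <trace/events/sock.h>

/* Hypothetical enqueue helper; the actual charging/queueing is elided. */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf) {
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}
	/* ... charge sk_rmem_alloc and queue the skb ... */
	return 0;
}
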
diff --git a/include/trace/events/udp.h b/include/trace/events/udp.h
new file mode 100644
index 000000000000..a664bb940973
--- /dev/null
+++ b/include/trace/events/udp.h
@@ -0,0 +1,32 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM udp
+
+#if !defined(_TRACE_UDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_UDP_H
+
+#include <linux/udp.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(udp_fail_queue_rcv_skb,
+
+ TP_PROTO(int rc, struct sock *sk),
+
+ TP_ARGS(rc, sk),
+
+ TP_STRUCT__entry(
+ __field(int, rc)
+ __field(__u16, lport)
+ ),
+
+ TP_fast_assign(
+ __entry->rc = rc;
+ __entry->lport = inet_sk(sk)->inet_num;
+ ),
+
+ TP_printk("rc=%d port=%hu", __entry->rc, __entry->lport)
+);
+
+#endif /* _TRACE_UDP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
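Similarly, a hypothetical error-path sketch (not part of the patch) for the new udp event; only trace_udp_fail_queue_rcv_skb()'s signature is taken from the header above:

#include <net/sock.h>
#include <trace/events/udp.h>

/* Hypothetical caller; rc is whatever the queueing attempt returned. */
static void example_report_rcv_failure(struct sock *sk, int rc)
{
	if (rc < 0)
		trace_udp_fail_queue_rcv_skb(rc, sk);
}
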
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index b2c33bd955fa..36851f7f13da 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -179,6 +179,83 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
TP_ARGS(nr_reclaimed)
);
+TRACE_EVENT(mm_shrink_slab_start,
+ TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
+ long nr_objects_to_shrink, unsigned long pgs_scanned,
+ unsigned long lru_pgs, unsigned long cache_items,
+ unsigned long long delta, unsigned long total_scan),
+
+ TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
+ cache_items, delta, total_scan),
+
+ TP_STRUCT__entry(
+ __field(struct shrinker *, shr)
+ __field(void *, shrink)
+ __field(long, nr_objects_to_shrink)
+ __field(gfp_t, gfp_flags)
+ __field(unsigned long, pgs_scanned)
+ __field(unsigned long, lru_pgs)
+ __field(unsigned long, cache_items)
+ __field(unsigned long long, delta)
+ __field(unsigned long, total_scan)
+ ),
+
+ TP_fast_assign(
+ __entry->shr = shr;
+ __entry->shrink = shr->shrink;
+ __entry->nr_objects_to_shrink = nr_objects_to_shrink;
+ __entry->gfp_flags = sc->gfp_mask;
+ __entry->pgs_scanned = pgs_scanned;
+ __entry->lru_pgs = lru_pgs;
+ __entry->cache_items = cache_items;
+ __entry->delta = delta;
+ __entry->total_scan = total_scan;
+ ),
+
+ TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
+ __entry->shrink,
+ __entry->shr,
+ __entry->nr_objects_to_shrink,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->pgs_scanned,
+ __entry->lru_pgs,
+ __entry->cache_items,
+ __entry->delta,
+ __entry->total_scan)
+);
+
+TRACE_EVENT(mm_shrink_slab_end,
+ TP_PROTO(struct shrinker *shr, int shrinker_retval,
+ long unused_scan_cnt, long new_scan_cnt),
+
+ TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt),
+
+ TP_STRUCT__entry(
+ __field(struct shrinker *, shr)
+ __field(void *, shrink)
+ __field(long, unused_scan)
+ __field(long, new_scan)
+ __field(int, retval)
+ __field(long, total_scan)
+ ),
+
+ TP_fast_assign(
+ __entry->shr = shr;
+ __entry->shrink = shr->shrink;
+ __entry->unused_scan = unused_scan_cnt;
+ __entry->new_scan = new_scan_cnt;
+ __entry->retval = shrinker_retval;
+ __entry->total_scan = new_scan_cnt - unused_scan_cnt;
+ ),
+
+ TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
+ __entry->shrink,
+ __entry->shr,
+ __entry->unused_scan,
+ __entry->new_scan,
+ __entry->total_scan,
+ __entry->retval)
+);
DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
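A minimal, hypothetical sketch (not part of the patch) of a shrink pass bracketed by the two new shrinker events; only the tracepoint signatures come from the definitions above, and every local value is a placeholder:

#include <linux/mm.h>             /* assumed to declare shrinker/shrink_control */
#include <trace/events/vmscan.h>

static void example_shrink_one(struct shrinker *shrinker,
			       struct shrink_control *sc,
			       unsigned long nr_pages_scanned,
			       unsigned long lru_pages)
{
	long nr_to_scan = 0;
	unsigned long cache_items = 0, total_scan = 0;
	unsigned long long delta = 0;
	long unused = 0, new_nr = 0;
	int last_ret = 0;

	trace_mm_shrink_slab_start(shrinker, sc, nr_to_scan, nr_pages_scanned,
				   lru_pages, cache_items, delta, total_scan);
	/* ... call shrinker->shrink() in batches, updating the counters ... */
	trace_mm_shrink_slab_end(shrinker, last_ret, unused, new_nr);
}
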
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 4e249b927eaa..6bca4cc0063c 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -8,6 +8,19 @@
#include <linux/device.h>
#include <linux/writeback.h>
+#define show_inode_state(state) \
+ __print_flags(state, "|", \
+ {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \
+ {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \
+ {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \
+ {I_NEW, "I_NEW"}, \
+ {I_WILL_FREE, "I_WILL_FREE"}, \
+ {I_FREEING, "I_FREEING"}, \
+ {I_CLEAR, "I_CLEAR"}, \
+ {I_SYNC, "I_SYNC"}, \
+ {I_REFERENCED, "I_REFERENCED"} \
+ )
+
struct wb_writeback_work;
DECLARE_EVENT_CLASS(writeback_work_class,
@@ -49,6 +62,9 @@ DEFINE_EVENT(writeback_work_class, name, \
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
+DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
TRACE_EVENT(writeback_pages_written,
TP_PROTO(long pages_written),
@@ -88,6 +104,30 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
+DEFINE_WRITEBACK_EVENT(balance_dirty_start);
+DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
+
+TRACE_EVENT(balance_dirty_written,
+
+ TP_PROTO(struct backing_dev_info *bdi, int written),
+
+ TP_ARGS(bdi, written),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(int, written)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(bdi->dev), 32);
+ __entry->written = written;
+ ),
+
+ TP_printk("bdi %s written %d",
+ __entry->name,
+ __entry->written
+ )
+);
DECLARE_EVENT_CLASS(wbc_class,
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -101,8 +141,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, for_background)
__field(int, for_reclaim)
__field(int, range_cyclic)
- __field(int, more_io)
- __field(unsigned long, older_than_this)
__field(long, range_start)
__field(long, range_end)
),
@@ -116,15 +154,12 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->for_background = wbc->for_background;
__entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
- __entry->more_io = wbc->more_io;
- __entry->older_than_this = wbc->older_than_this ?
- *wbc->older_than_this : 0;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
- "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
+ "bgrd=%d reclm=%d cyclic=%d "
"start=0x%lx end=0x%lx",
__entry->name,
__entry->nr_to_write,
@@ -134,8 +169,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->for_background,
__entry->for_reclaim,
__entry->range_cyclic,
- __entry->more_io,
- __entry->older_than_this,
__entry->range_start,
__entry->range_end)
)
@@ -144,14 +177,79 @@ DECLARE_EVENT_CLASS(wbc_class,
DEFINE_EVENT(wbc_class, name, \
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
TP_ARGS(wbc, bdi))
-DEFINE_WBC_EVENT(wbc_writeback_start);
-DEFINE_WBC_EVENT(wbc_writeback_written);
-DEFINE_WBC_EVENT(wbc_writeback_wait);
-DEFINE_WBC_EVENT(wbc_balance_dirty_start);
-DEFINE_WBC_EVENT(wbc_balance_dirty_written);
-DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
DEFINE_WBC_EVENT(wbc_writepage);
+TRACE_EVENT(writeback_queue_io,
+ TP_PROTO(struct bdi_writeback *wb,
+ unsigned long *older_than_this,
+ int moved),
+ TP_ARGS(wb, older_than_this, moved),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, older)
+ __field(long, age)
+ __field(int, moved)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ __entry->older = older_than_this ? *older_than_this : 0;
+ __entry->age = older_than_this ?
+ (jiffies - *older_than_this) * 1000 / HZ : -1;
+ __entry->moved = moved;
+ ),
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+ __entry->name,
+ __entry->older, /* older_than_this in jiffies */
+ __entry->age, /* older_than_this in relative milliseconds */
+ __entry->moved)
+);
+
+TRACE_EVENT(global_dirty_state,
+
+ TP_PROTO(unsigned long background_thresh,
+ unsigned long dirty_thresh
+ ),
+
+ TP_ARGS(background_thresh,
+ dirty_thresh
+ ),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, nr_dirty)
+ __field(unsigned long, nr_writeback)
+ __field(unsigned long, nr_unstable)
+ __field(unsigned long, background_thresh)
+ __field(unsigned long, dirty_thresh)
+ __field(unsigned long, dirty_limit)
+ __field(unsigned long, nr_dirtied)
+ __field(unsigned long, nr_written)
+ ),
+
+ TP_fast_assign(
+ __entry->nr_dirty = global_page_state(NR_FILE_DIRTY);
+ __entry->nr_writeback = global_page_state(NR_WRITEBACK);
+ __entry->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+ __entry->nr_dirtied = global_page_state(NR_DIRTIED);
+ __entry->nr_written = global_page_state(NR_WRITTEN);
+ __entry->background_thresh = background_thresh;
+ __entry->dirty_thresh = dirty_thresh;
+ __entry->dirty_limit = global_dirty_limit;
+ ),
+
+ TP_printk("dirty=%lu writeback=%lu unstable=%lu "
+ "bg_thresh=%lu thresh=%lu limit=%lu "
+ "dirtied=%lu written=%lu",
+ __entry->nr_dirty,
+ __entry->nr_writeback,
+ __entry->nr_unstable,
+ __entry->background_thresh,
+ __entry->dirty_thresh,
+ __entry->dirty_limit,
+ __entry->nr_dirtied,
+ __entry->nr_written
+ )
+);
+
DECLARE_EVENT_CLASS(writeback_congest_waited_template,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -187,6 +285,63 @@ DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,
TP_ARGS(usec_timeout, usec_delayed)
);
+DECLARE_EVENT_CLASS(writeback_single_inode_template,
+
+ TP_PROTO(struct inode *inode,
+ struct writeback_control *wbc,
+ unsigned long nr_to_write
+ ),
+
+ TP_ARGS(inode, wbc, nr_to_write),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(unsigned long, state)
+ __field(unsigned long, age)
+ __field(unsigned long, writeback_index)
+ __field(long, nr_to_write)
+ __field(unsigned long, wrote)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name,
+ dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->age = (jiffies - inode->dirtied_when) *
+ 1000 / HZ;
+ __entry->writeback_index = inode->i_mapping->writeback_index;
+ __entry->nr_to_write = nr_to_write;
+ __entry->wrote = nr_to_write - wbc->nr_to_write;
+ ),
+
+ TP_printk("bdi %s: ino=%lu state=%s age=%lu "
+ "index=%lu to_write=%ld wrote=%lu",
+ __entry->name,
+ __entry->ino,
+ show_inode_state(__entry->state),
+ __entry->age,
+ __entry->writeback_index,
+ __entry->nr_to_write,
+ __entry->wrote
+ )
+);
+
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
+ TP_PROTO(struct inode *inode,
+ struct writeback_control *wbc,
+ unsigned long nr_to_write),
+ TP_ARGS(inode, wbc, nr_to_write)
+);
+
+DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
+ TP_PROTO(struct inode *inode,
+ struct writeback_control *wbc,
+ unsigned long nr_to_write),
+ TP_ARGS(inode, wbc, nr_to_write)
+);
+
#endif /* _TRACE_WRITEBACK_H */
/* This part must be outside protection */
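For the new queue_io event, a minimal, hypothetical flusher-side sketch (not part of the patch); only trace_writeback_queue_io()'s signature is taken from the header above:

#include <linux/backing-dev.h>
#include <trace/events/writeback.h>

/* Hypothetical helper; the dirty-list manipulation is elided. */
static void example_queue_io(struct bdi_writeback *wb,
			     unsigned long *older_than_this)
{
	int moved = 0;

	/* ... move expired dirty inodes onto the I/O list, counting them in moved ... */
	trace_writeback_queue_io(wb, older_than_this, moved);
}
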
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
new file mode 100644
index 000000000000..44d8decee09e
--- /dev/null
+++ b/include/trace/events/xen.h
@@ -0,0 +1,504 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xen
+
+#if !defined(_TRACE_XEN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XEN_H
+
+#include <linux/tracepoint.h>
+#include <asm/paravirt_types.h>
+#include <asm/xen/trace_types.h>
+
+/* Multicalls */
+DECLARE_EVENT_CLASS(xen_mc__batch,
+ TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_ARGS(mode),
+ TP_STRUCT__entry(
+ __field(enum paravirt_lazy_mode, mode)
+ ),
+ TP_fast_assign(__entry->mode = mode),
+ TP_printk("start batch LAZY_%s",
+ (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
+ (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ );
+#define DEFINE_XEN_MC_BATCH(name) \
+ DEFINE_EVENT(xen_mc__batch, name, \
+ TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_ARGS(mode))
+
+DEFINE_XEN_MC_BATCH(xen_mc_batch);
+DEFINE_XEN_MC_BATCH(xen_mc_issue);
+
+TRACE_EVENT(xen_mc_entry,
+ TP_PROTO(struct multicall_entry *mc, unsigned nargs),
+ TP_ARGS(mc, nargs),
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(unsigned int, nargs)
+ __array(unsigned long, args, 6)
+ ),
+ TP_fast_assign(__entry->op = mc->op;
+ __entry->nargs = nargs;
+ memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs);
+ memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs));
+ ),
+ TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
+ __entry->op, xen_hypercall_name(__entry->op),
+ __entry->args[0], __entry->args[1], __entry->args[2],
+ __entry->args[3], __entry->args[4], __entry->args[5])
+ );
+
+TRACE_EVENT(xen_mc_entry_alloc,
+ TP_PROTO(size_t args),
+ TP_ARGS(args),
+ TP_STRUCT__entry(
+ __field(size_t, args)
+ ),
+ TP_fast_assign(__entry->args = args),
+ TP_printk("alloc entry %zu arg bytes", __entry->args)
+ );
+
+TRACE_EVENT(xen_mc_callback,
+ TP_PROTO(xen_mc_callback_fn_t fn, void *data),
+ TP_ARGS(fn, data),
+ TP_STRUCT__entry(
+ __field(xen_mc_callback_fn_t, fn)
+ __field(void *, data)
+ ),
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->data = data;
+ ),
+ TP_printk("callback %pf, data %p",
+ __entry->fn, __entry->data)
+ );
+
+TRACE_EVENT(xen_mc_flush_reason,
+ TP_PROTO(enum xen_mc_flush_reason reason),
+ TP_ARGS(reason),
+ TP_STRUCT__entry(
+ __field(enum xen_mc_flush_reason, reason)
+ ),
+ TP_fast_assign(__entry->reason = reason),
+ TP_printk("flush reason %s",
+ (__entry->reason == XEN_MC_FL_NONE) ? "NONE" :
+ (__entry->reason == XEN_MC_FL_BATCH) ? "BATCH" :
+ (__entry->reason == XEN_MC_FL_ARGS) ? "ARGS" :
+ (__entry->reason == XEN_MC_FL_CALLBACK) ? "CALLBACK" : "??")
+ );
+
+TRACE_EVENT(xen_mc_flush,
+ TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
+ TP_ARGS(mcidx, argidx, cbidx),
+ TP_STRUCT__entry(
+ __field(unsigned, mcidx)
+ __field(unsigned, argidx)
+ __field(unsigned, cbidx)
+ ),
+ TP_fast_assign(__entry->mcidx = mcidx;
+ __entry->argidx = argidx;
+ __entry->cbidx = cbidx),
+ TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
+ __entry->mcidx, __entry->argidx, __entry->cbidx)
+ );
+
+TRACE_EVENT(xen_mc_extend_args,
+ TP_PROTO(unsigned long op, size_t args, enum xen_mc_extend_args res),
+ TP_ARGS(op, args, res),
+ TP_STRUCT__entry(
+ __field(unsigned int, op)
+ __field(size_t, args)
+ __field(enum xen_mc_extend_args, res)
+ ),
+ TP_fast_assign(__entry->op = op;
+ __entry->args = args;
+ __entry->res = res),
+ TP_printk("extending op %u%s by %zu bytes res %s",
+ __entry->op, xen_hypercall_name(__entry->op),
+ __entry->args,
+ __entry->res == XEN_MC_XE_OK ? "OK" :
+ __entry->res == XEN_MC_XE_BAD_OP ? "BAD_OP" :
+ __entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
+ );
+
+/* mmu */
+DECLARE_EVENT_CLASS(xen_mmu__set_pte,
+ TP_PROTO(pte_t *ptep, pte_t pteval),
+ TP_ARGS(ptep, pteval),
+ TP_STRUCT__entry(
+ __field(pte_t *, ptep)
+ __field(pteval_t, pteval)
+ ),
+ TP_fast_assign(__entry->ptep = ptep;
+ __entry->pteval = pteval.pte),
+ TP_printk("ptep %p pteval %0*llx (raw %0*llx)",
+ __entry->ptep,
+ (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+ (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+ );
+
+#define DEFINE_XEN_MMU_SET_PTE(name) \
+ DEFINE_EVENT(xen_mmu__set_pte, name, \
+ TP_PROTO(pte_t *ptep, pte_t pteval), \
+ TP_ARGS(ptep, pteval))
+
+DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
+DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
+
+TRACE_EVENT(xen_mmu_set_domain_pte,
+ TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
+ TP_ARGS(ptep, pteval, domid),
+ TP_STRUCT__entry(
+ __field(pte_t *, ptep)
+ __field(pteval_t, pteval)
+ __field(unsigned, domid)
+ ),
+ TP_fast_assign(__entry->ptep = ptep;
+ __entry->pteval = pteval.pte;
+ __entry->domid = domid),
+ TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
+ __entry->ptep,
+ (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+ (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
+ __entry->domid)
+ );
+
+TRACE_EVENT(xen_mmu_set_pte_at,
+ TP_PROTO(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval),
+ TP_ARGS(mm, addr, ptep, pteval),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, addr)
+ __field(pte_t *, ptep)
+ __field(pteval_t, pteval)
+ ),
+ TP_fast_assign(__entry->mm = mm;
+ __entry->addr = addr;
+ __entry->ptep = ptep;
+ __entry->pteval = pteval.pte),
+ TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+ __entry->mm, __entry->addr, __entry->ptep,
+ (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+ (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+ );
+
+TRACE_EVENT(xen_mmu_pte_clear,
+ TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
+ TP_ARGS(mm, addr, ptep),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, addr)
+ __field(pte_t *, ptep)
+ ),
+ TP_fast_assign(__entry->mm = mm;
+ __entry->addr = addr;
+ __entry->ptep = ptep),
+ TP_printk("mm %p addr %lx ptep %p",
+ __entry->mm, __entry->addr, __entry->ptep)
+ );
+
+TRACE_EVENT(xen_mmu_set_pmd,
+ TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
+ TP_ARGS(pmdp, pmdval),
+ TP_STRUCT__entry(
+ __field(pmd_t *, pmdp)
+ __field(pmdval_t, pmdval)
+ ),
+ TP_fast_assign(__entry->pmdp = pmdp;
+ __entry->pmdval = pmdval.pmd),
+ TP_printk("pmdp %p pmdval %0*llx (raw %0*llx)",
+ __entry->pmdp,
+ (int)sizeof(pmdval_t) * 2, (unsigned long long)pmd_val(native_make_pmd(__entry->pmdval)),
+ (int)sizeof(pmdval_t) * 2, (unsigned long long)__entry->pmdval)
+ );
+
+TRACE_EVENT(xen_mmu_pmd_clear,
+ TP_PROTO(pmd_t *pmdp),
+ TP_ARGS(pmdp),
+ TP_STRUCT__entry(
+ __field(pmd_t *, pmdp)
+ ),
+ TP_fast_assign(__entry->pmdp = pmdp),
+ TP_printk("pmdp %p", __entry->pmdp)
+ );
+
+#if PAGETABLE_LEVELS >= 4
+
+TRACE_EVENT(xen_mmu_set_pud,
+ TP_PROTO(pud_t *pudp, pud_t pudval),
+ TP_ARGS(pudp, pudval),
+ TP_STRUCT__entry(
+ __field(pud_t *, pudp)
+ __field(pudval_t, pudval)
+ ),
+ TP_fast_assign(__entry->pudp = pudp;
+ __entry->pudval = native_pud_val(pudval)),
+ TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
+ __entry->pudp,
+ (int)sizeof(pudval_t) * 2, (unsigned long long)pud_val(native_make_pud(__entry->pudval)),
+ (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
+ );
+
+TRACE_EVENT(xen_mmu_set_pgd,
+ TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
+ TP_ARGS(pgdp, user_pgdp, pgdval),
+ TP_STRUCT__entry(
+ __field(pgd_t *, pgdp)
+ __field(pgd_t *, user_pgdp)
+ __field(pgdval_t, pgdval)
+ ),
+ TP_fast_assign(__entry->pgdp = pgdp;
+ __entry->user_pgdp = user_pgdp;
+ __entry->pgdval = pgdval.pgd),
+ TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
+ __entry->pgdp, __entry->user_pgdp,
+ (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
+ (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
+ );
+
+TRACE_EVENT(xen_mmu_pud_clear,
+ TP_PROTO(pud_t *pudp),
+ TP_ARGS(pudp),
+ TP_STRUCT__entry(
+ __field(pud_t *, pudp)
+ ),
+ TP_fast_assign(__entry->pudp = pudp),
+ TP_printk("pudp %p", __entry->pudp)
+ );
+#else
+
+TRACE_EVENT(xen_mmu_set_pud,
+ TP_PROTO(pud_t *pudp, pud_t pudval),
+ TP_ARGS(pudp, pudval),
+ TP_STRUCT__entry(
+ __field(pud_t *, pudp)
+ __field(pudval_t, pudval)
+ ),
+ TP_fast_assign(__entry->pudp = pudp;
+ __entry->pudval = native_pud_val(pudval)),
+ TP_printk("pudp %p pudval %0*llx (raw %0*llx)",
+ __entry->pudp,
+ (int)sizeof(pudval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pudval)),
+ (int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
+ );
+
+#endif
+
+TRACE_EVENT(xen_mmu_pgd_clear,
+ TP_PROTO(pgd_t *pgdp),
+ TP_ARGS(pgdp),
+ TP_STRUCT__entry(
+ __field(pgd_t *, pgdp)
+ ),
+ TP_fast_assign(__entry->pgdp = pgdp),
+ TP_printk("pgdp %p", __entry->pgdp)
+ );
+
+DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
+ TP_PROTO(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval),
+ TP_ARGS(mm, addr, ptep, pteval),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, addr)
+ __field(pte_t *, ptep)
+ __field(pteval_t, pteval)
+ ),
+ TP_fast_assign(__entry->mm = mm;
+ __entry->addr = addr;
+ __entry->ptep = ptep;
+ __entry->pteval = pteval.pte),
+ TP_printk("mm %p addr %lx ptep %p pteval %0*llx (raw %0*llx)",
+ __entry->mm, __entry->addr, __entry->ptep,
+ (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
+ (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval)
+ );
+#define DEFINE_XEN_MMU_PTEP_MODIFY_PROT(name) \
+ DEFINE_EVENT(xen_mmu_ptep_modify_prot, name, \
+ TP_PROTO(struct mm_struct *mm, unsigned long addr, \
+ pte_t *ptep, pte_t pteval), \
+ TP_ARGS(mm, addr, ptep, pteval))
+
+DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_start);
+DEFINE_XEN_MMU_PTEP_MODIFY_PROT(xen_mmu_ptep_modify_prot_commit);
+
+TRACE_EVENT(xen_mmu_alloc_ptpage,
+ TP_PROTO(struct mm_struct *mm, unsigned long pfn, unsigned level, bool pinned),
+ TP_ARGS(mm, pfn, level, pinned),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, pfn)
+ __field(unsigned, level)
+ __field(bool, pinned)
+ ),
+ TP_fast_assign(__entry->mm = mm;
+ __entry->pfn = pfn;
+ __entry->level = level;
+ __entry->pinned = pinned),
+ TP_printk("mm %p pfn %lx level %d %spinned",
+ __entry->mm, __entry->pfn, __entry->level,
+ __entry->pinned ? "" : "un")
+ );
+
+TRACE_EVENT(xen_mmu_release_ptpage,
+ TP_PROTO(unsigned long pfn, unsigned level, bool pinned),
+ TP_ARGS(pfn, level, pinned),
+ TP_STRUCT__entry(
+ __field(unsigned long, pfn)
+ __field(unsigned, level)
+ __field(bool, pinned)
+ ),
+ TP_fast_assign(__entry->pfn = pfn;
+ __entry->level = level;
+ __entry->pinned = pinned),
+ TP_printk("pfn %lx level %d %spinned",
+ __entry->pfn, __entry->level,
+ __entry->pinned ? "" : "un")
+ );
+
+DECLARE_EVENT_CLASS(xen_mmu_pgd,
+ TP_PROTO(struct mm_struct *mm, pgd_t *pgd),
+ TP_ARGS(mm, pgd),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(pgd_t *, pgd)
+ ),
+ TP_fast_assign(__entry->mm = mm;
+ __entry->pgd = pgd),
+ TP_printk("mm %p pgd %p", __entry->mm, __entry->pgd)
+ );
+#define DEFINE_XEN_MMU_PGD_EVENT(name) \
+ DEFINE_EVENT(xen_mmu_pgd, name, \
+ TP_PROTO(struct mm_struct *mm, pgd_t *pgd), \
+ TP_ARGS(mm, pgd))
+
+DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
+DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+
+TRACE_EVENT(xen_mmu_flush_tlb,
+ TP_PROTO(int x),
+ TP_ARGS(x),
+ TP_STRUCT__entry(__array(char, x, 0)),
+ TP_fast_assign((void)x),
+ TP_printk("%s", "")
+ );
+
+TRACE_EVENT(xen_mmu_flush_tlb_single,
+ TP_PROTO(unsigned long addr),
+ TP_ARGS(addr),
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ ),
+ TP_fast_assign(__entry->addr = addr),
+ TP_printk("addr %lx", __entry->addr)
+ );
+
+TRACE_EVENT(xen_mmu_flush_tlb_others,
+ TP_PROTO(const struct cpumask *cpus, struct mm_struct *mm,
+ unsigned long addr),
+ TP_ARGS(cpus, mm, addr),
+ TP_STRUCT__entry(
+ __field(unsigned, ncpus)
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, addr)
+ ),
+ TP_fast_assign(__entry->ncpus = cpumask_weight(cpus);
+ __entry->mm = mm;
+ __entry->addr = addr),
+ TP_printk("ncpus %d mm %p addr %lx",
+ __entry->ncpus, __entry->mm, __entry->addr)
+ );
+
+TRACE_EVENT(xen_mmu_write_cr3,
+ TP_PROTO(bool kernel, unsigned long cr3),
+ TP_ARGS(kernel, cr3),
+ TP_STRUCT__entry(
+ __field(bool, kernel)
+ __field(unsigned long, cr3)
+ ),
+ TP_fast_assign(__entry->kernel = kernel;
+ __entry->cr3 = cr3),
+ TP_printk("%s cr3 %lx",
+ __entry->kernel ? "kernel" : "user", __entry->cr3)
+ );
+
+
+/* CPU */
+TRACE_EVENT(xen_cpu_write_ldt_entry,
+ TP_PROTO(struct desc_struct *dt, int entrynum, u64 desc),
+ TP_ARGS(dt, entrynum, desc),
+ TP_STRUCT__entry(
+ __field(struct desc_struct *, dt)
+ __field(int, entrynum)
+ __field(u64, desc)
+ ),
+ TP_fast_assign(__entry->dt = dt;
+ __entry->entrynum = entrynum;
+ __entry->desc = desc;
+ ),
+ TP_printk("dt %p entrynum %d entry %016llx",
+ __entry->dt, __entry->entrynum,
+ (unsigned long long)__entry->desc)
+ );
+
+TRACE_EVENT(xen_cpu_write_idt_entry,
+ TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent),
+ TP_ARGS(dt, entrynum, ent),
+ TP_STRUCT__entry(
+ __field(gate_desc *, dt)
+ __field(int, entrynum)
+ ),
+ TP_fast_assign(__entry->dt = dt;
+ __entry->entrynum = entrynum;
+ ),
+ TP_printk("dt %p entrynum %d",
+ __entry->dt, __entry->entrynum)
+ );
+
+TRACE_EVENT(xen_cpu_load_idt,
+ TP_PROTO(const struct desc_ptr *desc),
+ TP_ARGS(desc),
+ TP_STRUCT__entry(
+ __field(unsigned long, addr)
+ ),
+ TP_fast_assign(__entry->addr = desc->address),
+ TP_printk("addr %lx", __entry->addr)
+ );
+
+TRACE_EVENT(xen_cpu_write_gdt_entry,
+ TP_PROTO(struct desc_struct *dt, int entrynum, const void *desc, int type),
+ TP_ARGS(dt, entrynum, desc, type),
+ TP_STRUCT__entry(
+ __field(u64, desc)
+ __field(struct desc_struct *, dt)
+ __field(int, entrynum)
+ __field(int, type)
+ ),
+ TP_fast_assign(__entry->dt = dt;
+ __entry->entrynum = entrynum;
+ __entry->desc = *(u64 *)desc;
+ __entry->type = type;
+ ),
+ TP_printk("dt %p entrynum %d type %d desc %016llx",
+ __entry->dt, __entry->entrynum, __entry->type,
+ (unsigned long long)__entry->desc)
+ );
+
+TRACE_EVENT(xen_cpu_set_ldt,
+ TP_PROTO(const void *addr, unsigned entries),
+ TP_ARGS(addr, entries),
+ TP_STRUCT__entry(
+ __field(const void *, addr)
+ __field(unsigned, entries)
+ ),
+ TP_fast_assign(__entry->addr = addr;
+ __entry->entries = entries),
+ TP_printk("addr %p entries %u",
+ __entry->addr, __entry->entries)
+ );
+
+
+#endif /* _TRACE_XEN_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
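Finally, a minimal, hypothetical sketch (not part of the patch) of bracketing a multicall batch with the new xen events; only the trace_xen_mc_batch()/trace_xen_mc_issue() signatures come from the header above:

#include <trace/events/xen.h>

static void example_mc_batch(enum paravirt_lazy_mode mode)
{
	trace_xen_mc_batch(mode);
	/* ... queue hypercalls into the per-CPU multicall buffer ... */
	trace_xen_mc_issue(mode);
}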