author	Barry Kasindorf <barry.kasindorf@amd.com>	2008-07-22 21:08:54 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-07-26 11:48:04 +0200
commit	345c25730d085c45622ac779da4dbd97dc3a10fe (patch)
tree	12f31ee55b97ef9604b37990e05c523178c32901 /drivers/oprofile/buffer_sync.c
parent	ee648bc77f11b57d15a68d336fc30e343198f893 (diff)
x86/oprofile: add IBS support for AMD CPUs, IBS buffer handling routines
This patchset supports the new profiling hardware available in the
latest AMD CPUs in the oProfile driver.

Signed-off-by: Barry Kasindorf <barry.kasindorf@amd.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: oprofile-list <oprofile-list@lists.sourceforge.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/oprofile/buffer_sync.c')
-rw-r--r--	drivers/oprofile/buffer_sync.c	72
1 file changed, 71 insertions(+), 1 deletion(-)
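
For orientation before the diff itself: the new add_ibs_begin() routine writes one IBS record into the event buffer as a flat sequence of add_event_entry() words. The struct below is only an illustrative sketch of that ordering, not a type used by the driver or the patch.

/*
 * Illustrative layout of one IBS record as emitted by add_ibs_begin().
 * The real stream is a flat run of unsigned longs written with
 * add_event_entry() in exactly this order; this struct exists only to
 * make the ordering easy to see.
 */
struct ibs_record_sketch {              /* hypothetical, not in the patch */
	unsigned long escape_code;      /* ESCAPE_CODE                      */
	unsigned long code;             /* IBS_FETCH_CODE or IBS_OP_CODE    */
	unsigned long offset;           /* dcookie offset (or raw RIP)      */
	unsigned long eip;              /* raw linear address (low part)    */
	unsigned long event;            /* high 32 bits of the RIP on LP64  */
	/* followed by 2 (fetch) or 5 (op) eip/event pairs of IBS data      */
};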
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 615929f6f0c2..e1782d2df09f 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -5,6 +5,7 @@
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
+ * @author Barry Kasindorf
*
* This is the core of the buffer management. Each
* CPU buffer is processed and entered into the
@@ -272,7 +273,7 @@ static void increment_tail(struct oprofile_cpu_buffer *b)
{
unsigned long new_tail = b->tail_pos + 1;
- rmb();
+ rmb(); /* be sure FIFO pointers are synchronized */
if (new_tail < b->buffer_size)
b->tail_pos = new_tail;
@@ -327,6 +328,67 @@ static void add_trace_begin(void)
add_event_entry(TRACE_BEGIN_CODE);
}
+#define IBS_FETCH_CODE_SIZE 2
+#define IBS_OP_CODE_SIZE 5
+#define IBS_EIP(offset) \
+ (((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
+#define IBS_EVENT(offset) \
+ (((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
+
+/*
+ * Add IBS fetch and op entries to event buffer
+ */
+static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
+ int in_kernel, struct mm_struct *mm)
+{
+ unsigned long rip;
+ int i, count;
+ unsigned long ibs_cookie = 0;
+ off_t offset;
+
+ increment_tail(cpu_buf); /* move to RIP entry */
+
+ rip = IBS_EIP(cpu_buf->tail_pos);
+
+#ifdef __LP64__
+ rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
+#endif
+
+ if (mm) {
+ ibs_cookie = lookup_dcookie(mm, rip, &offset);
+
+ if (ibs_cookie == NO_COOKIE)
+ offset = rip;
+ if (ibs_cookie == INVALID_COOKIE) {
+ atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+ offset = rip;
+ }
+ if (ibs_cookie != last_cookie) {
+ add_cookie_switch(ibs_cookie);
+ last_cookie = ibs_cookie;
+ }
+ } else
+ offset = rip;
+
+ add_event_entry(ESCAPE_CODE);
+ add_event_entry(code);
+ add_event_entry(offset); /* Offset from Dcookie */
+
+ /* we send the Dcookie offset, but also send the raw linear address */
+ add_event_entry(IBS_EIP(cpu_buf->tail_pos));
+ add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
+
+ if (code == IBS_FETCH_CODE)
+ count = IBS_FETCH_CODE_SIZE; /* IBS FETCH is 2 int64s */
+ else
+ count = IBS_OP_CODE_SIZE; /* IBS OP is 5 int64s */
+
+ for (i = 0; i < count; i++) {
+ increment_tail(cpu_buf);
+ add_event_entry(IBS_EIP(cpu_buf->tail_pos));
+ add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
+ }
+}
static void add_sample_entry(unsigned long offset, unsigned long event)
{
@@ -524,6 +586,14 @@ void sync_buffer(int cpu)
} else if (s->event == CPU_TRACE_BEGIN) {
state = sb_bt_start;
add_trace_begin();
+ } else if (s->event == IBS_FETCH_BEGIN) {
+ state = sb_bt_start;
+ add_ibs_begin(cpu_buf,
+ IBS_FETCH_CODE, in_kernel, mm);
+ } else if (s->event == IBS_OP_BEGIN) {
+ state = sb_bt_start;
+ add_ibs_begin(cpu_buf,
+ IBS_OP_CODE, in_kernel, mm);
} else {
struct mm_struct *oldmm = mm;
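
For completeness, a hedged sketch of how a consumer might walk one IBS record after it has already read the ESCAPE_CODE and the IBS code word. The parse_ibs_record() function and the next_word() callback are hypothetical stand-ins, not part of oprofile's user-space interface; only the word ordering and the 2/5 pair counts come from the add_ibs_begin() code above.

/* Mirrors the patch above; values assumed, shown here for self-containment. */
#define IBS_FETCH_CODE_SIZE 2
#define IBS_OP_CODE_SIZE    5

/*
 * Hypothetical consumer-side walk of one IBS record, in the same order
 * that add_ibs_begin() writes it.  next_word() stands in for whatever
 * the reader uses to pull unsigned longs off the event buffer.
 */
static void parse_ibs_record(int is_fetch, unsigned long (*next_word)(void))
{
	unsigned long offset = next_word();  /* dcookie offset or raw RIP */
	unsigned long eip    = next_word();  /* raw linear address        */
	unsigned long event  = next_word();  /* high bits of RIP on LP64  */
	int count = is_fetch ? IBS_FETCH_CODE_SIZE : IBS_OP_CODE_SIZE;
	int i;

	for (i = 0; i < count; i++) {
		unsigned long lo = next_word();  /* IBS data, low half  */
		unsigned long hi = next_word();  /* IBS data, high half */
		/* hand lo/hi to whatever decodes the IBS registers */
		(void)lo; (void)hi;
	}
	(void)offset; (void)eip; (void)event;
}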