Diffstat (limited to 'tools/perf/util/evlist.c')
 tools/perf/util/evlist.c | 307 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 250 insertions(+), 57 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8065ce8fa9a5..f9f77bee0b1b 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -14,6 +14,7 @@
#include "target.h"
#include "evlist.h"
#include "evsel.h"
+#include "debug.h"
#include <unistd.h>
#include "parse-events.h"
@@ -48,26 +49,29 @@ struct perf_evlist *perf_evlist__new(void)
return evlist;
}
-void perf_evlist__config(struct perf_evlist *evlist,
- struct perf_record_opts *opts)
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
+ */
+void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel;
- /*
- * Set the evsel leader links before we configure attributes,
- * since some might depend on this info.
- */
- if (opts->group)
- perf_evlist__set_leader(evlist);
+ struct perf_evsel *first = perf_evlist__first(evlist);
- if (evlist->cpus->map[0] < 0)
- opts->no_inherit = true;
+ evlist->id_pos = first->id_pos;
+ evlist->is_pos = first->is_pos;
+}
- list_for_each_entry(evsel, &evlist->entries, node) {
- perf_evsel__config(evsel, opts);
+static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
- if (evlist->nr_entries > 1)
- perf_evsel__set_sample_id(evsel);
- }
+ list_for_each_entry(evsel, &evlist->entries, node)
+ perf_evsel__calc_id_pos(evsel);
+
+ perf_evlist__set_id_pos(evlist);
}
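
[Annotation: id_pos is the u64 index of the event id counted from the start of a
PERF_RECORD_SAMPLE body; is_pos is its index counted from the end of any other
record carrying sample_id_all data. Assuming every evsel is opened with the new
PERF_SAMPLE_IDENTIFIER bit, those positions are fixed (id_pos == 0, is_pos == 1),
so a consumer can recover the id without knowing the full sample layout. A
minimal standalone sketch of that common case; record_id() is a hypothetical
helper, not part of this patch:]

    #include <linux/perf_event.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: pull the event id out of a raw mmap record, assuming all
     * events were opened with PERF_SAMPLE_IDENTIFIER set (id_pos == 0,
     * is_pos == 1). Mirrors the generic perf_evlist__event2id() below. */
    static uint64_t record_id(const struct perf_event_header *hdr)
    {
        const uint64_t *array = (const uint64_t *)(hdr + 1);
        size_t n = (hdr->size - sizeof(*hdr)) / sizeof(uint64_t);

        if (hdr->type == PERF_RECORD_SAMPLE)
            return array[0];    /* first u64 of the sample body */

        return array[n - 1];    /* last u64 of the record */
    }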
static void perf_evlist__purge(struct perf_evlist *evlist)
@@ -100,15 +104,20 @@ void perf_evlist__delete(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
list_add_tail(&entry->node, &evlist->entries);
- ++evlist->nr_entries;
+ if (!evlist->nr_entries++)
+ perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list,
int nr_entries)
{
+ bool set_id_pos = !evlist->nr_entries;
+
list_splice_tail(list, &evlist->entries);
evlist->nr_entries += nr_entries;
+ if (set_id_pos)
+ perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
@@ -209,6 +218,21 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
return NULL;
}
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+ (strcmp(evsel->name, name) == 0))
+ return evsel;
+ }
+
+ return NULL;
+}
+
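
[Annotation: a typical call site for the new lookup; tracepoint evsels are named
"sys:name" by the parse-events code, and the specific tracepoint and follow-up
action here are illustrative only:]

    #include "evlist.h"

    /* Sketch: mute one tracepoint by name. */
    static int mute_sched_switch(struct perf_evlist *evlist)
    {
        struct perf_evsel *evsel;

        evsel = perf_evlist__find_tracepoint_by_name(evlist,
                                                     "sched:sched_switch");
        if (evsel == NULL)
            return -1;

        return perf_evlist__disable_event(evlist, evsel);
    }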
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler)
{
@@ -232,7 +256,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
- if (!perf_evsel__is_group_leader(pos))
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
@@ -250,7 +274,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
- if (!perf_evsel__is_group_leader(pos))
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
@@ -259,6 +283,44 @@ void perf_evlist__enable(struct perf_evlist *evlist)
}
}
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return 0;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
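
[Annotation: the new per-event helpers are thin loops over the per-fd
enable/disable ioctls. Stripped of the evlist plumbing, the underlying
mechanism on a single perf fd is just this (standalone sketch, most error
handling elided):]

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Open one disabled cycles counter on the calling thread, then
     * toggle it around a region of interest. */
    static int toggle_example(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
            return -1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* start counting */
        /* ... measured region ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);   /* stop counting */

        close(fd);
        return 0;
    }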
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -302,6 +364,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+ /* Legacy way to get event id. All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
read(fd, &read_data, sizeof(read_data)) == -1)
@@ -312,25 +392,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++id_idx;
- perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+ id = read_data[id_idx];
+
+ add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
return 0;
}
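
[Annotation: outside the evlist plumbing, the same two-step dance from
perf_evlist__id_add_fd() looks like the sketch below. It assumes the event was
opened with PERF_FORMAT_ID but without PERF_FORMAT_GROUP or the
PERF_FORMAT_TOTAL_TIME_* bits, so read() returns { value, id } and the id is
the second u64:]

    #include <errno.h>
    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    static int get_event_id(int fd, uint64_t *id)
    {
        uint64_t buf[2] = { 0, };

        /* Preferred: the PERF_EVENT_IOC_ID ioctl (newer kernels). */
        if (ioctl(fd, PERF_EVENT_IOC_ID, id) == 0)
            return 0;
        if (errno != ENOTTY)
            return -1;

        /* Legacy fallback: read { value, id } via PERF_FORMAT_ID. */
        if (read(fd, buf, sizeof(buf)) < 0)
            return -1;
        *id = buf[1];
        return 0;
    }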
-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
- if (evlist->nr_entries == 1)
- return perf_evlist__first(evlist);
-
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->heads[hash];
hlist_for_each_entry(sid, head, node)
if (sid->id == id)
- return sid->evsel;
+ return sid;
+
+ return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct perf_sample_id *sid;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ sid = perf_evlist__id2sid(evlist, id);
+ if (sid)
+ return sid->evsel;
if (!perf_evlist__sample_id_all(evlist))
return perf_evlist__first(evlist);
@@ -338,6 +432,60 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
return NULL;
}
+static int perf_evlist__event2id(struct perf_evlist *evlist,
+ union perf_event *event, u64 *id)
+{
+ const u64 *array = event->sample.array;
+ ssize_t n;
+
+ n = (event->header.size - sizeof(event->header)) >> 3;
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ if (evlist->id_pos >= n)
+ return -1;
+ *id = array[evlist->id_pos];
+ } else {
+ if (evlist->is_pos > n)
+ return -1;
+ n -= evlist->is_pos;
+ *id = array[n];
+ }
+ return 0;
+}
+
+static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+ union perf_event *event)
+{
+ struct perf_evsel *first = perf_evlist__first(evlist);
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+ u64 id;
+
+ if (evlist->nr_entries == 1)
+ return first;
+
+ if (!first->attr.sample_id_all &&
+ event->header.type != PERF_RECORD_SAMPLE)
+ return first;
+
+ if (perf_evlist__event2id(evlist, event, &id))
+ return NULL;
+
+ /* Synthesized events have an id of zero */
+ if (!id)
+ return first;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node) {
+ if (sid->id == id)
+ return sid->evsel;
+ }
+ return NULL;
+}
+
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
@@ -403,16 +551,20 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return event;
}
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+ if (evlist->mmap[idx].base != NULL) {
+ munmap(evlist->mmap[idx].base, evlist->mmap_len);
+ evlist->mmap[idx].base = NULL;
+ }
+}
+
void perf_evlist__munmap(struct perf_evlist *evlist)
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- if (evlist->mmap[i].base != NULL) {
- munmap(evlist->mmap[i].base, evlist->mmap_len);
- evlist->mmap[i].base = NULL;
- }
- }
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ __perf_evlist__munmap(evlist, i);
free(evlist->mmap);
evlist->mmap = NULL;
@@ -421,7 +573,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
- if (cpu_map__all(evlist->cpus))
+ if (cpu_map__empty(evlist->cpus))
evlist->nr_mmaps = thread_map__nr(evlist->threads);
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
@@ -450,6 +602,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
+ pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
@@ -477,12 +630,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
return 0;
out_unmap:
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (evlist->mmap[cpu].base != NULL) {
- munmap(evlist->mmap[cpu].base, evlist->mmap_len);
- evlist->mmap[cpu].base = NULL;
- }
- }
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ __perf_evlist__munmap(evlist, cpu);
return -1;
}
@@ -492,6 +641,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
int thread;
int nr_threads = thread_map__nr(evlist->threads);
+ pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
@@ -517,12 +667,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
return 0;
out_unmap:
- for (thread = 0; thread < nr_threads; thread++) {
- if (evlist->mmap[thread].base != NULL) {
- munmap(evlist->mmap[thread].base, evlist->mmap_len);
- evlist->mmap[thread].base = NULL;
- }
- }
+ for (thread = 0; thread < nr_threads; thread++)
+ __perf_evlist__munmap(evlist, thread);
return -1;
}
@@ -573,7 +719,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return -ENOMEM;
}
- if (cpu_map__all(cpus))
+ if (cpu_map__empty(cpus))
return perf_evlist__mmap_per_thread(evlist, prot, mask);
return perf_evlist__mmap_per_cpu(evlist, prot, mask);
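
[Annotation: each of those per-cpu/per-thread maps boils down to one mmap() of a
perf fd. A standalone sketch; the page count is caller-chosen and must be a
power of two:]

    #include <sys/mman.h>
    #include <unistd.h>

    /* Map one event's ring buffer: 2^n data pages plus one control
     * page (struct perf_event_mmap_page) at the front. */
    static void *map_ring(int fd, unsigned int pages)
    {
        size_t len = (pages + 1) * (size_t)sysconf(_SC_PAGESIZE);
        void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);

        return base == MAP_FAILED ? NULL : base;
    }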
@@ -650,20 +796,66 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
+ struct perf_evsel *pos;
+
+ if (evlist->nr_entries == 1)
+ return true;
+
+ if (evlist->id_pos < 0 || evlist->is_pos < 0)
+ return false;
+
+ list_for_each_entry(pos, &evlist->entries, node) {
+ if (pos->id_pos != evlist->id_pos ||
+ pos->is_pos != evlist->is_pos)
+ return false;
+ }
+
+ return true;
+}
+
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->combined_sample_type)
+ return evlist->combined_sample_type;
+
+ list_for_each_entry(evsel, &evlist->entries, node)
+ evlist->combined_sample_type |= evsel->attr.sample_type;
+
+ return evlist->combined_sample_type;
+}
+
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ evlist->combined_sample_type = 0;
+ return __perf_evlist__combined_sample_type(evlist);
+}
+
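
[Annotation: the combined type is the OR across all evsels, so a set bit means
at least one event in the list samples that field, not that every event does.
A small illustrative consumer:]

    #include <linux/perf_event.h>
    #include <stdbool.h>
    #include "evlist.h"

    /* Sketch: does any evsel in the list carry timestamps? */
    static bool evlist_has_time(struct perf_evlist *evlist)
    {
        return perf_evlist__combined_sample_type(evlist) &
               PERF_SAMPLE_TIME;
    }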
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+ u64 read_format = first->attr.read_format;
+ u64 sample_type = first->attr.sample_type;
list_for_each_entry_continue(pos, &evlist->entries, node) {
- if (first->attr.sample_type != pos->attr.sample_type)
+ if (read_format != pos->attr.read_format)
return false;
}
+ /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
+ if ((sample_type & PERF_SAMPLE_READ) &&
+ !(read_format & PERF_FORMAT_ID)) {
+ return false;
+ }
+
return true;
}
-u64 perf_evlist__sample_type(struct perf_evlist *evlist)
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
struct perf_evsel *first = perf_evlist__first(evlist);
- return first->attr.sample_type;
+ return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
@@ -692,6 +884,9 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu) * 2;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
out:
return size;
}
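
[Annotation: worked example of the size computation above: with sample_id_all
set and PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER, the
trailer on every non-sample record costs sizeof(u32) * 2 for pid/tid plus one
u64 each for time and id, i.e. 8 + 8 + 8 = 24 bytes.]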
@@ -735,6 +930,8 @@ int perf_evlist__open(struct perf_evlist *evlist)
struct perf_evsel *evsel;
int err;
+ perf_evlist__update_id_pos(evlist);
+
list_for_each_entry(evsel, &evlist->entries, node) {
err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
if (err < 0)
@@ -783,13 +980,6 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
- * Do a dummy execvp to get the PLT entry resolved,
- * so we avoid the resolver overhead on the real
- * execvp call.
- */
- execvp("", (char **)argv);
-
- /*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
@@ -838,7 +1028,7 @@ out_close_ready_pipe:
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
if (evlist->workload.cork_fd > 0) {
- char bf;
+ char bf = 0;
int ret;
/*
* Remove the cork, let it rip!
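
[Annotation: the cork itself is a plain pipe. The forked child parks on a
blocking read before exec'ing, and the parent pops the cork with a one-byte
write once the counters are configured. A condensed standalone sketch of the
prepare/start handshake (error handling and the ready-pipe elided):]

    #include <unistd.h>

    static void cork_example(char **argv)
    {
        int go_pipe[2];
        char bf = 0;

        if (pipe(go_pipe))
            return;

        if (fork() == 0) {
            close(go_pipe[1]);
            read(go_pipe[0], &bf, 1);   /* blocks until parent writes */
            execvp(argv[0], argv);
            _exit(127);
        }

        close(go_pipe[0]);
        /* ... open and enable events here ... */
        write(go_pipe[1], &bf, 1);      /* remove the cork, let it rip */
    }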
@@ -857,7 +1047,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
struct perf_sample *sample)
{
- struct perf_evsel *evsel = perf_evlist__first(evlist);
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
return perf_evsel__parse_sample(evsel, event, sample);
}