Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h  33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index dda5b0a3ff60..38c8b2554842 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -464,6 +464,7 @@ enum perf_callchain_context {
#define PERF_FLAG_FD_NO_GROUP (1U << 0)
#define PERF_FLAG_FD_OUTPUT (1U << 1)
+#define PERF_FLAG_PID_CGROUP (1U << 2) /* pid=cgroup id, per-cpu mode only */
#ifdef __KERNEL__
/*
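The new PERF_FLAG_PID_CGROUP flag reinterprets the pid argument of perf_event_open() as a file descriptor on a cgroup directory, and is only valid in per-cpu mode (cpu >= 0). A minimal userspace sketch; the mount point and cgroup name below are assumptions, as is the helper name:

	/* Sketch: count context switches for all tasks of one cgroup on
	 * a given CPU.  Mount point and group name are assumptions. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/perf_event.h>

	static int open_cgroup_event(int cpu)
	{
		struct perf_event_attr attr;
		int cgrp_fd;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;

		/* With PERF_FLAG_PID_CGROUP, "pid" is an fd on a cgroup dir. */
		cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
		if (cgrp_fd < 0)
			return -1;

		return syscall(__NR_perf_event_open, &attr, cgrp_fd, cpu,
			       -1 /* group_fd */, PERF_FLAG_PID_CGROUP);
	}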
@@ -471,6 +472,7 @@ enum perf_callchain_context {
*/
#ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif
@@ -716,6 +718,22 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info *info; /* timing info, one per cpu */
+};
+#endif
+
/**
* struct perf_event - performance event kernel representation:
*/
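The time/timestamp pair in perf_cgroup_info accumulates time_enabled per CPU: timestamp records when accounting last (re)started on this CPU, and time accumulates the elapsed span. The companion kernel/perf_event.c change (not shown on this page) updates it roughly as below; this is a sketch, with perf_clock() standing in for the kernel-internal time source:

	/* Sketch of the per-cpu time accounting implied by
	 * perf_cgroup_info; the actual helper lives in the .c side
	 * of this patch. */
	static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
	{
		struct perf_cgroup_info *info = this_cpu_ptr(cgrp->info);
		u64 now = perf_clock();		/* kernel time source */

		info->time += now - info->timestamp;	/* accumulate */
		info->timestamp = now;			/* restart interval */
	}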
@@ -832,6 +850,11 @@ struct perf_event {
struct event_filter *filter;
#endif
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp; /* cgroup the event is attached to */
+ int cgrp_defer_enabled;
+#endif
+
#endif /* CONFIG_PERF_EVENTS */
};
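event->cgrp is resolved from a task by walking up from the embedded cgroup_subsys_state, while cgrp_defer_enabled marks cgroup events whose enabling must wait until a task of the monitored cgroup is scheduled onto the CPU. A sketch of the lookup, assuming the cgroup API of this era (task_subsys_state() and a perf_subsys_id enumerated by the cgroup core):

	/* Sketch: map a task to its perf_cgroup via the embedded css.
	 * task_subsys_state()/perf_subsys_id reflect the cgroup API
	 * assumed at the time of this patch. */
	static inline struct perf_cgroup *
	perf_cgroup_from_task(struct task_struct *task)
	{
		return container_of(task_subsys_state(task, perf_subsys_id),
				    struct perf_cgroup, css);
	}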
@@ -886,6 +909,7 @@ struct perf_event_context {
u64 generation;
int pin_count;
struct rcu_head rcu_head;
+ int nr_cgroups; /* cgroup events present */
};
/*
@@ -905,6 +929,9 @@ struct perf_cpu_context {
struct list_head rotation_list;
int jiffies_interval;
struct pmu *active_pmu;
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp;
+#endif
};
struct perf_output_handle {
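The cgrp pointer cached in perf_cpu_context tracks the cgroup of the task currently running on that CPU, so event scheduling can filter cgroup events with a plain pointer compare. Roughly (a sketch; the real check sits in the event-scheduling path of the .c file):

	/* Sketch: a cgroup event only counts while the CPU runs a task
	 * from its cgroup; non-cgroup events always match. */
	static inline bool perf_cgroup_match(struct perf_event *event,
					     struct perf_cpu_context *cpuctx)
	{
		return !event->cgrp || event->cgrp == cpuctx->cgrp;
	}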
@@ -1040,11 +1067,11 @@ have_event:
__perf_sw_event(event_id, nr, nmi, regs, addr);
}
-extern atomic_t perf_task_events;
+extern atomic_t perf_sched_events;
static inline void perf_event_task_sched_in(struct task_struct *task)
{
- COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+ COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
}
static inline
@@ -1052,7 +1079,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
- COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+ COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
}
extern void perf_event_mmap(struct vm_area_struct *vma);
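The rename from perf_task_events to perf_sched_events reflects that the scheduler hooks are now needed whenever any event requires context-switch work, cgroup events included, not only per-task events. COND_STMT keeps the hooks nearly free while the count is zero; functionally it amounts to something like the following simplification (an assumption-level model: the in-tree macro may use jump labels to patch the branch out entirely):

	/* Simplified model of COND_STMT: run stmt only when at least
	 * one event needs scheduler callbacks.  The real macro may use
	 * jump labels instead of a live atomic_read(). */
	#define COND_STMT_SKETCH(ptr, stmt)			\
		do {						\
			if (unlikely(atomic_read(ptr)))		\
				stmt;				\
		} while (0)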