perf/cgroups: Don't rotate events for cgroups unnecessarily
diff --git a/kernel/events/core.c b/kernel/events/core.c
index abbd4b3b96c2a2a1a75dde8b1640b0b286e7c344..23efe6792abcc3d4bf7ab2c0d2cb028ef618d5d4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2952,6 +2952,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
        if (!ctx->nr_active || !(is_active & EVENT_ALL))
                return;
 
+       /*
+        * If we had been multiplexing, no rotations are necessary now that no
+        * events are active.
+        */
+       ctx->rotate_necessary = 0;
+
        perf_pmu_disable(ctx->pmu);
        if (is_active & EVENT_PINNED) {
                list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
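The hunk above is where the new rotate_necessary flag gets cleared: once ctx_sched_out() has taken every event off the PMU, a previously requested rotation has nothing left to act on. A minimal sketch of the flag's lifecycle, using hypothetical demo_* stand-ins rather than the kernel's real perf_event_context:

    /* Hypothetical, simplified stand-in for perf_event_context. */
    struct demo_ctx {
            int nr_active;          /* events currently programmed on the PMU */
            int rotate_necessary;   /* set once a flexible group failed to fit */
    };

    static void demo_sched_out(struct demo_ctx *ctx)
    {
            ctx->nr_active = 0;
            ctx->rotate_necessary = 0;      /* nothing active, nothing to rotate */
    }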
@@ -3319,10 +3325,13 @@ static int flexible_sched_in(struct perf_event *event, void *data)
                return 0;
 
        if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-               if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-                       list_add_tail(&event->active_list, &sid->ctx->flexible_active);
-               else
+               int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+               if (ret) {
                        sid->can_add_hw = 0;
+                       sid->ctx->rotate_necessary = 1;
+                       return 0;
+               }
+               list_add_tail(&event->active_list, &sid->ctx->flexible_active);
        }
 
        return 0;
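This hunk is the producer side of the flag: the first flexible group that group_sched_in() rejects both stops further hardware scheduling (can_add_hw = 0) and records that the context is multiplexing, so the next rotation tick has real work to do. A hedged sketch of the same decision, reusing the demo types above (group_fits stands in for a successful group_sched_in()):

    static int demo_flexible_sched_in(struct demo_ctx *ctx, int group_fits)
    {
            if (!group_fits) {
                    ctx->rotate_necessary = 1;      /* PMU full: rotation needed */
                    return 0;                       /* iteration continues */
            }
            ctx->nr_active++;
            return 0;
    }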
@@ -3690,24 +3699,17 @@ ctx_first_active(struct perf_event_context *ctx)
 static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
        struct perf_event *cpu_event = NULL, *task_event = NULL;
-       bool cpu_rotate = false, task_rotate = false;
-       struct perf_event_context *ctx = NULL;
+       struct perf_event_context *task_ctx = NULL;
+       int cpu_rotate, task_rotate;
 
        /*
         * Since we run this from IRQ context, nobody can install new
         * events, thus the event count values are stable.
         */
 
-       if (cpuctx->ctx.nr_events) {
-               if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-                       cpu_rotate = true;
-       }
-
-       ctx = cpuctx->task_ctx;
-       if (ctx && ctx->nr_events) {
-               if (ctx->nr_events != ctx->nr_active)
-                       task_rotate = true;
-       }
+       cpu_rotate = cpuctx->ctx.rotate_necessary;
+       task_ctx = cpuctx->task_ctx;
+       task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;
 
        if (!(cpu_rotate || task_rotate))
                return false;
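And this is the consumer side: perf_rotate_context() used to infer multiplexing from nr_events != nr_active, which is exactly the unnecessary cgroup rotation of the commit title: events for a cgroup that is not currently running are counted in nr_events yet legitimately inactive, so the old test fired with nothing to gain. Side by side, as a sketch with the demo types:

    /* Old heuristic: rotate whenever some counted event is not active.
     * Inactive-but-counted cgroup events make this fire spuriously. */
    static int demo_old_check(int nr_events, int nr_active)
    {
            return nr_events && nr_events != nr_active;
    }

    /* New check: rotate only when schedule-in actually hit a full PMU. */
    static int demo_new_check(const struct demo_ctx *ctx)
    {
            return ctx->rotate_necessary;
    }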
@@ -3716,7 +3718,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
        perf_pmu_disable(cpuctx->ctx.pmu);
 
        if (task_rotate)
-               task_event = ctx_first_active(ctx);
+               task_event = ctx_first_active(task_ctx);
        if (cpu_rotate)
                cpu_event = ctx_first_active(&cpuctx->ctx);
 
@@ -3724,17 +3726,17 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
         * As per the order given at ctx_resched(), first 'pop' task flexible
         * and then, if needed, CPU flexible.
         */
-       if (task_event || (ctx && cpu_event))
-               ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+       if (task_event || (task_ctx && cpu_event))
+               ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
        if (cpu_event)
                cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
        if (task_event)
-               rotate_ctx(ctx, task_event);
+               rotate_ctx(task_ctx, task_event);
        if (cpu_event)
                rotate_ctx(&cpuctx->ctx, cpu_event);
 
-       perf_event_sched_in(cpuctx, ctx, current);
+       perf_event_sched_in(cpuctx, task_ctx, current);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
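The remaining churn in perf_rotate_context() is a mechanical rename of the local ctx to task_ctx, making explicit which context each ctx_sched_out()/rotate_ctx() call operates on; the ordering itself (task flexible events popped first, then CPU flexible, then everything rescheduled) is unchanged. Compressed into the demo model:

    static void demo_rotate(struct demo_ctx *cpu, struct demo_ctx *task)
    {
            int task_rotate = task && task->rotate_necessary;
            int cpu_rotate = cpu->rotate_necessary;

            if (!(cpu_rotate || task_rotate))
                    return;

            if (task_rotate)
                    demo_sched_out(task);   /* task flexible out first */
            if (cpu_rotate)
                    demo_sched_out(cpu);    /* then CPU flexible */

            /* rotate each flexible list, then schedule everything back in */
    }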
@@ -8532,9 +8534,9 @@ static int perf_tp_event_match(struct perf_event *event,
        if (event->hw.state & PERF_HES_STOPPED)
                return 0;
        /*
-        * All tracepoints are from kernel-space.
+        * If exclude_kernel, only trace user-space tracepoints (uprobes)
         */
-       if (event->attr.exclude_kernel)
+       if (event->attr.exclude_kernel && !user_mode(regs))
                return 0;
 
        if (!perf_tp_filter_match(event, data))
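This hunk is unrelated to rotation: with uprobes, not every tracepoint fires from kernel space anymore, so an exclude_kernel event must check where the sample actually originated via user_mode(regs) instead of rejecting all tracepoint samples outright. A hedged model of the predicate, where demo_regs and demo_user_mode() are made-up stand-ins for pt_regs and user_mode():

    struct demo_regs {
            int user;       /* 1 if the event fired while in user mode */
    };

    static int demo_user_mode(const struct demo_regs *regs)
    {
            return regs->user;
    }

    static int demo_tp_match(int exclude_kernel, const struct demo_regs *regs)
    {
            /* reject only samples that actually fired in kernel mode */
            if (exclude_kernel && !demo_user_mode(regs))
                    return 0;
            return 1;       /* remaining filters would run after this */
    }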
@@ -9874,6 +9876,12 @@ static int pmu_dev_alloc(struct pmu *pmu)
        if (ret)
                goto del_dev;
 
+       if (pmu->attr_update)
+               ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
+
+       if (ret)
+               goto del_dev;
+
 out:
        return ret;
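Finally, pmu_dev_alloc() learns about an optional pmu->attr_update list: once the device exists, sysfs_update_groups() applies the updatable attribute groups, and a failure unwinds through the existing del_dev label. The bare if (ret) test works because ret is still 0 from the preceding step whenever attr_update is absent. A sketch of that error-handling shape, with demo names rather than the kernel's:

    static int demo_update_groups(void)
    {
            return 0;       /* stand-in for sysfs_update_groups() */
    }

    static int demo_dev_alloc(int have_attr_update)
    {
            int ret = 0;    /* models success of the earlier steps */

            if (have_attr_update)
                    ret = demo_update_groups();
            if (ret)
                    goto del_dev;
    out:
            return ret;
    del_dev:
            /* unwind the device registration, then return the error */
            goto out;
    }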