Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 88676ff98c0f157e9142833e5289ba21f4b623cf..77a932b54a64fbeb2640b35c1cc4c096994bf1d7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3538,14 +3538,15 @@ struct perf_read_data {
        int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-       int event_cpu = event->oncpu;
        u16 local_pkg, event_pkg;
 
        if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-               event_pkg =  topology_physical_package_id(event_cpu);
-               local_pkg =  topology_physical_package_id(local_cpu);
+               int local_cpu = smp_processor_id();
+
+               event_pkg = topology_physical_package_id(event_cpu);
+               local_pkg = topology_physical_package_id(local_cpu);
 
                if (event_pkg == local_pkg)
                        return local_cpu;
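
For orientation, here is roughly how the helper reads once this hunk is applied. This is a sketch assembled from the visible +/- lines; the closing return of event_cpu is assumed from the unchanged tail of the function, which the hunk does not show.

/* Sketch of the post-patch helper, reconstructed from the hunk above. */
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
	u16 local_pkg, event_pkg;

	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
		/* Caller is expected to hold preemption off (see perf_event_read() below). */
		int local_cpu = smp_processor_id();

		event_pkg = topology_physical_package_id(event_cpu);
		local_pkg = topology_physical_package_id(local_cpu);

		/* Same package: read locally instead of cross-calling. */
		if (event_pkg == local_pkg)
			return local_cpu;
	}

	/* Assumed unchanged tail: fall back to the CPU the event is running on. */
	return event_cpu;
}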
@@ -3675,7 +3676,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-       int ret = 0, cpu_to_read, local_cpu;
+       int event_cpu, ret = 0;
 
        /*
         * If event is enabled and currently active on a CPU, update the
@@ -3688,21 +3689,25 @@ static int perf_event_read(struct perf_event *event, bool group)
                        .ret = 0,
                };
 
-               local_cpu = get_cpu();
-               cpu_to_read = find_cpu_to_read(event, local_cpu);
-               put_cpu();
+               event_cpu = READ_ONCE(event->oncpu);
+               if ((unsigned)event_cpu >= nr_cpu_ids)
+                       return 0;
+
+               preempt_disable();
+               event_cpu = __perf_event_read_cpu(event, event_cpu);
 
                /*
                 * Purposely ignore the smp_call_function_single() return
                 * value.
                 *
-                * If event->oncpu isn't a valid CPU it means the event got
+                * If event_cpu isn't a valid CPU it means the event got
                 * scheduled out and that will have updated the event count.
                 *
                 * Therefore, either way, we'll have an up-to-date event count
                 * after this.
                 */
-               (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+               (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+               preempt_enable();
                ret = data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
@@ -8090,6 +8095,9 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
        if (task == TASK_TOMBSTONE)
                return;
 
+       if (!ifh->nr_file_filters)
+               return;
+
        mm = get_task_mm(event->ctx->task);
        if (!mm)
                goto restart;
@@ -8260,6 +8268,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                 * attribute.
                 */
                if (state == IF_STATE_END) {
+                       ret = -EINVAL;
                        if (kernel && event->attr.exclude_kernel)
                                goto fail;
 
@@ -8267,6 +8276,18 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                if (!filename)
                                        goto fail;
 
+                               /*
+                                * For now, we only support file-based filters
+                                * in per-task events; doing so for CPU-wide
+                                * events requires additional context switching
+                                * trickery, since same object code will be
+                                * mapped at different virtual addresses in
+                                * different processes.
+                                */
+                               ret = -EOPNOTSUPP;
+                               if (!event->ctx->task)
+                                       goto fail_free_name;
+
                                /* look up the path and grab its inode */
                                ret = kern_path(filename, LOOKUP_FOLLOW, &path);
                                if (ret)
@@ -8282,6 +8303,8 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
                                    !S_ISREG(filter->inode->i_mode))
                                        /* free_filters_list() will iput() */
                                        goto fail;
+
+                               event->addr_filters.nr_file_filters++;
                        }
 
                        /* ready to consume more filters */
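
The three hunks above follow the usual kernel pattern of preloading ret with the error code before each conditional goto fail, so every exit edge of the long IF_STATE_END block reports the right errno: -EINVAL for a malformed filter, -EOPNOTSUPP for a file-based filter on a CPU-wide event. A minimal illustration of the idiom, using hypothetical names (parse_one_filter, struct my_filter, filter_is_supported) that are not part of this file:

/* Hypothetical fragment illustrating the preloaded-errno / goto-fail idiom. */
static int parse_one_filter(const char *tok, struct my_filter *f)
{
	int ret;

	ret = -EINVAL;			/* any bail-out below means "malformed"... */
	if (!tok || !*tok)
		goto fail;

	ret = -EOPNOTSUPP;		/* ...unless it is well-formed but unsupported */
	if (!filter_is_supported(f))
		goto fail;

	return 0;

fail:
	return ret;
}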
@@ -8321,24 +8344,13 @@ perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
        if (WARN_ON_ONCE(event->parent))
                return -EINVAL;
 
-       /*
-        * For now, we only support filtering in per-task events; doing so
-        * for CPU-wide events requires additional context switching trickery,
-        * since same object code will be mapped at different virtual
-        * addresses in different processes.
-        */
-       if (!event->ctx->task)
-               return -EOPNOTSUPP;
-
        ret = perf_event_parse_addr_filter(event, filter_str, &filters);
        if (ret)
-               return ret;
+               goto fail_clear_files;
 
        ret = event->pmu->addr_filters_validate(&filters);
-       if (ret) {
-               free_filters_list(&filters);
-               return ret;
-       }
+       if (ret)
+               goto fail_free_filters;
 
        /* remove existing filters, if any */
        perf_addr_filters_splice(event, &filters);
@@ -8346,6 +8358,14 @@ perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
        /* install new filters */
        perf_event_for_each_child(event, perf_event_addr_filters_apply);
 
+       return ret;
+
+fail_free_filters:
+       free_filters_list(&filters);
+
+fail_clear_files:
+       event->addr_filters.nr_file_filters = 0;
+
        return ret;
 }
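
Taken together, the last two hunks turn the early returns of perf_event_set_addr_filter() into a single unwind chain that also resets the nr_file_filters count accumulated during parsing. A sketch of the resulting tail of the function, assembled from the + and context lines above (nothing beyond the hunks is assumed):

	ret = perf_event_parse_addr_filter(event, filter_str, &filters);
	if (ret)
		goto fail_clear_files;

	ret = event->pmu->addr_filters_validate(&filters);
	if (ret)
		goto fail_free_filters;

	/* remove existing filters, if any */
	perf_addr_filters_splice(event, &filters);

	/* install new filters */
	perf_event_for_each_child(event, perf_event_addr_filters_apply);

	return ret;

fail_free_filters:
	free_filters_list(&filters);

fail_clear_files:
	/* undo the nr_file_filters accounting done during parsing */
	event->addr_filters.nr_file_filters = 0;

	return ret;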