if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
return -EINVAL;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
/* If the cpu_map is empty, all online CPUs are involved */
if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
return -EINVAL;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
evlist__disable(evlist);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
return -EINVAL;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
pc = session->evlist->mmap[0].core.base;
filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
filter_str_len = filter ? strlen(filter) : 0;
- if (!session->evlist->nr_mmaps)
+ if (!session->evlist->core.nr_mmaps)
return -EINVAL;
pc = session->evlist->mmap[0].core.base;
s64 n, ntotal = 0;
u64 flush_time = ULLONG_MAX, mmap_time;
- for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
+ for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) {
n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
if (n < 0)
return -1;
if (!record__aio_enabled(rec))
return;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &maps[i];
if (map->core.base)
int i;
int rc = 0;
- for (i = 0; i < rec->evlist->nr_mmaps; i++) {
+ for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
struct mmap *map = &rec->evlist->mmap[i];
if (!map->auxtrace_mmap.base)
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
u64 flush = 0;
struct mmap *map = &maps[i];
if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
- for (i = 0; i < top->evlist->nr_mmaps; i++)
+ for (i = 0; i < top->evlist->core.nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);
if (overwrite) {
again:
before = trace->nr_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
bool has_user_cpus;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
+ int nr_mmaps;
};
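For reference, a minimal sketch (not part of the patch) of the read loop this rename converges on: the mmap count is now taken from the embedded libperf struct via evlist->core.nr_mmaps, while the mmap array itself still hangs off struct evlist. drain_all_mmaps() is a hypothetical helper name; the perf_mmap__* calls are the same ones that appear in the surrounding hunks.

/*
 * Illustration only, assuming the struct layouts shown in this patch:
 * iterate every ring buffer using the relocated count and drain it with
 * the existing perf_mmap read API.
 */
static void drain_all_mmaps(struct evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];
		union perf_event *event;

		if (perf_mmap__read_init(md) < 0)
			continue;

		while ((event = perf_mmap__read_event(md)) != NULL) {
			/* handle the event here */
			perf_mmap__consume(md);
		}

		perf_mmap__read_done(md);
	}
}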
/**
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &evlist->overwrite_mmap[i];
union perf_event *event;
(*func)();
evlist__disable(evlist);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
struct mmap *md;
int i, ret;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
int i, found;
found = 0;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
while (1) {
int before = nr_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
while (1) {
int before = total_events;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
union perf_event *event;
struct mmap *md;
struct mmap *md;
int i, ret;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
md = &evlist->mmap[i];
if (perf_mmap__read_init(md) < 0)
continue;
if (!evlist->overwrite_mmap)
return 0;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
int fd = evlist->overwrite_mmap[i].core.fd;
int err;
int i;
if (evlist->mmap)
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i]);
if (evlist->overwrite_mmap)
- for (i = 0; i < evlist->nr_mmaps; i++)
+ for (i = 0; i < evlist->core.nr_mmaps; i++)
perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}
int i;
struct mmap *map;
- evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
+ evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
if (perf_cpu_map__empty(evlist->core.cpus))
- evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads);
- map = zalloc(evlist->nr_mmaps * sizeof(struct mmap));
+ evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
+ map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
if (!map)
return NULL;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
map[i].core.fd = -1;
map[i].core.overwrite = overwrite;
/*
if (!draining)
perf_evlist__poll(evlist, 1000);
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *map = &evlist->mmap[i];
union perf_event *event;
struct perf_evlist core;
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_groups;
- int nr_mmaps;
bool enabled;
size_t mmap_len;
int id_pos;
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
struct mmap *md = &evlist->mmap[i];
if (md->core.cpu == cpu)
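The final hunk above is truncated; as a hedged illustration (assuming the structures shown earlier, and using a hypothetical helper name), a complete per-CPU lookup built on the relocated count might read:

/*
 * Illustration only (not part of the patch): find the mmap that belongs to
 * a given CPU, mirroring the truncated loop above.
 */
static struct mmap *evlist__find_mmap_by_cpu(struct evlist *evlist, int cpu)
{
	int i;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct mmap *md = &evlist->mmap[i];

		if (md->core.cpu == cpu)
			return md;
	}

	return NULL;
}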