Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (184 commits) perf probe: Clean up probe_point_lazy_walker() return value tracing: Fix irqoff selftest expanding max buffer tracing: Align 4 byte ints together in struct tracer tracing: Export trace_set_clr_event() tracing: Explain about unstable clock on resume with ring buffer warning ftrace/graph: Trace function entry before updating index ftrace: Add .ref.text as one of the safe areas to trace tracing: Adjust conditional expression latency formatting. tracing: Fix event alignment: skb:kfree_skb tracing: Fix event alignment: mce:mce_record tracing: Fix event alignment: kvm:kvm_hv_hypercall tracing: Fix event alignment: module:module_request tracing: Fix event alignment: ftrace:context_switch and ftrace:wakeup tracing: Remove lock_depth from event entry perf header: Stop using 'self' perf session: Use evlist/evsel for managing perf.data attributes perf top: Don't let events to eat up whole header line perf top: Fix events overflow in top command ring-buffer: Remove unused #include <linux/trace_irq.h> tracing: Add an 'overwrite' trace_option. ...
This commit is contained in:
605
tools/perf/util/annotate.c
Normal file
605
tools/perf/util/annotate.c
Normal file
@@ -0,0 +1,605 @@
|
||||
/*
|
||||
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
||||
*
|
||||
* Parts came from builtin-annotate.c, see those files for further
|
||||
* copyright notes.
|
||||
*
|
||||
* Released under the GPL v2. (and only v2, not any later version)
|
||||
*/
|
||||
|
||||
#include "util.h"
|
||||
#include "build-id.h"
|
||||
#include "color.h"
|
||||
#include "cache.h"
|
||||
#include "symbol.h"
|
||||
#include "debug.h"
|
||||
#include "annotate.h"
|
||||
#include <pthread.h>
|
||||
|
||||
/*
 * Prepare a symbol's annotation state: just arm the mutex protecting
 * notes->src.  The map parameter is unused (kept for the ops signature).
 */
int symbol__annotate_init(struct map *map __used, struct symbol *sym)
{
	pthread_mutex_init(&symbol__annotation(sym)->lock, NULL);
	return 0;
}
|
||||
|
||||
int symbol__alloc_hist(struct symbol *sym, int nevents)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
size_t sizeof_sym_hist = (sizeof(struct sym_hist) +
|
||||
(sym->end - sym->start) * sizeof(u64));
|
||||
|
||||
notes->src = zalloc(sizeof(*notes->src) + nevents * sizeof_sym_hist);
|
||||
if (notes->src == NULL)
|
||||
return -1;
|
||||
notes->src->sizeof_sym_hist = sizeof_sym_hist;
|
||||
notes->src->nr_histograms = nevents;
|
||||
INIT_LIST_HEAD(¬es->src->source);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void symbol__annotate_zero_histograms(struct symbol *sym)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
|
||||
pthread_mutex_lock(¬es->lock);
|
||||
if (notes->src != NULL)
|
||||
memset(notes->src->histograms, 0,
|
||||
notes->src->nr_histograms * notes->src->sizeof_sym_hist);
|
||||
pthread_mutex_unlock(¬es->lock);
|
||||
}
|
||||
|
||||
/*
 * Record one sample at @addr (a map-relative address) in the histogram
 * for event @evidx of @sym.  Returns 0 on success or when the address is
 * outside the symbol (the sample is silently dropped), -ENOMEM when the
 * histograms were never allocated (symbol__alloc_hist() not called).
 */
int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
			     int evidx, u64 addr)
{
	unsigned offset;
	struct annotation *notes;
	struct sym_hist *h;

	notes = symbol__annotation(sym);
	if (notes->src == NULL)
		return -ENOMEM;

	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));

	/*
	 * BUGFIX: addr < sym->start was unchecked; the unsigned subtraction
	 * below would wrap to a huge offset and corrupt memory way past
	 * h->addr[].  Drop out-of-range samples on both sides.
	 */
	if (addr < sym->start || addr >= sym->end)
		return 0;

	offset = addr - sym->start;
	h = annotation__histogram(notes, evidx);
	h->sum++;
	h->addr[offset]++;

	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
		  ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
		  addr, addr - sym->start, evidx, h->addr[offset]);
	return 0;
}
|
||||
|
||||
/*
 * Allocate an objdump_line plus @privsize opaque bytes for the caller
 * (e.g. browser state).  Takes ownership of @line on success; on failure
 * the caller keeps it.  Returns NULL when out of memory.
 */
static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
{
	struct objdump_line *self = malloc(sizeof(*self) + privsize);

	if (self == NULL)
		return NULL;

	self->offset = offset;
	self->line = line;
	return self;
}
|
||||
|
||||
void objdump_line__free(struct objdump_line *self)
|
||||
{
|
||||
free(self->line);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void objdump__add_line(struct list_head *head, struct objdump_line *line)
|
||||
{
|
||||
list_add_tail(&line->node, head);
|
||||
}
|
||||
|
||||
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
|
||||
struct objdump_line *pos)
|
||||
{
|
||||
list_for_each_entry_continue(pos, head, node)
|
||||
if (pos->offset >= 0)
|
||||
return pos;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Print one objdump output line with its sample percentage, colorized.
 * Returns 0 when printed, 1 when filtered by @max_lines, -1 when
 * filtered by @min_pcnt (or, while queueing context, for non-IP lines).
 */
static int objdump_line__print(struct objdump_line *oline, struct symbol *sym,
			       int evidx, u64 len, int min_pcnt,
			       int printed, int max_lines,
			       struct objdump_line *queue)
{
	/* Last source path/color emitted, to avoid printing duplicates */
	static const char *prev_line;
	static const char *prev_color;

	if (oline->offset != -1) {
		struct annotation *notes = symbol__annotation(sym);
		struct source_line *src_line = notes->src->lines;
		struct sym_hist *h = annotation__histogram(notes, evidx);
		struct objdump_line *next;
		s64 offset = oline->offset;
		const char *path = NULL;
		const char *color;
		unsigned int hits = 0;
		double percent = 0.0;

		next = objdump__get_next_ip_line(&notes->src->source, oline);

		/* Sum the samples this line covers, up to the next IP line */
		while (offset < (s64)len &&
		       (next == NULL || offset < next->offset)) {
			if (src_line) {
				if (path == NULL)
					path = src_line[offset].path;
				percent += src_line[offset].percent;
			} else {
				hits += h->addr[offset];
			}
			++offset;
		}

		if (src_line == NULL && h->sum)
			percent = 100.0 * hits / h->sum;

		if (percent < min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		/* Flush the queued context lines preceding this hot line */
		if (queue) {
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == oline)
					break;
				objdump_line__print(queue, sym, evidx, len,
						    0, 0, 1, NULL);
			}
		}

		color = get_percent_color(percent);

		/*
		 * Also color the filename and line if needed, with
		 * the same color than the percentage. Don't print it
		 * twice for close colored addr with the same filename:line
		 */
		if (path) {
			if (!prev_line || strcmp(prev_line, path)
				       || color != prev_color) {
				color_fprintf(stdout, color, " %s", path);
				prev_line = path;
				prev_color = color;
			}
		}

		color_fprintf(stdout, color, " %7.2f", percent);
		printf(" : ");
		color_fprintf(stdout, PERF_COLOR_BLUE, "%s\n", oline->line);
	} else if (max_lines && printed >= max_lines) {
		return 1;
	} else {
		/* While queueing context, non-IP lines are not printed */
		if (queue)
			return -1;

		if (!*oline->line)
			printf(" :\n");
		else
			printf(" : %s\n", oline->line);
	}

	return 0;
}
|
||||
|
||||
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
|
||||
FILE *file, size_t privsize)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct objdump_line *objdump_line;
|
||||
char *line = NULL, *tmp, *tmp2, *c;
|
||||
size_t line_len;
|
||||
s64 line_ip, offset = -1;
|
||||
|
||||
if (getline(&line, &line_len, file) < 0)
|
||||
return -1;
|
||||
|
||||
if (!line)
|
||||
return -1;
|
||||
|
||||
while (line_len != 0 && isspace(line[line_len - 1]))
|
||||
line[--line_len] = '\0';
|
||||
|
||||
c = strchr(line, '\n');
|
||||
if (c)
|
||||
*c = 0;
|
||||
|
||||
line_ip = -1;
|
||||
|
||||
/*
|
||||
* Strip leading spaces:
|
||||
*/
|
||||
tmp = line;
|
||||
while (*tmp) {
|
||||
if (*tmp != ' ')
|
||||
break;
|
||||
tmp++;
|
||||
}
|
||||
|
||||
if (*tmp) {
|
||||
/*
|
||||
* Parse hexa addresses followed by ':'
|
||||
*/
|
||||
line_ip = strtoull(tmp, &tmp2, 16);
|
||||
if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
|
||||
line_ip = -1;
|
||||
}
|
||||
|
||||
if (line_ip != -1) {
|
||||
u64 start = map__rip_2objdump(map, sym->start),
|
||||
end = map__rip_2objdump(map, sym->end);
|
||||
|
||||
offset = line_ip - start;
|
||||
if (offset < 0 || (u64)line_ip > end)
|
||||
offset = -1;
|
||||
}
|
||||
|
||||
objdump_line = objdump_line__new(offset, line, privsize);
|
||||
if (objdump_line == NULL) {
|
||||
free(line);
|
||||
return -1;
|
||||
}
|
||||
objdump__add_line(¬es->src->source, objdump_line);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
|
||||
{
|
||||
struct dso *dso = map->dso;
|
||||
char *filename = dso__build_id_filename(dso, NULL, 0);
|
||||
bool free_filename = true;
|
||||
char command[PATH_MAX * 2];
|
||||
FILE *file;
|
||||
int err = 0;
|
||||
char symfs_filename[PATH_MAX];
|
||||
|
||||
if (filename) {
|
||||
snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
|
||||
symbol_conf.symfs, filename);
|
||||
}
|
||||
|
||||
if (filename == NULL) {
|
||||
if (dso->has_build_id) {
|
||||
pr_err("Can't annotate %s: not enough memory\n",
|
||||
sym->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
goto fallback;
|
||||
} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
|
||||
strstr(command, "[kernel.kallsyms]") ||
|
||||
access(symfs_filename, R_OK)) {
|
||||
free(filename);
|
||||
fallback:
|
||||
/*
|
||||
* If we don't have build-ids or the build-id file isn't in the
|
||||
* cache, or is just a kallsyms file, well, lets hope that this
|
||||
* DSO is the same as when 'perf record' ran.
|
||||
*/
|
||||
filename = dso->long_name;
|
||||
snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
|
||||
symbol_conf.symfs, filename);
|
||||
free_filename = false;
|
||||
}
|
||||
|
||||
if (dso->origin == DSO__ORIG_KERNEL) {
|
||||
char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
|
||||
char *build_id_msg = NULL;
|
||||
|
||||
if (dso->annotate_warned)
|
||||
goto out_free_filename;
|
||||
|
||||
if (dso->has_build_id) {
|
||||
build_id__sprintf(dso->build_id,
|
||||
sizeof(dso->build_id), bf + 15);
|
||||
build_id_msg = bf;
|
||||
}
|
||||
err = -ENOENT;
|
||||
dso->annotate_warned = 1;
|
||||
pr_err("Can't annotate %s: No vmlinux file%s was found in the "
|
||||
"path.\nPlease use 'perf buildid-cache -av vmlinux' or "
|
||||
"--vmlinux vmlinux.\n",
|
||||
sym->name, build_id_msg ?: "");
|
||||
goto out_free_filename;
|
||||
}
|
||||
|
||||
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
|
||||
filename, sym->name, map->unmap_ip(map, sym->start),
|
||||
map->unmap_ip(map, sym->end));
|
||||
|
||||
pr_debug("annotating [%p] %30s : [%p] %30s\n",
|
||||
dso, dso->long_name, sym, sym->name);
|
||||
|
||||
snprintf(command, sizeof(command),
|
||||
"objdump --start-address=0x%016" PRIx64
|
||||
" --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
|
||||
map__rip_2objdump(map, sym->start),
|
||||
map__rip_2objdump(map, sym->end),
|
||||
symfs_filename, filename);
|
||||
|
||||
pr_debug("Executing: %s\n", command);
|
||||
|
||||
file = popen(command, "r");
|
||||
if (!file)
|
||||
goto out_free_filename;
|
||||
|
||||
while (!feof(file))
|
||||
if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
|
||||
break;
|
||||
|
||||
pclose(file);
|
||||
out_free_filename:
|
||||
if (free_filename)
|
||||
free(filename);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void insert_source_line(struct rb_root *root, struct source_line *src_line)
|
||||
{
|
||||
struct source_line *iter;
|
||||
struct rb_node **p = &root->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
iter = rb_entry(parent, struct source_line, node);
|
||||
|
||||
if (src_line->percent > iter->percent)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&src_line->node, parent, p);
|
||||
rb_insert_color(&src_line->node, root);
|
||||
}
|
||||
|
||||
static void symbol__free_source_line(struct symbol *sym, int len)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct source_line *src_line = notes->src->lines;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
free(src_line[i].path);
|
||||
|
||||
free(src_line);
|
||||
notes->src->lines = NULL;
|
||||
}
|
||||
|
||||
/* Get the filename:line for the colored entries */
|
||||
static int symbol__get_source_line(struct symbol *sym, struct map *map,
|
||||
int evidx, struct rb_root *root, int len,
|
||||
const char *filename)
|
||||
{
|
||||
u64 start;
|
||||
int i;
|
||||
char cmd[PATH_MAX * 2];
|
||||
struct source_line *src_line;
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct sym_hist *h = annotation__histogram(notes, evidx);
|
||||
|
||||
if (!h->sum)
|
||||
return 0;
|
||||
|
||||
src_line = notes->src->lines = calloc(len, sizeof(struct source_line));
|
||||
if (!notes->src->lines)
|
||||
return -1;
|
||||
|
||||
start = map->unmap_ip(map, sym->start);
|
||||
|
||||
for (i = 0; i < len; i++) {
|
||||
char *path = NULL;
|
||||
size_t line_len;
|
||||
u64 offset;
|
||||
FILE *fp;
|
||||
|
||||
src_line[i].percent = 100.0 * h->addr[i] / h->sum;
|
||||
if (src_line[i].percent <= 0.5)
|
||||
continue;
|
||||
|
||||
offset = start + i;
|
||||
sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
|
||||
fp = popen(cmd, "r");
|
||||
if (!fp)
|
||||
continue;
|
||||
|
||||
if (getline(&path, &line_len, fp) < 0 || !line_len)
|
||||
goto next;
|
||||
|
||||
src_line[i].path = malloc(sizeof(char) * line_len + 1);
|
||||
if (!src_line[i].path)
|
||||
goto next;
|
||||
|
||||
strcpy(src_line[i].path, path);
|
||||
insert_source_line(root, &src_line[i]);
|
||||
|
||||
next:
|
||||
pclose(fp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void print_summary(struct rb_root *root, const char *filename)
|
||||
{
|
||||
struct source_line *src_line;
|
||||
struct rb_node *node;
|
||||
|
||||
printf("\nSorted summary for file %s\n", filename);
|
||||
printf("----------------------------------------------\n\n");
|
||||
|
||||
if (RB_EMPTY_ROOT(root)) {
|
||||
printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
|
||||
return;
|
||||
}
|
||||
|
||||
node = rb_first(root);
|
||||
while (node) {
|
||||
double percent;
|
||||
const char *color;
|
||||
char *path;
|
||||
|
||||
src_line = rb_entry(node, struct source_line, node);
|
||||
percent = src_line->percent;
|
||||
color = get_percent_color(percent);
|
||||
path = src_line->path;
|
||||
|
||||
color_fprintf(stdout, color, " %7.2f %s", percent, path);
|
||||
node = rb_next(node);
|
||||
}
|
||||
}
|
||||
|
||||
static void symbol__annotate_hits(struct symbol *sym, int evidx)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct sym_hist *h = annotation__histogram(notes, evidx);
|
||||
u64 len = sym->end - sym->start, offset;
|
||||
|
||||
for (offset = 0; offset < len; ++offset)
|
||||
if (h->addr[offset] != 0)
|
||||
printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
|
||||
sym->start + offset, h->addr[offset]);
|
||||
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
|
||||
}
|
||||
|
||||
int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
|
||||
bool full_paths, int min_pcnt, int max_lines,
|
||||
int context)
|
||||
{
|
||||
struct dso *dso = map->dso;
|
||||
const char *filename = dso->long_name, *d_filename;
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct objdump_line *pos, *queue = NULL;
|
||||
int printed = 2, queue_len = 0;
|
||||
int more = 0;
|
||||
u64 len;
|
||||
|
||||
if (full_paths)
|
||||
d_filename = filename;
|
||||
else
|
||||
d_filename = basename(filename);
|
||||
|
||||
len = sym->end - sym->start;
|
||||
|
||||
printf(" Percent | Source code & Disassembly of %s\n", d_filename);
|
||||
printf("------------------------------------------------\n");
|
||||
|
||||
if (verbose)
|
||||
symbol__annotate_hits(sym, evidx);
|
||||
|
||||
list_for_each_entry(pos, ¬es->src->source, node) {
|
||||
if (context && queue == NULL) {
|
||||
queue = pos;
|
||||
queue_len = 0;
|
||||
}
|
||||
|
||||
switch (objdump_line__print(pos, sym, evidx, len, min_pcnt,
|
||||
printed, max_lines, queue)) {
|
||||
case 0:
|
||||
++printed;
|
||||
if (context) {
|
||||
printed += queue_len;
|
||||
queue = NULL;
|
||||
queue_len = 0;
|
||||
}
|
||||
break;
|
||||
case 1:
|
||||
/* filtered by max_lines */
|
||||
++more;
|
||||
break;
|
||||
case -1:
|
||||
default:
|
||||
/*
|
||||
* Filtered by min_pcnt or non IP lines when
|
||||
* context != 0
|
||||
*/
|
||||
if (!context)
|
||||
break;
|
||||
if (queue_len == context)
|
||||
queue = list_entry(queue->node.next, typeof(*queue), node);
|
||||
else
|
||||
++queue_len;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return more;
|
||||
}
|
||||
|
||||
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct sym_hist *h = annotation__histogram(notes, evidx);
|
||||
|
||||
memset(h, 0, notes->src->sizeof_sym_hist);
|
||||
}
|
||||
|
||||
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
|
||||
{
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct sym_hist *h = annotation__histogram(notes, evidx);
|
||||
struct objdump_line *pos;
|
||||
int len = sym->end - sym->start;
|
||||
|
||||
h->sum = 0;
|
||||
|
||||
list_for_each_entry(pos, ¬es->src->source, node) {
|
||||
if (pos->offset != -1 && pos->offset < len) {
|
||||
h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
|
||||
h->sum += h->addr[pos->offset];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void objdump_line_list__purge(struct list_head *head)
|
||||
{
|
||||
struct objdump_line *pos, *n;
|
||||
|
||||
list_for_each_entry_safe(pos, n, head, node) {
|
||||
list_del(&pos->node);
|
||||
objdump_line__free(pos);
|
||||
}
|
||||
}
|
||||
|
||||
int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
|
||||
bool print_lines, bool full_paths, int min_pcnt,
|
||||
int max_lines)
|
||||
{
|
||||
struct dso *dso = map->dso;
|
||||
const char *filename = dso->long_name;
|
||||
struct rb_root source_line = RB_ROOT;
|
||||
u64 len;
|
||||
|
||||
if (symbol__annotate(sym, map, 0) < 0)
|
||||
return -1;
|
||||
|
||||
len = sym->end - sym->start;
|
||||
|
||||
if (print_lines) {
|
||||
symbol__get_source_line(sym, map, evidx, &source_line,
|
||||
len, filename);
|
||||
print_summary(&source_line, filename);
|
||||
}
|
||||
|
||||
symbol__annotate_printf(sym, map, evidx, full_paths,
|
||||
min_pcnt, max_lines, 0);
|
||||
if (print_lines)
|
||||
symbol__free_source_line(sym, len);
|
||||
|
||||
objdump_line_list__purge(&symbol__annotation(sym)->src->source);
|
||||
|
||||
return 0;
|
||||
}
|
||||
103
tools/perf/util/annotate.h
Normal file
103
tools/perf/util/annotate.h
Normal file
@@ -0,0 +1,103 @@
|
||||
#ifndef __PERF_ANNOTATE_H
#define __PERF_ANNOTATE_H

#include <stdbool.h>
#include "types.h"
#include "symbol.h"
#include <linux/list.h>
#include <linux/rbtree.h>

/* One line of 'objdump -dS' output; offset is -1 for source/blank lines */
struct objdump_line {
	struct list_head node;
	s64		 offset;
	char		 *line;
};

void objdump_line__free(struct objdump_line *self);
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
					       struct objdump_line *pos);

/*
 * Per-event sample counts: one u64 per byte of the symbol.
 * NOTE: [0] (not a C99 flexible member) is required here because
 * sym_hist is itself used as an array element type below.
 */
struct sym_hist {
	u64 sum;
	u64 addr[0];
};

/* addr2line result for one symbol offset, indexed by percentage */
struct source_line {
	struct rb_node node;
	double	       percent;
	char	       *path;
};

/** struct annotated_source - symbols with hits have this attached in a struct sannotation
 *
 * @histogram: Array of addr hit histograms per event being monitored
 * @lines: If 'print_lines' is specified, per source code line percentages
 * @source: source parsed from objdump -dS
 *
 * lines is allocated, percentages calculated and all sorted by percentage
 * when the annotation is about to be presented, so the percentages are for
 * one of the entries in the histogram array, i.e. for the event/counter being
 * presented. It is deallocated right after symbol__{tui,tty,etc}_annotate
 * returns.
 */
struct annotated_source {
	struct list_head   source;
	struct source_line *lines;
	int		   nr_histograms;
	int		   sizeof_sym_hist;
	struct sym_hist	   histograms[0];
};

/* Annotation state hanging off every symbol; lock protects src */
struct annotation {
	pthread_mutex_t		lock;
	struct annotated_source *src;
};

/* Allocation container placing the annotation right before the symbol */
struct sannotation {
	struct annotation annotation;
	struct symbol	  symbol;
};

/* Histograms are variable-sized, so index by byte offset, not []. */
static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
{
	return (((void *)&notes->src->histograms) +
		(notes->src->sizeof_sym_hist * idx));
}

/* Recover the annotation from a symbol embedded in a sannotation */
static inline struct annotation *symbol__annotation(struct symbol *sym)
{
	struct sannotation *a = container_of(sym, struct sannotation, symbol);
	return &a->annotation;
}

int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
			     int evidx, u64 addr);
int symbol__alloc_hist(struct symbol *sym, int nevents);
void symbol__annotate_zero_histograms(struct symbol *sym);

int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
int symbol__annotate_init(struct map *map __used, struct symbol *sym);
int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx,
			    bool full_paths, int min_pcnt, int max_lines,
			    int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void objdump_line_list__purge(struct list_head *head);

int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx,
			 bool print_lines, bool full_paths, int min_pcnt,
			 int max_lines);

#ifdef NO_NEWT_SUPPORT
static inline int symbol__tui_annotate(struct symbol *sym __used,
				       struct map *map __used,
				       int evidx __used, int refresh __used)
{
	return 0;
}
#else
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
			 int refresh);
#endif

#endif	/* __PERF_ANNOTATE_H */
|
||||
@@ -14,8 +14,8 @@
|
||||
#include <linux/kernel.h>
|
||||
#include "debug.h"
|
||||
|
||||
static int build_id__mark_dso_hit(event_t *event,
|
||||
struct sample_data *sample __used,
|
||||
static int build_id__mark_dso_hit(union perf_event *event,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct addr_location al;
|
||||
@@ -37,13 +37,14 @@ static int build_id__mark_dso_hit(event_t *event,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int event__exit_del_thread(event_t *self, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
static int perf_event__exit_del_thread(union perf_event *event,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct thread *thread = perf_session__findnew(session, self->fork.tid);
|
||||
struct thread *thread = perf_session__findnew(session, event->fork.tid);
|
||||
|
||||
dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
|
||||
self->fork.ppid, self->fork.ptid);
|
||||
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
|
||||
event->fork.ppid, event->fork.ptid);
|
||||
|
||||
if (thread) {
|
||||
rb_erase(&thread->rb_node, &session->threads);
|
||||
@@ -56,9 +57,9 @@ static int event__exit_del_thread(event_t *self, struct sample_data *sample __us
|
||||
|
||||
struct perf_event_ops build_id__mark_dso_hit_ops = {
|
||||
.sample = build_id__mark_dso_hit,
|
||||
.mmap = event__process_mmap,
|
||||
.fork = event__process_task,
|
||||
.exit = event__exit_del_thread,
|
||||
.mmap = perf_event__process_mmap,
|
||||
.fork = perf_event__process_task,
|
||||
.exit = perf_event__exit_del_thread,
|
||||
};
|
||||
|
||||
char *dso__build_id_filename(struct dso *self, char *bf, size_t size)
|
||||
|
||||
@@ -34,13 +34,14 @@ extern int pager_use_color;
|
||||
extern int use_browser;
|
||||
|
||||
#ifdef NO_NEWT_SUPPORT
|
||||
static inline void setup_browser(void)
|
||||
static inline void setup_browser(bool fallback_to_pager)
|
||||
{
|
||||
setup_pager();
|
||||
if (fallback_to_pager)
|
||||
setup_pager();
|
||||
}
|
||||
static inline void exit_browser(bool wait_for_ok __used) {}
|
||||
#else
|
||||
void setup_browser(void);
|
||||
void setup_browser(bool fallback_to_pager);
|
||||
void exit_browser(bool wait_for_ok);
|
||||
#endif
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (C) 2009-2010, Frederic Weisbecker <fweisbec@gmail.com>
|
||||
* Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
|
||||
*
|
||||
* Handle the callchains from the stream in an ad-hoc radix tree and then
|
||||
* sort them in an rbtree.
|
||||
@@ -18,7 +18,8 @@
|
||||
#include "util.h"
|
||||
#include "callchain.h"
|
||||
|
||||
bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
|
||||
bool ip_callchain__valid(struct ip_callchain *chain,
|
||||
const union perf_event *event)
|
||||
{
|
||||
unsigned int chain_size = event->header.size;
|
||||
chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
|
||||
@@ -26,10 +27,10 @@ bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
|
||||
}
|
||||
|
||||
#define chain_for_each_child(child, parent) \
|
||||
list_for_each_entry(child, &parent->children, brothers)
|
||||
list_for_each_entry(child, &parent->children, siblings)
|
||||
|
||||
#define chain_for_each_child_safe(child, next, parent) \
|
||||
list_for_each_entry_safe(child, next, &parent->children, brothers)
|
||||
list_for_each_entry_safe(child, next, &parent->children, siblings)
|
||||
|
||||
static void
|
||||
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
|
||||
@@ -38,14 +39,14 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
|
||||
struct rb_node **p = &root->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct callchain_node *rnode;
|
||||
u64 chain_cumul = cumul_hits(chain);
|
||||
u64 chain_cumul = callchain_cumul_hits(chain);
|
||||
|
||||
while (*p) {
|
||||
u64 rnode_cumul;
|
||||
|
||||
parent = *p;
|
||||
rnode = rb_entry(parent, struct callchain_node, rb_node);
|
||||
rnode_cumul = cumul_hits(rnode);
|
||||
rnode_cumul = callchain_cumul_hits(rnode);
|
||||
|
||||
switch (mode) {
|
||||
case CHAIN_FLAT:
|
||||
@@ -104,7 +105,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node,
|
||||
|
||||
chain_for_each_child(child, node) {
|
||||
__sort_chain_graph_abs(child, min_hit);
|
||||
if (cumul_hits(child) >= min_hit)
|
||||
if (callchain_cumul_hits(child) >= min_hit)
|
||||
rb_insert_callchain(&node->rb_root, child,
|
||||
CHAIN_GRAPH_ABS);
|
||||
}
|
||||
@@ -129,7 +130,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node,
|
||||
|
||||
chain_for_each_child(child, node) {
|
||||
__sort_chain_graph_rel(child, min_percent);
|
||||
if (cumul_hits(child) >= min_hit)
|
||||
if (callchain_cumul_hits(child) >= min_hit)
|
||||
rb_insert_callchain(&node->rb_root, child,
|
||||
CHAIN_GRAPH_REL);
|
||||
}
|
||||
@@ -143,7 +144,7 @@ sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
|
||||
rb_root->rb_node = chain_root->node.rb_root.rb_node;
|
||||
}
|
||||
|
||||
int register_callchain_param(struct callchain_param *param)
|
||||
int callchain_register_param(struct callchain_param *param)
|
||||
{
|
||||
switch (param->mode) {
|
||||
case CHAIN_GRAPH_ABS:
|
||||
@@ -189,32 +190,27 @@ create_child(struct callchain_node *parent, bool inherit_children)
|
||||
chain_for_each_child(next, new)
|
||||
next->parent = new;
|
||||
}
|
||||
list_add_tail(&new->brothers, &parent->children);
|
||||
list_add_tail(&new->siblings, &parent->children);
|
||||
|
||||
return new;
|
||||
}
|
||||
|
||||
|
||||
struct resolved_ip {
|
||||
u64 ip;
|
||||
struct map_symbol ms;
|
||||
};
|
||||
|
||||
struct resolved_chain {
|
||||
u64 nr;
|
||||
struct resolved_ip ips[0];
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Fill the node with callchain values
|
||||
*/
|
||||
static void
|
||||
fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
|
||||
fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
|
||||
{
|
||||
unsigned int i;
|
||||
struct callchain_cursor_node *cursor_node;
|
||||
|
||||
for (i = start; i < chain->nr; i++) {
|
||||
node->val_nr = cursor->nr - cursor->pos;
|
||||
if (!node->val_nr)
|
||||
pr_warning("Warning: empty node in callchain tree\n");
|
||||
|
||||
cursor_node = callchain_cursor_current(cursor);
|
||||
|
||||
while (cursor_node) {
|
||||
struct callchain_list *call;
|
||||
|
||||
call = zalloc(sizeof(*call));
|
||||
@@ -222,23 +218,25 @@ fill_node(struct callchain_node *node, struct resolved_chain *chain, int start)
|
||||
perror("not enough memory for the code path tree");
|
||||
return;
|
||||
}
|
||||
call->ip = chain->ips[i].ip;
|
||||
call->ms = chain->ips[i].ms;
|
||||
call->ip = cursor_node->ip;
|
||||
call->ms.sym = cursor_node->sym;
|
||||
call->ms.map = cursor_node->map;
|
||||
list_add_tail(&call->list, &node->val);
|
||||
|
||||
callchain_cursor_advance(cursor);
|
||||
cursor_node = callchain_cursor_current(cursor);
|
||||
}
|
||||
node->val_nr = chain->nr - start;
|
||||
if (!node->val_nr)
|
||||
pr_warning("Warning: empty node in callchain tree\n");
|
||||
}
|
||||
|
||||
static void
|
||||
add_child(struct callchain_node *parent, struct resolved_chain *chain,
|
||||
int start, u64 period)
|
||||
add_child(struct callchain_node *parent,
|
||||
struct callchain_cursor *cursor,
|
||||
u64 period)
|
||||
{
|
||||
struct callchain_node *new;
|
||||
|
||||
new = create_child(parent, false);
|
||||
fill_node(new, chain, start);
|
||||
fill_node(new, cursor);
|
||||
|
||||
new->children_hit = 0;
|
||||
new->hit = period;
|
||||
@@ -250,9 +248,10 @@ add_child(struct callchain_node *parent, struct resolved_chain *chain,
|
||||
* Then create another child to host the given callchain of new branch
|
||||
*/
|
||||
static void
|
||||
split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
|
||||
struct callchain_list *to_split, int idx_parents, int idx_local,
|
||||
u64 period)
|
||||
split_add_child(struct callchain_node *parent,
|
||||
struct callchain_cursor *cursor,
|
||||
struct callchain_list *to_split,
|
||||
u64 idx_parents, u64 idx_local, u64 period)
|
||||
{
|
||||
struct callchain_node *new;
|
||||
struct list_head *old_tail;
|
||||
@@ -272,14 +271,14 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
|
||||
/* split the hits */
|
||||
new->hit = parent->hit;
|
||||
new->children_hit = parent->children_hit;
|
||||
parent->children_hit = cumul_hits(new);
|
||||
parent->children_hit = callchain_cumul_hits(new);
|
||||
new->val_nr = parent->val_nr - idx_local;
|
||||
parent->val_nr = idx_local;
|
||||
|
||||
/* create a new child for the new branch if any */
|
||||
if (idx_total < chain->nr) {
|
||||
if (idx_total < cursor->nr) {
|
||||
parent->hit = 0;
|
||||
add_child(parent, chain, idx_total, period);
|
||||
add_child(parent, cursor, period);
|
||||
parent->children_hit += period;
|
||||
} else {
|
||||
parent->hit = period;
|
||||
@@ -287,36 +286,41 @@ split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
|
||||
}
|
||||
|
||||
static int
|
||||
append_chain(struct callchain_node *root, struct resolved_chain *chain,
|
||||
unsigned int start, u64 period);
|
||||
append_chain(struct callchain_node *root,
|
||||
struct callchain_cursor *cursor,
|
||||
u64 period);
|
||||
|
||||
static void
|
||||
append_chain_children(struct callchain_node *root, struct resolved_chain *chain,
|
||||
unsigned int start, u64 period)
|
||||
append_chain_children(struct callchain_node *root,
|
||||
struct callchain_cursor *cursor,
|
||||
u64 period)
|
||||
{
|
||||
struct callchain_node *rnode;
|
||||
|
||||
/* lookup in childrens */
|
||||
chain_for_each_child(rnode, root) {
|
||||
unsigned int ret = append_chain(rnode, chain, start, period);
|
||||
unsigned int ret = append_chain(rnode, cursor, period);
|
||||
|
||||
if (!ret)
|
||||
goto inc_children_hit;
|
||||
}
|
||||
/* nothing in children, add to the current node */
|
||||
add_child(root, chain, start, period);
|
||||
add_child(root, cursor, period);
|
||||
|
||||
inc_children_hit:
|
||||
root->children_hit += period;
|
||||
}
|
||||
|
||||
static int
|
||||
append_chain(struct callchain_node *root, struct resolved_chain *chain,
|
||||
unsigned int start, u64 period)
|
||||
append_chain(struct callchain_node *root,
|
||||
struct callchain_cursor *cursor,
|
||||
u64 period)
|
||||
{
|
||||
struct callchain_cursor_node *curr_snap = cursor->curr;
|
||||
struct callchain_list *cnode;
|
||||
unsigned int i = start;
|
||||
u64 start = cursor->pos;
|
||||
bool found = false;
|
||||
u64 matches;
|
||||
|
||||
/*
|
||||
* Lookup in the current node
|
||||
@@ -324,141 +328,134 @@ append_chain(struct callchain_node *root, struct resolved_chain *chain,
|
||||
* anywhere inside a function.
|
||||
*/
|
||||
list_for_each_entry(cnode, &root->val, list) {
|
||||
struct callchain_cursor_node *node;
|
||||
struct symbol *sym;
|
||||
|
||||
if (i == chain->nr)
|
||||
node = callchain_cursor_current(cursor);
|
||||
if (!node)
|
||||
break;
|
||||
|
||||
sym = chain->ips[i].ms.sym;
|
||||
sym = node->sym;
|
||||
|
||||
if (cnode->ms.sym && sym) {
|
||||
if (cnode->ms.sym->start != sym->start)
|
||||
break;
|
||||
} else if (cnode->ip != chain->ips[i].ip)
|
||||
} else if (cnode->ip != node->ip)
|
||||
break;
|
||||
|
||||
if (!found)
|
||||
found = true;
|
||||
i++;
|
||||
|
||||
callchain_cursor_advance(cursor);
|
||||
}
|
||||
|
||||
/* matches not, relay on the parent */
|
||||
if (!found)
|
||||
if (!found) {
|
||||
cursor->curr = curr_snap;
|
||||
cursor->pos = start;
|
||||
return -1;
|
||||
}
|
||||
|
||||
matches = cursor->pos - start;
|
||||
|
||||
/* we match only a part of the node. Split it and add the new chain */
|
||||
if (i - start < root->val_nr) {
|
||||
split_add_child(root, chain, cnode, start, i - start, period);
|
||||
if (matches < root->val_nr) {
|
||||
split_add_child(root, cursor, cnode, start, matches, period);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* we match 100% of the path, increment the hit */
|
||||
if (i - start == root->val_nr && i == chain->nr) {
|
||||
if (matches == root->val_nr && cursor->pos == cursor->nr) {
|
||||
root->hit += period;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* We match the node and still have a part remaining */
|
||||
append_chain_children(root, chain, i, period);
|
||||
append_chain_children(root, cursor, period);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void filter_context(struct ip_callchain *old, struct resolved_chain *new,
|
||||
struct map_symbol *syms)
|
||||
int callchain_append(struct callchain_root *root,
|
||||
struct callchain_cursor *cursor,
|
||||
u64 period)
|
||||
{
|
||||
int i, j = 0;
|
||||
|
||||
for (i = 0; i < (int)old->nr; i++) {
|
||||
if (old->ips[i] >= PERF_CONTEXT_MAX)
|
||||
continue;
|
||||
|
||||
new->ips[j].ip = old->ips[i];
|
||||
new->ips[j].ms = syms[i];
|
||||
j++;
|
||||
}
|
||||
|
||||
new->nr = j;
|
||||
}
|
||||
|
||||
|
||||
int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
|
||||
struct map_symbol *syms, u64 period)
|
||||
{
|
||||
struct resolved_chain *filtered;
|
||||
|
||||
if (!chain->nr)
|
||||
if (!cursor->nr)
|
||||
return 0;
|
||||
|
||||
filtered = zalloc(sizeof(*filtered) +
|
||||
chain->nr * sizeof(struct resolved_ip));
|
||||
if (!filtered)
|
||||
return -ENOMEM;
|
||||
callchain_cursor_commit(cursor);
|
||||
|
||||
filter_context(chain, filtered, syms);
|
||||
append_chain_children(&root->node, cursor, period);
|
||||
|
||||
if (!filtered->nr)
|
||||
goto end;
|
||||
|
||||
append_chain_children(&root->node, filtered, 0, period);
|
||||
|
||||
if (filtered->nr > root->max_depth)
|
||||
root->max_depth = filtered->nr;
|
||||
end:
|
||||
free(filtered);
|
||||
if (cursor->nr > root->max_depth)
|
||||
root->max_depth = cursor->nr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
merge_chain_branch(struct callchain_node *dst, struct callchain_node *src,
|
||||
struct resolved_chain *chain)
|
||||
merge_chain_branch(struct callchain_cursor *cursor,
|
||||
struct callchain_node *dst, struct callchain_node *src)
|
||||
{
|
||||
struct callchain_cursor_node **old_last = cursor->last;
|
||||
struct callchain_node *child, *next_child;
|
||||
struct callchain_list *list, *next_list;
|
||||
int old_pos = chain->nr;
|
||||
int old_pos = cursor->nr;
|
||||
int err = 0;
|
||||
|
||||
list_for_each_entry_safe(list, next_list, &src->val, list) {
|
||||
chain->ips[chain->nr].ip = list->ip;
|
||||
chain->ips[chain->nr].ms = list->ms;
|
||||
chain->nr++;
|
||||
callchain_cursor_append(cursor, list->ip,
|
||||
list->ms.map, list->ms.sym);
|
||||
list_del(&list->list);
|
||||
free(list);
|
||||
}
|
||||
|
||||
if (src->hit)
|
||||
append_chain_children(dst, chain, 0, src->hit);
|
||||
if (src->hit) {
|
||||
callchain_cursor_commit(cursor);
|
||||
append_chain_children(dst, cursor, src->hit);
|
||||
}
|
||||
|
||||
chain_for_each_child_safe(child, next_child, src) {
|
||||
err = merge_chain_branch(dst, child, chain);
|
||||
err = merge_chain_branch(cursor, dst, child);
|
||||
if (err)
|
||||
break;
|
||||
|
||||
list_del(&child->brothers);
|
||||
list_del(&child->siblings);
|
||||
free(child);
|
||||
}
|
||||
|
||||
chain->nr = old_pos;
|
||||
cursor->nr = old_pos;
|
||||
cursor->last = old_last;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int callchain_merge(struct callchain_root *dst, struct callchain_root *src)
|
||||
int callchain_merge(struct callchain_cursor *cursor,
|
||||
struct callchain_root *dst, struct callchain_root *src)
|
||||
{
|
||||
struct resolved_chain *chain;
|
||||
int err;
|
||||
|
||||
chain = malloc(sizeof(*chain) +
|
||||
src->max_depth * sizeof(struct resolved_ip));
|
||||
if (!chain)
|
||||
return -ENOMEM;
|
||||
|
||||
chain->nr = 0;
|
||||
|
||||
err = merge_chain_branch(&dst->node, &src->node, chain);
|
||||
|
||||
free(chain);
|
||||
|
||||
return err;
|
||||
return merge_chain_branch(cursor, &dst->node, &src->node);
|
||||
}
|
||||
|
||||
int callchain_cursor_append(struct callchain_cursor *cursor,
|
||||
u64 ip, struct map *map, struct symbol *sym)
|
||||
{
|
||||
struct callchain_cursor_node *node = *cursor->last;
|
||||
|
||||
if (!node) {
|
||||
node = calloc(sizeof(*node), 1);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
*cursor->last = node;
|
||||
}
|
||||
|
||||
node->ip = ip;
|
||||
node->map = map;
|
||||
node->sym = sym;
|
||||
|
||||
cursor->nr++;
|
||||
|
||||
cursor->last = &node->next;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ enum chain_mode {
|
||||
|
||||
struct callchain_node {
|
||||
struct callchain_node *parent;
|
||||
struct list_head brothers;
|
||||
struct list_head siblings;
|
||||
struct list_head children;
|
||||
struct list_head val;
|
||||
struct rb_node rb_node; /* to sort nodes in an rbtree */
|
||||
@@ -49,9 +49,30 @@ struct callchain_list {
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
/*
|
||||
* A callchain cursor is a single linked list that
|
||||
* let one feed a callchain progressively.
|
||||
* It keeps persitent allocated entries to minimize
|
||||
* allocations.
|
||||
*/
|
||||
struct callchain_cursor_node {
|
||||
u64 ip;
|
||||
struct map *map;
|
||||
struct symbol *sym;
|
||||
struct callchain_cursor_node *next;
|
||||
};
|
||||
|
||||
struct callchain_cursor {
|
||||
u64 nr;
|
||||
struct callchain_cursor_node *first;
|
||||
struct callchain_cursor_node **last;
|
||||
u64 pos;
|
||||
struct callchain_cursor_node *curr;
|
||||
};
|
||||
|
||||
static inline void callchain_init(struct callchain_root *root)
|
||||
{
|
||||
INIT_LIST_HEAD(&root->node.brothers);
|
||||
INIT_LIST_HEAD(&root->node.siblings);
|
||||
INIT_LIST_HEAD(&root->node.children);
|
||||
INIT_LIST_HEAD(&root->node.val);
|
||||
|
||||
@@ -61,15 +82,54 @@ static inline void callchain_init(struct callchain_root *root)
|
||||
root->max_depth = 0;
|
||||
}
|
||||
|
||||
static inline u64 cumul_hits(struct callchain_node *node)
|
||||
static inline u64 callchain_cumul_hits(struct callchain_node *node)
|
||||
{
|
||||
return node->hit + node->children_hit;
|
||||
}
|
||||
|
||||
int register_callchain_param(struct callchain_param *param);
|
||||
int callchain_append(struct callchain_root *root, struct ip_callchain *chain,
|
||||
struct map_symbol *syms, u64 period);
|
||||
int callchain_merge(struct callchain_root *dst, struct callchain_root *src);
|
||||
int callchain_register_param(struct callchain_param *param);
|
||||
int callchain_append(struct callchain_root *root,
|
||||
struct callchain_cursor *cursor,
|
||||
u64 period);
|
||||
|
||||
bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
|
||||
int callchain_merge(struct callchain_cursor *cursor,
|
||||
struct callchain_root *dst, struct callchain_root *src);
|
||||
|
||||
bool ip_callchain__valid(struct ip_callchain *chain,
|
||||
const union perf_event *event);
|
||||
/*
|
||||
* Initialize a cursor before adding entries inside, but keep
|
||||
* the previously allocated entries as a cache.
|
||||
*/
|
||||
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
|
||||
{
|
||||
cursor->nr = 0;
|
||||
cursor->last = &cursor->first;
|
||||
}
|
||||
|
||||
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
|
||||
struct map *map, struct symbol *sym);
|
||||
|
||||
/* Close a cursor writing session. Initialize for the reader */
|
||||
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
|
||||
{
|
||||
cursor->curr = cursor->first;
|
||||
cursor->pos = 0;
|
||||
}
|
||||
|
||||
/* Cursor reading iteration helpers */
|
||||
static inline struct callchain_cursor_node *
|
||||
callchain_cursor_current(struct callchain_cursor *cursor)
|
||||
{
|
||||
if (cursor->pos == cursor->nr)
|
||||
return NULL;
|
||||
|
||||
return cursor->curr;
|
||||
}
|
||||
|
||||
static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
|
||||
{
|
||||
cursor->curr = cursor->curr->next;
|
||||
cursor->pos++;
|
||||
}
|
||||
#endif /* __PERF_CALLCHAIN_H */
|
||||
|
||||
178
tools/perf/util/cgroup.c
Normal file
178
tools/perf/util/cgroup.c
Normal file
@@ -0,0 +1,178 @@
|
||||
#include "util.h"
|
||||
#include "../perf.h"
|
||||
#include "parse-options.h"
|
||||
#include "evsel.h"
|
||||
#include "cgroup.h"
|
||||
#include "debugfs.h" /* MAX_PATH, STR() */
|
||||
#include "evlist.h"
|
||||
|
||||
int nr_cgroups;
|
||||
|
||||
static int
|
||||
cgroupfs_find_mountpoint(char *buf, size_t maxlen)
|
||||
{
|
||||
FILE *fp;
|
||||
char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
|
||||
char *token, *saved_ptr;
|
||||
int found = 0;
|
||||
|
||||
fp = fopen("/proc/mounts", "r");
|
||||
if (!fp)
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* in order to handle split hierarchy, we need to scan /proc/mounts
|
||||
* and inspect every cgroupfs mount point to find one that has
|
||||
* perf_event subsystem
|
||||
*/
|
||||
while (fscanf(fp, "%*s %"STR(MAX_PATH)"s %"STR(MAX_PATH)"s %"
|
||||
STR(MAX_PATH)"s %*d %*d\n",
|
||||
mountpoint, type, tokens) == 3) {
|
||||
|
||||
if (!strcmp(type, "cgroup")) {
|
||||
|
||||
token = strtok_r(tokens, ",", &saved_ptr);
|
||||
|
||||
while (token != NULL) {
|
||||
if (!strcmp(token, "perf_event")) {
|
||||
found = 1;
|
||||
break;
|
||||
}
|
||||
token = strtok_r(NULL, ",", &saved_ptr);
|
||||
}
|
||||
}
|
||||
if (found)
|
||||
break;
|
||||
}
|
||||
fclose(fp);
|
||||
if (!found)
|
||||
return -1;
|
||||
|
||||
if (strlen(mountpoint) < maxlen) {
|
||||
strcpy(buf, mountpoint);
|
||||
return 0;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int open_cgroup(char *name)
|
||||
{
|
||||
char path[MAX_PATH+1];
|
||||
char mnt[MAX_PATH+1];
|
||||
int fd;
|
||||
|
||||
|
||||
if (cgroupfs_find_mountpoint(mnt, MAX_PATH+1))
|
||||
return -1;
|
||||
|
||||
snprintf(path, MAX_PATH, "%s/%s", mnt, name);
|
||||
|
||||
fd = open(path, O_RDONLY);
|
||||
if (fd == -1)
|
||||
fprintf(stderr, "no access to cgroup %s\n", path);
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
static int add_cgroup(struct perf_evlist *evlist, char *str)
|
||||
{
|
||||
struct perf_evsel *counter;
|
||||
struct cgroup_sel *cgrp = NULL;
|
||||
int n;
|
||||
/*
|
||||
* check if cgrp is already defined, if so we reuse it
|
||||
*/
|
||||
list_for_each_entry(counter, &evlist->entries, node) {
|
||||
cgrp = counter->cgrp;
|
||||
if (!cgrp)
|
||||
continue;
|
||||
if (!strcmp(cgrp->name, str))
|
||||
break;
|
||||
|
||||
cgrp = NULL;
|
||||
}
|
||||
|
||||
if (!cgrp) {
|
||||
cgrp = zalloc(sizeof(*cgrp));
|
||||
if (!cgrp)
|
||||
return -1;
|
||||
|
||||
cgrp->name = str;
|
||||
|
||||
cgrp->fd = open_cgroup(str);
|
||||
if (cgrp->fd == -1) {
|
||||
free(cgrp);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* find corresponding event
|
||||
* if add cgroup N, then need to find event N
|
||||
*/
|
||||
n = 0;
|
||||
list_for_each_entry(counter, &evlist->entries, node) {
|
||||
if (n == nr_cgroups)
|
||||
goto found;
|
||||
n++;
|
||||
}
|
||||
if (cgrp->refcnt == 0)
|
||||
free(cgrp);
|
||||
|
||||
return -1;
|
||||
found:
|
||||
cgrp->refcnt++;
|
||||
counter->cgrp = cgrp;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void close_cgroup(struct cgroup_sel *cgrp)
|
||||
{
|
||||
if (!cgrp)
|
||||
return;
|
||||
|
||||
/* XXX: not reentrant */
|
||||
if (--cgrp->refcnt == 0) {
|
||||
close(cgrp->fd);
|
||||
free(cgrp->name);
|
||||
free(cgrp);
|
||||
}
|
||||
}
|
||||
|
||||
int parse_cgroups(const struct option *opt __used, const char *str,
|
||||
int unset __used)
|
||||
{
|
||||
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
|
||||
const char *p, *e, *eos = str + strlen(str);
|
||||
char *s;
|
||||
int ret;
|
||||
|
||||
if (list_empty(&evlist->entries)) {
|
||||
fprintf(stderr, "must define events before cgroups\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
p = strchr(str, ',');
|
||||
e = p ? p : eos;
|
||||
|
||||
/* allow empty cgroups, i.e., skip */
|
||||
if (e - str) {
|
||||
/* termination added */
|
||||
s = strndup(str, e - str);
|
||||
if (!s)
|
||||
return -1;
|
||||
ret = add_cgroup(evlist, s);
|
||||
if (ret) {
|
||||
free(s);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
/* nr_cgroups is increased een for empty cgroups */
|
||||
nr_cgroups++;
|
||||
if (!p)
|
||||
break;
|
||||
str = p+1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
17
tools/perf/util/cgroup.h
Normal file
17
tools/perf/util/cgroup.h
Normal file
@@ -0,0 +1,17 @@
|
||||
#ifndef __CGROUP_H__
|
||||
#define __CGROUP_H__
|
||||
|
||||
struct option;
|
||||
|
||||
struct cgroup_sel {
|
||||
char *name;
|
||||
int fd;
|
||||
int refcnt;
|
||||
};
|
||||
|
||||
|
||||
extern int nr_cgroups; /* number of explicit cgroups defined */
|
||||
extern void close_cgroup(struct cgroup_sel *cgrp);
|
||||
extern int parse_cgroups(const struct option *opt, const char *str, int unset);
|
||||
|
||||
#endif /* __CGROUP_H__ */
|
||||
@@ -177,3 +177,8 @@ struct cpu_map *cpu_map__dummy_new(void)
|
||||
|
||||
return cpus;
|
||||
}
|
||||
|
||||
void cpu_map__delete(struct cpu_map *map)
|
||||
{
|
||||
free(map);
|
||||
}
|
||||
|
||||
@@ -8,6 +8,6 @@ struct cpu_map {
|
||||
|
||||
struct cpu_map *cpu_map__new(const char *cpu_list);
|
||||
struct cpu_map *cpu_map__dummy_new(void);
|
||||
void *cpu_map__delete(struct cpu_map *map);
|
||||
void cpu_map__delete(struct cpu_map *map);
|
||||
|
||||
#endif /* __PERF_CPUMAP_H */
|
||||
|
||||
@@ -57,7 +57,7 @@ void ui__warning(const char *format, ...)
|
||||
}
|
||||
#endif
|
||||
|
||||
void trace_event(event_t *event)
|
||||
void trace_event(union perf_event *event)
|
||||
{
|
||||
unsigned char *raw_event = (void *)event;
|
||||
const char *color = PERF_COLOR_BLUE;
|
||||
|
||||
@@ -9,7 +9,7 @@ extern int verbose;
|
||||
extern bool quiet, dump_trace;
|
||||
|
||||
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
|
||||
void trace_event(event_t *event);
|
||||
void trace_event(union perf_event *event);
|
||||
|
||||
struct ui_progress;
|
||||
|
||||
|
||||
@@ -6,8 +6,9 @@
|
||||
#include "string.h"
|
||||
#include "strlist.h"
|
||||
#include "thread.h"
|
||||
#include "thread_map.h"
|
||||
|
||||
static const char *event__name[] = {
|
||||
static const char *perf_event__names[] = {
|
||||
[0] = "TOTAL",
|
||||
[PERF_RECORD_MMAP] = "MMAP",
|
||||
[PERF_RECORD_LOST] = "LOST",
|
||||
@@ -25,16 +26,16 @@ static const char *event__name[] = {
|
||||
[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
|
||||
};
|
||||
|
||||
const char *event__get_event_name(unsigned int id)
|
||||
const char *perf_event__name(unsigned int id)
|
||||
{
|
||||
if (id >= ARRAY_SIZE(event__name))
|
||||
if (id >= ARRAY_SIZE(perf_event__names))
|
||||
return "INVALID";
|
||||
if (!event__name[id])
|
||||
if (!perf_event__names[id])
|
||||
return "UNKNOWN";
|
||||
return event__name[id];
|
||||
return perf_event__names[id];
|
||||
}
|
||||
|
||||
static struct sample_data synth_sample = {
|
||||
static struct perf_sample synth_sample = {
|
||||
.pid = -1,
|
||||
.tid = -1,
|
||||
.time = -1,
|
||||
@@ -43,9 +44,9 @@ static struct sample_data synth_sample = {
|
||||
.period = 1,
|
||||
};
|
||||
|
||||
static pid_t event__synthesize_comm(event_t *event, pid_t pid, int full,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
|
||||
int full, perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
char filename[PATH_MAX];
|
||||
char bf[BUFSIZ];
|
||||
@@ -126,9 +127,10 @@ out:
|
||||
return tgid;
|
||||
}
|
||||
|
||||
static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
static int perf_event__synthesize_mmap_events(union perf_event *event,
|
||||
pid_t pid, pid_t tgid,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
char filename[PATH_MAX];
|
||||
FILE *fp;
|
||||
@@ -199,14 +201,14 @@ static int event__synthesize_mmap_events(event_t *event, pid_t pid, pid_t tgid,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__synthesize_modules(event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine)
|
||||
int perf_event__synthesize_modules(perf_event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
struct map_groups *kmaps = &machine->kmaps;
|
||||
event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
|
||||
|
||||
union perf_event *event = zalloc((sizeof(event->mmap) +
|
||||
session->id_hdr_size));
|
||||
if (event == NULL) {
|
||||
pr_debug("Not enough memory synthesizing mmap event "
|
||||
"for kernel modules\n");
|
||||
@@ -251,23 +253,24 @@ int event__synthesize_modules(event__handler_t process,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
|
||||
pid_t pid, event__handler_t process,
|
||||
static int __event__synthesize_thread(union perf_event *comm_event,
|
||||
union perf_event *mmap_event,
|
||||
pid_t pid, perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
pid_t tgid = event__synthesize_comm(comm_event, pid, 1, process,
|
||||
pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
|
||||
session);
|
||||
if (tgid == -1)
|
||||
return -1;
|
||||
return event__synthesize_mmap_events(mmap_event, pid, tgid,
|
||||
return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
|
||||
process, session);
|
||||
}
|
||||
|
||||
int event__synthesize_thread_map(struct thread_map *threads,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
int perf_event__synthesize_thread_map(struct thread_map *threads,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
event_t *comm_event, *mmap_event;
|
||||
union perf_event *comm_event, *mmap_event;
|
||||
int err = -1, thread;
|
||||
|
||||
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
|
||||
@@ -294,12 +297,12 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
int event__synthesize_threads(event__handler_t process,
|
||||
struct perf_session *session)
|
||||
int perf_event__synthesize_threads(perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
DIR *proc;
|
||||
struct dirent dirent, *next;
|
||||
event_t *comm_event, *mmap_event;
|
||||
union perf_event *comm_event, *mmap_event;
|
||||
int err = -1;
|
||||
|
||||
comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
|
||||
@@ -357,10 +360,10 @@ static int find_symbol_cb(void *arg, const char *name, char type,
|
||||
return 1;
|
||||
}
|
||||
|
||||
int event__synthesize_kernel_mmap(event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine,
|
||||
const char *symbol_name)
|
||||
int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine,
|
||||
const char *symbol_name)
|
||||
{
|
||||
size_t size;
|
||||
const char *filename, *mmap_name;
|
||||
@@ -374,8 +377,8 @@ int event__synthesize_kernel_mmap(event__handler_t process,
|
||||
* kernels.
|
||||
*/
|
||||
struct process_symbol_args args = { .name = symbol_name, };
|
||||
event_t *event = zalloc(sizeof(event->mmap) + session->id_hdr_size);
|
||||
|
||||
union perf_event *event = zalloc((sizeof(event->mmap) +
|
||||
session->id_hdr_size));
|
||||
if (event == NULL) {
|
||||
pr_debug("Not enough memory synthesizing mmap event "
|
||||
"for kernel modules\n");
|
||||
@@ -421,42 +424,15 @@ int event__synthesize_kernel_mmap(event__handler_t process,
|
||||
return err;
|
||||
}
|
||||
|
||||
static void thread__comm_adjust(struct thread *self, struct hists *hists)
|
||||
int perf_event__process_comm(union perf_event *event,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
char *comm = self->comm;
|
||||
struct thread *thread = perf_session__findnew(session, event->comm.tid);
|
||||
|
||||
if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
|
||||
(!symbol_conf.comm_list ||
|
||||
strlist__has_entry(symbol_conf.comm_list, comm))) {
|
||||
u16 slen = strlen(comm);
|
||||
dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);
|
||||
|
||||
if (hists__new_col_len(hists, HISTC_COMM, slen))
|
||||
hists__set_col_len(hists, HISTC_THREAD, slen + 6);
|
||||
}
|
||||
}
|
||||
|
||||
static int thread__set_comm_adjust(struct thread *self, const char *comm,
|
||||
struct hists *hists)
|
||||
{
|
||||
int ret = thread__set_comm(self, comm);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
thread__comm_adjust(self, hists);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__process_comm(event_t *self, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct thread *thread = perf_session__findnew(session, self->comm.tid);
|
||||
|
||||
dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
|
||||
|
||||
if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
|
||||
&session->hists)) {
|
||||
if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
|
||||
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
|
||||
return -1;
|
||||
}
|
||||
@@ -464,19 +440,21 @@ int event__process_comm(event_t *self, struct sample_data *sample __used,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__process_lost(event_t *self, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
int perf_event__process_lost(union perf_event *event,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
|
||||
self->lost.id, self->lost.lost);
|
||||
session->hists.stats.total_lost += self->lost.lost;
|
||||
event->lost.id, event->lost.lost);
|
||||
session->hists.stats.total_lost += event->lost.lost;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
|
||||
static void perf_event__set_kernel_mmap_len(union perf_event *event,
|
||||
struct map **maps)
|
||||
{
|
||||
maps[MAP__FUNCTION]->start = self->mmap.start;
|
||||
maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
|
||||
maps[MAP__FUNCTION]->start = event->mmap.start;
|
||||
maps[MAP__FUNCTION]->end = event->mmap.start + event->mmap.len;
|
||||
/*
|
||||
* Be a bit paranoid here, some perf.data file came with
|
||||
* a zero sized synthesized MMAP event for the kernel.
|
||||
@@ -485,8 +463,8 @@ static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
|
||||
maps[MAP__FUNCTION]->end = ~0ULL;
|
||||
}
|
||||
|
||||
static int event__process_kernel_mmap(event_t *self,
|
||||
struct perf_session *session)
|
||||
static int perf_event__process_kernel_mmap(union perf_event *event,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct map *map;
|
||||
char kmmap_prefix[PATH_MAX];
|
||||
@@ -494,9 +472,9 @@ static int event__process_kernel_mmap(event_t *self,
|
||||
enum dso_kernel_type kernel_type;
|
||||
bool is_kernel_mmap;
|
||||
|
||||
machine = perf_session__findnew_machine(session, self->mmap.pid);
|
||||
machine = perf_session__findnew_machine(session, event->mmap.pid);
|
||||
if (!machine) {
|
||||
pr_err("Can't find id %d's machine\n", self->mmap.pid);
|
||||
pr_err("Can't find id %d's machine\n", event->mmap.pid);
|
||||
goto out_problem;
|
||||
}
|
||||
|
||||
@@ -506,17 +484,17 @@ static int event__process_kernel_mmap(event_t *self,
|
||||
else
|
||||
kernel_type = DSO_TYPE_GUEST_KERNEL;
|
||||
|
||||
is_kernel_mmap = memcmp(self->mmap.filename,
|
||||
is_kernel_mmap = memcmp(event->mmap.filename,
|
||||
kmmap_prefix,
|
||||
strlen(kmmap_prefix)) == 0;
|
||||
if (self->mmap.filename[0] == '/' ||
|
||||
(!is_kernel_mmap && self->mmap.filename[0] == '[')) {
|
||||
if (event->mmap.filename[0] == '/' ||
|
||||
(!is_kernel_mmap && event->mmap.filename[0] == '[')) {
|
||||
|
||||
char short_module_name[1024];
|
||||
char *name, *dot;
|
||||
|
||||
if (self->mmap.filename[0] == '/') {
|
||||
name = strrchr(self->mmap.filename, '/');
|
||||
if (event->mmap.filename[0] == '/') {
|
||||
name = strrchr(event->mmap.filename, '/');
|
||||
if (name == NULL)
|
||||
goto out_problem;
|
||||
|
||||
@@ -528,10 +506,10 @@ static int event__process_kernel_mmap(event_t *self,
|
||||
"[%.*s]", (int)(dot - name), name);
|
||||
strxfrchar(short_module_name, '-', '_');
|
||||
} else
|
||||
strcpy(short_module_name, self->mmap.filename);
|
||||
strcpy(short_module_name, event->mmap.filename);
|
||||
|
||||
map = machine__new_module(machine, self->mmap.start,
|
||||
self->mmap.filename);
|
||||
map = machine__new_module(machine, event->mmap.start,
|
||||
event->mmap.filename);
|
||||
if (map == NULL)
|
||||
goto out_problem;
|
||||
|
||||
@@ -541,9 +519,9 @@ static int event__process_kernel_mmap(event_t *self,
|
||||
|
||||
map->dso->short_name = name;
|
||||
map->dso->sname_alloc = 1;
|
||||
map->end = map->start + self->mmap.len;
|
||||
map->end = map->start + event->mmap.len;
|
||||
} else if (is_kernel_mmap) {
|
||||
const char *symbol_name = (self->mmap.filename +
|
||||
const char *symbol_name = (event->mmap.filename +
|
||||
strlen(kmmap_prefix));
|
||||
/*
|
||||
* Should be there already, from the build-id table in
|
||||
@@ -558,10 +536,10 @@ static int event__process_kernel_mmap(event_t *self,
|
||||
if (__machine__create_kernel_maps(machine, kernel) < 0)
|
||||
goto out_problem;
|
||||
|
||||
event_set_kernel_mmap_len(machine->vmlinux_maps, self);
|
||||
perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
|
||||
perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
|
||||
symbol_name,
|
||||
self->mmap.pgoff);
|
||||
event->mmap.pgoff);
|
||||
if (machine__is_default_guest(machine)) {
|
||||
/*
|
||||
* preload dso of guest kernel and modules
|
||||
@@ -575,22 +553,23 @@ out_problem:
|
||||
return -1;
|
||||
}
|
||||
|
||||
int event__process_mmap(event_t *self, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
int perf_event__process_mmap(union perf_event *event,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct machine *machine;
|
||||
struct thread *thread;
|
||||
struct map *map;
|
||||
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
int ret = 0;
|
||||
|
||||
dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
|
||||
self->mmap.pid, self->mmap.tid, self->mmap.start,
|
||||
self->mmap.len, self->mmap.pgoff, self->mmap.filename);
|
||||
event->mmap.pid, event->mmap.tid, event->mmap.start,
|
||||
event->mmap.len, event->mmap.pgoff, event->mmap.filename);
|
||||
|
||||
if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
|
||||
cpumode == PERF_RECORD_MISC_KERNEL) {
|
||||
ret = event__process_kernel_mmap(self, session);
|
||||
ret = perf_event__process_kernel_mmap(event, session);
|
||||
if (ret < 0)
|
||||
goto out_problem;
|
||||
return 0;
|
||||
@@ -599,12 +578,12 @@ int event__process_mmap(event_t *self, struct sample_data *sample __used,
|
||||
machine = perf_session__find_host_machine(session);
|
||||
if (machine == NULL)
|
||||
goto out_problem;
|
||||
thread = perf_session__findnew(session, self->mmap.pid);
|
||||
thread = perf_session__findnew(session, event->mmap.pid);
|
||||
if (thread == NULL)
|
||||
goto out_problem;
|
||||
map = map__new(&machine->user_dsos, self->mmap.start,
|
||||
self->mmap.len, self->mmap.pgoff,
|
||||
self->mmap.pid, self->mmap.filename,
|
||||
map = map__new(&machine->user_dsos, event->mmap.start,
|
||||
event->mmap.len, event->mmap.pgoff,
|
||||
event->mmap.pid, event->mmap.filename,
|
||||
MAP__FUNCTION);
|
||||
if (map == NULL)
|
||||
goto out_problem;
|
||||
@@ -617,16 +596,17 @@ out_problem:
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__process_task(event_t *self, struct sample_data *sample __used,
|
||||
struct perf_session *session)
|
||||
int perf_event__process_task(union perf_event *event,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct thread *thread = perf_session__findnew(session, self->fork.tid);
|
||||
struct thread *parent = perf_session__findnew(session, self->fork.ptid);
|
||||
struct thread *thread = perf_session__findnew(session, event->fork.tid);
|
||||
struct thread *parent = perf_session__findnew(session, event->fork.ptid);
|
||||
|
||||
dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
|
||||
self->fork.ppid, self->fork.ptid);
|
||||
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
|
||||
event->fork.ppid, event->fork.ptid);
|
||||
|
||||
if (self->header.type == PERF_RECORD_EXIT) {
|
||||
if (event->header.type == PERF_RECORD_EXIT) {
|
||||
perf_session__remove_thread(session, thread);
|
||||
return 0;
|
||||
}
|
||||
@@ -640,20 +620,22 @@ int event__process_task(event_t *self, struct sample_data *sample __used,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__process(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session)
|
||||
int perf_event__process(union perf_event *event, struct perf_sample *sample,
|
||||
struct perf_session *session)
|
||||
{
|
||||
switch (event->header.type) {
|
||||
case PERF_RECORD_COMM:
|
||||
event__process_comm(event, sample, session);
|
||||
perf_event__process_comm(event, sample, session);
|
||||
break;
|
||||
case PERF_RECORD_MMAP:
|
||||
event__process_mmap(event, sample, session);
|
||||
perf_event__process_mmap(event, sample, session);
|
||||
break;
|
||||
case PERF_RECORD_FORK:
|
||||
case PERF_RECORD_EXIT:
|
||||
event__process_task(event, sample, session);
|
||||
perf_event__process_task(event, sample, session);
|
||||
break;
|
||||
case PERF_RECORD_LOST:
|
||||
perf_event__process_lost(event, sample, session);
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -750,24 +732,14 @@ void thread__find_addr_location(struct thread *self,
|
||||
al->sym = NULL;
|
||||
}
|
||||
|
||||
static void dso__calc_col_width(struct dso *self, struct hists *hists)
|
||||
int perf_event__preprocess_sample(const union perf_event *event,
|
||||
struct perf_session *session,
|
||||
struct addr_location *al,
|
||||
struct perf_sample *sample,
|
||||
symbol_filter_t filter)
|
||||
{
|
||||
if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
|
||||
(!symbol_conf.dso_list ||
|
||||
strlist__has_entry(symbol_conf.dso_list, self->name))) {
|
||||
u16 slen = dso__name_len(self);
|
||||
hists__new_col_len(hists, HISTC_DSO, slen);
|
||||
}
|
||||
|
||||
self->slen_calculated = 1;
|
||||
}
|
||||
|
||||
int event__preprocess_sample(const event_t *self, struct perf_session *session,
|
||||
struct addr_location *al, struct sample_data *data,
|
||||
symbol_filter_t filter)
|
||||
{
|
||||
u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
struct thread *thread = perf_session__findnew(session, self->ip.pid);
|
||||
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
|
||||
struct thread *thread = perf_session__findnew(session, event->ip.pid);
|
||||
|
||||
if (thread == NULL)
|
||||
return -1;
|
||||
@@ -789,12 +761,12 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
|
||||
machine__create_kernel_maps(&session->host_machine);
|
||||
|
||||
thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
|
||||
self->ip.pid, self->ip.ip, al);
|
||||
event->ip.pid, event->ip.ip, al);
|
||||
dump_printf(" ...... dso: %s\n",
|
||||
al->map ? al->map->dso->long_name :
|
||||
al->level == 'H' ? "[hypervisor]" : "<not found>");
|
||||
al->sym = NULL;
|
||||
al->cpu = data->cpu;
|
||||
al->cpu = sample->cpu;
|
||||
|
||||
if (al->map) {
|
||||
if (symbol_conf.dso_list &&
|
||||
@@ -805,23 +777,8 @@ int event__preprocess_sample(const event_t *self, struct perf_session *session,
|
||||
strlist__has_entry(symbol_conf.dso_list,
|
||||
al->map->dso->long_name)))))
|
||||
goto out_filtered;
|
||||
/*
|
||||
* We have to do this here as we may have a dso with no symbol
|
||||
* hit that has a name longer than the ones with symbols
|
||||
* sampled.
|
||||
*/
|
||||
if (!sort_dso.elide && !al->map->dso->slen_calculated)
|
||||
dso__calc_col_width(al->map->dso, &session->hists);
|
||||
|
||||
al->sym = map__find_symbol(al->map, al->addr, filter);
|
||||
} else {
|
||||
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
|
||||
|
||||
if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
|
||||
!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
|
||||
!symbol_conf.dso_list)
|
||||
hists__set_col_len(&session->hists, HISTC_DSO,
|
||||
unresolved_col_width);
|
||||
}
|
||||
|
||||
if (symbol_conf.sym_list && al->sym &&
|
||||
@@ -834,128 +791,3 @@ out_filtered:
|
||||
al->filtered = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int event__parse_id_sample(const event_t *event,
|
||||
struct perf_session *session,
|
||||
struct sample_data *sample)
|
||||
{
|
||||
const u64 *array;
|
||||
u64 type;
|
||||
|
||||
sample->cpu = sample->pid = sample->tid = -1;
|
||||
sample->stream_id = sample->id = sample->time = -1ULL;
|
||||
|
||||
if (!session->sample_id_all)
|
||||
return 0;
|
||||
|
||||
array = event->sample.array;
|
||||
array += ((event->header.size -
|
||||
sizeof(event->header)) / sizeof(u64)) - 1;
|
||||
type = session->sample_type;
|
||||
|
||||
if (type & PERF_SAMPLE_CPU) {
|
||||
u32 *p = (u32 *)array;
|
||||
sample->cpu = *p;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_STREAM_ID) {
|
||||
sample->stream_id = *array;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_ID) {
|
||||
sample->id = *array;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TIME) {
|
||||
sample->time = *array;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TID) {
|
||||
u32 *p = (u32 *)array;
|
||||
sample->pid = p[0];
|
||||
sample->tid = p[1];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__parse_sample(const event_t *event, struct perf_session *session,
|
||||
struct sample_data *data)
|
||||
{
|
||||
const u64 *array;
|
||||
u64 type;
|
||||
|
||||
if (event->header.type != PERF_RECORD_SAMPLE)
|
||||
return event__parse_id_sample(event, session, data);
|
||||
|
||||
array = event->sample.array;
|
||||
type = session->sample_type;
|
||||
|
||||
if (type & PERF_SAMPLE_IP) {
|
||||
data->ip = event->ip.ip;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TID) {
|
||||
u32 *p = (u32 *)array;
|
||||
data->pid = p[0];
|
||||
data->tid = p[1];
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TIME) {
|
||||
data->time = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_ADDR) {
|
||||
data->addr = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
data->id = -1ULL;
|
||||
if (type & PERF_SAMPLE_ID) {
|
||||
data->id = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_STREAM_ID) {
|
||||
data->stream_id = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_CPU) {
|
||||
u32 *p = (u32 *)array;
|
||||
data->cpu = *p;
|
||||
array++;
|
||||
} else
|
||||
data->cpu = -1;
|
||||
|
||||
if (type & PERF_SAMPLE_PERIOD) {
|
||||
data->period = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_READ) {
|
||||
pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_CALLCHAIN) {
|
||||
data->callchain = (struct ip_callchain *)array;
|
||||
array += 1 + data->callchain->nr;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_RAW) {
|
||||
u32 *p = (u32 *)array;
|
||||
data->raw_size = *p;
|
||||
p++;
|
||||
data->raw_data = p;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -61,7 +61,7 @@ struct sample_event {
|
||||
u64 array[];
|
||||
};
|
||||
|
||||
struct sample_data {
|
||||
struct perf_sample {
|
||||
u64 ip;
|
||||
u32 pid, tid;
|
||||
u64 time;
|
||||
@@ -117,7 +117,7 @@ struct tracing_data_event {
|
||||
u32 size;
|
||||
};
|
||||
|
||||
typedef union event_union {
|
||||
union perf_event {
|
||||
struct perf_event_header header;
|
||||
struct ip_event ip;
|
||||
struct mmap_event mmap;
|
||||
@@ -130,50 +130,54 @@ typedef union event_union {
|
||||
struct event_type_event event_type;
|
||||
struct tracing_data_event tracing_data;
|
||||
struct build_id_event build_id;
|
||||
} event_t;
|
||||
};
|
||||
|
||||
void event__print_totals(void);
|
||||
void perf_event__print_totals(void);
|
||||
|
||||
struct perf_session;
|
||||
struct thread_map;
|
||||
|
||||
typedef int (*event__handler_synth_t)(event_t *event,
|
||||
typedef int (*perf_event__handler_synth_t)(union perf_event *event,
|
||||
struct perf_session *session);
|
||||
typedef int (*perf_event__handler_t)(union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session);
|
||||
|
||||
int event__synthesize_thread_map(struct thread_map *threads,
|
||||
event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__synthesize_threads(event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__synthesize_kernel_mmap(event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine,
|
||||
const char *symbol_name);
|
||||
int perf_event__synthesize_thread_map(struct thread_map *threads,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int perf_event__synthesize_threads(perf_event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine,
|
||||
const char *symbol_name);
|
||||
|
||||
int event__synthesize_modules(event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine);
|
||||
int perf_event__synthesize_modules(perf_event__handler_t process,
|
||||
struct perf_session *session,
|
||||
struct machine *machine);
|
||||
|
||||
int event__process_comm(event_t *self, struct sample_data *sample,
|
||||
int perf_event__process_comm(union perf_event *event, struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
int perf_event__process_lost(union perf_event *event, struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
int perf_event__process_mmap(union perf_event *event, struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
int perf_event__process_task(union perf_event *event, struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
int perf_event__process(union perf_event *event, struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
int event__process_lost(event_t *self, struct sample_data *sample,
|
||||
struct perf_session *session);
|
||||
int event__process_mmap(event_t *self, struct sample_data *sample,
|
||||
struct perf_session *session);
|
||||
int event__process_task(event_t *self, struct sample_data *sample,
|
||||
struct perf_session *session);
|
||||
int event__process(event_t *event, struct sample_data *sample,
|
||||
struct perf_session *session);
|
||||
|
||||
struct addr_location;
|
||||
int event__preprocess_sample(const event_t *self, struct perf_session *session,
|
||||
struct addr_location *al, struct sample_data *data,
|
||||
symbol_filter_t filter);
|
||||
int event__parse_sample(const event_t *event, struct perf_session *session,
|
||||
struct sample_data *sample);
|
||||
int perf_event__preprocess_sample(const union perf_event *self,
|
||||
struct perf_session *session,
|
||||
struct addr_location *al,
|
||||
struct perf_sample *sample,
|
||||
symbol_filter_t filter);
|
||||
|
||||
const char *event__get_event_name(unsigned int id);
|
||||
const char *perf_event__name(unsigned int id);
|
||||
|
||||
int perf_event__parse_sample(const union perf_event *event, u64 type,
|
||||
bool sample_id_all, struct perf_sample *sample);
|
||||
|
||||
#endif /* __PERF_RECORD_H */
|
||||
|
||||
394
tools/perf/util/evlist.c
Normal file
394
tools/perf/util/evlist.c
Normal file
@@ -0,0 +1,394 @@
|
||||
/*
|
||||
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
||||
*
|
||||
* Parts came from builtin-{top,stat,record}.c, see those files for further
|
||||
* copyright notes.
|
||||
*
|
||||
* Released under the GPL v2. (and only v2, not any later version)
|
||||
*/
|
||||
#include <poll.h>
|
||||
#include "cpumap.h"
|
||||
#include "thread_map.h"
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "util.h"
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/hash.h>
|
||||
|
||||
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
|
||||
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
|
||||
|
||||
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
|
||||
struct thread_map *threads)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
|
||||
INIT_HLIST_HEAD(&evlist->heads[i]);
|
||||
INIT_LIST_HEAD(&evlist->entries);
|
||||
perf_evlist__set_maps(evlist, cpus, threads);
|
||||
}
|
||||
|
||||
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
|
||||
struct thread_map *threads)
|
||||
{
|
||||
struct perf_evlist *evlist = zalloc(sizeof(*evlist));
|
||||
|
||||
if (evlist != NULL)
|
||||
perf_evlist__init(evlist, cpus, threads);
|
||||
|
||||
return evlist;
|
||||
}
|
||||
|
||||
static void perf_evlist__purge(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *pos, *n;
|
||||
|
||||
list_for_each_entry_safe(pos, n, &evlist->entries, node) {
|
||||
list_del_init(&pos->node);
|
||||
perf_evsel__delete(pos);
|
||||
}
|
||||
|
||||
evlist->nr_entries = 0;
|
||||
}
|
||||
|
||||
void perf_evlist__exit(struct perf_evlist *evlist)
|
||||
{
|
||||
free(evlist->mmap);
|
||||
free(evlist->pollfd);
|
||||
evlist->mmap = NULL;
|
||||
evlist->pollfd = NULL;
|
||||
}
|
||||
|
||||
void perf_evlist__delete(struct perf_evlist *evlist)
|
||||
{
|
||||
perf_evlist__purge(evlist);
|
||||
perf_evlist__exit(evlist);
|
||||
free(evlist);
|
||||
}
|
||||
|
||||
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
|
||||
{
|
||||
list_add_tail(&entry->node, &evlist->entries);
|
||||
++evlist->nr_entries;
|
||||
}
|
||||
|
||||
int perf_evlist__add_default(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_event_attr attr = {
|
||||
.type = PERF_TYPE_HARDWARE,
|
||||
.config = PERF_COUNT_HW_CPU_CYCLES,
|
||||
};
|
||||
struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
|
||||
|
||||
if (evsel == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
perf_evlist__add(evlist, evsel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
|
||||
{
|
||||
int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
|
||||
evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
|
||||
return evlist->pollfd != NULL ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
|
||||
{
|
||||
fcntl(fd, F_SETFL, O_NONBLOCK);
|
||||
evlist->pollfd[evlist->nr_fds].fd = fd;
|
||||
evlist->pollfd[evlist->nr_fds].events = POLLIN;
|
||||
evlist->nr_fds++;
|
||||
}
|
||||
|
||||
static void perf_evlist__id_hash(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel,
|
||||
int cpu, int thread, u64 id)
|
||||
{
|
||||
int hash;
|
||||
struct perf_sample_id *sid = SID(evsel, cpu, thread);
|
||||
|
||||
sid->id = id;
|
||||
sid->evsel = evsel;
|
||||
hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
|
||||
hlist_add_head(&sid->node, &evlist->heads[hash]);
|
||||
}
|
||||
|
||||
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
|
||||
int cpu, int thread, u64 id)
|
||||
{
|
||||
perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
|
||||
evsel->id[evsel->ids++] = id;
|
||||
}
|
||||
|
||||
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
|
||||
struct perf_evsel *evsel,
|
||||
int cpu, int thread, int fd)
|
||||
{
|
||||
u64 read_data[4] = { 0, };
|
||||
int id_idx = 1; /* The first entry is the counter value */
|
||||
|
||||
if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
|
||||
read(fd, &read_data, sizeof(read_data)) == -1)
|
||||
return -1;
|
||||
|
||||
if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
|
||||
++id_idx;
|
||||
if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
|
||||
++id_idx;
|
||||
|
||||
perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
|
||||
{
|
||||
struct hlist_head *head;
|
||||
struct hlist_node *pos;
|
||||
struct perf_sample_id *sid;
|
||||
int hash;
|
||||
|
||||
if (evlist->nr_entries == 1)
|
||||
return list_entry(evlist->entries.next, struct perf_evsel, node);
|
||||
|
||||
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
|
||||
head = &evlist->heads[hash];
|
||||
|
||||
hlist_for_each_entry(sid, pos, head, node)
|
||||
if (sid->id == id)
|
||||
return sid->evsel;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
|
||||
{
|
||||
/* XXX Move this to perf.c, making it generally available */
|
||||
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
|
||||
struct perf_mmap *md = &evlist->mmap[cpu];
|
||||
unsigned int head = perf_mmap__read_head(md);
|
||||
unsigned int old = md->prev;
|
||||
unsigned char *data = md->base + page_size;
|
||||
union perf_event *event = NULL;
|
||||
|
||||
if (evlist->overwrite) {
|
||||
/*
|
||||
* If we're further behind than half the buffer, there's a chance
|
||||
* the writer will bite our tail and mess up the samples under us.
|
||||
*
|
||||
* If we somehow ended up ahead of the head, we got messed up.
|
||||
*
|
||||
* In either case, truncate and restart at head.
|
||||
*/
|
||||
int diff = head - old;
|
||||
if (diff > md->mask / 2 || diff < 0) {
|
||||
fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
|
||||
|
||||
/*
|
||||
* head points to a known good entry, start there.
|
||||
*/
|
||||
old = head;
|
||||
}
|
||||
}
|
||||
|
||||
if (old != head) {
|
||||
size_t size;
|
||||
|
||||
event = (union perf_event *)&data[old & md->mask];
|
||||
size = event->header.size;
|
||||
|
||||
/*
|
||||
* Event straddles the mmap boundary -- header should always
|
||||
* be inside due to u64 alignment of output.
|
||||
*/
|
||||
if ((old & md->mask) + size != ((old + size) & md->mask)) {
|
||||
unsigned int offset = old;
|
||||
unsigned int len = min(sizeof(*event), size), cpy;
|
||||
void *dst = &evlist->event_copy;
|
||||
|
||||
do {
|
||||
cpy = min(md->mask + 1 - (offset & md->mask), len);
|
||||
memcpy(dst, &data[offset & md->mask], cpy);
|
||||
offset += cpy;
|
||||
dst += cpy;
|
||||
len -= cpy;
|
||||
} while (len);
|
||||
|
||||
event = &evlist->event_copy;
|
||||
}
|
||||
|
||||
old += size;
|
||||
}
|
||||
|
||||
md->prev = old;
|
||||
|
||||
if (!evlist->overwrite)
|
||||
perf_mmap__write_tail(md, old);
|
||||
|
||||
return event;
|
||||
}
|
||||
|
||||
void perf_evlist__munmap(struct perf_evlist *evlist)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
|
||||
if (evlist->mmap[cpu].base != NULL) {
|
||||
munmap(evlist->mmap[cpu].base, evlist->mmap_len);
|
||||
evlist->mmap[cpu].base = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
|
||||
{
|
||||
evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
|
||||
return evlist->mmap != NULL ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
|
||||
int mask, int fd)
|
||||
{
|
||||
evlist->mmap[cpu].prev = 0;
|
||||
evlist->mmap[cpu].mask = mask;
|
||||
evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
|
||||
MAP_SHARED, fd, 0);
|
||||
if (evlist->mmap[cpu].base == MAP_FAILED)
|
||||
return -1;
|
||||
|
||||
perf_evlist__add_pollfd(evlist, fd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** perf_evlist__mmap - Create per cpu maps to receive events
|
||||
*
|
||||
* @evlist - list of events
|
||||
* @pages - map length in pages
|
||||
* @overwrite - overwrite older events?
|
||||
*
|
||||
* If overwrite is false the user needs to signal event consuption using:
|
||||
*
|
||||
* struct perf_mmap *m = &evlist->mmap[cpu];
|
||||
* unsigned int head = perf_mmap__read_head(m);
|
||||
*
|
||||
* perf_mmap__write_tail(m, head)
|
||||
*
|
||||
* Using perf_evlist__read_on_cpu does this automatically.
|
||||
*/
|
||||
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
|
||||
{
|
||||
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
|
||||
int mask = pages * page_size - 1, cpu;
|
||||
struct perf_evsel *first_evsel, *evsel;
|
||||
const struct cpu_map *cpus = evlist->cpus;
|
||||
const struct thread_map *threads = evlist->threads;
|
||||
int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
|
||||
|
||||
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
evlist->overwrite = overwrite;
|
||||
evlist->mmap_len = (pages + 1) * page_size;
|
||||
first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
|
||||
|
||||
list_for_each_entry(evsel, &evlist->entries, node) {
|
||||
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
|
||||
evsel->sample_id == NULL &&
|
||||
perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
for (cpu = 0; cpu < cpus->nr; cpu++) {
|
||||
for (thread = 0; thread < threads->nr; thread++) {
|
||||
int fd = FD(evsel, cpu, thread);
|
||||
|
||||
if (evsel->idx || thread) {
|
||||
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
|
||||
FD(first_evsel, cpu, 0)) != 0)
|
||||
goto out_unmap;
|
||||
} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
|
||||
goto out_unmap;
|
||||
|
||||
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
|
||||
perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
|
||||
goto out_unmap;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_unmap:
|
||||
for (cpu = 0; cpu < cpus->nr; cpu++) {
|
||||
if (evlist->mmap[cpu].base != NULL) {
|
||||
munmap(evlist->mmap[cpu].base, evlist->mmap_len);
|
||||
evlist->mmap[cpu].base = NULL;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
|
||||
pid_t target_tid, const char *cpu_list)
|
||||
{
|
||||
evlist->threads = thread_map__new(target_pid, target_tid);
|
||||
|
||||
if (evlist->threads == NULL)
|
||||
return -1;
|
||||
|
||||
if (target_tid != -1)
|
||||
evlist->cpus = cpu_map__dummy_new();
|
||||
else
|
||||
evlist->cpus = cpu_map__new(cpu_list);
|
||||
|
||||
if (evlist->cpus == NULL)
|
||||
goto out_delete_threads;
|
||||
|
||||
return 0;
|
||||
|
||||
out_delete_threads:
|
||||
thread_map__delete(evlist->threads);
|
||||
return -1;
|
||||
}
|
||||
|
||||
void perf_evlist__delete_maps(struct perf_evlist *evlist)
|
||||
{
|
||||
cpu_map__delete(evlist->cpus);
|
||||
thread_map__delete(evlist->threads);
|
||||
evlist->cpus = NULL;
|
||||
evlist->threads = NULL;
|
||||
}
|
||||
|
||||
int perf_evlist__set_filters(struct perf_evlist *evlist)
|
||||
{
|
||||
const struct thread_map *threads = evlist->threads;
|
||||
const struct cpu_map *cpus = evlist->cpus;
|
||||
struct perf_evsel *evsel;
|
||||
char *filter;
|
||||
int thread;
|
||||
int cpu;
|
||||
int err;
|
||||
int fd;
|
||||
|
||||
list_for_each_entry(evsel, &evlist->entries, node) {
|
||||
filter = evsel->filter;
|
||||
if (!filter)
|
||||
continue;
|
||||
for (cpu = 0; cpu < cpus->nr; cpu++) {
|
||||
for (thread = 0; thread < threads->nr; thread++) {
|
||||
fd = FD(evsel, cpu, thread);
|
||||
err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
68
tools/perf/util/evlist.h
Normal file
68
tools/perf/util/evlist.h
Normal file
@@ -0,0 +1,68 @@
|
||||
#ifndef __PERF_EVLIST_H
|
||||
#define __PERF_EVLIST_H 1
|
||||
|
||||
#include <linux/list.h>
|
||||
#include "../perf.h"
|
||||
#include "event.h"
|
||||
|
||||
struct pollfd;
|
||||
struct thread_map;
|
||||
struct cpu_map;
|
||||
|
||||
#define PERF_EVLIST__HLIST_BITS 8
|
||||
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
|
||||
|
||||
struct perf_evlist {
|
||||
struct list_head entries;
|
||||
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
|
||||
int nr_entries;
|
||||
int nr_fds;
|
||||
int mmap_len;
|
||||
bool overwrite;
|
||||
union perf_event event_copy;
|
||||
struct perf_mmap *mmap;
|
||||
struct pollfd *pollfd;
|
||||
struct thread_map *threads;
|
||||
struct cpu_map *cpus;
|
||||
};
|
||||
|
||||
struct perf_evsel;
|
||||
|
||||
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
|
||||
struct thread_map *threads);
|
||||
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
|
||||
struct thread_map *threads);
|
||||
void perf_evlist__exit(struct perf_evlist *evlist);
|
||||
void perf_evlist__delete(struct perf_evlist *evlist);
|
||||
|
||||
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
|
||||
int perf_evlist__add_default(struct perf_evlist *evlist);
|
||||
|
||||
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
|
||||
int cpu, int thread, u64 id);
|
||||
|
||||
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
|
||||
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
|
||||
|
||||
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
|
||||
|
||||
union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
|
||||
|
||||
int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
|
||||
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
|
||||
void perf_evlist__munmap(struct perf_evlist *evlist);
|
||||
|
||||
static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
|
||||
struct cpu_map *cpus,
|
||||
struct thread_map *threads)
|
||||
{
|
||||
evlist->cpus = cpus;
|
||||
evlist->threads = threads;
|
||||
}
|
||||
|
||||
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
|
||||
pid_t target_tid, const char *cpu_list);
|
||||
void perf_evlist__delete_maps(struct perf_evlist *evlist);
|
||||
int perf_evlist__set_filters(struct perf_evlist *evlist);
|
||||
|
||||
#endif /* __PERF_EVLIST_H */
|
||||
@@ -1,20 +1,34 @@
|
||||
/*
|
||||
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
||||
*
|
||||
* Parts came from builtin-{top,stat,record}.c, see those files for further
|
||||
* copyright notes.
|
||||
*
|
||||
* Released under the GPL v2. (and only v2, not any later version)
|
||||
*/
|
||||
|
||||
#include "evsel.h"
|
||||
#include "../perf.h"
|
||||
#include "evlist.h"
|
||||
#include "util.h"
|
||||
#include "cpumap.h"
|
||||
#include "thread.h"
|
||||
#include "thread_map.h"
|
||||
|
||||
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
|
||||
|
||||
void perf_evsel__init(struct perf_evsel *evsel,
|
||||
struct perf_event_attr *attr, int idx)
|
||||
{
|
||||
evsel->idx = idx;
|
||||
evsel->attr = *attr;
|
||||
INIT_LIST_HEAD(&evsel->node);
|
||||
}
|
||||
|
||||
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
|
||||
{
|
||||
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
|
||||
|
||||
if (evsel != NULL) {
|
||||
evsel->idx = idx;
|
||||
evsel->attr = *attr;
|
||||
INIT_LIST_HEAD(&evsel->node);
|
||||
}
|
||||
if (evsel != NULL)
|
||||
perf_evsel__init(evsel, attr, idx);
|
||||
|
||||
return evsel;
|
||||
}
|
||||
@@ -25,6 +39,22 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
|
||||
return evsel->fd != NULL ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
|
||||
{
|
||||
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
|
||||
if (evsel->sample_id == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
|
||||
if (evsel->id == NULL) {
|
||||
xyarray__delete(evsel->sample_id);
|
||||
evsel->sample_id = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
|
||||
{
|
||||
evsel->counts = zalloc((sizeof(*evsel->counts) +
|
||||
@@ -38,6 +68,14 @@ void perf_evsel__free_fd(struct perf_evsel *evsel)
|
||||
evsel->fd = NULL;
|
||||
}
|
||||
|
||||
void perf_evsel__free_id(struct perf_evsel *evsel)
|
||||
{
|
||||
xyarray__delete(evsel->sample_id);
|
||||
evsel->sample_id = NULL;
|
||||
free(evsel->id);
|
||||
evsel->id = NULL;
|
||||
}
|
||||
|
||||
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
|
||||
{
|
||||
int cpu, thread;
|
||||
@@ -49,10 +87,19 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
|
||||
}
|
||||
}
|
||||
|
||||
void perf_evsel__delete(struct perf_evsel *evsel)
|
||||
void perf_evsel__exit(struct perf_evsel *evsel)
|
||||
{
|
||||
assert(list_empty(&evsel->node));
|
||||
xyarray__delete(evsel->fd);
|
||||
xyarray__delete(evsel->sample_id);
|
||||
free(evsel->id);
|
||||
}
|
||||
|
||||
void perf_evsel__delete(struct perf_evsel *evsel)
|
||||
{
|
||||
perf_evsel__exit(evsel);
|
||||
close_cgroup(evsel->cgrp);
|
||||
free(evsel->name);
|
||||
free(evsel);
|
||||
}
|
||||
|
||||
@@ -128,21 +175,51 @@ int __perf_evsel__read(struct perf_evsel *evsel,
|
||||
}
|
||||
|
||||
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
|
||||
struct thread_map *threads)
|
||||
struct thread_map *threads, bool group, bool inherit)
|
||||
{
|
||||
int cpu, thread;
|
||||
unsigned long flags = 0;
|
||||
int pid = -1;
|
||||
|
||||
if (evsel->fd == NULL &&
|
||||
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
|
||||
return -1;
|
||||
|
||||
if (evsel->cgrp) {
|
||||
flags = PERF_FLAG_PID_CGROUP;
|
||||
pid = evsel->cgrp->fd;
|
||||
}
|
||||
|
||||
for (cpu = 0; cpu < cpus->nr; cpu++) {
|
||||
int group_fd = -1;
|
||||
/*
|
||||
* Don't allow mmap() of inherited per-task counters. This
|
||||
* would create a performance issue due to all children writing
|
||||
* to the same buffer.
|
||||
*
|
||||
* FIXME:
|
||||
* Proper fix is not to pass 'inherit' to perf_evsel__open*,
|
||||
* but a 'flags' parameter, with 'group' folded there as well,
|
||||
* then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
|
||||
* O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
|
||||
* set. Lets go for the minimal fix first tho.
|
||||
*/
|
||||
evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
|
||||
|
||||
for (thread = 0; thread < threads->nr; thread++) {
|
||||
|
||||
if (!evsel->cgrp)
|
||||
pid = threads->map[thread];
|
||||
|
||||
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
|
||||
threads->map[thread],
|
||||
cpus->map[cpu], -1, 0);
|
||||
pid,
|
||||
cpus->map[cpu],
|
||||
group_fd, flags);
|
||||
if (FD(evsel, cpu, thread) < 0)
|
||||
goto out_close;
|
||||
|
||||
if (group && group_fd == -1)
|
||||
group_fd = FD(evsel, cpu, thread);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -175,10 +252,9 @@ static struct {
|
||||
.threads = { -1, },
|
||||
};
|
||||
|
||||
int perf_evsel__open(struct perf_evsel *evsel,
|
||||
struct cpu_map *cpus, struct thread_map *threads)
|
||||
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
|
||||
struct thread_map *threads, bool group, bool inherit)
|
||||
{
|
||||
|
||||
if (cpus == NULL) {
|
||||
/* Work around old compiler warnings about strict aliasing */
|
||||
cpus = &empty_cpu_map.map;
|
||||
@@ -187,15 +263,135 @@ int perf_evsel__open(struct perf_evsel *evsel,
|
||||
if (threads == NULL)
|
||||
threads = &empty_thread_map.map;
|
||||
|
||||
return __perf_evsel__open(evsel, cpus, threads);
|
||||
return __perf_evsel__open(evsel, cpus, threads, group, inherit);
|
||||
}
|
||||
|
||||
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
|
||||
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
|
||||
struct cpu_map *cpus, bool group, bool inherit)
|
||||
{
|
||||
return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
|
||||
return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
|
||||
}
|
||||
|
||||
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
|
||||
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
|
||||
struct thread_map *threads, bool group, bool inherit)
|
||||
{
|
||||
return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
|
||||
return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
|
||||
}
|
||||
|
||||
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
const u64 *array = event->sample.array;
|
||||
|
||||
array += ((event->header.size -
|
||||
sizeof(event->header)) / sizeof(u64)) - 1;
|
||||
|
||||
if (type & PERF_SAMPLE_CPU) {
|
||||
u32 *p = (u32 *)array;
|
||||
sample->cpu = *p;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_STREAM_ID) {
|
||||
sample->stream_id = *array;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_ID) {
|
||||
sample->id = *array;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TIME) {
|
||||
sample->time = *array;
|
||||
array--;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TID) {
|
||||
u32 *p = (u32 *)array;
|
||||
sample->pid = p[0];
|
||||
sample->tid = p[1];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_event__parse_sample(const union perf_event *event, u64 type,
|
||||
bool sample_id_all, struct perf_sample *data)
|
||||
{
|
||||
const u64 *array;
|
||||
|
||||
data->cpu = data->pid = data->tid = -1;
|
||||
data->stream_id = data->id = data->time = -1ULL;
|
||||
|
||||
if (event->header.type != PERF_RECORD_SAMPLE) {
|
||||
if (!sample_id_all)
|
||||
return 0;
|
||||
return perf_event__parse_id_sample(event, type, data);
|
||||
}
|
||||
|
||||
array = event->sample.array;
|
||||
|
||||
if (type & PERF_SAMPLE_IP) {
|
||||
data->ip = event->ip.ip;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TID) {
|
||||
u32 *p = (u32 *)array;
|
||||
data->pid = p[0];
|
||||
data->tid = p[1];
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_TIME) {
|
||||
data->time = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_ADDR) {
|
||||
data->addr = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
data->id = -1ULL;
|
||||
if (type & PERF_SAMPLE_ID) {
|
||||
data->id = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_STREAM_ID) {
|
||||
data->stream_id = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_CPU) {
|
||||
u32 *p = (u32 *)array;
|
||||
data->cpu = *p;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_PERIOD) {
|
||||
data->period = *array;
|
||||
array++;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_READ) {
|
||||
fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_CALLCHAIN) {
|
||||
data->callchain = (struct ip_callchain *)array;
|
||||
array += 1 + data->callchain->nr;
|
||||
}
|
||||
|
||||
if (type & PERF_SAMPLE_RAW) {
|
||||
u32 *p = (u32 *)array;
|
||||
data->raw_size = *p;
|
||||
p++;
|
||||
data->raw_data = p;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -6,6 +6,8 @@
|
||||
#include "../../../include/linux/perf_event.h"
|
||||
#include "types.h"
|
||||
#include "xyarray.h"
|
||||
#include "cgroup.h"
|
||||
#include "hist.h"
|
||||
|
||||
struct perf_counts_values {
|
||||
union {
|
||||
@@ -24,31 +26,66 @@ struct perf_counts {
|
||||
struct perf_counts_values cpu[];
|
||||
};
|
||||
|
||||
struct perf_evsel;
|
||||
|
||||
/*
|
||||
* Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are
|
||||
* more than one entry in the evlist.
|
||||
*/
|
||||
struct perf_sample_id {
|
||||
struct hlist_node node;
|
||||
u64 id;
|
||||
struct perf_evsel *evsel;
|
||||
};
|
||||
|
||||
/** struct perf_evsel - event selector
|
||||
*
|
||||
* @name - Can be set to retain the original event name passed by the user,
|
||||
* so that when showing results in tools such as 'perf stat', we
|
||||
* show the name used, not some alias.
|
||||
*/
|
||||
struct perf_evsel {
|
||||
struct list_head node;
|
||||
struct perf_event_attr attr;
|
||||
char *filter;
|
||||
struct xyarray *fd;
|
||||
struct xyarray *sample_id;
|
||||
u64 *id;
|
||||
struct perf_counts *counts;
|
||||
int idx;
|
||||
void *priv;
|
||||
int ids;
|
||||
struct hists hists;
|
||||
char *name;
|
||||
union {
|
||||
void *priv;
|
||||
off_t id_offset;
|
||||
};
|
||||
struct cgroup_sel *cgrp;
|
||||
};
|
||||
|
||||
struct cpu_map;
|
||||
struct thread_map;
|
||||
struct perf_evlist;
|
||||
|
||||
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
|
||||
void perf_evsel__init(struct perf_evsel *evsel,
|
||||
struct perf_event_attr *attr, int idx);
|
||||
void perf_evsel__exit(struct perf_evsel *evsel);
|
||||
void perf_evsel__delete(struct perf_evsel *evsel);
|
||||
|
||||
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
|
||||
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
|
||||
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
|
||||
void perf_evsel__free_fd(struct perf_evsel *evsel);
|
||||
void perf_evsel__free_id(struct perf_evsel *evsel);
|
||||
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
|
||||
|
||||
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus);
|
||||
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads);
|
||||
int perf_evsel__open(struct perf_evsel *evsel,
|
||||
struct cpu_map *cpus, struct thread_map *threads);
|
||||
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
|
||||
struct cpu_map *cpus, bool group, bool inherit);
|
||||
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
|
||||
struct thread_map *threads, bool group, bool inherit);
|
||||
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
|
||||
struct thread_map *threads, bool group, bool inherit);
|
||||
|
||||
#define perf_evsel__match(evsel, t, c) \
|
||||
(evsel->attr.type == PERF_TYPE_##t && \
|
||||
|
||||
@@ -11,31 +11,12 @@ static const char *argv0_path;
|
||||
|
||||
const char *system_path(const char *path)
|
||||
{
|
||||
#ifdef RUNTIME_PREFIX
|
||||
static const char *prefix;
|
||||
#else
|
||||
static const char *prefix = PREFIX;
|
||||
#endif
|
||||
struct strbuf d = STRBUF_INIT;
|
||||
|
||||
if (is_absolute_path(path))
|
||||
return path;
|
||||
|
||||
#ifdef RUNTIME_PREFIX
|
||||
assert(argv0_path);
|
||||
assert(is_absolute_path(argv0_path));
|
||||
|
||||
if (!prefix &&
|
||||
!(prefix = strip_path_suffix(argv0_path, PERF_EXEC_PATH)) &&
|
||||
!(prefix = strip_path_suffix(argv0_path, BINDIR)) &&
|
||||
!(prefix = strip_path_suffix(argv0_path, "perf"))) {
|
||||
prefix = PREFIX;
|
||||
fprintf(stderr, "RUNTIME_PREFIX requested, "
|
||||
"but prefix computation failed. "
|
||||
"Using static fallback '%s'.\n", prefix);
|
||||
}
|
||||
#endif
|
||||
|
||||
strbuf_addf(&d, "%s/%s", prefix, path);
|
||||
path = strbuf_detach(&d, NULL);
|
||||
return path;
|
||||
|
||||
@@ -8,6 +8,8 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "util.h"
|
||||
#include "header.h"
|
||||
#include "../perf.h"
|
||||
@@ -18,89 +20,6 @@
|
||||
|
||||
static bool no_buildid_cache = false;
|
||||
|
||||
/*
|
||||
* Create new perf.data header attribute:
|
||||
*/
|
||||
struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
|
||||
{
|
||||
struct perf_header_attr *self = malloc(sizeof(*self));
|
||||
|
||||
if (self != NULL) {
|
||||
self->attr = *attr;
|
||||
self->ids = 0;
|
||||
self->size = 1;
|
||||
self->id = malloc(sizeof(u64));
|
||||
if (self->id == NULL) {
|
||||
free(self);
|
||||
self = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
void perf_header_attr__delete(struct perf_header_attr *self)
|
||||
{
|
||||
free(self->id);
|
||||
free(self);
|
||||
}
|
||||
|
||||
int perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
|
||||
{
|
||||
int pos = self->ids;
|
||||
|
||||
self->ids++;
|
||||
if (self->ids > self->size) {
|
||||
int nsize = self->size * 2;
|
||||
u64 *nid = realloc(self->id, nsize * sizeof(u64));
|
||||
|
||||
if (nid == NULL)
|
||||
return -1;
|
||||
|
||||
self->size = nsize;
|
||||
self->id = nid;
|
||||
}
|
||||
self->id[pos] = id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_header__init(struct perf_header *self)
|
||||
{
|
||||
self->size = 1;
|
||||
self->attr = malloc(sizeof(void *));
|
||||
return self->attr == NULL ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
void perf_header__exit(struct perf_header *self)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < self->attrs; ++i)
|
||||
perf_header_attr__delete(self->attr[i]);
|
||||
free(self->attr);
|
||||
}
|
||||
|
||||
int perf_header__add_attr(struct perf_header *self,
|
||||
struct perf_header_attr *attr)
|
||||
{
|
||||
if (self->frozen)
|
||||
return -1;
|
||||
|
||||
if (self->attrs == self->size) {
|
||||
int nsize = self->size * 2;
|
||||
struct perf_header_attr **nattr;
|
||||
|
||||
nattr = realloc(self->attr, nsize * sizeof(void *));
|
||||
if (nattr == NULL)
|
||||
return -1;
|
||||
|
||||
self->size = nsize;
|
||||
self->attr = nattr;
|
||||
}
|
||||
|
||||
self->attr[self->attrs++] = attr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int event_count;
|
||||
static struct perf_trace_event_type *events;
|
||||
|
||||
@@ -147,19 +66,19 @@ struct perf_file_attr {
|
||||
struct perf_file_section ids;
|
||||
};
|
||||
|
||||
void perf_header__set_feat(struct perf_header *self, int feat)
|
||||
void perf_header__set_feat(struct perf_header *header, int feat)
|
||||
{
|
||||
set_bit(feat, self->adds_features);
|
||||
set_bit(feat, header->adds_features);
|
||||
}
|
||||
|
||||
void perf_header__clear_feat(struct perf_header *self, int feat)
|
||||
void perf_header__clear_feat(struct perf_header *header, int feat)
|
||||
{
|
||||
clear_bit(feat, self->adds_features);
|
||||
clear_bit(feat, header->adds_features);
|
||||
}
|
||||
|
||||
bool perf_header__has_feat(const struct perf_header *self, int feat)
|
||||
bool perf_header__has_feat(const struct perf_header *header, int feat)
|
||||
{
|
||||
return test_bit(feat, self->adds_features);
|
||||
return test_bit(feat, header->adds_features);
|
||||
}
|
||||
|
||||
static int do_write(int fd, const void *buf, size_t size)
|
||||
@@ -228,22 +147,22 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int machine__write_buildid_table(struct machine *self, int fd)
|
||||
static int machine__write_buildid_table(struct machine *machine, int fd)
|
||||
{
|
||||
int err;
|
||||
u16 kmisc = PERF_RECORD_MISC_KERNEL,
|
||||
umisc = PERF_RECORD_MISC_USER;
|
||||
|
||||
if (!machine__is_host(self)) {
|
||||
if (!machine__is_host(machine)) {
|
||||
kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
|
||||
umisc = PERF_RECORD_MISC_GUEST_USER;
|
||||
}
|
||||
|
||||
err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
|
||||
err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
|
||||
kmisc, fd);
|
||||
if (err == 0)
|
||||
err = __dsos__write_buildid_table(&self->user_dsos,
|
||||
self->pid, umisc, fd);
|
||||
err = __dsos__write_buildid_table(&machine->user_dsos,
|
||||
machine->pid, umisc, fd);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -366,12 +285,12 @@ out_free:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int dso__cache_build_id(struct dso *self, const char *debugdir)
|
||||
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
|
||||
{
|
||||
bool is_kallsyms = self->kernel && self->long_name[0] != '/';
|
||||
bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
|
||||
|
||||
return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
|
||||
self->long_name, debugdir, is_kallsyms);
|
||||
return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
|
||||
dso->long_name, debugdir, is_kallsyms);
|
||||
}
|
||||
|
||||
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
|
||||
@@ -386,14 +305,14 @@ static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int machine__cache_build_ids(struct machine *self, const char *debugdir)
|
||||
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
|
||||
{
|
||||
int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
|
||||
ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir);
|
||||
int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
|
||||
ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int perf_session__cache_build_ids(struct perf_session *self)
|
||||
static int perf_session__cache_build_ids(struct perf_session *session)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
int ret;
|
||||
@@ -404,28 +323,28 @@ static int perf_session__cache_build_ids(struct perf_session *self)
|
||||
if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
|
||||
return -1;
|
||||
|
||||
ret = machine__cache_build_ids(&self->host_machine, debugdir);
|
||||
ret = machine__cache_build_ids(&session->host_machine, debugdir);
|
||||
|
||||
for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
|
||||
for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
|
||||
struct machine *pos = rb_entry(nd, struct machine, rb_node);
|
||||
ret |= machine__cache_build_ids(pos, debugdir);
|
||||
}
|
||||
return ret ? -1 : 0;
|
||||
}
|
||||
|
||||
static bool machine__read_build_ids(struct machine *self, bool with_hits)
|
||||
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
|
||||
{
|
||||
bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
|
||||
ret |= __dsos__read_build_ids(&self->user_dsos, with_hits);
|
||||
bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
|
||||
ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
|
||||
static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
bool ret = machine__read_build_ids(&self->host_machine, with_hits);
|
||||
bool ret = machine__read_build_ids(&session->host_machine, with_hits);
|
||||
|
||||
for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
|
||||
for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
|
||||
struct machine *pos = rb_entry(nd, struct machine, rb_node);
|
||||
ret |= machine__read_build_ids(pos, with_hits);
|
||||
}
|
||||
@@ -433,7 +352,8 @@ static bool perf_session__read_build_ids(struct perf_session *self, bool with_hi
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int perf_header__adds_write(struct perf_header *self, int fd)
|
||||
static int perf_header__adds_write(struct perf_header *header,
|
||||
struct perf_evlist *evlist, int fd)
|
||||
{
|
||||
int nr_sections;
|
||||
struct perf_session *session;
|
||||
@@ -442,13 +362,13 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
|
||||
u64 sec_start;
|
||||
int idx = 0, err;
|
||||
|
||||
session = container_of(self, struct perf_session, header);
|
||||
session = container_of(header, struct perf_session, header);
|
||||
|
||||
if (perf_header__has_feat(self, HEADER_BUILD_ID &&
|
||||
if (perf_header__has_feat(header, HEADER_BUILD_ID &&
|
||||
!perf_session__read_build_ids(session, true)))
|
||||
perf_header__clear_feat(self, HEADER_BUILD_ID);
|
||||
perf_header__clear_feat(header, HEADER_BUILD_ID);
|
||||
|
||||
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
|
||||
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
|
||||
if (!nr_sections)
|
||||
return 0;
|
||||
|
||||
@@ -458,28 +378,28 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
|
||||
|
||||
sec_size = sizeof(*feat_sec) * nr_sections;
|
||||
|
||||
sec_start = self->data_offset + self->data_size;
|
||||
sec_start = header->data_offset + header->data_size;
|
||||
lseek(fd, sec_start + sec_size, SEEK_SET);
|
||||
|
||||
if (perf_header__has_feat(self, HEADER_TRACE_INFO)) {
|
||||
if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
|
||||
struct perf_file_section *trace_sec;
|
||||
|
||||
trace_sec = &feat_sec[idx++];
|
||||
|
||||
/* Write trace info */
|
||||
trace_sec->offset = lseek(fd, 0, SEEK_CUR);
|
||||
read_tracing_data(fd, &evsel_list);
|
||||
read_tracing_data(fd, &evlist->entries);
|
||||
trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
|
||||
}
|
||||
|
||||
if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
|
||||
if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
|
||||
struct perf_file_section *buildid_sec;
|
||||
|
||||
buildid_sec = &feat_sec[idx++];
|
||||
|
||||
/* Write build-ids */
|
||||
buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
|
||||
err = dsos__write_buildid_table(self, fd);
|
||||
err = dsos__write_buildid_table(header, fd);
|
||||
if (err < 0) {
|
||||
pr_debug("failed to write buildid table\n");
|
||||
goto out_free;
|
||||
@@ -518,32 +438,41 @@ int perf_header__write_pipe(int fd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_header__write(struct perf_header *self, int fd, bool at_exit)
|
||||
int perf_session__write_header(struct perf_session *session,
|
||||
struct perf_evlist *evlist,
|
||||
int fd, bool at_exit)
|
||||
{
|
||||
struct perf_file_header f_header;
|
||||
struct perf_file_attr f_attr;
|
||||
struct perf_header_attr *attr;
|
||||
int i, err;
|
||||
struct perf_header *header = &session->header;
|
||||
struct perf_evsel *attr, *pair = NULL;
|
||||
int err;
|
||||
|
||||
lseek(fd, sizeof(f_header), SEEK_SET);
|
||||
|
||||
for (i = 0; i < self->attrs; i++) {
|
||||
attr = self->attr[i];
|
||||
if (session->evlist != evlist)
|
||||
pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
|
||||
|
||||
list_for_each_entry(attr, &evlist->entries, node) {
|
||||
attr->id_offset = lseek(fd, 0, SEEK_CUR);
|
||||
err = do_write(fd, attr->id, attr->ids * sizeof(u64));
|
||||
if (err < 0) {
|
||||
out_err_write:
|
||||
pr_debug("failed to write perf header\n");
|
||||
return err;
|
||||
}
|
||||
if (session->evlist != evlist) {
|
||||
err = do_write(fd, pair->id, pair->ids * sizeof(u64));
|
||||
if (err < 0)
|
||||
goto out_err_write;
|
||||
attr->ids += pair->ids;
|
||||
pair = list_entry(pair->node.next, struct perf_evsel, node);
|
||||
}
|
||||
}
|
||||
|
||||
header->attr_offset = lseek(fd, 0, SEEK_CUR);
|
||||
|
||||
self->attr_offset = lseek(fd, 0, SEEK_CUR);
|
||||
|
||||
for (i = 0; i < self->attrs; i++) {
|
||||
attr = self->attr[i];
|
||||
|
||||
list_for_each_entry(attr, &evlist->entries, node) {
|
||||
f_attr = (struct perf_file_attr){
|
||||
.attr = attr->attr,
|
||||
.ids = {
|
||||
@@ -558,20 +487,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
|
||||
}
|
||||
}
|
||||
|
||||
self->event_offset = lseek(fd, 0, SEEK_CUR);
|
||||
self->event_size = event_count * sizeof(struct perf_trace_event_type);
|
||||
header->event_offset = lseek(fd, 0, SEEK_CUR);
|
||||
header->event_size = event_count * sizeof(struct perf_trace_event_type);
|
||||
if (events) {
|
||||
err = do_write(fd, events, self->event_size);
|
||||
err = do_write(fd, events, header->event_size);
|
||||
if (err < 0) {
|
||||
pr_debug("failed to write perf header events\n");
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
self->data_offset = lseek(fd, 0, SEEK_CUR);
|
||||
header->data_offset = lseek(fd, 0, SEEK_CUR);
|
||||
|
||||
if (at_exit) {
|
||||
err = perf_header__adds_write(self, fd);
|
||||
err = perf_header__adds_write(header, evlist, fd);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
@@ -581,20 +510,20 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
|
||||
.size = sizeof(f_header),
|
||||
.attr_size = sizeof(f_attr),
|
||||
.attrs = {
|
||||
.offset = self->attr_offset,
|
||||
.size = self->attrs * sizeof(f_attr),
|
||||
.offset = header->attr_offset,
|
||||
.size = evlist->nr_entries * sizeof(f_attr),
|
||||
},
|
||||
.data = {
|
||||
.offset = self->data_offset,
|
||||
.size = self->data_size,
|
||||
.offset = header->data_offset,
|
||||
.size = header->data_size,
|
||||
},
|
||||
.event_types = {
|
||||
.offset = self->event_offset,
|
||||
.size = self->event_size,
|
||||
.offset = header->event_offset,
|
||||
.size = header->event_size,
|
||||
},
|
||||
};
|
||||
|
||||
memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
|
||||
memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
|
||||
|
||||
lseek(fd, 0, SEEK_SET);
|
||||
err = do_write(fd, &f_header, sizeof(f_header));
|
||||
@@ -602,26 +531,26 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
|
||||
pr_debug("failed to write perf header\n");
|
||||
return err;
|
||||
}
|
||||
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
|
||||
lseek(fd, header->data_offset + header->data_size, SEEK_SET);
|
||||
|
||||
self->frozen = 1;
|
||||
header->frozen = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_header__getbuffer64(struct perf_header *self,
|
||||
static int perf_header__getbuffer64(struct perf_header *header,
|
||||
int fd, void *buf, size_t size)
|
||||
{
|
||||
if (readn(fd, buf, size) <= 0)
|
||||
return -1;
|
||||
|
||||
if (self->needs_swap)
|
||||
if (header->needs_swap)
|
||||
mem_bswap_64(buf, size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_header__process_sections(struct perf_header *self, int fd,
|
||||
int (*process)(struct perf_file_section *self,
|
||||
int perf_header__process_sections(struct perf_header *header, int fd,
|
||||
int (*process)(struct perf_file_section *section,
|
||||
struct perf_header *ph,
|
||||
int feat, int fd))
|
||||
{
|
||||
@@ -631,7 +560,7 @@ int perf_header__process_sections(struct perf_header *self, int fd,
|
||||
int idx = 0;
|
||||
int err = -1, feat = 1;
|
||||
|
||||
nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
|
||||
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
|
||||
if (!nr_sections)
|
||||
return 0;
|
||||
|
||||
@@ -641,17 +570,17 @@ int perf_header__process_sections(struct perf_header *self, int fd,
|
||||
|
||||
sec_size = sizeof(*feat_sec) * nr_sections;
|
||||
|
||||
lseek(fd, self->data_offset + self->data_size, SEEK_SET);
|
||||
lseek(fd, header->data_offset + header->data_size, SEEK_SET);
|
||||
|
||||
if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
|
||||
if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
|
||||
goto out_free;
|
||||
|
||||
err = 0;
|
||||
while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
|
||||
if (perf_header__has_feat(self, feat)) {
|
||||
if (perf_header__has_feat(header, feat)) {
|
||||
struct perf_file_section *sec = &feat_sec[idx++];
|
||||
|
||||
err = process(sec, self, feat, fd);
|
||||
err = process(sec, header, feat, fd);
|
||||
if (err < 0)
|
||||
break;
|
||||
}
|
||||
@@ -662,35 +591,35 @@ out_free:
|
||||
return err;
|
||||
}
|
||||
|
||||
int perf_file_header__read(struct perf_file_header *self,
|
||||
int perf_file_header__read(struct perf_file_header *header,
|
||||
struct perf_header *ph, int fd)
|
||||
{
|
||||
lseek(fd, 0, SEEK_SET);
|
||||
|
||||
if (readn(fd, self, sizeof(*self)) <= 0 ||
|
||||
memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
|
||||
if (readn(fd, header, sizeof(*header)) <= 0 ||
|
||||
memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
|
||||
return -1;
|
||||
|
||||
if (self->attr_size != sizeof(struct perf_file_attr)) {
|
||||
u64 attr_size = bswap_64(self->attr_size);
|
||||
if (header->attr_size != sizeof(struct perf_file_attr)) {
|
||||
u64 attr_size = bswap_64(header->attr_size);
|
||||
|
||||
if (attr_size != sizeof(struct perf_file_attr))
|
||||
return -1;
|
||||
|
||||
mem_bswap_64(self, offsetof(struct perf_file_header,
|
||||
mem_bswap_64(header, offsetof(struct perf_file_header,
|
||||
adds_features));
|
||||
ph->needs_swap = true;
|
||||
}
|
||||
|
||||
if (self->size != sizeof(*self)) {
|
||||
if (header->size != sizeof(*header)) {
|
||||
/* Support the previous format */
|
||||
if (self->size == offsetof(typeof(*self), adds_features))
|
||||
bitmap_zero(self->adds_features, HEADER_FEAT_BITS);
|
||||
if (header->size == offsetof(typeof(*header), adds_features))
|
||||
bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
|
||||
memcpy(&ph->adds_features, &self->adds_features,
|
||||
memcpy(&ph->adds_features, &header->adds_features,
|
||||
sizeof(ph->adds_features));
|
||||
/*
|
||||
* FIXME: hack that assumes that if we need swap the perf.data file
|
||||
@@ -704,10 +633,10 @@ int perf_file_header__read(struct perf_file_header *self,
|
||||
perf_header__set_feat(ph, HEADER_BUILD_ID);
|
||||
}
|
||||
|
||||
ph->event_offset = self->event_types.offset;
|
||||
ph->event_size = self->event_types.size;
|
||||
ph->data_offset = self->data.offset;
|
||||
ph->data_size = self->data.size;
|
||||
ph->event_offset = header->event_types.offset;
|
||||
ph->event_size = header->event_types.size;
|
||||
ph->data_offset = header->data.offset;
|
||||
ph->data_size = header->data.size;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -766,11 +695,10 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int perf_header__read_build_ids(struct perf_header *self,
|
||||
int input, u64 offset, u64 size)
|
||||
static int perf_header__read_build_ids(struct perf_header *header,
|
||||
int input, u64 offset, u64 size)
|
||||
{
|
||||
struct perf_session *session = container_of(self,
|
||||
struct perf_session, header);
|
||||
struct perf_session *session = container_of(header, struct perf_session, header);
|
||||
struct build_id_event bev;
|
||||
char filename[PATH_MAX];
|
||||
u64 limit = offset + size;
|
||||
@@ -782,7 +710,7 @@ static int perf_header__read_build_ids(struct perf_header *self,
|
||||
if (read(input, &bev, sizeof(bev)) != sizeof(bev))
|
||||
goto out;
|
||||
|
||||
if (self->needs_swap)
|
||||
if (header->needs_swap)
|
||||
perf_event_header__bswap(&bev.header);
|
||||
|
||||
len = bev.header.size - sizeof(bev);
|
||||
@@ -798,13 +726,13 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int perf_file_section__process(struct perf_file_section *self,
|
||||
static int perf_file_section__process(struct perf_file_section *section,
|
||||
struct perf_header *ph,
|
||||
int feat, int fd)
|
||||
{
|
||||
if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
|
||||
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
|
||||
pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
|
||||
"%d, continuing...\n", self->offset, feat);
|
||||
"%d, continuing...\n", section->offset, feat);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -814,7 +742,7 @@ static int perf_file_section__process(struct perf_file_section *self,
|
||||
break;
|
||||
|
||||
case HEADER_BUILD_ID:
|
||||
if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
|
||||
if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
|
||||
pr_debug("Failed to read buildids, continuing...\n");
|
||||
break;
|
||||
default:
|
||||
@@ -824,21 +752,21 @@ static int perf_file_section__process(struct perf_file_section *self,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
|
||||
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
|
||||
struct perf_header *ph, int fd,
|
||||
bool repipe)
|
||||
{
|
||||
if (readn(fd, self, sizeof(*self)) <= 0 ||
|
||||
memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
|
||||
if (readn(fd, header, sizeof(*header)) <= 0 ||
|
||||
memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
|
||||
return -1;
|
||||
|
||||
if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
|
||||
if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
|
||||
return -1;
|
||||
|
||||
if (self->size != sizeof(*self)) {
|
||||
u64 size = bswap_64(self->size);
|
||||
if (header->size != sizeof(*header)) {
|
||||
u64 size = bswap_64(header->size);
|
||||
|
||||
if (size != sizeof(*self))
|
||||
if (size != sizeof(*header))
|
||||
return -1;
|
||||
|
||||
ph->needs_swap = true;
|
||||
@@ -849,10 +777,10 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
|
||||
|
||||
static int perf_header__read_pipe(struct perf_session *session, int fd)
|
||||
{
|
||||
struct perf_header *self = &session->header;
|
||||
struct perf_header *header = &session->header;
|
||||
struct perf_pipe_file_header f_header;
|
||||
|
||||
if (perf_file_header__read_pipe(&f_header, self, fd,
|
||||
if (perf_file_header__read_pipe(&f_header, header, fd,
|
||||
session->repipe) < 0) {
|
||||
pr_debug("incompatible file format\n");
|
||||
return -EINVAL;
|
||||
@@ -863,18 +791,22 @@ static int perf_header__read_pipe(struct perf_session *session, int fd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int perf_header__read(struct perf_session *session, int fd)
|
||||
int perf_session__read_header(struct perf_session *session, int fd)
|
||||
{
|
||||
struct perf_header *self = &session->header;
|
||||
struct perf_header *header = &session->header;
|
||||
struct perf_file_header f_header;
|
||||
struct perf_file_attr f_attr;
|
||||
u64 f_id;
|
||||
int nr_attrs, nr_ids, i, j;
|
||||
|
||||
session->evlist = perf_evlist__new(NULL, NULL);
|
||||
if (session->evlist == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
if (session->fd_pipe)
|
||||
return perf_header__read_pipe(session, fd);
|
||||
|
||||
if (perf_file_header__read(&f_header, self, fd) < 0) {
|
||||
if (perf_file_header__read(&f_header, header, fd) < 0) {
|
||||
pr_debug("incompatible file format\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -883,33 +815,39 @@ int perf_header__read(struct perf_session *session, int fd)
|
||||
lseek(fd, f_header.attrs.offset, SEEK_SET);
|
||||
|
||||
for (i = 0; i < nr_attrs; i++) {
|
||||
struct perf_header_attr *attr;
|
||||
struct perf_evsel *evsel;
|
||||
off_t tmp;
|
||||
|
||||
if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
|
||||
if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr)))
|
||||
goto out_errno;
|
||||
|
||||
tmp = lseek(fd, 0, SEEK_CUR);
|
||||
evsel = perf_evsel__new(&f_attr.attr, i);
|
||||
|
||||
attr = perf_header_attr__new(&f_attr.attr);
|
||||
if (attr == NULL)
|
||||
return -ENOMEM;
|
||||
if (evsel == NULL)
|
||||
goto out_delete_evlist;
|
||||
/*
|
||||
* Do it before so that if perf_evsel__alloc_id fails, this
|
||||
* entry gets purged too at perf_evlist__delete().
|
||||
*/
|
||||
perf_evlist__add(session->evlist, evsel);
|
||||
|
||||
nr_ids = f_attr.ids.size / sizeof(u64);
|
||||
/*
|
||||
* We don't have the cpu and thread maps on the header, so
|
||||
* for allocating the perf_sample_id table we fake 1 cpu and
|
||||
* hattr->ids threads.
|
||||
*/
|
||||
if (perf_evsel__alloc_id(evsel, 1, nr_ids))
|
||||
goto out_delete_evlist;
|
||||
|
||||
lseek(fd, f_attr.ids.offset, SEEK_SET);
|
||||
|
||||
for (j = 0; j < nr_ids; j++) {
|
||||
if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
|
||||
if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
|
||||
goto out_errno;
|
||||
|
||||
if (perf_header_attr__add_id(attr, f_id) < 0) {
|
||||
perf_header_attr__delete(attr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
if (perf_header__add_attr(self, attr) < 0) {
|
||||
perf_header_attr__delete(attr);
|
||||
return -ENOMEM;
|
||||
perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
|
||||
}
|
||||
|
||||
lseek(fd, tmp, SEEK_SET);
|
||||
@@ -920,93 +858,63 @@ int perf_header__read(struct perf_session *session, int fd)
|
||||
events = malloc(f_header.event_types.size);
|
||||
if (events == NULL)
|
||||
return -ENOMEM;
|
||||
if (perf_header__getbuffer64(self, fd, events,
|
||||
if (perf_header__getbuffer64(header, fd, events,
|
||||
f_header.event_types.size))
|
||||
goto out_errno;
|
||||
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
|
||||
}
|
||||
|
||||
perf_header__process_sections(self, fd, perf_file_section__process);
|
||||
perf_header__process_sections(header, fd, perf_file_section__process);
|
||||
|
||||
lseek(fd, self->data_offset, SEEK_SET);
|
||||
lseek(fd, header->data_offset, SEEK_SET);
|
||||
|
||||
self->frozen = 1;
|
||||
header->frozen = 1;
|
||||
return 0;
|
||||
out_errno:
|
||||
return -errno;
|
||||
|
||||
out_delete_evlist:
|
||||
perf_evlist__delete(session->evlist);
|
||||
session->evlist = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
u64 perf_header__sample_type(struct perf_header *header)
|
||||
u64 perf_evlist__sample_type(struct perf_evlist *evlist)
|
||||
{
|
||||
struct perf_evsel *pos;
|
||||
u64 type = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < header->attrs; i++) {
|
||||
struct perf_header_attr *attr = header->attr[i];
|
||||
|
||||
list_for_each_entry(pos, &evlist->entries, node) {
|
||||
if (!type)
|
||||
type = attr->attr.sample_type;
|
||||
else if (type != attr->attr.sample_type)
|
||||
type = pos->attr.sample_type;
|
||||
else if (type != pos->attr.sample_type)
|
||||
die("non matching sample_type");
|
||||
}
|
||||
|
||||
return type;
|
||||
}
|
||||
|
||||
bool perf_header__sample_id_all(const struct perf_header *header)
|
||||
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
|
||||
{
|
||||
bool value = false, first = true;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < header->attrs; i++) {
|
||||
struct perf_header_attr *attr = header->attr[i];
|
||||
struct perf_evsel *pos;
|
||||
|
||||
list_for_each_entry(pos, &evlist->entries, node) {
|
||||
if (first) {
|
||||
value = attr->attr.sample_id_all;
|
||||
value = pos->attr.sample_id_all;
|
||||
first = false;
|
||||
} else if (value != attr->attr.sample_id_all)
|
||||
} else if (value != pos->attr.sample_id_all)
|
||||
die("non matching sample_id_all");
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
struct perf_event_attr *
|
||||
perf_header__find_attr(u64 id, struct perf_header *header)
|
||||
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* We set id to -1 if the data file doesn't contain sample
|
||||
* ids. This can happen when the data file contains one type
|
||||
* of event and in that case, the header can still store the
|
||||
* event attribute information. Check for this and avoid
|
||||
* walking through the entire list of ids which may be large.
|
||||
*/
|
||||
if (id == -1ULL) {
|
||||
if (header->attrs > 0)
|
||||
return &header->attr[0]->attr;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = 0; i < header->attrs; i++) {
|
||||
struct perf_header_attr *attr = header->attr[i];
|
||||
int j;
|
||||
|
||||
for (j = 0; j < attr->ids; j++) {
|
||||
if (attr->id[j] == id)
|
||||
return &attr->attr;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
event_t *ev;
|
||||
union perf_event *ev;
|
||||
size_t size;
|
||||
int err;
|
||||
|
||||
@@ -1033,17 +941,15 @@ int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
|
||||
return err;
|
||||
}
|
||||
|
||||
int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
|
||||
struct perf_session *session)
|
||||
int perf_session__synthesize_attrs(struct perf_session *session,
|
||||
perf_event__handler_t process)
|
||||
{
|
||||
struct perf_header_attr *attr;
|
||||
int i, err = 0;
|
||||
struct perf_evsel *attr;
|
||||
int err = 0;
|
||||
|
||||
for (i = 0; i < self->attrs; i++) {
|
||||
attr = self->attr[i];
|
||||
|
||||
err = event__synthesize_attr(&attr->attr, attr->ids, attr->id,
|
||||
process, session);
|
||||
list_for_each_entry(attr, &session->evlist->entries, node) {
|
||||
err = perf_event__synthesize_attr(&attr->attr, attr->ids,
|
||||
attr->id, process, session);
|
||||
if (err) {
|
||||
pr_debug("failed to create perf header attribute\n");
|
||||
return err;
|
||||
@@ -1053,29 +959,39 @@ int event__synthesize_attrs(struct perf_header *self, event__handler_t process,
|
||||
return err;
|
||||
}
|
||||
|
||||
int event__process_attr(event_t *self, struct perf_session *session)
|
||||
int perf_event__process_attr(union perf_event *event,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct perf_header_attr *attr;
|
||||
unsigned int i, ids, n_ids;
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
attr = perf_header_attr__new(&self->attr.attr);
|
||||
if (attr == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
ids = self->header.size;
|
||||
ids -= (void *)&self->attr.id - (void *)self;
|
||||
n_ids = ids / sizeof(u64);
|
||||
|
||||
for (i = 0; i < n_ids; i++) {
|
||||
if (perf_header_attr__add_id(attr, self->attr.id[i]) < 0) {
|
||||
perf_header_attr__delete(attr);
|
||||
if (session->evlist == NULL) {
|
||||
session->evlist = perf_evlist__new(NULL, NULL);
|
||||
if (session->evlist == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
if (perf_header__add_attr(&session->header, attr) < 0) {
|
||||
perf_header_attr__delete(attr);
|
||||
evsel = perf_evsel__new(&event->attr.attr,
|
||||
session->evlist->nr_entries);
|
||||
if (evsel == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
perf_evlist__add(session->evlist, evsel);
|
||||
|
||||
ids = event->header.size;
|
||||
ids -= (void *)&event->attr.id - (void *)event;
|
||||
n_ids = ids / sizeof(u64);
|
||||
/*
|
||||
* We don't have the cpu and thread maps on the header, so
|
||||
* for allocating the perf_sample_id table we fake 1 cpu and
|
||||
* hattr->ids threads.
|
||||
*/
|
||||
if (perf_evsel__alloc_id(evsel, 1, n_ids))
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < n_ids; i++) {
|
||||
perf_evlist__id_add(session->evlist, evsel, 0, i,
|
||||
event->attr.id[i]);
|
||||
}
|
||||
|
||||
perf_session__update_sample_type(session);
|
||||
@@ -1083,11 +999,11 @@ int event__process_attr(event_t *self, struct perf_session *session)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__synthesize_event_type(u64 event_id, char *name,
|
||||
event__handler_t process,
|
||||
struct perf_session *session)
|
||||
int perf_event__synthesize_event_type(u64 event_id, char *name,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
event_t ev;
|
||||
union perf_event ev;
|
||||
size_t size = 0;
|
||||
int err = 0;
|
||||
|
||||
@@ -1108,8 +1024,8 @@ int event__synthesize_event_type(u64 event_id, char *name,
|
||||
return err;
|
||||
}
|
||||
|
||||
int event__synthesize_event_types(event__handler_t process,
|
||||
struct perf_session *session)
|
||||
int perf_event__synthesize_event_types(perf_event__handler_t process,
|
||||
struct perf_session *session)
|
||||
{
|
||||
struct perf_trace_event_type *type;
|
||||
int i, err = 0;
|
||||
@@ -1117,8 +1033,9 @@ int event__synthesize_event_types(event__handler_t process,
|
||||
for (i = 0; i < event_count; i++) {
|
||||
type = &events[i];
|
||||
|
||||
err = event__synthesize_event_type(type->event_id, type->name,
|
||||
process, session);
|
||||
err = perf_event__synthesize_event_type(type->event_id,
|
||||
type->name, process,
|
||||
session);
|
||||
if (err) {
|
||||
pr_debug("failed to create perf header event type\n");
|
||||
return err;
|
||||
@@ -1128,28 +1045,28 @@ int event__synthesize_event_types(event__handler_t process,
|
||||
return err;
|
||||
}
|
||||
|
||||
int event__process_event_type(event_t *self,
|
||||
struct perf_session *session __unused)
|
||||
int perf_event__process_event_type(union perf_event *event,
|
||||
struct perf_session *session __unused)
|
||||
{
|
||||
if (perf_header__push_event(self->event_type.event_type.event_id,
|
||||
self->event_type.event_type.name) < 0)
|
||||
if (perf_header__push_event(event->event_type.event_type.event_id,
|
||||
event->event_type.event_type.name) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
|
||||
event__handler_t process,
|
||||
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session __unused)
|
||||
{
|
||||
event_t ev;
|
||||
union perf_event ev;
|
||||
ssize_t size = 0, aligned_size = 0, padding;
|
||||
int err = 0;
|
||||
int err __used = 0;
|
||||
|
||||
memset(&ev, 0, sizeof(ev));
|
||||
|
||||
ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
|
||||
size = read_tracing_data_size(fd, pattrs);
|
||||
size = read_tracing_data_size(fd, &evlist->entries);
|
||||
if (size <= 0)
|
||||
return size;
|
||||
aligned_size = ALIGN(size, sizeof(u64));
|
||||
@@ -1159,16 +1076,16 @@ int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
|
||||
|
||||
process(&ev, NULL, session);
|
||||
|
||||
err = read_tracing_data(fd, pattrs);
|
||||
err = read_tracing_data(fd, &evlist->entries);
|
||||
write_padded(fd, NULL, 0, padding);
|
||||
|
||||
return aligned_size;
|
||||
}
|
||||
|
||||
int event__process_tracing_data(event_t *self,
|
||||
struct perf_session *session)
|
||||
int perf_event__process_tracing_data(union perf_event *event,
|
||||
struct perf_session *session)
|
||||
{
|
||||
ssize_t size_read, padding, size = self->tracing_data.size;
|
||||
ssize_t size_read, padding, size = event->tracing_data.size;
|
||||
off_t offset = lseek(session->fd, 0, SEEK_CUR);
|
||||
char buf[BUFSIZ];
|
||||
|
||||
@@ -1194,12 +1111,12 @@ int event__process_tracing_data(event_t *self,
|
||||
return size_read + padding;
|
||||
}
|
||||
|
||||
int event__synthesize_build_id(struct dso *pos, u16 misc,
|
||||
event__handler_t process,
|
||||
struct machine *machine,
|
||||
struct perf_session *session)
|
||||
int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
|
||||
perf_event__handler_t process,
|
||||
struct machine *machine,
|
||||
struct perf_session *session)
|
||||
{
|
||||
event_t ev;
|
||||
union perf_event ev;
|
||||
size_t len;
|
||||
int err = 0;
|
||||
|
||||
@@ -1222,11 +1139,11 @@ int event__synthesize_build_id(struct dso *pos, u16 misc,
|
||||
return err;
|
||||
}
|
||||
|
||||
int event__process_build_id(event_t *self,
|
||||
struct perf_session *session)
|
||||
int perf_event__process_build_id(union perf_event *event,
|
||||
struct perf_session *session)
|
||||
{
|
||||
__event_process_build_id(&self->build_id,
|
||||
self->build_id.filename,
|
||||
__event_process_build_id(&event->build_id,
|
||||
event->build_id.filename,
|
||||
session);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -9,13 +9,6 @@
|
||||
|
||||
#include <linux/bitmap.h>
|
||||
|
||||
struct perf_header_attr {
|
||||
struct perf_event_attr attr;
|
||||
int ids, size;
|
||||
u64 *id;
|
||||
off_t id_offset;
|
||||
};
|
||||
|
||||
enum {
|
||||
HEADER_TRACE_INFO = 1,
|
||||
HEADER_BUILD_ID,
|
||||
@@ -46,14 +39,12 @@ struct perf_pipe_file_header {
|
||||
|
||||
struct perf_header;
|
||||
|
||||
int perf_file_header__read(struct perf_file_header *self,
|
||||
int perf_file_header__read(struct perf_file_header *header,
|
||||
struct perf_header *ph, int fd);
|
||||
|
||||
struct perf_header {
|
||||
int frozen;
|
||||
int attrs, size;
|
||||
bool needs_swap;
|
||||
struct perf_header_attr **attr;
|
||||
s64 attr_offset;
|
||||
u64 data_offset;
|
||||
u64 data_size;
|
||||
@@ -62,34 +53,25 @@ struct perf_header {
|
||||
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
|
||||
};
|
||||
|
||||
int perf_header__init(struct perf_header *self);
|
||||
void perf_header__exit(struct perf_header *self);
|
||||
struct perf_evlist;
|
||||
|
||||
int perf_header__read(struct perf_session *session, int fd);
|
||||
int perf_header__write(struct perf_header *self, int fd, bool at_exit);
|
||||
int perf_session__read_header(struct perf_session *session, int fd);
|
||||
int perf_session__write_header(struct perf_session *session,
|
||||
struct perf_evlist *evlist,
|
||||
int fd, bool at_exit);
|
||||
int perf_header__write_pipe(int fd);
|
||||
|
||||
int perf_header__add_attr(struct perf_header *self,
|
||||
struct perf_header_attr *attr);
|
||||
|
||||
int perf_header__push_event(u64 id, const char *name);
|
||||
char *perf_header__find_event(u64 id);
|
||||
|
||||
struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr);
|
||||
void perf_header_attr__delete(struct perf_header_attr *self);
|
||||
u64 perf_evlist__sample_type(struct perf_evlist *evlist);
|
||||
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
|
||||
void perf_header__set_feat(struct perf_header *header, int feat);
|
||||
void perf_header__clear_feat(struct perf_header *header, int feat);
|
||||
bool perf_header__has_feat(const struct perf_header *header, int feat);
|
||||
|
||||
int perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
|
||||
|
||||
u64 perf_header__sample_type(struct perf_header *header);
|
||||
bool perf_header__sample_id_all(const struct perf_header *header);
|
||||
struct perf_event_attr *
|
||||
perf_header__find_attr(u64 id, struct perf_header *header);
|
||||
void perf_header__set_feat(struct perf_header *self, int feat);
|
||||
void perf_header__clear_feat(struct perf_header *self, int feat);
|
||||
bool perf_header__has_feat(const struct perf_header *self, int feat);
|
||||
|
||||
int perf_header__process_sections(struct perf_header *self, int fd,
|
||||
int (*process)(struct perf_file_section *self,
|
||||
int perf_header__process_sections(struct perf_header *header, int fd,
|
||||
int (*process)(struct perf_file_section *section,
|
||||
struct perf_header *ph,
|
||||
int feat, int fd));
|
||||
|
||||
@@ -97,32 +79,31 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
|
||||
const char *name, bool is_kallsyms);
|
||||
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir);
|
||||
|
||||
int event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
|
||||
event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__synthesize_attrs(struct perf_header *self,
|
||||
event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__process_attr(event_t *self, struct perf_session *session);
|
||||
|
||||
int event__synthesize_event_type(u64 event_id, char *name,
|
||||
event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__synthesize_event_types(event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__process_event_type(event_t *self,
|
||||
struct perf_session *session);
|
||||
|
||||
int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
|
||||
event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int event__process_tracing_data(event_t *self,
|
||||
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int perf_session__synthesize_attrs(struct perf_session *session,
|
||||
perf_event__handler_t process);
|
||||
int perf_event__process_attr(union perf_event *event, struct perf_session *session);
|
||||
|
||||
int event__synthesize_build_id(struct dso *pos, u16 misc,
|
||||
event__handler_t process,
|
||||
struct machine *machine,
|
||||
struct perf_session *session);
|
||||
int event__process_build_id(event_t *self, struct perf_session *session);
|
||||
int perf_event__synthesize_event_type(u64 event_id, char *name,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int perf_event__synthesize_event_types(perf_event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int perf_event__process_event_type(union perf_event *event,
|
||||
struct perf_session *session);
|
||||
|
||||
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
|
||||
perf_event__handler_t process,
|
||||
struct perf_session *session);
|
||||
int perf_event__process_tracing_data(union perf_event *event,
|
||||
struct perf_session *session);
|
||||
|
||||
int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
|
||||
perf_event__handler_t process,
|
||||
struct machine *machine,
|
||||
struct perf_session *session);
|
||||
int perf_event__process_build_id(union perf_event *event,
|
||||
struct perf_session *session);
|
||||
#endif /* __PERF_HEADER_H */
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#include "annotate.h"
|
||||
#include "util.h"
|
||||
#include "build-id.h"
|
||||
#include "hist.h"
|
||||
@@ -49,6 +50,15 @@ static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
|
||||
|
||||
if (h->ms.sym)
|
||||
hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
|
||||
else {
|
||||
const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
|
||||
|
||||
if (hists__col_len(self, HISTC_DSO) < unresolved_col_width &&
|
||||
!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
|
||||
!symbol_conf.dso_list)
|
||||
hists__set_col_len(self, HISTC_DSO,
|
||||
unresolved_col_width);
|
||||
}
|
||||
|
||||
len = thread__comm_len(h->thread);
|
||||
if (hists__new_col_len(self, HISTC_COMM, len))
|
||||
@@ -211,7 +221,9 @@ void hist_entry__free(struct hist_entry *he)
|
||||
* collapse the histogram
|
||||
*/
|
||||
|
||||
static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
|
||||
static bool hists__collapse_insert_entry(struct hists *self,
|
||||
struct rb_root *root,
|
||||
struct hist_entry *he)
|
||||
{
|
||||
struct rb_node **p = &root->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
@@ -226,8 +238,11 @@ static bool collapse__insert_entry(struct rb_root *root, struct hist_entry *he)
|
||||
|
||||
if (!cmp) {
|
||||
iter->period += he->period;
|
||||
if (symbol_conf.use_callchain)
|
||||
callchain_merge(iter->callchain, he->callchain);
|
||||
if (symbol_conf.use_callchain) {
|
||||
callchain_cursor_reset(&self->callchain_cursor);
|
||||
callchain_merge(&self->callchain_cursor, iter->callchain,
|
||||
he->callchain);
|
||||
}
|
||||
hist_entry__free(he);
|
||||
return false;
|
||||
}
|
||||
@@ -262,7 +277,7 @@ void hists__collapse_resort(struct hists *self)
|
||||
next = rb_next(&n->rb_node);
|
||||
|
||||
rb_erase(&n->rb_node, &self->entries);
|
||||
if (collapse__insert_entry(&tmp, n))
|
||||
if (hists__collapse_insert_entry(self, &tmp, n))
|
||||
hists__inc_nr_entries(self, n);
|
||||
}
|
||||
|
||||
@@ -425,7 +440,7 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
|
||||
u64 cumul;
|
||||
|
||||
child = rb_entry(node, struct callchain_node, rb_node);
|
||||
cumul = cumul_hits(child);
|
||||
cumul = callchain_cumul_hits(child);
|
||||
remaining -= cumul;
|
||||
|
||||
/*
|
||||
@@ -947,225 +962,14 @@ void hists__filter_by_thread(struct hists *self, const struct thread *thread)
|
||||
}
|
||||
}
|
||||
|
||||
static int symbol__alloc_hist(struct symbol *self)
|
||||
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
|
||||
{
|
||||
struct sym_priv *priv = symbol__priv(self);
|
||||
const int size = (sizeof(*priv->hist) +
|
||||
(self->end - self->start) * sizeof(u64));
|
||||
|
||||
priv->hist = zalloc(size);
|
||||
return priv->hist == NULL ? -1 : 0;
|
||||
return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
|
||||
}
|
||||
|
||||
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip)
|
||||
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
|
||||
{
|
||||
unsigned int sym_size, offset;
|
||||
struct symbol *sym = self->ms.sym;
|
||||
struct sym_priv *priv;
|
||||
struct sym_hist *h;
|
||||
|
||||
if (!sym || !self->ms.map)
|
||||
return 0;
|
||||
|
||||
priv = symbol__priv(sym);
|
||||
if (priv->hist == NULL && symbol__alloc_hist(sym) < 0)
|
||||
return -ENOMEM;
|
||||
|
||||
sym_size = sym->end - sym->start;
|
||||
offset = ip - sym->start;
|
||||
|
||||
pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
|
||||
|
||||
if (offset >= sym_size)
|
||||
return 0;
|
||||
|
||||
h = priv->hist;
|
||||
h->sum++;
|
||||
h->ip[offset]++;
|
||||
|
||||
pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
|
||||
"] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
|
||||
ip, ip - self->ms.sym->start, h->ip[offset]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct objdump_line *objdump_line__new(s64 offset, char *line, size_t privsize)
|
||||
{
|
||||
struct objdump_line *self = malloc(sizeof(*self) + privsize);
|
||||
|
||||
if (self != NULL) {
|
||||
self->offset = offset;
|
||||
self->line = line;
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
void objdump_line__free(struct objdump_line *self)
|
||||
{
|
||||
free(self->line);
|
||||
free(self);
|
||||
}
|
||||
|
||||
static void objdump__add_line(struct list_head *head, struct objdump_line *line)
|
||||
{
|
||||
list_add_tail(&line->node, head);
|
||||
}
|
||||
|
||||
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
|
||||
struct objdump_line *pos)
|
||||
{
|
||||
list_for_each_entry_continue(pos, head, node)
|
||||
if (pos->offset >= 0)
|
||||
return pos;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int hist_entry__parse_objdump_line(struct hist_entry *self, FILE *file,
|
||||
struct list_head *head, size_t privsize)
|
||||
{
|
||||
struct symbol *sym = self->ms.sym;
|
||||
struct objdump_line *objdump_line;
|
||||
char *line = NULL, *tmp, *tmp2, *c;
|
||||
size_t line_len;
|
||||
s64 line_ip, offset = -1;
|
||||
|
||||
if (getline(&line, &line_len, file) < 0)
|
||||
return -1;
|
||||
|
||||
if (!line)
|
||||
return -1;
|
||||
|
||||
while (line_len != 0 && isspace(line[line_len - 1]))
|
||||
line[--line_len] = '\0';
|
||||
|
||||
c = strchr(line, '\n');
|
||||
if (c)
|
||||
*c = 0;
|
||||
|
||||
line_ip = -1;
|
||||
|
||||
/*
|
||||
* Strip leading spaces:
|
||||
*/
|
||||
tmp = line;
|
||||
while (*tmp) {
|
||||
if (*tmp != ' ')
|
||||
break;
|
||||
tmp++;
|
||||
}
|
||||
|
||||
if (*tmp) {
|
||||
/*
|
||||
* Parse hexa addresses followed by ':'
|
||||
*/
|
||||
line_ip = strtoull(tmp, &tmp2, 16);
|
||||
if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
|
||||
line_ip = -1;
|
||||
}
|
||||
|
||||
if (line_ip != -1) {
|
||||
u64 start = map__rip_2objdump(self->ms.map, sym->start),
|
||||
end = map__rip_2objdump(self->ms.map, sym->end);
|
||||
|
||||
offset = line_ip - start;
|
||||
if (offset < 0 || (u64)line_ip > end)
|
||||
offset = -1;
|
||||
}
|
||||
|
||||
objdump_line = objdump_line__new(offset, line, privsize);
|
||||
if (objdump_line == NULL) {
|
||||
free(line);
|
||||
return -1;
|
||||
}
|
||||
objdump__add_line(head, objdump_line);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
|
||||
size_t privsize)
|
||||
{
|
||||
struct symbol *sym = self->ms.sym;
|
||||
struct map *map = self->ms.map;
|
||||
struct dso *dso = map->dso;
|
||||
char *filename = dso__build_id_filename(dso, NULL, 0);
|
||||
bool free_filename = true;
|
||||
char command[PATH_MAX * 2];
|
||||
FILE *file;
|
||||
int err = 0;
|
||||
u64 len;
|
||||
char symfs_filename[PATH_MAX];
|
||||
|
||||
if (filename) {
|
||||
snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
|
||||
symbol_conf.symfs, filename);
|
||||
}
|
||||
|
||||
if (filename == NULL) {
|
||||
if (dso->has_build_id) {
|
||||
pr_err("Can't annotate %s: not enough memory\n",
|
||||
sym->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
goto fallback;
|
||||
} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
|
||||
strstr(command, "[kernel.kallsyms]") ||
|
||||
access(symfs_filename, R_OK)) {
|
||||
free(filename);
|
||||
fallback:
|
||||
/*
|
||||
* If we don't have build-ids or the build-id file isn't in the
|
||||
* cache, or is just a kallsyms file, well, lets hope that this
|
||||
* DSO is the same as when 'perf record' ran.
|
||||
*/
|
||||
filename = dso->long_name;
|
||||
snprintf(symfs_filename, sizeof(symfs_filename), "%s%s",
|
||||
symbol_conf.symfs, filename);
|
||||
free_filename = false;
|
||||
}
|
||||
|
||||
if (dso->origin == DSO__ORIG_KERNEL) {
|
||||
if (dso->annotate_warned)
|
||||
goto out_free_filename;
|
||||
err = -ENOENT;
|
||||
dso->annotate_warned = 1;
|
||||
pr_err("Can't annotate %s: No vmlinux file was found in the "
|
||||
"path\n", sym->name);
|
||||
goto out_free_filename;
|
||||
}
|
||||
|
||||
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
|
||||
filename, sym->name, map->unmap_ip(map, sym->start),
|
||||
map->unmap_ip(map, sym->end));
|
||||
|
||||
len = sym->end - sym->start;
|
||||
|
||||
pr_debug("annotating [%p] %30s : [%p] %30s\n",
|
||||
dso, dso->long_name, sym, sym->name);
|
||||
|
||||
snprintf(command, sizeof(command),
|
||||
"objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
|
||||
map__rip_2objdump(map, sym->start),
|
||||
map__rip_2objdump(map, sym->end),
|
||||
symfs_filename, filename);
|
||||
|
||||
pr_debug("Executing: %s\n", command);
|
||||
|
||||
file = popen(command, "r");
|
||||
if (!file)
|
||||
goto out_free_filename;
|
||||
|
||||
while (!feof(file))
|
||||
if (hist_entry__parse_objdump_line(self, file, head, privsize) < 0)
|
||||
break;
|
||||
|
||||
pclose(file);
|
||||
out_free_filename:
|
||||
if (free_filename)
|
||||
free(filename);
|
||||
return err;
|
||||
return symbol__annotate(he->ms.sym, he->ms.map, privsize);
|
||||
}
|
||||
|
||||
void hists__inc_nr_events(struct hists *self, u32 type)
|
||||
@@ -1180,8 +984,12 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
|
||||
size_t ret = 0;
|
||||
|
||||
for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
|
||||
const char *name = event__get_event_name(i);
|
||||
const char *name;
|
||||
|
||||
if (self->stats.nr_events[i] == 0)
|
||||
continue;
|
||||
|
||||
name = perf_event__name(i);
|
||||
if (!strcmp(name, "UNKNOWN"))
|
||||
continue;
|
||||
|
||||
|
||||
@@ -9,33 +9,6 @@ extern struct callchain_param callchain_param;
|
||||
struct hist_entry;
|
||||
struct addr_location;
|
||||
struct symbol;
|
||||
struct rb_root;
|
||||
|
||||
struct objdump_line {
|
||||
struct list_head node;
|
||||
s64 offset;
|
||||
char *line;
|
||||
};
|
||||
|
||||
void objdump_line__free(struct objdump_line *self);
|
||||
struct objdump_line *objdump__get_next_ip_line(struct list_head *head,
|
||||
struct objdump_line *pos);
|
||||
|
||||
struct sym_hist {
|
||||
u64 sum;
|
||||
u64 ip[0];
|
||||
};
|
||||
|
||||
struct sym_ext {
|
||||
struct rb_node node;
|
||||
double percent;
|
||||
char *path;
|
||||
};
|
||||
|
||||
struct sym_priv {
|
||||
struct sym_hist *hist;
|
||||
struct sym_ext *ext;
|
||||
};
|
||||
|
||||
/*
|
||||
* The kernel collects the number of events it couldn't send in a stretch and
|
||||
@@ -69,14 +42,13 @@ enum hist_column {
|
||||
};
|
||||
|
||||
struct hists {
|
||||
struct rb_node rb_node;
|
||||
struct rb_root entries;
|
||||
u64 nr_entries;
|
||||
struct events_stats stats;
|
||||
u64 config;
|
||||
u64 event_stream;
|
||||
u32 type;
|
||||
u16 col_len[HISTC_NR_COLS];
|
||||
/* Best would be to reuse the session callchain cursor */
|
||||
struct callchain_cursor callchain_cursor;
|
||||
};
|
||||
|
||||
struct hist_entry *__hists__add_entry(struct hists *self,
|
||||
@@ -102,9 +74,8 @@ size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
|
||||
size_t hists__fprintf(struct hists *self, struct hists *pair,
|
||||
bool show_displacement, FILE *fp);
|
||||
|
||||
int hist_entry__inc_addr_samples(struct hist_entry *self, u64 ip);
|
||||
int hist_entry__annotate(struct hist_entry *self, struct list_head *head,
|
||||
size_t privsize);
|
||||
int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
|
||||
int hist_entry__annotate(struct hist_entry *self, size_t privsize);
|
||||
|
||||
void hists__filter_by_dso(struct hists *self, const struct dso *dso);
|
||||
void hists__filter_by_thread(struct hists *self, const struct thread *thread);
|
||||
@@ -113,21 +84,18 @@ u16 hists__col_len(struct hists *self, enum hist_column col);
|
||||
void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
|
||||
bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
|
||||
|
||||
struct perf_evlist;
|
||||
|
||||
#ifdef NO_NEWT_SUPPORT
|
||||
static inline int hists__browse(struct hists *self __used,
|
||||
const char *helpline __used,
|
||||
const char *ev_name __used)
|
||||
static inline
|
||||
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
|
||||
const char *help __used)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int hists__tui_browse_tree(struct rb_root *self __used,
|
||||
const char *help __used)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
|
||||
static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
|
||||
int evidx __used)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
@@ -135,14 +103,12 @@ static inline int hist_entry__tui_annotate(struct hist_entry *self __used)
|
||||
#define KEY_RIGHT -2
|
||||
#else
|
||||
#include <newt.h>
|
||||
int hists__browse(struct hists *self, const char *helpline,
|
||||
const char *ev_name);
|
||||
int hist_entry__tui_annotate(struct hist_entry *self);
|
||||
int hist_entry__tui_annotate(struct hist_entry *self, int evidx);
|
||||
|
||||
#define KEY_LEFT NEWT_KEY_LEFT
|
||||
#define KEY_RIGHT NEWT_KEY_RIGHT
|
||||
|
||||
int hists__tui_browse_tree(struct rb_root *self, const char *help);
|
||||
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help);
|
||||
#endif
|
||||
|
||||
unsigned int hists__sort_list_width(struct hists *self);
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#include <linux/kernel.h>
|
||||
#include "../../../../include/linux/list.h"
|
||||
|
||||
#ifndef PERF_LIST_H
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#include "../../../include/linux/hw_breakpoint.h"
|
||||
#include "util.h"
|
||||
#include "../perf.h"
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "parse-options.h"
|
||||
#include "parse-events.h"
|
||||
@@ -11,10 +12,6 @@
|
||||
#include "header.h"
|
||||
#include "debugfs.h"
|
||||
|
||||
int nr_counters;
|
||||
|
||||
LIST_HEAD(evsel_list);
|
||||
|
||||
struct event_symbol {
|
||||
u8 type;
|
||||
u64 config;
|
||||
@@ -271,6 +268,9 @@ const char *event_name(struct perf_evsel *evsel)
|
||||
u64 config = evsel->attr.config;
|
||||
int type = evsel->attr.type;
|
||||
|
||||
if (evsel->name)
|
||||
return evsel->name;
|
||||
|
||||
return __event_name(type, config);
|
||||
}
|
||||
|
||||
@@ -449,8 +449,8 @@ parse_single_tracepoint_event(char *sys_name,
|
||||
/* sys + ':' + event + ':' + flags*/
|
||||
#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
|
||||
static enum event_result
|
||||
parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
|
||||
char *flags)
|
||||
parse_multiple_tracepoint_event(const struct option *opt, char *sys_name,
|
||||
const char *evt_exp, char *flags)
|
||||
{
|
||||
char evt_path[MAXPATHLEN];
|
||||
struct dirent *evt_ent;
|
||||
@@ -483,15 +483,16 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
|
||||
if (len < 0)
|
||||
return EVT_FAILED;
|
||||
|
||||
if (parse_events(NULL, event_opt, 0))
|
||||
if (parse_events(opt, event_opt, 0))
|
||||
return EVT_FAILED;
|
||||
}
|
||||
|
||||
return EVT_HANDLED_ALL;
|
||||
}
|
||||
|
||||
static enum event_result parse_tracepoint_event(const char **strp,
|
||||
struct perf_event_attr *attr)
|
||||
static enum event_result
|
||||
parse_tracepoint_event(const struct option *opt, const char **strp,
|
||||
struct perf_event_attr *attr)
|
||||
{
|
||||
const char *evt_name;
|
||||
char *flags = NULL, *comma_loc;
|
||||
@@ -530,7 +531,7 @@ static enum event_result parse_tracepoint_event(const char **strp,
|
||||
return EVT_FAILED;
|
||||
if (strpbrk(evt_name, "*?")) {
|
||||
*strp += strlen(sys_name) + evt_length + 1; /* 1 == the ':' */
|
||||
return parse_multiple_tracepoint_event(sys_name, evt_name,
|
||||
return parse_multiple_tracepoint_event(opt, sys_name, evt_name,
|
||||
flags);
|
||||
} else {
|
||||
return parse_single_tracepoint_event(sys_name, evt_name,
|
||||
@@ -740,11 +741,12 @@ parse_event_modifier(const char **strp, struct perf_event_attr *attr)
|
||||
* Symbolic names are (almost) exactly matched.
|
||||
*/
|
||||
static enum event_result
|
||||
parse_event_symbols(const char **str, struct perf_event_attr *attr)
|
||||
parse_event_symbols(const struct option *opt, const char **str,
|
||||
struct perf_event_attr *attr)
|
||||
{
|
||||
enum event_result ret;
|
||||
|
||||
ret = parse_tracepoint_event(str, attr);
|
||||
ret = parse_tracepoint_event(opt, str, attr);
|
||||
if (ret != EVT_FAILED)
|
||||
goto modifier;
|
||||
|
||||
@@ -778,14 +780,17 @@ modifier:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int parse_events(const struct option *opt __used, const char *str, int unset __used)
|
||||
int parse_events(const struct option *opt, const char *str, int unset __used)
|
||||
{
|
||||
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
|
||||
struct perf_event_attr attr;
|
||||
enum event_result ret;
|
||||
const char *ostr;
|
||||
|
||||
for (;;) {
|
||||
ostr = str;
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
ret = parse_event_symbols(&str, &attr);
|
||||
ret = parse_event_symbols(opt, &str, &attr);
|
||||
if (ret == EVT_FAILED)
|
||||
return -1;
|
||||
|
||||
@@ -794,12 +799,15 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
|
||||
|
||||
if (ret != EVT_HANDLED_ALL) {
|
||||
struct perf_evsel *evsel;
|
||||
evsel = perf_evsel__new(&attr,
|
||||
nr_counters);
|
||||
evsel = perf_evsel__new(&attr, evlist->nr_entries);
|
||||
if (evsel == NULL)
|
||||
return -1;
|
||||
list_add_tail(&evsel->node, &evsel_list);
|
||||
++nr_counters;
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
evsel->name = calloc(str - ostr + 1, 1);
|
||||
if (!evsel->name)
|
||||
return -1;
|
||||
strncpy(evsel->name, ostr, str - ostr);
|
||||
}
|
||||
|
||||
if (*str == 0)
|
||||
@@ -813,13 +821,14 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
|
||||
return 0;
|
||||
}
|
||||
|
||||
int parse_filter(const struct option *opt __used, const char *str,
|
||||
int parse_filter(const struct option *opt, const char *str,
|
||||
int unset __used)
|
||||
{
|
||||
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
|
||||
struct perf_evsel *last = NULL;
|
||||
|
||||
if (!list_empty(&evsel_list))
|
||||
last = list_entry(evsel_list.prev, struct perf_evsel, node);
|
||||
if (evlist->nr_entries > 0)
|
||||
last = list_entry(evlist->entries.prev, struct perf_evsel, node);
|
||||
|
||||
if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
|
||||
fprintf(stderr,
|
||||
@@ -849,7 +858,7 @@ static const char * const event_type_descriptors[] = {
|
||||
* Print the events from <debugfs_mount_point>/tracing/events
|
||||
*/
|
||||
|
||||
static void print_tracepoint_events(void)
|
||||
void print_tracepoint_events(const char *subsys_glob, const char *event_glob)
|
||||
{
|
||||
DIR *sys_dir, *evt_dir;
|
||||
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
|
||||
@@ -864,6 +873,9 @@ static void print_tracepoint_events(void)
|
||||
return;
|
||||
|
||||
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
|
||||
if (subsys_glob != NULL &&
|
||||
!strglobmatch(sys_dirent.d_name, subsys_glob))
|
||||
continue;
|
||||
|
||||
snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
|
||||
sys_dirent.d_name);
|
||||
@@ -872,6 +884,10 @@ static void print_tracepoint_events(void)
|
||||
continue;
|
||||
|
||||
for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
|
||||
if (event_glob != NULL &&
|
||||
!strglobmatch(evt_dirent.d_name, event_glob))
|
||||
continue;
|
||||
|
||||
snprintf(evt_path, MAXPATHLEN, "%s:%s",
|
||||
sys_dirent.d_name, evt_dirent.d_name);
|
||||
printf(" %-42s [%s]\n", evt_path,
|
||||
@@ -923,13 +939,61 @@ int is_valid_tracepoint(const char *event_string)
|
||||
return 0;
|
||||
}
|
||||
|
||||
void print_events_type(u8 type)
|
||||
{
|
||||
struct event_symbol *syms = event_symbols;
|
||||
unsigned int i;
|
||||
char name[64];
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
|
||||
if (type != syms->type)
|
||||
continue;
|
||||
|
||||
if (strlen(syms->alias))
|
||||
snprintf(name, sizeof(name), "%s OR %s",
|
||||
syms->symbol, syms->alias);
|
||||
else
|
||||
snprintf(name, sizeof(name), "%s", syms->symbol);
|
||||
|
||||
printf(" %-42s [%s]\n", name,
|
||||
event_type_descriptors[type]);
|
||||
}
|
||||
}
|
||||
|
||||
int print_hwcache_events(const char *event_glob)
|
||||
{
|
||||
unsigned int type, op, i, printed = 0;
|
||||
|
||||
for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
|
||||
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
|
||||
/* skip invalid cache type */
|
||||
if (!is_cache_op_valid(type, op))
|
||||
continue;
|
||||
|
||||
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
|
||||
char *name = event_cache_name(type, op, i);
|
||||
|
||||
if (event_glob != NULL &&
|
||||
!strglobmatch(name, event_glob))
|
||||
continue;
|
||||
|
||||
printf(" %-42s [%s]\n", name,
|
||||
event_type_descriptors[PERF_TYPE_HW_CACHE]);
|
||||
++printed;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return printed;
|
||||
}
|
||||
|
||||
/*
|
||||
* Print the help text for the event symbols:
|
||||
*/
|
||||
void print_events(void)
|
||||
void print_events(const char *event_glob)
|
||||
{
|
||||
struct event_symbol *syms = event_symbols;
|
||||
unsigned int i, type, op, prev_type = -1;
|
||||
unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
|
||||
char name[40];
|
||||
|
||||
printf("\n");
|
||||
@@ -938,8 +1002,16 @@ void print_events(void)
|
||||
for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
|
||||
type = syms->type;
|
||||
|
||||
if (type != prev_type)
|
||||
if (type != prev_type && printed) {
|
||||
printf("\n");
|
||||
printed = 0;
|
||||
ntypes_printed++;
|
||||
}
|
||||
|
||||
if (event_glob != NULL &&
|
||||
!(strglobmatch(syms->symbol, event_glob) ||
|
||||
(syms->alias && strglobmatch(syms->alias, event_glob))))
|
||||
continue;
|
||||
|
||||
if (strlen(syms->alias))
|
||||
sprintf(name, "%s OR %s", syms->symbol, syms->alias);
|
||||
@@ -949,22 +1021,17 @@ void print_events(void)
|
||||
event_type_descriptors[type]);
|
||||
|
||||
prev_type = type;
|
||||
++printed;
|
||||
}
|
||||
|
||||
printf("\n");
|
||||
for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
|
||||
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
|
||||
/* skip invalid cache type */
|
||||
if (!is_cache_op_valid(type, op))
|
||||
continue;
|
||||
|
||||
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
|
||||
printf(" %-42s [%s]\n",
|
||||
event_cache_name(type, op, i),
|
||||
event_type_descriptors[PERF_TYPE_HW_CACHE]);
|
||||
}
|
||||
}
|
||||
if (ntypes_printed) {
|
||||
printed = 0;
|
||||
printf("\n");
|
||||
}
|
||||
print_hwcache_events(event_glob);
|
||||
|
||||
if (event_glob != NULL)
|
||||
return;
|
||||
|
||||
printf("\n");
|
||||
printf(" %-42s [%s]\n",
|
||||
@@ -977,37 +1044,7 @@ void print_events(void)
|
||||
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
|
||||
printf("\n");
|
||||
|
||||
print_tracepoint_events();
|
||||
print_tracepoint_events(NULL, NULL);
|
||||
|
||||
exit(129);
|
||||
}
|
||||
|
||||
int perf_evsel_list__create_default(void)
|
||||
{
|
||||
struct perf_evsel *evsel;
|
||||
struct perf_event_attr attr;
|
||||
|
||||
memset(&attr, 0, sizeof(attr));
|
||||
attr.type = PERF_TYPE_HARDWARE;
|
||||
attr.config = PERF_COUNT_HW_CPU_CYCLES;
|
||||
|
||||
evsel = perf_evsel__new(&attr, 0);
|
||||
|
||||
if (evsel == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
list_add(&evsel->node, &evsel_list);
|
||||
++nr_counters;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void perf_evsel_list__delete(void)
|
||||
{
|
||||
struct perf_evsel *pos, *n;
|
||||
|
||||
list_for_each_entry_safe(pos, n, &evsel_list, node) {
|
||||
list_del_init(&pos->node);
|
||||
perf_evsel__delete(pos);
|
||||
}
|
||||
nr_counters = 0;
|
||||
}
|
||||
|
||||
@@ -9,11 +9,6 @@
|
||||
struct list_head;
|
||||
struct perf_evsel;
|
||||
|
||||
extern struct list_head evsel_list;
|
||||
|
||||
int perf_evsel_list__create_default(void);
|
||||
void perf_evsel_list__delete(void);
|
||||
|
||||
struct option;
|
||||
|
||||
struct tracepoint_path {
|
||||
@@ -25,8 +20,6 @@ struct tracepoint_path {
|
||||
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
|
||||
extern bool have_tracepoints(struct list_head *evlist);
|
||||
|
||||
extern int nr_counters;
|
||||
|
||||
const char *event_name(struct perf_evsel *event);
|
||||
extern const char *__event_name(int type, u64 config);
|
||||
|
||||
@@ -35,7 +28,10 @@ extern int parse_filter(const struct option *opt, const char *str, int unset);
|
||||
|
||||
#define EVENTS_HELP_MAX (128*1024)
|
||||
|
||||
extern void print_events(void);
|
||||
void print_events(const char *event_glob);
|
||||
void print_events_type(u8 type);
|
||||
void print_tracepoint_events(const char *subsys_glob, const char *event_glob);
|
||||
int print_hwcache_events(const char *event_glob);
|
||||
extern int is_valid_tracepoint(const char *event_string);
|
||||
|
||||
extern char debugfs_path[];
|
||||
|
||||
@@ -31,6 +31,7 @@
|
||||
#include <string.h>
|
||||
#include <stdarg.h>
|
||||
#include <limits.h>
|
||||
#include <elf.h>
|
||||
|
||||
#undef _GNU_SOURCE
|
||||
#include "util.h"
|
||||
@@ -111,7 +112,25 @@ static struct symbol *__find_kernel_function_by_name(const char *name,
|
||||
NULL);
|
||||
}
|
||||
|
||||
const char *kernel_get_module_path(const char *module)
|
||||
static struct map *kernel_get_module_map(const char *module)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
struct map_groups *grp = &machine.kmaps;
|
||||
|
||||
if (!module)
|
||||
module = "kernel";
|
||||
|
||||
for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
|
||||
struct map *pos = rb_entry(nd, struct map, rb_node);
|
||||
if (strncmp(pos->dso->short_name + 1, module,
|
||||
pos->dso->short_name_len - 2) == 0) {
|
||||
return pos;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct dso *kernel_get_module_dso(const char *module)
|
||||
{
|
||||
struct dso *dso;
|
||||
struct map *map;
|
||||
@@ -141,7 +160,13 @@ const char *kernel_get_module_path(const char *module)
|
||||
}
|
||||
}
|
||||
found:
|
||||
return dso->long_name;
|
||||
return dso;
|
||||
}
|
||||
|
||||
const char *kernel_get_module_path(const char *module)
|
||||
{
|
||||
struct dso *dso = kernel_get_module_dso(module);
|
||||
return (dso) ? dso->long_name : NULL;
|
||||
}
|
||||
|
||||
#ifdef DWARF_SUPPORT
|
||||
@@ -384,7 +409,7 @@ int show_line_range(struct line_range *lr, const char *module)
|
||||
setup_pager();
|
||||
|
||||
if (lr->function)
|
||||
fprintf(stdout, "<%s:%d>\n", lr->function,
|
||||
fprintf(stdout, "<%s@%s:%d>\n", lr->function, lr->path,
|
||||
lr->start - lr->offset);
|
||||
else
|
||||
fprintf(stdout, "<%s:%d>\n", lr->path, lr->start);
|
||||
@@ -426,12 +451,14 @@ end:
|
||||
}
|
||||
|
||||
static int show_available_vars_at(int fd, struct perf_probe_event *pev,
|
||||
int max_vls, bool externs)
|
||||
int max_vls, struct strfilter *_filter,
|
||||
bool externs)
|
||||
{
|
||||
char *buf;
|
||||
int ret, i;
|
||||
int ret, i, nvars;
|
||||
struct str_node *node;
|
||||
struct variable_list *vls = NULL, *vl;
|
||||
const char *var;
|
||||
|
||||
buf = synthesize_perf_probe_point(&pev->point);
|
||||
if (!buf)
|
||||
@@ -439,36 +466,45 @@ static int show_available_vars_at(int fd, struct perf_probe_event *pev,
|
||||
pr_debug("Searching variables at %s\n", buf);
|
||||
|
||||
ret = find_available_vars_at(fd, pev, &vls, max_vls, externs);
|
||||
if (ret > 0) {
|
||||
/* Some variables were found */
|
||||
fprintf(stdout, "Available variables at %s\n", buf);
|
||||
for (i = 0; i < ret; i++) {
|
||||
vl = &vls[i];
|
||||
/*
|
||||
* A probe point might be converted to
|
||||
* several trace points.
|
||||
*/
|
||||
fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
|
||||
vl->point.offset);
|
||||
free(vl->point.symbol);
|
||||
if (vl->vars) {
|
||||
strlist__for_each(node, vl->vars)
|
||||
fprintf(stdout, "\t\t%s\n", node->s);
|
||||
strlist__delete(vl->vars);
|
||||
} else
|
||||
fprintf(stdout, "(No variables)\n");
|
||||
}
|
||||
free(vls);
|
||||
} else
|
||||
if (ret <= 0) {
|
||||
pr_err("Failed to find variables at %s (%d)\n", buf, ret);
|
||||
|
||||
goto end;
|
||||
}
|
||||
/* Some variables are found */
|
||||
fprintf(stdout, "Available variables at %s\n", buf);
|
||||
for (i = 0; i < ret; i++) {
|
||||
vl = &vls[i];
|
||||
/*
|
||||
* A probe point might be converted to
|
||||
* several trace points.
|
||||
*/
|
||||
fprintf(stdout, "\t@<%s+%lu>\n", vl->point.symbol,
|
||||
vl->point.offset);
|
||||
free(vl->point.symbol);
|
||||
nvars = 0;
|
||||
if (vl->vars) {
|
||||
strlist__for_each(node, vl->vars) {
|
||||
var = strchr(node->s, '\t') + 1;
|
||||
if (strfilter__compare(_filter, var)) {
|
||||
fprintf(stdout, "\t\t%s\n", node->s);
|
||||
nvars++;
|
||||
}
|
||||
}
|
||||
strlist__delete(vl->vars);
|
||||
}
|
||||
if (nvars == 0)
|
||||
fprintf(stdout, "\t\t(No matched variables)\n");
|
||||
}
|
||||
free(vls);
|
||||
end:
|
||||
free(buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Show available variables on given probe point */
|
||||
int show_available_vars(struct perf_probe_event *pevs, int npevs,
|
||||
int max_vls, const char *module, bool externs)
|
||||
int max_vls, const char *module,
|
||||
struct strfilter *_filter, bool externs)
|
||||
{
|
||||
int i, fd, ret = 0;
|
||||
|
||||
@@ -485,7 +521,8 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
|
||||
setup_pager();
|
||||
|
||||
for (i = 0; i < npevs && ret >= 0; i++)
|
||||
ret = show_available_vars_at(fd, &pevs[i], max_vls, externs);
|
||||
ret = show_available_vars_at(fd, &pevs[i], max_vls, _filter,
|
||||
externs);
|
||||
|
||||
close(fd);
|
||||
return ret;
|
||||
@@ -531,7 +568,9 @@ int show_line_range(struct line_range *lr __unused, const char *module __unused)
|
||||
|
||||
int show_available_vars(struct perf_probe_event *pevs __unused,
|
||||
int npevs __unused, int max_vls __unused,
|
||||
const char *module __unused, bool externs __unused)
|
||||
const char *module __unused,
|
||||
struct strfilter *filter __unused,
|
||||
bool externs __unused)
|
||||
{
|
||||
pr_warning("Debuginfo-analysis is not supported.\n");
|
||||
return -ENOSYS;
|
||||
@@ -556,11 +595,11 @@ static int parse_line_num(char **ptr, int *val, const char *what)
|
||||
* The line range syntax is described by:
|
||||
*
|
||||
* SRC[:SLN[+NUM|-ELN]]
|
||||
* FNC[:SLN[+NUM|-ELN]]
|
||||
* FNC[@SRC][:SLN[+NUM|-ELN]]
|
||||
*/
|
||||
int parse_line_range_desc(const char *arg, struct line_range *lr)
|
||||
{
|
||||
char *range, *name = strdup(arg);
|
||||
char *range, *file, *name = strdup(arg);
|
||||
int err;
|
||||
|
||||
if (!name)
|
||||
@@ -610,7 +649,16 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
|
||||
}
|
||||
}
|
||||
|
||||
if (strchr(name, '.'))
|
||||
file = strchr(name, '@');
|
||||
if (file) {
|
||||
*file = '\0';
|
||||
lr->file = strdup(++file);
|
||||
if (lr->file == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
lr->function = name;
|
||||
} else if (strchr(name, '.'))
|
||||
lr->file = name;
|
||||
else
|
||||
lr->function = name;
|
||||
@@ -1784,9 +1832,12 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
|
||||
}
|
||||
|
||||
/* Loop 2: add all events */
|
||||
for (i = 0; i < npevs && ret >= 0; i++)
|
||||
for (i = 0; i < npevs; i++) {
|
||||
ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
|
||||
pkgs[i].ntevs, force_add);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
end:
|
||||
/* Loop 3: cleanup and free trace events */
|
||||
for (i = 0; i < npevs; i++) {
|
||||
@@ -1912,4 +1963,46 @@ int del_perf_probe_events(struct strlist *dellist)
|
||||
|
||||
return ret;
|
||||
}
|
||||
/* TODO: don't use a global variable for filter ... */
|
||||
static struct strfilter *available_func_filter;
|
||||
|
||||
/*
|
||||
* If a symbol corresponds to a function with global binding and
|
||||
* matches filter return 0. For all others return 1.
|
||||
*/
|
||||
static int filter_available_functions(struct map *map __unused,
|
||||
struct symbol *sym)
|
||||
{
|
||||
if (sym->binding == STB_GLOBAL &&
|
||||
strfilter__compare(available_func_filter, sym->name))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
int show_available_funcs(const char *module, struct strfilter *_filter)
|
||||
{
|
||||
struct map *map;
|
||||
int ret;
|
||||
|
||||
setup_pager();
|
||||
|
||||
ret = init_vmlinux();
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
map = kernel_get_module_map(module);
|
||||
if (!map) {
|
||||
pr_err("Failed to find %s map.\n", (module) ? : "kernel");
|
||||
return -EINVAL;
|
||||
}
|
||||
available_func_filter = _filter;
|
||||
if (map__load(map, filter_available_functions)) {
|
||||
pr_err("Failed to load map.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!dso__sorted_by_name(map->dso, map->type))
|
||||
dso__sort_by_name(map->dso, map->type);
|
||||
|
||||
dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
#include <stdbool.h>
|
||||
#include "strlist.h"
|
||||
#include "strfilter.h"
|
||||
|
||||
extern bool probe_event_dry_run;
|
||||
|
||||
@@ -126,7 +127,8 @@ extern int show_perf_probe_events(void);
|
||||
extern int show_line_range(struct line_range *lr, const char *module);
|
||||
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
|
||||
int max_probe_points, const char *module,
|
||||
bool externs);
|
||||
struct strfilter *filter, bool externs);
|
||||
extern int show_available_funcs(const char *module, struct strfilter *filter);
|
||||
|
||||
|
||||
/* Maximum index number of event-name postfix */
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#include <ctype.h>
|
||||
#include <dwarf-regs.h>
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include "event.h"
|
||||
#include "debug.h"
|
||||
#include "util.h"
|
||||
@@ -280,6 +281,19 @@ static bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
|
||||
return name ? (strcmp(tname, name) == 0) : false;
|
||||
}
|
||||
|
||||
/* Get callsite line number of inline-function instance */
|
||||
static int die_get_call_lineno(Dwarf_Die *in_die)
|
||||
{
|
||||
Dwarf_Attribute attr;
|
||||
Dwarf_Word ret;
|
||||
|
||||
if (!dwarf_attr(in_die, DW_AT_call_line, &attr))
|
||||
return -ENOENT;
|
||||
|
||||
dwarf_formudata(&attr, &ret);
|
||||
return (int)ret;
|
||||
}
|
||||
|
||||
/* Get type die */
|
||||
static Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
|
||||
{
|
||||
@@ -320,13 +334,23 @@ static Dwarf_Die *die_get_real_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem)
|
||||
return vr_die;
|
||||
}
|
||||
|
||||
static bool die_is_signed_type(Dwarf_Die *tp_die)
|
||||
static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name,
|
||||
Dwarf_Word *result)
|
||||
{
|
||||
Dwarf_Attribute attr;
|
||||
|
||||
if (dwarf_attr(tp_die, attr_name, &attr) == NULL ||
|
||||
dwarf_formudata(&attr, result) != 0)
|
||||
return -ENOENT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool die_is_signed_type(Dwarf_Die *tp_die)
|
||||
{
|
||||
Dwarf_Word ret;
|
||||
|
||||
if (dwarf_attr(tp_die, DW_AT_encoding, &attr) == NULL ||
|
||||
dwarf_formudata(&attr, &ret) != 0)
|
||||
if (die_get_attr_udata(tp_die, DW_AT_encoding, &ret))
|
||||
return false;
|
||||
|
||||
return (ret == DW_ATE_signed_char || ret == DW_ATE_signed ||
|
||||
@@ -335,11 +359,29 @@ static bool die_is_signed_type(Dwarf_Die *tp_die)
|
||||
|
||||
static int die_get_byte_size(Dwarf_Die *tp_die)
|
||||
{
|
||||
Dwarf_Attribute attr;
|
||||
Dwarf_Word ret;
|
||||
|
||||
if (dwarf_attr(tp_die, DW_AT_byte_size, &attr) == NULL ||
|
||||
dwarf_formudata(&attr, &ret) != 0)
|
||||
if (die_get_attr_udata(tp_die, DW_AT_byte_size, &ret))
|
||||
return 0;
|
||||
|
||||
return (int)ret;
|
||||
}
|
||||
|
||||
static int die_get_bit_size(Dwarf_Die *tp_die)
|
||||
{
|
||||
Dwarf_Word ret;
|
||||
|
||||
if (die_get_attr_udata(tp_die, DW_AT_bit_size, &ret))
|
||||
return 0;
|
||||
|
||||
return (int)ret;
|
||||
}
|
||||
|
||||
static int die_get_bit_offset(Dwarf_Die *tp_die)
|
||||
{
|
||||
Dwarf_Word ret;
|
||||
|
||||
if (die_get_attr_udata(tp_die, DW_AT_bit_offset, &ret))
|
||||
return 0;
|
||||
|
||||
return (int)ret;
|
||||
@@ -458,6 +500,151 @@ static Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
|
||||
return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
|
||||
}
|
||||
|
||||
/* Walker on lines (Note: line number will not be sorted) */
|
||||
typedef int (* line_walk_handler_t) (const char *fname, int lineno,
|
||||
Dwarf_Addr addr, void *data);
|
||||
|
||||
struct __line_walk_param {
|
||||
const char *fname;
|
||||
line_walk_handler_t handler;
|
||||
void *data;
|
||||
int retval;
|
||||
};
|
||||
|
||||
static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data)
|
||||
{
|
||||
struct __line_walk_param *lw = data;
|
||||
Dwarf_Addr addr;
|
||||
int lineno;
|
||||
|
||||
if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) {
|
||||
lineno = die_get_call_lineno(in_die);
|
||||
if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) {
|
||||
lw->retval = lw->handler(lw->fname, lineno, addr,
|
||||
lw->data);
|
||||
if (lw->retval != 0)
|
||||
return DIE_FIND_CB_FOUND;
|
||||
}
|
||||
}
|
||||
return DIE_FIND_CB_SIBLING;
|
||||
}
|
||||
|
||||
/* Walk on lines of blocks included in given DIE */
|
||||
static int __die_walk_funclines(Dwarf_Die *sp_die,
|
||||
line_walk_handler_t handler, void *data)
|
||||
{
|
||||
struct __line_walk_param lw = {
|
||||
.handler = handler,
|
||||
.data = data,
|
||||
.retval = 0,
|
||||
};
|
||||
Dwarf_Die die_mem;
|
||||
Dwarf_Addr addr;
|
||||
int lineno;
|
||||
|
||||
/* Handle function declaration line */
|
||||
lw.fname = dwarf_decl_file(sp_die);
|
||||
if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 &&
|
||||
dwarf_entrypc(sp_die, &addr) == 0) {
|
||||
lw.retval = handler(lw.fname, lineno, addr, data);
|
||||
if (lw.retval != 0)
|
||||
goto done;
|
||||
}
|
||||
die_find_child(sp_die, __die_walk_funclines_cb, &lw, &die_mem);
|
||||
done:
|
||||
return lw.retval;
|
||||
}
|
||||
|
||||
static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data)
|
||||
{
|
||||
struct __line_walk_param *lw = data;
|
||||
|
||||
lw->retval = __die_walk_funclines(sp_die, lw->handler, lw->data);
|
||||
if (lw->retval != 0)
|
||||
return DWARF_CB_ABORT;
|
||||
|
||||
return DWARF_CB_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Walk on lines inside given PDIE. If the PDIE is subprogram, walk only on
|
||||
* the lines inside the subprogram, otherwise PDIE must be a CU DIE.
|
||||
*/
|
||||
static int die_walk_lines(Dwarf_Die *pdie, line_walk_handler_t handler,
|
||||
void *data)
|
||||
{
|
||||
Dwarf_Lines *lines;
|
||||
Dwarf_Line *line;
|
||||
Dwarf_Addr addr;
|
||||
const char *fname;
|
||||
int lineno, ret = 0;
|
||||
Dwarf_Die die_mem, *cu_die;
|
||||
size_t nlines, i;
|
||||
|
||||
/* Get the CU die */
|
||||
if (dwarf_tag(pdie) == DW_TAG_subprogram)
|
||||
cu_die = dwarf_diecu(pdie, &die_mem, NULL, NULL);
|
||||
else
|
||||
cu_die = pdie;
|
||||
if (!cu_die) {
|
||||
pr_debug2("Failed to get CU from subprogram\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Get lines list in the CU */
|
||||
if (dwarf_getsrclines(cu_die, &lines, &nlines) != 0) {
|
||||
pr_debug2("Failed to get source lines on this CU.\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
pr_debug2("Get %zd lines from this CU\n", nlines);
|
||||
|
||||
/* Walk on the lines on lines list */
|
||||
for (i = 0; i < nlines; i++) {
|
||||
line = dwarf_onesrcline(lines, i);
|
||||
if (line == NULL ||
|
||||
dwarf_lineno(line, &lineno) != 0 ||
|
||||
dwarf_lineaddr(line, &addr) != 0) {
|
||||
pr_debug2("Failed to get line info. "
|
||||
"Possible error in debuginfo.\n");
|
||||
continue;
|
||||
}
|
||||
/* Filter lines based on address */
|
||||
if (pdie != cu_die)
|
||||
/*
|
||||
* Address filtering
|
||||
* The line is included in given function, and
|
||||
* no inline block includes it.
|
||||
*/
|
||||
if (!dwarf_haspc(pdie, addr) ||
|
||||
die_find_inlinefunc(pdie, addr, &die_mem))
|
||||
continue;
|
||||
/* Get source line */
|
||||
fname = dwarf_linesrc(line, NULL, NULL);
|
||||
|
||||
ret = handler(fname, lineno, addr, data);
|
||||
if (ret != 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dwarf lines doesn't include function declarations and inlined
|
||||
* subroutines. We have to check functions list or given function.
|
||||
*/
|
||||
if (pdie != cu_die)
|
||||
ret = __die_walk_funclines(pdie, handler, data);
|
||||
else {
|
||||
struct __line_walk_param param = {
|
||||
.handler = handler,
|
||||
.data = data,
|
||||
.retval = 0,
|
||||
};
|
||||
dwarf_getfuncs(cu_die, __die_walk_culines_cb, ¶m, 0);
|
||||
ret = param.retval;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct __find_variable_param {
|
||||
const char *name;
|
||||
Dwarf_Addr addr;
|
||||
@@ -669,6 +856,8 @@ static_var:
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
|
||||
|
||||
static int convert_variable_type(Dwarf_Die *vr_die,
|
||||
struct probe_trace_arg *tvar,
|
||||
const char *cast)
|
||||
@@ -685,6 +874,14 @@ static int convert_variable_type(Dwarf_Die *vr_die,
|
||||
return (tvar->type == NULL) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
if (die_get_bit_size(vr_die) != 0) {
|
||||
/* This is a bitfield */
|
||||
ret = snprintf(buf, 16, "b%d@%d/%zd", die_get_bit_size(vr_die),
|
||||
die_get_bit_offset(vr_die),
|
||||
BYTES_TO_BITS(die_get_byte_size(vr_die)));
|
||||
goto formatted;
|
||||
}
|
||||
|
||||
if (die_get_real_type(vr_die, &type) == NULL) {
|
||||
pr_warning("Failed to get a type information of %s.\n",
|
||||
dwarf_diename(vr_die));
|
||||
@@ -729,29 +926,31 @@ static int convert_variable_type(Dwarf_Die *vr_die,
|
||||
return (tvar->type == NULL) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
ret = die_get_byte_size(&type) * 8;
|
||||
if (ret) {
|
||||
/* Check the bitwidth */
|
||||
if (ret > MAX_BASIC_TYPE_BITS) {
|
||||
pr_info("%s exceeds max-bitwidth."
|
||||
" Cut down to %d bits.\n",
|
||||
dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
|
||||
ret = MAX_BASIC_TYPE_BITS;
|
||||
}
|
||||
ret = BYTES_TO_BITS(die_get_byte_size(&type));
|
||||
if (!ret)
|
||||
/* No size ... try to use default type */
|
||||
return 0;
|
||||
|
||||
ret = snprintf(buf, 16, "%c%d",
|
||||
die_is_signed_type(&type) ? 's' : 'u', ret);
|
||||
if (ret < 0 || ret >= 16) {
|
||||
if (ret >= 16)
|
||||
ret = -E2BIG;
|
||||
pr_warning("Failed to convert variable type: %s\n",
|
||||
strerror(-ret));
|
||||
return ret;
|
||||
}
|
||||
tvar->type = strdup(buf);
|
||||
if (tvar->type == NULL)
|
||||
return -ENOMEM;
|
||||
/* Check the bitwidth */
|
||||
if (ret > MAX_BASIC_TYPE_BITS) {
|
||||
pr_info("%s exceeds max-bitwidth. Cut down to %d bits.\n",
|
||||
dwarf_diename(&type), MAX_BASIC_TYPE_BITS);
|
||||
ret = MAX_BASIC_TYPE_BITS;
|
||||
}
|
||||
ret = snprintf(buf, 16, "%c%d",
|
||||
die_is_signed_type(&type) ? 's' : 'u', ret);
|
||||
|
||||
formatted:
|
||||
if (ret < 0 || ret >= 16) {
|
||||
if (ret >= 16)
|
||||
ret = -E2BIG;
|
||||
pr_warning("Failed to convert variable type: %s\n",
|
||||
strerror(-ret));
|
||||
return ret;
|
||||
}
|
||||
tvar->type = strdup(buf);
|
||||
if (tvar->type == NULL)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1050,157 +1249,102 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int probe_point_line_walker(const char *fname, int lineno,
|
||||
Dwarf_Addr addr, void *data)
|
||||
{
|
||||
struct probe_finder *pf = data;
|
||||
int ret;
|
||||
|
||||
if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0)
|
||||
return 0;
|
||||
|
||||
pf->addr = addr;
|
||||
ret = call_probe_finder(NULL, pf);
|
||||
|
||||
/* Continue if no error, because the line will be in inline function */
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
/* Find probe point from its line number */
|
||||
static int find_probe_point_by_line(struct probe_finder *pf)
|
||||
{
|
||||
Dwarf_Lines *lines;
|
||||
Dwarf_Line *line;
|
||||
size_t nlines, i;
|
||||
Dwarf_Addr addr;
|
||||
int lineno;
|
||||
int ret = 0;
|
||||
|
||||
if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
|
||||
pr_warning("No source lines found.\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
for (i = 0; i < nlines && ret == 0; i++) {
|
||||
line = dwarf_onesrcline(lines, i);
|
||||
if (dwarf_lineno(line, &lineno) != 0 ||
|
||||
lineno != pf->lno)
|
||||
continue;
|
||||
|
||||
/* TODO: Get fileno from line, but how? */
|
||||
if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
|
||||
continue;
|
||||
|
||||
if (dwarf_lineaddr(line, &addr) != 0) {
|
||||
pr_warning("Failed to get the address of the line.\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
pr_debug("Probe line found: line[%d]:%d addr:0x%jx\n",
|
||||
(int)i, lineno, (uintmax_t)addr);
|
||||
pf->addr = addr;
|
||||
|
||||
ret = call_probe_finder(NULL, pf);
|
||||
/* Continuing, because target line might be inlined. */
|
||||
}
|
||||
return ret;
|
||||
return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf);
|
||||
}
|
||||
|
||||
/* Find lines which match lazy pattern */
|
||||
static int find_lazy_match_lines(struct list_head *head,
|
||||
const char *fname, const char *pat)
|
||||
{
|
||||
char *fbuf, *p1, *p2;
|
||||
int fd, line, nlines = -1;
|
||||
struct stat st;
|
||||
FILE *fp;
|
||||
char *line = NULL;
|
||||
size_t line_len;
|
||||
ssize_t len;
|
||||
int count = 0, linenum = 1;
|
||||
|
||||
fd = open(fname, O_RDONLY);
|
||||
if (fd < 0) {
|
||||
pr_warning("Failed to open %s: %s\n", fname, strerror(-fd));
|
||||
fp = fopen(fname, "r");
|
||||
if (!fp) {
|
||||
pr_warning("Failed to open %s: %s\n", fname, strerror(errno));
|
||||
return -errno;
|
||||
}
|
||||
|
||||
if (fstat(fd, &st) < 0) {
|
||||
pr_warning("Failed to get the size of %s: %s\n",
|
||||
fname, strerror(errno));
|
||||
nlines = -errno;
|
||||
goto out_close;
|
||||
while ((len = getline(&line, &line_len, fp)) > 0) {
|
||||
|
||||
if (line[len - 1] == '\n')
|
||||
line[len - 1] = '\0';
|
||||
|
||||
if (strlazymatch(line, pat)) {
|
||||
line_list__add_line(head, linenum);
|
||||
count++;
|
||||
}
|
||||
linenum++;
|
||||
}
|
||||
|
||||
nlines = -ENOMEM;
|
||||
fbuf = malloc(st.st_size + 2);
|
||||
if (fbuf == NULL)
|
||||
goto out_close;
|
||||
if (read(fd, fbuf, st.st_size) < 0) {
|
||||
pr_warning("Failed to read %s: %s\n", fname, strerror(errno));
|
||||
nlines = -errno;
|
||||
goto out_free_fbuf;
|
||||
}
|
||||
fbuf[st.st_size] = '\n'; /* Dummy line */
|
||||
fbuf[st.st_size + 1] = '\0';
|
||||
p1 = fbuf;
|
||||
line = 1;
|
||||
nlines = 0;
|
||||
while ((p2 = strchr(p1, '\n')) != NULL) {
|
||||
*p2 = '\0';
|
||||
if (strlazymatch(p1, pat)) {
|
||||
line_list__add_line(head, line);
|
||||
nlines++;
|
||||
}
|
||||
line++;
|
||||
p1 = p2 + 1;
|
||||
}
|
||||
out_free_fbuf:
|
||||
free(fbuf);
|
||||
out_close:
|
||||
close(fd);
|
||||
return nlines;
|
||||
if (ferror(fp))
|
||||
count = -errno;
|
||||
free(line);
|
||||
fclose(fp);
|
||||
|
||||
if (count == 0)
|
||||
pr_debug("No matched lines found in %s.\n", fname);
|
||||
return count;
|
||||
}
|
||||
|
||||
static int probe_point_lazy_walker(const char *fname, int lineno,
|
||||
Dwarf_Addr addr, void *data)
|
||||
{
|
||||
struct probe_finder *pf = data;
|
||||
int ret;
|
||||
|
||||
if (!line_list__has_line(&pf->lcache, lineno) ||
|
||||
strtailcmp(fname, pf->fname) != 0)
|
||||
return 0;
|
||||
|
||||
pr_debug("Probe line found: line:%d addr:0x%llx\n",
|
||||
lineno, (unsigned long long)addr);
|
||||
pf->addr = addr;
|
||||
ret = call_probe_finder(NULL, pf);
|
||||
|
||||
/*
|
||||
* Continue if no error, because the lazy pattern will match
|
||||
* to other lines
|
||||
*/
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
/* Find probe points from lazy pattern */
|
||||
static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
|
||||
{
|
||||
Dwarf_Lines *lines;
|
||||
Dwarf_Line *line;
|
||||
size_t nlines, i;
|
||||
Dwarf_Addr addr;
|
||||
Dwarf_Die die_mem;
|
||||
int lineno;
|
||||
int ret = 0;
|
||||
|
||||
if (list_empty(&pf->lcache)) {
|
||||
/* Matching lazy line pattern */
|
||||
ret = find_lazy_match_lines(&pf->lcache, pf->fname,
|
||||
pf->pev->point.lazy_line);
|
||||
if (ret == 0) {
|
||||
pr_debug("No matched lines found in %s.\n", pf->fname);
|
||||
return 0;
|
||||
} else if (ret < 0)
|
||||
if (ret <= 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (dwarf_getsrclines(&pf->cu_die, &lines, &nlines) != 0) {
|
||||
pr_warning("No source lines found.\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
for (i = 0; i < nlines && ret >= 0; i++) {
|
||||
line = dwarf_onesrcline(lines, i);
|
||||
|
||||
if (dwarf_lineno(line, &lineno) != 0 ||
|
||||
!line_list__has_line(&pf->lcache, lineno))
|
||||
continue;
|
||||
|
||||
/* TODO: Get fileno from line, but how? */
|
||||
if (strtailcmp(dwarf_linesrc(line, NULL, NULL), pf->fname) != 0)
|
||||
continue;
|
||||
|
||||
if (dwarf_lineaddr(line, &addr) != 0) {
|
||||
pr_debug("Failed to get the address of line %d.\n",
|
||||
lineno);
|
||||
continue;
|
||||
}
|
||||
if (sp_die) {
|
||||
/* Address filtering 1: does sp_die include addr? */
|
||||
if (!dwarf_haspc(sp_die, addr))
|
||||
continue;
|
||||
/* Address filtering 2: No child include addr? */
|
||||
if (die_find_inlinefunc(sp_die, addr, &die_mem))
|
||||
continue;
|
||||
}
|
||||
|
||||
pr_debug("Probe line found: line[%d]:%d addr:0x%llx\n",
|
||||
(int)i, lineno, (unsigned long long)addr);
|
||||
pf->addr = addr;
|
||||
|
||||
ret = call_probe_finder(sp_die, pf);
|
||||
/* Continuing, because target line might be inlined. */
|
||||
}
|
||||
/* TODO: deallocate lines, but how? */
|
||||
return ret;
|
||||
return die_walk_lines(sp_die, probe_point_lazy_walker, pf);
|
||||
}
|
||||
|
||||
/* Callback parameter with return value */
|
||||
@@ -1318,8 +1462,7 @@ static int find_probes(int fd, struct probe_finder *pf)
|
||||
off = 0;
|
||||
line_list__init(&pf->lcache);
|
||||
/* Loop on CUs (Compilation Unit) */
|
||||
while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL) &&
|
||||
ret >= 0) {
|
||||
while (!dwarf_nextcu(dbg, off, &noff, &cuhl, NULL, NULL, NULL)) {
|
||||
/* Get the DIE(Debugging Information Entry) of this CU */
|
||||
diep = dwarf_offdie(dbg, off + cuhl, &pf->cu_die);
|
||||
if (!diep)
|
||||
@@ -1340,6 +1483,8 @@ static int find_probes(int fd, struct probe_finder *pf)
|
||||
pf->lno = pp->line;
|
||||
ret = find_probe_point_by_line(pf);
|
||||
}
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
off = noff;
|
||||
}
|
||||
@@ -1644,91 +1789,28 @@ static int line_range_add_line(const char *src, unsigned int lineno,
|
||||
return line_list__add_line(&lr->line_list, lineno);
|
||||
}
|
||||
|
||||
/* Search function declaration lines */
|
||||
static int line_range_funcdecl_cb(Dwarf_Die *sp_die, void *data)
|
||||
static int line_range_walk_cb(const char *fname, int lineno,
|
||||
Dwarf_Addr addr __used,
|
||||
void *data)
|
||||
{
|
||||
struct dwarf_callback_param *param = data;
|
||||
struct line_finder *lf = param->data;
|
||||
const char *src;
|
||||
int lineno;
|
||||
struct line_finder *lf = data;
|
||||
|
||||
src = dwarf_decl_file(sp_die);
|
||||
if (src && strtailcmp(src, lf->fname) != 0)
|
||||
return DWARF_CB_OK;
|
||||
|
||||
if (dwarf_decl_line(sp_die, &lineno) != 0 ||
|
||||
if ((strtailcmp(fname, lf->fname) != 0) ||
|
||||
(lf->lno_s > lineno || lf->lno_e < lineno))
|
||||
return DWARF_CB_OK;
|
||||
return 0;
|
||||
|
||||
param->retval = line_range_add_line(src, lineno, lf->lr);
|
||||
if (param->retval < 0)
|
||||
return DWARF_CB_ABORT;
|
||||
return DWARF_CB_OK;
|
||||
}
|
||||
if (line_range_add_line(fname, lineno, lf->lr) < 0)
|
||||
return -EINVAL;
|
||||
|
||||
static int find_line_range_func_decl_lines(struct line_finder *lf)
|
||||
{
|
||||
struct dwarf_callback_param param = {.data = (void *)lf, .retval = 0};
|
||||
dwarf_getfuncs(&lf->cu_die, line_range_funcdecl_cb, ¶m, 0);
|
||||
return param.retval;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Find line range from its line number */
|
||||
static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
|
||||
{
|
||||
Dwarf_Lines *lines;
|
||||
Dwarf_Line *line;
|
||||
size_t nlines, i;
|
||||
Dwarf_Addr addr;
|
||||
int lineno, ret = 0;
|
||||
const char *src;
|
||||
Dwarf_Die die_mem;
|
||||
int ret;
|
||||
|
||||
line_list__init(&lf->lr->line_list);
|
||||
if (dwarf_getsrclines(&lf->cu_die, &lines, &nlines) != 0) {
|
||||
pr_warning("No source lines found.\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Search probable lines on lines list */
|
||||
for (i = 0; i < nlines; i++) {
|
||||
line = dwarf_onesrcline(lines, i);
|
||||
if (dwarf_lineno(line, &lineno) != 0 ||
|
||||
(lf->lno_s > lineno || lf->lno_e < lineno))
|
||||
continue;
|
||||
|
||||
if (sp_die) {
|
||||
/* Address filtering 1: does sp_die include addr? */
|
||||
if (dwarf_lineaddr(line, &addr) != 0 ||
|
||||
!dwarf_haspc(sp_die, addr))
|
||||
continue;
|
||||
|
||||
/* Address filtering 2: No child include addr? */
|
||||
if (die_find_inlinefunc(sp_die, addr, &die_mem))
|
||||
continue;
|
||||
}
|
||||
|
||||
/* TODO: Get fileno from line, but how? */
|
||||
src = dwarf_linesrc(line, NULL, NULL);
|
||||
if (strtailcmp(src, lf->fname) != 0)
|
||||
continue;
|
||||
|
||||
ret = line_range_add_line(src, lineno, lf->lr);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dwarf lines doesn't include function declarations. We have to
|
||||
* check functions list or given function.
|
||||
*/
|
||||
if (sp_die) {
|
||||
src = dwarf_decl_file(sp_die);
|
||||
if (src && dwarf_decl_line(sp_die, &lineno) == 0 &&
|
||||
(lf->lno_s <= lineno && lf->lno_e >= lineno))
|
||||
ret = line_range_add_line(src, lineno, lf->lr);
|
||||
} else
|
||||
ret = find_line_range_func_decl_lines(lf);
|
||||
ret = die_walk_lines(sp_die ?: &lf->cu_die, line_range_walk_cb, lf);
|
||||
|
||||
/* Update status */
|
||||
if (ret >= 0)
|
||||
@@ -1758,9 +1840,6 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
|
||||
struct line_finder *lf = param->data;
|
||||
struct line_range *lr = lf->lr;
|
||||
|
||||
pr_debug("find (%llx) %s\n",
|
||||
(unsigned long long)dwarf_dieoffset(sp_die),
|
||||
dwarf_diename(sp_die));
|
||||
if (dwarf_tag(sp_die) == DW_TAG_subprogram &&
|
||||
die_compare_name(sp_die, lr->function)) {
|
||||
lf->fname = dwarf_decl_file(sp_die);
|
||||
|
||||
896
tools/perf/util/python.c
Normal file
896
tools/perf/util/python.c
Normal file
@@ -0,0 +1,896 @@
|
||||
#include <Python.h>
|
||||
#include <structmember.h>
|
||||
#include <inttypes.h>
|
||||
#include <poll.h>
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "event.h"
|
||||
#include "cpumap.h"
|
||||
#include "thread_map.h"
|
||||
|
||||
/* Define PyVarObject_HEAD_INIT for python 2.5 */
|
||||
#ifndef PyVarObject_HEAD_INIT
|
||||
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
|
||||
#endif
|
||||
|
||||
struct throttle_event {
|
||||
struct perf_event_header header;
|
||||
u64 time;
|
||||
u64 id;
|
||||
u64 stream_id;
|
||||
};
|
||||
|
||||
PyMODINIT_FUNC initperf(void);
|
||||
|
||||
#define member_def(type, member, ptype, help) \
|
||||
{ #member, ptype, \
|
||||
offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
|
||||
0, help }
|
||||
|
||||
#define sample_member_def(name, member, ptype, help) \
|
||||
{ #name, ptype, \
|
||||
offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
|
||||
0, help }
|
||||
|
||||
struct pyrf_event {
|
||||
PyObject_HEAD
|
||||
struct perf_sample sample;
|
||||
union perf_event event;
|
||||
};
|
||||
|
||||
#define sample_members \
|
||||
sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \
|
||||
sample_member_def(sample_pid, pid, T_INT, "event pid"), \
|
||||
sample_member_def(sample_tid, tid, T_INT, "event tid"), \
|
||||
sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \
|
||||
sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"), \
|
||||
sample_member_def(sample_id, id, T_ULONGLONG, "event id"), \
|
||||
sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
|
||||
sample_member_def(sample_period, period, T_ULONGLONG, "event period"), \
|
||||
sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
|
||||
|
||||
static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");
|
||||
|
||||
static PyMemberDef pyrf_mmap_event__members[] = {
|
||||
sample_members
|
||||
member_def(perf_event_header, type, T_UINT, "event type"),
|
||||
member_def(mmap_event, pid, T_UINT, "event pid"),
|
||||
member_def(mmap_event, tid, T_UINT, "event tid"),
|
||||
member_def(mmap_event, start, T_ULONGLONG, "start of the map"),
|
||||
member_def(mmap_event, len, T_ULONGLONG, "map length"),
|
||||
member_def(mmap_event, pgoff, T_ULONGLONG, "page offset"),
|
||||
member_def(mmap_event, filename, T_STRING_INPLACE, "backing store"),
|
||||
{ .name = NULL, },
|
||||
};
|
||||
|
||||
static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
|
||||
{
|
||||
PyObject *ret;
|
||||
char *s;
|
||||
|
||||
if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRIx64 ", "
|
||||
"length: %#" PRIx64 ", offset: %#" PRIx64 ", "
|
||||
"filename: %s }",
|
||||
pevent->event.mmap.pid, pevent->event.mmap.tid,
|
||||
pevent->event.mmap.start, pevent->event.mmap.len,
|
||||
pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
|
||||
ret = PyErr_NoMemory();
|
||||
} else {
|
||||
ret = PyString_FromString(s);
|
||||
free(s);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static PyTypeObject pyrf_mmap_event__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.mmap_event",
|
||||
.tp_basicsize = sizeof(struct pyrf_event),
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_mmap_event__doc,
|
||||
.tp_members = pyrf_mmap_event__members,
|
||||
.tp_repr = (reprfunc)pyrf_mmap_event__repr,
|
||||
};
|
||||
|
||||
static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");
|
||||
|
||||
static PyMemberDef pyrf_task_event__members[] = {
|
||||
sample_members
|
||||
member_def(perf_event_header, type, T_UINT, "event type"),
|
||||
member_def(fork_event, pid, T_UINT, "event pid"),
|
||||
member_def(fork_event, ppid, T_UINT, "event ppid"),
|
||||
member_def(fork_event, tid, T_UINT, "event tid"),
|
||||
member_def(fork_event, ptid, T_UINT, "event ptid"),
|
||||
member_def(fork_event, time, T_ULONGLONG, "timestamp"),
|
||||
{ .name = NULL, },
|
||||
};
|
||||
|
||||
static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
|
||||
{
|
||||
return PyString_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
|
||||
"ptid: %u, time: %" PRIu64 "}",
|
||||
pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
|
||||
pevent->event.fork.pid,
|
||||
pevent->event.fork.ppid,
|
||||
pevent->event.fork.tid,
|
||||
pevent->event.fork.ptid,
|
||||
pevent->event.fork.time);
|
||||
}
|
||||
|
||||
static PyTypeObject pyrf_task_event__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.task_event",
|
||||
.tp_basicsize = sizeof(struct pyrf_event),
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_task_event__doc,
|
||||
.tp_members = pyrf_task_event__members,
|
||||
.tp_repr = (reprfunc)pyrf_task_event__repr,
|
||||
};
|
||||
|
||||
static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");
|
||||
|
||||
static PyMemberDef pyrf_comm_event__members[] = {
|
||||
sample_members
|
||||
member_def(perf_event_header, type, T_UINT, "event type"),
|
||||
member_def(comm_event, pid, T_UINT, "event pid"),
|
||||
member_def(comm_event, tid, T_UINT, "event tid"),
|
||||
member_def(comm_event, comm, T_STRING_INPLACE, "process name"),
|
||||
{ .name = NULL, },
|
||||
};
|
||||
|
||||
static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
|
||||
{
|
||||
return PyString_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
|
||||
pevent->event.comm.pid,
|
||||
pevent->event.comm.tid,
|
||||
pevent->event.comm.comm);
|
||||
}
|
||||
|
||||
static PyTypeObject pyrf_comm_event__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.comm_event",
|
||||
.tp_basicsize = sizeof(struct pyrf_event),
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_comm_event__doc,
|
||||
.tp_members = pyrf_comm_event__members,
|
||||
.tp_repr = (reprfunc)pyrf_comm_event__repr,
|
||||
};
|
||||
|
||||
static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");
|
||||
|
||||
static PyMemberDef pyrf_throttle_event__members[] = {
|
||||
sample_members
|
||||
member_def(perf_event_header, type, T_UINT, "event type"),
|
||||
member_def(throttle_event, time, T_ULONGLONG, "timestamp"),
|
||||
member_def(throttle_event, id, T_ULONGLONG, "event id"),
|
||||
member_def(throttle_event, stream_id, T_ULONGLONG, "event stream id"),
|
||||
{ .name = NULL, },
|
||||
};
|
||||
|
||||
static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
|
||||
{
|
||||
struct throttle_event *te = (struct throttle_event *)(&pevent->event.header + 1);
|
||||
|
||||
return PyString_FromFormat("{ type: %sthrottle, time: %" PRIu64 ", id: %" PRIu64
|
||||
", stream_id: %" PRIu64 " }",
|
||||
pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
|
||||
te->time, te->id, te->stream_id);
|
||||
}
|
||||
|
||||
static PyTypeObject pyrf_throttle_event__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.throttle_event",
|
||||
.tp_basicsize = sizeof(struct pyrf_event),
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_throttle_event__doc,
|
||||
.tp_members = pyrf_throttle_event__members,
|
||||
.tp_repr = (reprfunc)pyrf_throttle_event__repr,
|
||||
};
|
||||
|
||||
static int pyrf_event__setup_types(void)
|
||||
{
|
||||
int err;
|
||||
pyrf_mmap_event__type.tp_new =
|
||||
pyrf_task_event__type.tp_new =
|
||||
pyrf_comm_event__type.tp_new =
|
||||
pyrf_throttle_event__type.tp_new = PyType_GenericNew;
|
||||
err = PyType_Ready(&pyrf_mmap_event__type);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
err = PyType_Ready(&pyrf_task_event__type);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
err = PyType_Ready(&pyrf_comm_event__type);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
err = PyType_Ready(&pyrf_throttle_event__type);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static PyTypeObject *pyrf_event__type[] = {
|
||||
[PERF_RECORD_MMAP] = &pyrf_mmap_event__type,
|
||||
[PERF_RECORD_LOST] = &pyrf_mmap_event__type,
|
||||
[PERF_RECORD_COMM] = &pyrf_comm_event__type,
|
||||
[PERF_RECORD_EXIT] = &pyrf_task_event__type,
|
||||
[PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type,
|
||||
[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
|
||||
[PERF_RECORD_FORK] = &pyrf_task_event__type,
|
||||
[PERF_RECORD_READ] = &pyrf_mmap_event__type,
|
||||
[PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type,
|
||||
};
|
||||
|
||||
static PyObject *pyrf_event__new(union perf_event *event)
|
||||
{
|
||||
struct pyrf_event *pevent;
|
||||
PyTypeObject *ptype;
|
||||
|
||||
if (event->header.type < PERF_RECORD_MMAP ||
|
||||
event->header.type > PERF_RECORD_SAMPLE)
|
||||
return NULL;
|
||||
|
||||
ptype = pyrf_event__type[event->header.type];
|
||||
pevent = PyObject_New(struct pyrf_event, ptype);
|
||||
if (pevent != NULL)
|
||||
memcpy(&pevent->event, event, event->header.size);
|
||||
return (PyObject *)pevent;
|
||||
}
|
||||
|
||||
struct pyrf_cpu_map {
|
||||
PyObject_HEAD
|
||||
|
||||
struct cpu_map *cpus;
|
||||
};
|
||||
|
||||
static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
static char *kwlist[] = { "cpustr", NULL, NULL, };
|
||||
char *cpustr = NULL;
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
|
||||
kwlist, &cpustr))
|
||||
return -1;
|
||||
|
||||
pcpus->cpus = cpu_map__new(cpustr);
|
||||
if (pcpus->cpus == NULL)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
|
||||
{
|
||||
cpu_map__delete(pcpus->cpus);
|
||||
pcpus->ob_type->tp_free((PyObject*)pcpus);
|
||||
}
|
||||
|
||||
static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
|
||||
{
|
||||
struct pyrf_cpu_map *pcpus = (void *)obj;
|
||||
|
||||
return pcpus->cpus->nr;
|
||||
}
|
||||
|
||||
static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
|
||||
{
|
||||
struct pyrf_cpu_map *pcpus = (void *)obj;
|
||||
|
||||
if (i >= pcpus->cpus->nr)
|
||||
return NULL;
|
||||
|
||||
return Py_BuildValue("i", pcpus->cpus->map[i]);
|
||||
}
|
||||
|
||||
static PySequenceMethods pyrf_cpu_map__sequence_methods = {
|
||||
.sq_length = pyrf_cpu_map__length,
|
||||
.sq_item = pyrf_cpu_map__item,
|
||||
};
|
||||
|
||||
static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");
|
||||
|
||||
static PyTypeObject pyrf_cpu_map__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.cpu_map",
|
||||
.tp_basicsize = sizeof(struct pyrf_cpu_map),
|
||||
.tp_dealloc = (destructor)pyrf_cpu_map__delete,
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_cpu_map__doc,
|
||||
.tp_as_sequence = &pyrf_cpu_map__sequence_methods,
|
||||
.tp_init = (initproc)pyrf_cpu_map__init,
|
||||
};
|
||||
|
||||
static int pyrf_cpu_map__setup_types(void)
|
||||
{
|
||||
pyrf_cpu_map__type.tp_new = PyType_GenericNew;
|
||||
return PyType_Ready(&pyrf_cpu_map__type);
|
||||
}
|
||||
|
||||
struct pyrf_thread_map {
|
||||
PyObject_HEAD
|
||||
|
||||
struct thread_map *threads;
|
||||
};
|
||||
|
||||
static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
static char *kwlist[] = { "pid", "tid", NULL, NULL, };
|
||||
int pid = -1, tid = -1;
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii",
|
||||
kwlist, &pid, &tid))
|
||||
return -1;
|
||||
|
||||
pthreads->threads = thread_map__new(pid, tid);
|
||||
if (pthreads->threads == NULL)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
|
||||
{
|
||||
thread_map__delete(pthreads->threads);
|
||||
pthreads->ob_type->tp_free((PyObject*)pthreads);
|
||||
}
|
||||
|
||||
static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
|
||||
{
|
||||
struct pyrf_thread_map *pthreads = (void *)obj;
|
||||
|
||||
return pthreads->threads->nr;
|
||||
}
|
||||
|
||||
static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
|
||||
{
|
||||
struct pyrf_thread_map *pthreads = (void *)obj;
|
||||
|
||||
if (i >= pthreads->threads->nr)
|
||||
return NULL;
|
||||
|
||||
return Py_BuildValue("i", pthreads->threads->map[i]);
|
||||
}
|
||||
|
||||
static PySequenceMethods pyrf_thread_map__sequence_methods = {
|
||||
.sq_length = pyrf_thread_map__length,
|
||||
.sq_item = pyrf_thread_map__item,
|
||||
};
|
||||
|
||||
static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");
|
||||
|
||||
static PyTypeObject pyrf_thread_map__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.thread_map",
|
||||
.tp_basicsize = sizeof(struct pyrf_thread_map),
|
||||
.tp_dealloc = (destructor)pyrf_thread_map__delete,
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_thread_map__doc,
|
||||
.tp_as_sequence = &pyrf_thread_map__sequence_methods,
|
||||
.tp_init = (initproc)pyrf_thread_map__init,
|
||||
};
|
||||
|
||||
static int pyrf_thread_map__setup_types(void)
|
||||
{
|
||||
pyrf_thread_map__type.tp_new = PyType_GenericNew;
|
||||
return PyType_Ready(&pyrf_thread_map__type);
|
||||
}
|
||||
|
||||
struct pyrf_evsel {
|
||||
PyObject_HEAD
|
||||
|
||||
struct perf_evsel evsel;
|
||||
};
|
||||
|
||||
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
struct perf_event_attr attr = {
|
||||
.type = PERF_TYPE_HARDWARE,
|
||||
.config = PERF_COUNT_HW_CPU_CYCLES,
|
||||
.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
|
||||
};
|
||||
static char *kwlist[] = {
|
||||
"type",
|
||||
"config",
|
||||
"sample_freq",
|
||||
"sample_period",
|
||||
"sample_type",
|
||||
"read_format",
|
||||
"disabled",
|
||||
"inherit",
|
||||
"pinned",
|
||||
"exclusive",
|
||||
"exclude_user",
|
||||
"exclude_kernel",
|
||||
"exclude_hv",
|
||||
"exclude_idle",
|
||||
"mmap",
|
||||
"comm",
|
||||
"freq",
|
||||
"inherit_stat",
|
||||
"enable_on_exec",
|
||||
"task",
|
||||
"watermark",
|
||||
"precise_ip",
|
||||
"mmap_data",
|
||||
"sample_id_all",
|
||||
"wakeup_events",
|
||||
"bp_type",
|
||||
"bp_addr",
|
||||
"bp_len", NULL, NULL, };
|
||||
u64 sample_period = 0;
|
||||
u32 disabled = 0,
|
||||
inherit = 0,
|
||||
pinned = 0,
|
||||
exclusive = 0,
|
||||
exclude_user = 0,
|
||||
exclude_kernel = 0,
|
||||
exclude_hv = 0,
|
||||
exclude_idle = 0,
|
||||
mmap = 0,
|
||||
comm = 0,
|
||||
freq = 1,
|
||||
inherit_stat = 0,
|
||||
enable_on_exec = 0,
|
||||
task = 0,
|
||||
watermark = 0,
|
||||
precise_ip = 0,
|
||||
mmap_data = 0,
|
||||
sample_id_all = 1;
|
||||
int idx = 0;
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs,
|
||||
"|iKiKKiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
|
||||
&attr.type, &attr.config, &attr.sample_freq,
|
||||
&sample_period, &attr.sample_type,
|
||||
&attr.read_format, &disabled, &inherit,
|
||||
&pinned, &exclusive, &exclude_user,
|
||||
&exclude_kernel, &exclude_hv, &exclude_idle,
|
||||
&mmap, &comm, &freq, &inherit_stat,
|
||||
&enable_on_exec, &task, &watermark,
|
||||
&precise_ip, &mmap_data, &sample_id_all,
|
||||
&attr.wakeup_events, &attr.bp_type,
|
||||
&attr.bp_addr, &attr.bp_len, &idx))
|
||||
return -1;
|
||||
|
||||
/* union... */
|
||||
if (sample_period != 0) {
|
||||
if (attr.sample_freq != 0)
|
||||
return -1; /* FIXME: throw right exception */
|
||||
attr.sample_period = sample_period;
|
||||
}
|
||||
|
||||
/* Bitfields */
|
||||
attr.disabled = disabled;
|
||||
attr.inherit = inherit;
|
||||
attr.pinned = pinned;
|
||||
attr.exclusive = exclusive;
|
||||
attr.exclude_user = exclude_user;
|
||||
attr.exclude_kernel = exclude_kernel;
|
||||
attr.exclude_hv = exclude_hv;
|
||||
attr.exclude_idle = exclude_idle;
|
||||
attr.mmap = mmap;
|
||||
attr.comm = comm;
|
||||
attr.freq = freq;
|
||||
attr.inherit_stat = inherit_stat;
|
||||
attr.enable_on_exec = enable_on_exec;
|
||||
attr.task = task;
|
||||
attr.watermark = watermark;
|
||||
attr.precise_ip = precise_ip;
|
||||
attr.mmap_data = mmap_data;
|
||||
attr.sample_id_all = sample_id_all;
|
||||
|
||||
perf_evsel__init(&pevsel->evsel, &attr, idx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
|
||||
{
|
||||
perf_evsel__exit(&pevsel->evsel);
|
||||
pevsel->ob_type->tp_free((PyObject*)pevsel);
|
||||
}
|
||||
|
||||
static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
struct perf_evsel *evsel = &pevsel->evsel;
|
||||
struct cpu_map *cpus = NULL;
|
||||
struct thread_map *threads = NULL;
|
||||
PyObject *pcpus = NULL, *pthreads = NULL;
|
||||
int group = 0, overwrite = 0;
|
||||
static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
|
||||
&pcpus, &pthreads, &group, &overwrite))
|
||||
return NULL;
|
||||
|
||||
if (pthreads != NULL)
|
||||
threads = ((struct pyrf_thread_map *)pthreads)->threads;
|
||||
|
||||
if (pcpus != NULL)
|
||||
cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
|
||||
|
||||
if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
|
||||
PyErr_SetFromErrno(PyExc_OSError);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
}
|
||||
|
||||
static PyMethodDef pyrf_evsel__methods[] = {
|
||||
{
|
||||
.ml_name = "open",
|
||||
.ml_meth = (PyCFunction)pyrf_evsel__open,
|
||||
.ml_flags = METH_VARARGS | METH_KEYWORDS,
|
||||
.ml_doc = PyDoc_STR("open the event selector file descriptor table.")
|
||||
},
|
||||
{ .ml_name = NULL, }
|
||||
};
|
||||
|
||||
static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");
|
||||
|
||||
static PyTypeObject pyrf_evsel__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.evsel",
|
||||
.tp_basicsize = sizeof(struct pyrf_evsel),
|
||||
.tp_dealloc = (destructor)pyrf_evsel__delete,
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_doc = pyrf_evsel__doc,
|
||||
.tp_methods = pyrf_evsel__methods,
|
||||
.tp_init = (initproc)pyrf_evsel__init,
|
||||
};
|
||||
|
||||
static int pyrf_evsel__setup_types(void)
|
||||
{
|
||||
pyrf_evsel__type.tp_new = PyType_GenericNew;
|
||||
return PyType_Ready(&pyrf_evsel__type);
|
||||
}
|
||||
|
||||
struct pyrf_evlist {
|
||||
PyObject_HEAD
|
||||
|
||||
struct perf_evlist evlist;
|
||||
};
|
||||
|
||||
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
|
||||
PyObject *args, PyObject *kwargs __used)
|
||||
{
|
||||
PyObject *pcpus = NULL, *pthreads = NULL;
|
||||
struct cpu_map *cpus;
|
||||
struct thread_map *threads;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
|
||||
return -1;
|
||||
|
||||
threads = ((struct pyrf_thread_map *)pthreads)->threads;
|
||||
cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
|
||||
perf_evlist__init(&pevlist->evlist, cpus, threads);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
|
||||
{
|
||||
perf_evlist__exit(&pevlist->evlist);
|
||||
pevlist->ob_type->tp_free((PyObject*)pevlist);
|
||||
}
|
||||
|
||||
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
struct perf_evlist *evlist = &pevlist->evlist;
|
||||
static char *kwlist[] = {"pages", "overwrite",
|
||||
NULL, NULL};
|
||||
int pages = 128, overwrite = false;
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
|
||||
&pages, &overwrite))
|
||||
return NULL;
|
||||
|
||||
if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
|
||||
PyErr_SetFromErrno(PyExc_OSError);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
}
|
||||
|
||||
static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
struct perf_evlist *evlist = &pevlist->evlist;
|
||||
static char *kwlist[] = {"timeout", NULL, NULL};
|
||||
int timeout = -1, n;
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
|
||||
return NULL;
|
||||
|
||||
n = poll(evlist->pollfd, evlist->nr_fds, timeout);
|
||||
if (n < 0) {
|
||||
PyErr_SetFromErrno(PyExc_OSError);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return Py_BuildValue("i", n);
|
||||
}
|
||||
|
||||
static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
|
||||
PyObject *args __used, PyObject *kwargs __used)
|
||||
{
|
||||
struct perf_evlist *evlist = &pevlist->evlist;
|
||||
PyObject *list = PyList_New(0);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < evlist->nr_fds; ++i) {
|
||||
PyObject *file;
|
||||
FILE *fp = fdopen(evlist->pollfd[i].fd, "r");
|
||||
|
||||
if (fp == NULL)
|
||||
goto free_list;
|
||||
|
||||
file = PyFile_FromFile(fp, "perf", "r", NULL);
|
||||
if (file == NULL)
|
||||
goto free_list;
|
||||
|
||||
if (PyList_Append(list, file) != 0) {
|
||||
Py_DECREF(file);
|
||||
goto free_list;
|
||||
}
|
||||
|
||||
Py_DECREF(file);
|
||||
}
|
||||
|
||||
return list;
|
||||
free_list:
|
||||
return PyErr_NoMemory();
|
||||
}
|
||||
|
||||
|
||||
static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
|
||||
PyObject *args, PyObject *kwargs __used)
|
||||
{
|
||||
struct perf_evlist *evlist = &pevlist->evlist;
|
||||
PyObject *pevsel;
|
||||
struct perf_evsel *evsel;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "O", &pevsel))
|
||||
return NULL;
|
||||
|
||||
Py_INCREF(pevsel);
|
||||
evsel = &((struct pyrf_evsel *)pevsel)->evsel;
|
||||
evsel->idx = evlist->nr_entries;
|
||||
perf_evlist__add(evlist, evsel);
|
||||
|
||||
return Py_BuildValue("i", evlist->nr_entries);
|
||||
}
|
||||
|
||||
static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
|
||||
PyObject *args, PyObject *kwargs)
|
||||
{
|
||||
struct perf_evlist *evlist = &pevlist->evlist;
|
||||
union perf_event *event;
|
||||
int sample_id_all = 1, cpu;
|
||||
static char *kwlist[] = {"sample_id_all", NULL, NULL};
|
||||
|
||||
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
|
||||
&cpu, &sample_id_all))
|
||||
return NULL;
|
||||
|
||||
event = perf_evlist__read_on_cpu(evlist, cpu);
|
||||
if (event != NULL) {
|
||||
struct perf_evsel *first;
|
||||
PyObject *pyevent = pyrf_event__new(event);
|
||||
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
|
||||
|
||||
if (pyevent == NULL)
|
||||
return PyErr_NoMemory();
|
||||
|
||||
first = list_entry(evlist->entries.next, struct perf_evsel, node);
|
||||
perf_event__parse_sample(event, first->attr.sample_type, sample_id_all,
|
||||
&pevent->sample);
|
||||
return pyevent;
|
||||
}
|
||||
|
||||
Py_INCREF(Py_None);
|
||||
return Py_None;
|
||||
}
|
||||
|
||||
static PyMethodDef pyrf_evlist__methods[] = {
|
||||
{
|
||||
.ml_name = "mmap",
|
||||
.ml_meth = (PyCFunction)pyrf_evlist__mmap,
|
||||
.ml_flags = METH_VARARGS | METH_KEYWORDS,
|
||||
.ml_doc = PyDoc_STR("mmap the file descriptor table.")
|
||||
},
|
||||
{
|
||||
.ml_name = "poll",
|
||||
.ml_meth = (PyCFunction)pyrf_evlist__poll,
|
||||
.ml_flags = METH_VARARGS | METH_KEYWORDS,
|
||||
.ml_doc = PyDoc_STR("poll the file descriptor table.")
|
||||
},
|
||||
{
|
||||
.ml_name = "get_pollfd",
|
||||
.ml_meth = (PyCFunction)pyrf_evlist__get_pollfd,
|
||||
.ml_flags = METH_VARARGS | METH_KEYWORDS,
|
||||
.ml_doc = PyDoc_STR("get the poll file descriptor table.")
|
||||
},
|
||||
{
|
||||
.ml_name = "add",
|
||||
.ml_meth = (PyCFunction)pyrf_evlist__add,
|
||||
.ml_flags = METH_VARARGS | METH_KEYWORDS,
|
||||
.ml_doc = PyDoc_STR("adds an event selector to the list.")
|
||||
},
|
||||
{
|
||||
.ml_name = "read_on_cpu",
|
||||
.ml_meth = (PyCFunction)pyrf_evlist__read_on_cpu,
|
||||
.ml_flags = METH_VARARGS | METH_KEYWORDS,
|
||||
.ml_doc = PyDoc_STR("reads an event.")
|
||||
},
|
||||
{ .ml_name = NULL, }
|
||||
};
|
||||
|
||||
static Py_ssize_t pyrf_evlist__length(PyObject *obj)
|
||||
{
|
||||
struct pyrf_evlist *pevlist = (void *)obj;
|
||||
|
||||
return pevlist->evlist.nr_entries;
|
||||
}
|
||||
|
||||
static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
|
||||
{
|
||||
struct pyrf_evlist *pevlist = (void *)obj;
|
||||
struct perf_evsel *pos;
|
||||
|
||||
if (i >= pevlist->evlist.nr_entries)
|
||||
return NULL;
|
||||
|
||||
list_for_each_entry(pos, &pevlist->evlist.entries, node)
|
||||
if (i-- == 0)
|
||||
break;
|
||||
|
||||
return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
|
||||
}
|
||||
|
||||
static PySequenceMethods pyrf_evlist__sequence_methods = {
|
||||
.sq_length = pyrf_evlist__length,
|
||||
.sq_item = pyrf_evlist__item,
|
||||
};
|
||||
|
||||
static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
|
||||
|
||||
static PyTypeObject pyrf_evlist__type = {
|
||||
PyVarObject_HEAD_INIT(NULL, 0)
|
||||
.tp_name = "perf.evlist",
|
||||
.tp_basicsize = sizeof(struct pyrf_evlist),
|
||||
.tp_dealloc = (destructor)pyrf_evlist__delete,
|
||||
.tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
|
||||
.tp_as_sequence = &pyrf_evlist__sequence_methods,
|
||||
.tp_doc = pyrf_evlist__doc,
|
||||
.tp_methods = pyrf_evlist__methods,
|
||||
.tp_init = (initproc)pyrf_evlist__init,
|
||||
};
|
||||
|
||||
static int pyrf_evlist__setup_types(void)
|
||||
{
|
||||
pyrf_evlist__type.tp_new = PyType_GenericNew;
|
||||
return PyType_Ready(&pyrf_evlist__type);
|
||||
}
|
||||
|
||||
static struct {
|
||||
const char *name;
|
||||
int value;
|
||||
} perf__constants[] = {
|
||||
{ "TYPE_HARDWARE", PERF_TYPE_HARDWARE },
|
||||
{ "TYPE_SOFTWARE", PERF_TYPE_SOFTWARE },
|
||||
{ "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT },
|
||||
{ "TYPE_HW_CACHE", PERF_TYPE_HW_CACHE },
|
||||
{ "TYPE_RAW", PERF_TYPE_RAW },
|
||||
{ "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT },
|
||||
|
||||
{ "COUNT_HW_CPU_CYCLES", PERF_COUNT_HW_CPU_CYCLES },
|
||||
{ "COUNT_HW_INSTRUCTIONS", PERF_COUNT_HW_INSTRUCTIONS },
|
||||
{ "COUNT_HW_CACHE_REFERENCES", PERF_COUNT_HW_CACHE_REFERENCES },
|
||||
{ "COUNT_HW_CACHE_MISSES", PERF_COUNT_HW_CACHE_MISSES },
|
||||
{ "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
|
||||
{ "COUNT_HW_BRANCH_MISSES", PERF_COUNT_HW_BRANCH_MISSES },
|
||||
{ "COUNT_HW_BUS_CYCLES", PERF_COUNT_HW_BUS_CYCLES },
|
||||
{ "COUNT_HW_CACHE_L1D", PERF_COUNT_HW_CACHE_L1D },
|
||||
{ "COUNT_HW_CACHE_L1I", PERF_COUNT_HW_CACHE_L1I },
|
||||
{ "COUNT_HW_CACHE_LL", PERF_COUNT_HW_CACHE_LL },
|
||||
{ "COUNT_HW_CACHE_DTLB", PERF_COUNT_HW_CACHE_DTLB },
|
||||
{ "COUNT_HW_CACHE_ITLB", PERF_COUNT_HW_CACHE_ITLB },
|
||||
{ "COUNT_HW_CACHE_BPU", PERF_COUNT_HW_CACHE_BPU },
|
||||
{ "COUNT_HW_CACHE_OP_READ", PERF_COUNT_HW_CACHE_OP_READ },
|
||||
{ "COUNT_HW_CACHE_OP_WRITE", PERF_COUNT_HW_CACHE_OP_WRITE },
|
||||
{ "COUNT_HW_CACHE_OP_PREFETCH", PERF_COUNT_HW_CACHE_OP_PREFETCH },
|
||||
{ "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
|
||||
{ "COUNT_HW_CACHE_RESULT_MISS", PERF_COUNT_HW_CACHE_RESULT_MISS },
|
||||
|
||||
{ "COUNT_SW_CPU_CLOCK", PERF_COUNT_SW_CPU_CLOCK },
|
||||
{ "COUNT_SW_TASK_CLOCK", PERF_COUNT_SW_TASK_CLOCK },
|
||||
{ "COUNT_SW_PAGE_FAULTS", PERF_COUNT_SW_PAGE_FAULTS },
|
||||
{ "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES },
|
||||
{ "COUNT_SW_CPU_MIGRATIONS", PERF_COUNT_SW_CPU_MIGRATIONS },
|
||||
{ "COUNT_SW_PAGE_FAULTS_MIN", PERF_COUNT_SW_PAGE_FAULTS_MIN },
|
||||
{ "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ },
|
||||
{ "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
|
||||
{ "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
|
||||
|
||||
{ "SAMPLE_IP", PERF_SAMPLE_IP },
|
||||
{ "SAMPLE_TID", PERF_SAMPLE_TID },
|
||||
{ "SAMPLE_TIME", PERF_SAMPLE_TIME },
|
||||
{ "SAMPLE_ADDR", PERF_SAMPLE_ADDR },
|
||||
{ "SAMPLE_READ", PERF_SAMPLE_READ },
|
||||
{ "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN },
|
||||
{ "SAMPLE_ID", PERF_SAMPLE_ID },
|
||||
{ "SAMPLE_CPU", PERF_SAMPLE_CPU },
|
||||
{ "SAMPLE_PERIOD", PERF_SAMPLE_PERIOD },
|
||||
{ "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID },
|
||||
{ "SAMPLE_RAW", PERF_SAMPLE_RAW },
|
||||
|
||||
{ "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED },
|
||||
{ "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING },
|
||||
{ "FORMAT_ID", PERF_FORMAT_ID },
|
||||
{ "FORMAT_GROUP", PERF_FORMAT_GROUP },
|
||||
|
||||
{ "RECORD_MMAP", PERF_RECORD_MMAP },
|
||||
{ "RECORD_LOST", PERF_RECORD_LOST },
|
||||
{ "RECORD_COMM", PERF_RECORD_COMM },
|
||||
{ "RECORD_EXIT", PERF_RECORD_EXIT },
|
||||
{ "RECORD_THROTTLE", PERF_RECORD_THROTTLE },
|
||||
{ "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE },
|
||||
{ "RECORD_FORK", PERF_RECORD_FORK },
|
||||
{ "RECORD_READ", PERF_RECORD_READ },
|
||||
{ "RECORD_SAMPLE", PERF_RECORD_SAMPLE },
|
||||
{ .name = NULL, },
|
||||
};
|
||||
|
||||
static PyMethodDef perf__methods[] = {
|
||||
{ .ml_name = NULL, }
|
||||
};
|
||||
|
||||
PyMODINIT_FUNC initperf(void)
|
||||
{
|
||||
PyObject *obj;
|
||||
int i;
|
||||
PyObject *dict, *module = Py_InitModule("perf", perf__methods);
|
||||
|
||||
if (module == NULL ||
|
||||
pyrf_event__setup_types() < 0 ||
|
||||
pyrf_evlist__setup_types() < 0 ||
|
||||
pyrf_evsel__setup_types() < 0 ||
|
||||
pyrf_thread_map__setup_types() < 0 ||
|
||||
pyrf_cpu_map__setup_types() < 0)
|
||||
return;
|
||||
|
||||
Py_INCREF(&pyrf_evlist__type);
|
||||
PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
|
||||
|
||||
Py_INCREF(&pyrf_evsel__type);
|
||||
PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
|
||||
|
||||
Py_INCREF(&pyrf_thread_map__type);
|
||||
PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
|
||||
|
||||
Py_INCREF(&pyrf_cpu_map__type);
|
||||
PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
|
||||
|
||||
dict = PyModule_GetDict(module);
|
||||
if (dict == NULL)
|
||||
goto error;
|
||||
|
||||
for (i = 0; perf__constants[i].name != NULL; i++) {
|
||||
obj = PyInt_FromLong(perf__constants[i].value);
|
||||
if (obj == NULL)
|
||||
goto error;
|
||||
PyDict_SetItemString(dict, perf__constants[i].name, obj);
|
||||
Py_DECREF(obj);
|
||||
}
|
||||
|
||||
error:
|
||||
if (PyErr_Occurred())
|
||||
PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
|
||||
}
|
||||
@@ -248,8 +248,7 @@ static void python_process_event(int cpu, void *data,
|
||||
context = PyCObject_FromVoidPtr(scripting_context, NULL);
|
||||
|
||||
PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
|
||||
PyTuple_SetItem(t, n++,
|
||||
PyCObject_FromVoidPtr(scripting_context, NULL));
|
||||
PyTuple_SetItem(t, n++, context);
|
||||
|
||||
if (handler) {
|
||||
PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
|
||||
|
||||
@@ -7,6 +7,8 @@
|
||||
#include <sys/types.h>
|
||||
#include <sys/mman.h>
|
||||
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "session.h"
|
||||
#include "sort.h"
|
||||
#include "util.h"
|
||||
@@ -19,7 +21,7 @@ static int perf_session__open(struct perf_session *self, bool force)
|
||||
self->fd_pipe = true;
|
||||
self->fd = STDIN_FILENO;
|
||||
|
||||
if (perf_header__read(self, self->fd) < 0)
|
||||
if (perf_session__read_header(self, self->fd) < 0)
|
||||
pr_err("incompatible file format");
|
||||
|
||||
return 0;
|
||||
@@ -51,7 +53,7 @@ static int perf_session__open(struct perf_session *self, bool force)
|
||||
goto out_close;
|
||||
}
|
||||
|
||||
if (perf_header__read(self, self->fd) < 0) {
|
||||
if (perf_session__read_header(self, self->fd) < 0) {
|
||||
pr_err("incompatible file format");
|
||||
goto out_close;
|
||||
}
|
||||
@@ -67,7 +69,7 @@ out_close:
|
||||
|
||||
static void perf_session__id_header_size(struct perf_session *session)
|
||||
{
|
||||
struct sample_data *data;
|
||||
struct perf_sample *data;
|
||||
u64 sample_type = session->sample_type;
|
||||
u16 size = 0;
|
||||
|
||||
@@ -92,21 +94,10 @@ out:
|
||||
session->id_hdr_size = size;
|
||||
}
|
||||
|
||||
void perf_session__set_sample_id_all(struct perf_session *session, bool value)
|
||||
{
|
||||
session->sample_id_all = value;
|
||||
perf_session__id_header_size(session);
|
||||
}
|
||||
|
||||
void perf_session__set_sample_type(struct perf_session *session, u64 type)
|
||||
{
|
||||
session->sample_type = type;
|
||||
}
|
||||
|
||||
void perf_session__update_sample_type(struct perf_session *self)
|
||||
{
|
||||
self->sample_type = perf_header__sample_type(&self->header);
|
||||
self->sample_id_all = perf_header__sample_id_all(&self->header);
|
||||
self->sample_type = perf_evlist__sample_type(self->evlist);
|
||||
self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
|
||||
perf_session__id_header_size(self);
|
||||
}
|
||||
|
||||
@@ -135,13 +126,9 @@ struct perf_session *perf_session__new(const char *filename, int mode,
|
||||
if (self == NULL)
|
||||
goto out;
|
||||
|
||||
if (perf_header__init(&self->header) < 0)
|
||||
goto out_free;
|
||||
|
||||
memcpy(self->filename, filename, len);
|
||||
self->threads = RB_ROOT;
|
||||
INIT_LIST_HEAD(&self->dead_threads);
|
||||
self->hists_tree = RB_ROOT;
|
||||
self->last_match = NULL;
|
||||
/*
|
||||
* On 64bit we can mmap the data file in one go. No need for tiny mmap
|
||||
@@ -162,17 +149,16 @@ struct perf_session *perf_session__new(const char *filename, int mode,
|
||||
if (mode == O_RDONLY) {
|
||||
if (perf_session__open(self, force) < 0)
|
||||
goto out_delete;
|
||||
perf_session__update_sample_type(self);
|
||||
} else if (mode == O_WRONLY) {
|
||||
/*
|
||||
* In O_RDONLY mode this will be performed when reading the
|
||||
* kernel MMAP event, in event__process_mmap().
|
||||
* kernel MMAP event, in perf_event__process_mmap().
|
||||
*/
|
||||
if (perf_session__create_kernel_maps(self) < 0)
|
||||
goto out_delete;
|
||||
}
|
||||
|
||||
perf_session__update_sample_type(self);
|
||||
|
||||
if (ops && ops->ordering_requires_timestamps &&
|
||||
ops->ordered_samples && !self->sample_id_all) {
|
||||
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
|
||||
@@ -181,9 +167,6 @@ struct perf_session *perf_session__new(const char *filename, int mode,
|
||||
|
||||
out:
|
||||
return self;
|
||||
out_free:
|
||||
free(self);
|
||||
return NULL;
|
||||
out_delete:
|
||||
perf_session__delete(self);
|
||||
return NULL;
|
||||
@@ -214,7 +197,6 @@ static void perf_session__delete_threads(struct perf_session *self)
|
||||
|
||||
void perf_session__delete(struct perf_session *self)
|
||||
{
|
||||
perf_header__exit(&self->header);
|
||||
perf_session__destroy_kernel_maps(self);
|
||||
perf_session__delete_dead_threads(self);
|
||||
perf_session__delete_threads(self);
|
||||
@@ -242,17 +224,16 @@ static bool symbol__match_parent_regex(struct symbol *sym)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
|
||||
struct thread *thread,
|
||||
struct ip_callchain *chain,
|
||||
struct symbol **parent)
|
||||
int perf_session__resolve_callchain(struct perf_session *self,
|
||||
struct thread *thread,
|
||||
struct ip_callchain *chain,
|
||||
struct symbol **parent)
|
||||
{
|
||||
u8 cpumode = PERF_RECORD_MISC_USER;
|
||||
unsigned int i;
|
||||
struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));
|
||||
int err;
|
||||
|
||||
if (!syms)
|
||||
return NULL;
|
||||
callchain_cursor_reset(&self->callchain_cursor);
|
||||
|
||||
for (i = 0; i < chain->nr; i++) {
|
||||
u64 ip = chain->ips[i];
|
||||
@@ -281,30 +262,33 @@ struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
|
||||
*parent = al.sym;
|
||||
if (!symbol_conf.use_callchain)
|
||||
break;
|
||||
syms[i].map = al.map;
|
||||
syms[i].sym = al.sym;
|
||||
}
|
||||
|
||||
err = callchain_cursor_append(&self->callchain_cursor,
|
||||
ip, al.map, al.sym);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return syms;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_event_synth_stub(event_t *event __used,
|
||||
static int process_event_synth_stub(union perf_event *event __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
dump_printf(": unhandled!\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_event_stub(event_t *event __used,
|
||||
struct sample_data *sample __used,
|
||||
static int process_event_stub(union perf_event *event __used,
|
||||
struct perf_sample *sample __used,
|
||||
struct perf_session *session __used)
|
||||
{
|
||||
dump_printf(": unhandled!\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_finished_round_stub(event_t *event __used,
|
||||
static int process_finished_round_stub(union perf_event *event __used,
|
||||
struct perf_session *session __used,
|
||||
struct perf_event_ops *ops __used)
|
||||
{
|
||||
@@ -312,7 +296,7 @@ static int process_finished_round_stub(event_t *event __used,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_finished_round(event_t *event,
|
||||
static int process_finished_round(union perf_event *event,
|
||||
struct perf_session *session,
|
||||
struct perf_event_ops *ops);
|
||||
|
||||
@@ -329,7 +313,7 @@ static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
|
||||
if (handler->exit == NULL)
|
||||
handler->exit = process_event_stub;
|
||||
if (handler->lost == NULL)
|
||||
handler->lost = event__process_lost;
|
||||
handler->lost = perf_event__process_lost;
|
||||
if (handler->read == NULL)
|
||||
handler->read = process_event_stub;
|
||||
if (handler->throttle == NULL)
|
||||
@@ -363,98 +347,98 @@ void mem_bswap_64(void *src, int byte_size)
|
||||
}
|
||||
}
|
||||
|
||||
static void event__all64_swap(event_t *self)
|
||||
static void perf_event__all64_swap(union perf_event *event)
|
||||
{
|
||||
struct perf_event_header *hdr = &self->header;
|
||||
mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
|
||||
struct perf_event_header *hdr = &event->header;
|
||||
mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
|
||||
}
|
||||
|
||||
static void event__comm_swap(event_t *self)
|
||||
static void perf_event__comm_swap(union perf_event *event)
|
||||
{
|
||||
self->comm.pid = bswap_32(self->comm.pid);
|
||||
self->comm.tid = bswap_32(self->comm.tid);
|
||||
event->comm.pid = bswap_32(event->comm.pid);
|
||||
event->comm.tid = bswap_32(event->comm.tid);
|
||||
}
|
||||
|
||||
static void event__mmap_swap(event_t *self)
|
||||
static void perf_event__mmap_swap(union perf_event *event)
|
||||
{
|
||||
self->mmap.pid = bswap_32(self->mmap.pid);
|
||||
self->mmap.tid = bswap_32(self->mmap.tid);
|
||||
self->mmap.start = bswap_64(self->mmap.start);
|
||||
self->mmap.len = bswap_64(self->mmap.len);
|
||||
self->mmap.pgoff = bswap_64(self->mmap.pgoff);
|
||||
event->mmap.pid = bswap_32(event->mmap.pid);
|
||||
event->mmap.tid = bswap_32(event->mmap.tid);
|
||||
event->mmap.start = bswap_64(event->mmap.start);
|
||||
event->mmap.len = bswap_64(event->mmap.len);
|
||||
event->mmap.pgoff = bswap_64(event->mmap.pgoff);
|
||||
}
|
||||
|
||||
static void event__task_swap(event_t *self)
|
||||
static void perf_event__task_swap(union perf_event *event)
|
||||
{
|
||||
self->fork.pid = bswap_32(self->fork.pid);
|
||||
self->fork.tid = bswap_32(self->fork.tid);
|
||||
self->fork.ppid = bswap_32(self->fork.ppid);
|
||||
self->fork.ptid = bswap_32(self->fork.ptid);
|
||||
self->fork.time = bswap_64(self->fork.time);
|
||||
event->fork.pid = bswap_32(event->fork.pid);
|
||||
event->fork.tid = bswap_32(event->fork.tid);
|
||||
event->fork.ppid = bswap_32(event->fork.ppid);
|
||||
event->fork.ptid = bswap_32(event->fork.ptid);
|
||||
event->fork.time = bswap_64(event->fork.time);
|
||||
}
|
||||
|
||||
static void event__read_swap(event_t *self)
|
||||
static void perf_event__read_swap(union perf_event *event)
|
||||
{
|
||||
self->read.pid = bswap_32(self->read.pid);
|
||||
self->read.tid = bswap_32(self->read.tid);
|
||||
self->read.value = bswap_64(self->read.value);
|
||||
self->read.time_enabled = bswap_64(self->read.time_enabled);
|
||||
self->read.time_running = bswap_64(self->read.time_running);
|
||||
self->read.id = bswap_64(self->read.id);
|
||||
event->read.pid = bswap_32(event->read.pid);
|
||||
event->read.tid = bswap_32(event->read.tid);
|
||||
event->read.value = bswap_64(event->read.value);
|
||||
event->read.time_enabled = bswap_64(event->read.time_enabled);
|
||||
event->read.time_running = bswap_64(event->read.time_running);
|
||||
event->read.id = bswap_64(event->read.id);
|
||||
}
|
||||
|
||||
static void event__attr_swap(event_t *self)
|
||||
static void perf_event__attr_swap(union perf_event *event)
|
||||
{
|
||||
size_t size;
|
||||
|
||||
self->attr.attr.type = bswap_32(self->attr.attr.type);
|
||||
self->attr.attr.size = bswap_32(self->attr.attr.size);
|
||||
self->attr.attr.config = bswap_64(self->attr.attr.config);
|
||||
self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
|
||||
self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
|
||||
self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
|
||||
self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
|
||||
self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
|
||||
self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
|
||||
self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);
|
||||
event->attr.attr.type = bswap_32(event->attr.attr.type);
|
||||
event->attr.attr.size = bswap_32(event->attr.attr.size);
|
||||
event->attr.attr.config = bswap_64(event->attr.attr.config);
|
||||
event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
|
||||
event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type);
|
||||
event->attr.attr.read_format = bswap_64(event->attr.attr.read_format);
|
||||
event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
|
||||
event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type);
|
||||
event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr);
|
||||
event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len);
|
||||
|
||||
size = self->header.size;
|
||||
size -= (void *)&self->attr.id - (void *)self;
|
||||
mem_bswap_64(self->attr.id, size);
|
||||
size = event->header.size;
|
||||
size -= (void *)&event->attr.id - (void *)event;
|
||||
mem_bswap_64(event->attr.id, size);
|
||||
}
|
||||
|
||||
static void event__event_type_swap(event_t *self)
|
||||
static void perf_event__event_type_swap(union perf_event *event)
|
||||
{
|
||||
self->event_type.event_type.event_id =
|
||||
bswap_64(self->event_type.event_type.event_id);
|
||||
event->event_type.event_type.event_id =
|
||||
bswap_64(event->event_type.event_type.event_id);
|
||||
}
|
||||
|
||||
static void event__tracing_data_swap(event_t *self)
|
||||
static void perf_event__tracing_data_swap(union perf_event *event)
|
||||
{
|
||||
self->tracing_data.size = bswap_32(self->tracing_data.size);
|
||||
event->tracing_data.size = bswap_32(event->tracing_data.size);
|
||||
}
|
||||
|
||||
typedef void (*event__swap_op)(event_t *self);
|
||||
typedef void (*perf_event__swap_op)(union perf_event *event);
|
||||
|
||||
static event__swap_op event__swap_ops[] = {
|
||||
[PERF_RECORD_MMAP] = event__mmap_swap,
|
||||
[PERF_RECORD_COMM] = event__comm_swap,
|
||||
[PERF_RECORD_FORK] = event__task_swap,
|
||||
[PERF_RECORD_EXIT] = event__task_swap,
|
||||
[PERF_RECORD_LOST] = event__all64_swap,
|
||||
[PERF_RECORD_READ] = event__read_swap,
|
||||
[PERF_RECORD_SAMPLE] = event__all64_swap,
|
||||
[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
|
||||
[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
|
||||
[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
|
||||
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
|
||||
[PERF_RECORD_HEADER_MAX] = NULL,
|
||||
static perf_event__swap_op perf_event__swap_ops[] = {
|
||||
[PERF_RECORD_MMAP] = perf_event__mmap_swap,
|
||||
[PERF_RECORD_COMM] = perf_event__comm_swap,
|
||||
[PERF_RECORD_FORK] = perf_event__task_swap,
|
||||
[PERF_RECORD_EXIT] = perf_event__task_swap,
|
||||
[PERF_RECORD_LOST] = perf_event__all64_swap,
|
||||
[PERF_RECORD_READ] = perf_event__read_swap,
|
||||
[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
|
||||
[PERF_RECORD_HEADER_ATTR] = perf_event__attr_swap,
|
||||
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
|
||||
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
|
||||
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
|
||||
[PERF_RECORD_HEADER_MAX] = NULL,
|
||||
};
|
||||
|
||||
struct sample_queue {
|
||||
u64 timestamp;
|
||||
u64 file_offset;
|
||||
event_t *event;
|
||||
union perf_event *event;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
@@ -472,8 +456,8 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
|
||||
}
|
||||
|
||||
static int perf_session_deliver_event(struct perf_session *session,
|
||||
event_t *event,
|
||||
struct sample_data *sample,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_event_ops *ops,
|
||||
u64 file_offset);
|
||||
|
||||
@@ -483,7 +467,7 @@ static void flush_sample_queue(struct perf_session *s,
|
||||
struct ordered_samples *os = &s->ordered_samples;
|
||||
struct list_head *head = &os->samples;
|
||||
struct sample_queue *tmp, *iter;
|
||||
struct sample_data sample;
|
||||
struct perf_sample sample;
|
||||
u64 limit = os->next_flush;
|
||||
u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
|
||||
|
||||
@@ -494,7 +478,7 @@ static void flush_sample_queue(struct perf_session *s,
|
||||
if (iter->timestamp > limit)
|
||||
break;
|
||||
|
||||
event__parse_sample(iter->event, s, &sample);
|
||||
perf_session__parse_sample(s, iter->event, &sample);
|
||||
perf_session_deliver_event(s, iter->event, &sample, ops,
|
||||
iter->file_offset);
|
||||
|
||||
@@ -550,7 +534,7 @@ static void flush_sample_queue(struct perf_session *s,
|
||||
* Flush every events below timestamp 7
|
||||
* etc...
|
||||
*/
|
||||
static int process_finished_round(event_t *event __used,
|
||||
static int process_finished_round(union perf_event *event __used,
|
||||
struct perf_session *session,
|
||||
struct perf_event_ops *ops)
|
||||
{
|
||||
@@ -607,12 +591,12 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
|
||||
|
||||
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
|
||||
|
||||
static int perf_session_queue_event(struct perf_session *s, event_t *event,
|
||||
struct sample_data *data, u64 file_offset)
|
||||
static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
|
||||
struct perf_sample *sample, u64 file_offset)
|
||||
{
|
||||
struct ordered_samples *os = &s->ordered_samples;
|
||||
struct list_head *sc = &os->sample_cache;
|
||||
u64 timestamp = data->time;
|
||||
u64 timestamp = sample->time;
|
||||
struct sample_queue *new;
|
||||
|
||||
if (!timestamp || timestamp == ~0ULL)
|
||||
@@ -648,7 +632,7 @@ static int perf_session_queue_event(struct perf_session *s, event_t *event,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void callchain__printf(struct sample_data *sample)
|
||||
static void callchain__printf(struct perf_sample *sample)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
@@ -660,8 +644,8 @@ static void callchain__printf(struct sample_data *sample)
|
||||
}
|
||||
|
||||
static void perf_session__print_tstamp(struct perf_session *session,
|
||||
event_t *event,
|
||||
struct sample_data *sample)
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
if (event->header.type != PERF_RECORD_SAMPLE &&
|
||||
!session->sample_id_all) {
|
||||
@@ -676,8 +660,8 @@ static void perf_session__print_tstamp(struct perf_session *session,
|
||||
printf("%" PRIu64 " ", sample->time);
|
||||
}
|
||||
|
||||
static void dump_event(struct perf_session *session, event_t *event,
|
||||
u64 file_offset, struct sample_data *sample)
|
||||
static void dump_event(struct perf_session *session, union perf_event *event,
|
||||
u64 file_offset, struct perf_sample *sample)
|
||||
{
|
||||
if (!dump_trace)
|
||||
return;
|
||||
@@ -691,11 +675,11 @@ static void dump_event(struct perf_session *session, event_t *event,
|
||||
perf_session__print_tstamp(session, event, sample);
|
||||
|
||||
printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
|
||||
event->header.size, event__get_event_name(event->header.type));
|
||||
event->header.size, perf_event__name(event->header.type));
|
||||
}
|
||||
|
||||
static void dump_sample(struct perf_session *session, event_t *event,
|
||||
struct sample_data *sample)
|
||||
static void dump_sample(struct perf_session *session, union perf_event *event,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
if (!dump_trace)
|
||||
return;
|
||||
@@ -709,8 +693,8 @@ static void dump_sample(struct perf_session *session, event_t *event,
|
||||
}
|
||||
|
||||
static int perf_session_deliver_event(struct perf_session *session,
|
||||
event_t *event,
|
||||
struct sample_data *sample,
|
||||
union perf_event *event,
|
||||
struct perf_sample *sample,
|
||||
struct perf_event_ops *ops,
|
||||
u64 file_offset)
|
||||
{
|
||||
@@ -743,7 +727,7 @@ static int perf_session_deliver_event(struct perf_session *session,
|
||||
}
|
||||
|
||||
static int perf_session__preprocess_sample(struct perf_session *session,
|
||||
event_t *event, struct sample_data *sample)
|
||||
union perf_event *event, struct perf_sample *sample)
|
||||
{
|
||||
if (event->header.type != PERF_RECORD_SAMPLE ||
|
||||
!(session->sample_type & PERF_SAMPLE_CALLCHAIN))
|
||||
@@ -758,7 +742,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int perf_session__process_user_event(struct perf_session *session, event_t *event,
|
||||
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
|
||||
struct perf_event_ops *ops, u64 file_offset)
|
||||
{
|
||||
dump_event(session, event, file_offset, NULL);
|
||||
@@ -783,15 +767,16 @@ static int perf_session__process_user_event(struct perf_session *session, event_
|
||||
}
|
||||
|
||||
static int perf_session__process_event(struct perf_session *session,
|
||||
event_t *event,
|
||||
union perf_event *event,
|
||||
struct perf_event_ops *ops,
|
||||
u64 file_offset)
|
||||
{
|
||||
struct sample_data sample;
|
||||
struct perf_sample sample;
|
||||
int ret;
|
||||
|
||||
if (session->header.needs_swap && event__swap_ops[event->header.type])
|
||||
event__swap_ops[event->header.type](event);
|
||||
if (session->header.needs_swap &&
|
||||
perf_event__swap_ops[event->header.type])
|
||||
perf_event__swap_ops[event->header.type](event);
|
||||
|
||||
if (event->header.type >= PERF_RECORD_HEADER_MAX)
|
||||
return -EINVAL;
|
||||
@@ -804,7 +789,7 @@ static int perf_session__process_event(struct perf_session *session,
|
||||
/*
|
||||
* For all kernel events we get the sample data
|
||||
*/
|
||||
event__parse_sample(event, session, &sample);
|
||||
perf_session__parse_sample(session, event, &sample);
|
||||
|
||||
/* Preprocess sample records - precheck callchains */
|
||||
if (perf_session__preprocess_sample(session, event, &sample))
|
||||
@@ -843,7 +828,7 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
|
||||
static void perf_session__warn_about_errors(const struct perf_session *session,
|
||||
const struct perf_event_ops *ops)
|
||||
{
|
||||
if (ops->lost == event__process_lost &&
|
||||
if (ops->lost == perf_event__process_lost &&
|
||||
session->hists.stats.total_lost != 0) {
|
||||
ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
|
||||
"!\n\nCheck IO/CPU overload!\n\n",
|
||||
@@ -875,7 +860,7 @@ volatile int session_done;
|
||||
static int __perf_session__process_pipe_events(struct perf_session *self,
|
||||
struct perf_event_ops *ops)
|
||||
{
|
||||
event_t event;
|
||||
union perf_event event;
|
||||
uint32_t size;
|
||||
int skip = 0;
|
||||
u64 head;
|
||||
@@ -956,7 +941,7 @@ int __perf_session__process_events(struct perf_session *session,
|
||||
struct ui_progress *progress;
|
||||
size_t page_size, mmap_size;
|
||||
char *buf, *mmaps[8];
|
||||
event_t *event;
|
||||
union perf_event *event;
|
||||
uint32_t size;
|
||||
|
||||
perf_event_ops__fill_defaults(ops);
|
||||
@@ -1001,7 +986,7 @@ remap:
|
||||
file_pos = file_offset + head;
|
||||
|
||||
more:
|
||||
event = (event_t *)(buf + head);
|
||||
event = (union perf_event *)(buf + head);
|
||||
|
||||
if (session->header.needs_swap)
|
||||
perf_event_header__bswap(&event->header);
|
||||
@@ -1134,3 +1119,18 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
|
||||
size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
|
||||
return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
|
||||
}
|
||||
|
||||
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
|
||||
{
|
||||
struct perf_evsel *pos;
|
||||
size_t ret = fprintf(fp, "Aggregated stats:\n");
|
||||
|
||||
ret += hists__fprintf_nr_events(&session->hists, fp);
|
||||
|
||||
list_for_each_entry(pos, &session->evlist->entries, node) {
|
||||
ret += fprintf(fp, "%s stats:\n", event_name(pos));
|
||||
ret += hists__fprintf_nr_events(&pos->hists, fp);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -34,12 +34,12 @@ struct perf_session {
|
||||
struct thread *last_match;
|
||||
struct machine host_machine;
|
||||
struct rb_root machines;
|
||||
struct rb_root hists_tree;
|
||||
struct perf_evlist *evlist;
|
||||
/*
|
||||
* FIXME: should point to the first entry in hists_tree and
|
||||
* be a hists instance. Right now its only 'report'
|
||||
* that is using ->hists_tree while all the rest use
|
||||
* ->hists.
|
||||
* FIXME: Need to split this up further, we need global
|
||||
* stats + per event stats. 'perf diff' also needs
|
||||
* to properly support multiple events in a single
|
||||
* perf.data file.
|
||||
*/
|
||||
struct hists hists;
|
||||
u64 sample_type;
|
||||
@@ -51,15 +51,17 @@ struct perf_session {
|
||||
int cwdlen;
|
||||
char *cwd;
|
||||
struct ordered_samples ordered_samples;
|
||||
char filename[0];
|
||||
struct callchain_cursor callchain_cursor;
|
||||
char filename[0];
|
||||
};
|
||||
|
||||
struct perf_event_ops;
|
||||
|
||||
typedef int (*event_op)(event_t *self, struct sample_data *sample,
|
||||
typedef int (*event_op)(union perf_event *self, struct perf_sample *sample,
|
||||
struct perf_session *session);
|
||||
typedef int (*event_synth_op)(event_t *self, struct perf_session *session);
|
||||
typedef int (*event_op2)(event_t *self, struct perf_session *session,
|
||||
typedef int (*event_synth_op)(union perf_event *self,
|
||||
struct perf_session *session);
|
||||
typedef int (*event_op2)(union perf_event *self, struct perf_session *session,
|
||||
struct perf_event_ops *ops);
|
||||
|
||||
struct perf_event_ops {
|
||||
@@ -94,10 +96,10 @@ int __perf_session__process_events(struct perf_session *self,
|
||||
int perf_session__process_events(struct perf_session *self,
|
||||
struct perf_event_ops *event_ops);
|
||||
|
||||
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
|
||||
struct thread *thread,
|
||||
struct ip_callchain *chain,
|
||||
struct symbol **parent);
|
||||
int perf_session__resolve_callchain(struct perf_session *self,
|
||||
struct thread *thread,
|
||||
struct ip_callchain *chain,
|
||||
struct symbol **parent);
|
||||
|
||||
bool perf_session__has_traces(struct perf_session *self, const char *msg);
|
||||
|
||||
@@ -110,8 +112,6 @@ void mem_bswap_64(void *src, int byte_size);
|
||||
int perf_session__create_kernel_maps(struct perf_session *self);
|
||||
|
||||
void perf_session__update_sample_type(struct perf_session *self);
|
||||
void perf_session__set_sample_id_all(struct perf_session *session, bool value);
|
||||
void perf_session__set_sample_type(struct perf_session *session, u64 type);
|
||||
void perf_session__remove_thread(struct perf_session *self, struct thread *th);
|
||||
|
||||
static inline
|
||||
@@ -149,9 +149,14 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
|
||||
size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
|
||||
FILE *fp, bool with_hits);
|
||||
|
||||
static inline
|
||||
size_t perf_session__fprintf_nr_events(struct perf_session *self, FILE *fp)
|
||||
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
|
||||
|
||||
static inline int perf_session__parse_sample(struct perf_session *session,
|
||||
const union perf_event *event,
|
||||
struct perf_sample *sample)
|
||||
{
|
||||
return hists__fprintf_nr_events(&self->hists, fp);
|
||||
return perf_event__parse_sample(event, session->sample_type,
|
||||
session->sample_id_all, sample);
|
||||
}
|
||||
|
||||
#endif /* __PERF_SESSION_H */
|
||||
|
||||
19
tools/perf/util/setup.py
Normal file
19
tools/perf/util/setup.py
Normal file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/python2
|
||||
|
||||
from distutils.core import setup, Extension
|
||||
|
||||
perf = Extension('perf',
|
||||
sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
|
||||
'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
|
||||
'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
|
||||
include_dirs = ['util/include'],
|
||||
extra_compile_args = ['-fno-strict-aliasing', '-Wno-write-strings'])
|
||||
|
||||
setup(name='perf',
|
||||
version='0.1',
|
||||
description='Interface with the Linux profiling infrastructure',
|
||||
author='Arnaldo Carvalho de Melo',
|
||||
author_email='acme@redhat.com',
|
||||
license='GPLv2',
|
||||
url='http://perf.wiki.kernel.org',
|
||||
ext_modules=[perf])
|
||||
199
tools/perf/util/strfilter.c
Normal file
199
tools/perf/util/strfilter.c
Normal file
@@ -0,0 +1,199 @@
|
||||
#include "util.h"
|
||||
#include "string.h"
|
||||
#include "strfilter.h"
|
||||
|
||||
/* Operators */
|
||||
static const char *OP_and = "&"; /* Logical AND */
|
||||
static const char *OP_or = "|"; /* Logical OR */
|
||||
static const char *OP_not = "!"; /* Logical NOT */
|
||||
|
||||
#define is_operator(c) ((c) == '|' || (c) == '&' || (c) == '!')
|
||||
#define is_separator(c) (is_operator(c) || (c) == '(' || (c) == ')')
|
||||
|
||||
static void strfilter_node__delete(struct strfilter_node *self)
|
||||
{
|
||||
if (self) {
|
||||
if (self->p && !is_operator(*self->p))
|
||||
free((char *)self->p);
|
||||
strfilter_node__delete(self->l);
|
||||
strfilter_node__delete(self->r);
|
||||
free(self);
|
||||
}
|
||||
}
|
||||
|
||||
void strfilter__delete(struct strfilter *self)
|
||||
{
|
||||
if (self) {
|
||||
strfilter_node__delete(self->root);
|
||||
free(self);
|
||||
}
|
||||
}
|
||||
|
||||
static const char *get_token(const char *s, const char **e)
|
||||
{
|
||||
const char *p;
|
||||
|
||||
while (isspace(*s)) /* Skip spaces */
|
||||
s++;
|
||||
|
||||
if (*s == '\0') {
|
||||
p = s;
|
||||
goto end;
|
||||
}
|
||||
|
||||
p = s + 1;
|
||||
if (!is_separator(*s)) {
|
||||
/* End search */
|
||||
retry:
|
||||
while (*p && !is_separator(*p) && !isspace(*p))
|
||||
p++;
|
||||
/* Escape and special case: '!' is also used in glob pattern */
|
||||
if (*(p - 1) == '\\' || (*p == '!' && *(p - 1) == '[')) {
|
||||
p++;
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
end:
|
||||
*e = p;
|
||||
return s;
|
||||
}
|
||||
|
||||
static struct strfilter_node *strfilter_node__alloc(const char *op,
|
||||
struct strfilter_node *l,
|
||||
struct strfilter_node *r)
|
||||
{
|
||||
struct strfilter_node *ret = zalloc(sizeof(struct strfilter_node));
|
||||
|
||||
if (ret) {
|
||||
ret->p = op;
|
||||
ret->l = l;
|
||||
ret->r = r;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct strfilter_node *strfilter_node__new(const char *s,
|
||||
const char **ep)
|
||||
{
|
||||
struct strfilter_node root, *cur, *last_op;
|
||||
const char *e;
|
||||
|
||||
if (!s)
|
||||
return NULL;
|
||||
|
||||
memset(&root, 0, sizeof(root));
|
||||
last_op = cur = &root;
|
||||
|
||||
s = get_token(s, &e);
|
||||
while (*s != '\0' && *s != ')') {
|
||||
switch (*s) {
|
||||
case '&': /* Exchg last OP->r with AND */
|
||||
if (!cur->r || !last_op->r)
|
||||
goto error;
|
||||
cur = strfilter_node__alloc(OP_and, last_op->r, NULL);
|
||||
if (!cur)
|
||||
goto nomem;
|
||||
last_op->r = cur;
|
||||
last_op = cur;
|
||||
break;
|
||||
case '|': /* Exchg the root with OR */
|
||||
if (!cur->r || !root.r)
|
||||
goto error;
|
||||
cur = strfilter_node__alloc(OP_or, root.r, NULL);
|
||||
if (!cur)
|
||||
goto nomem;
|
||||
root.r = cur;
|
||||
last_op = cur;
|
||||
break;
|
||||
case '!': /* Add NOT as a leaf node */
|
||||
if (cur->r)
|
||||
goto error;
|
||||
cur->r = strfilter_node__alloc(OP_not, NULL, NULL);
|
||||
if (!cur->r)
|
||||
goto nomem;
|
||||
cur = cur->r;
|
||||
break;
|
||||
case '(': /* Recursively parses inside the parenthesis */
|
||||
if (cur->r)
|
||||
goto error;
|
||||
cur->r = strfilter_node__new(s + 1, &s);
|
||||
if (!s)
|
||||
goto nomem;
|
||||
if (!cur->r || *s != ')')
|
||||
goto error;
|
||||
e = s + 1;
|
||||
break;
|
||||
default:
|
||||
if (cur->r)
|
||||
goto error;
|
||||
cur->r = strfilter_node__alloc(NULL, NULL, NULL);
|
||||
if (!cur->r)
|
||||
goto nomem;
|
||||
cur->r->p = strndup(s, e - s);
|
||||
if (!cur->r->p)
|
||||
goto nomem;
|
||||
}
|
||||
s = get_token(e, &e);
|
||||
}
|
||||
if (!cur->r)
|
||||
goto error;
|
||||
*ep = s;
|
||||
return root.r;
|
||||
nomem:
|
||||
s = NULL;
|
||||
error:
|
||||
*ep = s;
|
||||
strfilter_node__delete(root.r);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse filter rule and return new strfilter.
|
||||
* Return NULL if fail, and *ep == NULL if memory allocation failed.
|
||||
*/
|
||||
struct strfilter *strfilter__new(const char *rules, const char **err)
|
||||
{
|
||||
struct strfilter *ret = zalloc(sizeof(struct strfilter));
|
||||
const char *ep = NULL;
|
||||
|
||||
if (ret)
|
||||
ret->root = strfilter_node__new(rules, &ep);
|
||||
|
||||
if (!ret || !ret->root || *ep != '\0') {
|
||||
if (err)
|
||||
*err = ep;
|
||||
strfilter__delete(ret);
|
||||
ret = NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool strfilter_node__compare(struct strfilter_node *self,
|
||||
const char *str)
|
||||
{
|
||||
if (!self || !self->p)
|
||||
return false;
|
||||
|
||||
switch (*self->p) {
|
||||
case '|': /* OR */
|
||||
return strfilter_node__compare(self->l, str) ||
|
||||
strfilter_node__compare(self->r, str);
|
||||
case '&': /* AND */
|
||||
return strfilter_node__compare(self->l, str) &&
|
||||
strfilter_node__compare(self->r, str);
|
||||
case '!': /* NOT */
|
||||
return !strfilter_node__compare(self->r, str);
|
||||
default:
|
||||
return strglobmatch(str, self->p);
|
||||
}
|
||||
}
|
||||
|
||||
/* Return true if STR matches the filter rules */
|
||||
bool strfilter__compare(struct strfilter *self, const char *str)
|
||||
{
|
||||
if (!self)
|
||||
return false;
|
||||
return strfilter_node__compare(self->root, str);
|
||||
}
|
||||
48
tools/perf/util/strfilter.h
Normal file
48
tools/perf/util/strfilter.h
Normal file
@@ -0,0 +1,48 @@
|
||||
#ifndef __PERF_STRFILTER_H
|
||||
#define __PERF_STRFILTER_H
|
||||
/* General purpose glob matching filter */
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
/* A node of string filter */
|
||||
struct strfilter_node {
|
||||
struct strfilter_node *l; /* Tree left branche (for &,|) */
|
||||
struct strfilter_node *r; /* Tree right branche (for !,&,|) */
|
||||
const char *p; /* Operator or rule */
|
||||
};
|
||||
|
||||
/* String filter */
|
||||
struct strfilter {
|
||||
struct strfilter_node *root;
|
||||
};
|
||||
|
||||
/**
|
||||
* strfilter__new - Create a new string filter
|
||||
* @rules: Filter rule, which is a combination of glob expressions.
|
||||
* @err: Pointer which points an error detected on @rules
|
||||
*
|
||||
* Parse @rules and return new strfilter. Return NULL if an error detected.
|
||||
* In that case, *@err will indicate where it is detected, and *@err is NULL
|
||||
* if a memory allocation is failed.
|
||||
*/
|
||||
struct strfilter *strfilter__new(const char *rules, const char **err);
|
||||
|
||||
/**
|
||||
* strfilter__compare - compare given string and a string filter
|
||||
* @self: String filter
|
||||
* @str: target string
|
||||
*
|
||||
* Compare @str and @self. Return true if the str match the rule
|
||||
*/
|
||||
bool strfilter__compare(struct strfilter *self, const char *str);
|
||||
|
||||
/**
|
||||
* strfilter__delete - delete a string filter
|
||||
* @self: String filter to delete
|
||||
*
|
||||
* Delete @self.
|
||||
*/
|
||||
void strfilter__delete(struct strfilter *self);
|
||||
|
||||
#endif
|
||||
@@ -207,7 +207,6 @@ struct dso *dso__new(const char *name)
|
||||
dso__set_short_name(self, self->name);
|
||||
for (i = 0; i < MAP__NR_TYPES; ++i)
|
||||
self->symbols[i] = self->symbol_names[i] = RB_ROOT;
|
||||
self->slen_calculated = 0;
|
||||
self->origin = DSO__ORIG_NOT_FOUND;
|
||||
self->loaded = 0;
|
||||
self->sorted_by_name = 0;
|
||||
@@ -1525,8 +1524,8 @@ int dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
|
||||
symbol_conf.symfs, self->long_name);
|
||||
break;
|
||||
case DSO__ORIG_GUEST_KMODULE:
|
||||
if (map->groups && map->groups->machine)
|
||||
root_dir = map->groups->machine->root_dir;
|
||||
if (map->groups && machine)
|
||||
root_dir = machine->root_dir;
|
||||
else
|
||||
root_dir = "";
|
||||
snprintf(name, size, "%s%s%s", symbol_conf.symfs,
|
||||
|
||||
@@ -132,7 +132,6 @@ struct dso {
|
||||
struct rb_root symbol_names[MAP__NR_TYPES];
|
||||
enum dso_kernel_type kernel;
|
||||
u8 adjust_symbols:1;
|
||||
u8 slen_calculated:1;
|
||||
u8 has_build_id:1;
|
||||
u8 hit:1;
|
||||
u8 annotate_warned:1;
|
||||
|
||||
@@ -7,61 +7,6 @@
|
||||
#include "util.h"
|
||||
#include "debug.h"
|
||||
|
||||
/* Skip "." and ".." directories */
|
||||
static int filter(const struct dirent *dir)
|
||||
{
|
||||
if (dir->d_name[0] == '.')
|
||||
return 0;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
struct thread_map *thread_map__new_by_pid(pid_t pid)
|
||||
{
|
||||
struct thread_map *threads;
|
||||
char name[256];
|
||||
int items;
|
||||
struct dirent **namelist = NULL;
|
||||
int i;
|
||||
|
||||
sprintf(name, "/proc/%d/task", pid);
|
||||
items = scandir(name, &namelist, filter, NULL);
|
||||
if (items <= 0)
|
||||
return NULL;
|
||||
|
||||
threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
|
||||
if (threads != NULL) {
|
||||
for (i = 0; i < items; i++)
|
||||
threads->map[i] = atoi(namelist[i]->d_name);
|
||||
threads->nr = items;
|
||||
}
|
||||
|
||||
for (i=0; i<items; i++)
|
||||
free(namelist[i]);
|
||||
free(namelist);
|
||||
|
||||
return threads;
|
||||
}
|
||||
|
||||
struct thread_map *thread_map__new_by_tid(pid_t tid)
|
||||
{
|
||||
struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
|
||||
|
||||
if (threads != NULL) {
|
||||
threads->map[0] = tid;
|
||||
threads->nr = 1;
|
||||
}
|
||||
|
||||
return threads;
|
||||
}
|
||||
|
||||
struct thread_map *thread_map__new(pid_t pid, pid_t tid)
|
||||
{
|
||||
if (pid != -1)
|
||||
return thread_map__new_by_pid(pid);
|
||||
return thread_map__new_by_tid(tid);
|
||||
}
|
||||
|
||||
static struct thread *thread__new(pid_t pid)
|
||||
{
|
||||
struct thread *self = zalloc(sizeof(*self));
|
||||
|
||||
@@ -18,24 +18,10 @@ struct thread {
|
||||
int comm_len;
|
||||
};
|
||||
|
||||
struct thread_map {
|
||||
int nr;
|
||||
int map[];
|
||||
};
|
||||
|
||||
struct perf_session;
|
||||
|
||||
void thread__delete(struct thread *self);
|
||||
|
||||
struct thread_map *thread_map__new_by_pid(pid_t pid);
|
||||
struct thread_map *thread_map__new_by_tid(pid_t tid);
|
||||
struct thread_map *thread_map__new(pid_t pid, pid_t tid);
|
||||
|
||||
static inline void thread_map__delete(struct thread_map *threads)
|
||||
{
|
||||
free(threads);
|
||||
}
|
||||
|
||||
int thread__set_comm(struct thread *self, const char *comm);
|
||||
int thread__comm_len(struct thread *self);
|
||||
struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
|
||||
|
||||
64
tools/perf/util/thread_map.c
Normal file
64
tools/perf/util/thread_map.c
Normal file
@@ -0,0 +1,64 @@
|
||||
#include <dirent.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include "thread_map.h"
|
||||
|
||||
/* Skip "." and ".." directories */
|
||||
static int filter(const struct dirent *dir)
|
||||
{
|
||||
if (dir->d_name[0] == '.')
|
||||
return 0;
|
||||
else
|
||||
return 1;
|
||||
}
|
||||
|
||||
struct thread_map *thread_map__new_by_pid(pid_t pid)
|
||||
{
|
||||
struct thread_map *threads;
|
||||
char name[256];
|
||||
int items;
|
||||
struct dirent **namelist = NULL;
|
||||
int i;
|
||||
|
||||
sprintf(name, "/proc/%d/task", pid);
|
||||
items = scandir(name, &namelist, filter, NULL);
|
||||
if (items <= 0)
|
||||
return NULL;
|
||||
|
||||
threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
|
||||
if (threads != NULL) {
|
||||
for (i = 0; i < items; i++)
|
||||
threads->map[i] = atoi(namelist[i]->d_name);
|
||||
threads->nr = items;
|
||||
}
|
||||
|
||||
for (i=0; i<items; i++)
|
||||
free(namelist[i]);
|
||||
free(namelist);
|
||||
|
||||
return threads;
|
||||
}
|
||||
|
||||
struct thread_map *thread_map__new_by_tid(pid_t tid)
|
||||
{
|
||||
struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
|
||||
|
||||
if (threads != NULL) {
|
||||
threads->map[0] = tid;
|
||||
threads->nr = 1;
|
||||
}
|
||||
|
||||
return threads;
|
||||
}
|
||||
|
||||
struct thread_map *thread_map__new(pid_t pid, pid_t tid)
|
||||
{
|
||||
if (pid != -1)
|
||||
return thread_map__new_by_pid(pid);
|
||||
return thread_map__new_by_tid(tid);
|
||||
}
|
||||
|
||||
void thread_map__delete(struct thread_map *threads)
|
||||
{
|
||||
free(threads);
|
||||
}
|
||||
15
tools/perf/util/thread_map.h
Normal file
15
tools/perf/util/thread_map.h
Normal file
@@ -0,0 +1,15 @@
|
||||
#ifndef __PERF_THREAD_MAP_H
|
||||
#define __PERF_THREAD_MAP_H
|
||||
|
||||
#include <sys/types.h>
|
||||
|
||||
struct thread_map {
|
||||
int nr;
|
||||
int map[];
|
||||
};
|
||||
|
||||
struct thread_map *thread_map__new_by_pid(pid_t pid);
|
||||
struct thread_map *thread_map__new_by_tid(pid_t tid);
|
||||
struct thread_map *thread_map__new(pid_t pid, pid_t tid);
|
||||
void thread_map__delete(struct thread_map *threads);
|
||||
#endif /* __PERF_THREAD_MAP_H */
|
||||
238
tools/perf/util/top.c
Normal file
238
tools/perf/util/top.c
Normal file
@@ -0,0 +1,238 @@
|
||||
/*
|
||||
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
||||
*
|
||||
* Refactored from builtin-top.c, see that files for further copyright notes.
|
||||
*
|
||||
* Released under the GPL v2. (and only v2, not any later version)
|
||||
*/
|
||||
|
||||
#include "cpumap.h"
|
||||
#include "event.h"
|
||||
#include "evlist.h"
|
||||
#include "evsel.h"
|
||||
#include "parse-events.h"
|
||||
#include "symbol.h"
|
||||
#include "top.h"
|
||||
#include <inttypes.h>
|
||||
|
||||
/*
|
||||
* Ordering weight: count-1 * count-2 * ... / count-n
|
||||
*/
|
||||
static double sym_weight(const struct sym_entry *sym, struct perf_top *top)
|
||||
{
|
||||
double weight = sym->snap_count;
|
||||
int counter;
|
||||
|
||||
if (!top->display_weighted)
|
||||
return weight;
|
||||
|
||||
for (counter = 1; counter < top->evlist->nr_entries - 1; counter++)
|
||||
weight *= sym->count[counter];
|
||||
|
||||
weight /= (sym->count[counter] + 1);
|
||||
|
||||
return weight;
|
||||
}
|
||||
|
||||
static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme)
|
||||
{
|
||||
pthread_mutex_lock(&top->active_symbols_lock);
|
||||
list_del_init(&syme->node);
|
||||
pthread_mutex_unlock(&top->active_symbols_lock);
|
||||
}
|
||||
|
||||
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
|
||||
{
|
||||
struct rb_node **p = &tree->rb_node;
|
||||
struct rb_node *parent = NULL;
|
||||
struct sym_entry *iter;
|
||||
|
||||
while (*p != NULL) {
|
||||
parent = *p;
|
||||
iter = rb_entry(parent, struct sym_entry, rb_node);
|
||||
|
||||
if (se->weight > iter->weight)
|
||||
p = &(*p)->rb_left;
|
||||
else
|
||||
p = &(*p)->rb_right;
|
||||
}
|
||||
|
||||
rb_link_node(&se->rb_node, parent, p);
|
||||
rb_insert_color(&se->rb_node, tree);
|
||||
}
|
||||
|
||||
#define SNPRINTF(buf, size, fmt, args...) \
|
||||
({ \
|
||||
size_t r = snprintf(buf, size, fmt, ## args); \
|
||||
r > size ? size : r; \
|
||||
})
|
||||
|
||||
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
|
||||
{
|
||||
struct perf_evsel *counter;
|
||||
float samples_per_sec = top->samples / top->delay_secs;
|
||||
float ksamples_per_sec = top->kernel_samples / top->delay_secs;
|
||||
float esamples_percent = (100.0 * top->exact_samples) / top->samples;
|
||||
size_t ret = 0;
|
||||
|
||||
if (!perf_guest) {
|
||||
ret = SNPRINTF(bf, size,
|
||||
" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
|
||||
" exact: %4.1f%% [", samples_per_sec,
|
||||
100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
|
||||
samples_per_sec)),
|
||||
esamples_percent);
|
||||
} else {
|
||||
float us_samples_per_sec = top->us_samples / top->delay_secs;
|
||||
float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
|
||||
float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;
|
||||
|
||||
ret = SNPRINTF(bf, size,
|
||||
" PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
|
||||
" guest kernel:%4.1f%% guest us:%4.1f%%"
|
||||
" exact: %4.1f%% [", samples_per_sec,
|
||||
100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
|
||||
samples_per_sec)),
|
||||
100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
|
||||
samples_per_sec)),
|
||||
100.0 - (100.0 * ((samples_per_sec -
|
||||
guest_kernel_samples_per_sec) /
|
||||
samples_per_sec)),
|
||||
100.0 - (100.0 * ((samples_per_sec -
|
||||
guest_us_samples_per_sec) /
|
||||
samples_per_sec)),
|
||||
esamples_percent);
|
||||
}
|
||||
|
||||
if (top->evlist->nr_entries == 1 || !top->display_weighted) {
|
||||
struct perf_evsel *first;
|
||||
first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
|
||||
ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
|
||||
(uint64_t)first->attr.sample_period,
|
||||
top->freq ? "Hz" : "");
|
||||
}
|
||||
|
||||
if (!top->display_weighted) {
|
||||
ret += SNPRINTF(bf + ret, size - ret, "%s",
|
||||
event_name(top->sym_evsel));
|
||||
} else {
|
||||
/*
|
||||
* Don't let events eat all the space. Leaving 30 bytes
|
||||
* for the rest should be enough.
|
||||
*/
|
||||
size_t last_pos = size - 30;
|
||||
|
||||
list_for_each_entry(counter, &top->evlist->entries, node) {
|
||||
ret += SNPRINTF(bf + ret, size - ret, "%s%s",
|
||||
counter->idx ? "/" : "",
|
||||
event_name(counter));
|
||||
if (ret > last_pos) {
|
||||
sprintf(bf + last_pos - 3, "..");
|
||||
ret = last_pos - 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret += SNPRINTF(bf + ret, size - ret, "], ");
|
||||
|
||||
if (top->target_pid != -1)
|
||||
ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d",
|
||||
top->target_pid);
|
||||
else if (top->target_tid != -1)
|
||||
ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d",
|
||||
top->target_tid);
|
||||
else
|
||||
ret += SNPRINTF(bf + ret, size - ret, " (all");
|
||||
|
||||
if (top->cpu_list)
|
||||
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
|
||||
top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
|
||||
else {
|
||||
if (top->target_tid != -1)
|
||||
ret += SNPRINTF(bf + ret, size - ret, ")");
|
||||
else
|
||||
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
|
||||
top->evlist->cpus->nr,
|
||||
top->evlist->cpus->nr > 1 ? "s" : "");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void perf_top__reset_sample_counters(struct perf_top *top)
|
||||
{
|
||||
top->samples = top->us_samples = top->kernel_samples =
|
||||
top->exact_samples = top->guest_kernel_samples =
|
||||
top->guest_us_samples = 0;
|
||||
}
|
||||
|
||||
float perf_top__decay_samples(struct perf_top *top, struct rb_root *root)
|
||||
{
|
||||
struct sym_entry *syme, *n;
|
||||
float sum_ksamples = 0.0;
|
||||
int snap = !top->display_weighted ? top->sym_counter : 0, j;
|
||||
|
||||
/* Sort the active symbols */
|
||||
pthread_mutex_lock(&top->active_symbols_lock);
|
||||
syme = list_entry(top->active_symbols.next, struct sym_entry, node);
|
||||
pthread_mutex_unlock(&top->active_symbols_lock);
|
||||
|
||||
top->rb_entries = 0;
|
||||
list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) {
|
||||
syme->snap_count = syme->count[snap];
|
||||
if (syme->snap_count != 0) {
|
||||
|
||||
if ((top->hide_user_symbols &&
|
||||
syme->origin == PERF_RECORD_MISC_USER) ||
|
||||
(top->hide_kernel_symbols &&
|
||||
syme->origin == PERF_RECORD_MISC_KERNEL)) {
|
||||
perf_top__remove_active_sym(top, syme);
|
||||
continue;
|
||||
}
|
||||
syme->weight = sym_weight(syme, top);
|
||||
|
||||
if ((int)syme->snap_count >= top->count_filter) {
|
||||
rb_insert_active_sym(root, syme);
|
||||
++top->rb_entries;
|
||||
}
|
||||
sum_ksamples += syme->snap_count;
|
||||
|
||||
for (j = 0; j < top->evlist->nr_entries; j++)
|
||||
syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8;
|
||||
} else
|
||||
perf_top__remove_active_sym(top, syme);
|
||||
}
|
||||
|
||||
return sum_ksamples;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find the longest symbol name that will be displayed
|
||||
*/
|
||||
void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
|
||||
int *dso_width, int *dso_short_width, int *sym_width)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
int printed = 0;
|
||||
|
||||
*sym_width = *dso_width = *dso_short_width = 0;
|
||||
|
||||
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
|
||||
struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
|
||||
struct symbol *sym = sym_entry__symbol(syme);
|
||||
|
||||
if (++printed > top->print_entries ||
|
||||
(int)syme->snap_count < top->count_filter)
|
||||
continue;
|
||||
|
||||
if (syme->map->dso->long_name_len > *dso_width)
|
||||
*dso_width = syme->map->dso->long_name_len;
|
||||
|
||||
if (syme->map->dso->short_name_len > *dso_short_width)
|
||||
*dso_short_width = syme->map->dso->short_name_len;
|
||||
|
||||
if (sym->namelen > *sym_width)
|
||||
*sym_width = sym->namelen;
|
||||
}
|
||||
}
|
||||
66
tools/perf/util/top.h
Normal file
66
tools/perf/util/top.h
Normal file
@@ -0,0 +1,66 @@
|
||||
#ifndef __PERF_TOP_H
|
||||
#define __PERF_TOP_H 1
|
||||
|
||||
#include "types.h"
|
||||
#include "../perf.h"
|
||||
#include <stddef.h>
|
||||
#include <pthread.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
struct perf_evlist;
|
||||
struct perf_evsel;
|
||||
|
||||
struct sym_entry {
|
||||
struct rb_node rb_node;
|
||||
struct list_head node;
|
||||
unsigned long snap_count;
|
||||
double weight;
|
||||
int skip;
|
||||
u8 origin;
|
||||
struct map *map;
|
||||
unsigned long count[0];
|
||||
};
|
||||
|
||||
static inline struct symbol *sym_entry__symbol(struct sym_entry *self)
|
||||
{
|
||||
return ((void *)self) + symbol_conf.priv_size;
|
||||
}
|
||||
|
||||
struct perf_top {
|
||||
struct perf_evlist *evlist;
|
||||
/*
|
||||
* Symbols will be added here in perf_event__process_sample and will
|
||||
* get out after decayed.
|
||||
*/
|
||||
struct list_head active_symbols;
|
||||
pthread_mutex_t active_symbols_lock;
|
||||
pthread_cond_t active_symbols_cond;
|
||||
u64 samples;
|
||||
u64 kernel_samples, us_samples;
|
||||
u64 exact_samples;
|
||||
u64 guest_us_samples, guest_kernel_samples;
|
||||
int print_entries, count_filter, delay_secs;
|
||||
int display_weighted, freq, rb_entries, sym_counter;
|
||||
pid_t target_pid, target_tid;
|
||||
bool hide_kernel_symbols, hide_user_symbols, zero;
|
||||
const char *cpu_list;
|
||||
struct sym_entry *sym_filter_entry;
|
||||
struct perf_evsel *sym_evsel;
|
||||
};
|
||||
|
||||
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
|
||||
void perf_top__reset_sample_counters(struct perf_top *top);
|
||||
float perf_top__decay_samples(struct perf_top *top, struct rb_root *root);
|
||||
void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
|
||||
int *dso_width, int *dso_short_width, int *sym_width);
|
||||
|
||||
#ifdef NO_NEWT_SUPPORT
|
||||
static inline int perf_top__tui_browser(struct perf_top *top __used)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
int perf_top__tui_browser(struct perf_top *top);
|
||||
#endif
|
||||
#endif /* __PERF_TOP_H */
|
||||
@@ -153,7 +153,7 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
|
||||
char *next = NULL;
|
||||
char *addr_str;
|
||||
char ch;
|
||||
int ret;
|
||||
int ret __used;
|
||||
int i;
|
||||
|
||||
line = strtok_r(file, "\n", &next);
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#include "libslang.h"
|
||||
#include "ui.h"
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/rbtree.h>
|
||||
@@ -156,6 +157,20 @@ void ui_browser__add_exit_keys(struct ui_browser *self, int keys[])
|
||||
}
|
||||
}
|
||||
|
||||
void __ui_browser__show_title(struct ui_browser *browser, const char *title)
|
||||
{
|
||||
SLsmg_gotorc(0, 0);
|
||||
ui_browser__set_color(browser, NEWT_COLORSET_ROOT);
|
||||
slsmg_write_nstring(title, browser->width);
|
||||
}
|
||||
|
||||
void ui_browser__show_title(struct ui_browser *browser, const char *title)
|
||||
{
|
||||
pthread_mutex_lock(&ui__lock);
|
||||
__ui_browser__show_title(browser, title);
|
||||
pthread_mutex_unlock(&ui__lock);
|
||||
}
|
||||
|
||||
int ui_browser__show(struct ui_browser *self, const char *title,
|
||||
const char *helpline, ...)
|
||||
{
|
||||
@@ -178,9 +193,8 @@ int ui_browser__show(struct ui_browser *self, const char *title,
|
||||
if (self->sb == NULL)
|
||||
return -1;
|
||||
|
||||
SLsmg_gotorc(0, 0);
|
||||
ui_browser__set_color(self, NEWT_COLORSET_ROOT);
|
||||
slsmg_write_nstring(title, self->width);
|
||||
pthread_mutex_lock(&ui__lock);
|
||||
__ui_browser__show_title(self, title);
|
||||
|
||||
ui_browser__add_exit_keys(self, keys);
|
||||
newtFormAddComponent(self->form, self->sb);
|
||||
@@ -188,25 +202,30 @@ int ui_browser__show(struct ui_browser *self, const char *title,
|
||||
va_start(ap, helpline);
|
||||
ui_helpline__vpush(helpline, ap);
|
||||
va_end(ap);
|
||||
pthread_mutex_unlock(&ui__lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ui_browser__hide(struct ui_browser *self)
|
||||
{
|
||||
pthread_mutex_lock(&ui__lock);
|
||||
newtFormDestroy(self->form);
|
||||
self->form = NULL;
|
||||
ui_helpline__pop();
|
||||
pthread_mutex_unlock(&ui__lock);
|
||||
}
|
||||
|
||||
int ui_browser__refresh(struct ui_browser *self)
|
||||
{
|
||||
int row;
|
||||
|
||||
pthread_mutex_lock(&ui__lock);
|
||||
newtScrollbarSet(self->sb, self->index, self->nr_entries - 1);
|
||||
row = self->refresh(self);
|
||||
ui_browser__set_color(self, HE_COLORSET_NORMAL);
|
||||
SLsmg_fill_region(self->y + row, self->x,
|
||||
self->height - row, self->width, ' ');
|
||||
pthread_mutex_unlock(&ui__lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -24,7 +24,6 @@ struct ui_browser {
|
||||
u32 nr_entries;
|
||||
};
|
||||
|
||||
|
||||
void ui_browser__set_color(struct ui_browser *self, int color);
|
||||
void ui_browser__set_percent_color(struct ui_browser *self,
|
||||
double percent, bool current);
|
||||
@@ -35,6 +34,8 @@ void ui_browser__reset_index(struct ui_browser *self);
|
||||
void ui_browser__gotorc(struct ui_browser *self, int y, int x);
|
||||
void ui_browser__add_exit_key(struct ui_browser *self, int key);
|
||||
void ui_browser__add_exit_keys(struct ui_browser *self, int keys[]);
|
||||
void __ui_browser__show_title(struct ui_browser *browser, const char *title);
|
||||
void ui_browser__show_title(struct ui_browser *browser, const char *title);
|
||||
int ui_browser__show(struct ui_browser *self, const char *title,
|
||||
const char *helpline, ...);
|
||||
void ui_browser__hide(struct ui_browser *self);
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
#include "../browser.h"
|
||||
#include "../helpline.h"
|
||||
#include "../libslang.h"
|
||||
#include "../../annotate.h"
|
||||
#include "../../hist.h"
|
||||
#include "../../sort.h"
|
||||
#include "../../symbol.h"
|
||||
#include "../../annotate.h"
|
||||
#include <pthread.h>
|
||||
|
||||
static void ui__error_window(const char *fmt, ...)
|
||||
{
|
||||
@@ -42,8 +45,6 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
|
||||
struct objdump_line_rb_node *olrb = objdump_line__rb(ol);
|
||||
ui_browser__set_percent_color(self, olrb->percent, current_entry);
|
||||
slsmg_printf(" %7.2f ", olrb->percent);
|
||||
if (!current_entry)
|
||||
ui_browser__set_color(self, HE_COLORSET_CODE);
|
||||
} else {
|
||||
ui_browser__set_percent_color(self, 0, current_entry);
|
||||
slsmg_write_nstring(" ", 9);
|
||||
@@ -55,35 +56,40 @@ static void annotate_browser__write(struct ui_browser *self, void *entry, int ro
|
||||
slsmg_write_nstring(" ", width - 18);
|
||||
else
|
||||
slsmg_write_nstring(ol->line, width - 18);
|
||||
|
||||
if (!current_entry)
|
||||
ui_browser__set_color(self, HE_COLORSET_CODE);
|
||||
}
|
||||
|
||||
static double objdump_line__calc_percent(struct objdump_line *self,
|
||||
struct list_head *head,
|
||||
struct symbol *sym)
|
||||
struct symbol *sym, int evidx)
|
||||
{
|
||||
double percent = 0.0;
|
||||
|
||||
if (self->offset != -1) {
|
||||
int len = sym->end - sym->start;
|
||||
unsigned int hits = 0;
|
||||
struct sym_priv *priv = symbol__priv(sym);
|
||||
struct sym_ext *sym_ext = priv->ext;
|
||||
struct sym_hist *h = priv->hist;
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct source_line *src_line = notes->src->lines;
|
||||
struct sym_hist *h = annotation__histogram(notes, evidx);
|
||||
s64 offset = self->offset;
|
||||
struct objdump_line *next = objdump__get_next_ip_line(head, self);
|
||||
|
||||
struct objdump_line *next;
|
||||
|
||||
next = objdump__get_next_ip_line(¬es->src->source, self);
|
||||
while (offset < (s64)len &&
|
||||
(next == NULL || offset < next->offset)) {
|
||||
if (sym_ext) {
|
||||
percent += sym_ext[offset].percent;
|
||||
if (src_line) {
|
||||
percent += src_line[offset].percent;
|
||||
} else
|
||||
hits += h->ip[offset];
|
||||
hits += h->addr[offset];
|
||||
|
||||
++offset;
|
||||
}
|
||||
|
||||
if (sym_ext == NULL && h->sum)
|
||||
/*
|
||||
* If the percentage wasn't already calculated in
|
||||
* symbol__get_source_line, do it now:
|
||||
*/
|
||||
if (src_line == NULL && h->sum)
|
||||
percent = 100.0 * hits / h->sum;
|
||||
}
|
||||
|
||||
@@ -133,103 +139,161 @@ static void annotate_browser__set_top(struct annotate_browser *self,
|
||||
self->curr_hot = nd;
|
||||
}
|
||||
|
||||
static int annotate_browser__run(struct annotate_browser *self)
|
||||
static void annotate_browser__calc_percent(struct annotate_browser *browser,
|
||||
int evidx)
|
||||
{
|
||||
struct rb_node *nd;
|
||||
struct hist_entry *he = self->b.priv;
|
||||
int key;
|
||||
struct symbol *sym = browser->b.priv;
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct objdump_line *pos;
|
||||
|
||||
if (ui_browser__show(&self->b, he->ms.sym->name,
|
||||
"<-, -> or ESC: exit, TAB/shift+TAB: cycle thru samples") < 0)
|
||||
return -1;
|
||||
browser->entries = RB_ROOT;
|
||||
|
||||
pthread_mutex_lock(¬es->lock);
|
||||
|
||||
list_for_each_entry(pos, ¬es->src->source, node) {
|
||||
struct objdump_line_rb_node *rbpos = objdump_line__rb(pos);
|
||||
rbpos->percent = objdump_line__calc_percent(pos, sym, evidx);
|
||||
if (rbpos->percent < 0.01) {
|
||||
RB_CLEAR_NODE(&rbpos->rb_node);
|
||||
continue;
|
||||
}
|
||||
objdump__insert_line(&browser->entries, rbpos);
|
||||
}
|
||||
pthread_mutex_unlock(¬es->lock);
|
||||
|
||||
browser->curr_hot = rb_last(&browser->entries);
|
||||
}
|
||||
|
||||
static int annotate_browser__run(struct annotate_browser *self, int evidx,
|
||||
int refresh)
|
||||
{
|
||||
struct rb_node *nd = NULL;
|
||||
struct symbol *sym = self->b.priv;
|
||||
/*
|
||||
* To allow builtin-annotate to cycle thru multiple symbols by
|
||||
* RIGHT To allow builtin-annotate to cycle thru multiple symbols by
|
||||
* examining the exit key for this function.
|
||||
*/
|
||||
ui_browser__add_exit_key(&self->b, NEWT_KEY_RIGHT);
|
||||
int exit_keys[] = { 'H', NEWT_KEY_TAB, NEWT_KEY_UNTAB,
|
||||
NEWT_KEY_RIGHT, 0 };
|
||||
int key;
|
||||
|
||||
if (ui_browser__show(&self->b, sym->name,
|
||||
"<-, -> or ESC: exit, TAB/shift+TAB: "
|
||||
"cycle hottest lines, H: Hottest") < 0)
|
||||
return -1;
|
||||
|
||||
ui_browser__add_exit_keys(&self->b, exit_keys);
|
||||
annotate_browser__calc_percent(self, evidx);
|
||||
|
||||
if (self->curr_hot)
|
||||
annotate_browser__set_top(self, self->curr_hot);
|
||||
|
||||
nd = self->curr_hot;
|
||||
if (nd) {
|
||||
int tabs[] = { NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0 };
|
||||
ui_browser__add_exit_keys(&self->b, tabs);
|
||||
}
|
||||
|
||||
if (refresh != 0)
|
||||
newtFormSetTimer(self->b.form, refresh);
|
||||
|
||||
while (1) {
|
||||
key = ui_browser__run(&self->b);
|
||||
|
||||
if (refresh != 0) {
|
||||
annotate_browser__calc_percent(self, evidx);
|
||||
/*
|
||||
* Current line focus got out of the list of most active
|
||||
* lines, NULL it so that if TAB|UNTAB is pressed, we
|
||||
* move to curr_hot (current hottest line).
|
||||
*/
|
||||
if (nd != NULL && RB_EMPTY_NODE(nd))
|
||||
nd = NULL;
|
||||
}
|
||||
|
||||
switch (key) {
|
||||
case -1:
|
||||
/*
|
||||
* FIXME we need to check if it was
|
||||
* es.reason == NEWT_EXIT_TIMER
|
||||
*/
|
||||
if (refresh != 0)
|
||||
symbol__annotate_decay_histogram(sym, evidx);
|
||||
continue;
|
||||
case NEWT_KEY_TAB:
|
||||
nd = rb_prev(nd);
|
||||
if (nd == NULL)
|
||||
nd = rb_last(&self->entries);
|
||||
annotate_browser__set_top(self, nd);
|
||||
if (nd != NULL) {
|
||||
nd = rb_prev(nd);
|
||||
if (nd == NULL)
|
||||
nd = rb_last(&self->entries);
|
||||
} else
|
||||
nd = self->curr_hot;
|
||||
break;
|
||||
case NEWT_KEY_UNTAB:
|
||||
nd = rb_next(nd);
|
||||
if (nd == NULL)
|
||||
nd = rb_first(&self->entries);
|
||||
annotate_browser__set_top(self, nd);
|
||||
if (nd != NULL)
|
||||
nd = rb_next(nd);
|
||||
if (nd == NULL)
|
||||
nd = rb_first(&self->entries);
|
||||
else
|
||||
nd = self->curr_hot;
|
||||
break;
|
||||
case 'H':
|
||||
nd = self->curr_hot;
|
||||
break;
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (nd != NULL)
|
||||
annotate_browser__set_top(self, nd);
|
||||
}
|
||||
out:
|
||||
ui_browser__hide(&self->b);
|
||||
return key;
|
||||
}
|
||||
|
||||
int hist_entry__tui_annotate(struct hist_entry *self)
|
||||
int hist_entry__tui_annotate(struct hist_entry *he, int evidx)
|
||||
{
|
||||
return symbol__tui_annotate(he->ms.sym, he->ms.map, evidx, 0);
|
||||
}
|
||||
|
||||
int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
|
||||
int refresh)
|
||||
{
|
||||
struct objdump_line *pos, *n;
|
||||
struct objdump_line_rb_node *rbpos;
|
||||
LIST_HEAD(head);
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct annotate_browser browser = {
|
||||
.b = {
|
||||
.entries = &head,
|
||||
.entries = ¬es->src->source,
|
||||
.refresh = ui_browser__list_head_refresh,
|
||||
.seek = ui_browser__list_head_seek,
|
||||
.write = annotate_browser__write,
|
||||
.priv = self,
|
||||
.priv = sym,
|
||||
},
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (self->ms.sym == NULL)
|
||||
if (sym == NULL)
|
||||
return -1;
|
||||
|
||||
if (self->ms.map->dso->annotate_warned)
|
||||
if (map->dso->annotate_warned)
|
||||
return -1;
|
||||
|
||||
if (hist_entry__annotate(self, &head, sizeof(*rbpos)) < 0) {
|
||||
if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
|
||||
ui__error_window(ui_helpline__last_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
ui_helpline__push("Press <- or ESC to exit");
|
||||
|
||||
list_for_each_entry(pos, &head, node) {
|
||||
list_for_each_entry(pos, ¬es->src->source, node) {
|
||||
struct objdump_line_rb_node *rbpos;
|
||||
size_t line_len = strlen(pos->line);
|
||||
|
||||
if (browser.b.width < line_len)
|
||||
browser.b.width = line_len;
|
||||
rbpos = objdump_line__rb(pos);
|
||||
rbpos->idx = browser.b.nr_entries++;
|
||||
rbpos->percent = objdump_line__calc_percent(pos, &head, self->ms.sym);
|
||||
if (rbpos->percent < 0.01)
|
||||
continue;
|
||||
objdump__insert_line(&browser.entries, rbpos);
|
||||
}
|
||||
|
||||
/*
|
||||
* Position the browser at the hottest line.
|
||||
*/
|
||||
browser.curr_hot = rb_last(&browser.entries);
|
||||
if (browser.curr_hot)
|
||||
annotate_browser__set_top(&browser, browser.curr_hot);
|
||||
|
||||
browser.b.width += 18; /* Percentage */
|
||||
ret = annotate_browser__run(&browser);
|
||||
list_for_each_entry_safe(pos, n, &head, node) {
|
||||
ret = annotate_browser__run(&browser, evidx, refresh);
|
||||
list_for_each_entry_safe(pos, n, ¬es->src->source, node) {
|
||||
list_del(&pos->node);
|
||||
objdump_line__free(pos);
|
||||
}
|
||||
|
||||
@@ -7,6 +7,8 @@
|
||||
#include <newt.h>
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
#include "../../evsel.h"
|
||||
#include "../../evlist.h"
|
||||
#include "../../hist.h"
|
||||
#include "../../pstack.h"
|
||||
#include "../../sort.h"
|
||||
@@ -292,7 +294,8 @@ static int hist_browser__run(struct hist_browser *self, const char *title)
|
||||
{
|
||||
int key;
|
||||
int exit_keys[] = { 'a', '?', 'h', 'C', 'd', 'D', 'E', 't',
|
||||
NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT, 0, };
|
||||
NEWT_KEY_ENTER, NEWT_KEY_RIGHT, NEWT_KEY_LEFT,
|
||||
NEWT_KEY_TAB, NEWT_KEY_UNTAB, 0, };
|
||||
|
||||
self->b.entries = &self->hists->entries;
|
||||
self->b.nr_entries = self->hists->nr_entries;
|
||||
@@ -377,7 +380,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self,
|
||||
while (node) {
|
||||
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
|
||||
struct rb_node *next = rb_next(node);
|
||||
u64 cumul = cumul_hits(child);
|
||||
u64 cumul = callchain_cumul_hits(child);
|
||||
struct callchain_list *chain;
|
||||
char folded_sign = ' ';
|
||||
int first = true;
|
||||
@@ -638,6 +641,9 @@ static void ui_browser__hists_seek(struct ui_browser *self,
|
||||
struct rb_node *nd;
|
||||
bool first = true;
|
||||
|
||||
if (self->nr_entries == 0)
|
||||
return;
|
||||
|
||||
switch (whence) {
|
||||
case SEEK_SET:
|
||||
nd = hists__filter_entries(rb_first(self->entries));
|
||||
@@ -797,8 +803,11 @@ static int hists__browser_title(struct hists *self, char *bf, size_t size,
|
||||
return printed;
|
||||
}
|
||||
|
||||
int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
static int perf_evsel__hists_browse(struct perf_evsel *evsel,
|
||||
const char *helpline, const char *ev_name,
|
||||
bool left_exits)
|
||||
{
|
||||
struct hists *self = &evsel->hists;
|
||||
struct hist_browser *browser = hist_browser__new(self);
|
||||
struct pstack *fstack;
|
||||
const struct thread *thread_filter = NULL;
|
||||
@@ -818,8 +827,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
hists__browser_title(self, msg, sizeof(msg), ev_name,
|
||||
dso_filter, thread_filter);
|
||||
while (1) {
|
||||
const struct thread *thread;
|
||||
const struct dso *dso;
|
||||
const struct thread *thread = NULL;
|
||||
const struct dso *dso = NULL;
|
||||
char *options[16];
|
||||
int nr_options = 0, choice = 0, i,
|
||||
annotate = -2, zoom_dso = -2, zoom_thread = -2,
|
||||
@@ -827,8 +836,10 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
|
||||
key = hist_browser__run(browser, msg);
|
||||
|
||||
thread = hist_browser__selected_thread(browser);
|
||||
dso = browser->selection->map ? browser->selection->map->dso : NULL;
|
||||
if (browser->he_selection != NULL) {
|
||||
thread = hist_browser__selected_thread(browser);
|
||||
dso = browser->selection->map ? browser->selection->map->dso : NULL;
|
||||
}
|
||||
|
||||
switch (key) {
|
||||
case NEWT_KEY_TAB:
|
||||
@@ -839,7 +850,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
*/
|
||||
goto out_free_stack;
|
||||
case 'a':
|
||||
if (browser->selection->map == NULL &&
|
||||
if (browser->selection == NULL ||
|
||||
browser->selection->map == NULL ||
|
||||
browser->selection->map->dso->annotate_warned)
|
||||
continue;
|
||||
goto do_annotate;
|
||||
@@ -858,6 +870,7 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
"E Expand all callchains\n"
|
||||
"d Zoom into current DSO\n"
|
||||
"t Zoom into current Thread\n"
|
||||
"TAB/UNTAB Switch events\n"
|
||||
"q/CTRL+C Exit browser");
|
||||
continue;
|
||||
case NEWT_KEY_ENTER:
|
||||
@@ -867,8 +880,14 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
case NEWT_KEY_LEFT: {
|
||||
const void *top;
|
||||
|
||||
if (pstack__empty(fstack))
|
||||
if (pstack__empty(fstack)) {
|
||||
/*
|
||||
* Go back to the perf_evsel_menu__run or other user
|
||||
*/
|
||||
if (left_exits)
|
||||
goto out_free_stack;
|
||||
continue;
|
||||
}
|
||||
top = pstack__pop(fstack);
|
||||
if (top == &dso_filter)
|
||||
goto zoom_out_dso;
|
||||
@@ -877,14 +896,16 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
continue;
|
||||
}
|
||||
case NEWT_KEY_ESCAPE:
|
||||
if (!ui__dialog_yesno("Do you really want to exit?"))
|
||||
if (!left_exits &&
|
||||
!ui__dialog_yesno("Do you really want to exit?"))
|
||||
continue;
|
||||
/* Fall thru */
|
||||
default:
|
||||
goto out_free_stack;
|
||||
}
|
||||
|
||||
if (browser->selection->sym != NULL &&
|
||||
if (browser->selection != NULL &&
|
||||
browser->selection->sym != NULL &&
|
||||
!browser->selection->map->dso->annotate_warned &&
|
||||
asprintf(&options[nr_options], "Annotate %s",
|
||||
browser->selection->sym->name) > 0)
|
||||
@@ -903,7 +924,8 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
(dso->kernel ? "the Kernel" : dso->short_name)) > 0)
|
||||
zoom_dso = nr_options++;
|
||||
|
||||
if (browser->selection->map != NULL &&
|
||||
if (browser->selection != NULL &&
|
||||
browser->selection->map != NULL &&
|
||||
asprintf(&options[nr_options], "Browse map details") > 0)
|
||||
browse_map = nr_options++;
|
||||
|
||||
@@ -923,19 +945,11 @@ int hists__browse(struct hists *self, const char *helpline, const char *ev_name)
|
||||
if (choice == annotate) {
|
||||
struct hist_entry *he;
|
||||
do_annotate:
|
||||
if (browser->selection->map->dso->origin == DSO__ORIG_KERNEL) {
|
||||
browser->selection->map->dso->annotate_warned = 1;
|
||||
ui_helpline__puts("No vmlinux file found, can't "
|
||||
"annotate with just a "
|
||||
"kallsyms file");
|
||||
continue;
|
||||
}
|
||||
|
||||
he = hist_browser__selected_entry(browser);
|
||||
if (he == NULL)
|
||||
continue;
|
||||
|
||||
hist_entry__tui_annotate(he);
|
||||
hist_entry__tui_annotate(he, evsel->idx);
|
||||
} else if (choice == browse_map)
|
||||
map__browse(browser->selection->map);
|
||||
else if (choice == zoom_dso) {
|
||||
@@ -984,30 +998,141 @@ out:
|
||||
return key;
|
||||
}
|
||||
|
||||
int hists__tui_browse_tree(struct rb_root *self, const char *help)
|
||||
struct perf_evsel_menu {
|
||||
struct ui_browser b;
|
||||
struct perf_evsel *selection;
|
||||
};
|
||||
|
||||
static void perf_evsel_menu__write(struct ui_browser *browser,
|
||||
void *entry, int row)
|
||||
{
|
||||
struct rb_node *first = rb_first(self), *nd = first, *next;
|
||||
int key = 0;
|
||||
struct perf_evsel_menu *menu = container_of(browser,
|
||||
struct perf_evsel_menu, b);
|
||||
struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
|
||||
bool current_entry = ui_browser__is_current_entry(browser, row);
|
||||
unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
|
||||
const char *ev_name = event_name(evsel);
|
||||
char bf[256], unit;
|
||||
|
||||
while (nd) {
|
||||
struct hists *hists = rb_entry(nd, struct hists, rb_node);
|
||||
const char *ev_name = __event_name(hists->type, hists->config);
|
||||
ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
|
||||
HE_COLORSET_NORMAL);
|
||||
|
||||
nr_events = convert_unit(nr_events, &unit);
|
||||
snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
|
||||
unit, unit == ' ' ? "" : " ", ev_name);
|
||||
slsmg_write_nstring(bf, browser->width);
|
||||
|
||||
if (current_entry)
|
||||
menu->selection = evsel;
|
||||
}
|
||||
|
||||
static int perf_evsel_menu__run(struct perf_evsel_menu *menu, const char *help)
|
||||
{
|
||||
int exit_keys[] = { NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, };
|
||||
struct perf_evlist *evlist = menu->b.priv;
|
||||
struct perf_evsel *pos;
|
||||
const char *ev_name, *title = "Available samples";
|
||||
int key;
|
||||
|
||||
if (ui_browser__show(&menu->b, title,
|
||||
"ESC: exit, ENTER|->: Browse histograms") < 0)
|
||||
return -1;
|
||||
|
||||
ui_browser__add_exit_keys(&menu->b, exit_keys);
|
||||
|
||||
while (1) {
|
||||
key = ui_browser__run(&menu->b);
|
||||
|
||||
switch (key) {
|
||||
case NEWT_KEY_RIGHT:
|
||||
case NEWT_KEY_ENTER:
|
||||
if (!menu->selection)
|
||||
continue;
|
||||
pos = menu->selection;
|
||||
browse_hists:
|
||||
ev_name = event_name(pos);
|
||||
key = perf_evsel__hists_browse(pos, help, ev_name, true);
|
||||
ui_browser__show_title(&menu->b, title);
|
||||
break;
|
||||
case NEWT_KEY_LEFT:
|
||||
continue;
|
||||
case NEWT_KEY_ESCAPE:
|
||||
if (!ui__dialog_yesno("Do you really want to exit?"))
|
||||
continue;
|
||||
/* Fall thru */
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
|
||||
key = hists__browse(hists, help, ev_name);
|
||||
switch (key) {
|
||||
case NEWT_KEY_TAB:
|
||||
next = rb_next(nd);
|
||||
if (next)
|
||||
nd = next;
|
||||
break;
|
||||
if (pos->node.next == &evlist->entries)
|
||||
pos = list_entry(evlist->entries.next, struct perf_evsel, node);
|
||||
else
|
||||
pos = list_entry(pos->node.next, struct perf_evsel, node);
|
||||
goto browse_hists;
|
||||
case NEWT_KEY_UNTAB:
|
||||
if (nd == first)
|
||||
continue;
|
||||
nd = rb_prev(nd);
|
||||
if (pos->node.prev == &evlist->entries)
|
||||
pos = list_entry(evlist->entries.prev, struct perf_evsel, node);
|
||||
else
|
||||
pos = list_entry(pos->node.prev, struct perf_evsel, node);
|
||||
goto browse_hists;
|
||||
case 'q':
|
||||
case CTRL('c'):
|
||||
goto out;
|
||||
default:
|
||||
return key;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
ui_browser__hide(&menu->b);
|
||||
return key;
|
||||
}
|
||||
|
||||
static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
|
||||
const char *help)
|
||||
{
|
||||
struct perf_evsel *pos;
|
||||
struct perf_evsel_menu menu = {
|
||||
.b = {
|
||||
.entries = &evlist->entries,
|
||||
.refresh = ui_browser__list_head_refresh,
|
||||
.seek = ui_browser__list_head_seek,
|
||||
.write = perf_evsel_menu__write,
|
||||
.nr_entries = evlist->nr_entries,
|
||||
.priv = evlist,
|
||||
},
|
||||
};
|
||||
|
||||
ui_helpline__push("Press ESC to exit");
|
||||
|
||||
list_for_each_entry(pos, &evlist->entries, node) {
|
||||
const char *ev_name = event_name(pos);
|
||||
size_t line_len = strlen(ev_name) + 7;
|
||||
|
||||
if (menu.b.width < line_len)
|
||||
menu.b.width = line_len;
|
||||
/*
|
||||
* Cache the evsel name, tracepoints have a _high_ cost per
|
||||
* event_name() call.
|
||||
*/
|
||||
if (pos->name == NULL)
|
||||
pos->name = strdup(ev_name);
|
||||
}
|
||||
|
||||
return perf_evsel_menu__run(&menu, help);
|
||||
}
|
||||
|
||||
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help)
|
||||
{
|
||||
|
||||
if (evlist->nr_entries == 1) {
|
||||
struct perf_evsel *first = list_entry(evlist->entries.next,
|
||||
struct perf_evsel, node);
|
||||
const char *ev_name = event_name(first);
|
||||
return perf_evsel__hists_browse(first, help, ev_name, false);
|
||||
}
|
||||
|
||||
return __perf_evlist__tui_browse_hists(evlist, help);
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ static int ui_entry__read(const char *title, char *bf, size_t size, int width)
|
||||
out_free_form:
|
||||
newtPopWindow();
|
||||
newtFormDestroy(form);
|
||||
return 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
struct map_browser {
|
||||
|
||||
213
tools/perf/util/ui/browsers/top.c
Normal file
213
tools/perf/util/ui/browsers/top.c
Normal file
@@ -0,0 +1,213 @@
|
||||
/*
|
||||
* Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
|
||||
*
|
||||
* Parts came from builtin-{top,stat,record}.c, see those files for further
|
||||
* copyright notes.
|
||||
*
|
||||
* Released under the GPL v2. (and only v2, not any later version)
|
||||
*/
|
||||
#include "../browser.h"
|
||||
#include "../../annotate.h"
|
||||
#include "../helpline.h"
|
||||
#include "../libslang.h"
|
||||
#include "../util.h"
|
||||
#include "../../evlist.h"
|
||||
#include "../../hist.h"
|
||||
#include "../../sort.h"
|
||||
#include "../../symbol.h"
|
||||
#include "../../top.h"
|
||||
|
||||
struct perf_top_browser {
|
||||
struct ui_browser b;
|
||||
struct rb_root root;
|
||||
struct sym_entry *selection;
|
||||
float sum_ksamples;
|
||||
int dso_width;
|
||||
int dso_short_width;
|
||||
int sym_width;
|
||||
};
|
||||
|
||||
static void perf_top_browser__write(struct ui_browser *browser, void *entry, int row)
|
||||
{
|
||||
struct perf_top_browser *top_browser = container_of(browser, struct perf_top_browser, b);
|
||||
struct sym_entry *syme = rb_entry(entry, struct sym_entry, rb_node);
|
||||
bool current_entry = ui_browser__is_current_entry(browser, row);
|
||||
struct symbol *symbol = sym_entry__symbol(syme);
|
||||
struct perf_top *top = browser->priv;
|
||||
int width = browser->width;
|
||||
double pcnt;
|
||||
|
||||
pcnt = 100.0 - (100.0 * ((top_browser->sum_ksamples - syme->snap_count) /
|
||||
top_browser->sum_ksamples));
|
||||
ui_browser__set_percent_color(browser, pcnt, current_entry);
|
||||
|
||||
if (top->evlist->nr_entries == 1 || !top->display_weighted) {
|
||||
slsmg_printf("%20.2f ", syme->weight);
|
||||
width -= 24;
|
||||
} else {
|
||||
slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count);
|
||||
width -= 23;
|
||||
}
|
||||
|
||||
slsmg_printf("%4.1f%%", pcnt);
|
||||
width -= 7;
|
||||
|
||||
if (verbose) {
|
||||
slsmg_printf(" %016" PRIx64, symbol->start);
|
||||
width -= 17;
|
||||
}
|
||||
|
||||
slsmg_printf(" %-*.*s ", top_browser->sym_width, top_browser->sym_width,
|
||||
symbol->name);
|
||||
width -= top_browser->sym_width;
|
||||
slsmg_write_nstring(width >= syme->map->dso->long_name_len ?
|
||||
syme->map->dso->long_name :
|
||||
syme->map->dso->short_name, width);
|
||||
|
||||
if (current_entry)
|
||||
top_browser->selection = syme;
|
||||
}
|
||||
|
||||
static void perf_top_browser__update_rb_tree(struct perf_top_browser *browser)
|
||||
{
|
||||
struct perf_top *top = browser->b.priv;
|
||||
u64 top_idx = browser->b.top_idx;
|
||||
|
||||
browser->root = RB_ROOT;
|
||||
browser->b.top = NULL;
|
||||
browser->sum_ksamples = perf_top__decay_samples(top, &browser->root);
|
||||
/*
|
||||
* No active symbols
|
||||
*/
|
||||
if (top->rb_entries == 0)
|
||||
return;
|
||||
|
||||
perf_top__find_widths(top, &browser->root, &browser->dso_width,
|
||||
&browser->dso_short_width,
|
||||
&browser->sym_width);
|
||||
if (browser->sym_width + browser->dso_width > browser->b.width - 29) {
|
||||
browser->dso_width = browser->dso_short_width;
|
||||
if (browser->sym_width + browser->dso_width > browser->b.width - 29)
|
||||
browser->sym_width = browser->b.width - browser->dso_width - 29;
|
||||
}
|
||||
|
||||
/*
|
||||
* Adjust the ui_browser indexes since the entries in the browser->root
|
||||
* rb_tree may have changed, then seek it from start, so that we get a
|
||||
* possible new top of the screen.
|
||||
*/
|
||||
browser->b.nr_entries = top->rb_entries;
|
||||
|
||||
if (top_idx >= browser->b.nr_entries) {
|
||||
if (browser->b.height >= browser->b.nr_entries)
|
||||
top_idx = browser->b.nr_entries - browser->b.height;
|
||||
else
|
||||
top_idx = 0;
|
||||
}
|
||||
|
||||
if (browser->b.index >= top_idx + browser->b.height)
|
||||
browser->b.index = top_idx + browser->b.index - browser->b.top_idx;
|
||||
|
||||
if (browser->b.index >= browser->b.nr_entries)
|
||||
browser->b.index = browser->b.nr_entries - 1;
|
||||
|
||||
browser->b.top_idx = top_idx;
|
||||
browser->b.seek(&browser->b, top_idx, SEEK_SET);
|
||||
}
|
||||
|
||||
static void perf_top_browser__annotate(struct perf_top_browser *browser)
|
||||
{
|
||||
struct sym_entry *syme = browser->selection;
|
||||
struct symbol *sym = sym_entry__symbol(syme);
|
||||
struct annotation *notes = symbol__annotation(sym);
|
||||
struct perf_top *top = browser->b.priv;
|
||||
|
||||
if (notes->src != NULL)
|
||||
goto do_annotation;
|
||||
|
||||
pthread_mutex_lock(¬es->lock);
|
||||
|
||||
top->sym_filter_entry = NULL;
|
||||
|
||||
if (symbol__alloc_hist(sym, top->evlist->nr_entries) < 0) {
|
||||
pr_err("Not enough memory for annotating '%s' symbol!\n",
|
||||
sym->name);
|
||||
pthread_mutex_unlock(¬es->lock);
|
||||
return;
|
||||
}
|
||||
|
||||
top->sym_filter_entry = syme;
|
||||
|
||||
pthread_mutex_unlock(¬es->lock);
|
||||
do_annotation:
|
||||
symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000);
|
||||
}
|
||||
|
||||
static int perf_top_browser__run(struct perf_top_browser *browser)
|
||||
{
|
||||
int key;
|
||||
char title[160];
|
||||
struct perf_top *top = browser->b.priv;
|
||||
int delay_msecs = top->delay_secs * 1000;
|
||||
int exit_keys[] = { 'a', NEWT_KEY_ENTER, NEWT_KEY_RIGHT, 0, };
|
||||
|
||||
perf_top_browser__update_rb_tree(browser);
|
||||
perf_top__header_snprintf(top, title, sizeof(title));
|
||||
perf_top__reset_sample_counters(top);
|
||||
|
||||
if (ui_browser__show(&browser->b, title,
|
||||
"ESC: exit, ENTER|->|a: Live Annotate") < 0)
|
||||
return -1;
|
||||
|
||||
newtFormSetTimer(browser->b.form, delay_msecs);
|
||||
ui_browser__add_exit_keys(&browser->b, exit_keys);
|
||||
|
||||
while (1) {
|
||||
key = ui_browser__run(&browser->b);
|
||||
|
||||
switch (key) {
|
||||
case -1:
|
||||
/* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */
|
||||
perf_top_browser__update_rb_tree(browser);
|
||||
perf_top__header_snprintf(top, title, sizeof(title));
|
||||
perf_top__reset_sample_counters(top);
|
||||
ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT);
|
||||
SLsmg_gotorc(0, 0);
|
||||
slsmg_write_nstring(title, browser->b.width);
|
||||
break;
|
||||
case 'a':
|
||||
case NEWT_KEY_RIGHT:
|
||||
case NEWT_KEY_ENTER:
|
||||
if (browser->selection)
|
||||
perf_top_browser__annotate(browser);
|
||||
break;
|
||||
case NEWT_KEY_LEFT:
|
||||
continue;
|
||||
case NEWT_KEY_ESCAPE:
|
||||
if (!ui__dialog_yesno("Do you really want to exit?"))
|
||||
continue;
|
||||
/* Fall thru */
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
out:
|
||||
ui_browser__hide(&browser->b);
|
||||
return key;
|
||||
}
|
||||
|
||||
int perf_top__tui_browser(struct perf_top *top)
|
||||
{
|
||||
struct perf_top_browser browser = {
|
||||
.b = {
|
||||
.entries = &browser.root,
|
||||
.refresh = ui_browser__rb_tree_refresh,
|
||||
.seek = ui_browser__rb_tree_seek,
|
||||
.write = perf_top_browser__write,
|
||||
.priv = top,
|
||||
},
|
||||
};
|
||||
|
||||
ui_helpline__push("Press <- or ESC to exit");
|
||||
return perf_top_browser__run(&browser);
|
||||
}
|
||||
@@ -5,6 +5,7 @@
|
||||
|
||||
#include "../debug.h"
|
||||
#include "helpline.h"
|
||||
#include "ui.h"
|
||||
|
||||
void ui_helpline__pop(void)
|
||||
{
|
||||
@@ -55,7 +56,8 @@ int ui_helpline__show_help(const char *format, va_list ap)
|
||||
int ret;
|
||||
static int backlog;
|
||||
|
||||
ret = vsnprintf(ui_helpline__last_msg + backlog,
|
||||
pthread_mutex_lock(&ui__lock);
|
||||
ret = vsnprintf(ui_helpline__last_msg + backlog,
|
||||
sizeof(ui_helpline__last_msg) - backlog, format, ap);
|
||||
backlog += ret;
|
||||
|
||||
@@ -64,6 +66,7 @@ int ui_helpline__show_help(const char *format, va_list ap)
|
||||
newtRefresh();
|
||||
backlog = 0;
|
||||
}
|
||||
pthread_mutex_unlock(&ui__lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -13,11 +13,11 @@
|
||||
|
||||
#if SLANG_VERSION < 20104
|
||||
#define slsmg_printf(msg, args...) \
|
||||
SLsmg_printf((char *)msg, ##args)
|
||||
SLsmg_printf((char *)(msg), ##args)
|
||||
#define slsmg_write_nstring(msg, len) \
|
||||
SLsmg_write_nstring((char *)msg, len)
|
||||
SLsmg_write_nstring((char *)(msg), len)
|
||||
#define sltt_set_color(obj, name, fg, bg) \
|
||||
SLtt_set_color(obj,(char *)name, (char *)fg, (char *)bg)
|
||||
SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg))
|
||||
#else
|
||||
#define slsmg_printf SLsmg_printf
|
||||
#define slsmg_write_nstring SLsmg_write_nstring
|
||||
|
||||
@@ -6,6 +6,9 @@
|
||||
#include "../debug.h"
|
||||
#include "browser.h"
|
||||
#include "helpline.h"
|
||||
#include "ui.h"
|
||||
|
||||
pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
|
||||
static void newt_suspend(void *d __used)
|
||||
{
|
||||
@@ -14,11 +17,12 @@ static void newt_suspend(void *d __used)
|
||||
newtResume();
|
||||
}
|
||||
|
||||
void setup_browser(void)
|
||||
void setup_browser(bool fallback_to_pager)
|
||||
{
|
||||
if (!isatty(1) || !use_browser || dump_trace) {
|
||||
use_browser = 0;
|
||||
setup_pager();
|
||||
if (fallback_to_pager)
|
||||
setup_pager();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
8
tools/perf/util/ui/ui.h
Normal file
8
tools/perf/util/ui/ui.h
Normal file
@@ -0,0 +1,8 @@
|
||||
#ifndef _PERF_UI_H_
|
||||
#define _PERF_UI_H_ 1
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
extern pthread_mutex_t ui__lock;
|
||||
|
||||
#endif /* _PERF_UI_H_ */
|
||||
@@ -9,6 +9,7 @@
|
||||
#include "../debug.h"
|
||||
#include "browser.h"
|
||||
#include "helpline.h"
|
||||
#include "ui.h"
|
||||
#include "util.h"
|
||||
|
||||
static void newt_form__set_exit_keys(newtComponent self)
|
||||
@@ -118,10 +119,12 @@ void ui__warning(const char *format, ...)
|
||||
va_list args;
|
||||
|
||||
va_start(args, format);
|
||||
if (use_browser > 0)
|
||||
if (use_browser > 0) {
|
||||
pthread_mutex_lock(&ui__lock);
|
||||
newtWinMessagev((char *)warning_str, (char *)ok,
|
||||
(char *)format, args);
|
||||
else
|
||||
pthread_mutex_unlock(&ui__lock);
|
||||
} else
|
||||
vfprintf(stderr, format, args);
|
||||
va_end(args);
|
||||
}
|
||||
|
||||
@@ -70,9 +70,7 @@
|
||||
#include <sys/poll.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/ioctl.h>
|
||||
#ifndef NO_SYS_SELECT_H
|
||||
#include <sys/select.h>
|
||||
#endif
|
||||
#include <netinet/in.h>
|
||||
#include <netinet/tcp.h>
|
||||
#include <arpa/inet.h>
|
||||
@@ -83,10 +81,6 @@
|
||||
#include "types.h"
|
||||
#include <sys/ttydefaults.h>
|
||||
|
||||
#ifndef NO_ICONV
|
||||
#include <iconv.h>
|
||||
#endif
|
||||
|
||||
extern const char *graph_line;
|
||||
extern const char *graph_dotted_line;
|
||||
extern char buildid_dir[];
|
||||
@@ -236,26 +230,6 @@ static inline int sane_case(int x, int high)
|
||||
return x;
|
||||
}
|
||||
|
||||
#ifndef DIR_HAS_BSD_GROUP_SEMANTICS
|
||||
# define FORCE_DIR_SET_GID S_ISGID
|
||||
#else
|
||||
# define FORCE_DIR_SET_GID 0
|
||||
#endif
|
||||
|
||||
#ifdef NO_NSEC
|
||||
#undef USE_NSEC
|
||||
#define ST_CTIME_NSEC(st) 0
|
||||
#define ST_MTIME_NSEC(st) 0
|
||||
#else
|
||||
#ifdef USE_ST_TIMESPEC
|
||||
#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctimespec.tv_nsec))
|
||||
#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtimespec.tv_nsec))
|
||||
#else
|
||||
#define ST_CTIME_NSEC(st) ((unsigned int)((st).st_ctim.tv_nsec))
|
||||
#define ST_MTIME_NSEC(st) ((unsigned int)((st).st_mtim.tv_nsec))
|
||||
#endif
|
||||
#endif
|
||||
|
||||
int mkdir_p(char *path, mode_t mode);
|
||||
int copyfile(const char *from, const char *to);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user