Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
@@ -2946,11 +2946,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
        return NULL;
}

__weak
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
}


/*
 * We assume there is only KVM supporting the callbacks.
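A note on the __weak annotation in the hunk above: it marks the generic definition as a weak symbol, so an architecture that links in its own strong definition of the same function silently replaces the empty stub. A minimal, hedged userspace sketch of that linkage behaviour (illustrative names, not kernel code):

    /* weak_demo.c - the weak fallback is used unless a strong definition
     * of hook() is linked in from another object file. */
    #include <stdio.h>

    __attribute__((weak)) void hook(void)
    {
            printf("weak fallback called\n");
    }

    int main(void)
    {
            hook();         /* resolves to the strong hook() if one exists */
            return 0;
    }

    /* In a separate file, an override would simply be:
     *     void hook(void) { printf("strong override called\n"); }
     */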
@@ -229,23 +229,6 @@ config FTRACE_SYSCALLS
        help
          Basic tracer to catch the syscall entry and exit events.

config BOOT_TRACER
        bool "Trace boot initcalls"
        select GENERIC_TRACER
        select CONTEXT_SWITCH_TRACER
        help
          This tracer helps developers to optimize boot times: it records
          the timings of the initcalls and traces key events and the identity
          of tasks that can cause boot delays, such as context-switches.

          Its aim is to be parsed by the scripts/bootgraph.pl tool to
          produce pretty graphics about boot inefficiencies, giving a visual
          representation of the delays during initcalls - but the raw
          /debug/tracing/trace text output is readable too.

          You must pass in initcall_debug and ftrace=initcall to the kernel
          command line to enable this on bootup.

config TRACE_BRANCH_PROFILING
        bool
        select GENERIC_TRACER
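For context on the BOOT_TRACER help text above: the two options it names are ordinary kernel boot parameters, so enabling the tracer at boot means appending them to whatever command line the bootloader already passes, roughly (placeholder shown for the existing parameters):

    <existing kernel command line> initcall_debug ftrace=initcall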
@@ -371,26 +354,6 @@ config STACK_TRACER

          Say N if unsure.

config KMEMTRACE
        bool "Trace SLAB allocations"
        select GENERIC_TRACER
        help
          kmemtrace provides tracing for slab allocator functions, such as
          kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
          data is then fed to the userspace application in order to analyse
          allocation hotspots, internal fragmentation and so on, making it
          possible to see how well an allocator performs, as well as debug
          and profile kernel code.

          This requires an userspace application to use. See
          Documentation/trace/kmemtrace.txt for more information.

          Saying Y will make the kernel somewhat larger and slower. However,
          if you disable kmemtrace at run-time or boot-time, the performance
          impact is minimal (depending on the arch the kernel is built for).

          If unsure, say N.

config WORKQUEUE_TRACER
        bool "Trace workqueues"
        select GENERIC_TRACER
@@ -38,10 +38,8 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
ifeq ($(CONFIG_BLOCK),y)
@@ -1,529 +0,0 @@
/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output than the original one */
#define TRACE_KMEM_OPT_MINIMAL 0x1

static struct tracer_opt kmem_opts[] = {
        /* Default disable the minimalistic output */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val = 0,
        .opts = kmem_opts
};

static struct trace_array *kmemtrace_array;

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct ftrace_event_call *call = &event_kmem_alloc;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_alloc_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type = TRACE_KMEM_ALLOC;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;
        entry->bytes_req = bytes_req;
        entry->bytes_alloc = bytes_alloc;
        entry->gfp_flags = gfp_flags;
        entry->node = node;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct ftrace_event_call *call = &event_kmem_free;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type = TRACE_KMEM_FREE;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;

        if (!filter_check_discard(call, entry, tr->buffer, event))
                ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

static void kmemtrace_kmalloc(void *ignore,
                              unsigned long call_site,
                              const void *ptr,
                              size_t bytes_req,
                              size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(void *ignore,
                                       unsigned long call_site,
                                       const void *ptr,
                                       size_t bytes_req,
                                       size_t bytes_alloc,
                                       gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(void *ignore,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(void *ignore,
                                            unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags,
                                            int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void
kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(void *ignore,
                                      unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}

static int kmemtrace_start_probes(void)
{
        int err;

        err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
        if (err)
                return err;
        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
        if (err)
                return err;
        err = register_trace_kfree(kmemtrace_kfree, NULL);
        if (err)
                return err;
        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);

        return err;
}

static void kmemtrace_stop_probes(void)
{
        unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
        unregister_trace_kfree(kmemtrace_kfree, NULL);
        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
}

static int kmem_trace_init(struct trace_array *tr)
{
        kmemtrace_array = tr;

        tracing_reset_online_cpus(tr);

        kmemtrace_start_probes();

        return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
        kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS "
                      " POINTER NODE CALLER\n");
        seq_printf(s, "# FREE | | | | "
                      " | | | |\n");
        seq_printf(s, "# |\n\n");
}

/*
 * The following functions give the original output from kmemtrace,
 * plus the origin CPU, since reordering occurs in-kernel now.
 */

#define KMEMTRACE_USER_ALLOC 0
#define KMEMTRACE_USER_FREE 1

struct kmemtrace_user_event {
        u8 event_id;
        u8 type_id;
        u16 event_size;
        u32 cpu;
        u64 timestamp;
        unsigned long call_site;
        unsigned long ptr;
};

struct kmemtrace_user_event_alloc {
        size_t bytes_req;
        size_t bytes_alloc;
        unsigned gfp_flags;
        int node;
};

static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_alloc_entry *entry;
        int ret;

        trace_assign_type(entry, iter->ent);

        ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
            "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
            entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
            (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
            (unsigned long)entry->gfp_flags, entry->node);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags,
                     struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_free_entry *entry;
        int ret;

        trace_assign_type(entry, iter->ent);

        ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
                               entry->type_id, (void *)entry->call_site,
                               (unsigned long)entry->ptr);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_alloc_entry *entry;
        struct kmemtrace_user_event *ev;
        struct kmemtrace_user_event_alloc *ev_alloc;

        trace_assign_type(entry, iter->ent);

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id = KMEMTRACE_USER_ALLOC;
        ev->type_id = entry->type_id;
        ev->event_size = sizeof(*ev) + sizeof(*ev_alloc);
        ev->cpu = iter->cpu;
        ev->timestamp = iter->ts;
        ev->call_site = entry->call_site;
        ev->ptr = (unsigned long)entry->ptr;

        ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
        if (!ev_alloc)
                return TRACE_TYPE_PARTIAL_LINE;

        ev_alloc->bytes_req = entry->bytes_req;
        ev_alloc->bytes_alloc = entry->bytes_alloc;
        ev_alloc->gfp_flags = entry->gfp_flags;
        ev_alloc->node = entry->node;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
                          struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_free_entry *entry;
        struct kmemtrace_user_event *ev;

        trace_assign_type(entry, iter->ent);

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id = KMEMTRACE_USER_FREE;
        ev->type_id = entry->type_id;
        ev->event_size = sizeof(*ev);
        ev->cpu = iter->cpu;
        ev->timestamp = iter->ts;
        ev->call_site = entry->call_site;
        ev->ptr = (unsigned long)entry->ptr;

        return TRACE_TYPE_HANDLED;
}

/* The two other following provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter)
{
        struct kmemtrace_alloc_entry *entry;
        struct trace_seq *s = &iter->seq;
        int ret;

        trace_assign_type(entry, iter->ent);

        /* Alloc entry */
        ret = trace_seq_printf(s, " + ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P ");
                break;
        default:
                ret = trace_seq_printf(s, "? ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Flags
         * TODO: would be better to see the name of the GFP flag names
         */
        ret = trace_seq_printf(s, "%08x ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node and call site*/
        ret = trace_seq_printf(s, "%4d %pf\n", entry->node,
                               (void *)entry->call_site);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter)
{
        struct kmemtrace_free_entry *entry;
        struct trace_seq *s = &iter->seq;
        int ret;

        trace_assign_type(entry, iter->ent);

        /* Free entry */
        ret = trace_seq_printf(s, " - ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P ");
                break;
        default:
                ret = trace_seq_printf(s, "? ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, " ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node and print call site*/
        ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return TRACE_TYPE_UNHANDLED;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC:
                return kmemtrace_print_alloc_compress(iter);
        case TRACE_KMEM_FREE:
                return kmemtrace_print_free_compress(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

static struct trace_event_functions kmem_trace_alloc_funcs = {
        .trace = kmemtrace_print_alloc,
        .binary = kmemtrace_print_alloc_user,
};

static struct trace_event kmem_trace_alloc = {
        .type = TRACE_KMEM_ALLOC,
        .funcs = &kmem_trace_alloc_funcs,
};

static struct trace_event_functions kmem_trace_free_funcs = {
        .trace = kmemtrace_print_free,
        .binary = kmemtrace_print_free_user,
};

static struct trace_event kmem_trace_free = {
        .type = TRACE_KMEM_FREE,
        .funcs = &kmem_trace_free_funcs,
};

static struct tracer kmem_tracer __read_mostly = {
        .name = "kmemtrace",
        .init = kmem_trace_init,
        .reset = kmem_trace_reset,
        .print_line = kmemtrace_print_line,
        .print_header = kmemtrace_headers,
        .flags = &kmem_tracer_flags
};

void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
        if (!register_ftrace_event(&kmem_trace_alloc)) {
                pr_warning("Warning: could not register kmem events\n");
                return 1;
        }

        if (!register_ftrace_event(&kmem_trace_free)) {
                pr_warning("Warning: could not register kmem events\n");
                return 1;
        }

        if (register_tracer(&kmem_tracer) != 0) {
                pr_warning("Warning: could not register the kmem tracer\n");
                return 1;
        }

        return 0;
}
device_initcall(init_kmem_tracer);
@@ -4596,9 +4596,6 @@ __init static int tracer_alloc_buffers(void)

        register_tracer(&nop_trace);
        current_trace = &nop_trace;
#ifdef CONFIG_BOOT_TRACER
        register_tracer(&boot_tracer);
#endif
        /* All seems OK, enable tracing */
        tracing_disabled = 0;

@@ -9,10 +9,7 @@
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <linux/hw_breakpoint.h>

#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

@@ -29,26 +26,15 @@ enum trace_type {
        TRACE_MMIO_RW,
        TRACE_MMIO_MAP,
        TRACE_BRANCH,
        TRACE_BOOT_CALL,
        TRACE_BOOT_RET,
        TRACE_GRAPH_RET,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_KMEM_ALLOC,
        TRACE_KMEM_FREE,
        TRACE_BLK,
        TRACE_KSYM,

        __TRACE_LAST_TYPE,
};

enum kmemtrace_type_id {
        KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */
        KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */
        KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */
};

extern struct tracer boot_tracer;

#undef __field
#define __field(type, item) type item;
@@ -209,17 +195,11 @@ extern void __ftrace_bad_type(void);
                  TRACE_MMIO_RW); \
        IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
                  TRACE_MMIO_MAP); \
        IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
        IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
        IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
        IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \
                  TRACE_GRAPH_ENT); \
        IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \
                  TRACE_GRAPH_RET); \
        IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \
                  TRACE_KMEM_ALLOC); \
        IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
                  TRACE_KMEM_FREE); \
        IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
        __ftrace_bad_type(); \
        } while (0)

@@ -1,185 +0,0 @@
/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/* Tells the boot tracer that the pre_smp_initcalls are finished.
 * So we are ready .
 * It doesn't enable sched events tracing however.
 * You have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}

void enable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_start_sched_switch_record();
}

void disable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}

static int boot_trace_init(struct trace_array *tr)
{
        boot_trace = tr;

        if (!tr)
                return 0;

        tracing_reset_online_cpus(tr);

        tracing_sched_switch_assign_trace(tr);
        return 0;
}

static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
                               (unsigned long)ts, nsec_rem, call->func, call->caller);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                               "returned %d after %llu msecs\n",
                               (unsigned long) ts,
                               nsec_rem,
                               init_ret->func, init_ret->result, init_ret->duration);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

struct tracer boot_tracer __read_mostly =
{
        .name = "initcall",
        .init = boot_trace_init,
        .reset = tracing_reset_online_cpus,
        .print_line = initcall_print_line,
};

void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_call;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_call *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_call = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
        preempt_enable();
}

void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_ret;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_ret *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_ret = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
        preempt_enable();
}
@@ -271,33 +271,6 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
                 __entry->map_id, __entry->opcode)
);

FTRACE_ENTRY(boot_call, trace_boot_call,

        TRACE_BOOT_CALL,

        F_STRUCT(
                __field_struct( struct boot_trace_call, boot_call )
                __field_desc( pid_t, boot_call, caller )
                __array_desc( char, boot_call, func, KSYM_SYMBOL_LEN)
        ),

        F_printk("%d %s", __entry->caller, __entry->func)
);

FTRACE_ENTRY(boot_ret, trace_boot_ret,

        TRACE_BOOT_RET,

        F_STRUCT(
                __field_struct( struct boot_trace_ret, boot_ret )
                __array_desc( char, boot_ret, func, KSYM_SYMBOL_LEN)
                __field_desc( int, boot_ret, result )
                __field_desc( unsigned long, boot_ret, duration )
        ),

        F_printk("%s %d %lx",
                 __entry->func, __entry->result, __entry->duration)
);

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
@@ -318,41 +291,6 @@ FTRACE_ENTRY(branch, trace_branch,
                 __entry->func, __entry->file, __entry->correct)
);

FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,

        TRACE_KMEM_ALLOC,

        F_STRUCT(
                __field( enum kmemtrace_type_id, type_id )
                __field( unsigned long, call_site )
                __field( const void *, ptr )
                __field( size_t, bytes_req )
                __field( size_t, bytes_alloc )
                __field( gfp_t, gfp_flags )
                __field( int, node )
        ),

        F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi"
                 " flags:%x node:%d",
                 __entry->type_id, __entry->call_site, __entry->ptr,
                 __entry->bytes_req, __entry->bytes_alloc,
                 __entry->gfp_flags, __entry->node)
);

FTRACE_ENTRY(kmem_free, kmemtrace_free_entry,

        TRACE_KMEM_FREE,

        F_STRUCT(
                __field( enum kmemtrace_type_id, type_id )
                __field( unsigned long, call_site )
                __field( const void *, ptr )
        ),

        F_printk("type:%u call_site:%lx ptr:%p",
                 __entry->type_id, __entry->call_site, __entry->ptr)
);

FTRACE_ENTRY(ksym_trace, ksym_trace_entry,

        TRACE_KSYM,

@@ -9,8 +9,6 @@
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

static char *perf_trace_buf[4];

/*
@@ -33,12 +33,13 @@ static DEFINE_MUTEX(sample_timer_lock);
  */
 static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);

-struct stack_frame {
+struct stack_frame_user {
        const void __user *next_fp;
        unsigned long return_address;
 };

-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+static int
+copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 {
        int ret;

@@ -125,7 +126,7 @@ trace_kernel(struct pt_regs *regs, struct trace_array *tr,
 static void timer_notify(struct pt_regs *regs, int cpu)
 {
        struct trace_array_cpu *data;
-       struct stack_frame frame;
+       struct stack_frame_user frame;
        struct trace_array *tr;
        const void __user *fp;
        int is_user;
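The stack_frame_user structure renamed in the hunk above pairs a saved frame pointer with a return address, which is what a frame-pointer-based user stack walk consumes: read one frame, record its return address, then follow next_fp to the caller's frame. A self-contained, hedged sketch of that walk (plain userspace C over a faked frame chain, not the tracer's actual copy-from-user path):

    #include <stdio.h>
    #include <stddef.h>

    struct stack_frame_user {
            const void *next_fp;            /* saved frame pointer of the caller */
            unsigned long return_address;   /* address the caller resumes at */
    };

    int main(void)
    {
            /* Fake three-frame call chain ending in a NULL frame pointer. */
            struct stack_frame_user frames[3] = {
                    { &frames[1], 0x1001 },
                    { &frames[2], 0x1002 },
                    { NULL,       0x1003 },
            };
            const struct stack_frame_user *fp = &frames[0];

            while (fp) {
                    printf("return address: 0x%lx\n", fp->return_address);
                    fp = fp->next_fp;       /* walk to the caller's frame */
            }
            return 0;
    }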