Kernel源码笔记目录
Ftrace
/*
 * register_tracer - register a new tracer with the ftrace framework.
 * @type: the tracer to add; must have a unique name shorter than
 *        MAX_TRACER_SIZE.
 *
 * Validates @type, rejects duplicates, fills in default flag handling,
 * runs the tracer's selftest, and only then links @type onto the global
 * trace_types list.  If this tracer was requested on the boot command
 * line (default_bootup_tracer), it is activated immediately.
 *
 * Returns 0 on success; -1 on a bad name or duplicate, -EPERM under
 * lockdown, -ENOMEM on allocation failure, or the selftest's error code.
 */
int __init register_tracer ( struct tracer * type)
{
struct tracer * t;
int ret = 0 ;
/* Tracers are looked up by name; an anonymous one cannot be selected. */
if ( ! type-> name) {
pr_info ( "Tracer must have a name\n" ) ;
return - 1 ;
}
/* The name is later compared with strncmp(..., MAX_TRACER_SIZE). */
if ( strlen ( type-> name) >= MAX_TRACER_SIZE) {
pr_info ( "Tracer has a name longer than %d\n" , MAX_TRACER_SIZE) ;
return - 1 ;
}
/* Kernel lockdown forbids extending the tracing facility. */
if ( security_locked_down ( LOCKDOWN_TRACEFS) ) {
pr_warn ( "Can not register tracer %s due to lockdown\n" ,
type-> name) ;
return - EPERM;
}
/* trace_types_lock protects the global trace_types list. */
mutex_lock ( & trace_types_lock) ;
tracing_selftest_running = true;
/* Refuse to register two tracers under the same name. */
for ( t = trace_types; t; t = t-> next) {
if ( strcmp ( type-> name, t-> name) == 0 ) {
pr_info ( "Tracer %s already registered\n" ,
type-> name) ;
ret = - 1 ;
goto out;
}
}
/* Provide defaults so flag handling never dereferences NULL. */
if ( ! type-> set_flag)
type-> set_flag = & dummy_set_flag;
if ( ! type-> flags) {
type-> flags = kmalloc ( sizeof ( * type-> flags) , GFP_KERNEL) ;
if ( ! type-> flags) {
ret = - ENOMEM;
goto out;
}
type-> flags-> val = 0 ;
type-> flags-> opts = dummy_tracer_opt;
} else
if ( ! type-> flags-> opts)
type-> flags-> opts = dummy_tracer_opt;
/* Link the flags back to their owning tracer. */
type-> flags-> trace = type;
/* The selftest must pass before the tracer becomes visible. */
ret = run_tracer_selftest ( type) ;
if ( ret < 0 )
goto out;
/* Push onto the head of the global list. */
type-> next = trace_types;
trace_types = type;
add_tracer_options ( & global_trace, type) ;
out:
tracing_selftest_running = false;
mutex_unlock ( & trace_types_lock) ;
/* Boot-time activation: only if this is the tracer named on the
 * command line and registration succeeded. */
if ( ret || ! default_bootup_tracer)
goto out_unlock;
if ( strncmp ( default_bootup_tracer, type-> name, MAX_TRACER_SIZE) )
goto out_unlock;
printk ( KERN_INFO "Starting tracer '%s'\n" , type-> name) ;
tracing_set_tracer ( & global_trace, type-> name) ;
/* Consume the boot request so it only fires once. */
default_bootup_tracer = NULL ;
apply_trace_boot_options ( ) ;
disable_tracing_selftest ( "running a tracer" ) ;
out_unlock:
return ret;
}
/*
 * run_tracer_selftest - run @type's startup selftest against global_trace.
 * @type: tracer whose ->selftest callback (if any) should be exercised.
 *
 * Temporarily installs @type as the current tracer of global_trace, runs
 * the selftest, then restores the previous tracer and resets the buffers
 * so the test's events do not leak into normal tracing.
 *
 * Returns 0 when the test passes, is skipped, or is deferred; -1 on
 * failure (with a WARN so it stands out in the log).
 *
 * NOTE: the original excerpt had a bare annotation line and a comment
 * starting with a single '/' inside this function; both were syntax
 * errors and have been converted to proper comments.  No code changed.
 */
static int run_tracer_selftest ( struct tracer * type)
{
struct trace_array * tr = & global_trace;
struct tracer * saved_tracer = tr-> current_trace;
int ret;
/* Nothing to do without a selftest, or when selftests are disabled. */
if ( ! type-> selftest || tracing_selftest_disabled)
return 0 ;
/* Too early in boot to run now: queue the selftest for later. */
if ( ! selftests_can_run)
return save_selftest ( type) ;
if ( ! tracing_is_on ( ) ) {
pr_warn ( "Selftest for tracer %s skipped due to tracing disabled\n" ,
type-> name) ;
return 0 ;
}
/* Start the test from empty per-cpu buffers. */
tracing_reset_online_cpus ( & tr-> array_buffer) ;
/* Install the tracer under test as the current tracer. */
tr-> current_trace = type;
#ifdef CONFIG_TRACER_MAX_TRACE
if ( type-> use_max_tr) {
/* If the ring buffer was expanded, grow max_buffer to match. */
if ( ring_buffer_expanded)
ring_buffer_resize ( tr-> max_buffer. buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS) ;
tr-> allocated_snapshot = true;
}
#endif
pr_info ( "Testing tracer %s: " , type-> name) ;
ret = type-> selftest ( type, tr) ;
/* Put back whatever tracer was running before the test. */
tr-> current_trace = saved_tracer;
if ( ret) {
printk ( KERN_CONT "FAILED!\n" ) ;
WARN_ON ( 1 ) ;
return - 1 ;
}
/* Discard the events generated by the selftest. */
tracing_reset_online_cpus ( & tr-> array_buffer) ;
#ifdef CONFIG_TRACER_MAX_TRACE
if ( type-> use_max_tr) {
tr-> allocated_snapshot = false;
/* Shrink max_buffer back to its minimal size. */
if ( ring_buffer_expanded)
ring_buffer_resize ( tr-> max_buffer. buffer, 1 ,
RING_BUFFER_ALL_CPUS) ;
}
#endif
printk ( KERN_CONT "PASSED\n" ) ;
return 0 ;
}
/*
 * tracing_reset_online_cpus - discard buffered events on all online CPUs.
 * @buf: the trace buffer to reset; a NULL ring buffer is silently ignored.
 *
 * Recording is disabled around the reset, with a synchronize_sched() in
 * between so any writer already inside the buffer has finished before
 * the per-cpu buffers are cleared.
 */
void tracing_reset_online_cpus ( struct trace_buffer * buf)
{
	struct ring_buffer *rb = buf->buffer;
	int cpu_id;

	if (!rb)
		return;

	/* Stop new writers, then wait for in-flight ones to drain. */
	ring_buffer_record_disable(rb);
	synchronize_sched();

	/* Establish a fresh time base for the emptied buffer. */
	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu_id)
		ring_buffer_reset_cpu(rb, cpu_id);

	ring_buffer_record_enable(rb);
}
trace用户空间接口文件初始化
调用链:tracer_init_tracefs -> init_tracer_tracefs
/*
 * init_tracer_tracefs - create the per-trace-array control files in tracefs.
 * @tr:       the trace array these files operate on (passed as file data).
 * @d_tracer: the tracefs directory the files are created in.
 *
 * Each trace_create_file() call registers one control file; the mode bits
 * (0444 read-only, 0644 read-write, 0200/0220 write-only) define who may
 * read or write it.
 */
static void
init_tracer_tracefs ( struct trace_array * tr, struct dentry * d_tracer)
{
struct trace_event_file * file;
int cpu;
/* Tracer selection: list available tracers / set the current one. */
trace_create_file ( "available_tracers" , 0444 , d_tracer,
tr, & show_traces_fops) ;
trace_create_file ( "current_tracer" , 0644 , d_tracer,
tr, & set_tracer_fops) ;
trace_create_file ( "tracing_cpumask" , 0644 , d_tracer,
tr, & tracing_cpumask_fops) ;
trace_create_file ( "trace_options" , 0644 , d_tracer,
tr, & tracing_iter_fops) ;
/* Buffer access: snapshot view, live pipe, and size controls. */
trace_create_file ( "trace" , 0644 , d_tracer,
tr, & tracing_fops) ;
trace_create_file ( "trace_pipe" , 0444 , d_tracer,
tr, & tracing_pipe_fops) ;
trace_create_file ( "buffer_size_kb" , 0644 , d_tracer,
tr, & tracing_entries_fops) ;
trace_create_file ( "buffer_total_size_kb" , 0444 , d_tracer,
tr, & tracing_total_entries_fops) ;
trace_create_file ( "free_buffer" , 0200 , d_tracer,
tr, & tracing_free_buffer_fops) ;
/* User-space markers: write-only injection of events into the trace. */
trace_create_file ( "trace_marker" , 0220 , d_tracer,
tr, & tracing_mark_fops) ;
/* Attach a trigger file to the ftrace:print event, if it exists. */
file = __find_event_file ( tr, "ftrace" , "print" ) ;
if ( file && file-> dir)
trace_create_file ( "trigger" , 0644 , file-> dir, file,
& event_trigger_fops) ;
tr-> trace_marker_file = file;
trace_create_file ( "trace_marker_raw" , 0220 , d_tracer,
tr, & tracing_mark_raw_fops) ;
trace_create_file ( "trace_clock" , 0644 , d_tracer, tr,
& trace_clock_fops) ;
trace_create_file ( "tracing_on" , 0644 , d_tracer,
tr, & rb_simple_fops) ;
trace_create_file ( "timestamp_mode" , 0444 , d_tracer, tr,
& trace_time_stamp_mode_fops) ;
create_trace_options_dir ( tr) ;
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
trace_create_file ( "tracing_max_latency" , 0644 , d_tracer,
& tr-> max_latency, & tracing_max_lat_fops) ;
#endif
if ( ftrace_create_function_files ( tr, d_tracer) )
WARN ( 1 , "Could not allocate function filter files" ) ;
#ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file ( "snapshot" , 0644 , d_tracer,
tr, & snapshot_fops) ;
#endif
/* Per-cpu subdirectories (per_cpu/cpuN/...). */
for_each_tracing_cpu ( cpu)
tracing_init_tracefs_percpu ( tr, cpu) ;
ftrace_init_tracefs ( tr, d_tracer) ;
}
情景1:echo ftrace > /debug/tracing/current_tracer
/*
 * File operations for the "current_tracer" tracefs file: reading returns
 * the current tracer's name, writing selects a new tracer by name
 * (handled by tracing_set_trace_write below).
 */
static const struct file_operations set_tracer_fops = {
. open = tracing_open_generic,
. read = tracing_set_trace_read,
. write = tracing_set_trace_write,
. llseek = generic_file_llseek,
} ;
用户空间写入这个文件,会调到set_tracer_fops的tracing_set_trace_write函数
/*
 * tracing_set_trace_write - write handler for "current_tracer".
 * @filp: the opened tracefs file; private_data is the trace_array.
 * @ubuf: user-space buffer holding the requested tracer name.
 * @cnt:  number of bytes the user wrote.
 * @ppos: file position, advanced by the number of bytes consumed.
 *
 * Copies the tracer name from user space, NUL-terminates it, strips
 * trailing whitespace (an "echo name > current_tracer" appends '\n'),
 * and switches to that tracer via tracing_set_tracer().
 *
 * Returns the number of bytes consumed, -EFAULT on a bad user pointer,
 * or the error from tracing_set_tracer().
 *
 * FIX: the original excerpt dropped the copy-in logic, so @buf and @ret
 * were read uninitialized; the standard copy/trim sequence is restored.
 */
static ssize_t
tracing_set_trace_write ( struct file * filp, const char __user * ubuf,
size_t cnt, loff_t * ppos)
{
struct trace_array * tr = filp-> private_data;
char buf[ MAX_TRACER_SIZE+ 1 ] ;
int i;
size_t ret;
int err;

/* Report the full write as consumed even if the name is truncated. */
ret = cnt;

if ( cnt > MAX_TRACER_SIZE)
cnt = MAX_TRACER_SIZE;

if ( copy_from_user ( buf, ubuf, cnt) )
return - EFAULT;

buf[ cnt] = 0 ;

/* Strip trailing whitespace (e.g. the newline from echo). */
for ( i = cnt - 1 ; i > 0 && isspace ( buf[ i] ) ; i-- )
buf[ i] = 0 ;

err = tracing_set_tracer ( tr, buf) ;
if ( err)
return err;

* ppos + = ret;
return ret;
}
tracing_set_trace_write通过拷贝用户空间传过来的字符串,然后调用tracing_set_tracer。
/*
 * tracing_set_tracer - switch @tr to the tracer named @buf.
 * @tr:  the trace array whose current tracer is being replaced.
 * @buf: NUL-terminated tracer name to look up in trace_types.
 *
 * Tears down the current tracer (disable, reset, park on nop_trace),
 * adjusts snapshot buffers if the old/new tracers differ in max_tr
 * usage, then initializes and enables the new tracer.  The whole switch
 * runs under trace_types_lock.
 *
 * Returns 0 on success, -EINVAL for an unknown or disallowed tracer,
 * -EBUSY while the current tracer is referenced, or an init/alloc error.
 */
static int tracing_set_tracer ( struct trace_array * tr, const char * buf)
{
struct tracer * t;
#ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr;
#endif
int ret = 0 ;
mutex_lock ( & trace_types_lock) ;
/* Expand the ring buffer to full size on first tracer activation. */
if ( ! ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer ( tr, trace_buf_size,
RING_BUFFER_ALL_CPUS) ;
if ( ret < 0 )
goto out;
ret = 0 ;
}
/* Find the requested tracer by name. */
for ( t = trace_types; t; t = t-> next) {
if ( strcmp ( t-> name, buf) == 0 )
break ;
}
if ( ! t) {
ret = - EINVAL;
goto out;
}
/* Already the current tracer: nothing to do. */
if ( t == tr-> current_trace)
goto out;
/* Some tracers may not be started from the boot command line. */
if ( system_state < SYSTEM_RUNNING && t-> noboot) {
pr_warn ( "Tracer '%s' is not allowed on command line, ignored\n" ,
t-> name) ;
goto out;
}
/* Some tracers are only valid for specific trace arrays. */
if ( ! trace_ok_for_array ( t, tr) ) {
ret = - EINVAL;
goto out;
}
/* Cannot swap tracers while someone holds a reference (e.g. an
 * open trace_pipe). */
if ( tr-> current_trace-> ref) {
ret = - EBUSY;
goto out;
}
trace_branch_disable ( ) ;
/* Shut down the outgoing tracer and park on nop_trace so the
 * array always has a valid current tracer. */
tr-> current_trace-> enabled-- ;
if ( tr-> current_trace-> reset)
tr-> current_trace-> reset ( tr) ;
tr-> current_trace = & nop_trace;
#ifdef CONFIG_TRACER_MAX_TRACE
had_max_tr = tr-> allocated_snapshot;
/* Old tracer used the snapshot buffer but the new one does not:
 * wait for in-flight users, then free it. */
if ( had_max_tr && ! t-> use_max_tr) {
synchronize_sched ( ) ;
free_snapshot ( tr) ;
}
#endif
#ifdef CONFIG_TRACER_MAX_TRACE
/* New tracer needs the snapshot buffer: allocate it now. */
if ( t-> use_max_tr && ! had_max_tr) {
ret = tracing_alloc_snapshot_instance ( tr) ;
if ( ret < 0 )
goto out;
}
#endif
/* Let the new tracer set itself up. */
if ( t-> init) {
ret = tracer_init ( t, tr) ;
if ( ret)
goto out;
}
tr-> current_trace = t;
tr-> current_trace-> enabled++ ;
trace_branch_enable ( tr) ;
out:
mutex_unlock ( & trace_types_lock) ;
return ret;
}
Trace function
/*
 * The "function" tracer definition, registered by init_function_trace().
 * init/reset start and stop function tracing for a trace array; flags
 * and set_flag handle the func_stack_trace option; allow_instances
 * permits use in tracefs instance directories.
 */
static struct tracer function_trace __tracer_data =
{
. name = "function" ,
. init = function_trace_init,
. reset = function_trace_reset,
. start = function_trace_start,
. flags = & func_flags,
. set_flag = func_set_flag,
. allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
. selftest = trace_selftest_startup_function,
#endif
} ;
/*
 * init_function_trace - boot-time setup for the "function" tracer.
 *
 * Registers the traceon/traceoff function-probe commands, then the
 * function tracer itself.  Returns register_tracer()'s result.
 */
__init int init_function_trace ( void )
{
	int ret;

	init_func_cmd_traceon();
	ret = register_tracer(&function_trace);
	return ret;
}
/*
 * Function-probe commands usable in set_ftrace_filter, e.g.
 * "func:traceon".  Each pairs a command name with its parser callback;
 * they are registered via register_ftrace_command() below.
 */
/* "traceon": turn tracing on when the function is hit. */
static struct ftrace_func_command ftrace_traceon_cmd = {
. name = "traceon" ,
. func = ftrace_trace_onoff_callback,
} ;
/* "traceoff": turn tracing off when the function is hit. */
static struct ftrace_func_command ftrace_traceoff_cmd = {
. name = "traceoff" ,
. func = ftrace_trace_onoff_callback,
} ;
/* "stacktrace": record a stack trace when the function is hit. */
static struct ftrace_func_command ftrace_stacktrace_cmd = {
. name = "stacktrace" ,
. func = ftrace_stacktrace_callback,
} ;
/* "dump": dump the ring buffer when the function is hit. */
static struct ftrace_func_command ftrace_dump_cmd = {
. name = "dump" ,
. func = ftrace_dump_callback,
} ;
/* "cpudump": dump only the current CPU's buffer when hit. */
static struct ftrace_func_command ftrace_cpudump_cmd = {
. name = "cpudump" ,
. func = ftrace_cpudump_callback,
} ;
/*
 * register_ftrace_command - add a function-probe command to the global list.
 * @cmd: command to register; its name must be unique.
 *
 * Walks ftrace_commands under ftrace_cmd_mutex and links @cmd in unless
 * a command with the same name already exists.
 *
 * Returns 0 on success, -EBUSY if the name is already taken.
 */
__init int register_ftrace_command ( struct ftrace_func_command * cmd)
{
	struct ftrace_func_command *entry;
	int err = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(entry, &ftrace_commands, list) {
		/* Names must be unique across all registered commands. */
		if (!strcmp(cmd->name, entry->name)) {
			err = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);
	return err;
}
/*
 * trace_selftest_startup_function - selftest for the function tracer.
 * @trace: the function tracer being tested.
 * @tr:    the trace array to run the test against.
 *
 * Starts the tracer, lets it run briefly, then stops and counts the
 * recorded entries; afterwards runs the dynamic-ftrace, recursion and
 * register-saving sub-tests.  ftrace_enabled is saved/restored around
 * the whole sequence.
 *
 * Returns 0 on pass, negative on failure; on failure ftrace is killed
 * since its state can no longer be trusted.
 */
__init int trace_selftest_startup_function ( struct tracer * trace, struct trace_array * tr)
{
int save_ftrace_enabled = ftrace_enabled;
unsigned long count;
int ret;
#ifdef CONFIG_DYNAMIC_FTRACE
/* A boot-time filter would skew the entry count: skip the test. */
if ( ftrace_filter_param) {
printk ( KERN_CONT " ... kernel command line filter set: force PASS ... " ) ;
return 0 ;
}
#endif
msleep ( 1 ) ;
ftrace_enabled = 1 ;
/* Start the function tracer on @tr. */
ret = tracer_init ( trace, tr) ;
if ( ret) {
warn_failed_init_tracer ( trace, ret) ;
goto out;
}
/* Let the tracer collect some entries. */
msleep ( 100 ) ;
tracing_stop ( ) ;
ftrace_enabled = 0 ;
/* Verify the buffer is sane and count what was recorded. */
ret = trace_test_buffer ( & tr-> trace_buffer, & count) ;
ftrace_enabled = 1 ;
trace-> reset ( tr) ;
tracing_start ( ) ;
/* A clean buffer with zero entries means tracing never happened. */
if ( ! ret && ! count) {
printk ( KERN_CONT ".. no entries found .." ) ;
ret = - 1 ;
goto out;
}
/* Additional sub-tests: dynamic patching, recursion, register access. */
ret = trace_selftest_startup_dynamic_tracing ( trace, tr,
DYN_FTRACE_TEST_NAME) ;
if ( ret)
goto out;
ret = trace_selftest_function_recursion ( ) ;
if ( ret)
goto out;
ret = trace_selftest_function_regs ( ) ;
out:
ftrace_enabled = save_ftrace_enabled;
/* A failed selftest leaves ftrace in an unknown state: disable it. */
if ( ret)
ftrace_kill ( ) ;
return ret;
}
/*
 * function_trace_init - the "function" tracer's init callback.
 * @tr: trace array the tracer is being started on.
 *
 * Picks the plain or stack-trace flavor of the function callback
 * (the stack flavor only for the global array with the stack option
 * set), installs it as the array's ftrace ops function, records the
 * current CPU in the buffer, and starts cmdline recording and the
 * function trace itself.
 *
 * Returns 0 on success, -ENOMEM if the array has no ftrace ops.
 */
static int function_trace_init ( struct trace_array * tr)
{
	ftrace_func_t trace_fn = function_trace_call;

	if (!tr->ops)
		return -ENOMEM;

	/* Stack-tracing variant only for the global array when the
	 * func_stack_trace option is enabled. */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		trace_fn = function_stack_trace_call;

	ftrace_init_array_ops(tr, trace_fn);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}