Commit 7a77a5ef authored by Philippe Gerum

ftrace: ipipe: enable tracing from the head domain

parent 5da0d8e4
@@ -141,6 +141,7 @@ enum {
FTRACE_OPS_FL_PID = 1 << 14,
FTRACE_OPS_FL_RCU = 1 << 15,
FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
FTRACE_OPS_FL_IPIPE_EXCLUSIVE = 1 << 17,
};
#ifdef CONFIG_DYNAMIC_FTRACE
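The new FTRACE_OPS_FL_IPIPE_EXCLUSIVE flag lets a tracer meant to run over the head domain claim the function-tracing hook for itself; update_ftrace_function() below short-circuits its usual list handling as soon as such an ops is registered. A minimal sketch of a client, assuming the stock register_ftrace_function() entry point and a purely hypothetical my_head_handler/my_head_ops pair:

#include <linux/ftrace.h>

/* Hypothetical callback: it may fire from the head domain with hard
 * IRQs off, so it must not call into regular Linux services. */
static void notrace my_head_handler(unsigned long ip, unsigned long parent_ip,
				    struct ftrace_ops *op, struct pt_regs *regs)
{
	/* log ip/parent_ip into a domain-safe buffer */
}

static struct ftrace_ops my_head_ops = {
	.func	= my_head_handler,
	.flags	= FTRACE_OPS_FL_IPIPE_EXCLUSIVE,
};

/* Registering it makes this ops override every other tracer. */
static int __init my_head_tracer_init(void)
{
	return register_ftrace_function(&my_head_ops);
}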
@@ -480,6 +480,7 @@ config DYNAMIC_FTRACE
bool "enable/disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
depends on !IPIPE
default y
help
This option will modify all the calls to function tracing
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/ipipe.h>
#include <trace/events/sched.h>
@@ -271,8 +272,17 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
static void update_ftrace_function(void)
{
struct ftrace_ops *ops;
ftrace_func_t func;
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next)
if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) {
set_function_trace_op = ops;
func = ops->func;
goto set_pointers;
}
/*
* Prepare the ftrace_ops that the arch callback will use.
* If there's only one ftrace_ops registered, the ftrace_ops_list
@@ -302,6 +312,7 @@ static void update_ftrace_function(void)
update_function_graph_func();
set_pointers:
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
@@ -2689,6 +2700,9 @@ void __weak arch_ftrace_update_code(int command)
static void ftrace_run_update_code(int command)
{
#ifdef CONFIG_IPIPE
unsigned long flags;
#endif /* CONFIG_IPIPE */
int ret;
ret = ftrace_arch_code_modify_prepare();
@@ -2702,7 +2716,13 @@ static void ftrace_run_update_code(int command)
* is safe. The stop_machine() is the safest, but also
* produces the most overhead.
*/
#ifdef CONFIG_IPIPE
flags = ipipe_critical_enter(NULL);
__ftrace_modify_code(&command);
ipipe_critical_exit(flags);
#else /* !CONFIG_IPIPE */
arch_ftrace_update_code(command);
#endif /* !CONFIG_IPIPE */
ret = ftrace_arch_code_modify_post_process();
FTRACE_WARN_ON(ret);
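With the interrupt pipeline in place, stop_machine()-based patching via arch_ftrace_update_code() would only quiesce the root domain, so the CONFIG_IPIPE branch above runs __ftrace_modify_code() inside an ipipe_critical_enter()/ipipe_critical_exit() section, which parks every other CPU with hard IRQs disabled in all domains. The general pattern, sketched here with a hypothetical do_patch_text() standing in for the actual modification step:

#include <linux/ipipe.h>

static void do_patch_text(void)
{
	/* hypothetical routine rewriting kernel text */
}

static void patch_text_atomically(void)
{
	unsigned long flags;

	/* Spin the other CPUs with hard IRQs off, across all domains... */
	flags = ipipe_critical_enter(NULL);
	/* ...so no CPU can execute the code while it is being rewritten. */
	do_patch_text();
	/* Release the other CPUs and restore the interrupt state. */
	ipipe_critical_exit(flags);
}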
@@ -5661,10 +5681,10 @@ static int ftrace_process_locs(struct module *mod,
* reason to cause large interrupt latencies while we do it.
*/
if (!mod)
local_irq_save(flags);
flags = hard_local_irq_save();
ftrace_update_code(mod, start_pg);
if (!mod)
local_irq_restore(flags);
hard_local_irq_restore(flags);
ret = 0;
out:
mutex_unlock(&ftrace_lock);
@@ -5917,9 +5937,11 @@ void __init ftrace_init(void)
unsigned long count, flags;
int ret;
local_irq_save(flags);
flags = hard_local_irq_save_notrace();
ret = ftrace_dyn_arch_init();
local_irq_restore(flags);
hard_local_irq_restore_notrace(flags);
/* ftrace_dyn_arch_init places the return code in addr */
if (ret)
goto failed;
@@ -6075,7 +6097,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
}
} while_for_each_ftrace_op(op);
out:
preempt_enable_notrace();
#ifdef CONFIG_IPIPE
if (hard_irqs_disabled() || !__ipipe_root_p)
/*
* Nothing urgent to schedule here. At latest the timer tick
* will pick up whatever the tracing functions kicked off.
*/
preempt_enable_no_resched_notrace();
else
#endif
preempt_enable_notrace();
trace_clear_recursion(bit);
}
@@ -2575,7 +2575,8 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned int val = cpu_buffer->current_context;
unsigned long flags;
unsigned int val;
int bit;
if (in_interrupt()) {
@@ -2588,19 +2589,30 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
} else
bit = RB_CTX_NORMAL;
if (unlikely(val & (1 << bit)))
flags = hard_local_irq_save();
val = cpu_buffer->current_context;
if (unlikely(val & (1 << bit))) {
hard_local_irq_restore(flags);
return 1;
}
val |= (1 << bit);
cpu_buffer->current_context = val;
hard_local_irq_restore(flags);
return 0;
}
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned long flags;
flags = hard_local_irq_save();
cpu_buffer->current_context &= cpu_buffer->current_context - 1;
hard_local_irq_restore(flags);
}
/**
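Throughout the patch, local_irq_save()/local_irq_restore() pairs guarding per-CPU tracer state become hard_local_irq_save()/hard_local_irq_restore(): under I-pipe the local_ variants only virtually mask interrupts for the root domain, so an interrupt taken by the head domain could still preempt the section and re-enter the tracer on the same CPU, as trace_recursive_lock()/unlock() above must now assume. The generic shape of the conversion, sketched around a hypothetical per-CPU context word:

#include <linux/ipipe.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, trace_ctx);	/* hypothetical per-CPU state */

static void set_trace_ctx_bit(int bit)
{
	unsigned long flags;

	/* Mask IRQs in the CPU for every domain, not just virtually for
	 * the root domain as local_irq_save() would do under I-pipe. */
	flags = hard_local_irq_save();
	__this_cpu_or(trace_ctx, 1U << bit);
	hard_local_irq_restore(flags);
}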
@@ -2910,8 +2910,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
flags = hard_local_irq_save();
pc = preempt_count();
preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
@@ -2924,7 +2925,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out;
local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->trace_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
@@ -2945,7 +2945,7 @@ out:
put_trace_buf();
out_nobuffer:
preempt_enable_notrace();
hard_local_irq_restore(flags);
unpause_graph_tracing();
return len;
@@ -96,7 +96,7 @@ u64 notrace trace_clock_global(void)
int this_cpu;
u64 now;
local_irq_save(flags);
flags = hard_local_irq_save_notrace();
this_cpu = raw_smp_processor_id();
now = sched_clock_cpu(this_cpu);
@@ -122,7 +122,7 @@ u64 notrace trace_clock_global(void)
arch_spin_unlock(&trace_clock_struct.lock);
out:
local_irq_restore(flags);
hard_local_irq_restore_notrace(flags);
return now;
}
@@ -172,7 +172,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
* Need to use raw, since this must be called before the
* recursive protection is performed.
*/
local_irq_save(flags);
flags = hard_local_irq_save();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
hard_local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
@@ -408,7 +408,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
if (tracing_thresh)
return 1;
local_irq_save(flags);
flags = hard_local_irq_save_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
@@ -420,7 +420,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
hard_local_irq_restore_notrace(flags);
return ret;
}
@@ -482,7 +482,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu;
int pc;
local_irq_save(flags);
flags = hard_local_irq_save_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
@@ -491,7 +491,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
__trace_graph_return(tr, trace, flags, pc);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
hard_local_irq_restore_notrace(flags);
}
void set_graph_array(struct trace_array *tr)
@@ -483,28 +483,28 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
if (!preempt_trace() && irq_trace())
if (ipipe_root_p && !preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);
void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
if (ipipe_root_p && !preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
if (ipipe_root_p && !preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
if (ipipe_root_p && !preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
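Because the hardirq entry/exit hooks above may now be traversed while the head domain is active, they are gated on ipipe_root_p so that the irqsoff tracer keeps measuring root-domain latency only and start/stop_critical_timing() never run from a context where root-only services are off limits. The same guard works for any hook that must stay root-only; a hypothetical example:

#include <linux/ipipe.h>

static void my_trace_hook(unsigned long ip)
{
	/* Bail out when called over the head domain: everything below
	 * relies on services that are only safe in the root domain. */
	if (!ipipe_root_p)
		return;

	/* root-domain-only tracing work goes here */
}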