mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
(synced 2025-10-30 23:50:07 +00:00)

commit 018378c55b
Cleanup. Factor the common code in save_stack_address() and
save_stack_address_nosched().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Roland McGrath <roland@redhat.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Vegard Nossum <vegard.nossum@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
LKML-Reference: <20100603193243.GA31534@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
		
			
				
	
	
		
160 lines · 3.8 KiB · C
/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

static void save_stack_warning(void *data, char *msg)
{
}

static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int save_stack_stack(void *data, char *name)
{
	return 0;
}

static void
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
	struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
	if (!reliable)
		return;
#endif
	if (nosched && in_sched_functions(addr))
		return;
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}

static const struct stacktrace_ops save_stack_ops = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
{
	dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
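For context, a minimal caller sketch of the save_stack_trace() interface this file implements (not part of the file itself): the struct stack_trace fields (entries, max_entries, nr_entries, skip) and the ULONG_MAX terminator follow what the functions above rely on, while the function name, buffer depth, and printk format below are illustrative assumptions only.

/*
 * Hypothetical usage sketch, not part of the file above: capture the
 * current kernel stack into a fixed-size buffer and print it.
 * DEMO_STACK_DEPTH and demo_dump_stack() are made-up names.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

#define DEMO_STACK_DEPTH 16

static void demo_dump_stack(void)
{
	unsigned long buf[DEMO_STACK_DEPTH];
	struct stack_trace trace = {
		.entries	= buf,
		.max_entries	= DEMO_STACK_DEPTH,
		.nr_entries	= 0,
		.skip		= 1,	/* drop demo_dump_stack()'s own frame */
	};
	unsigned int i;

	save_stack_trace(&trace);

	/* save_stack_trace() appends ULONG_MAX as a terminator when there is room. */
	for (i = 0; i < trace.nr_entries; i++) {
		if (trace.entries[i] == ULONG_MAX)
			break;
		printk(KERN_INFO "  [<%p>] %pS\n",
		       (void *)trace.entries[i], (void *)trace.entries[i]);
	}
}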