mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-10-31 05:14:38 +00:00

commit 478735e388
In /proc/stat, the number of per-IRQ events is shown by summing each
irq's events over all cpus. But we can make use of kstat_irqs().

kstat_irqs() does the same calculation. If !CONFIG_GENERIC_HARDIRQ,
it's not a big cost (both the number of cpus and the number of irqs
are small). But if a system is very big and has CONFIG_GENERIC_HARDIRQ,
it does

    for_each_irq()
        for_each_cpu()
            - look up a radix tree
            - read desc->irq_stat[cpu]

This is not efficient. This patch adds kstat_irqs() for
CONFIG_GENERIC_HARDIRQ and changes the calculation to

    for_each_irq()
        look up the radix tree
        for_each_cpu()
            - read desc->irq_stat[cpu]

This reduces the cost.

A test on a 4096-cpu, 256-node, 4592-irq host (by Jack Steiner):

    %time cat /proc/stat > /dev/null

    Before patch: 2.459 sec
    After patch :  .561 sec

[akpm@linux-foundation.org: unexport kstat_irqs, coding-style tweaks]
[akpm@linux-foundation.org: fix unused variable 'per_irq_sum']
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Jack Steiner <steiner@sgi.com>
Acked-by: Jack Steiner <steiner@sgi.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
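A minimal sketch of the loop restructuring, assuming the pre-patch code
summed kstat_irqs_cpu() per cpu for each irq (per_irq_sum being the
variable akpm's fixup note mentions):

	/* Before: a radix-tree lookup for every (irq, cpu) pair. */
	for_each_irq_nr(j) {
		unsigned int per_irq_sum = 0;

		for_each_possible_cpu(i)
			per_irq_sum += kstat_irqs_cpu(j, i); /* lookup inside */
		seq_printf(p, " %u", per_irq_sum);
	}

	/* After: one lookup per irq; kstat_irqs() walks the cpus itself. */
	for_each_irq_nr(j)
		seq_printf(p, " %u", kstat_irqs(j));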

171 lines · 4.9 KiB · C

#include <linux/cpumask.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/irqnr.h>
#include <asm/cputime.h>

#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif

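/*
 * show_stat() renders the whole of /proc/stat in one pass: aggregate
 * cpu times, per-cpu lines, interrupt counts, context switches, boot
 * time, process counts and softirq totals.
 */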
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = guest_nice = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		guest_nice = cputime64_add(guest_nice,
			kstat_cpu(i).cpustat.guest_nice);
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

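	/* First line: times aggregated over all cpus, in clock ticks. */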
	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu "
		"%llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest),
		(unsigned long long)cputime64_to_clock_t(guest_nice));
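	/* Then one "cpuN" line per online cpu. */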
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		guest_nice = kstat_cpu(i).cpustat.guest_nice;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
			"%llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest),
			(unsigned long long)cputime64_to_clock_t(guest_nice));
	}
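	/*
	 * "intr": the grand total, then one count per irq.  kstat_irqs()
	 * looks the irq descriptor up once and sums its per-cpu counters,
	 * which keeps this loop cheap on large machines.
	 */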
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j)
		seq_printf(p, " %u", kstat_irqs(j));

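	/* System-wide counters: context switches, boot time, forks. */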
	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_printf(p, " %u", per_softirq_sums[i]);
	seq_printf(p, "\n");

	return 0;
}

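/*
 * Pre-size the seq_file buffer from the possible-cpu count so a single
 * read usually succeeds without seq_file's grow-and-retry cycle.
 */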
static int stat_open(struct inode *inode, struct file *file)
{
	unsigned size = 4096 * (1 + num_possible_cpus() / 32);
	char *buf;
	struct seq_file *m;
	int res;

	/* don't ask for more than the kmalloc() max size, currently 128 KB */
	if (size > 128 * 1024)
		size = 128 * 1024;
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	res = single_open(file, show_stat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_stat_operations = {
	.open		= stat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_stat_init(void)
{
	proc_create("stat", 0, NULL, &proc_stat_operations);
	return 0;
}
module_init(proc_stat_init);
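For reference, the format strings above produce output shaped like this
(values illustrative, per-irq and per-softirq lists truncated):

	cpu  4705 356 584 3699176 23926 0 4 0 0 0
	cpu0 4705 356 584 3699176 23926 0 4 0 0 0
	intr 114930548 113199788 3 0 5 ...
	ctxt 1990473
	btime 1062191376
	processes 2915
	procs_running 1
	procs_blocked 0
	softirq 183433 0 21755 12 39 1006 0 165 ...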