Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson (synced 2025-10-31 14:30:50 +00:00)
41edafdb78
Impact: Optimization

Several paravirt ops implementations simply return their arguments, the most obvious being the make_pte/pte_val class of operations on native.

On 32-bit, the identity function is literally a no-op, as the calling convention uses the same registers for the first argument and return. On 64-bit, it can be implemented with a single "mov".

This patch adds special identity functions for 32- and 64-bit arguments, and machinery to recognize them and replace them with either nops or a mov as appropriate.

At the moment, the only users of the identity functions are the pagetable entry conversion functions.

The result is a measurable improvement on pagetable-heavy benchmarks (2-3%, reducing the pvops overhead from 5% to 2%).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
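This page shows the 32-bit patch file, where both identity ops emit zero patched bytes and the call site degenerates to nops. As a rough sketch of the single-"mov" 64-bit case described above, the 64-bit counterpart could look like the following (the file name arch/x86/kernel/paravirt_patch_64.c and the mov32/mov64 template names are assumptions for illustration, not taken from this page; DEF_NATIVE and paravirt_patch_insns are the same helpers used in the file below):

/* Sketch of an assumed 64-bit counterpart (e.g. arch/x86/kernel/paravirt_patch_64.c).
 * Per the commit message, the 64-bit identity op can be patched to a single "mov"
 * from the first-argument register to the return register; on 32-bit the same
 * register carries both the argument and the return value, so no instruction is
 * needed at all. */
DEF_NATIVE(, mov32, "mov %edi, %eax");	/* 32-bit value: low half of the register */
DEF_NATIVE(, mov64, "mov %rdi, %rax");	/* 64-bit value: full register */

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* copy the template between the start_/end_ labels into the call site */
	return paravirt_patch_insns(insnbuf, len, start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64);
}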
		
			
				
	
	
		
62 lines, 1.7 KiB, C
	
	
	
	
			
		
		
	
	
#include <asm/paravirt.h>

DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_cpu_ops, iret);
		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_cpu_ops, read_tsc);

	patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;
	}
#undef PATCH_SITE
	return ret;
}
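The start_##ops##_##name / end_##ops##_##name symbols consumed by PATCH_SITE come from DEF_NATIVE. A simplified sketch of what that macro boils down to is shown here; the real definition lives in the x86 paravirt headers and differs in detail:

/* Simplified sketch, not the exact kernel macro: DEF_NATIVE(ops, name, code)
 * plants the literal native instructions in the kernel image between two
 * labels, and declares those labels so native_patch() can copy the bytes
 * over the indirect pvops call site via paravirt_patch_insns(). */
#define DEF_NATIVE(ops, name, code)						\
	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

Roughly speaking, paravirt_patch_insns() then memcpy()s the end - start template bytes into the instruction buffer when they fit in the room available at the call site.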