mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-10-31 16:38:31 +00:00
a98d24b71b
Use the allocator bitmap to look up active interrupts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
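The change this describes: the set of live interrupts is tracked in the allocated_irqs bitmap maintained by the descriptor allocator (see irqdesc.c below), so enumeration can jump from one allocated bit to the next instead of probing every possible irq number. A minimal sketch of such a walk, assuming irq_get_next_irq() and nr_irqs are declared in <linux/irqnr.h> as in this tree; walk_active_irqs() and its callback are illustrative names, not part of the patch:

#include <linux/irqnr.h>

/* Visit every allocated irq number; unallocated ones are skipped. */
static void walk_active_irqs(void (*fn)(unsigned int irq))
{
	unsigned int irq;

	for (irq = irq_get_next_irq(0); irq < nr_irqs;
	     irq = irq_get_next_irq(irq + 1))
		fn(irq);
}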
488 lines
11 KiB
C
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

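/*
 * desc_set_defaults() returns a descriptor to its pristine state: no
 * chip, handle_bad_irq as flow handler, depth 1 (disabled) and cleared
 * per-CPU statistics. The !CONFIG_SPARSE_IRQ variant of free_desc()
 * below reuses it to "free" statically allocated descriptors.
 */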
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

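/*
 * allocated_irqs is the authoritative record of which irq numbers are
 * in use: irq_alloc_descs() reserves a free area in it under
 * sparse_irq_lock, irq_free_descs() clears the bits again, and
 * irq_get_next_irq() scans it to enumerate the active interrupts.
 */
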
#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.status		= IRQ_DEFAULT_INIT_FLAGS,
	.handle_irq	= handle_bad_irq,
	.depth		= 1,
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we cannot get a new one;
	 * init_copy_kstat_irqs() can still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
	desc->irq_data.node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

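/*
 * In sparse mode the descriptors are kept in a radix tree keyed by the
 * interrupt number. Inserts and deletes run under sparse_irq_lock,
 * while irq_to_desc() does an unlocked lookup.
 */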
static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					 gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	unregister_irq_proc(irq, desc);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	delete_irq_desc(irq);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}

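/*
 * alloc_descs() populates a range that irq_alloc_descs() has already
 * reserved in the bitmap; on failure it tears down the descriptors
 * installed so far and drops the bitmap reservation.
 */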
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	unsigned long flags;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		raw_spin_lock_irqsave(&sparse_irq_lock, flags);
		irq_insert_desc(start + i, desc);
		raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return -ENOMEM;
}

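/*
 * The first NR_IRQS_LEGACY descriptors are statically allocated;
 * early_irq_init() wires up their kstat storage and cpumasks and
 * inserts them into the radix tree at boot.
 */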
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int), GFP_NOWAIT, node);

	irq_desc_init.irq_data.chip = &no_irq_chip;

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
		desc[i].irq_data.node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_insert_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_insert_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

#ifdef CONFIG_SMP
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	unsigned long flags;
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, from, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number of the allocated range, or a negative
 * error code on failure.
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	unsigned long flags;
	int start, ret;

	if (!cnt)
		return -EINVAL;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return alloc_descs(start, cnt, node);

err:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return ret;
}
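
/*
 * Hypothetical usage sketch (not part of this file): reserve a block of
 * four consecutive interrupts anywhere in the irq space, then release
 * it again. The -1 passed as @irq means "no fixed number requested".
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 */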

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns the next allocated irq number at or above @offset, or nr_irqs
 * if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

/* Statistics access */
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}