mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
commit 3715863aa1
iommu_is_span_boundary is used internally in the IOMMU helper (lib/iommu-helper.c): it is a primitive function that judges whether a memory area spans the LLD's segment boundary or not. Some IOMMUs are difficult to convert to the IOMMU helper, but iommu_is_span_boundary is still useful for them, so this patch exports it. This is needed for the parisc iommu fixes.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Kyle McMartin <kyle@parisc-linux.org>
Cc: Matthew Wilcox <matthew@wil.cx>
Cc: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
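As a quick illustration (not part of this commit), this is roughly how a driver that manages its own IOMMU bitmap, and therefore cannot switch to iommu_area_alloc() wholesale, might call the now-exported primitive. The function my_find_free_range() and the other names below are hypothetical stand-ins for the driver's own code, not kernel APIs.

#include <linux/iommu-helper.h>

/*
 * Hypothetical driver-private search routine, declared only for this sketch:
 * returns the first index of a run of 'npages' clear bits at or after
 * 'start', or ~0UL if none exists.
 */
extern unsigned long my_find_free_range(unsigned long *map, unsigned long size,
					unsigned long start, unsigned int npages);

static unsigned long my_alloc_pages(unsigned long *map, unsigned long size,
				    unsigned int npages, unsigned long shift,
				    unsigned long boundary_size)
{
	unsigned long idx = 0;

	for (;;) {
		idx = my_find_free_range(map, size, idx, npages);
		if (idx == ~0UL)
			return ~0UL;	/* no free range left */
		/* reject candidates that would cross the LLD's segment boundary */
		if (!iommu_is_span_boundary(idx, npages, shift, boundary_size))
			return idx;
		idx++;			/* retry past the rejected range */
	}
}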
		
			
				
	
	
		
lib/iommu-helper.c (83 lines, 1.8 KiB, C)
			
		
		
	
	
/*
 * IOMMU helper functions for the free area management
 */

#include <linux/module.h>
#include <linux/bitops.h>

static unsigned long find_next_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned int nr,
					 unsigned long align_mask)
{
	unsigned long index, end, i;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = (index + align_mask) & ~align_mask;

	end = index + nr;
	if (end >= size)
		return -1;
	for (i = index; i < end; i++) {
		if (test_bit(i, map)) {
			start = i+1;
			goto again;
		}
	}
	return index;
}

static inline void set_bit_area(unsigned long *map, unsigned long i,
				int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, map);
		i++;
	}
}

int iommu_is_span_boundary(unsigned int index, unsigned int nr,
			   unsigned long shift,
			   unsigned long boundary_size)
{
	BUG_ON(!is_power_of_2(boundary_size));

	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}
EXPORT_SYMBOL(iommu_is_span_boundary);

unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;
again:
	index = find_next_zero_area(map, size, start, nr, align_mask);
	if (index != -1) {
		if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
			/* we could do more effectively */
			start = index + 1;
			goto again;
		}
		set_bit_area(map, index, nr);
	}
	return index;
}
EXPORT_SYMBOL(iommu_area_alloc);

void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
	unsigned long end = start + nr;

	while (start < end) {
		__clear_bit(start, map);
		start++;
	}
}
EXPORT_SYMBOL(iommu_area_free);
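For orientation (again, not part of the file above), a typical caller of this era computes the boundary in IOMMU-page units from dma_get_seg_boundary() and guards the bitmap with its own lock. The bitmap, lock, and function names below are assumptions made for this sketch, not kernel APIs; only iommu_area_alloc(), iommu_area_free(), dma_get_seg_boundary(), and the standard kernel helpers are real.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

/* illustrative driver state: one bit per IOMMU page */
static unsigned long *my_bitmap;
static unsigned long my_iommu_pages;
static DEFINE_SPINLOCK(my_bitmap_lock);

static unsigned long my_iommu_alloc(struct device *dev, unsigned int npages)
{
	unsigned long boundary_size, offset, flags;

	/* device segment boundary, converted to pages; must be a power of two */
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&my_bitmap_lock, flags);
	/* shift = 0 assumes the aperture base is aligned to the boundary */
	offset = iommu_area_alloc(my_bitmap, my_iommu_pages, 0, npages,
				  0, boundary_size, 0);
	spin_unlock_irqrestore(&my_bitmap_lock, flags);

	return offset;		/* -1 on failure, otherwise first page index */
}

static void my_iommu_free(unsigned long offset, unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&my_bitmap_lock, flags);
	iommu_area_free(my_bitmap, offset, npages);
	spin_unlock_irqrestore(&my_bitmap_lock, flags);
}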