mirror of https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

commit 38388301b7
swiotlb_sync_single_range_for_cpu and swiotlb_sync_single_range_for_device are unnecessary because swiotlb_sync_single_for_cpu and swiotlb_sync_single_for_device can be used instead.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
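For context, the removed range helpers took an explicit offset into an existing mapping; a caller gets the same effect by folding that offset into the bus address passed to the plain single-buffer sync that remains in the header below. The sketch that follows illustrates the substitution only; it is not taken from this commit's diff, it assumes the pre-removal signature swiotlb_sync_single_range_for_cpu(hwdev, dev_addr, offset, size, dir), and the wrapper name my_sync_partial_for_cpu is hypothetical.

/*
 * Illustrative caller-side substitution (sketch, not from this commit):
 * where code previously called
 *     swiotlb_sync_single_range_for_cpu(hwdev, dev_addr, offset, size, dir);
 * the same sub-range can be synced with the remaining helper by adding
 * the offset to the bus address.
 */
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

static void my_sync_partial_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir)
{
	swiotlb_sync_single_for_cpu(hwdev, dev_addr + offset, size, dir);
}

Because the remaining exports already cover this pattern, the _range_ variants carry no extra capability, which is the rationale stated in the commit message.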
		
			
				
	
	
		
90 lines · 2.5 KiB · C
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/types.h>

struct device;
struct dma_attrs;
struct scatterlist;

extern int swiotlb_force;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

extern void swiotlb_init(int verbose);

extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs);

extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	       int direction);

extern void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
		 int direction);

extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       struct dma_attrs *attrs);

extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);

extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);

extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);

#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
#else
static inline void swiotlb_free(void) { }
#endif

extern void swiotlb_print_info(void);
#endif /* __LINUX_SWIOTLB_H */
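As a quick check on the constants in the header: IO_TLB_SHIFT of 11 makes each IO TLB slab 1 << 11 = 2048 bytes, and IO_TLB_SEGSIZE of 128 contiguous slabs effectively caps a single bounce-buffered mapping at 128 * 2 KiB = 256 KiB. The small standalone userspace program below is purely illustrative and just reproduces that arithmetic; it is not kernel code.

#include <stdio.h>

/* Values mirrored from the header above. */
#define IO_TLB_SHIFT	11
#define IO_TLB_SEGSIZE	128

int main(void)
{
	unsigned long slab_size = 1UL << IO_TLB_SHIFT;        /* 2048 bytes per slab */
	unsigned long max_seg   = IO_TLB_SEGSIZE * slab_size; /* 262144 bytes */

	printf("IO TLB slab size: %lu bytes\n", slab_size);
	printf("max contiguous swiotlb mapping: %lu bytes (%lu KiB)\n",
	       max_seg, max_seg / 1024);
	return 0;
}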