mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-08-30 13:03:01 +00:00

AMD SME added __sme_set/__sme_clr primitives to modify the DMA address for
encrypted/decrypted traffic. However this doesn't fit in with other models,
e.g., Arm CCA where the meanings are the opposite, i.e., "decrypted" traffic
has the top bit set and "encrypted" traffic has the top bit cleared.
In preparation for adding the support for Arm CCA DMA conversions, convert the
existing primitives to more generic ones that can be provided by the backends.
i.e., add helpers to
1. dma_addr_encrypted - Convert a DMA address to "encrypted" [ == __sme_set() ]
2. dma_addr_unencrypted - Convert a DMA address to "decrypted" [ None exists today ]
3. dma_addr_canonical - Clear any "encryption"/"decryption" bits from DMA
address [ SME uses __sme_clr() ] and convert to a canonical DMA address.
Since the original __sme_xxx helpers come from linux/mem_encrypt.h, use that
as the home for the new definitions and provide dummy ones when none is provided
by the architectures.
With the above, phys_to_dma_unencrypted() uses the newly added dma_addr_unencrypted()
helper; to make the code a bit easier to read and avoid double conversion,
also provide __phys_to_dma().
Suggested-by: Robin Murphy <robin.murphy@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Fixes: 42be24a417 ("arm64: Enable memory encrypt for Realms")
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20250227144150.1667735-3-suzuki.poulose@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
156 lines
4.2 KiB
C
156 lines
4.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Internals of the DMA direct mapping implementation. Only for use by the
|
|
* DMA mapping code and IOMMU drivers.
|
|
*/
|
|
#ifndef _LINUX_DMA_DIRECT_H
|
|
#define _LINUX_DMA_DIRECT_H 1
|
|
|
|
#include <linux/dma-mapping.h>
|
|
#include <linux/dma-map-ops.h>
|
|
#include <linux/memblock.h> /* for min_low_pfn */
|
|
#include <linux/mem_encrypt.h>
|
|
#include <linux/swiotlb.h>
|
|
|
|
extern u64 zone_dma_limit;
|
|
|
|
/*
|
|
* Record the mapping of CPU physical to DMA addresses for a given region.
|
|
*/
|
|
struct bus_dma_region {
	phys_addr_t	cpu_start;	/* CPU physical base of the region */
	dma_addr_t	dma_start;	/* device-visible (bus) base of the same region */
	u64		size;		/* region length in bytes; 0 terminates the map array */
};
|
|
|
|
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
|
|
phys_addr_t paddr)
|
|
{
|
|
const struct bus_dma_region *m;
|
|
|
|
for (m = dev->dma_range_map; m->size; m++) {
|
|
u64 offset = paddr - m->cpu_start;
|
|
|
|
if (paddr >= m->cpu_start && offset < m->size)
|
|
return m->dma_start + offset;
|
|
}
|
|
|
|
/* make sure dma_capable fails when no translation is available */
|
|
return DMA_MAPPING_ERROR;
|
|
}
|
|
|
|
static inline phys_addr_t translate_dma_to_phys(struct device *dev,
|
|
dma_addr_t dma_addr)
|
|
{
|
|
const struct bus_dma_region *m;
|
|
|
|
for (m = dev->dma_range_map; m->size; m++) {
|
|
u64 offset = dma_addr - m->dma_start;
|
|
|
|
if (dma_addr >= m->dma_start && offset < m->size)
|
|
return m->cpu_start + offset;
|
|
}
|
|
|
|
return (phys_addr_t)-1;
|
|
}
|
|
|
|
static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
|
|
{
|
|
dma_addr_t ret = (dma_addr_t)U64_MAX;
|
|
|
|
for (; map->size; map++)
|
|
ret = min(ret, map->dma_start);
|
|
return ret;
|
|
}
|
|
|
|
static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
|
|
{
|
|
dma_addr_t ret = 0;
|
|
|
|
for (; map->size; map++)
|
|
ret = max(ret, map->dma_start + map->size - 1);
|
|
return ret;
|
|
}
|
|
|
|
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
|
|
#include <asm/dma-direct.h>
|
|
#ifndef phys_to_dma_unencrypted
|
|
#define phys_to_dma_unencrypted phys_to_dma
|
|
#endif
|
|
#else
|
|
/*
 * Raw physical-to-DMA translation: apply the bus_dma_region offset map
 * when one exists, otherwise the DMA address equals the physical one.
 * No memory-encryption bits are applied here.
 */
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (!dev->dma_range_map)
		return paddr;
	return translate_phys_to_dma(dev, paddr);
}
|
|
|
|
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
|
|
phys_addr_t paddr)
|
|
{
|
|
return dma_addr_unencrypted(__phys_to_dma(dev, paddr));
|
|
}
|
|
/*
|
|
* If memory encryption is supported, phys_to_dma will set the memory encryption
|
|
* bit in the DMA address, and dma_to_phys will clear it.
|
|
* phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
|
|
* buffers.
|
|
*/
|
|
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
|
|
{
|
|
return dma_addr_encrypted(__phys_to_dma(dev, paddr));
|
|
}
|
|
|
|
/*
 * Reverse of phys_to_dma(): strip any encryption bits from the DMA
 * address, then undo the bus_dma_region offset map if one is present.
 */
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	dma_addr_t canonical = dma_addr_canonical(dma_addr);

	if (!dev->dma_range_map)
		return (phys_addr_t)canonical;

	return translate_dma_to_phys(dev, canonical);
}
|
|
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
|
|
|
|
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
|
|
bool force_dma_unencrypted(struct device *dev);
|
|
#else
|
|
/* Without ARCH_HAS_FORCE_DMA_UNENCRYPTED, no device needs unencrypted DMA. */
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
|
|
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
|
|
|
|
/*
 * Check whether the range [addr, addr + size - 1] is reachable by the
 * device, honouring dev->dma_mask and dev->bus_dma_limit.  When @is_ram
 * and dma_addr_t is only 32 bits wide, additionally reject addresses
 * below the DMA address of the start of RAM (min_low_pfn).
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t last = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;

	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, last) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return last <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
|
|
|
|
u64 dma_direct_get_required_mask(struct device *dev);
|
|
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
|
gfp_t gfp, unsigned long attrs);
|
|
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
|
|
dma_addr_t dma_addr, unsigned long attrs);
|
|
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
|
|
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
|
|
void dma_direct_free_pages(struct device *dev, size_t size,
|
|
struct page *page, dma_addr_t dma_addr,
|
|
enum dma_data_direction dir);
|
|
int dma_direct_supported(struct device *dev, u64 mask);
|
|
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
|
|
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
|
|
|
#endif /* _LINUX_DMA_DIRECT_H */
|