dma-mapping: convert dma_direct_*map_page to be phys_addr_t based

Convert the DMA direct mapping functions to accept physical addresses
directly instead of page+offset parameters. The functions were already
operating on physical addresses internally, so this change eliminates
the redundant page-to-physical conversion at the API boundary.

The functions dma_direct_map_page() and dma_direct_unmap_page() are
renamed to dma_direct_map_phys() and dma_direct_unmap_phys() respectively,
with their calling convention changed from (struct page *page,
unsigned long offset) to (phys_addr_t phys).
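
For illustration only (not part of this patch), a caller-side before/after
sketch of the new convention, where the page-to-physical conversion moves
to the call site:

	/* before */
	dma_addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);

	/* after */
	dma_addr = dma_direct_map_phys(dev, page_to_phys(page) + offset,
				       size, dir, attrs);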

Architecture-specific functions arch_dma_map_page_direct() and
arch_dma_unmap_page_direct() are similarly renamed to
arch_dma_map_phys_direct() and arch_dma_unmap_phys_direct().

The is_pci_p2pdma_page() checks are replaced with DMA_ATTR_MMIO checks
so the code can be shared with dma_direct_map_resource(), and
dma_direct_map_phys() is extended to handle the MMIO path as well.
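
The core of that replacement inside dma_direct_map_phys(), shown as a
simplified sketch of the swiotlb force-bounce path:

	/* before: infer MMIO-ness from the struct page */
	if (is_pci_p2pdma_page(page))
		return DMA_MAPPING_ERROR;

	/* after: the caller declares it via the attributes */
	if (attrs & DMA_ATTR_MMIO)
		goto err_overflow;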

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/bb15a22f76dc2e26683333ff54e789606cfbfcf0.1757423202.git.leonro@nvidia.com
commit e53d29f957 (parent f9374de14c)
Author:    Leon Romanovsky
Date:      2025-09-09 16:27:35 +03:00
Committer: Marek Szyprowski

5 files changed, 48 insertions(+), 33 deletions(-)

diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c

@@ -14,7 +14,7 @@
 #define can_map_direct(dev, addr) \
 	((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))
 
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr)
 {
 	if (likely(!dev->bus_dma_limit))
 		return false;
@@ -24,7 +24,7 @@ bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
 
 #define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)
 
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle)
 {
 	if (likely(!dev->bus_dma_limit))
 		return false;

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h

@@ -395,15 +395,15 @@ void *arch_dma_set_uncached(void *addr, size_t size);
 void arch_dma_clear_uncached(void *addr, size_t size);
 
 #ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
 bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
 		int nents);
 bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 		int nents);
 #else
-#define arch_dma_map_page_direct(d, a)		(false)
-#define arch_dma_unmap_page_direct(d, a)	(false)
+#define arch_dma_map_phys_direct(d, a)		(false)
+#define arch_dma_unmap_phys_direct(d, a)	(false)
 #define arch_dma_map_sg_direct(d, s, n)		(false)
 #define arch_dma_unmap_sg_direct(d, s, n)	(false)
 #endif

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c

@@ -448,7 +448,7 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		if (sg_dma_is_bus_address(sg))
 			sg_dma_unmark_bus_address(sg);
 		else
-			dma_direct_unmap_page(dev, sg->dma_address,
+			dma_direct_unmap_phys(dev, sg->dma_address,
 					      sg_dma_len(sg), dir, attrs);
 	}
 }
@@ -471,8 +471,8 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 			 */
 			break;
 		case PCI_P2PDMA_MAP_NONE:
-			sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
-					sg->offset, sg->length, dir, attrs);
+			sg->dma_address = dma_direct_map_phys(dev, sg_phys(sg),
+					sg->length, dir, attrs);
 			if (sg->dma_address == DMA_MAPPING_ERROR) {
 				ret = -EIO;
 				goto out_unmap;

diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h

@@ -80,42 +80,57 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
 		arch_dma_mark_clean(paddr, size);
 }
 
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
-		struct page *page, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
+static inline dma_addr_t dma_direct_map_phys(struct device *dev,
+		phys_addr_t phys, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
 {
-	phys_addr_t phys = page_to_phys(page) + offset;
-	dma_addr_t dma_addr = phys_to_dma(dev, phys);
+	dma_addr_t dma_addr;
 
 	if (is_swiotlb_force_bounce(dev)) {
-		if (is_pci_p2pdma_page(page))
-			return DMA_MAPPING_ERROR;
+		if (attrs & DMA_ATTR_MMIO)
+			goto err_overflow;
+
 		return swiotlb_map(dev, phys, size, dir, attrs);
 	}
 
-	if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
-	    dma_kmalloc_needs_bounce(dev, size, dir)) {
-		if (is_pci_p2pdma_page(page))
-			return DMA_MAPPING_ERROR;
-		if (is_swiotlb_active(dev))
-			return swiotlb_map(dev, phys, size, dir, attrs);
-
-		dev_WARN_ONCE(dev, 1,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		return DMA_MAPPING_ERROR;
+	if (attrs & DMA_ATTR_MMIO) {
+		dma_addr = phys;
+		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+			goto err_overflow;
+	} else {
+		dma_addr = phys_to_dma(dev, phys);
+		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
+		    dma_kmalloc_needs_bounce(dev, size, dir)) {
+			if (is_swiotlb_active(dev))
+				return swiotlb_map(dev, phys, size, dir, attrs);
+
+			goto err_overflow;
+		}
 	}
 
-	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+	if (!dev_is_dma_coherent(dev) &&
+	    !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
 		arch_sync_dma_for_device(phys, size, dir);
 	return dma_addr;
+
+err_overflow:
+	dev_WARN_ONCE(
+		dev, 1,
+		"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+		&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+	return DMA_MAPPING_ERROR;
 }
 
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+static inline void dma_direct_unmap_phys(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	phys_addr_t phys = dma_to_phys(dev, addr);
+	phys_addr_t phys;
+
+	if (attrs & DMA_ATTR_MMIO)
+		/* nothing to do: uncached and no swiotlb */
+		return;
 
+	phys = dma_to_phys(dev, addr);
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
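
As a usage sketch (hypothetical caller; bar_phys is an assumed MMIO bus
address, not from this patch), the two paths now look like:

	/* RAM: translated via phys_to_dma(), may bounce through swiotlb */
	dma_addr_t a = dma_direct_map_phys(dev, phys, size, dir, 0);

	/* MMIO: address used as-is, no swiotlb, no CPU cache maintenance */
	dma_addr_t b = dma_direct_map_phys(dev, bar_phys, size, dir,
					   DMA_ATTR_MMIO);

	/* the MMIO unmap is correspondingly a no-op */
	dma_direct_unmap_phys(dev, b, size, dir, DMA_ATTR_MMIO);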

diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c

@@ -166,8 +166,8 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	if (dma_map_direct(dev, ops) ||
-	    arch_dma_map_page_direct(dev, phys + size))
-		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	    arch_dma_map_phys_direct(dev, phys + size))
+		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
 	else if (use_dma_iommu(dev))
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
 	else
@@ -187,8 +187,8 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops) ||
-	    arch_dma_unmap_page_direct(dev, addr + size))
-		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	    arch_dma_unmap_phys_direct(dev, addr + size))
+		dma_direct_unmap_phys(dev, addr, size, dir, attrs);
 	else if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
 	else