dma-mapping: add dma_opt_mapping_size()
Streaming DMA mapping involving an IOMMU may be much slower for larger
total mapping size. This is because every IOMMU DMA mapping requires an
IOVA to be allocated and freed. IOVA sizes above a certain limit are not
cached, which can have a big impact on DMA mapping performance.

Provide an API for device drivers to know this "optimal" limit, such
that they may try to produce mappings which don't exceed it.

Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 942a8186eb
commit a229cc14f3
Documentation/core-api/dma-api.rst:

@@ -204,6 +204,20 @@ Returns the maximum size of a mapping for the device. The size parameter
 of the mapping functions like dma_map_single(), dma_map_page() and
 others should not be larger than the returned value.
 
+::
+
+	size_t
+	dma_opt_mapping_size(struct device *dev);
+
+Returns the maximum optimal size of a mapping for the device.
+
+Mapping larger buffers may take much longer in certain scenarios. In
+addition, for high-rate short-lived streaming mappings, the upfront time
+spent on the mapping may account for an appreciable part of the total
+request lifetime. As such, if splitting larger requests incurs no
+significant performance penalty, then device drivers are advised to
+limit total DMA streaming mappings length to the returned value.
+
 ::
 
 	bool
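To make the documentation's advice concrete, here is a minimal sketch (not
part of this commit) of how a block driver could clamp its per-request I/O
size to the optimal DMA limit. The function and parameter names are
illustrative; dma_opt_mapping_size(), blk_queue_max_hw_sectors() and
SECTOR_SHIFT are existing kernel interfaces:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative only: cap the queue's maximum I/O size so that each
 * request's streaming DMA mapping stays within the "optimal" limit
 * (i.e. within the IOVA caching range when an IOMMU is in use).
 */
static void mydrv_cap_io_size(struct device *dma_dev,
			      struct request_queue *q,
			      unsigned int hw_max_sectors)
{
	size_t opt = dma_opt_mapping_size(dma_dev);

	/* SIZE_MAX (or 0 from the !HAS_DMA stub) means "no preference". */
	if (opt && opt != SIZE_MAX)
		blk_queue_max_hw_sectors(q, min_t(unsigned int,
				hw_max_sectors, opt >> SECTOR_SHIFT));
}

Splitting requests at this boundary trades a little merging efficiency for
fast, cached IOVA allocation on every mapping.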
include/linux/dma-map-ops.h:

@@ -69,6 +69,7 @@ struct dma_map_ops {
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
 	size_t (*max_mapping_size)(struct device *dev);
+	size_t (*opt_mapping_size)(void);
 	unsigned long (*get_merge_boundary)(struct device *dev);
 };
 
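For reference, a DMA backend opts in by filling the new callback. A
follow-up patch in this series does this for the IOMMU DMA ops, reporting
the largest IOVA size the rcaches will serve; the sketch below assumes that
later patch's iova_rcache_range() helper, which is not added by this commit:

/*
 * From the follow-up IOMMU patch (assumed here): report the largest
 * IOVA size that the rcaches will cache, so mappings up to this size
 * take the fast allocation path.
 */
static size_t iommu_dma_opt_mapping_size(void)
{
	return iova_rcache_range();
}

static const struct dma_map_ops iommu_dma_ops = {
	/* ... existing callbacks ... */
	.opt_mapping_size	= iommu_dma_opt_mapping_size,
};

The op takes no device argument because the rcache size limit is a property
of the IOVA allocator itself, not of any one device.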
include/linux/dma-mapping.h:

@@ -144,6 +144,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+size_t dma_opt_mapping_size(struct device *dev);
 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
@@ -266,6 +267,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
 	return 0;
 }
+static inline size_t dma_opt_mapping_size(struct device *dev)
+{
+	return 0;
+}
 static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
 	return false;
kernel/dma/mapping.c:

@@ -773,6 +773,18 @@ size_t dma_max_mapping_size(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
 
+size_t dma_opt_mapping_size(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	size_t size = SIZE_MAX;
+
+	if (ops && ops->opt_mapping_size)
+		size = ops->opt_mapping_size();
+
+	return min(dma_max_mapping_size(dev), size);
+}
+EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
+
 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
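A note on the design visible in the hunk above: the min() against
dma_max_mapping_size() means the advisory limit can only ever tighten the
hard maximum, never relax it, and a backend that does not implement
->opt_mapping_size leaves size at SIZE_MAX, so such callers simply see the
device's plain maximum mapping size.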