iommu/iova: Reorganise some code
The iova_cache_{get,put}() calls really represent top-level lifecycle
management for the whole IOVA library, so it's long been rather confusing
to have them buried right in the middle of the allocator implementation
details. Move them to a more expected position at the end of the file,
where it will then also be easier to expand them. With this, we can also
move the rcache hotplug handler (plus another stray function) into the
rcache portion of the file.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/d4753562f4faa0e6b3aeebcbf88fdb60cc22d715.1707144953.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 7f845d8b2e
parent e7b3533c81
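The commit message frames iova_cache_get()/iova_cache_put() as the library's
top-level lifecycle hooks. As a rough illustration only, here is a minimal
sketch of how a consumer pairs them with the rest of the <linux/iova.h> API
(init_iova_domain(), alloc_iova(), put_iova_domain()); the my_* names and the
granule/limit values are made up for the example, not taken from the patch.

/* Hypothetical consumer of the IOVA library -- names are illustrative. */
#include <linux/iova.h>
#include <linux/sizes.h>

static struct iova_domain my_iovad;

static int my_iova_setup(void)
{
        struct iova *iova;
        int ret;

        /*
         * The first get creates the global "iommu_iova" kmem_cache and
         * registers the CPU-hotplug callback; later gets only bump the
         * refcount (see iova_cache_get() in the diff below).
         */
        ret = iova_cache_get();
        if (ret)
                return ret;

        /* 4K granule, allocations start at PFN 1 (example values). */
        init_iova_domain(&my_iovad, SZ_4K, 1);

        /* Example allocation: 8 granules below an arbitrary PFN limit. */
        iova = alloc_iova(&my_iovad, 8, (1UL << 20) - 1, true);
        if (iova)
                __free_iova(&my_iovad, iova);

        return 0;
}

static void my_iova_teardown(void)
{
        put_iova_domain(&my_iovad);

        /*
         * Dropping the last reference destroys the kmem_cache and removes
         * the hotplug state (see iova_cache_put() in the diff below).
         */
        iova_cache_put();
}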
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,24 +24,8 @@ static bool iova_rcache_insert(struct iova_domain *iovad,
 static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                      unsigned long size,
                                      unsigned long limit_pfn);
-static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
-
-unsigned long iova_rcache_range(void)
-{
-        return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
-}
-
-static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
-{
-        struct iova_domain *iovad;
-
-        iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
-
-        free_cpu_cached_iovas(cpu, iovad);
-        return 0;
-}
-
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_global_cached_iovas(struct iova_domain *iovad);
 
 static struct iova *to_iova(struct rb_node *node)
@@ -252,53 +236,6 @@ static void free_iova_mem(struct iova *iova)
                 kmem_cache_free(iova_cache, iova);
 }
 
-int iova_cache_get(void)
-{
-        int err = -ENOMEM;
-
-        mutex_lock(&iova_cache_mutex);
-        if (!iova_cache_users) {
-                iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
-                                               SLAB_HWCACHE_ALIGN, NULL);
-                if (!iova_cache)
-                        goto out_err;
-
-                err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
-                                              NULL, iova_cpuhp_dead);
-                if (err) {
-                        pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
-                        goto out_err;
-                }
-        }
-
-        iova_cache_users++;
-        mutex_unlock(&iova_cache_mutex);
-
-        return 0;
-
-out_err:
-        kmem_cache_destroy(iova_cache);
-        mutex_unlock(&iova_cache_mutex);
-        return err;
-}
-EXPORT_SYMBOL_GPL(iova_cache_get);
-
-void iova_cache_put(void)
-{
-        mutex_lock(&iova_cache_mutex);
-        if (WARN_ON(!iova_cache_users)) {
-                mutex_unlock(&iova_cache_mutex);
-                return;
-        }
-        iova_cache_users--;
-        if (!iova_cache_users) {
-                cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
-                kmem_cache_destroy(iova_cache);
-        }
-        mutex_unlock(&iova_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(iova_cache_put);
-
 /**
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
@@ -653,6 +590,11 @@ struct iova_rcache {
         struct delayed_work work;
 };
 
+unsigned long iova_rcache_range(void)
+{
+        return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
         struct iova_magazine *mag;
@@ -989,5 +931,63 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
                 spin_unlock_irqrestore(&rcache->lock, flags);
         }
 }
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+        struct iova_domain *iovad;
+
+        iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+        free_cpu_cached_iovas(cpu, iovad);
+        return 0;
+}
+
+int iova_cache_get(void)
+{
+        int err = -ENOMEM;
+
+        mutex_lock(&iova_cache_mutex);
+        if (!iova_cache_users) {
+                iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
+                                               SLAB_HWCACHE_ALIGN, NULL);
+                if (!iova_cache)
+                        goto out_err;
+
+                err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
+                                              NULL, iova_cpuhp_dead);
+                if (err) {
+                        pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
+                        goto out_err;
+                }
+        }
+
+        iova_cache_users++;
+        mutex_unlock(&iova_cache_mutex);
+
+        return 0;
+
+out_err:
+        kmem_cache_destroy(iova_cache);
+        mutex_unlock(&iova_cache_mutex);
+        return err;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+        mutex_lock(&iova_cache_mutex);
+        if (WARN_ON(!iova_cache_users)) {
+                mutex_unlock(&iova_cache_mutex);
+                return;
+        }
+        iova_cache_users--;
+        if (!iova_cache_users) {
+                cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
+                kmem_cache_destroy(iova_cache);
+        }
+        mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
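For context on the hotplug handler being moved: iova_cpuhp_dead() is the
teardown callback of the CPUHP_IOMMU_IOVA_DEAD multi-instance state, and each
iova_domain hooks into it through the cpuhp_dead hlist_node that the callback
recovers with hlist_entry_safe(). A rough sketch of that registration pattern
follows; the my_* wrapper names are invented, and the assumption that the real
registration happens when a domain's rcaches are initialised (in iova.c,
iova_domain_init_rcaches()) is not shown in this diff.

#include <linux/cpuhotplug.h>
#include <linux/iova.h>

/* Sketch only: the real registration lives in iova.c, not in this patch. */
static int my_rcache_init(struct iova_domain *iovad)
{
        /*
         * Register this domain as an instance of the multi-state set up by
         * iova_cache_get(); when a CPU goes offline, iova_cpuhp_dead() runs
         * once per registered domain and flushes that CPU's per-CPU caches.
         */
        return cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
                                                &iovad->cpuhp_dead);
}

static void my_rcache_exit(struct iova_domain *iovad)
{
        cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
                                            &iovad->cpuhp_dead);
}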