mirror of https://github.com/torvalds/linux.git
iommufd: Add additional invariant assertions
These are on performance paths, so we protect them with CONFIG_IOMMUFD_TEST
to not take a hit during normal operation. They are useful when running the
test suite and syzkaller to find data structure inconsistencies early.

Link: https://lore.kernel.org/r/18-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
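Every hunk below uses the same idiom: the assertion is written as IS_ENABLED(CONFIG_IOMMUFD_TEST) combined with WARN_ON(), so when the config option is off the condition folds to a compile-time constant and the whole check is discarded. A minimal sketch of the idiom follows; the function example_check() and its invariant are illustrative, not part of the patch:

/*
 * Illustrative only, not from the patch. IS_ENABLED() expands to the
 * integer constant 1 or 0, so with CONFIG_IOMMUFD_TEST=n the condition
 * below is constant-false and the compiler eliminates the WARN_ON()
 * and its operands entirely; the fast path pays nothing.
 */
static int example_check(struct iopt_area *area, unsigned long iova)
{
	/* Invariant: the iova must fall inside the area's range */
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
	    WARN_ON(iova < iopt_area_iova(area) ||
		    iova > iopt_area_last_iova(area)))
		return -EINVAL;	/* fail fast on test/syzkaller builds */
	/* ... normal fast-path work ... */
	return 0;
}

On test builds WARN_ON() both logs a backtrace and returns true, so the caller can bail out with -EINVAL instead of operating on an inconsistent data structure.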
commit 52f528583b
parent e26eed4f62
drivers/iommu/iommufd/device.c
@@ -625,6 +625,11 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
 	struct iopt_area *area;
 	int rc;
 
+	/* Driver's ops don't support pin_pages */
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+	    WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
+		return -EINVAL;
+
 	if (!length)
 		return -EINVAL;
 	if (check_add_overflow(iova, length - 1, &last_iova))
drivers/iommu/iommufd/io_pagetable.c
@@ -251,6 +251,11 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
 			(uintptr_t)elm->pages->uptr + elm->start_byte, length);
 		if (rc)
 			goto out_unlock;
+		if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+		    WARN_ON(iopt_check_iova(iopt, *dst_iova, length))) {
+			rc = -EINVAL;
+			goto out_unlock;
+		}
 	} else {
 		rc = iopt_check_iova(iopt, *dst_iova, length);
 		if (rc)
@@ -277,6 +282,8 @@ out_unlock:
 
 static void iopt_abort_area(struct iopt_area *area)
 {
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(area->pages);
 	if (area->iopt) {
 		down_write(&area->iopt->iova_rwsem);
 		interval_tree_remove(&area->node, &area->iopt->area_itree);
@@ -642,6 +649,9 @@ void iopt_destroy_table(struct io_pagetable *iopt)
 {
 	struct interval_tree_node *node;
 
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		iopt_remove_reserved_iova(iopt, NULL);
+
 	while ((node = interval_tree_iter_first(&iopt->allowed_itree, 0,
 						ULONG_MAX))) {
 		interval_tree_remove(node, &iopt->allowed_itree);
@@ -688,6 +698,8 @@ static void iopt_unfill_domain(struct io_pagetable *iopt,
 				continue;
 
 			mutex_lock(&pages->mutex);
+			if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+				WARN_ON(!area->storage_domain);
 			if (area->storage_domain == domain)
 				area->storage_domain = storage_domain;
 			mutex_unlock(&pages->mutex);
@@ -792,6 +804,16 @@ static int iopt_check_iova_alignment(struct io_pagetable *iopt,
 		    (iopt_area_length(area) & align_mask) ||
 		    (area->page_offset & align_mask))
 			return -EADDRINUSE;
+
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) {
+		struct iommufd_access *access;
+		unsigned long index;
+
+		xa_for_each(&iopt->access_list, index, access)
+			if (WARN_ON(access->iova_alignment >
+				    new_iova_alignment))
+				return -EADDRINUSE;
+	}
 	return 0;
 }
 
drivers/iommu/iommufd/io_pagetable.h
@@ -101,6 +101,9 @@ static inline size_t iopt_area_length(struct iopt_area *area)
 static inline unsigned long iopt_area_start_byte(struct iopt_area *area,
 						 unsigned long iova)
 {
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(iova < iopt_area_iova(area) ||
+			iova > iopt_area_last_iova(area));
 	return (iova - iopt_area_iova(area)) + area->page_offset +
 	       iopt_area_index(area) * PAGE_SIZE;
 }
drivers/iommu/iommufd/pages.c
@@ -162,12 +162,20 @@ void interval_tree_double_span_iter_next(
 
 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
 {
-	pages->npinned += npages;
+	int rc;
+
+	rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(rc || pages->npinned > pages->npages);
 }
 
 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
 {
-	pages->npinned -= npages;
+	int rc;
+
+	rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(rc || pages->npinned > pages->npages);
 }
 
 static void iopt_pages_err_unpin(struct iopt_pages *pages,
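For reference, check_add_overflow()/check_sub_overflow() (linux/overflow.h) always store the possibly-wrapped result in the destination and return true only when the operation overflowed, so the counters above are still updated unconditionally; only the WARN_ON() is compiled out on non-test builds. A small sketch of the semantics, illustrative and not part of the patch:

#include <linux/limits.h>
#include <linux/overflow.h>

/*
 * Illustrative only, not from the patch. check_add_overflow(a, b, d)
 * stores the (possibly wrapped) sum in *d and returns true iff the
 * addition overflowed.
 */
static bool example_overflow(void)
{
	unsigned int sum;

	/* UINT_MAX + 1 wraps to 0 and the helper reports it. */
	return check_add_overflow(UINT_MAX, 1u, &sum); /* true, sum == 0 */
}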
@@ -189,6 +197,9 @@ static void iopt_pages_err_unpin(struct iopt_pages *pages,
 static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
 					     unsigned long index)
 {
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(index < iopt_area_index(area) ||
+			index > iopt_area_last_index(area));
 	index -= iopt_area_index(area);
 	if (index == 0)
 		return iopt_area_iova(area);
@@ -198,6 +209,9 @@ static unsigned long iopt_area_index_to_iova(struct iopt_area *area,
 static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area,
 						  unsigned long index)
 {
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(index < iopt_area_index(area) ||
+			index > iopt_area_last_index(area));
 	if (index == iopt_area_last_index(area))
 		return iopt_area_last_iova(area);
 	return iopt_area_iova(area) - area->page_offset +
@@ -286,6 +300,8 @@ static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns)
 {
 	if (!batch->total_pfns)
 		return;
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(batch->total_pfns != batch->npfns[0]);
 	skip_pfns = min(batch->total_pfns, skip_pfns);
 	batch->pfns[0] += skip_pfns;
 	batch->npfns[0] -= skip_pfns;
@@ -301,6 +317,8 @@ static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup,
 	batch->pfns = temp_kmalloc(&size, backup, backup_len);
 	if (!batch->pfns)
 		return -ENOMEM;
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz))
+		return -EINVAL;
 	batch->array_size = size / elmsz;
 	batch->npfns = (u32 *)(batch->pfns + batch->array_size);
 	batch_clear(batch);
@@ -429,6 +447,10 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
 	unsigned long start_iova = iova;
 	int rc;
 
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
+		WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE ||
+			size % PAGE_SIZE);
+
 	while (size) {
 		rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
 		if (rc)
@@ -718,6 +740,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
 	uintptr_t uptr;
 	long rc;
 
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+	    WARN_ON(last_index < start_index))
+		return -EINVAL;
+
 	if (!user->upages) {
 		/* All undone in pfn_reader_destroy() */
 		user->upages_len =
@@ -956,6 +982,10 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
 	struct iopt_area *area;
 	int rc;
 
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+	    WARN_ON(span->last_used < start_index))
+		return -EINVAL;
+
 	if (span->is_used == 1) {
 		batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
 				  start_index, span->last_used);
@@ -1008,6 +1038,10 @@ static int pfn_reader_next(struct pfn_reader *pfns)
 	while (pfns->batch_end_index != pfns->last_index + 1) {
 		unsigned int npfns = pfns->batch.total_pfns;
 
+		if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+		    WARN_ON(interval_tree_double_span_iter_done(&pfns->span)))
+			return -EINVAL;
+
 		rc = pfn_reader_fill_span(pfns);
 		if (rc)
 			return rc;
@@ -1091,6 +1125,10 @@ static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
 {
 	int rc;
 
+	if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
+	    WARN_ON(last_index < start_index))
+		return -EINVAL;
+
 	rc = pfn_reader_init(pfns, pages, start_index, last_index);
 	if (rc)
 		return rc;