From 9906b9352a35427508d974db3a496eb3c49e2010 Mon Sep 17 00:00:00 2001
From: "Longpeng(Mike)"
Date: Thu, 14 Oct 2021 13:38:39 +0800
Subject: [PATCH] iommu/vt-d: Avoid duplicate removing in __domain_mapping()

__domain_mapping() always removes the pages in the range from 'iov_pfn'
to 'end_pfn', but 'end_pfn' is always the last pfn of the range that the
caller wants to map. This introduces many duplicate removals and makes
the map operation take too long, for example:

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x7e617ff
    iov_pfn: 0x140000, end_pfn: 0x7e617ff
    iov_pfn: 0x180000, end_pfn: 0x7e617ff
    iov_pfn: 0x1c0000, end_pfn: 0x7e617ff
    iov_pfn: 0x200000, end_pfn: 0x7e617ff
    ...

  it takes about 50ms in total.

We can reduce the cost by recalculating 'end_pfn' and limiting it to
the boundary of the end of this pte page:

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x13ffff
    iov_pfn: 0x140000, end_pfn: 0x17ffff
    iov_pfn: 0x180000, end_pfn: 0x1bffff
    iov_pfn: 0x1c0000, end_pfn: 0x1fffff
    iov_pfn: 0x200000, end_pfn: 0x23ffff
    ...

  it only needs 9ms now.

This also removes a meaningless BUG_ON() in __domain_mapping().

Signed-off-by: Longpeng(Mike)
Tested-by: Liujunjie
Link: https://lore.kernel.org/r/20211008000433.1115-1-longpeng2@huawei.com
Signed-off-by: Lu Baolu
Link: https://lore.kernel.org/r/20211014053839.727419-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel/iommu.c | 11 ++++++-----
 include/linux/intel-iommu.h |  6 ++++++
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 16a35669a9d0..0bde0c8b4126 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2479,12 +2479,17 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 				return -ENOMEM;
 			first_pte = pte;
 
+			lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
 			/* It is large page*/
 			if (largepage_lvl > 1) {
 				unsigned long end_pfn;
+				unsigned long pages_to_remove;
 
 				pteval |= DMA_PTE_LARGE_PAGE;
-				end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
+				pages_to_remove = min_t(unsigned long, nr_pages,
+							nr_pte_to_next_page(pte) * lvl_pages);
+				end_pfn = iov_pfn + pages_to_remove - 1;
 				switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
 			} else {
 				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
@@ -2506,10 +2511,6 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			WARN_ON(1);
 		}
 
-		lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
-		BUG_ON(nr_pages < lvl_pages);
-
 		nr_pages -= lvl_pages;
 		iov_pfn += lvl_pages;
 		phys_pfn += lvl_pages;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 52481625838c..69230fd695ea 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -710,6 +710,12 @@ static inline bool first_pte_in_page(struct dma_pte *pte)
 	return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
 }
 
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
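
Note (illustration only, not part of the patch): below is a minimal user-space
sketch of the old and new 'end_pfn' calculations, using the numbers from the
commit message. It assumes 4KiB VT-d page-table pages (512 PTEs per page, i.e.
VTD_STRIDE_SHIFT == 9), a level-2 superpage mapping (lvl_pages == 512), and a
pte sitting at the start of a pte page, as in the first iteration above;
min_ul() is a local stand-in for the kernel's min_t().

/*
 * Sketch only: mirrors the end_pfn arithmetic in user space under the
 * assumptions stated above; it is not the kernel implementation itself.
 */
#include <stdio.h>

#define PTES_PER_PAGE	512UL	/* 1 << VTD_STRIDE_SHIFT (assumed to be 9) */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long iov_pfn = 0x100000, nr_pages = 0x7d61800;
	unsigned long lvl_pages = 512;	/* level-2 superpage: 2MiB / 4KiB */
	unsigned long nr_pte_to_next_page = PTES_PER_PAGE; /* pte at page start */

	/* Old: end_pfn always reaches the last pfn of the whole request
	 * (level_mask(2) clears the low 9 bits of the pfn). */
	unsigned long old_end = ((iov_pfn + nr_pages) & ~(lvl_pages - 1)) - 1;

	/* New: clamp to the last pfn covered by the current pte page. */
	unsigned long pages_to_remove = min_ul(nr_pages,
					       nr_pte_to_next_page * lvl_pages);
	unsigned long new_end = iov_pfn + pages_to_remove - 1;

	printf("old end_pfn: %#lx\n", old_end);	/* 0x7e617ff */
	printf("new end_pfn: %#lx\n", new_end);	/* 0x13ffff  */
	return 0;
}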