iommu/intel: update to new mmu_notifier semantic
Calls to mmu_notifier_invalidate_page() were replaced by calls to
mmu_notifier_invalidate_range() and are now bracketed by calls to
mmu_notifier_invalidate_range_start()/end().

Remove now useless invalidate_page callback.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: iommu@lists.linux-foundation.org
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 30ef7d2c05
parent f0d1c713d6
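For context, the caller-side change the commit message describes looks roughly like the sketch below. This is a minimal illustration, not code from this commit: it assumes the mmu_notifier signatures of this kernel generation (mm, start, end arguments, before the later switch to struct mmu_notifier_range), and example_zap_one_page() is a hypothetical helper.

/*
 * Illustrative sketch only -- not part of this patch. Shows the pattern
 * the commit message describes: per-page notification is replaced by
 * mmu_notifier_invalidate_range(), bracketed by range_start()/range_end().
 */
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static void example_zap_one_page(struct mm_struct *mm, unsigned long address)
{
	unsigned long start = address & PAGE_MASK;
	unsigned long end   = start + PAGE_SIZE;

	/* Old pattern, now gone: mmu_notifier_invalidate_page(mm, address); */

	/* New pattern: open the bracket around the unmap ... */
	mmu_notifier_invalidate_range_start(mm, start, end);

	/* ... tear down the PTE and flush the CPU TLB here ... */

	/* ... notify the invalidated span, then close the bracket. */
	mmu_notifier_invalidate_range(mm, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
}

With invalidate_range() always called inside such brackets, a separate per-page invalidate_page callback no longer serves any purpose, which is why the diff below drops it.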
drivers/iommu/intel-svm.c:

@@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
 	intel_flush_svm_range(svm, address, 1, 1, 0);
 }
 
-static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm,
-				  unsigned long address)
-{
-	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
-	intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
 /* Pages have been freed at this point */
 static void intel_invalidate_range(struct mmu_notifier *mn,
 				   struct mm_struct *mm,
@@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static const struct mmu_notifier_ops intel_mmuops = {
 	.release = intel_mm_release,
 	.change_pte = intel_change_pte,
-	.invalidate_page = intel_invalidate_page,
 	.invalidate_range = intel_invalidate_range,
 };
 