dax: remove VM_MIXEDMAP for fsdax and device dax
This patch is reworked from an earlier patch that Dan has posted: https://patchwork.kernel.org/patch/10131727/ VM_MIXEDMAP is used by dax to direct mm paths like vm_normal_page() that the memory page it is dealing with is not typical memory from the linear map. The get_user_pages_fast() path, since it does not resolve the vma, is already using {pte,pmd}_devmap() as a stand-in for VM_MIXEDMAP, so we use that as a VM_MIXEDMAP replacement in some locations. In the cases where there is no pte to consult we fallback to using vma_is_dax() to detect the VM_MIXEDMAP special case. Now that we have explicit driver pfn_t-flag opt-in/opt-out for get_user_pages() support for DAX we can stop setting VM_MIXEDMAP. This also means we no longer need to worry about safely manipulating vm_flags in a future where we support dynamically changing the dax mode of a file. DAX should also now be supported with madvise_behavior(), vma_merge(), and copy_page_range(). This patch has been tested against ndctl unit test. It has also been tested against xfstests commit: 625515d using fake pmem created by memmap and no additional issues have been observed. Link: http://lkml.kernel.org/r/152847720311.55924.16999195879201817653.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Dave Jiang <dave.jiang@intel.com> Acked-by: Dan Williams <dan.j.williams@intel.com> Cc: Jan Kara <jack@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
		
							parent
							
								
									e36488c83b
								
							
						
					
					
						commit
						e1fb4a0864
					
| @@ -474,7 +474,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma) | ||||
| 		return rc; | ||||
| 
 | ||||
| 	vma->vm_ops = &dax_vm_ops; | ||||
| 	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; | ||||
| 	vma->vm_flags |= VM_HUGEPAGE; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
|  | ||||
| @@ -126,7 +126,6 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) | ||||
| 
 | ||||
| 	file_accessed(file); | ||||
| 	vma->vm_ops = &ext2_dax_vm_ops; | ||||
| 	vma->vm_flags |= VM_MIXEDMAP; | ||||
| 	return 0; | ||||
| } | ||||
| #else | ||||
|  | ||||
| @@ -374,7 +374,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) | ||||
| 	file_accessed(file); | ||||
| 	if (IS_DAX(file_inode(file))) { | ||||
| 		vma->vm_ops = &ext4_dax_vm_ops; | ||||
| 		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; | ||||
| 		vma->vm_flags |= VM_HUGEPAGE; | ||||
| 	} else { | ||||
| 		vma->vm_ops = &ext4_file_vm_ops; | ||||
| 	} | ||||
|  | ||||
| @@ -1169,7 +1169,7 @@ xfs_file_mmap( | ||||
| 	file_accessed(filp); | ||||
| 	vma->vm_ops = &xfs_file_vm_ops; | ||||
| 	if (IS_DAX(file_inode(filp))) | ||||
| 		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; | ||||
| 		vma->vm_flags |= VM_HUGEPAGE; | ||||
| 	return 0; | ||||
| } | ||||
| 
 | ||||
|  | ||||
							
								
								
									
										6
									
								
								mm/hmm.c
									
									
									
									
									
								
							
							
						
						
									
										6
									
								
								mm/hmm.c
									
									
									
									
									
								
| @@ -676,7 +676,8 @@ int hmm_vma_get_pfns(struct hmm_range *range) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	/* FIXME support hugetlb fs */ | ||||
| 	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) { | ||||
| 	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || | ||||
| 			vma_is_dax(vma)) { | ||||
| 		hmm_pfns_special(range); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
| @@ -849,7 +850,8 @@ int hmm_vma_fault(struct hmm_range *range, bool block) | ||||
| 		return -EINVAL; | ||||
| 
 | ||||
| 	/* FIXME support hugetlb fs */ | ||||
| 	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) { | ||||
| 	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || | ||||
| 			vma_is_dax(vma)) { | ||||
| 		hmm_pfns_special(range); | ||||
| 		return -EINVAL; | ||||
| 	} | ||||
|  | ||||
| @@ -762,11 +762,11 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, | ||||
| 	 * but we need to be consistent with PTEs and architectures that | ||||
| 	 * can't support a 'special' bit. | ||||
| 	 */ | ||||
| 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); | ||||
| 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && | ||||
| 			!pfn_t_devmap(pfn)); | ||||
| 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == | ||||
| 						(VM_PFNMAP|VM_MIXEDMAP)); | ||||
| 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); | ||||
| 	BUG_ON(!pfn_t_devmap(pfn)); | ||||
| 
 | ||||
| 	if (addr < vma->vm_start || addr >= vma->vm_end) | ||||
| 		return VM_FAULT_SIGBUS; | ||||
|  | ||||
							
								
								
									
										3
									
								
								mm/ksm.c
									
									
									
									
									
								
							
							
						
						
									
										3
									
								
								mm/ksm.c
									
									
									
									
									
								
| @@ -2430,6 +2430,9 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | ||||
| 				 VM_HUGETLB | VM_MIXEDMAP)) | ||||
| 			return 0;		/* just ignore the advice */ | ||||
| 
 | ||||
| 		if (vma_is_dax(vma)) | ||||
| 			return 0; | ||||
| 
 | ||||
| #ifdef VM_SAO | ||||
| 		if (*vm_flags & VM_SAO) | ||||
| 			return 0; | ||||
|  | ||||
| @@ -859,6 +859,10 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, | ||||
| 				return NULL; | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		if (pte_devmap(pte)) | ||||
| 			return NULL; | ||||
| 
 | ||||
| 		print_bad_pte(vma, addr, pte, NULL); | ||||
| 		return NULL; | ||||
| 	} | ||||
| @@ -923,6 +927,8 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if (pmd_devmap(pmd)) | ||||
| 		return NULL; | ||||
| 	if (is_zero_pfn(pfn)) | ||||
| 		return NULL; | ||||
| 	if (unlikely(pfn > highest_memmap_pfn)) | ||||
|  | ||||
| @@ -2951,7 +2951,8 @@ int migrate_vma(const struct migrate_vma_ops *ops, | ||||
| 	/* Sanity check the arguments */ | ||||
| 	start &= PAGE_MASK; | ||||
| 	end &= PAGE_MASK; | ||||
| 	if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) | ||||
| 	if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) || | ||||
| 			vma_is_dax(vma)) | ||||
| 		return -EINVAL; | ||||
| 	if (start < vma->vm_start || start >= vma->vm_end) | ||||
| 		return -EINVAL; | ||||
|  | ||||
| @@ -527,7 +527,8 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, | ||||
| 	vm_flags_t old_flags = vma->vm_flags; | ||||
| 
 | ||||
| 	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || | ||||
| 	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) | ||||
| 	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || | ||||
| 	    vma_is_dax(vma)) | ||||
| 		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ | ||||
| 		goto out; | ||||
| 
 | ||||
|  | ||||
| @@ -1796,11 +1796,12 @@ out: | ||||
| 
 | ||||
| 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); | ||||
| 	if (vm_flags & VM_LOCKED) { | ||||
| 		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || | ||||
| 					vma == get_gate_vma(current->mm))) | ||||
| 			mm->locked_vm += (len >> PAGE_SHIFT); | ||||
| 		else | ||||
| 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || | ||||
| 					is_vm_hugetlb_page(vma) || | ||||
| 					vma == get_gate_vma(current->mm)) | ||||
| 			vma->vm_flags &= VM_LOCKED_CLEAR_MASK; | ||||
| 		else | ||||
| 			mm->locked_vm += (len >> PAGE_SHIFT); | ||||
| 	} | ||||
| 
 | ||||
| 	if (file) | ||||
|  | ||||
		Loading…
	
		Reference in New Issue
	
	Block a user