mm/hmm: remove the page_shift member from struct hmm_range
All users pass PAGE_SIZE here, and if we wanted to support single entries for huge pages we should really just add a HMM_FAULT_HUGEPAGE flag that uses the huge page size, instead of having the caller calculate that size once, just for the hmm code to verify it.

Link: https://lore.kernel.org/r/20190806160554.14046-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 7f08263d9b (parent fac555ac93)
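To see the caller-side effect, here is a minimal sketch (not part of the commit) of how a driver fills a struct hmm_range once page_shift is gone. The field names follow the amdgpu hunk below; example_setup_range, pfns, and npages are made-up names for illustration:

#include <linux/hmm.h>

/*
 * Hypothetical helper, sketched for illustration only: mirror npages
 * base pages starting at start.  With page_shift removed, the range is
 * always expressed in PAGE_SIZE units and there is nothing to verify.
 */
static void example_setup_range(struct hmm_range *range, uint64_t *pfns,
                                unsigned long start, unsigned long npages)
{
        range->pfns = pfns;             /* one slot per base page */
        range->pfn_flags_mask = 0;      /* only default_flags matter */
        range->start = start;           /* must be PAGE_SIZE aligned */
        range->end = start + npages * PAGE_SIZE;
        /* range->page_shift = PAGE_SHIFT;  -- removed by this patch */
}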
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -818,7 +818,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
                0 : range->flags[HMM_PFN_WRITE];
        range->pfn_flags_mask = 0;
        range->pfns = pfns;
-       range->page_shift = PAGE_SHIFT;
        range->start = start;
        range->end = start + ttm->num_pages * PAGE_SIZE;
 
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -680,7 +680,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
                         args.i.p.addr + args.i.p.size, fn - fi);
 
                /* Have HMM fault pages within the fault window to the GPU. */
-               range.page_shift = PAGE_SHIFT;
                range.start = args.i.p.addr;
                range.end = args.i.p.addr + args.i.p.size;
                range.pfns = args.phys;
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -158,7 +158,6 @@ enum hmm_pfn_value_e {
  * @values: pfn value for some special case (none, special, error, ...)
  * @default_flags: default flags for the range (write, read, ... see hmm doc)
  * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
- * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
  * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
  * @valid: pfns array did not change since it has been fill by an HMM function
  */
@@ -172,31 +171,10 @@ struct hmm_range {
        const uint64_t          *values;
        uint64_t                default_flags;
        uint64_t                pfn_flags_mask;
-       uint8_t                 page_shift;
        uint8_t                 pfn_shift;
        bool                    valid;
 };
 
-/*
- * hmm_range_page_shift() - return the page shift for the range
- * @range: range being queried
- * Return: page shift (page size = 1 << page shift) for the range
- */
-static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
-{
-       return range->page_shift;
-}
-
-/*
- * hmm_range_page_size() - return the page size for the range
- * @range: range being queried
- * Return: page size for the range in bytes
- */
-static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
-{
-       return 1UL << hmm_range_page_shift(range);
-}
-
 /*
  * hmm_range_wait_until_valid() - wait for range to be valid
  * @range: range affected by invalidation to wait on
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -345,13 +345,12 @@ static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
-       unsigned long i, page_size;
+       unsigned long i;
 
        hmm_vma_walk->last = addr;
-       page_size = hmm_range_page_size(range);
-       i = (addr - range->start) >> range->page_shift;
+       i = (addr - range->start) >> PAGE_SHIFT;
 
-       for (; addr < end; addr += page_size, i++) {
+       for (; addr < end; addr += PAGE_SIZE, i++) {
                pfns[i] = range->values[HMM_PFN_NONE];
                if (fault || write_fault) {
                        int ret;
@@ -779,7 +778,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      struct mm_walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-       unsigned long addr = start, i, pfn, mask, size, pfn_inc;
+       unsigned long addr = start, i, pfn, mask;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
@@ -790,24 +789,12 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
        pte_t entry;
        int ret = 0;
 
-       size = huge_page_size(h);
-       mask = size - 1;
-       if (range->page_shift != PAGE_SHIFT) {
-               /* Make sure we are looking at a full page. */
-               if (start & mask)
-                       return -EINVAL;
-               if (end < (start + size))
-                       return -EINVAL;
-               pfn_inc = size >> PAGE_SHIFT;
-       } else {
-               pfn_inc = 1;
-               size = PAGE_SIZE;
-       }
+       mask = huge_page_size(h) - 1;
 
        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(pte);
 
-       i = (start - range->start) >> range->page_shift;
+       i = (start - range->start) >> PAGE_SHIFT;
        orig_pfn = range->pfns[i];
        range->pfns[i] = range->values[HMM_PFN_NONE];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -819,8 +806,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                goto unlock;
        }
 
-       pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
-       for (; addr < end; addr += size, i++, pfn += pfn_inc)
+       pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+       for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                 cpu_flags;
        hmm_vma_walk->last = end;
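As a sanity check on the new arithmetic, here is a small userspace sketch (assumed values, not from the commit) that mirrors the patched loop: with 4 KiB base pages, one 2 MiB huge PTE fills 512 consecutive pfns[] slots, stepping the pfn by one per slot:

#include <stdio.h>

#define PAGE_SHIFT      12                      /* assuming 4 KiB base pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long huge_size = 2UL << 20;    /* 2 MiB hugetlb page */
        unsigned long mask = huge_size - 1;     /* mask = huge_page_size(h) - 1 */
        unsigned long range_start = 0x200000;   /* made-up range origin */
        unsigned long start = 0x200000, end = start + huge_size;
        unsigned long pte_pfn = 0x80000;        /* made-up pfn of the huge page */

        /* Same index and pfn computation as the patched walker. */
        unsigned long i = (start - range_start) >> PAGE_SHIFT;
        unsigned long pfn = pte_pfn + ((start & mask) >> PAGE_SHIFT);
        unsigned long slots = 0;

        for (unsigned long addr = start; addr < end; addr += PAGE_SIZE, i++, pfn++)
                slots++;

        /* 2 MiB / 4 KiB = 512 slots; last pfn written is pte_pfn + 511. */
        printf("slots=%lu last_pfn=0x%lx\n", slots, pfn - 1);
        return 0;
}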
@@ -857,14 +844,13 @@ static void hmm_pfns_clear(struct hmm_range *range,
  */
 int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
 {
-       unsigned long mask = ((1UL << range->page_shift) - 1UL);
        struct hmm *hmm = mirror->hmm;
        unsigned long flags;
 
        range->valid = false;
        range->hmm = NULL;
 
-       if ((range->start & mask) || (range->end & mask))
+       if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
                return -EINVAL;
        if (range->start >= range->end)
                return -EINVAL;
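The open-coded mask test above is simply a page-alignment check. A sketch of an equivalent formulation using the kernel's IS_ALIGNED() macro (the helper name here is made up, not part of the commit):

#include <linux/kernel.h>       /* IS_ALIGNED() */
#include <linux/hmm.h>

/* Hypothetical helper, equivalent to the new check in hmm_range_register(). */
static bool hmm_range_is_page_aligned(const struct hmm_range *range)
{
        return IS_ALIGNED(range->start, PAGE_SIZE) &&
               IS_ALIGNED(range->end, PAGE_SIZE);
}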
@@ -971,16 +957,6 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
                if (vma == NULL || (vma->vm_flags & device_vma))
                        return -EFAULT;
 
-               if (is_vm_hugetlb_page(vma)) {
-                       if (huge_page_shift(hstate_vma(vma)) !=
-                           range->page_shift &&
-                           range->page_shift != PAGE_SHIFT)
-                               return -EINVAL;
-               } else {
-                       if (range->page_shift != PAGE_SHIFT)
-                               return -EINVAL;
-               }
-
                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If vma do not allow read access, then assume that it