mirror of
https://github.com/torvalds/linux.git
synced 2024-12-28 22:02:28 +00:00
e511c4a3d2
Up till now, dax_direct_access() is used implicitly for normal access, but for the purpose of recovery write, dax range with poison is requested. To make the interface clear, introduce enum dax_access_mode { DAX_ACCESS, DAX_RECOVERY_WRITE, } where DAX_ACCESS is used for normal dax access, and DAX_RECOVERY_WRITE is used for dax recovery write. Suggested-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Jane Chu <jane.chu@oracle.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Cc: Mike Snitzer <snitzer@redhat.com> Reviewed-by: Vivek Goyal <vgoyal@redhat.com> Link: https://lore.kernel.org/r/165247982851.52965.11024212198889762949.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com>
52 lines
1.3 KiB
C
52 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* Copyright (c) 2014-2016, Intel Corporation.
|
|
*/
|
|
#include "test/nfit_test.h"
|
|
#include <linux/blkdev.h>
|
|
#include <linux/dax.h>
|
|
#include <pmem.h>
|
|
#include <nd.h>
|
|
|
|
/*
 * nfit_test override of pmem_direct_access(): translate @pgoff within
 * @pmem into a kernel virtual address and pfn for DAX use.
 *
 * Returns the number of pages that may be accessed contiguously
 * starting at @pgoff, or -EIO if the requested range intersects known
 * poison (badblocks).
 *
 * @mode (DAX_ACCESS / DAX_RECOVERY_WRITE) is part of the
 * dax_direct_access() interface but is not consulted here; poisoned
 * ranges always fail with -EIO.  @kaddr and @pfn are optional output
 * parameters and may be NULL.
 */
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		pfn_t *pfn)
{
	/* Byte offset into the device's virtual/physical mapping. */
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	/*
	 * Fail the whole request if any sector in it is on the
	 * badblocks list (sector = byte offset / 512, length in bytes).
	 */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit dax to a single page at a time given the vmalloc()-backed
	 * range in the nfit_test case.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		/* vmalloc() memory: resolve the pfn page by page. */
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn_t(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		/* Only one page is guaranteed contiguous here. */
		return 1;
	}

	/* Real (physically contiguous) pmem: direct translation. */
	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
|