// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015-2017 Intel Corporation.
 */
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
2017-02-08 17:51:30 +00:00
|
|
|
#include <linux/sched/signal.h>
|
2015-07-30 19:17:43 +00:00
|
|
|
#include <linux/device.h>
|
2016-03-08 19:15:28 +00:00
|
|
|
#include <linux/module.h>
|
2015-07-30 19:17:43 +00:00
|
|
|
|
|
|
|
#include "hfi.h"
|
|
|
|
|
2016-03-08 19:15:28 +00:00
|
|
|
/*
 * Combined limit (in MB) for the send- and receive-side pinned-page
 * caches; consulted by hfi1_can_pin_pages() below.  Writable at runtime
 * by root (S_IWUSR), readable by everyone (S_IRUGO).
 */
static unsigned long cache_size = 256;
module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Determine whether the caller can pin pages.
|
|
|
|
*
|
|
|
|
* This function should be used in the implementation of buffer caches.
|
|
|
|
* The cache implementation should call this function prior to attempting
|
|
|
|
* to pin buffer pages in order to determine whether they should do so.
|
|
|
|
* The function computes cache limits based on the configured ulimit and
|
|
|
|
* cache size. Use of this function is especially important for caches
|
|
|
|
* which are not limited in any other way (e.g. by HW resources) and, thus,
|
|
|
|
* could keeping caching buffers.
|
|
|
|
*
|
|
|
|
*/
|
2016-07-28 19:21:19 +00:00
|
|
|
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
|
|
|
|
u32 nlocked, u32 npages)
|
2015-07-30 19:17:43 +00:00
|
|
|
{
|
2016-03-08 19:15:28 +00:00
|
|
|
unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
|
|
|
|
size = (cache_size * (1UL << 20)); /* convert to bytes */
|
2017-04-13 03:29:29 +00:00
|
|
|
unsigned int usr_ctxts =
|
|
|
|
dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
|
2015-12-08 22:10:09 +00:00
|
|
|
bool can_lock = capable(CAP_IPC_LOCK);
|
2016-03-08 19:15:28 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Calculate per-cache size. The calculation below uses only a quarter
|
|
|
|
* of the available per-context limit. This leaves space for other
|
|
|
|
* pinning. Should we worry about shared ctxts?
|
|
|
|
*/
|
|
|
|
cache_limit = (ulimit / usr_ctxts) / 4;
|
|
|
|
|
|
|
|
/* If ulimit isn't set to "unlimited" and is smaller than cache_size. */
|
|
|
|
if (ulimit != (-1UL) && size > cache_limit)
|
|
|
|
size = cache_limit;
|
|
|
|
|
|
|
|
/* Convert to number of pages */
|
|
|
|
size = DIV_ROUND_UP(size, PAGE_SIZE);
|
2015-07-30 19:17:43 +00:00
|
|
|
|
2019-02-06 17:59:15 +00:00
|
|
|
pinned = atomic64_read(&mm->pinned_vm);
|
2015-07-30 19:17:43 +00:00
|
|
|
|
2016-03-08 19:15:28 +00:00
|
|
|
/* First, check the absolute limit against all pinned pages. */
|
|
|
|
if (pinned + npages >= ulimit && !can_lock)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return ((nlocked + npages) <= size) || can_lock;
|
|
|
|
}
|
|
|
|
|
2016-07-28 19:21:19 +00:00
|
|
|
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
|
|
|
|
bool writable, struct page **pages)
|
2016-03-08 19:15:28 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2019-05-14 00:17:18 +00:00
|
|
|
unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
|
2015-07-30 19:17:43 +00:00
|
|
|
|
2020-01-31 06:13:02 +00:00
|
|
|
ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
|
2015-12-08 22:10:09 +00:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2019-02-06 17:59:15 +00:00
|
|
|
atomic64_add(ret, &mm->pinned_vm);
|
2015-07-30 19:17:43 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-03-08 19:15:33 +00:00
|
|
|
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
|
|
|
|
size_t npages, bool dirty)
|
2015-07-30 19:17:43 +00:00
|
|
|
{
|
2020-01-31 06:13:35 +00:00
|
|
|
unpin_user_pages_dirty_lock(p, npages, dirty);
|
2015-07-30 19:17:43 +00:00
|
|
|
|
2016-03-08 19:15:33 +00:00
|
|
|
if (mm) { /* during close after signal, mm can be NULL */
|
2019-02-06 17:59:15 +00:00
|
|
|
atomic64_sub(npages, &mm->pinned_vm);
|
2015-07-30 19:17:43 +00:00
|
|
|
}
|
|
|
|
}
|