RDMA v6.10 merge window


Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Aside from the usual things this has an arch update for
  __iowrite64_copy() used by the RDMA drivers.

  This API was intended to generate large 64 byte MemWr TLPs on PCI. On
  most processors this works by simply repeating writel() in a loop, but
  S390 and some newer ARM64 designs require a special helper to reliably
  generate such large TLPs.

   - Small improvements and fixes for erdma, efa, hfi1, bnxt_re

   - Fix a UAF crash after module unload on leaking restrack entry

   - Continue adding full RDMA support in mana with support for EQs,
     GID's and CQs

   - Improvements to the mkey cache in mlx5

   - DSCP traffic class support in hns and several bug fixes

   - Cap the maximum number of MADs in the receive queue to avoid OOM

   - Another batch of rxe bug fixes from large scale testing

   - __iowrite64_copy() optimizations for write combining MMIO memory

   - Remove NULL checks before dev_put/hold()

   - EFA support for receive with immediate

   - Fix a recent memleaking regression in a cma error path"
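
To make the __iowrite64_copy() point above concrete, here is a minimal,
hypothetical sketch of how a driver might push a 64-byte descriptor through
a write-combining doorbell page. The struct and function names are invented
for illustration; real drivers such as mlx5 wire this into their own WQE
paths.

	#include <linux/io.h>
	#include <linux/types.h>

	/* Hypothetical 64-byte descriptor; any real layout is device specific. */
	struct demo_wqe {
		__le64 qword[8];
	};

	/*
	 * @db is assumed to be an ioremap_wc() mapping of the device doorbell
	 * BAR. __iowrite64_copy() takes its count in 64-bit units, so a
	 * 64-byte WQE is eight units, which is exactly the block the arch
	 * helpers try to emit as a single 64 byte MemWr TLP.
	 */
	static void demo_ring_doorbell(void __iomem *db, const struct demo_wqe *wqe)
	{
		__iowrite64_copy(db, wqe, sizeof(*wqe) / sizeof(u64));
	}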

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (70 commits)
  RDMA/cma: Fix kmemleak in rdma_core observed during blktests nvme/rdma use siw
  RDMA/IPoIB: Fix format truncation compilation errors
  bnxt_re: avoid shift undefined behavior in bnxt_qplib_alloc_init_hwq
  RDMA/efa: Support QP with unsolicited write w/ imm. receive
  IB/hfi1: Remove generic .ndo_get_stats64
  IB/hfi1: Do not use custom stat allocator
  RDMA/hfi1: Use RMW accessors for changing LNKCTL2
  RDMA/mana_ib: implement uapi for creation of rnic cq
  RDMA/mana_ib: boundary check before installing cq callbacks
  RDMA/mana_ib: introduce a helper to remove cq callbacks
  RDMA/mana_ib: create and destroy RNIC cqs
  RDMA/mana_ib: create EQs for RNIC CQs
  RDMA/core: Remove NULL check before dev_{put, hold}
  RDMA/ipoib: Remove NULL check before dev_{put, hold}
  RDMA/mlx5: Remove NULL check before dev_{put, hold}
  RDMA/mlx5: Track DCT, DCI and REG_UMR QPs as diver_detail resources.
  RDMA/core: Add an option to display driver-specific QPs in the rdmatool
  RDMA/efa: Add shutdown notifier
  RDMA/mana_ib: Fix missing ret value
  IB/mlx5: Use __iowrite64_copy() for write combining stores
  ...
Linus Torvalds 2024-05-18 13:04:15 -07:00
commit 25f4874662
77 changed files with 1584 additions and 768 deletions
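
One structural note before the hunks: the series relies on the common arch
override pattern for these copy helpers. An architecture supplies its own
inline routine and claims the name with a #define, and the generic fallback
is only used when no such definition exists, as the s390 and x86 hunks below
show for __iowrite32_copy()/__iowrite64_copy(). A rough, simplified sketch
of that shape (guard placement illustrative, not the exact kernel code):

	/* Architecture header: tuned implementation plus the name claim. */
	static inline void __iowrite64_copy(void __iomem *to, const void *from,
					    size_t count)
	{
		/* arch-specific contiguous store sequence goes here */
	}
	#define __iowrite64_copy __iowrite64_copy

	/* Generic side: only provides a fallback if the arch did not. */
	#ifndef __iowrite64_copy
	void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
	#endif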

View File

@@ -139,6 +139,138 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
#define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l))
/*
* The ARM64 iowrite implementation is intended to support drivers that want to
* use write combining. For instance PCI drivers using write combining with a 64
* byte __iowrite64_copy() expect to get a 64 byte MemWr TLP on the PCIe bus.
*
* Newer ARM core have sensitive write combining buffers, it is important that
* the stores be contiguous blocks of store instructions. Normal memcpy
* approaches have a very low chance to generate write combining.
*
* Since this is the only API on ARM64 that should be used with write combining
* it also integrates the DGH hint which is supposed to lower the latency to
* emit the large TLP from the CPU.
*/
static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to,
const u32 *from, size_t count)
{
switch (count) {
case 8:
asm volatile("str %w0, [%8, #4 * 0]\n"
"str %w1, [%8, #4 * 1]\n"
"str %w2, [%8, #4 * 2]\n"
"str %w3, [%8, #4 * 3]\n"
"str %w4, [%8, #4 * 4]\n"
"str %w5, [%8, #4 * 5]\n"
"str %w6, [%8, #4 * 6]\n"
"str %w7, [%8, #4 * 7]\n"
:
: "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
"rZ"(from[3]), "rZ"(from[4]), "rZ"(from[5]),
"rZ"(from[6]), "rZ"(from[7]), "r"(to));
break;
case 4:
asm volatile("str %w0, [%4, #4 * 0]\n"
"str %w1, [%4, #4 * 1]\n"
"str %w2, [%4, #4 * 2]\n"
"str %w3, [%4, #4 * 3]\n"
:
: "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
"rZ"(from[3]), "r"(to));
break;
case 2:
asm volatile("str %w0, [%2, #4 * 0]\n"
"str %w1, [%2, #4 * 1]\n"
:
: "rZ"(from[0]), "rZ"(from[1]), "r"(to));
break;
case 1:
__raw_writel(*from, to);
break;
default:
BUILD_BUG();
}
}
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);
static inline void __const_iowrite32_copy(void __iomem *to, const void *from,
size_t count)
{
if (count == 8 || count == 4 || count == 2 || count == 1) {
__const_memcpy_toio_aligned32(to, from, count);
dgh();
} else {
__iowrite32_copy_full(to, from, count);
}
}
#define __iowrite32_copy(to, from, count) \
(__builtin_constant_p(count) ? \
__const_iowrite32_copy(to, from, count) : \
__iowrite32_copy_full(to, from, count))
static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to,
const u64 *from, size_t count)
{
switch (count) {
case 8:
asm volatile("str %x0, [%8, #8 * 0]\n"
"str %x1, [%8, #8 * 1]\n"
"str %x2, [%8, #8 * 2]\n"
"str %x3, [%8, #8 * 3]\n"
"str %x4, [%8, #8 * 4]\n"
"str %x5, [%8, #8 * 5]\n"
"str %x6, [%8, #8 * 6]\n"
"str %x7, [%8, #8 * 7]\n"
:
: "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
"rZ"(from[3]), "rZ"(from[4]), "rZ"(from[5]),
"rZ"(from[6]), "rZ"(from[7]), "r"(to));
break;
case 4:
asm volatile("str %x0, [%4, #8 * 0]\n"
"str %x1, [%4, #8 * 1]\n"
"str %x2, [%4, #8 * 2]\n"
"str %x3, [%4, #8 * 3]\n"
:
: "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
"rZ"(from[3]), "r"(to));
break;
case 2:
asm volatile("str %x0, [%2, #8 * 0]\n"
"str %x1, [%2, #8 * 1]\n"
:
: "rZ"(from[0]), "rZ"(from[1]), "r"(to));
break;
case 1:
__raw_writeq(*from, to);
break;
default:
BUILD_BUG();
}
}
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);
static inline void __const_iowrite64_copy(void __iomem *to, const void *from,
size_t count)
{
if (count == 8 || count == 4 || count == 2 || count == 1) {
__const_memcpy_toio_aligned64(to, from, count);
dgh();
} else {
__iowrite64_copy_full(to, from, count);
}
}
#define __iowrite64_copy(to, from, count) \
(__builtin_constant_p(count) ? \
__const_iowrite64_copy(to, from, count) : \
__iowrite64_copy_full(to, from, count))
/*
* I/O memory mapping functions.
*/

View File

@@ -37,6 +37,48 @@ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
}
EXPORT_SYMBOL(__memcpy_fromio);
/*
* This generates a memcpy that works on a from/to address which is aligned to
* bits. Count is in terms of the number of bits sized quantities to copy. It
* optimizes to use the STR groupings when possible so that it is WC friendly.
*/
#define memcpy_toio_aligned(to, from, count, bits) \
({ \
volatile u##bits __iomem *_to = to; \
const u##bits *_from = from; \
size_t _count = count; \
const u##bits *_end_from = _from + ALIGN_DOWN(_count, 8); \
\
for (; _from < _end_from; _from += 8, _to += 8) \
__const_memcpy_toio_aligned##bits(_to, _from, 8); \
if ((_count % 8) >= 4) { \
__const_memcpy_toio_aligned##bits(_to, _from, 4); \
_from += 4; \
_to += 4; \
} \
if ((_count % 4) >= 2) { \
__const_memcpy_toio_aligned##bits(_to, _from, 2); \
_from += 2; \
_to += 2; \
} \
if (_count % 2) \
__const_memcpy_toio_aligned##bits(_to, _from, 1); \
})
void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count)
{
memcpy_toio_aligned(to, from, count, 64);
dgh();
}
EXPORT_SYMBOL(__iowrite64_copy_full);
void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count)
{
memcpy_toio_aligned(to, from, count, 32);
dgh();
}
EXPORT_SYMBOL(__iowrite32_copy_full);
/*
* Copy data from "real" memory space to IO memory space.
*/

View File

@@ -73,6 +73,21 @@ static inline void ioport_unmap(void __iomem *p)
#define __raw_writel zpci_write_u32
#define __raw_writeq zpci_write_u64
/* combine single writes by using store-block insn */
static inline void __iowrite32_copy(void __iomem *to, const void *from,
size_t count)
{
zpci_memcpy_toio(to, from, count * 4);
}
#define __iowrite32_copy __iowrite32_copy
static inline void __iowrite64_copy(void __iomem *to, const void *from,
size_t count)
{
zpci_memcpy_toio(to, from, count * 8);
}
#define __iowrite64_copy __iowrite64_copy
#endif /* CONFIG_PCI */
#include <asm-generic/io.h>

View File

@@ -250,12 +250,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return 0;
}
/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
zpci_memcpy_toio(to, from, count * 8);
}
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{

View File

@@ -209,6 +209,23 @@ void memset_io(volatile void __iomem *, int, size_t);
#define memcpy_toio memcpy_toio
#define memset_io memset_io
#ifdef CONFIG_X86_64
/*
* Commit 0f07496144c2 ("[PATCH] Add faster __iowrite32_copy routine for
* x86_64") says that circa 2006 rep movsl is noticeably faster than a copy
* loop.
*/
static inline void __iowrite32_copy(void __iomem *to, const void *from,
size_t count)
{
asm volatile("rep ; movsl"
: "=&c"(count), "=&D"(to), "=&S"(from)
: "0"(count), "1"(to), "2"(from)
: "memory");
}
#define __iowrite32_copy __iowrite32_copy
#endif
/*
* ISA space is 'always mapped' on a typical x86 system, no need to
* explicitly ioremap() it. The fact that the ISA IO space is mapped

View File

@@ -53,7 +53,6 @@ ifneq ($(CONFIG_X86_CMPXCHG64),y)
lib-y += atomic64_386_32.o
endif
else
obj-y += iomap_copy_64.o
ifneq ($(CONFIG_GENERIC_CSUM),y)
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
endif

View File

@@ -1,15 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2006 PathScale, Inc. All Rights Reserved.
*/
#include <linux/linkage.h>
/*
* override generic version in lib/iomap_copy.c
*/
SYM_FUNC_START(__iowrite32_copy)
movl %edx,%ecx
rep movsl
RET
SYM_FUNC_END(__iowrite32_copy)

View File

@ -715,8 +715,10 @@ cma_validate_port(struct ib_device *device, u32 port,
rcu_read_lock(); rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev); ndev = rcu_dereference(sgid_attr->ndev);
if (!net_eq(dev_net(ndev), dev_addr->net) || if (!net_eq(dev_net(ndev), dev_addr->net) ||
ndev->ifindex != bound_if_index) ndev->ifindex != bound_if_index) {
rdma_put_gid_attr(sgid_attr);
sgid_attr = ERR_PTR(-ENODEV); sgid_attr = ERR_PTR(-ENODEV);
}
rcu_read_unlock(); rcu_read_unlock();
goto out; goto out;
} }

View File

@ -2174,8 +2174,7 @@ int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
spin_unlock_irqrestore(&pdata->netdev_lock, flags); spin_unlock_irqrestore(&pdata->netdev_lock, flags);
add_ndev_hash(pdata); add_ndev_hash(pdata);
if (old_ndev) __dev_put(old_ndev);
__dev_put(old_ndev);
return 0; return 0;
} }
@ -2235,8 +2234,7 @@ struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
spin_lock(&pdata->netdev_lock); spin_lock(&pdata->netdev_lock);
res = rcu_dereference_protected( res = rcu_dereference_protected(
pdata->netdev, lockdep_is_held(&pdata->netdev_lock)); pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
if (res) dev_hold(res);
dev_hold(res);
spin_unlock(&pdata->netdev_lock); spin_unlock(&pdata->netdev_lock);
} }
@ -2311,9 +2309,7 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
if (filter(ib_dev, port, idev, filter_cookie)) if (filter(ib_dev, port, idev, filter_cookie))
cb(ib_dev, port, idev, cookie); cb(ib_dev, port, idev, cookie);
dev_put(idev);
if (idev)
dev_put(idev);
} }
} }

View File

@ -93,8 +93,7 @@ static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
slave = netdev_get_xmit_slave(master, skb, slave = netdev_get_xmit_slave(master, skb,
!!(device->lag_flags & !!(device->lag_flags &
RDMA_LAG_FLAGS_HASH_ALL_SLAVES)); RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
if (slave) dev_hold(slave);
dev_hold(slave);
rcu_read_unlock(); rcu_read_unlock();
kfree_skb(skb); kfree_skb(skb);
return slave; return slave;

View File

@ -137,6 +137,8 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING,
.len = RDMA_NLDEV_ATTR_EMPTY_STRING }, .len = RDMA_NLDEV_ATTR_EMPTY_STRING },
[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_RES_SUBTYPE] = { .type = NLA_NUL_STRING,
.len = RDMA_NLDEV_ATTR_EMPTY_STRING },
[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
[RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED },
@ -164,6 +166,7 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
[RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_DRIVER_DETAILS] = { .type = NLA_U8 },
}; };
static int put_driver_name_print_type(struct sk_buff *msg, const char *name, static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@ -399,7 +402,8 @@ err:
return -EMSGSIZE; return -EMSGSIZE;
} }
static int fill_res_info(struct sk_buff *msg, struct ib_device *device) static int fill_res_info(struct sk_buff *msg, struct ib_device *device,
bool show_details)
{ {
static const char * const names[RDMA_RESTRACK_MAX] = { static const char * const names[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_PD] = "pd", [RDMA_RESTRACK_PD] = "pd",
@ -424,7 +428,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
for (i = 0; i < RDMA_RESTRACK_MAX; i++) { for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
if (!names[i]) if (!names[i])
continue; continue;
curr = rdma_restrack_count(device, i); curr = rdma_restrack_count(device, i, show_details);
ret = fill_res_info_entry(msg, names[i], curr); ret = fill_res_info_entry(msg, names[i], curr);
if (ret) if (ret)
goto err; goto err;
@ -1305,6 +1309,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
bool show_details = false;
struct ib_device *device; struct ib_device *device;
struct sk_buff *msg; struct sk_buff *msg;
u32 index; u32 index;
@ -1320,6 +1325,9 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!device) if (!device)
return -EINVAL; return -EINVAL;
if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) { if (!msg) {
ret = -ENOMEM; ret = -ENOMEM;
@ -1334,7 +1342,7 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
goto err_free; goto err_free;
} }
ret = fill_res_info(msg, device); ret = fill_res_info(msg, device, show_details);
if (ret) if (ret)
goto err_free; goto err_free;
@ -1364,7 +1372,7 @@ static int _nldev_res_get_dumpit(struct ib_device *device,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
0, NLM_F_MULTI); 0, NLM_F_MULTI);
if (!nlh || fill_res_info(skb, device)) { if (!nlh || fill_res_info(skb, device, false)) {
nlmsg_cancel(skb, nlh); nlmsg_cancel(skb, nlh);
goto out; goto out;
} }
@ -1534,6 +1542,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
struct rdma_restrack_entry *res; struct rdma_restrack_entry *res;
struct rdma_restrack_root *rt; struct rdma_restrack_root *rt;
int err, ret = 0, idx = 0; int err, ret = 0, idx = 0;
bool show_details = false;
struct nlattr *table_attr; struct nlattr *table_attr;
struct nlattr *entry_attr; struct nlattr *entry_attr;
struct ib_device *device; struct ib_device *device;
@ -1562,6 +1571,9 @@ static int res_get_common_dumpit(struct sk_buff *skb,
if (!device) if (!device)
return -EINVAL; return -EINVAL;
if (tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS])
show_details = nla_get_u8(tb[RDMA_NLDEV_ATTR_DRIVER_DETAILS]);
/* /*
* If no PORT_INDEX is supplied, we will return all QPs from that device * If no PORT_INDEX is supplied, we will return all QPs from that device
*/ */
@ -1599,6 +1611,9 @@ static int res_get_common_dumpit(struct sk_buff *skb,
* objects. * objects.
*/ */
xa_for_each(&rt->xa, id, res) { xa_for_each(&rt->xa, id, res) {
if (xa_get_mark(&rt->xa, res->id, RESTRACK_DD) && !show_details)
goto next;
if (idx < start || !rdma_restrack_get(res)) if (idx < start || !rdma_restrack_get(res))
goto next; goto next;

View File

@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
return 0; return 0;
} }
static const char *type2str(enum rdma_restrack_type type)
{
static const char * const names[RDMA_RESTRACK_MAX] = {
[RDMA_RESTRACK_PD] = "PD",
[RDMA_RESTRACK_CQ] = "CQ",
[RDMA_RESTRACK_QP] = "QP",
[RDMA_RESTRACK_CM_ID] = "CM_ID",
[RDMA_RESTRACK_MR] = "MR",
[RDMA_RESTRACK_CTX] = "CTX",
[RDMA_RESTRACK_COUNTER] = "COUNTER",
[RDMA_RESTRACK_SRQ] = "SRQ",
};
return names[type];
};
/** /**
* rdma_restrack_clean() - clean resource tracking * rdma_restrack_clean() - clean resource tracking
* @dev: IB device * @dev: IB device
@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
void rdma_restrack_clean(struct ib_device *dev) void rdma_restrack_clean(struct ib_device *dev)
{ {
struct rdma_restrack_root *rt = dev->res; struct rdma_restrack_root *rt = dev->res;
struct rdma_restrack_entry *e;
char buf[TASK_COMM_LEN];
bool found = false;
const char *owner;
int i; int i;
for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) { for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
struct xarray *xa = &dev->res[i].xa; struct xarray *xa = &dev->res[i].xa;
if (!xa_empty(xa)) { WARN_ON(!xa_empty(xa));
unsigned long index;
if (!found) {
pr_err("restrack: %s", CUT_HERE);
dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
}
xa_for_each(xa, index, e) {
if (rdma_is_kernel_res(e)) {
owner = e->kern_name;
} else {
/*
* There is no need to call get_task_struct here,
* because we can be here only if there are more
* get_task_struct() call than put_task_struct().
*/
get_task_comm(buf, e->task);
owner = buf;
}
pr_err("restrack: %s %s object allocated by %s is not freed\n",
rdma_is_kernel_res(e) ? "Kernel" :
"User",
type2str(e->type), owner);
}
found = true;
}
xa_destroy(xa); xa_destroy(xa);
} }
if (found)
pr_err("restrack: %s", CUT_HERE);
kfree(rt); kfree(rt);
} }
@ -108,8 +59,10 @@ void rdma_restrack_clean(struct ib_device *dev)
* rdma_restrack_count() - the current usage of specific object * rdma_restrack_count() - the current usage of specific object
* @dev: IB device * @dev: IB device
* @type: actual type of object to operate * @type: actual type of object to operate
* @show_details: count driver specific objects
*/ */
int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type) int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
bool show_details)
{ {
struct rdma_restrack_root *rt = &dev->res[type]; struct rdma_restrack_root *rt = &dev->res[type];
struct rdma_restrack_entry *e; struct rdma_restrack_entry *e;
@ -117,8 +70,11 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
u32 cnt = 0; u32 cnt = 0;
xa_lock(&rt->xa); xa_lock(&rt->xa);
xas_for_each(&xas, e, U32_MAX) xas_for_each(&xas, e, U32_MAX) {
if (xa_get_mark(&rt->xa, e->id, RESTRACK_DD) && !show_details)
continue;
cnt++; cnt++;
}
xa_unlock(&rt->xa); xa_unlock(&rt->xa);
return cnt; return cnt;
} }
@ -247,6 +203,9 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL); ret = xa_insert(&rt->xa, res->id, res, GFP_KERNEL);
if (ret) if (ret)
res->id = 0; res->id = 0;
if (qp->qp_type >= IB_QPT_DRIVER)
xa_set_mark(&rt->xa, res->id, RESTRACK_DD);
} else if (res->type == RDMA_RESTRACK_COUNTER) { } else if (res->type == RDMA_RESTRACK_COUNTER) {
/* Special case to ensure that cntn points to right counter */ /* Special case to ensure that cntn points to right counter */
struct rdma_counter *counter; struct rdma_counter *counter;

View File

@ -601,8 +601,7 @@ static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
rcu_read_lock(); rcu_read_lock();
master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev); master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
if (master_ndev) dev_hold(master_ndev);
dev_hold(master_ndev);
rcu_read_unlock(); rcu_read_unlock();
if (master_ndev) { if (master_ndev) {

View File

@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access"); MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
#define MAX_UMAD_RECV_LIST_SIZE 200000
enum { enum {
IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS, IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32, IB_UMAD_MAX_AGENTS = 32,
@ -113,6 +115,7 @@ struct ib_umad_file {
struct mutex mutex; struct mutex mutex;
struct ib_umad_port *port; struct ib_umad_port *port;
struct list_head recv_list; struct list_head recv_list;
atomic_t recv_list_size;
struct list_head send_list; struct list_head send_list;
struct list_head port_list; struct list_head port_list;
spinlock_t send_lock; spinlock_t send_lock;
@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
return file->agents_dead ? NULL : file->agent[id]; return file->agents_dead ? NULL : file->agent[id];
} }
static int queue_packet(struct ib_umad_file *file, static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
struct ib_mad_agent *agent, struct ib_umad_packet *packet, bool is_recv_mad)
struct ib_umad_packet *packet)
{ {
int ret = 1; int ret = 1;
mutex_lock(&file->mutex); mutex_lock(&file->mutex);
if (is_recv_mad &&
atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
goto unlock;
for (packet->mad.hdr.id = 0; for (packet->mad.hdr.id = 0;
packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
packet->mad.hdr.id++) packet->mad.hdr.id++)
if (agent == __get_agent(file, packet->mad.hdr.id)) { if (agent == __get_agent(file, packet->mad.hdr.id)) {
list_add_tail(&packet->list, &file->recv_list); list_add_tail(&packet->list, &file->recv_list);
atomic_inc(&file->recv_list_size);
wake_up_interruptible(&file->recv_wait); wake_up_interruptible(&file->recv_wait);
ret = 0; ret = 0;
break; break;
} }
unlock:
mutex_unlock(&file->mutex); mutex_unlock(&file->mutex);
return ret; return ret;
@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
packet->length = IB_MGMT_MAD_HDR; packet->length = IB_MGMT_MAD_HDR;
packet->mad.hdr.status = ETIMEDOUT; packet->mad.hdr.status = ETIMEDOUT;
if (!queue_packet(file, agent, packet)) if (!queue_packet(file, agent, packet, false))
return; return;
} }
kfree(packet); kfree(packet);
@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
rdma_destroy_ah_attr(&ah_attr); rdma_destroy_ah_attr(&ah_attr);
} }
if (queue_packet(file, agent, packet)) if (queue_packet(file, agent, packet, true))
goto err2; goto err2;
return; return;
@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
list_del(&packet->list); list_del(&packet->list);
atomic_dec(&file->recv_list_size);
mutex_unlock(&file->mutex); mutex_unlock(&file->mutex);
@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
/* Requeue packet */ /* Requeue packet */
mutex_lock(&file->mutex); mutex_lock(&file->mutex);
list_add(&packet->list, &file->recv_list); list_add(&packet->list, &file->recv_list);
atomic_inc(&file->recv_list_size);
mutex_unlock(&file->mutex); mutex_unlock(&file->mutex);
} else { } else {
if (packet->recv_wc) if (packet->recv_wc)

View File

@ -1013,7 +1013,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.stride = sizeof(struct sq_sge); hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(sq); hwq_attr.depth = bnxt_qplib_get_depth(sq);
hwq_attr.aux_stride = psn_sz; hwq_attr.aux_stride = psn_sz;
hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode); hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
: 0;
/* Update msn tbl size */ /* Update msn tbl size */
if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) { if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)); hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));

View File

@ -110,7 +110,10 @@ struct efa_admin_create_qp_cmd {
* virtual (IOVA returned by MR registration) * virtual (IOVA returned by MR registration)
* 1 : rq_virt - If set, RQ ring base address is * 1 : rq_virt - If set, RQ ring base address is
* virtual (IOVA returned by MR registration) * virtual (IOVA returned by MR registration)
* 7:2 : reserved - MBZ * 2 : unsolicited_write_recv - If set, work requests
* will not be consumed for incoming RDMA write with
* immediate
* 7:3 : reserved - MBZ
*/ */
u8 flags; u8 flags;
@ -663,7 +666,9 @@ struct efa_admin_feature_device_attr_desc {
* polling is supported * polling is supported
* 3 : rdma_write - If set, RDMA Write is supported * 3 : rdma_write - If set, RDMA Write is supported
* on TX queues * on TX queues
* 31:4 : reserved - MBZ * 4 : unsolicited_write_recv - If set, unsolicited
* write with imm. receive is supported
* 31:5 : reserved - MBZ
*/ */
u32 device_caps; u32 device_caps;
@ -1009,6 +1014,7 @@ struct efa_admin_host_info {
/* create_qp_cmd */ /* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0) #define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1) #define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK BIT(1)
#define EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV_MASK BIT(2)
/* modify_qp_cmd */ /* modify_qp_cmd */
#define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK BIT(0) #define EFA_ADMIN_MODIFY_QP_CMD_QP_STATE_MASK BIT(0)
@ -1044,6 +1050,7 @@ struct efa_admin_host_info {
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1) #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2) #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_DATA_POLLING_128_MASK BIT(2)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK BIT(3) #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_WRITE_MASK BIT(3)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_UNSOLICITED_WRITE_RECV_MASK BIT(4)
/* create_eq_cmd */ /* create_eq_cmd */
#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) #define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)

View File

@ -32,6 +32,9 @@ int efa_com_create_qp(struct efa_com_dev *edev,
params->rq_depth; params->rq_depth;
create_qp_cmd.uar = params->uarn; create_qp_cmd.uar = params->uarn;
if (params->unsolicited_write_recv)
EFA_SET(&create_qp_cmd.flags, EFA_ADMIN_CREATE_QP_CMD_UNSOLICITED_WRITE_RECV, 1);
err = efa_com_cmd_exec(aq, err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&create_qp_cmd, (struct efa_admin_aq_entry *)&create_qp_cmd,
sizeof(create_qp_cmd), sizeof(create_qp_cmd),

View File

@ -27,6 +27,7 @@ struct efa_com_create_qp_params {
u16 pd; u16 pd;
u16 uarn; u16 uarn;
u8 qp_type; u8 qp_type;
u8 unsolicited_write_recv : 1;
}; };
struct efa_com_create_qp_result { struct efa_com_create_qp_result {

View File

@ -671,11 +671,22 @@ static void efa_remove(struct pci_dev *pdev)
efa_remove_device(pdev); efa_remove_device(pdev);
} }
static void efa_shutdown(struct pci_dev *pdev)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_destroy_eqs(dev);
efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_SHUTDOWN);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
}
static struct pci_driver efa_pci_driver = { static struct pci_driver efa_pci_driver = {
.name = DRV_MODULE_NAME, .name = DRV_MODULE_NAME,
.id_table = efa_pci_tbl, .id_table = efa_pci_tbl,
.probe = efa_probe, .probe = efa_probe,
.remove = efa_remove, .remove = efa_remove,
.shutdown = efa_shutdown,
}; };
module_pci_driver(efa_pci_driver); module_pci_driver(efa_pci_driver);

View File

@ -263,6 +263,9 @@ int efa_query_device(struct ib_device *ibdev,
if (EFA_DEV_CAP(dev, RDMA_WRITE)) if (EFA_DEV_CAP(dev, RDMA_WRITE))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE; resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_WRITE;
if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV;
if (dev->neqs) if (dev->neqs)
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS; resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
@ -639,6 +642,7 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
struct efa_ibv_create_qp cmd = {}; struct efa_ibv_create_qp cmd = {};
struct efa_qp *qp = to_eqp(ibqp); struct efa_qp *qp = to_eqp(ibqp);
struct efa_ucontext *ucontext; struct efa_ucontext *ucontext;
u16 supported_efa_flags = 0;
int err; int err;
ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext, ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
@ -676,13 +680,23 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
goto err_out; goto err_out;
} }
if (cmd.comp_mask) { if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_90)) {
ibdev_dbg(&dev->ibdev, ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, unknown fields in udata\n"); "Incompatible ABI params, unknown fields in udata\n");
err = -EINVAL; err = -EINVAL;
goto err_out; goto err_out;
} }
if (EFA_DEV_CAP(dev, UNSOLICITED_WRITE_RECV))
supported_efa_flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
if (cmd.flags & ~supported_efa_flags) {
ibdev_dbg(&dev->ibdev, "Unsupported EFA QP create flags[%#x], supported[%#x]\n",
cmd.flags, supported_efa_flags);
err = -EOPNOTSUPP;
goto err_out;
}
create_qp_params.uarn = ucontext->uarn; create_qp_params.uarn = ucontext->uarn;
create_qp_params.pd = to_epd(ibqp->pd)->pdn; create_qp_params.pd = to_epd(ibqp->pd)->pdn;
@ -722,6 +736,9 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
create_qp_params.rq_base_addr = qp->rq_dma_addr; create_qp_params.rq_base_addr = qp->rq_dma_addr;
} }
if (cmd.flags & EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV)
create_qp_params.unsolicited_write_recv = true;
err = efa_com_create_qp(&dev->edev, &create_qp_params, err = efa_com_create_qp(&dev->edev, &create_qp_params,
&create_qp_resp); &create_qp_resp);
if (err) if (err)

View File

@ -33,7 +33,8 @@ struct erdma_eq {
atomic64_t notify_num; atomic64_t notify_num;
void __iomem *db; void __iomem *db;
u64 *db_record; u64 *dbrec;
dma_addr_t dbrec_dma;
}; };
struct erdma_cmdq_sq { struct erdma_cmdq_sq {
@ -48,7 +49,8 @@ struct erdma_cmdq_sq {
u16 wqebb_cnt; u16 wqebb_cnt;
u64 *db_record; u64 *dbrec;
dma_addr_t dbrec_dma;
}; };
struct erdma_cmdq_cq { struct erdma_cmdq_cq {
@ -61,7 +63,8 @@ struct erdma_cmdq_cq {
u32 ci; u32 ci;
u32 cmdsn; u32 cmdsn;
u64 *db_record; u64 *dbrec;
dma_addr_t dbrec_dma;
atomic64_t armed_num; atomic64_t armed_num;
}; };
@ -177,9 +180,6 @@ enum {
ERDMA_RES_CNT = 2, ERDMA_RES_CNT = 2,
}; };
#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
struct erdma_dev { struct erdma_dev {
struct ib_device ibdev; struct ib_device ibdev;
struct net_device *netdev; struct net_device *netdev;
@ -213,6 +213,7 @@ struct erdma_dev {
atomic_t num_ctx; atomic_t num_ctx;
struct list_head cep_list; struct list_head cep_list;
struct dma_pool *db_pool;
struct dma_pool *resp_pool; struct dma_pool *resp_pool;
}; };

View File

@ -14,7 +14,7 @@ static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) | FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn); FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
*cmdq->cq.db_record = db_data; *cmdq->cq.dbrec = db_data;
writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG); writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
atomic64_inc(&cmdq->cq.armed_num); atomic64_inc(&cmdq->cq.armed_num);
@ -25,7 +25,7 @@ static void kick_cmdq_db(struct erdma_cmdq *cmdq)
struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq); struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi); u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
*cmdq->sq.db_record = db_data; *cmdq->sq.dbrec = db_data;
writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG); writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
} }
@ -89,20 +89,18 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
{ {
struct erdma_cmdq *cmdq = &dev->cmdq; struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_sq *sq = &cmdq->sq; struct erdma_cmdq_sq *sq = &cmdq->sq;
u32 buf_size;
sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE); sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
sq->depth = cmdq->max_outstandings * sq->wqebb_cnt; sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
buf_size = sq->depth << SQEBB_SHIFT; sq->qbuf = dma_alloc_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
&sq->qbuf_dma_addr, GFP_KERNEL);
sq->qbuf =
dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
&sq->qbuf_dma_addr, GFP_KERNEL);
if (!sq->qbuf) if (!sq->qbuf)
return -ENOMEM; return -ENOMEM;
sq->db_record = (u64 *)(sq->qbuf + buf_size); sq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &sq->dbrec_dma);
if (!sq->dbrec)
goto err_out;
spin_lock_init(&sq->lock); spin_lock_init(&sq->lock);
@ -111,30 +109,33 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG, erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
lower_32_bits(sq->qbuf_dma_addr)); lower_32_bits(sq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth); erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, sq->dbrec_dma);
sq->qbuf_dma_addr + buf_size);
return 0; return 0;
err_out:
dma_free_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
sq->qbuf, sq->qbuf_dma_addr);
return -ENOMEM;
} }
static int erdma_cmdq_cq_init(struct erdma_dev *dev) static int erdma_cmdq_cq_init(struct erdma_dev *dev)
{ {
struct erdma_cmdq *cmdq = &dev->cmdq; struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_cq *cq = &cmdq->cq; struct erdma_cmdq_cq *cq = &cmdq->cq;
u32 buf_size;
cq->depth = cmdq->sq.depth; cq->depth = cmdq->sq.depth;
buf_size = cq->depth << CQE_SHIFT; cq->qbuf = dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
&cq->qbuf_dma_addr, GFP_KERNEL);
cq->qbuf =
dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
&cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
if (!cq->qbuf) if (!cq->qbuf)
return -ENOMEM; return -ENOMEM;
spin_lock_init(&cq->lock); spin_lock_init(&cq->lock);
cq->db_record = (u64 *)(cq->qbuf + buf_size); cq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &cq->dbrec_dma);
if (!cq->dbrec)
goto err_out;
atomic64_set(&cq->armed_num, 0); atomic64_set(&cq->armed_num, 0);
@ -142,24 +143,25 @@ static int erdma_cmdq_cq_init(struct erdma_dev *dev)
upper_32_bits(cq->qbuf_dma_addr)); upper_32_bits(cq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG, erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
lower_32_bits(cq->qbuf_dma_addr)); lower_32_bits(cq->qbuf_dma_addr));
erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, cq->dbrec_dma);
cq->qbuf_dma_addr + buf_size);
return 0; return 0;
err_out:
dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, cq->qbuf,
cq->qbuf_dma_addr);
return -ENOMEM;
} }
static int erdma_cmdq_eq_init(struct erdma_dev *dev) static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{ {
struct erdma_cmdq *cmdq = &dev->cmdq; struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_eq *eq = &cmdq->eq; struct erdma_eq *eq = &cmdq->eq;
u32 buf_size;
eq->depth = cmdq->max_outstandings; eq->depth = cmdq->max_outstandings;
buf_size = eq->depth << EQE_SHIFT; eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
&eq->qbuf_dma_addr, GFP_KERNEL);
eq->qbuf =
dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
&eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
if (!eq->qbuf) if (!eq->qbuf)
return -ENOMEM; return -ENOMEM;
@ -167,17 +169,24 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
atomic64_set(&eq->event_num, 0); atomic64_set(&eq->event_num, 0);
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG; eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
eq->db_record = (u64 *)(eq->qbuf + buf_size); eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
if (!eq->dbrec)
goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG, erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr)); upper_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG, erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
lower_32_bits(eq->qbuf_dma_addr)); lower_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth); erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
eq->qbuf_dma_addr + buf_size);
return 0; return 0;
err_out:
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
return -ENOMEM;
} }
int erdma_cmdq_init(struct erdma_dev *dev) int erdma_cmdq_init(struct erdma_dev *dev)
@ -211,17 +220,17 @@ int erdma_cmdq_init(struct erdma_dev *dev)
return 0; return 0;
err_destroy_cq: err_destroy_cq:
dma_free_coherent(&dev->pdev->dev, dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
(cmdq->cq.depth << CQE_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr); cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
err_destroy_sq: err_destroy_sq:
dma_free_coherent(&dev->pdev->dev, dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
(cmdq->sq.depth << SQEBB_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
return err; return err;
} }
@ -238,18 +247,20 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state); clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
dma_free_coherent(&dev->pdev->dev, dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
(cmdq->eq.depth << EQE_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr); cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
dma_free_coherent(&dev->pdev->dev,
(cmdq->sq.depth << SQEBB_SHIFT) + dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
ERDMA_EXTRA_BUFFER_SIZE,
dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
dma_free_coherent(&dev->pdev->dev,
(cmdq->cq.depth << CQE_SHIFT) + dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
ERDMA_EXTRA_BUFFER_SIZE,
dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr); cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
} }
static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq) static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)

View File

@ -26,7 +26,7 @@ static void notify_cq(struct erdma_cq *cq, u8 solcitied)
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) | FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci); FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
*cq->kern_cq.db_record = db_data; *cq->kern_cq.dbrec = db_data;
writeq(db_data, cq->kern_cq.db); writeq(db_data, cq->kern_cq.db);
} }

View File

@ -13,7 +13,7 @@ void notify_eq(struct erdma_eq *eq)
u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) | u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1); FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
*eq->db_record = db_data; *eq->dbrec = db_data;
writeq(db_data, eq->db); writeq(db_data, eq->db);
atomic64_inc(&eq->notify_num); atomic64_inc(&eq->notify_num);
@ -83,14 +83,11 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
int erdma_aeq_init(struct erdma_dev *dev) int erdma_aeq_init(struct erdma_dev *dev)
{ {
struct erdma_eq *eq = &dev->aeq; struct erdma_eq *eq = &dev->aeq;
u32 buf_size;
eq->depth = ERDMA_DEFAULT_EQ_DEPTH; eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
buf_size = eq->depth << EQE_SHIFT;
eq->qbuf = eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), &eq->qbuf_dma_addr, GFP_KERNEL);
&eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
if (!eq->qbuf) if (!eq->qbuf)
return -ENOMEM; return -ENOMEM;
@ -99,26 +96,34 @@ int erdma_aeq_init(struct erdma_dev *dev)
atomic64_set(&eq->notify_num, 0); atomic64_set(&eq->notify_num, 0);
eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG; eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
eq->db_record = (u64 *)(eq->qbuf + buf_size); eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
if (!eq->dbrec)
goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG, erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr)); upper_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG, erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
lower_32_bits(eq->qbuf_dma_addr)); lower_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth); erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
eq->qbuf_dma_addr + buf_size);
return 0; return 0;
err_out:
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
return -ENOMEM;
} }
void erdma_aeq_destroy(struct erdma_dev *dev) void erdma_aeq_destroy(struct erdma_dev *dev)
{ {
struct erdma_eq *eq = &dev->aeq; struct erdma_eq *eq = &dev->aeq;
dma_free_coherent(&dev->pdev->dev, dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
eq->qbuf_dma_addr); eq->qbuf_dma_addr);
dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
} }
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb) void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@ -209,7 +214,6 @@ static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq) static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{ {
struct erdma_cmdq_create_eq_req req; struct erdma_cmdq_create_eq_req req;
dma_addr_t db_info_dma_addr;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON, erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_CREATE_EQ); CMDQ_OPCODE_CREATE_EQ);
@ -219,9 +223,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.qtype = ERDMA_EQ_TYPE_CEQ; req.qtype = ERDMA_EQ_TYPE_CEQ;
/* Vector index is the same as EQN. */ /* Vector index is the same as EQN. */
req.vector_idx = eqn; req.vector_idx = eqn;
db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT); req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
req.db_dma_addr_l = lower_32_bits(db_info_dma_addr); req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL); return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
} }
@ -229,12 +232,11 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn) static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{ {
struct erdma_eq *eq = &dev->ceqs[ceqn].eq; struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
int ret; int ret;
eq->qbuf = eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
&eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO); &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf) if (!eq->qbuf)
return -ENOMEM; return -ENOMEM;
@ -242,10 +244,16 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
atomic64_set(&eq->event_num, 0); atomic64_set(&eq->event_num, 0);
atomic64_set(&eq->notify_num, 0); atomic64_set(&eq->notify_num, 0);
eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG + eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
(ceqn + 1) * ERDMA_DB_SIZE; (ceqn + 1) * ERDMA_DB_SIZE;
eq->db_record = (u64 *)(eq->qbuf + buf_size);
eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
if (!eq->dbrec) {
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
eq->qbuf, eq->qbuf_dma_addr);
return -ENOMEM;
}
eq->ci = 0; eq->ci = 0;
dev->ceqs[ceqn].dev = dev; dev->ceqs[ceqn].dev = dev;
@ -259,7 +267,6 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn) static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{ {
struct erdma_eq *eq = &dev->ceqs[ceqn].eq; struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
struct erdma_cmdq_destroy_eq_req req; struct erdma_cmdq_destroy_eq_req req;
int err; int err;
@ -276,8 +283,9 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
if (err) if (err)
return; return;
dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf, dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr); eq->qbuf_dma_addr);
dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
} }
int erdma_ceqs_init(struct erdma_dev *dev) int erdma_ceqs_init(struct erdma_dev *dev)

View File

@ -240,7 +240,7 @@ struct erdma_cmdq_create_cq_req {
u32 qbuf_addr_l; u32 qbuf_addr_l;
u32 qbuf_addr_h; u32 qbuf_addr_h;
u32 cfg1; u32 cfg1;
u64 cq_db_info_addr; u64 cq_dbrec_dma;
u32 first_page_offset; u32 first_page_offset;
u32 cfg2; u32 cfg2;
}; };
@ -335,8 +335,8 @@ struct erdma_cmdq_create_qp_req {
u64 rq_buf_addr; u64 rq_buf_addr;
u32 sq_mtt_cfg; u32 sq_mtt_cfg;
u32 rq_mtt_cfg; u32 rq_mtt_cfg;
u64 sq_db_info_dma_addr; u64 sq_dbrec_dma;
u64 rq_db_info_dma_addr; u64 rq_dbrec_dma;
u64 sq_mtt_entry[3]; u64 sq_mtt_entry[3];
u64 rq_mtt_entry[3]; u64 rq_mtt_entry[3];

View File

@ -178,16 +178,26 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
if (!dev->resp_pool) if (!dev->resp_pool)
return -ENOMEM; return -ENOMEM;
dev->db_pool = dma_pool_create("erdma_db_pool", &pdev->dev,
ERDMA_DB_SIZE, ERDMA_DB_SIZE, 0);
if (!dev->db_pool) {
ret = -ENOMEM;
goto destroy_resp_pool;
}
ret = dma_set_mask_and_coherent(&pdev->dev, ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH)); DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret) if (ret)
goto destroy_pool; goto destroy_db_pool;
dma_set_max_seg_size(&pdev->dev, UINT_MAX); dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0; return 0;
destroy_pool: destroy_db_pool:
dma_pool_destroy(dev->db_pool);
destroy_resp_pool:
dma_pool_destroy(dev->resp_pool); dma_pool_destroy(dev->resp_pool);
return ret; return ret;
@ -195,6 +205,7 @@ destroy_pool:
static void erdma_device_uninit(struct erdma_dev *dev) static void erdma_device_uninit(struct erdma_dev *dev)
{ {
dma_pool_destroy(dev->db_pool);
dma_pool_destroy(dev->resp_pool); dma_pool_destroy(dev->resp_pool);
} }

View File

@ -492,7 +492,7 @@ static void kick_sq_db(struct erdma_qp *qp, u16 pi)
u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) | u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi); FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
*(u64 *)qp->kern_qp.sq_db_info = db_data; *(u64 *)qp->kern_qp.sq_dbrec = db_data;
writeq(db_data, qp->kern_qp.hw_sq_db); writeq(db_data, qp->kern_qp.hw_sq_db);
} }
@ -557,7 +557,7 @@ static int erdma_post_recv_one(struct erdma_qp *qp,
return -EINVAL; return -EINVAL;
} }
*(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe; *(u64 *)qp->kern_qp.rq_dbrec = *(u64 *)rqe;
writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db); writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] = qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =

View File

@ -76,10 +76,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr; req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr; req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr + req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
(qp->attrs.sq_size << SQEBB_SHIFT); req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
(qp->attrs.rq_size << RQE_SHIFT);
} else { } else {
user_qp = &qp->user_qp; user_qp = &qp->user_qp;
req.sq_cqn_mtt_cfg = FIELD_PREP( req.sq_cqn_mtt_cfg = FIELD_PREP(
@ -107,8 +105,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg, assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
&req.rq_buf_addr, req.rq_mtt_entry); &req.rq_buf_addr, req.rq_mtt_entry);
req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr; req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr; req.rq_dbrec_dma = user_qp->rq_dbrec_dma;
if (uctx->ext_db.enable) { if (uctx->ext_db.enable) {
req.sq_cqn_mtt_cfg |= req.sq_cqn_mtt_cfg |=
@ -209,8 +207,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
ERDMA_MR_MTT_0LEVEL); ERDMA_MR_MTT_0LEVEL);
req.first_page_offset = 0; req.first_page_offset = 0;
req.cq_db_info_addr = req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
} else { } else {
mem = &cq->user_cq.qbuf_mem; mem = &cq->user_cq.qbuf_mem;
req.cfg0 |= req.cfg0 |=
@ -233,7 +230,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
mem->mtt_nents); mem->mtt_nents);
req.first_page_offset = mem->page_offset; req.first_page_offset = mem->page_offset;
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr; req.cq_dbrec_dma = cq->user_cq.dbrec_dma;
if (uctx->ext_db.enable) { if (uctx->ext_db.enable) {
req.cfg1 |= FIELD_PREP( req.cfg1 |= FIELD_PREP(
@ -482,16 +479,24 @@ static void free_kernel_qp(struct erdma_qp *qp)
vfree(qp->kern_qp.rwr_tbl); vfree(qp->kern_qp.rwr_tbl);
if (qp->kern_qp.sq_buf) if (qp->kern_qp.sq_buf)
dma_free_coherent( dma_free_coherent(&dev->pdev->dev,
&dev->pdev->dev, qp->attrs.sq_size << SQEBB_SHIFT,
WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT), qp->kern_qp.sq_buf,
qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr); qp->kern_qp.sq_buf_dma_addr);
if (qp->kern_qp.sq_dbrec)
dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
qp->kern_qp.sq_dbrec_dma);
if (qp->kern_qp.rq_buf) if (qp->kern_qp.rq_buf)
dma_free_coherent( dma_free_coherent(&dev->pdev->dev,
&dev->pdev->dev, qp->attrs.rq_size << RQE_SHIFT,
WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT), qp->kern_qp.rq_buf,
qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr); qp->kern_qp.rq_buf_dma_addr);
if (qp->kern_qp.rq_dbrec)
dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
qp->kern_qp.rq_dbrec_dma);
} }
static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp, static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
@ -516,20 +521,27 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
if (!kqp->swr_tbl || !kqp->rwr_tbl) if (!kqp->swr_tbl || !kqp->rwr_tbl)
goto err_out; goto err_out;
size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE; size = qp->attrs.sq_size << SQEBB_SHIFT;
kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size, kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
&kqp->sq_buf_dma_addr, GFP_KERNEL); &kqp->sq_buf_dma_addr, GFP_KERNEL);
if (!kqp->sq_buf) if (!kqp->sq_buf)
goto err_out; goto err_out;
size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE; kqp->sq_dbrec =
dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
if (!kqp->sq_dbrec)
goto err_out;
size = qp->attrs.rq_size << RQE_SHIFT;
kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size, kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
&kqp->rq_buf_dma_addr, GFP_KERNEL); &kqp->rq_buf_dma_addr, GFP_KERNEL);
if (!kqp->rq_buf) if (!kqp->rq_buf)
goto err_out; goto err_out;
kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT); kqp->rq_dbrec =
kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT); dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
if (!kqp->rq_dbrec)
goto err_out;
return 0; return 0;
@ -864,9 +876,9 @@ erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
} }
static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx, static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
u64 va, u32 len, u64 db_info_va) u64 va, u32 len, u64 dbrec_va)
{ {
dma_addr_t db_info_dma_addr; dma_addr_t dbrec_dma;
u32 rq_offset; u32 rq_offset;
int ret; int ret;
@ -889,14 +901,14 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
if (ret) if (ret)
goto put_sq_mtt; goto put_sq_mtt;
ret = erdma_map_user_dbrecords(uctx, db_info_va, ret = erdma_map_user_dbrecords(uctx, dbrec_va,
&qp->user_qp.user_dbr_page, &qp->user_qp.user_dbr_page,
&db_info_dma_addr); &dbrec_dma);
if (ret) if (ret)
goto put_rq_mtt; goto put_rq_mtt;
qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr; qp->user_qp.sq_dbrec_dma = dbrec_dma;
qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE; qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
return 0; return 0;
@ -1237,9 +1249,10 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return err; return err;
if (rdma_is_kernel_res(&cq->ibcq.res)) { if (rdma_is_kernel_res(&cq->ibcq.res)) {
dma_free_coherent(&dev->pdev->dev, dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
cq->kern_cq.dbrec_dma);
} else { } else {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page); erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
put_mtt_entries(dev, &cq->user_cq.qbuf_mem); put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@ -1279,16 +1292,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
wait_for_completion(&qp->safe_free); wait_for_completion(&qp->safe_free);
if (rdma_is_kernel_res(&qp->ibqp.res)) { if (rdma_is_kernel_res(&qp->ibqp.res)) {
vfree(qp->kern_qp.swr_tbl); free_kernel_qp(qp);
vfree(qp->kern_qp.rwr_tbl);
dma_free_coherent(
&dev->pdev->dev,
WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
dma_free_coherent(
&dev->pdev->dev,
WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
} else { } else {
put_mtt_entries(dev, &qp->user_qp.sq_mem); put_mtt_entries(dev, &qp->user_qp.sq_mem);
put_mtt_entries(dev, &qp->user_qp.rq_mem); put_mtt_entries(dev, &qp->user_qp.rq_mem);
@ -1588,7 +1592,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va, ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
&cq->user_cq.user_dbr_page, &cq->user_cq.user_dbr_page,
&cq->user_cq.db_info_dma_addr); &cq->user_cq.dbrec_dma);
if (ret) if (ret)
put_mtt_entries(dev, &cq->user_cq.qbuf_mem); put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@ -1600,19 +1604,27 @@ static int erdma_init_kernel_cq(struct erdma_cq *cq)
struct erdma_dev *dev = to_edev(cq->ibcq.device); struct erdma_dev *dev = to_edev(cq->ibcq.device);
cq->kern_cq.qbuf = cq->kern_cq.qbuf =
dma_alloc_coherent(&dev->pdev->dev, dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
&cq->kern_cq.qbuf_dma_addr, GFP_KERNEL); &cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
if (!cq->kern_cq.qbuf) if (!cq->kern_cq.qbuf)
return -ENOMEM; return -ENOMEM;
cq->kern_cq.db_record = cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
(u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT)); &cq->kern_cq.dbrec_dma);
if (!cq->kern_cq.dbrec)
goto err_out;
spin_lock_init(&cq->kern_cq.lock); spin_lock_init(&cq->kern_cq.lock);
/* use default cqdb addr */ /* use default cqdb addr */
cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET; cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
return 0; return 0;
err_out:
dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
return -ENOMEM;
} }
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
@ -1676,9 +1688,10 @@ err_free_res:
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page); erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
put_mtt_entries(dev, &cq->user_cq.qbuf_mem); put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
} else { } else {
dma_free_coherent(&dev->pdev->dev, dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
WARPPED_BUFSIZE(depth << CQE_SHIFT),
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr); cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
cq->kern_cq.dbrec_dma);
} }
err_out_xa: err_out_xa:
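The erdma hunks above move the kernel-owned QP/CQ doorbell records out of the padded queue buffers (the old WARPPED_BUFSIZE scheme) and into a per-device dma_pool. A minimal sketch of that allocation pattern; the pool name, record size and helper names here are chosen purely for illustration:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

#define DBREC_SIZE 8 /* illustrative: one 64-bit doorbell record */

/* Create a per-device pool of small, naturally aligned DMA buffers. */
static struct dma_pool *create_dbrec_pool(struct device *dev)
{
        return dma_pool_create("dbrec_pool", dev, DBREC_SIZE, DBREC_SIZE, 0);
}

/* Allocate one zeroed doorbell record; *dbrec_dma receives its DMA address. */
static void *alloc_dbrec(struct dma_pool *pool, dma_addr_t *dbrec_dma)
{
        return dma_pool_zalloc(pool, GFP_KERNEL, dbrec_dma);
}

/* Release it on the teardown path, mirroring free_kernel_qp() above. */
static void free_dbrec(struct dma_pool *pool, void *dbrec, dma_addr_t dbrec_dma)
{
        dma_pool_free(pool, dbrec, dbrec_dma);
}

The pool itself would be torn down once with dma_pool_destroy() on device removal, after all records have been freed.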

@ -140,8 +140,8 @@ struct erdma_uqp {
struct erdma_mem sq_mem; struct erdma_mem sq_mem;
struct erdma_mem rq_mem; struct erdma_mem rq_mem;
dma_addr_t sq_db_info_dma_addr; dma_addr_t sq_dbrec_dma;
dma_addr_t rq_db_info_dma_addr; dma_addr_t rq_dbrec_dma;
struct erdma_user_dbrecords_page *user_dbr_page; struct erdma_user_dbrecords_page *user_dbr_page;
@ -167,8 +167,11 @@ struct erdma_kqp {
void *rq_buf; void *rq_buf;
dma_addr_t rq_buf_dma_addr; dma_addr_t rq_buf_dma_addr;
void *sq_db_info; void *sq_dbrec;
void *rq_db_info; void *rq_dbrec;
dma_addr_t sq_dbrec_dma;
dma_addr_t rq_dbrec_dma;
u8 sig_all; u8 sig_all;
}; };
@ -246,13 +249,14 @@ struct erdma_kcq_info {
spinlock_t lock; spinlock_t lock;
u8 __iomem *db; u8 __iomem *db;
u64 *db_record; u64 *dbrec;
dma_addr_t dbrec_dma;
}; };
struct erdma_ucq_info { struct erdma_ucq_info {
struct erdma_mem qbuf_mem; struct erdma_mem qbuf_mem;
struct erdma_user_dbrecords_page *user_dbr_page; struct erdma_user_dbrecords_page *user_dbr_page;
dma_addr_t db_info_dma_addr; dma_addr_t dbrec_dma;
}; };
struct erdma_cq { struct erdma_cq {

@ -21,36 +21,25 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
int ret; int ret;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
ret = priv->netdev_ops->ndo_init(dev); ret = priv->netdev_ops->ndo_init(dev);
if (ret) if (ret)
goto out_ret; return ret;
ret = hfi1_netdev_add_data(priv->dd, ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr), qpn_from_mac(priv->netdev->dev_addr),
dev); dev);
if (ret < 0) { if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev); priv->netdev_ops->ndo_uninit(dev);
goto out_ret; return ret;
} }
return 0; return 0;
out_ret:
free_percpu(dev->tstats);
dev->tstats = NULL;
return ret;
} }
static void hfi1_ipoib_dev_uninit(struct net_device *dev) static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{ {
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
free_percpu(dev->tstats);
dev->tstats = NULL;
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr)); hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev); priv->netdev_ops->ndo_uninit(dev);
@ -107,7 +96,6 @@ static const struct net_device_ops hfi1_ipoib_netdev_ops = {
.ndo_uninit = hfi1_ipoib_dev_uninit, .ndo_uninit = hfi1_ipoib_dev_uninit,
.ndo_open = hfi1_ipoib_dev_open, .ndo_open = hfi1_ipoib_dev_open,
.ndo_stop = hfi1_ipoib_dev_stop, .ndo_stop = hfi1_ipoib_dev_stop,
.ndo_get_stats64 = dev_get_tstats64,
}; };
static int hfi1_ipoib_mcast_attach(struct net_device *dev, static int hfi1_ipoib_mcast_attach(struct net_device *dev,
@ -173,9 +161,6 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_txreq_deinit(priv); hfi1_ipoib_txreq_deinit(priv);
hfi1_ipoib_rxq_deinit(priv->netdev); hfi1_ipoib_rxq_deinit(priv->netdev);
free_percpu(dev->tstats);
dev->tstats = NULL;
} }
static void hfi1_ipoib_set_id(struct net_device *dev, int id) static void hfi1_ipoib_set_id(struct net_device *dev, int id)
@ -234,6 +219,7 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true; netdev->needs_free_netdev = true;
netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
return 0; return 0;
} }
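The hfi1 ipoib hunks drop the driver's own per-CPU tstats allocation and the generic .ndo_get_stats64 hook: setting pcpu_stat_type tells the core to allocate and free dev->tstats itself and to fall back to the tstats-based accessor when no .ndo_get_stats64 is supplied. A hedged sketch of the pattern (the setup function name is made up):

#include <linux/netdevice.h>

/*
 * With NETDEV_PCPU_STAT_TSTATS the networking core owns dev->tstats:
 * it is allocated when the device is set up and freed on teardown, so
 * the driver no longer calls netdev_alloc_pcpu_stats()/free_percpu().
 */
static void example_setup(struct net_device *netdev)
{
        netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
        /* no .ndo_get_stats64 needed; the core reads the tstats counters */
}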

@ -1207,14 +1207,11 @@ retry:
(u32)lnkctl2); (u32)lnkctl2);
/* only write to parent if target is not as high as ours */ /* only write to parent if target is not as high as ours */
if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) { if ((lnkctl2 & PCI_EXP_LNKCTL2_TLS) < target_vector) {
lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; ret = pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL2,
lnkctl2 |= target_vector; PCI_EXP_LNKCTL2_TLS,
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__, target_vector);
(u32)lnkctl2);
ret = pcie_capability_write_word(parent,
PCI_EXP_LNKCTL2, lnkctl2);
if (ret) { if (ret) {
dd_dev_err(dd, "Unable to write to PCI config\n"); dd_dev_err(dd, "Unable to change parent PCI target speed\n");
return_error = 1; return_error = 1;
goto done; goto done;
} }
@ -1223,22 +1220,11 @@ retry:
} }
dd_dev_info(dd, "%s: setting target link speed\n", __func__); dd_dev_info(dd, "%s: setting target link speed\n", __func__);
ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2); ret = pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL2,
PCI_EXP_LNKCTL2_TLS,
target_vector);
if (ret) { if (ret) {
dd_dev_err(dd, "Unable to read from PCI config\n"); dd_dev_err(dd, "Unable to change device PCI target speed\n");
return_error = 1;
goto done;
}
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
(u32)lnkctl2);
lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
(u32)lnkctl2);
ret = pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
if (ret) {
dd_dev_err(dd, "Unable to write to PCI config\n");
return_error = 1; return_error = 1;
goto done; goto done;
} }
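pcie_capability_clear_and_set_word() folds the read/mask/or/write of a PCIe capability register into one helper, which is what replaces the open-coded sequence (and its duplicated error handling) above. A small sketch of the call as used for the Target Link Speed field; the wrapper name and device pointer are placeholders:

#include <linux/pci.h>

/* Update only the Target Link Speed bits of Link Control 2. */
static int set_target_link_speed(struct pci_dev *pdev, u16 target_vector)
{
        return pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL2,
                                                  PCI_EXP_LNKCTL2_TLS,
                                                  target_vector);
}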

@ -59,8 +59,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
struct hns_roce_ib_create_ah_resp resp = {}; struct hns_roce_ib_create_ah_resp resp = {};
struct hns_roce_ah *ah = to_hr_ah(ibah); struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0; u8 tclass = get_tclass(grh);
u32 max_sl; u8 priority = 0;
u8 tc_mode = 0;
int ret;
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -74,16 +76,23 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ah->av.hop_limit = grh->hop_limit; ah->av.hop_limit = grh->hop_limit;
ah->av.flowlabel = grh->flow_label; ah->av.flowlabel = grh->flow_label;
ah->av.udp_sport = get_ah_udp_sport(ah_attr); ah->av.udp_sport = get_ah_udp_sport(ah_attr);
ah->av.tclass = get_tclass(grh); ah->av.tclass = tclass;
ah->av.sl = rdma_ah_get_sl(ah_attr); ret = hr_dev->hw->get_dscp(hr_dev, tclass, &tc_mode, &priority);
max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1); if (ret == -EOPNOTSUPP)
if (unlikely(ah->av.sl > max_sl)) { ret = 0;
ibdev_err_ratelimited(&hr_dev->ib_dev,
"failed to set sl, sl (%u) shouldn't be larger than %u.\n", if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
ah->av.sl, max_sl); return ret;
if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
ah->av.sl = priority;
else
ah->av.sl = rdma_ah_get_sl(ah_attr);
if (!check_sl_valid(hr_dev, ah->av.sl))
return -EINVAL; return -EINVAL;
}
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE); memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
@ -99,6 +108,8 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
} }
if (udata) { if (udata) {
resp.priority = ah->av.sl;
resp.tc_mode = tc_mode;
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN); memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
ret = ib_copy_to_udata(udata, &resp, ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp))); min(udata->outlen, sizeof(resp)));

@ -153,8 +153,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
return total; return total;
} }
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem,
int buf_cnt, struct ib_umem *umem,
unsigned int page_shift) unsigned int page_shift)
{ {
struct ib_block_iter biter; struct ib_block_iter biter;

@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return ret; return ret;
} }
ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL)); ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) { if (ret) {
ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret); ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
goto err_put; goto err_put;
@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return 0; return 0;
err_xa: err_xa:
xa_erase(&cq_table->array, hr_cq->cqn); xa_erase_irq(&cq_table->array, hr_cq->cqn);
err_put: err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn); hr_cq->cqn);
xa_erase(&cq_table->array, hr_cq->cqn); xa_erase_irq(&cq_table->array, hr_cq->cqn);
/* Waiting interrupt process procedure carried out */ /* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
struct ib_event event; struct ib_event event;
struct ib_cq *ibcq; struct ib_cq *ibcq;
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
return;
}
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID && if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
return; return;
} }
refcount_inc(&hr_cq->refcount); xa_lock(&hr_dev->cq_table.array);
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (hr_cq)
refcount_inc(&hr_cq->refcount);
xa_unlock(&hr_dev->cq_table.array);
if (!hr_cq) {
dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
return;
}
ibcq = &hr_cq->ib_cq; ibcq = &hr_cq->ib_cq;
if (ibcq->event_handler) { if (ibcq->event_handler) {
@ -534,4 +536,5 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
ida_destroy(&hr_dev->cq_table.bank[i].ida); ida_destroy(&hr_dev->cq_table.bank[i].ida);
mutex_destroy(&hr_dev->cq_table.bank_mutex);
} }
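The cq.c change closes a lookup/free race in the async-event path: the CQ pointer and its reference are now taken while holding the XArray lock, so a concurrent free_cqc() cannot drop the entry between xa_load() and refcount_inc(); the store/erase calls also move to the _irq variants of the XArray helpers. A generic sketch of the lookup-under-lock pattern, with made-up type and function names:

#include <linux/refcount.h>
#include <linux/xarray.h>

struct obj {
        refcount_t refcount;
};

/* Return the object with a reference held, or NULL if it is gone. */
static struct obj *get_obj(struct xarray *xa, unsigned long id)
{
        struct obj *o;

        xa_lock(xa);
        o = xa_load(xa, id);
        if (o)
                refcount_inc(&o->refcount);
        xa_unlock(xa);

        return o;
}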

@ -100,6 +100,9 @@
#define CQ_BANKID_SHIFT 2 #define CQ_BANKID_SHIFT 2
#define CQ_BANKID_MASK GENMASK(1, 0) #define CQ_BANKID_MASK GENMASK(1, 0)
#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF
#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF
enum { enum {
SERV_TYPE_RC, SERV_TYPE_RC,
SERV_TYPE_UC, SERV_TYPE_UC,
@ -645,6 +648,8 @@ struct hns_roce_qp {
struct hns_user_mmap_entry *dwqe_mmap_entry; struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config; u32 config;
enum hns_roce_cong_type cong_type; enum hns_roce_cong_type cong_type;
u8 tc_mode;
u8 priority;
}; };
struct hns_roce_ib_iboe { struct hns_roce_ib_iboe {
@ -923,8 +928,7 @@ struct hns_roce_hw {
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags, struct hns_roce_mr *mr, int flags,
void *mb_buf); void *mb_buf);
int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw); int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev, void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
@ -950,6 +954,8 @@ struct hns_roce_hw {
int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer); int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev, int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters); u64 *stats, u32 port, int *hw_counters);
int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
u8 *tc_mode, u8 *priority);
const struct ib_device_ops *hns_roce_dev_ops; const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops; const struct ib_device_ops *hns_roce_dev_srq_ops;
}; };
@ -1228,7 +1234,7 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, struct hns_roce_buf *buf, int buf_cnt, struct hns_roce_buf *buf,
unsigned int page_shift); unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int hns_roce_get_umem_bufs(dma_addr_t *bufs,
int buf_cnt, struct ib_umem *umem, int buf_cnt, struct ib_umem *umem,
unsigned int page_shift); unsigned int page_shift);
@ -1292,4 +1298,6 @@ struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length, size_t length,
enum hns_roce_mmap_type mmap_type); enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
#endif /* _HNS_ROCE_DEVICE_H */ #endif /* _HNS_ROCE_DEVICE_H */

@ -281,7 +281,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
return hem; return hem;
fail: fail:
hns_roce_free_hem(hr_dev, hem); kfree(hem);
return NULL; return NULL;
} }
@ -877,6 +877,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
if (hns_roce_check_whether_mhop(hr_dev, table->type)) { if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
hns_roce_cleanup_mhop_hem_table(hr_dev, table); hns_roce_cleanup_mhop_hem_table(hr_dev, table);
mutex_destroy(&table->mutex);
return; return;
} }
@ -891,6 +892,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
hns_roce_free_hem(hr_dev, table->hem[i]); hns_roce_free_hem(hr_dev, table->hem[i]);
} }
mutex_destroy(&table->mutex);
kfree(table->hem); kfree(table->hem);
} }
@ -986,15 +988,13 @@ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
} }
} }
static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr, static void hem_list_link_bt(void *base_addr, u64 table_addr)
u64 table_addr)
{ {
*(u64 *)(base_addr) = table_addr; *(u64 *)(base_addr) = table_addr;
} }
/* assign L0 table address to hem from root bt */ /* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev, static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr,
struct hns_roce_hem_item *hem, void *cpu_addr,
u64 phy_addr) u64 phy_addr)
{ {
hem->addr = cpu_addr; hem->addr = cpu_addr;
@ -1163,8 +1163,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
if (level > 1) { if (level > 1) {
pre = hem_ptrs[level - 1]; pre = hem_ptrs[level - 1];
step = (cur->start - pre->start) / step * BA_BYTE_LEN; step = (cur->start - pre->start) / step * BA_BYTE_LEN;
hem_list_link_bt(hr_dev, pre->addr + step, hem_list_link_bt(pre->addr + step, cur->dma_addr);
cur->dma_addr);
} }
} }
@ -1222,7 +1221,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
if (!hem) if (!hem)
return -ENOMEM; return -ENOMEM;
hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base); hem_list_assign_bt(hem, cpu_base, phy_base);
list_add(&hem->list, branch_head); list_add(&hem->list, branch_head);
list_add(&hem->sibling, leaf_head); list_add(&hem->sibling, leaf_head);
@ -1245,7 +1244,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
/* if exist mid bt, link L1 to L0 */ /* if exist mid bt, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem, branch_head, list) { list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
offset = (hem->start - r->offset) / step * BA_BYTE_LEN; offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr); hem_list_link_bt(cpu_base + offset, hem->dma_addr);
total++; total++;
} }

@ -57,16 +57,16 @@ enum {
}; };
#define check_whether_bt_num_3(type, hop_num) \ #define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2) ((type) < HEM_TYPE_MTT && (hop_num) == 2)
#define check_whether_bt_num_2(type, hop_num) \ #define check_whether_bt_num_2(type, hop_num) \
((type < HEM_TYPE_MTT && hop_num == 1) || \ (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
(type >= HEM_TYPE_MTT && hop_num == 2)) ((type) >= HEM_TYPE_MTT && (hop_num) == 2))
#define check_whether_bt_num_1(type, hop_num) \ #define check_whether_bt_num_1(type, hop_num) \
((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \ (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
(type >= HEM_TYPE_MTT && hop_num == 1) || \ ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0)) ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
struct hns_roce_hem { struct hns_roce_hem {
void *buf; void *buf;
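The check_whether_bt_num_*() edits only add parentheses around the macro parameters; without them, an argument built from a lower-precedence operator associates with the macro body instead of being evaluated first. A tiny, self-contained illustration (the value of HEM_TYPE_MTT here is a placeholder):

enum { HEM_TYPE_MTT = 2 }; /* placeholder value, for illustration only */

#define BAD_IS_MTT(type)        (type < HEM_TYPE_MTT)
#define GOOD_IS_MTT(type)       ((type) < HEM_TYPE_MTT)

/*
 * BAD_IS_MTT(a ? b : c) expands to (a ? b : c < HEM_TYPE_MTT), where the
 * '<' binds to c alone; GOOD_IS_MTT(a ? b : c) compares the whole argument.
 */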

@ -443,10 +443,6 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit); hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass); hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel); hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
return -EINVAL;
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl); hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
ud_sq_wqe->sgid_index = ah->av.gid_index; ud_sq_wqe->sgid_index = ah->av.gid_index;
@ -2105,7 +2101,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->gmv_bt_num * caps->gmv_bt_num *
(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz)); (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE / caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
caps->gmv_entry_sz); caps->gmv_entry_sz);
} else { } else {
u32 func_num = max_t(u32, 1, hr_dev->func_num); u32 func_num = max_t(u32, 1, hr_dev->func_num);
@ -2671,6 +2667,8 @@ static void free_mr_exit(struct hns_roce_dev *hr_dev)
kfree(free_mr->rsv_pd); kfree(free_mr->rsv_pd);
free_mr->rsv_pd = NULL; free_mr->rsv_pd = NULL;
} }
mutex_destroy(&free_mr->mutex);
} }
static int free_mr_alloc_res(struct hns_roce_dev *hr_dev) static int free_mr_alloc_res(struct hns_roce_dev *hr_dev)
@ -2821,8 +2819,10 @@ static int free_mr_init(struct hns_roce_dev *hr_dev)
mutex_init(&free_mr->mutex); mutex_init(&free_mr->mutex);
ret = free_mr_alloc_res(hr_dev); ret = free_mr_alloc_res(hr_dev);
if (ret) if (ret) {
mutex_destroy(&free_mr->mutex);
return ret; return ret;
}
ret = free_mr_modify_qp(hr_dev); ret = free_mr_modify_qp(hr_dev);
if (ret) if (ret)
@ -3208,13 +3208,14 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
/* Aligned to the hardware address access unit */ /* Aligned to the hardware address access unit */
for (i = 0; i < ARRAY_SIZE(pages); i++) for (i = 0; i < ARRAY_SIZE(pages); i++)
pages[i] >>= 6; pages[i] >>= MPT_PBL_BUF_ADDR_S;
pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_size = cpu_to_le32(mr->npages);
mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S);
hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); hr_reg_write(mpt_entry, MPT_PBL_BA_H,
upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0])); hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0]));
@ -3307,8 +3308,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return ret; return ret;
} }
static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
void *mb_buf, struct hns_roce_mr *mr)
{ {
dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
struct hns_roce_v2_mpt_entry *mpt_entry; struct hns_roce_v2_mpt_entry *mpt_entry;
@ -3335,8 +3335,10 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_size = cpu_to_le32(mr->npages);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3)); mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >>
hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); MPT_PBL_BA_ADDR_S));
hr_reg_write(mpt_entry, MPT_PBL_BA_H,
upper_32_bits(pbl_ba >> MPT_PBL_BA_ADDR_S));
return 0; return 0;
} }
@ -3582,14 +3584,14 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift)); to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ, hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift)); to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> 3); hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> CQC_CQE_BA_L_S);
hr_reg_write(cq_context, CQC_CQE_BA_H, (dma_handle >> (32 + 3))); hr_reg_write(cq_context, CQC_CQE_BA_H, dma_handle >> CQC_CQE_BA_H_S);
hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN, hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB); hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB);
hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L, hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_L,
((u32)hr_cq->db.dma) >> 1); ((u32)hr_cq->db.dma) >> 1);
hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H, hr_reg_write(cq_context, CQC_CQE_DB_RECORD_ADDR_H,
hr_cq->db.dma >> 32); hr_cq->db.dma >> CQC_CQE_DB_RECORD_ADDR_H_S);
hr_reg_write(cq_context, CQC_CQ_MAX_CNT, hr_reg_write(cq_context, CQC_CQ_MAX_CNT,
HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM); HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
hr_reg_write(cq_context, CQC_CQ_PERIOD, hr_reg_write(cq_context, CQC_CQ_PERIOD,
@ -3711,8 +3713,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
wc->status == IB_WC_WR_FLUSH_ERR)) wc->status == IB_WC_WR_FLUSH_ERR))
return; return;
ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status); ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe, cqe_status);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
cq->cqe_size, false); cq->cqe_size, false);
wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS); wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
@ -4217,8 +4220,7 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
} }
static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *context)
struct hns_roce_v2_qp_context *qpc_mask)
{ {
hr_reg_write(context, QPC_SGE_SHIFT, hr_reg_write(context, QPC_SGE_SHIFT,
to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
@ -4240,7 +4242,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
} }
static void modify_qp_reset_to_init(struct ib_qp *ibqp, static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask) struct hns_roce_v2_qp_context *qpc_mask)
{ {
@ -4259,7 +4260,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs)); hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
set_qpc_wqe_cnt(hr_qp, context, qpc_mask); set_qpc_wqe_cnt(hr_qp, context);
/* No VLAN need to set 0xFFF */ /* No VLAN need to set 0xFFF */
hr_reg_write(context, QPC_VLAN_ID, 0xfff); hr_reg_write(context, QPC_VLAN_ID, 0xfff);
@ -4300,7 +4301,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
} }
static void modify_qp_init_to_init(struct ib_qp *ibqp, static void modify_qp_init_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask) struct hns_roce_v2_qp_context *qpc_mask)
{ {
@ -4521,16 +4521,16 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return -EINVAL; return -EINVAL;
} }
hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> 4); hr_reg_write(context, QPC_TRRL_BA_L, trrl_ba >> QPC_TRRL_BA_L_S);
hr_reg_clear(qpc_mask, QPC_TRRL_BA_L); hr_reg_clear(qpc_mask, QPC_TRRL_BA_L);
context->trrl_ba = cpu_to_le32(trrl_ba >> (16 + 4)); context->trrl_ba = cpu_to_le32(trrl_ba >> QPC_TRRL_BA_M_S);
qpc_mask->trrl_ba = 0; qpc_mask->trrl_ba = 0;
hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> (32 + 16 + 4)); hr_reg_write(context, QPC_TRRL_BA_H, trrl_ba >> QPC_TRRL_BA_H_S);
hr_reg_clear(qpc_mask, QPC_TRRL_BA_H); hr_reg_clear(qpc_mask, QPC_TRRL_BA_H);
context->irrl_ba = cpu_to_le32(irrl_ba >> 6); context->irrl_ba = cpu_to_le32(irrl_ba >> QPC_IRRL_BA_L_S);
qpc_mask->irrl_ba = 0; qpc_mask->irrl_ba = 0;
hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> (32 + 6)); hr_reg_write(context, QPC_IRRL_BA_H, irrl_ba >> QPC_IRRL_BA_H_S);
hr_reg_clear(qpc_mask, QPC_IRRL_BA_H); hr_reg_clear(qpc_mask, QPC_IRRL_BA_H);
hr_reg_enable(context, QPC_RMT_E2E); hr_reg_enable(context, QPC_RMT_E2E);
@ -4592,8 +4592,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX); hr_reg_clear(qpc_mask, QPC_TRRL_HEAD_MAX);
hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX); hr_reg_clear(qpc_mask, QPC_TRRL_TAIL_MAX);
#define MAX_LP_SGEN 3
/* rocee send 2^lp_sgen_ini segs every time */ /* rocee send 2^lp_sgen_ini segs every time */
hr_reg_write(context, QPC_LP_SGEN_INI, 3); hr_reg_write(context, QPC_LP_SGEN_INI, MAX_LP_SGEN);
hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI); hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
if (udata && ibqp->qp_type == IB_QPT_RC && if (udata && ibqp->qp_type == IB_QPT_RC &&
@ -4619,8 +4620,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return 0; return 0;
} }
static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask) struct hns_roce_v2_qp_context *qpc_mask)
{ {
@ -4685,7 +4685,7 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
*tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1); *tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
list_for_each_entry(hr_dip, &hr_dev->dip_list, node) { list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16)) { if (!memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) {
*dip_idx = hr_dip->dip_idx; *dip_idx = hr_dip->dip_idx;
goto out; goto out;
} }
@ -4828,6 +4828,69 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return 0; return 0;
} }
static int hns_roce_hw_v2_get_dscp(struct hns_roce_dev *hr_dev, u8 dscp,
u8 *tc_mode, u8 *priority)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
if (!ops->get_dscp_prio)
return -EOPNOTSUPP;
return ops->get_dscp_prio(handle, dscp, tc_mode, priority);
}
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl)
{
u32 max_sl;
max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
if (unlikely(sl > max_sl)) {
ibdev_err_ratelimited(&hr_dev->ib_dev,
"failed to set SL(%u). Shouldn't be larger than %u.\n",
sl, max_sl);
return false;
}
return true;
}
static int hns_roce_set_sl(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
int ret;
ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
&hr_qp->tc_mode, &hr_qp->priority);
if (ret && ret != -EOPNOTSUPP &&
grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
ibdev_err_ratelimited(ibdev,
"failed to get dscp, ret = %d.\n", ret);
return ret;
}
if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
hr_qp->sl = hr_qp->priority;
else
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
if (!check_sl_valid(hr_dev, hr_qp->sl))
return -EINVAL;
hr_reg_write(context, QPC_SL, hr_qp->sl);
hr_reg_clear(qpc_mask, QPC_SL);
return 0;
}
static int hns_roce_v2_set_path(struct ib_qp *ibqp, static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, const struct ib_qp_attr *attr,
int attr_mask, int attr_mask,
@ -4843,25 +4906,18 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
int is_roce_protocol; int is_roce_protocol;
u16 vlan_id = 0xffff; u16 vlan_id = 0xffff;
bool is_udp = false; bool is_udp = false;
u32 max_sl;
u8 ib_port; u8 ib_port;
u8 hr_port; u8 hr_port;
int ret; int ret;
max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
if (unlikely(sl > max_sl)) {
ibdev_err_ratelimited(ibdev,
"failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
sl, max_sl);
return -EINVAL;
}
/* /*
* If free_mr_en of qp is set, it means that this qp comes from * If free_mr_en of qp is set, it means that this qp comes from
* free mr. This qp will perform the loopback operation. * free mr. This qp will perform the loopback operation.
* In the loopback scenario, only sl needs to be set. * In the loopback scenario, only sl needs to be set.
*/ */
if (hr_qp->free_mr_en) { if (hr_qp->free_mr_en) {
if (!check_sl_valid(hr_dev, sl))
return -EINVAL;
hr_reg_write(context, QPC_SL, sl); hr_reg_write(context, QPC_SL, sl);
hr_reg_clear(qpc_mask, QPC_SL); hr_reg_clear(qpc_mask, QPC_SL);
hr_qp->sl = sl; hr_qp->sl = sl;
@ -4931,11 +4987,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
hr_qp->sl = sl; return hns_roce_set_sl(ibqp, attr, context, qpc_mask);
hr_reg_write(context, QPC_SL, hr_qp->sl);
hr_reg_clear(qpc_mask, QPC_SL);
return 0;
} }
static bool check_qp_state(enum ib_qp_state cur_state, static bool check_qp_state(enum ib_qp_state cur_state,
@ -4982,15 +5034,14 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz); memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
modify_qp_reset_to_init(ibqp, attr, context, qpc_mask); modify_qp_reset_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, context, qpc_mask); modify_qp_init_to_init(ibqp, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context, ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask, udata); qpc_mask, udata);
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context, ret = modify_qp_rtr_to_rts(ibqp, attr_mask, context, qpc_mask);
qpc_mask);
} }
return ret; return ret;
@ -5802,7 +5853,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
dev_info(hr_dev->dev, dev_info(hr_dev->dev,
"cq_period(%u) reached the upper limit, adjusted to 65.\n", "cq_period(%u) reached the upper limit, adjusted to 65.\n",
cq_period); cq_period);
cq_period = HNS_ROCE_MAX_CQ_PERIOD; cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
} }
cq_period *= HNS_ROCE_CLOCK_ADJUST; cq_period *= HNS_ROCE_CLOCK_ADJUST;
} }
@ -6735,6 +6786,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_srqc = hns_roce_v2_query_srqc, .query_srqc = hns_roce_v2_query_srqc,
.query_sccc = hns_roce_v2_query_sccc, .query_sccc = hns_roce_v2_query_sccc,
.query_hw_counter = hns_roce_hw_v2_query_counter, .query_hw_counter = hns_roce_hw_v2_query_counter,
.get_dscp = hns_roce_hw_v2_get_dscp,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops, .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops, .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
}; };
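The new get_dscp hook and hns_roce_set_sl() pick the service level from the DSCP-mapped priority only when the port maps traffic classes by DSCP and the GID is RoCEv2 (IB_GID_TYPE_ROCE_UDP_ENCAP); otherwise the SL from the address handle is kept, and the result is still range-checked by check_sl_valid(). A simplified, self-contained sketch of that decision, with stand-in types for the hnae3 callback results:

#include <stdbool.h>

enum tc_map_mode { TC_MAP_MODE_PRIO, TC_MAP_MODE_DSCP };

struct sl_input {
        enum tc_map_mode tc_mode;   /* from the (stand-in) get_dscp lookup */
        unsigned int dscp_priority; /* priority mapped from the tclass DSCP */
        unsigned int ah_sl;         /* SL carried in the address handle */
        bool is_roce_v2;            /* IB_GID_TYPE_ROCE_UDP_ENCAP */
};

/* Mirrors the selection done by hns_roce_set_sl() in the hunk above. */
static unsigned int pick_sl(const struct sl_input *in)
{
        if (in->tc_mode == TC_MAP_MODE_DSCP && in->is_roce_v2)
                return in->dscp_priority;
        return in->ah_sl;
}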

@ -276,6 +276,10 @@ struct hns_roce_v2_cq_context {
__le32 byte_64_se_cqe_idx; __le32 byte_64_se_cqe_idx;
}; };
#define CQC_CQE_BA_L_S 3
#define CQC_CQE_BA_H_S (32 + CQC_CQE_BA_L_S)
#define CQC_CQE_DB_RECORD_ADDR_H_S 32
#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0 #define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0 #define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
@ -447,6 +451,12 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_qp_context_ex ext; struct hns_roce_v2_qp_context_ex ext;
}; };
#define QPC_TRRL_BA_L_S 4
#define QPC_TRRL_BA_M_S (16 + QPC_TRRL_BA_L_S)
#define QPC_TRRL_BA_H_S (32 + QPC_TRRL_BA_M_S)
#define QPC_IRRL_BA_L_S 6
#define QPC_IRRL_BA_H_S (32 + QPC_IRRL_BA_L_S)
#define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l) #define QPC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_qp_context, h, l)
#define QPC_TST QPC_FIELD_LOC(2, 0) #define QPC_TST QPC_FIELD_LOC(2, 0)
@ -716,6 +726,9 @@ struct hns_roce_v2_mpt_entry {
__le32 byte_64_buf_pa1; __le32 byte_64_buf_pa1;
}; };
#define MPT_PBL_BUF_ADDR_S 6
#define MPT_PBL_BA_ADDR_S 3
#define MPT_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_mpt_entry, h, l) #define MPT_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_mpt_entry, h, l)
#define MPT_ST MPT_FIELD_LOC(1, 0) #define MPT_ST MPT_FIELD_LOC(1, 0)
@ -1334,7 +1347,7 @@ struct fmea_ram_ecc {
/* only for RNR timeout issue of HIP08 */ /* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000 #define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65 #define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65
#define HNS_ROCE_MAX_EQ_PERIOD 65 #define HNS_ROCE_MAX_EQ_PERIOD 65
#define HNS_ROCE_RNR_TIMER_10NS 1 #define HNS_ROCE_RNR_TIMER_10NS 1
#define HNS_ROCE_1US_CFG 999 #define HNS_ROCE_1US_CFG 999

@ -37,9 +37,11 @@
#include <rdma/ib_smi.h> #include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h> #include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h> #include <rdma/ib_cache.h>
#include "hnae3.h"
#include "hns_roce_common.h" #include "hns_roce_common.h"
#include "hns_roce_device.h" #include "hns_roce_device.h"
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
const u8 *addr) const u8 *addr)
@ -192,6 +194,12 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_ATOMIC_HCA : IB_ATOMIC_NONE; IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1; props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
props->max_ah = INT_MAX;
props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD;
props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT;
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
props->max_srq = hr_dev->caps.num_srqs; props->max_srq = hr_dev->caps.num_srqs;
props->max_srq_wr = hr_dev->caps.max_srq_wrs; props->max_srq_wr = hr_dev->caps.max_srq_wrs;
@ -421,6 +429,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return 0; return 0;
error_fail_copy_to_udata: error_fail_copy_to_udata:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&context->page_mutex);
hns_roce_dealloc_uar_entry(context); hns_roce_dealloc_uar_entry(context);
error_fail_uar_entry: error_fail_uar_entry:
@ -437,6 +448,10 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext); struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&context->page_mutex);
hns_roce_dealloc_uar_entry(context); hns_roce_dealloc_uar_entry(context);
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx); ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
@ -925,6 +940,15 @@ err_unmap_dmpt:
return ret; return ret;
} }
static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_bitmap(hr_dev);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&hr_dev->pgdir_mutex);
}
/** /**
* hns_roce_setup_hca - setup host channel adapter * hns_roce_setup_hca - setup host channel adapter
* @hr_dev: pointer to hns roce device * @hr_dev: pointer to hns roce device
@ -973,6 +997,10 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
err_uar_table_free: err_uar_table_free:
ida_destroy(&hr_dev->uar_ida.ida); ida_destroy(&hr_dev->uar_ida.ida);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&hr_dev->pgdir_mutex);
return ret; return ret;
} }
@ -1118,7 +1146,7 @@ error_failed_register_device:
hr_dev->hw->hw_exit(hr_dev); hr_dev->hw->hw_exit(hr_dev);
error_failed_engine_init: error_failed_engine_init:
hns_roce_cleanup_bitmap(hr_dev); hns_roce_teardown_hca(hr_dev);
error_failed_setup_hca: error_failed_setup_hca:
hns_roce_cleanup_hem(hr_dev); hns_roce_cleanup_hem(hr_dev);
@ -1148,7 +1176,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
if (hr_dev->hw->hw_exit) if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev); hr_dev->hw->hw_exit(hr_dev);
hns_roce_cleanup_bitmap(hr_dev); hns_roce_teardown_hca(hr_dev);
hns_roce_cleanup_hem(hr_dev); hns_roce_cleanup_hem(hr_dev);
if (hr_dev->cmd_mod) if (hr_dev->cmd_mod)
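Several of the main.c, qp and srq hunks add mutex_destroy() on error and teardown paths so every mutex_init() is matched; with mutex debugging enabled this flags a lock that is freed while still initialized, and it keeps setup and cleanup symmetric. A minimal sketch of the pattern with hypothetical names:

#include <linux/mutex.h>
#include <linux/slab.h>

struct ctx {
        struct mutex lock;
        int *buf;
};

static int ctx_init(struct ctx *c)
{
        mutex_init(&c->lock);

        c->buf = kzalloc(16, GFP_KERNEL);
        if (!c->buf) {
                mutex_destroy(&c->lock); /* undo the init on the error path */
                return -ENOMEM;
        }
        return 0;
}

static void ctx_exit(struct ctx *c)
{
        kfree(c->buf);
        mutex_destroy(&c->lock); /* mirror of ctx_init() */
}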

@ -162,7 +162,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
if (mr->type != MR_TYPE_FRMR) if (mr->type != MR_TYPE_FRMR)
ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) { if (ret) {
dev_err(dev, "failed to write mtpt, ret = %d.\n", ret); dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
goto err_page; goto err_page;
@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
struct ib_device *ibdev = &hr_dev->ib_dev; struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_mtr *mtr = &mr->pbl_mtr; struct hns_roce_mtr *mtr = &mr->pbl_mtr;
int ret = 0; int ret, sg_num = 0;
mr->npages = 0; mr->npages = 0;
mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count, mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
sizeof(dma_addr_t), GFP_KERNEL); sizeof(dma_addr_t), GFP_KERNEL);
if (!mr->page_list) if (!mr->page_list)
return ret; return sg_num;
ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
if (ret < 1) { if (sg_num < 1) {
ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret); mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
goto err_page_list; goto err_page_list;
} }
@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages); ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
if (ret) { if (ret) {
ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret); ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
ret = 0; sg_num = 0;
} else { } else {
mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size); mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
ret = mr->npages;
} }
err_page_list: err_page_list:
kvfree(mr->page_list); kvfree(mr->page_list);
mr->page_list = NULL; mr->page_list = NULL;
return ret; return sg_num;
} }
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
@ -756,7 +755,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
return -ENOMEM; return -ENOMEM;
if (mtr->umem) if (mtr->umem)
npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count, npage = hns_roce_get_umem_bufs(pages, page_count,
mtr->umem, page_shift); mtr->umem, page_shift);
else else
npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count, npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,

@ -410,7 +410,8 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
bankid = get_qp_bankid(hr_qp->qpn); bankid = get_qp_bankid(hr_qp->qpn);
ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3); ida_free(&hr_dev->qp_table.bank[bankid].ida,
hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);
mutex_lock(&hr_dev->qp_table.bank_mutex); mutex_lock(&hr_dev->qp_table.bank_mutex);
hr_dev->qp_table.bank[bankid].inuse--; hr_dev->qp_table.bank[bankid].inuse--;
@ -1117,7 +1118,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
} }
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct ib_udata *udata,
struct hns_roce_qp *hr_qp) struct hns_roce_qp *hr_qp)
@ -1140,7 +1140,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
if (ret) { if (ret) {
ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret); ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
return ret; goto err_out;
} }
if (!udata) { if (!udata) {
@ -1148,7 +1148,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (ret) { if (ret) {
ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n", ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
ret); ret);
return ret; goto err_out;
} }
} }
@ -1219,6 +1219,8 @@ err_qpn:
free_qp_buf(hr_dev, hr_qp); free_qp_buf(hr_dev, hr_qp);
err_buf: err_buf:
free_kernel_wrid(hr_qp); free_kernel_wrid(hr_qp);
err_out:
mutex_destroy(&hr_qp->mutex);
return ret; return ret;
} }
@ -1234,6 +1236,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
free_qp_buf(hr_dev, hr_qp); free_qp_buf(hr_dev, hr_qp);
free_kernel_wrid(hr_qp); free_kernel_wrid(hr_qp);
free_qp_db(hr_dev, hr_qp, udata); free_qp_db(hr_dev, hr_qp, udata);
mutex_destroy(&hr_qp->mutex);
} }
static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type, static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
@ -1271,7 +1274,6 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
struct ib_device *ibdev = qp->device; struct ib_device *ibdev = qp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp = to_hr_qp(qp); struct hns_roce_qp *hr_qp = to_hr_qp(qp);
struct ib_pd *pd = qp->pd;
int ret; int ret;
ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
@ -1286,7 +1288,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
} }
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
if (ret) if (ret)
ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n", ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret); init_attr->qp_type, ret);
@ -1386,6 +1388,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata) int attr_mask, struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_ib_modify_qp_resp resp = {};
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state; enum ib_qp_state cur_state, new_state;
int ret = -EINVAL; int ret = -EINVAL;
@ -1427,6 +1430,18 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state, ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
new_state, udata); new_state, udata);
if (ret)
goto out;
if (udata && udata->outlen) {
resp.tc_mode = hr_qp->tc_mode;
resp.priority = hr_qp->sl;
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
if (ret)
ibdev_err_ratelimited(&hr_dev->ib_dev,
"failed to copy modify qp resp.\n");
}
out: out:
mutex_unlock(&hr_qp->mutex); mutex_unlock(&hr_qp->mutex);
@ -1561,5 +1576,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
ida_destroy(&hr_dev->qp_table.bank[i].ida); ida_destroy(&hr_dev->qp_table.bank[i].ida);
mutex_destroy(&hr_dev->qp_table.bank_mutex);
mutex_destroy(&hr_dev->qp_table.scc_mutex);
kfree(hr_dev->qp_table.idx_table.spare_idx); kfree(hr_dev->qp_table.idx_table.spare_idx);
} }
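hns_roce_modify_qp() now reports the selected tc_mode and priority back to userspace; copying min(udata->outlen, sizeof(resp)) bytes keeps older user libraries with a shorter response buffer working. A hedged sketch of just that response-copy step (the helper name is made up; the struct is the driver's uapi layout shown in the hunk above):

#include <linux/minmax.h>
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>

static int copy_modify_qp_resp(struct ib_udata *udata, u8 tc_mode, u8 sl)
{
        struct hns_roce_ib_modify_qp_resp resp = {};

        if (!udata || !udata->outlen)
                return 0; /* nothing to report to old userspace */

        resp.tc_mode = tc_mode;
        resp.priority = sl;
        return ib_copy_to_udata(udata, &resp,
                                min(udata->outlen, sizeof(resp)));
}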

@ -123,7 +123,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
return ret; return ret;
} }
ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
if (ret) { if (ret) {
ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
goto err_put; goto err_put;
@ -136,7 +136,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
return 0; return 0;
err_xa: err_xa:
xa_erase(&srq_table->xa, srq->srqn); xa_erase_irq(&srq_table->xa, srq->srqn);
err_put: err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
@ -154,7 +154,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn); ret, srq->srqn);
xa_erase(&srq_table->xa, srq->srqn); xa_erase_irq(&srq_table->xa, srq->srqn);
if (refcount_dec_and_test(&srq->refcount)) if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free); complete(&srq->free);
@ -250,7 +250,7 @@ static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr); hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
} }
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) static int alloc_srq_wrid(struct hns_roce_srq *srq)
{ {
srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL); srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) if (!srq->wrid)
@ -366,7 +366,7 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
goto err_idx; goto err_idx;
if (!udata) { if (!udata) {
ret = alloc_srq_wrid(hr_dev, srq); ret = alloc_srq_wrid(srq);
if (ret) if (ret)
goto err_wqe_buf; goto err_wqe_buf;
} }
@ -518,6 +518,7 @@ err_srq_db:
err_srq_buf: err_srq_buf:
free_srq_buf(hr_dev, srq); free_srq_buf(hr_dev, srq);
err_out: err_out:
mutex_destroy(&srq->mutex);
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT]); atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_CREATE_ERR_CNT]);
return ret; return ret;
@ -532,6 +533,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
free_srqn(hr_dev, srq); free_srqn(hr_dev, srq);
free_srq_db(hr_dev, srq, udata); free_srq_db(hr_dev, srq, udata);
free_srq_buf(hr_dev, srq); free_srq_buf(hr_dev, srq);
mutex_destroy(&srq->mutex);
return 0; return 0;
} }

@ -9,23 +9,23 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct mana_ib_create_cq_resp resp = {};
struct mana_ib_ucontext *mana_ucontext;
struct ib_device *ibdev = ibcq->device; struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {}; struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev; struct mana_ib_dev *mdev;
struct gdma_context *gc; bool is_rnic_cq;
u32 doorbell;
int err; int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev_to_gc(mdev);
if (udata->inlen < sizeof(ucmd)) cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
return -EINVAL; return -EINVAL;
if (attr->comp_vector > gc->max_num_queues)
return -EINVAL;
cq->comp_vector = attr->comp_vector;
err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)); err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
if (err) { if (err) {
ibdev_dbg(ibdev, ibdev_dbg(ibdev,
@ -33,42 +33,54 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err; return err;
} }
if (attr->cqe > mdev->adapter_caps.max_qp_wr) { is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe); ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
return -EINVAL; return -EINVAL;
} }
cq->cqe = attr->cqe; cq->cqe = attr->cqe;
cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
IB_ACCESS_LOCAL_WRITE); if (err) {
if (IS_ERR(cq->umem)) { ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
err = PTR_ERR(cq->umem);
ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
err);
return err; return err;
} }
err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region); mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
if (err) { ibucontext);
ibdev_dbg(ibdev, doorbell = mana_ucontext->doorbell;
"Failed to create dma region for create cq, %d\n",
err); if (is_rnic_cq) {
goto err_release_umem; err = mana_ib_gd_create_cq(mdev, cq, doorbell);
if (err) {
ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
goto err_destroy_queue;
}
err = mana_ib_install_cq_cb(mdev, cq);
if (err) {
ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
goto err_destroy_rnic_cq;
}
} }
ibdev_dbg(ibdev, resp.cqid = cq->queue.id;
"create_dma_region ret %d gdma_region 0x%llx\n", err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
err, cq->gdma_region); if (err) {
ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
/* goto err_remove_cq_cb;
* The CQ ID is not known at this time. The ID is generated at create_qp }
*/
cq->id = INVALID_QUEUE_ID;
return 0; return 0;
err_release_umem: err_remove_cq_cb:
ib_umem_release(cq->umem); mana_ib_remove_cq_cb(mdev, cq);
err_destroy_rnic_cq:
mana_ib_gd_destroy_cq(mdev, cq);
err_destroy_queue:
mana_ib_destroy_queue(mdev, &cq->queue);
return err; return err;
} }
@ -77,25 +89,17 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq); struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
struct ib_device *ibdev = ibcq->device; struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev; struct mana_ib_dev *mdev;
struct gdma_context *gc;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev); mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev_to_gc(mdev);
err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region); mana_ib_remove_cq_cb(mdev, cq);
if (err) {
ibdev_dbg(ibdev,
"Failed to destroy dma region, %d\n", err);
return err;
}
if (cq->id != INVALID_QUEUE_ID) { /* Ignore return code as there is not much we can do about it.
kfree(gc->cq_table[cq->id]); * The error message is printed inside.
gc->cq_table[cq->id] = NULL; */
} mana_ib_gd_destroy_cq(mdev, cq);
ib_umem_release(cq->umem); mana_ib_destroy_queue(mdev, &cq->queue);
return 0; return 0;
} }
@ -113,8 +117,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
struct gdma_context *gc = mdev_to_gc(mdev); struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue *gdma_cq; struct gdma_queue *gdma_cq;
if (cq->queue.id >= gc->max_num_cqs)
return -EINVAL;
/* Create CQ table entry */ /* Create CQ table entry */
WARN_ON(gc->cq_table[cq->id]); WARN_ON(gc->cq_table[cq->queue.id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL); gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq) if (!gdma_cq)
return -ENOMEM; return -ENOMEM;
@ -122,7 +128,18 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
gdma_cq->cq.context = cq; gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ; gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler; gdma_cq->cq.callback = mana_ib_cq_handler;
gdma_cq->id = cq->id; gdma_cq->id = cq->queue.id;
gc->cq_table[cq->id] = gdma_cq; gc->cq_table[cq->queue.id] = gdma_cq;
return 0; return 0;
} }
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
struct gdma_context *gc = mdev_to_gc(mdev);
if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
return;
kfree(gc->cq_table[cq->queue.id]);
gc->cq_table[cq->queue.id] = NULL;
}


@ -15,6 +15,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.driver_id = RDMA_DRIVER_MANA, .driver_id = RDMA_DRIVER_MANA,
.uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION, .uverbs_abi_ver = MANA_IB_UVERBS_ABI_VERSION,
.add_gid = mana_ib_gd_add_gid,
.alloc_pd = mana_ib_alloc_pd, .alloc_pd = mana_ib_alloc_pd,
.alloc_ucontext = mana_ib_alloc_ucontext, .alloc_ucontext = mana_ib_alloc_ucontext,
.create_cq = mana_ib_create_cq, .create_cq = mana_ib_create_cq,
@ -23,18 +24,21 @@ static const struct ib_device_ops mana_ib_dev_ops = {
.create_wq = mana_ib_create_wq, .create_wq = mana_ib_create_wq,
.dealloc_pd = mana_ib_dealloc_pd, .dealloc_pd = mana_ib_dealloc_pd,
.dealloc_ucontext = mana_ib_dealloc_ucontext, .dealloc_ucontext = mana_ib_dealloc_ucontext,
.del_gid = mana_ib_gd_del_gid,
.dereg_mr = mana_ib_dereg_mr, .dereg_mr = mana_ib_dereg_mr,
.destroy_cq = mana_ib_destroy_cq, .destroy_cq = mana_ib_destroy_cq,
.destroy_qp = mana_ib_destroy_qp, .destroy_qp = mana_ib_destroy_qp,
.destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table, .destroy_rwq_ind_table = mana_ib_destroy_rwq_ind_table,
.destroy_wq = mana_ib_destroy_wq, .destroy_wq = mana_ib_destroy_wq,
.disassociate_ucontext = mana_ib_disassociate_ucontext, .disassociate_ucontext = mana_ib_disassociate_ucontext,
.get_link_layer = mana_ib_get_link_layer,
.get_port_immutable = mana_ib_get_port_immutable, .get_port_immutable = mana_ib_get_port_immutable,
.mmap = mana_ib_mmap, .mmap = mana_ib_mmap,
.modify_qp = mana_ib_modify_qp, .modify_qp = mana_ib_modify_qp,
.modify_wq = mana_ib_modify_wq, .modify_wq = mana_ib_modify_wq,
.query_device = mana_ib_query_device, .query_device = mana_ib_query_device,
.query_gid = mana_ib_query_gid, .query_gid = mana_ib_query_gid,
.query_pkey = mana_ib_query_pkey,
.query_port = mana_ib_query_port, .query_port = mana_ib_query_port,
.reg_user_mr = mana_ib_reg_user_mr, .reg_user_mr = mana_ib_reg_user_mr,
@ -51,8 +55,10 @@ static int mana_ib_probe(struct auxiliary_device *adev,
{ {
struct mana_adev *madev = container_of(adev, struct mana_adev, adev); struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
struct gdma_dev *mdev = madev->mdev; struct gdma_dev *mdev = madev->mdev;
struct net_device *upper_ndev;
struct mana_context *mc; struct mana_context *mc;
struct mana_ib_dev *dev; struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
int ret; int ret;
mc = mdev->driver_data; mc = mdev->driver_data;
@ -74,9 +80,25 @@ static int mana_ib_probe(struct auxiliary_device *adev,
* num_comp_vectors needs to set to the max MSIX index * num_comp_vectors needs to set to the max MSIX index
* when interrupts and event queues are implemented * when interrupts and event queues are implemented
*/ */
dev->ib_dev.num_comp_vectors = 1; dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
dev->ib_dev.dev.parent = mdev->gdma_context->dev; dev->ib_dev.dev.parent = mdev->gdma_context->dev;
rcu_read_lock(); /* required to get upper dev */
upper_ndev = netdev_master_upper_dev_get_rcu(mc->ports[0]);
if (!upper_ndev) {
rcu_read_unlock();
ret = -ENODEV;
ibdev_err(&dev->ib_dev, "Failed to get master netdev");
goto free_ib_device;
}
ether_addr_copy(mac_addr, upper_ndev->dev_addr);
ret = ib_device_set_netdev(&dev->ib_dev, upper_ndev, 1);
rcu_read_unlock();
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
goto free_ib_device;
}
ret = mana_gd_register_device(&mdev->gdma_context->mana_ib); ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
if (ret) { if (ret) {
ibdev_err(&dev->ib_dev, "Failed to register device, ret %d", ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
@ -92,15 +114,36 @@ static int mana_ib_probe(struct auxiliary_device *adev,
goto deregister_device; goto deregister_device;
} }
ret = mana_ib_create_eqs(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
goto deregister_device;
}
ret = mana_ib_gd_create_rnic_adapter(dev);
if (ret)
goto destroy_eqs;
ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
ret);
goto destroy_rnic;
}
ret = ib_register_device(&dev->ib_dev, "mana_%d", ret = ib_register_device(&dev->ib_dev, "mana_%d",
mdev->gdma_context->dev); mdev->gdma_context->dev);
if (ret) if (ret)
goto deregister_device; goto destroy_rnic;
dev_set_drvdata(&adev->dev, dev); dev_set_drvdata(&adev->dev, dev);
return 0; return 0;
destroy_rnic:
mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
mana_ib_destroy_eqs(dev);
deregister_device: deregister_device:
mana_gd_deregister_device(dev->gdma_dev); mana_gd_deregister_device(dev->gdma_dev);
free_ib_device: free_ib_device:
@ -113,9 +156,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev); struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
ib_unregister_device(&dev->ib_dev); ib_unregister_device(&dev->ib_dev);
mana_ib_gd_destroy_rnic_adapter(dev);
mana_ib_destroy_eqs(dev);
mana_gd_deregister_device(dev->gdma_dev); mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev); ib_dealloc_device(&dev->ib_dev);
} }


@ -237,6 +237,47 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret); ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
} }
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
struct mana_ib_queue *queue)
{
struct ib_umem *umem;
int err;
queue->umem = NULL;
queue->id = INVALID_QUEUE_ID;
queue->gdma_region = GDMA_INVALID_DMA_REGION;
umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
return err;
}
err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
goto free_umem;
}
queue->umem = umem;
ibdev_dbg(&mdev->ib_dev, "created dma region 0x%llx\n", queue->gdma_region);
return 0;
free_umem:
ib_umem_release(umem);
return err;
}
void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
/* Ignore return code as there is not much we can do about it.
* The error message is printed inside.
*/
mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
ib_umem_release(queue->umem);
}
static int static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev, mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
struct gdma_context *gc, struct gdma_context *gc,
@ -484,11 +525,18 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num, int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable) struct ib_port_immutable *immutable)
{ {
/* struct ib_port_attr attr;
* This version only support RAW_PACKET int err;
* other values need to be filled for other types
*/ err = ib_query_port(ibdev, port_num, &attr);
if (err)
return err;
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
if (port_num == 1)
immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
return 0; return 0;
} }
@ -514,7 +562,42 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port, int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props) struct ib_port_attr *props)
{ {
/* This version doesn't return port properties */ struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
return -EINVAL;
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
props->active_mtu = ib_mtu_int_to_enum(ndev->mtu);
if (netif_carrier_ok(ndev) && netif_running(ndev)) {
props->state = IB_PORT_ACTIVE;
props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
} else {
props->state = IB_PORT_DOWN;
props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
}
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
if (port == 1)
props->gid_tbl_len = 16;
return 0;
}
enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num)
{
return IB_LINK_LAYER_ETHERNET;
}
int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
{
if (index != 0)
return -EINVAL;
*pkey = IB_DEFAULT_PKEY_FULL;
return 0; return 0;
} }
@ -570,3 +653,238 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
return 0; return 0;
} }
int mana_ib_create_eqs(struct mana_ib_dev *mdev)
{
struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err, i;
spec.type = GDMA_EQ;
spec.monitor_avl_buf = false;
spec.queue_size = EQ_SIZE;
spec.eq.callback = NULL;
spec.eq.context = mdev;
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
if (err)
return err;
mdev->eqs = kcalloc(mdev->ib_dev.num_comp_vectors, sizeof(struct gdma_queue *),
GFP_KERNEL);
if (!mdev->eqs) {
err = -ENOMEM;
goto destroy_fatal_eq;
}
for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
if (err)
goto destroy_eqs;
}
return 0;
destroy_eqs:
while (i-- > 0)
mana_gd_destroy_queue(gc, mdev->eqs[i]);
kfree(mdev->eqs);
destroy_fatal_eq:
mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
return err;
}
void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
{
struct gdma_context *gc = mdev_to_gc(mdev);
int i;
mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
mana_gd_destroy_queue(gc, mdev->eqs[i]);
kfree(mdev->eqs);
}
int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
{
struct mana_rnic_create_adapter_resp resp = {};
struct mana_rnic_create_adapter_req req = {};
struct gdma_context *gc = mdev_to_gc(mdev);
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
req.hdr.dev_id = gc->mana_ib.dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to create RNIC adapter err %d", err);
return err;
}
mdev->adapter_handle = resp.adapter;
return 0;
}
int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
{
struct mana_rnic_destroy_adapter_resp resp = {};
struct mana_rnic_destroy_adapter_req req = {};
struct gdma_context *gc;
int err;
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.dev_id = gc->mana_ib.dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
return err;
}
return 0;
}
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
{
struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
struct mana_rnic_config_addr_resp resp = {};
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_rnic_config_addr_req req = {};
int err;
if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
return -EINVAL;
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
req.hdr.dev_id = gc->mana_ib.dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
return err;
}
return 0;
}
int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
{
struct mana_ib_dev *mdev = container_of(attr->device, struct mana_ib_dev, ib_dev);
enum rdma_network_type ntype = rdma_gid_attr_network_type(attr);
struct mana_rnic_config_addr_resp resp = {};
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_rnic_config_addr_req req = {};
int err;
if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
return -EINVAL;
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
req.hdr.dev_id = gc->mana_ib.dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
return err;
}
return 0;
}
int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
{
struct mana_rnic_config_mac_addr_resp resp = {};
struct mana_rnic_config_mac_addr_req req = {};
struct gdma_context *gc = mdev_to_gc(mdev);
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
req.hdr.dev_id = gc->mana_ib.dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
return err;
}
return 0;
}
int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
{
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_rnic_create_cq_resp resp = {};
struct mana_rnic_create_cq_req req = {};
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
req.hdr.dev_id = gc->mana_ib.dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
req.doorbell_page = doorbell;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to create cq err %d", err);
return err;
}
cq->queue.id = resp.cq_id;
cq->cq_handle = resp.cq_handle;
/* The GDMA region is now owned by the CQ handle */
cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
return 0;
}
int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_rnic_destroy_cq_resp resp = {};
struct mana_rnic_destroy_cq_req req = {};
int err;
if (cq->cq_handle == INVALID_MANA_HANDLE)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
req.hdr.dev_id = gc->mana_ib.dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
return err;
}
return 0;
}


@ -45,19 +45,26 @@ struct mana_ib_adapter_caps {
u32 max_inline_data_size; u32 max_inline_data_size;
}; };
struct mana_ib_queue {
struct ib_umem *umem;
u64 gdma_region;
u64 id;
};
struct mana_ib_dev { struct mana_ib_dev {
struct ib_device ib_dev; struct ib_device ib_dev;
struct gdma_dev *gdma_dev; struct gdma_dev *gdma_dev;
mana_handle_t adapter_handle;
struct gdma_queue *fatal_err_eq;
struct gdma_queue **eqs;
struct mana_ib_adapter_caps adapter_caps; struct mana_ib_adapter_caps adapter_caps;
}; };
struct mana_ib_wq { struct mana_ib_wq {
struct ib_wq ibwq; struct ib_wq ibwq;
struct ib_umem *umem; struct mana_ib_queue queue;
int wqe; int wqe;
u32 wq_buf_size; u32 wq_buf_size;
u64 gdma_region;
u64 id;
mana_handle_t rx_object; mana_handle_t rx_object;
}; };
@ -82,22 +89,17 @@ struct mana_ib_mr {
struct mana_ib_cq { struct mana_ib_cq {
struct ib_cq ibcq; struct ib_cq ibcq;
struct ib_umem *umem; struct mana_ib_queue queue;
int cqe; int cqe;
u64 gdma_region;
u64 id;
u32 comp_vector; u32 comp_vector;
mana_handle_t cq_handle;
}; };
struct mana_ib_qp { struct mana_ib_qp {
struct ib_qp ibqp; struct ib_qp ibqp;
/* Work queue info */ mana_handle_t qp_handle;
struct ib_umem *sq_umem; struct mana_ib_queue raw_sq;
int sqe;
u64 sq_gdma_region;
u64 sq_id;
mana_handle_t tx_object;
/* The port on the IB device, starting with 1 */ /* The port on the IB device, starting with 1 */
u32 port; u32 port;
@ -114,6 +116,12 @@ struct mana_ib_rwq_ind_table {
enum mana_ib_command_code { enum mana_ib_command_code {
MANA_IB_GET_ADAPTER_CAP = 0x30001, MANA_IB_GET_ADAPTER_CAP = 0x30001,
MANA_IB_CREATE_ADAPTER = 0x30002,
MANA_IB_DESTROY_ADAPTER = 0x30003,
MANA_IB_CONFIG_IP_ADDR = 0x30004,
MANA_IB_CONFIG_MAC_ADDR = 0x30005,
MANA_IB_CREATE_CQ = 0x30008,
MANA_IB_DESTROY_CQ = 0x30009,
}; };
struct mana_ib_query_adapter_caps_req { struct mana_ib_query_adapter_caps_req {
@ -142,6 +150,86 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_inline_data_size; u32 max_inline_data_size;
}; /* HW Data */ }; /* HW Data */
struct mana_rnic_create_adapter_req {
struct gdma_req_hdr hdr;
u32 notify_eq_id;
u32 reserved;
u64 feature_flags;
}; /*HW Data */
struct mana_rnic_create_adapter_resp {
struct gdma_resp_hdr hdr;
mana_handle_t adapter;
}; /* HW Data */
struct mana_rnic_destroy_adapter_req {
struct gdma_req_hdr hdr;
mana_handle_t adapter;
}; /*HW Data */
struct mana_rnic_destroy_adapter_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */
enum mana_ib_addr_op {
ADDR_OP_ADD = 1,
ADDR_OP_REMOVE = 2,
};
enum sgid_entry_type {
SGID_TYPE_IPV4 = 1,
SGID_TYPE_IPV6 = 2,
};
struct mana_rnic_config_addr_req {
struct gdma_req_hdr hdr;
mana_handle_t adapter;
enum mana_ib_addr_op op;
enum sgid_entry_type sgid_type;
u8 ip_addr[16];
}; /* HW Data */
struct mana_rnic_config_addr_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */
struct mana_rnic_config_mac_addr_req {
struct gdma_req_hdr hdr;
mana_handle_t adapter;
enum mana_ib_addr_op op;
u8 mac_addr[ETH_ALEN];
u8 reserved[6];
}; /* HW Data */
struct mana_rnic_config_mac_addr_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */
struct mana_rnic_create_cq_req {
struct gdma_req_hdr hdr;
mana_handle_t adapter;
u64 gdma_region;
u32 eq_id;
u32 doorbell_page;
}; /* HW Data */
struct mana_rnic_create_cq_resp {
struct gdma_resp_hdr hdr;
mana_handle_t cq_handle;
u32 cq_id;
u32 reserved;
}; /* HW Data */
struct mana_rnic_destroy_cq_req {
struct gdma_req_hdr hdr;
mana_handle_t adapter;
mana_handle_t cq_handle;
}; /* HW Data */
struct mana_rnic_destroy_cq_resp {
struct gdma_resp_hdr hdr;
}; /* HW Data */
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev) static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
{ {
return mdev->gdma_dev->gdma_context; return mdev->gdma_dev->gdma_context;
@ -158,7 +246,16 @@ static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32
return mc->ports[port - 1]; return mc->ports[port - 1];
} }
static inline void copy_in_reverse(u8 *dst, const u8 *src, u32 size)
{
u32 i;
for (i = 0; i < size; i++)
dst[size - 1 - i] = src[i];
}
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq); int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem, int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
mana_handle_t *gdma_region); mana_handle_t *gdma_region);
@ -169,6 +266,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region); mana_handle_t gdma_region);
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
struct mana_ib_queue *queue);
void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd, struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr, struct ib_wq_init_attr *init_attr,
struct ib_udata *udata); struct ib_udata *udata);
@ -231,4 +332,26 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext); void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev); int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
void mana_ib_destroy_eqs(struct mana_ib_dev *mdev);
int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev);
int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev);
int mana_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
enum rdma_link_layer mana_ib_get_link_layer(struct ib_device *device, u32 port_num);
int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context);
int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context);
int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac);
int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell);
int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
#endif #endif


@@ -135,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	}
 	ibdev_dbg(ibdev,
-		  "create_dma_region ret %d gdma_region %llx\n", err,
+		  "created dma region for user-mr 0x%llx\n",
 		  dma_region_handle);
 	mr_params.pd_handle = pd->pd_handle;


@ -95,11 +95,9 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp); struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev = struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev); container_of(pd->device, struct mana_ib_dev, ib_dev);
struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl; struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {}; struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {}; struct mana_ib_create_qp_rss ucmd = {};
struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table; mana_handle_t *mana_ind_table;
struct mana_port_context *mpc; struct mana_port_context *mpc;
unsigned int ind_tbl_size; unsigned int ind_tbl_size;
@ -173,13 +171,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail; goto fail;
} }
gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
GFP_KERNEL);
if (!gdma_cq_allocated) {
ret = -ENOMEM;
goto fail;
}
qp->port = port; qp->port = port;
for (i = 0; i < ind_tbl_size; i++) { for (i = 0; i < ind_tbl_size; i++) {
@ -192,13 +183,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ibcq = ibwq->cq; ibcq = ibwq->cq;
cq = container_of(ibcq, struct mana_ib_cq, ibcq); cq = container_of(ibcq, struct mana_ib_cq, ibcq);
wq_spec.gdma_region = wq->gdma_region; wq_spec.gdma_region = wq->queue.gdma_region;
wq_spec.queue_size = wq->wq_buf_size; wq_spec.queue_size = wq->wq_buf_size;
cq_spec.gdma_region = cq->gdma_region; cq_spec.gdma_region = cq->queue.gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE; cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0; cq_spec.modr_ctx_id = 0;
eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues]; eq = &mpc->ac->eqs[cq->comp_vector];
cq_spec.attached_eq = eq->eq->id; cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ, ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@ -210,18 +201,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
} }
/* The GDMA regions are now owned by the WQ object */ /* The GDMA regions are now owned by the WQ object */
wq->gdma_region = GDMA_INVALID_DMA_REGION; wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
cq->gdma_region = GDMA_INVALID_DMA_REGION; cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
wq->id = wq_spec.queue_index; wq->queue.id = wq_spec.queue_index;
cq->id = cq_spec.queue_index; cq->queue.id = cq_spec.queue_index;
ibdev_dbg(&mdev->ib_dev, ibdev_dbg(&mdev->ib_dev,
"ret %d rx_object 0x%llx wq id %llu cq id %llu\n", "rx_object 0x%llx wq id %llu cq id %llu\n",
ret, wq->rx_object, wq->id, cq->id); wq->rx_object, wq->queue.id, cq->queue.id);
resp.entries[i].cqid = cq->id; resp.entries[i].cqid = cq->queue.id;
resp.entries[i].wqid = wq->id; resp.entries[i].wqid = wq->queue.id;
mana_ind_table[i] = wq->rx_object; mana_ind_table[i] = wq->rx_object;
@ -229,8 +220,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ret = mana_ib_install_cq_cb(mdev, cq); ret = mana_ib_install_cq_cb(mdev, cq);
if (ret) if (ret)
goto fail; goto fail;
gdma_cq_allocated[i] = gc->cq_table[cq->id];
} }
resp.num_entries = i; resp.num_entries = i;
@ -250,7 +239,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
goto fail; goto fail;
} }
kfree(gdma_cq_allocated);
kfree(mana_ind_table); kfree(mana_ind_table);
return 0; return 0;
@ -262,13 +250,10 @@ fail:
wq = container_of(ibwq, struct mana_ib_wq, ibwq); wq = container_of(ibwq, struct mana_ib_wq, ibwq);
cq = container_of(ibcq, struct mana_ib_cq, ibcq); cq = container_of(ibcq, struct mana_ib_cq, ibcq);
gc->cq_table[cq->id] = NULL; mana_ib_remove_cq_cb(mdev, cq);
kfree(gdma_cq_allocated[i]);
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object); mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
} }
kfree(gdma_cq_allocated);
kfree(mana_ind_table); kfree(mana_ind_table);
return ret; return ret;
@ -287,15 +272,12 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext = struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext); ibucontext);
struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {}; struct mana_ib_create_qp_resp resp = {};
struct mana_ib_create_qp ucmd = {}; struct mana_ib_create_qp ucmd = {};
struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {}; struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {}; struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc; struct mana_port_context *mpc;
struct net_device *ndev; struct net_device *ndev;
struct ib_umem *umem;
struct mana_eq *eq; struct mana_eq *eq;
int eq_vec; int eq_vec;
u32 port; u32 port;
@ -344,56 +326,39 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n", ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
ucmd.sq_buf_addr, ucmd.port); ucmd.sq_buf_addr, ucmd.port);
umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size, err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
err = PTR_ERR(umem);
ibdev_dbg(&mdev->ib_dev,
"Failed to get umem for create qp-raw, err %d\n",
err);
goto err_free_vport;
}
qp->sq_umem = umem;
err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
&qp->sq_gdma_region);
if (err) { if (err) {
ibdev_dbg(&mdev->ib_dev, ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n", "Failed to create queue for create qp-raw, err %d\n", err);
err); goto err_free_vport;
goto err_release_umem;
} }
ibdev_dbg(&mdev->ib_dev,
"create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);
/* Create a WQ on the same port handle used by the Ethernet */ /* Create a WQ on the same port handle used by the Ethernet */
wq_spec.gdma_region = qp->sq_gdma_region; wq_spec.gdma_region = qp->raw_sq.gdma_region;
wq_spec.queue_size = ucmd.sq_buf_size; wq_spec.queue_size = ucmd.sq_buf_size;
cq_spec.gdma_region = send_cq->gdma_region; cq_spec.gdma_region = send_cq->queue.gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE; cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0; cq_spec.modr_ctx_id = 0;
eq_vec = send_cq->comp_vector % gc->max_num_queues; eq_vec = send_cq->comp_vector;
eq = &mpc->ac->eqs[eq_vec]; eq = &mpc->ac->eqs[eq_vec];
cq_spec.attached_eq = eq->eq->id; cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec, err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
&cq_spec, &qp->tx_object); &cq_spec, &qp->qp_handle);
if (err) { if (err) {
ibdev_dbg(&mdev->ib_dev, ibdev_dbg(&mdev->ib_dev,
"Failed to create wq for create raw-qp, err %d\n", "Failed to create wq for create raw-qp, err %d\n",
err); err);
goto err_destroy_dma_region; goto err_destroy_queue;
} }
/* The GDMA regions are now owned by the WQ object */ /* The GDMA regions are now owned by the WQ object */
qp->sq_gdma_region = GDMA_INVALID_DMA_REGION; qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
send_cq->gdma_region = GDMA_INVALID_DMA_REGION; send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
qp->sq_id = wq_spec.queue_index; qp->raw_sq.id = wq_spec.queue_index;
send_cq->id = cq_spec.queue_index; send_cq->queue.id = cq_spec.queue_index;
/* Create CQ table entry */ /* Create CQ table entry */
err = mana_ib_install_cq_cb(mdev, send_cq); err = mana_ib_install_cq_cb(mdev, send_cq);
@ -401,11 +366,11 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto err_destroy_wq_obj; goto err_destroy_wq_obj;
ibdev_dbg(&mdev->ib_dev, ibdev_dbg(&mdev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err, "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
qp->tx_object, qp->sq_id, send_cq->id); qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);
resp.sqid = qp->sq_id; resp.sqid = qp->raw_sq.id;
resp.cqid = send_cq->id; resp.cqid = send_cq->queue.id;
resp.tx_vp_offset = pd->tx_vp_offset; resp.tx_vp_offset = pd->tx_vp_offset;
err = ib_copy_to_udata(udata, &resp, sizeof(resp)); err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@ -413,23 +378,19 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev, ibdev_dbg(&mdev->ib_dev,
"Failed copy udata for create qp-raw, %d\n", "Failed copy udata for create qp-raw, %d\n",
err); err);
goto err_release_gdma_cq; goto err_remove_cq_cb;
} }
return 0; return 0;
err_release_gdma_cq: err_remove_cq_cb:
kfree(gdma_cq); mana_ib_remove_cq_cb(mdev, send_cq);
gc->cq_table[send_cq->id] = NULL;
err_destroy_wq_obj: err_destroy_wq_obj:
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
err_destroy_dma_region: err_destroy_queue:
mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region); mana_ib_destroy_queue(mdev, &qp->raw_sq);
err_release_umem:
ib_umem_release(umem);
err_free_vport: err_free_vport:
mana_ib_uncfg_vport(mdev, pd, port); mana_ib_uncfg_vport(mdev, pd, port);
@ -503,12 +464,9 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
mpc = netdev_priv(ndev); mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd); pd = container_of(ibpd, struct mana_ib_pd, ibpd);
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object); mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
if (qp->sq_umem) { mana_ib_destroy_queue(mdev, &qp->raw_sq);
mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
ib_umem_release(qp->sq_umem);
}
mana_ib_uncfg_vport(mdev, pd, qp->port); mana_ib_uncfg_vport(mdev, pd, qp->port);


@@ -13,7 +13,6 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 		container_of(pd->device, struct mana_ib_dev, ib_dev);
 	struct mana_ib_create_wq ucmd = {};
 	struct mana_ib_wq *wq;
-	struct ib_umem *umem;
 	int err;

 	if (udata->inlen < sizeof(ucmd))
@@ -32,39 +31,18 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 	ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);

-	umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
-			   IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(umem)) {
-		err = PTR_ERR(umem);
+	err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to get umem for create wq, err %d\n", err);
+			  "Failed to create queue for create wq, %d\n", err);
 		goto err_free_wq;
 	}

-	wq->umem = umem;
 	wq->wqe = init_attr->max_wr;
 	wq->wq_buf_size = ucmd.wq_buf_size;
 	wq->rx_object = INVALID_MANA_HANDLE;

-	err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
-	if (err) {
-		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to create dma region for create wq, %d\n",
-			  err);
-		goto err_release_umem;
-	}
-
-	ibdev_dbg(&mdev->ib_dev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, wq->gdma_region);
-
-	/* WQ ID is returned at wq_create time, doesn't know the value yet */
-
 	return &wq->ibwq;

-err_release_umem:
-	ib_umem_release(umem);
-
 err_free_wq:
 	kfree(wq);
@@ -86,8 +64,7 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 	mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);

-	mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
-	ib_umem_release(wq->umem);
+	mana_ib_destroy_queue(mdev, &wq->queue);

 	kfree(wq);


@@ -264,8 +264,7 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
 	 */
 	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
 	ndev = ibdev->port[port_num - 1].roce.netdev;
-	if (ndev)
-		dev_hold(ndev);
+	dev_hold(ndev);
 	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
 out:


@@ -30,6 +30,7 @@
  * SOFTWARE.
  */

+#include <linux/io.h>
 #include <rdma/ib_umem_odp.h>
 #include "mlx5_ib.h"
 #include <linux/jiffies.h>
@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
 	__be32 mmio_wqe[16] = {};
 	unsigned long flags;
 	unsigned int idx;
-	int i;

 	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
 		return -EIO;
@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
 	 * we hit doorbell
 	 */
 	wmb();
-	for (i = 0; i < 8; i++)
-		mlx5_write64(&mmio_wqe[i * 2],
-			     bf->bfreg->map + bf->offset + i * 8);
-	io_stop_wc();
+	__iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
+			 sizeof(mmio_wqe) / 8);

 	bf->offset ^= bf->buf_size;


@@ -643,9 +643,10 @@ struct mlx5_ib_mkey {
 	unsigned int ndescs;
 	struct wait_queue_head wait;
 	refcount_t usecount;
-	/* User Mkey must hold either a rb_key or a cache_ent. */
+	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
 	struct mlx5r_cache_rb_key rb_key;
 	struct mlx5_cache_ent *cache_ent;
+	u8 cacheable : 1;
 };

 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)


@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 		if (IS_ERR(mr))
 			return mr;
 		mr->mmkey.rb_key = rb_key;
+		mr->mmkey.cacheable = true;
 		return mr;
 	}
@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	mr->ibmr.pd = pd;
 	mr->umem = umem;
 	mr->page_shift = order_base_2(page_size);
+	mr->mmkey.cacheable = true;
 	set_mr_fields(dev, mr, umem->length, access_flags, iova);

 	return mr;
@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
 	unsigned int diffs = current_access_flags ^ target_access_flags;

 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
-		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+		      IB_ACCESS_REMOTE_ATOMIC))
 		return false;
 	return mlx5r_umr_can_reconfig(dev, current_access_flags,
 				      target_access_flags);
@@ -1835,6 +1838,23 @@ end:
 	return ret;
 }

+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+		return 0;
+
+	if (ent) {
+		spin_lock_irq(&ent->mkeys_queue.lock);
+		ent->in_use--;
+		mr->mmkey.cache_ent = NULL;
+		spin_unlock_irq(&ent->mkeys_queue.lock);
+	}
+
+	return destroy_mkey(dev, mr);
+}
+
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	}

 	/* Stop DMA */
-	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
-		if (mlx5r_umr_revoke_mr(mr) ||
-		    cache_ent_find_and_store(dev, mr))
-			mr->mmkey.cache_ent = NULL;
-
-	if (!mr->mmkey.cache_ent) {
-		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
-		if (rc)
-			return rc;
-	}
+	rc = mlx5_revoke_mr(mr);
+	if (rc)
+		return rc;

 	if (mr->umem) {
 		bool is_odp = is_odp_mr(mr);


@@ -3097,7 +3097,6 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	switch (qp->type) {
 	case MLX5_IB_QPT_DCT:
 		err = create_dct(dev, pd, qp, params);
-		rdma_restrack_no_track(&qp->ibqp.res);
 		break;
 	case MLX5_IB_QPT_DCI:
 		err = create_dci(dev, pd, qp, params);
@@ -3109,9 +3108,9 @@ static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		err = mlx5_ib_create_gsi(pd, qp, params->attr);
 		break;
 	case MLX5_IB_QPT_HW_GSI:
-	case MLX5_IB_QPT_REG_UMR:
 		rdma_restrack_no_track(&qp->ibqp.res);
 		fallthrough;
+	case MLX5_IB_QPT_REG_UMR:
 	default:
 		if (params->udata)
 			err = create_user_qp(dev, pd, qp, params);


@@ -156,6 +156,34 @@ static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
 	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
 }

+static int fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	int ret;
+
+	if (qp->type < IB_QPT_DRIVER)
+		return 0;
+
+	switch (qp->type) {
+	case MLX5_IB_QPT_REG_UMR:
+		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE,
+				     "REG_UMR");
+		break;
+	case MLX5_IB_QPT_DCT:
+		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCT");
+		break;
+	case MLX5_IB_QPT_DCI:
+		ret = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUBTYPE, "DCI");
+		break;
+	default:
+		return 0;
+	}
+	if (ret)
+		return ret;
+
+	return nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, IB_QPT_DRIVER);
+}
+
 static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
@@ -168,6 +196,7 @@ static const struct ib_device_ops restrack_ops = {
 	.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
 	.fill_res_mr_entry = fill_res_mr_entry,
 	.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
+	.fill_res_qp_entry = fill_res_qp_entry,
 	.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
 	.fill_stat_mr_entry = fill_stat_mr_entry,
 };


@ -122,25 +122,16 @@ void retransmit_timer(struct timer_list *t)
spin_lock_irqsave(&qp->state_lock, flags); spin_lock_irqsave(&qp->state_lock, flags);
if (qp->valid) { if (qp->valid) {
qp->comp.timeout = 1; qp->comp.timeout = 1;
rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->send_task);
} }
spin_unlock_irqrestore(&qp->state_lock, flags); spin_unlock_irqrestore(&qp->state_lock, flags);
} }
void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{ {
int must_sched; rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
skb_queue_tail(&qp->resp_pkts, skb); skb_queue_tail(&qp->resp_pkts, skb);
rxe_sched_task(&qp->send_task);
must_sched = skb_queue_len(&qp->resp_pkts) > 1;
if (must_sched != 0)
rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
if (must_sched)
rxe_sched_task(&qp->comp.task);
else
rxe_run_task(&qp->comp.task);
} }
static inline enum comp_state get_wqe(struct rxe_qp *qp, static inline enum comp_state get_wqe(struct rxe_qp *qp,
@ -325,7 +316,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn; qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) { if (qp->req.wait_psn) {
qp->req.wait_psn = 0; qp->req.wait_psn = 0;
rxe_sched_task(&qp->req.task); qp->req.again = 1;
} }
} }
return COMPST_ERROR_RETRY; return COMPST_ERROR_RETRY;
@ -476,7 +467,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/ */
if (qp->req.wait_fence) { if (qp->req.wait_fence) {
qp->req.wait_fence = 0; qp->req.wait_fence = 0;
rxe_sched_task(&qp->req.task); qp->req.again = 1;
} }
} }
@ -515,7 +506,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) { if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0; qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0; qp->req.need_rd_atomic = 0;
rxe_sched_task(&qp->req.task); qp->req.again = 1;
} }
} }
@ -541,7 +532,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
if (qp->req.wait_psn) { if (qp->req.wait_psn) {
qp->req.wait_psn = 0; qp->req.wait_psn = 0;
rxe_sched_task(&qp->req.task); qp->req.again = 1;
} }
} }
@ -654,6 +645,8 @@ int rxe_completer(struct rxe_qp *qp)
int ret; int ret;
unsigned long flags; unsigned long flags;
qp->req.again = 0;
spin_lock_irqsave(&qp->state_lock, flags); spin_lock_irqsave(&qp->state_lock, flags);
if (!qp->valid || qp_state(qp) == IB_QPS_ERR || if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
qp_state(qp) == IB_QPS_RESET) { qp_state(qp) == IB_QPS_RESET) {
@ -737,7 +730,7 @@ int rxe_completer(struct rxe_qp *qp)
if (qp->req.wait_psn) { if (qp->req.wait_psn) {
qp->req.wait_psn = 0; qp->req.wait_psn = 0;
rxe_sched_task(&qp->req.task); qp->req.again = 1;
} }
state = COMPST_DONE; state = COMPST_DONE;
@ -792,7 +785,7 @@ int rxe_completer(struct rxe_qp *qp)
RXE_CNT_COMP_RETRY); RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1; qp->req.need_retry = 1;
qp->comp.started_retry = 1; qp->comp.started_retry = 1;
rxe_sched_task(&qp->req.task); qp->req.again = 1;
} }
goto done; goto done;
@ -843,8 +836,9 @@ done:
ret = 0; ret = 0;
goto out; goto out;
exit: exit:
ret = -EAGAIN; ret = (qp->req.again) ? 0 : -EAGAIN;
out: out:
qp->req.again = 0;
if (pkt) if (pkt)
free_pkt(pkt); free_pkt(pkt);
return ret; return ret;


@@ -14,7 +14,7 @@ static const struct rdma_stat_desc rxe_counter_descs[] = {
 	[RXE_CNT_RCV_RNR].name = "rcvd_rnr_err",
 	[RXE_CNT_SND_RNR].name = "send_rnr_err",
 	[RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err",
-	[RXE_CNT_COMPLETER_SCHED].name = "ack_deferred",
+	[RXE_CNT_SENDER_SCHED].name = "ack_deferred",
 	[RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err",
 	[RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err",
 	[RXE_CNT_COMP_RETRY].name = "completer_retry_err",


@@ -18,7 +18,7 @@ enum rxe_counters {
 	RXE_CNT_RCV_RNR,
 	RXE_CNT_SND_RNR,
 	RXE_CNT_RCV_SEQ_ERR,
-	RXE_CNT_COMPLETER_SCHED,
+	RXE_CNT_SENDER_SCHED,
 	RXE_CNT_RETRY_EXCEEDED,
 	RXE_CNT_RNR_RETRY_EXCEEDED,
 	RXE_CNT_COMP_RETRY,


@@ -164,7 +164,8 @@ void rxe_dealloc(struct ib_device *ib_dev);
 int rxe_completer(struct rxe_qp *qp);
 int rxe_requester(struct rxe_qp *qp);
-int rxe_responder(struct rxe_qp *qp);
+int rxe_sender(struct rxe_qp *qp);
+int rxe_receiver(struct rxe_qp *qp);

 /* rxe_icrc.c */
 int rxe_icrc_init(struct rxe_dev *rxe);


@ -345,46 +345,52 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
static void rxe_skb_tx_dtor(struct sk_buff *skb) static void rxe_skb_tx_dtor(struct sk_buff *skb)
{ {
struct sock *sk = skb->sk; struct net_device *ndev = skb->dev;
struct rxe_qp *qp = sk->sk_user_data; struct rxe_dev *rxe;
int skb_out = atomic_dec_return(&qp->skb_out); unsigned int qp_index;
struct rxe_qp *qp;
int skb_out;
if (unlikely(qp->need_req_skb && rxe = rxe_get_dev_from_net(ndev);
skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) if (!rxe && is_vlan_dev(ndev))
rxe_sched_task(&qp->req.task); rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
if (WARN_ON(!rxe))
return;
qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
if (!qp_index)
return;
qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
if (!qp)
goto put_dev;
skb_out = atomic_dec_return(&qp->skb_out);
if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
rxe_sched_task(&qp->send_task);
rxe_put(qp); rxe_put(qp);
put_dev:
ib_device_put(&rxe->ib_dev);
sock_put(skb->sk);
} }
static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt) static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{ {
int err; int err;
struct sock *sk = pkt->qp->sk->sk;
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor; skb->destructor = rxe_skb_tx_dtor;
skb->sk = pkt->qp->sk->sk;
rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out); atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP)) { if (skb->protocol == htons(ETH_P_IP))
err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else if (skb->protocol == htons(ETH_P_IPV6)) { else
err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else {
rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n",
skb->protocol);
atomic_dec(&pkt->qp->skb_out);
rxe_put(pkt->qp);
kfree_skb(skb);
return -EINVAL;
}
if (unlikely(net_xmit_eval(err))) { return err;
rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
return -EAGAIN;
}
return 0;
} }
/* fix up a send packet to match the packets /* fix up a send packet to match the packets
@ -392,8 +398,15 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
*/ */
static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt) static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{ {
struct sock *sk = pkt->qp->sk->sk;
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt)); memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
sock_hold(sk);
skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP)) if (skb->protocol == htons(ETH_P_IP))
skb_pull(skb, sizeof(struct iphdr)); skb_pull(skb, sizeof(struct iphdr));
else else
@ -440,12 +453,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
return err; return err;
} }
if ((qp_type(qp) != IB_QPT_RC) &&
(pkt->mask & RXE_END_MASK)) {
pkt->wqe->state = wqe_state_done;
rxe_sched_task(&qp->comp.task);
}
rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS); rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
goto done; goto done;


@@ -119,7 +119,7 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
 		      bool sleepable)
 {
-	int err;
+	int err = -EINVAL;
 	gfp_t gfp_flags;

 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
@@ -147,7 +147,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
 err_cnt:
 	atomic_dec(&pool->num_elem);
-	return -EINVAL;
+	return err;
 }

 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)


@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk); err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0) if (err < 0)
return err; return err;
qp->sk->sk->sk_user_data = qp; qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
/* pick a source UDP port number for this QP based on /* pick a source UDP port number for this QP based on
* the source QPN. this spreads traffic for different QPs * the source QPN. this spreads traffic for different QPs
@ -265,8 +265,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->req.opcode = -1; qp->req.opcode = -1;
qp->comp.opcode = -1; qp->comp.opcode = -1;
rxe_init_task(&qp->req.task, qp, rxe_requester); rxe_init_task(&qp->send_task, qp, rxe_sender);
rxe_init_task(&qp->comp.task, qp, rxe_completer);
qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
if (init->qp_type == IB_QPT_RC) { if (init->qp_type == IB_QPT_RC) {
@ -337,7 +336,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
return err; return err;
} }
rxe_init_task(&qp->resp.task, qp, rxe_responder); rxe_init_task(&qp->recv_task, qp, rxe_receiver);
qp->resp.opcode = OPCODE_NONE; qp->resp.opcode = OPCODE_NONE;
qp->resp.msn = 0; qp->resp.msn = 0;
@ -514,14 +513,12 @@ err1:
static void rxe_qp_reset(struct rxe_qp *qp) static void rxe_qp_reset(struct rxe_qp *qp)
{ {
/* stop tasks from running */ /* stop tasks from running */
rxe_disable_task(&qp->resp.task); rxe_disable_task(&qp->recv_task);
rxe_disable_task(&qp->comp.task); rxe_disable_task(&qp->send_task);
rxe_disable_task(&qp->req.task);
/* drain work and packet queuesc */ /* drain work and packet queuesc */
rxe_requester(qp); rxe_sender(qp);
rxe_completer(qp); rxe_receiver(qp);
rxe_responder(qp);
if (qp->rq.queue) if (qp->rq.queue)
rxe_queue_reset(qp->rq.queue); rxe_queue_reset(qp->rq.queue);
@ -548,9 +545,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
cleanup_rd_atomic_resources(qp); cleanup_rd_atomic_resources(qp);
/* reenable tasks */ /* reenable tasks */
rxe_enable_task(&qp->resp.task); rxe_enable_task(&qp->recv_task);
rxe_enable_task(&qp->comp.task); rxe_enable_task(&qp->send_task);
rxe_enable_task(&qp->req.task);
} }
/* move the qp to the error state */ /* move the qp to the error state */
@ -562,9 +558,8 @@ void rxe_qp_error(struct rxe_qp *qp)
qp->attr.qp_state = IB_QPS_ERR; qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */ /* drain work and packet queues */
rxe_sched_task(&qp->resp.task); rxe_sched_task(&qp->recv_task);
rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->send_task);
rxe_sched_task(&qp->req.task);
spin_unlock_irqrestore(&qp->state_lock, flags); spin_unlock_irqrestore(&qp->state_lock, flags);
} }
@ -575,8 +570,7 @@ static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
spin_lock_irqsave(&qp->state_lock, flags); spin_lock_irqsave(&qp->state_lock, flags);
qp->attr.sq_draining = 1; qp->attr.sq_draining = 1;
rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->send_task);
rxe_sched_task(&qp->req.task);
spin_unlock_irqrestore(&qp->state_lock, flags); spin_unlock_irqrestore(&qp->state_lock, flags);
} }
@ -821,19 +815,15 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
del_timer_sync(&qp->rnr_nak_timer); del_timer_sync(&qp->rnr_nak_timer);
} }
if (qp->resp.task.func) if (qp->recv_task.func)
rxe_cleanup_task(&qp->resp.task); rxe_cleanup_task(&qp->recv_task);
if (qp->req.task.func) if (qp->send_task.func)
rxe_cleanup_task(&qp->req.task); rxe_cleanup_task(&qp->send_task);
if (qp->comp.task.func)
rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */ /* flush out any receive wr's or pending requests */
rxe_requester(qp); rxe_sender(qp);
rxe_completer(qp); rxe_receiver(qp);
rxe_responder(qp);
if (qp->sq.queue) if (qp->sq.queue)
rxe_queue_cleanup(qp->sq.queue); rxe_queue_cleanup(qp->sq.queue);


@@ -108,7 +108,7 @@ void rnr_nak_timer(struct timer_list *t)
 		/* request a send queue retry */
 		qp->req.need_retry = 1;
 		qp->req.wait_for_rnr_timer = 0;
-		rxe_sched_task(&qp->req.task);
+		rxe_sched_task(&qp->send_task);
 	}
 	spin_unlock_irqrestore(&qp->state_lock, flags);
 }
@@ -545,6 +545,8 @@ static void update_wqe_state(struct rxe_qp *qp,
 	if (pkt->mask & RXE_END_MASK) {
 		if (qp_type(qp) == IB_QPT_RC)
 			wqe->state = wqe_state_pending;
+		else
+			wqe->state = wqe_state_done;
 	} else {
 		wqe->state = wqe_state_processing;
 	}
@@ -573,30 +575,6 @@ static void update_wqe_psn(struct rxe_qp *qp,
 		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
 }
-static void save_state(struct rxe_send_wqe *wqe,
-		       struct rxe_qp *qp,
-		       struct rxe_send_wqe *rollback_wqe,
-		       u32 *rollback_psn)
-{
-	rollback_wqe->state = wqe->state;
-	rollback_wqe->first_psn = wqe->first_psn;
-	rollback_wqe->last_psn = wqe->last_psn;
-	rollback_wqe->dma = wqe->dma;
-	*rollback_psn = qp->req.psn;
-}
-static void rollback_state(struct rxe_send_wqe *wqe,
-			   struct rxe_qp *qp,
-			   struct rxe_send_wqe *rollback_wqe,
-			   u32 rollback_psn)
-{
-	wqe->state = rollback_wqe->state;
-	wqe->first_psn = rollback_wqe->first_psn;
-	wqe->last_psn = rollback_wqe->last_psn;
-	wqe->dma = rollback_wqe->dma;
-	qp->req.psn = rollback_psn;
-}
 static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 {
 	qp->req.opcode = pkt->opcode;
@@ -655,12 +633,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	wqe->status = IB_WC_SUCCESS;
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
-	/* There is no ack coming for local work requests
-	 * which can lead to a deadlock. So go ahead and complete
-	 * it now.
-	 */
-	rxe_sched_task(&qp->comp.task);
 	return 0;
 }
@@ -676,8 +648,6 @@ int rxe_requester(struct rxe_qp *qp)
 	int opcode;
 	int err;
 	int ret;
-	struct rxe_send_wqe rollback_wqe;
-	u32 rollback_psn;
 	struct rxe_queue *q = qp->sq.queue;
 	struct rxe_ah *ah;
 	struct rxe_av *av;
@@ -786,7 +756,6 @@ int rxe_requester(struct rxe_qp *qp)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			rxe_sched_task(&qp->comp.task);
 			goto done;
 		}
 		payload = mtu;
@@ -799,9 +768,6 @@ int rxe_requester(struct rxe_qp *qp)
 	pkt.mask = rxe_opcode[opcode].mask;
 	pkt.wqe = wqe;
-	/* save wqe state before we build and send packet */
-	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
 	av = rxe_get_av(&pkt, &ah);
 	if (unlikely(!av)) {
 		rxe_dbg_qp(qp, "Failed no address vector\n");
@@ -834,31 +800,14 @@ int rxe_requester(struct rxe_qp *qp)
 	if (ah)
 		rxe_put(ah);
-	/* update wqe state as though we had sent it */
-	update_wqe_state(qp, wqe, &pkt);
-	update_wqe_psn(qp, wqe, &pkt, payload);
 	err = rxe_xmit_packet(qp, &pkt, skb);
 	if (err) {
-		if (err != -EAGAIN) {
-			wqe->status = IB_WC_LOC_QP_OP_ERR;
-			goto err;
-		}
-		/* the packet was dropped so reset wqe to the state
-		 * before we sent it so we can try to resend
-		 */
-		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
-		/* force a delay until the dropped packet is freed and
-		 * the send queue is drained below the low water mark
-		 */
-		qp->need_req_skb = 1;
-		rxe_sched_task(&qp->req.task);
-		goto exit;
+		wqe->status = IB_WC_LOC_QP_OP_ERR;
+		goto err;
 	}
+	update_wqe_state(qp, wqe, &pkt);
+	update_wqe_psn(qp, wqe, &pkt, payload);
 	update_state(qp, &pkt);
 	/* A non-zero return value will cause rxe_do_task to
@@ -878,3 +827,20 @@ exit:
 out:
 	return ret;
 }
+
+int rxe_sender(struct rxe_qp *qp)
+{
+	int req_ret;
+	int comp_ret;
+
+	/* process the send queue */
+	req_ret = rxe_requester(qp);
+
+	/* process the response queue */
+	comp_ret = rxe_completer(qp);
+
+	/* exit the task loop if both requester and completer
+	 * are ready
+	 */
+	return (req_ret && comp_ret) ? -EAGAIN : 0;
+}
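
Note on the rxe_sender() helper added above: the requester and completer now share one task, and (as the truncated in-tree comment "A non-zero return value will cause rxe_do_task to ..." indicates) the task machinery keeps re-invoking the task function until it returns non-zero. The snippet below is only an illustrative model of that return-value convention; the wrapper name is made up and the real loop lives in the driver's rxe_do_task(), which is not reproduced here.

/* Illustrative model only, not driver code. */
static void drive_send_task_model(struct rxe_qp *qp)
{
	int ret;

	do {
		/* 0: some work was done, call again;
		 * -EAGAIN: both requester and completer are idle, stop.
		 */
		ret = rxe_sender(qp);
	} while (ret == 0);
}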

View File

@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
 /* rxe_recv calls here to add a request packet to the input queue */
 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 {
-	int must_sched;
-	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 	skb_queue_tail(&qp->req_pkts, skb);
-	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
-		     (skb_queue_len(&qp->req_pkts) > 1);
-	if (must_sched)
-		rxe_sched_task(&qp->resp.task);
-	else
-		rxe_run_task(&qp->resp.task);
+	rxe_sched_task(&qp->recv_task);
 }
 static inline enum resp_states get_req(struct rxe_qp *qp,
@@ -1485,7 +1475,7 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
 	qp->resp.wqe = NULL;
 }
-int rxe_responder(struct rxe_qp *qp)
+int rxe_receiver(struct rxe_qp *qp)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	enum resp_states state;

View File

@@ -888,6 +888,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
 {
 	int err = 0;
 	unsigned long flags;
+	int good = 0;
 	spin_lock_irqsave(&qp->sq.sq_lock, flags);
 	while (ibwr) {
@@ -895,18 +896,16 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
 		if (err) {
 			*bad_wr = ibwr;
 			break;
+		} else {
+			good++;
 		}
 		ibwr = ibwr->next;
 	}
 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
-	if (!err)
-		rxe_sched_task(&qp->req.task);
-	spin_lock_irqsave(&qp->state_lock, flags);
-	if (qp_state(qp) == IB_QPS_ERR)
-		rxe_sched_task(&qp->comp.task);
-	spin_unlock_irqrestore(&qp->state_lock, flags);
+	/* kickoff processing of any posted wqes */
+	if (good)
+		rxe_sched_task(&qp->send_task);
 	return err;
 }
@@ -936,7 +935,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->req.task);
+		rxe_sched_task(&qp->send_task);
 	} else {
 		err = rxe_post_send_kernel(qp, wr, bad_wr);
 		if (err)
@@ -1046,7 +1045,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qp->state_lock, flags);
 	if (qp_state(qp) == IB_QPS_ERR)
-		rxe_sched_task(&qp->resp.task);
+		rxe_sched_task(&qp->recv_task);
 	spin_unlock_irqrestore(&qp->state_lock, flags);
 	return err;

View File

@@ -113,7 +113,7 @@ struct rxe_req_info {
 	int need_retry;
 	int wait_for_rnr_timer;
 	int noack_pkts;
-	struct rxe_task task;
+	int again;
 };
 struct rxe_comp_info {
@@ -124,7 +124,6 @@ struct rxe_comp_info {
 	int started_retry;
 	u32 retry_cnt;
 	u32 rnr_retry;
-	struct rxe_task task;
 };
 enum rdatm_res_state {
@@ -196,7 +195,6 @@ struct rxe_resp_info {
 	unsigned int res_head;
 	unsigned int res_tail;
 	struct resp_res *res;
-	struct rxe_task task;
 };
 struct rxe_qp {
@@ -229,6 +227,9 @@ struct rxe_qp {
 	struct sk_buff_head req_pkts;
 	struct sk_buff_head resp_pkts;
+	struct rxe_task send_task;
+	struct rxe_task recv_task;
 	struct rxe_req_info req;
 	struct rxe_comp_info comp;
 	struct rxe_resp_info resp;

View File

@@ -329,8 +329,7 @@ static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
 	rcu_read_lock();
 	master = netdev_master_upper_dev_get_rcu(dev);
-	if (master)
-		dev_hold(master);
+	dev_hold(master);
 	rcu_read_unlock();
 	if (master)

View File

@@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	ppriv = ipoib_priv(pdev);
-	snprintf(intf_name, sizeof(intf_name), "%s.%04x",
-		 ppriv->dev->name, pkey);
+	/* If you increase IFNAMSIZ, update snprintf below
+	 * to allow longer names.
+	 */
+	BUILD_BUG_ON(IFNAMSIZ != 16);
+	snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
+		 pkey);
 	ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
 	if (IS_ERR(ndev)) {
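
The format change above is what silences the compiler's format-truncation warning: with IFNAMSIZ fixed at 16, "%.10s" limits the parent name to 10 characters, and 10 + 1 ('.') + 4 hex digits + the terminating NUL always fit the buffer. A small stand-alone userspace illustration of that bound (names here are made up for the example, not kernel code):

/* Stand-alone model of the "%.10s.%04x" length bound. */
#include <assert.h>
#include <stdio.h>

#define IFNAMSIZ 16	/* mirrors the kernel's fixed interface-name size */

int main(void)
{
	char intf_name[IFNAMSIZ];
	const char *parent = "averylongparentname";	/* > 10 chars on purpose */
	unsigned short pkey = 0x8001;
	int n;

	/* at most 10 name chars + '.' + 4 hex digits = 15 chars + NUL */
	n = snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", parent, pkey);
	assert(n <= IFNAMSIZ - 1);	/* never truncated */
	printf("%s\n", intf_name);
	return 0;
}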

View File

@@ -2068,8 +2068,6 @@ static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
 	__iowrite64_copy(ring->tqp->mem_base, desc,
 			 (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
 			 HNS3_BYTES_PER_64BIT);
-
-	io_stop_wc();
 }
 static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
@@ -2088,8 +2086,6 @@ static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
 	u64_stats_update_begin(&ring->syncp);
 	ring->stats.tx_mem_doorbell += ring->pending_buf;
 	u64_stats_update_end(&ring->syncp);
-
-	io_stop_wc();
 }
 static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,

View File

@@ -16,9 +16,15 @@
 struct device;
 struct resource;
-__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#endif
 void __ioread32_copy(void *to, const void __iomem *from, size_t count);
+#ifndef __iowrite64_copy
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#endif
 #ifdef CONFIG_MMU
 int ioremap_page_range(unsigned long addr, unsigned long end,

View File

@@ -14,6 +14,9 @@
 #include <uapi/rdma/rdma_netlink.h>
 #include <linux/xarray.h>
+/* Mark entry as containing driver specific details, it is used to provide QP subtype for now */
+#define RESTRACK_DD XA_MARK_1
 struct ib_device;
 struct sk_buff;
@@ -116,8 +119,8 @@ struct rdma_restrack_entry {
 	u32 id;
 };
-int rdma_restrack_count(struct ib_device *dev,
-			enum rdma_restrack_type type);
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
+			bool show_details);
 /**
  * rdma_is_kernel_res() - check the owner of resource
  * @res: resource entry
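
For context on the RESTRACK_DD mark and the new show_details argument above: restrack keeps entries in an xarray per resource type, and the mark lets counting/dumping skip driver-detail entries unless they are explicitly requested. The helper below is only a sketch of that idea; the helper name and the bare xarray parameter are illustrative, and the real code in drivers/infiniband/core/restrack.c differs in layout and locking.

/* Sketch only: count restrack entries while honouring RESTRACK_DD. */
static int count_entries_sketch(struct xarray *xa, bool show_details)
{
	struct rdma_restrack_entry *res;
	unsigned long id;
	int cnt = 0;

	xa_for_each(xa, id, res) {
		/* driver-detail entries only count when asked for */
		if (xa_get_mark(xa, id, RESTRACK_DD) && !show_details)
			continue;
		cnt++;
	}
	return cnt;
}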

View File

@@ -85,11 +85,17 @@ enum {
 	EFA_QP_DRIVER_TYPE_SRD = 0,
 };
+enum {
+	EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV = 1 << 0,
+};
 struct efa_ibv_create_qp {
 	__u32 comp_mask;
 	__u32 rq_ring_size; /* bytes */
 	__u32 sq_ring_size; /* bytes */
 	__u32 driver_qp_type;
+	__u16 flags;
+	__u8 reserved_90[6];
 };
 struct efa_ibv_create_qp_resp {
@@ -123,6 +129,7 @@ enum {
 	EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID = 1 << 3,
 	EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4,
 	EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5,
+	EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV = 1 << 6,
};
 struct efa_ibv_ex_query_device_resp {
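
How a userspace provider might consume the new EFA ABI bits above is sketched below. Only the struct fields and flag names come from the header; the helper name and the device_caps parameter are hypothetical, not rdma-core code.

/* Hypothetical userspace sketch: opt in to unsolicited
 * write-with-immediate receive when the device advertises it.
 */
static void fill_create_qp_cmd(struct efa_ibv_create_qp *cmd, __u32 device_caps)
{
	cmd->comp_mask = 0;
	cmd->driver_qp_type = EFA_QP_DRIVER_TYPE_SRD;
	cmd->flags = 0;
	if (device_caps & EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV)
		cmd->flags |= EFA_CREATE_QP_WITH_UNSOLICITED_WRITE_RECV;
}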

View File

@@ -109,6 +109,12 @@ struct hns_roce_ib_create_qp_resp {
 	__aligned_u64 dwqe_mmap_key;
 };
+struct hns_roce_ib_modify_qp_resp {
+	__u8 tc_mode;
+	__u8 priority;
+	__u8 reserved[6];
+};
 enum {
 	HNS_ROCE_EXSGE_FLAGS = 1 << 0,
 	HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1,
@@ -143,7 +149,8 @@ struct hns_roce_ib_alloc_pd_resp {
 struct hns_roce_ib_create_ah_resp {
 	__u8 dmac[6];
-	__u8 reserved[2];
+	__u8 priority;
+	__u8 tc_mode;
 };
 #endif /* HNS_ABI_USER_H */

View File

@@ -16,8 +16,20 @@
 #define MANA_IB_UVERBS_ABI_VERSION 1
+enum mana_ib_create_cq_flags {
+	MANA_IB_CREATE_RNIC_CQ = 1 << 0,
+};
 struct mana_ib_create_cq {
 	__aligned_u64 buf_addr;
+	__u16 flags;
+	__u16 reserved0;
+	__u32 reserved1;
+};
+
+struct mana_ib_create_cq_resp {
+	__u32 cqid;
+	__u32 reserved;
 };
 struct mana_ib_create_qp {
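
Similarly for the mana ABI above, a hypothetical userspace helper is sketched below; only the structure layout and MANA_IB_CREATE_RNIC_CQ come from the header, the helper itself is illustrative and not taken from any provider library.

/* Hypothetical userspace helper: build the private command data for an
 * RNIC completion queue; the response then carries the RNIC cqid back.
 */
static void prepare_rnic_cq_cmd(struct mana_ib_create_cq *cmd, __u64 buf_addr)
{
	cmd->buf_addr = buf_addr;	/* userspace CQ ring buffer */
	cmd->flags = MANA_IB_CREATE_RNIC_CQ;
	cmd->reserved0 = 0;
	cmd->reserved1 = 0;
}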

View File

@@ -558,6 +558,12 @@ enum rdma_nldev_attr {
 	RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, /* u8 */
+	RDMA_NLDEV_ATTR_DRIVER_DETAILS, /* u8 */
+
+	/*
+	 * QP subtype string, used for driver QPs
+	 */
+	RDMA_NLDEV_ATTR_RES_SUBTYPE, /* string */
 	/*
 	 * Always the end
 	 */

View File

@@ -16,9 +16,8 @@
  * time. Order of access is not guaranteed, nor is a memory barrier
  * performed afterwards.
  */
-void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
-					    const void *from,
-					    size_t count)
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count)
 {
 	u32 __iomem *dst = to;
 	const u32 *src = from;
@@ -28,6 +27,7 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
 		__raw_writel(*src++, dst++);
 }
 EXPORT_SYMBOL_GPL(__iowrite32_copy);
+#endif
 /**
  * __ioread32_copy - copy data from MMIO space, in 32-bit units
@@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy);
  * time. Order of access is not guaranteed, nor is a memory barrier
  * performed afterwards.
  */
-void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
-					    const void *from,
-					    size_t count)
+#ifndef __iowrite64_copy
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 {
 #ifdef CONFIG_64BIT
 	u64 __iomem *dst = to;
@@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
 	__iowrite32_copy(to, from, count * 2);
 #endif
 }
 EXPORT_SYMBOL_GPL(__iowrite64_copy);
+#endif
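
With the weak attribute gone, an architecture overrides these helpers by providing its own implementation and defining the symbol as a macro in its asm/io.h, so the generic definitions above compile out under the new #ifndef guards. The sketch below shows the shape of such an override; it is hypothetical, and a real architecture implementation would use its dedicated wide-store instruction sequence rather than the plain loop used here as a placeholder.

/* arch/<arch>/include/asm/io.h -- hypothetical override sketch */
static inline void __iowrite64_copy(void __iomem *to, const void *from,
				    size_t count)
{
	/* placeholder body; a real override would emit stores that the
	 * CPU can merge into large write-combined MMIO transactions
	 */
	u64 __iomem *dst = to;
	const u64 *src = from;

	while (count--)
		__raw_writeq(*src++, dst++);
}
#define __iowrite64_copy __iowrite64_copy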