Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This cycle saw a focus on rxe and bnxt_re drivers:
- Code cleanups for irdma, rxe, rtrs, hns, vmw_pvrdma
- rxe uses workqueues instead of tasklets
- rxe has better compliance around access checks for MRs and rereg_mr
- mana supports the 'v2' FW interface for RX coalescing
- hfi1 bug fix for stale cache entries in its MR cache
- mlx5 bug fix to handle FW failures when destroying QPs
- erdma HW has a new doorbell allocation mechanism for uverbs that is
secure
- Lots of small cleanups and rework in bnxt_re:
- Use the common mmap functions
- Support disassociation
- Improve FW command flow
- support for 'low latency push'"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (71 commits)
RDMA/bnxt_re: Fix an IS_ERR() vs NULL check
RDMA/bnxt_re: Fix spelling mistake "priviledged" -> "privileged"
RDMA/bnxt_re: Remove duplicated include in bnxt_re/main.c
RDMA/bnxt_re: Refactor code around bnxt_qplib_map_rc()
RDMA/bnxt_re: Remove incorrect return check from slow path
RDMA/bnxt_re: Enable low latency push
RDMA/bnxt_re: Reorg the bar mapping
RDMA/bnxt_re: Move the interface version to chip context structure
RDMA/bnxt_re: Query function capabilities from firmware
RDMA/bnxt_re: Optimize the bnxt_re_init_hwrm_hdr usage
RDMA/bnxt_re: Add disassociate ucontext support
RDMA/bnxt_re: Use the common mmap helper functions
RDMA/bnxt_re: Initialize opcode while sending message
RDMA/cma: Remove NULL check before dev_{put, hold}
RDMA/rxe: Simplify cq->notify code
RDMA/rxe: Fixes mr access supported list
RDMA/bnxt_re: optimize the parameters passed to helper functions
RDMA/bnxt_re: remove redundant cmdq_bitmap
RDMA/bnxt_re: use firmware provided max request timeout
RDMA/bnxt_re: cancel all control path command waiters upon error
...
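Editor's note: the "Use the common mmap helper functions" item above refers to the RDMA core's rdma_user_mmap_entry_* API, which the bnxt_re hunks below convert to. The following is a minimal sketch of that pattern for a hypothetical driver; the my_* names are illustrative only and not part of bnxt_re, while the rdma_user_mmap_* calls are the real core helpers visible in the diff. For the "low latency push" doorbells, the same flow is used with pgprot_writecombine() instead of pgprot_noncached().

/* Minimal sketch of the RDMA core "user mmap entry" pattern.  Names
 * prefixed my_* are hypothetical; the entry itself is freed later via
 * the device's ->mmap_free() callback (kfree of the containing struct).
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <rdma/ib_verbs.h>

struct my_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* must be embedded */
	u64 db_pa;				/* doorbell physical address */
};

/* Allocation side: register one page and hand the mmap cookie to userspace. */
static int my_expose_db(struct ib_ucontext *uctx, u64 db_pa, u64 *mmap_offset)
{
	struct my_mmap_entry *entry;
	int rc;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->db_pa = db_pa;
	rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, PAGE_SIZE);
	if (rc) {
		kfree(entry);
		return rc;
	}
	/* Userspace passes this offset back through mmap(). */
	*mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
	return 0;
}

/* ->mmap() side: look the cookie up instead of decoding vm_pgoff by hand. */
static int my_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct my_mmap_entry *entry;
	int ret;

	rdma_entry = rdma_user_mmap_entry_get(uctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	entry = container_of(rdma_entry, struct my_mmap_entry, rdma_entry);
	ret = rdma_user_mmap_io(uctx, vma, entry->db_pa >> PAGE_SHIFT,
				PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
				rdma_entry);
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}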
@@ -4805,8 +4805,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
 	event->param.ud.qkey = id_priv->qkey;
 
 out:
-	if (ndev)
-		dev_put(ndev);
+	dev_put(ndev);
 }
 
 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
@@ -39,6 +39,7 @@
|
||||
|
||||
#ifndef __BNXT_RE_H__
|
||||
#define __BNXT_RE_H__
|
||||
#include <rdma/uverbs_ioctl.h>
|
||||
#include "hw_counters.h"
|
||||
#define ROCE_DRV_MODULE_NAME "bnxt_re"
|
||||
|
||||
@@ -179,10 +180,14 @@ struct bnxt_re_dev {
|
||||
#define BNXT_RE_ROCEV2_IPV4_PACKET 2
|
||||
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
|
||||
|
||||
#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
|
||||
|
||||
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
|
||||
{
|
||||
if (rdev)
|
||||
return &rdev->ibdev.dev;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
extern const struct uapi_definition bnxt_re_uapi_defs[];
|
||||
#endif
|
||||
|
||||
@@ -61,6 +61,15 @@
|
||||
|
||||
#include "bnxt_re.h"
|
||||
#include "ib_verbs.h"
|
||||
|
||||
#include <rdma/uverbs_types.h>
|
||||
#include <rdma/uverbs_std_types.h>
|
||||
|
||||
#include <rdma/ib_user_ioctl_cmds.h>
|
||||
|
||||
#define UVERBS_MODULE_NAME bnxt_re
|
||||
#include <rdma/uverbs_named_ioctl.h>
|
||||
|
||||
#include <rdma/bnxt_re-abi.h>
|
||||
|
||||
static int __from_ib_access_flags(int iflags)
|
||||
@@ -534,12 +543,57 @@ fail:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct bnxt_re_user_mmap_entry*
|
||||
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
|
||||
enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
|
||||
{
|
||||
struct bnxt_re_user_mmap_entry *entry;
|
||||
int ret;
|
||||
|
||||
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry)
|
||||
return NULL;
|
||||
|
||||
entry->mem_offset = mem_offset;
|
||||
entry->mmap_flag = mmap_flag;
|
||||
entry->uctx = uctx;
|
||||
|
||||
switch (mmap_flag) {
|
||||
case BNXT_RE_MMAP_SH_PAGE:
|
||||
ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
|
||||
&entry->rdma_entry, PAGE_SIZE, 0);
|
||||
break;
|
||||
case BNXT_RE_MMAP_UC_DB:
|
||||
case BNXT_RE_MMAP_WC_DB:
|
||||
ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
|
||||
&entry->rdma_entry, PAGE_SIZE);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
kfree(entry);
|
||||
return NULL;
|
||||
}
|
||||
if (offset)
|
||||
*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
/* Protection Domains */
|
||||
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
|
||||
{
|
||||
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
|
||||
struct bnxt_re_dev *rdev = pd->rdev;
|
||||
|
||||
if (udata) {
|
||||
rdma_user_mmap_entry_remove(pd->pd_db_mmap);
|
||||
pd->pd_db_mmap = NULL;
|
||||
}
|
||||
|
||||
bnxt_re_destroy_fence_mr(pd);
|
||||
|
||||
if (pd->qplib_pd.id) {
|
||||
@@ -558,7 +612,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
|
||||
struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
|
||||
udata, struct bnxt_re_ucontext, ib_uctx);
|
||||
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
|
||||
int rc;
|
||||
struct bnxt_re_user_mmap_entry *entry = NULL;
|
||||
int rc = 0;
|
||||
|
||||
pd->rdev = rdev;
|
||||
if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
|
||||
@@ -568,15 +623,15 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
|
||||
}
|
||||
|
||||
if (udata) {
|
||||
struct bnxt_re_pd_resp resp;
|
||||
struct bnxt_re_pd_resp resp = {};
|
||||
|
||||
if (!ucntx->dpi.dbr) {
|
||||
/* Allocate DPI in alloc_pd to avoid failing of
|
||||
* ibv_devinfo and family of application when DPIs
|
||||
* are depleted.
|
||||
*/
|
||||
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
|
||||
&ucntx->dpi, ucntx)) {
|
||||
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
|
||||
&ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
|
||||
rc = -ENOMEM;
|
||||
goto dbfail;
|
||||
}
|
||||
@@ -585,12 +640,21 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
|
||||
resp.pdid = pd->qplib_pd.id;
|
||||
/* Still allow mapping this DBR to the new user PD. */
|
||||
resp.dpi = ucntx->dpi.dpi;
|
||||
resp.dbr = (u64)ucntx->dpi.umdbr;
|
||||
|
||||
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
|
||||
entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
|
||||
BNXT_RE_MMAP_UC_DB, &resp.dbr);
|
||||
|
||||
if (!entry) {
|
||||
rc = -ENOMEM;
|
||||
goto dbfail;
|
||||
}
|
||||
|
||||
pd->pd_db_mmap = &entry->rdma_entry;
|
||||
|
||||
rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
|
||||
if (rc) {
|
||||
ibdev_err(&rdev->ibdev,
|
||||
"Failed to copy user response\n");
|
||||
rdma_user_mmap_entry_remove(pd->pd_db_mmap);
|
||||
rc = -EFAULT;
|
||||
goto dbfail;
|
||||
}
|
||||
}
|
||||
@@ -614,12 +678,20 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
|
||||
{
|
||||
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
|
||||
struct bnxt_re_dev *rdev = ah->rdev;
|
||||
bool block = true;
|
||||
int rc = 0;
|
||||
|
||||
bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
|
||||
!(flags & RDMA_DESTROY_AH_SLEEPABLE));
|
||||
block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
|
||||
rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
|
||||
if (BNXT_RE_CHECK_RC(rc)) {
|
||||
if (rc == -ETIMEDOUT)
|
||||
rc = 0;
|
||||
else
|
||||
goto fail;
|
||||
}
|
||||
atomic_dec(&rdev->ah_count);
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
|
||||
@@ -3955,6 +4027,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
|
||||
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
|
||||
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
|
||||
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
|
||||
struct bnxt_re_user_mmap_entry *entry;
|
||||
struct bnxt_re_uctx_resp resp = {};
|
||||
u32 chip_met_rev_num = 0;
|
||||
int rc;
|
||||
@@ -3993,6 +4066,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
|
||||
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
|
||||
resp.mode = rdev->chip_ctx->modes.wqe_mode;
|
||||
|
||||
if (rdev->chip_ctx->modes.db_push)
|
||||
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
|
||||
|
||||
entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
|
||||
if (!entry) {
|
||||
rc = -ENOMEM;
|
||||
goto cfail;
|
||||
}
|
||||
uctx->shpage_mmap = &entry->rdma_entry;
|
||||
|
||||
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
|
||||
if (rc) {
|
||||
ibdev_err(ibdev, "Failed to copy user context");
|
||||
@@ -4016,6 +4099,8 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
|
||||
|
||||
struct bnxt_re_dev *rdev = uctx->rdev;
|
||||
|
||||
rdma_user_mmap_entry_remove(uctx->shpage_mmap);
|
||||
uctx->shpage_mmap = NULL;
|
||||
if (uctx->shpg)
|
||||
free_page((unsigned long)uctx->shpg);
|
||||
|
||||
@@ -4023,8 +4108,7 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
|
||||
/* Free DPI only if this is the first PD allocated by the
|
||||
* application and mark the context dpi as NULL
|
||||
*/
|
||||
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
|
||||
&rdev->qplib_res.dpi_tbl, &uctx->dpi);
|
||||
bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
|
||||
uctx->dpi.dbr = NULL;
|
||||
}
|
||||
}
|
||||
@@ -4035,27 +4119,177 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
|
||||
struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
|
||||
struct bnxt_re_ucontext,
|
||||
ib_uctx);
|
||||
struct bnxt_re_dev *rdev = uctx->rdev;
|
||||
struct bnxt_re_user_mmap_entry *bnxt_entry;
|
||||
struct rdma_user_mmap_entry *rdma_entry;
|
||||
int ret = 0;
|
||||
u64 pfn;
|
||||
|
||||
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
|
||||
rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
|
||||
if (!rdma_entry)
|
||||
return -EINVAL;
|
||||
|
||||
if (vma->vm_pgoff) {
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
|
||||
PAGE_SIZE, vma->vm_page_prot)) {
|
||||
ibdev_err(&rdev->ibdev, "Failed to map DPI");
|
||||
return -EAGAIN;
|
||||
}
|
||||
} else {
|
||||
pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
|
||||
if (remap_pfn_range(vma, vma->vm_start,
|
||||
pfn, PAGE_SIZE, vma->vm_page_prot)) {
|
||||
ibdev_err(&rdev->ibdev, "Failed to map shared page");
|
||||
return -EAGAIN;
|
||||
}
|
||||
bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
|
||||
rdma_entry);
|
||||
|
||||
switch (bnxt_entry->mmap_flag) {
|
||||
case BNXT_RE_MMAP_WC_DB:
|
||||
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
|
||||
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
|
||||
pgprot_writecombine(vma->vm_page_prot),
|
||||
rdma_entry);
|
||||
break;
|
||||
case BNXT_RE_MMAP_UC_DB:
|
||||
pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
|
||||
ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
|
||||
pgprot_noncached(vma->vm_page_prot),
|
||||
rdma_entry);
|
||||
break;
|
||||
case BNXT_RE_MMAP_SH_PAGE:
|
||||
ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
rdma_user_mmap_entry_put(rdma_entry);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
|
||||
{
|
||||
struct bnxt_re_user_mmap_entry *bnxt_entry;
|
||||
|
||||
bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
|
||||
rdma_entry);
|
||||
|
||||
kfree(bnxt_entry);
|
||||
}
|
||||
|
||||
static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
|
||||
{
|
||||
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
|
||||
enum bnxt_re_alloc_page_type alloc_type;
|
||||
struct bnxt_re_user_mmap_entry *entry;
|
||||
enum bnxt_re_mmap_flag mmap_flag;
|
||||
struct bnxt_qplib_chip_ctx *cctx;
|
||||
struct bnxt_re_ucontext *uctx;
|
||||
struct bnxt_re_dev *rdev;
|
||||
u64 mmap_offset;
|
||||
u32 length;
|
||||
u32 dpi;
|
||||
u64 dbr;
|
||||
int err;
|
||||
|
||||
uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
|
||||
if (IS_ERR(uctx))
|
||||
return PTR_ERR(uctx);
|
||||
|
||||
err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rdev = uctx->rdev;
|
||||
cctx = rdev->chip_ctx;
|
||||
|
||||
switch (alloc_type) {
|
||||
case BNXT_RE_ALLOC_WC_PAGE:
|
||||
if (cctx->modes.db_push) {
|
||||
if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
|
||||
uctx, BNXT_QPLIB_DPI_TYPE_WC))
|
||||
return -ENOMEM;
|
||||
length = PAGE_SIZE;
|
||||
dpi = uctx->wcdpi.dpi;
|
||||
dbr = (u64)uctx->wcdpi.umdbr;
|
||||
mmap_flag = BNXT_RE_MMAP_WC_DB;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
uobj->object = entry;
|
||||
uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
|
||||
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
|
||||
&mmap_offset, sizeof(mmap_offset));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
|
||||
&length, sizeof(length));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
|
||||
&dpi, sizeof(length));
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
|
||||
enum rdma_remove_reason why,
|
||||
struct uverbs_attr_bundle *attrs)
|
||||
{
|
||||
struct bnxt_re_user_mmap_entry *entry = uobject->object;
|
||||
struct bnxt_re_ucontext *uctx = entry->uctx;
|
||||
|
||||
switch (entry->mmap_flag) {
|
||||
case BNXT_RE_MMAP_WC_DB:
|
||||
if (uctx && uctx->wcdpi.dbr) {
|
||||
struct bnxt_re_dev *rdev = uctx->rdev;
|
||||
|
||||
bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
|
||||
uctx->wcdpi.dbr = NULL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
goto exit;
|
||||
}
|
||||
rdma_user_mmap_entry_remove(&entry->rdma_entry);
|
||||
exit:
|
||||
return 0;
|
||||
}
|
||||
|
||||
DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
|
||||
UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
|
||||
BNXT_RE_OBJECT_ALLOC_PAGE,
|
||||
UVERBS_ACCESS_NEW,
|
||||
UA_MANDATORY),
|
||||
UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
|
||||
enum bnxt_re_alloc_page_type,
|
||||
UA_MANDATORY),
|
||||
UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
|
||||
UVERBS_ATTR_TYPE(u64),
|
||||
UA_MANDATORY),
|
||||
UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
|
||||
UVERBS_ATTR_TYPE(u32),
|
||||
UA_MANDATORY),
|
||||
UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
|
||||
UVERBS_ATTR_TYPE(u32),
|
||||
UA_MANDATORY));
|
||||
|
||||
DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
|
||||
UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
|
||||
BNXT_RE_OBJECT_ALLOC_PAGE,
|
||||
UVERBS_ACCESS_DESTROY,
|
||||
UA_MANDATORY));
|
||||
|
||||
DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
|
||||
UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
|
||||
&UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
|
||||
&UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
|
||||
|
||||
const struct uapi_definition bnxt_re_uapi_defs[] = {
|
||||
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
|
||||
{}
|
||||
};
|
||||
|
||||
@@ -60,6 +60,8 @@ struct bnxt_re_pd {
|
||||
struct bnxt_re_dev *rdev;
|
||||
struct bnxt_qplib_pd qplib_pd;
|
||||
struct bnxt_re_fence_data fence;
|
||||
struct rdma_user_mmap_entry *pd_db_mmap;
|
||||
struct rdma_user_mmap_entry *pd_wcdb_mmap;
|
||||
};
|
||||
|
||||
struct bnxt_re_ah {
|
||||
@@ -134,8 +136,23 @@ struct bnxt_re_ucontext {
|
||||
struct ib_ucontext ib_uctx;
|
||||
struct bnxt_re_dev *rdev;
|
||||
struct bnxt_qplib_dpi dpi;
|
||||
struct bnxt_qplib_dpi wcdpi;
|
||||
void *shpg;
|
||||
spinlock_t sh_lock; /* protect shpg */
|
||||
struct rdma_user_mmap_entry *shpage_mmap;
|
||||
};
|
||||
|
||||
enum bnxt_re_mmap_flag {
|
||||
BNXT_RE_MMAP_SH_PAGE,
|
||||
BNXT_RE_MMAP_UC_DB,
|
||||
BNXT_RE_MMAP_WC_DB,
|
||||
};
|
||||
|
||||
struct bnxt_re_user_mmap_entry {
|
||||
struct rdma_user_mmap_entry rdma_entry;
|
||||
struct bnxt_re_ucontext *uctx;
|
||||
u64 mem_offset;
|
||||
u8 mmap_flag;
|
||||
};
|
||||
|
||||
static inline u16 bnxt_re_get_swqe_size(int nsge)
|
||||
@@ -213,6 +230,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
|
||||
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
|
||||
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
|
||||
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
|
||||
|
||||
|
||||
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
|
||||
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
|
||||
|
||||
@@ -83,6 +83,45 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
|
||||
unsigned long event, void *ptr);
|
||||
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
|
||||
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
|
||||
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
|
||||
|
||||
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
|
||||
u32 *offset);
|
||||
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
|
||||
{
|
||||
struct bnxt_qplib_chip_ctx *cctx;
|
||||
struct bnxt_en_dev *en_dev;
|
||||
struct bnxt_qplib_res *res;
|
||||
u32 l2db_len = 0;
|
||||
u32 offset = 0;
|
||||
u32 barlen;
|
||||
int rc;
|
||||
|
||||
res = &rdev->qplib_res;
|
||||
en_dev = rdev->en_dev;
|
||||
cctx = rdev->chip_ctx;
|
||||
|
||||
/* Issue qcfg */
|
||||
rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
|
||||
if (rc)
|
||||
dev_info(rdev_to_dev(rdev),
|
||||
"Couldn't get DB bar size, Low latency framework is disabled\n");
|
||||
/* set register offsets for both UC and WC */
|
||||
res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
|
||||
BNXT_QPLIB_DBR_PF_DB_OFFSET;
|
||||
res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
|
||||
|
||||
/* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
|
||||
* is equal to the DB-Bar actual size. This indicates that L2
|
||||
* is mapping entire bar as UC-. RoCE driver can't enable WC mapping
|
||||
* in such cases and DB-push will be disabled.
|
||||
*/
|
||||
barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
|
||||
if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
|
||||
res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
|
||||
dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
|
||||
{
|
||||
@@ -91,6 +130,9 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
|
||||
cctx = rdev->chip_ctx;
|
||||
cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
|
||||
mode : BNXT_QPLIB_WQE_MODE_STATIC;
|
||||
if (bnxt_re_hwrm_qcaps(rdev))
|
||||
dev_err(rdev_to_dev(rdev),
|
||||
"Failed to query hwrm qcaps\n");
|
||||
}
|
||||
|
||||
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
|
||||
@@ -112,6 +154,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
|
||||
{
|
||||
struct bnxt_qplib_chip_ctx *chip_ctx;
|
||||
struct bnxt_en_dev *en_dev;
|
||||
int rc;
|
||||
|
||||
en_dev = rdev->en_dev;
|
||||
|
||||
@@ -130,6 +173,12 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
|
||||
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
|
||||
|
||||
bnxt_re_set_drv_mode(rdev, wqe_mode);
|
||||
|
||||
bnxt_re_set_db_offset(rdev);
|
||||
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (bnxt_qplib_determine_atomics(en_dev->pdev))
|
||||
ibdev_info(&rdev->ibdev,
|
||||
"platform doesn't support global atomics.");
|
||||
@@ -283,15 +332,21 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
|
||||
for (indx = 0; indx < rdev->num_msix; indx++)
|
||||
rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
|
||||
|
||||
bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
|
||||
false);
|
||||
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
|
||||
false);
|
||||
if (rc) {
|
||||
ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
|
||||
return;
|
||||
}
|
||||
for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
|
||||
nq = &rdev->nq[indx - 1];
|
||||
rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
|
||||
msix_ent[indx].vector, false);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
|
||||
indx - 1);
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -315,12 +370,11 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
|
||||
u16 opcd, u16 crid, u16 trid)
|
||||
static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
|
||||
{
|
||||
hdr->req_type = cpu_to_le16(opcd);
|
||||
hdr->cmpl_ring = cpu_to_le16(crid);
|
||||
hdr->target_id = cpu_to_le16(trid);
|
||||
hdr->cmpl_ring = cpu_to_le16(-1);
|
||||
hdr->target_id = cpu_to_le16(-1);
|
||||
}
|
||||
|
||||
static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
|
||||
@@ -334,13 +388,60 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
|
||||
fw_msg->timeout = timeout;
|
||||
}
|
||||
|
||||
/* Query device config using common hwrm */
|
||||
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
|
||||
u32 *offset)
|
||||
{
|
||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||
struct hwrm_func_qcfg_output resp = {0};
|
||||
struct hwrm_func_qcfg_input req = {0};
|
||||
struct bnxt_fw_msg fw_msg;
|
||||
int rc;
|
||||
|
||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
|
||||
req.fid = cpu_to_le16(0xffff);
|
||||
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
|
||||
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
|
||||
rc = bnxt_send_msg(en_dev, &fw_msg);
|
||||
if (!rc) {
|
||||
*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
|
||||
*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Query function capabilities using common hwrm */
|
||||
int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
|
||||
{
|
||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||
struct hwrm_func_qcaps_output resp = {};
|
||||
struct hwrm_func_qcaps_input req = {};
|
||||
struct bnxt_qplib_chip_ctx *cctx;
|
||||
struct bnxt_fw_msg fw_msg = {};
|
||||
int rc;
|
||||
|
||||
cctx = rdev->chip_ctx;
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
|
||||
req.fid = cpu_to_le16(0xffff);
|
||||
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
|
||||
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
|
||||
|
||||
rc = bnxt_send_msg(en_dev, &fw_msg);
|
||||
if (rc)
|
||||
return rc;
|
||||
cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
|
||||
u16 fw_ring_id, int type)
|
||||
{
|
||||
struct bnxt_en_dev *en_dev;
|
||||
struct hwrm_ring_free_input req = {0};
|
||||
struct hwrm_ring_free_input req = {};
|
||||
struct hwrm_ring_free_output resp;
|
||||
struct bnxt_fw_msg fw_msg;
|
||||
struct bnxt_fw_msg fw_msg = {};
|
||||
int rc = -EINVAL;
|
||||
|
||||
if (!rdev)
|
||||
@@ -354,9 +455,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
|
||||
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
|
||||
return 0;
|
||||
|
||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||
|
||||
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
|
||||
req.ring_type = type;
|
||||
req.ring_id = cpu_to_le16(fw_ring_id);
|
||||
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
|
||||
@@ -373,16 +472,15 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
|
||||
u16 *fw_ring_id)
|
||||
{
|
||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||
struct hwrm_ring_alloc_input req = {0};
|
||||
struct hwrm_ring_alloc_input req = {};
|
||||
struct hwrm_ring_alloc_output resp;
|
||||
struct bnxt_fw_msg fw_msg;
|
||||
struct bnxt_fw_msg fw_msg = {};
|
||||
int rc = -EINVAL;
|
||||
|
||||
if (!en_dev)
|
||||
return rc;
|
||||
|
||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
|
||||
req.enables = 0;
|
||||
req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
|
||||
if (ring_attr->pages > 1) {
|
||||
@@ -411,7 +509,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
|
||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||
struct hwrm_stat_ctx_free_input req = {};
|
||||
struct hwrm_stat_ctx_free_output resp = {};
|
||||
struct bnxt_fw_msg fw_msg;
|
||||
struct bnxt_fw_msg fw_msg = {};
|
||||
int rc = -EINVAL;
|
||||
|
||||
if (!en_dev)
|
||||
@@ -420,9 +518,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
|
||||
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
|
||||
return 0;
|
||||
|
||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||
|
||||
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
|
||||
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
|
||||
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
|
||||
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
|
||||
@@ -439,10 +535,10 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
|
||||
u32 *fw_stats_ctx_id)
|
||||
{
|
||||
struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
|
||||
struct hwrm_stat_ctx_alloc_output resp = {0};
|
||||
struct hwrm_stat_ctx_alloc_input req = {0};
|
||||
struct hwrm_stat_ctx_alloc_output resp = {};
|
||||
struct hwrm_stat_ctx_alloc_input req = {};
|
||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||
struct bnxt_fw_msg fw_msg;
|
||||
struct bnxt_fw_msg fw_msg = {};
|
||||
int rc = -EINVAL;
|
||||
|
||||
*fw_stats_ctx_id = INVALID_STATS_CTX_ID;
|
||||
@@ -450,9 +546,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
|
||||
if (!en_dev)
|
||||
return rc;
|
||||
|
||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||
|
||||
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
|
||||
req.update_period_ms = cpu_to_le32(1000);
|
||||
req.stats_dma_addr = cpu_to_le64(dma_map);
|
||||
req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
|
||||
@@ -466,6 +560,10 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
|
||||
{
|
||||
}
|
||||
|
||||
/* Device */
|
||||
|
||||
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
|
||||
@@ -532,6 +630,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
|
||||
.destroy_qp = bnxt_re_destroy_qp,
|
||||
.destroy_srq = bnxt_re_destroy_srq,
|
||||
.device_group = &bnxt_re_dev_attr_group,
|
||||
.disassociate_ucontext = bnxt_re_disassociate_ucontext,
|
||||
.get_dev_fw_str = bnxt_re_query_fw_str,
|
||||
.get_dma_mr = bnxt_re_get_dma_mr,
|
||||
.get_hw_stats = bnxt_re_ib_get_hw_stats,
|
||||
@@ -539,6 +638,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
|
||||
.get_port_immutable = bnxt_re_get_port_immutable,
|
||||
.map_mr_sg = bnxt_re_map_mr_sg,
|
||||
.mmap = bnxt_re_mmap,
|
||||
.mmap_free = bnxt_re_mmap_free,
|
||||
.modify_qp = bnxt_re_modify_qp,
|
||||
.modify_srq = bnxt_re_modify_srq,
|
||||
.poll_cq = bnxt_re_poll_cq,
|
||||
@@ -579,6 +679,9 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
|
||||
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
|
||||
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
|
||||
|
||||
if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
|
||||
ibdev->driver_def = bnxt_re_uapi_defs;
|
||||
|
||||
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
|
||||
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
|
||||
if (ret)
|
||||
@@ -822,7 +925,6 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
|
||||
|
||||
if (rdev->qplib_res.dpi_tbl.max) {
|
||||
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
|
||||
&rdev->qplib_res.dpi_tbl,
|
||||
&rdev->dpi_privileged);
|
||||
}
|
||||
if (rdev->qplib_res.rcfw) {
|
||||
@@ -850,9 +952,9 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
|
||||
rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res,
|
||||
&rdev->dpi_privileged,
|
||||
rdev);
|
||||
rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
|
||||
if (rc)
|
||||
goto dealloc_res;
|
||||
|
||||
@@ -892,7 +994,6 @@ free_nq:
|
||||
bnxt_qplib_free_nq(&rdev->nq[i]);
|
||||
}
|
||||
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
|
||||
&rdev->qplib_res.dpi_tbl,
|
||||
&rdev->dpi_privileged);
|
||||
dealloc_res:
|
||||
bnxt_qplib_free_res(&rdev->qplib_res);
|
||||
@@ -963,12 +1064,6 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
|
||||
if (!ib_device_try_get(&rdev->ibdev))
|
||||
return 0;
|
||||
|
||||
if (!sgid_tbl) {
|
||||
ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
|
||||
rc = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (index = 0; index < sgid_tbl->active; index++) {
|
||||
gid_idx = sgid_tbl->hw_id[index];
|
||||
|
||||
@@ -986,7 +1081,7 @@ static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
|
||||
rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
|
||||
rdev->qplib_res.netdev->dev_addr);
|
||||
}
|
||||
out:
|
||||
|
||||
ib_device_put(&rdev->ibdev);
|
||||
return rc;
|
||||
}
|
||||
@@ -1039,14 +1134,13 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
|
||||
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
|
||||
{
|
||||
struct bnxt_en_dev *en_dev = rdev->en_dev;
|
||||
struct hwrm_ver_get_output resp = {0};
|
||||
struct hwrm_ver_get_input req = {0};
|
||||
struct bnxt_fw_msg fw_msg;
|
||||
struct hwrm_ver_get_output resp = {};
|
||||
struct hwrm_ver_get_input req = {};
|
||||
struct bnxt_qplib_chip_ctx *cctx;
|
||||
struct bnxt_fw_msg fw_msg = {};
|
||||
int rc = 0;
|
||||
|
||||
memset(&fw_msg, 0, sizeof(fw_msg));
|
||||
bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
|
||||
HWRM_VER_GET, -1, -1);
|
||||
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET);
|
||||
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
|
||||
req.hwrm_intf_min = HWRM_VERSION_MINOR;
|
||||
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
|
||||
@@ -1058,11 +1152,18 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
|
||||
rc);
|
||||
return;
|
||||
}
|
||||
rdev->qplib_ctx.hwrm_intf_ver =
|
||||
|
||||
cctx = rdev->chip_ctx;
|
||||
cctx->hwrm_intf_ver =
|
||||
(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
|
||||
(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
|
||||
(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
|
||||
le16_to_cpu(resp.hwrm_intf_patch);
|
||||
|
||||
cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout);
|
||||
|
||||
if (!cctx->hwrm_cmd_max_timeout)
|
||||
cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
|
||||
}
|
||||
|
||||
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
|
||||
@@ -1200,7 +1301,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
|
||||
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
|
||||
vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
|
||||
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
|
||||
vid, db_offt, rdev->is_virtfn,
|
||||
vid, db_offt,
|
||||
&bnxt_re_aeq_handler);
|
||||
if (rc) {
|
||||
ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
|
||||
@@ -1497,6 +1598,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
|
||||
*/
|
||||
set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
|
||||
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
|
||||
wake_up_all(&rdev->rcfw.cmdq.waitq);
|
||||
mutex_unlock(&bnxt_re_mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -399,6 +399,9 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
|
||||
|
||||
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
|
||||
{
|
||||
if (!nq->requested)
|
||||
return;
|
||||
|
||||
tasklet_disable(&nq->nq_tasklet);
|
||||
/* Mask h/w interrupt */
|
||||
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
|
||||
@@ -406,11 +409,12 @@ void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
|
||||
synchronize_irq(nq->msix_vec);
|
||||
if (kill)
|
||||
tasklet_kill(&nq->nq_tasklet);
|
||||
if (nq->requested) {
|
||||
irq_set_affinity_hint(nq->msix_vec, NULL);
|
||||
free_irq(nq->msix_vec, nq);
|
||||
nq->requested = false;
|
||||
}
|
||||
|
||||
irq_set_affinity_hint(nq->msix_vec, NULL);
|
||||
free_irq(nq->msix_vec, nq);
|
||||
kfree(nq->name);
|
||||
nq->name = NULL;
|
||||
nq->requested = false;
|
||||
}
|
||||
|
||||
void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
|
||||
@@ -436,6 +440,7 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
|
||||
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
int msix_vector, bool need_init)
|
||||
{
|
||||
struct bnxt_qplib_res *res = nq->res;
|
||||
int rc;
|
||||
|
||||
if (nq->requested)
|
||||
@@ -447,10 +452,17 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
else
|
||||
tasklet_enable(&nq->nq_tasklet);
|
||||
|
||||
snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
|
||||
nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
|
||||
nq_indx, pci_name(res->pdev));
|
||||
if (!nq->name)
|
||||
return -ENOMEM;
|
||||
rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
kfree(nq->name);
|
||||
nq->name = NULL;
|
||||
tasklet_disable(&nq->nq_tasklet);
|
||||
return rc;
|
||||
}
|
||||
|
||||
cpumask_clear(&nq->mask);
|
||||
cpumask_set_cpu(nq_indx, &nq->mask);
|
||||
@@ -461,7 +473,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
|
||||
nq->msix_vec, nq_indx);
|
||||
}
|
||||
nq->requested = true;
|
||||
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
|
||||
bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -471,7 +483,6 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
|
||||
resource_size_t reg_base;
|
||||
struct bnxt_qplib_nq_db *nq_db;
|
||||
struct pci_dev *pdev;
|
||||
int rc = 0;
|
||||
|
||||
pdev = nq->pdev;
|
||||
nq_db = &nq->nq_db;
|
||||
@@ -481,8 +492,7 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
|
||||
if (!nq_db->reg.bar_base) {
|
||||
dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
|
||||
nq_db->reg.bar_id);
|
||||
rc = -ENOMEM;
|
||||
goto fail;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
reg_base = nq_db->reg.bar_base + reg_offt;
|
||||
@@ -492,15 +502,14 @@ static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
|
||||
if (!nq_db->reg.bar_reg) {
|
||||
dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
|
||||
nq_db->reg.bar_id);
|
||||
rc = -ENOMEM;
|
||||
goto fail;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nq_db->dbinfo.db = nq_db->reg.bar_reg;
|
||||
nq_db->dbinfo.hwq = &nq->hwq;
|
||||
nq_db->dbinfo.xid = nq->ring_id;
|
||||
fail:
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
|
||||
@@ -614,7 +623,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
|
||||
hwq_attr.type = HWQ_TYPE_QUEUE;
|
||||
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
|
||||
if (rc)
|
||||
goto exit;
|
||||
return rc;
|
||||
|
||||
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
|
||||
GFP_KERNEL);
|
||||
@@ -659,7 +668,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
|
||||
srq->dbinfo.xid = srq->id;
|
||||
srq->dbinfo.db = srq->dpi->dbr;
|
||||
srq->dbinfo.max_slot = 1;
|
||||
srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
|
||||
srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
|
||||
if (srq->threshold)
|
||||
bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
|
||||
srq->arm_req = false;
|
||||
@@ -668,7 +677,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
|
||||
fail:
|
||||
bnxt_qplib_free_hwq(res, &srq->hwq);
|
||||
kfree(srq->swq);
|
||||
exit:
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -732,15 +741,14 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
|
||||
struct rq_wqe *srqe;
|
||||
struct sq_sge *hw_sge;
|
||||
u32 sw_prod, sw_cons, count = 0;
|
||||
int i, rc = 0, next;
|
||||
int i, next;
|
||||
|
||||
spin_lock(&srq_hwq->lock);
|
||||
if (srq->start_idx == srq->last_idx) {
|
||||
dev_err(&srq_hwq->pdev->dev,
|
||||
"FP: SRQ (0x%x) is full!\n", srq->id);
|
||||
rc = -EINVAL;
|
||||
spin_unlock(&srq_hwq->lock);
|
||||
goto done;
|
||||
return -EINVAL;
|
||||
}
|
||||
next = srq->start_idx;
|
||||
srq->start_idx = srq->swq[next].next_idx;
|
||||
@@ -781,22 +789,19 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
|
||||
srq->arm_req = false;
|
||||
bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
|
||||
}
|
||||
done:
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* QP */
|
||||
|
||||
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
|
||||
{
|
||||
int rc = 0;
|
||||
int indx;
|
||||
|
||||
que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
|
||||
if (!que->swq) {
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
if (!que->swq)
|
||||
return -ENOMEM;
|
||||
|
||||
que->swq_start = 0;
|
||||
que->swq_last = que->max_wqe - 1;
|
||||
@@ -804,8 +809,8 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
|
||||
que->swq[indx].next_idx = indx + 1;
|
||||
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
|
||||
que->swq_last = 0;
|
||||
out:
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
||||
@@ -839,7 +844,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
||||
hwq_attr.type = HWQ_TYPE_QUEUE;
|
||||
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
|
||||
if (rc)
|
||||
goto exit;
|
||||
return rc;
|
||||
|
||||
rc = bnxt_qplib_alloc_init_swq(sq);
|
||||
if (rc)
|
||||
@@ -927,7 +932,6 @@ sq_swq:
|
||||
kfree(sq->swq);
|
||||
fail_sq:
|
||||
bnxt_qplib_free_hwq(res, &sq->hwq);
|
||||
exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -992,7 +996,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
|
||||
hwq_attr.type = HWQ_TYPE_QUEUE;
|
||||
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
|
||||
if (rc)
|
||||
goto exit;
|
||||
return rc;
|
||||
|
||||
rc = bnxt_qplib_alloc_init_swq(sq);
|
||||
if (rc)
|
||||
@@ -1140,7 +1144,6 @@ sq_swq:
|
||||
kfree(sq->swq);
|
||||
fail_sq:
|
||||
bnxt_qplib_free_hwq(res, &sq->hwq);
|
||||
exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -1614,7 +1617,7 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
|
||||
il_src = (void *)wqe->sg_list[indx].addr;
|
||||
t_len += len;
|
||||
if (t_len > qp->max_inline_data)
|
||||
goto bad;
|
||||
return -ENOMEM;
|
||||
while (len) {
|
||||
if (pull_dst) {
|
||||
pull_dst = false;
|
||||
@@ -1638,8 +1641,6 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
|
||||
}
|
||||
|
||||
return t_len;
|
||||
bad:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
|
||||
@@ -2069,7 +2070,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
||||
hwq_attr.sginfo = &cq->sg_info;
|
||||
rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
|
||||
if (rc)
|
||||
goto exit;
|
||||
return rc;
|
||||
|
||||
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
|
||||
CMDQ_BASE_OPCODE_CREATE_CQ,
|
||||
@@ -2104,7 +2105,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
||||
cq->dbinfo.hwq = &cq->hwq;
|
||||
cq->dbinfo.xid = cq->id;
|
||||
cq->dbinfo.db = cq->dpi->dbr;
|
||||
cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
|
||||
cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
|
||||
|
||||
bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
|
||||
|
||||
@@ -2112,7 +2113,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
|
||||
|
||||
fail:
|
||||
bnxt_qplib_free_hwq(res, &cq->hwq);
|
||||
exit:
|
||||
return rc;
|
||||
}
|
||||
|
||||
@@ -2505,7 +2505,6 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
|
||||
struct bnxt_qplib_qp *qp;
|
||||
struct bnxt_qplib_q *rq;
|
||||
u32 wr_id_idx;
|
||||
int rc = 0;
|
||||
|
||||
qp = (struct bnxt_qplib_qp *)((unsigned long)
|
||||
le64_to_cpu(hwcqe->qp_handle));
|
||||
@@ -2516,7 +2515,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
|
||||
if (qp->rq.flushed) {
|
||||
dev_dbg(&cq->hwq.pdev->dev,
|
||||
"%s: QP in Flush QP = %p\n", __func__, qp);
|
||||
goto done;
|
||||
return 0;
|
||||
}
|
||||
|
||||
cqe = *pcqe;
|
||||
@@ -2572,8 +2571,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
return rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
|
||||
@@ -2586,7 +2584,6 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
|
||||
struct bnxt_qplib_qp *qp;
|
||||
struct bnxt_qplib_q *rq;
|
||||
u32 wr_id_idx;
|
||||
int rc = 0;
|
||||
|
||||
qp = (struct bnxt_qplib_qp *)((unsigned long)
|
||||
le64_to_cpu(hwcqe->qp_handle));
|
||||
@@ -2597,7 +2594,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
|
||||
if (qp->rq.flushed) {
|
||||
dev_dbg(&cq->hwq.pdev->dev,
|
||||
"%s: QP in Flush QP = %p\n", __func__, qp);
|
||||
goto done;
|
||||
return 0;
|
||||
}
|
||||
cqe = *pcqe;
|
||||
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
|
||||
@@ -2659,8 +2656,8 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
|
||||
bnxt_qplib_add_flush_qp(qp);
|
||||
}
|
||||
}
|
||||
done:
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
|
||||
@@ -2687,7 +2684,6 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
|
||||
struct bnxt_qplib_srq *srq;
|
||||
struct bnxt_qplib_cqe *cqe;
|
||||
u32 wr_id_idx;
|
||||
int rc = 0;
|
||||
|
||||
qp = (struct bnxt_qplib_qp *)((unsigned long)
|
||||
le64_to_cpu(hwcqe->qp_handle));
|
||||
@@ -2698,7 +2694,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
|
||||
if (qp->rq.flushed) {
|
||||
dev_dbg(&cq->hwq.pdev->dev,
|
||||
"%s: QP in Flush QP = %p\n", __func__, qp);
|
||||
goto done;
|
||||
return 0;
|
||||
}
|
||||
cqe = *pcqe;
|
||||
cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
|
||||
@@ -2767,8 +2763,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
|
||||
}
|
||||
}
|
||||
|
||||
done:
|
||||
return rc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
|
||||
@@ -2790,11 +2785,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
|
||||
|
||||
qp = (struct bnxt_qplib_qp *)((unsigned long)
|
||||
le64_to_cpu(hwcqe->qp_handle));
|
||||
if (!qp) {
|
||||
dev_err(&cq->hwq.pdev->dev,
|
||||
"FP: CQ Process terminal qp is NULL\n");
|
||||
if (!qp)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Must block new posting of SQ and RQ */
|
||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||
|
||||
@@ -472,7 +472,7 @@ typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
|
||||
struct bnxt_qplib_nq {
|
||||
struct pci_dev *pdev;
|
||||
struct bnxt_qplib_res *res;
|
||||
char name[32];
|
||||
char *name;
|
||||
struct bnxt_qplib_hwq hwq;
|
||||
struct bnxt_qplib_nq_db nq_db;
|
||||
u16 ring_id;
|
||||
|
||||
File diff suppressed because it is too large
@@ -45,13 +45,13 @@
|
||||
#define RCFW_COMM_PCI_BAR_REGION 0
|
||||
#define RCFW_COMM_CONS_PCI_BAR_REGION 2
|
||||
#define RCFW_COMM_BASE_OFFSET 0x600
|
||||
#define RCFW_PF_COMM_PROD_OFFSET 0xc
|
||||
#define RCFW_VF_COMM_PROD_OFFSET 0xc
|
||||
#define RCFW_PF_VF_COMM_PROD_OFFSET 0xc
|
||||
#define RCFW_COMM_TRIG_OFFSET 0x100
|
||||
#define RCFW_COMM_SIZE 0x104
|
||||
|
||||
#define RCFW_DBR_PCI_BAR_REGION 2
|
||||
#define RCFW_DBR_BASE_PAGE_SHIFT 12
|
||||
#define RCFW_FW_STALL_MAX_TIMEOUT 40
|
||||
|
||||
/* Cmdq contains a fix number of a 16-Byte slots */
|
||||
struct bnxt_qplib_cmdqe {
|
||||
@@ -67,11 +67,12 @@ static inline void bnxt_qplib_rcfw_cmd_prep(struct cmdq_base *req,
|
||||
req->cmd_size = cmd_size;
|
||||
}
|
||||
|
||||
/* Shadow queue depth for non blocking command */
|
||||
#define RCFW_CMD_NON_BLOCKING_SHADOW_QD 64
|
||||
#define RCFW_CMD_WAIT_TIME_MS 20000 /* 20 Seconds timeout */
|
||||
|
||||
/* CMDQ elements */
|
||||
#define BNXT_QPLIB_CMDQE_MAX_CNT_256 256
|
||||
#define BNXT_QPLIB_CMDQE_MAX_CNT_8192 8192
|
||||
#define BNXT_QPLIB_CMDQE_MAX_CNT 8192
|
||||
#define BNXT_QPLIB_CMDQE_BYTES(depth) ((depth) * BNXT_QPLIB_CMDQE_UNITS)
|
||||
|
||||
static inline u32 bnxt_qplib_cmdqe_npages(u32 depth)
|
||||
@@ -89,6 +90,26 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
|
||||
return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
|
||||
}
|
||||
|
||||
/* Get the number of command units required for the req. The
|
||||
* function returns correct value only if called before
|
||||
* setting using bnxt_qplib_set_cmd_slots
|
||||
*/
|
||||
static inline u32 bnxt_qplib_get_cmd_slots(struct cmdq_base *req)
|
||||
{
|
||||
u32 cmd_units = 0;
|
||||
|
||||
if (HAS_TLV_HEADER(req)) {
|
||||
struct roce_tlv *tlv_req = (struct roce_tlv *)req;
|
||||
|
||||
cmd_units = tlv_req->total_size;
|
||||
} else {
|
||||
cmd_units = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
|
||||
BNXT_QPLIB_CMDQE_UNITS;
|
||||
}
|
||||
|
||||
return cmd_units;
|
||||
}
|
||||
|
||||
static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
|
||||
{
|
||||
u32 cmd_byte = 0;
|
||||
@@ -106,11 +127,10 @@ static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
|
||||
return cmd_byte;
|
||||
}
|
||||
|
||||
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
|
||||
#define RCFW_MAX_COOKIE_VALUE (BNXT_QPLIB_CMDQE_MAX_CNT - 1)
|
||||
#define RCFW_CMD_IS_BLOCKING 0x8000
|
||||
#define RCFW_BLOCKED_CMD_WAIT_COUNT 20000000UL /* 20 sec */
|
||||
|
||||
#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL
|
||||
#define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000DULL
|
||||
|
||||
/* Crsq buf is 1024-Byte */
|
||||
struct bnxt_qplib_crsbe {
|
||||
@@ -132,6 +152,12 @@ typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
|
||||
struct bnxt_qplib_crsqe {
|
||||
struct creq_qp_event *resp;
|
||||
u32 req_size;
|
||||
/* Free slots at the time of submission */
|
||||
u32 free_slots;
|
||||
u8 opcode;
|
||||
bool is_waiter_alive;
|
||||
bool is_internal_cmd;
|
||||
bool is_in_used;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_rcfw_sbuf {
|
||||
@@ -149,7 +175,7 @@ struct bnxt_qplib_qp_node {
|
||||
|
||||
#define FIRMWARE_INITIALIZED_FLAG (0)
|
||||
#define FIRMWARE_FIRST_FLAG (31)
|
||||
#define FIRMWARE_TIMED_OUT (3)
|
||||
#define FIRMWARE_STALL_DETECTED (3)
|
||||
#define ERR_DEVICE_DETACHED (4)
|
||||
|
||||
struct bnxt_qplib_cmdq_mbox {
|
||||
@@ -163,7 +189,7 @@ struct bnxt_qplib_cmdq_ctx {
|
||||
struct bnxt_qplib_cmdq_mbox cmdq_mbox;
|
||||
wait_queue_head_t waitq;
|
||||
unsigned long flags;
|
||||
unsigned long *cmdq_bitmap;
|
||||
unsigned long last_seen;
|
||||
u32 seq_num;
|
||||
};
|
||||
|
||||
@@ -186,6 +212,7 @@ struct bnxt_qplib_creq_ctx {
|
||||
u16 ring_id;
|
||||
int msix_vec;
|
||||
bool requested; /*irq handler installed */
|
||||
char *irq_name;
|
||||
};
|
||||
|
||||
/* RCFW Communication Channels */
|
||||
@@ -200,6 +227,11 @@ struct bnxt_qplib_rcfw {
|
||||
u64 oos_prev;
|
||||
u32 init_oos_stats;
|
||||
u32 cmdq_depth;
|
||||
atomic_t rcfw_intr_enabled;
|
||||
struct semaphore rcfw_inflight;
|
||||
atomic_t timeout_send;
|
||||
/* cached from chip cctx for quick reference in slow path */
|
||||
u16 max_timeout;
|
||||
};
|
||||
|
||||
struct bnxt_qplib_cmdqmsg {
|
||||
@@ -234,7 +266,7 @@ int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
|
||||
bool need_init);
|
||||
int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
|
||||
int msix_vector,
|
||||
int cp_bar_reg_off, int virt_fn,
|
||||
int cp_bar_reg_off,
|
||||
aeq_handler_t aeq_handler);
|
||||
|
||||
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
|
||||
|
||||
@@ -696,44 +696,76 @@ static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
|
||||
}
|
||||
|
||||
/* DPIs */
|
||||
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
|
||||
struct bnxt_qplib_dpi *dpi,
|
||||
void *app)
|
||||
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_dpi *dpi,
|
||||
void *app, u8 type)
|
||||
{
|
||||
struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
|
||||
struct bnxt_qplib_reg_desc *reg;
|
||||
u32 bit_num;
|
||||
u64 umaddr;
|
||||
|
||||
reg = &dpit->wcreg;
|
||||
mutex_lock(&res->dpi_tbl_lock);
|
||||
|
||||
bit_num = find_first_bit(dpit->tbl, dpit->max);
|
||||
if (bit_num == dpit->max)
|
||||
if (bit_num == dpit->max) {
|
||||
mutex_unlock(&res->dpi_tbl_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Found unused DPI */
|
||||
clear_bit(bit_num, dpit->tbl);
|
||||
dpit->app_tbl[bit_num] = app;
|
||||
|
||||
dpi->dpi = bit_num;
|
||||
dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
|
||||
dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);
|
||||
dpi->bit = bit_num;
|
||||
dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;
|
||||
|
||||
umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
|
||||
dpi->umdbr = umaddr;
|
||||
|
||||
switch (type) {
|
||||
case BNXT_QPLIB_DPI_TYPE_KERNEL:
|
||||
/* privileged dbr was already mapped just initialize it. */
|
||||
dpi->umdbr = dpit->ucreg.bar_base +
|
||||
dpit->ucreg.offset + bit_num * PAGE_SIZE;
|
||||
dpi->dbr = dpit->priv_db;
|
||||
dpi->dpi = dpi->bit;
|
||||
break;
|
||||
case BNXT_QPLIB_DPI_TYPE_WC:
|
||||
dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
|
||||
break;
|
||||
default:
|
||||
dpi->dbr = ioremap(umaddr, PAGE_SIZE);
|
||||
break;
|
||||
}
|
||||
|
||||
dpi->type = type;
|
||||
mutex_unlock(&res->dpi_tbl_lock);
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
|
||||
struct bnxt_qplib_dpi_tbl *dpit,
|
||||
struct bnxt_qplib_dpi *dpi)
|
||||
struct bnxt_qplib_dpi *dpi)
|
||||
{
|
||||
if (dpi->dpi >= dpit->max) {
|
||||
dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
|
||||
dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
|
||||
dpi->dpi);
|
||||
struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
|
||||
|
||||
mutex_lock(&res->dpi_tbl_lock);
|
||||
if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL)
|
||||
pci_iounmap(res->pdev, dpi->dbr);
|
||||
|
||||
if (test_and_set_bit(dpi->bit, dpit->tbl)) {
|
||||
dev_warn(&res->pdev->dev,
|
||||
"Freeing an unused DPI? dpi = %d, bit = %d\n",
|
||||
dpi->dpi, dpi->bit);
|
||||
mutex_unlock(&res->dpi_tbl_lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dpit->app_tbl)
|
||||
dpit->app_tbl[dpi->dpi] = NULL;
|
||||
dpit->app_tbl[dpi->bit] = NULL;
|
||||
memset(dpi, 0, sizeof(*dpi));
|
||||
|
||||
mutex_unlock(&res->dpi_tbl_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -742,52 +774,38 @@ static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
|
||||
{
|
||||
kfree(dpit->tbl);
|
||||
kfree(dpit->app_tbl);
|
||||
if (dpit->dbr_bar_reg_iomem)
|
||||
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
|
||||
memset(dpit, 0, sizeof(*dpit));
|
||||
dpit->tbl = NULL;
|
||||
dpit->app_tbl = NULL;
|
||||
dpit->max = 0;
|
||||
}
|
||||
|
||||
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi_tbl *dpit,
u32 dbr_offset)
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_dev_attr *dev_attr)
{
u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
resource_size_t bar_reg_base;
u32 dbr_len, bytes;
struct bnxt_qplib_dpi_tbl *dpit;
struct bnxt_qplib_reg_desc *reg;
unsigned long bar_len;
u32 dbr_offset;
u32 bytes;

if (dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
dbr_bar_reg);
return -EALREADY;
dpit = &res->dpi_tbl;
reg = &dpit->wcreg;

if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
/* Offest should come from L2 driver */
dbr_offset = dev_attr->l2_db_size;
dpit->ucreg.offset = dbr_offset;
dpit->wcreg.offset = dbr_offset;
}

bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
if (!bar_reg_base) {
dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
dbr_bar_reg);
return -ENOMEM;
}
bar_len = pci_resource_len(res->pdev, reg->bar_id);
dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
if (dev_attr->max_dpi)
dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);

dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
return -ENOMEM;
}

dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
return -ENOMEM;
}

dpit->unmapped_dbr = bar_reg_base + dbr_offset;
dpit->max = dbr_len / PAGE_SIZE;

dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
if (!dpit->app_tbl)
goto unmap_io;
return -ENOMEM;

bytes = dpit->max >> 3;
if (!bytes)
@@ -797,17 +815,14 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
if (!dpit->tbl) {
kfree(dpit->app_tbl);
dpit->app_tbl = NULL;
goto unmap_io;
return -ENOMEM;
}

memset((u8 *)dpit->tbl, 0xFF, bytes);
dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;

return 0;

unmap_io:
pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
dpit->dbr_bar_reg_iomem = NULL;
return -ENOMEM;
}

/* Stats */
@@ -874,7 +889,7 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
if (rc)
goto fail;

rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr);
if (rc)
goto fail;

@@ -884,6 +899,46 @@ fail:
return rc;
}

void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
{
struct bnxt_qplib_reg_desc *reg;

reg = &res->dpi_tbl.ucreg;
if (reg->bar_reg)
pci_iounmap(res->pdev, reg->bar_reg);
reg->bar_reg = NULL;
reg->bar_base = 0;
reg->len = 0;
reg->bar_id = 0;
}

int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
{
struct bnxt_qplib_reg_desc *ucreg;
struct bnxt_qplib_reg_desc *wcreg;

wcreg = &res->dpi_tbl.wcreg;
wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);

ucreg = &res->dpi_tbl.ucreg;
ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
ucreg->len = ucreg->offset + PAGE_SIZE;
if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d",
(int)ucreg->len);
return -EINVAL;
}
ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
if (!ucreg->bar_reg) {
dev_err(&res->pdev->dev, "privileged dpi map failed!");
return -ENOMEM;
}

return 0;
}

int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
int comp;

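Editor's note: the reworked bnxt_qplib_alloc_dpi_tbl() above sizes the DPI table from the doorbell BAR rather than from a fixed doorbell length: usable BAR bytes past the doorbell offset divided by PAGE_SIZE, clamped to the firmware-reported max_dpi when one is available. The standalone C sketch below is not part of the patch; the helper name and the example values are invented purely to illustrate that arithmetic.

/* Illustrative only: mirrors the DPI-count arithmetic, not driver code. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

static uint32_t dpi_tbl_max(uint64_t bar_len, uint64_t db_offset,
                            uint32_t fw_max_dpi)
{
        /* Doorbell pages that fit in the BAR after the reserved offset. */
        uint32_t max = (uint32_t)((bar_len - db_offset) / PAGE_SIZE);

        /* Honour the firmware-reported limit when it is non-zero. */
        if (fw_max_dpi && fw_max_dpi < max)
                max = fw_max_dpi;
        return max;
}

int main(void)
{
        /* e.g. a 1 MiB doorbell BAR with the first 64 KiB reserved */
        printf("max DPIs = %u\n",
               (unsigned)dpi_tbl_max(1ull << 20, 0x10000, 128));
        return 0;
}
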
@@ -47,7 +47,7 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;

struct bnxt_qplib_drv_modes {
u8 wqe_mode;
/* Other modes to follow here */
bool db_push;
};

struct bnxt_qplib_chip_ctx {
@@ -55,9 +55,14 @@ struct bnxt_qplib_chip_ctx {
u8 chip_rev;
u8 chip_metal;
u16 hw_stats_size;
u16 hwrm_cmd_max_timeout;
struct bnxt_qplib_drv_modes modes;
u64 hwrm_intf_ver;
};

#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
#define BNXT_QPLIB_DBR_VF_DB_OFFSET 0x4000

#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
@@ -109,6 +114,7 @@ enum bnxt_qplib_hwrm_pg_size {
struct bnxt_qplib_reg_desc {
u8 bar_id;
resource_size_t bar_base;
unsigned long offset;
void __iomem *bar_reg;
size_t len;
};
@@ -185,18 +191,27 @@ struct bnxt_qplib_sgid_tbl {
u8 *vlan;
};

enum {
BNXT_QPLIB_DPI_TYPE_KERNEL = 0,
BNXT_QPLIB_DPI_TYPE_UC = 1,
BNXT_QPLIB_DPI_TYPE_WC = 2
};

struct bnxt_qplib_dpi {
u32 dpi;
u32 bit;
void __iomem *dbr;
u64 umdbr;
u8 type;
};

struct bnxt_qplib_dpi_tbl {
void **app_tbl;
unsigned long *tbl;
u16 max;
void __iomem *dbr_bar_reg_iomem;
u64 unmapped_dbr;
struct bnxt_qplib_reg_desc ucreg; /* Hold entire DB bar. */
struct bnxt_qplib_reg_desc wcreg;
void __iomem *priv_db;
};

struct bnxt_qplib_stats {
@@ -241,7 +256,6 @@ struct bnxt_qplib_ctx {
struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
struct bnxt_qplib_vf_res vf_res;
u64 hwrm_intf_ver;
};

struct bnxt_qplib_res {
@@ -253,6 +267,8 @@ struct bnxt_qplib_res {
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
/* To protect the dpi table bit map */
struct mutex dpi_tbl_lock;
bool prio;
bool is_vf;
};
@@ -344,11 +360,10 @@ int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
struct bnxt_qplib_dpi *dpi,
void *app);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi *dpi,
void *app, u8 type);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi_tbl *dpi_tbl,
struct bnxt_qplib_dpi *dpi);
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
@@ -361,6 +376,9 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx,
bool virt_fn, bool is_p5);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);

int bnxt_qplib_determine_atomics(struct pci_dev *dev);

static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_hwq *hwq, u32 cnt)

@@ -170,6 +170,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}

if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
attr->max_dpi = le32_to_cpu(sb->max_dpi);

attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
@@ -233,10 +236,6 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
int index;

if (!sgid_tbl) {
dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (!sgid_tbl->active) {
dev_err(&res->pdev->dev, "SGID table has no active entries\n");
@@ -297,10 +296,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
int i, free_idx;

if (!sgid_tbl) {
dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (sgid_tbl->active == sgid_tbl->max) {
dev_err(&res->pdev->dev, "SGID table is full\n");
@@ -468,13 +463,14 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
return 0;
}

void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_destroy_ah_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_destroy_ah req = {};
int rc;

/* Clean up the AH table in the device */
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
@@ -485,7 +481,8 @@ void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,

bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
sizeof(resp), block);
bnxt_qplib_rcfw_send_message(rcfw, &msg);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
return rc;
}

/* MRW */

@@ -72,6 +72,7 @@ struct bnxt_qplib_dev_attr {
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
bool is_atomic;
u16 dev_cap_flags;
u32 max_dpi;
};

struct bnxt_qplib_pd {
@@ -327,8 +328,8 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block);
void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mrw);
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,

@@ -128,13 +128,8 @@ struct erdma_devattr {

int numa_node;
enum erdma_cc_alg cc;
u32 grp_num;
u32 irq_num;

bool disable_dwqe;
u16 dwqe_pages;
u16 dwqe_entries;

u32 max_qp;
u32 max_send_wr;
u32 max_recv_wr;
@@ -215,15 +210,6 @@ struct erdma_dev {
u32 next_alloc_qpn;
u32 next_alloc_cqn;

spinlock_t db_bitmap_lock;
/* We provide max 64 uContexts that each has one SQ doorbell Page. */
DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
/*
* We provide max 496 uContexts that each has one SQ normal Db,
* and one directWQE db.
*/
DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);

atomic_t num_ctx;
struct list_head cep_list;
};
@@ -268,6 +254,8 @@ static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
return FIELD_GET(filed_mask, val);
}

#define ERDMA_GET(val, name) FIELD_GET(ERDMA_CMD_##name##_MASK, val)

int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

@@ -82,19 +82,6 @@
#define ERDMA_BAR_CQDB_SPACE_OFFSET \
(ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)

/* Doorbell page resources related. */
/*
* Max # of parallelly issued directSQE is 3072 per device,
* hardware organizes this into 24 group, per group has 128 credits.
*/
#define ERDMA_DWQE_MAX_GRP_CNT 24
#define ERDMA_DWQE_NUM_PER_GRP 128

#define ERDMA_DWQE_TYPE0_CNT 64
#define ERDMA_DWQE_TYPE1_CNT 496
/* type1 DB contains 2 DBs, takes 256Byte. */
#define ERDMA_DWQE_TYPE1_CNT_PER_PAGE 16

#define ERDMA_SDB_SHARED_PAGE_INDEX 95

/* Doorbell related. */
@@ -134,7 +121,7 @@

/* CMDQ related. */
#define ERDMA_CMDQ_MAX_OUTSTANDING 128
#define ERDMA_CMDQ_SQE_SIZE 64
#define ERDMA_CMDQ_SQE_SIZE 128

/* cmdq sub module definition. */
enum CMDQ_WQE_SUB_MOD {
@@ -159,6 +146,9 @@ enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_DESTROY_EQ = 1,
CMDQ_OPCODE_QUERY_FW_INFO = 2,
CMDQ_OPCODE_CONF_MTU = 3,
CMDQ_OPCODE_CONF_DEVICE = 5,
CMDQ_OPCODE_ALLOC_DB = 8,
CMDQ_OPCODE_FREE_DB = 9,
};

/* cmdq-SQE HDR */
@@ -196,11 +186,41 @@ struct erdma_cmdq_destroy_eq_req {
u8 qtype;
};

/* config device cfg */
#define ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK BIT(31)
#define ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK GENMASK(4, 0)

struct erdma_cmdq_config_device_req {
u64 hdr;
u32 cfg;
u32 rsvd[5];
};

struct erdma_cmdq_config_mtu_req {
u64 hdr;
u32 mtu;
};

/* ext db requests(alloc and free) cfg */
#define ERDMA_CMD_EXT_DB_CQ_EN_MASK BIT(2)
#define ERDMA_CMD_EXT_DB_RQ_EN_MASK BIT(1)
#define ERDMA_CMD_EXT_DB_SQ_EN_MASK BIT(0)

struct erdma_cmdq_ext_db_req {
u64 hdr;
u32 cfg;
u16 rdb_off;
u16 sdb_off;
u16 rsvd0;
u16 cdb_off;
u32 rsvd1[3];
};

/* alloc db response qword 0 definition */
#define ERDMA_CMD_ALLOC_DB_RESP_RDB_MASK GENMASK_ULL(63, 48)
#define ERDMA_CMD_ALLOC_DB_RESP_CDB_MASK GENMASK_ULL(47, 32)
#define ERDMA_CMD_ALLOC_DB_RESP_SDB_MASK GENMASK_ULL(15, 0)

/* create_cq cfg0 */
#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
@@ -209,8 +229,12 @@ struct erdma_cmdq_config_mtu_req {
/* create_cq cfg1 */
#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
#define ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK BIT(11)
#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)

/* create_cq cfg2 */
#define ERDMA_CMD_CREATE_CQ_DB_CFG_MASK GENMASK(15, 0)

struct erdma_cmdq_create_cq_req {
u64 hdr;
u32 cfg0;
@@ -219,6 +243,7 @@ struct erdma_cmdq_create_cq_req {
u32 cfg1;
u64 cq_db_info_addr;
u32 first_page_offset;
u32 cfg2;
};

/* regmr/deregmr cfg0 */
@@ -278,6 +303,7 @@ struct erdma_cmdq_modify_qp_req {

/* create qp cqn_mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
#define ERDMA_CMD_CREATE_QP_DB_CFG_MASK BIT(25)
#define ERDMA_CMD_CREATE_QP_CQN_MASK GENMASK(23, 0)

/* create qp mtt_cfg */
@@ -285,6 +311,10 @@ struct erdma_cmdq_modify_qp_req {
#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)

/* create qp db cfg */
#define ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK GENMASK(31, 16)
#define ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK GENMASK(15, 0)

#define ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK GENMASK_ULL(31, 0)

struct erdma_cmdq_create_qp_req {
@@ -299,6 +329,11 @@ struct erdma_cmdq_create_qp_req {
u32 rq_mtt_cfg;
u64 sq_db_info_dma_addr;
u64 rq_db_info_dma_addr;

u64 sq_mtt_entry[3];
u64 rq_mtt_entry[3];

u32 db_cfg;
};

struct erdma_cmdq_destroy_qp_req {
@@ -329,6 +364,7 @@ struct erdma_cmdq_reflush_req {

enum {
ERDMA_DEV_CAP_FLAGS_ATOMIC = 1 << 7,
ERDMA_DEV_CAP_FLAGS_EXTEND_DB = 1 << 3,
};

#define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0)

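Editor's note: the ERDMA_CMD_ALLOC_DB_RESP_* masks added above pack three 16-bit doorbell offsets into one response qword (bits 63:48 for the RQ doorbell, 47:32 for the CQ doorbell, 15:0 for the SQ doorbell). The standalone sketch below is illustrative only; the macro names and the example value are invented, and plain shifts stand in for the kernel's GENMASK_ULL()/FIELD_GET() that the driver uses via ERDMA_GET() when it decodes val0 in alloc_db_resources().

/* Illustrative only: decoding an ALLOC_DB-style response qword. */
#include <stdint.h>
#include <stdio.h>

#define RESP_RDB_SHIFT 48      /* bits 63:48 */
#define RESP_CDB_SHIFT 32      /* bits 47:32 */
#define RESP_SDB_SHIFT 0       /* bits 15:0  */
#define RESP_FIELD(val, shift) ((unsigned)(((val) >> (shift)) & 0xffff))

int main(void)
{
        uint64_t val0 = 0x00aa00bb0000000cULL;  /* made-up response qword */

        printf("sdb_off=%u rdb_off=%u cdb_off=%u\n",
               RESP_FIELD(val0, RESP_SDB_SHIFT),
               RESP_FIELD(val0, RESP_RDB_SHIFT),
               RESP_FIELD(val0, RESP_CDB_SHIFT));
        return 0;
}
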
@@ -130,33 +130,6 @@ static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}

static void erdma_dwqe_resource_init(struct erdma_dev *dev)
{
int total_pages, type0, type1;

dev->attrs.grp_num = erdma_reg_read32(dev, ERDMA_REGS_GRP_NUM_REG);

if (dev->attrs.grp_num < 4)
dev->attrs.disable_dwqe = true;
else
dev->attrs.disable_dwqe = false;

/* One page contains 4 goups. */
total_pages = dev->attrs.grp_num * 4;

if (dev->attrs.grp_num >= ERDMA_DWQE_MAX_GRP_CNT) {
dev->attrs.grp_num = ERDMA_DWQE_MAX_GRP_CNT;
type0 = ERDMA_DWQE_TYPE0_CNT;
type1 = ERDMA_DWQE_TYPE1_CNT / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
} else {
type1 = total_pages / 3;
type0 = total_pages - type1 - 1;
}

dev->attrs.dwqe_pages = type0;
dev->attrs.dwqe_entries = type1 * ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
}

static int erdma_request_vectors(struct erdma_dev *dev)
{
int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
@@ -199,8 +172,6 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
int ret;

erdma_dwqe_resource_init(dev);

ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret)
@@ -426,6 +397,22 @@ static int erdma_dev_attrs_init(struct erdma_dev *dev)
return err;
}

static int erdma_device_config(struct erdma_dev *dev)
{
struct erdma_cmdq_config_device_req req = {};

if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_EXTEND_DB))
return 0;

erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_CONF_DEVICE);

req.cfg = FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PGSHIFT_MASK, PAGE_SHIFT) |
FIELD_PREP(ERDMA_CMD_CONFIG_DEVICE_PS_EN_MASK, 1);

return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

static int erdma_res_cb_init(struct erdma_dev *dev)
{
int i, j;
@@ -512,6 +499,10 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;

ret = erdma_device_config(dev);
if (ret)
return ret;

ibdev->node_type = RDMA_NODE_RNIC;
memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));

@@ -537,10 +528,6 @@ static int erdma_ib_device_add(struct pci_dev *pdev)
if (ret)
return ret;

spin_lock_init(&dev->db_bitmap_lock);
bitmap_zero(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
bitmap_zero(dev->sdb_entry, ERDMA_DWQE_TYPE1_CNT);

atomic_set(&dev->num_ctx, 0);

mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);

@@ -19,10 +19,11 @@
#include "erdma_cm.h"
#include "erdma_verbs.h"

static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
{
struct erdma_cmdq_create_qp_req req;
struct erdma_dev *dev = to_edev(qp->ibqp.device);
struct erdma_pd *pd = to_epd(qp->ibqp.pd);
struct erdma_cmdq_create_qp_req req;
struct erdma_uqp *user_qp;
u64 resp0, resp1;
int err;
@@ -93,6 +94,16 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)

req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;

if (uctx->ext_db.enable) {
req.sq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_DB_CFG_MASK, 1);
req.db_cfg =
FIELD_PREP(ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK,
uctx->ext_db.sdb_off) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_RQDB_CFG_MASK,
uctx->ext_db.rdb_off);
}
}

err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
@@ -146,11 +157,12 @@ post_cmd:
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}

static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
{
struct erdma_dev *dev = to_edev(cq->ibcq.device);
struct erdma_cmdq_create_cq_req req;
u32 page_size;
struct erdma_mem *mtt;
u32 page_size;

erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
CMDQ_OPCODE_CREATE_CQ);
@@ -192,6 +204,13 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)

req.first_page_offset = mtt->page_offset;
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;

if (uctx->ext_db.enable) {
req.cfg1 |= FIELD_PREP(
ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK, 1);
req.cfg2 = FIELD_PREP(ERDMA_CMD_CREATE_CQ_DB_CFG_MASK,
uctx->ext_db.cdb_off);
}
}

return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
@@ -753,7 +772,7 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->attrs.state = ERDMA_QP_STATE_IDLE;
INIT_DELAYED_WORK(&qp->reflush_dwork, erdma_flush_worker);

ret = create_qp_cmd(dev, qp);
ret = create_qp_cmd(uctx, qp);
if (ret)
goto err_out_cmd;

@@ -1130,62 +1149,73 @@ void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(entry);
}

#define ERDMA_SDB_PAGE 0
#define ERDMA_SDB_ENTRY 1
#define ERDMA_SDB_SHARED 2

static void alloc_db_resources(struct erdma_dev *dev,
struct erdma_ucontext *ctx)
static int alloc_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx,
bool ext_db_en)
{
u32 bitmap_idx;
struct erdma_devattr *attrs = &dev->attrs;
struct erdma_cmdq_ext_db_req req = {};
u64 val0, val1;
int ret;

if (attrs->disable_dwqe)
goto alloc_normal_db;
/*
* CAP_SYS_RAWIO is required if hardware does not support extend
* doorbell mechanism.
*/
if (!ext_db_en && !capable(CAP_SYS_RAWIO))
return -EPERM;

/* Try to alloc independent SDB page. */
spin_lock(&dev->db_bitmap_lock);
bitmap_idx = find_first_zero_bit(dev->sdb_page, attrs->dwqe_pages);
if (bitmap_idx != attrs->dwqe_pages) {
set_bit(bitmap_idx, dev->sdb_page);
spin_unlock(&dev->db_bitmap_lock);

ctx->sdb_type = ERDMA_SDB_PAGE;
ctx->sdb_idx = bitmap_idx;
ctx->sdb_page_idx = bitmap_idx;
ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
(bitmap_idx << PAGE_SHIFT);
ctx->sdb_page_off = 0;

return;
if (!ext_db_en) {
ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET;
ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;
return 0;
}

bitmap_idx = find_first_zero_bit(dev->sdb_entry, attrs->dwqe_entries);
if (bitmap_idx != attrs->dwqe_entries) {
set_bit(bitmap_idx, dev->sdb_entry);
spin_unlock(&dev->db_bitmap_lock);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_ALLOC_DB);

ctx->sdb_type = ERDMA_SDB_ENTRY;
ctx->sdb_idx = bitmap_idx;
ctx->sdb_page_idx = attrs->dwqe_pages +
bitmap_idx / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
ctx->sdb_page_off = bitmap_idx % ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);

ctx->sdb = dev->func_bar_addr + ERDMA_BAR_SQDB_SPACE_OFFSET +
(ctx->sdb_page_idx << PAGE_SHIFT);
ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &val0, &val1);
if (ret)
return ret;

ctx->ext_db.enable = true;
ctx->ext_db.sdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_SDB);
ctx->ext_db.rdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_RDB);
ctx->ext_db.cdb_off = ERDMA_GET(val0, ALLOC_DB_RESP_CDB);

ctx->sdb = dev->func_bar_addr + (ctx->ext_db.sdb_off << PAGE_SHIFT);
ctx->cdb = dev->func_bar_addr + (ctx->ext_db.rdb_off << PAGE_SHIFT);
ctx->rdb = dev->func_bar_addr + (ctx->ext_db.cdb_off << PAGE_SHIFT);

return 0;
}

static void free_db_resources(struct erdma_dev *dev, struct erdma_ucontext *ctx)
{
struct erdma_cmdq_ext_db_req req = {};
int ret;

if (!ctx->ext_db.enable)
return;
}

spin_unlock(&dev->db_bitmap_lock);
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_FREE_DB);

alloc_normal_db:
ctx->sdb_type = ERDMA_SDB_SHARED;
ctx->sdb_idx = 0;
ctx->sdb_page_idx = ERDMA_SDB_SHARED_PAGE_INDEX;
ctx->sdb_page_off = 0;
req.cfg = FIELD_PREP(ERDMA_CMD_EXT_DB_CQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_RQ_EN_MASK, 1) |
FIELD_PREP(ERDMA_CMD_EXT_DB_SQ_EN_MASK, 1);

ctx->sdb = dev->func_bar_addr + (ctx->sdb_page_idx << PAGE_SHIFT);
req.sdb_off = ctx->ext_db.sdb_off;
req.rdb_off = ctx->ext_db.rdb_off;
req.cdb_off = ctx->ext_db.cdb_off;

ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (ret)
ibdev_err_ratelimited(&dev->ibdev,
"free db resources failed %d", ret);
}

static void erdma_uctx_user_mmap_entries_remove(struct erdma_ucontext *uctx)
@@ -1207,71 +1237,67 @@ int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
goto err_out;
}

INIT_LIST_HEAD(&ctx->dbrecords_page_list);
mutex_init(&ctx->dbrecords_page_mutex);

alloc_db_resources(dev, ctx);

ctx->rdb = dev->func_bar_addr + ERDMA_BAR_RQDB_SPACE_OFFSET;
ctx->cdb = dev->func_bar_addr + ERDMA_BAR_CQDB_SPACE_OFFSET;

if (udata->outlen < sizeof(uresp)) {
ret = -EINVAL;
goto err_out;
}

INIT_LIST_HEAD(&ctx->dbrecords_page_list);
mutex_init(&ctx->dbrecords_page_mutex);

ret = alloc_db_resources(dev, ctx,
!!(dev->attrs.cap_flags &
ERDMA_DEV_CAP_FLAGS_EXTEND_DB));
if (ret)
goto err_out;

ctx->sq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->sdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.sdb);
if (!ctx->sq_db_mmap_entry) {
ret = -ENOMEM;
goto err_out;
goto err_free_ext_db;
}

ctx->rq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->rdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.rdb);
if (!ctx->rq_db_mmap_entry) {
ret = -EINVAL;
goto err_out;
goto err_put_mmap_entries;
}

ctx->cq_db_mmap_entry = erdma_user_mmap_entry_insert(
ctx, (void *)ctx->cdb, PAGE_SIZE, ERDMA_MMAP_IO_NC, &uresp.cdb);
if (!ctx->cq_db_mmap_entry) {
ret = -EINVAL;
goto err_out;
goto err_put_mmap_entries;
}

uresp.dev_id = dev->pdev->device;
uresp.sdb_type = ctx->sdb_type;
uresp.sdb_offset = ctx->sdb_page_off;

ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (ret)
goto err_out;
goto err_put_mmap_entries;

return 0;

err_out:
err_put_mmap_entries:
erdma_uctx_user_mmap_entries_remove(ctx);

err_free_ext_db:
free_db_resources(dev, ctx);

err_out:
atomic_dec(&dev->num_ctx);
return ret;
}

void erdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
struct erdma_ucontext *ctx = to_ectx(ibctx);
struct erdma_dev *dev = to_edev(ibctx->device);

spin_lock(&dev->db_bitmap_lock);
if (ctx->sdb_type == ERDMA_SDB_PAGE)
clear_bit(ctx->sdb_idx, dev->sdb_page);
else if (ctx->sdb_type == ERDMA_SDB_ENTRY)
clear_bit(ctx->sdb_idx, dev->sdb_entry);
struct erdma_ucontext *ctx = to_ectx(ibctx);

erdma_uctx_user_mmap_entries_remove(ctx);

spin_unlock(&dev->db_bitmap_lock);

free_db_resources(dev, ctx);
atomic_dec(&dev->num_ctx);
}

@@ -1438,7 +1464,7 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out_xa;
}

ret = create_cq_cmd(dev, cq);
ret = create_cq_cmd(ctx, cq);
if (ret)
goto err_free_res;

@@ -31,13 +31,18 @@ struct erdma_user_mmap_entry {
u8 mmap_flag;
};

struct erdma_ext_db_info {
bool enable;
u16 sdb_off;
u16 rdb_off;
u16 cdb_off;
};

struct erdma_ucontext {
struct ib_ucontext ibucontext;

u32 sdb_type;
u32 sdb_idx;
u32 sdb_page_idx;
u32 sdb_page_off;
struct erdma_ext_db_info ext_db;

u64 sdb;
u64 rdb;
u64 cdb;

@@ -215,11 +215,11 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

ret = sdma_txadd_page(dd,
NULL,
txreq,
skb_frag_page(frag),
frag->bv_offset,
skb_frag_size(frag));
skb_frag_size(frag),
NULL, NULL, NULL);
if (unlikely(ret))
break;
}

@@ -19,8 +19,7 @@ static int mmu_notifier_range_start(struct mmu_notifier *,
const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
struct list_head *del_list);
static void release_immediate(struct kref *refcount);
static void handle_remove(struct work_struct *work);

static const struct mmu_notifier_ops mn_opts = {
@@ -106,7 +105,11 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
}
spin_unlock_irqrestore(&handler->lock, flags);

do_remove(handler, &del_list);
while (!list_empty(&del_list)) {
rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
list_del(&rbnode->list);
kref_put(&rbnode->refcount, release_immediate);
}

/* Now the mm may be freed. */
mmdrop(handler->mn.mm);
@@ -121,7 +124,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
unsigned long flags;
int ret = 0;

trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
trace_hfi1_mmu_rb_insert(mnode);

if (current->mm != handler->mn.mm)
return -EPERM;
@@ -134,12 +137,6 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
}
__mmu_int_rb_insert(mnode, &handler->root);
list_add_tail(&mnode->list, &handler->lru_list);

ret = handler->ops->insert(handler->ops_arg, mnode);
if (ret) {
__mmu_int_rb_remove(mnode, &handler->root);
list_del(&mnode->list); /* remove from LRU list */
}
mnode->handler = handler;
unlock:
spin_unlock_irqrestore(&handler->lock, flags);
@@ -183,6 +180,49 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}

/*
* Must NOT call while holding mnode->handler->lock.
* mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
* spinlock.
*/
static void release_immediate(struct kref *refcount)
{
struct mmu_rb_node *mnode =
container_of(refcount, struct mmu_rb_node, refcount);
trace_hfi1_mmu_release_node(mnode);
mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
}

/* Caller must hold mnode->handler->lock */
static void release_nolock(struct kref *refcount)
{
struct mmu_rb_node *mnode =
container_of(refcount, struct mmu_rb_node, refcount);
list_move(&mnode->list, &mnode->handler->del_list);
queue_work(mnode->handler->wq, &mnode->handler->del_work);
}

/*
* struct mmu_rb_node->refcount kref_put() callback.
* Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
* handler->del_work on handler->wq.
* Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
* Acquires mmu_rb_node->handler->lock; do not call while already holding
* handler->lock.
*/
void hfi1_mmu_rb_release(struct kref *refcount)
{
struct mmu_rb_node *mnode =
container_of(refcount, struct mmu_rb_node, refcount);
struct mmu_rb_handler *handler = mnode->handler;
unsigned long flags;

spin_lock_irqsave(&handler->lock, flags);
list_move(&mnode->list, &mnode->handler->del_list);
spin_unlock_irqrestore(&handler->lock, flags);
queue_work(handler->wq, &handler->del_work);
}

void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
struct mmu_rb_node *rbnode, *ptr;
@@ -197,6 +237,10 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)

spin_lock_irqsave(&handler->lock, flags);
list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
/* refcount == 1 implies mmu_rb_handler has only rbnode ref */
if (kref_read(&rbnode->refcount) > 1)
continue;

if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
&stop)) {
__mmu_int_rb_remove(rbnode, &handler->root);
@@ -209,7 +253,8 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
spin_unlock_irqrestore(&handler->lock, flags);

list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
handler->ops->remove(handler->ops_arg, rbnode);
trace_hfi1_mmu_rb_evict(rbnode);
kref_put(&rbnode->refcount, release_immediate);
}
}

@@ -221,7 +266,6 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
struct rb_root_cached *root = &handler->root;
struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
bool added = false;

spin_lock_irqsave(&handler->lock, flags);
for (node = __mmu_int_rb_iter_first(root, range->start, range->end-1);
@@ -229,39 +273,17 @@ static int mmu_notifier_range_start(struct mmu_notifier *mn,
/* Guard against node removal. */
ptr = __mmu_int_rb_iter_next(node, range->start,
range->end - 1);
trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
if (handler->ops->invalidate(handler->ops_arg, node)) {
__mmu_int_rb_remove(node, root);
/* move from LRU list to delete list */
list_move(&node->list, &handler->del_list);
added = true;
}
trace_hfi1_mmu_mem_invalidate(node);
/* Remove from rb tree and lru_list. */
__mmu_int_rb_remove(node, root);
list_del_init(&node->list);
kref_put(&node->refcount, release_nolock);
}
spin_unlock_irqrestore(&handler->lock, flags);

if (added)
queue_work(handler->wq, &handler->del_work);

return 0;
}

/*
* Call the remove function for the given handler and the list. This
* is expected to be called with a delete list extracted from handler.
* The caller should not be holding the handler lock.
*/
static void do_remove(struct mmu_rb_handler *handler,
struct list_head *del_list)
{
struct mmu_rb_node *node;

while (!list_empty(del_list)) {
node = list_first_entry(del_list, struct mmu_rb_node, list);
list_del(&node->list);
handler->ops->remove(handler->ops_arg, node);
}
}

/*
* Work queue function to remove all nodes that have been queued up to
* be removed. The key feature is that mm->mmap_lock is not being held
@@ -274,11 +296,17 @@ static void handle_remove(struct work_struct *work)
del_work);
struct list_head del_list;
unsigned long flags;
struct mmu_rb_node *node;

/* remove anything that is queued to get removed */
spin_lock_irqsave(&handler->lock, flags);
list_replace_init(&handler->del_list, &del_list);
spin_unlock_irqrestore(&handler->lock, flags);

do_remove(handler, &del_list);
while (!list_empty(&del_list)) {
node = list_first_entry(&del_list, struct mmu_rb_node, list);
list_del(&node->list);
trace_hfi1_mmu_release_node(node);
handler->ops->remove(handler->ops_arg, node);
}
}

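Editor's note: the hfi1 mmu_rb changes above hang node teardown off a kref in struct mmu_rb_node, with two release callbacks chosen by context (release_immediate() when sleeping is allowed, release_nolock() while the handler lock is held). The toy userspace sketch below is not hfi1 code; it only illustrates the put-with-release-callback pattern that kref_put() provides, using a plain counter instead of struct kref.

/* Illustrative only: the refcount-with-release-callback pattern. */
#include <stdio.h>

struct node {
        int refcount;
        const char *name;
};

static void release(struct node *n)
{
        /* In the patch this role is played by release_immediate()/release_nolock(). */
        printf("releasing %s\n", n->name);
}

static void node_get(struct node *n)
{
        n->refcount++;
}

static void node_put(struct node *n)
{
        if (--n->refcount == 0)
                release(n);
}

int main(void)
{
        struct node n = { .refcount = 1, .name = "mmu_rb_node" };

        node_get(&n);   /* e.g. an in-flight user takes a reference */
        node_put(&n);   /* drops the extra reference, no release yet */
        node_put(&n);   /* last reference gone: release() runs */
        return 0;
}
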
Some files were not shown because too many files have changed in this diff.