Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma updates from Jason Gunthorpe:
 "Quite a small cycle this time, even with the rc8. I suppose everyone
  went to sleep over xmas.

   - Minor driver updates for hfi1, cxgb4, erdma, hns, irdma, mlx5, siw,
     mana

   - inline CQE support for hns

   - Have mlx5 display device error codes

   - Pinned DMABUF support for irdma

   - Continued rxe cleanups, particularly converting the MRs to use
     xarray

   - Improvements to what can be cached in the mlx5 mkey cache"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (61 commits)
  IB/mlx5: Extend debug control for CC parameters
  IB/hfi1: Fix sdma.h tx->num_descs off-by-one errors
  IB/hfi1: Fix math bugs in hfi1_can_pin_pages()
  RDMA/irdma: Add support for dmabuf pin memory regions
  RDMA/mlx5: Use query_special_contexts for mkeys
  net/mlx5e: Use query_special_contexts for mkeys
  net/mlx5: Change define name for 0x100 lkey value
  net/mlx5: Expose bits for querying special mkeys
  RDMA/rxe: Fix missing memory barriers in rxe_queue.h
  RDMA/mana_ib: Fix a bug when the PF indicates more entries for registering memory on first packet
  RDMA/rxe: Remove rxe_alloc()
  RDMA/cma: Distinguish between sockaddr_in and sockaddr_in6 by size
  Subject: RDMA/rxe: Handle zero length rdma
  iw_cxgb4: Fix potential NULL dereference in c4iw_fill_res_cm_id_entry()
  RDMA/mlx5: Use rdma_umem_for_each_dma_block()
  RDMA/umem: Remove unused 'work' member from struct ib_umem
  RDMA/irdma: Cap MSIX used to online CPUs + 1
  RDMA/mlx5: Check reg_create() create for errors
  RDMA/restrack: Correct spelling
  RDMA/cxgb4: Fix potential null-ptr-deref in pass_establish()
  ...
Linus Torvalds
2023-02-24 15:11:03 -08:00
65 changed files with 2116 additions and 1601 deletions
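One of the rxe items called out above converts MR index management to an xarray. A minimal sketch of that allocation pattern against the stock <linux/xarray.h> API (names and limits here are illustrative, not the actual rxe code):

    #include <linux/xarray.h>

    static DEFINE_XARRAY_ALLOC(mr_xa);    /* hypothetical table, not rxe's */

    /* Store an object under a kernel-chosen index; xa_alloc_cyclic() hands
     * out indices cyclically so a just-freed index is not reused at once. */
    static int mr_index_alloc(void *mr, u32 *index, u32 *next)
    {
        return xa_alloc_cyclic(&mr_xa, index, mr,
                               XA_LIMIT(1, 0x00ffffff), next, GFP_KERNEL);
    }

    /* Lookup and removal are plain indexed operations. */
    static void *mr_index_lookup(u32 index)
    {
        return xa_load(&mr_xa, index);
    }

    static void mr_index_free(u32 index)
    {
        xa_erase(&mr_xa, index);
    }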


@@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
if (sa->sa_family != sb->sa_family)
return sa->sa_family - sb->sa_family;
if (sa->sa_family == AF_INET)
return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
(char *)&((struct sockaddr_in *)sb)->sin_addr,
if (sa->sa_family == AF_INET &&
__builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
&((struct sockaddr_in *)sb)->sin_addr,
sizeof(((struct sockaddr_in *)sa)->sin_addr));
}
return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
&((struct sockaddr_in6 *)sb)->sin6_addr);
if (sa->sa_family == AF_INET6 &&
__builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
&((struct sockaddr_in6 *)sb)->sin6_addr);
}
return -1;
}
static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
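The size guard added above relies on __builtin_object_size(ptr, 0): it folds to the allocated size when the compiler can prove it, and to (size_t)-1 when it cannot. An unknown size therefore passes the >= check and the address-family test alone decides, while a provably short object now falls through to the -1 return instead of letting memcmp() read past it. A standalone illustration (GCC/Clang):

    #include <stdio.h>
    #include <netinet/in.h>

    int main(void)
    {
        struct sockaddr_in v4;
        char small[4];

        /* Folds to the real size for objects the compiler can see... */
        printf("%zu\n", __builtin_object_size(&v4, 0));   /* 16 */
        /* ...so a sizeof-based guard like the one above rejects this: */
        printf("%zu\n", __builtin_object_size(small, 0)); /* 4 */
        return 0;
    }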
@@ -2819,8 +2826,8 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
}
EXPORT_SYMBOL(rdma_set_min_rnr_timer);
static void route_set_path_rec_inbound(struct cma_work *work,
struct sa_path_rec *path_rec)
static int route_set_path_rec_inbound(struct cma_work *work,
struct sa_path_rec *path_rec)
{
struct rdma_route *route = &work->id->id.route;
@@ -2828,14 +2835,15 @@ static void route_set_path_rec_inbound(struct cma_work *work,
route->path_rec_inbound =
kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
if (!route->path_rec_inbound)
return;
return -ENOMEM;
}
*route->path_rec_inbound = *path_rec;
return 0;
}
static void route_set_path_rec_outbound(struct cma_work *work,
struct sa_path_rec *path_rec)
static int route_set_path_rec_outbound(struct cma_work *work,
struct sa_path_rec *path_rec)
{
struct rdma_route *route = &work->id->id.route;
@@ -2843,14 +2851,15 @@ static void route_set_path_rec_outbound(struct cma_work *work,
route->path_rec_outbound =
kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
if (!route->path_rec_outbound)
return;
return -ENOMEM;
}
*route->path_rec_outbound = *path_rec;
return 0;
}
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
int num_prs, void *context)
unsigned int num_prs, void *context)
{
struct cma_work *work = context;
struct rdma_route *route;
@@ -2865,13 +2874,15 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
*route->path_rec = path_rec[i];
else if (path_rec[i].flags & IB_PATH_INBOUND)
route_set_path_rec_inbound(work, &path_rec[i]);
status = route_set_path_rec_inbound(work, &path_rec[i]);
else if (path_rec[i].flags & IB_PATH_OUTBOUND)
route_set_path_rec_outbound(work, &path_rec[i]);
}
if (!route->path_rec) {
status = -EINVAL;
goto fail;
status = route_set_path_rec_outbound(work,
&path_rec[i]);
else
status = -EINVAL;
if (status)
goto fail;
}
route->num_pri_alt_paths = 1;
@@ -3541,121 +3552,6 @@ err:
return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
struct sockaddr_storage zero_sock = {};
if (src_addr && src_addr->sa_family)
return rdma_bind_addr(id, src_addr);
/*
* When the src_addr is not specified, automatically supply an any addr
*/
zero_sock.ss_family = dst_addr->sa_family;
if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 =
(struct sockaddr_in6 *)&zero_sock;
struct sockaddr_in6 *dst_addr6 =
(struct sockaddr_in6 *)dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
id->route.addr.dev_addr.bound_dev_if =
dst_addr6->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *)&zero_sock)->sib_pkey =
((struct sockaddr_ib *)dst_addr)->sib_pkey;
}
return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
}
/*
* If required, resolve the source address for bind and leave the id_priv in
* state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
* calls made by ULP, a previously bound ID will not be re-bound and src_addr is
* ignored.
*/
static int resolve_prepare_src(struct rdma_id_private *id_priv,
struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
int ret;
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
/* For a well behaved ULP state will be RDMA_CM_IDLE */
ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
if (ret)
goto err_dst;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
RDMA_CM_ADDR_QUERY))) {
ret = -EINVAL;
goto err_dst;
}
}
if (cma_family(id_priv) != dst_addr->sa_family) {
ret = -EINVAL;
goto err_state;
}
return 0;
err_state:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
err_dst:
memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
return ret;
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
int ret;
ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
if (ret)
return ret;
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
/*
* The FSM can return back to RDMA_CM_ADDR_BOUND after
* rdma_resolve_ip() is called, eg through the error
* path in addr_handler(). If this happens the existing
* request must be canceled before issuing a new one.
* Since canceling a request is a bit slow and this
* oddball path is rare, keep track once a request has
* been issued. The track turns out to be a permanent
* state since this is the only cancel as it is
* immediately before rdma_resolve_ip().
*/
if (id_priv->used_resolve_ip)
rdma_addr_cancel(&id->route.addr.dev_addr);
else
id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
false, id_priv);
}
}
if (ret)
goto err;
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
struct rdma_id_private *id_priv;
@@ -4058,27 +3954,26 @@ err:
}
EXPORT_SYMBOL(rdma_listen);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
struct sockaddr *addr, const struct sockaddr *daddr)
{
struct rdma_id_private *id_priv;
struct sockaddr *id_daddr;
int ret;
struct sockaddr *daddr;
if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
addr->sa_family != AF_IB)
return -EAFNOSUPPORT;
id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
return -EINVAL;
ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
if (ret)
goto err1;
memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
if (ret)
goto err1;
@@ -4098,8 +3993,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
}
#endif
}
daddr = cma_dst_addr(id_priv);
daddr->sa_family = addr->sa_family;
id_daddr = cma_dst_addr(id_priv);
if (daddr != id_daddr)
memcpy(id_daddr, daddr, rdma_addr_size(addr));
id_daddr->sa_family = addr->sa_family;
ret = cma_get_port(id_priv);
if (ret)
@@ -4115,6 +4012,127 @@ err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
}
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
struct sockaddr_storage zero_sock = {};
if (src_addr && src_addr->sa_family)
return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
/*
* When the src_addr is not specified, automatically supply an any addr
*/
zero_sock.ss_family = dst_addr->sa_family;
if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 =
(struct sockaddr_in6 *)&zero_sock;
struct sockaddr_in6 *dst_addr6 =
(struct sockaddr_in6 *)dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
id->route.addr.dev_addr.bound_dev_if =
dst_addr6->sin6_scope_id;
} else if (dst_addr->sa_family == AF_IB) {
((struct sockaddr_ib *)&zero_sock)->sib_pkey =
((struct sockaddr_ib *)dst_addr)->sib_pkey;
}
return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
}
/*
* If required, resolve the source address for bind and leave the id_priv in
* state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
* calls made by ULP, a previously bound ID will not be re-bound and src_addr is
* ignored.
*/
static int resolve_prepare_src(struct rdma_id_private *id_priv,
struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
int ret;
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
/* For a well behaved ULP state will be RDMA_CM_IDLE */
ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
if (ret)
return ret;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
RDMA_CM_ADDR_QUERY)))
return -EINVAL;
}
if (cma_family(id_priv) != dst_addr->sa_family) {
ret = -EINVAL;
goto err_state;
}
return 0;
err_state:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
return ret;
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
int ret;
ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
if (ret)
return ret;
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
/*
* The FSM can return back to RDMA_CM_ADDR_BOUND after
* rdma_resolve_ip() is called, eg through the error
* path in addr_handler(). If this happens the existing
* request must be canceled before issuing a new one.
* Since canceling a request is a bit slow and this
* oddball path is rare, keep track once a request has
* been issued. The track turns out to be a permanent
* state since this is the only cancel as it is
* immediately before rdma_resolve_ip().
*/
if (id_priv->used_resolve_ip)
rdma_addr_cancel(&id->route.addr.dev_addr);
else
id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
false, id_priv);
}
}
if (ret)
goto err;
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id);
return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)


@@ -106,7 +106,7 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *sa_query, int status,
int num_prs, struct ib_sa_mad *mad);
struct ib_sa_mad *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -118,12 +118,6 @@ struct ib_sa_query {
u32 seq; /* Local svc request sequence number */
unsigned long timeout; /* Local svc timeout */
u8 path_use; /* How will the pathrecord be used */
/* A separate buffer to save pathrecords of a response, as in cases
* like IB/netlink, mulptiple pathrecords are supported, so that
* mad->data is not large enough to hold them
*/
void *resp_pr_data;
};
#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
@@ -132,7 +126,7 @@ struct ib_sa_query {
struct ib_sa_path_query {
void (*callback)(int status, struct sa_path_rec *rec,
int num_paths, void *context);
unsigned int num_paths, void *context);
void *context;
struct ib_sa_query sa_query;
struct sa_path_rec *conv_pr;
@@ -690,6 +684,8 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
@@ -874,30 +870,21 @@ static void send_handler(struct ib_mad_agent *agent,
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
const struct nlmsghdr *nlh)
{
struct ib_path_rec_data *srec, *drec;
struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
struct ib_sa_path_query *path_query;
struct ib_path_rec_data *rec_data;
struct ib_mad_send_wc mad_send_wc;
const struct nlattr *head, *curr;
struct ib_sa_mad *mad = NULL;
int len, rem, num_prs = 0;
int len, rem, status = -EIO;
unsigned int num_prs = 0;
u32 mask = 0;
int status = -EIO;
if (!query->callback)
goto out;
path_query = container_of(query, struct ib_sa_path_query, sa_query);
mad = query->mad_buf->mad;
if (!path_query->conv_pr &&
(be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
/* Need a larger buffer for possible multiple PRs */
query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
sizeof(*drec), GFP_KERNEL);
if (!query->resp_pr_data) {
query->callback(query, -ENOMEM, 0, NULL);
return;
}
}
head = (const struct nlattr *) nlmsg_data(nlh);
len = nlmsg_len(nlh);
@@ -917,36 +904,41 @@ static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
break;
}
drec = (struct ib_path_rec_data *)query->resp_pr_data;
nla_for_each_attr(curr, head, len, rem) {
if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
continue;
srec = nla_data(curr);
if ((srec->flags & mask) != mask)
rec_data = nla_data(curr);
if ((rec_data->flags & mask) != mask)
continue;
status = 0;
if (!drec) {
memcpy(mad->data, srec->path_rec,
sizeof(srec->path_rec));
num_prs = 1;
break;
if ((query->flags & IB_SA_QUERY_OPA) ||
path_query->conv_pr) {
mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
memcpy(mad->data, rec_data->path_rec,
sizeof(rec_data->path_rec));
query->callback(query, 0, mad);
goto out;
}
memcpy(drec, srec, sizeof(*drec));
drec++;
status = 0;
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
rec_data->path_rec, &recs[num_prs]);
recs[num_prs].flags = rec_data->flags;
recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
sa_path_set_dmac_zero(&recs[num_prs]);
num_prs++;
if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
break;
}
if (!status)
if (!status) {
mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
query->callback(query, status, num_prs, mad);
kvfree(query->resp_pr_data);
query->resp_pr_data = NULL;
path_query->callback(status, recs, num_prs,
path_query->context);
} else
query->callback(query, status, mad);
out:
mad_send_wc.send_buf = query->mad_buf;
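For reference, the attribute walk above uses the standard netlink iteration helpers; a condensed sketch of the same loop shape (constants from the rdma_netlink uapi, surrounding types assumed):

    #include <net/netlink.h>
    #include <rdma/rdma_netlink.h>

    /* Count LS_NLA_TYPE_PATH_RECORD attributes in a response, mirroring
     * the nla_for_each_attr() walk in ib_nl_process_good_resolve_rsp(). */
    static int count_path_recs(const struct nlmsghdr *nlh)
    {
        const struct nlattr *head = nlmsg_data(nlh);
        const struct nlattr *curr;
        int len = nlmsg_len(nlh), rem, n = 0;

        nla_for_each_attr(curr, head, len, rem)
            if (nla_type(curr) == LS_NLA_TYPE_PATH_RECORD)
                n++;
        return n;
    }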
@@ -1451,11 +1443,26 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
return PR_IB_SUPPORTED;
}
static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
int status, struct ib_sa_mad *mad)
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
int status, struct ib_sa_mad *mad)
{
struct ib_sa_path_query *query =
container_of(sa_query, struct ib_sa_path_query, sa_query);
struct sa_path_rec rec = {};
if (!mad) {
query->callback(status, NULL, 0, query->context);
return;
}
if (sa_query->flags & IB_SA_QUERY_OPA) {
ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_OPA;
query->callback(status, &rec, 1, query->context);
return;
}
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_IB;
@@ -1472,71 +1479,6 @@ static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
}
}
/**
* ib_sa_pr_callback_multiple() - Parse path records then do callback.
*
* In a multiple-PR case the PRs are saved in "query->resp_pr_data"
* (instead of"mad->data") and with "ib_path_rec_data" structure format,
* so that rec->flags can be set to indicate the type of PR.
* This is valid only in IB fabric.
*/
static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
int status, int num_prs,
struct ib_path_rec_data *rec_data)
{
struct sa_path_rec *rec;
int i;
rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
if (!rec) {
query->callback(-ENOMEM, NULL, 0, query->context);
return;
}
for (i = 0; i < num_prs; i++) {
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
rec_data[i].path_rec, rec + i);
rec[i].rec_type = SA_PATH_REC_TYPE_IB;
sa_path_set_dmac_zero(rec + i);
rec[i].flags = rec_data[i].flags;
}
query->callback(status, rec, num_prs, query->context);
kvfree(rec);
}
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
int status, int num_prs,
struct ib_sa_mad *mad)
{
struct ib_sa_path_query *query =
container_of(sa_query, struct ib_sa_path_query, sa_query);
struct sa_path_rec rec;
if (!mad || !num_prs) {
query->callback(status, NULL, 0, query->context);
return;
}
if (sa_query->flags & IB_SA_QUERY_OPA) {
if (num_prs != 1) {
query->callback(-EINVAL, NULL, 0, query->context);
return;
}
ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_OPA;
query->callback(status, &rec, num_prs, query->context);
} else {
if (!sa_query->resp_pr_data)
ib_sa_pr_callback_single(query, status, mad);
else
ib_sa_pr_callback_multiple(query, status, num_prs,
sa_query->resp_pr_data);
}
}
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
struct ib_sa_path_query *query =
@@ -1578,7 +1520,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
int num_paths, void *context),
unsigned int num_paths, void *context),
void *context,
struct ib_sa_query **sa_query)
{
@@ -1677,8 +1619,7 @@ err1:
EXPORT_SYMBOL(ib_sa_path_rec_get);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
int status, int num_prs,
struct ib_sa_mad *mad)
int status, struct ib_sa_mad *mad)
{
struct ib_sa_mcmember_query *query =
container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
@@ -1769,8 +1710,7 @@ err1:
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
int status, int num_paths,
struct ib_sa_mad *mad)
int status, struct ib_sa_mad *mad)
{
struct ib_sa_guidinfo_query *query =
container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
@@ -1879,8 +1819,7 @@ static void ib_classportinfo_cb(void *context)
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
int status, int num_prs,
struct ib_sa_mad *mad)
int status, struct ib_sa_mad *mad)
{
unsigned long flags;
struct ib_sa_classport_info_query *query =
@@ -2055,13 +1994,13 @@ static void send_handler(struct ib_mad_agent *agent,
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
query->callback(query, -ETIMEDOUT, 0, NULL);
query->callback(query, -ETIMEDOUT, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
query->callback(query, -EINTR, 0, NULL);
query->callback(query, -EINTR, NULL);
break;
default:
query->callback(query, -EIO, 0, NULL);
query->callback(query, -EIO, NULL);
break;
}
@@ -2089,10 +2028,10 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?
-EINVAL : 0, 1,
-EINVAL : 0,
(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
else
query->callback(query, -EIO, 0, NULL);
query->callback(query, -EIO, NULL);
}
ib_free_recv_mad(mad_recv_wc);


@@ -2676,6 +2676,9 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
u16 tcp_opt = ntohs(req->tcp_opt);
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -4144,6 +4147,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, iph->daddr);
if (!pdev) {
pr_err("%s - failed to find device!\n", __func__);
goto free_dst;
}
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);


@@ -767,7 +767,7 @@ static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
goto out;
wc->wr_id = cookie;
wc->qp = qhp ? &qhp->ibqp : NULL;
wc->qp = &qhp->ibqp;
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;


@@ -238,7 +238,7 @@ int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
goto err_cancel_table;
if (epcp->state == LISTEN) {
if (listen_ep) {
if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
goto err_cancel_table;
if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))


@@ -122,9 +122,7 @@ struct fw_ri_dsgl {
__be16 nsge;
__be32 len0;
__be64 addr0;
#ifndef C99_NOT_SUPPORTED
struct fw_ri_dsge_pair sge[];
#endif
};
struct fw_ri_sge {
@@ -138,9 +136,7 @@ struct fw_ri_isgl {
__u8 r1;
__be16 nsge;
__be32 r2;
#ifndef C99_NOT_SUPPORTED
struct fw_ri_sge sge[];
#endif
};
struct fw_ri_immd {
@@ -148,9 +144,7 @@ struct fw_ri_immd {
__u8 r1;
__be16 r2;
__be32 immdlen;
#ifndef C99_NOT_SUPPORTED
__u8 data[];
#endif
};
struct fw_ri_tpte {
@@ -320,9 +314,7 @@ struct fw_ri_res_wr {
__be32 op_nres;
__be32 len16_pkd;
__u64 cookie;
#ifndef C99_NOT_SUPPORTED
struct fw_ri_res res[];
#endif
};
#define FW_RI_RES_WR_NRES_S 0
@@ -562,12 +554,10 @@ struct fw_ri_rdma_write_wr {
__be32 plen;
__be32 stag_sink;
__be64 to_sink;
#ifndef C99_NOT_SUPPORTED
union {
struct fw_ri_immd immd_src[0];
struct fw_ri_isgl isgl_src[0];
DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
#endif
};
struct fw_ri_send_wr {
@@ -581,12 +571,10 @@ struct fw_ri_send_wr {
__be32 plen;
__be32 r3;
__be64 r4;
#ifndef C99_NOT_SUPPORTED
union {
struct fw_ri_immd immd_src[0];
struct fw_ri_isgl isgl_src[0];
DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
#endif
};
#define FW_RI_SEND_WR_SENDOP_S 0
@@ -618,12 +606,10 @@ struct fw_ri_rdma_write_cmpl_wr {
struct fw_ri_isgl isgl_src;
} u_cmpl;
__be64 r3;
#ifndef C99_NOT_SUPPORTED
union fw_ri_write {
struct fw_ri_immd immd_src[0];
struct fw_ri_isgl isgl_src[0];
DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
#endif
};
struct fw_ri_rdma_read_wr {
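These conversions exist because ISO C forbids a flexible array member directly inside a union, which is what the old zero-length-array-plus-#ifndef C99_NOT_SUPPORTED dance worked around. DECLARE_FLEX_ARRAY() from <linux/stddef.h> makes the member legal by nesting it; an approximate model of the macro's shape (not a verbatim copy of the upstream definition):

    /* An unnamed struct with an empty leading member lets a true flexible
     * array live inside a union while keeping the array at offset 0. */
    #define MODEL_FLEX_ARRAY(TYPE, NAME)          \
        struct {                                  \
            struct { } __empty_ ## NAME;          \
            TYPE NAME[];                          \
        }

    union payload {                               /* hypothetical example */
        MODEL_FLEX_ARRAY(unsigned int, words);
        MODEL_FLEX_ARRAY(unsigned char, bytes);
    };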


@@ -397,7 +397,7 @@ struct erdma_write_sqe {
__le32 rsvd;
struct erdma_sge sgl[0];
struct erdma_sge sgl[];
};
struct erdma_send_sqe {
@@ -408,7 +408,7 @@ struct erdma_send_sqe {
};
__le32 length;
struct erdma_sge sgl[0];
struct erdma_sge sgl[];
};
struct erdma_readreq_sqe {


@@ -1110,12 +1110,14 @@ int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
prot = pgprot_device(vma->vm_page_prot);
break;
default:
return -EINVAL;
err = -EINVAL;
goto put_entry;
}
err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
prot, rdma_entry);
put_entry:
rdma_user_mmap_entry_put(rdma_entry);
return err;
}


@@ -1056,7 +1056,7 @@ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
@@ -13362,7 +13362,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int ret;
unsigned ngroups;
int rmt_count;
int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
u32 rcv_contexts = chip_rcv_contexts(dd);
@@ -13421,28 +13420,34 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
(num_kernel_contexts + n_usr_ctxts),
&node_affinity.real_cpu_mask);
/*
* The RMT entries are currently allocated as shown below:
* 1. QOS (0 to 128 entries);
* 2. FECN (num_kernel_context - 1 + num_user_contexts +
* num_netdev_contexts);
* 3. netdev (num_netdev_contexts).
* It should be noted that FECN oversubscribe num_netdev_contexts
* entries of RMT because both netdev and PSM could allocate any receive
* context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
* and PSM FECN must reserve an RMT entry for each possible PSM receive
* context.
* RMT entries are allocated as follows:
* 1. QOS (0 to 128 entries)
* 2. FECN (num_kernel_context - 1 [a] + num_user_contexts +
* num_netdev_contexts [b])
* 3. netdev (NUM_NETDEV_MAP_ENTRIES)
*
* Notes:
* [a] Kernel contexts (except control) are included in FECN if kernel
* TID_RDMA is active.
* [b] Netdev and user contexts are randomly allocated from the same
* context pool, so FECN must cover all contexts in the pool.
*/
rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
if (HFI1_CAP_IS_KSET(TID_RDMA))
rmt_count += num_kernel_contexts - 1;
if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
dd_dev_err(dd,
"RMT size is reducing the number of user receive contexts from %u to %d\n",
n_usr_ctxts,
user_rmt_reduced);
/* recalculate */
n_usr_ctxts = user_rmt_reduced;
rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
+ (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
: 0)
+ n_usr_ctxts
+ num_netdev_contexts
+ NUM_NETDEV_MAP_ENTRIES;
if (rmt_count > NUM_MAP_ENTRIES) {
int over = rmt_count - NUM_MAP_ENTRIES;
/* try to squish user contexts, minimum of 1 */
if (over >= n_usr_ctxts) {
dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
return -EINVAL;
}
dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
n_usr_ctxts, n_usr_ctxts - over);
n_usr_ctxts -= over;
}
/* the first N are kernel contexts, the rest are user/netdev contexts */
@@ -14299,15 +14304,15 @@ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
}
/* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
unsigned int *np)
{
int i;
unsigned int m, n;
u8 max_by_vl = 0;
uint max_by_vl = 0;
/* is QOS active at all? */
if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
num_vls == 1 ||
krcvqsset <= 1)
goto no_qos;
@@ -14365,7 +14370,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
if (!rmt)
goto bail;
rmt_entries = qos_rmt_entries(dd, &m, &n);
rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
if (rmt_entries == 0)
goto bail;
qpns_per_vl = 1 << m;


@@ -133,12 +133,13 @@ static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
return grp;
}
static inline u32 rcventry2tidinfo(u32 rcventry)
static inline u32 create_tid(u32 rcventry, u32 npages)
{
u32 pair = rcventry & ~0x1;
return EXP_TID_SET(IDX, pair >> 1) |
EXP_TID_SET(CTRL, 1 << (rcventry - pair));
EXP_TID_SET(CTRL, 1 << (rcventry - pair)) |
EXP_TID_SET(LEN, npages);
}
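RcvArray entries come in even/odd pairs: IDX names the pair and CTRL is a one-hot selector (1 = even entry, 2 = odd; 0 and 3 are invalid for a single entry, which is what the unprogram path later rejects). create_tid() now also folds LEN (npages) into the same word. A userspace model of the pair arithmetic (the actual EXP_TID_SET() field offsets are hardware-defined and omitted here):

    #include <stdio.h>

    int main(void)
    {
        for (unsigned int rcventry = 0; rcventry < 4; rcventry++) {
            unsigned int pair = rcventry & ~0x1u;
            unsigned int idx  = pair >> 1;               /* pair number */
            unsigned int ctrl = 1u << (rcventry - pair); /* 1 even, 2 odd */

            printf("rcventry %u -> IDX %u CTRL %u\n", rcventry, idx, ctrl);
        }
        return 0;
    }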
/**

View File

@@ -306,6 +306,17 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
return reqs;
}
static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,
u64 memaddr, void *memvirt, dma_addr_t memdma,
ssize_t memlen, struct vm_area_struct *vma)
{
hfi1_cdbg(PROC,
"%u:%u type:%u io/vf/dma:%d/%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx",
ctxt, subctxt, type, mapio, vmf, !!memdma,
memaddr ?: (u64)memvirt, memlen,
vma->vm_end - vma->vm_start, vma->vm_flags);
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
struct hfi1_filedata *fd = fp->private_data;
@@ -315,6 +326,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
u64 token = vma->vm_pgoff << PAGE_SHIFT,
memaddr = 0;
void *memvirt = NULL;
dma_addr_t memdma = 0;
u8 subctxt, mapio = 0, vmf = 0, type;
ssize_t memlen = 0;
int ret = 0;
@@ -334,6 +346,11 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
/*
* vm_pgoff is used as a buffer selector cookie. Always mmap from
* the beginning.
*/
vma->vm_pgoff = 0;
flags = vma->vm_flags;
switch (type) {
@@ -355,7 +372,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
mapio = 1;
break;
case PIO_CRED:
case PIO_CRED: {
u64 cr_page_offset;
if (flags & VM_WRITE) {
ret = -EPERM;
goto done;
@@ -365,10 +383,11 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* second or third page allocated for credit returns (if number
* of enabled contexts > 64 and 128 respectively).
*/
memvirt = dd->cr_base[uctxt->numa_id].va;
memaddr = virt_to_phys(memvirt) +
(((u64)uctxt->sc->hw_free -
(u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
cr_page_offset = ((u64)uctxt->sc->hw_free -
(u64)dd->cr_base[uctxt->numa_id].va) &
PAGE_MASK;
memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset;
memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset;
memlen = PAGE_SIZE;
flags &= ~VM_MAYWRITE;
flags |= VM_DONTCOPY | VM_DONTEXPAND;
@@ -378,14 +397,16 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* memory been flagged as non-cached?
*/
/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
mapio = 1;
break;
}
case RCV_HDRQ:
memlen = rcvhdrq_size(uctxt);
memvirt = uctxt->rcvhdrq;
memdma = uctxt->rcvhdrq_dma;
break;
case RCV_EGRBUF: {
unsigned long addr;
unsigned long vm_start_save;
unsigned long vm_end_save;
int i;
/*
* The RcvEgr buffer need to be handled differently
@@ -404,24 +425,34 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
vm_flags_clear(vma, VM_MAYWRITE);
addr = vma->vm_start;
/*
* Mmap multiple separate allocations into a single vma. From
* here, dma_mmap_coherent() calls dma_direct_mmap(), which
* requires the mmap to exactly fill the vma starting at
* vma_start. Adjust the vma start and end for each eager
* buffer segment mapped. Restore the originals when done.
*/
vm_start_save = vma->vm_start;
vm_end_save = vma->vm_end;
vma->vm_end = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
memlen = uctxt->egrbufs.buffers[i].len;
memvirt = uctxt->egrbufs.buffers[i].addr;
ret = remap_pfn_range(
vma, addr,
/*
* virt_to_pfn() does the same, but
* it's not available on x86_64
* when CONFIG_MMU is enabled.
*/
PFN_DOWN(__pa(memvirt)),
memlen,
vma->vm_page_prot);
if (ret < 0)
memdma = uctxt->egrbufs.buffers[i].dma;
vma->vm_end += memlen;
mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr,
memvirt, memdma, memlen, vma);
ret = dma_mmap_coherent(&dd->pcidev->dev, vma,
memvirt, memdma, memlen);
if (ret < 0) {
vma->vm_start = vm_start_save;
vma->vm_end = vm_end_save;
goto done;
addr += memlen;
}
vma->vm_start += memlen;
}
vma->vm_start = vm_start_save;
vma->vm_end = vm_end_save;
ret = 0;
goto done;
}
@@ -481,6 +512,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
}
memlen = PAGE_SIZE;
memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
memdma = uctxt->rcvhdrqtailaddr_dma;
flags &= ~VM_MAYWRITE;
break;
case SUBCTXT_UREGS:
@@ -529,14 +561,15 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
}
vm_flags_reset(vma, flags);
hfi1_cdbg(PROC,
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
vma->vm_end - vma->vm_start, vma->vm_flags);
mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr, memvirt, memdma,
memlen, vma);
if (vmf) {
vma->vm_pgoff = PFN_DOWN(memaddr);
vma->vm_ops = &vm_ops;
ret = 0;
} else if (memdma) {
ret = dma_mmap_coherent(&dd->pcidev->dev, vma,
memvirt, memdma, memlen);
} else if (mapio) {
ret = io_remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(memaddr),


@@ -464,7 +464,7 @@ bail:
*
* This wrapper is the free function that matches hfi1_create_ctxtdata().
* When a context is done being used (kernel or user), this function is called
* for the "final" put to match the kref init from hf1i_create_ctxtdata().
* for the "final" put to match the kref init from hfi1_create_ctxtdata().
* Other users of the context do a get/put sequence to make sure that the
* structure isn't removed while in use.
*/


@@ -3160,8 +3160,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
tx->num_desc++;
if ((unlikely(tx->num_desc == tx->desc_limit))) {
if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
@@ -3174,6 +3173,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
SDMA_MAP_NONE,
dd->sdma_pad_phys,
sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
tx->num_desc++;
_sdma_close_tx(dd, tx);
return rval;
}
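The corrected check accounts for the pad descriptor before appending it: tx->num_desc now counts only descriptors actually written, so the pad would occupy slot num_desc and the array is extended rather than filling its final slot. A minimal model of just that predicate (illustrative, not driver code):

    #include <assert.h>

    struct txreq {
        unsigned int num_desc;    /* descriptors already written */
        unsigned int desc_limit;  /* capacity of descp[] */
    };

    /* Extend when the pad descriptor would land in the final slot. */
    static int pad_needs_extend(const struct txreq *tx)
    {
        return tx->num_desc + 1 == tx->desc_limit;
    }

    int main(void)
    {
        struct txreq tx = { .num_desc = 6, .desc_limit = 8 };

        assert(!pad_needs_extend(&tx)); /* pad becomes slot 6 of 0..7 */
        tx.num_desc = 7;
        assert(pad_needs_extend(&tx));  /* pad would take the last slot */
        return 0;
    }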


@@ -631,14 +631,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
tx->descp[tx->num_desc].qw[0] |=
SDMA_DESC0_LAST_DESC_FLAG;
tx->descp[tx->num_desc].qw[1] |=
dd->default_desc1;
u16 last_desc = tx->num_desc - 1;
tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
tx->descp[last_desc].qw[1] |= dd->default_desc1;
if (tx->flags & SDMA_TXREQ_F_URGENT)
tx->descp[tx->num_desc].qw[1] |=
(SDMA_DESC1_HEAD_TO_HOST_FLAG |
SDMA_DESC1_INT_REQ_FLAG);
tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
@@ -655,6 +654,7 @@ static inline int _sdma_txadd_daddr(
type,
addr, len);
WARN_ON(len > tx->tlen);
tx->num_desc++;
tx->tlen -= len;
/* special cases for last */
if (!tx->tlen) {
@@ -666,7 +666,6 @@ static inline int _sdma_txadd_daddr(
_sdma_close_tx(dd, tx);
}
}
tx->num_desc++;
return rval;
}


@@ -27,8 +27,7 @@ static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
struct tid_group *grp,
unsigned int start, u16 count,
struct tid_group *grp, u16 count,
u32 *tidlist, unsigned int *tididx,
unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
@@ -250,7 +249,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
int ret = 0, need_group = 0, pinned;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
unsigned int ngroups, pageidx = 0, pageset_count,
unsigned int ngroups, pageset_count,
tididx = 0, mapped, mapped_pages = 0;
u32 *tidlist = NULL;
struct tid_user_buf *tidbuf;
@@ -332,7 +331,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tid_group_pop(&uctxt->tid_group_list);
ret = program_rcvarray(fd, tidbuf, grp,
pageidx, dd->rcv_entries.group_size,
dd->rcv_entries.group_size,
tidlist, &tididx, &mapped);
/*
* If there was a failure to program the RcvArray
@@ -348,11 +347,10 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tid_group_add_tail(grp, &uctxt->tid_full_list);
ngroups--;
pageidx += ret;
mapped_pages += mapped;
}
while (pageidx < pageset_count) {
while (tididx < pageset_count) {
struct tid_group *grp, *ptr;
/*
* If we don't have any partially used tid groups, check
@@ -374,11 +372,11 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
*/
list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
list) {
unsigned use = min_t(unsigned, pageset_count - pageidx,
unsigned use = min_t(unsigned, pageset_count - tididx,
grp->size - grp->used);
ret = program_rcvarray(fd, tidbuf, grp,
pageidx, use, tidlist,
use, tidlist,
&tididx, &mapped);
if (ret < 0) {
hfi1_cdbg(TID,
@@ -390,11 +388,10 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tid_group_move(grp,
&uctxt->tid_used_list,
&uctxt->tid_full_list);
pageidx += ret;
mapped_pages += mapped;
need_group = 0;
/* Check if we are done so we break out early */
if (pageidx >= pageset_count)
if (tididx >= pageset_count)
break;
} else if (WARN_ON(ret == 0)) {
/*
@@ -638,7 +635,6 @@ static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
* struct tid_pageset holding information on physically contiguous
* chunks from the user buffer), and other fields.
* @grp: RcvArray group
* @start: starting index into sets array
* @count: number of struct tid_pageset's to program
* @tidlist: the array of u32 elements when the information about the
* programmed RcvArray entries is to be encoded.
@@ -658,14 +654,14 @@ static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
* number of RcvArray entries programmed.
*/
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
struct tid_group *grp,
unsigned int start, u16 count,
struct tid_group *grp, u16 count,
u32 *tidlist, unsigned int *tididx,
unsigned int *pmapped)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
u16 idx;
unsigned int start = *tididx;
u32 tidinfo = 0, rcventry, useidx = 0;
int mapped = 0;
@@ -710,8 +706,7 @@ static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
return ret;
mapped += npages;
tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
EXP_TID_SET(LEN, npages);
tidinfo = create_tid(rcventry - uctxt->expected_base, npages);
tidlist[(*tididx)++] = tidinfo;
grp->used++;
grp->map |= 1 << useidx++;
@@ -795,20 +790,20 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
struct tid_rb_node *node;
u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
u32 tidctrl = EXP_TID_GET(tidinfo, CTRL);
u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;
if (tididx >= uctxt->expected_count) {
dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
tididx, uctxt->ctxt);
return -EINVAL;
}
if (tidctrl == 0x3)
if (tidctrl == 0x3 || tidctrl == 0x0)
return -EINVAL;
rcventry = tididx + (tidctrl - 1);
if (rcventry >= uctxt->expected_count) {
dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
rcventry, uctxt->ctxt);
return -EINVAL;
}
node = fd->entry_to_rb[rcventry];
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
@@ -920,9 +915,8 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
spin_lock(&fdata->invalid_lock);
if (fdata->invalid_tid_idx < uctxt->expected_count) {
fdata->invalid_tids[fdata->invalid_tid_idx] =
rcventry2tidinfo(node->rcventry - uctxt->expected_base);
fdata->invalid_tids[fdata->invalid_tid_idx] |=
EXP_TID_SET(LEN, node->npages);
create_tid(node->rcventry - uctxt->expected_base,
node->npages);
if (!fdata->invalid_tid_idx) {
unsigned long *ev;


@@ -29,33 +29,52 @@ MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
u32 nlocked, u32 npages)
{
unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
size = (cache_size * (1UL << 20)); /* convert to bytes */
unsigned int usr_ctxts =
dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
bool can_lock = capable(CAP_IPC_LOCK);
unsigned long ulimit_pages;
unsigned long cache_limit_pages;
unsigned int usr_ctxts;
/*
* Calculate per-cache size. The calculation below uses only a quarter
* of the available per-context limit. This leaves space for other
* pinning. Should we worry about shared ctxts?
* Perform RLIMIT_MEMLOCK based checks unless CAP_IPC_LOCK is present.
*/
cache_limit = (ulimit / usr_ctxts) / 4;
if (!capable(CAP_IPC_LOCK)) {
ulimit_pages =
DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE);
/* If ulimit isn't set to "unlimited" and is smaller than cache_size. */
if (ulimit != (-1UL) && size > cache_limit)
size = cache_limit;
/*
* Pinning these pages would exceed this process's locked memory
* limit.
*/
if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)
return false;
/* Convert to number of pages */
size = DIV_ROUND_UP(size, PAGE_SIZE);
/*
* Only allow 1/4 of the user's RLIMIT_MEMLOCK to be used for HFI
* caches. This fraction is then equally distributed among all
* existing user contexts. Note that if RLIMIT_MEMLOCK is
* 'unlimited' (-1), the value of this limit will be > 2^42 pages
* (2^64 / 2^12 / 2^8 / 2^2).
*
* The effectiveness of this check may be reduced if I/O occurs on
* some user contexts before all user contexts are created. This
* check assumes that this process is the only one using this
* context (e.g., the corresponding fd was not passed to another
* process for concurrent access) as there is no per-context,
* per-process tracking of pinned pages. It also assumes that each
* user context has only one cache to limit.
*/
usr_ctxts = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))
return false;
}
pinned = atomic64_read(&mm->pinned_vm);
/* First, check the absolute limit against all pinned pages. */
if (pinned + npages >= ulimit && !can_lock)
/*
* Pinning these pages would exceed the size limit for this cache.
*/
cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;
if (nlocked + npages > cache_limit_pages)
return false;
return ((nlocked + npages) <= size) || can_lock;
return true;
}
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
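The rewrite splits what used to be one tangled expression into three independent checks: the process-wide RLIMIT_MEMLOCK test against mm->pinned_vm, a quarter of the per-context share of that rlimit, and the cache_size module-parameter cap. Worked numbers for the two derived limits (illustrative values, not driver defaults):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        /* Hypothetical: 64 MiB RLIMIT_MEMLOCK, 4 user contexts. */
        unsigned long ulimit_pages = (64UL << 20) / PAGE_SIZE;     /* 16384 */
        unsigned int  usr_ctxts    = 4;
        unsigned long per_ctxt_cap = ulimit_pages / usr_ctxts / 4; /* 1024 */

        /* cache_size is in MB; convert to pages as the driver does. */
        unsigned long cache_size_mb     = 256;
        unsigned long cache_limit_pages =
            cache_size_mb * (1024 * 1024) / PAGE_SIZE;             /* 65536 */

        printf("per-context pin budget: %lu pages\n", per_ctxt_cap);
        printf("cache size cap:         %lu pages\n", cache_limit_pages);
        return 0;
    }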


@@ -1598,13 +1598,11 @@ static const char * const driver_cntr_names[] = {
"DRIVER_EgrHdrFull"
};
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
static struct rdma_stat_desc *dev_cntr_descs;
static struct rdma_stat_desc *port_cntr_descs;
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
static int cntr_names_initialized;
/*
* Convert a list of names separated by '\n' into an array of NULL terminated
@@ -1615,8 +1613,8 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
int num_extra_names, int *num_cntrs,
struct rdma_stat_desc **cntr_descs)
{
struct rdma_stat_desc *q;
char *names_out, *p;
struct rdma_stat_desc *names_out;
char *p;
int i, n;
n = 0;
@@ -1624,65 +1622,45 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
if (names_in[i] == '\n')
n++;
names_out =
kzalloc((n + num_extra_names) * sizeof(*q) + names_len,
GFP_KERNEL);
names_out = kzalloc((n + num_extra_names) * sizeof(*names_out)
+ names_len,
GFP_KERNEL);
if (!names_out) {
*num_cntrs = 0;
*cntr_descs = NULL;
return -ENOMEM;
}
p = names_out + (n + num_extra_names) * sizeof(*q);
p = (char *)&names_out[n + num_extra_names];
memcpy(p, names_in, names_len);
q = (struct rdma_stat_desc *)names_out;
for (i = 0; i < n; i++) {
q[i].name = p;
names_out[i].name = p;
p = strchr(p, '\n');
*p++ = '\0';
}
*num_cntrs = n;
*cntr_descs = (struct rdma_stat_desc *)names_out;
*cntr_descs = names_out;
return 0;
}
static int init_counters(struct ib_device *ibdev)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
int i, err = 0;
mutex_lock(&cntr_names_lock);
if (cntr_names_initialized)
goto out_unlock;
err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs,
&num_dev_cntrs, &dev_cntr_descs);
if (err)
goto out_unlock;
for (i = 0; i < num_driver_cntrs; i++)
dev_cntr_descs[num_dev_cntrs + i].name = driver_cntr_names[i];
err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0,
&num_port_cntrs, &port_cntr_descs);
if (err) {
kfree(dev_cntr_descs);
dev_cntr_descs = NULL;
goto out_unlock;
}
cntr_names_initialized = 1;
out_unlock:
mutex_unlock(&cntr_names_lock);
return err;
}
static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
{
if (init_counters(ibdev))
return NULL;
if (!dev_cntr_descs) {
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
int i, err;
err = init_cntr_names(dd->cntrnames, dd->cntrnameslen,
num_driver_cntrs,
&num_dev_cntrs, &dev_cntr_descs);
if (err)
return NULL;
for (i = 0; i < num_driver_cntrs; i++)
dev_cntr_descs[num_dev_cntrs + i].name =
driver_cntr_names[i];
}
return rdma_alloc_hw_stats_struct(dev_cntr_descs,
num_dev_cntrs + num_driver_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
@@ -1691,8 +1669,16 @@ static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
if (init_counters(ibdev))
return NULL;
if (!port_cntr_descs) {
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
int err;
err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen,
0,
&num_port_cntrs, &port_cntr_descs);
if (err)
return NULL;
}
return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1917,13 +1903,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
del_timer_sync(&dev->mem_timer);
verbs_txreq_exit(dev);
mutex_lock(&cntr_names_lock);
kfree(dev_cntr_descs);
kfree(port_cntr_descs);
dev_cntr_descs = NULL;
port_cntr_descs = NULL;
cntr_names_initialized = 0;
mutex_unlock(&cntr_names_lock);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
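The reworked init_cntr_names() above keeps its single-allocation layout: one buffer holds the rdma_stat_desc array immediately followed by a private copy of the newline-separated name string, which is then split in place. A userspace sketch of the same pattern (hypothetical types, not the hfi1 code):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct stat_desc { const char *name; }; /* stand-in for rdma_stat_desc */

    static struct stat_desc *parse_names(const char *in, size_t len, int *n_out)
    {
        int n = 0;

        for (size_t i = 0; i < len; i++)
            if (in[i] == '\n')
                n++;

        /* Descriptor array and string copy share one allocation. */
        struct stat_desc *descs = calloc(1, n * sizeof(*descs) + len + 1);
        if (!descs)
            return NULL;

        char *p = (char *)&descs[n];
        memcpy(p, in, len);

        for (int i = 0; i < n; i++) {
            descs[i].name = p;
            p = strchr(p, '\n');
            *p++ = '\0';
        }
        *n_out = n;
        return descs;
    }

    int main(void)
    {
        const char names[] = "tx_pkts\nrx_pkts\nerrs\n";
        int n;
        struct stat_desc *d = parse_names(names, sizeof(names) - 1, &n);

        for (int i = 0; d && i < n; i++)
            printf("%d: %s\n", i, d[i].name);
        free(d);
        return 0;
    }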


@@ -144,6 +144,7 @@ enum {
HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
@@ -567,21 +568,6 @@ struct hns_roce_mbox_msg {
struct hns_roce_dev;
struct hns_roce_rinl_sge {
void *addr;
u32 len;
};
struct hns_roce_rinl_wqe {
struct hns_roce_rinl_sge *sg_list;
u32 sge_cnt;
};
struct hns_roce_rinl_buf {
struct hns_roce_rinl_wqe *wqe_list;
u32 wqe_cnt;
};
enum {
HNS_ROCE_FLUSH_FLAG = 0,
};
@@ -632,7 +618,6 @@ struct hns_roce_qp {
/* 0: flush needed, 1: unneeded */
unsigned long flush_flag;
struct hns_roce_work flush_work;
struct hns_roce_rinl_buf rq_inl_buf;
struct list_head node; /* all qps are on a list */
struct list_head rq_node; /* all recv qps are on a list */
struct list_head sq_node; /* all send qps are on a list */
@@ -887,7 +872,7 @@ struct hns_roce_hw {
u32 step_idx);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state);
enum ib_qp_state new_state, struct ib_udata *udata);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
void (*dereg_mr)(struct hns_roce_dev *hr_dev);

View File

@@ -821,22 +821,10 @@ static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
u32 wqe_idx, u32 max_sge)
{
struct hns_roce_rinl_sge *sge_list;
void *wqe = NULL;
u32 i;
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
/* rq support inline data */
if (hr_qp->rq_inl_buf.wqe_cnt) {
sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
for (i = 0; i < wr->num_sge; i++) {
sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
sge_list[i].len = wr->sg_list[i].length;
}
}
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -2849,7 +2837,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
attr->port_num = 1;
attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
IB_QPS_INIT);
IB_QPS_INIT, NULL);
if (ret) {
ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
ret);
@@ -2871,7 +2859,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
IB_QPS_RTR);
IB_QPS_RTR, NULL);
hr_dev->loop_idc = loopback;
if (ret) {
ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
@@ -2886,7 +2874,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
IB_QPS_RTS);
IB_QPS_RTS, NULL);
if (ret)
ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
ret);
@@ -3730,39 +3718,6 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
return 0;
}
static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
struct hns_roce_qp *qp,
struct ib_wc *wc)
{
struct hns_roce_rinl_sge *sge_list;
u32 wr_num, wr_cnt, sge_num;
u32 sge_cnt, data_len, size;
void *wqe_buf;
wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
data_len = wc->byte_len;
for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
size = min(sge_list[sge_cnt].len, data_len);
memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
data_len -= size;
wqe_buf += size;
}
if (unlikely(data_len)) {
wc->status = IB_WC_LOC_LEN_ERR;
return -EAGAIN;
}
return 0;
}
static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
int num_entries, struct ib_wc *wc)
{
@@ -3974,22 +3929,10 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
wc->opcode = ib_opcode;
}
static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
struct hns_roce_v2_cqe *cqe)
{
return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
(hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
hr_reg_read(cqe, CQE_RQ_INLINE);
}
static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
{
struct hns_roce_qp *qp = to_hr_qp(wc->qp);
u32 hr_opcode;
int ib_opcode;
int ret;
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
@@ -4014,12 +3957,6 @@ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
else
wc->opcode = ib_opcode;
if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
if (unlikely(ret))
return ret;
}
wc->sl = hr_reg_read(cqe, CQE_SL);
wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
wc->slid = 0;
@@ -4445,10 +4382,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
upper_32_bits(hr_qp->rdb.dma));
if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
hr_reg_write_bool(context, QPC_RQIE,
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
if (ibqp->srq) {
@@ -4639,8 +4572,11 @@ static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
struct hns_roce_v2_qp_context *qpc_mask,
struct ib_udata *udata)
{
struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
struct hns_roce_ucontext, ibucontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -4760,6 +4696,26 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_write(context, QPC_LP_SGEN_INI, 3);
hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
if (udata && ibqp->qp_type == IB_QPT_RC &&
(uctx->config & HNS_ROCE_RQ_INLINE_FLAGS)) {
hr_reg_write_bool(context, QPC_RQIE,
hr_dev->caps.flags &
HNS_ROCE_CAP_FLAG_RQ_INLINE);
hr_reg_clear(qpc_mask, QPC_RQIE);
}
if (udata &&
(ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_XRC_TGT) &&
(uctx->config & HNS_ROCE_CQE_INLINE_FLAGS)) {
hr_reg_write_bool(context, QPC_CQEIE,
hr_dev->caps.flags &
HNS_ROCE_CAP_FLAG_CQE_INLINE);
hr_reg_clear(qpc_mask, QPC_CQEIE);
hr_reg_write(context, QPC_CQEIS, 0);
hr_reg_clear(qpc_mask, QPC_CQEIS);
}
return 0;
}
@@ -5107,7 +5063,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
struct hns_roce_v2_qp_context *qpc_mask,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
@@ -5124,7 +5081,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
qpc_mask, udata);
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -5329,7 +5286,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
enum ib_qp_state new_state, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5352,7 +5309,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
new_state, context, qpc_mask);
new_state, context, qpc_mask, udata);
if (ret)
goto out;
@@ -5555,7 +5512,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
if (modify_qp_is_ok(hr_qp)) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET);
hr_qp->state, IB_QPS_RESET, udata);
if (ret)
ibdev_err(ibdev,
"failed to modify QP to RST, ret = %d.\n",


@@ -531,7 +531,8 @@ struct hns_roce_v2_qp_context {
#define QPC_RQ_RTY_TX_ERR QPC_FIELD_LOC(607, 607)
#define QPC_RX_CQN QPC_FIELD_LOC(631, 608)
#define QPC_XRC_QP_TYPE QPC_FIELD_LOC(632, 632)
#define QPC_RSV3 QPC_FIELD_LOC(634, 633)
#define QPC_CQEIE QPC_FIELD_LOC(633, 633)
#define QPC_CQEIS QPC_FIELD_LOC(634, 634)
#define QPC_MIN_RNR_TIME QPC_FIELD_LOC(639, 635)
#define QPC_RQ_PRODUCER_IDX QPC_FIELD_LOC(655, 640)
#define QPC_RQ_CONSUMER_IDX QPC_FIELD_LOC(671, 656)

Some files were not shown because too many files have changed in this diff Show More