Merge branches 'amso1100', 'cma', 'cxgb3', 'cxgb4', 'fdr', 'ipath', 'ipoib', 'misc', 'mlx4', 'misc', 'nes', 'qib' and 'xrc' into for-next
@@ -889,6 +889,8 @@ retest:
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irq(&cm_id_priv->lock);
+		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
+			break;
 		ib_send_cm_dreq(cm_id, NULL, 0);
 		goto retest;
 	case IB_CM_DREQ_SENT:
@@ -1008,7 +1010,6 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	req_msg->service_id = param->service_id;
 	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
-	cm_req_set_resp_res(req_msg, param->responder_resources);
 	cm_req_set_init_depth(req_msg, param->initiator_depth);
 	cm_req_set_remote_resp_timeout(req_msg,
 				       param->remote_cm_response_timeout);
@@ -1017,12 +1018,16 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
 	cm_req_set_local_resp_timeout(req_msg,
 				      param->local_cm_response_timeout);
-	cm_req_set_retry_count(req_msg, param->retry_count);
 	req_msg->pkey = param->primary_path->pkey;
 	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
-	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
 	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
-	cm_req_set_srq(req_msg, param->srq);
+
+	if (param->qp_type != IB_QPT_XRC_INI) {
+		cm_req_set_resp_res(req_msg, param->responder_resources);
+		cm_req_set_retry_count(req_msg, param->retry_count);
+		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
+		cm_req_set_srq(req_msg, param->srq);
+	}
+
 	if (pri_path->hop_limit <= 1) {
 		req_msg->primary_local_lid = pri_path->slid;
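These two hunks move the responder-side REQ attributes under a QP-type check: an XRC initiator's REQ no longer carries responder resources, retry counts or the SRQ flag. A hedged illustration of the caller's side follows; the helper and the chosen values are hypothetical, only the field names are the ones cm_format_req() consumes above:

	/* Hypothetical sketch, not part of this commit. */
	static void fill_xrc_ini_req(struct ib_cm_req_param *req,
				     struct ib_sa_path_rec *path,
				     __be64 service_id, u32 qpn)
	{
		memset(req, 0, sizeof(*req));
		req->primary_path    = path;
		req->service_id      = service_id;
		req->qp_type         = IB_QPT_XRC_INI;
		req->qp_num          = qpn;
		req->starting_psn    = qpn;
		req->initiator_depth = 1;
		/* responder_resources, the retry counts and srq are not
		 * placed in the REQ for IB_QPT_XRC_INI; cm_format_req()
		 * skips them in the hunk above. */
	}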
@@ -1080,7 +1085,8 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
 	if (!param->primary_path)
 		return -EINVAL;
 
-	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
+	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
+	    param->qp_type != IB_QPT_XRC_INI)
 		return -EINVAL;
 
 	if (param->private_data &&
@@ -1601,18 +1607,24 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
 	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
 	rep_msg->local_comm_id = cm_id_priv->id.local_id;
 	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
-	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
 	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
 	rep_msg->resp_resources = param->responder_resources;
-	rep_msg->initiator_depth = param->initiator_depth;
 	cm_rep_set_target_ack_delay(rep_msg,
 				    cm_id_priv->av.port->cm_dev->ack_delay);
 	cm_rep_set_failover(rep_msg, param->failover_accepted);
-	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
-	cm_rep_set_srq(rep_msg, param->srq);
 	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
+
+	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
+		rep_msg->initiator_depth = param->initiator_depth;
+		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
+		cm_rep_set_srq(rep_msg, param->srq);
+		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
+	} else {
+		cm_rep_set_srq(rep_msg, 1);
+		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
+	}
 
 	if (param->private_data && param->private_data_len)
 		memcpy(rep_msg->private_data, param->private_data,
 		       param->private_data_len);
@@ -1660,7 +1672,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
 	cm_id_priv->initiator_depth = param->initiator_depth;
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
-	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
 
 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 	return ret;
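Because an XRC target REP no longer stores the QP number in the local-QPN field, ib_send_cm_rep() cannot read it back out of the formatted message; it now takes it straight from param->qp_num. The & 0xFFFFFF mask keeps the low 24 bits, since QP and EEC numbers are 24-bit quantities on the wire. A standalone illustration of the masking, with made-up values:

	u32    qp_num = 0x01012345;                     /* upper byte is noise    */
	__be32 qpn    = cpu_to_be32(qp_num & 0xFFFFFF); /* 24-bit QPN, big endian */
	u32    host   = be32_to_cpu(qpn);               /* 0x012345 again         */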
@@ -1731,7 +1743,7 @@ error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_rtu);
 
-static void cm_format_rep_event(struct cm_work *work)
+static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
 {
 	struct cm_rep_msg *rep_msg;
 	struct ib_cm_rep_event_param *param;
@@ -1740,7 +1752,7 @@ static void cm_format_rep_event(struct cm_work *work)
 	param = &work->cm_event.param.rep_rcvd;
 	param->remote_ca_guid = rep_msg->local_ca_guid;
 	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
-	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
+	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
 	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
 	param->responder_resources = rep_msg->initiator_depth;
 	param->initiator_depth = rep_msg->resp_resources;
@@ -1808,7 +1820,7 @@ static int cm_rep_handler(struct cm_work *work)
 		return -EINVAL;
 	}
 
-	cm_format_rep_event(work);
+	cm_format_rep_event(work, cm_id_priv->qp_type);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
@@ -1823,7 +1835,7 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
 	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
-	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 
 	spin_lock(&cm.lock);
 	/* Check for duplicate REP. */
@@ -1850,7 +1862,7 @@ static int cm_rep_handler(struct cm_work *work)
 
 	cm_id_priv->id.state = IB_CM_REP_RCVD;
 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
-	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
+	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
 	cm_id_priv->initiator_depth = rep_msg->resp_resources;
 	cm_id_priv->responder_resources = rep_msg->initiator_depth;
 	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
@@ -3492,7 +3504,8 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
+		if (cm_id_priv->qp_type == IB_QPT_RC ||
+		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
 			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
 					 IB_QP_MIN_RNR_TIMER;
 			qp_attr->max_dest_rd_atomic =
@@ -3537,15 +3550,21 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
 		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
 		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC) {
-			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
-					 IB_QP_RNR_RETRY |
+		switch (cm_id_priv->qp_type) {
+		case IB_QPT_RC:
+		case IB_QPT_XRC_INI:
+			*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
 					 IB_QP_MAX_QP_RD_ATOMIC;
-			qp_attr->timeout = cm_id_priv->av.timeout;
 			qp_attr->retry_cnt = cm_id_priv->retry_count;
 			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic =
-					cm_id_priv->initiator_depth;
+			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
+			/* fall through */
+		case IB_QPT_XRC_TGT:
+			*qp_attr_mask |= IB_QP_TIMEOUT;
+			qp_attr->timeout = cm_id_priv->av.timeout;
+			break;
+		default:
+			break;
 		}
 		if (cm_id_priv->alt_av.ah_attr.dlid) {
 			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
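A ULP consumes these masks through ib_cm_init_qp_attr() and ib_modify_qp(); after this hunk an XRC target QP gets only IB_QP_STATE, IB_QP_SQ_PSN and IB_QP_TIMEOUT for the RTS transition, while RC and XRC initiator QPs keep the retry and RD-atomic attributes as well thanks to the fall-through. A minimal usage sketch, with error handling omitted and cm_id/qp assumed to exist:

	struct ib_qp_attr qp_attr;
	int qp_attr_mask = 0;

	qp_attr.qp_state = IB_QPS_RTS;
	ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	ib_modify_qp(qp, &qp_attr, qp_attr_mask);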
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
  *
@@ -86,7 +86,7 @@ struct cm_req_msg {
 	__be16 pkey;
 	/* path MTU:4, RDC exists:1, RNR retry count:3. */
 	u8 offset50;
-	/* max CM Retries:4, SRQ:1, rsvd:3 */
+	/* max CM Retries:4, SRQ:1, extended transport type:3 */
 	u8 offset51;
 
 	__be16 primary_local_lid;
@@ -175,6 +175,11 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 	switch(transport_type) {
 	case 0: return IB_QPT_RC;
 	case 1: return IB_QPT_UC;
+	case 3:
+		switch (req_msg->offset51 & 0x7) {
+		case 1: return IB_QPT_XRC_TGT;
+		default: return 0;
+		}
 	default: return 0;
 	}
 }
@@ -188,6 +193,12 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 			req_msg->offset40) &
 					   0xFFFFFFF9) | 0x2);
 		break;
+	case IB_QPT_XRC_INI:
+		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
+			req_msg->offset40) &
+					   0xFFFFFFF9) | 0x6);
+		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
+		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
 			req_msg->offset40) &
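cm_req_set_qp_type() above shows the REQ encoding: the transport service type sits in bits 1-2 of offset40 (hence the 0xFFFFFFF9 mask, with 0x2 selecting UC and 0x6 selecting the extended value 3), and when the extended value is used the low three bits of offset51, previously reserved, pick the extended transport, 1 meaning XRC. A standalone decode sketch mirroring cm_req_get_qp_type():

	u8 transport_type = (u8)((be32_to_cpu(req_msg->offset40) >> 1) & 0x3);
	u8 ext_transport  = req_msg->offset51 & 0x7;

	/* transport_type: 0 = RC, 1 = UC, 3 = extended.
	 * When extended, ext_transport 1 = XRC, so the passive side
	 * reports the QP type as IB_QPT_XRC_TGT. */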
@@ -527,6 +538,23 @@ static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 			   (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }
 
+static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
+{
+	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
+}
+
+static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
+{
+	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
+			    (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
+}
+
+static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
+{
+	return (qp_type == IB_QPT_XRC_INI) ?
+		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
+}
+
 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
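An XRC target advertises its EEC number in a separate REP field (offset16, per the helpers above) rather than in the local-QPN field, so readers have to pick the right accessor for their QP type. A hedged usage sketch on the active side:

	/* Sketch: the active (XRC INI) side of a connection reads the
	 * remote end's EECN where an RC connection would read a QPN. */
	u32 remote = be32_to_cpu(cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type));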
@@ -81,6 +81,7 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
+static DEFINE_IDR(ib_ps);
 
 struct cma_device {
 	struct list_head list;
@@ -1179,6 +1180,15 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 	event->param.conn.qp_num = req_data->remote_qpn;
 }
 
+static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
+{
+	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
+		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
+		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
+		 (id->qp_type == IB_QPT_UD)) ||
+		(!id->qp_type));
+}
+
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id;
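cma_req_handler() (next hunk) uses this predicate to reject incoming requests whose QP type cannot match the listener: a connection REQ must carry the listener's qp_type, a SIDR REQ is only valid against a UD listener, and a listener with no qp_type set accepts anything. A few hypothetical cases:

	/* listener IB_QPT_RC, IB_CM_REQ_RECEIVED with qp_type RC   -> accepted */
	/* listener IB_QPT_RC, IB_CM_REQ_RECEIVED with qp_type UC   -> -EINVAL  */
	/* listener IB_QPT_UD, IB_CM_SIDR_REQ_RECEIVED              -> accepted */
	/* listener with qp_type 0, any of the above                -> accepted */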
@@ -1186,13 +1196,16 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
+	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
+		return -EINVAL;
+
 	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (listen_id->id.qp_type == IB_QPT_UD) {
+	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1328,6 +1341,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	switch (iw_event->status) {
 	case 0:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.param.conn.initiator_depth = iw_event->ird;
+		event.param.conn.responder_resources = iw_event->ord;
 		break;
 	case -ECONNRESET:
 	case -ECONNREFUSED:
@@ -1343,6 +1358,8 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
+		event.param.conn.initiator_depth = iw_event->ird;
+		event.param.conn.responder_resources = iw_event->ord;
 		break;
 	default:
 		BUG_ON(1);
@@ -1433,8 +1450,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	event.param.conn.private_data = iw_event->private_data;
 	event.param.conn.private_data_len = iw_event->private_data_len;
-	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
-	event.param.conn.responder_resources = attr.max_qp_rd_atom;
+	event.param.conn.initiator_depth = iw_event->ird;
+	event.param.conn.responder_resources = iw_event->ord;
 
 	/*
 	 * Protect against the user destroying conn_id from another thread
@@ -2234,6 +2251,9 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	case RDMA_PS_IPOIB:
 		ps = &ipoib_ps;
 		break;
+	case RDMA_PS_IB:
+		ps = &ib_ps;
+		break;
 	default:
 		return -EPROTONOSUPPORT;
 	}
@@ -2569,7 +2589,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.service_id = cma_get_service_id(id_priv->id.ps,
 					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
-	req.qp_type = IB_QPT_RC;
+	req.qp_type = id_priv->id.qp_type;
 	req.starting_psn = id_priv->seq_num;
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
@@ -2616,14 +2636,16 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	iw_param.ord = conn_param->initiator_depth;
-	iw_param.ird = conn_param->responder_resources;
-	iw_param.private_data = conn_param->private_data;
-	iw_param.private_data_len = conn_param->private_data_len;
-	if (id_priv->id.qp)
+	if (conn_param) {
+		iw_param.ord = conn_param->initiator_depth;
+		iw_param.ird = conn_param->responder_resources;
+		iw_param.private_data = conn_param->private_data;
+		iw_param.private_data_len = conn_param->private_data_len;
+		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
+	} else {
+		memset(&iw_param, 0, sizeof iw_param);
 		iw_param.qpn = id_priv->qp_num;
-	else
-		iw_param.qpn = conn_param->qp_num;
+	}
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
 	if (ret) {
@@ -2765,14 +2787,20 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (id->qp_type == IB_QPT_UD)
-			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
-						conn_param->private_data,
-						conn_param->private_data_len);
-		else if (conn_param)
-			ret = cma_accept_ib(id_priv, conn_param);
-		else
-			ret = cma_rep_recv(id_priv);
+		if (id->qp_type == IB_QPT_UD) {
+			if (conn_param)
+				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+							conn_param->private_data,
+							conn_param->private_data_len);
+			else
+				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+							NULL, 0);
+		} else {
+			if (conn_param)
+				ret = cma_accept_ib(id_priv, conn_param);
+			else
+				ret = cma_rep_recv(id_priv);
+		}
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_accept_iw(id_priv, conn_param);
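One visible effect of this hunk is that a UD listener can now accept a SIDR request without supplying any connection parameters; rdma_accept() sends a success reply with empty private data in that case. A minimal usage sketch, where id is the rdma_cm_id delivered with RDMA_CM_EVENT_CONNECT_REQUEST and error handling is omitted:

	/* Reply to a SIDR request with no private data attached.
	 * (Passing a populated struct rdma_conn_param instead attaches
	 * private data to the reply.) */
	ret = rdma_accept(id, NULL);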
@@ -3460,6 +3488,7 @@ static void __exit cma_cleanup(void)
 	idr_destroy(&tcp_ps);
 	idr_destroy(&udp_ps);
 	idr_destroy(&ipoib_ps);
+	idr_destroy(&ib_ps);
 }
 
 module_init(cma_init);
@@ -1596,6 +1596,9 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 			mad->mad_hdr.class_version].class;
 		if (!class)
 			goto out;
+		if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
+		    IB_MGMT_MAX_METHODS)
+			goto out;
 		method = class->method_table[convert_mgmt_class(
 							mad->mad_hdr.mgmt_class)];
 		if (method)
@@ -185,17 +185,35 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 	if (ret)
 		return ret;
 
+	rate = (25 * attr.active_speed) / 10;
+
 	switch (attr.active_speed) {
-	case 2: speed = " DDR"; break;
-	case 4: speed = " QDR"; break;
+	case 2:
+		speed = " DDR";
+		break;
+	case 4:
+		speed = " QDR";
+		break;
+	case 8:
+		speed = " FDR10";
+		rate = 10;
+		break;
+	case 16:
+		speed = " FDR";
+		rate = 14;
+		break;
+	case 32:
+		speed = " EDR";
+		rate = 25;
+		break;
 	}
 
-	rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed;
+	rate *= ib_width_enum_to_int(attr.active_width);
 	if (rate < 0)
 		return -EINVAL;
 
 	return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
-		       rate / 10, rate % 10 ? ".5" : "",
+		       rate, (attr.active_speed == 1) ? ".5" : "",
 		       ib_width_enum_to_int(attr.active_width), speed);
 }
 
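The reworked calculation starts from the per-lane rate implied by active_speed (2.5 Gb/sec times the speed multiplier, truncated to whole Gb/sec), overrides it for the newly added encodings (FDR10 = 10, FDR = 14, EDR = 25 Gb/sec per lane), and then multiplies by the lane count; the ".5" suffix is only appended for SDR. A few worked values, assuming ib_width_enum_to_int() returns 1, 4, 8 or 12:

	/* active_speed  width   computation            printed                */
	/*  1 (SDR)       1X     (25*1)/10 * 1  =   2   "2.5 Gb/sec (1X)"      */
	/*  4 (QDR)       4X     (25*4)/10 * 4  =  40   "40 Gb/sec (4X QDR)"   */
	/*  8 (FDR10)     4X     10 * 4         =  40   "40 Gb/sec (4X FDR10)" */
	/* 16 (FDR)       4X     14 * 4         =  56   "56 Gb/sec (4X FDR)"   */
	/* 32 (EDR)      12X     25 * 12        = 300   "300 Gb/sec (12X EDR)" */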
@@ -1122,7 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
 		return -EFAULT;
 
-	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
+	if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
 		return -EINVAL;
 
 	if (hdr.in + sizeof(hdr) > len)
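The dropped half of the test was dead code: the command index copied from user space is an unsigned field, so it can never be negative, and some compilers warn about the always-false comparison. One upper-bound check against the table size is enough. A standalone illustration of the pattern (names hypothetical); the same reasoning applies to the ucma and user_mad hunks below:

	u32 cmd = hdr_cmd;	/* unsigned command index from user space */

	if (cmd >= ARRAY_SIZE(cmd_table) || !cmd_table[cmd])
		return -EINVAL;	/* 'cmd < 0' could never be true for a u32 */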
@@ -276,7 +276,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
 	ucma_set_event_context(ctx, event, uevent);
 	uevent->resp.event = event->event;
 	uevent->resp.status = event->status;
-	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
+	if (cm_id->qp_type == IB_QPT_UD)
 		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
 	else
 		ucma_copy_conn_event(&uevent->resp.param.conn,
@@ -377,6 +377,9 @@ static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_
 	case RDMA_PS_IPOIB:
 		*qp_type = IB_QPT_UD;
 		return 0;
+	case RDMA_PS_IB:
+		*qp_type = cmd->qp_type;
+		return 0;
 	default:
 		return -EINVAL;
 	}
@@ -1270,7 +1273,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof(hdr)))
 		return -EFAULT;
 
-	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
+	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
 		return -EINVAL;
 
 	if (hdr.in + sizeof(hdr) > len)
@@ -458,8 +458,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err;
 	}
 
-	if (packet->mad.hdr.id < 0 ||
-	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
+	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
 		ret = -EINVAL;
 		goto err;
 	}
@@ -703,7 +702,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 	mutex_lock(&file->port->file_mutex);
 	mutex_lock(&file->mutex);
 
-	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -76,6 +76,8 @@ struct ib_uverbs_device {
 	struct ib_device *ib_dev;
 	int devnum;
 	struct cdev cdev;
+	struct rb_root xrcd_tree;
+	struct mutex xrcd_tree_mutex;
 };
 
 struct ib_uverbs_event_file {
@@ -120,6 +122,16 @@ struct ib_uevent_object {
 	u32 events_reported;
 };
 
+struct ib_uxrcd_object {
+	struct ib_uobject uobject;
+	atomic_t refcnt;
+};
+
+struct ib_usrq_object {
+	struct ib_uevent_object uevent;
+	struct ib_uxrcd_object *uxrcd;
+};
+
 struct ib_uqp_object {
 	struct ib_uevent_object uevent;
 	struct list_head mcast_list;
@@ -142,6 +154,7 @@ extern struct idr ib_uverbs_ah_idr;
 extern struct idr ib_uverbs_cq_idr;
 extern struct idr ib_uverbs_qp_idr;
 extern struct idr ib_uverbs_srq_idr;
+extern struct idr ib_uverbs_xrcd_idr;
 
 void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
 
@@ -161,6 +174,7 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_event_handler(struct ib_event_handler *handler,
 			     struct ib_event *event);
+void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd);
 
 #define IB_UVERBS_DECLARE_CMD(name) \
 	ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
@@ -181,6 +195,7 @@ IB_UVERBS_DECLARE_CMD(poll_cq);
 IB_UVERBS_DECLARE_CMD(req_notify_cq);
 IB_UVERBS_DECLARE_CMD(destroy_cq);
 IB_UVERBS_DECLARE_CMD(create_qp);
+IB_UVERBS_DECLARE_CMD(open_qp);
 IB_UVERBS_DECLARE_CMD(query_qp);
 IB_UVERBS_DECLARE_CMD(modify_qp);
 IB_UVERBS_DECLARE_CMD(destroy_qp);
@@ -195,5 +210,8 @@ IB_UVERBS_DECLARE_CMD(create_srq);
 IB_UVERBS_DECLARE_CMD(modify_srq);
 IB_UVERBS_DECLARE_CMD(query_srq);
 IB_UVERBS_DECLARE_CMD(destroy_srq);
+IB_UVERBS_DECLARE_CMD(create_xsrq);
+IB_UVERBS_DECLARE_CMD(open_xrcd);
+IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 #endif /* UVERBS_H */
[File diff suppressed because it is too large]
@@ -72,6 +72,7 @@ DEFINE_IDR(ib_uverbs_ah_idr);
 DEFINE_IDR(ib_uverbs_cq_idr);
 DEFINE_IDR(ib_uverbs_qp_idr);
 DEFINE_IDR(ib_uverbs_srq_idr);
+DEFINE_IDR(ib_uverbs_xrcd_idr);
 
 static DEFINE_SPINLOCK(map_lock);
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -107,6 +108,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_CMD_MODIFY_SRQ]	= ib_uverbs_modify_srq,
 	[IB_USER_VERBS_CMD_QUERY_SRQ]	= ib_uverbs_query_srq,
 	[IB_USER_VERBS_CMD_DESTROY_SRQ]	= ib_uverbs_destroy_srq,
+	[IB_USER_VERBS_CMD_OPEN_XRCD]	= ib_uverbs_open_xrcd,
+	[IB_USER_VERBS_CMD_CLOSE_XRCD]	= ib_uverbs_close_xrcd,
+	[IB_USER_VERBS_CMD_CREATE_XSRQ]	= ib_uverbs_create_xsrq,
+	[IB_USER_VERBS_CMD_OPEN_QP]	= ib_uverbs_open_qp
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -202,8 +207,12 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 
 		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-		ib_uverbs_detach_umcast(qp, uqp);
-		ib_destroy_qp(qp);
+		if (qp != qp->real_qp) {
+			ib_close_qp(qp);
+		} else {
+			ib_uverbs_detach_umcast(qp, uqp);
+			ib_destroy_qp(qp);
+		}
 		ib_uverbs_release_uevent(file, &uqp->uevent);
 		kfree(uqp);
 	}
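The distinction matters because of the new ib_open_qp() interface added elsewhere in this series: an opened QP is only an extra handle onto an existing (typically XRC target) QP, with qp->real_qp pointing at the underlying object, so per-file cleanup must drop the handle rather than destroy a QP it does not own. A minimal sketch of the rule, under that assumption:

	/* Releasing a uverbs QP reference. */
	if (qp != qp->real_qp)
		ib_close_qp(qp);	/* shared handle: drop it only    */
	else
		ib_destroy_qp(qp);	/* owner: tear down the QP itself */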
@@ -241,6 +250,18 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uobj);
 	}
 
+	mutex_lock(&file->device->xrcd_tree_mutex);
+	list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
+		struct ib_xrcd *xrcd = uobj->object;
+		struct ib_uxrcd_object *uxrcd =
+			container_of(uobj, struct ib_uxrcd_object, uobject);
+
+		idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
+		ib_uverbs_dealloc_xrcd(file->device, xrcd);
+		kfree(uxrcd);
+	}
+	mutex_unlock(&file->device->xrcd_tree_mutex);
+
 	list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
 		struct ib_pd *pd = uobj->object;
 
@@ -557,8 +578,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if (hdr.in_words * 4 != count)
 		return -EINVAL;
 
-	if (hdr.command < 0 ||
-	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
+	if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
 	    !uverbs_cmd_table[hdr.command])
 		return -EINVAL;
 
@@ -741,6 +761,8 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 	kref_init(&uverbs_dev->ref);
 	init_completion(&uverbs_dev->comp);
+	uverbs_dev->xrcd_tree = RB_ROOT;
+	mutex_init(&uverbs_dev->xrcd_tree_mutex);
 
 	spin_lock(&map_lock);
 	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
[+348, -28: file diff suppressed because it is too large]
@@ -288,6 +288,11 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		cm_event.private_data_len =
 			be32_to_cpu(req->private_data_length);
 		cm_event.private_data = req->private_data;
+		/*
+		 * Until ird/ord negotiation via MPAv2 support is added, send
+		 * max supported values
+		 */
+		cm_event.ird = cm_event.ord = 128;
 
 		if (cm_id->event_handler)
 			cm_id->event_handler(cm_id, &cm_event);
@@ -183,6 +183,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
 	case IW_CM_EVENT_ESTABLISHED:
 		c2_set_qp_state(req->qp,
 				C2_QP_STATE_RTS);
+		/*
+		 * Until ird/ord negotiation via MPAv2 support is added, send
+		 * max supported values
+		 */
+		cm_event.ird = cm_event.ord = 128;
 	case IW_CM_EVENT_CLOSE:
 
 		/*
@@ -753,6 +753,11 @@ static void connect_request_upcall(struct iwch_ep *ep)
 	event.private_data_len = ep->plen;
 	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	event.provider_data = ep;
+	/*
+	 * Until ird/ord negotiation via MPAv2 support is added, send max
+	 * supported values
+	 */
+	event.ird = event.ord = 8;
 	if (state_read(&ep->parent_ep->com) != DEAD) {
 		get_ep(&ep->com);
 		ep->parent_ep->com.cm_id->event_handler(
@@ -770,6 +775,11 @@ static void established_upcall(struct iwch_ep *ep)
 	PDBG("%s ep %p\n", __func__, ep);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_ESTABLISHED;
+	/*
+	 * Until ird/ord negotiation via MPAv2 support is added, send max
+	 * supported values
+	 */
+	event.ird = event.ord = 8;
 	if (ep->com.cm_id) {
 		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
@@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	struct ib_event event;
 	struct iwch_qp_attributes attrs;
 	struct iwch_qp *qhp;
+	unsigned long flag;
 
 	spin_lock(&rnicp->lock);
 	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
@@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
+	spin_lock_irqsave(&chp->comp_handler_lock, flag);
 	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
@@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	struct iwch_cq *chp;
 	struct iwch_qp *qhp;
 	u32 cqid = RSPQ_CQID(rsp_msg);
+	unsigned long flag;
 
 	rnicp = (struct iwch_dev *) rdev_p->ulp;
 	spin_lock(&rnicp->lock);
@@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
 	 */
 	if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
 		dst_confirm(qhp->ep->dst);
+	spin_lock_irqsave(&chp->comp_handler_lock, flag);
 	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
 	break;
 
 	case TPT_ERR_STAG:
@@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 	chp->rhp = rhp;
 	chp->ibcq.cqe = 1 << chp->cq.size_log2;
 	spin_lock_init(&chp->lock);
+	spin_lock_init(&chp->comp_handler_lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
 	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
@@ -103,6 +103,7 @@ struct iwch_cq {
 	struct iwch_dev *rhp;
 	struct t3_cq cq;
 	spinlock_t lock;
+	spinlock_t comp_handler_lock;
 	atomic_t refcnt;
 	wait_queue_head_t wait;
 	u32 __user *user_rptr_addr;
@@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, *flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, *flag);
@@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, *flag);
-	if (flushed)
+	if (flushed) {
+		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
@@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
+		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
+			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
 		}
 		return;
 	}
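All of these hunks wrap the CQ completion upcall in the new per-CQ comp_handler_lock, so that calls into ibcq.comp_handler from the event and flush paths are serialized with one another. The pattern, as a standalone sketch (chp is a struct iwch_cq carrying the spinlock added earlier in this diff):

	unsigned long flag;

	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);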
[File diff suppressed because it is too large]
[Some files were not shown because too many files have changed in this diff]