Merge branch 'wr-cleanup' into k.o/for-4.4
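
The merged series reworks the kernel RDMA send path. The catch-all struct ib_send_wr, which carried UD, RDMA, atomic, fast-registration, memory-window-bind, and signature parameters in one large union, is split into per-operation request types -- ib_ud_wr, ib_rdma_wr, ib_atomic_wr, ib_fast_reg_wr, ib_bind_mw_wr, ib_sig_handover_wr, plus the mlx5-private mlx5_umr_wr -- each embedding the generic struct ib_send_wr as a member named wr. Code that used to reach through the union (wr->wr.ud.remote_qpn) now downcasts once through a container_of()-style helper (ud_wr(wr)->remote_qpn), and every post call switches from &wr to &wr.wr because ib_post_send() still takes the generic pointer. The following is a self-contained model of that embed-and-downcast pattern; the struct and helper names here are stand-ins, not the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for the kernel's container_of(): recover the outer struct
     * from a pointer to one of its members. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct send_wr {            /* models struct ib_send_wr: common fields */
        int opcode;
    };

    struct ud_send_wr {         /* models struct ib_ud_wr: base + UD fields */
        struct send_wr wr;      /* embedded generic WR, always named "wr" */
        unsigned remote_qpn;
    };

    static struct ud_send_wr *ud_wr(struct send_wr *wr)
    {
        return container_of(wr, struct ud_send_wr, wr);
    }

    int main(void)
    {
        struct ud_send_wr ud = { .wr = { .opcode = 1 }, .remote_qpn = 42 };
        struct send_wr *base = &ud.wr;   /* what a post function receives */

        /* The consumer downcasts once instead of reading a union: */
        printf("opcode=%d remote_qpn=%u\n",
               base->opcode, ud_wr(base)->remote_qpn);
        return 0;
    }

The hunks below apply this conversion file by file, first in the core MAD layer, then in uverbs, and finally in the cxgb3, cxgb4, mlx4, and mlx5 providers.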
@@ -126,7 +126,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
     mad_send_wr = container_of(send_buf,
                                struct ib_mad_send_wr_private,
                                send_buf);
-    mad_send_wr->send_wr.wr.ud.port_num = port_num;
+    mad_send_wr->send_wr.port_num = port_num;
     }

     if (ib_post_send_mad(send_buf, NULL)) {

@@ -752,7 +752,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
     struct ib_device *device = mad_agent_priv->agent.device;
     u8 port_num;
     struct ib_wc mad_wc;
-    struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+    struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
     size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
     u16 out_mad_pkey_index = 0;
     u16 drslid;

@@ -761,7 +761,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,

     if (rdma_cap_ib_switch(device) &&
         smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-        port_num = send_wr->wr.ud.port_num;
+        port_num = send_wr->port_num;
     else
         port_num = mad_agent_priv->agent.port_num;

@@ -832,9 +832,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
     }

     build_smp_wc(mad_agent_priv->agent.qp,
-             send_wr->wr_id, drslid,
-             send_wr->wr.ud.pkey_index,
-             send_wr->wr.ud.port_num, &mad_wc);
+             send_wr->wr.wr_id, drslid,
+             send_wr->pkey_index,
+             send_wr->port_num, &mad_wc);

     if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
         mad_wc.byte_len = mad_send_wr->send_buf.hdr_len

@@ -894,7 +894,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,

     local->mad_send_wr = mad_send_wr;
     if (opa) {
-        local->mad_send_wr->send_wr.wr.ud.pkey_index = out_mad_pkey_index;
+        local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
         local->return_wc_byte_len = mad_size;
     }
     /* Reference MAD agent until send side of local completion handled */

@@ -1039,14 +1039,14 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,

     mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

-    mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
-    mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
-    mad_send_wr->send_wr.num_sge = 2;
-    mad_send_wr->send_wr.opcode = IB_WR_SEND;
-    mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
-    mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
-    mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-    mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
+    mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr;
+    mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
+    mad_send_wr->send_wr.wr.num_sge = 2;
+    mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
+    mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
+    mad_send_wr->send_wr.remote_qpn = remote_qpn;
+    mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
+    mad_send_wr->send_wr.pkey_index = pkey_index;

     if (rmpp_active) {
         ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);

@@ -1151,7 +1151,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)

     /* Set WR ID to find mad_send_wr upon completion */
     qp_info = mad_send_wr->mad_agent_priv->qp_info;
-    mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
+    mad_send_wr->send_wr.wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
     mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;

     mad_agent = mad_send_wr->send_buf.mad_agent;

@@ -1179,7 +1179,7 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)

     spin_lock_irqsave(&qp_info->send_queue.lock, flags);
     if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
-        ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
+        ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
                    &bad_send_wr);
         list = &qp_info->send_queue.list;
     } else {

@@ -1244,7 +1244,7 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
          * request associated with the completion
          */
         next_send_buf = send_buf->next;
-        mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
+        mad_send_wr->send_wr.ah = send_buf->ah;

         if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
             IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {

@@ -2457,7 +2457,7 @@ retry:
     ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

     if (queued_send_wr) {
-        ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
+        ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
                    &bad_send_wr);
         if (ret) {
             dev_err(&port_priv->device->dev,

@@ -2515,7 +2515,7 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv,
     struct ib_send_wr *bad_send_wr;

     mad_send_wr->retry = 0;
-    ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
+    ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
                &bad_send_wr);
     if (ret)
         ib_mad_send_done_handler(port_priv, wc);

@@ -2713,7 +2713,7 @@ static void local_completions(struct work_struct *work)
             build_smp_wc(recv_mad_agent->agent.qp,
                      (unsigned long) local->mad_send_wr,
                      be16_to_cpu(IB_LID_PERMISSIVE),
-                     local->mad_send_wr->send_wr.wr.ud.pkey_index,
+                     local->mad_send_wr->send_wr.pkey_index,
                      recv_mad_agent->agent.port_num, &wc);

             local->mad_priv->header.recv_wc.wc = &wc;

@@ -123,7 +123,7 @@ struct ib_mad_send_wr_private {
     struct ib_mad_send_buf send_buf;
     u64 header_mapping;
     u64 payload_mapping;
-    struct ib_send_wr send_wr;
+    struct ib_ud_wr send_wr;
     struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
     __be64 tid;
     unsigned long timeout;
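
With ib_mad_send_wr_private.send_wr now a struct ib_ud_wr, the generic fields (wr_id, sg_list, num_sge, opcode, send_flags) move onto the embedded send_wr.wr, the UD-only fields (ah, port_num, remote_qpn, remote_qkey, pkey_index) flatten onto send_wr itself, and every ib_post_send() in the MAD code passes &mad_send_wr->send_wr.wr -- exactly the pattern visible in the hunks above. The uverbs command path is converted next.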
@@ -2427,6 +2427,12 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
     return in_len;
 }

+static void *alloc_wr(size_t wr_size, __u32 num_sge)
+{
+    return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
+               num_sge * sizeof (struct ib_sge), GFP_KERNEL);
+};
+
 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                 struct ib_device *ib_dev,
                 const char __user *buf, int in_len,

@@ -2475,14 +2481,83 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
             goto out_put;
         }

-        next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
-                   user_wr->num_sge * sizeof (struct ib_sge),
-                   GFP_KERNEL);
-        if (!next) {
-            ret = -ENOMEM;
+        if (is_ud) {
+            struct ib_ud_wr *ud;
+
+            if (user_wr->opcode != IB_WR_SEND &&
+                user_wr->opcode != IB_WR_SEND_WITH_IMM) {
+                ret = -EINVAL;
+                goto out_put;
+            }
+
+            ud = alloc_wr(sizeof(*ud), user_wr->num_sge);
+            if (!ud) {
+                ret = -ENOMEM;
+                goto out_put;
+            }
+
+            ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
+            if (!ud->ah) {
+                kfree(ud);
+                ret = -EINVAL;
+                goto out_put;
+            }
+            ud->remote_qpn = user_wr->wr.ud.remote_qpn;
+            ud->remote_qkey = user_wr->wr.ud.remote_qkey;
+
+            next = &ud->wr;
+        } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+               user_wr->opcode == IB_WR_RDMA_WRITE ||
+               user_wr->opcode == IB_WR_RDMA_READ) {
+            struct ib_rdma_wr *rdma;
+
+            rdma = alloc_wr(sizeof(*rdma), user_wr->num_sge);
+            if (!rdma) {
+                ret = -ENOMEM;
+                goto out_put;
+            }
+
+            rdma->remote_addr = user_wr->wr.rdma.remote_addr;
+            rdma->rkey = user_wr->wr.rdma.rkey;
+
+            next = &rdma->wr;
+        } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+               user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+            struct ib_atomic_wr *atomic;
+
+            atomic = alloc_wr(sizeof(*atomic), user_wr->num_sge);
+            if (!atomic) {
+                ret = -ENOMEM;
+                goto out_put;
+            }
+
+            atomic->remote_addr = user_wr->wr.atomic.remote_addr;
+            atomic->compare_add = user_wr->wr.atomic.compare_add;
+            atomic->swap = user_wr->wr.atomic.swap;
+            atomic->rkey = user_wr->wr.atomic.rkey;
+
+            next = &atomic->wr;
+        } else if (user_wr->opcode == IB_WR_SEND ||
+               user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+               user_wr->opcode == IB_WR_SEND_WITH_INV) {
+            next = alloc_wr(sizeof(*next), user_wr->num_sge);
+            if (!next) {
+                ret = -ENOMEM;
+                goto out_put;
+            }
+        } else {
+            ret = -EINVAL;
             goto out_put;
         }

+        if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
+            user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
+            next->ex.imm_data =
+                (__be32 __force) user_wr->ex.imm_data;
+        } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
+            next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
+        }
+
         if (!last)
             wr = next;
         else

@@ -2495,60 +2570,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
         next->opcode = user_wr->opcode;
         next->send_flags = user_wr->send_flags;

-        if (is_ud) {
-            if (next->opcode != IB_WR_SEND &&
-                next->opcode != IB_WR_SEND_WITH_IMM) {
-                ret = -EINVAL;
-                goto out_put;
-            }
-
-            next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
-                             file->ucontext);
-            if (!next->wr.ud.ah) {
-                ret = -EINVAL;
-                goto out_put;
-            }
-            next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
-            next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
-            if (next->opcode == IB_WR_SEND_WITH_IMM)
-                next->ex.imm_data =
-                    (__be32 __force) user_wr->ex.imm_data;
-        } else {
-            switch (next->opcode) {
-            case IB_WR_RDMA_WRITE_WITH_IMM:
-                next->ex.imm_data =
-                    (__be32 __force) user_wr->ex.imm_data;
-            case IB_WR_RDMA_WRITE:
-            case IB_WR_RDMA_READ:
-                next->wr.rdma.remote_addr =
-                    user_wr->wr.rdma.remote_addr;
-                next->wr.rdma.rkey =
-                    user_wr->wr.rdma.rkey;
-                break;
-            case IB_WR_SEND_WITH_IMM:
-                next->ex.imm_data =
-                    (__be32 __force) user_wr->ex.imm_data;
-                break;
-            case IB_WR_SEND_WITH_INV:
-                next->ex.invalidate_rkey =
-                    user_wr->ex.invalidate_rkey;
-                break;
-            case IB_WR_ATOMIC_CMP_AND_SWP:
-            case IB_WR_ATOMIC_FETCH_AND_ADD:
-                next->wr.atomic.remote_addr =
-                    user_wr->wr.atomic.remote_addr;
-                next->wr.atomic.compare_add =
-                    user_wr->wr.atomic.compare_add;
-                next->wr.atomic.swap = user_wr->wr.atomic.swap;
-                next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
-            case IB_WR_SEND:
-                break;
-            default:
-                ret = -EINVAL;
-                goto out_put;
-            }
-        }
-
         if (next->num_sge) {
             next->sg_list = (void *) next +
                 ALIGN(sizeof *next, sizeof (struct ib_sge));

@@ -2582,8 +2603,8 @@ out_put:
     put_qp_read(qp);

     while (wr) {
-        if (is_ud && wr->wr.ud.ah)
-            put_ah_read(wr->wr.ud.ah);
+        if (is_ud && ud_wr(wr)->ah)
+            put_ah_read(ud_wr(wr)->ah);
         next = wr->next;
         kfree(wr);
         wr = next;
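
The new alloc_wr() helper sizes a single allocation to hold a typed WR followed by its scatter/gather array: the WR size is rounded up to sizeof(struct ib_sge) so the trailing array lands on a natural boundary, matching the later next->sg_list = (void *)next + ALIGN(...) computation in the same function. A minimal userspace model of that layout arithmetic, with stand-in types and the usual power-of-two ALIGN:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    struct sge { uint64_t addr; uint32_t length, lkey; };  /* stand-in ib_sge */
    struct wr  { struct wr *next; struct sge *sg_list; int num_sge; };

    /* One allocation for the WR plus num_sge trailing SGEs, as alloc_wr()
     * does with kmalloc(). */
    static void *alloc_wr(size_t wr_size, uint32_t num_sge)
    {
        return malloc(ALIGN(wr_size, sizeof(struct sge)) +
                      num_sge * sizeof(struct sge));
    }

    int main(void)
    {
        struct wr *w = alloc_wr(sizeof(*w), 2);
        if (!w)
            return 1;
        /* The SGE array starts right after the aligned WR header. */
        w->sg_list = (void *)((char *)w + ALIGN(sizeof(*w), sizeof(struct sge)));
        w->num_sge = 2;
        printf("wr at %p, sg_list at %p\n", (void *)w, (void *)w->sg_list);
        free(w);
        return 0;
    }

The provider drivers follow, starting with cxgb3.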
@@ -95,8 +95,8 @@ static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
     wqe->write.reserved[0] = 0;
     wqe->write.reserved[1] = 0;
     wqe->write.reserved[2] = 0;
-    wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
-    wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+    wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+    wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);

     if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
         plen = 4;

@@ -137,8 +137,8 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
     wqe->read.local_inv = 0;
     wqe->read.reserved[0] = 0;
     wqe->read.reserved[1] = 0;
-    wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
-    wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
+    wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
+    wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
     wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
     wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
     wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);

@@ -146,27 +146,27 @@ static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
     return 0;
 }

-static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
+static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *send_wr,
              u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
 {
+    struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
     int i;
     __be64 *p;

-    if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
+    if (wr->page_list_len > T3_MAX_FASTREG_DEPTH)
         return -EINVAL;
     *wr_cnt = 1;
-    wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
-    wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
-    wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
-    wqe->fastreg.va_base_lo_fbo =
-        cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
+    wqe->fastreg.stag = cpu_to_be32(wr->rkey);
+    wqe->fastreg.len = cpu_to_be32(wr->length);
+    wqe->fastreg.va_base_hi = cpu_to_be32(wr->iova_start >> 32);
+    wqe->fastreg.va_base_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff);
     wqe->fastreg.page_type_perms = cpu_to_be32(
-        V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
-        V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
+        V_FR_PAGE_COUNT(wr->page_list_len) |
+        V_FR_PAGE_SIZE(wr->page_shift-12) |
         V_FR_TYPE(TPT_VATO) |
-        V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
+        V_FR_PERMS(iwch_ib_to_tpt_access(wr->access_flags)));
     p = &wqe->fastreg.pbl_addrs[0];
-    for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
+    for (i = 0; i < wr->page_list_len; i++, p++) {

         /* If we need a 2nd WR, then set it up */
         if (i == T3_MAX_FASTREG_FRAG) {

@@ -175,14 +175,14 @@ static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
                        Q_PTR2IDX((wq->wptr+1), wq->size_log2));
             build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
                    Q_GENBIT(wq->wptr + 1, wq->size_log2),
-                   0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
+                   0, 1 + wr->page_list_len - T3_MAX_FASTREG_FRAG,
                    T3_EOP);

             p = &wqe->pbl_frag.pbl_addrs[0];
         }
-        *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+        *p = cpu_to_be64((u64)wr->page_list->page_list[i]);
     }
-    *flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
+    *flit_cnt = 5 + wr->page_list_len;
     if (*flit_cnt > 15)
         *flit_cnt = 15;
     return 0;
@@ -528,8 +528,8 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
     if (wr->num_sge > T4_MAX_SEND_SGE)
         return -EINVAL;
     wqe->write.r2 = 0;
-    wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
-    wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+    wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
+    wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
     if (wr->num_sge) {
         if (wr->send_flags & IB_SEND_INLINE) {
             ret = build_immd(sq, wqe->write.u.immd_src, wr,

@@ -566,10 +566,10 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
     if (wr->num_sge > 1)
         return -EINVAL;
     if (wr->num_sge) {
-        wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
-        wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+        wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
+        wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
                             >> 32));
-        wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+        wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
         wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
         wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
         wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr

@@ -606,39 +606,36 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 }

 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
-             struct ib_send_wr *wr, u8 *len16, u8 t5dev)
+             struct ib_send_wr *send_wr, u8 *len16, u8 t5dev)
 {
-
+    struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
     struct fw_ri_immd *imdp;
     __be64 *p;
     int i;
-    int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+    int pbllen = roundup(wr->page_list_len * sizeof(u64), 32);
     int rem;

-    if (wr->wr.fast_reg.page_list_len >
-        t4_max_fr_depth(use_dsgl))
+    if (wr->page_list_len > t4_max_fr_depth(use_dsgl))
         return -EINVAL;

     wqe->fr.qpbinde_to_dcacpu = 0;
-    wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+    wqe->fr.pgsz_shift = wr->page_shift - 12;
     wqe->fr.addr_type = FW_RI_VA_BASED_TO;
-    wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+    wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access_flags);
     wqe->fr.len_hi = 0;
-    wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
-    wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
-    wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
-    wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
-                    0xffffffff);
+    wqe->fr.len_lo = cpu_to_be32(wr->length);
+    wqe->fr.stag = cpu_to_be32(wr->rkey);
+    wqe->fr.va_hi = cpu_to_be32(wr->iova_start >> 32);
+    wqe->fr.va_lo_fbo = cpu_to_be32(wr->iova_start & 0xffffffff);

     if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
         struct c4iw_fr_page_list *c4pl =
-            to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+            to_c4iw_fr_page_list(wr->page_list);
         struct fw_ri_dsgl *sglp;

-        for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
-            wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
-                cpu_to_be64((u64)
-                        wr->wr.fast_reg.page_list->page_list[i]);
+        for (i = 0; i < wr->page_list_len; i++) {
+            wr->page_list->page_list[i] = (__force u64)
+                cpu_to_be64((u64)wr->page_list->page_list[i]);
         }

         sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);

@@ -657,9 +654,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
         imdp->immdlen = cpu_to_be32(pbllen);
         p = (__be64 *)(imdp + 1);
         rem = pbllen;
-        for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
-            *p = cpu_to_be64(
-                (u64)wr->wr.fast_reg.page_list->page_list[i]);
+        for (i = 0; i < wr->page_list_len; i++) {
+            *p = cpu_to_be64((u64)wr->page_list->page_list[i]);
             rem -= sizeof(*p);
             if (++p == (__be64 *)&sq->queue[sq->size])
                 p = (__be64 *)sq->queue;
@@ -457,7 +457,8 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
              struct ib_grh *grh, struct ib_mad *mad)
 {
     struct ib_sge list;
-    struct ib_send_wr wr, *bad_wr;
+    struct ib_ud_wr wr;
+    struct ib_send_wr *bad_wr;
     struct mlx4_ib_demux_pv_ctx *tun_ctx;
     struct mlx4_ib_demux_pv_qp *tun_qp;
     struct mlx4_rcv_tunnel_mad *tun_mad;

@@ -582,18 +583,18 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
     list.length = sizeof (struct mlx4_rcv_tunnel_mad);
     list.lkey = tun_ctx->pd->local_dma_lkey;

-    wr.wr.ud.ah = ah;
-    wr.wr.ud.port_num = port;
-    wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-    wr.wr.ud.remote_qpn = dqpn;
-    wr.next = NULL;
-    wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
-    wr.sg_list = &list;
-    wr.num_sge = 1;
-    wr.opcode = IB_WR_SEND;
-    wr.send_flags = IB_SEND_SIGNALED;
+    wr.ah = ah;
+    wr.port_num = port;
+    wr.remote_qkey = IB_QP_SET_QKEY;
+    wr.remote_qpn = dqpn;
+    wr.wr.next = NULL;
+    wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
+    wr.wr.sg_list = &list;
+    wr.wr.num_sge = 1;
+    wr.wr.opcode = IB_WR_SEND;
+    wr.wr.send_flags = IB_SEND_SIGNALED;

-    ret = ib_post_send(src_qp, &wr, &bad_wr);
+    ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
 out:
     if (ret)
         ib_destroy_ah(ah);

@@ -1186,7 +1187,8 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
              u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
 {
     struct ib_sge list;
-    struct ib_send_wr wr, *bad_wr;
+    struct ib_ud_wr wr;
+    struct ib_send_wr *bad_wr;
     struct mlx4_ib_demux_pv_ctx *sqp_ctx;
     struct mlx4_ib_demux_pv_qp *sqp;
     struct mlx4_mad_snd_buf *sqp_mad;

@@ -1257,17 +1259,17 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
     list.length = sizeof (struct mlx4_mad_snd_buf);
     list.lkey = sqp_ctx->pd->local_dma_lkey;

-    wr.wr.ud.ah = ah;
-    wr.wr.ud.port_num = port;
-    wr.wr.ud.pkey_index = wire_pkey_ix;
-    wr.wr.ud.remote_qkey = qkey;
-    wr.wr.ud.remote_qpn = remote_qpn;
-    wr.next = NULL;
-    wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
-    wr.sg_list = &list;
-    wr.num_sge = 1;
-    wr.opcode = IB_WR_SEND;
-    wr.send_flags = IB_SEND_SIGNALED;
+    wr.ah = ah;
+    wr.port_num = port;
+    wr.pkey_index = wire_pkey_ix;
+    wr.remote_qkey = qkey;
+    wr.remote_qpn = remote_qpn;
+    wr.wr.next = NULL;
+    wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
+    wr.wr.sg_list = &list;
+    wr.wr.num_sge = 1;
+    wr.wr.opcode = IB_WR_SEND;
+    wr.wr.send_flags = IB_SEND_SIGNALED;
     if (s_mac)
         memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
     if (vlan_id < 0x1000)

@@ -1275,7 +1277,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
         to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);


-    ret = ib_post_send(send_qp, &wr, &bad_wr);
+    ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
 out:
     if (ret)
         ib_destroy_ah(ah);
@@ -321,21 +321,21 @@ err_free:
 int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
             struct ib_mw_bind *mw_bind)
 {
-    struct ib_send_wr wr;
+    struct ib_bind_mw_wr wr;
     struct ib_send_wr *bad_wr;
     int ret;

     memset(&wr, 0, sizeof(wr));
-    wr.opcode = IB_WR_BIND_MW;
-    wr.wr_id = mw_bind->wr_id;
-    wr.send_flags = mw_bind->send_flags;
-    wr.wr.bind_mw.mw = mw;
-    wr.wr.bind_mw.bind_info = mw_bind->bind_info;
-    wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);
+    wr.wr.opcode = IB_WR_BIND_MW;
+    wr.wr.wr_id = mw_bind->wr_id;
+    wr.wr.send_flags = mw_bind->send_flags;
+    wr.mw = mw;
+    wr.bind_info = mw_bind->bind_info;
+    wr.rkey = ib_inc_rkey(mw->rkey);

-    ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
+    ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);
     if (!ret)
-        mw->rkey = wr.wr.bind_mw.rkey;
+        mw->rkey = wr.rkey;

     return ret;
 }
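
The bind-MW conversion above shows the on-stack idiom used throughout the rest of this diff: declare the typed WR, zero it, fill the generic fields through the embedded .wr and the operation-specific fields directly, then post the embedded base. Re-assembled from the hunk above for reference (kernel context assumed, not standalone code):

    struct ib_bind_mw_wr wr;        /* typed WR on the stack */
    struct ib_send_wr *bad_wr;

    memset(&wr, 0, sizeof(wr));
    wr.wr.opcode     = IB_WR_BIND_MW;      /* generic fields: embedded .wr */
    wr.wr.wr_id      = mw_bind->wr_id;
    wr.wr.send_flags = mw_bind->send_flags;
    wr.mw        = mw;                     /* bind-specific fields: direct */
    wr.bind_info = mw_bind->bind_info;
    wr.rkey      = ib_inc_rkey(mw->rkey);

    ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);  /* post the embedded base */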
@@ -2133,14 +2133,14 @@ static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey)
 }

 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
-                  struct ib_send_wr *wr,
+                  struct ib_ud_wr *wr,
                   void *wqe, unsigned *mlx_seg_len)
 {
     struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device);
     struct ib_device *ib_dev = &mdev->ib_dev;
     struct mlx4_wqe_mlx_seg *mlx = wqe;
     struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
-    struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+    struct mlx4_ib_ah *ah = to_mah(wr->ah);
     u16 pkey;
     u32 qkey;
     int send_size;

@@ -2148,13 +2148,13 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
     int spc;
     int i;

-    if (wr->opcode != IB_WR_SEND)
+    if (wr->wr.opcode != IB_WR_SEND)
         return -EINVAL;

     send_size = 0;

-    for (i = 0; i < wr->num_sge; ++i)
-        send_size += wr->sg_list[i].length;
+    for (i = 0; i < wr->wr.num_sge; ++i)
+        send_size += wr->wr.sg_list[i].length;

     /* for proxy-qp0 sends, need to add in size of tunnel header */
     /* for tunnel-qp0 sends, tunnel header is already in s/g list */

@@ -2179,11 +2179,11 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
     mlx->rlid = sqp->ud_header.lrh.destination_lid;

     sqp->ud_header.lrh.virtual_lane = 0;
-    sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+    sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
     ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
     sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
     if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
-        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
     else
         sqp->ud_header.bth.destination_qpn =
             cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]);

@@ -2255,14 +2255,14 @@ static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
     }
 }

-static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
+static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
                 void *wqe, unsigned *mlx_seg_len)
 {
     struct ib_device *ib_dev = sqp->qp.ibqp.device;
     struct mlx4_wqe_mlx_seg *mlx = wqe;
     struct mlx4_wqe_ctrl_seg *ctrl = wqe;
     struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
-    struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+    struct mlx4_ib_ah *ah = to_mah(wr->ah);
     union ib_gid sgid;
     u16 pkey;
     int send_size;

@@ -2276,8 +2276,8 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
     bool is_grh;

     send_size = 0;
-    for (i = 0; i < wr->num_sge; ++i)
-        send_size += wr->sg_list[i].length;
+    for (i = 0; i < wr->wr.num_sge; ++i)
+        send_size += wr->wr.sg_list[i].length;

     is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET;
     is_grh = mlx4_ib_ah_grh_present(ah);

@@ -2357,7 +2357,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         mlx->rlid = sqp->ud_header.lrh.destination_lid;
     }

-    switch (wr->opcode) {
+    switch (wr->wr.opcode) {
     case IB_WR_SEND:
         sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
         sqp->ud_header.immediate_present = 0;

@@ -2365,7 +2365,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
     case IB_WR_SEND_WITH_IMM:
         sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
         sqp->ud_header.immediate_present = 1;
-        sqp->ud_header.immediate_data = wr->ex.imm_data;
+        sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
         break;
     default:
         return -EINVAL;

@@ -2408,16 +2408,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
         if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
             sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
     }
-    sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
+    sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
     if (!sqp->qp.ibqp.qp_num)
         ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
     else
-        ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
+        ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
     sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
-    sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+    sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
     sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
-    sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
-                           sqp->qkey : wr->wr.ud.remote_qkey);
+    sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
+                           sqp->qkey : wr->remote_qkey);
     sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

     header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
@@ -2505,43 +2505,45 @@ static __be32 convert_access(int acc)
         cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
 }

-static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)
+static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg,
+            struct ib_fast_reg_wr *wr)
 {
-    struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
+    struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->page_list);
     int i;

-    for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
+    for (i = 0; i < wr->page_list_len; ++i)
         mfrpl->mapped_page_list[i] =
-            cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
+            cpu_to_be64(wr->page_list->page_list[i] |
                     MLX4_MTT_FLAG_PRESENT);

-    fseg->flags = convert_access(wr->wr.fast_reg.access_flags);
-    fseg->mem_key = cpu_to_be32(wr->wr.fast_reg.rkey);
+    fseg->flags = convert_access(wr->access_flags);
+    fseg->mem_key = cpu_to_be32(wr->rkey);
     fseg->buf_list = cpu_to_be64(mfrpl->map);
-    fseg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
-    fseg->reg_len = cpu_to_be64(wr->wr.fast_reg.length);
+    fseg->start_addr = cpu_to_be64(wr->iova_start);
+    fseg->reg_len = cpu_to_be64(wr->length);
     fseg->offset = 0; /* XXX -- is this just for ZBVA? */
-    fseg->page_size = cpu_to_be32(wr->wr.fast_reg.page_shift);
+    fseg->page_size = cpu_to_be32(wr->page_shift);
     fseg->reserved[0] = 0;
     fseg->reserved[1] = 0;
 }

-static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr)
+static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg,
+             struct ib_bind_mw_wr *wr)
 {
     bseg->flags1 =
-        convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) &
+        convert_access(wr->bind_info.mw_access_flags) &
         cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ |
                 MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE |
                 MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC);
     bseg->flags2 = 0;
-    if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2)
+    if (wr->mw->type == IB_MW_TYPE_2)
         bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2);
-    if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED)
+    if (wr->bind_info.mw_access_flags & IB_ZERO_BASED)
         bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED);
-    bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey);
-    bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey);
-    bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr);
-    bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length);
+    bseg->new_rkey = cpu_to_be32(wr->rkey);
+    bseg->lkey = cpu_to_be32(wr->bind_info.mr->lkey);
+    bseg->addr = cpu_to_be64(wr->bind_info.addr);
+    bseg->length = cpu_to_be64(wr->bind_info.length);
 }

 static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)

@@ -2558,46 +2560,47 @@ static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg,
     rseg->reserved = 0;
 }

-static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
+static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg,
+               struct ib_atomic_wr *wr)
 {
-    if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-        aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-        aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-    } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-        aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-        aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+    if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+        aseg->swap_add = cpu_to_be64(wr->swap);
+        aseg->compare = cpu_to_be64(wr->compare_add);
+    } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
+        aseg->swap_add = cpu_to_be64(wr->compare_add);
+        aseg->compare = cpu_to_be64(wr->compare_add_mask);
     } else {
-        aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
+        aseg->swap_add = cpu_to_be64(wr->compare_add);
         aseg->compare = 0;
     }

 }

 static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg,
-                  struct ib_send_wr *wr)
+                  struct ib_atomic_wr *wr)
 {
-    aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-    aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-    aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-    aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
+    aseg->swap_add = cpu_to_be64(wr->swap);
+    aseg->swap_add_mask = cpu_to_be64(wr->swap_mask);
+    aseg->compare = cpu_to_be64(wr->compare_add);
+    aseg->compare_mask = cpu_to_be64(wr->compare_add_mask);
 }

 static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
-                 struct ib_send_wr *wr)
+                 struct ib_ud_wr *wr)
 {
-    memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av));
-    dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-    dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
-    dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan;
-    memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6);
+    memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av));
+    dseg->dqpn = cpu_to_be32(wr->remote_qpn);
+    dseg->qkey = cpu_to_be32(wr->remote_qkey);
+    dseg->vlan = to_mah(wr->ah)->av.eth.vlan;
+    memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6);
 }

 static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
                     struct mlx4_wqe_datagram_seg *dseg,
-                    struct ib_send_wr *wr,
+                    struct ib_ud_wr *wr,
                     enum mlx4_ib_qp_type qpt)
 {
-    union mlx4_ext_av *av = &to_mah(wr->wr.ud.ah)->av;
+    union mlx4_ext_av *av = &to_mah(wr->ah)->av;
     struct mlx4_av sqp_av = {0};
     int port = *((u8 *) &av->ib.port_pd) & 0x3;
@@ -2616,18 +2619,18 @@ static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev,
     dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY);
 }

-static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len)
+static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len)
 {
     struct mlx4_wqe_inline_seg *inl = wqe;
     struct mlx4_ib_tunnel_header hdr;
-    struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+    struct mlx4_ib_ah *ah = to_mah(wr->ah);
     int spc;
     int i;

     memcpy(&hdr.av, &ah->av, sizeof hdr.av);
-    hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
-    hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
-    hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+    hdr.remote_qpn = cpu_to_be32(wr->remote_qpn);
+    hdr.pkey_index = cpu_to_be16(wr->pkey_index);
+    hdr.qkey = cpu_to_be32(wr->remote_qkey);
     memcpy(hdr.mac, ah->av.eth.mac, 6);
     hdr.vlan = ah->av.eth.vlan;

@@ -2699,22 +2702,22 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
     dseg->addr = cpu_to_be64(sg->addr);
 }

-static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
+static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr,
              struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
              __be32 *lso_hdr_sz, __be32 *blh)
 {
-    unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+    unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16);

     if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE))
         *blh = cpu_to_be32(1 << 6);

     if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
-             wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+             wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
         return -EINVAL;

-    memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+    memcpy(wqe->header, wr->header, wr->hlen);

-    *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
+    *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen);
     *lso_seg_len = halign;
     return 0;
 }
@@ -2813,11 +2816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             case IB_WR_ATOMIC_CMP_AND_SWP:
             case IB_WR_ATOMIC_FETCH_AND_ADD:
             case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
-                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-                          wr->wr.atomic.rkey);
+                set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+                          atomic_wr(wr)->rkey);
                 wqe += sizeof (struct mlx4_wqe_raddr_seg);

-                set_atomic_seg(wqe, wr);
+                set_atomic_seg(wqe, atomic_wr(wr));
                 wqe += sizeof (struct mlx4_wqe_atomic_seg);

                 size += (sizeof (struct mlx4_wqe_raddr_seg) +

@@ -2826,11 +2829,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 break;

             case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
-                          wr->wr.atomic.rkey);
+                set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
+                          atomic_wr(wr)->rkey);
                 wqe += sizeof (struct mlx4_wqe_raddr_seg);

-                set_masked_atomic_seg(wqe, wr);
+                set_masked_atomic_seg(wqe, atomic_wr(wr));
                 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg);

                 size += (sizeof (struct mlx4_wqe_raddr_seg) +

@@ -2841,8 +2844,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             case IB_WR_RDMA_READ:
             case IB_WR_RDMA_WRITE:
             case IB_WR_RDMA_WRITE_WITH_IMM:
-                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
-                          wr->wr.rdma.rkey);
+                set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+                          rdma_wr(wr)->rkey);
                 wqe += sizeof (struct mlx4_wqe_raddr_seg);
                 size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
                 break;

@@ -2858,7 +2861,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             case IB_WR_FAST_REG_MR:
                 ctrl->srcrb_flags |=
                     cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
-                set_fmr_seg(wqe, wr);
+                set_fmr_seg(wqe, fast_reg_wr(wr));
                 wqe += sizeof (struct mlx4_wqe_fmr_seg);
                 size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
                 break;

@@ -2866,7 +2869,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             case IB_WR_BIND_MW:
                 ctrl->srcrb_flags |=
                     cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
-                set_bind_seg(wqe, wr);
+                set_bind_seg(wqe, bind_mw_wr(wr));
                 wqe += sizeof(struct mlx4_wqe_bind_seg);
                 size += sizeof(struct mlx4_wqe_bind_seg) / 16;
                 break;

@@ -2877,7 +2880,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             break;

         case MLX4_IB_QPT_TUN_SMI_OWNER:
-            err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+            err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
+                             ctrl, &seglen);
             if (unlikely(err)) {
                 *bad_wr = wr;
                 goto out;

@@ -2888,19 +2892,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         case MLX4_IB_QPT_TUN_SMI:
         case MLX4_IB_QPT_TUN_GSI:
             /* this is a UD qp used in MAD responses to slaves. */
-            set_datagram_seg(wqe, wr);
+            set_datagram_seg(wqe, ud_wr(wr));
             /* set the forced-loopback bit in the data seg av */
             *(__be32 *) wqe |= cpu_to_be32(0x80000000);
             wqe += sizeof (struct mlx4_wqe_datagram_seg);
             size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
             break;
         case MLX4_IB_QPT_UD:
-            set_datagram_seg(wqe, wr);
+            set_datagram_seg(wqe, ud_wr(wr));
             wqe += sizeof (struct mlx4_wqe_datagram_seg);
             size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

             if (wr->opcode == IB_WR_LSO) {
-                err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh);
+                err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
+                            &lso_hdr_sz, &blh);
                 if (unlikely(err)) {
                     *bad_wr = wr;
                     goto out;

@@ -2912,7 +2917,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             break;

         case MLX4_IB_QPT_PROXY_SMI_OWNER:
-            err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen);
+            err = build_sriov_qp0_header(to_msqp(qp), ud_wr(wr),
+                             ctrl, &seglen);
             if (unlikely(err)) {
                 *bad_wr = wr;
                 goto out;

@@ -2923,7 +2929,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             add_zero_len_inline(wqe);
             wqe += 16;
             size++;
-            build_tunnel_header(wr, wqe, &seglen);
+            build_tunnel_header(ud_wr(wr), wqe, &seglen);
             wqe += seglen;
             size += seglen / 16;
             break;

@@ -2933,18 +2939,20 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
              * In this case we first add a UD segment targeting
              * the tunnel qp, and then add a header with address
              * information */
-            set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, wr,
+            set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe,
+                        ud_wr(wr),
                         qp->mlx4_ib_qp_type);
             wqe += sizeof (struct mlx4_wqe_datagram_seg);
             size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
-            build_tunnel_header(wr, wqe, &seglen);
+            build_tunnel_header(ud_wr(wr), wqe, &seglen);
             wqe += seglen;
             size += seglen / 16;
             break;

         case MLX4_IB_QPT_SMI:
         case MLX4_IB_QPT_GSI:
-            err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
+            err = build_mlx_header(to_msqp(qp), ud_wr(wr), ctrl,
+                           &seglen);
             if (unlikely(err)) {
                 *bad_wr = wr;
                 goto out;
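
Note how mlx4_ib_post_send() now performs the downcast at each dispatch site -- atomic_wr(wr), rdma_wr(wr), fast_reg_wr(wr), bind_mw_wr(wr), ud_wr(wr) -- selected by opcode or QP type, while the generic loop still walks plain struct ib_send_wr pointers (wr = wr->next) and reports failures through *bad_wr = wr. The mlx5 driver gets the same treatment, starting with its private UMR request type.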
@@ -245,6 +245,7 @@ enum mlx5_ib_qp_flags {
 };

 struct mlx5_umr_wr {
+    struct ib_send_wr wr;
     union {
         u64 virt_addr;
         u64 offset;

@@ -257,6 +258,11 @@ struct mlx5_umr_wr {
     u32 mkey;
 };

+static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
+{
+    return container_of(wr, struct mlx5_umr_wr, wr);
+}
+
 struct mlx5_shared_mr_info {
     int mr_id;
     struct ib_umem *umem;
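
umr_wr() above is the template for the accessor family used throughout this diff; the core helpers (ud_wr(), rdma_wr(), atomic_wr(), fast_reg_wr(), bind_mw_wr(), sig_handover_wr()) are not shown in this excerpt but presumably follow the same one-line container_of() shape -- a sketch of the assumed definition, not code from this diff:

    /* Sketch only: mirrors the umr_wr() definition above; the real core
     * helpers live outside this excerpt. */
    static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
    {
        return container_of(wr, struct ib_ud_wr, wr);
    }

This replaces the old open-coded cast through the union, e.g. (struct mlx5_umr_wr *)&wr->wr.fast_reg, which the next hunks remove.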
@@ -687,7 +687,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
                  int access_flags)
 {
     struct mlx5_ib_dev *dev = to_mdev(pd->device);
-    struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+    struct mlx5_umr_wr *umrwr = umr_wr(wr);

     sg->addr = dma;
     sg->length = ALIGN(sizeof(u64) * n, 64);

@@ -715,7 +715,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
                    struct ib_send_wr *wr, u32 key)
 {
-    struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+    struct mlx5_umr_wr *umrwr = umr_wr(wr);

     wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
     wr->opcode = MLX5_IB_WR_UMR;

@@ -752,7 +752,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
     struct device *ddev = dev->ib_dev.dma_device;
     struct umr_common *umrc = &dev->umrc;
     struct mlx5_ib_umr_context umr_context;
-    struct ib_send_wr wr, *bad;
+    struct mlx5_umr_wr umrwr;
+    struct ib_send_wr *bad;
     struct mlx5_ib_mr *mr;
     struct ib_sge sg;
     int size;

@@ -798,14 +799,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         goto free_pas;
     }

-    memset(&wr, 0, sizeof(wr));
-    wr.wr_id = (u64)(unsigned long)&umr_context;
-    prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
-             virt_addr, len, access_flags);
+    memset(&umrwr, 0, sizeof(umrwr));
+    umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+    prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+             page_shift, virt_addr, len, access_flags);

     mlx5_ib_init_umr_context(&umr_context);
     down(&umrc->sem);
-    err = ib_post_send(umrc->qp, &wr, &bad);
+    err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
     if (err) {
         mlx5_ib_warn(dev, "post send failed, err %d\n", err);
         goto unmap_dma;

@@ -851,8 +852,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
     int size;
     __be64 *pas;
     dma_addr_t dma;
-    struct ib_send_wr wr, *bad;
-    struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+    struct ib_send_wr *bad;
+    struct mlx5_umr_wr wr;
     struct ib_sge sg;
     int err = 0;
     const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);

@@ -917,26 +918,26 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
     dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

     memset(&wr, 0, sizeof(wr));
-    wr.wr_id = (u64)(unsigned long)&umr_context;
+    wr.wr.wr_id = (u64)(unsigned long)&umr_context;

     sg.addr = dma;
     sg.length = ALIGN(npages * sizeof(u64),
             MLX5_UMR_MTT_ALIGNMENT);
     sg.lkey = dev->umrc.pd->local_dma_lkey;

-    wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+    wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
             MLX5_IB_SEND_UMR_UPDATE_MTT;
-    wr.sg_list = &sg;
-    wr.num_sge = 1;
-    wr.opcode = MLX5_IB_WR_UMR;
-    umrwr->npages = sg.length / sizeof(u64);
-    umrwr->page_shift = PAGE_SHIFT;
-    umrwr->mkey = mr->mmr.key;
-    umrwr->target.offset = start_page_index;
+    wr.wr.sg_list = &sg;
+    wr.wr.num_sge = 1;
+    wr.wr.opcode = MLX5_IB_WR_UMR;
+    wr.npages = sg.length / sizeof(u64);
+    wr.page_shift = PAGE_SHIFT;
+    wr.mkey = mr->mmr.key;
+    wr.target.offset = start_page_index;

     mlx5_ib_init_umr_context(&umr_context);
     down(&umrc->sem);
-    err = ib_post_send(umrc->qp, &wr, &bad);
+    err = ib_post_send(umrc->qp, &wr.wr, &bad);
     if (err) {
         mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
     } else {

@@ -1122,16 +1123,17 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
     struct umr_common *umrc = &dev->umrc;
     struct mlx5_ib_umr_context umr_context;
-    struct ib_send_wr wr, *bad;
+    struct mlx5_umr_wr umrwr;
+    struct ib_send_wr *bad;
     int err;

-    memset(&wr, 0, sizeof(wr));
-    wr.wr_id = (u64)(unsigned long)&umr_context;
-    prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
+    memset(&umrwr.wr, 0, sizeof(umrwr));
+    umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+    prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);

     mlx5_ib_init_umr_context(&umr_context);
     down(&umrc->sem);
-    err = ib_post_send(umrc->qp, &wr, &bad);
+    err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
     if (err) {
         up(&umrc->sem);
         mlx5_ib_dbg(dev, "err %d\n", err);
@@ -1838,9 +1838,9 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
|
||||
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
|
||||
struct ib_send_wr *wr)
|
||||
{
|
||||
memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
|
||||
dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
|
||||
dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
|
||||
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
|
||||
dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
|
||||
dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
|
||||
}
|
||||
|
||||
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
|
||||
@@ -1908,7 +1908,7 @@ static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
|
||||
}
|
||||
|
||||
umr->flags = (1 << 5); /* fail if not free */
|
||||
umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
|
||||
umr->klm_octowords = get_klm_octo(fast_reg_wr(wr)->page_list_len);
|
||||
umr->mkey_mask = frwr_mkey_mask();
|
||||
}
|
||||
|
||||
@@ -1952,7 +1952,7 @@ static __be64 get_umr_update_mtt_mask(void)
|
||||
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
|
||||
struct ib_send_wr *wr)
|
||||
{
|
||||
struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
|
||||
struct mlx5_umr_wr *umrwr = umr_wr(wr);
|
||||
|
||||
memset(umr, 0, sizeof(*umr));
|
||||
|
||||
@@ -1996,20 +1996,20 @@ static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
|
||||
return;
|
||||
}
|
||||
|
||||
seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
|
||||
seg->flags = get_umr_flags(fast_reg_wr(wr)->access_flags) |
|
||||
MLX5_ACCESS_MODE_MTT;
|
||||
*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
|
||||
seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
|
||||
seg->qpn_mkey7_0 = cpu_to_be32((fast_reg_wr(wr)->rkey & 0xff) | 0xffffff00);
|
||||
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
|
||||
seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
|
||||
seg->len = cpu_to_be64(wr->wr.fast_reg.length);
|
||||
seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
|
||||
seg->log2_page_size = wr->wr.fast_reg.page_shift;
|
||||
seg->start_addr = cpu_to_be64(fast_reg_wr(wr)->iova_start);
|
||||
seg->len = cpu_to_be64(fast_reg_wr(wr)->length);
|
||||
seg->xlt_oct_size = cpu_to_be32((fast_reg_wr(wr)->page_list_len + 1) / 2);
|
||||
seg->log2_page_size = fast_reg_wr(wr)->page_shift;
|
||||
}
|
||||
|
||||
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
|
||||
{
|
||||
struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
|
||||
struct mlx5_umr_wr *umrwr = umr_wr(wr);
|
||||
|
||||
memset(seg, 0, sizeof(*seg));
|
||||
if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
|
||||
@@ -2034,15 +2034,15 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
|
||||
struct mlx5_ib_pd *pd,
|
||||
int writ)
|
||||
{
|
||||
struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
|
||||
u64 *page_list = wr->wr.fast_reg.page_list->page_list;
|
||||
struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(fast_reg_wr(wr)->page_list);
|
||||
u64 *page_list = fast_reg_wr(wr)->page_list->page_list;
|
||||
u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
|
||||
for (i = 0; i < fast_reg_wr(wr)->page_list_len; i++)
|
||||
mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
|
||||
dseg->addr = cpu_to_be64(mfrpl->map);
|
||||
dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
|
||||
dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * fast_reg_wr(wr)->page_list_len, 64));
|
||||
dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
|
||||
}
|
||||
|
||||
@@ -2224,22 +2224,22 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
|
||||
void **seg, int *size)
|
||||
static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
|
||||
struct mlx5_ib_qp *qp, void **seg, int *size)
|
||||
{
|
||||
struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
|
||||
struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
|
||||
struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
|
||||
struct ib_mr *sig_mr = wr->sig_mr;
|
||||
struct mlx5_bsf *bsf;
|
||||
u32 data_len = wr->sg_list->length;
|
||||
u32 data_key = wr->sg_list->lkey;
|
||||
u64 data_va = wr->sg_list->addr;
|
||||
u32 data_len = wr->wr.sg_list->length;
|
||||
u32 data_key = wr->wr.sg_list->lkey;
|
||||
u64 data_va = wr->wr.sg_list->addr;
|
||||
int ret;
|
||||
int wqe_size;
|

if (!wr->wr.sig_handover.prot ||
(data_key == wr->wr.sig_handover.prot->lkey &&
data_va == wr->wr.sig_handover.prot->addr &&
data_len == wr->wr.sig_handover.prot->length)) {
if (!wr->prot ||
(data_key == wr->prot->lkey &&
data_va == wr->prot->addr &&
data_len == wr->prot->length)) {
/**
* Source domain doesn't contain signature information
* or data and protection are interleaved in memory.
@@ -2273,8 +2273,8 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
struct mlx5_stride_block_entry *data_sentry;
struct mlx5_stride_block_entry *prot_sentry;
u32 prot_key = wr->wr.sig_handover.prot->lkey;
u64 prot_va = wr->wr.sig_handover.prot->addr;
u32 prot_key = wr->prot->lkey;
u64 prot_va = wr->prot->addr;
u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
int prot_size;

@@ -2326,16 +2326,16 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
}

static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
struct ib_send_wr *wr, u32 nelements,
struct ib_sig_handover_wr *wr, u32 nelements,
u32 length, u32 pdn)
{
struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
struct ib_mr *sig_mr = wr->sig_mr;
u32 sig_key = sig_mr->rkey;
u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

memset(seg, 0, sizeof(*seg));

seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
seg->flags = get_umr_flags(wr->access_flags) |
MLX5_ACCESS_MODE_KLM;
seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
@@ -2346,7 +2346,7 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
}

static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr, u32 nelements)
u32 nelements)
{
memset(umr, 0, sizeof(*umr));

@@ -2357,37 +2357,37 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
}


static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
void **seg, int *size)
{
struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
u32 pdn = get_pd(qp)->pdn;
u32 klm_oct_size;
int region_len, ret;

if (unlikely(wr->num_sge != 1) ||
unlikely(wr->wr.sig_handover.access_flags &
IB_ACCESS_REMOTE_ATOMIC) ||
if (unlikely(wr->wr.num_sge != 1) ||
unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
unlikely(!sig_mr->sig->sig_status_checked))
return -EINVAL;

/* length of the protected region, data + protection */
region_len = wr->sg_list->length;
if (wr->wr.sig_handover.prot &&
(wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
wr->wr.sig_handover.prot->length != wr->sg_list->length))
region_len += wr->wr.sig_handover.prot->length;
region_len = wr->wr.sg_list->length;
if (wr->prot &&
(wr->prot->lkey != wr->wr.sg_list->lkey ||
wr->prot->addr != wr->wr.sg_list->addr ||
wr->prot->length != wr->wr.sg_list->length))
region_len += wr->prot->length;

/**
* KLM octoword size - if protection was provided
* then we use strided block format (3 octowords),
* else we use single KLM (1 octoword)
**/
klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;
klm_oct_size = wr->prot ? 3 : 1;

set_sig_umr_segment(*seg, wr, klm_oct_size);
set_sig_umr_segment(*seg, klm_oct_size);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend)))
@@ -2454,8 +2454,8 @@ static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
if (unlikely((*seg == qp->sq.qend)))
*seg = mlx5_get_send_wqe(qp, 0);
if (!li) {
if (unlikely(wr->wr.fast_reg.page_list_len >
wr->wr.fast_reg.page_list->max_page_list_len))
if (unlikely(fast_reg_wr(wr)->page_list_len >
fast_reg_wr(wr)->page_list->max_page_list_len))
return -ENOMEM;

set_frwr_pages(*seg, wr, mdev, pd, writ);
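For context, the sig_handover conversion above relies on the new typed work-request wrapper and its container_of() accessor. A minimal sketch of that pattern, modeled on the 4.4-era include/rdma/ib_verbs.h (the exact field layout is shown for illustration only):

```c
/* The old anonymous wr.sig_handover member of struct ib_send_wr becomes
 * a standalone struct that embeds the generic header, so a chained
 * struct ib_send_wr * can be mapped back to the full typed request.
 */
struct ib_sig_handover_wr {
	struct ib_send_wr	wr;		/* generic part, kept in the chain */
	struct ib_sig_attrs	*sig_attrs;
	struct ib_mr		*sig_mr;
	int			access_flags;
	struct ib_sge		*prot;
};

static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
{
	/* valid only for WRs that were really built as ib_sig_handover_wr */
	return container_of(wr, struct ib_sig_handover_wr, wr);
}
```

Because the generic struct ib_send_wr stays embedded, the posting paths keep chaining wr.next untouched and drivers downcast only where they need the extended fields.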
@@ -2627,7 +2627,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (ibqp->qp_type) {
case IB_QPT_XRC_INI:
xrc = seg;
xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
seg += sizeof(*xrc);
size += sizeof(*xrc) / 16;
/* fall through */
@@ -2636,8 +2635,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
rdma_wr(wr)->rkey);
seg += sizeof(struct mlx5_wqe_raddr_seg);
size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break;
@@ -2666,7 +2665,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_FAST_REG_MR:
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
ctrl->imm = cpu_to_be32(fast_reg_wr(wr)->rkey);
err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
if (err) {
mlx5_ib_warn(dev, "\n");
@@ -2678,7 +2677,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,

case IB_WR_REG_SIG_MR:
qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
mr = to_mmr(wr->wr.sig_handover.sig_mr);
mr = to_mmr(sig_handover_wr(wr)->sig_mr);

ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
err = set_sig_umr_wr(wr, qp, &seg, &size);
@@ -2706,7 +2705,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out;
}

err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
mr->sig->psv_memory.psv_idx, &seg,
&size);
if (err) {
@@ -2728,7 +2727,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}

next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
mr->sig->psv_wire.psv_idx, &seg,
&size);
if (err) {
@@ -2752,8 +2751,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
set_raddr_seg(seg, rdma_wr(wr)->remote_addr,
rdma_wr(wr)->rkey);
seg += sizeof(struct mlx5_wqe_raddr_seg);
size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break;
@@ -2780,7 +2779,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
goto out;
}
qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
set_reg_umr_segment(seg, wr);
seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;

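The rdma_wr(), atomic_wr(), ud_wr(), fast_reg_wr() and umr_wr() accessors used throughout the post-send paths follow the same scheme. A sketch of the two most common wrappers under the same assumptions (the masked-atomic fields of ib_atomic_wr are omitted for brevity):

```c
struct ib_rdma_wr {
	struct ib_send_wr	wr;		/* generic header */
	u64			remote_addr;	/* target of READ/WRITE */
	u32			rkey;
};

static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_rdma_wr, wr);
}

struct ib_atomic_wr {
	struct ib_send_wr	wr;
	u64			remote_addr;
	u64			compare_add;	/* add operand or compare value */
	u64			swap;		/* swap value for CMP&SWP */
	u32			rkey;
};

static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
{
	return container_of(wr, struct ib_atomic_wr, wr);
}
```

This is why the converted drivers below can keep taking a plain struct ib_send_wr * and downcast per opcode, instead of reading the old wr.rdma and wr.atomic union members.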
@@ -1476,7 +1476,7 @@ void mthca_free_qp(struct mthca_dev *dev,

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
int ind, struct ib_send_wr *wr,
int ind, struct ib_ud_wr *wr,
struct mthca_mlx_seg *mlx,
struct mthca_data_seg *data)
{
@@ -1485,10 +1485,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
u16 pkey;

ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
mthca_ah_grh_present(to_mah(wr->ah)), 0,
&sqp->ud_header);

err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
if (err)
return err;
mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
@@ -1499,7 +1499,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
mlx->rlid = sqp->ud_header.lrh.destination_lid;
mlx->vcrc = 0;

switch (wr->opcode) {
switch (wr->wr.opcode) {
case IB_WR_SEND:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
sqp->ud_header.immediate_present = 0;
@@ -1507,7 +1507,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
case IB_WR_SEND_WITH_IMM:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
sqp->ud_header.immediate_present = 1;
sqp->ud_header.immediate_data = wr->ex.imm_data;
sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
break;
default:
return -EINVAL;
@@ -1516,18 +1516,18 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
if (!sqp->qp.ibqp.qp_num)
ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
sqp->pkey_index, &pkey);
else
ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
wr->wr.ud.pkey_index, &pkey);
wr->pkey_index, &pkey);
sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
sqp->qkey : wr->wr.ud.remote_qkey);
sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
sqp->qkey : wr->remote_qkey);
sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

header_size = ib_ud_header_pack(&sqp->ud_header,
@@ -1569,34 +1569,34 @@ static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
}

static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
struct ib_send_wr *wr)
struct ib_atomic_wr *wr)
{
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
aseg->swap_add = cpu_to_be64(wr->swap);
aseg->compare = cpu_to_be64(wr->compare_add);
} else {
aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
aseg->swap_add = cpu_to_be64(wr->compare_add);
aseg->compare = 0;
}

}

static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
struct ib_send_wr *wr)
struct ib_ud_wr *wr)
{
useg->lkey = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
useg->lkey = cpu_to_be32(to_mah(wr->ah)->key);
useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
useg->dqpn = cpu_to_be32(wr->remote_qpn);
useg->qkey = cpu_to_be32(wr->remote_qkey);

}

static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
struct ib_send_wr *wr)
struct ib_ud_wr *wr)
{
memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
useg->dqpn = cpu_to_be32(wr->remote_qpn);
useg->qkey = cpu_to_be32(wr->remote_qkey);
}

int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1664,11 +1664,11 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
atomic_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);

set_atomic_seg(wqe, wr);
set_atomic_seg(wqe, atomic_wr(wr));
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
@@ -1677,8 +1677,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
case IB_WR_RDMA_READ:
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -1694,8 +1694,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -1708,13 +1708,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;

case UD:
set_tavor_ud_seg(wqe, wr);
set_tavor_ud_seg(wqe, ud_wr(wr));
wqe += sizeof (struct mthca_tavor_ud_seg);
size += sizeof (struct mthca_tavor_ud_seg) / 16;
break;

case MLX:
err = build_mlx_header(dev, to_msqp(qp), ind, wr,
err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
wqe - sizeof (struct mthca_next_seg),
wqe);
if (err) {

@@ -2005,11 +2005,11 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
wr->wr.atomic.rkey);
set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
atomic_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);

set_atomic_seg(wqe, wr);
set_atomic_seg(wqe, atomic_wr(wr));
wqe += sizeof (struct mthca_atomic_seg);
size += (sizeof (struct mthca_raddr_seg) +
sizeof (struct mthca_atomic_seg)) / 16;
@@ -2018,8 +2018,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_READ:
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -2035,8 +2035,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
rdma_wr(wr)->rkey);
wqe += sizeof (struct mthca_raddr_seg);
size += sizeof (struct mthca_raddr_seg) / 16;
break;
@@ -2049,13 +2049,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;

case UD:
set_arbel_ud_seg(wqe, wr);
set_arbel_ud_seg(wqe, ud_wr(wr));
wqe += sizeof (struct mthca_arbel_ud_seg);
size += sizeof (struct mthca_arbel_ud_seg) / 16;
break;

case MLX:
err = build_mlx_header(dev, to_msqp(qp), ind, wr,
err = build_mlx_header(dev, to_msqp(qp), ind, ud_wr(wr),
wqe - sizeof (struct mthca_next_seg),
wqe);
if (err) {

@@ -3372,9 +3372,9 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;

set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
ib_wr->wr.rdma.rkey);
rdma_wr(ib_wr)->rkey);
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
ib_wr->wr.rdma.remote_addr);
rdma_wr(ib_wr)->remote_addr);

if ((ib_wr->send_flags & IB_SEND_INLINE) &&
((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
@@ -3409,9 +3409,9 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
}

set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
ib_wr->wr.rdma.remote_addr);
rdma_wr(ib_wr)->remote_addr);
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
ib_wr->wr.rdma.rkey);
rdma_wr(ib_wr)->rkey);
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
ib_wr->sg_list->length);
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
@@ -3428,15 +3428,16 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
case IB_WR_FAST_REG_MR:
{
int i;
int flags = ib_wr->wr.fast_reg.access_flags;
struct ib_fast_reg_wr *fwr = fast_reg_wr(ib_wr);
int flags = fwr->access_flags;
struct nes_ib_fast_reg_page_list *pnesfrpl =
container_of(ib_wr->wr.fast_reg.page_list,
container_of(fwr->page_list,
struct nes_ib_fast_reg_page_list,
ibfrpl);
u64 *src_page_list = pnesfrpl->ibfrpl.page_list;
u64 *dst_page_list = pnesfrpl->nes_wqe_pbl.kva;

if (ib_wr->wr.fast_reg.page_list_len >
if (fwr->page_list_len >
(NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) {
nes_debug(NES_DBG_IW_TX, "SQ_FMR: bad page_list_len\n");
err = -EINVAL;
@@ -3445,19 +3446,19 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
wqe_misc = NES_IWARP_SQ_OP_FAST_REG;
set_wqe_64bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX,
ib_wr->wr.fast_reg.iova_start);
fwr->iova_start);
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
ib_wr->wr.fast_reg.length);
fwr->length);
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
ib_wr->wr.fast_reg.rkey);
fwr->rkey);
/* Set page size: */
if (ib_wr->wr.fast_reg.page_shift == 12) {
if (fwr->page_shift == 12) {
wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_4K;
} else if (ib_wr->wr.fast_reg.page_shift == 21) {
} else if (fwr->page_shift == 21) {
wqe_misc |= NES_IWARP_SQ_FMR_WQE_PAGE_SIZE_2M;
} else {
nes_debug(NES_DBG_IW_TX, "Invalid page shift,"
@@ -3480,11 +3481,11 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
wqe_misc |= NES_IWARP_SQ_FMR_WQE_RIGHTS_ENABLE_WINDOW_BIND;

/* Fill in PBL info: */
if (ib_wr->wr.fast_reg.page_list_len >
if (fwr->page_list_len >
pnesfrpl->ibfrpl.max_page_list_len) {
nes_debug(NES_DBG_IW_TX, "Invalid page list length,"
" ib_wr=%p, value=%u, max=%u\n",
ib_wr, ib_wr->wr.fast_reg.page_list_len,
ib_wr, fwr->page_list_len,
pnesfrpl->ibfrpl.max_page_list_len);
err = -EINVAL;
break;
@@ -3496,19 +3497,19 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,

set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX,
ib_wr->wr.fast_reg.page_list_len * 8);
fwr->page_list_len * 8);

for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++)
for (i = 0; i < fwr->page_list_len; i++)
dst_page_list[i] = cpu_to_le64(src_page_list[i]);

nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, "
"length: %d, rkey: %0x, pgl_paddr: %llx, "
"page_list_len: %u, wqe_misc: %x\n",
(unsigned long long) ib_wr->wr.fast_reg.iova_start,
ib_wr->wr.fast_reg.length,
ib_wr->wr.fast_reg.rkey,
(unsigned long long) fwr->iova_start,
fwr->length,
fwr->rkey,
(unsigned long long) pnesfrpl->nes_wqe_pbl.paddr,
ib_wr->wr.fast_reg.page_list_len,
fwr->page_list_len,
wqe_misc);
break;
}

@@ -1997,13 +1997,13 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
{
struct ocrdma_ewqe_ud_hdr *ud_hdr =
(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
struct ocrdma_ah *ah = get_ocrdma_ah(wr->wr.ud.ah);
struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);

ud_hdr->rsvd_dest_qpn = wr->wr.ud.remote_qpn;
ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
if (qp->qp_type == IB_QPT_GSI)
ud_hdr->qkey = qp->qkey;
else
ud_hdr->qkey = wr->wr.ud.remote_qkey;
ud_hdr->qkey = ud_wr(wr)->remote_qkey;
ud_hdr->rsvd_ahid = ah->id;
if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
@@ -2106,9 +2106,9 @@ static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
if (status)
return status;
ext_rw->addr_lo = wr->wr.rdma.remote_addr;
ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
ext_rw->lrkey = wr->wr.rdma.rkey;
ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
ext_rw->lrkey = rdma_wr(wr)->rkey;
ext_rw->len = hdr->total_len;
return 0;
}
@@ -2126,13 +2126,14 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

ext_rw->addr_lo = wr->wr.rdma.remote_addr;
ext_rw->addr_hi = upper_32_bits(wr->wr.rdma.remote_addr);
ext_rw->lrkey = wr->wr.rdma.rkey;
ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
ext_rw->lrkey = rdma_wr(wr)->rkey;
ext_rw->len = hdr->total_len;
}

static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
static void build_frmr_pbes(struct ib_fast_reg_wr *wr,
struct ocrdma_pbl *pbl_tbl,
struct ocrdma_hw_mr *hwmr)
{
int i;
@@ -2144,12 +2145,12 @@ static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
num_pbes = 0;

/* go through the OS phy regions & fill hw pbe entries into pbls. */
for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
for (i = 0; i < wr->page_list_len; i++) {
/* number of pbes can be more for one OS buf, when
* buffers are of different sizes.
* split the ib_buf to one or more pbes.
*/
buf_addr = wr->wr.fast_reg.page_list->page_list[i];
buf_addr = wr->page_list->page_list[i];
pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
num_pbes += 1;
@@ -2178,9 +2179,10 @@ static int get_encoded_page_size(int pg_sz)


static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
struct ib_send_wr *wr)
struct ib_send_wr *send_wr)
{
u64 fbo;
struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
struct ocrdma_mr *mr;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
@@ -2188,33 +2190,32 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,

wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
if (wr->page_list_len > dev->attr.max_pages_per_frmr)
return -EINVAL;

hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

if (wr->wr.fast_reg.page_list_len == 0)
if (wr->page_list_len == 0)
BUG();
if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
if (wr->access_flags & IB_ACCESS_LOCAL_WRITE)
hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
if (wr->access_flags & IB_ACCESS_REMOTE_WRITE)
hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
if (wr->access_flags & IB_ACCESS_REMOTE_READ)
hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
hdr->lkey = wr->wr.fast_reg.rkey;
hdr->total_len = wr->wr.fast_reg.length;
hdr->lkey = wr->rkey;
hdr->total_len = wr->length;

fbo = wr->wr.fast_reg.iova_start -
(wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
fbo = wr->iova_start - (wr->page_list->page_list[0] & PAGE_MASK);

fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
fast_reg->va_hi = upper_32_bits(wr->iova_start);
fast_reg->va_lo = (u32) (wr->iova_start & 0xffffffff);
fast_reg->fbo_hi = upper_32_bits(fbo);
fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
fast_reg->num_sges = wr->page_list_len;
fast_reg->size_sge =
get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
get_encoded_page_size(1 << wr->page_shift);
mr = (struct ocrdma_mr *) (unsigned long)
dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);

@@ -338,12 +338,13 @@ bail:
/*
* Initialize the memory region specified by the work reqeust.
*/
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *send_wr)
{
struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
struct qib_pd *pd = to_ipd(qp->ibqp.pd);
struct qib_mregion *mr;
u32 rkey = wr->wr.fast_reg.rkey;
u32 rkey = wr->rkey;
unsigned i, n, m;
int ret = -EINVAL;
unsigned long flags;
@@ -360,22 +361,22 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
goto bail;

if (wr->wr.fast_reg.page_list_len > mr->max_segs)
if (wr->page_list_len > mr->max_segs)
goto bail;

ps = 1UL << wr->wr.fast_reg.page_shift;
if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
ps = 1UL << wr->page_shift;
if (wr->length > ps * wr->page_list_len)
goto bail;

mr->user_base = wr->wr.fast_reg.iova_start;
mr->iova = wr->wr.fast_reg.iova_start;
mr->user_base = wr->iova_start;
mr->iova = wr->iova_start;
mr->lkey = rkey;
mr->length = wr->wr.fast_reg.length;
mr->access_flags = wr->wr.fast_reg.access_flags;
page_list = wr->wr.fast_reg.page_list->page_list;
mr->length = wr->length;
mr->access_flags = wr->access_flags;
page_list = wr->page_list->page_list;
m = 0;
n = 0;
for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
for (i = 0; i < wr->page_list_len; i++) {
mr->map[m]->segs[n].vaddr = (void *) page_list[i];
mr->map[m]->segs[n].length = ps;
if (++n == QIB_SEGSZ) {

@@ -436,7 +436,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
if (++qp->s_last >= qp->s_size)
qp->s_last = 0;
}

@@ -373,10 +373,11 @@ int qib_make_rc_req(struct qib_qp *qp)
qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
goto bail;
}

ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
cpu_to_be64(wqe->rdma_wr.remote_addr);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->wr.wr.rdma.rkey);
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / sizeof(u32);
wqe->lpsn = wqe->psn;
@@ -386,15 +387,15 @@ int qib_make_rc_req(struct qib_qp *qp)
len = pmtu;
break;
}
if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
qp->s_state = OP(RDMA_WRITE_ONLY);
else {
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after RETH */
ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
ohdr->u.rc.imm_data =
wqe->rdma_wr.wr.ex.imm_data;
hwords += 1;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
}
bth2 |= IB_BTH_REQ_ACK;
@@ -424,10 +425,11 @@ int qib_make_rc_req(struct qib_qp *qp)
qp->s_next_psn += (len - 1) / pmtu;
wqe->lpsn = qp->s_next_psn++;
}

ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
cpu_to_be64(wqe->rdma_wr.remote_addr);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->wr.wr.rdma.rkey);
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
qp->s_state = OP(RDMA_READ_REQUEST);
hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
@@ -455,24 +457,24 @@ int qib_make_rc_req(struct qib_qp *qp)
qp->s_lsn++;
wqe->lpsn = wqe->psn;
}
if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
ohdr->u.atomic_eth.swap_data = cpu_to_be64(
wqe->wr.wr.atomic.swap);
wqe->atomic_wr.swap);
ohdr->u.atomic_eth.compare_data = cpu_to_be64(
wqe->wr.wr.atomic.compare_add);
wqe->atomic_wr.compare_add);
} else {
qp->s_state = OP(FETCH_ADD);
ohdr->u.atomic_eth.swap_data = cpu_to_be64(
wqe->wr.wr.atomic.compare_add);
wqe->atomic_wr.compare_add);
ohdr->u.atomic_eth.compare_data = 0;
}
ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
wqe->wr.wr.atomic.remote_addr >> 32);
wqe->atomic_wr.remote_addr >> 32);
ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
wqe->wr.wr.atomic.remote_addr);
wqe->atomic_wr.remote_addr);
ohdr->u.atomic_eth.rkey = cpu_to_be32(
wqe->wr.wr.atomic.rkey);
wqe->atomic_wr.rkey);
hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
ss = NULL;
len = 0;
@@ -597,9 +599,9 @@ int qib_make_rc_req(struct qib_qp *qp)
*/
len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
cpu_to_be64(wqe->rdma_wr.remote_addr + len);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->wr.wr.rdma.rkey);
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
qp->s_state = OP(RDMA_READ_REQUEST);
hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);

@@ -459,8 +459,8 @@ again:
if (wqe->length == 0)
break;
if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_WRITE)))
goto acc_err;
qp->r_sge.sg_list = NULL;
@@ -472,8 +472,8 @@ again:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto inv_err;
if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_READ)))
goto acc_err;
release = 0;
@@ -490,18 +490,18 @@ again:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->wr.wr.atomic.remote_addr,
wqe->wr.wr.atomic.rkey,
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err;
/* Perform atomic OP and save result. */
maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
sdata = wqe->wr.wr.atomic.compare_add;
sdata = wqe->atomic_wr.compare_add;
*(u64 *) sqp->s_sge.sge.vaddr =
(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->wr.wr.atomic.swap);
sdata, wqe->atomic_wr.swap);
qib_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
goto send_comp;
@@ -785,7 +785,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);

/* See ch. 11.2.4.1 and 10.7.3.1 */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||

@@ -129,9 +129,9 @@ int qib_make_uc_req(struct qib_qp *qp)
case IB_WR_RDMA_WRITE:
case IB_WR_RDMA_WRITE_WITH_IMM:
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
cpu_to_be64(wqe->rdma_wr.remote_addr);
ohdr->u.rc.reth.rkey =
cpu_to_be32(wqe->wr.wr.rdma.rkey);
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / 4;
if (len > pmtu) {
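The qib hunks above also show the receiving end of the cleanup inside the driver's own send-WQE bookkeeping: expressions such as wqe->rdma_wr.wr.opcode and wqe->atomic_wr.swap suggest the swqe now overlays the typed requests in an anonymous union, presumably along these lines (an illustrative sketch, not the verbatim driver definition):

```c
struct qib_swqe {
	union {
		struct ib_send_wr	wr;        /* generic view of the request */
		struct ib_ud_wr		ud_wr;     /* UD, SMI, GSI traffic */
		struct ib_rdma_wr	rdma_wr;   /* RDMA READ/WRITE */
		struct ib_atomic_wr	atomic_wr; /* CMP&SWP, FETCH&ADD */
	};
	/* ... remaining qib bookkeeping (psn, lpsn, length, sg_list, ...) */
};
```

Since every typed wrapper embeds struct ib_send_wr as its first member, wqe->wr.opcode and wqe->rdma_wr.wr.opcode alias the same storage, which is why the converted code can mix the generic and typed views freely.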
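Seen from a consumer, the split means the typed request is built directly and only its embedded generic header is chained and posted. A minimal caller-side sketch for a UD send against the 4.4-era verbs API (qp, ah, remote_qpn, remote_qkey and the SGE fields are placeholders, not values from this commit):

```c
struct ib_sge sge = {
	.addr   = buf_dma_addr,   /* placeholder: DMA address of the payload */
	.length = buf_len,        /* placeholder: payload length */
	.lkey   = mr_lkey,        /* placeholder: lkey covering the buffer */
};
struct ib_ud_wr wr = {
	.wr = {
		.wr_id      = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
		.sg_list    = &sge,
		.num_sge    = 1,
	},
	.ah          = ah,          /* placeholder address handle */
	.remote_qpn  = remote_qpn,  /* placeholder destination QPN */
	.remote_qkey = remote_qkey, /* placeholder Q_Key */
};
struct ib_send_wr *bad_wr;

/* The driver's post_send path recovers the UD fields via ud_wr(). */
int ret = ib_post_send(qp, &wr.wr, &bad_wr);
```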