Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
  IB/qib: Add missing <linux/slab.h> include
  IB/ehca: Drop unnecessary NULL test
  RDMA/nes: Fix confusing if statement indentation
  IB/ehca: Init irq tasklet before irq can happen
  RDMA/nes: Fix misindented code
  RDMA/nes: Fix showing wqm_quanta
  RDMA/nes: Get rid of "set but not used" variables
  RDMA/nes: Read firmware version from correct place
  IB/srp: Export req_lim via sysfs
  IB/srp: Make receive buffer handling more robust
  IB/srp: Use print_hex_dump()
  IB: Rename RAW_ETY to RAW_ETHERTYPE
  RDMA/nes: Fix two sparse warnings
  RDMA/cxgb3: Make needlessly global iwch_l2t_send() static
  IB/iser: Make needlessly global iser_alloc_rx_descriptors() static
  RDMA/cxgb4: Add timeouts when waiting for FW responses
  IB/qib: Fix race between qib_error_qp() and receive packet processing
  IB/qib: Limit the number of packets processed per interrupt
  IB/qib: Allow writes to the diag_counters to be able to clear them
  IB/qib: Set cfgctxts to number of CPUs by default
  ...
Author: Linus Torvalds
Date:   2010-08-07 17:08:02 -07:00
47 changed files with 580 additions and 499 deletions
+6 -4
@@ -2409,10 +2409,12 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 		msg_response = CM_MSG_RESPONSE_REP;
 		break;
 	case IB_CM_ESTABLISHED:
-		cm_state = cm_id->state;
-		lap_state = IB_CM_MRA_LAP_SENT;
-		msg_response = CM_MSG_RESPONSE_OTHER;
-		break;
+		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
+			cm_state = cm_id->state;
+			lap_state = IB_CM_MRA_LAP_SENT;
+			msg_response = CM_MSG_RESPONSE_OTHER;
+			break;
+		}
	default:
		ret = -EINVAL;
		goto error1;
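The new guard above is subtle: when cm_id->lap_state is not IB_CM_LAP_RCVD, the break inside the if is skipped and control falls through into the default label, so the MRA is rejected with -EINVAL. A minimal standalone C sketch of that control flow (hypothetical names, not the driver code):

	#include <stdio.h>

	enum lap_state { LAP_IDLE, LAP_RCVD };

	static int mra_in_established(enum lap_state lap_state)
	{
		int ret = 0;

		switch (0) {
		case 0:				/* stands in for IB_CM_ESTABLISHED */
			if (lap_state == LAP_RCVD)
				break;		/* valid: MRA answers a received LAP */
			/* no LAP pending: fall through and reject */
		default:
			ret = -22;		/* -EINVAL */
		}
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", mra_in_established(LAP_RCVD),
		       mra_in_established(LAP_IDLE));	/* prints "0 -22" */
		return 0;
	}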
-2
@@ -1085,7 +1085,6 @@ err_cdev:
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
 	struct ib_umad_file *file;
-	int already_dead;
 	int id;
 
 	dev_set_drvdata(port->dev, NULL);
@@ -1103,7 +1102,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
 	list_for_each_entry(file, &port->file_list, port_list) {
 		mutex_lock(&file->mutex);
-		already_dead = file->agents_dead;
 		file->agents_dead = 1;
 		mutex_unlock(&file->mutex);
+2 -2
@@ -310,8 +310,8 @@ EXPORT_SYMBOL(ib_create_qp);
 static const struct {
 	int			valid;
-	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
-	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
+	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETHERTYPE + 1];
+	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETHERTYPE + 1];
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
+1 -1
@@ -137,7 +137,7 @@ static void stop_ep_timer(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }
 
-int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
+static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
 {
 	int	error = 0;
 	struct cxio_rdev *rdev;
+2 -2
@@ -371,7 +371,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 			    qhp->wq.sq_size_log2);
-	if (num_wrs <= 0) {
+	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		err = -ENOMEM;
 		goto out;
@@ -554,7 +554,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 			    qhp->wq.sq_size_log2);
-	if ((num_wrs) <= 0) {
+	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		return -ENOMEM;
 	}
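Both hunks above replace "<= 0" with "== 0". num_wrs is an unsigned quantity returned by Q_FREECNT() (an assumption based on the sparse warning the commit message mentions), so the two tests are equivalent; the new spelling just says what it means. A tiny standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		/* assumed unsigned, as in the driver's free-count math */
		unsigned int num_wrs = 0;

		if (num_wrs <= 0)
			printf("\"<= 0\" matched\n");	/* can only mean zero */
		if (num_wrs == 0)
			printf("\"== 0\" matched\n");	/* same test, unambiguous */
		return 0;
	}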
+50 -41
@@ -61,6 +61,10 @@ static char *states[] = {
 	NULL,
 };
 
+static int dack_mode;
+module_param(dack_mode, int, 0644);
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
 MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
@@ -469,11 +473,12 @@ static int send_connect(struct c4iw_ep *ep)
 		       __func__);
 		return -ENOMEM;
 	}
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
 	opt0 = KEEP_ALIVE(1) |
+	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
@@ -780,11 +785,11 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
-	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
-		     ep->hwtid, status);
-		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-	}
+
+	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+	     ep->hwtid, status);
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+
 	if (status < 0) {
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
@@ -845,8 +850,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
 						    ep->hwtid));
-	req->credit_dack = cpu_to_be32(credits);
-	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+				       F_RX_DACK_CHANGE |
+				       V_RX_DACK_MODE(dack_mode));
+	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 	return credits;
 }
@@ -1264,6 +1271,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
 	opt0 = KEEP_ALIVE(1) |
+	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
@@ -1287,7 +1295,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 						    ep->hwtid));
 	rpl->opt0 = cpu_to_be64(opt0);
 	rpl->opt2 = cpu_to_be32(opt2);
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 
 	return;
@@ -1344,7 +1352,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	u16 rss_qid;
 	u32 mtu;
 	int step;
-	int txq_idx;
+	int txq_idx, ctrlq_idx;
 
 	parent_ep = lookup_stid(t, stid);
 	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1376,6 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(pdev) * step;
+		ctrlq_idx = cxgb4_port_idx(pdev);
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
 		dev_put(pdev);
@@ -1387,6 +1396,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 		rss_qid = dev->rdev.lldi.rxq_ids[
 			  cxgb4_port_idx(dst->neighbour->dev) * step];
@@ -1426,6 +1436,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	child_ep->rss_qid = rss_qid;
 	child_ep->mtu = mtu;
 	child_ep->txq_idx = txq_idx;
+	child_ep->ctrlq_idx = ctrlq_idx;
 
 	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
 	     tx_chan, smac_idx, rss_qid);
@@ -1473,8 +1484,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	int closing = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
-	int start_timer = 0;
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1511,7 +1520,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		wake_up(&ep->com.waitq);
 		break;
 	case FPDU_MODE:
-		start_timer = 1;
+		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		closing = 1;
 		peer_close_upcall(ep);
@@ -1524,7 +1533,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		disconnect = 0;
 		break;
 	case MORIBUND:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1547,10 +1556,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	}
-	if (start_timer)
-		start_ep_timer(ep);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1579,7 +1584,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned long flags;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv_abort(req->status)) {
@@ -1594,10 +1598,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
@@ -1621,7 +1625,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case MORIBUND:
 	case CLOSING:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		/*FALLTHROUGH*/
 	case FPDU_MODE:
 		if (ep->com.cm_id && ep->com.qp) {
@@ -1667,8 +1671,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	rpl->cmd = CPL_ABORT_NO_RST;
 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
 out:
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1683,7 +1685,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	int release = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 
@@ -1697,7 +1698,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		if ((ep->com.cm_id) && (ep->com.qp)) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp,
@@ -1717,8 +1718,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	}
 	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1957,6 +1956,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
 		step = ep->com.dev->rdev.lldi.nrxq /
 		       ep->com.dev->rdev.lldi.nchan;
+		ep->ctrlq_idx = cxgb4_port_idx(pdev);
 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
 			      cxgb4_port_idx(pdev) * step];
 		dev_put(pdev);
@@ -1971,6 +1971,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
 		step = ep->com.dev->rdev.lldi.nrxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
@@ -2049,8 +2050,15 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		goto fail3;
 
 	/* wait for pass_open_rpl */
-	wait_event(ep->com.waitq, ep->com.rpl_done);
-	err = ep->com.rpl_err;
+	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+	if (ep->com.rpl_done)
+		err = ep->com.rpl_err;
+	else {
+		printk(KERN_ERR MOD "Device %s not responding!\n",
+		       pci_name(ep->com.dev->rdev.lldi.pdev));
+		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+		err = -EIO;
+	}
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
@@ -2079,10 +2087,17 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	err = listen_stop(ep);
 	if (err)
 		goto done;
-	wait_event(ep->com.waitq, ep->com.rpl_done);
+	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+	if (ep->com.rpl_done)
+		err = ep->com.rpl_err;
+	else {
+		printk(KERN_ERR MOD "Device %s not responding!\n",
+		       pci_name(ep->com.dev->rdev.lldi.pdev));
+		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+		err = -EIO;
+	}
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
-	err = ep->com.rpl_err;
 	cm_id->rem_ref(cm_id);
 	c4iw_put_ep(&ep->com);
 	return err;
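Both listen paths above move from an indefinite wait_event() to wait_event_timeout() plus a fatal-error fallback. A rough userspace sketch of the same pattern, with hypothetical names and a busy-wait standing in for the kernel's sleeping wait:

	#include <errno.h>
	#include <stdio.h>
	#include <time.h>

	static volatile int rpl_done;	/* set by the (absent) reply handler */
	static int rpl_err;
	static int dev_fatal;

	static int wait_for_fw_reply(int timeout_secs)
	{
		time_t deadline = time(NULL) + timeout_secs;

		while (!rpl_done && time(NULL) < deadline)
			;			/* busy-wait only for the demo */
		if (rpl_done)
			return rpl_err;
		fprintf(stderr, "device not responding, marking fatal\n");
		dev_fatal = 1;		/* later operations fail fast */
		return -EIO;
	}

	int main(void)
	{
		printf("err = %d\n", wait_for_fw_reply(1));	/* times out: -5 */
		return 0;
	}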
@@ -2095,8 +2110,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	int close = 0;
 	int fatal = 0;
 	struct c4iw_rdev *rdev;
-	int start_timer = 0;
-	int stop_timer = 0;
 
 	spin_lock_irqsave(&ep->com.lock, flags);
@@ -2120,7 +2133,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 			ep->com.state = ABORTING;
 		else {
 			ep->com.state = CLOSING;
-			start_timer = 1;
+			start_ep_timer(ep);
 		}
 		set_bit(CLOSE_SENT, &ep->com.flags);
 		break;
@@ -2128,7 +2141,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
 			close = 1;
 			if (abrupt) {
-				stop_timer = 1;
+				stop_ep_timer(ep);
 				ep->com.state = ABORTING;
 			} else
 				ep->com.state = MORIBUND;
@@ -2146,10 +2159,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	}
 	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (start_timer)
-		start_ep_timer(ep);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (close) {
 		if (abrupt)
 			ret = abort_connection(ep, NULL, gfp);
@@ -2244,7 +2253,7 @@ static void process_work(struct work_struct *work)
 {
 	struct sk_buff *skb = NULL;
 	struct c4iw_dev *dev;
-	struct cpl_act_establish *rpl = cplhdr(skb);
+	struct cpl_act_establish *rpl;
 	unsigned int opcode;
 	int ret;
+2 -2
@@ -43,7 +43,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -118,7 +118,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto err4;
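Dropping __GFP_NOFAIL means the allocation can now fail, so each call site keeps an explicit NULL check that propagates -ENOMEM. A userspace sketch of the resulting pattern, with malloc() standing in for alloc_skb() and a hypothetical function name:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int post_fw_work_request(size_t wr_len)
	{
		unsigned char *wr = malloc(wr_len);	/* may fail */

		if (!wr)
			return -ENOMEM;		/* caller sees the failure */
		memset(wr, 0, wr_len);
		/* ... fill in and post the work request ... */
		free(wr);
		return 0;
	}

	int main(void)
	{
		printf("ret = %d\n", post_fw_work_request(128));
		return 0;
	}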
+1
@@ -619,6 +619,7 @@ struct c4iw_ep {
 	u16 plen;
 	u16 rss_qid;
 	u16 txq_idx;
+	u16 ctrlq_idx;
 	u8 tos;
 };
+1 -1
@@ -59,7 +59,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	wr_len = roundup(sizeof *req + sizeof *sc +
 			 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+125 -115
@@ -130,7 +130,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + 2 * sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto err7;
@@ -162,7 +162,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(3) |
+		V_FW_RI_RES_WR_FBMIN(2) |
 		V_FW_RI_RES_WR_FBMAX(3) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -185,7 +185,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(3) |
+		V_FW_RI_RES_WR_FBMIN(2) |
 		V_FW_RI_RES_WR_FBMAX(3) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -235,12 +235,78 @@ err1:
 	return -ENOMEM;
 }
 
-static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
+		      struct ib_send_wr *wr, int max, u32 *plenp)
+{
+	u8 *dstp, *srcp;
+	u32 plen = 0;
+	int i;
+	int rem, len;
+
+	dstp = (u8 *)immdp->data;
+	for (i = 0; i < wr->num_sge; i++) {
+		if ((plen + wr->sg_list[i].length) > max)
+			return -EMSGSIZE;
+		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
+		plen += wr->sg_list[i].length;
+		rem = wr->sg_list[i].length;
+		while (rem) {
+			if (dstp == (u8 *)&sq->queue[sq->size])
+				dstp = (u8 *)sq->queue;
+			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
+				len = rem;
+			else
+				len = (u8 *)&sq->queue[sq->size] - dstp;
+			memcpy(dstp, srcp, len);
+			dstp += len;
+			srcp += len;
+			rem -= len;
+		}
+	}
+	immdp->op = FW_RI_DATA_IMMD;
+	immdp->r1 = 0;
+	immdp->r2 = 0;
+	immdp->immdlen = cpu_to_be32(plen);
+	*plenp = plen;
+	return 0;
+}
+
+static int build_isgl(__be64 *queue_start, __be64 *queue_end,
+		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
+		      int num_sge, u32 *plenp)
+{
+	int i;
+	u32 plen = 0;
+	__be64 *flitp = (__be64 *)isglp->sge;
+
+	for (i = 0; i < num_sge; i++) {
+		if ((plen + sg_list[i].length) < plen)
+			return -EMSGSIZE;
+		plen += sg_list[i].length;
+		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
+				     sg_list[i].length);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+		*flitp = cpu_to_be64(sg_list[i].addr);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+	}
+	isglp->op = FW_RI_DATA_ISGL;
+	isglp->r1 = 0;
+	isglp->nsge = cpu_to_be16(num_sge);
+	isglp->r2 = 0;
+	if (plenp)
+		*plenp = plen;
+	return 0;
+}
+
+static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
+			   struct ib_send_wr *wr, u8 *len16)
 {
 	u32 plen;
 	int size;
-	u8 *datap;
+	int ret;
 
 	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
@@ -267,43 +333,23 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 	default:
 		return -EINVAL;
 	}
+
 	plen = 0;
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
-			datap = (u8 *)wqe->send.u.immd_src[0].data;
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) >
-				    T4_MAX_SEND_INLINE) {
-					return -EMSGSIZE;
-				}
-				plen += wr->sg_list[i].length;
-				memcpy(datap,
-				    (void *)(unsigned long)wr->sg_list[i].addr,
-				    wr->sg_list[i].length);
-				datap += wr->sg_list[i].length;
-			}
-			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
-			wqe->send.u.immd_src[0].r1 = 0;
-			wqe->send.u.immd_src[0].r2 = 0;
-			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+			ret = build_immd(sq, wqe->send.u.immd_src, wr,
+					 T4_MAX_SEND_INLINE, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) < plen)
-					return -EMSGSIZE;
-				plen += wr->sg_list[i].length;
-				wqe->send.u.isgl_src[0].sge[i].stag =
-					cpu_to_be32(wr->sg_list[i].lkey);
-				wqe->send.u.isgl_src[0].sge[i].len =
-					cpu_to_be32(wr->sg_list[i].length);
-				wqe->send.u.isgl_src[0].sge[i].to =
-					cpu_to_be64(wr->sg_list[i].addr);
-			}
-			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
-			wqe->send.u.isgl_src[0].r1 = 0;
-			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
-			wqe->send.u.isgl_src[0].r2 = 0;
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->send.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
@@ -313,62 +359,40 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 		wqe->send.u.immd_src[0].r2 = 0;
 		wqe->send.u.immd_src[0].immdlen = 0;
 		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
 	wqe->send.plen = cpu_to_be32(plen);
 	return 0;
 }
 
-static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
+			    struct ib_send_wr *wr, u8 *len16)
 {
-	int i;
 	u32 plen;
 	int size;
-	u8 *datap;
+	int ret;
 
-	if (wr->num_sge > T4_MAX_WRITE_SGE)
+	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
 	wqe->write.r2 = 0;
 	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
 	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
 	plen = 0;
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
-			datap = (u8 *)wqe->write.u.immd_src[0].data;
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) >
-				    T4_MAX_WRITE_INLINE) {
-					return -EMSGSIZE;
-				}
-				plen += wr->sg_list[i].length;
-				memcpy(datap,
-				    (void *)(unsigned long)wr->sg_list[i].addr,
-				    wr->sg_list[i].length);
-				datap += wr->sg_list[i].length;
-			}
-			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
-			wqe->write.u.immd_src[0].r1 = 0;
-			wqe->write.u.immd_src[0].r2 = 0;
-			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+			ret = build_immd(sq, wqe->write.u.immd_src, wr,
+					 T4_MAX_WRITE_INLINE, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) < plen)
-					return -EMSGSIZE;
-				plen += wr->sg_list[i].length;
-				wqe->write.u.isgl_src[0].sge[i].stag =
-					cpu_to_be32(wr->sg_list[i].lkey);
-				wqe->write.u.isgl_src[0].sge[i].len =
-					cpu_to_be32(wr->sg_list[i].length);
-				wqe->write.u.isgl_src[0].sge[i].to =
-					cpu_to_be64(wr->sg_list[i].addr);
-			}
-			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
-			wqe->write.u.isgl_src[0].r1 = 0;
-			wqe->write.u.isgl_src[0].nsge =
-				cpu_to_be16(wr->num_sge);
-			wqe->write.u.isgl_src[0].r2 = 0;
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->write.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
@@ -378,6 +402,7 @@ static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 		wqe->write.u.immd_src[0].r2 = 0;
 		wqe->write.u.immd_src[0].immdlen = 0;
 		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
 	wqe->write.plen = cpu_to_be32(plen);
@@ -416,29 +441,13 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 			   struct ib_recv_wr *wr, u8 *len16)
 {
-	int i;
-	int plen = 0;
+	int ret;
 
-	for (i = 0; i < wr->num_sge; i++) {
-		if ((plen + wr->sg_list[i].length) < plen)
-			return -EMSGSIZE;
-		plen += wr->sg_list[i].length;
-		wqe->recv.isgl.sge[i].stag =
-			cpu_to_be32(wr->sg_list[i].lkey);
-		wqe->recv.isgl.sge[i].len =
-			cpu_to_be32(wr->sg_list[i].length);
-		wqe->recv.isgl.sge[i].to =
-			cpu_to_be64(wr->sg_list[i].addr);
-	}
-	for (; i < T4_MAX_RECV_SGE; i++) {
-		wqe->recv.isgl.sge[i].stag = 0;
-		wqe->recv.isgl.sge[i].len = 0;
-		wqe->recv.isgl.sge[i].to = 0;
-	}
-	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
-	wqe->recv.isgl.r1 = 0;
-	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
-	wqe->recv.isgl.r2 = 0;
+	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
+			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
+			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+	if (ret)
+		return ret;
 	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
 			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
 	return 0;
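The build_immd() helper introduced above copies inline data into the send queue and wraps at the end of the ring rather than assuming a work request is contiguous; build_isgl() wraps its flit pointer the same way. A self-contained C sketch of that wrap-around copy (toy ring size, hypothetical names, not the driver code):

	#include <stdio.h>
	#include <string.h>

	#define QUEUE_BYTES 16	/* tiny ring, just for the demo */

	static void copy_wrapped(unsigned char *queue, size_t qlen, size_t start,
				 const unsigned char *src, size_t rem)
	{
		unsigned char *dstp = queue + start;

		while (rem) {
			size_t len;

			if (dstp == queue + qlen)	/* hit the end: wrap */
				dstp = queue;
			len = (size_t)(queue + qlen - dstp);
			if (rem < len)			/* clamp to what's left */
				len = rem;
			memcpy(dstp, src, len);
			dstp += len;
			src += len;
			rem -= len;
		}
	}

	int main(void)
	{
		unsigned char ring[QUEUE_BYTES] = { 0 };
		const unsigned char payload[8] = "ABCDEFGH";

		copy_wrapped(ring, sizeof(ring), 12, payload, sizeof(payload));
		/* bytes 12..15 hold "ABCD", bytes 0..3 hold "EFGH" */
		printf("%.4s %.4s\n", ring + 12, ring);
		return 0;
	}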
@@ -547,7 +556,9 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			*bad_wr = wr;
 			break;
 		}
-		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+
 		fw_flags = 0;
 		if (wr->send_flags & IB_SEND_SOLICITED)
 			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
@@ -564,12 +575,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				swsqe->opcode = FW_RI_SEND;
 			else
 				swsqe->opcode = FW_RI_SEND_WITH_INV;
-			err = build_rdma_send(wqe, wr, &len16);
+			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_WRITE:
 			fw_opcode = FW_RI_RDMA_WRITE_WR;
 			swsqe->opcode = FW_RI_RDMA_WRITE;
-			err = build_rdma_write(wqe, wr, &len16);
+			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_READ_WITH_INV:
@@ -619,8 +630,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		     swsqe->opcode, swsqe->read_len);
 		wr = wr->next;
 		num_wrs--;
-		t4_sq_produce(&qhp->wq);
-		idx++;
+		t4_sq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 	}
 	if (t4_wq_db_enabled(&qhp->wq))
 		t4_ring_sq_db(&qhp->wq, idx);
@@ -656,7 +667,9 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			*bad_wr = wr;
 			break;
 		}
-		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
+					   qhp->wq.rq.wq_pidx *
+					   T4_EQ_ENTRY_SIZE);
 		if (num_wrs)
 			err = build_rdma_recv(qhp, wqe, wr, &len16);
 		else
@@ -675,15 +688,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->recv.r2[1] = 0;
 		wqe->recv.r2[2] = 0;
 		wqe->recv.len16 = len16;
-		if (len16 < 5)
-			wqe->flits[8] = 0;
-
 		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
 		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
-		t4_rq_produce(&qhp->wq);
+		t4_rq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 		wr = wr->next;
 		num_wrs--;
-		idx++;
 	}
 	if (t4_wq_db_enabled(&qhp->wq))
 		t4_ring_rq_db(&qhp->wq, idx);
@@ -951,7 +961,8 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
 	__flush_qp(qhp, rchp, schp, flag);
 }
 
-static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+		     struct c4iw_ep *ep)
 {
 	struct fw_ri_wr *wqe;
 	int ret;
@@ -959,12 +970,12 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	struct sk_buff *skb;
 
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-	     qhp->ep->hwtid);
+	     ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
@@ -972,7 +983,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 		FW_WR_OP(FW_RI_INIT_WR) |
 		FW_WR_COMPL(1));
 	wqe->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(qhp->ep->hwtid) |
+		FW_WR_FLOWID(ep->hwtid) |
 		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
 	wqe->cookie = (u64)&wr_wait;
@@ -1035,7 +1046,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
@@ -1202,17 +1213,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		case C4IW_QP_STATE_CLOSING:
 			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
 			qhp->attr.state = C4IW_QP_STATE_CLOSING;
+			ep = qhp->ep;
 			if (!internal) {
 				abort = 0;
 				disconnect = 1;
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 			}
 			spin_unlock_irqrestore(&qhp->lock, flag);
-			ret = rdma_fini(rhp, qhp);
+			ret = rdma_fini(rhp, qhp, ep);
 			spin_lock_irqsave(&qhp->lock, flag);
 			if (ret) {
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 				disconnect = abort = 1;
 				goto err;
+15 -17
@@ -65,10 +65,10 @@ struct t4_status_page {
 	u8 db_off;
 };
 
-#define T4_EQ_SIZE 64
+#define T4_EQ_ENTRY_SIZE 64
 
 #define T4_SQ_NUM_SLOTS 4
-#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
 #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
@@ -84,7 +84,7 @@ struct t4_status_page {
 #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
-#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
 #define T4_MAX_RECV_SGE 4
 
 union t4_wr {
@@ -97,20 +97,18 @@ union t4_wr {
 	struct fw_ri_fr_nsmr_wr fr;
 	struct fw_ri_inv_lstag_wr inv;
 	struct t4_status_page status;
-	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
 };
 
 union t4_recv_wr {
 	struct fw_ri_recv_wr recv;
 	struct t4_status_page status;
-	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
 };
 
 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
 			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
 {
-	int slots_used;
-
 	wqe->send.opcode = (u8)opcode;
 	wqe->send.flags = flags;
 	wqe->send.wrid = wrid;
@@ -118,12 +116,6 @@ static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
 	wqe->send.r1[1] = 0;
 	wqe->send.r1[2] = 0;
 	wqe->send.len16 = len16;
-
-	slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
-	while (slots_used < T4_SQ_NUM_SLOTS) {
-		wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
-		slots_used++;
-	}
 }
 
 /* CQE/AE status codes */
@@ -289,6 +281,7 @@ struct t4_sq {
 	u16 size;
 	u16 cidx;
 	u16 pidx;
+	u16 wq_pidx;
 };
 
 struct t4_swrqe {
@@ -310,6 +303,7 @@ struct t4_rq {
 	u16 size;
 	u16 cidx;
 	u16 pidx;
+	u16 wq_pidx;
 };
 
 struct t4_wq {
@@ -340,11 +334,14 @@ static inline u32 t4_rq_avail(struct t4_wq *wq)
 	return wq->rq.size - 1 - wq->rq.in_use;
 }
 
-static inline void t4_rq_produce(struct t4_wq *wq)
+static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
 {
 	wq->rq.in_use++;
 	if (++wq->rq.pidx == wq->rq.size)
 		wq->rq.pidx = 0;
+	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
+		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
 }
 
 static inline void t4_rq_consume(struct t4_wq *wq)
@@ -370,11 +367,14 @@ static inline u32 t4_sq_avail(struct t4_wq *wq)
 	return wq->sq.size - 1 - wq->sq.in_use;
 }
 
-static inline void t4_sq_produce(struct t4_wq *wq)
+static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
 {
 	wq->sq.in_use++;
 	if (++wq->sq.pidx == wq->sq.size)
 		wq->sq.pidx = 0;
+	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
+		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
 }
 
 static inline void t4_sq_consume(struct t4_wq *wq)
@@ -386,14 +386,12 @@ static inline void t4_sq_consume(struct t4_wq *wq)
 
 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
 {
-	inc *= T4_SQ_NUM_SLOTS;
 	wmb();
 	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
 }
 
 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
 {
-	inc *= T4_RQ_NUM_SLOTS;
 	wmb();
 	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
 }
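The new wq_pidx fields advance in 64-byte hardware slots rather than whole work requests: each post adds DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE) slots and wraps modulo size * slots-per-WR, which is what lets work requests be variable sized. A standalone sketch of just that arithmetic (the struct and values are illustrative only):

	#include <stdio.h>

	#define T4_EQ_ENTRY_SIZE 64
	#define T4_SQ_NUM_SLOTS  4
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	struct sq_idx {			/* hypothetical stand-in for struct t4_sq */
		unsigned short size;	/* ring size in WR units */
		unsigned short wq_pidx;	/* hardware producer index, in 64B slots */
	};

	static void sq_produce(struct sq_idx *sq, unsigned char len16)
	{
		sq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
		if (sq->wq_pidx >= sq->size * T4_SQ_NUM_SLOTS)
			sq->wq_pidx %= sq->size * T4_SQ_NUM_SLOTS;
	}

	int main(void)
	{
		struct sq_idx sq = { .size = 16, .wq_pidx = 62 };

		sq_produce(&sq, 5);	/* 5 * 16 = 80 bytes -> 2 slots */
		printf("wq_pidx = %u\n", sq.wq_pidx);	/* (62 + 2) %% 64 = 0 */
		return 0;
	}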
+10
@@ -826,4 +826,14 @@ struct ulptx_idata {
 #define S_ULPTX_NSGE    0
 #define M_ULPTX_NSGE    0xFFFF
 #define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
 
+#define S_RX_DACK_MODE    29
+#define M_RX_DACK_MODE    0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE    31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+
 #endif /* _T4FW_RI_API_H_ */
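These macros are consumed by update_rx_credits() earlier in this diff to build the CPL_RX_DATA_ACK credit word. A standalone sketch of the composition; the RX_FORCE_ACK shift is assumed here, since its definition is not part of this hunk:

	#include <stdio.h>
	#include <stdint.h>

	#define S_RX_DACK_MODE      29
	#define V_RX_DACK_MODE(x)   ((x) << S_RX_DACK_MODE)
	#define S_RX_DACK_CHANGE    31
	#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
	#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
	#define RX_FORCE_ACK(x)     ((x) << 28)	/* assumed shift */

	int main(void)
	{
		uint32_t credits = 1024, dack_mode = 0;
		uint32_t credit_dack = credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode);

		printf("credit_dack = 0x%08x\n", credit_dack);	/* 0x90000400 */
		return 0;
	}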
+4 -4
@@ -122,21 +122,21 @@ int ehca_create_eq(struct ehca_shca *shca,
 
 	/* register interrupt handlers and initialize work queues */
 	if (type == EHCA_EQ) {
+		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
+
 		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
 					  IRQF_DISABLED, "ehca_eq",
 					  (void *)shca);
 		if (ret < 0)
 			ehca_err(ib_dev, "Can't map interrupt handler.");
-
-		tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
 	} else if (type == EHCA_NEQ) {
+		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
+
 		ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
 					  IRQF_DISABLED, "ehca_neq",
 					  (void *)shca);
 		if (ret < 0)
 			ehca_err(ib_dev, "Can't map interrupt handler.");
-
-		tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
 	}
 
 	eq->is_initialized = 1;
+2 -1
@@ -360,7 +360,8 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 	 * a firmware property, so it's valid across all adapters
 	 */
 	if (ehca_lock_hcalls == -1)
-		ehca_lock_hcalls = !(shca->hca_cap & HCA_CAP_H_ALLOC_RES_SYNC);
+		ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
+					shca->hca_cap);
 
 	/* translate supported MR page sizes; always support 4K */
 	shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-5
@@ -933,11 +933,6 @@ int ehca_unmap_fmr(struct list_head *fmr_list)
 	/* check all FMR belong to same SHCA, and check internal flag */
 	list_for_each_entry(ib_fmr, fmr_list, list) {
 		prev_shca = shca;
-		if (!ib_fmr) {
-			ehca_gen_err("bad fmr=%p in list", ib_fmr);
-			ret = -EINVAL;
-			goto unmap_fmr_exit0;
-		}
 		shca = container_of(ib_fmr->device, struct ehca_shca,
 				    ib_device);
 		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
+1 -1
@@ -251,7 +251,7 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
 		return ST_UD;
 	case IB_QPT_RAW_IPV6:
 		return -EINVAL;
-	case IB_QPT_RAW_ETY:
+	case IB_QPT_RAW_ETHERTYPE:
 		return -EINVAL;
 	default:
 		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
+28 -4
@@ -269,6 +269,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_cq *cq,
 			     struct ehca_alloc_cq_parms *param)
 {
+	int rc;
 	u64 ret;
 	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
@@ -283,8 +284,19 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 	param->act_nr_of_entries = (u32)outs[3];
 	param->act_pages = (u32)outs[4];
 
-	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+	if (ret == H_SUCCESS) {
+		rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
+		if (rc) {
+			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
+				     rc, outs[5]);
+
+			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+						adapter_handle.handle,     /* r4 */
+						cq->ipz_cq_handle.handle,  /* r5 */
+						0, 0, 0, 0, 0);
+			ret = H_NO_MEM;
+		}
+	}
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -295,6 +307,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_alloc_qp_parms *parms, int is_user)
 {
+	int rc;
 	u64 ret;
 	u64 allocate_controls, max_r10_reg, r11, r12;
 	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
@@ -358,8 +371,19 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 	parms->rqueue.queue_size =
 		(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 
-	if (ret == H_SUCCESS)
-		hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+	if (ret == H_SUCCESS) {
+		rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
+		if (rc) {
+			ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
+				     rc, outs[6]);
+
+			ehca_plpar_hcall_norets(H_FREE_RESOURCE,
+						adapter_handle.handle,     /* r4 */
+						parms->qp_handle.handle,   /* r5 */
+						0, 0, 0, 0, 0);
+			ret = H_NO_MEM;
+		}
+	}
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
 		ehca_gen_err("Not enough resources. ret=%lli", ret);
+5 -6
@@ -42,10 +42,9 @@
 #include "ehca_classes.h"
 #include "hipz_hw.h"
 
-int hcall_map_page(u64 physaddr, u64 *mapaddr)
+u64 hcall_map_page(u64 physaddr)
 {
-	*mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE));
-	return 0;
+	return (u64)ioremap(physaddr, EHCA_PAGESIZE);
 }
 
 int hcall_unmap_page(u64 mapaddr)
@@ -58,9 +57,9 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
 		    u64 paddr_kernel, u64 paddr_user)
 {
 	if (!is_user) {
-		int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
-		if (ret)
-			return ret;
+		galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
+		if (!galpas->kernel.fw_handle)
+			return -ENOMEM;
 	} else
 		galpas->kernel.fw_handle = 0;
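The API change above folds the out-parameter into the return value: hcall_map_page() now returns the ioremap()ed address directly, with 0 meaning failure, and the caller turns that into -ENOMEM. A userspace sketch of the same convention (malloc() standing in for ioremap(), names hypothetical):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	static uint64_t map_page_stub(uint64_t physaddr)
	{
		void *p = malloc(4096);		/* stands in for ioremap() */

		(void)physaddr;
		return (uint64_t)(uintptr_t)p;	/* 0 on failure, like the new API */
	}

	int main(void)
	{
		uint64_t handle = map_page_stub(0x1000);

		if (!handle)
			return 1;		/* caller maps this to -ENOMEM */
		printf("fw_handle = %#llx\n", (unsigned long long)handle);
		free((void *)(uintptr_t)handle);
		return 0;
	}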
+1 -1
@@ -83,7 +83,7 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
 
 int hcp_galpas_dtor(struct h_galpas *galpas);
 
-int hcall_map_page(u64 physaddr, u64 * mapaddr);
+u64 hcall_map_page(u64 physaddr);
 
 int hcall_unmap_page(u64 mapaddr);
+10 -3
@@ -390,6 +390,8 @@ done:
 	ipath_enable_armlaunch(dd);
 }
 
+static void cleanup_device(struct ipath_devdata *dd);
+
 static int __devinit ipath_init_one(struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
 {
@@ -616,8 +618,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	goto bail;
 
 bail_irqsetup:
-	if (pdev->irq)
-		free_irq(pdev->irq, dd);
+	cleanup_device(dd);
+
+	if (dd->ipath_irq)
+		dd->ipath_f_free_irq(dd);
+
+	if (dd->ipath_f_cleanup)
+		dd->ipath_f_cleanup(dd);
 
 bail_iounmap:
 	iounmap((volatile void __iomem *) dd->ipath_kregbase);
@@ -635,7 +642,7 @@ bail:
 	return ret;
 }
 
-static void __devexit cleanup_device(struct ipath_devdata *dd)
+static void cleanup_device(struct ipath_devdata *dd)
 {
 	int port;
 	struct ipath_portdata **tmp;

Some files were not shown because too many files have changed in this diff.