Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (81 commits)
  RDMA/cxgb3: Fix the T3A workaround checks
  IB/ipath: Remove unnecessary cast
  IPoIB: Constify seq_operations function pointer tables
  RDMA/cxgb3: Mark QP as privileged based on user capabilities
  RDMA/cxgb3: Fix page shift calculation in build_phys_page_list()
  RDMA/cxgb3: Flush the receive queue when closing
  IB/ipath: Trivial simplification of ipath_make_ud_req()
  IB/mthca: Update latest "native Arbel" firmware revision
  IPoIB: Remove redundant check of netif_queue_stopped() in xmit handler
  IB/ipath: Add mappings from HW register to PortInfo port physical state
  IB/ipath: Changes to support PIO bandwidth check on IBA7220
  IB/ipath: Minor cleanup of unused fields and chip-specific errors
  IB/ipath: New sysfs entries to control 7220 features
  IB/ipath: Add new chip-specific functions to older chips, consistent init
  IB/ipath: Remove unused MDIO interface code
  IB/ehca: Prevent RDMA-related connection failures on some eHCA2 hardware
  IB/ehca: Add "port connection autodetect mode"
  IB/ehca: Define array to store SMI/GSI QPs
  IB/ehca: Remove CQ-QP-link before destroying QP in error path of create_qp()
  IB/iser: Add change_queue_depth method
  ...
@@ -295,16 +295,6 @@ Who: linuxppc-dev@ozlabs.org

 ---------------------------

-What:	mthca driver's MSI support
-When:	January 2008
-Files:	drivers/infiniband/hw/mthca/*.[ch]
-Why:	All mthca hardware also supports MSI-X, which provides
-	strictly more functionality than MSI.  So there is no point in
-	having both MSI-X and MSI support in the driver.
-Who:	Roland Dreier <rolandd@cisco.com>
-
----------------------------
-
 What:	sk98lin network driver
 When:	Feburary 2008
 Why:	In kernel tree version of driver is unmaintained. Sk98lin driver
(+288, -18) File diff suppressed because it is too large
@@ -488,7 +488,8 @@ void rdma_destroy_qp(struct rdma_cm_id *id)
 }
 EXPORT_SYMBOL(rdma_destroy_qp);

-static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
+static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
+			     struct rdma_conn_param *conn_param)
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;

@@ -514,13 +515,16 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv)
 	if (ret)
 		goto out;

+	if (conn_param)
+		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
 out:
 	mutex_unlock(&id_priv->qp_mutex);
 	return ret;
 }

-static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
+static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
+			     struct rdma_conn_param *conn_param)
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;

@@ -536,6 +540,8 @@ static int cma_modify_qp_rts(struct rdma_id_private *id_priv)
 	if (ret)
 		goto out;

+	if (conn_param)
+		qp_attr.max_rd_atomic = conn_param->initiator_depth;
 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
 out:
 	mutex_unlock(&id_priv->qp_mutex);

@@ -866,11 +872,11 @@ static int cma_rep_recv(struct rdma_id_private *id_priv)
 {
 	int ret;

-	ret = cma_modify_qp_rtr(id_priv);
+	ret = cma_modify_qp_rtr(id_priv, NULL);
 	if (ret)
 		goto reject;

-	ret = cma_modify_qp_rts(id_priv);
+	ret = cma_modify_qp_rts(id_priv, NULL);
 	if (ret)
 		goto reject;

@@ -1122,8 +1128,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	cm_id->cm_handler = cma_ib_handler;

 	ret = conn_id->id.event_handler(&conn_id->id, &event);
-	if (!ret)
+	if (!ret) {
+		cma_enable_remove(conn_id);
 		goto out;
+	}

 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;

@@ -1262,6 +1270,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct net_device *dev = NULL;
 	struct rdma_cm_event event;
 	int ret;
+	struct ib_device_attr attr;

 	listen_id = cm_id->context;
 	if (cma_disable_remove(listen_id, CMA_LISTEN))

@@ -1311,10 +1320,19 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
 	*sin = iw_event->remote_addr;

+	ret = ib_query_device(conn_id->id.device, &attr);
+	if (ret) {
+		cma_enable_remove(conn_id);
+		rdma_destroy_id(new_cm_id);
+		goto out;
+	}
+
 	memset(&event, 0, sizeof event);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	event.param.conn.private_data = iw_event->private_data;
 	event.param.conn.private_data_len = iw_event->private_data_len;
+	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
+	event.param.conn.responder_resources = attr.max_qp_rd_atom;
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (ret) {
 		/* User wants to destroy the CM ID */

@@ -2272,7 +2290,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr;
 	cm_id->remote_addr = *sin;

-	ret = cma_modify_qp_rtr(id_priv);
+	ret = cma_modify_qp_rtr(id_priv, conn_param);
 	if (ret)
 		goto out;

@@ -2335,25 +2353,15 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 			 struct rdma_conn_param *conn_param)
 {
 	struct ib_cm_rep_param rep;
-	struct ib_qp_attr qp_attr;
-	int qp_attr_mask, ret;
+	int ret;

-	if (id_priv->id.qp) {
-		ret = cma_modify_qp_rtr(id_priv);
-		if (ret)
-			goto out;
+	ret = cma_modify_qp_rtr(id_priv, conn_param);
+	if (ret)
+		goto out;

-		qp_attr.qp_state = IB_QPS_RTS;
-		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr,
-					 &qp_attr_mask);
-		if (ret)
-			goto out;
-
-		qp_attr.max_rd_atomic = conn_param->initiator_depth;
-		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
-		if (ret)
-			goto out;
-	}
+	ret = cma_modify_qp_rts(id_priv, conn_param);
+	if (ret)
+		goto out;

 	memset(&rep, 0, sizeof rep);
 	rep.qp_num = id_priv->qp_num;

@@ -2378,7 +2386,7 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
 	struct iw_cm_conn_param iw_param;
 	int ret;

-	ret = cma_modify_qp_rtr(id_priv);
+	ret = cma_modify_qp_rtr(id_priv, conn_param);
 	if (ret)
 		return ret;

@@ -2598,11 +2606,9 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
 		/* IPv6 address is an SA assigned MGID. */
 		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
 	} else {
-		ip_ib_mc_map(sin->sin_addr.s_addr, mc_map);
+		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
 		if (id_priv->id.ps == RDMA_PS_UDP)
 			mc_map[7] = 0x01;	/* Use RDMA CM signature */
-		mc_map[8] = ib_addr_get_pkey(dev_addr) >> 8;
-		mc_map[9] = (unsigned char) ib_addr_get_pkey(dev_addr);
 		*mgid = *(union ib_gid *) (mc_map + 4);
 	}
 }
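The cma.c hunks above thread an optional struct rdma_conn_param through cma_modify_qp_rtr()/cma_modify_qp_rts(), so the accept/connect paths can override the RDMA read/atomic depths while internal callers pass NULL and keep the CM defaults. A rough, compilable userspace sketch of that optional-override pattern (all names below are reduced stand-ins, not the kernel API):

struct conn_param { int initiator_depth, responder_resources; };
struct qp_attr    { int max_rd_atomic, max_dest_rd_atomic; };

/* a NULL conn_param means: keep whatever the CM computed */
static void modify_to_rtr(struct qp_attr *attr, const struct conn_param *p)
{
	if (p)
		attr->max_dest_rd_atomic = p->responder_resources;
}

static void modify_to_rts(struct qp_attr *attr, const struct conn_param *p)
{
	if (p)
		attr->max_rd_atomic = p->initiator_depth;
}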
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
 	int ret;
-	struct ib_pool_fmr *fmr;
+	struct ib_pool_fmr *fmr, *next;
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);

@@ -158,6 +158,20 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #endif
 	}

+	/*
+	 * The free_list may hold FMRs that have been put there
+	 * because they haven't reached the max_remap count.
+	 * Invalidate their mapping as well.
+	 */
+	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+		if (fmr->remap_count == 0)
+			continue;
+		hlist_del_init(&fmr->cache_node);
+		fmr->remap_count = 0;
+		list_add_tail(&fmr->fmr->list, &fmr_list);
+		list_move(&fmr->list, &unmap_list);
+	}
+
 	list_splice(&pool->dirty_list, &unmap_list);
 	INIT_LIST_HEAD(&pool->dirty_list);
 	pool->dirty_len = 0;

@@ -182,8 +196,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
 	struct ib_fmr_pool *pool = pool_ptr;

 	do {
-		if (pool->dirty_len >= pool->dirty_watermark ||
-		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
+		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
 			ib_fmr_batch_release(pool);

 			atomic_inc(&pool->flush_ser);

@@ -194,8 +207,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
 		}

 		set_current_state(TASK_INTERRUPTIBLE);
-		if (pool->dirty_len < pool->dirty_watermark &&
-		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
+		if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
 		    !kthread_should_stop())
 			schedule();
 		__set_current_state(TASK_RUNNING);

@@ -369,11 +381,6 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)

 	i = 0;
 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
-		if (fmr->remap_count) {
-			INIT_LIST_HEAD(&fmr_list);
-			list_add_tail(&fmr->fmr->list, &fmr_list);
-			ib_unmap_fmr(&fmr_list);
-		}
 		ib_dealloc_fmr(fmr->fmr);
 		list_del(&fmr->list);
 		kfree(fmr);

@@ -511,8 +518,10 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 			list_add_tail(&fmr->list, &pool->free_list);
 		} else {
 			list_add_tail(&fmr->list, &pool->dirty_list);
-			++pool->dirty_len;
-			wake_up_process(pool->thread);
+			if (++pool->dirty_len >= pool->dirty_watermark) {
+				atomic_inc(&pool->req_ser);
+				wake_up_process(pool->thread);
+			}
 		}
 	}

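The fmr_pool.c hunks above replace the "flush whenever dirty_len crosses the watermark" test with a pair of serial numbers: the unmap path bumps req_ser when a flush is wanted, and the cleanup thread flushes while flush_ser lags behind, bumping it once per batch. A compilable sketch of the handshake, with hypothetical reduced names:

#include <stdatomic.h>

static atomic_int req_ser, flush_ser;

static void unmap_one(int *dirty_len, int watermark)
{
	if (++*dirty_len >= watermark) {
		atomic_fetch_add(&req_ser, 1);
		/* the kernel version also does wake_up_process(pool->thread) */
	}
}

static void cleanup_thread_iteration(void)
{
	/* same signed test as the kernel's
	 * atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0 */
	if (atomic_load(&flush_ser) - atomic_load(&req_ser) < 0) {
		/* ib_fmr_batch_release(pool) would run here */
		atomic_fetch_add(&flush_ser, 1);
	}
}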
@@ -701,7 +701,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	}

 	/* Check to post send on QP or process locally */
-	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
+	if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
+	    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
 		goto out;

 	local = kmalloc(sizeof *local, GFP_ATOMIC);

@@ -752,8 +753,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 				    mad_agent_priv->agent.port_num);
 	if (port_priv) {
-		mad_priv->mad.mad.mad_hdr.tid =
-			((struct ib_mad *)smp)->mad_hdr.tid;
+		memcpy(&mad_priv->mad.mad, smp, sizeof(struct ib_mad));
 		recv_mad_agent = find_mad_agent(port_priv,
 						&mad_priv->mad.mad);
 	}

@@ -1100,7 +1100,9 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 	mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
 	/* Timeout will be updated after send completes */
 	mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
-	mad_send_wr->retries = send_buf->retries;
+	mad_send_wr->max_retries = send_buf->retries;
+	mad_send_wr->retries_left = send_buf->retries;
+	send_buf->retries = 0;
 	/* Reference for work request to QP + response */
 	mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
 	mad_send_wr->status = IB_WC_SUCCESS;

@@ -1931,15 +1933,6 @@ local:
 	if (port_priv->device->process_mad) {
 		int ret;

-		if (!response) {
-			printk(KERN_ERR PFX "No memory for response MAD\n");
-			/*
-			 * Is it better to assume that
-			 * it wouldn't be processed ?
-			 */
-			goto out;
-		}
-
 		ret = port_priv->device->process_mad(port_priv->device, 0,
 						     port_priv->port_num,
 						     wc, &recv->grh,

@@ -2282,8 +2275,6 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)

 	/* Empty wait list to prevent receives from finding a request */
 	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
-	/* Empty local completion list as well */
-	list_splice_init(&mad_agent_priv->local_list, &cancel_list);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

 	/* Report all cancelled requests */

@@ -2445,9 +2436,12 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	int ret;

-	if (!mad_send_wr->retries--)
+	if (!mad_send_wr->retries_left)
 		return -ETIMEDOUT;

+	mad_send_wr->retries_left--;
+	mad_send_wr->send_buf.retries++;
+
 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

 	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
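The ib_post_send_mad()/retry_send() hunks above split the old single counter into max_retries plus retries_left, and recycle send_buf->retries to report how many retries were actually consumed. Reduced sketch of the lifecycle (hypothetical types, not the kernel structs):

struct send_wr {
	int max_retries;	/* what the user asked for */
	int retries_left;	/* counts down on each timeout */
	int used_retries;	/* plays the role of send_buf->retries */
};

static void post_send(struct send_wr *wr, int user_retries)
{
	wr->max_retries  = user_retries;
	wr->retries_left = user_retries;
	wr->used_retries = 0;
}

static int retry_send(struct send_wr *wr)
{
	if (!wr->retries_left)
		return -1;	/* -ETIMEDOUT in the kernel */
	wr->retries_left--;
	wr->used_retries++;
	return 0;		/* an RMPP ACK may later reset retries_left */
}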
@@ -131,7 +131,8 @@ struct ib_mad_send_wr_private {
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	__be64 tid;
 	unsigned long timeout;
-	int retries;
+	int max_retries;
+	int retries_left;
 	int retry;
 	int refcount;
 	enum ib_wc_status status;
@@ -684,7 +684,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,

 	if (seg_num > mad_send_wr->last_ack) {
 		adjust_last_ack(mad_send_wr, seg_num);
-		mad_send_wr->retries = mad_send_wr->send_buf.retries;
+		mad_send_wr->retries_left = mad_send_wr->max_retries;
 	}
 	mad_send_wr->newwin = newwin;
 	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
@@ -73,11 +73,20 @@ struct mcast_device {
 };

 enum mcast_state {
-	MCAST_IDLE,
 	MCAST_JOINING,
 	MCAST_MEMBER,
+	MCAST_ERROR,
+};
+
+enum mcast_group_state {
+	MCAST_IDLE,
 	MCAST_BUSY,
-	MCAST_ERROR
+	MCAST_GROUP_ERROR,
+	MCAST_PKEY_EVENT
 };

+enum {
+	MCAST_INVALID_PKEY_INDEX = 0xFFFF
+};
+
 struct mcast_member;

@@ -93,9 +102,10 @@ struct mcast_group {
 	struct mcast_member	*last_join;
 	int			members[3];
 	atomic_t		refcount;
-	enum mcast_state	state;
+	enum mcast_group_state	state;
 	struct ib_sa_query	*query;
 	int			query_id;
+	u16			pkey_index;
 };

 struct mcast_member {

@@ -378,9 +388,19 @@ static int fail_join(struct mcast_group *group, struct mcast_member *member,
 static void process_group_error(struct mcast_group *group)
 {
 	struct mcast_member *member;
-	int ret;
+	int ret = 0;
+	u16 pkey_index;
+
+	if (group->state == MCAST_PKEY_EVENT)
+		ret = ib_find_pkey(group->port->dev->device,
+				   group->port->port_num,
+				   be16_to_cpu(group->rec.pkey), &pkey_index);

 	spin_lock_irq(&group->lock);
+	if (group->state == MCAST_PKEY_EVENT && !ret &&
+	    group->pkey_index == pkey_index)
+		goto out;
+
 	while (!list_empty(&group->active_list)) {
 		member = list_entry(group->active_list.next,
 				    struct mcast_member, list);

@@ -399,6 +419,7 @@ static void process_group_error(struct mcast_group *group)
 	}

 	group->rec.join_state = 0;
+out:
 	group->state = MCAST_BUSY;
 	spin_unlock_irq(&group->lock);
 }

@@ -415,9 +436,9 @@ static void mcast_work_handler(struct work_struct *work)
 retest:
 	spin_lock_irq(&group->lock);
 	while (!list_empty(&group->pending_list) ||
-	       (group->state == MCAST_ERROR)) {
+	       (group->state != MCAST_BUSY)) {

-		if (group->state == MCAST_ERROR) {
+		if (group->state != MCAST_BUSY) {
 			spin_unlock_irq(&group->lock);
 			process_group_error(group);
 			goto retest;

@@ -494,12 +515,19 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
 			 void *context)
 {
 	struct mcast_group *group = context;
+	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

 	if (status)
 		process_join_error(group, status);
 	else {
+		ib_find_pkey(group->port->dev->device, group->port->port_num,
+			     be16_to_cpu(rec->pkey), &pkey_index);
+
 		spin_lock_irq(&group->port->lock);
 		group->rec = *rec;
+		if (group->state == MCAST_BUSY &&
+		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
+			group->pkey_index = pkey_index;
 		if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
 			rb_erase(&group->node, &group->port->table);
 			mcast_insert(group->port, group, 1);

@@ -539,6 +567,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,

 	group->port = port;
 	group->rec.mgid = *mgid;
+	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
 	INIT_LIST_HEAD(&group->pending_list);
 	INIT_LIST_HEAD(&group->active_list);
 	INIT_WORK(&group->work, mcast_work_handler);

@@ -707,7 +736,8 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
 }
 EXPORT_SYMBOL(ib_init_ah_from_mcmember);

-static void mcast_groups_lost(struct mcast_port *port)
+static void mcast_groups_event(struct mcast_port *port,
+			       enum mcast_group_state state)
 {
 	struct mcast_group *group;
 	struct rb_node *node;

@@ -721,7 +751,8 @@ static void mcast_groups_lost(struct mcast_port *port)
 			atomic_inc(&group->refcount);
 			queue_work(mcast_wq, &group->work);
 		}
-		group->state = MCAST_ERROR;
+		if (group->state != MCAST_GROUP_ERROR)
+			group->state = state;
 		spin_unlock(&group->lock);
 	}
 	spin_unlock_irqrestore(&port->lock, flags);

@@ -731,16 +762,20 @@ static void mcast_event_handler(struct ib_event_handler *handler,
 				struct ib_event *event)
 {
 	struct mcast_device *dev;
+	int index;

 	dev = container_of(handler, struct mcast_device, event_handler);
+	index = event->element.port_num - dev->start_port;

 	switch (event->event) {
 	case IB_EVENT_PORT_ERR:
 	case IB_EVENT_LID_CHANGE:
 	case IB_EVENT_SM_CHANGE:
 	case IB_EVENT_CLIENT_REREGISTER:
-		mcast_groups_lost(&dev->port[event->element.port_num -
-					     dev->start_port]);
+		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
 		break;
+	case IB_EVENT_PKEY_CHANGE:
+		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
+		break;
 	default:
 		break;
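The multicast.c hunks above generalize the old "groups lost" handling into a per-group event state: port errors map to MCAST_GROUP_ERROR, while a P_Key change maps to MCAST_PKEY_EVENT and only escalates to a member-visible error if the group's cached pkey_index no longer resolves. Reduced decision sketch with hypothetical names:

enum group_state { BUSY, GROUP_ERROR, PKEY_EVENT };

static enum group_state classify(int is_pkey_change)
{
	return is_pkey_change ? PKEY_EVENT : GROUP_ERROR;
}

static int must_error_members(enum group_state st,
			      int pkey_found, int index_unchanged)
{
	/* a pkey event whose pkey still maps to the same index is benign */
	return !(st == PKEY_EVENT && pkey_found && index_unchanged);
}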
@@ -59,7 +59,8 @@ extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 					      u8 node_type, int port_num);

 /*
- * Return 1 if the SMP should be handled by the local SMA/SM via process_mad
+ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
+ * via process_mad
  */
 static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
 						  struct ib_device *device)

@@ -71,4 +72,19 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
 		(smp->hop_ptr == smp->hop_cnt + 1)) ?
 		IB_SMI_HANDLE : IB_SMI_DISCARD);
 }

+/*
+ * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
+ * via process_mad
+ */
+static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp,
+							    struct ib_device *device)
+{
+	/* C14-13:3 -- We're at the end of the DR segment of path */
+	/* C14-13:4 -- Hop Pointer == 0 -> give to SM */
+	return ((device->process_mad &&
+		ib_get_smp_direction(smp) &&
+		!smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD);
+}
+
 #endif	/* __SMI_H_ */
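The two inline helpers above encode the IBA C14-13 directed-route tests: a request bound for the local SM carries hop_ptr == hop_cnt + 1, while a returning reply carries the direction bit with hop_ptr == 0; handle_outgoing_dr_smp() (in the mad.c hunk earlier) treats an SMP as local if either test passes. A reduced, compilable restatement (hypothetical struct, not struct ib_smp):

struct dr_smp { int direction, hop_ptr, hop_cnt; };

static int local_request(const struct dr_smp *s)	/* C14-13:1,2 analogue */
{
	return !s->direction && s->hop_ptr == s->hop_cnt + 1;
}

static int local_returning(const struct dr_smp *s)	/* C14-13:3,4 analogue */
{
	return s->direction && s->hop_ptr == 0;
}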
@@ -106,6 +106,9 @@ enum {
 	IB_UCM_MAX_DEVICES = 32
 };

+/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
+extern struct class cm_class;
+
 #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)

 static void ib_ucm_add_one(struct ib_device *device);

@@ -1199,7 +1202,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
 	return 0;
 }

-static void ib_ucm_release_class_dev(struct class_device *class_dev)
+static void ucm_release_class_dev(struct class_device *class_dev)
 {
 	struct ib_ucm_device *dev;

@@ -1217,11 +1220,6 @@ static const struct file_operations ucm_fops = {
 	.poll    = ib_ucm_poll,
 };

-static struct class ucm_class = {
-	.name    = "infiniband_cm",
-	.release = ib_ucm_release_class_dev
-};
-
 static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
 {
 	struct ib_ucm_device *dev;

@@ -1257,9 +1255,10 @@ static void ib_ucm_add_one(struct ib_device *device)
 	if (cdev_add(&ucm_dev->dev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
 		goto err;

-	ucm_dev->class_dev.class = &ucm_class;
+	ucm_dev->class_dev.class = &cm_class;
 	ucm_dev->class_dev.dev = device->dma_device;
 	ucm_dev->class_dev.devt = ucm_dev->dev.dev;
+	ucm_dev->class_dev.release = ucm_release_class_dev;
 	snprintf(ucm_dev->class_dev.class_id, BUS_ID_SIZE, "ucm%d",
 		 ucm_dev->devnum);
 	if (class_device_register(&ucm_dev->class_dev))

@@ -1306,40 +1305,34 @@ static int __init ib_ucm_init(void)
 				     "infiniband_cm");
 	if (ret) {
 		printk(KERN_ERR "ucm: couldn't register device number\n");
-		goto err;
+		goto error1;
 	}

-	ret = class_register(&ucm_class);
-	if (ret) {
-		printk(KERN_ERR "ucm: couldn't create class infiniband_cm\n");
-		goto err_chrdev;
-	}
-
-	ret = class_create_file(&ucm_class, &class_attr_abi_version);
+	ret = class_create_file(&cm_class, &class_attr_abi_version);
 	if (ret) {
 		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
-		goto err_class;
+		goto error2;
 	}

 	ret = ib_register_client(&ucm_client);
 	if (ret) {
 		printk(KERN_ERR "ucm: couldn't register client\n");
-		goto err_class;
+		goto error3;
 	}
 	return 0;

-err_class:
-	class_unregister(&ucm_class);
-err_chrdev:
+error3:
+	class_remove_file(&cm_class, &class_attr_abi_version);
+error2:
 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
-err:
+error1:
 	return ret;
 }

 static void __exit ib_ucm_cleanup(void)
 {
 	ib_unregister_client(&ucm_client);
-	class_unregister(&ucm_class);
+	class_remove_file(&cm_class, &class_attr_abi_version);
 	unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
 	idr_destroy(&ctx_id_table);
 }
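The ib_ucm_init() rework above also switches to the kernel's numbered-unwind-label idiom: each errorN label undoes exactly the steps that had succeeded before step N, so adding or removing a setup step only touches one label. A minimal compilable sketch:

static int setup_step(int n) { (void)n; return 0; }

int init_sketch(void)
{
	int ret;

	if ((ret = setup_step(1)))
		goto error1;
	if ((ret = setup_step(2)))
		goto error2;
	if ((ret = setup_step(3)))
		goto error3;
	return 0;

error3:
	/* undo step 2 here */
error2:
	/* undo step 1 here */
error1:
	return ret;
}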
@@ -31,6 +31,7 @@
  */

 #include <linux/completion.h>
+#include <linux/file.h>
 #include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/idr.h>
@@ -991,6 +992,96 @@ out:
 	return ret;
 }

+static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
+{
+	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
+	if (file1 < file2) {
+		mutex_lock(&file1->mut);
+		mutex_lock(&file2->mut);
+	} else {
+		mutex_lock(&file2->mut);
+		mutex_lock(&file1->mut);
+	}
+}
+
+static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
+{
+	if (file1 < file2) {
+		mutex_unlock(&file2->mut);
+		mutex_unlock(&file1->mut);
+	} else {
+		mutex_unlock(&file1->mut);
+		mutex_unlock(&file2->mut);
+	}
+}
+
+static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
+{
+	struct ucma_event *uevent, *tmp;
+
+	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
+		if (uevent->ctx == ctx)
+			list_move_tail(&uevent->list, &file->event_list);
+}
+
+static ssize_t ucma_migrate_id(struct ucma_file *new_file,
+			       const char __user *inbuf,
+			       int in_len, int out_len)
+{
+	struct rdma_ucm_migrate_id cmd;
+	struct rdma_ucm_migrate_resp resp;
+	struct ucma_context *ctx;
+	struct file *filp;
+	struct ucma_file *cur_file;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+		return -EFAULT;
+
+	/* Get current fd to protect against it being closed */
+	filp = fget(cmd.fd);
+	if (!filp)
+		return -ENOENT;
+
+	/* Validate current fd and prevent destruction of id. */
+	ctx = ucma_get_ctx(filp->private_data, cmd.id);
+	if (IS_ERR(ctx)) {
+		ret = PTR_ERR(ctx);
+		goto file_put;
+	}
+
+	cur_file = ctx->file;
+	if (cur_file == new_file) {
+		resp.events_reported = ctx->events_reported;
+		goto response;
+	}
+
+	/*
+	 * Migrate events between fd's, maintaining order, and avoiding new
+	 * events being added before existing events.
+	 */
+	ucma_lock_files(cur_file, new_file);
+	mutex_lock(&mut);
+
+	list_move_tail(&ctx->list, &new_file->ctx_list);
+	ucma_move_events(ctx, new_file);
+	ctx->file = new_file;
+	resp.events_reported = ctx->events_reported;
+
+	mutex_unlock(&mut);
+	ucma_unlock_files(cur_file, new_file);
+
+response:
+	if (copy_to_user((void __user *)(unsigned long)cmd.response,
+			 &resp, sizeof(resp)))
+		ret = -EFAULT;
+
+	ucma_put_ctx(ctx);
+file_put:
+	fput(filp);
+	return ret;
+}
+
 static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
 				   const char __user *inbuf,
 				   int in_len, int out_len) = {
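ucma_lock_files() above avoids an AB-BA deadlock between two ucma_file mutexes by always acquiring them in address order, so any two threads migrating IDs between the same pair of files agree on the order. Compilable pthread sketch of the same idiom (userspace analogue, hypothetical names):

#include <pthread.h>

struct fctx { pthread_mutex_t mut; };

static void lock_pair(struct fctx *a, struct fctx *b)
{
	if (a < b) {		/* pointer value gives a total order */
		pthread_mutex_lock(&a->mut);
		pthread_mutex_lock(&b->mut);
	} else {
		pthread_mutex_lock(&b->mut);
		pthread_mutex_lock(&a->mut);
	}
}

static void unlock_pair(struct fctx *a, struct fctx *b)
{
	if (a < b) {		/* release in reverse acquisition order */
		pthread_mutex_unlock(&b->mut);
		pthread_mutex_unlock(&a->mut);
	} else {
		pthread_mutex_unlock(&a->mut);
		pthread_mutex_unlock(&b->mut);
	}
}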
@@ -1012,6 +1103,7 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
 	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
 	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
 	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
+	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
 };

 static ssize_t ucma_write(struct file *filp, const char __user *buf,
@@ -2,6 +2,7 @@
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2008 Cisco. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU

@@ -42,7 +43,7 @@
 #include <linux/cdev.h>
 #include <linux/dma-mapping.h>
 #include <linux/poll.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
 #include <linux/kref.h>
 #include <linux/compat.h>

@@ -94,7 +95,7 @@ struct ib_umad_port {
 	struct class_device   *sm_class_dev;
 	struct semaphore       sm_sem;

-	struct rw_semaphore    mutex;
+	struct mutex	       file_mutex;
 	struct list_head       file_list;

 	struct ib_device      *ib_dev;

@@ -110,11 +111,11 @@ struct ib_umad_device {
 };

 struct ib_umad_file {
+	struct mutex		mutex;
 	struct ib_umad_port    *port;
 	struct list_head	recv_list;
 	struct list_head	send_list;
 	struct list_head	port_list;
-	spinlock_t		recv_lock;
 	spinlock_t		send_lock;
 	wait_queue_head_t	recv_wait;
 	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];

@@ -156,7 +157,7 @@ static int hdr_size(struct ib_umad_file *file)
 			      sizeof (struct ib_user_mad_hdr_old);
 }

-/* caller must hold port->mutex at least for reading */
+/* caller must hold file->mutex */
 static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
 {
 	return file->agents_dead ? NULL : file->agent[id];
@@ -168,32 +169,30 @@ static int queue_packet(struct ib_umad_file *file,
 {
 	int ret = 1;

-	down_read(&file->port->mutex);
+	mutex_lock(&file->mutex);

 	for (packet->mad.hdr.id = 0;
 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
 	     packet->mad.hdr.id++)
 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
-			spin_lock_irq(&file->recv_lock);
 			list_add_tail(&packet->list, &file->recv_list);
-			spin_unlock_irq(&file->recv_lock);
 			wake_up_interruptible(&file->recv_wait);
 			ret = 0;
 			break;
 		}

-	up_read(&file->port->mutex);
+	mutex_unlock(&file->mutex);

 	return ret;
 }

 static void dequeue_send(struct ib_umad_file *file,
 			 struct ib_umad_packet *packet)
 {
 	spin_lock_irq(&file->send_lock);
 	list_del(&packet->list);
 	spin_unlock_irq(&file->send_lock);
 }

 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)

@@ -341,10 +340,10 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	if (count < hdr_size(file))
 		return -EINVAL;

-	spin_lock_irq(&file->recv_lock);
+	mutex_lock(&file->mutex);

 	while (list_empty(&file->recv_list)) {
-		spin_unlock_irq(&file->recv_lock);
+		mutex_unlock(&file->mutex);

 		if (filp->f_flags & O_NONBLOCK)
 			return -EAGAIN;

@@ -353,13 +352,13 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 					     !list_empty(&file->recv_list)))
 			return -ERESTARTSYS;

-		spin_lock_irq(&file->recv_lock);
+		mutex_lock(&file->mutex);
 	}

 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
 	list_del(&packet->list);

-	spin_unlock_irq(&file->recv_lock);
+	mutex_unlock(&file->mutex);

 	if (packet->recv_wc)
 		ret = copy_recv_mad(file, buf, packet, count);

@@ -368,9 +367,9 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,

 	if (ret < 0) {
 		/* Requeue packet */
-		spin_lock_irq(&file->recv_lock);
+		mutex_lock(&file->mutex);
 		list_add(&packet->list, &file->recv_list);
-		spin_unlock_irq(&file->recv_lock);
+		mutex_unlock(&file->mutex);
 	} else {
 		if (packet->recv_wc)
 			ib_free_recv_mad(packet->recv_wc);

@@ -481,7 +480,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err;
 	}

-	down_read(&file->port->mutex);
+	mutex_lock(&file->mutex);

 	agent = __get_agent(file, packet->mad.hdr.id);
 	if (!agent) {

@@ -577,7 +576,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	if (ret)
 		goto err_send;

-	up_read(&file->port->mutex);
+	mutex_unlock(&file->mutex);
 	return count;

 err_send:

@@ -587,7 +586,7 @@ err_msg:
 err_ah:
 	ib_destroy_ah(ah);
 err_up:
-	up_read(&file->port->mutex);
+	mutex_unlock(&file->mutex);
 err:
 	kfree(packet);
 	return ret;
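The read-path hunks above work because, after this conversion, the receive list is only ever touched from process context: a single per-file mutex can replace the old rwsem-plus-spinlock pair, and the reader can sleep with the lock dropped. Pthread sketch of the resulting consumer loop (hypothetical reduced names):

#include <pthread.h>

struct ufile {
	pthread_mutex_t mutex;
	pthread_cond_t	recv_wait;
	int		recv_count;	/* stands in for recv_list */
};

static void umad_read_sketch(struct ufile *f)
{
	pthread_mutex_lock(&f->mutex);
	while (f->recv_count == 0)	/* sleeps with the mutex released */
		pthread_cond_wait(&f->recv_wait, &f->mutex);
	f->recv_count--;		/* list_del(&packet->list) analogue */
	pthread_mutex_unlock(&f->mutex);
}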
@@ -613,11 +612,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
 {
 	struct ib_user_mad_reg_req ureq;
 	struct ib_mad_reg_req req;
-	struct ib_mad_agent *agent;
+	struct ib_mad_agent *agent = NULL;
 	int agent_id;
 	int ret;

-	down_write(&file->port->mutex);
+	mutex_lock(&file->port->file_mutex);
+	mutex_lock(&file->mutex);

 	if (!file->port->ib_dev) {
 		ret = -EPIPE;

@@ -666,13 +666,13 @@ found:
 				      send_handler, recv_handler, file);
 	if (IS_ERR(agent)) {
 		ret = PTR_ERR(agent);
+		agent = NULL;
 		goto out;
 	}

 	if (put_user(agent_id,
 		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
 		ret = -EFAULT;
-		ib_unregister_mad_agent(agent);
 		goto out;
 	}

@@ -690,7 +690,13 @@ found:
 	ret = 0;

 out:
-	up_write(&file->port->mutex);
+	mutex_unlock(&file->mutex);
+
+	if (ret && agent)
+		ib_unregister_mad_agent(agent);
+
+	mutex_unlock(&file->port->file_mutex);
+
 	return ret;
 }

@@ -703,7 +709,8 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 	if (get_user(id, arg))
 		return -EFAULT;

-	down_write(&file->port->mutex);
+	mutex_lock(&file->port->file_mutex);
+	mutex_lock(&file->mutex);

 	if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
 		ret = -EINVAL;

@@ -714,11 +721,13 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 	file->agent[id] = NULL;

 out:
-	up_write(&file->port->mutex);
+	mutex_unlock(&file->mutex);

 	if (agent)
 		ib_unregister_mad_agent(agent);

+	mutex_unlock(&file->port->file_mutex);
+
 	return ret;
 }

@@ -726,12 +735,12 @@ static long ib_umad_enable_pkey(struct ib_umad_file *file)
 {
 	int ret = 0;

-	down_write(&file->port->mutex);
+	mutex_lock(&file->mutex);
 	if (file->already_used)
 		ret = -EINVAL;
 	else
 		file->use_pkey_index = 1;
-	up_write(&file->port->mutex);
+	mutex_unlock(&file->mutex);

 	return ret;
 }
@@ -783,7 +792,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 	if (!port)
 		return -ENXIO;

-	down_write(&port->mutex);
+	mutex_lock(&port->file_mutex);

 	if (!port->ib_dev) {
 		ret = -ENXIO;

@@ -797,7 +806,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 		goto out;
 	}

-	spin_lock_init(&file->recv_lock);
+	mutex_init(&file->mutex);
 	spin_lock_init(&file->send_lock);
 	INIT_LIST_HEAD(&file->recv_list);
 	INIT_LIST_HEAD(&file->send_list);

@@ -809,7 +818,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 	list_add_tail(&file->port_list, &port->file_list);

 out:
-	up_write(&port->mutex);
+	mutex_unlock(&port->file_mutex);
 	return ret;
 }

@@ -821,7 +830,8 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 	int already_dead;
 	int i;

-	down_write(&file->port->mutex);
+	mutex_lock(&file->port->file_mutex);
+	mutex_lock(&file->mutex);

 	already_dead = file->agents_dead;
 	file->agents_dead = 1;

@@ -834,14 +844,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)

 	list_del(&file->port_list);

-	downgrade_write(&file->port->mutex);
+	mutex_unlock(&file->mutex);

 	if (!already_dead)
 		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
 			if (file->agent[i])
 				ib_unregister_mad_agent(file->agent[i]);

-	up_read(&file->port->mutex);
+	mutex_unlock(&file->port->file_mutex);

 	kfree(file);
 	kref_put(&dev->ref, ib_umad_release_dev);

@@ -914,10 +924,10 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
 	};
 	int ret = 0;

-	down_write(&port->mutex);
+	mutex_lock(&port->file_mutex);
 	if (port->ib_dev)
 		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
-	up_write(&port->mutex);
+	mutex_unlock(&port->file_mutex);

 	up(&port->sm_sem);

@@ -981,7 +991,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
 	port->ib_dev   = device;
 	port->port_num = port_num;
 	init_MUTEX(&port->sm_sem);
-	init_rwsem(&port->mutex);
+	mutex_init(&port->file_mutex);
 	INIT_LIST_HEAD(&port->file_list);

 	port->dev = cdev_alloc();

@@ -1052,6 +1062,7 @@ err_cdev:
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
 	struct ib_umad_file *file;
+	int already_dead;
 	int id;

 	class_set_devdata(port->class_dev, NULL);
@@ -1067,42 +1078,22 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
 	umad_port[port->dev_num] = NULL;
 	spin_unlock(&port_lock);

-	down_write(&port->mutex);
+	mutex_lock(&port->file_mutex);

 	port->ib_dev = NULL;

-	/*
-	 * Now go through the list of files attached to this port and
-	 * unregister all of their MAD agents.  We need to hold
-	 * port->mutex while doing this to avoid racing with
-	 * ib_umad_close(), but we can't hold the mutex for writing
-	 * while calling ib_unregister_mad_agent(), since that might
-	 * deadlock by calling back into queue_packet().  So we
-	 * downgrade our lock to a read lock, and then drop and
-	 * reacquire the write lock for the next iteration.
-	 *
-	 * We do list_del_init() on the file's list_head so that the
-	 * list_del in ib_umad_close() is still OK, even after the
-	 * file is removed from the list.
-	 */
-	while (!list_empty(&port->file_list)) {
-		file = list_entry(port->file_list.next, struct ib_umad_file,
-				  port_list);
-
+	list_for_each_entry(file, &port->file_list, port_list) {
+		mutex_lock(&file->mutex);
+		already_dead = file->agents_dead;
 		file->agents_dead = 1;
-		list_del_init(&file->port_list);
-
-		downgrade_write(&port->mutex);
+		mutex_unlock(&file->mutex);

 		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
 			if (file->agent[id])
 				ib_unregister_mad_agent(file->agent[id]);
-
-		up_read(&port->mutex);
-		down_write(&port->mutex);
 	}

-	up_write(&port->mutex);
+	mutex_unlock(&port->file_mutex);

 	clear_bit(port->dev_num, dev_map);
 }
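Why ib_umad_kill_port() may now hold port->file_mutex across ib_unregister_mad_agent(): the MAD callback (queue_packet) takes only file->mutex, which is dropped before the blocking unregister, so nothing the callback needs is still held. A pthread sketch of that shutdown rule (loose userspace analogy, not kernel code):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct ufile { pthread_mutex_t mutex; int dead; };

static void callback_path(struct ufile *f)
{
	pthread_mutex_lock(&f->mutex);	/* never touches list_lock */
	if (!f->dead)
		;			/* queue the packet */
	pthread_mutex_unlock(&f->mutex);
}

static void kill_sketch(struct ufile *f)
{
	pthread_mutex_lock(&list_lock);
	pthread_mutex_lock(&f->mutex);
	f->dead = 1;
	pthread_mutex_unlock(&f->mutex);
	/* blocking teardown is safe here: the callback only
	 * ever needs f->mutex, which we no longer hold */
	pthread_mutex_unlock(&list_lock);
}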
@@ -179,7 +179,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.size = 1UL << cq->size_log2;
 	setup.credits = 65535;
 	setup.credit_thres = 1;
-	if (rdev_p->t3cdev_p->type == T3B)
+	if (rdev_p->t3cdev_p->type != T3A)
 		setup.ovfl_mode = 0;
 	else
 		setup.ovfl_mode = 1;
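Inverting the test from "== T3B" to "!= T3A" makes the workaround a property of the one old part rather than an allow-list of newer ones, so later chip revisions get the normal path by default. Trivial sketch (T3C is a made-up future revision, for illustration only):

enum chip_type { T3A, T3B, T3C /* hypothetical later revision */ };

static int needs_t3a_workaround(enum chip_type t)
{
	return t == T3A;	/* no need to enumerate every newer chip */
}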
@@ -584,7 +584,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 {
 	u32 i, nr_wqe, copy_len;
 	u8 *copy_data;
-	u8 wr_len, utx_len;	/* lenght in 8 byte flit */
+	u8 wr_len, utx_len;	/* length in 8 byte flit */
 	enum t3_wr_flags flag;
 	__be64 *wqe;
 	u64 utx_cmd;
@@ -315,7 +315,7 @@ struct t3_rdma_init_wr {
 	__be32 ird;
 	__be64 qp_dma_addr;	/* 7 */
 	__be32 qp_dma_size;	/* 8 */
-	u32 irs;
+	__be32 irs;
 };

 struct t3_genbit {

@@ -324,7 +324,8 @@ struct t3_genbit {
 };

 enum rdma_init_wr_flags {
-	RECVS_POSTED = 1,
+	RECVS_POSTED = (1<<0),
+	PRIV_QP = (1<<1),
 };

 union t3_wr {
@@ -1118,7 +1118,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	     status2errno(rpl->status));
 	connect_reply_upcall(ep, status2errno(rpl->status));
 	state_set(&ep->com, DEAD);
-	if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
+	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
 		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
 	cxgb3_free_atid(ep->com.tdev, ep->atid);
 	dst_release(ep->dst);

@@ -1249,7 +1249,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 	skb_trim(skb, sizeof(struct cpl_tid_release));
 	skb_get(skb);

-	if (tdev->type == T3B)
+	if (tdev->type != T3A)
 		release_tid(tdev, hwtid, skb);
 	else {
 		struct cpl_pass_accept_rpl *rpl;
@@ -122,6 +122,13 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
 		*total_size += buffer_list[i].size;
 		if (i > 0)
 			mask |= buffer_list[i].addr;
+		else
+			mask |= buffer_list[i].addr & PAGE_MASK;
+		if (i != num_phys_buf - 1)
+			mask |= buffer_list[i].addr + buffer_list[i].size;
+		else
+			mask |= (buffer_list[i].addr + buffer_list[i].size +
+				PAGE_SIZE - 1) & PAGE_MASK;
 	}

 	if (*total_size > 0xFFFFFFFFULL)
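The hunk above ORs the (possibly unaligned) start of the first buffer and end of the last buffer into "mask", alongside every interior boundary; the lowest set bit of the result then bounds the largest page size that tiles all the buffers. Compilable worked example with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* buffers 0x201000+0x1000 and 0x202000+0x2000 give boundaries
	 * at 0x201000, 0x202000 and 0x204000 */
	uint64_t mask = 0x201000 | 0x202000 | 0x204000;
	/* lowest set bit is 0x1000, so 4 KB pages (shift 12) still fit */
	printf("max page size: 0x%llx\n",
	       (unsigned long long)(mask & ~(mask - 1)));
	return 0;
}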
@@ -39,6 +39,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
+#include <linux/rtnetlink.h>

 #include <asm/io.h>
 #include <asm/irq.h>

@@ -645,7 +646,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err)
 		goto err;

-	if (udata && t3b_device(rhp)) {
+	if (udata && !t3a_device(rhp)) {
 		uresp.pbl_addr = (mhp->attr.pbl_addr -
 				 rhp->rdev.rnic_info.pbl_base) >> 3;
 		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,

@@ -1053,7 +1054,9 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.fw_version);
 }

@@ -1065,7 +1068,9 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.driver);
 }

@@ -208,36 +208,19 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
 				struct ib_recv_wr *wr)
 {
-	int i, err = 0;
-	u32 pbl_addr[4];
-	u8 page_size[4];
+	int i;
 	if (wr->num_sge > T3_MAX_SGE)
 		return -EINVAL;
-	err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
-			       page_size);
-	if (err)
-		return err;
-	wqe->recv.pagesz[0] = page_size[0];
-	wqe->recv.pagesz[1] = page_size[1];
-	wqe->recv.pagesz[2] = page_size[2];
-	wqe->recv.pagesz[3] = page_size[3];
 	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
 	for (i = 0; i < wr->num_sge; i++) {
 		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
-		/* to in the WQE == the offset into the page */
-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-				(1UL << (12 + page_size[i])));
-
-		/* pbl_addr is the adapters address in the PBL */
-		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
+		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
 	}
 	for (; i < T3_MAX_SGE; i++) {
 		wqe->recv.sgl[i].stag = 0;
 		wqe->recv.sgl[i].len = 0;
 		wqe->recv.sgl[i].to = 0;
-		wqe->recv.pbl_addr[i] = 0;
 	}
 	return 0;
 }

@@ -659,6 +642,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, *flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

 	/* locking heirarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, *flag);

@@ -668,6 +652,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, *flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))

@@ -678,7 +663,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)

 static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
-	if (t3b_device(qhp->rhp))
+	if (qhp->ibqp.uobject)
 		cxio_set_wq_in_error(&qhp->wq);
 	else
 		__flush_qp(qhp, flag);

@@ -732,6 +717,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.qp_dma_addr = qhp->wq.dma_addr;
 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
 	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
+	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
 	init_attr.irs = qhp->ep->rcv_seq;
 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
 	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,

@@ -847,10 +833,11 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 				disconnect = 1;
 				ep = qhp->ep;
 			}
+			flush_qp(qhp, &flag);
 			break;
 		case IWCH_QP_STATE_TERMINATE:
 			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
-			if (t3b_device(qhp->rhp))
+			if (qhp->ibqp.uobject)
 				cxio_set_wq_in_error(&qhp->wq);
 			if (!internal)
 				terminate = 1;
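flush_qp() above now keys off qhp->ibqp.uobject rather than the chip type: a QP owned by a userspace object has queues the kernel cannot walk, so it is only marked in error, while kernel QPs are flushed synchronously. Reduced sketch of the branch (hypothetical types):

struct qp { int has_user_object, wq_in_error; };

static void flush_qp_sketch(struct qp *q)
{
	if (q->has_user_object)
		q->wq_in_error = 1;	/* userspace library observes the flag */
	else
		;			/* __flush_qp() analogue: complete WRs now */
}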
@@ -1,7 +1,7 @@
 /*
  *  IBM eServer eHCA Infiniband device driver for Linux on POWER
  *
- *  adress vector functions
+ *  address vector functions
  *
  *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
  *           Khadija Souissi <souissik@de.ibm.com>
@@ -94,7 +94,11 @@ struct ehca_sma_attr {

 struct ehca_sport {
 	struct ib_cq *ibcq_aqp1;
-	struct ib_qp *ibqp_aqp1;
+	struct ib_qp *ibqp_sqp[2];
+	/* lock to serialze modify_qp() calls for sqp in normal
+	 * and irq path (when event PORT_ACTIVE is received first time)
+	 */
+	spinlock_t mod_sqp_lock;
 	enum ib_port_state port_state;
 	struct ehca_sma_attr saved_attr;
 };

@@ -141,6 +145,14 @@ enum ehca_ext_qp_type {
 	EQPT_SRQ       = 3,
 };

+/* struct to cache modify_qp()'s parms for GSI/SMI qp */
+struct ehca_mod_qp_parm {
+	int mask;
+	struct ib_qp_attr attr;
+};
+
+#define EHCA_MOD_QP_PARM_MAX 4
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;

@@ -164,10 +176,18 @@ struct ehca_qp {
 	struct ehca_cq *recv_cq;
 	unsigned int sqerr_purgeflag;
 	struct hlist_node list_entries;
+	/* array to cache modify_qp()'s parms for GSI/SMI qp */
+	struct ehca_mod_qp_parm *mod_qp_parm;
+	int mod_qp_parm_idx;
 	/* mmap counter for resources mapped into user space */
 	u32 mm_count_squeue;
 	u32 mm_count_rqueue;
 	u32 mm_count_galpa;
+	/* unsolicited ack circumvention */
+	int unsol_ack_circ;
+	int mtu_shift;
+	u32 message_count;
+	u32 packet_count;
 };

 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)

@@ -323,6 +343,7 @@ extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
 extern int ehca_lock_hcalls;
+extern int ehca_nr_ports;

 struct ipzu_queue_resp {
 	u32 qe_size;      /* queue entry size */
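The ehca_mod_qp_parm array declared above caches modify_qp() attributes issued against an SMI/GSI QP, apparently so they can be applied once the port becomes active (the mod_sqp_lock comment mentions the first PORT_ACTIVE event); EHCA_MOD_QP_PARM_MAX bounds the cache. Reduced sketch of the caching step (hypothetical, the cached ib_qp_attr elided):

#define MOD_QP_PARM_MAX 4

struct mod_qp_parm { int mask; /* plus the cached qp attributes */ };

struct sqp {
	struct mod_qp_parm parm[MOD_QP_PARM_MAX];
	int parm_idx;
};

static int cache_modify_qp(struct sqp *qp, int mask)
{
	if (qp->parm_idx >= MOD_QP_PARM_MAX)
		return -1;		/* cache full: refuse the modify */
	qp->parm[qp->parm_idx++].mask = mask;
	return 0;
}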
Some files were not shown because too many files have changed in this diff.