Merge branch 'linux-2.6'
@@ -40,7 +40,6 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/random.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
@@ -43,6 +43,8 @@
 
 #include "core_priv.h"
 
+#define PFX "fmr_pool: "
+
 enum {
         IB_FMR_MAX_REMAPS = 32,
 
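The hunks that follow prefix every fmr_pool log message with the new PFX macro so the output can be attributed to its subsystem. A minimal sketch of the idiom (the printk calls are the patch's own; the trick is simply C adjacent-string-literal concatenation, so the tag costs nothing at runtime):

#define PFX "fmr_pool: "

/* before: untagged message */
printk(KERN_WARNING "couldn't query device");
/* after: "fmr_pool: couldn't query device: <ret>", assembled at compile time */
printk(KERN_WARNING PFX "couldn't query device: %d", ret);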
@@ -150,7 +152,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 #ifdef DEBUG
         if (fmr->ref_count !=0) {
-                printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d",
+                printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d",
                        fmr, fmr->ref_count);
         }
 #endif
@@ -168,7 +170,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
         ret = ib_unmap_fmr(&fmr_list);
         if (ret)
-                printk(KERN_WARNING "ib_unmap_fmr returned %d", ret);
+                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d", ret);
 
         spin_lock_irq(&pool->pool_lock);
         list_splice(&unmap_list, &pool->free_list);
@@ -226,20 +228,20 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
         device = pd->device;
         if (!device->alloc_fmr    || !device->dealloc_fmr ||
             !device->map_phys_fmr || !device->unmap_fmr) {
-                printk(KERN_WARNING "Device %s does not support fast memory regions",
+                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                        device->name);
                 return ERR_PTR(-ENOSYS);
         }
 
         attr = kmalloc(sizeof *attr, GFP_KERNEL);
         if (!attr) {
-                printk(KERN_WARNING "couldn't allocate device attr struct");
+                printk(KERN_WARNING PFX "couldn't allocate device attr struct");
                 return ERR_PTR(-ENOMEM);
         }
 
         ret = ib_query_device(device, attr);
         if (ret) {
-                printk(KERN_WARNING "couldn't query device");
+                printk(KERN_WARNING PFX "couldn't query device: %d", ret);
                 kfree(attr);
                 return ERR_PTR(ret);
         }
@@ -253,7 +255,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
         pool = kmalloc(sizeof *pool, GFP_KERNEL);
         if (!pool) {
-                printk(KERN_WARNING "couldn't allocate pool struct");
+                printk(KERN_WARNING PFX "couldn't allocate pool struct");
                 return ERR_PTR(-ENOMEM);
         }
 
@@ -270,7 +272,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                         kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                 GFP_KERNEL);
                 if (!pool->cache_bucket) {
-                        printk(KERN_WARNING "Failed to allocate cache in pool");
+                        printk(KERN_WARNING PFX "Failed to allocate cache in pool");
                         ret = -ENOMEM;
                         goto out_free_pool;
                 }
@@ -294,7 +296,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                                    "ib_fmr(%s)",
                                    device->name);
         if (IS_ERR(pool->thread)) {
-                printk(KERN_WARNING "couldn't start cleanup thread");
+                printk(KERN_WARNING PFX "couldn't start cleanup thread");
                 ret = PTR_ERR(pool->thread);
                 goto out_free_pool;
         }
@@ -311,8 +313,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
                 fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
                               GFP_KERNEL);
                 if (!fmr) {
-                        printk(KERN_WARNING "failed to allocate fmr struct "
-                               "for FMR %d", i);
+                        printk(KERN_WARNING PFX "failed to allocate fmr "
+                               "struct for FMR %d", i);
                         goto out_fail;
                 }
 
@@ -323,7 +325,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
                 fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                 if (IS_ERR(fmr->fmr)) {
-                        printk(KERN_WARNING "fmr_create failed for FMR %d", i);
+                        printk(KERN_WARNING PFX "fmr_create failed "
+                               "for FMR %d", i);
                         kfree(fmr);
                         goto out_fail;
                 }
@@ -378,7 +381,7 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
         }
 
         if (i < pool->pool_size)
-                printk(KERN_WARNING "pool still has %d regions registered",
+                printk(KERN_WARNING PFX "pool still has %d regions registered",
                        pool->pool_size - i);
 
         kfree(pool->cache_bucket);
@@ -463,8 +466,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                 list_add(&fmr->list, &pool->free_list);
                 spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-                printk(KERN_WARNING "fmr_map returns %d\n",
-                       result);
+                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
 
                 return ERR_PTR(result);
         }
@@ -516,7 +518,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
         if (fmr->ref_count < 0)
-                printk(KERN_WARNING "FMR %p has ref count %d < 0",
+                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0",
                        fmr, fmr->ref_count);
 #endif
 
@@ -39,7 +39,6 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -2771,7 +2771,7 @@ static int ib_mad_port_open(struct ib_device *device,
         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
         port_priv->cq = ib_create_cq(port_priv->device,
                                      ib_mad_thread_completion_handler,
-                                     NULL, port_priv, cq_size);
+                                     NULL, port_priv, cq_size, 0);
         if (IS_ERR(port_priv->cq)) {
                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                 ret = PTR_ERR(port_priv->cq);
@@ -39,7 +39,6 @@
 
 #include <linux/completion.h>
 #include <linux/err.h>
-#include <linux/pci.h>
 #include <linux/workqueue.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
@@ -34,7 +34,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/bitops.h>
 #include <linux/random.h>
 
@@ -40,7 +40,6 @@
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/kref.h>
 #include <linux/idr.h>
@@ -40,7 +40,6 @@
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/cdev.h>
-#include <linux/pci.h>
 #include <linux/dma-mapping.h>
 #include <linux/poll.h>
 #include <linux/rwsem.h>
@@ -802,6 +802,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
         INIT_LIST_HEAD(&obj->async_list);
 
         cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
+                                             cmd.comp_vector,
                                              file->ucontext, &udata);
         if (IS_ERR(cq)) {
                 ret = PTR_ERR(cq);
@@ -752,7 +752,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
         spin_unlock(&map_lock);
 
         uverbs_dev->ib_dev           = device;
-        uverbs_dev->num_comp_vectors = 1;
+        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
         uverbs_dev->dev = cdev_alloc();
         if (!uverbs_dev->dev)
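With uverbs now reporting the device's real num_comp_vectors and ib_uverbs_create_cq forwarding cmd.comp_vector, userspace can steer each CQ's completion interrupts. A hedged illustration against the libibverbs API of the same era (ctx and channel are assumed to have been created earlier; only the ibv_create_cq() signature is the library's):

/* comp_vector picks which completion interrupt vector services this CQ;
 * it must be less than ctx->num_comp_vectors. */
struct ibv_cq *cq = ibv_create_cq(ctx, 256, NULL, channel, 1 /* comp_vector */);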
@@ -609,11 +609,11 @@ EXPORT_SYMBOL(ib_destroy_qp);
 struct ib_cq *ib_create_cq(struct ib_device *device,
                            ib_comp_handler comp_handler,
                            void (*event_handler)(struct ib_event *, void *),
-                           void *cq_context, int cqe)
+                           void *cq_context, int cqe, int comp_vector)
 {
         struct ib_cq *cq;
 
-        cq = device->create_cq(device, cqe, NULL, NULL);
+        cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
 
         if (!IS_ERR(cq)) {
                 cq->device        = device;
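This is the core of the comp_vector change: ib_create_cq() and the driver create_cq method both gain an int comp_vector parameter, and in-kernel callers such as the ib_mad hunk above pass 0 to keep the old single-vector behavior. A sketch of an updated call site (my_comp_handler, my_ctx, and cq_size are illustrative names, not from the patch):

static void my_comp_handler(struct ib_cq *cq, void *ctx);       /* hypothetical */

/* Vector 0 reproduces the pre-patch behavior; drivers that advertise
 * num_comp_vectors > 1 let consumers spread CQs across interrupt vectors. */
cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, cq_size, 0);
if (IS_ERR(cq))
        return PTR_ERR(cq);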
@@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
 extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
 extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
 extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify);
+extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 
 /* CM */
 extern int c2_llp_connect(struct iw_cm_id *cm_id,
@@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
         return npolled;
 }
 
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 {
         struct c2_mq_shared __iomem *shared;
         struct c2_cq *cq;
+        unsigned long flags;
+        int ret = 0;
 
         cq = to_c2cq(ibcq);
         shared = cq->mq.peer;
 
-        if (notify == IB_CQ_NEXT_COMP)
+        if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
                 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
-        else if (notify == IB_CQ_SOLICITED)
+        else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
         else
                 return -EINVAL;
@@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
          */
         readb(&shared->armed);
 
-        return 0;
+        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+                spin_lock_irqsave(&cq->lock, flags);
+                ret = !c2_mq_empty(&cq->mq);
+                spin_unlock_irqrestore(&cq->lock, flags);
+        }
+
+        return ret;
 }
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
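These hunks convert the CQ arming path from enum ib_cq_notify to the new ib_cq_notify_flags bitmask: the low bits (IB_CQ_SOLICITED_MASK) still select the trigger condition, and the new IB_CQ_REPORT_MISSED_EVENTS bit asks the driver to return a positive value when completions may already be queued at arm time. That hint lets consumers close the poll/arm race without an unconditional second poll pass. A sketch of the intended consumer loop (the handler names are illustrative; the verbs calls are the real kernel API):

static void my_cq_event_handler(struct ib_cq *cq, void *ctx)    /* hypothetical */
{
        struct ib_wc wc;

        do {
                /* drain everything currently in the CQ */
                while (ib_poll_cq(cq, 1, &wc) > 0)
                        my_handle_wc(&wc);                      /* hypothetical */
                /* re-arm; a positive return means a completion may have
                 * slipped in between the poll and the arm, so loop again */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}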
@@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
         return 0;
 }
 
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata)
 {
@@ -795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev)
         memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
         memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
         dev->ibdev.phys_port_cnt = 1;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &dev->pcidev->dev;
         dev->ibdev.query_device = c2_query_device;
         dev->ibdev.query_port = c2_query_port;
@@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
                         return -EIO;
                 }
         }
+
+        return 1;
 	}
+
         return 0;
 }
@@ -38,6 +38,7 @@
 #include "firmware_exports.h"
 
 #define T3_MAX_SGE      4
+#define T3_MAX_INLINE   64
 
 #define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
 #define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
@@ -1109,6 +1109,15 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 
         PDBG("%s ep %p\n", __FUNCTION__, ep);
 
+        /*
+         * We get 2 abort replies from the HW.  The first one must
+         * be ignored except for scribbling that we need one more.
+         */
+        if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
+                ep->flags |= ABORT_REQ_IN_PROGRESS;
+                return CPL_RET_BUF_DONE;
+        }
+
         close_complete_upcall(ep);
         state_set(&ep->com, DEAD);
         release_ep_resources(ep);
@@ -1189,6 +1198,7 @@ static int listen_stop(struct iwch_listen_ep *ep)
         }
         req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+        req->cpu_idx = 0;
         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
         skb->priority = 1;
         ep->com.tdev->send(ep->com.tdev, skb);
@@ -1475,6 +1485,15 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         int ret;
         int state;
 
+        /*
+         * We get 2 peer aborts from the HW.  The first one must
+         * be ignored except for scribbling that we need one more.
+         */
+        if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
+                ep->flags |= PEER_ABORT_IN_PROGRESS;
+                return CPL_RET_BUF_DONE;
+        }
+
         if (is_neg_adv_abort(req->status)) {
                 PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
                      ep->hwtid);
@@ -143,6 +143,11 @@ enum iwch_ep_state {
         DEAD,
 };
 
+enum iwch_ep_flags {
+        PEER_ABORT_IN_PROGRESS  = (1 << 0),
+        ABORT_REQ_IN_PROGRESS   = (1 << 1),
+};
+
 struct iwch_ep_common {
         struct iw_cm_id *cm_id;
         struct iwch_qp *qp;
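These new flag bits back the duplicate-suppression logic added to abort_rpl() and peer_abort() above: the T3 hardware delivers each abort twice, so the first message only records that it was seen and the second does the actual teardown. The idiom in isolation, as a self-contained sketch (the types and names here are invented for illustration, not driver code):

#include <stdio.h>

enum { ABORT_SEEN = 1 << 0 };            /* cf. ABORT_REQ_IN_PROGRESS */

struct ep { unsigned int flags; };

static int handle_abort(struct ep *ep)
{
        if (!(ep->flags & ABORT_SEEN)) {
                ep->flags |= ABORT_SEEN; /* scribble: one more is coming */
                return 0;                /* drop the first reply */
        }
        return 1;                        /* second reply: really tear down */
}

int main(void)
{
        struct ep ep = { 0 };
        int first = handle_abort(&ep);
        int second = handle_abort(&ep);

        printf("%d %d\n", first, second); /* prints "0 1" */
        return 0;
}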
@@ -181,6 +186,7 @@ struct iwch_ep {
         u16 plen;
         u32 ird;
         u32 ord;
+        u32 flags;
 };
 
 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)
@@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
         return 0;
 }
 
-static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                                     struct ib_ucontext *ib_context,
                                     struct ib_udata *udata)
 {
@@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 #endif
 }
 
-static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
+static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
         struct iwch_dev *rhp;
         struct iwch_cq *chp;
@@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
         chp = to_iwch_cq(ibcq);
         rhp = chp->rhp;
-        if (notify == IB_CQ_SOLICITED)
+        if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                 cq_op = CQ_ARM_SE;
         else
                 cq_op = CQ_ARM_AN;
@@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
         PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
         err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
         spin_unlock_irqrestore(&chp->lock, flag);
-        if (err)
+        if (err < 0)
                 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
                        chp->cq.cqid);
+        if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+                err = 0;
         return err;
 }
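Together with the cxio_hal_cq_op() hunk earlier, this is the provider-side half of the missed-events contract. The mapping, summarized as comments rather than driver code:

/*
 * cxio_hal_cq_op() now returns 1 when the CQ already holds entries at
 * arm time, and iwch_arm_cq() translates that to the verbs convention:
 *
 *   err < 0  : hardware failure -> log it and propagate
 *   err > 0  : "maybe missed events" -> report it only if the caller set
 *              IB_CQ_REPORT_MISSED_EVENTS, otherwise squash to 0
 *   err == 0 : armed cleanly, nothing pending
 */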
@@ -780,6 +782,9 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
         if (rqsize > T3_MAX_RQ_SIZE)
                 return ERR_PTR(-EINVAL);
 
+        if (attrs->cap.max_inline_data > T3_MAX_INLINE)
+                return ERR_PTR(-EINVAL);
+
         /*
          * NOTE: The SQ and total WQ sizes don't need to be
          * a power of two.  However, all the code assumes
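This enforces the new T3_MAX_INLINE limit (64 bytes, from the cxio_wr.h hunk above) at QP-creation time instead of letting oversized inline sends misbehave later. How a consumer would now hit the limit, sketched with illustrative values (pd and the rest of the attr setup are assumed to exist):

struct ib_qp_init_attr init_attr = { };
struct ib_qp *qp;

/* send_cq/recv_cq, qp_type, and the other caps are assumed set up here */
init_attr.cap.max_inline_data = 128;    /* > T3_MAX_INLINE (64) on cxgb3 */

qp = ib_create_qp(pd, &init_attr);
if (IS_ERR(qp))
        return PTR_ERR(qp);             /* cxgb3 now fails fast with -EINVAL */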
@@ -1107,6 +1112,7 @@ int iwch_register_device(struct iwch_dev *dev)
         dev->ibdev.node_type = RDMA_NODE_RNIC;
         memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
         dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
         dev->ibdev.query_device = iwch_query_device;
         dev->ibdev.query_port = iwch_query_port;
@@ -471,43 +471,62 @@ int iwch_bind_mw(struct ib_qp *qp,
         return err;
 }
 
-static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
+static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
+                                    u8 *layer_type, u8 *ecode)
 {
-        switch (t3err) {
+        int status = TPT_ERR_INTERNAL_ERR;
+        int tagged = 0;
+        int opcode = -1;
+        int rqtype = 0;
+        int send_inv = 0;
+
+        if (rsp_msg) {
+                status = CQE_STATUS(rsp_msg->cqe);
+                opcode = CQE_OPCODE(rsp_msg->cqe);
+                rqtype = RQ_TYPE(rsp_msg->cqe);
+                send_inv = (opcode == T3_SEND_WITH_INV) ||
+                           (opcode == T3_SEND_WITH_SE_INV);
+                tagged = (opcode == T3_RDMA_WRITE) ||
+                         (rqtype && (opcode == T3_READ_RESP));
+        }
+
+        switch (status) {
         case TPT_ERR_STAG:
-                if (tagged == 1) {
-                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
-                        *ecode = DDPT_INV_STAG;
-                } else if (tagged == 2) {
+                if (send_inv) {
+                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+                        *ecode = RDMAP_CANT_INV_STAG;
+                } else {
                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                         *ecode = RDMAP_INV_STAG;
                 }
                 break;
         case TPT_ERR_PDID:
-        case TPT_ERR_QPID:
-        case TPT_ERR_ACCESS:
-                if (tagged == 1) {
-                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
-                        *ecode = DDPT_STAG_NOT_ASSOC;
-                } else if (tagged == 2) {
-                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                if ((opcode == T3_SEND_WITH_INV) ||
+                    (opcode == T3_SEND_WITH_SE_INV))
+                        *ecode = RDMAP_CANT_INV_STAG;
+                else
                         *ecode = RDMAP_STAG_NOT_ASSOC;
-                }
                 break;
+        case TPT_ERR_QPID:
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *ecode = RDMAP_STAG_NOT_ASSOC;
+                break;
+        case TPT_ERR_ACCESS:
+                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+                *ecode = RDMAP_ACC_VIOL;
+                break;
         case TPT_ERR_WRAP:
                 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                 *ecode = RDMAP_TO_WRAP;
                 break;
         case TPT_ERR_BOUND:
-                if (tagged == 1) {
+                if (tagged) {
                         *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                         *ecode = DDPT_BASE_BOUNDS;
-                } else if (tagged == 2) {
+                } else {
                         *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                         *ecode = RDMAP_BASE_BOUNDS;
-                } else {
-                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
-                        *ecode = DDPU_MSG_TOOBIG;
                 }
                 break;
         case TPT_ERR_INVALIDATE_SHARED_MR:
@@ -591,8 +610,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 {
         union t3_wr *wqe;
         struct terminate_message *term;
-        int status;
-        int tagged = 0;
         struct sk_buff *skb;
 
         PDBG("%s %d\n", __FUNCTION__, __LINE__);
@@ -610,17 +627,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
 
         /* immediate data starts here. */
         term = (struct terminate_message *)wqe->send.sgl;
-        if (rsp_msg) {
-                status = CQE_STATUS(rsp_msg->cqe);
-                if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)
-                        tagged = 1;
-                if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) ||
-                    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP))
-                        tagged = 2;
-        } else {
-                status = TPT_ERR_INTERNAL_ERR;
-        }
-        build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
+        build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
         build_fw_riwrh((void *)wqe, T3_WR_SEND,
                        T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
                        qhp->ep->hwtid, 5);
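The two hunks above complete the TERM-code refactor: decoding the CQE moves out of iwch_post_terminate() and into build_term_codes() itself, which is NULL-safe and now also distinguishes the send-with-invalidate opcodes. The change of contract, summarized as a sketch:

/* Old contract: every caller decoded the CQE first.
 *     status = CQE_STATUS(rsp_msg->cqe);  tagged = ...;
 *     build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
 *
 * New contract: pass the response message (or NULL) and let the helper
 * derive status, opcode, RQ type, and the tagged/send_inv distinctions.
 *     build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
 */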
Some files were not shown because too many files have changed in this diff.