Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/core: Use kmemdup() instead of kmalloc()+memcpy()
  IB/iser: Fix error flow in iser_create_ib_conn_res()
  IB/iser: Enhance disconnection logic for multi-pathing
  IB/iser: Remove buggy back-pointer setting
  IB/iser: Add asynchronous event handler
  MAINTAINERS: Add cxgb4 and iw_cxgb4 entries
  RDMA/cxgb3: Shrink .text with compile-time init of handlers arrays
  IPoIB: Allow disabling/enabling TSO on the fly through ethtool
  IB/mlx4: Add support for masked atomic operations
  IB/core: Add support for masked atomic operations
  RDMA/cma: Randomize local port allocation
  RDMA/nes: Make unnecessarily global functions static
  RDMA/nes: Make nesadapter->phy_lock usage consistent
  RDMA/cxgb4: Add driver for Chelsio T4 RNIC
  IB/mthca: Use the dma state API instead of pci equivalents
  RDMA/amso1100: Use the dma state API instead of pci equivalents
  RDMA/cxgb3: Don't free skbs on NET_XMIT_* indications from LLD
  RDMA/cxgb3: Use the dma state API instead of pci equivalents
  IB: Explicitly rule out llseek to avoid BKL in default_llseek()
@@ -1749,6 +1749,20 @@ W:	http://www.openfabrics.org
 S:	Supported
 F:	drivers/infiniband/hw/cxgb3/

+CXGB4 ETHERNET DRIVER (CXGB4)
+M:	Dimitris Michailidis <dm@chelsio.com>
+L:	netdev@vger.kernel.org
+W:	http://www.chelsio.com
+S:	Supported
+F:	drivers/net/cxgb4/
+
+CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
+M:	Steve Wise <swise@chelsio.com>
+L:	linux-rdma@vger.kernel.org
+W:	http://www.openfabrics.org
+S:	Supported
+F:	drivers/infiniband/hw/cxgb4/
+
 CYBERPRO FB DRIVER
 M:	Russell King <linux@arm.linux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig"
 source "drivers/infiniband/hw/ehca/Kconfig"
 source "drivers/infiniband/hw/amso1100/Kconfig"
 source "drivers/infiniband/hw/cxgb3/Kconfig"
+source "drivers/infiniband/hw/cxgb4/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
@@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH)		+= hw/ipath/
 obj-$(CONFIG_INFINIBAND_EHCA)		+= hw/ehca/
 obj-$(CONFIG_INFINIBAND_AMSO1100)	+= hw/amso1100/
 obj-$(CONFIG_INFINIBAND_CXGB3)		+= hw/cxgb3/
+obj-$(CONFIG_INFINIBAND_CXGB4)		+= hw/cxgb4/
 obj-$(CONFIG_MLX4_INFINIBAND)		+= hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)		+= hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB)		+= ulp/ipoib/
@@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
-static int next_port;

 struct cma_device {
 	struct list_head list;
@@ -1677,13 +1676,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
 		return -EINVAL;

-	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
+	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+				     GFP_KERNEL);
 	if (!id->route.path_rec) {
 		ret = -ENOMEM;
 		goto err;
 	}

-	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
 	id->route.num_paths = num_paths;
 	return 0;
 err:
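The kmemdup() conversions in this series are mechanical: a kmalloc() followed by a memcpy() of the same length collapses into one call, which is shorter and cannot get the two sizes out of sync. A minimal userspace model of what the helper does, assuming only the C library:

#include <stdlib.h>
#include <string.h>

/* Userspace model of the kernel's kmemdup(): allocate len bytes and copy
 * src into them in one step; returns NULL if the allocation fails. */
static void *kmemdup_model(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}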
@@ -1970,47 +1969,33 @@ err1:

 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
 {
-	struct rdma_bind_list *bind_list;
-	int port, ret, low, high;
-
-	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
-	if (!bind_list)
-		return -ENOMEM;
-
-retry:
-	/* FIXME: add proper port randomization per like inet_csk_get_port */
-	do {
-		ret = idr_get_new_above(ps, bind_list, next_port, &port);
-	} while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
-	if (ret)
-		goto err1;
+	static unsigned int last_used_port;
+	int low, high, remaining;
+	unsigned int rover;

 	inet_get_local_port_range(&low, &high);
-	if (port > high) {
-		if (next_port != low) {
-			idr_remove(ps, port);
-			next_port = low;
-			goto retry;
-		}
-		ret = -EADDRNOTAVAIL;
-		goto err2;
+	remaining = (high - low) + 1;
+	rover = net_random() % remaining + low;
+retry:
+	if (last_used_port != rover &&
+	    !idr_find(ps, (unsigned short) rover)) {
+		int ret = cma_alloc_port(ps, id_priv, rover);
+		/*
+		 * Remember previously used port number in order to avoid
+		 * re-using same port immediately after it is closed.
+		 */
+		if (!ret)
+			last_used_port = rover;
+		if (ret != -EADDRNOTAVAIL)
+			return ret;
 	}
-
-	if (port == high)
-		next_port = low;
-	else
-		next_port = port + 1;
-
-	bind_list->ps = ps;
-	bind_list->port = (unsigned short) port;
-	cma_bind_port(bind_list, id_priv);
-	return 0;
-err2:
-	idr_remove(ps, port);
-err1:
-	kfree(bind_list);
-	return ret;
+	if (--remaining) {
+		rover++;
+		if ((rover < low) || (rover > high))
+			rover = low;
+		goto retry;
+	}
+	return -EADDRNOTAVAIL;
 }

 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
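The rewrite above ("RDMA/cma: Randomize local port allocation") replaces the sequential next_port counter with a random starting point followed by a linear probe with wraparound, in the spirit of inet_csk_get_port(). A self-contained userspace sketch of the same search strategy; try_claim() is a hypothetical stand-in for cma_alloc_port():

#include <stdbool.h>
#include <stdlib.h>

static unsigned int last_used;

/* Hypothetical stand-in for cma_alloc_port(): returns true if `port`
 * was free and has now been claimed. */
static bool try_claim(unsigned int port)
{
	static bool claimed[65536];

	if (port > 65535 || claimed[port])
		return false;
	claimed[port] = true;
	return true;
}

/* Pick a random start in [low, high], then probe linearly with wraparound,
 * skipping the most recently used port so it is not re-used immediately. */
int alloc_any_port(unsigned int low, unsigned int high)
{
	int remaining = (int)(high - low) + 1;
	unsigned int rover = (unsigned int)rand() % remaining + low;

	while (remaining-- > 0) {
		if (rover != last_used && try_claim(rover)) {
			last_used = rover;
			return (int)rover;
		}
		if (++rover > high)
			rover = low;
	}
	return -1;	/* the kernel version returns -EADDRNOTAVAIL here */
}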
@@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device)

 static int __init cma_init(void)
 {
-	int ret, low, high, remaining;
-
-	get_random_bytes(&next_port, sizeof next_port);
-	inet_get_local_port_range(&low, &high);
-	remaining = (high - low) + 1;
-	next_port = ((unsigned int) next_port % remaining) + low;
+	int ret;

 	cma_wq = create_singlethread_workqueue("rdma_cm");
 	if (!cma_wq)
@@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	}

 	if (mad_reg_req) {
-		reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
+		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
 		if (!reg_req) {
 			ret = ERR_PTR(-ENOMEM);
 			goto error3;
 		}
-		/* Make a copy of the MAD registration request */
-		memcpy(reg_req, mad_reg_req, sizeof *reg_req);
 	}

 	/* Now, fill in the various structures */
@@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp)
 	file->filp = filp;
 	file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);

-	return 0;
+	return nonseekable_open(inode, filp);
 }

 static int ib_ucm_close(struct inode *inode, struct file *filp)
@@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = {
 	.release = ib_ucm_close,
 	.write = ib_ucm_write,
 	.poll = ib_ucm_poll,
+	.llseek = no_llseek,
 };

 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
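The same two-part pattern repeats in every character-device file below ("IB: Explicitly rule out llseek to avoid BKL in default_llseek()"): .open returns through nonseekable_open(), which marks the struct file non-seekable, and the fops gain an explicit .llseek = no_llseek so lseek() fails with -ESPIPE instead of falling back to default_llseek() and its BKL acquisition. A minimal sketch of the shape, with hypothetical driver names (2.6.34-era API):

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical device; the point is only the open/llseek pairing. */
static int example_open(struct inode *inode, struct file *filp)
{
	/* ... per-open setup ... */
	return nonseekable_open(inode, filp);	/* clear seekability flags */
}

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
	.open = example_open,
	.llseek = no_llseek,	/* lseek() returns -ESPIPE; no BKL taken */
};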
@@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp)

 	filp->private_data = file;
 	file->filp = filp;
-	return 0;
+
+	return nonseekable_open(inode, filp);
 }

 static int ucma_close(struct inode *inode, struct file *filp)
@@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = {
 	.release = ucma_close,
 	.write = ucma_write,
 	.poll = ucma_poll,
+	.llseek = no_llseek,
 };

 static struct miscdevice ucma_misc = {
@@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 {
 	struct ib_umad_port *port;
 	struct ib_umad_file *file;
-	int ret = 0;
+	int ret;

 	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
 	if (port)
@@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp)

 	list_add_tail(&file->port_list, &port->file_list);

+	ret = nonseekable_open(inode, filp);
+
 out:
 	mutex_unlock(&port->file_mutex);
 	return ret;
@@ -866,7 +868,8 @@ static const struct file_operations umad_fops = {
 	.compat_ioctl = ib_umad_compat_ioctl,
 #endif
 	.open = ib_umad_open,
-	.release = ib_umad_close
+	.release = ib_umad_close,
+	.llseek = no_llseek,
 };

 static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)

 	filp->private_data = port;

-	return 0;
+	return nonseekable_open(inode, filp);

 fail:
 	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
 static const struct file_operations umad_sm_fops = {
 	.owner = THIS_MODULE,
 	.open = ib_umad_sm_open,
-	.release = ib_umad_sm_close
+	.release = ib_umad_sm_close,
+	.llseek = no_llseek,
 };

 static struct ib_client umad_client = {
@@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = {
 	.read = ib_uverbs_event_read,
 	.poll = ib_uverbs_event_poll,
 	.release = ib_uverbs_event_close,
-	.fasync = ib_uverbs_event_fasync
+	.fasync = ib_uverbs_event_fasync,
+	.llseek = no_llseek,
 };

 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
@@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)

 	filp->private_data = file;

-	return 0;
+	return nonseekable_open(inode, filp);

 err_module:
 	module_put(dev->ib_dev->owner);
@@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = {
 	.owner = THIS_MODULE,
 	.write = ib_uverbs_write,
 	.open = ib_uverbs_open,
-	.release = ib_uverbs_close
+	.release = ib_uverbs_close,
+	.llseek = no_llseek,
 };

 static const struct file_operations uverbs_mmap_fops = {
@@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = {
 	.write = ib_uverbs_write,
 	.mmap = ib_uverbs_mmap,
 	.open = ib_uverbs_open,
-	.release = ib_uverbs_close
+	.release = ib_uverbs_close,
+	.llseek = no_llseek,
 };

 static struct ib_client uverbs_client = {
@@ -250,7 +250,7 @@ struct c2_array {
 struct sp_chunk {
 	struct sp_chunk *next;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	u16 head;
 	u16 shared_ptr[0];
 };
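Every amso1100 and cxgb3 hunk from here on is the same one-for-one substitution: the pci_unmap_* state helpers become their dma_unmap_* equivalents from <linux/dma-mapping.h>, which store the address to hand back at unmap time, work against a generic struct device rather than a pci_dev, and still compile away to nothing on platforms that do not need the saved address. A sketch of the pattern on a hypothetical structure:

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct ring_buf {
	void *vaddr;
	dma_addr_t dma;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* was DECLARE_PCI_UNMAP_ADDR(mapping); */
};

static void ring_buf_save(struct ring_buf *rb, dma_addr_t dma)
{
	rb->dma = dma;
	dma_unmap_addr_set(rb, mapping, dma);	/* was pci_unmap_addr_set() */
}

static dma_addr_t ring_buf_unmap_addr(struct ring_buf *rb)
{
	return dma_unmap_addr(rb, mapping);	/* was pci_unmap_addr() */
}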
@@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 		return -ENOMEM;

 	new_head->dma_addr = dma_addr;
-	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
+	dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);

 	new_head->next = NULL;
 	new_head->head = 0;
@@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 	while (root) {
 		next = root->next;
 		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
-				  pci_unmap_addr(root, mapping));
+				  dma_unmap_addr(root, mapping));
 		root = next;
 	}
 }
@@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
 	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
-			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
+			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
 }

 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
@@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		       NULL,	/* peer (currently unknown) */
 		       C2_MQ_HOST_TARGET);

-	pci_unmap_addr_set(mq, mapping, mq->host_dma);
+	dma_unmap_addr_set(mq, mapping, mq->host_dma);

 	return 0;
 }
@@ -71,7 +71,7 @@ struct c2_mq {
 		u8 __iomem *adapter;
 	} msg_pool;
 	dma_addr_t host_dma;
-	DECLARE_PCI_UNMAP_ADDR(mapping);
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	u16 hint_count;
 	u16 priv;
 	struct c2_mq_shared __iomem *peer;
@@ -50,7 +50,7 @@

 struct c2_buf_list {
 	void *buf;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
@@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		err = -ENOMEM;
 		goto bail1;
 	}
-	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+	dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
 	c2_mq_rep_init(&c2dev->rep_vq,
@@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 		err = -ENOMEM;
 		goto bail2;
 	}
-	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+	dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
 	pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
 		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
@@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 bail3:
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
+			  q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
 bail2:
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
+			  q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
 bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
 bail0:
@@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev)
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
 			  c2dev->aeq.msg_pool.host,
-			  pci_unmap_addr(&c2dev->aeq, mapping));
+			  dma_unmap_addr(&c2dev->aeq, mapping));

 	/* Free the verbs reply queue */
 	dma_free_coherent(&c2dev->pcidev->dev,
 			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
 			  c2dev->rep_vq.msg_pool.host,
-			  pci_unmap_addr(&c2dev->rep_vq, mapping));
+			  dma_unmap_addr(&c2dev->rep_vq, mapping));

 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
@@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 		kfree(cq->sw_queue);
 		return -ENOMEM;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, size);
 	setup.id = cq->cqid;
 	setup.base_addr = (u64) (cq->dma_addr);
@@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
 		goto err4;

 	memset(wq->queue, 0, depth * sizeof(union t3_wr));
-	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+	dma_unmap_addr_set(wq, mapping, wq->dma_addr);
 	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	if (!kernel_domain)
 		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
@@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (cq->size_log2))
 			  * sizeof(struct t3_cqe), cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
 	return err;
 }
@@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (wq->size_log2))
 			  * sizeof(union t3_wr), wq->queue,
-			  pci_unmap_addr(wq, mapping));
+			  dma_unmap_addr(wq, mapping));
 	kfree(wq->sq);
 	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
 	kfree(wq->rq);
@@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
 		err = -ENOMEM;
 		goto err;
 	}
-	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
+	dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
 			   rdev_p->ctrl_qp.dma_addr);
 	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
 	memset(rdev_p->ctrl_qp.workq, 0,
@@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << T3_CTRL_QP_SIZE_LOG2)
 			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
-			  pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
+			  dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
 	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
 }
@@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp {
 	wait_queue_head_t waitq;/* wait for RspQ/CQE msg */
 	union t3_wr *workq;	/* the work request queue */
 	dma_addr_t dma_addr;	/* pci bus address of the workq */
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	void __iomem *doorbell;
 };
@@ -691,7 +691,7 @@ struct t3_swrq {
 struct t3_wq {
 	union t3_wr *queue;		/* DMA accessable memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
-	DECLARE_PCI_UNMAP_ADDR(mapping)	/* unmap kruft */
+	DEFINE_DMA_UNMAP_ADDR(mapping);	/* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
 	u32 qpid;
 	u32 wptr;			/* idx to next available WR slot */
@@ -718,7 +718,7 @@ struct t3_cq {
 	u32 wptr;
 	u32 size_log2;
 	dma_addr_t dma_addr;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 	struct t3_cqe *queue;
 	struct t3_cqe *sw_queue;
 	u32 sw_rptr;
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);

-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
 static void iwch_event_handler(struct t3cdev *, u32, u32);
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
 module_param(cong_flavor, uint, 0644);
 MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);

 static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
@@ -151,7 +148,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2
 		return -EIO;
 	}
 	error = l2t_send(tdev, skb, l2e);
-	if (error)
+	if (error < 0)
 		kfree_skb(skb);
 	return error;
 }
@@ -167,7 +164,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
 		return -EIO;
 	}
 	error = cxgb3_ofld_send(tdev, skb);
-	if (error)
+	if (error < 0)
 		kfree_skb(skb);
 	return error;
 }
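The two `if (error)` → `if (error < 0)` changes above are "RDMA/cxgb3: Don't free skbs on NET_XMIT_* indications from LLD": the lower-level driver's send routines can return positive NET_XMIT_* congestion codes after they have already consumed the skb, so freeing it in that case risked a double free. A sketch of the corrected ownership rule, with send_fn standing in for l2t_send()/cxgb3_ofld_send():

#include <linux/skbuff.h>

/* Sketch only: a negative return means the LLD rejected the skb and the
 * caller still owns it; zero or a positive NET_XMIT_* code means the LLD
 * took ownership, so the caller must not free it. */
static int xmit_and_reclaim(void *dev, struct sk_buff *skb,
			    int (*send_fn)(void *, struct sk_buff *))
{
	int error = send_fn(dev, skb);

	if (error < 0)	/* was "if (error)", which also fired on NET_XMIT_* */
		kfree_skb(skb);
	return error;
}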
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }

-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	void *ep;
-	struct t3cdev *tdev;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		ep = *((void **) (skb->cb));
-		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
-		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
-		if (ret & CPL_RET_BUF_DONE)
-			kfree_skb(skb);
-
-		/*
-		 * ep was referenced in sched(), and is freed here.
-		 */
-		put_ep((struct iwch_ep_common *)ep);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,

 /*
  * All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
  */
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH]	= act_establish,
+	[CPL_ACT_OPEN_RPL]	= act_open_rpl,
+	[CPL_RX_DATA]		= rx_data,
+	[CPL_TX_DMA_ACK]	= tx_ack,
+	[CPL_ABORT_RPL_RSS]	= abort_rpl,
+	[CPL_ABORT_RPL]		= abort_rpl,
+	[CPL_PASS_OPEN_RPL]	= pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL]	= close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ]	= pass_accept_req,
+	[CPL_PASS_ESTABLISH]	= pass_establish,
+	[CPL_PEER_CLOSE]	= peer_close,
+	[CPL_ABORT_REQ_RSS]	= peer_abort,
+	[CPL_CLOSE_CON_RPL]	= close_con_rpl,
+	[CPL_RDMA_TERMINATE]	= terminate,
+	[CPL_RDMA_EC_STATUS]	= ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	void *ep;
+	struct t3cdev *tdev;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		ep = *((void **) (skb->cb));
+		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+
+		/*
+		 * ep was referenced in sched(), and is freed here.
+		 */
+		put_ep((struct iwch_ep_common *)ep);
+	}
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep_common *epc = ctx;
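This rearrangement is "RDMA/cxgb3: Shrink .text with compile-time init of handlers arrays": both handler tables become const arrays built with C99 designated initializers, so the tables are laid out at compile time in .rodata and the long runs of runtime assignments in iwch_cm_init() drop out of .text. A self-contained sketch of the idiom, with hypothetical opcodes and handlers:

typedef int (*handler_fn)(void *dev, void *msg);

enum { OP_OPEN, OP_CLOSE, OP_DATA, NUM_OPS };

static int handle_open(void *dev, void *msg)  { (void)dev; (void)msg; return 0; }
static int handle_close(void *dev, void *msg) { (void)dev; (void)msg; return 0; }

/* Initialized at compile time; entries not listed stay NULL, exactly like
 * the sparse t3c_handlers/work_handlers tables in this commit. */
static const handler_fn handlers[NUM_OPS] = {
	[OP_OPEN]  = handle_open,
	[OP_CLOSE] = handle_close,
};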
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	return CPL_RET_BUF_DONE;
 }

+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH]	= sched,
+	[CPL_ACT_OPEN_RPL]	= sched,
+	[CPL_RX_DATA]		= sched,
+	[CPL_TX_DMA_ACK]	= sched,
+	[CPL_ABORT_RPL_RSS]	= sched,
+	[CPL_ABORT_RPL]		= sched,
+	[CPL_PASS_OPEN_RPL]	= sched,
+	[CPL_CLOSE_LISTSRV_RPL]	= sched,
+	[CPL_PASS_ACCEPT_REQ]	= sched,
+	[CPL_PASS_ESTABLISH]	= sched,
+	[CPL_PEER_CLOSE]	= sched,
+	[CPL_CLOSE_CON_RPL]	= sched,
+	[CPL_ABORT_REQ_RSS]	= sched,
+	[CPL_RDMA_TERMINATE]	= sched,
+	[CPL_RDMA_EC_STATUS]	= sched,
+	[CPL_SET_TCB_RPL]	= set_tcb_rpl,
+};
+
 int __init iwch_cm_init(void)
 {
 	skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
 	if (!workq)
 		return -ENOMEM;

-	/*
-	 * All upcalls from the T3 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
-	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
-	t3c_handlers[CPL_RX_DATA] = sched;
-	t3c_handlers[CPL_TX_DMA_ACK] = sched;
-	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
-	t3c_handlers[CPL_ABORT_RPL] = sched;
-	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
-	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
-	t3c_handlers[CPL_PEER_CLOSE] = sched;
-	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
-	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
-	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
-	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
-	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
-
 	return 0;
 }
Some files were not shown because too many files have changed in this diff.