Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"A usual cycle for RDMA with a typical mix of driver and core subsystem
updates:
- Driver minor changes and bug fixes for mlx5, efa, rxe, vmw_pvrdma,
  hns, usnic, qib, qedr, cxgb4, bnxt_re
- Various rtrs fixes and updates
- Bug fix for mlx4 CM emulation for virtualization scenarios where
MRA wasn't working right
- Use tracepoints instead of pr_debug in the CM code
- Scrub the locking in ucma and cma to close more syzkaller bugs
- Use tasklet_setup in the subsystem
- Revert the idea that 'destroy' operations are not allowed to fail
at the driver level. This proved unworkable from a HW perspective.
- Revise how the umem API works so drivers make fewer mistakes using
  it (see the iterator sketch after the commit list below)
- XRC support for qedr
- Convert uverbs objects RWQ and MW to the new allocation scheme
- Large queue entry sizes for hns
- Use hmm_range_fault() for mlx5 On Demand Paging
- uverbs APIs to inspect the GID table instead of sysfs
- Move some of the RDMA code for building large page SGLs into
lib/scatterlist"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (191 commits)
RDMA/ucma: Fix use after free in destroy id flow
RDMA/rxe: Handle skb_clone() failure in rxe_recv.c
RDMA/rxe: Move the definitions for rxe_av.network_type to uAPI
RDMA: Explicitly pass in the dma_device to ib_register_device
lib/scatterlist: Do not limit max_segment to PAGE_ALIGNED values
IB/mlx4: Convert rej_tmout radix-tree to XArray
RDMA/rxe: Fix bug rejecting all multicast packets
RDMA/rxe: Fix skb lifetime in rxe_rcv_mcast_pkt()
RDMA/rxe: Remove duplicate entries in struct rxe_mr
IB/hfi,rdmavt,qib,opa_vnic: Update MAINTAINERS
IB/rdmavt: Fix sizeof mismatch
MAINTAINERS: CISCO VIC LOW LATENCY NIC DRIVER
RDMA/bnxt_re: Fix sizeof mismatch for allocation of pbl_tbl.
RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
RDMA/umem: Move to allocate SG table from pages
lib/scatterlist: Add support in dynamic allocation of SG table from pages
tools/testing/scatterlist: Show errors in human readable form
tools/testing/scatterlist: Rejuvenate bit-rotten test
RDMA/ipoib: Set rtnl_link_ops for ipoib interfaces
RDMA/uverbs: Expose the new GID query API to user space
...
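The umem revision referenced in the summary list replaces open-coded scatterlist walks in drivers with a DMA-block iterator. A minimal sketch of the new calling pattern follows; the wrapper function and the pbl array are illustrative assumptions, not code from this pull:

	/* Illustrative sketch: walk a umem in pg_sz-sized DMA blocks and
	 * record each block's bus address in a driver page list.
	 */
	#include <rdma/ib_umem.h>
	#include <rdma/ib_verbs.h>

	static void fill_pbl_from_umem(struct ib_umem *umem, u64 *pbl,
				       unsigned long pg_sz)
	{
		struct ib_block_iter biter;
		int i = 0;

		rdma_umem_for_each_dma_block(umem, &biter, pg_sz)
			pbl[i++] = rdma_block_iter_dma_address(&biter);
	}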
@@ -429,6 +429,7 @@ ForEachMacros:
  - 'rbtree_postorder_for_each_entry_safe'
  - 'rdma_for_each_block'
  - 'rdma_for_each_port'
  - 'rdma_umem_for_each_dma_block'
  - 'resource_list_for_each_entry'
  - 'resource_list_for_each_entry_safe'
  - 'rhl_for_each_entry_rcu'
@@ -258,23 +258,6 @@ Description:
		userspace ABI compatibility of umad & issm devices.

What:		/sys/class/infiniband_cm/ucmN/ibdev
Date:		Oct, 2005
KernelVersion:	v2.6.14
Contact:	linux-rdma@vger.kernel.org
Description:
		(RO) Display Infiniband (IB) device name

What:		/sys/class/infiniband_cm/abi_version
Date:		Oct, 2005
KernelVersion:	v2.6.14
Contact:	linux-rdma@vger.kernel.org
Description:
		(RO) Value is incremented if any changes are made that break
		userspace ABI compatibility of ucm devices.

What:		/sys/class/infiniband_verbs/uverbsN/ibdev
What:		/sys/class/infiniband_verbs/uverbsN/abi_version
Date:		Sept, 2005
MAINTAINERS
@@ -4256,7 +4256,6 @@ F:	drivers/net/ethernet/cisco/enic/

CISCO VIC LOW LATENCY NIC DRIVER
M:	Christian Benvenuti <benve@cisco.com>
M:	Nelson Escobar <neescoba@cisco.com>
M:	Parvi Kaustubhi <pkaustub@cisco.com>
S:	Supported
F:	drivers/infiniband/hw/usnic/

@@ -7793,8 +7792,8 @@ F:	include/linux/cciss*.h
F:	include/uapi/linux/cciss*.h

HFI1 DRIVER
M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
M:	Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
M:	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/hw/hfi1

@@ -12999,8 +12998,8 @@ S:	Maintained
F:	drivers/char/hw_random/optee-rng.c

OPA-VNIC DRIVER
M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
M:	Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
M:	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M:	Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/ulp/opa_vnic

@@ -14301,8 +14300,8 @@ F:	drivers/firmware/qemu_fw_cfg.c
F:	include/uapi/linux/qemu_fw_cfg.h

QIB DRIVER
M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
M:	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M:	Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/hw/qib/

@@ -14727,8 +14726,8 @@ S:	Maintained
F:	drivers/net/ethernet/rdc/r6040.c

RDMAVT - RDMA verbs software
M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
M:	Mike Marciniszyn <mike.marciniszyn@intel.com>
M:	Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
M:	Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
F:	drivers/infiniband/sw/rdmavt
@@ -806,30 +806,27 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	struct sg_table *sg;
	struct scatterlist *sge;
	size_t max_segment = 0;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	if (!sg)
		return ERR_PTR(-ENOMEM);

	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
		max_segment = SCATTERLIST_MAX_SEGMENT;
	ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					  nr_pages << PAGE_SHIFT,
					  max_segment, GFP_KERNEL);
	if (ret)
		goto out;

					  max_segment,
					  NULL, 0, GFP_KERNEL);
	if (IS_ERR(sge)) {
		kfree(sg);
		sg = ERR_CAST(sge);
	}
	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
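The hunk above is representative of the GPU-driver conversions that follow: __sg_alloc_table_from_pages() now returns the last scatterlist entry (or an ERR_PTR) instead of an int, and gains prv/left_pages arguments so a table can be built incrementally. A minimal sketch of the new one-shot calling convention; the wrapper function is illustrative, not from the patch:

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Illustrative caller of the reworked API: build an sg_table covering
	 * n_pages pages in one shot (prv = NULL, left_pages = 0).
	 */
	static struct sg_table *pages_to_sgt(struct page **pages,
					     unsigned int n_pages,
					     unsigned int max_segment)
	{
		struct sg_table *sgt;
		struct scatterlist *sge;

		sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
		if (!sgt)
			return ERR_PTR(-ENOMEM);

		sge = __sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					max_segment, NULL, 0, GFP_KERNEL);
		if (IS_ERR(sge)) {
			kfree(sgt);
			return ERR_CAST(sge);
		}
		return sgt;
	}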
@@ -403,6 +403,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct scatterlist *sg;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);

@@ -410,13 +411,12 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
					 num_pages << PAGE_SHIFT, max_segment,
					 NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		kfree(st);
		return ERR_PTR(ret);
		return ERR_CAST(sg);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
@@ -432,6 +432,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

@@ -454,13 +455,15 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->dev->dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
@@ -48,6 +48,7 @@ config INFINIBAND_ON_DEMAND_PAGING
	depends on INFINIBAND_USER_MEM
	select MMU_NOTIFIER
	select INTERVAL_TREE
	select HMM_MIRROR
	default y
	help
	  On demand paging support for the InfiniBand subsystem.
@@ -17,7 +17,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
ib_core-$(CONFIG_SECURITY_INFINIBAND) += security.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o

ib_cm-y := cm.o
ib_cm-y := cm.o cm_trace.o

iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
@@ -647,13 +647,12 @@ static void process_one_req(struct work_struct *_work)
	req->callback = NULL;

	spin_lock_bh(&lock);
	/*
	 * Although the work will normally have been canceled by the workqueue,
	 * it can still be requeued as long as it is on the req_list.
	 */
	cancel_delayed_work(&req->work);
	if (!list_empty(&req->list)) {
		/*
		 * Although the work will normally have been canceled by the
		 * workqueue, it can still be requeued as long as it is on the
		 * req_list.
		 */
		cancel_delayed_work(&req->work);
		list_del_init(&req->list);
		kfree(req);
	}
@@ -133,7 +133,11 @@ static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
}

static const char * const gid_type_str[] = {
	/* IB/RoCE v1 value is set for IB_GID_TYPE_IB and IB_GID_TYPE_ROCE for
	 * user space compatibility reasons.
	 */
	[IB_GID_TYPE_IB]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE]		= "IB/RoCE v1",
	[IB_GID_TYPE_ROCE_UDP_ENCAP]	= "RoCE v2",
};

@@ -1220,7 +1224,7 @@ EXPORT_SYMBOL(ib_get_cached_port_state);
const struct ib_gid_attr *
rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index)
{
	const struct ib_gid_attr *attr = ERR_PTR(-EINVAL);
	const struct ib_gid_attr *attr = ERR_PTR(-ENODATA);
	struct ib_gid_table *table;
	unsigned long flags;
@@ -1243,6 +1247,67 @@ done:
}
EXPORT_SYMBOL(rdma_get_gid_attr);

/**
 * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries.
 * @device: The device to query.
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 *	Entries array must be allocated to hold max_entries number of entries.
 * @num_entries: Updated to the number of entries that were successfully read.
 *
 * Returns number of entries on success or appropriate error code.
 */
ssize_t rdma_query_gid_table(struct ib_device *device,
			     struct ib_uverbs_gid_entry *entries,
			     size_t max_entries)
{
	const struct ib_gid_attr *gid_attr;
	ssize_t num_entries = 0, ret;
	struct ib_gid_table *table;
	unsigned int port_num, i;
	struct net_device *ndev;
	unsigned long flags;

	rdma_for_each_port(device, port_num) {
		if (!rdma_ib_or_roce(device, port_num))
			continue;

		table = rdma_gid_table(device, port_num);
		read_lock_irqsave(&table->rwlock, flags);
		for (i = 0; i < table->sz; i++) {
			if (!is_gid_entry_valid(table->data_vec[i]))
				continue;
			if (num_entries >= max_entries) {
				ret = -EINVAL;
				goto err;
			}

			gid_attr = &table->data_vec[i]->attr;

			memcpy(&entries->gid, &gid_attr->gid,
			       sizeof(gid_attr->gid));
			entries->gid_index = gid_attr->index;
			entries->port_num = gid_attr->port_num;
			entries->gid_type = gid_attr->gid_type;
			ndev = rcu_dereference_protected(
				gid_attr->ndev,
				lockdep_is_held(&table->rwlock));
			if (ndev)
				entries->netdev_ifindex = ndev->ifindex;

			num_entries++;
			entries++;
		}
		read_unlock_irqrestore(&table->rwlock, flags);
	}

	return num_entries;
err:
	read_unlock_irqrestore(&table->rwlock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_query_gid_table);
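A hedged sketch of how a caller might use the new export; the wrapper and its sizing policy are illustrative assumptions, not part of the patch:

	#include <rdma/ib_cache.h>
	#include <linux/slab.h>

	/* Illustrative: snapshot up to max_entries valid GID entries. */
	static ssize_t dump_gid_table(struct ib_device *device, size_t max_entries)
	{
		struct ib_uverbs_gid_entry *entries;
		ssize_t n;

		entries = kcalloc(max_entries, sizeof(*entries), GFP_KERNEL);
		if (!entries)
			return -ENOMEM;

		n = rdma_query_gid_table(device, entries, max_entries);
		/* n < 0 on error, including -EINVAL if the array is too small */

		kfree(entries);
		return n;
	}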
/**
 * rdma_put_gid_attr - Release reference to the GID attribute
 * @attr: Pointer to the GID attribute whose reference

@@ -1299,7 +1364,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
	struct ib_gid_table_entry *entry =
		container_of(attr, struct ib_gid_table_entry, attr);
	struct ib_device *device = entry->attr.device;
	struct net_device *ndev = ERR_PTR(-ENODEV);
	struct net_device *ndev = ERR_PTR(-EINVAL);
	u8 port_num = entry->attr.port_num;
	struct ib_gid_table *table;
	unsigned long flags;

@@ -1311,8 +1376,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
	valid = is_gid_entry_valid(table->data_vec[attr->index]);
	if (valid) {
		ndev = rcu_dereference(attr->ndev);
		if (!ndev ||
		    (ndev && ((READ_ONCE(ndev->flags) & IFF_UP) == 0)))
		if (!ndev)
			ndev = ERR_PTR(-ENODEV);
	}
	read_unlock_irqrestore(&table->rwlock, flags);
@@ -27,6 +27,7 @@
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");

@@ -201,7 +202,6 @@ static struct attribute *cm_counter_default_attrs[] = {
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;

@@ -1563,6 +1563,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	trace_icm_send_req(&cm_id_priv->id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {

@@ -1610,6 +1611,9 @@ static int cm_issue_rej(struct cm_port *port,
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	trace_icm_issue_rej(
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

@@ -1961,6 +1965,7 @@ static void cm_dup_req_handler(struct cm_work *work,
	}
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;

@@ -2124,8 +2129,7 @@ static int cm_req_handler(struct cm_work *work)

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id));
		trace_icm_no_listener_err(&cm_id_priv->id);
		cm_id_priv->id.state = IB_CM_IDLE;
		ret = -EINVAL;
		goto destroy;

@@ -2274,8 +2278,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
		ret = -EINVAL;
		goto out;
	}

@@ -2289,6 +2292,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	trace_icm_send_rep(cm_id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

@@ -2348,8 +2352,7 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		trace_icm_send_cm_rtu_err(cm_id);
		ret = -EINVAL;
		goto error;
	}

@@ -2361,6 +2364,7 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	trace_icm_send_rtu(cm_id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

@@ -2442,6 +2446,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
@@ -2465,7 +2470,7 @@ static int cm_rep_handler(struct cm_work *work)
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
		trace_icm_remote_no_priv_err(
			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		return -EINVAL;
	}

@@ -2479,11 +2484,10 @@ static int cm_rep_handler(struct cm_work *work)
		break;
	default:
		ret = -EINVAL;
		pr_debug(
			"%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
			__func__, cm_id_priv->id.state,
		trace_icm_rep_unknown_err(
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
			cm_id_priv->id.state);
		spin_unlock_irq(&cm_id_priv->lock);
		goto error;
	}

@@ -2500,7 +2504,7 @@ static int cm_rep_handler(struct cm_work *work)
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		pr_debug("%s: Failed to insert remote id %d\n", __func__,
		trace_icm_insert_failed_err(
			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		goto error;
	}

@@ -2517,9 +2521,8 @@ static int cm_rep_handler(struct cm_work *work)
			   IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			   NULL, 0);
		ret = -EINVAL;
		pr_debug(
			"%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
			__func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
		trace_icm_staleconn_err(
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));

		if (cur_cm_id_priv) {

@@ -2646,9 +2649,7 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_dreq_skipped(&cm_id_priv->id);
		return -EINVAL;
	}

@@ -2667,6 +2668,7 @@ static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	trace_icm_send_dreq(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);

@@ -2722,10 +2724,7 @@ static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		pr_debug(
			"%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
			__func__, be32_to_cpu(cm_id_priv->id.local_id),
			cm_id_priv->id.state);
		trace_icm_send_drep_err(&cm_id_priv->id);
		kfree(private_data);
		return -EINVAL;
	}

@@ -2740,6 +2739,7 @@ static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	trace_icm_send_drep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);

@@ -2789,6 +2789,9 @@ static int cm_issue_drep(struct cm_port *port,
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));

	trace_icm_issue_drep(
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

@@ -2810,9 +2813,8 @@ static int cm_dreq_handler(struct cm_work *work)
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		pr_debug(
			"%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
			__func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
		trace_icm_no_priv_err(
			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
		return -EINVAL;
	}

@@ -2858,9 +2860,7 @@ static int cm_dreq_handler(struct cm_work *work)
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_dreq_unknown_err(&cm_id_priv->id);
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2945,12 +2945,11 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
				 state);
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
		return -EINVAL;
	}

	trace_icm_send_rej(&cm_id_priv->id, reason);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);

@@ -3060,9 +3059,7 @@ static int cm_rej_handler(struct cm_work *work)
		}
		fallthrough;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_rej_unknown_err(&cm_id_priv->id);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

@@ -3118,9 +3115,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
		}
		fallthrough;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
		ret = -EINVAL;
		goto error1;
	}

@@ -3133,6 +3128,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		trace_icm_send_mra(cm_id);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;

@@ -3229,9 +3225,7 @@ static int cm_mra_handler(struct cm_work *work)
				counter[CM_MRA_COUNTER]);
		fallthrough;
	default:
		pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_mra_unknown_err(&cm_id_priv->id);
		goto out;
	}

@@ -3505,10 +3499,12 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
	if (cm_id->state == IB_CM_IDLE) {
		trace_icm_send_sidr_req(&cm_id_priv->id);
		ret = ib_post_send_mad(msg, NULL);
	else
	} else {
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

@@ -3670,6 +3666,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);

@@ -3767,8 +3764,7 @@ static void cm_process_send_error(struct ib_mad_send_buf *msg,
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
			     state, ib_wc_status_msg(wc_status));
	trace_icm_mad_send_err(state, wc_status);
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:

@@ -3891,7 +3887,7 @@ static void cm_work_handler(struct work_struct *_work)
		ret = cm_timewait_handler(work);
		break;
	default:
		pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
		trace_icm_handler_err(work->cm_event.event);
		ret = -EINVAL;
		break;
	}
@@ -3927,8 +3923,7 @@ static int cm_establish(struct ib_cm_id *cm_id)
		ret = -EISCONN;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		trace_icm_establish_err(cm_id);
		ret = -EINVAL;
		break;
	}

@@ -4125,9 +4120,7 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}

@@ -4175,9 +4168,7 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}

@@ -4237,9 +4228,7 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
@@ -4295,20 +4284,6 @@ static struct kobj_type cm_counter_obj_type = {
	.default_attrs = cm_counter_default_attrs
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

@@ -4511,12 +4486,6 @@ static int __init ib_cm_init(void)
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;

@@ -4531,8 +4500,6 @@ static int __init ib_cm_init(void)
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	return ret;
}

@@ -4553,7 +4520,6 @@ static void __exit ib_cm_cleanup(void)
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	WARN_ON(!xa_empty(&cm.local_id_table));
}
drivers/infiniband/core/cm_trace.c (new file)
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Trace points for the IB Connection Manager.
 *
 * Author: Chuck Lever <chuck.lever@oracle.com>
 *
 * Copyright (c) 2020, Oracle and/or its affiliates.
 */

#include <rdma/rdma_cm.h>
#include "cma_priv.h"

#define CREATE_TRACE_POINTS

#include "cm_trace.h"
drivers/infiniband/core/cm_trace.h (new file)
@@ -0,0 +1,414 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Trace point definitions for the RDMA Connect Manager.
 *
 * Author: Chuck Lever <chuck.lever@oracle.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ib_cma

#if !defined(_TRACE_IB_CMA_H) || defined(TRACE_HEADER_MULTI_READ)

#define _TRACE_IB_CMA_H

#include <linux/tracepoint.h>
#include <rdma/ib_cm.h>
#include <trace/events/rdma.h>

/*
 * enum ib_cm_state, from include/rdma/ib_cm.h
 */
#define IB_CM_STATE_LIST			\
	ib_cm_state(IDLE)			\
	ib_cm_state(LISTEN)			\
	ib_cm_state(REQ_SENT)			\
	ib_cm_state(REQ_RCVD)			\
	ib_cm_state(MRA_REQ_SENT)		\
	ib_cm_state(MRA_REQ_RCVD)		\
	ib_cm_state(REP_SENT)			\
	ib_cm_state(REP_RCVD)			\
	ib_cm_state(MRA_REP_SENT)		\
	ib_cm_state(MRA_REP_RCVD)		\
	ib_cm_state(ESTABLISHED)		\
	ib_cm_state(DREQ_SENT)			\
	ib_cm_state(DREQ_RCVD)			\
	ib_cm_state(TIMEWAIT)			\
	ib_cm_state(SIDR_REQ_SENT)		\
	ib_cm_state_end(SIDR_REQ_RCVD)

#undef ib_cm_state
#undef ib_cm_state_end
#define ib_cm_state(x)		TRACE_DEFINE_ENUM(IB_CM_##x);
#define ib_cm_state_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);

IB_CM_STATE_LIST

#undef ib_cm_state
#undef ib_cm_state_end
#define ib_cm_state(x)		{ IB_CM_##x, #x },
#define ib_cm_state_end(x)	{ IB_CM_##x, #x }

#define show_ib_cm_state(x) \
		__print_symbolic(x, IB_CM_STATE_LIST)

/*
 * enum ib_cm_lap_state, from include/rdma/ib_cm.h
 */
#define IB_CM_LAP_STATE_LIST			\
	ib_cm_lap_state(LAP_UNINIT)		\
	ib_cm_lap_state(LAP_IDLE)		\
	ib_cm_lap_state(LAP_SENT)		\
	ib_cm_lap_state(LAP_RCVD)		\
	ib_cm_lap_state(MRA_LAP_SENT)		\
	ib_cm_lap_state_end(MRA_LAP_RCVD)

#undef ib_cm_lap_state
#undef ib_cm_lap_state_end
#define ib_cm_lap_state(x)	TRACE_DEFINE_ENUM(IB_CM_##x);
#define ib_cm_lap_state_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);

IB_CM_LAP_STATE_LIST

#undef ib_cm_lap_state
#undef ib_cm_lap_state_end
#define ib_cm_lap_state(x)	{ IB_CM_##x, #x },
#define ib_cm_lap_state_end(x)	{ IB_CM_##x, #x }

#define show_ib_cm_lap_state(x) \
		__print_symbolic(x, IB_CM_LAP_STATE_LIST)

/*
 * enum ib_cm_rej_reason, from include/rdma/ib_cm.h
 */
#define IB_CM_REJ_REASON_LIST					\
	ib_cm_rej_reason(REJ_NO_QP)				\
	ib_cm_rej_reason(REJ_NO_EEC)				\
	ib_cm_rej_reason(REJ_NO_RESOURCES)			\
	ib_cm_rej_reason(REJ_TIMEOUT)				\
	ib_cm_rej_reason(REJ_UNSUPPORTED)			\
	ib_cm_rej_reason(REJ_INVALID_COMM_ID)			\
	ib_cm_rej_reason(REJ_INVALID_COMM_INSTANCE)		\
	ib_cm_rej_reason(REJ_INVALID_SERVICE_ID)		\
	ib_cm_rej_reason(REJ_INVALID_TRANSPORT_TYPE)		\
	ib_cm_rej_reason(REJ_STALE_CONN)			\
	ib_cm_rej_reason(REJ_RDC_NOT_EXIST)			\
	ib_cm_rej_reason(REJ_INVALID_GID)			\
	ib_cm_rej_reason(REJ_INVALID_LID)			\
	ib_cm_rej_reason(REJ_INVALID_SL)			\
	ib_cm_rej_reason(REJ_INVALID_TRAFFIC_CLASS)		\
	ib_cm_rej_reason(REJ_INVALID_HOP_LIMIT)			\
	ib_cm_rej_reason(REJ_INVALID_PACKET_RATE)		\
	ib_cm_rej_reason(REJ_INVALID_ALT_GID)			\
	ib_cm_rej_reason(REJ_INVALID_ALT_LID)			\
	ib_cm_rej_reason(REJ_INVALID_ALT_SL)			\
	ib_cm_rej_reason(REJ_INVALID_ALT_TRAFFIC_CLASS)		\
	ib_cm_rej_reason(REJ_INVALID_ALT_HOP_LIMIT)		\
	ib_cm_rej_reason(REJ_INVALID_ALT_PACKET_RATE)		\
	ib_cm_rej_reason(REJ_PORT_CM_REDIRECT)			\
	ib_cm_rej_reason(REJ_PORT_REDIRECT)			\
	ib_cm_rej_reason(REJ_INVALID_MTU)			\
	ib_cm_rej_reason(REJ_INSUFFICIENT_RESP_RESOURCES)	\
	ib_cm_rej_reason(REJ_CONSUMER_DEFINED)			\
	ib_cm_rej_reason(REJ_INVALID_RNR_RETRY)			\
	ib_cm_rej_reason(REJ_DUPLICATE_LOCAL_COMM_ID)		\
	ib_cm_rej_reason(REJ_INVALID_CLASS_VERSION)		\
	ib_cm_rej_reason(REJ_INVALID_FLOW_LABEL)		\
	ib_cm_rej_reason(REJ_INVALID_ALT_FLOW_LABEL)		\
	ib_cm_rej_reason_end(REJ_VENDOR_OPTION_NOT_SUPPORTED)

#undef ib_cm_rej_reason
#undef ib_cm_rej_reason_end
#define ib_cm_rej_reason(x)	TRACE_DEFINE_ENUM(IB_CM_##x);
#define ib_cm_rej_reason_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);

IB_CM_REJ_REASON_LIST

#undef ib_cm_rej_reason
#undef ib_cm_rej_reason_end
#define ib_cm_rej_reason(x)	{ IB_CM_##x, #x },
#define ib_cm_rej_reason_end(x)	{ IB_CM_##x, #x }

#define show_ib_cm_rej_reason(x) \
		__print_symbolic(x, IB_CM_REJ_REASON_LIST)
DECLARE_EVENT_CLASS(icm_id_class,
	TP_PROTO(
		const struct ib_cm_id *cm_id
	),

	TP_ARGS(cm_id),

	TP_STRUCT__entry(
		__field(const void *, cm_id)	/* for eBPF scripts */
		__field(unsigned int, local_id)
		__field(unsigned int, remote_id)
		__field(unsigned long, state)
		__field(unsigned long, lap_state)
	),

	TP_fast_assign(
		__entry->cm_id = cm_id;
		__entry->local_id = be32_to_cpu(cm_id->local_id);
		__entry->remote_id = be32_to_cpu(cm_id->remote_id);
		__entry->state = cm_id->state;
		__entry->lap_state = cm_id->lap_state;
	),

	TP_printk("local_id=%u remote_id=%u state=%s lap_state=%s",
		__entry->local_id, __entry->remote_id,
		show_ib_cm_state(__entry->state),
		show_ib_cm_lap_state(__entry->lap_state)
	)
);

#define DEFINE_CM_SEND_EVENT(name)					\
		DEFINE_EVENT(icm_id_class,				\
				icm_send_##name,			\
				TP_PROTO(				\
					const struct ib_cm_id *cm_id	\
				),					\
				TP_ARGS(cm_id))

DEFINE_CM_SEND_EVENT(req);
DEFINE_CM_SEND_EVENT(rep);
DEFINE_CM_SEND_EVENT(dup_req);
DEFINE_CM_SEND_EVENT(dup_rep);
DEFINE_CM_SEND_EVENT(rtu);
DEFINE_CM_SEND_EVENT(mra);
DEFINE_CM_SEND_EVENT(sidr_req);
DEFINE_CM_SEND_EVENT(sidr_rep);
DEFINE_CM_SEND_EVENT(dreq);
DEFINE_CM_SEND_EVENT(drep);

TRACE_EVENT(icm_send_rej,
	TP_PROTO(
		const struct ib_cm_id *cm_id,
		enum ib_cm_rej_reason reason
	),

	TP_ARGS(cm_id, reason),

	TP_STRUCT__entry(
		__field(const void *, cm_id)
		__field(u32, local_id)
		__field(u32, remote_id)
		__field(unsigned long, state)
		__field(unsigned long, reason)
	),

	TP_fast_assign(
		__entry->cm_id = cm_id;
		__entry->local_id = be32_to_cpu(cm_id->local_id);
		__entry->remote_id = be32_to_cpu(cm_id->remote_id);
		__entry->state = cm_id->state;
		__entry->reason = reason;
	),

	TP_printk("local_id=%u remote_id=%u state=%s reason=%s",
		__entry->local_id, __entry->remote_id,
		show_ib_cm_state(__entry->state),
		show_ib_cm_rej_reason(__entry->reason)
	)
);

#define DEFINE_CM_ERR_EVENT(name)					\
		DEFINE_EVENT(icm_id_class,				\
				icm_##name##_err,			\
				TP_PROTO(				\
					const struct ib_cm_id *cm_id	\
				),					\
				TP_ARGS(cm_id))

DEFINE_CM_ERR_EVENT(send_cm_rtu);
DEFINE_CM_ERR_EVENT(establish);
DEFINE_CM_ERR_EVENT(no_listener);
DEFINE_CM_ERR_EVENT(send_drep);
DEFINE_CM_ERR_EVENT(dreq_unknown);
DEFINE_CM_ERR_EVENT(send_unknown_rej);
DEFINE_CM_ERR_EVENT(rej_unknown);
DEFINE_CM_ERR_EVENT(send_mra_unknown);
DEFINE_CM_ERR_EVENT(mra_unknown);
DEFINE_CM_ERR_EVENT(qp_init);
DEFINE_CM_ERR_EVENT(qp_rtr);
DEFINE_CM_ERR_EVENT(qp_rts);

DEFINE_EVENT(icm_id_class,				\
		icm_dreq_skipped,			\
		TP_PROTO(				\
			const struct ib_cm_id *cm_id	\
		),					\
		TP_ARGS(cm_id)				\
);

DECLARE_EVENT_CLASS(icm_local_class,
	TP_PROTO(
		unsigned int local_id,
		unsigned int remote_id
	),

	TP_ARGS(local_id, remote_id),

	TP_STRUCT__entry(
		__field(unsigned int, local_id)
		__field(unsigned int, remote_id)
	),

	TP_fast_assign(
		__entry->local_id = local_id;
		__entry->remote_id = remote_id;
	),

	TP_printk("local_id=%u remote_id=%u",
		__entry->local_id, __entry->remote_id
	)
);

#define DEFINE_CM_LOCAL_EVENT(name)					\
		DEFINE_EVENT(icm_local_class,				\
				icm_##name,				\
				TP_PROTO(				\
					unsigned int local_id,		\
					unsigned int remote_id		\
				),					\
				TP_ARGS(local_id, remote_id))

DEFINE_CM_LOCAL_EVENT(issue_rej);
DEFINE_CM_LOCAL_EVENT(issue_drep);
DEFINE_CM_LOCAL_EVENT(staleconn_err);
DEFINE_CM_LOCAL_EVENT(no_priv_err);

DECLARE_EVENT_CLASS(icm_remote_class,
	TP_PROTO(
		u32 remote_id
	),

	TP_ARGS(remote_id),

	TP_STRUCT__entry(
		__field(u32, remote_id)
	),

	TP_fast_assign(
		__entry->remote_id = remote_id;
	),

	TP_printk("remote_id=%u",
		__entry->remote_id
	)
);

#define DEFINE_CM_REMOTE_EVENT(name)					\
		DEFINE_EVENT(icm_remote_class,				\
				icm_##name,				\
				TP_PROTO(				\
					u32 remote_id			\
				),					\
				TP_ARGS(remote_id))

DEFINE_CM_REMOTE_EVENT(remote_no_priv_err);
DEFINE_CM_REMOTE_EVENT(insert_failed_err);
TRACE_EVENT(icm_send_rep_err,
	TP_PROTO(
		__be32 local_id,
		enum ib_cm_state state
	),

	TP_ARGS(local_id, state),

	TP_STRUCT__entry(
		__field(unsigned int, local_id)
		__field(unsigned long, state)
	),

	TP_fast_assign(
		__entry->local_id = be32_to_cpu(local_id);
		__entry->state = state;
	),

	TP_printk("local_id=%u state=%s",
		__entry->local_id, show_ib_cm_state(__entry->state)
	)
);

TRACE_EVENT(icm_rep_unknown_err,
	TP_PROTO(
		unsigned int local_id,
		unsigned int remote_id,
		enum ib_cm_state state
	),

	TP_ARGS(local_id, remote_id, state),

	TP_STRUCT__entry(
		__field(unsigned int, local_id)
		__field(unsigned int, remote_id)
		__field(unsigned long, state)
	),

	TP_fast_assign(
		__entry->local_id = local_id;
		__entry->remote_id = remote_id;
		__entry->state = state;
	),

	TP_printk("local_id=%u remote_id=%u state=%s",
		__entry->local_id, __entry->remote_id,
		show_ib_cm_state(__entry->state)
	)
);

TRACE_EVENT(icm_handler_err,
	TP_PROTO(
		enum ib_cm_event_type event
	),

	TP_ARGS(event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
	),

	TP_fast_assign(
		__entry->event = event;
	),

	TP_printk("unhandled event=%s",
		rdma_show_ib_cm_event(__entry->event)
	)
);

TRACE_EVENT(icm_mad_send_err,
	TP_PROTO(
		enum ib_cm_state state,
		enum ib_wc_status wc_status
	),

	TP_ARGS(state, wc_status),

	TP_STRUCT__entry(
		__field(unsigned long, state)
		__field(unsigned long, wc_status)
	),

	TP_fast_assign(
		__entry->state = state;
		__entry->wc_status = wc_status;
	),

	TP_printk("state=%s completion status=%s",
		show_ib_cm_state(__entry->state),
		rdma_show_wc_status(__entry->wc_status)
	)
);

#endif /* _TRACE_IB_CMA_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/infiniband/core
#define TRACE_INCLUDE_FILE cm_trace

#include <trace/define_trace.h>
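For context on how these definitions are consumed: cm_trace.c (shown earlier) defines CREATE_TRACE_POINTS before including this header, which instantiates the event bodies exactly once; every caller in cm.c then uses the generated trace_icm_*() functions. A hedged sketch of the call pattern, with an illustrative surrounding function:

	#include "cm_trace.h"	/* generated trace_icm_*() inline stubs */

	/* Illustrative call site, mirroring the cm.c hunks above: the
	 * tracepoint compiles to a static-branch no-op until the
	 * ib_cma:icm_send_req event is enabled via tracefs or eBPF.
	 */
	static void example_send_path(struct cm_id_private *cm_id_priv)
	{
		trace_icm_send_req(&cm_id_priv->id);
	}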
File diff suppressed because it is too large
@@ -123,16 +123,17 @@ static ssize_t default_roce_mode_store(struct config_item *item,
{
	struct cma_device *cma_dev;
	struct cma_dev_port_group *group;
	int gid_type = ib_cache_gid_parse_type_str(buf);
	int gid_type;
	ssize_t ret;

	if (gid_type < 0)
		return -EINVAL;

	ret = cma_configfs_params_get(item, &cma_dev, &group);
	if (ret)
		return ret;

	gid_type = ib_cache_gid_parse_type_str(buf);
	if (gid_type < 0)
		return -EINVAL;

	ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);

	cma_configfs_params_put(cma_dev);
@@ -17,46 +17,6 @@
#include <linux/tracepoint.h>
#include <trace/events/rdma.h>

/*
 * enum ib_cm_event_type, from include/rdma/ib_cm.h
 */
#define IB_CM_EVENT_LIST			\
	ib_cm_event(REQ_ERROR)			\
	ib_cm_event(REQ_RECEIVED)		\
	ib_cm_event(REP_ERROR)			\
	ib_cm_event(REP_RECEIVED)		\
	ib_cm_event(RTU_RECEIVED)		\
	ib_cm_event(USER_ESTABLISHED)		\
	ib_cm_event(DREQ_ERROR)			\
	ib_cm_event(DREQ_RECEIVED)		\
	ib_cm_event(DREP_RECEIVED)		\
	ib_cm_event(TIMEWAIT_EXIT)		\
	ib_cm_event(MRA_RECEIVED)		\
	ib_cm_event(REJ_RECEIVED)		\
	ib_cm_event(LAP_ERROR)			\
	ib_cm_event(LAP_RECEIVED)		\
	ib_cm_event(APR_RECEIVED)		\
	ib_cm_event(SIDR_REQ_ERROR)		\
	ib_cm_event(SIDR_REQ_RECEIVED)		\
	ib_cm_event_end(SIDR_REP_RECEIVED)

#undef ib_cm_event
#undef ib_cm_event_end

#define ib_cm_event(x)		TRACE_DEFINE_ENUM(IB_CM_##x);
#define ib_cm_event_end(x)	TRACE_DEFINE_ENUM(IB_CM_##x);

IB_CM_EVENT_LIST

#undef ib_cm_event
#undef ib_cm_event_end

#define ib_cm_event(x)		{ IB_CM_##x, #x },
#define ib_cm_event_end(x)	{ IB_CM_##x, #x }

#define rdma_show_ib_cm_event(x) \
		__print_symbolic(x, IB_CM_EVENT_LIST)


DECLARE_EVENT_CLASS(cma_fsm_class,
	TP_PROTO(
@@ -44,6 +44,7 @@
#include <rdma/ib_mad.h>
#include <rdma/restrack.h>
#include "mad_priv.h"
#include "restrack.h"

/* Total number of ports combined across all struct ib_devices's */
#define RDMA_MAX_PORTS 8192

@@ -352,6 +353,7 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
	/*
	 * We don't track XRC QPs for now, because they don't have PD
	 * and more importantly they are created internaly by driver,

@@ -359,14 +361,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
	 */
	is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
	if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
		qp->res.type = RDMA_RESTRACK_QP;
		if (uobj)
			rdma_restrack_uadd(&qp->res);
		else
			rdma_restrack_kadd(&qp->res);
	} else
		qp->res.valid = false;

		rdma_restrack_parent_name(&qp->res, &pd->res);
		rdma_restrack_add(&qp->res);
	}
	return qp;
}
@@ -80,8 +80,9 @@ static struct rdma_counter *rdma_counter_alloc(struct ib_device *dev, u8 port,

	counter->device    = dev;
	counter->port      = port;
	counter->res.type  = RDMA_RESTRACK_COUNTER;
	counter->stats     = dev->ops.counter_alloc_stats(counter);

	rdma_restrack_new(&counter->res, RDMA_RESTRACK_COUNTER);
	counter->stats = dev->ops.counter_alloc_stats(counter);
	if (!counter->stats)
		goto err_stats;

@@ -107,6 +108,7 @@ err_mode:
	mutex_unlock(&port_counter->lock);
	kfree(counter->stats);
err_stats:
	rdma_restrack_put(&counter->res);
	kfree(counter);
	return NULL;
}

@@ -248,13 +250,8 @@ next:
static void rdma_counter_res_add(struct rdma_counter *counter,
				 struct ib_qp *qp)
{
	if (rdma_is_kernel_res(&qp->res)) {
		rdma_restrack_set_task(&counter->res, qp->res.kern_name);
		rdma_restrack_kadd(&counter->res);
	} else {
		rdma_restrack_attach_task(&counter->res, qp->res.task);
		rdma_restrack_uadd(&counter->res);
	}
	rdma_restrack_parent_name(&counter->res, &qp->res);
	rdma_restrack_add(&counter->res);
}

static void counter_release(struct kref *kref)
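The two counters.c hunks above reflect the reworked restrack flow used throughout this series: rdma_restrack_new() initializes the entry at allocation time, rdma_restrack_parent_name() plus rdma_restrack_add() publish it once the object is bound, and rdma_restrack_put() drops it on the error path. A condensed sketch of the add side, with an illustrative function name:

	/* Illustrative only, modeled on rdma_counter_res_add() above. */
	static void counter_res_add_sketch(struct rdma_counter *counter,
					   struct ib_qp *qp)
	{
		/* Inherit the task/kernel name from the owning QP's entry. */
		rdma_restrack_parent_name(&counter->res, &qp->res);
		/* Publish the entry; kernel vs. user ownership is derived
		 * from the parent instead of separate kadd/uadd calls.
		 */
		rdma_restrack_add(&counter->res);
	}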
@@ -197,24 +197,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}

/**
 * __ib_alloc_cq_user - allocate a completion queue
 * __ib_alloc_cq - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @comp_vector:	HCA completion vectors for this CQ
 * @poll_ctx:		context to poll the CQ from.
 * @caller:		module owner name.
 * @udata:		Valid user data or NULL for kernel object
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
				 int nr_cqe, int comp_vector,
				 enum ib_poll_context poll_ctx,
				 const char *caller, struct ib_udata *udata)
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
			    int comp_vector, enum ib_poll_context poll_ctx,
			    const char *caller)
{
	struct ib_cq_init_attr cq_attr = {
		.cqe		= nr_cqe,

@@ -237,15 +235,13 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
	if (!cq->wc)
		goto out_free_cq;

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_set_task(&cq->res, caller);
	rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
	rdma_restrack_set_name(&cq->res, caller);

	ret = dev->ops.create_cq(cq, &cq_attr, NULL);
	if (ret)
		goto out_free_wc;

	rdma_restrack_kadd(&cq->res);

	rdma_dim_init(cq);

	switch (cq->poll_ctx) {

@@ -271,21 +267,22 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
		goto out_destroy_cq;
	}

	rdma_restrack_add(&cq->res);
	trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
	return cq;

out_destroy_cq:
	rdma_dim_destroy(cq);
	rdma_restrack_del(&cq->res);
	cq->device->ops.destroy_cq(cq, udata);
	cq->device->ops.destroy_cq(cq, NULL);
out_free_wc:
	rdma_restrack_put(&cq->res);
	kfree(cq->wc);
out_free_cq:
	kfree(cq);
	trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq_user);
EXPORT_SYMBOL(__ib_alloc_cq);

/**
 * __ib_alloc_cq_any - allocate a completion queue

@@ -310,18 +307,19 @@ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
			atomic_inc_return(&counter) %
			min_t(int, dev->num_comp_vectors, num_online_cpus());

	return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
				  caller, NULL);
	return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
			     caller);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);

/**
 * ib_free_cq_user - free a completion queue
 * ib_free_cq - free a completion queue
 * @cq:		completion queue to free.
 * @udata:	User data or NULL for kernel object
 */
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
void ib_free_cq(struct ib_cq *cq)
{
	int ret;

	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
		return;
	if (WARN_ON_ONCE(cq->cqe_used))

@@ -343,12 +341,13 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)

	rdma_dim_destroy(cq);
	trace_cq_free(cq);
	ret = cq->device->ops.destroy_cq(cq, NULL);
	WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
	rdma_restrack_del(&cq->res);
	cq->device->ops.destroy_cq(cq, udata);
	kfree(cq->wc);
	kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq_user);
EXPORT_SYMBOL(ib_free_cq);

void ib_cq_pool_init(struct ib_device *dev)
{
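The rename above removes the udata plumbing from the kernel CQ path; in-kernel ULPs keep calling the ib_alloc_cq()/ib_free_cq() wrappers, which now reach __ib_alloc_cq() directly. A sketch of typical in-kernel usage; the surrounding function and sizes are illustrative:

	#include <rdma/ib_verbs.h>

	/* Illustrative ULP setup: allocate a softirq-polled CQ and free it.
	 * ib_alloc_cq() is the wrapper macro around __ib_alloc_cq() shown
	 * above; it passes KBUILD_MODNAME as the caller string.
	 */
	static int ulp_setup_cq(struct ib_device *dev, void *ctx)
	{
		struct ib_cq *cq;

		cq = ib_alloc_cq(dev, ctx, 128, 0, IB_POLL_SOFTIRQ);
		if (IS_ERR(cq))
			return PTR_ERR(cq);

		ib_free_cq(cq);
		return 0;
	}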
@@ -1177,58 +1177,23 @@ out:
	return ret;
}

static void setup_dma_device(struct ib_device *device)
static void setup_dma_device(struct ib_device *device,
			     struct device *dma_device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);

#ifdef CONFIG_DMA_OPS
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else
#endif /* CONFIG_DMA_OPS */
	{
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}

	if (!device->dev.dma_parms) {
		if (parent) {
			/*
			 * The caller did not provide DMA parameters, so
			 * 'parent' probably represents a PCI device. The PCI
			 * core sets the maximum segment size to 64
			 * KB. Increase this parameter to 2 GB.
			 */
			device->dev.dma_parms = parent->dma_parms;
			dma_set_max_seg_size(device->dma_device, SZ_2G);
		} else {
			WARN_ON_ONCE(true);
		}
	/*
	 * If the caller does not provide a DMA capable device then the IB
	 * device will be used. In this case the caller should fully setup the
	 * ibdev for DMA. This usually means using dma_virt_ops.
	 */
#ifdef CONFIG_DMA_VIRT_OPS
	if (!dma_device) {
		device->dev.dma_ops = &dma_virt_ops;
		dma_device = &device->dev;
	}
#endif
	WARN_ON(!dma_device);
	device->dma_device = dma_device;
	WARN_ON(!device->dma_device->dma_parms);
}

/*

@@ -1241,7 +1206,6 @@ static int setup_device(struct ib_device *device)
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	setup_dma_device(device);
	ib_device_check_mandatory(device);

	ret = setup_port_data(device);
@@ -1354,7 +1318,10 @@ static void prevent_dealloc_device(struct ib_device *ib_dev)
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @name: unique string device name. This may include a '%' which will
 * cause a unique index to be added to the passed device name.
 *	  cause a unique index to be added to the passed device name.
 * @dma_device: pointer to a DMA-capable device. If %NULL, then the IB
 *		device will be used. In this case the caller should fully
 *		setup the ibdev for DMA. This usually means using dma_virt_ops.
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a

@@ -1365,7 +1332,8 @@ static void prevent_dealloc_device(struct ib_device *ib_dev)
 * asynchronously then the device pointer may become freed as soon as this
 * function returns.
 */
int ib_register_device(struct ib_device *device, const char *name)
int ib_register_device(struct ib_device *device, const char *name,
		       struct device *dma_device)
{
	int ret;

@@ -1373,6 +1341,7 @@ int ib_register_device(struct ib_device *device, const char *name)
	if (ret)
		return ret;

	setup_dma_device(device, dma_device);
	ret = setup_device(device);
	if (ret)
		return ret;
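With the new third argument, a hardware driver hands its DMA-capable parent device to ib_register_device() instead of configuring device->dma_device by hand; software drivers pass NULL and pick up dma_virt_ops per the #ifdef above. An illustrative call — the wrapper function and the "hwdev%d" name string are hypothetical:

	#include <linux/pci.h>
	#include <rdma/ib_verbs.h>

	/* Illustrative: a PCI-backed driver registers with its parent as
	 * the DMA device; the core wires up device->dma_device from it.
	 */
	static int hw_driver_register(struct ib_device *ibdev,
				      struct pci_dev *pdev)
	{
		return ib_register_device(ibdev, "hwdev%d", &pdev->dev);
	}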
@@ -2697,7 +2666,9 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
	SET_OBJ_SIZE(dev_ops, ib_ah);
	SET_OBJ_SIZE(dev_ops, ib_counters);
	SET_OBJ_SIZE(dev_ops, ib_cq);
	SET_OBJ_SIZE(dev_ops, ib_mw);
	SET_OBJ_SIZE(dev_ops, ib_pd);
	SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
	SET_OBJ_SIZE(dev_ops, ib_srq);
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
	SET_OBJ_SIZE(dev_ops, ib_xrcd);
Some files were not shown because too many files have changed in this diff