You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull infiniband/rdma updates from Doug Ledford:
"This is a fairly sizeable set of changes. I've put them through a
decent amount of testing prior to sending the pull request due to
that.
There are still a few fixups that I know are coming, but I wanted to
go ahead and get the big, sizable chunk into your hands sooner rather
than waiting for those last few fixups.
Of note is the fact that this creates what is intended to be a
temporary area in the drivers/staging tree specifically for some
cleanups and additions that are coming for the RDMA stack. We
deprecated two drivers (ipath and amso1100) and are waiting to hear
back if we can deprecate another one (ehca). We also put Intel's new
hfi1 driver into this area because it needs to be refactored and a
transfer library created out of the factored out code, and then it and
the qib driver and the soft-roce driver should all be modified to use
that library.
I expect drivers/staging/rdma to be around for three or four kernel
releases and then to go away as all of the work is completed and final
deletions of deprecated drivers are done.
Summary of changes for 4.3:
- Create drivers/staging/rdma
- Move amso1100 driver to staging/rdma and schedule for deletion
- Move ipath driver to staging/rdma and schedule for deletion
- Add hfi1 driver to staging/rdma and set TODO for move to regular
tree
- Initial support for namespaces to be used on RDMA devices
- Add RoCE GID table handling to the RDMA core caching code
- Infrastructure to support handling of devices with differing read
and write scatter gather capabilities
- Various iSER updates
- Kill off unsafe usage of global mr registrations
- Update SRP driver
- Misc mlx4 driver updates
- Support for the mr_alloc verb
- Support for a netlink interface between kernel and user space cache
daemon to speed path record queries and route resolution
- Initial support for safe hot removal of verbs devices"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (136 commits)
IB/ipoib: Suppress warning for send only join failures
IB/ipoib: Clean up send-only multicast joins
IB/srp: Fix possible protection fault
IB/core: Move SM class defines from ib_mad.h to ib_smi.h
IB/core: Remove unnecessary defines from ib_mad.h
IB/hfi1: Add PSM2 user space header to header_install
IB/hfi1: Add CSRs for CONFIG_SDMA_VERBOSITY
mlx5: Fix incorrect wc pkey_index assignment for GSI messages
IB/mlx5: avoid destroying a NULL mr in reg_user_mr error flow
IB/uverbs: reject invalid or unknown opcodes
IB/cxgb4: Fix if statement in pick_local_ip6adddrs
IB/sa: Fix rdma netlink message flags
IB/ucma: HW Device hot-removal support
IB/mlx4_ib: Disassociate support
IB/uverbs: Enable device removal when there are active user space applications
IB/uverbs: Explicitly pass ib_dev to uverbs commands
IB/uverbs: Fix race between ib_uverbs_open and remove_one
IB/uverbs: Fix reference counting usage of event files
IB/core: Make ib_dealloc_pd return void
IB/srp: Create an insecure all physical rkey only if needed
...
This commit is contained in:
@@ -64,3 +64,23 @@ MTHCA
|
||||
fw_ver - Firmware version
|
||||
hca_type - HCA type: "MT23108", "MT25208 (MT23108 compat mode)",
|
||||
or "MT25208"
|
||||
|
||||
HFI1
|
||||
|
||||
The hfi1 driver also creates these additional files:
|
||||
|
||||
hw_rev - hardware revision
|
||||
board_id - manufacturing board id
|
||||
tempsense - thermal sense information
|
||||
serial - board serial number
|
||||
nfreectxts - number of free user contexts
|
||||
nctxts - number of allowed contexts (PSM2)
|
||||
chip_reset - diagnostic (root only)
|
||||
boardversion - board version
|
||||
ports/1/
|
||||
CMgtA/
|
||||
cc_settings_bin - CCA tables used by PSM2
|
||||
cc_table_bin
|
||||
sc2v/ - 32 files (0 - 31) used to translate sl->vl
|
||||
sl2sc/ - 32 files (0 - 31) used to translate sl->sc
|
||||
vl2mtu/ - 16 (0 - 15) files used to determine MTU for vl
|
||||
|
||||
+8
-1
@@ -5341,6 +5341,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
|
||||
S: Supported
|
||||
F: Documentation/infiniband/
|
||||
F: drivers/infiniband/
|
||||
F: drivers/staging/rdma/
|
||||
F: include/uapi/linux/if_infiniband.h
|
||||
F: include/uapi/rdma/
|
||||
F: include/rdma/
|
||||
@@ -5598,7 +5599,7 @@ IPATH DRIVER
|
||||
M: Mike Marciniszyn <infinipath@intel.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/infiniband/hw/ipath/
|
||||
F: drivers/staging/rdma/ipath/
|
||||
|
||||
IPMI SUBSYSTEM
|
||||
M: Corey Minyard <minyard@acm.org>
|
||||
@@ -9976,6 +9977,12 @@ M: Arnaud Patard <arnaud.patard@rtp-net.org>
|
||||
S: Odd Fixes
|
||||
F: drivers/staging/xgifb/
|
||||
|
||||
HFI1 DRIVER
|
||||
M: Mike Marciniszyn <infinipath@intel.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/staging/rdma/hfi1
|
||||
|
||||
STARFIRE/DURALAN NETWORK DRIVER
|
||||
M: Ion Badulescu <ionut@badula.org>
|
||||
S: Odd Fixes
|
||||
|
||||
@@ -55,10 +55,8 @@ config INFINIBAND_ADDR_TRANS
|
||||
default y
|
||||
|
||||
source "drivers/infiniband/hw/mthca/Kconfig"
|
||||
source "drivers/infiniband/hw/ipath/Kconfig"
|
||||
source "drivers/infiniband/hw/qib/Kconfig"
|
||||
source "drivers/infiniband/hw/ehca/Kconfig"
|
||||
source "drivers/infiniband/hw/amso1100/Kconfig"
|
||||
source "drivers/infiniband/hw/cxgb3/Kconfig"
|
||||
source "drivers/infiniband/hw/cxgb4/Kconfig"
|
||||
source "drivers/infiniband/hw/mlx4/Kconfig"
|
||||
|
||||
@@ -9,7 +9,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
|
||||
$(user_access-y)
|
||||
|
||||
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
|
||||
device.o fmr_pool.o cache.o netlink.o
|
||||
device.o fmr_pool.o cache.o netlink.o \
|
||||
roce_gid_mgmt.o
|
||||
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
|
||||
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
|
||||
|
||||
|
||||
+680
-109
File diff suppressed because it is too large
Load Diff
+137
-78
@@ -58,7 +58,7 @@ MODULE_DESCRIPTION("InfiniBand CM");
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
|
||||
static void cm_add_one(struct ib_device *device);
|
||||
static void cm_remove_one(struct ib_device *device);
|
||||
static void cm_remove_one(struct ib_device *device, void *client_data);
|
||||
|
||||
static struct ib_client cm_client = {
|
||||
.name = "cm",
|
||||
@@ -213,13 +213,15 @@ struct cm_id_private {
|
||||
spinlock_t lock; /* Do not acquire inside cm.lock */
|
||||
struct completion comp;
|
||||
atomic_t refcount;
|
||||
/* Number of clients sharing this ib_cm_id. Only valid for listeners.
|
||||
* Protected by the cm.lock spinlock. */
|
||||
int listen_sharecount;
|
||||
|
||||
struct ib_mad_send_buf *msg;
|
||||
struct cm_timewait_info *timewait_info;
|
||||
/* todo: use alternate port on send failure */
|
||||
struct cm_av av;
|
||||
struct cm_av alt_av;
|
||||
struct ib_cm_compare_data *compare_data;
|
||||
|
||||
void *private_data;
|
||||
__be64 tid;
|
||||
@@ -440,40 +442,6 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
|
||||
return cm_id_priv;
|
||||
}
|
||||
|
||||
static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
|
||||
dst[i] = src[i] & mask[i];
|
||||
}
|
||||
|
||||
static int cm_compare_data(struct ib_cm_compare_data *src_data,
|
||||
struct ib_cm_compare_data *dst_data)
|
||||
{
|
||||
u32 src[IB_CM_COMPARE_SIZE];
|
||||
u32 dst[IB_CM_COMPARE_SIZE];
|
||||
|
||||
if (!src_data || !dst_data)
|
||||
return 0;
|
||||
|
||||
cm_mask_copy(src, src_data->data, dst_data->mask);
|
||||
cm_mask_copy(dst, dst_data->data, src_data->mask);
|
||||
return memcmp(src, dst, sizeof(src));
|
||||
}
|
||||
|
||||
static int cm_compare_private_data(u32 *private_data,
|
||||
struct ib_cm_compare_data *dst_data)
|
||||
{
|
||||
u32 src[IB_CM_COMPARE_SIZE];
|
||||
|
||||
if (!dst_data)
|
||||
return 0;
|
||||
|
||||
cm_mask_copy(src, private_data, dst_data->mask);
|
||||
return memcmp(src, dst_data->data, sizeof(src));
|
||||
}
|
||||
|
||||
/*
|
||||
* Trivial helpers to strip endian annotation and compare; the
|
||||
* endianness doesn't actually matter since we just need a stable
|
||||
@@ -506,18 +474,14 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
|
||||
struct cm_id_private *cur_cm_id_priv;
|
||||
__be64 service_id = cm_id_priv->id.service_id;
|
||||
__be64 service_mask = cm_id_priv->id.service_mask;
|
||||
int data_cmp;
|
||||
|
||||
while (*link) {
|
||||
parent = *link;
|
||||
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
|
||||
service_node);
|
||||
data_cmp = cm_compare_data(cm_id_priv->compare_data,
|
||||
cur_cm_id_priv->compare_data);
|
||||
if ((cur_cm_id_priv->id.service_mask & service_id) ==
|
||||
(service_mask & cur_cm_id_priv->id.service_id) &&
|
||||
(cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
|
||||
!data_cmp)
|
||||
(cm_id_priv->id.device == cur_cm_id_priv->id.device))
|
||||
return cur_cm_id_priv;
|
||||
|
||||
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
|
||||
@@ -528,8 +492,6 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
|
||||
link = &(*link)->rb_left;
|
||||
else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
|
||||
link = &(*link)->rb_right;
|
||||
else if (data_cmp < 0)
|
||||
link = &(*link)->rb_left;
|
||||
else
|
||||
link = &(*link)->rb_right;
|
||||
}
|
||||
@@ -539,20 +501,16 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
|
||||
}
|
||||
|
||||
static struct cm_id_private * cm_find_listen(struct ib_device *device,
|
||||
__be64 service_id,
|
||||
u32 *private_data)
|
||||
__be64 service_id)
|
||||
{
|
||||
struct rb_node *node = cm.listen_service_table.rb_node;
|
||||
struct cm_id_private *cm_id_priv;
|
||||
int data_cmp;
|
||||
|
||||
while (node) {
|
||||
cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
|
||||
data_cmp = cm_compare_private_data(private_data,
|
||||
cm_id_priv->compare_data);
|
||||
if ((cm_id_priv->id.service_mask & service_id) ==
|
||||
cm_id_priv->id.service_id &&
|
||||
(cm_id_priv->id.device == device) && !data_cmp)
|
||||
(cm_id_priv->id.device == device))
|
||||
return cm_id_priv;
|
||||
|
||||
if (device < cm_id_priv->id.device)
|
||||
@@ -563,8 +521,6 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
|
||||
node = node->rb_left;
|
||||
else if (be64_gt(service_id, cm_id_priv->id.service_id))
|
||||
node = node->rb_right;
|
||||
else if (data_cmp < 0)
|
||||
node = node->rb_left;
|
||||
else
|
||||
node = node->rb_right;
|
||||
}
|
||||
@@ -859,9 +815,15 @@ retest:
|
||||
spin_lock_irq(&cm_id_priv->lock);
|
||||
switch (cm_id->state) {
|
||||
case IB_CM_LISTEN:
|
||||
cm_id->state = IB_CM_IDLE;
|
||||
spin_unlock_irq(&cm_id_priv->lock);
|
||||
|
||||
spin_lock_irq(&cm.lock);
|
||||
if (--cm_id_priv->listen_sharecount > 0) {
|
||||
/* The id is still shared. */
|
||||
cm_deref_id(cm_id_priv);
|
||||
spin_unlock_irq(&cm.lock);
|
||||
return;
|
||||
}
|
||||
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
|
||||
spin_unlock_irq(&cm.lock);
|
||||
break;
|
||||
@@ -930,7 +892,6 @@ retest:
|
||||
wait_for_completion(&cm_id_priv->comp);
|
||||
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
|
||||
cm_free_work(work);
|
||||
kfree(cm_id_priv->compare_data);
|
||||
kfree(cm_id_priv->private_data);
|
||||
kfree(cm_id_priv);
|
||||
}
|
||||
@@ -941,11 +902,23 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id)
|
||||
}
|
||||
EXPORT_SYMBOL(ib_destroy_cm_id);
|
||||
|
||||
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
|
||||
struct ib_cm_compare_data *compare_data)
|
||||
/**
|
||||
* __ib_cm_listen - Initiates listening on the specified service ID for
|
||||
* connection and service ID resolution requests.
|
||||
* @cm_id: Connection identifier associated with the listen request.
|
||||
* @service_id: Service identifier matched against incoming connection
|
||||
* and service ID resolution requests. The service ID should be specified
|
||||
* network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
|
||||
* assign a service ID to the caller.
|
||||
* @service_mask: Mask applied to service ID used to listen across a
|
||||
* range of service IDs. If set to 0, the service ID is matched
|
||||
* exactly. This parameter is ignored if %service_id is set to
|
||||
* IB_CM_ASSIGN_SERVICE_ID.
|
||||
*/
|
||||
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
|
||||
__be64 service_mask)
|
||||
{
|
||||
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
|
||||
@@ -958,20 +931,9 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
|
||||
if (cm_id->state != IB_CM_IDLE)
|
||||
return -EINVAL;
|
||||
|
||||
if (compare_data) {
|
||||
cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
|
||||
GFP_KERNEL);
|
||||
if (!cm_id_priv->compare_data)
|
||||
return -ENOMEM;
|
||||
cm_mask_copy(cm_id_priv->compare_data->data,
|
||||
compare_data->data, compare_data->mask);
|
||||
memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
|
||||
sizeof(compare_data->mask));
|
||||
}
|
||||
|
||||
cm_id->state = IB_CM_LISTEN;
|
||||
++cm_id_priv->listen_sharecount;
|
||||
|
||||
spin_lock_irqsave(&cm.lock, flags);
|
||||
if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
|
||||
cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
|
||||
cm_id->service_mask = ~cpu_to_be64(0);
|
||||
@@ -980,18 +942,95 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
|
||||
cm_id->service_mask = service_mask;
|
||||
}
|
||||
cur_cm_id_priv = cm_insert_listen(cm_id_priv);
|
||||
spin_unlock_irqrestore(&cm.lock, flags);
|
||||
|
||||
if (cur_cm_id_priv) {
|
||||
cm_id->state = IB_CM_IDLE;
|
||||
kfree(cm_id_priv->compare_data);
|
||||
cm_id_priv->compare_data = NULL;
|
||||
--cm_id_priv->listen_sharecount;
|
||||
ret = -EBUSY;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&cm.lock, flags);
|
||||
ret = __ib_cm_listen(cm_id, service_id, service_mask);
|
||||
spin_unlock_irqrestore(&cm.lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ib_cm_listen);
|
||||
|
||||
/**
|
||||
* Create a new listening ib_cm_id and listen on the given service ID.
|
||||
*
|
||||
* If there's an existing ID listening on that same device and service ID,
|
||||
* return it.
|
||||
*
|
||||
* @device: Device associated with the cm_id. All related communication will
|
||||
* be associated with the specified device.
|
||||
* @cm_handler: Callback invoked to notify the user of CM events.
|
||||
* @service_id: Service identifier matched against incoming connection
|
||||
* and service ID resolution requests. The service ID should be specified
|
||||
* network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
|
||||
* assign a service ID to the caller.
|
||||
*
|
||||
* Callers should call ib_destroy_cm_id when done with the listener ID.
|
||||
*/
|
||||
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
|
||||
ib_cm_handler cm_handler,
|
||||
__be64 service_id)
|
||||
{
|
||||
struct cm_id_private *cm_id_priv;
|
||||
struct ib_cm_id *cm_id;
|
||||
unsigned long flags;
|
||||
int err = 0;
|
||||
|
||||
/* Create an ID in advance, since the creation may sleep */
|
||||
cm_id = ib_create_cm_id(device, cm_handler, NULL);
|
||||
if (IS_ERR(cm_id))
|
||||
return cm_id;
|
||||
|
||||
spin_lock_irqsave(&cm.lock, flags);
|
||||
|
||||
if (service_id == IB_CM_ASSIGN_SERVICE_ID)
|
||||
goto new_id;
|
||||
|
||||
/* Find an existing ID */
|
||||
cm_id_priv = cm_find_listen(device, service_id);
|
||||
if (cm_id_priv) {
|
||||
if (cm_id->cm_handler != cm_handler || cm_id->context) {
|
||||
/* Sharing an ib_cm_id with different handlers is not
|
||||
* supported */
|
||||
spin_unlock_irqrestore(&cm.lock, flags);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
atomic_inc(&cm_id_priv->refcount);
|
||||
++cm_id_priv->listen_sharecount;
|
||||
spin_unlock_irqrestore(&cm.lock, flags);
|
||||
|
||||
ib_destroy_cm_id(cm_id);
|
||||
cm_id = &cm_id_priv->id;
|
||||
return cm_id;
|
||||
}
|
||||
|
||||
new_id:
|
||||
/* Use newly created ID */
|
||||
err = __ib_cm_listen(cm_id, service_id, 0);
|
||||
|
||||
spin_unlock_irqrestore(&cm.lock, flags);
|
||||
|
||||
if (err) {
|
||||
ib_destroy_cm_id(cm_id);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
return cm_id;
|
||||
}
|
||||
EXPORT_SYMBOL(ib_cm_insert_listen);
|
||||
|
||||
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
|
||||
enum cm_msg_sequence msg_seq)
|
||||
{
|
||||
@@ -1268,6 +1307,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
|
||||
primary_path->packet_life_time =
|
||||
cm_req_get_primary_local_ack_timeout(req_msg);
|
||||
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
|
||||
primary_path->service_id = req_msg->service_id;
|
||||
|
||||
if (req_msg->alt_local_lid) {
|
||||
memset(alt_path, 0, sizeof *alt_path);
|
||||
@@ -1289,9 +1329,28 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
|
||||
alt_path->packet_life_time =
|
||||
cm_req_get_alt_local_ack_timeout(req_msg);
|
||||
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
|
||||
alt_path->service_id = req_msg->service_id;
|
||||
}
|
||||
}
|
||||
|
||||
static u16 cm_get_bth_pkey(struct cm_work *work)
|
||||
{
|
||||
struct ib_device *ib_dev = work->port->cm_dev->ib_device;
|
||||
u8 port_num = work->port->port_num;
|
||||
u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
|
||||
u16 pkey;
|
||||
int ret;
|
||||
|
||||
ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
|
||||
if (ret) {
|
||||
dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
|
||||
port_num, pkey_index, ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return pkey;
|
||||
}
|
||||
|
||||
static void cm_format_req_event(struct cm_work *work,
|
||||
struct cm_id_private *cm_id_priv,
|
||||
struct ib_cm_id *listen_id)
|
||||
@@ -1302,6 +1361,7 @@ static void cm_format_req_event(struct cm_work *work,
|
||||
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
|
||||
param = &work->cm_event.param.req_rcvd;
|
||||
param->listen_id = listen_id;
|
||||
param->bth_pkey = cm_get_bth_pkey(work);
|
||||
param->port = cm_id_priv->av.port->port_num;
|
||||
param->primary_path = &work->path[0];
|
||||
if (req_msg->alt_local_lid)
|
||||
@@ -1484,8 +1544,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
|
||||
|
||||
/* Find matching listen request. */
|
||||
listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
|
||||
req_msg->service_id,
|
||||
req_msg->private_data);
|
||||
req_msg->service_id);
|
||||
if (!listen_cm_id_priv) {
|
||||
cm_cleanup_timewait(cm_id_priv->timewait_info);
|
||||
spin_unlock_irq(&cm.lock);
|
||||
@@ -2992,6 +3051,8 @@ static void cm_format_sidr_req_event(struct cm_work *work,
|
||||
param = &work->cm_event.param.sidr_req_rcvd;
|
||||
param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
|
||||
param->listen_id = listen_id;
|
||||
param->service_id = sidr_req_msg->service_id;
|
||||
param->bth_pkey = cm_get_bth_pkey(work);
|
||||
param->port = work->port->port_num;
|
||||
work->cm_event.private_data = &sidr_req_msg->private_data;
|
||||
}
|
||||
@@ -3031,8 +3092,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
|
||||
}
|
||||
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
|
||||
cur_cm_id_priv = cm_find_listen(cm_id->device,
|
||||
sidr_req_msg->service_id,
|
||||
sidr_req_msg->private_data);
|
||||
sidr_req_msg->service_id);
|
||||
if (!cur_cm_id_priv) {
|
||||
spin_unlock_irq(&cm.lock);
|
||||
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
|
||||
@@ -3886,9 +3946,9 @@ free:
|
||||
kfree(cm_dev);
|
||||
}
|
||||
|
||||
static void cm_remove_one(struct ib_device *ib_device)
|
||||
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
|
||||
{
|
||||
struct cm_device *cm_dev;
|
||||
struct cm_device *cm_dev = client_data;
|
||||
struct cm_port *port;
|
||||
struct ib_port_modify port_modify = {
|
||||
.clr_port_cap_mask = IB_PORT_CM_SUP
|
||||
@@ -3896,7 +3956,6 @@ static void cm_remove_one(struct ib_device *ib_device)
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
cm_dev = ib_get_client_data(ib_device, &cm_client);
|
||||
if (!cm_dev)
|
||||
return;
|
||||
|
||||
|
||||
+492
-165
File diff suppressed because it is too large
Load Diff
@@ -43,12 +43,58 @@ int ib_device_register_sysfs(struct ib_device *device,
|
||||
u8, struct kobject *));
|
||||
void ib_device_unregister_sysfs(struct ib_device *device);
|
||||
|
||||
int ib_sysfs_setup(void);
|
||||
void ib_sysfs_cleanup(void);
|
||||
|
||||
int ib_cache_setup(void);
|
||||
void ib_cache_setup(void);
|
||||
void ib_cache_cleanup(void);
|
||||
|
||||
int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
|
||||
struct ib_qp_attr *qp_attr, int *qp_attr_mask);
|
||||
|
||||
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
|
||||
struct net_device *idev, void *cookie);
|
||||
|
||||
typedef int (*roce_netdev_filter)(struct ib_device *device, u8 port,
|
||||
struct net_device *idev, void *cookie);
|
||||
|
||||
void ib_enum_roce_netdev(struct ib_device *ib_dev,
|
||||
roce_netdev_filter filter,
|
||||
void *filter_cookie,
|
||||
roce_netdev_callback cb,
|
||||
void *cookie);
|
||||
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
|
||||
void *filter_cookie,
|
||||
roce_netdev_callback cb,
|
||||
void *cookie);
|
||||
|
||||
int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
|
||||
const union ib_gid *gid,
|
||||
u8 port, struct net_device *ndev,
|
||||
u16 *index);
|
||||
|
||||
enum ib_cache_gid_default_mode {
|
||||
IB_CACHE_GID_DEFAULT_MODE_SET,
|
||||
IB_CACHE_GID_DEFAULT_MODE_DELETE
|
||||
};
|
||||
|
||||
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *ndev,
|
||||
enum ib_cache_gid_default_mode mode);
|
||||
|
||||
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
|
||||
union ib_gid *gid, struct ib_gid_attr *attr);
|
||||
|
||||
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
|
||||
union ib_gid *gid, struct ib_gid_attr *attr);
|
||||
|
||||
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
|
||||
struct net_device *ndev);
|
||||
|
||||
int roce_gid_mgmt_init(void);
|
||||
void roce_gid_mgmt_cleanup(void);
|
||||
|
||||
int roce_rescan_device(struct ib_device *ib_dev);
|
||||
|
||||
int ib_cache_setup_one(struct ib_device *device);
|
||||
void ib_cache_cleanup_one(struct ib_device *device);
|
||||
void ib_cache_release_one(struct ib_device *device);
|
||||
|
||||
#endif /* _CORE_PRIV_H */
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -338,13 +338,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
|
||||
goto error1;
|
||||
}
|
||||
|
||||
mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
|
||||
IB_ACCESS_LOCAL_WRITE);
|
||||
if (IS_ERR(mad_agent_priv->agent.mr)) {
|
||||
ret = ERR_PTR(-ENOMEM);
|
||||
goto error2;
|
||||
}
|
||||
|
||||
if (mad_reg_req) {
|
||||
reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
|
||||
if (!reg_req) {
|
||||
@@ -429,8 +422,6 @@ error4:
|
||||
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
|
||||
kfree(reg_req);
|
||||
error3:
|
||||
ib_dereg_mr(mad_agent_priv->agent.mr);
|
||||
error2:
|
||||
kfree(mad_agent_priv);
|
||||
error1:
|
||||
return ret;
|
||||
@@ -590,7 +581,6 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
|
||||
wait_for_completion(&mad_agent_priv->comp);
|
||||
|
||||
kfree(mad_agent_priv->reg_req);
|
||||
ib_dereg_mr(mad_agent_priv->agent.mr);
|
||||
kfree(mad_agent_priv);
|
||||
}
|
||||
|
||||
@@ -1038,7 +1028,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
|
||||
|
||||
mad_send_wr->mad_agent_priv = mad_agent_priv;
|
||||
mad_send_wr->sg_list[0].length = hdr_len;
|
||||
mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
|
||||
mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
|
||||
|
||||
/* OPA MADs don't have to be the full 2048 bytes */
|
||||
if (opa && base_version == OPA_MGMT_BASE_VERSION &&
|
||||
@@ -1047,7 +1037,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
|
||||
else
|
||||
mad_send_wr->sg_list[1].length = mad_size - hdr_len;
|
||||
|
||||
mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
|
||||
mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
|
||||
|
||||
mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
|
||||
mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
|
||||
@@ -2885,7 +2875,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
|
||||
struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
|
||||
|
||||
/* Initialize common scatter list fields */
|
||||
sg_list.lkey = (*qp_info->port_priv->mr).lkey;
|
||||
sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
|
||||
|
||||
/* Initialize common receive WR fields */
|
||||
recv_wr.next = NULL;
|
||||
@@ -3201,13 +3191,6 @@ static int ib_mad_port_open(struct ib_device *device,
|
||||
goto error4;
|
||||
}
|
||||
|
||||
port_priv->mr = ib_get_dma_mr(port_priv->pd, IB_ACCESS_LOCAL_WRITE);
|
||||
if (IS_ERR(port_priv->mr)) {
|
||||
dev_err(&device->dev, "Couldn't get ib_mad DMA MR\n");
|
||||
ret = PTR_ERR(port_priv->mr);
|
||||
goto error5;
|
||||
}
|
||||
|
||||
if (has_smi) {
|
||||
ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
|
||||
if (ret)
|
||||
@@ -3248,8 +3231,6 @@ error8:
|
||||
error7:
|
||||
destroy_mad_qp(&port_priv->qp_info[0]);
|
||||
error6:
|
||||
ib_dereg_mr(port_priv->mr);
|
||||
error5:
|
||||
ib_dealloc_pd(port_priv->pd);
|
||||
error4:
|
||||
ib_destroy_cq(port_priv->cq);
|
||||
@@ -3284,7 +3265,6 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
|
||||
destroy_workqueue(port_priv->wq);
|
||||
destroy_mad_qp(&port_priv->qp_info[1]);
|
||||
destroy_mad_qp(&port_priv->qp_info[0]);
|
||||
ib_dereg_mr(port_priv->mr);
|
||||
ib_dealloc_pd(port_priv->pd);
|
||||
ib_destroy_cq(port_priv->cq);
|
||||
cleanup_recv_queue(&port_priv->qp_info[1]);
|
||||
@@ -3335,7 +3315,7 @@ error:
|
||||
}
|
||||
}
|
||||
|
||||
static void ib_mad_remove_device(struct ib_device *device)
|
||||
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
||||
@@ -199,7 +199,6 @@ struct ib_mad_port_private {
|
||||
int port_num;
|
||||
struct ib_cq *cq;
|
||||
struct ib_pd *pd;
|
||||
struct ib_mr *mr;
|
||||
|
||||
spinlock_t reg_lock;
|
||||
struct ib_mad_mgmt_version_table version[MAX_MGMT_VERSION];
|
||||
|
||||
@@ -43,7 +43,7 @@
|
||||
#include "sa.h"
|
||||
|
||||
static void mcast_add_one(struct ib_device *device);
|
||||
static void mcast_remove_one(struct ib_device *device);
|
||||
static void mcast_remove_one(struct ib_device *device, void *client_data);
|
||||
|
||||
static struct ib_client mcast_client = {
|
||||
.name = "ib_multicast",
|
||||
@@ -840,13 +840,12 @@ static void mcast_add_one(struct ib_device *device)
|
||||
ib_register_event_handler(&dev->event_handler);
|
||||
}
|
||||
|
||||
static void mcast_remove_one(struct ib_device *device)
|
||||
static void mcast_remove_one(struct ib_device *device, void *client_data)
|
||||
{
|
||||
struct mcast_device *dev;
|
||||
struct mcast_device *dev = client_data;
|
||||
struct mcast_port *port;
|
||||
int i;
|
||||
|
||||
dev = ib_get_client_data(device, &mcast_client);
|
||||
if (!dev)
|
||||
return;
|
||||
|
||||
|
||||
@@ -49,6 +49,14 @@ static DEFINE_MUTEX(ibnl_mutex);
|
||||
static struct sock *nls;
|
||||
static LIST_HEAD(client_list);
|
||||
|
||||
int ibnl_chk_listeners(unsigned int group)
|
||||
{
|
||||
if (netlink_has_listeners(nls, group) == 0)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ibnl_chk_listeners);
|
||||
|
||||
int ibnl_add_client(int index, int nops,
|
||||
const struct ibnl_client_cbs cb_table[])
|
||||
{
|
||||
@@ -151,6 +159,23 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
!client->cb_table[op].dump)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* For response or local service set_timeout request,
|
||||
* there is no need to use netlink_dump_start.
|
||||
*/
|
||||
if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
|
||||
(index == RDMA_NL_LS &&
|
||||
op == RDMA_NL_LS_OP_SET_TIMEOUT)) {
|
||||
struct netlink_callback cb = {
|
||||
.skb = skb,
|
||||
.nlh = nlh,
|
||||
.dump = client->cb_table[op].dump,
|
||||
.module = client->cb_table[op].module,
|
||||
};
|
||||
|
||||
return cb.dump(skb, &cb);
|
||||
}
|
||||
|
||||
{
|
||||
struct netlink_dump_control c = {
|
||||
.dump = client->cb_table[op].dump,
|
||||
@@ -165,9 +190,39 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void ibnl_rcv_reply_skb(struct sk_buff *skb)
|
||||
{
|
||||
struct nlmsghdr *nlh;
|
||||
int msglen;
|
||||
|
||||
/*
|
||||
* Process responses until there is no more message or the first
|
||||
* request. Generally speaking, it is not recommended to mix responses
|
||||
* with requests.
|
||||
*/
|
||||
while (skb->len >= nlmsg_total_size(0)) {
|
||||
nlh = nlmsg_hdr(skb);
|
||||
|
||||
if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
|
||||
return;
|
||||
|
||||
/* Handle response only */
|
||||
if (nlh->nlmsg_flags & NLM_F_REQUEST)
|
||||
return;
|
||||
|
||||
ibnl_rcv_msg(skb, nlh);
|
||||
|
||||
msglen = NLMSG_ALIGN(nlh->nlmsg_len);
|
||||
if (msglen > skb->len)
|
||||
msglen = skb->len;
|
||||
skb_pull(skb, msglen);
|
||||
}
|
||||
}
|
||||
|
||||
static void ibnl_rcv(struct sk_buff *skb)
|
||||
{
|
||||
mutex_lock(&ibnl_mutex);
|
||||
ibnl_rcv_reply_skb(skb);
|
||||
netlink_rcv_skb(skb, &ibnl_rcv_msg);
|
||||
mutex_unlock(&ibnl_mutex);
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -457,29 +457,6 @@ static struct kobj_type port_type = {
|
||||
.default_attrs = port_default_attrs
|
||||
};
|
||||
|
||||
static void ib_device_release(struct device *device)
|
||||
{
|
||||
struct ib_device *dev = container_of(device, struct ib_device, dev);
|
||||
|
||||
kfree(dev->port_immutable);
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
static int ib_device_uevent(struct device *device,
|
||||
struct kobj_uevent_env *env)
|
||||
{
|
||||
struct ib_device *dev = container_of(device, struct ib_device, dev);
|
||||
|
||||
if (add_uevent_var(env, "NAME=%s", dev->name))
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* It would be nice to pass the node GUID with the event...
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct attribute **
|
||||
alloc_group_attrs(ssize_t (*show)(struct ib_port *,
|
||||
struct port_attribute *, char *buf),
|
||||
@@ -702,12 +679,6 @@ static struct device_attribute *ib_class_attributes[] = {
|
||||
&dev_attr_node_desc
|
||||
};
|
||||
|
||||
static struct class ib_class = {
|
||||
.name = "infiniband",
|
||||
.dev_release = ib_device_release,
|
||||
.dev_uevent = ib_device_uevent,
|
||||
};
|
||||
|
||||
/* Show a given an attribute in the statistics group */
|
||||
static ssize_t show_protocol_stat(const struct device *device,
|
||||
struct device_attribute *attr, char *buf,
|
||||
@@ -846,14 +817,12 @@ int ib_device_register_sysfs(struct ib_device *device,
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
class_dev->class = &ib_class;
|
||||
class_dev->parent = device->dma_device;
|
||||
dev_set_name(class_dev, "%s", device->name);
|
||||
dev_set_drvdata(class_dev, device);
|
||||
device->dev.parent = device->dma_device;
|
||||
ret = dev_set_name(class_dev, "%s", device->name);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
INIT_LIST_HEAD(&device->port_list);
|
||||
|
||||
ret = device_register(class_dev);
|
||||
ret = device_add(class_dev);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
@@ -916,13 +885,3 @@ void ib_device_unregister_sysfs(struct ib_device *device)
|
||||
|
||||
device_unregister(&device->dev);
|
||||
}
|
||||
|
||||
int ib_sysfs_setup(void)
|
||||
{
|
||||
return class_register(&ib_class);
|
||||
}
|
||||
|
||||
void ib_sysfs_cleanup(void)
|
||||
{
|
||||
class_unregister(&ib_class);
|
||||
}
|
||||
|
||||
@@ -109,7 +109,7 @@ enum {
|
||||
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
|
||||
|
||||
static void ib_ucm_add_one(struct ib_device *device);
|
||||
static void ib_ucm_remove_one(struct ib_device *device);
|
||||
static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
|
||||
|
||||
static struct ib_client ucm_client = {
|
||||
.name = "ucm",
|
||||
@@ -658,8 +658,7 @@ static ssize_t ib_ucm_listen(struct ib_ucm_file *file,
|
||||
if (result)
|
||||
goto out;
|
||||
|
||||
result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask,
|
||||
NULL);
|
||||
result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask);
|
||||
out:
|
||||
ib_ucm_ctx_put(ctx);
|
||||
return result;
|
||||
@@ -1310,9 +1309,9 @@ err:
|
||||
return;
|
||||
}
|
||||
|
||||
static void ib_ucm_remove_one(struct ib_device *device)
|
||||
static void ib_ucm_remove_one(struct ib_device *device, void *client_data)
|
||||
{
|
||||
struct ib_ucm_device *ucm_dev = ib_get_client_data(device, &ucm_client);
|
||||
struct ib_ucm_device *ucm_dev = client_data;
|
||||
|
||||
if (!ucm_dev)
|
||||
return;
|
||||
|
||||
+132
-14
@@ -74,6 +74,7 @@ struct ucma_file {
|
||||
struct list_head ctx_list;
|
||||
struct list_head event_list;
|
||||
wait_queue_head_t poll_wait;
|
||||
struct workqueue_struct *close_wq;
|
||||
};
|
||||
|
||||
struct ucma_context {
|
||||
@@ -89,6 +90,13 @@ struct ucma_context {
|
||||
|
||||
struct list_head list;
|
||||
struct list_head mc_list;
|
||||
/* mark that device is in process of destroying the internal HW
|
||||
* resources, protected by the global mut
|
||||
*/
|
||||
int closing;
|
||||
/* sync between removal event and id destroy, protected by file mut */
|
||||
int destroying;
|
||||
struct work_struct close_work;
|
||||
};
|
||||
|
||||
struct ucma_multicast {
|
||||
@@ -107,6 +115,7 @@ struct ucma_event {
|
||||
struct list_head list;
|
||||
struct rdma_cm_id *cm_id;
|
||||
struct rdma_ucm_event_resp resp;
|
||||
struct work_struct close_work;
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(mut);
|
||||
@@ -132,8 +141,12 @@ static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
|
||||
|
||||
mutex_lock(&mut);
|
||||
ctx = _ucma_find_context(id, file);
|
||||
if (!IS_ERR(ctx))
|
||||
atomic_inc(&ctx->ref);
|
||||
if (!IS_ERR(ctx)) {
|
||||
if (ctx->closing)
|
||||
ctx = ERR_PTR(-EIO);
|
||||
else
|
||||
atomic_inc(&ctx->ref);
|
||||
}
|
||||
mutex_unlock(&mut);
|
||||
return ctx;
|
||||
}
|
||||
@@ -144,6 +157,28 @@ static void ucma_put_ctx(struct ucma_context *ctx)
|
||||
complete(&ctx->comp);
|
||||
}
|
||||
|
||||
static void ucma_close_event_id(struct work_struct *work)
|
||||
{
|
||||
struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
|
||||
|
||||
rdma_destroy_id(uevent_close->cm_id);
|
||||
kfree(uevent_close);
|
||||
}
|
||||
|
||||
static void ucma_close_id(struct work_struct *work)
|
||||
{
|
||||
struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);
|
||||
|
||||
/* once all inflight tasks are finished, we close all underlying
|
||||
* resources. The context is still alive till its explicit destryoing
|
||||
* by its creator.
|
||||
*/
|
||||
ucma_put_ctx(ctx);
|
||||
wait_for_completion(&ctx->comp);
|
||||
/* No new events will be generated after destroying the id. */
|
||||
rdma_destroy_id(ctx->cm_id);
|
||||
}
|
||||
|
||||
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
|
||||
{
|
||||
struct ucma_context *ctx;
|
||||
@@ -152,6 +187,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
|
||||
INIT_WORK(&ctx->close_work, ucma_close_id);
|
||||
atomic_set(&ctx->ref, 1);
|
||||
init_completion(&ctx->comp);
|
||||
INIT_LIST_HEAD(&ctx->mc_list);
|
||||
@@ -242,6 +278,44 @@ static void ucma_set_event_context(struct ucma_context *ctx,
|
||||
}
|
||||
}
|
||||
|
||||
/* Called with file->mut locked for the relevant context. */
|
||||
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
|
||||
{
|
||||
struct ucma_context *ctx = cm_id->context;
|
||||
struct ucma_event *con_req_eve;
|
||||
int event_found = 0;
|
||||
|
||||
if (ctx->destroying)
|
||||
return;
|
||||
|
||||
/* only if context is pointing to cm_id that it owns it and can be
|
||||
* queued to be closed, otherwise that cm_id is an inflight one that
|
||||
* is part of that context event list pending to be detached and
|
||||
* reattached to its new context as part of ucma_get_event,
|
||||
* handled separately below.
|
||||
*/
|
||||
if (ctx->cm_id == cm_id) {
|
||||
mutex_lock(&mut);
|
||||
ctx->closing = 1;
|
||||
mutex_unlock(&mut);
|
||||
queue_work(ctx->file->close_wq, &ctx->close_work);
|
||||
return;
|
||||
}
|
||||
|
||||
list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
|
||||
if (con_req_eve->cm_id == cm_id &&
|
||||
con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
|
||||
list_del(&con_req_eve->list);
|
||||
INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
|
||||
queue_work(ctx->file->close_wq, &con_req_eve->close_work);
|
||||
event_found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!event_found)
|
||||
printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
|
||||
}
|
||||
|
||||
static int ucma_event_handler(struct rdma_cm_id *cm_id,
|
||||
struct rdma_cm_event *event)
|
||||
{
|
||||
@@ -276,14 +350,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
|
||||
* We ignore events for new connections until userspace has set
|
||||
* their context. This can only happen if an error occurs on a
|
||||
* new connection before the user accepts it. This is okay,
|
||||
* since the accept will just fail later.
|
||||
* since the accept will just fail later. However, we do need
|
||||
* to release the underlying HW resources in case of a device
|
||||
* removal event.
|
||||
*/
|
||||
if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
|
||||
ucma_removal_event_handler(cm_id);
|
||||
|
||||
kfree(uevent);
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_add_tail(&uevent->list, &ctx->file->event_list);
|
||||
wake_up_interruptible(&ctx->file->poll_wait);
|
||||
if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
|
||||
ucma_removal_event_handler(cm_id);
|
||||
out:
|
||||
mutex_unlock(&ctx->file->mut);
|
||||
return ret;
|
||||
@@ -442,9 +523,15 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
|
||||
}
|
||||
|
||||
/*
|
||||
* We cannot hold file->mut when calling rdma_destroy_id() or we can
|
||||
* deadlock. We also acquire file->mut in ucma_event_handler(), and
|
||||
* rdma_destroy_id() will wait until all callbacks have completed.
|
||||
* ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
|
||||
* this point, no new events will be reported from the hardware. However, we
|
||||
* still need to cleanup the UCMA context for this ID. Specifically, there
|
||||
* might be events that have not yet been consumed by the user space software.
|
||||
* These might include pending connect requests which we have not completed
|
||||
* processing. We cannot call rdma_destroy_id while holding the lock of the
|
||||
* context (file->mut), as it might cause a deadlock. We therefore extract all
|
||||
* relevant events from the context pending events list while holding the
|
||||
* mutex. After that we release them as needed.
|
||||
*/
|
||||
static int ucma_free_ctx(struct ucma_context *ctx)
|
||||
{
|
||||
@@ -452,8 +539,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
|
||||
struct ucma_event *uevent, *tmp;
|
||||
LIST_HEAD(list);
|
||||
|
||||
/* No new events will be generated after destroying the id. */
|
||||
rdma_destroy_id(ctx->cm_id);
|
||||
|
||||
ucma_cleanup_multicast(ctx);
|
||||
|
||||
@@ -501,10 +586,24 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
|
||||
if (IS_ERR(ctx))
|
||||
return PTR_ERR(ctx);
|
||||
|
||||
ucma_put_ctx(ctx);
|
||||
wait_for_completion(&ctx->comp);
|
||||
resp.events_reported = ucma_free_ctx(ctx);
|
||||
mutex_lock(&ctx->file->mut);
|
||||
ctx->destroying = 1;
|
||||
mutex_unlock(&ctx->file->mut);
|
||||
|
||||
flush_workqueue(ctx->file->close_wq);
|
||||
/* At this point it's guaranteed that there is no inflight
|
||||
* closing task */
|
||||
mutex_lock(&mut);
|
||||
if (!ctx->closing) {
|
||||
mutex_unlock(&mut);
|
||||
ucma_put_ctx(ctx);
|
||||
wait_for_completion(&ctx->comp);
|
||||
rdma_destroy_id(ctx->cm_id);
|
||||
} else {
|
||||
mutex_unlock(&mut);
|
||||
}
|
||||
|
||||
resp.events_reported = ucma_free_ctx(ctx);
|
||||
if (copy_to_user((void __user *)(unsigned long)cmd.response,
|
||||
&resp, sizeof(resp)))
|
||||
ret = -EFAULT;
|
||||
@@ -1321,10 +1420,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
|
||||
mc = ERR_PTR(-ENOENT);
|
||||
else if (mc->ctx->file != file)
|
||||
mc = ERR_PTR(-EINVAL);
|
||||
else {
|
||||
else if (!atomic_inc_not_zero(&mc->ctx->ref))
|
||||
mc = ERR_PTR(-ENXIO);
|
||||
else
|
||||
idr_remove(&multicast_idr, mc->id);
|
||||
atomic_inc(&mc->ctx->ref);
|
||||
}
|
||||
mutex_unlock(&mut);
|
||||
|
||||
if (IS_ERR(mc)) {
|
||||
@@ -1529,6 +1628,7 @@ static int ucma_open(struct inode *inode, struct file *filp)
|
||||
INIT_LIST_HEAD(&file->ctx_list);
|
||||
init_waitqueue_head(&file->poll_wait);
|
||||
mutex_init(&file->mut);
|
||||
file->close_wq = create_singlethread_workqueue("ucma_close_id");
|
||||
|
||||
filp->private_data = file;
|
||||
file->filp = filp;
|
||||
@@ -1543,16 +1643,34 @@ static int ucma_close(struct inode *inode, struct file *filp)
|
||||
|
||||
mutex_lock(&file->mut);
|
||||
list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
|
||||
ctx->destroying = 1;
|
||||
mutex_unlock(&file->mut);
|
||||
|
||||
mutex_lock(&mut);
|
||||
idr_remove(&ctx_idr, ctx->id);
|
||||
mutex_unlock(&mut);
|
||||
|
||||
flush_workqueue(file->close_wq);
|
||||
/* At that step once ctx was marked as destroying and workqueue
|
||||
* was flushed we are safe from any inflights handlers that
|
||||
* might put other closing task.
|
||||
*/
|
||||
mutex_lock(&mut);
|
||||
if (!ctx->closing) {
|
||||
mutex_unlock(&mut);
|
||||
/* rdma_destroy_id ensures that no event handlers are
|
||||
* inflight for that id before releasing it.
|
||||
*/
|
||||
rdma_destroy_id(ctx->cm_id);
|
||||
} else {
|
||||
mutex_unlock(&mut);
|
||||
}
|
||||
|
||||
ucma_free_ctx(ctx);
|
||||
mutex_lock(&file->mut);
|
||||
}
|
||||
mutex_unlock(&file->mut);
|
||||
destroy_workqueue(file->close_wq);
|
||||
kfree(file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -133,7 +133,7 @@ static DEFINE_SPINLOCK(port_lock);
|
||||
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
|
||||
|
||||
static void ib_umad_add_one(struct ib_device *device);
|
||||
static void ib_umad_remove_one(struct ib_device *device);
|
||||
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
|
||||
|
||||
static void ib_umad_release_dev(struct kobject *kobj)
|
||||
{
|
||||
@@ -1322,9 +1322,9 @@ free:
|
||||
kobject_put(&umad_dev->kobj);
|
||||
}
|
||||
|
||||
static void ib_umad_remove_one(struct ib_device *device)
|
||||
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
|
||||
{
|
||||
struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
|
||||
struct ib_umad_device *umad_dev = client_data;
|
||||
int i;
|
||||
|
||||
if (!umad_dev)
|
||||
|
||||
@@ -85,15 +85,20 @@
|
||||
*/
|
||||
|
||||
struct ib_uverbs_device {
|
||||
struct kref ref;
|
||||
atomic_t refcount;
|
||||
int num_comp_vectors;
|
||||
struct completion comp;
|
||||
struct device *dev;
|
||||
struct ib_device *ib_dev;
|
||||
struct ib_device __rcu *ib_dev;
|
||||
int devnum;
|
||||
struct cdev cdev;
|
||||
struct rb_root xrcd_tree;
|
||||
struct mutex xrcd_tree_mutex;
|
||||
struct kobject kobj;
|
||||
struct srcu_struct disassociate_srcu;
|
||||
struct mutex lists_mutex; /* protect lists */
|
||||
struct list_head uverbs_file_list;
|
||||
struct list_head uverbs_events_file_list;
|
||||
};
|
||||
|
||||
struct ib_uverbs_event_file {
|
||||
@@ -105,6 +110,7 @@ struct ib_uverbs_event_file {
|
||||
wait_queue_head_t poll_wait;
|
||||
struct fasync_struct *async_queue;
|
||||
struct list_head event_list;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
struct ib_uverbs_file {
|
||||
@@ -114,6 +120,8 @@ struct ib_uverbs_file {
|
||||
struct ib_ucontext *ucontext;
|
||||
struct ib_event_handler event_handler;
|
||||
struct ib_uverbs_event_file *async_file;
|
||||
struct list_head list;
|
||||
int is_closed;
|
||||
};
|
||||
|
||||
struct ib_uverbs_event {
|
||||
@@ -177,7 +185,9 @@ extern struct idr ib_uverbs_rule_idr;
|
||||
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
|
||||
|
||||
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
|
||||
struct ib_device *ib_dev,
|
||||
int is_async);
|
||||
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *uverbs_file);
|
||||
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
|
||||
|
||||
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
|
||||
@@ -212,6 +222,7 @@ struct ib_uverbs_flow_spec {
|
||||
|
||||
#define IB_UVERBS_DECLARE_CMD(name) \
|
||||
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
|
||||
struct ib_device *ib_dev, \
|
||||
const char __user *buf, int in_len, \
|
||||
int out_len)
|
||||
|
||||
@@ -253,6 +264,7 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
|
||||
|
||||
#define IB_UVERBS_DECLARE_EX_CMD(name) \
|
||||
int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \
|
||||
struct ib_device *ib_dev, \
|
||||
struct ib_udata *ucore, \
|
||||
struct ib_udata *uhw)
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user