Merge branches 'i40iw', 'sriov' and 'hfi1' into k.o/for-4.6

Doug Ledford
2016-03-21 17:32:23 -04:00
224 changed files with 24076 additions and 17256 deletions
+2 -1
@@ -78,9 +78,10 @@ HFI1
 	chip_reset - diagnostic (root only)
 	boardversion - board version
 	ports/1/
-	       CMgtA/
+	       CCMgtA/
 	            cc_settings_bin - CCA tables used by PSM2
 	            cc_table_bin
+	            cc_prescan - enable prescaning for faster BECN response
 	       sc2v/ - 32 files (0 - 31) used to translate sl->vl
 	       sl2sc/ - 32 files (0 - 31) used to translate sl->sc
 	       vl2mtu/ - 16 (0 - 15) files used to determine MTU for vl
+6
@@ -9085,6 +9085,12 @@ L: rds-devel@oss.oracle.com (moderated for non-subscribers)
 S:	Supported
 F:	net/rds/
 
+RDMAVT - RDMA verbs software
+M:	Dennis Dalessandro <dennis.dalessandro@intel.com>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+F:	drivers/infiniband/sw/rdmavt
+
 READ-COPY UPDATE (RCU)
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:	Josh Triplett <josh@joshtriplett.org>
+2
@@ -83,4 +83,6 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig" source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig" source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
endif # INFINIBAND endif # INFINIBAND
+1
@@ -1,3 +1,4 @@
 obj-$(CONFIG_INFINIBAND)		+= core/
 obj-$(CONFIG_INFINIBAND)		+= hw/
 obj-$(CONFIG_INFINIBAND)		+= ulp/
+obj-$(CONFIG_INFINIBAND)		+= sw/
+7 -8
@@ -1043,8 +1043,8 @@ static void ib_cache_update(struct ib_device *device,
 	ret = ib_query_port(device, port, tprops);
 	if (ret) {
-		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
-		       ret, device->name);
+		pr_warn("ib_query_port failed (%d) for %s\n",
+			ret, device->name);
 		goto err;
 	}
@@ -1067,8 +1067,8 @@ static void ib_cache_update(struct ib_device *device,
 	for (i = 0; i < pkey_cache->table_len; ++i) {
 		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
 		if (ret) {
-			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
-			       ret, device->name, i);
+			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
+				ret, device->name, i);
 			goto err;
 		}
 	}
@@ -1078,8 +1078,8 @@ static void ib_cache_update(struct ib_device *device,
 		ret = ib_query_gid(device, port, i,
 				   gid_cache->table + i, NULL);
 		if (ret) {
-			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
-			       ret, device->name, i);
+			pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
+				ret, device->name, i);
 			goto err;
 		}
 	}
@@ -1161,8 +1161,7 @@ int ib_cache_setup_one(struct ib_device *device)
 			  GFP_KERNEL);
 	if (!device->cache.pkey_cache ||
 	    !device->cache.lmc_cache) {
-		printk(KERN_WARNING "Couldn't allocate cache "
-		       "for %s\n", device->name);
+		pr_warn("Couldn't allocate cache for %s\n", device->name);
 		return -ENOMEM;
 	}
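Note: the printk(KERN_WARNING ...) calls throughout this series become pr_warn() and friends. pr_warn(fmt, ...) is shorthand for printk(KERN_WARNING pr_fmt(fmt), ...), so a file can prefix all of its messages in one place by defining pr_fmt() before its includes. A minimal sketch of the idiom; the "ib_core: " prefix and helper here are illustrative, not something this series adds:

```c
#define pr_fmt(fmt) "ib_core: " fmt

#include <linux/printk.h>

static void report_query_failure(const char *name, int ret)
{
	/* emits e.g.: "ib_core: ib_query_port failed (-5) for mlx4_0" */
	pr_warn("ib_query_port failed (%d) for %s\n", ret, name);
}
```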
+15 -7
@@ -1206,6 +1206,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
 		req->has_gid	= true;
 		req->service_id	= req_param->primary_path->service_id;
 		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
+		if (req->pkey != req_param->bth_pkey)
+			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
+					    "RDMA CMA: in the future this may cause the request to be dropped\n",
+					    req_param->bth_pkey, req->pkey);
 		break;
 	case IB_CM_SIDR_REQ_RECEIVED:
 		req->device	= sidr_param->listen_id->device;
@@ -1213,6 +1217,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
 		req->has_gid	= false;
 		req->service_id	= sidr_param->service_id;
 		req->pkey	= sidr_param->pkey;
+		if (req->pkey != sidr_param->bth_pkey)
+			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
+					    "RDMA CMA: in the future this may cause the request to be dropped\n",
+					    sidr_param->bth_pkey, req->pkey);
 		break;
 	default:
 		return -EINVAL;
@@ -1713,7 +1721,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
 		break;
 	default:
-		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
 		       ib_event->event);
 		goto out;
 	}
@@ -2186,8 +2194,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
-		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
-		       "listening on device %s\n", ret, cma_dev->device->name);
+		pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
+			ret, cma_dev->device->name);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -3239,7 +3247,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		event.status = 0;
 		break;
 	default:
-		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
 		       ib_event->event);
 		goto out;
 	}
@@ -4003,8 +4011,8 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
 	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
 	    (net_eq(dev_net(ndev), dev_addr->net)) &&
 	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
-		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
-		       ndev->name, &id_priv->id);
+		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
+			ndev->name, &id_priv->id);
 		work = kzalloc(sizeof *work, GFP_KERNEL);
 		if (!work)
 			return -ENOMEM;
@@ -4287,7 +4295,7 @@ static int __init cma_init(void)
 		goto err;
 
 	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
-		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+		pr_warn("RDMA CMA: failed to add netlink callback\n");
 	cma_configfs_init();
 
 	return 0;
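Note: the two P_Key checks added above deliberately use pr_warn_ratelimited() rather than pr_warn(), so a flood of malformed connection requests cannot spam the log. A small sketch of the same pattern; the helper name is hypothetical:

```c
#include <linux/printk.h>

/* Warn, at most a few times per ratelimit window, when the BTH P_Key
 * does not match the P_Key taken from the path record. */
static void note_pkey_mismatch(u16 bth_pkey, u16 path_pkey)
{
	if (bth_pkey != path_pkey)
		pr_warn_ratelimited("got different BTH P_Key (0x%x) and path P_Key (0x%x)\n",
				    bth_pkey, path_pkey);
}
```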
+29 -15
@@ -115,8 +115,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
 	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
 		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
-			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
-			       device->name, mandatory_table[i].name);
+			pr_warn("Device %s is missing mandatory function %s\n",
+				device->name, mandatory_table[i].name);
 			return -EINVAL;
 		}
 	}
@@ -255,8 +255,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
 
 	context = kmalloc(sizeof *context, GFP_KERNEL);
 	if (!context) {
-		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
-		       device->name, client->name);
+		pr_warn("Couldn't allocate client context for %s/%s\n",
+			device->name, client->name);
 		return -ENOMEM;
 	}
@@ -343,28 +343,29 @@ int ib_register_device(struct ib_device *device,
 
 	ret = read_port_immutable(device);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
-		       device->name);
+		pr_warn("Couldn't create per port immutable data %s\n",
+			device->name);
 		goto out;
 	}
 
 	ret = ib_cache_setup_one(device);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
 		goto out;
 	}
 
 	memset(&device->attrs, 0, sizeof(device->attrs));
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't query the device attributes\n");
+		pr_warn("Couldn't query the device attributes\n");
+		ib_cache_cleanup_one(device);
 		goto out;
 	}
 
 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
-		       device->name);
+		pr_warn("Couldn't register device %s with driver model\n",
+			device->name);
 		ib_cache_cleanup_one(device);
 		goto out;
 	}
@@ -565,8 +566,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
 			goto out;
 		}
 
-	printk(KERN_WARNING "No client context found for %s/%s\n",
-	       device->name, client->name);
+	pr_warn("No client context found for %s/%s\n",
+		device->name, client->name);
 
 out:
 	spin_unlock_irqrestore(&device->client_data_lock, flags);
@@ -649,10 +650,23 @@ int ib_query_port(struct ib_device *device,
 		  u8 port_num,
 		  struct ib_port_attr *port_attr)
 {
+	union ib_gid gid;
+	int err;
+
 	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
 		return -EINVAL;
 
-	return device->query_port(device, port_num, port_attr);
+	memset(port_attr, 0, sizeof(*port_attr));
+	err = device->query_port(device, port_num, port_attr);
+	if (err || port_attr->subnet_prefix)
+		return err;
+
+	err = ib_query_gid(device, port_num, 0, &gid, NULL);
+	if (err)
+		return err;
+
+	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
+	return 0;
 }
 
 EXPORT_SYMBOL(ib_query_port);
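Note: with this hunk ib_query_port() zeroes the caller's ib_port_attr and, when the driver leaves subnet_prefix unset, fills it from GID index 0; the grh_required handling in sa_query.c further down relies on this. A minimal caller sketch, assuming the subnet_prefix field this series adds to struct ib_port_attr:

```c
/* Illustrative helper; dev/port_num come from the usual client callbacks. */
static void show_subnet_prefix(struct ib_device *dev, u8 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(dev, port_num, &attr))
		return;

	pr_info("%s port %u: subnet prefix 0x%016llx\n",
		dev->name, port_num, attr.subnet_prefix);
}
```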
@@ -959,13 +973,13 @@ static int __init ib_core_init(void)
 
 	ret = class_register(&ib_class);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+		pr_warn("Couldn't create InfiniBand device class\n");
 		goto err_comp;
 	}
 
 	ret = ibnl_init();
 	if (ret) {
-		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+		pr_warn("Couldn't init IB netlink interface\n");
 		goto err_sysfs;
 	}
+15 -22
@@ -150,8 +150,8 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #ifdef DEBUG
 		if (fmr->ref_count !=0) {
-			printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
-			       fmr, fmr->ref_count);
+			pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
+				fmr, fmr->ref_count);
 		}
 #endif
 	}
@@ -167,7 +167,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 	ret = ib_unmap_fmr(&fmr_list);
 	if (ret)
-		printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
+		pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
 
 	spin_lock_irq(&pool->pool_lock);
 	list_splice(&unmap_list, &pool->free_list);
@@ -222,8 +222,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 	device = pd->device;
 	if (!device->alloc_fmr    || !device->dealloc_fmr  ||
 	    !device->map_phys_fmr || !device->unmap_fmr) {
-		printk(KERN_INFO PFX "Device %s does not support FMRs\n",
-		       device->name);
+		pr_info(PFX "Device %s does not support FMRs\n", device->name);
 		return ERR_PTR(-ENOSYS);
 	}
@@ -233,13 +232,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		max_remaps = device->attrs.max_map_per_fmr;
 
 	pool = kmalloc(sizeof *pool, GFP_KERNEL);
-	if (!pool) {
-		printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
+	if (!pool)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	pool->cache_bucket   = NULL;
 	pool->flush_function = params->flush_function;
 	pool->flush_arg      = params->flush_arg;
@@ -251,7 +247,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 				GFP_KERNEL);
 		if (!pool->cache_bucket) {
-			printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
+			pr_warn(PFX "Failed to allocate cache in pool\n");
 			ret = -ENOMEM;
 			goto out_free_pool;
 		}
@@ -275,7 +271,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 				   "ib_fmr(%s)",
 				   device->name);
 	if (IS_ERR(pool->thread)) {
-		printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
+		pr_warn(PFX "couldn't start cleanup thread\n");
 		ret = PTR_ERR(pool->thread);
 		goto out_free_pool;
 	}
@@ -294,11 +290,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
 		for (i = 0; i < params->pool_size; ++i) {
 			fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
-			if (!fmr) {
-				printk(KERN_WARNING PFX "failed to allocate fmr "
-				       "struct for FMR %d\n", i);
+			if (!fmr)
 				goto out_fail;
-			}
 
 			fmr->pool             = pool;
 			fmr->remap_count      = 0;
@@ -307,8 +300,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 
 			fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
 			if (IS_ERR(fmr->fmr)) {
-				printk(KERN_WARNING PFX "fmr_create failed "
-				       "for FMR %d\n", i);
+				pr_warn(PFX "fmr_create failed for FMR %d\n",
+					i);
 				kfree(fmr);
 				goto out_fail;
 			}
@@ -363,8 +356,8 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 	}
 
 	if (i < pool->pool_size)
-		printk(KERN_WARNING PFX "pool still has %d regions registered\n",
-		       pool->pool_size - i);
+		pr_warn(PFX "pool still has %d regions registered\n",
+			pool->pool_size - i);
 
 	kfree(pool->cache_bucket);
 	kfree(pool);
@@ -463,7 +456,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 		list_add(&fmr->list, &pool->free_list);
 		spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-		printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
+		pr_warn(PFX "fmr_map returns %d\n", result);
 
 		return ERR_PTR(result);
 	}
@@ -517,8 +510,8 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
 	if (fmr->ref_count < 0)
-		printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
-		       fmr, fmr->ref_count);
+		pr_warn(PFX "FMR %p has ref count %d < 0\n",
+			fmr, fmr->ref_count);
 #endif
 
 	spin_unlock_irqrestore(&pool->pool_lock, flags);
+164 -26
@@ -50,6 +50,8 @@
 #include <rdma/iw_cm.h>
 #include <rdma/ib_addr.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
 
 #include "iwcm.h"
@@ -57,6 +59,16 @@ MODULE_AUTHOR("Tom Tucker");
 MODULE_DESCRIPTION("iWARP CM");
 MODULE_LICENSE("Dual BSD/GPL");
 
+static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
+	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
+	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+};
+
 static struct workqueue_struct *iwcm_wq;
 struct iwcm_work {
 	struct work_struct work;
@@ -402,6 +414,11 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+	if (cm_id->mapped) {
+		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+	}
+
 	(void)iwcm_deref_id(cm_id_priv);
 }
@@ -426,6 +443,97 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
+/**
+ * iw_cm_check_wildcard - If IP address is 0 then use original
+ * @pm_addr: sockaddr containing the ip to check for wildcard
+ * @cm_addr: sockaddr containing the actual IP address
+ * @cm_outaddr: sockaddr to set IP addr which leaving port
+ *
+ * Checks the pm_addr for wildcard and then sets cm_outaddr's
+ * IP to the actual (cm_addr).
+ */
+static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
+				 struct sockaddr_storage *cm_addr,
+				 struct sockaddr_storage *cm_outaddr)
+{
+	if (pm_addr->ss_family == AF_INET) {
+		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
+
+		if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
+			struct sockaddr_in *cm4_addr =
+				(struct sockaddr_in *)cm_addr;
+			struct sockaddr_in *cm4_outaddr =
+				(struct sockaddr_in *)cm_outaddr;
+
+			cm4_outaddr->sin_addr = cm4_addr->sin_addr;
+		}
+	} else {
+		struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
+
+		if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
+			struct sockaddr_in6 *cm6_addr =
+				(struct sockaddr_in6 *)cm_addr;
+			struct sockaddr_in6 *cm6_outaddr =
+				(struct sockaddr_in6 *)cm_outaddr;
+
+			cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
+		}
+	}
+}
+
+/**
+ * iw_cm_map - Use portmapper to map the ports
+ * @cm_id: connection manager pointer
+ * @active: Indicates the active side when true
+ * returns nonzero for error only if iwpm_create_mapinfo() fails
+ *
+ * Tries to add a mapping for a port using the Portmapper. If
+ * successful in mapping the IP/Port it will check the remote
+ * mapped IP address for a wildcard IP address and replace the
+ * zero IP address with the remote_addr.
+ */
+static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
+{
+	struct iwpm_dev_data pm_reg_msg;
+	struct iwpm_sa_data pm_msg;
+	int status;
+
+	cm_id->m_local_addr = cm_id->local_addr;
+	cm_id->m_remote_addr = cm_id->remote_addr;
+
+	memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+	       sizeof(pm_reg_msg.dev_name));
+	memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
+	       sizeof(pm_reg_msg.if_name));
+
+	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
+	    !iwpm_valid_pid())
+		return 0;
+
+	cm_id->mapped = true;
+	pm_msg.loc_addr = cm_id->local_addr;
+	pm_msg.rem_addr = cm_id->remote_addr;
+	if (active)
+		status = iwpm_add_and_query_mapping(&pm_msg,
+						    RDMA_NL_IWCM);
+	else
+		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);
+
+	if (!status) {
+		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
+		if (active) {
+			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
+			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
+					     &cm_id->remote_addr,
+					     &cm_id->m_remote_addr);
+		}
+	}
+
+	return iwpm_create_mapinfo(&cm_id->local_addr,
+				   &cm_id->m_local_addr,
+				   RDMA_NL_IWCM);
+}
+
 /*
  * CM_ID <-- LISTEN
  *
@@ -452,7 +560,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
 	case IW_CM_STATE_IDLE:
 		cm_id_priv->state = IW_CM_STATE_LISTEN;
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+		ret = iw_cm_map(cm_id, false);
+		if (!ret)
+			ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
 		if (ret)
 			cm_id_priv->state = IW_CM_STATE_IDLE;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -582,39 +692,37 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 
 	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-		wake_up_all(&cm_id_priv->connect_wait);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err;
 	}
 
 	/* Get the ib_qp given the QPN */
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-		wake_up_all(&cm_id_priv->connect_wait);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err;
 	}
 	cm_id->device->iwcm->add_ref(qp);
 	cm_id_priv->qp = qp;
 	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
-	if (ret) {
-		spin_lock_irqsave(&cm_id_priv->lock, flags);
-		if (cm_id_priv->qp) {
-			cm_id->device->iwcm->rem_ref(qp);
-			cm_id_priv->qp = NULL;
-		}
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
-		cm_id_priv->state = IW_CM_STATE_IDLE;
-		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-		wake_up_all(&cm_id_priv->connect_wait);
-	}
+	ret = iw_cm_map(cm_id, true);
+	if (!ret)
+		ret = cm_id->device->iwcm->connect(cm_id, iw_param);
+	if (!ret)
+		return 0;	/* success */
 
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id_priv->qp) {
+		cm_id->device->iwcm->rem_ref(qp);
+		cm_id_priv->qp = NULL;
+	}
+	cm_id_priv->state = IW_CM_STATE_IDLE;
+err:
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+	wake_up_all(&cm_id_priv->connect_wait);
 	return ret;
 }
 EXPORT_SYMBOL(iw_cm_connect);
@@ -656,8 +764,23 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
 		goto out;
 
 	cm_id->provider_data = iw_event->provider_data;
-	cm_id->local_addr = iw_event->local_addr;
-	cm_id->remote_addr = iw_event->remote_addr;
+	cm_id->m_local_addr = iw_event->local_addr;
+	cm_id->m_remote_addr = iw_event->remote_addr;
+	cm_id->local_addr = listen_id_priv->id.local_addr;
+
+	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
+				   &iw_event->remote_addr,
+				   &cm_id->remote_addr,
+				   RDMA_NL_IWCM);
+	if (ret) {
+		cm_id->remote_addr = iw_event->remote_addr;
+	} else {
+		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
+				     &iw_event->local_addr,
+				     &cm_id->local_addr);
+		iw_event->local_addr = cm_id->local_addr;
+		iw_event->remote_addr = cm_id->remote_addr;
+	}
 
 	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 	cm_id_priv->state = IW_CM_STATE_CONN_RECV;
@@ -753,8 +876,10 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
 	if (iw_event->status == 0) {
-		cm_id_priv->id.local_addr = iw_event->local_addr;
-		cm_id_priv->id.remote_addr = iw_event->remote_addr;
+		cm_id_priv->id.m_local_addr = iw_event->local_addr;
+		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
+		iw_event->local_addr = cm_id_priv->id.local_addr;
+		iw_event->remote_addr = cm_id_priv->id.remote_addr;
 		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
 	} else {
 		/* REJECTED or RESET */
@@ -1044,6 +1169,17 @@ EXPORT_SYMBOL(iw_cm_init_qp_attr);
 static int __init iw_cm_init(void)
 {
+	int ret;
+
+	ret = iwpm_init(RDMA_NL_IWCM);
+	if (ret)
+		pr_err("iw_cm: couldn't init iwpm\n");
+
+	ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
+			      iwcm_nl_cb_table);
+	if (ret)
+		pr_err("iw_cm: couldn't register netlink callbacks\n");
+
 	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
 	if (!iwcm_wq)
 		return -ENOMEM;
@@ -1063,6 +1199,8 @@ static void __exit iw_cm_cleanup(void)
 {
 	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
 	destroy_workqueue(iwcm_wq);
+	ibnl_remove_client(RDMA_NL_IWCM);
+	iwpm_exit(RDMA_NL_IWCM);
 }
 
 module_init(iw_cm_init);
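Note: after this file's changes, struct iw_cm_id carries two address pairs: local_addr/remote_addr as seen by the application, and m_local_addr/m_remote_addr as used on the wire once the port mapper has assigned a mapping. The pairs stay identical when no port-mapper daemon is running, since iw_cm_map() then returns 0 without marking the id mapped. A hypothetical helper to visualize the pair:

```c
static void log_port_mapping(struct iw_cm_id *cm_id)
{
	struct sockaddr_in *app  = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *wire = (struct sockaddr_in *)&cm_id->m_local_addr;

	/* e.g. "iw_cm: app port 5445 mapped to wire port 49152" */
	pr_info("iw_cm: app port %u mapped to wire port %u\n",
		ntohs(app->sin_port), ntohs(wire->sin_port));
}
```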
+6 -6
@@ -88,8 +88,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
 	if (ret)
 		goto pid_query_error;
-	ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
-				pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
+	ret = ibnl_put_attr(skb, nlh, IFNAMSIZ,
+			    pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
 	if (ret)
 		goto pid_query_error;
 	ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
@@ -394,7 +394,7 @@ register_pid_response_exit:
 	/* always for found nlmsg_request */
 	kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
 	barrier();
-	wake_up(&nlmsg_request->waitq);
+	up(&nlmsg_request->sem);
 	return 0;
 }
 EXPORT_SYMBOL(iwpm_register_pid_cb);
@@ -463,7 +463,7 @@ add_mapping_response_exit:
 	/* always for found request */
 	kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
 	barrier();
-	wake_up(&nlmsg_request->waitq);
+	up(&nlmsg_request->sem);
 	return 0;
 }
 EXPORT_SYMBOL(iwpm_add_mapping_cb);
@@ -555,7 +555,7 @@ query_mapping_response_exit:
 	/* always for found request */
 	kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
 	barrier();
-	wake_up(&nlmsg_request->waitq);
+	up(&nlmsg_request->sem);
 	return 0;
 }
 EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
@@ -749,7 +749,7 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
 	/* always for found request */
 	kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
 	barrier();
-	wake_up(&nlmsg_request->waitq);
+	up(&nlmsg_request->sem);
 	return 0;
 }
 EXPORT_SYMBOL(iwpm_mapping_error_cb);
+7 -7
@@ -254,9 +254,9 @@ void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
 }
 
 int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
-			struct sockaddr_storage *mapped_rem_addr,
-			struct sockaddr_storage *remote_addr,
-			u8 nl_client)
+			 struct sockaddr_storage *mapped_rem_addr,
+			 struct sockaddr_storage *remote_addr,
+			 u8 nl_client)
 {
 	struct hlist_node *tmp_hlist_node;
 	struct hlist_head *hash_bucket_head;
@@ -322,6 +322,8 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 	nlmsg_request->nl_client = nl_client;
 	nlmsg_request->request_done = 0;
 	nlmsg_request->err_code = 0;
+	sema_init(&nlmsg_request->sem, 1);
+	down(&nlmsg_request->sem);
 	return nlmsg_request;
 }
@@ -364,11 +366,9 @@ struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
 int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
 {
 	int ret;
-	init_waitqueue_head(&nlmsg_request->waitq);
 
-	ret = wait_event_timeout(nlmsg_request->waitq,
-				(nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
-	if (!ret) {
+	ret = down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT);
+	if (ret) {
 		ret = -EINVAL;
 		pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
 			__func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
+1 -1
@@ -69,7 +69,7 @@ struct iwpm_nlmsg_request {
 	u8                  nl_client;
 	u8                  request_done;
 	u16                 err_code;
-	wait_queue_head_t   waitq;
+	struct semaphore    sem;
 	struct kref         kref;
 };
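Note: the waitq-to-semaphore conversion above works because the request is created already "taken": sema_init(&sem, 1) followed by an immediate down() leaves the count at 0, the netlink callback's up() releases exactly one waiter, and down_timeout() returns 0 on success or -ETIME on timeout (hence the if (ret) test replacing if (!ret)). A stand-alone sketch of the pattern, outside struct iwpm_nlmsg_request:

```c
#include <linux/semaphore.h>

struct pending_req {
	struct semaphore sem;
};

static void pending_req_init(struct pending_req *req)
{
	sema_init(&req->sem, 1);
	down(&req->sem);		/* count is now 0: the next down_timeout() blocks */
}

static int pending_req_wait(struct pending_req *req, long timeout_jiffies)
{
	return down_timeout(&req->sem, timeout_jiffies);  /* 0, or -ETIME on timeout */
}

static void pending_req_complete(struct pending_req *req)
{
	up(&req->sem);			/* callback side: release the waiter */
}
```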
+6 -8
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
 	case 4: return be32_to_cpup((__be32 *) (structure + offset));
 	case 8: return be64_to_cpup((__be64 *) (structure + offset));
 	default:
-		printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+		pr_warn("Field size %d bits not handled\n", size * 8);
 		return 0;
 	}
 }
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field *desc,
 		} else {
 			if (desc[i].offset_bits % 8 ||
 			    desc[i].size_bits   % 8) {
-				printk(KERN_WARNING "Structure field %s of size %d "
-				       "bits is not byte-aligned\n",
-				       desc[i].field_name, desc[i].size_bits);
+				pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+					desc[i].field_name, desc[i].size_bits);
 			}
 
 			if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
 	case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
 	case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
 	default:
-		printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+		pr_warn("Field size %d bits not handled\n", size * 8);
 	}
 }
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field *desc,
 		} else {
 			if (desc[i].offset_bits % 8 ||
 			    desc[i].size_bits   % 8) {
-				printk(KERN_WARNING "Structure field %s of size %d "
-				       "bits is not byte-aligned\n",
-				       desc[i].field_name, desc[i].size_bits);
+				pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+					desc[i].field_name, desc[i].size_bits);
 			}
 
 			memcpy(structure + desc[i].struct_offset_bytes,
+11 -7
@@ -864,13 +864,12 @@ static void update_sm_ah(struct work_struct *work)
 	struct ib_ah_attr   ah_attr;
 
 	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
-		printk(KERN_WARNING "Couldn't query port\n");
+		pr_warn("Couldn't query port\n");
 		return;
 	}
 
 	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
 	if (!new_ah) {
-		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
 		return;
 	}
@@ -880,16 +879,21 @@ static void update_sm_ah(struct work_struct *work)
 	new_ah->pkey_index = 0;
 	if (ib_find_pkey(port->agent->device, port->port_num,
 			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
-		printk(KERN_ERR "Couldn't find index for default PKey\n");
+		pr_err("Couldn't find index for default PKey\n");
 
 	memset(&ah_attr, 0, sizeof ah_attr);
 	ah_attr.dlid     = port_attr.sm_lid;
 	ah_attr.sl       = port_attr.sm_sl;
 	ah_attr.port_num = port->port_num;
+	if (port_attr.grh_required) {
+		ah_attr.ah_flags = IB_AH_GRH;
+		ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
+		ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
+	}
 
 	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
 	if (IS_ERR(new_ah->ah)) {
-		printk(KERN_WARNING "Couldn't create new SM AH\n");
+		pr_warn("Couldn't create new SM AH\n");
 		kfree(new_ah);
 		return;
 	}
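Note: when a port reports grh_required, the SM address handle must carry a GRH; the 128-bit DGID is composed from the 64-bit subnet prefix that ib_query_port() now guarantees (see the device.c hunk earlier) and the SA well-known GUID in the low 64 bits. A sketch of the composition, with illustrative naming:

```c
/* The two 64-bit halves of a GID, stored big-endian on the wire. */
static void build_sm_dgid(union ib_gid *dgid, u64 subnet_prefix, u64 guid)
{
	dgid->global.subnet_prefix = cpu_to_be64(subnet_prefix);
	dgid->global.interface_id  = cpu_to_be64(guid);
}
```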
@@ -1221,7 +1225,7 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 			rec.net = NULL;
 			rec.ifindex = 0;
 			rec.gid_type = IB_GID_TYPE_IB;
-			memset(rec.dmac, 0, ETH_ALEN);
+			eth_zero_addr(rec.dmac);
 			query->callback(status, &rec, query->context);
 		} else
 			query->callback(status, NULL, query->context);
@@ -1800,13 +1804,13 @@ static int __init ib_sa_init(void)
 
 	ret = ib_register_client(&sa_client);
 	if (ret) {
-		printk(KERN_ERR "Couldn't register ib_sa client\n");
+		pr_err("Couldn't register ib_sa client\n");
 		goto err1;
 	}
 
 	ret = mcast_init();
 	if (ret) {
-		printk(KERN_ERR "Couldn't initialize multicast handling\n");
+		pr_err("Couldn't initialize multicast handling\n");
 		goto err2;
 	}
+4 -4
@@ -1234,7 +1234,7 @@ static int find_overflow_devnum(void)
 		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
 					  "infiniband_cm");
 		if (ret) {
-			printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+			pr_err("ucm: couldn't register dynamic device number\n");
 			return ret;
 		}
 	}
@@ -1329,19 +1329,19 @@ static int __init ib_ucm_init(void)
 	ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
 				     "infiniband_cm");
 	if (ret) {
-		printk(KERN_ERR "ucm: couldn't register device number\n");
+		pr_err("ucm: couldn't register device number\n");
 		goto error1;
 	}
 
 	ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
 	if (ret) {
-		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+		pr_err("ucm: couldn't create abi_version attribute\n");
 		goto error2;
 	}
 
 	ret = ib_register_client(&ucm_client);
 	if (ret) {
-		printk(KERN_ERR "ucm: couldn't register client\n");
+		pr_err("ucm: couldn't register client\n");
 		goto error3;
 	}
 
 	return 0;
+3 -3
@@ -314,7 +314,7 @@ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
 		}
 	}
 	if (!event_found)
-		printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
 }
 
 static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -1716,13 +1716,13 @@ static int __init ucma_init(void)
 
 	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
 	if (ret) {
-		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+		pr_err("rdma_ucm: couldn't create abi_version attr\n");
 		goto err1;
 	}
 
 	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
 	if (!ucma_ctl_table_hdr) {
-		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+		pr_err("rdma_ucm: couldn't register sysctl paths\n");
 		ret = -ENOMEM;
 		goto err2;
 	}
+11 -12
@@ -479,8 +479,8 @@ int ib_ud_header_unpack(void *buf,
 	buf += IB_LRH_BYTES;
 
 	if (header->lrh.link_version != 0) {
-		printk(KERN_WARNING "Invalid LRH.link_version %d\n",
-		       header->lrh.link_version);
+		pr_warn("Invalid LRH.link_version %d\n",
+			header->lrh.link_version);
 		return -EINVAL;
 	}
@@ -496,20 +496,20 @@ int ib_ud_header_unpack(void *buf,
 		buf += IB_GRH_BYTES;
 
 		if (header->grh.ip_version != 6) {
-			printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
-			       header->grh.ip_version);
+			pr_warn("Invalid GRH.ip_version %d\n",
+				header->grh.ip_version);
 			return -EINVAL;
 		}
 		if (header->grh.next_header != 0x1b) {
-			printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
-			       header->grh.next_header);
+			pr_warn("Invalid GRH.next_header 0x%02x\n",
+				header->grh.next_header);
 			return -EINVAL;
 		}
 		break;
 
 	default:
-		printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
-		       header->lrh.link_next_header);
+		pr_warn("Invalid LRH.link_next_header %d\n",
+			header->lrh.link_next_header);
 		return -EINVAL;
 	}
@@ -525,14 +525,13 @@ int ib_ud_header_unpack(void *buf,
 		header->immediate_present = 1;
 		break;
 	default:
-		printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
-		       header->bth.opcode);
+		pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
 		return -EINVAL;
 	}
 
 	if (header->bth.transport_header_version != 0) {
-		printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
-		       header->bth.transport_header_version);
+		pr_warn("Invalid BTH.transport_header_version %d\n",
+			header->bth.transport_header_version);
 		return -EINVAL;
 	}
+25 -17
@@ -402,7 +402,7 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
 	resp->hw_ver		= attr->hw_ver;
 	resp->max_qp		= attr->max_qp;
 	resp->max_qp_wr		= attr->max_qp_wr;
-	resp->device_cap_flags	= attr->device_cap_flags;
+	resp->device_cap_flags	= lower_32_bits(attr->device_cap_flags);
 	resp->max_sge		= attr->max_sge;
 	resp->max_sge_rd	= attr->max_sge_rd;
 	resp->max_cq		= attr->max_cq;
@@ -1174,6 +1174,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 	struct ib_uobject	*uobj;
 	struct ib_pd		*pd;
 	struct ib_mw		*mw;
+	struct ib_udata		udata;
 	int			ret;
 
 	if (out_len < sizeof(resp))
@@ -1195,7 +1196,12 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
 		goto err_free;
 	}
 
-	mw = pd->device->alloc_mw(pd, cmd.mw_type);
+	INIT_UDATA(&udata, buf + sizeof(cmd),
+		   (unsigned long)cmd.response + sizeof(resp),
+		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - sizeof(resp));
+
+	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
 	if (IS_ERR(mw)) {
 		ret = PTR_ERR(mw);
 		goto err_put;
@@ -1970,7 +1976,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		   resp_size);
 	INIT_UDATA(&uhw, buf + sizeof(cmd),
 		   (unsigned long)cmd.response + resp_size,
-		   in_len - sizeof(cmd), out_len - resp_size);
+		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - resp_size);
 
 	memset(&cmd_ex, 0, sizeof(cmd_ex));
 	cmd_ex.user_handle = cmd.user_handle;
@@ -3085,6 +3092,14 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
 		return -EPERM;
 
+	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
+		return -EINVAL;
+
+	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
+	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
+		return -EINVAL;
+
 	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
 		return -EINVAL;
@@ -3413,7 +3428,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
-		   in_len - sizeof cmd, out_len - sizeof resp);
+		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - sizeof resp);
 
 	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
 	if (ret)
@@ -3439,7 +3455,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
-		   in_len - sizeof cmd, out_len - sizeof resp);
+		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - sizeof resp);
 
 	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
 	if (ret)
@@ -3583,9 +3600,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 			      struct ib_udata *ucore,
 			      struct ib_udata *uhw)
 {
-	struct ib_uverbs_ex_query_device_resp resp;
+	struct ib_uverbs_ex_query_device_resp resp = { {0} };
 	struct ib_uverbs_ex_query_device  cmd;
-	struct ib_device_attr attr;
+	struct ib_device_attr attr = {0};
 	int err;
 
 	if (ucore->inlen < sizeof(cmd))
@@ -3606,14 +3623,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 	if (ucore->outlen < resp.response_length)
 		return -ENOSPC;
 
-	memset(&attr, 0, sizeof(attr));
 	err = ib_dev->query_device(ib_dev, &attr, uhw);
 	if (err)
 		return err;
 
 	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
-	resp.comp_mask = 0;
 
 	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
 		goto end;
@@ -3626,9 +3640,6 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 		attr.odp_caps.per_transport_caps.uc_odp_caps;
 	resp.odp_caps.per_transport_caps.ud_odp_caps =
 		attr.odp_caps.per_transport_caps.ud_odp_caps;
-	resp.odp_caps.reserved = 0;
-#else
-	memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
 #endif
 
 	resp.response_length += sizeof(resp.odp_caps);
@@ -3646,8 +3657,5 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 
 end:
 	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
-	if (err)
-		return err;
-
-	return 0;
+	return err;
 }
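Note: the recurring INIT_UDATA() change in this file subtracts sizeof(struct ib_uverbs_cmd_hdr) because, as the hunks imply, in_len counts the entire write() payload with the common header included; without the subtraction the driver-private input region was reported one header too long. The accounting, as a sketch with a hypothetical helper:

```c
/* count = full write() size = common header + fixed command struct + udata */
static size_t uverbs_udata_inlen(size_t count, size_t cmd_size)
{
	return count - sizeof(struct ib_uverbs_cmd_hdr) - cmd_size;
}
```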
+39 -41
@@ -683,12 +683,28 @@ out:
 	return ev_file;
 }
 
+static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
+{
+	u64 mask;
+
+	if (command <= IB_USER_VERBS_CMD_OPEN_QP)
+		mask = ib_dev->uverbs_cmd_mask;
+	else
+		mask = ib_dev->uverbs_ex_cmd_mask;
+
+	if (mask & ((u64)1 << command))
+		return 0;
+
+	return -1;
+}
+
 static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			       size_t count, loff_t *pos)
 {
 	struct ib_uverbs_file *file = filp->private_data;
 	struct ib_device *ib_dev;
 	struct ib_uverbs_cmd_hdr hdr;
+	__u32 command;
 	__u32 flags;
 	int srcu_key;
 	ssize_t ret;
@@ -707,37 +723,34 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		goto out;
 	}
 
+	if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+				   IB_USER_VERBS_CMD_COMMAND_MASK)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+	if (verify_command_mask(ib_dev, command)) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!file->ucontext &&
+	    command != IB_USER_VERBS_CMD_GET_CONTEXT) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	flags = (hdr.command &
 		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
 
 	if (!flags) {
-		__u32 command;
-
-		if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
-					   IB_USER_VERBS_CMD_COMMAND_MASK)) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
 		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
 		    !uverbs_cmd_table[command]) {
 			ret = -EINVAL;
 			goto out;
 		}
 
-		if (!file->ucontext &&
-		    command != IB_USER_VERBS_CMD_GET_CONTEXT) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
-			ret = -ENOSYS;
-			goto out;
-		}
-
 		if (hdr.in_words * 4 != count) {
 			ret = -EINVAL;
 			goto out;
@@ -749,21 +762,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 						 hdr.out_words * 4);
 
 	} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
-		__u32 command;
-
 		struct ib_uverbs_ex_cmd_hdr ex_hdr;
 		struct ib_udata ucore;
 		struct ib_udata uhw;
 		size_t written_count = count;
 
-		if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
-					   IB_USER_VERBS_CMD_COMMAND_MASK)) {
-			ret = -EINVAL;
-			goto out;
-		}
-
-		command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
 		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
 		    !uverbs_ex_cmd_table[command]) {
 			ret = -ENOSYS;
@@ -775,11 +778,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			goto out;
 		}
 
-		if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
-			ret = -ENOSYS;
-			goto out;
-		}
-
 		if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
 			ret = -EINVAL;
 			goto out;
@@ -1058,7 +1056,7 @@ static int find_overflow_devnum(void)
 		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
 					  "infiniband_verbs");
 		if (ret) {
-			printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+			pr_err("user_verbs: couldn't register dynamic device number\n");
 			return ret;
 		}
 	}
@@ -1279,14 +1277,14 @@ static int __init ib_uverbs_init(void)
 	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
 				     "infiniband_verbs");
 	if (ret) {
-		printk(KERN_ERR "user_verbs: couldn't register device number\n");
+		pr_err("user_verbs: couldn't register device number\n");
 		goto out;
 	}
 
 	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
 	if (IS_ERR(uverbs_class)) {
 		ret = PTR_ERR(uverbs_class);
-		printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
+		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
 		goto out_chrdev;
 	}
@@ -1294,13 +1292,13 @@ static int __init ib_uverbs_init(void)
 
 	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
 	if (ret) {
-		printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+		pr_err("user_verbs: couldn't create abi_version attribute\n");
 		goto out_class;
 	}
 
 	ret = ib_register_client(&uverbs_client);
 	if (ret) {
-		printk(KERN_ERR "user_verbs: couldn't register client\n");
+		pr_err("user_verbs: couldn't register client\n");
 		goto out_class;
 	}
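Note: command-number validation, the ucontext check, and the device capability check are now done once, up front, for both the legacy and the extended (flags) paths, and unsupported commands fail with EOPNOTSUPP instead of ENOSYS. The bitmaps consulted by verify_command_mask() are the per-device masks a driver fills at registration; an abbreviated driver-side sketch of that existing idiom:

```c
static void example_set_cmd_masks(struct ib_device *ibdev)
{
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)  |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP);
	ibdev->uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
}
```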
+206
@@ -1551,6 +1551,46 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 }
 EXPORT_SYMBOL(ib_check_mr_status);
 
+int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+			 int state)
+{
+	if (!device->set_vf_link_state)
+		return -ENOSYS;
+
+	return device->set_vf_link_state(device, vf, port, state);
+}
+EXPORT_SYMBOL(ib_set_vf_link_state);
+
+int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+		     struct ifla_vf_info *info)
+{
+	if (!device->get_vf_config)
+		return -ENOSYS;
+
+	return device->get_vf_config(device, vf, port, info);
+}
+EXPORT_SYMBOL(ib_get_vf_config);
+
+int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+		    struct ifla_vf_stats *stats)
+{
+	if (!device->get_vf_stats)
+		return -ENOSYS;
+
+	return device->get_vf_stats(device, vf, port, stats);
+}
+EXPORT_SYMBOL(ib_get_vf_stats);
+
+int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+		   int type)
+{
+	if (!device->set_vf_guid)
+		return -ENOSYS;
+
+	return device->set_vf_guid(device, vf, port, guid, type);
+}
+EXPORT_SYMBOL(ib_set_vf_guid);
+
 /**
  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
  * and set it the memory region.
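Note: the four new SR-IOV helpers are thin wrappers over optional device ops; drivers that do not implement SR-IOV leave the ops NULL and callers get -ENOSYS. A caller sketch; struct ifla_vf_info is the netlink VF structure from <linux/if_link.h>, and the helper name is hypothetical:

```c
#include <linux/if_link.h>

static void dump_vf0(struct ib_device *dev, u8 port)
{
	struct ifla_vf_info info;

	if (ib_get_vf_config(dev, 0, port, &info))
		return;		/* -ENOSYS when the driver has no SR-IOV ops */

	pr_info("%s: VF0 mac %pM, link state %d\n",
		dev->name, info.mac, info.linkstate);
}
```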
@@ -1567,6 +1607,8 @@ EXPORT_SYMBOL(ib_check_mr_status);
  * - The last sg element is allowed to have length less than page_size.
  * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
  *   then only max_num_sg entries will be mapped.
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, non of these
+ *   constraints holds and the page_size argument is ignored.
  *
  * Returns the number of sg elements that were mapped to the memory region.
  *
@@ -1657,3 +1699,167 @@ next_page:
 	return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+	struct ib_cqe cqe;
+	struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+						cqe);
+
+	complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe sdrain;
+	struct ib_send_wr swr = {}, *bad_swr;
+	int ret;
+
+	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+		return;
+	}
+
+	swr.wr_cqe = &sdrain.cqe;
+	sdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&sdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_send(qp, &swr, &bad_swr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+	struct ib_drain_cqe rdrain;
+	struct ib_recv_wr rwr = {}, *bad_rwr;
+	int ret;
+
+	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+		return;
+	}
+
+	rwr.wr_cqe = &rdrain.cqe;
+	rdrain.cqe.done = ib_drain_qp_done;
+	init_completion(&rdrain.done);
+
+	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	ret = ib_post_recv(qp, &rwr, &bad_rwr);
+	if (ret) {
+		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+		return;
+	}
+
+	wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *		   application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+	if (qp->device->drain_sq)
+		qp->device->drain_sq(qp);
+	else
+		__ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *		   application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+	if (qp->device->drain_rq)
+		qp->device->drain_rq(qp);
+	else
+		__ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *		   application on both the RQ and SQ.
+ * @qp:            queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+	ib_drain_sq(qp);
+	ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
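Note: a typical consumer of the new drain API is ULP connection teardown: flush everything in flight before freeing resources, instead of each ULP hand-rolling its own flush logic. A minimal sketch, assuming the CQs were allocated with ib_alloc_cq() and a poll context other than IB_POLL_DIRECT:

```c
static void example_qp_teardown(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* moves the QP to error, posts marker WRs,
				 * and waits until SQ and RQ are empty */
	ib_destroy_qp(qp);
}
```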

Some files were not shown because too many files have changed in this diff.