Merge branch 'nvmf-4.10' of git://git.infradead.org/nvme-fabrics into for-4.10/block

Sagi writes:

The major addition here is the nvme FC transport implementation
from James.

What else:
- some cleanups and memory leak fixes in the host side fabrics code from Bart
- possible rcu violation fix from Sasha
- logging change from Max
- small include cleanup
This commit is contained in:
Jens Axboe
2016-12-06 08:06:19 -07:00
24 changed files with 7313 additions and 34 deletions
+10
View File
@@ -8659,6 +8659,16 @@ L: linux-nvme@lists.infradead.org
S: Supported
F: drivers/nvme/target/
NVM EXPRESS FC TRANSPORT DRIVERS
M: James Smart <james.smart@broadcom.com>
L: linux-nvme@lists.infradead.org
S: Supported
F: include/linux/nvme-fc.h
F: include/linux/nvme-fc-driver.h
F: drivers/nvme/host/fc.c
F: drivers/nvme/target/fc.c
F: drivers/nvme/target/fcloop.c
NVMEM FRAMEWORK
M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
M: Maxime Ripard <maxime.ripard@free-electrons.com>
+17
View File
@@ -43,3 +43,20 @@ config NVME_RDMA
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
config NVME_FC
tristate "NVM Express over Fabrics FC host driver"
depends on BLOCK
depends on HAS_DMA
select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
This provides support for the NVMe over Fabrics protocol using
the FC transport. This allows you to use remote block devices
exported using the NVMe protocol set.
To configure a NVMe over Fabrics controller use the nvme-cli tool
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
+3
View File
@@ -2,6 +2,7 @@ obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
@@ -12,3 +13,5 @@ nvme-y += pci.o
nvme-fabrics-y += fabrics.o
nvme-rdma-y += rdma.o
nvme-fc-y += fc.o
+2 -1
View File
@@ -303,7 +303,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.command_id = req->tag;
cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -345,6 +344,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
else
nvme_setup_rw(ns, req, cmd);
cmd->common.command_id = req->tag;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+4 -3
View File
@@ -576,7 +576,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
nqnlen = strlen(opts->subsysnqn);
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
opts->subsysnqn, NVMF_NQN_SIZE);
opts->subsysnqn, NVMF_NQN_SIZE);
ret = -EINVAL;
goto out;
}
@@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (nqnlen >= NVMF_NQN_SIZE) {
pr_err("%s needs to be < %d bytes\n",
p, NVMF_NQN_SIZE);
kfree(p);
ret = -EINVAL;
goto out;
}
opts->host = nvmf_host_add(p);
kfree(p);
if (!opts->host) {
ret = -ENOMEM;
goto out;
@@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
out_unlock:
mutex_unlock(&nvmf_transports_mutex);
out_free_opts:
nvmf_host_put(opts->host);
kfree(opts);
nvmf_free_options(opts);
return ERR_PTR(ret);
}
File diff suppressed because it is too large Load Diff
-1
View File
@@ -611,7 +611,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out;
cmnd.common.command_id = req->tag;
blk_mq_start_request(req);
spin_lock_irq(&nvmeq->q_lock);
+11 -3
View File
@@ -28,7 +28,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <linux/nvme-rdma.h>
#include "nvme.h"
@@ -241,7 +240,9 @@ out_free_ring:
static void nvme_rdma_qp_event(struct ib_event *event, void *context)
{
pr_debug("QP event %d\n", event->event);
pr_debug("QP event %s (%d)\n",
ib_event_msg(event->event), event->event);
}
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
@@ -1398,7 +1399,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
c->common.command_id = rq->tag;
blk_mq_start_request(rq);
map_len = nvme_map_len(rq);
@@ -1904,6 +1904,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
opts->queue_size = ctrl->ctrl.maxcmd;
}
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
/* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
opts->queue_size = ctrl->ctrl.sqsize + 1;
}
if (opts->nr_io_queues) {
ret = nvme_rdma_create_io_queues(ctrl);
if (ret)
+2 -9
View File
@@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
u16 idx, u16 bd_len, u8 llbaa)
{
u16 bd_num;
bd_num = bd_len / ((llbaa == 0) ?
SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
/* Store block descriptor info if a FORMAT UNIT comes later */
/* TODO Saving 1st BD info; what to do if multiple BD received? */
if (llbaa == 0) {
@@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
int nvme_sc;
struct nvme_id_ns *id_ns;
u8 i;
u8 flbas, nlbaf;
u8 nlbaf;
u8 selected_lbaf = 0xFF;
u32 cdw10 = 0;
struct nvme_command c;
@@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
if (res)
return res;
flbas = (id_ns->flbas) & 0x0F;
nlbaf = id_ns->nlbaf;
for (i = 0; i < nlbaf; i++) {
@@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
u8 immed, pcmod, no_flush, start;
u8 immed, no_flush;
immed = cmd[1] & 0x01;
pcmod = cmd[3] & 0x0f;
no_flush = cmd[4] & 0x04;
start = cmd[4] & 0x01;
if (immed != 0) {
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
+24
View File
@@ -34,3 +34,27 @@ config NVME_TARGET_RDMA
devices over RDMA.
If unsure, say N.
config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
If unsure, say N.
config NVME_TARGET_FCLOOP
tristate "NVMe over Fabrics FC Transport Loopback Test driver"
depends on NVME_TARGET
select NVME_CORE
select NVME_FABRICS
select SG_POOL
depends on NVME_FC
depends on NVME_TARGET_FC
help
This enables the NVMe FC loopback test support, which can be useful
to test NVMe-FC transport interfaces.
If unsure, say N.
+4
View File
@@ -2,8 +2,12 @@
obj-$(CONFIG_NVME_TARGET) += nvmet.o
obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
nvmet-y += core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
discovery.o
nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o
+17 -3
View File
@@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
return sprintf(page, "ipv6\n");
case NVMF_ADDR_FAMILY_IB:
return sprintf(page, "ib\n");
case NVMF_ADDR_FAMILY_FC:
return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
} else if (sysfs_streq(page, "ib")) {
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
} else if (sysfs_streq(page, "fc")) {
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
} else {
pr_err("Invalid value '%s' for adrfam\n", page);
return -EINVAL;
@@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
return sprintf(page, "rdma\n");
case NVMF_TRTYPE_LOOP:
return sprintf(page, "loop\n");
case NVMF_TRTYPE_FC:
return sprintf(page, "fc\n");
default:
return sprintf(page, "\n");
}
@@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
}
static void nvmet_port_init_tsas_fc(struct nvmet_port *port)
{
port->disc_addr.trtype = NVMF_TRTYPE_FC;
memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
}
static ssize_t nvmet_addr_trtype_store(struct config_item *item,
const char *page, size_t count)
{
@@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
nvmet_port_init_tsas_rdma(port);
} else if (sysfs_streq(page, "loop")) {
nvmet_port_init_tsas_loop(port);
} else if (sysfs_streq(page, "fc")) {
nvmet_port_init_tsas_fc(port);
} else {
pr_err("Invalid value '%s' for trtype\n", page);
return -EINVAL;
@@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
mutex_lock(&subsys->lock);
ret = -EBUSY;
if (nvmet_ns_enabled(ns))
if (ns->enabled)
goto out_unlock;
kfree(ns->device_path);
@@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
int ret = 0;
mutex_lock(&subsys->lock);
if (nvmet_ns_enabled(ns)) {
if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}
@@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
{
return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
}
static ssize_t nvmet_ns_enable_store(struct config_item *item,
+8 -6
View File
@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret = 0;
mutex_lock(&subsys->lock);
if (!list_empty(&ns->dev_link))
if (ns->enabled)
goto out_unlock;
ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
ns->enabled = true;
ret = 0;
out_unlock:
mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (list_empty(&ns->dev_link)) {
mutex_unlock(&subsys->lock);
return;
}
list_del_init(&ns->dev_link);
if (!ns->enabled)
goto out_unlock;
ns->enabled = false;
list_del_rcu(&ns->dev_link);
mutex_unlock(&subsys->lock);
/*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
if (ns->bdev)
blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
out_unlock:
mutex_unlock(&subsys->lock);
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
-1
View File
@@ -194,7 +194,6 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
}
iod->cmd.common.command_id = req->tag;
blk_mq_start_request(req);
schedule_work(&iod->work);
+1 -5
View File
@@ -47,6 +47,7 @@ struct nvmet_ns {
loff_t size;
u8 nguid[16];
bool enabled;
struct nvmet_subsys *subsys;
const char *device_path;
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
{
return !list_empty_careful(&ns->dev_link);
}
struct nvmet_cq {
u16 qid;
u16 size;
+6 -2
View File
@@ -1044,8 +1044,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
}
ret = nvmet_sq_init(&queue->nvme_sq);
if (ret)
if (ret) {
ret = NVME_RDMA_CM_NO_RSC;
goto out_free_queue;
}
ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
if (ret)
@@ -1114,6 +1116,7 @@ out_destroy_sq:
out_free_queue:
kfree(queue);
out_reject:
pr_debug("rejecting connect request with status code %d\n", ret);
nvmet_rdma_cm_reject(cm_id, ret);
return NULL;
}
@@ -1127,7 +1130,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
rdma_notify(queue->cm_id, event->event);
break;
default:
pr_err("received unrecognized IB QP event %d\n", event->event);
pr_err("received IB QP event: %s (%d)\n",
ib_event_msg(event->event), event->event);
break;
}
}
File diff suppressed because it is too large Load Diff
+268
View File
@@ -0,0 +1,268 @@
/*
* Copyright (c) 2016 Avago Technologies. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful.
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
* INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
* THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
* See the GNU General Public License for more details, a copy of which
* can be found in the file COPYING included with this package
*
*/
/*
* This file contains definitions relative to FC-NVME r1.11 and a few
* newer items
*/
#ifndef _NVME_FC_H
#define _NVME_FC_H 1
#define NVME_CMD_SCSI_ID 0xFD
#define NVME_CMD_FC_ID FC_TYPE_NVME
/* FC-NVME Cmd IU Flags */
#define FCNVME_CMD_FLAGS_DIRMASK 0x03
#define FCNVME_CMD_FLAGS_WRITE 0x01
#define FCNVME_CMD_FLAGS_READ 0x02
struct nvme_fc_cmd_iu {
__u8 scsi_id;
__u8 fc_id;
__be16 iu_len;
__u8 rsvd4[3];
__u8 flags;
__be64 connection_id;
__be32 csn;
__be32 data_len;
struct nvme_command sqe;
__be32 rsvd88[2];
};
#define NVME_FC_SIZEOF_ZEROS_RSP 12
struct nvme_fc_ersp_iu {
__u8 rsvd0[2];
__be16 iu_len;
__be32 rsn;
__be32 xfrd_len;
__be32 rsvd12;
struct nvme_completion cqe;
/* for now - no additional payload */
};
/* FC-NVME r1.03/16-119v0 NVME Link Services */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
FCNVME_LS_ACC = 2,
FCNVME_LS_CREATE_ASSOCIATION = 3,
FCNVME_LS_CREATE_CONNECTION = 4,
FCNVME_LS_DISCONNECT = 5,
};
/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
enum {
FCNVME_LSDESC_RSVD = 0x0,
FCNVME_LSDESC_RQST = 0x1,
FCNVME_LSDESC_RJT = 0x2,
FCNVME_LSDESC_CREATE_ASSOC_CMD = 0x3,
FCNVME_LSDESC_CREATE_CONN_CMD = 0x4,
FCNVME_LSDESC_DISCONN_CMD = 0x5,
FCNVME_LSDESC_CONN_ID = 0x6,
FCNVME_LSDESC_ASSOC_ID = 0x7,
};
/* ********** start of Link Service Descriptors ********** */
/*
 * fills in length of a descriptor. Structure minus descriptor header
*/
static inline __be32 fcnvme_lsdesc_len(size_t sz)
{
return cpu_to_be32(sz - (2 * sizeof(u32)));
}
struct fcnvme_ls_rqst_w0 {
u8 ls_cmd; /* FCNVME_LS_xxx */
u8 zeros[3];
};
/* FCNVME_LSDESC_RQST */
struct fcnvme_lsdesc_rqst {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
struct fcnvme_ls_rqst_w0 w0;
__be32 rsvd12;
};
/* FCNVME_LSDESC_RJT */
struct fcnvme_lsdesc_rjt {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
u8 rsvd8;
/*
	 * Reject reason and explanation codes are generic
	 * to ELS's from LS-3.
*/
u8 reason_code;
u8 reason_explanation;
u8 vendor;
__be32 rsvd12;
};
#define FCNVME_ASSOC_HOSTID_LEN 64
#define FCNVME_ASSOC_HOSTNQN_LEN 256
#define FCNVME_ASSOC_SUBNQN_LEN 256
/* FCNVME_LSDESC_CREATE_ASSOC_CMD */
struct fcnvme_lsdesc_cr_assoc_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be16 ersp_ratio;
__be16 rsvd10;
__be32 rsvd12[9];
__be16 cntlid;
__be16 sqsize;
__be32 rsvd52;
u8 hostid[FCNVME_ASSOC_HOSTID_LEN];
u8 hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
u8 subnqn[FCNVME_ASSOC_SUBNQN_LEN];
u8 rsvd632[384];
};
/* FCNVME_LSDESC_CREATE_CONN_CMD */
struct fcnvme_lsdesc_cr_conn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be16 ersp_ratio;
__be16 rsvd10;
__be32 rsvd12[9];
__be16 qid;
__be16 sqsize;
__be32 rsvd52;
};
/* Disconnect Scope Values */
enum {
FCNVME_DISCONN_ASSOCIATION = 0,
FCNVME_DISCONN_CONNECTION = 1,
};
/* FCNVME_LSDESC_DISCONN_CMD */
struct fcnvme_lsdesc_disconn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
u8 rsvd8[3];
/* note: scope is really a 1 bit field */
u8 scope; /* FCNVME_DISCONN_xxx */
__be32 rsvd12;
__be64 id;
};
/* FCNVME_LSDESC_CONN_ID */
struct fcnvme_lsdesc_conn_id {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be64 connection_id;
};
/* FCNVME_LSDESC_ASSOC_ID */
struct fcnvme_lsdesc_assoc_id {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
__be64 association_id;
};
/* r_ctl values */
enum {
FCNVME_RS_RCTL_DATA = 1,
FCNVME_RS_RCTL_XFER_RDY = 5,
FCNVME_RS_RCTL_RSP = 8,
};
/* ********** start of Link Services ********** */
/* FCNVME_LS_RJT */
struct fcnvme_ls_rjt {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
struct fcnvme_lsdesc_rjt rjt;
};
/* FCNVME_LS_ACC */
struct fcnvme_ls_acc_hdr {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
/* Followed by cmd-specific ACC descriptors, see next definitions */
};
/* FCNVME_LS_CREATE_ASSOCIATION */
struct fcnvme_ls_cr_assoc_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_cr_assoc_cmd assoc_cmd;
};
struct fcnvme_ls_cr_assoc_acc {
struct fcnvme_ls_acc_hdr hdr;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_conn_id connectid;
};
/* FCNVME_LS_CREATE_CONNECTION */
struct fcnvme_ls_cr_conn_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_cr_conn_cmd connect_cmd;
};
struct fcnvme_ls_cr_conn_acc {
struct fcnvme_ls_acc_hdr hdr;
struct fcnvme_lsdesc_conn_id connectid;
};
/* FCNVME_LS_DISCONNECT */
struct fcnvme_ls_disconnect_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_disconn_cmd discon_cmd;
};
struct fcnvme_ls_disconnect_acc {
struct fcnvme_ls_acc_hdr hdr;
};
/*
* Yet to be defined in FC-NVME:
*/
#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
#endif /* _NVME_FC_H */

Some files were not shown because too many files have changed in this diff Show More