Merge branch 'for-4.3/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"On top of the 4.3 core block IO changes, here are the driver related
changes for 4.3. Basically just NVMe and nbd this time around:
- NVMe:
- PRACT PI improvement from Alok Pandey.
- Cleanups and improvements to submission queue entry writing and
doorbell ringing, using the controller memory buffer (CMB) if
available. From Jon Derrick.
- From Keith, support for setting queue maximum segments, and
reset support.
- Also from Jon, a fixup for a u64 division issue on 32-bit archs and
wiring up of the reset support through an ioctl.
- Two small cleanups from Matias and Sunad.
- Various code cleanups and fixes from Markus Pargmann"
* 'for-4.3/drivers' of git://git.kernel.dk/linux-block:
NVMe: Using PRACT bit to generate and verify PI by controller
NVMe: Remove unreachable code in nvme_abort_req
NVMe: Add nvme subsystem reset IOCTL
NVMe: Add nvme subsystem reset support
NVMe: removed unused nn var from nvme_dev_add
NVMe: Set queue max segments
nbd: flags is a u32 variable
nbd: Rename functions for clearness of recv/send path
nbd: Change 'disconnect' to be boolean
nbd: Add debugfs entries
nbd: Remove variable 'pid'
nbd: Move clear queue debug message
nbd: Remove 'harderror' and propagate error properly
nbd: restructure sock_shutdown
nbd: sock_shutdown, remove conditional lock
nbd: Fix timeout detection
nvme: Fixes u64 division which breaks i386 builds
NVMe: Use CMB for the IO SQes if available
NVMe: Unify SQ entry writing and doorbell ringing
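As background, and not part of the pull itself: a minimal userspace sketch of exercising the reset wiring mentioned above. NVME_IOCTL_RESET and NVME_IOCTL_SUBSYS_RESET are the real definitions from the include/uapi/linux/nvme.h hunk at the end of the diff; the /dev/nvme0 device path and the assumption that installed uapi headers match this tree are mine.

/*
 * Hedged sketch: poke the two reset ioctls from userspace.
 * Assumes the controller character device is /dev/nvme0 and that
 * <linux/nvme.h> is the exported uapi header from this tree.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme.h>  /* NVME_IOCTL_RESET, NVME_IOCTL_SUBSYS_RESET */

int main(void)
{
        int fd = open("/dev/nvme0", O_RDWR);  /* assumed device node */

        if (fd < 0) {
                perror("open /dev/nvme0");
                return 1;
        }

        /* Controller reset, wired up earlier in this series. */
        if (ioctl(fd, NVME_IOCTL_RESET) < 0)
                perror("NVME_IOCTL_RESET");

        /*
         * Subsystem reset; the driver returns -ENOTTY when the
         * controller does not advertise subsystem reset support.
         */
        if (ioctl(fd, NVME_IOCTL_SUBSYS_RESET) < 0)
                perror("NVME_IOCTL_SUBSYS_RESET");

        close(fd);
        return 0;
}

On controllers older than NVMe 1.1 the second call is expected to fail, matching the !dev->subsystem check in nvme_subsys_reset() below.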
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -72,6 +72,10 @@ module_param(nvme_char_major, int, 0);
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
+static bool use_cmb_sqes = true;
+module_param(use_cmb_sqes, bool, 0644);
+MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
+
 static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
@@ -103,6 +107,7 @@ struct nvme_queue {
        char irqname[24];       /* nvme4294967295-65535\0 */
        spinlock_t q_lock;
        struct nvme_command *sq_cmds;
+       struct nvme_command __iomem *sq_cmds_io;
        volatile struct nvme_completion *cqes;
        struct blk_mq_tags **tags;
        dma_addr_t sq_dma_addr;
@@ -379,27 +384,28 @@ static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag,
  *
  * Safe to use from interrupt context
  */
-static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
+                                               struct nvme_command *cmd)
 {
        u16 tail = nvmeq->sq_tail;
 
-       memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+       if (nvmeq->sq_cmds_io)
+               memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+       else
+               memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+
        if (++tail == nvmeq->q_depth)
                tail = 0;
        writel(tail, nvmeq->q_db);
        nvmeq->sq_tail = tail;
-
-       return 0;
 }
 
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 {
        unsigned long flags;
-       int ret;
        spin_lock_irqsave(&nvmeq->q_lock, flags);
-       ret = __nvme_submit_cmd(nvmeq, cmd);
+       __nvme_submit_cmd(nvmeq, cmd);
        spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-       return ret;
 }
 
 static __le64 **iod_list(struct nvme_iod *iod)
@@ -730,18 +736,16 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
 static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
                struct nvme_iod *iod)
 {
-       struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+       struct nvme_command cmnd;
 
-       memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
-       cmnd->rw.command_id = req->tag;
+       memcpy(&cmnd, req->cmd, sizeof(cmnd));
+       cmnd.rw.command_id = req->tag;
        if (req->nr_phys_segments) {
-               cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-               cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+               cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+               cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
        }
 
-       if (++nvmeq->sq_tail == nvmeq->q_depth)
-               nvmeq->sq_tail = 0;
-       writel(nvmeq->sq_tail, nvmeq->q_db);
+       __nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 /*
@@ -754,45 +758,41 @@ static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 {
        struct nvme_dsm_range *range =
                                (struct nvme_dsm_range *)iod_list(iod)[0];
-       struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+       struct nvme_command cmnd;
 
        range->cattr = cpu_to_le32(0);
        range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
        range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 
-       memset(cmnd, 0, sizeof(*cmnd));
-       cmnd->dsm.opcode = nvme_cmd_dsm;
-       cmnd->dsm.command_id = req->tag;
-       cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-       cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
-       cmnd->dsm.nr = 0;
-       cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+       memset(&cmnd, 0, sizeof(cmnd));
+       cmnd.dsm.opcode = nvme_cmd_dsm;
+       cmnd.dsm.command_id = req->tag;
+       cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
+       cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
+       cmnd.dsm.nr = 0;
+       cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-       if (++nvmeq->sq_tail == nvmeq->q_depth)
-               nvmeq->sq_tail = 0;
-       writel(nvmeq->sq_tail, nvmeq->q_db);
+       __nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                                                                int cmdid)
 {
-       struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+       struct nvme_command cmnd;
 
-       memset(cmnd, 0, sizeof(*cmnd));
-       cmnd->common.opcode = nvme_cmd_flush;
-       cmnd->common.command_id = cmdid;
-       cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+       memset(&cmnd, 0, sizeof(cmnd));
+       cmnd.common.opcode = nvme_cmd_flush;
+       cmnd.common.command_id = cmdid;
+       cmnd.common.nsid = cpu_to_le32(ns->ns_id);
 
-       if (++nvmeq->sq_tail == nvmeq->q_depth)
-               nvmeq->sq_tail = 0;
-       writel(nvmeq->sq_tail, nvmeq->q_db);
+       __nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                                                        struct nvme_ns *ns)
 {
        struct request *req = iod_get_private(iod);
-       struct nvme_command *cmnd;
+       struct nvme_command cmnd;
        u16 control = 0;
        u32 dsmgmt = 0;
 
@@ -804,19 +804,16 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
        if (req->cmd_flags & REQ_RAHEAD)
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
-       cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-       memset(cmnd, 0, sizeof(*cmnd));
+       memset(&cmnd, 0, sizeof(cmnd));
+       cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+       cmnd.rw.command_id = req->tag;
+       cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
+       cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
+       cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+       cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
 
-       cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-       cmnd->rw.command_id = req->tag;
-       cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-       cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-       cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
-       cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-       cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
-       if (blk_integrity_rq(req)) {
-               cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+       if (ns->ms) {
                switch (ns->pi_type) {
                case NVME_NS_DPS_PI_TYPE3:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -825,19 +822,21 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                case NVME_NS_DPS_PI_TYPE2:
                        control |= NVME_RW_PRINFO_PRCHK_GUARD |
                                        NVME_RW_PRINFO_PRCHK_REF;
-                       cmnd->rw.reftag = cpu_to_le32(
+                       cmnd.rw.reftag = cpu_to_le32(
                                        nvme_block_nr(ns, blk_rq_pos(req)));
                        break;
                }
-       } else if (ns->ms)
-               control |= NVME_RW_PRINFO_PRACT;
+               if (blk_integrity_rq(req))
+                       cmnd.rw.metadata =
+                               cpu_to_le64(sg_dma_address(iod->meta_sg));
+               else
+                       control |= NVME_RW_PRINFO_PRACT;
+       }
 
-       cmnd->rw.control = cpu_to_le16(control);
-       cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+       cmnd.rw.control = cpu_to_le16(control);
+       cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-       if (++nvmeq->sq_tail == nvmeq->q_depth)
-               nvmeq->sq_tail = 0;
-       writel(nvmeq->sq_tail, nvmeq->q_db);
+       __nvme_submit_cmd(nvmeq, &cmnd);
 
        return 0;
 }
@@ -1080,7 +1079,8 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
        c.common.command_id = req->tag;
 
        blk_mq_free_request(req);
-       return __nvme_submit_cmd(nvmeq, &c);
+       __nvme_submit_cmd(nvmeq, &c);
+       return 0;
 }
 
 static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
@@ -1103,7 +1103,8 @@ static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
 
        cmd->common.command_id = req->tag;
 
-       return nvme_submit_cmd(nvmeq, cmd);
+       nvme_submit_cmd(nvmeq, cmd);
+       return 0;
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1315,12 +1316,7 @@ static void nvme_abort_req(struct request *req)
 
        dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
                                                        nvmeq->qid);
-       if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
-               dev_warn(nvmeq->q_dmadev,
-                               "Could not abort I/O %d QID %d",
-                               req->tag, nvmeq->qid);
-               blk_mq_free_request(abort_req);
-       }
+       nvme_submit_cmd(dev->queues[0], &cmd);
 }
 
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1374,7 +1370,8 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+       if (nvmeq->sq_cmds)
+               dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
                                        nvmeq->sq_cmds, nvmeq->sq_dma_addr);
        kfree(nvmeq);
 }
@@ -1447,6 +1444,47 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
        spin_unlock_irq(&nvmeq->q_lock);
 }
 
+static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+                               int entry_size)
+{
+       int q_depth = dev->q_depth;
+       unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
+
+       if (q_size_aligned * nr_io_queues > dev->cmb_size) {
+               u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
+               mem_per_q = round_down(mem_per_q, dev->page_size);
+               q_depth = div_u64(mem_per_q, entry_size);
+
+               /*
+                * Ensure the reduced q_depth is above some threshold where it
+                * would be better to map queues in system memory with the
+                * original depth
+                */
+               if (q_depth < 64)
+                       return -ENOMEM;
+       }
+
+       return q_depth;
+}
+
+static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+                               int qid, int depth)
+{
+       if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+               unsigned offset = (qid - 1) *
+                                       roundup(SQ_SIZE(depth), dev->page_size);
+               nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+               nvmeq->sq_cmds_io = dev->cmb + offset;
+       } else {
+               nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+                                       &nvmeq->sq_dma_addr, GFP_KERNEL);
+               if (!nvmeq->sq_cmds)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                                                        int depth)
 {
@@ -1459,9 +1497,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        if (!nvmeq->cqes)
                goto free_nvmeq;
 
-       nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-                                       &nvmeq->sq_dma_addr, GFP_KERNEL);
-       if (!nvmeq->sq_cmds)
+       if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
                goto free_cqdma;
 
        nvmeq->q_dmadev = dev->dev;
@@ -1696,6 +1732,12 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
                page_shift = dev_page_max;
        }
 
+       dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+                                               NVME_CAP_NSSRC(cap) : 0;
+
+       if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
+               writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+
        result = nvme_disable_ctrl(dev, cap);
        if (result < 0)
                return result;
@@ -1856,6 +1898,15 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns,
        return status;
 }
 
+static int nvme_subsys_reset(struct nvme_dev *dev)
+{
+       if (!dev->subsystem)
+               return -ENOTTY;
+
+       writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
+       return 0;
+}
+
 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
                                                        unsigned long arg)
 {
@@ -1989,7 +2040,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
                                                                !ns->ext)
                nvme_init_integrity(ns);
 
-       if (ns->ms && !blk_get_integrity(disk))
+       if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
                set_capacity(disk, 0);
        else
                set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2020,7 +2071,10 @@ static int nvme_kthread(void *data)
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
-                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
+                       u32 csts = readl(&dev->bar->csts);
+
+                       if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
+                                                       csts & NVME_CSTS_CFS) {
                                if (work_busy(&dev->reset_work))
                                        continue;
                                list_del_init(&dev->node);
@@ -2080,8 +2134,11 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
        list_add_tail(&ns->list, &dev->namespaces);
 
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       if (dev->max_hw_sectors)
+       if (dev->max_hw_sectors) {
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+               blk_queue_max_segments(ns->queue,
+                       ((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+       }
        if (dev->stripe_size)
                blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
        if (dev->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2159,6 +2216,58 @@ static int set_queue_count(struct nvme_dev *dev, int count)
        return min(result & 0xffff, result >> 16) + 1;
 }
 
+static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+{
+       u64 szu, size, offset;
+       u32 cmbloc;
+       resource_size_t bar_size;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+       void __iomem *cmb;
+       dma_addr_t dma_addr;
+
+       if (!use_cmb_sqes)
+               return NULL;
+
+       dev->cmbsz = readl(&dev->bar->cmbsz);
+       if (!(NVME_CMB_SZ(dev->cmbsz)))
+               return NULL;
+
+       cmbloc = readl(&dev->bar->cmbloc);
+
+       szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
+       size = szu * NVME_CMB_SZ(dev->cmbsz);
+       offset = szu * NVME_CMB_OFST(cmbloc);
+       bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+
+       if (offset > bar_size)
+               return NULL;
+
+       /*
+        * Controllers may support a CMB size larger than their BAR,
+        * for example, due to being behind a bridge. Reduce the CMB to
+        * the reported size of the BAR
+        */
+       if (size > bar_size - offset)
+               size = bar_size - offset;
+
+       dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+       cmb = ioremap_wc(dma_addr, size);
+       if (!cmb)
+               return NULL;
+
+       dev->cmb_dma_addr = dma_addr;
+       dev->cmb_size = size;
+       return cmb;
+}
+
+static inline void nvme_release_cmb(struct nvme_dev *dev)
+{
+       if (dev->cmb) {
+               iounmap(dev->cmb);
+               dev->cmb = NULL;
+       }
+}
+
 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 {
        return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
@@ -2177,6 +2286,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (result < nr_io_queues)
                nr_io_queues = result;
 
+       if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+               result = nvme_cmb_qdepth(dev, nr_io_queues,
+                               sizeof(struct nvme_command));
+               if (result > 0)
+                       dev->q_depth = result;
+               else
+                       nvme_release_cmb(dev);
+       }
+
        size = db_bar_size(dev, nr_io_queues);
        if (size > 8192) {
                iounmap(dev->bar);
@@ -2344,7 +2462,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        int res;
-       unsigned nn;
        struct nvme_id_ctrl *ctrl;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
@@ -2354,7 +2471,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
                return -EIO;
        }
 
-       nn = le32_to_cpup(&ctrl->nn);
        dev->oncs = le16_to_cpup(&ctrl->oncs);
        dev->abort_limit = ctrl->acl + 1;
        dev->vwc = ctrl->vwc;
@@ -2440,6 +2556,8 @@ static int nvme_dev_map(struct nvme_dev *dev)
        dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
        dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
+       if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+               dev->cmb = nvme_map_cmb(dev);
 
        return 0;
 
@@ -2820,6 +2938,8 @@ static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
        case NVME_IOCTL_RESET:
                dev_warn(dev->dev, "resetting controller\n");
                return nvme_reset(dev);
+       case NVME_IOCTL_SUBSYS_RESET:
+               return nvme_subsys_reset(dev);
        default:
                return -ENOTTY;
        }
@@ -3145,6 +3265,7 @@ static void nvme_remove(struct pci_dev *pdev)
        nvme_dev_remove_admin(dev);
        device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
        nvme_free_queues(dev, 0);
+       nvme_release_cmb(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
 }
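To make the nvme_cmb_qdepth() sizing above concrete, here is a hedged standalone sketch of the same arithmetic with invented example values (a 256 KiB CMB shared by 8 I/O queues, 64-byte submission entries, 4 KiB device pages). It is an illustration of the math only, not kernel code.

/* Illustration only: mirrors the nvme_cmb_qdepth() math with made-up values. */
#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, y)          ((((x) + (y) - 1) / (y)) * (y))
#define ROUND_DOWN(x, y)        (((x) / (y)) * (y))

int main(void)
{
        uint64_t cmb_size = 256 * 1024;         /* example CMB: 256 KiB */
        unsigned int nr_io_queues = 8;
        unsigned int entry_size = 64;           /* NVMe SQ entries are 64 bytes */
        unsigned int page_size = 4096;
        unsigned int q_depth = 1024;            /* requested depth before capping */

        /* Each queue's SQ ring is padded out to a whole number of pages. */
        uint64_t q_size_aligned = ROUND_UP((uint64_t)q_depth * entry_size, page_size);

        if (q_size_aligned * nr_io_queues > cmb_size) {
                /* 256 KiB / 8 = 32 KiB per queue, already page aligned... */
                uint64_t mem_per_q = ROUND_DOWN(cmb_size / nr_io_queues, page_size);

                /* ...giving 32768 / 64 = 512 entries per queue. */
                q_depth = mem_per_q / entry_size;
        }

        /* The kernel gives up below 64 entries and keeps the SQs in DRAM. */
        printf("q_depth = %u%s\n", q_depth,
               q_depth < 64 ? " (too shallow, would fall back)" : "");
        return 0;
}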
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@ struct nvme_bar {
        __u32                   cc;     /* Controller Configuration */
        __u32                   rsvd1;  /* Reserved */
        __u32                   csts;   /* Controller Status */
-       __u32                   rsvd2;  /* Reserved */
+       __u32                   nssr;   /* Subsystem Reset */
        __u32                   aqa;    /* Admin Queue Attributes */
        __u64                   asq;    /* Admin SQ Base Address */
        __u64                   acq;    /* Admin CQ Base Address */
+       __u32                   cmbloc; /* Controller Memory Buffer Location */
+       __u32                   cmbsz;  /* Controller Memory Buffer Size */
 };
 
 #define NVME_CAP_MQES(cap)     ((cap) & 0xffff)
 #define NVME_CAP_TIMEOUT(cap)  (((cap) >> 24) & 0xff)
 #define NVME_CAP_STRIDE(cap)   (((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap)    (((cap) >> 36) & 0x1)
 #define NVME_CAP_MPSMIN(cap)   (((cap) >> 48) & 0xf)
 #define NVME_CAP_MPSMAX(cap)   (((cap) >> 52) & 0xf)
 
+#define NVME_CMB_BIR(cmbloc)   ((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc)  (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz)     (((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz)    (((cmbsz) >> 8) & 0xf)
+
+#define NVME_CMB_WDS(cmbsz)    ((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz)    ((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz)  ((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz)    ((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz)    ((cmbsz) & 0x1)
+
 enum {
        NVME_CC_ENABLE          = 1 << 0,
        NVME_CC_CSS_NVM         = 0 << 4,
@@ -55,6 +69,7 @@ enum {
        NVME_CC_IOCQES          = 4 << 20,
        NVME_CSTS_RDY           = 1 << 0,
        NVME_CSTS_CFS           = 1 << 1,
+       NVME_CSTS_NSSRO         = 1 << 4,
        NVME_CSTS_SHST_NORMAL   = 0 << 2,
        NVME_CSTS_SHST_OCCUR    = 1 << 2,
        NVME_CSTS_SHST_CMPLT    = 2 << 2,
@@ -97,9 +112,14 @@ struct nvme_dev {
        char                    serial[20];
        char                    model[40];
        char                    firmware_rev[8];
+       bool                    subsystem;
        u32                     max_hw_sectors;
        u32                     stripe_size;
        u32                     page_size;
+       void __iomem            *cmb;
+       dma_addr_t              cmb_dma_addr;
+       u64                     cmb_size;
+       u32                     cmbsz;
        u16                     oncs;
        u16                     abort_limit;
        u8                      event_limit;
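The CMBLOC/CMBSZ decode macros above compose as follows. A hedged standalone sketch: the shifts and masks are copied from the hunk, but the register values are invented for the example.

/* Illustration of the CMBLOC/CMBSZ decoding; example register values invented. */
#include <stdint.h>
#include <stdio.h>

#define NVME_CMB_BIR(cmbloc)    ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)   (((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)      (((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)     (((cmbsz) >> 8) & 0xf)

int main(void)
{
        uint32_t cmbloc = 0x00001002;   /* offset = 1 unit, BAR indicator = 2 */
        uint32_t cmbsz  = 0x00100001;   /* SZ = 256, SZU = 0 (4 KiB units), SQS set */

        /* Size unit: 4 KiB << (4 * SZU), i.e. 4 KiB, 64 KiB, 1 MiB, ... */
        uint64_t szu  = (uint64_t)1 << (12 + 4 * NVME_CMB_SZU(cmbsz));
        uint64_t size = szu * NVME_CMB_SZ(cmbsz);
        uint64_t off  = szu * NVME_CMB_OFST(cmbloc);

        printf("CMB in BAR%u: %llu bytes at offset %llu\n",
               NVME_CMB_BIR(cmbloc),
               (unsigned long long)size, (unsigned long long)off);
        return 0;
}

With these values the CMB lives in BAR2, is 1 MiB (256 units of 4 KiB), and starts 4 KiB into the BAR.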
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -584,5 +584,6 @@ struct nvme_passthru_cmd {
 #define NVME_IOCTL_SUBMIT_IO   _IOW('N', 0x42, struct nvme_user_io)
 #define NVME_IOCTL_IO_CMD      _IOWR('N', 0x43, struct nvme_passthru_cmd)
 #define NVME_IOCTL_RESET       _IO('N', 0x44)
+#define NVME_IOCTL_SUBSYS_RESET        _IO('N', 0x45)
 
 #endif /* _UAPI_LINUX_NVME_H */