Merge tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl updates from Dan Williams:
 "More preparation and plumbing work in the CXL subsystem.

  From an end user perspective the highlight here is lighting up the CXL
  Persistent Memory related commands (label read / write) with the
  generic ioctl() front-end in LIBNVDIMM.

  Otherwise, the ability to instantiate new persistent and volatile
  memory regions is still on track for v5.17.

  Summary:

   - Fix support for platforms that do not enumerate every ACPI0016 (CXL
     Host Bridge) in the CHBS (CXL Host Bridge Structure).

   - Introduce a common pci_find_dvsec_capability() helper and clean up
     open-coded implementations in various drivers.

   - Add 'cxl_test' for regression testing CXL subsystem ABIs.
     'cxl_test' is a module built from tools/testing/cxl/ that mocks up
     a CXL topology to augment the nascent support for emulation of CXL
     devices in QEMU.

   - Convert libnvdimm to use the uuid API.

   - Complete the definition of CXL namespace labels in libnvdimm.

   - Tunnel libnvdimm label operations from nd_ioctl() back to the CXL
     mailbox driver. Enable 'ndctl {read,write}-labels' for CXL.

   - Continue to sort and refactor functionality into distinct driver
     and core-infrastructure buckets. For example, mailbox handling is
     now a generic core capability consumed by the PCI and cxl_test
     drivers"

* tag 'cxl-for-5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (34 commits)
  ocxl: Use pci core's DVSEC functionality
  cxl/pci: Use pci core's DVSEC functionality
  PCI: Add pci_find_dvsec_capability to find designated VSEC
  cxl/pci: Split cxl_pci_setup_regs()
  cxl/pci: Add @base to cxl_register_map
  cxl/pci: Make more use of cxl_register_map
  cxl/pci: Remove pci request/release regions
  cxl/pci: Fix NULL vs ERR_PTR confusion
  cxl/pci: Remove dev_dbg for unknown register blocks
  cxl/pci: Convert register block identifiers to an enum
  cxl/acpi: Do not fail cxl_acpi_probe() based on a missing CHBS
  cxl/pci: Disambiguate cxl_pci further from cxl_mem
  Documentation/cxl: Add bus internal docs
  cxl/core: Split decoder setup into alloc + add
  tools/testing/cxl: Introduce a mock memory device + driver
  cxl/mbox: Move command definitions to common location
  cxl/bus: Populate the target list at decoder create
  tools/testing/cxl: Introduce a mocked-up CXL port hierarchy
  cxl/pmem: Add support for multiple nvdimm-bridge objects
  cxl/pmem: Translate NVDIMM label commands to CXL label commands
  ...
Merged by: Linus Torvalds
Date: 2021-11-08 11:49:48 -08:00

 36 files changed, 3275 insertions(+), 1496 deletions(-)

diff --git a/Documentation/driver-api/cxl/memory-devices.rst b/Documentation/driver-api/cxl/memory-devices.rst

@@ -39,12 +39,18 @@ CXL Core
 .. kernel-doc:: drivers/cxl/core/bus.c
    :doc: cxl core
 
+.. kernel-doc:: drivers/cxl/core/bus.c
+   :identifiers:
+
 .. kernel-doc:: drivers/cxl/core/pmem.c
    :doc: cxl pmem
 
 .. kernel-doc:: drivers/cxl/core/regs.c
    :doc: cxl registers
 
+.. kernel-doc:: drivers/cxl/core/mbox.c
+   :doc: cxl mbox
+
 External Interfaces
 ===================

diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c

@@ -107,7 +107,8 @@ static int get_max_afu_index(struct pci_dev *dev, int *afu_idx)
 	int pos;
 	u32 val;
 
-	pos = find_dvsec_from_pos(dev, OCXL_DVSEC_FUNC_ID, 0);
+	pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM,
+					OCXL_DVSEC_FUNC_ID);
 	if (!pos)
		return -ESRCH;

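For reference, the helper added by this series has the signature u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec) and returns the config-space offset of the matching Designated Vendor-Specific Extended Capability, or 0 when the device does not expose it. A minimal caller sketch (hypothetical driver code, not part of this diff):

	#include <linux/pci.h>

	static int example_attach(struct pci_dev *pdev, u16 dvsec_id)
	{
		u16 pos;

		/* walk the PCIe extended capabilities for a vendor's DVSEC */
		pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_IBM, dvsec_id);
		if (!pos)
			return -ENODEV;	/* DVSEC not implemented by this device */

		/* pos points at the DVSEC header; register offsets are relative to it */
		return 0;
	}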
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c

@@ -52,6 +52,12 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
 		return -EINVAL;
 	}
 
+	if (CFMWS_INTERLEAVE_WAYS(cfmws) > CXL_DECODER_MAX_INTERLEAVE) {
+		dev_err(dev, "CFMWS Interleave Ways (%d) too large\n",
+			CFMWS_INTERLEAVE_WAYS(cfmws));
+		return -EINVAL;
+	}
+
 	expected_len = struct_size((cfmws), interleave_targets,
 			  CFMWS_INTERLEAVE_WAYS(cfmws));
@@ -71,11 +77,11 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
 static void cxl_add_cfmws_decoders(struct device *dev,
 				   struct cxl_port *root_port)
 {
+	int target_map[CXL_DECODER_MAX_INTERLEAVE];
 	struct acpi_cedt_cfmws *cfmws;
 	struct cxl_decoder *cxld;
 	acpi_size len, cur = 0;
 	void *cedt_subtable;
-	unsigned long flags;
 	int rc;
 
 	len = acpi_cedt->length - sizeof(*acpi_cedt);
@@ -83,6 +89,7 @@ static void cxl_add_cfmws_decoders(struct device *dev,
 	while (cur < len) {
 		struct acpi_cedt_header *c = cedt_subtable + cur;
+		int i;
 
 		if (c->type != ACPI_CEDT_TYPE_CFMWS) {
 			cur += c->length;
@@ -108,24 +115,39 @@ static void cxl_add_cfmws_decoders(struct device *dev,
 			continue;
 		}
 
-		flags = cfmws_to_decoder_flags(cfmws->restrictions);
-		cxld = devm_cxl_add_decoder(dev, root_port,
-					    CFMWS_INTERLEAVE_WAYS(cfmws),
-					    cfmws->base_hpa, cfmws->window_size,
-					    CFMWS_INTERLEAVE_WAYS(cfmws),
-					    CFMWS_INTERLEAVE_GRANULARITY(cfmws),
-					    CXL_DECODER_EXPANDER,
-					    flags);
+		for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+			target_map[i] = cfmws->interleave_targets[i];
 
-		if (IS_ERR(cxld)) {
+		cxld = cxl_decoder_alloc(root_port,
+					 CFMWS_INTERLEAVE_WAYS(cfmws));
+		if (IS_ERR(cxld))
+			goto next;
+
+		cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+		cxld->target_type = CXL_DECODER_EXPANDER;
+		cxld->range = (struct range) {
+			.start = cfmws->base_hpa,
+			.end = cfmws->base_hpa + cfmws->window_size - 1,
+		};
+		cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
+		cxld->interleave_granularity =
+			CFMWS_INTERLEAVE_GRANULARITY(cfmws);
+
+		rc = cxl_decoder_add(cxld, target_map);
+		if (rc)
+			put_device(&cxld->dev);
+		else
+			rc = cxl_decoder_autoremove(dev, cxld);
+		if (rc) {
 			dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
 				cfmws->base_hpa, cfmws->base_hpa +
 				cfmws->window_size - 1);
-		} else {
-			dev_dbg(dev, "add: %s range %#llx-%#llx\n",
-				dev_name(&cxld->dev), cfmws->base_hpa,
-				cfmws->base_hpa + cfmws->window_size - 1);
+			goto next;
 		}
+		dev_dbg(dev, "add: %s range %#llx-%#llx\n",
+			dev_name(&cxld->dev), cfmws->base_hpa,
+			cfmws->base_hpa + cfmws->window_size - 1);
+next:
 		cur += c->length;
 	}
 }
@@ -182,15 +204,7 @@ static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
 	return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
 }
 
-struct cxl_walk_context {
-	struct device *dev;
-	struct pci_bus *root;
-	struct cxl_port *port;
-	int error;
-	int count;
-};
-
-static int match_add_root_ports(struct pci_dev *pdev, void *data)
+__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
 {
 	struct cxl_walk_context *ctx = data;
 	struct pci_bus *root_bus = ctx->root;
@@ -239,7 +253,8 @@ static struct cxl_dport *find_dport_by_dev(struct cxl_port *port, struct device
 	return NULL;
 }
 
-static struct acpi_device *to_cxl_host_bridge(struct device *dev)
+__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
+					      struct device *dev)
 {
 	struct acpi_device *adev = to_acpi_device(dev);
@@ -257,11 +272,12 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
  */
 static int add_host_bridge_uport(struct device *match, void *arg)
 {
-	struct acpi_device *bridge = to_cxl_host_bridge(match);
 	struct cxl_port *root_port = arg;
 	struct device *host = root_port->dev.parent;
+	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
 	struct acpi_pci_root *pci_root;
 	struct cxl_walk_context ctx;
+	int single_port_map[1], rc;
 	struct cxl_decoder *cxld;
 	struct cxl_dport *dport;
 	struct cxl_port *port;
@@ -272,7 +288,7 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 	dport = find_dport_by_dev(root_port, match);
 	if (!dport) {
 		dev_dbg(host, "host bridge expected and not found\n");
-		return -ENODEV;
+		return 0;
 	}
 
 	port = devm_cxl_add_port(host, match, dport->component_reg_phys,
@@ -297,22 +313,46 @@ static int add_host_bridge_uport(struct device *match, void *arg)
 		return -ENODEV;
 	if (ctx.error)
 		return ctx.error;
+	if (ctx.count > 1)
+		return 0;
 
 	/* TODO: Scan CHBCR for HDM Decoder resources */
 
 	/*
-	 * In the single-port host-bridge case there are no HDM decoders
-	 * in the CHBCR and a 1:1 passthrough decode is implied.
+	 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability
+	 * Structure) single ported host-bridges need not publish a decoder
+	 * capability when a passthrough decode can be assumed, i.e. all
+	 * transactions that the uport sees are claimed and passed to the single
+	 * dport. Disable the range until the first CXL region is enumerated /
+	 * activated.
 	 */
-	if (ctx.count == 1) {
-		cxld = devm_cxl_add_passthrough_decoder(host, port);
-		if (IS_ERR(cxld))
-			return PTR_ERR(cxld);
+	cxld = cxl_decoder_alloc(port, 1);
+	if (IS_ERR(cxld))
+		return PTR_ERR(cxld);
+
+	cxld->interleave_ways = 1;
+	cxld->interleave_granularity = PAGE_SIZE;
+	cxld->target_type = CXL_DECODER_EXPANDER;
+	cxld->range = (struct range) {
+		.start = 0,
+		.end = -1,
+	};
+
+	device_lock(&port->dev);
+	dport = list_first_entry(&port->dports, typeof(*dport), list);
+	device_unlock(&port->dev);
+
+	single_port_map[0] = dport->port_id;
+
+	rc = cxl_decoder_add(cxld, single_port_map);
+	if (rc)
+		put_device(&cxld->dev);
+	else
+		rc = cxl_decoder_autoremove(host, cxld);
 
+	if (rc == 0)
 		dev_dbg(host, "add: %s\n", dev_name(&cxld->dev));
-	}
-
-	return 0;
+	return rc;
 }
@@ -323,7 +363,7 @@ static int add_host_bridge_dport(struct device *match, void *arg)
 	struct acpi_cedt_chbs *chbs;
 	struct cxl_port *root_port = arg;
 	struct device *host = root_port->dev.parent;
-	struct acpi_device *bridge = to_cxl_host_bridge(match);
+	struct acpi_device *bridge = to_cxl_host_bridge(host, match);
 
 	if (!bridge)
 		return 0;
@@ -337,9 +377,11 @@ static int add_host_bridge_dport(struct device *match, void *arg)
 	}
 
 	chbs = cxl_acpi_match_chbs(host, uid);
-	if (IS_ERR(chbs))
-		dev_dbg(host, "No CHBS found for Host Bridge: %s\n",
-			dev_name(match));
+	if (IS_ERR(chbs)) {
+		dev_warn(host, "No CHBS found for Host Bridge: %s\n",
+			 dev_name(match));
+		return 0;
+	}
 
 	rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
 	if (rc) {
@@ -375,6 +417,17 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
 	return 1;
 }
 
+static u32 cedt_instance(struct platform_device *pdev)
+{
+	const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);
+
+	if (native_acpi0017 && *native_acpi0017)
+		return 0;
+
+	/* for cxl_test request a non-canonical instance */
+	return U32_MAX;
+}
+
 static int cxl_acpi_probe(struct platform_device *pdev)
 {
 	int rc;
@@ -388,7 +441,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
 		return PTR_ERR(root_port);
 	dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
 
-	status = acpi_get_table(ACPI_SIG_CEDT, 0, &acpi_cedt);
+	status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
 	if (ACPI_FAILURE(status))
 		return -ENXIO;
@@ -419,9 +472,11 @@ out:
 	return 0;
 }
 
+static bool native_acpi0017 = true;
+
 static const struct acpi_device_id cxl_acpi_ids[] = {
-	{ "ACPI0017", 0 },
-	{ "", 0 },
+	{ "ACPI0017", (unsigned long) &native_acpi0017 },
+	{ },
 };
 MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile

@@ -6,3 +6,4 @@ cxl_core-y := bus.o
 cxl_core-y += pmem.o
 cxl_core-y += regs.o
 cxl_core-y += memdev.o
+cxl_core-y += mbox.o

diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c

@@ -453,50 +453,57 @@ err:
 }
 EXPORT_SYMBOL_GPL(cxl_add_dport);
 
-static struct cxl_decoder *
-cxl_decoder_alloc(struct cxl_port *port, int nr_targets, resource_size_t base,
-		  resource_size_t len, int interleave_ways,
-		  int interleave_granularity, enum cxl_decoder_type type,
-		  unsigned long flags)
+static int decoder_populate_targets(struct cxl_decoder *cxld,
+				    struct cxl_port *port, int *target_map)
 {
-	struct cxl_decoder *cxld;
+	int rc = 0, i;
+
+	if (!target_map)
+		return 0;
+
+	device_lock(&port->dev);
+	if (list_empty(&port->dports)) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < cxld->nr_targets; i++) {
+		struct cxl_dport *dport = find_dport(port, target_map[i]);
+
+		if (!dport) {
+			rc = -ENXIO;
+			goto out_unlock;
+		}
+		cxld->target[i] = dport;
+	}
+
+out_unlock:
+	device_unlock(&port->dev);
+
+	return rc;
+}
+
+struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
+{
+	struct cxl_decoder *cxld, cxld_const_init = {
+		.nr_targets = nr_targets,
+	};
 	struct device *dev;
 	int rc = 0;
 
-	if (interleave_ways < 1)
+	if (nr_targets > CXL_DECODER_MAX_INTERLEAVE || nr_targets < 1)
 		return ERR_PTR(-EINVAL);
 
-	device_lock(&port->dev);
-	if (list_empty(&port->dports))
-		rc = -EINVAL;
-	device_unlock(&port->dev);
-	if (rc)
-		return ERR_PTR(rc);
-
 	cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
 	if (!cxld)
 		return ERR_PTR(-ENOMEM);
+	memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));
 
 	rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
 	if (rc < 0)
 		goto err;
 
-	*cxld = (struct cxl_decoder) {
-		.id = rc,
-		.range = {
-			.start = base,
-			.end = base + len - 1,
-		},
-		.flags = flags,
-		.interleave_ways = interleave_ways,
-		.interleave_granularity = interleave_granularity,
-		.target_type = type,
-	};
-
-	/* handle implied target_list */
-	if (interleave_ways == 1)
-		cxld->target[0] =
-			list_first_entry(&port->dports, struct cxl_dport, list);
-
+	cxld->id = rc;
 	dev = &cxld->dev;
 	device_initialize(dev);
 	device_set_pm_not_required(dev);
@@ -514,41 +521,47 @@ err:
 	kfree(cxld);
 	return ERR_PTR(rc);
 }
+EXPORT_SYMBOL_GPL(cxl_decoder_alloc);
 
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-		     resource_size_t base, resource_size_t len,
-		     int interleave_ways, int interleave_granularity,
-		     enum cxl_decoder_type type, unsigned long flags)
+int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
 {
-	struct cxl_decoder *cxld;
+	struct cxl_port *port;
 	struct device *dev;
 	int rc;
 
-	cxld = cxl_decoder_alloc(port, nr_targets, base, len, interleave_ways,
-				 interleave_granularity, type, flags);
-	if (IS_ERR(cxld))
-		return cxld;
+	if (WARN_ON_ONCE(!cxld))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(IS_ERR(cxld)))
+		return PTR_ERR(cxld);
+
+	if (cxld->interleave_ways < 1)
+		return -EINVAL;
+
+	port = to_cxl_port(cxld->dev.parent);
+	rc = decoder_populate_targets(cxld, port, target_map);
+	if (rc)
+		return rc;
 
 	dev = &cxld->dev;
 	rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id);
 	if (rc)
-		goto err;
+		return rc;
 
-	rc = device_add(dev);
-	if (rc)
-		goto err;
-
-	rc = devm_add_action_or_reset(host, unregister_cxl_dev, dev);
-	if (rc)
-		return ERR_PTR(rc);
-	return cxld;
-
-err:
-	put_device(dev);
-	return ERR_PTR(rc);
+	return device_add(dev);
 }
-EXPORT_SYMBOL_GPL(devm_cxl_add_decoder);
+EXPORT_SYMBOL_GPL(cxl_decoder_add);
+
+static void cxld_unregister(void *dev)
+{
+	device_unregister(dev);
+}
+
+int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
+{
+	return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
+}
+EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);
 
 /**
  * __cxl_driver_register - register a driver for the cxl bus
@@ -635,6 +648,8 @@ static __init int cxl_core_init(void)
 {
 	int rc;
 
+	cxl_mbox_init();
+
 	rc = cxl_memdev_init();
 	if (rc)
 		return rc;
@@ -646,6 +661,7 @@ static __init int cxl_core_init(void)
 
 err:
 	cxl_memdev_exit();
+	cxl_mbox_exit();
 	return rc;
 }
@@ -653,6 +669,7 @@ static void cxl_core_exit(void)
 {
 	bus_unregister(&cxl_bus_type);
 	cxl_memdev_exit();
+	cxl_mbox_exit();
 }
 
 module_init(cxl_core_init);

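Net effect of the bus.c changes on callers: the one-shot devm_cxl_add_decoder() is replaced by an alloc / configure / add / autoremove sequence, so the target list can be validated against the port's registered dports before the decoder device becomes visible. A condensed sketch of the new flow, mirroring the acpi.c caller above (base, size, granularity, nr_targets, and target_map stand in for caller state):

	struct cxl_decoder *cxld;
	int rc;

	cxld = cxl_decoder_alloc(port, nr_targets);
	if (IS_ERR(cxld))
		return PTR_ERR(cxld);

	/* configure while the decoder is still invisible to userspace */
	cxld->interleave_ways = nr_targets;
	cxld->interleave_granularity = granularity;
	cxld->target_type = CXL_DECODER_EXPANDER;
	cxld->range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	rc = cxl_decoder_add(cxld, target_map);	/* validates targets, device_add() */
	if (rc)
		put_device(&cxld->dev);		/* allocated but never added */
	else
		rc = cxl_decoder_autoremove(host, cxld); /* devm-managed unregister */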
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h

@@ -9,12 +9,15 @@ extern const struct device_type cxl_nvdimm_type;
 
 extern struct attribute_group cxl_base_attribute_group;
 
-static inline void unregister_cxl_dev(void *dev)
-{
-	device_unregister(dev);
-}
+struct cxl_send_command;
+struct cxl_mem_query_commands;
+int cxl_query_cmd(struct cxl_memdev *cxlmd,
+		  struct cxl_mem_query_commands __user *q);
+int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s);
 
 int cxl_memdev_init(void);
 void cxl_memdev_exit(void);
+void cxl_mbox_init(void);
+void cxl_mbox_exit(void);
 
 #endif /* __CXL_CORE_H__ */

diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
new file, 787 lines
(File diff suppressed because it is too large.)

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c

@@ -8,6 +8,8 @@
 #include <cxlmem.h>
 #include "core.h"
 
+static DECLARE_RWSEM(cxl_memdev_rwsem);
+
 /*
  * An entire PCI topology full of devices should be enough for any
  * config
@@ -132,16 +134,53 @@ static const struct device_type cxl_memdev_type = {
 	.groups = cxl_memdev_attribute_groups,
 };
 
+/**
+ * set_exclusive_cxl_commands() - atomically disable user cxl commands
+ * @cxlm: cxl_mem instance to modify
+ * @cmds: bitmap of commands to mark exclusive
+ *
+ * Grab the cxl_memdev_rwsem in write mode to flush in-flight
+ * invocations of the ioctl path and then disable future execution of
+ * commands with the command ids set in @cmds.
+ */
+void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+{
+	down_write(&cxl_memdev_rwsem);
+	bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+		  CXL_MEM_COMMAND_ID_MAX);
+	up_write(&cxl_memdev_rwsem);
+}
+EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
+
+/**
+ * clear_exclusive_cxl_commands() - atomically enable user cxl commands
+ * @cxlm: cxl_mem instance to modify
+ * @cmds: bitmap of commands to mark available for userspace
+ */
+void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+{
+	down_write(&cxl_memdev_rwsem);
+	bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+		      CXL_MEM_COMMAND_ID_MAX);
+	up_write(&cxl_memdev_rwsem);
+}
+EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands);
+
+static void cxl_memdev_shutdown(struct device *dev)
+{
+	struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+
+	down_write(&cxl_memdev_rwsem);
+	cxlmd->cxlm = NULL;
+	up_write(&cxl_memdev_rwsem);
+}
+
 static void cxl_memdev_unregister(void *_cxlmd)
 {
 	struct cxl_memdev *cxlmd = _cxlmd;
 	struct device *dev = &cxlmd->dev;
-	struct cdev *cdev = &cxlmd->cdev;
-	const struct cdevm_file_operations *cdevm_fops;
-
-	cdevm_fops = container_of(cdev->ops, typeof(*cdevm_fops), fops);
-	cdevm_fops->shutdown(dev);
 
+	cxl_memdev_shutdown(dev);
 	cdev_device_del(&cxlmd->cdev, dev);
 	put_device(dev);
 }
@@ -149,7 +188,6 @@ static void cxl_memdev_unregister(void *_cxlmd)
 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 					   const struct file_operations *fops)
 {
-	struct pci_dev *pdev = cxlm->pdev;
 	struct cxl_memdev *cxlmd;
 	struct device *dev;
 	struct cdev *cdev;
@@ -166,7 +204,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
 
 	dev = &cxlmd->dev;
 	device_initialize(dev);
-	dev->parent = &pdev->dev;
+	dev->parent = cxlm->dev;
 	dev->bus = &cxl_bus_type;
 	dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
 	dev->type = &cxl_memdev_type;
@@ -181,16 +219,72 @@ err:
 	return ERR_PTR(rc);
 }
 
+static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
+			       unsigned long arg)
+{
+	switch (cmd) {
+	case CXL_MEM_QUERY_COMMANDS:
+		return cxl_query_cmd(cxlmd, (void __user *)arg);
+	case CXL_MEM_SEND_COMMAND:
+		return cxl_send_cmd(cxlmd, (void __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct cxl_memdev *cxlmd = file->private_data;
+	int rc = -ENXIO;
+
+	down_read(&cxl_memdev_rwsem);
+	if (cxlmd->cxlm)
+		rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+	up_read(&cxl_memdev_rwsem);
+
+	return rc;
+}
+
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+	struct cxl_memdev *cxlmd =
+		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+	get_device(&cxlmd->dev);
+	file->private_data = cxlmd;
+
+	return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+	struct cxl_memdev *cxlmd =
+		container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+	put_device(&cxlmd->dev);
+
+	return 0;
+}
+
+static const struct file_operations cxl_memdev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = cxl_memdev_ioctl,
+	.open = cxl_memdev_open,
+	.release = cxl_memdev_release_file,
+	.compat_ioctl = compat_ptr_ioctl,
+	.llseek = noop_llseek,
+};
+
 struct cxl_memdev *
-devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
-		    const struct cdevm_file_operations *cdevm_fops)
+devm_cxl_add_memdev(struct cxl_mem *cxlm)
 {
 	struct cxl_memdev *cxlmd;
 	struct device *dev;
 	struct cdev *cdev;
 	int rc;
 
-	cxlmd = cxl_memdev_alloc(cxlm, &cdevm_fops->fops);
+	cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops);
 	if (IS_ERR(cxlmd))
 		return cxlmd;
@@ -210,7 +304,7 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm)
 	if (rc)
 		goto err;
 
-	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+	rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
 	if (rc)
 		return ERR_PTR(rc);
 	return cxlmd;
@@ -220,7 +314,7 @@ err:
 	 * The cdev was briefly live, shutdown any ioctl operations that
 	 * saw that state.
 	 */
-	cdevm_fops->shutdown(dev);
+	cxl_memdev_shutdown(dev);
 	put_device(dev);
 	return ERR_PTR(rc);
 }

diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c

@@ -2,6 +2,7 @@
 /* Copyright(c) 2020 Intel Corporation. */
 #include <linux/device.h>
 #include <linux/slab.h>
+#include <linux/idr.h>
 #include <cxlmem.h>
 #include <cxl.h>
 #include "core.h"
@@ -20,10 +21,13 @@
  * operations, for example, namespace label access commands.
  */
 
+static DEFINE_IDA(cxl_nvdimm_bridge_ida);
+
 static void cxl_nvdimm_bridge_release(struct device *dev)
 {
 	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
 
+	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
 	kfree(cxl_nvb);
 }
@@ -47,16 +51,38 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
 
+__mock int match_nvdimm_bridge(struct device *dev, const void *data)
+{
+	return dev->type == &cxl_nvdimm_bridge_type;
+}
+
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&cxl_bus_type, NULL, cxl_nvd, match_nvdimm_bridge);
+	if (!dev)
+		return NULL;
+
+	return to_cxl_nvdimm_bridge(dev);
+}
+EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge);
+
 static struct cxl_nvdimm_bridge *
 cxl_nvdimm_bridge_alloc(struct cxl_port *port)
 {
 	struct cxl_nvdimm_bridge *cxl_nvb;
 	struct device *dev;
+	int rc;
 
 	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
 	if (!cxl_nvb)
 		return ERR_PTR(-ENOMEM);
 
+	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
+	if (rc < 0)
+		goto err;
+	cxl_nvb->id = rc;
+
 	dev = &cxl_nvb->dev;
 	cxl_nvb->port = port;
 	cxl_nvb->state = CXL_NVB_NEW;
@@ -67,6 +93,10 @@ cxl_nvdimm_bridge_alloc(struct cxl_port *port)
 	dev->type = &cxl_nvdimm_bridge_type;
 
 	return cxl_nvb;
+
+err:
+	kfree(cxl_nvb);
+	return ERR_PTR(rc);
 }
 
 static void unregister_nvb(void *_cxl_nvb)
@@ -119,7 +149,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
 		return cxl_nvb;
 
 	dev = &cxl_nvb->dev;
-	rc = dev_set_name(dev, "nvdimm-bridge");
+	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
 	if (rc)
 		goto err;
@@ -192,6 +222,11 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
 	return cxl_nvd;
 }
 
+static void cxl_nvd_unregister(void *dev)
+{
+	device_unregister(dev);
+}
+
 /**
  * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
  * @host: same host as @cxlmd
@@ -221,7 +256,7 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
 	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
 		dev_name(dev));
 
-	return devm_add_action_or_reset(host, unregister_cxl_dev, dev);
+	return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
 
 err:
 	put_device(dev);

diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h

@@ -114,7 +114,17 @@ struct cxl_device_reg_map {
 	struct cxl_reg_map memdev;
 };
 
+/**
+ * struct cxl_register_map - DVSEC harvested register block mapping parameters
+ * @base: virtual base of the register-block-BAR + @block_offset
+ * @block_offset: offset to start of register block in @barno
+ * @reg_type: see enum cxl_regloc_type
+ * @barno: PCI BAR number containing the register block
+ * @component_map: cxl_reg_map for component registers
+ * @device_map: cxl_reg_maps for device registers
+ */
 struct cxl_register_map {
+	void __iomem *base;
 	u64 block_offset;
 	u8 reg_type;
 	u8 barno;
@@ -155,6 +165,12 @@ enum cxl_decoder_type {
 	CXL_DECODER_EXPANDER = 3,
 };
 
+/*
+ * Current specification goes up to 8, double that seems a reasonable
+ * software max for the foreseeable future
+ */
+#define CXL_DECODER_MAX_INTERLEAVE 16
+
 /**
  * struct cxl_decoder - CXL address range decode configuration
  * @dev: this decoder's device
@@ -164,6 +180,7 @@ enum cxl_decoder_type {
  * @interleave_granularity: data stride per dport
  * @target_type: accelerator vs expander (type2 vs type3) selector
  * @flags: memory type capabilities and locking
+ * @nr_targets: number of elements in @target
  * @target: active ordered target list in current decoder configuration
  */
 struct cxl_decoder {
@@ -174,6 +191,7 @@ struct cxl_decoder {
 	int interleave_granularity;
 	enum cxl_decoder_type target_type;
 	unsigned long flags;
+	const int nr_targets;
 	struct cxl_dport *target[];
 };
@@ -186,6 +204,7 @@ enum cxl_nvdimm_brige_state {
 };
 
 struct cxl_nvdimm_bridge {
+	int id;
 	struct device dev;
 	struct cxl_port *port;
 	struct nvdimm_bus *nvdimm_bus;
@@ -200,6 +219,14 @@ struct cxl_nvdimm {
 	struct nvdimm *nvdimm;
 };
 
+struct cxl_walk_context {
+	struct device *dev;
+	struct pci_bus *root;
+	struct cxl_port *port;
+	int error;
+	int count;
+};
+
 /**
  * struct cxl_port - logical collection of upstream port devices and
  * downstream port devices to construct a CXL memory
@@ -246,25 +273,9 @@ int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 bool is_root_decoder(struct device *dev);
-struct cxl_decoder *
-devm_cxl_add_decoder(struct device *host, struct cxl_port *port, int nr_targets,
-		     resource_size_t base, resource_size_t len,
-		     int interleave_ways, int interleave_granularity,
-		     enum cxl_decoder_type type, unsigned long flags);
-
-/*
- * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
- * single ported host-bridges need not publish a decoder capability when a
- * passthrough decode can be assumed, i.e. all transactions that the uport sees
- * are claimed and passed to the single dport. Default the range a 0-base
- * 0-length until the first CXL region is activated.
- */
-static inline struct cxl_decoder *
-devm_cxl_add_passthrough_decoder(struct device *host, struct cxl_port *port)
-{
-	return devm_cxl_add_decoder(host, port, 1, 0, 0, 1, PAGE_SIZE,
-				    CXL_DECODER_EXPANDER, 0);
-}
+struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets);
+int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
 
 extern struct bus_type cxl_bus_type;
@@ -298,4 +309,13 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
 bool is_cxl_nvdimm(struct device *dev);
 int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
+
+/*
+ * Unit test builds overrides this to __weak, find the 'strong' version
+ * of these symbols in tools/testing/cxl/.
+ */
+#ifndef __mock
+#define __mock static
+#endif
+
 #endif /* __CXL_H__ */

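The __mock annotation is the hook that makes cxl_test work: in a production build the marked functions expand to static and are resolved within the driver, while the unit-test build overrides __mock to __weak so the mock library under tools/testing/cxl can link a strong replacement for the same symbol. Schematic illustration (cxl_example_lookup is a made-up symbol, not from this series):

	/* production: file-local; cxl_test build: weak, overridable */
	#ifndef __mock
	#define __mock static
	#endif

	__mock int cxl_example_lookup(int id)
	{
		return id;	/* real implementation, replaced under test */
	}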
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h

@@ -2,6 +2,7 @@
 /* Copyright(c) 2020-2021 Intel Corporation. */
 #ifndef __CXL_MEM_H__
 #define __CXL_MEM_H__
+#include <uapi/linux/cxl_mem.h>
 #include <linux/cdev.h>
 #include "cxl.h"
@@ -28,21 +29,6 @@
 	(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) !=                      \
 	 CXLMDEV_RESET_NEEDED_NOT)
 
-/**
- * struct cdevm_file_operations - devm coordinated cdev file operations
- * @fops: file operations that are synchronized against @shutdown
- * @shutdown: disconnect driver data
- *
- * @shutdown is invoked in the devres release path to disconnect any
- * driver instance data from @dev. It assumes synchronization with any
- * fops operation that requires driver data. After @shutdown an
- * operation may only reference @device data.
- */
-struct cdevm_file_operations {
-	struct file_operations fops;
-	void (*shutdown)(struct device *dev);
-};
-
 /**
  * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
  * @dev: driver core device object
@@ -62,13 +48,50 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
 	return container_of(dev, struct cxl_memdev, dev);
 }
 
-struct cxl_memdev *
-devm_cxl_add_memdev(struct device *host, struct cxl_mem *cxlm,
-		    const struct cdevm_file_operations *cdevm_fops);
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
+
+/**
+ * struct cxl_mbox_cmd - A command to be submitted to hardware.
+ * @opcode: (input) The command set and command submitted to hardware.
+ * @payload_in: (input) Pointer to the input payload.
+ * @payload_out: (output) Pointer to the output payload. Must be allocated by
+ *		 the caller.
+ * @size_in: (input) Number of bytes to load from @payload_in.
+ * @size_out: (input) Max number of bytes loaded into @payload_out.
+ *	      (output) Number of bytes generated by the device. For fixed size
+ *	      outputs commands this is always expected to be deterministic. For
+ *	      variable sized output commands, it tells the exact number of bytes
+ *	      written.
+ * @return_code: (output) Error code returned from hardware.
+ *
+ * This is the primary mechanism used to send commands to the hardware.
+ * All the fields except @payload_* correspond exactly to the fields described in
+ * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and
+ * @payload_out are written to, and read from the Command Payload Registers
+ * defined in CXL 2.0 8.2.8.4.8.
+ */
+struct cxl_mbox_cmd {
+	u16 opcode;
+	void *payload_in;
+	void *payload_out;
+	size_t size_in;
+	size_t size_out;
+	u16 return_code;
+#define CXL_MBOX_SUCCESS 0
+};
+
+/*
+ * CXL 2.0 - Memory capacity multiplier
+ * See Section 8.2.9.5
+ *
+ * Volatile, Persistent, and Partition capacities are specified to be in
+ * multiples of 256MB - define a multiplier to convert to/from bytes.
+ */
+#define CXL_CAPACITY_MULTIPLIER SZ_256M
 
 /**
  * struct cxl_mem - A CXL memory device
- * @pdev: The PCI device associated with this CXL device.
+ * @dev: The device associated with this CXL device.
  * @cxlmd: Logical memory device chardev / interface
  * @regs: Parsed register blocks
  * @payload_size: Size of space for payload
@@ -78,11 +101,24 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
 * @mbox_mutex: Mutex to synchronize mailbox access.
 * @firmware_version: Firmware version for the memory device.
 * @enabled_cmds: Hardware commands found enabled in CEL.
- * @pmem_range: Persistent memory capacity information.
- * @ram_range: Volatile memory capacity information.
+ * @exclusive_cmds: Commands that are kernel-internal only
+ * @pmem_range: Active Persistent memory capacity configuration
+ * @ram_range: Active Volatile memory capacity configuration
+ * @total_bytes: sum of all possible capacities
+ * @volatile_only_bytes: hard volatile capacity
+ * @persistent_only_bytes: hard persistent capacity
+ * @partition_align_bytes: alignment size for partition-able capacity
+ * @active_volatile_bytes: sum of hard + soft volatile
+ * @active_persistent_bytes: sum of hard + soft persistent
+ * @next_volatile_bytes: volatile capacity change pending device reset
+ * @next_persistent_bytes: persistent capacity change pending device reset
+ * @mbox_send: @dev specific transport for transmitting mailbox commands
+ *
+ * See section 8.2.9.5.2 Capacity Configuration and Label Storage for
+ * details on capacity parameters.
 */
 struct cxl_mem {
-	struct pci_dev *pdev;
+	struct device *dev;
 	struct cxl_memdev *cxlmd;
 
 	struct cxl_regs regs;
@@ -91,7 +127,8 @@ struct cxl_mem {
 	size_t lsa_size;
 	struct mutex mbox_mutex; /* Protects device mailbox and firmware */
 	char firmware_version[0x10];
-	unsigned long *enabled_cmds;
+	DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
+	DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 
 	struct range pmem_range;
 	struct range ram_range;
@@ -104,5 +141,124 @@ struct cxl_mem {
 	u64 active_persistent_bytes;
 	u64 next_volatile_bytes;
 	u64 next_persistent_bytes;
+
+	int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd);
 };
+
+enum cxl_opcode {
+	CXL_MBOX_OP_INVALID		= 0x0000,
+	CXL_MBOX_OP_RAW			= CXL_MBOX_OP_INVALID,
+	CXL_MBOX_OP_GET_FW_INFO		= 0x0200,
+	CXL_MBOX_OP_ACTIVATE_FW		= 0x0202,
+	CXL_MBOX_OP_GET_SUPPORTED_LOGS	= 0x0400,
+	CXL_MBOX_OP_GET_LOG		= 0x0401,
+	CXL_MBOX_OP_IDENTIFY		= 0x4000,
+	CXL_MBOX_OP_GET_PARTITION_INFO	= 0x4100,
+	CXL_MBOX_OP_SET_PARTITION_INFO	= 0x4101,
+	CXL_MBOX_OP_GET_LSA		= 0x4102,
+	CXL_MBOX_OP_SET_LSA		= 0x4103,
+	CXL_MBOX_OP_GET_HEALTH_INFO	= 0x4200,
+	CXL_MBOX_OP_GET_ALERT_CONFIG	= 0x4201,
+	CXL_MBOX_OP_SET_ALERT_CONFIG	= 0x4202,
+	CXL_MBOX_OP_GET_SHUTDOWN_STATE	= 0x4203,
+	CXL_MBOX_OP_SET_SHUTDOWN_STATE	= 0x4204,
+	CXL_MBOX_OP_GET_POISON		= 0x4300,
+	CXL_MBOX_OP_INJECT_POISON	= 0x4301,
+	CXL_MBOX_OP_CLEAR_POISON	= 0x4302,
+	CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS	= 0x4303,
+	CXL_MBOX_OP_SCAN_MEDIA		= 0x4304,
+	CXL_MBOX_OP_GET_SCAN_MEDIA	= 0x4305,
+	CXL_MBOX_OP_MAX			= 0x10000
+};
+
+#define DEFINE_CXL_CEL_UUID                                                    \
+	UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, 0xb1, 0x62,    \
+		  0x3b, 0x3f, 0x17)
+
+#define DEFINE_CXL_VENDOR_DEBUG_UUID                                           \
+	UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19,    \
+		  0x40, 0x3d, 0x86)
+
+struct cxl_mbox_get_supported_logs {
+	__le16 entries;
+	u8 rsvd[6];
+	struct cxl_gsl_entry {
+		uuid_t uuid;
+		__le32 size;
+	} __packed entry[];
+} __packed;
+
+struct cxl_cel_entry {
+	__le16 opcode;
+	__le16 effect;
+} __packed;
+
+struct cxl_mbox_get_log {
+	uuid_t uuid;
+	__le32 offset;
+	__le32 length;
+} __packed;
+
+/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
+struct cxl_mbox_identify {
+	char fw_revision[0x10];
+	__le64 total_capacity;
+	__le64 volatile_capacity;
+	__le64 persistent_capacity;
+	__le64 partition_align;
+	__le16 info_event_log_size;
+	__le16 warning_event_log_size;
+	__le16 failure_event_log_size;
+	__le16 fatal_event_log_size;
+	__le32 lsa_size;
+	u8 poison_list_max_mer[3];
+	__le16 inject_poison_limit;
+	u8 poison_caps;
+	u8 qos_telemetry_caps;
+} __packed;
+
+struct cxl_mbox_get_lsa {
+	u32 offset;
+	u32 length;
+} __packed;
+
+struct cxl_mbox_set_lsa {
+	u32 offset;
+	u32 reserved;
+	u8 data[];
+} __packed;
+
+/**
+ * struct cxl_mem_command - Driver representation of a memory device command
+ * @info: Command information as it exists for the UAPI
+ * @opcode: The actual bits used for the mailbox protocol
+ * @flags: Set of flags effecting driver behavior.
+ *
+ *  * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag
+ *    will be enabled by the driver regardless of what hardware may have
+ *    advertised.
+ *
+ * The cxl_mem_command is the driver's internal representation of commands that
+ * are supported by the driver. Some of these commands may not be supported by
+ * the hardware. The driver will use @info to validate the fields passed in by
+ * the user then submit the @opcode to the hardware.
+ *
+ * See struct cxl_command_info.
+ */
+struct cxl_mem_command {
+	struct cxl_command_info info;
+	enum cxl_opcode opcode;
+	u32 flags;
+#define CXL_CMD_FLAG_NONE 0
+#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
+};
+
+int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
+			  size_t in_size, void *out, size_t out_size);
+int cxl_mem_identify(struct cxl_mem *cxlm);
+int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
+int cxl_mem_create_range_info(struct cxl_mem *cxlm);
+struct cxl_mem *cxl_mem_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
 #endif /* __CXL_MEM_H__ */

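With the mailbox core split out, a transport driver only has to populate cxlm->mbox_send(); policy (command validation, mbox_mutex, payload sizing) lives in the core behind cxl_mem_mbox_send_cmd(). A sketch of a kernel-internal caller reading the first 128 bytes of the label storage area (the buffer size is chosen arbitrarily for illustration):

	struct cxl_mbox_get_lsa get_lsa = {
		.offset = 0,
		.length = 128,
	};
	u8 buf[128];
	int rc;

	/* core wraps this in a struct cxl_mbox_cmd and calls cxlm->mbox_send() */
	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
				   sizeof(get_lsa), buf, sizeof(buf));
	if (rc < 0)
		return rc;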
(File diff suppressed because it is too large.)

diff --git a/drivers/cxl/pci.h b/drivers/cxl/pci.h

@@ -20,13 +20,15 @@
 #define CXL_REGLOC_BIR_MASK GENMASK(2, 0)
 
 /* Register Block Identifier (RBI) */
-#define CXL_REGLOC_RBI_MASK GENMASK(15, 8)
-#define CXL_REGLOC_RBI_EMPTY 0
-#define CXL_REGLOC_RBI_COMPONENT 1
-#define CXL_REGLOC_RBI_VIRT 2
-#define CXL_REGLOC_RBI_MEMDEV 3
-#define CXL_REGLOC_RBI_TYPES CXL_REGLOC_RBI_MEMDEV + 1
+enum cxl_regloc_type {
+	CXL_REGLOC_RBI_EMPTY = 0,
+	CXL_REGLOC_RBI_COMPONENT,
+	CXL_REGLOC_RBI_VIRT,
+	CXL_REGLOC_RBI_MEMDEV,
+	CXL_REGLOC_RBI_TYPES
+};
 
+#define CXL_REGLOC_RBI_MASK GENMASK(15, 8)
 #define CXL_REGLOC_ADDR_MASK GENMASK(31, 16)
 
 #endif /* __CXL_PCI_H__ */

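For orientation, each Register Locator DVSEC entry is a pair of dwords; the low dword packs the BAR indicator and Register Block Identifier alongside the low offset bits. The driver decodes an entry roughly as follows (reg_lo/reg_hi already read from config space):

	u64 offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
	u8 bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	enum cxl_regloc_type type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);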
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2021 Intel Corporation. All rights reserved. */
 #include <linux/libnvdimm.h>
+#include <asm/unaligned.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/ndctl.h>
@@ -16,48 +17,55 @@
  */
 static struct workqueue_struct *cxl_pmem_wq;
 
+static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+
+static void clear_exclusive(void *cxlm)
+{
+	clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
+}
+
 static void unregister_nvdimm(void *nvdimm)
 {
 	nvdimm_delete(nvdimm);
 }
 
-static int match_nvdimm_bridge(struct device *dev, const void *data)
-{
-	return strcmp(dev_name(dev), "nvdimm-bridge") == 0;
-}
-
-static struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(void)
-{
-	struct device *dev;
-
-	dev = bus_find_device(&cxl_bus_type, NULL, NULL, match_nvdimm_bridge);
-	if (!dev)
-		return NULL;
-	return to_cxl_nvdimm_bridge(dev);
-}
-
 static int cxl_nvdimm_probe(struct device *dev)
 {
 	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
 	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+	unsigned long flags = 0, cmd_mask = 0;
+	struct cxl_mem *cxlm = cxlmd->cxlm;
 	struct cxl_nvdimm_bridge *cxl_nvb;
-	unsigned long flags = 0;
 	struct nvdimm *nvdimm;
-	int rc = -ENXIO;
+	int rc;
 
-	cxl_nvb = cxl_find_nvdimm_bridge();
+	cxl_nvb = cxl_find_nvdimm_bridge(cxl_nvd);
 	if (!cxl_nvb)
 		return -ENXIO;
 
 	device_lock(&cxl_nvb->dev);
-	if (!cxl_nvb->nvdimm_bus)
+	if (!cxl_nvb->nvdimm_bus) {
+		rc = -ENXIO;
 		goto out;
+	}
+
+	set_exclusive_cxl_commands(cxlm, exclusive_cmds);
+	rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
+	if (rc)
+		goto out;
 
 	set_bit(NDD_LABELING, &flags);
-	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags, 0, 0,
-			       NULL);
-	if (!nvdimm)
+	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+	nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
+			       cmd_mask, 0, NULL);
+	if (!nvdimm) {
+		rc = -ENOMEM;
 		goto out;
+	}
 
 	dev_set_drvdata(dev, nvdimm);
 	rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
 out:
 	device_unlock(&cxl_nvb->dev);
@@ -72,11 +80,120 @@ static struct cxl_driver cxl_nvdimm_driver = {
 	.id = CXL_DEVICE_NVDIMM,
 };
 
+static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
+				    struct nd_cmd_get_config_size *cmd,
+				    unsigned int buf_len)
+{
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+
+	*cmd = (struct nd_cmd_get_config_size) {
+		 .config_size = cxlm->lsa_size,
+		 .max_xfer = cxlm->payload_size,
+	};
+
+	return 0;
+}
+
+static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
+				    struct nd_cmd_get_config_data_hdr *cmd,
+				    unsigned int buf_len)
+{
+	struct cxl_mbox_get_lsa get_lsa;
+	int rc;
+
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+	if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
+		return -EINVAL;
+
+	get_lsa = (struct cxl_mbox_get_lsa) {
+		.offset = cmd->in_offset,
+		.length = cmd->in_length,
+	};
+
+	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
+				   sizeof(get_lsa), cmd->out_buf,
+				   cmd->in_length);
+	cmd->status = 0;
+
+	return rc;
+}
+
+static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
+				    struct nd_cmd_set_config_hdr *cmd,
+				    unsigned int buf_len)
+{
+	struct cxl_mbox_set_lsa *set_lsa;
+	int rc;
+
+	if (sizeof(*cmd) > buf_len)
+		return -EINVAL;
+
+	/* 4-byte status follows the input data in the payload */
+	if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len)
+		return -EINVAL;
+
+	set_lsa =
+		kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
+	if (!set_lsa)
+		return -ENOMEM;
+
+	*set_lsa = (struct cxl_mbox_set_lsa) {
+		.offset = cmd->in_offset,
+	};
+	memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
+
+	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
+				   struct_size(set_lsa, data, cmd->in_length),
+				   NULL, 0);
+
+	/*
+	 * Set "firmware" status (4-packed bytes at the end of the input
+	 * payload.
+	 */
+	put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
+	kvfree(set_lsa);
+
+	return rc;
+}
+
+static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
+			       void *buf, unsigned int buf_len)
+{
+	struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
+	unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
+	struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
+	struct cxl_mem *cxlm = cxlmd->cxlm;
+
+	if (!test_bit(cmd, &cmd_mask))
+		return -ENOTTY;
+
+	switch (cmd) {
+	case ND_CMD_GET_CONFIG_SIZE:
+		return cxl_pmem_get_config_size(cxlm, buf, buf_len);
+	case ND_CMD_GET_CONFIG_DATA:
+		return cxl_pmem_get_config_data(cxlm, buf, buf_len);
+	case ND_CMD_SET_CONFIG_DATA:
+		return cxl_pmem_set_config_data(cxlm, buf, buf_len);
+	default:
+		return -ENOTTY;
+	}
+}
+
 static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
 			struct nvdimm *nvdimm, unsigned int cmd, void *buf,
 			unsigned int buf_len, int *cmd_rc)
 {
-	return -ENOTTY;
+	/*
+	 * No firmware response to translate, let the transport error
+	 * code take precedence.
+	 */
+	*cmd_rc = 0;
+
+	if (!nvdimm)
+		return -ENOTTY;
+	return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
 }
 
 static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
@@ -194,6 +311,10 @@ static __init int cxl_pmem_init(void)
 {
 	int rc;
 
+	set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds);
+	set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
+	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
+
 	cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
 	if (!cxl_pmem_wq)
 		return -ENXIO;

diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c

@@ -33,18 +33,7 @@
 static int find_dvsec(struct pci_dev *dev, int dvsec_id)
 {
-	int vsec = 0;
-	u16 vendor, id;
-
-	while ((vsec = pci_find_next_ext_capability(dev, vsec,
-						    OCXL_EXT_CAP_ID_DVSEC))) {
-		pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
-				     &vendor);
-		pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);
-		if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id)
-			return vsec;
-	}
-	return 0;
+	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id);
 }
 
 static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c

@@ -973,7 +973,7 @@ static int btt_arena_write_layout(struct arena_info *arena)
 	u64 sum;
 	struct btt_sb *super;
 	struct nd_btt *nd_btt = arena->nd_btt;
-	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
 
 	ret = btt_map_init(arena);
 	if (ret)
@@ -988,8 +988,8 @@ static int btt_arena_write_layout(struct arena_info *arena)
 		return -ENOMEM;
 
 	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
-	memcpy(super->uuid, nd_btt->uuid, 16);
-	memcpy(super->parent_uuid, parent_uuid, 16);
+	export_uuid(super->uuid, nd_btt->uuid);
+	export_uuid(super->parent_uuid, parent_uuid);
 	super->flags = cpu_to_le32(arena->flags);
 	super->version_major = cpu_to_le16(arena->version_major);
 	super->version_minor = cpu_to_le16(arena->version_minor);
@@ -1574,7 +1574,8 @@ static void btt_blk_cleanup(struct btt *btt)
  * Pointer to a new struct btt on success, NULL on failure.
  */
 static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
-			    u32 lbasize, u8 *uuid, struct nd_region *nd_region)
+			    u32 lbasize, uuid_t *uuid,
+			    struct nd_region *nd_region)
 {
 	int ret;
 	struct btt *btt;
@@ -1693,7 +1694,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
 	}
 	nd_region = to_nd_region(nd_btt->dev.parent);
 	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
-			nd_region);
+		       nd_region);
 	if (!btt)
 		return -ENOMEM;
 	nd_btt->btt = btt;

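import_uuid()/export_uuid() are byte-for-byte copies between an in-core uuid_t and a raw 16-byte on-media field, preserving the on-media byte order exactly, which is why they can replace the open-coded memcpy()s above while gaining type safety. Usage pattern, schematically:

	uuid_t uuid;
	u8 raw[UUID_SIZE];		/* 16-byte label field */

	import_uuid(&uuid, raw);	/* media bytes -> uuid_t */
	if (uuid_is_null(&uuid))
		return false;		/* unclaimed */
	export_uuid(raw, &uuid);	/* uuid_t -> media bytes */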
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c

@@ -180,8 +180,8 @@ bool is_nd_btt(struct device *dev)
 EXPORT_SYMBOL(is_nd_btt);
 
 static struct device *__nd_btt_create(struct nd_region *nd_region,
-		unsigned long lbasize, u8 *uuid,
-		struct nd_namespace_common *ndns)
+				      unsigned long lbasize, uuid_t *uuid,
+				      struct nd_namespace_common *ndns)
 {
 	struct nd_btt *nd_btt;
 	struct device *dev;
@@ -244,14 +244,16 @@ struct device *nd_btt_create(struct nd_region *nd_region)
 */
 bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
 {
-	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	const uuid_t *ns_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
+	uuid_t parent_uuid;
 	u64 checksum;
 
 	if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
 		return false;
 
-	if (!guid_is_null((guid_t *)&super->parent_uuid))
-		if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
+	import_uuid(&parent_uuid, super->parent_uuid);
+	if (!uuid_is_null(&parent_uuid))
+		if (!uuid_equal(&parent_uuid, ns_uuid))
 			return false;
 
 	checksum = le64_to_cpu(super->checksum);
@@ -319,7 +321,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
 		return rc;
 
 	nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
-	nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL);
+	nd_btt->uuid = kmemdup(&btt_sb->uuid, sizeof(uuid_t), GFP_KERNEL);
 	if (!nd_btt->uuid)
 		return -ENOMEM;

diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c

@@ -207,38 +207,6 @@ struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
 }
 EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
 
-static bool is_uuid_sep(char sep)
-{
-	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
-		return true;
-	return false;
-}
-
-static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
-		size_t len)
-{
-	const char *str = buf;
-	u8 uuid[16];
-	int i;
-
-	for (i = 0; i < 16; i++) {
-		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
-			dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
-				i, str - buf, str[0],
-				str + 1 - buf, str[1]);
-			return -EINVAL;
-		}
-
-		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
-		str += 2;
-		if (is_uuid_sep(*str))
-			str++;
-	}
-
-	memcpy(uuid_out, uuid, sizeof(uuid));
-	return 0;
-}
-
 /**
  * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
  * @dev: container device for the uuid property
@@ -249,21 +217,21 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
  *	(driver detached)
  * LOCKING: expects nd_device_lock() is held on entry
  */
-int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
+int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
 		size_t len)
 {
-	u8 uuid[16];
+	uuid_t uuid;
 	int rc;
 
 	if (dev->driver)
 		return -EBUSY;
 
-	rc = nd_uuid_parse(dev, uuid, buf, len);
+	rc = uuid_parse(buf, &uuid);
 	if (rc)
 		return rc;
 
 	kfree(*uuid_out);
-	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+	*uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
 	if (!(*uuid_out))
 		return -ENOMEM;

diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c

@@ -17,6 +17,14 @@ static guid_t nvdimm_btt2_guid;
 static guid_t nvdimm_pfn_guid;
 static guid_t nvdimm_dax_guid;
 
+static uuid_t nvdimm_btt_uuid;
+static uuid_t nvdimm_btt2_uuid;
+static uuid_t nvdimm_pfn_uuid;
+static uuid_t nvdimm_dax_uuid;
+
+static uuid_t cxl_region_uuid;
+static uuid_t cxl_namespace_uuid;
+
 static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
 
 static u32 best_seq(u32 a, u32 b)
@@ -321,7 +329,8 @@ static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
 	return true;
 }
 
-char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
+char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
+		      u32 flags)
 {
 	if (!label_id || !uuid)
 		return NULL;
@@ -351,7 +360,7 @@ static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
 {
 	u64 sum, sum_save;
 
-	if (!namespace_label_has(ndd, checksum))
+	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
 		return true;
 
 	sum_save = nsl_get_checksum(ndd, nd_label);
@@ -366,7 +375,7 @@ static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
 {
 	u64 sum;
 
-	if (!namespace_label_has(ndd, checksum))
+	if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
 		return;
 	nsl_set_checksum(ndd, nd_label, 0);
 	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
@@ -400,9 +409,9 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
 		struct nd_namespace_label *nd_label;
 		struct nd_region *nd_region = NULL;
-		u8 label_uuid[NSLABEL_UUID_LEN];
 		struct nd_label_id label_id;
 		struct resource *res;
+		uuid_t label_uuid;
 		u32 flags;
 
 		nd_label = to_label(ndd, slot);
@@ -410,11 +419,11 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
 		if (!slot_valid(ndd, nd_label, slot))
 			continue;
 
-		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
+		nsl_get_uuid(ndd, nd_label, &label_uuid);
 		flags = nsl_get_flags(ndd, nd_label);
 		if (test_bit(NDD_NOBLK, &nvdimm->flags))
 			flags &= ~NSLABEL_FLAG_LOCAL;
-		nd_label_gen_id(&label_id, label_uuid, flags);
+		nd_label_gen_id(&label_id, &label_uuid, flags);
 		res = nvdimm_allocate_dpa(ndd, &label_id,
 					  nsl_get_dpa(ndd, nd_label),
 					  nsl_get_rawsize(ndd, nd_label));
@@ -724,7 +733,7 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
 		- (unsigned long) to_namespace_index(ndd, 0);
 }
 
-static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+static enum nvdimm_claim_class guid_to_nvdimm_cclass(guid_t *guid)
 {
 	if (guid_equal(guid, &nvdimm_btt_guid))
 		return NVDIMM_CCLASS_BTT;
@@ -740,6 +749,23 @@ static enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
 	return NVDIMM_CCLASS_UNKNOWN;
 }
 
+/* CXL labels store UUIDs instead of GUIDs for the same data */
+static enum nvdimm_claim_class uuid_to_nvdimm_cclass(uuid_t *uuid)
+{
+	if (uuid_equal(uuid, &nvdimm_btt_uuid))
+		return NVDIMM_CCLASS_BTT;
+	else if (uuid_equal(uuid, &nvdimm_btt2_uuid))
+		return NVDIMM_CCLASS_BTT2;
+	else if (uuid_equal(uuid, &nvdimm_pfn_uuid))
+		return NVDIMM_CCLASS_PFN;
+	else if (uuid_equal(uuid, &nvdimm_dax_uuid))
+		return NVDIMM_CCLASS_DAX;
+	else if (uuid_equal(uuid, &uuid_null))
+		return NVDIMM_CCLASS_NONE;
+
+	return NVDIMM_CCLASS_UNKNOWN;
+}
+
 static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
 					 guid_t *target)
 {
@@ -761,6 +787,28 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
 		return &guid_null;
 }
 
+/* CXL labels store UUIDs instead of GUIDs for the same data */
+static const uuid_t *to_abstraction_uuid(enum nvdimm_claim_class claim_class,
+					 uuid_t *target)
+{
+	if (claim_class == NVDIMM_CCLASS_BTT)
+		return &nvdimm_btt_uuid;
+	else if (claim_class == NVDIMM_CCLASS_BTT2)
+		return &nvdimm_btt2_uuid;
+	else if (claim_class == NVDIMM_CCLASS_PFN)
+		return &nvdimm_pfn_uuid;
+	else if (claim_class == NVDIMM_CCLASS_DAX)
+		return &nvdimm_dax_uuid;
+	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
+		/*
+		 * If we're modifying a namespace for which we don't
+		 * know the claim_class, don't touch the existing uuid.
+		 */
+		return target;
+	} else
+		return &uuid_null;
+}
+
 static void reap_victim(struct nd_mapping *nd_mapping,
 			struct nd_label_ent *victim)
 {
@@ -775,18 +823,18 @@ static void reap_victim(struct nd_mapping *nd_mapping,
 static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
 			      struct nd_namespace_label *nd_label, guid_t *guid)
 {
-	if (namespace_label_has(ndd, type_guid))
-		guid_copy(&nd_label->type_guid, guid);
+	if (efi_namespace_label_has(ndd, type_guid))
+		guid_copy(&nd_label->efi.type_guid, guid);
 }
 
 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
 			    struct nd_namespace_label *nd_label, guid_t *guid)
 {
-	if (!namespace_label_has(ndd, type_guid))
+	if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
 		return true;
-	if (!guid_equal(&nd_label->type_guid, guid)) {
+	if (!guid_equal(&nd_label->efi.type_guid, guid)) {
 		dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
-			&nd_label->type_guid);
+			&nd_label->efi.type_guid);
 		return false;
 	}
 	return true;
@@ -796,19 +844,34 @@ static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
 				struct nd_namespace_label *nd_label,
 				enum nvdimm_claim_class claim_class)
 {
-	if (!namespace_label_has(ndd, abstraction_guid))
+	if (ndd->cxl) {
+		uuid_t uuid;
+
+		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
+		export_uuid(nd_label->cxl.abstraction_uuid,
+			    to_abstraction_uuid(claim_class, &uuid));
 		return;
-	guid_copy(&nd_label->abstraction_guid,
+	}
+
+	if (!efi_namespace_label_has(ndd, abstraction_guid))
+		return;
+	guid_copy(&nd_label->efi.abstraction_guid,
 		  to_abstraction_guid(claim_class,
-				      &nd_label->abstraction_guid));
+				      &nd_label->efi.abstraction_guid));
 }
 
 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
 					    struct nd_namespace_label *nd_label)
 {
-	if (!namespace_label_has(ndd, abstraction_guid))
+	if (ndd->cxl) {
+		uuid_t uuid;
+
+		import_uuid(&uuid, nd_label->cxl.abstraction_uuid);
+		return uuid_to_nvdimm_cclass(&uuid);
+	}
+	if (!efi_namespace_label_has(ndd, abstraction_guid))
 		return NVDIMM_CCLASS_NONE;
-	return to_nvdimm_cclass(&nd_label->abstraction_guid);
+	return guid_to_nvdimm_cclass(&nd_label->efi.abstraction_guid);
 }
 
 static int __pmem_label_update(struct nd_region *nd_region,
@@ -851,10 +914,11 @@ static int __pmem_label_update(struct nd_region *nd_region,
 
 	nd_label = to_label(ndd, slot);
 	memset(nd_label, 0, sizeof_namespace_label(ndd));
-	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
+	nsl_set_uuid(ndd, nd_label, nspm->uuid);
 	nsl_set_name(ndd, nd_label, nspm->alt_name);
 	nsl_set_flags(ndd, nd_label, flags);
 	nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
+	nsl_set_nrange(ndd, nd_label, 1);
 	nsl_set_position(ndd, nd_label, pos);
 	nsl_set_isetcookie(ndd, nd_label, cookie);
 	nsl_set_rawsize(ndd, nd_label, resource_size(res));
@@ -878,9 +942,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
 	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
 		if (!label_ent->label)
 			continue;
-		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
-				|| memcmp(nspm->uuid, label_ent->label->uuid,
-					NSLABEL_UUID_LEN) == 0)
+		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
+		    nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
 			reap_victim(nd_mapping, label_ent);
 	}
@@ -941,7 +1004,7 @@ static void nsl_set_blk_isetcookie(struct nvdimm_drvdata *ndd,
 				   struct nd_namespace_label *nd_label,
 				   u64 isetcookie)
 {
-	if (namespace_label_has(ndd, type_guid)) {
+	if (efi_namespace_label_has(ndd, type_guid)) {
 		nsl_set_isetcookie(ndd, nd_label, isetcookie);
 		return;
 	}
@@ -952,7 +1015,7 @@ bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
 				 struct nd_namespace_label *nd_label,
 				 u64 isetcookie)
 {
-	if (!namespace_label_has(ndd, type_guid))
+	if (!efi_namespace_label_has(ndd, type_guid))
 		return true;
 
 	if (nsl_get_isetcookie(ndd, nd_label) != isetcookie) {
@@ -968,7 +1031,7 @@ static void nsl_set_blk_nlabel(struct nvdimm_drvdata *ndd,
 			       struct nd_namespace_label *nd_label, int nlabel,
 			       bool first)
 {
-	if (!namespace_label_has(ndd, type_guid)) {
+	if (!efi_namespace_label_has(ndd, type_guid)) {
 		nsl_set_nlabel(ndd, nd_label, 0); /* N/A */
 		return;
 	}
@@ -979,7 +1042,7 @@ static void nsl_set_blk_position(struct nvdimm_drvdata *ndd,
 				 struct nd_namespace_label *nd_label,
 				 bool first)
 {
-	if (!namespace_label_has(ndd, type_guid)) {
+	if (!efi_namespace_label_has(ndd, type_guid)) {
 		nsl_set_position(ndd, nd_label, 0);
 		return;
 	}
@@ -1005,7 +1068,6 @@ static int __blk_label_update(struct nd_region *nd_region,
 	unsigned long *free, *victim_map = NULL;
 	struct resource *res, **old_res_list;
 	struct nd_label_id label_id;
-	u8 uuid[NSLABEL_UUID_LEN];
 	int min_dpa_idx = 0;
 	LIST_HEAD(list);
 	u32 nslot, slot;
@@ -1043,8 +1105,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 	/* mark unused labels for garbage collection */
 	for_each_clear_bit_le(slot, free, nslot) {
 		nd_label = to_label(ndd, slot);
-		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
 			continue;
 		res = to_resource(ndd, nd_label);
 		if (res && is_old_resource(res, old_res_list,
@@ -1113,7 +1174,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 
 		nd_label = to_label(ndd, slot);
 		memset(nd_label, 0, sizeof_namespace_label(ndd));
-		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
+		nsl_set_uuid(ndd, nd_label, nsblk->uuid);
 		nsl_set_name(ndd, nd_label, nsblk->alt_name);
 		nsl_set_flags(ndd, nd_label, NSLABEL_FLAG_LOCAL);
@@ -1161,8 +1222,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 		if (!nd_label)
 			continue;
 		nlabel++;
-		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
 			continue;
 		nlabel--;
 		list_move(&label_ent->list, &list);
@@ -1192,8 +1252,7 @@ static int __blk_label_update(struct nd_region *nd_region,
 	}
 	for_each_clear_bit_le(slot, free, nslot) {
 		nd_label = to_label(ndd, slot);
-		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, nsblk->uuid))
 			continue;
 		res = to_resource(ndd, nd_label);
 		res->flags &= ~DPA_RESOURCE_ADJUSTED;
@@ -1273,12 +1332,11 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
 	return max(num_labels, old_num_labels);
 }
 
-static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
+static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
 {
 	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 	struct nd_label_ent *label_ent, *e;
 	struct nd_namespace_index *nsindex;
-	u8 label_uuid[NSLABEL_UUID_LEN];
 	unsigned long *free;
 	LIST_HEAD(list);
 	u32 nslot, slot;
@@ -1298,8 +1356,7 @@ static int del_labels(struct nd_mapping *nd_mapping, uuid_t *uuid)
 		if (!nd_label)
 			continue;
 		active++;
-		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
-		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
+		if (!nsl_uuid_equal(ndd, nd_label, uuid))
 			continue;
 		active--;
 		slot = to_slot(ndd, nd_label);
@@ -1395,5 +1452,13 @@ int __init nd_label_init(void)
 	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
 	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
 
+	WARN_ON(uuid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_uuid));
+	WARN_ON(uuid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_uuid));
+	WARN_ON(uuid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_uuid));
+	WARN_ON(uuid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_uuid));
+
+	WARN_ON(uuid_parse(CXL_REGION_UUID, &cxl_region_uuid));
+	WARN_ON(uuid_parse(CXL_NAMESPACE_UUID, &cxl_namespace_uuid));
+
 	return 0;
 }

diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h

@@ -34,6 +34,7 @@ enum {
 * struct nd_namespace_index - label set superblock
 * @sig: NAMESPACE_INDEX\0
 * @flags: placeholder
+ * @labelsize: log2 size (v1 labels 128 bytes v2 labels 256 bytes)
 * @seq: sequence number for this index
 * @myoff: offset of this index in label area
 * @mysize: size of this index struct
@@ -43,7 +44,7 @@ enum {
 * @major: label area major version
 * @minor: label area minor version
 * @checksum: fletcher64 of all fields
- * @free[0]: bitmap, nlabel bits
+ * @free: bitmap, nlabel bits
 *
 * The size of free[] is rounded up so the total struct size is a
 * multiple of NSINDEX_ALIGN bytes. Any bits this allocates beyond
@@ -66,7 +67,39 @@ struct nd_namespace_index {
 };
 
 /**
- * struct nd_namespace_label - namespace superblock
+ * struct cxl_region_label - CXL 2.0 Table 211
+ * @type: uuid identifying this label format (region)
+ * @uuid: uuid for the region this label describes
+ * @flags: NSLABEL_FLAG_UPDATING (all other flags reserved)
+ * @nlabel: 1 per interleave-way in the region
+ * @position: this label's position in the set
+ * @dpa: start address in device-local capacity for this label
+ * @rawsize: size of this label's contribution to region
+ * @hpa: mandatory system physical address to map this region
+ * @slot: slot id of this label in label area
+ * @ig: interleave granularity (1 << @ig) * 256 bytes
+ * @align: alignment in SZ_256M blocks
+ * @reserved: reserved
+ * @checksum: fletcher64 sum of this label
+ */
+struct cxl_region_label {
+	u8 type[NSLABEL_UUID_LEN];
+	u8 uuid[NSLABEL_UUID_LEN];
+	__le32 flags;
+	__le16 nlabel;
+	__le16 position;
+	__le64 dpa;
+	__le64 rawsize;
+	__le64 hpa;
+	__le32 slot;
+	__le32 ig;
+	__le32 align;
+	u8 reserved[0xac];
+	__le64 checksum;
+};
+
+/**
+ * struct nvdimm_efi_label - namespace superblock
 * @uuid: UUID per RFC 4122
 * @name: optional name (NULL-terminated)
 * @flags: see NSLABEL_FLAG_*
@@ -77,9 +110,14 @@ struct nd_namespace_index {
 * @dpa: DPA of NVM range on this DIMM
 * @rawsize: size of namespace
 * @slot: slot of this label in label area
- * @unused: must be zero
+ * @align: physical address alignment of the namespace
+ * @reserved: reserved
+ * @type_guid: copy of struct acpi_nfit_system_address.range_guid
+ * @abstraction_guid: personality id (btt, btt2, fsdax, devdax....)
+ * @reserved2: reserved
+ * @checksum: fletcher64 sum of this object
 */
-struct nd_namespace_label {
+struct nvdimm_efi_label {
 	u8 uuid[NSLABEL_UUID_LEN];
 	u8 name[NSLABEL_NAME_LEN];
 	__le32 flags;
@@ -92,7 +130,7 @@ struct nd_namespace_label {
 	__le32 slot;
 	/*
 	 * Accessing fields past this point should be gated by a
-	 * namespace_label_has() check.
+	 * efi_namespace_label_has() check.
 	 */
 	u8 align;
 	u8 reserved[3];
@@ -102,11 +140,57 @@ struct nd_namespace_label {
 	__le64 checksum;
 };
 
+/**
+ * struct nvdimm_cxl_label - CXL 2.0 Table 212
+ * @type: uuid identifying this label format (namespace)
+ * @uuid: uuid for the namespace this label describes
+ * @name: friendly name for the namespace
+ * @flags: NSLABEL_FLAG_UPDATING (all other flags reserved)
+ * @nrange: discontiguous namespace support
+ * @position: this label's position in the set
+ * @dpa: start address in device-local capacity for this label
+ * @rawsize: size of this label's contribution to namespace
+ * @slot: slot id of this label in label area
+ * @align: alignment in SZ_256M blocks
+ * @region_uuid: host interleave set identifier
+ * @abstraction_uuid: personality driver for this namespace
+ * @lbasize: address geometry for disk-like personalities
+ * @reserved: reserved
+ * @checksum: fletcher64 sum of this label
+ */
+struct nvdimm_cxl_label {
+	u8 type[NSLABEL_UUID_LEN];
+	u8 uuid[NSLABEL_UUID_LEN];
+	u8 name[NSLABEL_NAME_LEN];
+	__le32 flags;
+	__le16 nrange;
+	__le16 position;
+	__le64 dpa;
+	__le64 rawsize;
+	__le32 slot;
+	__le32 align;
+	u8 region_uuid[16];
+	u8 abstraction_uuid[16];
+	__le16 lbasize;
+	u8 reserved[0x56];
+	__le64 checksum;
+};
+
+struct nd_namespace_label {
+	union {
+		struct nvdimm_cxl_label cxl;
+		struct nvdimm_efi_label efi;
+	};
+};
+
 #define NVDIMM_BTT_GUID "8aed63a2-29a2-4c66-8b12-f05d15d3922a"
 #define NVDIMM_BTT2_GUID "18633bfc-1735-4217-8ac9-17239282d3f8"
 #define NVDIMM_PFN_GUID "266400ba-fb9f-4677-bcb0-968f11d0d225"
 #define NVDIMM_DAX_GUID "97a86d9c-3cdd-4eda-986f-5068b4f80088"
 
+#define CXL_REGION_UUID "529d7c61-da07-47c4-a93f-ecdf2c06f444"
+#define CXL_NAMESPACE_UUID "68bb2c0a-5a77-4937-9f85-3caf41a0f93c"
+
 /**
 * struct nd_label_id - identifier string for dpa allocation
 * @id: "{blk|pmem}-<namespace uuid>"

(Some files were not shown because too many files have changed in this diff.)