cxl: Driver code for powernv PCIe based cards for userspace access
This is the core of the cxl driver. It adds support for using cxl cards in the powernv environment only (i.e. POWER8 bare metal). It allows access to cxl accelerators by userspace using the /dev/cxl/afuM.N char devices.

The kernel driver has no knowledge of the function implemented by the accelerator; it only provides services to userspace via the /dev/cxl/afuM.N devices. When a program opens one of these devices and runs the start work IOCTL, the accelerator gains coherent access to that process's memory using the same virtual addresses. The process may then mmap the device to access any MMIO space the accelerator provides, and reads on the device allow interrupts to be received. These services are further documented in a later patch in Documentation/powerpc/cxl.txt. Documentation of the cxl hardware architecture and userspace API is provided in subsequent patches.

Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Committed by Michael Ellerman
parent 10542ca015
commit f204e0b8ce
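As an illustration of the userspace flow described above (not part of this patch): open the AFU device, issue the start work ioctl, mmap the MMIO space, then read the fd for interrupts. The ioctl name, number, and struct layout below are placeholders; the real definitions land in the uapi header added later in this series.

/* Hedged sketch of the userspace flow; the uapi names below are assumed. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Placeholder for the real uapi definitions from the later patch */
struct cxl_ioctl_start_work {
    uint64_t flags;
    uint64_t work_element_descriptor; /* WED: AFU-specific value or pointer */
    uint64_t amr;
    int16_t  num_interrupts;
    int16_t  reserved[3];
};
#define CXL_IOCTL_START_WORK _IOW(0xCA, 0x00, struct cxl_ioctl_start_work)

int main(void)
{
    struct cxl_ioctl_start_work work;
    char event[128];
    void *mmio;
    ssize_t n;

    int fd = open("/dev/cxl/afu0.0", O_RDWR); /* afuM.N: card M, slice N */
    if (fd < 0) { perror("open"); return 1; }

    /* Attach this process's address space to the accelerator */
    memset(&work, 0, sizeof(work));
    if (ioctl(fd, CXL_IOCTL_START_WORK, &work)) { perror("ioctl"); return 1; }

    /* Map the AFU's per-process MMIO (problem state) area */
    mmio = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (mmio == MAP_FAILED) { perror("mmap"); return 1; }

    /* Block until the AFU delivers an interrupt or error event */
    n = read(fd, event, sizeof(event));
    printf("read %zd bytes of event data\n", n);

    munmap(mmio, 4096);
    close(fd);
    return 0;
}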
drivers/misc/cxl/context.c (new file, 193 lines)
@@ -0,0 +1,193 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
    return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
    int i;

    spin_lock_init(&ctx->sste_lock);
    ctx->afu = afu;
    ctx->master = master;
    ctx->pid = NULL; /* Set in start work ioctl */

    /*
     * Allocate the segment table before we put it in the IDR so that we
     * can always access it when dereferenced from IDR. For the same
     * reason, the segment table is only destroyed after the context is
     * removed from the IDR. Access to this in the IOCTL is protected by
     * Linux filesystem semantics (can't IOCTL until open is complete).
     */
    i = cxl_alloc_sst(ctx);
    if (i)
        return i;

    INIT_WORK(&ctx->fault_work, cxl_handle_fault);

    init_waitqueue_head(&ctx->wq);
    spin_lock_init(&ctx->lock);

    ctx->irq_bitmap = NULL;
    ctx->pending_irq = false;
    ctx->pending_fault = false;
    ctx->pending_afu_err = false;

    /*
     * When we have to destroy all contexts in cxl_context_detach_all() we
     * end up with afu_release_irqs() called from inside a
     * idr_for_each_entry(). Hence we need to make sure that anything
     * dereferenced from this IDR is ok before we allocate the IDR here.
     * This clears out the IRQ ranges to ensure this.
     */
    for (i = 0; i < CXL_IRQ_RANGES; i++)
        ctx->irqs.range[i] = 0;

    mutex_init(&ctx->status_mutex);

    ctx->status = OPENED;

    /*
     * Allocating the IDR entry: make sure everything that can be
     * dereferenced from it has been set up first.
     */
    idr_preload(GFP_KERNEL);
    spin_lock(&afu->contexts_lock);
    i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
                  ctx->afu->num_procs, GFP_NOWAIT);
    spin_unlock(&afu->contexts_lock);
    idr_preload_end();
    if (i < 0)
        return i;

    ctx->pe = i;
    ctx->elem = &ctx->afu->spa[i];
    ctx->pe_inserted = false;
    return 0;
}

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
    u64 len = vma->vm_end - vma->vm_start;
    len = min(len, ctx->psn_size);

    if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
    }

    /* Make sure there is a valid per process space for this AFU */
    if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
        pr_devel("AFU doesn't support mmio space\n");
        return -EINVAL;
    }

    /* Can't mmap until the AFU is enabled */
    if (!ctx->afu->enabled)
        return -EBUSY;

    pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
             ctx->psn_phys, ctx->pe, ctx->master);

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    return vm_iomap_memory(vma, ctx->psn_phys, len);
}

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
static void __detach_context(struct cxl_context *ctx)
{
    enum cxl_context_status status;

    mutex_lock(&ctx->status_mutex);
    status = ctx->status;
    ctx->status = CLOSED;
    mutex_unlock(&ctx->status_mutex);
    if (status != STARTED)
        return;

    WARN_ON(cxl_detach_process(ctx));
    afu_release_irqs(ctx);
    flush_work(&ctx->fault_work); /* Only needed for dedicated process */
    wake_up_all(&ctx->wq);
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
    __detach_context(ctx);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
    struct cxl_context *ctx;
    int tmp;

    rcu_read_lock();
    idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
        /*
         * Anything done in here needs to be set up before the IDR is
         * created and torn down after the IDR is removed.
         */
        __detach_context(ctx);
    rcu_read_unlock();
}

void cxl_context_free(struct cxl_context *ctx)
{
    spin_lock(&ctx->afu->contexts_lock);
    idr_remove(&ctx->afu->contexts_idr, ctx->pe);
    spin_unlock(&ctx->afu->contexts_lock);
    synchronize_rcu();

    free_page((u64)ctx->sstp);
    ctx->sstp = NULL;

    put_pid(ctx->pid);
    kfree(ctx);
}
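A note on the allocation pattern in cxl_context_init() above: it is the standard idr_preload() idiom for allocating an IDR entry while holding a spinlock. Nodes are preallocated with GFP_KERNEL outside the lock, then the allocation inside the lock uses GFP_NOWAIT so it cannot sleep. A generic sketch (not cxl-specific; my_lock, my_idr, and my_alloc_id are illustrative names):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock); /* illustrative */
static DEFINE_IDR(my_idr);       /* illustrative */

/* Returns the allocated id (>= 0) or a negative errno. */
static int my_alloc_id(void *ptr, int max_ids)
{
    int id;

    idr_preload(GFP_KERNEL); /* may sleep: preallocates per-cpu nodes */
    spin_lock(&my_lock);
    /* GFP_NOWAIT is safe under the spinlock; it uses the preloaded nodes */
    id = idr_alloc(&my_idr, ptr, 0, max_ids, GFP_NOWAIT);
    spin_unlock(&my_lock);
    idr_preload_end();

    return id;
}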
drivers/misc/cxl/cxl.h (new file, 629 lines)
Diff suppressed because the file is too large.
drivers/misc/cxl/debugfs.c (new file, 132 lines)
@@ -0,0 +1,132 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "cxl.h"

static struct dentry *cxl_debugfs;

void cxl_stop_trace(struct cxl *adapter)
{
    int slice;

    /* Stop the trace */
    cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);

    /* Stop the slice traces */
    spin_lock(&adapter->afu_list_lock);
    for (slice = 0; slice < adapter->slices; slice++) {
        if (adapter->afu[slice])
            cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, 0x8000000000000000LL);
    }
    spin_unlock(&adapter->afu_list_lock);
}

/* Helpers to export CXL memory-mapped IO registers via debugfs */
static int debugfs_io_u64_get(void *data, u64 *val)
{
    *val = in_be64((u64 __iomem *)data);
    return 0;
}

static int debugfs_io_u64_set(void *data, u64 val)
{
    out_be64((u64 __iomem *)data, val);
    return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");

static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
                                            struct dentry *parent, u64 __iomem *value)
{
    return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64);
}

int cxl_debugfs_adapter_add(struct cxl *adapter)
{
    struct dentry *dir;
    char buf[32];

    if (!cxl_debugfs)
        return -ENODEV;

    snprintf(buf, 32, "card%i", adapter->adapter_num);
    dir = debugfs_create_dir(buf, cxl_debugfs);
    if (IS_ERR(dir))
        return PTR_ERR(dir);
    adapter->debugfs = dir;

    debugfs_create_io_x64("fir1", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR1));
    debugfs_create_io_x64("fir2", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR2));
    debugfs_create_io_x64("fir_cntl", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_FIR_CNTL));
    debugfs_create_io_x64("err_ivte", S_IRUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_ErrIVTE));

    debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1_addr(adapter, CXL_PSL_TRACE));

    return 0;
}

void cxl_debugfs_adapter_remove(struct cxl *adapter)
{
    debugfs_remove_recursive(adapter->debugfs);
}

int cxl_debugfs_afu_add(struct cxl_afu *afu)
{
    struct dentry *dir;
    char buf[32];

    if (!afu->adapter->debugfs)
        return -ENODEV;

    snprintf(buf, 32, "psl%i.%i", afu->adapter->adapter_num, afu->slice);
    dir = debugfs_create_dir(buf, afu->adapter->debugfs);
    if (IS_ERR(dir))
        return PTR_ERR(dir);
    afu->debugfs = dir;

    debugfs_create_io_x64("fir", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_FIR_SLICE_An));
    debugfs_create_io_x64("serr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SERR_An));
    debugfs_create_io_x64("afu_debug", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_AFU_DEBUG_An));
    debugfs_create_io_x64("sr", S_IRUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SR_An));

    debugfs_create_io_x64("dsisr", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DSISR_An));
    debugfs_create_io_x64("dar", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_DAR_An));
    debugfs_create_io_x64("sstp0", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP0_An));
    debugfs_create_io_x64("sstp1", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_SSTP1_An));
    debugfs_create_io_x64("err_status", S_IRUSR, dir, _cxl_p2n_addr(afu, CXL_PSL_ErrStat_An));

    debugfs_create_io_x64("trace", S_IRUSR | S_IWUSR, dir, _cxl_p1n_addr(afu, CXL_PSL_SLICE_TRACE));

    return 0;
}

void cxl_debugfs_afu_remove(struct cxl_afu *afu)
{
    debugfs_remove_recursive(afu->debugfs);
}

int __init cxl_debugfs_init(void)
{
    struct dentry *ent;

    ent = debugfs_create_dir("cxl", NULL);
    if (IS_ERR(ent))
        return PTR_ERR(ent);
    cxl_debugfs = ent;

    return 0;
}

void cxl_debugfs_exit(void)
{
    debugfs_remove_recursive(cxl_debugfs);
}
drivers/misc/cxl/fault.c (new file, 291 lines)
@@ -0,0 +1,291 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"

static struct cxl_sste *find_free_sste(struct cxl_sste *primary_group,
                                       bool sec_hash,
                                       struct cxl_sste *secondary_group,
                                       unsigned int *lru)
{
    unsigned int i, entry;
    struct cxl_sste *sste, *group = primary_group;

    for (i = 0; i < 2; i++) {
        for (entry = 0; entry < 8; entry++) {
            sste = group + entry;
            if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                return sste;
        }
        if (!sec_hash)
            break;
        group = secondary_group;
    }
    /* Nothing free, select an entry to cast out */
    if (sec_hash && (*lru & 0x8))
        sste = secondary_group + (*lru & 0x7);
    else
        sste = primary_group + (*lru & 0x7);
    *lru = (*lru + 1) & 0xf;

    return sste;
}

static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
    /* mask is the group index; we search both the primary and secondary groups. */
    unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
    bool sec_hash;
    struct cxl_sste *sste;
    unsigned int hash;
    unsigned long flags;

    sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);

    if (slb->vsid & SLB_VSID_B_1T)
        hash = (slb->esid >> SID_SHIFT_1T) & mask;
    else /* 256M */
        hash = (slb->esid >> SID_SHIFT) & mask;

    spin_lock_irqsave(&ctx->sste_lock, flags);
    sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash,
                          ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru);

    pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
             sste - ctx->sstp, slb->vsid, slb->esid);

    sste->vsid_data = cpu_to_be64(slb->vsid);
    sste->esid_data = cpu_to_be64(slb->esid);
    spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
    struct copro_slb slb = {0, 0};
    int rc;

    if (!(rc = copro_calculate_slb(mm, ea, &slb)))
        cxl_load_segment(ctx, &slb);

    return rc;
}

static void cxl_ack_ae(struct cxl_context *ctx)
{
    unsigned long flags;

    cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

    spin_lock_irqsave(&ctx->lock, flags);
    ctx->pending_fault = true;
    ctx->fault_addr = ctx->dar;
    ctx->fault_dsisr = ctx->dsisr;
    spin_unlock_irqrestore(&ctx->lock, flags);

    wake_up_all(&ctx->wq);
}

static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
    int rc;

    pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);

    if ((rc = cxl_fault_segment(ctx, mm, ea)))
        cxl_ack_ae(ctx);
    else {
        mb(); /* Order seg table write to TFC MMIO write */
        cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
    }

    return IRQ_HANDLED;
}

static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm, u64 dsisr, u64 dar)
{
    unsigned flt = 0;
    int result;
    unsigned long access, flags;

    if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
        pr_devel("copro_handle_mm_fault failed: %#x\n", result);
        return cxl_ack_ae(ctx);
    }

    /*
     * update_mmu_cache() will not have loaded the hash since current->trap
     * is not a 0x400 or 0x300, so just call hash_page_mm() here.
     */
    access = _PAGE_PRESENT;
    if (dsisr & CXL_PSL_DSISR_An_S)
        access |= _PAGE_RW;
    if ((!ctx->kernel) || !(dar & (1ULL << 63))) /* user-region address: top bit clear */
        access |= _PAGE_USER;
    local_irq_save(flags);
    hash_page_mm(mm, dar, access, 0x300);
    local_irq_restore(flags);

    pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
    cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

void cxl_handle_fault(struct work_struct *fault_work)
{
    struct cxl_context *ctx =
        container_of(fault_work, struct cxl_context, fault_work);
    u64 dsisr = ctx->dsisr;
    u64 dar = ctx->dar;
    struct task_struct *task;
    struct mm_struct *mm;

    if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
        cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
        cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
        /*
         * The most likely explanation is harmless - a dedicated process
         * has detached and these were cleared by the PSL purge - but
         * warn about it just in case.
         */
        dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
        return;
    }

    pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
             "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

    if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
        pr_devel("cxl_handle_fault unable to get task %i\n",
                 pid_nr(ctx->pid));
        cxl_ack_ae(ctx);
        return;
    }
    if (!(mm = get_task_mm(task))) {
        pr_devel("cxl_handle_fault unable to get mm %i\n",
                 pid_nr(ctx->pid));
        cxl_ack_ae(ctx);
        goto out;
    }

    if (dsisr & CXL_PSL_DSISR_An_DS)
        cxl_handle_segment_miss(ctx, mm, dar);
    else if (dsisr & CXL_PSL_DSISR_An_DM)
        cxl_handle_page_fault(ctx, mm, dsisr, dar);
    else
        WARN(1, "cxl_handle_fault has nothing to handle\n");

    mmput(mm);
out:
    put_task_struct(task);
}

static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
    int rc;
    struct task_struct *task;
    struct mm_struct *mm;

    if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
        pr_devel("cxl_prefault_one unable to get task %i\n",
                 pid_nr(ctx->pid));
        return;
    }
    if (!(mm = get_task_mm(task))) {
        pr_devel("cxl_prefault_one unable to get mm %i\n",
                 pid_nr(ctx->pid));
        put_task_struct(task);
        return;
    }

    rc = cxl_fault_segment(ctx, mm, ea);

    mmput(mm);
    put_task_struct(task);
}

static u64 next_segment(u64 ea, u64 vsid)
{
    if (vsid & SLB_VSID_B_1T)
        ea |= (1ULL << 40) - 1;
    else
        ea |= (1ULL << 28) - 1;

    return ea + 1;
}

static void cxl_prefault_vma(struct cxl_context *ctx)
{
    u64 ea, last_esid = 0;
    struct copro_slb slb;
    struct vm_area_struct *vma;
    int rc;
    struct task_struct *task;
    struct mm_struct *mm;

    if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
        pr_devel("cxl_prefault_vma unable to get task %i\n",
                 pid_nr(ctx->pid));
        return;
    }
    if (!(mm = get_task_mm(task))) {
        pr_devel("cxl_prefault_vma unable to get mm %i\n",
                 pid_nr(ctx->pid));
        goto out1;
    }

    down_read(&mm->mmap_sem);
    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        for (ea = vma->vm_start; ea < vma->vm_end;
             ea = next_segment(ea, slb.vsid)) {
            rc = copro_calculate_slb(mm, ea, &slb);
            if (rc)
                continue;

            if (last_esid == slb.esid)
                continue;

            cxl_load_segment(ctx, &slb);
            last_esid = slb.esid;
        }
    }
    up_read(&mm->mmap_sem);

    mmput(mm);
out1:
    put_task_struct(task);
}

void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
    switch (ctx->afu->prefault_mode) {
    case CXL_PREFAULT_WED:
        cxl_prefault_one(ctx, wed);
        break;
    case CXL_PREFAULT_ALL:
        cxl_prefault_vma(ctx);
        break;
    default:
        break;
    }
}
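The primary/secondary group arithmetic in cxl_load_segment() above can be checked with a small host-side calculation (not part of this patch). With a one-page (4096-byte) segment table of 8-entry groups, mask = (4096 >> 7) - 1 = 31; the primary group index is a hash of the ESID and the secondary group is its complement under the same mask. A standalone sketch, using SID_SHIFT = 28 for 256M segments as in the kernel:

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT 28 /* 256M segments */

int main(void)
{
    uint64_t sst_size = 4096;                /* one page, as in cxl_alloc_sst() */
    unsigned int mask = (sst_size >> 7) - 1; /* 31: number of 8-entry groups - 1 */
    uint64_t ea = 0x3fffb7f0000ULL;          /* example effective address */
    unsigned int hash = (unsigned int)(ea >> SID_SHIFT) & mask;
    unsigned int secondary = ~hash & mask;

    /* Each group holds 8 SSTEs, so the first entry index is group << 3 */
    printf("primary group %u (entries %u..%u), secondary group %u\n",
           hash, hash << 3, (hash << 3) + 7, secondary);
    return 0;
}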
drivers/misc/cxl/file.c (new file, 508 lines)
Diff suppressed because the file is too large.
drivers/misc/cxl/irq.c (new file, 402 lines)
@@ -0,0 +1,402 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"

/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
    u64 fir1, fir2, fir_slice, serr, afu_debug;

    fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
    fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
    fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
    serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
    afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

    dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
    dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
    dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
    dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
    dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
    dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

    dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
    cxl_stop_trace(ctx->afu->adapter);

    return cxl_ack_irq(ctx, 0, errstat);
}

irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
    struct cxl_afu *afu = data;
    u64 fir_slice, errstat, serr, afu_debug;

    WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

    serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
    fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
    errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
    afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
    dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
    dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
    dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
    dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

    cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

    return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_err(int irq, void *data)
{
    struct cxl *adapter = data;
    u64 fir1, fir2, err_ivte;

    WARN(1, "CXL ERROR interrupt %i\n", irq);

    err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
    dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);

    dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
    cxl_stop_trace(adapter);

    fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
    fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

    dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);

    return IRQ_HANDLED;
}

static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
    ctx->dsisr = dsisr;
    ctx->dar = dar;
    schedule_work(&ctx->fault_work);
    return IRQ_HANDLED;
}

static irqreturn_t cxl_irq(int irq, void *data)
{
    struct cxl_context *ctx = data;
    struct cxl_irq_info irq_info;
    u64 dsisr, dar;
    int result;

    if ((result = cxl_get_irq(ctx, &irq_info))) {
        WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
        return IRQ_HANDLED;
    }

    dsisr = irq_info.dsisr;
    dar = irq_info.dar;

    pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

    if (dsisr & CXL_PSL_DSISR_An_DS) {
        /*
         * We don't inherently need to sleep to handle this, but we do
         * need to get a ref to the task's mm, which we can't do from
         * irq context without the potential for a deadlock since it
         * takes the task_lock. An alternate option would be to keep a
         * reference to the task's mm the entire time it has cxl open,
         * but to do that we need to solve the issue where we hold a
         * ref to the mm, but the mm can hold a ref to the fd after an
         * mmap preventing anything from being cleaned up.
         */
        pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
        return schedule_cxl_fault(ctx, dsisr, dar);
    }

    if (dsisr & CXL_PSL_DSISR_An_M)
        pr_devel("CXL interrupt: PTE not found\n");
    if (dsisr & CXL_PSL_DSISR_An_P)
        pr_devel("CXL interrupt: Storage protection violation\n");
    if (dsisr & CXL_PSL_DSISR_An_A)
        pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
    if (dsisr & CXL_PSL_DSISR_An_S)
        pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
    if (dsisr & CXL_PSL_DSISR_An_K)
        pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

    if (dsisr & CXL_PSL_DSISR_An_DM) {
        /*
         * In some cases we might be able to handle the fault
         * immediately if hash_page would succeed, but we still need
         * the task's mm, which as above we can't get without a lock.
         */
        pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
        return schedule_cxl_fault(ctx, dsisr, dar);
    }
    if (dsisr & CXL_PSL_DSISR_An_ST)
        WARN(1, "CXL interrupt: Segment Table PTE not found\n");
    if (dsisr & CXL_PSL_DSISR_An_UR)
        pr_devel("CXL interrupt: AURP PTE not found\n");
    if (dsisr & CXL_PSL_DSISR_An_PE)
        return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
    if (dsisr & CXL_PSL_DSISR_An_AE) {
        pr_devel("CXL interrupt: AFU Error %llx\n", irq_info.afu_err);

        if (ctx->pending_afu_err) {
            /*
             * This shouldn't happen - the PSL treats these errors
             * as fatal and will have reset the AFU, so there's not
             * much point buffering multiple AFU errors.
             * OTOH if we DO ever see a storm of these come in it's
             * probably best that we log them somewhere:
             */
            dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
                                "undelivered to pe %i: %llx\n",
                                ctx->pe, irq_info.afu_err);
        } else {
            spin_lock(&ctx->lock);
            ctx->afu_err = irq_info.afu_err;
            ctx->pending_afu_err = 1;
            spin_unlock(&ctx->lock);

            wake_up_all(&ctx->wq);
        }

        cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
    }
    if (dsisr & CXL_PSL_DSISR_An_OC)
        pr_devel("CXL interrupt: OS Context Warning\n");

    WARN(1, "Unhandled CXL PSL IRQ\n");
    return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
    struct cxl_afu *afu = data;
    struct cxl_context *ctx;
    int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
    int ret;

    rcu_read_lock();
    ctx = idr_find(&afu->contexts_idr, ph);
    if (ctx) {
        ret = cxl_irq(irq, ctx);
        rcu_read_unlock();
        return ret;
    }
    rcu_read_unlock();

    WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
    return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_afu(int irq, void *data)
{
    struct cxl_context *ctx = data;
    irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
    int irq_off, afu_irq = 1;
    __u16 range;
    int r;

    for (r = 1; r < CXL_IRQ_RANGES; r++) {
        irq_off = hwirq - ctx->irqs.offset[r];
        range = ctx->irqs.range[r];
        if (irq_off >= 0 && irq_off < range) {
            afu_irq += irq_off;
            break;
        }
        afu_irq += range;
    }
    if (unlikely(r >= CXL_IRQ_RANGES)) {
        WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
             ctx->pe, irq, hwirq);
        return IRQ_HANDLED;
    }

    pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
             afu_irq, ctx->pe, irq, hwirq);

    if (unlikely(!ctx->irq_bitmap)) {
        WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
        return IRQ_HANDLED;
    }
    spin_lock(&ctx->lock);
    set_bit(afu_irq - 1, ctx->irq_bitmap);
    ctx->pending_irq = true;
    spin_unlock(&ctx->lock);

    wake_up_all(&ctx->wq);

    return IRQ_HANDLED;
}

unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
                         irq_handler_t handler, void *cookie)
{
    unsigned int virq;
    int result;

    /* IRQ Domain? */
    virq = irq_create_mapping(NULL, hwirq);
    if (!virq) {
        dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
        return 0;
    }

    cxl_setup_irq(adapter, hwirq, virq);

    pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

    result = request_irq(virq, handler, 0, "cxl", cookie);
    if (result) {
        dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
        return 0;
    }

    return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
    free_irq(virq, cookie);
    irq_dispose_mapping(virq);
}

static int cxl_register_one_irq(struct cxl *adapter,
                                irq_handler_t handler,
                                void *cookie,
                                irq_hw_number_t *dest_hwirq,
                                unsigned int *dest_virq)
{
    int hwirq, virq;

    if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
        return hwirq;

    if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie)))
        goto err;

    *dest_hwirq = hwirq;
    *dest_virq = virq;

    return 0;

err:
    cxl_release_one_irq(adapter, hwirq);
    return -ENOMEM;
}

int cxl_register_psl_err_irq(struct cxl *adapter)
{
    int rc;

    if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
                                   &adapter->err_hwirq,
                                   &adapter->err_virq)))
        return rc;

    cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

    return 0;
}

void cxl_release_psl_err_irq(struct cxl *adapter)
{
    cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
    cxl_unmap_irq(adapter->err_virq, adapter);
    cxl_release_one_irq(adapter, adapter->err_hwirq);
}

int cxl_register_serr_irq(struct cxl_afu *afu)
{
    u64 serr;
    int rc;

    if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
                                   &afu->serr_hwirq,
                                   &afu->serr_virq)))
        return rc;

    serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
    serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
    cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

    return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
    cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
    cxl_unmap_irq(afu->serr_virq, afu);
    cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
}

int cxl_register_psl_irq(struct cxl_afu *afu)
{
    return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
                                &afu->psl_hwirq, &afu->psl_virq);
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
    cxl_unmap_irq(afu->psl_virq, afu);
    cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
    irq_hw_number_t hwirq;
    int rc, r, i;

    if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
        return rc;

    /* Multiplexed PSL Interrupt */
    ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
    ctx->irqs.range[0] = 1;

    ctx->irq_count = count;
    ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
                              sizeof(*ctx->irq_bitmap), GFP_KERNEL);
    if (!ctx->irq_bitmap)
        return -ENOMEM;
    for (r = 1; r < CXL_IRQ_RANGES; r++) {
        hwirq = ctx->irqs.offset[r];
        for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
            cxl_map_irq(ctx->afu->adapter, hwirq,
                        cxl_irq_afu, ctx);
        }
    }

    return 0;
}

void afu_release_irqs(struct cxl_context *ctx)
{
    irq_hw_number_t hwirq;
    unsigned int virq;
    int r, i;

    for (r = 1; r < CXL_IRQ_RANGES; r++) {
        hwirq = ctx->irqs.offset[r];
        for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
            virq = irq_find_mapping(NULL, hwirq);
            if (virq)
                cxl_unmap_irq(virq, ctx);
        }
    }

    cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}
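The range walk in cxl_irq_afu() above maps a hardware IRQ back to a 1-based AFU interrupt number; range 0 is reserved for the multiplexed PSL interrupt, so AFU IRQ numbering starts at 1 in range 1. A standalone sketch of the same arithmetic with illustrative range values (not part of this patch):

#include <stdio.h>

#define CXL_IRQ_RANGES 4

struct irq_ranges {
    unsigned long offset[CXL_IRQ_RANGES];
    unsigned short range[CXL_IRQ_RANGES];
};

/* Mirror of the cxl_irq_afu() walk: returns the 1-based AFU IRQ, or -1. */
static int hwirq_to_afu_irq(const struct irq_ranges *irqs, unsigned long hwirq)
{
    int afu_irq = 1, r;

    for (r = 1; r < CXL_IRQ_RANGES; r++) {
        long irq_off = (long)(hwirq - irqs->offset[r]);

        if (irq_off >= 0 && irq_off < irqs->range[r])
            return afu_irq + (int)irq_off;
        afu_irq += irqs->range[r];
    }
    return -1; /* out of range */
}

int main(void)
{
    /* Illustrative values: range 1 covers hwirqs 100..103, range 2 covers 200..201 */
    struct irq_ranges irqs = { {0, 100, 200, 0}, {1, 4, 2, 0} };

    printf("hwirq 102 -> afu irq %d\n", hwirq_to_afu_irq(&irqs, 102)); /* 3 */
    printf("hwirq 201 -> afu irq %d\n", hwirq_to_afu_irq(&irqs, 201)); /* 6 */
    return 0;
}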
drivers/misc/cxl/main.c (new file, 230 lines)
@@ -0,0 +1,230 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"

static DEFINE_SPINLOCK(adapter_idr_lock);
static DEFINE_IDR(cxl_adapter_idr);

uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");

static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
    struct task_struct *task;
    unsigned long flags;

    if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
        pr_devel("%s unable to get task %i\n",
                 __func__, pid_nr(ctx->pid));
        return;
    }

    if (task->mm != mm)
        goto out_put;

    pr_devel("%s matched mm - card: %i afu: %i pe: %i\n", __func__,
             ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe);

    spin_lock_irqsave(&ctx->sste_lock, flags);
    memset(ctx->sstp, 0, ctx->sst_size);
    spin_unlock_irqrestore(&ctx->sste_lock, flags);
    mb();
    cxl_afu_slbia(ctx->afu);
out_put:
    put_task_struct(task);
}

static inline void cxl_slbia_core(struct mm_struct *mm)
{
    struct cxl *adapter;
    struct cxl_afu *afu;
    struct cxl_context *ctx;
    int card, slice, id;

    pr_devel("%s called\n", __func__);

    spin_lock(&adapter_idr_lock);
    idr_for_each_entry(&cxl_adapter_idr, adapter, card) {
        /* XXX: Make this lookup faster with link from mm to ctx */
        spin_lock(&adapter->afu_list_lock);
        for (slice = 0; slice < adapter->slices; slice++) {
            afu = adapter->afu[slice];
            if (!afu->enabled)
                continue;
            rcu_read_lock();
            idr_for_each_entry(&afu->contexts_idr, ctx, id)
                _cxl_slbia(ctx, mm);
            rcu_read_unlock();
        }
        spin_unlock(&adapter->afu_list_lock);
    }
    spin_unlock(&adapter_idr_lock);
}

static struct cxl_calls cxl_calls = {
    .cxl_slbia = cxl_slbia_core,
    .owner = THIS_MODULE,
};

int cxl_alloc_sst(struct cxl_context *ctx)
{
    unsigned long vsid;
    u64 ea_mask, size, sstp0, sstp1;

    sstp0 = 0;
    sstp1 = 0;

    ctx->sst_size = PAGE_SIZE;
    ctx->sst_lru = 0;
    ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL);
    if (!ctx->sstp) {
        pr_err("cxl_alloc_sst: Unable to allocate segment table\n");
        return -ENOMEM;
    }
    pr_devel("SSTP allocated at 0x%p\n", ctx->sstp);

    vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12;

    sstp0 |= (u64)mmu_kernel_ssize << CXL_SSTP0_An_B_SHIFT;
    sstp0 |= (SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp) << 50;

    size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT;
    if (unlikely(size & ~CXL_SSTP0_An_SegTableSize_MASK)) {
        WARN(1, "Impossible segment table size\n");
        return -EINVAL;
    }
    sstp0 |= size;

    if (mmu_kernel_ssize == MMU_SEGSIZE_256M)
        ea_mask = 0xfffff00ULL;
    else
        ea_mask = 0xffffffff00ULL;

    sstp0 |= vsid >> (50 - 14); /* Top 14 bits of VSID */
    sstp1 |= (vsid << (64 - (50 - 14))) & ~ea_mask;
    sstp1 |= (u64)ctx->sstp & ea_mask;
    sstp1 |= CXL_SSTP1_An_V;

    pr_devel("Looked up %#llx: slbfee. %#llx (ssize: %x, vsid: %#lx), copied to SSTP0: %#llx, SSTP1: %#llx\n",
             (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1);

    /* Store the calculated sstp register values for use later */
    ctx->sstp0 = sstp0;
    ctx->sstp1 = sstp1;

    return 0;
}

/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
    struct cxl *adapter;

    spin_lock(&adapter_idr_lock);
    if ((adapter = idr_find(&cxl_adapter_idr, num)))
        get_device(&adapter->dev);
    spin_unlock(&adapter_idr_lock);

    return adapter;
}

int cxl_alloc_adapter_nr(struct cxl *adapter)
{
    int i;

    idr_preload(GFP_KERNEL);
    spin_lock(&adapter_idr_lock);
    i = idr_alloc(&cxl_adapter_idr, adapter, 0, 0, GFP_NOWAIT);
    spin_unlock(&adapter_idr_lock);
    idr_preload_end();
    if (i < 0)
        return i;

    adapter->adapter_num = i;

    return 0;
}

void cxl_remove_adapter_nr(struct cxl *adapter)
{
    idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}

int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
    if (afu->modes_supported & CXL_MODE_DIRECTED)
        return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED);

    if (afu->modes_supported & CXL_MODE_DEDICATED)
        return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED);

    dev_warn(&afu->dev, "No supported programming modes available\n");
    /* We don't fail this so the user can inspect sysfs */
    return 0;
}

static int __init init_cxl(void)
{
    int rc = 0;

    if (!cpu_has_feature(CPU_FTR_HVMODE))
        return -EPERM;

    if ((rc = cxl_file_init()))
        return rc;

    cxl_debugfs_init();

    if ((rc = register_cxl_calls(&cxl_calls)))
        goto err;

    if ((rc = pci_register_driver(&cxl_pci_driver)))
        goto err1;

    return 0;
err1:
    unregister_cxl_calls(&cxl_calls);
err:
    cxl_debugfs_exit();
    cxl_file_exit();

    return rc;
}

static void exit_cxl(void)
{
    pci_unregister_driver(&cxl_pci_driver);

    cxl_debugfs_exit();
    cxl_file_exit();
    unregister_cxl_calls(&cxl_calls);
}

module_init(init_cxl);
module_exit(exit_cxl);

MODULE_DESCRIPTION("IBM Coherent Accelerator");
MODULE_AUTHOR("Ian Munsie <imunsie@au1.ibm.com>");
MODULE_LICENSE("GPL");
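The SegTableSize encoding in cxl_alloc_sst() above packs the table size as (size_in_bytes / 256) - 1 before shifting it into SSTP0. For the one-page (4096-byte) table allocated here the encoded field is 15. A quick standalone check (not part of this patch; the shift itself is CXL_SSTP0_An_SegTableSize_SHIFT in cxl.h and is omitted here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t sst_size = 4096;                      /* PAGE_SIZE table from cxl_alloc_sst() */
    uint64_t seg_table_size = (sst_size >> 8) - 1; /* encoded field value */

    /* 4096 >> 8 == 16, so the encoded SegTableSize is 15 */
    printf("SegTableSize field for %llu-byte table: %llu\n",
           (unsigned long long)sst_size, (unsigned long long)seg_table_size);
    return 0;
}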
drivers/misc/cxl/native.c (new file, 683 lines)
Diff suppressed because the file is too large.
drivers/misc/cxl/pci.c (new file, 1000 lines)
Diff suppressed because the file is too large.
drivers/misc/cxl/sysfs.c (new file, 385 lines)
@@ -0,0 +1,385 @@
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

#include "cxl.h"

#define to_afu_chardev_m(d) dev_get_drvdata(d)

/********* Adapter attributes **********************************************/

static ssize_t caia_version_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
    struct cxl *adapter = to_cxl_adapter(device);

    return scnprintf(buf, PAGE_SIZE, "%i.%i\n", adapter->caia_major,
                     adapter->caia_minor);
}

static ssize_t psl_revision_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
    struct cxl *adapter = to_cxl_adapter(device);

    return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_rev);
}

static ssize_t base_image_show(struct device *device,
                               struct device_attribute *attr,
                               char *buf)
{
    struct cxl *adapter = to_cxl_adapter(device);

    return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->base_image);
}

static ssize_t image_loaded_show(struct device *device,
                                 struct device_attribute *attr,
                                 char *buf)
{
    struct cxl *adapter = to_cxl_adapter(device);

    if (adapter->user_image_loaded)
        return scnprintf(buf, PAGE_SIZE, "user\n");
    return scnprintf(buf, PAGE_SIZE, "factory\n");
}

static struct device_attribute adapter_attrs[] = {
    __ATTR_RO(caia_version),
    __ATTR_RO(psl_revision),
    __ATTR_RO(base_image),
    __ATTR_RO(image_loaded),
};

/********* AFU master specific attributes **********************************/

static ssize_t mmio_size_show_master(struct device *device,
                                     struct device_attribute *attr,
                                     char *buf)
{
    struct cxl_afu *afu = to_afu_chardev_m(device);

    return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t pp_mmio_off_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
    struct cxl_afu *afu = to_afu_chardev_m(device);

    return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset);
}

static ssize_t pp_mmio_len_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
    struct cxl_afu *afu = to_afu_chardev_m(device);

    return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
}

static struct device_attribute afu_master_attrs[] = {
    __ATTR(mmio_size, S_IRUGO, mmio_size_show_master, NULL),
    __ATTR_RO(pp_mmio_off),
    __ATTR_RO(pp_mmio_len),
};

/********* AFU attributes **************************************************/

static ssize_t mmio_size_show(struct device *device,
                              struct device_attribute *attr,
                              char *buf)
{
    struct cxl_afu *afu = to_cxl_afu(device);

    if (afu->pp_size)
        return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_size);
    return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->adapter->ps_size);
}

static ssize_t reset_store_afu(struct device *device,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
{
    struct cxl_afu *afu = to_cxl_afu(device);
    int rc;

    /* Not safe to reset if it is currently in use */
    spin_lock(&afu->contexts_lock);
    if (!idr_is_empty(&afu->contexts_idr)) {
        rc = -EBUSY;
        goto err;
    }

    if ((rc = cxl_afu_reset(afu)))
        goto err;

    rc = count;
err:
    spin_unlock(&afu->contexts_lock);
    return rc;
}

static ssize_t irqs_min_show(struct device *device,
                             struct device_attribute *attr,
                             char *buf)
{
    struct cxl_afu *afu = to_cxl_afu(device);

    return scnprintf(buf, PAGE_SIZE, "%i\n", afu->pp_irqs);
}

static ssize_t irqs_max_show(struct device *device,
                             struct device_attribute *attr,
                             char *buf)
{
    struct cxl_afu *afu = to_cxl_afu(device);

    return scnprintf(buf, PAGE_SIZE, "%i\n", afu->irqs_max);
}

static ssize_t irqs_max_store(struct device *device,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
{
    struct cxl_afu *afu = to_cxl_afu(device);
    ssize_t ret;
    int irqs_max;

    ret = sscanf(buf, "%i", &irqs_max);
    if (ret != 1)
        return -EINVAL;

    if (irqs_max < afu->pp_irqs)
        return -EINVAL;

    if (irqs_max > afu->adapter->user_irqs)
        return -EINVAL;

    afu->irqs_max = irqs_max;
    return count;
}

static ssize_t modes_supported_show(struct device *device,
                                    struct device_attribute *attr, char *buf)
{
    struct cxl_afu *afu = to_cxl_afu(device);
    char *p = buf, *end = buf + PAGE_SIZE;

    if (afu->modes_supported & CXL_MODE_DEDICATED)
        p += scnprintf(p, end - p, "dedicated_process\n");
    if (afu->modes_supported & CXL_MODE_DIRECTED)
        p += scnprintf(p, end - p, "afu_directed\n");
    return (p - buf);
}

static ssize_t prefault_mode_show(struct device *device,
                                  struct device_attribute *attr,
                                  char *buf)
{
    struct cxl_afu *afu = to_cxl_afu(device);

    switch (afu->prefault_mode) {
    case CXL_PREFAULT_WED:
        return scnprintf(buf, PAGE_SIZE, "work_element_descriptor\n");
    case CXL_PREFAULT_ALL:
        return scnprintf(buf, PAGE_SIZE, "all\n");
    default:
        return scnprintf(buf, PAGE_SIZE, "none\n");
    }
}

static ssize_t prefault_mode_store(struct device *device,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
    struct cxl_afu *afu = to_cxl_afu(device);
    enum prefault_modes mode = -1;

    if (!strncmp(buf, "work_element_descriptor", 23))
        mode = CXL_PREFAULT_WED;
    if (!strncmp(buf, "all", 3))
        mode = CXL_PREFAULT_ALL;
    if (!strncmp(buf, "none", 4))
        mode = CXL_PREFAULT_NONE;

    if (mode == -1)
        return -EINVAL;

    afu->prefault_mode = mode;
    return count;
}

static ssize_t mode_show(struct device *device,
                         struct device_attribute *attr,
                         char *buf)
{
    struct cxl_afu *afu = to_cxl_afu(device);

    if (afu->current_mode == CXL_MODE_DEDICATED)
        return scnprintf(buf, PAGE_SIZE, "dedicated_process\n");
    if (afu->current_mode == CXL_MODE_DIRECTED)
        return scnprintf(buf, PAGE_SIZE, "afu_directed\n");
    return scnprintf(buf, PAGE_SIZE, "none\n");
}

static ssize_t mode_store(struct device *device, struct device_attribute *attr,
                          const char *buf, size_t count)
{
    struct cxl_afu *afu = to_cxl_afu(device);
    int old_mode, mode = -1;
    int rc = -EBUSY;

    /* Can't change this if we have a user */
    spin_lock(&afu->contexts_lock);
    if (!idr_is_empty(&afu->contexts_idr))
        goto err;

    if (!strncmp(buf, "dedicated_process", 17))
        mode = CXL_MODE_DEDICATED;
    if (!strncmp(buf, "afu_directed", 12))
        mode = CXL_MODE_DIRECTED;
    if (!strncmp(buf, "none", 4))
        mode = 0;

    if (mode == -1) {
        rc = -EINVAL;
        goto err;
    }

    /*
     * cxl_afu_deactivate_mode needs to be done outside the lock, so clear
     * the mode first to prevent other contexts coming in before we are
     * ready:
     */
    old_mode = afu->current_mode;
    afu->current_mode = 0;
    afu->num_procs = 0;

    spin_unlock(&afu->contexts_lock);

    if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
        return rc;
    if ((rc = cxl_afu_activate_mode(afu, mode)))
        return rc;

    return count;
err:
    spin_unlock(&afu->contexts_lock);
    return rc;
}

static ssize_t api_version_show(struct device *device,
                                struct device_attribute *attr,
                                char *buf)
{
    return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION);
}

static ssize_t api_version_compatible_show(struct device *device,
                                           struct device_attribute *attr,
                                           char *buf)
{
    return scnprintf(buf, PAGE_SIZE, "%i\n", CXL_API_VERSION_COMPATIBLE);
}

static struct device_attribute afu_attrs[] = {
    __ATTR_RO(mmio_size),
    __ATTR_RO(irqs_min),
    __ATTR_RW(irqs_max),
    __ATTR_RO(modes_supported),
    __ATTR_RW(mode),
    __ATTR_RW(prefault_mode),
    __ATTR_RO(api_version),
    __ATTR_RO(api_version_compatible),
    __ATTR(reset, S_IWUSR, NULL, reset_store_afu),
};

int cxl_sysfs_adapter_add(struct cxl *adapter)
{
    int i, rc;

    for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
        if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i])))
            goto err;
    }
    return 0;
err:
    for (i--; i >= 0; i--)
        device_remove_file(&adapter->dev, &adapter_attrs[i]);
    return rc;
}

void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++)
        device_remove_file(&adapter->dev, &adapter_attrs[i]);
}

int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
    int i, rc;

    for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
        if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
            goto err;
    }

    return 0;

err:
    for (i--; i >= 0; i--)
        device_remove_file(&afu->dev, &afu_attrs[i]);
    return rc;
}

void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
        device_remove_file(&afu->dev, &afu_attrs[i]);
}

int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
    int i, rc;

    for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
        if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
            goto err;
    }

    return 0;

err:
    for (i--; i >= 0; i--)
        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
    return rc;
}

void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++)
        device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
}