driver: rknpu: Update rknpu driver, version: 0.9.7

* Add state init on probe and reset
* Fix implicit declaration of function 'rockchip_uninit_opp_table' for kernel 5.10.160
* Add nbuf sgt support

Signed-off-by: Felix Zeng <felix.zeng@rock-chips.com>
Change-Id: Ibb88d88709ba5dea7debaafa44deb206c9a1f1af
This commit is contained in:
Felix Zeng
2024-04-24 12:41:31 +08:00
parent 68bfc914a6
commit 5776643dfd
7 changed files with 355 additions and 59 deletions

View File

@@ -32,7 +32,7 @@
#define DRIVER_DATE "20240424"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 9
#define DRIVER_PATCHLEVEL 6
#define DRIVER_PATCHLEVEL 7
#define LOG_TAG "RKNPU"
@@ -54,6 +54,7 @@
#define LOG_DEV_ERROR(dev, fmt, args...) dev_err(dev, LOG_TAG ": " fmt, ##args)
#define RKNPU_MAX_IOMMU_DOMAIN_NUM 16
#define RKNPU_CACHE_SG_TABLE_NUM 2
struct rknpu_irqs_data {
const char *name;
@@ -84,6 +85,8 @@ struct rknpu_config {
__u32 core_mask;
const struct rknpu_amount_data *amount_top;
const struct rknpu_amount_data *amount_core;
void (*state_init)(struct rknpu_device *rknpu_dev);
int (*cache_sgt_init)(struct rknpu_device *rknpu_dev);
};
struct rknpu_timer {
@@ -170,6 +173,7 @@ struct rknpu_device {
int iommu_domain_num;
int iommu_domain_id;
struct iommu_domain *iommu_domains[RKNPU_MAX_IOMMU_DOMAIN_NUM];
struct sg_table *cache_sgt[RKNPU_CACHE_SG_TABLE_NUM];
};
struct rknpu_session {

View File

@@ -61,6 +61,8 @@ struct rknpu_gem_object {
struct sg_table *sgt;
struct drm_mm_node mm_node;
int iommu_domain_id;
unsigned int core_mask;
unsigned int cache_with_sgt;
};
enum rknpu_cache_type {
@@ -69,11 +71,10 @@ enum rknpu_cache_type {
};
/* create a new buffer with gem object */
struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *dev,
unsigned int flags,
unsigned long size,
unsigned long sram_size,
int iommu_domain_id);
struct rknpu_gem_object *
rknpu_gem_object_create(struct drm_device *dev, unsigned int flags,
unsigned long size, unsigned long sram_size,
int iommu_domain_id, unsigned int core_mask);
/* destroy a buffer with gem object */
void rknpu_gem_object_destroy(struct rknpu_gem_object *rknpu_obj);

View File

@@ -158,7 +158,7 @@ struct rknpu_mem_create {
__u64 dma_addr;
__u64 sram_size;
__s32 iommu_domain_id;
__u32 reserved;
__u32 core_mask;
};
/**

View File

@@ -395,7 +395,11 @@ int rknpu_devfreq_init(struct rknpu_device *rknpu_dev)
err_remove_governor:
devfreq_remove_governor(&devfreq_rknpu_ondemand);
err_uinit_table:
#if KERNEL_VERSION(5, 10, 198) <= LINUX_VERSION_CODE
rockchip_uninit_opp_table(dev, info);
#else
dev_pm_opp_of_remove_table(dev);
#endif
return ret;
}
@@ -729,7 +733,11 @@ out:
err_remove_governor:
devfreq_remove_governor(&devfreq_rknpu_ondemand);
err_remove_table:
#if KERNEL_VERSION(5, 10, 198) <= LINUX_VERSION_CODE
rockchip_uninit_opp_table(dev, &rknpu_dev->opp_info);
#else
dev_pm_opp_of_remove_table(dev);
#endif
rknpu_dev->devfreq = NULL;
@@ -790,6 +798,10 @@ void rknpu_devfreq_remove(struct rknpu_device *rknpu_dev)
}
if (rknpu_dev->devfreq)
devfreq_remove_governor(&devfreq_rknpu_ondemand);
#if KERNEL_VERSION(5, 10, 198) <= LINUX_VERSION_CODE
rockchip_uninit_opp_table(rknpu_dev->dev, &rknpu_dev->opp_info);
#else
dev_pm_opp_of_remove_table(rknpu_dev->dev);
#endif
}
EXPORT_SYMBOL(rknpu_devfreq_remove);

View File

@@ -108,6 +108,57 @@ static const struct rknpu_amount_data rknpu_core_amount = {
.offset_wt_rd = 0x243c,
};
/*
 * rk3576_state_init() - per-power-on NPU state setup for RK3576.
 *
 * Installed as config->state_init and invoked after power-on and after a
 * soft reset (see rknpu_power_on() and rknpu_soft_reset() hunks in this
 * commit).  Only core 0's register window (base[0]) is written.
 *
 * NOTE(review): the offsets 0x10 / 0x1004 / 0x1024 and the written values
 * are undocumented here; the 0 -> 0x80000000 -> 1 -> 0x80000000 -> 0x1e
 * sequence presumably selects and clears/initializes internal state or
 * counter banks — confirm against the RK3576 TRM.
 */
static void rk3576_state_init(struct rknpu_device *rknpu_dev)
{
void __iomem *rknpu_core_base = rknpu_dev->base[0];
writel(0x1, rknpu_core_base + 0x10);
writel(0, rknpu_core_base + 0x1004);
writel(0x80000000, rknpu_core_base + 0x1024);
writel(1, rknpu_core_base + 0x1004);
writel(0x80000000, rknpu_core_base + 0x1024);
writel(0x1e, rknpu_core_base + 0x1004);
}
static int rk3576_cache_sgt_init(struct rknpu_device *rknpu_dev)
{
struct sg_table *sgt = NULL;
struct scatterlist *sgl = NULL;
uint64_t block_size_kb[4] = { 448, 64, 448, 64 };
uint64_t block_offset_kb[4] = { 0, 896, 448, 960 };
int core_num = rknpu_dev->config->num_irqs;
int ret = 0, i = 0, j = 0;
for (i = 0; i < core_num; i++) {
sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sgt)
goto out_free_table;
ret = sg_alloc_table(sgt, core_num, GFP_KERNEL);
if (ret) {
kfree(sgt);
goto out_free_table;
}
rknpu_dev->cache_sgt[i] = sgt;
for_each_sgtable_sg(sgt, sgl, j) {
sg_set_page(sgl, NULL,
block_size_kb[i * core_num + j] * 1024,
block_offset_kb[i * core_num + j] * 1024);
}
}
return 0;
out_free_table:
for (i = 0; i < core_num; i++) {
if (rknpu_dev->cache_sgt[i]) {
sg_free_table(rknpu_dev->cache_sgt[i]);
kfree(rknpu_dev->cache_sgt[i]);
rknpu_dev->cache_sgt[i] = NULL;
}
}
return ret;
}
static const struct rknpu_config rk356x_rknpu_config = {
.bw_priority_addr = 0xfe180008,
.bw_priority_length = 0x10,
@@ -125,6 +176,8 @@ static const struct rknpu_config rk356x_rknpu_config = {
.core_mask = 0x1,
.amount_top = &rknpu_old_top_amount,
.amount_core = NULL,
.state_init = NULL,
.cache_sgt_init = NULL,
};
static const struct rknpu_config rk3588_rknpu_config = {
@@ -144,6 +197,8 @@ static const struct rknpu_config rk3588_rknpu_config = {
.core_mask = 0x7,
.amount_top = NULL,
.amount_core = NULL,
.state_init = NULL,
.cache_sgt_init = NULL,
};
static const struct rknpu_config rk3583_rknpu_config = {
@@ -163,6 +218,8 @@ static const struct rknpu_config rk3583_rknpu_config = {
.core_mask = 0x3,
.amount_top = NULL,
.amount_core = NULL,
.state_init = NULL,
.cache_sgt_init = NULL,
};
static const struct rknpu_config rv1106_rknpu_config = {
@@ -182,6 +239,8 @@ static const struct rknpu_config rv1106_rknpu_config = {
.core_mask = 0x1,
.amount_top = &rknpu_old_top_amount,
.amount_core = NULL,
.state_init = NULL,
.cache_sgt_init = NULL,
};
static const struct rknpu_config rk3562_rknpu_config = {
@@ -201,6 +260,8 @@ static const struct rknpu_config rk3562_rknpu_config = {
.core_mask = 0x1,
.amount_top = &rknpu_old_top_amount,
.amount_core = NULL,
.state_init = NULL,
.cache_sgt_init = NULL,
};
static const struct rknpu_config rk3576_rknpu_config = {
@@ -220,6 +281,8 @@ static const struct rknpu_config rk3576_rknpu_config = {
.core_mask = 0x3,
.amount_top = &rknpu_top_amount,
.amount_core = &rknpu_core_amount,
.state_init = rk3576_state_init,
.cache_sgt_init = rk3576_cache_sgt_init,
};
/* driver probe and init */
@@ -581,16 +644,16 @@ static int rknpu_action_ioctl(struct drm_device *dev, void *data,
return rknpu_action(rknpu_dev, (struct rknpu_action *)data);
}
#define RKNPU_IOCTL(func) \
static int __##func(struct drm_device *dev, void *data, \
struct drm_file *file_priv) \
{ \
struct rknpu_device *rknpu_dev = dev_get_drvdata(dev->dev); \
int ret = -EINVAL; \
rknpu_power_get(rknpu_dev); \
ret = func(dev, data, file_priv); \
rknpu_power_put_delay(rknpu_dev); \
return ret; \
#define RKNPU_IOCTL(func) \
static int __##func(struct drm_device *dev, void *data, \
struct drm_file *file_priv) \
{ \
struct rknpu_device *rknpu_dev = dev_get_drvdata(dev->dev); \
int ret = -EINVAL; \
rknpu_power_get(rknpu_dev); \
ret = func(dev, data, file_priv); \
rknpu_power_put_delay(rknpu_dev); \
return ret; \
}
RKNPU_IOCTL(rknpu_action_ioctl);
@@ -929,6 +992,9 @@ static int rknpu_power_on(struct rknpu_device *rknpu_dev)
ret);
}
if (rknpu_dev->config->state_init != NULL)
rknpu_dev->config->state_init(rknpu_dev);
out:
#ifndef FPGA_PLATFORM
rknpu_devfreq_unlock(rknpu_dev);
@@ -1419,8 +1485,11 @@ static int rknpu_probe(struct platform_device *pdev)
}
if (IS_ENABLED(CONFIG_NO_GKI) && rknpu_dev->iommu_en &&
rknpu_dev->config->nbuf_size > 0)
rknpu_dev->config->nbuf_size > 0) {
rknpu_find_nbuf_resource(rknpu_dev);
if (rknpu_dev->config->cache_sgt_init != NULL)
rknpu_dev->config->cache_sgt_init(rknpu_dev);
}
if (rknpu_dev->iommu_en)
rknpu_iommu_init_domain(rknpu_dev);
@@ -1464,6 +1533,16 @@ static int rknpu_remove(struct platform_device *pdev)
rknpu_debugger_remove(rknpu_dev);
rknpu_cancel_timer(rknpu_dev);
if (rknpu_dev->config->cache_sgt_init != NULL) {
for (i = 0; i < RKNPU_CACHE_SG_TABLE_NUM; i++) {
if (rknpu_dev->cache_sgt[i]) {
sg_free_table(rknpu_dev->cache_sgt[i]);
kfree(rknpu_dev->cache_sgt[i]);
rknpu_dev->cache_sgt[i] = NULL;
}
}
}
for (i = 0; i < rknpu_dev->config->num_irqs; i++) {
WARN_ON(rknpu_dev->subcore_datas[i].job);
WARN_ON(!list_empty(&rknpu_dev->subcore_datas[i].todo_list));
@@ -1539,10 +1618,8 @@ static int rknpu_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops rknpu_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(rknpu_suspend,
rknpu_resume)
SET_RUNTIME_PM_OPS(rknpu_runtime_suspend, rknpu_runtime_resume,
NULL)
SET_SYSTEM_SLEEP_PM_OPS(rknpu_suspend, rknpu_resume) SET_RUNTIME_PM_OPS(
rknpu_runtime_suspend, rknpu_runtime_resume, NULL)
};
#endif

View File

@@ -418,6 +418,50 @@ static void rknpu_gem_release(struct rknpu_gem_object *rknpu_obj)
kfree(rknpu_obj);
}
/*
 * rknpu_iommu_map_with_cache_sgt() - IOMMU-map up to cache_size bytes of the
 * per-core nbuf cache blocks into the object's IOVA range.
 *
 * The physical layout comes from rknpu_dev->cache_sgt[index], whose entries
 * carry block offsets (s->offset, relative to nbuf_start) and block sizes
 * (s->length) rather than real pages.  Blocks are mapped back-to-back at
 * rknpu_obj->iova_start until cache_size bytes are covered or the list ends.
 *
 * Returns 0 on success or the iommu_map() error code.
 * NOTE(review): on a mid-loop iommu_map() failure, segments already mapped
 * are not unmapped here — presumably the caller's free path tears down the
 * whole IOVA range; confirm against rknpu_gem_free_buf_with_cache().
 */
static int rknpu_iommu_map_with_cache_sgt(struct iommu_domain *domain,
struct rknpu_device *rknpu_dev,
struct rknpu_gem_object *rknpu_obj,
unsigned long cache_size)
{
phys_addr_t cache_start = 0;
unsigned long iova_start = rknpu_obj->iova_start;
struct scatterlist *s = NULL;
unsigned long length = cache_size;
unsigned long size = 0;
int i = 0;
int ret = 0;
int index = 0;
/* Select the sg_table for the core this buffer is bound to. */
switch (rknpu_obj->core_mask) {
case RKNPU_CORE0_MASK:
index = 0;
break;
case RKNPU_CORE1_MASK:
index = 1;
break;
default:
/* Including AUTO: falls back to core 0's layout. */
break;
}
for_each_sgtable_sg(rknpu_dev->cache_sgt[index], s, i) {
cache_start = rknpu_dev->nbuf_start + s->offset;
/* Map the whole block, or only what remains of cache_size. */
size = length < s->length ? length : s->length;
ret = iommu_map(domain, iova_start, cache_start, size,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
LOG_ERROR("cache iommu_map error: %d\n", ret);
return ret;
}
length -= size;
iova_start += size;
if (length == 0)
break;
}
return ret;
}
static int rknpu_gem_alloc_buf_with_cache(struct rknpu_gem_object *rknpu_obj,
enum rknpu_cache_type cache_type)
{
@@ -500,9 +544,14 @@ static int rknpu_gem_alloc_buf_with_cache(struct rknpu_gem_object *rknpu_obj,
* |<- - - - - - - iova_size - - - - - - ->|
*
*/
ret = iommu_map(domain, rknpu_obj->iova_start,
cache_start + cache_offset, cache_size,
IOMMU_READ | IOMMU_WRITE);
if (!rknpu_obj->cache_with_sgt)
ret = iommu_map(domain, rknpu_obj->iova_start,
cache_start + cache_offset, cache_size,
IOMMU_READ | IOMMU_WRITE);
else
ret = rknpu_iommu_map_with_cache_sgt(domain, rknpu_dev,
rknpu_obj, cache_size);
if (ret) {
LOG_ERROR("cache iommu_map error: %d\n", ret);
goto free_iova;
@@ -625,11 +674,10 @@ static void rknpu_gem_free_buf_with_cache(struct rknpu_gem_object *rknpu_obj,
}
}
struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *drm,
unsigned int flags,
unsigned long size,
unsigned long sram_size,
int iommu_domain_id)
struct rknpu_gem_object *
rknpu_gem_object_create(struct drm_device *drm, unsigned int flags,
unsigned long size, unsigned long sram_size,
int iommu_domain_id, unsigned int core_mask)
{
struct rknpu_device *rknpu_dev = drm->dev_private;
struct rknpu_gem_object *rknpu_obj = NULL;
@@ -671,6 +719,8 @@ struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *drm,
if (sram_size != 0)
sram_size = round_up(sram_size, PAGE_SIZE);
rknpu_obj->cache_with_sgt = 0;
sram_free_size = rknpu_dev->sram_mm->free_chunks *
rknpu_dev->sram_mm->chunk_size;
if (sram_free_size > 0) {
@@ -704,9 +754,22 @@ struct rknpu_gem_object *rknpu_gem_object_create(struct drm_device *drm,
} else if (IS_ENABLED(CONFIG_NO_GKI) &&
(flags & RKNPU_MEM_TRY_ALLOC_NBUF) &&
rknpu_dev->nbuf_size > 0) {
size_t nbuf_size = remain_ddr_size <= rknpu_dev->nbuf_size ?
remain_ddr_size :
rknpu_dev->nbuf_size;
size_t nbuf_size = rknpu_dev->nbuf_size;
rknpu_obj->cache_with_sgt = 0;
if (core_mask == RKNPU_CORE_AUTO_MASK ||
core_mask == RKNPU_CORE0_MASK ||
core_mask == RKNPU_CORE1_MASK) {
if (rknpu_dev->cache_sgt[0])
rknpu_obj->cache_with_sgt = 1;
nbuf_size = rknpu_dev->nbuf_size /
rknpu_dev->config->num_irqs;
}
rknpu_obj->core_mask = core_mask;
nbuf_size = remain_ddr_size <= nbuf_size ? remain_ddr_size :
nbuf_size;
if (nbuf_size > 0) {
rknpu_obj->nbuf_size = nbuf_size;
@@ -797,7 +860,8 @@ int rknpu_gem_create_ioctl(struct drm_device *drm, void *data,
if (!rknpu_obj) {
rknpu_obj = rknpu_gem_object_create(drm, args->flags,
args->size, args->sram_size,
args->iommu_domain_id);
args->iommu_domain_id,
args->core_mask);
if (IS_ERR(rknpu_obj))
return PTR_ERR(rknpu_obj);
@@ -906,6 +970,53 @@ static int rknpu_gem_mmap_pages(struct rknpu_gem_object *rknpu_obj,
}
#endif
/*
 * rknpu_remap_pfn_with_cache_sgt() - mmap the per-core nbuf cache blocks
 * into user space, segment by segment.
 *
 * Userspace counterpart of rknpu_iommu_map_with_cache_sgt(): walks the same
 * cache_sgt[index] layout table (s->offset relative to nbuf_start,
 * s->length = block size) and remap_pfn_range()s each block contiguously
 * starting at vma->vm_start, until cache_size bytes are covered.
 *
 * Returns 0 on success or the remap_pfn_range() error code.
 * NOTE(review): vma->vm_pgoff is overwritten with each block's PFN; the
 * last value written sticks — presumably harmless for this driver's mmap
 * path, but confirm nothing reads vm_pgoff afterwards.
 */
static int rknpu_remap_pfn_with_cache_sgt(struct rknpu_device *rknpu_dev,
struct rknpu_gem_object *rknpu_obj,
struct vm_area_struct *vma,
unsigned long cache_size)
{
phys_addr_t cache_start = 0;
unsigned long vm_start = vma->vm_start;
struct scatterlist *s = NULL;
unsigned long length = cache_size;
unsigned long size = 0;
int i = 0;
int ret = 0;
int index = 0;
/* Select the sg_table for the core this buffer is bound to. */
switch (rknpu_obj->core_mask) {
case RKNPU_CORE0_MASK:
index = 0;
break;
case RKNPU_CORE1_MASK:
index = 1;
break;
default:
/* Including AUTO: falls back to core 0's layout. */
break;
}
for_each_sgtable_sg(rknpu_dev->cache_sgt[index], s, i) {
cache_start = rknpu_dev->nbuf_start + s->offset;
size = length < s->length ? length : s->length;
vma->vm_pgoff = __phys_to_pfn(cache_start);
ret = remap_pfn_range(vma, vm_start, vma->vm_pgoff, size,
vma->vm_page_prot);
if (ret) {
LOG_ERROR("cache remap_pfn_range error: %d\n", ret);
return ret;
}
length -= size;
vm_start += size;
if (length == 0)
break;
}
return ret;
}
static int rknpu_gem_mmap_cache(struct rknpu_gem_object *rknpu_obj,
struct vm_area_struct *vma,
enum rknpu_cache_type cache_type)
@@ -951,10 +1062,16 @@ static int rknpu_gem_mmap_cache(struct rknpu_gem_object *rknpu_obj,
* NOTE: This conversion carries a risk because the resulting PFN is not a true
* page frame number and may not be valid or usable in all contexts.
*/
vma->vm_pgoff = __phys_to_pfn(cache_start + cache_offset);
ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, cache_size,
vma->vm_page_prot);
if (!rknpu_obj->cache_with_sgt) {
vma->vm_pgoff = __phys_to_pfn(cache_start + cache_offset);
ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
cache_size, vma->vm_page_prot);
} else
ret = rknpu_remap_pfn_with_cache_sgt(rknpu_dev, rknpu_obj, vma,
cache_size);
if (ret)
return -EAGAIN;
@@ -1053,7 +1170,7 @@ int rknpu_gem_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
else
flags = RKNPU_MEM_CONTIGUOUS | RKNPU_MEM_WRITE_COMBINE;
rknpu_obj = rknpu_gem_object_create(drm, flags, args->size, 0, 0);
rknpu_obj = rknpu_gem_object_create(drm, flags, args->size, 0, 0, 0);
if (IS_ERR(rknpu_obj)) {
LOG_DEV_ERROR(drm->dev, "gem object allocate failed.\n");
return PTR_ERR(rknpu_obj);
@@ -1376,16 +1493,78 @@ int rknpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return rknpu_gem_mmap_obj(obj, vma);
}
/*
 * rknpu_cache_sync_with_sg() - CPU-cache maintenance over the per-core nbuf
 * cache blocks for a sync request of *length bytes starting at *offset.
 *
 * Walks cache_sgt[index] (s->offset relative to the kernel mapping
 * nbuf_base_io, s->length = block size) and cleans (TO_DEVICE) and/or
 * invalidates (FROM_DEVICE) each touched block, choosing the pre-6.1
 * __dma_map_area/__dma_unmap_area or the 6.1+ dcache_clean_poc/
 * dcache_inval_poc API by kernel version.
 *
 * *length and *offset are consumed in place (both reach 0 when the request
 * is fully covered), matching the calling convention of rknpu_cache_sync().
 * Always returns 0.
 *
 * NOTE(review): cache_start does not add *offset, so maintenance on the
 * first segment starts at the segment base rather than at *offset; and if
 * *offset exceeded s->length, "s->length - *offset" would underflow
 * (unsigned).  Presumably callers only pass offsets inside the first
 * segment — confirm against rknpu_gem_sync_ioctl().
 */
static int rknpu_cache_sync_with_sg(struct rknpu_device *rknpu_dev,
struct rknpu_gem_object *rknpu_obj,
unsigned long *length,
unsigned long *offset, uint32_t dir)
{
struct scatterlist *s = NULL;
int i = 0;
int index = 0;
void __iomem *cache_start = 0;
unsigned long cache_length = 0;
/* Select the sg_table for the core this buffer is bound to. */
switch (rknpu_obj->core_mask) {
case RKNPU_CORE0_MASK:
index = 0;
break;
case RKNPU_CORE1_MASK:
index = 1;
break;
default:
/* Including AUTO: falls back to core 0's layout. */
break;
}
for_each_sgtable_sg(rknpu_dev->cache_sgt[index], s, i) {
cache_start = rknpu_dev->nbuf_base_io + s->offset;
/* Bytes of this request that land in the current segment. */
cache_length = (*offset + *length) <= s->length ?
*length :
s->length - *offset;
if (dir & RKNPU_MEM_SYNC_TO_DEVICE) {
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
__dma_map_area(cache_start, cache_length,
DMA_TO_DEVICE);
#else
dcache_clean_poc((unsigned long)cache_start,
(unsigned long)cache_start +
cache_length);
#endif
}
if (dir & RKNPU_MEM_SYNC_FROM_DEVICE) {
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
__dma_unmap_area(cache_start, cache_length,
DMA_FROM_DEVICE);
#else
dcache_inval_poc((unsigned long)cache_start,
(unsigned long)cache_start +
cache_length);
#endif
}
/* Consume what this segment covered; offset only applies once. */
*length = (*offset + *length) <= s->length ?
0 :
*length - cache_length;
*offset = 0;
if (*length == 0)
break;
}
return 0;
}
static int rknpu_cache_sync(struct rknpu_gem_object *rknpu_obj,
unsigned long *length, unsigned long *offset,
enum rknpu_cache_type cache_type)
enum rknpu_cache_type cache_type, uint32_t dir)
{
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
struct drm_gem_object *obj = &rknpu_obj->base;
struct rknpu_device *rknpu_dev = obj->dev->dev_private;
void __iomem *cache_base_io = NULL;
unsigned long cache_offset = 0;
unsigned long cache_size = 0;
void __iomem *cache_start = 0;
unsigned long cache_length = 0;
switch (cache_type) {
case RKNPU_CACHE_SRAM:
@@ -1404,26 +1583,46 @@ static int rknpu_cache_sync(struct rknpu_gem_object *rknpu_obj,
return -EINVAL;
}
if ((*offset + *length) <= cache_size) {
__dma_map_area(cache_base_io + *offset + cache_offset, *length,
DMA_TO_DEVICE);
__dma_unmap_area(cache_base_io + *offset + cache_offset,
*length, DMA_FROM_DEVICE);
*length = 0;
*offset = 0;
} else if (*offset >= cache_size) {
if (*offset >= cache_size) {
*offset -= cache_size;
} else {
unsigned long cache_length = cache_size - *offset;
__dma_map_area(cache_base_io + *offset + cache_offset,
cache_length, DMA_TO_DEVICE);
__dma_unmap_area(cache_base_io + *offset + cache_offset,
cache_length, DMA_FROM_DEVICE);
*length -= cache_length;
*offset = 0;
return 0;
}
if (!rknpu_obj->cache_with_sgt) {
cache_start = cache_base_io + cache_offset;
cache_length = (*offset + *length) <= cache_size ?
*length :
cache_size - *offset;
if (dir & RKNPU_MEM_SYNC_TO_DEVICE) {
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
__dma_map_area(cache_start, cache_length,
DMA_TO_DEVICE);
#else
dcache_clean_poc((unsigned long)cache_start,
(unsigned long)cache_start +
cache_length);
#endif
}
if (dir & RKNPU_MEM_SYNC_FROM_DEVICE) {
#if KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE
__dma_unmap_area(cache_start, cache_length,
DMA_FROM_DEVICE);
#else
dcache_inval_poc((unsigned long)cache_start,
(unsigned long)cache_start +
cache_length);
#endif
}
*length = (*offset + *length) <= cache_size ?
0 :
*length - cache_length;
*offset = 0;
} else {
rknpu_cache_sync_with_sg(rknpu_dev, rknpu_obj, length, offset,
dir);
}
return 0;
}
@@ -1470,11 +1669,11 @@ int rknpu_gem_sync_ioctl(struct drm_device *dev, void *data,
IS_ENABLED(CONFIG_ROCKCHIP_RKNPU_SRAM) &&
rknpu_obj->sram_size > 0) {
rknpu_cache_sync(rknpu_obj, &length, &offset,
RKNPU_CACHE_SRAM);
RKNPU_CACHE_SRAM, args->flags);
} else if (IS_ENABLED(CONFIG_NO_GKI) &&
rknpu_obj->nbuf_size > 0) {
rknpu_cache_sync(rknpu_obj, &length, &offset,
RKNPU_CACHE_NBUF);
RKNPU_CACHE_NBUF, args->flags);
}
for_each_sg(rknpu_obj->sgt->sgl, sg, rknpu_obj->sgt->nents, i) {

View File

@@ -148,6 +148,9 @@ int rknpu_soft_reset(struct rknpu_device *rknpu_dev)
rknpu_dev->soft_reseting = false;
if (rknpu_dev->config->state_init != NULL)
rknpu_dev->config->state_init(rknpu_dev);
mutex_unlock(&rknpu_dev->reset_lock);
#endif