mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'drm-misc-fixes-2025-03-20' of ssh://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes
A sched fence reference leak fix, two fence fixes for v3d, two overflow fixes for qaic, and an iommu handling fix for host1x. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Maxime Ripard <mripard@redhat.com> Link: https://patchwork.freedesktop.org/patch/msgid/20250320-valiant-outstanding-nightingale-e9acae@houat
This commit is contained in:
@@ -172,9 +172,10 @@ static void free_slice(struct kref *kref)
|
||||
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
|
||||
struct sg_table *sgt_in, u64 size, u64 offset)
|
||||
{
|
||||
int total_len, len, nents, offf = 0, offl = 0;
|
||||
struct scatterlist *sg, *sgn, *sgf, *sgl;
|
||||
unsigned int len, nents, offf, offl;
|
||||
struct sg_table *sgt;
|
||||
size_t total_len;
|
||||
int ret, j;
|
||||
|
||||
/* find out number of relevant nents needed for this mem */
|
||||
@@ -182,6 +183,8 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
|
||||
sgf = NULL;
|
||||
sgl = NULL;
|
||||
nents = 0;
|
||||
offf = 0;
|
||||
offl = 0;
|
||||
|
||||
size = size ? size : PAGE_SIZE;
|
||||
for_each_sgtable_dma_sg(sgt_in, sg, j) {
|
||||
@@ -554,6 +557,7 @@ static bool invalid_sem(struct qaic_sem *sem)
|
||||
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
|
||||
u32 count, u64 total_size)
|
||||
{
|
||||
u64 total;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
@@ -563,7 +567,8 @@ static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_
|
||||
invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
|
||||
return -EINVAL;
|
||||
|
||||
if (slice_ent[i].offset + slice_ent[i].size > total_size)
|
||||
if (check_add_overflow(slice_ent[i].offset, slice_ent[i].size, &total) ||
|
||||
total > total_size)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
@@ -259,9 +259,16 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
|
||||
struct drm_sched_fence *s_fence = job->s_fence;
|
||||
|
||||
dma_fence_get(&s_fence->finished);
|
||||
if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
|
||||
drm_sched_entity_kill_jobs_cb))
|
||||
if (!prev ||
|
||||
dma_fence_add_callback(prev, &job->finish_cb,
|
||||
drm_sched_entity_kill_jobs_cb)) {
|
||||
/*
|
||||
* Adding callback above failed.
|
||||
* dma_fence_put() checks for NULL.
|
||||
*/
|
||||
dma_fence_put(prev);
|
||||
drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
|
||||
}
|
||||
|
||||
prev = &s_fence->finished;
|
||||
}
|
||||
|
||||
@@ -226,8 +226,12 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
|
||||
struct dma_fence *fence;
|
||||
unsigned long irqflags;
|
||||
|
||||
if (unlikely(job->base.base.s_fence->finished.error))
|
||||
if (unlikely(job->base.base.s_fence->finished.error)) {
|
||||
spin_lock_irqsave(&v3d->job_lock, irqflags);
|
||||
v3d->bin_job = NULL;
|
||||
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Lock required around bin_job update vs
|
||||
* v3d_overflow_mem_work().
|
||||
@@ -281,8 +285,10 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
|
||||
struct drm_device *dev = &v3d->drm;
|
||||
struct dma_fence *fence;
|
||||
|
||||
if (unlikely(job->base.base.s_fence->finished.error))
|
||||
if (unlikely(job->base.base.s_fence->finished.error)) {
|
||||
v3d->render_job = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
v3d->render_job = job;
|
||||
|
||||
@@ -327,11 +333,17 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
|
||||
struct drm_device *dev = &v3d->drm;
|
||||
struct dma_fence *fence;
|
||||
|
||||
if (unlikely(job->base.base.s_fence->finished.error)) {
|
||||
v3d->tfu_job = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
v3d->tfu_job = job;
|
||||
|
||||
fence = v3d_fence_create(v3d, V3D_TFU);
|
||||
if (IS_ERR(fence))
|
||||
return NULL;
|
||||
|
||||
v3d->tfu_job = job;
|
||||
if (job->base.irq_fence)
|
||||
dma_fence_put(job->base.irq_fence);
|
||||
job->base.irq_fence = dma_fence_get(fence);
|
||||
@@ -369,6 +381,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
|
||||
struct dma_fence *fence;
|
||||
int i, csd_cfg0_reg;
|
||||
|
||||
if (unlikely(job->base.base.s_fence->finished.error)) {
|
||||
v3d->csd_job = NULL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
v3d->csd_job = job;
|
||||
|
||||
v3d_invalidate_caches(v3d);
|
||||
|
||||
@@ -361,6 +361,10 @@ static bool host1x_wants_iommu(struct host1x *host1x)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns ERR_PTR on failure, NULL if the translation is IDENTITY, otherwise a
|
||||
* valid paging domain.
|
||||
*/
|
||||
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
|
||||
@@ -385,6 +389,8 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
|
||||
* Similarly, if host1x is already attached to an IOMMU (via the DMA
|
||||
* API), don't try to attach again.
|
||||
*/
|
||||
if (domain && domain->type == IOMMU_DOMAIN_IDENTITY)
|
||||
domain = NULL;
|
||||
if (!host1x_wants_iommu(host) || domain)
|
||||
return domain;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user