linux-rockchip, mirror of https://github.com/armbian/linux-rockchip.git
video: rockchip: mpp: optimize the schedule of enc/dec
Remove the ISR thread so that finishing a task no longer costs an extra thread wakeup and schedule; the hard IRQ handler and the task-queue worker now do the work the ISR thread used to do.

Signed-off-by: Yandong Lin <yandong.lin@rock-chips.com>
Change-Id: I4721fc6066c8580e5955f8c79025e46b96c82d85
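The mechanical part of the change shows up in every probe() below: the thread_fn argument of devm_request_threaded_irq() goes from mpp_dev_isr_sched to NULL, so a completion interrupt no longer wakes a dedicated IRQ thread. A minimal sketch of the before/after registration, with hypothetical my_* names standing in for the driver's own:

/*
 * Sketch only: hypothetical my_* names, not the MPP driver's code.
 * It illustrates the registration change this commit applies in each probe().
 */
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_hw_irq(int irq, void *param)
{
        /* hard IRQ context: read/clear hardware status, mark task state */
        return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;

        /*
         * Old style:
         *   devm_request_threaded_irq(dev, irq, my_hw_irq, my_isr_thread,
         *                             IRQF_SHARED, dev_name(dev), pdev);
         * wakes a dedicated IRQ thread for every completed job.
         *
         * New style: thread_fn is NULL, only the hard handler runs and any
         * deferred work is queued to an existing kthread_worker instead.
         */
        return devm_request_threaded_irq(dev, irq, my_hw_irq, NULL,
                                         IRQF_SHARED, dev_name(dev), pdev);
}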
@@ -1187,7 +1187,7 @@ static int av1dec_probe(struct platform_device *pdev)
         dec->reg_base[AV1DEC_CLASS_VCD] = mpp->reg_base;
         ret = devm_request_threaded_irq(dev, mpp->irq,
                         mpp_dev_irq,
-                        mpp_dev_isr_sched,
+                        NULL,
                         IRQF_SHARED,
                         dev_name(dev), mpp);
         if (ret) {

@@ -516,9 +516,9 @@ void mpp_free_task(struct kref *ref)
         }
         session = task->session;

-        mpp_debug_func(DEBUG_TASK_INFO, "task %d:%d free state 0x%lx abort %d\n",
-                       session->index, task->task_id, task->state,
-                       atomic_read(&task->abort_request));
+        mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d task %d state 0x%lx abort %d\n",
+                       session->device_type, session->index, task->task_index,
+                       task->state, atomic_read(&task->abort_request));

         mpp = mpp_get_task_used_device(task, session);
         if (mpp->dev_ops->free_task)

@@ -531,62 +531,34 @@ void mpp_free_task(struct kref *ref)
static void mpp_task_timeout_work(struct work_struct *work_s)
{
        struct mpp_dev *mpp;
        struct mpp_session *session;
        struct mpp_task *task = container_of(to_delayed_work(work_s),
                        struct mpp_task,
                        timeout_work);
        struct mpp_dev *mpp;
        struct mpp_session *session = task->session;

        if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
                mpp_err("task has been handled\n");
                return;
        }

        if (!task->session) {
        if (!session) {
                mpp_err("task %p, task->session is null.\n", task);
                return;
        }

        session = task->session;
        mpp_err("task %d:%d:%d processing time out!\n", session->pid,
                session->index, task->task_id);

        if (!session->mpp) {
                mpp_err("session %d:%d, session mpp is null.\n", session->pid,
                        session->index);
        mpp = mpp_get_task_used_device(task, session);
        if (!mpp) {
                mpp_err("session %d:%d mpp is null\n", session->device_type, session->index);
                return;
        }
        disable_irq(mpp->irq);
        if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
                mpp_err("task has been handled\n");
                return;
        }
        mpp_err("session %d:%d task %d processing time out!\n",
                session->device_type, session->index, task->task_index);

        mpp_task_dump_timing(task, ktime_us_delta(ktime_get(), task->on_create));

        mpp = mpp_get_task_used_device(task, session);

        /* disable core irq */
        disable_irq(mpp->irq);
        /* disable mmu irq */
        if (mpp->iommu_info && mpp->iommu_info->got_irq)
                disable_irq(mpp->iommu_info->irq);

        /* hardware maybe dead, reset it */
        mpp_reset_up_read(mpp->reset_group);
        mpp_dev_reset(mpp);
        mpp_power_off(mpp);

        mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
        set_bit(TASK_STATE_TIMEOUT, &task->state);
        set_bit(TASK_STATE_DONE, &task->state);
        /* Wake up the GET thread */
        wake_up(&task->wait);

        /* remove task from taskqueue running list */
        mpp_taskqueue_pop_running(mpp->queue, task);

        /* enable core irq */
        enable_irq(mpp->irq);
        /* enable mmu irq */
        if (mpp->iommu_info && mpp->iommu_info->got_irq)
                enable_irq(mpp->iommu_info->irq);

        mpp_taskqueue_trigger_work(mpp);
}

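The timeout handler above is the delayed work item embedded in each task (task->timeout_work): it is armed when a task is handed to the hardware and cancelled by the hard IRQ handler further down when the interrupt arrives in time. A minimal sketch of that arm/cancel/fire pattern, using hypothetical names (my_task, MY_TIMEOUT_MS) rather than the driver's exact fields:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>

#define MY_TIMEOUT_MS   200     /* hypothetical watchdog interval */

struct my_task {
        struct delayed_work timeout_work;
        unsigned long state;
};

/* fires only if the hardware never signalled completion */
static void my_timeout_work(struct work_struct *work)
{
        struct my_task *task = container_of(to_delayed_work(work),
                                            struct my_task, timeout_work);

        set_bit(0, &task->state);       /* e.g. a TASK_STATE_TIMEOUT bit */
}

static void my_task_start(struct my_task *task)
{
        INIT_DELAYED_WORK(&task->timeout_work, my_timeout_work);
        /* arm the watchdog when the job is handed to the hardware */
        schedule_delayed_work(&task->timeout_work,
                              msecs_to_jiffies(MY_TIMEOUT_MS));
}

static void my_task_irq_done(struct my_task *task)
{
        /* interrupt arrived in time: disarm the watchdog */
        cancel_delayed_work(&task->timeout_work);
}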
@@ -647,6 +619,9 @@ static int mpp_process_task_default(struct mpp_session *session,
         */
        atomic_inc(&session->task_count);
        mpp_session_push_pending(session, task);
+       mpp_debug_func(DEBUG_TASK_INFO, "session %d:%d task %d state 0x%lx\n",
+                      session->device_type, session->index,
+                      task->task_index, task->state);

        return 0;
 }

@@ -712,6 +687,9 @@ int mpp_dev_reset(struct mpp_dev *mpp)
 {
        dev_info(mpp->dev, "resetting...\n");

+       disable_irq(mpp->irq);
+       if (mpp->iommu_info && mpp->iommu_info->got_irq)
+               disable_irq(mpp->iommu_info->irq);
        /*
         * before running, we have to switch grf ctrl bit to ensure
         * working in current hardware

@@ -740,6 +718,10 @@ int mpp_dev_reset(struct mpp_dev *mpp)
        mpp_reset_up_write(mpp->reset_group);
        mpp_iommu_up_write(mpp->iommu_info);

+       enable_irq(mpp->irq);
+       if (mpp->iommu_info && mpp->iommu_info->got_irq)
+               enable_irq(mpp->iommu_info->irq);
+
        dev_info(mpp->dev, "reset done\n");

        return 0;

@@ -779,6 +761,7 @@ static int mpp_task_run(struct mpp_dev *mpp,
 {
        int ret;
        u32 timing_en;
+       struct mpp_session *session = task->session;

        mpp_debug_enter();

@@ -819,8 +802,9 @@ static int mpp_task_run(struct mpp_dev *mpp,
        }

        mpp_power_on(mpp);
-       mpp_debug_func(DEBUG_TASK_INFO, "pid %d run %s\n",
-                      task->session->pid, dev_name(mpp->dev));
+       mpp_debug_func(DEBUG_TASK_INFO, "%s session %d:%d task %d state 0x%lx\n",
+                      dev_name(mpp->dev), session->device_type,
+                      session->index, task->task_index, task->state);

        if (mpp->auto_freq_en && mpp->hw_ops->set_freq)
                mpp->hw_ops->set_freq(mpp, task);

@@ -834,6 +818,36 @@ static int mpp_task_run(struct mpp_dev *mpp,
        return 0;
 }

+static void try_process_running_task(struct mpp_dev *mpp)
+{
+       struct mpp_task *mpp_task, *n;
+       struct mpp_taskqueue *queue = mpp->queue;
+
+       /* try process running task */
+       list_for_each_entry_safe(mpp_task, n, &queue->running_list, queue_link) {
+               mpp = mpp_get_task_used_device(mpp_task, mpp_task->session);
+               disable_irq(mpp->irq);
+               if (!test_bit(TASK_STATE_HANDLE, &mpp_task->state)) {
+                       enable_irq(mpp->irq);
+                       continue;
+               }
+
+               /* process timeout task */
+               if (test_bit(TASK_STATE_TIMEOUT, &mpp_task->state)) {
+                       atomic_inc(&mpp->reset_request);
+                       mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
+               }
+
+               if (mpp->auto_freq_en && mpp->hw_ops->reduce_freq &&
+                   list_empty(&mpp->queue->pending_list))
+                       mpp->hw_ops->reduce_freq(mpp);
+
+               if (mpp->dev_ops->isr)
+                       mpp->dev_ops->isr(mpp);
+               enable_irq(mpp->irq);
+       }
+}
+
 static void mpp_task_worker_default(struct kthread_work *work_s)
 {
        struct mpp_task *task;

@@ -842,6 +856,7 @@ static void mpp_task_worker_default(struct kthread_work *work_s)

        mpp_debug_enter();

+       try_process_running_task(mpp);
 again:
        task = mpp_taskqueue_get_pending_task(queue);
        if (!task)

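With the IRQ thread gone, the new try_process_running_task() above runs inside the queue's kthread worker and takes over what mpp_dev_isr_sched() used to do, so the hard IRQ handler only needs to queue that worker (presumably what mpp_taskqueue_trigger_work() does internally). kthread_queue_work() may be called from hard IRQ context, which is what makes this safe; a minimal sketch with hypothetical my_* names, not the MPP driver's own types:

#include <linux/interrupt.h>
#include <linux/kthread.h>

struct my_queue {
        struct kthread_worker worker;   /* served by one kernel thread */
        struct kthread_work work;       /* per-device work item */
};

static void my_worker_fn(struct kthread_work *work)
{
        /* kthread context: drain finished/timed-out jobs, start the next one */
}

static irqreturn_t my_hw_irq(int irq, void *param)
{
        struct my_queue *q = param;

        /* hard IRQ context: latch hardware status, then kick the worker */
        kthread_queue_work(&q->worker, &q->work);
        return IRQ_HANDLED;
}

The worker thread itself is the per-queue kthread created in mpp_service_probe() (see the kthread_run() hunk near the end of this diff).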
@@ -2244,60 +2259,39 @@ irqreturn_t mpp_dev_irq(int irq, void *param)
        if (mpp->dev_ops->irq)
                irq_ret = mpp->dev_ops->irq(mpp);

-       if (task) {
-               if (irq_ret == IRQ_WAKE_THREAD) {
-                       /* if wait or delayed work timeout, abort request will turn on,
-                        * isr should not to response, and handle it in delayed work
-                        */
-                       if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
-                               mpp_err("error, task has been handled, irq_status %08x\n",
-                                       mpp->irq_status);
-                               irq_ret = IRQ_HANDLED;
-                               goto done;
-                       }
-                       if (timing_en) {
-                               task->on_cancel_timeout = ktime_get();
-                               set_bit(TASK_TIMING_TO_CANCEL, &task->state);
-                       }
-                       cancel_delayed_work(&task->timeout_work);
-                       /* normal condition, set state and wake up isr thread */
-                       set_bit(TASK_STATE_IRQ, &task->state);
-               }
-
-               if (irq_ret == IRQ_WAKE_THREAD)
-                       mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
-       } else {
+       if (!task) {
                mpp_debug(DEBUG_IRQ_CHECK, "error, task is null\n");
                irq_ret = IRQ_HANDLED;
                goto done;
        }

+       if (irq_ret == IRQ_WAKE_THREAD) {
+               /* if wait or delayed work timeout, abort request will turn on,
+                * isr should not to response, and handle it in delayed work
+                */
+               if (test_and_set_bit(TASK_STATE_HANDLE, &task->state)) {
+                       dev_err(mpp->dev, "error, task %d has been handled, irq_status %#x\n",
+                               task->task_index, mpp->irq_status);
+                       irq_ret = IRQ_HANDLED;
+                       goto done;
+               }
+               if (timing_en) {
+                       task->on_cancel_timeout = ktime_get();
+                       set_bit(TASK_TIMING_TO_CANCEL, &task->state);
+               }
+               cancel_delayed_work(&task->timeout_work);
+               /* normal condition, set state and wake up isr thread */
+               set_bit(TASK_STATE_IRQ, &task->state);
+               task->irq_status = mpp->irq_status;
+               mpp_iommu_dev_deactivate(mpp->iommu_info, mpp);
+               irq_ret = IRQ_HANDLED;
+               mpp_taskqueue_trigger_work(mpp);
+       }

 done:
        return irq_ret;
 }

-irqreturn_t mpp_dev_isr_sched(int irq, void *param)
-{
-       irqreturn_t ret = IRQ_NONE;
-       struct mpp_dev *mpp = param;
-       struct mpp_task *task = mpp->cur_task;
-
-       if (task && mpp->srv->timing_en) {
-               task->on_isr = ktime_get();
-               set_bit(TASK_TIMING_ISR, &task->state);
-       }
-
-       if (mpp->auto_freq_en &&
-           mpp->hw_ops->reduce_freq &&
-           list_empty(&mpp->queue->pending_list))
-               mpp->hw_ops->reduce_freq(mpp);
-
-       if (mpp->dev_ops->isr)
-               ret = mpp->dev_ops->isr(mpp);
-
-       /* trigger current queue to run next task */
-       mpp_taskqueue_trigger_work(mpp);
-
-       return ret;
-}
-
 u32 mpp_get_grf(struct mpp_grf_info *grf_info)
 {
        u32 val = 0;

@@ -2416,7 +2410,6 @@ void mpp_task_dump_timing(struct mpp_task *task, s64 time_diff)
        LOG_TIMING(state, TASK_TIMING_RUN_END, "run end", task->on_run_end, s);
        LOG_TIMING(state, TASK_TIMING_IRQ, "irq", task->on_irq, s);
        LOG_TIMING(state, TASK_TIMING_TO_CANCEL, "timeout cancel", task->on_cancel_timeout, s);
-       LOG_TIMING(state, TASK_TIMING_ISR, "isr", task->on_isr, s);
        LOG_TIMING(state, TASK_TIMING_FINISH, "finish", task->on_finish, s);
 }

@@ -389,8 +389,7 @@ enum mpp_task_state {
        TASK_TIMING_RUN_END = 21,
        TASK_TIMING_IRQ = 22,
        TASK_TIMING_TO_CANCEL = 23,
-       TASK_TIMING_ISR = 24,
-       TASK_TIMING_FINISH = 25,
+       TASK_TIMING_FINISH = 24,
 };

 /* The context for the a task */

@@ -429,7 +428,6 @@ struct mpp_task {
        ktime_t on_run_end;
        ktime_t on_irq;
        ktime_t on_cancel_timeout;
-       ktime_t on_isr;
        ktime_t on_finish;

        /* hardware info for current task */

@@ -437,6 +435,7 @@ struct mpp_task {
        u32 task_index;
        u32 task_id;
        u32 *reg;
+       u32 irq_status;
        /* event for session wait thread */
        wait_queue_head_t wait;

@@ -651,7 +650,6 @@ int mpp_power_off(struct mpp_dev *mpp);
 int mpp_dev_reset(struct mpp_dev *mpp);

 irqreturn_t mpp_dev_irq(int irq, void *param);
-irqreturn_t mpp_dev_isr_sched(int irq, void *param);

 struct reset_control *mpp_reset_control_get(struct mpp_dev *mpp,
                        enum MPP_RESET_TYPE type,

@@ -997,7 +997,7 @@ static int iep2_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -602,7 +602,7 @@ static int jpgdec_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -1999,7 +1999,7 @@ static int rkvdec_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -1613,6 +1613,7 @@ static int rkvdec2_probe_default(struct platform_device *pdev)
        struct rkvdec2_dev *dec = NULL;
        struct mpp_dev *mpp = NULL;
        const struct of_device_id *match = NULL;
+       irq_handler_t irq_proc = NULL;
        int ret = 0;

        dec = devm_kzalloc(dev, sizeof(*dec), GFP_KERNEL);

@@ -1637,20 +1638,18 @@ static int rkvdec2_probe_default(struct platform_device *pdev)
        rkvdec2_alloc_rcbbuf(pdev, dec);
        rkvdec2_link_init(pdev, dec);

+       irq_proc = mpp_dev_irq;
        if (dec->link_dec) {
-               ret = devm_request_threaded_irq(dev, mpp->irq,
-                               rkvdec2_link_irq_proc, NULL,
-                               IRQF_SHARED, dev_name(dev), mpp);
+               irq_proc = rkvdec2_link_irq_proc;
                mpp->dev_ops->process_task = rkvdec2_link_process_task;
                mpp->dev_ops->wait_result = rkvdec2_link_wait_result;
                mpp->dev_ops->task_worker = rkvdec2_link_worker;
                mpp->dev_ops->deinit = rkvdec2_link_session_deinit;
                kthread_init_work(&mpp->work, rkvdec2_link_worker);
-       } else {
-               ret = devm_request_threaded_irq(dev, mpp->irq,
-                               mpp_dev_irq, mpp_dev_isr_sched,
-                               IRQF_SHARED, dev_name(dev), mpp);
        }

+       ret = devm_request_threaded_irq(dev, mpp->irq, irq_proc, NULL,
+                       IRQF_SHARED, dev_name(dev), mpp);
        if (ret) {
                dev_err(dev, "register interrupter runtime failed\n");
                return -EINVAL;

@@ -1504,9 +1504,6 @@ static int rkvdec2_soft_ccu_dequeue(struct mpp_taskqueue *queue)

                        mpp_task->on_cancel_timeout = mpp_task->on_irq;
                        set_bit(TASK_TIMING_TO_CANCEL, &mpp_task->state);
-
-                       mpp_task->on_isr = mpp_task->on_irq;
-                       set_bit(TASK_TIMING_ISR, &mpp_task->state);
                }

                set_bit(TASK_STATE_HANDLE, &mpp_task->state);

@@ -1463,7 +1463,7 @@ static int rkvenc_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -2546,7 +2546,7 @@ static int rkvenc_core_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_ONESHOT,
                        dev_name(dev), mpp);
        if (ret) {

@@ -2595,7 +2595,7 @@ static int rkvenc_probe_default(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -386,7 +386,7 @@ static int mpp_service_probe(struct platform_device *pdev)

                kthread_init_worker(&queue->worker);
                queue->kworker_task = kthread_run(kthread_worker_fn, &queue->worker,
-                               "queue_work%d", i);
+                               "mpp_worker_%d", i);
                srv->task_queues[i] = queue;
        }

@@ -725,7 +725,7 @@ static int vdpp_probe(struct platform_device *pdev)
        /* get irq */
        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -926,7 +926,7 @@ static int vdpu_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -762,7 +762,7 @@ static int vdpu_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -754,7 +754,7 @@ static int vepu_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -1135,7 +1135,7 @@ static int vepu_core_probe(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {

@@ -1185,7 +1185,7 @@ static int vepu_probe_default(struct platform_device *pdev)

        ret = devm_request_threaded_irq(dev, mpp->irq,
                        mpp_dev_irq,
-                       mpp_dev_isr_sched,
+                       NULL,
                        IRQF_SHARED,
                        dev_name(dev), mpp);
        if (ret) {