Hi, Jason:

On Fri, 2023-12-22 at 12:52 +0800, Jason-JH.Lin wrote:
> To support the secure video path feature, GCE has to read/write
> registers in the secure world. GCE will enable the secure access
> permission for the HW that wants to access the secure content buffer.
>
> Add the CMDQ secure mailbox driver so that CMDQ clients are able to
> send their HW settings to the secure world. GCE can then execute all
> the instructions to configure the HW in the secure world.
>
> Signed-off-by: Jason-JH.Lin <jason-jh.lin@xxxxxxxxxxxx>
> ---

[snip]

> +
> +static void cmdq_sec_task_exec_work(struct work_struct *work_item)
> +{
> +	struct cmdq_sec_task *sec_task = container_of(work_item,
> +						      struct cmdq_sec_task, exec_work);
> +	struct cmdq_sec_thread *sec_thread = container_of(sec_task->task.thread,
> +							  struct cmdq_sec_thread, thread);
> +	struct cmdq_sec *cmdq = container_of(sec_thread->thread.chan->mbox,
> +					     struct cmdq_sec, mbox);
> +	struct cmdq_sec_data *data;
> +	unsigned long flags;
> +	int err;
> +
> +	dev_dbg(cmdq->mbox.dev, "%s gce:%#lx sec_task:%p pkt:%p thread:%u",
> +		__func__, (unsigned long)cmdq->base_pa,
> +		sec_task, sec_task->task.pkt, sec_thread->idx);
> +
> +	if (!sec_task->task.pkt->sec_data) {
> +		dev_err(cmdq->mbox.dev, "pkt:%p without sec_data", sec_task->task.pkt);
> +		return;
> +	}
> +	data = (struct cmdq_sec_data *)sec_task->task.pkt->sec_data;
> +
> +	mutex_lock(&cmdq->exec_lock);
> +
> +	spin_lock_irqsave(&sec_thread->thread.chan->lock, flags);
> +	if (!sec_thread->task_cnt) {
> +		mod_timer(&sec_thread->timeout, jiffies +
> +			  msecs_to_jiffies(sec_thread->timeout_ms));
> +		sec_thread->wait_cookie = 1;
> +		sec_thread->next_cookie = 1;
> +		sec_thread->task_cnt = 0;
> +		__raw_writel(0, (void __iomem *)cmdq->shared_mem->va +
> +			     CMDQ_SEC_SHARED_THR_CNT_OFFSET + sec_thread->idx * sizeof(u32));
> +	}
> +
> +	sec_task->reset_exec = sec_thread->task_cnt ? false : true;
> +	sec_task->wait_cookie = sec_thread->next_cookie;
> +	sec_thread->next_cookie = (sec_thread->next_cookie + 1) % CMDQ_MAX_COOKIE_VALUE;
> +	list_add_tail(&sec_task->task.list_entry, &sec_thread->thread.task_busy_list);
> +	sec_thread->task_cnt += 1;
> +	spin_unlock_irqrestore(&sec_thread->thread.chan->lock, flags);
> +	sec_task->trigger = sched_clock();
> +
> +	if (!atomic_cmpxchg(&cmdq_path_res, 0, 1)) {
> +		err = cmdq_sec_task_submit(cmdq, NULL, CMD_CMDQ_IWC_PATH_RES_ALLOCATE,
> +					   CMDQ_INVALID_THREAD, NULL);
> +		if (err) {
> +			atomic_set(&cmdq_path_res, 0);
> +			goto task_end;
> +		}
> +	}
> +
> +	if (sec_thread->task_cnt > CMDQ_MAX_TASK_IN_SECURE_THREAD) {
> +		dev_err(cmdq->mbox.dev, "task_cnt:%u cannot more than %u sec_task:%p thread:%u",
> +			sec_thread->task_cnt, CMDQ_MAX_TASK_IN_SECURE_THREAD,
> +			sec_task, sec_thread->idx);
> +		err = -EMSGSIZE;
> +		goto task_end;
> +	}
> +
> +	err = cmdq_sec_task_submit(cmdq, sec_task, CMD_CMDQ_IWC_SUBMIT_TASK,
> +				   sec_thread->idx, NULL);
> +	if (err)
> +		dev_err(cmdq->mbox.dev, "cmdq_sec_task_submit err:%d sec_task:%p thread:%u",
> +			err, sec_task, sec_thread->idx);
> +
> +task_end:
> +	if (err) {
> +		struct cmdq_cb_data cb_data;
> +
> +		cb_data.sta = err;
> +		cb_data.pkt = sec_task->task.pkt;
> +		mbox_chan_received_data(sec_thread->thread.chan, &cb_data);
> +
> +		spin_lock_irqsave(&sec_thread->thread.chan->lock, flags);
> +		if (!sec_thread->task_cnt)
> +			dev_err(cmdq->mbox.dev, "thread:%u task_cnt:%u cannot below zero",
> +				sec_thread->idx, sec_thread->task_cnt);
> +		else
> +			sec_thread->task_cnt -= 1;
> +
> +		sec_thread->next_cookie = (sec_thread->next_cookie - 1 +
> +					   CMDQ_MAX_COOKIE_VALUE) % CMDQ_MAX_COOKIE_VALUE;
> +		list_del(&sec_task->task.list_entry);
> +		dev_dbg(cmdq->mbox.dev, "gce:%#lx err:%d sec_task:%p pkt:%p",
> +			(unsigned long)cmdq->base_pa, err, sec_task, sec_task->task.pkt);
> +		dev_dbg(cmdq->mbox.dev, "thread:%u task_cnt:%u wait_cookie:%u next_cookie:%u",
> +			sec_thread->idx, sec_thread->task_cnt,
> +			sec_thread->wait_cookie, sec_thread->next_cookie);
> +		spin_unlock_irqrestore(&sec_thread->thread.chan->lock, flags);
> +
> +		kfree(sec_task);
> +	}
> +
> +	mutex_unlock(&cmdq->exec_lock);
> +}
> +
> +static int cmdq_sec_mbox_send_data(struct mbox_chan *chan, void *data)
> +{
> +	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
> +	struct cmdq_sec_data *sec_data = (struct cmdq_sec_data *)pkt->sec_data;
> +	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
> +	struct cmdq_sec_thread *sec_thread = container_of(thread, struct cmdq_sec_thread, thread);
> +	struct cmdq_sec_task *sec_task;
> +
> +	if (!sec_data)
> +		return -EINVAL;
> +
> +	sec_task = kzalloc(sizeof(*sec_task), GFP_ATOMIC);
> +	if (!sec_task)
> +		return -ENOMEM;
> +
> +	sec_task->task.pkt = pkt;
> +	sec_task->task.thread = thread;
> +	sec_task->scenario = sec_data->scenario;
> +	sec_task->engine_flag = sec_data->engs_need_dapc | sec_data->engs_need_sec_port;
> +
> +	INIT_WORK(&sec_task->exec_work, cmdq_sec_task_exec_work);
> +	queue_work(sec_thread->task_exec_wq, &sec_task->exec_work);

It's not necessary to queue a work here. Squash
cmdq_sec_task_exec_work() into this function.

Regards,
CK

> +	return 0;
> +}
> +
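For illustration, a minimal sketch of the squash suggested above might
look like the following. This is untested and based only on the
functions in this patch; the cookie/timer bookkeeping, the path
resource allocation, and the error rollback are elided and would stay
exactly as in cmdq_sec_task_exec_work():

static int cmdq_sec_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
	struct cmdq_sec_data *sec_data = (struct cmdq_sec_data *)pkt->sec_data;
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq_sec_thread *sec_thread = container_of(thread,
					struct cmdq_sec_thread, thread);
	struct cmdq_sec *cmdq = container_of(chan->mbox, struct cmdq_sec, mbox);
	struct cmdq_sec_task *sec_task;
	unsigned long flags;
	int err;

	if (!sec_data)
		return -EINVAL;

	sec_task = kzalloc(sizeof(*sec_task), GFP_ATOMIC);
	if (!sec_task)
		return -ENOMEM;

	sec_task->task.pkt = pkt;
	sec_task->task.thread = thread;
	sec_task->scenario = sec_data->scenario;
	sec_task->engine_flag = sec_data->engs_need_dapc |
				sec_data->engs_need_sec_port;

	/* Formerly the body of cmdq_sec_task_exec_work(). */
	mutex_lock(&cmdq->exec_lock);

	spin_lock_irqsave(&sec_thread->thread.chan->lock, flags);
	/* ... timer, cookie and task_busy_list bookkeeping, unchanged ... */
	spin_unlock_irqrestore(&sec_thread->thread.chan->lock, flags);

	/* ... path resource allocation and max-task check, unchanged ... */

	err = cmdq_sec_task_submit(cmdq, sec_task, CMD_CMDQ_IWC_SUBMIT_TASK,
				   sec_thread->idx, NULL);
	if (err) {
		/* ... rollback and kfree(sec_task), unchanged ... */
	}

	mutex_unlock(&cmdq->exec_lock);

	return err;
}

One thing to double check before doing this squash: the mailbox core
calls send_data() with chan->lock held and interrupts disabled (see
msg_submit() in drivers/mailbox/mailbox.c), which is also why the
kzalloc() above uses GFP_ATOMIC. The sleeping exec_lock mutex and the
presumably blocking call into the TEE in cmdq_sec_task_submit() would
therefore need rework before the submission can run directly in this
context.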