On 3/11/2024 8:34 AM, Dmitry Baryshkov wrote:
> +int qcom_pdm_add_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> +{
> +	int ret;
> +	int i;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	if (qcom_pdm_server_added) {
> +		ret = qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +				     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +		if (ret)
> +			goto err_out;
> +	}
> +
> +	for (i = 0; i < num_data; i++) {
> +		ret = qcom_pdm_add_domain_locked(data[i]);
> +		if (ret)
> +			goto err;
> +	}
> +
> +	ret = qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +			     SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	if (ret) {
> +		pr_err("PDM: error adding server %d\n", ret);
> +		goto err;
> +	}
> +
> +	qcom_pdm_server_added = true;
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	return 0;
> +
> +err:
> +	while (--i >= 0)
> +		qcom_pdm_del_domain_locked(data[i]);
> +
> +	qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +		       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +
> +err_out:
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(qcom_pdm_add_domains);
> +
> +void qcom_pdm_del_domains(const struct qcom_pdm_domain_data * const *data, size_t num_data)
> +{
> +	int i;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	if (qcom_pdm_server_added) {
> +		qmi_del_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +			       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	}

I am confused as to why we need to reset the qmi handle any time
qcom_pdm_del_domains() or qcom_pdm_add_domains() is called. Is this to
trigger some kind of re-broadcast notification to the clients because the
service list has been updated? My concern is that this causes unintended
side effects on the rprocs that are not restarting.

> +
> +	for (i = 0; i < num_data; i++)
> +		qcom_pdm_del_domain_locked(data[i]);
> +
> +	qmi_add_server(&qcom_pdm_handle, SERVREG_QMI_SERVICE,
> +		       SERVREG_QMI_VERSION, SERVREG_QMI_INSTANCE);
> +	qcom_pdm_server_added = true;
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +}
> +EXPORT_SYMBOL_GPL(qcom_pdm_del_domains);
> +
> +static void qcom_pdm_get_domain_list(struct qmi_handle *qmi,
> +				     struct sockaddr_qrtr *sq,
> +				     struct qmi_txn *txn,
> +				     const void *decoded)
> +{
> +	const struct servreg_loc_get_domain_list_req *req = decoded;
> +	struct servreg_loc_get_domain_list_resp *rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
> +	struct qcom_pdm_service *service;
> +	u32 offset;
> +	int ret;
> +
> +	offset = req->offset_valid ? req->offset : 0;
> +
> +	rsp->rsp.result = QMI_RESULT_SUCCESS_V01;
> +	rsp->rsp.error = QMI_ERR_NONE_V01;
> +
> +	rsp->db_revision_valid = true;
> +	rsp->db_revision = 1;
> +
> +	rsp->total_domains_valid = true;
> +	rsp->total_domains = 0;
> +
> +	mutex_lock(&qcom_pdm_mutex);
> +
> +	service = qcom_pdm_find_locked(req->name);
> +	if (service) {
> +		struct qcom_pdm_domain *domain;
> +
> +		rsp->domain_list_valid = true;
> +		rsp->domain_list_len = 0;
> +
> +		list_for_each_entry(domain, &service->domains, list) {
> +			u32 i = rsp->total_domains++;
> +
> +			if (i >= offset && i < SERVREG_LOC_MAX_DOMAINS) {
> +				u32 j = rsp->domain_list_len++;
> +
> +				strscpy(rsp->domain_list[j].name, domain->name,
> +					sizeof(rsp->domain_list[i].name));
> +				rsp->domain_list[j].instance_id = domain->instance_id;
> +
> +				pr_debug("PDM: found %s / %d\n", domain->name,
> +					 domain->instance_id);
> +			}
> +		}
> +
> +	}
> +
> +	mutex_unlock(&qcom_pdm_mutex);
> +
> +	pr_debug("PDM: service '%s' offset %d returning %d domains (of %d)\n", req->name,
> +		 req->offset_valid ? req->offset : -1, rsp->domain_list_len, rsp->total_domains);
> +
> +	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
> +				2658,
> +				servreg_loc_get_domain_list_resp_ei, rsp);

Other QMI clients like pdr_interface have macros for the message size.
Can we consider adding one instead of using 2658 directly?
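Something along these lines is what I had in mind, mirroring the existing
SERVREG_LOC_PFR_RESP_MSG_SIZE define (the macro name below is only a
suggestion):

	/* qcom_pdm_msg.h: give the magic number a name */
	#define SERVREG_LOC_GET_DOMAIN_LIST_RESP_MSG_SIZE	2658

	/* call site */
	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_GET_DOMAIN_LIST,
				SERVREG_LOC_GET_DOMAIN_LIST_RESP_MSG_SIZE,
				servreg_loc_get_domain_list_resp_ei, rsp);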
> +	if (ret)
> +		pr_err("Error sending servreg response: %d\n", ret);
> +
> +	kfree(rsp);
> +}
> +
> +static void qcom_pdm_pfr(struct qmi_handle *qmi,
> +			 struct sockaddr_qrtr *sq,
> +			 struct qmi_txn *txn,
> +			 const void *decoded)
> +{
> +	const struct servreg_loc_pfr_req *req = decoded;
> +	struct servreg_loc_pfr_resp rsp = {};
> +	int ret;
> +
> +	pr_warn_ratelimited("PDM: service '%s' crash: '%s'\n", req->service, req->reason);
> +
> +	rsp.rsp.result = QMI_RESULT_SUCCESS_V01;
> +	rsp.rsp.error = QMI_ERR_NONE_V01;
> +
> +	ret = qmi_send_response(qmi, sq, txn, SERVREG_LOC_PFR,
> +				SERVREG_LOC_PFR_RESP_MSG_SIZE,
> +				servreg_loc_pfr_resp_ei, &rsp);
> +	if (ret)
> +		pr_err("Error sending servreg response: %d\n", ret);
> +}
> +
> diff --git a/drivers/soc/qcom/qcom_pdm_msg.h b/drivers/soc/qcom/qcom_pdm_msg.h
> new file mode 100644
> index 000000000000..e576b87c67c0
> --- /dev/null
> +++ b/drivers/soc/qcom/qcom_pdm_msg.h
> @@ -0,0 +1,66 @@
> +// SPDX-License-Identifier: BSD-3-Clause
> +/*
> + * Copyright (c) 2018, Linaro Ltd.
> + * Copyright (c) 2016, Bjorn Andersson
> + */
> +
> +#ifndef __QMI_SERVREG_LOC_H__
> +#define __QMI_SERVREG_LOC_H__
> +

Should we update the header guards to reflect the new file name?

> +#include <linux/types.h>
> +#include <linux/soc/qcom/qmi.h>
> +
> +#define SERVREG_QMI_SERVICE 64
> +#define SERVREG_QMI_VERSION 257
> +#define SERVREG_QMI_INSTANCE 0
> +#define QMI_RESULT_SUCCESS 0
> +#define QMI_RESULT_FAILURE 1
> +#define QMI_ERR_NONE 0
> +#define QMI_ERR_INTERNAL 1
> +#define QMI_ERR_MALFORMED_MSG 2

I think these common QMI macro definitions are wrong. They should match
what is defined in <soc/qcom/qmi.h>. This is a bug in the userspace
pd-mapper header as well.

> +#endif
> diff --git a/include/linux/soc/qcom/pd_mapper.h b/include/linux/soc/qcom/pd_mapper.h
> new file mode 100644
> index 000000000000..86438b7ca6fe
> --- /dev/null
> +++ b/include/linux/soc/qcom/pd_mapper.h
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Qualcomm Protection Domain mapper
> + *
> + * Copyright (c) 2023 Linaro Ltd.
> + */
> +#ifndef __QCOM_PD_MAPPER__
> +#define __QCOM_PD_MAPPER__
> +
> +struct qcom_pdm_domain_data {
> +	const char *domain;
> +	u32 instance_id;
> +	/* NULL-terminated array */
> +	const char * services[];

s/char * services[]/char *services[]/
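Putting the two qcom_pdm_msg.h comments above together, something roughly
like this is what I had in mind (the guard name is only a suggestion; the
point is to reuse the *_V01 result/error values from
<linux/soc/qcom/qmi.h>, which the .c file already does):

	#ifndef __QCOM_PDM_MSG_H__
	#define __QCOM_PDM_MSG_H__

	#include <linux/types.h>
	#include <linux/soc/qcom/qmi.h>

	#define SERVREG_QMI_SERVICE	64
	#define SERVREG_QMI_VERSION	257
	#define SERVREG_QMI_INSTANCE	0

	/*
	 * No local QMI_RESULT_xxx / QMI_ERR_xxx copies; the handlers already
	 * use QMI_RESULT_SUCCESS_V01 / QMI_ERR_NONE_V01 from qmi.h.
	 */

	...

	#endif /* __QCOM_PDM_MSG_H__ */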