[PATCH 5/6] acpi/cppc: Add support for optional CPPC registers

From: Yazen Ghannam <Yazen.Ghannam@xxxxxxx>

Newer AMD processors support a subset of the optional CPPC registers.
Create show, store and helper routines for the supported CPPC registers,
and add a cppc_get_perf() helper to read back the current values of the
CPPC control registers.

Signed-off-by: Yazen Ghannam <Yazen.Ghannam@xxxxxxx>
[ carved out into a patch, cleaned up, productized ]
Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@xxxxxxx>
---
 drivers/acpi/cppc_acpi.c | 119 ++++++++++++++++++++++++++++++++++++---
 include/acpi/cppc_acpi.h |   3 +
 2 files changed, 114 insertions(+), 8 deletions(-)
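
For illustration only (not part of this patch): a minimal sketch of how a
hypothetical in-kernel caller might combine the new cppc_get_perf() helper
with the existing per-register cppc_set_reg() interface to narrow the
min/max performance window. The caller name and register choices below are
assumptions for the sketch, not code from this series.

/*
 * Illustrative sketch only -- not part of this patch.
 * A hypothetical caller reads the current control values and then
 * narrows the MIN_PERF/MAX_PERF window, relying on cppc_set_reg()
 * to skip registers the platform does not implement.
 */
#include <linux/types.h>
#include <acpi/cppc_acpi.h>

static int example_clamp_perf(int cpu, u32 new_min, u32 new_max)
{
	struct cppc_perf_ctrls ctrls = {};
	int ret;

	/* Reads desired_perf plus any supported optional registers */
	ret = cppc_get_perf(cpu, &ctrls);
	if (ret)
		return ret;

	ctrls.min_perf = new_min;
	ctrls.max_perf = new_max;

	/* cppc_set_reg() only writes registers marked supported in the _CPC */
	ret = cppc_set_reg(cpu, &ctrls, MIN_PERF);
	if (ret)
		return ret;

	return cppc_set_reg(cpu, &ctrls, MAX_PERF);
}
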

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 7cb23b369fc7..f8827ba7015d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -196,6 +196,17 @@ show_cppc_data_ro(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
 
 show_cppc_data_ro(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
 show_cppc_data_ro(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+show_cppc_data(cppc_get_perf, cppc_perf_ctrls, desired_perf);
+show_cppc_data(cppc_get_perf, cppc_perf_ctrls, max_perf);
+show_cppc_data(cppc_get_perf, cppc_perf_ctrls, min_perf);
+show_cppc_data(cppc_get_perf, cppc_perf_ctrls, energy_perf);
+show_cppc_data(cppc_get_perf, cppc_perf_ctrls, auto_sel_enable);
+
+store_cppc_data_rw(cppc_perf_ctrls, desired_perf, DESIRED_PERF);
+store_cppc_data_rw(cppc_perf_ctrls, max_perf, MAX_PERF);
+store_cppc_data_rw(cppc_perf_ctrls, min_perf, MIN_PERF);
+store_cppc_data_rw(cppc_perf_ctrls, energy_perf, ENERGY_PERF);
+store_cppc_data_rw(cppc_perf_ctrls, auto_sel_enable, AUTO_SEL_ENABLE);
 
 static ssize_t show_feedback_ctrs(struct kobject *kobj,
 		struct attribute *attr, char *buf)
@@ -768,6 +779,21 @@ int set_cppc_attrs(struct cpc_desc *cpc, int entries)
 		case CTR_WRAP_TIME:
 			cppc_attrs[attr_i++] = &wraparound_time.attr;
 			break;
+		case MAX_PERF:
+			cppc_attrs[attr_i++] = &max_perf.attr;
+			break;
+		case MIN_PERF:
+			cppc_attrs[attr_i++] = &min_perf.attr;
+			break;
+		case ENERGY_PERF:
+			cppc_attrs[attr_i++] = &energy_perf.attr;
+			break;
+		case AUTO_SEL_ENABLE:
+			cppc_attrs[attr_i++] = &auto_sel_enable.attr;
+			break;
+		case DESIRED_PERF:
+			cppc_attrs[attr_i++] = &desired_perf.attr;
+			break;
 		}
 	}
 
@@ -1348,7 +1374,7 @@ int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls,
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 	struct cppc_pcc_data *pcc_ss_data = NULL;
 	struct cpc_register_resource *reg;
-	int ret = 0;
+	int ret = 0, regs_in_pcc = 0;
 	u32 value;
 
 	if (!cpc_desc) {
@@ -1360,6 +1386,18 @@ int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls,
 	case DESIRED_PERF:
 		value = perf_ctrls->desired_perf;
 		break;
+	case MAX_PERF:
+		value = perf_ctrls->max_perf;
+		break;
+	case MIN_PERF:
+		value = perf_ctrls->min_perf;
+		break;
+	case ENERGY_PERF:
+		value = perf_ctrls->energy_perf;
+		break;
+	case AUTO_SEL_ENABLE:
+		value = perf_ctrls->auto_sel_enable;
+		break;
 	default:
 		pr_debug("CPC register index #%d not writeable\n", reg_idx);
 		return -EINVAL;
@@ -1375,6 +1413,7 @@ int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls,
 	 * achieve that goal here
 	 */
 	if (CPC_IN_PCC(reg)) {
+		regs_in_pcc = 1;
 		if (pcc_ss_id < 0) {
 			pr_debug("Invalid pcc_ss_id\n");
 			return -ENODEV;
@@ -1397,13 +1436,10 @@ int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls,
 		cpc_desc->write_cmd_status = 0;
 	}
 
-	/*
-	 * Skip writing MIN/MAX until Linux knows how to come up with
-	 * useful values.
-	 */
-	cpc_write(cpu, reg, value);
+	if (CPC_SUPPORTED(reg))
+		cpc_write(cpu, reg, value);
 
-	if (CPC_IN_PCC(reg))
+	if (regs_in_pcc)
 		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
 	/*
 	 * This is Phase-II where we transfer the ownership of PCC to Platform
@@ -1451,7 +1487,7 @@ int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls,
 	 * case during a CMD_READ and if there are pending writes it delivers
 	 * the write command before servicing the read command
 	 */
-	if (CPC_IN_PCC(reg)) {
+	if (regs_in_pcc) {
 		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
 			/* Update only if there are pending write commands */
 			if (pcc_ss_data->pending_pcc_write_cmd)
@@ -1469,6 +1505,73 @@ int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls,
 }
 EXPORT_SYMBOL_GPL(cppc_set_reg);
 
+int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
+{
+	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+	struct cpc_register_resource *desired_reg, *max_reg, *min_reg;
+	struct cpc_register_resource *energy_reg, *auto_sel_enable_reg;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+	u64 desired, max, min, energy, auto_sel_enable;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
+	int ret = 0, regs_in_pcc = 0;
+
+	if (!cpc_desc) {
+		pr_debug("No CPC descriptor for CPU: %d\n", cpu);
+		return -ENODEV;
+	}
+
+	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+	max_reg = &cpc_desc->cpc_regs[MAX_PERF];
+	min_reg = &cpc_desc->cpc_regs[MIN_PERF];
+	energy_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
+	auto_sel_enable_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+
+	if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(max_reg) ||
+	    CPC_IN_PCC(min_reg) || CPC_IN_PCC(energy_reg) ||
+	    CPC_IN_PCC(auto_sel_enable_reg)) {
+		pcc_ss_data = pcc_data[pcc_ss_id];
+		down_write(&pcc_ss_data->pcc_lock);
+		regs_in_pcc = 1;
+
+		/* Ring doorbell once to update PCC subspace */
+		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
+			ret = -EIO;
+			goto out_err;
+		}
+	}
+
+	/* desired_perf is the only mandatory value in perf_ctrls */
+	if (cpc_read(cpu, desired_reg, &desired))
+		ret = -EFAULT;
+
+	if (CPC_SUPPORTED(max_reg) && cpc_read(cpu, max_reg, &max))
+		ret = -EFAULT;
+
+	if (CPC_SUPPORTED(min_reg) && cpc_read(cpu, min_reg, &min))
+		ret = -EFAULT;
+
+	if (CPC_SUPPORTED(energy_reg) && cpc_read(cpu, energy_reg, &energy))
+		ret = -EFAULT;
+
+	if (CPC_SUPPORTED(auto_sel_enable_reg) &&
+	    cpc_read(cpu, auto_sel_enable_reg, &auto_sel_enable))
+		ret = -EFAULT;
+
+	if (!ret) {
+		perf_ctrls->desired_perf = desired;
+		perf_ctrls->max_perf = max;
+		perf_ctrls->min_perf = min;
+		perf_ctrls->energy_perf = energy;
+		perf_ctrls->auto_sel_enable = auto_sel_enable;
+	}
+
+out_err:
+	if (regs_in_pcc)
+		up_write(&pcc_ss_data->pcc_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cppc_get_perf);
+
 /**
  * cppc_get_transition_latency - returns frequency transition latency in ns
  *
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index ba3b3fb64572..6f651235933c 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -117,6 +117,8 @@ struct cppc_perf_ctrls {
 	u32 max_perf;
 	u32 min_perf;
 	u32 desired_perf;
+	u32 auto_sel_enable;
+	u32 energy_perf;
 };
 
 struct cppc_perf_fb_ctrs {
@@ -140,6 +142,7 @@ struct cppc_cpudata {
 extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
 extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_reg(int cpu, struct cppc_perf_ctrls *perf_ctrls, enum cppc_regs reg_idx);
+extern int cppc_get_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
 extern int acpi_get_psd_map(struct cppc_cpudata **);
 extern unsigned int cppc_get_transition_latency(int cpu);
-- 
2.17.1