Re: [PATCH V4 2/2] pwm: Add GPIO PWM driver

Hi Uwe,

Am 05.02.24 um 11:09 schrieb Uwe Kleine-König:
Hello,

On Sun, Feb 04, 2024 at 11:08:51PM +0100, Stefan Wahren wrote:
+static enum hrtimer_restart pwm_gpio_timer(struct hrtimer *gpio_timer)
+{
+	struct pwm_gpio *gpwm = container_of(gpio_timer, struct pwm_gpio,
+					     gpio_timer);
+	unsigned long next_toggle;
+	unsigned long flags;
+	bool new_level;
+
+	spin_lock_irqsave(&gpwm->lock, flags);
+
+	/* Apply new state at end of current period */
+	if (!gpwm->level && gpwm->changing) {
+		gpwm->changing = false;
+		gpwm->state = gpwm->next_state;
+		new_level = !!gpwm->state.duty_cycle;
+	} else {
+		new_level = !gpwm->level;
+	}
+
+	next_toggle = pwm_gpio_toggle(gpwm, new_level);
+	if (next_toggle) {
+		hrtimer_forward(gpio_timer, hrtimer_get_expires(gpio_timer),
+				ns_to_ktime(next_toggle));
How does this work in relation to hrtimer_resolution? If the resolution
is (say) 300 and next_toggle is 2000, does the timer trigger at
hrtimer_get_expires(...) + 1800, or at ... + 2100?

If you assume we have period = 6000 and duty_cycle = 2000, the delta by
which the timer is forwarded alternates between 1800 and 3900 (if
rounding down) or between 2100 and 4200 (if rounding up). Neither is
optimal.

Ideally you'd round down the active phase (i.e. pick 1800) and for the
inactive phase you'd use rounddown(period) - rounddown(duty_cycle), which
gives 4200 here, so both phases together still add up to
rounddown(period). Does this make sense?
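
A minimal sketch of that rounding scheme (helper names are made up and
not part of the posted patch; div_u64_rem() is used to avoid a 64-bit
modulo on 32-bit machines):

	#include <linux/hrtimer.h>
	#include <linux/math64.h>
	#include <linux/pwm.h>

	/* Round a nanosecond value down to the hrtimer granularity. */
	static u64 pwm_gpio_round_down_ns(u64 ns)
	{
		u32 rem;

		div_u64_rem(ns, hrtimer_resolution, &rem);
		return ns - rem;
	}

	/* Active phase: plain round down, 2000 -> 1800 in the example. */
	static u64 pwm_gpio_active_ns(const struct pwm_state *state)
	{
		return pwm_gpio_round_down_ns(state->duty_cycle);
	}

	/*
	 * Inactive phase: rounddown(period) - rounddown(duty_cycle), i.e.
	 * 6000 - 1800 = 4200 in the example, so both phases together still
	 * add up to rounddown(period).
	 */
	static u64 pwm_gpio_inactive_ns(const struct pwm_state *state)
	{
		return pwm_gpio_round_down_ns(state->period) -
		       pwm_gpio_round_down_ns(state->duty_cycle);
	}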
Oh dear, this looks like a can of worms. I will look into it. Btw,
according to multi_v7_defconfig, the hrtimer_resolution on the Pi is 1 ns.

Does it make sense to log the hrtimer_resolution e.g. at probe?
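
If so, a single line in pwm_gpio_probe() would be enough, e.g. (just a
sketch; hrtimer_resolution is the global from <linux/hrtimer.h> that the
driver already uses):

	dev_dbg(dev, "effective hrtimer resolution: %u ns\n", hrtimer_resolution);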

+	}
+
+	spin_unlock_irqrestore(&gpwm->lock, flags);
+
+	return next_toggle ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int pwm_gpio_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+			  const struct pwm_state *state)
+{
+	struct pwm_gpio *gpwm = container_of(chip, struct pwm_gpio, chip);
+	bool invert = state->polarity == PWM_POLARITY_INVERSED;
+	unsigned long flags;
+
+	if (state->duty_cycle && state->duty_cycle < hrtimer_resolution)
+		return -EINVAL;
+
+	if (state->duty_cycle != state->period &&
+	    (state->period - state->duty_cycle < hrtimer_resolution))
+		return -EINVAL;
If you assume that hrtimer_resolution = 300 again, you don't want to
refuse

	.duty_cycle = 200
	.period = 200

do you?
Actually, I rejected it on purpose. Yes, this specific corner case does
work with such a resolution. But if the user wants a steady level, they
would use a plain GPIO, not a PWM. As soon as the duty cycle gets a bit
lower, it would be rejected, and as a user I would find that confusing.

Another issue here is that we don't have a good interface to communicate
such limitations.
I think you want:

	mininterval = min(state->duty_cycle, state->period - state->duty_cycle);
	if (mininterval && mininterval < hrtimer_resolution)
		return -EINVAL;

to catch both cases in a single check.
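
With the hrtimer_resolution = 300 example from above, that combined check
works out like this (worked numbers only, nothing beyond the snippet):

	u64 mininterval = min(state->duty_cycle,
			      state->period - state->duty_cycle);

	/*
	 * duty_cycle = 200, period = 200: mininterval = 0   -> accepted (constant level)
	 * duty_cycle =   0, period = 200: mininterval = 0   -> accepted (constant level)
	 * duty_cycle = 100, period = 200: mininterval = 100 -> rejected, 100 < 300
	 */
	if (mininterval && mininterval < hrtimer_resolution)
		return -EINVAL;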

+	if (!state->enabled) {
+		hrtimer_cancel(&gpwm->gpio_timer);
+	} else if (!gpwm->running) {
+		/*
+		 * This just enables the output, but pwm_gpio_toggle()
+		 * really starts the duty cycle.
+		 */
+		int ret = gpiod_direction_output(gpwm->gpio, invert);
+
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_irqsave(&gpwm->lock, flags);
+
+	if (!state->enabled) {
+		gpwm->state = *state;
+		gpwm->running = false;
+		gpwm->changing = false;
+
+		gpiod_set_value(gpwm->gpio, invert);
+	} else if (gpwm->running) {
+		gpwm->next_state = *state;
+		gpwm->changing = true;
+	} else {
+		unsigned long next_toggle;
+
+		gpwm->state = *state;
+		gpwm->changing = false;
+
+		next_toggle = pwm_gpio_toggle(gpwm, !!state->duty_cycle);
+		if (next_toggle) {
+			hrtimer_start(&gpwm->gpio_timer, next_toggle,
+				      HRTIMER_MODE_REL);
+		}
The curly braces can be dropped here and in a few more locations.
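
For illustration, the hunk above would then read (same behaviour, only
the braces around the single statement are gone):

		next_toggle = pwm_gpio_toggle(gpwm, !!state->duty_cycle);
		if (next_toggle)
			hrtimer_start(&gpwm->gpio_timer, next_toggle,
				      HRTIMER_MODE_REL);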

+	}
+
+	spin_unlock_irqrestore(&gpwm->lock, flags);
+
+	return 0;
+}
+
+static int pwm_gpio_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+			       struct pwm_state *state)
+{
+	struct pwm_gpio *gpwm = container_of(chip, struct pwm_gpio, chip);
+	unsigned long flags;
+
+	spin_lock_irqsave(&gpwm->lock, flags);
+
+	if (gpwm->changing)
+		*state = gpwm->next_state;
+	else
+		*state = gpwm->state;
+
+	spin_unlock_irqrestore(&gpwm->lock, flags);
+
+	return 0;
+}
+
+static const struct pwm_ops pwm_gpio_ops = {
+	.apply = pwm_gpio_apply,
+	.get_state = pwm_gpio_get_state,
+};
+
+static int pwm_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct pwm_gpio *gpwm;
+	int ret;
+
+	gpwm = devm_kzalloc(dev, sizeof(*gpwm), GFP_KERNEL);
+	if (!gpwm)
+		return -ENOMEM;
+
+	spin_lock_init(&gpwm->lock);
+
+	gpwm->gpio = devm_gpiod_get(dev, NULL, GPIOD_ASIS);
+	if (IS_ERR(gpwm->gpio)) {
+		return dev_err_probe(dev, PTR_ERR(gpwm->gpio),
+				     "could not get gpio\n");
+	}
+
+	if (gpiod_cansleep(gpwm->gpio)) {
+		return dev_err_probe(dev, -EINVAL,
+				     "sleeping GPIO %d not supported\n",
+				     desc_to_gpio(gpwm->gpio));
+	}
Is it still state of the art to add GPIO numbers to error messages?
I was unsure how to handle this in a user-friendly way. Just logging
"sleeping GPIO not supported" doesn't provide a reference to the
affected GPIO. GPIO names are optional, so they might be empty.

I'm open to suggestions :-)
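
One option, sketched here and not necessarily the best answer: rely on
the device prefix that dev_err_probe() prints anyway and drop the global
GPIO number, since this chip only ever handles a single GPIO:

	if (gpiod_cansleep(gpwm->gpio))
		return dev_err_probe(dev, -EINVAL,
				     "sleeping GPIO not supported\n");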

Best regards

+	gpwm->chip.dev = dev;
+	gpwm->chip.ops = &pwm_gpio_ops;
+	gpwm->chip.npwm = 1;
+	gpwm->chip.atomic = true;
+
+	hrtimer_init(&gpwm->gpio_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	gpwm->gpio_timer.function = pwm_gpio_timer;
+
+	ret = pwmchip_add(&gpwm->chip);
+	if (ret < 0)
+		return dev_err_probe(dev, ret, "could not add pwmchip\n");
+
+	platform_set_drvdata(pdev, gpwm);
+
+	return 0;
+}
Best regards
Uwe






