+ clockevents-add-broadcast-support.patch added to -mm tree

The patch titled
     clockevents: Add broadcast support
has been added to the -mm tree.  Its filename is
     clockevents-add-broadcast-support.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

------------------------------------------------------
Subject: clockevents: Add broadcast support


x86 needs timer broadcasting for C-states which stop the local APIC timer.
Add this functionality to clockevents, so highres/dyntick timers can work
on such systems.
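
As an illustration (not part of this patch), a deep C-state idle path would
be expected to bracket the sleep with the new call roughly as below; the
idle helper named here is hypothetical and stands in for the real ACPI
idle code, and the call is made with interrupts disabled as the kerneldoc
requires:

	#include <linux/clockchips.h>

	/*
	 * Illustration only: the local APIC timer stops in this C-state,
	 * so the CPU's next event is handed to the broadcast device
	 * before sleeping and taken back after wakeup.
	 */
	static void deep_cstate_idle(struct clock_event_device *lapic_evt)
	{
		/* Local timer is about to stop: let the broadcast device fire */
		clockevents_set_broadcast(lapic_evt, 1);

		enter_deep_cstate();	/* placeholder for the real sleep */

		/* Local timer works again: switch back to per-CPU events */
		clockevents_set_broadcast(lapic_evt, 0);
	}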

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/clockchips.h |   21 +++
 kernel/time/clockevents.c  |  201 +++++++++++++++++++++++++++++++++--
 2 files changed, 213 insertions(+), 9 deletions(-)

diff -puN include/linux/clockchips.h~clockevents-add-broadcast-support include/linux/clockchips.h
--- a/include/linux/clockchips.h~clockevents-add-broadcast-support
+++ a/include/linux/clockchips.h
@@ -114,9 +114,30 @@ extern int clockevents_set_next_event(kt
 extern int clockevents_next_event_available(void);
 extern void clockevents_resume_events(void);
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern void clockevents_set_broadcast(struct clock_event_device *evt,
+				      int broadcast);
+extern void clockevents_set_global_broadcast(struct clock_event_device *evt,
+					     int broadcast);
+extern int clockevents_register_broadcast(void (*fun)(cpumask_t *mask));
 #else
+static inline void clockevents_set_broadcast(struct clock_event_device *evt,
+					     int broadcast)
+{
+}
+#endif
+
+#else
+
 # define clockevents_init()		do { } while(0)
 # define clockevents_resume_events()	do { } while(0)
+
+struct clock_event_device;
+static inline void clockevents_set_broadcast(struct clock_event_device *evt,
+					     int broadcast)
+{
+}
+
 #endif
 
 #endif
diff -puN kernel/time/clockevents.c~clockevents-add-broadcast-support kernel/time/clockevents.c
--- a/kernel/time/clockevents.c~clockevents-add-broadcast-support
+++ a/kernel/time/clockevents.c
@@ -49,6 +49,7 @@ struct local_events {
 	int installed;
 	struct event_descr events[MAX_CLOCK_EVENTS];
 	struct clock_event_device *nextevt;
+	ktime_t	expires_next;
 };
 
 /* Variables related to the global event device */
@@ -66,6 +67,12 @@ static DEFINE_SPINLOCK(events_lock);
 /* Variables related to the per cpu local event devices */
 static DEFINE_PER_CPU(struct local_events, local_eventdevices);
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void clockevents_check_broadcast(struct event_descr *descr);
+#else
+static inline void clockevents_check_broadcast(struct event_descr *descr) { }
+#endif
+
 /*
  * Math helper. Convert a latch value (device ticks) to nanoseconds
  */
@@ -301,6 +308,7 @@ static void recalc_active_event(struct e
 		       descr->event->name);
 	}
 	descr->real_caps = caps;
+	clockevents_check_broadcast(descr);
 }
 
 /*
@@ -464,6 +472,24 @@ int clockevents_init_next_event(void)
 	return ret;
 }
 
+/*
+ * Reprogram the clock event device. Internal helper function
+ */
+static void do_clockevents_set_next_event(struct clock_event_device *nextevt,
+					 int64_t delta)
+{
+	unsigned long long clc;
+
+	if (delta > nextevt->max_delta_ns)
+		delta = nextevt->max_delta_ns;
+	if (delta < nextevt->min_delta_ns)
+		delta = nextevt->min_delta_ns;
+
+	clc = delta * nextevt->mult;
+	clc >>= nextevt->shift;
+	nextevt->set_next_event((unsigned long)clc, nextevt);
+}
+
 /**
  * clockevents_set_next_event - Reprogram the clock event device.
  * @expires:	absolute expiry time (monotonic clock)
@@ -472,29 +498,186 @@ int clockevents_init_next_event(void)
  *
  * Returns 0 on success, -ETIME when the event is in the past and force is not
  * set.
+ * Called with interrupts disabled.
  */
 int clockevents_set_next_event(ktime_t expires, int force)
 {
 	struct local_events *devices = &__get_cpu_var(local_eventdevices);
 	int64_t delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
 	struct clock_event_device *nextevt = devices->nextevt;
-	unsigned long long clc;
 
-	if (delta <= 0 && !force)
+	if (delta <= 0 && !force) {
+		devices->expires_next.tv64 = KTIME_MAX;
 		return -ETIME;
+	}
 
-	if (delta > nextevt->max_delta_ns)
-		delta = nextevt->max_delta_ns;
-	if (delta < nextevt->min_delta_ns)
-		delta = nextevt->min_delta_ns;
+	devices->expires_next = expires;
 
-	clc = delta * nextevt->mult;
-	clc >>= nextevt->shift;
-	nextevt->set_next_event((unsigned long)clc, devices->nextevt);
+	do_clockevents_set_next_event(nextevt, delta);
 
 	return 0;
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static cpumask_t global_event_broadcast;
+static cpumask_t local_event_broadcast;
+static void (*broadcast_function)(cpumask_t *mask);
+static void (*global_event_handler)(struct pt_regs *regs);
+
+/**
+ * clockevents_set_broadcast - switch next event device from/to broadcast mode
+ *
+ * Called, when the PM code enters a state, where the next event device is
+ * switched off.
+ *
+ * Called with interrupts disabled !
+ */
+void clockevents_set_broadcast(struct clock_event_device *evt, int broadcast)
+{
+	struct local_events *devices = &__get_cpu_var(local_eventdevices);
+	struct clock_event_device *glblevt = global_eventdevice.event;
+	int cpu = smp_processor_id();
+	ktime_t expires = { .tv64 = KTIME_MAX };
+	int64_t delta;
+	unsigned long flags;
+
+	if (devices->nextevt != evt)
+		return;
+
+	spin_lock_irqsave(&events_lock, flags);
+
+	if (broadcast) {
+		cpu_set(cpu, local_event_broadcast);
+		evt->set_mode(CLOCK_EVT_SHUTDOWN, evt);
+	} else {
+		cpu_clear(cpu, local_event_broadcast);
+		evt->set_mode(CLOCK_EVT_ONESHOT, evt);
+	}
+
+	/* Reprogram the broadcast device */
+	for (cpu = first_cpu(local_event_broadcast); cpu != NR_CPUS;
+	     cpu = next_cpu(cpu, local_event_broadcast)) {
+		devices = &per_cpu(local_eventdevices, cpu);
+		if (devices->expires_next.tv64 < expires.tv64)
+			expires = devices->expires_next;
+	}
+
+	if (expires.tv64 != KTIME_MAX) {
+		delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
+		do_clockevents_set_next_event(glblevt, delta);
+	}
+
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/**
+ * clockevents_set_global_broadcast - mark event device for global broadcast
+ *
+ * Switch an event device from / to global broadcasting. This is only relevant
+ * when the system has not switched to high resolution mode.
+ */
+void clockevents_set_global_broadcast(struct clock_event_device *evt,
+				      int broadcast)
+{
+	struct local_events *devices = &__get_cpu_var(local_eventdevices);
+	int cpu = smp_processor_id();
+	unsigned long flags;
+
+	spin_lock_irqsave(&events_lock, flags);
+
+	if (broadcast) {
+		if (!cpu_isset(cpu, global_event_broadcast)) {
+			cpu_set(cpu, global_event_broadcast);
+			if (devices->nextevt != evt)
+				evt->set_mode(CLOCK_EVT_SHUTDOWN, evt);
+		}
+	} else {
+		if (cpu_isset(cpu, global_event_broadcast)) {
+			cpu_clear(cpu, global_event_broadcast);
+			if (devices->nextevt != evt)
+				evt->set_mode(CLOCK_EVT_PERIODIC, evt);
+		}
+	}
+
+	spin_unlock_irqrestore(&events_lock, flags);
+}
+
+/*
+ * Broadcast tick handler:
+ */
+static void handle_tick_broadcast(struct pt_regs *regs)
+{
+	/* Call the original global tick handler */
+	global_event_handler(regs);
+	broadcast_function(&global_event_broadcast);
+}
+
+/*
+ * Broadcast next event handler:
+ */
+static void handle_nextevt_broadcast(struct pt_regs *regs)
+{
+	struct local_events *devices;
+	ktime_t now = ktime_get();
+	cpumask_t mask = CPU_MASK_NONE;
+	int cpu;
+
+	spin_lock(&events_lock);
+	/* Find all expired events */
+	for (cpu = first_cpu(local_event_broadcast); cpu != NR_CPUS;
+	     cpu = next_cpu(cpu, local_event_broadcast)) {
+		devices = &per_cpu(local_eventdevices, cpu);
+		if (devices->expires_next.tv64 <= now.tv64)
+			cpu_set(cpu, mask);
+	}
+	spin_unlock(&events_lock);
+	/* Wakeup the cpus which have an expired event */
+	broadcast_function(&mask);
+}
+
+/*
+ * Check, if the reconfigured event device is the global broadcast device.
+ *
+ * Called with interrupts disabled and events_lock held
+ */
+static void clockevents_check_broadcast(struct event_descr *descr)
+{
+	if (descr != &global_eventdevice)
+		return;
+
+	/* The device was disabled. Switch it to oneshot mode instead */
+	if (!descr->real_caps) {
+		global_event_handler = NULL;
+		descr->event->set_mode(CLOCK_EVT_ONESHOT, descr->event);
+		descr->event->event_handler = handle_nextevt_broadcast;
+	} else {
+		global_event_handler = descr->event->event_handler;
+		descr->event->event_handler = handle_tick_broadcast;
+	}
+
+}
+
+/*
+ * Install a broadcast function
+ */
+int clockevents_register_broadcast(void (*fun)(cpumask_t *mask))
+{
+	unsigned long flags;
+
+	if (broadcast_function)
+		return -EBUSY;
+
+	spin_lock_irqsave(&events_lock, flags);
+	broadcast_function = fun;
+	clockevents_check_broadcast(&global_eventdevice);
+	spin_unlock_irqrestore(&events_lock, flags);
+
+	return 0;
+}
+
+#endif
+
 /*
  * Resume the cpu local clock events
  */
_
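
For reference, the broadcast function itself is registered by architecture
code; the actual x86 wiring arrives in
i386-apic-timer-use-clockevents-broadcast.patch, but a minimal sketch would
look roughly like this, where send_IPI_mask()/LOCAL_TIMER_VECTOR stand in
for whatever IPI primitive the architecture provides:

	#include <linux/clockchips.h>
	#include <linux/cpumask.h>

	/*
	 * Sketch: wake the CPUs in @mask so their stopped local event
	 * devices get serviced. The IPI primitive is arch-specific and
	 * only illustrative here.
	 */
	static void lapic_timer_broadcast(cpumask_t *mask)
	{
		send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
	}

	static int __init register_timer_broadcast(void)
	{
		/* Returns -EBUSY if a broadcast function is already installed */
		return clockevents_register_broadcast(lapic_timer_broadcast);
	}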

Patches currently in -mm which might be from tglx@xxxxxxxxxxxxx are

printk-timed-ratelimit.patch
schedule-removal-of-futex_fd.patch
setup_irq-better-mismatch-debugging.patch
gtod-exponential-update_wall_time.patch
gtod-persistent-clock-support-core.patch
gtod-persistent-clock-support-i386.patch
time-uninline-jiffiesh.patch
time-uninline-jiffiesh-fix.patch
time-fix-msecs_to_jiffies-bug.patch
time-fix-timeout-overflow.patch
cleanup-uninline-irq_enter-and-move-it-into-a-function.patch
dynticks-extend-next_timer_interrupt-to-use-a-reference-jiffie.patch
dynticks-extend-next_timer_interrupt-to-use-a-reference-jiffie-remove-incorrect-warning-in-kernel-timerc.patch
hrtimers-namespace-and-enum-cleanup.patch
hrtimers-clean-up-locking.patch
hrtimers-state-tracking.patch
hrtimers-clean-up-callback-tracking.patch
hrtimers-move-and-add-documentation.patch
clockevents-core.patch
clockevents-drivers-for-i386.patch
high-res-timers-core.patch
gtod-mark-tsc-unusable-for-highres-timers.patch
dynticks-core.patch
dynticks-add-nohz-stats-to-proc-stat.patch
dynticks-i386-arch-code.patch
high-res-timers-dynticks-enable-i386-support.patch
debugging-feature-timer-stats.patch
highres-timer-core-fix-status-check.patch
highres-timer-core-fix-commandline-setup.patch
clockevents-smp-on-up-features.patch
highres-depend-on-clockevents.patch
i386-apic-cleanup.patch
pm-timer-allow-early-access.patch
i386-lapic-timer-calibration.patch
clockevents-add-broadcast-support.patch
acpi-include-apic-h.patch
acpi-keep-track-of-timer-broadcast.patch
i386-apic-timer-use-clockevents-broadcast.patch
acpi-verify-lapic-timer.patch
round_jiffies-infrastructure.patch
round_jiffies-infrastructure-fix.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
