mirror of
https://github.com/Dasharo/zephyr.git
synced 2026-03-06 14:57:20 -08:00
tests: kernel: timer_behavior: improve timer_tick_train output
Print the "perfect" reference period for easier evaluation, and suggest a remedy to the missed-ticks problem. Still, that wasn't satisfactory, so a count of missed ticks was implemented to get to the bottom of this issue.

This revealed that missed ticks always came to a count of exactly 40. Incidentally, the busy loop prints a line every 250 ms and the test spans 10 seconds — there are no such coincidences. It turns out that CONFIG_PRINTK_SYNC was set by default. This disables IRQs for the duration of the serial output, which can be quite long at 115200 bauds: given a 60-ish character line length, this represents more than 5 ms of no IRQ servicing during a timer latency measurement test, which is bad. So make sure CONFIG_PRINTK_SYNC=n for proper statistics.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
committed by
Anas Nashif
parent
0dec4cf927
commit
b60cb9cc80
@@ -1,3 +1,8 @@
CONFIG_ZTEST=y
CONFIG_ZTEST_NEW_API=y
CONFIG_CBPRINTF_FP_SUPPORT=y

# Make sure this is off. Otherwise a single 60-character line at
# 115200 bauds would grab the printk lock and disable IRQs for more
# than 5 ms, screwing up timer latency statistics.
CONFIG_PRINTK_SYNC=n
||||
@@ -14,10 +14,11 @@
|
||||
/*
 * Expected number of callbacks per timer over the whole test run.
 * The entire expansion is parenthesized so that the macro behaves like a
 * single value at every use site: without the outer parentheses,
 * `x / MAX_CALLBACKS` would expand to `x / (A*B) / TIMERS` (i.e. divide
 * by TIMERS instead of multiplying by it), corrupting the late-callback
 * percentage computation.
 */
#define MAX_CALLBACKS ((CONFIG_SYS_CLOCK_TICKS_PER_SEC * TEST_SECONDS) / TIMERS)
|
||||
|
||||
struct timer_wrapper {
|
||||
uint32_t callbacks;
|
||||
struct k_timer tm;
|
||||
uint32_t last_isr;
|
||||
int64_t last_scheduled;
|
||||
struct k_timer tm;
|
||||
uint32_t callbacks;
|
||||
uint32_t late_callbacks;
|
||||
uint32_t last_isr;
|
||||
uint32_t max_delta;
|
||||
};
|
||||
|
||||
@@ -35,6 +36,9 @@ void tm_fn(struct k_timer *tm)
|
||||
uint32_t delta = now - tm_wrap->last_isr;
|
||||
|
||||
tm_wrap->max_delta = delta > tm_wrap->max_delta ? delta : tm_wrap->max_delta;
|
||||
if (delta >= k_ticks_to_cyc_floor32(TIMERS + 1)) {
|
||||
tm_wrap->late_callbacks++;
|
||||
}
|
||||
}
|
||||
tm_wrap->last_isr = now;
|
||||
tm_wrap->callbacks++;
|
||||
@@ -122,17 +126,33 @@ ZTEST(timer_tick_train, test_one_tick_timer_train)
|
||||
|
||||
uint32_t max_delta = 0;
|
||||
|
||||
TC_PRINT(" Perfect delta %u cycles or %u us\n",
|
||||
k_ticks_to_cyc_floor32(TIMERS), k_ticks_to_us_near32(TIMERS));
|
||||
for (int i = 0; i < TIMERS; i++) {
|
||||
TC_PRINT("Timer %d max delta %u cycles or %u us\n", i, timers[i].max_delta,
|
||||
k_cyc_to_us_near32(timers[i].max_delta));
|
||||
TC_PRINT("Timer %d max delta %u cycles or %u us, "
|
||||
"%u late callbacks (%u.%u%%)\n",
|
||||
i, timers[i].max_delta,
|
||||
k_cyc_to_us_near32(timers[i].max_delta),
|
||||
timers[i].late_callbacks,
|
||||
(1000 * timers[i].late_callbacks + MAX_CALLBACKS/2) / MAX_CALLBACKS / 10,
|
||||
(1000 * timers[i].late_callbacks + MAX_CALLBACKS/2) / MAX_CALLBACKS % 10);
|
||||
max_delta = timers[i].max_delta > max_delta ? timers[i].max_delta : max_delta;
|
||||
}
|
||||
|
||||
for (int i = 0; i < TIMERS; i++) {
|
||||
k_timer_stop(&timers[i].tm);
|
||||
}
|
||||
|
||||
if (max_delta >= k_ticks_to_cyc_floor32(TIMERS + 1)) {
|
||||
TC_PRINT("!! Some ticks were missed.\n");
|
||||
TC_PRINT("!! Consider making CONFIG_SYS_CLOCK_TICKS_PER_SEC smaller.\n");
|
||||
/* should this fail the test? */
|
||||
}
|
||||
|
||||
const uint32_t maximum_busy_loops = TEST_SECONDS * 4;
|
||||
|
||||
if (busy_loops < (maximum_busy_loops - maximum_busy_loops/10)) {
|
||||
TC_PRINT("!! The busy loop didn't run as much as expected.\n");
|
||||
TC_PRINT("!! Consider making CONFIG_SYS_CLOCK_TICKS_PER_SEC smaller.\n");
|
||||
}
|
||||
|
||||
/* On some platforms, where the tick period is short, like on nRF
|
||||
* platforms where it is ~30 us, execution of the timer handlers
|
||||
* can take significant part of the CPU time, so accept if at least
|
||||
|
||||
Reference in New Issue
Block a user