Harden playback/cache path for malformed media and concurrent timeline updates

- Invalidate timeline cache on ApplyJsonDiff() clip insert (remove affected frame range).
- Add lock in Timeline::ClearAllCache() for safe concurrent access.
- Make VideoCacheThread cross-thread state safe (atomics + seek-state mutex).
- Lock CacheMemory::Contains() to avoid races.
- Handle malformed audio streams in FFmpegReader by disabling invalid audio and continuing video-only.
- Add FPS/timebase safety fallbacks in FFmpeg frame/PTS math.
- Guard Frame::GetSamplesPerFrame() against invalid inputs.
- Add/adjust regression tests for cache invalidation and invalid rate handling.
This commit is contained in:
Jonathan Thomas
2026-02-11 20:11:47 -06:00
parent 57c1fb2ec3
commit d70e80eac4
9 changed files with 238 additions and 83 deletions

View File

@@ -70,6 +70,9 @@ void CacheMemory::Add(std::shared_ptr<Frame> frame)
// Check if frame is already contained in cache
bool CacheMemory::Contains(int64_t frame_number) {
// Create a scoped lock, to protect the cache from multiple threads
const std::lock_guard<std::recursive_mutex> lock(*cacheMutex);
if (frames.count(frame_number) > 0) {
return true;
} else {

View File

@@ -561,23 +561,81 @@ void FFmpegReader::Open() {
// Audio encoding does not typically use more than 2 threads (most codecs use 1 thread)
aCodecCtx->thread_count = std::min(FF_AUDIO_NUM_PROCESSORS, 2);
if (aCodec == NULL) {
throw InvalidCodec("A valid audio codec could not be found for this file.", path);
bool audio_opened = false;
if (aCodec != NULL) {
// Init options
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
// Open audio codec
audio_opened = (avcodec_open2(aCodecCtx, aCodec, &opts) >= 0);
// Free options
av_dict_free(&opts);
}
// Init options
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
if (audio_opened) {
// Update the File Info struct with audio details (if an audio stream is found)
UpdateAudioInfo();
// Open audio codec
if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
throw InvalidCodec("An audio codec was found, but could not be opened.", path);
// Disable malformed audio stream metadata (prevents divide-by-zero / invalid resampling math)
const bool invalid_audio_info =
(info.channels <= 0) ||
(info.sample_rate <= 0) ||
(info.audio_timebase.num <= 0) ||
(info.audio_timebase.den <= 0);
if (invalid_audio_info) {
ZmqLogger::Instance()->AppendDebugMethod(
"FFmpegReader::Open (Disable invalid audio stream)",
"channels", info.channels,
"sample_rate", info.sample_rate,
"audio_timebase.num", info.audio_timebase.num,
"audio_timebase.den", info.audio_timebase.den);
info.has_audio = false;
info.audio_stream_index = -1;
audioStream = -1;
packet_status.audio_eof = true;
if (aCodecCtx) {
if (avcodec_is_open(aCodecCtx)) {
avcodec_flush_buffers(aCodecCtx);
}
AV_FREE_CONTEXT(aCodecCtx);
aCodecCtx = nullptr;
}
aStream = nullptr;
}
} else {
// Keep decoding video, but disable bad/unsupported audio stream.
ZmqLogger::Instance()->AppendDebugMethod(
"FFmpegReader::Open (Audio codec unavailable; disabling audio)",
"audioStream", audioStream);
info.has_audio = false;
info.audio_stream_index = -1;
audioStream = -1;
packet_status.audio_eof = true;
if (aCodecCtx) {
AV_FREE_CONTEXT(aCodecCtx);
aCodecCtx = nullptr;
}
aStream = nullptr;
}
}
// Free options
av_dict_free(&opts);
// Update the File Info struct with audio details (if an audio stream is found)
UpdateAudioInfo();
// Guard invalid frame-rate / timebase values from malformed streams.
if (info.fps.num <= 0 || info.fps.den <= 0) {
ZmqLogger::Instance()->AppendDebugMethod(
"FFmpegReader::Open (Invalid FPS detected; applying fallback)",
"fps.num", info.fps.num,
"fps.den", info.fps.den);
info.fps.num = 30;
info.fps.den = 1;
}
if (info.video_timebase.num <= 0 || info.video_timebase.den <= 0) {
ZmqLogger::Instance()->AppendDebugMethod(
"FFmpegReader::Open (Invalid video_timebase detected; applying fallback)",
"video_timebase.num", info.video_timebase.num,
"video_timebase.den", info.video_timebase.den);
info.video_timebase = info.fps.Reciprocal();
}
// Add format metadata (if any)
@@ -2236,12 +2294,17 @@ void FFmpegReader::UpdatePTSOffset() {
int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
// Apply PTS offset
int64_t previous_video_frame = current_video_frame;
const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
const double video_timebase_value =
(info.video_timebase.num > 0 && info.video_timebase.den > 0)
? info.video_timebase.ToDouble()
: (1.0 / 30.0);
// Get the video packet start time (in seconds)
double video_seconds = (double(pts) * info.video_timebase.ToDouble()) + pts_offset_seconds;
double video_seconds = (double(pts) * video_timebase_value) + pts_offset_seconds;
// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;
int64_t frame = round(video_seconds * fps_value) + 1;
// Keep track of the expected video frame #
if (current_video_frame == 0)
@@ -2264,11 +2327,17 @@ int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
// Convert Frame Number into Video PTS
int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
const double video_timebase_value =
(info.video_timebase.num > 0 && info.video_timebase.den > 0)
? info.video_timebase.ToDouble()
: (1.0 / 30.0);
// Get timestamp of this frame (in seconds)
double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
double seconds = (double(frame_number - 1) / fps_value) + pts_offset_seconds;
// Calculate the # of video packets in this timestamp
int64_t video_pts = round(seconds / info.video_timebase.ToDouble());
int64_t video_pts = round(seconds / video_timebase_value);
// Apply PTS offset (opposite)
return video_pts;
@@ -2276,11 +2345,17 @@ int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
// Convert Frame Number into Video PTS
int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
const double audio_timebase_value =
(info.audio_timebase.num > 0 && info.audio_timebase.den > 0)
? info.audio_timebase.ToDouble()
: (1.0 / 48000.0);
// Get timestamp of this frame (in seconds)
double seconds = (double(frame_number - 1) / info.fps.ToDouble()) + pts_offset_seconds;
double seconds = (double(frame_number - 1) / fps_value) + pts_offset_seconds;
// Calculate the # of audio packets in this timestamp
int64_t audio_pts = round(seconds / info.audio_timebase.ToDouble());
int64_t audio_pts = round(seconds / audio_timebase_value);
// Apply PTS offset (opposite)
return audio_pts;
@@ -2288,11 +2363,17 @@ int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
// Calculate Starting video frame and sample # for an audio PTS
AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
const double audio_timebase_value =
(info.audio_timebase.num > 0 && info.audio_timebase.den > 0)
? info.audio_timebase.ToDouble()
: (1.0 / 48000.0);
const double fps_value = (info.fps.num > 0 && info.fps.den > 0) ? info.fps.ToDouble() : 30.0;
// Get the audio packet start time (in seconds)
double audio_seconds = (double(pts) * info.audio_timebase.ToDouble()) + pts_offset_seconds;
double audio_seconds = (double(pts) * audio_timebase_value) + pts_offset_seconds;
// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
double frame = (audio_seconds * info.fps.ToDouble()) + 1;
double frame = (audio_seconds * fps_value) + 1;
// Frame # as a whole number (no more decimals)
int64_t whole_frame = int64_t(frame);

View File

@@ -455,9 +455,9 @@ void Frame::SetFrameNumber(int64_t new_number)
// Calculate the # of samples per video frame (for a specific frame number and frame rate)
int Frame::GetSamplesPerFrame(int64_t number, Fraction fps, int sample_rate, int channels)
{
// Directly return 0 if there are no channels
// Directly return 0 for invalid audio/frame-rate parameters
// so that we do not need to deal with NaNs later
if (channels == 0) return 0;
if (channels <= 0 || sample_rate <= 0 || fps.num <= 0 || fps.den <= 0) return 0;
// Get the total # of samples for the previous frame, and the current frame (rounded)
double fps_rate = fps.Reciprocal().ToDouble();

View File

@@ -53,25 +53,28 @@ namespace openshot
return false;
}
if (min_frames_ahead < 0) {
const int64_t ready_min = min_frames_ahead.load();
if (ready_min < 0) {
return true;
}
const int64_t cached_index = last_cached_index.load();
const int64_t playhead = requested_display_frame.load();
int dir = computeDirection();
if (dir > 0) {
return (last_cached_index >= requested_display_frame + min_frames_ahead);
return (cached_index >= playhead + ready_min);
}
return (last_cached_index <= requested_display_frame - min_frames_ahead);
return (cached_index <= playhead - ready_min);
}
void VideoCacheThread::setSpeed(int new_speed)
{
// Only update last_speed and last_dir when new_speed != 0
if (new_speed != 0) {
last_speed = new_speed;
last_dir = (new_speed > 0 ? 1 : -1);
last_speed.store(new_speed);
last_dir.store(new_speed > 0 ? 1 : -1);
}
speed = new_speed;
speed.store(new_speed);
}
// Get the size in bytes of a frame (rough estimate)
@@ -106,29 +109,38 @@ namespace openshot
void VideoCacheThread::Seek(int64_t new_position, bool start_preroll)
{
if (start_preroll) {
userSeeked = true;
bool should_mark_seek = false;
bool should_preroll = false;
int64_t new_cached_count = cached_frame_count.load();
if (start_preroll) {
should_mark_seek = true;
CacheBase* cache = reader ? reader->GetCache() : nullptr;
if (cache && !cache->Contains(new_position))
{
// If user initiated seek, and current frame not found in cache
Timeline* timeline = static_cast<Timeline*>(reader);
timeline->ClearAllCache();
cached_frame_count = 0;
preroll_on_next_fill = true;
if (Timeline* timeline = dynamic_cast<Timeline*>(reader)) {
timeline->ClearAllCache();
}
new_cached_count = 0;
should_preroll = true;
}
else if (cache)
{
cached_frame_count = cache->Count();
preroll_on_next_fill = false;
}
else {
preroll_on_next_fill = false;
new_cached_count = cache->Count();
}
}
{
std::lock_guard<std::mutex> guard(seek_state_mutex);
requested_display_frame.store(new_position);
cached_frame_count.store(new_cached_count);
if (start_preroll) {
preroll_on_next_fill.store(should_preroll);
userSeeked.store(should_mark_seek);
}
}
requested_display_frame = new_position;
}
void VideoCacheThread::Seek(int64_t new_position)
@@ -139,13 +151,17 @@ namespace openshot
int VideoCacheThread::computeDirection() const
{
// If speed ≠ 0, use its sign; if speed==0, keep last_dir
return (speed != 0 ? (speed > 0 ? 1 : -1) : last_dir);
const int current_speed = speed.load();
if (current_speed != 0) {
return (current_speed > 0 ? 1 : -1);
}
return last_dir.load();
}
void VideoCacheThread::handleUserSeek(int64_t playhead, int dir)
{
// Place last_cached_index just “behind” playhead in the given dir
last_cached_index = playhead - dir;
last_cached_index.store(playhead - dir);
}
void VideoCacheThread::handleUserSeekWithPreroll(int64_t playhead,
@@ -162,7 +178,7 @@ namespace openshot
preroll_start = std::min<int64_t>(timeline_end, playhead + preroll_frames);
}
}
last_cached_index = preroll_start - dir;
last_cached_index.store(preroll_start - dir);
}
int64_t VideoCacheThread::computePrerollFrames(const Settings* settings) const
@@ -187,9 +203,10 @@ namespace openshot
{
if (paused && !cache->Contains(playhead)) {
// If paused and playhead not in cache, clear everything
Timeline* timeline = static_cast<Timeline*>(reader);
timeline->ClearAllCache();
cached_frame_count = 0;
if (Timeline* timeline = dynamic_cast<Timeline*>(reader)) {
timeline->ClearAllCache();
}
cached_frame_count.store(0);
return true;
}
return false;
@@ -224,7 +241,7 @@ namespace openshot
ReaderBase* reader)
{
bool window_full = true;
int64_t next_frame = last_cached_index + dir;
int64_t next_frame = last_cached_index.load() + dir;
// Advance from last_cached_index toward window boundary
while ((dir > 0 && next_frame <= window_end) ||
@@ -234,7 +251,7 @@ namespace openshot
break;
}
// If a Seek was requested mid-caching, bail out immediately
if (userSeeked) {
if (userSeeked.load()) {
break;
}
@@ -243,7 +260,7 @@ namespace openshot
try {
auto framePtr = reader->GetFrame(next_frame);
cache->Add(framePtr);
cached_frame_count = cache->Count();
cached_frame_count.store(cache->Count());
}
catch (const OutOfBoundsFrame&) {
break;
@@ -254,7 +271,7 @@ namespace openshot
cache->Touch(next_frame);
}
last_cached_index = next_frame;
last_cached_index.store(next_frame);
next_frame += dir;
}
@@ -272,27 +289,31 @@ namespace openshot
// If caching disabled or no reader, mark cache as ready and sleep briefly
if (!settings->ENABLE_PLAYBACK_CACHING || !cache) {
cached_frame_count = (cache ? cache->Count() : 0);
min_frames_ahead = -1;
cached_frame_count.store(cache ? cache->Count() : 0);
min_frames_ahead.store(-1);
std::this_thread::sleep_for(double_micro_sec(50000));
continue;
}
// init local vars
min_frames_ahead = settings->VIDEO_CACHE_MIN_PREROLL_FRAMES;
min_frames_ahead.store(settings->VIDEO_CACHE_MIN_PREROLL_FRAMES);
Timeline* timeline = static_cast<Timeline*>(reader);
Timeline* timeline = dynamic_cast<Timeline*>(reader);
if (!timeline) {
std::this_thread::sleep_for(double_micro_sec(50000));
continue;
}
int64_t timeline_end = timeline->GetMaxFrame();
int64_t playhead = requested_display_frame;
bool paused = (speed == 0);
int64_t playhead = requested_display_frame.load();
bool paused = (speed.load() == 0);
int64_t preroll_frames = computePrerollFrames(settings);
cached_frame_count = cache->Count();
cached_frame_count.store(cache->Count());
// Compute effective direction (±1)
int dir = computeDirection();
if (speed != 0) {
last_dir = dir;
if (speed.load() != 0) {
last_dir.store(dir);
}
// Compute bytes_per_frame, max_bytes, and capacity once
@@ -313,16 +334,25 @@ namespace openshot
}
// Handle a user-initiated seek
bool use_preroll = preroll_on_next_fill;
if (userSeeked) {
bool did_user_seek = false;
bool use_preroll = false;
{
std::lock_guard<std::mutex> guard(seek_state_mutex);
playhead = requested_display_frame.load();
did_user_seek = userSeeked.load();
use_preroll = preroll_on_next_fill.load();
if (did_user_seek) {
userSeeked.store(false);
preroll_on_next_fill.store(false);
}
}
if (did_user_seek) {
if (use_preroll) {
handleUserSeekWithPreroll(playhead, dir, timeline_end, preroll_frames);
}
else {
handleUserSeek(playhead, dir);
}
userSeeked = false;
preroll_on_next_fill = false;
}
else if (!paused && capacity >= 1) {
// In playback mode, check if last_cached_index drifted outside the new window
@@ -339,8 +369,8 @@ namespace openshot
);
bool outside_window =
(dir > 0 && last_cached_index > window_end) ||
(dir < 0 && last_cached_index < window_begin);
(dir > 0 && last_cached_index.load() > window_end) ||
(dir < 0 && last_cached_index.load() < window_begin);
if (outside_window) {
handleUserSeek(playhead, dir);
}
@@ -362,7 +392,7 @@ namespace openshot
ready_target = 0;
}
int64_t configured_min = settings->VIDEO_CACHE_MIN_PREROLL_FRAMES;
min_frames_ahead = std::min<int64_t>(configured_min, ready_target);
min_frames_ahead.store(std::min<int64_t>(configured_min, ready_target));
// If paused and playhead is no longer in cache, clear everything
bool did_clear = clearCacheIfPaused(playhead, paused, cache);

View File

@@ -17,6 +17,8 @@
#include <AppConfig.h>
#include <juce_audio_basics/juce_audio_basics.h>
#include <atomic>
#include <mutex>
#include <memory>
namespace openshot
@@ -57,7 +59,7 @@ namespace openshot
void setSpeed(int new_speed);
/// @return The current speed (1=normal, 2=fast, -1=rewind, etc.)
int getSpeed() const { return speed; }
int getSpeed() const { return speed.load(); }
/// Seek to a specific frame (no preroll).
void Seek(int64_t new_position);
@@ -175,23 +177,24 @@ namespace openshot
std::shared_ptr<Frame> last_cached_frame; ///< Last frame pointer added to cache.
int speed; ///< Current playback speed (0=paused, >0 forward, <0 backward).
int last_speed; ///< Last non-zero speed (for tracking).
int last_dir; ///< Last direction sign (+1 forward, -1 backward).
bool userSeeked; ///< True if Seek(..., true) was called (forces a cache reset).
bool preroll_on_next_fill; ///< True if next cache rebuild should include preroll offset.
std::atomic<int> speed; ///< Current playback speed (0=paused, >0 forward, <0 backward).
std::atomic<int> last_speed; ///< Last non-zero speed (for tracking).
std::atomic<int> last_dir; ///< Last direction sign (+1 forward, -1 backward).
std::atomic<bool> userSeeked; ///< True if Seek(..., true) was called (forces a cache reset).
std::atomic<bool> preroll_on_next_fill; ///< True if next cache rebuild should include preroll offset.
int64_t requested_display_frame; ///< Frame index the user requested.
std::atomic<int64_t> requested_display_frame; ///< Frame index the user requested.
int64_t current_display_frame; ///< Currently displayed frame (unused here, reserved).
int64_t cached_frame_count; ///< Estimated count of frames currently stored in cache.
std::atomic<int64_t> cached_frame_count; ///< Estimated count of frames currently stored in cache.
int64_t min_frames_ahead; ///< Minimum number of frames considered “ready” (pre-roll).
std::atomic<int64_t> min_frames_ahead; ///< Minimum number of frames considered “ready” (pre-roll).
int64_t timeline_max_frame; ///< Highest valid frame index in the timeline.
ReaderBase* reader; ///< The source reader (e.g., Timeline, FFmpegReader).
bool force_directional_cache; ///< (Reserved for future use).
int64_t last_cached_index; ///< Index of the most recently cached frame.
std::atomic<int64_t> last_cached_index; ///< Index of the most recently cached frame.
mutable std::mutex seek_state_mutex; ///< Protects coherent seek state updates/consumption.
};
} // namespace openshot

View File

@@ -1461,6 +1461,11 @@ void Timeline::apply_json_to_clips(Json::Value change) {
// Add clip to timeline
AddClip(clip);
// Calculate start and end frames that this impacts, and remove those frames from the cache
int64_t new_starting_frame = (clip->Position() * info.fps.ToDouble()) + 1;
int64_t new_ending_frame = ((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
} else if (change_type == "update") {
// Update existing clip
@@ -1747,6 +1752,8 @@ void Timeline::apply_json_to_timeline(Json::Value change) {
// Clear all caches
void Timeline::ClearAllCache(bool deep) {
// Get lock (prevent getting frames while this happens)
const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);
// Clear primary cache
if (final_cache) {

View File

@@ -138,6 +138,14 @@ TEST_CASE( "Copy_Constructor", "[libopenshot][frame]" )
CHECK(f1.GetAudioSamplesCount() == f2.GetAudioSamplesCount());
}
// Regression: Frame::GetSamplesPerFrame() must return 0 (not NaN / crash)
// whenever any rate-related input is invalid: a degenerate fps fraction,
// a non-positive sample rate, or a non-positive channel count.
TEST_CASE( "GetSamplesPerFrame invalid rate inputs", "[libopenshot][frame]" )
{
    const int64_t frame_number = 1;

    // Degenerate fps: zero numerator, then zero denominator
    CHECK(Frame::GetSamplesPerFrame(frame_number, Fraction(0, 1), 44100, 2) == 0);
    CHECK(Frame::GetSamplesPerFrame(frame_number, Fraction(30, 0), 44100, 2) == 0);

    // Invalid sample rate
    CHECK(Frame::GetSamplesPerFrame(frame_number, Fraction(30, 1), 0, 2) == 0);

    // Invalid channel count
    CHECK(Frame::GetSamplesPerFrame(frame_number, Fraction(30, 1), 44100, 0) == 0);
}
#ifdef USE_OPENCV
TEST_CASE( "Convert_Image", "[libopenshot][opencv][frame]" )
{

View File

@@ -1010,6 +1010,29 @@ TEST_CASE( "ApplyJSONDiff and FrameMappers", "[libopenshot][timeline]" )
CHECK(clip1.Reader()->Name() == "QtImageReader");
}
// Regression: inserting a clip through ApplyJsonDiff() must evict any cached
// frames whose range overlaps the newly inserted clip, so stale (pre-insert)
// frames are never served from the timeline cache.
TEST_CASE( "ApplyJSONDiff insert invalidates overlapping timeline cache", "[libopenshot][timeline]" )
{
    // Start from an empty timeline, so cached frames are black placeholders
    Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
    t.Open();

    // Prime the cache with a frame inside the region the new clip will cover
    std::shared_ptr<Frame> cached_before = t.GetFrame(10);
    REQUIRE(cached_before != nullptr);
    REQUIRE(t.GetCache() != nullptr);
    REQUIRE(t.GetCache()->Contains(10));

    // Build an "insert" JSON diff for an image clip overlapping frame 10
    std::stringstream media_path;
    media_path << TEST_MEDIA_PATH << "interlaced.png";
    std::stringstream diff_json;
    diff_json << "[{\"type\":\"insert\",\"key\":[\"clips\"],\"value\":{\"id\":\"INSERT_CACHE_INVALIDATE\",\"layer\":1,\"position\":0.0,\"start\":0,\"end\":10,\"reader\":{\"acodec\":\"\",\"audio_bit_rate\":0,\"audio_stream_index\":-1,\"audio_timebase\":{\"den\":1,\"num\":1},\"channel_layout\":4,\"channels\":0,\"display_ratio\":{\"den\":1,\"num\":1},\"duration\":3600.0,\"file_size\":\"160000\",\"fps\":{\"den\":1,\"num\":30},\"has_audio\":false,\"has_single_image\":true,\"has_video\":true,\"height\":200,\"interlaced_frame\":false,\"metadata\":{},\"path\":\"" << media_path.str() << "\",\"pixel_format\":-1,\"pixel_ratio\":{\"den\":1,\"num\":1},\"sample_rate\":0,\"top_field_first\":true,\"type\":\"QtImageReader\",\"vcodec\":\"\",\"video_bit_rate\":0,\"video_length\":\"108000\",\"video_stream_index\":-1,\"video_timebase\":{\"den\":30,\"num\":1},\"width\":200}},\"partial\":false}]";
    t.ApplyJsonDiff(diff_json.str());

    // The overlapping cached frame must have been removed
    CHECK(!t.GetCache()->Contains(10));
}
TEST_CASE( "ApplyJSONDiff Update Reader Info", "[libopenshot][timeline]" )
{
// Create a timeline

View File

@@ -36,12 +36,12 @@ public:
using VideoCacheThread::handleUserSeekWithPreroll;
using VideoCacheThread::computePrerollFrames;
int64_t getLastCachedIndex() const { return last_cached_index; }
void setLastCachedIndex(int64_t v) { last_cached_index = v; }
void setPlayhead(int64_t v) { requested_display_frame = v; }
void setMinFramesAhead(int64_t v) { min_frames_ahead = v; }
void setLastDir(int d) { last_dir = d; }
void forceUserSeekFlag() { userSeeked = true; }
int64_t getLastCachedIndex() const { return last_cached_index.load(); }
void setLastCachedIndex(int64_t v) { last_cached_index.store(v); }
void setPlayhead(int64_t v) { requested_display_frame.store(v); }
void setMinFramesAhead(int64_t v) { min_frames_ahead.store(v); }
void setLastDir(int d) { last_dir.store(d); }
void forceUserSeekFlag() { userSeeked.store(true); }
};
// ----------------------------------------------------------------------------