You've already forked libopenshot
mirror of
https://github.com/OpenShot/libopenshot.git
synced 2026-03-02 08:53:52 -08:00
Large timeline clean-up, speed-up, and fix concurrency bugs:
- make Add/Remove Effect methods thread safe - Fix RemoveClip memory leak - Improve performance of sorting clips by position and layer, cache some common accessors, and speed up "clip intersection" logic - Don't resize audio container in loop - do it once - Large refactor of looping through clips and finding top clip - Protect ClearAllCache from empty Readers, prevent crash - Expanded unit tests to include RemoveEffect, and test many of the changes in the commit.
This commit is contained in:
183
src/Timeline.cpp
183
src/Timeline.cpp
@@ -21,6 +21,9 @@
|
||||
|
||||
#include <QDir>
|
||||
#include <QFileInfo>
|
||||
#include <unordered_map>
|
||||
#include <cmath>
|
||||
#include <cstdint>
|
||||
|
||||
using namespace openshot;
|
||||
|
||||
@@ -357,6 +360,9 @@ void Timeline::AddClip(Clip* clip)
|
||||
// Add an effect to the timeline
|
||||
void Timeline::AddEffect(EffectBase* effect)
|
||||
{
|
||||
// Get lock (prevent getting frames while this happens)
|
||||
const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);
|
||||
|
||||
// Assign timeline to effect
|
||||
effect->ParentTimeline(this);
|
||||
|
||||
@@ -370,14 +376,16 @@ void Timeline::AddEffect(EffectBase* effect)
|
||||
// Remove an effect from the timeline
|
||||
void Timeline::RemoveEffect(EffectBase* effect)
|
||||
{
|
||||
// Get lock (prevent getting frames while this happens)
|
||||
const std::lock_guard<std::recursive_mutex> guard(getFrameMutex);
|
||||
|
||||
effects.remove(effect);
|
||||
|
||||
// Delete effect object (if timeline allocated it)
|
||||
bool allocated = allocated_effects.count(effect);
|
||||
if (allocated) {
|
||||
if (allocated_effects.count(effect)) {
|
||||
allocated_effects.erase(effect); // erase before nulling the pointer
|
||||
delete effect;
|
||||
effect = NULL;
|
||||
allocated_effects.erase(effect);
|
||||
}
|
||||
|
||||
// Sort effects
|
||||
@@ -393,11 +401,10 @@ void Timeline::RemoveClip(Clip* clip)
|
||||
clips.remove(clip);
|
||||
|
||||
// Delete clip object (if timeline allocated it)
|
||||
bool allocated = allocated_clips.count(clip);
|
||||
if (allocated) {
|
||||
if (allocated_clips.count(clip)) {
|
||||
allocated_clips.erase(clip); // erase before nulling the pointer
|
||||
delete clip;
|
||||
clip = NULL;
|
||||
allocated_clips.erase(clip);
|
||||
}
|
||||
|
||||
// Sort clips
|
||||
@@ -551,8 +558,9 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
|
||||
for (auto effect : effects)
|
||||
{
|
||||
// Does clip intersect the current requested time
|
||||
long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
|
||||
long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble());
|
||||
const double fpsD = info.fps.ToDouble();
|
||||
int64_t effect_start_position = static_cast<int64_t>(std::llround(effect->Position() * fpsD)) + 1;
|
||||
int64_t effect_end_position = static_cast<int64_t>(std::llround((effect->Position() + effect->Duration()) * fpsD));
|
||||
|
||||
bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
|
||||
|
||||
@@ -560,8 +568,8 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
|
||||
if (does_effect_intersect)
|
||||
{
|
||||
// Determine the frame needed for this clip (based on the position on the timeline)
|
||||
long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
|
||||
long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
|
||||
int64_t effect_start_frame = static_cast<int64_t>(std::llround(effect->Start() * fpsD)) + 1;
|
||||
int64_t effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
|
||||
|
||||
if (!options->is_top_clip)
|
||||
continue; // skip effect, if overlapped/covered by another clip on same layer
|
||||
@@ -626,14 +634,13 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> backgro
|
||||
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, bool is_top_clip, float max_volume)
|
||||
{
|
||||
// Create timeline options (with details about this current frame request)
|
||||
TimelineInfoStruct* options = new TimelineInfoStruct();
|
||||
options->is_top_clip = is_top_clip;
|
||||
options->is_before_clip_keyframes = true;
|
||||
TimelineInfoStruct options{};
|
||||
options.is_top_clip = is_top_clip;
|
||||
options.is_before_clip_keyframes = true;
|
||||
|
||||
// Get the clip's frame, composited on top of the current timeline frame
|
||||
std::shared_ptr<Frame> source_frame;
|
||||
source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number, options);
|
||||
delete options;
|
||||
source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number, &options);
|
||||
|
||||
// No frame found... so bail
|
||||
if (!source_frame)
|
||||
@@ -656,6 +663,12 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
|
||||
"clip_frame_number", clip_frame_number);
|
||||
|
||||
if (source_frame->GetAudioChannelsCount() == info.channels && source_clip->has_audio.GetInt(clip_frame_number) != 0)
|
||||
{
|
||||
// Ensure timeline frame matches the source samples once per frame
|
||||
if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
|
||||
new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
|
||||
}
|
||||
|
||||
for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
|
||||
{
|
||||
// Get volume from previous frame and this frame
|
||||
@@ -692,18 +705,11 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
|
||||
if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
|
||||
source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
|
||||
|
||||
// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
|
||||
// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
|
||||
// number of samples returned is variable... and does not match the number expected.
|
||||
// This is a crude solution at best. =)
|
||||
if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
|
||||
// Force timeline frame to match the source frame
|
||||
new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
|
||||
}
|
||||
// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, to
|
||||
// be sure to set the gains correctly, so the sum does not exceed 1.0 (or audio distortion will happen).
|
||||
new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
|
||||
}
|
||||
}
|
||||
else
|
||||
// Debug output
|
||||
ZmqLogger::Instance()->AppendDebugMethod(
|
||||
@@ -1005,67 +1011,90 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
|
||||
"clips.size()", clips.size(),
|
||||
"nearby_clips.size()", nearby_clips.size());
|
||||
|
||||
// Find Clips near this time
|
||||
for (auto clip : nearby_clips) {
|
||||
long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
|
||||
long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble());
|
||||
bool does_clip_intersect = (clip_start_position <= requested_frame && clip_end_position >= requested_frame);
|
||||
// Precompute per-clip timing for this requested frame
|
||||
struct ClipInfo {
|
||||
Clip* clip;
|
||||
int64_t start_pos;
|
||||
int64_t end_pos;
|
||||
int64_t start_frame;
|
||||
int64_t frame_number;
|
||||
bool intersects;
|
||||
};
|
||||
std::vector<ClipInfo> clip_infos;
|
||||
clip_infos.reserve(nearby_clips.size());
|
||||
const double fpsD = info.fps.ToDouble();
|
||||
|
||||
for (auto clip : nearby_clips) {
|
||||
int64_t start_pos = static_cast<int64_t>(std::llround(clip->Position() * fpsD)) + 1;
|
||||
int64_t end_pos = static_cast<int64_t>(std::llround((clip->Position() + clip->Duration()) * fpsD));
|
||||
bool intersects = (start_pos <= requested_frame && end_pos >= requested_frame);
|
||||
int64_t start_frame = static_cast<int64_t>(std::llround(clip->Start() * fpsD)) + 1;
|
||||
int64_t frame_number = requested_frame - start_pos + start_frame;
|
||||
clip_infos.push_back({clip, start_pos, end_pos, start_frame, frame_number, intersects});
|
||||
}
|
||||
|
||||
// Determine top clip per layer (linear, no nested loop)
|
||||
std::unordered_map<int, int64_t> top_start_for_layer;
|
||||
std::unordered_map<int, Clip*> top_clip_for_layer;
|
||||
for (const auto& ci : clip_infos) {
|
||||
if (!ci.intersects) continue;
|
||||
const int layer = ci.clip->Layer();
|
||||
auto it = top_start_for_layer.find(layer);
|
||||
if (it == top_start_for_layer.end() || ci.start_pos > it->second) {
|
||||
top_start_for_layer[layer] = ci.start_pos; // strictly greater to match prior logic
|
||||
top_clip_for_layer[layer] = ci.clip;
|
||||
}
|
||||
}
|
||||
|
||||
// Compute max_volume across all overlapping clips once
|
||||
float max_volume_sum = 0.0f;
|
||||
for (const auto& ci : clip_infos) {
|
||||
if (!ci.intersects) continue;
|
||||
if (ci.clip->Reader() && ci.clip->Reader()->info.has_audio &&
|
||||
ci.clip->has_audio.GetInt(ci.frame_number) != 0) {
|
||||
max_volume_sum += static_cast<float>(ci.clip->volume.GetValue(ci.frame_number));
|
||||
}
|
||||
}
|
||||
|
||||
// Compose intersecting clips in a single pass
|
||||
for (const auto& ci : clip_infos) {
|
||||
// Debug output
|
||||
ZmqLogger::Instance()->AppendDebugMethod(
|
||||
"Timeline::GetFrame (Does clip intersect)",
|
||||
"requested_frame", requested_frame,
|
||||
"clip->Position()", clip->Position(),
|
||||
"clip->Duration()", clip->Duration(),
|
||||
"does_clip_intersect", does_clip_intersect);
|
||||
"clip->Position()", ci.clip->Position(),
|
||||
"clip->Duration()", ci.clip->Duration(),
|
||||
"does_clip_intersect", ci.intersects);
|
||||
|
||||
// Clip is visible
|
||||
if (does_clip_intersect) {
|
||||
// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
|
||||
bool is_top_clip = true;
|
||||
float max_volume = 0.0;
|
||||
for (auto nearby_clip : nearby_clips) {
|
||||
long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
|
||||
long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
|
||||
long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
|
||||
long nearby_clip_frame_number = requested_frame - nearby_clip_start_position + nearby_clip_start_frame;
|
||||
|
||||
// Determine if top clip
|
||||
if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
|
||||
nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame &&
|
||||
nearby_clip_start_position > clip_start_position && is_top_clip == true) {
|
||||
is_top_clip = false;
|
||||
}
|
||||
|
||||
// Determine max volume of overlapping clips
|
||||
if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
|
||||
nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
|
||||
nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame) {
|
||||
max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
|
||||
}
|
||||
}
|
||||
if (ci.intersects) {
|
||||
// Is this the top clip on its layer?
|
||||
bool is_top_clip = false;
|
||||
const int layer = ci.clip->Layer();
|
||||
auto top_it = top_clip_for_layer.find(layer);
|
||||
if (top_it != top_clip_for_layer.end())
|
||||
is_top_clip = (top_it->second == ci.clip);
|
||||
|
||||
// Determine the frame needed for this clip (based on the position on the timeline)
|
||||
long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
|
||||
long clip_frame_number = requested_frame - clip_start_position + clip_start_frame;
|
||||
int64_t clip_frame_number = ci.frame_number;
|
||||
|
||||
// Debug output
|
||||
ZmqLogger::Instance()->AppendDebugMethod(
|
||||
"Timeline::GetFrame (Calculate clip's frame #)",
|
||||
"clip->Position()", clip->Position(),
|
||||
"clip->Start()", clip->Start(),
|
||||
"clip->Position()", ci.clip->Position(),
|
||||
"clip->Start()", ci.clip->Start(),
|
||||
"info.fps.ToFloat()", info.fps.ToFloat(),
|
||||
"clip_frame_number", clip_frame_number);
|
||||
|
||||
// Add clip's frame as layer
|
||||
add_layer(new_frame, clip, clip_frame_number, is_top_clip, max_volume);
|
||||
add_layer(new_frame, ci.clip, clip_frame_number, is_top_clip, max_volume_sum);
|
||||
|
||||
} else {
|
||||
// Debug output
|
||||
ZmqLogger::Instance()->AppendDebugMethod(
|
||||
"Timeline::GetFrame (clip does not intersect)",
|
||||
"requested_frame", requested_frame,
|
||||
"does_clip_intersect", does_clip_intersect);
|
||||
"does_clip_intersect", ci.intersects);
|
||||
}
|
||||
|
||||
} // end clip loop
|
||||
@@ -1097,15 +1126,17 @@ std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, in
|
||||
std::vector<Clip*> matching_clips;
|
||||
|
||||
// Calculate time of frame
|
||||
float min_requested_frame = requested_frame;
|
||||
float max_requested_frame = requested_frame + (number_of_frames - 1);
|
||||
const int64_t min_requested_frame = requested_frame;
|
||||
const int64_t max_requested_frame = requested_frame + (number_of_frames - 1);
|
||||
|
||||
// Find Clips at this time
|
||||
matching_clips.reserve(clips.size());
|
||||
const double fpsD = info.fps.ToDouble();
|
||||
for (auto clip : clips)
|
||||
{
|
||||
// Does clip intersect the current requested time
|
||||
long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
|
||||
long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
|
||||
int64_t clip_start_position = static_cast<int64_t>(std::llround(clip->Position() * fpsD)) + 1;
|
||||
int64_t clip_end_position = static_cast<int64_t>(std::llround((clip->Position() + clip->Duration()) * fpsD)) + 1;
|
||||
|
||||
bool does_clip_intersect =
|
||||
(clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
|
||||
@@ -1724,18 +1755,24 @@ void Timeline::ClearAllCache(bool deep) {
|
||||
// Loop through all clips
|
||||
try {
|
||||
for (const auto clip : clips) {
|
||||
// Clear cache on clip
|
||||
clip->Reader()->GetCache()->Clear();
|
||||
// Clear cache on clip and reader if present
|
||||
if (clip->Reader()) {
|
||||
if (auto rc = clip->Reader()->GetCache())
|
||||
rc->Clear();
|
||||
|
||||
// Clear nested Reader (if deep clear requested)
|
||||
if (deep && clip->Reader()->Name() == "FrameMapper") {
|
||||
FrameMapper *nested_reader = static_cast<FrameMapper *>(clip->Reader());
|
||||
if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
|
||||
nested_reader->Reader()->GetCache()->Clear();
|
||||
// Clear nested Reader (if deep clear requested)
|
||||
if (deep && clip->Reader()->Name() == "FrameMapper") {
|
||||
FrameMapper *nested_reader = static_cast<FrameMapper *>(clip->Reader());
|
||||
if (nested_reader->Reader()) {
|
||||
if (auto nc = nested_reader->Reader()->GetCache())
|
||||
nc->Clear();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear clip cache
|
||||
clip->GetCache()->Clear();
|
||||
if (auto cc = clip->GetCache())
|
||||
cc->Clear();
|
||||
}
|
||||
} catch (const ReaderClosed & e) {
|
||||
// ...
|
||||
|
||||
@@ -46,12 +46,18 @@ namespace openshot {
|
||||
/// Comparison method for sorting clip pointers (by Layer and then Position). Clips are sorted
|
||||
/// from lowest layer to top layer (since that is the sequence they need to be combined), and then
|
||||
/// by position (left to right).
|
||||
struct CompareClips{
|
||||
bool operator()( openshot::Clip* lhs, openshot::Clip* rhs){
|
||||
if( lhs->Layer() < rhs->Layer() ) return true;
|
||||
if( lhs->Layer() == rhs->Layer() && lhs->Position() <= rhs->Position() ) return true;
|
||||
return false;
|
||||
}};
|
||||
struct CompareClips {
|
||||
bool operator()(openshot::Clip* lhs, openshot::Clip* rhs) const {
|
||||
// Strict-weak ordering (no <=) to keep sort well-defined
|
||||
if (lhs == rhs) return false; // irreflexive
|
||||
if (lhs->Layer() != rhs->Layer())
|
||||
return lhs->Layer() < rhs->Layer();
|
||||
if (lhs->Position() != rhs->Position())
|
||||
return lhs->Position() < rhs->Position();
|
||||
// Stable tie-breaker on address to avoid equivalence when layer/position match
|
||||
return std::less<openshot::Clip*>()(lhs, rhs);
|
||||
}
|
||||
};
|
||||
|
||||
/// Comparison method for sorting effect pointers (by Position, Layer, and Order). Effects are sorted
|
||||
/// from lowest layer to top layer (since that is sequence clips are combined), and then by
|
||||
|
||||
@@ -757,52 +757,203 @@ TEST_CASE( "Multi-threaded Timeline GetFrame", "[libopenshot][timeline]" )
|
||||
t = NULL;
|
||||
}
|
||||
|
||||
TEST_CASE( "Multi-threaded Timeline Add/Remove Clip", "[libopenshot][timeline]" )
|
||||
{
|
||||
// Create timeline
|
||||
Timeline *t = new Timeline(1280, 720, Fraction(24, 1), 48000, 2, LAYOUT_STEREO);
|
||||
t->Open();
|
||||
// ---------------------------------------------------------------------------
|
||||
// New tests to validate removing timeline-level effects (incl. threading/locks)
|
||||
// Paste at the end of tests/Timeline.cpp
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Calculate test video path
|
||||
TEST_CASE( "RemoveEffect basic", "[libopenshot][timeline]" )
{
	// Build a minimal timeline (no clips are needed to exercise effect bookkeeping)
	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);

	// Two timeline-level effects on different layers
	Negate first;
	first.Id("E1");
	first.Layer(0);
	Negate second;
	second.Id("E2");
	second.Layer(1);

	t.AddEffect(&first);
	t.AddEffect(&second);

	// Both effects must be registered and resolvable by id
	REQUIRE(t.Effects().size() == 2);
	REQUIRE(t.GetEffect("E1") != nullptr);
	REQUIRE(t.GetEffect("E2") != nullptr);

	// Drop the first effect and confirm it no longer appears anywhere
	t.RemoveEffect(&first);
	auto remaining = t.Effects();
	CHECK(remaining.size() == 1);
	CHECK(t.GetEffect("E1") == nullptr);
	CHECK(t.GetEffect("E2") != nullptr);
	CHECK(std::find(remaining.begin(), remaining.end(), &first) == remaining.end());

	// A second removal of the same (already-removed) pointer must be a harmless no-op
	t.RemoveEffect(&first);
	CHECK(t.Effects().size() == 1);
}
|
||||
|
||||
TEST_CASE( "RemoveEffect not present is no-op", "[libopenshot][timeline]" )
{
	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);

	// One effect that is registered, and one that never will be
	Negate kept;
	kept.Id("KEEP");
	kept.Layer(0);
	Negate ghost;
	ghost.Id("GHOST");
	ghost.Layer(1);

	t.AddEffect(&kept);
	REQUIRE(t.Effects().size() == 1);

	// Removing a pointer the timeline has never seen must change nothing
	t.RemoveEffect(&ghost);

	// State is unchanged: the registered effect survives, the ghost stays absent
	CHECK(t.Effects().size() == 1);
	CHECK(t.GetEffect("KEEP") != nullptr);
	CHECK(t.GetEffect("GHOST") == nullptr);
}
|
||||
|
||||
TEST_CASE( "RemoveEffect while open (active pipeline safety)", "[libopenshot][timeline]" )
{
	// Timeline with a single visible clip so GetFrame() has content to render
	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
	std::stringstream path;
	path << TEST_MEDIA_PATH << "front3.png";
	Clip clip(path.str());
	clip.Layer(0);
	t.AddClip(&clip);

	// Register a timeline-level effect before opening
	Negate neg;
	neg.Id("NEG");
	neg.Layer(1);
	t.AddEffect(&neg);

	t.Open();

	// Exercise the pipeline while the effect is still attached
	std::shared_ptr<Frame> before = t.GetFrame(1);
	REQUIRE(before != nullptr);

	// Removing the effect from an open timeline must be safe and take effect
	t.RemoveEffect(&neg);
	CHECK(t.GetEffect("NEG") == nullptr);
	CHECK(t.Effects().size() == 0);

	// The pipeline must keep working (no crash / deadlock) after removal
	std::shared_ptr<Frame> after = t.GetFrame(2);
	REQUIRE(after != nullptr);

	// Close reader
	t.Close();
}
|
||||
|
||||
TEST_CASE( "RemoveEffect preserves ordering of remaining effects", "[libopenshot][timeline]" )
{
	// Create a timeline
	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);

	// Effects deliberately configured out of order (by Layer / Position / Order)
	Negate a;  a.Id("A");    a.Layer(0);  a.Position(0.0);  a.Order(0);
	Negate b1; b1.Id("B-1"); b1.Layer(1); b1.Position(0.0); b1.Order(3);
	Negate b;  b.Id("B");    b.Layer(1);  b.Position(0.0);  b.Order(0);
	Negate b2; b2.Id("B-2"); b2.Layer(1); b2.Position(0.5); b2.Order(2);
	Negate b3; b3.Id("B-3"); b3.Layer(1); b3.Position(0.5); b3.Order(1);
	Negate c;  c.Id("C");    c.Layer(2);  c.Position(0.0);  c.Order(0);

	// Insertion order is intentionally scrambled
	t.AddEffect(&c);
	t.AddEffect(&b);
	t.AddEffect(&a);
	t.AddEffect(&b3);
	t.AddEffect(&b2);
	t.AddEffect(&b1);

	// Remove a middle effect and verify ordering is still deterministic
	t.RemoveEffect(&b);

	std::list<EffectBase*> remaining = t.Effects();
	REQUIRE(remaining.size() == 5);

	// Expected sequence after removal; position is only asserted where the
	// original layer has ties that make it meaningful
	struct Expected {
		int layer;
		const char* id;
		int order;
		bool check_position;
		double position;
	};
	const Expected expected[] = {
		{0, "A",   0, false, 0.0},
		{1, "B-1", 3, true,  0.0},
		{1, "B-2", 2, true,  0.5},
		{1, "B-3", 1, true,  0.5},
		{2, "C",   0, false, 0.0},
	};

	int index = 0;
	for (auto effect : remaining) {
		const Expected& want = expected[index];
		CHECK(effect->Layer() == want.layer);
		CHECK(effect->Id() == want.id);
		CHECK(effect->Order() == want.order);
		if (want.check_position)
			CHECK(effect->Position() == Approx(want.position).margin(0.0001));
		++index;
	}
}
|
||||
|
||||
TEST_CASE( "Multi-threaded Timeline Add/Remove Effect", "[libopenshot][timeline]" )
|
||||
{
|
||||
// Create timeline with a clip so frames can be requested
|
||||
Timeline *t = new Timeline(1280, 720, Fraction(24, 1), 48000, 2, LAYOUT_STEREO);
|
||||
std::stringstream path;
|
||||
path << TEST_MEDIA_PATH << "test.mp4";
|
||||
Clip *clip = new Clip(path.str());
|
||||
clip->Layer(0);
|
||||
t->AddClip(clip);
|
||||
t->Open();
|
||||
|
||||
// A successful test will NOT crash - since this causes many threads to
|
||||
// call the same Timeline methods asynchronously, to verify mutexes and multi-threaded
|
||||
// access does not seg fault or crash this test.
|
||||
// A successful test will NOT crash - many threads will add/remove effects
|
||||
// while also requesting frames, exercising locks around effect mutation.
|
||||
#pragma omp parallel
|
||||
{
|
||||
// Run the following loop in all threads
|
||||
int64_t clip_count = 10;
|
||||
for (int clip_index = 1; clip_index <= clip_count; clip_index++) {
|
||||
// Create clip
|
||||
Clip* clip_video = new Clip(path.str());
|
||||
clip_video->Layer(omp_get_thread_num());
|
||||
int64_t effect_count = 10;
|
||||
for (int i = 0; i < effect_count; ++i) {
|
||||
// Each thread creates its own effect
|
||||
Negate *neg = new Negate();
|
||||
std::stringstream sid;
|
||||
sid << "NEG_T" << omp_get_thread_num() << "_I" << i;
|
||||
neg->Id(sid.str());
|
||||
neg->Layer(1 + omp_get_thread_num()); // spread across layers
|
||||
|
||||
// Add clip to timeline
|
||||
t->AddClip(clip_video);
|
||||
// Add the effect
|
||||
t->AddEffect(neg);
|
||||
|
||||
// Loop through all timeline frames - each new clip makes the timeline longer
|
||||
for (long int frame = 10; frame >= 1; frame--) {
|
||||
// Touch a few frames to exercise the render pipeline with the effect
|
||||
for (long int frame = 1; frame <= 6; ++frame) {
|
||||
std::shared_ptr<Frame> f = t->GetFrame(frame);
|
||||
t->GetMaxFrame();
|
||||
REQUIRE(f != nullptr);
|
||||
}
|
||||
|
||||
// Remove clip
|
||||
t->RemoveClip(clip_video);
|
||||
delete clip_video;
|
||||
clip_video = NULL;
|
||||
// Remove the effect and destroy it
|
||||
t->RemoveEffect(neg);
|
||||
delete neg;
|
||||
neg = nullptr;
|
||||
}
|
||||
|
||||
// Clear all clips after loop is done
|
||||
// This is designed to test the mutex for Clear()
|
||||
t->Clear();
|
||||
// Clear all effects at the end from within threads (should be safe)
|
||||
// This also exercises internal sorting/locking paths
|
||||
t->Clear();
|
||||
}
|
||||
|
||||
// Close and delete timeline object
|
||||
t->Close();
|
||||
delete t;
|
||||
t = NULL;
|
||||
t = nullptr;
|
||||
delete clip;
|
||||
clip = nullptr;
|
||||
}
|
||||
|
||||
TEST_CASE( "ApplyJSONDiff and FrameMappers", "[libopenshot][timeline]" )
|
||||
|
||||
Reference in New Issue
Block a user