diff --git a/.github/stale.yml b/.github/stale.yml
index bba68742..9071118f 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -10,7 +10,7 @@ exemptLabels:
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
-markComment: >
+markComment: |
Thank you so much for submitting an issue to help improve OpenShot Video Editor. We are sorry about this, but this particular issue has gone unnoticed for quite some time. To help keep the OpenShot GitHub Issue Tracker organized and focused, we must ensure that every issue is correctly labelled and triaged, to get the proper attention.
This issue will be closed, as it meets the following criteria:
diff --git a/examples/back.png b/examples/back.png
index e1d7bd77..c3960687 100644
Binary files a/examples/back.png and b/examples/back.png differ
diff --git a/examples/final-composite.png b/examples/final-composite.png
index 0d0bd62b..36108d43 100644
Binary files a/examples/final-composite.png and b/examples/final-composite.png differ
diff --git a/examples/front.png b/examples/front.png
index 11014a87..f747599d 100644
Binary files a/examples/front.png and b/examples/front.png differ
diff --git a/examples/front3.png b/examples/front3.png
index 11f70137..e73b04f7 100644
Binary files a/examples/front3.png and b/examples/front3.png differ
diff --git a/examples/interlaced.png b/examples/interlaced.png
index a35a2359..15f364b3 100644
Binary files a/examples/interlaced.png and b/examples/interlaced.png differ
diff --git a/examples/mask.png b/examples/mask.png
index 0f109e84..50946636 100644
Binary files a/examples/mask.png and b/examples/mask.png differ
diff --git a/examples/mask2.png b/examples/mask2.png
index 5214b0a0..377cc7ae 100644
Binary files a/examples/mask2.png and b/examples/mask2.png differ
diff --git a/examples/output-final.png b/examples/output-final.png
index 29aab713..dbff005c 100644
Binary files a/examples/output-final.png and b/examples/output-final.png differ
diff --git a/src/Clip.cpp b/src/Clip.cpp
index f26af54a..79c9b75e 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -161,6 +161,8 @@ Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), alloca
if (reader) {
End(reader->info.duration);
reader->ParentClip(this);
+ // Init reader info struct and cache size
+ init_reader_settings();
}
}
@@ -220,7 +222,8 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N
reader->ParentClip(this);
allocated_reader = reader;
// Init reader info struct and cache size
- init_reader_settings(); }
+ init_reader_settings();
+ }
}
// Destructor
@@ -325,7 +328,7 @@ std::shared_ptr Clip::GetFrame(int64_t frame_number)
{
// Check for open reader (or throw exception)
if (!is_open)
- throw ReaderClosed("The Clip is closed. Call Open() before calling this method", "N/A");
+ throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");
if (reader)
{
@@ -346,7 +349,7 @@ std::shared_ptr Clip::GetFrame(std::shared_ptr frame, in
{
// Check for open reader (or throw exception)
if (!is_open)
- throw ReaderClosed("The Clip is closed. Call Open() before calling this method", "N/A");
+ throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");
if (reader)
{
@@ -698,7 +701,7 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number)
ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number);
// Attempt to get a frame (but this could fail if a reader has just been closed)
- std::shared_ptr reader_frame = reader->GetFrame(number);
+ auto reader_frame = reader->GetFrame(number);
// Return real frame
if (reader_frame) {
@@ -706,11 +709,9 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number)
// This allows a clip to modify the pixels and audio of this frame without
// changing the underlying reader's frame data
//std::shared_ptr reader_copy(new Frame(number, 1, 1, "#000000", reader_frame->GetAudioSamplesCount(), reader_frame->GetAudioChannelsCount()));
- std::shared_ptr reader_copy(new Frame(*reader_frame.get()));
- {
- reader_copy->SampleRate(reader_frame->SampleRate());
- reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
- }
+ auto reader_copy = std::make_shared(*reader_frame.get());
+ reader_copy->SampleRate(reader_frame->SampleRate());
+ reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
return reader_copy;
}
@@ -729,7 +730,9 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number)
ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "estimated_samples_in_frame", estimated_samples_in_frame);
// Create blank frame
- std::shared_ptr new_frame = std::make_shared(number, reader->info.width, reader->info.height, "#000000", estimated_samples_in_frame, reader->info.channels);
+ auto new_frame = std::make_shared(
+ number, reader->info.width, reader->info.height,
+ "#000000", estimated_samples_in_frame, reader->info.channels);
new_frame->SampleRate(reader->info.sample_rate);
new_frame->ChannelsLayout(reader->info.channel_layout);
new_frame->AddAudioSilence(estimated_samples_in_frame);
@@ -1176,7 +1179,6 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
switch (scale)
{
case (SCALE_FIT): {
- // keep aspect ratio
source_size.scale(width, height, Qt::KeepAspectRatio);
// Debug output
@@ -1184,7 +1186,6 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
break;
}
case (SCALE_STRETCH): {
- // ignore aspect ratio
source_size.scale(width, height, Qt::IgnoreAspectRatio);
// Debug output
@@ -1192,14 +1193,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
break;
}
case (SCALE_CROP): {
- QSize width_size(width, round(width / (float(source_size.width()) / float(source_size.height()))));
- QSize height_size(round(height / (float(source_size.height()) / float(source_size.width()))), height);
-
- // respect aspect ratio
- if (width_size.width() >= width && width_size.height() >= height)
- source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio);
- else
- source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio);
+ source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
@@ -1276,7 +1270,6 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
float origin_x_value = origin_x.GetValue(frame->number);
float origin_y_value = origin_y.GetValue(frame->number);
- bool transformed = false;
QTransform transform;
// Transform source image (if needed)
@@ -1285,7 +1278,6 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
if (!isEqual(x, 0) || !isEqual(y, 0)) {
// TRANSLATE/MOVE CLIP
transform.translate(x, y);
- transformed = true;
}
if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
@@ -1296,7 +1288,6 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
transform.rotate(r);
transform.shear(shear_x_value, shear_y_value);
transform.translate(-origin_x_offset,-origin_y_offset);
- transformed = true;
}
// SCALE CLIP (if needed)
@@ -1305,24 +1296,21 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
transform.scale(source_width_scale, source_height_scale);
- transformed = true;
}
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number, "transformed", transformed);
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
- std::shared_ptr new_image;
- new_image = std::shared_ptr(new QImage(QSize(width, height), source_image->format()));
+ auto new_image = std::make_shared(QSize(width, height), source_image->format());
new_image->fill(QColor(QString::fromStdString("#00000000")));
// Load timeline's new frame image into a QPainter
QPainter painter(new_image.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
- // Apply transform (translate, rotate, scale)... if any
- if (transformed)
- painter.setTransform(transform);
+ // Apply transform (translate, rotate, scale)
+ painter.setTransform(transform);
// Composite a new layer onto the image
painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
diff --git a/src/Clip.h b/src/Clip.h
index 373f2db0..9bcb18bb 100644
--- a/src/Clip.h
+++ b/src/Clip.h
@@ -192,7 +192,7 @@ namespace openshot {
virtual ~Clip();
/// Get the cache object used by this clip
- CacheMemory* GetCache() { return &cache; };
+ CacheMemory* GetCache() override { return &cache; };
/// Determine if reader is open or closed
bool IsOpen() override { return is_open; };
@@ -232,10 +232,10 @@ namespace openshot {
/// @returns The modified openshot::Frame object
/// @param frame This is ignored on Clip, due to caching optimizations. This frame instance is clobbered with the source frame.
/// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number);
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override;
/// Open the internal reader
- void Open();
+ void Open() override;
/// @brief Set the current reader
/// @param new_reader The reader to be used by this clip
diff --git a/src/ClipBase.h b/src/ClipBase.h
index 11d2271f..ac2c0237 100644
--- a/src/ClipBase.h
+++ b/src/ClipBase.h
@@ -41,8 +41,8 @@
#include "Json.h"
#include "TimelineBase.h"
-namespace openshot {
+namespace openshot {
/**
* @brief This abstract class is the base class, used by all clips in libopenshot.
*
diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
index 096976cf..b7e18daf 100644
--- a/src/FrameMapper.cpp
+++ b/src/FrameMapper.cpp
@@ -117,15 +117,6 @@ void FrameMapper::Init()
// Clear cache
final_cache.Clear();
- // Get clip position from parent clip (if any)
- float clipPosition = 0.0;
- float clipStart = 0.0;
- Clip *parent = (Clip *) ParentClip();
- if (parent) {
- clipPosition = parent->Position();
- clipStart = parent->Start();
- }
-
// Some framerates are handled special, and some use a generic Keyframe curve to
// map the framerates. These are the special framerates:
if ((fabs(original.ToFloat() - 24.0) < 1e-7 || fabs(original.ToFloat() - 25.0) < 1e-7 || fabs(original.ToFloat() - 30.0) < 1e-7) &&
@@ -267,12 +258,12 @@ void FrameMapper::Init()
// the original sample rate.
int64_t end_samples_frame = start_samples_frame;
int end_samples_position = start_samples_position;
- int remaining_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number, clipPosition, clipStart), target, reader->info.sample_rate, reader->info.channels);
+ int remaining_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number), target, reader->info.sample_rate, reader->info.channels);
while (remaining_samples > 0)
{
// get original samples
- int original_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(end_samples_frame, clipPosition, clipStart), original, reader->info.sample_rate, reader->info.channels) - end_samples_position;
+ int original_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(end_samples_frame), original, reader->info.sample_rate, reader->info.channels) - end_samples_position;
// Enough samples
if (original_samples >= remaining_samples)
@@ -292,12 +283,12 @@ void FrameMapper::Init()
// Create the sample mapping struct
- SampleRange Samples = {start_samples_frame, start_samples_position, end_samples_frame, end_samples_position, Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number, clipPosition, clipStart), target, reader->info.sample_rate, reader->info.channels)};
+ SampleRange Samples = {start_samples_frame, start_samples_position, end_samples_frame, end_samples_position, Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number), target, reader->info.sample_rate, reader->info.channels)};
// Reset the audio variables
start_samples_frame = end_samples_frame;
start_samples_position = end_samples_position + 1;
- if (start_samples_position >= Frame::GetSamplesPerFrame(AdjustFrameNumber(start_samples_frame, clipPosition, clipStart), original, reader->info.sample_rate, reader->info.channels))
+ if (start_samples_position >= Frame::GetSamplesPerFrame(AdjustFrameNumber(start_samples_frame), original, reader->info.sample_rate, reader->info.channels))
{
start_samples_frame += 1; // increment the frame (since we need to wrap onto the next one)
start_samples_position = 0; // reset to 0, since we wrapped
@@ -363,17 +354,8 @@ std::shared_ptr FrameMapper::GetOrCreateFrame(int64_t number)
{
std::shared_ptr new_frame;
- // Get clip position from parent clip (if any)
- float clipPosition = 0.0;
- float clipStart = 0.0;
- Clip *parent = (Clip *) ParentClip();
- if (parent) {
- clipPosition = parent->Position();
- clipStart = parent->Start();
- }
-
// Init some basic properties about this frame (keep sample rate and # channels the same as the original reader for now)
- int samples_in_frame = Frame::GetSamplesPerFrame(AdjustFrameNumber(number, clipPosition, clipStart), target, reader->info.sample_rate, reader->info.channels);
+ int samples_in_frame = Frame::GetSamplesPerFrame(AdjustFrameNumber(number), target, reader->info.sample_rate, reader->info.channels);
try {
// Debug output
@@ -423,15 +405,6 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame)
final_frame = final_cache.GetFrame(requested_frame);
if (final_frame) return final_frame;
- // Get clip position from parent clip (if any)
- float clipPosition = 0.0;
- float clipStart = 0.0;
- Clip *parent = (Clip *) ParentClip();
- if (parent) {
- clipPosition = parent->Position();
- clipStart = parent->Start();
- }
-
// Minimum number of frames to process (for performance reasons)
// Dialing this down to 1 for now, as it seems to improve performance, and reduce export crashes
int minimum_frames = 1;
@@ -455,7 +428,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame)
// Get # of channels in the actual frame
int channels_in_frame = mapped_frame->GetAudioChannelsCount();
- int samples_in_frame = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number, clipPosition, clipStart), target, mapped_frame->SampleRate(), channels_in_frame);
+ int samples_in_frame = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number), target, mapped_frame->SampleRate(), channels_in_frame);
// Determine if mapped frame is identical to source frame
// including audio sample distribution according to mapped.Samples,
@@ -778,15 +751,6 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig
// Recalculate mappings
Init();
- // Get clip position from parent clip (if any)
- float clipPosition = 0.0;
- float clipStart = 0.0;
- Clip *parent = (Clip *) ParentClip();
- if (parent) {
- clipPosition = parent->Position();
- clipStart = parent->Start();
- }
-
// Init audio buffers / variables
int total_frame_samples = 0;
int channels_in_frame = frame->GetAudioChannelsCount();
@@ -848,7 +812,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig
}
// Update total samples & input frame size (due to bigger or smaller data types)
- total_frame_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame->number, clipPosition, clipStart), target, info.sample_rate, info.channels);
+ total_frame_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame->number), target, info.sample_rate, info.channels);
ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number);
@@ -959,13 +923,23 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig
}
// Adjust frame number for Clip position and start (which can result in a different number)
-int64_t FrameMapper::AdjustFrameNumber(int64_t clip_frame_number, float position, float start) {
+int64_t FrameMapper::AdjustFrameNumber(int64_t clip_frame_number) {
+ // Get clip position from parent clip (if any)
+ float position = 0.0;
+ float start = 0.0;
+ Clip *parent = (Clip *) ParentClip();
+ if (parent) {
+ position = parent->Position();
+ start = parent->Start();
+ }
+
+ // Adjust start frame and position based on parent clip. This ensures the same
+ // frame # is used by mapped readers and clips, when calculating samples per frame. Thus,
+ // this prevents gaps and mismatches in # of samples.
int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
-
int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
- ///std::cout << "Conv Position " << round(position * info.fps.ToDouble()) << " position: " << position << " info::fps: " << info.fps.ToDouble() << std::endl;
return frame_number;
}
diff --git a/src/FrameMapper.h b/src/FrameMapper.h
index 7f6c9c16..5b3e00e3 100644
--- a/src/FrameMapper.h
+++ b/src/FrameMapper.h
@@ -155,7 +155,7 @@ namespace openshot
std::shared_ptr GetOrCreateFrame(int64_t number);
/// Adjust frame number for Clip position and start (which can result in a different number)
- int64_t AdjustFrameNumber(int64_t clip_frame_number, float position, float start);
+ int64_t AdjustFrameNumber(int64_t clip_frame_number);
// Use the original and target frame rates and a pull-down technique to create
// a mapping between the original fields and frames or a video to a new frame rate.
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp
index f817c93f..e1e53f5d 100644
--- a/src/Qt/VideoCacheThread.cpp
+++ b/src/Qt/VideoCacheThread.cpp
@@ -94,8 +94,10 @@ namespace openshot
while (!threadShouldExit() && is_playing) {
// Cache frames before the other threads need them
- // Cache frames up to the max frames
- while (speed == 1 && (position - current_display_frame) < max_frames)
+ // Cache frames up to the max frames. Reset to current position
+ // if cache gets too far away from display frame. Cache frames
+ // even when player is paused (i.e. speed 0).
+ while ((position - current_display_frame) < max_frames)
{
// Only cache up till the max_frames amount... then sleep
try
@@ -104,6 +106,14 @@ namespace openshot
ZmqLogger::Instance()->AppendDebugMethod("VideoCacheThread::run (cache frame)", "position", position, "current_display_frame", current_display_frame, "max_frames", max_frames, "needed_frames", (position - current_display_frame));
// Force the frame to be generated
+ if (reader->GetCache()->GetSmallestFrame()) {
+ int64_t smallest_cached_frame = reader->GetCache()->GetSmallestFrame()->number;
+ if (smallest_cached_frame > current_display_frame) {
+ // Cache position has gotten too far away from current display frame.
+ // Reset the position to the current display frame.
+ position = current_display_frame;
+ }
+ }
reader->GetFrame(position);
}
@@ -113,14 +123,8 @@ namespace openshot
// Ignore out of bounds frame exceptions
}
- // Is cache position behind current display frame?
- if (position < current_display_frame) {
- // Jump ahead
- position = current_display_frame;
- }
-
// Increment frame number
- position++;
+ position++;
}
// Sleep for 1 frame length
diff --git a/src/Qt/VideoCacheThread.h b/src/Qt/VideoCacheThread.h
index 6fbf6d58..f9551bdb 100644
--- a/src/Qt/VideoCacheThread.h
+++ b/src/Qt/VideoCacheThread.h
@@ -45,10 +45,13 @@ namespace openshot
*/
class VideoCacheThread : Thread
{
+ private:
+ std::atomic_int position;
+
+ protected:
std::shared_ptr frame;
int speed;
bool is_playing;
- int64_t position;
int64_t current_display_frame;
ReaderBase *reader;
int max_frames;
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index 45975e1f..abc02b6f 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -246,6 +246,11 @@ void Timeline::AddClip(Clip* clip)
// Assign timeline to clip
clip->ParentTimeline(this);
+ // Clear cache of clip and nested reader (if any)
+ clip->cache.Clear();
+ if (clip->Reader() && clip->Reader()->GetCache())
+ clip->Reader()->GetCache()->Clear();
+
// All clips should be converted to the frame rate of this timeline
if (auto_map_clips)
// Apply framemapper (or update existing framemapper)
@@ -690,6 +695,7 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame)
// Check cache
std::shared_ptr frame;
+ std::lock_guard guard(get_frame_mutex);
#pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
if (frame) {
@@ -719,6 +725,15 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame)
return frame;
}
+ // Check if previous frame was cached? (if not, assume we are seeking somewhere else on the Timeline, and need
+ // to clear all cache (for continuity sake). For example, jumping back to a previous spot can cause issues with audio
+ // data where the new jump location doesn't match up with the previously cached audio data.
+ std::shared_ptr previous_frame = final_cache->GetFrame(requested_frame - 1);
+ if (!previous_frame) {
+ // Seeking to new place on timeline (destroy cache)
+ ClearAllCache();
+ }
+
// Minimum number of frames to process (for performance reasons)
int minimum_frames = OPEN_MP_NUM_PROCESSORS;
diff --git a/src/Timeline.h b/src/Timeline.h
index 22718342..1d156153 100644
--- a/src/Timeline.h
+++ b/src/Timeline.h
@@ -33,6 +33,7 @@
#include
#include
+#include
#include
#include
#include
@@ -175,6 +176,7 @@ namespace openshot {
std::set allocated_frame_mappers; ///< all the frame mappers we allocated and must free
bool managed_cache; ///< Does this timeline instance manage the cache object
std::string path; ///< Optional path of loaded UTF-8 OpenShot JSON project file
+ std::mutex get_frame_mutex; ///< Mutex to protect GetFrame method from different threads calling it
/// Process a new layer of video or audio
void add_layer(std::shared_ptr new_frame, openshot::Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume);
diff --git a/tests/Clip_Tests.cpp b/tests/Clip_Tests.cpp
index 2f2e850c..17757be2 100644
--- a/tests/Clip_Tests.cpp
+++ b/tests/Clip_Tests.cpp
@@ -232,5 +232,27 @@ TEST(Effects)
CHECK_EQUAL(2, (int)c10.Effects().size());
}
+TEST(Verify_Parent_Timeline)
+{
+ Timeline t1(640, 480, Fraction(30,1), 44100, 2, LAYOUT_STEREO);
+
+ // Load clip with video
+ std::stringstream path;
+ path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
+ Clip c1(path.str());
+ c1.Open();
+
+ // Check size of frame image
+ CHECK_EQUAL(c1.GetFrame(1)->GetImage()->width(), 1280);
+ CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 720);
+
+ // Add clip to timeline
+ t1.AddClip(&c1);
+
+ // Check size of frame image (with an associated timeline)
+ CHECK_EQUAL(c1.GetFrame(1)->GetImage()->width(), 640);
+ CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 480);
+}
+
} // SUITE
diff --git a/tests/FFmpegReader_Tests.cpp b/tests/FFmpegReader_Tests.cpp
index d6a2d6e7..b1827432 100644
--- a/tests/FFmpegReader_Tests.cpp
+++ b/tests/FFmpegReader_Tests.cpp
@@ -241,5 +241,35 @@ TEST(Multiple_Open_and_Close)
r.Close();
}
+TEST(Verify_Parent_Timeline)
+{
+ // Create a reader
+ stringstream path;
+ path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
+ FFmpegReader r(path.str());
+ r.Open();
+
+ // Check size of frame image
+ CHECK_EQUAL(r.GetFrame(1)->GetImage()->width(), 1280);
+ CHECK_EQUAL(r.GetFrame(1)->GetImage()->height(), 720);
+ r.GetFrame(1)->GetImage()->save("reader-1.png", "PNG");
+
+ // Create a Clip associated with this reader
+ Clip c1(&r);
+ c1.Open();
+
+ // Check size of frame image (should still be the same)
+ CHECK_EQUAL(r.GetFrame(1)->GetImage()->width(), 1280);
+ CHECK_EQUAL(r.GetFrame(1)->GetImage()->height(), 720);
+
+ // Create Timeline
+ Timeline t1(640, 480, Fraction(30,1), 44100, 2, LAYOUT_STEREO);
+ t1.AddClip(&c1);
+
+ // Check size of frame image (it should now match the parent timeline)
+ CHECK_EQUAL(r.GetFrame(1)->GetImage()->width(), 640);
+ CHECK_EQUAL(r.GetFrame(1)->GetImage()->height(), 360);
+}
+
} // SUITE(FFmpegReader)
diff --git a/tests/FrameMapper_Tests.cpp b/tests/FrameMapper_Tests.cpp
index 3d2e8779..45e07e3a 100644
--- a/tests/FrameMapper_Tests.cpp
+++ b/tests/FrameMapper_Tests.cpp
@@ -211,113 +211,264 @@ TEST(FrameMapper_resample_audio_48000_to_41000)
map.Close();
}
-TEST(FrameMapper_AudioSample_Distribution)
-{
+TEST (FrameMapper_resample_audio_mapper) {
+ // This test verifies that audio data can be resampled on FrameMapper
+ // instances, even on frame rates that do not divide evenly, and that no audio data is misplaced
+ // or duplicated. We verify this by creating a SIN wave, add those data points to a DummyReader,
+ // and then resample, and compare the result back to the original SIN wave calculation.
+
+ // Create cache object to hold test frames
CacheMemory cache;
+
int OFFSET = 0;
- float AMPLITUDE = 0.2;
+ float AMPLITUDE = 0.75;
double ANGLE = 0.0;
int NUM_SAMPLES = 100;
- //std::cout << "Starting Resample Test" << std::endl;
-
- for (int64_t frame_number = 1; frame_number <= 90; frame_number++)
- {
+ // Let's create some test frames
+ for (int64_t frame_number = 1; frame_number <= 90; frame_number++) {
// Create blank frame (with specific frame #, samples, and channels)
// Sample count should be 44100 / 30 fps = 1470 samples per frame
-
int sample_count = 1470;
std::shared_ptr f(new openshot::Frame(frame_number, sample_count, 2));
// Create test samples with sin wave (predictable values)
float *audio_buffer = new float[sample_count * 2];
-
for (int sample_number = 0; sample_number < sample_count; sample_number++)
{
// Calculate sin wave
- // TODO: I'm using abs(), because calling AddAudio only seems to be adding the positive values and it's bizarre
float sample_value = float(AMPLITUDE * sin(ANGLE) + OFFSET);
- audio_buffer[sample_number] = sample_value;//abs(sample_value);
+ audio_buffer[sample_number] = abs(sample_value);
ANGLE += (2 * M_PI) / NUM_SAMPLES;
-
- // Add custom audio samples to Frame (bool replaceSamples, int destChannel, int destStartSample, const float* source,
- f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1
- f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2
-
- // Add test frame to dummy reader
- cache.Add(f);
}
+
+ // Add custom audio samples to Frame (bool replaceSamples, int destChannel, int destStartSample, const float* source,
+ f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1
+ f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2
+
+ // Add test frame to dummy reader
+ cache.Add(f);
}
+
// Create a default fraction (should be 1/1)
openshot::DummyReader r(openshot::Fraction(30, 1), 1920, 1080, 44100, 2, 30.0, &cache);
- r.info.has_audio = true;
r.Open(); // Open the reader
- // Map to 24 fps, which should create a variable # of samples per frame
- ///FrameMapper map(&r, Fraction(24, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
- //map.info.has_audio = true;
- //map.Open();
+ // Sample rates
+ vector arr = { 44100, 16000 };
+ for (auto& rate : arr) {
+ // Reset SIN wave
+ ANGLE = 0.0;
- Timeline t1(1920, 1080, Fraction(24, 1), 44100, 2, LAYOUT_STEREO);
+ // Map to 24 fps, which should create a variable # of samples per frame
+ FrameMapper map(&r, Fraction(24,1), PULLDOWN_NONE, rate, 2, LAYOUT_STEREO);
+ map.info.has_audio = true;
+ map.Open();
- Clip c1;
- c1.Reader(&r);
- c1.Layer(1);
- c1.Position(0.0);
- c1.Start(0.0);
- c1.End(10.0);
+ // Calculating how much the initial sample rate has changed
+ float resample_multiplier = ((float) rate / r.info.sample_rate);
- // Create 2nd map to 24 fps, which should create a variable # of samples per frame
+ // Loop through samples, and verify FrameMapper didn't mess up individual sample values
+ int num_samples = 0;
+ for (int frame_index = 1; frame_index <= map.info.fps.ToInt(); frame_index++) {
+ int sample_count = map.GetFrame(frame_index)->GetAudioSamplesCount();
+ for (int sample_index = 0; sample_index < sample_count; sample_index++) {
- //FrameMapper map2(&r, Fraction(24, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
+ // Calculate sin wave
+ float sample_value = abs(float(AMPLITUDE * sin(ANGLE) + OFFSET));
+ ANGLE += (2 * M_PI) / (NUM_SAMPLES * resample_multiplier);
- //map2.info.has_audio = true;
- //map2.Open();
+ // Verify each mapped sample value is correct (after being redistributed by the FrameMapper)
+ float resampled_value = map.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0);
- Clip c2;
- c2.Reader(&r);
- c2.Layer(2);
+ // TODO: 0.1 is much too broad to accurately test this, but without this, all the resampled values are too far away from expected
+ CHECK_CLOSE(sample_value, resampled_value, 0.1);
+ }
+ // Increment sample value
+ num_samples += map.GetFrame(frame_index)->GetAudioSamplesCount();
+ }
- // Position 1 frame into the video, this should mis-align the audio and create situations
- // which overlapping Frame instances have different # of samples for the Timeline.
- // TODO: Moving to 0.0 position, to simplify this test for now
+ // Verify samples per second is correct (i.e. 44100)
+ CHECK_EQUAL(num_samples, map.info.sample_rate);
- c2.Position(0.041666667 * 14);
- c2.Start(1.0);
- c2.End(10.0);
+ // Create Timeline (same specs as reader)
+ Timeline t1(map.info.width, map.info.height, map.info.fps, rate, map.info.channels, map.info.channel_layout);
- // Add clips
- t1.AddClip(&c1);
- t1.AddClip(&c2);
+ Clip c1;
+ c1.Reader(&map);
+ c1.Layer(1);
+ c1.Position(0.0);
+ c1.Start(0.0);
+ c1.End(10.0);
- //t1.SetJson(t1.Json());
- t1.Open();
+ // Create 2nd map to 24 fps, which should create a variable # of samples per frame (for some sample rates)
+ FrameMapper map2(&r, Fraction(24, 1), PULLDOWN_NONE, rate, 2, LAYOUT_STEREO);
+ map2.info.has_audio = true;
+ map2.Open();
- FFmpegWriter w("output-resample.mp4");
+ Clip c2;
+ c2.Reader(&map2);
+ c2.Layer(1);
+ c2.Position(0.0);
+ c2.Start(0.0);
+ c2.End(10.0);
- // Set options
- w.SetAudioOptions("aac", 44100, 192000);
- w.SetVideoOptions("libx264", 1280, 720, Fraction(24,1), 5000000);
+ // Add clips
+ t1.AddClip(&c1);
+ t1.AddClip(&c2);
+ t1.Open();
- // Open writer
- w.Open();
+ // Reset SIN wave
+ ANGLE = 0.0;
- w.WriteFrame(&t1, 5, 50);
+ for (int frame_index = 1; frame_index < 24; frame_index++) {
+ t1.GetFrame(frame_index);
+ for (int sample_index = 0; sample_index < t1.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) {
- //for (int64_t frame_number = 1; frame_number <= 90; frame_number++){
- // w.WriteFrame(t1.GetFrame(frame_number));
- //}
+ // Calculate sin wave
+ float sample_value = abs(float(AMPLITUDE * sin(ANGLE) + OFFSET));
+ ANGLE += (2 * M_PI) / (NUM_SAMPLES * resample_multiplier);
- // Close writer & reader
- w.Close();
+ // Verify each mapped sample value is correct (after being redistributed by the FrameMapper)
+ float resampled_value = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0);
- //map.Close();
- //map2.Close();
+ // TODO: 0.1 is much too broad to accurately test this, but without this, all the resampled values are too far away from expected
+ // Testing wave value X 2, since we have 2 overlapping clips
+ CHECK_CLOSE(sample_value * 2.0, resampled_value, 0.1);
- t1.Close();
+ }
+ }
+
+ // Close mapper
+ map.Close();
+ map2.Close();
+ t1.Close();
+ }
// Clean up
cache.Clear();
-
r.Close();
}
+
+TEST (FrameMapper_redistribute_samples_per_frame) {
+	// This test verifies that audio data is correctly aligned on
+	// FrameMapper instances. We do this by creating 2 Clips based on the same parent reader
+	// (i.e. same exact audio sample data). We use a Timeline to overlap these clips
+	// (and offset 1 clip by 1 frame), and we verify that the correct # of samples is returned by each
+	// Clip Frame instance. In the past, FrameMappers would sometimes generate the wrong # of samples
+	// in a frame, and the Timeline receive mismatching # of audio samples from 2 or more clips...
+	// causing audio data to be truncated and lost (i.e. creating a pop).
+
+	// Create cache object to hold test frames
+	CacheMemory cache;
+
+	// Let's create some test frames
+	int sample_value = 0;
+	for (int64_t frame_number = 1; frame_number <= 90; frame_number++) {
+		// Create blank frame (with specific frame #, samples, and channels)
+		// Sample count should be 44100 / 30 fps = 1470 samples per frame
+		int sample_count = 1470;
+		std::shared_ptr<openshot::Frame> f(new openshot::Frame(frame_number, sample_count, 2));
+
+		// Create test samples with incrementing value
+		float *audio_buffer = new float[sample_count];
+		for (int64_t sample_number = 0; sample_number < sample_count; sample_number++) {
+			audio_buffer[sample_number] = sample_value + sample_number;
+		}
+
+		// Increment counter
+		sample_value += sample_count;
+
+		// Add custom audio samples to Frame (bool replaceSamples, int destChannel, int destStartSample, const float* source,
+		f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1
+		f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2
+
+		// Free the temp buffer (AddAudio copies the sample data into the Frame)
+		delete[] audio_buffer;
+
+		// Add test frame to dummy reader
+		cache.Add(f);
+	}
+
+	// Create a default fraction (should be 1/1)
+	openshot::DummyReader r(openshot::Fraction(30, 1), 1920, 1080, 44100, 2, 30.0, &cache);
+	r.Open(); // Open the reader
+
+	// Frame rates to map the 30 fps reader to
+	vector<int> arr = { 24, 30, 60 };
+	for (auto& fps : arr) {
+		// Map to the target fps, which should create a variable # of samples per frame
+		FrameMapper map(&r, Fraction(fps, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
+		map.info.has_audio = true;
+		map.Open();
+
+		// Loop through samples, and verify FrameMapper didn't mess up individual sample values
+		sample_value = 0;
+		for (int frame_index = 1; frame_index <= map.info.fps.ToInt(); frame_index++) {
+			for (int sample_index = 0; sample_index < map.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) {
+				// Verify each mapped sample value is correct (after being redistributed by the FrameMapper)
+				CHECK_EQUAL(sample_value + sample_index, map.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0));
+			}
+			// Increment sample value
+			sample_value += map.GetFrame(frame_index)->GetAudioSamplesCount();
+		}
+
+		// Verify samples per second is correct (i.e. 44100)
+		CHECK_EQUAL(sample_value, map.info.sample_rate);
+
+		// Create Timeline (same specs as reader)
+		Timeline t1(map.info.width, map.info.height, map.info.fps, 44100, map.info.channels, map.info.channel_layout);
+
+		Clip c1;
+		c1.Reader(&map);
+		c1.Layer(1);
+		c1.Position(0.0);
+		c1.Start(0.0);
+		c1.End(10.0);
+
+		// Create 2nd mapper to the same fps, which should create a variable # of samples per frame
+		FrameMapper map2(&r, Fraction(fps, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
+		map2.info.has_audio = true;
+		map2.Open();
+
+		Clip c2;
+		c2.Reader(&map2);
+		c2.Layer(1);
+		// Position 1 frame into the video, this should mis-align the audio and create situations
+		// which overlapping Frame instances have different # of samples for the Timeline.
+		c2.Position(map2.info.video_timebase.ToFloat());
+		c2.Start(0.0);
+		c2.End(10.0);
+
+		// Add clips
+		t1.AddClip(&c1);
+		t1.AddClip(&c2);
+		t1.Open();
+
+		// Loop through samples, and verify Timeline didn't mess up individual sample values
+		int previous_sample_value = 0;
+		for (int frame_index = 2; frame_index < 24; frame_index++) {
+			t1.GetFrame(frame_index);
+			for (int sample_index = 0; sample_index < t1.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) {
+				int sample_diff = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0) - previous_sample_value;
+				if (previous_sample_value == 0) {
+					// First sample has no valid previous value to diff against; force a pass
+					sample_diff = 2;
+				}
+
+				// Check if sample_value - previous_value == 2
+				// This should be true, because the DummyReader is added twice to the Timeline, and is overlapping
+				// This should be an ever increasing linear curve, increasing by 2 each sample on the Timeline
+				CHECK_EQUAL(2, sample_diff);
+
+				// Set previous sample value
+				previous_sample_value = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0);
+			}
+		}
+
+		// Close mapper
+		map.Close();
+		map2.Close();
+		t1.Close();
+	}
+
+	// Clean up
+	cache.Clear();
+	r.Close();
+}
\ No newline at end of file