From 2a90aa09a93c62c4b1eeb059cf13f55c12b1fa53 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Mon, 19 Apr 2021 20:38:03 -0400 Subject: [PATCH 01/71] ReaderBase: Make DisplayInfo testable - The function now takes a pointer to the output stream it will write to. The _default_ for that argument is a pointer to std::cout. - Any unit tests which wish to test the functionality can capture the output by passing an alternate buffer: std::stringstream output; reader.DisplayInfo(&output); CHECK(output.str() == "Expected output"); --- src/ReaderBase.cpp | 86 ++++++++++++++++++++++-------------------- src/ReaderBase.h | 8 ++-- tests/FFmpegReader.cpp | 46 ++++++++++++++++++---- tests/ReaderBase.cpp | 2 +- 4 files changed, 88 insertions(+), 54 deletions(-) diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp index 127fefbe..63b5cada 100644 --- a/src/ReaderBase.cpp +++ b/src/ReaderBase.cpp @@ -28,8 +28,14 @@ * along with OpenShot Library. If not, see . */ +#include +#include +#include + #include "ReaderBase.h" +#include "Json.h" + using namespace openshot; /// Constructor for the base reader, where many things are initialized. 
@@ -67,49 +73,49 @@ ReaderBase::ReaderBase() } // Display file information -void ReaderBase::DisplayInfo() { - std::cout << std::fixed << std::setprecision(2) << std::boolalpha; - std::cout << "----------------------------" << std::endl; - std::cout << "----- File Information -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Has Video: " << info.has_video << std::endl; - std::cout << "--> Has Audio: " << info.has_audio << std::endl; - std::cout << "--> Has Single Image: " << info.has_single_image << std::endl; - std::cout << "--> Duration: " << info.duration << " Seconds" << std::endl; - std::cout << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "----- Video Attributes -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Width: " << info.width << std::endl; - std::cout << "--> Height: " << info.height << std::endl; - std::cout << "--> Pixel Format: " << info.pixel_format << std::endl; - std::cout << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl; - std::cout << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl; - std::cout << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl; - std::cout << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl; - std::cout << "--> Video Codec: " << info.vcodec << std::endl; - std::cout << "--> Video Length: " << info.video_length << " Frames" << std::endl; - std::cout << "--> Video Stream Index: " << info.video_stream_index << std::endl; - std::cout << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << 
info.video_timebase.den << ")" << std::endl; - std::cout << "--> Interlaced: " << info.interlaced_frame << std::endl; - std::cout << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "----- Audio Attributes -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Audio Codec: " << info.acodec << std::endl; - std::cout << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl; - std::cout << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl; - std::cout << "--> # of Channels: " << info.channels << std::endl; - std::cout << "--> Channel Layout: " << info.channel_layout << std::endl; - std::cout << "--> Audio Stream Index: " << info.audio_stream_index << std::endl; - std::cout << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--------- Metadata ---------" << std::endl; - std::cout << "----------------------------" << std::endl; +void ReaderBase::DisplayInfo(std::ostream* out) { + *out << std::fixed << std::setprecision(2) << std::boolalpha; + *out << "----------------------------" << std::endl; + *out << "----- File Information -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << "--> Has Video: " << info.has_video << std::endl; + *out << "--> Has Audio: " << info.has_audio << std::endl; + *out << "--> Has Single Image: " << info.has_single_image << std::endl; + *out << "--> Duration: " << info.duration << " Seconds" << std::endl; + *out << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl; + *out << "----------------------------" << std::endl; + *out << "----- Video Attributes -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << 
"--> Width: " << info.width << std::endl; + *out << "--> Height: " << info.height << std::endl; + *out << "--> Pixel Format: " << info.pixel_format << std::endl; + *out << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl; + *out << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl; + *out << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl; + *out << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl; + *out << "--> Video Codec: " << info.vcodec << std::endl; + *out << "--> Video Length: " << info.video_length << " Frames" << std::endl; + *out << "--> Video Stream Index: " << info.video_stream_index << std::endl; + *out << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl; + *out << "--> Interlaced: " << info.interlaced_frame << std::endl; + *out << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl; + *out << "----------------------------" << std::endl; + *out << "----- Audio Attributes -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << "--> Audio Codec: " << info.acodec << std::endl; + *out << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl; + *out << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl; + *out << "--> # of Channels: " << info.channels << std::endl; + *out << "--> Channel Layout: " << info.channel_layout << std::endl; + *out << "--> Audio Stream Index: " << info.audio_stream_index << std::endl; + *out << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl; + *out << "----------------------------" << 
std::endl; + *out << "--------- Metadata ---------" << std::endl; + *out << "----------------------------" << std::endl; // Iterate through metadata for (auto it : info.metadata) - std::cout << "--> " << it.first << ": " << it.second << std::endl; + *out << "--> " << it.first << ": " << it.second << std::endl; } // Generate Json::Value for this object diff --git a/src/ReaderBase.h b/src/ReaderBase.h index 7b7847a8..4d2d3afa 100644 --- a/src/ReaderBase.h +++ b/src/ReaderBase.h @@ -31,11 +31,9 @@ #ifndef OPENSHOT_READER_BASE_H #define OPENSHOT_READER_BASE_H -#include -#include #include -#include -#include +#include + #include "CacheMemory.h" #include "ChannelLayouts.h" #include "ClipBase.h" @@ -120,7 +118,7 @@ namespace openshot virtual void Close() = 0; /// Display file information in the standard output stream (stdout) - void DisplayInfo(); + void DisplayInfo(std::ostream* out=&std::cout); /// Get the cache object used by this reader (note: not all readers use cache) virtual openshot::CacheBase* GetCache() = 0; diff --git a/tests/FFmpegReader.cpp b/tests/FFmpegReader.cpp index 217d601c..7911487a 100644 --- a/tests/FFmpegReader.cpp +++ b/tests/FFmpegReader.cpp @@ -39,7 +39,6 @@ #include "Timeline.h" #include "Json.h" -using namespace std; using namespace openshot; TEST_CASE( "Invalid_Path", "[libopenshot][ffmpegreader]" ) @@ -51,7 +50,7 @@ TEST_CASE( "Invalid_Path", "[libopenshot][ffmpegreader]" ) TEST_CASE( "GetFrame_Before_Opening", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "piano.wav"; FFmpegReader r(path.str()); @@ -62,7 +61,7 @@ TEST_CASE( "GetFrame_Before_Opening", "[libopenshot][ffmpegreader]" ) TEST_CASE( "Check_Audio_File", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "piano.wav"; FFmpegReader r(path.str()); r.Open(); @@ -92,7 +91,7 @@ TEST_CASE( "Check_Audio_File", 
"[libopenshot][ffmpegreader]" ) TEST_CASE( "Check_Video_File", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "test.mp4"; FFmpegReader r(path.str()); r.Open(); @@ -138,7 +137,7 @@ TEST_CASE( "Check_Video_File", "[libopenshot][ffmpegreader]" ) TEST_CASE( "Seek", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; FFmpegReader r(path.str()); r.Open(); @@ -195,7 +194,7 @@ TEST_CASE( "Seek", "[libopenshot][ffmpegreader]" ) TEST_CASE( "Frame_Rate", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; FFmpegReader r(path.str()); r.Open(); @@ -211,7 +210,7 @@ TEST_CASE( "Frame_Rate", "[libopenshot][ffmpegreader]" ) TEST_CASE( "Multiple_Open_and_Close", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; FFmpegReader r(path.str()); r.Open(); @@ -247,7 +246,7 @@ TEST_CASE( "Multiple_Open_and_Close", "[libopenshot][ffmpegreader]" ) TEST_CASE( "verify parent Timeline", "[libopenshot][ffmpegreader]" ) { // Create a reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; FFmpegReader r(path.str()); r.Open(); @@ -273,3 +272,34 @@ TEST_CASE( "verify parent Timeline", "[libopenshot][ffmpegreader]" ) CHECK(r.GetFrame(1)->GetImage()->width() == 640); CHECK(r.GetFrame(1)->GetImage()->height() == 360); } + +TEST_CASE( "DisplayInfo", "[libopenshot][clip]" ) +{ + // Create a reader + std::stringstream path; + path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; + FFmpegReader r(path.str()); + r.Open(); + + std::string expected(R"(---------------------------- +----- File Information ----- +---------------------------- +--> Has Video: true +--> Has Audio: 
true +--> Has Single Image: false +--> Duration: 51.95 Seconds +--> File Size: 7.26 MB +---------------------------- +----- Video Attributes ----- +---------------------------- +--> Width: 1280 +--> Height: 720)"); + + // Store the DisplayInfo() text in 'output' + std::stringstream output; + r.DisplayInfo(&output); + + // Compare a [0, expected.size()) substring of output to expected + auto compare_value = output.str().compare(0, expected.size(), expected); + CHECK(compare_value == 0); +} diff --git a/tests/ReaderBase.cpp b/tests/ReaderBase.cpp index 94880e00..d071d36d 100644 --- a/tests/ReaderBase.cpp +++ b/tests/ReaderBase.cpp @@ -42,7 +42,7 @@ using namespace openshot; // Since it is not possible to instantiate an abstract class, this test creates // a new derived class, in order to test the base class file info struct. -TEST_CASE( "ReaderBase_Derived_Class", "[libopenshot][readerbase]" ) +TEST_CASE( "derived class", "[libopenshot][readerbase]" ) { // Create a new derived class from type ReaderBase class TestReader : public ReaderBase From 9d79b394c4835c57d372a695efa4cbe974f44767 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Mon, 19 Apr 2021 20:57:47 -0400 Subject: [PATCH 02/71] Make remaining print functions testable --- src/Color.cpp | 2 ++ src/EffectBase.cpp | 23 ++++++++------ src/EffectBase.h | 2 +- src/FrameMapper.cpp | 18 +++++++++-- src/FrameMapper.h | 4 +-- src/KeyFrame.cpp | 22 ++++++------- src/KeyFrame.h | 6 ++-- src/WriterBase.cpp | 77 +++++++++++++++++++++++---------------------- src/WriterBase.h | 4 +-- 9 files changed, 89 insertions(+), 69 deletions(-) diff --git a/src/Color.cpp b/src/Color.cpp index e848f1f7..c877cbd4 100644 --- a/src/Color.cpp +++ b/src/Color.cpp @@ -28,6 +28,8 @@ * along with OpenShot Library. If not, see . 
*/ +#include + #include "Color.h" #include "Exceptions.h" diff --git a/src/EffectBase.cpp b/src/EffectBase.cpp index b75a0820..a475c7eb 100644 --- a/src/EffectBase.cpp +++ b/src/EffectBase.cpp @@ -28,6 +28,9 @@ * along with OpenShot Library. If not, see . */ +#include +#include + #include "EffectBase.h" #include "Exceptions.h" @@ -57,16 +60,16 @@ void EffectBase::InitEffectInfo() } // Display file information -void EffectBase::DisplayInfo() { - std::cout << std::fixed << std::setprecision(2) << std::boolalpha; - std::cout << "----------------------------" << std::endl; - std::cout << "----- Effect Information -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Name: " << info.name << std::endl; - std::cout << "--> Description: " << info.description << std::endl; - std::cout << "--> Has Video: " << info.has_video << std::endl; - std::cout << "--> Has Audio: " << info.has_audio << std::endl; - std::cout << "----------------------------" << std::endl; +void EffectBase::DisplayInfo(std::ostream* out) { + *out << std::fixed << std::setprecision(2) << std::boolalpha; + *out << "----------------------------" << std::endl; + *out << "----- Effect Information -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << "--> Name: " << info.name << std::endl; + *out << "--> Description: " << info.description << std::endl; + *out << "--> Has Video: " << info.has_video << std::endl; + *out << "--> Has Audio: " << info.has_audio << std::endl; + *out << "----------------------------" << std::endl; } // Constrain a color value from 0 to 255 diff --git a/src/EffectBase.h b/src/EffectBase.h index dc78a7c9..05936500 100644 --- a/src/EffectBase.h +++ b/src/EffectBase.h @@ -87,7 +87,7 @@ namespace openshot EffectInfoStruct info; /// Display effect information in the standard output stream (stdout) - void DisplayInfo(); + void DisplayInfo(std::ostream* out=&std::cout); /// Constrain a color value from 0 to 255 
int constrain(int color_value); diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index 0e3b0272..832f794a 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -28,6 +28,10 @@ * along with OpenShot Library. If not, see . */ +#include +#include +#include + #include "FrameMapper.h" #include "Exceptions.h" #include "Clip.h" @@ -611,7 +615,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) return final_cache.GetFrame(requested_frame); } -void FrameMapper::PrintMapping() +void FrameMapper::PrintMapping(std::ostream* out) { // Check if mappings are dirty (and need to be recalculated) if (is_dirty) @@ -622,8 +626,16 @@ void FrameMapper::PrintMapping() for (float map = 1; map <= frames.size(); map++) { MappedFrame frame = frames[map - 1]; - cout << "Target frame #: " << map << " mapped to original frame #:\t(" << frame.Odd.Frame << " odd, " << frame.Even.Frame << " even)" << endl; - cout << " - Audio samples mapped to frame " << frame.Samples.frame_start << ":" << frame.Samples.sample_start << " to frame " << frame.Samples.frame_end << ":" << frame.Samples.sample_end << endl; + *out << "Target frame #: " << map + << " mapped to original frame #:\t(" + << frame.Odd.Frame << " odd, " + << frame.Even.Frame << " even)" << std::endl; + + *out << " - Audio samples mapped to frame " + << frame.Samples.frame_start << ":" + << frame.Samples.sample_start << " to frame " + << frame.Samples.frame_end << ":" + << frame.Samples.sample_end << endl; } } diff --git a/src/FrameMapper.h b/src/FrameMapper.h index 62615cfb..813c644f 100644 --- a/src/FrameMapper.h +++ b/src/FrameMapper.h @@ -33,9 +33,9 @@ #include #include -#include #include #include + #include "CacheMemory.h" #include "ReaderBase.h" #include "Frame.h" @@ -211,7 +211,7 @@ namespace openshot void Open() override; /// Print all of the original frames and which new frames they map to - void PrintMapping(); + void PrintMapping(std::ostream* out=&std::cout); /// Get the current reader ReaderBase* 
Reader(); diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp index 4107f2de..ef8b045e 100644 --- a/src/KeyFrame.cpp +++ b/src/KeyFrame.cpp @@ -28,9 +28,6 @@ * along with OpenShot Library. If not, see . */ -#include "KeyFrame.h" -#include "Exceptions.h" - #include #include #include @@ -38,6 +35,9 @@ #include // For std::cout #include // For std::setprecision +#include "KeyFrame.h" +#include "Exceptions.h" + using namespace std; using namespace openshot; @@ -559,21 +559,21 @@ void Keyframe::UpdatePoint(int64_t index, Point p) { AddPoint(p); } -void Keyframe::PrintPoints() const { - cout << fixed << setprecision(4); +void Keyframe::PrintPoints(std::ostream* out) const { + *out << std::fixed << std::setprecision(4); for (std::vector::const_iterator it = Points.begin(); it != Points.end(); it++) { Point p = *it; - cout << p.co.X << "\t" << p.co.Y << endl; + *out << p.co.X << "\t" << p.co.Y << std::endl; } } -void Keyframe::PrintValues() const { - cout << fixed << setprecision(4); - cout << "Frame Number (X)\tValue (Y)\tIs Increasing\tRepeat Numerator\tRepeat Denominator\tDelta (Y Difference)\n"; +void Keyframe::PrintValues(std::ostream* out) const { + *out << std::fixed << std::setprecision(4); + *out << "Frame Number (X)\tValue (Y)\tIs Increasing\tRepeat Numerator\tRepeat Denominator\tDelta (Y Difference)\n"; for (int64_t i = 1; i < GetLength(); ++i) { - cout << i << "\t" << GetValue(i) << "\t" << IsIncreasing(i) << "\t" ; - cout << GetRepeatFraction(i).num << "\t" << GetRepeatFraction(i).den << "\t" << GetDelta(i) << "\n"; + *out << i << "\t" << GetValue(i) << "\t" << IsIncreasing(i) << "\t" ; + *out << GetRepeatFraction(i).num << "\t" << GetRepeatFraction(i).den << "\t" << GetDelta(i) << "\n"; } } diff --git a/src/KeyFrame.h b/src/KeyFrame.h index 6da34cac..45624dd2 100644 --- a/src/KeyFrame.h +++ b/src/KeyFrame.h @@ -31,7 +31,7 @@ #ifndef OPENSHOT_KEYFRAME_H #define OPENSHOT_KEYFRAME_H -#include +#include #include #include "Fraction.h" @@ -160,10 +160,10 @@ namespace 
openshot { void UpdatePoint(int64_t index, Point p); /// Print a list of points - void PrintPoints() const; + void PrintPoints(std::ostream* out=&std::cout) const; /// Print just the Y value of the point's primary coordinate - void PrintValues() const; + void PrintValues(std::ostream* out=&std::cout) const; }; diff --git a/src/WriterBase.cpp b/src/WriterBase.cpp index fff93988..8faab981 100644 --- a/src/WriterBase.cpp +++ b/src/WriterBase.cpp @@ -28,6 +28,9 @@ * along with OpenShot Library. If not, see . */ +#include +#include + #include "WriterBase.h" #include "Exceptions.h" @@ -100,43 +103,43 @@ void WriterBase::CopyReaderInfo(ReaderBase* reader) } // Display file information -void WriterBase::DisplayInfo() { - std::cout << std::fixed << std::setprecision(2) << std::boolalpha; - std::cout << "----------------------------" << std::endl; - std::cout << "----- File Information -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Has Video: " << info.has_video << std::endl; - std::cout << "--> Has Audio: " << info.has_audio << std::endl; - std::cout << "--> Has Single Image: " << info.has_single_image << std::endl; - std::cout << "--> Duration: " << info.duration << " Seconds" << std::endl; - std::cout << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "----- Video Attributes -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Width: " << info.width << std::endl; - std::cout << "--> Height: " << info.height << std::endl; - std::cout << "--> Pixel Format: " << info.pixel_format << std::endl; - std::cout << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl; - std::cout << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl; - std::cout << "--> Pixel Ratio: " << 
info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl; - std::cout << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl; - std::cout << "--> Video Codec: " << info.vcodec << std::endl; - std::cout << "--> Video Length: " << info.video_length << " Frames" << std::endl; - std::cout << "--> Video Stream Index: " << info.video_stream_index << std::endl; - std::cout << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl; - std::cout << "--> Interlaced: " << info.interlaced_frame << std::endl; - std::cout << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "----- Audio Attributes -----" << std::endl; - std::cout << "----------------------------" << std::endl; - std::cout << "--> Audio Codec: " << info.acodec << std::endl; - std::cout << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl; - std::cout << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl; - std::cout << "--> # of Channels: " << info.channels << std::endl; - std::cout << "--> Channel Layout: " << info.channel_layout << std::endl; - std::cout << "--> Audio Stream Index: " << info.audio_stream_index << std::endl; - std::cout << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl; - std::cout << "----------------------------" << std::endl; +void WriterBase::DisplayInfo(std::ostream* out) { + *out << std::fixed << std::setprecision(2) << std::boolalpha; + *out << "----------------------------" << std::endl; + *out << "----- File Information -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << "--> Has Video: 
" << info.has_video << std::endl; + *out << "--> Has Audio: " << info.has_audio << std::endl; + *out << "--> Has Single Image: " << info.has_single_image << std::endl; + *out << "--> Duration: " << info.duration << " Seconds" << std::endl; + *out << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl; + *out << "----------------------------" << std::endl; + *out << "----- Video Attributes -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << "--> Width: " << info.width << std::endl; + *out << "--> Height: " << info.height << std::endl; + *out << "--> Pixel Format: " << info.pixel_format << std::endl; + *out << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl; + *out << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl; + *out << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl; + *out << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl; + *out << "--> Video Codec: " << info.vcodec << std::endl; + *out << "--> Video Length: " << info.video_length << " Frames" << std::endl; + *out << "--> Video Stream Index: " << info.video_stream_index << std::endl; + *out << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl; + *out << "--> Interlaced: " << info.interlaced_frame << std::endl; + *out << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl; + *out << "----------------------------" << std::endl; + *out << "----- Audio Attributes -----" << std::endl; + *out << "----------------------------" << std::endl; + *out << "--> Audio Codec: " << info.acodec << std::endl; + *out << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << 
" kb/s" << std::endl; + *out << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl; + *out << "--> # of Channels: " << info.channels << std::endl; + *out << "--> Channel Layout: " << info.channel_layout << std::endl; + *out << "--> Audio Stream Index: " << info.audio_stream_index << std::endl; + *out << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl; + *out << "----------------------------" << std::endl; } // Generate JSON string of this object diff --git a/src/WriterBase.h b/src/WriterBase.h index d18f329d..3939ca6e 100644 --- a/src/WriterBase.h +++ b/src/WriterBase.h @@ -32,7 +32,7 @@ #define OPENSHOT_WRITER_BASE_H #include -#include + #include "ChannelLayouts.h" #include "Fraction.h" #include "Frame.h" @@ -113,7 +113,7 @@ namespace openshot void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object /// Display file information in the standard output stream (stdout) - void DisplayInfo(); + void DisplayInfo(std::ostream* out=&std::cout); /// Open the writer (and start initializing streams) virtual void Open() = 0; From aac42a7a0c108c46aa8883eb2f94cd434dc5ecd3 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Wed, 5 May 2021 12:51:48 -0400 Subject: [PATCH 03/71] tests/FrameMapper: Add PrintMapping() test --- tests/FrameMapper.cpp | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/tests/FrameMapper.cpp b/tests/FrameMapper.cpp index 20ca76e8..fc1f70d4 100644 --- a/tests/FrameMapper.cpp +++ b/tests/FrameMapper.cpp @@ -191,7 +191,7 @@ TEST_CASE( "30_fps_to_24_fps_Pulldown_None", "[libopenshot][framemapper]" ) TEST_CASE( "resample_audio_48000_to_41000", "[libopenshot][framemapper]" ) { - // Create a reader: 24 fps, 2 channels, 48000 sample rate + // Create a reader std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; FFmpegReader r(path.str()); @@ -619,6 
+619,42 @@ TEST_CASE( "Distribute samples", "[libopenshot][framemapper]" ) { } // for rates } +TEST_CASE( "PrintMapping", "[libopenshot][framemapper]" ) +{ + const std::string expected( + R"(Target frame #: 1 mapped to original frame #: (1 odd, 1 even) + - Audio samples mapped to frame 1:0 to frame 1:1599 +Target frame #: 2 mapped to original frame #: (2 odd, 2 even) + - Audio samples mapped to frame 1:1600 to frame 2:1199 +Target frame #: 3 mapped to original frame #: (2 odd, 3 even) + - Audio samples mapped to frame 2:1200 to frame 3:799 +Target frame #: 4 mapped to original frame #: (3 odd, 4 even) + - Audio samples mapped to frame 3:800 to frame 4:399 +Target frame #: 5 mapped to original frame #: (4 odd, 4 even) + - Audio samples mapped to frame 4:400 to frame 4:1999 +Target frame #: 6 mapped to original frame #: (5 odd, 5 even) + - Audio samples mapped to frame 5:0 to frame 5:1599 +Target frame #: 7 mapped to original frame #: (6 odd, 6 even) + - Audio samples mapped to frame 5:1600 to frame 6:1199 +Target frame #: 8 mapped to original frame #: (6 odd, 7 even) + - Audio samples mapped to frame 6:1200 to frame 7:799 +Target frame #: 9 mapped to original frame #: (7 odd, 8 even) + - Audio samples mapped to frame 7:800 to frame 8:399 +Target frame #: 10 mapped to original frame #: (8 odd, 8 even) + - Audio samples mapped to frame 8:400 to frame 8:1999)"); + + DummyReader r(Fraction(24,1), 720, 480, 48000, 2, 5.0); + // Create mapping 24 fps and 30 fps + FrameMapper mapping( + &r, Fraction(30, 1), PULLDOWN_CLASSIC, 48000, 2, LAYOUT_STEREO); + std::stringstream mapping_out; + mapping.PrintMapping(&mapping_out); + + // Compare a [0, expected.size()) substring of output to expected + auto compare_value = mapping_out.str().compare(0, expected.size(), expected); + CHECK(compare_value == 0); +} + TEST_CASE( "Json", "[libopenshot][framemapper]" ) { DummyReader r(Fraction(30,1), 1280, 720, 48000, 2, 5.0); From 528919027cad09196514b887027fc1a002d3e000 Mon Sep 17 00:00:00 
2001 From: "FeRD (Frank Dana)" Date: Fri, 7 May 2021 23:05:16 -0400 Subject: [PATCH 04/71] Code formatting --- src/FrameMapper.cpp | 115 +++++++++++++++++++++++++++++------------ tests/FFmpegReader.cpp | 2 +- 2 files changed, 82 insertions(+), 35 deletions(-) diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index 832f794a..dde65ddf 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -521,7 +521,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) copy_samples.sample_end += EXTRA_INPUT_SAMPLES; int samples_per_end_frame = Frame::GetSamplesPerFrame(copy_samples.frame_end, original, - reader->info.sample_rate, reader->info.channels); + reader->info.sample_rate, reader->info.channels); if (copy_samples.sample_end >= samples_per_end_frame) { // check for wrapping @@ -537,7 +537,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) copy_samples.sample_start += EXTRA_INPUT_SAMPLES; int samples_per_start_frame = Frame::GetSamplesPerFrame(copy_samples.frame_start, original, - reader->info.sample_rate, reader->info.channels); + reader->info.sample_rate, reader->info.channels); if (copy_samples.sample_start >= samples_per_start_frame) { // check for wrapping @@ -628,14 +628,14 @@ void FrameMapper::PrintMapping(std::ostream* out) MappedFrame frame = frames[map - 1]; *out << "Target frame #: " << map << " mapped to original frame #:\t(" - << frame.Odd.Frame << " odd, " - << frame.Even.Frame << " even)" << std::endl; + << frame.Odd.Frame << " odd, " + << frame.Even.Frame << " even)" << std::endl; *out << " - Audio samples mapped to frame " << frame.Samples.frame_start << ":" - << frame.Samples.sample_start << " to frame " - << frame.Samples.frame_end << ":" - << frame.Samples.sample_end << endl; + << frame.Samples.sample_start << " to frame " + << frame.Samples.frame_end << ":" + << frame.Samples.sample_end << endl; } } @@ -745,7 +745,14 @@ void FrameMapper::SetJsonValue(const Json::Value root) { // Change frame rate or audio mapping 
details void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout) { - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ChangeMapping", "target_fps.num", target_fps.num, "target_fps.den", target_fps.den, "target_pulldown", target_pulldown, "target_sample_rate", target_sample_rate, "target_channels", target_channels, "target_channel_layout", target_channel_layout); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ChangeMapping", + "target_fps.num", target_fps.num, + "target_fps.den", target_fps.den, + "target_pulldown", target_pulldown, + "target_sample_rate", target_sample_rate, + "target_channels", target_channels, + "target_channel_layout", target_channel_layout); // Mark as dirty is_dirty = true; @@ -791,7 +798,13 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig int samples_in_frame = frame->GetAudioSamplesCount(); ChannelLayout channel_layout_in_frame = frame->ChannelsLayout(); - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio", "frame->number", frame->number, "original_frame_number", original_frame_number, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "sample_rate_in_frame", sample_rate_in_frame); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ResampleMappedAudio", + "frame->number", frame->number, + "original_frame_number", original_frame_number, + "channels_in_frame", channels_in_frame, + "samples_in_frame", samples_in_frame, + "sample_rate_in_frame", sample_rate_in_frame); // Get audio sample array float* frame_samples_float = NULL; @@ -827,7 +840,14 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig delete[] frame_samples_float; frame_samples_float = NULL; - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (got sample data from frame)", "frame->number", frame->number, 
"total_frame_samples", total_frame_samples, "target channels", info.channels, "channels_in_frame", channels_in_frame, "target sample_rate", info.sample_rate, "samples_in_frame", samples_in_frame); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ResampleMappedAudio (got sample data from frame)", + "frame->number", frame->number, + "total_frame_samples", total_frame_samples, + "target channels", info.channels, + "channels_in_frame", channels_in_frame, + "target sample_rate", info.sample_rate, + "samples_in_frame", samples_in_frame); // Create input frame (and allocate arrays) @@ -835,8 +855,10 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig AV_RESET_FRAME(audio_frame); audio_frame->nb_samples = total_frame_samples / channels_in_frame; - int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) frame_samples, - audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame, 1); + int buf_size = audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame; + int error_code = avcodec_fill_audio_frame( + audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, + (uint8_t *) frame_samples, buf_size, 1); if (error_code < 0) { @@ -847,7 +869,14 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig // Update total samples & input frame size (due to bigger or smaller data types) total_frame_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame->number), target, info.sample_rate, info.channels); - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number); + ZmqLogger::Instance()->AppendDebugMethod( + 
"FrameMapper::ResampleMappedAudio (adjust # of samples)", + "total_frame_samples", total_frame_samples, + "info.sample_rate", info.sample_rate, + "sample_rate_in_frame", sample_rate_in_frame, + "info.channels", info.channels, + "channels_in_frame", channels_in_frame, + "original_frame_number", original_frame_number); // Create output frame (and allocate arrays) AVFrame *audio_converted = AV_ALLOCATE_FRAME(); @@ -855,32 +884,39 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig audio_converted->nb_samples = total_frame_samples; av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, total_frame_samples, AV_SAMPLE_FMT_S16, 0); - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (preparing for resample)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", AV_SAMPLE_FMT_S16, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ResampleMappedAudio (preparing for resample)", + "in_sample_fmt", AV_SAMPLE_FMT_S16, + "out_sample_fmt", AV_SAMPLE_FMT_S16, + "in_sample_rate", sample_rate_in_frame, + "out_sample_rate", info.sample_rate, + "in_channels", channels_in_frame, + "out_channels", info.channels); int nb_samples = 0; // setup resample context if (!avr) { avr = SWR_ALLOC(); - av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0); - av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0); - av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); - av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); - av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0); - av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0); - av_opt_set_int(avr, "in_channels", channels_in_frame, 0); - av_opt_set_int(avr, "out_channels", info.channels, 0); + av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 
0); + av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0); + av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0); + av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0); + av_opt_set_int(avr, "in_channels", channels_in_frame, 0); + av_opt_set_int(avr, "out_channels", info.channels, 0); SWR_INIT(avr); } // Convert audio samples - nb_samples = SWR_CONVERT(avr, // audio resample context - audio_converted->data, // output data pointers - audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown) - audio_converted->nb_samples, // maximum number of samples that the output buffer can hold - audio_frame->data, // input data pointers - audio_frame->linesize[0], // input plane size, in bytes (0 if unknown) - audio_frame->nb_samples); // number of input samples to convert + nb_samples = SWR_CONVERT(avr, // audio resample context + audio_converted->data, // output data pointers + audio_converted->linesize[0], // output plane size, in bytes. 
(0 if unknown) + audio_converted->nb_samples, // maximum number of samples that the output buffer can hold + audio_frame->data, // input data pointers + audio_frame->linesize[0], // input plane size, in bytes (0 if unknown) + audio_frame->nb_samples); // number of input samples to convert // Create a new array (to hold all resampled S16 audio samples) int16_t* resampled_samples = new int16_t[(nb_samples * info.channels)]; @@ -899,7 +935,14 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig int channel_buffer_size = nb_samples; frame->ResizeAudio(info.channels, channel_buffer_size, info.sample_rate, info.channel_layout); - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (Audio successfully resampled)", "nb_samples", nb_samples, "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "channels_in_frame", channels_in_frame, "info.channels", info.channels, "info.channel_layout", info.channel_layout); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ResampleMappedAudio (Audio successfully resampled)", + "nb_samples", nb_samples, + "total_frame_samples", total_frame_samples, + "info.sample_rate", info.sample_rate, + "channels_in_frame", channels_in_frame, + "info.channels", info.channels, + "info.channel_layout", info.channel_layout); // Array of floats (to hold samples for each channel) float *channel_buffer = new float[channel_buffer_size]; @@ -939,7 +982,10 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig // Add samples to frame for this channel frame->AddAudio(true, channel_filter, 0, channel_buffer, position, 1.0f); - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (Add audio to channel)", "number of samples", position, "channel_filter", channel_filter); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ResampleMappedAudio (Add audio to channel)", + "number of samples", position, + "channel_filter", 
channel_filter); } // Update frame's audio meta data @@ -967,9 +1013,10 @@ int64_t FrameMapper::AdjustFrameNumber(int64_t clip_frame_number) { start = parent->Start(); } - // Adjust start frame and position based on parent clip. This prevents ensures the same - // frame # is used by mapped readers and clips, when calculating samples per frame. Thus, - // this prevents gaps and mismatches in # of samples. + // Adjust start frame and position based on parent clip. + // This ensures the same frame # is used by mapped readers and clips, + // when calculating samples per frame. + // Thus, this prevents gaps and mismatches in # of samples. int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1; int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1; int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame; diff --git a/tests/FFmpegReader.cpp b/tests/FFmpegReader.cpp index 7911487a..747ae0e2 100644 --- a/tests/FFmpegReader.cpp +++ b/tests/FFmpegReader.cpp @@ -273,7 +273,7 @@ TEST_CASE( "verify parent Timeline", "[libopenshot][ffmpegreader]" ) CHECK(r.GetFrame(1)->GetImage()->height() == 360); } -TEST_CASE( "DisplayInfo", "[libopenshot][clip]" ) +TEST_CASE( "DisplayInfo", "[libopenshot][ffmpegreader]" ) { // Create a reader std::stringstream path; From 70ea2659d8bdcd20d9bd37a651f62f0ddf3b2fd5 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 7 May 2021 23:27:57 -0400 Subject: [PATCH 05/71] tests/FFmpegWriter: Add DisplayInfo test --- tests/FFmpegWriter.cpp | 77 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 2 deletions(-) diff --git a/tests/FFmpegWriter.cpp b/tests/FFmpegWriter.cpp index 059bbb4d..adb555b2 100644 --- a/tests/FFmpegWriter.cpp +++ b/tests/FFmpegWriter.cpp @@ -45,7 +45,7 @@ using namespace openshot; TEST_CASE( "Webm", "[libopenshot][ffmpegwriter]" ) { // Reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; 
FFmpegReader r(path.str()); r.Open(); @@ -92,7 +92,7 @@ TEST_CASE( "Webm", "[libopenshot][ffmpegwriter]" ) TEST_CASE( "Options_Overloads", "[libopenshot][ffmpegwriter]" ) { // Reader - stringstream path; + std::stringstream path; path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; FFmpegReader r(path.str()); r.Open(); @@ -129,3 +129,76 @@ TEST_CASE( "Options_Overloads", "[libopenshot][ffmpegwriter]" ) CHECK_FALSE(r1.info.interlaced_frame); CHECK(r1.info.top_field_first == true); } + + +TEST_CASE( "DisplayInfo", "[libopenshot][ffmpegwriter]" ) +{ + // Reader + std::stringstream path; + path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4"; + FFmpegReader r(path.str()); + r.Open(); + + /* WRITER ---------------- */ + FFmpegWriter w("output1.webm"); + + // Set options + w.SetAudioOptions(true, "libvorbis", 44100, 2, LAYOUT_STEREO, 188000); + w.SetVideoOptions( + true, "libvpx", + Fraction(24,1), + 1280, 720, + Fraction(1,1), + false, false, + 30000000); + + // Open writer + w.Open(); + + std::string expected( + R"(---------------------------- +----- File Information ----- +---------------------------- +--> Has Video: true +--> Has Audio: true +--> Has Single Image: false +--> Duration: 0.00 Seconds +--> File Size: 0.00 MB +---------------------------- +----- Video Attributes ----- +---------------------------- +--> Width: 1280 +--> Height: 720 +--> Pixel Format: -1 +--> Frames Per Second: 24.00 (24/1) +--> Video Bit Rate: 30000 kb/s +--> Pixel Ratio: 1.00 (1/1) +--> Display Aspect Ratio: 1.78 (16/9) +--> Video Codec: libvpx +--> Video Length: 0 Frames +--> Video Stream Index: -1 +--> Video Timebase: 0.04 (1/24) +--> Interlaced: false +--> Interlaced: Top Field First: false +---------------------------- +----- Audio Attributes ----- +---------------------------- +--> Audio Codec: libvorbis +--> Audio Bit Rate: 188 kb/s +--> Sample Rate: 44100 Hz +--> # of Channels: 2 +--> Channel Layout: 3 +--> Audio Stream Index: -1 +--> Audio Timebase: 1.00 (1/1) 
+----------------------------)"); + + // Store the DisplayInfo() text in 'output' + std::stringstream output; + w.DisplayInfo(&output); + + w.Close(); + + // Compare a [0, expected.size()) substring of output to expected + auto compare_value = output.str().compare(0, expected.size(), expected); + CHECK(compare_value == 0); +} From bf80251a493b28d6c83643e7735a65ca142aa70a Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 27 Nov 2020 00:33:52 -0500 Subject: [PATCH 06/71] Add operator<< for Coordinate, Fraction, Point --- src/Coordinate.h | 12 ++++++++++++ src/Fraction.h | 13 +++++++++++-- src/Point.h | 25 +++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/Coordinate.h b/src/Coordinate.h index 0a3ba978..f2b8b5fb 100644 --- a/src/Coordinate.h +++ b/src/Coordinate.h @@ -75,6 +75,18 @@ namespace openshot { void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object }; + /// Stream output operator for openshot::Coordinate + template + std::basic_ostream& + operator<<(std::basic_ostream& o, const openshot::Coordinate& co) { + std::basic_ostringstream s; + s.flags(o.flags()); + s.imbue(o.getloc()); + s.precision(o.precision()); + s << "(" << co.X << ", " << co.Y << ")"; + return o << s.str(); + }; + } #endif diff --git a/src/Fraction.h b/src/Fraction.h index fb36e88b..a09db625 100644 --- a/src/Fraction.h +++ b/src/Fraction.h @@ -84,7 +84,16 @@ namespace openshot { Fraction Reciprocal() const; }; - + // Stream output operator for openshot::Fraction + template + std::basic_ostream& + operator<<(std::basic_ostream& o, const openshot::Fraction& frac) { + std::basic_ostringstream s; + s.flags(o.flags()); + s.imbue(o.getloc()); + s.precision(o.precision()); + s << "Fraction(" << frac.num << ", " << frac.den << ")"; + return o << s.str(); + }; } - #endif diff --git a/src/Point.h b/src/Point.h index 1795c469..2602fb9f 100644 --- a/src/Point.h +++ b/src/Point.h @@ -126,6 +126,31 @@ namespace openshot }; + // 
Stream output operator for openshot::Point + template + std::basic_ostream& + operator<<(std::basic_ostream& o, const openshot::Point& p) { + std::basic_ostringstream s; + s.flags(o.flags()); + s.imbue(o.getloc()); + s.precision(o.precision()); + s << "co" << p.co; + switch(p.interpolation) { + case(openshot::LINEAR): + s << " interpolation(LINEAR)"; + break; + case(openshot::CONSTANT): + s << " interpolation(CONSTANT)"; + break; + case(openshot::BEZIER): + s << " interpolation(BEZIER)" + << " handle_left" << p.handle_left + << " handle_right" << p.handle_right; + break; + } + return o << s.str(); + }; + } #endif From 032ca616dcb152e253fa2256201832b1a04a064f Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 27 Nov 2020 00:34:59 -0500 Subject: [PATCH 07/71] Tests: test << for Coordinate, Fraction, Point --- tests/Fraction.cpp | 13 +++++++++++++ tests/Point.cpp | 23 +++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/tests/Fraction.cpp b/tests/Fraction.cpp index 8736abaf..57d37a79 100644 --- a/tests/Fraction.cpp +++ b/tests/Fraction.cpp @@ -32,6 +32,10 @@ #include "Fraction.h" +#include +#include +#include + using namespace std; using namespace openshot; @@ -148,3 +152,12 @@ TEST_CASE( "Reciprocal", "[libopenshot][fraction]" ) CHECK(f1.ToFloat() == Approx(1.77777f).margin(0.00001)); CHECK(f1.ToDouble() == Approx(1.77777f).margin(0.00001)); } + +TEST_CASE( "Operator ostream", "[libopenshot][fraction]" ) +{ + std::stringstream output; + openshot::Fraction f3(30000, 1001); + + output << f3; + CHECK(output.str() == "Fraction(30000, 1001)"); +} diff --git a/tests/Point.cpp b/tests/Point.cpp index 6d53f65b..f4e7792b 100644 --- a/tests/Point.cpp +++ b/tests/Point.cpp @@ -188,3 +188,26 @@ TEST_CASE( "SetJson", "[libopenshot][point]" ) CHECK(p1.handle_type == openshot::HandleType::MANUAL); CHECK(p1.interpolation == openshot::InterpolationType::CONSTANT); } + + +TEST_CASE( "Operator ostream", "[libopenshot][point]" ) +{ + openshot::Coordinate 
c1(10, 5); + + std::stringstream output1; + openshot::Point p1(c1, openshot::InterpolationType::LINEAR); + output1 << p1; + CHECK(output1.str() == "co(10, 5) interpolation(LINEAR)"); + + std::stringstream output2; + openshot::Point p2(c1, openshot::InterpolationType::CONSTANT); + output2 << p2; + CHECK(output2.str() == "co(10, 5) interpolation(CONSTANT)"); + + std::stringstream output3; + openshot::Point p3(c1, openshot::InterpolationType::BEZIER); + output3 << p3; + CHECK( + output3.str() == + "co(10, 5) interpolation(BEZIER) handle_left(0.5, 1) handle_right(0.5, 0)"); +} From f6013666de2802c4d2af1c27799ba5a3ba8a0c99 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 11 Jun 2021 05:30:38 -0400 Subject: [PATCH 08/71] KeyFrame: New PrintPoints() and PrintValues() --- src/KeyFrame.cpp | 67 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 18 deletions(-) diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp index ef8b045e..e25f4d7c 100644 --- a/src/KeyFrame.cpp +++ b/src/KeyFrame.cpp @@ -28,16 +28,18 @@ * along with OpenShot Library. If not, see . 
*/ -#include -#include -#include -#include // For assert() -#include // For std::cout -#include // For std::setprecision - #include "KeyFrame.h" #include "Exceptions.h" +#include // For std::lower_bound, std::move_backward +#include // For std::less, std::less_equal, etc… +#include // For std::swap +#include // For std::accumulate +#include // For assert() +#include // For fabs, round +#include // For std::cout +#include // For std::setprecision + using namespace std; using namespace openshot; @@ -560,21 +562,50 @@ void Keyframe::UpdatePoint(int64_t index, Point p) { } void Keyframe::PrintPoints(std::ostream* out) const { - *out << std::fixed << std::setprecision(4); - for (std::vector::const_iterator it = Points.begin(); it != Points.end(); it++) { - Point p = *it; - *out << p.co.X << "\t" << p.co.Y << std::endl; - } + *out << std::right << std::setprecision(4) << std::setfill(' '); + for (const auto& p : Points) { + *out << std::defaultfloat + << std::setw(6) << p.co.X + << std::setw(14) << std::fixed << p.co.Y + << '\n'; + } + *out << std::flush; } void Keyframe::PrintValues(std::ostream* out) const { - *out << std::fixed << std::setprecision(4); - *out << "Frame Number (X)\tValue (Y)\tIs Increasing\tRepeat Numerator\tRepeat Denominator\tDelta (Y Difference)\n"; + // Column widths + std::vector w{10, 12, 8, 11, 19}; - for (int64_t i = 1; i < GetLength(); ++i) { - *out << i << "\t" << GetValue(i) << "\t" << IsIncreasing(i) << "\t" ; - *out << GetRepeatFraction(i).num << "\t" << GetRepeatFraction(i).den << "\t" << GetDelta(i) << "\n"; - } + *out << std::right << std::setfill(' ') << std::boolalpha + << std::setprecision(4); + // Headings + *out << "│" + << std::setw(w[0]) << "Frame# (X)" << " │" + << std::setw(w[1]) << "Y Value" << " │" + << std::setw(w[2]) << "Delta Y" << " │ " + << std::setw(w[3]) << "Increasing?" 
<< " │ " + << std::setw(w[4]) << std::left << "Repeat Fraction" << std::right + << "│\n"; + // Divider + *out << "├───────────" + << "┼─────────────" + << "┼─────────" + << "┼─────────────" + << "┼────────────────────┤\n"; + + for (int64_t i = 1; i < GetLength(); ++i) { + *out << "│" + << std::setw(w[0]-2) << std::defaultfloat << i + << (Contains(Point(i, 1)) ? " *" : " ") << " │" + << std::setw(w[1]) << std::fixed << GetValue(i) << " │" + << std::setw(w[2]) << std::defaultfloat << std::showpos + << GetDelta(i) << " │ " << std::noshowpos + << std::setw(w[3]) << IsIncreasing(i) << " │ " + << std::setw(w[4]) << std::left << GetRepeatFraction(i) + << std::right << "│\n"; + } + *out << " * = Keyframe point (non-interpolated)\n"; + *out << std::flush; } From 6a1579edd0d8e7bd0588ccd43a56d8fa2f52a16b Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 11 Jun 2021 05:49:42 -0400 Subject: [PATCH 09/71] tests/KeyFrame: Tests for Print__() methods --- tests/KeyFrame.cpp | 71 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/tests/KeyFrame.cpp b/tests/KeyFrame.cpp index 599c973f..3f186729 100644 --- a/tests/KeyFrame.cpp +++ b/tests/KeyFrame.cpp @@ -516,6 +516,75 @@ TEST_CASE( "std::vector constructor", "[libopenshot][keyframe]" ) CHECK(k1.GetValue(10) == Approx(30.0f).margin(0.0001)); } +TEST_CASE( "PrintPoints", "[libopenshot][keyframe]" ) +{ + std::vector points{ + Point(1, 10), + Point(225, 397), + Point(430, -153.4), + Point(999, 12345.678) + }; + Keyframe k1(points); + + std::stringstream output; + k1.PrintPoints(&output); + + const std::string expected = +R"( 1 10.0000 + 225 397.0000 + 430 -153.4000 + 999 12345.6777)"; + + // Ensure the two strings are equal up to the limits of 'expected' + CHECK(output.str().compare(0, expected.size(), expected) == 0); +} + +TEST_CASE( "PrintValues", "[libopenshot][keyframe]" ) +{ + std::vector points{ + Point(1, 10), + Point(225, 397), + Point(430, -153.4), + 
Point(999, 12345.678) + }; + Keyframe k1(points); + + std::stringstream output; + k1.PrintValues(&output); + + const std::string expected = +R"(│Frame# (X) │ Y Value │ Delta Y │ Increasing? │ Repeat Fraction │ +├───────────┼─────────────┼─────────┼─────────────┼────────────────────┤ +│ 1 * │ 10.0000 │ +10 │ true │ Fraction(1, 7) │ +│ 2 │ 10.0104 │ +0 │ true │ Fraction(2, 7) │ +│ 3 │ 10.0414 │ +0 │ true │ Fraction(3, 7) │ +│ 4 │ 10.0942 │ +0 │ true │ Fraction(4, 7) │ +│ 5 │ 10.1665 │ +0 │ true │ Fraction(5, 7) │ +│ 6 │ 10.2633 │ +0 │ true │ Fraction(6, 7) │ +│ 7 │ 10.3794 │ +0 │ true │ Fraction(7, 7) │ +│ 8 │ 10.5193 │ +1 │ true │ Fraction(1, 5) │ +│ 9 │ 10.6807 │ +0 │ true │ Fraction(2, 5) │ +│ 10 │ 10.8636 │ +0 │ true │ Fraction(3, 5) │ +│ 11 │ 11.0719 │ +0 │ true │ Fraction(4, 5) │ +│ 12 │ 11.3021 │ +0 │ true │ Fraction(5, 5) │ +│ 13 │ 11.5542 │ +1 │ true │ Fraction(1, 4) │ +│ 14 │ 11.8334 │ +0 │ true │ Fraction(2, 4) │ +│ 15 │ 12.1349 │ +0 │ true │ Fraction(3, 4) │ +│ 16 │ 12.4587 │ +0 │ true │ Fraction(4, 4) │ +│ 17 │ 12.8111 │ +1 │ true │ Fraction(1, 2) │ +│ 18 │ 13.1863 │ +0 │ true │ Fraction(2, 2) │ +│ 19 │ 13.5840 │ +1 │ true │ Fraction(1, 3) │ +│ 20 │ 14.0121 │ +0 │ true │ Fraction(2, 3) │ +│ 21 │ 14.4632 │ +0 │ true │ Fraction(3, 3) │ +│ 22 │ 14.9460 │ +1 │ true │ Fraction(1, 2) │ +│ 23 │ 15.4522 │ +0 │ true │ Fraction(2, 2) │ +│ 24 │ 15.9818 │ +1 │ true │ Fraction(1, 1) │ +│ 25 │ 16.5446 │ +1 │ true │ Fraction(1, 2) │)"; + + // Ensure the two strings are equal up to the limits of 'expected' + CHECK(output.str().compare(0, expected.size(), expected) == 0); +} + #ifdef USE_OPENCV TEST_CASE( "TrackedObjectBBox init", "[libopenshot][keyframe]" ) { @@ -735,4 +804,4 @@ TEST_CASE( "GetBoxValues", "[libopenshot][keyframe]" ) CHECK(boxValues["h"] == 20.0); CHECK(boxValues["ang"] == 30.0); } -#endif \ No newline at end of file +#endif From e14adb71ba0be036815483d7c4293ab8c2726bf1 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sun, 20 Jun 2021 15:33:58 -0300 Subject: 
[PATCH 10/71] Implemented Initial audio effects - Compressor - Equalizer - Distortion - Noise --- src/CMakeLists.txt | 6 +- src/EffectInfo.cpp | 17 ++ src/Effects.h | 7 + src/Enums.h | 32 ++++ src/Frame.h | 3 +- src/audio_effects/Compressor.cpp | 260 +++++++++++++++++++++++++++++ src/audio_effects/Compressor.h | 126 ++++++++++++++ src/audio_effects/Distortion.cpp | 247 +++++++++++++++++++++++++++ src/audio_effects/Distortion.h | 135 +++++++++++++++ src/audio_effects/Noise.cpp | 149 +++++++++++++++++ src/audio_effects/Noise.h | 104 ++++++++++++ src/audio_effects/ParametricEQ.cpp | 195 ++++++++++++++++++++++ src/audio_effects/ParametricEQ.h | 200 ++++++++++++++++++++++ 13 files changed, 1479 insertions(+), 2 deletions(-) create mode 100644 src/audio_effects/Compressor.cpp create mode 100644 src/audio_effects/Compressor.h create mode 100644 src/audio_effects/Distortion.cpp create mode 100644 src/audio_effects/Distortion.h create mode 100644 src/audio_effects/Noise.cpp create mode 100644 src/audio_effects/Noise.h create mode 100644 src/audio_effects/ParametricEQ.cpp create mode 100644 src/audio_effects/ParametricEQ.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 46663664..135bfe4f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -130,7 +130,11 @@ set(EFFECTS_SOURCES effects/Pixelate.cpp effects/Saturation.cpp effects/Shift.cpp - effects/Wave.cpp) + effects/Wave.cpp + audio_effects/Noise.cpp + audio_effects/Distortion.cpp + audio_effects/ParametricEQ.cpp + audio_effects/Compressor.cpp) # Qt video player components set(QT_PLAYER_SOURCES diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp index fb292dde..4c8d6f33 100644 --- a/src/EffectInfo.cpp +++ b/src/EffectInfo.cpp @@ -88,6 +88,18 @@ EffectBase* EffectInfo::CreateEffect(std::string effect_type) { else if (effect_type == "Wave") return new Wave(); + else if(effect_type == "Noise") + return new Noise(); + + else if(effect_type == "Distortion") + return new Distortion(); + + else if(effect_type 
== "ParametricEQ") + return new ParametricEQ(); + + else if(effect_type == "Compressor") + return new Compressor(); + #ifdef USE_OPENCV else if(effect_type == "Stabilizer") return new Stabilizer(); @@ -124,6 +136,11 @@ Json::Value EffectInfo::JsonValue() { root.append(Saturation().JsonInfo()); root.append(Shift().JsonInfo()); root.append(Wave().JsonInfo()); + /* Audio */ + root.append(Noise().JsonInfo()); + root.append(Distortion().JsonInfo()); + root.append(ParametricEQ().JsonInfo()); + root.append(Compressor().JsonInfo()); #ifdef USE_OPENCV root.append(Stabilizer().JsonInfo()); diff --git a/src/Effects.h b/src/Effects.h index e4abc958..0dca06ba 100644 --- a/src/Effects.h +++ b/src/Effects.h @@ -48,6 +48,13 @@ #include "effects/Shift.h" #include "effects/Wave.h" +/* Audio Effects */ +#include "audio_effects/Noise.h" +#include "audio_effects/Distortion.h" +#include "audio_effects/ParametricEQ.h" +#include "audio_effects/Compressor.h" + +/* OpenCV Effects */ #ifdef USE_OPENCV #include "effects/ObjectDetection.h" #include "effects/Tracker.h" diff --git a/src/Enums.h b/src/Enums.h index 387191ea..5377c6d7 100644 --- a/src/Enums.h +++ b/src/Enums.h @@ -80,5 +80,37 @@ namespace openshot VOLUME_MIX_AVERAGE, ///< Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100% VOLUME_MIX_REDUCE ///< Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%) }; + + + /// This enumeration determines the distortion type of Distortion Effect. + enum DistortionType + { + HARD_CLIPPING, + SOFT_CLIPPING, + EXPONENTIAL, + FULL_WAVE_RECTIFIER, + HALF_WAVE_RECTIFIER, + }; + + /// This enumeration determines the filter type of ParametricEQ Effect. + enum FilterType + { + LOW_PASS, + HIGH_PASS, + LOW_SHELF, + HIGH_SHELF, + BAND_PASS, + BAND_STOP, + PEAKING_NOTCH, + }; + + /// This enumeration determines the compressor mode of compressor Effect. 
+ enum CompressorMode + { + COMPRESSOR, + LIMITER, + EXPANDER, + NOISE_GATE, + }; } #endif diff --git a/src/Frame.h b/src/Frame.h index 18a22d11..e35ea9ca 100644 --- a/src/Frame.h +++ b/src/Frame.h @@ -109,7 +109,7 @@ namespace openshot private: std::shared_ptr image; std::shared_ptr wave_image; - std::shared_ptr audio; + std::shared_ptr previewApp; juce::CriticalSection addingImageSection; juce::CriticalSection addingAudioSection; @@ -131,6 +131,7 @@ namespace openshot int constrain(int color_value); public: + std::shared_ptr audio; int64_t number; ///< This is the frame number (starting at 1) bool has_audio_data; ///< This frame has been loaded with audio data bool has_image_data; ///< This frame has been loaded with pixel data diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp new file mode 100644 index 00000000..d9d049c7 --- /dev/null +++ b/src/audio_effects/Compressor.cpp @@ -0,0 +1,260 @@ +/** + * @file + * @brief Source file for Compressor audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Compressor.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Compressor::Compressor() : mode(COMPRESSOR), threshold(1), ratio(1), attack(1), release(1), makeup_gain(1), bypass(false) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Compressor::Compressor(openshot::CompressorMode new_mode, Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass) : + mode(new_mode), threshold(new_threshold), ratio(new_ratio), attack(new_attack), release(new_release), makeup_gain(new_makeup_gain), bypass(new_bypass) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Compressor::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Compressor"; + info.name = "Compressor"; + info.description = "Add compressor on the frame's sound."; + info.has_audio = true; + info.has_video = false; + + input_level = 0.0f; + yl_prev = 0.0f; + + +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Compressor::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + // Adding Compressor + const int num_input_channels = frame->audio->getNumChannels(); + const int num_output_channels = frame->audio->getNumChannels(); + const int num_samples = frame->audio->getNumSamples(); + + mixed_down_input.setSize(1, num_samples); + inverse_sample_rate = 1.0f / frame->SampleRate(); //(float)getSampleRate(); + inverseE = 1.0f / M_E; + + if ((bool)bypass.GetValue(frame_number)) + return frame; + + mixed_down_input.clear(); + + for (int channel = 0; channel < num_input_channels; ++channel) + mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels); + + for (int sample = 0; sample < num_samples; ++sample) { + bool expander = (bool)mode; + float T = threshold.GetValue(frame_number); + float R = ratio.GetValue(frame_number); + float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number)); + float alphaR = calculateAttackOrRelease(release.GetValue(frame_number)); + float gain = makeup_gain.GetValue(frame_number); + float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f); + + if (expander) { + const float average_factor = 0.9999f; + input_level = average_factor * input_level + (1.0f - average_factor) * input_squared; + } else { + input_level = input_squared; + } + xg = (input_level <= 1e-6f) ? 
-60.0f : 10.0f * log10f(input_level); + + + // Expander + if (expander) { + if (xg > T) + yg = xg; + else + yg = T + (xg - T) * R; + + xl = xg - yg; + + if (xl < yl_prev) + yl = alphaA * yl_prev + (1.0f - alphaA) * xl; + else + yl = alphaR * yl_prev + (1.0f - alphaR) * xl; + + // Compressor + } else { + if (xg < T) + yg = xg; + else + yg = T + (xg - T) / R; + + xl = xg - yg; + + if (xl > yl_prev) + yl = alphaA * yl_prev + (1.0f - alphaA) * xl; + else + yl = alphaR * yl_prev + (1.0f - alphaR) * xl; + } + + control = powf (10.0f, (gain - yl) * 0.05f); + yl_prev = yl; + + for (int channel = 0; channel < num_input_channels; ++channel) { + float new_value = frame->audio->getSample(channel, sample)*control; + frame->audio->setSample(channel, sample, new_value); + } + } + + for (int channel = num_input_channels; channel < num_output_channels; ++channel) + frame->audio->clear(channel, 0, num_samples); + + // return the modified frame + return frame; +} + +float Compressor::calculateAttackOrRelease(float value) +{ + if (value == 0.0f) + return 0.0f; + else + return pow (inverseE, inverse_sample_rate / value); +} + +// Generate JSON string of this object +std::string Compressor::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Compressor::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["mode"] = mode; + root["threshold"] = threshold.JsonValue(); + root["ratio"] = ratio.JsonValue(); + root["attack"] = attack.JsonValue(); + root["release"] = release.JsonValue(); + root["makeup_gain"] = makeup_gain.JsonValue(); + root["bypass"] = bypass.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Compressor::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = 
openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Compressor::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["mode"].isNull()) + mode = (CompressorMode)root["mode"].asInt(); + + if (!root["threshold"].isNull()) + threshold.SetJsonValue(root["threshold"]); + + if (!root["ratio"].isNull()) + ratio.SetJsonValue(root["ratio"]); + + if (!root["attack"].isNull()) + attack.SetJsonValue(root["attack"]); + + if (!root["release"].isNull()) + release.SetJsonValue(root["release"]); + + if (!root["makeup_gain"].isNull()) + makeup_gain.SetJsonValue(root["makeup_gain"]); + + if (!root["bypass"].isNull()) + bypass.SetJsonValue(root["bypass"]); +} + +// Get all properties for a specific frame +std::string Compressor::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["mode"] = add_property_json("Mode", mode, "int", "", NULL, 0, 3, false, requested_frame); + root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 60, false, requested_frame); + 
root["ratio"] = add_property_json("Ratio", ratio.GetValue(requested_frame), "float", "", &ratio, 1, 100, false, requested_frame); + root["attack"] = add_property_json("Attack", attack.GetValue(requested_frame), "float", "", &attack, 0.1, 100, false, requested_frame); + root["release"] = add_property_json("Release", release.GetValue(requested_frame), "float", "", &release, 10, 1000, false, requested_frame); + root["makeup_gain"] = add_property_json("Makeup gain", makeup_gain.GetValue(requested_frame), "float", "", &makeup_gain, -12, 12, false, requested_frame); + root["bypass"] = add_property_json("Bypass", bypass.GetValue(requested_frame), "bool", "", &bypass, 0, 1, false, requested_frame); + + // Add mode choices (dropdown style) + root["mode"]["choices"].append(add_property_choice_json("Compressor", COMPRESSOR, mode)); + root["mode"]["choices"].append(add_property_choice_json("Limiter", LIMITER, mode)); + root["mode"]["choices"].append(add_property_choice_json("Expander", EXPANDER, mode)); + root["mode"]["choices"].append(add_property_choice_json("Noise Gate", NOISE_GATE, mode)); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Compressor.h b/src/audio_effects/Compressor.h new file mode 100644 index 00000000..abe5e4ac --- /dev/null +++ b/src/audio_effects/Compressor.h @@ -0,0 +1,126 @@ +/** + * @file + * @brief Header file for Compressor audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . 
+ * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_COMPRESSOR_AUDIO_EFFECT_H +#define OPENSHOT_COMPRESSOR_AUDIO_EFFECT_H + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" +#include "../Enums.h" + +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a compressor into the audio + * + */ + class Compressor : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + + public: + openshot::CompressorMode mode; + Keyframe threshold; + Keyframe ratio; + Keyframe attack; + Keyframe release; + Keyframe makeup_gain; + Keyframe bypass; + + juce::AudioSampleBuffer mixed_down_input; + float xl; + float yl; + float xg; + float yg; + float control; + + float input_level; + float yl_prev; + + float inverse_sample_rate; + float inverseE; + + /// Blank constructor, useful when using Json to load the effect properties + Compressor(); + + /// Default constructor + /// + /// @param new_level The audio default Compressor level (between 1 and 100) + Compressor(openshot::CompressorMode new_mode, Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass); + + float calculateAttackOrRelease(float value); + + /// @brief This method is 
required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + }; + +} + +#endif diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp new file mode 100644 index 00000000..8faa7d30 --- /dev/null +++ b/src/audio_effects/Distortion.cpp @@ -0,0 +1,247 @@ +/** + * @file + * @brief Source file for Distortion audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot 
Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Distortion.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Distortion::Distortion() : distortion_type(HARD_CLIPPING), input_gain(0), output_gain(0), tone(0) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Distortion::Distortion(openshot::DistortionType new_distortion_type, Keyframe new_input_gain, Keyframe new_output_gain, Keyframe new_tone) : + distortion_type(new_distortion_type), input_gain(new_input_gain), output_gain(new_output_gain), tone(new_tone) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Distortion::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Distortion"; + info.name = "Distortion"; + info.description = "Add distortion on the frame's sound."; + info.has_audio = true; + info.has_video = false; +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Distortion::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + filters.clear(); + + for (int i = 0; i < frame->audio->getNumChannels(); ++i) { + Filter* filter; + filters.add (filter = new Filter()); + } + + updateFilters(frame_number); + + // Add distortion + for (int channel = 0; channel < frame->audio->getNumChannels(); channel++) + { + //auto *inBuffer = frame->audio->getReadPointer(channel); + auto *channelData = frame->audio->getWritePointer(channel); + float out; + + for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample) + { + + const int input_gain_value = (int)input_gain.GetValue(frame_number); + const int output_gain_value = (int)output_gain.GetValue(frame_number); + const float in = channelData[sample]*powf(10.0f, input_gain_value * 0.05f); + + // Use the current distortion type + switch (distortion_type) { + + case HARD_CLIPPING: { + float threshold = 0.5f; + if (in > threshold) + out = threshold; + else if (in < -threshold) + out = -threshold; + else + out = in; + break; + } + + case SOFT_CLIPPING: { + float threshold1 = 1.0f / 3.0f; + float threshold2 = 2.0f / 3.0f; + if (in > threshold2) + out = 1.0f; + else if (in > threshold1) + out = 1.0f - powf (2.0f - 3.0f * in, 2.0f) / 3.0f; + else if (in < -threshold2) + out = -1.0f; + else if (in < -threshold1) + out = -1.0f + powf (2.0f + 3.0f * in, 2.0f) / 3.0f; + else + out = 2.0f * in; + out *= 0.5f; + break; + } + + case EXPONENTIAL: { + if (in > 0.0f) + out = 1.0f - expf (-in); + else + out = -1.0f + expf (in); + break; + } + + case FULL_WAVE_RECTIFIER: { + out = fabsf (in); + break; + } + + case HALF_WAVE_RECTIFIER: { + if 
(in > 0.0f) + out = in; + else + out = 0.0f; + break; + } + } + + float filtered = filters[channel]->processSingleSampleRaw(out); + channelData[sample] = filtered*powf(10.0f, output_gain_value * 0.05f); + } + } + + // return the modified frame + return frame; +} + +void Distortion::updateFilters(int64_t frame_number) +{ + double discreteFrequency = M_PI * 0.01; + double gain = pow(10.0, (float)tone.GetValue(frame_number) * 0.05); + + for (int i = 0; i < filters.size(); ++i) + filters[i]->updateCoefficients(discreteFrequency, gain); +} + +// Generate JSON string of this object +std::string Distortion::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Distortion::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["distortion_type"] = distortion_type; + root["input_gain"] = input_gain.JsonValue(); + root["output_gain"] = output_gain.JsonValue(); + root["tone"] = tone.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Distortion::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Distortion::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["distortion_type"].isNull()) + distortion_type = (DistortionType)root["distortion_type"].asInt(); + + if (!root["input_gain"].isNull()) + input_gain.SetJsonValue(root["input_gain"]); + + if (!root["output_gain"].isNull()) + 
 output_gain.SetJsonValue(root["output_gain"]); + + if (!root["tone"].isNull()) + tone.SetJsonValue(root["tone"]); +} + +// Get all properties for a specific frame +std::string Distortion::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["distortion_type"] = add_property_json("Distortion Type", distortion_type, "int", "", NULL, 0, 4, false, requested_frame); + root["input_gain"] = add_property_json("Input Gain (dB)", input_gain.GetValue(requested_frame), "int", "", &input_gain, -24, 24, false, requested_frame); + root["output_gain"] = add_property_json("Output Gain (dB)", output_gain.GetValue(requested_frame), "int", "", &output_gain, -24, 24, false, requested_frame); + root["tone"] = add_property_json("Tone (dB)", tone.GetValue(requested_frame), "int", "", &tone, -24, 24, false, requested_frame); + + // Add distortion_type choices (dropdown style) + root["distortion_type"]["choices"].append(add_property_choice_json("Hard Clipping", HARD_CLIPPING, distortion_type)); + root["distortion_type"]["choices"].append(add_property_choice_json("Soft Clipping", SOFT_CLIPPING, distortion_type)); + root["distortion_type"]["choices"].append(add_property_choice_json("Exponential", EXPONENTIAL, distortion_type)); + root["distortion_type"]["choices"].append(add_property_choice_json("Full Wave Rectifier", FULL_WAVE_RECTIFIER, distortion_type)); + 
root["distortion_type"]["choices"].append(add_property_choice_json("Half Wave Rectifier", HALF_WAVE_RECTIFIER, distortion_type)); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Distortion.h b/src/audio_effects/Distortion.h new file mode 100644 index 00000000..a3b3e8ea --- /dev/null +++ b/src/audio_effects/Distortion.h @@ -0,0 +1,135 @@ +/** + * @file + * @brief Header file for Distortion audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . 
+ */ + +#ifndef OPENSHOT_DISTORTION_AUDIO_EFFECT_H +#define OPENSHOT_DISTORTION_AUDIO_EFFECT_H +#define _USE_MATH_DEFINES + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" +#include "../Enums.h" + +#include +#include +#include +// #include + + +namespace openshot +{ + + /** + * @brief This class adds a noise into the audio + * + */ + class Distortion : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + openshot::DistortionType distortion_type; + Keyframe input_gain; + Keyframe output_gain; + Keyframe tone; + + /// Blank constructor, useful when using Json to load the effect properties + Distortion(); + + /// Default constructor + /// + /// @param new_level The audio default distortion level (between 1 and 100) + Distortion(openshot::DistortionType new_distortion_type, Keyframe new_input_gain, Keyframe new_output_gain, Keyframe new_tone); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + + class Filter : public juce::IIRFilter + { + public: + void updateCoefficients (const double discreteFrequency, + const double gain) noexcept + { + jassert (discreteFrequency > 0); + + double tan_half_wc = tan (discreteFrequency / 2.0); + double sqrt_gain = sqrt (gain); + + coefficients = juce::IIRCoefficients (/* b0 */ sqrt_gain * tan_half_wc + gain, + /* b1 */ sqrt_gain * tan_half_wc - gain, + /* b2 */ 0.0, + /* a0 */ sqrt_gain * tan_half_wc + 1.0, + /* a1 */ sqrt_gain * tan_half_wc - 1.0, + /* a2 */ 0.0); + + setCoefficients (coefficients); + } + }; + + juce::OwnedArray filters; + + void updateFilters(int64_t frame_number); + }; + +} + +#endif diff --git a/src/audio_effects/Noise.cpp b/src/audio_effects/Noise.cpp new file mode 100644 index 00000000..357cb769 --- /dev/null +++ b/src/audio_effects/Noise.cpp @@ -0,0 +1,149 @@ +/** + * @file + * @brief Source file for Noise audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . 
+ * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Noise.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Noise::Noise() : level(30) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Noise::Noise(Keyframe new_level) : level(new_level) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Noise::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Noise"; + info.name = "Noise"; + info.description = "Add white noise on the frame's sound."; + info.has_audio = true; + info.has_video = false; +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Noise::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + // Adding Noise + srand ( time(NULL) ); + int noise = level.GetValue(frame_number); + + for (int channel = 0; channel < frame->audio->getNumChannels(); channel++) + { + auto *buffer = frame->audio->getWritePointer(channel); + + for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample) + { + buffer[sample] = buffer[sample]*(1 - (1+(float)noise)/100) + buffer[sample]*0.0001*(rand()%100+1)*noise; + } + } + + + // return the modified frame + return frame; +} + +// Generate JSON string of this object +std::string Noise::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Noise::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["level"] = level.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Noise::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Noise::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["level"].isNull()) + 
level.SetJsonValue(root["level"]); +} + +// Get all properties for a specific frame +std::string Noise::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["level"] = add_property_json("Level", level.GetValue(requested_frame), "int", "", &level, 0, 100, false, requested_frame); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Noise.h b/src/audio_effects/Noise.h new file mode 100644 index 00000000..9de41458 --- /dev/null +++ b/src/audio_effects/Noise.h @@ -0,0 +1,104 @@ +/** + * @file + * @brief Header file for Noise audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_NOISE_AUDIO_EFFECT_H +#define OPENSHOT_NOISE_AUDIO_EFFECT_H + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" + +#include +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a noise into the audio + * + */ + class Noise : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + Keyframe level; ///< Noise level keyframe. The amount of noise inserted on the audio. + + /// Blank constructor, useful when using Json to load the effect properties + Noise(); + + /// Default constructor + /// + /// @param new_level The audio default noise level (between 1 and 100) + Noise(Keyframe new_level); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. 
+ /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + }; + +} + +#endif diff --git a/src/audio_effects/ParametricEQ.cpp b/src/audio_effects/ParametricEQ.cpp new file mode 100644 index 00000000..d3714f14 --- /dev/null +++ b/src/audio_effects/ParametricEQ.cpp @@ -0,0 +1,195 @@ +/** + * @file + * @brief Source file for ParametricEQ audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "ParametricEQ.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +ParametricEQ::ParametricEQ() : filter_type(LOW_PASS), frequency(500), gain(0), q_factor(0) { + // Init effect properties + init_effect_details(); +} + + +// Default constructor +ParametricEQ::ParametricEQ(openshot::FilterType new_filter_type, Keyframe new_frequency, Keyframe new_gain, Keyframe new_q_factor) : + filter_type(new_filter_type), frequency(new_frequency), gain(new_gain), q_factor(new_q_factor) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void ParametricEQ::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "ParametricEQ"; + info.name = "Parametric EQ"; + info.description = "Add equalization on the frame's sound."; + info.has_audio = true; + info.has_video = false; +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr ParametricEQ::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + filters.clear(); + + for (int i = 0; i < frame->audio->getNumChannels(); ++i) { + Filter* filter; + filters.add(filter = new Filter()); + } + + const int num_input_channels = frame->audio->getNumChannels(); + const int num_output_channels = frame->audio->getNumChannels(); + const int num_samples = frame->audio->getNumSamples(); + updateFilters(frame_number, frame->SampleRate()); + + // Add distortion + for (int channel = 0; channel < frame->audio->getNumChannels(); channel++) + { + auto *channel_data = frame->audio->getWritePointer(channel); + filters[channel]->processSamples(channel_data, num_samples); + } + + for (int channel = num_input_channels; channel < num_output_channels; ++channel) + { + frame->audio->clear(channel, 0, num_samples); + } + + // return the modified frame + return frame; +} + +void ParametricEQ::updateFilters(int64_t frame_number, double sample_rate) +{ + double discrete_frequency = 2.0 * M_PI * (double)frequency.GetValue(frame_number) / sample_rate; + double q_value = (double)q_factor.GetValue(frame_number); + double gain_value = pow(10.0, (double)gain.GetValue(frame_number) * 0.05); + int filter_type_value = (int)filter_type; + + for (int i = 0; i < filters.size(); ++i) + filters[i]->updateCoefficients(discrete_frequency, q_value, gain_value, filter_type_value); +} + +// Generate JSON string of this object +std::string ParametricEQ::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value ParametricEQ::JsonValue() const { + + // 
Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["filter_type"] = filter_type; + root["frequency"] = frequency.JsonValue();; + root["q_factor"] = q_factor.JsonValue(); + root["gain"] = gain.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void ParametricEQ::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void ParametricEQ::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["filter_type"].isNull()) + filter_type = (FilterType)root["filter_type"].asInt(); + + if (!root["frequency"].isNull()) + frequency.SetJsonValue(root["frequency"]); + + if (!root["gain"].isNull()) + gain.SetJsonValue(root["gain"]); + + if (!root["q_factor"].isNull()) + q_factor.SetJsonValue(root["q_factor"]); +} + +// Get all properties for a specific frame +std::string ParametricEQ::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, 
requested_frame); + + // Keyframes + root["filter_type"] = add_property_json("Filter Type", filter_type, "int", "", NULL, 0, 3, false, requested_frame); + root["frequency"] = add_property_json("Frequency (Hz)", frequency.GetValue(requested_frame), "int", "", &frequency, 20, 20000, false, requested_frame); + root["gain"] = add_property_json("Gain (dB)", gain.GetValue(requested_frame), "int", "", &gain, -24, 24, false, requested_frame); + root["q_factor"] = add_property_json("Q Factor", q_factor.GetValue(requested_frame), "float", "", &q_factor, 0, 20, false, requested_frame); + + // Add filter_type choices (dropdown style) + root["filter_type"]["choices"].append(add_property_choice_json("Low Pass", LOW_PASS, filter_type)); + root["filter_type"]["choices"].append(add_property_choice_json("High Pass", HIGH_PASS, filter_type)); + root["filter_type"]["choices"].append(add_property_choice_json("Low Shelf", LOW_SHELF, filter_type)); + root["filter_type"]["choices"].append(add_property_choice_json("High Shelf", HIGH_SHELF, filter_type)); + root["filter_type"]["choices"].append(add_property_choice_json("Band Pass", BAND_PASS, filter_type)); + root["filter_type"]["choices"].append(add_property_choice_json("Band Stop", BAND_STOP, filter_type)); + root["filter_type"]["choices"].append(add_property_choice_json("Peaking Notch", PEAKING_NOTCH, filter_type)); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/ParametricEQ.h b/src/audio_effects/ParametricEQ.h new file mode 100644 index 00000000..f69d5143 --- /dev/null +++ b/src/audio_effects/ParametricEQ.h @@ -0,0 +1,200 @@ +/** + * @file + * @brief Header file for Parametric EQ audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. 
For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_PARAMETRIC_EQ_AUDIO_EFFECT_H +#define OPENSHOT_PARAMETRIC_EQ_AUDIO_EFFECT_H +#define _USE_MATH_DEFINES + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" +#include "../Enums.h" + +#include +#include +#include +// #include + + +namespace openshot +{ + + /** + * @brief This class adds a noise into the audio + * + */ + class ParametricEQ : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + openshot::FilterType filter_type; + Keyframe frequency; + Keyframe q_factor; + Keyframe gain; + + /// Blank constructor, useful when using Json to load the effect properties + ParametricEQ(); + + /// Default constructor + /// + /// @param new_level + ParametricEQ(openshot::FilterType new_filter_type, Keyframe new_frequency, Keyframe new_gain, Keyframe new_q_factor); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + + class Filter : public IIRFilter + { + public: + void updateCoefficients (const double discrete_frequency, + const double q_factor, + const double gain, + const int filter_type) noexcept + { + jassert (discrete_frequency > 0); + jassert (q_factor > 0); + + double bandwidth = jmin (discrete_frequency / q_factor, M_PI * 0.99); + double two_cos_wc = -2.0 * cos (discrete_frequency); + double tan_half_bw = tan (bandwidth / 2.0); + double tan_half_wc = tan (discrete_frequency / 2.0); + double sqrt_gain = sqrt (gain); + + switch (filter_type) { + case 0 /*filterTypeLowPass*/: { + coefficients = IIRCoefficients (/* b0 */ tan_half_wc, + /* b1 */ tan_half_wc, + /* b2 */ 0.0, + /* a0 */ 
tan_half_wc + 1.0, + /* a1 */ tan_half_wc - 1.0, + /* a2 */ 0.0); + break; + } + case 1 /*filterTypeHighPass*/: { + coefficients = IIRCoefficients (/* b0 */ 1.0, + /* b1 */ -1.0, + /* b2 */ 0.0, + /* a0 */ tan_half_wc + 1.0, + /* a1 */ tan_half_wc - 1.0, + /* a2 */ 0.0); + break; + } + case 2 /*filterTypeLowShelf*/: { + coefficients = IIRCoefficients (/* b0 */ gain * tan_half_wc + sqrt_gain, + /* b1 */ gain * tan_half_wc - sqrt_gain, + /* b2 */ 0.0, + /* a0 */ tan_half_wc + sqrt_gain, + /* a1 */ tan_half_wc - sqrt_gain, + /* a2 */ 0.0); + break; + } + case 3 /*filterTypeHighShelf*/: { + coefficients = IIRCoefficients (/* b0 */ sqrt_gain * tan_half_wc + gain, + /* b1 */ sqrt_gain * tan_half_wc - gain, + /* b2 */ 0.0, + /* a0 */ sqrt_gain * tan_half_wc + 1.0, + /* a1 */ sqrt_gain * tan_half_wc - 1.0, + /* a2 */ 0.0); + break; + } + case 4 /*filterTypeBandPass*/: { + coefficients = IIRCoefficients (/* b0 */ tan_half_bw, + /* b1 */ 0.0, + /* b2 */ -tan_half_bw, + /* a0 */ 1.0 + tan_half_bw, + /* a1 */ two_cos_wc, + /* a2 */ 1.0 - tan_half_bw); + break; + } + case 5 /*filterTypeBandStop*/: { + coefficients = IIRCoefficients (/* b0 */ 1.0, + /* b1 */ two_cos_wc, + /* b2 */ 1.0, + /* a0 */ 1.0 + tan_half_bw, + /* a1 */ two_cos_wc, + /* a2 */ 1.0 - tan_half_bw); + break; + } + case 6 /*filterTypePeakingNotch*/: { + coefficients = IIRCoefficients (/* b0 */ sqrt_gain + gain * tan_half_bw, + /* b1 */ sqrt_gain * two_cos_wc, + /* b2 */ sqrt_gain - gain * tan_half_bw, + /* a0 */ sqrt_gain + tan_half_bw, + /* a1 */ sqrt_gain * two_cos_wc, + /* a2 */ sqrt_gain - tan_half_bw); + break; + } + } + + setCoefficients (coefficients); + } + }; + + juce::OwnedArray filters; + + void updateFilters(int64_t frame_number, double sample_rate); + }; + +} + +#endif From 02f8936557c693cd5466577e3f374e3091d805f5 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sun, 20 Jun 2021 16:05:42 -0300 Subject: [PATCH 11/71] Fixed avresample dependency --- src/CMakeLists.txt | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 135bfe4f..a2a8f956 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -294,7 +294,7 @@ mark_as_advanced(QT_VERSION_STR) # Find FFmpeg libraries (used for video encoding / decoding) find_package(FFmpeg REQUIRED COMPONENTS avcodec avformat avutil swscale) -set(all_comps avcodec avformat avutil swscale) +set(all_comps avcodec avformat avutil swscale avresample) if(TARGET FFmpeg::swresample) list(APPEND all_comps swresample) else() From 09eb807507d5127bfe6eec00077be9f1196784b6 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Thu, 24 Jun 2021 22:05:59 -0700 Subject: [PATCH 12/71] CMake: USE_SWIG_DEPENDENCIES for CMake 3.20+ (#691) --- bindings/python/CMakeLists.txt | 6 ++++++ bindings/ruby/CMakeLists.txt | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/bindings/python/CMakeLists.txt b/bindings/python/CMakeLists.txt index 7d588921..8116ccfd 100644 --- a/bindings/python/CMakeLists.txt +++ b/bindings/python/CMakeLists.txt @@ -95,6 +95,12 @@ if (DEFINED _inc) set_property(SOURCE openshot.i PROPERTY INCLUDE_DIRECTORIES ${_inc}) endif() +### (FINALLY!) +### Properly manage dependencies (regenerate bindings after changes) +if (CMAKE_VERSION VERSION_GREATER 3.20) + set_property(SOURCE openshot.i PROPERTY USE_SWIG_DEPENDENCIES TRUE) +endif() + ### Add the SWIG interface file (which defines all the SWIG methods) if (CMAKE_VERSION VERSION_LESS 3.8.0) swig_add_module(pyopenshot python openshot.i) diff --git a/bindings/ruby/CMakeLists.txt b/bindings/ruby/CMakeLists.txt index b145494b..36f4a17d 100644 --- a/bindings/ruby/CMakeLists.txt +++ b/bindings/ruby/CMakeLists.txt @@ -111,6 +111,12 @@ if (DEFINED _inc) set_property(SOURCE openshot.i PROPERTY INCLUDE_DIRECTORIES ${_inc}) endif() +### (FINALLY!) 
+### Properly manage dependencies (regenerate bindings after changes) +if (CMAKE_VERSION VERSION_GREATER 3.20) + set_property(SOURCE openshot.i PROPERTY USE_SWIG_DEPENDENCIES TRUE) +endif() + ### Add the SWIG interface file (which defines all the SWIG methods) if (CMAKE_VERSION VERSION_LESS 3.8.0) swig_add_module(rbopenshot ruby openshot.i) From 93fb2eedf5663a55d8eac0979a672d35e7c78ef0 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 25 Jun 2021 11:46:25 -0400 Subject: [PATCH 13/71] Enhance FindFFmpeg.cmake - Add fallback component version parsing from the individual //version.h headers - Switch to FFmpeg__ variable names, standard for CMake component variables - No longer probe for non-requested components - Use HANDLE_COMPONENT in find_package_handle_standard_args - Parse, export overall FFmpeg_VERSION from libavutil/ffversion.h --- cmake/Modules/FindFFmpeg.cmake | 221 ++++++++++++++++++++++----------- 1 file changed, 151 insertions(+), 70 deletions(-) diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake index b6da9244..64d7fff4 100644 --- a/cmake/Modules/FindFFmpeg.cmake +++ b/cmake/Modules/FindFFmpeg.cmake @@ -34,21 +34,23 @@ This module defines the following variables: :: - FFMPEG_FOUND - System has the all required components. - FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers. - FFMPEG_LIBRARIES - Link these to use the required ffmpeg components. - FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components. + FFmpeg_FOUND - System has the all required components. + FFmpeg_INCLUDE_DIRS - Include directory necessary for using the required components headers. + FFmpeg_LIBRARIES - Link these to use the required ffmpeg components. + FFmpeg_DEFINITIONS - Compiler switches required for using the required ffmpeg components. + FFmpeg_VERSION - The FFmpeg package version found. -For each component, ``_FOUND`` will be set if the component is available. 
- -For each ``_FOUND``, the following variables will be defined: +For each component, ``FFmpeg__FOUND`` will be set if the component is available. + +For each ``FFmpeg__FOUND``, the following variables will be defined: :: - _INCLUDE_DIRS - Include directory necessary for using the headers - _LIBRARIES - Link these to use - _DEFINITIONS - Compiler switches required for using - _VERSION - The components version + FFmpeg__INCLUDE_DIRS - Include directory necessary for using the + headers + FFmpeg__LIBRARIES - Link these to use + FFmpeg__DEFINITIONS - Compiler switches required for + FFmpeg__VERSION - The components version Backwards compatibility ^^^^^^^^^^^^^^^^^^^^^^^ @@ -57,10 +59,20 @@ For compatibility with previous versions of this module, uppercase names for FFmpeg and for all components are also recognized, and all-uppercase versions of the cache variables are also created. +Revision history +^^^^^^^^^^^^^^^^ +ca. 2019 - Create CMake targets for discovered components +2019-06-25 - No longer probe for non-requested components + - Added fallback version.h parsing for components, when + pkgconfig is missing + - Added parsing of libavutil/ffversion.h for FFmpeg_VERSION + - Adopt standard FFmpeg__ variable names + - Switch to full signature for FPHSA, use HANDLE_COMPONENTS + Copyright (c) 2006, Matthias Kretz, Copyright (c) 2008, Alexander Neundorf, Copyright (c) 2011, Michael Jansen, -Copyright (c) 2019, FeRD (Frank Dana) +Copyright (c) 2019-2021, FeRD (Frank Dana) Redistribution and use is allowed according to the terms of the BSD license. For details see the accompanying COPYING-CMAKE-SCRIPTS file. @@ -84,9 +96,9 @@ endif () # Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present. 
# macro(set_component_found _component ) - if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS) + if (FFmpeg_${_component}_LIBRARIES AND FFmpeg_${_component}_INCLUDE_DIRS) # message(STATUS "FFmpeg - ${_component} found.") - set(${_component}_FOUND TRUE) + set(FFmpeg_${_component}_FOUND TRUE) else () if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY) message(STATUS "FFmpeg - ${_component} not found.") @@ -94,6 +106,34 @@ macro(set_component_found _component ) endif () endmacro() +# +### Macro: parse_lib_version +# +# Reads the file '${_pkgconfig}/version.h' in the component's _INCLUDE_DIR, +# and parses #define statements for COMPONENT_VERSION_(MAJOR|MINOR|PATCH) +# into a dotted string ${_component}_VERSION. +# +# Needed if the version is not supplied via pkgconfig's PC_${_component}_VERSION +macro(parse_lib_version _component _libname ) + set(_version_h "${FFmpeg_${_component}_INCLUDE_DIRS}/${_libname}/version.h") + if(EXISTS "${_version_h}") + #message(STATUS "Parsing ${_component} version from ${_version_h}") + string(TOUPPER "${_libname}" _prefix) + set(_parts) + foreach(_lvl MAJOR MINOR MICRO) + file(STRINGS "${_version_h}" _lvl_version + REGEX "^[ \t]*#define[ \t]+${_prefix}_VERSION_${_lvl}[ \t]+[0-9]+[ \t]*$") + string(REGEX REPLACE + "^.*${_prefix}_VERSION_${_lvl}[ \t]+([0-9]+)[ \t]*$" + "\\1" + _lvl_match "${_lvl_version}") + list(APPEND _parts "${_lvl_match}") + endforeach() + list(JOIN _parts "." 
FFmpeg_${_component}_VERSION) + message(STATUS "Found ${_component} version: ${FFmpeg_${_component}_VERSION}") + endif() +endmacro() + # ### Macro: find_component # @@ -109,9 +149,9 @@ macro(find_component _component _pkgconfig _library _header) if (PKG_CONFIG_FOUND) pkg_check_modules(PC_${_component} ${_pkgconfig}) endif () - endif (NOT WIN32) + endif() - find_path(${_component}_INCLUDE_DIRS ${_header} + find_path(FFmpeg_${_component}_INCLUDE_DIRS ${_header} HINTS /opt/ /opt/include/ @@ -123,7 +163,7 @@ macro(find_component _component _pkgconfig _library _header) ffmpeg ) - find_library(${_component}_LIBRARIES NAMES ${_library} + find_library(FFmpeg_${_component}_LIBRARIES NAMES ${_library} HINTS ${PC_${_component}_LIBDIR} ${PC_${_component}_LIBRARY_DIRS} @@ -132,56 +172,86 @@ macro(find_component _component _pkgconfig _library _header) $ENV{FFMPEGDIR}/bin/ ) - set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.") - set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.") + set(FFmpeg_${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.") + + # Take version from PkgConfig, or parse from its version.h header + if (PC_${_component}_VERSION) + set(FFmpeg_${_component}_VERSION ${PC_${_component}_VERSION}) + else() + parse_lib_version(${_component} ${_pkgconfig}) + endif() + + set(FFmpeg_${_component}_VERSION ${FFmpeg_${_component}_VERSION} CACHE STRING "The ${_component} version number.") set_component_found(${_component}) mark_as_advanced( - ${_component}_INCLUDE_DIRS - ${_component}_LIBRARIES - ${_component}_DEFINITIONS - ${_component}_VERSION + FFmpeg_${_component}_INCLUDE_DIRS + FFmpeg_${_component}_LIBRARIES + FFmpeg_${_component}_DEFINITIONS + FFmpeg_${_component}_VERSION ) endmacro() +# +### Macro: parse_ff_version +# +# Read the libavutil/ffversion.h file and extract the definition +# for FFMPEG_VERSION, to use as our 
version string. +macro (parse_ff_version) + set(_header "${FFmpeg_avutil_INCLUDE_DIRS}/libavutil/ffversion.h") + if(EXISTS "${_header}") + #message(STATUS "Parsing ffmpeg version from ${_header}") + file(STRINGS "${_header}" _version_def + REGEX "^#define[ \t]+FFMPEG_VERSION[ \t]+\".*\"[ \t]*$") + string(REGEX REPLACE + "^.*FFMPEG_VERSION[ \t]+\"(.*)\".*$" + "\\1" + FFmpeg_VERSION "${_version_def}") + #message(STATUS "Found FFmpeg version: ${FFmpeg_VERSION}") + endif() +endmacro() -# Check for cached results. If there are skip the costly part. -if (NOT FFmpeg_LIBRARIES) +# Configs for all possible component. +set(avcodec_params libavcodec avcodec libavcodec/avcodec.h) +set(avdevice_params libavdevice avdevice libavdevice/avdevice.h) +set(avformat_params libavformat avformat libavformat/avformat.h) +set(avfilter_params libavfilter avfilter libavfilter/avfilter.h) +set(avutil_params libavutil avutil libavutil/avutil.h) +set(postproc_params libpostproc postproc libpostproc/postprocess.h) +set(swscale_params libswscale swscale libswscale/swscale.h) +set(swresample_params libswresample swresample libswresample/swresample.h) +set(avresample_params libavresample avresample libavresample/avresample.h) - # Check for all possible component. - find_component(avcodec libavcodec avcodec libavcodec/avcodec.h) - find_component(avdevice libavdevice avdevice libavdevice/avdevice.h) - find_component(avformat libavformat avformat libavformat/avformat.h) - find_component(avfilter libavfilter avfilter libavfilter/avfilter.h) - find_component(avutil libavutil avutil libavutil/avutil.h) - find_component(postproc libpostproc postproc libpostproc/postprocess.h) - find_component(swscale libswscale swscale libswscale/swscale.h) - find_component(swresample libswresample swresample libswresample/swresample.h) - find_component(avresample libavresample avresample libavresample/avresample.h) -else() - # Just set the noncached _FOUND vars for the components. 
- foreach(_component ${FFmpeg_ALL_COMPONENTS}) - set_component_found(${_component}) - endforeach () -endif() - -# Check if the requested components were found and add their stuff to the FFmpeg_* vars. -foreach (_component ${FFmpeg_FIND_COMPONENTS}) +# Gather configs for each requested component +foreach(_component ${FFmpeg_FIND_COMPONENTS}) string(TOLOWER "${_component}" _component) - if (${_component}_FOUND) + # Only probe if not already _FOUND (expensive) + if (NOT FFmpeg_${_component}_FOUND) + find_component(${_component} ${${_component}_params}) + endif() + + # Add the component's configs to the FFmpeg_* variables + if (FFmpeg_${_component}_FOUND) # message(STATUS "Requested component ${_component} present.") - set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${${_component}_LIBRARIES}) - set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} ${${_component}_DEFINITIONS}) - list(APPEND FFmpeg_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS}) + set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${FFmpeg_${_component}_LIBRARIES}) + set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} ${FFmpeg_${_component}_DEFINITIONS}) + list(APPEND FFmpeg_INCLUDE_DIRS ${FFmpeg_${_component}_INCLUDE_DIRS}) else () # message(STATUS "Requested component ${_component} missing.") endif () -endforeach () +endforeach() + +# Make sure we've probed for avutil +if (NOT FFmpeg_avutil_FOUND) + find_component(avutil libavutil avutil libavutil/avutil.h) +endif() +# Get the overall FFmpeg version from libavutil/ffversion.h +parse_ff_version() # Build the result lists with duplicates removed, in case of repeated -# invocations. +# invocations or component redundancy. if (FFmpeg_INCLUDE_DIRS) list(REMOVE_DUPLICATES FFmpeg_INCLUDE_DIRS) endif() @@ -196,57 +266,68 @@ endif () set(FFmpeg_INCLUDE_DIRS ${FFmpeg_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE) set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE) set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} CACHE STRING "The FFmpeg cflags." 
FORCE) +set(FFmpeg_VERSION ${FFmpeg_VERSION} CACHE STRING "The overall FFmpeg version.") -mark_as_advanced(FFmpeg_INCLUDE_DIRS +mark_as_advanced( + FFmpeg_INCLUDE_DIRS FFmpeg_LIBRARIES - FFmpeg_DEFINITIONS) + FFmpeg_DEFINITIONS + FFmpeg_VERSION +) # Backwards compatibility -foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS) +foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS VERSION) get_property(_help CACHE FFmpeg_${_suffix} PROPERTY HELPSTRING) set(FFMPEG_${_suffix} ${FFmpeg_${_suffix}} CACHE STRING "${_help}" FORCE) mark_as_advanced(FFMPEG_${_suffix}) endforeach() foreach(_component ${FFmpeg_ALL_COMPONENTS}) - if(${_component}_FOUND) + if(FFmpeg_${_component}_FOUND) string(TOUPPER "${_component}" _uc_component) - set(${_uc_component}_FOUND TRUE) + set(FFMPEG_${_uc_component}_FOUND TRUE) foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS VERSION) - get_property(_help CACHE ${_component}_${_suffix} PROPERTY HELPSTRING) - set(${_uc_component}_${_suffix} ${${_component}_${_suffix}} CACHE STRING "${_help}" FORCE) - mark_as_advanced(${_uc_component}_${_suffix}) + get_property(_help CACHE FFmpeg_${_component}_${_suffix} PROPERTY HELPSTRING) + set(FFMPEG_${_uc_component}_${_suffix} ${FFmpeg_${_component}_${_suffix}} CACHE STRING "${_help}" FORCE) + mark_as_advanced(FFMPEG_${_uc_component}_${_suffix}) endforeach() endif() endforeach() # Compile the list of required vars set(_FFmpeg_REQUIRED_VARS FFmpeg_LIBRARIES FFmpeg_INCLUDE_DIRS) -foreach (_component ${FFmpeg_FIND_COMPONENTS}) - list(APPEND _FFmpeg_REQUIRED_VARS - ${_component}_LIBRARIES - ${_component}_INCLUDE_DIRS) -endforeach () +# XXX: HANDLE_COMPONENTS should take care of this, maybe? -FeRD +# foreach (_component ${FFmpeg_FIND_COMPONENTS}) +# list(APPEND _FFmpeg_REQUIRED_VARS +# FFmpeg_${_component}_LIBRARIES +# FFmpeg_${_component}_INCLUDE_DIRS) +# endforeach () # Give a nice error message if some of the required vars are missing. 
-find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS}) +find_package_handle_standard_args(FFmpeg + REQUIRED_VARS ${_FFmpeg_REQUIRED_VARS} + VERSION_VAR FFmpeg_VERSION + HANDLE_COMPONENTS +) # Export targets for each found component -foreach (_component ${FFmpeg_ALL_COMPONENTS}) +foreach (_component ${FFmpeg_FIND_COMPONENTS}) - if(${_component}_FOUND) - # message(STATUS "Creating IMPORTED target FFmpeg::${_component}") + if(FFmpeg_${_component}_FOUND) + #message(STATUS "Creating IMPORTED target FFmpeg::${_component}") if(NOT TARGET FFmpeg::${_component}) add_library(FFmpeg::${_component} UNKNOWN IMPORTED) set_target_properties(FFmpeg::${_component} PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${${_component}_INCLUDE_DIRS}") + INTERFACE_INCLUDE_DIRECTORIES + "${FFmpeg_${_component}_INCLUDE_DIRS}") set_property(TARGET FFmpeg::${_component} APPEND PROPERTY - INTERFACE_COMPILE_DEFINITIONS "${${_component}_DEFINITIONS}") + INTERFACE_COMPILE_DEFINITIONS + "${FFmpeg_${_component}_DEFINITIONS}") set_property(TARGET FFmpeg::${_component} APPEND PROPERTY - IMPORTED_LOCATION "${${_component}_LIBRARIES}") + IMPORTED_LOCATION "${FFmpeg_${_component}_LIBRARIES}") endif() endif() From 8f53e7e922d03fed9d5494ef33167d1a3568a729 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 25 Jun 2021 11:49:49 -0400 Subject: [PATCH 14/71] CMake: Adapt to FindFFmpeg.cmake changes --- src/CMakeLists.txt | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 46663664..0af366d7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -288,7 +288,10 @@ mark_as_advanced(QT_VERSION_STR) ################### FFMPEG ##################### # Find FFmpeg libraries (used for video encoding / decoding) -find_package(FFmpeg REQUIRED COMPONENTS avcodec avformat avutil swscale) +find_package(FFmpeg REQUIRED + COMPONENTS avcodec avformat avutil swscale + OPTIONAL_COMPONENTS swresample avresample +) 
set(all_comps avcodec avformat avutil swscale) if(TARGET FFmpeg::swresample) @@ -302,17 +305,17 @@ foreach(ff_comp IN LISTS all_comps) if(TARGET FFmpeg::${ff_comp}) target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp}) # Keep track of some FFmpeg lib versions, to embed in our version header - if(${ff_comp} IN_LIST version_comps AND ${ff_comp}_VERSION) + if(${ff_comp} IN_LIST version_comps AND FFmpeg_${ff_comp}_VERSION) string(TOUPPER ${ff_comp} v_name) - set(${v_name}_VERSION_STR ${${ff_comp}_VERSION} CACHE STRING "${ff_comp} version used" FORCE) + set(${v_name}_VERSION_STR ${FFmpeg_${ff_comp}_VERSION} CACHE STRING "${ff_comp} version used" FORCE) mark_as_advanced(${v_name}_VERSION_STR) endif() endif() endforeach() # Version check for hardware-acceleration code -if(USE_HW_ACCEL AND avcodec_VERSION) - if(${avcodec_VERSION} VERSION_GREATER 57.107.100) +if(USE_HW_ACCEL AND FFmpeg_avcodec_VERSION) + if(${FFmpeg_avcodec_VERSION} VERSION_GREATER "57.107.100") set(HAVE_HW_ACCEL TRUE) endif() endif() @@ -537,4 +540,3 @@ endif() set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Jonathan Thomas") #required include(CPack) - From e2b685de4713bd35fac0735a2ff3a604e66cf1c3 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 25 Jun 2021 12:37:58 -0400 Subject: [PATCH 15/71] FindFFmpeg: Don't report missing components find_package_handle_standard_args() will take care of that --- cmake/Modules/FindFFmpeg.cmake | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake index 64d7fff4..4f5093d7 100644 --- a/cmake/Modules/FindFFmpeg.cmake +++ b/cmake/Modules/FindFFmpeg.cmake @@ -99,10 +99,10 @@ macro(set_component_found _component ) if (FFmpeg_${_component}_LIBRARIES AND FFmpeg_${_component}_INCLUDE_DIRS) # message(STATUS "FFmpeg - ${_component} found.") set(FFmpeg_${_component}_FOUND TRUE) - else () - if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY) - message(STATUS "FFmpeg - ${_component} not 
found.") - endif () + #else () + # if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY) + # message(STATUS "FFmpeg - ${_component} not found.") + # endif () endif () endmacro() From bdf6e9f854359ca13473ce67ef570a413d0c64a8 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Fri, 25 Jun 2021 15:57:52 -0500 Subject: [PATCH 16/71] Fix invalid starting PTS value, preventing blank 1st frames on some thumbnailing and video players. --- src/FFmpegWriter.cpp | 109 ++++++++++++++++++++++--------------------- src/QtPlayer.cpp | 2 +- 2 files changed, 56 insertions(+), 55 deletions(-) diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 4cfc4633..009d498e 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -92,6 +92,10 @@ FFmpegWriter::FFmpegWriter(const std::string& path) : info.has_audio = false; info.has_video = false; + // Init timestamps + write_video_count = 0; + write_audio_count = 0; + // Initialize FFMpeg, and register all formats and codecs AV_REGISTER_ALL @@ -910,60 +914,63 @@ void FFmpegWriter::flush_encoders() { } // FLUSH AUDIO ENCODER - if (info.has_audio) - for (;;) { + if (info.has_audio) { + for (;;) { + AVPacket pkt; + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + pkt.pts = pkt.dts = write_audio_count; - // Increment PTS (in samples and scaled to the codec's timebase) - // for some reason, it requires me to multiply channels X 2 - write_audio_count += av_rescale_q(audio_input_position / (audio_codec_ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), av_make_q(1, info.sample_rate), audio_codec_ctx->time_base); - - AVPacket pkt; - av_init_packet(&pkt); - pkt.data = NULL; - pkt.size = 0; - pkt.pts = pkt.dts = write_audio_count; - - /* encode the image */ - int error_code = 0; - int got_packet = 0; + /* encode the image */ + int error_code = 0; + int got_packet = 0; #if IS_FFMPEG_3_2 - error_code = avcodec_send_frame(audio_codec_ctx, NULL); + error_code = avcodec_send_frame(audio_codec_ctx, NULL); #else - 
error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, NULL, &got_packet); + error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, NULL, &got_packet); #endif - if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code); - } - if (!got_packet) { - break; - } + if (error_code < 0) { + ZmqLogger::Instance()->AppendDebugMethod( + "FFmpegWriter::flush_encoders ERROR [" + (std::string) av_err2str(error_code) + "]", + "error_code", error_code); + } + if (!got_packet) { + break; + } - // Since the PTS can change during encoding, set the value again. This seems like a huge hack, - // but it fixes lots of PTS related issues when I do this. - pkt.pts = pkt.dts = write_audio_count; + // Since the PTS can change during encoding, set the value again. This seems like a huge hack, + // but it fixes lots of PTS related issues when I do this. + pkt.pts = pkt.dts = write_audio_count; - // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base); + // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) + if (pkt.pts != AV_NOPTS_VALUE) + pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); + if (pkt.dts != AV_NOPTS_VALUE) + pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); + if (pkt.duration > 0) + pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base); - // set stream - pkt.stream_index = audio_st->index; - pkt.flags |= 
AV_PKT_FLAG_KEY; + // set stream + pkt.stream_index = audio_st->index; + pkt.flags |= AV_PKT_FLAG_KEY; - // Write packet - error_code = av_interleaved_write_frame(oc, &pkt); - if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code); - } + // Write packet + error_code = av_interleaved_write_frame(oc, &pkt); + if (error_code < 0) { + ZmqLogger::Instance()->AppendDebugMethod( + "FFmpegWriter::flush_encoders ERROR [" + (std::string) av_err2str(error_code) + "]", + "error_code", error_code); + } - // deallocate memory for packet - AV_FREE_PACKET(&pkt); - } + // deallocate memory for packet + AV_FREE_PACKET(&pkt); + } + // Increment PTS (in samples and scaled to the codec's timebase) + // for some reason, it requires me to multiply channels X 2 + write_audio_count += av_rescale_q(audio_input_position / (audio_codec_ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), av_make_q(1, info.sample_rate), audio_codec_ctx->time_base); + } } @@ -2042,8 +2049,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra pkt.data = (uint8_t *) frame_final->data; pkt.size = sizeof(AVPicture); - // Increment PTS (in frames and scaled to the codec's timebase) - write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); + // Set PTS (in frames and scaled to the codec's timebase) pkt.pts = write_video_count; /* write the compressed frame in the media file */ @@ -2065,9 +2071,6 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra pkt.size = 0; pkt.pts = pkt.dts = AV_NOPTS_VALUE; - // Increment PTS (in frames and scaled to the codec's timebase) - write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); - // Assign the initial AVFrame PTS from the frame counter frame_final->pts = write_video_count; #if USE_HW_ACCEL @@ -2142,11 
+2145,6 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra /* if zero size, it means the image was buffered */ if (error_code == 0 && got_packet_ptr) { - - // Since the PTS can change during encoding, set the value again. This seems like a huge hack, - // but it fixes lots of PTS related issues when I do this. - //pkt.pts = pkt.dts = write_video_count; - // set the timestamp if (pkt.pts != AV_NOPTS_VALUE) pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base); @@ -2176,6 +2174,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra #endif // USE_HW_ACCEL } + // Increment PTS (in frames and scaled to the codec's timebase) + write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); + // Success return true; } diff --git a/src/QtPlayer.cpp b/src/QtPlayer.cpp index c43cdf80..f3697f71 100644 --- a/src/QtPlayer.cpp +++ b/src/QtPlayer.cpp @@ -119,7 +119,7 @@ namespace openshot void QtPlayer::Play() { - // Set mode to playing, and speed to normal + // Set mode to playing, and speed to normal mode = PLAYBACK_PLAY; Speed(1); From 64e578241a75528b0e81c3774682016a53592fdf Mon Sep 17 00:00:00 2001 From: Jackson Date: Fri, 25 Jun 2021 18:54:27 -0500 Subject: [PATCH 17/71] Reduced, but didn't eliminate empty time at the beginning --- src/FFmpegWriter.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 009d498e..32fd346f 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -1836,10 +1836,6 @@ void FFmpegWriter::write_audio_packets(bool is_final) { audio_encoder_buffer_size, 0); } - // Increment PTS (in samples) - write_audio_count += FFMIN(audio_input_frame_size, audio_input_position); - frame_final->pts = write_audio_count; // Set the AVFrame's PTS - // Init the packet AVPacket pkt; av_init_packet(&pkt); @@ -1911,6 +1907,10 @@ void FFmpegWriter::write_audio_packets(bool 
is_final) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); } + // Increment PTS (in samples) + write_audio_count += FFMIN(audio_input_frame_size, audio_input_position); + frame_final->pts = write_audio_count; // Set the AVFrame's PTS + // deallocate AVFrame av_freep(&(frame_final->data[0])); AV_FREE_FRAME(&frame_final); From 68f03b5b6cb6e1f10dfa66c094ed776ff5f832f4 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Sat, 26 Jun 2021 02:18:16 -0700 Subject: [PATCH 18/71] FFmpeg: Move av/swresample decision into CMake (#693) * FFmpeg: Move av/swresample decision into CMake By making the determination as to which resampling library to use in the `src/CMakeLists.txt` code, only that library needs to be linked with libopenshot (and not the unused one), plus the choice can be displayed in the FeatureSummary at configure time. * src/CMakeLists: Fix FFmpeg hwaccel version check --- src/CMakeLists.txt | 23 +- src/FFmpegUtilities.h | 561 +++++++++++++++++++++------------------ src/OpenShotVersion.h.in | 1 + 3 files changed, 315 insertions(+), 270 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0af366d7..dc61e2e1 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -294,13 +294,18 @@ find_package(FFmpeg REQUIRED ) set(all_comps avcodec avformat avutil swscale) -if(TARGET FFmpeg::swresample) - list(APPEND all_comps swresample) -else() - list(APPEND all_comps avresample) -endif() set(version_comps avcodec avformat avutil) +# Pick a resampler. 
Prefer swresample if possible +if(TARGET FFmpeg::swresample AND ${FFmpeg_avformat_VERSION} VERSION_GREATER "57.0.0") + set(resample_lib swresample) + set(USE_SW TRUE) +else() + set(resample_lib avresample) + set(USE_SW FALSE) +endif() +list(APPEND all_comps ${resample_lib}) + foreach(ff_comp IN LISTS all_comps) if(TARGET FFmpeg::${ff_comp}) target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp}) @@ -313,9 +318,15 @@ foreach(ff_comp IN LISTS all_comps) endif() endforeach() +# Indicate which resampler we linked with, and set a config header flag +add_feature_info("FFmpeg ${resample_lib}" TRUE "Audio resampling uses ${resample_lib}") +# Set the appropriate flag in OpenShotVersion.h +set(FFMPEG_USE_SWRESAMPLE ${USE_SW} CACHE BOOL "libswresample used for audio resampling" FORCE) +mark_as_advanced(FFMPEG_USE_SWRESAMPLE) + # Version check for hardware-acceleration code if(USE_HW_ACCEL AND FFmpeg_avcodec_VERSION) - if(${FFmpeg_avcodec_VERSION} VERSION_GREATER "57.107.100") + if(${FFmpeg_avcodec_VERSION} VERSION_GREATER "57.106") set(HAVE_HW_ACCEL TRUE) endif() endif() diff --git a/src/FFmpegUtilities.h b/src/FFmpegUtilities.h index fe46a212..d8b1587f 100644 --- a/src/FFmpegUtilities.h +++ b/src/FFmpegUtilities.h @@ -31,269 +31,302 @@ #ifndef OPENSHOT_FFMPEG_UTILITIES_H #define OPENSHOT_FFMPEG_UTILITIES_H - // Required for libavformat to build on Windows - #ifndef INT64_C - #define INT64_C(c) (c ## LL) - #define UINT64_C(c) (c ## ULL) - #endif - - #ifndef IS_FFMPEG_3_2 - #define IS_FFMPEG_3_2 (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 101)) - #endif - - #ifndef USE_HW_ACCEL - #define USE_HW_ACCEL (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100)) - #endif - - // Include the FFmpeg headers - extern "C" { - #include - #include - #if (LIBAVFORMAT_VERSION_MAJOR >= 57) - #include //PM - #endif - #include - // Change this to the first version swrescale works - #if (LIBAVFORMAT_VERSION_MAJOR >= 57) - #define USE_SW - #endif - #ifdef USE_SW - #include - #else - 
#include - #endif - #include - #include - #include - - // libavutil changed folders at some point - #if LIBAVFORMAT_VERSION_MAJOR >= 53 - #include - #else - #include - #endif - - // channel header refactored - #if LIBAVFORMAT_VERSION_MAJOR >= 54 - #include - #endif - - #if IS_FFMPEG_3_2 - #include "libavutil/imgutils.h" - #endif - } - - // This was removed from newer versions of FFmpeg (but still used in libopenshot) - #ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE - #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio - #endif - #ifndef AV_ERROR_MAX_STRING_SIZE - #define AV_ERROR_MAX_STRING_SIZE 64 - #endif - #ifndef AUDIO_PACKET_ENCODING_SIZE - #define AUDIO_PACKET_ENCODING_SIZE 768000 // 48khz * S16 (2 bytes) * max channels (8) - #endif - - // This wraps an unsafe C macro to be C++ compatible function - inline static const std::string av_make_error_string(int errnum) - { - char errbuf[AV_ERROR_MAX_STRING_SIZE]; - av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE); - return (std::string)errbuf; - } - - // Redefine the C macro to use our new C++ function - #undef av_err2str - #define av_err2str(errnum) av_make_error_string(errnum).c_str() - - // Define this for compatibility - #ifndef PixelFormat - #define PixelFormat AVPixelFormat - #endif - #ifndef PIX_FMT_RGBA - #define PIX_FMT_RGBA AV_PIX_FMT_RGBA - #endif - #ifndef PIX_FMT_NONE - #define PIX_FMT_NONE AV_PIX_FMT_NONE - #endif - #ifndef PIX_FMT_RGB24 - #define PIX_FMT_RGB24 AV_PIX_FMT_RGB24 - #endif - #ifndef PIX_FMT_YUV420P - #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P - #endif - #ifndef PIX_FMT_YUV444P - #define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P - #endif - - // Does ffmpeg pixel format contain an alpha channel? 
- inline static const bool ffmpeg_has_alpha(PixelFormat pix_fmt) { - const AVPixFmtDescriptor *fmt_desc = av_pix_fmt_desc_get(pix_fmt); - return bool(fmt_desc->flags & AV_PIX_FMT_FLAG_ALPHA); - } - - // FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's - // definition in ruby/config.h, so we move it to FF_RSHIFT - #ifdef RSHIFT - #define FF_RSHIFT(a, b) RSHIFT(a, b) - #undef RSHIFT - #endif - - #ifdef USE_SW - #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \ - swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count) - #define SWR_ALLOC() swr_alloc() - #define SWR_CLOSE(ctx) {} - #define SWR_FREE(ctx) swr_free(ctx) - #define SWR_INIT(ctx) swr_init(ctx) - #define SWRCONTEXT SwrContext - #else - #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \ - avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count) - #define SWR_ALLOC() avresample_alloc_context() - #define SWR_CLOSE(ctx) avresample_close(ctx) - #define SWR_FREE(ctx) avresample_free(ctx) - #define SWR_INIT(ctx) avresample_open(ctx) - #define SWRCONTEXT AVAudioResampleContext - #endif - - - #if (LIBAVFORMAT_VERSION_MAJOR >= 58) - #define AV_REGISTER_ALL - #define AVCODEC_REGISTER_ALL - #define AV_FILENAME url - #define AV_SET_FILENAME(oc, f) oc->AV_FILENAME = av_strdup(f) - #define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE - #define AV_ALLOCATE_FRAME() av_frame_alloc() - #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1) - #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) - #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) - #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) - #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context) - #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type - #define AV_FIND_DECODER_CODEC_ID(av_stream) 
av_stream->codecpar->codec_id - #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \ - ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \ - avcodec_parameters_to_context(context, av_stream->codecpar); \ - context; }) - #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec; - #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in) - #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar - #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format - #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format - #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1) - #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1) - #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path) - #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) - #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec); - #define AV_FORMAT_NEW_STREAM(oc, st_codec_ctx, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\ - if (!av_st) \ - throw OutOfMemory("Could not allocate memory for the video stream.", path); \ - c = avcodec_alloc_context3(av_codec); \ - st_codec_ctx = c; \ - av_st->codecpar->codec_id = av_codec->id; - #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec_ctx) avcodec_parameters_from_context(av_stream->codecpar, av_codec_ctx); - #elif IS_FFMPEG_3_2 - #define AV_REGISTER_ALL av_register_all(); - #define AVCODEC_REGISTER_ALL avcodec_register_all(); - #define AV_FILENAME filename - #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f) - #define MY_INPUT_BUFFER_PADDING_SIZE 
FF_INPUT_BUFFER_PADDING_SIZE - #define AV_ALLOCATE_FRAME() av_frame_alloc() - #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1) - #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) - #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) - #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) - #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context) - #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type - #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id - #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \ - ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \ - avcodec_parameters_to_context(context, av_stream->codecpar); \ - context; }) - #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec; - #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in) - #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar - #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format - #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format - #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1) - #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1) - #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path) - #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) - #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec); - #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\ - if (!av_st) \ - throw OutOfMemory("Could not allocate 
memory for the video stream.", path); \ - _Pragma ("GCC diagnostic push"); \ - _Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\""); \ - avcodec_get_context_defaults3(av_st->codec, av_codec); \ - c = av_st->codec; \ - _Pragma ("GCC diagnostic pop"); \ - st_codec = c; - #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec); - #elif LIBAVFORMAT_VERSION_MAJOR >= 55 - #define AV_REGISTER_ALL av_register_all(); - #define AVCODEC_REGISTER_ALL avcodec_register_all(); - #define AV_FILENAME filename - #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f) - #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE - #define AV_ALLOCATE_FRAME() av_frame_alloc() - #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height) - #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) - #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) - #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) - #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context) - #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type - #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id - #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec - #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec - #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in) codec_in = av_stream->codec; - #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context - #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt - #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt - #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height) - #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height) - #define 
AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context() - #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) - #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0) - #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) av_st = avformat_new_stream(oc, av_codec); \ - if (!av_st) \ - throw OutOfMemory("Could not allocate memory for the video stream.", path); \ - avcodec_get_context_defaults3(av_st->codec, av_codec); \ - c = av_st->codec; - #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) - #else - #define AV_REGISTER_ALL av_register_all(); - #define AVCODEC_REGISTER_ALL avcodec_register_all(); - #define AV_FILENAME filename - #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f) - #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE - #define AV_ALLOCATE_FRAME() avcodec_alloc_frame() - #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height) - #define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame) - #define AV_FREE_FRAME(av_frame) avcodec_free_frame(av_frame) - #define AV_FREE_PACKET(av_packet) av_free_packet(av_packet) - #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context) - #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type - #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id - #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec - #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec - #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in ) codec_in = av_stream->codec; - #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context - #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt - #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt - #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) 
avpicture_get_size(pix_fmt, width, height) - #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height) - #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context() - #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) - #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0) - #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) av_st = avformat_new_stream(oc, av_codec); \ - if (!av_st) \ - throw OutOfMemory("Could not allocate memory for the video stream.", path); \ - avcodec_get_context_defaults3(av_st->codec, av_codec); \ - c = av_st->codec; - #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) - #endif - +#include "OpenShotVersion.h" // For FFMPEG_USE_SWRESAMPLE +// Required for libavformat to build on Windows +#ifndef INT64_C +#define INT64_C(c) (c ## LL) +#define UINT64_C(c) (c ## ULL) #endif + +#ifndef IS_FFMPEG_3_2 +#define IS_FFMPEG_3_2 (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 101)) +#endif + +#ifndef USE_HW_ACCEL +#define USE_HW_ACCEL (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100)) +#endif + +#ifndef USE_SW +#define USE_SW FFMPEG_USE_SWRESAMPLE +#endif + +// Include the FFmpeg headers +extern "C" { + #include + #include + +#if (LIBAVFORMAT_VERSION_MAJOR >= 57) + #include //PM +#endif + #include + +#if USE_SW + #include +#else + #include +#endif + + #include + #include + #include + + // libavutil changed folders at some point +#if LIBAVFORMAT_VERSION_MAJOR >= 53 + #include +#else + #include +#endif + + // channel header refactored +#if LIBAVFORMAT_VERSION_MAJOR >= 54 + #include +#endif + +#if IS_FFMPEG_3_2 + #include "libavutil/imgutils.h" +#endif +} + +// This was removed from newer versions of FFmpeg (but still used in libopenshot) +#ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE + // 1 second of 48khz 32bit audio + #define AVCODEC_MAX_AUDIO_FRAME_SIZE 
192000 +#endif +#ifndef AV_ERROR_MAX_STRING_SIZE + #define AV_ERROR_MAX_STRING_SIZE 64 +#endif +#ifndef AUDIO_PACKET_ENCODING_SIZE + // 48khz * S16 (2 bytes) * max channels (8) + #define AUDIO_PACKET_ENCODING_SIZE 768000 +#endif + +// This wraps an unsafe C macro to be C++ compatible function +inline static const std::string av_make_error_string(int errnum) +{ + char errbuf[AV_ERROR_MAX_STRING_SIZE]; + av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE); + return (std::string)errbuf; +} + +// Redefine the C macro to use our new C++ function +#undef av_err2str +#define av_err2str(errnum) av_make_error_string(errnum).c_str() + +// Define this for compatibility +#ifndef PixelFormat + #define PixelFormat AVPixelFormat +#endif +#ifndef PIX_FMT_RGBA + #define PIX_FMT_RGBA AV_PIX_FMT_RGBA +#endif +#ifndef PIX_FMT_NONE + #define PIX_FMT_NONE AV_PIX_FMT_NONE +#endif +#ifndef PIX_FMT_RGB24 + #define PIX_FMT_RGB24 AV_PIX_FMT_RGB24 +#endif +#ifndef PIX_FMT_YUV420P + #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P +#endif +#ifndef PIX_FMT_YUV444P + #define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P +#endif + +// Does ffmpeg pixel format contain an alpha channel? 
+inline static const bool ffmpeg_has_alpha(PixelFormat pix_fmt) { + const AVPixFmtDescriptor *fmt_desc = av_pix_fmt_desc_get(pix_fmt); + return bool(fmt_desc->flags & AV_PIX_FMT_FLAG_ALPHA); +} + +// FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's +// definition in ruby/config.h, so we move it to FF_RSHIFT +#ifdef RSHIFT + #define FF_RSHIFT(a, b) RSHIFT(a, b) + #undef RSHIFT +#endif + +// libswresample/libavresample API switching +#if USE_SW + #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \ + swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count) + #define SWR_ALLOC() swr_alloc() + #define SWR_CLOSE(ctx) {} + #define SWR_FREE(ctx) swr_free(ctx) + #define SWR_INIT(ctx) swr_init(ctx) + #define SWRCONTEXT SwrContext + +#else + #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \ + avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count) + #define SWR_ALLOC() avresample_alloc_context() + #define SWR_CLOSE(ctx) avresample_close(ctx) + #define SWR_FREE(ctx) avresample_free(ctx) + #define SWR_INIT(ctx) avresample_open(ctx) + #define SWRCONTEXT AVAudioResampleContext +#endif + + +#if (LIBAVFORMAT_VERSION_MAJOR >= 58) + #define AV_REGISTER_ALL + #define AVCODEC_REGISTER_ALL + #define AV_FILENAME url + #define AV_SET_FILENAME(oc, f) oc->AV_FILENAME = av_strdup(f) + #define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE + #define AV_ALLOCATE_FRAME() av_frame_alloc() + #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \ + av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1) + #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) + #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) + #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context) + #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type + #define 
AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id + #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \ + ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \ + avcodec_parameters_to_context(context, av_stream->codecpar); \ + context; }) + #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec; + #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in) + #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar + #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format + #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format + #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) \ + av_image_get_buffer_size(pix_fmt, width, height, 1) + #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \ + av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1) + #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path) + #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) + #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) \ + av_opt_set(priv_data, name, value, 0); \ + avcodec_parameters_from_context(av_stream->codecpar, avcodec); + #define AV_FORMAT_NEW_STREAM(oc, st_codec_ctx, av_codec, av_st) \ + av_st = avformat_new_stream(oc, NULL);\ + if (!av_st) \ + throw OutOfMemory("Could not allocate memory for the video stream.", path); \ + c = avcodec_alloc_context3(av_codec); \ + st_codec_ctx = c; \ + av_st->codecpar->codec_id = av_codec->id; + #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec_ctx) \ + avcodec_parameters_from_context(av_stream->codecpar, av_codec_ctx); + +#elif IS_FFMPEG_3_2 + #define AV_REGISTER_ALL av_register_all(); + #define AVCODEC_REGISTER_ALL avcodec_register_all(); + #define AV_FILENAME filename + #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f) 
+ #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE + #define AV_ALLOCATE_FRAME() av_frame_alloc() + #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \ + av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1) + #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) + #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) + #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context) + #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type + #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id + #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \ + ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \ + avcodec_parameters_to_context(context, av_stream->codecpar); \ + context; }) + #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec; + #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in) + #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar + #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) \ + (AVPixelFormat) av_stream->codecpar->format + #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format + #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1) + #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \ + av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1) + #define AV_OUTPUT_CONTEXT(output_context, path) \ + avformat_alloc_output_context2( output_context, NULL, NULL, path) + #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) + #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) \ + av_opt_set(priv_data, name, value, 0); \ + avcodec_parameters_from_context(av_stream->codecpar, avcodec); + #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) \ + av_st = avformat_new_stream(oc, 
NULL);\ + if (!av_st) \ + throw OutOfMemory("Could not allocate memory for the video stream.", path); \ + _Pragma ("GCC diagnostic push"); \ + _Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\""); \ + avcodec_get_context_defaults3(av_st->codec, av_codec); \ + c = av_st->codec; \ + _Pragma ("GCC diagnostic pop"); \ + st_codec = c; + #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) \ + avcodec_parameters_from_context(av_stream->codecpar, av_codec); + +#elif LIBAVFORMAT_VERSION_MAJOR >= 55 + #define AV_REGISTER_ALL av_register_all(); + #define AVCODEC_REGISTER_ALL avcodec_register_all(); + #define AV_FILENAME filename + #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f) + #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE + #define AV_ALLOCATE_FRAME() av_frame_alloc() + #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \ + avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height) + #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame) + #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame) + #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet) + #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context) + #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type + #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id + #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec + #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec + #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in) codec_in = av_stream->codec; + #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context + #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt + #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt + #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height) + #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \ + 
avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height) + #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context() + #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) + #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0) + #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) \ + av_st = avformat_new_stream(oc, av_codec); \ + if (!av_st) \ + throw OutOfMemory("Could not allocate memory for the video stream.", path); \ + avcodec_get_context_defaults3(av_st->codec, av_codec); \ + c = av_st->codec; + #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) + +#else + #define AV_REGISTER_ALL av_register_all(); + #define AVCODEC_REGISTER_ALL avcodec_register_all(); + #define AV_FILENAME filename + #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f) + #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE + #define AV_ALLOCATE_FRAME() avcodec_alloc_frame() + #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \ + avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height) + #define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame) + #define AV_FREE_FRAME(av_frame) avcodec_free_frame(av_frame) + #define AV_FREE_PACKET(av_packet) av_free_packet(av_packet) + #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context) + #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type + #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id + #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec + #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec + #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in ) codec_in = av_stream->codec; + #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context + #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt + #define AV_GET_SAMPLE_FORMAT(av_stream, 
av_context) av_context->sample_fmt + #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height) + #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \ + avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height) + #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context() + #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) + #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0) + #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) \ + av_st = avformat_new_stream(oc, av_codec); \ + if (!av_st) \ + throw OutOfMemory("Could not allocate memory for the video stream.", path); \ + avcodec_get_context_defaults3(av_st->codec, av_codec); \ + c = av_st->codec; + #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) +#endif + + +#endif // OPENSHOT_FFMPEG_UTILITIES_H diff --git a/src/OpenShotVersion.h.in b/src/OpenShotVersion.h.in index 197242d3..b6f41890 100644 --- a/src/OpenShotVersion.h.in +++ b/src/OpenShotVersion.h.in @@ -51,6 +51,7 @@ #cmakedefine01 HAVE_IMAGEMAGICK #cmakedefine01 HAVE_RESVG #cmakedefine01 HAVE_OPENCV +#cmakedefine01 FFMPEG_USE_SWRESAMPLE #cmakedefine01 APPIMAGE_BUILD #include From 1cec1843c628f2f9895609e4dc8154f1fbcde41c Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sat, 26 Jun 2021 15:48:16 -0500 Subject: [PATCH 19/71] We already init values in constructor, and moving incrementing to later in function. 
--- src/FFmpegWriter.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 32fd346f..52d0d8a2 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -92,10 +92,6 @@ FFmpegWriter::FFmpegWriter(const std::string& path) : info.has_audio = false; info.has_video = false; - // Init timestamps - write_video_count = 0; - write_audio_count = 0; - // Initialize FFMpeg, and register all formats and codecs AV_REGISTER_ALL @@ -1836,6 +1832,9 @@ void FFmpegWriter::write_audio_packets(bool is_final) { audio_encoder_buffer_size, 0); } + // Set the AVFrame's PTS + frame_final->pts = write_audio_count; + // Init the packet AVPacket pkt; av_init_packet(&pkt); @@ -1907,9 +1906,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); } - // Increment PTS (in samples) + // Increment PTS write_audio_count += FFMIN(audio_input_frame_size, audio_input_position); - frame_final->pts = write_audio_count; // Set the AVFrame's PTS // deallocate AVFrame av_freep(&(frame_final->data[0])); From 5b524aba2f8dddfaf5a7acc130114e2b9fa91c29 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sat, 26 Jun 2021 15:56:58 -0500 Subject: [PATCH 20/71] Experimental conversion of timestamp rescaling to use the av_packet_rescale_ts() method. I'm just not sure the backwards compatibility of this approach with older FFmpeg versions. 
--- src/FFmpegWriter.cpp | 35 +++++------------------------------ 1 file changed, 5 insertions(+), 30 deletions(-) diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 52d0d8a2..845c1099 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -870,12 +870,7 @@ void FFmpegWriter::flush_encoders() { avcodec_flush_buffers(video_codec_ctx); break; } - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base); + av_packet_rescale_ts(&pkt, video_codec_ctx->time_base, video_st->time_base); pkt.stream_index = video_st->index; error_code = av_interleaved_write_frame(oc, &pkt); } @@ -894,12 +889,7 @@ void FFmpegWriter::flush_encoders() { } // set the timestamp - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base); + av_packet_rescale_ts(&pkt, video_codec_ctx->time_base, video_st->time_base); pkt.stream_index = video_st->index; // Write packet @@ -940,12 +930,7 @@ void FFmpegWriter::flush_encoders() { pkt.pts = pkt.dts = write_audio_count; // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, 
audio_st->time_base); + av_packet_rescale_ts(&pkt, audio_codec_ctx->time_base, audio_st->time_base); // set stream pkt.stream_index = audio_st->index; @@ -1887,12 +1872,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { pkt.pts = pkt.dts = write_audio_count; // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base); + av_packet_rescale_ts(&pkt, audio_codec_ctx->time_base, audio_st->time_base); // set stream pkt.stream_index = audio_st->index; @@ -2144,12 +2124,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra /* if zero size, it means the image was buffered */ if (error_code == 0 && got_packet_ptr) { // set the timestamp - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base); + av_packet_rescale_ts(&pkt, video_codec_ctx->time_base, video_st->time_base); pkt.stream_index = video_st->index; /* write the compressed frame in the media file */ From 850140df7d9e2c2c1434d54341efd9e22659f56f Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sat, 26 Jun 2021 17:24:15 -0500 Subject: [PATCH 21/71] Switch to pkt.duration for incrementing PTS, and rename the 2 timestamp variables to something more sane. 
--- src/FFmpegWriter.cpp | 41 ++++++++++++++++++++--------------------- src/FFmpegWriter.h | 4 ++-- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 845c1099..92e9f8fa 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -84,7 +84,7 @@ FFmpegWriter::FFmpegWriter(const std::string& path) : path(path), fmt(NULL), oc(NULL), audio_st(NULL), video_st(NULL), samples(NULL), audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0), initial_audio_input_frame_size(0), img_convert_ctx(NULL), cache_size(8), num_of_rescalers(32), - rescaler_position(0), video_codec_ctx(NULL), audio_codec_ctx(NULL), is_writing(false), write_video_count(0), write_audio_count(0), + rescaler_position(0), video_codec_ctx(NULL), audio_codec_ctx(NULL), is_writing(false), video_timestamp(0), audio_timestamp(0), original_sample_rate(0), original_channels(0), avr(NULL), avr_planar(NULL), is_open(false), prepare_streams(false), write_header(false), write_trailer(false), audio_encoder_buffer_size(0), audio_encoder_buffer(NULL) { @@ -847,7 +847,7 @@ void FFmpegWriter::flush_encoders() { for (;;) { // Increment PTS (in frames and scaled to the codec's timebase) - write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); + video_timestamp += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); AVPacket pkt; av_init_packet(&pkt); @@ -906,7 +906,7 @@ void FFmpegWriter::flush_encoders() { av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; - pkt.pts = pkt.dts = write_audio_count; + pkt.pts = pkt.dts = audio_timestamp; /* encode the image */ int error_code = 0; @@ -927,7 +927,7 @@ void FFmpegWriter::flush_encoders() { // Since the PTS can change during encoding, set the value again. This seems like a huge hack, // but it fixes lots of PTS related issues when I do this. 
- pkt.pts = pkt.dts = write_audio_count; + pkt.pts = pkt.dts = audio_timestamp; // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) av_packet_rescale_ts(&pkt, audio_codec_ctx->time_base, audio_st->time_base); @@ -944,13 +944,12 @@ void FFmpegWriter::flush_encoders() { "error_code", error_code); } - // deallocate memory for packet + // Increment PTS by duration of packet + audio_timestamp += pkt.duration; + + // deallocate memory for packet AV_FREE_PACKET(&pkt); } - - // Increment PTS (in samples and scaled to the codec's timebase) - // for some reason, it requires me to multiply channels X 2 - write_audio_count += av_rescale_q(audio_input_position / (audio_codec_ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), av_make_q(1, info.sample_rate), audio_codec_ctx->time_base); } } @@ -1015,8 +1014,8 @@ void FFmpegWriter::Close() { } // Reset frame counters - write_video_count = 0; - write_audio_count = 0; + video_timestamp = 0; + audio_timestamp = 0; // Free the context which frees the streams too avformat_free_context(oc); @@ -1818,7 +1817,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { } // Set the AVFrame's PTS - frame_final->pts = write_audio_count; + frame_final->pts = audio_timestamp; // Init the packet AVPacket pkt; @@ -1827,7 +1826,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { pkt.size = audio_encoder_buffer_size; // Set the packet's PTS prior to encoding - pkt.pts = pkt.dts = write_audio_count; + pkt.pts = pkt.dts = audio_timestamp; /* encode the audio samples */ int got_packet_ptr = 0; @@ -1869,7 +1868,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { // Since the PTS can change during encoding, set the value again. This seems like a huge hack, // but it fixes lots of PTS related issues when I do this. 
- pkt.pts = pkt.dts = write_audio_count; + pkt.pts = pkt.dts = audio_timestamp; // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) av_packet_rescale_ts(&pkt, audio_codec_ctx->time_base, audio_st->time_base); @@ -1886,8 +1885,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); } - // Increment PTS - write_audio_count += FFMIN(audio_input_frame_size, audio_input_position); + // Increment PTS by duration of packet + audio_timestamp += pkt.duration; // deallocate AVFrame av_freep(&(frame_final->data[0])); @@ -2028,7 +2027,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra pkt.size = sizeof(AVPicture); // Set PTS (in frames and scaled to the codec's timebase) - pkt.pts = write_video_count; + pkt.pts = video_timestamp; /* write the compressed frame in the media file */ int error_code = av_interleaved_write_frame(oc, &pkt); @@ -2050,7 +2049,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra pkt.pts = pkt.dts = AV_NOPTS_VALUE; // Assign the initial AVFrame PTS from the frame counter - frame_final->pts = write_video_count; + frame_final->pts = video_timestamp; #if USE_HW_ACCEL if (hw_en_on && hw_en_supported) { if (!(hw_frame = av_frame_alloc())) { @@ -2135,6 +2134,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra } } + // Increment PTS (in frames and scaled to the codec's timebase) + video_timestamp += pkt.duration; + // Deallocate packet AV_FREE_PACKET(&pkt); #if USE_HW_ACCEL @@ -2147,9 +2149,6 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra #endif // USE_HW_ACCEL } - // Increment PTS (in frames and scaled to the codec's timebase) - write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); - // 
Success return true; } diff --git a/src/FFmpegWriter.h b/src/FFmpegWriter.h index 44f7cb35..79564b5c 100644 --- a/src/FFmpegWriter.h +++ b/src/FFmpegWriter.h @@ -151,8 +151,8 @@ namespace openshot { int cache_size; bool is_writing; bool is_open; - int64_t write_video_count; - int64_t write_audio_count; + int64_t video_timestamp; + int64_t audio_timestamp; bool prepare_streams; bool write_header; From 0f2f2e1514597e385e28b8fee9bee8845b5154b9 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Sun, 27 Jun 2021 15:35:12 -0400 Subject: [PATCH 22/71] Unit tests: --output-on-failure --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index a3535ec9..7fd0cb15 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -200,6 +200,7 @@ if(BUILD_TESTING) if(VERBOSE_TESTS) list(APPEND CTEST_OPTIONS "-VV") endif() + list(APPEND CTEST_OPTIONS "--output-on-failure") add_subdirectory(tests) endif() add_feature_info("Unit tests" ${BUILD_TESTING} "Compile unit tests for library functions") From b7d71cdc5216b1c2838ab4a7389999e30888213e Mon Sep 17 00:00:00 2001 From: Jackson Date: Thu, 1 Jul 2021 12:08:27 -0500 Subject: [PATCH 23/71] Only get rotation from metadata if 0 keyframes --- src/Clip.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Clip.cpp b/src/Clip.cpp index 8caaf836..eeffe894 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -122,12 +122,9 @@ void Clip::init_reader_settings() { // Init reader's rotation (if any) void Clip::init_reader_rotation() { // Only init rotation from reader when needed - if (rotation.GetCount() > 1) + if (rotation.GetCount() > 0) // Do nothing if more than 1 rotation Point return; - else if (rotation.GetCount() == 1 && rotation.GetValue(1) != 0.0) - // Do nothing if 1 Point, and it's not the default value - return; // Init rotation if (reader && reader->info.metadata.count("rotate") > 0) { From 21519f3bc3974cd4e6783390819742f93c234660 Mon Sep 17 00:00:00 2001 From: "FeRD 
(Frank Dana)" Date: Fri, 2 Jul 2021 20:39:30 -0400 Subject: [PATCH 24/71] Coordinate.h: Fix indentation, docs --- src/Coordinate.h | 90 +++++++++++++++++++++++++----------------------- 1 file changed, 46 insertions(+), 44 deletions(-) diff --git a/src/Coordinate.h b/src/Coordinate.h index f2b8b5fb..881b9807 100644 --- a/src/Coordinate.h +++ b/src/Coordinate.h @@ -37,55 +37,57 @@ namespace openshot { - /** - * @brief This class represents a Cartesian coordinate (X, Y) used in the Keyframe animation system. - * - * Animation involves the changing (i.e. interpolation) of numbers over time. A series of Coordinate - * objects allows us to plot a specific curve or line used during interpolation. In other words, it helps us - * control how a number changes over time (quickly or slowly). - * - * Please see the following Example Code: - * \code - * Coordinate c1(2,4); - * assert(c1.X == 2.0f); - * assert(c1.Y == 4.0f); - * \endcode - */ - class Coordinate { - public: - double X; ///< The X value of the coordinate (usually representing the frame #) - double Y; ///< The Y value of the coordinate (usually representing the value of the property being animated) +/** + * @brief A Cartesian coordinate (X, Y) used in the Keyframe animation system. + * + * Animation involves the changing (i.e. interpolation) of numbers over time. + * A series of Coordinate objects allows us to plot a specific curve or line + * used during interpolation. In other words, it helps us control how a + * value changes over time — whether it's increasing or decreasing + * (the direction of the slope) and how quickly (the steepness of the curve). 
+ * + * Please see the following Example Code: + * \code + * Coordinate c1(2,4); + * assert(c1.X == 2.0f); + * assert(c1.Y == 4.0f); + * \endcode + */ +class Coordinate { +public: + double X; ///< The X value of the coordinate (usually representing the frame #) + double Y; ///< The Y value of the coordinate (usually representing the value of the property being animated) - /// The default constructor, which defaults to (0,0) - Coordinate(); + /// The default constructor, which defaults to (0,0) + Coordinate(); - /// @brief Constructor which also sets the X and Y - /// @param x The X coordinate (usually representing the frame #) - /// @param y The Y coordinate (usually representing the value of the property being animated) - Coordinate(double x, double y); + /// @brief Constructor which also sets the X and Y + /// @param x The X coordinate (usually representing the frame #) + /// @param y The Y coordinate (usually representing the value of the property being animated) + Coordinate(double x, double y); - /// @brief Constructor which accepts a std::pair tuple for {X, Y} - /// @param co A std::pair tuple containing (X, Y) - Coordinate(const std::pair& co); + /// @brief Constructor which accepts a std::pair tuple for {X, Y} + /// @param co A std::pair tuple containing (X, Y) + Coordinate(const std::pair& co); - // Get and Set JSON methods - std::string Json() const; ///< Generate JSON string of this object - Json::Value JsonValue() const; ///< Generate Json::Value for this object - void SetJson(const std::string value); ///< Load JSON string into this object - void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object - }; + // Get and Set JSON methods + std::string Json() const; ///< Generate JSON string of this object + Json::Value JsonValue() const; ///< Generate Json::Value for this object + void SetJson(const std::string value); ///< Load JSON string into this object + void SetJsonValue(const Json::Value root); ///< Load Json::Value into this 
object +}; - /// Stream output operator for openshot::Coordinate - template - std::basic_ostream& - operator<<(std::basic_ostream& o, const openshot::Coordinate& co) { - std::basic_ostringstream s; - s.flags(o.flags()); - s.imbue(o.getloc()); - s.precision(o.precision()); - s << "(" << co.X << ", " << co.Y << ")"; - return o << s.str(); - }; +/// Stream output operator for openshot::Coordinate +template +std::basic_ostream& +operator<<(std::basic_ostream& o, const openshot::Coordinate& co) { + std::basic_ostringstream s; + s.flags(o.flags()); + s.imbue(o.getloc()); + s.precision(o.precision()); + s << "(" << co.X << ", " << co.Y << ")"; + return o << s.str(); +}; } From b3c43166fa81ea5211a54a56b87af307c416fada Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 2 Jul 2021 20:41:04 -0400 Subject: [PATCH 25/71] Fraction.h: Fix indentation --- src/Fraction.h | 93 +++++++++++++++++++++++++------------------------- 1 file changed, 47 insertions(+), 46 deletions(-) diff --git a/src/Fraction.h b/src/Fraction.h index a09db625..32064969 100644 --- a/src/Fraction.h +++ b/src/Fraction.h @@ -38,62 +38,63 @@ namespace openshot { - /** - * @brief This class represents a fraction - * - * Fractions are often used in video editing to represent ratios and rates, for example: - * pixel ratios, frames per second, timebase, and other common ratios. Fractions are preferred - * over decimals due to their increased precision. - */ - class Fraction { - public: - int num; /// pair); + /// Constructor that accepts a (num, den) pair + Fraction(std::pair pair); - /// Constructor that takes a vector of length 2 (containing {num, den}) - Fraction(std::vector vector); + /// Constructor that takes a vector of length 2 (containing {num, den}) + Fraction(std::vector vector); - /// Constructor that takes a key-value mapping (keys: 'num'. 'den') - Fraction(std::map mapping); + /// Constructor that takes a key-value mapping (keys: 'num'. 
'den') + Fraction(std::map mapping); - /// Calculate the greatest common denominator - int GreatestCommonDenominator(); + /// Calculate the greatest common denominator + int GreatestCommonDenominator(); - /// Reduce this fraction (i.e. 640/480 = 4/3) - void Reduce(); + /// Reduce this fraction (i.e. 640/480 = 4/3) + void Reduce(); - /// Return this fraction as a float (i.e. 1/2 = 0.5) - float ToFloat(); + /// Return this fraction as a float (i.e. 1/2 = 0.5) + float ToFloat(); - /// Return this fraction as a double (i.e. 1/2 = 0.5) - double ToDouble() const; + /// Return this fraction as a double (i.e. 1/2 = 0.5) + double ToDouble() const; - /// Return a rounded integer of the fraction (for example 30000/1001 returns 30) - int ToInt(); + /// Return a rounded integer of the fraction (for example 30000/1001 returns 30) + int ToInt(); - /// Return the reciprocal as a Fraction - Fraction Reciprocal() const; - }; + /// Return the reciprocal as a Fraction + Fraction Reciprocal() const; +}; - // Stream output operator for openshot::Fraction - template - std::basic_ostream& - operator<<(std::basic_ostream& o, const openshot::Fraction& frac) { - std::basic_ostringstream s; - s.flags(o.flags()); - s.imbue(o.getloc()); - s.precision(o.precision()); - s << "Fraction(" << frac.num << ", " << frac.den << ")"; - return o << s.str(); - }; -} +// Stream output operator for openshot::Fraction +template +std::basic_ostream& +operator<<(std::basic_ostream& o, const openshot::Fraction& frac) { + std::basic_ostringstream s; + s.flags(o.flags()); + s.imbue(o.getloc()); + s.precision(o.precision()); + s << "Fraction(" << frac.num << ", " << frac.den << ")"; + return o << s.str(); +}; + +} // namespace openshot #endif From 584e075f678efa1fbbabccc32f7df7da70d707e6 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Fri, 2 Jul 2021 21:03:26 -0400 Subject: [PATCH 26/71] Point: Rethink stream formatting --- src/Point.h | 196 ++++++++++++++++++++++++------------------------ tests/Point.cpp 
| 24 +++--- 2 files changed, 108 insertions(+), 112 deletions(-) diff --git a/src/Point.h b/src/Point.h index 2602fb9f..48ebea45 100644 --- a/src/Point.h +++ b/src/Point.h @@ -37,120 +37,118 @@ namespace openshot { - /** - * @brief This controls how a Keyframe uses this point to interpolate between two points. - * - * Bezier is a smooth curve. Linear is a straight line. Constant is a jump from the - * previous point to this one. - */ - enum InterpolationType { - BEZIER, ///< Bezier curves are quadratic curves, which create a smooth curve. - LINEAR, ///< Linear curves are angular, straight lines between two points. - CONSTANT ///< Constant curves jump from their previous position to a new one (with no interpolation). - }; +/** + * @brief This controls how a Keyframe uses this point to interpolate between two points. + * + * Bezier is a smooth curve. Linear is a straight line. Constant is a jump from the + * previous point to this one. + */ +enum InterpolationType { + BEZIER, ///< Bezier curves are quadratic curves, which create a smooth curve. + LINEAR, ///< Linear curves are angular, straight lines between two points. + CONSTANT ///< Constant curves jump from their previous position to a new one (with no interpolation). +}; - /** - * @brief When BEZIER interpolation is used, the point's left and right handles are used - * to influence the direction of the curve. - * - * AUTO will try and adjust the handles automatically, to achieve the smoothest curves. - * MANUAL will leave the handles alone, making it the responsibility of the user to set them. - */ - enum HandleType { - AUTO, ///< Automatically adjust the handles to achieve the smoothest curve - MANUAL ///< Do not automatically adjust handles (set them manually) - }; +/** + * @brief When BEZIER interpolation is used, the point's left and right handles are used + * to influence the direction of the curve. + * + * AUTO will try and adjust the handles automatically, to achieve the smoothest curves. 
+ * MANUAL will leave the handles alone, making it the responsibility of the user to set them. + */ +enum HandleType { + AUTO, ///< Automatically adjust the handles to achieve the smoothest curve + MANUAL ///< Do not automatically adjust handles (set them manually) +}; - /** - * @brief A Point is the basic building block of a key-frame curve. - * - * Points have a primary coordinate and a left and right handle coordinate. - * The handles are used to influence the direction of the curve as it - * moves between the primary coordinate and the next primary coordinate when the - * interpolation mode is BEZIER. When using LINEAR or CONSTANT, the handles are - * ignored. - * - * Please see the following Example Code: - * \code - * Coordinate c1(3,9); - * Point p1(c1, BEZIER); - * assert(c1.X == 3); - * assert(c1.Y == 9); - * - * \endcode - */ - class Point { - public: - Coordinate co; ///< This is the primary coordinate - Coordinate handle_left; ///< This is the left handle coordinate (in percentages from 0 to 1) - Coordinate handle_right; ///< This is the right handle coordinate (in percentages from 0 to 1) - InterpolationType interpolation; ///< This is the interpolation mode - HandleType handle_type; ///< This is the handle mode +/** + * @brief A Point is the basic building block of a key-frame curve. + * + * Points have a primary coordinate and a left and right handle coordinate. + * The handles are used to influence the direction of the curve as it + * moves between the primary coordinate and the next primary coordinate when the + * interpolation mode is BEZIER. When using LINEAR or CONSTANT, the handles are + * ignored. 
+ * + * Please see the following Example Code: + * \code + * Coordinate c1(3,9); + * Point p1(c1, BEZIER); + * assert(c1.X == 3); + * assert(c1.Y == 9); + * + * \endcode + */ +class Point { +public: + Coordinate co; ///< This is the primary coordinate + Coordinate handle_left; ///< This is the left handle coordinate (in percentages from 0 to 1) + Coordinate handle_right; ///< This is the right handle coordinate (in percentages from 0 to 1) + InterpolationType interpolation; ///< This is the interpolation mode + HandleType handle_type; ///< This is the handle mode - /// Default constructor (defaults to 1,0) - Point(); + /// Default constructor (defaults to 1,0) + Point(); - /// Constructor which creates a single coordinate at X=1 - Point(float y); + /// Constructor which creates a single coordinate at X=1 + Point(float y); - /// Constructor which also creates a Point and sets the X and Y of the Point. - Point(float x, float y); + /// Constructor which also creates a Point and sets the X and Y of the Point. + Point(float x, float y); - /// Constructor which also creates a Point and sets the X,Y, and interpolation of the Point. - Point(float x, float y, InterpolationType interpolation); + /// Constructor which also creates a Point and sets the X,Y, and interpolation of the Point. 
+ Point(float x, float y, InterpolationType interpolation); - /// Constructor which takes a coordinate - Point(const Coordinate& co); + /// Constructor which takes a coordinate + Point(const Coordinate& co); - /// Constructor which takes a coordinate and interpolation mode - Point(const Coordinate& co, InterpolationType interpolation); + /// Constructor which takes a coordinate and interpolation mode + Point(const Coordinate& co, InterpolationType interpolation); - /// Constructor which takes a coordinate, interpolation mode, and handle type - Point(const Coordinate& co, InterpolationType interpolation, HandleType handle_type); + /// Constructor which takes a coordinate, interpolation mode, and handle type + Point(const Coordinate& co, InterpolationType interpolation, HandleType handle_type); - /// Set the left and right handles to a percent of the primary coordinate (0 to 1) - /// Defaults to a smooth curve (Ease in and out) - void Initialize_Handles(); + /// Set the left and right handles to a percent of the primary coordinate (0 to 1) + /// Defaults to a smooth curve (Ease in and out) + void Initialize_Handles(); - /// Set the left handle to a percent of the primary coordinate (0 to 1) - void Initialize_LeftHandle(float x, float y); + /// Set the left handle to a percent of the primary coordinate (0 to 1) + void Initialize_LeftHandle(float x, float y); - /// Set the right handle to a percent of the primary coordinate (0 to 1) - void Initialize_RightHandle(float x, float y); + /// Set the right handle to a percent of the primary coordinate (0 to 1) + void Initialize_RightHandle(float x, float y); - // Get and Set JSON methods - std::string Json() const; ///< Generate JSON string of this object - Json::Value JsonValue() const; ///< Generate Json::Value for this object - void SetJson(const std::string value); ///< Load JSON string into this object - void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object + // Get and Set JSON methods + 
std::string Json() const; ///< Generate JSON string of this object + Json::Value JsonValue() const; ///< Generate Json::Value for this object + void SetJson(const std::string value); ///< Load JSON string into this object + void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object - }; +}; - // Stream output operator for openshot::Point - template - std::basic_ostream& - operator<<(std::basic_ostream& o, const openshot::Point& p) { - std::basic_ostringstream s; - s.flags(o.flags()); - s.imbue(o.getloc()); - s.precision(o.precision()); - s << "co" << p.co; - switch(p.interpolation) { - case(openshot::LINEAR): - s << " interpolation(LINEAR)"; - break; - case(openshot::CONSTANT): - s << " interpolation(CONSTANT)"; - break; - case(openshot::BEZIER): - s << " interpolation(BEZIER)" - << " handle_left" << p.handle_left - << " handle_right" << p.handle_right; - break; - } - return o << s.str(); - }; +// Stream output operator for openshot::Point +template +std::basic_ostream& +operator<<(std::basic_ostream& o, const openshot::Point& p) { + std::basic_ostringstream s; + s.flags(o.flags()); + s.imbue(o.getloc()); + s.precision(o.precision()); + s << "co" << p.co; + switch(p.interpolation) { + case(InterpolationType::LINEAR): + s << " LINEAR"; + break; + case(InterpolationType::CONSTANT): + s << " CONSTANT"; + break; + case(InterpolationType::BEZIER): + s << " BEZIER[L" << p.handle_left << ",R" << p.handle_right << ']'; + break; + } + return o << s.str(); +}; -} +} // namespace openshot #endif diff --git a/tests/Point.cpp b/tests/Point.cpp index f4e7792b..57cff7c7 100644 --- a/tests/Point.cpp +++ b/tests/Point.cpp @@ -52,7 +52,7 @@ TEST_CASE( "default constructor", "[libopenshot][point]" ) CHECK(p.interpolation == openshot::InterpolationType::BEZIER); CHECK(p.handle_type == openshot::HandleType::AUTO); } -TEST_CASE( "XY constructor", "[libopenshot][point]" ) +TEST_CASE( "x,y ctor", "[libopenshot][point]" ) { // Create a point with X and Y values 
openshot::Point p1(2,9); @@ -62,7 +62,7 @@ TEST_CASE( "XY constructor", "[libopenshot][point]" ) CHECK(p1.interpolation == openshot::InterpolationType::BEZIER); } -TEST_CASE( "std::pair constructor", "[libopenshot][point]" ) +TEST_CASE( "std::pair ctor", "[libopenshot][point]" ) { // Create a point from a std::pair std::pair coordinates(22, 5); @@ -72,7 +72,7 @@ TEST_CASE( "std::pair constructor", "[libopenshot][point]" ) CHECK(p1.co.Y == Approx(5.0f).margin(0.00001)); } -TEST_CASE( "constructor from Coordinate", "[libopenshot][point]" ) +TEST_CASE( "Coordinate ctor", "[libopenshot][point]" ) { // Create a point with a coordinate openshot::Coordinate c1(3,7); @@ -83,7 +83,7 @@ TEST_CASE( "constructor from Coordinate", "[libopenshot][point]" ) CHECK(p1.interpolation == openshot::InterpolationType::BEZIER); } -TEST_CASE( "constructor from Coordinate, LINEAR", "[libopenshot][point]" ) +TEST_CASE( "Coordinate ctor, LINEAR", "[libopenshot][point]" ) { // Create a point with a coordinate and interpolation openshot::Coordinate c1(3,9); @@ -95,7 +95,7 @@ TEST_CASE( "constructor from Coordinate, LINEAR", "[libopenshot][point]" ) CHECK(p1.interpolation == openshot::InterpolationType::LINEAR); } -TEST_CASE( "constructor from Coordinate, BEZIER", "[libopenshot][point]" ) +TEST_CASE( "Coordinate ctor, BEZIER", "[libopenshot][point]" ) { // Create a point with a coordinate and interpolation openshot::Coordinate c1(3,9); @@ -107,7 +107,7 @@ TEST_CASE( "constructor from Coordinate, BEZIER", "[libopenshot][point]" ) CHECK(p1.interpolation == openshot::InterpolationType::BEZIER); } -TEST_CASE( "constructor from Coordinate, CONSTANT", "[libopenshot][point]" ) +TEST_CASE( "Coordinate ctor, CONSTANT", "[libopenshot][point]" ) { // Create a point with a coordinate and interpolation openshot::Coordinate c1(2,8); @@ -119,7 +119,7 @@ TEST_CASE( "constructor from Coordinate, CONSTANT", "[libopenshot][point]" ) CHECK(p1.interpolation == openshot::InterpolationType::CONSTANT); } -TEST_CASE( 
"constructor from Coordinate, BEZIER+AUTO", "[libopenshot][point]" ) +TEST_CASE( "Coordinate ctor, BEZIER+AUTO", "[libopenshot][point]" ) { // Create a point with a coordinate and interpolation openshot::Coordinate c1(3,9); @@ -133,7 +133,7 @@ TEST_CASE( "constructor from Coordinate, BEZIER+AUTO", "[libopenshot][point]" ) CHECK(p1.handle_type == openshot::HandleType::AUTO); } -TEST_CASE( "constructor from Coordinate, BEZIER+MANUAL", "[libopenshot][point]" ) +TEST_CASE( "Coordinate ctor, BEZIER+MANUAL", "[libopenshot][point]" ) { // Create a point with a coordinate and interpolation openshot::Coordinate c1(3,9); @@ -197,17 +197,15 @@ TEST_CASE( "Operator ostream", "[libopenshot][point]" ) std::stringstream output1; openshot::Point p1(c1, openshot::InterpolationType::LINEAR); output1 << p1; - CHECK(output1.str() == "co(10, 5) interpolation(LINEAR)"); + CHECK(output1.str() == "co(10, 5) LINEAR"); std::stringstream output2; openshot::Point p2(c1, openshot::InterpolationType::CONSTANT); output2 << p2; - CHECK(output2.str() == "co(10, 5) interpolation(CONSTANT)"); + CHECK(output2.str() == "co(10, 5) CONSTANT"); std::stringstream output3; openshot::Point p3(c1, openshot::InterpolationType::BEZIER); output3 << p3; - CHECK( - output3.str() == - "co(10, 5) interpolation(BEZIER) handle_left(0.5, 1) handle_right(0.5, 0)"); + CHECK(output3.str() == "co(10, 5) BEZIER[L(0.5, 1),R(0.5, 0)]"); } From 9ca63b321a91da727c62b4d93b9948ed1065aca7 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sat, 3 Jul 2021 17:01:55 -0500 Subject: [PATCH 27/71] Reverting video_timestamp increment logic which used pkt.duration. In some codecs (such as vp8), this approach breaks due to differences in the timebase vs the framerate. For example, if the timebase is an inverse of the FPS, everything works. But if the timebase is not, for example 1/1000000, this approach breaks. 
--- src/FFmpegReader.cpp | 4 ++-- src/FFmpegWriter.cpp | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index b97d7345..98d39ce1 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -768,12 +768,12 @@ void FFmpegReader::UpdateVideoInfo() { // Check for valid duration (if found) if (info.duration <= 0.0f && pFormatCtx->duration >= 0) // Use the format's duration - info.duration = pFormatCtx->duration / AV_TIME_BASE; + info.duration = float(pFormatCtx->duration) / AV_TIME_BASE; // Calculate duration from filesize and bitrate (if any) if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0) // Estimate from bitrate, total bytes, and framerate - info.duration = (info.file_size / info.video_bit_rate); + info.duration = float(info.file_size) / info.video_bit_rate; // No duration found in stream of file if (info.duration <= 0.0f) { diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 92e9f8fa..f3b12800 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -947,7 +947,7 @@ void FFmpegWriter::flush_encoders() { // Increment PTS by duration of packet audio_timestamp += pkt.duration; - // deallocate memory for packet + // deallocate memory for packet AV_FREE_PACKET(&pkt); } } @@ -1885,8 +1885,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); } - // Increment PTS by duration of packet - audio_timestamp += pkt.duration; + // Increment PTS (no pkt.duration, so calculate with maths) + audio_timestamp += FFMIN(audio_input_frame_size, audio_input_position); // deallocate AVFrame av_freep(&(frame_final->data[0])); @@ -2134,9 +2134,6 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra } } - // Increment PTS (in frames and scaled to the codec's timebase) - video_timestamp 
+= pkt.duration; - // Deallocate packet AV_FREE_PACKET(&pkt); #if USE_HW_ACCEL @@ -2149,6 +2146,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra #endif // USE_HW_ACCEL } + // Increment PTS (in frames and scaled to the codec's timebase) + video_timestamp += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); + // Success return true; } From dd859001edcc6a2d33b1cc0687b3be64905666ee Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Sat, 3 Jul 2021 18:50:58 -0400 Subject: [PATCH 28/71] Unit tests: --output-on-failure (#697) From db7f0fb5e7d36bdd7d15221720b6123fad76820e Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 7 Jul 2021 00:08:27 -0500 Subject: [PATCH 29/71] Fix audio encoding on FFmpeg 4.4. AVFrame was missing a few properties that need to be set in newer versions of FFmpeg. This is also compatible with FFmpeg 3+. --- src/FFmpegWriter.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index f3b12800..d1ad3e63 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -1722,7 +1722,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { // Convert to planar (if needed by audio codec) AVFrame *frame_final = AV_ALLOCATE_FRAME(); AV_RESET_FRAME(frame_final); - if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) { + if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) { ZmqLogger::Instance()->AppendDebugMethod( "FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", "in_sample_fmt", output_sample_fmt, @@ -1768,8 +1768,11 @@ void FFmpegWriter::write_audio_packets(bool is_final) { // Create output frame (and allocate arrays) frame_final->nb_samples = audio_input_frame_size; + frame_final->channels = info.channels; + frame_final->format = audio_codec_ctx->sample_fmt; + frame_final->channel_layout = info.channel_layout; av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, - 
frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0); + frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0); // Convert audio samples int nb_samples = SWR_CONVERT( @@ -1786,7 +1789,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { if (nb_samples > 0) { memcpy(samples, frame_final->data[0], nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels); - } + } // deallocate AVFrame av_freep(&(audio_frame->data[0])); From 92b62517a693c455885c7d2dad0fe44e3ac92164 Mon Sep 17 00:00:00 2001 From: Jackson Date: Wed, 7 Jul 2021 15:29:42 -0500 Subject: [PATCH 30/71] removed comment with number --- src/Clip.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Clip.cpp b/src/Clip.cpp index eeffe894..5b427035 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -121,9 +121,8 @@ void Clip::init_reader_settings() { // Init reader's rotation (if any) void Clip::init_reader_rotation() { - // Only init rotation from reader when needed + // Dont init rotation if clip has keyframes if (rotation.GetCount() > 0) - // Do nothing if more than 1 rotation Point return; // Init rotation From 22b740880959a055fad1b59e56c480a101e77e64 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Jul 2021 19:43:09 -0300 Subject: [PATCH 31/71] Added voice robotization and whisperization effect working --- src/CMakeLists.txt | 7 +- src/EffectInfo.cpp | 8 + src/Effects.h | 2 + src/Enums.h | 36 +++ src/audio_effects/Distortion.cpp | 10 +- src/audio_effects/Pitch.cpp | 418 +++++++++++++++++++++++++++++ src/audio_effects/Pitch.h | 145 ++++++++++ src/audio_effects/Robotization.cpp | 208 ++++++++++++++ src/audio_effects/Robotization.h | 161 +++++++++++ src/audio_effects/STFT.cpp | 175 ++++++++++++ src/audio_effects/STFT.h | 72 +++++ 11 files changed, 1235 insertions(+), 7 deletions(-) create mode 100644 src/audio_effects/Pitch.cpp create mode 100644 src/audio_effects/Pitch.h create mode 100644 src/audio_effects/Robotization.cpp create mode 100644 
src/audio_effects/Robotization.h create mode 100644 src/audio_effects/STFT.cpp create mode 100644 src/audio_effects/STFT.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a2a8f956..1a5b2e62 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -131,10 +131,13 @@ set(EFFECTS_SOURCES effects/Saturation.cpp effects/Shift.cpp effects/Wave.cpp + audio_effects/STFT.cpp audio_effects/Noise.cpp audio_effects/Distortion.cpp audio_effects/ParametricEQ.cpp - audio_effects/Compressor.cpp) + audio_effects/Compressor.cpp + audio_effects/Pitch.cpp + audio_effects/Robotization.cpp) # Qt video player components set(QT_PLAYER_SOURCES @@ -294,7 +297,7 @@ mark_as_advanced(QT_VERSION_STR) # Find FFmpeg libraries (used for video encoding / decoding) find_package(FFmpeg REQUIRED COMPONENTS avcodec avformat avutil swscale) -set(all_comps avcodec avformat avutil swscale avresample) +set(all_comps avcodec avformat avutil swscale) if(TARGET FFmpeg::swresample) list(APPEND all_comps swresample) else() diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp index 4c8d6f33..92410871 100644 --- a/src/EffectInfo.cpp +++ b/src/EffectInfo.cpp @@ -100,6 +100,12 @@ EffectBase* EffectInfo::CreateEffect(std::string effect_type) { else if(effect_type == "Compressor") return new Compressor(); + else if(effect_type == "Pitch") + return new Pitch(); + + else if(effect_type == "Robotization") + return new Robotization(); + #ifdef USE_OPENCV else if(effect_type == "Stabilizer") return new Stabilizer(); @@ -141,6 +147,8 @@ Json::Value EffectInfo::JsonValue() { root.append(Distortion().JsonInfo()); root.append(ParametricEQ().JsonInfo()); root.append(Compressor().JsonInfo()); + root.append(Pitch().JsonInfo()); + root.append(Robotization().JsonInfo()); #ifdef USE_OPENCV root.append(Stabilizer().JsonInfo()); diff --git a/src/Effects.h b/src/Effects.h index 0dca06ba..9fec4669 100644 --- a/src/Effects.h +++ b/src/Effects.h @@ -53,6 +53,8 @@ #include "audio_effects/Distortion.h" #include 
"audio_effects/ParametricEQ.h" #include "audio_effects/Compressor.h" +#include "audio_effects/Pitch.h" +#include "audio_effects/Robotization.h" /* OpenCV Effects */ #ifdef USE_OPENCV diff --git a/src/Enums.h b/src/Enums.h index 5377c6d7..764df9a2 100644 --- a/src/Enums.h +++ b/src/Enums.h @@ -112,5 +112,41 @@ namespace openshot EXPANDER, NOISE_GATE, }; + + /// This enumeration determines the FFT size. + enum FFTSize + { + FFT_SIZE_32, + FFT_SIZE_64, + FFT_SIZE_128, + FFT_SIZE_256, + FFT_SIZE_512, + FFT_SIZE_1024, + FFT_SIZE_2048, + FFT_SIZE_4096, + FFT_SIZE_8192, + }; + + /// This enumeration determines the hop size. + enum HopSize { + HOP_SIZE_2, + HOP_SIZE_4, + HOP_SIZE_8, + }; + + /// This enumeration determines the window type. + enum WindowType { + RECTANGULAR, + BART_LETT, + HANN, + HAMMING, + }; + + enum RobotizationEffectType { + PASS_THROUGH, + ROBOTIZATION, + WHISPERIZATION, + }; + } #endif diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp index 8faa7d30..1266c56d 100644 --- a/src/audio_effects/Distortion.cpp +++ b/src/audio_effects/Distortion.cpp @@ -78,7 +78,7 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptraudio->getNumChannels(); channel++) { //auto *inBuffer = frame->audio->getReadPointer(channel); - auto *channelData = frame->audio->getWritePointer(channel); + auto *channel_data = frame->audio->getWritePointer(channel); float out; for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample) @@ -86,7 +86,7 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptr Distortion::GetFrame(std::shared_ptrprocessSingleSampleRaw(out); - channelData[sample] = filtered*powf(10.0f, output_gain_value * 0.05f); + channel_data[sample] = filtered*powf(10.0f, output_gain_value * 0.05f); } } @@ -152,11 +152,11 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptrupdateCoefficients(discreteFrequency, gain); + filters[i]->updateCoefficients(discrete_frequency, gain); } // Generate JSON string of this object diff --git 
a/src/audio_effects/Pitch.cpp b/src/audio_effects/Pitch.cpp new file mode 100644 index 00000000..134daa91 --- /dev/null +++ b/src/audio_effects/Pitch.cpp @@ -0,0 +1,418 @@ +/** + * @file + * @brief Source file for Pitch audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Pitch.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Pitch::Pitch() : shift(0), fft_size(FFT_SIZE_32), hop_size(HOP_SIZE_2), window_type(BART_LETT){ + // Init effect properties + init_effect_details(); +} + +// Default constructor +Pitch::Pitch(Keyframe new_shift, openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type) : + shift(new_shift), fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Pitch::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Pitch"; + info.name = "Pitch Shift"; + info.description = "Change pitch of the frame's sound."; + info.has_audio = true; + info.has_video = false; +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Pitch::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + const ScopedLock sl (lock); + ScopedNoDenormals noDenormals; + + updateFftSize(frame); + updateHopSize(); + updateAnalysisWindow(); + updateWindowScaleFactor(); + + // const ScopedLock sl (lock); + // ScopedNoDenormals noDenormals; + + // copy of the AudioBuffer frame->audio object (not the pointer) + // input_buffer = std::make_shared>(*frame->audio); + // output_buffer = std::make_shared>(*frame->audio); + // frame->audio; + + const int num_input_channels = frame->audio->getNumChannels(); + const int num_output_channels = frame->audio->getNumChannels(); + const int num_samples = frame->audio->getNumSamples(); + + int current_input_buffer_write_position; + int current_output_buffer_write_position; + int current_output_buffer_read_position; + int current_samples_since_last_FFT; + + float shift_value = powf(2.0f, shift.GetValue(frame_number) / 12.0f); + int hop_size_value = 1 << ((int)hop_size + 1); + int fft_size_value = 1 << ((int)fft_size + 5); + + float ratio = roundf(shift_value*(float)hop_size_value/(float)hop_size_value); + int resampled_length = floorf((float)fft_size_value/ratio); + juce::HeapBlock resampled_output(resampled_length, true); + juce::HeapBlock synthesis_window(resampled_length, true); + updateWindow(synthesis_window, resampled_length); + + for (int channel = 0; channel < num_input_channels; channel++) + { + float* channel_data = frame->audio->getWritePointer(channel); + + current_input_buffer_write_position = input_buffer_write_position; + current_output_buffer_write_position = output_buffer_write_position; + 
current_output_buffer_read_position = output_buffer_read_position; + current_samples_since_last_FFT = samples_since_last_FFT; + + for (int sample = 0; sample < num_samples; ++sample) + { + const float in = channel_data[sample]; + channel_data[sample] = output_buffer.getSample(channel, current_output_buffer_read_position); + + output_buffer.setSample(channel, current_output_buffer_read_position, 0.0f); + if (++current_output_buffer_read_position >= output_buffer_length) + current_output_buffer_read_position = 0; + + input_buffer.setSample(channel, current_input_buffer_write_position, in); + if (++current_input_buffer_write_position >= input_buffer_length) + current_input_buffer_write_position = 0; + + if (++current_samples_since_last_FFT >= hop_size_value) + { + current_samples_since_last_FFT = 0; + + int input_buffer_index = current_input_buffer_write_position; + + for (int index = 0; index < fft_size_value; ++index) { + fft_time_domain[index].real(sqrtf(fft_window[index]) * input_buffer.getSample(channel, input_buffer_index)); + fft_time_domain[index].imag(0.0f); + + if (++input_buffer_index >= input_buffer_length) + input_buffer_index = 0; + } + + fft->perform(fft_time_domain, fft_frequency_domain, false); + + /* + if (paramShift.isSmoothing()) + needToResetPhases = true; + if (shift == paramShift.getTargetValue() && needToResetPhases) { + inputPhase.clear(); + outputPhase.clear(); + needToResetPhases = false; + } + */ + + for (int index = 0; index < fft_size_value; ++index) { + float magnitude = abs(fft_frequency_domain[index]); + float phase = arg(fft_frequency_domain[index]); + + float phase_deviation = phase - input_phase.getSample(channel, index) - omega[index] * (float)hop_size_value; + float delta_phi = omega[index] * hop_size_value + princArg(phase_deviation); + float new_phase = princArg(output_phase.getSample(channel, index) + delta_phi * ratio); + + input_phase.setSample(channel, index, phase); + output_phase.setSample(channel, index, new_phase); + 
fft_frequency_domain[index] = std::polar(magnitude, new_phase); + } + + fft->perform(fft_frequency_domain, fft_time_domain, true); + + for (int index = 0; index < resampled_length; ++index) { + float x = (float)index * (float)fft_size_value / (float)resampled_length; + int ix = (int)floorf(x); + float dx = x - (float)ix; + + float sample1 = fft_time_domain[ix].real(); + float sample2 = fft_time_domain[(ix + 1) % fft_size_value].real(); + resampled_output[index] = sample1 + dx * (sample2 - sample1); + resampled_output[index] *= sqrtf(synthesis_window[index]); + } + + int output_buffer_index = current_output_buffer_write_position; + + for (int index = 0; index < resampled_length; ++index) { + float out = output_buffer.getSample(channel, output_buffer_index); + out += resampled_output[index] * window_scale_factor; + output_buffer.setSample(channel, output_buffer_index, out); + + if (++output_buffer_index >= output_buffer_length) + output_buffer_index = 0; + } + + current_output_buffer_write_position += hop_size_value; + if (current_output_buffer_write_position >= output_buffer_length) + current_output_buffer_write_position = 0; + } + + } + } + + input_buffer_write_position = current_input_buffer_write_position; + output_buffer_write_position = current_output_buffer_write_position; + current_output_buffer_read_position = current_output_buffer_read_position; + samples_since_last_FFT = current_samples_since_last_FFT; + + for (int channel = num_input_channels; channel < num_output_channels; ++channel) + frame->audio->clear(channel, 0, num_samples); + + // frame->audio = std::make_shared>(output_buffer); + + // return the modified frame + return frame; +} + +void Pitch::updateFftSize(std::shared_ptr frame) +{ + int fft_size_value = 1 << ((int)fft_size + 5); + fft = std::make_unique(log2(fft_size_value)); + + input_buffer_length = fft_size_value; + input_buffer_write_position = 0; + input_buffer.clear(); + input_buffer.setSize(frame->audio->getNumChannels(), 
input_buffer_length); + + float max_ratio = powf(2.0f, -12/12.0f); + + output_buffer_length = (int)floorf ((float)fft_size_value / max_ratio); + + output_buffer_write_position = 0; + output_buffer_read_position = 0; + output_buffer.clear(); + output_buffer.setSize(frame->audio->getNumChannels(), output_buffer_length); + + fft_window.realloc(fft_size_value); + fft_window.clear(fft_size_value); + + fft_time_domain.realloc(fft_size_value); + fft_time_domain.clear(fft_size_value); + + fft_frequency_domain.realloc(fft_size_value); + fft_frequency_domain.clear(fft_size_value); + + samples_since_last_FFT = 0; + + //====================================== + + omega.realloc(fft_size_value); + + for (int index = 0; index < fft_size_value; ++index) + omega[index] = 2.0f * M_PI * index / (float)fft_size_value; + + input_phase.clear(); + input_phase.setSize(frame->audio->getNumChannels(), output_buffer_length); + + output_phase.clear(); + output_phase.setSize(frame->audio->getNumChannels(), output_buffer_length); +} + + +void Pitch::updateHopSize() +{ + int hop_size_value = 1 << ((int)hop_size + 1); + int fft_size_value = 1 << ((int)fft_size + 5); + overlap = hop_size_value; + + if (overlap != 0) { + hop_size_value = fft_size_value / overlap; + // hop_size = hop_size_value; + output_buffer_write_position = hop_size_value % output_buffer_length; + } +} + + +void Pitch::updateWindowScaleFactor() +{ + int fft_size_value = 1 << ((int)fft_size + 5); + float window_sum = 0.0f; + + for (int sample = 0; sample < fft_size_value; ++sample) + window_sum += fft_window[sample]; + + window_scale_factor = 0.0f; + + if (overlap != 0 && window_sum != 0.0f) + window_scale_factor = 1.0f / (float)overlap / window_sum * (float)fft_size_value; +} + + +void Pitch::updateAnalysisWindow() +{ + int fft_size_value = 1 << ((int)fft_size + 5); + updateWindow(fft_window, fft_size_value); +} + +void Pitch::updateWindow(const juce::HeapBlock &window, const int window_length) +{ + switch ((int)window_type) { + 
case BART_LETT: { + for (int sample = 0; sample < window_length; ++sample) + window[sample] = 1.0f - fabs(2.0f * (float)sample / (float)(window_length - 1) - 1.0f); + break; + } + case HANN: { + for (int sample = 0; sample < window_length; ++sample) + window[sample] = 0.5f - 0.5f * cosf(2.0f * M_PI * (float)sample / (float)(window_length - 1)); + break; + } + case HAMMING: { + for (int sample = 0; sample < window_length; ++sample) + window[sample] = 0.54f - 0.46f * cosf(2.0f * M_PI * (float)sample / (float)(window_length - 1)); + break; + } + } +} + +float Pitch::princArg(const float phase) +{ + if (phase >= 0.0f) + return fmod(phase + M_PI, 2.0f * M_PI) - M_PI; + else + return fmod(phase + M_PI, -2.0f * M_PI) + M_PI; +} + +// Generate JSON string of this object +std::string Pitch::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Pitch::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["shift"] = shift.JsonValue(); + root["fft_size"] = fft_size; + root["hop_size"] = hop_size; + root["window_type"] = window_type; + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Pitch::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Pitch::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + if (!root["fft_size"].isNull()) + fft_size = (FFTSize)root["fft_size"].asInt(); + + if (!root["hop_size"].isNull()) + hop_size = 
(HopSize)root["hop_size"].asInt(); + + if (!root["window_type"].isNull()) + window_type = (WindowType)root["window_type"].asInt(); + + // Set data from Json (if key is found) + if (!root["shift"].isNull()) + shift.SetJsonValue(root["shift"]); +} + +// Get all properties for a specific frame +std::string Pitch::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["shift"] = add_property_json("Shift", shift.GetValue(requested_frame), "float", "", &shift, -12, 12, false, requested_frame); + root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame); + root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame); + root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 2, false, requested_frame); + + // Add fft_size choices (dropdown style) + root["fft_size"]["choices"].append(add_property_choice_json("32", FFT_SIZE_32, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("64", FFT_SIZE_64, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size)); + 
root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("4096", FFT_SIZE_4096, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("8192", FFT_SIZE_8192, fft_size)); + + // Add hop_size choices (dropdown style) + root["hop_size"]["choices"].append(add_property_choice_json("2", HOP_SIZE_2, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("4", HOP_SIZE_4, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("8", HOP_SIZE_8, hop_size)); + + // Add window_type choices (dropdown style) + root["window_type"]["choices"].append(add_property_choice_json("Bart Lett", BART_LETT, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type)); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Pitch.h b/src/audio_effects/Pitch.h new file mode 100644 index 00000000..9b2ff234 --- /dev/null +++ b/src/audio_effects/Pitch.h @@ -0,0 +1,145 @@ +/** + * @file + * @brief Header file for Pitch audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_PITCH_AUDIO_EFFECT_H +#define OPENSHOT_PITCH_AUDIO_EFFECT_H +#define _USE_MATH_DEFINES + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" +#include "../Enums.h" + +#include +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a pitch into the audio + * + */ + class Pitch : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + Keyframe shift; ///< Pitch shift keyframe. The pitch shift inserted on the audio. + openshot::FFTSize fft_size; + openshot::HopSize hop_size; + openshot::WindowType window_type; + + /// Blank constructor, useful when using Json to load the effect properties + Pitch(); + + /// Default constructor + /// + /// @param new_level The audio default pitch level (between 1 and 100) + Pitch(Keyframe new_shift, openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + + + void updateFftSize(std::shared_ptr frame); + void updateHopSize(); + void updateAnalysisWindow(); + void updateWindow(const juce::HeapBlock& window, const int window_length); + void updateWindowScaleFactor(); + float princArg(const float phase); + + + juce::CriticalSection lock; + std::unique_ptr fft; + + int input_buffer_length; + int input_buffer_write_position; + juce::AudioSampleBuffer input_buffer; + + int output_buffer_length; + int output_buffer_write_position; + int output_buffer_read_position; + juce::AudioSampleBuffer output_buffer; + + juce::HeapBlock fft_window; + juce::HeapBlock> fft_time_domain; + juce::HeapBlock> fft_frequency_domain; + + int 
samples_since_last_FFT; + + int overlap; + int hopSize; + float window_scale_factor; + + juce::HeapBlock omega; + juce::AudioSampleBuffer input_phase; + juce::AudioSampleBuffer output_phase; + bool need_to_reset_phases; + }; + +} + +#endif diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp new file mode 100644 index 00000000..f98ed2a6 --- /dev/null +++ b/src/audio_effects/Robotization.cpp @@ -0,0 +1,208 @@ +/** + * @file + * @brief Source file for Robotization audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . 
+ */ + +#include "Robotization.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Robotization::Robotization() : fft_size(FFT_SIZE_2048), hop_size(HOP_SIZE_8), window_type(RECTANGULAR), effect_type(ROBOTIZATION), stft(*this) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Robotization::Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type, openshot::RobotizationEffectType new_effect_type) : + fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type), effect_type(new_effect_type), stft(*this) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Robotization::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. + InitEffectInfo(); + // stft = RobotizationWhisperizationEffect(*this); + + /// Set the effect info + info.class_name = "Robotization"; + info.name = "Robotization"; + info.description = "Robotization effect on the frame's sound."; + info.has_audio = true; + info.has_video = false; +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Robotization::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + const ScopedLock sl (lock); + ScopedNoDenormals noDenormals; + + // const ScopedLock sl (lock); + // ScopedNoDenormals noDenormals; + + // copy of the AudioBuffer frame->audio object (not the pointer) + // input_buffer = std::make_shared>(*frame->audio); + // output_buffer = std::make_shared>(*frame->audio); + // frame->audio; + + const int num_input_channels = frame->audio->getNumChannels(); + const int num_output_channels = frame->audio->getNumChannels(); + const int num_samples = frame->audio->getNumSamples(); + const int hop_size_value = 1 << ((int)hop_size + 1); + const int fft_size_value = 1 << 
((int)fft_size + 5); + + stft.setup(num_input_channels); + stft.updateParameters((int)fft_size_value, + (int)hop_size_value, + (int)window_type); + + stft.process(*frame->audio); + + for (int channel = num_input_channels; channel < num_output_channels; ++channel) + frame->audio->clear(channel, 0, num_samples); + + // return the modified frame + return frame; +} + +// Generate JSON string of this object +std::string Robotization::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Robotization::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["fft_size"] = fft_size; + root["hop_size"] = hop_size; + root["window_type"] = window_type; + root["effect_type"] = effect_type; + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Robotization::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Robotization::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + if (!root["fft_size"].isNull()) + fft_size = (FFTSize)root["fft_size"].asInt(); + + if (!root["hop_size"].isNull()) + hop_size = (HopSize)root["hop_size"].asInt(); + + if (!root["window_type"].isNull()) + window_type = (WindowType)root["window_type"].asInt(); + + if (!root["effect_type"].isNull()) + effect_type = (RobotizationEffectType)root["effect_type"].asInt(); + + // Set data from Json (if key is found) + // if (!root["shift"].isNull()) + // shift.SetJsonValue(root["shift"]); 
+} + +// Get all properties for a specific frame +std::string Robotization::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame); + root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame); + root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 3, false, requested_frame); + root["effect_type"] = add_property_json("Effect Type", effect_type, "int", "", NULL, 0, 2, false, requested_frame); + + // Add fft_size choices (dropdown style) + root["fft_size"]["choices"].append(add_property_choice_json("32", FFT_SIZE_32, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("64", FFT_SIZE_64, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("4096", 
FFT_SIZE_4096, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("8192", FFT_SIZE_8192, fft_size)); + + // Add hop_size choices (dropdown style) + root["hop_size"]["choices"].append(add_property_choice_json("2", HOP_SIZE_2, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("4", HOP_SIZE_4, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("8", HOP_SIZE_8, hop_size)); + + // Add window_type choices (dropdown style) + root["window_type"]["choices"].append(add_property_choice_json("Rectangular", RECTANGULAR, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Bart Lett", BART_LETT, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type)); + + // Add effect_type choices (dropdown style) + root["effect_type"]["choices"].append(add_property_choice_json("Pass Through", PASS_THROUGH, effect_type)); + root["effect_type"]["choices"].append(add_property_choice_json("Robotization", ROBOTIZATION, effect_type)); + root["effect_type"]["choices"].append(add_property_choice_json("Whisperization", WHISPERIZATION, effect_type)); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Robotization.h b/src/audio_effects/Robotization.h new file mode 100644 index 00000000..aa6cfdbf --- /dev/null +++ b/src/audio_effects/Robotization.h @@ -0,0 +1,161 @@ +/** + * @file + * @brief Header file for Robotization audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . 
+ * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_ROBOTIZATION_AUDIO_EFFECT_H +#define OPENSHOT_ROBOTIZATION_AUDIO_EFFECT_H +#define _USE_MATH_DEFINES + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" +#include "../Enums.h" +#include "STFT.h" + +#include +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a robotization effect into the audio + * + */ + class Robotization : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + // Keyframe shift; ///< Robotization shift keyframe. The Robotization shift inserted on the audio. + openshot::FFTSize fft_size; + openshot::HopSize hop_size; + openshot::WindowType window_type; + openshot::RobotizationEffectType effect_type; + + /// Blank constructor, useful when using Json to load the effect properties + Robotization(); + + /// Default constructor + /// + /// @param new_level The audio default Robotization level (between 1 and 100) + Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type, openshot::RobotizationEffectType new_effect_type); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. 
All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + + + class RobotizationWhisperizationEffect : public STFT + { + public: + RobotizationWhisperizationEffect (Robotization& p) : parent (p) { } + + private: + void modification() override + { + fft->perform(time_domain_buffer, frequency_domain_buffer, false); + + switch ((int)parent.effect_type) { + case PASS_THROUGH: { + // nothing + break; + } + case ROBOTIZATION: { + for (int index = 0; index < fft_size; ++index) { + float magnitude = abs (frequency_domain_buffer[index]); + 
frequency_domain_buffer[index].real (magnitude); + frequency_domain_buffer[index].imag (0.0f); + } + break; + } + case WHISPERIZATION: { + for (int index = 0; index < fft_size / 2 + 1; ++index) { + float magnitude = abs (frequency_domain_buffer[index]); + float phase = 2.0f * M_PI * (float)rand() / (float)RAND_MAX; + + frequency_domain_buffer[index].real (magnitude * cosf (phase)); + frequency_domain_buffer[index].imag (magnitude * sinf (phase)); + if (index > 0 && index < fft_size / 2) { + frequency_domain_buffer[fft_size - index].real (magnitude * cosf (phase)); + frequency_domain_buffer[fft_size - index].imag (magnitude * sinf (-phase)); + } + } + break; + } + } + + fft->perform (frequency_domain_buffer, time_domain_buffer, true); + } + + Robotization& parent; + }; + + juce::CriticalSection lock; + RobotizationWhisperizationEffect stft; + std::unique_ptr fft; + }; + +} + +#endif diff --git a/src/audio_effects/STFT.cpp b/src/audio_effects/STFT.cpp new file mode 100644 index 00000000..aff2729c --- /dev/null +++ b/src/audio_effects/STFT.cpp @@ -0,0 +1,175 @@ +#include "STFT.h" + +using namespace openshot; + +void STFT::setup(const int num_input_channels) +{ + num_channels = (num_input_channels > 0) ? 
num_input_channels : 1; +} + +void STFT::updateParameters(const int new_fft_size, const int new_overlap, const int new_window_type) +{ + updateFftSize(new_fft_size); + updateHopSize(new_overlap); + updateWindow(new_window_type); +} + +void STFT::process(juce::AudioSampleBuffer &block) +{ + num_samples = block.getNumSamples(); + + for (int channel = 0; channel < num_channels; ++channel) { + float* channelData = block.getWritePointer (channel); + + current_input_buffer_write_position = input_buffer_write_position; + current_output_buffer_write_position = output_buffer_write_position; + current_output_buffer_read_position = output_buffer_read_position; + current_samples_since_last_FFT = samples_since_last_FFT; + + for (int sample = 0; sample < num_samples; ++sample) { + const float inputSample = channelData[sample]; + input_buffer.setSample (channel, current_input_buffer_write_position, inputSample); + if (++current_input_buffer_write_position >= input_buffer_length) + current_input_buffer_write_position = 0; + + channelData[sample] = output_buffer.getSample (channel, current_output_buffer_read_position); + + output_buffer.setSample (channel, current_output_buffer_read_position, 0.0f); + if (++current_output_buffer_read_position >= output_buffer_length) + current_output_buffer_read_position = 0; + + if (++current_samples_since_last_FFT >= hop_size) { + current_samples_since_last_FFT = 0; + analysis (channel); + modification(); + synthesis (channel); + } + } + } + + input_buffer_write_position = current_input_buffer_write_position; + output_buffer_write_position = current_output_buffer_write_position; + output_buffer_read_position = current_output_buffer_read_position; + samples_since_last_FFT = current_samples_since_last_FFT; +} + +void STFT::updateFftSize(const int new_fft_size) +{ + fft_size = new_fft_size; + fft = std::make_unique(log2 (fft_size)); + + input_buffer_length = fft_size; + input_buffer.clear(); + input_buffer.setSize(num_channels, input_buffer_length); 
+ + output_buffer_length = fft_size; + output_buffer.clear(); + output_buffer.setSize(num_channels, output_buffer_length); + + fft_window.realloc(fft_size); + fft_window.clear(fft_size); + + time_domain_buffer.realloc(fft_size); + time_domain_buffer.clear(fft_size); + + frequency_domain_buffer.realloc(fft_size); + frequency_domain_buffer.clear(fft_size); + + input_buffer_write_position = 0; + output_buffer_write_position = 0; + output_buffer_read_position = 0; + samples_since_last_FFT = 0; +} + +void STFT::updateHopSize(const int new_overlap) +{ + overlap = new_overlap; + + if (overlap != 0) { + hop_size = fft_size / overlap; + output_buffer_write_position = hop_size % output_buffer_length; + } +} + +void STFT::updateWindow(const int new_window_type) +{ + switch (new_window_type) { + case RECTANGULAR: { + for (int sample = 0; sample < fft_size; ++sample) + fft_window[sample] = 1.0f; + break; + } + case BART_LETT: { + for (int sample = 0; sample < fft_size; ++sample) + fft_window[sample] = 1.0f - fabs (2.0f * (float)sample / (float)(fft_size - 1) - 1.0f); + break; + } + case HANN: { + for (int sample = 0; sample < fft_size; ++sample) + fft_window[sample] = 0.5f - 0.5f * cosf (2.0f * M_PI * (float)sample / (float)(fft_size - 1)); + break; + } + case HAMMING: { + for (int sample = 0; sample < fft_size; ++sample) + fft_window[sample] = 0.54f - 0.46f * cosf (2.0f * M_PI * (float)sample / (float)(fft_size - 1)); + break; + } + } + + float window_sum = 0.0f; + for (int sample = 0; sample < fft_size; ++sample) + window_sum += fft_window[sample]; + + window_scale_factor = 0.0f; + if (overlap != 0 && window_sum != 0.0f) + window_scale_factor = 1.0f / (float)overlap / window_sum * (float)fft_size; +} + +void STFT::analysis(const int channel) +{ + int input_buffer_index = current_input_buffer_write_position; + for (int index = 0; index < fft_size; ++index) { + time_domain_buffer[index].real(fft_window[index] * input_buffer.getSample (channel, input_buffer_index)); + 
time_domain_buffer[index].imag(0.0f); + + if (++input_buffer_index >= input_buffer_length) + input_buffer_index = 0; + } +} + +void STFT::modification() +{ + fft->perform(time_domain_buffer, frequency_domain_buffer, false); + + for (int index = 0; index < fft_size / 2 + 1; ++index) { + float magnitude = abs(frequency_domain_buffer[index]); + float phase = arg(frequency_domain_buffer[index]); + + frequency_domain_buffer[index].real(magnitude * cosf (phase)); + frequency_domain_buffer[index].imag(magnitude * sinf (phase)); + + if (index > 0 && index < fft_size / 2) { + frequency_domain_buffer[fft_size - index].real(magnitude * cosf (phase)); + frequency_domain_buffer[fft_size - index].imag(magnitude * sinf (-phase)); + } + } + + fft->perform(frequency_domain_buffer, time_domain_buffer, true); +} + +void STFT::synthesis(const int channel) +{ + int output_buffer_index = current_output_buffer_write_position; + for (int index = 0; index < fft_size; ++index) { + float output_sample = output_buffer.getSample (channel, output_buffer_index); + output_sample += time_domain_buffer[index].real() * window_scale_factor; + output_buffer.setSample (channel, output_buffer_index, output_sample); + + if (++output_buffer_index >= output_buffer_length) + output_buffer_index = 0; + } + + current_output_buffer_write_position += hop_size; + if (current_output_buffer_write_position >= output_buffer_length) + current_output_buffer_write_position = 0; +} \ No newline at end of file diff --git a/src/audio_effects/STFT.h b/src/audio_effects/STFT.h new file mode 100644 index 00000000..b27439c0 --- /dev/null +++ b/src/audio_effects/STFT.h @@ -0,0 +1,72 @@ +#pragma once + +#ifndef OPENSHOT_STFT_AUDIO_EFFECT_H +#define OPENSHOT_STFT_AUDIO_EFFECT_H +#define _USE_MATH_DEFINES + +#include "../EffectBase.h" +#include "../Enums.h" + +namespace openshot +{ + + class STFT + { + public: + STFT() : num_channels (1) { } + + virtual ~STFT() { } + + void setup(const int num_input_channels); + + void 
updateParameters(const int new_fft_size, const int new_overlap, const int new_window_type); + + void process(juce::AudioSampleBuffer &block); + + private: + void updateFftSize(const int new_fft_size); + + void updateHopSize(const int new_overlap); + + void updateWindow(const int new_window_type); + + void analysis(const int channel); + + virtual void modification(); + + void synthesis(const int channel); + + protected: + int num_channels; + int num_samples; + + int fft_size; + std::unique_ptr fft; + + int input_buffer_length; + juce::AudioSampleBuffer input_buffer; + + int output_buffer_length; + juce::AudioSampleBuffer output_buffer; + + juce::HeapBlock fft_window; + juce::HeapBlock> time_domain_buffer; + juce::HeapBlock> frequency_domain_buffer; + + int overlap; + int hop_size; + float window_scale_factor; + + int input_buffer_write_position; + int output_buffer_write_position; + int output_buffer_read_position; + int samples_since_last_FFT; + + int current_input_buffer_write_position; + int current_output_buffer_write_position; + int current_output_buffer_read_position; + int current_samples_since_last_FFT; + }; +} + +#endif From aca031454dcb83ea13c980c26f30e31d70e164d0 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Jul 2021 19:44:54 -0300 Subject: [PATCH 32/71] Fixed weird noises in the audio robotization effect --- src/audio_effects/Robotization.cpp | 22 ++------- src/audio_effects/Robotization.h | 16 +++---- src/audio_effects/STFT.cpp | 74 +++++++++++++++++------------- 3 files changed, 54 insertions(+), 58 deletions(-) diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp index f98ed2a6..8e087814 100644 --- a/src/audio_effects/Robotization.cpp +++ b/src/audio_effects/Robotization.cpp @@ -34,7 +34,7 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Robotization::Robotization() : fft_size(FFT_SIZE_2048), hop_size(HOP_SIZE_8), window_type(RECTANGULAR), 
effect_type(ROBOTIZATION), stft(*this) { +Robotization::Robotization() : fft_size(FFT_SIZE_512), hop_size(HOP_SIZE_2), window_type(RECTANGULAR), effect_type(ROBOTIZATION), stft(*this) { // Init effect properties init_effect_details(); } @@ -52,7 +52,6 @@ void Robotization::init_effect_details() { /// Initialize the values of the EffectInfo struct. InitEffectInfo(); - // stft = RobotizationWhisperizationEffect(*this); /// Set the effect info info.class_name = "Robotization"; @@ -69,30 +68,19 @@ std::shared_ptr Robotization::GetFrame(std::shared_ptraudio object (not the pointer) - // input_buffer = std::make_shared>(*frame->audio); - // output_buffer = std::make_shared>(*frame->audio); - // frame->audio; - const int num_input_channels = frame->audio->getNumChannels(); const int num_output_channels = frame->audio->getNumChannels(); const int num_samples = frame->audio->getNumSamples(); const int hop_size_value = 1 << ((int)hop_size + 1); const int fft_size_value = 1 << ((int)fft_size + 5); - stft.setup(num_input_channels); + stft.setup(num_output_channels); stft.updateParameters((int)fft_size_value, (int)hop_size_value, (int)window_type); stft.process(*frame->audio); - for (int channel = num_input_channels; channel < num_output_channels; ++channel) - frame->audio->clear(channel, 0, num_samples); - // return the modified frame return frame; } @@ -188,9 +176,9 @@ std::string Robotization::PropertiesJSON(int64_t requested_frame) const { root["fft_size"]["choices"].append(add_property_choice_json("8192", FFT_SIZE_8192, fft_size)); // Add hop_size choices (dropdown style) - root["hop_size"]["choices"].append(add_property_choice_json("2", HOP_SIZE_2, hop_size)); - root["hop_size"]["choices"].append(add_property_choice_json("4", HOP_SIZE_4, hop_size)); - root["hop_size"]["choices"].append(add_property_choice_json("8", HOP_SIZE_8, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("1/2", HOP_SIZE_2, hop_size)); + 
root["hop_size"]["choices"].append(add_property_choice_json("1/4", HOP_SIZE_4, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("1/8", HOP_SIZE_8, hop_size)); // Add window_type choices (dropdown style) root["window_type"]["choices"].append(add_property_choice_json("Rectangular", RECTANGULAR, window_type)); diff --git a/src/audio_effects/Robotization.h b/src/audio_effects/Robotization.h index aa6cfdbf..db4d35b0 100644 --- a/src/audio_effects/Robotization.h +++ b/src/audio_effects/Robotization.h @@ -123,19 +123,19 @@ namespace openshot } case ROBOTIZATION: { for (int index = 0; index < fft_size; ++index) { - float magnitude = abs (frequency_domain_buffer[index]); - frequency_domain_buffer[index].real (magnitude); - frequency_domain_buffer[index].imag (0.0f); + float magnitude = abs(frequency_domain_buffer[index]); + frequency_domain_buffer[index].real(magnitude); + frequency_domain_buffer[index].imag(0.0f); } break; } case WHISPERIZATION: { for (int index = 0; index < fft_size / 2 + 1; ++index) { - float magnitude = abs (frequency_domain_buffer[index]); + float magnitude = abs(frequency_domain_buffer[index]); float phase = 2.0f * M_PI * (float)rand() / (float)RAND_MAX; - frequency_domain_buffer[index].real (magnitude * cosf (phase)); - frequency_domain_buffer[index].imag (magnitude * sinf (phase)); + frequency_domain_buffer[index].real(magnitude * cosf (phase)); + frequency_domain_buffer[index].imag(magnitude * sinf (phase)); if (index > 0 && index < fft_size / 2) { frequency_domain_buffer[fft_size - index].real (magnitude * cosf (phase)); frequency_domain_buffer[fft_size - index].imag (magnitude * sinf (-phase)); @@ -145,10 +145,10 @@ namespace openshot } } - fft->perform (frequency_domain_buffer, time_domain_buffer, true); + fft->perform(frequency_domain_buffer, time_domain_buffer, true); } - Robotization& parent; + Robotization &parent; }; juce::CriticalSection lock; diff --git a/src/audio_effects/STFT.cpp b/src/audio_effects/STFT.cpp 
index aff2729c..26e35907 100644 --- a/src/audio_effects/STFT.cpp +++ b/src/audio_effects/STFT.cpp @@ -19,7 +19,7 @@ void STFT::process(juce::AudioSampleBuffer &block) num_samples = block.getNumSamples(); for (int channel = 0; channel < num_channels; ++channel) { - float* channelData = block.getWritePointer (channel); + float *channel_data = block.getWritePointer(channel); current_input_buffer_write_position = input_buffer_write_position; current_output_buffer_write_position = output_buffer_write_position; @@ -27,22 +27,23 @@ void STFT::process(juce::AudioSampleBuffer &block) current_samples_since_last_FFT = samples_since_last_FFT; for (int sample = 0; sample < num_samples; ++sample) { - const float inputSample = channelData[sample]; - input_buffer.setSample (channel, current_input_buffer_write_position, inputSample); + const float input_sample = channel_data[sample]; + + input_buffer.setSample(channel, current_input_buffer_write_position, input_sample); if (++current_input_buffer_write_position >= input_buffer_length) current_input_buffer_write_position = 0; - channelData[sample] = output_buffer.getSample (channel, current_output_buffer_read_position); + channel_data[sample] = output_buffer.getSample(channel, current_output_buffer_read_position); - output_buffer.setSample (channel, current_output_buffer_read_position, 0.0f); + output_buffer.setSample(channel, current_output_buffer_read_position, 0.0f); if (++current_output_buffer_read_position >= output_buffer_length) current_output_buffer_read_position = 0; if (++current_samples_since_last_FFT >= hop_size) { current_samples_since_last_FFT = 0; - analysis (channel); + analysis(channel); modification(); - synthesis (channel); + synthesis(channel); } } } @@ -53,41 +54,48 @@ void STFT::process(juce::AudioSampleBuffer &block) samples_since_last_FFT = current_samples_since_last_FFT; } + void STFT::updateFftSize(const int new_fft_size) { - fft_size = new_fft_size; - fft = std::make_unique(log2 (fft_size)); + if 
(new_fft_size != fft_size) + { + fft_size = new_fft_size; + fft = std::make_unique(log2(fft_size)); - input_buffer_length = fft_size; - input_buffer.clear(); - input_buffer.setSize(num_channels, input_buffer_length); + input_buffer_length = fft_size; + input_buffer.clear(); + input_buffer.setSize(num_channels, input_buffer_length); - output_buffer_length = fft_size; - output_buffer.clear(); - output_buffer.setSize(num_channels, output_buffer_length); + output_buffer_length = fft_size; + output_buffer.clear(); + output_buffer.setSize(num_channels, output_buffer_length); - fft_window.realloc(fft_size); - fft_window.clear(fft_size); + fft_window.realloc(fft_size); + fft_window.clear(fft_size); - time_domain_buffer.realloc(fft_size); - time_domain_buffer.clear(fft_size); + time_domain_buffer.realloc(fft_size); + time_domain_buffer.clear(fft_size); - frequency_domain_buffer.realloc(fft_size); - frequency_domain_buffer.clear(fft_size); - - input_buffer_write_position = 0; - output_buffer_write_position = 0; - output_buffer_read_position = 0; - samples_since_last_FFT = 0; + frequency_domain_buffer.realloc(fft_size); + frequency_domain_buffer.clear(fft_size); + + input_buffer_write_position = 0; + output_buffer_write_position = 0; + output_buffer_read_position = 0; + samples_since_last_FFT = 0; + } } void STFT::updateHopSize(const int new_overlap) { - overlap = new_overlap; + if (new_overlap != overlap) + { + overlap = new_overlap; - if (overlap != 0) { - hop_size = fft_size / overlap; - output_buffer_write_position = hop_size % output_buffer_length; + if (overlap != 0) { + hop_size = fft_size / overlap; + output_buffer_write_position = hop_size % output_buffer_length; + } } } @@ -129,7 +137,7 @@ void STFT::analysis(const int channel) { int input_buffer_index = current_input_buffer_write_position; for (int index = 0; index < fft_size; ++index) { - time_domain_buffer[index].real(fft_window[index] * input_buffer.getSample (channel, input_buffer_index)); + 
time_domain_buffer[index].real(fft_window[index] * input_buffer.getSample(channel, input_buffer_index)); time_domain_buffer[index].imag(0.0f); if (++input_buffer_index >= input_buffer_length) @@ -161,9 +169,9 @@ void STFT::synthesis(const int channel) { int output_buffer_index = current_output_buffer_write_position; for (int index = 0; index < fft_size; ++index) { - float output_sample = output_buffer.getSample (channel, output_buffer_index); + float output_sample = output_buffer.getSample(channel, output_buffer_index); output_sample += time_domain_buffer[index].real() * window_scale_factor; - output_buffer.setSample (channel, output_buffer_index, output_sample); + output_buffer.setSample(channel, output_buffer_index, output_sample); if (++output_buffer_index >= output_buffer_length) output_buffer_index = 0; From 995e3f0a3a757d691c412f34c6d5822845b8d952 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Jul 2021 19:49:10 -0300 Subject: [PATCH 33/71] Spliting secundary effects from Robotization and Compressor --- src/CMakeLists.txt | 5 +- src/EffectInfo.cpp | 10 +- src/Effects.h | 3 +- src/Enums.h | 17 +- src/audio_effects/Compressor.cpp | 72 +-- src/audio_effects/Compressor.h | 3 +- src/audio_effects/Expander.cpp | 228 ++++++++++ src/audio_effects/Expander.h | 125 ++++++ src/audio_effects/Pitch.cpp | 418 ------------------ src/audio_effects/Robotization.cpp | 40 +- src/audio_effects/Robotization.h | 45 +- src/audio_effects/STFT.cpp | 15 +- src/audio_effects/STFT.h | 20 +- src/audio_effects/Whisperization.cpp | 200 +++++++++ .../{Pitch.h => Whisperization.h} | 58 +-- 15 files changed, 649 insertions(+), 610 deletions(-) create mode 100644 src/audio_effects/Expander.cpp create mode 100644 src/audio_effects/Expander.h delete mode 100644 src/audio_effects/Pitch.cpp create mode 100644 src/audio_effects/Whisperization.cpp rename src/audio_effects/{Pitch.h => Whisperization.h} (70%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1a5b2e62..f9a7df34 100644 --- 
a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -136,8 +136,9 @@ set(EFFECTS_SOURCES audio_effects/Distortion.cpp audio_effects/ParametricEQ.cpp audio_effects/Compressor.cpp - audio_effects/Pitch.cpp - audio_effects/Robotization.cpp) + audio_effects/Expander.cpp + audio_effects/Robotization.cpp + audio_effects/Whisperization.cpp) # Qt video player components set(QT_PLAYER_SOURCES diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp index 92410871..54f73bad 100644 --- a/src/EffectInfo.cpp +++ b/src/EffectInfo.cpp @@ -100,12 +100,15 @@ EffectBase* EffectInfo::CreateEffect(std::string effect_type) { else if(effect_type == "Compressor") return new Compressor(); - else if(effect_type == "Pitch") - return new Pitch(); + else if(effect_type == "Expander") + return new Expander(); else if(effect_type == "Robotization") return new Robotization(); + else if(effect_type == "Whisperization") + return new Whisperization(); + #ifdef USE_OPENCV else if(effect_type == "Stabilizer") return new Stabilizer(); @@ -147,8 +150,9 @@ Json::Value EffectInfo::JsonValue() { root.append(Distortion().JsonInfo()); root.append(ParametricEQ().JsonInfo()); root.append(Compressor().JsonInfo()); - root.append(Pitch().JsonInfo()); + root.append(Expander().JsonInfo()); root.append(Robotization().JsonInfo()); + root.append(Whisperization().JsonInfo()); #ifdef USE_OPENCV root.append(Stabilizer().JsonInfo()); diff --git a/src/Effects.h b/src/Effects.h index 9fec4669..81317b41 100644 --- a/src/Effects.h +++ b/src/Effects.h @@ -53,8 +53,9 @@ #include "audio_effects/Distortion.h" #include "audio_effects/ParametricEQ.h" #include "audio_effects/Compressor.h" -#include "audio_effects/Pitch.h" +#include "audio_effects/Expander.h" #include "audio_effects/Robotization.h" +#include "audio_effects/Whisperization.h" /* OpenCV Effects */ #ifdef USE_OPENCV diff --git a/src/Enums.h b/src/Enums.h index 764df9a2..601b2de9 100644 --- a/src/Enums.h +++ b/src/Enums.h @@ -104,15 +104,6 @@ namespace openshot PEAKING_NOTCH, 
}; - /// This enumeration determines the compressor mode of compressor Effect. - enum CompressorMode - { - COMPRESSOR, - LIMITER, - EXPANDER, - NOISE_GATE, - }; - /// This enumeration determines the FFT size. enum FFTSize { @@ -141,12 +132,6 @@ namespace openshot HANN, HAMMING, }; - - enum RobotizationEffectType { - PASS_THROUGH, - ROBOTIZATION, - WHISPERIZATION, - }; - + } #endif diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp index d9d049c7..7ba5c8c8 100644 --- a/src/audio_effects/Compressor.cpp +++ b/src/audio_effects/Compressor.cpp @@ -34,14 +34,14 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Compressor::Compressor() : mode(COMPRESSOR), threshold(1), ratio(1), attack(1), release(1), makeup_gain(1), bypass(false) { +Compressor::Compressor() : threshold(-10), ratio(1), attack(1), release(1), makeup_gain(1), bypass(false) { // Init effect properties init_effect_details(); } // Default constructor -Compressor::Compressor(openshot::CompressorMode new_mode, Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass) : - mode(new_mode), threshold(new_threshold), ratio(new_ratio), attack(new_attack), release(new_release), makeup_gain(new_makeup_gain), bypass(new_bypass) +Compressor::Compressor(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass) : + threshold(new_threshold), ratio(new_ratio), attack(new_attack), release(new_release), makeup_gain(new_makeup_gain), bypass(new_bypass) { // Init effect properties init_effect_details(); @@ -56,7 +56,7 @@ void Compressor::init_effect_details() /// Set the effect info info.class_name = "Compressor"; info.name = "Compressor"; - info.description = "Add compressor on the frame's sound."; + info.description = "Add Compressor on the frame's sound."; info.has_audio = true; info.has_video 
= false; @@ -88,7 +88,6 @@ std::shared_ptr Compressor::GetFrame(std::shared_ptraudio, channel, 0, num_samples, 1.0f / num_input_channels); for (int sample = 0; sample < num_samples; ++sample) { - bool expander = (bool)mode; float T = threshold.GetValue(frame_number); float R = ratio.GetValue(frame_number); float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number)); @@ -96,43 +95,21 @@ std::shared_ptr Compressor::GetFrame(std::shared_ptr T) - yg = xg; - else - yg = T + (xg - T) * R; + if (xg < T) + yg = xg; + else + yg = T + (xg - T) / R; - xl = xg - yg; + xl = xg - yg; - if (xl < yl_prev) - yl = alphaA * yl_prev + (1.0f - alphaA) * xl; - else - yl = alphaR * yl_prev + (1.0f - alphaR) * xl; - - // Compressor - } else { - if (xg < T) - yg = xg; - else - yg = T + (xg - T) / R; - - xl = xg - yg; - - if (xl > yl_prev) - yl = alphaA * yl_prev + (1.0f - alphaA) * xl; - else - yl = alphaR * yl_prev + (1.0f - alphaR) * xl; - } + if (xl > yl_prev) + yl = alphaA * yl_prev + (1.0f - alphaA) * xl; + else + yl = alphaR * yl_prev + (1.0f - alphaR) * xl; control = powf (10.0f, (gain - yl) * 0.05f); yl_prev = yl; @@ -171,7 +148,6 @@ Json::Value Compressor::JsonValue() const { // Create root json object Json::Value root = EffectBase::JsonValue(); // get parent properties root["type"] = info.class_name; - root["mode"] = mode; root["threshold"] = threshold.JsonValue(); root["ratio"] = ratio.JsonValue(); root["attack"] = attack.JsonValue(); @@ -207,9 +183,6 @@ void Compressor::SetJsonValue(const Json::Value root) { EffectBase::SetJsonValue(root); // Set data from Json (if key is found) - if (!root["mode"].isNull()) - mode = (CompressorMode)root["mode"].asInt(); - if (!root["threshold"].isNull()) threshold.SetJsonValue(root["threshold"]); @@ -241,20 +214,13 @@ std::string Compressor::PropertiesJSON(int64_t requested_frame) const { root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); // Keyframes - 
root["mode"] = add_property_json("Mode", mode, "int", "", NULL, 0, 3, false, requested_frame); - root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 60, false, requested_frame); + root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 0, false, requested_frame); root["ratio"] = add_property_json("Ratio", ratio.GetValue(requested_frame), "float", "", &ratio, 1, 100, false, requested_frame); - root["attack"] = add_property_json("Attack", attack.GetValue(requested_frame), "float", "", &attack, 0.1, 100, false, requested_frame); - root["release"] = add_property_json("Release", release.GetValue(requested_frame), "float", "", &release, 10, 1000, false, requested_frame); - root["makeup_gain"] = add_property_json("Makeup gain", makeup_gain.GetValue(requested_frame), "float", "", &makeup_gain, -12, 12, false, requested_frame); + root["attack"] = add_property_json("Attack (ms)", attack.GetValue(requested_frame), "float", "", &attack, 0.1, 100, false, requested_frame); + root["release"] = add_property_json("Release (ms)", release.GetValue(requested_frame), "float", "", &release, 10, 1000, false, requested_frame); + root["makeup_gain"] = add_property_json("Makeup gain (dB)", makeup_gain.GetValue(requested_frame), "float", "", &makeup_gain, -12, 12, false, requested_frame); root["bypass"] = add_property_json("Bypass", bypass.GetValue(requested_frame), "bool", "", &bypass, 0, 1, false, requested_frame); - // Add mode choices (dropdown style) - root["mode"]["choices"].append(add_property_choice_json("Compressor", COMPRESSOR, mode)); - root["mode"]["choices"].append(add_property_choice_json("Limiter", LIMITER, mode)); - root["mode"]["choices"].append(add_property_choice_json("Expander", EXPANDER, mode)); - root["mode"]["choices"].append(add_property_choice_json("Noise Gate", NOISE_GATE, mode)); - // Return formatted string return 
root.toStyledString(); } diff --git a/src/audio_effects/Compressor.h b/src/audio_effects/Compressor.h index abe5e4ac..7dcdb252 100644 --- a/src/audio_effects/Compressor.h +++ b/src/audio_effects/Compressor.h @@ -58,7 +58,6 @@ namespace openshot public: - openshot::CompressorMode mode; Keyframe threshold; Keyframe ratio; Keyframe attack; @@ -85,7 +84,7 @@ namespace openshot /// Default constructor /// /// @param new_level The audio default Compressor level (between 1 and 100) - Compressor(openshot::CompressorMode new_mode, Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass); + Compressor(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass); float calculateAttackOrRelease(float value); diff --git a/src/audio_effects/Expander.cpp b/src/audio_effects/Expander.cpp new file mode 100644 index 00000000..2c6a86c3 --- /dev/null +++ b/src/audio_effects/Expander.cpp @@ -0,0 +1,228 @@ +/** + * @file + * @brief Source file for Expander audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Expander.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Expander::Expander() : threshold(-10), ratio(1), attack(1), release(1), makeup_gain(1), bypass(false) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Expander::Expander(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass) : + threshold(new_threshold), ratio(new_ratio), attack(new_attack), release(new_release), makeup_gain(new_makeup_gain), bypass(new_bypass) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Expander::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. + InitEffectInfo(); + + /// Set the effect info + info.class_name = "Expander"; + info.name = "Expander"; + info.description = "Add Expander on the frame's sound."; + info.has_audio = true; + info.has_video = false; + + input_level = 0.0f; + yl_prev = 0.0f; + + +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Expander::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + // Adding Expander + const int num_input_channels = frame->audio->getNumChannels(); + const int num_output_channels = frame->audio->getNumChannels(); + const int num_samples = frame->audio->getNumSamples(); + + mixed_down_input.setSize(1, num_samples); + inverse_sample_rate = 1.0f / frame->SampleRate(); //(float)getSampleRate(); + inverseE = 1.0f / M_E; + + if ((bool)bypass.GetValue(frame_number)) + return frame; + + mixed_down_input.clear(); + + for (int channel = 0; channel < num_input_channels; ++channel) + mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, 
num_samples, 1.0f / num_input_channels); + + for (int sample = 0; sample < num_samples; ++sample) { + float T = threshold.GetValue(frame_number); + float R = ratio.GetValue(frame_number); + float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number)); + float alphaR = calculateAttackOrRelease(release.GetValue(frame_number)); + float gain = makeup_gain.GetValue(frame_number); + float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f); + + const float average_factor = 0.9999f; + input_level = average_factor * input_level + (1.0f - average_factor) * input_squared; + + xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level); + + if (xg > T) + yg = xg; + else + yg = T + (xg - T) * R; + + xl = xg - yg; + + if (xl < yl_prev) + yl = alphaA * yl_prev + (1.0f - alphaA) * xl; + else + yl = alphaR * yl_prev + (1.0f - alphaR) * xl; + + + control = powf (10.0f, (gain - yl) * 0.05f); + yl_prev = yl; + + for (int channel = 0; channel < num_input_channels; ++channel) { + float new_value = frame->audio->getSample(channel, sample)*control; + frame->audio->setSample(channel, sample, new_value); + } + } + + for (int channel = num_input_channels; channel < num_output_channels; ++channel) + frame->audio->clear(channel, 0, num_samples); + + // return the modified frame + return frame; +} + +float Expander::calculateAttackOrRelease(float value) +{ + if (value == 0.0f) + return 0.0f; + else + return pow (inverseE, inverse_sample_rate / value); +} + +// Generate JSON string of this object +std::string Expander::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Expander::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["threshold"] = threshold.JsonValue(); + root["ratio"] = ratio.JsonValue(); + root["attack"] = attack.JsonValue(); + root["release"] = 
release.JsonValue(); + root["makeup_gain"] = makeup_gain.JsonValue(); + root["bypass"] = bypass.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Expander::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Expander::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["threshold"].isNull()) + threshold.SetJsonValue(root["threshold"]); + + if (!root["ratio"].isNull()) + ratio.SetJsonValue(root["ratio"]); + + if (!root["attack"].isNull()) + attack.SetJsonValue(root["attack"]); + + if (!root["release"].isNull()) + release.SetJsonValue(root["release"]); + + if (!root["makeup_gain"].isNull()) + makeup_gain.SetJsonValue(root["makeup_gain"]); + + if (!root["bypass"].isNull()) + bypass.SetJsonValue(root["bypass"]); +} + +// Get all properties for a specific frame +std::string Expander::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["threshold"] = 
add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 0, false, requested_frame); + root["ratio"] = add_property_json("Ratio", ratio.GetValue(requested_frame), "float", "", &ratio, 1, 100, false, requested_frame); + root["attack"] = add_property_json("Attack (ms)", attack.GetValue(requested_frame), "float", "", &attack, 0.1, 100, false, requested_frame); + root["release"] = add_property_json("Release (ms)", release.GetValue(requested_frame), "float", "", &release, 10, 1000, false, requested_frame); + root["makeup_gain"] = add_property_json("Makeup gain (dB)", makeup_gain.GetValue(requested_frame), "float", "", &makeup_gain, -12, 12, false, requested_frame); + root["bypass"] = add_property_json("Bypass", bypass.GetValue(requested_frame), "bool", "", &bypass, 0, 1, false, requested_frame); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Expander.h b/src/audio_effects/Expander.h new file mode 100644 index 00000000..7fc37afa --- /dev/null +++ b/src/audio_effects/Expander.h @@ -0,0 +1,125 @@ +/** + * @file + * @brief Header file for Expander audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_EXPANDER_AUDIO_EFFECT_H +#define OPENSHOT_EXPANDER_AUDIO_EFFECT_H + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" +#include "../Enums.h" + +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a Expander into the audio + * + */ + class Expander : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + + public: + Keyframe threshold; + Keyframe ratio; + Keyframe attack; + Keyframe release; + Keyframe makeup_gain; + Keyframe bypass; + + juce::AudioSampleBuffer mixed_down_input; + float xl; + float yl; + float xg; + float yg; + float control; + + float input_level; + float yl_prev; + + float inverse_sample_rate; + float inverseE; + + /// Blank constructor, useful when using Json to load the effect properties + Expander(); + + /// Default constructor + /// + /// @param new_level The audio default Expander level (between 1 and 100) + Expander(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass); + + float calculateAttackOrRelease(float value); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + }; + +} + +#endif diff --git a/src/audio_effects/Pitch.cpp b/src/audio_effects/Pitch.cpp deleted file mode 100644 index 134daa91..00000000 --- a/src/audio_effects/Pitch.cpp +++ /dev/null @@ -1,418 +0,0 @@ -/** - * @file - * @brief Source file for Pitch audio effect class - * @author - * - * @ref License - */ - -/* LICENSE - * - * Copyright (c) 2008-2019 OpenShot Studios, LLC - * . This file is part of - * OpenShot Library (libopenshot), an open-source project dedicated to - * delivering high quality video editing and animation solutions to the - * world. For more information visit . 
- * - * OpenShot Library (libopenshot) is free software: you can redistribute it - * and/or modify it under the terms of the GNU Lesser General Public License - * as published by the Free Software Foundation, either version 3 of the - * License, or (at your option) any later version. - * - * OpenShot Library (libopenshot) is distributed in the hope that it will be - * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with OpenShot Library. If not, see . - */ - -#include "Pitch.h" -#include "Exceptions.h" - -using namespace openshot; - -/// Blank constructor, useful when using Json to load the effect properties -Pitch::Pitch() : shift(0), fft_size(FFT_SIZE_32), hop_size(HOP_SIZE_2), window_type(BART_LETT){ - // Init effect properties - init_effect_details(); -} - -// Default constructor -Pitch::Pitch(Keyframe new_shift, openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type) : - shift(new_shift), fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type) -{ - // Init effect properties - init_effect_details(); -} - -// Init effect settings -void Pitch::init_effect_details() -{ - /// Initialize the values of the EffectInfo struct. 
- InitEffectInfo(); - - /// Set the effect info - info.class_name = "Pitch"; - info.name = "Pitch Shift"; - info.description = "Change pitch of the frame's sound."; - info.has_audio = true; - info.has_video = false; -} - -// This method is required for all derived classes of EffectBase, and returns a -// modified openshot::Frame object -std::shared_ptr Pitch::GetFrame(std::shared_ptr frame, int64_t frame_number) -{ - const ScopedLock sl (lock); - ScopedNoDenormals noDenormals; - - updateFftSize(frame); - updateHopSize(); - updateAnalysisWindow(); - updateWindowScaleFactor(); - - // const ScopedLock sl (lock); - // ScopedNoDenormals noDenormals; - - // copy of the AudioBuffer frame->audio object (not the pointer) - // input_buffer = std::make_shared>(*frame->audio); - // output_buffer = std::make_shared>(*frame->audio); - // frame->audio; - - const int num_input_channels = frame->audio->getNumChannels(); - const int num_output_channels = frame->audio->getNumChannels(); - const int num_samples = frame->audio->getNumSamples(); - - int current_input_buffer_write_position; - int current_output_buffer_write_position; - int current_output_buffer_read_position; - int current_samples_since_last_FFT; - - float shift_value = powf(2.0f, shift.GetValue(frame_number) / 12.0f); - int hop_size_value = 1 << ((int)hop_size + 1); - int fft_size_value = 1 << ((int)fft_size + 5); - - float ratio = roundf(shift_value*(float)hop_size_value/(float)hop_size_value); - int resampled_length = floorf((float)fft_size_value/ratio); - juce::HeapBlock resampled_output(resampled_length, true); - juce::HeapBlock synthesis_window(resampled_length, true); - updateWindow(synthesis_window, resampled_length); - - for (int channel = 0; channel < num_input_channels; channel++) - { - float* channel_data = frame->audio->getWritePointer(channel); - - current_input_buffer_write_position = input_buffer_write_position; - current_output_buffer_write_position = output_buffer_write_position; - 
current_output_buffer_read_position = output_buffer_read_position; - current_samples_since_last_FFT = samples_since_last_FFT; - - for (int sample = 0; sample < num_samples; ++sample) - { - const float in = channel_data[sample]; - channel_data[sample] = output_buffer.getSample(channel, current_output_buffer_read_position); - - output_buffer.setSample(channel, current_output_buffer_read_position, 0.0f); - if (++current_output_buffer_read_position >= output_buffer_length) - current_output_buffer_read_position = 0; - - input_buffer.setSample(channel, current_input_buffer_write_position, in); - if (++current_input_buffer_write_position >= input_buffer_length) - current_input_buffer_write_position = 0; - - if (++current_samples_since_last_FFT >= hop_size_value) - { - current_samples_since_last_FFT = 0; - - int input_buffer_index = current_input_buffer_write_position; - - for (int index = 0; index < fft_size_value; ++index) { - fft_time_domain[index].real(sqrtf(fft_window[index]) * input_buffer.getSample(channel, input_buffer_index)); - fft_time_domain[index].imag(0.0f); - - if (++input_buffer_index >= input_buffer_length) - input_buffer_index = 0; - } - - fft->perform(fft_time_domain, fft_frequency_domain, false); - - /* - if (paramShift.isSmoothing()) - needToResetPhases = true; - if (shift == paramShift.getTargetValue() && needToResetPhases) { - inputPhase.clear(); - outputPhase.clear(); - needToResetPhases = false; - } - */ - - for (int index = 0; index < fft_size_value; ++index) { - float magnitude = abs(fft_frequency_domain[index]); - float phase = arg(fft_frequency_domain[index]); - - float phase_deviation = phase - input_phase.getSample(channel, index) - omega[index] * (float)hop_size_value; - float delta_phi = omega[index] * hop_size_value + princArg(phase_deviation); - float new_phase = princArg(output_phase.getSample(channel, index) + delta_phi * ratio); - - input_phase.setSample(channel, index, phase); - output_phase.setSample(channel, index, new_phase); - 
fft_frequency_domain[index] = std::polar(magnitude, new_phase); - } - - fft->perform(fft_frequency_domain, fft_time_domain, true); - - for (int index = 0; index < resampled_length; ++index) { - float x = (float)index * (float)fft_size_value / (float)resampled_length; - int ix = (int)floorf(x); - float dx = x - (float)ix; - - float sample1 = fft_time_domain[ix].real(); - float sample2 = fft_time_domain[(ix + 1) % fft_size_value].real(); - resampled_output[index] = sample1 + dx * (sample2 - sample1); - resampled_output[index] *= sqrtf(synthesis_window[index]); - } - - int output_buffer_index = current_output_buffer_write_position; - - for (int index = 0; index < resampled_length; ++index) { - float out = output_buffer.getSample(channel, output_buffer_index); - out += resampled_output[index] * window_scale_factor; - output_buffer.setSample(channel, output_buffer_index, out); - - if (++output_buffer_index >= output_buffer_length) - output_buffer_index = 0; - } - - current_output_buffer_write_position += hop_size_value; - if (current_output_buffer_write_position >= output_buffer_length) - current_output_buffer_write_position = 0; - } - - } - } - - input_buffer_write_position = current_input_buffer_write_position; - output_buffer_write_position = current_output_buffer_write_position; - current_output_buffer_read_position = current_output_buffer_read_position; - samples_since_last_FFT = current_samples_since_last_FFT; - - for (int channel = num_input_channels; channel < num_output_channels; ++channel) - frame->audio->clear(channel, 0, num_samples); - - // frame->audio = std::make_shared>(output_buffer); - - // return the modified frame - return frame; -} - -void Pitch::updateFftSize(std::shared_ptr frame) -{ - int fft_size_value = 1 << ((int)fft_size + 5); - fft = std::make_unique(log2(fft_size_value)); - - input_buffer_length = fft_size_value; - input_buffer_write_position = 0; - input_buffer.clear(); - input_buffer.setSize(frame->audio->getNumChannels(), 
input_buffer_length); - - float max_ratio = powf(2.0f, -12/12.0f); - - output_buffer_length = (int)floorf ((float)fft_size_value / max_ratio); - - output_buffer_write_position = 0; - output_buffer_read_position = 0; - output_buffer.clear(); - output_buffer.setSize(frame->audio->getNumChannels(), output_buffer_length); - - fft_window.realloc(fft_size_value); - fft_window.clear(fft_size_value); - - fft_time_domain.realloc(fft_size_value); - fft_time_domain.clear(fft_size_value); - - fft_frequency_domain.realloc(fft_size_value); - fft_frequency_domain.clear(fft_size_value); - - samples_since_last_FFT = 0; - - //====================================== - - omega.realloc(fft_size_value); - - for (int index = 0; index < fft_size_value; ++index) - omega[index] = 2.0f * M_PI * index / (float)fft_size_value; - - input_phase.clear(); - input_phase.setSize(frame->audio->getNumChannels(), output_buffer_length); - - output_phase.clear(); - output_phase.setSize(frame->audio->getNumChannels(), output_buffer_length); -} - - -void Pitch::updateHopSize() -{ - int hop_size_value = 1 << ((int)hop_size + 1); - int fft_size_value = 1 << ((int)fft_size + 5); - overlap = hop_size_value; - - if (overlap != 0) { - hop_size_value = fft_size_value / overlap; - // hop_size = hop_size_value; - output_buffer_write_position = hop_size_value % output_buffer_length; - } -} - - -void Pitch::updateWindowScaleFactor() -{ - int fft_size_value = 1 << ((int)fft_size + 5); - float window_sum = 0.0f; - - for (int sample = 0; sample < fft_size_value; ++sample) - window_sum += fft_window[sample]; - - window_scale_factor = 0.0f; - - if (overlap != 0 && window_sum != 0.0f) - window_scale_factor = 1.0f / (float)overlap / window_sum * (float)fft_size_value; -} - - -void Pitch::updateAnalysisWindow() -{ - int fft_size_value = 1 << ((int)fft_size + 5); - updateWindow(fft_window, fft_size_value); -} - -void Pitch::updateWindow(const juce::HeapBlock &window, const int window_length) -{ - switch ((int)window_type) { - 
case BART_LETT: { - for (int sample = 0; sample < window_length; ++sample) - window[sample] = 1.0f - fabs(2.0f * (float)sample / (float)(window_length - 1) - 1.0f); - break; - } - case HANN: { - for (int sample = 0; sample < window_length; ++sample) - window[sample] = 0.5f - 0.5f * cosf(2.0f * M_PI * (float)sample / (float)(window_length - 1)); - break; - } - case HAMMING: { - for (int sample = 0; sample < window_length; ++sample) - window[sample] = 0.54f - 0.46f * cosf(2.0f * M_PI * (float)sample / (float)(window_length - 1)); - break; - } - } -} - -float Pitch::princArg(const float phase) -{ - if (phase >= 0.0f) - return fmod(phase + M_PI, 2.0f * M_PI) - M_PI; - else - return fmod(phase + M_PI, -2.0f * M_PI) + M_PI; -} - -// Generate JSON string of this object -std::string Pitch::Json() const { - - // Return formatted string - return JsonValue().toStyledString(); -} - -// Generate Json::Value for this object -Json::Value Pitch::JsonValue() const { - - // Create root json object - Json::Value root = EffectBase::JsonValue(); // get parent properties - root["type"] = info.class_name; - root["shift"] = shift.JsonValue(); - root["fft_size"] = fft_size; - root["hop_size"] = hop_size; - root["window_type"] = window_type; - - // return JsonValue - return root; -} - -// Load JSON string into this object -void Pitch::SetJson(const std::string value) { - - // Parse JSON string into JSON objects - try - { - const Json::Value root = openshot::stringToJson(value); - // Set all values that match - SetJsonValue(root); - } - catch (const std::exception& e) - { - // Error parsing JSON (or missing keys) - throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); - } -} - -// Load Json::Value into this object -void Pitch::SetJsonValue(const Json::Value root) { - - // Set parent data - EffectBase::SetJsonValue(root); - - if (!root["fft_size"].isNull()) - fft_size = (FFTSize)root["fft_size"].asInt(); - - if (!root["hop_size"].isNull()) - hop_size = 
(HopSize)root["hop_size"].asInt(); - - if (!root["window_type"].isNull()) - window_type = (WindowType)root["window_type"].asInt(); - - // Set data from Json (if key is found) - if (!root["shift"].isNull()) - shift.SetJsonValue(root["shift"]); -} - -// Get all properties for a specific frame -std::string Pitch::PropertiesJSON(int64_t requested_frame) const { - - // Generate JSON properties list - Json::Value root; - root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); - root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); - root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); - root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); - root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); - - // Keyframes - root["shift"] = add_property_json("Shift", shift.GetValue(requested_frame), "float", "", &shift, -12, 12, false, requested_frame); - root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame); - root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame); - root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 2, false, requested_frame); - - // Add fft_size choices (dropdown style) - root["fft_size"]["choices"].append(add_property_choice_json("32", FFT_SIZE_32, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("64", FFT_SIZE_64, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size)); - 
root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("4096", FFT_SIZE_4096, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("8192", FFT_SIZE_8192, fft_size)); - - // Add hop_size choices (dropdown style) - root["hop_size"]["choices"].append(add_property_choice_json("2", HOP_SIZE_2, hop_size)); - root["hop_size"]["choices"].append(add_property_choice_json("4", HOP_SIZE_4, hop_size)); - root["hop_size"]["choices"].append(add_property_choice_json("8", HOP_SIZE_8, hop_size)); - - // Add window_type choices (dropdown style) - root["window_type"]["choices"].append(add_property_choice_json("Bart Lett", BART_LETT, window_type)); - root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type)); - root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type)); - - // Return formatted string - return root.toStyledString(); -} diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp index 8e087814..7f07e117 100644 --- a/src/audio_effects/Robotization.cpp +++ b/src/audio_effects/Robotization.cpp @@ -33,15 +33,16 @@ using namespace openshot; + /// Blank constructor, useful when using Json to load the effect properties -Robotization::Robotization() : fft_size(FFT_SIZE_512), hop_size(HOP_SIZE_2), window_type(RECTANGULAR), effect_type(ROBOTIZATION), stft(*this) { +Robotization::Robotization() : fft_size(FFT_SIZE_512), hop_size(HOP_SIZE_2), window_type(RECTANGULAR), stft(*this) { // Init effect properties init_effect_details(); } // Default constructor -Robotization::Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type, openshot::RobotizationEffectType new_effect_type) : - fft_size(new_fft_size), 
hop_size(new_hop_size), window_type(new_window_type), effect_type(new_effect_type), stft(*this) +Robotization::Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type) : + fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type), stft(*this) { // Init effect properties init_effect_details(); @@ -85,6 +86,19 @@ std::shared_ptr Robotization::GetFrame(std::shared_ptrperform(time_domain_buffer, frequency_domain_buffer, false); + + for (int index = 0; index < fft_size; ++index) { + float magnitude = abs(frequency_domain_buffer[index]); + frequency_domain_buffer[index].real(magnitude); + frequency_domain_buffer[index].imag(0.0f); + } + + fft->perform(frequency_domain_buffer, time_domain_buffer, true); +} + // Generate JSON string of this object std::string Robotization::Json() const { @@ -101,7 +115,6 @@ Json::Value Robotization::JsonValue() const { root["fft_size"] = fft_size; root["hop_size"] = hop_size; root["window_type"] = window_type; - root["effect_type"] = effect_type; // return JsonValue return root; @@ -138,13 +151,6 @@ void Robotization::SetJsonValue(const Json::Value root) { if (!root["window_type"].isNull()) window_type = (WindowType)root["window_type"].asInt(); - - if (!root["effect_type"].isNull()) - effect_type = (RobotizationEffectType)root["effect_type"].asInt(); - - // Set data from Json (if key is found) - // if (!root["shift"].isNull()) - // shift.SetJsonValue(root["shift"]); } // Get all properties for a specific frame @@ -162,18 +168,13 @@ std::string Robotization::PropertiesJSON(int64_t requested_frame) const { root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame); root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame); root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 3, false, requested_frame); - root["effect_type"] = 
add_property_json("Effect Type", effect_type, "int", "", NULL, 0, 2, false, requested_frame); // Add fft_size choices (dropdown style) - root["fft_size"]["choices"].append(add_property_choice_json("32", FFT_SIZE_32, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("64", FFT_SIZE_64, fft_size)); root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size)); root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size)); root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size)); root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size)); root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("4096", FFT_SIZE_4096, fft_size)); - root["fft_size"]["choices"].append(add_property_choice_json("8192", FFT_SIZE_8192, fft_size)); // Add hop_size choices (dropdown style) root["hop_size"]["choices"].append(add_property_choice_json("1/2", HOP_SIZE_2, hop_size)); @@ -186,11 +187,6 @@ std::string Robotization::PropertiesJSON(int64_t requested_frame) const { root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type)); root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type)); - // Add effect_type choices (dropdown style) - root["effect_type"]["choices"].append(add_property_choice_json("Pass Through", PASS_THROUGH, effect_type)); - root["effect_type"]["choices"].append(add_property_choice_json("Robotization", ROBOTIZATION, effect_type)); - root["effect_type"]["choices"].append(add_property_choice_json("Whisperization", WHISPERIZATION, effect_type)); - // Return formatted string return root.toStyledString(); -} +} \ No newline at end of file diff --git a/src/audio_effects/Robotization.h b/src/audio_effects/Robotization.h index db4d35b0..07838213 100644 --- 
a/src/audio_effects/Robotization.h +++ b/src/audio_effects/Robotization.h @@ -64,7 +64,6 @@ namespace openshot openshot::FFTSize fft_size; openshot::HopSize hop_size; openshot::WindowType window_type; - openshot::RobotizationEffectType effect_type; /// Blank constructor, useful when using Json to load the effect properties Robotization(); @@ -72,7 +71,7 @@ namespace openshot /// Default constructor /// /// @param new_level The audio default Robotization level (between 1 and 100) - Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type, openshot::RobotizationEffectType new_effect_type); + Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type); /// @brief This method is required for all derived classes of ClipBase, and returns a /// new openshot::Frame object. All Clip keyframes and effects are resolved into @@ -106,53 +105,19 @@ namespace openshot std::string PropertiesJSON(int64_t requested_frame) const override; - class RobotizationWhisperizationEffect : public STFT + class RobotizationEffect : public STFT { public: - RobotizationWhisperizationEffect (Robotization& p) : parent (p) { } + RobotizationEffect (Robotization& p) : parent (p) { } private: - void modification() override - { - fft->perform(time_domain_buffer, frequency_domain_buffer, false); - - switch ((int)parent.effect_type) { - case PASS_THROUGH: { - // nothing - break; - } - case ROBOTIZATION: { - for (int index = 0; index < fft_size; ++index) { - float magnitude = abs(frequency_domain_buffer[index]); - frequency_domain_buffer[index].real(magnitude); - frequency_domain_buffer[index].imag(0.0f); - } - break; - } - case WHISPERIZATION: { - for (int index = 0; index < fft_size / 2 + 1; ++index) { - float magnitude = abs(frequency_domain_buffer[index]); - float phase = 2.0f * M_PI * (float)rand() / (float)RAND_MAX; - - frequency_domain_buffer[index].real(magnitude * cosf (phase)); - 
frequency_domain_buffer[index].imag(magnitude * sinf (phase)); - if (index > 0 && index < fft_size / 2) { - frequency_domain_buffer[fft_size - index].real (magnitude * cosf (phase)); - frequency_domain_buffer[fft_size - index].imag (magnitude * sinf (-phase)); - } - } - break; - } - } - - fft->perform(frequency_domain_buffer, time_domain_buffer, true); - } + void modification(const int channel) override; Robotization &parent; }; juce::CriticalSection lock; - RobotizationWhisperizationEffect stft; + RobotizationEffect stft; std::unique_ptr fft; }; diff --git a/src/audio_effects/STFT.cpp b/src/audio_effects/STFT.cpp index 26e35907..3b87e6e3 100644 --- a/src/audio_effects/STFT.cpp +++ b/src/audio_effects/STFT.cpp @@ -32,7 +32,7 @@ void STFT::process(juce::AudioSampleBuffer &block) input_buffer.setSample(channel, current_input_buffer_write_position, input_sample); if (++current_input_buffer_write_position >= input_buffer_length) current_input_buffer_write_position = 0; - + // diff channel_data[sample] = output_buffer.getSample(channel, current_output_buffer_read_position); output_buffer.setSample(channel, current_output_buffer_read_position, 0.0f); @@ -42,7 +42,7 @@ void STFT::process(juce::AudioSampleBuffer &block) if (++current_samples_since_last_FFT >= hop_size) { current_samples_since_last_FFT = 0; analysis(channel); - modification(); + modification(channel); synthesis(channel); } } @@ -99,9 +99,12 @@ void STFT::updateHopSize(const int new_overlap) } } + void STFT::updateWindow(const int new_window_type) { - switch (new_window_type) { + window_type = new_window_type; + + switch (window_type) { case RECTANGULAR: { for (int sample = 0; sample < fft_size; ++sample) fft_window[sample] = 1.0f; @@ -123,7 +126,7 @@ void STFT::updateWindow(const int new_window_type) break; } } - + float window_sum = 0.0f; for (int sample = 0; sample < fft_size; ++sample) window_sum += fft_window[sample]; @@ -133,6 +136,8 @@ void STFT::updateWindow(const int new_window_type) 
window_scale_factor = 1.0f / (float)overlap / window_sum * (float)fft_size; } + + void STFT::analysis(const int channel) { int input_buffer_index = current_input_buffer_write_position; @@ -145,7 +150,7 @@ void STFT::analysis(const int channel) } } -void STFT::modification() +void STFT::modification(const int channel) { fft->perform(time_domain_buffer, frequency_domain_buffer, false); diff --git a/src/audio_effects/STFT.h b/src/audio_effects/STFT.h index b27439c0..71f71a78 100644 --- a/src/audio_effects/STFT.h +++ b/src/audio_effects/STFT.h @@ -19,22 +19,23 @@ namespace openshot void setup(const int num_input_channels); - void updateParameters(const int new_fft_size, const int new_overlap, const int new_window_type); - void process(juce::AudioSampleBuffer &block); - private: - void updateFftSize(const int new_fft_size); + void updateParameters(const int new_fft_size, const int new_overlap, const int new_window_type); + + virtual void updateFftSize(const int new_fft_size); - void updateHopSize(const int new_overlap); + virtual void updateHopSize(const int new_overlap); - void updateWindow(const int new_window_type); + virtual void updateWindow(const int new_window_type); + + private: - void analysis(const int channel); + virtual void modification(const int channel); - virtual void modification(); + virtual void analysis(const int channel); - void synthesis(const int channel); + virtual void synthesis(const int channel); protected: int num_channels; @@ -55,6 +56,7 @@ namespace openshot int overlap; int hop_size; + int window_type; float window_scale_factor; int input_buffer_write_position; diff --git a/src/audio_effects/Whisperization.cpp b/src/audio_effects/Whisperization.cpp new file mode 100644 index 00000000..70ef31ef --- /dev/null +++ b/src/audio_effects/Whisperization.cpp @@ -0,0 +1,200 @@ +/** + * @file + * @brief Source file for Whisperization audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot 
Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Whisperization.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Whisperization::Whisperization() : fft_size(FFT_SIZE_512), hop_size(HOP_SIZE_8), window_type(RECTANGULAR), stft(*this) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Whisperization::Whisperization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type) : + fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type), stft(*this) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Whisperization::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Whisperization"; + info.name = "Whisperization"; + info.description = "Whisperization effect on the frame's sound."; + info.has_audio = true; + info.has_video = false; +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Whisperization::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + const ScopedLock sl (lock); + ScopedNoDenormals noDenormals; + + const int num_input_channels = frame->audio->getNumChannels(); + const int num_output_channels = frame->audio->getNumChannels(); + const int num_samples = frame->audio->getNumSamples(); + const int hop_size_value = 1 << ((int)hop_size + 1); + const int fft_size_value = 1 << ((int)fft_size + 5); + + stft.setup(num_output_channels); + stft.updateParameters((int)fft_size_value, + (int)hop_size_value, + (int)window_type); + + stft.process(*frame->audio); + + // return the modified frame + return frame; +} + +void Whisperization::WhisperizationEffect::modification(const int channel) +{ + fft->perform(time_domain_buffer, frequency_domain_buffer, false); + + for (int index = 0; index < fft_size / 2 + 1; ++index) { + float magnitude = abs(frequency_domain_buffer[index]); + float phase = 2.0f * M_PI * (float)rand() / (float)RAND_MAX; + + frequency_domain_buffer[index].real(magnitude * cosf(phase)); + frequency_domain_buffer[index].imag(magnitude * sinf(phase)); + + if (index > 0 && index < fft_size / 2) { + frequency_domain_buffer[fft_size - index].real(magnitude * cosf (phase)); + frequency_domain_buffer[fft_size - index].imag(magnitude * sinf (-phase)); + } + } + + fft->perform(frequency_domain_buffer, time_domain_buffer, true); +} + + +// Generate JSON string of this object +std::string Whisperization::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value 
Whisperization::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["fft_size"] = fft_size; + root["hop_size"] = hop_size; + root["window_type"] = window_type; + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Whisperization::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Whisperization::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + if (!root["fft_size"].isNull()) + fft_size = (FFTSize)root["fft_size"].asInt(); + + if (!root["hop_size"].isNull()) + hop_size = (HopSize)root["hop_size"].asInt(); + + if (!root["window_type"].isNull()) + window_type = (WindowType)root["window_type"].asInt(); +} + +// Get all properties for a specific frame +std::string Whisperization::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 
8, false, requested_frame); + root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame); + root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 3, false, requested_frame); + + // Add fft_size choices (dropdown style) + root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size)); + root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size)); + + // Add hop_size choices (dropdown style) + root["hop_size"]["choices"].append(add_property_choice_json("1/2", HOP_SIZE_2, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("1/4", HOP_SIZE_4, hop_size)); + root["hop_size"]["choices"].append(add_property_choice_json("1/8", HOP_SIZE_8, hop_size)); + + // Add window_type choices (dropdown style) + root["window_type"]["choices"].append(add_property_choice_json("Rectangular", RECTANGULAR, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Bart Lett", BART_LETT, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type)); + root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type)); + + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Pitch.h b/src/audio_effects/Whisperization.h similarity index 70% rename from src/audio_effects/Pitch.h rename to src/audio_effects/Whisperization.h index 9b2ff234..81add56b 100644 --- a/src/audio_effects/Pitch.h +++ b/src/audio_effects/Whisperization.h @@ -1,6 +1,6 @@ /** * @file - * @brief Header file for Pitch audio effect class + * 
@brief Header file for whisperization audio effect class * @author * * @ref License @@ -28,8 +28,8 @@ * along with OpenShot Library. If not, see . */ -#ifndef OPENSHOT_PITCH_AUDIO_EFFECT_H -#define OPENSHOT_PITCH_AUDIO_EFFECT_H +#ifndef OPENSHOT_WHISPERIZATION_AUDIO_EFFECT_H +#define OPENSHOT_WHISPERIZATION_AUDIO_EFFECT_H #define _USE_MATH_DEFINES #include "../EffectBase.h" @@ -38,6 +38,7 @@ #include "../Json.h" #include "../KeyFrame.h" #include "../Enums.h" +#include "STFT.h" #include #include @@ -49,28 +50,27 @@ namespace openshot { /** - * @brief This class adds a pitch into the audio + * @brief This class adds a whisperization effect into the audio * */ - class Pitch : public EffectBase + class Whisperization : public EffectBase { private: /// Init effect settings void init_effect_details(); public: - Keyframe shift; ///< Pitch shift keyframe. The pitch shift inserted on the audio. openshot::FFTSize fft_size; openshot::HopSize hop_size; openshot::WindowType window_type; /// Blank constructor, useful when using Json to load the effect properties - Pitch(); + Whisperization(); /// Default constructor /// - /// @param new_level The audio default pitch level (between 1 and 100) - Pitch(Keyframe new_shift, openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type); + /// @param new_level The audio default Whisperization level (between 1 and 100) + Whisperization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type); /// @brief This method is required for all derived classes of ClipBase, and returns a /// new openshot::Frame object. 
All Clip keyframes and effects are resolved into @@ -104,40 +104,20 @@ namespace openshot std::string PropertiesJSON(int64_t requested_frame) const override; - void updateFftSize(std::shared_ptr frame); - void updateHopSize(); - void updateAnalysisWindow(); - void updateWindow(const juce::HeapBlock& window, const int window_length); - void updateWindowScaleFactor(); - float princArg(const float phase); + class WhisperizationEffect : public STFT + { + public: + WhisperizationEffect (Whisperization& p) : parent (p) { } + private: + void modification(const int channel) override; + + Whisperization &parent; + }; juce::CriticalSection lock; + WhisperizationEffect stft; std::unique_ptr fft; - - int input_buffer_length; - int input_buffer_write_position; - juce::AudioSampleBuffer input_buffer; - - int output_buffer_length; - int output_buffer_write_position; - int output_buffer_read_position; - juce::AudioSampleBuffer output_buffer; - - juce::HeapBlock fft_window; - juce::HeapBlock> fft_time_domain; - juce::HeapBlock> fft_frequency_domain; - - int samples_since_last_FFT; - - int overlap; - int hopSize; - float window_scale_factor; - - juce::HeapBlock omega; - juce::AudioSampleBuffer input_phase; - juce::AudioSampleBuffer output_phase; - bool need_to_reset_phases; }; } From 8c0342480bd05f02e20d4729d8057a6216341e8f Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Jul 2021 19:49:58 -0300 Subject: [PATCH 34/71] Adjusting parameters of distortion --- src/audio_effects/Distortion.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp index 1266c56d..b42ff0bc 100644 --- a/src/audio_effects/Distortion.cpp +++ b/src/audio_effects/Distortion.cpp @@ -34,7 +34,7 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Distortion::Distortion() : distortion_type(HARD_CLIPPING), input_gain(0), output_gain(0), tone(0) { +Distortion::Distortion() : 
distortion_type(HARD_CLIPPING), input_gain(10), output_gain(-10), tone(5) { // Init effect properties init_effect_details(); } From 5018c8d15fe116b56fdcc5c4be9d454ce5c15af1 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Jul 2021 19:53:49 -0300 Subject: [PATCH 35/71] Adding description on the audio effects and cleaning the code --- src/audio_effects/Compressor.cpp | 4 +--- src/audio_effects/Distortion.cpp | 20 ++++++++++++++++++-- src/audio_effects/Distortion.h | 18 +----------------- src/audio_effects/Expander.cpp | 2 +- src/audio_effects/ParametricEQ.cpp | 2 +- src/audio_effects/ParametricEQ.h | 14 +++++++------- src/audio_effects/Robotization.cpp | 2 +- src/audio_effects/Whisperization.cpp | 2 +- src/audio_effects/Whisperization.h | 2 +- 9 files changed, 32 insertions(+), 34 deletions(-) diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp index 7ba5c8c8..5ee2f629 100644 --- a/src/audio_effects/Compressor.cpp +++ b/src/audio_effects/Compressor.cpp @@ -56,14 +56,12 @@ void Compressor::init_effect_details() /// Set the effect info info.class_name = "Compressor"; info.name = "Compressor"; - info.description = "Add Compressor on the frame's sound."; + info.description = "Add compressor effect on the frame's audio. This effect reduces the volume of loud sounds or amplify quiet sounds."; info.has_audio = true; info.has_video = false; input_level = 0.0f; yl_prev = 0.0f; - - } // This method is required for all derived classes of EffectBase, and returns a diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp index b42ff0bc..af76a1ba 100644 --- a/src/audio_effects/Distortion.cpp +++ b/src/audio_effects/Distortion.cpp @@ -56,11 +56,12 @@ void Distortion::init_effect_details() /// Set the effect info info.class_name = "Distortion"; info.name = "Distortion"; - info.description = "Add distortion on the frame's sound."; + info.description = "Add distortion on the frame's audio. 
This effect alters the audio by clipping the signal."; info.has_audio = true; info.has_video = false; } + // This method is required for all derived classes of EffectBase, and returns a // modified openshot::Frame object std::shared_ptr Distortion::GetFrame(std::shared_ptr frame, int64_t frame_number) @@ -77,7 +78,6 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptraudio->getNumChannels(); channel++) { - //auto *inBuffer = frame->audio->getReadPointer(channel); auto *channel_data = frame->audio->getWritePointer(channel); float out; @@ -166,6 +166,22 @@ std::string Distortion::Json() const { return JsonValue().toStyledString(); } +void Distortion::Filter::updateCoefficients(const double discrete_frequency, const double gain) +{ + jassert(discrete_frequency > 0); + + double tan_half_wc = tan(discrete_frequency / 2.0); + double sqrt_gain = sqrt(gain); + + coefficients = juce::IIRCoefficients(/* b0 */ sqrt_gain * tan_half_wc + gain, + /* b1 */ sqrt_gain * tan_half_wc - gain, + /* b2 */ 0.0, + /* a0 */ sqrt_gain * tan_half_wc + 1.0, + /* a1 */ sqrt_gain * tan_half_wc - 1.0, + /* a2 */ 0.0); + setCoefficients(coefficients); +} + // Generate Json::Value for this object Json::Value Distortion::JsonValue() const { diff --git a/src/audio_effects/Distortion.h b/src/audio_effects/Distortion.h index a3b3e8ea..46577e9d 100644 --- a/src/audio_effects/Distortion.h +++ b/src/audio_effects/Distortion.h @@ -106,23 +106,7 @@ namespace openshot class Filter : public juce::IIRFilter { public: - void updateCoefficients (const double discreteFrequency, - const double gain) noexcept - { - jassert (discreteFrequency > 0); - - double tan_half_wc = tan (discreteFrequency / 2.0); - double sqrt_gain = sqrt (gain); - - coefficients = juce::IIRCoefficients (/* b0 */ sqrt_gain * tan_half_wc + gain, - /* b1 */ sqrt_gain * tan_half_wc - gain, - /* b2 */ 0.0, - /* a0 */ sqrt_gain * tan_half_wc + 1.0, - /* a1 */ sqrt_gain * tan_half_wc - 1.0, - /* a2 */ 0.0); - - setCoefficients (coefficients); - 
} + void updateCoefficients(const double discrete_frequency, const double gain) noexcept; }; juce::OwnedArray filters; diff --git a/src/audio_effects/Expander.cpp b/src/audio_effects/Expander.cpp index 2c6a86c3..0d7c641b 100644 --- a/src/audio_effects/Expander.cpp +++ b/src/audio_effects/Expander.cpp @@ -56,7 +56,7 @@ void Expander::init_effect_details() /// Set the effect info info.class_name = "Expander"; info.name = "Expander"; - info.description = "Add Expander on the frame's sound."; + info.description = "Add Expander on the frame's audio track. Louder parts of the audio becomes relatively louder and quieter parts becomes quieter."; info.has_audio = true; info.has_video = false; diff --git a/src/audio_effects/ParametricEQ.cpp b/src/audio_effects/ParametricEQ.cpp index d3714f14..7ca9d3d6 100644 --- a/src/audio_effects/ParametricEQ.cpp +++ b/src/audio_effects/ParametricEQ.cpp @@ -57,7 +57,7 @@ void ParametricEQ::init_effect_details() /// Set the effect info info.class_name = "ParametricEQ"; info.name = "Parametric EQ"; - info.description = "Add equalization on the frame's sound."; + info.description = "Add equalization on the frame's sound. 
This effect is a filter that allows you to adjust the volume level of a frequency within an audio."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/ParametricEQ.h b/src/audio_effects/ParametricEQ.h index f69d5143..e8a87f13 100644 --- a/src/audio_effects/ParametricEQ.h +++ b/src/audio_effects/ParametricEQ.h @@ -121,7 +121,7 @@ namespace openshot double sqrt_gain = sqrt (gain); switch (filter_type) { - case 0 /*filterTypeLowPass*/: { + case 0 /* LOW_PASS */: { coefficients = IIRCoefficients (/* b0 */ tan_half_wc, /* b1 */ tan_half_wc, /* b2 */ 0.0, @@ -130,7 +130,7 @@ namespace openshot /* a2 */ 0.0); break; } - case 1 /*filterTypeHighPass*/: { + case 1 /* HIGH_PASS */: { coefficients = IIRCoefficients (/* b0 */ 1.0, /* b1 */ -1.0, /* b2 */ 0.0, @@ -139,7 +139,7 @@ namespace openshot /* a2 */ 0.0); break; } - case 2 /*filterTypeLowShelf*/: { + case 2 /* LOW_SHELF */: { coefficients = IIRCoefficients (/* b0 */ gain * tan_half_wc + sqrt_gain, /* b1 */ gain * tan_half_wc - sqrt_gain, /* b2 */ 0.0, @@ -148,7 +148,7 @@ namespace openshot /* a2 */ 0.0); break; } - case 3 /*filterTypeHighShelf*/: { + case 3 /* HIGH_SHELF */: { coefficients = IIRCoefficients (/* b0 */ sqrt_gain * tan_half_wc + gain, /* b1 */ sqrt_gain * tan_half_wc - gain, /* b2 */ 0.0, @@ -157,7 +157,7 @@ namespace openshot /* a2 */ 0.0); break; } - case 4 /*filterTypeBandPass*/: { + case 4 /* BAND_PASS */: { coefficients = IIRCoefficients (/* b0 */ tan_half_bw, /* b1 */ 0.0, /* b2 */ -tan_half_bw, @@ -166,7 +166,7 @@ namespace openshot /* a2 */ 1.0 - tan_half_bw); break; } - case 5 /*filterTypeBandStop*/: { + case 5 /* BAND_STOP */: { coefficients = IIRCoefficients (/* b0 */ 1.0, /* b1 */ two_cos_wc, /* b2 */ 1.0, @@ -175,7 +175,7 @@ namespace openshot /* a2 */ 1.0 - tan_half_bw); break; } - case 6 /*filterTypePeakingNotch*/: { + case 6 /* PEAKING_NOTCH */: { coefficients = IIRCoefficients (/* b0 */ sqrt_gain + gain * tan_half_bw, /* b1 */ sqrt_gain * two_cos_wc, /* b2 */ 
sqrt_gain - gain * tan_half_bw, diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp index 7f07e117..1f9c8ac0 100644 --- a/src/audio_effects/Robotization.cpp +++ b/src/audio_effects/Robotization.cpp @@ -57,7 +57,7 @@ void Robotization::init_effect_details() /// Set the effect info info.class_name = "Robotization"; info.name = "Robotization"; - info.description = "Robotization effect on the frame's sound."; + info.description = "Add robotization effect on the frame's audio track. This effect transforms the voice present in an audio into a robotic voice effect."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/Whisperization.cpp b/src/audio_effects/Whisperization.cpp index 70ef31ef..e36fbfa0 100644 --- a/src/audio_effects/Whisperization.cpp +++ b/src/audio_effects/Whisperization.cpp @@ -56,7 +56,7 @@ void Whisperization::init_effect_details() /// Set the effect info info.class_name = "Whisperization"; info.name = "Whisperization"; - info.description = "Whisperization effect on the frame's sound."; + info.description = "Add whisperization effect on the frame's audio track. 
This effect transforms the voice present in an audio into a whispering voice effect."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/Whisperization.h b/src/audio_effects/Whisperization.h index 81add56b..5659a861 100644 --- a/src/audio_effects/Whisperization.h +++ b/src/audio_effects/Whisperization.h @@ -107,7 +107,7 @@ namespace openshot class WhisperizationEffect : public STFT { public: - WhisperizationEffect (Whisperization& p) : parent (p) { } + WhisperizationEffect(Whisperization& p) : parent (p) { } private: void modification(const int channel) override; From 018cbd5252b82a82903592e41f9105318456e099 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Jul 2021 19:54:42 -0300 Subject: [PATCH 36/71] Bugfix --- src/audio_effects/Distortion.cpp | 2 -- src/audio_effects/Distortion.h | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp index af76a1ba..10abf64d 100644 --- a/src/audio_effects/Distortion.cpp +++ b/src/audio_effects/Distortion.cpp @@ -168,8 +168,6 @@ std::string Distortion::Json() const { void Distortion::Filter::updateCoefficients(const double discrete_frequency, const double gain) { - jassert(discrete_frequency > 0); - double tan_half_wc = tan(discrete_frequency / 2.0); double sqrt_gain = sqrt(gain); diff --git a/src/audio_effects/Distortion.h b/src/audio_effects/Distortion.h index 46577e9d..63839b82 100644 --- a/src/audio_effects/Distortion.h +++ b/src/audio_effects/Distortion.h @@ -106,7 +106,7 @@ namespace openshot class Filter : public juce::IIRFilter { public: - void updateCoefficients(const double discrete_frequency, const double gain) noexcept; + void updateCoefficients(const double discrete_frequency, const double gain); }; juce::OwnedArray filters; From 2d60bb118b089d5e9851b4451fcc4850da0f6215 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Mon, 12 Jul 2021 21:24:24 -0700 Subject: [PATCH 37/71] CMake: Fix ENABLE_MAGICK (#701) 
--- src/CMakeLists.txt | 79 +++++++++++++++++++++++----------------------- 1 file changed, 40 insertions(+), 39 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index dc61e2e1..3dda17d2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -186,56 +186,57 @@ target_link_libraries(openshot PUBLIC OpenShot::Audio) ### # Find the ImageMagick++ library -find_package(ImageMagick COMPONENTS Magick++ MagickCore) +if (ENABLE_MAGICK) + find_package(ImageMagick COMPONENTS Magick++ MagickCore) -if(ImageMagick_FOUND) - if(NOT TARGET ImageMagick::Magick++ AND NOT TARGET Magick++_TARGET) - add_library(Magick++_TARGET INTERFACE) + if(ImageMagick_FOUND) + if(NOT TARGET ImageMagick::Magick++ AND NOT TARGET Magick++_TARGET) + add_library(Magick++_TARGET INTERFACE) - # Include ImageMagick++ headers (needed for compile) - set_property(TARGET Magick++_TARGET APPEND PROPERTY - INTERFACE_INCLUDE_DIRECTORIES ${ImageMagick_INCLUDE_DIRS}) + # Include ImageMagick++ headers (needed for compile) + set_property(TARGET Magick++_TARGET APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${ImageMagick_INCLUDE_DIRS}) + + # Set the Quantum Depth that ImageMagick was built with (default to 16 bits) + if(NOT DEFINED MAGICKCORE_QUANTUM_DEPTH) + set(MAGICKCORE_QUANTUM_DEPTH 16) + endif() + if(NOT DEFINED MAGICKCORE_HDRI_ENABLE) + set(MAGICKCORE_HDRI_ENABLE 0) + endif() + + set_property(TARGET Magick++_TARGET APPEND PROPERTY + INTERFACE_COMPILE_DEFINITIONS + MAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH}) + set_property(TARGET Magick++_TARGET APPEND PROPERTY + INTERFACE_COMPILE_DEFINITIONS + MAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE}) + + target_link_libraries(Magick++_TARGET INTERFACE + ${ImageMagick_LIBRARIES}) + + # Alias to our namespaced name + add_library(ImageMagick::Magick++ ALIAS Magick++_TARGET) - # Set the Quantum Depth that ImageMagick was built with (default to 16 bits) - if(NOT DEFINED MAGICKCORE_QUANTUM_DEPTH) - set(MAGICKCORE_QUANTUM_DEPTH 16) - endif() - 
if(NOT DEFINED MAGICKCORE_HDRI_ENABLE) - set(MAGICKCORE_HDRI_ENABLE 0) endif() - set_property(TARGET Magick++_TARGET APPEND PROPERTY - INTERFACE_COMPILE_DEFINITIONS - MAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH}) - set_property(TARGET Magick++_TARGET APPEND PROPERTY - INTERFACE_COMPILE_DEFINITIONS - MAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE}) + # Add optional ImageMagic-dependent sources + target_sources(openshot PRIVATE + ImageReader.cpp + ImageWriter.cpp + TextReader.cpp) - target_link_libraries(Magick++_TARGET INTERFACE - ${ImageMagick_LIBRARIES}) + # define a preprocessor macro (used in the C++ source) + target_compile_definitions(openshot PUBLIC USE_IMAGEMAGICK=1) - # Alias to our namespaced name - add_library(ImageMagick::Magick++ ALIAS Magick++_TARGET) + # Link with ImageMagick library + target_link_libraries(openshot PUBLIC ImageMagick::Magick++) + set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE) + mark_as_advanced(HAVE_IMAGEMAGICK) endif() - - # Add optional ImageMagic-dependent sources - target_sources(openshot PRIVATE - ImageReader.cpp - ImageWriter.cpp - TextReader.cpp) - - # define a preprocessor macro (used in the C++ source) - target_compile_definitions(openshot PUBLIC USE_IMAGEMAGICK=1) - - # Link with ImageMagick library - target_link_libraries(openshot PUBLIC ImageMagick::Magick++) - - set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE) - mark_as_advanced(HAVE_IMAGEMAGICK) endif() - ################### JSONCPP ##################### # Include jsoncpp headers (needed for JSON parsing) if (USE_SYSTEM_JSONCPP) From 70aedd35c5a840300fdf28f31f881f864421d1cd Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Mon, 12 Jul 2021 22:28:37 -0700 Subject: [PATCH 38/71] Eliminate useless 'const' (#703) --- src/FFmpegUtilities.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/FFmpegUtilities.h b/src/FFmpegUtilities.h index d8b1587f..f095e72a 100644 --- 
a/src/FFmpegUtilities.h +++ b/src/FFmpegUtilities.h @@ -134,7 +134,7 @@ inline static const std::string av_make_error_string(int errnum) #endif // Does ffmpeg pixel format contain an alpha channel? -inline static const bool ffmpeg_has_alpha(PixelFormat pix_fmt) { +inline static bool ffmpeg_has_alpha(PixelFormat pix_fmt) { const AVPixFmtDescriptor *fmt_desc = av_pix_fmt_desc_get(pix_fmt); return bool(fmt_desc->flags & AV_PIX_FMT_FLAG_ALPHA); } From 60c19f1da22b787aac6835364bcdf8e29ae97e74 Mon Sep 17 00:00:00 2001 From: Brenno Date: Fri, 16 Jul 2021 16:51:52 -0300 Subject: [PATCH 39/71] Added Delay and Echo effects --- src/CMakeLists.txt | 2 + src/EffectInfo.cpp | 8 ++ src/Effects.h | 2 + src/audio_effects/Compressor.cpp | 2 +- src/audio_effects/Delay.cpp | 190 +++++++++++++++++++++++++++ src/audio_effects/Delay.h | 110 ++++++++++++++++ src/audio_effects/Distortion.h | 2 +- src/audio_effects/Echo.cpp | 200 +++++++++++++++++++++++++++++ src/audio_effects/Echo.h | 112 ++++++++++++++++ src/audio_effects/Expander.cpp | 2 +- src/audio_effects/Expander.h | 2 +- src/audio_effects/ParametricEQ.cpp | 1 - src/audio_effects/ParametricEQ.h | 2 +- src/audio_effects/Robotization.h | 1 - 14 files changed, 629 insertions(+), 7 deletions(-) create mode 100644 src/audio_effects/Delay.cpp create mode 100644 src/audio_effects/Delay.h create mode 100644 src/audio_effects/Echo.cpp create mode 100644 src/audio_effects/Echo.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 345ebbd3..e9436217 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -133,6 +133,8 @@ set(EFFECTS_SOURCES effects/Wave.cpp audio_effects/STFT.cpp audio_effects/Noise.cpp + audio_effects/Delay.cpp + audio_effects/Echo.cpp audio_effects/Distortion.cpp audio_effects/ParametricEQ.cpp audio_effects/Compressor.cpp diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp index 54f73bad..c6011fbc 100644 --- a/src/EffectInfo.cpp +++ b/src/EffectInfo.cpp @@ -91,6 +91,12 @@ EffectBase* 
EffectInfo::CreateEffect(std::string effect_type) { else if(effect_type == "Noise") return new Noise(); + else if(effect_type == "Delay") + return new Delay(); + + else if(effect_type == "Echo") + return new Echo(); + else if(effect_type == "Distortion") return new Distortion(); @@ -147,6 +153,8 @@ Json::Value EffectInfo::JsonValue() { root.append(Wave().JsonInfo()); /* Audio */ root.append(Noise().JsonInfo()); + root.append(Delay().JsonInfo()); + root.append(Echo().JsonInfo()); root.append(Distortion().JsonInfo()); root.append(ParametricEQ().JsonInfo()); root.append(Compressor().JsonInfo()); diff --git a/src/Effects.h b/src/Effects.h index 81317b41..c0da0741 100644 --- a/src/Effects.h +++ b/src/Effects.h @@ -50,6 +50,8 @@ /* Audio Effects */ #include "audio_effects/Noise.h" +#include "audio_effects/Delay.h" +#include "audio_effects/Echo.h" #include "audio_effects/Distortion.h" #include "audio_effects/ParametricEQ.h" #include "audio_effects/Compressor.h" diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp index 5ee2f629..cf706e6e 100644 --- a/src/audio_effects/Compressor.cpp +++ b/src/audio_effects/Compressor.cpp @@ -74,7 +74,7 @@ std::shared_ptr Compressor::GetFrame(std::shared_ptraudio->getNumSamples(); mixed_down_input.setSize(1, num_samples); - inverse_sample_rate = 1.0f / frame->SampleRate(); //(float)getSampleRate(); + inverse_sample_rate = 1.0f / frame->SampleRate(); inverseE = 1.0f / M_E; if ((bool)bypass.GetValue(frame_number)) diff --git a/src/audio_effects/Delay.cpp b/src/audio_effects/Delay.cpp new file mode 100644 index 00000000..467ba756 --- /dev/null +++ b/src/audio_effects/Delay.cpp @@ -0,0 +1,190 @@ +/** + * @file + * @brief Source file for Delay audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . 
This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Delay.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Delay::Delay() : delay_time(1) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Delay::Delay(Keyframe new_delay_time) : delay_time(new_delay_time) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Delay::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Delay"; + info.name = "Delay"; + info.description = "Adjust the synchronism between the audio and video track."; + info.has_audio = true; + info.has_video = false; + initialized = false; +} + +void Delay::setup(std::shared_ptr frame) +{ + if (!initialized) + { + const float max_delay_time = 5; + delay_buffer_samples = (int)(max_delay_time * (float)frame->SampleRate()) + 1; + + if (delay_buffer_samples < 1) + delay_buffer_samples = 1; + + delay_buffer_channels = frame->audio->getNumChannels(); + delay_buffer.setSize(delay_buffer_channels, delay_buffer_samples); + delay_buffer.clear(); + delay_write_position = 0; + initialized = true; + } +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Delay::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + const float delay_time_value = (float)delay_time.GetValue(frame_number)*(float)frame->SampleRate(); + int local_write_position; + + setup(frame); + + for (int channel = 0; channel < frame->audio->getNumChannels(); channel++) + { + float *channel_data = frame->audio->getWritePointer(channel); + float *delay_data = delay_buffer.getWritePointer(channel); + local_write_position = delay_write_position; + + for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample) + { + const float in = (float)(channel_data[sample]); + float out = 0.0f; + + float read_position = fmodf((float)local_write_position - delay_time_value + (float)delay_buffer_samples, delay_buffer_samples); + int local_read_position = floorf(read_position); + + if (local_read_position != local_write_position) + { + float fraction = read_position - (float)local_read_position; + float delayed1 = delay_data[(local_read_position + 0)]; + float delayed2 = delay_data[(local_read_position + 1) % delay_buffer_samples]; + out = (float)(delayed1 + fraction * (delayed2 - delayed1)); + + channel_data[sample] 
= in + (out - in); + delay_data[local_write_position] = in; + } + + if (++local_write_position >= delay_buffer_samples) + local_write_position -= delay_buffer_samples; + } + } + + delay_write_position = local_write_position; + + // return the modified frame + return frame; +} + +// Generate JSON string of this object +std::string Delay::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Delay::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["delay_time"] = delay_time.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Delay::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Delay::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["delay_time"].isNull()) + delay_time.SetJsonValue(root["delay_time"]); +} + +// Get all properties for a specific frame +std::string Delay::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 
60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["delay_time"] = add_property_json("Delay Time", delay_time.GetValue(requested_frame), "float", "", &delay_time, 0, 5, false, requested_frame); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Delay.h b/src/audio_effects/Delay.h new file mode 100644 index 00000000..a693c292 --- /dev/null +++ b/src/audio_effects/Delay.h @@ -0,0 +1,110 @@ +/** + * @file + * @brief Header file for Delay audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . 
+ */ + +#ifndef OPENSHOT_DELAY_AUDIO_EFFECT_H +#define OPENSHOT_DELAY_AUDIO_EFFECT_H + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" + +#include +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a delay into the audio + * + */ + class Delay : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + Keyframe delay_time; + + juce::AudioSampleBuffer delay_buffer; + int delay_buffer_samples; + int delay_buffer_channels; + int delay_write_position; + bool initialized; + + /// Blank constructor, useful when using Json to load the effect properties + Delay(); + + /// Default constructor + Delay(Keyframe new_delay_time); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + void setup(std::shared_ptr frame); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. + /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + }; + +} + +#endif diff --git a/src/audio_effects/Distortion.h b/src/audio_effects/Distortion.h index 63839b82..8163a075 100644 --- a/src/audio_effects/Distortion.h +++ b/src/audio_effects/Distortion.h @@ -49,7 +49,7 @@ namespace openshot { /** - * @brief This class adds a noise into the audio + * @brief This class adds a distortion into the audio * */ class Distortion : public EffectBase diff --git a/src/audio_effects/Echo.cpp b/src/audio_effects/Echo.cpp new file mode 100644 index 00000000..010f2d4c --- /dev/null +++ b/src/audio_effects/Echo.cpp @@ -0,0 +1,200 @@ +/** + * @file + * @brief Source file for Echo audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "Echo.h" +#include "Exceptions.h" + +using namespace openshot; + +/// Blank constructor, useful when using Json to load the effect properties +Echo::Echo() : echo_time(0.1), feedback(0.5), mix(0.5) { + // Init effect properties + init_effect_details(); +} + +// Default constructor +Echo::Echo(Keyframe new_echo_time, Keyframe new_feedback, Keyframe new_mix) : + echo_time(new_echo_time), feedback(new_feedback), mix(new_mix) +{ + // Init effect properties + init_effect_details(); +} + +// Init effect settings +void Echo::init_effect_details() +{ + /// Initialize the values of the EffectInfo struct. 
+ InitEffectInfo(); + + /// Set the effect info + info.class_name = "Echo"; + info.name = "Echo"; + info.description = "Add echo on the frame's sound."; + info.has_audio = true; + info.has_video = false; + initialized = false; +} + +void Echo::setup(std::shared_ptr frame) +{ + if (!initialized) + { + const float max_echo_time = 5; + echo_buffer_samples = (int)(max_echo_time * (float)frame->SampleRate()) + 1; + + if (echo_buffer_samples < 1) + echo_buffer_samples = 1; + + echo_buffer_channels = frame->audio->getNumChannels(); + echo_buffer.setSize(echo_buffer_channels, echo_buffer_samples); + echo_buffer.clear(); + echo_write_position = 0; + initialized = true; + } +} + +// This method is required for all derived classes of EffectBase, and returns a +// modified openshot::Frame object +std::shared_ptr Echo::GetFrame(std::shared_ptr frame, int64_t frame_number) +{ + const float echo_time_value = (float)echo_time.GetValue(frame_number)*(float)frame->SampleRate(); + const float feedback_value = feedback.GetValue(frame_number); + const float mix_value = mix.GetValue(frame_number); + int local_write_position; + + setup(frame); + + for (int channel = 0; channel < frame->audio->getNumChannels(); channel++) + { + float *channel_data = frame->audio->getWritePointer(channel); + float *echo_data = echo_buffer.getWritePointer(channel); + local_write_position = echo_write_position; + + for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample) + { + const float in = (float)(channel_data[sample]); + float out = 0.0f; + + float read_position = fmodf((float)local_write_position - echo_time_value + (float)echo_buffer_samples, echo_buffer_samples); + int local_read_position = floorf(read_position); + + if (local_read_position != local_write_position) + { + float fraction = read_position - (float)local_read_position; + float echoed1 = echo_data[(local_read_position + 0)]; + float echoed2 = echo_data[(local_read_position + 1) % echo_buffer_samples]; + out = (float)(echoed1 
+ fraction * (echoed2 - echoed1)); + channel_data[sample] = in + mix_value*(out - in); + echo_data[local_write_position] = in + out*feedback_value; + } + + if (++local_write_position >= echo_buffer_samples) + local_write_position -= echo_buffer_samples; + } + } + + echo_write_position = local_write_position; + + // return the modified frame + return frame; +} + +// Generate JSON string of this object +std::string Echo::Json() const { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::Value for this object +Json::Value Echo::JsonValue() const { + + // Create root json object + Json::Value root = EffectBase::JsonValue(); // get parent properties + root["type"] = info.class_name; + root["echo_time"] = echo_time.JsonValue(); + root["feedback"] = feedback.JsonValue(); + root["mix"] = mix.JsonValue(); + + // return JsonValue + return root; +} + +// Load JSON string into this object +void Echo::SetJson(const std::string value) { + + // Parse JSON string into JSON objects + try + { + const Json::Value root = openshot::stringToJson(value); + // Set all values that match + SetJsonValue(root); + } + catch (const std::exception& e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)"); + } +} + +// Load Json::Value into this object +void Echo::SetJsonValue(const Json::Value root) { + + // Set parent data + EffectBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["echo_time"].isNull()) + echo_time.SetJsonValue(root["echo_time"]); + if (!root["feedback"].isNull()) + feedback.SetJsonValue(root["feedback"]); + if (!root["mix"].isNull()) + mix.SetJsonValue(root["mix"]); +} + +// Get all properties for a specific frame +std::string Echo::PropertiesJSON(int64_t requested_frame) const { + + // Generate JSON properties list + Json::Value root; + root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame); + 
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame); + root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame); + root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame); + + // Keyframes + root["echo_time"] = add_property_json("Time", echo_time.GetValue(requested_frame), "float", "", &echo_time, 0, 5, false, requested_frame); + root["feedback"] = add_property_json("Feedback", feedback.GetValue(requested_frame), "float", "", &feedback, 0, 1, false, requested_frame); + root["mix"] = add_property_json("Mix", mix.GetValue(requested_frame), "float", "", &mix, 0, 1, false, requested_frame); + + // Return formatted string + return root.toStyledString(); +} diff --git a/src/audio_effects/Echo.h b/src/audio_effects/Echo.h new file mode 100644 index 00000000..9a120b6e --- /dev/null +++ b/src/audio_effects/Echo.h @@ -0,0 +1,112 @@ +/** + * @file + * @brief Header file for Echo audio effect class + * @author + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_ECHO_AUDIO_EFFECT_H +#define OPENSHOT_ECHO_AUDIO_EFFECT_H + +#include "../EffectBase.h" + +#include "../Frame.h" +#include "../Json.h" +#include "../KeyFrame.h" + +#include +#include +#include +#include + + +namespace openshot +{ + + /** + * @brief This class adds a echo into the audio + * + */ + class Echo : public EffectBase + { + private: + /// Init effect settings + void init_effect_details(); + + public: + Keyframe echo_time; + Keyframe feedback; + Keyframe mix; + + juce::AudioSampleBuffer echo_buffer; + int echo_buffer_samples; + int echo_buffer_channels; + int echo_write_position; + bool initialized; + + /// Blank constructor, useful when using Json to load the effect properties + Echo(); + + /// Default constructor + Echo(Keyframe new_echo_time, Keyframe new_feedback, Keyframe new_mix); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { + return GetFrame(std::make_shared(), frame_number); + } + + void setup(std::shared_ptr frame); + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. 
+ /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + + // Get and Set JSON methods + std::string Json() const override; ///< Generate JSON string of this object + void SetJson(const std::string value) override; ///< Load JSON string into this object + Json::Value JsonValue() const override; ///< Generate Json::Value for this object + void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object + + /// Get all properties for a specific frame (perfect for a UI to display the current state + /// of all properties at any time) + std::string PropertiesJSON(int64_t requested_frame) const override; + }; + +} + +#endif diff --git a/src/audio_effects/Expander.cpp b/src/audio_effects/Expander.cpp index 0d7c641b..468988c9 100644 --- a/src/audio_effects/Expander.cpp +++ b/src/audio_effects/Expander.cpp @@ -76,7 +76,7 @@ std::shared_ptr Expander::GetFrame(std::shared_ptraudio->getNumSamples(); mixed_down_input.setSize(1, num_samples); - inverse_sample_rate = 1.0f / frame->SampleRate(); //(float)getSampleRate(); + inverse_sample_rate = 1.0f / frame->SampleRate(); inverseE = 1.0f / M_E; if ((bool)bypass.GetValue(frame_number)) diff --git a/src/audio_effects/Expander.h b/src/audio_effects/Expander.h index 7fc37afa..4eee84af 100644 --- a/src/audio_effects/Expander.h +++ b/src/audio_effects/Expander.h @@ -47,7 +47,7 @@ namespace openshot { /** - * @brief This class adds a Expander into the audio + * @brief This class adds a expander (or noise gate) into the audio * */ class Expander : public EffectBase diff --git a/src/audio_effects/ParametricEQ.cpp b/src/audio_effects/ParametricEQ.cpp index 7ca9d3d6..46e384ea 100644 --- a/src/audio_effects/ParametricEQ.cpp +++ 
b/src/audio_effects/ParametricEQ.cpp @@ -78,7 +78,6 @@ std::shared_ptr ParametricEQ::GetFrame(std::shared_ptraudio->getNumSamples(); updateFilters(frame_number, num_samples); - // Add distortion for (int channel = 0; channel < frame->audio->getNumChannels(); channel++) { auto *channel_data = frame->audio->getWritePointer(channel); diff --git a/src/audio_effects/ParametricEQ.h b/src/audio_effects/ParametricEQ.h index e8a87f13..62da54ec 100644 --- a/src/audio_effects/ParametricEQ.h +++ b/src/audio_effects/ParametricEQ.h @@ -49,7 +49,7 @@ namespace openshot { /** - * @brief This class adds a noise into the audio + * @brief This class adds a equalization into the audio * */ class ParametricEQ : public EffectBase diff --git a/src/audio_effects/Robotization.h b/src/audio_effects/Robotization.h index 07838213..88a61116 100644 --- a/src/audio_effects/Robotization.h +++ b/src/audio_effects/Robotization.h @@ -60,7 +60,6 @@ namespace openshot void init_effect_details(); public: - // Keyframe shift; ///< Robotization shift keyframe. The Robotization shift inserted on the audio. 
openshot::FFTSize fft_size; openshot::HopSize hop_size; openshot::WindowType window_type; From 66f3ae19bddf98ddb3d389ba0c33d0aaf2ac7439 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 21 Jul 2021 13:02:06 -0500 Subject: [PATCH 40/71] Updating audio descriptions --- src/audio_effects/Compressor.cpp | 2 +- src/audio_effects/Distortion.cpp | 2 +- src/audio_effects/Echo.cpp | 2 +- src/audio_effects/Expander.cpp | 2 +- src/audio_effects/Noise.cpp | 2 +- src/audio_effects/ParametricEQ.cpp | 2 +- src/audio_effects/Robotization.cpp | 2 +- src/audio_effects/Whisperization.cpp | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp index cf706e6e..b0cb34e0 100644 --- a/src/audio_effects/Compressor.cpp +++ b/src/audio_effects/Compressor.cpp @@ -56,7 +56,7 @@ void Compressor::init_effect_details() /// Set the effect info info.class_name = "Compressor"; info.name = "Compressor"; - info.description = "Add compressor effect on the frame's audio. This effect reduces the volume of loud sounds or amplify quiet sounds."; + info.description = "Reduce the volume of loud sounds or amplify quiet sounds."; info.has_audio = true; info.has_video = false; diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp index 10abf64d..1f4d58b6 100644 --- a/src/audio_effects/Distortion.cpp +++ b/src/audio_effects/Distortion.cpp @@ -56,7 +56,7 @@ void Distortion::init_effect_details() /// Set the effect info info.class_name = "Distortion"; info.name = "Distortion"; - info.description = "Add distortion on the frame's audio. 
This effect alters the audio by clipping the signal."; + info.description = "Alter the audio by clipping the signal."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/Echo.cpp b/src/audio_effects/Echo.cpp index 010f2d4c..442a5df1 100644 --- a/src/audio_effects/Echo.cpp +++ b/src/audio_effects/Echo.cpp @@ -56,7 +56,7 @@ void Echo::init_effect_details() /// Set the effect info info.class_name = "Echo"; info.name = "Echo"; - info.description = "Add echo on the frame's sound."; + info.description = "Reflection of sound with a delay after the direct sound."; info.has_audio = true; info.has_video = false; initialized = false; diff --git a/src/audio_effects/Expander.cpp b/src/audio_effects/Expander.cpp index 468988c9..3d60974f 100644 --- a/src/audio_effects/Expander.cpp +++ b/src/audio_effects/Expander.cpp @@ -56,7 +56,7 @@ void Expander::init_effect_details() /// Set the effect info info.class_name = "Expander"; info.name = "Expander"; - info.description = "Add Expander on the frame's audio track. 
Louder parts of the audio becomes relatively louder and quieter parts becomes quieter."; + info.description = "Louder parts of audio becomes relatively louder and quieter parts becomes quieter."; info.has_audio = true; info.has_video = false; diff --git a/src/audio_effects/Noise.cpp b/src/audio_effects/Noise.cpp index 357cb769..5a329ba6 100644 --- a/src/audio_effects/Noise.cpp +++ b/src/audio_effects/Noise.cpp @@ -55,7 +55,7 @@ void Noise::init_effect_details() /// Set the effect info info.class_name = "Noise"; info.name = "Noise"; - info.description = "Add white noise on the frame's sound."; + info.description = "Random signal having equal intensity at different frequencies."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/ParametricEQ.cpp b/src/audio_effects/ParametricEQ.cpp index 46e384ea..5f87d67e 100644 --- a/src/audio_effects/ParametricEQ.cpp +++ b/src/audio_effects/ParametricEQ.cpp @@ -57,7 +57,7 @@ void ParametricEQ::init_effect_details() /// Set the effect info info.class_name = "ParametricEQ"; info.name = "Parametric EQ"; - info.description = "Add equalization on the frame's sound. This effect is a filter that allows you to adjust the volume level of a frequency within an audio."; + info.description = "Filter that allows you to adjust the volume level of a frequency in the audio track."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp index 1f9c8ac0..d1687150 100644 --- a/src/audio_effects/Robotization.cpp +++ b/src/audio_effects/Robotization.cpp @@ -57,7 +57,7 @@ void Robotization::init_effect_details() /// Set the effect info info.class_name = "Robotization"; info.name = "Robotization"; - info.description = "Add robotization effect on the frame's audio track. 
This effect transforms the voice present in an audio into a robotic voice effect."; + info.description = "Transform the voice present in an audio track into a robotic voice effect."; info.has_audio = true; info.has_video = false; } diff --git a/src/audio_effects/Whisperization.cpp b/src/audio_effects/Whisperization.cpp index e36fbfa0..7c235e26 100644 --- a/src/audio_effects/Whisperization.cpp +++ b/src/audio_effects/Whisperization.cpp @@ -56,7 +56,7 @@ void Whisperization::init_effect_details() /// Set the effect info info.class_name = "Whisperization"; info.name = "Whisperization"; - info.description = "Add whisperization effect on the frame's audio track. This effect transforms the voice present in an audio into a whispering voice effect."; + info.description = "Transform the voice present in an audio track into a whispering voice effect."; info.has_audio = true; info.has_video = false; } From 9662bcbeea506151be2fbb547c466e854ab9aae3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Aug 2021 09:49:28 -0400 Subject: [PATCH 41/71] Bump codecov/codecov-action from 1 to 2.0.2 (#709) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 1 to 2.0.2. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v1...v2.0.2) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 58881d81..94db6047 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -67,7 +67,7 @@ jobs: cmake --build . --target install -- VERBOSE=1 popd - - uses: codecov/codecov-action@v1 + - uses: codecov/codecov-action@v2.0.2 if: ${{ matrix.compiler == 'clang' }} with: file: build/coverage.info From 606dded4ec27153ed5de54aa02091a2272568d88 Mon Sep 17 00:00:00 2001 From: Brenno Date: Tue, 10 Aug 2021 14:59:45 -0300 Subject: [PATCH 42/71] Prevent re initializing ParametricEQ for each frame --- src/audio_effects/ParametricEQ.cpp | 97 ++++++++++++++++++++++++++++-- src/audio_effects/ParametricEQ.h | 81 +------------------------ 2 files changed, 94 insertions(+), 84 deletions(-) diff --git a/src/audio_effects/ParametricEQ.cpp b/src/audio_effects/ParametricEQ.cpp index 5f87d67e..87997b86 100644 --- a/src/audio_effects/ParametricEQ.cpp +++ b/src/audio_effects/ParametricEQ.cpp @@ -60,18 +60,24 @@ void ParametricEQ::init_effect_details() info.description = "Filter that allows you to adjust the volume level of a frequency in the audio track."; info.has_audio = true; info.has_video = false; + initialized = false; } // This method is required for all derived classes of EffectBase, and returns a // modified openshot::Frame object std::shared_ptr ParametricEQ::GetFrame(std::shared_ptr frame, int64_t frame_number) { - filters.clear(); + if (!initialized) + { + filters.clear(); - for (int i = 0; i < frame->audio->getNumChannels(); ++i) { - Filter* filter; - filters.add(filter = new Filter()); - } + for (int i = 0; i < frame->audio->getNumChannels(); ++i) { + Filter *filter; + filters.add(filter = new Filter()); + } + + initialized = true; + } const int num_input_channels = frame->audio->getNumChannels(); 
const int num_output_channels = frame->audio->getNumChannels(); @@ -93,6 +99,87 @@ std::shared_ptr ParametricEQ::GetFrame(std::shared_ptr 0); - jassert (q_factor > 0); - - double bandwidth = jmin (discrete_frequency / q_factor, M_PI * 0.99); - double two_cos_wc = -2.0 * cos (discrete_frequency); - double tan_half_bw = tan (bandwidth / 2.0); - double tan_half_wc = tan (discrete_frequency / 2.0); - double sqrt_gain = sqrt (gain); - - switch (filter_type) { - case 0 /* LOW_PASS */: { - coefficients = IIRCoefficients (/* b0 */ tan_half_wc, - /* b1 */ tan_half_wc, - /* b2 */ 0.0, - /* a0 */ tan_half_wc + 1.0, - /* a1 */ tan_half_wc - 1.0, - /* a2 */ 0.0); - break; - } - case 1 /* HIGH_PASS */: { - coefficients = IIRCoefficients (/* b0 */ 1.0, - /* b1 */ -1.0, - /* b2 */ 0.0, - /* a0 */ tan_half_wc + 1.0, - /* a1 */ tan_half_wc - 1.0, - /* a2 */ 0.0); - break; - } - case 2 /* LOW_SHELF */: { - coefficients = IIRCoefficients (/* b0 */ gain * tan_half_wc + sqrt_gain, - /* b1 */ gain * tan_half_wc - sqrt_gain, - /* b2 */ 0.0, - /* a0 */ tan_half_wc + sqrt_gain, - /* a1 */ tan_half_wc - sqrt_gain, - /* a2 */ 0.0); - break; - } - case 3 /* HIGH_SHELF */: { - coefficients = IIRCoefficients (/* b0 */ sqrt_gain * tan_half_wc + gain, - /* b1 */ sqrt_gain * tan_half_wc - gain, - /* b2 */ 0.0, - /* a0 */ sqrt_gain * tan_half_wc + 1.0, - /* a1 */ sqrt_gain * tan_half_wc - 1.0, - /* a2 */ 0.0); - break; - } - case 4 /* BAND_PASS */: { - coefficients = IIRCoefficients (/* b0 */ tan_half_bw, - /* b1 */ 0.0, - /* b2 */ -tan_half_bw, - /* a0 */ 1.0 + tan_half_bw, - /* a1 */ two_cos_wc, - /* a2 */ 1.0 - tan_half_bw); - break; - } - case 5 /* BAND_STOP */: { - coefficients = IIRCoefficients (/* b0 */ 1.0, - /* b1 */ two_cos_wc, - /* b2 */ 1.0, - /* a0 */ 1.0 + tan_half_bw, - /* a1 */ two_cos_wc, - /* a2 */ 1.0 - tan_half_bw); - break; - } - case 6 /* PEAKING_NOTCH */: { - coefficients = IIRCoefficients (/* b0 */ sqrt_gain + gain * tan_half_bw, - /* b1 */ sqrt_gain * two_cos_wc, - /* b2 */ 
sqrt_gain - gain * tan_half_bw, - /* a0 */ sqrt_gain + tan_half_bw, - /* a1 */ sqrt_gain * two_cos_wc, - /* a2 */ sqrt_gain - tan_half_bw); - break; - } - } - - setCoefficients (coefficients); - } + const int filter_type); }; juce::OwnedArray filters; From 7e419b9d6458d3e489104049f76dd7658bfe3391 Mon Sep 17 00:00:00 2001 From: Brenno Date: Tue, 10 Aug 2021 15:03:01 -0300 Subject: [PATCH 43/71] Fixed Draw Box property in ObjectDetection Effect --- src/effects/ObjectDetection.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/effects/ObjectDetection.cpp b/src/effects/ObjectDetection.cpp index 13093475..b279a6fc 100644 --- a/src/effects/ObjectDetection.cpp +++ b/src/effects/ObjectDetection.cpp @@ -146,13 +146,22 @@ std::shared_ptr ObjectDetection::GetFrame(std::shared_ptr frame, i // DrawRectangleRGBA(cv_image, box, bg_rgba, bg_alpha, 1, true); // DrawRectangleRGBA(cv_image, box, stroke_rgba, stroke_alpha, stroke_width, false); - + + cv::Rect2d box( (int)( (trackedBox.cx-trackedBox.width/2)*fw), (int)( (trackedBox.cy-trackedBox.height/2)*fh), (int)( trackedBox.width*fw), (int)( trackedBox.height*fh) ); + + // If the Draw Box property is off, then make the box invisible + if (trackedObject->draw_box.GetValue(frame_number) == 0) + { + bg_alpha = 1.0; + stroke_alpha = 1.0; + } + drawPred(detections.classIds.at(i), detections.confidences.at(i), box, cv_image, detections.objectIds.at(i), bg_rgba, bg_alpha, 1, true, draw_text); drawPred(detections.classIds.at(i), detections.confidences.at(i), @@ -166,6 +175,7 @@ std::shared_ptr ObjectDetection::GetFrame(std::shared_ptr frame, i if (parentTimeline){ // Get the Tracked Object's child clip Clip* childClip = parentTimeline->GetClip(trackedObject->ChildClipId()); + if (childClip){ std::shared_ptr f(new Frame(1, frame->GetWidth(), frame->GetHeight(), "#00000000")); // Get the image of the child clip for this frame From 369555b4b013f8871a9140470c9e6dfe2d9c6927 Mon Sep 17 00:00:00 2001 From: 
"FeRD (Frank Dana)" Date: Mon, 9 Aug 2021 09:54:59 -0400 Subject: [PATCH 44/71] Fix pedantic warnings (unnecessary ;s) --- src/Coordinate.cpp | 6 +++--- src/Coordinate.h | 2 +- src/FFmpegUtilities.h | 2 +- src/Fraction.cpp | 11 +++++------ src/Fraction.h | 2 +- src/Frame.cpp | 8 ++++---- src/Point.cpp | 12 ++++++------ src/Point.h | 2 +- 8 files changed, 22 insertions(+), 23 deletions(-) diff --git a/src/Coordinate.cpp b/src/Coordinate.cpp index 6a4abb21..d2a2ed3c 100644 --- a/src/Coordinate.cpp +++ b/src/Coordinate.cpp @@ -34,14 +34,14 @@ using namespace openshot; // Default constructor for a coordinate, delegating to the full signature -Coordinate::Coordinate() : Coordinate::Coordinate(0, 0) {}; +Coordinate::Coordinate() : Coordinate::Coordinate(0, 0) {} // Constructor which also allows the user to set the X and Y -Coordinate::Coordinate(double x, double y) : X(x), Y(y) {}; +Coordinate::Coordinate(double x, double y) : X(x), Y(y) {} // Constructor which accepts a std::pair for (X, Y) Coordinate::Coordinate(const std::pair& co) - : X(co.first), Y(co.second) {}; + : X(co.first), Y(co.second) {} // Generate JSON string of this object std::string Coordinate::Json() const { diff --git a/src/Coordinate.h b/src/Coordinate.h index 881b9807..d4576cd2 100644 --- a/src/Coordinate.h +++ b/src/Coordinate.h @@ -87,7 +87,7 @@ operator<<(std::basic_ostream& o, const openshot::Coordinate& co) s.precision(o.precision()); s << "(" << co.X << ", " << co.Y << ")"; return o << s.str(); -}; +} } diff --git a/src/FFmpegUtilities.h b/src/FFmpegUtilities.h index fe46a212..7579f11f 100644 --- a/src/FFmpegUtilities.h +++ b/src/FFmpegUtilities.h @@ -127,7 +127,7 @@ #endif // Does ffmpeg pixel format contain an alpha channel? 
- inline static const bool ffmpeg_has_alpha(PixelFormat pix_fmt) { + inline static bool ffmpeg_has_alpha(PixelFormat pix_fmt) { const AVPixFmtDescriptor *fmt_desc = av_pix_fmt_desc_get(pix_fmt); return bool(fmt_desc->flags & AV_PIX_FMT_FLAG_ALPHA); } diff --git a/src/Fraction.cpp b/src/Fraction.cpp index d4898326..cb13c393 100644 --- a/src/Fraction.cpp +++ b/src/Fraction.cpp @@ -34,21 +34,20 @@ using namespace openshot; // Delegating constructors -Fraction::Fraction() : Fraction::Fraction(1, 1) {}; +Fraction::Fraction() : Fraction::Fraction(1, 1) {} Fraction::Fraction(std::pair pair) - : Fraction::Fraction(pair.first, pair.second) {}; + : Fraction::Fraction(pair.first, pair.second) {} Fraction::Fraction(std::map mapping) - : Fraction::Fraction(mapping["num"], mapping["den"]) {}; + : Fraction::Fraction(mapping["num"], mapping["den"]) {} Fraction::Fraction(std::vector vector) - : Fraction::Fraction(vector[0], vector[1]) {}; + : Fraction::Fraction(vector[0], vector[1]) {} // Full constructor Fraction::Fraction(int num, int den) : - num(num), den(den) { -} + num(num), den(den) {} // Return this fraction as a float (i.e. 
1/2 = 0.5) float Fraction::ToFloat() { diff --git a/src/Fraction.h b/src/Fraction.h index 32064969..3033cb92 100644 --- a/src/Fraction.h +++ b/src/Fraction.h @@ -94,7 +94,7 @@ operator<<(std::basic_ostream& o, const openshot::Fraction& frac) s.precision(o.precision()); s << "Fraction(" << frac.num << ", " << frac.den << ")"; return o << s.str(); -}; +} } // namespace openshot #endif diff --git a/src/Frame.cpp b/src/Frame.cpp index 244b065f..aed38777 100644 --- a/src/Frame.cpp +++ b/src/Frame.cpp @@ -66,15 +66,15 @@ Frame::Frame(int64_t number, int width, int height, std::string color, int sampl } // Delegating Constructor - blank frame -Frame::Frame() : Frame::Frame(1, 1, 1, "#000000", 0, 2) {}; +Frame::Frame() : Frame::Frame(1, 1, 1, "#000000", 0, 2) {} // Delegating Constructor - image only Frame::Frame(int64_t number, int width, int height, std::string color) - : Frame::Frame(number, width, height, color, 0, 2) {}; + : Frame::Frame(number, width, height, color, 0, 2) {} // Delegating Constructor - audio only Frame::Frame(int64_t number, int samples, int channels) - : Frame::Frame(number, 1, 1, "#000000", samples, channels) {}; + : Frame::Frame(number, 1, 1, "#000000", samples, channels) {} // Copy constructor @@ -918,7 +918,7 @@ cv::Mat Frame::Qimage2mat( std::shared_ptr& qimage) { cv::mixChannels( &mat, 1, &mat2, 1, from_to, 3 ); cv::cvtColor(mat2, mat2, cv::COLOR_RGB2BGR); return mat2; -}; +} // Get pointer to OpenCV image object cv::Mat Frame::GetImageCV() diff --git a/src/Point.cpp b/src/Point.cpp index 44b6883f..b11aa7ce 100644 --- a/src/Point.cpp +++ b/src/Point.cpp @@ -35,24 +35,24 @@ using namespace std; using namespace openshot; // Default constructor -Point::Point() : Point::Point(Coordinate(1, 0), BEZIER, AUTO) {}; +Point::Point() : Point::Point(Coordinate(1, 0), BEZIER, AUTO) {} // Constructor which creates a single coordinate at X=1 -Point::Point(float y) : Point::Point(Coordinate(1, y), CONSTANT, AUTO) {}; +Point::Point(float y) : 
Point::Point(Coordinate(1, y), CONSTANT, AUTO) {} // Constructor which creates a Bezier curve with point at (x, y) -Point::Point(float x, float y) : Point::Point(Coordinate(x, y), BEZIER, AUTO) {}; +Point::Point(float x, float y) : Point::Point(Coordinate(x, y), BEZIER, AUTO) {} // Constructor which also creates a Point, setting X,Y, and interpolation. Point::Point(float x, float y, InterpolationType interpolation) - : Point::Point(Coordinate(x, y), interpolation, AUTO) {}; + : Point::Point(Coordinate(x, y), interpolation, AUTO) {} // Direct Coordinate-accepting constructors -Point::Point(const Coordinate& co) : Point::Point(co, BEZIER, AUTO) {}; +Point::Point(const Coordinate& co) : Point::Point(co, BEZIER, AUTO) {} Point::Point(const Coordinate& co, InterpolationType interpolation) - : Point::Point(co, interpolation, AUTO) {}; + : Point::Point(co, interpolation, AUTO) {} Point::Point(const Coordinate& co, InterpolationType interpolation, HandleType handle_type) : co(co), interpolation(interpolation), handle_type(handle_type) { diff --git a/src/Point.h b/src/Point.h index 48ebea45..118e6d39 100644 --- a/src/Point.h +++ b/src/Point.h @@ -147,7 +147,7 @@ operator<<(std::basic_ostream& o, const openshot::Point& p) { break; } return o << s.str(); -}; +} } // namespace openshot From 29b6810c32b71d35ca256f67a563882fdc366ce2 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Mon, 9 Aug 2021 10:00:58 -0400 Subject: [PATCH 45/71] Init members in initializer lists --- src/AudioReaderSource.cpp | 22 +++++++++++++--------- src/ClipBase.h | 16 +++++++--------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/AudioReaderSource.cpp b/src/AudioReaderSource.cpp index acd69deb..19fbb6ca 100644 --- a/src/AudioReaderSource.cpp +++ b/src/AudioReaderSource.cpp @@ -35,21 +35,25 @@ using namespace std; using namespace openshot; // Constructor that reads samples from a reader -AudioReaderSource::AudioReaderSource(ReaderBase *audio_reader, int64_t 
starting_frame_number, int buffer_size) - : reader(audio_reader), frame_number(starting_frame_number), - size(buffer_size), position(0), frame_position(0), estimated_frame(0), speed(1) { - - // Initialize an audio buffer (based on reader) - buffer = new juce::AudioSampleBuffer(reader->info.channels, size); - - // initialize the audio samples to zero (silence) +AudioReaderSource::AudioReaderSource( + ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size +) : + position(0), + size(buffer_size), + buffer(new juce::AudioSampleBuffer(audio_reader->info.channels, buffer_size)), + speed(1), + reader(audio_reader), + frame_number(starting_frame_number), + frame_position(0), + estimated_frame(0) +{ + // Zero the buffer contents buffer->clear(); } // Destructor AudioReaderSource::~AudioReaderSource() { - // Clear and delete the buffer delete buffer; buffer = NULL; } diff --git a/src/ClipBase.h b/src/ClipBase.h index c38b9790..2f138a5c 100644 --- a/src/ClipBase.h +++ b/src/ClipBase.h @@ -68,15 +68,13 @@ namespace openshot { CacheMemory cache; /// Constructor for the base clip - ClipBase() { - // Initialize values - position = 0.0; - layer = 0; - start = 0.0; - end = 0.0; - previous_properties = ""; - timeline = NULL; - }; + ClipBase() : + position(0.0), + layer(0), + start(0.0), + end(0.0), + previous_properties(""), + timeline(nullptr) {} // Compare a clip using the Position() property bool operator< ( ClipBase& a) { return (Position() < a.Position()); } From fbe02428376118336f8405e0ec2b45a0858097fc Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Wed, 11 Aug 2021 03:58:45 -0400 Subject: [PATCH 46/71] FFmpeg: Create, use av_err2string() (#689) - Previously 'av_make_error_string' was defined in FFmpegUtilities.h for the sole purpose of redefining `av_err2str()` as a call to that function. `av_err2str()` was then used in our code, often in string contexts where its output was cast to `std::string`. 
- Since that was excessively circular, instead the function is named `av_err2string()`, and it's used directly in contexts where a std::string is expected. - `av_err2str()` is still #defined as `av_err2string(...).c_str()` --- src/FFmpegUtilities.h | 6 +++--- src/FFmpegWriter.cpp | 22 +++++++++++----------- src/FrameMapper.cpp | 4 +++- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/src/FFmpegUtilities.h b/src/FFmpegUtilities.h index f095e72a..42358eed 100644 --- a/src/FFmpegUtilities.h +++ b/src/FFmpegUtilities.h @@ -102,16 +102,16 @@ extern "C" { #endif // This wraps an unsafe C macro to be C++ compatible function -inline static const std::string av_make_error_string(int errnum) +inline static const std::string av_err2string(int errnum) { char errbuf[AV_ERROR_MAX_STRING_SIZE]; av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE); - return (std::string)errbuf; + return static_cast(errbuf); } // Redefine the C macro to use our new C++ function #undef av_err2str -#define av_err2str(errnum) av_make_error_string(errnum).c_str() +#define av_err2str(errnum) av_err2string(errnum).c_str() // Define this for compatibility #ifndef PixelFormat diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index d1ad3e63..8707756c 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -67,7 +67,7 @@ static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx, int6 frames_ctx->initial_pool_size = 20; if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0) { std::clog << "Failed to initialize HW frame context. 
" << - "Error code: " << av_err2str(err) << "\n"; + "Error code: " << av_err2string(err) << "\n"; av_buffer_unref(&hw_frames_ref); return err; } @@ -882,7 +882,7 @@ void FFmpegWriter::flush_encoders() { #endif // IS_FFMPEG_3_2 if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } if (!got_packet) { break; @@ -895,7 +895,7 @@ void FFmpegWriter::flush_encoders() { // Write packet error_code = av_interleaved_write_frame(oc, &pkt); if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } } @@ -918,7 +918,7 @@ void FFmpegWriter::flush_encoders() { #endif if (error_code < 0) { ZmqLogger::Instance()->AppendDebugMethod( - "FFmpegWriter::flush_encoders ERROR [" + (std::string) av_err2str(error_code) + "]", + "FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } if (!got_packet) { @@ -940,7 +940,7 @@ void FFmpegWriter::flush_encoders() { error_code = av_interleaved_write_frame(oc, &pkt); if (error_code < 0) { ZmqLogger::Instance()->AppendDebugMethod( - "FFmpegWriter::flush_encoders ERROR [" + (std::string) av_err2str(error_code) + "]", + "FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } @@ -1492,7 +1492,7 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) { int err; if ((err = set_hwframe_ctx(video_codec_ctx, hw_device_ctx, info.width, info.height)) < 0) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video 
(set_hwframe_ctx) ERROR faled to set hwframe context", - "width", info.width, "height", info.height, av_err2str(err), -1); + "width", info.width, "height", info.height, av_err2string(err), -1); } } #endif // USE_HW_ACCEL @@ -1598,7 +1598,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { // Fill input frame with sample data int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples, all_queued_samples_size, 0); if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } // Do not convert audio to planar format (yet). We need to keep everything interleaved at this point. @@ -1885,7 +1885,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { } if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } // Increment PTS (no pkt.duration, so calculate with maths) @@ -2035,7 +2035,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra /* write the compressed frame in the media file */ int error_code = av_interleaved_write_frame(oc, &pkt); if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); return false; } @@ -2116,7 +2116,7 @@ bool 
FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra // Write video packet (older than FFmpeg 3.2) error_code = avcodec_encode_video2(video_codec_ctx, &pkt, frame_final, &got_packet_ptr); if (error_code != 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + av_err2string(error_code) + "]", "error_code", error_code); } if (got_packet_ptr == 0) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet (Frame gotpacket error)"); @@ -2132,7 +2132,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra /* write the compressed frame in the media file */ int result = av_interleaved_write_frame(oc, &pkt); if (result < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (std::string) av_err2str(result) + "]", "result", result); + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + av_err2string(result) + "]", "result", result); return false; } } diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index 0e3b0272..9700b6ea 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -828,7 +828,9 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code); + ZmqLogger::Instance()->AppendDebugMethod( + "FrameMapper::ResampleMappedAudio ERROR [" + av_err2string(error_code) + "]", + "error_code", error_code); throw ErrorEncodingVideo("Error while resampling audio in frame mapper", frame->number); } From 1c2786de357f60ef1c22c282e7f9b3cd5f23c694 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 11 Aug 2021 12:10:46 -0500 Subject: [PATCH 47/71] 
SCALE_NONE was incorrectly sizing SVG, Image, and Video files. Since OpenShot changes the preview window size during editing, the SCALE_NONE clips should always be scaled to the ratio: preview / timeline... so they stay relative sized to the timeline size. For example, if an SVG is 500px wide, and the project is 1000px wide... the SVG in SCALE_NONE mode should always be 50% with width of the video, regardless of how small you make the preview window. --- src/Clip.cpp | 10 ++---- src/FFmpegReader.cpp | 16 ++++++++-- src/QtImageReader.cpp | 74 +++++++++++++++++++++++++++++++------------ src/QtImageReader.h | 2 +- 4 files changed, 71 insertions(+), 31 deletions(-) diff --git a/src/Clip.cpp b/src/Clip.cpp index 5b427035..35d0fba1 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -1380,13 +1380,9 @@ QTransform Clip::get_transform(std::shared_ptr frame, int width, int heig break; } case (SCALE_NONE): { - // Calculate ratio of source size to project size - // Even with no scaling, previews need to be adjusted correctly - // (otherwise NONE scaling draws the frame image outside of the preview) - float source_width_ratio = source_size.width() / float(width); - float source_height_ratio = source_size.height() / float(height); - source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio); - + // Image is already the original size (i.e. no scaling mode) relative + // to the preview window size (i.e. timeline / preview ratio). No further + // scaling is needed here. 
// Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); break; diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index 98d39ce1..c1eaa748 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -33,6 +33,7 @@ #include "FFmpegReader.h" #include "Exceptions.h" +#include "Timeline.h" #include // for std::this_thread::sleep_for #include // for std::chrono::milliseconds @@ -1285,9 +1286,18 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { } } else { - // No scaling, use original image size (slower) - max_width = info.width; - max_height = info.height; + // Scale video to equivalent unscaled size + // Since the preview window can change sizes, we want to always + // scale against the ratio of original video size to timeline size + float preview_ratio = 1.0; + if (parent->ParentTimeline()) { + Timeline *t = (Timeline *) parent->ParentTimeline(); + preview_ratio = t->preview_width / float(t->info.width); + } + float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; + float max_scale_y = parent->scale_y.GetMaxPoint().co.Y; + max_width = info.width * max_scale_x * preview_ratio; + max_height = info.height * max_scale_y * preview_ratio; } } diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp index 29243ed4..1800a0f2 100644 --- a/src/QtImageReader.cpp +++ b/src/QtImageReader.cpp @@ -68,10 +68,14 @@ void QtImageReader::Open() if (!is_open) { bool loaded = false; + QSize default_svg_size; // Check for SVG files and rasterizing them to QImages if (path.toLower().endsWith(".svg") || path.toLower().endsWith(".svgz")) { - loaded = load_svg_path(path); + default_svg_size = load_svg_path(path); + if (!default_svg_size.isEmpty()) { + loaded = true; + } } if (!loaded) { @@ -100,8 +104,15 @@ void QtImageReader::Open() info.file_size = image->byteCount(); #endif info.vcodec = "QImage"; - info.width 
= image->width(); - info.height = image->height(); + if (!default_svg_size.isEmpty()) { + // Use default SVG size (if detected) + info.width = default_svg_size.width(); + info.height = default_svg_size.height(); + } else { + // Use Qt Image size as a fallback + info.width = image->width(); + info.height = image->height(); + } info.pixel_ratio.num = 1; info.pixel_ratio.den = 1; info.duration = 60 * 60 * 1; // 1 hour duration @@ -196,7 +207,7 @@ QSize QtImageReader::calculate_max_size() { int max_width = info.width; int max_height = info.height; if (max_width == 0 || max_height == 0) { - // If no size determined yet, default to 4K + // If no size determined yet max_width = 1920; max_height = 1080; } @@ -227,11 +238,23 @@ QSize QtImageReader::calculate_max_size() { if (width_size.width() >= max_width && width_size.height() >= max_height) { max_width = std::max(max_width, width_size.width()); max_height = std::max(max_height, width_size.height()); - } - else { + } else { max_width = std::max(max_width, height_size.width()); max_height = std::max(max_height, height_size.height()); } + } else if (parent->scale == SCALE_NONE) { + // Scale images to equivalent unscaled size + // Since the preview window can change sizes, we want to always + // scale against the ratio of original image size to timeline size + float preview_ratio = 1.0; + if (parent->ParentTimeline()) { + Timeline *t = (Timeline *) parent->ParentTimeline(); + preview_ratio = t->preview_width / float(t->info.width); + } + float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; + float max_scale_y = parent->scale_y.GetMaxPoint().co.Y; + max_width = info.width * max_scale_x * preview_ratio; + max_height = info.height * max_scale_y * preview_ratio; } } @@ -240,8 +263,9 @@ QSize QtImageReader::calculate_max_size() { } // Load an SVG file with Resvg or fallback with Qt -bool QtImageReader::load_svg_path(QString) { +QSize QtImageReader::load_svg_path(QString) { bool loaded = false; + QSize default_size(0,0); // 
Calculate max image size QSize current_max_size = calculate_max_size(); @@ -250,8 +274,12 @@ bool QtImageReader::load_svg_path(QString) { // Use libresvg for parsing/rasterizing SVG ResvgRenderer renderer(path); if (renderer.isValid()) { + // Set default SVG size + default_size.setWidth(renderer.defaultSize().width()); + default_size.setHeight(renderer.defaultSize().height()); + // Scale SVG size to keep aspect ratio, and fill the max_size as best as possible - QSize svg_size(renderer.defaultSize().width(), renderer.defaultSize().height()); + QSize svg_size(default_size.width(), default_size.height()); svg_size.scale(current_max_size.width(), current_max_size.height(), Qt::KeepAspectRatio); // Load SVG at max size @@ -269,22 +297,28 @@ bool QtImageReader::load_svg_path(QString) { image = std::make_shared(); loaded = image->load(path); - if (loaded && (image->width() < current_max_size.width() || image->height() < current_max_size.height())) { - // Load SVG into larger/project size (so image is not blurry) - QSize svg_size = image->size().scaled(current_max_size.width(), current_max_size.height(), Qt::KeepAspectRatio); - if (QCoreApplication::instance()) { - // Requires QApplication to be running (for QPixmap support) - // Re-rasterize SVG image to max size - image = std::make_shared(QIcon(path).pixmap(svg_size).toImage()); - } else { - // Scale image without re-rasterizing it (due to lack of QApplication) - image = std::make_shared(image->scaled( - svg_size.width(), svg_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)); + if (loaded) { + // Set default SVG size + default_size.setWidth(image->width()); + default_size.setHeight(image->height()); + + if (image->width() < current_max_size.width() || image->height() < current_max_size.height()) { + // Load SVG into larger/project size (so image is not blurry) + QSize svg_size = image->size().scaled(current_max_size.width(), current_max_size.height(), Qt::KeepAspectRatio); + if (QCoreApplication::instance()) 
{ + // Requires QApplication to be running (for QPixmap support) + // Re-rasterize SVG image to max size + image = std::make_shared(QIcon(path).pixmap(svg_size).toImage()); + } else { + // Scale image without re-rasterizing it (due to lack of QApplication) + image = std::make_shared(image->scaled( + svg_size.width(), svg_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)); + } } } } - return loaded; + return default_size; } // Generate JSON string of this object diff --git a/src/QtImageReader.h b/src/QtImageReader.h index 0dc359b1..687e85e1 100644 --- a/src/QtImageReader.h +++ b/src/QtImageReader.h @@ -77,7 +77,7 @@ namespace openshot /// /// @returns Success as a boolean /// @param path The file path of the SVG file - bool load_svg_path(QString path); + QSize load_svg_path(QString path); /// Calculate the max_size QSize, based on parent timeline and parent clip settings QSize calculate_max_size(); From 7af95c22a2c969fff806ec1be1597c74636388bc Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 11 Aug 2021 15:19:21 -0500 Subject: [PATCH 48/71] Fixing SVG unit tests to correctly check the default SVG size --- tests/QtImageReader.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/QtImageReader.cpp b/tests/QtImageReader.cpp index 6bfeebe4..ad19d17d 100644 --- a/tests/QtImageReader.cpp +++ b/tests/QtImageReader.cpp @@ -65,10 +65,10 @@ TEST_CASE( "Check_SVG_Loading", "[libopenshot][qtimagereader]" ) r.Open(); // Get frame, with no Timeline or Clip - // Default SVG scaling sizes things to 1920x1080 + // Size should be equal to default SVG size std::shared_ptr f = r.GetFrame(1); - CHECK(f->GetImage()->width() == 1080); - CHECK(f->GetImage()->height() == 1080); + CHECK(f->GetImage()->width() == 72); + CHECK(f->GetImage()->height() == 72); Fraction fps(30000,1000); Timeline t1(640, 480, fps, 44100, 2, LAYOUT_STEREO); From 4d5bf9ba8c99615e7b4a3af139263b1ed59138be Mon Sep 17 00:00:00 2001 From: Brenno Date: Fri, 13 Aug 2021 
21:15:58 -0300 Subject: [PATCH 49/71] Bug fix. Prevent adding the same clip as child in the Tracker and Object Detection Effects --- src/TrackedObjectBBox.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/TrackedObjectBBox.cpp b/src/TrackedObjectBBox.cpp index 451cde04..e577e4d0 100644 --- a/src/TrackedObjectBBox.cpp +++ b/src/TrackedObjectBBox.cpp @@ -395,10 +395,13 @@ void TrackedObjectBBox::SetJsonValue(const Json::Value root) // Set the protobuf data path by the given JSON object if (!root["protobuf_data_path"].isNull()) protobufDataPath = root["protobuf_data_path"].asString(); + // Set the id of the child clip - if (!root["child_clip_id"].isNull() && root["child_clip_id"].asString() != ""){ + if (!root["child_clip_id"].isNull() && root["child_clip_id"].asString() != "" && root["child_clip_id"].asString() != Id()){ Clip* parentClip = (Clip *) ParentClip(); - ChildClipId(root["child_clip_id"].asString()); + + if (root["child_clip_id"].asString() != parentClip->Id()) + ChildClipId(root["child_clip_id"].asString()); } // Set the Keyframes by the given JSON object From d246964f0fe15a1e626289c07b515bce698b5873 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Mon, 16 Aug 2021 18:46:14 -0400 Subject: [PATCH 50/71] Link catch-main wtih Catch2::Catch2 (#714) Newer versions of Catch2 require C++14 to compile, a requirement that won't be propagated to the object library unless we link it to the IMPORTED Catch2::Catch2 target. 
--- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 55504a49..822c4362 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -95,6 +95,7 @@ endif() # Create object library for test executable main(), # to avoid recompiling for every test add_library(catch-main OBJECT catch_main.cpp) +target_link_libraries(catch-main PUBLIC Catch2::Catch2) foreach(tname ${OPENSHOT_TESTS}) add_executable(openshot-${tname}-test ${tname}.cpp $) From 73b009d38f678ca77a1b1564e1f2492b8ff1fed7 Mon Sep 17 00:00:00 2001 From: Jackson Date: Thu, 19 Aug 2021 15:40:39 -0500 Subject: [PATCH 51/71] Update Version to 0.2.6 SO 20 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7fd0cb15..a36eed24 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,8 +40,8 @@ For more information, please visit . set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules") ################ PROJECT VERSION #################### -set(PROJECT_VERSION_FULL "0.2.5-dev3") -set(PROJECT_SO_VERSION 19) +set(PROJECT_VERSION_FULL "0.2.6") +set(PROJECT_SO_VERSION 20) # Remove the dash and anything following, to get the #.#.# version for project() STRING(REGEX REPLACE "\-.*$" "" VERSION_NUM "${PROJECT_VERSION_FULL}") From 928c37bc6e5285f8b2f55492d6ad0329faca5ddf Mon Sep 17 00:00:00 2001 From: Jackson Date: Mon, 23 Aug 2021 15:50:58 -0500 Subject: [PATCH 52/71] set default crop border to 0 --- src/effects/Crop.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/effects/Crop.cpp b/src/effects/Crop.cpp index bf89b257..8644c3d6 100644 --- a/src/effects/Crop.cpp +++ b/src/effects/Crop.cpp @@ -34,7 +34,7 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Crop::Crop() : left(0.1), top(0.1), right(0.1), bottom(0.1) { +Crop::Crop() : left(0.0), top(0.0), right(0.0), bottom(0.0) { // Init effect 
properties init_effect_details(); } From 569b5918d0a29afd767ab30a7b9ccee01afb8811 Mon Sep 17 00:00:00 2001 From: Jackson Date: Wed, 25 Aug 2021 11:34:52 -0500 Subject: [PATCH 53/71] Merge master 0.2.6 into develop --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a36eed24..ebd98c7c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,7 +40,7 @@ For more information, please visit . set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules") ################ PROJECT VERSION #################### -set(PROJECT_VERSION_FULL "0.2.6") +set(PROJECT_VERSION_FULL "0.2.6-dev") set(PROJECT_SO_VERSION 20) # Remove the dash and anything following, to get the #.#.# version for project() From f9e5db6c9bd6d418229684d9d3a4b2fa75fd6bfc Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Fri, 3 Sep 2021 15:44:21 -0500 Subject: [PATCH 54/71] Adding an X and Y offset to the current crop effect, to better support previous clip crop parameters (for migrating crop options from OpenShot 2.5.1 to OpenShot 2.6). Also refactoring the crop effect, to actually copy the pixels into a new image buffer. 
--- src/effects/Crop.cpp | 90 ++++++++++++++++++++++++++++++++------------ src/effects/Crop.h | 3 +- 2 files changed, 67 insertions(+), 26 deletions(-) diff --git a/src/effects/Crop.cpp b/src/effects/Crop.cpp index 8644c3d6..8ae943cf 100644 --- a/src/effects/Crop.cpp +++ b/src/effects/Crop.cpp @@ -1,6 +1,6 @@ /** * @file - * @brief Source file for Crop effect class + * @brief Source file for Crop effect class (cropping any side, with x/y offsets) * @author Jonathan Thomas * * @ref License @@ -34,14 +34,14 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Crop::Crop() : left(0.0), top(0.0), right(0.0), bottom(0.0) { +Crop::Crop() : left(0.0), top(0.0), right(0.0), bottom(0.0), x(0.0), y(0.0) { // Init effect properties init_effect_details(); } // Default constructor Crop::Crop(Keyframe left, Keyframe top, Keyframe right, Keyframe bottom) : - left(left), top(top), right(right), bottom(bottom) + left(left), top(top), right(right), bottom(bottom), x(0.0), y(0.0) { // Init effect properties init_effect_details(); @@ -68,10 +68,10 @@ std::shared_ptr Crop::GetFrame(std::shared_ptr // Get the frame's image std::shared_ptr frame_image = frame->GetImage(); - // Get transparent color (and create small transparent image) - auto tempColor = std::make_shared( - frame_image->width(), 1, QImage::Format_RGBA8888_Premultiplied); - tempColor->fill(QColor(QString::fromStdString("transparent"))); + // Get transparent color target image (which will become the cropped image) + auto cropped_image = std::make_shared( + frame_image->width(), frame_image->height(), QImage::Format_RGBA8888_Premultiplied); + cropped_image->fill(QColor(QString::fromStdString("transparent"))); // Get current keyframe values double left_value = left.GetValue(frame_number); @@ -79,37 +79,69 @@ std::shared_ptr Crop::GetFrame(std::shared_ptr double right_value = right.GetValue(frame_number); double bottom_value = bottom.GetValue(frame_number); + // Get the 
current shift amount (if any... to slide the image around in the cropped area) + double x_shift = x.GetValue(frame_number); + double y_shift = y.GetValue(frame_number); + // Get pixel array pointers unsigned char *pixels = (unsigned char *) frame_image->bits(); - unsigned char *color_pixels = (unsigned char *) tempColor->bits(); + unsigned char *cropped_pixels = (unsigned char *) cropped_image->bits(); // Get pixels sizes of all crop sides int top_bar_height = top_value * frame_image->height(); int bottom_bar_height = bottom_value * frame_image->height(); int left_bar_width = left_value * frame_image->width(); int right_bar_width = right_value * frame_image->width(); + int column_offset = x_shift * frame_image->width(); + int row_offset = y_shift * frame_image->height(); - // Loop through rows + // Image copy variables + int image_width = frame_image->width(); + int src_start = left_bar_width; + int dst_start = left_bar_width; + int copy_length = frame_image->width() - right_bar_width - left_bar_width; + + // Adjust for x offset + int copy_offset = 0; + + if (column_offset < 0) { + // dest to the right + src_start += column_offset; + if (src_start < 0) { + int diff = 0 - src_start; // how far under 0 are we? 
+ src_start = 0; + dst_start += diff; + copy_offset = -diff; + } else { + copy_offset = 0; + } + + } else { + // dest to the left + src_start += column_offset; + if (image_width - src_start >= copy_length) { + // We have plenty pixels, use original copy-length + copy_offset = 0; + } else { + // We don't have enough pixels, shorten copy-length + copy_offset = (image_width - src_start) - copy_length; + } + } + + // Loop through rows of pixels for (int row = 0; row < frame_image->height(); row++) { - - // Top & Bottom Crop - if ((top_bar_height > 0.0 && row <= top_bar_height) || (bottom_bar_height > 0.0 && row >= frame_image->height() - bottom_bar_height)) { - memcpy(&pixels[row * frame_image->width() * 4], color_pixels, sizeof(char) * frame_image->width() * 4); - } else { - // Left Crop - if (left_bar_width > 0.0) { - memcpy(&pixels[row * frame_image->width() * 4], color_pixels, sizeof(char) * left_bar_width * 4); - } - - // Right Crop - if (right_bar_width > 0.0) { - memcpy(&pixels[((row * frame_image->width()) + (frame_image->width() - right_bar_width)) * 4], color_pixels, sizeof(char) * right_bar_width * 4); - } + int adjusted_row = row + row_offset; + // Is this row visible? 
+ if (adjusted_row >= top_bar_height && adjusted_row < (frame_image->height() - bottom_bar_height) && (copy_length + copy_offset > 0)) { + // Copy image (row by row, with offsets for x and y offset, and src/dst starting points for column filtering) + memcpy(&cropped_pixels[((adjusted_row * frame_image->width()) + dst_start) * 4], + &pixels[((row * frame_image->width()) + src_start) * 4], + sizeof(char) * (copy_length + copy_offset) * 4); } } - // Cleanup colors and arrays - tempColor.reset(); + // Set frame image + frame->AddImage(cropped_image); // return the modified frame return frame; @@ -132,6 +164,8 @@ Json::Value Crop::JsonValue() const { root["top"] = top.JsonValue(); root["right"] = right.JsonValue(); root["bottom"] = bottom.JsonValue(); + root["x"] = x.JsonValue(); + root["y"] = y.JsonValue(); // return JsonValue return root; @@ -169,6 +203,10 @@ void Crop::SetJsonValue(const Json::Value root) { right.SetJsonValue(root["right"]); if (!root["bottom"].isNull()) bottom.SetJsonValue(root["bottom"]); + if (!root["x"].isNull()) + x.SetJsonValue(root["x"]); + if (!root["y"].isNull()) + y.SetJsonValue(root["y"]); } // Get all properties for a specific frame @@ -188,6 +226,8 @@ std::string Crop::PropertiesJSON(int64_t requested_frame) const { root["top"] = add_property_json("Top Size", top.GetValue(requested_frame), "float", "", &top, 0.0, 1.0, false, requested_frame); root["right"] = add_property_json("Right Size", right.GetValue(requested_frame), "float", "", &right, 0.0, 1.0, false, requested_frame); root["bottom"] = add_property_json("Bottom Size", bottom.GetValue(requested_frame), "float", "", &bottom, 0.0, 1.0, false, requested_frame); + root["x"] = add_property_json("X Offset", x.GetValue(requested_frame), "float", "", &x, -1.0, 1.0, false, requested_frame); + root["y"] = add_property_json("Y Offset", y.GetValue(requested_frame), "float", "", &y, -1.0, 1.0, false, requested_frame); // Set the parent effect which properties this effect will inherit 
root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame); diff --git a/src/effects/Crop.h b/src/effects/Crop.h index f43f549c..a09c1257 100644 --- a/src/effects/Crop.h +++ b/src/effects/Crop.h @@ -60,11 +60,12 @@ namespace openshot public: - Color color; ///< Color of bars Keyframe left; ///< Size of left bar Keyframe top; ///< Size of top bar Keyframe right; ///< Size of right bar Keyframe bottom; ///< Size of bottom bar + Keyframe x; ///< X-offset + Keyframe y; ///< Y-offset /// Blank constructor, useful when using Json to load the effect properties Crop(); From 5f91ddc39b101aa876eb7c107f6c2bc96c768bec Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Fri, 3 Sep 2021 17:45:28 -0500 Subject: [PATCH 55/71] Reversing y_offset direction to match previous OpenShot crop behavior --- src/effects/Crop.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/effects/Crop.cpp b/src/effects/Crop.cpp index 8ae943cf..5ef78520 100644 --- a/src/effects/Crop.cpp +++ b/src/effects/Crop.cpp @@ -130,7 +130,7 @@ std::shared_ptr Crop::GetFrame(std::shared_ptr // Loop through rows of pixels for (int row = 0; row < frame_image->height(); row++) { - int adjusted_row = row + row_offset; + int adjusted_row = row - row_offset; // Is this row visible? 
if (adjusted_row >= top_bar_height && adjusted_row < (frame_image->height() - bottom_bar_height) && (copy_length + copy_offset > 0)) { // Copy image (row by row, with offsets for x and y offset, and src/dst starting points for column filtering) From 82498b267d2e2e4f1aab15ebb50c107d19a8096b Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sat, 4 Sep 2021 17:45:40 -0500 Subject: [PATCH 56/71] Bumping version to 0.2.7, SO 20 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ebd98c7c..3eb27179 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,8 +40,8 @@ For more information, please visit . set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules") ################ PROJECT VERSION #################### -set(PROJECT_VERSION_FULL "0.2.6-dev") -set(PROJECT_SO_VERSION 20) +set(PROJECT_VERSION_FULL "0.2.7") +set(PROJECT_SO_VERSION 21) # Remove the dash and anything following, to get the #.#.# version for project() STRING(REGEX REPLACE "\-.*$" "" VERSION_NUM "${PROJECT_VERSION_FULL}") From 4106f1f0a85be3d284ab80ed2d457c6f6b319f7a Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sat, 4 Sep 2021 17:45:40 -0500 Subject: [PATCH 57/71] Bumping version to 0.2.7, SO 21 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ebd98c7c..3eb27179 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,8 +40,8 @@ For more information, please visit . 
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules") ################ PROJECT VERSION #################### -set(PROJECT_VERSION_FULL "0.2.6-dev") -set(PROJECT_SO_VERSION 20) +set(PROJECT_VERSION_FULL "0.2.7") +set(PROJECT_SO_VERSION 21) # Remove the dash and anything following, to get the #.#.# version for project() STRING(REGEX REPLACE "\-.*$" "" VERSION_NUM "${PROJECT_VERSION_FULL}") From 003d44d7d769d0a30dd617b1c036f220349a9595 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Mon, 6 Sep 2021 17:59:46 -0400 Subject: [PATCH 58/71] CVTracker: Clean up spacing --- src/CVTracker.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/CVTracker.cpp b/src/CVTracker.cpp index b4fb85ea..3d6b87c3 100644 --- a/src/CVTracker.cpp +++ b/src/CVTracker.cpp @@ -207,7 +207,7 @@ cv::Rect2d CVTracker::filter_box_jitter(size_t frameId){ float curr_box_height = bbox.height; // keep the last width and height if the difference is less than 1% float threshold = 0.01; - + cv::Rect2d filtered_box = bbox; if(std::abs(1-(curr_box_width/last_box_width)) <= threshold){ filtered_box.width = last_box_width; @@ -299,13 +299,13 @@ void CVTracker::SetJson(const std::string value) { // Load Json::Value into this object void CVTracker::SetJsonValue(const Json::Value root) { - // Set data from Json (if key is found) - if (!root["protobuf_data_path"].isNull()){ - protobuf_data_path = (root["protobuf_data_path"].asString()); - } + // Set data from Json (if key is found) + if (!root["protobuf_data_path"].isNull()){ + protobuf_data_path = (root["protobuf_data_path"].asString()); + } if (!root["tracker-type"].isNull()){ - trackerType = (root["tracker-type"].asString()); - } + trackerType = (root["tracker-type"].asString()); + } if (!root["region"].isNull()){ double x = root["region"]["normalized_x"].asDouble(); From 31bfdf0e7c8c637d7ef17873c778b14bedc0cd49 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Mon, 6 Sep 2021 18:00:13 -0400 Subject: [PATCH 
59/71] Fix CVTracker test for OpenCV 4.5.2+ --- tests/CVTracker.cpp | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/tests/CVTracker.cpp b/tests/CVTracker.cpp index bb447065..62a82a04 100644 --- a/tests/CVTracker.cpp +++ b/tests/CVTracker.cpp @@ -73,10 +73,10 @@ TEST_CASE( "Track_Video", "[libopenshot][opencv][tracker]" ) int height = ((float)fd.y2*360) - y; // Compare if tracked data is equal to pre-tested ones - CHECK(x >= 255); CHECK(x <= 257); - CHECK(y >= 133); CHECK(y <= 135); - CHECK(width >= 179); CHECK(width <= 181); - CHECK(height >= 165); CHECK(height <= 168); + CHECK(x == Approx(256).margin(1)); + CHECK(y == Approx(134).margin(1)); + CHECK(width == Approx(180).margin(1)); + CHECK(height == Approx(166).margin(2)); } @@ -95,10 +95,15 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" ) { "protobuf_data_path": "kcf_tracker.data", "tracker-type": "KCF", - "region": {"x": 294, "y": 102, "width": 180, "height": 166, "first-frame": 1} + "region": { + "normalized_x": 0.46, + "normalized_y": 0.28, + "normalized_width": 0.28, + "normalized_height": 0.46, + "first-frame": 1 + } } )proto"; - // Create first tracker CVTracker kcfTracker_1(json_data, tracker_pc); @@ -120,7 +125,13 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" ) { "protobuf_data_path": "kcf_tracker.data", "tracker_type": "", - "region": {"x": -1, "y": -1, "width": -1, "height": -1, "first-frame": 1} + "region": { + "normalized_x": 0.1, + "normalized_y": 0.1, + "normalized_width": 0.5, + "normalized_height": 0.5, + "first-frame": 1 + } } )proto"; // Create second tracker @@ -138,8 +149,9 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" ) float height_2 = fd_2.y2 - y_2; // Compare first tracker data with second tracker data - CHECK((int)(x_1 * 640) == (int)(x_2 * 640)); - CHECK((int)(y_1 * 360) == (int)(y_2 * 360)); - CHECK((int)(width_1 * 640) == (int)(width_2 * 640)); - CHECK((int)(height_1 
* 360) == (int)(height_2 * 360)); + CHECK(x_1 == Approx(x_2).margin(0.01)); + CHECK(y_1 == Approx(y_2).margin(0.01)); + CHECK(width_1 == Approx(width_2).margin(0.01)); + CHECK(height_1 == Approx(height_2).margin(0.01)); + } From 01d40b22fb10d87adb3e7c1e972fa1f845c62a7a Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Wed, 8 Sep 2021 12:35:32 -0400 Subject: [PATCH 60/71] CVTracker: Add some error-case unit tests --- tests/CVTracker.cpp | 56 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 5 deletions(-) diff --git a/tests/CVTracker.cpp b/tests/CVTracker.cpp index 62a82a04..8415ecef 100644 --- a/tests/CVTracker.cpp +++ b/tests/CVTracker.cpp @@ -40,8 +40,46 @@ using namespace openshot; -// Just for the tracker constructor, it won't be used -ProcessingController tracker_pc; +TEST_CASE( "initialization", "[libopenshot][opencv][tracker]" ) +{ + std::string bad_json = R"proto( + } + [1, 2, 3, "a"] + } )proto"; + ProcessingController badPC; + CVTracker* badTracker; + CHECK_THROWS_AS( + badTracker = new CVTracker(bad_json, badPC), + openshot::InvalidJSON + ); + + std::string json1 = R"proto( + { + "tracker-type": "KCF" + } )proto"; + + ProcessingController pc1; + CVTracker tracker1(json1, pc1); + CHECK(pc1.GetError() == true); + CHECK(pc1.GetErrorMessage() == "No initial bounding box selected"); + + std::string json2 = R"proto( + { + "tracker-type": "KCF", + "region": { + "normalized_x": 0.459375, + "normalized_y": 0.28333, + "normalized_width": -0.28125, + "normalized_height": -0.461111 + } + } )proto"; + + // Create tracker + ProcessingController pc2; + CVTracker tracker2(json2, pc2); + CHECK(pc2.GetError() == true); + CHECK(pc2.GetErrorMessage() == "No first-frame"); +} TEST_CASE( "Track_Video", "[libopenshot][opencv][tracker]" ) { @@ -57,10 +95,17 @@ TEST_CASE( "Track_Video", "[libopenshot][opencv][tracker]" ) { "protobuf_data_path": "kcf_tracker.data", "tracker-type": "KCF", - "region": {"normalized_x": 0.459375, "normalized_y": 
0.28333, "normalized_width": 0.28125, "normalized_height": 0.461111, "first-frame": 1} + "region": { + "normalized_x": 0.459375, + "normalized_y": 0.28333, + "normalized_width": 0.28125, + "normalized_height": 0.461111, + "first-frame": 1 + } } )proto"; // Create tracker + ProcessingController tracker_pc; CVTracker kcfTracker(json_data, tracker_pc); // Track clip for frames 0-20 @@ -105,6 +150,7 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" ) } )proto"; // Create first tracker + ProcessingController tracker_pc; CVTracker kcfTracker_1(json_data, tracker_pc); // Track clip for frames 0-20 @@ -128,8 +174,8 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" ) "region": { "normalized_x": 0.1, "normalized_y": 0.1, - "normalized_width": 0.5, - "normalized_height": 0.5, + "normalized_width": -0.5, + "normalized_height": -0.5, "first-frame": 1 } } )proto"; From aee95d3bacbfc458337df1d88665e3e3279426cd Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Wed, 8 Sep 2021 12:35:57 -0400 Subject: [PATCH 61/71] CVTracker: Fix bug in JSON error handling --- src/CVTracker.cpp | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/CVTracker.cpp b/src/CVTracker.cpp index 3d6b87c3..b3891674 100644 --- a/src/CVTracker.cpp +++ b/src/CVTracker.cpp @@ -314,20 +314,22 @@ void CVTracker::SetJsonValue(const Json::Value root) { double h = root["region"]["normalized_height"].asDouble(); cv::Rect2d prev_bbox(x,y,w,h); bbox = prev_bbox; + + if (!root["region"]["first-frame"].isNull()){ + start = root["region"]["first-frame"].asInt64(); + json_interval = true; + } + else{ + processingController->SetError(true, "No first-frame"); + error = true; + } + } else{ processingController->SetError(true, "No initial bounding box selected"); error = true; } - if (!root["region"]["first-frame"].isNull()){ - start = root["region"]["first-frame"].asInt64(); - json_interval = true; - } - else{ - processingController->SetError(true, 
"No first-frame"); - error = true; - } } /* From fea43bdd8ad8bf2f02550f3f898a235b26ad67f3 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Wed, 8 Sep 2021 17:47:10 -0400 Subject: [PATCH 62/71] CI: Build audio lib & cache (#727) * CI: Build audio lib & cache * Also key audio cache on CMakeLists.txt hash * Exclude audio sources from coverage --- .github/workflows/ci.yml | 32 +++++++++++++++++++++++++++----- CMakeLists.txt | 1 + 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 94db6047..50cd44c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,11 +13,17 @@ jobs: steps: - uses: actions/checkout@v2 - # Work around a codecov issue detecting commit SHAs - # see: https://community.codecov.io/t/issue-detecting-commit-sha-please-run-actions-checkout-with-fetch-depth-1-or-set-to-0/2571 with: + # Work around a codecov issue detecting commit SHAs + # see: https://community.codecov.io/t/issue-detecting-commit-sha-please-run-actions-checkout-with-fetch-depth-1-or-set-to-0/2571 fetch-depth: 0 + - name: Checkout OpenShotAudio + uses: actions/checkout@v2 + with: + repository: OpenShot/libopenshot-audio + path: audio + - uses: haya14busa/action-cond@v1 id: coverage with: @@ -28,12 +34,11 @@ jobs: - name: Install dependencies shell: bash run: | - sudo add-apt-repository ppa:openshot.developers/libopenshot-daily sudo apt update sudo apt remove libzmq5 # See actions/virtual-environments#3317 sudo apt install \ cmake swig doxygen graphviz curl lcov \ - libopenshot-audio-dev libasound2-dev \ + libasound2-dev \ qtbase5-dev qtbase5-dev-tools \ libfdk-aac-dev libavcodec-dev libavformat-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libpostproc-dev libswresample-dev \ libzmq3-dev libmagick++-dev \ @@ -43,13 +48,30 @@ jobs: wget https://launchpad.net/ubuntu/+archive/primary/+files/catch2_2.13.0-1_all.deb sudo dpkg -i catch2_2.13.0-1_all.deb + - uses: actions/cache@v2 + id: cache 
+ with: + path: audio/build + key: audio-${{ matrix.os }}-${{ matrix.compiler }}-${{ hashFiles('audio/CMakeLists.txt') }} + + - name: Build OpenShotAudio (if not cached) + if: steps.cache.outputs.cache-hit != 'true' + shell: bash + run: | + pushd audio + if [ ! -d build ]; then + mkdir build + cmake -B build -S . + fi + cmake --build build + popd - name: Build libopenshot shell: bash run: | mkdir build pushd build - cmake -B . -S .. -DCMAKE_INSTALL_PREFIX:PATH="dist" -DCMAKE_BUILD_TYPE="Debug" "${{ steps.coverage.outputs.value }}" + cmake -B . -S .. -DCMAKE_INSTALL_PREFIX:PATH="dist" -DCMAKE_BUILD_TYPE="Debug" -DOpenShotAudio_ROOT="../audio/build" "${{ steps.coverage.outputs.value }}" cmake --build . -- VERBOSE=1 popd diff --git a/CMakeLists.txt b/CMakeLists.txt index ebd98c7c..6db2c2be 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -212,6 +212,7 @@ if (ENABLE_COVERAGE AND DEFINED UNIT_TEST_TARGETS) "examples/*" "${CMAKE_CURRENT_BINARY_DIR}/bindings/*" "${CMAKE_CURRENT_BINARY_DIR}/src/*_autogen/*" + "audio/*" ) setup_target_for_coverage_lcov( NAME coverage From 2dbb8d53133ec474a0b564046e744fe5843daf18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Sep 2021 17:47:51 -0400 Subject: [PATCH 63/71] Bump codecov/codecov-action from 2.0.2 to 2.0.3 (#722) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 2.0.2 to 2.0.3. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v2.0.2...v2.0.3) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 50cd44c9..1ca60297 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -89,7 +89,7 @@ jobs: cmake --build . --target install -- VERBOSE=1 popd - - uses: codecov/codecov-action@v2.0.2 + - uses: codecov/codecov-action@v2.0.3 if: ${{ matrix.compiler == 'clang' }} with: file: build/coverage.info From cc41c5514cbeafd5f2af01519b0844d2403fd9f0 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Fri, 10 Sep 2021 18:32:22 -0500 Subject: [PATCH 64/71] Bump version for dev --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b948de0d..721ac085 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,7 +40,7 @@ For more information, please visit . 
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules") ################ PROJECT VERSION #################### -set(PROJECT_VERSION_FULL "0.2.7") +set(PROJECT_VERSION_FULL "0.2.7-dev") set(PROJECT_SO_VERSION 21) # Remove the dash and anything following, to get the #.#.# version for project() From 62b7370b0239d4359d9efec8b7733474bb37423d Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Thu, 16 Sep 2021 12:34:05 -0400 Subject: [PATCH 65/71] Add explicit QtSvg dependency (#731) * Add explicit QtSvg dependency * Add QtSvg to CI dependencies --- .github/workflows/ci.yml | 2 +- src/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ca60297..367fc48c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -39,7 +39,7 @@ jobs: sudo apt install \ cmake swig doxygen graphviz curl lcov \ libasound2-dev \ - qtbase5-dev qtbase5-dev-tools \ + qtbase5-dev qtbase5-dev-tools libqt5svg5-dev \ libfdk-aac-dev libavcodec-dev libavformat-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libpostproc-dev libswresample-dev \ libzmq3-dev libmagick++-dev \ libopencv-dev libprotobuf-dev protobuf-compiler diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 15a7bcc8..e43c1c3a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -284,7 +284,7 @@ endif () ################# QT5 ################### # Find QT5 libraries -set(_qt_components Core Gui Widgets) +set(_qt_components Core Gui Widgets Svg) find_package(Qt5 COMPONENTS ${_qt_components} REQUIRED) foreach(_qt_comp IN LISTS _qt_components) From e45d798c903e690037a1897cf7262630a571a098 Mon Sep 17 00:00:00 2001 From: "FeRD (Frank Dana)" Date: Thu, 10 Jun 2021 16:32:53 -0400 Subject: [PATCH 66/71] Revert final_cache size breakage Reverting the "Adjusting Timeline final_cache to match the video caching thread max_frames, so one doesn't clobber the other." part of a previous commit. 
Setting the cache size in SetMaxSize() ignores OpenShot's cache preferences. It prevents using a cache larger than the preview buffer, which is often a desirable thing. This partially reverts commit 0c4e1bcce4d8cbdc1edc752fe3339acf94ac8eb1. --- src/Timeline.cpp | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/src/Timeline.cpp b/src/Timeline.cpp index c2aea38d..8bed50cf 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -80,11 +80,12 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha info.acodec = "openshot::timeline"; info.vcodec = "openshot::timeline"; - // Init cache - final_cache = new CacheMemory(); - // Init max image size SetMaxSize(info.width, info.height); + + // Init cache + final_cache = new CacheMemory(); + final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, info.width, info.height, info.sample_rate, info.channels); } // Delegating constructor that copies parameters from a provided ReaderInfo @@ -95,7 +96,7 @@ Timeline::Timeline(const ReaderInfo info) : // Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline) Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths) : is_open(false), auto_map_clips(true), managed_cache(true), path(projectPath), - max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) { + max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) { // Create CrashHandler and Attach (incase of errors) CrashHandler::Instance(); @@ -212,11 +213,12 @@ Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths) info.has_video = true; info.has_audio = true; - // Init cache - final_cache = new CacheMemory(); - // Init max image size SetMaxSize(info.width, info.height); + + // Init cache + final_cache = new CacheMemory(); + final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, info.width, info.height, info.sample_rate, info.channels); } Timeline::~Timeline() { @@ -1064,7 +1066,7 @@ 
void Timeline::SetJsonValue(const Json::Value root) { // on it's parent timeline. Setting the parent timeline of the clip here // allows attaching it to an object when exporting the project (because) // the exporter script initializes the clip and it's effects - // before setting it's parent timeline. + // before setting its parent timeline. c->ParentTimeline(this); // Load Json into Clip @@ -1530,7 +1532,4 @@ void Timeline::SetMaxSize(int width, int height) { // Update preview settings preview_width = display_ratio_size.width(); preview_height = display_ratio_size.height(); - - // Update timeline cache size - final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, preview_width, preview_height, info.sample_rate, info.channels); -} \ No newline at end of file +} From 3df837944b18eb001ba015fe666b7f80e5f4161a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Sep 2021 21:11:48 -0400 Subject: [PATCH 67/71] Bump codecov/codecov-action from 2.0.3 to 2.1.0 (#733) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 2.0.3 to 2.1.0. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v2.0.3...v2.1.0) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 367fc48c..91bf3f2b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -89,7 +89,7 @@ jobs: cmake --build . 
--target install -- VERBOSE=1 popd - - uses: codecov/codecov-action@v2.0.3 + - uses: codecov/codecov-action@v2.1.0 if: ${{ matrix.compiler == 'clang' }} with: file: build/coverage.info From d41c482cb21fe22cb310622441d6304eb87b1a1f Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Wed, 22 Sep 2021 01:37:58 -0400 Subject: [PATCH 68/71] CI: Fix Clang builds, coverage (#736) --- .github/workflows/ci.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 91bf3f2b..ea7c3ff1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,9 +6,12 @@ jobs: strategy: matrix: os: [ubuntu-18.04, ubuntu-20.04] - compiler: [gcc, clang] + compiler: + - { cc: gcc, cpp: g++ } + - { cc: clang, cpp: clang++ } env: - CC: ${{ matrix.compiler }} + CC: ${{ matrix.compiler.cc }} + CXX: ${{ matrix.compiler.cpp }} CODECOV_TOKEN: 'dc94d508-39d3-4369-b1c6-321749f96f7c' steps: @@ -27,7 +30,7 @@ jobs: - uses: haya14busa/action-cond@v1 id: coverage with: - cond: ${{ matrix.compiler == 'clang' }} + cond: ${{ matrix.compiler.cc == 'gcc' }} if_true: "-DENABLE_COVERAGE:BOOL=1" if_false: "-DENABLE_COVERAGE:BOOL=0" @@ -52,7 +55,7 @@ jobs: id: cache with: path: audio/build - key: audio-${{ matrix.os }}-${{ matrix.compiler }}-${{ hashFiles('audio/CMakeLists.txt') }} + key: audio-${{ matrix.os }}-${{ matrix.compiler.cpp }}-${{ hashFiles('audio/CMakeLists.txt') }} - name: Build OpenShotAudio (if not cached) if: steps.cache.outputs.cache-hit != 'true' @@ -90,6 +93,6 @@ jobs: popd - uses: codecov/codecov-action@v2.1.0 - if: ${{ matrix.compiler == 'clang' }} + if: ${{ matrix.compiler.cc == 'gcc' }} with: file: build/coverage.info From c128b6caf164f514c4a651811f6d0ab7f33bb72c Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 22 Sep 2021 16:09:20 -0500 Subject: [PATCH 69/71] Protecting a caption effect with no Clip (i.e. 
effect added directly to timeline) --- src/effects/Caption.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/effects/Caption.cpp b/src/effects/Caption.cpp index 8d9bec92..a19b8e2d 100644 --- a/src/effects/Caption.cpp +++ b/src/effects/Caption.cpp @@ -129,7 +129,7 @@ std::shared_ptr Caption::GetFrame(std::shared_ptrParentTimeline() != NULL) { + if (clip && clip->ParentTimeline() != NULL) { timeline = (Timeline*) clip->ParentTimeline(); } else if (this->ParentTimeline() != NULL) { timeline = (Timeline*) this->ParentTimeline(); From d4d200f14bf789e6a2a9b8fb0ba59b7f015b3b76 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Thu, 23 Sep 2021 03:03:08 -0400 Subject: [PATCH 70/71] CMake: Set -DDEBUG for debug builds (#739) --- CMakeLists.txt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 721ac085..9ed9de68 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,6 +99,12 @@ if(ENABLE_TESTS) set(BUILD_TESTING ${ENABLE_TESTS}) endif() +### JUCE requires one of -DDEBUG or -DNDEBUG set on the +### compile command line. CMake automatically sets -DNDEBUG +### on all non-debug configs, so we'll just add -DDEBUG to +### the debug build flags +set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DDEBUG") + #### Work around a GCC < 9 bug with handling of _Pragma() in macros #### See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578 if ((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") AND From 88d3011455ae0779f39ab676223dc85bf2b5e3a5 Mon Sep 17 00:00:00 2001 From: Frank Dana Date: Mon, 27 Sep 2021 07:14:48 -0400 Subject: [PATCH 71/71] Unit tests: Use == to compare strings (#741) When there's a mismatch, Catch2 will output the contents of both strings rather than a meaningless .compare() numeric value. 
--- tests/FFmpegReader.cpp | 3 +-- tests/FFmpegWriter.cpp | 3 +-- tests/FrameMapper.cpp | 3 +-- tests/KeyFrame.cpp | 4 ++-- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/tests/FFmpegReader.cpp b/tests/FFmpegReader.cpp index 747ae0e2..c5696b76 100644 --- a/tests/FFmpegReader.cpp +++ b/tests/FFmpegReader.cpp @@ -300,6 +300,5 @@ TEST_CASE( "DisplayInfo", "[libopenshot][ffmpegreader]" ) r.DisplayInfo(&output); // Compare a [0, expected.size()) substring of output to expected - auto compare_value = output.str().compare(0, expected.size(), expected); - CHECK(compare_value == 0); + CHECK(output.str().substr(0, expected.size()) == expected); } diff --git a/tests/FFmpegWriter.cpp b/tests/FFmpegWriter.cpp index adb555b2..6a9a0211 100644 --- a/tests/FFmpegWriter.cpp +++ b/tests/FFmpegWriter.cpp @@ -199,6 +199,5 @@ TEST_CASE( "DisplayInfo", "[libopenshot][ffmpegwriter]" ) w.Close(); // Compare a [0, expected.size()) substring of output to expected - auto compare_value = output.str().compare(0, expected.size(), expected); - CHECK(compare_value == 0); + CHECK(output.str().substr(0, expected.size()) == expected); } diff --git a/tests/FrameMapper.cpp b/tests/FrameMapper.cpp index fc1f70d4..7553345b 100644 --- a/tests/FrameMapper.cpp +++ b/tests/FrameMapper.cpp @@ -651,8 +651,7 @@ Target frame #: 10 mapped to original frame #: (8 odd, 8 even) mapping.PrintMapping(&mapping_out); // Compare a [0, expected.size()) substring of output to expected - auto compare_value = mapping_out.str().compare(0, expected.size(), expected); - CHECK(compare_value == 0); + CHECK(mapping_out.str().substr(0, expected.size()) == expected); } TEST_CASE( "Json", "[libopenshot][framemapper]" ) diff --git a/tests/KeyFrame.cpp b/tests/KeyFrame.cpp index 3f186729..7b5ec149 100644 --- a/tests/KeyFrame.cpp +++ b/tests/KeyFrame.cpp @@ -536,7 +536,7 @@ R"( 1 10.0000 999 12345.6777)"; // Ensure the two strings are equal up to the limits of 'expected' - CHECK(output.str().compare(0, expected.size(), 
expected) == 0); + CHECK(output.str().substr(0, expected.size()) == expected); } TEST_CASE( "PrintValues", "[libopenshot][keyframe]" ) @@ -582,7 +582,7 @@ R"(│Frame# (X) │ Y Value │ Delta Y │ Increasing? │ Repeat Fraction │ 25 │ 16.5446 │ +1 │ true │ Fraction(1, 2) │)"; // Ensure the two strings are equal up to the limits of 'expected' - CHECK(output.str().compare(0, expected.size(), expected) == 0); + CHECK(output.str().substr(0, expected.size()) == expected); } #ifdef USE_OPENCV