From b612f3339de60f0ae2faf4164021498ea2030b4b Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Mon, 1 Jun 2015 00:20:14 -0700 Subject: [PATCH] Replaced ImagMagick with QImage on almost all key methods and classes. Reprogrammed all effects and the entire rendering pipeline to use QImage and QTransforms, primarily for increases in speed and stability. libopenshot is more than 10X faster on many of the most CPU heavy tasks. This was a huge change, and still has a few minor issues relating to BlackMagick Decklink and Text rendering.... which should be resolved very soon. Also, much work has been done on memory management / leak detection, and optimizations with multi-threading... including a new thread cacher class used by the video playback (which is smoother than ever). --- include/Cache.h | 25 +- include/ChunkReader.h | 3 + include/Clip.h | 12 +- include/Color.h | 30 +- include/DecklinkReader.h | 3 + include/DummyReader.h | 3 + include/FFmpegReader.h | 3 + include/FFmpegWriter.h | 2 +- include/Frame.h | 69 ++--- include/FrameMapper.h | 3 + include/ImageReader.h | 3 + include/ImageWriter.h | 7 - include/KeyFrame.h | 1 + include/OpenMPUtilities.h | 6 +- include/OpenShot.h | 1 + include/Qt/PlayerDemo.h | 1 + include/Qt/PlayerPrivate.h | 2 + include/Qt/VideoCacheThread.h | 92 ++++++ include/QtImageReader.h | 110 +++++++ include/ReaderBase.h | 7 +- include/TextReader.h | 3 + include/Timeline.h | 21 +- include/effects/Mask.h | 21 +- src/AudioReaderSource.cpp | 6 +- src/AudioResampler.cpp | 2 - src/CMakeLists.txt | 5 +- src/Cache.cpp | 98 ++++-- src/Clip.cpp | 118 +++---- src/Color.cpp | 47 ++- src/DecklinkInput.cpp | 6 +- src/DecklinkOutput.cpp | 27 +- src/DummyReader.cpp | 6 +- src/FFmpegReader.cpp | 225 +++++++------- src/FFmpegWriter.cpp | 75 ++--- src/Frame.cpp | 479 ++++++++++++----------------- src/FrameMapper.cpp | 42 ++- src/ImageReader.cpp | 4 +- src/ImageWriter.cpp | 135 ++------ src/KeyFrame.cpp | 192 ++++++------ src/Qt/PlayerDemo.cpp | 11 +- 
src/Qt/PlayerPrivate.cpp | 50 +-- src/Qt/VideoCacheThread.cpp | 100 ++++++ src/Qt/VideoPlaybackThread.cpp | 17 +- src/Qt/VideoRenderer.cpp | 2 - src/QtImageReader.cpp | 179 +++++++++++ src/QtPlayer.cpp | 34 +- src/ReaderBase.cpp | 7 - src/RendererBase.cpp | 2 +- src/TextReader.cpp | 3 +- src/Timeline.cpp | 274 +++++++++++------ src/bindings/python/openshot.i | 4 + src/bindings/ruby/openshot.i | 4 + src/effects/ChromaKey.cpp | 32 +- src/effects/Deinterlace.cpp | 33 +- src/effects/Mask.cpp | 130 +++++--- src/effects/Negate.cpp | 4 +- src/examples/Example.cpp | 334 +++++--------------- src/examples/ExampleBlackmagic.cpp | 2 +- tests/Cache_Tests.cpp | 2 + tests/Clip_Tests.cpp | 21 +- tests/Color_Tests.cpp | 58 +++- tests/FFmpegReader_Tests.cpp | 42 +-- tests/FFmpegWriter_Tests.cpp | 13 +- tests/ImageWriter_Tests.cpp | 17 +- tests/ReaderBase_Tests.cpp | 1 + tests/Timeline_Tests.cpp | 93 +++--- 66 files changed, 1974 insertions(+), 1390 deletions(-) create mode 100644 include/Qt/VideoCacheThread.h create mode 100644 include/QtImageReader.h create mode 100644 src/Qt/VideoCacheThread.cpp create mode 100644 src/QtImageReader.cpp diff --git a/include/Cache.h b/include/Cache.h index 856731fc..c9f67076 100644 --- a/include/Cache.h +++ b/include/Cache.h @@ -36,6 +36,14 @@ namespace openshot { +// struct ReaderInfo +// { +// int height; ///< The height of the video (in pixels) +// int width; ///< The width of the video (in pixesl) +// int sample_rate; ///< The number of audio samples per second (44100 is a common sample rate) +// int channels; ///< The number of audio channels used in the audio stream +// }; + /** * @brief This class is a cache manager for Frame objects. * @@ -44,9 +52,9 @@ namespace openshot { * it critical to keep these Frames cached for performance reasons. However, the larger the cache, the more memory * is required. You can set the max number of bytes to cache. 
*/ - class Cache { + class Cache + { private: - int64 total_bytes; ///< This is the current total bytes (that are in this cache) int64 max_bytes; ///< This is the max number of bytes to cache (0 = no limit) map > frames; ///< This map holds the frame number and Frame objects deque frame_numbers; ///< This queue holds a sequential list of cached Frame numbers @@ -54,6 +62,10 @@ namespace openshot { /// Clean up cached frames that exceed the max number of bytes void CleanUp(); + /// Section lock for multiple threads + CriticalSection *cacheCriticalSection; + + public: /// Default constructor, no max bytes Cache(); @@ -62,6 +74,9 @@ namespace openshot { /// @param max_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames. Cache(int64 max_bytes); + // Default destructor + ~Cache(); + /// @brief Add a Frame to the cache /// @param frame_number The frame number of the cached frame /// @param frame The openshot::Frame object needing to be cached. @@ -85,7 +100,7 @@ namespace openshot { tr1::shared_ptr GetFrame(int frame_number); /// Gets the maximum bytes value - int64 GetBytes() { return total_bytes; }; + int64 GetBytes(); /// Gets the maximum bytes value int64 GetMaxBytes() { return max_bytes; }; @@ -105,6 +120,10 @@ namespace openshot { /// @param number_of_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames. void SetMaxBytes(int64 number_of_bytes) { max_bytes = number_of_bytes; CleanUp(); }; + /// @brief Set maximum bytes to a different amount based on a ReaderInfo struct + /// @param number_of_bytes The maximum bytes to allow in the cache. Once exceeded, the cache will purge the oldest frames. 
+ void SetMaxBytesFromInfo(int number_of_frames, int width, int height, int sample_rate, int channels); + }; diff --git a/include/ChunkReader.h b/include/ChunkReader.h index 3d6d86c9..ab4a0c6a 100644 --- a/include/ChunkReader.h +++ b/include/ChunkReader.h @@ -144,6 +144,9 @@ namespace openshot /// @param new_size The number of frames per chunk void SetChunkSize(int new_size) { chunk_size = new_size; }; + /// Get the cache object used by this reader (always return NULL for this reader) + Cache* GetCache() { return NULL; }; + /// @brief Get an openshot::Frame object for a specific frame number of this reader. /// @returns The requested frame (containing the image and audio) /// @param requested_frame The frame number you want to retrieve diff --git a/include/Clip.h b/include/Clip.h index b3d8d1e1..3f9b398e 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -35,6 +35,7 @@ #include #include +#include #include "JuceLibraryCode/JuceHeader.h" #include "AudioResampler.h" #include "ClipBase.h" @@ -46,6 +47,7 @@ #include "Fraction.h" #include "FrameMapper.h" #include "ImageReader.h" +#include "QtImageReader.h" #include "TextReader.h" #include "ChunkReader.h" #include "KeyFrame.h" @@ -102,6 +104,10 @@ namespace openshot { * @endcode */ class Clip : public ClipBase { + protected: + /// Section lock for multiple threads + CriticalSection getFrameCriticalSection; + private: bool waveform; ///< Should a waveform be used instead of the clip's image list effects; /// #include #include +#include +#include +#include +#include #include #include #include "Magick++.h" @@ -56,18 +60,6 @@ using namespace std; namespace openshot { - - /** - * @brief The pixel format supported by renderers. - * - * Currently only RGB_888 is supported. - * - * @see QuantumType - */ - enum OSPixelFormat { - RGB_888, - }; - /** * @brief This class represents a single frame of video (i.e. 
image & audio data) * @@ -125,11 +117,10 @@ namespace openshot class Frame { private: - tr1::shared_ptr image; - tr1::shared_ptr wave_image; + tr1::shared_ptr image; + tr1::shared_ptr wave_image; tr1::shared_ptr audio; - tr1::shared_ptr qimage; - unsigned char *qbuffer; + const unsigned char *qbuffer; Fraction pixel_ratio; int channels; ChannelLayout channel_layout; @@ -168,13 +159,16 @@ namespace openshot void AddColor(int width, int height, string color); /// Add (or replace) pixel data to the frame - void AddImage(int width, int height, const string map, const Magick::StorageType type, const void *pixels_); + void AddImage(int width, int height, int bytes_per_pixel, QImage::Format type, const unsigned char *pixels_); /// Add (or replace) pixel data to the frame - void AddImage(tr1::shared_ptr new_image); + void AddImage(tr1::shared_ptr new_image); /// Add (or replace) pixel data to the frame (for only the odd or even lines) - void AddImage(tr1::shared_ptr new_image, bool only_odd_lines); + void AddImage(tr1::shared_ptr new_image, bool only_odd_lines); + + /// Add (or replace) pixel data to the frame from an ImageMagick Image + void AddMagickImage(tr1::shared_ptr new_image); /// Add audio samples to a specific channel void AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float* source, int numSamples, float gainToApplyToSource); @@ -182,18 +176,6 @@ namespace openshot /// Apply gain ramp (i.e. 
fading volume) void ApplyGainRamp(int destChannel, int destStartSample, int numSamples, float initial_gain, float final_gain); - /// Composite a new image on top of the existing image - void AddImage(tr1::shared_ptr new_image, float alpha); - - /// Experimental method to add effects to this frame - void AddEffect(string name); - - /// Experimental method to add overlay images to this frame - void AddOverlay(Frame* frame); - - /// Experimental method to add the frame number on top of the image - void AddOverlayNumber(int overlay_number); - /// Channel Layout of audio samples. A frame needs to keep track of this, since Writers do not always /// know the original channel layout of a frame's audio samples (i.e. mono, stereo, 5 point surround, etc...) ChannelLayout ChannelsLayout(); @@ -201,6 +183,9 @@ namespace openshot // Set the channel layout of audio samples (i.e. mono, stereo, 5 point surround, etc...) void ChannelsLayout(ChannelLayout new_channel_layout) { channel_layout = new_channel_layout; }; + /// Clean up buffer after QImage is deleted + static void cleanUpBuffer(void *info); + /// Clear the waveform image (and deallocate it's memory) void ClearWaveform(); @@ -233,20 +218,20 @@ namespace openshot /// Get the size in bytes of this frame (rough estimate) int64 GetBytes(); - /// Get pointer to Magick++ image object - tr1::shared_ptr GetImage(); + /// Get pointer to Qt QImage image object + tr1::shared_ptr GetImage(); - /// Get pointer to QImage of frame - tr1::shared_ptr GetQImage(); + /// Get pointer to ImageMagick image object + tr1::shared_ptr GetMagickImage(); /// Set Pixel Aspect Ratio Fraction GetPixelRatio() { return pixel_ratio; }; /// Get pixel data (as packets) - const Magick::PixelPacket* GetPixels(); + const unsigned char* GetPixels(); /// Get pixel data (for only a single scan-line) - const Magick::PixelPacket* GetPixels(int row); + const unsigned char* GetPixels(int row); /// Get height of image int GetHeight(); @@ -258,10 +243,10 @@ namespace 
openshot static int GetSamplesPerFrame(int frame_number, Fraction fps, int sample_rate, int channels); /// Get an audio waveform image - tr1::shared_ptr GetWaveform(int width, int height, int Red, int Green, int Blue); + tr1::shared_ptr GetWaveform(int width, int height, int Red, int Green, int Blue, int Alpha); /// Get an audio waveform image pixels - const Magick::PixelPacket* GetWaveformPixels(int width, int height, int Red, int Green, int Blue); + const unsigned char* GetWaveformPixels(int width, int height, int Red, int Green, int Blue, int Alpha); /// Get height of image int GetWidth(); @@ -269,9 +254,6 @@ namespace openshot /// Resize audio container to hold more (or less) samples and channels void ResizeAudio(int channels, int length, int sample_rate, ChannelLayout channel_layout); - /// Rotate the image - void Rotate(float degrees); - /// Get the original sample rate of this frame's audio data int SampleRate(); @@ -292,9 +274,6 @@ namespace openshot void Thumbnail(string path, int new_width, int new_height, string mask_path, string overlay_path, string background_color, bool ignore_aspect) throw(InvalidFile); - /// Make colors in a specific range transparent - void TransparentColors(string color, double fuzz); - /// Play audio samples for this frame void Play(); }; diff --git a/include/FrameMapper.h b/include/FrameMapper.h index 19fc2a00..f3e47ded 100644 --- a/include/FrameMapper.h +++ b/include/FrameMapper.h @@ -175,6 +175,9 @@ namespace openshot /// Get a frame based on the target frame rate and the new frame number of a frame MappedFrame GetMappedFrame(int TargetFrameNumber) throw(OutOfBoundsFrame); + /// Get the cache object used by this reader + Cache* GetCache() { return &final_cache; }; + /// @brief This method is required for all derived classes of ReaderBase, and return the /// openshot::Frame object, which contains the image and audio information for that /// frame of video. 
diff --git a/include/ImageReader.h b/include/ImageReader.h index b9e7fa5d..50d86d96 100644 --- a/include/ImageReader.h +++ b/include/ImageReader.h @@ -80,6 +80,9 @@ namespace openshot /// Close File void Close(); + /// Get the cache object used by this reader (always returns NULL for this object) + Cache* GetCache() { return NULL; }; + /// Get an openshot::Frame object for a specific frame number of this reader. All numbers /// return the same Frame, since they all share the same image data. /// diff --git a/include/ImageWriter.h b/include/ImageWriter.h index 211e9f4b..83ad9e25 100644 --- a/include/ImageWriter.h +++ b/include/ImageWriter.h @@ -97,13 +97,6 @@ namespace openshot bool combine_frames; tr1::shared_ptr last_frame; - deque > spooled_video_frames; - deque > queued_video_frames; - deque > processed_frames; - deque > deallocate_frames; - - /// write all queued frames - void write_queued_frames(); public: diff --git a/include/KeyFrame.h b/include/KeyFrame.h index ed106cdb..25486870 100644 --- a/include/KeyFrame.h +++ b/include/KeyFrame.h @@ -29,6 +29,7 @@ #define OPENSHOT_KEYFRAME_H #include +#include #include #include #include diff --git a/include/OpenMPUtilities.h b/include/OpenMPUtilities.h index 64f1fc58..fc4ff3a5 100644 --- a/include/OpenMPUtilities.h +++ b/include/OpenMPUtilities.h @@ -31,10 +31,6 @@ #include // Calculate the # of OpenMP Threads to allow (HACK / WORK-AROUND for an ImageMagick bug: preventing use of all 8 cores) - #ifdef OPENSHOT_IMAGEMAGICK_COMPATIBILITY - #define OPEN_MP_NUM_PROCESSORS (omp_get_num_procs() <= 4 ? 
omp_get_num_procs() : 4) - #else - #define OPEN_MP_NUM_PROCESSORS omp_get_num_procs() - #endif + #define OPEN_MP_NUM_PROCESSORS omp_get_num_procs() #endif diff --git a/include/OpenShot.h b/include/OpenShot.h index 04d4a1df..e5edeb52 100644 --- a/include/OpenShot.h +++ b/include/OpenShot.h @@ -127,6 +127,7 @@ #include "PlayerBase.h" #include "Point.h" #include "Profiles.h" +#include "QtImageReader.h" #include "Sleep.h" #include "TextReader.h" #include "Timeline.h" diff --git a/include/Qt/PlayerDemo.h b/include/Qt/PlayerDemo.h index 7b150147..a3e18bd8 100644 --- a/include/Qt/PlayerDemo.h +++ b/include/Qt/PlayerDemo.h @@ -33,6 +33,7 @@ #include #include #include +#include #include "VideoRenderWidget.h" diff --git a/include/Qt/PlayerPrivate.h b/include/Qt/PlayerPrivate.h index c960689a..3c6549d6 100644 --- a/include/Qt/PlayerPrivate.h +++ b/include/Qt/PlayerPrivate.h @@ -34,6 +34,7 @@ #include "../../include/AudioReaderSource.h" #include "../../include/Qt/AudioPlaybackThread.h" #include "../../include/Qt/VideoPlaybackThread.h" +#include "../../include/Qt/VideoCacheThread.h" namespace openshot { @@ -51,6 +52,7 @@ namespace openshot ReaderBase *reader; /// The reader which powers this player AudioPlaybackThread *audioPlayback; /// The audio thread VideoPlaybackThread *videoPlayback; /// The video thread + VideoCacheThread *videoCache; /// The cache thread int speed; /// The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...) RendererBase *renderer; int last_video_position; /// The last frame actually displayed diff --git a/include/Qt/VideoCacheThread.h b/include/Qt/VideoCacheThread.h new file mode 100644 index 00000000..be3a71ca --- /dev/null +++ b/include/Qt/VideoCacheThread.h @@ -0,0 +1,92 @@ +/** + * @file + * @brief Source file for VideoCacheThread class + * @author Jonathan Thomas + * + * @section LICENSE + * + * Copyright (c) 2008-2014 OpenShot Studios, LLC + * . 
This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_VIDEO_CACHE_THREAD_H +#define OPENSHOT_VIDEO_CACHE_THREAD_H + +#include "../../include/OpenMPUtilities.h" +#include "../../include/ReaderBase.h" +#include "../../include/RendererBase.h" + +namespace openshot +{ + using juce::Thread; + using juce::WaitableEvent; + + /** + * @brief The video cache class. + */ + class VideoCacheThread : Thread + { + std::tr1::shared_ptr frame; + int speed; + bool is_playing; + int position; + int current_display_frame; + ReaderBase *reader; + int max_frames; + + /// Constructor + VideoCacheThread(); + /// Destructor + ~VideoCacheThread(); + + /// Get the currently playing frame number (if any) + int getCurrentFramePosition(); + + /// Get Speed (The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...) 
+ int getSpeed() const { return speed; } + + /// Play the audio + void Play(); + + /// Seek the audio thread + void Seek(int new_position); + + /// Set the currently displaying frame number + void setCurrentFramePosition(int current_frame_number); + + /// Set Speed (The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...) + void setSpeed(int new_speed) { speed = new_speed; } + + /// Stop the audio playback + void Stop(); + + /// Start the thread + void run(); + + /// Set the current thread's reader + void Reader(ReaderBase *new_reader) { reader=new_reader; }; + + /// Parent class of VideoCacheThread + friend class PlayerPrivate; + friend class QtPlayer; + }; + +} + +#endif // OPENSHOT_VIDEO_CACHE_THREAD_H diff --git a/include/QtImageReader.h b/include/QtImageReader.h new file mode 100644 index 00000000..e53dc088 --- /dev/null +++ b/include/QtImageReader.h @@ -0,0 +1,110 @@ +/** + * @file + * @brief Header file for QtImageReader class + * @author Jonathan Thomas + * + * @section LICENSE + * + * Copyright (c) 2008-2014 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . 
+ */ + +#ifndef OPENSHOT_QIMAGE_READER_H +#define OPENSHOT_QIMAGE_READER_H + +#include "ReaderBase.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "Cache.h" +#include "Exceptions.h" + +using namespace std; + +namespace openshot +{ + + /** + * @brief This class uses the Qt library, to open image files, and return + * openshot::Frame objects containing the image. + * + * @code + * // Create a reader for a video + * QtImageReader r("MyAwesomeImage.jpeg"); + * r.Open(); // Open the reader + * + * // Get frame number 1 from the video + * tr1::shared_ptr f = r.GetFrame(1); + * + * // Now that we have an openshot::Frame object, lets have some fun! + * f->Display(); // Display the frame on the screen + * + * // Close the reader + * r.Close(); + * @endcode + */ + class QtImageReader : public ReaderBase + { + private: + string path; + tr1::shared_ptr image; + bool is_open; + + public: + + /// Constructor for QtImageReader. This automatically opens the media file and loads + /// frame 1, or it throws one of the following exceptions. + QtImageReader(string path) throw(InvalidFile); + + /// Close File + void Close(); + + /// Get the cache object used by this reader (always returns NULL for this object) + Cache* GetCache() { return NULL; }; + + /// Get an openshot::Frame object for a specific frame number of this reader. All numbers + /// return the same Frame, since they all share the same image data. + /// + /// @returns The requested frame (containing the image) + /// @param requested_frame The frame number that is requested. 
+ tr1::shared_ptr GetFrame(int requested_frame) throw(ReaderClosed); + + /// Determine if reader is open or closed + bool IsOpen() { return is_open; }; + + /// Get and Set JSON methods + string Json(); ///< Generate JSON string of this object + void SetJson(string value) throw(InvalidJSON); ///< Load JSON string into this object + Json::Value JsonValue(); ///< Generate Json::JsonValue for this object + void SetJsonValue(Json::Value root) throw(InvalidFile); ///< Load Json::JsonValue into this object + + /// Open File - which is called by the constructor automatically + void Open() throw(InvalidFile); + }; + +} + +#endif diff --git a/include/ReaderBase.h b/include/ReaderBase.h index 3555aa0f..dc6ef932 100644 --- a/include/ReaderBase.h +++ b/include/ReaderBase.h @@ -32,6 +32,7 @@ #include #include #include +#include "Cache.h" #include "ChannelLayouts.h" #include "Fraction.h" #include "Frame.h" @@ -93,6 +94,7 @@ namespace openshot protected: /// Section lock for multiple threads CriticalSection getFrameCriticalSection; + CriticalSection processingCriticalSection; /// Debug JSON root Json::Value debug_root; @@ -129,6 +131,9 @@ namespace openshot /// Test method to draw a bitmap on a Qt QGraphicsScene void DrawFrameOnScene(string path, long _graphics_scene_address); + /// Get the cache object used by this reader (note: not all readers use cache) + virtual Cache* GetCache() = 0; + /// This method is required for all derived classes of ReaderBase, and returns the /// openshot::Frame object, which contains the image and audio information for that /// frame of video. @@ -138,7 +143,7 @@ namespace openshot virtual tr1::shared_ptr GetFrame(int number) = 0; /// A thread safe version of GetFrame. 
- tr1::shared_ptr GetFrameSafe(int number); + //tr1::shared_ptr GetFrameSafe(int number); /// Determine if reader is open or closed virtual bool IsOpen() = 0; diff --git a/include/TextReader.h b/include/TextReader.h index 5d441f6c..b5bc28ca 100644 --- a/include/TextReader.h +++ b/include/TextReader.h @@ -116,6 +116,9 @@ namespace openshot /// Close Reader void Close(); + /// Get the cache object used by this reader (always returns NULL for this object) + Cache* GetCache() { return NULL; }; + /// Get an openshot::Frame object for a specific frame number of this reader. All numbers /// return the same Frame, since they all share the same image data. /// diff --git a/include/Timeline.h b/include/Timeline.h index a91e9a20..29fdda44 100644 --- a/include/Timeline.h +++ b/include/Timeline.h @@ -30,6 +30,8 @@ #include #include +#include +#include #include "Magick++.h" #include "Cache.h" #include "Color.h" @@ -39,6 +41,7 @@ #include "Effects.h" #include "Fraction.h" #include "Frame.h" +#include "FrameMapper.h" #include "KeyFrame.h" #include "OpenMPUtilities.h" #include "ReaderBase.h" @@ -139,6 +142,7 @@ namespace openshot { class Timeline : public ReaderBase { private: bool is_open; /// clips; /// closing_clips; /// open_clips; /// new_frame, Clip* source_clip, int clip_frame_number, int timeline_frame_number, bool is_top_clip); + /// Apply a FrameMapper to a clip which matches the settings of this timeline + void apply_mapper_to_clip(Clip* clip); + /// Apply JSON Diffs to various objects contained in this timeline void apply_json_to_clips(Json::Value change) throw(InvalidJSONKey); /// Clips() { return clips; }; @@ -206,6 +222,9 @@ namespace openshot { /// Return the list of effects on the timeline list Effects() { return effects; }; + /// Get the cache object used by this reader + Cache* GetCache() { return &final_cache; }; + /// Get an openshot::Frame object for a specific frame number of this timeline. 
/// /// @returns The requested frame (containing the image) diff --git a/include/effects/Mask.h b/include/effects/Mask.h index 7c669450..c5e580eb 100644 --- a/include/effects/Mask.h +++ b/include/effects/Mask.h @@ -44,6 +44,7 @@ #include "../ReaderBase.h" #include "../FFmpegReader.h" #include "../ImageReader.h" +#include "../QtImageReader.h" #include "../ChunkReader.h" using namespace std; @@ -62,17 +63,20 @@ namespace openshot { private: ReaderBase *reader; - Keyframe brightness; - Keyframe contrast; - tr1::shared_ptr mask; + + /// Constrain a color value from 0 to 255 + int constrain(int color_value); + + /// Get grayscale mask image + tr1::shared_ptr get_grayscale_mask(tr1::shared_ptr mask_frame_image, int width, int height, float brightness, float contrast); /// Init effect settings void init_effect_details(); - /// Set brightness and contrast - void set_brightness_and_contrast(tr1::shared_ptr image, float brightness, float contrast); - public: + bool replace_image; ///< Replace the frame image with a grayscale image representing the mask. Great for debugging a mask. + Keyframe brightness; ///< Brightness keyframe to control the wipe / mask effect. A constant value here will prevent animation. + Keyframe contrast; ///< Contrast keyframe to control the hardness of the wipe effect / mask. 
/// Blank constructor, useful when using Json to load the effect properties Mask(); @@ -107,6 +111,11 @@ namespace openshot /// of all properties at any time) string PropertiesJSON(int requested_frame); + /// Get the reader object of the mask grayscale image + ReaderBase* Reader() { return reader; }; + + /// Set a new reader to be used by the mask effect (grayscale image) + void Reader(ReaderBase *new_reader) { reader = new_reader; }; }; } diff --git a/src/AudioReaderSource.cpp b/src/AudioReaderSource.cpp index f6338e3e..c054b8c8 100644 --- a/src/AudioReaderSource.cpp +++ b/src/AudioReaderSource.cpp @@ -51,8 +51,8 @@ AudioReaderSource::~AudioReaderSource() }; // Get more samples from the reader -void AudioReaderSource::GetMoreSamplesFromReader() { - +void AudioReaderSource::GetMoreSamplesFromReader() +{ // Determine the amount of samples needed to fill up this buffer int amount_needed = position; // replace these used samples int amount_remaining = size - amount_needed; // these are unused samples, and need to be carried forward @@ -86,7 +86,7 @@ void AudioReaderSource::GetMoreSamplesFromReader() { if (frame_position == 0) { try { // Get frame object - frame = reader->GetFrameSafe(frame_number); + frame = reader->GetFrame(frame_number); frame_number = frame_number + speed; } catch (const ReaderClosed & e) { diff --git a/src/AudioResampler.cpp b/src/AudioResampler.cpp index 92d849ca..442a91d9 100644 --- a/src/AudioResampler.cpp +++ b/src/AudioResampler.cpp @@ -43,8 +43,6 @@ AudioResampler::AudioResampler() isPrepared = false; // Init buffer source - buffer = new AudioSampleBuffer(2, 1); - buffer->clear(); buffer_source = new AudioBufferSource(buffer); // Init resampling source diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1dc8a70c..2014ef4c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -78,6 +78,8 @@ include_directories(${FFMPEG_INCLUDE_DIR}) # Find JUCE-based openshot Audio libraries FIND_PACKAGE(OpenShotAudio REQUIRED) 
+message('LIBOPENSHOT_AUDIO_INCLUDE_DIRS: ${LIBOPENSHOT_AUDIO_INCLUDE_DIRS}') + # Include Juce headers (needed for compile) include_directories(${LIBOPENSHOT_AUDIO_INCLUDE_DIRS}) @@ -148,7 +150,7 @@ endif(OPENMP_FOUND) include_directories("../thirdparty/jsoncpp/include") ############### PROFILING ################# -#set(PROFILER "/usr/lib/libprofiler.so") +#set(PROFILER "/usr/lib/libprofiler.so.0.3.2") #set(PROFILER "/usr/lib/libtcmalloc.so.4") #### GET LIST OF EFFECT FILES #### @@ -186,6 +188,7 @@ SET ( OPENSHOT_SOURCE_FILES PlayerBase.cpp Point.cpp Profiles.cpp + QtImageReader.cpp QtPlayer.cpp TextReader.cpp Timeline.cpp diff --git a/src/Cache.cpp b/src/Cache.cpp index 24b10ac2..ac7150fa 100644 --- a/src/Cache.cpp +++ b/src/Cache.cpp @@ -31,14 +31,34 @@ using namespace std; using namespace openshot; // Default constructor, no max frames -Cache::Cache() : max_bytes(0), total_bytes(0) { }; +Cache::Cache() : max_bytes(0) { + // Init the critical section + cacheCriticalSection = new CriticalSection(); +}; // Constructor that sets the max frames to cache -Cache::Cache(int64 max_bytes) : max_bytes(max_bytes), total_bytes(0) { }; +Cache::Cache(int64 max_bytes) : max_bytes(max_bytes) { + // Init the critical section + cacheCriticalSection = new CriticalSection(); +}; + +// Default destructor +Cache::~Cache() +{ + frames.clear(); + frame_numbers.clear(); + + // remove critical section + delete cacheCriticalSection; + cacheCriticalSection = NULL; +} // Add a Frame to the cache void Cache::Add(int frame_number, tr1::shared_ptr frame) { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + // Remove frame if it already exists if (Exists(frame_number)) // Move frame to front of queue @@ -49,9 +69,6 @@ void Cache::Add(int frame_number, tr1::shared_ptr frame) frames[frame_number] = frame; frame_numbers.push_front(frame_number); - // Increment total bytes (of cache) - total_bytes += frame->GetBytes(); - // 
Clean up old frames CleanUp(); } @@ -60,6 +77,9 @@ void Cache::Add(int frame_number, tr1::shared_ptr frame) // Check for the existance of a frame in the cache bool Cache::Exists(int frame_number) { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + // Is frame number cached if (frames.count(frame_number)) return true; @@ -70,6 +90,9 @@ bool Cache::Exists(int frame_number) // Get a frame from the cache tr1::shared_ptr Cache::GetFrame(int frame_number) { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + // Does frame exists in cache? if (Exists(frame_number)) { @@ -87,6 +110,9 @@ tr1::shared_ptr Cache::GetFrame(int frame_number) // Get the smallest frame number tr1::shared_ptr Cache::GetSmallestFrame() { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + tr1::shared_ptr f; // Loop through frame numbers @@ -105,14 +131,31 @@ tr1::shared_ptr Cache::GetSmallestFrame() return f; } +// Calculate the total bytes of all frames currently in the cache +int64 Cache::GetBytes() +{ + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + + int64 total_bytes = 0; + + // Loop through frames, and calculate total bytes + deque::reverse_iterator itr; + for(itr = frame_numbers.rbegin(); itr != frame_numbers.rend(); ++itr) + { + //cout << "get bytes from frame " << *itr << ", frames.count(" << *itr << "): " << frames.count(*itr) << endl; + //if (frames.count(*itr) > 0) + total_bytes += frames[*itr]->GetBytes(); + } + + return total_bytes; +} + // Remove a specific frame void Cache::Remove(int frame_number) { - // Get the frame (or throw exception) - tr1::shared_ptr f = GetFrame(frame_number); - - // Decrement the total bytes (for this cache) - total_bytes -= f->GetBytes(); + // Create a scoped lock, to protect the cache from 
multiple threads + const GenericScopedLock lock(*cacheCriticalSection); // Loop through frame numbers deque::iterator itr; @@ -133,6 +176,9 @@ void Cache::Remove(int frame_number) // Move frame to front of queue (so it lasts longer) void Cache::MoveToFront(int frame_number) { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + // Does frame exists in cache? if (Exists(frame_number)) { @@ -159,21 +205,19 @@ void Cache::MoveToFront(int frame_number) // Clear the cache of all frames void Cache::Clear() { - deque::iterator itr; - for(itr = frame_numbers.begin(); itr != frame_numbers.end(); ++itr) - // Remove frame from map - frames.erase(*itr); + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); - // pop each of the frames from the queue... which empties the queue - while(!frame_numbers.empty()) frame_numbers.pop_back(); - - // Reset total bytes (of cache) - total_bytes = 0; + frames.clear(); + frame_numbers.clear(); } // Count the frames in the queue int Cache::Count() { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + // Return the number of frames in the cache return frames.size(); } @@ -181,11 +225,13 @@ int Cache::Count() // Clean up cached frames that exceed the number in our max_bytes variable void Cache::CleanUp() { + // Create a scoped lock, to protect the cache from multiple threads + const GenericScopedLock lock(*cacheCriticalSection); + // Do we auto clean up? 
if (max_bytes > 0) { - // check against max bytes (and always leave at least 20 frames in the cache) - while (total_bytes > max_bytes && frame_numbers.size() > 20) + while (GetBytes() > max_bytes && frame_numbers.size() > 20) { // Remove the oldest frame int frame_to_remove = frame_numbers.back(); @@ -196,7 +242,6 @@ void Cache::CleanUp() } } - // Display a list of cached frame numbers void Cache::Display() { @@ -211,6 +256,11 @@ void Cache::Display() } } - - +// Set maximum bytes to a different amount based on a ReaderInfo struct +void Cache::SetMaxBytesFromInfo(int number_of_frames, int width, int height, int sample_rate, int channels) +{ + // n frames X height X width X 4 colors of chars X audio channels X 4 byte floats + int64 bytes = number_of_frames * (height * width * 4 + (sample_rate * channels * 4)); + SetMaxBytes(bytes); +} diff --git a/src/Clip.cpp b/src/Clip.cpp index 94c3c49a..d05de948 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -60,7 +60,7 @@ void Clip::init_settings() volume = Keyframe(1.0); // Init audio waveform color - wave_color = (Color){Keyframe(0), Keyframe(28672), Keyframe(65280)}; + wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255); // Init crop settings crop_gravity = GRAVITY_CENTER; @@ -85,6 +85,7 @@ void Clip::init_settings() reader = NULL; resampler = NULL; audio_cache = NULL; + manage_reader = false; } // Default Constructor for a clip @@ -95,11 +96,14 @@ Clip::Clip() } // Constructor with reader -Clip::Clip(ReaderBase* reader) : reader(reader) +Clip::Clip(ReaderBase* new_reader) { // Init all default settings init_settings(); + // Set the reader + reader = new_reader; + // Open and Close the reader (to set the duration of the clip) Open(); Close(); @@ -136,7 +140,7 @@ Clip::Clip(string path) try { // Try an image reader - reader = new ImageReader(path); + reader = new QtImageReader(path); } catch(...) 
{ try @@ -149,8 +153,26 @@ Clip::Clip(string path) } // Update duration - if (reader) + if (reader) { End(reader->info.duration); + manage_reader = true; + } +} + +// Destructor +Clip::~Clip() +{ + // Delete the reader if clip created it + if (manage_reader && reader) { + delete reader; + reader = NULL; + } + + // Close the resampler + if (resampler) { + delete resampler; + resampler = NULL; + } } /// Set the current reader @@ -235,23 +257,21 @@ tr1::shared_ptr Clip::GetFrame(int requested_frame) throw(ReaderClosed) new_frame_number = time.GetInt(requested_frame); - // Now that we have re-mapped what frame number is needed, go and get the frame pointer - tr1::shared_ptr original_frame = reader->GetFrameSafe(new_frame_number); + tr1::shared_ptr original_frame = reader->GetFrame(new_frame_number); // Create a new frame tr1::shared_ptr frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount())); frame->SampleRate(original_frame->SampleRate()); + frame->ChannelsLayout(original_frame->ChannelsLayout()); // Copy the image from the odd field - frame->AddImage(original_frame->GetImage()); + frame->AddImage(tr1::shared_ptr(new QImage(*original_frame->GetImage()))); // Loop through each channel, add audio for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++) frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0); - - // Get time mapped frame number (used to increase speed, change direction, etc...) 
tr1::shared_ptr new_frame = get_time_mapped_frame(frame, requested_frame); @@ -320,7 +340,7 @@ tr1::shared_ptr Clip::get_time_mapped_frame(tr1::shared_ptr frame, resampler = new AudioResampler(); // Get new frame number - int new_frame_number = time.GetInt(frame_number); + int new_frame_number = round(time.GetValue(frame_number)); // Create a new frame int samples_in_frame = Frame::GetSamplesPerFrame(new_frame_number, reader->info.fps, reader->info.sample_rate, frame->GetAudioChannelsCount()); @@ -341,41 +361,32 @@ tr1::shared_ptr Clip::get_time_mapped_frame(tr1::shared_ptr frame, // Determine if we are speeding up or slowing down if (time.GetRepeatFraction(frame_number).den > 1) { + // SLOWING DOWN AUDIO // Resample data, and return new buffer pointer - AudioSampleBuffer *buffer = NULL; + AudioSampleBuffer *resampled_buffer = NULL; int resampled_buffer_size = 0; - if (time.GetRepeatFraction(frame_number).num == 1) - { - // SLOW DOWN audio (split audio) - samples = new juce::AudioSampleBuffer(channels, number_of_samples); - samples->clear(); + // SLOW DOWN audio (split audio) + samples = new juce::AudioSampleBuffer(channels, number_of_samples); + samples->clear(); - // Loop through channels, and get audio samples - for (int channel = 0; channel < channels; channel++) - // Get the audio samples for this channel - samples->addFrom(channel, 0, reader->GetFrame(new_frame_number)->GetAudioSamples(channel), number_of_samples, 1.0f); + // Loop through channels, and get audio samples + for (int channel = 0; channel < channels; channel++) + // Get the audio samples for this channel + samples->addFrom(channel, 0, reader->GetFrame(new_frame_number)->GetAudioSamples(channel), number_of_samples, 1.0f); - // Reverse the samples (if needed) - if (!time.IsIncreasing(frame_number)) - reverse_buffer(samples); + // Reverse the samples (if needed) + if (!time.IsIncreasing(frame_number)) + reverse_buffer(samples); - // Resample audio to be X times slower (where X is the denominator of 
the repeat fraction) - resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den); + // Resample audio to be X times slower (where X is the denominator of the repeat fraction) + resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den); - // Resample the data (since it's the 1st slice) - buffer = resampler->GetResampledBuffer(); + // Resample the data (since it's the 1st slice) + resampled_buffer = resampler->GetResampledBuffer(); - // Save the resampled data in the cache - audio_cache = new juce::AudioSampleBuffer(channels, buffer->getNumSamples()); - audio_cache->clear(); - for (int channel = 0; channel < channels; channel++) - // Get the audio samples for this channel - audio_cache->addFrom(channel, 0, buffer->getReadPointer(channel), buffer->getNumSamples(), 1.0f); - } - - // Get the length of the resampled buffer - resampled_buffer_size = audio_cache->getNumSamples(); + // Get the length of the resampled buffer (if one exists) + resampled_buffer_size = resampled_buffer->getNumSamples(); // Just take the samples we need for the requested frame int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1)); @@ -383,25 +394,10 @@ tr1::shared_ptr Clip::get_time_mapped_frame(tr1::shared_ptr frame, start -= 1; for (int channel = 0; channel < channels; channel++) // Add new (slower) samples, to the frame object - new_frame->AddAudio(true, channel, 0, audio_cache->getReadPointer(channel, start), number_of_samples, 1.0f); - - // Clean up if the final section - if (time.GetRepeatFraction(frame_number).num == time.GetRepeatFraction(frame_number).den) - { - // Clear, since we don't want it maintain state yet - delete audio_cache; - audio_cache = NULL; - } + new_frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start), number_of_samples, 1.0f); // Clean up - buffer = NULL; - - - // Determine next unique frame (after these repeating frames) - //int next_unique_frame = time.GetInt(frame_number 
+ (time.GetRepeatFraction(frame_number).den - time.GetRepeatFraction(frame_number).num) + 1); - //if (next_unique_frame != new_frame_number) - // // Overlay the next frame on top of this frame (to create a smoother slow motion effect) - // new_frame->AddImage(reader->GetFrame(next_unique_frame)->GetImage(), float(time.GetRepeatFraction(frame_number).num) / float(time.GetRepeatFraction(frame_number).den)); + resampled_buffer = NULL; } else if (abs(delta) > 1 && abs(delta) < 100) @@ -509,10 +505,6 @@ tr1::shared_ptr Clip::get_time_mapped_frame(tr1::shared_ptr frame, } - // clean up - //delete resampler; - //resampler = NULL; - delete samples; samples = NULL; @@ -780,6 +772,12 @@ void Clip::SetJsonValue(Json::Value root) { reader = new FFmpegReader(root["reader"]["path"].asString()); reader->SetJsonValue(root["reader"]); + } else if (type == "QtImageReader") { + + // Create new reader + reader = new QtImageReader(root["reader"]["path"].asString()); + reader->SetJsonValue(root["reader"]); + } else if (type == "ImageReader") { // Create new reader @@ -805,6 +803,10 @@ void Clip::SetJsonValue(Json::Value root) { reader->SetJsonValue(root["reader"]); } + // mark as managed reader + if (reader) + manage_reader = true; + // Re-Open reader (if needed) if (already_open) reader->Open(); diff --git a/src/Color.cpp b/src/Color.cpp index 2fe0d0bf..f351970c 100644 --- a/src/Color.cpp +++ b/src/Color.cpp @@ -29,14 +29,56 @@ using namespace openshot; +// Constructor which takes R,G,B,A +Color::Color(unsigned char Red, unsigned char Green, unsigned char Blue, unsigned char Alpha) +{ + // Set initial points + red.AddPoint(1, (float)Red); + green.AddPoint(1, (float)Green); + blue.AddPoint(1, (float)Blue); + alpha.AddPoint(1, (float)Alpha); +} + +// Constructor which takes 4 existing Keyframe curves +Color::Color(Keyframe Red, Keyframe Green, Keyframe Blue, Keyframe Alpha) +{ + // Assign existing keyframes + red = Red; + green = Green; + blue = Blue; + alpha = Alpha; +} + +// 
Constructor which takes a HEX color code +Color::Color(string color_hex) +{ + // Create a QColor from hex + QColor color(QString::fromStdString(color_hex)); + red.AddPoint(1, color.red()); + green.AddPoint(1, color.green()); + blue.AddPoint(1, color.blue()); + alpha.AddPoint(1, color.alpha()); +} + // Get the HEX value of a color at a specific frame string Color::GetColorHex(int frame_number) { int r = red.GetInt(frame_number); int g = green.GetInt(frame_number); int b = blue.GetInt(frame_number); + int a = alpha.GetInt(frame_number); - return QColor( r,g,b ).name().toStdString(); + return QColor( r,g,b,a ).name().toStdString(); +} + +// Get the distance between 2 RGB pairs (alpha is ignored) +long Color::GetDistance(long R1, long G1, long B1, long R2, long G2, long B2) +{ + long rmean = ( R1 + R2 ) / 2; + long r = R1 - R2; + long g = G1 - G2; + long b = B1 - B2; + return sqrt((((512+rmean)*r*r)>>8) + 4*g*g + (((767-rmean)*b*b)>>8)); } // Generate JSON string of this object @@ -54,6 +96,7 @@ Json::Value Color::JsonValue() { root["red"] = red.JsonValue(); root["green"] = green.JsonValue(); root["blue"] = blue.JsonValue(); + root["alpha"] = alpha.JsonValue(); // return JsonValue return root; @@ -92,4 +135,6 @@ void Color::SetJsonValue(Json::Value root) { green.SetJsonValue(root["green"]); if (!root["blue"].isNull()) blue.SetJsonValue(root["blue"]); + if (!root["alpha"].isNull()) + alpha.SetJsonValue(root["alpha"]); } diff --git a/src/DecklinkInput.cpp b/src/DecklinkInput.cpp index 93f98074..3647c05e 100644 --- a/src/DecklinkInput.cpp +++ b/src/DecklinkInput.cpp @@ -245,10 +245,8 @@ omp_set_nested(true); tr1::shared_ptr f(new openshot::Frame(copy_frameCount, width, height, "#000000", 2048, 2)); // Add Image data to openshot frame - f->AddImage(width, height, "ARGB", Magick::CharPixel, (uint8_t*)frameBytes); - - // TEST EFFECTS - f->TransparentColors("#2d751f", 10.0); + // TODO: Fix Decklink support with QImage Upgrade + //f->AddImage(width, height, "ARGB", 
Magick::CharPixel, (uint8_t*)frameBytes); #pragma omp critical (blackmagic_input_queue) { diff --git a/src/DecklinkOutput.cpp b/src/DecklinkOutput.cpp index 486ec796..dfd99c04 100644 --- a/src/DecklinkOutput.cpp +++ b/src/DecklinkOutput.cpp @@ -239,21 +239,22 @@ void DeckLinkOutputDelegate::WriteFrame(tr1::shared_ptr frame) int numBytes = frame->GetHeight() * frame->GetWidth() * 4; uint8_t *castBytes = new uint8_t[numBytes]; + // TODO: Fix Decklink support with QImage Upgrade // Get a list of pixels in our frame's image. Each pixel is represented by // a PixelPacket struct, which has 4 properties: .red, .blue, .green, .alpha - const Magick::PixelPacket *pixel_packets = frame->GetPixels(); - - // loop through ImageMagic pixel structs, and put the colors in a regular array, and move the - // colors around to match the Decklink order (ARGB). - for (int packet = 0, row = 0; row < numBytes; packet++, row+=4) - { - // Update buffer (which is already linked to the AVFrame: pFrameRGB) - // Each color needs to be scaled to 8 bit (using the ImageMagick built-in ScaleQuantumToChar function) - castBytes[row] = MagickCore::ScaleQuantumToChar((Magick::Quantum) 0); // alpha - castBytes[row+1] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].red); - castBytes[row+2] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].green); - castBytes[row+3] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].blue); - } +// const Magick::PixelPacket *pixel_packets = frame->GetPixels(); +// +// // loop through ImageMagic pixel structs, and put the colors in a regular array, and move the +// // colors around to match the Decklink order (ARGB). 
+// for (int packet = 0, row = 0; row < numBytes; packet++, row+=4) +// { +// // Update buffer (which is already linked to the AVFrame: pFrameRGB) +// // Each color needs to be scaled to 8 bit (using the ImageMagick built-in ScaleQuantumToChar function) +// castBytes[row] = MagickCore::ScaleQuantumToChar((Magick::Quantum) 0); // alpha +// castBytes[row+1] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].red); +// castBytes[row+2] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].green); +// castBytes[row+3] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].blue); +// } #pragma omp critical (blackmagic_output_queue) { diff --git a/src/DummyReader.cpp b/src/DummyReader.cpp index 55172462..015899cd 100644 --- a/src/DummyReader.cpp +++ b/src/DummyReader.cpp @@ -80,9 +80,6 @@ void DummyReader::Open() throw(InvalidFile) // Create or get frame object image_frame = tr1::shared_ptr(new Frame(1, info.width, info.height, "#000000", info.sample_rate, info.channels)); - // Add Image data to frame - image_frame->AddImage(tr1::shared_ptr(new Magick::Image(Magick::Geometry(info.width, info.height), Magick::Color("#000000")))); - // Mark as "open" is_open = true; } @@ -108,6 +105,9 @@ tr1::shared_ptr DummyReader::GetFrame(int requested_frame) throw(ReaderCl if (image_frame) { + // Create a scoped lock, allowing only a single thread to run the following code at one time + const GenericScopedLock lock(getFrameCriticalSection); + // Always return same frame (regardless of which frame number was requested) image_frame->number = requested_frame; return image_frame; diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index 18bba251..3e02c6eb 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -44,9 +44,8 @@ FFmpegReader::FFmpegReader(string path) throw(InvalidFile, NoStreamsFound, Inval avcodec_register_all(); // Init cache - int64 bytes = 720 * 1280 * 4 + (44100 * 2 * 4); - working_cache = Cache(0); - 
final_cache = Cache(20 * bytes); // 20 frames X 720 video, 4 colors of chars, 2 audio channels of 4 byte floats + working_cache.SetMaxBytes(0); + final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 4, info.width, info.height, info.sample_rate, info.channels); // Open and Close the reader, to populate it's attributes (such as height, width, etc...) Open(); @@ -205,6 +204,9 @@ void FFmpegReader::Open() throw(InvalidFile, NoStreamsFound, InvalidCodec) previous_packet_location.frame = -1; previous_packet_location.sample_start = 0; + // Adjust cache size based on size of frame and audio + final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 4, info.width, info.height, info.sample_rate, info.channels); + // Mark as "open" is_open = true; } @@ -418,6 +420,18 @@ tr1::shared_ptr FFmpegReader::GetFrame(int requested_frame) throw(OutOfBo } else { + // Create a scoped lock, allowing only a single thread to run the following code at one time + const GenericScopedLock lock(getFrameCriticalSection); + + // Check the cache a 2nd time (due to a potential previous lock) + if (final_cache.Exists(requested_frame)) { + // Debug output + AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1); + + // Return the cached frame + return final_cache.GetFrame(requested_frame); + } + // Frame is not in cache // Reset seek count seek_count = 0; @@ -483,7 +497,7 @@ tr1::shared_ptr FFmpegReader::ReadStream(int requested_frame) // Loop through the stream until the correct frame is found while (true) { - #pragma omp critical (packet_cache) + // Get the next packet into a local variable called packet packet_error = GetNextPacket(); // Wait if too many frames are being processed @@ -506,7 +520,7 @@ tr1::shared_ptr FFmpegReader::ReadStream(int requested_frame) { // Check the status of a seek (if any) if (is_seeking) - #pragma omp critical (openshot_cache) + #pragma omp critical (openshot_seek) check_seek = 
CheckSeek(true); else check_seek = false; @@ -519,7 +533,7 @@ tr1::shared_ptr FFmpegReader::ReadStream(int requested_frame) continue; } - #pragma omp critical (packet_cache) + // Get the AVFrame from the current packet frame_finished = GetAVFrame(); // Check if the AVFrame is finished and set it @@ -538,7 +552,7 @@ tr1::shared_ptr FFmpegReader::ReadStream(int requested_frame) { // Check the status of a seek (if any) if (is_seeking) - #pragma omp critical (openshot_cache) + #pragma omp critical (openshot_seek) check_seek = CheckSeek(false); else check_seek = false; @@ -563,17 +577,14 @@ tr1::shared_ptr FFmpegReader::ReadStream(int requested_frame) // Check if working frames are 'finished' bool is_cache_found = false; - #pragma omp critical (openshot_cache) - { - if (!is_seeking) - CheckWorkingFrames(false); + if (!is_seeking) + CheckWorkingFrames(false); - // Check if requested 'final' frame is available - is_cache_found = final_cache.Exists(requested_frame); + // Check if requested 'final' frame is available + is_cache_found = final_cache.Exists(requested_frame); - // Increment frames processed - packets_processed++; - } + // Increment frames processed + packets_processed++; // Break once the frame is found if (is_cache_found && packets_processed >= minimum_packets) @@ -621,18 +632,23 @@ tr1::shared_ptr FFmpegReader::ReadStream(int requested_frame) // Get the next packet (if any) int FFmpegReader::GetNextPacket() { + int found_packet = 0; AVPacket *next_packet = new AVPacket(); - int found_packet = av_read_frame(pFormatCtx, next_packet); + found_packet = av_read_frame(pFormatCtx, next_packet); if (found_packet >= 0) { - // Add packet to packet cache - packets[next_packet] = next_packet; + #pragma omp critical (packet_cache) + { + // Add packet to packet cache + packets[next_packet] = next_packet; - // Update current packet pointer - packet = packets[next_packet]; + // Update current packet pointer + packet = packets[next_packet]; + } // end omp critical - }else + } 
+ else { // Free packet, since it's unused av_free_packet(next_packet); @@ -646,10 +662,11 @@ int FFmpegReader::GetNextPacket() // Get an AVFrame (if any) bool FFmpegReader::GetAVFrame() { - // Decode video frame - int frameFinished = 0; + int frameFinished = -1; + // Decode video frame AVFrame *next_frame = avcodec_alloc_frame(); + #pragma omp critical (packet_cache) avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet); // is frame finished @@ -661,9 +678,12 @@ bool FFmpegReader::GetAVFrame() avpicture_alloc(copyFrame, pCodecCtx->pix_fmt, info.width, info.height); av_picture_copy(copyFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt, info.width, info.height); - // add to AVFrame cache (if frame finished) - frames[copyFrame] = copyFrame; - pFrame = frames[copyFrame]; + #pragma omp critical (packet_cache) + { + // add to AVFrame cache (if frame finished) + frames[copyFrame] = copyFrame; + pFrame = frames[copyFrame]; + } // Detect interlaced frame (only once) if (!check_interlace) @@ -672,6 +692,7 @@ bool FFmpegReader::GetAVFrame() info.interlaced_frame = next_frame->interlaced_frame; info.top_field_first = next_frame->top_field_first; } + } else { @@ -736,12 +757,9 @@ void FFmpegReader::ProcessVideoPacket(int requested_frame) // Are we close enough to decode the frame? 
if ((current_frame) < (requested_frame - 20)) { - #pragma omp critical (packet_cache) - { - // Remove frame and packet - RemoveAVFrame(pFrame); - RemoveAVPacket(packet); - } + // Remove frame and packet + RemoveAVFrame(pFrame); + RemoveAVPacket(packet); // Debug output AppendDebugMethod("FFmpegReader::ProcessVideoPacket (Skipped)", "requested_frame", requested_frame, "current_frame", current_frame, "", -1, "", -1, "", -1, "", -1); @@ -758,7 +776,6 @@ void FFmpegReader::ProcessVideoPacket(int requested_frame) int height = info.height; int width = info.width; long int video_length = info.video_length; - Cache *my_cache = &working_cache; AVPacket *my_packet = packets[packet]; AVPicture *my_frame = frames[pFrame]; @@ -776,7 +793,7 @@ void FFmpegReader::ProcessVideoPacket(int requested_frame) if (!seek_video_frame_found && is_seeking) seek_video_frame_found = current_frame; - #pragma omp task firstprivate(current_frame, my_cache, my_packet, my_frame, height, width, video_length, pix_fmt, img_convert_ctx) + #pragma omp task firstprivate(current_frame, my_packet, my_frame, height, width, video_length, pix_fmt, img_convert_ctx) { // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it) AVFrame *pFrameRGB = NULL; @@ -801,34 +818,28 @@ void FFmpegReader::ProcessVideoPacket(int requested_frame) sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0, height, pFrameRGB->data, pFrameRGB->linesize); - tr1::shared_ptr f; - #pragma omp critical (openshot_cache) - // Create or get frame object - f = CreateFrame(current_frame); + // Create or get the existing frame object + tr1::shared_ptr f = CreateFrame(current_frame); // Add Image data to frame - f->AddImage(width, height, "RGBA", Magick::CharPixel, buffer); + f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer); - #pragma omp critical (openshot_cache) - // Update working cache - my_cache->Add(f->number, f); + // Update working cache + working_cache.Add(f->number, f); // Free 
the RGB image av_free(buffer); avcodec_free_frame(&pFrameRGB); - #pragma omp critical (packet_cache) - { - // Remove frame and packet - RemoveAVFrame(my_frame); - RemoveAVPacket(my_packet); - } + // Remove frame and packet + RemoveAVFrame(my_frame); + RemoveAVPacket(my_packet); // Remove video frame from list of processing video frames - #pragma omp critical (processing_list) { - processing_video_frames.erase(current_frame); - processed_video_frames[current_frame] = current_frame; + const GenericScopedLock lock(processingCriticalSection); + processing_video_frames.erase(current_frame); + processed_video_frames[current_frame] = current_frame; } // Debug output @@ -836,7 +847,6 @@ void FFmpegReader::ProcessVideoPacket(int requested_frame) } // end omp task - } // Process an audio packet @@ -845,7 +855,6 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int // Are we close enough to decode the frame's audio? if (target_frame < (requested_frame - 20)) { - #pragma omp critical (packet_cache) // Remove packet RemoveAVPacket(packet); @@ -857,7 +866,6 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int } // Init some local variables (for OpenMP) - Cache *my_cache = &working_cache; AVPacket *my_packet = packets[packet]; // Track 1st audio packet after a successful seek @@ -951,7 +959,7 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int // Process the audio samples in a separate thread (this includes resampling to 16 bit integer, and storing // in a openshot::Frame object). 
- #pragma omp task firstprivate(requested_frame, target_frame, my_cache, starting_sample, my_packet, audio_frame) + #pragma omp task firstprivate(requested_frame, target_frame, starting_sample, my_packet, audio_frame) { // Allocate audio buffer int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; @@ -965,6 +973,7 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0); AVAudioResampleContext *avr = NULL; + int nb_samples = 0; #pragma ordered { // setup resample context @@ -978,7 +987,6 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int av_opt_set_int(avr, "in_channels", info.channels, 0); av_opt_set_int(avr, "out_channels", info.channels, 0); int r = avresample_open(avr); - int nb_samples = 0; // Convert audio samples nb_samples = avresample_convert(avr, // audio resample context @@ -1056,10 +1064,8 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int if (samples > remaining_samples) samples = remaining_samples; - tr1::shared_ptr f; - #pragma omp critical (openshot_cache) - // Create or get frame object - f = CreateFrame(starting_frame_number); + // Create or get the existing frame object + tr1::shared_ptr f = CreateFrame(starting_frame_number); // Determine if this frame was "partially" filled in if (samples_per_frame == start + samples) @@ -1069,15 +1075,13 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int // Add samples for current channel to the frame. 
Reduce the volume to 98%, to prevent // some louder samples from maxing out at 1.0 (not sure why this happens) - #pragma omp critical (openshot_adding_audio) f->AddAudio(true, channel_filter, start, iterate_channel_buffer, samples, 0.98f); // Debug output AppendDebugMethod("FFmpegReader::ProcessAudioPacket (f->AddAudio)", "frame", starting_frame_number, "start", start, "samples", samples, "channel", channel_filter, "partial_frame", partial_frame, "samples_per_frame", samples_per_frame); // Add or update cache - #pragma omp critical (openshot_cache) - my_cache->Add(f->number, f); + working_cache.Add(f->number, f); // Decrement remaining samples remaining_samples -= samples; @@ -1120,7 +1124,7 @@ void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int } } - #pragma omp critical (packet_cache) + // Remove this packet RemoveAVPacket(my_packet); // Debug output @@ -1152,10 +1156,13 @@ void FFmpegReader::Seek(int requested_frame) throw(TooManySeeks) working_cache.Clear(); // Clear processed lists - processing_audio_frames.clear(); - processing_video_frames.clear(); - processed_video_frames.clear(); - processed_audio_frames.clear(); + { + const GenericScopedLock lock(processingCriticalSection); + processing_audio_frames.clear(); + processing_video_frames.clear(); + processed_video_frames.clear(); + processed_audio_frames.clear(); + } // Reset the last frame variable last_frame = 0; @@ -1388,31 +1395,28 @@ AudioLocation FFmpegReader::GetAudioPTSLocation(int pts) // Create a new Frame (or return an existing one) and add it to the working queue. 
tr1::shared_ptr FFmpegReader::CreateFrame(int requested_frame) { + tr1::shared_ptr output; // Check working cache if (working_cache.Exists(requested_frame)) - { // Return existing frame - tr1::shared_ptr output = working_cache.GetFrame(requested_frame); - - return output; - } + output = working_cache.GetFrame(requested_frame); else { // Create a new frame on the working cache - tr1::shared_ptr f(new Frame(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels)); - f->SetPixelRatio(info.pixel_ratio.num, info.pixel_ratio.den); // update pixel ratio - f->ChannelsLayout(info.channel_layout); // update audio channel layout from the parent reader - f->SampleRate(info.sample_rate); // update the frame's sample rate of the parent reader + output = tr1::shared_ptr(new Frame(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels)); + output->SetPixelRatio(info.pixel_ratio.num, info.pixel_ratio.den); // update pixel ratio + output->ChannelsLayout(info.channel_layout); // update audio channel layout from the parent reader + output->SampleRate(info.sample_rate); // update the frame's sample rate of the parent reader - working_cache.Add(requested_frame, f); + working_cache.Add(requested_frame, output); // Set the largest processed frame (if this is larger) if (requested_frame > largest_frame_processed) largest_frame_processed = requested_frame; - - // Return new frame - return f; } + + // Return new frame + return output; } // Determine if frame is partial due to seek @@ -1433,7 +1437,6 @@ bool FFmpegReader::IsPartialFrame(int requested_frame) { // Check the working queue, and move finished frames to the finished queue void FFmpegReader::CheckWorkingFrames(bool end_of_stream) { - // Loop through all working queue frames while (true) { @@ -1444,8 +1447,14 @@ void 
FFmpegReader::CheckWorkingFrames(bool end_of_stream) // Get the front frame of working cache tr1::shared_ptr f(working_cache.GetSmallestFrame()); - bool is_video_ready = processed_video_frames.count(f->number); - bool is_audio_ready = processed_audio_frames.count(f->number); + bool is_video_ready = false; + bool is_audio_ready = false; + { // limit scope of next few lines + const GenericScopedLock lock(processingCriticalSection); + is_video_ready = processed_video_frames.count(f->number); + is_audio_ready = processed_audio_frames.count(f->number); + } + if (previous_packet_location.frame == f->number && !end_of_stream) is_audio_ready = false; // don't finalize the last processed audio frame bool is_seek_trash = IsPartialFrame(f->number); @@ -1455,7 +1464,7 @@ void FFmpegReader::CheckWorkingFrames(bool end_of_stream) if (!info.has_audio) is_audio_ready = true; // Debug output - AppendDebugMethod("FFmpegReader::CheckWorkingFrames", "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "processed_video_frames.count(f->number)", processed_video_frames.count(f->number), "processed_audio_frames.count(f->number)", processed_audio_frames.count(f->number), "", -1); + AppendDebugMethod("FFmpegReader::CheckWorkingFrames", "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "", -1, "", -1, "", -1); // Check if working frame is final if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash || working_cache.Count() >= 200) @@ -1611,35 +1620,43 @@ void FFmpegReader::CheckFPS() // Remove AVFrame from cache (and deallocate it's memory) void FFmpegReader::RemoveAVFrame(AVPicture* remove_frame) { - // Remove pFrame (if exists) - if (frames.count(remove_frame)) + #pragma omp critical (packet_cache) { - // Free memory - avpicture_free(frames[remove_frame]); + // Remove pFrame (if exists) + if (frames.count(remove_frame)) + { + // Free memory + 
avpicture_free(frames[remove_frame]); - // Remove from cache - frames.erase(remove_frame); + // Remove from cache + frames.erase(remove_frame); - // Delete the object - delete remove_frame; - } + // Delete the object + delete remove_frame; + } + + } // end omp critical } // Remove AVPacket from cache (and deallocate it's memory) void FFmpegReader::RemoveAVPacket(AVPacket* remove_packet) { - // Remove packet (if any) - if (packets.count(remove_packet)) + #pragma omp critical (packet_cache) { - // deallocate memory for packet - av_free_packet(remove_packet); + // Remove packet (if any) + if (packets.count(remove_packet)) + { + // deallocate memory for packet + av_free_packet(remove_packet); - // Remove from cache - packets.erase(remove_packet); + // Remove from cache + packets.erase(remove_packet); - // Delete the object - delete remove_packet; - } + // Delete the object + delete remove_packet; + } + + } // end omp critical } /// Get the smallest video frame that is still being processed diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index d2f8fbb1..7bb742f2 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -466,6 +466,7 @@ void FFmpegWriter::write_queued_frames() AVFrame *av_frame = av_frames[frame]; // deallocate AVPicture and AVFrame + av_freep(&av_frame[0]); // picture buffer avcodec_free_frame(&av_frame); av_frames.erase(frame); } @@ -660,6 +661,9 @@ void FFmpegWriter::flush_encoders() if (error_code != 0) { AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1); } + + // deallocate memory for packet + av_free_packet(&pkt); } @@ -687,6 +691,12 @@ void FFmpegWriter::close_audio(AVFormatContext *oc, AVStream *st) avresample_free(&avr); avr = NULL; } + + if (avr_planar) { + avresample_close(avr_planar); + avresample_free(&avr_planar); + avr_planar = NULL; + } } // Close the writer @@ -721,8 +731,8 @@ void FFmpegWriter::Close() 
write_video_count = 0; write_audio_count = 0; - // Free the stream - av_free(oc); + // Free the context + av_freep(&oc); // Close writer is_open = false; @@ -1060,7 +1070,7 @@ void FFmpegWriter::write_audio_packets(bool final) int samples_position = 0; - AppendDebugMethod("FFmpegWriter::write_audio_packets", "final", final, "total_frame_samples", total_frame_samples, "remaining_frame_samples", remaining_frame_samples, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "", -1); + AppendDebugMethod("FFmpegWriter::write_audio_packets", "final", final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO); // Keep track of the original sample format AVSampleFormat output_sample_fmt = audio_codec->sample_fmt; @@ -1134,7 +1144,7 @@ void FFmpegWriter::write_audio_packets(bool final) // Convert audio samples nb_samples = avresample_convert(avr, // audio resample context - audio_converted->data, // output data pointers + audio_converted->data, // output data pointers audio_converted->linesize[0], // output plane size, in bytes. 
(0 if unknown) audio_converted->nb_samples, // maximum number of samples that the output buffer can hold audio_frame->data, // input data pointers @@ -1149,9 +1159,11 @@ void FFmpegWriter::write_audio_packets(bool final) // Update remaining samples (since we just resampled the audio, and things might have changed) remaining_frame_samples = nb_samples * (float(av_get_bytes_per_sample(output_sample_fmt)) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)) * info.channels; + //remaining_frame_samples = nb_samples * info.channels; // Remove converted audio av_freep(&audio_frame[0]); // this deletes the all_queued_samples array + all_queued_samples = NULL; // this array cleared with above call avcodec_free_frame(&audio_frame); av_freep(&audio_converted[0]); avcodec_free_frame(&audio_converted); @@ -1332,21 +1344,24 @@ void FFmpegWriter::write_audio_packets(bool final) final = false; } - // Delete arrays + // Delete arrays (if needed) if (all_resampled_samples) { delete[] all_resampled_samples; all_resampled_samples = NULL; } + if (all_queued_samples) { + delete[] all_queued_samples; + all_queued_samples = NULL; + } } // end task } // Allocate an AVFrame object -AVFrame* FFmpegWriter::allocate_avframe(PixelFormat pix_fmt, int width, int height, int *buffer_size) +AVFrame* FFmpegWriter::allocate_avframe(PixelFormat pix_fmt, int width, int height, int *buffer_size, uint8_t *new_buffer) { // Create an RGB AVFrame AVFrame *new_av_frame = NULL; - uint8_t *new_buffer = NULL; // Allocate an AVFrame structure new_av_frame = avcodec_alloc_frame(); @@ -1355,10 +1370,15 @@ AVFrame* FFmpegWriter::allocate_avframe(PixelFormat pix_fmt, int width, int heig // Determine required buffer size and allocate buffer *buffer_size = avpicture_get_size(pix_fmt, width, height); - new_buffer = new uint8_t[*buffer_size]; - // Attach buffer to AVFrame - avpicture_fill((AVPicture *)new_av_frame, new_buffer, pix_fmt, width, height); + // Create buffer (if not provided) + if (!new_buffer) + { + // New Buffer 
+ new_buffer = new uint8_t[*buffer_size]; + // Attach buffer to AVFrame + avpicture_fill((AVPicture *)new_av_frame, new_buffer, pix_fmt, width, height); + } // return AVFrame return new_av_frame; @@ -1391,37 +1411,18 @@ void FFmpegWriter::process_video_packet(tr1::shared_ptr frame) int bytes_source = 0; int bytes_final = 0; AVFrame *frame_source = NULL; - const Magick::PixelPacket *pixel_packets = NULL; + const uchar *pixels = NULL; // Get a list of pixels from source image - pixel_packets = frame->GetPixels(); + pixels = frame->GetPixels(); // Init AVFrame for source image & final (converted image) - frame_source = allocate_avframe(PIX_FMT_RGB24, source_image_width, source_image_height, &bytes_source); - AVFrame *frame_final = allocate_avframe(video_codec->pix_fmt, info.width, info.height, &bytes_final); + frame_source = allocate_avframe(PIX_FMT_RGBA, source_image_width, source_image_height, &bytes_source, (uint8_t*) pixels); + AVFrame *frame_final = allocate_avframe(video_codec->pix_fmt, info.width, info.height, &bytes_final, NULL); - // Determine how many colors we are copying (3 or 4) - int step = 3; // rgb - if ( video_st->codec->pix_fmt == PIX_FMT_RGBA || video_st->codec->pix_fmt == PIX_FMT_ARGB || video_st->codec->pix_fmt == PIX_FMT_BGRA ) - step = 4; // rgba - - // Fill the AVFrame with RGB image data - int source_total_pixels = source_image_width * source_image_height; - for (int packet = 0, row = 0; packet < source_total_pixels; packet++, row+=step) - { - // Update buffer (which is already linked to the AVFrame: pFrameRGB) - // Each color needs to be scaled to 8 bit (using the ImageMagick built-in ScaleQuantumToChar function) - frame_source->data[0][row] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].red); - frame_source->data[0][row+1] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].green); - frame_source->data[0][row+2] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].blue); - - 
// Copy alpha channel (if needed) - if (step == 4) - frame_source->data[0][row+3] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].opacity); - } - - - AppendDebugMethod("FFmpegWriter::process_video_packet", "frame->number", frame->number, "bytes_source", bytes_source, "bytes_final", bytes_final, "step", step, "", -1, "", -1); + // Fill with data + avpicture_fill((AVPicture *) frame_source, (uint8_t*)pixels, PIX_FMT_RGBA, source_image_width, source_image_height); + AppendDebugMethod("FFmpegWriter::process_video_packet", "frame->number", frame->number, "bytes_source", bytes_source, "bytes_final", bytes_final, "", -1, "", -1, "", -1); // Resize & convert pixel format sws_scale(scaler, frame_source->data, frame_source->linesize, 0, @@ -1566,7 +1567,7 @@ void FFmpegWriter::InitScalers(int source_width, int source_height) for (int x = 0; x < num_of_rescalers; x++) { // Init the software scaler from FFMpeg - img_convert_ctx = sws_getContext(source_width, source_height, PIX_FMT_RGB24, info.width, info.height, c->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL); + img_convert_ctx = sws_getContext(source_width, source_height, PIX_FMT_RGBA, info.width, info.height, c->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL); // Add rescaler to vector image_rescalers.push_back(img_convert_ctx); diff --git a/src/Frame.cpp b/src/Frame.cpp index 4ceb4340..f4489a0a 100644 --- a/src/Frame.cpp +++ b/src/Frame.cpp @@ -35,7 +35,6 @@ Frame::Frame() : number(1), pixel_ratio(1,1), channels(2), width(1), height(1), channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL) { // Init the image magic and audio buffer - image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(1,1), Magick::Color("red"))); audio = tr1::shared_ptr(new juce::AudioSampleBuffer(channels, 0)); // initialize the audio samples to zero (silence) @@ -48,7 +47,6 @@ Frame::Frame(int number, int width, int height, string color) channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL) { // Init 
the image magic and audio buffer - image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(1, 1), Magick::Color(color))); audio = tr1::shared_ptr(new juce::AudioSampleBuffer(channels, 0)); // initialize the audio samples to zero (silence) @@ -61,7 +59,6 @@ Frame::Frame(int number, int width, int height, const string map, const Magick:: channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL) { // Init the image magic and audio buffer - image = tr1::shared_ptr(new Magick::Image(width, height, map, type, pixels)); audio = tr1::shared_ptr(new juce::AudioSampleBuffer(channels, 0)); // initialize the audio samples to zero (silence) @@ -74,7 +71,6 @@ Frame::Frame(int number, int samples, int channels) : channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL) { // Init the image magic and audio buffer - image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(1, 1), Magick::Color("white"))); audio = tr1::shared_ptr(new juce::AudioSampleBuffer(channels, samples)); // initialize the audio samples to zero (silence) @@ -87,7 +83,6 @@ Frame::Frame(int number, int width, int height, string color, int samples, int c channel_layout(LAYOUT_STEREO), sample_rate(44100), qbuffer(NULL) { // Init the image magic and audio buffer - image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(1, 1), Magick::Color(color))); audio = tr1::shared_ptr(new juce::AudioSampleBuffer(channels, samples)); // initialize the audio samples to zero (silence) @@ -106,14 +101,14 @@ Frame::Frame ( const Frame &other ) void Frame::DeepCopy(const Frame& other) { number = other.number; - image = tr1::shared_ptr(new Magick::Image(*(other.image))); + image = tr1::shared_ptr(new QImage(*(other.image))); audio = tr1::shared_ptr(new juce::AudioSampleBuffer(*(other.audio))); pixel_ratio = Fraction(other.pixel_ratio.num, other.pixel_ratio.den); channels = other.channels; channel_layout = other.channel_layout; if (other.wave_image) - wave_image = tr1::shared_ptr(new 
Magick::Image(*(other.wave_image))); + wave_image = tr1::shared_ptr(new QImage(*(other.wave_image))); } // Descructor @@ -121,64 +116,30 @@ Frame::~Frame() { // Clear all pointers image.reset(); audio.reset(); - audio.reset(); - qimage.reset(); - - if (qbuffer) - { - delete qbuffer; - qbuffer = NULL; - } } // Display the frame image to the screen (primarily used for debugging reasons) void Frame::Display() { - // Make a copy of the image (since we might resize it) - Magick::Image copy; - copy = *image; + // Create new image object, and fill with pixel data + tr1::shared_ptr magick_image = GetMagickImage(); - // display the image (if any) - if (copy.size().width() > 1 && copy.size().height() > 1) - { - // Resize image (if needed) - if (pixel_ratio.num != 1 || pixel_ratio.den != 1) - { - // Calculate correct DAR (display aspect ratio) - int new_width = copy.size().width(); - int new_height = copy.size().height() * pixel_ratio.Reciprocal().ToDouble(); - - // Resize image - Magick::Geometry new_size(new_width, new_height); - new_size.aspect(true); - copy.resize(new_size); - } - - // Disply image - copy.display(); - } + // Disply image + magick_image->display(); } // Get an audio waveform image -tr1::shared_ptr Frame::GetWaveform(int width, int height, int Red, int Green, int Blue) +tr1::shared_ptr Frame::GetWaveform(int width, int height, int Red, int Green, int Blue, int Alpha) { // Clear any existing waveform image ClearWaveform(); // Init a list of lines - list lines; - lines.push_back(Magick::DrawableFillColor(Magick::Color((Magick::Quantum)Red, (Magick::Quantum)Green, (Magick::Quantum)Blue))); - lines.push_back(Magick::DrawablePointSize(16)); + QVector lines; + QVector labels; - // Calculate 1/2 the width of an image based on the # of samples + // Calculate width of an image based on the # of samples int total_samples = audio->getNumSamples(); - - // Determine how many samples can be skipped (to speed things up) - int step = 1; - if (total_samples > width) - // 
Set the # of samples to move forward for each pixel we draw - step = round((float) total_samples / (float) width) + 1; - if (total_samples > 0) { // If samples are present... @@ -193,70 +154,69 @@ tr1::shared_ptr Frame::GetWaveform(int width, int height, int Red { int X = 0; - // Change stroke and color - lines.push_back(Magick::DrawableStrokeColor(Magick::Color((Magick::Quantum)Red, (Magick::Quantum)Green, (Magick::Quantum)Blue))); - lines.push_back(Magick::DrawableStrokeWidth(1)); - // Get audio for this channel const float *samples = audio->getReadPointer(channel); - for (int sample = 0; sample < audio->getNumSamples(); sample+=step, X++) + for (int sample = 0; sample < audio->getNumSamples(); sample++, X++) { // Sample value (scaled to -100 to 100) float value = samples[sample] * 100; // Append a line segment for each sample - if (value != 0.0) + if (value != 0.0) { // LINE - lines.push_back(Magick::DrawableLine(X,Y, X,Y-value)); // sample=X coordinate, Y=100 is the middle - else + lines.push_back(QPointF(X,Y)); + lines.push_back(QPointF(X,Y-value)); + } + else { // DOT - lines.push_back(Magick::DrawablePoint(X,Y)); + lines.push_back(QPointF(X,Y)); + lines.push_back(QPointF(X,Y)); + } } - // Add Channel Label -// stringstream label; -// label << "Channel " << channel; -// lines.push_back(Magick::DrawableStrokeColor("#ffffff")); -// lines.push_back(Magick::DrawableFillColor("#ffffff")); -// lines.push_back(Magick::DrawableStrokeWidth(0.1)); -// lines.push_back(Magick::DrawableText(5, Y - 5, label.str())); + // Add Channel Label Coordinate + labels.push_back(QPointF(5, Y - 5)); // Increment Y Y += (200 + height_padding); total_width = X; } - // Create image - wave_image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(total_width, total_height), Magick::Color("none"))); - wave_image->backgroundColor(Magick::Color("none")); + // Create blank image + wave_image = tr1::shared_ptr(new QImage(total_width, total_height, QImage::Format_RGBA8888)); + 
wave_image->fill(QColor(QString::fromStdString("#000000"))); + + // Load QPainter with wave_image device + QPainter painter(wave_image.get()); + + // Set pen color + painter.setPen(QColor(Red, Green, Blue, Alpha)); // Draw the waveform - wave_image->draw(lines); + painter.drawLines(lines); + painter.end(); + + // Loop through the channels labels (and draw the text) + // TODO: Configure Fonts in Qt5 correctly, so the drawText method does not crash +// painter.setFont(QFont(QString("Arial"), 16, 1, false)); +// for (int channel = 0; channel < labels.size(); channel++) { +// stringstream label; +// label << "Channel " << channel; +// painter.drawText(labels.at(channel), QString::fromStdString(label.str())); +// } // Resize Image (if requested) - if (width != total_width || height != total_height) - { - Magick::Geometry new_size(width, height); - new_size.aspect(true); - wave_image->scale(new_size); + if (width != total_width || height != total_height) { + QImage scaled_wave_image = wave_image->scaled(width, height, Qt::IgnoreAspectRatio, Qt::FastTransformation); + wave_image = tr1::shared_ptr(new QImage(scaled_wave_image)); } - } else { // No audio samples present - wave_image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(width, height), Magick::Color("none"))); - wave_image->backgroundColor(Magick::Color("none")); - - // Add Channel Label - lines.push_back(Magick::DrawableStrokeColor("#ffffff")); - lines.push_back(Magick::DrawableFillColor("#ffffff")); - lines.push_back(Magick::DrawableStrokeWidth(0.1)); - lines.push_back(Magick::DrawableText((width / 2) - 100, height / 2, "No Audio Samples Found")); - - // Draw the waveform - wave_image->draw(lines); + wave_image = tr1::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888)); + wave_image->fill(QColor(QString::fromStdString("#000000"))); } // Return new image @@ -271,23 +231,46 @@ void Frame::ClearWaveform() } // Get an audio waveform image pixels -const Magick::PixelPacket* 
Frame::GetWaveformPixels(int width, int height, int Red, int Green, int Blue) +const unsigned char* Frame::GetWaveformPixels(int width, int height, int Red, int Green, int Blue, int Alpha) { // Get audio wave form image - wave_image = GetWaveform(width, height, Red, Green, Blue); + wave_image = GetWaveform(width, height, Red, Green, Blue, Alpha); // Return array of pixel packets - return wave_image->getConstPixels(0,0, wave_image->columns(), wave_image->rows()); + return wave_image->bits(); } // Display the wave form void Frame::DisplayWaveform() { // Get audio wave form image - GetWaveform(720, 480, 0, 28672, 65280); + GetWaveform(720, 480, 0, 123, 255, 255); - // Display Image - wave_image->display(); + QRgb const *tmpBits = (const QRgb*)wave_image->bits(); + + // Create new image object, and fill with pixel data + tr1::shared_ptr magick_image = tr1::shared_ptr(new Magick::Image(wave_image->width(), wave_image->height(),"RGBA", Magick::CharPixel, tmpBits)); + + // Give image a transparent background color + magick_image->backgroundColor(Magick::Color("none")); + magick_image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); + magick_image->matte(true); + + // Resize image (if needed) + if (pixel_ratio.num != 1 || pixel_ratio.den != 1) + { + // Calculate correct DAR (display aspect ratio) + int new_width = magick_image->size().width(); + int new_height = magick_image->size().height() * pixel_ratio.Reciprocal().ToDouble(); + + // Resize image + Magick::Geometry new_size(new_width, new_height); + new_size.aspect(true); + magick_image->resize(new_size); + } + + // Disply image + magick_image->display(); // Deallocate waveform image ClearWaveform(); @@ -355,7 +338,7 @@ float* Frame::GetInterleavedAudioSamples(int new_sample_rate, AudioResampler* re int num_of_samples = audio->getNumSamples(); // Resample to new sample rate (if needed) - if (new_sample_rate != sample_rate) + if (new_sample_rate != sample_rate && resampler) { // YES, RESAMPLE AUDIO 
resampler->SetBuffer(audio.get(), sample_rate, new_sample_rate); @@ -422,17 +405,17 @@ int64 Frame::GetBytes() } // Get pixel data (as packets) -const Magick::PixelPacket* Frame::GetPixels() +const unsigned char* Frame::GetPixels() { // Return array of pixel packets - return image->getConstPixels(0,0, image->columns(), image->rows()); + return image->bits(); } // Get pixel data (for only a single scan-line) -const Magick::PixelPacket* Frame::GetPixels(int row) +const unsigned char* Frame::GetPixels(int row) { // Return array of pixel packets - return image->getConstPixels(0,row, image->columns(), 1); + return image->scanLine(row); } // Set Pixel Aspect Ratio @@ -503,50 +486,27 @@ ChannelLayout Frame::ChannelsLayout() return channel_layout; } -// Make colors in a specific range transparent -void Frame::TransparentColors(string color, double fuzz) -{ - // Get the max quantum size (i.e. 255, 65535, etc...) - using namespace Magick; - Magick::Quantum max_range = QuantumRange; - - // Make this range of colors transparent - image->colorFuzz(fuzz * max_range / 100.0); - image->transparent(Magick::Color(color)); - //image->colorFuzz(0); - image->negate(); - //image->flip(); - //image->flop(); - //image->solarize(50.0); - - -} // Save the frame image to the specified path. The image format is determined from the extension (i.e. 
image.PNG, image.JPEG) void Frame::Save(string path, float scale) { - // Make a copy of the image (since we might resize it) - Magick::Image copy; - copy = *image; - - // Maintain alpha channel - copy.backgroundColor(Magick::Color("none")); - copy.matte(true); + // Create new image object, and fill with pixel data + tr1::shared_ptr copy = GetMagickImage(); // display the image (if any) - if (copy.size().width() > 1 && copy.size().height() > 1) + if (copy->size().width() > 1 && copy->size().height() > 1) { // Resize image (if needed) if (pixel_ratio.num != 1 || pixel_ratio.den != 1) { // Calculate correct DAR (display aspect ratio) - int new_width = copy.size().width(); - int new_height = copy.size().height() * pixel_ratio.Reciprocal().ToDouble(); + int new_width = copy->size().width(); + int new_height = copy->size().height() * pixel_ratio.Reciprocal().ToDouble(); // Resize image Magick::Geometry new_size(new_width, new_height); new_size.aspect(true); - copy.resize(new_size); + copy->resize(new_size); } } @@ -554,23 +514,23 @@ void Frame::Save(string path, float scale) if (abs(scale) > 1.001 || abs(scale) < 0.999) { // Resize image - Magick::Geometry new_size(copy.size().width() * scale, copy.size().height() * scale); + Magick::Geometry new_size(copy->size().width() * scale, copy->size().height() * scale); new_size.aspect(true); - copy.resize(new_size); + copy->resize(new_size); } // save the image - copy.write(path); + copy->write(path); } // Thumbnail the frame image to the specified path. The image format is determined from the extension (i.e. 
image.PNG, image.JPEG) void Frame::Thumbnail(string path, int new_width, int new_height, string mask_path, string overlay_path, string background_color, bool ignore_aspect) throw(InvalidFile) { - // Make a copy of the image (since we might resize it) - tr1::shared_ptr copy = tr1::shared_ptr(new Magick::Image(*image)); + // Create new image object, and fill with pixel data + tr1::shared_ptr copy = GetMagickImage(); copy->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); - copy->matte(true); + // Set background color if (background_color != "") @@ -709,116 +669,91 @@ void Frame::Thumbnail(string path, int new_width, int new_height, string mask_pa void Frame::AddColor(int width, int height, string color) { // Create new image object, and fill with pixel data - image = tr1::shared_ptr(new Magick::Image(Magick::Geometry(width, height), Magick::Color(color))); + image = tr1::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888)); - // Give image a transparent background color - image->backgroundColor(Magick::Color("#000000ff")); - image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); - image->matte(true); + // Fill with solid color + image->fill(QColor(QString::fromStdString(color))); // Update height and width - width = image->columns(); - height = image->rows(); + width = image->width(); + height = image->height(); } // Add (or replace) pixel data to the frame -void Frame::AddImage(int width, int height, const string map, const Magick::StorageType type, const void *pixels) +void Frame::AddImage(int width, int height, int bytes_per_pixel, QImage::Format type, const unsigned char *pixels_) { + // Create new buffer + int buffer_size = width * height * bytes_per_pixel; + qbuffer = new unsigned char[buffer_size](); + + // Copy buffer data + memcpy((unsigned char*)qbuffer, pixels_, buffer_size); + // Create new image object, and fill with pixel data - image = tr1::shared_ptr(new Magick::Image(width, height, map, type, pixels)); + image = 
tr1::shared_ptr(new QImage(qbuffer, width, height, width * bytes_per_pixel, type, (QImageCleanupFunction) &openshot::Frame::cleanUpBuffer, (void*) qbuffer)); - // Give image a transparent background color - image->backgroundColor(Magick::Color("none")); - image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); - image->matte(true); + // Always convert to RGBA8888 (if different) + if (image->format() != QImage::Format_RGBA8888) + image->convertToFormat(QImage::Format_RGBA8888); // Update height and width - width = image->columns(); - height = image->rows(); + width = image->width(); + height = image->height(); } // Add (or replace) pixel data to the frame -void Frame::AddImage(tr1::shared_ptr new_image) +void Frame::AddImage(tr1::shared_ptr new_image) { + // Ignore blank images + if (!new_image) + return; + // assign image data - image = tr1::shared_ptr(new Magick::Image(*new_image.get())); - image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); - image->matte(true); + image = new_image; + + // Always convert to RGBA8888 (if different) + if (image->format() != QImage::Format_RGBA8888) + image->convertToFormat(QImage::Format_RGBA8888); // Update height and width - width = image->columns(); - height = image->rows(); + width = image->width(); + height = image->height(); } // Add (or replace) pixel data to the frame (for only the odd or even lines) -void Frame::AddImage(tr1::shared_ptr new_image, bool only_odd_lines) +void Frame::AddImage(tr1::shared_ptr new_image, bool only_odd_lines) { - // Replace image (if needed) - if (image->columns() == 1) { - image = tr1::shared_ptr(new Magick::Image(*new_image.get())); - image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); - image->matte(true); - } + // Ignore blank new_image + if (!new_image) + return; - // Loop through each odd or even line, and copy it to the image - int starting_row = 0; - if (!only_odd_lines) - // even rows - starting_row = 1; + // Check for blank source image + if 
(!image) + // Replace the blank source image + AddImage(new_image); - // Replace some rows of pixels - for (int row = starting_row; row < new_image->rows(); row += 2) - { - // Get row of pixels from original image - Magick::PixelPacket* row_pixels = image->getPixels(0, row, image->columns(), 1); - const Magick::PixelPacket* new_row_pixels = new_image->getConstPixels(0, row, image->columns(), 1); + // Ignore image of different sizes or formats + if (image == new_image || image->size() != image->size() || image->format() != image->format()) + return; - // Copy pixels - for (int col = 0; col < image->columns(); col++) - row_pixels[col] = new_row_pixels[col]; + // Get the frame's image + const unsigned char* pixels = image->bits(); + const unsigned char* new_pixels = new_image->bits(); - // Replace them with updated pixels - image->syncPixels(); + // Loop through the scanlines of the image (even or odd) + int start = 0; + if (only_odd_lines) + start = 1; + for (int row = start; row < image->height(); row += 2) { + memcpy((unsigned char*)pixels, new_pixels + (row * image->bytesPerLine()), image->bytesPerLine()); + new_pixels += image->bytesPerLine(); } // Update height and width - width = image->columns(); - height = image->rows(); + width = image->width(); + height = image->height(); } -// Composite a new image on top of the existing image -void Frame::AddImage(tr1::shared_ptr new_image, float alpha) -{ - // Get the max quantum size (i.e. 255, 65535, etc...) 
- using namespace Magick; - Magick::Quantum max_range = QuantumRange; - - // Replace image (if needed) - if (image->columns() == 1) { - image = tr1::shared_ptr(new Magick::Image(*new_image.get())); - image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); - image->matte(true); - } - else - { - // Calculate opacity of new image - int new_opacity = max_range * (1.0 - alpha); - if (new_opacity < 0) - new_opacity = 0; // completely invisible - else if (new_opacity > max_range) - new_opacity = max_range; - - // Set opacity - new_image->opacity(new_opacity); - - // Composite image on top of current image - image->composite(*new_image.get(), 0, 0, Magick::DissolveCompositeOp); - } - - // Update height and width - width = image->columns(); - height = image->rows(); -} // Resize audio container to hold more (or less) samples and channels void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout) @@ -855,88 +790,69 @@ void Frame::ApplyGainRamp(int destChannel, int destStartSample, int numSamples, audio->applyGainRamp(destChannel, destStartSample, numSamples, initial_gain, final_gain); } -// Experimental method to add effects to this frame -void Frame::AddEffect(string name) -{ - if (name == "negate") - image->negate(false); - else if (name == "flip") - image->flip(); - else if (name == "oilPaint") - image->oilPaint(3.0); - else if (name == "swirl") - image->swirl(30.0); -} - -// Rotate the image -void Frame::Rotate(float degrees) -{ - image->rotate(degrees); -} - -// Experimental method to add overlay images to this frame -void Frame::AddOverlay(Frame* frame) -{ - // Get overlay image (if any) - tr1::shared_ptr overlay = frame->GetImage(); - - // Composite image onto this image - image->composite(*overlay, Magick::SouthEastGravity, Magick::OverCompositeOp); -} - -// Experimental method to add the frame number on top of the image -void Frame::AddOverlayNumber(int overlay_number) -{ - stringstream label; - if (overlay_number > 0) - label << 
overlay_number; - else - label << number; - - // Drawable text - list lines; - - lines.push_back(Magick::DrawableGravity(Magick::NorthWestGravity)); - lines.push_back(Magick::DrawableStrokeColor("#ffffff")); - lines.push_back(Magick::DrawableFillColor("#ffffff")); - lines.push_back(Magick::DrawableStrokeWidth(0.1)); - lines.push_back(Magick::DrawablePointSize(24)); - lines.push_back(Magick::DrawableText(5, 5, label.str())); - - image->draw(lines); -} - // Get pointer to Magick++ image object -tr1::shared_ptr Frame::GetImage() +tr1::shared_ptr Frame::GetImage() { + // Check for blank image + if (!image) + // Fill with black + AddColor(width, height, "#000000"); + return image; } -// Get pointer to QImage of frame -tr1::shared_ptr Frame::GetQImage() +// Get pointer to ImageMagick image object +tr1::shared_ptr Frame::GetMagickImage() { - const int BPP = 3; - const std::size_t bufferSize = width * height * BPP; + // Check for blank image + if (!image) + // Fill with black + AddColor(width, height, "#000000"); + + // Get the pixels from the frame image + QRgb const *tmpBits = (const QRgb*)image->bits(); + + // Create new image object, and fill with pixel data + tr1::shared_ptr magick_image = tr1::shared_ptr(new Magick::Image(image->width(), image->height(),"RGBA", Magick::CharPixel, tmpBits)); + + // Give image a transparent background color + magick_image->backgroundColor(Magick::Color("none")); + magick_image->virtualPixelMethod(Magick::TransparentVirtualPixelMethod); + magick_image->matte(true); + + return magick_image; +} + +// Get pointer to QImage of frame +void Frame::AddMagickImage(tr1::shared_ptr new_image) +{ + const int BPP = 4; + const std::size_t bufferSize = new_image->columns() * new_image->rows() * BPP; /// Use realloc for fast memory allocation. 
/// TODO: consider locking the buffer for mt safety //qbuffer = reinterpret_cast(realloc(qbuffer, bufferSize)); qbuffer = new unsigned char[bufferSize](); + unsigned char *buffer = (unsigned char*)qbuffer; // Iterate through the pixel packets, and load our own buffer // Each color needs to be scaled to 8 bit (using the ImageMagick built-in ScaleQuantumToChar function) - const Magick::PixelPacket *pixels = GetPixels(); - for (int n = 0, i = 0; n < width * height; n += 1, i += 3) { - qbuffer[i+0] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].red); - qbuffer[i+1] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].green); - qbuffer[i+2] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].blue); + int numcopied = 0; + Magick::PixelPacket *pixels = new_image->getPixels(0,0, new_image->columns(), new_image->rows()); + for (int n = 0, i = 0; n < new_image->columns() * new_image->rows(); n += 1, i += 4) { + buffer[i+0] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].red); + buffer[i+1] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].green); + buffer[i+2] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].blue); + buffer[i+3] = 255 - MagickCore::ScaleQuantumToChar((Magick::Quantum) pixels[n].opacity); + numcopied+=4; } // Create QImage of frame data - qimage = tr1::shared_ptr(new QImage(qbuffer, width, height, width * BPP, QImage::Format_RGB888)); + image = tr1::shared_ptr(new QImage(qbuffer, width, height, width * BPP, QImage::Format_RGBA8888, (QImageCleanupFunction) &cleanUpBuffer, (void*) qbuffer)); - // Return QImage - return qimage; + // Update height and width + width = image->width(); + height = image->height(); } // Play audio samples for this frame @@ -1004,3 +920,16 @@ void Frame::Play() } + + +// Clean up buffer after QImage is deleted +void Frame::cleanUpBuffer(void *info) +{ + if (info) + { + // Remove buffer since QImage tells us to + unsigned char* ptr_to_qbuffer = (unsigned char*) info; + 
delete ptr_to_qbuffer; + ptr_to_qbuffer = NULL; + } +} diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index b50fc410..576f8b2a 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -301,16 +301,22 @@ MappedFrame FrameMapper::GetMappedFrame(int TargetFrameNumber) throw(OutOfBounds // Get an openshot::Frame object for a specific frame number of this reader. tr1::shared_ptr FrameMapper::GetFrame(int requested_frame) throw(ReaderClosed) { + // Check final cache, and just return the frame (if it's available) + if (final_cache.Exists(requested_frame)) + return final_cache.GetFrame(requested_frame); + + // Create a scoped lock, allowing only a single thread to run the following code at one time + const GenericScopedLock lock(getFrameCriticalSection); + // Check if mappings are dirty (and need to be recalculated) if (is_dirty) // Recalculate mappings Init(); - // Check final cache, and just return the frame (if it's available) + // Check final cache a 2nd time (due to potential lock already generating this frame) if (final_cache.Exists(requested_frame)) return final_cache.GetFrame(requested_frame); - // Minimum number of frames to process (for performance reasons) int minimum_frames = OPEN_MP_NUM_PROCESSORS; @@ -330,7 +336,7 @@ tr1::shared_ptr FrameMapper::GetFrame(int requested_frame) throw(ReaderCl { // Get the mapped frame MappedFrame mapped = GetMappedFrame(frame_number); - tr1::shared_ptr mapped_frame = reader->GetFrameSafe(mapped.Odd.Frame); + tr1::shared_ptr mapped_frame = reader->GetFrame(mapped.Odd.Frame); int channels_in_frame = mapped_frame->GetAudioChannelsCount(); // Init some basic properties about this frame @@ -339,17 +345,20 @@ tr1::shared_ptr FrameMapper::GetFrame(int requested_frame) throw(ReaderCl // Create a new frame tr1::shared_ptr frame(new Frame(frame_number, 1, 1, "#000000", samples_in_frame, channels_in_frame)); frame->SampleRate(mapped_frame->SampleRate()); + frame->ChannelsLayout(mapped_frame->ChannelsLayout()); + // Copy the 
image from the odd field - frame->AddImage(reader->GetFrameSafe(mapped.Odd.Frame)->GetImage(), true); + frame->AddImage(tr1::shared_ptr(new QImage(*reader->GetFrame(mapped.Odd.Frame)->GetImage())), true); if (mapped.Odd.Frame != mapped.Even.Frame) // Add even lines (if different than the previous image) - frame->AddImage(reader->GetFrameSafe(mapped.Even.Frame)->GetImage(), false); + frame->AddImage(tr1::shared_ptr(new QImage(*reader->GetFrame(mapped.Even.Frame)->GetImage())), false); + // Copy the samples int samples_copied = 0; int starting_frame = mapped.Samples.frame_start; - while (samples_copied < mapped.Samples.total) + while (info.has_audio && samples_copied < mapped.Samples.total) { // Init number of samples to copy this iteration int remaining_samples = mapped.Samples.total - samples_copied; @@ -359,7 +368,7 @@ tr1::shared_ptr FrameMapper::GetFrame(int requested_frame) throw(ReaderCl for (int channel = 0; channel < channels_in_frame; channel++) { // number of original samples on this frame - tr1::shared_ptr original_frame = reader->GetFrameSafe(starting_frame); + tr1::shared_ptr original_frame = reader->GetFrame(starting_frame); int original_samples = original_frame->GetAudioSamplesCount(); if (starting_frame == mapped.Samples.frame_start) @@ -402,15 +411,16 @@ tr1::shared_ptr FrameMapper::GetFrame(int requested_frame) throw(ReaderCl starting_frame++; } - // Resample audio on frame (if needed) - if (info.sample_rate != frame->SampleRate() || info.channels != frame->GetAudioChannelsCount() || - info.channel_layout != frame->ChannelsLayout()) + if (info.has_audio && + ( info.sample_rate != frame->SampleRate() || + info.channels != frame->GetAudioChannelsCount() || + info.channel_layout != frame->ChannelsLayout())) // Resample audio and correct # of channels if needed ResampleMappedAudio(frame, mapped.Odd.Frame); + // Add frame to final cache - #pragma omp critical (openshot_cache) final_cache.Add(frame->number, frame); } // for loop @@ -622,7 +632,7 @@ void 
FrameMapper::ResampleMappedAudio(tr1::shared_ptr frame, int original } // Update total samples & input frame size (due to bigger or smaller data types) - total_frame_samples = Frame::GetSamplesPerFrame(original_frame_number, target, info.sample_rate, info.channels); + total_frame_samples = Frame::GetSamplesPerFrame(frame->number, target, info.sample_rate, info.channels) + FF_INPUT_BUFFER_PADDING_SIZE; AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number); @@ -664,10 +674,10 @@ void FrameMapper::ResampleMappedAudio(tr1::shared_ptr frame, int original } // Create a new array (to hold all resampled S16 audio samples) - int16_t* resampled_samples = new int16_t[nb_samples * info.channels]; + int16_t* resampled_samples = new int16_t[(nb_samples * info.channels) + FF_INPUT_BUFFER_PADDING_SIZE]; // Copy audio samples over original samples - memcpy(resampled_samples, audio_converted->data[0], nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels); + memcpy(resampled_samples, audio_converted->data[0], (nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels)); // Free frames av_freep(&audio_frame[0]); @@ -725,6 +735,10 @@ void FrameMapper::ResampleMappedAudio(tr1::shared_ptr frame, int original AppendDebugMethod("FrameMapper::ResampleMappedAudio (Add audio to channel)", "number of samples", position, "channel_filter", channel_filter, "", -1, "", -1, "", -1, "", -1); } + // Update frame's audio meta data + frame->SampleRate(info.sample_rate); + frame->ChannelsLayout(info.channel_layout); + // clear channel buffer delete[] channel_buffer; channel_buffer = NULL; diff --git a/src/ImageReader.cpp b/src/ImageReader.cpp index 3a9cffcb..fa0bc2a6 100644 --- a/src/ImageReader.cpp +++ 
b/src/ImageReader.cpp @@ -113,9 +113,7 @@ tr1::shared_ptr ImageReader::GetFrame(int requested_frame) throw(ReaderCl tr1::shared_ptr image_frame(new Frame(requested_frame, image->size().width(), image->size().height(), "#000000", 0, 2)); // Add Image data to frame - tr1::shared_ptr copy_image(new Magick::Image(*image.get())); - copy_image->modifyImage(); // actually copy the image data to this object - image_frame->AddImage(copy_image); + image_frame->AddMagickImage(image); // return frame object return image_frame; diff --git a/src/ImageWriter.cpp b/src/ImageWriter.cpp index 611b4f40..0289c0a7 100644 --- a/src/ImageWriter.cpp +++ b/src/ImageWriter.cpp @@ -90,132 +90,35 @@ void ImageWriter::WriteFrame(tr1::shared_ptr frame) throw(WriterClosed) { // Check for open reader (or throw exception) if (!is_open) - throw WriterClosed("The FFmpegWriter is closed. Call Open() before calling this method.", path); + throw WriterClosed("The ImageWriter is closed. Call Open() before calling this method.", path); - // Add frame pointer to "queue", waiting to be processed the next - // time the WriteFrames() method is called. - spooled_video_frames.push_back(frame); - AppendDebugMethod("ImageWriter::WriteFrame", "frame->number", frame->number, "spooled_video_frames.size()", spooled_video_frames.size(), "cache_size", cache_size, "is_writing", is_writing, "", -1, "", -1); + // Copy and resize image + tr1::shared_ptr frame_image = frame->GetMagickImage(); + frame_image->magick( info.vcodec ); + frame_image->backgroundColor(Magick::Color("none")); + frame_image->matte(true); + frame_image->quality(image_quality); + frame_image->animationDelay(info.video_timebase.ToFloat() * 100); + frame_image->animationIterations(number_of_loops); - // Write the frames once it reaches the correct cache size - if (spooled_video_frames.size() == cache_size) - { - // Is writer currently writing? 
- if (!is_writing) - // Write frames to video file - write_queued_frames(); + // Calculate correct DAR (display aspect ratio) + int new_width = info.width; + int new_height = info.height * frame->GetPixelRatio().Reciprocal().ToDouble(); - else - { - // YES, WRITING... so wait until it finishes, before writing again - while (is_writing) - Sleep(1); // sleep for 250 milliseconds + // Resize image + Magick::Geometry new_size(new_width, new_height); + new_size.aspect(true); + frame_image->resize(new_size); - // Write frames to video file - write_queued_frames(); - } - } + + // Put resized frame in vector (waiting to be written) + frames.push_back(*frame_image.get()); // Keep track of the last frame added last_frame = frame; } -// Write all frames in the queue to the video file. -void ImageWriter::write_queued_frames() -{ - AppendDebugMethod("ImageWriter::write_queued_frames", "spooled_video_frames.size()", spooled_video_frames.size(), "", -1, "", -1, "", -1, "", -1, "", -1); - - // Flip writing flag - is_writing = true; - - // Transfer spool to queue - queued_video_frames = spooled_video_frames; - - // Empty spool - spooled_video_frames.clear(); - - // Set the number of threads in OpenMP - omp_set_num_threads(OPEN_MP_NUM_PROCESSORS); - // Allow nested OpenMP sections - omp_set_nested(true); - - #pragma omp parallel - { - #pragma omp single - { - // Loop through each queued image frame - while (!queued_video_frames.empty()) - { - // Get front frame (from the queue) - tr1::shared_ptr frame = queued_video_frames.front(); - - // Add to processed queue - processed_frames.push_back(frame); - - // Copy and resize image - tr1::shared_ptr frame_image = frame->GetImage(); - frame_image->magick( info.vcodec ); - frame_image->backgroundColor(Magick::Color("none")); - frame_image->matte(true); - frame_image->quality(image_quality); - frame_image->animationDelay(info.video_timebase.ToFloat() * 100); - frame_image->animationIterations(number_of_loops); - - // Calculate correct DAR 
(display aspect ratio) - int new_width = info.width; - int new_height = info.height * frame->GetPixelRatio().Reciprocal().ToDouble(); - - // Resize image - Magick::Geometry new_size(new_width, new_height); - new_size.aspect(true); - frame_image->resize(new_size); - - // Put resized frame in vector (waiting to be written) - frames.push_back(*frame_image.get()); - - // Remove front item - queued_video_frames.pop_front(); - - } // end while - } // end omp single - - #pragma omp single - { - // Loop back through the frames (in order), and write them to the video file - while (!processed_frames.empty()) - { - // Get front frame (from the queue) - tr1::shared_ptr frame = processed_frames.front(); - - // Add to deallocate queue (so we can remove the AVFrames when we are done) - deallocate_frames.push_back(frame); - - // Write frame to video file - // write_video_packet(frame, frame_final); - - // Remove front item - processed_frames.pop_front(); - } - - // Loop through, and deallocate AVFrames - while (!deallocate_frames.empty()) - { - // Get front frame (from the queue) - tr1::shared_ptr frame = deallocate_frames.front(); - - // Remove front item - deallocate_frames.pop_front(); - } - - // Done writing - is_writing = false; - - } // end omp single - } // end omp parallel - -} - // Write a block of frames from a reader void ImageWriter::WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed) { diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp index ff7c4d56..2d2f66e9 100644 --- a/src/KeyFrame.cpp +++ b/src/KeyFrame.cpp @@ -300,7 +300,6 @@ Json::Value Keyframe::JsonValue() { Point existing_point = Points[x]; root["Points"].append(existing_point.JsonValue()); } - //root["Auto_Handle_Percentage"] = Auto_Handle_Percentage; // return JsonValue return root; @@ -487,6 +486,7 @@ void Keyframe::PrintPoints() { if (needs_update) Process(); + cout << fixed << setprecision(4); for (vector::iterator it = Points.begin(); it != Points.end(); it++) { Point p = *it; cout 
<< p.co.X << "\t" << p.co.Y << endl; @@ -498,6 +498,7 @@ void Keyframe::PrintValues() { if (needs_update) Process(); + cout << fixed << setprecision(4); cout << "Frame Number (X)\tValue (Y)\tIs Increasing\tRepeat Numerator\tRepeat Denominator\tDelta (Y Difference)" << endl; for (vector::iterator it = Values.begin() + 1; it != Values.end(); it++) { @@ -507,102 +508,103 @@ void Keyframe::PrintValues() { } void Keyframe::Process() { - // only process if needed - if (!needs_update) - return; - else + + #pragma omp critical (keyframe_process) + { + // only process if needed + if (needs_update && Points.size() > 0) + { + + // Clear all values + Values.clear(); + + // fill in all values between 1 and 1st point's co.X + Point p1 = Points[0]; + if (Points.size() > 1) + // Fill in previous X values (before 1st point) + for (int x = 0; x < p1.co.X; x++) + Values.push_back(Coordinate(Values.size(), p1.co.Y)); + else + // Add a single value (since we only have 1 point) + Values.push_back(Coordinate(Values.size(), p1.co.Y)); + + // Loop through each pair of points (1 less than the max points). Each + // pair of points is used to process a segment of the keyframe. + Point p2(0, 0); + for (int x = 0; x < Points.size() - 1; x++) { + p1 = Points[x]; + p2 = Points[x + 1]; + + // process segment p1,p2 + ProcessSegment(x, p1, p2); + } + + // Loop through each Value, and set the direction of the coordinate. This is used + // when time mapping, to determine what direction the audio waveforms play. 
+ bool increasing = true; + int repeat_count = 1; + int last_value = 0; + for (vector::iterator it = Values.begin() + 1; it != Values.end(); it++) { + int current_value = int(round((*it).Y)); + int next_value = int(round((*it).Y)); + int prev_value = int(round((*it).Y)); + if (it + 1 != Values.end()) + next_value = int(round((*(it + 1)).Y)); + if (it - 1 >= Values.begin()) + prev_value = int(round((*(it - 1)).Y)); + + // Loop forward and look for the next unique value (to determine direction) + for (vector::iterator direction_it = it + 1; direction_it != Values.end(); direction_it++) { + int next = int(round((*direction_it).Y)); + + // Detect direction + if (current_value < next) + { + increasing = true; + break; + } + else if (current_value > next) + { + increasing = false; + break; + } + } + + // Set direction + (*it).IsIncreasing(increasing); + + // Detect repeated Y value + if (current_value == last_value) + // repeated, so increment count + repeat_count++; + else + // reset repeat counter + repeat_count = 1; + + // Detect how many 'more' times it's repeated + int additional_repeats = 0; + for (vector::iterator repeat_it = it + 1; repeat_it != Values.end(); repeat_it++) { + int next = int(round((*repeat_it).Y)); + if (next == current_value) + // repeated, so increment count + additional_repeats++; + else + break; // stop looping + } + + // Set repeat fraction + (*it).Repeat(Fraction(repeat_count, repeat_count + additional_repeats)); + + // Set delta (i.e. 
different from previous unique Y value) + (*it).Delta(current_value - last_value); + + // track the last value + last_value = current_value; + } + } + // reset flag needs_update = false; - - // do not process if no points are found - if (Points.size() == 0) - return; - - // Clear all values - Values.clear(); - - // fill in all values between 1 and 1st point's co.X - Point p1 = Points[0]; - if (Points.size() > 1) - // Fill in previous X values (before 1st point) - for (int x = 0; x < p1.co.X; x++) - Values.push_back(Coordinate(Values.size(), p1.co.Y)); - else - // Add a single value (since we only have 1 point) - Values.push_back(Coordinate(Values.size(), p1.co.Y)); - - // Loop through each pair of points (1 less than the max points). Each - // pair of points is used to process a segment of the keyframe. - Point p2(0, 0); - for (int x = 0; x < Points.size() - 1; x++) { - p1 = Points[x]; - p2 = Points[x + 1]; - - // process segment p1,p2 - ProcessSegment(x, p1, p2); - } - - // Loop through each Value, and set the direction of the coordinate. This is used - // when time mapping, to determine what direction the audio waveforms play. 
- bool increasing = true; - int repeat_count = 1; - int last_value = 0; - for (vector::iterator it = Values.begin() + 1; it != Values.end(); it++) { - int current_value = int(round((*it).Y)); - int next_value = int(round((*it).Y)); - int prev_value = int(round((*it).Y)); - if (it + 1 != Values.end()) - next_value = int(round((*(it + 1)).Y)); - if (it - 1 >= Values.begin()) - prev_value = int(round((*(it - 1)).Y)); - - // Loop forward and look for the next unique value (to determine direction) - for (vector::iterator direction_it = it + 1; direction_it != Values.end(); direction_it++) { - int next = int(round((*direction_it).Y)); - - // Detect direction - if (current_value < next) - { - increasing = true; - break; - } - else if (current_value > next) - { - increasing = false; - break; - } - } - - // Set direction - (*it).IsIncreasing(increasing); - - // Detect repeated Y value - if (current_value == last_value) - // repeated, so increment count - repeat_count++; - else - // reset repeat counter - repeat_count = 1; - - // Detect how many 'more' times it's repeated - int additional_repeats = 0; - for (vector::iterator repeat_it = it + 1; repeat_it != Values.end(); repeat_it++) { - int next = int(round((*repeat_it).Y)); - if (next == current_value) - // repeated, so increment count - additional_repeats++; - else - break; // stop looping - } - - // Set repeat fraction - (*it).Repeat(Fraction(repeat_count, repeat_count + additional_repeats)); - - // Set delta (i.e. 
different from previous unique Y value) - (*it).Delta(current_value - last_value); - - // track the last value - last_value = current_value; } } diff --git a/src/Qt/PlayerDemo.cpp b/src/Qt/PlayerDemo.cpp index 6fd2b920..7037d757 100644 --- a/src/Qt/PlayerDemo.cpp +++ b/src/Qt/PlayerDemo.cpp @@ -63,8 +63,6 @@ PlayerDemo::PlayerDemo(QWidget *parent) PlayerDemo::~PlayerDemo() { - player->Stop(); - delete player; } void PlayerDemo::keyPressEvent(QKeyEvent *event) @@ -123,6 +121,15 @@ void PlayerDemo::keyPressEvent(QKeyEvent *event) player->Speed(0); player->Seek(player->Position() + 1); } + else if (event->key() == Qt::Key_Escape) { + cout << "QUIT PLAYER" << endl; + QWidget *pWin = QApplication::activeWindow(); + pWin->hide(); + + player->Stop(); + + QApplication::quit(); + } event->accept(); QWidget::keyPressEvent(event); diff --git a/src/Qt/PlayerPrivate.cpp b/src/Qt/PlayerPrivate.cpp index cbe9ac3f..b589599a 100644 --- a/src/Qt/PlayerPrivate.cpp +++ b/src/Qt/PlayerPrivate.cpp @@ -35,16 +35,16 @@ namespace openshot : renderer(rb), Thread("player"), video_position(1), audio_position(0) , audioPlayback(new AudioPlaybackThread()) , videoPlayback(new VideoPlaybackThread(rb)) + , videoCache(new VideoCacheThread()) , speed(1), reader(NULL), last_video_position(1) { } // Destructor PlayerPrivate::~PlayerPrivate() { - if (isThreadRunning()) stopThread(500); - if (audioPlayback->isThreadRunning() && reader->info.has_audio) audioPlayback->stopThread(500); - if (videoPlayback->isThreadRunning() && reader->info.has_video) videoPlayback->stopThread(500); + stopPlayback(1000); delete audioPlayback; + delete videoCache; delete videoPlayback; } @@ -55,15 +55,13 @@ namespace openshot if (!reader) return; - // Kill audio and video threads (if they are currently running) - if (audioPlayback->isThreadRunning() && reader->info.has_audio) audioPlayback->stopThread(-1); - if (videoPlayback->isThreadRunning() && reader->info.has_video) videoPlayback->stopThread(-1); - // Start the threads 
if (reader->info.has_audio) audioPlayback->startThread(1); - if (reader->info.has_video) - videoPlayback->startThread(2); + if (reader->info.has_video) { + videoCache->startThread(2); + videoPlayback->startThread(3); + } while (!threadShouldExit()) { @@ -74,7 +72,7 @@ namespace openshot const Time t1 = Time::getCurrentTime(); // Get the current video frame (if it's different) - frame = getFrame(); + frame = getFrame(); // Experimental Pausing Code (if frame has not changed) if ((speed == 0 && video_position == last_video_position) || (video_position > reader->info.video_length)) { @@ -121,24 +119,19 @@ namespace openshot // the video to catch up. sleep_time += (video_frame_diff * (1000.0 / reader->info.fps.ToDouble())); + else if (video_frame_diff < -4 && reader->info.has_audio && reader->info.has_video) { // Skip frame(s) to catch up to the audio (if more than 4 frames behind) video_position++; sleep_time = 0; } - // Sleep (leaving the video frame on the screen for the correct amount of time) if (sleep_time > 0) sleep(sleep_time); // Debug output std::cout << "video frame diff: " << video_frame_diff << std::endl; - } - - // Kill audio and video threads (if they are still running) - if (audioPlayback->isThreadRunning() && reader->info.has_audio) audioPlayback->stopThread(-1); - if (videoPlayback->isThreadRunning() && reader->info.has_video) videoPlayback->stopThread(-1); } // Get the next displayed frame (based on speed and direction) @@ -149,13 +142,18 @@ namespace openshot if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length) video_position = video_position + speed; - if (frame && frame->number == video_position) { + if (frame && frame->number == video_position && video_position == last_video_position) { // return cached frame return frame; } else + { + // Update cache on which frame was retrieved + videoCache->current_display_frame = video_position; + // return frame from reader - return reader->GetFrameSafe(video_position); + 
return reader->GetFrame(video_position); + } } catch (const ReaderClosed & e) { // ... @@ -170,17 +168,21 @@ namespace openshot // Start video/audio playback bool PlayerPrivate::startPlayback() { - if (video_position < 0) return false; - stopPlayback(-1); - startThread(1); - return true; + if (video_position < 0) return false; + + stopPlayback(-1); + startThread(1); + return true; } // Stop video/audio playback void PlayerPrivate::stopPlayback(int timeOutMilliseconds) { - if (audioPlayback->isThreadRunning() && reader->info.has_audio) audioPlayback->stopThread(-1); - if (videoPlayback->isThreadRunning() && reader->info.has_video) videoPlayback->stopThread(-1); + if (isThreadRunning()) stopThread(timeOutMilliseconds); + if (audioPlayback->isThreadRunning() && reader->info.has_audio) audioPlayback->stopThread(timeOutMilliseconds); + if (videoCache->isThreadRunning() && reader->info.has_video) videoCache->stopThread(timeOutMilliseconds); + if (videoPlayback->isThreadRunning() && reader->info.has_video) videoPlayback->stopThread(timeOutMilliseconds); + } diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp new file mode 100644 index 00000000..6891ee45 --- /dev/null +++ b/src/Qt/VideoCacheThread.cpp @@ -0,0 +1,100 @@ +/** + * @file + * @brief Source file for VideoCacheThread class + * @author Jonathan Thomas + * + * @section LICENSE + * + * Copyright (c) 2008-2014 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. 
+ * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "../../include/Qt/VideoCacheThread.h" + +namespace openshot +{ + // Constructor + VideoCacheThread::VideoCacheThread() + : Thread("video-cache"), speed(1), is_playing(false), position(1) + , reader(NULL), max_frames(OPEN_MP_NUM_PROCESSORS * 2), current_display_frame(1) + { + } + + // Destructor + VideoCacheThread::~VideoCacheThread() + { + } + + // Get the currently playing frame number (if any) + int VideoCacheThread::getCurrentFramePosition() + { + if (frame) + return frame->number; + else + return 0; + } + + // Set the currently playing frame number (if any) + void VideoCacheThread::setCurrentFramePosition(int current_frame_number) + { + current_display_frame = current_frame_number; + } + + // Seek the audio thread + void VideoCacheThread::Seek(int new_position) + { + position = new_position; + } + + // Play the audio + void VideoCacheThread::Play() { + // Start playing + is_playing = true; + } + + // Stop the audio + void VideoCacheThread::Stop() { + // Stop playing + is_playing = false; + } + + // Start the thread + void VideoCacheThread::run() + { + while (!threadShouldExit()) { + + // Calculate sleep time for frame rate + double frame_time = (1000.0 / reader->info.fps.ToDouble()); + + // Cache frames before the other threads need them + // Cache frames up to the max frames + while ((position - current_display_frame) < max_frames) + { + // Only cache up till the max_frames amount... 
then sleep + if (reader) + reader->GetFrame(position); + + // Increment frame number + position++; + } + } + + return; + } +} diff --git a/src/Qt/VideoPlaybackThread.cpp b/src/Qt/VideoPlaybackThread.cpp index 85c05899..d3fe3aa9 100644 --- a/src/Qt/VideoPlaybackThread.cpp +++ b/src/Qt/VideoPlaybackThread.cpp @@ -56,11 +56,18 @@ namespace openshot { while (!threadShouldExit()) { // Make other threads wait on the render event - render.wait(); - // Render the frame to the screen - renderer->paint(frame); - // Signal to other threads that the rendered event has completed - rendered.signal(); + bool need_render = render.wait(500); + + if (need_render) + { + // Render the frame to the screen + renderer->paint(frame); + + // Signal to other threads that the rendered event has completed + rendered.signal(); + } } + + return; } } diff --git a/src/Qt/VideoRenderer.cpp b/src/Qt/VideoRenderer.cpp index 2421e450..da2e51a6 100644 --- a/src/Qt/VideoRenderer.cpp +++ b/src/Qt/VideoRenderer.cpp @@ -29,7 +29,6 @@ #include #include -using openshot::OSPixelFormat; VideoRenderer::VideoRenderer(QObject *parent) : QObject(parent) @@ -48,7 +47,6 @@ void VideoRenderer::OverrideWidget(long qwidget_address) } -//void VideoRenderer::render(OSPixelFormat /*format*/, int width, int height, int bytesPerLine, unsigned char *data) void VideoRenderer::render(tr1::shared_ptr image) { emit present(*image); diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp new file mode 100644 index 00000000..c274ab28 --- /dev/null +++ b/src/QtImageReader.cpp @@ -0,0 +1,179 @@ +/** + * @file + * @brief Source file for QtImageReader class + * @author Jonathan Thomas + * + * @section LICENSE + * + * Copyright (c) 2008-2014 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . 
+ * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "../include/QtImageReader.h" + +using namespace openshot; + +QtImageReader::QtImageReader(string path) throw(InvalidFile) : path(path), is_open(false) +{ + // Open and Close the reader, to populate it's attributes (such as height, width, etc...) + Open(); + Close(); +} + +// Open image file +void QtImageReader::Open() throw(InvalidFile) +{ + // Open reader if not already open + if (!is_open) + { + // Attempt to open file + image = tr1::shared_ptr(new QImage()); + bool success = image->load(QString::fromStdString(path)); + + // Set pixel format + image = tr1::shared_ptr(new QImage(image->convertToFormat(QImage::Format_RGBA8888))); + + if (!success) + // raise exception + throw InvalidFile("File could not be opened.", path); + + // Update image properties + info.has_audio = false; + info.has_video = true; + info.file_size = image->byteCount(); + info.vcodec = "QImage"; + info.width = image->width(); + info.height = image->height(); + info.pixel_ratio.num = 1; + info.pixel_ratio.den = 1; + info.duration = 60 * 60 * 24; // 24 hour duration + info.fps.num = 30; + info.fps.den = 1; + info.video_timebase.num = 1; + info.video_timebase.den = 30; + info.video_length = round(info.duration * info.fps.ToDouble()); + + // Calculate the DAR (display aspect ratio) + Fraction 
size(info.width * info.pixel_ratio.num, info.height * info.pixel_ratio.den); + + // Reduce size fraction + size.Reduce(); + + // Set the ratio based on the reduced fraction + info.display_ratio.num = size.num; + info.display_ratio.den = size.den; + + // Mark as "open" + is_open = true; + } +} + +// Close image file +void QtImageReader::Close() +{ + // Close all objects, if reader is 'open' + if (is_open) + { + // Mark as "closed" + is_open = false; + + // Delete the image + image.reset(); + + info.vcodec = ""; + info.acodec = ""; + } +} + +// Get an openshot::Frame object for a specific frame number of this reader. +tr1::shared_ptr QtImageReader::GetFrame(int requested_frame) throw(ReaderClosed) +{ + // Check for open reader (or throw exception) + if (!is_open) + throw ReaderClosed("The Image is closed. Call Open() before calling this method.", path); + + // Create or get frame object + tr1::shared_ptr image_frame(new Frame(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels), info.channels)); + + // Add Image data to frame + image_frame->AddImage(image); + + // return frame object + return image_frame; +} + +// Generate JSON string of this object +string QtImageReader::Json() { + + // Return formatted string + return JsonValue().toStyledString(); +} + +// Generate Json::JsonValue for this object +Json::Value QtImageReader::JsonValue() { + + // Create root json object + Json::Value root = ReaderBase::JsonValue(); // get parent properties + root["type"] = "QtImageReader"; + root["path"] = path; + + // return JsonValue + return root; +} + +// Load JSON string into this object +void QtImageReader::SetJson(string value) throw(InvalidJSON) { + + // Parse JSON string into JSON objects + Json::Value root; + Json::Reader reader; + bool success = reader.parse( value, root ); + if (!success) + // Raise exception + throw InvalidJSON("JSON could not be parsed (or is invalid)", ""); + + try + 
{ + // Set all values that match + SetJsonValue(root); + } + catch (exception e) + { + // Error parsing JSON (or missing keys) + throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", ""); + } +} + +// Load Json::JsonValue into this object +void QtImageReader::SetJsonValue(Json::Value root) throw(InvalidFile) { + + // Set parent data + ReaderBase::SetJsonValue(root); + + // Set data from Json (if key is found) + if (!root["path"].isNull()) + path = root["path"].asString(); + + // Re-Open path, and re-init everything (if needed) + if (is_open) + { + Close(); + Open(); + } +} diff --git a/src/QtPlayer.cpp b/src/QtPlayer.cpp index 73a64fb3..66875091 100644 --- a/src/QtPlayer.cpp +++ b/src/QtPlayer.cpp @@ -26,7 +26,9 @@ * along with OpenShot Library. If not, see . */ +#include "../include/Clip.h" #include "../include/FFmpegReader.h" +#include "../include/Timeline.h" #include "../include/QtPlayer.h" #include "../include/Qt/PlayerPrivate.h" #include "../include/Qt/VideoRenderer.h" @@ -54,8 +56,27 @@ QtPlayer::~QtPlayer() void QtPlayer::SetSource(const std::string &source) { reader = new FFmpegReader(source); - //reader->debug = true; + reader->debug = false; reader->Open(); + + // experimental timeline code + //Clip *c = new Clip(source); + //c->scale = SCALE_NONE; + //c->rotation.AddPoint(1, 0.0); + //c->rotation.AddPoint(1000, 360.0); + //c->Waveform(true); + + //Timeline *t = new Timeline(c->Reader()->info.width, c->Reader()->info.height, c->Reader()->info.fps, c->Reader()->info.sample_rate, c->Reader()->info.channels); + //Timeline *t = new Timeline(1280, 720, openshot::Fraction(24,1), 44100, 2, LAYOUT_STEREO); + //t->debug = true; openshot::Fraction(30,1) + //t->info = c->Reader()->info; + //t->info.fps = openshot::Fraction(12,1); + //t->GetCache()->SetMaxBytesFromInfo(40, c->Reader()->info.width, c->Reader()->info.height, c->Reader()->info.sample_rate, c->Reader()->info.channels); + + //t->AddClip(c); + //t->Open(); + + // Set the reader 
Reader(reader); } @@ -100,10 +121,16 @@ void QtPlayer::Seek(int new_frame) { // Check for seek if (new_frame > 0) { + // Notify cache thread that seek has occurred + p->videoCache->Seek(new_frame); + // Update current position p->video_position = new_frame; - // Notify audio thread that seek has occured + // Clear last position (to force refresh) + p->last_video_position = 1; + + // Notify audio thread that seek has occurred p->audioPlayback->Seek(new_frame); } } @@ -112,6 +139,7 @@ void QtPlayer::Stop() { mode = PLAYBACK_STOPPED; p->stopPlayback(); + p->videoCache->Stop(); p->video_position = 0; threads_started = false; } @@ -122,6 +150,7 @@ void QtPlayer::Reader(ReaderBase *new_reader) cout << "Reader SET: " << new_reader << endl; reader = new_reader; p->reader = new_reader; + p->videoCache->Reader(new_reader); p->audioPlayback->Reader(new_reader); } @@ -150,6 +179,7 @@ float QtPlayer::Speed() { void QtPlayer::Speed(float new_speed) { speed = new_speed; p->speed = new_speed; + p->videoCache->setSpeed(new_speed); if (p->reader->info.has_audio) p->audioPlayback->setSpeed(new_speed); } diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp index 6afd18ad..46333dc5 100644 --- a/src/ReaderBase.cpp +++ b/src/ReaderBase.cpp @@ -303,10 +303,3 @@ void ReaderBase::DrawFrameOnScene(string path, long _graphics_scene_address) { scene->addItem(item); } - -// Lock reader and get a frame -tr1::shared_ptr ReaderBase::GetFrameSafe(int number) -{ - const GenericScopedLock lock(getFrameCriticalSection); - return GetFrame(number); -} diff --git a/src/RendererBase.cpp b/src/RendererBase.cpp index 48c763f7..3027a498 100644 --- a/src/RendererBase.cpp +++ b/src/RendererBase.cpp @@ -38,5 +38,5 @@ RendererBase::~RendererBase() void RendererBase::paint(const std::tr1::shared_ptr & frame) { - this->render(frame->GetQImage()); + this->render(frame->GetImage()); } diff --git a/src/TextReader.cpp b/src/TextReader.cpp index ec52a0cc..414be660 100644 --- a/src/TextReader.cpp +++ 
b/src/TextReader.cpp @@ -153,7 +153,8 @@ tr1::shared_ptr TextReader::GetFrame(int requested_frame) throw(ReaderClo // Add Image data to frame tr1::shared_ptr copy_image(new Magick::Image(*image.get())); copy_image->modifyImage(); // actually copy the image data to this object - image_frame->AddImage(copy_image); + //TODO: Reimplement this with QImage + //image_frame->AddImage(copy_image); // return frame object return image_frame; diff --git a/src/Timeline.cpp b/src/Timeline.cpp index bd5caa09..b12bb394 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -30,7 +30,8 @@ using namespace openshot; // Default Constructor for the timeline (which sets the canvas width and height) -Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels) : is_open(false) +Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) : + is_open(false), auto_map_clips(true) { // Init viewport size (curve based, because it can be animated) viewport_scale = Keyframe(100.0); @@ -42,26 +43,30 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha color.green = Keyframe(0.0); color.blue = Keyframe(0.0); - // Init cache - int64 bytes = height * width * 4 + (44100 * 2 * 4); - final_cache = Cache(2 * bytes); // 20 frames, 4 colors of chars, 2 audio channels of 4 byte floats - // Init FileInfo struct (clear all values) info.width = width; info.height = height; info.fps = fps; info.sample_rate = sample_rate; info.channels = channels; + info.channel_layout = channel_layout; info.video_timebase = fps.Reciprocal(); info.duration = 60 * 30; // 30 minute default duration + info.has_audio = true; + info.has_video = true; + info.video_length = info.fps.ToFloat() * info.duration; + + // Init cache + final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 3, info.width, info.height, info.sample_rate, info.channels); } // Add an openshot::Clip to the timeline void Timeline::AddClip(Clip* 
clip) throw(ReaderClosed) { - // All clips must be converted to the frame rate of this timeline, - // so assign the same frame rate to each clip. - clip->Reader()->info.fps = info.fps; + // All clips should be converted to the frame rate of this timeline + if (auto_map_clips) + // Apply framemapper (or update existing framemapper) + apply_mapper_to_clip(clip); // Add clip to list clips.push_back(clip); @@ -92,6 +97,48 @@ void Timeline::RemoveClip(Clip* clip) clips.remove(clip); } +// Apply a FrameMapper to a clip which matches the settings of this timeline +void Timeline::apply_mapper_to_clip(Clip* clip) +{ + // Determine type of reader + ReaderBase* clip_reader = NULL; + if (typeid(clip->Reader()) == typeid(FrameMapper)) + { + // Get the existing reader + clip_reader = (ReaderBase*) clip->Reader(); + + } else { + + // Create a new FrameMapper to wrap the current reader + clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout); + } + + // Update the mapping + FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader; + clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout); + + // Update clip reader + clip->Reader(clip_reader); +} + +// Apply the timeline's framerate and samplerate to all clips +void Timeline::ApplyMapperToClips() +{ + // Clear all cached frames + final_cache.Clear(); + + // Loop through all clips + list::iterator clip_itr; + for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr) + { + // Get clip object from the iterator + Clip *clip = (*clip_itr); + + // Apply framemapper (or update existing framemapper) + apply_mapper_to_clip(clip); + } +} + // Calculate time of a frame number, based on a framerate float Timeline::calculate_time(int number, Fraction rate) { @@ -151,7 +198,9 @@ void Timeline::add_layer(tr1::shared_ptr new_frame, Clip* source_clip, in // Get the clip's frame & image 
tr1::shared_ptr source_frame; - #pragma omp critical (reader_lock) + + + //#pragma omp ordered source_frame = tr1::shared_ptr(source_clip->GetFrame(clip_frame_number)); // No frame found... so bail @@ -167,7 +216,7 @@ void Timeline::add_layer(tr1::shared_ptr new_frame, Clip* source_clip, in source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer()); // Declare an image to hold the source frame's image - tr1::shared_ptr source_image; + tr1::shared_ptr source_image; /* COPY AUDIO - with correct volume */ if (source_clip->Reader()->info.has_audio) { @@ -190,9 +239,19 @@ void Timeline::add_layer(tr1::shared_ptr new_frame, Clip* source_clip, in if (!isEqual(previous_volume, volume)) source_frame->ApplyGainRamp(channel, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume); + // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame. + // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the + // number of samples returned is variable... and does not match the number expected. + // This is a crude solution at best. =) + if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()) + // Force timeline frame to match the source frame + new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout); + // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, to // be sure to set the gain's correctly, so the sum does not exceed 1.0 (of audio distortion will happen). 
+ #pragma omp critical (openshot_adding_audio) new_frame->AddAudio(false, channel, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume); + } else // Debug output @@ -218,62 +277,74 @@ void Timeline::add_layer(tr1::shared_ptr new_frame, Clip* source_clip, in int red = source_clip->wave_color.red.GetInt(clip_frame_number); int green = source_clip->wave_color.green.GetInt(clip_frame_number); int blue = source_clip->wave_color.blue.GetInt(clip_frame_number); + int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number); // Generate Waveform Dynamically (the size of the timeline) - source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue); + source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha); } // Get some basic image properties - int source_width = source_image->columns(); - int source_height = source_image->rows(); + int source_width = source_image->width(); + int source_height = source_image->height(); /* ALPHA & OPACITY */ if (source_clip->alpha.GetValue(clip_frame_number) != 0) { - float alpha = 1.0 - source_clip->alpha.GetValue(clip_frame_number); - source_image->quantumOperator(Magick::OpacityChannel, Magick::MultiplyEvaluateOperator, alpha); + float alpha = source_clip->alpha.GetValue(clip_frame_number); + + // Get source image's pixels + unsigned char *pixels = (unsigned char *) source_image->bits(); + + // Loop through pixels + for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4) + { + // Get the alpha values from the pixel + int A = pixels[byte_index + 3]; + + // Apply alpha to pixel + pixels[byte_index + 3] *= (1.0 - alpha); + } // Debug output AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1); } /* RESIZE SOURCE IMAGE - based on scale type */ - 
Magick::Geometry new_size(info.width, info.height); switch (source_clip->scale) { case (SCALE_FIT): - new_size.aspect(false); // respect aspect ratio - source_image->resize(new_size); - source_width = source_image->size().width(); - source_height = source_image->size().height(); + // keep aspect ratio + source_image = tr1::shared_ptr(new QImage(source_image->scaled(info.width, info.height, Qt::KeepAspectRatio, Qt::SmoothTransformation))); + source_width = source_image->width(); + source_height = source_image->height(); // Debug output - AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "new_size.aspect()", new_size.aspect(), "", -1, "", -1); + AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1); break; case (SCALE_STRETCH): - new_size.aspect(true); // ignore aspect ratio - source_image->resize(new_size); - source_width = source_image->size().width(); - source_height = source_image->size().height(); + // ignore aspect ratio + source_image = tr1::shared_ptr(new QImage(source_image->scaled(info.width, info.height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation))); + source_width = source_image->width(); + source_height = source_image->height(); // Debug output - AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "new_size.aspect()", new_size.aspect(), "", -1, "", -1); + AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1); break; case (SCALE_CROP): Magick::Geometry width_size(info.width, round(info.width / (float(source_width) / float(source_height)))); 
Magick::Geometry height_size(round(info.height / (float(source_height) / float(source_width))), info.height); - new_size.aspect(false); // respect aspect ratio + // respect aspect ratio if (width_size.width() >= info.width && width_size.height() >= info.height) - source_image->resize(width_size); // width is larger, so resize to it + source_image = tr1::shared_ptr(new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation))); else - source_image->resize(height_size); // height is larger, so resize to it - source_width = source_image->size().width(); - source_height = source_image->size().height(); + source_image = tr1::shared_ptr(new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation))); // height is larger, so resize to it + source_width = source_image->width(); + source_height = source_image->height(); // Debug output - AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "new_size.aspect()", new_size.aspect(), "", -1, "", -1); + AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1); break; } @@ -324,76 +395,80 @@ void Timeline::add_layer(tr1::shared_ptr new_frame, Clip* source_clip, in /* LOCATION, ROTATION, AND SCALE */ float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees - x += info.width * source_clip->location_x.GetValue(clip_frame_number); // move in percentage of final width - y += info.height * source_clip->location_y.GetValue(clip_frame_number); // move in percentage of final height + x += (info.width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width + y += (info.height * 
source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height bool is_x_animated = source_clip->location_x.Points.size() > 1; bool is_y_animated = source_clip->location_y.Points.size() > 1; int offset_x = -1; int offset_y = -1; bool transformed = false; + QTransform transform; if ((!isEqual(x, 0) || !isEqual(y, 0)) && (isEqual(r, 0) && isEqual(sx, 1) && isEqual(sy, 1) && !is_x_animated && !is_y_animated)) { // SIMPLE OFFSET AppendDebugMethod("Timeline::add_layer (Transform: SIMPLE)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); // If only X and Y are different, and no animation is being used (just set the offset for speed) - offset_x = round(x); - offset_y = round(y); transformed = true; + // Set QTransform + transform.translate(x, y); + } else if (!isEqual(r, 0) || !isEqual(x, 0) || !isEqual(y, 0) || !isEqual(sx, 1) || !isEqual(sy, 1)) { // COMPLEX DISTORTION AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); - /* RESIZE SOURCE CANVAS - to the same size as timeline canvas */ - if (source_width != info.width || source_height != info.height) - { - // Debug output - AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX: Resize Source Canvas)", "source_frame->number", source_frame->number, "source_frame->GetWidth()", source_frame->GetWidth(), "info.width", info.width, "source_frame->GetHeight()", source_frame->GetHeight(), "info.height", info.height, "", -1); + // Use the QTransform object, which can be very CPU intensive + transformed = true; - source_image->borderColor(Magick::Color("none")); - source_image->border(Magick::Geometry(1, 1, 0, 0, false, false)); // prevent stretching of edge pixels (during the canvas resize) - source_image->size(Magick::Geometry(info.width, info.height, 0, 0, false, false)); // resize the canvas (to prevent clipping) + // Set QTransform + if (!isEqual(r, 
0)) { + // ROTATE CLIP + float origin_x = x + (source_width / 2.0); + float origin_y = y + (source_height / 2.0); + transform.translate(origin_x, origin_y); + transform.rotate(r); + transform.translate(-origin_x,-origin_y); } - // Debug output - AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX: Prepare for ScaleRotateTranslateDistortion)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); + // Set QTransform + if (!isEqual(x, 0) || !isEqual(y, 0)) { + // TRANSLATE/MOVE CLIP + transform.translate(x, y); + } - // Use the distort operator, which is very CPU intensive - // origin X,Y Scale Angle NewX,NewY - double distort_args[7] = {(source_width/2.0),(source_height/2.0), sx,sy, r, x+(scaled_source_width/2.0),y+(scaled_source_height/2.0) }; - source_image->distort(Magick::ScaleRotateTranslateDistortion, 7, distort_args, false); - transformed = true; + if (!isEqual(sx, 0) || !isEqual(sy, 0)) { + // SCALE CLIP + transform.scale(sx, sy); + } // Debug output AppendDebugMethod("Timeline::add_layer (Transform: COMPLEX: Completed ScaleRotateTranslateDistortion)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); } - - /* Is this the 1st layer? 
If so, add a background color below the image */ - if (new_frame->GetImage()->columns() == 1) - { - // Debug output - AppendDebugMethod("Timeline::add_layer (Transform: 1st Layer, Generate Solid Color Image", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->columns()", new_frame->GetImage()->columns(), "transformed", transformed, "", -1); - - /* CREATE BACKGROUND COLOR - needed if this is the 1st layer */ - int red = color.red.GetInt(timeline_frame_number); - int green = color.green.GetInt(timeline_frame_number); - int blue = color.blue.GetInt(timeline_frame_number); - new_frame->AddColor(info.width, info.height, Magick::Color((Magick::Quantum)red, (Magick::Quantum)green, (Magick::Quantum)blue, 0)); - } - // Debug output - AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->columns()", new_frame->GetImage()->columns(), "transformed", transformed, "", -1); + AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1); /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ - tr1::shared_ptr new_image = new_frame->GetImage(); - new_image->composite(*source_image.get(), offset_x, offset_y, Magick::OverCompositeOp); + tr1::shared_ptr new_image = new_frame->GetImage(); + + // Load timeline's new frame image into a QPainter + QPainter painter(new_image.get()); + + // Apply transform (translate, rotate, scale)... 
if any + if (transformed) + painter.setTransform(transform); + + // Composite a new layer onto the image + painter.setCompositionMode(QPainter::CompositionMode_SourceOver); + painter.setRenderHint(QPainter::SmoothPixmapTransform, true); + painter.drawImage(0, 0, *source_image); + painter.end(); // Debug output - AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->columns()", new_frame->GetImage()->columns(), "transformed", transformed, "", -1); + AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1); } // Update the list of 'opened' clips @@ -478,6 +553,10 @@ bool Timeline::isEqual(double a, double b) // Get an openshot::Frame object for a specific frame number of this reader. tr1::shared_ptr Timeline::GetFrame(int requested_frame) throw(ReaderClosed) { + // Check for open reader (or throw exception) + if (!is_open) + throw ReaderClosed("The Timeline is closed. 
Call Open() before calling this method.", ""); + // Adjust out of bounds frame number if (requested_frame < 1) requested_frame = 1; @@ -492,8 +571,20 @@ tr1::shared_ptr Timeline::GetFrame(int requested_frame) throw(ReaderClose } else { + // Create a scoped lock, allowing only a single thread to run the following code at one time + const GenericScopedLock lock(getFrameCriticalSection); + + // Check cache again (due to locking) + if (final_cache.Exists(requested_frame)) { + // Debug output + AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1); + + // Return cached frame + return final_cache.GetFrame(requested_frame); + } + // Minimum number of frames to process (for performance reasons) - int minimum_frames = 1; + int minimum_frames = OPEN_MP_NUM_PROCESSORS; // Get a list of clips that intersect with the requested section of timeline // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing' @@ -502,24 +593,36 @@ tr1::shared_ptr Timeline::GetFrame(int requested_frame) throw(ReaderClose // TODO: OpenMP is disabled in this function, due to conditional calls the ImageMagick methods, which also // contain OpenMP parallel regions. This is a violation of OpenMP, and causes the threads to hang in some cases. 
// Set the number of threads in OpenMP - //omp_set_num_threads(OPEN_MP_NUM_PROCESSORS); + omp_set_num_threads(OPEN_MP_NUM_PROCESSORS); // Allow nested OpenMP sections - //omp_set_nested(true); + omp_set_nested(true); // Debug output AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1); - //#pragma omp parallel - //{ + #pragma omp parallel + { // Loop through all requested frames - //#pragma omp for firstprivate(nearby_clips, requested_frame, minimum_frames) + #pragma omp for firstprivate(nearby_clips, requested_frame, minimum_frames) for (int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++) { // Debug output AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1); + // Init some basic properties about this frame + int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels); + // Create blank frame (which will become the requested frame) - tr1::shared_ptr new_frame(tr1::shared_ptr(new Frame(frame_number, info.width, info.height, "#000000", 0, info.channels))); + tr1::shared_ptr new_frame(tr1::shared_ptr(new Frame(frame_number, info.width, info.height, "#000000", samples_in_frame, info.channels))); + new_frame->SampleRate(info.sample_rate); + new_frame->ChannelsLayout(info.channel_layout); + + // Debug output + AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1); + + // Add Background Color to 1st layer + new_frame->AddColor(info.width, info.height, color.GetColorHex(frame_number)); + // Calculate time of frame float requested_time = calculate_time(frame_number, info.fps); @@ -571,28 +674,14 @@ tr1::shared_ptr 
Timeline::GetFrame(int requested_frame) throw(ReaderClose } // end clip loop - // Check for empty frame image (and fill with color) - if (new_frame->GetImage()->columns() == 1) - { - // Debug output - AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1); - - int red = color.red.GetInt(frame_number); - int green = color.green.GetInt(frame_number); - int blue = color.blue.GetInt(frame_number); - #pragma omp critical (openshot_add_color) - new_frame->AddColor(info.width, info.height, Magick::Color((Magick::Quantum)red, (Magick::Quantum)green, (Magick::Quantum)blue)); - } - // Debug output AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1); // Add final frame to cache - #pragma omp critical (timeline_cache) final_cache.Add(frame_number, new_frame); } // end frame loop - //} // end parallel + } // end parallel // Debug output AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1); @@ -826,6 +915,9 @@ void Timeline::ApplyJsonDiff(string value) throw(InvalidJSON, InvalidJSONKey) { // Error parsing JSON (or missing keys) throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", ""); } + + // Adjust cache (in case something changed) + final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 4, info.width, info.height, info.sample_rate, info.channels); } // Apply JSON diff to clips diff --git a/src/bindings/python/openshot.i b/src/bindings/python/openshot.i index 096033f6..682bb92a 100644 --- a/src/bindings/python/openshot.i +++ b/src/bindings/python/openshot.i @@ -54,6 +54,7 @@ #include "../../../include/ReaderBase.h" #include "../../../include/WriterBase.h" #include "../../../include/Cache.h" +#include 
"../../../include/ChannelLayouts.h" #include "../../../include/ChunkReader.h" #include "../../../include/ChunkWriter.h" #include "../../../include/ClipBase.h" @@ -74,6 +75,7 @@ #include "../../../include/PlayerBase.h" #include "../../../include/Point.h" #include "../../../include/Profiles.h" +#include "../../../include/QtImageReader.h" #include "../../../include/QtPlayer.h" #include "../../../include/KeyFrame.h" #include "../../../include/RendererBase.h" @@ -94,6 +96,7 @@ %include "../../../include/ReaderBase.h" %include "../../../include/WriterBase.h" %include "../../../include/Cache.h" +%include "../../../include/ChannelLayouts.h" %include "../../../include/ChunkReader.h" %include "../../../include/ChunkWriter.h" %include "../../../include/ClipBase.h" @@ -117,6 +120,7 @@ %include "../../../include/PlayerBase.h" %include "../../../include/Point.h" %include "../../../include/Profiles.h" +%include "../../../include/QtImageReader.h" %include "../../../include/QtPlayer.h" %include "../../../include/KeyFrame.h" %include "../../../include/RendererBase.h" diff --git a/src/bindings/ruby/openshot.i b/src/bindings/ruby/openshot.i index 00a85805..12371c8c 100644 --- a/src/bindings/ruby/openshot.i +++ b/src/bindings/ruby/openshot.i @@ -60,6 +60,7 @@ namespace tr1 #include "../../../include/ReaderBase.h" #include "../../../include/WriterBase.h" #include "../../../include/Cache.h" +#include "../../../include/ChannelLayouts.h" #include "../../../include/ChunkReader.h" #include "../../../include/ChunkWriter.h" #include "../../../include/ClipBase.h" @@ -80,6 +81,7 @@ namespace tr1 #include "../../../include/PlayerBase.h" #include "../../../include/Point.h" #include "../../../include/Profiles.h" +#include "../../../include/QtImageReader.h" #include "../../../include/QtPlayer.h" #include "../../../include/KeyFrame.h" #include "../../../include/RendererBase.h" @@ -100,6 +102,7 @@ namespace tr1 %include "../../../include/ReaderBase.h" %include "../../../include/WriterBase.h" %include 
"../../../include/Cache.h" +%include "../../../include/ChannelLayouts.h" %include "../../../include/ChunkReader.h" %include "../../../include/ChunkWriter.h" %include "../../../include/ClipBase.h" @@ -123,6 +126,7 @@ namespace tr1 %include "../../../include/PlayerBase.h" %include "../../../include/Point.h" %include "../../../include/Profiles.h" +%include "../../../include/QtImageReader.h" %include "../../../include/QtPlayer.h" %include "../../../include/KeyFrame.h" %include "../../../include/RendererBase.h" diff --git a/src/effects/ChromaKey.cpp b/src/effects/ChromaKey.cpp index 5ea1ea8f..979cdddb 100644 --- a/src/effects/ChromaKey.cpp +++ b/src/effects/ChromaKey.cpp @@ -54,13 +54,33 @@ ChromaKey::ChromaKey(Color color, Keyframe fuzz) : color(color), fuzz(fuzz) // modified openshot::Frame object tr1::shared_ptr ChromaKey::GetFrame(tr1::shared_ptr frame, int frame_number) { - // Get the max quantum size (i.e. 255, 65535, etc...) - using namespace Magick; - Magick::Quantum max_range = QuantumRange; + // Determine the current HSL (Hue, Saturation, Lightness) for the Chrome + int threshold = fuzz.GetInt(frame_number); + long mask_R = color.red.GetInt(frame_number); + long mask_G = color.green.GetInt(frame_number); + long mask_B = color.blue.GetInt(frame_number); - // Make this range of colors transparent - frame->GetImage()->colorFuzz(fuzz.GetValue(frame_number) * max_range / 100.0); - frame->GetImage()->transparent(Magick::Color((Magick::Quantum)color.red.GetInt(frame_number), (Magick::Quantum)color.green.GetInt(frame_number), (Magick::Quantum)color.blue.GetInt(frame_number))); + // Get source image's pixels + tr1::shared_ptr image = frame->GetImage(); + unsigned char *pixels = (unsigned char *) image->bits(); + + // Loop through pixels + for (int pixel = 0, byte_index=0; pixel < image->width() * image->height(); pixel++, byte_index+=4) + { + // Get the RGB values from the pixel + unsigned char R = pixels[byte_index]; + unsigned char G = pixels[byte_index + 1]; + 
unsigned char B = pixels[byte_index + 2]; + unsigned char A = pixels[byte_index + 3]; + + // Get distance between mask color and pixel color + long distance = Color::GetDistance((long)R, (long)G, (long)B, mask_R, mask_G, mask_B); + + // Alpha out the pixel (if color similar) + if (distance <= threshold) + // MATCHED - Make pixel transparent + pixels[byte_index + 3] = 0; + } // return the modified frame return frame; diff --git a/src/effects/Deinterlace.cpp b/src/effects/Deinterlace.cpp index 20c18f53..08f54a96 100644 --- a/src/effects/Deinterlace.cpp +++ b/src/effects/Deinterlace.cpp @@ -51,21 +51,32 @@ Deinterlace::Deinterlace(bool UseOddLines) : isOdd(UseOddLines) // modified openshot::Frame object tr1::shared_ptr Deinterlace::GetFrame(tr1::shared_ptr frame, int frame_number) { - // Calculate the new size (used to shrink and expand the image, to remove interlacing) - Magick::Geometry original_size = frame->GetImage()->size(); - Magick::Geometry frame_size = frame->GetImage()->size(); - frame_size.aspect(false); // allow the image to be re-sized to an invalid aspect ratio - frame_size.height(frame_size.height() / 2.0); // height set to 50% of original height + // Get original size of frame's image + int original_width = frame->GetImage()->width(); + int original_height = frame->GetImage()->height(); + // Get the frame's image + tr1::shared_ptr image = frame->GetImage(); + const unsigned char* pixels = image->bits(); + + // Create a smaller, new image + QImage deinterlaced_image(image->width(), image->height() / 2, QImage::Format_RGBA8888); + const unsigned char* deinterlaced_pixels = deinterlaced_image.bits(); + + // Loop through the scanlines of the image (even or odd) + int start = 0; if (isOdd) - // Roll the image by 1 pixel, to use the ODD horizontal lines (instead of the even ones) - frame->GetImage()->roll(0,1); + start = 1; + for (int row = start; row < image->height(); row += 2) { + memcpy((unsigned char*)deinterlaced_pixels, pixels + (row * 
image->bytesPerLine()), image->bytesPerLine()); + deinterlaced_pixels += image->bytesPerLine(); + } - // Resample the image to 50% height (to remove every other line) - frame->GetImage()->sample(frame_size); + // Resize deinterlaced image back to original size, and update frame's image + image = tr1::shared_ptr(new QImage(deinterlaced_image.scaled(original_width, original_height, Qt::IgnoreAspectRatio, Qt::FastTransformation))); - // Resize image back to original height - frame->GetImage()->resize(original_size); + // Update image on frame + frame->AddImage(image); // return the modified frame return frame; diff --git a/src/effects/Mask.cpp b/src/effects/Mask.cpp index b232f4b2..52826ac9 100644 --- a/src/effects/Mask.cpp +++ b/src/effects/Mask.cpp @@ -30,14 +30,14 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Mask::Mask() : reader(NULL) { +Mask::Mask() : reader(NULL), replace_image(false) { // Init effect properties init_effect_details(); } // Default constructor Mask::Mask(ReaderBase *mask_reader, Keyframe mask_brightness, Keyframe mask_contrast) throw(InvalidFile, ReaderClosed) : - reader(mask_reader), brightness(mask_brightness), contrast(mask_contrast) + reader(mask_reader), brightness(mask_brightness), contrast(mask_contrast), replace_image(false) { // Init effect properties init_effect_details(); @@ -56,71 +56,95 @@ void Mask::init_effect_details() info.has_video = true; } -// Set brightness and contrast (brightness between 100 and -100) -void Mask::set_brightness_and_contrast(tr1::shared_ptr image, float brightness, float contrast) +// Constrain a color value from 0 to 255 +int Mask::constrain(int color_value) { - // Determine if white or black image is needed - if (brightness >= -100.0 and brightness <= 0.0) - { - // Make mask darker - double black_alpha = abs(brightness) / 100.0; - tr1::shared_ptr black = tr1::shared_ptr(new Magick::Image(mask->size(), Magick::Color("Black"))); - 
black->matte(true); - black->quantumOperator(Magick::OpacityChannel, Magick::MultiplyEvaluateOperator, black_alpha); - image->composite(*black.get(), 0, 0, Magick::OverCompositeOp); + // Constrain new color from 0 to 255 + if (color_value < 0) + color_value = 0; + else if (color_value > 255) + color_value = 255; - } - else if (brightness > 0.0 and brightness <= 100.0) + return color_value; +} + +// Get grayscale mask image +tr1::shared_ptr Mask::get_grayscale_mask(tr1::shared_ptr mask_frame_image, int width, int height, float brightness, float contrast) +{ + // Get pixels for mask image + unsigned char *pixels = (unsigned char *) mask_frame_image->bits(); + + // Convert the mask image to grayscale + // Loop through pixels + for (int pixel = 0, byte_index=0; pixel < mask_frame_image->width() * mask_frame_image->height(); pixel++, byte_index+=4) { - // Make mask whiter - double white_alpha = brightness / 100.0; - tr1::shared_ptr white = tr1::shared_ptr(new Magick::Image(mask->size(), Magick::Color("White"))); - white->matte(true); - white->quantumOperator(Magick::OpacityChannel, Magick::MultiplyEvaluateOperator, white_alpha); - image->composite(*white.get(), 0, 0, Magick::OverCompositeOp); + // Get the RGB values from the pixel + int R = pixels[byte_index]; + int G = pixels[byte_index + 1]; + int B = pixels[byte_index + 2]; + + // Get the average luminosity + int gray_value = qGray(R, G, B); + + // Adjust the contrast + int factor = (259 * (contrast + 255)) / (255 * (259 - contrast)); + gray_value = constrain((factor * (gray_value - 128)) + 128); + + // Adjust the brightness + gray_value += (255 * brightness); + + // Constrain the value from 0 to 255 + gray_value = constrain(gray_value); + + // Set all pixels to gray value + pixels[byte_index] = gray_value; + pixels[byte_index + 1] = gray_value; + pixels[byte_index + 2] = gray_value; + pixels[byte_index + 3] = 255; } - // Set Contrast - image->sigmoidalContrast(true, contrast); - + // Resize mask image to match frame 
size + return tr1::shared_ptr(new QImage(mask_frame_image->scaled(width, height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation))); } // This method is required for all derived classes of EffectBase, and returns a // modified openshot::Frame object tr1::shared_ptr Mask::GetFrame(tr1::shared_ptr frame, int frame_number) { - // Check if reader is open + // Get the mask image (from the mask reader) + tr1::shared_ptr frame_image = frame->GetImage(); + + // Check if mask reader is open if (!reader->IsOpen()) + #pragma omp critical (open_mask_reader) reader->Open(); // Get the mask image (from the mask reader) - mask = reader->GetFrame(frame_number)->GetImage(); - mask->type(Magick::GrayscaleType); // convert to grayscale - mask->matte(false); // Remove transparency from the image. This is required for the composite operator to copy the brightness of each pixel into the alpha channel + tr1::shared_ptr mask = tr1::shared_ptr(new QImage(*reader->GetFrame(frame_number)->GetImage())); - // Resize mask to match this frame size (if different) - if (frame->GetImage()->size() != mask->size()) + // Convert mask to grayscale and resize to frame size + mask = get_grayscale_mask(mask, frame_image->width(), frame_image->height(), brightness.GetValue(frame_number), contrast.GetValue(frame_number)); + + + // Get pixels for frame image + unsigned char *pixels = (unsigned char *) frame_image->bits(); + unsigned char *mask_pixels = (unsigned char *) mask->bits(); + + // Convert the mask image to grayscale + // Loop through pixels + for (int pixel = 0, byte_index=0; pixel < frame_image->width() * frame_image->height(); pixel++, byte_index+=4) { - Magick::Geometry new_size(frame->GetImage()->size().width(), frame->GetImage()->size().height()); - new_size.aspect(true); - mask->resize(new_size); + // Get the RGB values from the pixel + int Frame_Alpha = pixels[byte_index + 3]; + int Mask_Value = constrain(Frame_Alpha - (int)mask_pixels[byte_index]); // Red pixel (all colors should have the 
same value here) + + // Set all pixels to gray value + pixels[byte_index + 3] = Mask_Value; } - cout << "brightness.GetValue(" << frame_number << "): " << brightness.GetValue(frame_number) << endl; - cout << "contrast.GetValue(" << frame_number << "): " << contrast.GetValue(frame_number) << endl; - - // Set the brightness of the mask (from a user-defined curve) - set_brightness_and_contrast(mask, brightness.GetValue(frame_number), contrast.GetValue(frame_number)); - - // Get copy of our source frame's image - tr1::shared_ptr copy_source = tr1::shared_ptr(new Magick::Image(*frame->GetImage().get())); - copy_source->channel(Magick::MatteChannel); // extract alpha channel as grayscale image - copy_source->matte(false); // remove alpha channel - copy_source->negate(true); // negate source alpha channel before multiplying mask - copy_source->composite(*mask.get(), 0, 0, Magick::MultiplyCompositeOp); // multiply mask grayscale (i.e. combine the 2 grayscale images) - - // Copy the combined alpha channel back to the frame - frame->GetImage()->composite(*copy_source.get(), 0, 0, Magick::CopyOpacityCompositeOp); + // Replace the frame's image with the current mask (good for debugging) + if (replace_image) + frame->AddImage(mask); // not typically called when using a mask // return the modified frame return frame; @@ -142,6 +166,7 @@ Json::Value Mask::JsonValue() { root["brightness"] = brightness.JsonValue(); root["contrast"] = contrast.JsonValue(); root["reader"] = reader->JsonValue(); + root["replace_image"] = replace_image; // return JsonValue return root; @@ -177,6 +202,8 @@ void Mask::SetJsonValue(Json::Value root) { EffectBase::SetJsonValue(root); // Set data from Json (if key is found) + if (!root["replace_image"].isNull()) + replace_image = root["replace_image"].asBool(); if (!root["brightness"].isNull()) brightness.SetJsonValue(root["brightness"]); if (!root["contrast"].isNull()) @@ -210,6 +237,12 @@ void Mask::SetJsonValue(Json::Value root) { reader = new 
ImageReader(root["reader"]["path"].asString()); reader->SetJsonValue(root["reader"]); + } else if (type == "QtImageReader") { + + // Create new reader + reader = new QtImageReader(root["reader"]["path"].asString()); + reader->SetJsonValue(root["reader"]); + } else if (type == "ChunkReader") { // Create new reader @@ -237,6 +270,7 @@ string Mask::PropertiesJSON(int requested_frame) { root["start"] = add_property_json("Start", Start(), "float", "", false, 0, 0, 1000 * 60 * 30, CONSTANT, -1, false); root["end"] = add_property_json("End", End(), "float", "", false, 0, 0, 1000 * 60 * 30, CONSTANT, -1, false); root["duration"] = add_property_json("Duration", Duration(), "float", "", false, 0, 0, 1000 * 60 * 30, CONSTANT, -1, true); + root["replace_image"] = add_property_json("Replace Image", replace_image, "bool", "", false, 0, 0, 1, CONSTANT, -1, false); // Keyframes root["brightness"] = add_property_json("Brightness", brightness.GetValue(requested_frame), "float", "", brightness.Contains(requested_point), brightness.GetCount(), -10000, 10000, brightness.GetClosestPoint(requested_point).interpolation, brightness.GetClosestPoint(requested_point).co.X, false); diff --git a/src/effects/Negate.cpp b/src/effects/Negate.cpp index f8d3c732..61690fb3 100644 --- a/src/effects/Negate.cpp +++ b/src/effects/Negate.cpp @@ -46,8 +46,8 @@ Negate::Negate() // modified openshot::Frame object tr1::shared_ptr Negate::GetFrame(tr1::shared_ptr frame, int frame_number) { - // Make this range of colors transparent - frame->GetImage()->negate(); + // Make a negative of the images pixels + frame->GetImage()->invertPixels(); // return the modified frame return frame; diff --git a/src/examples/Example.cpp b/src/examples/Example.cpp index 6b7f9820..9c42dae5 100644 --- a/src/examples/Example.cpp +++ b/src/examples/Example.cpp @@ -40,61 +40,70 @@ using namespace openshot; using namespace tr1; + int main(int argc, char* argv[]) { -// Timeline t10(1280, 720, Fraction(24,1), 44100, 2); -// t10.debug = 
false; -// Clip c10("/home/jonathan/Videos/sintel_trailer-720p.mp4"); -// c10.rotation.AddPoint(1, 0.0); -// c10.rotation.AddPoint(1000, 360.0); -// c10.Open(); -// c10.Position(1.05); -// -// Negate n; -// c10.AddEffect(&n); -// -// // add clip to timeline -// t10.AddClip(&c10); -// -// for (int z = 0; z<1000; z++) { -// t10.GetFrame(z); -// cout << z << endl; -// } -// return 0; -// -// -// // Test getting lots of JSON -// cout << "starting..." << endl; -// -// Json::Value root; -// root = Json::Value(Json::arrayValue); -// for (int outer = 0; outer < 1000; outer++) { -// openshot::Keyframe k; -// //cout << "creating " << outer << endl; -// for (int z = 0; z<10; z++) { -// openshot::Point p(z * 10, 1 * z * outer, BEZIER); -// k.AddPoint(p); -// } -// root.append(k.JsonValue()); -// } -// //cout << root.toStyledString() << endl; -// -// // Test loading lots of JSON -// for (int z = 0; z<1000; z++) { -// //cout << "loading " << z << endl; -// Json::Value keyframe_json = root[z]; -// openshot::Keyframe k; -// k.SetJsonValue(keyframe_json); -// } -// -// cout << "Successfully ended" << endl; -// return 0; + FFmpegReader r110("/home/jonathan/apps/libopenshot/src/examples/piano-mono.wav"); + r110.Open(); + + FrameMapper m110(&r110, Fraction(24,1), PULLDOWN_NONE, 22050, 2, LAYOUT_STEREO); + m110.Open(); + + Clip c110(&m110); + c110.Open(); + + Timeline t10(1280, 720, Fraction(24,1), 22050, 2, LAYOUT_STEREO); + t10.debug = false; + //Clip c20("/home/jonathan/Pictures/DSC00660.JPG"); + //c20.End(1000.0); + //c20.Layer(-1); + //c20.scale = SCALE_STRETCH; + //c20.rotation.AddPoint(1, 0.0); + //c20.rotation.AddPoint(1000, 360.0); + Clip c10("/home/jonathan/apps/libopenshot/src/examples/piano-mono.wav"); + c10.volume.AddPoint(1, 0.0); + c10.volume.AddPoint(100, 1.0); +// c10.time.AddPoint(1, 1); +// c10.time.AddPoint(300, 900); +// c10.time.AddPoint(600, 300); +// c10.time.PrintValues(); + + //Color background((unsigned char)0, (unsigned char)255, (unsigned char)0, (unsigned 
char)0); + //background.red.AddPoint(1000, 255); + //background.green.AddPoint(1000, 0); + //t10.color = background; + + Color black; + black.red = Keyframe(0); + black.green = Keyframe(0); + black.blue = Keyframe(0); + + Keyframe brightness; + brightness.AddPoint(300, -1.0, BEZIER); + brightness.AddPoint(370, 0.5, BEZIER); + brightness.AddPoint(425, -0.5, BEZIER); + brightness.AddPoint(600, 1.0, BEZIER); + + //Negate e; + //Deinterlace e(false); + //ChromaKey e(black, Keyframe(30)); + //QtImageReader mask_reader("/home/jonathan/apps/openshot-qt/src/transitions/extra/big_cross_right_barr.png"); + //QtImageReader mask_reader1("/home/jonathan/apps/openshot-qt/src/transitions/extra/big_barr.png"); + //Mask e(&mask_reader, brightness, Keyframe(3.0)); + //c10.AddEffect(&e); + //Mask e1(&mask_reader1, brightness, Keyframe(3.0)); + //c10.AddEffect(&e1); + + // add clip to timeline + t10.AddClip(&c10); + //t10.AddClip(&c20); + t10.Open(); // Reader - FFmpegReader r9("/home/jonathan/apps/libopenshot/src/examples/piano-mono.wav"); - r9.Open(); - r9.debug = true; +// FFmpegReader r9("/home/jonathan/Videos/sintel_trailer-720p.mp4"); +// r9.Open(); +// r9.debug = true; // Mapper //FrameMapper map(&r9, Fraction(24,1), PULLDOWN_NONE, 48000, 2, LAYOUT_STEREO); @@ -103,14 +112,15 @@ int main(int argc, char* argv[]) //map.Open(); /* WRITER ---------------- */ - FFmpegWriter w9("/home/jonathan/output1.mp3"); + FFmpegWriter w9("/home/jonathan/output-pops.mp3"); w9.debug = false; //ImageWriter w9("/home/jonathan/output.gif"); // Set options - w9.SetAudioOptions(true, "libmp3lame", r9.info.sample_rate, r9.info.channels, r9.info.channel_layout, 120000); - //w9.SetAudioOptions(true, "libmp3lame", 44100, r9.info.channels, r9.info.channel_layout, 120000); - //w9.SetVideoOptions(true, "libvpx", map.info.fps, map.info.width, map.info.height, map.info.pixel_ratio, false, false, 1500000); + //w9.SetAudioOptions(true, "libvorbis", t10.info.sample_rate, t10.info.channels, t10.info.channel_layout, 
120000); + w9.SetAudioOptions(true, "libmp3lame", 22050, t10.info.channels, t10.info.channel_layout, 120000); + //w9.SetVideoOptions(true, "libvpx", t10.info.fps, t10.info.width, t10.info.height, t10.info.pixel_ratio, false, false, 1500000); + //w9.SetVideoOptions(true, "libx264", t10.info.fps, t10.info.width, t10.info.height, t10.info.pixel_ratio, false, false, 1500000); //w9.SetVideoOptions(true, "rawvideo", r9.info.fps, 400, 2, r9.info.pixel_ratio, false, false, 20000000); //w9.SetVideoOptions("GIF", r9.info.fps, r9.info.width, r9.info.height, 70, 1, true); @@ -134,23 +144,29 @@ int main(int argc, char* argv[]) // 147000 frames, 28100 frames //for (int frame = 1; frame <= (r9.info.video_length - 1); frame++) //for (int z = 0; z < 2; z++) - for (int frame = 1; frame <= 300; frame++) + for (int frame = 1; frame <= 120; frame++) //int frame = 1; //while (true) { //int frame_number = (rand() % 750) + 1; - int frame_number = ( frame); + int frame_number = frame; cout << "get " << frame << " (frame: " << frame_number << ") " << endl; - tr1::shared_ptr f = r9.GetFrame(frame_number); + tr1::shared_ptr f = t10.GetFrame(frame_number); + cout << "mapped frame channel layouts: " << f->ChannelsLayout() << endl; cout << "display it (" << f->number << ", " << f << ")" << endl; //r9.GetFrame(frame_number)->DisplayWaveform(); - if (frame >= 7) - f->DisplayWaveform(); + //if (frame >= 495) + // f->DisplayWaveform(); + //f->Display(); + //f->Save("/home/jonathan/test.png", 1.0); //f->AddColor(r9.info.width, r9.info.height, "blue"); - //w9.WriteFrame(f); + w9.WriteFrame(f); //frame++; + + //if (frame >= 100) + // break; } cout << "done looping" << endl; @@ -162,8 +178,8 @@ int main(int argc, char* argv[]) w9.Close(); // Close timeline - r9.Close(); - //map.Close(); + //r9.Close(); + t10.Close(); /* ---------------- */ cout << "happy ending" << endl; @@ -173,194 +189,10 @@ int main(int argc, char* argv[]) - - FFmpegReader sinelReader("/home/jonathan/Videos/sintel_trailer-720p.mp4"); 
- //sinelReader.debug = true; - sinelReader.Open(); - - // init random #s - //srand(time(NULL)); - - // Seek test - int x = 0; - while (true) { - x++; - int frame_number = (rand() % 625) + 1; - cout << "X: " << x << ", Frame: " << frame_number << endl; - tr1::shared_ptr f = sinelReader.GetFrame(frame_number); - //f->AddOverlayNumber(frame_number); - //f->Display(); - f->DisplayWaveform(); - - //f->DisplayWaveform(); - // sinelReader.debug = true; - - //if (x == 7655) - // break; - } - - //cout << sinelReader.OutputDebugJSON() << endl; - sinelReader.Close(); - return 0; - - -// Timeline t1000(1280, 720, Fraction(24,1), 44100, 2); -// t1000.SetJson("{\"width\": 1280, \"clips\": [{\"position\": 0, \"layer\": 4, \"gravity\": 4, \"reader\": {\"width\": 640, \"file_size\": \"10998\", \"video_stream_index\": -1, \"duration\": 86400, \"top_field_first\": true, \"pixel_format\": -1, \"type\": \"ImageReader\", \"pixel_ratio\": {\"num\": 1, \"den\": 1}, \"video_timebase\": {\"num\": 1, \"den\": 30}, \"audio_bit_rate\": 0, \"has_audio\": false, \"sample_rate\": 0, \"audio_stream_index\": -1, \"video_bit_rate\": 0, \"fps\": {\"num\": 30, \"den\": 1}, \"channels\": 0, \"vcodec\": \"Joint Photographic Experts Group JFIF format\", \"video_length\": \"2592000\", \"interlaced_frame\": false, \"path\": \"/home/jonathan/Pictures/100_0685 (copy).JPG\", \"height\": 360, \"audio_timebase\": {\"num\": 1, \"den\": 1}, \"display_ratio\": {\"num\": 16, \"den\": 9}, \"has_video\": true, \"acodec\": \"\"}, \"title\": \"40319877_640.jpg\", \"duration\": 86400, \"scale\": 1, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"volume\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 
2}]}, \"wave_color\": {\"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"id\": \"F8GFFDCHSB\", \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"end\": 23, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/LEUJBK9QMI.png\", \"file_id\": \"LEUJBK9QMI\", \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"start\": 0, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"anchor\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"$$hashKey\": \"00Y\"}, {\"position\": 8.64, \"image\": 
\"/home/jonathan/.openshot_qt/thumbnail/LEUJBK9QMI.png\", \"gravity\": 4, \"reader\": {\"width\": 640, \"pixel_ratio\": {\"num\": 1, \"den\": 1}, \"video_stream_index\": -1, \"duration\": 86400, \"video_length\": \"2592000\", \"pixel_format\": -1, \"audio_timebase\": {\"num\": 1, \"den\": 1}, \"file_size\": \"10998\", \"video_timebase\": {\"num\": 1, \"den\": 30}, \"audio_bit_rate\": 0, \"has_audio\": false, \"sample_rate\": 0, \"audio_stream_index\": -1, \"video_bit_rate\": 0, \"fps\": {\"num\": 30, \"den\": 1}, \"channels\": 0, \"vcodec\": \"Joint Photographic Experts Group JFIF format\", \"top_field_first\": true, \"interlaced_frame\": false, \"path\": \"/home/jonathan/Pictures/100_0685 (copy).JPG\", \"height\": 360, \"display_ratio\": {\"num\": 16, \"den\": 9}, \"has_video\": true, \"acodec\": \"\", \"type\": \"ImageReader\"}, \"title\": \"40319877_640.jpg\", \"duration\": 86400, \"scale\": 1, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"volume\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"wave_color\": {\"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"end\": 24, \"id\": \"CIKGBFTVVY\", \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, 
\"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"layer\": 3, \"file_id\": \"LEUJBK9QMI\", \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"start\": 0, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"anchor\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"$$hashKey\": \"011\"}, {\"position\": 40.16, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/LEUJBK9QMI.png\", \"gravity\": 4, \"reader\": {\"width\": 640, \"pixel_ratio\": {\"num\": 1, \"den\": 1}, \"video_stream_index\": -1, \"duration\": 86400, \"video_length\": \"2592000\", \"pixel_format\": -1, \"audio_timebase\": {\"num\": 1, \"den\": 1}, \"file_size\": \"10998\", \"video_timebase\": {\"num\": 1, \"den\": 30}, \"audio_bit_rate\": 0, \"has_audio\": false, \"sample_rate\": 0, \"audio_stream_index\": -1, \"video_bit_rate\": 0, \"fps\": {\"num\": 30, \"den\": 1}, \"channels\": 0, \"vcodec\": \"Joint Photographic Experts Group JFIF format\", \"top_field_first\": true, \"interlaced_frame\": false, \"path\": \"/home/jonathan/Pictures/100_0685 (copy).JPG\", 
\"height\": 360, \"display_ratio\": {\"num\": 16, \"den\": 9}, \"has_video\": true, \"acodec\": \"\", \"type\": \"ImageReader\"}, \"title\": \"40319877_640.jpg\", \"duration\": 86400, \"scale\": 1, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"volume\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"wave_color\": {\"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"end\": 47, \"id\": \"HFCX8JEV29\", \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"layer\": 4, \"file_id\": \"LEUJBK9QMI\", \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, 
\"interpolation\": 2}]}, \"start\": 0, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"anchor\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"$$hashKey\": \"01B\"}], \"fps\": 30, \"progress\": [[0, 30, \"rendering\"], [40, 50, \"complete\"], [100, 150, \"complete\"]], \"duration\": 600, \"scale\": 16, \"tick_pixels\": 100, \"settings\": {}, \"files\": [{\"width\": 640, \"path\": \"/home/jonathan/Pictures/100_0685 (copy).JPG\", \"file_size\": \"10998\", \"video_stream_index\": -1, \"duration\": 86400.0, \"top_field_first\": true, \"pixel_format\": -1, \"type\": \"ImageReader\", \"pixel_ratio\": {\"num\": 1, \"den\": 1}, \"video_timebase\": {\"num\": 1, \"den\": 30}, \"audio_bit_rate\": 0, \"has_audio\": false, \"sample_rate\": 0, \"audio_stream_index\": -1, \"video_bit_rate\": 0, \"fps\": {\"num\": 30, \"den\": 1}, \"channels\": 0, \"vcodec\": \"Joint Photographic Experts Group JFIF format\", \"video_length\": \"2592000\", \"interlaced_frame\": false, \"media_type\": \"image\", \"id\": \"LEUJBK9QMI\", \"acodec\": \"\", \"audio_timebase\": {\"num\": 1, \"den\": 1}, \"display_ratio\": {\"num\": 16, \"den\": 9}, \"has_video\": true, \"height\": 360}], \"playhead_position\": 0, \"markers\": [{\"location\": 16, \"icon\": \"yellow.png\"}, {\"location\": 120, \"icon\": \"green.png\"}, {\"location\": 300, \"icon\": \"red.png\"}, {\"location\": 10, \"icon\": \"purple.png\"}], \"height\": 720, \"layers\": [{\"y\": 0, \"number\": 4}, {\"y\": 0, \"number\": 3}, {\"y\": 0, \"number\": 2}, {\"y\": 0, \"number\": 1}, {\"y\": 0, \"number\": 0}]}"); -// t1000.GetFrame(0)->Display(); -// 
//t1000.GetFrame(0)->Thumbnail("/home/jonathan/output.png", 320, 180, "/home/jonathan/Downloads/mask.png", "/home/jonathan/Downloads/overlay.png", "", false); -// t1000.GetFrame(0)->Thumbnail("/home/jonathan/output.png", 134, 88, "/home/jonathan/Downloads/mask.png", "/home/jonathan/Downloads/overlay.png", "#000", false); - -// t1000.ApplyJsonDiff("[{\"key\": [\"clips\", {\"id\": \"BMCWP7ACMR\"}], \"value\": {\"end\": 8, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"reader\": {\"acodec\": \"\", \"channels\": 0, \"video_timebase\": {\"den\": 30, \"num\": 1}, \"type\": \"ImageReader\", \"video_length\": \"2592000\", \"has_video\": true, \"video_bit_rate\": 0, \"display_ratio\": {\"den\": 79, \"num\": 100}, \"vcodec\": \"Portable Network Graphics\", \"audio_stream_index\": -1, \"top_field_first\": true, \"fps\": {\"den\": 1, \"num\": 30}, \"has_audio\": false, \"interlaced_frame\": false, \"sample_rate\": 0, \"file_size\": \"412980\", \"pixel_ratio\": {\"den\": 1, \"num\": 1}, \"video_stream_index\": -1, \"audio_timebase\": {\"den\": 1, \"num\": 1}, \"pixel_format\": -1, \"duration\": 86400, \"height\": 1975, \"path\": \"/home/jonathan/Downloads/openshot_studios_banner1.png\", \"audio_bit_rate\": 0, \"width\": 2500}, \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"gravity\": 4, \"id\": \"BMCWP7ACMR\", \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 
2}]}, \"position\": 0, \"layer\": 3, \"$$hashKey\": \"00J\", \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"wave_color\": {\"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"start\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale\": 1, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"anchor\": 0, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/JJNH7JOX9M.png\", \"file_id\": \"JJNH7JOX9M\", \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"duration\": 86400, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"title\": \"openshot_studios_banner1.png\", \"volume\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}}, \"partial\": false, \"type\": \"update\"}]"); -// t1000.GetFrame(0)->Display(); -// t1000.ApplyJsonDiff("[{\"key\": [\"clips\", {\"id\": \"BMCWP7ACMR\"}], \"value\": {\"end\": 50, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c1_x\": 
{\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"reader\": {\"acodec\": \"\", \"channels\": 0, \"video_timebase\": {\"den\": 30, \"num\": 1}, \"type\": \"ImageReader\", \"video_length\": \"2592000\", \"has_video\": true, \"video_bit_rate\": 0, \"display_ratio\": {\"den\": 79, \"num\": 100}, \"vcodec\": \"Portable Network Graphics\", \"audio_stream_index\": -1, \"top_field_first\": true, \"fps\": {\"den\": 1, \"num\": 30}, \"has_audio\": false, \"interlaced_frame\": false, \"sample_rate\": 0, \"file_size\": \"412980\", \"pixel_ratio\": {\"den\": 1, \"num\": 1}, \"video_stream_index\": -1, \"audio_timebase\": {\"den\": 1, \"num\": 1}, \"pixel_format\": -1, \"duration\": 86400, \"height\": 1975, \"path\": \"/home/jonathan/Downloads/openshot_studios_banner1.png\", \"audio_bit_rate\": 0, \"width\": 2500}, \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"gravity\": 4, \"id\": \"BMCWP7ACMR\", \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"position\": 0, \"layer\": 3, \"$$hashKey\": \"00J\", \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": 
{\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"wave_color\": {\"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"start\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale\": 1, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"anchor\": 0, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/JJNH7JOX9M.png\", \"file_id\": \"JJNH7JOX9M\", \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"duration\": 86400, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"title\": \"openshot_studios_banner1.png\", \"volume\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}}, \"partial\": false, \"type\": \"update\"}]"); -// t1000.GetFrame(0)->Display(); -// t1000.ApplyJsonDiff("[{\"key\": [\"clips\"], \"value\": {\"end\": 8.0, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"location_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"reader\": {\"acodec\": \"\", \"channels\": 0, \"video_length\": \"2592000\", \"video_timebase\": {\"den\": 30, \"num\": 1}, \"type\": \"ImageReader\", \"fps\": {\"den\": 1, \"num\": 30}, \"interlaced_frame\": false, \"video_bit_rate\": 0, \"display_ratio\": {\"den\": 1, \"num\": 1}, \"vcodec\": 
\"Portable Network Graphics\", \"audio_stream_index\": -1, \"top_field_first\": true, \"has_audio\": false, \"has_video\": true, \"sample_rate\": 0, \"file_size\": \"544426\", \"pixel_ratio\": {\"den\": 1, \"num\": 1}, \"video_stream_index\": -1, \"audio_timebase\": {\"den\": 1, \"num\": 1}, \"pixel_format\": -1, \"duration\": 86400.0, \"height\": 1347, \"path\": \"/home/jonathan/Downloads/OSlogo.png\", \"audio_bit_rate\": 0, \"width\": 1347}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"gravity\": 4, \"id\": \"0EW6OJW1N9\", \"title\": \"OSlogo.png\", \"file_id\": \"1YD3C3IZHX\", \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"position\": 0.0, \"layer\": 0, \"wave_color\": {\"red\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 28672.0}, \"interpolation\": 2}]}, \"blue\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 65280.0}, \"interpolation\": 2}]}}, \"alpha\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"waveform\": false, \"time\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 0.0}, \"interpolation\": 2}]}, \"start\": 0.0, \"scale\": 1, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, 
\"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"anchor\": 0, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/1YD3C3IZHX.png\", \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 1.0}, \"interpolation\": 2}]}, \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 1.0}, \"interpolation\": 2}]}, \"duration\": 86400.0, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": -1.0}, \"interpolation\": 2}]}, \"volume\": {\"Points\": [{\"co\": {\"X\": 0.0, \"Y\": 1.0}, \"interpolation\": 2}]}}, \"partial\": false, \"type\": \"insert\"}]"); -// t1000.GetFrame(0)->Display(); -// t1000.ApplyJsonDiff("[{\"key\": [\"clips\", {\"id\": \"0EW6OJW1N9\"}], \"value\": {\"end\": 8, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"reader\": {\"acodec\": \"\", \"channels\": 0, \"video_timebase\": {\"den\": 30, \"num\": 1}, \"type\": \"ImageReader\", \"video_length\": \"2592000\", \"has_video\": true, \"video_bit_rate\": 0, \"display_ratio\": {\"den\": 1, \"num\": 1}, \"vcodec\": \"Portable Network Graphics\", \"audio_stream_index\": -1, \"top_field_first\": true, \"fps\": {\"den\": 1, \"num\": 30}, \"has_audio\": false, \"interlaced_frame\": false, \"sample_rate\": 0, \"file_size\": \"544426\", \"pixel_ratio\": {\"den\": 1, \"num\": 1}, \"video_stream_index\": -1, \"audio_timebase\": {\"den\": 1, \"num\": 1}, \"pixel_format\": -1, \"duration\": 86400, \"height\": 1347, \"path\": \"/home/jonathan/Downloads/OSlogo.png\", \"audio_bit_rate\": 0, \"width\": 1347}, \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, 
\"interpolation\": 2}]}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"gravity\": 4, \"id\": \"0EW6OJW1N9\", \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"position\": 0, \"layer\": 4, \"$$hashKey\": \"00Q\", \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"wave_color\": {\"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"start\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale\": 1, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"anchor\": 0, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/1YD3C3IZHX.png\", \"file_id\": \"1YD3C3IZHX\", \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"duration\": 86400, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"title\": \"OSlogo.png\", \"volume\": 
{\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}}, \"partial\": false, \"type\": \"update\"}]"); -// t1000.GetFrame(0)->Display(); -// t1000.ApplyJsonDiff("[{\"key\": [\"clips\", {\"id\": \"0EW6OJW1N9\"}], \"value\": {\"end\": 35, \"perspective_c1_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c1_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"location_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"location_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"reader\": {\"acodec\": \"\", \"channels\": 0, \"video_timebase\": {\"den\": 30, \"num\": 1}, \"type\": \"ImageReader\", \"video_length\": \"2592000\", \"has_video\": true, \"video_bit_rate\": 0, \"display_ratio\": {\"den\": 1, \"num\": 1}, \"vcodec\": \"Portable Network Graphics\", \"audio_stream_index\": -1, \"top_field_first\": true, \"fps\": {\"den\": 1, \"num\": 30}, \"has_audio\": false, \"interlaced_frame\": false, \"sample_rate\": 0, \"file_size\": \"544426\", \"pixel_ratio\": {\"den\": 1, \"num\": 1}, \"video_stream_index\": -1, \"audio_timebase\": {\"den\": 1, \"num\": 1}, \"pixel_format\": -1, \"duration\": 86400, \"height\": 1347, \"path\": \"/home/jonathan/Downloads/OSlogo.png\", \"audio_bit_rate\": 0, \"width\": 1347}, \"crop_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_width\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"gravity\": 4, \"id\": \"0EW6OJW1N9\", \"scale_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"shear_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"shear_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"position\": 0, \"layer\": 4, \"$$hashKey\": \"00Q\", \"alpha\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"rotation\": {\"Points\": [{\"co\": {\"X\": 0, 
\"Y\": 0}, \"interpolation\": 2}]}, \"waveform\": false, \"time\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"crop_height\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c4_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"wave_color\": {\"red\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"green\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 28672}, \"interpolation\": 2}]}, \"blue\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 65280}, \"interpolation\": 2}]}}, \"crop_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 0}, \"interpolation\": 2}]}, \"start\": 0, \"perspective_c3_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"scale\": 1, \"perspective_c2_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"perspective_c2_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"anchor\": 0, \"image\": \"/home/jonathan/.openshot_qt/thumbnail/1YD3C3IZHX.png\", \"file_id\": \"1YD3C3IZHX\", \"scale_x\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}, \"duration\": 86400, \"perspective_c3_y\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": -1}, \"interpolation\": 2}]}, \"title\": \"OSlogo.png\", \"volume\": {\"Points\": [{\"co\": {\"X\": 0, \"Y\": 1}, \"interpolation\": 2}]}}, \"partial\": false, \"type\": \"update\"}]"); -// t1000.GetFrame(0)->Display(); - -// return 0; - - /* - - FFmpegReader sinelReader("/home/jonathan/Videos/sintel_trailer-720p.mp4"); - sinelReader.Open(); - - AudioReaderSource readerSource(&sinelReader, 1, 10000); - for (int z = 0; z < 2000; z++) { - // Get audio chunks - int chunk_size = 750; - juce::AudioSampleBuffer *master_buffer = new juce::AudioSampleBuffer(sinelReader.info.channels, chunk_size); - master_buffer->clear(); - const AudioSourceChannelInfo 
info = {master_buffer, 0, chunk_size}; - - // Get next audio block - readerSource.getNextAudioBlock(info); - - // Delete buffer - master_buffer->clear(); - delete master_buffer; - } - - return 0; - - Profile p("/home/jonathan/Apps/openshot/openshot/profiles/atsc_1080p_25"); - return 0; - - Timeline t77(640, 480, Fraction(24,1), 44100, 2); - t77.ApplyJsonDiff("[{\"type\":\"insert\",\"key\":[\"effects\",\"effect\"],\"value\":{\"end\":0,\"id\":\"e004\",\"layer\":0,\"order\":0,\"position\":0,\"start\":0,\"type\":\"Negate\"}}]"); - cout << t77.Json() << endl; - t77.ApplyJsonDiff("[{\"type\":\"update\",\"key\":[\"effects\",\"effect\",{\"id\":\"e004\"}],\"value\":{\"order\":10.5,\"position\":11.6,\"start\":12.7}}]"); - cout << t77.Json() << endl; - t77.ApplyJsonDiff("[{\"type\":\"delete\",\"key\":[\"effects\",\"effect\",{\"id\":\"e004\"}],\"value\":{}}]"); - cout << t77.Json() << endl; - t77.ApplyJsonDiff("[{\"type\":\"insert\",\"key\":[\"color\"],\"value\":{\"blue\":{\"Points\":[{\"co\":{\"X\":0,\"Y\":30},\"interpolation\":2}]},\"green\":{\"Points\":[{\"co\":{\"X\":0,\"Y\":20},\"interpolation\":2}]},\"red\":{\"Points\":[{\"co\":{\"X\":0,\"Y\":10},\"interpolation\":2}]}}}]"); - cout << t77.Json() << endl; - t77.ApplyJsonDiff("[{\"type\":\"delete\",\"key\":[\"color\"],\"value\":{}}]"); - cout << t77.Json() << endl; - return 0; - - //FFmpegReader r2("/home/jonathan/Videos/sintel_trailer-720p.mp4"); - //r2.Open(); - //cout << r2.Json() << endl; - 
//r2.SetJson("{\"acodec\":\"\",\"audio_bit_rate\":0,\"audio_stream_index\":-1,\"audio_timebase\":{\"den\":1,\"num\":1},\"channels\":0,\"display_ratio\":{\"den\":9,\"num\":16},\"duration\":10.03333377838135,\"file_size\":\"208835074\",\"fps\":{\"den\":1,\"num\":30},\"has_audio\":false,\"has_video\":true,\"height\":1080,\"interlaced_frame\":false,\"path\":\"/home/jonathan/Videos/space_undulation_hd.mov\",\"pixel_format\":13,\"pixel_ratio\":{\"den\":72,\"num\":72},\"sample_rate\":0,\"top_field_first\":false,\"type\":\"FFmpegReader\",\"vcodec\":\"mjpeg\",\"video_bit_rate\":166513021,\"video_length\":\"301\",\"video_stream_index\":0,\"video_timebase\":{\"den\":30,\"num\":1},\"width\":1920}"); - Clip c1; - c1.SetJson("{\"alpha\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":100,\"Y\":100,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":100,\"Y\":50,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":100,\"Y\":20,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"anchor\":0,\"crop_height\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"crop_width\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"crop_x\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,
\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"crop_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"end\":0,\"gravity\":4,\"layer\":0,\"location_x\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"location_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c1_x\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c1_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[
{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c2_x\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c2_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c3_x\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c3_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c4_x\":{\"Auto_Handle_Percentage\"
:1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"perspective_c4_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":-1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"position\":0,\"rotation\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"scale\":1,\"scale_x\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"scale_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"shear_x\":{\"Auto_Handle_Percentage\":1
,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"shear_y\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"start\":0,\"time\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"volume\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":1,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"wave_color\":{\"blue\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":65280,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":65280,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":65280,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"green\":{\"Auto_Handle_Percentage\":1,\"Points\":[{
\"co\":{\"X\":0,\"Y\":28672,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":28672,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":28672,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]},\"red\":{\"Auto_Handle_Percentage\":1,\"Points\":[{\"co\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_left\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_right\":{\"X\":0,\"Y\":0,\"delta\":0,\"increasing\":true,\"repeated\":{\"den\":1,\"num\":1}},\"handle_type\":0,\"interpolation\":0}]}},\"waveform\":false}"); - //c1.Reader(&r2); - cout << c1.Json() << endl; - //c1.Open(); - //c1.GetFrame(150)->Save("test.bmp", 1.0); - return 0; - - */ - - - // Image of interlaced frame -// ImageReader ir("/home/jonathan/apps/libopenshot/src/examples/interlaced.png"); -// ir.Open(); -// -// // FrameMapper to de-interlace frame -// //FrameMapper fm(&ir, Fraction(24,1), PULLDOWN_NONE); -// //fm.DeInterlaceFrame(ir.GetFrame(1), true)->Display(); -// Deinterlace de(false); -// de.GetFrame(ir.GetFrame(1), 1)->Display(); -// -// -// return 0; - - - // Reader - FFmpegReader r1("/home/jonathan/Videos/sintel_trailer-720p.mp4"); - r1.Open(); - r1.DisplayInfo(); - r1.info.has_audio = false; - //r1.enable_seek = true; - - // FrameMapper - //FrameMapper r(&r1, Fraction(24,1), PULLDOWN_ADVANCED); - //r.PrintMapping(); - - /* WRITER ---------------- */ - FFmpegWriter w("/home/jonathan/output.mp4"); - - // Set options - //w.SetAudioOptions(true, "libvorbis", 48000, 2, 188000); - w.SetAudioOptions(true, "libmp3lame", 44100, 1, LAYOUT_STEREO, 12800); - w.SetVideoOptions(true, "mpeg4", Fraction(24,1), 1280, 720, Fraction(1,1), false, false, 30000000); - //w.SetVideoOptions(true, "libmp3lame", openshot::Fraction(30,1), 720, 360, Fraction(1,1), false, false, 
3000000); - - // Prepare Streams - w.PrepareStreams(); - - // Write header - w.WriteHeader(); - - // Output stream info - w.OutputStreamInfo(); - - //for (int frame = 3096; frame <= 3276; frame++) - for (int frame = 1; frame <= 200; frame++) - { -// tr1::shared_ptr f(new Frame(frame, 1280, 720, "#000000", 44100, 2)); -// if (frame % 2 == 0) -// f->AddColor(1280, 720, "Yellow"); -// else -// f->AddColor(1280, 720, "Black"); -// -// f->AddOverlayNumber(f->number); -// cout << f->number << endl; -// w.WriteFrame(f); - - tr1::shared_ptr f = r1.GetFrame(frame); - if (f) - { - //if (frame >= 250) - // f->DisplayWaveform(); - //f->AddOverlayNumber(frame); - //f->Display(); - - // Write frame - //f->Display(); - cout << "queue frame " << frame << " (" << f->number << ", " << f << ")" << endl; - w.WriteFrame(f); - } - } - - // Write Footer - w.WriteTrailer(); - - // Close writer & reader - w.Close(); - - // Close timeline - r1.Close(); - /* ---------------- */ - - - cout << "Successfully Finished Timeline DEMO" << endl; - return 0; - } +//int main(int argc, char* argv[]) +//{ +// for (int z = 0; z<10; z++) +// main2(); +//} diff --git a/src/examples/ExampleBlackmagic.cpp b/src/examples/ExampleBlackmagic.cpp index bc9bf2ab..232df182 100644 --- a/src/examples/ExampleBlackmagic.cpp +++ b/src/examples/ExampleBlackmagic.cpp @@ -43,7 +43,7 @@ int main(int argc, char *argv[]) struct tm * timeinfo; /* TIMELINE ---------------- */ - Timeline t(1920, 1080, Fraction(30,1), 48000, 2); + Timeline t(1920, 1080, Fraction(30,1), 48000, 2, LAYOUT_STEREO); // Create background video ImageReader b1("/home/jonathan/Pictures/moon.jpg"); diff --git a/tests/Cache_Tests.cpp b/tests/Cache_Tests.cpp index 4e3a486d..a678c17a 100644 --- a/tests/Cache_Tests.cpp +++ b/tests/Cache_Tests.cpp @@ -58,6 +58,7 @@ TEST(Cache_Max_Bytes_Constructor) { // Add blank frame to the cache tr1::shared_ptr f(new Frame(i, 320, 240, "#000000")); + f->AddColor(320, 240, "#000000"); c.Add(i, f); } @@ -69,6 +70,7 @@ 
TEST(Cache_Max_Bytes_Constructor) { // Add blank frame to the cache tr1::shared_ptr f(new Frame(i, 320, 240, "#000000")); + f->AddColor(320, 240, "#000000"); c.Add(i, f); } diff --git a/tests/Clip_Tests.cpp b/tests/Clip_Tests.cpp index 540cdedf..cb4b503d 100644 --- a/tests/Clip_Tests.cpp +++ b/tests/Clip_Tests.cpp @@ -212,13 +212,14 @@ TEST(Clip_Effects) tr1::shared_ptr f = c10.GetFrame(500); // Get the image data - const Magick::PixelPacket* pixels = f->GetPixels(10); + const unsigned char* pixels = f->GetPixels(10); + int pixel_index = 112 * 4; // pixel 112 (4 bytes per pixel) // Check image properties on scanline 10, pixel 112 - CHECK_EQUAL(65535, pixels[112].red); - CHECK_EQUAL(65535, pixels[112].blue); - CHECK_EQUAL(65535, pixels[112].green); - CHECK_EQUAL(0, pixels[112].opacity); + CHECK_EQUAL(255, (int)pixels[pixel_index]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Check the # of Effects CHECK_EQUAL(1, c10.Effects().size()); @@ -233,14 +234,14 @@ TEST(Clip_Effects) // Get the image data pixels = f->GetPixels(10); + pixel_index = 112 * 4; // pixel 112 (4 bytes per pixel) // Check image properties on scanline 10, pixel 112 - CHECK_EQUAL(0, pixels[112].red); - CHECK_EQUAL(0, pixels[112].blue); - CHECK_EQUAL(0, pixels[112].green); - CHECK_EQUAL(0, pixels[112].opacity); + CHECK_EQUAL(0, (int)pixels[pixel_index]); + CHECK_EQUAL(0, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(0, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Check the # of Effects CHECK_EQUAL(2, c10.Effects().size()); - } diff --git a/tests/Color_Tests.cpp b/tests/Color_Tests.cpp index 1ab710a1..4c613d7c 100644 --- a/tests/Color_Tests.cpp +++ b/tests/Color_Tests.cpp @@ -47,19 +47,19 @@ TEST(Color_Animate_Colors) Color c1; // Set starting color (on frame 0) - c1.red.AddPoint(0, 12000); - c1.green.AddPoint(0, 5000); - c1.blue.AddPoint(0, 1000); + 
c1.red.AddPoint(1, 0); + c1.green.AddPoint(1, 120); + c1.blue.AddPoint(1, 255); // Set ending color (on frame 1000) - c1.red.AddPoint(1000, 32000); - c1.green.AddPoint(1000, 12000); - c1.blue.AddPoint(1000, 5000); + c1.red.AddPoint(1000, 0); + c1.green.AddPoint(1000, 255); + c1.blue.AddPoint(1000, 65); // Check the color at frame 500 - CHECK_CLOSE(22011, c1.red.GetInt(500), 0.01); - CHECK_CLOSE(8504, c1.green.GetInt(500), 0.01); - CHECK_CLOSE(3002, c1.blue.GetInt(500), 0.01); + CHECK_CLOSE(0, c1.red.GetInt(500), 0.01); + CHECK_CLOSE(187, c1.green.GetInt(500), 0.01); + CHECK_CLOSE(160, c1.blue.GetInt(500), 0.01); } TEST(Color_HEX_Value) @@ -79,5 +79,45 @@ TEST(Color_HEX_Value) } +TEST(Color_HEX_Constructor) +{ + // Color + openshot::Color c("#4586db"); + c.red.AddPoint(100, 255); + c.green.AddPoint(100, 255); + c.blue.AddPoint(100, 255); + CHECK_EQUAL("#4586db", c.GetColorHex(1)); + CHECK_EQUAL("#a2c2ed", c.GetColorHex(50)); + CHECK_EQUAL("#ffffff", c.GetColorHex(100)); +} +TEST(Color_Distance) +{ + // Color + openshot::Color c1("#040a0c"); + openshot::Color c2("#0c0c04"); + openshot::Color c3("#000000"); + openshot::Color c4("#ffffff"); + + CHECK_CLOSE(19.0f, Color::GetDistance(c1.red.GetInt(1), c1.blue.GetInt(1), c1.green.GetInt(1), c2.red.GetInt(1), c2.blue.GetInt(1), c2.green.GetInt(1)), 0.001); + CHECK_CLOSE(764.0f, Color::GetDistance(c3.red.GetInt(1), c3.blue.GetInt(1), c3.green.GetInt(1), c4.red.GetInt(1), c4.blue.GetInt(1), c4.green.GetInt(1)), 0.001); +} + +TEST(Color_RGBA_Constructor) +{ + // Color + openshot::Color c(69, 134, 219, 255); + c.red.AddPoint(100, 255); + c.green.AddPoint(100, 255); + c.blue.AddPoint(100, 255); + + CHECK_EQUAL("#4586db", c.GetColorHex(1)); + CHECK_EQUAL("#a2c2ed", c.GetColorHex(50)); + CHECK_EQUAL("#ffffff", c.GetColorHex(100)); + + // Color with alpha + openshot::Color c1(69, 134, 219, 128); + CHECK_EQUAL("#4586db", c1.GetColorHex(1)); + CHECK_EQUAL(128, c1.alpha.GetInt(1)); +} diff --git a/tests/FFmpegReader_Tests.cpp 
b/tests/FFmpegReader_Tests.cpp index 5bcca0b4..a6f37579 100644 --- a/tests/FFmpegReader_Tests.cpp +++ b/tests/FFmpegReader_Tests.cpp @@ -84,25 +84,27 @@ TEST(FFmpegReader_Check_Video_File) tr1::shared_ptr f = r.GetFrame(1); // Get the image data - const Magick::PixelPacket* pixels = f->GetPixels(10); + const unsigned char* pixels = f->GetPixels(10); + int pixel_index = 112 * 4; // pixel 112 (4 bytes per pixel) // Check image properties on scanline 10, pixel 112 - CHECK_EQUAL(5397, pixels[112].red); - CHECK_EQUAL(0, pixels[112].blue); - CHECK_EQUAL(49087, pixels[112].green); - CHECK_EQUAL(0, pixels[112].opacity); + CHECK_EQUAL(21, (int)pixels[pixel_index]); + CHECK_EQUAL(191, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(0, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame 1 f = r.GetFrame(2); // Get the next frame pixels = f->GetPixels(10); + pixel_index = 112 * 4; // pixel 112 (4 bytes per pixel) // Check image properties on scanline 10, pixel 112 - CHECK_EQUAL(0, pixels[112].red); - CHECK_EQUAL(48316, pixels[112].blue); - CHECK_EQUAL(24672, pixels[112].green); - CHECK_EQUAL(0, pixels[112].opacity); + CHECK_EQUAL(0, (int)pixels[pixel_index]); + CHECK_EQUAL(96, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(188, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Close reader r.Close(); @@ -117,68 +119,46 @@ TEST(FFmpegReader_Seek) // Get frame tr1::shared_ptr f = r.GetFrame(1); CHECK_EQUAL(1, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(300); CHECK_EQUAL(300, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(301); CHECK_EQUAL(301, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(315); CHECK_EQUAL(315, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(275); CHECK_EQUAL(275, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = 
r.GetFrame(270); CHECK_EQUAL(270, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(500); CHECK_EQUAL(500, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(100); CHECK_EQUAL(100, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(600); CHECK_EQUAL(600, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(1); CHECK_EQUAL(1, f->number); - //f->Display(); - //f->DisplayWaveform(); // Get frame f = r.GetFrame(700); CHECK_EQUAL(700, f->number); - //f->Display(); - //f->DisplayWaveform(); // Close reader r.Close(); diff --git a/tests/FFmpegWriter_Tests.cpp b/tests/FFmpegWriter_Tests.cpp index 9502da4c..bb5f8f4f 100644 --- a/tests/FFmpegWriter_Tests.cpp +++ b/tests/FFmpegWriter_Tests.cpp @@ -66,11 +66,12 @@ TEST(FFmpegWriter_Test_Webm) tr1::shared_ptr f = r1.GetFrame(8); // Get the image data for row 500 - const Magick::PixelPacket* pixels = f->GetPixels(500); + const unsigned char* pixels = f->GetPixels(500); + int pixel_index = 112 * 4; // pixel 112 (4 bytes per pixel) - // Check pixel values on scanline 500, pixel 600 - CHECK_EQUAL(2056, pixels[600].red); - CHECK_EQUAL(2056, pixels[600].blue); - CHECK_EQUAL(2056, pixels[600].green); - CHECK_EQUAL(0, pixels[600].opacity); + // Check image properties on scanline 500, pixel 112 + CHECK_EQUAL(23, (int)pixels[pixel_index]); + CHECK_EQUAL(23, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(23, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); } diff --git a/tests/ImageWriter_Tests.cpp b/tests/ImageWriter_Tests.cpp index 17abf43a..5f82bd1c 100644 --- a/tests/ImageWriter_Tests.cpp +++ b/tests/ImageWriter_Tests.cpp @@ -47,14 +47,14 @@ TEST(ImageWriter_Test_Webm) w.Open(); // Write some frames (start on frame 500 and go to frame 510) - w.WriteFrame(&r, 500, 510); + w.WriteFrame(&r, 500, 504); // Close writer & reader w.Close(); r.Close(); // Open up the 5th frame from the 
newly created GIF - ImageReader r1("output1.gif[5]"); + ImageReader r1("output1.gif[4]"); r1.Open(); // Verify various settings @@ -65,11 +65,12 @@ TEST(ImageWriter_Test_Webm) tr1::shared_ptr f = r1.GetFrame(8); // Get the image data for row 500 - const Magick::PixelPacket* pixels = f->GetPixels(500); + const unsigned char* pixels = f->GetPixels(500); + int pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check pixel values on scanline 500, pixel 600 - CHECK_EQUAL(4883, pixels[600].red); - CHECK_EQUAL(2570, pixels[600].blue); - CHECK_EQUAL(3341, pixels[600].green); - CHECK_EQUAL(0, pixels[600].opacity); + // Check image properties + CHECK_EQUAL(20, (int)pixels[pixel_index]); + CHECK_EQUAL(18, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(11, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); } diff --git a/tests/ReaderBase_Tests.cpp b/tests/ReaderBase_Tests.cpp index fd10f01a..247f083c 100644 --- a/tests/ReaderBase_Tests.cpp +++ b/tests/ReaderBase_Tests.cpp @@ -40,6 +40,7 @@ TEST(ReaderBase_Derived_Class) { public: TestReader() { }; + Cache* GetCache() { return NULL; }; tr1::shared_ptr GetFrame(int number) { tr1::shared_ptr f(new Frame()); return f; } void Close() { }; void Open() { }; diff --git a/tests/Timeline_Tests.cpp b/tests/Timeline_Tests.cpp index 3d40fe8d..4c8b661b 100644 --- a/tests/Timeline_Tests.cpp +++ b/tests/Timeline_Tests.cpp @@ -35,14 +35,14 @@ TEST(Timeline_Constructor) { // Create a default fraction (should be 1/1) Fraction fps(30000,1000); - Timeline t1(640, 480, fps, 44100, 2); + Timeline t1(640, 480, fps, 44100, 2, LAYOUT_STEREO); // Check values CHECK_EQUAL(640, t1.info.width); CHECK_EQUAL(480, t1.info.height); // Create a default fraction (should be 1/1) - Timeline t2(300, 240, fps, 44100, 2); + Timeline t2(300, 240, fps, 44100, 2, LAYOUT_STEREO); // Check values CHECK_EQUAL(300, t2.info.width); @@ -53,7 +53,7 @@ TEST(Timeline_Width_and_Height_Functions) { // Create a default fraction (should be 1/1) 
Fraction fps(30000,1000); - Timeline t1(640, 480, fps, 44100, 2); + Timeline t1(640, 480, fps, 44100, 2, LAYOUT_STEREO); // Check values CHECK_EQUAL(640, t1.info.width); @@ -78,7 +78,7 @@ TEST(Timeline_Framerate) { // Create a default fraction (should be 1/1) Fraction fps(24,1); - Timeline t1(640, 480, fps, 44100, 2); + Timeline t1(640, 480, fps, 44100, 2, LAYOUT_STEREO); // Check values CHECK_CLOSE(24.0f, t1.info.fps.ToFloat(), 0.00001); @@ -97,7 +97,7 @@ TEST(Timeline_Check_Two_Track_Video) clip_overlay.End(0.5); // Make the duration of the overlay 1/2 second // Create a timeline - Timeline t(640, 480, Fraction(30, 1), 44100, 2); + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); // Add clips t.AddClip(&clip_video); @@ -110,37 +110,40 @@ TEST(Timeline_Check_Two_Track_Video) tr1::shared_ptr f = t.GetFrame(1); // Get the image data - const Magick::PixelPacket* pixels = f->GetPixels(200); + const unsigned char* pixels = f->GetPixels(200); + int pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image properties on scanline, column 230 - CHECK_EQUAL(5397, pixels[230].red); - CHECK_EQUAL(0, pixels[230].blue); - CHECK_EQUAL(49087, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(21, (int)pixels[pixel_index]); + CHECK_EQUAL(191, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(0, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame f = t.GetFrame(2); // Get scanline 190 of pixels pixels = f->GetPixels(190); + pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image properties on scanline, column 230 - CHECK_EQUAL(64764, pixels[230].red); - CHECK_EQUAL(63993, pixels[230].blue); - CHECK_EQUAL(64764, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(252, (int)pixels[pixel_index]); + CHECK_EQUAL(252, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(249, (int)pixels[pixel_index + 2]); + 
CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame f = t.GetFrame(3); // Get scanline 190 of pixels pixels = f->GetPixels(190); + pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image properties on scanline, column 230 - CHECK_EQUAL(64771, pixels[230].red); - CHECK_EQUAL(63429, pixels[230].blue); - CHECK_EQUAL(64193, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(25, (int)pixels[pixel_index]); + CHECK_EQUAL(189, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(0, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame @@ -148,48 +151,52 @@ TEST(Timeline_Check_Two_Track_Video) // Get scanline 190 of pixels pixels = f->GetPixels(190); + pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image properties on scanline, column 230 - CHECK_EQUAL(64507, pixels[230].red); - CHECK_EQUAL(63736, pixels[230].blue); - CHECK_EQUAL(64507, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(251, (int)pixels[pixel_index]); + CHECK_EQUAL(251, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(248, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame f = t.GetFrame(5); // Get scanline 190 of pixels pixels = f->GetPixels(190); + pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image properties on scanline, column 230 - CHECK_EQUAL(6437, pixels[230].red); - CHECK_EQUAL(0, pixels[230].blue); - CHECK_EQUAL(48399, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(25, (int)pixels[pixel_index]); + CHECK_EQUAL(189, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(0, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame f = t.GetFrame(25); // Get scanline 190 of pixels pixels = f->GetPixels(190); + pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image 
properties on scanline, column 230 - CHECK_EQUAL(5397, pixels[230].red); - CHECK_EQUAL(0, pixels[230].blue); - CHECK_EQUAL(49087, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(251, (int)pixels[pixel_index]); + CHECK_EQUAL(251, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(248, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Get frame f = t.GetFrame(4); // Get scanline 190 of pixels pixels = f->GetPixels(190); + pixel_index = 230 * 4; // pixel 230 (4 bytes per pixel) - // Check image properties on scanline, column 230 - CHECK_EQUAL(64771, pixels[230].red); - CHECK_EQUAL(63429, pixels[230].blue); - CHECK_EQUAL(64193, pixels[230].green); - CHECK_EQUAL(0, pixels[230].opacity); + // Check image properties + CHECK_EQUAL(252, (int)pixels[pixel_index]); + CHECK_EQUAL(250, (int)pixels[pixel_index + 1]); + CHECK_EQUAL(247, (int)pixels[pixel_index + 2]); + CHECK_EQUAL(255, (int)pixels[pixel_index + 3]); // Close reader t.Close(); @@ -198,7 +205,7 @@ TEST(Timeline_Check_Two_Track_Video) TEST(Timeline_Clip_Order) { // Create a timeline - Timeline t(640, 480, Fraction(30, 1), 44100, 2); + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); // Add some clips out of order Clip clip_top("../../src/examples/front3.png"); @@ -286,7 +293,7 @@ TEST(Timeline_Clip_Order) TEST(Timeline_Effect_Order) { // Create a timeline - Timeline t(640, 480, Fraction(30, 1), 44100, 2); + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); // Add some effects out of order Negate effect_top;