Merge branch 'develop' into clip-refactor-keyframes

# Conflicts:
#	include/Clip.h
#	include/ReaderBase.h
#	include/Timeline.h
#	src/Clip.cpp
#	src/FFmpegReader.cpp
#	src/QtImageReader.cpp
#	src/ReaderBase.cpp
This commit is contained in:
Jonathan Thomas
2020-10-05 23:14:44 -05:00
27 changed files with 622 additions and 241 deletions

4
.github/labeler.yml vendored Normal file
View File

@@ -0,0 +1,4 @@
# Add 'build' label to CMake changes
# NOTE: the file is named CMakeLists.txt (plural); a singular
# "CMakeList.txt" pattern would never match and the label would
# silently never be applied.
build:
- /**/CMakeLists.txt
- /cmake/**/*.cmake

19
.github/workflows/label.yml vendored Normal file
View File

@@ -0,0 +1,19 @@
# This workflow will triage pull requests and apply a label based on the
# paths that are modified in the pull request.
#
# To use this workflow, you will need to set up a .github/labeler.yml
# file with configuration. For more information, see:
# https://github.com/actions/labeler/blob/master/README.md
name: Labeler
on: [pull_request]
jobs:
label:
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v2
with:
# GITHUB_TOKEN is supplied automatically by GitHub Actions; the
# labeler action needs it to read the PR's changed files and to
# apply the configured labels.
repo-token: "${{ secrets.GITHUB_TOKEN }}"

View File

@@ -43,7 +43,7 @@ mac-builder:
- unzip artifacts.zip
- export LIBOPENSHOT_AUDIO_DIR=$CI_PROJECT_DIR/build/install-x64
- mkdir -p build; cd build;
- cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6.dylib -DPYTHON_MODULE_PATH=python -DPython_FRAMEWORKS=/Library/Frameworks/Python.framework/ -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../
- cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6.dylib -DPYTHON_MODULE_PATH=python -DPython_FRAMEWORKS=/Library/Frameworks/Python.framework/ -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../
- make
- make install
- echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID" > "install-x64/share/$CI_PROJECT_NAME"

View File

@@ -77,6 +77,7 @@ option(ENABLE_IWYU "Enable 'Include What You Use' scanner (CMake 3.3+)" OFF)
option(ENABLE_TESTS "Build unit tests (requires UnitTest++)" ON)
option(ENABLE_DOCS "Build API documentation (requires Doxygen)" ON)
option(APPIMAGE_BUILD "Build to install in an AppImage (Linux only)" OFF)
option(ENABLE_MAGICK "Use ImageMagick, if available" ON)
# Legacy commandline override
if (DISABLE_TESTS)
@@ -129,9 +130,9 @@ add_feature_info("Coverage" ENABLE_COVERAGE "analyze test coverage and generate
# -DDEBUG for debug builds. We'll do this for all OSes, even
# though only MacOS requires it.
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DDEBUG")
# Make sure we've picked some build type, default to debug
# Make sure we've picked some build type, default to release
if(NOT DEFINED CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "")
set(CMAKE_BUILD_TYPE "Debug")
set(CMAKE_BUILD_TYPE "Release")
endif()
############## PROCESS src/ DIRECTORIES ##############

View File

@@ -1,6 +1,6 @@
OpenShot Video Library (libopenshot) is a free, open-source C++ library dedicated to
delivering high quality video editing, animation, and playback solutions to the
world.
OpenShot Video Library (libopenshot) is a free, open-source C++ library
dedicated to delivering high quality video editing, animation, and playback
solutions to the world.
## Build Status
@@ -15,7 +15,8 @@ world.
* Time Mapping (Curve-based Slow Down, Speed Up, Reverse)
* Audio Mixing & Resampling (Curve-based)
* Audio Plug-ins (VST & AU)
* Audio Drivers (ASIO, WASAPI, DirectSound, CoreAudio, iPhone Audio, ALSA, JACK, and Android)
* Audio Drivers (ASIO, WASAPI, DirectSound, CoreAudio, iPhone Audio,
ALSA, JACK, and Android)
* Telecine and Inverse Telecine (Film to TV, TV to Film)
* Frame Rate Conversions
* Multi-Processor Support (Performance)
@@ -27,8 +28,8 @@ world.
## Install
Detailed instructions for building libopenshot and libopenshot-audio for each OS. These instructions
are also available in the /docs/ source folder.
Detailed instructions for building libopenshot and libopenshot-audio for
each OS. These instructions are also available in the `/docs/` source folder.
* [Linux](https://github.com/OpenShot/libopenshot/wiki/Linux-Build-Instructions)
* [Mac](https://github.com/OpenShot/libopenshot/wiki/Mac-Build-Instructions)
@@ -36,10 +37,12 @@ are also available in the /docs/ source folder.
## Hardware Acceleration
OpenShot now supports experimental hardware acceleration, both for encoding and
decoding videos. When enabled, this can either speed up those operations or slow
them down, depending on the power and features supported by your graphics card.
Please see [doc/HW-ACCELL.md](doc/HW-ACCEL.md) for more information.
OpenShot now supports experimental hardware acceleration, both for encoding
and decoding videos. When enabled, this can either speed up those operations
or slow them down, depending on the power and features supported by your
graphics card.
Please see [`doc/HW-ACCEL.md`](doc/HW-ACCEL.md) for more information.
## Documentation
@@ -51,10 +54,11 @@ make doc
## Developers
Are you interested in becoming more involved in the development of
OpenShot? Build exciting new features, fix bugs, make friends, and become a hero!
Please read the [step-by-step](https://github.com/OpenShot/openshot-qt/wiki/Become-a-Developer)
instructions for getting source code, configuring dependencies, and building OpenShot.
Are you interested in becoming more involved in the development of OpenShot?
Build exciting new features, fix bugs, make friends, and become a hero!
Please read the [step-by-step](https://github.com/OpenShot/openshot-qt/wiki/Become-a-Developer)
instructions for getting source code, configuring dependencies, and building
OpenShot.
## Report a bug
@@ -72,7 +76,7 @@ https://github.com/OpenShot/libopenshot/issues
### License
Copyright (c) 2008-2019 OpenShot Studios, LLC.
Copyright (c) 2008-2020 OpenShot Studios, LLC.
OpenShot Library (libopenshot) is free software: you can redistribute it
and/or modify it under the terms of the GNU Lesser General Public License

View File

@@ -1,24 +0,0 @@
# - Try to find ZMQ
# Once done this will define
# ZMQ_FOUND - System has ZMQ
# ZMQ_INCLUDE_DIRS - The ZMQ include directories
# ZMQ_LIBRARIES - The libraries needed to use ZMQ
# ZMQ_DEFINITIONS - Compiler switches required for using ZMQ
find_path ( ZMQ_INCLUDE_DIR zmq.h
PATHS /usr/include/
/usr/local/include/
$ENV{ZMQDIR}/include/ )
find_library ( ZMQ_LIBRARY NAMES zmq
PATHS /usr/lib/
/usr/local/lib/
$ENV{ZMQDIR}/lib/ )
set ( ZMQ_LIBRARIES ${ZMQ_LIBRARY} )
set ( ZMQ_INCLUDE_DIRS ${ZMQ_INCLUDE_DIR} )
include ( FindPackageHandleStandardArgs )
# handle the QUIETLY and REQUIRED arguments and set ZMQ_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args ( ZMQ DEFAULT_MSG ZMQ_LIBRARY ZMQ_INCLUDE_DIR )

View File

@@ -38,8 +38,6 @@
#include "Frame.h"
#include "Exceptions.h"
#include <QDir>
#include <QString>
#include <QTextStream>
namespace openshot {

View File

@@ -188,6 +188,9 @@ namespace openshot {
/// Return the list of effects on the timeline
std::list<openshot::EffectBase*> Effects() { return effects; };
/// Look up an effect by ID
openshot::EffectBase* GetEffect(const std::string& id);
/// @brief This method is required for all derived classes of ClipBase, and returns a
/// new openshot::Frame object. All Clip keyframes and effects are resolved into
/// pixels.
@@ -278,8 +281,6 @@ namespace openshot {
openshot::Keyframe has_audio; ///< An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
openshot::Keyframe has_video; ///< An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
};
} // namespace
}
#endif
#endif // OPENSHOT_CLIP_H

View File

@@ -122,6 +122,9 @@
#ifndef PIX_FMT_YUV420P
#define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
#endif
#ifndef PIX_FMT_YUV444P
#define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P
#endif
// FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's
// definition in ruby/config.h, so we move it to FF_RSHIFT

44
include/QtUtilities.h Normal file
View File

@@ -0,0 +1,44 @@
/**
 * @file
 * @brief Header file for QtUtilities (compatibility overlay)
 * @author FeRD (Frank Dana) <ferdnyc@gmail.com>
 */
/* LICENSE
 *
 * Copyright (c) 2008-2020 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef OPENSHOT_QT_UTILITIES_H
#define OPENSHOT_QT_UTILITIES_H
#include <Qt>
#include <QTextStream>
// Fix Qt::endl for older Qt versions
// From: https://bugreports.qt.io/browse/QTBUG-82680
// Qt::endl was only introduced in Qt 5.14. For earlier versions we alias
// the global ::endl stream manipulator into the Qt namespace, so callers
// can write "stream << Qt::endl" unconditionally regardless of Qt version.
#if QT_VERSION < QT_VERSION_CHECK(5, 14, 0)
namespace Qt {
using TextStreamFunction = QTextStream& (*)(QTextStream&);
constexpr TextStreamFunction endl = ::endl;
}
#endif
#endif // OPENSHOT_QT_UTILITIES_H

View File

@@ -63,7 +63,7 @@ namespace openshot {
/// from lowest layer to top layer (since that is the sequence they need to be combined), and then
/// by position (left to right).
struct CompareClips{
bool operator()( Clip* lhs, Clip* rhs){
bool operator()( openshot::Clip* lhs, openshot::Clip* rhs){
if( lhs->Layer() < rhs->Layer() ) return true;
if( lhs->Layer() == rhs->Layer() && lhs->Position() <= rhs->Position() ) return true;
return false;
@@ -73,13 +73,28 @@ namespace openshot {
/// from lowest layer to top layer (since that is sequence clips are combined), and then by
/// position, and then by effect order.
struct CompareEffects{
bool operator()( EffectBase* lhs, EffectBase* rhs){
bool operator()( openshot::EffectBase* lhs, openshot::EffectBase* rhs){
if( lhs->Layer() < rhs->Layer() ) return true;
if( lhs->Layer() == rhs->Layer() && lhs->Position() < rhs->Position() ) return true;
if( lhs->Layer() == rhs->Layer() && lhs->Position() == rhs->Position() && lhs->Order() > rhs->Order() ) return true;
return false;
}};
/// Comparison method for finding the far end of the timeline, by locating
/// the Clip with the highest end-frame number using std::max_element
struct CompareClipEndFrames {
bool operator()(const openshot::Clip* lhs, const openshot::Clip* rhs) {
return (lhs->Position() + lhs->Duration())
<= (rhs->Position() + rhs->Duration());
}};
/// Like CompareClipEndFrames, but for effects
struct CompareEffectEndFrames {
bool operator()(const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) {
return (lhs->Position() + lhs->Duration())
<= (rhs->Position() + rhs->Duration());
}};
/**
* @brief This class represents a timeline
*
@@ -148,33 +163,33 @@ namespace openshot {
* t.Close();
* @endcode
*/
class Timeline : public TimelineBase, public ReaderBase {
class Timeline : public openshot::TimelineBase, public openshot::ReaderBase {
private:
bool is_open; ///<Is Timeline Open?
bool auto_map_clips; ///< Auto map framerates and sample rates to all clips
std::list<Clip*> clips; ///<List of clips on this timeline
std::list<Clip*> closing_clips; ///<List of clips that need to be closed
std::map<Clip*, Clip*> open_clips; ///<List of 'opened' clips on this timeline
std::list<EffectBase*> effects; ///<List of clips on this timeline
CacheBase *final_cache; ///<Final cache of timeline frames
std::set<FrameMapper*> allocated_frame_mappers; ///< all the frame mappers we allocated and must free
std::list<openshot::Clip*> clips; ///<List of clips on this timeline
std::list<openshot::Clip*> closing_clips; ///<List of clips that need to be closed
std::map<openshot::Clip*, openshot::Clip*> open_clips; ///<List of 'opened' clips on this timeline
std::list<openshot::EffectBase*> effects; ///<List of clips on this timeline
openshot::CacheBase *final_cache; ///<Final cache of timeline frames
std::set<openshot::FrameMapper*> allocated_frame_mappers; ///< all the frame mappers we allocated and must free
bool managed_cache; ///< Does this timeline instance manage the cache object
std::string path; ///< Optional path of loaded UTF-8 OpenShot JSON project file
/// Process a new layer of video or audio
void add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume);
void add_layer(std::shared_ptr<openshot::Frame> new_frame, openshot::Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume);
/// Apply a FrameMapper to a clip which matches the settings of this timeline
void apply_mapper_to_clip(Clip* clip);
void apply_mapper_to_clip(openshot::Clip* clip);
/// Apply JSON Diffs to various objects contained in this timeline
void apply_json_to_clips(Json::Value change); ///<Apply JSON diff to clips
void apply_json_to_effects(Json::Value change); ///< Apply JSON diff to effects
void apply_json_to_effects(Json::Value change, EffectBase* existing_effect); ///<Apply JSON diff to a specific effect
void apply_json_to_effects(Json::Value change, openshot::EffectBase* existing_effect); ///<Apply JSON diff to a specific effect
void apply_json_to_timeline(Json::Value change); ///<Apply JSON diff to timeline properties
/// Calculate time of a frame number, based on a framerate
double calculate_time(int64_t number, Fraction rate);
double calculate_time(int64_t number, openshot::Fraction rate);
/// Find intersecting (or non-intersecting) openshot::Clip objects
///
@@ -182,13 +197,13 @@ namespace openshot {
/// @param requested_frame The frame number that is requested.
/// @param number_of_frames The number of frames to check
/// @param include Include or Exclude intersecting clips
std::vector<Clip*> find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include);
std::vector<openshot::Clip*> find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include);
/// Get or generate a blank frame
std::shared_ptr<Frame> GetOrCreateFrame(Clip* clip, int64_t number);
std::shared_ptr<openshot::Frame> GetOrCreateFrame(openshot::Clip* clip, int64_t number);
/// Apply effects to the source frame (if any)
std::shared_ptr<Frame> apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer);
std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer);
/// Compare 2 floating point numbers for equality
bool isEqual(double a, double b);
@@ -200,7 +215,7 @@ namespace openshot {
void sort_effects();
/// Update the list of 'opened' clips
void update_open_clips(Clip *clip, bool does_clip_intersect);
void update_open_clips(openshot::Clip *clip, bool does_clip_intersect);
public:
@@ -211,7 +226,7 @@ namespace openshot {
/// @param sample_rate The sample rate of the timeline's audio
/// @param channels The number of audio channels of the timeline
/// @param channel_layout The channel layout (i.e. mono, stereo, 3 point surround, etc...)
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout);
Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout);
/// @brief Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline)
/// @param projectPath The path of the UTF-8 *.osp project file (JSON contents). Contents will be loaded automatically.
@@ -222,11 +237,11 @@ namespace openshot {
/// @brief Add an openshot::Clip to the timeline
/// @param clip Add an openshot::Clip to the timeline. A clip can contain any type of Reader.
void AddClip(Clip* clip);
void AddClip(openshot::Clip* clip);
/// @brief Add an effect to the timeline
/// @param effect Add an effect to the timeline. An effect can modify the audio or video of an openshot::Frame.
void AddEffect(EffectBase* effect);
void AddEffect(openshot::EffectBase* effect);
/// Apply the timeline's framerate and samplerate to all clips
void ApplyMapperToClips();
@@ -241,34 +256,48 @@ namespace openshot {
void ClearAllCache();
/// Return a list of clips on the timeline
std::list<Clip*> Clips() { return clips; };
std::list<openshot::Clip*> Clips() { return clips; };
/// Look up a single clip by ID
openshot::ClipBase* GetClip(const std::string& id);
/// Look up a clip effect by ID
openshot::EffectBase* GetClipEffect(const std::string& id);
/// Look up a timeline effect by ID
openshot::EffectBase* GetEffect(const std::string& id);
/// Look up the end time of the latest timeline element
double GetMaxTime();
/// Look up the end frame number of the latest element on the timeline
int64_t GetMaxFrame();
/// Close the timeline reader (and any resources it was consuming)
void Close() override;
/// Return the list of effects on the timeline
std::list<EffectBase*> Effects() { return effects; };
std::list<openshot::EffectBase*> Effects() { return effects; };
/// Get the cache object used by this reader
CacheBase* GetCache() override { return final_cache; };
openshot::CacheBase* GetCache() override { return final_cache; };
/// Set the cache object used by this reader. You must now manage the lifecycle
/// of this cache object though (Timeline will not delete it for you).
void SetCache(CacheBase* new_cache);
void SetCache(openshot::CacheBase* new_cache);
/// Get an openshot::Frame object for a specific frame number of this timeline.
///
/// @returns The requested frame (containing the image)
/// @param requested_frame The frame number that is requested.
std::shared_ptr<Frame> GetFrame(int64_t requested_frame) override;
std::shared_ptr<openshot::Frame> GetFrame(int64_t requested_frame) override;
// Curves for the viewport
Keyframe viewport_scale; ///<Curve representing the scale of the viewport (0 to 100)
Keyframe viewport_x; ///<Curve representing the x coordinate for the viewport
Keyframe viewport_y; ///<Curve representing the y coordinate for the viewport
openshot::Keyframe viewport_scale; ///<Curve representing the scale of the viewport (0 to 100)
openshot::Keyframe viewport_x; ///<Curve representing the x coordinate for the viewport
openshot::Keyframe viewport_y; ///<Curve representing the y coordinate for the viewport
// Background color
Color color; ///<Background color of timeline canvas
openshot::Color color; ///<Background color of timeline canvas
/// Determine if reader is open or closed
bool IsOpen() override { return is_open; };
@@ -297,14 +326,13 @@ namespace openshot {
/// @brief Remove an openshot::Clip from the timeline
/// @param clip Remove an openshot::Clip from the timeline.
void RemoveClip(Clip* clip);
void RemoveClip(openshot::Clip* clip);
/// @brief Remove an effect from the timeline
/// @param effect Remove an effect from the timeline.
void RemoveEffect(EffectBase* effect);
void RemoveEffect(openshot::EffectBase* effect);
};
}
#endif
#endif // OPENSHOT_TIMELINE_H

View File

@@ -65,29 +65,31 @@ endif()
################ IMAGE MAGICK ##################
# Set the Quantum Depth that ImageMagick was built with (default to 16 bits)
IF (MAGICKCORE_QUANTUM_DEPTH)
add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH} )
ELSE (MAGICKCORE_QUANTUM_DEPTH)
add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=16 )
ENDIF (MAGICKCORE_QUANTUM_DEPTH)
IF (MAGICKCORE_HDRI_ENABLE)
add_definitions( -DMAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE} )
ELSE (MAGICKCORE_HDRI_ENABLE)
add_definitions( -DMAGICKCORE_HDRI_ENABLE=0 )
ENDIF (MAGICKCORE_HDRI_ENABLE)
if(ENABLE_MAGICK)
IF (MAGICKCORE_QUANTUM_DEPTH)
add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH} )
ELSE (MAGICKCORE_QUANTUM_DEPTH)
add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=16 )
ENDIF (MAGICKCORE_QUANTUM_DEPTH)
IF (MAGICKCORE_HDRI_ENABLE)
add_definitions( -DMAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE} )
ELSE (MAGICKCORE_HDRI_ENABLE)
add_definitions( -DMAGICKCORE_HDRI_ENABLE=0 )
ENDIF (MAGICKCORE_HDRI_ENABLE)
# Find the ImageMagick++ library
find_package(ImageMagick COMPONENTS Magick++ MagickWand MagickCore)
if (ImageMagick_FOUND)
# Include ImageMagick++ headers (needed for compile)
include_directories(${ImageMagick_INCLUDE_DIRS})
# Find the ImageMagick++ library
find_package(ImageMagick COMPONENTS Magick++ MagickWand MagickCore)
if (ImageMagick_FOUND)
# Include ImageMagick++ headers (needed for compile)
include_directories(${ImageMagick_INCLUDE_DIRS})
# define a global var (used in the C++)
add_definitions( -DUSE_IMAGEMAGICK=1 )
list(APPEND CMAKE_SWIG_FLAGS "-DUSE_IMAGEMAGICK=1")
# define a global var (used in the C++)
add_definitions( -DUSE_IMAGEMAGICK=1 )
list(APPEND CMAKE_SWIG_FLAGS "-DUSE_IMAGEMAGICK=1")
set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE)
mark_as_advanced(HAVE_IMAGEMAGICK)
set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE)
mark_as_advanced(HAVE_IMAGEMAGICK)
endif()
endif()
################# LIBOPENSHOT-AUDIO ###################

View File

@@ -29,6 +29,10 @@
*/
#include "../include/CacheDisk.h"
#include "../include/QtUtilities.h"
#include <Qt>
#include <QString>
#include <QTextStream>
using namespace std;
using namespace openshot;
@@ -191,10 +195,10 @@ void CacheDisk::Add(std::shared_ptr<Frame> frame)
if (audio_file.open(QIODevice::WriteOnly)) {
QTextStream audio_stream(&audio_file);
audio_stream << frame->SampleRate() << endl;
audio_stream << frame->GetAudioChannelsCount() << endl;
audio_stream << frame->GetAudioSamplesCount() << endl;
audio_stream << frame->ChannelsLayout() << endl;
audio_stream << frame->SampleRate() << Qt::endl;
audio_stream << frame->GetAudioChannelsCount() << Qt::endl;
audio_stream << frame->GetAudioSamplesCount() << Qt::endl;
audio_stream << frame->ChannelsLayout() << Qt::endl;
// Loop through all samples
for (int channel = 0; channel < frame->GetAudioChannelsCount(); channel++)
@@ -202,7 +206,7 @@ void CacheDisk::Add(std::shared_ptr<Frame> frame)
// Get audio for this channel
float *samples = frame->GetAudioSamples(channel);
for (int sample = 0; sample < frame->GetAudioSamplesCount(); sample++)
audio_stream << samples[sample] << endl;
audio_stream << samples[sample] << Qt::endl;
}
}

View File

@@ -416,6 +416,18 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
/// Look up an effect by ID.
/// @param id The ID string to search for
/// @returns The first effect on this clip whose Id() matches, or
///          nullptr when no effect carries the requested ID.
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
	// Walk the effect list explicitly, stopping at the first ID match
	auto it = effects.begin();
	const auto stop = effects.end();
	while (it != stop) {
		if ((*it)->Id() == id)
			return *it;
		++it;
	}
	// Nothing on this clip matched the requested ID
	return nullptr;
}
// Get file extension
std::string Clip::get_file_extension(std::string path)
{

View File

@@ -33,6 +33,9 @@
#include "../include/FFmpegReader.h"
#include <thread> // for std::this_thread::sleep_for
#include <chrono> // for std::chrono::milliseconds
#define ENABLE_VAAPI 0
#if HAVE_HW_ACCEL
@@ -925,7 +928,7 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
// Wait if too many frames are being processed
while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
usleep(2500);
std::this_thread::sleep_for(std::chrono::milliseconds(3));
const GenericScopedLock <CriticalSection> lock(processingCriticalSection);
processing_video_frames_size = processing_video_frames.size();
processing_audio_frames_size = processing_audio_frames.size();
@@ -1717,7 +1720,7 @@ void FFmpegReader::Seek(int64_t requested_frame) {
// Wait for any processing frames to complete
while (processing_video_frames_size + processing_audio_frames_size > 0) {
usleep(2500);
std::this_thread::sleep_for(std::chrono::milliseconds(3));
const GenericScopedLock <CriticalSection> lock(processingCriticalSection);
processing_video_frames_size = processing_video_frames.size();
processing_audio_frames_size = processing_audio_frames.size();
@@ -1866,8 +1869,20 @@ void FFmpegReader::UpdatePTSOffset(bool is_video) {
// VIDEO PACKET
if (video_pts_offset == 99999) // Has the offset been set yet?
{
// Find the difference between PTS and frame number (no more than 10 timebase units allowed)
video_pts_offset = 0 - std::max(GetVideoPTS(), (int64_t) info.video_timebase.ToInt() * 10);
// Find the difference between PTS and frame number
video_pts_offset = 0 - GetVideoPTS();
// Find the difference between PTS and frame number
// Also, determine if PTS is invalid (too far away from zero)
// We compare the PTS to the timebase value equal to 1 second (which means the PTS
// must be within the -1 second to +1 second of zero, otherwise we ignore it)
// TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272
// for ideas to improve this logic.
int64_t max_offset = info.video_timebase.Reciprocal().ToFloat();
if (video_pts_offset < -max_offset || video_pts_offset > max_offset) {
// Ignore PTS, it seems invalid
video_pts_offset = 0;
}
// debug output
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Video)", "video_pts_offset", video_pts_offset, "is_video", is_video);
@@ -1876,8 +1891,18 @@ void FFmpegReader::UpdatePTSOffset(bool is_video) {
// AUDIO PACKET
if (audio_pts_offset == 99999) // Has the offset been set yet?
{
// Find the difference between PTS and frame number (no more than 10 timebase units allowed)
audio_pts_offset = 0 - std::max(packet->pts, (int64_t) info.audio_timebase.ToInt() * 10);
// Find the difference between PTS and frame number
// Also, determine if PTS is invalid (too far away from zero)
// We compare the PTS to the timebase value equal to 1 second (which means the PTS
// must be within the -1 second to +1 second of zero, otherwise we ignore it)
// TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272
// for ideas to improve this logic.
audio_pts_offset = 0 - packet->pts;
int64_t max_offset = info.audio_timebase.Reciprocal().ToFloat();
if (audio_pts_offset < -max_offset || audio_pts_offset > max_offset) {
// Ignore PTS, it seems invalid
audio_pts_offset = 0;
}
// debug output
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Audio)", "audio_pts_offset", audio_pts_offset, "is_video", is_video);

View File

@@ -438,6 +438,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va
av_opt_set_int(c->priv_data, "qp", std::min(std::stoi(value), 51), 0); // 0-51
if (std::stoi(value) == 0) {
av_opt_set(c->priv_data, "preset", "veryslow", 0);
c->pix_fmt = PIX_FMT_YUV444P; // no chroma subsampling
}
break;
case AV_CODEC_ID_HEVC :
@@ -498,6 +499,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va
av_opt_set_int(c->priv_data, "crf", std::min(std::stoi(value), 51), 0); // 0-51
if (std::stoi(value) == 0) {
av_opt_set(c->priv_data, "preset", "veryslow", 0);
c->pix_fmt = PIX_FMT_YUV444P; // no chroma subsampling
}
break;
case AV_CODEC_ID_HEVC :
@@ -537,7 +539,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va
switch (c->codec_id) {
case AV_CODEC_ID_AV1 :
c->bit_rate = 0;
if (strstr(info.vcodec.c_str(), "svt_av1") != NULL) {
if (strstr(info.vcodec.c_str(), "svtav1") != NULL) {
av_opt_set_int(c->priv_data, "qp", std::min(std::stoi(value),63), 0);
}
else if (strstr(info.vcodec.c_str(), "rav1e") != NULL) {
@@ -1237,7 +1239,7 @@ AVStream *FFmpegWriter::add_video_stream() {
info.video_bit_rate = calculated_quality;
} // medium
}
if (strstr(info.vcodec.c_str(), "svt_av1") != NULL) {
if (strstr(info.vcodec.c_str(), "svtav1") != NULL) {
av_opt_set_int(c->priv_data, "preset", 6, 0);
av_opt_set_int(c->priv_data, "forced-idr",1,0);
}

View File

@@ -30,6 +30,9 @@
#include "../include/Frame.h"
#include <thread> // for std::this_thread::sleep_for
#include <chrono> // for std::chrono::milliseconds
using namespace std;
using namespace openshot;
@@ -1028,7 +1031,7 @@ void Frame::Play()
while (transport1.isPlaying())
{
cout << "playing" << endl;
usleep(1000000);
std::this_thread::sleep_for(std::chrono::seconds(1));
}
cout << "DONE!!!" << endl;

View File

@@ -487,7 +487,7 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
// includes some additional input samples on first iteration,
// and continues the offset to ensure that the sample rate
// converter isn't input limited.
const int EXTRA_INPUT_SAMPLES = 20;
const int EXTRA_INPUT_SAMPLES = 100;
// Extend end sample count by an additional EXTRA_INPUT_SAMPLES samples
copy_samples.sample_end += EXTRA_INPUT_SAMPLES;

View File

@@ -31,6 +31,9 @@
#include "../../include/Qt/AudioPlaybackThread.h"
#include <thread> // for std::this_thread::sleep_for
#include <chrono> // for std::chrono::milliseconds
namespace openshot
{
@@ -194,7 +197,7 @@ namespace openshot
transport.start();
while (!threadShouldExit() && transport.isPlaying() && is_playing)
usleep(2500);
std::this_thread::sleep_for(std::chrono::milliseconds(2));
// Stop audio and shutdown transport
Stop();

View File

@@ -31,6 +31,9 @@
#include "../../include/Qt/PlayerPrivate.h"
#include <thread> // for std::this_thread::sleep_for
#include <chrono> // for std::chrono::milliseconds, high_resolution_clock
namespace openshot
{
// Constructor
@@ -54,109 +57,120 @@ namespace openshot
// Start thread
void PlayerPrivate::run()
{
// bail if no reader set
if (!reader)
return;
// bail if no reader set
if (!reader)
return;
// Start the threads
if (reader->info.has_audio)
audioPlayback->startThread(8);
if (reader->info.has_video) {
videoCache->startThread(2);
videoPlayback->startThread(4);
}
// Start the threads
if (reader->info.has_audio)
audioPlayback->startThread(8);
if (reader->info.has_video) {
videoCache->startThread(2);
videoPlayback->startThread(4);
}
while (!threadShouldExit()) {
using std::chrono::duration_cast;
// Calculate the milliseconds a single frame should stay on the screen
double frame_time = (1000.0 / reader->info.fps.ToDouble());
// Types for storing time durations in whole and fractional milliseconds
using ms = std::chrono::milliseconds;
using double_ms = std::chrono::duration<double, ms::period>;
// Get the start time (to track how long a frame takes to render)
const Time t1 = Time::getCurrentTime();
// Calculate on-screen time for a single frame in milliseconds
const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble());
// Get the current video frame (if it's different)
frame = getFrame();
while (!threadShouldExit()) {
// Get the start time (to track how long a frame takes to render)
const auto time1 = std::chrono::high_resolution_clock::now();
// Experimental Pausing Code (if frame has not changed)
if ((speed == 0 && video_position == last_video_position) || (video_position > reader->info.video_length)) {
speed = 0;
sleep(frame_time);
continue;
}
// Get the current video frame (if it's different)
frame = getFrame();
// Set the video frame on the video thread and render frame
videoPlayback->frame = frame;
videoPlayback->render.signal();
// Experimental Pausing Code (if frame has not changed)
if ((speed == 0 && video_position == last_video_position)
|| (video_position > reader->info.video_length)
) {
speed = 0;
std::this_thread::sleep_for(frame_duration);
continue;
}
// Keep track of the last displayed frame
last_video_position = video_position;
// Set the video frame on the video thread and render frame
videoPlayback->frame = frame;
videoPlayback->render.signal();
// How many frames ahead or behind is the video thread?
int64_t video_frame_diff = 0;
if (reader->info.has_audio && reader->info.has_video) {
if (speed != 1)
// Set audio frame again (since we are not in normal speed, and not paused)
audioPlayback->Seek(video_position);
// Keep track of the last displayed frame
last_video_position = video_position;
// Only calculate this if a reader contains both an audio and video thread
audio_position = audioPlayback->getCurrentFramePosition();
video_frame_diff = video_position - audio_position;
}
// How many frames ahead or behind is the video thread?
int64_t video_frame_diff = 0;
if (reader->info.has_audio && reader->info.has_video) {
if (speed != 1)
// Set audio frame again (since we are not in normal speed, and not paused)
audioPlayback->Seek(video_position);
// Get the end time (to track how long a frame takes to render)
const Time t2 = Time::getCurrentTime();
// Only calculate this if a reader contains both an audio and video thread
audio_position = audioPlayback->getCurrentFramePosition();
video_frame_diff = video_position - audio_position;
}
// Determine how many milliseconds it took to render the frame
int64_t render_time = t2.toMilliseconds() - t1.toMilliseconds();
// Get the end time (to track how long a frame takes to render)
const auto time2 = std::chrono::high_resolution_clock::now();
// Calculate the amount of time to sleep (by subtracting the render time)
int sleep_time = int(frame_time - render_time);
// Determine how many milliseconds it took to render the frame
const auto render_time = double_ms(time2 - time1);
// Debug
ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time", render_time, "sleep_time", sleep_time);
// Calculate the amount of time to sleep (by subtracting the render time)
auto sleep_time = duration_cast<ms>(frame_duration - render_time);
// Adjust drift (if more than a few frames off between audio and video)
if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video)
// Since the audio and video threads are running independently, they will quickly get out of sync.
// To fix this, we calculate how far ahead or behind the video frame is, and adjust the amount of time
// the frame is displayed on the screen (i.e. the sleep time). If a frame is ahead of the audio,
// we sleep for longer. If a frame is behind the audio, we sleep less (or not at all), in order for
// the video to catch up.
sleep_time += (video_frame_diff * (1000.0 / reader->info.fps.ToDouble()));
// Debug
ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time(ms)", render_time.count(), "sleep_time(ms)", sleep_time.count());
// Adjust drift (if more than a few frames off between audio and video)
if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video) {
// Since the audio and video threads are running independently,
// they will quickly get out of sync. To fix this, we calculate
// how far ahead or behind the video frame is, and adjust the amount
// of time the frame is displayed on the screen (i.e. the sleep time).
// If a frame is ahead of the audio, we sleep for longer.
// If a frame is behind the audio, we sleep less (or not at all),
// in order for the video to catch up.
sleep_time += duration_cast<ms>(video_frame_diff * frame_duration);
}
else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) {
// Skip frame(s) to catch up to the audio (if more than 10 frames behind)
video_position += abs(video_frame_diff) / 2; // Seek forward 1/2 the difference
sleep_time = 0; // Don't sleep now... immediately go to next position
}
else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) {
// Skip frame(s) to catch up to the audio (if more than 10 frames behind)
video_position += std::fabs(video_frame_diff) / 2; // Seek forward 1/2 the difference
sleep_time = sleep_time.zero(); // Don't sleep now... immediately go to next position
}
// Sleep (leaving the video frame on the screen for the correct amount of time)
if (sleep_time > 0) usleep(sleep_time * 1000);
// Sleep (leaving the video frame on the screen for the correct amount of time)
if (sleep_time > sleep_time.zero()) {
std::this_thread::sleep_for(sleep_time);
}
}
}
}
// Get the next displayed frame (based on speed and direction)
std::shared_ptr<openshot::Frame> PlayerPrivate::getFrame()
{
try {
// Get the next frame (based on speed)
if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length)
video_position = video_position + speed;
try {
// Get the next frame (based on speed)
if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length)
video_position = video_position + speed;
if (frame && frame->number == video_position && video_position == last_video_position) {
// return cached frame
return frame;
}
else
{
// Update cache on which frame was retrieved
videoCache->setCurrentFramePosition(video_position);
if (frame && frame->number == video_position && video_position == last_video_position) {
// return cached frame
return frame;
}
else
{
// Update cache on which frame was retrieved
videoCache->setCurrentFramePosition(video_position);
// return frame from reader
return reader->GetFrame(video_position);
}
// return frame from reader
return reader->GetFrame(video_position);
}
} catch (const ReaderClosed & e) {
// ...

Some files were not shown because too many files have changed in this diff Show More