diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0809c04d..46c75816 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -24,7 +24,7 @@
# along with OpenShot Library. If not, see .
################################################################################
-cmake_minimum_required(VERSION 3.1...3.14 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.2...3.14 FATAL_ERROR)
message("\
-----------------------------------------------------------------
@@ -56,7 +56,7 @@ STRING(REGEX REPLACE "\-.*$" "" VERSION_NUM "${PROJECT_VERSION_FULL}")
PROJECT(libopenshot LANGUAGES C CXX VERSION ${VERSION_NUM})
message("
-Generating build files for OpenShot
+Generating build files for OpenShot with CMake ${CMAKE_VERSION}
Building ${PROJECT_NAME} (version ${PROJECT_VERSION})
SO/API/ABI Version: ${PROJECT_SO_VERSION}
")
@@ -115,5 +115,6 @@ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doc/html/
endif()
############# PROCESS tests/ DIRECTORY ##############
-add_subdirectory(tests)
-
+if(NOT DISABLE_TESTS)
+ add_subdirectory(tests)
+endif()
diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
index c4eb7ca3..b2409e05 100644
--- a/cmake/Modules/FindFFmpeg.cmake
+++ b/cmake/Modules/FindFFmpeg.cmake
@@ -1,42 +1,82 @@
# vim: ts=2 sw=2
-# - Try to find the required ffmpeg components(default: AVFORMAT, AVUTIL, AVCODEC)
-#
-# Once done this will define
-# FFMPEG_FOUND - System has the all required components.
-# FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers.
-# FFMPEG_LIBRARIES - Link these to use the required ffmpeg components.
-# FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components.
-#
-# For each of the components it will additionally set.
-# - AVCODEC
-# - AVDEVICE
-# - AVFORMAT
-# - AVFILTER
-# - AVUTIL
-# - POSTPROC
-# - SWSCALE
-# - SWRESAMPLE
-# - AVRESAMPLE
-# the following variables will be defined
-# _FOUND - System has
-# _INCLUDE_DIRS - Include directory necessary for using the headers
-# _LIBRARIES - Link these to use
-# _DEFINITIONS - Compiler switches required for using
-# _VERSION - The components version
-#
-# Copyright (c) 2006, Matthias Kretz,
-# Copyright (c) 2008, Alexander Neundorf,
-# Copyright (c) 2011, Michael Jansen,
-#
-# Redistribution and use is allowed according to the terms of the BSD license.
-# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+#[=======================================================================[.rst:
+FindFFmpeg
+----------
+Try to find the requested ffmpeg components (default: avformat, avutil, avcodec)
+IMPORTED targets
+^^^^^^^^^^^^^^^^
+
+This module defines :prop_tgt:`IMPORTED` targets ``FFmpeg::<component>`` for
+each found component (see below).
+
+Components
+^^^^^^^^^^
+
+The module recognizes the following components:
+
+::
+
+ avcodec - target FFmpeg::avcodec
+ avdevice - target FFmpeg::avdevice
+ avformat - target FFmpeg::avformat
+ avfilter - target FFmpeg::avfilter
+ avutil - target FFmpeg::avutil
+ postproc - target FFmpeg::postproc
+ swscale - target FFmpeg::swscale
+ swresample - target FFmpeg::swresample
+ avresample - target FFmpeg::avresample
+
+Result Variables
+^^^^^^^^^^^^^^^^
+
+This module defines the following variables:
+
+::
+
+ FFMPEG_FOUND - System has the all required components.
+ FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers.
+ FFMPEG_LIBRARIES - Link these to use the required ffmpeg components.
+ FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components.
+
+For each component, ``<component>_FOUND`` will be set if the component is available.
+
+For each ``<component>_FOUND``, the following variables will be defined:
+
+::
+
+  <component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
+  <component>_LIBRARIES - Link these to use <component>
+  <component>_DEFINITIONS - Compiler switches required for using <component>
+  <component>_VERSION - The component's version
+
+Backwards compatibility
+^^^^^^^^^^^^^^^^^^^^^^^
+
+For compatibility with previous versions of this module, uppercase names
+for FFmpeg and for all components are also recognized, and all-uppercase
+versions of the cache variables are also created.
+
+Copyright (c) 2006, Matthias Kretz,
+Copyright (c) 2008, Alexander Neundorf,
+Copyright (c) 2011, Michael Jansen,
+Copyright (c) 2019, FeRD (Frank Dana)
+
+Redistribution and use is allowed according to the terms of the BSD license.
+For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+#]=======================================================================]
include(FindPackageHandleStandardArgs)
-# The default components were taken from a survey over other FindFFMPEG.cmake files
-if (NOT FFmpeg_FIND_COMPONENTS)
- set(FFmpeg_FIND_COMPONENTS AVCODEC AVFORMAT AVUTIL)
+set(FFmpeg_ALL_COMPONENTS avcodec avdevice avformat avfilter avutil postproc swscale swresample avresample)
+
+# Default to all components, if not specified
+if (FFMPEG_FIND_COMPONENTS AND NOT FFmpeg_FIND_COMPONENTS)
+ set(FFmpeg_FIND_COMPONENTS ${FFMPEG_FIND_COMPONENTS})
endif ()
+if (NOT FFmpeg_FIND_COMPONENTS)
+ set(FFmpeg_FIND_COMPONENTS ${FFmpeg_ALL_COMPONENTS})
+endif ()
+
#
### Macro: set_component_found
@@ -44,12 +84,14 @@ endif ()
# Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present.
#
macro(set_component_found _component )
- if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
- # message(STATUS " - ${_component} found.")
- set(${_component}_FOUND TRUE)
- else ()
- # message(STATUS " - ${_component} not found.")
- endif ()
+ if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
+ # message(STATUS "FFmpeg - ${_component} found.")
+ set(${_component}_FOUND TRUE)
+ else ()
+ if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY)
+ message(STATUS "FFmpeg - ${_component} not found.")
+ endif ()
+ endif ()
endmacro()
#
@@ -60,102 +102,146 @@ endmacro()
#
macro(find_component _component _pkgconfig _library _header)
- if (NOT WIN32)
- # use pkg-config to get the directories and then use these values
- # in the FIND_PATH() and FIND_LIBRARY() calls
- find_package(PkgConfig)
- if (PKG_CONFIG_FOUND)
- pkg_check_modules(PC_${_component} ${_pkgconfig})
- endif ()
- endif (NOT WIN32)
+ if (NOT WIN32)
+ # use pkg-config to get the directories and then use these values
+ # in the FIND_PATH() and FIND_LIBRARY() calls
+ find_package(PkgConfig)
+ if (PKG_CONFIG_FOUND)
+ pkg_check_modules(PC_${_component} ${_pkgconfig})
+ endif ()
+ endif (NOT WIN32)
- find_path(${_component}_INCLUDE_DIRS ${_header}
- HINTS
- /opt/
- /opt/include/
- ${PC_LIB${_component}_INCLUDEDIR}
- ${PC_LIB${_component}_INCLUDE_DIRS}
- $ENV{FFMPEGDIR}/include/
- $ENV{FFMPEGDIR}/include/ffmpeg/
- PATH_SUFFIXES
- ffmpeg
- )
+ find_path(${_component}_INCLUDE_DIRS ${_header}
+ HINTS
+ /opt/
+ /opt/include/
+ ${PC_${_component}_INCLUDEDIR}
+ ${PC_${_component}_INCLUDE_DIRS}
+ $ENV{FFMPEGDIR}/include/
+ $ENV{FFMPEGDIR}/include/ffmpeg/
+ PATH_SUFFIXES
+ ffmpeg
+ )
- find_library(${_component}_LIBRARIES NAMES ${_library}
- HINTS
- ${PC_LIB${_component}_LIBDIR}
- ${PC_LIB${_component}_LIBRARY_DIRS}
- $ENV{FFMPEGDIR}/lib/
- $ENV{FFMPEGDIR}/lib/ffmpeg/
- $ENV{FFMPEGDIR}/bin/
- )
+ find_library(${_component}_LIBRARIES NAMES ${_library}
+ HINTS
+ ${PC_${_component}_LIBDIR}
+ ${PC_${_component}_LIBRARY_DIRS}
+ $ENV{FFMPEGDIR}/lib/
+ $ENV{FFMPEGDIR}/lib/ffmpeg/
+ $ENV{FFMPEGDIR}/bin/
+ )
- set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
- set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.")
+ set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
+ set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.")
- set_component_found(${_component})
+ set_component_found(${_component})
- mark_as_advanced(
- ${_component}_INCLUDE_DIRS
- ${_component}_LIBRARIES
- ${_component}_DEFINITIONS
- ${_component}_VERSION)
+ mark_as_advanced(
+ ${_component}_INCLUDE_DIRS
+ ${_component}_LIBRARIES
+ ${_component}_DEFINITIONS
+ ${_component}_VERSION
+ )
endmacro()
# Check for cached results. If there are skip the costly part.
-if (NOT FFMPEG_LIBRARIES)
+if (NOT FFmpeg_LIBRARIES)
- # Check for all possible component.
- find_component(AVCODEC libavcodec avcodec libavcodec/avcodec.h)
- find_component(AVFORMAT libavformat avformat libavformat/avformat.h)
- find_component(AVDEVICE libavdevice avdevice libavdevice/avdevice.h)
- find_component(AVUTIL libavutil avutil libavutil/avutil.h)
- find_component(AVFILTER libavfilter avfilter libavfilter/avfilter.h)
- find_component(SWSCALE libswscale swscale libswscale/swscale.h)
- find_component(POSTPROC libpostproc postproc libpostproc/postprocess.h)
- find_component(SWRESAMPLE libswresample swresample libswresample/swresample.h)
- find_component(AVRESAMPLE libavresample avresample libavresample/avresample.h)
+ # Check for all possible component.
+ find_component(avcodec libavcodec avcodec libavcodec/avcodec.h)
+ find_component(avdevice libavdevice avdevice libavdevice/avdevice.h)
+ find_component(avformat libavformat avformat libavformat/avformat.h)
+ find_component(avfilter libavfilter avfilter libavfilter/avfilter.h)
+ find_component(avutil libavutil avutil libavutil/avutil.h)
+ find_component(postproc libpostproc postproc libpostproc/postprocess.h)
+ find_component(swscale libswscale swscale libswscale/swscale.h)
+ find_component(swresample libswresample swresample libswresample/swresample.h)
+ find_component(avresample libavresample avresample libavresample/avresample.h)
+else()
+ # Just set the noncached _FOUND vars for the components.
+ foreach(_component ${FFmpeg_ALL_COMPONENTS})
+ set_component_found(${_component})
+ endforeach ()
+endif()
- # Check if the required components were found and add their stuff to the FFMPEG_* vars.
- foreach (_component ${FFmpeg_FIND_COMPONENTS})
- if (${_component}_FOUND)
- # message(STATUS "Required component ${_component} present.")
- set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} ${${_component}_LIBRARIES})
- set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} ${${_component}_DEFINITIONS})
- list(APPEND FFMPEG_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
- else ()
- # message(STATUS "Required component ${_component} missing.")
- endif ()
- endforeach ()
-
- # Build the include path with duplicates removed.
- if (FFMPEG_INCLUDE_DIRS)
- list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
- endif ()
-
- # cache the vars.
- set(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
- set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE)
- set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} CACHE STRING "The FFmpeg cflags." FORCE)
-
- mark_as_advanced(FFMPEG_INCLUDE_DIRS
- FFMPEG_LIBRARIES
- FFMPEG_DEFINITIONS)
-
-endif ()
-
-# Now set the noncached _FOUND vars for the components.
-foreach (_component AVCODEC AVDEVICE AVFORMAT AVUTIL POSTPROCESS SWSCALE SWRESAMPLE AVRESAMPLE)
- set_component_found(${_component})
+# Check if the requested components were found and add their stuff to the FFmpeg_* vars.
+foreach (_component ${FFmpeg_FIND_COMPONENTS})
+ string(TOLOWER "${_component}" _component)
+ if (${_component}_FOUND)
+ # message(STATUS "Requested component ${_component} present.")
+ set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${${_component}_LIBRARIES})
+ set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} ${${_component}_DEFINITIONS})
+ list(APPEND FFmpeg_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
+ else ()
+ # message(STATUS "Requested component ${_component} missing.")
+ endif ()
endforeach ()
+# Build the include path with duplicates removed.
+if (FFmpeg_INCLUDE_DIRS)
+ list(REMOVE_DUPLICATES FFmpeg_INCLUDE_DIRS)
+endif ()
+
+# cache the vars.
+set(FFmpeg_INCLUDE_DIRS ${FFmpeg_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
+set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE)
+set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} CACHE STRING "The FFmpeg cflags." FORCE)
+
+mark_as_advanced(FFmpeg_INCLUDE_DIRS
+ FFmpeg_LIBRARIES
+ FFmpeg_DEFINITIONS)
+
+# Backwards compatibility
+foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS)
+ get_property(_help CACHE FFmpeg_${_suffix} PROPERTY HELPSTRING)
+ set(FFMPEG_${_suffix} ${FFmpeg_${_suffix}} CACHE STRING "${_help}" FORCE)
+ mark_as_advanced(FFMPEG_${_suffix})
+endforeach()
+foreach(_component ${FFmpeg_ALL_COMPONENTS})
+ if(${_component}_FOUND)
+ string(TOUPPER "${_component}" _uc_component)
+ set(${_uc_component}_FOUND TRUE)
+ foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS VERSION)
+ get_property(_help CACHE ${_component}_${_suffix} PROPERTY HELPSTRING)
+ set(${_uc_component}_${_suffix} ${${_component}_${_suffix}} CACHE STRING "${_help}" FORCE)
+ mark_as_advanced(${_uc_component}_${_suffix})
+ endforeach()
+ endif()
+endforeach()
+
# Compile the list of required vars
-set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES FFMPEG_INCLUDE_DIRS)
+set(_FFmpeg_REQUIRED_VARS FFmpeg_LIBRARIES FFmpeg_INCLUDE_DIRS)
foreach (_component ${FFmpeg_FIND_COMPONENTS})
- list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES ${_component}_INCLUDE_DIRS)
+ list(APPEND _FFmpeg_REQUIRED_VARS
+ ${_component}_LIBRARIES
+ ${_component}_INCLUDE_DIRS)
endforeach ()
# Give a nice error message if some of the required vars are missing.
find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS})
+
+# Export targets for each found component
+foreach (_component ${FFmpeg_ALL_COMPONENTS})
+
+ if(${_component}_FOUND)
+ # message(STATUS "Creating IMPORTED target FFmpeg::${_component}")
+
+ if(NOT TARGET FFmpeg::${_component})
+ add_library(FFmpeg::${_component} UNKNOWN IMPORTED)
+
+ set_target_properties(FFmpeg::${_component} PROPERTIES
+ INTERFACE_INCLUDE_DIRECTORIES "${${_component}_INCLUDE_DIRS}")
+
+ set_property(TARGET FFmpeg::${_component} APPEND PROPERTY
+ INTERFACE_COMPILE_DEFINITIONS "${${_component}_DEFINITIONS}")
+
+ set_property(TARGET FFmpeg::${_component} APPEND PROPERTY
+ IMPORTED_LOCATION "${${_component}_LIBRARIES}")
+ endif()
+
+ endif()
+
+endforeach()
diff --git a/include/AudioBufferSource.h b/include/AudioBufferSource.h
index 4addb37d..d1eba986 100644
--- a/include/AudioBufferSource.h
+++ b/include/AudioBufferSource.h
@@ -54,25 +54,25 @@ namespace openshot
* The JUCE library cannot play audio directly from an AudioSampleBuffer, so this class exposes
* an AudioSampleBuffer as a AudioSource, so that JUCE can play the audio.
*/
- class AudioBufferSource : public PositionableAudioSource
+ class AudioBufferSource : public juce::PositionableAudioSource
{
private:
int position;
int start;
bool repeat;
- AudioSampleBuffer *buffer;
+ juce::AudioSampleBuffer *buffer;
public:
/// @brief Default constructor
/// @param audio_buffer This buffer contains the samples you want to play through JUCE.
- AudioBufferSource(AudioSampleBuffer *audio_buffer);
+ AudioBufferSource(juce::AudioSampleBuffer *audio_buffer);
/// Destructor
~AudioBufferSource();
/// @brief Get the next block of audio samples
/// @param info This struct informs us of which samples are needed next.
- void getNextAudioBlock (const AudioSourceChannelInfo& info);
+ void getNextAudioBlock (const juce::AudioSourceChannelInfo& info);
/// Prepare to play this audio source
void prepareToPlay(int, double);
@@ -82,13 +82,13 @@ namespace openshot
/// @brief Set the next read position of this source
/// @param newPosition The sample # to start reading from
- void setNextReadPosition (int64 newPosition);
+ void setNextReadPosition (juce::int64 newPosition);
/// Get the next read position of this source
- int64 getNextReadPosition() const;
+ juce::int64 getNextReadPosition() const;
/// Get the total length (in samples) of this audio source
- int64 getTotalLength() const;
+ juce::int64 getTotalLength() const;
/// Determines if this audio source should repeat when it reaches the end
bool isLooping() const;
@@ -98,7 +98,7 @@ namespace openshot
void setLooping (bool shouldLoop);
/// Update the internal buffer used by this source
- void setBuffer (AudioSampleBuffer *audio_buffer);
+ void setBuffer (juce::AudioSampleBuffer *audio_buffer);
};
}
diff --git a/include/AudioReaderSource.h b/include/AudioReaderSource.h
index 679aed61..6e2f541e 100644
--- a/include/AudioReaderSource.h
+++ b/include/AudioReaderSource.h
@@ -54,13 +54,13 @@ namespace openshot
*
* This allows any reader to play audio through JUCE (our audio framework).
*/
- class AudioReaderSource : public PositionableAudioSource
+ class AudioReaderSource : public juce::PositionableAudioSource
{
private:
int position; /// The position of the audio source (index of buffer)
bool repeat; /// Repeat the audio source when finished
int size; /// The size of the internal buffer
- AudioSampleBuffer *buffer; /// The audio sample buffer
+ juce::AudioSampleBuffer *buffer; /// The audio sample buffer
int speed; /// The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
ReaderBase *reader; /// The reader to pull samples from
@@ -90,7 +90,7 @@ namespace openshot
/// @brief Get the next block of audio samples
/// @param info This struct informs us of which samples are needed next.
- void getNextAudioBlock (const AudioSourceChannelInfo& info);
+ void getNextAudioBlock (const juce::AudioSourceChannelInfo& info);
/// Prepare to play this audio source
void prepareToPlay(int, double);
@@ -100,13 +100,13 @@ namespace openshot
/// @brief Set the next read position of this source
/// @param newPosition The sample # to start reading from
- void setNextReadPosition (int64 newPosition);
+ void setNextReadPosition (juce::int64 newPosition);
/// Get the next read position of this source
- int64 getNextReadPosition() const;
+ juce::int64 getNextReadPosition() const;
/// Get the total length (in samples) of this audio source
- int64 getTotalLength() const;
+ juce::int64 getTotalLength() const;
/// Determines if this audio source should repeat when it reaches the end
bool isLooping() const;
@@ -116,7 +116,7 @@ namespace openshot
void setLooping (bool shouldLoop);
/// Update the internal buffer used by this source
- void setBuffer (AudioSampleBuffer *audio_buffer);
+ void setBuffer (juce::AudioSampleBuffer *audio_buffer);
const ReaderInfo & getReaderInfo() const { return reader->info; }
diff --git a/include/AudioResampler.h b/include/AudioResampler.h
index 85a44b1f..de3cae41 100644
--- a/include/AudioResampler.h
+++ b/include/AudioResampler.h
@@ -55,11 +55,11 @@ namespace openshot {
*/
class AudioResampler {
private:
- AudioSampleBuffer *buffer;
- AudioSampleBuffer *resampled_buffer;
+ juce::AudioSampleBuffer *buffer;
+ juce::AudioSampleBuffer *resampled_buffer;
AudioBufferSource *buffer_source;
- ResamplingAudioSource *resample_source;
- AudioSourceChannelInfo resample_callback_buffer;
+ juce::ResamplingAudioSource *resample_source;
+ juce::AudioSourceChannelInfo resample_callback_buffer;
int num_of_samples;
int new_num_of_samples;
@@ -78,15 +78,15 @@ namespace openshot {
/// @param new_buffer The buffer of audio samples needing to be resampled
/// @param sample_rate The original sample rate of the buffered samples
/// @param new_sample_rate The requested sample rate you need
- void SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate);
+ void SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate);
/// @brief Sets the audio buffer and key settings
/// @param new_buffer The buffer of audio samples needing to be resampled
/// @param ratio The multiplier that needs to be applied to the sample rate (this is how resampling happens)
- void SetBuffer(AudioSampleBuffer *new_buffer, double ratio);
+ void SetBuffer(juce::AudioSampleBuffer *new_buffer, double ratio);
/// Get the resampled audio buffer
- AudioSampleBuffer* GetResampledBuffer();
+ juce::AudioSampleBuffer* GetResampledBuffer();
};
}
diff --git a/include/CacheBase.h b/include/CacheBase.h
index d954e411..72880dd5 100644
--- a/include/CacheBase.h
+++ b/include/CacheBase.h
@@ -53,7 +53,7 @@ namespace openshot {
int64_t max_bytes; ///< This is the max number of bytes to cache (0 = no limit)
/// Section lock for multiple threads
- CriticalSection *cacheCriticalSection;
+ juce::CriticalSection *cacheCriticalSection;
public:
diff --git a/include/Clip.h b/include/Clip.h
index c092c2ad..101b3697 100644
--- a/include/Clip.h
+++ b/include/Clip.h
@@ -103,15 +103,15 @@ namespace openshot {
class Clip : public ClipBase {
protected:
/// Section lock for multiple threads
- CriticalSection getFrameCriticalSection;
+ juce::CriticalSection getFrameCriticalSection;
private:
bool waveform; ///< Should a waveform be used instead of the clip's image
- list effects; /// effects; /// apply_effects(std::shared_ptr frame);
/// Get file extension
- string get_file_extension(string path);
+ std::string get_file_extension(std::string path);
/// Get a frame object or create a blank one
std::shared_ptr GetOrCreateFrame(int64_t number);
diff --git a/include/Frame.h b/include/Frame.h
index 1048c9cf..8eb51ed2 100644
--- a/include/Frame.h
+++ b/include/Frame.h
@@ -119,8 +119,8 @@ namespace openshot
std::shared_ptr wave_image;
std::shared_ptr audio;
std::shared_ptr previewApp;
- CriticalSection addingImageSection;
- CriticalSection addingAudioSection;
+ juce::CriticalSection addingImageSection;
+ juce::CriticalSection addingAudioSection;
const unsigned char *qbuffer;
Fraction pixel_ratio;
int channels;
diff --git a/include/OpenShot.h b/include/OpenShot.h
index c5778f99..5273ff0d 100644
--- a/include/OpenShot.h
+++ b/include/OpenShot.h
@@ -135,7 +135,9 @@
#include "PlayerBase.h"
#include "Point.h"
#include "Profiles.h"
+#include "QtHtmlReader.h"
#include "QtImageReader.h"
+#include "QtTextReader.h"
#include "Timeline.h"
#include "Settings.h"
diff --git a/include/QtHtmlReader.h b/include/QtHtmlReader.h
new file mode 100644
index 00000000..ca5f45c4
--- /dev/null
+++ b/include/QtHtmlReader.h
@@ -0,0 +1,145 @@
+/**
+ * @file
+ * @brief Header file for QtHtmlReader class
+ * @author Jonathan Thomas
+ * @author Sergei Kolesov (jediserg)
+ * @author Jeff Shillitto (jeffski)
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * . This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit .
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see .
+ */
+
+#ifndef OPENSHOT_QT_HTML_READER_H
+#define OPENSHOT_QT_HTML_READER_H
+
+#include "ReaderBase.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "CacheMemory.h"
+#include "Enums.h"
+#include "Exceptions.h"
+
+class QImage;
+
+namespace openshot
+{
+
+ /**
+ * @brief This class uses Qt libraries, to create frames with rendered HTML, and return
+ * openshot::Frame objects.
+ *
+ * Supports HTML/CSS subset available via Qt libraries, see: https://doc.qt.io/qt-5/richtext-html-subset.html
+ *
+ * @code
+ * // Any application using this class must instantiate either QGuiApplication or QApplication
+ * QApplication a(argc, argv);
+ *
+ * // Create a reader to generate an openshot::Frame containing text
+ * QtHtmlReader r(720, // width
+ * 480, // height
+ * 5, // x_offset
+ * 5, // y_offset
+ * GRAVITY_CENTER, // gravity
+ * "Check out this Text!", // html
+ * "b { color: #ff0000 }", // css
+ * "#000000" // background_color
+ * );
+ * r.Open(); // Open the reader
+ *
+ * // Get frame number 1 from the video (in fact, any frame # you request will return the same frame)
+ * std::shared_ptr<Frame> f = r.GetFrame(1);
+ *
+ * // Now that we have an openshot::Frame object, lets have some fun!
+ * f->Display(); // Display the frame on the screen
+ *
+ * // Close the reader
+ * r.Close();
+ * @endcode
+ */
+ class QtHtmlReader : public ReaderBase
+ {
+ private:
+ int width;
+ int height;
+ int x_offset;
+ int y_offset;
+ std::string html;
+ std::string css;
+ std::string background_color;
+ std::shared_ptr<QImage> image;
+ bool is_open;
+ openshot::GravityType gravity;
+ public:
+
+ /// Default constructor (blank text)
+ QtHtmlReader();
+
+ /// @brief Constructor for QtHtmlReader with all parameters.
+ /// @param width The width of the requested openshot::Frame (not the size of the text)
+ /// @param height The height of the requested openshot::Frame (not the size of the text)
+ /// @param x_offset The number of pixels to offset the text on the X axis (horizontal)
+ /// @param y_offset The number of pixels to offset the text on the Y axis (vertical)
+ /// @param gravity The alignment / gravity of the text
+ /// @param html The HTML you want to render / display
+ /// @param css The CSS you want to apply to style the HTML
+ /// @param background_color The background color of the frame image (valid values are a color string in #RRGGBB or #AARRGGBB notation, a CSS color name, or 'transparent')
+ QtHtmlReader(int width, int height, int x_offset, int y_offset, GravityType gravity, std::string html, std::string css, std::string background_color);
+
+ /// Close Reader
+ void Close();
+
+ /// Get the cache object used by this reader (always returns NULL for this object)
+ openshot::CacheMemory* GetCache() { return NULL; };
+
+ /// Get an openshot::Frame object for a specific frame number of this reader. All numbers
+ /// return the same Frame, since they all share the same image data.
+ ///
+ /// @returns The requested frame (containing the image)
+ /// @param requested_frame The frame number that is requested.
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame);
+
+ /// Determine if reader is open or closed
+ bool IsOpen() { return is_open; };
+
+ /// Return the type name of the class
+ std::string Name() { return "QtHtmlReader"; };
+
+ /// Get and Set JSON methods
+ std::string Json(); ///< Generate JSON string of this object
+ void SetJson(std::string value); ///< Load JSON string into this object
+ Json::Value JsonValue(); ///< Generate Json::JsonValue for this object
+ void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object
+
+ /// Open Reader - which is called by the constructor automatically
+ void Open();
+ };
+
+}
+
+#endif
diff --git a/include/QtTextReader.h b/include/QtTextReader.h
new file mode 100644
index 00000000..3bcb2236
--- /dev/null
+++ b/include/QtTextReader.h
@@ -0,0 +1,159 @@
+/**
+ * @file
+ * @brief Header file for QtTextReader class
+ * @author Jonathan Thomas
+ * @author Sergei Kolesov (jediserg)
+ * @author Jeff Shillitto (jeffski)
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * . This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit .
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see .
+ */
+
+#ifndef OPENSHOT_QT_TEXT_READER_H
+#define OPENSHOT_QT_TEXT_READER_H
+
+#include "ReaderBase.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "CacheMemory.h"
+#include "Enums.h"
+#include "Exceptions.h"
+
+class QImage;
+
+namespace openshot
+{
+
+ /**
+ * @brief This class uses Qt libraries, to create frames with "Text", and return
+ * openshot::Frame objects.
+ *
+ * All system fonts are supported, including many different font properties, such as size, color,
+ * alignment, padding, etc...
+ *
+ * @code
+ * // Any application using this class must instantiate either QGuiApplication or QApplication
+ * QApplication a(argc, argv);
+ *
+ * // Create a reader to generate an openshot::Frame containing text
+ * QtTextReader r(720, // width
+ * 480, // height
+ * 5, // x_offset
+ * 5, // y_offset
+ * GRAVITY_CENTER, // gravity
+ * "Check out this Text!", // text
+ * "Arial", // font
+ * 15.0, // font size
+ * "#fff000", // text_color
+ * "#000000" // background_color
+ * );
+ * r.Open(); // Open the reader
+ *
+ * // Get frame number 1 from the video (in fact, any frame # you request will return the same frame)
+ * std::shared_ptr<Frame> f = r.GetFrame(1);
+ *
+ * // Now that we have an openshot::Frame object, lets have some fun!
+ * f->Display(); // Display the frame on the screen
+ *
+ * // Close the reader
+ * r.Close();
+ * @endcode
+ */
+ class QtTextReader : public ReaderBase
+ {
+ private:
+ int width;
+ int height;
+ int x_offset;
+ int y_offset;
+ std::string text;
+ QFont font;
+ std::string text_color;
+ std::string background_color;
+ std::string text_background_color;
+ std::shared_ptr<QImage> image;
+ bool is_open;
+ openshot::GravityType gravity;
+
+ public:
+
+ /// Default constructor (blank text)
+ QtTextReader();
+
+ /// @brief Constructor for QtTextReader with all parameters.
+ /// @param width The width of the requested openshot::Frame (not the size of the text)
+ /// @param height The height of the requested openshot::Frame (not the size of the text)
+ /// @param x_offset The number of pixels to offset the text on the X axis (horizontal)
+ /// @param y_offset The number of pixels to offset the text on the Y axis (vertical)
+ /// @param gravity The alignment / gravity of the text
+ /// @param text The text you want to generate / display
+ /// @param font The font of the text
+ /// @param font_size The size of the text
+ /// @param is_bold Set to true to make text bold
+ /// @param is_italic Set to true to make text italic
+ /// @param text_color The color of the text (valid values are a color string in #RRGGBB or #AARRGGBB notation or a CSS color name)
+ /// @param background_color The background color of the frame image (valid values are a color string in #RRGGBB or #AARRGGBB notation, a CSS color name, or 'transparent')
+ QtTextReader(int width, int height, int x_offset, int y_offset, GravityType gravity, std::string text, QFont font, std::string text_color, std::string background_color);
+
+ /// Draw a box under rendered text using the specified color.
+ /// @param color The background color behind the text (valid values are a color string in #RRGGBB or #AARRGGBB notation or a CSS color name)
+ void SetTextBackgroundColor(std::string color);
+
+ /// Close Reader
+ void Close();
+
+ /// Get the cache object used by this reader (always returns NULL for this object)
+ openshot::CacheMemory* GetCache() { return NULL; };
+
+ /// Get an openshot::Frame object for a specific frame number of this reader. All numbers
+ /// return the same Frame, since they all share the same image data.
+ ///
+ /// @returns The requested frame (containing the image)
+ /// @param requested_frame The frame number that is requested.
+ std::shared_ptr<Frame> GetFrame(int64_t requested_frame);
+
+ /// Determine if reader is open or closed
+ bool IsOpen() { return is_open; };
+
+ /// Return the type name of the class
+ std::string Name() { return "QtTextReader"; };
+
+ /// Get and Set JSON methods
+ std::string Json(); ///< Generate JSON string of this object
+ void SetJson(std::string value); ///< Load JSON string into this object
+ Json::Value JsonValue(); ///< Generate Json::JsonValue for this object
+ void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object
+
+ /// Open Reader - which is called by the constructor automatically
+ void Open();
+ };
+
+}
+
+#endif
diff --git a/include/ReaderBase.h b/include/ReaderBase.h
index 38f448c2..8aabd336 100644
--- a/include/ReaderBase.h
+++ b/include/ReaderBase.h
@@ -100,8 +100,8 @@ namespace openshot
{
protected:
/// Section lock for multiple threads
- CriticalSection getFrameCriticalSection;
- CriticalSection processingCriticalSection;
+ juce::CriticalSection getFrameCriticalSection;
+ juce::CriticalSection processingCriticalSection;
ClipBase* parent;
public:
diff --git a/include/TextReader.h b/include/TextReader.h
index 7b276f7f..0995357d 100644
--- a/include/TextReader.h
+++ b/include/TextReader.h
@@ -121,7 +121,7 @@ namespace openshot
TextReader(int width, int height, int x_offset, int y_offset, GravityType gravity, string text, string font, double size, string text_color, string background_color);
/// Draw a box under rendered text using the specified color.
- /// @param text_background_color The background color behind the text
+ /// @param color The background color behind the text
void SetTextBackgroundColor(string color);
/// Close Reader
diff --git a/include/ZmqLogger.h b/include/ZmqLogger.h
index 2dc1a0cb..c165299e 100644
--- a/include/ZmqLogger.h
+++ b/include/ZmqLogger.h
@@ -55,7 +55,7 @@ namespace openshot {
*/
class ZmqLogger {
private:
- CriticalSection loggerCriticalSection;
+ juce::CriticalSection loggerCriticalSection;
std::string connection;
// Logfile related vars
diff --git a/src/AudioBufferSource.cpp b/src/AudioBufferSource.cpp
index 912d2552..46b04916 100644
--- a/src/AudioBufferSource.cpp
+++ b/src/AudioBufferSource.cpp
@@ -34,7 +34,7 @@ using namespace std;
using namespace openshot;
// Default constructor
-AudioBufferSource::AudioBufferSource(AudioSampleBuffer *audio_buffer)
+AudioBufferSource::AudioBufferSource(juce::AudioSampleBuffer *audio_buffer)
: position(0), start(0), repeat(false), buffer(audio_buffer)
{ }
@@ -46,7 +46,7 @@ AudioBufferSource::~AudioBufferSource()
};
// Get the next block of audio samples
-void AudioBufferSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
+void AudioBufferSource::getNextAudioBlock (const juce::AudioSourceChannelInfo& info)
{
int buffer_samples = buffer->getNumSamples();
int buffer_channels = buffer->getNumChannels();
@@ -98,7 +98,7 @@ void AudioBufferSource::prepareToPlay(int, double) { }
void AudioBufferSource::releaseResources() { }
// Set the next read position of this source
-void AudioBufferSource::setNextReadPosition (int64 newPosition)
+void AudioBufferSource::setNextReadPosition (juce::int64 newPosition)
{
// set position (if the new position is in range)
if (newPosition >= 0 && newPosition < buffer->getNumSamples())
@@ -106,14 +106,14 @@ void AudioBufferSource::setNextReadPosition (int64 newPosition)
}
// Get the next read position of this source
-int64 AudioBufferSource::getNextReadPosition() const
+juce::int64 AudioBufferSource::getNextReadPosition() const
{
// return the next read position
return position;
}
// Get the total length (in samples) of this audio source
-int64 AudioBufferSource::getTotalLength() const
+juce::int64 AudioBufferSource::getTotalLength() const
{
// Get the length
return buffer->getNumSamples();
@@ -134,7 +134,7 @@ void AudioBufferSource::setLooping (bool shouldLoop)
}
// Use a different AudioSampleBuffer for this source
-void AudioBufferSource::setBuffer (AudioSampleBuffer *audio_buffer)
+void AudioBufferSource::setBuffer (juce::AudioSampleBuffer *audio_buffer)
{
buffer = audio_buffer;
setNextReadPosition(0);
diff --git a/src/AudioReaderSource.cpp b/src/AudioReaderSource.cpp
index 8195d03b..41c0b3f6 100644
--- a/src/AudioReaderSource.cpp
+++ b/src/AudioReaderSource.cpp
@@ -152,7 +152,7 @@ juce::AudioSampleBuffer* AudioReaderSource::reverse_buffer(juce::AudioSampleBuff
ZmqLogger::Instance()->AppendDebugMethod("AudioReaderSource::reverse_buffer", "number_of_samples", number_of_samples, "channels", channels);
// Reverse array (create new buffer to hold the reversed version)
- AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
+ juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
reversed->clear();
for (int channel = 0; channel < channels; channel++)
@@ -177,7 +177,7 @@ juce::AudioSampleBuffer* AudioReaderSource::reverse_buffer(juce::AudioSampleBuff
}
// Get the next block of audio samples
-void AudioReaderSource::getNextAudioBlock(const AudioSourceChannelInfo& info)
+void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& info)
{
int buffer_samples = buffer->getNumSamples();
int buffer_channels = buffer->getNumChannels();
@@ -248,7 +248,7 @@ void AudioReaderSource::prepareToPlay(int, double) { }
void AudioReaderSource::releaseResources() { }
// Set the next read position of this source
-void AudioReaderSource::setNextReadPosition (int64 newPosition)
+void AudioReaderSource::setNextReadPosition (juce::int64 newPosition)
{
// set position (if the new position is in range)
if (newPosition >= 0 && newPosition < buffer->getNumSamples())
@@ -256,14 +256,14 @@ void AudioReaderSource::setNextReadPosition (int64 newPosition)
}
// Get the next read position of this source
-int64 AudioReaderSource::getNextReadPosition() const
+juce::int64 AudioReaderSource::getNextReadPosition() const
{
// return the next read position
return position;
}
// Get the total length (in samples) of this audio source
-int64 AudioReaderSource::getTotalLength() const
+juce::int64 AudioReaderSource::getTotalLength() const
{
// Get the length
if (reader)
@@ -287,7 +287,7 @@ void AudioReaderSource::setLooping (bool shouldLoop)
}
// Update the internal buffer used by this source
-void AudioReaderSource::setBuffer (AudioSampleBuffer *audio_buffer)
+void AudioReaderSource::setBuffer (juce::AudioSampleBuffer *audio_buffer)
{
buffer = audio_buffer;
setNextReadPosition(0);
diff --git a/src/AudioResampler.cpp b/src/AudioResampler.cpp
index d9c5a609..145f5d91 100644
--- a/src/AudioResampler.cpp
+++ b/src/AudioResampler.cpp
@@ -49,10 +49,10 @@ AudioResampler::AudioResampler()
buffer_source = new AudioBufferSource(buffer);
// Init resampling source
- resample_source = new ResamplingAudioSource(buffer_source, false, 2);
+ resample_source = new juce::ResamplingAudioSource(buffer_source, false, 2);
// Init resampled buffer
- resampled_buffer = new AudioSampleBuffer(2, 1);
+ resampled_buffer = new juce::AudioSampleBuffer(2, 1);
resampled_buffer->clear();
// Init callback buffer
@@ -74,7 +74,7 @@ AudioResampler::~AudioResampler()
}
// Sets the audio buffer and updates the key settings
-void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
+void AudioResampler::SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
{
if (sample_rate <= 0)
sample_rate = 44100;
@@ -89,7 +89,7 @@ void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate
}
// Sets the audio buffer and key settings
-void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double ratio)
+void AudioResampler::SetBuffer(juce::AudioSampleBuffer *new_buffer, double ratio)
{
// Update buffer & buffer source
buffer = new_buffer;
@@ -120,7 +120,7 @@ void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double ratio)
}
// Get the resampled audio buffer
-AudioSampleBuffer* AudioResampler::GetResampledBuffer()
+juce::AudioSampleBuffer* AudioResampler::GetResampledBuffer()
{
// Resample the current frame's audio buffer (into the temp callback buffer)
resample_source->getNextAudioBlock(resample_callback_buffer);
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index d6aae2ce..64a81824 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -29,13 +29,11 @@ include_directories(${OPENSHOT_INCLUDE_DIRS})
####### Display summary of options/dependencies ######
include(FeatureSummary)
-#set_property(GLOBAL APPEND PROPERTY FeatureSummary_PKG_TYPES BUILD)
-#find_package(FOO)
-#set_package_properties(FOO PROPERTIES TYPE BUILD)
################ OPTIONS ##################
# Optional build settings for libopenshot
OPTION(USE_SYSTEM_JSONCPP "Use system installed JsonCpp" OFF)
+option(ENABLE_IWYU "Enable 'Include What You Use' scanner (CMake 3.3+)" OFF)
################ WINDOWS ##################
# Set some compiler options for Windows
@@ -84,20 +82,6 @@ IF (ImageMagick_FOUND)
ENDIF (ImageMagick_FOUND)
-################### FFMPEG #####################
-# Find FFmpeg libraries (used for video encoding / decoding)
-FIND_PACKAGE(FFmpeg REQUIRED)
-
-foreach(ffmpeg_comp AVCODEC AVDEVICE AVFORMAT AVFILTER AVUTIL POSTPROC SWSCALE SWRESAMPLE AVRESAMPLE)
- if(${ffmpeg_comp}_FOUND)
- list(APPEND FF_INCLUDES ${${ffmpeg_comp}_INCLUDE_DIRS})
- add_definitions(${${ffmpeg_comp}_DEFINITIONS})
- list(APPEND FF_LIBRARIES ${${ffmpeg_comp}_LIBRARIES})
- endif()
-endforeach()
-list(REMOVE_DUPLICATES FF_INCLUDES)
-include_directories(${FF_INCLUDES})
-
################# LIBOPENSHOT-AUDIO ###################
# Find JUCE-based openshot Audio libraries
FIND_PACKAGE(OpenShotAudio 0.1.8 REQUIRED)
@@ -168,6 +152,26 @@ endif(USE_SYSTEM_JSONCPP)
#set(PROFILER "/usr/lib/libprofiler.so.0.3.2")
#set(PROFILER "/usr/lib/libtcmalloc.so.4")
+if(CMAKE_VERSION VERSION_LESS 3.3)
+ # IWYU wasn't supported internally in 3.2
+ set(ENABLE_IWYU FALSE)
+endif()
+
+if(ENABLE_IWYU)
+ find_program(IWYU_PATH NAMES "iwyu"
+ DOC "include-what-you-use source code scanner executable")
+ if(IWYU_PATH)
+ if(IWYU_OPTS)
+ separate_arguments(IWYU_OPTS)
+ list(APPEND _iwyu_opts "-Xiwyu" ${IWYU_OPTS})
+ endif()
+ set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH} ${_iwyu_opts})
+ else()
+ set(ENABLE_IWYU FALSE)
+ endif()
+endif()
+add_feature_info("IWYU (include-what-you-use)" ENABLE_IWYU "Scan all source files with 'iwyu'")
+
#### GET LIST OF EFFECT FILES ####
FILE(GLOB EFFECT_FILES "${CMAKE_CURRENT_SOURCE_DIR}/effects/*.cpp")
@@ -211,6 +215,9 @@ SET ( OPENSHOT_SOURCE_FILES
QtPlayer.cpp
Settings.cpp
Timeline.cpp
+ QtTextReader.cpp
+ QtHtmlReader.cpp
+
# Qt Video Player
${QT_PLAYER_FILES}
@@ -260,6 +267,17 @@ set_target_properties(openshot
INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
)
+
+################### FFMPEG #####################
+# Find FFmpeg libraries (used for video encoding / decoding)
+FIND_PACKAGE(FFmpeg REQUIRED COMPONENTS avcodec avdevice avformat avutil swscale)
+
+foreach(ff_comp avcodec avdevice avformat avfilter avutil postproc swscale swresample avresample)
+ if(TARGET FFmpeg::${ff_comp})
+ target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp})
+ endif()
+endforeach()
+
################### OPENMP #####################
# Check for OpenMP (used for multi-core processing)
@@ -296,7 +314,6 @@ endif()
############### LINK LIBRARY #################
SET ( REQUIRED_LIBRARIES
${LIBOPENSHOT_AUDIO_LIBRARIES}
- ${FF_LIBRARIES}
${QT_LIBRARIES}
${PROFILER}
${JSONCPP_LIBRARY}
@@ -326,7 +343,7 @@ target_link_libraries(openshot PUBLIC ${REQUIRED_LIBRARIES})
# Pick up parameters from OpenMP target and propagate
target_link_libraries(openshot PUBLIC OpenMP::OpenMP_CXX)
-############### CLI EXECUTABLE ################
+############### CLI EXECUTABLES ################
# Create test executable
add_executable(openshot-example examples/Example.cpp)
@@ -341,6 +358,9 @@ target_compile_definitions(openshot-example PRIVATE
# Link test executable to the new library
target_link_libraries(openshot-example openshot)
+add_executable(openshot-html-test examples/ExampleHtml.cpp)
+target_link_libraries(openshot-html-test openshot Qt5::Gui)
+
############### PLAYER EXECUTABLE ################
# Create test executable
add_executable(openshot-player Qt/demo/main.cpp)
diff --git a/src/Clip.cpp b/src/Clip.cpp
index 7e82ff01..5252681a 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -367,7 +367,7 @@ void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
int channels = buffer->getNumChannels();
// Reverse array (create new buffer to hold the reversed version)
- AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
+ juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
reversed->clear();
for (int channel = 0; channel < channels; channel++)
@@ -399,7 +399,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr frame, int64_t frame_num
// Check for a valid time map curve
if (time.Values.size() > 1)
{
- const GenericScopedLock lock(getFrameCriticalSection);
+ const GenericScopedLock lock(getFrameCriticalSection);
// create buffer and resampler
juce::AudioSampleBuffer *samples = NULL;
@@ -423,7 +423,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr frame, int64_t frame_num
if (time.GetRepeatFraction(frame_number).den > 1) {
// SLOWING DOWN AUDIO
// Resample data, and return new buffer pointer
- AudioSampleBuffer *resampled_buffer = NULL;
+ juce::AudioSampleBuffer *resampled_buffer = NULL;
int resampled_buffer_size = 0;
// SLOW DOWN audio (split audio)
@@ -482,7 +482,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr frame, int64_t frame_num
delta_frame <= new_frame_number; delta_frame++) {
// buffer to hold detal samples
int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
- AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
+ juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
number_of_delta_samples);
delta_samples->clear();
@@ -526,7 +526,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr frame, int64_t frame_num
delta_frame >= new_frame_number; delta_frame--) {
// buffer to hold delta samples
int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
- AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
+ juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
number_of_delta_samples);
delta_samples->clear();
@@ -557,7 +557,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr frame, int64_t frame_num
resampler->SetBuffer(samples, float(start) / float(number_of_samples));
// Resample data, and return new buffer pointer
- AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
+ juce::AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
int resampled_buffer_size = buffer->getNumSamples();
// Add the newly resized audio samples to the current frame
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index f85e09ad..b54376e6 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -172,59 +172,50 @@ void FFmpegWriter::SetVideoOptions(bool has_video, string codec, Fraction fps, i
AVCodec *new_codec;
// Check if the codec selected is a hardware accelerated codec
#if IS_FFMPEG_3_2
- #if defined(__linux__)
- if ( (strcmp(codec.c_str(),"h264_vaapi") == 0)) {
+#if defined(__linux__)
+ if (strstr(codec.c_str(), "_vaapi") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_VAAPI;
hw_en_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
+ } else if (strstr(codec.c_str(), "_nvenc") != NULL) {
+ new_codec = avcodec_find_encoder_by_name(codec.c_str());
+ hw_en_on = 1;
+ hw_en_supported = 1;
+ hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
+ hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
+ } else {
+ new_codec = avcodec_find_encoder_by_name(codec.c_str());
+ hw_en_on = 0;
+ hw_en_supported = 0;
}
- else {
- if ( (strcmp(codec.c_str(),"h264_nvenc") == 0)) {
- new_codec = avcodec_find_encoder_by_name(codec.c_str());
- hw_en_on = 1;
- hw_en_supported = 1;
- hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
- hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
- }
- else {
- new_codec = avcodec_find_encoder_by_name(codec.c_str());
- hw_en_on = 0;
- hw_en_supported = 0;
- }
- }
- #elif defined(_WIN32)
- if ( (strcmp(codec.c_str(),"h264_dxva2") == 0)) {
+#elif defined(_WIN32)
+ if (strstr(codec.c_str(), "_dxva2") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_DXVA2_VLD;
hw_en_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
+ } else if (strstr(codec.c_str(), "_nvenc") != NULL) {
+ new_codec = avcodec_find_encoder_by_name(codec.c_str());
+ hw_en_on = 1;
+ hw_en_supported = 1;
+ hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
+ hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
+ } else {
+ new_codec = avcodec_find_encoder_by_name(codec.c_str());
+ hw_en_on = 0;
+ hw_en_supported = 0;
}
- else {
- if ( (strcmp(codec.c_str(),"h264_nvenc") == 0)) {
- new_codec = avcodec_find_encoder_by_name(codec.c_str());
- hw_en_on = 1;
- hw_en_supported = 1;
- hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
- hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
- }
- else {
- new_codec = avcodec_find_encoder_by_name(codec.c_str());
- hw_en_on = 0;
- hw_en_supported = 0;
- }
- }
- #elif defined(__APPLE__)
- if ( (strcmp(codec.c_str(),"h264_videotoolbox") == 0)) {
+#elif defined(__APPLE__)
+ if (strstr(codec.c_str(), "_videotoolbox") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX;
hw_en_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
- }
- else {
+ } else {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 0;
hw_en_supported = 0;
@@ -350,7 +341,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value) {
// Was option found?
if (option || (name == "g" || name == "qmin" || name == "qmax" || name == "max_b_frames" || name == "mb_decision" ||
name == "level" || name == "profile" || name == "slices" || name == "rc_min_rate" || name == "rc_max_rate" ||
- name == "crf")) {
+ name == "crf" || name == "cqp")) {
// Check for specific named options
if (name == "g")
// Set gop_size
@@ -396,7 +387,57 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value) {
// Buffer size
convert >> c->rc_buffer_size;
- else if (name == "crf") {
+ else if (name == "cqp") {
+ // encode quality and special settings like lossless
+ // This might be better in an extra method as more options
+ // and ways to set quality are possible
+ #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 39, 101)
+ #if IS_FFMPEG_3_2
+ if (hw_en_on) {
+ av_opt_set_int(c->priv_data, "qp", min(stoi(value),63), 0); // 0-63
+ } else
+ #endif
+ {
+ switch (c->codec_id) {
+ #if (LIBAVCODEC_VERSION_MAJOR >= 58)
+ case AV_CODEC_ID_AV1 :
+ c->bit_rate = 0;
+ av_opt_set_int(c->priv_data, "qp", min(stoi(value),63), 0); // 0-63
+ break;
+ #endif
+ case AV_CODEC_ID_VP8 :
+ c->bit_rate = 10000000;
+ av_opt_set_int(c->priv_data, "qp", max(min(stoi(value), 63), 4), 0); // 4-63
+ break;
+ case AV_CODEC_ID_VP9 :
+ c->bit_rate = 0; // Must be zero!
+ av_opt_set_int(c->priv_data, "qp", min(stoi(value), 63), 0); // 0-63
+ if (stoi(value) == 0) {
+ av_opt_set(c->priv_data, "preset", "veryslow", 0);
+ av_opt_set_int(c->priv_data, "lossless", 1, 0);
+ }
+ break;
+ case AV_CODEC_ID_H264 :
+ av_opt_set_int(c->priv_data, "qp", min(stoi(value), 51), 0); // 0-51
+ if (stoi(value) == 0) {
+ av_opt_set(c->priv_data, "preset", "veryslow", 0);
+ }
+ break;
+ case AV_CODEC_ID_H265 :
+ av_opt_set_int(c->priv_data, "qp", min(stoi(value), 51), 0); // 0-51
+ if (stoi(value) == 0) {
+ av_opt_set(c->priv_data, "preset", "veryslow", 0);
+ av_opt_set_int(c->priv_data, "lossless", 1, 0);
+ }
+ break;
+ default:
+ // For all other codecs assume a range of 0-63
+ av_opt_set_int(c->priv_data, "qp", min(stoi(value), 63), 0); // 0-63
+ c->bit_rate = 0;
+ }
+ }
+ #endif
+ } else if (name == "crf") {
// encode quality and special settings like lossless
// This might be better in an extra methods as more options
// and way to set quality are possible
@@ -480,7 +521,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value) {
// write selfcontained fragmented file, minimum length of the fragment 8 sec; only for MOV, MP4
av_dict_set(&mux_dict, "movflags", "frag_keyframe", 0);
av_dict_set(&mux_dict, "min_frag_duration", "8000000", 0);
- }
+ }
} else {
throw InvalidOptions("The option is not valid for this codec.", path);
}
@@ -543,6 +584,7 @@ void FFmpegWriter::WriteHeader() {
// Write the stream header
if (avformat_write_header(oc, &dict) != 0) {
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteHeader (avformat_write_header)");
throw InvalidFile("Could not write header to file.", path);
};
@@ -1241,7 +1283,7 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) {
// Open the codec
if (avcodec_open2(audio_codec, codec, &opts) < 0)
- throw InvalidCodec("Could not open codec", path);
+ throw InvalidCodec("Could not open audio codec", path);
AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec);
// Free options
@@ -1330,7 +1372,7 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
#elif defined(__APPLE__)
if( adapter_ptr != NULL ) {
#endif
- ZmqLogger::Instance()->AppendDebugMethod("Encode Device present using device");
+ ZmqLogger::Instance()->AppendDebugMethod("Encode Device present using device", "adapter", adapter_num);
}
else {
adapter_ptr = NULL; // use default
@@ -1361,23 +1403,58 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
#if IS_FFMPEG_3_2
if (hw_en_on && hw_en_supported) {
- video_codec->max_b_frames = 0; // At least this GPU doesn't support b-frames
video_codec->pix_fmt = hw_en_av_pix_fmt;
- video_codec->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
- av_opt_set(video_codec->priv_data,"preset","slow",0);
- av_opt_set(video_codec->priv_data,"tune","zerolatency",0);
- av_opt_set(video_codec->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
+
+ // for the list of possible options, see the list of codec-specific options:
+ // e.g. ffmpeg -h encoder=h264_vaapi or ffmpeg -h encoder=hevc_vaapi
+ // and "man ffmpeg-codecs"
+
+ // For VAAPI, it is safer to explicitly set rc_mode instead of relying on auto-selection
+ // which is ffmpeg version-specific.
+ if (hw_en_av_pix_fmt == AV_PIX_FMT_VAAPI) {
+ int64_t qp;
+ if (av_opt_get_int(video_codec->priv_data, "qp", 0, &qp) != 0 || qp == 0) {
+ // unless "qp" was set for CQP, switch to VBR RC mode
+ av_opt_set(video_codec->priv_data, "rc_mode", "VBR", 0);
+
+ // In the current state (ffmpeg-4.2-4 libva-mesa-driver-19.1.5-1) to use VBR,
+ // one has to specify both bit_rate and maxrate, otherwise a small low quality file is generated on Intel iGPU).
+ video_codec->rc_max_rate = video_codec->bit_rate;
+ }
+ }
+
+ switch (video_codec->codec_id) {
+ case AV_CODEC_ID_H264:
+ video_codec->max_b_frames = 0; // At least this GPU doesn't support b-frames
+ video_codec->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
+ av_opt_set(video_codec->priv_data, "preset", "slow", 0);
+ av_opt_set(video_codec->priv_data, "tune", "zerolatency", 0);
+ av_opt_set(video_codec->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
+ break;
+ case AV_CODEC_ID_HEVC:
+ // tested to work with defaults
+ break;
+ case AV_CODEC_ID_VP9:
+ // tested to work with defaults
+ break;
+ default:
+ ZmqLogger::Instance()->AppendDebugMethod("No codec-specific options defined for this codec. HW encoding may fail",
+ "codec_id", video_codec->codec_id);
+ break;
+ }
+
// set hw_frames_ctx for encoder's AVCodecContext
int err;
if ((err = set_hwframe_ctx(video_codec, hw_device_ctx, info.width, info.height)) < 0) {
- fprintf(stderr, "Failed to set hwframe context.\n");
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video (set_hwframe_ctx) ERROR faled to set hwframe context",
+ "width", info.width, "height", info.height, av_err2str(err), -1);
}
}
#endif
/* open the codec */
if (avcodec_open2(video_codec, codec, &opts) < 0)
- throw InvalidCodec("Could not open codec", path);
+ throw InvalidCodec("Could not open video codec", path);
AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec);
// Free options
diff --git a/src/Frame.cpp b/src/Frame.cpp
index 49fb7358..d5e64c37 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -340,7 +340,7 @@ float* Frame::GetAudioSamples(int channel)
float* Frame::GetPlanarAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count)
{
float *output = NULL;
- AudioSampleBuffer *buffer(audio.get());
+ juce::AudioSampleBuffer *buffer(audio.get());
int num_of_channels = audio->getNumChannels();
int num_of_samples = GetAudioSamplesCount();
@@ -386,7 +386,7 @@ float* Frame::GetPlanarAudioSamples(int new_sample_rate, AudioResampler* resampl
float* Frame::GetInterleavedAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count)
{
float *output = NULL;
- AudioSampleBuffer *buffer(audio.get());
+ juce::AudioSampleBuffer *buffer(audio.get());
int num_of_channels = audio->getNumChannels();
int num_of_samples = GetAudioSamplesCount();
@@ -430,7 +430,7 @@ float* Frame::GetInterleavedAudioSamples(int new_sample_rate, AudioResampler* re
// Get number of audio channels
int Frame::GetAudioChannelsCount()
{
- const GenericScopedLock lock(addingAudioSection);
+ const GenericScopedLock lock(addingAudioSection);
if (audio)
return audio->getNumChannels();
else
@@ -440,7 +440,7 @@ int Frame::GetAudioChannelsCount()
// Get number of audio samples
int Frame::GetAudioSamplesCount()
{
- const GenericScopedLock lock(addingAudioSection);
+ const GenericScopedLock lock(addingAudioSection);
return max_audio_sample;
}
@@ -735,7 +735,7 @@ void Frame::AddColor(int new_width, int new_height, string new_color)
color = new_color;
// Create new image object, and fill with pixel data
- const GenericScopedLock lock(addingImageSection);
+ const GenericScopedLock lock(addingImageSection);
#pragma omp critical (AddImage)
{
image = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_RGBA8888));
@@ -753,7 +753,7 @@ void Frame::AddColor(int new_width, int new_height, string new_color)
void Frame::AddImage(int new_width, int new_height, int bytes_per_pixel, QImage::Format type, const unsigned char *pixels_)
{
// Create new buffer
- const GenericScopedLock lock(addingImageSection);
+ const GenericScopedLock lock(addingImageSection);
int buffer_size = new_width * new_height * bytes_per_pixel;
qbuffer = new unsigned char[buffer_size]();
@@ -784,7 +784,7 @@ void Frame::AddImage(std::shared_ptr new_image)
return;
// assign image data
- const GenericScopedLock lock(addingImageSection);
+ const GenericScopedLock lock(addingImageSection);
#pragma omp critical (AddImage)
{
image = new_image;
@@ -823,7 +823,7 @@ void Frame::AddImage(std::shared_ptr new_image, bool only_odd_lines)
return;
// Get the frame's image
- const GenericScopedLock lock(addingImageSection);
+ const GenericScopedLock lock(addingImageSection);
#pragma omp critical (AddImage)
{
const unsigned char *pixels = image->bits();
@@ -851,7 +851,7 @@ void Frame::AddImage(std::shared_ptr new_image, bool only_odd_lines)
// Resize audio container to hold more (or less) samples and channels
void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout)
{
- const GenericScopedLock lock(addingAudioSection);
+ const GenericScopedLock lock(addingAudioSection);
// Resize JUCE audio buffer
audio->setSize(channels, length, true, true, false);
@@ -864,7 +864,7 @@ void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout
// Add audio samples to a specific channel
void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float* source, int numSamples, float gainToApplyToSource = 1.0f) {
- const GenericScopedLock lock(addingAudioSection);
+ const GenericScopedLock lock(addingAudioSection);
#pragma omp critical (adding_audio)
{
// Clamp starting sample to 0
@@ -895,7 +895,7 @@ void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample,
// Apply gain ramp (i.e. fading volume)
void Frame::ApplyGainRamp(int destChannel, int destStartSample, int numSamples, float initial_gain = 0.0f, float final_gain = 1.0f)
{
- const GenericScopedLock lock(addingAudioSection);
+ const GenericScopedLock lock(addingAudioSection);
// Apply gain ramp
audio->applyGainRamp(destChannel, destStartSample, numSamples, initial_gain, final_gain);
@@ -970,7 +970,7 @@ void Frame::Play()
if (!GetAudioSamplesCount())
return;
- AudioDeviceManager deviceManager;
+ juce::AudioDeviceManager deviceManager;
String error = deviceManager.initialise (0, /* number of input channels */
2, /* number of output channels */
0, /* no XML settings.. */
@@ -981,14 +981,14 @@ void Frame::Play()
cout << "Error on initialise(): " << error.toStdString() << endl;
}
- AudioSourcePlayer audioSourcePlayer;
+ juce::AudioSourcePlayer audioSourcePlayer;
deviceManager.addAudioCallback (&audioSourcePlayer);
ScopedPointer my_source;
my_source = new AudioBufferSource(audio.get());
// Create TimeSliceThread for audio buffering
- TimeSliceThread my_thread("Audio buffer thread");
+ juce::TimeSliceThread my_thread("Audio buffer thread");
// Start thread
my_thread.startThread();
@@ -1004,7 +1004,7 @@ void Frame::Play()
// Create MIXER
- MixerAudioSource mixer;
+ juce::MixerAudioSource mixer;
mixer.addInputSource(&transport1, false);
audioSourcePlayer.setSource (&mixer);
@@ -1047,7 +1047,7 @@ void Frame::cleanUpBuffer(void *info)
// Add audio silence
void Frame::AddAudioSilence(int numSamples)
{
- const GenericScopedLock lock(addingAudioSection);
+ const GenericScopedLock lock(addingAudioSection);
// Resize audio container
audio->setSize(channels, numSamples, false, true, false);
diff --git a/src/QtHtmlReader.cpp b/src/QtHtmlReader.cpp
new file mode 100644
index 00000000..e3cdc602
--- /dev/null
+++ b/src/QtHtmlReader.cpp
@@ -0,0 +1,267 @@
+/**
+ * @file
+ * @brief Source file for QtHtmlReader class
+ * @author Jonathan Thomas <jonathan@openshot.org>
+ * @author Sergei Kolesov (jediserg)
+ * @author Jeff Shillitto (jeffski)
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../include/QtHtmlReader.h"
+#include <QImage>
+#include <QPainter>
+#include <QTextDocument>
+#include <QGuiApplication>
+#include <QAbstractTextDocumentLayout>
+
+using namespace openshot;
+
+/// Default constructor (blank text)
+QtHtmlReader::QtHtmlReader() : width(1024), height(768), x_offset(0), y_offset(0), html(""), css(""), background_color("#000000"), is_open(false), gravity(GRAVITY_CENTER)
+{
+ // Open and Close the reader, to populate its attributes (such as height, width, etc...)
+ Open();
+ Close();
+}
+
+QtHtmlReader::QtHtmlReader(int width, int height, int x_offset, int y_offset, GravityType gravity, std::string html, std::string css, std::string background_color)
+: width(width), height(height), x_offset(x_offset), y_offset(y_offset), gravity(gravity), html(html), css(css), background_color(background_color), is_open(false)
+{
+ // Open and Close the reader, to populate its attributes (such as height, width, etc...)
+ Open();
+ Close();
+}
+
+// Open reader
+void QtHtmlReader::Open()
+{
+ // Open reader if not already open
+ if (!is_open)
+ {
+ // create image
+ image = std::shared_ptr<QImage>(new QImage(width, height, QImage::Format_RGBA8888));
+ image->fill(QColor(background_color.c_str()));
+
+ //start painting
+ QPainter painter;
+ if (!painter.begin(image.get())) {
+ return;
+ }
+
+ //set background
+ painter.setBackground(QBrush(background_color.c_str()));
+
+ //draw text
+ QTextDocument text_document;
+
+ //disable redo/undo stack as not needed
+ text_document.setUndoRedoEnabled(false);
+
+ //create the HTML/CSS document
+ text_document.setTextWidth(width);
+ text_document.setDefaultStyleSheet(css.c_str());
+ text_document.setHtml(html.c_str());
+
+ int td_height = text_document.documentLayout()->documentSize().height();
+
+ if (gravity == GRAVITY_TOP_LEFT || gravity == GRAVITY_TOP || gravity == GRAVITY_TOP_RIGHT) {
+ painter.translate(x_offset, y_offset);
+ } else if (gravity == GRAVITY_LEFT || gravity == GRAVITY_CENTER || gravity == GRAVITY_RIGHT) {
+ painter.translate(x_offset, (height - td_height) / 2 + y_offset);
+ } else if (gravity == GRAVITY_BOTTOM_LEFT || gravity == GRAVITY_BOTTOM_RIGHT || gravity == GRAVITY_BOTTOM) {
+ painter.translate(x_offset, height - td_height + y_offset);
+ }
+
+ if (gravity == GRAVITY_TOP_LEFT || gravity == GRAVITY_LEFT || gravity == GRAVITY_BOTTOM_LEFT) {
+ text_document.setDefaultTextOption(QTextOption(Qt::AlignLeft));
+ } else if (gravity == GRAVITY_CENTER || gravity == GRAVITY_TOP || gravity == GRAVITY_BOTTOM) {
+ text_document.setDefaultTextOption(QTextOption(Qt::AlignHCenter));
+ } else if (gravity == GRAVITY_TOP_RIGHT || gravity == GRAVITY_RIGHT|| gravity == GRAVITY_BOTTOM_RIGHT) {
+ text_document.setDefaultTextOption(QTextOption(Qt::AlignRight));
+ }
+
+ // Draw image
+ text_document.drawContents(&painter);
+
+ painter.end();
+
+ // Update image properties
+ info.has_audio = false;
+ info.has_video = true;
+ info.file_size = 0;
+ info.vcodec = "QImage";
+ info.width = width;
+ info.height = height;
+ info.pixel_ratio.num = 1;
+ info.pixel_ratio.den = 1;
+ info.duration = 60 * 60 * 1; // 1 hour duration
+ info.fps.num = 30;
+ info.fps.den = 1;
+ info.video_timebase.num = 1;
+ info.video_timebase.den = 30;
+ info.video_length = round(info.duration * info.fps.ToDouble());
+
+ // Calculate the DAR (display aspect ratio)
+ Fraction size(info.width * info.pixel_ratio.num, info.height * info.pixel_ratio.den);
+
+ // Reduce size fraction
+ size.Reduce();
+
+ // Set the ratio based on the reduced fraction
+ info.display_ratio.num = size.num;
+ info.display_ratio.den = size.den;
+
+ // Mark as "open"
+ is_open = true;
+ }
+}
+
+// Close reader
+void QtHtmlReader::Close()
+{
+ // Close all objects, if reader is 'open'
+ if (is_open)
+ {
+ // Mark as "closed"
+ is_open = false;
+
+ // Delete the image
+ image.reset();
+
+ info.vcodec = "";
+ info.acodec = "";
+ }
+}
+
+// Get an openshot::Frame object for a specific frame number of this reader.
+std::shared_ptr<Frame> QtHtmlReader::GetFrame(int64_t requested_frame)
+{
+ if (image)
+ {
+ // Create or get frame object
+ std::shared_ptr<Frame> image_frame(new Frame(requested_frame, image->size().width(), image->size().height(), background_color, 0, 2));
+
+ // Add Image data to frame
+ image_frame->AddImage(image);
+
+ // return frame object
+ return image_frame;
+ } else {
+ // return empty frame
+ std::shared_ptr<Frame> image_frame(new Frame(1, 640, 480, background_color, 0, 2));
+
+ // return frame object
+ return image_frame;
+ }
+
+}
+
+// Generate JSON string of this object
+std::string QtHtmlReader::Json() {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::JsonValue for this object
+Json::Value QtHtmlReader::JsonValue() {
+
+ // Create root json object
+ Json::Value root = ReaderBase::JsonValue(); // get parent properties
+ root["type"] = "QtHtmlReader";
+ root["width"] = width;
+ root["height"] = height;
+ root["x_offset"] = x_offset;
+ root["y_offset"] = y_offset;
+ root["html"] = html;
+ root["css"] = css;
+ root["background_color"] = background_color;
+ root["gravity"] = gravity;
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void QtHtmlReader::SetJson(std::string value) {
+
+ // Parse JSON string into JSON objects
+ Json::Value root;
+ Json::CharReaderBuilder rbuilder;
+ Json::CharReader* reader(rbuilder.newCharReader());
+
+ std::string errors;
+ bool success = reader->parse( value.c_str(),
+ value.c_str() + value.size(), &root, &errors );
+ delete reader;
+
+ if (!success)
+ // Raise exception
+ throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
+
+ try
+ {
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (exception e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
+ }
+}
+
+// Load Json::JsonValue into this object
+void QtHtmlReader::SetJsonValue(Json::Value root) {
+
+ // Set parent data
+ ReaderBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["width"].isNull())
+ width = root["width"].asInt();
+ if (!root["height"].isNull())
+ height = root["height"].asInt();
+ if (!root["x_offset"].isNull())
+ x_offset = root["x_offset"].asInt();
+ if (!root["y_offset"].isNull())
+ y_offset = root["y_offset"].asInt();
+ if (!root["html"].isNull())
+ html = root["html"].asString();
+ if (!root["css"].isNull())
+ css = root["css"].asString();
+ if (!root["background_color"].isNull())
+ background_color = root["background_color"].asString();
+ if (!root["gravity"].isNull())
+ gravity = (GravityType) root["gravity"].asInt();
+
+ // Re-Open path, and re-init everything (if needed)
+ if (is_open)
+ {
+ Close();
+ Open();
+ }
+}
diff --git a/src/QtTextReader.cpp b/src/QtTextReader.cpp
new file mode 100644
index 00000000..38240c96
--- /dev/null
+++ b/src/QtTextReader.cpp
@@ -0,0 +1,290 @@
+/**
+ * @file
+ * @brief Source file for QtTextReader class
+ * @author Jonathan Thomas <jonathan@openshot.org>
+ * @author Sergei Kolesov (jediserg)
+ * @author Jeff Shillitto (jeffski)
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../include/QtTextReader.h"
+#include <QImage>
+#include <QPainter>
+
+using namespace openshot;
+
+/// Default constructor (blank text)
+QtTextReader::QtTextReader() : width(1024), height(768), x_offset(0), y_offset(0), text(""), font(QFont("Arial", 10)), text_color("#ffffff"), background_color("#000000"), is_open(false), gravity(GRAVITY_CENTER)
+{
+ // Open and Close the reader, to populate its attributes (such as height, width, etc...)
+ Open();
+ Close();
+}
+
+QtTextReader::QtTextReader(int width, int height, int x_offset, int y_offset, GravityType gravity, std::string text, QFont font, std::string text_color, std::string background_color)
+: width(width), height(height), x_offset(x_offset), y_offset(y_offset), text(text), font(font), text_color(text_color), background_color(background_color), is_open(false), gravity(gravity)
+{
+ // Open and Close the reader, to populate its attributes (such as height, width, etc...)
+ Open();
+ Close();
+}
+
+void QtTextReader::SetTextBackgroundColor(string color) {
+ text_background_color = color;
+
+ // Open and Close the reader, to populate its attributes (such as height, width, etc...) plus the text background color
+ Open();
+ Close();
+}
+
+// Open reader
+void QtTextReader::Open()
+{
+ // Open reader if not already open
+ if (!is_open)
+ {
+ // create image
+ image = std::shared_ptr<QImage>(new QImage(width, height, QImage::Format_RGBA8888));
+ image->fill(QColor(background_color.c_str()));
+
+ QPainter painter;
+ if (!painter.begin(image.get())) {
+ return;
+ }
+
+ // set background
+ if (!text_background_color.empty()) {
+ painter.setBackgroundMode(Qt::OpaqueMode);
+ painter.setBackground(QBrush(text_background_color.c_str()));
+ }
+
+ // set font color
+ painter.setPen(QPen(text_color.c_str()));
+
+ // set font
+ painter.setFont(font);
+
+ // Set gravity (map between OpenShot and Qt)
+ int align_flag = 0;
+ switch (gravity)
+ {
+ case GRAVITY_TOP_LEFT:
+ align_flag = Qt::AlignLeft | Qt::AlignTop;
+ break;
+ case GRAVITY_TOP:
+ align_flag = Qt::AlignHCenter | Qt::AlignTop;
+ break;
+ case GRAVITY_TOP_RIGHT:
+ align_flag = Qt::AlignRight | Qt::AlignTop;
+ break;
+ case GRAVITY_LEFT:
+ align_flag = Qt::AlignVCenter | Qt::AlignLeft;
+ break;
+ case GRAVITY_CENTER:
+ align_flag = Qt::AlignCenter;
+ break;
+ case GRAVITY_RIGHT:
+ align_flag = Qt::AlignVCenter | Qt::AlignRight;
+ break;
+ case GRAVITY_BOTTOM_LEFT:
+ align_flag = Qt::AlignLeft | Qt::AlignBottom;
+ break;
+ case GRAVITY_BOTTOM:
+ align_flag = Qt::AlignHCenter | Qt::AlignBottom;
+ break;
+ case GRAVITY_BOTTOM_RIGHT:
+ align_flag = Qt::AlignRight | Qt::AlignBottom;
+ break;
+ }
+
+ // Draw image
+ painter.drawText(x_offset, y_offset, width, height, align_flag, text.c_str());
+
+ painter.end();
+
+ // Update image properties
+ info.has_audio = false;
+ info.has_video = true;
+ info.file_size = 0;
+ info.vcodec = "QImage";
+ info.width = width;
+ info.height = height;
+ info.pixel_ratio.num = 1;
+ info.pixel_ratio.den = 1;
+ info.duration = 60 * 60 * 1; // 1 hour duration
+ info.fps.num = 30;
+ info.fps.den = 1;
+ info.video_timebase.num = 1;
+ info.video_timebase.den = 30;
+ info.video_length = round(info.duration * info.fps.ToDouble());
+
+ // Calculate the DAR (display aspect ratio)
+ Fraction font_size(info.width * info.pixel_ratio.num, info.height * info.pixel_ratio.den);
+
+ // Reduce size fraction
+ font_size.Reduce();
+
+ // Set the ratio based on the reduced fraction
+ info.display_ratio.num = font_size.num;
+ info.display_ratio.den = font_size.den;
+
+ // Mark as "open"
+ is_open = true;
+ }
+}
+
+// Close reader
+void QtTextReader::Close()
+{
+ // Close all objects, if reader is 'open'
+ if (is_open)
+ {
+ // Mark as "closed"
+ is_open = false;
+
+ // Delete the image
+ image.reset();
+
+ info.vcodec = "";
+ info.acodec = "";
+ }
+}
+
+// Get an openshot::Frame object for a specific frame number of this reader.
+std::shared_ptr<Frame> QtTextReader::GetFrame(int64_t requested_frame)
+{
+ if (image)
+ {
+ // Create or get frame object
+ std::shared_ptr<Frame> image_frame(new Frame(requested_frame, image->size().width(), image->size().height(), background_color, 0, 2));
+
+ // Add Image data to frame
+ image_frame->AddImage(image);
+
+ // return frame object
+ return image_frame;
+ } else {
+ // return empty frame
+ std::shared_ptr<Frame> image_frame(new Frame(1, 640, 480, background_color, 0, 2));
+
+ // return frame object
+ return image_frame;
+ }
+
+}
+
+// Generate JSON string of this object
+std::string QtTextReader::Json() {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::JsonValue for this object
+Json::Value QtTextReader::JsonValue() {
+
+ // Create root json object
+ Json::Value root = ReaderBase::JsonValue(); // get parent properties
+ root["type"] = "QtTextReader";
+ root["width"] = width;
+ root["height"] = height;
+ root["x_offset"] = x_offset;
+ root["y_offset"] = y_offset;
+ root["text"] = text;
+ root["font"] = font.toString().toStdString();
+ root["text_color"] = text_color;
+ root["background_color"] = background_color;
+ root["text_background_color"] = text_background_color;
+ root["gravity"] = gravity;
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void QtTextReader::SetJson(std::string value) {
+
+ // Parse JSON string into JSON objects
+ Json::Value root;
+ Json::CharReaderBuilder rbuilder;
+ Json::CharReader* reader(rbuilder.newCharReader());
+
+ std::string errors;
+ bool success = reader->parse( value.c_str(),
+ value.c_str() + value.size(), &root, &errors );
+ delete reader;
+
+ if (!success)
+ // Raise exception
+ throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
+
+ try
+ {
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (exception e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
+ }
+}
+
+// Load Json::JsonValue into this object
+void QtTextReader::SetJsonValue(Json::Value root) {
+
+ // Set parent data
+ ReaderBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["width"].isNull())
+ width = root["width"].asInt();
+ if (!root["height"].isNull())
+ height = root["height"].asInt();
+ if (!root["x_offset"].isNull())
+ x_offset = root["x_offset"].asInt();
+ if (!root["y_offset"].isNull())
+ y_offset = root["y_offset"].asInt();
+ if (!root["text"].isNull())
+ text = root["text"].asString();
+ if (!root["font"].isNull())
+ font.fromString(QString::fromStdString(root["font"].asString()));
+ if (!root["text_color"].isNull())
+ text_color = root["text_color"].asString();
+ if (!root["background_color"].isNull())
+ background_color = root["background_color"].asString();
+ if (!root["text_background_color"].isNull())
+ text_background_color = root["text_background_color"].asString();
+ if (!root["gravity"].isNull())
+ gravity = (GravityType) root["gravity"].asInt();
+
+ // Re-Open path, and re-init everything (if needed)
+ if (is_open)
+ {
+ Close();
+ Open();
+ }
+}
diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt
index bc07571f..eb860751 100644
--- a/src/bindings/python/CMakeLists.txt
+++ b/src/bindings/python/CMakeLists.txt
@@ -78,26 +78,21 @@ if (PYTHONLIBS_FOUND AND PYTHONINTERP_FOUND)
PUBLIC ${PYTHON_LIBRARIES} openshot)
### Check if the following Debian-friendly python module path exists
- SET(PYTHON_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/site-packages")
+ SET(PYTHON_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/dist-packages")
if (NOT EXISTS ${PYTHON_MODULE_PATH})
- ### Check if another Debian-friendly python module path exists
- SET(PYTHON_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/dist-packages")
- if (NOT EXISTS ${PYTHON_MODULE_PATH})
-
- ### Calculate the python module path (using distutils)
- execute_process ( COMMAND ${PYTHON_EXECUTABLE} -c "\
+ ### Calculate the python module path (using distutils)
+ execute_process ( COMMAND ${PYTHON_EXECUTABLE} -c "\
from distutils.sysconfig import get_python_lib; \
print( get_python_lib( plat_specific=True, prefix='${CMAKE_INSTALL_PREFIX}' ) )"
- OUTPUT_VARIABLE _ABS_PYTHON_MODULE_PATH
- OUTPUT_STRIP_TRAILING_WHITESPACE )
+ OUTPUT_VARIABLE _ABS_PYTHON_MODULE_PATH
+ OUTPUT_STRIP_TRAILING_WHITESPACE )
- GET_FILENAME_COMPONENT(_ABS_PYTHON_MODULE_PATH
- "${_ABS_PYTHON_MODULE_PATH}" ABSOLUTE)
- FILE(RELATIVE_PATH _REL_PYTHON_MODULE_PATH
- ${CMAKE_INSTALL_PREFIX} ${_ABS_PYTHON_MODULE_PATH})
- SET(PYTHON_MODULE_PATH ${_ABS_PYTHON_MODULE_PATH})
- endif()
+ GET_FILENAME_COMPONENT(_ABS_PYTHON_MODULE_PATH
+ "${_ABS_PYTHON_MODULE_PATH}" ABSOLUTE)
+ FILE(RELATIVE_PATH _REL_PYTHON_MODULE_PATH
+ ${CMAKE_INSTALL_PREFIX} ${_ABS_PYTHON_MODULE_PATH})
+ SET(PYTHON_MODULE_PATH ${_ABS_PYTHON_MODULE_PATH})
endif()
message("PYTHON_MODULE_PATH: ${PYTHON_MODULE_PATH}")
diff --git a/src/bindings/python/openshot.i b/src/bindings/python/openshot.i
index a19919c1..01844834 100644
--- a/src/bindings/python/openshot.i
+++ b/src/bindings/python/openshot.i
@@ -86,8 +86,10 @@
#include "../../../include/PlayerBase.h"
#include "../../../include/Point.h"
#include "../../../include/Profiles.h"
+#include "../../../include/QtHtmlReader.h"
#include "../../../include/QtImageReader.h"
#include "../../../include/QtPlayer.h"
+#include "../../../include/QtTextReader.h"
#include "../../../include/KeyFrame.h"
#include "../../../include/RendererBase.h"
#include "../../../include/Settings.h"
@@ -123,6 +125,34 @@
}
}
+/* Instantiate the required template specializations */
+%template() std::map;
+
+/* Make openshot.Fraction more Pythonic */
+%extend openshot::Fraction {
+%{
+ #include
+ #include